/*
 * Optimizations for Tiny Code Generator for QEMU
 *
 * Copyright (c) 2010 Samsung Electronics.
 * Contributed by Kirill Batuzov <batuzovk@ispras.ru>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include "qemu/osdep.h"
#include "qemu/int128.h"
#include "qemu/interval-tree.h"
#include "tcg/tcg-op-common.h"
#include "tcg-internal.h"

#define CASE_OP_32_64(x)                        \
        glue(glue(case INDEX_op_, x), _i32):    \
        glue(glue(case INDEX_op_, x), _i64)

#define CASE_OP_32_64_VEC(x)                    \
        glue(glue(case INDEX_op_, x), _i32):    \
        glue(glue(case INDEX_op_, x), _i64):    \
        glue(glue(case INDEX_op_, x), _vec)
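
/*
 * For example, CASE_OP_32_64(add) expands to
 *     case INDEX_op_add_i32: case INDEX_op_add_i64
 * so one switch arm matches both operand widths.
 */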
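
/*
 * Describes a memory range [itree.start, itree.last] known to contain
 * a copy of the temporary TS, tracked in an interval tree so that
 * overlapping stores can invalidate stale entries.
 */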
typedef struct MemCopyInfo {
    IntervalTreeNode itree;
    QSIMPLEQ_ENTRY (MemCopyInfo) next;
    TCGTemp *ts;
    TCGType type;
} MemCopyInfo;

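/*
 * Per-temporary state: constant tracking, membership in a circular
 * list of known copies, memory locations known to hold the value,
 * and known-bits masks.
 */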
typedef struct TempOptInfo {
    bool is_const;
    TCGTemp *prev_copy;
    TCGTemp *next_copy;
    QSIMPLEQ_HEAD(, MemCopyInfo) mem_copy;
    uint64_t val;
    uint64_t z_mask;  /* mask bit is 0 if and only if value bit is 0 */
    uint64_t s_mask;  /* mask bit is 1 if value bit matches msb */
} TempOptInfo;

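/* Context for one invocation of the optimization pass. */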
typedef struct OptContext {
    TCGContext *tcg;
    TCGOp *prev_mb;
    TCGTempSet temps_used;

    IntervalTreeRoot mem_copy;
    QSIMPLEQ_HEAD(, MemCopyInfo) mem_free;

    /* In flight values from optimization. */
    uint64_t z_mask;  /* mask bit is 0 iff value bit is 0 */
    uint64_t s_mask;  /* mask bit is 1 if value bit matches msb */
    TCGType type;
} OptContext;

static inline TempOptInfo *ts_info(TCGTemp *ts)
{
    return ts->state_ptr;
}

static inline TempOptInfo *arg_info(TCGArg arg)
{
    return ts_info(arg_temp(arg));
}

static inline bool ti_is_const(TempOptInfo *ti)
{
    return ti->is_const;
}

static inline uint64_t ti_const_val(TempOptInfo *ti)
{
    return ti->val;
}

static inline bool ti_is_const_val(TempOptInfo *ti, uint64_t val)
{
    return ti_is_const(ti) && ti_const_val(ti) == val;
}

static inline bool ts_is_const(TCGTemp *ts)
{
    return ti_is_const(ts_info(ts));
}

static inline bool ts_is_const_val(TCGTemp *ts, uint64_t val)
{
    return ti_is_const_val(ts_info(ts), val);
}

static inline bool arg_is_const(TCGArg arg)
{
    return ts_is_const(arg_temp(arg));
}

static inline bool arg_is_const_val(TCGArg arg, uint64_t val)
{
    return ts_is_const_val(arg_temp(arg), val);
}

static inline bool ts_is_copy(TCGTemp *ts)
{
    return ts_info(ts)->next_copy != ts;
}

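/* Of two copies of the same value, prefer the one whose kind ranks higher. */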
static TCGTemp *cmp_better_copy(TCGTemp *a, TCGTemp *b)
{
    return a->kind < b->kind ? b : a;
}

/* Initialize and activate a temporary. */
static void init_ts_info(OptContext *ctx, TCGTemp *ts)
{
    size_t idx = temp_idx(ts);
    TempOptInfo *ti;

    if (test_bit(idx, ctx->temps_used.l)) {
        return;
    }
    set_bit(idx, ctx->temps_used.l);

    ti = ts->state_ptr;
    if (ti == NULL) {
        ti = tcg_malloc(sizeof(TempOptInfo));
        ts->state_ptr = ti;
    }

    ti->next_copy = ts;
    ti->prev_copy = ts;
    QSIMPLEQ_INIT(&ti->mem_copy);
    if (ts->kind == TEMP_CONST) {
        ti->is_const = true;
        ti->val = ts->val;
        ti->z_mask = ts->val;
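        /*
         * Every bit from the lowest "sign" bit upward repeats the msb;
         * e.g. val 0xff gives clrsb64() == 55, and thus
         * s_mask == 0xffffffffffffff00 (bits 8..63 match the msb).
         */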
        ti->s_mask = INT64_MIN >> clrsb64(ts->val);
    } else {
        ti->is_const = false;
        ti->z_mask = -1;
        ti->s_mask = 0;
    }
}

static MemCopyInfo *mem_copy_first(OptContext *ctx, intptr_t s, intptr_t l)
{
    IntervalTreeNode *r = interval_tree_iter_first(&ctx->mem_copy, s, l);
    return r ? container_of(r, MemCopyInfo, itree) : NULL;
}

static MemCopyInfo *mem_copy_next(MemCopyInfo *mem, intptr_t s, intptr_t l)
{
    IntervalTreeNode *r = interval_tree_iter_next(&mem->itree, s, l);
    return r ? container_of(r, MemCopyInfo, itree) : NULL;
}

static void remove_mem_copy(OptContext *ctx, MemCopyInfo *mc)
{
    TCGTemp *ts = mc->ts;
    TempOptInfo *ti = ts_info(ts);

    interval_tree_remove(&mc->itree, &ctx->mem_copy);
    QSIMPLEQ_REMOVE(&ti->mem_copy, mc, MemCopyInfo, next);
    QSIMPLEQ_INSERT_TAIL(&ctx->mem_free, mc, next);
}

static void remove_mem_copy_in(OptContext *ctx, intptr_t s, intptr_t l)
{
    while (true) {
        MemCopyInfo *mc = mem_copy_first(ctx, s, l);
        if (!mc) {
            break;
        }
        remove_mem_copy(ctx, mc);
    }
}

static void remove_mem_copy_all(OptContext *ctx)
{
    remove_mem_copy_in(ctx, 0, -1);
    tcg_debug_assert(interval_tree_is_empty(&ctx->mem_copy));
}

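/* Return the best (per cmp_better_copy) among all copies of TS. */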
static TCGTemp *find_better_copy(TCGTemp *ts)
{
    TCGTemp *i, *ret;

    /* If this is already readonly, we can't do better. */
    if (temp_readonly(ts)) {
        return ts;
    }

    ret = ts;
    for (i = ts_info(ts)->next_copy; i != ts; i = ts_info(i)->next_copy) {
        ret = cmp_better_copy(ret, i);
    }
    return ret;
}

static void move_mem_copies(TCGTemp *dst_ts, TCGTemp *src_ts)
{
    TempOptInfo *si = ts_info(src_ts);
    TempOptInfo *di = ts_info(dst_ts);
    MemCopyInfo *mc;

    QSIMPLEQ_FOREACH(mc, &si->mem_copy, next) {
        tcg_debug_assert(mc->ts == src_ts);
        mc->ts = dst_ts;
    }
    QSIMPLEQ_CONCAT(&di->mem_copy, &si->mem_copy);
}

/* Reset TEMP's state, possibly removing the temp from the list of copies. */
static void reset_ts(OptContext *ctx, TCGTemp *ts)
{
    TempOptInfo *ti = ts_info(ts);
    TCGTemp *pts = ti->prev_copy;
    TCGTemp *nts = ti->next_copy;
    TempOptInfo *pi = ts_info(pts);
    TempOptInfo *ni = ts_info(nts);

    ni->prev_copy = ti->prev_copy;
    pi->next_copy = ti->next_copy;
    ti->next_copy = ts;
    ti->prev_copy = ts;
    ti->is_const = false;
    ti->z_mask = -1;
    ti->s_mask = 0;

    if (!QSIMPLEQ_EMPTY(&ti->mem_copy)) {
        if (ts == nts) {
            /* Last temp copy being removed, the mem copies die. */
            MemCopyInfo *mc;
            QSIMPLEQ_FOREACH(mc, &ti->mem_copy, next) {
                interval_tree_remove(&mc->itree, &ctx->mem_copy);
            }
            QSIMPLEQ_CONCAT(&ctx->mem_free, &ti->mem_copy);
        } else {
            move_mem_copies(find_better_copy(nts), ts);
        }
    }
}

static void reset_temp(OptContext *ctx, TCGArg arg)
{
    reset_ts(ctx, arg_temp(arg));
}

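/* Record that the memory range [start, last] now holds a copy of TS. */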
static void record_mem_copy(OptContext *ctx, TCGType type,
                            TCGTemp *ts, intptr_t start, intptr_t last)
{
    MemCopyInfo *mc;
    TempOptInfo *ti;

    mc = QSIMPLEQ_FIRST(&ctx->mem_free);
    if (mc) {
        QSIMPLEQ_REMOVE_HEAD(&ctx->mem_free, next);
    } else {
        mc = tcg_malloc(sizeof(*mc));
    }

    memset(mc, 0, sizeof(*mc));
    mc->itree.start = start;
    mc->itree.last = last;
    mc->type = type;
    interval_tree_insert(&mc->itree, &ctx->mem_copy);

    ts = find_better_copy(ts);
    ti = ts_info(ts);
    mc->ts = ts;
    QSIMPLEQ_INSERT_TAIL(&ti->mem_copy, mc, next);
}

static bool ts_are_copies(TCGTemp *ts1, TCGTemp *ts2)
{
    TCGTemp *i;

    if (ts1 == ts2) {
        return true;
    }

    if (!ts_is_copy(ts1) || !ts_is_copy(ts2)) {
        return false;
    }

    for (i = ts_info(ts1)->next_copy; i != ts1; i = ts_info(i)->next_copy) {
        if (i == ts2) {
            return true;
        }
    }

    return false;
}

static bool args_are_copies(TCGArg arg1, TCGArg arg2)
{
    return ts_are_copies(arg_temp(arg1), arg_temp(arg2));
}

static TCGTemp *find_mem_copy_for(OptContext *ctx, TCGType type, intptr_t s)
{
    MemCopyInfo *mc;

    for (mc = mem_copy_first(ctx, s, s); mc; mc = mem_copy_next(mc, s, s)) {
        if (mc->itree.start == s && mc->type == type) {
            return find_better_copy(mc->ts);
        }
    }
    return NULL;
}

static TCGArg arg_new_constant(OptContext *ctx, uint64_t val)
{
    TCGType type = ctx->type;
    TCGTemp *ts;

    if (type == TCG_TYPE_I32) {
        val = (int32_t)val;
    }

    ts = tcg_constant_internal(type, val);
    init_ts_info(ctx, ts);

    return temp_arg(ts);
}

static TCGArg arg_new_temp(OptContext *ctx)
{
    TCGTemp *ts = tcg_temp_new_internal(ctx->type, TEMP_EBB);
    init_ts_info(ctx, ts);
    return temp_arg(ts);
}

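/*
 * Rewrite OP as a move from SRC to DST.  If the two are already known
 * copies, the op is removed instead; when the types match, DST joins
 * SRC's copy list and inherits constness and known-bits masks.
 */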
static bool tcg_opt_gen_mov(OptContext *ctx, TCGOp *op, TCGArg dst, TCGArg src)
{
    TCGTemp *dst_ts = arg_temp(dst);
    TCGTemp *src_ts = arg_temp(src);
    TempOptInfo *di;
    TempOptInfo *si;
    TCGOpcode new_op;

    if (ts_are_copies(dst_ts, src_ts)) {
        tcg_op_remove(ctx->tcg, op);
        return true;
    }

    reset_ts(ctx, dst_ts);
    di = ts_info(dst_ts);
    si = ts_info(src_ts);

    switch (ctx->type) {
    case TCG_TYPE_I32:
        new_op = INDEX_op_mov_i32;
        break;
    case TCG_TYPE_I64:
        new_op = INDEX_op_mov_i64;
        break;
    case TCG_TYPE_V64:
    case TCG_TYPE_V128:
    case TCG_TYPE_V256:
        /* TCGOP_VECL and TCGOP_VECE remain unchanged. */
        new_op = INDEX_op_mov_vec;
        break;
    default:
        g_assert_not_reached();
    }
    op->opc = new_op;
    op->args[0] = dst;
    op->args[1] = src;

    di->z_mask = si->z_mask;
    di->s_mask = si->s_mask;

    if (src_ts->type == dst_ts->type) {
        TempOptInfo *ni = ts_info(si->next_copy);

        di->next_copy = si->next_copy;
        di->prev_copy = src_ts;
        ni->prev_copy = dst_ts;
        si->next_copy = dst_ts;
        di->is_const = si->is_const;
        di->val = si->val;

        if (!QSIMPLEQ_EMPTY(&si->mem_copy)
            && cmp_better_copy(src_ts, dst_ts) == dst_ts) {
            move_mem_copies(dst_ts, src_ts);
        }
    }
    return true;
}

static bool tcg_opt_gen_movi(OptContext *ctx, TCGOp *op,
                             TCGArg dst, uint64_t val)
{
    /* Convert movi to mov with constant temp. */
    return tcg_opt_gen_mov(ctx, op, dst, arg_new_constant(ctx, val));
}

static uint64_t do_constant_folding_2(TCGOpcode op, uint64_t x, uint64_t y)
{
    uint64_t l64, h64;

    switch (op) {
    CASE_OP_32_64(add):
        return x + y;

    CASE_OP_32_64(sub):
        return x - y;

    CASE_OP_32_64(mul):
        return x * y;

    CASE_OP_32_64_VEC(and):
        return x & y;

    CASE_OP_32_64_VEC(or):
        return x | y;

    CASE_OP_32_64_VEC(xor):
        return x ^ y;

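    /*
     * TCG leaves shifts and rotates with out-of-range counts
     * unspecified; masking the count keeps the C evaluation below
     * well defined.
     */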
    case INDEX_op_shl_i32:
        return (uint32_t)x << (y & 31);

    case INDEX_op_shl_i64:
        return (uint64_t)x << (y & 63);

    case INDEX_op_shr_i32:
        return (uint32_t)x >> (y & 31);

    case INDEX_op_shr_i64:
        return (uint64_t)x >> (y & 63);

    case INDEX_op_sar_i32:
        return (int32_t)x >> (y & 31);

    case INDEX_op_sar_i64:
        return (int64_t)x >> (y & 63);

    case INDEX_op_rotr_i32:
        return ror32(x, y & 31);

    case INDEX_op_rotr_i64:
        return ror64(x, y & 63);

    case INDEX_op_rotl_i32:
        return rol32(x, y & 31);

    case INDEX_op_rotl_i64:
        return rol64(x, y & 63);

    CASE_OP_32_64_VEC(not):
        return ~x;

    CASE_OP_32_64(neg):
        return -x;

    CASE_OP_32_64_VEC(andc):
        return x & ~y;

    CASE_OP_32_64_VEC(orc):
        return x | ~y;

    CASE_OP_32_64_VEC(eqv):
        return ~(x ^ y);

    CASE_OP_32_64_VEC(nand):
        return ~(x & y);

    CASE_OP_32_64_VEC(nor):
        return ~(x | y);

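    /* For clz/ctz, the second operand supplies the result for zero input. */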
    case INDEX_op_clz_i32:
        return (uint32_t)x ? clz32(x) : y;

    case INDEX_op_clz_i64:
        return x ? clz64(x) : y;

    case INDEX_op_ctz_i32:
        return (uint32_t)x ? ctz32(x) : y;

    case INDEX_op_ctz_i64:
        return x ? ctz64(x) : y;

    case INDEX_op_ctpop_i32:
        return ctpop32(x);

    case INDEX_op_ctpop_i64:
        return ctpop64(x);

    CASE_OP_32_64(ext8s):
        return (int8_t)x;

    CASE_OP_32_64(ext16s):
        return (int16_t)x;

    CASE_OP_32_64(ext8u):
        return (uint8_t)x;

    CASE_OP_32_64(ext16u):
        return (uint16_t)x;

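    /* For bswap16/32, the second operand carries the TCG_BSWAP_* flags. */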
    CASE_OP_32_64(bswap16):
        x = bswap16(x);
        return y & TCG_BSWAP_OS ? (int16_t)x : x;

    CASE_OP_32_64(bswap32):
        x = bswap32(x);
        return y & TCG_BSWAP_OS ? (int32_t)x : x;

    case INDEX_op_bswap64_i64:
        return bswap64(x);

    case INDEX_op_ext_i32_i64:
    case INDEX_op_ext32s_i64:
        return (int32_t)x;

    case INDEX_op_extu_i32_i64:
    case INDEX_op_extrl_i64_i32:
    case INDEX_op_ext32u_i64:
        return (uint32_t)x;

    case INDEX_op_extrh_i64_i32:
        return (uint64_t)x >> 32;

    case INDEX_op_muluh_i32:
        return ((uint64_t)(uint32_t)x * (uint32_t)y) >> 32;
    case INDEX_op_mulsh_i32:
        return ((int64_t)(int32_t)x * (int32_t)y) >> 32;

    case INDEX_op_muluh_i64:
        mulu64(&l64, &h64, x, y);
        return h64;
    case INDEX_op_mulsh_i64:
        muls64(&l64, &h64, x, y);
        return h64;

    case INDEX_op_div_i32:
        /* Avoid crashing on divide by zero, otherwise undefined. */
        return (int32_t)x / ((int32_t)y ? : 1);
    case INDEX_op_divu_i32:
        return (uint32_t)x / ((uint32_t)y ? : 1);
    case INDEX_op_div_i64:
        return (int64_t)x / ((int64_t)y ? : 1);
    case INDEX_op_divu_i64:
        return (uint64_t)x / ((uint64_t)y ? : 1);

    case INDEX_op_rem_i32:
        return (int32_t)x % ((int32_t)y ? : 1);
    case INDEX_op_remu_i32:
        return (uint32_t)x % ((uint32_t)y ? : 1);
    case INDEX_op_rem_i64:
        return (int64_t)x % ((int64_t)y ? : 1);
    case INDEX_op_remu_i64:
        return (uint64_t)x % ((uint64_t)y ? : 1);

    default:
        g_assert_not_reached();
    }
}

static uint64_t do_constant_folding(TCGOpcode op, TCGType type,
                                    uint64_t x, uint64_t y)
{
    uint64_t res = do_constant_folding_2(op, x, y);
    if (type == TCG_TYPE_I32) {
        res = (int32_t)res;
    }
    return res;
}

static bool do_constant_folding_cond_32(uint32_t x, uint32_t y, TCGCond c)
{
    switch (c) {
    case TCG_COND_EQ:
        return x == y;
    case TCG_COND_NE:
        return x != y;
    case TCG_COND_LT:
        return (int32_t)x < (int32_t)y;
    case TCG_COND_GE:
        return (int32_t)x >= (int32_t)y;
    case TCG_COND_LE:
        return (int32_t)x <= (int32_t)y;
    case TCG_COND_GT:
        return (int32_t)x > (int32_t)y;
    case TCG_COND_LTU:
        return x < y;
    case TCG_COND_GEU:
        return x >= y;
    case TCG_COND_LEU:
        return x <= y;
    case TCG_COND_GTU:
        return x > y;
    case TCG_COND_TSTEQ:
        return (x & y) == 0;
    case TCG_COND_TSTNE:
        return (x & y) != 0;
    case TCG_COND_ALWAYS:
    case TCG_COND_NEVER:
        break;
    }
    g_assert_not_reached();
}

static bool do_constant_folding_cond_64(uint64_t x, uint64_t y, TCGCond c)
{
    switch (c) {
    case TCG_COND_EQ:
        return x == y;
    case TCG_COND_NE:
        return x != y;
    case TCG_COND_LT:
        return (int64_t)x < (int64_t)y;
    case TCG_COND_GE:
        return (int64_t)x >= (int64_t)y;
    case TCG_COND_LE:
        return (int64_t)x <= (int64_t)y;
    case TCG_COND_GT:
        return (int64_t)x > (int64_t)y;
    case TCG_COND_LTU:
        return x < y;
    case TCG_COND_GEU:
        return x >= y;
    case TCG_COND_LEU:
        return x <= y;
    case TCG_COND_GTU:
        return x > y;
    case TCG_COND_TSTEQ:
        return (x & y) == 0;
    case TCG_COND_TSTNE:
        return (x & y) != 0;
    case TCG_COND_ALWAYS:
    case TCG_COND_NEVER:
        break;
    }
    g_assert_not_reached();
}

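/*
 * Evaluate COND for two operands known to be equal to each other.
 * The result of a TST condition still depends on the value, so
 * those return -1.
 */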
static int do_constant_folding_cond_eq(TCGCond c)
{
    switch (c) {
    case TCG_COND_GT:
    case TCG_COND_LTU:
    case TCG_COND_LT:
    case TCG_COND_GTU:
    case TCG_COND_NE:
        return 0;
    case TCG_COND_GE:
    case TCG_COND_GEU:
    case TCG_COND_LE:
    case TCG_COND_LEU:
    case TCG_COND_EQ:
        return 1;
    case TCG_COND_TSTEQ:
    case TCG_COND_TSTNE:
        return -1;
    case TCG_COND_ALWAYS:
    case TCG_COND_NEVER:
        break;
    }
    g_assert_not_reached();
}

/*
 * Return -1 if the condition can't be simplified,
 * and the result of the condition (0 or 1) if it can.
 */
static int do_constant_folding_cond(TCGType type, TCGArg x,
                                    TCGArg y, TCGCond c)
{
    if (arg_is_const(x) && arg_is_const(y)) {
        uint64_t xv = arg_info(x)->val;
        uint64_t yv = arg_info(y)->val;

        switch (type) {
        case TCG_TYPE_I32:
            return do_constant_folding_cond_32(xv, yv, c);
        case TCG_TYPE_I64:
            return do_constant_folding_cond_64(xv, yv, c);
        default:
            /* Only scalar comparisons are optimizable */
            return -1;
        }
    } else if (args_are_copies(x, y)) {
        return do_constant_folding_cond_eq(c);
    } else if (arg_is_const_val(y, 0)) {
        switch (c) {
        case TCG_COND_LTU:
        case TCG_COND_TSTNE:
            return 0;
        case TCG_COND_GEU:
        case TCG_COND_TSTEQ:
            return 1;
        default:
            return -1;
        }
    }
    return -1;
}

/**
 * swap_commutative:
 * @dest: TCGArg of the destination argument, or NO_DEST.
 * @p1: first paired argument
 * @p2: second paired argument
 *
 * If *@p1 is a constant and *@p2 is not, swap.
 * If *@p2 matches @dest, swap.
 * Return true if a swap was performed.
 */

#define NO_DEST temp_arg(NULL)

static bool swap_commutative(TCGArg dest, TCGArg *p1, TCGArg *p2)
{
    TCGArg a1 = *p1, a2 = *p2;
    int sum = 0;
    sum += arg_is_const(a1);
    sum -= arg_is_const(a2);

    /* Prefer the constant in the second argument, and then the form
       op a, a, b, which is better handled on non-RISC hosts. */
    if (sum > 0 || (sum == 0 && dest == a2)) {
        *p1 = a2;
        *p2 = a1;
        return true;
    }
    return false;
}

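/* As swap_commutative, for the double-word pairs at p1[0,1] and p2[0,1]. */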
static bool swap_commutative2(TCGArg *p1, TCGArg *p2)
{
    int sum = 0;
    sum += arg_is_const(p1[0]);
    sum += arg_is_const(p1[1]);
    sum -= arg_is_const(p2[0]);
    sum -= arg_is_const(p2[1]);
    if (sum > 0) {
        TCGArg t;
        t = p1[0], p1[0] = p2[0], p2[0] = t;
        t = p1[1], p1[1] = p2[1], p2[1] = t;
        return true;
    }
    return false;
}

/*
 * Return -1 if the condition can't be simplified,
 * and the result of the condition (0 or 1) if it can.
 */
static int do_constant_folding_cond1(OptContext *ctx, TCGOp *op, TCGArg dest,
                                     TCGArg *p1, TCGArg *p2, TCGArg *pcond)
{
    TCGCond cond;
    bool swap;
    int r;

    swap = swap_commutative(dest, p1, p2);
    cond = *pcond;
    if (swap) {
        *pcond = cond = tcg_swap_cond(cond);
    }

    r = do_constant_folding_cond(ctx->type, *p1, *p2, cond);
    if (r >= 0) {
        return r;
    }
    if (!is_tst_cond(cond)) {
        return -1;
    }

    /*
     * TSTNE x,x -> NE x,0
     * TSTNE x,-1 -> NE x,0
     */
    if (args_are_copies(*p1, *p2) || arg_is_const_val(*p2, -1)) {
        *p2 = arg_new_constant(ctx, 0);
        *pcond = tcg_tst_eqne_cond(cond);
        return -1;
    }

    /* TSTNE x,sign -> LT x,0 */
    if (arg_is_const_val(*p2, (ctx->type == TCG_TYPE_I32
                               ? INT32_MIN : INT64_MIN))) {
        *p2 = arg_new_constant(ctx, 0);
        *pcond = tcg_tst_ltge_cond(cond);
        return -1;
    }

    /* Expand to AND with a temporary if no backend support. */
    if (!TCG_TARGET_HAS_tst) {
        TCGOpcode and_opc = (ctx->type == TCG_TYPE_I32
                             ? INDEX_op_and_i32 : INDEX_op_and_i64);
        TCGOp *op2 = tcg_op_insert_before(ctx->tcg, op, and_opc, 3);
        TCGArg tmp = arg_new_temp(ctx);

        op2->args[0] = tmp;
        op2->args[1] = *p1;
        op2->args[2] = *p2;

        *p1 = tmp;
        *p2 = arg_new_constant(ctx, 0);
        *pcond = tcg_tst_eqne_cond(cond);
    }
    return -1;
}

static int do_constant_folding_cond2(OptContext *ctx, TCGOp *op, TCGArg *args)
{
    TCGArg al, ah, bl, bh;
    TCGCond c;
    bool swap;
    int r;

    swap = swap_commutative2(args, args + 2);
    c = args[4];
    if (swap) {
        args[4] = c = tcg_swap_cond(c);
    }

    al = args[0];
    ah = args[1];
    bl = args[2];
    bh = args[3];

    if (arg_is_const(bl) && arg_is_const(bh)) {
        tcg_target_ulong blv = arg_info(bl)->val;
        tcg_target_ulong bhv = arg_info(bh)->val;
        uint64_t b = deposit64(blv, 32, 32, bhv);

        if (arg_is_const(al) && arg_is_const(ah)) {
            tcg_target_ulong alv = arg_info(al)->val;
            tcg_target_ulong ahv = arg_info(ah)->val;
            uint64_t a = deposit64(alv, 32, 32, ahv);

            r = do_constant_folding_cond_64(a, b, c);
            if (r >= 0) {
                return r;
            }
        }

        if (b == 0) {
            switch (c) {
            case TCG_COND_LTU:
            case TCG_COND_TSTNE:
                return 0;
            case TCG_COND_GEU:
            case TCG_COND_TSTEQ:
                return 1;
            default:
                break;
            }
        }

        /* TSTNE x,-1 -> NE x,0 */
        if (b == -1 && is_tst_cond(c)) {
            args[3] = args[2] = arg_new_constant(ctx, 0);
            args[4] = tcg_tst_eqne_cond(c);
            return -1;
        }

        /* TSTNE x,sign -> LT x,0 */
        if (b == INT64_MIN && is_tst_cond(c)) {
            /* bl must be 0, so copy that to bh */
            args[3] = bl;
            args[4] = tcg_tst_ltge_cond(c);
            return -1;
        }
    }

    if (args_are_copies(al, bl) && args_are_copies(ah, bh)) {
        r = do_constant_folding_cond_eq(c);
        if (r >= 0) {
            return r;
        }

        /* TSTNE x,x -> NE x,0 */
        if (is_tst_cond(c)) {
            args[3] = args[2] = arg_new_constant(ctx, 0);
            args[4] = tcg_tst_eqne_cond(c);
            return -1;
        }
    }

    /* Expand to AND with a temporary if no backend support. */
    if (!TCG_TARGET_HAS_tst && is_tst_cond(c)) {
        TCGOp *op1 = tcg_op_insert_before(ctx->tcg, op, INDEX_op_and_i32, 3);
        TCGOp *op2 = tcg_op_insert_before(ctx->tcg, op, INDEX_op_and_i32, 3);
        TCGArg t1 = arg_new_temp(ctx);
        TCGArg t2 = arg_new_temp(ctx);

        op1->args[0] = t1;
        op1->args[1] = al;
        op1->args[2] = bl;
        op2->args[0] = t2;
        op2->args[1] = ah;
        op2->args[2] = bh;

        args[0] = t1;
        args[1] = t2;
        args[3] = args[2] = arg_new_constant(ctx, 0);
        args[4] = tcg_tst_eqne_cond(c);
    }
    return -1;
}

static void init_arguments(OptContext *ctx, TCGOp *op, int nb_args)
{
    for (int i = 0; i < nb_args; i++) {
        TCGTemp *ts = arg_temp(op->args[i]);
        init_ts_info(ctx, ts);
    }
}

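/* Replace each input argument of OP with the best copy of its value. */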
static void copy_propagate(OptContext *ctx, TCGOp *op,
                           int nb_oargs, int nb_iargs)
{
    for (int i = nb_oargs; i < nb_oargs + nb_iargs; i++) {
        TCGTemp *ts = arg_temp(op->args[i]);
        if (ts_is_copy(ts)) {
            op->args[i] = temp_arg(find_better_copy(ts));
        }
    }
}

static void finish_bb(OptContext *ctx)
{
    /* We only optimize memory barriers across basic blocks. */
    ctx->prev_mb = NULL;
}

static void finish_ebb(OptContext *ctx)
{
    finish_bb(ctx);
    /* We only optimize across extended basic blocks. */
    memset(&ctx->temps_used, 0, sizeof(ctx->temps_used));
    remove_mem_copy_all(ctx);
}

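/*
 * Default post-fold action: forget prior state for each output of OP,
 * then record the tracked known-zero bits for the first output.
 */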
static bool finish_folding(OptContext *ctx, TCGOp *op)
{
    const TCGOpDef *def = &tcg_op_defs[op->opc];
    int i, nb_oargs;

    nb_oargs = def->nb_oargs;
    for (i = 0; i < nb_oargs; i++) {
        TCGTemp *ts = arg_temp(op->args[i]);
        reset_ts(ctx, ts);
        /*
         * Save the corresponding known-zero bits mask for the
         * first output argument (only one supported so far).
         */
        if (i == 0) {
            ts_info(ts)->z_mask = ctx->z_mask;
        }
    }
    return true;
}

/*
 * The fold_* functions return true when processing is complete,
 * usually by folding the operation to a constant or to a copy,
 * and calling tcg_opt_gen_{mov,movi}.  They may do other things,
 * like collect information about the value produced, for use in
 * optimizing a subsequent operation.
 *
 * These first fold_* functions are all helpers, used by other
 * folders for more specific operations.
 */

static bool fold_const1(OptContext *ctx, TCGOp *op)
{
    if (arg_is_const(op->args[1])) {
        uint64_t t;

        t = arg_info(op->args[1])->val;
        t = do_constant_folding(op->opc, ctx->type, t, 0);
        return tcg_opt_gen_movi(ctx, op, op->args[0], t);
    }
    return false;
}

static bool fold_const2(OptContext *ctx, TCGOp *op)
{
    if (arg_is_const(op->args[1]) && arg_is_const(op->args[2])) {
        uint64_t t1 = arg_info(op->args[1])->val;
        uint64_t t2 = arg_info(op->args[2])->val;

        t1 = do_constant_folding(op->opc, ctx->type, t1, t2);
        return tcg_opt_gen_movi(ctx, op, op->args[0], t1);
    }
    return false;
}

static bool fold_commutative(OptContext *ctx, TCGOp *op)
{
    swap_commutative(op->args[0], &op->args[1], &op->args[2]);
    return false;
}

static bool fold_const2_commutative(OptContext *ctx, TCGOp *op)
{
    swap_commutative(op->args[0], &op->args[1], &op->args[2]);
    return fold_const2(ctx, op);
}

/*
 * Record "zero" and "sign" masks for the single output of @op.
 * See TempOptInfo definition of z_mask and s_mask.
 * If z_mask allows, fold the output to constant zero.
 * The passed s_mask may be augmented by z_mask.
 */
static bool fold_masks_zs(OptContext *ctx, TCGOp *op,
                          uint64_t z_mask, int64_t s_mask)
{
    const TCGOpDef *def = &tcg_op_defs[op->opc];
    TCGTemp *ts;
    TempOptInfo *ti;
    int rep;

    /* Only single-output opcodes are supported here. */
    tcg_debug_assert(def->nb_oargs == 1);

    /*
     * 32-bit ops generate 32-bit results, which for the purpose of
     * simplifying tcg are sign-extended.  Certainly that's how we
     * represent our constants elsewhere.  Note that the bits will
     * be reset properly for a 64-bit value when encountering the
     * type changing opcodes.
     */
    if (ctx->type == TCG_TYPE_I32) {
        z_mask = (int32_t)z_mask;
        s_mask |= INT32_MIN;
    }

    if (z_mask == 0) {
        return tcg_opt_gen_movi(ctx, op, op->args[0], 0);
    }

    ts = arg_temp(op->args[0]);
    reset_ts(ctx, ts);

    ti = ts_info(ts);
    ti->z_mask = z_mask;

    /* Canonicalize s_mask and incorporate data from z_mask. */
    rep = clz64(~s_mask);
    rep = MAX(rep, clz64(z_mask));
    rep = MAX(rep - 1, 0);
    ti->s_mask = INT64_MIN >> rep;

    return true;
}

static bool fold_masks_z(OptContext *ctx, TCGOp *op, uint64_t z_mask)
{
    return fold_masks_zs(ctx, op, z_mask, 0);
}

static bool fold_masks_s(OptContext *ctx, TCGOp *op, uint64_t s_mask)
{
    return fold_masks_zs(ctx, op, -1, s_mask);
}

static bool fold_masks(OptContext *ctx, TCGOp *op)
{
    return fold_masks_zs(ctx, op, ctx->z_mask, ctx->s_mask);
}

/*
 * An "affected" mask bit is 0 if and only if the result is identical
 * to the first input.  Thus if the entire mask is 0, the operation
 * is equivalent to a copy.
 */
static bool fold_affected_mask(OptContext *ctx, TCGOp *op, uint64_t a_mask)
{
    if (ctx->type == TCG_TYPE_I32) {
        a_mask = (uint32_t)a_mask;
    }
    if (a_mask == 0) {
        return tcg_opt_gen_mov(ctx, op, op->args[0], op->args[1]);
    }
    return false;
}

/*
 * Convert @op to NOT, if NOT is supported by the host.
 * Return true if the conversion is successful, which will still
 * indicate that the processing is complete.
 */
static bool fold_not(OptContext *ctx, TCGOp *op);
static bool fold_to_not(OptContext *ctx, TCGOp *op, int idx)
{
    TCGOpcode not_op;
    bool have_not;

    switch (ctx->type) {
    case TCG_TYPE_I32:
        not_op = INDEX_op_not_i32;
        have_not = TCG_TARGET_HAS_not_i32;
        break;
    case TCG_TYPE_I64:
        not_op = INDEX_op_not_i64;
        have_not = TCG_TARGET_HAS_not_i64;
        break;
    case TCG_TYPE_V64:
    case TCG_TYPE_V128:
    case TCG_TYPE_V256:
        not_op = INDEX_op_not_vec;
        have_not = TCG_TARGET_HAS_not_vec;
        break;
    default:
        g_assert_not_reached();
    }
    if (have_not) {
        op->opc = not_op;
        op->args[1] = op->args[idx];
        return fold_not(ctx, op);
    }
    return false;
}

/* If the binary operation has first argument @i, fold to @i. */
static bool fold_ix_to_i(OptContext *ctx, TCGOp *op, uint64_t i)
{
    if (arg_is_const_val(op->args[1], i)) {
        return tcg_opt_gen_movi(ctx, op, op->args[0], i);
    }
    return false;
}

/* If the binary operation has first argument @i, fold to NOT. */
static bool fold_ix_to_not(OptContext *ctx, TCGOp *op, uint64_t i)
{
    if (arg_is_const_val(op->args[1], i)) {
        return fold_to_not(ctx, op, 2);
    }
    return false;
}

/* If the binary operation has second argument @i, fold to @i. */
static bool fold_xi_to_i(OptContext *ctx, TCGOp *op, uint64_t i)
{
    if (arg_is_const_val(op->args[2], i)) {
        return tcg_opt_gen_movi(ctx, op, op->args[0], i);
    }
    return false;
}

/* If the binary operation has second argument @i, fold to identity. */
static bool fold_xi_to_x(OptContext *ctx, TCGOp *op, uint64_t i)
{
    if (arg_is_const_val(op->args[2], i)) {
        return tcg_opt_gen_mov(ctx, op, op->args[0], op->args[1]);
    }
    return false;
}

/* If the binary operation has second argument @i, fold to NOT. */
static bool fold_xi_to_not(OptContext *ctx, TCGOp *op, uint64_t i)
{
    if (arg_is_const_val(op->args[2], i)) {
        return fold_to_not(ctx, op, 1);
    }
    return false;
}

/* If the binary operation has both arguments equal, fold to @i. */
static bool fold_xx_to_i(OptContext *ctx, TCGOp *op, uint64_t i)
{
    if (args_are_copies(op->args[1], op->args[2])) {
        return tcg_opt_gen_movi(ctx, op, op->args[0], i);
    }
    return false;
}

/* If the binary operation has both arguments equal, fold to identity. */
static bool fold_xx_to_x(OptContext *ctx, TCGOp *op)
{
    if (args_are_copies(op->args[1], op->args[2])) {
        return tcg_opt_gen_mov(ctx, op, op->args[0], op->args[1]);
    }
    return false;
}

/*
 * These outermost fold_<op> functions are sorted alphabetically.
 *
 * The ordering of the transformations should be:
 *   1) those that produce a constant
 *   2) those that produce a copy
 *   3) those that produce information about the result value.
 */

static bool fold_add(OptContext *ctx, TCGOp *op)
{
    if (fold_const2_commutative(ctx, op) ||
        fold_xi_to_x(ctx, op, 0)) {
        return true;
    }
    return finish_folding(ctx, op);
}

/* We cannot as yet do_constant_folding with vectors. */
static bool fold_add_vec(OptContext *ctx, TCGOp *op)
{
    if (fold_commutative(ctx, op) ||
        fold_xi_to_x(ctx, op, 0)) {
        return true;
    }
    return finish_folding(ctx, op);
}

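/*
 * Fold add2/sub2 with fully-constant operands to two movi, and
 * rewrite sub2 with a constant second operand as add2 of its negation.
 */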
static bool fold_addsub2(OptContext *ctx, TCGOp *op, bool add)
{
    bool a_const = arg_is_const(op->args[2]) && arg_is_const(op->args[3]);
    bool b_const = arg_is_const(op->args[4]) && arg_is_const(op->args[5]);

    if (a_const && b_const) {
        uint64_t al = arg_info(op->args[2])->val;
        uint64_t ah = arg_info(op->args[3])->val;
        uint64_t bl = arg_info(op->args[4])->val;
        uint64_t bh = arg_info(op->args[5])->val;
        TCGArg rl, rh;
        TCGOp *op2;

        if (ctx->type == TCG_TYPE_I32) {
            uint64_t a = deposit64(al, 32, 32, ah);
            uint64_t b = deposit64(bl, 32, 32, bh);

            if (add) {
                a += b;
            } else {
                a -= b;
            }

            al = sextract64(a, 0, 32);
            ah = sextract64(a, 32, 32);
        } else {
            Int128 a = int128_make128(al, ah);
            Int128 b = int128_make128(bl, bh);

            if (add) {
                a = int128_add(a, b);
            } else {
                a = int128_sub(a, b);
            }

            al = int128_getlo(a);
            ah = int128_gethi(a);
        }

        rl = op->args[0];
        rh = op->args[1];

        /* The proper opcode is supplied by tcg_opt_gen_mov. */
        op2 = tcg_op_insert_before(ctx->tcg, op, 0, 2);

        tcg_opt_gen_movi(ctx, op, rl, al);
        tcg_opt_gen_movi(ctx, op2, rh, ah);
        return true;
    }

    /* Fold sub2 r,x,i to add2 r,x,-i */
    if (!add && b_const) {
        uint64_t bl = arg_info(op->args[4])->val;
        uint64_t bh = arg_info(op->args[5])->val;

        /* Negate the two parts without assembling and disassembling. */
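        /*
         * Two's complement: -b == ~b + 1 over the full 128 bits; the
         * +1 carries into the high half exactly when the low half is 0.
         */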
1285 bl = -bl;
1286 bh = ~bh + !bl;
1287
1288 op->opc = (ctx->type == TCG_TYPE_I32
1289 ? INDEX_op_add2_i32 : INDEX_op_add2_i64);
1290 op->args[4] = arg_new_constant(ctx, bl);
1291 op->args[5] = arg_new_constant(ctx, bh);
1292 }
Richard Hendersonf3ed3cf2024-12-08 18:39:47 -06001293 return finish_folding(ctx, op);
Richard Hendersone3f7dc22021-08-24 10:30:38 -07001294}
1295
Richard Henderson9531c072021-08-26 06:51:39 -07001296static bool fold_add2(OptContext *ctx, TCGOp *op)
Richard Hendersone3f7dc22021-08-24 10:30:38 -07001297{
Richard Henderson7a2f7082021-08-26 07:06:39 -07001298 /* Note that the high and low parts may be independently swapped. */
1299 swap_commutative(op->args[0], &op->args[2], &op->args[4]);
1300 swap_commutative(op->args[1], &op->args[3], &op->args[5]);
1301
Richard Henderson9531c072021-08-26 06:51:39 -07001302 return fold_addsub2(ctx, op, true);
Richard Hendersone3f7dc22021-08-24 10:30:38 -07001303}
1304
Richard Henderson2f9f08b2021-08-25 12:03:48 -07001305static bool fold_and(OptContext *ctx, TCGOp *op)
1306{
Richard Henderson1ca73722024-12-08 18:47:15 -06001307 uint64_t z1, z2, z_mask, s_mask;
1308 TempOptInfo *t1, *t2;
Richard Hendersonfae450b2021-08-25 22:42:19 -07001309
Richard Henderson7a2f7082021-08-26 07:06:39 -07001310 if (fold_const2_commutative(ctx, op) ||
Richard Hendersone8679952021-08-25 13:19:52 -07001311 fold_xi_to_i(ctx, op, 0) ||
Richard Hendersona63ce0e2021-08-25 20:28:53 -07001312 fold_xi_to_x(ctx, op, -1) ||
Richard Hendersonca7bb042021-08-25 13:14:21 -07001313 fold_xx_to_x(ctx, op)) {
1314 return true;
1315 }
Richard Hendersonfae450b2021-08-25 22:42:19 -07001316
Richard Henderson1ca73722024-12-08 18:47:15 -06001317 t1 = arg_info(op->args[1]);
1318 t2 = arg_info(op->args[2]);
1319 z1 = t1->z_mask;
1320 z2 = t2->z_mask;
Richard Henderson3f2b1f82021-08-26 13:08:54 -07001321
1322 /*
Richard Hendersonfae450b2021-08-25 22:42:19 -07001323 * Known-zeros does not imply known-ones. Therefore unless
1324 * arg2 is constant, we can't infer affected bits from it.
1325 */
Richard Henderson1ca73722024-12-08 18:47:15 -06001326 if (ti_is_const(t2) && fold_affected_mask(ctx, op, z1 & ~z2)) {
Richard Henderson045ace32024-12-19 10:33:51 -08001327 return true;
Richard Hendersonfae450b2021-08-25 22:42:19 -07001328 }
1329
Richard Henderson1ca73722024-12-08 18:47:15 -06001330 z_mask = z1 & z2;
1331
1332 /*
1333 * Sign repetitions are perforce all identical, whether they are 1 or 0.
1334 * Bitwise operations preserve the relative quantity of the repetitions.
1335 */
1336 s_mask = t1->s_mask & t2->s_mask;
1337
1338 return fold_masks_zs(ctx, op, z_mask, s_mask);
Richard Henderson2f9f08b2021-08-25 12:03:48 -07001339}
1340
1341static bool fold_andc(OptContext *ctx, TCGOp *op)
1342{
Richard Henderson21e2b5f2024-12-08 18:56:55 -06001343 uint64_t z_mask, s_mask;
1344 TempOptInfo *t1, *t2;
Richard Hendersonfae450b2021-08-25 22:42:19 -07001345
Richard Hendersoncbe42fb2021-08-25 13:02:00 -07001346 if (fold_const2(ctx, op) ||
Richard Henderson0e0a32b2021-08-24 13:18:01 -07001347 fold_xx_to_i(ctx, op, 0) ||
Richard Hendersona63ce0e2021-08-25 20:28:53 -07001348 fold_xi_to_x(ctx, op, 0) ||
Richard Henderson0e0a32b2021-08-24 13:18:01 -07001349 fold_ix_to_not(ctx, op, -1)) {
Richard Hendersoncbe42fb2021-08-25 13:02:00 -07001350 return true;
1351 }
Richard Hendersonfae450b2021-08-25 22:42:19 -07001352
Richard Henderson21e2b5f2024-12-08 18:56:55 -06001353 t1 = arg_info(op->args[1]);
1354 t2 = arg_info(op->args[2]);
1355 z_mask = t1->z_mask;
Richard Hendersonfae450b2021-08-25 22:42:19 -07001356
1357 /*
1358 * Known-zeros does not imply known-ones. Therefore unless
1359 * arg2 is constant, we can't infer anything from it.
1360 */
Richard Henderson21e2b5f2024-12-08 18:56:55 -06001361 if (ti_is_const(t2)) {
1362 uint64_t v2 = ti_const_val(t2);
1363 if (fold_affected_mask(ctx, op, z_mask & v2)) {
Richard Henderson045ace32024-12-19 10:33:51 -08001364 return true;
1365 }
Richard Henderson21e2b5f2024-12-08 18:56:55 -06001366 z_mask &= ~v2;
Richard Hendersonfae450b2021-08-25 22:42:19 -07001367 }
Richard Hendersonfae450b2021-08-25 22:42:19 -07001368
Richard Henderson21e2b5f2024-12-08 18:56:55 -06001369 s_mask = t1->s_mask & t2->s_mask;
1370 return fold_masks_zs(ctx, op, z_mask, s_mask);
Richard Henderson2f9f08b2021-08-25 12:03:48 -07001371}
1372
Richard Henderson079b0802021-08-24 09:30:59 -07001373static bool fold_brcond(OptContext *ctx, TCGOp *op)
1374{
Richard Hendersonfb04ab72024-01-10 18:21:58 +11001375 int i = do_constant_folding_cond1(ctx, op, NO_DEST, &op->args[0],
Richard Henderson246c4b72023-10-24 16:36:50 -07001376 &op->args[1], &op->args[2]);
Richard Henderson079b0802021-08-24 09:30:59 -07001377 if (i == 0) {
1378 tcg_op_remove(ctx->tcg, op);
1379 return true;
1380 }
1381 if (i > 0) {
1382 op->opc = INDEX_op_br;
1383 op->args[0] = op->args[3];
Richard Henderson15268552024-12-08 07:45:11 -06001384 finish_ebb(ctx);
1385 } else {
1386 finish_bb(ctx);
Richard Henderson079b0802021-08-24 09:30:59 -07001387 }
Richard Henderson15268552024-12-08 07:45:11 -06001388 return true;
Richard Henderson079b0802021-08-24 09:30:59 -07001389}
1390
Richard Henderson764d2ab2021-08-24 09:22:11 -07001391static bool fold_brcond2(OptContext *ctx, TCGOp *op)
1392{
Richard Henderson7e64b112023-10-24 16:53:56 -07001393 TCGCond cond;
1394 TCGArg label;
Richard Henderson7a2f7082021-08-26 07:06:39 -07001395 int i, inv = 0;
Richard Henderson764d2ab2021-08-24 09:22:11 -07001396
Richard Hendersonfb04ab72024-01-10 18:21:58 +11001397 i = do_constant_folding_cond2(ctx, op, &op->args[0]);
Richard Henderson7e64b112023-10-24 16:53:56 -07001398 cond = op->args[4];
1399 label = op->args[5];
Richard Henderson764d2ab2021-08-24 09:22:11 -07001400 if (i >= 0) {
1401 goto do_brcond_const;
1402 }
1403
1404 switch (cond) {
1405 case TCG_COND_LT:
1406 case TCG_COND_GE:
1407 /*
1408 * Simplify LT/GE comparisons vs zero to a single compare
1409 * vs the high word of the input.
1410 */
Richard Henderson27cdb852023-10-23 11:38:00 -07001411 if (arg_is_const_val(op->args[2], 0) &&
1412 arg_is_const_val(op->args[3], 0)) {
Richard Henderson764d2ab2021-08-24 09:22:11 -07001413 goto do_brcond_high;
1414 }
1415 break;
1416
1417 case TCG_COND_NE:
1418 inv = 1;
1419 QEMU_FALLTHROUGH;
1420 case TCG_COND_EQ:
1421 /*
1422 * Simplify EQ/NE comparisons where one of the pairs
1423 * can be simplified.
1424 */
Richard Henderson67f84c92021-08-25 08:00:20 -07001425 i = do_constant_folding_cond(TCG_TYPE_I32, op->args[0],
Richard Henderson764d2ab2021-08-24 09:22:11 -07001426 op->args[2], cond);
1427 switch (i ^ inv) {
1428 case 0:
1429 goto do_brcond_const;
1430 case 1:
1431 goto do_brcond_high;
1432 }
1433
Richard Henderson67f84c92021-08-25 08:00:20 -07001434 i = do_constant_folding_cond(TCG_TYPE_I32, op->args[1],
Richard Henderson764d2ab2021-08-24 09:22:11 -07001435 op->args[3], cond);
1436 switch (i ^ inv) {
1437 case 0:
1438 goto do_brcond_const;
1439 case 1:
Richard Hendersonceb9ee02023-10-23 23:44:27 -07001440 goto do_brcond_low;
1441 }
1442 break;
1443
1444 case TCG_COND_TSTEQ:
1445 case TCG_COND_TSTNE:
1446 if (arg_is_const_val(op->args[2], 0)) {
1447 goto do_brcond_high;
1448 }
1449 if (arg_is_const_val(op->args[3], 0)) {
1450 goto do_brcond_low;
Richard Henderson764d2ab2021-08-24 09:22:11 -07001451 }
1452 break;
1453
1454 default:
1455 break;
1456
Richard Hendersonceb9ee02023-10-23 23:44:27 -07001457 do_brcond_low:
1458 op->opc = INDEX_op_brcond_i32;
1459 op->args[1] = op->args[2];
1460 op->args[2] = cond;
1461 op->args[3] = label;
1462 return fold_brcond(ctx, op);
1463
Richard Henderson764d2ab2021-08-24 09:22:11 -07001464 do_brcond_high:
1465 op->opc = INDEX_op_brcond_i32;
1466 op->args[0] = op->args[1];
1467 op->args[1] = op->args[3];
1468 op->args[2] = cond;
1469 op->args[3] = label;
Richard Hendersonceb9ee02023-10-23 23:44:27 -07001470 return fold_brcond(ctx, op);
Richard Henderson764d2ab2021-08-24 09:22:11 -07001471
1472 do_brcond_const:
1473 if (i == 0) {
1474 tcg_op_remove(ctx->tcg, op);
1475 return true;
1476 }
1477 op->opc = INDEX_op_br;
1478 op->args[0] = label;
Richard Henderson15268552024-12-08 07:45:11 -06001479 finish_ebb(ctx);
1480 return true;
Richard Henderson764d2ab2021-08-24 09:22:11 -07001481 }
Richard Henderson15268552024-12-08 07:45:11 -06001482
1483 finish_bb(ctx);
1484 return true;
Richard Henderson764d2ab2021-08-24 09:22:11 -07001485}
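
/*
 * E.g. for a 64-bit value split into (lo, hi) 32-bit halves,
 * brcond2 lo, hi, 0, 0, lt, L tests "value < 0", which depends
 * only on the sign of the high word; do_brcond_high rewrites it
 * to brcond_i32 hi, 0, lt, L.
 */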
1486
Richard Henderson09bacdc2021-08-24 11:58:12 -07001487static bool fold_bswap(OptContext *ctx, TCGOp *op)
1488{
Richard Henderson57fe5c62021-08-26 12:04:46 -07001489 uint64_t z_mask, s_mask, sign;
Richard Hendersonc1e7b982024-12-08 19:42:20 -06001490 TempOptInfo *t1 = arg_info(op->args[1]);
Richard Hendersonfae450b2021-08-25 22:42:19 -07001491
Richard Hendersonc1e7b982024-12-08 19:42:20 -06001492 if (ti_is_const(t1)) {
1493 return tcg_opt_gen_movi(ctx, op, op->args[0],
1494 do_constant_folding(op->opc, ctx->type,
1495 ti_const_val(t1),
1496 op->args[2]));
Richard Henderson09bacdc2021-08-24 11:58:12 -07001497 }
Richard Hendersonfae450b2021-08-25 22:42:19 -07001498
Richard Hendersonc1e7b982024-12-08 19:42:20 -06001499 z_mask = t1->z_mask;
Richard Hendersonfae450b2021-08-25 22:42:19 -07001500 switch (op->opc) {
1501 case INDEX_op_bswap16_i32:
1502 case INDEX_op_bswap16_i64:
1503 z_mask = bswap16(z_mask);
1504 sign = INT16_MIN;
1505 break;
1506 case INDEX_op_bswap32_i32:
1507 case INDEX_op_bswap32_i64:
1508 z_mask = bswap32(z_mask);
1509 sign = INT32_MIN;
1510 break;
1511 case INDEX_op_bswap64_i64:
1512 z_mask = bswap64(z_mask);
1513 sign = INT64_MIN;
1514 break;
1515 default:
1516 g_assert_not_reached();
1517 }
1518
Richard Henderson75c3bf32024-12-19 10:50:40 -08001519 s_mask = 0;
Richard Hendersonfae450b2021-08-25 22:42:19 -07001520 switch (op->args[2] & (TCG_BSWAP_OZ | TCG_BSWAP_OS)) {
1521 case TCG_BSWAP_OZ:
1522 break;
1523 case TCG_BSWAP_OS:
1524 /* If the sign bit may be 1, force all the bits above to 1. */
1525 if (z_mask & sign) {
1526 z_mask |= sign;
1527 }
Richard Hendersonc1e7b982024-12-08 19:42:20 -06001528 /* The value and therefore s_mask is explicitly sign-extended. */
1529 s_mask = sign;
Richard Hendersonfae450b2021-08-25 22:42:19 -07001530 break;
1531 default:
1532 /* The high bits are undefined: force all bits above the sign to 1. */
1533 z_mask |= sign << 1;
1534 break;
1535 }
Richard Hendersonfae450b2021-08-25 22:42:19 -07001536
Richard Hendersonc1e7b982024-12-08 19:42:20 -06001537 return fold_masks_zs(ctx, op, z_mask, s_mask);
Richard Henderson09bacdc2021-08-24 11:58:12 -07001538}
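
/*
 * E.g. bswap16 with TCG_BSWAP_OS on an input whose z_mask is 0x00ff:
 * bswap16(0x00ff) == 0xff00, so bit 15 of the result may be set and
 * z_mask is widened by sign (INT16_MIN, 0xffffffffffff8000 as
 * uint64_t); s_mask == sign then records that bits 15 and up all
 * match the msb of the sign-extended result.
 */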
1539
Richard Henderson5cf32be2021-08-24 08:17:08 -07001540static bool fold_call(OptContext *ctx, TCGOp *op)
1541{
1542 TCGContext *s = ctx->tcg;
1543 int nb_oargs = TCGOP_CALLO(op);
1544 int nb_iargs = TCGOP_CALLI(op);
1545 int flags, i;
1546
1547 init_arguments(ctx, op, nb_oargs + nb_iargs);
1548 copy_propagate(ctx, op, nb_oargs, nb_iargs);
1549
1550 /* If the function reads or writes globals, reset temp data. */
1551 flags = tcg_call_flags(op);
1552 if (!(flags & (TCG_CALL_NO_READ_GLOBALS | TCG_CALL_NO_WRITE_GLOBALS))) {
1553 int nb_globals = s->nb_globals;
1554
1555 for (i = 0; i < nb_globals; i++) {
1556 if (test_bit(i, ctx->temps_used.l)) {
Richard Henderson986cac12023-01-09 13:59:35 -08001557 reset_ts(ctx, &ctx->tcg->temps[i]);
Richard Henderson5cf32be2021-08-24 08:17:08 -07001558 }
1559 }
1560 }
1561
Richard Hendersonab84dc32023-08-23 23:04:24 -07001562 /* If the function has side effects, reset mem data. */
1563 if (!(flags & TCG_CALL_NO_SIDE_EFFECTS)) {
1564 remove_mem_copy_all(ctx);
1565 }
1566
Richard Henderson5cf32be2021-08-24 08:17:08 -07001567 /* Reset temp data for outputs. */
1568 for (i = 0; i < nb_oargs; i++) {
Richard Henderson986cac12023-01-09 13:59:35 -08001569 reset_temp(ctx, op->args[i]);
Richard Henderson5cf32be2021-08-24 08:17:08 -07001570 }
1571
1572 /* Stop optimizing MB across calls. */
1573 ctx->prev_mb = NULL;
1574 return true;
1575}
1576
Richard Henderson30dd0bf2021-08-24 10:51:34 -07001577static bool fold_count_zeros(OptContext *ctx, TCGOp *op)
1578{
Richard Hendersonce1d6632024-12-08 19:47:51 -06001579 uint64_t z_mask, s_mask;
1580 TempOptInfo *t1 = arg_info(op->args[1]);
1581 TempOptInfo *t2 = arg_info(op->args[2]);
Richard Hendersonfae450b2021-08-25 22:42:19 -07001582
Richard Hendersonce1d6632024-12-08 19:47:51 -06001583 if (ti_is_const(t1)) {
1584 uint64_t t = ti_const_val(t1);
Richard Henderson30dd0bf2021-08-24 10:51:34 -07001585
1586 if (t != 0) {
Richard Henderson67f84c92021-08-25 08:00:20 -07001587 t = do_constant_folding(op->opc, ctx->type, t, 0);
Richard Henderson30dd0bf2021-08-24 10:51:34 -07001588 return tcg_opt_gen_movi(ctx, op, op->args[0], t);
1589 }
1590 return tcg_opt_gen_mov(ctx, op, op->args[0], op->args[2]);
1591 }
Richard Hendersonfae450b2021-08-25 22:42:19 -07001592
1593 switch (ctx->type) {
1594 case TCG_TYPE_I32:
1595 z_mask = 31;
1596 break;
1597 case TCG_TYPE_I64:
1598 z_mask = 63;
1599 break;
1600 default:
1601 g_assert_not_reached();
1602 }
Richard Hendersonce1d6632024-12-08 19:47:51 -06001603 s_mask = ~z_mask;
1604 z_mask |= t2->z_mask;
1605 s_mask &= t2->s_mask;
1606
1607 return fold_masks_zs(ctx, op, z_mask, s_mask);
Richard Henderson30dd0bf2021-08-24 10:51:34 -07001608}
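
/*
 * E.g. a 32-bit clz of a nonzero input yields 0..31, hence the
 * initial z_mask of 31; the fallback arg2 is the result for a zero
 * input, so its masks are merged in.  With a typical constant
 * fallback of 32, z_mask becomes 31 | 32 == 0x3f.
 */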
1609
Richard Henderson2f9f08b2021-08-25 12:03:48 -07001610static bool fold_ctpop(OptContext *ctx, TCGOp *op)
1611{
Richard Henderson81be07f2024-12-08 19:49:17 -06001612 uint64_t z_mask;
1613
Richard Hendersonfae450b2021-08-25 22:42:19 -07001614 if (fold_const1(ctx, op)) {
1615 return true;
1616 }
1617
1618 switch (ctx->type) {
1619 case TCG_TYPE_I32:
Richard Henderson81be07f2024-12-08 19:49:17 -06001620 z_mask = 32 | 31;
Richard Hendersonfae450b2021-08-25 22:42:19 -07001621 break;
1622 case TCG_TYPE_I64:
Richard Henderson81be07f2024-12-08 19:49:17 -06001623 z_mask = 64 | 63;
Richard Hendersonfae450b2021-08-25 22:42:19 -07001624 break;
1625 default:
1626 g_assert_not_reached();
1627 }
Richard Henderson81be07f2024-12-08 19:49:17 -06001628 return fold_masks_z(ctx, op, z_mask);
Richard Henderson2f9f08b2021-08-25 12:03:48 -07001629}
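
/*
 * E.g. the population count of an i64 lies in [0, 64]; every value
 * in that range fits under z_mask == 64 | 63 == 0x7f, the smallest
 * all-ones mask that covers both 64 itself and 0..63.
 */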
1630
Richard Henderson1b1907b2021-08-24 10:47:04 -07001631static bool fold_deposit(OptContext *ctx, TCGOp *op)
1632{
Richard Hendersonc7739ab2024-12-08 19:57:28 -06001633 TempOptInfo *t1 = arg_info(op->args[1]);
1634 TempOptInfo *t2 = arg_info(op->args[2]);
1635 int ofs = op->args[3];
1636 int len = op->args[4];
Richard Hendersonedb832c2024-12-19 17:56:05 -08001637 int width;
Richard Henderson8f7a8402023-08-13 11:03:05 -07001638 TCGOpcode and_opc;
Richard Hendersonedb832c2024-12-19 17:56:05 -08001639 uint64_t z_mask, s_mask;
Richard Henderson8f7a8402023-08-13 11:03:05 -07001640
Richard Hendersonc7739ab2024-12-08 19:57:28 -06001641 if (ti_is_const(t1) && ti_is_const(t2)) {
1642 return tcg_opt_gen_movi(ctx, op, op->args[0],
1643 deposit64(ti_const_val(t1), ofs, len,
1644 ti_const_val(t2)));
Richard Henderson1b1907b2021-08-24 10:47:04 -07001645 }
Richard Hendersonfae450b2021-08-25 22:42:19 -07001646
Richard Henderson8f7a8402023-08-13 11:03:05 -07001647 switch (ctx->type) {
1648 case TCG_TYPE_I32:
1649 and_opc = INDEX_op_and_i32;
Richard Hendersonedb832c2024-12-19 17:56:05 -08001650 width = 32;
Richard Henderson8f7a8402023-08-13 11:03:05 -07001651 break;
1652 case TCG_TYPE_I64:
1653 and_opc = INDEX_op_and_i64;
Richard Hendersonedb832c2024-12-19 17:56:05 -08001654 width = 64;
Richard Henderson8f7a8402023-08-13 11:03:05 -07001655 break;
1656 default:
1657 g_assert_not_reached();
1658 }
1659
1660 /* Inserting a value into zero at offset 0. */
Richard Hendersonc7739ab2024-12-08 19:57:28 -06001661 if (ti_is_const_val(t1, 0) && ofs == 0) {
1662 uint64_t mask = MAKE_64BIT_MASK(0, len);
Richard Henderson8f7a8402023-08-13 11:03:05 -07001663
1664 op->opc = and_opc;
1665 op->args[1] = op->args[2];
Richard Henderson26aac972023-10-23 12:31:57 -07001666 op->args[2] = arg_new_constant(ctx, mask);
Richard Hendersonc7739ab2024-12-08 19:57:28 -06001667 return fold_and(ctx, op);
Richard Henderson8f7a8402023-08-13 11:03:05 -07001668 }
1669
1670 /* Inserting zero into a value. */
Richard Hendersonc7739ab2024-12-08 19:57:28 -06001671 if (ti_is_const_val(t2, 0)) {
1672 uint64_t mask = deposit64(-1, ofs, len, 0);
Richard Henderson8f7a8402023-08-13 11:03:05 -07001673
1674 op->opc = and_opc;
Richard Henderson26aac972023-10-23 12:31:57 -07001675 op->args[2] = arg_new_constant(ctx, mask);
Richard Hendersonc7739ab2024-12-08 19:57:28 -06001676 return fold_and(ctx, op);
Richard Henderson8f7a8402023-08-13 11:03:05 -07001677 }
1678
Richard Hendersonedb832c2024-12-19 17:56:05 -08001679 /* The s_mask from the top portion of the deposit is still valid. */
1680 if (ofs + len == width) {
1681 s_mask = t2->s_mask << ofs;
1682 } else {
1683 s_mask = t1->s_mask & ~MAKE_64BIT_MASK(0, ofs + len);
1684 }
1685
Richard Hendersonc7739ab2024-12-08 19:57:28 -06001686 z_mask = deposit64(t1->z_mask, ofs, len, t2->z_mask);
Richard Hendersonedb832c2024-12-19 17:56:05 -08001687 return fold_masks_zs(ctx, op, z_mask, s_mask);
Richard Henderson1b1907b2021-08-24 10:47:04 -07001688}
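
/*
 * E.g. depositing a byte at ofs 8, len 8: with t1->z_mask == 0xffff
 * and t2->z_mask == 0x0f, deposit64(0xffff, 8, 8, 0x0f) == 0x0fff,
 * since the inserted field contributes only its own possibly-set
 * bits.
 */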
1689
Richard Henderson2f9f08b2021-08-25 12:03:48 -07001690static bool fold_divide(OptContext *ctx, TCGOp *op)
1691{
Richard Henderson2f9d9a32021-10-25 11:30:14 -07001692 if (fold_const2(ctx, op) ||
1693 fold_xi_to_x(ctx, op, 1)) {
1694 return true;
1695 }
Richard Henderson3d5ec802024-12-08 19:59:15 -06001696 return finish_folding(ctx, op);
Richard Henderson2f9f08b2021-08-25 12:03:48 -07001697}
1698
Richard Henderson8cdb3fc2021-08-24 12:06:33 -07001699static bool fold_dup(OptContext *ctx, TCGOp *op)
1700{
1701 if (arg_is_const(op->args[1])) {
1702 uint64_t t = arg_info(op->args[1])->val;
1703 t = dup_const(TCGOP_VECE(op), t);
1704 return tcg_opt_gen_movi(ctx, op, op->args[0], t);
1705 }
Richard Hendersone089d692024-12-08 20:00:51 -06001706 return finish_folding(ctx, op);
Richard Henderson8cdb3fc2021-08-24 12:06:33 -07001707}
1708
1709static bool fold_dup2(OptContext *ctx, TCGOp *op)
1710{
1711 if (arg_is_const(op->args[1]) && arg_is_const(op->args[2])) {
1712 uint64_t t = deposit64(arg_info(op->args[1])->val, 32, 32,
1713 arg_info(op->args[2])->val);
1714 return tcg_opt_gen_movi(ctx, op, op->args[0], t);
1715 }
1716
1717 if (args_are_copies(op->args[1], op->args[2])) {
1718 op->opc = INDEX_op_dup_vec;
1719 TCGOP_VECE(op) = MO_32;
1720 }
Richard Hendersone089d692024-12-08 20:00:51 -06001721 return finish_folding(ctx, op);
Richard Henderson8cdb3fc2021-08-24 12:06:33 -07001722}
1723
Richard Henderson2f9f08b2021-08-25 12:03:48 -07001724static bool fold_eqv(OptContext *ctx, TCGOp *op)
1725{
Richard Hendersonef6be622024-12-08 20:03:15 -06001726 uint64_t s_mask;
1727
Richard Henderson7a2f7082021-08-26 07:06:39 -07001728 if (fold_const2_commutative(ctx, op) ||
Richard Hendersona63ce0e2021-08-25 20:28:53 -07001729 fold_xi_to_x(ctx, op, -1) ||
Richard Henderson0e0a32b2021-08-24 13:18:01 -07001730 fold_xi_to_not(ctx, op, 0)) {
1731 return true;
1732 }
Richard Henderson3f2b1f82021-08-26 13:08:54 -07001733
Richard Hendersonef6be622024-12-08 20:03:15 -06001734 s_mask = arg_info(op->args[1])->s_mask
1735 & arg_info(op->args[2])->s_mask;
1736 return fold_masks_s(ctx, op, s_mask);
Richard Henderson2f9f08b2021-08-25 12:03:48 -07001737}
1738
Richard Hendersonb6617c82021-08-24 10:44:53 -07001739static bool fold_extract(OptContext *ctx, TCGOp *op)
1740{
Richard Hendersonfae450b2021-08-25 22:42:19 -07001741 uint64_t z_mask_old, z_mask;
Richard Hendersonb6cd00f2024-12-08 20:05:11 -06001742 TempOptInfo *t1 = arg_info(op->args[1]);
Richard Henderson57fe5c62021-08-26 12:04:46 -07001743 int pos = op->args[2];
1744 int len = op->args[3];
Richard Hendersonfae450b2021-08-25 22:42:19 -07001745
Richard Hendersonb6cd00f2024-12-08 20:05:11 -06001746 if (ti_is_const(t1)) {
1747 return tcg_opt_gen_movi(ctx, op, op->args[0],
1748 extract64(ti_const_val(t1), pos, len));
Richard Hendersonb6617c82021-08-24 10:44:53 -07001749 }
Richard Hendersonfae450b2021-08-25 22:42:19 -07001750
Richard Hendersonb6cd00f2024-12-08 20:05:11 -06001751 z_mask_old = t1->z_mask;
Richard Henderson57fe5c62021-08-26 12:04:46 -07001752 z_mask = extract64(z_mask_old, pos, len);
Richard Henderson045ace32024-12-19 10:33:51 -08001753 if (pos == 0 && fold_affected_mask(ctx, op, z_mask_old ^ z_mask)) {
1754 return true;
Richard Hendersonfae450b2021-08-25 22:42:19 -07001755 }
Richard Hendersonfae450b2021-08-25 22:42:19 -07001756
Richard Hendersonb6cd00f2024-12-08 20:05:11 -06001757 return fold_masks_z(ctx, op, z_mask);
Richard Hendersonb6617c82021-08-24 10:44:53 -07001758}
1759
Richard Hendersondcd08992021-08-24 10:41:39 -07001760static bool fold_extract2(OptContext *ctx, TCGOp *op)
1761{
1762 if (arg_is_const(op->args[1]) && arg_is_const(op->args[2])) {
1763 uint64_t v1 = arg_info(op->args[1])->val;
1764 uint64_t v2 = arg_info(op->args[2])->val;
1765 int shr = op->args[3];
1766
1767 if (op->opc == INDEX_op_extract2_i64) {
1768 v1 >>= shr;
1769 v2 <<= 64 - shr;
1770 } else {
1771 v1 = (uint32_t)v1 >> shr;
Richard Henderson225bec02021-11-09 23:17:59 +01001772 v2 = (uint64_t)((int32_t)v2 << (32 - shr));
Richard Hendersondcd08992021-08-24 10:41:39 -07001773 }
1774 return tcg_opt_gen_movi(ctx, op, op->args[0], v1 | v2);
1775 }
Richard Hendersonc9df99e2024-12-08 20:06:42 -06001776 return finish_folding(ctx, op);
Richard Hendersondcd08992021-08-24 10:41:39 -07001777}
1778
Richard Henderson2f9f08b2021-08-25 12:03:48 -07001779static bool fold_exts(OptContext *ctx, TCGOp *op)
1780{
Richard Hendersona9621922024-12-08 20:08:46 -06001781 uint64_t s_mask_old, s_mask, z_mask;
Richard Hendersonfae450b2021-08-25 22:42:19 -07001782 bool type_change = false;
Richard Hendersona9621922024-12-08 20:08:46 -06001783 TempOptInfo *t1;
Richard Hendersonfae450b2021-08-25 22:42:19 -07001784
1785 if (fold_const1(ctx, op)) {
1786 return true;
1787 }
1788
Richard Hendersona9621922024-12-08 20:08:46 -06001789 t1 = arg_info(op->args[1]);
1790 z_mask = t1->z_mask;
1791 s_mask = t1->s_mask;
Richard Henderson57fe5c62021-08-26 12:04:46 -07001792 s_mask_old = s_mask;
Richard Hendersonfae450b2021-08-25 22:42:19 -07001793
1794 switch (op->opc) {
1795 CASE_OP_32_64(ext8s):
Richard Hendersona9621922024-12-08 20:08:46 -06001796 s_mask |= INT8_MIN;
1797 z_mask = (int8_t)z_mask;
Richard Hendersonfae450b2021-08-25 22:42:19 -07001798 break;
1799 CASE_OP_32_64(ext16s):
Richard Hendersona9621922024-12-08 20:08:46 -06001800 s_mask |= INT16_MIN;
1801 z_mask = (int16_t)z_mask;
Richard Hendersonfae450b2021-08-25 22:42:19 -07001802 break;
1803 case INDEX_op_ext_i32_i64:
1804 type_change = true;
1805 QEMU_FALLTHROUGH;
1806 case INDEX_op_ext32s_i64:
Richard Hendersona9621922024-12-08 20:08:46 -06001807 s_mask |= INT32_MIN;
1808 z_mask = (int32_t)z_mask;
Richard Hendersonfae450b2021-08-25 22:42:19 -07001809 break;
1810 default:
1811 g_assert_not_reached();
1812 }
1813
Richard Henderson6d70ddc2024-12-21 21:08:10 -08001814 if (0 && !type_change && fold_affected_mask(ctx, op, s_mask & ~s_mask_old)) {
Richard Henderson045ace32024-12-19 10:33:51 -08001815 return true;
Richard Henderson57fe5c62021-08-26 12:04:46 -07001816 }
Richard Hendersonfae450b2021-08-25 22:42:19 -07001817
Richard Hendersona9621922024-12-08 20:08:46 -06001818 return fold_masks_zs(ctx, op, z_mask, s_mask);
Richard Henderson2f9f08b2021-08-25 12:03:48 -07001819}
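
/*
 * E.g. ext8s on an input whose z_mask is 0x7f: bit 7 is known zero,
 * so (int8_t)0x7f == 0x7f and no new bits become possible; with
 * z_mask 0xff instead, the cast sign-extends and all bits above
 * bit 7 may be set.  Either way s_mask gains INT8_MIN: bits 7..63
 * of the result are known to match its msb.
 */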
1820
1821static bool fold_extu(OptContext *ctx, TCGOp *op)
1822{
Richard Hendersonfae450b2021-08-25 22:42:19 -07001823 uint64_t z_mask_old, z_mask;
1824 bool type_change = false;
1825
1826 if (fold_const1(ctx, op)) {
1827 return true;
1828 }
1829
1830 z_mask_old = z_mask = arg_info(op->args[1])->z_mask;
1831
1832 switch (op->opc) {
1833 CASE_OP_32_64(ext8u):
1834 z_mask = (uint8_t)z_mask;
1835 break;
1836 CASE_OP_32_64(ext16u):
1837 z_mask = (uint16_t)z_mask;
1838 break;
1839 case INDEX_op_extrl_i64_i32:
1840 case INDEX_op_extu_i32_i64:
1841 type_change = true;
1842 QEMU_FALLTHROUGH;
1843 case INDEX_op_ext32u_i64:
1844 z_mask = (uint32_t)z_mask;
1845 break;
1846 case INDEX_op_extrh_i64_i32:
1847 type_change = true;
1848 z_mask >>= 32;
1849 break;
1850 default:
1851 g_assert_not_reached();
1852 }
1853
Richard Henderson045ace32024-12-19 10:33:51 -08001854 if (!type_change && fold_affected_mask(ctx, op, z_mask_old ^ z_mask)) {
1855 return true;
Richard Hendersonfae450b2021-08-25 22:42:19 -07001856 }
Richard Henderson08abe292024-12-08 20:11:44 -06001857
1858 return fold_masks_z(ctx, op, z_mask);
Richard Henderson2f9f08b2021-08-25 12:03:48 -07001859}
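
/*
 * E.g. ext16u on an input whose z_mask is already 0x00ff: the
 * truncation changes nothing (z_mask_old ^ z_mask == 0), so
 * fold_affected_mask turns the zero-extension into a plain move.
 */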
1860
Richard Henderson3eefdf22021-08-25 11:06:43 -07001861static bool fold_mb(OptContext *ctx, TCGOp *op)
1862{
1863 /* Eliminate duplicate and redundant fence instructions. */
1864 if (ctx->prev_mb) {
1865 /*
1866 * Merge two barriers of the same type into one,
1867 * or a weaker barrier into a stronger one,
1868 * or two weaker barriers into a stronger one.
1869 * mb X; mb Y => mb X|Y
1870 * mb; strl => mb; st
1871 * ldaq; mb => ld; mb
1872 * ldaq; strl => ld; mb; st
1873 * Other combinations are also merged into a strong
1874 * barrier. This is stricter than specified but for
1875 * the purposes of TCG is better than not optimizing.
1876 */
1877 ctx->prev_mb->args[0] |= op->args[0];
1878 tcg_op_remove(ctx->tcg, op);
1879 } else {
1880 ctx->prev_mb = op;
1881 }
1882 return true;
1883}
1884
Richard Henderson2cfac7f2021-08-25 13:05:43 -07001885static bool fold_mov(OptContext *ctx, TCGOp *op)
1886{
1887 return tcg_opt_gen_mov(ctx, op, op->args[0], op->args[1]);
1888}
1889
Richard Henderson0c310a32021-08-24 10:37:24 -07001890static bool fold_movcond(OptContext *ctx, TCGOp *op)
1891{
Richard Henderson32202782024-12-08 20:16:38 -06001892 uint64_t z_mask, s_mask;
1893 TempOptInfo *tt, *ft;
Richard Henderson7a2f7082021-08-26 07:06:39 -07001894 int i;
Richard Henderson0c310a32021-08-24 10:37:24 -07001895
Richard Henderson141125e2024-09-06 21:00:10 -07001896 /* If true and false values are the same, eliminate the cmp. */
1897 if (args_are_copies(op->args[3], op->args[4])) {
1898 return tcg_opt_gen_mov(ctx, op, op->args[0], op->args[3]);
1899 }
1900
Richard Henderson7a2f7082021-08-26 07:06:39 -07001901 /*
1902 * Canonicalize the "false" input reg to match the destination reg so
1903 * that the tcg backend can implement a "move if true" operation.
1904 */
1905 if (swap_commutative(op->args[0], &op->args[4], &op->args[3])) {
Richard Henderson246c4b72023-10-24 16:36:50 -07001906 op->args[5] = tcg_invert_cond(op->args[5]);
Richard Henderson7a2f7082021-08-26 07:06:39 -07001907 }
1908
Richard Hendersonfb04ab72024-01-10 18:21:58 +11001909 i = do_constant_folding_cond1(ctx, op, NO_DEST, &op->args[1],
Richard Henderson246c4b72023-10-24 16:36:50 -07001910 &op->args[2], &op->args[5]);
Richard Henderson0c310a32021-08-24 10:37:24 -07001911 if (i >= 0) {
1912 return tcg_opt_gen_mov(ctx, op, op->args[0], op->args[4 - i]);
1913 }
1914
Richard Henderson32202782024-12-08 20:16:38 -06001915 tt = arg_info(op->args[3]);
1916 ft = arg_info(op->args[4]);
1917 z_mask = tt->z_mask | ft->z_mask;
1918 s_mask = tt->s_mask & ft->s_mask;
Richard Hendersonfae450b2021-08-25 22:42:19 -07001919
Richard Henderson32202782024-12-08 20:16:38 -06001920 if (ti_is_const(tt) && ti_is_const(ft)) {
1921 uint64_t tv = ti_const_val(tt);
1922 uint64_t fv = ti_const_val(ft);
Richard Henderson36355022023-08-04 23:24:04 +00001923 TCGOpcode opc, negopc = 0;
Richard Henderson246c4b72023-10-24 16:36:50 -07001924 TCGCond cond = op->args[5];
Richard Henderson0c310a32021-08-24 10:37:24 -07001925
Richard Henderson67f84c92021-08-25 08:00:20 -07001926 switch (ctx->type) {
1927 case TCG_TYPE_I32:
1928 opc = INDEX_op_setcond_i32;
Richard Henderson36355022023-08-04 23:24:04 +00001929 if (TCG_TARGET_HAS_negsetcond_i32) {
1930 negopc = INDEX_op_negsetcond_i32;
1931 }
1932 tv = (int32_t)tv;
1933 fv = (int32_t)fv;
Richard Henderson67f84c92021-08-25 08:00:20 -07001934 break;
1935 case TCG_TYPE_I64:
1936 opc = INDEX_op_setcond_i64;
Richard Henderson36355022023-08-04 23:24:04 +00001937 if (TCG_TARGET_HAS_negsetcond_i64) {
1938 negopc = INDEX_op_negsetcond_i64;
1939 }
Richard Henderson67f84c92021-08-25 08:00:20 -07001940 break;
1941 default:
1942 g_assert_not_reached();
1943 }
Richard Henderson0c310a32021-08-24 10:37:24 -07001944
1945 if (tv == 1 && fv == 0) {
1946 op->opc = opc;
1947 op->args[3] = cond;
1948 } else if (fv == 1 && tv == 0) {
1949 op->opc = opc;
1950 op->args[3] = tcg_invert_cond(cond);
Richard Henderson36355022023-08-04 23:24:04 +00001951 } else if (negopc) {
1952 if (tv == -1 && fv == 0) {
1953 op->opc = negopc;
1954 op->args[3] = cond;
1955 } else if (fv == -1 && tv == 0) {
1956 op->opc = negopc;
1957 op->args[3] = tcg_invert_cond(cond);
1958 }
Richard Henderson0c310a32021-08-24 10:37:24 -07001959 }
1960 }
Richard Henderson32202782024-12-08 20:16:38 -06001961
1962 return fold_masks_zs(ctx, op, z_mask, s_mask);
Richard Henderson0c310a32021-08-24 10:37:24 -07001963}
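
/*
 * E.g. movcond_i32 d, x, y, 1, 0, cond computes exactly
 * setcond_i32 d, x, y, cond; with constants -1/0 it maps to
 * negsetcond_i32 when the backend provides it.
 */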
1964
Richard Henderson2f9f08b2021-08-25 12:03:48 -07001965static bool fold_mul(OptContext *ctx, TCGOp *op)
1966{
Richard Hendersone8679952021-08-25 13:19:52 -07001967 if (fold_const2(ctx, op) ||
Richard Henderson5b5cf472021-10-25 11:19:14 -07001968 fold_xi_to_i(ctx, op, 0) ||
1969 fold_xi_to_x(ctx, op, 1)) {
Richard Hendersone8679952021-08-25 13:19:52 -07001970 return true;
1971 }
Richard Hendersoncd9c5832024-12-08 20:18:02 -06001972 return finish_folding(ctx, op);
Richard Henderson2f9f08b2021-08-25 12:03:48 -07001973}
1974
1975static bool fold_mul_highpart(OptContext *ctx, TCGOp *op)
1976{
Richard Henderson7a2f7082021-08-26 07:06:39 -07001977 if (fold_const2_commutative(ctx, op) ||
Richard Hendersone8679952021-08-25 13:19:52 -07001978 fold_xi_to_i(ctx, op, 0)) {
1979 return true;
1980 }
Richard Hendersoncd9c5832024-12-08 20:18:02 -06001981 return finish_folding(ctx, op);
Richard Henderson2f9f08b2021-08-25 12:03:48 -07001982}
1983
Richard Henderson407112b2021-08-26 06:33:04 -07001984static bool fold_multiply2(OptContext *ctx, TCGOp *op)
Richard Henderson6b8ac0d2021-08-24 10:24:12 -07001985{
Richard Henderson7a2f7082021-08-26 07:06:39 -07001986 swap_commutative(op->args[0], &op->args[2], &op->args[3]);
1987
Richard Henderson6b8ac0d2021-08-24 10:24:12 -07001988 if (arg_is_const(op->args[2]) && arg_is_const(op->args[3])) {
Richard Henderson407112b2021-08-26 06:33:04 -07001989 uint64_t a = arg_info(op->args[2])->val;
1990 uint64_t b = arg_info(op->args[3])->val;
1991 uint64_t h, l;
Richard Henderson6b8ac0d2021-08-24 10:24:12 -07001992 TCGArg rl, rh;
Richard Henderson407112b2021-08-26 06:33:04 -07001993 TCGOp *op2;
1994
1995 switch (op->opc) {
1996 case INDEX_op_mulu2_i32:
1997 l = (uint64_t)(uint32_t)a * (uint32_t)b;
1998 h = (int32_t)(l >> 32);
1999 l = (int32_t)l;
2000 break;
2001 case INDEX_op_muls2_i32:
2002 l = (int64_t)(int32_t)a * (int32_t)b;
2003 h = l >> 32;
2004 l = (int32_t)l;
2005 break;
2006 case INDEX_op_mulu2_i64:
2007 mulu64(&l, &h, a, b);
2008 break;
2009 case INDEX_op_muls2_i64:
2010 muls64(&l, &h, a, b);
2011 break;
2012 default:
2013 g_assert_not_reached();
2014 }
Richard Henderson6b8ac0d2021-08-24 10:24:12 -07002015
2016 rl = op->args[0];
2017 rh = op->args[1];
Richard Henderson407112b2021-08-26 06:33:04 -07002018
2019 /* The proper opcode is supplied by tcg_opt_gen_mov. */
Philippe Mathieu-Daudéd4478942022-12-18 22:18:31 +01002020 op2 = tcg_op_insert_before(ctx->tcg, op, 0, 2);
Richard Henderson407112b2021-08-26 06:33:04 -07002021
2022 tcg_opt_gen_movi(ctx, op, rl, l);
2023 tcg_opt_gen_movi(ctx, op2, rh, h);
Richard Henderson6b8ac0d2021-08-24 10:24:12 -07002024 return true;
2025 }
Richard Hendersoncd9c5832024-12-08 20:18:02 -06002026 return finish_folding(ctx, op);
Richard Henderson6b8ac0d2021-08-24 10:24:12 -07002027}
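
/*
 * E.g. mulu2_i32 rl, rh, 0x80000000, 2: the full product is
 * 0x100000000, so the op becomes movi rl, 0 and a second op is
 * inserted before it for movi rh, 1.
 */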
2028
Richard Henderson2f9f08b2021-08-25 12:03:48 -07002029static bool fold_nand(OptContext *ctx, TCGOp *op)
2030{
Richard Hendersonfa3168e2024-12-08 20:20:40 -06002031 uint64_t s_mask;
2032
Richard Henderson7a2f7082021-08-26 07:06:39 -07002033 if (fold_const2_commutative(ctx, op) ||
Richard Henderson0e0a32b2021-08-24 13:18:01 -07002034 fold_xi_to_not(ctx, op, -1)) {
2035 return true;
2036 }
Richard Henderson3f2b1f82021-08-26 13:08:54 -07002037
Richard Hendersonfa3168e2024-12-08 20:20:40 -06002038 s_mask = arg_info(op->args[1])->s_mask
2039 & arg_info(op->args[2])->s_mask;
2040 return fold_masks_s(ctx, op, s_mask);
Richard Henderson2f9f08b2021-08-25 12:03:48 -07002041}
2042
Richard Hendersone25fe882024-04-04 20:53:50 +00002043static bool fold_neg_no_const(OptContext *ctx, TCGOp *op)
Richard Henderson2f9f08b2021-08-25 12:03:48 -07002044{
Richard Hendersonfae450b2021-08-25 22:42:19 -07002045 /* Set to 1 all bits to the left of the rightmost. */
Richard Hendersone25fe882024-04-04 20:53:50 +00002046 uint64_t z_mask = arg_info(op->args[1])->z_mask;
Richard Hendersond151fd32024-12-08 20:23:11 -06002047 z_mask = -(z_mask & -z_mask);
Richard Hendersonfae450b2021-08-25 22:42:19 -07002048
Richard Hendersond151fd32024-12-08 20:23:11 -06002049 return fold_masks_z(ctx, op, z_mask);
Richard Henderson2f9f08b2021-08-25 12:03:48 -07002050}
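
/*
 * E.g. if z_mask == 0x60, the lowest bit that may be set in the
 * input is bit 5, so the input is a multiple of 32; its negation
 * is too, hence -(0x60 & -0x60) == -0x20 == 0xffffffffffffffe0,
 * keeping bits 0..4 known zero in the result.
 */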
2051
Richard Hendersone25fe882024-04-04 20:53:50 +00002052static bool fold_neg(OptContext *ctx, TCGOp *op)
2053{
2054 return fold_const1(ctx, op) || fold_neg_no_const(ctx, op);
2055}
2056
Richard Henderson2f9f08b2021-08-25 12:03:48 -07002057static bool fold_nor(OptContext *ctx, TCGOp *op)
2058{
Richard Henderson2b7b6952024-12-08 20:25:21 -06002059 uint64_t s_mask;
2060
Richard Henderson7a2f7082021-08-26 07:06:39 -07002061 if (fold_const2_commutative(ctx, op) ||
Richard Henderson0e0a32b2021-08-24 13:18:01 -07002062 fold_xi_to_not(ctx, op, 0)) {
2063 return true;
2064 }
Richard Henderson3f2b1f82021-08-26 13:08:54 -07002065
Richard Henderson2b7b6952024-12-08 20:25:21 -06002066 s_mask = arg_info(op->args[1])->s_mask
2067 & arg_info(op->args[2])->s_mask;
2068 return fold_masks_s(ctx, op, s_mask);
Richard Henderson2f9f08b2021-08-25 12:03:48 -07002069}
2070
2071static bool fold_not(OptContext *ctx, TCGOp *op)
2072{
Richard Henderson0e0a32b2021-08-24 13:18:01 -07002073 if (fold_const1(ctx, op)) {
2074 return true;
2075 }
Richard Henderson608e75f2024-12-08 20:27:02 -06002076 return fold_masks_s(ctx, op, arg_info(op->args[1])->s_mask);
Richard Henderson2f9f08b2021-08-25 12:03:48 -07002077}
2078
2079static bool fold_or(OptContext *ctx, TCGOp *op)
2080{
Richard Henderson83b1ba32024-12-08 20:28:59 -06002081 uint64_t z_mask, s_mask;
2082 TempOptInfo *t1, *t2;
2083
Richard Henderson7a2f7082021-08-26 07:06:39 -07002084 if (fold_const2_commutative(ctx, op) ||
Richard Hendersona63ce0e2021-08-25 20:28:53 -07002085 fold_xi_to_x(ctx, op, 0) ||
Richard Hendersonca7bb042021-08-25 13:14:21 -07002086 fold_xx_to_x(ctx, op)) {
2087 return true;
2088 }
Richard Hendersonfae450b2021-08-25 22:42:19 -07002089
Richard Henderson83b1ba32024-12-08 20:28:59 -06002090 t1 = arg_info(op->args[1]);
2091 t2 = arg_info(op->args[2]);
2092 z_mask = t1->z_mask | t2->z_mask;
2093 s_mask = t1->s_mask & t2->s_mask;
2094 return fold_masks_zs(ctx, op, z_mask, s_mask);
Richard Henderson2f9f08b2021-08-25 12:03:48 -07002095}
2096
2097static bool fold_orc(OptContext *ctx, TCGOp *op)
2098{
Richard Henderson54e26b22024-12-08 20:30:20 -06002099 uint64_t s_mask;
2100
Richard Henderson0e0a32b2021-08-24 13:18:01 -07002101 if (fold_const2(ctx, op) ||
Richard Henderson4e858d92021-08-26 07:31:13 -07002102 fold_xx_to_i(ctx, op, -1) ||
Richard Hendersona63ce0e2021-08-25 20:28:53 -07002103 fold_xi_to_x(ctx, op, -1) ||
Richard Henderson0e0a32b2021-08-24 13:18:01 -07002104 fold_ix_to_not(ctx, op, 0)) {
2105 return true;
2106 }
Richard Henderson3f2b1f82021-08-26 13:08:54 -07002107
Richard Henderson54e26b22024-12-08 20:30:20 -06002108 s_mask = arg_info(op->args[1])->s_mask
2109 & arg_info(op->args[2])->s_mask;
2110 return fold_masks_s(ctx, op, s_mask);
Richard Henderson2f9f08b2021-08-25 12:03:48 -07002111}
2112
Richard Henderson6813be92024-12-08 20:33:30 -06002113static bool fold_qemu_ld_1reg(OptContext *ctx, TCGOp *op)
Richard Henderson3eefdf22021-08-25 11:06:43 -07002114{
Richard Hendersonfae450b2021-08-25 22:42:19 -07002115 const TCGOpDef *def = &tcg_op_defs[op->opc];
2116 MemOpIdx oi = op->args[def->nb_oargs + def->nb_iargs];
2117 MemOp mop = get_memop(oi);
2118 int width = 8 * memop_size(mop);
Richard Henderson6813be92024-12-08 20:33:30 -06002119 uint64_t z_mask = -1, s_mask = 0;
Richard Hendersonfae450b2021-08-25 22:42:19 -07002120
Richard Henderson57fe5c62021-08-26 12:04:46 -07002121 if (width < 64) {
Richard Henderson75c3bf32024-12-19 10:50:40 -08002122 if (mop & MO_SIGN) {
Richard Henderson6813be92024-12-08 20:33:30 -06002123 s_mask = MAKE_64BIT_MASK(width - 1, 64 - (width - 1));
Richard Henderson75c3bf32024-12-19 10:50:40 -08002124 } else {
Richard Henderson6813be92024-12-08 20:33:30 -06002125 z_mask = MAKE_64BIT_MASK(0, width);
Richard Henderson57fe5c62021-08-26 12:04:46 -07002126 }
Richard Hendersonfae450b2021-08-25 22:42:19 -07002127 }
2128
Richard Henderson3eefdf22021-08-25 11:06:43 -07002129 /* Opcodes that touch guest memory stop the mb optimization. */
2130 ctx->prev_mb = NULL;
Richard Henderson6813be92024-12-08 20:33:30 -06002131
2132 return fold_masks_zs(ctx, op, z_mask, s_mask);
2133}
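
/*
 * E.g. a MO_UW load has width 16, so z_mask == 0xffff; a MO_SW load
 * instead sets s_mask == MAKE_64BIT_MASK(15, 49), recording that
 * bits 15..63 of the loaded value all match the sign bit.
 */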
2134
2135static bool fold_qemu_ld_2reg(OptContext *ctx, TCGOp *op)
2136{
2137 /* Opcodes that touch guest memory stop the mb optimization. */
2138 ctx->prev_mb = NULL;
2139 return finish_folding(ctx, op);
Richard Henderson3eefdf22021-08-25 11:06:43 -07002140}
2141
2142static bool fold_qemu_st(OptContext *ctx, TCGOp *op)
2143{
2144 /* Opcodes that touch guest memory stop the mb optimization. */
2145 ctx->prev_mb = NULL;
Richard Henderson082b3ef2024-12-08 20:34:57 -06002146 return true;
Richard Henderson3eefdf22021-08-25 11:06:43 -07002147}
2148
Richard Henderson2f9f08b2021-08-25 12:03:48 -07002149static bool fold_remainder(OptContext *ctx, TCGOp *op)
2150{
Richard Henderson267c17e2021-10-25 11:30:33 -07002151 if (fold_const2(ctx, op) ||
2152 fold_xx_to_i(ctx, op, 0)) {
2153 return true;
2154 }
Richard Hendersonf9e39342024-12-08 20:36:50 -06002155 return finish_folding(ctx, op);
Richard Henderson2f9f08b2021-08-25 12:03:48 -07002156}
2157
Richard Henderson8d65cda2024-03-26 16:00:40 -10002158static bool fold_setcond_zmask(OptContext *ctx, TCGOp *op, bool neg)
2159{
2160 uint64_t a_zmask, b_val;
2161 TCGCond cond;
2162
2163 if (!arg_is_const(op->args[2])) {
2164 return false;
2165 }
2166
2167 a_zmask = arg_info(op->args[1])->z_mask;
2168 b_val = arg_info(op->args[2])->val;
2169 cond = op->args[3];
2170
2171 if (ctx->type == TCG_TYPE_I32) {
2172 a_zmask = (uint32_t)a_zmask;
2173 b_val = (uint32_t)b_val;
2174 }
2175
2176 /*
2177 * A with only low bits set vs B with high bits set means that A < B.
2178 */
2179 if (a_zmask < b_val) {
2180 bool inv = false;
2181
2182 switch (cond) {
2183 case TCG_COND_NE:
2184 case TCG_COND_LEU:
2185 case TCG_COND_LTU:
2186 inv = true;
2187 /* fall through */
2188 case TCG_COND_GTU:
2189 case TCG_COND_GEU:
2190 case TCG_COND_EQ:
2191 return tcg_opt_gen_movi(ctx, op, op->args[0], neg ? -inv : inv);
2192 default:
2193 break;
2194 }
2195 }
2196
2197 /*
2198 * A with only lsb set is already boolean.
2199 */
2200 if (a_zmask <= 1) {
2201 bool convert = false;
2202 bool inv = false;
2203
2204 switch (cond) {
2205 case TCG_COND_EQ:
2206 inv = true;
2207 /* fall through */
2208 case TCG_COND_NE:
2209 convert = (b_val == 0);
2210 break;
2211 case TCG_COND_LTU:
2212 case TCG_COND_TSTEQ:
2213 inv = true;
2214 /* fall through */
2215 case TCG_COND_GEU:
2216 case TCG_COND_TSTNE:
2217 convert = (b_val == 1);
2218 break;
2219 default:
2220 break;
2221 }
2222 if (convert) {
2223 TCGOpcode add_opc, xor_opc, neg_opc;
2224
2225 if (!inv && !neg) {
2226 return tcg_opt_gen_mov(ctx, op, op->args[0], op->args[1]);
2227 }
2228
2229 switch (ctx->type) {
2230 case TCG_TYPE_I32:
2231 add_opc = INDEX_op_add_i32;
2232 neg_opc = INDEX_op_neg_i32;
2233 xor_opc = INDEX_op_xor_i32;
2234 break;
2235 case TCG_TYPE_I64:
2236 add_opc = INDEX_op_add_i64;
2237 neg_opc = INDEX_op_neg_i64;
2238 xor_opc = INDEX_op_xor_i64;
2239 break;
2240 default:
2241 g_assert_not_reached();
2242 }
2243
2244 if (!inv) {
2245 op->opc = neg_opc;
2246 } else if (neg) {
2247 op->opc = add_opc;
2248 op->args[2] = arg_new_constant(ctx, -1);
2249 } else {
2250 op->opc = xor_opc;
2251 op->args[2] = arg_new_constant(ctx, 1);
2252 }
2253 return false;
2254 }
2255 }
2256
2257 return false;
2258}
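
/*
 * E.g. with a_zmask == 0xff (a is at most 255) and b_val == 0x100,
 * LTU is always true and GEU always false, so the setcond folds to
 * a constant.  Likewise, once a is known boolean (a_zmask <= 1),
 * setcond a != 0 is a itself and setcond a == 0 becomes xor a, 1.
 */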
2259
Richard Hendersonceb9ee02023-10-23 23:44:27 -07002260static void fold_setcond_tst_pow2(OptContext *ctx, TCGOp *op, bool neg)
2261{
Paolo Bonziniff202812024-02-28 12:06:41 +01002262 TCGOpcode and_opc, sub_opc, xor_opc, neg_opc, shr_opc;
2263 TCGOpcode uext_opc = 0, sext_opc = 0;
Richard Hendersonceb9ee02023-10-23 23:44:27 -07002264 TCGCond cond = op->args[3];
2265 TCGArg ret, src1, src2;
2266 TCGOp *op2;
2267 uint64_t val;
2268 int sh;
2269 bool inv;
2270
2271 if (!is_tst_cond(cond) || !arg_is_const(op->args[2])) {
2272 return;
2273 }
2274
2275 src2 = op->args[2];
2276 val = arg_info(src2)->val;
2277 if (!is_power_of_2(val)) {
2278 return;
2279 }
2280 sh = ctz64(val);
2281
2282 switch (ctx->type) {
2283 case TCG_TYPE_I32:
2284 and_opc = INDEX_op_and_i32;
2285 sub_opc = INDEX_op_sub_i32;
2286 xor_opc = INDEX_op_xor_i32;
2287 shr_opc = INDEX_op_shr_i32;
2288 neg_opc = INDEX_op_neg_i32;
2289 if (TCG_TARGET_extract_i32_valid(sh, 1)) {
2290 uext_opc = TCG_TARGET_HAS_extract_i32 ? INDEX_op_extract_i32 : 0;
2291 sext_opc = TCG_TARGET_HAS_sextract_i32 ? INDEX_op_sextract_i32 : 0;
2292 }
2293 break;
2294 case TCG_TYPE_I64:
2295 and_opc = INDEX_op_and_i64;
2296 sub_opc = INDEX_op_sub_i64;
2297 xor_opc = INDEX_op_xor_i64;
2298 shr_opc = INDEX_op_shr_i64;
2299 neg_opc = INDEX_op_neg_i64;
2300 if (TCG_TARGET_extract_i64_valid(sh, 1)) {
2301 uext_opc = TCG_TARGET_HAS_extract_i64 ? INDEX_op_extract_i64 : 0;
2302 sext_opc = TCG_TARGET_HAS_sextract_i64 ? INDEX_op_sextract_i64 : 0;
2303 }
2304 break;
2305 default:
2306 g_assert_not_reached();
2307 }
2308
2309 ret = op->args[0];
2310 src1 = op->args[1];
2311 inv = cond == TCG_COND_TSTEQ;
2312
2313 if (sh && sext_opc && neg && !inv) {
2314 op->opc = sext_opc;
2315 op->args[1] = src1;
2316 op->args[2] = sh;
2317 op->args[3] = 1;
2318 return;
2319 } else if (sh && uext_opc) {
2320 op->opc = uext_opc;
2321 op->args[1] = src1;
2322 op->args[2] = sh;
2323 op->args[3] = 1;
2324 } else {
2325 if (sh) {
2326 op2 = tcg_op_insert_before(ctx->tcg, op, shr_opc, 3);
2327 op2->args[0] = ret;
2328 op2->args[1] = src1;
2329 op2->args[2] = arg_new_constant(ctx, sh);
2330 src1 = ret;
2331 }
2332 op->opc = and_opc;
2333 op->args[1] = src1;
2334 op->args[2] = arg_new_constant(ctx, 1);
2335 }
2336
2337 if (neg && inv) {
2338 op2 = tcg_op_insert_after(ctx->tcg, op, sub_opc, 3);
2339 op2->args[0] = ret;
2340 op2->args[1] = ret;
2341 op2->args[2] = arg_new_constant(ctx, 1);
2342 } else if (inv) {
2343 op2 = tcg_op_insert_after(ctx->tcg, op, xor_opc, 3);
2344 op2->args[0] = ret;
2345 op2->args[1] = ret;
2346 op2->args[2] = arg_new_constant(ctx, 1);
2347 } else if (neg) {
2348 op2 = tcg_op_insert_after(ctx->tcg, op, neg_opc, 2);
2349 op2->args[0] = ret;
2350 op2->args[1] = ret;
2351 }
2352}
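
/*
 * E.g. setcond_i32 d, x, 8, TSTNE tests bit 3 of x, so with extract
 * available it becomes extract_i32 d, x, 3, 1; for TSTEQ the result
 * is then inverted with xor d, d, 1, and the neg variants use
 * sextract or a trailing neg/sub to produce 0/-1.
 */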
2353
Richard Hendersonc63ff552021-08-24 09:35:30 -07002354static bool fold_setcond(OptContext *ctx, TCGOp *op)
2355{
Richard Hendersonfb04ab72024-01-10 18:21:58 +11002356 int i = do_constant_folding_cond1(ctx, op, op->args[0], &op->args[1],
Richard Henderson246c4b72023-10-24 16:36:50 -07002357 &op->args[2], &op->args[3]);
Richard Hendersonc63ff552021-08-24 09:35:30 -07002358 if (i >= 0) {
2359 return tcg_opt_gen_movi(ctx, op, op->args[0], i);
2360 }
Richard Henderson8d65cda2024-03-26 16:00:40 -10002361
2362 if (fold_setcond_zmask(ctx, op, false)) {
2363 return true;
2364 }
Richard Hendersonceb9ee02023-10-23 23:44:27 -07002365 fold_setcond_tst_pow2(ctx, op, false);
Richard Hendersonfae450b2021-08-25 22:42:19 -07002366
2367 ctx->z_mask = 1;
Richard Hendersonc63ff552021-08-24 09:35:30 -07002368 return false;
2369}
2370
Richard Henderson36355022023-08-04 23:24:04 +00002371static bool fold_negsetcond(OptContext *ctx, TCGOp *op)
2372{
Richard Hendersonfb04ab72024-01-10 18:21:58 +11002373 int i = do_constant_folding_cond1(ctx, op, op->args[0], &op->args[1],
Richard Henderson246c4b72023-10-24 16:36:50 -07002374 &op->args[2], &op->args[3]);
Richard Henderson36355022023-08-04 23:24:04 +00002375 if (i >= 0) {
2376 return tcg_opt_gen_movi(ctx, op, op->args[0], -i);
2377 }
Richard Henderson8d65cda2024-03-26 16:00:40 -10002378
2379 if (fold_setcond_zmask(ctx, op, true)) {
2380 return true;
2381 }
Richard Hendersonceb9ee02023-10-23 23:44:27 -07002382 fold_setcond_tst_pow2(ctx, op, true);
Richard Henderson36355022023-08-04 23:24:04 +00002383
2384 /* Value is {0,-1} so all bits are repetitions of the sign. */
2385 ctx->s_mask = -1;
2386 return false;
2387}
2388
Richard Hendersonbc47b1a2021-08-24 09:09:35 -07002389static bool fold_setcond2(OptContext *ctx, TCGOp *op)
2390{
Richard Henderson7e64b112023-10-24 16:53:56 -07002391 TCGCond cond;
Richard Henderson7a2f7082021-08-26 07:06:39 -07002392 int i, inv = 0;
Richard Hendersonbc47b1a2021-08-24 09:09:35 -07002393
Richard Hendersonfb04ab72024-01-10 18:21:58 +11002394 i = do_constant_folding_cond2(ctx, op, &op->args[1]);
Richard Henderson7e64b112023-10-24 16:53:56 -07002395 cond = op->args[5];
Richard Hendersonbc47b1a2021-08-24 09:09:35 -07002396 if (i >= 0) {
2397 goto do_setcond_const;
2398 }
2399
2400 switch (cond) {
2401 case TCG_COND_LT:
2402 case TCG_COND_GE:
2403 /*
2404 * Simplify LT/GE comparisons vs zero to a single compare
2405 * vs the high word of the input.
2406 */
Richard Henderson27cdb852023-10-23 11:38:00 -07002407 if (arg_is_const_val(op->args[3], 0) &&
2408 arg_is_const_val(op->args[4], 0)) {
Richard Hendersonbc47b1a2021-08-24 09:09:35 -07002409 goto do_setcond_high;
2410 }
2411 break;
2412
2413 case TCG_COND_NE:
2414 inv = 1;
2415 QEMU_FALLTHROUGH;
2416 case TCG_COND_EQ:
2417 /*
2418 * Simplify EQ/NE comparisons where one of the pairs
2419 * can be simplified.
2420 */
Richard Henderson67f84c92021-08-25 08:00:20 -07002421 i = do_constant_folding_cond(TCG_TYPE_I32, op->args[1],
Richard Hendersonbc47b1a2021-08-24 09:09:35 -07002422 op->args[3], cond);
2423 switch (i ^ inv) {
2424 case 0:
2425 goto do_setcond_const;
2426 case 1:
2427 goto do_setcond_high;
2428 }
2429
Richard Henderson67f84c92021-08-25 08:00:20 -07002430 i = do_constant_folding_cond(TCG_TYPE_I32, op->args[2],
Richard Hendersonbc47b1a2021-08-24 09:09:35 -07002431 op->args[4], cond);
2432 switch (i ^ inv) {
2433 case 0:
2434 goto do_setcond_const;
2435 case 1:
Richard Hendersonceb9ee02023-10-23 23:44:27 -07002436 goto do_setcond_low;
2437 }
2438 break;
2439
2440 case TCG_COND_TSTEQ:
2441 case TCG_COND_TSTNE:
Richard Hendersona71d9df2024-06-30 19:46:23 -07002442 if (arg_is_const_val(op->args[3], 0)) {
Richard Hendersonceb9ee02023-10-23 23:44:27 -07002443 goto do_setcond_high;
2444 }
2445 if (arg_is_const_val(op->args[4], 0)) {
2446 goto do_setcond_low;
Richard Hendersonbc47b1a2021-08-24 09:09:35 -07002447 }
2448 break;
2449
2450 default:
2451 break;
2452
Richard Hendersonceb9ee02023-10-23 23:44:27 -07002453 do_setcond_low:
2454 op->args[2] = op->args[3];
2455 op->args[3] = cond;
2456 op->opc = INDEX_op_setcond_i32;
2457 return fold_setcond(ctx, op);
2458
Richard Hendersonbc47b1a2021-08-24 09:09:35 -07002459 do_setcond_high:
2460 op->args[1] = op->args[2];
2461 op->args[2] = op->args[4];
2462 op->args[3] = cond;
2463 op->opc = INDEX_op_setcond_i32;
Richard Hendersonceb9ee02023-10-23 23:44:27 -07002464 return fold_setcond(ctx, op);
Richard Hendersonbc47b1a2021-08-24 09:09:35 -07002465 }
Richard Hendersonfae450b2021-08-25 22:42:19 -07002466
2467 ctx->z_mask = 1;
Richard Hendersonbc47b1a2021-08-24 09:09:35 -07002468 return false;
2469
2470 do_setcond_const:
2471 return tcg_opt_gen_movi(ctx, op, op->args[0], i);
2472}
2473
Richard Henderson1f106542024-09-06 12:22:41 -07002474static bool fold_cmp_vec(OptContext *ctx, TCGOp *op)
2475{
2476 /* Canonicalize the comparison to put immediate second. */
2477 if (swap_commutative(NO_DEST, &op->args[1], &op->args[2])) {
2478 op->args[3] = tcg_swap_cond(op->args[3]);
2479 }
2480 return false;
2481}
2482
2483static bool fold_cmpsel_vec(OptContext *ctx, TCGOp *op)
2484{
2485 /* If true and false values are the same, eliminate the cmp. */
2486 if (args_are_copies(op->args[3], op->args[4])) {
2487 return tcg_opt_gen_mov(ctx, op, op->args[0], op->args[3]);
2488 }
2489
2490 /* Canonicalize the comparison to put immediate second. */
2491 if (swap_commutative(NO_DEST, &op->args[1], &op->args[2])) {
2492 op->args[5] = tcg_swap_cond(op->args[5]);
2493 }
2494 /*
2495 * Canonicalize the "false" input reg to match the destination,
2496 * so that the tcg backend can implement "move if true".
2497 */
2498 if (swap_commutative(op->args[0], &op->args[4], &op->args[3])) {
2499 op->args[5] = tcg_invert_cond(op->args[5]);
2500 }
2501 return false;
2502}
2503
Richard Hendersonb6617c82021-08-24 10:44:53 -07002504static bool fold_sextract(OptContext *ctx, TCGOp *op)
2505{
Richard Henderson57fe5c62021-08-26 12:04:46 -07002506 uint64_t z_mask, s_mask, s_mask_old;
2507 int pos = op->args[2];
2508 int len = op->args[3];
Richard Hendersonfae450b2021-08-25 22:42:19 -07002509
Richard Hendersonb6617c82021-08-24 10:44:53 -07002510 if (arg_is_const(op->args[1])) {
2511 uint64_t t;
2512
2513 t = arg_info(op->args[1])->val;
Richard Henderson57fe5c62021-08-26 12:04:46 -07002514 t = sextract64(t, pos, len);
Richard Hendersonb6617c82021-08-24 10:44:53 -07002515 return tcg_opt_gen_movi(ctx, op, op->args[0], t);
2516 }
Richard Hendersonfae450b2021-08-25 22:42:19 -07002517
Richard Henderson57fe5c62021-08-26 12:04:46 -07002518 z_mask = arg_info(op->args[1])->z_mask;
2519 z_mask = sextract64(z_mask, pos, len);
Richard Hendersonfae450b2021-08-25 22:42:19 -07002520 ctx->z_mask = z_mask;
2521
Richard Henderson57fe5c62021-08-26 12:04:46 -07002522 s_mask_old = arg_info(op->args[1])->s_mask;
2523 s_mask = sextract64(s_mask_old, pos, len);
2524 s_mask |= MAKE_64BIT_MASK(len, 64 - len);
2525 ctx->s_mask = s_mask;
2526
Richard Henderson6d70ddc2024-12-21 21:08:10 -08002527 if (0 && pos == 0 && fold_affected_mask(ctx, op, s_mask & ~s_mask_old)) {
Richard Henderson045ace32024-12-19 10:33:51 -08002528 return true;
Richard Henderson57fe5c62021-08-26 12:04:46 -07002529 }
2530
Richard Hendersonfae450b2021-08-25 22:42:19 -07002531 return fold_masks(ctx, op);
Richard Hendersonb6617c82021-08-24 10:44:53 -07002532}
2533
Richard Henderson2f9f08b2021-08-25 12:03:48 -07002534static bool fold_shift(OptContext *ctx, TCGOp *op)
2535{
Richard Henderson93a967f2021-08-26 13:24:59 -07002536 uint64_t s_mask, z_mask, sign;
2537
Richard Hendersona63ce0e2021-08-25 20:28:53 -07002538 if (fold_const2(ctx, op) ||
Richard Hendersonda48e272021-08-25 20:42:04 -07002539 fold_ix_to_i(ctx, op, 0) ||
Richard Hendersona63ce0e2021-08-25 20:28:53 -07002540 fold_xi_to_x(ctx, op, 0)) {
2541 return true;
2542 }
Richard Hendersonfae450b2021-08-25 22:42:19 -07002543
Richard Henderson93a967f2021-08-26 13:24:59 -07002544 s_mask = arg_info(op->args[1])->s_mask;
2545 z_mask = arg_info(op->args[1])->z_mask;
2546
Richard Hendersonfae450b2021-08-25 22:42:19 -07002547 if (arg_is_const(op->args[2])) {
Richard Henderson93a967f2021-08-26 13:24:59 -07002548 int sh = arg_info(op->args[2])->val;
2549
2550 ctx->z_mask = do_constant_folding(op->opc, ctx->type, z_mask, sh);
2551
2552 s_mask = do_constant_folding(op->opc, ctx->type, s_mask, sh);
Richard Henderson93a967f2021-08-26 13:24:59 -07002553
Richard Hendersonfae450b2021-08-25 22:42:19 -07002554 return fold_masks(ctx, op);
2555 }
Richard Henderson93a967f2021-08-26 13:24:59 -07002556
2557 switch (op->opc) {
2558 CASE_OP_32_64(sar):
2559 /*
2560 * Arithmetic right shift will not reduce the number of
2561 * input sign repetitions.
2562 */
2563 ctx->s_mask = s_mask;
2564 break;
2565 CASE_OP_32_64(shr):
2566 /*
2567 * If the sign bit is known zero, then logical right shift
2568 * will not reduce the number of input sign repetitions.
2569 */
2570 sign = (s_mask & -s_mask) >> 1;
Richard Henderson2911e9b2024-03-26 11:21:38 -10002571 if (sign && !(z_mask & sign)) {
Richard Henderson93a967f2021-08-26 13:24:59 -07002572 ctx->s_mask = s_mask;
2573 }
2574 break;
2575 default:
2576 break;
2577 }
2578
Richard Hendersona63ce0e2021-08-25 20:28:53 -07002579 return false;
Richard Henderson2f9f08b2021-08-25 12:03:48 -07002580}
2581
Richard Henderson9caca882021-08-24 13:30:32 -07002582static bool fold_sub_to_neg(OptContext *ctx, TCGOp *op)
2583{
2584 TCGOpcode neg_op;
2585 bool have_neg;
2586
2587 if (!arg_is_const(op->args[1]) || arg_info(op->args[1])->val != 0) {
2588 return false;
2589 }
2590
2591 switch (ctx->type) {
2592 case TCG_TYPE_I32:
2593 neg_op = INDEX_op_neg_i32;
Richard Hendersonb701f192023-10-25 21:14:04 -07002594 have_neg = true;
Richard Henderson9caca882021-08-24 13:30:32 -07002595 break;
2596 case TCG_TYPE_I64:
2597 neg_op = INDEX_op_neg_i64;
Richard Hendersonb701f192023-10-25 21:14:04 -07002598 have_neg = true;
Richard Henderson9caca882021-08-24 13:30:32 -07002599 break;
2600 case TCG_TYPE_V64:
2601 case TCG_TYPE_V128:
2602 case TCG_TYPE_V256:
2603 neg_op = INDEX_op_neg_vec;
2604 have_neg = (TCG_TARGET_HAS_neg_vec &&
2605 tcg_can_emit_vec_op(neg_op, ctx->type, TCGOP_VECE(op)) > 0);
2606 break;
2607 default:
2608 g_assert_not_reached();
2609 }
2610 if (have_neg) {
2611 op->opc = neg_op;
2612 op->args[1] = op->args[2];
Richard Hendersone25fe882024-04-04 20:53:50 +00002613 return fold_neg_no_const(ctx, op);
Richard Henderson9caca882021-08-24 13:30:32 -07002614 }
2615 return false;
2616}
2617
Richard Hendersonc578ff12021-12-16 06:07:25 -08002618/* We cannot as yet do_constant_folding with vectors. */
2619static bool fold_sub_vec(OptContext *ctx, TCGOp *op)
Richard Henderson2f9f08b2021-08-25 12:03:48 -07002620{
Richard Hendersonc578ff12021-12-16 06:07:25 -08002621 if (fold_xx_to_i(ctx, op, 0) ||
Richard Hendersona63ce0e2021-08-25 20:28:53 -07002622 fold_xi_to_x(ctx, op, 0) ||
Richard Henderson9caca882021-08-24 13:30:32 -07002623 fold_sub_to_neg(ctx, op)) {
Richard Hendersoncbe42fb2021-08-25 13:02:00 -07002624 return true;
2625 }
2626 return false;
Richard Henderson2f9f08b2021-08-25 12:03:48 -07002627}
2628
Richard Hendersonc578ff12021-12-16 06:07:25 -08002629static bool fold_sub(OptContext *ctx, TCGOp *op)
2630{
Richard Henderson6334a962023-10-25 18:39:43 -07002631 if (fold_const2(ctx, op) || fold_sub_vec(ctx, op)) {
2632 return true;
2633 }
2634
2635 /* Fold sub r,x,i to add r,x,-i */
2636 if (arg_is_const(op->args[2])) {
2637 uint64_t val = arg_info(op->args[2])->val;
2638
2639 op->opc = (ctx->type == TCG_TYPE_I32
2640 ? INDEX_op_add_i32 : INDEX_op_add_i64);
2641 op->args[2] = arg_new_constant(ctx, -val);
2642 }
2643 return false;
Richard Hendersonc578ff12021-12-16 06:07:25 -08002644}
2645
Richard Henderson9531c072021-08-26 06:51:39 -07002646static bool fold_sub2(OptContext *ctx, TCGOp *op)
Richard Hendersone3f7dc22021-08-24 10:30:38 -07002647{
Richard Henderson9531c072021-08-26 06:51:39 -07002648 return fold_addsub2(ctx, op, false);
Richard Hendersone3f7dc22021-08-24 10:30:38 -07002649}
2650
Richard Hendersonfae450b2021-08-25 22:42:19 -07002651static bool fold_tcg_ld(OptContext *ctx, TCGOp *op)
2652{
2653 /* We can't do any folding with a load, but we can record bits. */
2654 switch (op->opc) {
Richard Henderson57fe5c62021-08-26 12:04:46 -07002655 CASE_OP_32_64(ld8s):
2656 ctx->s_mask = MAKE_64BIT_MASK(8, 56);
2657 break;
Richard Hendersonfae450b2021-08-25 22:42:19 -07002658 CASE_OP_32_64(ld8u):
2659 ctx->z_mask = MAKE_64BIT_MASK(0, 8);
Richard Henderson57fe5c62021-08-26 12:04:46 -07002660 break;
2661 CASE_OP_32_64(ld16s):
2662 ctx->s_mask = MAKE_64BIT_MASK(16, 48);
Richard Hendersonfae450b2021-08-25 22:42:19 -07002663 break;
2664 CASE_OP_32_64(ld16u):
2665 ctx->z_mask = MAKE_64BIT_MASK(0, 16);
Richard Henderson57fe5c62021-08-26 12:04:46 -07002666 break;
2667 case INDEX_op_ld32s_i64:
2668 ctx->s_mask = MAKE_64BIT_MASK(32, 32);
Richard Hendersonfae450b2021-08-25 22:42:19 -07002669 break;
2670 case INDEX_op_ld32u_i64:
2671 ctx->z_mask = MAKE_64BIT_MASK(0, 32);
2672 break;
2673 default:
2674 g_assert_not_reached();
2675 }
2676 return false;
2677}
2678
Richard Hendersonab84dc32023-08-23 23:04:24 -07002679static bool fold_tcg_ld_memcopy(OptContext *ctx, TCGOp *op)
2680{
2681 TCGTemp *dst, *src;
2682 intptr_t ofs;
2683 TCGType type;
2684
2685 if (op->args[1] != tcgv_ptr_arg(tcg_env)) {
2686 return false;
2687 }
2688
2689 type = ctx->type;
2690 ofs = op->args[2];
2691 dst = arg_temp(op->args[0]);
2692 src = find_mem_copy_for(ctx, type, ofs);
2693 if (src && src->base_type == type) {
2694 return tcg_opt_gen_mov(ctx, op, temp_arg(dst), temp_arg(src));
2695 }
2696
2697 reset_ts(ctx, dst);
2698 record_mem_copy(ctx, type, dst, ofs, ofs + tcg_type_size(type) - 1);
2699 return true;
2700}
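
/*
 * E.g. once st_i32 t0, env, 0x10 has recorded a copy for that slot,
 * a later ld_i32 t1, env, 0x10 with no intervening store of the
 * range folds to mov_i32 t1, t0 via find_mem_copy_for above.
 */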
2701
2702static bool fold_tcg_st(OptContext *ctx, TCGOp *op)
2703{
2704 intptr_t ofs = op->args[2];
2705 intptr_t lm1;
2706
2707 if (op->args[1] != tcgv_ptr_arg(tcg_env)) {
2708 remove_mem_copy_all(ctx);
Richard Henderson082b3ef2024-12-08 20:34:57 -06002709 return true;
Richard Hendersonab84dc32023-08-23 23:04:24 -07002710 }
2711
2712 switch (op->opc) {
2713 CASE_OP_32_64(st8):
2714 lm1 = 0;
2715 break;
2716 CASE_OP_32_64(st16):
2717 lm1 = 1;
2718 break;
2719 case INDEX_op_st32_i64:
2720 case INDEX_op_st_i32:
2721 lm1 = 3;
2722 break;
2723 case INDEX_op_st_i64:
2724 lm1 = 7;
2725 break;
2726 case INDEX_op_st_vec:
2727 lm1 = tcg_type_size(ctx->type) - 1;
2728 break;
2729 default:
2730 g_assert_not_reached();
2731 }
2732 remove_mem_copy_in(ctx, ofs, ofs + lm1);
Richard Henderson082b3ef2024-12-08 20:34:57 -06002733 return true;
Richard Hendersonab84dc32023-08-23 23:04:24 -07002734}
2735
2736static bool fold_tcg_st_memcopy(OptContext *ctx, TCGOp *op)
2737{
2738 TCGTemp *src;
2739 intptr_t ofs, last;
2740 TCGType type;
2741
2742 if (op->args[1] != tcgv_ptr_arg(tcg_env)) {
Richard Henderson082b3ef2024-12-08 20:34:57 -06002743 return fold_tcg_st(ctx, op);
Richard Hendersonab84dc32023-08-23 23:04:24 -07002744 }
2745
2746 src = arg_temp(op->args[0]);
2747 ofs = op->args[2];
2748 type = ctx->type;
Richard Henderson3eaadae2023-08-23 23:13:06 -07002749
2750 /*
2751 * Eliminate duplicate stores of a constant.
2752 * This happens frequently when the target ISA zero-extends.
2753 */
2754 if (ts_is_const(src)) {
2755 TCGTemp *prev = find_mem_copy_for(ctx, type, ofs);
2756 if (src == prev) {
2757 tcg_op_remove(ctx->tcg, op);
2758 return true;
2759 }
2760 }
2761
Richard Hendersonab84dc32023-08-23 23:04:24 -07002762 last = ofs + tcg_type_size(type) - 1;
2763 remove_mem_copy_in(ctx, ofs, last);
2764 record_mem_copy(ctx, type, src, ofs, last);
Richard Henderson082b3ef2024-12-08 20:34:57 -06002765 return true;
Richard Hendersonab84dc32023-08-23 23:04:24 -07002766}
2767
Richard Henderson2f9f08b2021-08-25 12:03:48 -07002768static bool fold_xor(OptContext *ctx, TCGOp *op)
2769{
Richard Henderson7a2f7082021-08-26 07:06:39 -07002770 if (fold_const2_commutative(ctx, op) ||
Richard Henderson0e0a32b2021-08-24 13:18:01 -07002771 fold_xx_to_i(ctx, op, 0) ||
Richard Hendersona63ce0e2021-08-25 20:28:53 -07002772 fold_xi_to_x(ctx, op, 0) ||
Richard Henderson0e0a32b2021-08-24 13:18:01 -07002773 fold_xi_to_not(ctx, op, -1)) {
Richard Hendersoncbe42fb2021-08-25 13:02:00 -07002774 return true;
2775 }
Richard Hendersonfae450b2021-08-25 22:42:19 -07002776
2777 ctx->z_mask = arg_info(op->args[1])->z_mask
2778 | arg_info(op->args[2])->z_mask;
Richard Henderson3f2b1f82021-08-26 13:08:54 -07002779 ctx->s_mask = arg_info(op->args[1])->s_mask
2780 & arg_info(op->args[2])->s_mask;
Richard Hendersonfae450b2021-08-25 22:42:19 -07002781 return fold_masks(ctx, op);
Richard Henderson2f9f08b2021-08-25 12:03:48 -07002782}
2783
Richard Hendersone58b9772024-09-06 22:30:01 -07002784static bool fold_bitsel_vec(OptContext *ctx, TCGOp *op)
2785{
2786 /* If true and false values are the same, eliminate the cmp. */
2787 if (args_are_copies(op->args[2], op->args[3])) {
2788 return tcg_opt_gen_mov(ctx, op, op->args[0], op->args[2]);
2789 }
2790
2791 if (arg_is_const(op->args[2]) && arg_is_const(op->args[3])) {
2792 uint64_t tv = arg_info(op->args[2])->val;
2793 uint64_t fv = arg_info(op->args[3])->val;
2794
2795 if (tv == -1 && fv == 0) {
2796 return tcg_opt_gen_mov(ctx, op, op->args[0], op->args[1]);
2797 }
2798 if (tv == 0 && fv == -1) {
2799 if (TCG_TARGET_HAS_not_vec) {
2800 op->opc = INDEX_op_not_vec;
2801 return fold_not(ctx, op);
2802 } else {
2803 op->opc = INDEX_op_xor_vec;
2804 op->args[2] = arg_new_constant(ctx, -1);
2805 return fold_xor(ctx, op);
2806 }
2807 }
2808 }
2809 if (arg_is_const(op->args[2])) {
2810 uint64_t tv = arg_info(op->args[2])->val;
2811 if (tv == -1) {
2812 op->opc = INDEX_op_or_vec;
2813 op->args[2] = op->args[3];
2814 return fold_or(ctx, op);
2815 }
2816 if (tv == 0 && TCG_TARGET_HAS_andc_vec) {
2817 op->opc = INDEX_op_andc_vec;
2818 op->args[2] = op->args[1];
2819 op->args[1] = op->args[3];
2820 return fold_andc(ctx, op);
2821 }
2822 }
2823 if (arg_is_const(op->args[3])) {
2824 uint64_t fv = arg_info(op->args[3])->val;
2825 if (fv == 0) {
2826 op->opc = INDEX_op_and_vec;
2827 return fold_and(ctx, op);
2828 }
2829 if (fv == -1 && TCG_TARGET_HAS_orc_vec) {
2830 op->opc = INDEX_op_orc_vec;
2831 op->args[2] = op->args[1];
2832 op->args[1] = op->args[3];
2833 return fold_orc(ctx, op);
2834 }
2835 }
2836 return false;
2837}
2838
Kirill Batuzov22613af2011-07-07 16:37:13 +04002839/* Propagate constants and copies, fold constant expressions. */
Aurelien Jarno36e60ef2015-06-04 21:53:27 +02002840void tcg_optimize(TCGContext *s)
Kirill Batuzov8f2e8c02011-07-07 16:37:12 +04002841{
Richard Henderson5cf32be2021-08-24 08:17:08 -07002842 int nb_temps, i;
Richard Hendersond0ed5152021-08-24 07:38:39 -07002843 TCGOp *op, *op_next;
Richard Hendersondc849882021-08-24 07:13:45 -07002844 OptContext ctx = { .tcg = s };
Richard Henderson5d8f5362012-09-21 10:13:38 -07002845
Richard Hendersonab84dc32023-08-23 23:04:24 -07002846 QSIMPLEQ_INIT(&ctx.mem_free);
2847
Kirill Batuzov22613af2011-07-07 16:37:13 +04002848 /* Array VALS has an element for each temp.
2849 If this temp holds a constant then its value is kept in VALS' element.
Aurelien Jarnoe590d4e2012-09-11 12:31:21 +02002850 If this temp is a copy of other ones then the other copies are
2851 available through the doubly linked circular list. */
Kirill Batuzov8f2e8c02011-07-07 16:37:12 +04002852
2853 nb_temps = s->nb_temps;
Richard Henderson8f17a972020-03-30 19:52:02 -07002854 for (i = 0; i < nb_temps; ++i) {
2855 s->temps[i].state_ptr = NULL;
2856 }
Kirill Batuzov8f2e8c02011-07-07 16:37:12 +04002857
Richard Henderson15fa08f2017-11-02 15:19:14 +01002858 QTAILQ_FOREACH_SAFE(op, &s->ops, link, op_next) {
Richard Hendersonc45cb8b2014-09-19 13:49:15 -07002859 TCGOpcode opc = op->opc;
Richard Henderson5cf32be2021-08-24 08:17:08 -07002860 const TCGOpDef *def;
Richard Henderson404a1482021-08-24 11:08:21 -07002861 bool done = false;
Richard Hendersonc45cb8b2014-09-19 13:49:15 -07002862
Richard Henderson5cf32be2021-08-24 08:17:08 -07002863 /* Calls are special. */
Richard Hendersonc45cb8b2014-09-19 13:49:15 -07002864 if (opc == INDEX_op_call) {
Richard Henderson5cf32be2021-08-24 08:17:08 -07002865 fold_call(&ctx, op);
2866 continue;
Richard Hendersoncf066672014-03-22 20:06:52 -07002867 }
Richard Henderson5cf32be2021-08-24 08:17:08 -07002868
2869 def = &tcg_op_defs[opc];
Richard Hendersonec5d4cb2021-08-24 08:20:27 -07002870 init_arguments(&ctx, op, def->nb_oargs + def->nb_iargs);
2871 copy_propagate(&ctx, op, def->nb_oargs, def->nb_iargs);
Kirill Batuzov22613af2011-07-07 16:37:13 +04002872
Richard Henderson67f84c92021-08-25 08:00:20 -07002873 /* Pre-compute the type of the operation. */
2874 if (def->flags & TCG_OPF_VECTOR) {
2875 ctx.type = TCG_TYPE_V64 + TCGOP_VECL(op);
2876 } else if (def->flags & TCG_OPF_64BIT) {
2877 ctx.type = TCG_TYPE_I64;
2878 } else {
2879 ctx.type = TCG_TYPE_I32;
2880 }
2881
Richard Henderson57fe5c62021-08-26 12:04:46 -07002882        /* Assume all bits affected, no bits known zero, no sign-bit repetitions. */
Richard Hendersonfae450b2021-08-25 22:42:19 -07002883 ctx.z_mask = -1;
Richard Henderson57fe5c62021-08-26 12:04:46 -07002884 ctx.s_mask = 0;
Paolo Bonzini633f6502013-01-11 15:42:53 -08002885
Richard Henderson2cfac7f2021-08-25 13:05:43 -07002886 /*
2887 * Process each opcode.
2888 * Sorted alphabetically by opcode as much as possible.
2889 */
Richard Hendersonc45cb8b2014-09-19 13:49:15 -07002890 switch (opc) {
Richard Hendersonc578ff12021-12-16 06:07:25 -08002891 CASE_OP_32_64(add):
Richard Henderson2f9f08b2021-08-25 12:03:48 -07002892 done = fold_add(&ctx, op);
2893 break;
Richard Hendersonc578ff12021-12-16 06:07:25 -08002894 case INDEX_op_add_vec:
2895 done = fold_add_vec(&ctx, op);
2896 break;
Richard Henderson9531c072021-08-26 06:51:39 -07002897 CASE_OP_32_64(add2):
2898 done = fold_add2(&ctx, op);
Richard Hendersone3f7dc22021-08-24 10:30:38 -07002899 break;
Richard Henderson2f9f08b2021-08-25 12:03:48 -07002900 CASE_OP_32_64_VEC(and):
2901 done = fold_and(&ctx, op);
2902 break;
2903 CASE_OP_32_64_VEC(andc):
2904 done = fold_andc(&ctx, op);
2905 break;
Richard Henderson079b0802021-08-24 09:30:59 -07002906 CASE_OP_32_64(brcond):
2907 done = fold_brcond(&ctx, op);
2908 break;
Richard Henderson764d2ab2021-08-24 09:22:11 -07002909 case INDEX_op_brcond2_i32:
2910 done = fold_brcond2(&ctx, op);
2911 break;
Richard Henderson09bacdc2021-08-24 11:58:12 -07002912 CASE_OP_32_64(bswap16):
2913 CASE_OP_32_64(bswap32):
2914 case INDEX_op_bswap64_i64:
2915 done = fold_bswap(&ctx, op);
2916 break;
Richard Henderson30dd0bf2021-08-24 10:51:34 -07002917 CASE_OP_32_64(clz):
2918 CASE_OP_32_64(ctz):
2919 done = fold_count_zeros(&ctx, op);
2920 break;
Richard Henderson2f9f08b2021-08-25 12:03:48 -07002921 CASE_OP_32_64(ctpop):
2922 done = fold_ctpop(&ctx, op);
2923 break;
Richard Henderson1b1907b2021-08-24 10:47:04 -07002924 CASE_OP_32_64(deposit):
2925 done = fold_deposit(&ctx, op);
2926 break;
Richard Henderson2f9f08b2021-08-25 12:03:48 -07002927 CASE_OP_32_64(div):
2928 CASE_OP_32_64(divu):
2929 done = fold_divide(&ctx, op);
2930 break;
Richard Henderson8cdb3fc2021-08-24 12:06:33 -07002931 case INDEX_op_dup_vec:
2932 done = fold_dup(&ctx, op);
2933 break;
2934 case INDEX_op_dup2_vec:
2935 done = fold_dup2(&ctx, op);
2936 break;
Richard Hendersoned523472021-12-16 11:17:46 -08002937 CASE_OP_32_64_VEC(eqv):
Richard Henderson2f9f08b2021-08-25 12:03:48 -07002938 done = fold_eqv(&ctx, op);
2939 break;
Richard Hendersonb6617c82021-08-24 10:44:53 -07002940 CASE_OP_32_64(extract):
2941 done = fold_extract(&ctx, op);
2942 break;
Richard Hendersondcd08992021-08-24 10:41:39 -07002943 CASE_OP_32_64(extract2):
2944 done = fold_extract2(&ctx, op);
2945 break;
Richard Henderson2f9f08b2021-08-25 12:03:48 -07002946 CASE_OP_32_64(ext8s):
2947 CASE_OP_32_64(ext16s):
2948 case INDEX_op_ext32s_i64:
2949 case INDEX_op_ext_i32_i64:
2950 done = fold_exts(&ctx, op);
2951 break;
2952 CASE_OP_32_64(ext8u):
2953 CASE_OP_32_64(ext16u):
2954 case INDEX_op_ext32u_i64:
2955 case INDEX_op_extu_i32_i64:
2956 case INDEX_op_extrl_i64_i32:
2957 case INDEX_op_extrh_i64_i32:
2958 done = fold_extu(&ctx, op);
2959 break;
Richard Henderson57fe5c62021-08-26 12:04:46 -07002960 CASE_OP_32_64(ld8s):
Richard Hendersonfae450b2021-08-25 22:42:19 -07002961 CASE_OP_32_64(ld8u):
Richard Henderson57fe5c62021-08-26 12:04:46 -07002962 CASE_OP_32_64(ld16s):
Richard Hendersonfae450b2021-08-25 22:42:19 -07002963 CASE_OP_32_64(ld16u):
Richard Henderson57fe5c62021-08-26 12:04:46 -07002964 case INDEX_op_ld32s_i64:
Richard Hendersonfae450b2021-08-25 22:42:19 -07002965 case INDEX_op_ld32u_i64:
2966 done = fold_tcg_ld(&ctx, op);
2967 break;
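        /*
         * Full-width loads and stores are additionally tracked in the
         * mem_copy interval tree, so a later load from a slot whose
         * contents are still held in a temp can fold to a mov.
         */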
Richard Hendersonab84dc32023-08-23 23:04:24 -07002968 case INDEX_op_ld_i32:
2969 case INDEX_op_ld_i64:
2970 case INDEX_op_ld_vec:
2971 done = fold_tcg_ld_memcopy(&ctx, op);
2972 break;
2973 CASE_OP_32_64(st8):
2974 CASE_OP_32_64(st16):
2975 case INDEX_op_st32_i64:
2976 done = fold_tcg_st(&ctx, op);
2977 break;
2978 case INDEX_op_st_i32:
2979 case INDEX_op_st_i64:
2980 case INDEX_op_st_vec:
2981 done = fold_tcg_st_memcopy(&ctx, op);
2982 break;
Richard Henderson3eefdf22021-08-25 11:06:43 -07002983 case INDEX_op_mb:
2984 done = fold_mb(&ctx, op);
2985 break;
Richard Henderson2cfac7f2021-08-25 13:05:43 -07002986 CASE_OP_32_64_VEC(mov):
2987 done = fold_mov(&ctx, op);
2988 break;
Richard Henderson0c310a32021-08-24 10:37:24 -07002989 CASE_OP_32_64(movcond):
2990 done = fold_movcond(&ctx, op);
2991 break;
Richard Henderson2f9f08b2021-08-25 12:03:48 -07002992 CASE_OP_32_64(mul):
2993 done = fold_mul(&ctx, op);
2994 break;
2995 CASE_OP_32_64(mulsh):
2996 CASE_OP_32_64(muluh):
2997 done = fold_mul_highpart(&ctx, op);
2998 break;
Richard Henderson407112b2021-08-26 06:33:04 -07002999 CASE_OP_32_64(muls2):
3000 CASE_OP_32_64(mulu2):
3001 done = fold_multiply2(&ctx, op);
Richard Henderson6b8ac0d2021-08-24 10:24:12 -07003002 break;
Richard Hendersoned523472021-12-16 11:17:46 -08003003 CASE_OP_32_64_VEC(nand):
Richard Henderson2f9f08b2021-08-25 12:03:48 -07003004 done = fold_nand(&ctx, op);
3005 break;
3006 CASE_OP_32_64(neg):
3007 done = fold_neg(&ctx, op);
3008 break;
Richard Hendersoned523472021-12-16 11:17:46 -08003009 CASE_OP_32_64_VEC(nor):
Richard Henderson2f9f08b2021-08-25 12:03:48 -07003010 done = fold_nor(&ctx, op);
3011 break;
3012 CASE_OP_32_64_VEC(not):
3013 done = fold_not(&ctx, op);
3014 break;
3015 CASE_OP_32_64_VEC(or):
3016 done = fold_or(&ctx, op);
3017 break;
3018 CASE_OP_32_64_VEC(orc):
3019 done = fold_orc(&ctx, op);
3020 break;
Richard Hendersonfecccfc2023-05-16 20:07:20 -07003021 case INDEX_op_qemu_ld_a32_i32:
3022 case INDEX_op_qemu_ld_a64_i32:
Richard Henderson6813be92024-12-08 20:33:30 -06003023 done = fold_qemu_ld_1reg(&ctx, op);
3024 break;
Richard Hendersonfecccfc2023-05-16 20:07:20 -07003025 case INDEX_op_qemu_ld_a32_i64:
3026 case INDEX_op_qemu_ld_a64_i64:
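            /* Only a 64-bit host holds a 64-bit value in one register. */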
Richard Henderson6813be92024-12-08 20:33:30 -06003027 if (TCG_TARGET_REG_BITS == 64) {
3028 done = fold_qemu_ld_1reg(&ctx, op);
3029 break;
3030 }
3031 QEMU_FALLTHROUGH;
Richard Hendersonfecccfc2023-05-16 20:07:20 -07003032 case INDEX_op_qemu_ld_a32_i128:
3033 case INDEX_op_qemu_ld_a64_i128:
Richard Henderson6813be92024-12-08 20:33:30 -06003034 done = fold_qemu_ld_2reg(&ctx, op);
Richard Henderson3eefdf22021-08-25 11:06:43 -07003035 break;
Richard Hendersonfecccfc2023-05-16 20:07:20 -07003036 case INDEX_op_qemu_st8_a32_i32:
3037 case INDEX_op_qemu_st8_a64_i32:
3038 case INDEX_op_qemu_st_a32_i32:
3039 case INDEX_op_qemu_st_a64_i32:
3040 case INDEX_op_qemu_st_a32_i64:
3041 case INDEX_op_qemu_st_a64_i64:
3042 case INDEX_op_qemu_st_a32_i128:
3043 case INDEX_op_qemu_st_a64_i128:
Richard Henderson3eefdf22021-08-25 11:06:43 -07003044 done = fold_qemu_st(&ctx, op);
3045 break;
Richard Henderson2f9f08b2021-08-25 12:03:48 -07003046 CASE_OP_32_64(rem):
3047 CASE_OP_32_64(remu):
3048 done = fold_remainder(&ctx, op);
3049 break;
3050 CASE_OP_32_64(rotl):
3051 CASE_OP_32_64(rotr):
3052 CASE_OP_32_64(sar):
3053 CASE_OP_32_64(shl):
3054 CASE_OP_32_64(shr):
3055 done = fold_shift(&ctx, op);
3056 break;
Richard Hendersonc63ff552021-08-24 09:35:30 -07003057 CASE_OP_32_64(setcond):
3058 done = fold_setcond(&ctx, op);
3059 break;
Richard Henderson36355022023-08-04 23:24:04 +00003060 CASE_OP_32_64(negsetcond):
3061 done = fold_negsetcond(&ctx, op);
3062 break;
Richard Hendersonbc47b1a2021-08-24 09:09:35 -07003063 case INDEX_op_setcond2_i32:
3064 done = fold_setcond2(&ctx, op);
3065 break;
Richard Henderson1f106542024-09-06 12:22:41 -07003066 case INDEX_op_cmp_vec:
3067 done = fold_cmp_vec(&ctx, op);
3068 break;
3069 case INDEX_op_cmpsel_vec:
3070 done = fold_cmpsel_vec(&ctx, op);
3071 break;
Richard Hendersone58b9772024-09-06 22:30:01 -07003072 case INDEX_op_bitsel_vec:
3073 done = fold_bitsel_vec(&ctx, op);
3074 break;
Richard Hendersonb6617c82021-08-24 10:44:53 -07003075 CASE_OP_32_64(sextract):
3076 done = fold_sextract(&ctx, op);
3077 break;
Richard Hendersonc578ff12021-12-16 06:07:25 -08003078 CASE_OP_32_64(sub):
Richard Henderson2f9f08b2021-08-25 12:03:48 -07003079 done = fold_sub(&ctx, op);
3080 break;
Richard Hendersonc578ff12021-12-16 06:07:25 -08003081 case INDEX_op_sub_vec:
3082 done = fold_sub_vec(&ctx, op);
3083 break;
Richard Henderson9531c072021-08-26 06:51:39 -07003084 CASE_OP_32_64(sub2):
3085 done = fold_sub2(&ctx, op);
Richard Hendersone3f7dc22021-08-24 10:30:38 -07003086 break;
Richard Henderson2f9f08b2021-08-25 12:03:48 -07003087 CASE_OP_32_64_VEC(xor):
3088 done = fold_xor(&ctx, op);
Richard Hendersonb10f3832021-08-23 22:30:17 -07003089 break;
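        /*
         * Labels and unconditional control transfers end the extended
         * basic block: values known here need not hold on other paths
         * into the label, so finish_ebb() resets the tracked state.
         */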
Richard Henderson15268552024-12-08 07:45:11 -06003090 case INDEX_op_set_label:
3091 case INDEX_op_br:
3092 case INDEX_op_exit_tb:
3093 case INDEX_op_goto_tb:
3094 case INDEX_op_goto_ptr:
3095 finish_ebb(&ctx);
3096 done = true;
3097 break;
Richard Henderson2cfac7f2021-08-25 13:05:43 -07003098 default:
3099 break;
Richard Hendersonb10f3832021-08-23 22:30:17 -07003100 }
3101
Richard Henderson404a1482021-08-24 11:08:21 -07003102 if (!done) {
3103 finish_folding(&ctx, op);
3104 }
Kirill Batuzov8f2e8c02011-07-07 16:37:12 +04003105 }
Kirill Batuzov8f2e8c02011-07-07 16:37:12 +04003106}
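/*
 * Appendix (editorial sketch): the same three ideas -- constant
 * tracking, copy propagation, and constant folding -- on a toy
 * three-address IR.  Every name below (toy_..., TOY_...) is
 * hypothetical; none of it is QEMU or TCG API, and the real pass
 * above is far more general.
 */
#include <inttypes.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

enum toy_opc { TOY_MOVI, TOY_MOV, TOY_ADD, TOY_XOR };

struct toy_op {
    enum toy_opc opc;
    int dst, src1, src2;     /* temp indices; unused inputs are 0 */
    uint64_t imm;            /* constant operand for TOY_MOVI */
};

#define TOY_NB_TEMPS 8
static bool toy_is_const[TOY_NB_TEMPS];
static uint64_t toy_value[TOY_NB_TEMPS];
static int toy_canon[TOY_NB_TEMPS];   /* canonical copy of each temp */

/*
 * Temp D is being redefined: temps that used D as their canonical copy
 * must stop.  Forgetting the relation is conservatively correct; the
 * circular list in the real pass exists precisely so that the
 * surviving copies stay linked to each other instead.
 */
static void toy_reset(int d)
{
    for (int i = 0; i < TOY_NB_TEMPS; i++) {
        if (i != d && toy_canon[i] == d) {
            toy_canon[i] = i;
        }
    }
    toy_canon[d] = d;
    toy_is_const[d] = false;
}

static void toy_optimize(struct toy_op *ops, int n)
{
    for (int i = 0; i < TOY_NB_TEMPS; i++) {
        toy_canon[i] = i;
        toy_is_const[i] = false;
    }
    for (int i = 0; i < n; i++) {
        struct toy_op *op = &ops[i];

        /* Copy propagation: rewrite inputs to their canonical temps. */
        op->src1 = toy_canon[op->src1];
        op->src2 = toy_canon[op->src2];

        switch (op->opc) {
        case TOY_MOVI:
            toy_reset(op->dst);
            toy_is_const[op->dst] = true;
            toy_value[op->dst] = op->imm;
            break;
        case TOY_MOV: {
            bool c = toy_is_const[op->src1];
            uint64_t v = toy_value[op->src1];
            int s = op->src1;
            toy_reset(op->dst);
            toy_is_const[op->dst] = c;
            toy_value[op->dst] = v;
            if (s != op->dst) {
                toy_canon[op->dst] = s;
            }
            break;
        }
        case TOY_ADD:
        case TOY_XOR: {
            bool fold = toy_is_const[op->src1] && toy_is_const[op->src2];
            uint64_t a = toy_value[op->src1], b = toy_value[op->src2];
            toy_reset(op->dst);
            if (fold) {
                /* Constant folding: rewrite the op into a movi. */
                op->imm = op->opc == TOY_ADD ? a + b : a ^ b;
                op->opc = TOY_MOVI;
                toy_is_const[op->dst] = true;
                toy_value[op->dst] = op->imm;
            }
            break;
        }
        }
    }
}

int main(void)
{
    struct toy_op prog[] = {
        { TOY_MOVI, 1, 0, 0, 5 },  /* t1 = 5 */
        { TOY_MOV,  2, 1, 0, 0 },  /* t2 = t1 */
        { TOY_MOVI, 3, 0, 0, 7 },  /* t3 = 7 */
        { TOY_ADD,  4, 2, 3, 0 },  /* t4 = t2 + t3 */
    };
    toy_optimize(prog, 4);
    /* The add is now "movi t4, 12": t2 was forwarded to t1, then folded. */
    printf("opc=%d imm=%" PRIu64 "\n", prog[3].opc, prog[3].imm);
    return 0;
}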