/*
 * Optimizations for Tiny Code Generator for QEMU
 *
 * Copyright (c) 2010 Samsung Electronics.
 * Contributed by Kirill Batuzov <batuzovk@ispras.ru>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include "qemu/osdep.h"
#include "qemu/int128.h"
#include "qemu/interval-tree.h"
#include "tcg/tcg-op-common.h"
#include "tcg-internal.h"

#define CASE_OP_32_64(x)                        \
        glue(glue(case INDEX_op_, x), _i32):    \
        glue(glue(case INDEX_op_, x), _i64)

#define CASE_OP_32_64_VEC(x)                    \
        glue(glue(case INDEX_op_, x), _i32):    \
        glue(glue(case INDEX_op_, x), _i64):    \
        glue(glue(case INDEX_op_, x), _vec)
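
/*
 * For example, CASE_OP_32_64(add) expands to
 *     case INDEX_op_add_i32:
 *     case INDEX_op_add_i64
 * so that a single switch arm handles both widths of an opcode.
 */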

typedef struct MemCopyInfo {
    IntervalTreeNode itree;
    QSIMPLEQ_ENTRY(MemCopyInfo) next;
    TCGTemp *ts;
    TCGType type;
} MemCopyInfo;

typedef struct TempOptInfo {
    bool is_const;
    TCGTemp *prev_copy;
    TCGTemp *next_copy;
    QSIMPLEQ_HEAD(, MemCopyInfo) mem_copy;
    uint64_t val;
    uint64_t z_mask;  /* mask bit is 0 if and only if value bit is 0 */
    uint64_t s_mask;  /* mask bit is 1 if value bit matches msb */
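    /*
     * For example, z_mask == 0xff says bits 63..8 are known to be zero,
     * while s_mask == 0xffffffffffff0000 says bits 63..16 all equal
     * bit 63 (at least 48 sign-bit repetitions).
     */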
} TempOptInfo;

typedef struct OptContext {
    TCGContext *tcg;
    TCGOp *prev_mb;
    TCGTempSet temps_used;

    IntervalTreeRoot mem_copy;
    QSIMPLEQ_HEAD(, MemCopyInfo) mem_free;

    /* In flight values from optimization. */
    uint64_t z_mask;  /* mask bit is 0 iff value bit is 0 */
    uint64_t s_mask;  /* mask bit is 1 if value bit matches msb */
    TCGType type;
} OptContext;

static inline TempOptInfo *ts_info(TCGTemp *ts)
{
    return ts->state_ptr;
}

static inline TempOptInfo *arg_info(TCGArg arg)
{
    return ts_info(arg_temp(arg));
}

static inline bool ts_is_const(TCGTemp *ts)
{
    return ts_info(ts)->is_const;
}

static inline bool ts_is_const_val(TCGTemp *ts, uint64_t val)
{
    TempOptInfo *ti = ts_info(ts);
    return ti->is_const && ti->val == val;
}

static inline bool arg_is_const(TCGArg arg)
{
    return ts_is_const(arg_temp(arg));
}

static inline bool arg_is_const_val(TCGArg arg, uint64_t val)
{
    return ts_is_const_val(arg_temp(arg), val);
}

static inline bool ts_is_copy(TCGTemp *ts)
{
    return ts_info(ts)->next_copy != ts;
}

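/*
 * Note: this relies on the TCGTempKind enumeration being ordered
 * from the shortest-lived kind up to TEMP_CONST, so the "better"
 * copy is the longer-lived temp, with constants best of all.
 */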
static TCGTemp *cmp_better_copy(TCGTemp *a, TCGTemp *b)
{
    return a->kind < b->kind ? b : a;
}

/* Initialize and activate a temporary. */
static void init_ts_info(OptContext *ctx, TCGTemp *ts)
{
    size_t idx = temp_idx(ts);
    TempOptInfo *ti;

    if (test_bit(idx, ctx->temps_used.l)) {
        return;
    }
    set_bit(idx, ctx->temps_used.l);

    ti = ts->state_ptr;
    if (ti == NULL) {
        ti = tcg_malloc(sizeof(TempOptInfo));
        ts->state_ptr = ti;
    }

    ti->next_copy = ts;
    ti->prev_copy = ts;
    QSIMPLEQ_INIT(&ti->mem_copy);
    if (ts->kind == TEMP_CONST) {
        ti->is_const = true;
        ti->val = ts->val;
        ti->z_mask = ts->val;
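        /*
         * clrsb64 counts the redundant copies of the sign bit, so this
         * is the canonical run of bits matching the msb.  For example,
         * val == 1 has clrsb64 == 62, giving s_mask == -2: every bit
         * except bit 0 repeats the (zero) sign bit.
         */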
        ti->s_mask = INT64_MIN >> clrsb64(ts->val);
    } else {
        ti->is_const = false;
        ti->z_mask = -1;
        ti->s_mask = 0;
    }
}

static MemCopyInfo *mem_copy_first(OptContext *ctx, intptr_t s, intptr_t l)
{
    IntervalTreeNode *r = interval_tree_iter_first(&ctx->mem_copy, s, l);
    return r ? container_of(r, MemCopyInfo, itree) : NULL;
}

static MemCopyInfo *mem_copy_next(MemCopyInfo *mem, intptr_t s, intptr_t l)
{
    IntervalTreeNode *r = interval_tree_iter_next(&mem->itree, s, l);
    return r ? container_of(r, MemCopyInfo, itree) : NULL;
}

static void remove_mem_copy(OptContext *ctx, MemCopyInfo *mc)
{
    TCGTemp *ts = mc->ts;
    TempOptInfo *ti = ts_info(ts);

    interval_tree_remove(&mc->itree, &ctx->mem_copy);
    QSIMPLEQ_REMOVE(&ti->mem_copy, mc, MemCopyInfo, next);
    QSIMPLEQ_INSERT_TAIL(&ctx->mem_free, mc, next);
}

static void remove_mem_copy_in(OptContext *ctx, intptr_t s, intptr_t l)
{
    while (true) {
        MemCopyInfo *mc = mem_copy_first(ctx, s, l);
        if (!mc) {
            break;
        }
        remove_mem_copy(ctx, mc);
    }
}

static void remove_mem_copy_all(OptContext *ctx)
{
    remove_mem_copy_in(ctx, 0, -1);
    tcg_debug_assert(interval_tree_is_empty(&ctx->mem_copy));
}

static TCGTemp *find_better_copy(TCGTemp *ts)
{
    TCGTemp *i, *ret;

    /* If this is already readonly, we can't do better. */
    if (temp_readonly(ts)) {
        return ts;
    }

    ret = ts;
    for (i = ts_info(ts)->next_copy; i != ts; i = ts_info(i)->next_copy) {
        ret = cmp_better_copy(ret, i);
    }
    return ret;
}

static void move_mem_copies(TCGTemp *dst_ts, TCGTemp *src_ts)
{
    TempOptInfo *si = ts_info(src_ts);
    TempOptInfo *di = ts_info(dst_ts);
    MemCopyInfo *mc;

    QSIMPLEQ_FOREACH(mc, &si->mem_copy, next) {
        tcg_debug_assert(mc->ts == src_ts);
        mc->ts = dst_ts;
    }
    QSIMPLEQ_CONCAT(&di->mem_copy, &si->mem_copy);
}

/* Reset TEMP's state, possibly removing the temp from the list of copies. */
static void reset_ts(OptContext *ctx, TCGTemp *ts)
{
    TempOptInfo *ti = ts_info(ts);
    TCGTemp *pts = ti->prev_copy;
    TCGTemp *nts = ti->next_copy;
    TempOptInfo *pi = ts_info(pts);
    TempOptInfo *ni = ts_info(nts);

    ni->prev_copy = ti->prev_copy;
    pi->next_copy = ti->next_copy;
    ti->next_copy = ts;
    ti->prev_copy = ts;
    ti->is_const = false;
    ti->z_mask = -1;
    ti->s_mask = 0;

    if (!QSIMPLEQ_EMPTY(&ti->mem_copy)) {
        if (ts == nts) {
            /* Last temp copy being removed, the mem copies die. */
            MemCopyInfo *mc;
            QSIMPLEQ_FOREACH(mc, &ti->mem_copy, next) {
                interval_tree_remove(&mc->itree, &ctx->mem_copy);
            }
            QSIMPLEQ_CONCAT(&ctx->mem_free, &ti->mem_copy);
        } else {
            move_mem_copies(find_better_copy(nts), ts);
        }
    }
}

static void reset_temp(OptContext *ctx, TCGArg arg)
{
    reset_ts(ctx, arg_temp(arg));
}

static void record_mem_copy(OptContext *ctx, TCGType type,
                            TCGTemp *ts, intptr_t start, intptr_t last)
{
    MemCopyInfo *mc;
    TempOptInfo *ti;

    mc = QSIMPLEQ_FIRST(&ctx->mem_free);
    if (mc) {
        QSIMPLEQ_REMOVE_HEAD(&ctx->mem_free, next);
    } else {
        mc = tcg_malloc(sizeof(*mc));
    }

    memset(mc, 0, sizeof(*mc));
    mc->itree.start = start;
    mc->itree.last = last;
    mc->type = type;
    interval_tree_insert(&mc->itree, &ctx->mem_copy);

    ts = find_better_copy(ts);
    ti = ts_info(ts);
    mc->ts = ts;
    QSIMPLEQ_INSERT_TAIL(&ti->mem_copy, mc, next);
}

static bool ts_are_copies(TCGTemp *ts1, TCGTemp *ts2)
{
    TCGTemp *i;

    if (ts1 == ts2) {
        return true;
    }

    if (!ts_is_copy(ts1) || !ts_is_copy(ts2)) {
        return false;
    }

    for (i = ts_info(ts1)->next_copy; i != ts1; i = ts_info(i)->next_copy) {
        if (i == ts2) {
            return true;
        }
    }

    return false;
}

static bool args_are_copies(TCGArg arg1, TCGArg arg2)
{
    return ts_are_copies(arg_temp(arg1), arg_temp(arg2));
}

static TCGTemp *find_mem_copy_for(OptContext *ctx, TCGType type, intptr_t s)
{
    MemCopyInfo *mc;

    for (mc = mem_copy_first(ctx, s, s); mc; mc = mem_copy_next(mc, s, s)) {
        if (mc->itree.start == s && mc->type == type) {
            return find_better_copy(mc->ts);
        }
    }
    return NULL;
}

static TCGArg arg_new_constant(OptContext *ctx, uint64_t val)
{
    TCGType type = ctx->type;
    TCGTemp *ts;

    if (type == TCG_TYPE_I32) {
        val = (int32_t)val;
    }

    ts = tcg_constant_internal(type, val);
    init_ts_info(ctx, ts);

    return temp_arg(ts);
}

static TCGArg arg_new_temp(OptContext *ctx)
{
    TCGTemp *ts = tcg_temp_new_internal(ctx->type, TEMP_EBB);
    init_ts_info(ctx, ts);
    return temp_arg(ts);
}

static bool tcg_opt_gen_mov(OptContext *ctx, TCGOp *op, TCGArg dst, TCGArg src)
{
    TCGTemp *dst_ts = arg_temp(dst);
    TCGTemp *src_ts = arg_temp(src);
    TempOptInfo *di;
    TempOptInfo *si;
    TCGOpcode new_op;

    if (ts_are_copies(dst_ts, src_ts)) {
        tcg_op_remove(ctx->tcg, op);
        return true;
    }

    reset_ts(ctx, dst_ts);
    di = ts_info(dst_ts);
    si = ts_info(src_ts);

    switch (ctx->type) {
    case TCG_TYPE_I32:
        new_op = INDEX_op_mov_i32;
        break;
    case TCG_TYPE_I64:
        new_op = INDEX_op_mov_i64;
        break;
    case TCG_TYPE_V64:
    case TCG_TYPE_V128:
    case TCG_TYPE_V256:
        /* TCGOP_VECL and TCGOP_VECE remain unchanged. */
        new_op = INDEX_op_mov_vec;
        break;
    default:
        g_assert_not_reached();
    }
    op->opc = new_op;
    op->args[0] = dst;
    op->args[1] = src;

    di->z_mask = si->z_mask;
    di->s_mask = si->s_mask;

    if (src_ts->type == dst_ts->type) {
        TempOptInfo *ni = ts_info(si->next_copy);

        di->next_copy = si->next_copy;
        di->prev_copy = src_ts;
        ni->prev_copy = dst_ts;
        si->next_copy = dst_ts;
        di->is_const = si->is_const;
        di->val = si->val;

        if (!QSIMPLEQ_EMPTY(&si->mem_copy)
            && cmp_better_copy(src_ts, dst_ts) == dst_ts) {
            move_mem_copies(dst_ts, src_ts);
        }
    }
    return true;
}

static bool tcg_opt_gen_movi(OptContext *ctx, TCGOp *op,
                             TCGArg dst, uint64_t val)
{
    /* Convert movi to mov with constant temp. */
    return tcg_opt_gen_mov(ctx, op, dst, arg_new_constant(ctx, val));
}

static uint64_t do_constant_folding_2(TCGOpcode op, uint64_t x, uint64_t y)
{
    uint64_t l64, h64;

    switch (op) {
    CASE_OP_32_64(add):
        return x + y;

    CASE_OP_32_64(sub):
        return x - y;

    CASE_OP_32_64(mul):
        return x * y;

    CASE_OP_32_64_VEC(and):
        return x & y;

    CASE_OP_32_64_VEC(or):
        return x | y;

    CASE_OP_32_64_VEC(xor):
        return x ^ y;

    case INDEX_op_shl_i32:
        return (uint32_t)x << (y & 31);

    case INDEX_op_shl_i64:
        return (uint64_t)x << (y & 63);

    case INDEX_op_shr_i32:
        return (uint32_t)x >> (y & 31);

    case INDEX_op_shr_i64:
        return (uint64_t)x >> (y & 63);

    case INDEX_op_sar_i32:
        return (int32_t)x >> (y & 31);

    case INDEX_op_sar_i64:
        return (int64_t)x >> (y & 63);

    case INDEX_op_rotr_i32:
        return ror32(x, y & 31);

    case INDEX_op_rotr_i64:
        return ror64(x, y & 63);

    case INDEX_op_rotl_i32:
        return rol32(x, y & 31);

    case INDEX_op_rotl_i64:
        return rol64(x, y & 63);

    CASE_OP_32_64_VEC(not):
        return ~x;

    CASE_OP_32_64(neg):
        return -x;

    CASE_OP_32_64_VEC(andc):
        return x & ~y;

    CASE_OP_32_64_VEC(orc):
        return x | ~y;

    CASE_OP_32_64_VEC(eqv):
        return ~(x ^ y);

    CASE_OP_32_64_VEC(nand):
        return ~(x & y);

    CASE_OP_32_64_VEC(nor):
        return ~(x | y);

    case INDEX_op_clz_i32:
        return (uint32_t)x ? clz32(x) : y;

    case INDEX_op_clz_i64:
        return x ? clz64(x) : y;

    case INDEX_op_ctz_i32:
        return (uint32_t)x ? ctz32(x) : y;

    case INDEX_op_ctz_i64:
        return x ? ctz64(x) : y;

    case INDEX_op_ctpop_i32:
        return ctpop32(x);

    case INDEX_op_ctpop_i64:
        return ctpop64(x);

    CASE_OP_32_64(ext8s):
        return (int8_t)x;

    CASE_OP_32_64(ext16s):
        return (int16_t)x;

    CASE_OP_32_64(ext8u):
        return (uint8_t)x;

    CASE_OP_32_64(ext16u):
        return (uint16_t)x;

    CASE_OP_32_64(bswap16):
        x = bswap16(x);
        return y & TCG_BSWAP_OS ? (int16_t)x : x;

    CASE_OP_32_64(bswap32):
        x = bswap32(x);
        return y & TCG_BSWAP_OS ? (int32_t)x : x;

    case INDEX_op_bswap64_i64:
        return bswap64(x);

    case INDEX_op_ext_i32_i64:
    case INDEX_op_ext32s_i64:
        return (int32_t)x;

    case INDEX_op_extu_i32_i64:
    case INDEX_op_extrl_i64_i32:
    case INDEX_op_ext32u_i64:
        return (uint32_t)x;

    case INDEX_op_extrh_i64_i32:
        return (uint64_t)x >> 32;

    case INDEX_op_muluh_i32:
        return ((uint64_t)(uint32_t)x * (uint32_t)y) >> 32;
    case INDEX_op_mulsh_i32:
        return ((int64_t)(int32_t)x * (int32_t)y) >> 32;

    case INDEX_op_muluh_i64:
        mulu64(&l64, &h64, x, y);
        return h64;
    case INDEX_op_mulsh_i64:
        muls64(&l64, &h64, x, y);
        return h64;

    case INDEX_op_div_i32:
        /* Avoid crashing on divide by zero, otherwise undefined. */
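        /*
         * (y ? : 1) is the GNU "?:" extension: it evaluates to y when y
         * is nonzero and to 1 otherwise, so a guest divide by zero can
         * never raise SIGFPE on the host.
         */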
        return (int32_t)x / ((int32_t)y ? : 1);
    case INDEX_op_divu_i32:
        return (uint32_t)x / ((uint32_t)y ? : 1);
    case INDEX_op_div_i64:
        return (int64_t)x / ((int64_t)y ? : 1);
    case INDEX_op_divu_i64:
        return (uint64_t)x / ((uint64_t)y ? : 1);

    case INDEX_op_rem_i32:
        return (int32_t)x % ((int32_t)y ? : 1);
    case INDEX_op_remu_i32:
        return (uint32_t)x % ((uint32_t)y ? : 1);
    case INDEX_op_rem_i64:
        return (int64_t)x % ((int64_t)y ? : 1);
    case INDEX_op_remu_i64:
        return (uint64_t)x % ((uint64_t)y ? : 1);

    default:
        g_assert_not_reached();
    }
}

static uint64_t do_constant_folding(TCGOpcode op, TCGType type,
                                    uint64_t x, uint64_t y)
{
    uint64_t res = do_constant_folding_2(op, x, y);
    if (type == TCG_TYPE_I32) {
        res = (int32_t)res;
    }
    return res;
}

static bool do_constant_folding_cond_32(uint32_t x, uint32_t y, TCGCond c)
{
    switch (c) {
    case TCG_COND_EQ:
        return x == y;
    case TCG_COND_NE:
        return x != y;
    case TCG_COND_LT:
        return (int32_t)x < (int32_t)y;
    case TCG_COND_GE:
        return (int32_t)x >= (int32_t)y;
    case TCG_COND_LE:
        return (int32_t)x <= (int32_t)y;
    case TCG_COND_GT:
        return (int32_t)x > (int32_t)y;
    case TCG_COND_LTU:
        return x < y;
    case TCG_COND_GEU:
        return x >= y;
    case TCG_COND_LEU:
        return x <= y;
    case TCG_COND_GTU:
        return x > y;
    case TCG_COND_TSTEQ:
        return (x & y) == 0;
    case TCG_COND_TSTNE:
        return (x & y) != 0;
    case TCG_COND_ALWAYS:
    case TCG_COND_NEVER:
        break;
    }
    g_assert_not_reached();
}

static bool do_constant_folding_cond_64(uint64_t x, uint64_t y, TCGCond c)
{
    switch (c) {
    case TCG_COND_EQ:
        return x == y;
    case TCG_COND_NE:
        return x != y;
    case TCG_COND_LT:
        return (int64_t)x < (int64_t)y;
    case TCG_COND_GE:
        return (int64_t)x >= (int64_t)y;
    case TCG_COND_LE:
        return (int64_t)x <= (int64_t)y;
    case TCG_COND_GT:
        return (int64_t)x > (int64_t)y;
    case TCG_COND_LTU:
        return x < y;
    case TCG_COND_GEU:
        return x >= y;
    case TCG_COND_LEU:
        return x <= y;
    case TCG_COND_GTU:
        return x > y;
    case TCG_COND_TSTEQ:
        return (x & y) == 0;
    case TCG_COND_TSTNE:
        return (x & y) != 0;
    case TCG_COND_ALWAYS:
    case TCG_COND_NEVER:
        break;
    }
    g_assert_not_reached();
}

static int do_constant_folding_cond_eq(TCGCond c)
{
    switch (c) {
    case TCG_COND_GT:
    case TCG_COND_LTU:
    case TCG_COND_LT:
    case TCG_COND_GTU:
    case TCG_COND_NE:
        return 0;
    case TCG_COND_GE:
    case TCG_COND_GEU:
    case TCG_COND_LE:
    case TCG_COND_LEU:
    case TCG_COND_EQ:
        return 1;
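    /*
     * With x == y, TSTEQ/TSTNE test (x & x) == 0, i.e. x == 0, which
     * still depends on the value, so there is no constant result.
     */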
    case TCG_COND_TSTEQ:
    case TCG_COND_TSTNE:
        return -1;
    case TCG_COND_ALWAYS:
    case TCG_COND_NEVER:
        break;
    }
    g_assert_not_reached();
}

/*
 * Return -1 if the condition can't be simplified,
 * and the result of the condition (0 or 1) if it can.
 */
static int do_constant_folding_cond(TCGType type, TCGArg x,
                                    TCGArg y, TCGCond c)
{
    if (arg_is_const(x) && arg_is_const(y)) {
        uint64_t xv = arg_info(x)->val;
        uint64_t yv = arg_info(y)->val;

        switch (type) {
        case TCG_TYPE_I32:
            return do_constant_folding_cond_32(xv, yv, c);
        case TCG_TYPE_I64:
            return do_constant_folding_cond_64(xv, yv, c);
        default:
            /* Only scalar comparisons are optimizable */
            return -1;
        }
    } else if (args_are_copies(x, y)) {
        return do_constant_folding_cond_eq(c);
    } else if (arg_is_const_val(y, 0)) {
        switch (c) {
        case TCG_COND_LTU:
        case TCG_COND_TSTNE:
            return 0;
        case TCG_COND_GEU:
        case TCG_COND_TSTEQ:
            return 1;
        default:
            return -1;
        }
    }
    return -1;
}

/**
 * swap_commutative:
 * @dest: TCGArg of the destination argument, or NO_DEST.
 * @p1: first paired argument
 * @p2: second paired argument
 *
 * If *@p1 is a constant and *@p2 is not, swap.
 * If *@p2 matches @dest, swap.
 * Return true if a swap was performed.
 */
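
/*
 * For example, with @dest == t0:
 *     add_i32 t0, $5, t1  ->  add_i32 t0, t1, $5   (constant to the right)
 *     add_i32 t0, t1, t0  ->  add_i32 t0, t0, t1   (matches "op a, a, b")
 */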

#define NO_DEST temp_arg(NULL)

static bool swap_commutative(TCGArg dest, TCGArg *p1, TCGArg *p2)
{
    TCGArg a1 = *p1, a2 = *p2;
    int sum = 0;
    sum += arg_is_const(a1);
    sum -= arg_is_const(a2);

    /* Prefer the constant in the second argument, and then the form
       op a, a, b, which is better handled on non-RISC hosts. */
    if (sum > 0 || (sum == 0 && dest == a2)) {
        *p1 = a2;
        *p2 = a1;
        return true;
    }
    return false;
}

static bool swap_commutative2(TCGArg *p1, TCGArg *p2)
{
    int sum = 0;
    sum += arg_is_const(p1[0]);
    sum += arg_is_const(p1[1]);
    sum -= arg_is_const(p2[0]);
    sum -= arg_is_const(p2[1]);
    if (sum > 0) {
        TCGArg t;
        t = p1[0], p1[0] = p2[0], p2[0] = t;
        t = p1[1], p1[1] = p2[1], p2[1] = t;
        return true;
    }
    return false;
}

/*
 * Return -1 if the condition can't be simplified,
 * and the result of the condition (0 or 1) if it can.
 */
static int do_constant_folding_cond1(OptContext *ctx, TCGOp *op, TCGArg dest,
                                     TCGArg *p1, TCGArg *p2, TCGArg *pcond)
{
    TCGCond cond;
    bool swap;
    int r;

    swap = swap_commutative(dest, p1, p2);
    cond = *pcond;
    if (swap) {
        *pcond = cond = tcg_swap_cond(cond);
    }

    r = do_constant_folding_cond(ctx->type, *p1, *p2, cond);
    if (r >= 0) {
        return r;
    }
    if (!is_tst_cond(cond)) {
        return -1;
    }

    /*
     * TSTNE x,x -> NE x,0
     * TSTNE x,-1 -> NE x,0
     */
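    /* In both cases x & y == x, so the test degenerates to x != 0. */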
    if (args_are_copies(*p1, *p2) || arg_is_const_val(*p2, -1)) {
        *p2 = arg_new_constant(ctx, 0);
        *pcond = tcg_tst_eqne_cond(cond);
        return -1;
    }

    /* TSTNE x,sign -> LT x,0 */
    if (arg_is_const_val(*p2, (ctx->type == TCG_TYPE_I32
                               ? INT32_MIN : INT64_MIN))) {
        *p2 = arg_new_constant(ctx, 0);
        *pcond = tcg_tst_ltge_cond(cond);
        return -1;
    }

    /* Expand to AND with a temporary if no backend support. */
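    /* I.e. "cond x, y, TSTNE" becomes "and t, x, y" + "cond t, 0, NE". */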
    if (!TCG_TARGET_HAS_tst) {
        TCGOpcode and_opc = (ctx->type == TCG_TYPE_I32
                             ? INDEX_op_and_i32 : INDEX_op_and_i64);
        TCGOp *op2 = tcg_op_insert_before(ctx->tcg, op, and_opc, 3);
        TCGArg tmp = arg_new_temp(ctx);

        op2->args[0] = tmp;
        op2->args[1] = *p1;
        op2->args[2] = *p2;

        *p1 = tmp;
        *p2 = arg_new_constant(ctx, 0);
        *pcond = tcg_tst_eqne_cond(cond);
    }
    return -1;
}

static int do_constant_folding_cond2(OptContext *ctx, TCGOp *op, TCGArg *args)
{
    TCGArg al, ah, bl, bh;
    TCGCond c;
    bool swap;
    int r;

    swap = swap_commutative2(args, args + 2);
    c = args[4];
    if (swap) {
        args[4] = c = tcg_swap_cond(c);
    }

    al = args[0];
    ah = args[1];
    bl = args[2];
    bh = args[3];

    if (arg_is_const(bl) && arg_is_const(bh)) {
        tcg_target_ulong blv = arg_info(bl)->val;
        tcg_target_ulong bhv = arg_info(bh)->val;
        uint64_t b = deposit64(blv, 32, 32, bhv);

        if (arg_is_const(al) && arg_is_const(ah)) {
            tcg_target_ulong alv = arg_info(al)->val;
            tcg_target_ulong ahv = arg_info(ah)->val;
            uint64_t a = deposit64(alv, 32, 32, ahv);

            r = do_constant_folding_cond_64(a, b, c);
            if (r >= 0) {
                return r;
            }
        }

        if (b == 0) {
            switch (c) {
            case TCG_COND_LTU:
            case TCG_COND_TSTNE:
                return 0;
            case TCG_COND_GEU:
            case TCG_COND_TSTEQ:
                return 1;
            default:
                break;
            }
        }

        /* TSTNE x,-1 -> NE x,0 */
        if (b == -1 && is_tst_cond(c)) {
            args[3] = args[2] = arg_new_constant(ctx, 0);
            args[4] = tcg_tst_eqne_cond(c);
            return -1;
        }

        /* TSTNE x,sign -> LT x,0 */
        if (b == INT64_MIN && is_tst_cond(c)) {
            /* bl must be 0, so copy that to bh */
            args[3] = bl;
            args[4] = tcg_tst_ltge_cond(c);
            return -1;
        }
    }

    if (args_are_copies(al, bl) && args_are_copies(ah, bh)) {
        r = do_constant_folding_cond_eq(c);
        if (r >= 0) {
            return r;
        }

        /* TSTNE x,x -> NE x,0 */
        if (is_tst_cond(c)) {
            args[3] = args[2] = arg_new_constant(ctx, 0);
            args[4] = tcg_tst_eqne_cond(c);
            return -1;
        }
    }

    /* Expand to AND with a temporary if no backend support. */
    if (!TCG_TARGET_HAS_tst && is_tst_cond(c)) {
        TCGOp *op1 = tcg_op_insert_before(ctx->tcg, op, INDEX_op_and_i32, 3);
        TCGOp *op2 = tcg_op_insert_before(ctx->tcg, op, INDEX_op_and_i32, 3);
        TCGArg t1 = arg_new_temp(ctx);
        TCGArg t2 = arg_new_temp(ctx);

        op1->args[0] = t1;
        op1->args[1] = al;
        op1->args[2] = bl;
        op2->args[0] = t2;
        op2->args[1] = ah;
        op2->args[2] = bh;

        args[0] = t1;
        args[1] = t2;
        args[3] = args[2] = arg_new_constant(ctx, 0);
        args[4] = tcg_tst_eqne_cond(c);
    }
    return -1;
}

static void init_arguments(OptContext *ctx, TCGOp *op, int nb_args)
{
    for (int i = 0; i < nb_args; i++) {
        TCGTemp *ts = arg_temp(op->args[i]);
        init_ts_info(ctx, ts);
    }
}

static void copy_propagate(OptContext *ctx, TCGOp *op,
                           int nb_oargs, int nb_iargs)
{
    for (int i = nb_oargs; i < nb_oargs + nb_iargs; i++) {
        TCGTemp *ts = arg_temp(op->args[i]);
        if (ts_is_copy(ts)) {
            op->args[i] = temp_arg(find_better_copy(ts));
        }
    }
}

static void finish_bb(OptContext *ctx)
{
    /* We only optimize memory barriers across basic blocks. */
    ctx->prev_mb = NULL;
}

static void finish_ebb(OptContext *ctx)
{
    finish_bb(ctx);
    /* We only optimize across extended basic blocks. */
    memset(&ctx->temps_used, 0, sizeof(ctx->temps_used));
    remove_mem_copy_all(ctx);
}

static void finish_folding(OptContext *ctx, TCGOp *op)
{
    const TCGOpDef *def = &tcg_op_defs[op->opc];
    int i, nb_oargs;

    nb_oargs = def->nb_oargs;
    for (i = 0; i < nb_oargs; i++) {
        TCGTemp *ts = arg_temp(op->args[i]);
        reset_ts(ctx, ts);
        /*
         * Save the corresponding known-zero/sign bits mask for the
         * first output argument (only one supported so far).
         */
        if (i == 0) {
            ts_info(ts)->z_mask = ctx->z_mask;
        }
    }
}

/*
 * The fold_* functions return true when processing is complete,
 * usually by folding the operation to a constant or to a copy,
 * and calling tcg_opt_gen_{mov,movi}. They may do other things,
 * like collect information about the value produced, for use in
 * optimizing a subsequent operation.
 *
 * These first fold_* functions are all helpers, used by other
 * folders for more specific operations.
 */

static bool fold_const1(OptContext *ctx, TCGOp *op)
{
    if (arg_is_const(op->args[1])) {
        uint64_t t;

        t = arg_info(op->args[1])->val;
        t = do_constant_folding(op->opc, ctx->type, t, 0);
        return tcg_opt_gen_movi(ctx, op, op->args[0], t);
    }
    return false;
}

static bool fold_const2(OptContext *ctx, TCGOp *op)
{
    if (arg_is_const(op->args[1]) && arg_is_const(op->args[2])) {
        uint64_t t1 = arg_info(op->args[1])->val;
        uint64_t t2 = arg_info(op->args[2])->val;

        t1 = do_constant_folding(op->opc, ctx->type, t1, t2);
        return tcg_opt_gen_movi(ctx, op, op->args[0], t1);
    }
    return false;
}

static bool fold_commutative(OptContext *ctx, TCGOp *op)
{
    swap_commutative(op->args[0], &op->args[1], &op->args[2]);
    return false;
}

static bool fold_const2_commutative(OptContext *ctx, TCGOp *op)
{
    swap_commutative(op->args[0], &op->args[1], &op->args[2]);
    return fold_const2(ctx, op);
}

/*
 * Record "zero" and "sign" masks for the single output of @op.
 * See TempOptInfo definition of z_mask and s_mask.
 * If z_mask allows, fold the output to constant zero.
 * The passed s_mask may be augmented by z_mask.
 */
static bool fold_masks_zs(OptContext *ctx, TCGOp *op,
                          uint64_t z_mask, int64_t s_mask)
{
    const TCGOpDef *def = &tcg_op_defs[op->opc];
    TCGTemp *ts;
    TempOptInfo *ti;
    int rep;

    /* Only single-output opcodes are supported here. */
    tcg_debug_assert(def->nb_oargs == 1);

    /*
     * 32-bit ops generate 32-bit results, which for the purpose of
     * simplifying tcg are sign-extended. Certainly that's how we
     * represent our constants elsewhere. Note that the bits will
     * be reset properly for a 64-bit value when encountering the
     * type changing opcodes.
     */
    if (ctx->type == TCG_TYPE_I32) {
        z_mask = (int32_t)z_mask;
        s_mask |= INT32_MIN;
    }

    if (z_mask == 0) {
        return tcg_opt_gen_movi(ctx, op, op->args[0], 0);
    }

    ts = arg_temp(op->args[0]);
    reset_ts(ctx, ts);

    ti = ts_info(ts);
    ti->z_mask = z_mask;

    /* Canonicalize s_mask and incorporate data from z_mask. */
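    /*
     * clz64(~s_mask) counts the sign-bit repetitions already known,
     * while clz64(z_mask) counts high bits known to be zero, which
     * likewise match the (necessarily zero) msb.  For example,
     * z_mask == 0xff yields rep == 55, so s_mask == INT64_MIN >> 55,
     * covering bits 63..8.
     */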
    rep = clz64(~s_mask);
    rep = MAX(rep, clz64(z_mask));
    rep = MAX(rep - 1, 0);
    ti->s_mask = INT64_MIN >> rep;

    return true;
}

static bool fold_masks(OptContext *ctx, TCGOp *op)
{
    return fold_masks_zs(ctx, op, ctx->z_mask, ctx->s_mask);
}

/*
 * An "affected" mask bit is 0 if and only if the result is identical
 * to the first input. Thus if the entire mask is 0, the operation
 * is equivalent to a copy.
 */
static bool fold_affected_mask(OptContext *ctx, TCGOp *op, uint64_t a_mask)
{
    if (ctx->type == TCG_TYPE_I32) {
        a_mask = (uint32_t)a_mask;
    }
    if (a_mask == 0) {
        return tcg_opt_gen_mov(ctx, op, op->args[0], op->args[1]);
    }
    return false;
}

/*
 * Convert @op to NOT, if NOT is supported by the host.
 * Return true if the conversion is successful, which will still
 * indicate that the processing is complete.
 */
static bool fold_not(OptContext *ctx, TCGOp *op);
static bool fold_to_not(OptContext *ctx, TCGOp *op, int idx)
{
    TCGOpcode not_op;
    bool have_not;

    switch (ctx->type) {
    case TCG_TYPE_I32:
        not_op = INDEX_op_not_i32;
        have_not = TCG_TARGET_HAS_not_i32;
        break;
    case TCG_TYPE_I64:
        not_op = INDEX_op_not_i64;
        have_not = TCG_TARGET_HAS_not_i64;
        break;
    case TCG_TYPE_V64:
    case TCG_TYPE_V128:
    case TCG_TYPE_V256:
        not_op = INDEX_op_not_vec;
        have_not = TCG_TARGET_HAS_not_vec;
        break;
    default:
        g_assert_not_reached();
    }
    if (have_not) {
        op->opc = not_op;
        op->args[1] = op->args[idx];
        return fold_not(ctx, op);
    }
    return false;
}

/* If the binary operation has first argument @i, fold to @i. */
static bool fold_ix_to_i(OptContext *ctx, TCGOp *op, uint64_t i)
{
    if (arg_is_const_val(op->args[1], i)) {
        return tcg_opt_gen_movi(ctx, op, op->args[0], i);
    }
    return false;
}

/* If the binary operation has first argument @i, fold to NOT. */
static bool fold_ix_to_not(OptContext *ctx, TCGOp *op, uint64_t i)
{
    if (arg_is_const_val(op->args[1], i)) {
        return fold_to_not(ctx, op, 2);
    }
    return false;
}

/* If the binary operation has second argument @i, fold to @i. */
static bool fold_xi_to_i(OptContext *ctx, TCGOp *op, uint64_t i)
{
    if (arg_is_const_val(op->args[2], i)) {
        return tcg_opt_gen_movi(ctx, op, op->args[0], i);
    }
    return false;
}

/* If the binary operation has second argument @i, fold to identity. */
static bool fold_xi_to_x(OptContext *ctx, TCGOp *op, uint64_t i)
{
    if (arg_is_const_val(op->args[2], i)) {
        return tcg_opt_gen_mov(ctx, op, op->args[0], op->args[1]);
    }
    return false;
}

/* If the binary operation has second argument @i, fold to NOT. */
static bool fold_xi_to_not(OptContext *ctx, TCGOp *op, uint64_t i)
{
    if (arg_is_const_val(op->args[2], i)) {
        return fold_to_not(ctx, op, 1);
    }
    return false;
}

/* If the binary operation has both arguments equal, fold to @i. */
static bool fold_xx_to_i(OptContext *ctx, TCGOp *op, uint64_t i)
{
    if (args_are_copies(op->args[1], op->args[2])) {
        return tcg_opt_gen_movi(ctx, op, op->args[0], i);
    }
    return false;
}

/* If the binary operation has both arguments equal, fold to identity. */
static bool fold_xx_to_x(OptContext *ctx, TCGOp *op)
{
    if (args_are_copies(op->args[1], op->args[2])) {
        return tcg_opt_gen_mov(ctx, op, op->args[0], op->args[1]);
    }
    return false;
}

/*
 * These outermost fold_<op> functions are sorted alphabetically.
 *
 * The ordering of the transformations should be:
 *   1) those that produce a constant
 *   2) those that produce a copy
 *   3) those that produce information about the result value.
 */

static bool fold_add(OptContext *ctx, TCGOp *op)
{
    if (fold_const2_commutative(ctx, op) ||
        fold_xi_to_x(ctx, op, 0)) {
        return true;
    }
    return false;
}

/* We cannot as yet do_constant_folding with vectors. */
static bool fold_add_vec(OptContext *ctx, TCGOp *op)
{
    if (fold_commutative(ctx, op) ||
        fold_xi_to_x(ctx, op, 0)) {
        return true;
    }
    return false;
}

static bool fold_addsub2(OptContext *ctx, TCGOp *op, bool add)
{
    bool a_const = arg_is_const(op->args[2]) && arg_is_const(op->args[3]);
    bool b_const = arg_is_const(op->args[4]) && arg_is_const(op->args[5]);

    if (a_const && b_const) {
        uint64_t al = arg_info(op->args[2])->val;
        uint64_t ah = arg_info(op->args[3])->val;
        uint64_t bl = arg_info(op->args[4])->val;
        uint64_t bh = arg_info(op->args[5])->val;
        TCGArg rl, rh;
        TCGOp *op2;

        if (ctx->type == TCG_TYPE_I32) {
            uint64_t a = deposit64(al, 32, 32, ah);
            uint64_t b = deposit64(bl, 32, 32, bh);

            if (add) {
                a += b;
            } else {
                a -= b;
            }

            al = sextract64(a, 0, 32);
            ah = sextract64(a, 32, 32);
        } else {
            Int128 a = int128_make128(al, ah);
            Int128 b = int128_make128(bl, bh);

            if (add) {
                a = int128_add(a, b);
            } else {
                a = int128_sub(a, b);
            }

            al = int128_getlo(a);
            ah = int128_gethi(a);
        }

        rl = op->args[0];
        rh = op->args[1];

        /* The proper opcode is supplied by tcg_opt_gen_mov. */
        op2 = tcg_op_insert_before(ctx->tcg, op, 0, 2);

        tcg_opt_gen_movi(ctx, op, rl, al);
        tcg_opt_gen_movi(ctx, op2, rh, ah);
        return true;
    }

    /* Fold sub2 r,x,i to add2 r,x,-i */
    if (!add && b_const) {
        uint64_t bl = arg_info(op->args[4])->val;
        uint64_t bh = arg_info(op->args[5])->val;

        /* Negate the two parts without assembling and disassembling. */
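        /*
         * Two's complement: -b == ~b + 1.  The low part becomes
         * ~bl + 1 == -bl; the +1 carries into the high part exactly
         * when bl was zero, i.e. when the new bl is zero, hence "+ !bl".
         * For example, bh:bl == 1:0 negates to 0xffffffffffffffff:0.
         */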
        bl = -bl;
        bh = ~bh + !bl;

        op->opc = (ctx->type == TCG_TYPE_I32
                   ? INDEX_op_add2_i32 : INDEX_op_add2_i64);
        op->args[4] = arg_new_constant(ctx, bl);
        op->args[5] = arg_new_constant(ctx, bh);
    }
    return false;
}

static bool fold_add2(OptContext *ctx, TCGOp *op)
{
    /* Note that the high and low parts may be independently swapped. */
    swap_commutative(op->args[0], &op->args[2], &op->args[4]);
    swap_commutative(op->args[1], &op->args[3], &op->args[5]);

    return fold_addsub2(ctx, op, true);
}

static bool fold_and(OptContext *ctx, TCGOp *op)
{
    uint64_t z1, z2;

    if (fold_const2_commutative(ctx, op) ||
        fold_xi_to_i(ctx, op, 0) ||
        fold_xi_to_x(ctx, op, -1) ||
        fold_xx_to_x(ctx, op)) {
        return true;
    }

    z1 = arg_info(op->args[1])->z_mask;
    z2 = arg_info(op->args[2])->z_mask;
    ctx->z_mask = z1 & z2;

    /*
     * Sign repetitions are perforce all identical, whether they are 1 or 0.
     * Bitwise operations preserve the relative quantity of the repetitions.
     */
    ctx->s_mask = arg_info(op->args[1])->s_mask
                & arg_info(op->args[2])->s_mask;

    /*
     * Known-zeros does not imply known-ones. Therefore unless
     * arg2 is constant, we can't infer affected bits from it.
     */
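    /*
     * Bits in z1 & ~z2 are the only bits this AND can clear; if that
     * set is empty the result always equals arg1 and the op reduces
     * to a mov.
     */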
Richard Henderson045ace32024-12-19 10:33:51 -08001306 if (arg_is_const(op->args[2]) &&
1307 fold_affected_mask(ctx, op, z1 & ~z2)) {
1308 return true;
Richard Hendersonfae450b2021-08-25 22:42:19 -07001309 }
1310
1311 return fold_masks(ctx, op);
Richard Henderson2f9f08b2021-08-25 12:03:48 -07001312}
1313
1314static bool fold_andc(OptContext *ctx, TCGOp *op)
1315{
Richard Hendersonfae450b2021-08-25 22:42:19 -07001316 uint64_t z1;
1317
Richard Hendersoncbe42fb2021-08-25 13:02:00 -07001318 if (fold_const2(ctx, op) ||
Richard Henderson0e0a32b2021-08-24 13:18:01 -07001319 fold_xx_to_i(ctx, op, 0) ||
Richard Hendersona63ce0e2021-08-25 20:28:53 -07001320 fold_xi_to_x(ctx, op, 0) ||
Richard Henderson0e0a32b2021-08-24 13:18:01 -07001321 fold_ix_to_not(ctx, op, -1)) {
Richard Hendersoncbe42fb2021-08-25 13:02:00 -07001322 return true;
1323 }
Richard Hendersonfae450b2021-08-25 22:42:19 -07001324
1325 z1 = arg_info(op->args[1])->z_mask;
1326
1327 /*
1328 * Known-zeros does not imply known-ones. Therefore unless
1329 * arg2 is constant, we can't infer anything from it.
1330 */
1331 if (arg_is_const(op->args[2])) {
1332 uint64_t z2 = ~arg_info(op->args[2])->z_mask;
Richard Henderson045ace32024-12-19 10:33:51 -08001333 if (fold_affected_mask(ctx, op, z1 & ~z2)) {
1334 return true;
1335 }
Richard Hendersonfae450b2021-08-25 22:42:19 -07001336 z1 &= z2;
1337 }
1338 ctx->z_mask = z1;
1339
Richard Henderson3f2b1f82021-08-26 13:08:54 -07001340 ctx->s_mask = arg_info(op->args[1])->s_mask
1341 & arg_info(op->args[2])->s_mask;
Richard Hendersonfae450b2021-08-25 22:42:19 -07001342 return fold_masks(ctx, op);
Richard Henderson2f9f08b2021-08-25 12:03:48 -07001343}
1344
Richard Henderson079b0802021-08-24 09:30:59 -07001345static bool fold_brcond(OptContext *ctx, TCGOp *op)
1346{
Richard Hendersonfb04ab72024-01-10 18:21:58 +11001347 int i = do_constant_folding_cond1(ctx, op, NO_DEST, &op->args[0],
Richard Henderson246c4b72023-10-24 16:36:50 -07001348 &op->args[1], &op->args[2]);
Richard Henderson079b0802021-08-24 09:30:59 -07001349 if (i == 0) {
1350 tcg_op_remove(ctx->tcg, op);
1351 return true;
1352 }
1353 if (i > 0) {
1354 op->opc = INDEX_op_br;
1355 op->args[0] = op->args[3];
Richard Henderson15268552024-12-08 07:45:11 -06001356 finish_ebb(ctx);
1357 } else {
1358 finish_bb(ctx);
Richard Henderson079b0802021-08-24 09:30:59 -07001359 }
Richard Henderson15268552024-12-08 07:45:11 -06001360 return true;
Richard Henderson079b0802021-08-24 09:30:59 -07001361}
1362
Richard Henderson764d2ab2021-08-24 09:22:11 -07001363static bool fold_brcond2(OptContext *ctx, TCGOp *op)
1364{
Richard Henderson7e64b112023-10-24 16:53:56 -07001365 TCGCond cond;
1366 TCGArg label;
Richard Henderson7a2f7082021-08-26 07:06:39 -07001367 int i, inv = 0;
Richard Henderson764d2ab2021-08-24 09:22:11 -07001368
Richard Hendersonfb04ab72024-01-10 18:21:58 +11001369 i = do_constant_folding_cond2(ctx, op, &op->args[0]);
Richard Henderson7e64b112023-10-24 16:53:56 -07001370 cond = op->args[4];
1371 label = op->args[5];
Richard Henderson764d2ab2021-08-24 09:22:11 -07001372 if (i >= 0) {
1373 goto do_brcond_const;
1374 }
1375
1376 switch (cond) {
1377 case TCG_COND_LT:
1378 case TCG_COND_GE:
1379 /*
1380 * Simplify LT/GE comparisons vs zero to a single compare
1381 * vs the high word of the input.
1382 */
Richard Henderson27cdb852023-10-23 11:38:00 -07001383 if (arg_is_const_val(op->args[2], 0) &&
1384 arg_is_const_val(op->args[3], 0)) {
Richard Henderson764d2ab2021-08-24 09:22:11 -07001385 goto do_brcond_high;
1386 }
1387 break;
1388
1389 case TCG_COND_NE:
1390 inv = 1;
1391 QEMU_FALLTHROUGH;
1392 case TCG_COND_EQ:
1393 /*
1394 * Simplify EQ/NE comparisons where one of the pairs
1395 * can be simplified.
1396 */
Richard Henderson67f84c92021-08-25 08:00:20 -07001397 i = do_constant_folding_cond(TCG_TYPE_I32, op->args[0],
Richard Henderson764d2ab2021-08-24 09:22:11 -07001398 op->args[2], cond);
1399 switch (i ^ inv) {
1400 case 0:
1401 goto do_brcond_const;
1402 case 1:
1403 goto do_brcond_high;
1404 }
1405
Richard Henderson67f84c92021-08-25 08:00:20 -07001406 i = do_constant_folding_cond(TCG_TYPE_I32, op->args[1],
Richard Henderson764d2ab2021-08-24 09:22:11 -07001407 op->args[3], cond);
1408 switch (i ^ inv) {
1409 case 0:
1410 goto do_brcond_const;
1411 case 1:
Richard Hendersonceb9ee02023-10-23 23:44:27 -07001412 goto do_brcond_low;
1413 }
1414 break;
1415
1416 case TCG_COND_TSTEQ:
1417 case TCG_COND_TSTNE:
1418 if (arg_is_const_val(op->args[2], 0)) {
1419 goto do_brcond_high;
1420 }
1421 if (arg_is_const_val(op->args[3], 0)) {
1422 goto do_brcond_low;
Richard Henderson764d2ab2021-08-24 09:22:11 -07001423 }
1424 break;
1425
1426 default:
1427 break;
1428
Richard Hendersonceb9ee02023-10-23 23:44:27 -07001429 do_brcond_low:
1430 op->opc = INDEX_op_brcond_i32;
1431 op->args[1] = op->args[2];
1432 op->args[2] = cond;
1433 op->args[3] = label;
1434 return fold_brcond(ctx, op);
1435
Richard Henderson764d2ab2021-08-24 09:22:11 -07001436 do_brcond_high:
1437 op->opc = INDEX_op_brcond_i32;
1438 op->args[0] = op->args[1];
1439 op->args[1] = op->args[3];
1440 op->args[2] = cond;
1441 op->args[3] = label;
Richard Hendersonceb9ee02023-10-23 23:44:27 -07001442 return fold_brcond(ctx, op);
Richard Henderson764d2ab2021-08-24 09:22:11 -07001443
1444 do_brcond_const:
1445 if (i == 0) {
1446 tcg_op_remove(ctx->tcg, op);
1447 return true;
1448 }
1449 op->opc = INDEX_op_br;
1450 op->args[0] = label;
Richard Henderson15268552024-12-08 07:45:11 -06001451 finish_ebb(ctx);
1452 return true;
Richard Henderson764d2ab2021-08-24 09:22:11 -07001453 }
Richard Henderson15268552024-12-08 07:45:11 -06001454
1455 finish_bb(ctx);
1456 return true;
Richard Henderson764d2ab2021-08-24 09:22:11 -07001457}
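
/*
 * Illustrative example, not from the original source: when constant
 * folding decides one half of a double-word EQ compare, only the other
 * half still needs a runtime test.  Assuming x_lo is known to equal
 * 0x1234:
 *   brcond2_i32 x_lo, x_hi, 0x1234, 0, eq, L
 *   -> brcond_i32 x_hi, 0, eq, L
 */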
1458
Richard Henderson09bacdc2021-08-24 11:58:12 -07001459static bool fold_bswap(OptContext *ctx, TCGOp *op)
1460{
Richard Henderson57fe5c62021-08-26 12:04:46 -07001461 uint64_t z_mask, s_mask, sign;
Richard Hendersonfae450b2021-08-25 22:42:19 -07001462
Richard Henderson09bacdc2021-08-24 11:58:12 -07001463 if (arg_is_const(op->args[1])) {
1464 uint64_t t = arg_info(op->args[1])->val;
1465
Richard Henderson67f84c92021-08-25 08:00:20 -07001466 t = do_constant_folding(op->opc, ctx->type, t, op->args[2]);
Richard Henderson09bacdc2021-08-24 11:58:12 -07001467 return tcg_opt_gen_movi(ctx, op, op->args[0], t);
1468 }
Richard Hendersonfae450b2021-08-25 22:42:19 -07001469
1470 z_mask = arg_info(op->args[1])->z_mask;
Richard Henderson57fe5c62021-08-26 12:04:46 -07001471
Richard Hendersonfae450b2021-08-25 22:42:19 -07001472 switch (op->opc) {
1473 case INDEX_op_bswap16_i32:
1474 case INDEX_op_bswap16_i64:
1475 z_mask = bswap16(z_mask);
1476 sign = INT16_MIN;
1477 break;
1478 case INDEX_op_bswap32_i32:
1479 case INDEX_op_bswap32_i64:
1480 z_mask = bswap32(z_mask);
1481 sign = INT32_MIN;
1482 break;
1483 case INDEX_op_bswap64_i64:
1484 z_mask = bswap64(z_mask);
1485 sign = INT64_MIN;
1486 break;
1487 default:
1488 g_assert_not_reached();
1489 }
1490
Richard Henderson75c3bf32024-12-19 10:50:40 -08001491 s_mask = 0;
Richard Hendersonfae450b2021-08-25 22:42:19 -07001492 switch (op->args[2] & (TCG_BSWAP_OZ | TCG_BSWAP_OS)) {
1493 case TCG_BSWAP_OZ:
1494 break;
1495 case TCG_BSWAP_OS:
1496 /* If the sign bit may be 1, force all the bits above to 1. */
1497 if (z_mask & sign) {
1498 z_mask |= sign;
Richard Henderson57fe5c62021-08-26 12:04:46 -07001499 s_mask = sign << 1;
Richard Hendersonfae450b2021-08-25 22:42:19 -07001500 }
1501 break;
1502 default:
1503 /* The high bits are undefined: force all bits above the sign to 1. */
1504 z_mask |= sign << 1;
1505 break;
1506 }
1507 ctx->z_mask = z_mask;
Richard Henderson57fe5c62021-08-26 12:04:46 -07001508 ctx->s_mask = s_mask;
Richard Hendersonfae450b2021-08-25 22:42:19 -07001509
1510 return fold_masks(ctx, op);
Richard Henderson09bacdc2021-08-24 11:58:12 -07001511}
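
/*
 * Illustrative example, not from the original source: known-zero bits
 * swap along with the data.  If the input to bswap16_i32 (TCG_BSWAP_OZ)
 * has z_mask 0x00ff, the result has z_mask 0xff00, which later mask
 * folding may exploit.
 */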
1512
Richard Henderson5cf32be2021-08-24 08:17:08 -07001513static bool fold_call(OptContext *ctx, TCGOp *op)
1514{
1515 TCGContext *s = ctx->tcg;
1516 int nb_oargs = TCGOP_CALLO(op);
1517 int nb_iargs = TCGOP_CALLI(op);
1518 int flags, i;
1519
1520 init_arguments(ctx, op, nb_oargs + nb_iargs);
1521 copy_propagate(ctx, op, nb_oargs, nb_iargs);
1522
1523 /* If the function reads or writes globals, reset temp data. */
1524 flags = tcg_call_flags(op);
1525 if (!(flags & (TCG_CALL_NO_READ_GLOBALS | TCG_CALL_NO_WRITE_GLOBALS))) {
1526 int nb_globals = s->nb_globals;
1527
1528 for (i = 0; i < nb_globals; i++) {
1529 if (test_bit(i, ctx->temps_used.l)) {
Richard Henderson986cac12023-01-09 13:59:35 -08001530 reset_ts(ctx, &ctx->tcg->temps[i]);
Richard Henderson5cf32be2021-08-24 08:17:08 -07001531 }
1532 }
1533 }
1534
Richard Hendersonab84dc32023-08-23 23:04:24 -07001535 /* If the function has side effects, reset mem data. */
1536 if (!(flags & TCG_CALL_NO_SIDE_EFFECTS)) {
1537 remove_mem_copy_all(ctx);
1538 }
1539
Richard Henderson5cf32be2021-08-24 08:17:08 -07001540 /* Reset temp data for outputs. */
1541 for (i = 0; i < nb_oargs; i++) {
Richard Henderson986cac12023-01-09 13:59:35 -08001542 reset_temp(ctx, op->args[i]);
Richard Henderson5cf32be2021-08-24 08:17:08 -07001543 }
1544
1545 /* Stop optimizing MB across calls. */
1546 ctx->prev_mb = NULL;
1547 return true;
1548}
1549
Richard Henderson30dd0bf2021-08-24 10:51:34 -07001550static bool fold_count_zeros(OptContext *ctx, TCGOp *op)
1551{
Richard Hendersonfae450b2021-08-25 22:42:19 -07001552 uint64_t z_mask;
1553
Richard Henderson30dd0bf2021-08-24 10:51:34 -07001554 if (arg_is_const(op->args[1])) {
1555 uint64_t t = arg_info(op->args[1])->val;
1556
1557 if (t != 0) {
Richard Henderson67f84c92021-08-25 08:00:20 -07001558 t = do_constant_folding(op->opc, ctx->type, t, 0);
Richard Henderson30dd0bf2021-08-24 10:51:34 -07001559 return tcg_opt_gen_movi(ctx, op, op->args[0], t);
1560 }
1561 return tcg_opt_gen_mov(ctx, op, op->args[0], op->args[2]);
1562 }
Richard Hendersonfae450b2021-08-25 22:42:19 -07001563
1564 switch (ctx->type) {
1565 case TCG_TYPE_I32:
1566 z_mask = 31;
1567 break;
1568 case TCG_TYPE_I64:
1569 z_mask = 63;
1570 break;
1571 default:
1572 g_assert_not_reached();
1573 }
1574 ctx->z_mask = arg_info(op->args[2])->z_mask | z_mask;
Richard Henderson30dd0bf2021-08-24 10:51:34 -07001575 return false;
1576}
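
/*
 * Illustrative examples, not from the original source:
 *   clz_i32 r, 0x00800000, f  ->  movi_i32 r, 8
 *   clz_i32 r, 0, f           ->  mov_i32 r, f   (the fallback value)
 */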
1577
Richard Henderson2f9f08b2021-08-25 12:03:48 -07001578static bool fold_ctpop(OptContext *ctx, TCGOp *op)
1579{
Richard Hendersonfae450b2021-08-25 22:42:19 -07001580 if (fold_const1(ctx, op)) {
1581 return true;
1582 }
1583
1584 switch (ctx->type) {
1585 case TCG_TYPE_I32:
1586 ctx->z_mask = 32 | 31;
1587 break;
1588 case TCG_TYPE_I64:
1589 ctx->z_mask = 64 | 63;
1590 break;
1591 default:
1592 g_assert_not_reached();
1593 }
1594 return false;
Richard Henderson2f9f08b2021-08-25 12:03:48 -07001595}
1596
Richard Henderson1b1907b2021-08-24 10:47:04 -07001597static bool fold_deposit(OptContext *ctx, TCGOp *op)
1598{
Richard Henderson8f7a8402023-08-13 11:03:05 -07001599 TCGOpcode and_opc;
1600
Richard Henderson1b1907b2021-08-24 10:47:04 -07001601 if (arg_is_const(op->args[1]) && arg_is_const(op->args[2])) {
1602 uint64_t t1 = arg_info(op->args[1])->val;
1603 uint64_t t2 = arg_info(op->args[2])->val;
1604
1605 t1 = deposit64(t1, op->args[3], op->args[4], t2);
1606 return tcg_opt_gen_movi(ctx, op, op->args[0], t1);
1607 }
Richard Hendersonfae450b2021-08-25 22:42:19 -07001608
Richard Henderson8f7a8402023-08-13 11:03:05 -07001609 switch (ctx->type) {
1610 case TCG_TYPE_I32:
1611 and_opc = INDEX_op_and_i32;
1612 break;
1613 case TCG_TYPE_I64:
1614 and_opc = INDEX_op_and_i64;
1615 break;
1616 default:
1617 g_assert_not_reached();
1618 }
1619
1620 /* Inserting a value into zero at offset 0. */
Richard Henderson27cdb852023-10-23 11:38:00 -07001621 if (arg_is_const_val(op->args[1], 0) && op->args[3] == 0) {
Richard Henderson8f7a8402023-08-13 11:03:05 -07001622 uint64_t mask = MAKE_64BIT_MASK(0, op->args[4]);
1623
1624 op->opc = and_opc;
1625 op->args[1] = op->args[2];
Richard Henderson26aac972023-10-23 12:31:57 -07001626 op->args[2] = arg_new_constant(ctx, mask);
Richard Henderson8f7a8402023-08-13 11:03:05 -07001627 ctx->z_mask = mask & arg_info(op->args[1])->z_mask;
1628 return false;
1629 }
1630
1631 /* Inserting zero into a value. */
Richard Henderson27cdb852023-10-23 11:38:00 -07001632 if (arg_is_const_val(op->args[2], 0)) {
Richard Henderson8f7a8402023-08-13 11:03:05 -07001633 uint64_t mask = deposit64(-1, op->args[3], op->args[4], 0);
1634
1635 op->opc = and_opc;
Richard Henderson26aac972023-10-23 12:31:57 -07001636 op->args[2] = arg_new_constant(ctx, mask);
Richard Henderson8f7a8402023-08-13 11:03:05 -07001637 ctx->z_mask = mask & arg_info(op->args[1])->z_mask;
1638 return false;
1639 }
1640
Richard Hendersonfae450b2021-08-25 22:42:19 -07001641 ctx->z_mask = deposit64(arg_info(op->args[1])->z_mask,
1642 op->args[3], op->args[4],
1643 arg_info(op->args[2])->z_mask);
Richard Henderson1b1907b2021-08-24 10:47:04 -07001644 return false;
1645}
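
/*
 * Illustrative examples, not from the original source (c0 denotes a
 * constant-zero temp):
 *   deposit_i32 r, c0, x, 0, 8  ->  and_i32 r, x, 0xff
 *   deposit_i32 r, x, c0, 8, 8  ->  and_i32 r, x, ~0xff00
 */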
1646
Richard Henderson2f9f08b2021-08-25 12:03:48 -07001647static bool fold_divide(OptContext *ctx, TCGOp *op)
1648{
Richard Henderson2f9d9a32021-10-25 11:30:14 -07001649 if (fold_const2(ctx, op) ||
1650 fold_xi_to_x(ctx, op, 1)) {
1651 return true;
1652 }
1653 return false;
Richard Henderson2f9f08b2021-08-25 12:03:48 -07001654}
1655
Richard Henderson8cdb3fc2021-08-24 12:06:33 -07001656static bool fold_dup(OptContext *ctx, TCGOp *op)
1657{
1658 if (arg_is_const(op->args[1])) {
1659 uint64_t t = arg_info(op->args[1])->val;
1660 t = dup_const(TCGOP_VECE(op), t);
1661 return tcg_opt_gen_movi(ctx, op, op->args[0], t);
1662 }
1663 return false;
1664}
1665
1666static bool fold_dup2(OptContext *ctx, TCGOp *op)
1667{
1668 if (arg_is_const(op->args[1]) && arg_is_const(op->args[2])) {
1669 uint64_t t = deposit64(arg_info(op->args[1])->val, 32, 32,
1670 arg_info(op->args[2])->val);
1671 return tcg_opt_gen_movi(ctx, op, op->args[0], t);
1672 }
1673
1674 if (args_are_copies(op->args[1], op->args[2])) {
1675 op->opc = INDEX_op_dup_vec;
1676 TCGOP_VECE(op) = MO_32;
1677 }
1678 return false;
1679}
1680
Richard Henderson2f9f08b2021-08-25 12:03:48 -07001681static bool fold_eqv(OptContext *ctx, TCGOp *op)
1682{
Richard Henderson7a2f7082021-08-26 07:06:39 -07001683 if (fold_const2_commutative(ctx, op) ||
Richard Hendersona63ce0e2021-08-25 20:28:53 -07001684 fold_xi_to_x(ctx, op, -1) ||
Richard Henderson0e0a32b2021-08-24 13:18:01 -07001685 fold_xi_to_not(ctx, op, 0)) {
1686 return true;
1687 }
Richard Henderson3f2b1f82021-08-26 13:08:54 -07001688
1689 ctx->s_mask = arg_info(op->args[1])->s_mask
1690 & arg_info(op->args[2])->s_mask;
Richard Henderson0e0a32b2021-08-24 13:18:01 -07001691 return false;
Richard Henderson2f9f08b2021-08-25 12:03:48 -07001692}
1693
Richard Hendersonb6617c82021-08-24 10:44:53 -07001694static bool fold_extract(OptContext *ctx, TCGOp *op)
1695{
Richard Hendersonfae450b2021-08-25 22:42:19 -07001696 uint64_t z_mask_old, z_mask;
Richard Henderson57fe5c62021-08-26 12:04:46 -07001697 int pos = op->args[2];
1698 int len = op->args[3];
Richard Hendersonfae450b2021-08-25 22:42:19 -07001699
Richard Hendersonb6617c82021-08-24 10:44:53 -07001700 if (arg_is_const(op->args[1])) {
1701 uint64_t t;
1702
1703 t = arg_info(op->args[1])->val;
Richard Henderson57fe5c62021-08-26 12:04:46 -07001704 t = extract64(t, pos, len);
Richard Hendersonb6617c82021-08-24 10:44:53 -07001705 return tcg_opt_gen_movi(ctx, op, op->args[0], t);
1706 }
Richard Hendersonfae450b2021-08-25 22:42:19 -07001707
1708 z_mask_old = arg_info(op->args[1])->z_mask;
Richard Henderson57fe5c62021-08-26 12:04:46 -07001709 z_mask = extract64(z_mask_old, pos, len);
Richard Henderson045ace32024-12-19 10:33:51 -08001710 if (pos == 0 && fold_affected_mask(ctx, op, z_mask_old ^ z_mask)) {
1711 return true;
Richard Hendersonfae450b2021-08-25 22:42:19 -07001712 }
1713 ctx->z_mask = z_mask;
1714
1715 return fold_masks(ctx, op);
Richard Hendersonb6617c82021-08-24 10:44:53 -07001716}
1717
Richard Hendersondcd08992021-08-24 10:41:39 -07001718static bool fold_extract2(OptContext *ctx, TCGOp *op)
1719{
1720 if (arg_is_const(op->args[1]) && arg_is_const(op->args[2])) {
1721 uint64_t v1 = arg_info(op->args[1])->val;
1722 uint64_t v2 = arg_info(op->args[2])->val;
1723 int shr = op->args[3];
1724
1725 if (op->opc == INDEX_op_extract2_i64) {
1726 v1 >>= shr;
1727 v2 <<= 64 - shr;
1728 } else {
1729 v1 = (uint32_t)v1 >> shr;
Richard Henderson225bec02021-11-09 23:17:59 +01001730 v2 = (uint64_t)((int32_t)v2 << (32 - shr));
Richard Hendersondcd08992021-08-24 10:41:39 -07001731 }
1732 return tcg_opt_gen_movi(ctx, op, op->args[0], v1 | v2);
1733 }
1734 return false;
1735}
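
/*
 * Illustrative example, not from the original source: with both inputs
 * constant the extraction happens at translation time.  For
 *   extract2_i32 r, a, b, 8  with a = 0x11223344, b = 0xaabbccdd
 * the result is (a >> 8) | (b << 24) = 0xdd112233, emitted as a movi.
 */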
1736
Richard Henderson2f9f08b2021-08-25 12:03:48 -07001737static bool fold_exts(OptContext *ctx, TCGOp *op)
1738{
Richard Henderson57fe5c62021-08-26 12:04:46 -07001739 uint64_t s_mask_old, s_mask, z_mask, sign;
Richard Hendersonfae450b2021-08-25 22:42:19 -07001740 bool type_change = false;
1741
1742 if (fold_const1(ctx, op)) {
1743 return true;
1744 }
1745
Richard Henderson57fe5c62021-08-26 12:04:46 -07001746 z_mask = arg_info(op->args[1])->z_mask;
1747 s_mask = arg_info(op->args[1])->s_mask;
1748 s_mask_old = s_mask;
Richard Hendersonfae450b2021-08-25 22:42:19 -07001749
1750 switch (op->opc) {
1751 CASE_OP_32_64(ext8s):
1752 sign = INT8_MIN;
1753 z_mask = (uint8_t)z_mask;
1754 break;
1755 CASE_OP_32_64(ext16s):
1756 sign = INT16_MIN;
1757 z_mask = (uint16_t)z_mask;
1758 break;
1759 case INDEX_op_ext_i32_i64:
1760 type_change = true;
1761 QEMU_FALLTHROUGH;
1762 case INDEX_op_ext32s_i64:
1763 sign = INT32_MIN;
1764 z_mask = (uint32_t)z_mask;
1765 break;
1766 default:
1767 g_assert_not_reached();
1768 }
1769
1770 if (z_mask & sign) {
1771 z_mask |= sign;
Richard Hendersonfae450b2021-08-25 22:42:19 -07001772 }
Richard Henderson57fe5c62021-08-26 12:04:46 -07001773 s_mask |= sign << 1;
1774
Richard Hendersonfae450b2021-08-25 22:42:19 -07001775 ctx->z_mask = z_mask;
Richard Henderson57fe5c62021-08-26 12:04:46 -07001776 ctx->s_mask = s_mask;
Richard Henderson6d70ddc2024-12-21 21:08:10 -08001777    if (!type_change && fold_affected_mask(ctx, op, s_mask & ~s_mask_old)) {
Richard Henderson045ace32024-12-19 10:33:51 -08001778 return true;
Richard Henderson57fe5c62021-08-26 12:04:46 -07001779 }
Richard Hendersonfae450b2021-08-25 22:42:19 -07001780
1781 return fold_masks(ctx, op);
Richard Henderson2f9f08b2021-08-25 12:03:48 -07001782}
1783
1784static bool fold_extu(OptContext *ctx, TCGOp *op)
1785{
Richard Hendersonfae450b2021-08-25 22:42:19 -07001786 uint64_t z_mask_old, z_mask;
1787 bool type_change = false;
1788
1789 if (fold_const1(ctx, op)) {
1790 return true;
1791 }
1792
1793 z_mask_old = z_mask = arg_info(op->args[1])->z_mask;
1794
1795 switch (op->opc) {
1796 CASE_OP_32_64(ext8u):
1797 z_mask = (uint8_t)z_mask;
1798 break;
1799 CASE_OP_32_64(ext16u):
1800 z_mask = (uint16_t)z_mask;
1801 break;
1802 case INDEX_op_extrl_i64_i32:
1803 case INDEX_op_extu_i32_i64:
1804 type_change = true;
1805 QEMU_FALLTHROUGH;
1806 case INDEX_op_ext32u_i64:
1807 z_mask = (uint32_t)z_mask;
1808 break;
1809 case INDEX_op_extrh_i64_i32:
1810 type_change = true;
1811 z_mask >>= 32;
1812 break;
1813 default:
1814 g_assert_not_reached();
1815 }
1816
1817 ctx->z_mask = z_mask;
Richard Henderson045ace32024-12-19 10:33:51 -08001818 if (!type_change && fold_affected_mask(ctx, op, z_mask_old ^ z_mask)) {
1819 return true;
Richard Hendersonfae450b2021-08-25 22:42:19 -07001820 }
1821 return fold_masks(ctx, op);
Richard Henderson2f9f08b2021-08-25 12:03:48 -07001822}
1823
Richard Henderson3eefdf22021-08-25 11:06:43 -07001824static bool fold_mb(OptContext *ctx, TCGOp *op)
1825{
1826 /* Eliminate duplicate and redundant fence instructions. */
1827 if (ctx->prev_mb) {
1828 /*
1829 * Merge two barriers of the same type into one,
1830 * or a weaker barrier into a stronger one,
1831 * or two weaker barriers into a stronger one.
1832 * mb X; mb Y => mb X|Y
1833 * mb; strl => mb; st
1834 * ldaq; mb => ld; mb
1835 * ldaq; strl => ld; mb; st
1836 * Other combinations are also merged into a strong
1837 * barrier. This is stricter than specified but for
1838 * the purposes of TCG is better than not optimizing.
1839 */
1840 ctx->prev_mb->args[0] |= op->args[0];
1841 tcg_op_remove(ctx->tcg, op);
1842 } else {
1843 ctx->prev_mb = op;
1844 }
1845 return true;
1846}
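
/*
 * Illustrative example, not from the original source: adjacent fences
 * merge by OR-ing the barrier mask of the second into the first, e.g.
 *   mb TCG_MO_LD_LD;  mb TCG_MO_ST_ST
 *   -> mb (TCG_MO_LD_LD | TCG_MO_ST_ST)
 */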
1847
Richard Henderson2cfac7f2021-08-25 13:05:43 -07001848static bool fold_mov(OptContext *ctx, TCGOp *op)
1849{
1850 return tcg_opt_gen_mov(ctx, op, op->args[0], op->args[1]);
1851}
1852
Richard Henderson0c310a32021-08-24 10:37:24 -07001853static bool fold_movcond(OptContext *ctx, TCGOp *op)
1854{
Richard Henderson7a2f7082021-08-26 07:06:39 -07001855 int i;
Richard Henderson0c310a32021-08-24 10:37:24 -07001856
Richard Henderson141125e2024-09-06 21:00:10 -07001857 /* If true and false values are the same, eliminate the cmp. */
1858 if (args_are_copies(op->args[3], op->args[4])) {
1859 return tcg_opt_gen_mov(ctx, op, op->args[0], op->args[3]);
1860 }
1861
Richard Henderson7a2f7082021-08-26 07:06:39 -07001862 /*
1863 * Canonicalize the "false" input reg to match the destination reg so
1864 * that the tcg backend can implement a "move if true" operation.
1865 */
1866 if (swap_commutative(op->args[0], &op->args[4], &op->args[3])) {
Richard Henderson246c4b72023-10-24 16:36:50 -07001867 op->args[5] = tcg_invert_cond(op->args[5]);
Richard Henderson7a2f7082021-08-26 07:06:39 -07001868 }
1869
Richard Hendersonfb04ab72024-01-10 18:21:58 +11001870 i = do_constant_folding_cond1(ctx, op, NO_DEST, &op->args[1],
Richard Henderson246c4b72023-10-24 16:36:50 -07001871 &op->args[2], &op->args[5]);
Richard Henderson0c310a32021-08-24 10:37:24 -07001872 if (i >= 0) {
1873 return tcg_opt_gen_mov(ctx, op, op->args[0], op->args[4 - i]);
1874 }
1875
Richard Hendersonfae450b2021-08-25 22:42:19 -07001876 ctx->z_mask = arg_info(op->args[3])->z_mask
1877 | arg_info(op->args[4])->z_mask;
Richard Henderson3f2b1f82021-08-26 13:08:54 -07001878 ctx->s_mask = arg_info(op->args[3])->s_mask
1879 & arg_info(op->args[4])->s_mask;
Richard Hendersonfae450b2021-08-25 22:42:19 -07001880
Richard Henderson0c310a32021-08-24 10:37:24 -07001881 if (arg_is_const(op->args[3]) && arg_is_const(op->args[4])) {
1882 uint64_t tv = arg_info(op->args[3])->val;
1883 uint64_t fv = arg_info(op->args[4])->val;
Richard Henderson36355022023-08-04 23:24:04 +00001884 TCGOpcode opc, negopc = 0;
Richard Henderson246c4b72023-10-24 16:36:50 -07001885 TCGCond cond = op->args[5];
Richard Henderson0c310a32021-08-24 10:37:24 -07001886
Richard Henderson67f84c92021-08-25 08:00:20 -07001887 switch (ctx->type) {
1888 case TCG_TYPE_I32:
1889 opc = INDEX_op_setcond_i32;
Richard Henderson36355022023-08-04 23:24:04 +00001890 if (TCG_TARGET_HAS_negsetcond_i32) {
1891 negopc = INDEX_op_negsetcond_i32;
1892 }
1893 tv = (int32_t)tv;
1894 fv = (int32_t)fv;
Richard Henderson67f84c92021-08-25 08:00:20 -07001895 break;
1896 case TCG_TYPE_I64:
1897 opc = INDEX_op_setcond_i64;
Richard Henderson36355022023-08-04 23:24:04 +00001898 if (TCG_TARGET_HAS_negsetcond_i64) {
1899 negopc = INDEX_op_negsetcond_i64;
1900 }
Richard Henderson67f84c92021-08-25 08:00:20 -07001901 break;
1902 default:
1903 g_assert_not_reached();
1904 }
Richard Henderson0c310a32021-08-24 10:37:24 -07001905
1906 if (tv == 1 && fv == 0) {
1907 op->opc = opc;
1908 op->args[3] = cond;
1909 } else if (fv == 1 && tv == 0) {
1910 op->opc = opc;
1911 op->args[3] = tcg_invert_cond(cond);
Richard Henderson36355022023-08-04 23:24:04 +00001912 } else if (negopc) {
1913 if (tv == -1 && fv == 0) {
1914 op->opc = negopc;
1915 op->args[3] = cond;
1916 } else if (fv == -1 && tv == 0) {
1917 op->opc = negopc;
1918 op->args[3] = tcg_invert_cond(cond);
1919 }
Richard Henderson0c310a32021-08-24 10:37:24 -07001920 }
1921 }
1922 return false;
1923}
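
/*
 * Illustrative examples, not from the original source:
 *   movcond_i32 r, a, b, 1, 0, lt   ->  setcond_i32 r, a, b, lt
 *   movcond_i32 r, a, b, 0, 1, lt   ->  setcond_i32 r, a, b, ge
 *   movcond_i32 r, a, b, -1, 0, lt  ->  negsetcond_i32 r, a, b, lt
 * the last only when the backend provides negsetcond.
 */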
1924
Richard Henderson2f9f08b2021-08-25 12:03:48 -07001925static bool fold_mul(OptContext *ctx, TCGOp *op)
1926{
Richard Hendersone8679952021-08-25 13:19:52 -07001927 if (fold_const2(ctx, op) ||
Richard Henderson5b5cf472021-10-25 11:19:14 -07001928 fold_xi_to_i(ctx, op, 0) ||
1929 fold_xi_to_x(ctx, op, 1)) {
Richard Hendersone8679952021-08-25 13:19:52 -07001930 return true;
1931 }
1932 return false;
Richard Henderson2f9f08b2021-08-25 12:03:48 -07001933}
1934
1935static bool fold_mul_highpart(OptContext *ctx, TCGOp *op)
1936{
Richard Henderson7a2f7082021-08-26 07:06:39 -07001937 if (fold_const2_commutative(ctx, op) ||
Richard Hendersone8679952021-08-25 13:19:52 -07001938 fold_xi_to_i(ctx, op, 0)) {
1939 return true;
1940 }
1941 return false;
Richard Henderson2f9f08b2021-08-25 12:03:48 -07001942}
1943
Richard Henderson407112b2021-08-26 06:33:04 -07001944static bool fold_multiply2(OptContext *ctx, TCGOp *op)
Richard Henderson6b8ac0d2021-08-24 10:24:12 -07001945{
Richard Henderson7a2f7082021-08-26 07:06:39 -07001946 swap_commutative(op->args[0], &op->args[2], &op->args[3]);
1947
Richard Henderson6b8ac0d2021-08-24 10:24:12 -07001948 if (arg_is_const(op->args[2]) && arg_is_const(op->args[3])) {
Richard Henderson407112b2021-08-26 06:33:04 -07001949 uint64_t a = arg_info(op->args[2])->val;
1950 uint64_t b = arg_info(op->args[3])->val;
1951 uint64_t h, l;
Richard Henderson6b8ac0d2021-08-24 10:24:12 -07001952 TCGArg rl, rh;
Richard Henderson407112b2021-08-26 06:33:04 -07001953 TCGOp *op2;
1954
1955 switch (op->opc) {
1956 case INDEX_op_mulu2_i32:
1957 l = (uint64_t)(uint32_t)a * (uint32_t)b;
1958 h = (int32_t)(l >> 32);
1959 l = (int32_t)l;
1960 break;
1961 case INDEX_op_muls2_i32:
1962 l = (int64_t)(int32_t)a * (int32_t)b;
1963 h = l >> 32;
1964 l = (int32_t)l;
1965 break;
1966 case INDEX_op_mulu2_i64:
1967 mulu64(&l, &h, a, b);
1968 break;
1969 case INDEX_op_muls2_i64:
1970 muls64(&l, &h, a, b);
1971 break;
1972 default:
1973 g_assert_not_reached();
1974 }
Richard Henderson6b8ac0d2021-08-24 10:24:12 -07001975
1976 rl = op->args[0];
1977 rh = op->args[1];
Richard Henderson407112b2021-08-26 06:33:04 -07001978
1979 /* The proper opcode is supplied by tcg_opt_gen_mov. */
Philippe Mathieu-Daudéd4478942022-12-18 22:18:31 +01001980 op2 = tcg_op_insert_before(ctx->tcg, op, 0, 2);
Richard Henderson407112b2021-08-26 06:33:04 -07001981
1982 tcg_opt_gen_movi(ctx, op, rl, l);
1983 tcg_opt_gen_movi(ctx, op2, rh, h);
Richard Henderson6b8ac0d2021-08-24 10:24:12 -07001984 return true;
1985 }
1986 return false;
1987}
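
/*
 * Illustrative example, not from the original source: with both
 * multiplicands constant, the double-word product is computed at
 * translation time:
 *   mulu2_i32 rl, rh, 0x80000000, 2  ->  movi_i32 rl, 0
 *                                        movi_i32 rh, 1
 */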
1988
Richard Henderson2f9f08b2021-08-25 12:03:48 -07001989static bool fold_nand(OptContext *ctx, TCGOp *op)
1990{
Richard Henderson7a2f7082021-08-26 07:06:39 -07001991 if (fold_const2_commutative(ctx, op) ||
Richard Henderson0e0a32b2021-08-24 13:18:01 -07001992 fold_xi_to_not(ctx, op, -1)) {
1993 return true;
1994 }
Richard Henderson3f2b1f82021-08-26 13:08:54 -07001995
1996 ctx->s_mask = arg_info(op->args[1])->s_mask
1997 & arg_info(op->args[2])->s_mask;
Richard Henderson0e0a32b2021-08-24 13:18:01 -07001998 return false;
Richard Henderson2f9f08b2021-08-25 12:03:48 -07001999}
2000
Richard Hendersone25fe882024-04-04 20:53:50 +00002001static bool fold_neg_no_const(OptContext *ctx, TCGOp *op)
Richard Henderson2f9f08b2021-08-25 12:03:48 -07002002{
Richard Hendersonfae450b2021-08-25 22:42:19 -07002003 /* Set to 1 all bits to the left of the rightmost. */
Richard Hendersone25fe882024-04-04 20:53:50 +00002004 uint64_t z_mask = arg_info(op->args[1])->z_mask;
Richard Hendersonfae450b2021-08-25 22:42:19 -07002005 ctx->z_mask = -(z_mask & -z_mask);
2006
Richard Henderson9caca882021-08-24 13:30:32 -07002007 /*
2008 * Because of fold_sub_to_neg, we want to always return true,
2009 * via finish_folding.
2010 */
2011 finish_folding(ctx, op);
2012 return true;
Richard Henderson2f9f08b2021-08-25 12:03:48 -07002013}
2014
Richard Hendersone25fe882024-04-04 20:53:50 +00002015static bool fold_neg(OptContext *ctx, TCGOp *op)
2016{
2017 return fold_const1(ctx, op) || fold_neg_no_const(ctx, op);
2018}
2019
Richard Henderson2f9f08b2021-08-25 12:03:48 -07002020static bool fold_nor(OptContext *ctx, TCGOp *op)
2021{
Richard Henderson7a2f7082021-08-26 07:06:39 -07002022 if (fold_const2_commutative(ctx, op) ||
Richard Henderson0e0a32b2021-08-24 13:18:01 -07002023 fold_xi_to_not(ctx, op, 0)) {
2024 return true;
2025 }
Richard Henderson3f2b1f82021-08-26 13:08:54 -07002026
2027 ctx->s_mask = arg_info(op->args[1])->s_mask
2028 & arg_info(op->args[2])->s_mask;
Richard Henderson0e0a32b2021-08-24 13:18:01 -07002029 return false;
Richard Henderson2f9f08b2021-08-25 12:03:48 -07002030}
2031
2032static bool fold_not(OptContext *ctx, TCGOp *op)
2033{
Richard Henderson0e0a32b2021-08-24 13:18:01 -07002034 if (fold_const1(ctx, op)) {
2035 return true;
2036 }
2037
Richard Henderson3f2b1f82021-08-26 13:08:54 -07002038 ctx->s_mask = arg_info(op->args[1])->s_mask;
2039
Richard Henderson0e0a32b2021-08-24 13:18:01 -07002040 /* Because of fold_to_not, we want to always return true, via finish. */
2041 finish_folding(ctx, op);
2042 return true;
Richard Henderson2f9f08b2021-08-25 12:03:48 -07002043}
2044
2045static bool fold_or(OptContext *ctx, TCGOp *op)
2046{
Richard Henderson7a2f7082021-08-26 07:06:39 -07002047 if (fold_const2_commutative(ctx, op) ||
Richard Hendersona63ce0e2021-08-25 20:28:53 -07002048 fold_xi_to_x(ctx, op, 0) ||
Richard Hendersonca7bb042021-08-25 13:14:21 -07002049 fold_xx_to_x(ctx, op)) {
2050 return true;
2051 }
Richard Hendersonfae450b2021-08-25 22:42:19 -07002052
2053 ctx->z_mask = arg_info(op->args[1])->z_mask
2054 | arg_info(op->args[2])->z_mask;
Richard Henderson3f2b1f82021-08-26 13:08:54 -07002055 ctx->s_mask = arg_info(op->args[1])->s_mask
2056 & arg_info(op->args[2])->s_mask;
Richard Hendersonfae450b2021-08-25 22:42:19 -07002057 return fold_masks(ctx, op);
Richard Henderson2f9f08b2021-08-25 12:03:48 -07002058}
2059
2060static bool fold_orc(OptContext *ctx, TCGOp *op)
2061{
Richard Henderson0e0a32b2021-08-24 13:18:01 -07002062 if (fold_const2(ctx, op) ||
Richard Henderson4e858d92021-08-26 07:31:13 -07002063 fold_xx_to_i(ctx, op, -1) ||
Richard Hendersona63ce0e2021-08-25 20:28:53 -07002064 fold_xi_to_x(ctx, op, -1) ||
Richard Henderson0e0a32b2021-08-24 13:18:01 -07002065 fold_ix_to_not(ctx, op, 0)) {
2066 return true;
2067 }
Richard Henderson3f2b1f82021-08-26 13:08:54 -07002068
2069 ctx->s_mask = arg_info(op->args[1])->s_mask
2070 & arg_info(op->args[2])->s_mask;
Richard Henderson0e0a32b2021-08-24 13:18:01 -07002071 return false;
Richard Henderson2f9f08b2021-08-25 12:03:48 -07002072}
2073
Richard Henderson3eefdf22021-08-25 11:06:43 -07002074static bool fold_qemu_ld(OptContext *ctx, TCGOp *op)
2075{
Richard Hendersonfae450b2021-08-25 22:42:19 -07002076 const TCGOpDef *def = &tcg_op_defs[op->opc];
2077 MemOpIdx oi = op->args[def->nb_oargs + def->nb_iargs];
2078 MemOp mop = get_memop(oi);
2079 int width = 8 * memop_size(mop);
2080
Richard Henderson57fe5c62021-08-26 12:04:46 -07002081 if (width < 64) {
Richard Henderson75c3bf32024-12-19 10:50:40 -08002082 if (mop & MO_SIGN) {
2083 ctx->s_mask = MAKE_64BIT_MASK(width, 64 - width);
2084 } else {
Richard Henderson57fe5c62021-08-26 12:04:46 -07002085 ctx->z_mask = MAKE_64BIT_MASK(0, width);
Richard Henderson57fe5c62021-08-26 12:04:46 -07002086 }
Richard Hendersonfae450b2021-08-25 22:42:19 -07002087 }
2088
Richard Henderson3eefdf22021-08-25 11:06:43 -07002089 /* Opcodes that touch guest memory stop the mb optimization. */
2090 ctx->prev_mb = NULL;
2091 return false;
2092}
2093
2094static bool fold_qemu_st(OptContext *ctx, TCGOp *op)
2095{
2096 /* Opcodes that touch guest memory stop the mb optimization. */
2097 ctx->prev_mb = NULL;
2098 return false;
2099}
2100
Richard Henderson2f9f08b2021-08-25 12:03:48 -07002101static bool fold_remainder(OptContext *ctx, TCGOp *op)
2102{
Richard Henderson267c17e2021-10-25 11:30:33 -07002103 if (fold_const2(ctx, op) ||
2104 fold_xx_to_i(ctx, op, 0)) {
2105 return true;
2106 }
2107 return false;
Richard Henderson2f9f08b2021-08-25 12:03:48 -07002108}
2109
Richard Henderson8d65cda2024-03-26 16:00:40 -10002110static bool fold_setcond_zmask(OptContext *ctx, TCGOp *op, bool neg)
2111{
2112 uint64_t a_zmask, b_val;
2113 TCGCond cond;
2114
2115 if (!arg_is_const(op->args[2])) {
2116 return false;
2117 }
2118
2119 a_zmask = arg_info(op->args[1])->z_mask;
2120 b_val = arg_info(op->args[2])->val;
2121 cond = op->args[3];
2122
2123 if (ctx->type == TCG_TYPE_I32) {
2124 a_zmask = (uint32_t)a_zmask;
2125 b_val = (uint32_t)b_val;
2126 }
2127
2128 /*
2129 * A with only low bits set vs B with high bits set means that A < B.
2130 */
2131 if (a_zmask < b_val) {
2132 bool inv = false;
2133
2134 switch (cond) {
2135 case TCG_COND_NE:
2136 case TCG_COND_LEU:
2137 case TCG_COND_LTU:
2138 inv = true;
2139 /* fall through */
2140 case TCG_COND_GTU:
2141 case TCG_COND_GEU:
2142 case TCG_COND_EQ:
2143 return tcg_opt_gen_movi(ctx, op, op->args[0], neg ? -inv : inv);
2144 default:
2145 break;
2146 }
2147 }
2148
2149 /*
2150 * A with only lsb set is already boolean.
2151 */
2152 if (a_zmask <= 1) {
2153 bool convert = false;
2154 bool inv = false;
2155
2156 switch (cond) {
2157 case TCG_COND_EQ:
2158 inv = true;
2159 /* fall through */
2160 case TCG_COND_NE:
2161 convert = (b_val == 0);
2162 break;
2163 case TCG_COND_LTU:
2164 case TCG_COND_TSTEQ:
2165 inv = true;
2166 /* fall through */
2167 case TCG_COND_GEU:
2168 case TCG_COND_TSTNE:
2169 convert = (b_val == 1);
2170 break;
2171 default:
2172 break;
2173 }
2174 if (convert) {
2175 TCGOpcode add_opc, xor_opc, neg_opc;
2176
2177 if (!inv && !neg) {
2178 return tcg_opt_gen_mov(ctx, op, op->args[0], op->args[1]);
2179 }
2180
2181 switch (ctx->type) {
2182 case TCG_TYPE_I32:
2183 add_opc = INDEX_op_add_i32;
2184 neg_opc = INDEX_op_neg_i32;
2185 xor_opc = INDEX_op_xor_i32;
2186 break;
2187 case TCG_TYPE_I64:
2188 add_opc = INDEX_op_add_i64;
2189 neg_opc = INDEX_op_neg_i64;
2190 xor_opc = INDEX_op_xor_i64;
2191 break;
2192 default:
2193 g_assert_not_reached();
2194 }
2195
2196 if (!inv) {
2197 op->opc = neg_opc;
2198 } else if (neg) {
2199 op->opc = add_opc;
2200 op->args[2] = arg_new_constant(ctx, -1);
2201 } else {
2202 op->opc = xor_opc;
2203 op->args[2] = arg_new_constant(ctx, 1);
2204 }
2205 return false;
2206 }
2207 }
2208
2209 return false;
2210}
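
/*
 * Illustrative examples, not from the original source: when x is
 * already boolean (z_mask <= 1):
 *   setcond_i32 r, x, 0, ne     ->  mov_i32 r, x
 *   setcond_i32 r, x, 0, eq     ->  xor_i32 r, x, 1
 *   negsetcond_i32 r, x, 0, ne  ->  neg_i32 r, x
 */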
2211
Richard Hendersonceb9ee02023-10-23 23:44:27 -07002212static void fold_setcond_tst_pow2(OptContext *ctx, TCGOp *op, bool neg)
2213{
Paolo Bonziniff202812024-02-28 12:06:41 +01002214 TCGOpcode and_opc, sub_opc, xor_opc, neg_opc, shr_opc;
2215 TCGOpcode uext_opc = 0, sext_opc = 0;
Richard Hendersonceb9ee02023-10-23 23:44:27 -07002216 TCGCond cond = op->args[3];
2217 TCGArg ret, src1, src2;
2218 TCGOp *op2;
2219 uint64_t val;
2220 int sh;
2221 bool inv;
2222
2223 if (!is_tst_cond(cond) || !arg_is_const(op->args[2])) {
2224 return;
2225 }
2226
2227 src2 = op->args[2];
2228 val = arg_info(src2)->val;
2229 if (!is_power_of_2(val)) {
2230 return;
2231 }
2232 sh = ctz64(val);
2233
2234 switch (ctx->type) {
2235 case TCG_TYPE_I32:
2236 and_opc = INDEX_op_and_i32;
2237 sub_opc = INDEX_op_sub_i32;
2238 xor_opc = INDEX_op_xor_i32;
2239 shr_opc = INDEX_op_shr_i32;
2240 neg_opc = INDEX_op_neg_i32;
2241 if (TCG_TARGET_extract_i32_valid(sh, 1)) {
2242 uext_opc = TCG_TARGET_HAS_extract_i32 ? INDEX_op_extract_i32 : 0;
2243 sext_opc = TCG_TARGET_HAS_sextract_i32 ? INDEX_op_sextract_i32 : 0;
2244 }
2245 break;
2246 case TCG_TYPE_I64:
2247 and_opc = INDEX_op_and_i64;
2248 sub_opc = INDEX_op_sub_i64;
2249 xor_opc = INDEX_op_xor_i64;
2250 shr_opc = INDEX_op_shr_i64;
2251 neg_opc = INDEX_op_neg_i64;
2252 if (TCG_TARGET_extract_i64_valid(sh, 1)) {
2253 uext_opc = TCG_TARGET_HAS_extract_i64 ? INDEX_op_extract_i64 : 0;
2254 sext_opc = TCG_TARGET_HAS_sextract_i64 ? INDEX_op_sextract_i64 : 0;
2255 }
2256 break;
2257 default:
2258 g_assert_not_reached();
2259 }
2260
2261 ret = op->args[0];
2262 src1 = op->args[1];
2263 inv = cond == TCG_COND_TSTEQ;
2264
2265 if (sh && sext_opc && neg && !inv) {
2266 op->opc = sext_opc;
2267 op->args[1] = src1;
2268 op->args[2] = sh;
2269 op->args[3] = 1;
2270 return;
2271 } else if (sh && uext_opc) {
2272 op->opc = uext_opc;
2273 op->args[1] = src1;
2274 op->args[2] = sh;
2275 op->args[3] = 1;
2276 } else {
2277 if (sh) {
2278 op2 = tcg_op_insert_before(ctx->tcg, op, shr_opc, 3);
2279 op2->args[0] = ret;
2280 op2->args[1] = src1;
2281 op2->args[2] = arg_new_constant(ctx, sh);
2282 src1 = ret;
2283 }
2284 op->opc = and_opc;
2285 op->args[1] = src1;
2286 op->args[2] = arg_new_constant(ctx, 1);
2287 }
2288
2289 if (neg && inv) {
2290 op2 = tcg_op_insert_after(ctx->tcg, op, sub_opc, 3);
2291 op2->args[0] = ret;
2292 op2->args[1] = ret;
2293 op2->args[2] = arg_new_constant(ctx, 1);
2294 } else if (inv) {
2295 op2 = tcg_op_insert_after(ctx->tcg, op, xor_opc, 3);
2296 op2->args[0] = ret;
2297 op2->args[1] = ret;
2298 op2->args[2] = arg_new_constant(ctx, 1);
2299 } else if (neg) {
2300 op2 = tcg_op_insert_after(ctx->tcg, op, neg_opc, 2);
2301 op2->args[0] = ret;
2302 op2->args[1] = ret;
2303 }
2304}
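
/*
 * Illustrative examples, not from the original source, assuming the
 * host supports single-bit extract/sextract:
 *   setcond_i32 r, x, 0x10, tstne     ->  extract_i32 r, x, 4, 1
 *   setcond_i32 r, x, 0x10, tsteq     ->  extract_i32 r, x, 4, 1
 *                                         xor_i32 r, r, 1
 *   negsetcond_i32 r, x, 0x10, tstne  ->  sextract_i32 r, x, 4, 1
 */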
2305
Richard Hendersonc63ff552021-08-24 09:35:30 -07002306static bool fold_setcond(OptContext *ctx, TCGOp *op)
2307{
Richard Hendersonfb04ab72024-01-10 18:21:58 +11002308 int i = do_constant_folding_cond1(ctx, op, op->args[0], &op->args[1],
Richard Henderson246c4b72023-10-24 16:36:50 -07002309 &op->args[2], &op->args[3]);
Richard Hendersonc63ff552021-08-24 09:35:30 -07002310 if (i >= 0) {
2311 return tcg_opt_gen_movi(ctx, op, op->args[0], i);
2312 }
Richard Henderson8d65cda2024-03-26 16:00:40 -10002313
2314 if (fold_setcond_zmask(ctx, op, false)) {
2315 return true;
2316 }
Richard Hendersonceb9ee02023-10-23 23:44:27 -07002317 fold_setcond_tst_pow2(ctx, op, false);
Richard Hendersonfae450b2021-08-25 22:42:19 -07002318
2319 ctx->z_mask = 1;
Richard Hendersonc63ff552021-08-24 09:35:30 -07002320 return false;
2321}
2322
Richard Henderson36355022023-08-04 23:24:04 +00002323static bool fold_negsetcond(OptContext *ctx, TCGOp *op)
2324{
Richard Hendersonfb04ab72024-01-10 18:21:58 +11002325 int i = do_constant_folding_cond1(ctx, op, op->args[0], &op->args[1],
Richard Henderson246c4b72023-10-24 16:36:50 -07002326 &op->args[2], &op->args[3]);
Richard Henderson36355022023-08-04 23:24:04 +00002327 if (i >= 0) {
2328 return tcg_opt_gen_movi(ctx, op, op->args[0], -i);
2329 }
Richard Henderson8d65cda2024-03-26 16:00:40 -10002330
2331 if (fold_setcond_zmask(ctx, op, true)) {
2332 return true;
2333 }
Richard Hendersonceb9ee02023-10-23 23:44:27 -07002334 fold_setcond_tst_pow2(ctx, op, true);
Richard Henderson36355022023-08-04 23:24:04 +00002335
2336 /* Value is {0,-1} so all bits are repetitions of the sign. */
2337 ctx->s_mask = -1;
2338 return false;
2339}
2340
Richard Hendersonbc47b1a2021-08-24 09:09:35 -07002341static bool fold_setcond2(OptContext *ctx, TCGOp *op)
2342{
Richard Henderson7e64b112023-10-24 16:53:56 -07002343 TCGCond cond;
Richard Henderson7a2f7082021-08-26 07:06:39 -07002344 int i, inv = 0;
Richard Hendersonbc47b1a2021-08-24 09:09:35 -07002345
Richard Hendersonfb04ab72024-01-10 18:21:58 +11002346 i = do_constant_folding_cond2(ctx, op, &op->args[1]);
Richard Henderson7e64b112023-10-24 16:53:56 -07002347 cond = op->args[5];
Richard Hendersonbc47b1a2021-08-24 09:09:35 -07002348 if (i >= 0) {
2349 goto do_setcond_const;
2350 }
2351
2352 switch (cond) {
2353 case TCG_COND_LT:
2354 case TCG_COND_GE:
2355 /*
2356 * Simplify LT/GE comparisons vs zero to a single compare
2357 * vs the high word of the input.
2358 */
Richard Henderson27cdb852023-10-23 11:38:00 -07002359 if (arg_is_const_val(op->args[3], 0) &&
2360 arg_is_const_val(op->args[4], 0)) {
Richard Hendersonbc47b1a2021-08-24 09:09:35 -07002361 goto do_setcond_high;
2362 }
2363 break;
2364
2365 case TCG_COND_NE:
2366 inv = 1;
2367 QEMU_FALLTHROUGH;
2368 case TCG_COND_EQ:
2369 /*
2370 * Simplify EQ/NE comparisons where one of the pairs
2371 * can be simplified.
2372 */
Richard Henderson67f84c92021-08-25 08:00:20 -07002373 i = do_constant_folding_cond(TCG_TYPE_I32, op->args[1],
Richard Hendersonbc47b1a2021-08-24 09:09:35 -07002374 op->args[3], cond);
2375 switch (i ^ inv) {
2376 case 0:
2377 goto do_setcond_const;
2378 case 1:
2379 goto do_setcond_high;
2380 }
2381
Richard Henderson67f84c92021-08-25 08:00:20 -07002382 i = do_constant_folding_cond(TCG_TYPE_I32, op->args[2],
Richard Hendersonbc47b1a2021-08-24 09:09:35 -07002383 op->args[4], cond);
2384 switch (i ^ inv) {
2385 case 0:
2386 goto do_setcond_const;
2387 case 1:
Richard Hendersonceb9ee02023-10-23 23:44:27 -07002388 goto do_setcond_low;
2389 }
2390 break;
2391
2392 case TCG_COND_TSTEQ:
2393 case TCG_COND_TSTNE:
Richard Hendersona71d9df2024-06-30 19:46:23 -07002394 if (arg_is_const_val(op->args[3], 0)) {
Richard Hendersonceb9ee02023-10-23 23:44:27 -07002395 goto do_setcond_high;
2396 }
2397 if (arg_is_const_val(op->args[4], 0)) {
2398 goto do_setcond_low;
Richard Hendersonbc47b1a2021-08-24 09:09:35 -07002399 }
2400 break;
2401
2402 default:
2403 break;
2404
Richard Hendersonceb9ee02023-10-23 23:44:27 -07002405 do_setcond_low:
2406 op->args[2] = op->args[3];
2407 op->args[3] = cond;
2408 op->opc = INDEX_op_setcond_i32;
2409 return fold_setcond(ctx, op);
2410
Richard Hendersonbc47b1a2021-08-24 09:09:35 -07002411 do_setcond_high:
2412 op->args[1] = op->args[2];
2413 op->args[2] = op->args[4];
2414 op->args[3] = cond;
2415 op->opc = INDEX_op_setcond_i32;
Richard Hendersonceb9ee02023-10-23 23:44:27 -07002416 return fold_setcond(ctx, op);
Richard Hendersonbc47b1a2021-08-24 09:09:35 -07002417 }
Richard Hendersonfae450b2021-08-25 22:42:19 -07002418
2419 ctx->z_mask = 1;
Richard Hendersonbc47b1a2021-08-24 09:09:35 -07002420 return false;
2421
2422 do_setcond_const:
2423 return tcg_opt_gen_movi(ctx, op, op->args[0], i);
2424}
2425
Richard Henderson1f106542024-09-06 12:22:41 -07002426static bool fold_cmp_vec(OptContext *ctx, TCGOp *op)
2427{
2428 /* Canonicalize the comparison to put immediate second. */
2429 if (swap_commutative(NO_DEST, &op->args[1], &op->args[2])) {
2430 op->args[3] = tcg_swap_cond(op->args[3]);
2431 }
2432 return false;
2433}
2434
2435static bool fold_cmpsel_vec(OptContext *ctx, TCGOp *op)
2436{
2437 /* If true and false values are the same, eliminate the cmp. */
2438 if (args_are_copies(op->args[3], op->args[4])) {
2439 return tcg_opt_gen_mov(ctx, op, op->args[0], op->args[3]);
2440 }
2441
2442 /* Canonicalize the comparison to put immediate second. */
2443 if (swap_commutative(NO_DEST, &op->args[1], &op->args[2])) {
2444 op->args[5] = tcg_swap_cond(op->args[5]);
2445 }
2446 /*
2447 * Canonicalize the "false" input reg to match the destination,
2448 * so that the tcg backend can implement "move if true".
2449 */
2450 if (swap_commutative(op->args[0], &op->args[4], &op->args[3])) {
2451 op->args[5] = tcg_invert_cond(op->args[5]);
2452 }
2453 return false;
2454}
2455
Richard Hendersonb6617c82021-08-24 10:44:53 -07002456static bool fold_sextract(OptContext *ctx, TCGOp *op)
2457{
Richard Henderson57fe5c62021-08-26 12:04:46 -07002458 uint64_t z_mask, s_mask, s_mask_old;
2459 int pos = op->args[2];
2460 int len = op->args[3];
Richard Hendersonfae450b2021-08-25 22:42:19 -07002461
Richard Hendersonb6617c82021-08-24 10:44:53 -07002462 if (arg_is_const(op->args[1])) {
2463 uint64_t t;
2464
2465 t = arg_info(op->args[1])->val;
Richard Henderson57fe5c62021-08-26 12:04:46 -07002466 t = sextract64(t, pos, len);
Richard Hendersonb6617c82021-08-24 10:44:53 -07002467 return tcg_opt_gen_movi(ctx, op, op->args[0], t);
2468 }
Richard Hendersonfae450b2021-08-25 22:42:19 -07002469
Richard Henderson57fe5c62021-08-26 12:04:46 -07002470 z_mask = arg_info(op->args[1])->z_mask;
2471 z_mask = sextract64(z_mask, pos, len);
Richard Hendersonfae450b2021-08-25 22:42:19 -07002472 ctx->z_mask = z_mask;
2473
Richard Henderson57fe5c62021-08-26 12:04:46 -07002474 s_mask_old = arg_info(op->args[1])->s_mask;
2475 s_mask = sextract64(s_mask_old, pos, len);
2476 s_mask |= MAKE_64BIT_MASK(len, 64 - len);
2477 ctx->s_mask = s_mask;
2478
Richard Henderson6d70ddc2024-12-21 21:08:10 -08002479    if (pos == 0 && fold_affected_mask(ctx, op, s_mask & ~s_mask_old)) {
Richard Henderson045ace32024-12-19 10:33:51 -08002480 return true;
Richard Henderson57fe5c62021-08-26 12:04:46 -07002481 }
2482
Richard Hendersonfae450b2021-08-25 22:42:19 -07002483 return fold_masks(ctx, op);
Richard Hendersonb6617c82021-08-24 10:44:53 -07002484}
2485
Richard Henderson2f9f08b2021-08-25 12:03:48 -07002486static bool fold_shift(OptContext *ctx, TCGOp *op)
2487{
Richard Henderson93a967f2021-08-26 13:24:59 -07002488 uint64_t s_mask, z_mask, sign;
2489
Richard Hendersona63ce0e2021-08-25 20:28:53 -07002490 if (fold_const2(ctx, op) ||
Richard Hendersonda48e272021-08-25 20:42:04 -07002491 fold_ix_to_i(ctx, op, 0) ||
Richard Hendersona63ce0e2021-08-25 20:28:53 -07002492 fold_xi_to_x(ctx, op, 0)) {
2493 return true;
2494 }
Richard Hendersonfae450b2021-08-25 22:42:19 -07002495
Richard Henderson93a967f2021-08-26 13:24:59 -07002496 s_mask = arg_info(op->args[1])->s_mask;
2497 z_mask = arg_info(op->args[1])->z_mask;
2498
Richard Hendersonfae450b2021-08-25 22:42:19 -07002499 if (arg_is_const(op->args[2])) {
Richard Henderson93a967f2021-08-26 13:24:59 -07002500 int sh = arg_info(op->args[2])->val;
2501
2502 ctx->z_mask = do_constant_folding(op->opc, ctx->type, z_mask, sh);
2503
2504 s_mask = do_constant_folding(op->opc, ctx->type, s_mask, sh);
Richard Henderson93a967f2021-08-26 13:24:59 -07002505
Richard Hendersonfae450b2021-08-25 22:42:19 -07002506 return fold_masks(ctx, op);
2507 }
Richard Henderson93a967f2021-08-26 13:24:59 -07002508
2509 switch (op->opc) {
2510 CASE_OP_32_64(sar):
2511 /*
2512 * Arithmetic right shift will not reduce the number of
2513 * input sign repetitions.
2514 */
2515 ctx->s_mask = s_mask;
2516 break;
2517 CASE_OP_32_64(shr):
2518 /*
2519 * If the sign bit is known zero, then logical right shift
2520         * will not reduce the number of input sign repetitions.
2521 */
2522 sign = (s_mask & -s_mask) >> 1;
Richard Henderson2911e9b2024-03-26 11:21:38 -10002523 if (sign && !(z_mask & sign)) {
Richard Henderson93a967f2021-08-26 13:24:59 -07002524 ctx->s_mask = s_mask;
2525 }
2526 break;
2527 default:
2528 break;
2529 }
2530
Richard Hendersona63ce0e2021-08-25 20:28:53 -07002531 return false;
Richard Henderson2f9f08b2021-08-25 12:03:48 -07002532}
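
/*
 * Illustrative example, not from the original source: a constant shift
 * count shifts the known-bit masks as well.  If x has z_mask 0xff00,
 * then after "shr_i32 r, x, 8" the result has z_mask 0x00ff.
 */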
2533
Richard Henderson9caca882021-08-24 13:30:32 -07002534static bool fold_sub_to_neg(OptContext *ctx, TCGOp *op)
2535{
2536 TCGOpcode neg_op;
2537 bool have_neg;
2538
2539 if (!arg_is_const(op->args[1]) || arg_info(op->args[1])->val != 0) {
2540 return false;
2541 }
2542
2543 switch (ctx->type) {
2544 case TCG_TYPE_I32:
2545 neg_op = INDEX_op_neg_i32;
Richard Hendersonb701f192023-10-25 21:14:04 -07002546 have_neg = true;
Richard Henderson9caca882021-08-24 13:30:32 -07002547 break;
2548 case TCG_TYPE_I64:
2549 neg_op = INDEX_op_neg_i64;
Richard Hendersonb701f192023-10-25 21:14:04 -07002550 have_neg = true;
Richard Henderson9caca882021-08-24 13:30:32 -07002551 break;
2552 case TCG_TYPE_V64:
2553 case TCG_TYPE_V128:
2554 case TCG_TYPE_V256:
2555 neg_op = INDEX_op_neg_vec;
2556 have_neg = (TCG_TARGET_HAS_neg_vec &&
2557 tcg_can_emit_vec_op(neg_op, ctx->type, TCGOP_VECE(op)) > 0);
2558 break;
2559 default:
2560 g_assert_not_reached();
2561 }
2562 if (have_neg) {
2563 op->opc = neg_op;
2564 op->args[1] = op->args[2];
Richard Hendersone25fe882024-04-04 20:53:50 +00002565 return fold_neg_no_const(ctx, op);
Richard Henderson9caca882021-08-24 13:30:32 -07002566 }
2567 return false;
2568}
2569
Richard Hendersonc578ff12021-12-16 06:07:25 -08002570/* We cannot as yet do_constant_folding with vectors. */
2571static bool fold_sub_vec(OptContext *ctx, TCGOp *op)
Richard Henderson2f9f08b2021-08-25 12:03:48 -07002572{
Richard Hendersonc578ff12021-12-16 06:07:25 -08002573 if (fold_xx_to_i(ctx, op, 0) ||
Richard Hendersona63ce0e2021-08-25 20:28:53 -07002574 fold_xi_to_x(ctx, op, 0) ||
Richard Henderson9caca882021-08-24 13:30:32 -07002575 fold_sub_to_neg(ctx, op)) {
Richard Hendersoncbe42fb2021-08-25 13:02:00 -07002576 return true;
2577 }
2578 return false;
Richard Henderson2f9f08b2021-08-25 12:03:48 -07002579}
2580
Richard Hendersonc578ff12021-12-16 06:07:25 -08002581static bool fold_sub(OptContext *ctx, TCGOp *op)
2582{
Richard Henderson6334a962023-10-25 18:39:43 -07002583 if (fold_const2(ctx, op) || fold_sub_vec(ctx, op)) {
2584 return true;
2585 }
2586
2587 /* Fold sub r,x,i to add r,x,-i */
2588 if (arg_is_const(op->args[2])) {
2589 uint64_t val = arg_info(op->args[2])->val;
2590
2591 op->opc = (ctx->type == TCG_TYPE_I32
2592 ? INDEX_op_add_i32 : INDEX_op_add_i64);
2593 op->args[2] = arg_new_constant(ctx, -val);
2594 }
2595 return false;
Richard Hendersonc578ff12021-12-16 06:07:25 -08002596}
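
/*
 * Illustrative example, not from the original source:
 *   sub_i32 r, x, 5  ->  add_i32 r, x, -5
 * canonicalizing subtraction of a constant as addition of its negative,
 * which lets the backend use an immediate-add encoding.
 */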
2597
Richard Henderson9531c072021-08-26 06:51:39 -07002598static bool fold_sub2(OptContext *ctx, TCGOp *op)
Richard Hendersone3f7dc22021-08-24 10:30:38 -07002599{
Richard Henderson9531c072021-08-26 06:51:39 -07002600 return fold_addsub2(ctx, op, false);
Richard Hendersone3f7dc22021-08-24 10:30:38 -07002601}
2602
Richard Hendersonfae450b2021-08-25 22:42:19 -07002603static bool fold_tcg_ld(OptContext *ctx, TCGOp *op)
2604{
2605 /* We can't do any folding with a load, but we can record bits. */
2606 switch (op->opc) {
Richard Henderson57fe5c62021-08-26 12:04:46 -07002607 CASE_OP_32_64(ld8s):
2608 ctx->s_mask = MAKE_64BIT_MASK(8, 56);
2609 break;
Richard Hendersonfae450b2021-08-25 22:42:19 -07002610 CASE_OP_32_64(ld8u):
2611 ctx->z_mask = MAKE_64BIT_MASK(0, 8);
Richard Henderson57fe5c62021-08-26 12:04:46 -07002612 break;
2613 CASE_OP_32_64(ld16s):
2614 ctx->s_mask = MAKE_64BIT_MASK(16, 48);
Richard Hendersonfae450b2021-08-25 22:42:19 -07002615 break;
2616 CASE_OP_32_64(ld16u):
2617 ctx->z_mask = MAKE_64BIT_MASK(0, 16);
Richard Henderson57fe5c62021-08-26 12:04:46 -07002618 break;
2619 case INDEX_op_ld32s_i64:
2620 ctx->s_mask = MAKE_64BIT_MASK(32, 32);
Richard Hendersonfae450b2021-08-25 22:42:19 -07002621 break;
2622 case INDEX_op_ld32u_i64:
2623 ctx->z_mask = MAKE_64BIT_MASK(0, 32);
2624 break;
2625 default:
2626 g_assert_not_reached();
2627 }
2628 return false;
2629}
2630
Richard Hendersonab84dc32023-08-23 23:04:24 -07002631static bool fold_tcg_ld_memcopy(OptContext *ctx, TCGOp *op)
2632{
2633 TCGTemp *dst, *src;
2634 intptr_t ofs;
2635 TCGType type;
2636
2637 if (op->args[1] != tcgv_ptr_arg(tcg_env)) {
2638 return false;
2639 }
2640
2641 type = ctx->type;
2642 ofs = op->args[2];
2643 dst = arg_temp(op->args[0]);
2644 src = find_mem_copy_for(ctx, type, ofs);
2645 if (src && src->base_type == type) {
2646 return tcg_opt_gen_mov(ctx, op, temp_arg(dst), temp_arg(src));
2647 }
2648
2649 reset_ts(ctx, dst);
2650 record_mem_copy(ctx, type, dst, ofs, ofs + tcg_type_size(type) - 1);
2651 return true;
2652}
2653
2654static bool fold_tcg_st(OptContext *ctx, TCGOp *op)
2655{
2656 intptr_t ofs = op->args[2];
2657 intptr_t lm1;
2658
2659 if (op->args[1] != tcgv_ptr_arg(tcg_env)) {
2660 remove_mem_copy_all(ctx);
2661 return false;
2662 }
2663
2664 switch (op->opc) {
2665 CASE_OP_32_64(st8):
2666 lm1 = 0;
2667 break;
2668 CASE_OP_32_64(st16):
2669 lm1 = 1;
2670 break;
2671 case INDEX_op_st32_i64:
2672 case INDEX_op_st_i32:
2673 lm1 = 3;
2674 break;
2675 case INDEX_op_st_i64:
2676 lm1 = 7;
2677 break;
2678 case INDEX_op_st_vec:
2679 lm1 = tcg_type_size(ctx->type) - 1;
2680 break;
2681 default:
2682 g_assert_not_reached();
2683 }
2684 remove_mem_copy_in(ctx, ofs, ofs + lm1);
2685 return false;
2686}
2687
2688static bool fold_tcg_st_memcopy(OptContext *ctx, TCGOp *op)
2689{
2690 TCGTemp *src;
2691 intptr_t ofs, last;
2692 TCGType type;
2693
2694 if (op->args[1] != tcgv_ptr_arg(tcg_env)) {
2695 fold_tcg_st(ctx, op);
2696 return false;
2697 }
2698
2699 src = arg_temp(op->args[0]);
2700 ofs = op->args[2];
2701 type = ctx->type;
Richard Henderson3eaadae2023-08-23 23:13:06 -07002702
2703 /*
2704 * Eliminate duplicate stores of a constant.
2705 * This happens frequently when the target ISA zero-extends.
2706 */
2707 if (ts_is_const(src)) {
2708 TCGTemp *prev = find_mem_copy_for(ctx, type, ofs);
2709 if (src == prev) {
2710 tcg_op_remove(ctx->tcg, op);
2711 return true;
2712 }
2713 }
2714
Richard Hendersonab84dc32023-08-23 23:04:24 -07002715 last = ofs + tcg_type_size(type) - 1;
2716 remove_mem_copy_in(ctx, ofs, last);
2717 record_mem_copy(ctx, type, src, ofs, last);
2718 return false;
2719}
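
/*
 * Illustrative example, not from the original source ($ofs denotes some
 * fixed env offset, c0 a constant temp):
 *   st_i32 c0, env, $ofs
 *   st_i32 c0, env, $ofs   <- removed as a duplicate constant store
 */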
2720
Richard Henderson2f9f08b2021-08-25 12:03:48 -07002721static bool fold_xor(OptContext *ctx, TCGOp *op)
2722{
Richard Henderson7a2f7082021-08-26 07:06:39 -07002723 if (fold_const2_commutative(ctx, op) ||
Richard Henderson0e0a32b2021-08-24 13:18:01 -07002724 fold_xx_to_i(ctx, op, 0) ||
Richard Hendersona63ce0e2021-08-25 20:28:53 -07002725 fold_xi_to_x(ctx, op, 0) ||
Richard Henderson0e0a32b2021-08-24 13:18:01 -07002726 fold_xi_to_not(ctx, op, -1)) {
Richard Hendersoncbe42fb2021-08-25 13:02:00 -07002727 return true;
2728 }
Richard Hendersonfae450b2021-08-25 22:42:19 -07002729
2730 ctx->z_mask = arg_info(op->args[1])->z_mask
2731 | arg_info(op->args[2])->z_mask;
Richard Henderson3f2b1f82021-08-26 13:08:54 -07002732 ctx->s_mask = arg_info(op->args[1])->s_mask
2733 & arg_info(op->args[2])->s_mask;
Richard Hendersonfae450b2021-08-25 22:42:19 -07002734 return fold_masks(ctx, op);
Richard Henderson2f9f08b2021-08-25 12:03:48 -07002735}
2736
Richard Hendersone58b9772024-09-06 22:30:01 -07002737static bool fold_bitsel_vec(OptContext *ctx, TCGOp *op)
2738{
2739 /* If true and false values are the same, eliminate the cmp. */
2740 if (args_are_copies(op->args[2], op->args[3])) {
2741 return tcg_opt_gen_mov(ctx, op, op->args[0], op->args[2]);
2742 }
2743
2744 if (arg_is_const(op->args[2]) && arg_is_const(op->args[3])) {
2745 uint64_t tv = arg_info(op->args[2])->val;
2746 uint64_t fv = arg_info(op->args[3])->val;
2747
2748 if (tv == -1 && fv == 0) {
2749 return tcg_opt_gen_mov(ctx, op, op->args[0], op->args[1]);
2750 }
2751 if (tv == 0 && fv == -1) {
2752 if (TCG_TARGET_HAS_not_vec) {
2753 op->opc = INDEX_op_not_vec;
2754 return fold_not(ctx, op);
2755 } else {
2756 op->opc = INDEX_op_xor_vec;
2757 op->args[2] = arg_new_constant(ctx, -1);
2758 return fold_xor(ctx, op);
2759 }
2760 }
2761 }
2762 if (arg_is_const(op->args[2])) {
2763 uint64_t tv = arg_info(op->args[2])->val;
2764 if (tv == -1) {
2765 op->opc = INDEX_op_or_vec;
2766 op->args[2] = op->args[3];
2767 return fold_or(ctx, op);
2768 }
2769 if (tv == 0 && TCG_TARGET_HAS_andc_vec) {
2770 op->opc = INDEX_op_andc_vec;
2771 op->args[2] = op->args[1];
2772 op->args[1] = op->args[3];
2773 return fold_andc(ctx, op);
2774 }
2775 }
2776 if (arg_is_const(op->args[3])) {
2777 uint64_t fv = arg_info(op->args[3])->val;
2778 if (fv == 0) {
2779 op->opc = INDEX_op_and_vec;
2780 return fold_and(ctx, op);
2781 }
2782 if (fv == -1 && TCG_TARGET_HAS_orc_vec) {
2783 op->opc = INDEX_op_orc_vec;
2784 op->args[2] = op->args[1];
2785 op->args[1] = op->args[3];
2786 return fold_orc(ctx, op);
2787 }
2788 }
2789 return false;
2790}
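
/*
 * Illustrative examples, not from the original source: with a constant
 * true or false operand, bitsel degenerates into plain logic ops:
 *   bitsel_vec d, m, c(-1), f  ->  or_vec d, m, f
 *   bitsel_vec d, m, t, c0     ->  and_vec d, m, t
 */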
2791
Kirill Batuzov22613af2011-07-07 16:37:13 +04002792/* Propagate constants and copies, fold constant expressions. */
Aurelien Jarno36e60ef2015-06-04 21:53:27 +02002793void tcg_optimize(TCGContext *s)
Kirill Batuzov8f2e8c02011-07-07 16:37:12 +04002794{
Richard Henderson5cf32be2021-08-24 08:17:08 -07002795 int nb_temps, i;
Richard Hendersond0ed5152021-08-24 07:38:39 -07002796 TCGOp *op, *op_next;
Richard Hendersondc849882021-08-24 07:13:45 -07002797 OptContext ctx = { .tcg = s };
Richard Henderson5d8f5362012-09-21 10:13:38 -07002798
Richard Hendersonab84dc32023-08-23 23:04:24 -07002799 QSIMPLEQ_INIT(&ctx.mem_free);
2800
Kirill Batuzov22613af2011-07-07 16:37:13 +04002801 /* Array VALS has an element for each temp.
2802 If this temp holds a constant then its value is kept in VALS' element.
Aurelien Jarnoe590d4e2012-09-11 12:31:21 +02002803 If this temp is a copy of other ones then the other copies are
2804 available through the doubly linked circular list. */
Kirill Batuzov8f2e8c02011-07-07 16:37:12 +04002805
2806 nb_temps = s->nb_temps;
Richard Henderson8f17a972020-03-30 19:52:02 -07002807 for (i = 0; i < nb_temps; ++i) {
2808 s->temps[i].state_ptr = NULL;
2809 }
Kirill Batuzov8f2e8c02011-07-07 16:37:12 +04002810
Richard Henderson15fa08f2017-11-02 15:19:14 +01002811 QTAILQ_FOREACH_SAFE(op, &s->ops, link, op_next) {
Richard Hendersonc45cb8b2014-09-19 13:49:15 -07002812 TCGOpcode opc = op->opc;
Richard Henderson5cf32be2021-08-24 08:17:08 -07002813 const TCGOpDef *def;
Richard Henderson404a1482021-08-24 11:08:21 -07002814 bool done = false;
Richard Hendersonc45cb8b2014-09-19 13:49:15 -07002815
Richard Henderson5cf32be2021-08-24 08:17:08 -07002816 /* Calls are special. */
Richard Hendersonc45cb8b2014-09-19 13:49:15 -07002817 if (opc == INDEX_op_call) {
Richard Henderson5cf32be2021-08-24 08:17:08 -07002818 fold_call(&ctx, op);
2819 continue;
Richard Hendersoncf066672014-03-22 20:06:52 -07002820 }
Richard Henderson5cf32be2021-08-24 08:17:08 -07002821
2822 def = &tcg_op_defs[opc];
Richard Hendersonec5d4cb2021-08-24 08:20:27 -07002823 init_arguments(&ctx, op, def->nb_oargs + def->nb_iargs);
2824 copy_propagate(&ctx, op, def->nb_oargs, def->nb_iargs);
Kirill Batuzov22613af2011-07-07 16:37:13 +04002825
Richard Henderson67f84c92021-08-25 08:00:20 -07002826 /* Pre-compute the type of the operation. */
2827 if (def->flags & TCG_OPF_VECTOR) {
2828 ctx.type = TCG_TYPE_V64 + TCGOP_VECL(op);
2829 } else if (def->flags & TCG_OPF_64BIT) {
2830 ctx.type = TCG_TYPE_I64;
2831 } else {
2832 ctx.type = TCG_TYPE_I32;
2833 }
2834
Richard Henderson57fe5c62021-08-26 12:04:46 -07002835 /* Assume all bits affected, no bits known zero, no sign reps. */
Richard Hendersonfae450b2021-08-25 22:42:19 -07002836 ctx.z_mask = -1;
Richard Henderson57fe5c62021-08-26 12:04:46 -07002837 ctx.s_mask = 0;
Paolo Bonzini633f6502013-01-11 15:42:53 -08002838
Richard Henderson2cfac7f2021-08-25 13:05:43 -07002839 /*
2840 * Process each opcode.
2841 * Sorted alphabetically by opcode as much as possible.
2842 */
Richard Hendersonc45cb8b2014-09-19 13:49:15 -07002843 switch (opc) {
Richard Hendersonc578ff12021-12-16 06:07:25 -08002844 CASE_OP_32_64(add):
Richard Henderson2f9f08b2021-08-25 12:03:48 -07002845 done = fold_add(&ctx, op);
2846 break;
Richard Hendersonc578ff12021-12-16 06:07:25 -08002847 case INDEX_op_add_vec:
2848 done = fold_add_vec(&ctx, op);
2849 break;
Richard Henderson9531c072021-08-26 06:51:39 -07002850 CASE_OP_32_64(add2):
2851 done = fold_add2(&ctx, op);
Richard Hendersone3f7dc22021-08-24 10:30:38 -07002852 break;
Richard Henderson2f9f08b2021-08-25 12:03:48 -07002853 CASE_OP_32_64_VEC(and):
2854 done = fold_and(&ctx, op);
2855 break;
2856 CASE_OP_32_64_VEC(andc):
2857 done = fold_andc(&ctx, op);
2858 break;
Richard Henderson079b0802021-08-24 09:30:59 -07002859 CASE_OP_32_64(brcond):
2860 done = fold_brcond(&ctx, op);
2861 break;
Richard Henderson764d2ab2021-08-24 09:22:11 -07002862 case INDEX_op_brcond2_i32:
2863 done = fold_brcond2(&ctx, op);
2864 break;
Richard Henderson09bacdc2021-08-24 11:58:12 -07002865 CASE_OP_32_64(bswap16):
2866 CASE_OP_32_64(bswap32):
2867 case INDEX_op_bswap64_i64:
2868 done = fold_bswap(&ctx, op);
2869 break;
Richard Henderson30dd0bf2021-08-24 10:51:34 -07002870 CASE_OP_32_64(clz):
2871 CASE_OP_32_64(ctz):
2872 done = fold_count_zeros(&ctx, op);
2873 break;
Richard Henderson2f9f08b2021-08-25 12:03:48 -07002874 CASE_OP_32_64(ctpop):
2875 done = fold_ctpop(&ctx, op);
2876 break;
Richard Henderson1b1907b2021-08-24 10:47:04 -07002877 CASE_OP_32_64(deposit):
2878 done = fold_deposit(&ctx, op);
2879 break;
Richard Henderson2f9f08b2021-08-25 12:03:48 -07002880 CASE_OP_32_64(div):
2881 CASE_OP_32_64(divu):
2882 done = fold_divide(&ctx, op);
2883 break;
Richard Henderson8cdb3fc2021-08-24 12:06:33 -07002884 case INDEX_op_dup_vec:
2885 done = fold_dup(&ctx, op);
2886 break;
2887 case INDEX_op_dup2_vec:
2888 done = fold_dup2(&ctx, op);
2889 break;
Richard Hendersoned523472021-12-16 11:17:46 -08002890 CASE_OP_32_64_VEC(eqv):
Richard Henderson2f9f08b2021-08-25 12:03:48 -07002891 done = fold_eqv(&ctx, op);
2892 break;
Richard Hendersonb6617c82021-08-24 10:44:53 -07002893 CASE_OP_32_64(extract):
2894 done = fold_extract(&ctx, op);
2895 break;
Richard Hendersondcd08992021-08-24 10:41:39 -07002896 CASE_OP_32_64(extract2):
2897 done = fold_extract2(&ctx, op);
2898 break;
Richard Henderson2f9f08b2021-08-25 12:03:48 -07002899 CASE_OP_32_64(ext8s):
2900 CASE_OP_32_64(ext16s):
2901 case INDEX_op_ext32s_i64:
2902 case INDEX_op_ext_i32_i64:
2903 done = fold_exts(&ctx, op);
2904 break;
2905 CASE_OP_32_64(ext8u):
2906 CASE_OP_32_64(ext16u):
2907 case INDEX_op_ext32u_i64:
2908 case INDEX_op_extu_i32_i64:
2909 case INDEX_op_extrl_i64_i32:
2910 case INDEX_op_extrh_i64_i32:
2911 done = fold_extu(&ctx, op);
2912 break;
Richard Henderson57fe5c62021-08-26 12:04:46 -07002913 CASE_OP_32_64(ld8s):
Richard Hendersonfae450b2021-08-25 22:42:19 -07002914 CASE_OP_32_64(ld8u):
Richard Henderson57fe5c62021-08-26 12:04:46 -07002915 CASE_OP_32_64(ld16s):
Richard Hendersonfae450b2021-08-25 22:42:19 -07002916 CASE_OP_32_64(ld16u):
Richard Henderson57fe5c62021-08-26 12:04:46 -07002917 case INDEX_op_ld32s_i64:
Richard Hendersonfae450b2021-08-25 22:42:19 -07002918 case INDEX_op_ld32u_i64:
2919 done = fold_tcg_ld(&ctx, op);
2920 break;
Richard Hendersonab84dc32023-08-23 23:04:24 -07002921 case INDEX_op_ld_i32:
2922 case INDEX_op_ld_i64:
2923 case INDEX_op_ld_vec:
2924 done = fold_tcg_ld_memcopy(&ctx, op);
2925 break;
2926 CASE_OP_32_64(st8):
2927 CASE_OP_32_64(st16):
2928 case INDEX_op_st32_i64:
2929 done = fold_tcg_st(&ctx, op);
2930 break;
2931 case INDEX_op_st_i32:
2932 case INDEX_op_st_i64:
2933 case INDEX_op_st_vec:
2934 done = fold_tcg_st_memcopy(&ctx, op);
2935 break;
        case INDEX_op_mb:
            done = fold_mb(&ctx, op);
            break;
        CASE_OP_32_64_VEC(mov):
            done = fold_mov(&ctx, op);
            break;
        CASE_OP_32_64(movcond):
            done = fold_movcond(&ctx, op);
            break;
        CASE_OP_32_64(mul):
            done = fold_mul(&ctx, op);
            break;
        CASE_OP_32_64(mulsh):
        CASE_OP_32_64(muluh):
            done = fold_mul_highpart(&ctx, op);
            break;
        CASE_OP_32_64(muls2):
        CASE_OP_32_64(mulu2):
            done = fold_multiply2(&ctx, op);
            break;
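        /*
         * Negated bitwise ops and negation: constant folding plus
         * simple involution identities.
         */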
        CASE_OP_32_64_VEC(nand):
            done = fold_nand(&ctx, op);
            break;
        CASE_OP_32_64(neg):
            done = fold_neg(&ctx, op);
            break;
        CASE_OP_32_64_VEC(nor):
            done = fold_nor(&ctx, op);
            break;
        CASE_OP_32_64_VEC(not):
            done = fold_not(&ctx, op);
            break;
        CASE_OP_32_64_VEC(or):
            done = fold_or(&ctx, op);
            break;
        CASE_OP_32_64_VEC(orc):
            done = fold_orc(&ctx, op);
            break;
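        /*
         * Guest memory accesses are never deleted here; their folders
         * only refresh barrier and value-tracking state.
         */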
        case INDEX_op_qemu_ld_a32_i32:
        case INDEX_op_qemu_ld_a64_i32:
        case INDEX_op_qemu_ld_a32_i64:
        case INDEX_op_qemu_ld_a64_i64:
        case INDEX_op_qemu_ld_a32_i128:
        case INDEX_op_qemu_ld_a64_i128:
            done = fold_qemu_ld(&ctx, op);
            break;
        case INDEX_op_qemu_st8_a32_i32:
        case INDEX_op_qemu_st8_a64_i32:
        case INDEX_op_qemu_st_a32_i32:
        case INDEX_op_qemu_st_a64_i32:
        case INDEX_op_qemu_st_a32_i64:
        case INDEX_op_qemu_st_a64_i64:
        case INDEX_op_qemu_st_a32_i128:
        case INDEX_op_qemu_st_a64_i128:
            done = fold_qemu_st(&ctx, op);
            break;
        CASE_OP_32_64(rem):
        CASE_OP_32_64(remu):
            done = fold_remainder(&ctx, op);
            break;
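        /* Shifts and rotates: fold constants; a shift by zero is a plain move. */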
        CASE_OP_32_64(rotl):
        CASE_OP_32_64(rotr):
        CASE_OP_32_64(sar):
        CASE_OP_32_64(shl):
        CASE_OP_32_64(shr):
            done = fold_shift(&ctx, op);
            break;
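        /* Comparisons decided at translation time collapse to a constant. */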
        CASE_OP_32_64(setcond):
            done = fold_setcond(&ctx, op);
            break;
        CASE_OP_32_64(negsetcond):
            done = fold_negsetcond(&ctx, op);
            break;
        case INDEX_op_setcond2_i32:
            done = fold_setcond2(&ctx, op);
            break;
        case INDEX_op_cmp_vec:
            done = fold_cmp_vec(&ctx, op);
            break;
        case INDEX_op_cmpsel_vec:
            done = fold_cmpsel_vec(&ctx, op);
            break;
        case INDEX_op_bitsel_vec:
            done = fold_bitsel_vec(&ctx, op);
            break;
        CASE_OP_32_64(sextract):
            done = fold_sextract(&ctx, op);
            break;
        CASE_OP_32_64(sub):
            done = fold_sub(&ctx, op);
            break;
        case INDEX_op_sub_vec:
            done = fold_sub_vec(&ctx, op);
            break;
        CASE_OP_32_64(sub2):
            done = fold_sub2(&ctx, op);
            break;
        CASE_OP_32_64_VEC(xor):
            done = fold_xor(&ctx, op);
            break;
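        /*
         * Control-flow ops end the extended basic block: all known
         * values and copies must be forgotten.
         */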
        case INDEX_op_set_label:
        case INDEX_op_br:
        case INDEX_op_exit_tb:
        case INDEX_op_goto_tb:
        case INDEX_op_goto_ptr:
            finish_ebb(&ctx);
            done = true;
            break;
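        /* Everything else receives only the generic processing below. */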
        default:
            break;
        }

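        /*
         * A folder returns true once it has fully rewritten or removed
         * the op; otherwise fall through to generic constant and copy
         * propagation.
         */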
        if (!done) {
            finish_folding(&ctx, op);
        }
    }
}
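
/*
 * For orientation, a minimal sketch (not part of the original pass) of
 * the shape a typical folder takes.  It assumes the arg_is_const(),
 * arg_info() and tcg_opt_gen_movi() helpers defined earlier in this
 * file; the name fold_example_add is hypothetical.  Returning true
 * tells the dispatch loop above that the op was fully handled.
 */
static bool G_GNUC_UNUSED fold_example_add(OptContext *ctx, TCGOp *op)
{
    if (arg_is_const(op->args[1]) && arg_is_const(op->args[2])) {
        /* Both inputs known: replace the add with a constant move. */
        uint64_t v = arg_info(op->args[1])->val
                   + arg_info(op->args[2])->val;
        return tcg_opt_gen_movi(ctx, op, op->args[0], v);
    }
    /* Not handled; the generic finish_folding() pass runs instead. */
    return false;
}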