/*
 * Optimizations for Tiny Code Generator for QEMU
 *
 * Copyright (c) 2010 Samsung Electronics.
 * Contributed by Kirill Batuzov <batuzovk@ispras.ru>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include "qemu/osdep.h"
#include "qemu/int128.h"
#include "qemu/interval-tree.h"
#include "tcg/tcg-op-common.h"
#include "tcg-internal.h"
#include "tcg-has.h"

#define CASE_OP_32_64(x)                        \
        glue(glue(case INDEX_op_, x), _i32):    \
        glue(glue(case INDEX_op_, x), _i64)

#define CASE_OP_32_64_VEC(x)                    \
        glue(glue(case INDEX_op_, x), _i32):    \
        glue(glue(case INDEX_op_, x), _i64):    \
        glue(glue(case INDEX_op_, x), _vec)

typedef struct MemCopyInfo {
    IntervalTreeNode itree;
    QSIMPLEQ_ENTRY(MemCopyInfo) next;
    TCGTemp *ts;
    TCGType type;
} MemCopyInfo;

typedef struct TempOptInfo {
    bool is_const;
    TCGTemp *prev_copy;
    TCGTemp *next_copy;
    QSIMPLEQ_HEAD(, MemCopyInfo) mem_copy;
    uint64_t val;
    uint64_t z_mask;  /* mask bit is 0 if and only if value bit is 0 */
    uint64_t s_mask;  /* mask bit is 1 if value bit matches msb */
} TempOptInfo;
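
/*
 * Worked example of the two masks (illustrative): for the constant 5,
 * z_mask == 5 (only bits 0 and 2 can be nonzero) and
 * s_mask == 0xfffffffffffffff8 (bits 3..63 are all copies of bit 63).
 */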

typedef struct OptContext {
    TCGContext *tcg;
    TCGOp *prev_mb;
    TCGTempSet temps_used;

    IntervalTreeRoot mem_copy;
    QSIMPLEQ_HEAD(, MemCopyInfo) mem_free;

    /* In flight values from optimization. */
    TCGType type;
} OptContext;

static inline TempOptInfo *ts_info(TCGTemp *ts)
{
    return ts->state_ptr;
}

static inline TempOptInfo *arg_info(TCGArg arg)
{
    return ts_info(arg_temp(arg));
}

static inline bool ti_is_const(TempOptInfo *ti)
{
    return ti->is_const;
}

static inline uint64_t ti_const_val(TempOptInfo *ti)
{
    return ti->val;
}

static inline bool ti_is_const_val(TempOptInfo *ti, uint64_t val)
{
    return ti_is_const(ti) && ti_const_val(ti) == val;
}

static inline bool ts_is_const(TCGTemp *ts)
{
    return ti_is_const(ts_info(ts));
}

static inline bool ts_is_const_val(TCGTemp *ts, uint64_t val)
{
    return ti_is_const_val(ts_info(ts), val);
}

static inline bool arg_is_const(TCGArg arg)
{
    return ts_is_const(arg_temp(arg));
}

static inline bool arg_is_const_val(TCGArg arg, uint64_t val)
{
    return ts_is_const_val(arg_temp(arg), val);
}

static inline bool ts_is_copy(TCGTemp *ts)
{
    return ts_info(ts)->next_copy != ts;
}

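/*
 * Pick the longer-lived temp as the canonical copy.  This assumes
 * TCGTempKind orders short-lived kinds first (with TEMP_CONST last),
 * so the larger kind wins: e.g. a constant or global beats an EBB temp.
 */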
static TCGTemp *cmp_better_copy(TCGTemp *a, TCGTemp *b)
{
    return a->kind < b->kind ? b : a;
}

/* Initialize and activate a temporary. */
static void init_ts_info(OptContext *ctx, TCGTemp *ts)
{
    size_t idx = temp_idx(ts);
    TempOptInfo *ti;

    if (test_bit(idx, ctx->temps_used.l)) {
        return;
    }
    set_bit(idx, ctx->temps_used.l);

    ti = ts->state_ptr;
    if (ti == NULL) {
        ti = tcg_malloc(sizeof(TempOptInfo));
        ts->state_ptr = ti;
    }

    ti->next_copy = ts;
    ti->prev_copy = ts;
    QSIMPLEQ_INIT(&ti->mem_copy);
    if (ts->kind == TEMP_CONST) {
        ti->is_const = true;
        ti->val = ts->val;
        ti->z_mask = ts->val;
        ti->s_mask = INT64_MIN >> clrsb64(ts->val);
    } else {
        ti->is_const = false;
        ti->z_mask = -1;
        ti->s_mask = 0;
    }
}

static MemCopyInfo *mem_copy_first(OptContext *ctx, intptr_t s, intptr_t l)
{
    IntervalTreeNode *r = interval_tree_iter_first(&ctx->mem_copy, s, l);
    return r ? container_of(r, MemCopyInfo, itree) : NULL;
}

static MemCopyInfo *mem_copy_next(MemCopyInfo *mem, intptr_t s, intptr_t l)
{
    IntervalTreeNode *r = interval_tree_iter_next(&mem->itree, s, l);
    return r ? container_of(r, MemCopyInfo, itree) : NULL;
}

static void remove_mem_copy(OptContext *ctx, MemCopyInfo *mc)
{
    TCGTemp *ts = mc->ts;
    TempOptInfo *ti = ts_info(ts);

    interval_tree_remove(&mc->itree, &ctx->mem_copy);
    QSIMPLEQ_REMOVE(&ti->mem_copy, mc, MemCopyInfo, next);
    QSIMPLEQ_INSERT_TAIL(&ctx->mem_free, mc, next);
}

static void remove_mem_copy_in(OptContext *ctx, intptr_t s, intptr_t l)
{
    while (true) {
        MemCopyInfo *mc = mem_copy_first(ctx, s, l);
        if (!mc) {
            break;
        }
        remove_mem_copy(ctx, mc);
    }
}

static void remove_mem_copy_all(OptContext *ctx)
{
    remove_mem_copy_in(ctx, 0, -1);
    tcg_debug_assert(interval_tree_is_empty(&ctx->mem_copy));
}

static TCGTemp *find_better_copy(TCGTemp *ts)
{
    TCGTemp *i, *ret;

    /* If this is already readonly, we can't do better. */
    if (temp_readonly(ts)) {
        return ts;
    }

    ret = ts;
    for (i = ts_info(ts)->next_copy; i != ts; i = ts_info(i)->next_copy) {
        ret = cmp_better_copy(ret, i);
    }
    return ret;
}

static void move_mem_copies(TCGTemp *dst_ts, TCGTemp *src_ts)
{
    TempOptInfo *si = ts_info(src_ts);
    TempOptInfo *di = ts_info(dst_ts);
    MemCopyInfo *mc;

    QSIMPLEQ_FOREACH(mc, &si->mem_copy, next) {
        tcg_debug_assert(mc->ts == src_ts);
        mc->ts = dst_ts;
    }
    QSIMPLEQ_CONCAT(&di->mem_copy, &si->mem_copy);
}

/* Reset TEMP's state, possibly removing the temp from the list of copies. */
static void reset_ts(OptContext *ctx, TCGTemp *ts)
{
    TempOptInfo *ti = ts_info(ts);
    TCGTemp *pts = ti->prev_copy;
    TCGTemp *nts = ti->next_copy;
    TempOptInfo *pi = ts_info(pts);
    TempOptInfo *ni = ts_info(nts);

    ni->prev_copy = ti->prev_copy;
    pi->next_copy = ti->next_copy;
    ti->next_copy = ts;
    ti->prev_copy = ts;
    ti->is_const = false;
    ti->z_mask = -1;
    ti->s_mask = 0;

    if (!QSIMPLEQ_EMPTY(&ti->mem_copy)) {
        if (ts == nts) {
            /* Last temp copy being removed, the mem copies die. */
            MemCopyInfo *mc;
            QSIMPLEQ_FOREACH(mc, &ti->mem_copy, next) {
                interval_tree_remove(&mc->itree, &ctx->mem_copy);
            }
            QSIMPLEQ_CONCAT(&ctx->mem_free, &ti->mem_copy);
        } else {
            move_mem_copies(find_better_copy(nts), ts);
        }
    }
}

static void reset_temp(OptContext *ctx, TCGArg arg)
{
    reset_ts(ctx, arg_temp(arg));
}

static void record_mem_copy(OptContext *ctx, TCGType type,
                            TCGTemp *ts, intptr_t start, intptr_t last)
{
    MemCopyInfo *mc;
    TempOptInfo *ti;

    mc = QSIMPLEQ_FIRST(&ctx->mem_free);
    if (mc) {
        QSIMPLEQ_REMOVE_HEAD(&ctx->mem_free, next);
    } else {
        mc = tcg_malloc(sizeof(*mc));
    }

    memset(mc, 0, sizeof(*mc));
    mc->itree.start = start;
    mc->itree.last = last;
    mc->type = type;
    interval_tree_insert(&mc->itree, &ctx->mem_copy);

    ts = find_better_copy(ts);
    ti = ts_info(ts);
    mc->ts = ts;
    QSIMPLEQ_INSERT_TAIL(&ti->mem_copy, mc, next);
}

static bool ts_are_copies(TCGTemp *ts1, TCGTemp *ts2)
{
    TCGTemp *i;

    if (ts1 == ts2) {
        return true;
    }

    if (!ts_is_copy(ts1) || !ts_is_copy(ts2)) {
        return false;
    }

    for (i = ts_info(ts1)->next_copy; i != ts1; i = ts_info(i)->next_copy) {
        if (i == ts2) {
            return true;
        }
    }

    return false;
}

static bool args_are_copies(TCGArg arg1, TCGArg arg2)
{
    return ts_are_copies(arg_temp(arg1), arg_temp(arg2));
}

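/*
 * Look up a temp known to hold a copy of the value at memory offset @s.
 * The interval query [s, s] visits every recorded copy overlapping that
 * byte; we then require an exact start offset and a matching type.
 */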
static TCGTemp *find_mem_copy_for(OptContext *ctx, TCGType type, intptr_t s)
{
    MemCopyInfo *mc;

    for (mc = mem_copy_first(ctx, s, s); mc; mc = mem_copy_next(mc, s, s)) {
        if (mc->itree.start == s && mc->type == type) {
            return find_better_copy(mc->ts);
        }
    }
    return NULL;
}

static TCGArg arg_new_constant(OptContext *ctx, uint64_t val)
{
    TCGType type = ctx->type;
    TCGTemp *ts;

    if (type == TCG_TYPE_I32) {
        val = (int32_t)val;
    }

    ts = tcg_constant_internal(type, val);
    init_ts_info(ctx, ts);

    return temp_arg(ts);
}

static TCGArg arg_new_temp(OptContext *ctx)
{
    TCGTemp *ts = tcg_temp_new_internal(ctx->type, TEMP_EBB);
    init_ts_info(ctx, ts);
    return temp_arg(ts);
}

static TCGOp *opt_insert_after(OptContext *ctx, TCGOp *op,
                               TCGOpcode opc, unsigned narg)
{
    return tcg_op_insert_after(ctx->tcg, op, opc, ctx->type, narg);
}

static TCGOp *opt_insert_before(OptContext *ctx, TCGOp *op,
                                TCGOpcode opc, unsigned narg)
{
    return tcg_op_insert_before(ctx->tcg, op, opc, ctx->type, narg);
}

static bool tcg_opt_gen_mov(OptContext *ctx, TCGOp *op, TCGArg dst, TCGArg src)
{
    TCGTemp *dst_ts = arg_temp(dst);
    TCGTemp *src_ts = arg_temp(src);
    TempOptInfo *di;
    TempOptInfo *si;
    TCGOpcode new_op;

    if (ts_are_copies(dst_ts, src_ts)) {
        tcg_op_remove(ctx->tcg, op);
        return true;
    }

    reset_ts(ctx, dst_ts);
    di = ts_info(dst_ts);
    si = ts_info(src_ts);

    switch (ctx->type) {
    case TCG_TYPE_I32:
    case TCG_TYPE_I64:
        new_op = INDEX_op_mov;
        break;
    case TCG_TYPE_V64:
    case TCG_TYPE_V128:
    case TCG_TYPE_V256:
        /* TCGOP_TYPE and TCGOP_VECE remain unchanged. */
        new_op = INDEX_op_mov_vec;
        break;
    default:
        g_assert_not_reached();
    }
    op->opc = new_op;
    op->args[0] = dst;
    op->args[1] = src;

    di->z_mask = si->z_mask;
    di->s_mask = si->s_mask;

    if (src_ts->type == dst_ts->type) {
        TempOptInfo *ni = ts_info(si->next_copy);

        di->next_copy = si->next_copy;
        di->prev_copy = src_ts;
        ni->prev_copy = dst_ts;
        si->next_copy = dst_ts;
        di->is_const = si->is_const;
        di->val = si->val;

        if (!QSIMPLEQ_EMPTY(&si->mem_copy)
            && cmp_better_copy(src_ts, dst_ts) == dst_ts) {
            move_mem_copies(dst_ts, src_ts);
        }
    }
    return true;
}

static bool tcg_opt_gen_movi(OptContext *ctx, TCGOp *op,
                             TCGArg dst, uint64_t val)
{
    /* Convert movi to mov with constant temp. */
    return tcg_opt_gen_mov(ctx, op, dst, arg_new_constant(ctx, val));
}

static uint64_t do_constant_folding_2(TCGOpcode op, uint64_t x, uint64_t y)
{
    uint64_t l64, h64;

    switch (op) {
    case INDEX_op_add:
        return x + y;

    CASE_OP_32_64(sub):
        return x - y;

    CASE_OP_32_64(mul):
        return x * y;

    case INDEX_op_and:
    case INDEX_op_and_vec:
        return x & y;

    CASE_OP_32_64_VEC(or):
        return x | y;

    CASE_OP_32_64_VEC(xor):
        return x ^ y;

    case INDEX_op_shl_i32:
        return (uint32_t)x << (y & 31);

    case INDEX_op_shl_i64:
        return (uint64_t)x << (y & 63);

    case INDEX_op_shr_i32:
        return (uint32_t)x >> (y & 31);

    case INDEX_op_shr_i64:
        return (uint64_t)x >> (y & 63);

    case INDEX_op_sar_i32:
        return (int32_t)x >> (y & 31);

    case INDEX_op_sar_i64:
        return (int64_t)x >> (y & 63);

    case INDEX_op_rotr_i32:
        return ror32(x, y & 31);

    case INDEX_op_rotr_i64:
        return ror64(x, y & 63);

    case INDEX_op_rotl_i32:
        return rol32(x, y & 31);

    case INDEX_op_rotl_i64:
        return rol64(x, y & 63);

    CASE_OP_32_64_VEC(not):
        return ~x;

    CASE_OP_32_64(neg):
        return -x;

    case INDEX_op_andc:
    case INDEX_op_andc_vec:
        return x & ~y;

    CASE_OP_32_64_VEC(orc):
        return x | ~y;

    CASE_OP_32_64_VEC(eqv):
        return ~(x ^ y);

    CASE_OP_32_64_VEC(nand):
        return ~(x & y);

    CASE_OP_32_64_VEC(nor):
        return ~(x | y);

    case INDEX_op_clz_i32:
        return (uint32_t)x ? clz32(x) : y;

    case INDEX_op_clz_i64:
        return x ? clz64(x) : y;

    case INDEX_op_ctz_i32:
        return (uint32_t)x ? ctz32(x) : y;

    case INDEX_op_ctz_i64:
        return x ? ctz64(x) : y;

    case INDEX_op_ctpop_i32:
        return ctpop32(x);

    case INDEX_op_ctpop_i64:
        return ctpop64(x);

    CASE_OP_32_64(bswap16):
        x = bswap16(x);
        return y & TCG_BSWAP_OS ? (int16_t)x : x;

    CASE_OP_32_64(bswap32):
        x = bswap32(x);
        return y & TCG_BSWAP_OS ? (int32_t)x : x;

    case INDEX_op_bswap64_i64:
        return bswap64(x);

    case INDEX_op_ext_i32_i64:
        return (int32_t)x;

    case INDEX_op_extu_i32_i64:
    case INDEX_op_extrl_i64_i32:
        return (uint32_t)x;

    case INDEX_op_extrh_i64_i32:
        return (uint64_t)x >> 32;

    case INDEX_op_muluh_i32:
        return ((uint64_t)(uint32_t)x * (uint32_t)y) >> 32;
    case INDEX_op_mulsh_i32:
        return ((int64_t)(int32_t)x * (int32_t)y) >> 32;

    case INDEX_op_muluh_i64:
        mulu64(&l64, &h64, x, y);
        return h64;
    case INDEX_op_mulsh_i64:
        muls64(&l64, &h64, x, y);
        return h64;

    case INDEX_op_div_i32:
        /*
         * Avoid crashing on divide by zero; the result is otherwise
         * undefined, so any value will do.  "y ? : 1" is the GNU ?:
         * extension, yielding y when y is nonzero and 1 otherwise.
         */
        return (int32_t)x / ((int32_t)y ? : 1);
    case INDEX_op_divu_i32:
        return (uint32_t)x / ((uint32_t)y ? : 1);
    case INDEX_op_div_i64:
        return (int64_t)x / ((int64_t)y ? : 1);
    case INDEX_op_divu_i64:
        return (uint64_t)x / ((uint64_t)y ? : 1);

    case INDEX_op_rem_i32:
        return (int32_t)x % ((int32_t)y ? : 1);
    case INDEX_op_remu_i32:
        return (uint32_t)x % ((uint32_t)y ? : 1);
    case INDEX_op_rem_i64:
        return (int64_t)x % ((int64_t)y ? : 1);
    case INDEX_op_remu_i64:
        return (uint64_t)x % ((uint64_t)y ? : 1);

    default:
        g_assert_not_reached();
    }
}

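/*
 * Illustration: for a 32-bit add of 0x7fffffff and 1,
 * do_constant_folding_2 returns 0x80000000, and the (int32_t) cast
 * below sign-extends it to 0xffffffff80000000, matching how 32-bit
 * constants are represented in 64 bits elsewhere in this file.
 */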
static uint64_t do_constant_folding(TCGOpcode op, TCGType type,
                                    uint64_t x, uint64_t y)
{
    uint64_t res = do_constant_folding_2(op, x, y);
    if (type == TCG_TYPE_I32) {
        res = (int32_t)res;
    }
    return res;
}

static bool do_constant_folding_cond_32(uint32_t x, uint32_t y, TCGCond c)
{
    switch (c) {
    case TCG_COND_EQ:
        return x == y;
    case TCG_COND_NE:
        return x != y;
    case TCG_COND_LT:
        return (int32_t)x < (int32_t)y;
    case TCG_COND_GE:
        return (int32_t)x >= (int32_t)y;
    case TCG_COND_LE:
        return (int32_t)x <= (int32_t)y;
    case TCG_COND_GT:
        return (int32_t)x > (int32_t)y;
    case TCG_COND_LTU:
        return x < y;
    case TCG_COND_GEU:
        return x >= y;
    case TCG_COND_LEU:
        return x <= y;
    case TCG_COND_GTU:
        return x > y;
    case TCG_COND_TSTEQ:
        return (x & y) == 0;
    case TCG_COND_TSTNE:
        return (x & y) != 0;
    case TCG_COND_ALWAYS:
    case TCG_COND_NEVER:
        break;
    }
    g_assert_not_reached();
}

static bool do_constant_folding_cond_64(uint64_t x, uint64_t y, TCGCond c)
{
    switch (c) {
    case TCG_COND_EQ:
        return x == y;
    case TCG_COND_NE:
        return x != y;
    case TCG_COND_LT:
        return (int64_t)x < (int64_t)y;
    case TCG_COND_GE:
        return (int64_t)x >= (int64_t)y;
    case TCG_COND_LE:
        return (int64_t)x <= (int64_t)y;
    case TCG_COND_GT:
        return (int64_t)x > (int64_t)y;
    case TCG_COND_LTU:
        return x < y;
    case TCG_COND_GEU:
        return x >= y;
    case TCG_COND_LEU:
        return x <= y;
    case TCG_COND_GTU:
        return x > y;
    case TCG_COND_TSTEQ:
        return (x & y) == 0;
    case TCG_COND_TSTNE:
        return (x & y) != 0;
    case TCG_COND_ALWAYS:
    case TCG_COND_NEVER:
        break;
    }
    g_assert_not_reached();
}

static int do_constant_folding_cond_eq(TCGCond c)
{
    switch (c) {
    case TCG_COND_GT:
    case TCG_COND_LTU:
    case TCG_COND_LT:
    case TCG_COND_GTU:
    case TCG_COND_NE:
        return 0;
    case TCG_COND_GE:
    case TCG_COND_GEU:
    case TCG_COND_LE:
    case TCG_COND_LEU:
    case TCG_COND_EQ:
        return 1;
    case TCG_COND_TSTEQ:
    case TCG_COND_TSTNE:
        return -1;
    case TCG_COND_ALWAYS:
    case TCG_COND_NEVER:
        break;
    }
    g_assert_not_reached();
}

/*
 * Return -1 if the condition can't be simplified,
 * and the result of the condition (0 or 1) if it can.
 */
static int do_constant_folding_cond(TCGType type, TCGArg x,
                                    TCGArg y, TCGCond c)
{
    if (arg_is_const(x) && arg_is_const(y)) {
        uint64_t xv = arg_info(x)->val;
        uint64_t yv = arg_info(y)->val;

        switch (type) {
        case TCG_TYPE_I32:
            return do_constant_folding_cond_32(xv, yv, c);
        case TCG_TYPE_I64:
            return do_constant_folding_cond_64(xv, yv, c);
        default:
            /* Only scalar comparisons are optimizable */
            return -1;
        }
    } else if (args_are_copies(x, y)) {
        return do_constant_folding_cond_eq(c);
    } else if (arg_is_const_val(y, 0)) {
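        /*
         * Comparisons against zero are fixed for some conditions:
         * x <u 0 is never true and x >=u 0 always is, while
         * (x & 0) != 0 resp. == 0 is constant false resp. true.
         */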
        switch (c) {
        case TCG_COND_LTU:
        case TCG_COND_TSTNE:
            return 0;
        case TCG_COND_GEU:
        case TCG_COND_TSTEQ:
            return 1;
        default:
            return -1;
        }
    }
    return -1;
}

/**
 * swap_commutative:
 * @dest: TCGArg of the destination argument, or NO_DEST.
 * @p1: first paired argument
 * @p2: second paired argument
 *
 * If *@p1 is a constant and *@p2 is not, swap.
 * If *@p2 matches @dest, swap.
 * Return true if a swap was performed.
 */

#define NO_DEST  temp_arg(NULL)

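/*
 * Examples: "add t0, $5, t1" becomes "add t0, t1, $5" (constant moved
 * to the second operand), and "add t0, t1, t0" becomes "add t0, t0, t1"
 * (the "op a, a, b" form preferred below).
 */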
static bool swap_commutative(TCGArg dest, TCGArg *p1, TCGArg *p2)
{
    TCGArg a1 = *p1, a2 = *p2;
    int sum = 0;
    sum += arg_is_const(a1);
    sum -= arg_is_const(a2);

    /* Prefer the constant in second argument, and then the form
       op a, a, b, which is better handled on non-RISC hosts. */
    if (sum > 0 || (sum == 0 && dest == a2)) {
        *p1 = a2;
        *p2 = a1;
        return true;
    }
    return false;
}

static bool swap_commutative2(TCGArg *p1, TCGArg *p2)
{
    int sum = 0;
    sum += arg_is_const(p1[0]);
    sum += arg_is_const(p1[1]);
    sum -= arg_is_const(p2[0]);
    sum -= arg_is_const(p2[1]);
    if (sum > 0) {
        TCGArg t;
        t = p1[0], p1[0] = p2[0], p2[0] = t;
        t = p1[1], p1[1] = p2[1], p2[1] = t;
        return true;
    }
    return false;
}

/*
 * Return -1 if the condition can't be simplified,
 * and the result of the condition (0 or 1) if it can.
 */
static int do_constant_folding_cond1(OptContext *ctx, TCGOp *op, TCGArg dest,
                                     TCGArg *p1, TCGArg *p2, TCGArg *pcond)
{
    TCGCond cond;
    TempOptInfo *i1;
    bool swap;
    int r;

    swap = swap_commutative(dest, p1, p2);
    cond = *pcond;
    if (swap) {
        *pcond = cond = tcg_swap_cond(cond);
    }

    r = do_constant_folding_cond(ctx->type, *p1, *p2, cond);
    if (r >= 0) {
        return r;
    }
    if (!is_tst_cond(cond)) {
        return -1;
    }

    i1 = arg_info(*p1);

    /*
     * TSTNE x,x -> NE x,0
     * TSTNE x,i -> NE x,0 if i includes all nonzero bits of x
     */
    if (args_are_copies(*p1, *p2) ||
        (arg_is_const(*p2) && (i1->z_mask & ~arg_info(*p2)->val) == 0)) {
        *p2 = arg_new_constant(ctx, 0);
        *pcond = tcg_tst_eqne_cond(cond);
        return -1;
    }

    /* TSTNE x,i -> LT x,0 if i only includes sign bit copies */
    if (arg_is_const(*p2) && (arg_info(*p2)->val & ~i1->s_mask) == 0) {
        *p2 = arg_new_constant(ctx, 0);
        *pcond = tcg_tst_ltge_cond(cond);
        return -1;
    }
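
    /*
     * Worked example of the folds above: if x is known to fit in 8 bits
     * (z_mask == 0xff), "tstne x, 0xff" tests every bit that can be
     * nonzero and becomes "ne x, 0"; if instead the immediate selects
     * only sign-bit copies (all within s_mask), "tstne" becomes "lt x, 0".
     */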

    /* Expand to AND with a temporary if no backend support. */
    if (!TCG_TARGET_HAS_tst) {
        TCGOp *op2 = opt_insert_before(ctx, op, INDEX_op_and, 3);
        TCGArg tmp = arg_new_temp(ctx);

        op2->args[0] = tmp;
        op2->args[1] = *p1;
        op2->args[2] = *p2;

        *p1 = tmp;
        *p2 = arg_new_constant(ctx, 0);
        *pcond = tcg_tst_eqne_cond(cond);
    }
    return -1;
}

static int do_constant_folding_cond2(OptContext *ctx, TCGOp *op, TCGArg *args)
{
    TCGArg al, ah, bl, bh;
    TCGCond c;
    bool swap;
    int r;

    swap = swap_commutative2(args, args + 2);
    c = args[4];
    if (swap) {
        args[4] = c = tcg_swap_cond(c);
    }

    al = args[0];
    ah = args[1];
    bl = args[2];
    bh = args[3];

    if (arg_is_const(bl) && arg_is_const(bh)) {
        tcg_target_ulong blv = arg_info(bl)->val;
        tcg_target_ulong bhv = arg_info(bh)->val;
        uint64_t b = deposit64(blv, 32, 32, bhv);

        if (arg_is_const(al) && arg_is_const(ah)) {
            tcg_target_ulong alv = arg_info(al)->val;
            tcg_target_ulong ahv = arg_info(ah)->val;
            uint64_t a = deposit64(alv, 32, 32, ahv);

            r = do_constant_folding_cond_64(a, b, c);
            if (r >= 0) {
                return r;
            }
        }

        if (b == 0) {
            switch (c) {
            case TCG_COND_LTU:
            case TCG_COND_TSTNE:
                return 0;
            case TCG_COND_GEU:
            case TCG_COND_TSTEQ:
                return 1;
            default:
                break;
            }
        }

        /* TSTNE x,-1 -> NE x,0 */
        if (b == -1 && is_tst_cond(c)) {
            args[3] = args[2] = arg_new_constant(ctx, 0);
            args[4] = tcg_tst_eqne_cond(c);
            return -1;
        }

        /* TSTNE x,sign -> LT x,0 */
        if (b == INT64_MIN && is_tst_cond(c)) {
            /* bl must be 0, so copy that to bh */
            args[3] = bl;
            args[4] = tcg_tst_ltge_cond(c);
            return -1;
        }
    }

    if (args_are_copies(al, bl) && args_are_copies(ah, bh)) {
        r = do_constant_folding_cond_eq(c);
        if (r >= 0) {
            return r;
        }

        /* TSTNE x,x -> NE x,0 */
        if (is_tst_cond(c)) {
            args[3] = args[2] = arg_new_constant(ctx, 0);
            args[4] = tcg_tst_eqne_cond(c);
            return -1;
        }
    }

    /* Expand to AND with a temporary if no backend support. */
    if (!TCG_TARGET_HAS_tst && is_tst_cond(c)) {
        TCGOp *op1 = opt_insert_before(ctx, op, INDEX_op_and, 3);
        TCGOp *op2 = opt_insert_before(ctx, op, INDEX_op_and, 3);
        TCGArg t1 = arg_new_temp(ctx);
        TCGArg t2 = arg_new_temp(ctx);

        op1->args[0] = t1;
        op1->args[1] = al;
        op1->args[2] = bl;
        op2->args[0] = t2;
        op2->args[1] = ah;
        op2->args[2] = bh;

        args[0] = t1;
        args[1] = t2;
        args[3] = args[2] = arg_new_constant(ctx, 0);
        args[4] = tcg_tst_eqne_cond(c);
    }
    return -1;
}

static void init_arguments(OptContext *ctx, TCGOp *op, int nb_args)
{
    for (int i = 0; i < nb_args; i++) {
        TCGTemp *ts = arg_temp(op->args[i]);
        init_ts_info(ctx, ts);
    }
}

static void copy_propagate(OptContext *ctx, TCGOp *op,
                           int nb_oargs, int nb_iargs)
{
    for (int i = nb_oargs; i < nb_oargs + nb_iargs; i++) {
        TCGTemp *ts = arg_temp(op->args[i]);
        if (ts_is_copy(ts)) {
            op->args[i] = temp_arg(find_better_copy(ts));
        }
    }
}

static void finish_bb(OptContext *ctx)
{
    /* We only optimize memory barriers within a basic block, not across. */
    ctx->prev_mb = NULL;
}

static void finish_ebb(OptContext *ctx)
{
    finish_bb(ctx);
    /* We only track values within an extended basic block. */
    memset(&ctx->temps_used, 0, sizeof(ctx->temps_used));
    remove_mem_copy_all(ctx);
}

static bool finish_folding(OptContext *ctx, TCGOp *op)
{
    const TCGOpDef *def = &tcg_op_defs[op->opc];
    int i, nb_oargs;

    nb_oargs = def->nb_oargs;
    for (i = 0; i < nb_oargs; i++) {
        TCGTemp *ts = arg_temp(op->args[i]);
        reset_ts(ctx, ts);
    }
    return true;
}

/*
 * The fold_* functions return true when processing is complete,
 * usually by folding the operation to a constant or to a copy,
 * and calling tcg_opt_gen_{mov,movi}.  They may do other things,
 * like collect information about the value produced, for use in
 * optimizing a subsequent operation.
 *
 * These first fold_* functions are all helpers, used by other
 * folders for more specific operations.
 */

static bool fold_const1(OptContext *ctx, TCGOp *op)
{
    if (arg_is_const(op->args[1])) {
        uint64_t t;

        t = arg_info(op->args[1])->val;
        t = do_constant_folding(op->opc, ctx->type, t, 0);
        return tcg_opt_gen_movi(ctx, op, op->args[0], t);
    }
    return false;
}

static bool fold_const2(OptContext *ctx, TCGOp *op)
{
    if (arg_is_const(op->args[1]) && arg_is_const(op->args[2])) {
        uint64_t t1 = arg_info(op->args[1])->val;
        uint64_t t2 = arg_info(op->args[2])->val;

        t1 = do_constant_folding(op->opc, ctx->type, t1, t2);
        return tcg_opt_gen_movi(ctx, op, op->args[0], t1);
    }
    return false;
}

static bool fold_commutative(OptContext *ctx, TCGOp *op)
{
    swap_commutative(op->args[0], &op->args[1], &op->args[2]);
    return false;
}

static bool fold_const2_commutative(OptContext *ctx, TCGOp *op)
{
    swap_commutative(op->args[0], &op->args[1], &op->args[2]);
    return fold_const2(ctx, op);
}

/*
 * Record "zero" and "sign" masks for the single output of @op.
 * See TempOptInfo definition of z_mask and s_mask.
 * If z_mask allows, fold the output to constant zero.
 * The passed s_mask may be augmented by z_mask.
 */
static bool fold_masks_zs(OptContext *ctx, TCGOp *op,
                          uint64_t z_mask, int64_t s_mask)
{
    const TCGOpDef *def = &tcg_op_defs[op->opc];
    TCGTemp *ts;
    TempOptInfo *ti;
    int rep;

    /* Only single-output opcodes are supported here. */
    tcg_debug_assert(def->nb_oargs == 1);

    /*
     * 32-bit ops generate 32-bit results, which for the purpose of
     * simplifying tcg are sign-extended.  Certainly that's how we
     * represent our constants elsewhere.  Note that the bits will
     * be reset properly for a 64-bit value when encountering the
     * type changing opcodes.
     */
    if (ctx->type == TCG_TYPE_I32) {
        z_mask = (int32_t)z_mask;
        s_mask |= INT32_MIN;
    }

    if (z_mask == 0) {
        return tcg_opt_gen_movi(ctx, op, op->args[0], 0);
    }

    ts = arg_temp(op->args[0]);
    reset_ts(ctx, ts);

    ti = ts_info(ts);
    ti->z_mask = z_mask;

    /* Canonicalize s_mask and incorporate data from z_mask. */
    rep = clz64(~s_mask);
    rep = MAX(rep, clz64(z_mask));
    rep = MAX(rep - 1, 0);
    ti->s_mask = INT64_MIN >> rep;
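
    /*
     * E.g. with z_mask == 0xff and incoming s_mask == 0: clz64(~0) is 0,
     * clz64(0xff) is 56, so rep becomes 55 and the stored s_mask is
     * INT64_MIN >> 55, i.e. bits 8..63 -- exactly the bits that z_mask
     * proves are zero and therefore copies of the (zero) msb.
     */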

    return true;
}

static bool fold_masks_z(OptContext *ctx, TCGOp *op, uint64_t z_mask)
{
    return fold_masks_zs(ctx, op, z_mask, 0);
}

static bool fold_masks_s(OptContext *ctx, TCGOp *op, uint64_t s_mask)
{
    return fold_masks_zs(ctx, op, -1, s_mask);
}

/*
 * An "affected" mask bit is 0 if and only if the result is identical
 * to the first input.  Thus if the entire mask is 0, the operation
 * is equivalent to a copy.
 */
static bool fold_affected_mask(OptContext *ctx, TCGOp *op, uint64_t a_mask)
{
    if (ctx->type == TCG_TYPE_I32) {
        a_mask = (uint32_t)a_mask;
    }
    if (a_mask == 0) {
        return tcg_opt_gen_mov(ctx, op, op->args[0], op->args[1]);
    }
    return false;
}

/*
 * Convert @op to NOT, if NOT is supported by the host.
 * Return true if the conversion is successful, which will still
 * indicate that the processing is complete.
 */
static bool fold_not(OptContext *ctx, TCGOp *op);
static bool fold_to_not(OptContext *ctx, TCGOp *op, int idx)
{
    TCGOpcode not_op;
    bool have_not;

    switch (ctx->type) {
    case TCG_TYPE_I32:
        not_op = INDEX_op_not_i32;
        have_not = TCG_TARGET_HAS_not_i32;
        break;
    case TCG_TYPE_I64:
        not_op = INDEX_op_not_i64;
        have_not = TCG_TARGET_HAS_not_i64;
        break;
    case TCG_TYPE_V64:
    case TCG_TYPE_V128:
    case TCG_TYPE_V256:
        not_op = INDEX_op_not_vec;
        have_not = TCG_TARGET_HAS_not_vec;
        break;
    default:
        g_assert_not_reached();
    }
    if (have_not) {
        op->opc = not_op;
        op->args[1] = op->args[idx];
        return fold_not(ctx, op);
    }
    return false;
}

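/*
 * The helpers below each match a constant or repeated operand and apply
 * an algebraic identity: e.g. "x + 0 == x" (fold_xi_to_x with @i == 0),
 * "x ^ x == 0" (fold_xx_to_i with @i == 0), "x | x == x" (fold_xx_to_x).
 */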
/* If the binary operation has first argument @i, fold to @i. */
static bool fold_ix_to_i(OptContext *ctx, TCGOp *op, uint64_t i)
{
    if (arg_is_const_val(op->args[1], i)) {
        return tcg_opt_gen_movi(ctx, op, op->args[0], i);
    }
    return false;
}

/* If the binary operation has first argument @i, fold to NOT. */
static bool fold_ix_to_not(OptContext *ctx, TCGOp *op, uint64_t i)
{
    if (arg_is_const_val(op->args[1], i)) {
        return fold_to_not(ctx, op, 2);
    }
    return false;
}

/* If the binary operation has second argument @i, fold to @i. */
static bool fold_xi_to_i(OptContext *ctx, TCGOp *op, uint64_t i)
{
    if (arg_is_const_val(op->args[2], i)) {
        return tcg_opt_gen_movi(ctx, op, op->args[0], i);
    }
    return false;
}

/* If the binary operation has second argument @i, fold to identity. */
static bool fold_xi_to_x(OptContext *ctx, TCGOp *op, uint64_t i)
{
    if (arg_is_const_val(op->args[2], i)) {
        return tcg_opt_gen_mov(ctx, op, op->args[0], op->args[1]);
    }
    return false;
}

/* If the binary operation has second argument @i, fold to NOT. */
static bool fold_xi_to_not(OptContext *ctx, TCGOp *op, uint64_t i)
{
    if (arg_is_const_val(op->args[2], i)) {
        return fold_to_not(ctx, op, 1);
    }
    return false;
}

/* If the binary operation has both arguments equal, fold to @i. */
static bool fold_xx_to_i(OptContext *ctx, TCGOp *op, uint64_t i)
{
    if (args_are_copies(op->args[1], op->args[2])) {
        return tcg_opt_gen_movi(ctx, op, op->args[0], i);
    }
    return false;
}

/* If the binary operation has both arguments equal, fold to identity. */
static bool fold_xx_to_x(OptContext *ctx, TCGOp *op)
{
    if (args_are_copies(op->args[1], op->args[2])) {
        return tcg_opt_gen_mov(ctx, op, op->args[0], op->args[1]);
    }
    return false;
}

/*
 * These outermost fold_<op> functions are sorted alphabetically.
 *
 * The ordering of the transformations should be:
 *   1) those that produce a constant
 *   2) those that produce a copy
 *   3) those that produce information about the result value.
 */

static bool fold_or(OptContext *ctx, TCGOp *op);
static bool fold_orc(OptContext *ctx, TCGOp *op);
static bool fold_xor(OptContext *ctx, TCGOp *op);

static bool fold_add(OptContext *ctx, TCGOp *op)
{
    if (fold_const2_commutative(ctx, op) ||
        fold_xi_to_x(ctx, op, 0)) {
        return true;
    }
    return finish_folding(ctx, op);
}

/* We cannot as yet do_constant_folding with vectors. */
static bool fold_add_vec(OptContext *ctx, TCGOp *op)
{
    if (fold_commutative(ctx, op) ||
        fold_xi_to_x(ctx, op, 0)) {
        return true;
    }
    return finish_folding(ctx, op);
}

static bool fold_addsub2(OptContext *ctx, TCGOp *op, bool add)
{
    bool a_const = arg_is_const(op->args[2]) && arg_is_const(op->args[3]);
    bool b_const = arg_is_const(op->args[4]) && arg_is_const(op->args[5]);

    if (a_const && b_const) {
        uint64_t al = arg_info(op->args[2])->val;
        uint64_t ah = arg_info(op->args[3])->val;
        uint64_t bl = arg_info(op->args[4])->val;
        uint64_t bh = arg_info(op->args[5])->val;
        TCGArg rl, rh;
        TCGOp *op2;

        if (ctx->type == TCG_TYPE_I32) {
            uint64_t a = deposit64(al, 32, 32, ah);
            uint64_t b = deposit64(bl, 32, 32, bh);

            if (add) {
                a += b;
            } else {
                a -= b;
            }

            al = sextract64(a, 0, 32);
            ah = sextract64(a, 32, 32);
        } else {
            Int128 a = int128_make128(al, ah);
            Int128 b = int128_make128(bl, bh);

            if (add) {
                a = int128_add(a, b);
            } else {
                a = int128_sub(a, b);
            }

            al = int128_getlo(a);
            ah = int128_gethi(a);
        }

        rl = op->args[0];
        rh = op->args[1];

        /* The proper opcode is supplied by tcg_opt_gen_mov. */
        op2 = opt_insert_before(ctx, op, 0, 2);

        tcg_opt_gen_movi(ctx, op, rl, al);
        tcg_opt_gen_movi(ctx, op2, rh, ah);
        return true;
    }

    /* Fold sub2 r,x,i to add2 r,x,-i */
    if (!add && b_const) {
        uint64_t bl = arg_info(op->args[4])->val;
        uint64_t bh = arg_info(op->args[5])->val;

        /* Negate the two parts without assembling and disassembling. */
        bl = -bl;
        bh = ~bh + !bl;
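        /*
         * E.g. negating the 128-bit value {bh=1, bl=0}: bl = -0 = 0,
         * so !bl supplies the carry and bh = ~1 + 1 = 0xffffffffffffffff,
         * giving {bh=-1, bl=0}, i.e. -(1 << 64).
         */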

        op->opc = (ctx->type == TCG_TYPE_I32
                   ? INDEX_op_add2_i32 : INDEX_op_add2_i64);
        op->args[4] = arg_new_constant(ctx, bl);
        op->args[5] = arg_new_constant(ctx, bh);
    }
    return finish_folding(ctx, op);
}

static bool fold_add2(OptContext *ctx, TCGOp *op)
{
    /* Note that the high and low parts may be independently swapped. */
    swap_commutative(op->args[0], &op->args[2], &op->args[4]);
    swap_commutative(op->args[1], &op->args[3], &op->args[5]);

    return fold_addsub2(ctx, op, true);
}

static bool fold_and(OptContext *ctx, TCGOp *op)
{
    uint64_t z1, z2, z_mask, s_mask;
    TempOptInfo *t1, *t2;

    if (fold_const2_commutative(ctx, op) ||
        fold_xi_to_i(ctx, op, 0) ||
        fold_xi_to_x(ctx, op, -1) ||
        fold_xx_to_x(ctx, op)) {
        return true;
    }

    t1 = arg_info(op->args[1]);
    t2 = arg_info(op->args[2]);
    z1 = t1->z_mask;
    z2 = t2->z_mask;

    /*
     * Known-zeros does not imply known-ones.  Therefore unless
     * arg2 is constant, we can't infer affected bits from it.
     */
    if (ti_is_const(t2) && fold_affected_mask(ctx, op, z1 & ~z2)) {
        return true;
    }

    z_mask = z1 & z2;
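    /* E.g. z1 == 0x0f and z2 == 0x3c leave z_mask == 0x0c. */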
1321
1322 /*
1323 * Sign repetitions are perforce all identical, whether they are 1 or 0.
1324 * Bitwise operations preserve the relative quantity of the repetitions.
1325 */
1326 s_mask = t1->s_mask & t2->s_mask;
1327
1328 return fold_masks_zs(ctx, op, z_mask, s_mask);
Richard Henderson2f9f08b2021-08-25 12:03:48 -07001329}
1330
1331static bool fold_andc(OptContext *ctx, TCGOp *op)
1332{
Richard Henderson21e2b5f2024-12-08 18:56:55 -06001333 uint64_t z_mask, s_mask;
1334 TempOptInfo *t1, *t2;
Richard Hendersonfae450b2021-08-25 22:42:19 -07001335
Richard Hendersoncbe42fb2021-08-25 13:02:00 -07001336 if (fold_const2(ctx, op) ||
Richard Henderson0e0a32b2021-08-24 13:18:01 -07001337 fold_xx_to_i(ctx, op, 0) ||
Richard Hendersona63ce0e2021-08-25 20:28:53 -07001338 fold_xi_to_x(ctx, op, 0) ||
Richard Henderson0e0a32b2021-08-24 13:18:01 -07001339 fold_ix_to_not(ctx, op, -1)) {
Richard Hendersoncbe42fb2021-08-25 13:02:00 -07001340 return true;
1341 }
Richard Hendersonfae450b2021-08-25 22:42:19 -07001342
Richard Henderson21e2b5f2024-12-08 18:56:55 -06001343 t1 = arg_info(op->args[1]);
1344 t2 = arg_info(op->args[2]);
1345 z_mask = t1->z_mask;
Richard Hendersonfae450b2021-08-25 22:42:19 -07001346
Richard Henderson899281c2023-11-15 11:18:55 -08001347 if (ti_is_const(t2)) {
1348 /* Fold andc r,x,i to and r,x,~i. */
1349 switch (ctx->type) {
1350 case TCG_TYPE_I32:
1351 case TCG_TYPE_I64:
1352 op->opc = INDEX_op_and;
1353 break;
1354 case TCG_TYPE_V64:
1355 case TCG_TYPE_V128:
1356 case TCG_TYPE_V256:
1357 op->opc = INDEX_op_and_vec;
1358 break;
1359 default:
1360 g_assert_not_reached();
1361 }
1362 op->args[2] = arg_new_constant(ctx, ~ti_const_val(t2));
1363 return fold_and(ctx, op);
1364 }
1365
Richard Hendersonfae450b2021-08-25 22:42:19 -07001366 /*
1367 * Known-zeros does not imply known-ones. Therefore unless
1368 * arg2 is constant, we can't infer anything from it.
1369 */
Richard Henderson21e2b5f2024-12-08 18:56:55 -06001370 if (ti_is_const(t2)) {
1371 uint64_t v2 = ti_const_val(t2);
1372 if (fold_affected_mask(ctx, op, z_mask & v2)) {
Richard Henderson045ace32024-12-19 10:33:51 -08001373 return true;
1374 }
Richard Henderson21e2b5f2024-12-08 18:56:55 -06001375 z_mask &= ~v2;
Richard Hendersonfae450b2021-08-25 22:42:19 -07001376 }
Richard Hendersonfae450b2021-08-25 22:42:19 -07001377
Richard Henderson21e2b5f2024-12-08 18:56:55 -06001378 s_mask = t1->s_mask & t2->s_mask;
1379 return fold_masks_zs(ctx, op, z_mask, s_mask);
Richard Henderson2f9f08b2021-08-25 12:03:48 -07001380}
1381
Richard Henderson7d3c63a2024-12-09 14:06:08 -06001382static bool fold_bitsel_vec(OptContext *ctx, TCGOp *op)
1383{
1384 /* If true and false values are the same, eliminate the cmp. */
1385 if (args_are_copies(op->args[2], op->args[3])) {
1386 return tcg_opt_gen_mov(ctx, op, op->args[0], op->args[2]);
1387 }
1388
1389 if (arg_is_const(op->args[2]) && arg_is_const(op->args[3])) {
1390 uint64_t tv = arg_info(op->args[2])->val;
1391 uint64_t fv = arg_info(op->args[3])->val;
1392
1393 if (tv == -1 && fv == 0) {
1394 return tcg_opt_gen_mov(ctx, op, op->args[0], op->args[1]);
1395 }
1396 if (tv == 0 && fv == -1) {
1397 if (TCG_TARGET_HAS_not_vec) {
1398 op->opc = INDEX_op_not_vec;
1399 return fold_not(ctx, op);
1400 } else {
1401 op->opc = INDEX_op_xor_vec;
1402 op->args[2] = arg_new_constant(ctx, -1);
1403 return fold_xor(ctx, op);
1404 }
1405 }
1406 }
1407 if (arg_is_const(op->args[2])) {
1408 uint64_t tv = arg_info(op->args[2])->val;
1409 if (tv == -1) {
1410 op->opc = INDEX_op_or_vec;
1411 op->args[2] = op->args[3];
1412 return fold_or(ctx, op);
1413 }
1414 if (tv == 0 && TCG_TARGET_HAS_andc_vec) {
1415 op->opc = INDEX_op_andc_vec;
1416 op->args[2] = op->args[1];
1417 op->args[1] = op->args[3];
1418 return fold_andc(ctx, op);
1419 }
1420 }
1421 if (arg_is_const(op->args[3])) {
1422 uint64_t fv = arg_info(op->args[3])->val;
1423 if (fv == 0) {
1424 op->opc = INDEX_op_and_vec;
1425 return fold_and(ctx, op);
1426 }
1427 if (fv == -1 && TCG_TARGET_HAS_orc_vec) {
1428 op->opc = INDEX_op_orc_vec;
1429 op->args[2] = op->args[1];
1430 op->args[1] = op->args[3];
1431 return fold_orc(ctx, op);
1432 }
1433 }
1434 return finish_folding(ctx, op);
1435}
1436
Richard Henderson079b0802021-08-24 09:30:59 -07001437static bool fold_brcond(OptContext *ctx, TCGOp *op)
1438{
Richard Hendersonfb04ab72024-01-10 18:21:58 +11001439 int i = do_constant_folding_cond1(ctx, op, NO_DEST, &op->args[0],
Richard Henderson246c4b72023-10-24 16:36:50 -07001440 &op->args[1], &op->args[2]);
Richard Henderson079b0802021-08-24 09:30:59 -07001441 if (i == 0) {
1442 tcg_op_remove(ctx->tcg, op);
1443 return true;
1444 }
1445 if (i > 0) {
1446 op->opc = INDEX_op_br;
1447 op->args[0] = op->args[3];
Richard Henderson15268552024-12-08 07:45:11 -06001448 finish_ebb(ctx);
1449 } else {
1450 finish_bb(ctx);
Richard Henderson079b0802021-08-24 09:30:59 -07001451 }
Richard Henderson15268552024-12-08 07:45:11 -06001452 return true;
Richard Henderson079b0802021-08-24 09:30:59 -07001453}
1454
Richard Henderson764d2ab2021-08-24 09:22:11 -07001455static bool fold_brcond2(OptContext *ctx, TCGOp *op)
1456{
Richard Henderson7e64b112023-10-24 16:53:56 -07001457 TCGCond cond;
1458 TCGArg label;
Richard Henderson7a2f7082021-08-26 07:06:39 -07001459 int i, inv = 0;
Richard Henderson764d2ab2021-08-24 09:22:11 -07001460
Richard Hendersonfb04ab72024-01-10 18:21:58 +11001461 i = do_constant_folding_cond2(ctx, op, &op->args[0]);
Richard Henderson7e64b112023-10-24 16:53:56 -07001462 cond = op->args[4];
1463 label = op->args[5];
Richard Henderson764d2ab2021-08-24 09:22:11 -07001464 if (i >= 0) {
1465 goto do_brcond_const;
1466 }
1467
1468 switch (cond) {
1469 case TCG_COND_LT:
1470 case TCG_COND_GE:
1471 /*
1472 * Simplify LT/GE comparisons vs zero to a single compare
1473 * vs the high word of the input.
1474 */
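        /*
         * e.g. for a 64-bit guest value on a 32-bit host, (hi:lo) < 0
         * depends only on the sign of hi, so the double-word compare
         * reduces to a single brcond_i32 on the high word.
         */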
Richard Henderson27cdb852023-10-23 11:38:00 -07001475 if (arg_is_const_val(op->args[2], 0) &&
1476 arg_is_const_val(op->args[3], 0)) {
Richard Henderson764d2ab2021-08-24 09:22:11 -07001477 goto do_brcond_high;
1478 }
1479 break;
1480
1481 case TCG_COND_NE:
1482 inv = 1;
1483 QEMU_FALLTHROUGH;
1484 case TCG_COND_EQ:
1485 /*
1486 * Simplify EQ/NE comparisons where one of the pairs
1487 * can be simplified.
1488 */
Richard Henderson67f84c92021-08-25 08:00:20 -07001489 i = do_constant_folding_cond(TCG_TYPE_I32, op->args[0],
Richard Henderson764d2ab2021-08-24 09:22:11 -07001490 op->args[2], cond);
1491 switch (i ^ inv) {
1492 case 0:
1493 goto do_brcond_const;
1494 case 1:
1495 goto do_brcond_high;
1496 }
1497
Richard Henderson67f84c92021-08-25 08:00:20 -07001498 i = do_constant_folding_cond(TCG_TYPE_I32, op->args[1],
Richard Henderson764d2ab2021-08-24 09:22:11 -07001499 op->args[3], cond);
1500 switch (i ^ inv) {
1501 case 0:
1502 goto do_brcond_const;
1503 case 1:
Richard Hendersonceb9ee02023-10-23 23:44:27 -07001504 goto do_brcond_low;
1505 }
1506 break;
1507
1508 case TCG_COND_TSTEQ:
1509 case TCG_COND_TSTNE:
1510 if (arg_is_const_val(op->args[2], 0)) {
1511 goto do_brcond_high;
1512 }
1513 if (arg_is_const_val(op->args[3], 0)) {
1514 goto do_brcond_low;
Richard Henderson764d2ab2021-08-24 09:22:11 -07001515 }
1516 break;
1517
1518 default:
1519 break;
1520
Richard Hendersonceb9ee02023-10-23 23:44:27 -07001521 do_brcond_low:
1522 op->opc = INDEX_op_brcond_i32;
1523 op->args[1] = op->args[2];
1524 op->args[2] = cond;
1525 op->args[3] = label;
1526 return fold_brcond(ctx, op);
1527
Richard Henderson764d2ab2021-08-24 09:22:11 -07001528 do_brcond_high:
1529 op->opc = INDEX_op_brcond_i32;
1530 op->args[0] = op->args[1];
1531 op->args[1] = op->args[3];
1532 op->args[2] = cond;
1533 op->args[3] = label;
Richard Hendersonceb9ee02023-10-23 23:44:27 -07001534 return fold_brcond(ctx, op);
Richard Henderson764d2ab2021-08-24 09:22:11 -07001535
1536 do_brcond_const:
1537 if (i == 0) {
1538 tcg_op_remove(ctx->tcg, op);
1539 return true;
1540 }
1541 op->opc = INDEX_op_br;
1542 op->args[0] = label;
Richard Henderson15268552024-12-08 07:45:11 -06001543 finish_ebb(ctx);
1544 return true;
Richard Henderson764d2ab2021-08-24 09:22:11 -07001545 }
Richard Henderson15268552024-12-08 07:45:11 -06001546
1547 finish_bb(ctx);
1548 return true;
Richard Henderson764d2ab2021-08-24 09:22:11 -07001549}
1550
Richard Henderson09bacdc2021-08-24 11:58:12 -07001551static bool fold_bswap(OptContext *ctx, TCGOp *op)
1552{
Richard Henderson57fe5c62021-08-26 12:04:46 -07001553 uint64_t z_mask, s_mask, sign;
Richard Hendersonc1e7b982024-12-08 19:42:20 -06001554 TempOptInfo *t1 = arg_info(op->args[1]);
Richard Hendersonfae450b2021-08-25 22:42:19 -07001555
Richard Hendersonc1e7b982024-12-08 19:42:20 -06001556 if (ti_is_const(t1)) {
1557 return tcg_opt_gen_movi(ctx, op, op->args[0],
1558 do_constant_folding(op->opc, ctx->type,
1559 ti_const_val(t1),
1560 op->args[2]));
Richard Henderson09bacdc2021-08-24 11:58:12 -07001561 }
Richard Hendersonfae450b2021-08-25 22:42:19 -07001562
Richard Hendersonc1e7b982024-12-08 19:42:20 -06001563 z_mask = t1->z_mask;
Richard Hendersonfae450b2021-08-25 22:42:19 -07001564 switch (op->opc) {
1565 case INDEX_op_bswap16_i32:
1566 case INDEX_op_bswap16_i64:
1567 z_mask = bswap16(z_mask);
1568 sign = INT16_MIN;
1569 break;
1570 case INDEX_op_bswap32_i32:
1571 case INDEX_op_bswap32_i64:
1572 z_mask = bswap32(z_mask);
1573 sign = INT32_MIN;
1574 break;
1575 case INDEX_op_bswap64_i64:
1576 z_mask = bswap64(z_mask);
1577 sign = INT64_MIN;
1578 break;
1579 default:
1580 g_assert_not_reached();
1581 }
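    /*
     * e.g. for bswap32 with incoming z_mask 0x0000ffff, the swapped
     * z_mask is 0xffff0000; the OZ/OS flags below then decide how the
     * bits above the swapped quantity are accounted for.
     */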
1582
Richard Henderson75c3bf32024-12-19 10:50:40 -08001583 s_mask = 0;
Richard Hendersonfae450b2021-08-25 22:42:19 -07001584 switch (op->args[2] & (TCG_BSWAP_OZ | TCG_BSWAP_OS)) {
1585 case TCG_BSWAP_OZ:
1586 break;
1587 case TCG_BSWAP_OS:
1588 /* If the sign bit may be 1, force all the bits above to 1. */
1589 if (z_mask & sign) {
1590 z_mask |= sign;
1591 }
Richard Hendersonc1e7b982024-12-08 19:42:20 -06001592 /* The value and therefore s_mask is explicitly sign-extended. */
1593 s_mask = sign;
Richard Hendersonfae450b2021-08-25 22:42:19 -07001594 break;
1595 default:
1596 /* The high bits are undefined: force all bits above the sign to 1. */
1597 z_mask |= sign << 1;
1598 break;
1599 }
Richard Hendersonfae450b2021-08-25 22:42:19 -07001600
Richard Hendersonc1e7b982024-12-08 19:42:20 -06001601 return fold_masks_zs(ctx, op, z_mask, s_mask);
Richard Henderson09bacdc2021-08-24 11:58:12 -07001602}
1603
Richard Henderson5cf32be2021-08-24 08:17:08 -07001604static bool fold_call(OptContext *ctx, TCGOp *op)
1605{
1606 TCGContext *s = ctx->tcg;
1607 int nb_oargs = TCGOP_CALLO(op);
1608 int nb_iargs = TCGOP_CALLI(op);
1609 int flags, i;
1610
1611 init_arguments(ctx, op, nb_oargs + nb_iargs);
1612 copy_propagate(ctx, op, nb_oargs, nb_iargs);
1613
1614 /* If the function reads or writes globals, reset temp data. */
1615 flags = tcg_call_flags(op);
1616 if (!(flags & (TCG_CALL_NO_READ_GLOBALS | TCG_CALL_NO_WRITE_GLOBALS))) {
1617 int nb_globals = s->nb_globals;
1618
1619 for (i = 0; i < nb_globals; i++) {
1620 if (test_bit(i, ctx->temps_used.l)) {
Richard Henderson986cac12023-01-09 13:59:35 -08001621 reset_ts(ctx, &ctx->tcg->temps[i]);
Richard Henderson5cf32be2021-08-24 08:17:08 -07001622 }
1623 }
1624 }
1625
Richard Hendersonab84dc32023-08-23 23:04:24 -07001626 /* If the function has side effects, reset mem data. */
1627 if (!(flags & TCG_CALL_NO_SIDE_EFFECTS)) {
1628 remove_mem_copy_all(ctx);
1629 }
1630
Richard Henderson5cf32be2021-08-24 08:17:08 -07001631 /* Reset temp data for outputs. */
1632 for (i = 0; i < nb_oargs; i++) {
Richard Henderson986cac12023-01-09 13:59:35 -08001633 reset_temp(ctx, op->args[i]);
Richard Henderson5cf32be2021-08-24 08:17:08 -07001634 }
1635
1636 /* Stop optimizing MB across calls. */
1637 ctx->prev_mb = NULL;
1638 return true;
1639}
1640
Richard Henderson29f65862024-12-09 14:09:49 -06001641static bool fold_cmp_vec(OptContext *ctx, TCGOp *op)
1642{
1643 /* Canonicalize the comparison to put immediate second. */
1644 if (swap_commutative(NO_DEST, &op->args[1], &op->args[2])) {
1645 op->args[3] = tcg_swap_cond(op->args[3]);
1646 }
1647 return finish_folding(ctx, op);
1648}
1649
1650static bool fold_cmpsel_vec(OptContext *ctx, TCGOp *op)
1651{
1652 /* If true and false values are the same, eliminate the cmp. */
1653 if (args_are_copies(op->args[3], op->args[4])) {
1654 return tcg_opt_gen_mov(ctx, op, op->args[0], op->args[3]);
1655 }
1656
1657 /* Canonicalize the comparison to put immediate second. */
1658 if (swap_commutative(NO_DEST, &op->args[1], &op->args[2])) {
1659 op->args[5] = tcg_swap_cond(op->args[5]);
1660 }
1661 /*
1662 * Canonicalize the "false" input reg to match the destination,
1663 * so that the tcg backend can implement "move if true".
1664 */
1665 if (swap_commutative(op->args[0], &op->args[4], &op->args[3])) {
1666 op->args[5] = tcg_invert_cond(op->args[5]);
1667 }
1668 return finish_folding(ctx, op);
1669}
1670
Richard Henderson30dd0bf2021-08-24 10:51:34 -07001671static bool fold_count_zeros(OptContext *ctx, TCGOp *op)
1672{
Richard Hendersonce1d6632024-12-08 19:47:51 -06001673 uint64_t z_mask, s_mask;
1674 TempOptInfo *t1 = arg_info(op->args[1]);
1675 TempOptInfo *t2 = arg_info(op->args[2]);
Richard Hendersonfae450b2021-08-25 22:42:19 -07001676
Richard Hendersonce1d6632024-12-08 19:47:51 -06001677 if (ti_is_const(t1)) {
1678 uint64_t t = ti_const_val(t1);
Richard Henderson30dd0bf2021-08-24 10:51:34 -07001679
1680 if (t != 0) {
Richard Henderson67f84c92021-08-25 08:00:20 -07001681 t = do_constant_folding(op->opc, ctx->type, t, 0);
Richard Henderson30dd0bf2021-08-24 10:51:34 -07001682 return tcg_opt_gen_movi(ctx, op, op->args[0], t);
1683 }
1684 return tcg_opt_gen_mov(ctx, op, op->args[0], op->args[2]);
1685 }
Richard Hendersonfae450b2021-08-25 22:42:19 -07001686
1687 switch (ctx->type) {
1688 case TCG_TYPE_I32:
1689 z_mask = 31;
1690 break;
1691 case TCG_TYPE_I64:
1692 z_mask = 63;
1693 break;
1694 default:
1695 g_assert_not_reached();
1696 }
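    /*
     * A nonzero input yields a count in [0, width-1]; a zero input
     * yields a copy of arg2.  Merge both possibilities into the
     * result masks below.
     */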
Richard Hendersonce1d6632024-12-08 19:47:51 -06001697 s_mask = ~z_mask;
1698 z_mask |= t2->z_mask;
1699 s_mask &= t2->s_mask;
1700
1701 return fold_masks_zs(ctx, op, z_mask, s_mask);
Richard Henderson30dd0bf2021-08-24 10:51:34 -07001702}
1703
Richard Henderson2f9f08b2021-08-25 12:03:48 -07001704static bool fold_ctpop(OptContext *ctx, TCGOp *op)
1705{
Richard Henderson81be07f2024-12-08 19:49:17 -06001706 uint64_t z_mask;
1707
Richard Hendersonfae450b2021-08-25 22:42:19 -07001708 if (fold_const1(ctx, op)) {
1709 return true;
1710 }
1711
1712 switch (ctx->type) {
1713 case TCG_TYPE_I32:
Richard Henderson81be07f2024-12-08 19:49:17 -06001714 z_mask = 32 | 31;
Richard Hendersonfae450b2021-08-25 22:42:19 -07001715 break;
1716 case TCG_TYPE_I64:
Richard Henderson81be07f2024-12-08 19:49:17 -06001717 z_mask = 64 | 63;
Richard Hendersonfae450b2021-08-25 22:42:19 -07001718 break;
1719 default:
1720 g_assert_not_reached();
1721 }
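    /*
     * ctpop of an N-bit value lies in [0, N], and N | (N - 1) is a
     * mask covering that whole range (e.g. 32 | 31 == 0x3f >= 32).
     */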
Richard Henderson81be07f2024-12-08 19:49:17 -06001722 return fold_masks_z(ctx, op, z_mask);
Richard Henderson2f9f08b2021-08-25 12:03:48 -07001723}
1724
Richard Henderson1b1907b2021-08-24 10:47:04 -07001725static bool fold_deposit(OptContext *ctx, TCGOp *op)
1726{
Richard Hendersonc7739ab2024-12-08 19:57:28 -06001727 TempOptInfo *t1 = arg_info(op->args[1]);
1728 TempOptInfo *t2 = arg_info(op->args[2]);
1729 int ofs = op->args[3];
1730 int len = op->args[4];
Richard Hendersonc3b920b2025-01-06 10:32:44 -08001731 int width = 8 * tcg_type_size(ctx->type);
Richard Hendersonedb832c2024-12-19 17:56:05 -08001732 uint64_t z_mask, s_mask;
Richard Henderson8f7a8402023-08-13 11:03:05 -07001733
Richard Hendersonc7739ab2024-12-08 19:57:28 -06001734 if (ti_is_const(t1) && ti_is_const(t2)) {
1735 return tcg_opt_gen_movi(ctx, op, op->args[0],
1736 deposit64(ti_const_val(t1), ofs, len,
1737 ti_const_val(t2)));
Richard Henderson1b1907b2021-08-24 10:47:04 -07001738 }
Richard Hendersonfae450b2021-08-25 22:42:19 -07001739
Richard Henderson8f7a8402023-08-13 11:03:05 -07001740 /* Inserting a value into zero at offset 0. */
Richard Hendersonc7739ab2024-12-08 19:57:28 -06001741 if (ti_is_const_val(t1, 0) && ofs == 0) {
1742 uint64_t mask = MAKE_64BIT_MASK(0, len);
Richard Henderson8f7a8402023-08-13 11:03:05 -07001743
Richard Hendersonc3b920b2025-01-06 10:32:44 -08001744 op->opc = INDEX_op_and;
Richard Henderson8f7a8402023-08-13 11:03:05 -07001745 op->args[1] = op->args[2];
Richard Henderson26aac972023-10-23 12:31:57 -07001746 op->args[2] = arg_new_constant(ctx, mask);
Richard Hendersonc7739ab2024-12-08 19:57:28 -06001747 return fold_and(ctx, op);
Richard Henderson8f7a8402023-08-13 11:03:05 -07001748 }
1749
1750 /* Inserting zero into a value. */
Richard Hendersonc7739ab2024-12-08 19:57:28 -06001751 if (ti_is_const_val(t2, 0)) {
1752 uint64_t mask = deposit64(-1, ofs, len, 0);
Richard Henderson8f7a8402023-08-13 11:03:05 -07001753
Richard Hendersonc3b920b2025-01-06 10:32:44 -08001754 op->opc = INDEX_op_and;
Richard Henderson26aac972023-10-23 12:31:57 -07001755 op->args[2] = arg_new_constant(ctx, mask);
Richard Hendersonc7739ab2024-12-08 19:57:28 -06001756 return fold_and(ctx, op);
Richard Henderson8f7a8402023-08-13 11:03:05 -07001757 }
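    /*
     * e.g. deposit r,x,$0,8,8 (an illustrative encoding: insert 8 zero
     * bits at offset 8) becomes and r,x,$~0xff00 via the mask above.
     */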
1758
Richard Hendersonedb832c2024-12-19 17:56:05 -08001759 /* The s_mask from the top portion of the deposit is still valid. */
1760 if (ofs + len == width) {
1761 s_mask = t2->s_mask << ofs;
1762 } else {
1763 s_mask = t1->s_mask & ~MAKE_64BIT_MASK(0, ofs + len);
1764 }
1765
Richard Hendersonc7739ab2024-12-08 19:57:28 -06001766 z_mask = deposit64(t1->z_mask, ofs, len, t2->z_mask);
Richard Hendersonedb832c2024-12-19 17:56:05 -08001767 return fold_masks_zs(ctx, op, z_mask, s_mask);
Richard Henderson1b1907b2021-08-24 10:47:04 -07001768}
1769
Richard Henderson2f9f08b2021-08-25 12:03:48 -07001770static bool fold_divide(OptContext *ctx, TCGOp *op)
1771{
Richard Henderson2f9d9a32021-10-25 11:30:14 -07001772 if (fold_const2(ctx, op) ||
1773 fold_xi_to_x(ctx, op, 1)) {
1774 return true;
1775 }
Richard Henderson3d5ec802024-12-08 19:59:15 -06001776 return finish_folding(ctx, op);
Richard Henderson2f9f08b2021-08-25 12:03:48 -07001777}
1778
Richard Henderson8cdb3fc2021-08-24 12:06:33 -07001779static bool fold_dup(OptContext *ctx, TCGOp *op)
1780{
1781 if (arg_is_const(op->args[1])) {
1782 uint64_t t = arg_info(op->args[1])->val;
1783 t = dup_const(TCGOP_VECE(op), t);
1784 return tcg_opt_gen_movi(ctx, op, op->args[0], t);
1785 }
Richard Hendersone089d692024-12-08 20:00:51 -06001786 return finish_folding(ctx, op);
Richard Henderson8cdb3fc2021-08-24 12:06:33 -07001787}
1788
1789static bool fold_dup2(OptContext *ctx, TCGOp *op)
1790{
1791 if (arg_is_const(op->args[1]) && arg_is_const(op->args[2])) {
1792 uint64_t t = deposit64(arg_info(op->args[1])->val, 32, 32,
1793 arg_info(op->args[2])->val);
1794 return tcg_opt_gen_movi(ctx, op, op->args[0], t);
1795 }
1796
1797 if (args_are_copies(op->args[1], op->args[2])) {
1798 op->opc = INDEX_op_dup_vec;
1799 TCGOP_VECE(op) = MO_32;
1800 }
Richard Hendersone089d692024-12-08 20:00:51 -06001801 return finish_folding(ctx, op);
Richard Henderson8cdb3fc2021-08-24 12:06:33 -07001802}
1803
Richard Henderson2f9f08b2021-08-25 12:03:48 -07001804static bool fold_eqv(OptContext *ctx, TCGOp *op)
1805{
Richard Hendersonef6be622024-12-08 20:03:15 -06001806 uint64_t s_mask;
1807
Richard Henderson7a2f7082021-08-26 07:06:39 -07001808 if (fold_const2_commutative(ctx, op) ||
Richard Hendersona63ce0e2021-08-25 20:28:53 -07001809 fold_xi_to_x(ctx, op, -1) ||
Richard Henderson0e0a32b2021-08-24 13:18:01 -07001810 fold_xi_to_not(ctx, op, 0)) {
1811 return true;
1812 }
Richard Henderson3f2b1f82021-08-26 13:08:54 -07001813
Richard Hendersonef6be622024-12-08 20:03:15 -06001814 s_mask = arg_info(op->args[1])->s_mask
1815 & arg_info(op->args[2])->s_mask;
1816 return fold_masks_s(ctx, op, s_mask);
Richard Henderson2f9f08b2021-08-25 12:03:48 -07001817}
1818
Richard Hendersonb6617c82021-08-24 10:44:53 -07001819static bool fold_extract(OptContext *ctx, TCGOp *op)
1820{
Richard Hendersonfae450b2021-08-25 22:42:19 -07001821 uint64_t z_mask_old, z_mask;
Richard Hendersonb6cd00f2024-12-08 20:05:11 -06001822 TempOptInfo *t1 = arg_info(op->args[1]);
Richard Henderson57fe5c62021-08-26 12:04:46 -07001823 int pos = op->args[2];
1824 int len = op->args[3];
Richard Hendersonfae450b2021-08-25 22:42:19 -07001825
Richard Hendersonb6cd00f2024-12-08 20:05:11 -06001826 if (ti_is_const(t1)) {
1827 return tcg_opt_gen_movi(ctx, op, op->args[0],
1828 extract64(ti_const_val(t1), pos, len));
Richard Hendersonb6617c82021-08-24 10:44:53 -07001829 }
Richard Hendersonfae450b2021-08-25 22:42:19 -07001830
Richard Hendersonb6cd00f2024-12-08 20:05:11 -06001831 z_mask_old = t1->z_mask;
Richard Henderson57fe5c62021-08-26 12:04:46 -07001832 z_mask = extract64(z_mask_old, pos, len);
Richard Henderson045ace32024-12-19 10:33:51 -08001833 if (pos == 0 && fold_affected_mask(ctx, op, z_mask_old ^ z_mask)) {
1834 return true;
Richard Hendersonfae450b2021-08-25 22:42:19 -07001835 }
Richard Hendersonfae450b2021-08-25 22:42:19 -07001836
Richard Hendersonb6cd00f2024-12-08 20:05:11 -06001837 return fold_masks_z(ctx, op, z_mask);
Richard Hendersonb6617c82021-08-24 10:44:53 -07001838}
1839
Richard Hendersondcd08992021-08-24 10:41:39 -07001840static bool fold_extract2(OptContext *ctx, TCGOp *op)
1841{
1842 if (arg_is_const(op->args[1]) && arg_is_const(op->args[2])) {
1843 uint64_t v1 = arg_info(op->args[1])->val;
1844 uint64_t v2 = arg_info(op->args[2])->val;
1845 int shr = op->args[3];
1846
1847 if (op->opc == INDEX_op_extract2_i64) {
1848 v1 >>= shr;
1849 v2 <<= 64 - shr;
1850 } else {
1851 v1 = (uint32_t)v1 >> shr;
Richard Henderson225bec02021-11-09 23:17:59 +01001852 v2 = (uint64_t)((int32_t)v2 << (32 - shr));
Richard Hendersondcd08992021-08-24 10:41:39 -07001853 }
1854 return tcg_opt_gen_movi(ctx, op, op->args[0], v1 | v2);
1855 }
Richard Hendersonc9df99e2024-12-08 20:06:42 -06001856 return finish_folding(ctx, op);
Richard Hendersondcd08992021-08-24 10:41:39 -07001857}
1858
Richard Henderson2f9f08b2021-08-25 12:03:48 -07001859static bool fold_exts(OptContext *ctx, TCGOp *op)
1860{
Richard Henderson48e8de62024-12-26 12:01:57 -08001861 uint64_t s_mask, z_mask;
Richard Hendersona9621922024-12-08 20:08:46 -06001862 TempOptInfo *t1;
Richard Hendersonfae450b2021-08-25 22:42:19 -07001863
1864 if (fold_const1(ctx, op)) {
1865 return true;
1866 }
1867
Richard Hendersona9621922024-12-08 20:08:46 -06001868 t1 = arg_info(op->args[1]);
1869 z_mask = t1->z_mask;
1870 s_mask = t1->s_mask;
Richard Hendersonfae450b2021-08-25 22:42:19 -07001871
1872 switch (op->opc) {
Richard Hendersonfae450b2021-08-25 22:42:19 -07001873 case INDEX_op_ext_i32_i64:
Richard Hendersona9621922024-12-08 20:08:46 -06001874 s_mask |= INT32_MIN;
1875 z_mask = (int32_t)z_mask;
Richard Hendersonfae450b2021-08-25 22:42:19 -07001876 break;
1877 default:
1878 g_assert_not_reached();
1879 }
Richard Hendersona9621922024-12-08 20:08:46 -06001880 return fold_masks_zs(ctx, op, z_mask, s_mask);
Richard Henderson2f9f08b2021-08-25 12:03:48 -07001881}
1882
1883static bool fold_extu(OptContext *ctx, TCGOp *op)
1884{
Richard Henderson48e8de62024-12-26 12:01:57 -08001885 uint64_t z_mask;
Richard Hendersonfae450b2021-08-25 22:42:19 -07001886
1887 if (fold_const1(ctx, op)) {
1888 return true;
1889 }
1890
Richard Henderson48e8de62024-12-26 12:01:57 -08001891 z_mask = arg_info(op->args[1])->z_mask;
Richard Hendersonfae450b2021-08-25 22:42:19 -07001892 switch (op->opc) {
Richard Hendersonfae450b2021-08-25 22:42:19 -07001893 case INDEX_op_extrl_i64_i32:
1894 case INDEX_op_extu_i32_i64:
Richard Hendersonfae450b2021-08-25 22:42:19 -07001895 z_mask = (uint32_t)z_mask;
1896 break;
1897 case INDEX_op_extrh_i64_i32:
Richard Hendersonfae450b2021-08-25 22:42:19 -07001898 z_mask >>= 32;
1899 break;
1900 default:
1901 g_assert_not_reached();
1902 }
Richard Henderson08abe292024-12-08 20:11:44 -06001903 return fold_masks_z(ctx, op, z_mask);
Richard Henderson2f9f08b2021-08-25 12:03:48 -07001904}
1905
Richard Henderson3eefdf22021-08-25 11:06:43 -07001906static bool fold_mb(OptContext *ctx, TCGOp *op)
1907{
1908 /* Eliminate duplicate and redundant fence instructions. */
1909 if (ctx->prev_mb) {
1910 /*
1911 * Merge two barriers of the same type into one,
1912 * or a weaker barrier into a stronger one,
1913 * or two weaker barriers into a stronger one.
1914 * mb X; mb Y => mb X|Y
1915 * mb; strl => mb; st
1916 * ldaq; mb => ld; mb
1917 * ldaq; strl => ld; mb; st
1918 * Other combinations are also merged into a strong
1919 * barrier. This is stricter than specified but for
1920 * the purposes of TCG is better than not optimizing.
1921 */
1922 ctx->prev_mb->args[0] |= op->args[0];
1923 tcg_op_remove(ctx->tcg, op);
1924 } else {
1925 ctx->prev_mb = op;
1926 }
1927 return true;
1928}
1929
Richard Henderson2cfac7f2021-08-25 13:05:43 -07001930static bool fold_mov(OptContext *ctx, TCGOp *op)
1931{
1932 return tcg_opt_gen_mov(ctx, op, op->args[0], op->args[1]);
1933}
1934
Richard Henderson0c310a32021-08-24 10:37:24 -07001935static bool fold_movcond(OptContext *ctx, TCGOp *op)
1936{
Richard Henderson32202782024-12-08 20:16:38 -06001937 uint64_t z_mask, s_mask;
1938 TempOptInfo *tt, *ft;
Richard Henderson7a2f7082021-08-26 07:06:39 -07001939 int i;
Richard Henderson0c310a32021-08-24 10:37:24 -07001940
Richard Henderson141125e2024-09-06 21:00:10 -07001941 /* If true and false values are the same, eliminate the cmp. */
1942 if (args_are_copies(op->args[3], op->args[4])) {
1943 return tcg_opt_gen_mov(ctx, op, op->args[0], op->args[3]);
1944 }
1945
Richard Henderson7a2f7082021-08-26 07:06:39 -07001946 /*
1947 * Canonicalize the "false" input reg to match the destination reg so
1948 * that the tcg backend can implement a "move if true" operation.
1949 */
1950 if (swap_commutative(op->args[0], &op->args[4], &op->args[3])) {
Richard Henderson246c4b72023-10-24 16:36:50 -07001951 op->args[5] = tcg_invert_cond(op->args[5]);
Richard Henderson7a2f7082021-08-26 07:06:39 -07001952 }
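    /*
     * e.g. movcond r,c1,c2,r,f,cond becomes movcond r,c1,c2,f,r,
     * inverse(cond) (illustrative), so a backend cmov can write r
     * only when the inverted condition holds and otherwise leave the
     * "false" value, already in r, untouched.
     */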
1953
Richard Hendersonfb04ab72024-01-10 18:21:58 +11001954 i = do_constant_folding_cond1(ctx, op, NO_DEST, &op->args[1],
Richard Henderson246c4b72023-10-24 16:36:50 -07001955 &op->args[2], &op->args[5]);
Richard Henderson0c310a32021-08-24 10:37:24 -07001956 if (i >= 0) {
1957 return tcg_opt_gen_mov(ctx, op, op->args[0], op->args[4 - i]);
1958 }
1959
Richard Henderson32202782024-12-08 20:16:38 -06001960 tt = arg_info(op->args[3]);
1961 ft = arg_info(op->args[4]);
1962 z_mask = tt->z_mask | ft->z_mask;
1963 s_mask = tt->s_mask & ft->s_mask;
Richard Hendersonfae450b2021-08-25 22:42:19 -07001964
Richard Henderson32202782024-12-08 20:16:38 -06001965 if (ti_is_const(tt) && ti_is_const(ft)) {
1966 uint64_t tv = ti_const_val(tt);
1967 uint64_t fv = ti_const_val(ft);
Richard Henderson36355022023-08-04 23:24:04 +00001968 TCGOpcode opc, negopc = 0;
Richard Henderson246c4b72023-10-24 16:36:50 -07001969 TCGCond cond = op->args[5];
Richard Henderson0c310a32021-08-24 10:37:24 -07001970
Richard Henderson67f84c92021-08-25 08:00:20 -07001971 switch (ctx->type) {
1972 case TCG_TYPE_I32:
1973 opc = INDEX_op_setcond_i32;
Richard Henderson36355022023-08-04 23:24:04 +00001974 if (TCG_TARGET_HAS_negsetcond_i32) {
1975 negopc = INDEX_op_negsetcond_i32;
1976 }
1977 tv = (int32_t)tv;
1978 fv = (int32_t)fv;
Richard Henderson67f84c92021-08-25 08:00:20 -07001979 break;
1980 case TCG_TYPE_I64:
1981 opc = INDEX_op_setcond_i64;
Richard Henderson36355022023-08-04 23:24:04 +00001982 if (TCG_TARGET_HAS_negsetcond_i64) {
1983 negopc = INDEX_op_negsetcond_i64;
1984 }
Richard Henderson67f84c92021-08-25 08:00:20 -07001985 break;
1986 default:
1987 g_assert_not_reached();
1988 }
Richard Henderson0c310a32021-08-24 10:37:24 -07001989
1990 if (tv == 1 && fv == 0) {
1991 op->opc = opc;
1992 op->args[3] = cond;
1993 } else if (fv == 1 && tv == 0) {
1994 op->opc = opc;
1995 op->args[3] = tcg_invert_cond(cond);
Richard Henderson36355022023-08-04 23:24:04 +00001996 } else if (negopc) {
1997 if (tv == -1 && fv == 0) {
1998 op->opc = negopc;
1999 op->args[3] = cond;
2000 } else if (fv == -1 && tv == 0) {
2001 op->opc = negopc;
2002 op->args[3] = tcg_invert_cond(cond);
2003 }
Richard Henderson0c310a32021-08-24 10:37:24 -07002004 }
2005 }
Richard Henderson32202782024-12-08 20:16:38 -06002006
2007 return fold_masks_zs(ctx, op, z_mask, s_mask);
Richard Henderson0c310a32021-08-24 10:37:24 -07002008}
2009
Richard Henderson2f9f08b2021-08-25 12:03:48 -07002010static bool fold_mul(OptContext *ctx, TCGOp *op)
2011{
Richard Hendersone8679952021-08-25 13:19:52 -07002012 if (fold_const2(ctx, op) ||
Richard Henderson5b5cf472021-10-25 11:19:14 -07002013 fold_xi_to_i(ctx, op, 0) ||
2014 fold_xi_to_x(ctx, op, 1)) {
Richard Hendersone8679952021-08-25 13:19:52 -07002015 return true;
2016 }
Richard Hendersoncd9c5832024-12-08 20:18:02 -06002017 return finish_folding(ctx, op);
Richard Henderson2f9f08b2021-08-25 12:03:48 -07002018}
2019
2020static bool fold_mul_highpart(OptContext *ctx, TCGOp *op)
2021{
Richard Henderson7a2f7082021-08-26 07:06:39 -07002022 if (fold_const2_commutative(ctx, op) ||
Richard Hendersone8679952021-08-25 13:19:52 -07002023 fold_xi_to_i(ctx, op, 0)) {
2024 return true;
2025 }
Richard Hendersoncd9c5832024-12-08 20:18:02 -06002026 return finish_folding(ctx, op);
Richard Henderson2f9f08b2021-08-25 12:03:48 -07002027}
2028
Richard Henderson407112b2021-08-26 06:33:04 -07002029static bool fold_multiply2(OptContext *ctx, TCGOp *op)
Richard Henderson6b8ac0d2021-08-24 10:24:12 -07002030{
Richard Henderson7a2f7082021-08-26 07:06:39 -07002031 swap_commutative(op->args[0], &op->args[2], &op->args[3]);
2032
Richard Henderson6b8ac0d2021-08-24 10:24:12 -07002033 if (arg_is_const(op->args[2]) && arg_is_const(op->args[3])) {
Richard Henderson407112b2021-08-26 06:33:04 -07002034 uint64_t a = arg_info(op->args[2])->val;
2035 uint64_t b = arg_info(op->args[3])->val;
2036 uint64_t h, l;
Richard Henderson6b8ac0d2021-08-24 10:24:12 -07002037 TCGArg rl, rh;
Richard Henderson407112b2021-08-26 06:33:04 -07002038 TCGOp *op2;
2039
2040 switch (op->opc) {
2041 case INDEX_op_mulu2_i32:
2042 l = (uint64_t)(uint32_t)a * (uint32_t)b;
2043 h = (int32_t)(l >> 32);
2044 l = (int32_t)l;
2045 break;
2046 case INDEX_op_muls2_i32:
2047 l = (int64_t)(int32_t)a * (int32_t)b;
2048 h = l >> 32;
2049 l = (int32_t)l;
2050 break;
2051 case INDEX_op_mulu2_i64:
2052 mulu64(&l, &h, a, b);
2053 break;
2054 case INDEX_op_muls2_i64:
2055 muls64(&l, &h, a, b);
2056 break;
2057 default:
2058 g_assert_not_reached();
2059 }
Richard Henderson6b8ac0d2021-08-24 10:24:12 -07002060
2061 rl = op->args[0];
2062 rh = op->args[1];
Richard Henderson407112b2021-08-26 06:33:04 -07002063
2064 /* The proper opcode is supplied by tcg_opt_gen_mov. */
Richard Hendersona3c1c572025-04-21 11:05:29 -07002065 op2 = opt_insert_before(ctx, op, 0, 2);
Richard Henderson407112b2021-08-26 06:33:04 -07002066
2067 tcg_opt_gen_movi(ctx, op, rl, l);
2068 tcg_opt_gen_movi(ctx, op2, rh, h);
Richard Henderson6b8ac0d2021-08-24 10:24:12 -07002069 return true;
2070 }
Richard Hendersoncd9c5832024-12-08 20:18:02 -06002071 return finish_folding(ctx, op);
Richard Henderson6b8ac0d2021-08-24 10:24:12 -07002072}
2073
Richard Henderson2f9f08b2021-08-25 12:03:48 -07002074static bool fold_nand(OptContext *ctx, TCGOp *op)
2075{
Richard Hendersonfa3168e2024-12-08 20:20:40 -06002076 uint64_t s_mask;
2077
Richard Henderson7a2f7082021-08-26 07:06:39 -07002078 if (fold_const2_commutative(ctx, op) ||
Richard Henderson0e0a32b2021-08-24 13:18:01 -07002079 fold_xi_to_not(ctx, op, -1)) {
2080 return true;
2081 }
Richard Henderson3f2b1f82021-08-26 13:08:54 -07002082
Richard Hendersonfa3168e2024-12-08 20:20:40 -06002083 s_mask = arg_info(op->args[1])->s_mask
2084 & arg_info(op->args[2])->s_mask;
2085 return fold_masks_s(ctx, op, s_mask);
Richard Henderson2f9f08b2021-08-25 12:03:48 -07002086}
2087
Richard Hendersone25fe882024-04-04 20:53:50 +00002088static bool fold_neg_no_const(OptContext *ctx, TCGOp *op)
Richard Henderson2f9f08b2021-08-25 12:03:48 -07002089{
Richard Hendersonfae450b2021-08-25 22:42:19 -07002090 /* Set to 1 all bits to the left of the rightmost. */
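    /*
     * e.g. with z_mask 0x0c the lowest bit that may be set in x is
     * bit 2, so the low two bits of -x are known zero:
     * -(0x0c & -0x0c) == -4 == ~3.
     */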
Richard Hendersone25fe882024-04-04 20:53:50 +00002091 uint64_t z_mask = arg_info(op->args[1])->z_mask;
Richard Hendersond151fd32024-12-08 20:23:11 -06002092 z_mask = -(z_mask & -z_mask);
Richard Hendersonfae450b2021-08-25 22:42:19 -07002093
Richard Hendersond151fd32024-12-08 20:23:11 -06002094 return fold_masks_z(ctx, op, z_mask);
Richard Henderson2f9f08b2021-08-25 12:03:48 -07002095}
2096
Richard Hendersone25fe882024-04-04 20:53:50 +00002097static bool fold_neg(OptContext *ctx, TCGOp *op)
2098{
2099 return fold_const1(ctx, op) || fold_neg_no_const(ctx, op);
2100}
2101
Richard Henderson2f9f08b2021-08-25 12:03:48 -07002102static bool fold_nor(OptContext *ctx, TCGOp *op)
2103{
Richard Henderson2b7b6952024-12-08 20:25:21 -06002104 uint64_t s_mask;
2105
Richard Henderson7a2f7082021-08-26 07:06:39 -07002106 if (fold_const2_commutative(ctx, op) ||
Richard Henderson0e0a32b2021-08-24 13:18:01 -07002107 fold_xi_to_not(ctx, op, 0)) {
2108 return true;
2109 }
Richard Henderson3f2b1f82021-08-26 13:08:54 -07002110
Richard Henderson2b7b6952024-12-08 20:25:21 -06002111 s_mask = arg_info(op->args[1])->s_mask
2112 & arg_info(op->args[2])->s_mask;
2113 return fold_masks_s(ctx, op, s_mask);
Richard Henderson2f9f08b2021-08-25 12:03:48 -07002114}
2115
2116static bool fold_not(OptContext *ctx, TCGOp *op)
2117{
Richard Henderson0e0a32b2021-08-24 13:18:01 -07002118 if (fold_const1(ctx, op)) {
2119 return true;
2120 }
Richard Henderson608e75f2024-12-08 20:27:02 -06002121 return fold_masks_s(ctx, op, arg_info(op->args[1])->s_mask);
Richard Henderson2f9f08b2021-08-25 12:03:48 -07002122}
2123
2124static bool fold_or(OptContext *ctx, TCGOp *op)
2125{
Richard Henderson83b1ba32024-12-08 20:28:59 -06002126 uint64_t z_mask, s_mask;
2127 TempOptInfo *t1, *t2;
2128
Richard Henderson7a2f7082021-08-26 07:06:39 -07002129 if (fold_const2_commutative(ctx, op) ||
Richard Hendersona63ce0e2021-08-25 20:28:53 -07002130 fold_xi_to_x(ctx, op, 0) ||
Richard Hendersonca7bb042021-08-25 13:14:21 -07002131 fold_xx_to_x(ctx, op)) {
2132 return true;
2133 }
Richard Hendersonfae450b2021-08-25 22:42:19 -07002134
Richard Henderson83b1ba32024-12-08 20:28:59 -06002135 t1 = arg_info(op->args[1]);
2136 t2 = arg_info(op->args[2]);
2137 z_mask = t1->z_mask | t2->z_mask;
2138 s_mask = t1->s_mask & t2->s_mask;
2139 return fold_masks_zs(ctx, op, z_mask, s_mask);
Richard Henderson2f9f08b2021-08-25 12:03:48 -07002140}
2141
2142static bool fold_orc(OptContext *ctx, TCGOp *op)
2143{
Richard Henderson54e26b22024-12-08 20:30:20 -06002144 uint64_t s_mask;
2145
Richard Henderson0e0a32b2021-08-24 13:18:01 -07002146 if (fold_const2(ctx, op) ||
Richard Henderson4e858d92021-08-26 07:31:13 -07002147 fold_xx_to_i(ctx, op, -1) ||
Richard Hendersona63ce0e2021-08-25 20:28:53 -07002148 fold_xi_to_x(ctx, op, -1) ||
Richard Henderson0e0a32b2021-08-24 13:18:01 -07002149 fold_ix_to_not(ctx, op, 0)) {
2150 return true;
2151 }
Richard Henderson3f2b1f82021-08-26 13:08:54 -07002152
Richard Henderson54e26b22024-12-08 20:30:20 -06002153 s_mask = arg_info(op->args[1])->s_mask
2154 & arg_info(op->args[2])->s_mask;
2155 return fold_masks_s(ctx, op, s_mask);
Richard Henderson2f9f08b2021-08-25 12:03:48 -07002156}
2157
Richard Henderson6813be92024-12-08 20:33:30 -06002158static bool fold_qemu_ld_1reg(OptContext *ctx, TCGOp *op)
Richard Henderson3eefdf22021-08-25 11:06:43 -07002159{
Richard Hendersonfae450b2021-08-25 22:42:19 -07002160 const TCGOpDef *def = &tcg_op_defs[op->opc];
2161 MemOpIdx oi = op->args[def->nb_oargs + def->nb_iargs];
2162 MemOp mop = get_memop(oi);
2163 int width = 8 * memop_size(mop);
Richard Henderson6813be92024-12-08 20:33:30 -06002164 uint64_t z_mask = -1, s_mask = 0;
Richard Hendersonfae450b2021-08-25 22:42:19 -07002165
Richard Henderson57fe5c62021-08-26 12:04:46 -07002166 if (width < 64) {
Richard Henderson75c3bf32024-12-19 10:50:40 -08002167 if (mop & MO_SIGN) {
Richard Henderson6813be92024-12-08 20:33:30 -06002168 s_mask = MAKE_64BIT_MASK(width - 1, 64 - (width - 1));
Richard Henderson75c3bf32024-12-19 10:50:40 -08002169 } else {
Richard Henderson6813be92024-12-08 20:33:30 -06002170 z_mask = MAKE_64BIT_MASK(0, width);
Richard Henderson57fe5c62021-08-26 12:04:46 -07002171 }
Richard Hendersonfae450b2021-08-25 22:42:19 -07002172 }
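    /*
     * e.g. an MO_UW load leaves z_mask == 0xffff, while an MO_SW load
     * leaves s_mask covering bits 63..15 (value sign-extended from
     * bit 15).
     */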
2173
Richard Henderson3eefdf22021-08-25 11:06:43 -07002174 /* Opcodes that touch guest memory stop the mb optimization. */
2175 ctx->prev_mb = NULL;
Richard Henderson6813be92024-12-08 20:33:30 -06002176
2177 return fold_masks_zs(ctx, op, z_mask, s_mask);
2178}
2179
2180static bool fold_qemu_ld_2reg(OptContext *ctx, TCGOp *op)
2181{
2182 /* Opcodes that touch guest memory stop the mb optimization. */
2183 ctx->prev_mb = NULL;
2184 return finish_folding(ctx, op);
Richard Henderson3eefdf22021-08-25 11:06:43 -07002185}
2186
2187static bool fold_qemu_st(OptContext *ctx, TCGOp *op)
2188{
2189 /* Opcodes that touch guest memory stop the mb optimization. */
2190 ctx->prev_mb = NULL;
Richard Henderson082b3ef2024-12-08 20:34:57 -06002191 return true;
Richard Henderson3eefdf22021-08-25 11:06:43 -07002192}
2193
Richard Henderson2f9f08b2021-08-25 12:03:48 -07002194static bool fold_remainder(OptContext *ctx, TCGOp *op)
2195{
Richard Henderson267c17e2021-10-25 11:30:33 -07002196 if (fold_const2(ctx, op) ||
2197 fold_xx_to_i(ctx, op, 0)) {
2198 return true;
2199 }
Richard Hendersonf9e39342024-12-08 20:36:50 -06002200 return finish_folding(ctx, op);
Richard Henderson2f9f08b2021-08-25 12:03:48 -07002201}
2202
Richard Henderson95eb2292024-12-08 20:47:59 -06002203/* Return 1 if finished, -1 if simplified, 0 if unchanged. */
2204static int fold_setcond_zmask(OptContext *ctx, TCGOp *op, bool neg)
Richard Henderson8d65cda2024-03-26 16:00:40 -10002205{
2206 uint64_t a_zmask, b_val;
2207 TCGCond cond;
2208
2209 if (!arg_is_const(op->args[2])) {
2210        return 0;
2211 }
2212
2213 a_zmask = arg_info(op->args[1])->z_mask;
2214 b_val = arg_info(op->args[2])->val;
2215 cond = op->args[3];
2216
2217 if (ctx->type == TCG_TYPE_I32) {
2218 a_zmask = (uint32_t)a_zmask;
2219 b_val = (uint32_t)b_val;
2220 }
2221
2222 /*
2223 * A with only low bits set vs B with high bits set means that A < B.
2224 */
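    /*
     * e.g. a_zmask == 0xff (so A <= 0xff) and b_val == 0x100: A < B
     * always holds, so LTU/LEU/NE fold to 1 and GEU/GTU/EQ fold to 0.
     */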
2225 if (a_zmask < b_val) {
2226 bool inv = false;
2227
2228 switch (cond) {
2229 case TCG_COND_NE:
2230 case TCG_COND_LEU:
2231 case TCG_COND_LTU:
2232 inv = true;
2233 /* fall through */
2234 case TCG_COND_GTU:
2235 case TCG_COND_GEU:
2236 case TCG_COND_EQ:
2237 return tcg_opt_gen_movi(ctx, op, op->args[0], neg ? -inv : inv);
2238 default:
2239 break;
2240 }
2241 }
2242
2243 /*
2244 * A with only lsb set is already boolean.
2245 */
2246 if (a_zmask <= 1) {
2247 bool convert = false;
2248 bool inv = false;
2249
2250 switch (cond) {
2251 case TCG_COND_EQ:
2252 inv = true;
2253 /* fall through */
2254 case TCG_COND_NE:
2255 convert = (b_val == 0);
2256 break;
2257 case TCG_COND_LTU:
2258 case TCG_COND_TSTEQ:
2259 inv = true;
2260 /* fall through */
2261 case TCG_COND_GEU:
2262 case TCG_COND_TSTNE:
2263 convert = (b_val == 1);
2264 break;
2265 default:
2266 break;
2267 }
2268 if (convert) {
Richard Henderson79602f62025-01-06 09:11:39 -08002269 TCGOpcode xor_opc, neg_opc;
Richard Henderson8d65cda2024-03-26 16:00:40 -10002270
2271 if (!inv && !neg) {
2272 return tcg_opt_gen_mov(ctx, op, op->args[0], op->args[1]);
2273 }
2274
2275 switch (ctx->type) {
2276 case TCG_TYPE_I32:
Richard Henderson8d65cda2024-03-26 16:00:40 -10002277 neg_opc = INDEX_op_neg_i32;
2278 xor_opc = INDEX_op_xor_i32;
2279 break;
2280 case TCG_TYPE_I64:
Richard Henderson8d65cda2024-03-26 16:00:40 -10002281 neg_opc = INDEX_op_neg_i64;
2282 xor_opc = INDEX_op_xor_i64;
2283 break;
2284 default:
2285 g_assert_not_reached();
2286 }
2287
2288 if (!inv) {
2289 op->opc = neg_opc;
2290 } else if (neg) {
Richard Henderson79602f62025-01-06 09:11:39 -08002291 op->opc = INDEX_op_add;
Richard Henderson8d65cda2024-03-26 16:00:40 -10002292 op->args[2] = arg_new_constant(ctx, -1);
2293 } else {
2294 op->opc = xor_opc;
2295 op->args[2] = arg_new_constant(ctx, 1);
2296 }
Richard Henderson95eb2292024-12-08 20:47:59 -06002297 return -1;
Richard Henderson8d65cda2024-03-26 16:00:40 -10002298 }
2299 }
Richard Henderson95eb2292024-12-08 20:47:59 -06002300 return 0;
Richard Henderson8d65cda2024-03-26 16:00:40 -10002301}
2302
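/*
 * Simplify a setcond that tests a single bit, e.g. fold
 * "r = (x & 0x10) != 0" to extract r,x,4,1, or to sextract for the
 * negated boolean; fall back to shr+and (plus xor/add/neg fixups for
 * TSTEQ and negsetcond) when no suitable extract is available.
 */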
Richard Hendersonceb9ee02023-10-23 23:44:27 -07002303static void fold_setcond_tst_pow2(OptContext *ctx, TCGOp *op, bool neg)
2304{
Richard Henderson93a9ddb2025-01-06 22:06:08 -08002305 TCGOpcode xor_opc, neg_opc, shr_opc;
Paolo Bonziniff202812024-02-28 12:06:41 +01002306 TCGOpcode uext_opc = 0, sext_opc = 0;
Richard Hendersonceb9ee02023-10-23 23:44:27 -07002307 TCGCond cond = op->args[3];
2308 TCGArg ret, src1, src2;
2309 TCGOp *op2;
2310 uint64_t val;
2311 int sh;
2312 bool inv;
2313
2314 if (!is_tst_cond(cond) || !arg_is_const(op->args[2])) {
2315 return;
2316 }
2317
2318 src2 = op->args[2];
2319 val = arg_info(src2)->val;
2320 if (!is_power_of_2(val)) {
2321 return;
2322 }
2323 sh = ctz64(val);
2324
2325 switch (ctx->type) {
2326 case TCG_TYPE_I32:
Richard Hendersonceb9ee02023-10-23 23:44:27 -07002327 xor_opc = INDEX_op_xor_i32;
2328 shr_opc = INDEX_op_shr_i32;
2329 neg_opc = INDEX_op_neg_i32;
Richard Henderson4bce7522024-12-25 18:55:45 -08002330 if (TCG_TARGET_extract_valid(TCG_TYPE_I32, sh, 1)) {
Richard Hendersonc334de12024-12-26 00:43:19 -08002331 uext_opc = INDEX_op_extract_i32;
Richard Henderson4bce7522024-12-25 18:55:45 -08002332 }
2333 if (TCG_TARGET_sextract_valid(TCG_TYPE_I32, sh, 1)) {
Richard Hendersonc334de12024-12-26 00:43:19 -08002334 sext_opc = INDEX_op_sextract_i32;
Richard Hendersonceb9ee02023-10-23 23:44:27 -07002335 }
2336 break;
2337 case TCG_TYPE_I64:
Richard Hendersonceb9ee02023-10-23 23:44:27 -07002338 xor_opc = INDEX_op_xor_i64;
2339 shr_opc = INDEX_op_shr_i64;
2340 neg_opc = INDEX_op_neg_i64;
Richard Henderson4bce7522024-12-25 18:55:45 -08002341 if (TCG_TARGET_extract_valid(TCG_TYPE_I64, sh, 1)) {
Richard Hendersonc334de12024-12-26 00:43:19 -08002342 uext_opc = INDEX_op_extract_i64;
Richard Henderson4bce7522024-12-25 18:55:45 -08002343 }
2344 if (TCG_TARGET_sextract_valid(TCG_TYPE_I64, sh, 1)) {
Richard Hendersonc334de12024-12-26 00:43:19 -08002345 sext_opc = INDEX_op_sextract_i64;
Richard Hendersonceb9ee02023-10-23 23:44:27 -07002346 }
2347 break;
2348 default:
2349 g_assert_not_reached();
2350 }
2351
2352 ret = op->args[0];
2353 src1 = op->args[1];
2354 inv = cond == TCG_COND_TSTEQ;
2355
2356 if (sh && sext_opc && neg && !inv) {
2357 op->opc = sext_opc;
2358 op->args[1] = src1;
2359 op->args[2] = sh;
2360 op->args[3] = 1;
2361 return;
2362 } else if (sh && uext_opc) {
2363 op->opc = uext_opc;
2364 op->args[1] = src1;
2365 op->args[2] = sh;
2366 op->args[3] = 1;
2367 } else {
2368 if (sh) {
Richard Hendersona3c1c572025-04-21 11:05:29 -07002369 op2 = opt_insert_before(ctx, op, shr_opc, 3);
Richard Hendersonceb9ee02023-10-23 23:44:27 -07002370 op2->args[0] = ret;
2371 op2->args[1] = src1;
2372 op2->args[2] = arg_new_constant(ctx, sh);
2373 src1 = ret;
2374 }
Richard Hendersonc3b920b2025-01-06 10:32:44 -08002375 op->opc = INDEX_op_and;
Richard Hendersonceb9ee02023-10-23 23:44:27 -07002376 op->args[1] = src1;
2377 op->args[2] = arg_new_constant(ctx, 1);
2378 }
2379
2380 if (neg && inv) {
Richard Henderson93a9ddb2025-01-06 22:06:08 -08002381 op2 = opt_insert_after(ctx, op, INDEX_op_add, 3);
Richard Hendersonceb9ee02023-10-23 23:44:27 -07002382 op2->args[0] = ret;
2383 op2->args[1] = ret;
Richard Henderson93a9ddb2025-01-06 22:06:08 -08002384 op2->args[2] = arg_new_constant(ctx, -1);
Richard Hendersonceb9ee02023-10-23 23:44:27 -07002385 } else if (inv) {
Richard Hendersona3c1c572025-04-21 11:05:29 -07002386 op2 = opt_insert_after(ctx, op, xor_opc, 3);
Richard Hendersonceb9ee02023-10-23 23:44:27 -07002387 op2->args[0] = ret;
2388 op2->args[1] = ret;
2389 op2->args[2] = arg_new_constant(ctx, 1);
2390 } else if (neg) {
Richard Hendersona3c1c572025-04-21 11:05:29 -07002391 op2 = opt_insert_after(ctx, op, neg_opc, 2);
Richard Hendersonceb9ee02023-10-23 23:44:27 -07002392 op2->args[0] = ret;
2393 op2->args[1] = ret;
2394 }
2395}
2396
Richard Hendersonc63ff552021-08-24 09:35:30 -07002397static bool fold_setcond(OptContext *ctx, TCGOp *op)
2398{
Richard Hendersonfb04ab72024-01-10 18:21:58 +11002399 int i = do_constant_folding_cond1(ctx, op, op->args[0], &op->args[1],
Richard Henderson246c4b72023-10-24 16:36:50 -07002400 &op->args[2], &op->args[3]);
Richard Hendersonc63ff552021-08-24 09:35:30 -07002401 if (i >= 0) {
2402 return tcg_opt_gen_movi(ctx, op, op->args[0], i);
2403 }
Richard Henderson8d65cda2024-03-26 16:00:40 -10002404
Richard Henderson95eb2292024-12-08 20:47:59 -06002405 i = fold_setcond_zmask(ctx, op, false);
2406 if (i > 0) {
Richard Henderson8d65cda2024-03-26 16:00:40 -10002407 return true;
2408 }
Richard Henderson95eb2292024-12-08 20:47:59 -06002409 if (i == 0) {
2410 fold_setcond_tst_pow2(ctx, op, false);
2411 }
Richard Hendersonfae450b2021-08-25 22:42:19 -07002412
Richard Henderson2c8a2832024-12-08 20:50:37 -06002413 return fold_masks_z(ctx, op, 1);
Richard Hendersonc63ff552021-08-24 09:35:30 -07002414}
2415
Richard Henderson36355022023-08-04 23:24:04 +00002416static bool fold_negsetcond(OptContext *ctx, TCGOp *op)
2417{
Richard Hendersonfb04ab72024-01-10 18:21:58 +11002418 int i = do_constant_folding_cond1(ctx, op, op->args[0], &op->args[1],
Richard Henderson246c4b72023-10-24 16:36:50 -07002419 &op->args[2], &op->args[3]);
Richard Henderson36355022023-08-04 23:24:04 +00002420 if (i >= 0) {
2421 return tcg_opt_gen_movi(ctx, op, op->args[0], -i);
2422 }
Richard Henderson8d65cda2024-03-26 16:00:40 -10002423
Richard Henderson95eb2292024-12-08 20:47:59 -06002424 i = fold_setcond_zmask(ctx, op, true);
2425 if (i > 0) {
Richard Henderson8d65cda2024-03-26 16:00:40 -10002426 return true;
2427 }
Richard Henderson95eb2292024-12-08 20:47:59 -06002428 if (i == 0) {
2429 fold_setcond_tst_pow2(ctx, op, true);
2430 }
Richard Henderson36355022023-08-04 23:24:04 +00002431
2432 /* Value is {0,-1} so all bits are repetitions of the sign. */
Richard Henderson081cf082024-12-08 20:50:58 -06002433 return fold_masks_s(ctx, op, -1);
Richard Henderson36355022023-08-04 23:24:04 +00002434}
2435
Richard Hendersonbc47b1a2021-08-24 09:09:35 -07002436static bool fold_setcond2(OptContext *ctx, TCGOp *op)
2437{
Richard Henderson7e64b112023-10-24 16:53:56 -07002438 TCGCond cond;
Richard Henderson7a2f7082021-08-26 07:06:39 -07002439 int i, inv = 0;
Richard Hendersonbc47b1a2021-08-24 09:09:35 -07002440
Richard Hendersonfb04ab72024-01-10 18:21:58 +11002441 i = do_constant_folding_cond2(ctx, op, &op->args[1]);
Richard Henderson7e64b112023-10-24 16:53:56 -07002442 cond = op->args[5];
Richard Hendersonbc47b1a2021-08-24 09:09:35 -07002443 if (i >= 0) {
2444 goto do_setcond_const;
2445 }
2446
2447 switch (cond) {
2448 case TCG_COND_LT:
2449 case TCG_COND_GE:
2450 /*
2451 * Simplify LT/GE comparisons vs zero to a single compare
2452 * vs the high word of the input.
2453 */
Richard Henderson27cdb852023-10-23 11:38:00 -07002454 if (arg_is_const_val(op->args[3], 0) &&
2455 arg_is_const_val(op->args[4], 0)) {
Richard Hendersonbc47b1a2021-08-24 09:09:35 -07002456 goto do_setcond_high;
2457 }
2458 break;
2459
2460 case TCG_COND_NE:
2461 inv = 1;
2462 QEMU_FALLTHROUGH;
2463 case TCG_COND_EQ:
2464 /*
2465 * Simplify EQ/NE comparisons where one of the pairs
2466 * can be simplified.
2467 */
Richard Henderson67f84c92021-08-25 08:00:20 -07002468 i = do_constant_folding_cond(TCG_TYPE_I32, op->args[1],
Richard Hendersonbc47b1a2021-08-24 09:09:35 -07002469 op->args[3], cond);
2470 switch (i ^ inv) {
2471 case 0:
2472 goto do_setcond_const;
2473 case 1:
2474 goto do_setcond_high;
2475 }
2476
Richard Henderson67f84c92021-08-25 08:00:20 -07002477 i = do_constant_folding_cond(TCG_TYPE_I32, op->args[2],
Richard Hendersonbc47b1a2021-08-24 09:09:35 -07002478 op->args[4], cond);
2479 switch (i ^ inv) {
2480 case 0:
2481 goto do_setcond_const;
2482 case 1:
Richard Hendersonceb9ee02023-10-23 23:44:27 -07002483 goto do_setcond_low;
2484 }
2485 break;
2486
2487 case TCG_COND_TSTEQ:
2488 case TCG_COND_TSTNE:
Richard Hendersona71d9df2024-06-30 19:46:23 -07002489 if (arg_is_const_val(op->args[3], 0)) {
Richard Hendersonceb9ee02023-10-23 23:44:27 -07002490 goto do_setcond_high;
2491 }
2492 if (arg_is_const_val(op->args[4], 0)) {
2493 goto do_setcond_low;
Richard Hendersonbc47b1a2021-08-24 09:09:35 -07002494 }
2495 break;
2496
2497 default:
2498 break;
2499
Richard Hendersonceb9ee02023-10-23 23:44:27 -07002500 do_setcond_low:
2501 op->args[2] = op->args[3];
2502 op->args[3] = cond;
2503 op->opc = INDEX_op_setcond_i32;
2504 return fold_setcond(ctx, op);
2505
Richard Hendersonbc47b1a2021-08-24 09:09:35 -07002506 do_setcond_high:
2507 op->args[1] = op->args[2];
2508 op->args[2] = op->args[4];
2509 op->args[3] = cond;
2510 op->opc = INDEX_op_setcond_i32;
Richard Hendersonceb9ee02023-10-23 23:44:27 -07002511 return fold_setcond(ctx, op);
Richard Hendersonbc47b1a2021-08-24 09:09:35 -07002512 }
Richard Hendersonfae450b2021-08-25 22:42:19 -07002513
Richard Hendersona53502c2024-12-08 20:56:36 -06002514 return fold_masks_z(ctx, op, 1);
Richard Hendersonbc47b1a2021-08-24 09:09:35 -07002515
2516 do_setcond_const:
2517 return tcg_opt_gen_movi(ctx, op, op->args[0], i);
2518}
2519
Richard Hendersonb6617c82021-08-24 10:44:53 -07002520static bool fold_sextract(OptContext *ctx, TCGOp *op)
2521{
Richard Henderson57fe5c62021-08-26 12:04:46 -07002522 uint64_t z_mask, s_mask, s_mask_old;
Richard Hendersonbaff5072024-12-08 21:09:30 -06002523 TempOptInfo *t1 = arg_info(op->args[1]);
Richard Henderson57fe5c62021-08-26 12:04:46 -07002524 int pos = op->args[2];
2525 int len = op->args[3];
Richard Hendersonfae450b2021-08-25 22:42:19 -07002526
Richard Hendersonbaff5072024-12-08 21:09:30 -06002527 if (ti_is_const(t1)) {
2528 return tcg_opt_gen_movi(ctx, op, op->args[0],
2529 sextract64(ti_const_val(t1), pos, len));
Richard Hendersonb6617c82021-08-24 10:44:53 -07002530 }
Richard Hendersonfae450b2021-08-25 22:42:19 -07002531
Richard Hendersonbaff5072024-12-08 21:09:30 -06002532 s_mask_old = t1->s_mask;
2533 s_mask = s_mask_old >> pos;
2534 s_mask |= -1ull << (len - 1);
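    /*
     * e.g. sextract r,x,0,8: the result replicates bit 7 upward, so
     * s_mask gains at least bits 63..7 regardless of s_mask_old.
     */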
Richard Henderson57fe5c62021-08-26 12:04:46 -07002535
Richard Hendersonaa9e0502024-12-21 22:03:53 -08002536 if (pos == 0 && fold_affected_mask(ctx, op, s_mask & ~s_mask_old)) {
Richard Henderson045ace32024-12-19 10:33:51 -08002537 return true;
Richard Henderson57fe5c62021-08-26 12:04:46 -07002538 }
2539
Richard Hendersonbaff5072024-12-08 21:09:30 -06002540 z_mask = sextract64(t1->z_mask, pos, len);
2541 return fold_masks_zs(ctx, op, z_mask, s_mask);
Richard Hendersonb6617c82021-08-24 10:44:53 -07002542}
2543
Richard Henderson2f9f08b2021-08-25 12:03:48 -07002544static bool fold_shift(OptContext *ctx, TCGOp *op)
2545{
Richard Henderson4ed2ba32024-12-19 19:38:54 -08002546 uint64_t s_mask, z_mask;
Richard Henderson4e9ce6a2024-12-08 21:13:41 -06002547 TempOptInfo *t1, *t2;
Richard Henderson93a967f2021-08-26 13:24:59 -07002548
Richard Hendersona63ce0e2021-08-25 20:28:53 -07002549 if (fold_const2(ctx, op) ||
Richard Hendersonda48e272021-08-25 20:42:04 -07002550 fold_ix_to_i(ctx, op, 0) ||
Richard Hendersona63ce0e2021-08-25 20:28:53 -07002551 fold_xi_to_x(ctx, op, 0)) {
2552 return true;
2553 }
Richard Hendersonfae450b2021-08-25 22:42:19 -07002554
Richard Henderson4e9ce6a2024-12-08 21:13:41 -06002555 t1 = arg_info(op->args[1]);
2556 t2 = arg_info(op->args[2]);
2557 s_mask = t1->s_mask;
2558 z_mask = t1->z_mask;
Richard Henderson93a967f2021-08-26 13:24:59 -07002559
Richard Henderson4e9ce6a2024-12-08 21:13:41 -06002560 if (ti_is_const(t2)) {
2561 int sh = ti_const_val(t2);
Richard Henderson93a967f2021-08-26 13:24:59 -07002562
Richard Henderson4e9ce6a2024-12-08 21:13:41 -06002563 z_mask = do_constant_folding(op->opc, ctx->type, z_mask, sh);
Richard Henderson93a967f2021-08-26 13:24:59 -07002564 s_mask = do_constant_folding(op->opc, ctx->type, s_mask, sh);
Richard Henderson93a967f2021-08-26 13:24:59 -07002565
Richard Henderson4e9ce6a2024-12-08 21:13:41 -06002566 return fold_masks_zs(ctx, op, z_mask, s_mask);
Richard Hendersonfae450b2021-08-25 22:42:19 -07002567 }
Richard Henderson93a967f2021-08-26 13:24:59 -07002568
2569 switch (op->opc) {
2570 CASE_OP_32_64(sar):
2571 /*
2572 * Arithmetic right shift will not reduce the number of
2573 * input sign repetitions.
2574 */
Richard Henderson4e9ce6a2024-12-08 21:13:41 -06002575 return fold_masks_s(ctx, op, s_mask);
Richard Henderson93a967f2021-08-26 13:24:59 -07002576 CASE_OP_32_64(shr):
2577 /*
2578 * If the sign bit is known zero, then logical right shift
Richard Henderson4e9ce6a2024-12-08 21:13:41 -06002579 * will not reduce the number of input sign repetitions.
Richard Henderson93a967f2021-08-26 13:24:59 -07002580 */
Richard Henderson4ed2ba32024-12-19 19:38:54 -08002581 if (~z_mask & -s_mask) {
Richard Henderson4e9ce6a2024-12-08 21:13:41 -06002582 return fold_masks_s(ctx, op, s_mask);
Richard Henderson93a967f2021-08-26 13:24:59 -07002583 }
2584 break;
2585 default:
2586 break;
2587 }
2588
Richard Henderson4e9ce6a2024-12-08 21:13:41 -06002589 return finish_folding(ctx, op);
Richard Henderson2f9f08b2021-08-25 12:03:48 -07002590}
2591
Richard Henderson9caca882021-08-24 13:30:32 -07002592static bool fold_sub_to_neg(OptContext *ctx, TCGOp *op)
2593{
2594 TCGOpcode neg_op;
2595 bool have_neg;
2596
2597 if (!arg_is_const(op->args[1]) || arg_info(op->args[1])->val != 0) {
2598 return false;
2599 }
2600
2601 switch (ctx->type) {
2602 case TCG_TYPE_I32:
2603 neg_op = INDEX_op_neg_i32;
Richard Hendersonb701f192023-10-25 21:14:04 -07002604 have_neg = true;
Richard Henderson9caca882021-08-24 13:30:32 -07002605 break;
2606 case TCG_TYPE_I64:
2607 neg_op = INDEX_op_neg_i64;
Richard Hendersonb701f192023-10-25 21:14:04 -07002608 have_neg = true;
Richard Henderson9caca882021-08-24 13:30:32 -07002609 break;
2610 case TCG_TYPE_V64:
2611 case TCG_TYPE_V128:
2612 case TCG_TYPE_V256:
2613 neg_op = INDEX_op_neg_vec;
2614 have_neg = (TCG_TARGET_HAS_neg_vec &&
2615 tcg_can_emit_vec_op(neg_op, ctx->type, TCGOP_VECE(op)) > 0);
2616 break;
2617 default:
2618 g_assert_not_reached();
2619 }
2620 if (have_neg) {
2621 op->opc = neg_op;
2622 op->args[1] = op->args[2];
Richard Hendersone25fe882024-04-04 20:53:50 +00002623 return fold_neg_no_const(ctx, op);
Richard Henderson9caca882021-08-24 13:30:32 -07002624 }
2625 return false;
2626}
2627
Richard Hendersonc578ff12021-12-16 06:07:25 -08002628/* We cannot as yet do_constant_folding with vectors. */
2629static bool fold_sub_vec(OptContext *ctx, TCGOp *op)
Richard Henderson2f9f08b2021-08-25 12:03:48 -07002630{
Richard Hendersonc578ff12021-12-16 06:07:25 -08002631 if (fold_xx_to_i(ctx, op, 0) ||
Richard Hendersona63ce0e2021-08-25 20:28:53 -07002632 fold_xi_to_x(ctx, op, 0) ||
Richard Henderson9caca882021-08-24 13:30:32 -07002633 fold_sub_to_neg(ctx, op)) {
Richard Hendersoncbe42fb2021-08-25 13:02:00 -07002634 return true;
2635 }
Richard Hendersonfe1d0072024-12-08 21:15:22 -06002636 return finish_folding(ctx, op);
Richard Henderson2f9f08b2021-08-25 12:03:48 -07002637}
2638
Richard Hendersonc578ff12021-12-16 06:07:25 -08002639static bool fold_sub(OptContext *ctx, TCGOp *op)
2640{
Richard Hendersonfe1d0072024-12-08 21:15:22 -06002641 if (fold_const2(ctx, op) ||
2642 fold_xx_to_i(ctx, op, 0) ||
2643 fold_xi_to_x(ctx, op, 0) ||
2644 fold_sub_to_neg(ctx, op)) {
Richard Henderson6334a962023-10-25 18:39:43 -07002645 return true;
2646 }
2647
2648 /* Fold sub r,x,i to add r,x,-i */
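    /* e.g. sub r,x,$5 becomes add r,x,$-5 (illustrative operands). */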
2649 if (arg_is_const(op->args[2])) {
2650 uint64_t val = arg_info(op->args[2])->val;
2651
Richard Henderson79602f62025-01-06 09:11:39 -08002652 op->opc = INDEX_op_add;
Richard Henderson6334a962023-10-25 18:39:43 -07002653 op->args[2] = arg_new_constant(ctx, -val);
2654 }
Richard Hendersonfe1d0072024-12-08 21:15:22 -06002655 return finish_folding(ctx, op);
Richard Hendersonc578ff12021-12-16 06:07:25 -08002656}
2657
Richard Henderson9531c072021-08-26 06:51:39 -07002658static bool fold_sub2(OptContext *ctx, TCGOp *op)
Richard Hendersone3f7dc22021-08-24 10:30:38 -07002659{
Richard Henderson9531c072021-08-26 06:51:39 -07002660 return fold_addsub2(ctx, op, false);
Richard Hendersone3f7dc22021-08-24 10:30:38 -07002661}
2662
Richard Hendersonfae450b2021-08-25 22:42:19 -07002663static bool fold_tcg_ld(OptContext *ctx, TCGOp *op)
2664{
Richard Hendersond33e0f02024-12-09 08:53:20 -06002665 uint64_t z_mask = -1, s_mask = 0;
2666
Richard Hendersonfae450b2021-08-25 22:42:19 -07002667 /* We can't do any folding with a load, but we can record bits. */
2668 switch (op->opc) {
Richard Henderson57fe5c62021-08-26 12:04:46 -07002669 CASE_OP_32_64(ld8s):
Richard Hendersond33e0f02024-12-09 08:53:20 -06002670 s_mask = INT8_MIN;
Richard Henderson57fe5c62021-08-26 12:04:46 -07002671 break;
Richard Hendersonfae450b2021-08-25 22:42:19 -07002672 CASE_OP_32_64(ld8u):
Richard Hendersond33e0f02024-12-09 08:53:20 -06002673 z_mask = MAKE_64BIT_MASK(0, 8);
Richard Henderson57fe5c62021-08-26 12:04:46 -07002674 break;
2675 CASE_OP_32_64(ld16s):
Richard Hendersond33e0f02024-12-09 08:53:20 -06002676 s_mask = INT16_MIN;
Richard Hendersonfae450b2021-08-25 22:42:19 -07002677 break;
2678 CASE_OP_32_64(ld16u):
Richard Hendersond33e0f02024-12-09 08:53:20 -06002679 z_mask = MAKE_64BIT_MASK(0, 16);
Richard Henderson57fe5c62021-08-26 12:04:46 -07002680 break;
2681 case INDEX_op_ld32s_i64:
Richard Hendersond33e0f02024-12-09 08:53:20 -06002682 s_mask = INT32_MIN;
Richard Hendersonfae450b2021-08-25 22:42:19 -07002683 break;
2684 case INDEX_op_ld32u_i64:
Richard Hendersond33e0f02024-12-09 08:53:20 -06002685 z_mask = MAKE_64BIT_MASK(0, 32);
Richard Hendersonfae450b2021-08-25 22:42:19 -07002686 break;
2687 default:
2688 g_assert_not_reached();
2689 }
Richard Hendersond33e0f02024-12-09 08:53:20 -06002690 return fold_masks_zs(ctx, op, z_mask, s_mask);
Richard Hendersonfae450b2021-08-25 22:42:19 -07002691}
2692
Richard Hendersonab84dc32023-08-23 23:04:24 -07002693static bool fold_tcg_ld_memcopy(OptContext *ctx, TCGOp *op)
2694{
2695 TCGTemp *dst, *src;
2696 intptr_t ofs;
2697 TCGType type;
2698
2699 if (op->args[1] != tcgv_ptr_arg(tcg_env)) {
Richard Henderson0fb5b752024-12-09 09:44:40 -06002700 return finish_folding(ctx, op);
Richard Hendersonab84dc32023-08-23 23:04:24 -07002701 }
2702
2703 type = ctx->type;
2704 ofs = op->args[2];
2705 dst = arg_temp(op->args[0]);
2706 src = find_mem_copy_for(ctx, type, ofs);
2707 if (src && src->base_type == type) {
2708 return tcg_opt_gen_mov(ctx, op, temp_arg(dst), temp_arg(src));
2709 }
2710
2711 reset_ts(ctx, dst);
2712 record_mem_copy(ctx, type, dst, ofs, ofs + tcg_type_size(type) - 1);
2713 return true;
2714}
2715
2716static bool fold_tcg_st(OptContext *ctx, TCGOp *op)
2717{
2718 intptr_t ofs = op->args[2];
2719 intptr_t lm1;
2720
2721 if (op->args[1] != tcgv_ptr_arg(tcg_env)) {
2722 remove_mem_copy_all(ctx);
Richard Henderson082b3ef2024-12-08 20:34:57 -06002723 return true;
Richard Hendersonab84dc32023-08-23 23:04:24 -07002724 }
2725
2726 switch (op->opc) {
2727 CASE_OP_32_64(st8):
2728 lm1 = 0;
2729 break;
2730 CASE_OP_32_64(st16):
2731 lm1 = 1;
2732 break;
2733 case INDEX_op_st32_i64:
2734 case INDEX_op_st_i32:
2735 lm1 = 3;
2736 break;
2737 case INDEX_op_st_i64:
2738 lm1 = 7;
2739 break;
2740 case INDEX_op_st_vec:
2741 lm1 = tcg_type_size(ctx->type) - 1;
2742 break;
2743 default:
2744 g_assert_not_reached();
2745 }
2746 remove_mem_copy_in(ctx, ofs, ofs + lm1);
Richard Henderson082b3ef2024-12-08 20:34:57 -06002747 return true;
Richard Hendersonab84dc32023-08-23 23:04:24 -07002748}
2749
static bool fold_tcg_st_memcopy(OptContext *ctx, TCGOp *op)
{
    TCGTemp *src;
    intptr_t ofs, last;
    TCGType type;

    if (op->args[1] != tcgv_ptr_arg(tcg_env)) {
        return fold_tcg_st(ctx, op);
    }

    src = arg_temp(op->args[0]);
    ofs = op->args[2];
    type = ctx->type;

    /*
     * Eliminate duplicate stores of a constant.
     * This happens frequently when the target ISA zero-extends.
     */
    if (ts_is_const(src)) {
        TCGTemp *prev = find_mem_copy_for(ctx, type, ofs);
        if (src == prev) {
            tcg_op_remove(ctx->tcg, op);
            return true;
        }
    }

    last = ofs + tcg_type_size(type) - 1;
    remove_mem_copy_in(ctx, ofs, last);
    record_mem_copy(ctx, type, src, ofs, last);
    return true;
}
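
/*
 * Simplifications tried first: x ^ x -> 0, x ^ 0 -> x, x ^ -1 -> not x,
 * plus full constant folding.  Failing those, known bits still combine:
 * a result bit may be nonzero only if it may be nonzero in either input
 * (z_mask ORs), and is sign-replicated only where both inputs are
 * (s_mask ANDs).
 */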
static bool fold_xor(OptContext *ctx, TCGOp *op)
{
    uint64_t z_mask, s_mask;
    TempOptInfo *t1, *t2;

    if (fold_const2_commutative(ctx, op) ||
        fold_xx_to_i(ctx, op, 0) ||
        fold_xi_to_x(ctx, op, 0) ||
        fold_xi_to_not(ctx, op, -1)) {
        return true;
    }

    t1 = arg_info(op->args[1]);
    t2 = arg_info(op->args[2]);
    z_mask = t1->z_mask | t2->z_mask;
    s_mask = t1->s_mask & t2->s_mask;
    return fold_masks_zs(ctx, op, z_mask, s_mask);
}

/* Propagate constants and copies, fold constant expressions. */
void tcg_optimize(TCGContext *s)
{
    int nb_temps, i;
    TCGOp *op, *op_next;
    OptContext ctx = { .tcg = s };

    QSIMPLEQ_INIT(&ctx.mem_free);

    /*
     * Each temp's TempOptInfo, reached via state_ptr, records whether
     * the temp holds a constant and, when it is a copy of other temps,
     * links all copies together in a doubly linked circular list.
     */

    nb_temps = s->nb_temps;
    for (i = 0; i < nb_temps; ++i) {
        s->temps[i].state_ptr = NULL;
    }
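
    /*
     * Walk the ops once: propagate known constants and copies into each
     * op's arguments, then let an opcode-specific folder simplify it.
     * Every path must claim the op as handled ("done"), whether it was
     * removed, rewritten, or merely had its output bits recorded.
     */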
    QTAILQ_FOREACH_SAFE(op, &s->ops, link, op_next) {
        TCGOpcode opc = op->opc;
        const TCGOpDef *def;
        bool done = false;

        /* Calls are special. */
        if (opc == INDEX_op_call) {
            fold_call(&ctx, op);
            continue;
        }

        def = &tcg_op_defs[opc];
        init_arguments(&ctx, op, def->nb_oargs + def->nb_iargs);
        copy_propagate(&ctx, op, def->nb_oargs, def->nb_iargs);

        /* Pre-compute the type of the operation. */
        ctx.type = TCGOP_TYPE(op);

        /*
         * Process each opcode.
         * Sorted alphabetically by opcode as much as possible.
         */
        switch (opc) {
        case INDEX_op_add:
            done = fold_add(&ctx, op);
            break;
        case INDEX_op_add_vec:
            done = fold_add_vec(&ctx, op);
            break;
        CASE_OP_32_64(add2):
            done = fold_add2(&ctx, op);
            break;
        case INDEX_op_and:
        case INDEX_op_and_vec:
            done = fold_and(&ctx, op);
            break;
        case INDEX_op_andc:
        case INDEX_op_andc_vec:
            done = fold_andc(&ctx, op);
            break;
        CASE_OP_32_64(brcond):
            done = fold_brcond(&ctx, op);
            break;
        case INDEX_op_brcond2_i32:
            done = fold_brcond2(&ctx, op);
            break;
        CASE_OP_32_64(bswap16):
        CASE_OP_32_64(bswap32):
        case INDEX_op_bswap64_i64:
            done = fold_bswap(&ctx, op);
            break;
        CASE_OP_32_64(clz):
        CASE_OP_32_64(ctz):
            done = fold_count_zeros(&ctx, op);
            break;
        CASE_OP_32_64(ctpop):
            done = fold_ctpop(&ctx, op);
            break;
        CASE_OP_32_64(deposit):
            done = fold_deposit(&ctx, op);
            break;
        CASE_OP_32_64(div):
        CASE_OP_32_64(divu):
            done = fold_divide(&ctx, op);
            break;
        case INDEX_op_dup_vec:
            done = fold_dup(&ctx, op);
            break;
        case INDEX_op_dup2_vec:
            done = fold_dup2(&ctx, op);
            break;
        CASE_OP_32_64_VEC(eqv):
            done = fold_eqv(&ctx, op);
            break;
        CASE_OP_32_64(extract):
            done = fold_extract(&ctx, op);
            break;
        CASE_OP_32_64(extract2):
            done = fold_extract2(&ctx, op);
            break;
        case INDEX_op_ext_i32_i64:
            done = fold_exts(&ctx, op);
            break;
        case INDEX_op_extu_i32_i64:
        case INDEX_op_extrl_i64_i32:
        case INDEX_op_extrh_i64_i32:
            done = fold_extu(&ctx, op);
            break;
        CASE_OP_32_64(ld8s):
        CASE_OP_32_64(ld8u):
        CASE_OP_32_64(ld16s):
        CASE_OP_32_64(ld16u):
        case INDEX_op_ld32s_i64:
        case INDEX_op_ld32u_i64:
            done = fold_tcg_ld(&ctx, op);
            break;
        case INDEX_op_ld_i32:
        case INDEX_op_ld_i64:
        case INDEX_op_ld_vec:
            done = fold_tcg_ld_memcopy(&ctx, op);
            break;
        CASE_OP_32_64(st8):
        CASE_OP_32_64(st16):
        case INDEX_op_st32_i64:
            done = fold_tcg_st(&ctx, op);
            break;
        case INDEX_op_st_i32:
        case INDEX_op_st_i64:
        case INDEX_op_st_vec:
            done = fold_tcg_st_memcopy(&ctx, op);
            break;
        case INDEX_op_mb:
            done = fold_mb(&ctx, op);
            break;
        case INDEX_op_mov:
        case INDEX_op_mov_vec:
            done = fold_mov(&ctx, op);
            break;
        CASE_OP_32_64(movcond):
            done = fold_movcond(&ctx, op);
            break;
        CASE_OP_32_64(mul):
            done = fold_mul(&ctx, op);
            break;
        CASE_OP_32_64(mulsh):
        CASE_OP_32_64(muluh):
            done = fold_mul_highpart(&ctx, op);
            break;
        CASE_OP_32_64(muls2):
        CASE_OP_32_64(mulu2):
            done = fold_multiply2(&ctx, op);
            break;
        CASE_OP_32_64_VEC(nand):
            done = fold_nand(&ctx, op);
            break;
        CASE_OP_32_64(neg):
            done = fold_neg(&ctx, op);
            break;
        CASE_OP_32_64_VEC(nor):
            done = fold_nor(&ctx, op);
            break;
        CASE_OP_32_64_VEC(not):
            done = fold_not(&ctx, op);
            break;
        CASE_OP_32_64_VEC(or):
            done = fold_or(&ctx, op);
            break;
        CASE_OP_32_64_VEC(orc):
            done = fold_orc(&ctx, op);
            break;
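        /*
         * A 64-bit guest load on a 32-bit host returns its value in two
         * registers, like a 128-bit load, so it falls through to share
         * the two-register handler.
         */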
        case INDEX_op_qemu_ld_i32:
            done = fold_qemu_ld_1reg(&ctx, op);
            break;
        case INDEX_op_qemu_ld_i64:
            if (TCG_TARGET_REG_BITS == 64) {
                done = fold_qemu_ld_1reg(&ctx, op);
                break;
            }
            QEMU_FALLTHROUGH;
        case INDEX_op_qemu_ld_i128:
            done = fold_qemu_ld_2reg(&ctx, op);
            break;
        case INDEX_op_qemu_st8_i32:
        case INDEX_op_qemu_st_i32:
        case INDEX_op_qemu_st_i64:
        case INDEX_op_qemu_st_i128:
            done = fold_qemu_st(&ctx, op);
            break;
        CASE_OP_32_64(rem):
        CASE_OP_32_64(remu):
            done = fold_remainder(&ctx, op);
            break;
        CASE_OP_32_64(rotl):
        CASE_OP_32_64(rotr):
        CASE_OP_32_64(sar):
        CASE_OP_32_64(shl):
        CASE_OP_32_64(shr):
            done = fold_shift(&ctx, op);
            break;
        CASE_OP_32_64(setcond):
            done = fold_setcond(&ctx, op);
            break;
        CASE_OP_32_64(negsetcond):
            done = fold_negsetcond(&ctx, op);
            break;
        case INDEX_op_setcond2_i32:
            done = fold_setcond2(&ctx, op);
            break;
        case INDEX_op_cmp_vec:
            done = fold_cmp_vec(&ctx, op);
            break;
        case INDEX_op_cmpsel_vec:
            done = fold_cmpsel_vec(&ctx, op);
            break;
        case INDEX_op_bitsel_vec:
            done = fold_bitsel_vec(&ctx, op);
            break;
        CASE_OP_32_64(sextract):
            done = fold_sextract(&ctx, op);
            break;
        CASE_OP_32_64(sub):
            done = fold_sub(&ctx, op);
            break;
        case INDEX_op_sub_vec:
            done = fold_sub_vec(&ctx, op);
            break;
        CASE_OP_32_64(sub2):
            done = fold_sub2(&ctx, op);
            break;
        CASE_OP_32_64_VEC(xor):
            done = fold_xor(&ctx, op);
            break;
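        /*
         * These opcodes begin or end an extended basic block, across
         * which temps cannot be assumed to hold their values; flush all
         * tracked constant and copy information.
         */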
        case INDEX_op_set_label:
        case INDEX_op_br:
        case INDEX_op_exit_tb:
        case INDEX_op_goto_tb:
        case INDEX_op_goto_ptr:
            finish_ebb(&ctx);
            done = true;
            break;
        default:
            done = finish_folding(&ctx, op);
            break;
        }
        tcg_debug_assert(done);
    }
}