/*
 *  emulator main execution loop
 *
 *  Copyright (c) 2003-2005 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"
#include "cpu.h"
#include "trace-root.h"
#include "disas/disas.h"
#include "exec/exec-all.h"
#include "tcg.h"
#include "qemu/atomic.h"
#include "sysemu/qtest.h"
#include "qemu/timer.h"
#include "exec/address-spaces.h"
#include "qemu/rcu.h"
#include "exec/tb-hash.h"
#include "exec/log.h"
#include "qemu/main-loop.h"
#if defined(TARGET_I386) && !defined(CONFIG_USER_ONLY)
#include "hw/i386/apic.h"
#endif
#include "sysemu/replay.h"

/* -icount align implementation. */

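/* Per-cpu_exec() bookkeeping for -icount align: diff_clk is how far the
 * guest clock currently runs ahead of the host clock (in ns),
 * last_cpu_icount is the instruction count at the previous alignment,
 * and realtime_clock is the host clock snapshot used to rate-limit the
 * "guest is late" warnings below.
 */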
typedef struct SyncClocks {
    int64_t diff_clk;
    int64_t last_cpu_icount;
    int64_t realtime_clock;
} SyncClocks;

#if !defined(CONFIG_USER_ONLY)
/* Allow the guest to have at most a 3 ms advance.
 * The difference between the two clocks could therefore
 * oscillate around 0.
 */
#define VM_CLOCK_ADVANCE 3000000
#define THRESHOLD_REDUCE 1.5
#define MAX_DELAY_PRINT_RATE 2000000000LL
#define MAX_NB_PRINTS 100

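/* Bring the host back in step with the guest: convert the instructions
 * executed since the last call into nanoseconds and, if the guest is now
 * more than VM_CLOCK_ADVANCE ahead, sleep off the difference (retrying
 * with the remainder if nanosleep is interrupted).
 */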
static void align_clocks(SyncClocks *sc, const CPUState *cpu)
{
    int64_t cpu_icount;

    if (!icount_align_option) {
        return;
    }

    cpu_icount = cpu->icount_extra + cpu->icount_decr.u16.low;
    sc->diff_clk += cpu_icount_to_ns(sc->last_cpu_icount - cpu_icount);
    sc->last_cpu_icount = cpu_icount;

    if (sc->diff_clk > VM_CLOCK_ADVANCE) {
#ifndef _WIN32
        struct timespec sleep_delay, rem_delay;
        sleep_delay.tv_sec = sc->diff_clk / 1000000000LL;
        sleep_delay.tv_nsec = sc->diff_clk % 1000000000LL;
        if (nanosleep(&sleep_delay, &rem_delay) < 0) {
            sc->diff_clk = rem_delay.tv_sec * 1000000000LL + rem_delay.tv_nsec;
        } else {
            sc->diff_clk = 0;
        }
#else
        Sleep(sc->diff_clk / SCALE_MS);
        sc->diff_clk = 0;
#endif
    }
}

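/* Warn when the guest falls behind the host, at most once every
 * MAX_DELAY_PRINT_RATE ns and MAX_NB_PRINTS times in total; the
 * threshold tracks the observed delay so repeated messages only fire
 * when the lateness changes meaningfully.
 */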
static void print_delay(const SyncClocks *sc)
{
    static float threshold_delay;
    static int64_t last_realtime_clock;
    static int nb_prints;

    if (icount_align_option &&
        sc->realtime_clock - last_realtime_clock >= MAX_DELAY_PRINT_RATE &&
        nb_prints < MAX_NB_PRINTS) {
        if ((-sc->diff_clk / (float)1000000000LL > threshold_delay) ||
            (-sc->diff_clk / (float)1000000000LL <
             (threshold_delay - THRESHOLD_REDUCE))) {
            threshold_delay = (-sc->diff_clk / 1000000000LL) + 1;
            printf("Warning: The guest is now late by %.1f to %.1f seconds\n",
                   threshold_delay - 1,
                   threshold_delay);
            nb_prints++;
            last_realtime_clock = sc->realtime_clock;
        }
    }
}

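/* Snapshot the initial guest/host clock offset for this cpu_exec() call
 * and fold it into the global max_delay/max_advance statistics.
 */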
static void init_delay_params(SyncClocks *sc,
                              const CPUState *cpu)
{
    if (!icount_align_option) {
        return;
    }
    sc->realtime_clock = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL_RT);
    sc->diff_clk = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) - sc->realtime_clock;
    sc->last_cpu_icount = cpu->icount_extra + cpu->icount_decr.u16.low;
    if (sc->diff_clk < max_delay) {
        max_delay = sc->diff_clk;
    }
    if (sc->diff_clk > max_advance) {
        max_advance = sc->diff_clk;
    }

    /* Print at most every 2s if the guest is late. We limit the number
       of printed messages to MAX_NB_PRINTS (currently 100). */
    print_delay(sc);
}
#else
static void align_clocks(SyncClocks *sc, const CPUState *cpu)
{
}

static void init_delay_params(SyncClocks *sc, const CPUState *cpu)
{
}
#endif /* CONFIG_USER_ONLY */

/* Execute a TB, and fix up the CPU state afterwards if necessary */
static inline tcg_target_ulong cpu_tb_exec(CPUState *cpu, TranslationBlock *itb)
{
    CPUArchState *env = cpu->env_ptr;
    uintptr_t ret;
    TranslationBlock *last_tb;
    int tb_exit;
    uint8_t *tb_ptr = itb->tc_ptr;

    qemu_log_mask_and_addr(CPU_LOG_EXEC, itb->pc,
                           "Trace %p [%d: " TARGET_FMT_lx "] %s\n",
                           itb->tc_ptr, cpu->cpu_index, itb->pc,
                           lookup_symbol(itb->pc));

#if defined(DEBUG_DISAS)
    if (qemu_loglevel_mask(CPU_LOG_TB_CPU)
        && qemu_log_in_addr_range(itb->pc)) {
        qemu_log_lock();
#if defined(TARGET_I386)
        log_cpu_state(cpu, CPU_DUMP_CCOP);
#else
        log_cpu_state(cpu, 0);
#endif
        qemu_log_unlock();
    }
#endif /* DEBUG_DISAS */

    cpu->can_do_io = !use_icount;
    ret = tcg_qemu_tb_exec(env, tb_ptr);
    cpu->can_do_io = 1;
    last_tb = (TranslationBlock *)(ret & ~TB_EXIT_MASK);
    tb_exit = ret & TB_EXIT_MASK;
    trace_exec_tb_exit(last_tb, tb_exit);

    if (tb_exit > TB_EXIT_IDX1) {
        /* We didn't start executing this TB (e.g. because the instruction
         * counter hit zero); we must restore the guest PC to the address
         * of the start of the TB.
         */
        CPUClass *cc = CPU_GET_CLASS(cpu);
        qemu_log_mask_and_addr(CPU_LOG_EXEC, last_tb->pc,
                               "Stopped execution of TB chain before %p ["
                               TARGET_FMT_lx "] %s\n",
                               last_tb->tc_ptr, last_tb->pc,
                               lookup_symbol(last_tb->pc));
        if (cc->synchronize_from_tb) {
            cc->synchronize_from_tb(cpu, last_tb);
        } else {
            assert(cc->set_pc);
            cc->set_pc(cpu, last_tb->pc);
        }
    }
    if (tb_exit == TB_EXIT_REQUESTED) {
        /* We were asked to stop executing TBs (probably because of a
         * pending interrupt). We've now stopped, so clear the flag.
         */
        atomic_set(&cpu->tcg_exit_req, 0);
    }
    return ret;
}

#ifndef CONFIG_USER_ONLY
/* Execute the code without caching the generated code. An interpreter
   could be used if available. */
static void cpu_exec_nocache(CPUState *cpu, int max_cycles,
                             TranslationBlock *orig_tb, bool ignore_icount)
{
    TranslationBlock *tb;

    /* Should never happen.
       We only end up here when an existing TB is too long.  */
    if (max_cycles > CF_COUNT_MASK) {
        max_cycles = CF_COUNT_MASK;
    }

    tb_lock();
    tb = tb_gen_code(cpu, orig_tb->pc, orig_tb->cs_base, orig_tb->flags,
                     max_cycles | CF_NOCACHE
                         | (ignore_icount ? CF_IGNORE_ICOUNT : 0));
    tb->orig_tb = orig_tb;
    tb_unlock();

    /* execute the generated code */
    trace_exec_tb_nocache(tb, tb->pc);
    cpu_tb_exec(cpu, tb);

    tb_lock();
    tb_phys_invalidate(tb, -1);
    tb_free(tb);
    tb_unlock();
}
#endif

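/* Translate and execute a single instruction outside the TB cache;
 * the TB is invalidated and freed again as soon as it has run once.
 */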
static void cpu_exec_step(CPUState *cpu)
{
    CPUArchState *env = (CPUArchState *)cpu->env_ptr;
    TranslationBlock *tb;
    target_ulong cs_base, pc;
    uint32_t flags;

    cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);
    tb_lock();
    tb = tb_gen_code(cpu, pc, cs_base, flags,
                     1 | CF_NOCACHE | CF_IGNORE_ICOUNT);
    tb->orig_tb = NULL;
    tb_unlock();
    /* execute the generated code */
    trace_exec_tb_nocache(tb, pc);
    cpu_tb_exec(cpu, tb);
    tb_lock();
    tb_phys_invalidate(tb, -1);
    tb_free(tb);
    tb_unlock();
}

void cpu_exec_step_atomic(CPUState *cpu)
{
    start_exclusive();

    /* Since we got here, we know that parallel_cpus must be true.  */
    parallel_cpus = false;
    cpu_exec_step(cpu);
    parallel_cpus = true;

    end_exclusive();
}

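/* Lookup key for the physically indexed TB hash table; candidate TBs
 * are compared against it by tb_cmp() below.
 */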
struct tb_desc {
    target_ulong pc;
    target_ulong cs_base;
    CPUArchState *env;
    tb_page_addr_t phys_page1;
    uint32_t flags;
};

static bool tb_cmp(const void *p, const void *d)
{
    const TranslationBlock *tb = p;
    const struct tb_desc *desc = d;

    if (tb->pc == desc->pc &&
        tb->page_addr[0] == desc->phys_page1 &&
        tb->cs_base == desc->cs_base &&
        tb->flags == desc->flags &&
        !atomic_read(&tb->invalid)) {
        /* check next page if needed */
        if (tb->page_addr[1] == -1) {
            return true;
        } else {
            tb_page_addr_t phys_page2;
            target_ulong virt_page2;

            virt_page2 = (desc->pc & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
            phys_page2 = get_page_addr_code(desc->env, virt_page2);
            if (tb->page_addr[1] == phys_page2) {
                return true;
            }
        }
    }
    return false;
}

static TranslationBlock *tb_htable_lookup(CPUState *cpu,
                                          target_ulong pc,
                                          target_ulong cs_base,
                                          uint32_t flags)
{
    tb_page_addr_t phys_pc;
    struct tb_desc desc;
    uint32_t h;

    desc.env = (CPUArchState *)cpu->env_ptr;
    desc.cs_base = cs_base;
    desc.flags = flags;
    desc.pc = pc;
    phys_pc = get_page_addr_code(desc.env, pc);
    desc.phys_page1 = phys_pc & TARGET_PAGE_MASK;
    h = tb_hash_func(phys_pc, pc, flags);
    return qht_lookup(&tcg_ctx.tb_ctx.htable, tb_cmp, &desc, h);
}

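/* Find the TB matching the current CPU state: first probe the per-CPU
 * tb_jmp_cache, then the global hash table, and translate a new TB if
 * both miss. If last_tb is given, also try to chain it to the result so
 * future executions can jump directly between the two.
 */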
static inline TranslationBlock *tb_find(CPUState *cpu,
                                        TranslationBlock *last_tb,
                                        int tb_exit)
{
    CPUArchState *env = (CPUArchState *)cpu->env_ptr;
    TranslationBlock *tb;
    target_ulong cs_base, pc;
    uint32_t flags;
    bool have_tb_lock = false;

    /* we record a subset of the CPU state. It will
       always be the same before a given translated block
       is executed. */
    cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);
    tb = atomic_rcu_read(&cpu->tb_jmp_cache[tb_jmp_cache_hash_func(pc)]);
    if (unlikely(!tb || tb->pc != pc || tb->cs_base != cs_base ||
                 tb->flags != flags)) {
        tb = tb_htable_lookup(cpu, pc, cs_base, flags);
        if (!tb) {

            /* mmap_lock is needed by tb_gen_code, and mmap_lock must be
             * taken outside tb_lock. As system emulation is currently
             * single threaded the locks are NOPs.
             */
            mmap_lock();
            tb_lock();
            have_tb_lock = true;

            /* There's a chance that our desired tb has been translated while
             * taking the locks so we check again inside the lock.
             */
            tb = tb_htable_lookup(cpu, pc, cs_base, flags);
            if (!tb) {
                /* if no translated code available, then translate it now */
                tb = tb_gen_code(cpu, pc, cs_base, flags, 0);
            }

            mmap_unlock();
        }

        /* We add the TB in the virtual pc hash table for the fast lookup */
        atomic_set(&cpu->tb_jmp_cache[tb_jmp_cache_hash_func(pc)], tb);
    }
#ifndef CONFIG_USER_ONLY
    /* We don't take care of direct jumps when address mapping changes in
     * system emulation. So it's not safe to make a direct jump to a TB
     * spanning two pages because the mapping for the second page can change.
     */
    if (tb->page_addr[1] != -1) {
        last_tb = NULL;
    }
#endif
    /* See if we can patch the calling TB. */
    if (last_tb && !qemu_loglevel_mask(CPU_LOG_TB_NOCHAIN)) {
        if (!have_tb_lock) {
            tb_lock();
            have_tb_lock = true;
        }
        if (!tb->invalid) {
            tb_add_jump(last_tb, tb_exit, tb);
        }
    }
    if (have_tb_lock) {
        tb_unlock();
    }
    return tb;
}

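/* Returns true if the CPU is halted and has no work, in which case
 * cpu_exec() bails out with EXCP_HALTED; otherwise the halted state is
 * cleared and execution continues.
 */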
static inline bool cpu_handle_halt(CPUState *cpu)
{
    if (cpu->halted) {
#if defined(TARGET_I386) && !defined(CONFIG_USER_ONLY)
        if ((cpu->interrupt_request & CPU_INTERRUPT_POLL)
            && replay_interrupt()) {
            X86CPU *x86_cpu = X86_CPU(cpu);
            qemu_mutex_lock_iothread();
            apic_poll_irq(x86_cpu->apic_state);
            cpu_reset_interrupt(cpu, CPU_INTERRUPT_POLL);
            qemu_mutex_unlock_iothread();
        }
#endif
        if (!cpu_has_work(cpu)) {
            current_cpu = NULL;
            return true;
        }

        cpu->halted = 0;
    }

    return false;
}

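/* Clear any stale watchpoint-hit flags before handing the debug
 * exception to the per-CPU-class handler.
 */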
static inline void cpu_handle_debug_exception(CPUState *cpu)
{
    CPUClass *cc = CPU_GET_CLASS(cpu);
    CPUWatchpoint *wp;

    if (!cpu->watchpoint_hit) {
        QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
            wp->flags &= ~BP_WATCHPOINT_HIT;
        }
    }

    cc->debug_excp_handler(cpu);
}

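/* Process a pending cpu->exception_index. Returns true when cpu_exec()
 * should leave the execution loop and return *ret to the caller.
 */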
static inline bool cpu_handle_exception(CPUState *cpu, int *ret)
{
    if (cpu->exception_index >= 0) {
        if (cpu->exception_index >= EXCP_INTERRUPT) {
            /* exit request from the cpu execution loop */
            *ret = cpu->exception_index;
            if (*ret == EXCP_DEBUG) {
                cpu_handle_debug_exception(cpu);
            }
            cpu->exception_index = -1;
            return true;
        } else {
#if defined(CONFIG_USER_ONLY)
            /* In user-mode emulation only, we simulate a fake exception
               which will be handled outside the cpu execution loop. */
#if defined(TARGET_I386)
            CPUClass *cc = CPU_GET_CLASS(cpu);
            cc->do_interrupt(cpu);
#endif
            *ret = cpu->exception_index;
            cpu->exception_index = -1;
            return true;
#else
            if (replay_exception()) {
                CPUClass *cc = CPU_GET_CLASS(cpu);
                qemu_mutex_lock_iothread();
                cc->do_interrupt(cpu);
                qemu_mutex_unlock_iothread();
                cpu->exception_index = -1;
            } else if (!replay_has_interrupt()) {
                /* give a chance to iothread in replay mode */
                *ret = EXCP_INTERRUPT;
                return true;
            }
#endif
        }
#ifndef CONFIG_USER_ONLY
    } else if (replay_has_exception()
               && cpu->icount_decr.u16.low + cpu->icount_extra == 0) {
        /* try to cause an exception pending in the log */
        cpu_exec_nocache(cpu, 1, tb_find(cpu, NULL, 0), true);
        *ret = -1;
        return true;
#endif
    }

    return false;
}

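/* Service pending interrupt_request flags under the iothread lock.
 * Returns true when an exception (debug, halt, exit request, ...) has
 * been raised and the inner execution loop should restart.
 */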
static inline bool cpu_handle_interrupt(CPUState *cpu,
                                        TranslationBlock **last_tb)
{
    CPUClass *cc = CPU_GET_CLASS(cpu);

    if (unlikely(atomic_read(&cpu->interrupt_request))) {
        int interrupt_request;
        qemu_mutex_lock_iothread();
        interrupt_request = cpu->interrupt_request;
        if (unlikely(cpu->singlestep_enabled & SSTEP_NOIRQ)) {
            /* Mask out external interrupts for this step. */
            interrupt_request &= ~CPU_INTERRUPT_SSTEP_MASK;
        }
        if (interrupt_request & CPU_INTERRUPT_DEBUG) {
            cpu->interrupt_request &= ~CPU_INTERRUPT_DEBUG;
            cpu->exception_index = EXCP_DEBUG;
            qemu_mutex_unlock_iothread();
            return true;
        }
        if (replay_mode == REPLAY_MODE_PLAY && !replay_has_interrupt()) {
            /* Do nothing */
        } else if (interrupt_request & CPU_INTERRUPT_HALT) {
            replay_interrupt();
            cpu->interrupt_request &= ~CPU_INTERRUPT_HALT;
            cpu->halted = 1;
            cpu->exception_index = EXCP_HLT;
            qemu_mutex_unlock_iothread();
            return true;
        }
#if defined(TARGET_I386)
        else if (interrupt_request & CPU_INTERRUPT_INIT) {
            X86CPU *x86_cpu = X86_CPU(cpu);
            CPUArchState *env = &x86_cpu->env;
            replay_interrupt();
            cpu_svm_check_intercept_param(env, SVM_EXIT_INIT, 0, 0);
            do_cpu_init(x86_cpu);
            cpu->exception_index = EXCP_HALTED;
            qemu_mutex_unlock_iothread();
            return true;
        }
#else
        else if (interrupt_request & CPU_INTERRUPT_RESET) {
            replay_interrupt();
            cpu_reset(cpu);
            qemu_mutex_unlock_iothread();
            return true;
        }
#endif
        /* The target hook has three exit conditions: false when the
           interrupt isn't processed, true when it is and we should
           restart on a new TB, or it may leave via longjmp through
           cpu_loop_exit. */
        else {
            if (cc->cpu_exec_interrupt(cpu, interrupt_request)) {
                replay_interrupt();
                *last_tb = NULL;
            }
            /* The target hook may have updated the 'cpu->interrupt_request';
             * reload the 'interrupt_request' value */
            interrupt_request = cpu->interrupt_request;
        }
        if (interrupt_request & CPU_INTERRUPT_EXITTB) {
            cpu->interrupt_request &= ~CPU_INTERRUPT_EXITTB;
            /* ensure that no TB jump will be modified as
               the program flow was changed */
            *last_tb = NULL;
        }

        /* If we exit via cpu_loop_exit/longjmp it is reset in cpu_exec */
        qemu_mutex_unlock_iothread();
    }

    if (unlikely(atomic_read(&cpu->exit_request) || replay_has_interrupt())) {
        atomic_set(&cpu->exit_request, 0);
        cpu->exception_index = EXCP_INTERRUPT;
        return true;
    }

    return false;
}

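/* Execute one TB and dispatch on its exit status: an exit request drops
 * back to the main loop, an expired instruction counter is refilled or
 * finished off with cpu_exec_nocache(), and a normal exit records the
 * TB as last_tb so the next one can be chained to it.
 */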
static inline void cpu_loop_exec_tb(CPUState *cpu, TranslationBlock *tb,
                                    TranslationBlock **last_tb, int *tb_exit,
                                    SyncClocks *sc)
{
    uintptr_t ret;

    if (unlikely(atomic_read(&cpu->exit_request))) {
        return;
    }

    trace_exec_tb(tb, tb->pc);
    ret = cpu_tb_exec(cpu, tb);
    tb = (TranslationBlock *)(ret & ~TB_EXIT_MASK);
    *tb_exit = ret & TB_EXIT_MASK;
    switch (*tb_exit) {
    case TB_EXIT_REQUESTED:
        /* Something asked us to stop executing chained TBs; just
         * continue round the main loop. Whatever requested the exit
         * will also have set something else (e.g. interrupt_request)
         * which we will handle next time around the loop.  But we
         * need to ensure the tcg_exit_req read in generated code
         * comes before the next read of cpu->exit_request or
         * cpu->interrupt_request.
         */
        smp_mb();
        *last_tb = NULL;
        break;
    case TB_EXIT_ICOUNT_EXPIRED:
    {
        /* Instruction counter expired.  */
#ifdef CONFIG_USER_ONLY
        abort();
#else
        int insns_left = cpu->icount_decr.u32;
        *last_tb = NULL;
        if (cpu->icount_extra && insns_left >= 0) {
            /* Refill decrementer and continue execution.  */
            cpu->icount_extra += insns_left;
            insns_left = MIN(0xffff, cpu->icount_extra);
            cpu->icount_extra -= insns_left;
            cpu->icount_decr.u16.low = insns_left;
        } else {
            if (insns_left > 0) {
                /* Execute remaining instructions.  */
                cpu_exec_nocache(cpu, insns_left, tb, false);
                align_clocks(sc, cpu);
            }
            cpu->exception_index = EXCP_INTERRUPT;
            cpu_loop_exit(cpu);
        }
        break;
#endif
    }
    default:
        *last_tb = tb;
        break;
    }
}

/* main execution loop */

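/* The outer loop handles pending exceptions; the inner loop services
 * interrupts, then finds and runs TBs until something (an interrupt,
 * an exception or an exit request) forces a longjmp back through
 * cpu->jmp_env via cpu_loop_exit().
 */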
int cpu_exec(CPUState *cpu)
{
    CPUClass *cc = CPU_GET_CLASS(cpu);
    int ret;
    SyncClocks sc;

    /* replay_interrupt may need current_cpu */
    current_cpu = cpu;

    if (cpu_handle_halt(cpu)) {
        return EXCP_HALTED;
    }

    rcu_read_lock();

    cc->cpu_exec_enter(cpu);

    /* Calculate difference between guest clock and host clock.
     * This delay includes the delay of the last cycle, so
     * what we have to do is sleep until it is 0. As for the
     * advance/delay we gain here, we try to fix it next time.
     */
    init_delay_params(&sc, cpu);

    /* prepare setjmp context for exception handling */
    if (sigsetjmp(cpu->jmp_env, 0) != 0) {
#if defined(__clang__) || !QEMU_GNUC_PREREQ(4, 6)
        /* Some compilers wrongly smash all local variables after
         * siglongjmp. There were bug reports for gcc 4.5.0 and clang.
         * Reload essential local variables here for those compilers.
         * Newer versions of gcc would complain about this code (-Wclobbered). */
        cpu = current_cpu;
        cc = CPU_GET_CLASS(cpu);
#else /* buggy compiler */
        /* Assert that the compiler does not smash local variables. */
        g_assert(cpu == current_cpu);
        g_assert(cc == CPU_GET_CLASS(cpu));
#endif /* buggy compiler */
        cpu->can_do_io = 1;
        tb_lock_reset();
        if (qemu_mutex_iothread_locked()) {
            qemu_mutex_unlock_iothread();
        }
    }

    /* if an exception is pending, we execute it here */
    while (!cpu_handle_exception(cpu, &ret)) {
        TranslationBlock *last_tb = NULL;
        int tb_exit = 0;

        while (!cpu_handle_interrupt(cpu, &last_tb)) {
            TranslationBlock *tb = tb_find(cpu, last_tb, tb_exit);
            cpu_loop_exec_tb(cpu, tb, &last_tb, &tb_exit, &sc);
            /* Try to align the host and virtual clocks
               if the guest is in advance */
            align_clocks(&sc, cpu);
        }
    }

    cc->cpu_exec_exit(cpu);
    rcu_read_unlock();

    /* fail safe: never use current_cpu outside cpu_exec() */
    current_cpu = NULL;

    return ret;
}