/*
 * CPU thread main loop - common bits for user and system mode emulation
 *
 * Copyright (c) 2003-2005 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "qemu/main-loop.h"
#include "exec/cpu-common.h"
#include "hw/core/cpu.h"
#include "sysemu/cpus.h"
#include "qemu/lockable.h"
#include "trace/trace-root.h"

QemuMutex qemu_cpu_list_lock;
static QemuCond exclusive_cond;
static QemuCond exclusive_resume;
static QemuCond qemu_work_cond;

/* >= 1 if a thread is inside start_exclusive/end_exclusive.  Written
 * under qemu_cpu_list_lock, read with atomic operations.
 */
static int pending_cpus;

Paolo Bonzini | 267f685 | 2016-08-28 03:45:14 +0200 | [diff] [blame] | 38 | void qemu_init_cpu_list(void) |
| 39 | { |
Paolo Bonzini | ab12997 | 2016-08-31 16:56:04 +0200 | [diff] [blame] | 40 | /* This is needed because qemu_init_cpu_list is also called by the |
| 41 | * child process in a fork. */ |
| 42 | pending_cpus = 0; |
| 43 | |
Paolo Bonzini | 267f685 | 2016-08-28 03:45:14 +0200 | [diff] [blame] | 44 | qemu_mutex_init(&qemu_cpu_list_lock); |
Paolo Bonzini | ab12997 | 2016-08-31 16:56:04 +0200 | [diff] [blame] | 45 | qemu_cond_init(&exclusive_cond); |
| 46 | qemu_cond_init(&exclusive_resume); |
Sergey Fedorov | d148d90 | 2016-08-29 09:51:00 +0200 | [diff] [blame] | 47 | qemu_cond_init(&qemu_work_cond); |
Paolo Bonzini | 267f685 | 2016-08-28 03:45:14 +0200 | [diff] [blame] | 48 | } |

void cpu_list_lock(void)
{
    qemu_mutex_lock(&qemu_cpu_list_lock);
}

void cpu_list_unlock(void)
{
    qemu_mutex_unlock(&qemu_cpu_list_lock);
}

/* Return one more than the largest cpu_index currently in use,
 * i.e. the next free index.
 */
int cpu_get_free_index(void)
{
    CPUState *some_cpu;
    int max_cpu_index = 0;

    CPU_FOREACH(some_cpu) {
        if (some_cpu->cpu_index >= max_cpu_index) {
            max_cpu_index = some_cpu->cpu_index + 1;
        }
    }
    return max_cpu_index;
}

CPUTailQ cpus_queue = QTAILQ_HEAD_INITIALIZER(cpus_queue);
static unsigned int cpu_list_generation_id;

unsigned int cpu_list_generation_id_get(void)
{
    return cpu_list_generation_id;
}

void cpu_list_add(CPUState *cpu)
{
    static bool cpu_index_auto_assigned;

    QEMU_LOCK_GUARD(&qemu_cpu_list_lock);
    if (cpu->cpu_index == UNASSIGNED_CPU_INDEX) {
        cpu_index_auto_assigned = true;
        cpu->cpu_index = cpu_get_free_index();
        assert(cpu->cpu_index != UNASSIGNED_CPU_INDEX);
    } else {
        assert(!cpu_index_auto_assigned);
    }
    QTAILQ_INSERT_TAIL_RCU(&cpus_queue, cpu, node);
    cpu_list_generation_id++;
}

void cpu_list_remove(CPUState *cpu)
{
    QEMU_LOCK_GUARD(&qemu_cpu_list_lock);
    if (!QTAILQ_IN_USE(cpu, node)) {
        /* there is nothing to undo since cpu_exec_init() hasn't been called */
        return;
    }

    QTAILQ_REMOVE_RCU(&cpus_queue, cpu, node);
    cpu->cpu_index = UNASSIGNED_CPU_INDEX;
    cpu_list_generation_id++;
}

CPUState *qemu_get_cpu(int index)
{
    CPUState *cpu;

    CPU_FOREACH(cpu) {
        if (cpu->cpu_index == index) {
            return cpu;
        }
    }

    return NULL;
}

/* current CPU in the current thread. It is only valid inside cpu_exec() */
__thread CPUState *current_cpu;

struct qemu_work_item {
    QSIMPLEQ_ENTRY(qemu_work_item) node;
    run_on_cpu_func func;
    run_on_cpu_data data;
    bool free, exclusive, done;
};

static void queue_work_on_cpu(CPUState *cpu, struct qemu_work_item *wi)
{
    qemu_mutex_lock(&cpu->work_mutex);
    QSIMPLEQ_INSERT_TAIL(&cpu->work_list, wi, node);
    wi->done = false;
    qemu_mutex_unlock(&cpu->work_mutex);

    qemu_cpu_kick(cpu);
}

void do_run_on_cpu(CPUState *cpu, run_on_cpu_func func, run_on_cpu_data data,
                   QemuMutex *mutex)
{
    struct qemu_work_item wi;

    if (qemu_cpu_is_self(cpu)) {
        func(cpu, data);
        return;
    }

    wi.func = func;
    wi.data = data;
    wi.done = false;
    wi.free = false;
    wi.exclusive = false;

    queue_work_on_cpu(cpu, &wi);
    while (!qatomic_load_acquire(&wi.done)) {
        CPUState *self_cpu = current_cpu;

        qemu_cond_wait(&qemu_work_cond, mutex);
        current_cpu = self_cpu;
    }
}
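
/*
 * Illustrative sketch, not part of this file: a hypothetical caller that
 * needs to mutate CPU state from another thread would normally go through
 * run_on_cpu(), which each mode implements on top of do_run_on_cpu():
 *
 *     static void do_set_halted(CPUState *cpu, run_on_cpu_data data)
 *     {
 *         cpu->halted = data.host_int;
 *     }
 *
 *     run_on_cpu(cpu, do_set_halted, RUN_ON_CPU_HOST_INT(1));
 *
 * run_on_cpu() returns only after the function has executed on the target
 * CPU's thread (or immediately, when the caller is that thread).
 */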

void async_run_on_cpu(CPUState *cpu, run_on_cpu_func func, run_on_cpu_data data)
{
    struct qemu_work_item *wi;

    wi = g_new0(struct qemu_work_item, 1);
    wi->func = func;
    wi->data = data;
    wi->free = true;

    queue_work_on_cpu(cpu, wi);
}
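
/*
 * Fire-and-forget sketch (hypothetical, reusing do_set_halted from above):
 *
 *     async_run_on_cpu(cpu, do_set_halted, RUN_ON_CPU_HOST_INT(1));
 *
 * Unlike run_on_cpu(), this returns before the function has run; the work
 * item is heap-allocated (wi->free = true) and freed by
 * process_queued_cpu_work() after it executes, so the data passed in must
 * not point to the caller's stack.
 */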

/* Wait for pending exclusive operations to complete.  The CPU list lock
   must be held. */
static inline void exclusive_idle(void)
{
    while (pending_cpus) {
        qemu_cond_wait(&exclusive_resume, &qemu_cpu_list_lock);
    }
}

/* Start an exclusive operation.
   Must only be called from outside cpu_exec. */
void start_exclusive(void)
{
    CPUState *other_cpu;
    int running_cpus;

    /* Ensure we are not running, or start_exclusive will block. */
    g_assert(!current_cpu->running);

    if (current_cpu->exclusive_context_count) {
        current_cpu->exclusive_context_count++;
        return;
    }

    qemu_mutex_lock(&qemu_cpu_list_lock);
    exclusive_idle();

    /* Make all other cpus stop executing. */
    qatomic_set(&pending_cpus, 1);

    /* Write pending_cpus before reading other_cpu->running. */
    smp_mb();
    running_cpus = 0;
    CPU_FOREACH(other_cpu) {
        if (qatomic_read(&other_cpu->running)) {
            other_cpu->has_waiter = true;
            running_cpus++;
            qemu_cpu_kick(other_cpu);
        }
    }

    qatomic_set(&pending_cpus, running_cpus + 1);
    while (pending_cpus > 1) {
        qemu_cond_wait(&exclusive_cond, &qemu_cpu_list_lock);
    }

    /* We can release the mutex: no one will enter another exclusive
     * section until end_exclusive resets pending_cpus to 0.
     */
    qemu_mutex_unlock(&qemu_cpu_list_lock);

    current_cpu->exclusive_context_count = 1;
}

/* Finish an exclusive operation. */
void end_exclusive(void)
{
    current_cpu->exclusive_context_count--;
    if (current_cpu->exclusive_context_count) {
        return;
    }

    qemu_mutex_lock(&qemu_cpu_list_lock);
    qatomic_set(&pending_cpus, 0);
    qemu_cond_broadcast(&exclusive_resume);
    qemu_mutex_unlock(&qemu_cpu_list_lock);
}
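
/*
 * Sketch of an exclusive section (illustrative only).  Between
 * start_exclusive() and end_exclusive() no other CPU is executing guest
 * code, so state that every vCPU observes can be updated safely:
 *
 *     start_exclusive();
 *     ... update cross-CPU state here ...
 *     end_exclusive();
 *
 * Because exclusive_context_count is per-CPU, nested pairs in the same
 * thread are permitted and only the outermost pair takes effect.
 */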

/* Wait for exclusive ops to finish, and begin cpu execution. */
void cpu_exec_start(CPUState *cpu)
{
    qatomic_set(&cpu->running, true);

    /* Write cpu->running before reading pending_cpus. */
    smp_mb();

    /* 1. start_exclusive saw cpu->running == true and pending_cpus >= 1.
     * After taking the lock we'll see cpu->has_waiter == true and run---not
     * for long because start_exclusive kicked us.  cpu_exec_end will
     * decrement pending_cpus and signal the waiter.
     *
     * 2. start_exclusive saw cpu->running == false but pending_cpus >= 1.
     * This includes the case when an exclusive item is running now.
     * Then we'll see cpu->has_waiter == false and wait for the item to
     * complete.
     *
     * 3. pending_cpus == 0.  Then start_exclusive is definitely going to
     * see cpu->running == true, and it will kick the CPU.
     */
    if (unlikely(qatomic_read(&pending_cpus))) {
        QEMU_LOCK_GUARD(&qemu_cpu_list_lock);
        if (!cpu->has_waiter) {
            /* Not counted in pending_cpus, let the exclusive item
             * run.  Since we have the lock, just set cpu->running to true
             * while holding it; no need to check pending_cpus again.
             */
            qatomic_set(&cpu->running, false);
            exclusive_idle();
            /* Now pending_cpus is zero. */
            qatomic_set(&cpu->running, true);
        } else {
            /* Counted in pending_cpus, go ahead and release the
             * waiter at cpu_exec_end.
             */
        }
    }
}

/* Mark cpu as not executing, and release pending exclusive ops. */
void cpu_exec_end(CPUState *cpu)
{
    qatomic_set(&cpu->running, false);

    /* Write cpu->running before reading pending_cpus. */
    smp_mb();

    /* 1. start_exclusive saw cpu->running == true.  Then it will increment
     * pending_cpus and wait for exclusive_cond.  After taking the lock
     * we'll see cpu->has_waiter == true.
     *
     * 2. start_exclusive saw cpu->running == false but here pending_cpus >= 1.
     * This includes the case when an exclusive item started after setting
     * cpu->running to false and before we read pending_cpus.  Then we'll see
     * cpu->has_waiter == false and not touch pending_cpus.  The next call to
     * cpu_exec_start will run exclusive_idle if still necessary, thus waiting
     * for the item to complete.
     *
     * 3. pending_cpus == 0.  Then start_exclusive is definitely going to
     * see cpu->running == false, and it can ignore this CPU until the
     * next cpu_exec_start.
     */
    if (unlikely(qatomic_read(&pending_cpus))) {
        QEMU_LOCK_GUARD(&qemu_cpu_list_lock);
        if (cpu->has_waiter) {
            cpu->has_waiter = false;
            qatomic_set(&pending_cpus, pending_cpus - 1);
            if (pending_cpus == 1) {
                qemu_cond_signal(&exclusive_cond);
            }
        }
    }
}

void async_safe_run_on_cpu(CPUState *cpu, run_on_cpu_func func,
                           run_on_cpu_data data)
{
    struct qemu_work_item *wi;

    wi = g_new0(struct qemu_work_item, 1);
    wi->func = func;
    wi->data = data;
    wi->free = true;
    wi->exclusive = true;

    queue_work_on_cpu(cpu, wi);
}
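
/*
 * Sketch of intended use (illustrative; real callers live elsewhere):
 * work that must not race with any CPU executing translated code, for
 * example invalidating all translations, is queued as "safe" work:
 *
 *     async_safe_run_on_cpu(first_cpu, do_invalidate_all,
 *                           RUN_ON_CPU_NULL);
 *
 * do_invalidate_all is a hypothetical run_on_cpu_func; the point is that
 * it runs between start_exclusive() and end_exclusive(), with every other
 * CPU stopped (see process_queued_cpu_work below).
 */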

/* Drop any work items still queued on @cpu without running them;
 * heap-allocated items (wi->free) are freed.
 */
void free_queued_cpu_work(CPUState *cpu)
{
    while (!QSIMPLEQ_EMPTY(&cpu->work_list)) {
        struct qemu_work_item *wi = QSIMPLEQ_FIRST(&cpu->work_list);
        QSIMPLEQ_REMOVE_HEAD(&cpu->work_list, node);
        if (wi->free) {
            g_free(wi);
        }
    }
}

void process_queued_cpu_work(CPUState *cpu)
{
    struct qemu_work_item *wi;

    qemu_mutex_lock(&cpu->work_mutex);
    if (QSIMPLEQ_EMPTY(&cpu->work_list)) {
        qemu_mutex_unlock(&cpu->work_mutex);
        return;
    }
    while (!QSIMPLEQ_EMPTY(&cpu->work_list)) {
        wi = QSIMPLEQ_FIRST(&cpu->work_list);
        QSIMPLEQ_REMOVE_HEAD(&cpu->work_list, node);
        qemu_mutex_unlock(&cpu->work_mutex);
        if (wi->exclusive) {
            /* Running work items outside the BQL avoids the following deadlock:
             * 1) start_exclusive() is called with the BQL taken while another
             * CPU is running; 2) cpu_exec in the other CPU tries to take the
             * BQL, so it goes to sleep; start_exclusive() is sleeping too, so
             * neither CPU can proceed.
             */
            bql_unlock();
            start_exclusive();
            wi->func(cpu, wi->data);
            end_exclusive();
            bql_lock();
        } else {
            wi->func(cpu, wi->data);
        }
        qemu_mutex_lock(&cpu->work_mutex);
        if (wi->free) {
            g_free(wi);
        } else {
            qatomic_store_release(&wi->done, true);
        }
    }
    qemu_mutex_unlock(&cpu->work_mutex);
    qemu_cond_broadcast(&qemu_work_cond);
}

/* Add a breakpoint. */
int cpu_breakpoint_insert(CPUState *cpu, vaddr pc, int flags,
                          CPUBreakpoint **breakpoint)
{
    CPUClass *cc = CPU_GET_CLASS(cpu);
    CPUBreakpoint *bp;

    if (cc->gdb_adjust_breakpoint) {
        pc = cc->gdb_adjust_breakpoint(cpu, pc);
    }

    bp = g_malloc(sizeof(*bp));

    bp->pc = pc;
    bp->flags = flags;

    /* keep all GDB-injected breakpoints in front */
    if (flags & BP_GDB) {
        QTAILQ_INSERT_HEAD(&cpu->breakpoints, bp, entry);
    } else {
        QTAILQ_INSERT_TAIL(&cpu->breakpoints, bp, entry);
    }

    if (breakpoint) {
        *breakpoint = bp;
    }

    trace_breakpoint_insert(cpu->cpu_index, pc, flags);
    return 0;
}
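
/*
 * Usage sketch (hypothetical): a debugger front end setting a breakpoint
 * at guest address 0x1000 and later dropping it by reference:
 *
 *     CPUBreakpoint *bp;
 *     cpu_breakpoint_insert(cpu, 0x1000, BP_GDB, &bp);
 *     ...
 *     cpu_breakpoint_remove_by_ref(cpu, bp);
 */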

/* Remove a specific breakpoint. */
int cpu_breakpoint_remove(CPUState *cpu, vaddr pc, int flags)
{
    CPUClass *cc = CPU_GET_CLASS(cpu);
    CPUBreakpoint *bp;

    if (cc->gdb_adjust_breakpoint) {
        pc = cc->gdb_adjust_breakpoint(cpu, pc);
    }

    QTAILQ_FOREACH(bp, &cpu->breakpoints, entry) {
        if (bp->pc == pc && bp->flags == flags) {
            cpu_breakpoint_remove_by_ref(cpu, bp);
            return 0;
        }
    }
    return -ENOENT;
}

/* Remove a specific breakpoint by reference. */
void cpu_breakpoint_remove_by_ref(CPUState *cpu, CPUBreakpoint *bp)
{
    QTAILQ_REMOVE(&cpu->breakpoints, bp, entry);

    trace_breakpoint_remove(cpu->cpu_index, bp->pc, bp->flags);
    g_free(bp);
}

/* Remove all matching breakpoints. */
void cpu_breakpoint_remove_all(CPUState *cpu, int mask)
{
    CPUBreakpoint *bp, *next;

    QTAILQ_FOREACH_SAFE(bp, &cpu->breakpoints, entry, next) {
        if (bp->flags & mask) {
            cpu_breakpoint_remove_by_ref(cpu, bp);
        }
    }
}
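
/*
 * For instance, a debugger stub detaching from the guest could drop only
 * the breakpoints it injected, leaving guest-set ones in place (sketch):
 *
 *     cpu_breakpoint_remove_all(cpu, BP_GDB);
 */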