author    Alex Bennée <alex.bennee@linaro.org>  2018-02-27 12:52:48 +0300
committer Paolo Bonzini <pbonzini@redhat.com>   2018-03-12 17:10:36 +0100
commit    d759c951f3287fad04210a52f2dc93f94cf58c7f (patch)
tree      5af67500a854ba2e25bc7690bf346bf3c2bbb810 /cpus.c
parent    1a423896fa4fc2ea49c64e7a493d88a8b251950d (diff)
replay: push replay_mutex_lock up the call tree
Instead of using the replay_lock to guard only the output of the log, we now use it to protect the whole execution section. This replaces what the BQL used to do when it was held during TCG execution. We also introduce some rules for locking order, mainly that you cannot take the replay_mutex while holding the BQL. This leads to some slight sophistry during start-up and to extending the replay_mutex_destroy function to unlock the mutex without checking for the BQL condition, so it can be cleanly dropped in the non-replay case.

Signed-off-by: Alex Bennée <alex.bennee@linaro.org>
Signed-off-by: Pavel Dovgalyuk <pavel.dovgaluk@ispras.ru>
Tested-by: Pavel Dovgalyuk <pavel.dovgaluk@ispras.ru>
Message-Id: <20180227095248.1060.40374.stgit@pasha-VirtualBox>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Signed-off-by: Alex Bennée <alex.bennee@linaro.org>
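As an illustration of the new locking order (not part of the patch itself), here is a minimal sketch of the pattern the hunks below apply, using only helpers already called from cpus.c; the wrapper name take_locks_in_order is hypothetical.

/* Illustration only (hypothetical helper): the replay_mutex must never be
 * taken while the BQL (iothread lock) is held, so a caller that already
 * holds the BQL drops it first, takes the replay_mutex, and then re-takes
 * the BQL.  cpus.c already includes the headers declaring these functions. */
static void take_locks_in_order(void)
{
    qemu_mutex_unlock_iothread();   /* drop the BQL first                 */
    replay_mutex_lock();            /* the replay_mutex is the outer lock */
    qemu_mutex_lock_iothread();     /* now the BQL can be re-taken        */
}

This is the same sequence qemu_tcg_rr_cpu_thread_fn and pause_all_vcpus use in the hunks below whenever they need both locks.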
Diffstat (limited to 'cpus.c')
-rw-r--r--  cpus.c | 24

1 file changed, 22 insertions(+), 2 deletions(-)
diff --git a/cpus.c b/cpus.c
index c652da84cf..2e6701795b 100644
--- a/cpus.c
+++ b/cpus.c
@@ -1317,6 +1317,8 @@ static void prepare_icount_for_run(CPUState *cpu)
insns_left = MIN(0xffff, cpu->icount_budget);
cpu->icount_decr.u16.low = insns_left;
cpu->icount_extra = cpu->icount_budget - insns_left;
+
+ replay_mutex_lock();
}
}
@@ -1332,6 +1334,8 @@ static void process_icount_data(CPUState *cpu)
cpu->icount_budget = 0;
replay_account_executed_instructions();
+
+ replay_mutex_unlock();
}
}
@@ -1346,11 +1350,9 @@ static int tcg_cpu_exec(CPUState *cpu)
#ifdef CONFIG_PROFILER
ti = profile_getclock();
#endif
- qemu_mutex_unlock_iothread();
cpu_exec_start(cpu);
ret = cpu_exec(cpu);
cpu_exec_end(cpu);
- qemu_mutex_lock_iothread();
#ifdef CONFIG_PROFILER
tcg_time += profile_getclock() - ti;
#endif
@@ -1417,6 +1419,9 @@ static void *qemu_tcg_rr_cpu_thread_fn(void *arg)
cpu->exit_request = 1;
while (1) {
+ qemu_mutex_unlock_iothread();
+ replay_mutex_lock();
+ qemu_mutex_lock_iothread();
/* Account partial waits to QEMU_CLOCK_VIRTUAL. */
qemu_account_warp_timer();
@@ -1425,6 +1430,8 @@ static void *qemu_tcg_rr_cpu_thread_fn(void *arg)
*/
handle_icount_deadline();
+ replay_mutex_unlock();
+
if (!cpu) {
cpu = first_cpu;
}
@@ -1440,11 +1447,13 @@ static void *qemu_tcg_rr_cpu_thread_fn(void *arg)
if (cpu_can_run(cpu)) {
int r;
+ qemu_mutex_unlock_iothread();
prepare_icount_for_run(cpu);
r = tcg_cpu_exec(cpu);
process_icount_data(cpu);
+ qemu_mutex_lock_iothread();
if (r == EXCP_DEBUG) {
cpu_handle_guest_debug(cpu);
@@ -1634,7 +1643,9 @@ static void *qemu_tcg_cpu_thread_fn(void *arg)
while (1) {
if (cpu_can_run(cpu)) {
int r;
+ qemu_mutex_unlock_iothread();
r = tcg_cpu_exec(cpu);
+ qemu_mutex_lock_iothread();
switch (r) {
case EXCP_DEBUG:
cpu_handle_guest_debug(cpu);
@@ -1781,12 +1792,21 @@ void pause_all_vcpus(void)
}
}
+ /* We need to drop the replay_lock so any vCPU threads woken up
+ * can finish their replay tasks
+ */
+ replay_mutex_unlock();
+
while (!all_vcpus_paused()) {
qemu_cond_wait(&qemu_pause_cond, &qemu_global_mutex);
CPU_FOREACH(cpu) {
qemu_cpu_kick(cpu);
}
}
+
+ qemu_mutex_unlock_iothread();
+ replay_mutex_lock();
+ qemu_mutex_lock_iothread();
}
void cpu_resume(CPUState *cpu)