Diffstat (limited to 'arch/x86/kvm/svm/sev.c')
-rw-r--r--	arch/x86/kvm/svm/sev.c	20
1 files changed, 10 insertions, 10 deletions
diff --git a/arch/x86/kvm/svm/sev.c b/arch/x86/kvm/svm/sev.c
index 0c240ed04f96..b0e793e7d85c 100644
--- a/arch/x86/kvm/svm/sev.c
+++ b/arch/x86/kvm/svm/sev.c
@@ -603,6 +603,9 @@ static int sev_es_sync_vmsa(struct vcpu_svm *svm)
 	save->xss = svm->vcpu.arch.ia32_xss;
 	save->dr6 = svm->vcpu.arch.dr6;
 
+	pr_debug("Virtual Machine Save Area (VMSA):\n");
+	print_hex_dump(KERN_CONT, "", DUMP_PREFIX_NONE, 16, 1, save, sizeof(*save), false);
+
 	return 0;
 }
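The hunk above dumps the whole VMSA in hex via pr_debug() followed by print_hex_dump(). With DUMP_PREFIX_NONE, a row size of 16, a group size of 1, and the ascii flag set to false, the dump comes out as 16 space-separated byte values per line, with no offset prefix and no ASCII column. As a rough illustration of that row layout only, here is a minimal userspace sketch; the buffer name and contents are made up, and this is not kernel code:

/*
 * Userspace sketch of the row layout produced by
 * print_hex_dump(KERN_CONT, "", DUMP_PREFIX_NONE, 16, 1, save, sizeof(*save), false):
 * 16 single bytes per line, space separated, no prefix, no ASCII column.
 * fake_vmsa is a stand-in buffer, not the real VMSA structure.
 */
#include <stdio.h>
#include <stddef.h>

static void hex_dump_rows(const void *buf, size_t len)
{
	const unsigned char *p = buf;

	for (size_t i = 0; i < len; i++)
		printf("%02x%c", p[i],
		       (i % 16 == 15 || i == len - 1) ? '\n' : ' ');
}

int main(void)
{
	unsigned char fake_vmsa[40] = { 0x01, 0x02, 0x03 };	/* stand-in data */

	hex_dump_rows(fake_vmsa, sizeof(fake_vmsa));
	return 0;
}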
@@ -1606,38 +1609,35 @@ static int sev_lock_vcpus_for_migration(struct kvm *kvm,
 {
 	struct kvm_vcpu *vcpu;
 	unsigned long i, j;
-	bool first = true;
 
 	kvm_for_each_vcpu(i, vcpu, kvm) {
 		if (mutex_lock_killable_nested(&vcpu->mutex, role))
 			goto out_unlock;
-		if (first) {
+#ifdef CONFIG_PROVE_LOCKING
+		if (!i)
 			/*
 			 * Reset the role to one that avoids colliding with
 			 * the role used for the first vcpu mutex.
 			 */
 			role = SEV_NR_MIGRATION_ROLES;
-			first = false;
-		} else {
+		else
 			mutex_release(&vcpu->mutex.dep_map, _THIS_IP_);
-		}
+#endif
 	}
 
 	return 0;
 
 out_unlock:
-	first = true;
 	kvm_for_each_vcpu(j, vcpu, kvm) {
 		if (i == j)
 			break;
-		if (first)
-			first = false;
-		else
+#ifdef CONFIG_PROVE_LOCKING
+		if (j)
 			mutex_acquire(&vcpu->mutex.dep_map, role, 0, _THIS_IP_);
-
+#endif
 		mutex_unlock(&vcpu->mutex);
 	}
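For the second hunk: with CONFIG_PROVE_LOCKING enabled, lockdep tracks held locks and their nesting subclasses per task, and both have fairly small limits, so taking every vCPU mutex in the same class on a large guest would overwhelm it. The rewritten loop keeps full lockdep tracking only for the first vCPU's mutex, drops the tracking annotation for the later ones right after taking them (mutex_release() on the dep_map; the mutexes themselves stay locked), and restores the annotation (mutex_acquire()) just before each unlock on the error path so mutex_unlock() does not complain about an untracked lock. Wrapping all of this in #ifdef CONFIG_PROVE_LOCKING makes it disappear on non-lockdep builds, which also lets the patch drop the bool first flag in favour of testing the vCPU index, zero only on the first iteration. A minimal userspace sketch of that flag-to-index simplification, with invented names and printf() standing in for the locking and lockdep calls:

/*
 * Sketch of the control-flow change only: both loops below behave
 * identically; the second replaces the separate "first" flag with a
 * test of the loop index. NR_VCPUS and the helpers are made up.
 */
#include <stdio.h>

#define NR_VCPUS 4

static void lock_all_with_flag(void)
{
	int first = 1;
	unsigned long i;

	for (i = 0; i < NR_VCPUS; i++) {
		printf("lock vcpu %lu\n", i);
		if (first) {
			printf("  keep lockdep tracking, switch role\n");
			first = 0;
		} else {
			printf("  drop lockdep tracking\n");
		}
	}
}

static void lock_all_with_index(void)
{
	unsigned long i;

	for (i = 0; i < NR_VCPUS; i++) {
		printf("lock vcpu %lu\n", i);
		if (!i)
			printf("  keep lockdep tracking, switch role\n");
		else
			printf("  drop lockdep tracking\n");
	}
}

int main(void)
{
	lock_all_with_flag();
	lock_all_with_index();
	return 0;
}

In the kernel loops, i and j are the vCPU indices supplied by kvm_for_each_vcpu(), so the !i and j checks single out the first vCPU in the same way.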