author		Peter Gonda <pgonda@google.com>	2021-10-21 10:43:00 -0700
committer	Paolo Bonzini <pbonzini@redhat.com>	2021-11-11 10:35:27 -0500
commit		b56639318bb2be66aceba92836279714488709b4
tree		3262f21b2331eb9594f213d143462fbd5919da02 /arch/x86/kvm/svm/sev.c
parent		91b692a03c9993ad2a9dea0534eaff169a98742c
KVM: SEV: Add support for SEV intra host migration
For SEV to work with intra host migration, contents of the SEV info struct
such as the ASID (used to index the encryption key in the AMD SP) and the
list of memory regions need to be transferred to the target VM. This change
adds a command for a target VMM to get a source SEV VM's sev info.

Signed-off-by: Peter Gonda <pgonda@google.com>
Suggested-by: Sean Christopherson <seanjc@google.com>
Reviewed-by: Marc Orr <marcorr@google.com>
Cc: Marc Orr <marcorr@google.com>
Cc: Paolo Bonzini <pbonzini@redhat.com>
Cc: Sean Christopherson <seanjc@google.com>
Cc: David Rientjes <rientjes@google.com>
Cc: Dr. David Alan Gilbert <dgilbert@redhat.com>
Cc: Brijesh Singh <brijesh.singh@amd.com>
Cc: Tom Lendacky <thomas.lendacky@amd.com>
Cc: Vitaly Kuznetsov <vkuznets@redhat.com>
Cc: Wanpeng Li <wanpengli@tencent.com>
Cc: Jim Mattson <jmattson@google.com>
Cc: Joerg Roedel <joro@8bytes.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Borislav Petkov <bp@alien8.de>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Cc: kvm@vger.kernel.org
Cc: linux-kernel@vger.kernel.org
Message-Id: <20211021174303.385706-3-pgonda@google.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
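As context for the diff below, here is a minimal sketch of how a target VMM
might drive the migration from userspace. It assumes the
KVM_CAP_VM_MOVE_ENC_CONTEXT_FROM capability that this series plumbs through
KVM_ENABLE_CAP on the destination VM fd (the capability definition and ioctl
wiring live outside this sev.c-only diff); the helper name is hypothetical:

	#include <sys/ioctl.h>
	#include <linux/kvm.h>

	/* Hypothetical helper: move the SEV context of the VM behind
	 * src_vm_fd into the VM behind dst_vm_fd. */
	static int sev_intra_host_migrate(int dst_vm_fd, int src_vm_fd)
	{
		struct kvm_enable_cap cap = {
			.cap = KVM_CAP_VM_MOVE_ENC_CONTEXT_FROM,
			.args[0] = (__u64)src_vm_fd,
		};

		/* On success the source VM is marked dead and its SEV state
		 * (ASID, firmware handle, region list) belongs to dst. */
		return ioctl(dst_vm_fd, KVM_ENABLE_CAP, &cap);
	}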
Diffstat (limited to 'arch/x86/kvm/svm/sev.c')
-rw-r--r--	arch/x86/kvm/svm/sev.c	152
1 file changed, 152 insertions(+), 0 deletions(-)
diff --git a/arch/x86/kvm/svm/sev.c b/arch/x86/kvm/svm/sev.c
index 227becd93cb6..8b529022f0cf 100644
--- a/arch/x86/kvm/svm/sev.c
+++ b/arch/x86/kvm/svm/sev.c
@@ -1532,6 +1532,158 @@ static bool cmd_allowed_from_miror(u32 cmd_id)
return false;
}
+static int sev_lock_for_migration(struct kvm *kvm)
+{
+ struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
+
+ /*
+ * Bail if this VM is already involved in a migration to avoid deadlock
+ * between two VMs trying to migrate to/from each other.
+ */
+ if (atomic_cmpxchg_acquire(&sev->migration_in_progress, 0, 1))
+ return -EBUSY;
+
+ mutex_lock(&kvm->lock);
+
+ return 0;
+}
+
+static void sev_unlock_after_migration(struct kvm *kvm)
+{
+ struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
+
+ mutex_unlock(&kvm->lock);
+ atomic_set_release(&sev->migration_in_progress, 0);
+}
+
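+/*
+ * Lock every vCPU of @kvm for the migration; on failure, drop the locks
+ * already taken and return -EINTR so the caller can bail out.
+ */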
+static int sev_lock_vcpus_for_migration(struct kvm *kvm)
+{
+ struct kvm_vcpu *vcpu;
+ int i, j;
+
+ kvm_for_each_vcpu(i, vcpu, kvm) {
+ if (mutex_lock_killable(&vcpu->mutex))
+ goto out_unlock;
+ }
+
+ return 0;
+
+out_unlock:
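+	/* Unlock only the vCPUs locked before the failure at index i. */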
+ kvm_for_each_vcpu(j, vcpu, kvm) {
+ if (i == j)
+ break;
+
+ mutex_unlock(&vcpu->mutex);
+ }
+ return -EINTR;
+}
+
+static void sev_unlock_vcpus_for_migration(struct kvm *kvm)
+{
+ struct kvm_vcpu *vcpu;
+ int i;
+
+ kvm_for_each_vcpu(i, vcpu, kvm) {
+ mutex_unlock(&vcpu->mutex);
+ }
+}
+
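+/*
+ * Move the SEV state (ASID, firmware handle, locked-page accounting and the
+ * encrypted region list) from @src to @dst, leaving @src inactive.
+ */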
+static void sev_migrate_from(struct kvm_sev_info *dst,
+ struct kvm_sev_info *src)
+{
+ dst->active = true;
+ dst->asid = src->asid;
+ dst->handle = src->handle;
+ dst->pages_locked = src->pages_locked;
+
+ src->asid = 0;
+ src->active = false;
+ src->handle = 0;
+ src->pages_locked = 0;
+
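+	/*
+	 * The caller charged the destination's misc cgroup only when it
+	 * differs from the source's; in that case drop the source's ASID
+	 * charge here, otherwise the destination simply inherits it. Either
+	 * way, release the source's cgroup reference.
+	 */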
+ if (dst->misc_cg != src->misc_cg)
+ sev_misc_cg_uncharge(src);
+
+ put_misc_cg(src->misc_cg);
+ src->misc_cg = NULL;
+
+ INIT_LIST_HEAD(&dst->regions_list);
+ list_replace_init(&src->regions_list, &dst->regions_list);
+}
+
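+/*
+ * Move SEV state from the VM behind @source_fd into @kvm. Both VMs and all
+ * of their vCPUs stay locked for the duration; on success the source VM is
+ * marked dead and can no longer run.
+ */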
+int svm_vm_migrate_from(struct kvm *kvm, unsigned int source_fd)
+{
+ struct kvm_sev_info *dst_sev = &to_kvm_svm(kvm)->sev_info;
+ struct kvm_sev_info *src_sev;
+ struct file *source_kvm_file;
+ struct kvm *source_kvm;
+ int ret;
+
+ ret = sev_lock_for_migration(kvm);
+ if (ret)
+ return ret;
+
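+	/* The destination must not already be a SEV guest. */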
+ if (sev_guest(kvm)) {
+ ret = -EINVAL;
+ goto out_unlock;
+ }
+
+ source_kvm_file = fget(source_fd);
+ if (!file_is_kvm(source_kvm_file)) {
+ ret = -EBADF;
+ goto out_fput;
+ }
+
+ source_kvm = source_kvm_file->private_data;
+ ret = sev_lock_for_migration(source_kvm);
+ if (ret)
+ goto out_fput;
+
+ if (!sev_guest(source_kvm) || sev_es_guest(source_kvm)) {
+ ret = -EINVAL;
+ goto out_source;
+ }
+
+ src_sev = &to_kvm_svm(source_kvm)->sev_info;
+ dst_sev->misc_cg = get_current_misc_cg();
+ if (dst_sev->misc_cg != src_sev->misc_cg) {
+ ret = sev_misc_cg_try_charge(dst_sev);
+ if (ret)
+ goto out_dst_put_cgroup;
+ }
+
+ ret = sev_lock_vcpus_for_migration(kvm);
+ if (ret)
+ goto out_dst_cgroup;
+ ret = sev_lock_vcpus_for_migration(source_kvm);
+ if (ret)
+ goto out_dst_vcpu;
+
+ sev_migrate_from(dst_sev, src_sev);
+ kvm_vm_dead(source_kvm);
+ ret = 0;
+
+ sev_unlock_vcpus_for_migration(source_kvm);
+out_dst_vcpu:
+ sev_unlock_vcpus_for_migration(kvm);
+out_dst_cgroup:
+	if (ret < 0) {
+		/* Uncharge only if the destination charged separately above. */
+		if (dst_sev->misc_cg != src_sev->misc_cg)
+			sev_misc_cg_uncharge(dst_sev);
+out_dst_put_cgroup:
+		put_misc_cg(dst_sev->misc_cg);
+		dst_sev->misc_cg = NULL;
+	}
+out_source:
+ sev_unlock_after_migration(source_kvm);
+out_fput:
+ if (source_kvm_file)
+ fput(source_kvm_file);
+out_unlock:
+ sev_unlock_after_migration(kvm);
+ return ret;
+}
+
int svm_mem_enc_op(struct kvm *kvm, void __user *argp)
{
struct kvm_sev_cmd sev_cmd;