author	Gleb Natapov <gleb@redhat.com>	2012-06-04 14:53:23 +0300
committer	Avi Kivity <avi@redhat.com>	2012-06-05 17:46:43 +0300
commit	1952639665e92481c34c34c3e2a71bf3e66ba362 (patch)
tree	0f6a1904bffb0d59e9b74a46fb175207106436de
parent	a6bb7929677aacfce3f864c3cdacaa7d527945d5 (diff)
KVM: MMU: do not iterate over all VMs in mmu_shrink()
mmu_shrink() needlessly iterates over all VMs even though it will not attempt to free mmu pages from more than one of them. Fix that, and also check the used mmu pages count outside of the VM lock so that inactive VMs are skipped faster.

Signed-off-by: Gleb Natapov <gleb@redhat.com>
Signed-off-by: Avi Kivity <avi@redhat.com>
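The unlocked read of n_used_mmu_pages that the message refers to is only a heuristic: a stale value can, at worst, make the shrinker skip a VM for one pass. As a rough illustration of the pattern (optimistic unlocked check, then the real work under the lock), here is a minimal user-space sketch; struct vm, shrink_one() and the pthread mutex are stand-ins invented for this example, not kernel code:

#include <pthread.h>
#include <stdio.h>

/* Hypothetical stand-in for struct kvm: a page counter plus its lock. */
struct vm {
	pthread_mutex_t lock;
	int used_pages;		/* plays the role of kvm->arch.n_used_mmu_pages */
};

/* Reclaim from one VM; skip cheaply, without the lock, if it looks empty. */
static int shrink_one(struct vm *vm)
{
	/*
	 * Unlocked, possibly stale read. Racing with a VM that is just
	 * starting to populate its pages only means we skip it on this
	 * pass, which the patch's comment argues is acceptable.
	 */
	if (vm->used_pages == 0)
		return 0;

	pthread_mutex_lock(&vm->lock);
	if (vm->used_pages > 0)		/* re-check under the lock */
		vm->used_pages--;	/* stands in for zapping a page */
	pthread_mutex_unlock(&vm->lock);
	return 1;
}

int main(void)
{
	struct vm vm = { PTHREAD_MUTEX_INITIALIZER, 3 };

	while (shrink_one(&vm))
		printf("reclaimed; %d pages left\n", vm.used_pages);
	return 0;
}

(Build with -pthread. The kernel version additionally rotates the shrunk VM to the tail of vm_list so that successive shrinker calls spread the work across VMs.)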
-rw-r--r--	arch/x86/kvm/mmu.c	27
1 file changed, 17 insertions(+), 10 deletions(-)
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index d07e436b7a42..1ca7164a74f1 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -3944,7 +3944,6 @@ static void kvm_mmu_remove_some_alloc_mmu_pages(struct kvm *kvm,
static int mmu_shrink(struct shrinker *shrink, struct shrink_control *sc)
{
struct kvm *kvm;
- struct kvm *kvm_freed = NULL;
int nr_to_scan = sc->nr_to_scan;
if (nr_to_scan == 0)
@@ -3956,22 +3955,30 @@ static int mmu_shrink(struct shrinker *shrink, struct shrink_control *sc)
int idx;
LIST_HEAD(invalid_list);
+ /*
+ * n_used_mmu_pages is accessed without holding kvm->mmu_lock
+ * here. We may skip a VM instance erroneously, but we do not
+ * want to shrink a VM that only started to populate its MMU
+ * anyway.
+ */
+ if (!kvm->arch.n_used_mmu_pages) {
+ if (!nr_to_scan--)
+ break;
+ continue;
+ }
+
idx = srcu_read_lock(&kvm->srcu);
spin_lock(&kvm->mmu_lock);
- if (!kvm_freed && nr_to_scan > 0 &&
- kvm->arch.n_used_mmu_pages > 0) {
- kvm_mmu_remove_some_alloc_mmu_pages(kvm,
- &invalid_list);
- kvm_freed = kvm;
- }
- nr_to_scan--;
+ kvm_mmu_remove_some_alloc_mmu_pages(kvm, &invalid_list);
kvm_mmu_commit_zap_page(kvm, &invalid_list);
+
spin_unlock(&kvm->mmu_lock);
srcu_read_unlock(&kvm->srcu, idx);
+
+ list_move_tail(&kvm->vm_list, &vm_list);
+ break;
}
- if (kvm_freed)
- list_move_tail(&kvm_freed->vm_list, &vm_list);
+
raw_spin_unlock(&kvm_lock);
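Pieced together from the hunks above, the scan loop after this patch reads as follows. The lines outside the hunks (the early exit, taking kvm_lock, and the return value) are filled in as a best-effort reconstruction from context, so treat this as a sketch rather than the verbatim upstream file:

static int mmu_shrink(struct shrinker *shrink, struct shrink_control *sc)
{
	struct kvm *kvm;
	int nr_to_scan = sc->nr_to_scan;

	if (nr_to_scan == 0)
		goto out;

	raw_spin_lock(&kvm_lock);

	list_for_each_entry(kvm, &vm_list, vm_list) {
		int idx;
		LIST_HEAD(invalid_list);

		/*
		 * Cheap, unlocked check: skip VMs whose MMU holds no
		 * pages, charging each skip against the scan budget.
		 */
		if (!kvm->arch.n_used_mmu_pages) {
			if (!nr_to_scan--)
				break;
			continue;
		}

		idx = srcu_read_lock(&kvm->srcu);
		spin_lock(&kvm->mmu_lock);

		kvm_mmu_remove_some_alloc_mmu_pages(kvm, &invalid_list);
		kvm_mmu_commit_zap_page(kvm, &invalid_list);

		spin_unlock(&kvm->mmu_lock);
		srcu_read_unlock(&kvm->srcu, idx);

		/*
		 * Rotate the VM we just shrank to the list tail so the
		 * next call starts elsewhere, and stop: at most one VM
		 * is shrunk per invocation.
		 */
		list_move_tail(&kvm->vm_list, &vm_list);
		break;
	}

	raw_spin_unlock(&kvm_lock);

out:
	return percpu_counter_read_positive(&kvm_total_used_mmu_pages);
}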