author     Mingwei Zhang <mizhang@google.com>   2021-08-02 21:46:07 -0700
committer  Paolo Bonzini <pbonzini@redhat.com>  2021-08-20 16:06:34 -0400
commit     71f51d2c3253645ccff69d6fa3a870f47005f0b3
tree       491d1cba8e77f7dfcb4b623d0fa21f6c954807ef /arch/x86/kvm
parent     088acd23526647844aec1c39db4ad02552c86c7b
KVM: x86/mmu: Add detailed page size stats
Existing KVM code tracks the number of large pages regardless of their
sizes. Therefore, when a large page of 1GB (or larger) is adopted, the
information becomes less useful because lpages counts a mix of 1G and
2M pages.

So remove lpages, since it is easy for userspace to aggregate the info.
Instead, provide comprehensive page stats for all sizes from 4K to 512G.

Suggested-by: Ben Gardon <bgardon@google.com>
Reviewed-by: David Matlack <dmatlack@google.com>
Reviewed-by: Ben Gardon <bgardon@google.com>
Signed-off-by: Mingwei Zhang <mizhang@google.com>
Cc: Jing Zhang <jingzhangos@google.com>
Cc: David Matlack <dmatlack@google.com>
Cc: Sean Christopherson <seanjc@google.com>
Message-Id: <20210803044607.599629-4-mizhang@google.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
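Userspace that previously consumed lpages can rebuild the old value by
summing the new per-size counters. A minimal sketch, assuming the stats
are read from the per-VM debugfs directory; the path layout and the VM
identifier below are hypothetical, for illustration only:

#include <stdio.h>
#include <stdint.h>

static uint64_t read_stat(const char *dir, const char *name)
{
	char path[256];
	unsigned long long val = 0;
	FILE *f;

	snprintf(path, sizeof(path), "%s/%s", dir, name);
	f = fopen(path, "r");
	if (f) {
		if (fscanf(f, "%llu", &val) != 1)
			val = 0;
		fclose(f);
	}
	return val;
}

int main(void)
{
	/* hypothetical per-VM debugfs directory */
	const char *dir = "/sys/kernel/debug/kvm/1234-5";
	uint64_t lpages = read_stat(dir, "pages_2m") +
			  read_stat(dir, "pages_1g");

	printf("large pages: %llu\n", (unsigned long long)lpages);
	return 0;
}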
Diffstat (limited to 'arch/x86/kvm')
-rw-r--r--  arch/x86/kvm/mmu.h           4
-rw-r--r--  arch/x86/kvm/mmu/mmu.c      32
-rw-r--r--  arch/x86/kvm/mmu/tdp_mmu.c  15
-rw-r--r--  arch/x86/kvm/x86.c           4
4 files changed, 25 insertions, 30 deletions
diff --git a/arch/x86/kvm/mmu.h b/arch/x86/kvm/mmu.h
index 59e831a8ab9d..e9688a9f7b57 100644
--- a/arch/x86/kvm/mmu.h
+++ b/arch/x86/kvm/mmu.h
@@ -261,4 +261,8 @@ kvm_mmu_slot_lpages(struct kvm_memory_slot *slot, int level)
return __kvm_mmu_slot_lpages(slot, slot->npages, level);
}
+static inline void kvm_update_page_stats(struct kvm *kvm, int level, int count)
+{
+ atomic64_add(count, &kvm->stat.pages[level - 1]);
+}
#endif
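A quick illustration of the new helper (not part of the patch): since
PG_LEVEL_4K is 1, the "level - 1" index maps 4K to pages[0], 2M to
pages[1], and 1G to pages[2]. Hypothetical call sites, mirroring how
the patch uses it:

	kvm_update_page_stats(kvm, PG_LEVEL_2M, 1);	/* 2M leaf installed */
	kvm_update_page_stats(kvm, PG_LEVEL_2M, -1);	/* 2M leaf zapped */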
diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
index 2f6458bca65d..54cb15e4b550 100644
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -604,10 +604,11 @@ static bool mmu_spte_update(u64 *sptep, u64 new_spte)
* state bits, it is used to clear the last level sptep.
* Returns the old PTE.
*/
-static u64 mmu_spte_clear_track_bits(u64 *sptep)
+static u64 mmu_spte_clear_track_bits(struct kvm *kvm, u64 *sptep)
{
kvm_pfn_t pfn;
u64 old_spte = *sptep;
+ int level = sptep_to_sp(sptep)->role.level;
if (!spte_has_volatile_bits(old_spte))
__update_clear_spte_fast(sptep, 0ull);
@@ -617,6 +618,8 @@ static u64 mmu_spte_clear_track_bits(u64 *sptep)
if (!is_shadow_present_pte(old_spte))
return old_spte;
+ kvm_update_page_stats(kvm, level, -1);
+
pfn = spte_to_pfn(old_spte);
/*
@@ -1001,14 +1004,15 @@ static void __pte_list_remove(u64 *spte, struct kvm_rmap_head *rmap_head)
}
}
-static void pte_list_remove(struct kvm_rmap_head *rmap_head, u64 *sptep)
+static void pte_list_remove(struct kvm *kvm, struct kvm_rmap_head *rmap_head,
+ u64 *sptep)
{
- mmu_spte_clear_track_bits(sptep);
+ mmu_spte_clear_track_bits(kvm, sptep);
__pte_list_remove(sptep, rmap_head);
}
/* Return true if rmap existed, false otherwise */
-static bool pte_list_destroy(struct kvm_rmap_head *rmap_head)
+static bool pte_list_destroy(struct kvm *kvm, struct kvm_rmap_head *rmap_head)
{
struct pte_list_desc *desc, *next;
int i;
@@ -1017,7 +1021,7 @@ static bool pte_list_destroy(struct kvm_rmap_head *rmap_head)
return false;
if (!(rmap_head->val & 1)) {
- mmu_spte_clear_track_bits((u64 *)rmap_head->val);
+ mmu_spte_clear_track_bits(kvm, (u64 *)rmap_head->val);
goto out;
}
@@ -1025,7 +1029,7 @@ static bool pte_list_destroy(struct kvm_rmap_head *rmap_head)
for (; desc; desc = next) {
for (i = 0; i < desc->spte_count; i++)
- mmu_spte_clear_track_bits(desc->sptes[i]);
+ mmu_spte_clear_track_bits(kvm, desc->sptes[i]);
next = desc->more;
mmu_free_pte_list_desc(desc);
}
@@ -1188,7 +1192,7 @@ out:
static void drop_spte(struct kvm *kvm, u64 *sptep)
{
- u64 old_spte = mmu_spte_clear_track_bits(sptep);
+ u64 old_spte = mmu_spte_clear_track_bits(kvm, sptep);
if (is_shadow_present_pte(old_spte))
rmap_remove(kvm, sptep);
@@ -1200,7 +1204,6 @@ static bool __drop_large_spte(struct kvm *kvm, u64 *sptep)
if (is_large_pte(*sptep)) {
WARN_ON(sptep_to_sp(sptep)->role.level == PG_LEVEL_4K);
drop_spte(kvm, sptep);
- --kvm->stat.lpages;
return true;
}
@@ -1450,7 +1453,7 @@ static bool rmap_write_protect(struct kvm_vcpu *vcpu, u64 gfn)
static bool kvm_zap_rmapp(struct kvm *kvm, struct kvm_rmap_head *rmap_head,
const struct kvm_memory_slot *slot)
{
- return pte_list_destroy(rmap_head);
+ return pte_list_destroy(kvm, rmap_head);
}
static bool kvm_unmap_rmapp(struct kvm *kvm, struct kvm_rmap_head *rmap_head,
@@ -1481,13 +1484,13 @@ restart:
need_flush = 1;
if (pte_write(pte)) {
- pte_list_remove(rmap_head, sptep);
+ pte_list_remove(kvm, rmap_head, sptep);
goto restart;
} else {
new_spte = kvm_mmu_changed_pte_notifier_make_spte(
*sptep, new_pfn);
- mmu_spte_clear_track_bits(sptep);
+ mmu_spte_clear_track_bits(kvm, sptep);
mmu_spte_set(sptep, new_spte);
}
}
@@ -2292,8 +2295,6 @@ static int mmu_page_zap_pte(struct kvm *kvm, struct kvm_mmu_page *sp,
if (is_shadow_present_pte(pte)) {
if (is_last_spte(pte, sp->role.level)) {
drop_spte(kvm, spte);
- if (is_large_pte(pte))
- --kvm->stat.lpages;
} else {
child = to_shadow_page(pte & PT64_BASE_ADDR_MASK);
drop_parent_pte(child, spte);
@@ -2778,8 +2779,7 @@ static int mmu_set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
trace_kvm_mmu_set_spte(level, gfn, sptep);
if (!was_rmapped) {
- if (is_large_pte(*sptep))
- ++vcpu->kvm->stat.lpages;
+ kvm_update_page_stats(vcpu->kvm, level, 1);
rmap_count = rmap_add(vcpu, sptep, gfn);
if (rmap_count > RMAP_RECYCLE_THRESHOLD)
rmap_recycle(vcpu, sptep, gfn);
@@ -5809,7 +5809,7 @@ restart:
if (sp->role.direct && !kvm_is_reserved_pfn(pfn) &&
sp->role.level < kvm_mmu_max_mapping_level(kvm, slot, sp->gfn,
pfn, PG_LEVEL_NUM)) {
- pte_list_remove(rmap_head, sptep);
+ pte_list_remove(kvm, rmap_head, sptep);
if (kvm_available_flush_tlb_with_range())
kvm_flush_remote_tlbs_with_address(kvm, sp->gfn,
diff --git a/arch/x86/kvm/mmu/tdp_mmu.c b/arch/x86/kvm/mmu/tdp_mmu.c
index 85f69558b490..db636250972a 100644
--- a/arch/x86/kvm/mmu/tdp_mmu.c
+++ b/arch/x86/kvm/mmu/tdp_mmu.c
@@ -412,7 +412,6 @@ static void __handle_changed_spte(struct kvm *kvm, int as_id, gfn_t gfn,
bool was_leaf = was_present && is_last_spte(old_spte, level);
bool is_leaf = is_present && is_last_spte(new_spte, level);
bool pfn_changed = spte_to_pfn(old_spte) != spte_to_pfn(new_spte);
- bool was_large, is_large;
WARN_ON(level > PT64_ROOT_MAX_LEVEL);
WARN_ON(level < PG_LEVEL_4K);
@@ -471,18 +470,8 @@ static void __handle_changed_spte(struct kvm *kvm, int as_id, gfn_t gfn,
return;
}
- /*
- * Update large page stats if a large page is being zapped, created, or
- * is replacing an existing shadow page.
- */
- was_large = was_leaf && is_large_pte(old_spte);
- is_large = is_leaf && is_large_pte(new_spte);
- if (was_large != is_large) {
- if (was_large)
- atomic64_sub(1, (atomic64_t *)&kvm->stat.lpages);
- else
- atomic64_add(1, (atomic64_t *)&kvm->stat.lpages);
- }
+ if (is_leaf != was_leaf)
+ kvm_update_page_stats(kvm, level, is_leaf ? 1 : -1);
if (was_leaf && is_dirty_spte(old_spte) &&
(!is_present || !is_dirty_spte(new_spte) || pfn_changed))
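Note the semantic change in the hunk above: the old TDP MMU code only
moved the counter when large-page-ness changed, whereas the new check
fires on any leaf transition, so 4K mappings are now accounted as well.
A sketch of the equivalent expanded logic:

	/* equivalent to the one-liner above (sketch) */
	if (!was_leaf && is_leaf)
		kvm_update_page_stats(kvm, level, 1);	/* leaf installed */
	else if (was_leaf && !is_leaf)
		kvm_update_page_stats(kvm, level, -1);	/* leaf removed */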
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 9425589f34ca..4e07cae56636 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -233,7 +233,9 @@ const struct _kvm_stats_desc kvm_vm_stats_desc[] = {
STATS_DESC_COUNTER(VM, mmu_recycled),
STATS_DESC_COUNTER(VM, mmu_cache_miss),
STATS_DESC_ICOUNTER(VM, mmu_unsync),
- STATS_DESC_ICOUNTER(VM, lpages),
+ STATS_DESC_ICOUNTER(VM, pages_4k),
+ STATS_DESC_ICOUNTER(VM, pages_2m),
+ STATS_DESC_ICOUNTER(VM, pages_1g),
STATS_DESC_ICOUNTER(VM, nx_lpage_splits),
STATS_DESC_PCOUNTER(VM, max_mmu_rmap_size),
	STATS_DESC_PCOUNTER(VM, max_mmu_page_hash_collisions)
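The three new ICOUNTER descriptors line up with the pages[] array that
kvm_update_page_stats() indexes. The backing fields live in
arch/x86/include/asm/kvm_host.h, which this diffstat filters out; a
hedged sketch of the presumed layout:

	/* assumption: named per-size counters aliasing the indexed array */
	union {
		struct {
			atomic64_t pages_4k;
			atomic64_t pages_2m;
			atomic64_t pages_1g;
		};
		atomic64_t pages[3];
	};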