author     Linus Torvalds <torvalds@linux-foundation.org>    2011-01-13 10:14:24 -0800
committer  Linus Torvalds <torvalds@linux-foundation.org>    2011-01-13 10:14:24 -0800
commit     55065bc52795faae549abfb912aacc622dd63876 (patch)
tree       63683547e41ed459a2a8747eeafb5e969633d54f /include/trace
parent     008d23e4852d78bb2618f2035f8b2110b6a6b968 (diff)
parent     e5c301428294cb8925667c9ee39f817c4ab1c2c9 (diff)
Merge branch 'kvm-updates/2.6.38' of git://git.kernel.org/pub/scm/virt/kvm/kvm
* 'kvm-updates/2.6.38' of git://git.kernel.org/pub/scm/virt/kvm/kvm: (142 commits)
  KVM: Initialize fpu state in preemptible context
  KVM: VMX: when entering real mode align segment base to 16 bytes
  KVM: MMU: handle 'map_writable' in set_spte() function
  KVM: MMU: audit: allow audit more guests at the same time
  KVM: Fetch guest cr3 from hardware on demand
  KVM: Replace reads of vcpu->arch.cr3 by an accessor
  KVM: MMU: only write protect mappings at pagetable level
  KVM: VMX: Correct asm constraint in vmcs_load()/vmcs_clear()
  KVM: MMU: Initialize base_role for tdp mmus
  KVM: VMX: Optimize atomic EFER load
  KVM: VMX: Add definitions for more vm entry/exit control bits
  KVM: SVM: copy instruction bytes from VMCB
  KVM: SVM: implement enhanced INVLPG intercept
  KVM: SVM: enhance mov DR intercept handler
  KVM: SVM: enhance MOV CR intercept handler
  KVM: SVM: add new SVM feature bit names
  KVM: cleanup emulate_instruction
  KVM: move complete_insn_gp() into x86.c
  KVM: x86: fix CR8 handling
  KVM guest: Fix kvm clock initialization when it's configured out
  ...
Diffstat (limited to 'include/trace')
-rw-r--r--  include/trace/events/kvm.h | 121
1 file changed, 121 insertions(+), 0 deletions(-)
diff --git a/include/trace/events/kvm.h b/include/trace/events/kvm.h
index 6dd3a51ab1c..46e3cd8e197 100644
--- a/include/trace/events/kvm.h
+++ b/include/trace/events/kvm.h
@@ -6,6 +6,36 @@
#undef TRACE_SYSTEM
#define TRACE_SYSTEM kvm
+#define ERSN(x) { KVM_EXIT_##x, "KVM_EXIT_" #x }
+
+#define kvm_trace_exit_reason \
+ ERSN(UNKNOWN), ERSN(EXCEPTION), ERSN(IO), ERSN(HYPERCALL), \
+ ERSN(DEBUG), ERSN(HLT), ERSN(MMIO), ERSN(IRQ_WINDOW_OPEN), \
+ ERSN(SHUTDOWN), ERSN(FAIL_ENTRY), ERSN(INTR), ERSN(SET_TPR), \
+ ERSN(TPR_ACCESS), ERSN(S390_SIEIC), ERSN(S390_RESET), ERSN(DCR),\
+ ERSN(NMI), ERSN(INTERNAL_ERROR), ERSN(OSI)
+
+TRACE_EVENT(kvm_userspace_exit,
+ TP_PROTO(__u32 reason, int errno),
+ TP_ARGS(reason, errno),
+
+ TP_STRUCT__entry(
+ __field( __u32, reason )
+ __field( int, errno )
+ ),
+
+ TP_fast_assign(
+ __entry->reason = reason;
+ __entry->errno = errno;
+ ),
+
+ TP_printk("reason %s (%d)",
+ __entry->errno < 0 ?
+ (__entry->errno == -EINTR ? "restart" : "error") :
+ __print_symbolic(__entry->reason, kvm_trace_exit_reason),
+ __entry->errno < 0 ? -__entry->errno : __entry->reason)
+);
+
#if defined(__KVM_HAVE_IOAPIC)
TRACE_EVENT(kvm_set_irq,
TP_PROTO(unsigned int gsi, int level, int irq_source_id),
@@ -185,6 +215,97 @@ TRACE_EVENT(kvm_age_page,
__entry->referenced ? "YOUNG" : "OLD")
);
+#ifdef CONFIG_KVM_ASYNC_PF
+DECLARE_EVENT_CLASS(kvm_async_get_page_class,
+
+ TP_PROTO(u64 gva, u64 gfn),
+
+ TP_ARGS(gva, gfn),
+
+ TP_STRUCT__entry(
+ __field(__u64, gva)
+ __field(u64, gfn)
+ ),
+
+ TP_fast_assign(
+ __entry->gva = gva;
+ __entry->gfn = gfn;
+ ),
+
+ TP_printk("gva = %#llx, gfn = %#llx", __entry->gva, __entry->gfn)
+);
+
+DEFINE_EVENT(kvm_async_get_page_class, kvm_try_async_get_page,
+
+ TP_PROTO(u64 gva, u64 gfn),
+
+ TP_ARGS(gva, gfn)
+);
+
+DEFINE_EVENT(kvm_async_get_page_class, kvm_async_pf_doublefault,
+
+ TP_PROTO(u64 gva, u64 gfn),
+
+ TP_ARGS(gva, gfn)
+);
+
+DECLARE_EVENT_CLASS(kvm_async_pf_nopresent_ready,
+
+ TP_PROTO(u64 token, u64 gva),
+
+ TP_ARGS(token, gva),
+
+ TP_STRUCT__entry(
+ __field(__u64, token)
+ __field(__u64, gva)
+ ),
+
+ TP_fast_assign(
+ __entry->token = token;
+ __entry->gva = gva;
+ ),
+
+ TP_printk("token %#llx gva %#llx", __entry->token, __entry->gva)
+
+);
+
+DEFINE_EVENT(kvm_async_pf_nopresent_ready, kvm_async_pf_not_present,
+
+ TP_PROTO(u64 token, u64 gva),
+
+ TP_ARGS(token, gva)
+);
+
+DEFINE_EVENT(kvm_async_pf_nopresent_ready, kvm_async_pf_ready,
+
+ TP_PROTO(u64 token, u64 gva),
+
+ TP_ARGS(token, gva)
+);
+
+TRACE_EVENT(
+ kvm_async_pf_completed,
+ TP_PROTO(unsigned long address, struct page *page, u64 gva),
+ TP_ARGS(address, page, gva),
+
+ TP_STRUCT__entry(
+ __field(unsigned long, address)
+ __field(pfn_t, pfn)
+ __field(u64, gva)
+ ),
+
+ TP_fast_assign(
+ __entry->address = address;
+ __entry->pfn = page ? page_to_pfn(page) : 0;
+ __entry->gva = gva;
+ ),
+
+ TP_printk("gva %#llx address %#lx pfn %#llx", __entry->gva,
+ __entry->address, __entry->pfn)
+);
+
+#endif
+
#endif /* _TRACE_KVM_MAIN_H */
/* This part must be outside protection */
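
For context, the new kvm_userspace_exit tracepoint is intended to fire when a vcpu drops back to userspace from the KVM_RUN ioctl. The sketch below is illustrative only: kvm_run_to_userspace_sketch() is a hypothetical wrapper, and the exact upstream call site in virt/kvm/kvm_main.c may differ from this simplified version.

	#include <linux/kvm_host.h>
	#include <trace/events/kvm.h>

	/*
	 * Illustrative sketch (not the exact upstream call site): run the
	 * guest and trace why control returned to userspace.
	 */
	static long kvm_run_to_userspace_sketch(struct kvm_vcpu *vcpu)
	{
		int r;

		/* Enter the guest; returns when an exit must be handled in userspace. */
		r = kvm_arch_vcpu_ioctl_run(vcpu, vcpu->run);

		/*
		 * 'reason' is the KVM_EXIT_* value from the shared kvm_run
		 * structure; 'errno' is the ioctl return value.  The TP_printk
		 * above decodes the reason via __print_symbolic() unless errno
		 * is negative, in which case it prints "restart" (-EINTR) or
		 * "error".
		 */
		trace_kvm_userspace_exit(vcpu->run->exit_reason, r);

		return r;
	}

Once merged, the event should show up under the kvm trace system, e.g. via perf record -e kvm:kvm_userspace_exit -a, or through /sys/kernel/debug/tracing/events/kvm/kvm_userspace_exit.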