author	Mathieu Desnoyers <mathieu.desnoyers@efficios.com>	2011-03-16 19:04:35 -0400
committer	Mathieu Desnoyers <mathieu.desnoyers@polymtl.ca>	2011-03-16 19:04:35 -0400
commit	efc1681fb4d936ee043f181898a38aeec5b7851d (patch)
tree	df2f291788c748c29d8d142cb73a1864f90b4a51 /mm
parent	54e6171895006a006a71fd70183fef1f0375882b (diff)
lttng-instrumentation/lttng-instrumentation-page_alloc
LTTng instrumentation - page_alloc

Paging activity instrumentation. Instruments page allocation/free to keep
track of page allocation. This does not cover hugetlb activity, which is
covered by a separate patch. These tracepoints are used by LTTng.

Regarding the performance impact of tracepoints (comparable to that of
markers): even without the immediate-values optimizations, tests done by
Hideo Aoki on ia64 show no regression. His test case used hackbench on a
kernel where scheduler instrumentation (about 5 events in the scheduler
code) was added. See the "Tracepoints" patch header for detailed
performance results.

Signed-off-by: Mathieu Desnoyers <mathieu.desnoyers@polymtl.ca>
CC: Martin Bligh <mbligh@google.com>
CC: Masami Hiramatsu <mhiramat@redhat.com>
CC: 'Peter Zijlstra' <peterz@infradead.org>
CC: "Frank Ch. Eigler" <fche@redhat.com>
CC: 'Ingo Molnar' <mingo@elte.hu>
CC: 'Hideo AOKI' <haoki@redhat.com>
CC: Takashi Nishiie <t-nishiie@np.css.fujitsu.com>
CC: 'Steven Rostedt' <rostedt@goodmis.org>
CC: Eduard - Gabriel Munteanu <eduard.munteanu@linux360.ro>
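[Editor's note: the DEFINE_TRACE() lines in the patch below must pair with
DECLARE_TRACE() declarations in the included header. That header lives
outside mm/ and is therefore not in this diffstat; the following is a
minimal sketch of what trace/page_alloc.h would contain, with prototypes
inferred from the trace_page_alloc()/trace_page_free() call sites in the
patch.]

/*
 * Hypothetical sketch of trace/page_alloc.h (the real header is added
 * elsewhere in the LTTng patchset). Prototypes inferred from the call
 * sites in mm/page_alloc.c below.
 */
#ifndef _TRACE_PAGE_ALLOC_H
#define _TRACE_PAGE_ALLOC_H

#include <linux/tracepoint.h>

struct page;

DECLARE_TRACE(page_alloc,
	TP_PROTO(struct page *page, unsigned int order),
	TP_ARGS(page, order));

DECLARE_TRACE(page_free,
	TP_PROTO(struct page *page, unsigned int order),
	TP_ARGS(page, order));

#endif /* _TRACE_PAGE_ALLOC_H */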
Diffstat (limited to 'mm')
-rw-r--r--	mm/page_alloc.c	7
1 file changed, 7 insertions, 0 deletions
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index cdef1d4b4e4..4bb4fa13959 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -53,6 +53,7 @@
 #include <linux/compaction.h>
 #include <trace/events/kmem.h>
 #include <linux/ftrace_event.h>
+#include <trace/page_alloc.h>
 
 #include <asm/tlbflush.h>
 #include <asm/div64.h>
@@ -129,6 +130,9 @@ void pm_restrict_gfp_mask(void)
 int pageblock_order __read_mostly;
 #endif
 
+DEFINE_TRACE(page_alloc);
+DEFINE_TRACE(page_free);
+
 static void __free_pages_ok(struct page *page, unsigned int order);
 
 /*
@@ -647,6 +651,8 @@ static bool free_pages_prepare(struct page *page, unsigned int order)
 	trace_mm_page_free_direct(page, order);
 	kmemcheck_free_shadow(page, order);
 
+	trace_page_free(page, order);
+
 	if (PageAnon(page))
 		page->mapping = NULL;
 	for (i = 0; i < (1 << order); i++)
@@ -2165,6 +2171,7 @@ nopage:
 	}
 	return page;
 got_pg:
+	trace_page_alloc(page, order);
 	if (kmemcheck_enabled)
 		kmemcheck_pagealloc_alloc(page, order, gfp_mask);
 	return page;
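[Editor's note: a tracer such as LTTng consumes these tracepoints by
registering probe callbacks against them. The module below is an
illustrative sketch, not part of this patch, assuming the tracepoint API
of this kernel era, where probes receive a private data pointer as their
first argument.]

/*
 * Illustrative probe module: counts pages allocated and freed via the
 * page_alloc/page_free tracepoints added above.
 */
#include <linux/module.h>
#include <linux/mm_types.h>
#include <trace/page_alloc.h>

static atomic_t nr_allocs = ATOMIC_INIT(0);
static atomic_t nr_frees = ATOMIC_INIT(0);

/* Called from the page allocator; must not sleep. */
static void probe_page_alloc(void *data, struct page *page, unsigned int order)
{
	atomic_add(1 << order, &nr_allocs);
}

static void probe_page_free(void *data, struct page *page, unsigned int order)
{
	atomic_add(1 << order, &nr_frees);
}

static int __init page_trace_init(void)
{
	int ret;

	ret = register_trace_page_alloc(probe_page_alloc, NULL);
	if (ret)
		return ret;
	ret = register_trace_page_free(probe_page_free, NULL);
	if (ret)
		unregister_trace_page_alloc(probe_page_alloc, NULL);
	return ret;
}

static void __exit page_trace_exit(void)
{
	unregister_trace_page_free(probe_page_free, NULL);
	unregister_trace_page_alloc(probe_page_alloc, NULL);
	/* Wait for in-flight probes to finish before the module unloads. */
	tracepoint_synchronize_unregister();
	pr_info("page_trace: %d pages allocated, %d pages freed\n",
		atomic_read(&nr_allocs), atomic_read(&nr_frees));
}

module_init(page_trace_init);
module_exit(page_trace_exit);
MODULE_LICENSE("GPL");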