author     Catalin Marinas <catalin.marinas@arm.com>   2011-09-26 17:12:53 +0100
committer  Catalin Marinas <catalin.marinas@arm.com>   2011-12-02 16:12:42 +0000
commit     f528f0b8e53d73b18be71e96693cfab9322f33c7
tree       5a25d6ac1c2f18561e554074aa7c06bf3d899183
parent     74341703edca6bc68a165a18453071b097828407
kmemleak: Handle percpu memory allocation
This patch adds kmemleak callbacks from the percpu allocator, reducing the
number of false positives caused by kmemleak not scanning such memory
blocks. The percpu chunks themselves are never reported as leaks because of
a current kmemleak limitation: the __percpu pointer does not point directly
to the actual chunks.

Reported-by: Huajun Li <huajun.li.lee@gmail.com>
Acked-by: Christoph Lameter <cl@gentwo.org>
Acked-by: Tejun Heo <tj@kernel.org>
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
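To see the kind of false positive this removes, consider a hypothetical
module (names and sizes invented for illustration) whose only reference to a
kmalloc'ed buffer is stored inside a percpu area. Before this patch kmemleak
never scanned percpu memory, so the buffer below looked unreferenced and was
reported as a leak:

#include <linux/init.h>
#include <linux/percpu.h>
#include <linux/slab.h>

struct foo {
        void *buf;
};

static struct foo __percpu *foo_pcpu;

static int __init foo_init(void)
{
        unsigned int cpu;

        foo_pcpu = alloc_percpu(struct foo);
        if (!foo_pcpu)
                return -ENOMEM;

        for_each_possible_cpu(cpu) {
                /*
                 * The only reference to this allocation is stored in
                 * percpu memory. With the hooks added by this patch,
                 * kmemleak scans each CPU's copy of *foo_pcpu and
                 * finds the reference.
                 */
                per_cpu_ptr(foo_pcpu, cpu)->buf = kmalloc(64, GFP_KERNEL);
        }
        return 0;
}

After this patch, kmemleak_alloc_percpu() registers one scan-only kmemleak
object per possible CPU for every alloc_percpu() call, so the stored pointer
is found during scanning.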
Diffstat (limited to 'mm')
-rw-r--r--  mm/kmemleak.c  72
-rw-r--r--  mm/percpu.c    12
2 files changed, 83 insertions(+), 1 deletion(-)
diff --git a/mm/kmemleak.c b/mm/kmemleak.c
index b4f4e6021c1..15c50302ff9 100644
--- a/mm/kmemleak.c
+++ b/mm/kmemleak.c
@@ -230,8 +230,10 @@ static int kmemleak_skip_disable;
 /* kmemleak operation type for early logging */
 enum {
         KMEMLEAK_ALLOC,
+        KMEMLEAK_ALLOC_PERCPU,
         KMEMLEAK_FREE,
         KMEMLEAK_FREE_PART,
+        KMEMLEAK_FREE_PERCPU,
         KMEMLEAK_NOT_LEAK,
         KMEMLEAK_IGNORE,
         KMEMLEAK_SCAN_AREA,
@@ -852,6 +854,20 @@ out:
         rcu_read_unlock();
 }

+/*
+ * Log an early percpu allocation and populate the stack trace.
+ */
+static void early_alloc_percpu(struct early_log *log)
+{
+        unsigned int cpu;
+        const void __percpu *ptr = log->ptr;
+
+        for_each_possible_cpu(cpu) {
+                log->ptr = per_cpu_ptr(ptr, cpu);
+                early_alloc(log);
+        }
+}
+
 /**
  * kmemleak_alloc - register a newly allocated object
  * @ptr: pointer to beginning of the object
@@ -879,6 +895,34 @@ void __ref kmemleak_alloc(const void *ptr, size_t size, int min_count,
 EXPORT_SYMBOL_GPL(kmemleak_alloc);

 /**
+ * kmemleak_alloc_percpu - register a newly allocated __percpu object
+ * @ptr: __percpu pointer to beginning of the object
+ * @size: size of the object
+ *
+ * This function is called from the kernel percpu allocator when a new object
+ * (memory block) is allocated (alloc_percpu). It assumes GFP_KERNEL
+ * allocation.
+ */
+void __ref kmemleak_alloc_percpu(const void __percpu *ptr, size_t size)
+{
+        unsigned int cpu;
+
+        pr_debug("%s(0x%p, %zu)\n", __func__, ptr, size);
+
+        /*
+         * Percpu allocations are only scanned and not reported as leaks
+         * (min_count is set to 0).
+         */
+        if (atomic_read(&kmemleak_enabled) && ptr && !IS_ERR(ptr))
+                for_each_possible_cpu(cpu)
+                        create_object((unsigned long)per_cpu_ptr(ptr, cpu),
+                                      size, 0, GFP_KERNEL);
+        else if (atomic_read(&kmemleak_early_log))
+                log_early(KMEMLEAK_ALLOC_PERCPU, ptr, size, 0);
+}
+EXPORT_SYMBOL_GPL(kmemleak_alloc_percpu);
+
+/**
  * kmemleak_free - unregister a previously registered object
  * @ptr: pointer to beginning of the object
  *
@@ -917,6 +961,28 @@ void __ref kmemleak_free_part(const void *ptr, size_t size)
 EXPORT_SYMBOL_GPL(kmemleak_free_part);

 /**
+ * kmemleak_free_percpu - unregister a previously registered __percpu object
+ * @ptr: __percpu pointer to beginning of the object
+ *
+ * This function is called from the kernel percpu allocator when an object
+ * (memory block) is freed (free_percpu).
+ */
+void __ref kmemleak_free_percpu(const void __percpu *ptr)
+{
+        unsigned int cpu;
+
+        pr_debug("%s(0x%p)\n", __func__, ptr);
+
+        if (atomic_read(&kmemleak_enabled) && ptr && !IS_ERR(ptr))
+                for_each_possible_cpu(cpu)
+                        delete_object_full((unsigned long)per_cpu_ptr(ptr,
+                                                                      cpu));
+        else if (atomic_read(&kmemleak_early_log))
+                log_early(KMEMLEAK_FREE_PERCPU, ptr, 0, 0);
+}
+EXPORT_SYMBOL_GPL(kmemleak_free_percpu);
+
+/**
  * kmemleak_not_leak - mark an allocated object as false positive
  * @ptr: pointer to beginning of the object
  *
@@ -1727,12 +1793,18 @@ void __init kmemleak_init(void)
                 case KMEMLEAK_ALLOC:
                         early_alloc(log);
                         break;
+                case KMEMLEAK_ALLOC_PERCPU:
+                        early_alloc_percpu(log);
+                        break;
                 case KMEMLEAK_FREE:
                         kmemleak_free(log->ptr);
                         break;
                 case KMEMLEAK_FREE_PART:
                         kmemleak_free_part(log->ptr, log->size);
                         break;
+                case KMEMLEAK_FREE_PERCPU:
+                        kmemleak_free_percpu(log->ptr);
+                        break;
                 case KMEMLEAK_NOT_LEAK:
                         kmemleak_not_leak(log->ptr);
                         break;
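A note on the per_cpu_ptr() calls used throughout the new code: a __percpu
pointer is an offset-like token, not a directly dereferenceable address,
which is why the scanner could never find the raw __percpu value in memory
and why each per-CPU copy is registered with min_count 0 (scanned for
references to other objects, but never itself reported as a leak). A minimal
sketch of what gets registered per allocation (variable names hypothetical):

        size_t size = sizeof(long);
        const void __percpu *pcpu_ptr = __alloc_percpu(size, sizeof(long));
        unsigned int cpu;

        for_each_possible_cpu(cpu) {
                /* the real, scannable virtual address of this CPU's copy */
                const void *addr = per_cpu_ptr(pcpu_ptr, cpu);

                /* kmemleak now tracks [addr, addr + size) with min_count 0 */
        }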
diff --git a/mm/percpu.c b/mm/percpu.c
index 3bb810a7200..86c5bdbdc37 100644
--- a/mm/percpu.c
+++ b/mm/percpu.c
@@ -67,6 +67,7 @@
 #include <linux/spinlock.h>
 #include <linux/vmalloc.h>
 #include <linux/workqueue.h>
+#include <linux/kmemleak.h>

 #include <asm/cacheflush.h>
 #include <asm/sections.h>
@@ -710,6 +711,7 @@ static void __percpu *pcpu_alloc(size_t size, size_t align, bool reserved)
         const char *err;
         int slot, off, new_alloc;
         unsigned long flags;
+        void __percpu *ptr;

         if (unlikely(!size || size > PCPU_MIN_UNIT_SIZE || align > PAGE_SIZE)) {
                 WARN(true, "illegal size (%zu) or align (%zu) for "
@@ -802,7 +804,9 @@ area_found:
         mutex_unlock(&pcpu_alloc_mutex);

         /* return address relative to base address */
-        return __addr_to_pcpu_ptr(chunk->base_addr + off);
+        ptr = __addr_to_pcpu_ptr(chunk->base_addr + off);
+        kmemleak_alloc_percpu(ptr, size);
+        return ptr;

 fail_unlock:
         spin_unlock_irqrestore(&pcpu_lock, flags);
@@ -916,6 +920,8 @@ void free_percpu(void __percpu *ptr)
         if (!ptr)
                 return;

+        kmemleak_free_percpu(ptr);
+
         addr = __pcpu_ptr_to_addr(ptr);

         spin_lock_irqsave(&pcpu_lock, flags);
@@ -1637,6 +1643,8 @@ int __init pcpu_embed_first_chunk(size_t reserved_size, size_t dyn_size,
                         rc = -ENOMEM;
                         goto out_free_areas;
                 }
+                /* kmemleak tracks the percpu allocations separately */
+                kmemleak_free(ptr);
                 areas[group] = ptr;

                 base = min(ptr, base);
@@ -1751,6 +1759,8 @@ int __init pcpu_page_first_chunk(size_t reserved_size,
"for cpu%u\n", psize_str, cpu);
goto enomem;
}
+ /* kmemleak tracks the percpu allocations separately */
+ kmemleak_free(ptr);
pages[j++] = virt_to_page(ptr);
}
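The two kmemleak_free(ptr) calls above implement a hand-off: the memory
backing the first chunk is already tracked by kmemleak via the early
allocator, and objects later carved out of it by the percpu allocator are
tracked through the new percpu hooks instead, so the original kmemleak
object is deleted to avoid tracking the same region twice. From a caller's
perspective nothing changes; a hypothetical teardown matching the earlier
example:

static void __exit foo_exit(void)
{
        unsigned int cpu;

        for_each_possible_cpu(cpu)
                kfree(per_cpu_ptr(foo_pcpu, cpu)->buf);

        /*
         * free_percpu() now calls kmemleak_free_percpu() internally,
         * deleting the per-CPU kmemleak objects created at allocation.
         */
        free_percpu(foo_pcpu);
}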