Diffstat (limited to 'mm')
 -rw-r--r--  mm/Makefile              4
 -rw-r--r--  mm/compaction.c          2
 -rw-r--r--  mm/early_ioremap.c      23
 -rw-r--r--  mm/kasan/Makefile        8
 -rw-r--r--  mm/kasan/kasan.c       572
 -rw-r--r--  mm/kasan/kasan.h        71
 -rw-r--r--  mm/kasan/kasan_init.c  152
 -rw-r--r--  mm/kasan/report.c      298
 -rw-r--r--  mm/kmemleak.c            6
 -rw-r--r--  mm/mempool.c           131
 -rw-r--r--  mm/page_alloc.c          3
 -rw-r--r--  mm/slab_common.c         5
 -rw-r--r--  mm/slub.c               62
 -rw-r--r--  mm/vmalloc.c            22
14 files changed, 1337 insertions, 22 deletions
diff --git a/mm/Makefile b/mm/Makefile
index 8405eb0023a9..3614adbf6d32 100644
--- a/mm/Makefile
+++ b/mm/Makefile
@@ -2,6 +2,9 @@
# Makefile for the linux memory manager.
#
+KASAN_SANITIZE_slab_common.o := n
+KASAN_SANITIZE_slub.o := n
+
mmu-y := nommu.o
mmu-$(CONFIG_MMU) := fremap.o gup.o highmem.o memory.o mincore.o \
mlock.o mmap.o mprotect.o mremap.o msync.o rmap.o \
@@ -49,6 +52,7 @@ obj-$(CONFIG_PAGE_POISONING) += debug-pagealloc.o
obj-$(CONFIG_SLAB) += slab.o
obj-$(CONFIG_SLUB) += slub.o
obj-$(CONFIG_KMEMCHECK) += kmemcheck.o
+obj-$(CONFIG_KASAN) += kasan/
obj-$(CONFIG_FAILSLAB) += failslab.o
obj-$(CONFIG_MEMORY_HOTPLUG) += memory_hotplug.o
obj-$(CONFIG_FS_XIP) += filemap_xip.o
diff --git a/mm/compaction.c b/mm/compaction.c
index b47f08e159d4..81c89c39dcf7 100644
--- a/mm/compaction.c
+++ b/mm/compaction.c
@@ -16,6 +16,7 @@
#include <linux/sysfs.h>
#include <linux/balloon_compaction.h>
#include <linux/page-isolation.h>
+#include <linux/kasan.h>
#include "internal.h"
#ifdef CONFIG_COMPACTION
@@ -59,6 +60,7 @@ static void map_pages(struct list_head *list)
list_for_each_entry(page, list, lru) {
arch_alloc_page(page, 0);
kernel_map_pages(page, 1, 1);
+ kasan_alloc_pages(page, 0);
}
}
diff --git a/mm/early_ioremap.c b/mm/early_ioremap.c
index e10ccd299d66..d0d7cdbfb69f 100644
--- a/mm/early_ioremap.c
+++ b/mm/early_ioremap.c
@@ -15,6 +15,7 @@
#include <linux/mm.h>
#include <linux/vmalloc.h>
#include <asm/fixmap.h>
+#include <asm/early_ioremap.h>
#ifdef CONFIG_MMU
static int early_ioremap_debug __initdata;
@@ -217,6 +218,28 @@ early_memremap(resource_size_t phys_addr, unsigned long size)
return (__force void *)__early_ioremap(phys_addr, size,
FIXMAP_PAGE_NORMAL);
}
+
+#define MAX_MAP_CHUNK (NR_FIX_BTMAPS << PAGE_SHIFT)
+
+void __init copy_from_early_mem(void *dest, phys_addr_t src, unsigned long size)
+{
+ unsigned long slop, clen;
+ char *p;
+
+ while (size) {
+ slop = src & ~PAGE_MASK;
+ clen = size;
+ if (clen > MAX_MAP_CHUNK - slop)
+ clen = MAX_MAP_CHUNK - slop;
+ p = early_memremap(src & PAGE_MASK, clen + slop);
+ memcpy(dest, p + slop, clen);
+ early_memunmap(p, clen + slop);
+ dest += clen;
+ src += clen;
+ size -= clen;
+ }
+}
+
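
The chunking above exists because early_memremap() can only map NR_FIX_BTMAPS fixmap slots at a time, hence the MAX_MAP_CHUNK bound. A worked pass through the loop, with illustrative numbers (4 KiB pages, NR_FIX_BTMAPS == 64, so MAX_MAP_CHUNK == 256 KiB):

    /*
     * Illustrative first iteration, assuming src = 0x40001234, size = 1 MiB:
     *
     *   slop = src & ~PAGE_MASK         = 0x234
     *   clen = MAX_MAP_CHUNK - slop     = 0x40000 - 0x234 = 0x3fdcc
     *
     * early_memremap(0x40001000, clen + slop) maps exactly 64 pages,
     * memcpy() copies clen bytes starting at offset slop, and after
     * src += clen the next iteration starts page-aligned (slop == 0),
     * so every later chunk is a full MAX_MAP_CHUNK.
     */
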
#else /* CONFIG_MMU */
void __init __iomem *
diff --git a/mm/kasan/Makefile b/mm/kasan/Makefile
new file mode 100644
index 000000000000..64710148941e
--- /dev/null
+++ b/mm/kasan/Makefile
@@ -0,0 +1,8 @@
+KASAN_SANITIZE := n
+
+CFLAGS_REMOVE_kasan.o = -pg
+# Function splitter causes unnecessary splits in __asan_load1/__asan_store1
+# see: https://gcc.gnu.org/bugzilla/show_bug.cgi?id=63533
+CFLAGS_kasan.o := $(call cc-option, -fno-conserve-stack -fno-stack-protector)
+
+obj-y := kasan.o report.o kasan_init.o
diff --git a/mm/kasan/kasan.c b/mm/kasan/kasan.c
new file mode 100644
index 000000000000..8f6ff54e7440
--- /dev/null
+++ b/mm/kasan/kasan.c
@@ -0,0 +1,572 @@
+/*
+ * This file contains shadow memory manipulation code.
+ *
+ * Copyright (c) 2014 Samsung Electronics Co., Ltd.
+ * Author: Andrey Ryabinin <a.ryabinin@samsung.com>
+ *
+ * Some code borrowed from https://github.com/xairy/kasan-prototype by
+ * Andrey Konovalov <adech.fo@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+#define DISABLE_BRANCH_PROFILING
+
+#include <linux/export.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/kmemleak.h>
+#include <linux/linkage.h>
+#include <linux/memblock.h>
+#include <linux/memory.h>
+#include <linux/mm.h>
+#include <linux/module.h>
+#include <linux/printk.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/stacktrace.h>
+#include <linux/string.h>
+#include <linux/types.h>
+#include <linux/vmalloc.h>
+#include <linux/kasan.h>
+
+#include "kasan.h"
+#include "../slab.h"
+
+/*
+ * Poisons the shadow memory for 'size' bytes starting from 'addr'.
+ * Memory addresses should be aligned to KASAN_SHADOW_SCALE_SIZE.
+ */
+static void kasan_poison_shadow(const void *address, size_t size, u8 value)
+{
+ void *shadow_start, *shadow_end;
+
+ shadow_start = kasan_mem_to_shadow(address);
+ shadow_end = kasan_mem_to_shadow(address + size);
+
+ memset(shadow_start, value, shadow_end - shadow_start);
+}
+
+void kasan_unpoison_shadow(const void *address, size_t size)
+{
+ kasan_poison_shadow(address, size, 0);
+
+ if (size & KASAN_SHADOW_MASK) {
+ u8 *shadow = (u8 *)kasan_mem_to_shadow(address + size);
+ *shadow = size & KASAN_SHADOW_MASK;
+ }
+}
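
These two helpers rely on the mem-to-shadow mapping declared in include/linux/kasan.h, which is outside this mm/ diff. For context, it is the usual one-shadow-byte-per-8-bytes scaling: a shadow byte of 0 marks a fully accessible 8-byte granule, 1..7 marks a granule whose first N bytes are accessible (the trailing write in kasan_unpoison_shadow() above), and the 0xF* values defined in kasan.h mark fully poisoned granules.

    /* Sketch of the include/linux/kasan.h helper, shown here for context;
     * the architecture provides KASAN_SHADOW_OFFSET and the scale shift (3). */
    static inline void *kasan_mem_to_shadow(const void *addr)
    {
    	return (void *)((unsigned long)addr >> KASAN_SHADOW_SCALE_SHIFT)
    		+ KASAN_SHADOW_OFFSET;
    }
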
+
+static void __kasan_unpoison_stack(struct task_struct *task, void *sp)
+{
+ void *base = task_stack_page(task);
+ size_t size = sp - base;
+
+ kasan_unpoison_shadow(base, size);
+}
+
+/* Unpoison the entire stack for a task. */
+void kasan_unpoison_task_stack(struct task_struct *task)
+{
+ __kasan_unpoison_stack(task, task_stack_page(task) + THREAD_SIZE);
+}
+
+/* Unpoison the stack for the current task beyond a watermark sp value. */
+asmlinkage void kasan_unpoison_remaining_stack(void *sp)
+{
+ __kasan_unpoison_stack(current, sp);
+}
+
+/*
+ * All functions below are always inlined so the compiler can
+ * perform better optimizations in each of __asan_loadX/__asan_storeX
+ * depending on memory access size X.
+ */
+
+static __always_inline bool memory_is_poisoned_1(unsigned long addr)
+{
+ s8 shadow_value = *(s8 *)kasan_mem_to_shadow((void *)addr);
+
+ if (unlikely(shadow_value)) {
+ s8 last_accessible_byte = addr & KASAN_SHADOW_MASK;
+ return unlikely(last_accessible_byte >= shadow_value);
+ }
+
+ return false;
+}
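
A worked example of the single-byte check, with illustrative numbers: consider a 13-byte object starting on an 8-byte boundary, so its two shadow bytes are { 0, 5 } (first granule fully valid, only the first 5 bytes of the second granule valid).

    /*
     * Access at object offset 12: shadow value is 5,
     *   last_accessible_byte = addr & KASAN_SHADOW_MASK = 4,
     *   4 >= 5 is false                          -> access is fine.
     * Access at object offset 13 (out of bounds):
     *   last_accessible_byte = 5, 5 >= 5 is true -> reported.
     */
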
+
+static __always_inline bool memory_is_poisoned_2(unsigned long addr)
+{
+ u16 *shadow_addr = (u16 *)kasan_mem_to_shadow((void *)addr);
+
+ if (unlikely(*shadow_addr)) {
+ if (memory_is_poisoned_1(addr + 1))
+ return true;
+
+ /*
+ * If single shadow byte covers 2-byte access, we don't
+ * need to do anything more. Otherwise, test the first
+ * shadow byte.
+ */
+ if (likely(((addr + 1) & KASAN_SHADOW_MASK) != 0))
+ return false;
+
+ return unlikely(*(u8 *)shadow_addr);
+ }
+
+ return false;
+}
+
+static __always_inline bool memory_is_poisoned_4(unsigned long addr)
+{
+ u16 *shadow_addr = (u16 *)kasan_mem_to_shadow((void *)addr);
+
+ if (unlikely(*shadow_addr)) {
+ if (memory_is_poisoned_1(addr + 3))
+ return true;
+
+ /*
+ * If single shadow byte covers 4-byte access, we don't
+ * need to do anything more. Otherwise, test the first
+ * shadow byte.
+ */
+ if (likely(((addr + 3) & KASAN_SHADOW_MASK) >= 3))
+ return false;
+
+ return unlikely(*(u8 *)shadow_addr);
+ }
+
+ return false;
+}
+
+static __always_inline bool memory_is_poisoned_8(unsigned long addr)
+{
+ u16 *shadow_addr = (u16 *)kasan_mem_to_shadow((void *)addr);
+
+ if (unlikely(*shadow_addr)) {
+ if (memory_is_poisoned_1(addr + 7))
+ return true;
+
+ /*
+ * If single shadow byte covers 8-byte access, we don't
+ * need to do anything more. Otherwise, test the first
+ * shadow byte.
+ */
+ if (likely(IS_ALIGNED(addr, KASAN_SHADOW_SCALE_SIZE)))
+ return false;
+
+ return unlikely(*(u8 *)shadow_addr);
+ }
+
+ return false;
+}
+
+static __always_inline bool memory_is_poisoned_16(unsigned long addr)
+{
+ u32 *shadow_addr = (u32 *)kasan_mem_to_shadow((void *)addr);
+
+ if (unlikely(*shadow_addr)) {
+ u16 shadow_first_bytes = *(u16 *)shadow_addr;
+
+ if (unlikely(shadow_first_bytes))
+ return true;
+
+ /*
+ * If two shadow bytes cover the 16-byte access, we don't
+ * need to do anything more. Otherwise, test the last
+ * shadow byte.
+ */
+ if (likely(IS_ALIGNED(addr, KASAN_SHADOW_SCALE_SIZE)))
+ return false;
+
+ return memory_is_poisoned_1(addr + 15);
+ }
+
+ return false;
+}
+
+static __always_inline unsigned long bytes_is_zero(const u8 *start,
+ size_t size)
+{
+ while (size) {
+ if (unlikely(*start))
+ return (unsigned long)start;
+ start++;
+ size--;
+ }
+
+ return 0;
+}
+
+static __always_inline unsigned long memory_is_zero(const void *start,
+ const void *end)
+{
+ unsigned int words;
+ unsigned long ret;
+ unsigned int prefix = (unsigned long)start % 8;
+
+ if (end - start <= 16)
+ return bytes_is_zero(start, end - start);
+
+ if (prefix) {
+ prefix = 8 - prefix;
+ ret = bytes_is_zero(start, prefix);
+ if (unlikely(ret))
+ return ret;
+ start += prefix;
+ }
+
+ words = (end - start) / 8;
+ while (words) {
+ if (unlikely(*(u64 *)start))
+ return bytes_is_zero(start, 8);
+ start += 8;
+ words--;
+ }
+
+ return bytes_is_zero(start, (end - start) % 8);
+}
+
+static __always_inline bool memory_is_poisoned_n(unsigned long addr,
+ size_t size)
+{
+ unsigned long ret;
+
+ ret = memory_is_zero(kasan_mem_to_shadow((void *)addr),
+ kasan_mem_to_shadow((void *)addr + size - 1) + 1);
+
+ if (unlikely(ret)) {
+ unsigned long last_byte = addr + size - 1;
+ s8 *last_shadow = (s8 *)kasan_mem_to_shadow((void *)last_byte);
+
+ if (unlikely(ret != (unsigned long)last_shadow ||
+ ((long)(last_byte & KASAN_SHADOW_MASK) >= *last_shadow)))
+ return true;
+ }
+ return false;
+}
+
+static __always_inline bool memory_is_poisoned(unsigned long addr, size_t size)
+{
+ if (__builtin_constant_p(size)) {
+ switch (size) {
+ case 1:
+ return memory_is_poisoned_1(addr);
+ case 2:
+ return memory_is_poisoned_2(addr);
+ case 4:
+ return memory_is_poisoned_4(addr);
+ case 8:
+ return memory_is_poisoned_8(addr);
+ case 16:
+ return memory_is_poisoned_16(addr);
+ default:
+ BUILD_BUG();
+ }
+ }
+
+ return memory_is_poisoned_n(addr, size);
+}
+
+
+static __always_inline void check_memory_region(unsigned long addr,
+ size_t size, bool write)
+{
+ if (unlikely(size == 0))
+ return;
+
+ if (unlikely((void *)addr <
+ kasan_shadow_to_mem((void *)KASAN_SHADOW_START))) {
+ kasan_report(addr, size, write, _RET_IP_);
+ return;
+ }
+
+ if (likely(!memory_is_poisoned(addr, size)))
+ return;
+
+ kasan_report(addr, size, write, _RET_IP_);
+}
+
+void __asan_loadN(unsigned long addr, size_t size);
+void __asan_storeN(unsigned long addr, size_t size);
+
+#undef memset
+void *memset(void *addr, int c, size_t len)
+{
+ __asan_storeN((unsigned long)addr, len);
+
+ return __memset(addr, c, len);
+}
+
+#undef memmove
+void *memmove(void *dest, const void *src, size_t len)
+{
+ __asan_loadN((unsigned long)src, len);
+ __asan_storeN((unsigned long)dest, len);
+
+ return __memmove(dest, src, len);
+}
+
+#undef memcpy
+void *memcpy(void *dest, const void *src, size_t len)
+{
+ __asan_loadN((unsigned long)src, len);
+ __asan_storeN((unsigned long)dest, len);
+
+ return __memcpy(dest, src, len);
+}
+
+void kasan_alloc_pages(struct page *page, unsigned int order)
+{
+ if (likely(!PageHighMem(page)))
+ kasan_unpoison_shadow(page_address(page), PAGE_SIZE << order);
+}
+
+void kasan_free_pages(struct page *page, unsigned int order)
+{
+ if (likely(!PageHighMem(page)))
+ kasan_poison_shadow(page_address(page),
+ PAGE_SIZE << order,
+ KASAN_FREE_PAGE);
+}
+
+void kasan_poison_slab(struct page *page)
+{
+ kasan_poison_shadow(page_address(page),
+ PAGE_SIZE << compound_order(page),
+ KASAN_KMALLOC_REDZONE);
+}
+
+void kasan_unpoison_object_data(struct kmem_cache *cache, void *object)
+{
+ kasan_unpoison_shadow(object, cache->object_size);
+}
+
+void kasan_poison_object_data(struct kmem_cache *cache, void *object)
+{
+ kasan_poison_shadow(object,
+ round_up(cache->object_size, KASAN_SHADOW_SCALE_SIZE),
+ KASAN_KMALLOC_REDZONE);
+}
+
+void kasan_slab_alloc(struct kmem_cache *cache, void *object)
+{
+ kasan_kmalloc(cache, object, cache->object_size);
+}
+
+void kasan_slab_free(struct kmem_cache *cache, void *object)
+{
+ unsigned long size = cache->object_size;
+ unsigned long rounded_up_size = round_up(size, KASAN_SHADOW_SCALE_SIZE);
+
+ /* RCU slabs could be legally used after free within the RCU period */
+ if (unlikely(cache->flags & SLAB_DESTROY_BY_RCU))
+ return;
+
+ kasan_poison_shadow(object, rounded_up_size, KASAN_KMALLOC_FREE);
+}
+
+void kasan_kmalloc(struct kmem_cache *cache, const void *object, size_t size)
+{
+ unsigned long redzone_start;
+ unsigned long redzone_end;
+
+ if (unlikely(object == NULL))
+ return;
+
+ redzone_start = round_up((unsigned long)(object + size),
+ KASAN_SHADOW_SCALE_SIZE);
+ redzone_end = round_up((unsigned long)object + cache->object_size,
+ KASAN_SHADOW_SCALE_SIZE);
+
+ kasan_unpoison_shadow(object, size);
+ kasan_poison_shadow((void *)redzone_start, redzone_end - redzone_start,
+ KASAN_KMALLOC_REDZONE);
+}
+EXPORT_SYMBOL(kasan_kmalloc);
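
A worked example of the redzone arithmetic above (illustrative: a 70-byte kmalloc() served from a cache with object_size == 96, KASAN_SHADOW_SCALE_SIZE == 8):

    /*
     *   redzone_start = round_up(object + 70, 8) = object + 72
     *   redzone_end   = round_up(object + 96, 8) = object + 96
     *
     * kasan_unpoison_shadow(object, 70) zeroes the shadow for bytes [0, 64)
     * and writes 6 (70 & KASAN_SHADOW_MASK) into the shadow byte covering
     * bytes [64, 72), so object[69] is valid but object[70] is already
     * reported. Bytes [72, 96) become KASAN_KMALLOC_REDZONE.
     */
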
+
+void kasan_kmalloc_large(const void *ptr, size_t size)
+{
+ struct page *page;
+ unsigned long redzone_start;
+ unsigned long redzone_end;
+
+ if (unlikely(ptr == NULL))
+ return;
+
+ page = virt_to_page(ptr);
+ redzone_start = round_up((unsigned long)(ptr + size),
+ KASAN_SHADOW_SCALE_SIZE);
+ redzone_end = (unsigned long)ptr + (PAGE_SIZE << compound_order(page));
+
+ kasan_unpoison_shadow(ptr, size);
+ kasan_poison_shadow((void *)redzone_start, redzone_end - redzone_start,
+ KASAN_PAGE_REDZONE);
+}
+
+void kasan_krealloc(const void *object, size_t size)
+{
+ struct page *page;
+
+ if (unlikely(object == ZERO_SIZE_PTR))
+ return;
+
+ page = virt_to_head_page(object);
+
+ if (unlikely(!PageSlab(page)))
+ kasan_kmalloc_large(object, size);
+ else
+ kasan_kmalloc(page->slab_cache, object, size);
+}
+
+void kasan_kfree(void *ptr)
+{
+ struct page *page;
+
+ page = virt_to_head_page(ptr);
+
+ if (unlikely(!PageSlab(page)))
+ kasan_poison_shadow(ptr, PAGE_SIZE << compound_order(page),
+ KASAN_FREE_PAGE);
+ else
+ kasan_slab_free(page->slab_cache, ptr);
+}
+
+void kasan_kfree_large(const void *ptr)
+{
+ struct page *page = virt_to_page(ptr);
+
+ kasan_poison_shadow(ptr, PAGE_SIZE << compound_order(page),
+ KASAN_FREE_PAGE);
+}
+
+int kasan_module_alloc(void *addr, size_t size)
+{
+ void *ret;
+ size_t shadow_size;
+ unsigned long shadow_start;
+
+ shadow_start = (unsigned long)kasan_mem_to_shadow(addr);
+ shadow_size = round_up(size >> KASAN_SHADOW_SCALE_SHIFT,
+ PAGE_SIZE);
+
+ if (WARN_ON(!PAGE_ALIGNED(shadow_start)))
+ return -EINVAL;
+
+ ret = __vmalloc_node_range(shadow_size, 1, shadow_start,
+ shadow_start + shadow_size,
+ GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO,
+ PAGE_KERNEL, VM_NO_GUARD, NUMA_NO_NODE,
+ __builtin_return_address(0));
+
+ if (ret) {
+ find_vm_area(addr)->flags |= VM_KASAN;
+ kmemleak_ignore(ret);
+ return 0;
+ }
+
+ return -ENOMEM;
+}
+
+void kasan_free_shadow(const struct vm_struct *vm)
+{
+ if (vm->flags & VM_KASAN)
+ vfree(kasan_mem_to_shadow(vm->addr));
+}
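
The expected caller of the two functions above is the architecture's module loader: module_alloc() backs the freshly vmalloc'ed module area with real shadow pages, and remove_vm_area() (see the vmalloc.c hunk at the end of this diff) later frees that shadow via the VM_KASAN flag. A hedged sketch of that pairing, not the literal arch code; MODULE_ALIGN and MODULES_VADDR/MODULES_END are the module loader's usual symbols and are only illustrative here:

    void *module_alloc_sketch(unsigned long size)
    {
    	void *p = __vmalloc_node_range(size, MODULE_ALIGN,
    				       MODULES_VADDR, MODULES_END,
    				       GFP_KERNEL, PAGE_KERNEL_EXEC,
    				       0, NUMA_NO_NODE,
    				       __builtin_return_address(0));

    	if (p && kasan_module_alloc(p, size) < 0) {
    		vfree(p);		/* shadow allocation failed */
    		return NULL;
    	}
    	return p;			/* shadow mapped and zeroed */
    }

kasan_free_shadow() then runs from remove_vm_area() when the module area is vfreed, keyed off the VM_KASAN flag set in kasan_module_alloc().
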
+
+static void register_global(struct kasan_global *global)
+{
+ size_t aligned_size = round_up(global->size, KASAN_SHADOW_SCALE_SIZE);
+
+ kasan_unpoison_shadow(global->beg, global->size);
+
+ kasan_poison_shadow(global->beg + aligned_size,
+ global->size_with_redzone - aligned_size,
+ KASAN_GLOBAL_REDZONE);
+}
+
+void __asan_register_globals(struct kasan_global *globals, size_t size)
+{
+ int i;
+
+ for (i = 0; i < size; i++)
+ register_global(&globals[i]);
+}
+EXPORT_SYMBOL(__asan_register_globals);
+
+void __asan_unregister_globals(struct kasan_global *globals, size_t size)
+{
+}
+EXPORT_SYMBOL(__asan_unregister_globals);
+
+#define DEFINE_ASAN_LOAD_STORE(size) \
+ void __asan_load##size(unsigned long addr) \
+ { \
+ check_memory_region(addr, size, false); \
+ } \
+ EXPORT_SYMBOL(__asan_load##size); \
+ __alias(__asan_load##size) \
+ void __asan_load##size##_noabort(unsigned long); \
+ EXPORT_SYMBOL(__asan_load##size##_noabort); \
+ void __asan_store##size(unsigned long addr) \
+ { \
+ check_memory_region(addr, size, true); \
+ } \
+ EXPORT_SYMBOL(__asan_store##size); \
+ __alias(__asan_store##size) \
+ void __asan_store##size##_noabort(unsigned long); \
+ EXPORT_SYMBOL(__asan_store##size##_noabort)
+
+DEFINE_ASAN_LOAD_STORE(1);
+DEFINE_ASAN_LOAD_STORE(2);
+DEFINE_ASAN_LOAD_STORE(4);
+DEFINE_ASAN_LOAD_STORE(8);
+DEFINE_ASAN_LOAD_STORE(16);
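
These are the entry points that the compiler-inserted checks call with outline instrumentation. Conceptually, a plain 4-byte store in an instrumented file behaves as if the check were written out by hand (a sketch, not literal compiler output; newer GCC emits the __asan_*_noabort aliases defined above instead):

    void instrumented_store_sketch(int *p)
    {
    	__asan_store4((unsigned long)p);	/* emitted check: may call kasan_report() */
    	*p = 1;					/* the original access */
    }
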
+
+void __asan_loadN(unsigned long addr, size_t size)
+{
+ check_memory_region(addr, size, false);
+}
+EXPORT_SYMBOL(__asan_loadN);
+
+__alias(__asan_loadN)
+void __asan_loadN_noabort(unsigned long, size_t);
+EXPORT_SYMBOL(__asan_loadN_noabort);
+
+void __asan_storeN(unsigned long addr, size_t size)
+{
+ check_memory_region(addr, size, true);
+}
+EXPORT_SYMBOL(__asan_storeN);
+
+__alias(__asan_storeN)
+void __asan_storeN_noabort(unsigned long, size_t);
+EXPORT_SYMBOL(__asan_storeN_noabort);
+
+/* to shut up compiler complaints */
+void __asan_handle_no_return(void) {}
+EXPORT_SYMBOL(__asan_handle_no_return);
+
+#ifdef CONFIG_MEMORY_HOTPLUG
+static int kasan_mem_notifier(struct notifier_block *nb,
+ unsigned long action, void *data)
+{
+ return (action == MEM_GOING_ONLINE) ? NOTIFY_BAD : NOTIFY_OK;
+}
+
+static int __init kasan_memhotplug_init(void)
+{
+ pr_err("WARNING: KASAN doesn't support memory hot-add\n");
+ pr_err("Memory hot-add will be disabled\n");
+
+ hotplug_memory_notifier(kasan_mem_notifier, 0);
+
+ return 0;
+}
+
+module_init(kasan_memhotplug_init);
+#endif
diff --git a/mm/kasan/kasan.h b/mm/kasan/kasan.h
new file mode 100644
index 000000000000..4f6c62e5c21e
--- /dev/null
+++ b/mm/kasan/kasan.h
@@ -0,0 +1,71 @@
+#ifndef __MM_KASAN_KASAN_H
+#define __MM_KASAN_KASAN_H
+
+#include <linux/kasan.h>
+
+#define KASAN_SHADOW_SCALE_SIZE (1UL << KASAN_SHADOW_SCALE_SHIFT)
+#define KASAN_SHADOW_MASK (KASAN_SHADOW_SCALE_SIZE - 1)
+
+#define KASAN_FREE_PAGE 0xFF /* page was freed */
+#define KASAN_PAGE_REDZONE 0xFE /* redzone for kmalloc_large allocations */
+#define KASAN_KMALLOC_REDZONE 0xFC /* redzone inside slub object */
+#define KASAN_KMALLOC_FREE 0xFB /* object was freed (kmem_cache_free/kfree) */
+#define KASAN_GLOBAL_REDZONE 0xFA /* redzone for global variable */
+
+/*
+ * Stack redzone shadow values
+ * (Those are compiler's ABI, don't change them)
+ */
+#define KASAN_STACK_LEFT 0xF1
+#define KASAN_STACK_MID 0xF2
+#define KASAN_STACK_RIGHT 0xF3
+#define KASAN_STACK_PARTIAL 0xF4
+
+/* Don't break randconfig/all*config builds */
+#ifndef KASAN_ABI_VERSION
+#define KASAN_ABI_VERSION 1
+#endif
+
+struct kasan_access_info {
+ const void *access_addr;
+ const void *first_bad_addr;
+ size_t access_size;
+ bool is_write;
+ unsigned long ip;
+};
+
+/* The layout of struct dictated by compiler */
+struct kasan_source_location {
+ const char *filename;
+ int line_no;
+ int column_no;
+};
+
+/* The layout of struct dictated by compiler */
+struct kasan_global {
+ const void *beg; /* Address of the beginning of the global variable. */
+ size_t size; /* Size of the global variable. */
+ size_t size_with_redzone; /* Size of the variable + size of the red zone. 32 bytes aligned */
+ const void *name;
+ const void *module_name; /* Name of the module where the global variable is declared. */
+ unsigned long has_dynamic_init; /* This is needed for C++. */
+#if KASAN_ABI_VERSION >= 4
+ struct kasan_source_location *location;
+#endif
+};
+
+static inline const void *kasan_shadow_to_mem(const void *shadow_addr)
+{
+ return (void *)(((unsigned long)shadow_addr - KASAN_SHADOW_OFFSET)
+ << KASAN_SHADOW_SCALE_SHIFT);
+}
+
+static inline bool kasan_report_enabled(void)
+{
+ return !current->kasan_depth;
+}
+
+void kasan_report(unsigned long addr, size_t size,
+ bool is_write, unsigned long ip);
+
+#endif
diff --git a/mm/kasan/kasan_init.c b/mm/kasan/kasan_init.c
new file mode 100644
index 000000000000..f41ac9a551cc
--- /dev/null
+++ b/mm/kasan/kasan_init.c
@@ -0,0 +1,152 @@
+/*
+ * This file contains some kasan initialization code.
+ *
+ * Copyright (c) 2015 Samsung Electronics Co., Ltd.
+ * Author: Andrey Ryabinin <ryabinin.a.a@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#include <linux/bootmem.h>
+#include <linux/init.h>
+#include <linux/kasan.h>
+#include <linux/kernel.h>
+#include <linux/memblock.h>
+#include <linux/pfn.h>
+
+#include <asm/page.h>
+#include <asm/pgalloc.h>
+
+/*
+ * This page serves two purposes:
+ * - It is used as early shadow memory. The entire shadow region is
+ * populated with this page before we are able to set up the normal
+ * shadow memory.
+ * - Later it is reused as the zero shadow to cover large ranges of
+ * memory that are allowed to be accessed but are not handled by
+ * kasan (vmalloc/vmemmap ...).
+ */
+unsigned char kasan_zero_page[PAGE_SIZE] __page_aligned_bss;
+
+#if CONFIG_ARM64_PGTABLE_LEVELS > 3
+pud_t kasan_zero_pud[PTRS_PER_PUD] __page_aligned_bss;
+#endif
+#if CONFIG_ARM64_PGTABLE_LEVELS > 2
+pmd_t kasan_zero_pmd[PTRS_PER_PMD] __page_aligned_bss;
+#endif
+pte_t kasan_zero_pte[PTRS_PER_PTE] __page_aligned_bss;
+
+static __init void *early_alloc(size_t size, int node)
+{
+ return memblock_virt_alloc_try_nid(size, size, __pa(MAX_DMA_ADDRESS),
+ BOOTMEM_ALLOC_ACCESSIBLE, node);
+}
+
+static void __init zero_pte_populate(pmd_t *pmd, unsigned long addr,
+ unsigned long end)
+{
+ pte_t *pte = pte_offset_kernel(pmd, addr);
+ pte_t zero_pte;
+
+ zero_pte = pfn_pte(PFN_DOWN(__pa(kasan_zero_page)), PAGE_KERNEL);
+ zero_pte = pte_wrprotect(zero_pte);
+
+ while (addr + PAGE_SIZE <= end) {
+ set_pte_at(&init_mm, addr, pte, zero_pte);
+ addr += PAGE_SIZE;
+ pte = pte_offset_kernel(pmd, addr);
+ }
+}
+
+static void __init zero_pmd_populate(pud_t *pud, unsigned long addr,
+ unsigned long end)
+{
+ pmd_t *pmd = pmd_offset(pud, addr);
+ unsigned long next;
+
+ do {
+ next = pmd_addr_end(addr, end);
+
+ if (IS_ALIGNED(addr, PMD_SIZE) && end - addr >= PMD_SIZE) {
+ pmd_populate_kernel(&init_mm, pmd, kasan_zero_pte);
+ continue;
+ }
+
+ if (pmd_none(*pmd)) {
+ pmd_populate_kernel(&init_mm, pmd,
+ early_alloc(PAGE_SIZE, NUMA_NO_NODE));
+ }
+ zero_pte_populate(pmd, addr, next);
+ } while (pmd++, addr = next, addr != end);
+}
+
+static void __init zero_pud_populate(pgd_t *pgd, unsigned long addr,
+ unsigned long end)
+{
+ pud_t *pud = pud_offset(pgd, addr);
+ unsigned long next;
+
+ do {
+ next = pud_addr_end(addr, end);
+ if (IS_ALIGNED(addr, PUD_SIZE) && end - addr >= PUD_SIZE) {
+ pmd_t *pmd;
+
+ pud_populate(&init_mm, pud, kasan_zero_pmd);
+ pmd = pmd_offset(pud, addr);
+ pmd_populate_kernel(&init_mm, pmd, kasan_zero_pte);
+ continue;
+ }
+
+ if (pud_none(*pud)) {
+ pud_populate(&init_mm, pud,
+ early_alloc(PAGE_SIZE, NUMA_NO_NODE));
+ }
+ zero_pmd_populate(pud, addr, next);
+ } while (pud++, addr = next, addr != end);
+}
+
+/**
+ * kasan_populate_zero_shadow - populate shadow memory region with
+ * kasan_zero_page
+ * @shadow_start - start of the memory range to populate
+ * @shadow_end - end of the memory range to populate
+ */
+void __init kasan_populate_zero_shadow(const void *shadow_start,
+ const void *shadow_end)
+{
+ unsigned long addr = (unsigned long)shadow_start;
+ unsigned long end = (unsigned long)shadow_end;
+ pgd_t *pgd = pgd_offset_k(addr);
+ unsigned long next;
+
+ do {
+ next = pgd_addr_end(addr, end);
+
+ if (IS_ALIGNED(addr, PGDIR_SIZE) && end - addr >= PGDIR_SIZE) {
+ pud_t *pud;
+ pmd_t *pmd;
+
+ /*
+ * kasan_zero_pud should already be populated with pmds
+ * at this point.
+ * The [pud,pmd]_populate*() calls below are only needed
+ * for 3- and 2-level page tables, where there are no real
+ * puds/pmds, so pgd_populate() and pud_populate() are
+ * no-ops there.
+ */
+ pgd_populate(&init_mm, pgd, kasan_zero_pud);
+ pud = pud_offset(pgd, addr);
+ pud_populate(&init_mm, pud, kasan_zero_pmd);
+ pmd = pmd_offset(pud, addr);
+ pmd_populate_kernel(&init_mm, pmd, kasan_zero_pte);
+ continue;
+ }
+
+ if (pgd_none(*pgd)) {
+ pgd_populate(&init_mm, pgd,
+ early_alloc(PAGE_SIZE, NUMA_NO_NODE));
+ }
+ zero_pud_populate(pgd, addr, next);
+ } while (pgd++, addr = next, addr != end);
+}
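
A hedged sketch of how an architecture's early KASAN setup is expected to use this helper: shadow ranges that only ever need to read as "not poisoned" (covering memory KASAN does not track, such as vmalloc/vmemmap areas) are mapped read-only to kasan_zero_page instead of being backed by real pages. The range macros below are placeholders for this sketch, not the actual arm64/x86 code:

    /* Sketch only: SHADOW_OF_UNTRACKED_START/END are placeholder names. */
    static void __init kasan_early_init_sketch(void)
    {
    	kasan_populate_zero_shadow((void *)SHADOW_OF_UNTRACKED_START,
    				   (void *)SHADOW_OF_UNTRACKED_END);
    	/* ... then allocate and map real shadow for the ranges that are
    	 * actually instrumented (kernel image, linear mapping, ...). */
    }
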
diff --git a/mm/kasan/report.c b/mm/kasan/report.c
new file mode 100644
index 000000000000..e0b3e94ecdb5
--- /dev/null
+++ b/mm/kasan/report.c
@@ -0,0 +1,298 @@
+/*
+ * This file contains error reporting code.
+ *
+ * Copyright (c) 2014 Samsung Electronics Co., Ltd.
+ * Author: Andrey Ryabinin <a.ryabinin@samsung.com>
+ *
+ * Some code borrowed from https://github.com/xairy/kasan-prototype by
+ * Andrey Konovalov <adech.fo@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/printk.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/stacktrace.h>
+#include <linux/string.h>
+#include <linux/types.h>
+#include <linux/kasan.h>
+#include <linux/module.h>
+
+#include <asm/sections.h>
+
+#include "kasan.h"
+#include "../slab.h"
+
+/* Shadow layout customization. */
+#define SHADOW_BYTES_PER_BLOCK 1
+#define SHADOW_BLOCKS_PER_ROW 16
+#define SHADOW_BYTES_PER_ROW (SHADOW_BLOCKS_PER_ROW * SHADOW_BYTES_PER_BLOCK)
+#define SHADOW_ROWS_AROUND_ADDR 2
+
+static const void *find_first_bad_addr(const void *addr, size_t size)
+{
+ u8 shadow_val = *(u8 *)kasan_mem_to_shadow(addr);
+ const void *first_bad_addr = addr;
+
+ while (!shadow_val && first_bad_addr < addr + size) {
+ first_bad_addr += KASAN_SHADOW_SCALE_SIZE;
+ shadow_val = *(u8 *)kasan_mem_to_shadow(first_bad_addr);
+ }
+ return first_bad_addr;
+}
+
+static void print_error_description(struct kasan_access_info *info)
+{
+ const char *bug_type = "unknown-crash";
+ u8 *shadow_addr;
+
+ info->first_bad_addr = find_first_bad_addr(info->access_addr,
+ info->access_size);
+
+ shadow_addr = (u8 *)kasan_mem_to_shadow(info->first_bad_addr);
+
+ /*
+ * If shadow byte value is in [0, KASAN_SHADOW_SCALE_SIZE) we can look
+ * at the next shadow byte to determine the type of the bad access.
+ */
+ if (*shadow_addr > 0 && *shadow_addr <= KASAN_SHADOW_SCALE_SIZE - 1)
+ shadow_addr++;
+
+ switch (*shadow_addr) {
+ case 0 ... KASAN_SHADOW_SCALE_SIZE - 1:
+ /*
+ * In theory it's still possible to see these shadow values
+ * due to a data race in the kernel code.
+ */
+ bug_type = "out-of-bounds";
+ break;
+ case KASAN_PAGE_REDZONE:
+ case KASAN_KMALLOC_REDZONE:
+ bug_type = "slab-out-of-bounds";
+ break;
+ case KASAN_GLOBAL_REDZONE:
+ bug_type = "global-out-of-bounds";
+ break;
+ case KASAN_STACK_LEFT:
+ case KASAN_STACK_MID:
+ case KASAN_STACK_RIGHT:
+ case KASAN_STACK_PARTIAL:
+ bug_type = "stack-out-of-bounds";
+ break;
+ case KASAN_FREE_PAGE:
+ case KASAN_KMALLOC_FREE:
+ bug_type = "use-after-free";
+ break;
+ }
+
+ pr_err("BUG: KASAN: %s in %pS at addr %p\n",
+ bug_type, (void *)info->ip,
+ info->access_addr);
+ pr_err("%s of size %zu by task %s/%d\n",
+ info->is_write ? "Write" : "Read",
+ info->access_size, current->comm, task_pid_nr(current));
+}
+
+static inline bool kernel_or_module_addr(const void *addr)
+{
+ if (addr >= (void *)_stext && addr < (void *)_end)
+ return true;
+ if (is_module_address((unsigned long)addr))
+ return true;
+ return false;
+}
+
+static inline bool init_task_stack_addr(const void *addr)
+{
+ return addr >= (void *)&init_thread_union.stack &&
+ (addr <= (void *)&init_thread_union.stack +
+ sizeof(init_thread_union.stack));
+}
+
+static void print_address_description(struct kasan_access_info *info)
+{
+ const void *addr = info->access_addr;
+
+ if ((addr >= (void *)PAGE_OFFSET) &&
+ (addr < high_memory)) {
+ struct page *page = virt_to_head_page(addr);
+
+ if (PageSlab(page)) {
+ void *object;
+ struct kmem_cache *cache = page->slab_cache;
+ void *last_object;
+
+ object = virt_to_obj(cache, page_address(page), addr);
+ last_object = page_address(page) +
+ page->objects * cache->size;
+
+ if (unlikely(object > last_object))
+ object = last_object; /* we hit into padding */
+
+ object_err(cache, page, object,
+ "kasan: bad access detected");
+ return;
+ }
+ dump_page(page, "kasan: bad access detected");
+ }
+
+ if (kernel_or_module_addr(addr)) {
+ if (!init_task_stack_addr(addr))
+ pr_err("Address belongs to variable %pS\n", addr);
+ }
+
+ dump_stack();
+}
+
+static bool row_is_guilty(const void *row, const void *guilty)
+{
+ return (row <= guilty) && (guilty < row + SHADOW_BYTES_PER_ROW);
+}
+
+static int shadow_pointer_offset(const void *row, const void *shadow)
+{
+ /* The length of ">ff00ff00ff00ff00: " is
+ * 3 + (BITS_PER_LONG/8)*2 chars.
+ */
+ return 3 + (BITS_PER_LONG/8)*2 + (shadow - row)*2 +
+ (shadow - row) / SHADOW_BYTES_PER_BLOCK + 1;
+}
+
+static void print_shadow_for_address(const void *addr)
+{
+ int i;
+ const void *shadow = kasan_mem_to_shadow(addr);
+ const void *shadow_row;
+
+ shadow_row = (void *)round_down((unsigned long)shadow,
+ SHADOW_BYTES_PER_ROW)
+ - SHADOW_ROWS_AROUND_ADDR * SHADOW_BYTES_PER_ROW;
+
+ pr_err("Memory state around the buggy address:\n");
+
+ for (i = -SHADOW_ROWS_AROUND_ADDR; i <= SHADOW_ROWS_AROUND_ADDR; i++) {
+ const void *kaddr = kasan_shadow_to_mem(shadow_row);
+ char buffer[4 + (BITS_PER_LONG/8)*2];
+ char shadow_buf[SHADOW_BYTES_PER_ROW];
+
+ snprintf(buffer, sizeof(buffer),
+ (i == 0) ? ">%p: " : " %p: ", kaddr);
+ /*
+ * We should not pass a shadow pointer to a generic
+ * function, because generic functions may try to
+ * access the kasan shadow mapping for the passed address.
+ */
+ memcpy(shadow_buf, shadow_row, SHADOW_BYTES_PER_ROW);
+ print_hex_dump(KERN_ERR, buffer,
+ DUMP_PREFIX_NONE, SHADOW_BYTES_PER_ROW, 1,
+ shadow_buf, SHADOW_BYTES_PER_ROW, 0);
+
+ if (row_is_guilty(shadow_row, shadow))
+ pr_err("%*c\n",
+ shadow_pointer_offset(shadow_row, shadow),
+ '^');
+
+ shadow_row += SHADOW_BYTES_PER_ROW;
+ }
+}
+
+static DEFINE_SPINLOCK(report_lock);
+
+static void kasan_report_error(struct kasan_access_info *info)
+{
+ unsigned long flags;
+ const char *bug_type;
+
+ /*
+ * Make sure we don't end up in a loop.
+ */
+ kasan_disable_current();
+ spin_lock_irqsave(&report_lock, flags);
+ pr_err("================================="
+ "=================================\n");
+ if (info->access_addr <
+ kasan_shadow_to_mem((void *)KASAN_SHADOW_START)) {
+ if ((unsigned long)info->access_addr < PAGE_SIZE)
+ bug_type = "null-ptr-deref";
+ else if ((unsigned long)info->access_addr < TASK_SIZE)
+ bug_type = "user-memory-access";
+ else
+ bug_type = "wild-memory-access";
+ pr_err("BUG: KASAN: %s on address %p\n",
+ bug_type, info->access_addr);
+ pr_err("%s of size %zu by task %s/%d\n",
+ info->is_write ? "Write" : "Read",
+ info->access_size, current->comm,
+ task_pid_nr(current));
+ dump_stack();
+ } else {
+ print_error_description(info);
+ print_address_description(info);
+ print_shadow_for_address(info->first_bad_addr);
+ }
+ pr_err("================================="
+ "=================================\n");
+ add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE);
+ spin_unlock_irqrestore(&report_lock, flags);
+ kasan_enable_current();
+}
+
+void kasan_report(unsigned long addr, size_t size,
+ bool is_write, unsigned long ip)
+{
+ struct kasan_access_info info;
+
+ if (likely(!kasan_report_enabled()))
+ return;
+
+ info.access_addr = (void *)addr;
+ info.access_size = size;
+ info.is_write = is_write;
+ info.ip = ip;
+
+ kasan_report_error(&info);
+}
+
+
+#define DEFINE_ASAN_REPORT_LOAD(size) \
+void __asan_report_load##size##_noabort(unsigned long addr) \
+{ \
+ kasan_report(addr, size, false, _RET_IP_); \
+} \
+EXPORT_SYMBOL(__asan_report_load##size##_noabort)
+
+#define DEFINE_ASAN_REPORT_STORE(size) \
+void __asan_report_store##size##_noabort(unsigned long addr) \
+{ \
+ kasan_report(addr, size, true, _RET_IP_); \
+} \
+EXPORT_SYMBOL(__asan_report_store##size##_noabort)
+
+DEFINE_ASAN_REPORT_LOAD(1);
+DEFINE_ASAN_REPORT_LOAD(2);
+DEFINE_ASAN_REPORT_LOAD(4);
+DEFINE_ASAN_REPORT_LOAD(8);
+DEFINE_ASAN_REPORT_LOAD(16);
+DEFINE_ASAN_REPORT_STORE(1);
+DEFINE_ASAN_REPORT_STORE(2);
+DEFINE_ASAN_REPORT_STORE(4);
+DEFINE_ASAN_REPORT_STORE(8);
+DEFINE_ASAN_REPORT_STORE(16);
+
+void __asan_report_load_n_noabort(unsigned long addr, size_t size)
+{
+ kasan_report(addr, size, false, _RET_IP_);
+}
+EXPORT_SYMBOL(__asan_report_load_n_noabort);
+
+void __asan_report_store_n_noabort(unsigned long addr, size_t size)
+{
+ kasan_report(addr, size, true, _RET_IP_);
+}
+EXPORT_SYMBOL(__asan_report_store_n_noabort);
diff --git a/mm/kmemleak.c b/mm/kmemleak.c
index 1f14ef68a5c8..c607bc059664 100644
--- a/mm/kmemleak.c
+++ b/mm/kmemleak.c
@@ -98,6 +98,7 @@
#include <asm/processor.h>
#include <linux/atomic.h>
+#include <linux/kasan.h>
#include <linux/kmemcheck.h>
#include <linux/kmemleak.h>
#include <linux/memory_hotplug.h>
@@ -1116,7 +1117,10 @@ static bool update_checksum(struct kmemleak_object *object)
if (!kmemcheck_is_obj_initialized(object->pointer, object->size))
return false;
+ kasan_disable_current();
object->checksum = crc32(0, (void *)object->pointer, object->size);
+ kasan_enable_current();
+
return object->checksum != old_csum;
}
@@ -1167,7 +1171,9 @@ static void scan_block(void *_start, void *_end,
BYTES_PER_POINTER))
continue;
+ kasan_disable_current();
pointer = *ptr;
+ kasan_enable_current();
object = find_and_get_object(pointer, 1);
if (!object)
diff --git a/mm/mempool.c b/mm/mempool.c
index e209c98c7203..d4c4e4c1bc56 100644
--- a/mm/mempool.c
+++ b/mm/mempool.c
@@ -6,26 +6,139 @@
* extreme VM load.
*
* started by Ingo Molnar, Copyright (C) 2001
+ * debugging by David Rientjes, Copyright (C) 2015
*/
#include <linux/mm.h>
#include <linux/slab.h>
+#include <linux/highmem.h>
+#include <linux/kasan.h>
#include <linux/kmemleak.h>
#include <linux/export.h>
#include <linux/mempool.h>
#include <linux/blkdev.h>
#include <linux/writeback.h>
+#include "slab.h"
+
+#if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_SLUB_DEBUG_ON)
+static void poison_error(mempool_t *pool, void *element, size_t size,
+ size_t byte)
+{
+ const int nr = pool->curr_nr;
+ const int start = max_t(int, byte - (BITS_PER_LONG / 8), 0);
+ const int end = min_t(int, byte + (BITS_PER_LONG / 8), size);
+ int i;
+
+ pr_err("BUG: mempool element poison mismatch\n");
+ pr_err("Mempool %p size %zu\n", pool, size);
+ pr_err(" nr=%d @ %p: %s0x", nr, element, start > 0 ? "... " : "");
+ for (i = start; i < end; i++)
+ pr_cont("%x ", *(u8 *)(element + i));
+ pr_cont("%s\n", end < size ? "..." : "");
+ dump_stack();
+}
+
+static void __check_element(mempool_t *pool, void *element, size_t size)
+{
+ u8 *obj = element;
+ size_t i;
+
+ for (i = 0; i < size; i++) {
+ u8 exp = (i < size - 1) ? POISON_FREE : POISON_END;
+
+ if (obj[i] != exp) {
+ poison_error(pool, element, size, i);
+ return;
+ }
+ }
+ memset(obj, POISON_INUSE, size);
+}
+
+static void check_element(mempool_t *pool, void *element)
+{
+ /* Mempools backed by slab allocator */
+ if (pool->free == mempool_free_slab || pool->free == mempool_kfree)
+ __check_element(pool, element, ksize(element));
+
+ /* Mempools backed by page allocator */
+ if (pool->free == mempool_free_pages) {
+ int order = (int)(long)pool->pool_data;
+ void *addr = kmap_atomic((struct page *)element);
+
+ __check_element(pool, addr, 1UL << (PAGE_SHIFT + order));
+ kunmap_atomic(addr);
+ }
+}
+
+static void __poison_element(void *element, size_t size)
+{
+ u8 *obj = element;
+
+ memset(obj, POISON_FREE, size - 1);
+ obj[size - 1] = POISON_END;
+}
+
+static void poison_element(mempool_t *pool, void *element)
+{
+ /* Mempools backed by slab allocator */
+ if (pool->alloc == mempool_alloc_slab || pool->alloc == mempool_kmalloc)
+ __poison_element(element, ksize(element));
+
+ /* Mempools backed by page allocator */
+ if (pool->alloc == mempool_alloc_pages) {
+ int order = (int)(long)pool->pool_data;
+ void *addr = kmap_atomic((struct page *)element);
+
+ __poison_element(addr, 1UL << (PAGE_SHIFT + order));
+ kunmap_atomic(addr);
+ }
+}
+#else /* CONFIG_DEBUG_SLAB || CONFIG_SLUB_DEBUG_ON */
+static inline void check_element(mempool_t *pool, void *element)
+{
+}
+static inline void poison_element(mempool_t *pool, void *element)
+{
+}
+#endif /* CONFIG_DEBUG_SLAB || CONFIG_SLUB_DEBUG_ON */
+
+static void kasan_poison_element(mempool_t *pool, void *element)
+{
+ if (pool->alloc == mempool_alloc_slab)
+ kasan_slab_free(pool->pool_data, element);
+ if (pool->alloc == mempool_kmalloc)
+ kasan_kfree(element);
+ if (pool->alloc == mempool_alloc_pages)
+ kasan_free_pages(element, (unsigned long)pool->pool_data);
+}
+
+static void kasan_unpoison_element(mempool_t *pool, void *element)
+{
+ if (pool->alloc == mempool_alloc_slab)
+ kasan_slab_alloc(pool->pool_data, element);
+ if (pool->alloc == mempool_kmalloc)
+ kasan_krealloc(element, (size_t)pool->pool_data);
+ if (pool->alloc == mempool_alloc_pages)
+ kasan_alloc_pages(element, (unsigned long)pool->pool_data);
+}
static void add_element(mempool_t *pool, void *element)
{
BUG_ON(pool->curr_nr >= pool->min_nr);
+ poison_element(pool, element);
+ kasan_poison_element(pool, element);
pool->elements[pool->curr_nr++] = element;
}
static void *remove_element(mempool_t *pool)
{
- BUG_ON(pool->curr_nr <= 0);
- return pool->elements[--pool->curr_nr];
+ void *element = pool->elements[--pool->curr_nr];
+
+ BUG_ON(pool->curr_nr < 0);
+ kasan_unpoison_element(pool, element);
+ check_element(pool, element);
+ return element;
}
/**
@@ -38,6 +151,9 @@ static void *remove_element(mempool_t *pool)
*/
void mempool_destroy(mempool_t *pool)
{
+ if (unlikely(!pool))
+ return;
+
while (pool->curr_nr) {
void *element = remove_element(pool);
pool->free(element, pool->pool_data);
@@ -113,23 +229,24 @@ EXPORT_SYMBOL(mempool_create_node);
* mempool_create().
* @new_min_nr: the new minimum number of elements guaranteed to be
* allocated for this pool.
- * @gfp_mask: the usual allocation bitmask.
*
* This function shrinks/grows the pool. In the case of growing,
* it cannot be guaranteed that the pool will be grown to the new
* size immediately, but new mempool_free() calls will refill it.
+ * This function may sleep.
*
* Note, the caller must guarantee that no mempool_destroy is called
* while this function is running. mempool_alloc() & mempool_free()
* might be called (eg. from IRQ contexts) while this function executes.
*/
-int mempool_resize(mempool_t *pool, int new_min_nr, gfp_t gfp_mask)
+int mempool_resize(mempool_t *pool, int new_min_nr)
{
void *element;
void **new_elements;
unsigned long flags;
BUG_ON(new_min_nr <= 0);
+ might_sleep();
spin_lock_irqsave(&pool->lock, flags);
if (new_min_nr <= pool->min_nr) {
@@ -145,7 +262,8 @@ int mempool_resize(mempool_t *pool, int new_min_nr, gfp_t gfp_mask)
spin_unlock_irqrestore(&pool->lock, flags);
/* Grow the pool */
- new_elements = kmalloc(new_min_nr * sizeof(*new_elements), gfp_mask);
+ new_elements = kmalloc_array(new_min_nr, sizeof(*new_elements),
+ GFP_KERNEL);
if (!new_elements)
return -ENOMEM;
@@ -164,7 +282,7 @@ int mempool_resize(mempool_t *pool, int new_min_nr, gfp_t gfp_mask)
while (pool->curr_nr < pool->min_nr) {
spin_unlock_irqrestore(&pool->lock, flags);
- element = pool->alloc(gfp_mask, pool->pool_data);
+ element = pool->alloc(GFP_KERNEL, pool->pool_data);
if (!element)
goto out;
spin_lock_irqsave(&pool->lock, flags);
@@ -332,6 +450,7 @@ EXPORT_SYMBOL(mempool_free);
void *mempool_alloc_slab(gfp_t gfp_mask, void *pool_data)
{
struct kmem_cache *mem = pool_data;
+ VM_BUG_ON(mem->ctor);
return kmem_cache_alloc(mem, gfp_mask);
}
EXPORT_SYMBOL(mempool_alloc_slab);
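
Taken together, the hooks above keep an element poisoned for the whole time it sits idle. A hedged usage sketch (my_cache is a placeholder kmem_cache without a constructor, per the new VM_BUG_ON above; error handling is omitted for brevity):

    void mempool_poison_demo(struct kmem_cache *my_cache)
    {
    	mempool_t *pool = mempool_create(4, mempool_alloc_slab,
    					 mempool_free_slab, my_cache);
    	char *obj = mempool_alloc(pool, GFP_KERNEL);

    	mempool_free(obj, pool);
    	/*
    	 * Whether the element went back into the reserve (poisoned by
    	 * kasan_poison_element()) or straight back to the slab (poisoned
    	 * by the slab free hooks), the stale access below is reported.
    	 */
    	obj[0] = 1;			/* use-after-free, caught by KASAN */
    	mempool_destroy(pool);
    }
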
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index c78f03a3042f..d208d9893c0b 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -25,6 +25,7 @@
#include <linux/compiler.h>
#include <linux/kernel.h>
#include <linux/kmemcheck.h>
+#include <linux/kasan.h>
#include <linux/module.h>
#include <linux/suspend.h>
#include <linux/pagevec.h>
@@ -776,6 +777,7 @@ static bool free_pages_prepare(struct page *page, unsigned int order)
trace_mm_page_free(page, order);
kmemcheck_free_shadow(page, order);
+ kasan_free_pages(page, order);
if (PageAnon(page))
page->mapping = NULL;
@@ -956,6 +958,7 @@ static int prep_new_page(struct page *page, unsigned int order, gfp_t gfp_flags)
arch_alloc_page(page, order);
kernel_map_pages(page, 1 << order, 1);
+ kasan_alloc_pages(page, order);
if (gfp_flags & __GFP_ZERO)
prep_zero_page(page, order, gfp_flags);
diff --git a/mm/slab_common.c b/mm/slab_common.c
index dcdab81bd240..9a18f19360d3 100644
--- a/mm/slab_common.c
+++ b/mm/slab_common.c
@@ -789,6 +789,7 @@ void *kmalloc_order(size_t size, gfp_t flags, unsigned int order)
page = alloc_kmem_pages(flags, order);
ret = page ? page_address(page) : NULL;
kmemleak_alloc(ret, size, 1, flags);
+ kasan_kmalloc_large(ret, size);
return ret;
}
EXPORT_SYMBOL(kmalloc_order);
@@ -963,8 +964,10 @@ static __always_inline void *__do_krealloc(const void *p, size_t new_size,
if (p)
ks = ksize(p);
- if (ks >= new_size)
+ if (ks >= new_size) {
+ kasan_krealloc((void *)p, new_size);
return (void *)p;
+ }
ret = kmalloc_track_caller(new_size, flags);
if (ret && p)
diff --git a/mm/slub.c b/mm/slub.c
index ae7b9f1ad394..f46deaf3c264 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -20,6 +20,7 @@
#include <linux/proc_fs.h>
#include <linux/notifier.h>
#include <linux/seq_file.h>
+#include <linux/kasan.h>
#include <linux/kmemcheck.h>
#include <linux/cpu.h>
#include <linux/cpuset.h>
@@ -458,8 +459,10 @@ static void get_map(struct kmem_cache *s, struct page *page, unsigned long *map)
/*
* Debug settings:
*/
-#ifdef CONFIG_SLUB_DEBUG_ON
+#if defined(CONFIG_SLUB_DEBUG_ON)
static int slub_debug = DEBUG_DEFAULT_FLAGS;
+#elif defined(CONFIG_KASAN)
+static int slub_debug = SLAB_STORE_USER;
#else
static int slub_debug;
#endif
@@ -468,12 +471,30 @@ static char *slub_debug_slabs;
static int disable_higher_order_debug;
/*
+ * slub is about to manipulate internal object metadata. This memory lies
+ * outside the range of the allocated object, so accessing it would normally
+ * be reported by kasan as a bounds error. metadata_access_enable() is used
+ * to tell kasan that these accesses are OK.
+ */
+static inline void metadata_access_enable(void)
+{
+ kasan_disable_current();
+}
+
+static inline void metadata_access_disable(void)
+{
+ kasan_enable_current();
+}
+
+/*
* Object debugging
*/
static void print_section(char *text, u8 *addr, unsigned int length)
{
+ metadata_access_enable();
print_hex_dump(KERN_ERR, text, DUMP_PREFIX_ADDRESS, 16, 1, addr,
length, 1);
+ metadata_access_disable();
}
static struct track *get_track(struct kmem_cache *s, void *object,
@@ -503,7 +524,9 @@ static void set_track(struct kmem_cache *s, void *object,
trace.max_entries = TRACK_ADDRS_COUNT;
trace.entries = p->addrs;
trace.skip = 3;
+ metadata_access_enable();
save_stack_trace(&trace);
+ metadata_access_disable();
/* See rant in lockdep.c */
if (trace.nr_entries != 0 &&
@@ -629,7 +652,7 @@ static void print_trailer(struct kmem_cache *s, struct page *page, u8 *p)
dump_stack();
}
-static void object_err(struct kmem_cache *s, struct page *page,
+void object_err(struct kmem_cache *s, struct page *page,
u8 *object, char *reason)
{
slab_bug(s, "%s", reason);
@@ -677,7 +700,9 @@ static int check_bytes_and_report(struct kmem_cache *s, struct page *page,
u8 *fault;
u8 *end;
+ metadata_access_enable();
fault = memchr_inv(start, value, bytes);
+ metadata_access_disable();
if (!fault)
return 1;
@@ -770,7 +795,9 @@ static int slab_pad_check(struct kmem_cache *s, struct page *page)
if (!remainder)
return 1;
+ metadata_access_enable();
fault = memchr_inv(end - remainder, POISON_INUSE, remainder);
+ metadata_access_disable();
if (!fault)
return 1;
while (end > fault && end[-1] == POISON_INUSE)
@@ -1226,11 +1253,13 @@ static inline void dec_slabs_node(struct kmem_cache *s, int node,
static inline void kmalloc_large_node_hook(void *ptr, size_t size, gfp_t flags)
{
kmemleak_alloc(ptr, size, 1, flags);
+ kasan_kmalloc_large(ptr, size);
}
static inline void kfree_hook(const void *x)
{
kmemleak_free(x);
+ kasan_kfree_large(x);
}
static inline int slab_pre_alloc_hook(struct kmem_cache *s, gfp_t flags)
@@ -1248,6 +1277,7 @@ static inline void slab_post_alloc_hook(struct kmem_cache *s,
flags &= gfp_allowed_mask;
kmemcheck_slab_alloc(s, flags, object, slab_ksize(s));
kmemleak_alloc_recursive(object, s->object_size, 1, s->flags, flags);
+ kasan_slab_alloc(s, object);
}
static inline void slab_free_hook(struct kmem_cache *s, void *x)
@@ -1271,6 +1301,8 @@ static inline void slab_free_hook(struct kmem_cache *s, void *x)
#endif
if (!(s->flags & SLAB_DEBUG_OBJECTS))
debug_check_no_obj_freed(x, s->object_size);
+
+ kasan_slab_free(s, x);
}
/*
@@ -1365,8 +1397,11 @@ static void setup_object(struct kmem_cache *s, struct page *page,
void *object)
{
setup_object_debug(s, page, object);
- if (unlikely(s->ctor))
+ if (unlikely(s->ctor)) {
+ kasan_unpoison_object_data(s, object);
s->ctor(object);
+ kasan_poison_object_data(s, object);
+ }
}
static struct page *new_slab(struct kmem_cache *s, gfp_t flags, int node)
@@ -1396,6 +1431,8 @@ static struct page *new_slab(struct kmem_cache *s, gfp_t flags, int node)
if (unlikely(s->flags & SLAB_POISON))
memset(start, POISON_INUSE, PAGE_SIZE << order);
+ kasan_poison_slab(page);
+
for_each_object_idx(p, idx, s, start, page->objects) {
setup_object(s, page, p);
if (likely(idx < page->objects))
@@ -2472,6 +2509,7 @@ void *kmem_cache_alloc_trace(struct kmem_cache *s, gfp_t gfpflags, size_t size)
{
void *ret = slab_alloc(s, gfpflags, _RET_IP_);
trace_kmalloc(_RET_IP_, ret, size, s->size, gfpflags);
+ kasan_kmalloc(s, ret, size);
return ret;
}
EXPORT_SYMBOL(kmem_cache_alloc_trace);
@@ -2498,6 +2536,8 @@ void *kmem_cache_alloc_node_trace(struct kmem_cache *s,
trace_kmalloc_node(_RET_IP_, ret,
size, s->size, gfpflags, node);
+
+ kasan_kmalloc(s, ret, size);
return ret;
}
EXPORT_SYMBOL(kmem_cache_alloc_node_trace);
@@ -2881,6 +2921,7 @@ static void early_kmem_cache_node_alloc(int node)
init_object(kmem_cache_node, n, SLUB_RED_ACTIVE);
init_tracking(kmem_cache_node, n);
#endif
+ kasan_kmalloc(kmem_cache_node, n, sizeof(struct kmem_cache_node));
init_kmem_cache_node(n);
inc_slabs_node(kmem_cache_node, node, page->objects);
@@ -3253,6 +3294,8 @@ void *__kmalloc(size_t size, gfp_t flags)
trace_kmalloc(_RET_IP_, ret, size, s->size, flags);
+ kasan_kmalloc(s, ret, size);
+
return ret;
}
EXPORT_SYMBOL(__kmalloc);
@@ -3296,12 +3339,14 @@ void *__kmalloc_node(size_t size, gfp_t flags, int node)
trace_kmalloc_node(_RET_IP_, ret, size, s->size, flags, node);
+ kasan_kmalloc(s, ret, size);
+
return ret;
}
EXPORT_SYMBOL(__kmalloc_node);
#endif
-size_t ksize(const void *object)
+static size_t __ksize(const void *object)
{
struct page *page;
@@ -3317,6 +3362,15 @@ size_t ksize(const void *object)
return slab_ksize(page->slab_cache);
}
+
+size_t ksize(const void *object)
+{
+ size_t size = __ksize(object);
+ /*
+ * We assume that ksize callers could use the whole allocated area,
+ * so we need to unpoison this area.
+ */
+ kasan_krealloc(object, size);
+ return size;
+}
EXPORT_SYMBOL(ksize);
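
A hedged example of why ksize() unpoisons: once a caller has asked for the usable size, it may legitimately use the whole rounded-up allocation, not just the bytes it originally requested (sizes here are illustrative for a typical SLUB kmalloc-64 cache):

    void ksize_demo(void)
    {
    	char *p = kmalloc(40, GFP_KERNEL);	/* only [0, 40) unpoisoned */
    	size_t sz;

    	if (!p)
    		return;
    	sz = ksize(p);			/* 64 here; also unpoisons [40, 64) */
    	memset(p, 0, sz);		/* valid only because ksize() was called */
    	kfree(p);
    }
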
void kfree(const void *x)
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index 90520af7f186..3f814de8eb8d 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -1325,10 +1325,8 @@ static struct vm_struct *__get_vm_area_node(unsigned long size,
if (unlikely(!area))
return NULL;
- /*
- * We always allocate a guard page.
- */
- size += PAGE_SIZE;
+ if (!(flags & VM_NO_GUARD))
+ size += PAGE_SIZE;
va = alloc_vmap_area(size, align, start, end, node, gfp_mask);
if (IS_ERR(va)) {
@@ -1421,8 +1419,8 @@ struct vm_struct *remove_vm_area(const void *addr)
spin_unlock(&vmap_area_lock);
vmap_debug_free_range(va->va_start, va->va_end);
+ kasan_free_shadow(vm);
free_unmap_vmap_area(va);
- vm->size -= PAGE_SIZE;
return vm;
}
@@ -1447,8 +1445,8 @@ static void __vunmap(const void *addr, int deallocate_pages)
return;
}
- debug_check_no_locks_freed(addr, area->size);
- debug_check_no_obj_freed(addr, area->size);
+ debug_check_no_locks_freed(addr, get_vm_area_size(area));
+ debug_check_no_obj_freed(addr, get_vm_area_size(area));
if (deallocate_pages) {
int i;
@@ -1622,6 +1620,7 @@ fail:
* @end: vm area range end
* @gfp_mask: flags for the page level allocator
* @prot: protection mask for the allocated pages
+ * @vm_flags: additional vm area flags (e.g. %VM_NO_GUARD)
* @node: node to use for allocation or NUMA_NO_NODE
* @caller: caller's return address
*
@@ -1631,7 +1630,8 @@ fail:
*/
void *__vmalloc_node_range(unsigned long size, unsigned long align,
unsigned long start, unsigned long end, gfp_t gfp_mask,
- pgprot_t prot, int node, const void *caller)
+ pgprot_t prot, unsigned long vm_flags, int node,
+ const void *caller)
{
struct vm_struct *area;
void *addr;
@@ -1641,8 +1641,8 @@ void *__vmalloc_node_range(unsigned long size, unsigned long align,
if (!size || (size >> PAGE_SHIFT) > totalram_pages)
goto fail;
- area = __get_vm_area_node(size, align, VM_ALLOC | VM_UNINITIALIZED,
- start, end, node, gfp_mask, caller);
+ area = __get_vm_area_node(size, align, VM_ALLOC | VM_UNINITIALIZED |
+ vm_flags, start, end, node, gfp_mask, caller);
if (!area)
goto fail;
@@ -1691,7 +1691,7 @@ static void *__vmalloc_node(unsigned long size, unsigned long align,
int node, const void *caller)
{
return __vmalloc_node_range(size, align, VMALLOC_START, VMALLOC_END,
- gfp_mask, prot, node, caller);
+ gfp_mask, prot, 0, node, caller);
}
void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot)