Diffstat (limited to 'mm/kasan/common.c')
 mm/kasan/common.c | 39 ++++++++++++++++++++-------------------
 1 file changed, 20 insertions(+), 19 deletions(-)
diff --git a/mm/kasan/common.c b/mm/kasan/common.c
index 6adbf5891aff..80dd71e88ef8 100644
--- a/mm/kasan/common.c
+++ b/mm/kasan/common.c
@@ -107,7 +107,7 @@ void *memcpy(void *dest, const void *src, size_t len)
/*
* Poisons the shadow memory for 'size' bytes starting from 'addr'.
- * Memory addresses should be aligned to KASAN_SHADOW_SCALE_SIZE.
+ * Memory addresses should be aligned to KASAN_GRANULE_SIZE.
*/
void poison_range(const void *address, size_t size, u8 value)
{
@@ -151,13 +151,13 @@ void unpoison_range(const void *address, size_t size)
poison_range(address, size, tag);
- if (size & KASAN_SHADOW_MASK) {
+ if (size & KASAN_GRANULE_MASK) {
u8 *shadow = (u8 *)kasan_mem_to_shadow(address + size);
if (IS_ENABLED(CONFIG_KASAN_SW_TAGS))
*shadow = tag;
else
- *shadow = size & KASAN_SHADOW_MASK;
+ *shadow = size & KASAN_GRANULE_MASK;
}
}
@@ -314,7 +314,7 @@ void kasan_unpoison_object_data(struct kmem_cache *cache, void *object)
void kasan_poison_object_data(struct kmem_cache *cache, void *object)
{
poison_range(object,
- round_up(cache->object_size, KASAN_SHADOW_SCALE_SIZE),
+ round_up(cache->object_size, KASAN_GRANULE_SIZE),
KASAN_KMALLOC_REDZONE);
}
@@ -386,7 +386,7 @@ static inline bool shadow_invalid(u8 tag, s8 shadow_byte)
{
if (IS_ENABLED(CONFIG_KASAN_GENERIC))
return shadow_byte < 0 ||
- shadow_byte >= KASAN_SHADOW_SCALE_SIZE;
+ shadow_byte >= KASAN_GRANULE_SIZE;
/* else CONFIG_KASAN_SW_TAGS: */
if ((u8)shadow_byte == KASAN_TAG_INVALID)
@@ -428,7 +428,7 @@ static bool __kasan_slab_free(struct kmem_cache *cache, void *object,
return true;
}
- rounded_up_size = round_up(cache->object_size, KASAN_SHADOW_SCALE_SIZE);
+ rounded_up_size = round_up(cache->object_size, KASAN_GRANULE_SIZE);
poison_range(object, rounded_up_size, KASAN_KMALLOC_FREE);
if ((IS_ENABLED(CONFIG_KASAN_GENERIC) && !quarantine) ||
@@ -464,9 +464,9 @@ static void *__kasan_kmalloc(struct kmem_cache *cache, const void *object,
return (void *)object;
redzone_start = round_up((unsigned long)(object + size),
- KASAN_SHADOW_SCALE_SIZE);
+ KASAN_GRANULE_SIZE);
redzone_end = round_up((unsigned long)object + cache->object_size,
- KASAN_SHADOW_SCALE_SIZE);
+ KASAN_GRANULE_SIZE);
if (IS_ENABLED(CONFIG_KASAN_SW_TAGS))
tag = assign_tag(cache, object, false, keep_tag);
@@ -510,7 +510,7 @@ void * __must_check kasan_kmalloc_large(const void *ptr, size_t size,
page = virt_to_page(ptr);
redzone_start = round_up((unsigned long)(ptr + size),
- KASAN_SHADOW_SCALE_SIZE);
+ KASAN_GRANULE_SIZE);
redzone_end = (unsigned long)ptr + page_size(page);
unpoison_range(ptr, size);
@@ -608,8 +608,8 @@ static int __meminit kasan_mem_notifier(struct notifier_block *nb,
shadow_size = nr_shadow_pages << PAGE_SHIFT;
shadow_end = shadow_start + shadow_size;
- if (WARN_ON(mem_data->nr_pages % KASAN_SHADOW_SCALE_SIZE) ||
- WARN_ON(start_kaddr % (KASAN_SHADOW_SCALE_SIZE << PAGE_SHIFT)))
+ if (WARN_ON(mem_data->nr_pages % KASAN_GRANULE_SIZE) ||
+ WARN_ON(start_kaddr % (KASAN_GRANULE_SIZE << PAGE_SHIFT)))
return NOTIFY_BAD;
switch (action) {
@@ -767,7 +767,7 @@ void kasan_poison_vmalloc(const void *start, unsigned long size)
if (!is_vmalloc_or_module_addr(start))
return;
- size = round_up(size, KASAN_SHADOW_SCALE_SIZE);
+ size = round_up(size, KASAN_GRANULE_SIZE);
poison_range(start, size, KASAN_VMALLOC_INVALID);
}
@@ -880,22 +880,22 @@ void kasan_release_vmalloc(unsigned long start, unsigned long end,
unsigned long region_start, region_end;
unsigned long size;
- region_start = ALIGN(start, PAGE_SIZE * KASAN_SHADOW_SCALE_SIZE);
- region_end = ALIGN_DOWN(end, PAGE_SIZE * KASAN_SHADOW_SCALE_SIZE);
+ region_start = ALIGN(start, PAGE_SIZE * KASAN_GRANULE_SIZE);
+ region_end = ALIGN_DOWN(end, PAGE_SIZE * KASAN_GRANULE_SIZE);
free_region_start = ALIGN(free_region_start,
- PAGE_SIZE * KASAN_SHADOW_SCALE_SIZE);
+ PAGE_SIZE * KASAN_GRANULE_SIZE);
if (start != region_start &&
free_region_start < region_start)
- region_start -= PAGE_SIZE * KASAN_SHADOW_SCALE_SIZE;
+ region_start -= PAGE_SIZE * KASAN_GRANULE_SIZE;
free_region_end = ALIGN_DOWN(free_region_end,
- PAGE_SIZE * KASAN_SHADOW_SCALE_SIZE);
+ PAGE_SIZE * KASAN_GRANULE_SIZE);
if (end != region_end &&
free_region_end > region_end)
- region_end += PAGE_SIZE * KASAN_SHADOW_SCALE_SIZE;
+ region_end += PAGE_SIZE * KASAN_GRANULE_SIZE;
shadow_start = kasan_mem_to_shadow((void *)region_start);
shadow_end = kasan_mem_to_shadow((void *)region_end);
@@ -921,7 +921,8 @@ int kasan_module_alloc(void *addr, size_t size)
unsigned long shadow_start;
shadow_start = (unsigned long)kasan_mem_to_shadow(addr);
- scaled_size = (size + KASAN_SHADOW_MASK) >> KASAN_SHADOW_SCALE_SHIFT;
+ scaled_size = (size + KASAN_GRANULE_SIZE - 1) >>
+ KASAN_SHADOW_SCALE_SHIFT;
shadow_size = round_up(scaled_size, PAGE_SIZE);
if (WARN_ON(!PAGE_ALIGNED(shadow_start)))
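
Not part of the patch: a minimal userspace sketch of the granule arithmetic
these hunks rely on. It assumes the kernel's definitions
KASAN_GRANULE_SIZE == 1UL << KASAN_SHADOW_SCALE_SHIFT and
KASAN_GRANULE_MASK == KASAN_GRANULE_SIZE - 1 (8 and 7 for generic KASAN),
which is why (size + KASAN_GRANULE_SIZE - 1) >> KASAN_SHADOW_SCALE_SHIFT in
kasan_module_alloc() computes the same round-up that
(size + KASAN_SHADOW_MASK) >> KASAN_SHADOW_SCALE_SHIFT did before the rename.

#include <stdio.h>

#define KASAN_SHADOW_SCALE_SHIFT 3
#define KASAN_GRANULE_SIZE (1UL << KASAN_SHADOW_SCALE_SHIFT)
#define KASAN_GRANULE_MASK (KASAN_GRANULE_SIZE - 1)

int main(void)
{
	unsigned long size = 13;

	/* Partial last granule, as in unpoison_range(): the shadow byte
	 * records how many leading bytes of the granule are valid
	 * (13 & 7 == 5). */
	if (size & KASAN_GRANULE_MASK)
		printf("last shadow byte encodes %lu valid bytes\n",
		       size & KASAN_GRANULE_MASK);

	/* Round-up scaling, as in kasan_module_alloc() after the patch:
	 * 13 bytes need ceil(13 / 8) == 2 shadow bytes. */
	printf("scaled size: %lu shadow bytes\n",
	       (size + KASAN_GRANULE_SIZE - 1) >> KASAN_SHADOW_SCALE_SHIFT);

	return 0;
}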