author     Minchan Kim <minchan@kernel.org>	2014-12-12 16:56:58 -0800
committer  Alex Shi <alex.shi@linaro.org>	2015-05-11 21:35:17 +0800
commit     5b0af867b453597531c989254f75be634b8e9197 (patch)
tree       bfcde61296607f5ed4fd4ee2b410049e1334593c
parent     211d013d9e8abbd172c43ace2b6d5ed928a4dd6a (diff)
zsmalloc: correct fragile [kmap|kunmap]_atomic use
kunmap_atomic() should be passed the virtual address returned by kmap_atomic(). However, some pieces of code in zsmalloc pass a modified address to kunmap_atomic(), not the one kmap_atomic() returned. This happens to work because zsmalloc only adjusts the address within the PAGE_SIZE boundary, which the current kmap_atomic() implementation tolerates, but it remains fragile against future changes to kmap_atomic(), so let's correct it.

I hit a subtle bug while implementing a new zsmalloc feature (compaction), caused by mishandling of a link that crossed a page boundary. Although it was entirely my mistake, it took a while to find the cause, because an unpredictable kmapped address was being unmapped, causing an almost random crash.

Signed-off-by: Minchan Kim <minchan@kernel.org>
Cc: Nitin Gupta <ngupta@vflare.org>
Cc: Sergey Senozhatsky <sergey.senozhatsky@gmail.com>
Cc: Dan Streetman <ddstreet@ieee.org>
Cc: Seth Jennings <sjennings@variantweb.net>
Cc: Jerome Marchand <jmarchan@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
(cherry picked from commit af4ee5e977acb150371c28bd85cb7e34cac48b13)
Signed-off-by: Alex Shi <alex.shi@linaro.org>
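The fix is mechanical: remember the pointer returned by kmap_atomic() and pass that exact pointer to kunmap_atomic(), instead of the offset-adjusted link pointer. A condensed before/after sketch, distilled from the hunks below (local names follow the ones used in the patch):

	/* Fragile: kunmap_atomic() is given an address offset into the page */
	link = (struct link_free *)kmap_atomic(page) + off / sizeof(*link);
	/* ... use link ... */
	kunmap_atomic(link);

	/* Correct: keep the mapping address and unmap exactly that */
	void *vaddr = kmap_atomic(page);
	link = (struct link_free *)vaddr + off / sizeof(*link);
	/* ... use link ... */
	kunmap_atomic(vaddr);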
-rw-r--r--	mm/zsmalloc.c	21
1 file changed, 12 insertions, 9 deletions
diff --git a/mm/zsmalloc.c b/mm/zsmalloc.c
index f3d9a14a23f6..7031e12fcf2b 100644
--- a/mm/zsmalloc.c
+++ b/mm/zsmalloc.c
@@ -595,6 +595,7 @@ static void init_zspage(struct page *first_page, struct size_class *class)
struct page *next_page;
struct link_free *link;
unsigned int i = 1;
+ void *vaddr;
/*
* page->index stores offset of first object starting
@@ -605,8 +606,8 @@ static void init_zspage(struct page *first_page, struct size_class *class)
if (page != first_page)
page->index = off;
- link = (struct link_free *)kmap_atomic(page) +
- off / sizeof(*link);
+ vaddr = kmap_atomic(page);
+ link = (struct link_free *)vaddr + off / sizeof(*link);
while ((off += class->size) < PAGE_SIZE) {
link->next = obj_location_to_handle(page, i++);
@@ -620,7 +621,7 @@ static void init_zspage(struct page *first_page, struct size_class *class)
*/
next_page = get_next_page(page);
link->next = obj_location_to_handle(next_page, 0);
- kunmap_atomic(link);
+ kunmap_atomic(vaddr);
page = next_page;
off %= PAGE_SIZE;
}
@@ -1031,6 +1032,7 @@ unsigned long zs_malloc(struct zs_pool *pool, size_t size)
unsigned long obj;
struct link_free *link;
struct size_class *class;
+ void *vaddr;
struct page *first_page, *m_page;
unsigned long m_objidx, m_offset;
@@ -1059,11 +1061,11 @@ unsigned long zs_malloc(struct zs_pool *pool, size_t size)
obj_handle_to_location(obj, &m_page, &m_objidx);
m_offset = obj_idx_to_offset(m_page, m_objidx, class->size);
- link = (struct link_free *)kmap_atomic(m_page) +
- m_offset / sizeof(*link);
+ vaddr = kmap_atomic(m_page);
+ link = (struct link_free *)vaddr + m_offset / sizeof(*link);
first_page->freelist = link->next;
memset(link, POISON_INUSE, sizeof(*link));
- kunmap_atomic(link);
+ kunmap_atomic(vaddr);
first_page->inuse++;
/* Now move the zspage to another fullness group, if required */
@@ -1079,6 +1081,7 @@ void zs_free(struct zs_pool *pool, unsigned long obj)
struct link_free *link;
struct page *first_page, *f_page;
unsigned long f_objidx, f_offset;
+ void *vaddr;
int class_idx;
struct size_class *class;
@@ -1097,10 +1100,10 @@ void zs_free(struct zs_pool *pool, unsigned long obj)
spin_lock(&class->lock);
/* Insert this object in containing zspage's freelist */
- link = (struct link_free *)((unsigned char *)kmap_atomic(f_page)
- + f_offset);
+ vaddr = kmap_atomic(f_page);
+ link = (struct link_free *)(vaddr + f_offset);
link->next = first_page->freelist;
- kunmap_atomic(link);
+ kunmap_atomic(vaddr);
first_page->freelist = (void *)obj;
first_page->inuse--;