From 582d8b9e2879512507b8a9409f85fb1394e1c53e Mon Sep 17 00:00:00 2001
From: Andrea Arcangeli
Date: Fri, 18 Mar 2011 00:16:35 +0100
Subject: mm: PageBuddy and mapcount robustness

commit ef2b4b95a63a1d23958dcb99eb2c6898eddc87d0 upstream.

Change the _mapcount value indicating PageBuddy from -2 to -128 for
more robustness against page_mapcount() underflows.

Use reset_page_mapcount instead of __ClearPageBuddy in bad_page to
ignore the previous retval of PageBuddy().

Signed-off-by: Andrea Arcangeli
Reported-by: Hugh Dickins
Signed-off-by: Linus Torvalds
Signed-off-by: Greg Kroah-Hartman
---
 include/linux/mm.h | 11 +++++++++--
 1 file changed, 9 insertions(+), 2 deletions(-)

diff --git a/include/linux/mm.h b/include/linux/mm.h
index f6385fc17ad..c67adb4ea5e 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -402,16 +402,23 @@ static inline void init_page_count(struct page *page)
 /*
  * PageBuddy() indicate that the page is free and in the buddy system
  * (see mm/page_alloc.c).
+ *
+ * PAGE_BUDDY_MAPCOUNT_VALUE must be <= -2 but better not too close to
+ * -2 so that an underflow of the page_mapcount() won't be mistaken
+ * for a genuine PAGE_BUDDY_MAPCOUNT_VALUE. -128 can be created very
+ * efficiently by most CPU architectures.
  */
+#define PAGE_BUDDY_MAPCOUNT_VALUE (-128)
+
 static inline int PageBuddy(struct page *page)
 {
-	return atomic_read(&page->_mapcount) == -2;
+	return atomic_read(&page->_mapcount) == PAGE_BUDDY_MAPCOUNT_VALUE;
 }
 
 static inline void __SetPageBuddy(struct page *page)
 {
 	VM_BUG_ON(atomic_read(&page->_mapcount) != -1);
-	atomic_set(&page->_mapcount, -2);
+	atomic_set(&page->_mapcount, PAGE_BUDDY_MAPCOUNT_VALUE);
 }
 
 static inline void __ClearPageBuddy(struct page *page)
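
To make the motivation concrete, below is a minimal userspace sketch, not kernel
code: a plain int stands in for the kernel's atomic_t, assert() stands in for
VM_BUG_ON(), and the main() driver is purely illustrative. It shows why a
sentinel of -128 is more robust than -2: a single spurious decrement of an
unmapped page's _mapcount lands exactly on -2, which the old PageBuddy() check
would have mistaken for a free buddy page, while it stays far away from -128.

#include <assert.h>
#include <stdio.h>

/*
 * Userspace stand-in for the kernel's struct page: a plain int is used
 * here purely for illustration; the kernel field is an atomic_t accessed
 * with atomic_read()/atomic_set().
 */
struct page {
	int _mapcount;
};

/*
 * Sentinel from the patch: far enough below -1 that a small accidental
 * underflow of an unmapped page's _mapcount cannot collide with it.
 */
#define PAGE_BUDDY_MAPCOUNT_VALUE (-128)

static int PageBuddy(struct page *page)
{
	return page->_mapcount == PAGE_BUDDY_MAPCOUNT_VALUE;
}

static void __SetPageBuddy(struct page *page)
{
	assert(page->_mapcount == -1);	/* stands in for VM_BUG_ON() */
	page->_mapcount = PAGE_BUDDY_MAPCOUNT_VALUE;
}

static void __ClearPageBuddy(struct page *page)
{
	assert(PageBuddy(page));
	page->_mapcount = -1;
}

int main(void)
{
	struct page ordinary = { ._mapcount = -1 };	/* unmapped, not free */

	/*
	 * Simulate a single spurious decrement of the page's _mapcount,
	 * i.e. an underflow bug somewhere else in the MM code.
	 */
	ordinary._mapcount--;				/* now -2 */

	/* With the old sentinel of -2 this page would have looked free. */
	printf("looks PageBuddy with -2 sentinel:   %d\n",
	       ordinary._mapcount == -2);
	/* With -128 a small underflow no longer masquerades as PageBuddy. */
	printf("looks PageBuddy with -128 sentinel: %d\n",
	       PageBuddy(&ordinary));

	/* The normal buddy lifecycle still works with the new sentinel. */
	struct page free_page = { ._mapcount = -1 };
	__SetPageBuddy(&free_page);
	printf("free page is PageBuddy:             %d\n",
	       PageBuddy(&free_page));
	__ClearPageBuddy(&free_page);

	return 0;
}

As the commit message notes, bad_page() (in mm/page_alloc.c, not shown in this
hunk) also switches from __ClearPageBuddy() to reset_page_mapcount(), so error
recovery no longer depends on _mapcount still holding the exact sentinel value.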