From 7eb0b7339722ec570a47503f0d7b14d9c14b8d86 Mon Sep 17 00:00:00 2001
From: Johannes Weiner
Date: Wed, 3 May 2017 14:51:54 -0700
Subject: mm: fix check for reclaimable pages in PF_MEMALLOC reclaim throttling

[ Upstream commit d450abd81b081d45adb12f303a07dd44b15eb1bc ]

PF_MEMALLOC direct reclaimers get throttled on a node when the sum of
all free pages in each zone falls below half the min watermark.  During
the summation, we want to exclude zones that don't have any reclaimable
pages; checking the same pgdat over and over again doesn't make sense.

Fixes: 599d0c954f91 ("mm, vmscan: move LRU lists to node")
Link: http://lkml.kernel.org/r/20170228214007.5621-3-hannes@cmpxchg.org
Signed-off-by: Johannes Weiner
Acked-by: Hillf Danton
Acked-by: Michal Hocko
Cc: Jia He
Cc: Mel Gorman
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
Signed-off-by: Sasha Levin
Signed-off-by: Greg Kroah-Hartman
---
 mm/vmscan.c | 6 ++++--
 1 file changed, 4 insertions(+), 2 deletions(-)

diff --git a/mm/vmscan.c b/mm/vmscan.c
index 4365de74bdb0..cdd5c3b5c357 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -2841,8 +2841,10 @@ static bool allow_direct_reclaim(pg_data_t *pgdat)
 
 	for (i = 0; i <= ZONE_NORMAL; i++) {
 		zone = &pgdat->node_zones[i];
-		if (!managed_zone(zone) ||
-		    pgdat_reclaimable_pages(pgdat) == 0)
+		if (!managed_zone(zone))
+			continue;
+
+		if (!zone_reclaimable_pages(zone))
 			continue;
 
 		pfmemalloc_reserve += min_wmark_pages(zone);
--
cgit v1.2.3
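To illustrate the behavioral difference outside the kernel, here is a
minimal, self-contained C sketch of the throttling check.  It is not
kernel code: struct zone_stub and its fields are hypothetical stand-ins
for struct zone, zone_reclaimable_pages() and min_wmark_pages(); only
the control flow mirrors the patched allow_direct_reclaim().

    /* Sketch only: hypothetical stand-ins for kernel structures. */
    #include <stdbool.h>
    #include <stdio.h>

    struct zone_stub {
            bool managed;              /* zone has buddy-managed pages */
            unsigned long reclaimable; /* reclaimable pages in this zone */
            unsigned long free;        /* free pages in this zone */
            unsigned long min_wmark;   /* zone's min watermark */
    };

    static bool allow_direct_reclaim_sketch(const struct zone_stub *zones,
                                            int nr_lowmem)
    {
            unsigned long pfmemalloc_reserve = 0, free_pages = 0;
            int i;

            for (i = 0; i < nr_lowmem; i++) {
                    if (!zones[i].managed)
                            continue;

                    /* The fix: test this zone's reclaimables, not the node's */
                    if (!zones[i].reclaimable)
                            continue;

                    pfmemalloc_reserve += zones[i].min_wmark;
                    free_pages += zones[i].free;
            }

            /* Throttle when free pages fall below half the summed watermarks */
            return free_pages > pfmemalloc_reserve / 2;
    }

    int main(void)
    {
            /* Zone 0 has no reclaimable pages, so its watermark must not
             * inflate the reserve; only zone 1 counts: 80 > 100 / 2. */
            const struct zone_stub zones[] = {
                    { true,   0, 10, 100 },
                    { true, 500, 80, 100 },
            };

            printf("allow direct reclaim: %d\n",
                   allow_direct_reclaim_sketch(zones, 2));
            return 0;
    }

With the old per-node check, a zone with nothing left to reclaim was
skipped only when the entire node had no reclaimable pages, so its
watermark could keep inflating the reserve and throttle reclaimers too
eagerly.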
From 82d938a9ed8e8d880dfd7e4def4adc1578e2710e Mon Sep 17 00:00:00 2001
From: David Rientjes
Date: Wed, 3 May 2017 14:53:02 -0700
Subject: mm, vmstat: suppress pcp stats for unpopulated zones in zoneinfo

[ Upstream commit 7dfb8bf3b9caef4049bee51d2c22e1c3a311d483 ]

After "mm, vmstat: print non-populated zones in zoneinfo",
/proc/zoneinfo will show unpopulated zones.  The per-cpu pageset
statistics are not relevant for unpopulated zones and can be
potentially lengthy, so suppress them when they are not interesting.

Also move the lowmem reserve protection information above the pcp
stats, since it is relevant for all zones per vm.lowmem_reserve_ratio.

Link: http://lkml.kernel.org/r/alpine.DEB.2.10.1703061400500.46428@chino.kir.corp.google.com
Signed-off-by: David Rientjes
Cc: Anshuman Khandual
Cc: Vlastimil Babka
Cc: Mel Gorman
Cc: Johannes Weiner
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
Signed-off-by: Sasha Levin
Signed-off-by: Greg Kroah-Hartman
---
 mm/vmstat.c | 20 +++++++++++++-------
 1 file changed, 13 insertions(+), 7 deletions(-)

diff --git a/mm/vmstat.c b/mm/vmstat.c
index 3863b5d6d598..68b3193e4493 100644
--- a/mm/vmstat.c
+++ b/mm/vmstat.c
@@ -1387,18 +1387,24 @@ static void zoneinfo_show_print(struct seq_file *m, pg_data_t *pgdat,
 		   zone->present_pages,
 		   zone->managed_pages);
 
-	for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
-		seq_printf(m, "\n    %-12s %lu", vmstat_text[i],
-			   zone_page_state(zone, i));
-
 	seq_printf(m, "\n        protection: (%ld", zone->lowmem_reserve[0]);
 	for (i = 1; i < ARRAY_SIZE(zone->lowmem_reserve); i++)
 		seq_printf(m, ", %ld", zone->lowmem_reserve[i]);
-	seq_printf(m,
-		   ")"
-		   "\n  pagesets");
+	seq_putc(m, ')');
+
+	/* If unpopulated, no other information is useful */
+	if (!populated_zone(zone)) {
+		seq_putc(m, '\n');
+		return;
+	}
+
+	for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
+		seq_printf(m, "\n    %-12s %lu", vmstat_text[i],
+			   zone_page_state(zone, i));
+
+	seq_printf(m, "\n  pagesets");
 	for_each_online_cpu(i) {
 		struct per_cpu_pageset *pageset;
 
--
cgit v1.2.3
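To see what the reordering means for /proc/zoneinfo consumers, here is
a stand-alone C sketch of the new output flow using plain stdio.
struct zone_info, print_zone_sketch() and the single nr_free_pages stat
are hypothetical simplifications of zoneinfo_show_print(); the early
return for unpopulated zones is the behavior being shown.

    /* Sketch only: a simplified model of the patched zoneinfo output. */
    #include <stdbool.h>
    #include <stdio.h>

    #define NR_LOWMEM 3

    struct zone_info {
            const char *name;
            bool populated;
            long lowmem_reserve[NR_LOWMEM];
            unsigned long nr_free; /* stand-in for the zone stat items */
    };

    static void print_zone_sketch(const struct zone_info *z)
    {
            int i;

            printf("Node 0, zone %8s", z->name);

            /* protection comes first now: per vm.lowmem_reserve_ratio it
             * is relevant for every zone, populated or not */
            printf("\n        protection: (%ld", z->lowmem_reserve[0]);
            for (i = 1; i < NR_LOWMEM; i++)
                    printf(", %ld", z->lowmem_reserve[i]);
            putchar(')');

            /* If unpopulated, stats and pcp dumps would be noise: stop */
            if (!z->populated) {
                    putchar('\n');
                    return;
            }

            printf("\n    %-12s %lu", "nr_free_pages", z->nr_free);
            printf("\n  pagesets ...\n"); /* per-cpu pagesets would follow */
    }

    int main(void)
    {
            const struct zone_info movable = { "Movable", false,
                                               { 0, 0, 0 }, 0 };
            const struct zone_info normal  = { "Normal", true,
                                               { 0, 968, 968 }, 12345 };

            print_zone_sketch(&movable); /* name + protection line only */
            print_zone_sketch(&normal);  /* full stats and pagesets too */
            return 0;
    }

An unpopulated zone thus prints only its name and protection array,
while the potentially lengthy per-cpu pageset dump is suppressed.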
From 0b42ce075d8ed11259b36cdbed99bbab8f1f69f1 Mon Sep 17 00:00:00 2001
From: Naoya Horiguchi
Date: Wed, 3 May 2017 14:56:22 -0700
Subject: mm: hwpoison: call shake_page() after try_to_unmap() for mlocked page

[ Upstream commit 286c469a988fbaf68e3a97ddf1e6c245c1446968 ]

The memory error handler calls try_to_unmap() for error pages in
various states.  If the error page is an mlocked page, error handling
could fail with a "still referenced by 1 users" message.  This is
because the page is put back on, and stays in, the lru cache via the
following call chain:

  try_to_unmap_one
    page_remove_rmap
      clear_page_mlock
        putback_lru_page
          lru_cache_add

memory_failure() calls shake_page() to handle a similar issue, but the
current code doesn't cover this case because shake_page() is called
only before try_to_unmap().  So this patch adds a second shake_page()
call after try_to_unmap() for mlocked pages.

Fixes: 23a003bfd23ea9ea0b7756b920e51f64b284b468 ("mm/madvise: pass return code of memory_failure() to userspace")
Link: http://lkml.kernel.org/r/20170417055948.GM31394@yexl-desktop
Link: http://lkml.kernel.org/r/1493197841-23986-3-git-send-email-n-horiguchi@ah.jp.nec.com
Signed-off-by: Naoya Horiguchi
Reported-by: kernel test robot
Cc: Xiaolong Ye
Cc: Chen Gong
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
Signed-off-by: Sasha Levin
Signed-off-by: Greg Kroah-Hartman
---
 mm/memory-failure.c | 8 ++++++++
 1 file changed, 8 insertions(+)

diff --git a/mm/memory-failure.c b/mm/memory-failure.c
index 5aa71a82ca73..851efb004857 100644
--- a/mm/memory-failure.c
+++ b/mm/memory-failure.c
@@ -921,6 +921,7 @@ static int hwpoison_user_mappings(struct page *p, unsigned long pfn,
 	int ret;
 	int kill = 1, forcekill;
 	struct page *hpage = *hpagep;
+	bool mlocked = PageMlocked(hpage);
 
 	/*
 	 * Here we are interested only in user-mapped pages, so skip any
@@ -984,6 +985,13 @@ static int hwpoison_user_mappings(struct page *p, unsigned long pfn,
 		pr_err("Memory failure: %#lx: failed to unmap page (mapcount=%d)\n",
 		       pfn, page_mapcount(hpage));
 
+	/*
+	 * try_to_unmap() might put mlocked page in lru cache, so call
+	 * shake_page() again to ensure that it's flushed.
+	 */
+	if (mlocked)
+		shake_page(hpage, 0);
+
 	/*
 	 * Now that the dirty bit has been propagated to the
 	 * struct page and all unmaps done we can decide if
--
cgit v1.2.3
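As a closing illustration, the lru-cache interaction this patch
addresses can be modeled in a few lines of plain C.  page_stub,
try_to_unmap_sketch() and shake_page_sketch() are hypothetical
stand-ins that capture only the two side effects that matter here:
unmapping an mlocked page parks it in the per-cpu lru cache, and
shaking the page drains that cache.

    /* Sketch only: models the mlocked-page side effect, not real MM code. */
    #include <stdbool.h>
    #include <stdio.h>

    struct page_stub {
            bool mlocked;      /* PG_mlocked, sampled before unmapping */
            bool in_lru_cache; /* page held in a per-cpu lru pagevec */
    };

    /* Stand-in for shake_page(): drain per-cpu lru caches so the page
     * drops its extra reference and can be isolated. */
    static void shake_page_sketch(struct page_stub *p)
    {
            p->in_lru_cache = false;
    }

    /* Stand-in for try_to_unmap(): clearing PG_mlocked puts the page back
     * through the lru cache (clear_page_mlock -> putback_lru_page). */
    static void try_to_unmap_sketch(struct page_stub *p)
    {
            if (p->mlocked) {
                    p->mlocked = false;
                    p->in_lru_cache = true;
            }
    }

    static void hwpoison_user_mappings_sketch(struct page_stub *p)
    {
            /* Sample first: try_to_unmap() clears the flag as a side
             * effect, which is why the patch latches it in a local. */
            bool mlocked = p->mlocked;

            shake_page_sketch(p); /* the pre-existing, too-early call */
            try_to_unmap_sketch(p);

            /* The fix: shake again, or the page stays "still referenced" */
            if (mlocked)
                    shake_page_sketch(p);
    }

    int main(void)
    {
            struct page_stub p = { .mlocked = true, .in_lru_cache = false };

            hwpoison_user_mappings_sketch(&p);
            printf("still in lru cache: %d\n", p.in_lru_cache); /* 0 */
            return 0;
    }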