author    Johannes Weiner <hannes@cmpxchg.org>  2016-01-14 15:20:15 -0800
committer Alex Shi <alex.shi@linaro.org>        2016-11-29 15:25:03 +0800
commit    aaaf9e59c00aa2ff9c2bd1da05c0cf1ba2a9634a (patch)
tree      6e21c2802b29ed74c99a272ea7d988bd875ba80e
parent    83595f56588d5f5d677e62a576b6248328862b34 (diff)
download  linux-linaro-stable-aaaf9e59c00aa2ff9c2bd1da05c0cf1ba2a9634a.tar.gz
mm: page_alloc: generalize the dirty balance reserve
The dirty balance reserve that dirty throttling has to consider is
merely memory not available to userspace allocations. There is nothing
writeback-specific about it. Generalize the name so that it's reusable
outside of that context.

Signed-off-by: Johannes Weiner <hannes@cmpxchg.org>
Cc: Rik van Riel <riel@redhat.com>
Cc: Mel Gorman <mgorman@suse.de>
Acked-by: Michal Hocko <mhocko@suse.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
(cherry picked from commit a8d0143730d7b42c9fe6d1435d92ecce6863a62a)
Signed-off-by: Alex Shi <alex.shi@linaro.org>
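[Editor's note: in effect, after this patch the writeback code computes
dirtyable memory against the generalized totalreserve_pages rather than a
writeback-private counter. A minimal userspace sketch of that calculation
follows; the function name and the example page counts are hypothetical
illustrations, not kernel code.]

#include <stdio.h>

/* Simplified model of global_dirtyable_memory() after this patch:
 * free pages minus the kernel reserve, plus the file LRU pages. */
static unsigned long model_dirtyable(unsigned long free_pages,
				     unsigned long reserve_pages,
				     unsigned long file_pages)
{
	unsigned long x = free_pages;

	/* x -= min(x, reserve): the reserve may exceed free pages */
	x -= (x < reserve_pages) ? x : reserve_pages;
	x += file_pages;	/* inactive + active file pages */
	return x;
}

int main(void)
{
	/* hypothetical page counts */
	printf("dirtyable: %lu pages\n",
	       model_dirtyable(100000, 12000, 50000));
	return 0;
}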
-rw-r--r--  include/linux/mmzone.h   6
-rw-r--r--  include/linux/swap.h     1
-rw-r--r--  mm/page-writeback.c     14
-rw-r--r--  mm/page_alloc.c         21
4 files changed, 18 insertions(+), 24 deletions(-)
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index e23a9e704536..9134ae3f61ff 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -361,10 +361,10 @@ struct zone {
struct per_cpu_pageset __percpu *pageset;
/*
- * This is a per-zone reserve of pages that should not be
- * considered dirtyable memory.
+ * This is a per-zone reserve of pages that are not available
+ * to userspace allocations.
*/
- unsigned long dirty_balance_reserve;
+ unsigned long totalreserve_pages;
#ifndef CONFIG_SPARSEMEM
/*
diff --git a/include/linux/swap.h b/include/linux/swap.h
index d8ca2eaa3a8b..f1a52c11de0e 100644
--- a/include/linux/swap.h
+++ b/include/linux/swap.h
@@ -289,7 +289,6 @@ static inline void workingset_node_shadows_dec(struct radix_tree_node *node)
/* linux/mm/page_alloc.c */
extern unsigned long totalram_pages;
extern unsigned long totalreserve_pages;
-extern unsigned long dirty_balance_reserve;
extern unsigned long nr_free_buffer_pages(void);
extern unsigned long nr_free_pagecache_pages(void);
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index fd51ebfc423f..1e6769449ac2 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -278,7 +278,12 @@ static unsigned long zone_dirtyable_memory(struct zone *zone)
unsigned long nr_pages;
nr_pages = zone_page_state(zone, NR_FREE_PAGES);
- nr_pages -= min(nr_pages, zone->dirty_balance_reserve);
+ /*
+ * Pages reserved for the kernel should not be considered
+ * dirtyable, to prevent a situation where reclaim has to
+ * clean pages in order to balance the zones.
+ */
+ nr_pages -= min(nr_pages, zone->totalreserve_pages);
nr_pages += zone_page_state(zone, NR_INACTIVE_FILE);
nr_pages += zone_page_state(zone, NR_ACTIVE_FILE);
@@ -332,7 +337,12 @@ static unsigned long global_dirtyable_memory(void)
unsigned long x;
x = global_page_state(NR_FREE_PAGES);
- x -= min(x, dirty_balance_reserve);
+ /*
+ * Pages reserved for the kernel should not be considered
+ * dirtyable, to prevent a situation where reclaim has to
+ * clean pages in order to balance the zones.
+ */
+ x -= min(x, totalreserve_pages);
x += global_page_state(NR_INACTIVE_FILE);
x += global_page_state(NR_ACTIVE_FILE);
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 2bcdfbf8c36d..b0ca09f607b4 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -114,13 +114,6 @@ static DEFINE_SPINLOCK(managed_page_count_lock);
unsigned long totalram_pages __read_mostly;
unsigned long totalreserve_pages __read_mostly;
unsigned long totalcma_pages __read_mostly;
-/*
- * When calculating the number of globally allowed dirty pages, there
- * is a certain number of per-zone reserves that should not be
- * considered dirtyable memory. This is the sum of those reserves
- * over all existing zones that contribute dirtyable memory.
- */
-unsigned long dirty_balance_reserve __read_mostly;
int percpu_pagelist_fraction;
gfp_t gfp_allowed_mask __read_mostly = GFP_BOOT_MASK;
@@ -5978,20 +5971,12 @@ static void calculate_totalreserve_pages(void)
if (max > zone->managed_pages)
max = zone->managed_pages;
+
+ zone->totalreserve_pages = max;
+
reserve_pages += max;
- /*
- * Lowmem reserves are not available to
- * GFP_HIGHUSER page cache allocations and
- * kswapd tries to balance zones to their high
- * watermark. As a result, neither should be
- * regarded as dirtyable memory, to prevent a
- * situation where reclaim has to clean pages
- * in order to balance the zones.
- */
- zone->dirty_balance_reserve = max;
}
}
- dirty_balance_reserve = reserve_pages;
totalreserve_pages = reserve_pages;
}
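
[Editor's note: for reference, a hedged userspace model of the per-zone
value that calculate_totalreserve_pages() now stores in
zone->totalreserve_pages: the largest lowmem_reserve entry plus the high
watermark, clamped to managed_pages. The struct layout and the numbers
below are illustrative assumptions, not kernel definitions.]

#include <stdio.h>

struct zone_model {
	unsigned long lowmem_reserve[4];	/* per-zone-type reserves */
	unsigned long high_wmark;		/* high watermark, in pages */
	unsigned long managed_pages;
};

static unsigned long zone_totalreserve(const struct zone_model *z)
{
	unsigned long max = 0;
	int i;

	/* find the largest lowmem reserve this zone holds back */
	for (i = 0; i < 4; i++)
		if (z->lowmem_reserve[i] > max)
			max = z->lowmem_reserve[i];

	/* kswapd balances the zone up to the high watermark, so those
	 * pages are not available to userspace either */
	max += z->high_wmark;

	/* cap at what the zone actually manages */
	if (max > z->managed_pages)
		max = z->managed_pages;

	return max;	/* what the patch stores in zone->totalreserve_pages */
}

int main(void)
{
	/* hypothetical zone */
	struct zone_model z = {
		.lowmem_reserve	= { 0, 768, 32768, 32768 },
		.high_wmark	= 12288,
		.managed_pages	= 262144,
	};

	printf("reserve: %lu pages\n", zone_totalreserve(&z));
	return 0;
}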