author		Johannes Weiner <hannes@cmpxchg.org>	2016-01-14 15:20:15 -0800
committer	Alex Shi <alex.shi@linaro.org>		2016-06-08 11:09:40 +0800
commit		4896a47c3b158949774fae695bb15d992c6fea75 (patch)
tree		08fc071a71dd7e77588d0ae2b15122f200e1d813
parent		1ef96584835e0a2f2befaccf656e873a344b1c02 (diff)
mm: page_alloc: generalize the dirty balance reserve
The dirty balance reserve that dirty throttling has to consider is
merely memory not available to userspace allocations. There is nothing
writeback-specific about it. Generalize the name so that it's reusable
outside of that context.
Signed-off-by: Johannes Weiner <hannes@cmpxchg.org>
Cc: Rik van Riel <riel@redhat.com>
Cc: Mel Gorman <mgorman@suse.de>
Acked-by: Michal Hocko <mhocko@suse.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
(cherry picked from commit a8d0143730d7b42c9fe6d1435d92ecce6863a62a)
Signed-off-by: Alex Shi <alex.shi@linaro.org>
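For context, the arithmetic being generalized is simple: writeback may only dirty the memory userspace could actually allocate, i.e. free pages minus the kernel's reserve, plus the reclaimable file pages. Below is a minimal standalone C sketch of that calculation as global_dirtyable_memory()/zone_dirtyable_memory() perform it after this patch; it is not kernel code, and the page counts are invented for illustration.

/*
 * Standalone sketch of the dirtyable-memory arithmetic used by
 * global_dirtyable_memory()/zone_dirtyable_memory() after this patch.
 * Not kernel code: the page counts are invented for illustration.
 */
#include <stdio.h>

static unsigned long dirtyable_memory(unsigned long free_pages,
				      unsigned long reserve_pages,
				      unsigned long inactive_file,
				      unsigned long active_file)
{
	unsigned long x = free_pages;

	/* Clamp like min() in the kernel: the reserve may exceed free. */
	x -= (x < reserve_pages) ? x : reserve_pages;

	/* File pages can be written back and cleaned, so they stay dirtyable. */
	x += inactive_file;
	x += active_file;
	return x;
}

int main(void)
{
	/* Hypothetical zone: 10000 free, 2000 reserved, 30000 file pages. */
	printf("dirtyable: %lu pages\n",
	       dirtyable_memory(10000, 2000, 20000, 10000));
	return 0;
}

The clamp mirrors the kernel's min(): a zone's reserve can exceed its currently free pages, and the subtraction must not underflow.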
-rw-r--r--	include/linux/mmzone.h	|  6
-rw-r--r--	include/linux/swap.h	|  1
-rw-r--r--	mm/page-writeback.c	| 14
-rw-r--r--	mm/page_alloc.c		| 21
4 files changed, 18 insertions(+), 24 deletions(-)
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index 54d74f6eb233..0a3dc9d6579a 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -354,10 +354,10 @@ struct zone {
 	struct per_cpu_pageset __percpu *pageset;
 
 	/*
-	 * This is a per-zone reserve of pages that should not be
-	 * considered dirtyable memory.
+	 * This is a per-zone reserve of pages that are not available
+	 * to userspace allocations.
 	 */
-	unsigned long		dirty_balance_reserve;
+	unsigned long		totalreserve_pages;
 
 #ifndef CONFIG_SPARSEMEM
 	/*
diff --git a/include/linux/swap.h b/include/linux/swap.h
index a3702a6e3e71..cc20f4bd455b 100644
--- a/include/linux/swap.h
+++ b/include/linux/swap.h
@@ -287,7 +287,6 @@ static inline void workingset_node_shadows_dec(struct radix_tree_node *node)
 /* linux/mm/page_alloc.c */
 extern unsigned long totalram_pages;
 extern unsigned long totalreserve_pages;
-extern unsigned long dirty_balance_reserve;
 extern unsigned long nr_free_buffer_pages(void);
 extern unsigned long nr_free_pagecache_pages(void);
 
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index d8b93b35829f..5a70bc8121b4 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -278,7 +278,12 @@ static unsigned long zone_dirtyable_memory(struct zone *zone)
 	unsigned long nr_pages;
 
 	nr_pages = zone_page_state(zone, NR_FREE_PAGES);
-	nr_pages -= min(nr_pages, zone->dirty_balance_reserve);
+	/*
+	 * Pages reserved for the kernel should not be considered
+	 * dirtyable, to prevent a situation where reclaim has to
+	 * clean pages in order to balance the zones.
+	 */
+	nr_pages -= min(nr_pages, zone->totalreserve_pages);
 
 	nr_pages += zone_page_state(zone, NR_INACTIVE_FILE);
 	nr_pages += zone_page_state(zone, NR_ACTIVE_FILE);
@@ -332,7 +337,12 @@ static unsigned long global_dirtyable_memory(void)
 	unsigned long x;
 
 	x = global_page_state(NR_FREE_PAGES);
-	x -= min(x, dirty_balance_reserve);
+	/*
+	 * Pages reserved for the kernel should not be considered
+	 * dirtyable, to prevent a situation where reclaim has to
+	 * clean pages in order to balance the zones.
+	 */
+	x -= min(x, totalreserve_pages);
 
 	x += global_page_state(NR_INACTIVE_FILE);
 	x += global_page_state(NR_ACTIVE_FILE);
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 18490f3bd7f1..fe9cebd1ed77 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -113,13 +113,6 @@ static DEFINE_SPINLOCK(managed_page_count_lock);
 unsigned long totalram_pages __read_mostly;
 unsigned long totalreserve_pages __read_mostly;
 unsigned long totalcma_pages __read_mostly;
-/*
- * When calculating the number of globally allowed dirty pages, there
- * is a certain number of per-zone reserves that should not be
- * considered dirtyable memory.  This is the sum of those reserves
- * over all existing zones that contribute dirtyable memory.
- */
-unsigned long dirty_balance_reserve __read_mostly;
 
 int percpu_pagelist_fraction;
 gfp_t gfp_allowed_mask __read_mostly = GFP_BOOT_MASK;
@@ -5681,20 +5674,12 @@ static void calculate_totalreserve_pages(void)
 
 			if (max > zone->managed_pages)
 				max = zone->managed_pages;
+
+			zone->totalreserve_pages = max;
+
 			reserve_pages += max;
-			/*
-			 * Lowmem reserves are not available to
-			 * GFP_HIGHUSER page cache allocations and
-			 * kswapd tries to balance zones to their high
-			 * watermark.  As a result, neither should be
-			 * regarded as dirtyable memory, to prevent a
-			 * situation where reclaim has to clean pages
-			 * in order to balance the zones.
-			 */
-			zone->dirty_balance_reserve = max;
 		}
 	}
-	dirty_balance_reserve = reserve_pages;
 	totalreserve_pages = reserve_pages;
 }
 
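The final hunk is where the generalization pays off: calculate_totalreserve_pages() already computed a per-zone reserve (the largest lowmem reserve plus the high watermark, clamped to the zone's managed pages), and it now stores that value in struct zone instead of keeping a writeback-private copy. A standalone sketch of that loop follows; the zone values are invented, and the kernel's lowmem_reserve[] array is pre-collapsed to its largest entry.

/*
 * Standalone sketch of the per-zone reserve computation in
 * calculate_totalreserve_pages() after this patch. Not kernel code:
 * the zones and their values are invented for illustration.
 */
#include <stdio.h>

struct zone_sketch {
	unsigned long high_wmark;		/* high watermark, in pages */
	unsigned long max_lowmem_reserve;	/* largest lowmem_reserve[] entry */
	unsigned long managed_pages;
	unsigned long totalreserve_pages;	/* the new per-zone field */
};

int main(void)
{
	struct zone_sketch zones[] = {
		{ .high_wmark = 128, .max_lowmem_reserve = 256, .managed_pages = 300 },
		{ .high_wmark = 512, .max_lowmem_reserve = 0,   .managed_pages = 100000 },
	};
	unsigned long reserve_pages = 0;

	for (int i = 0; i < 2; i++) {
		unsigned long max = zones[i].max_lowmem_reserve + zones[i].high_wmark;

		/* The reserve can never exceed what the zone manages. */
		if (max > zones[i].managed_pages)
			max = zones[i].managed_pages;

		zones[i].totalreserve_pages = max;	/* per-zone, reusable by writeback */
		reserve_pages += max;			/* global total */
	}
	printf("totalreserve_pages = %lu\n", reserve_pages);	/* 300 + 512 = 812 */
	return 0;
}

Recording the value in struct zone is what lets zone_dirtyable_memory() reuse it directly, eliminating the separate dirty_balance_reserve bookkeeping.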