From 46123744a14e5aff835680ffc6ba8e16b5912266 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Arve=20Hj=C3=B8nnev=C3=A5g?=
Date: Tue, 17 Feb 2009 14:51:02 -0800
Subject: mm: Add min_free_order_shift tunable.
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

By default the kernel tries to keep half as much memory free at each
order as it does for one order below. This can be too aggressive when
running without swap.

Change-Id: I5efc1a0b50f41ff3ac71e92d2efd175dedd54ead
Signed-off-by: Arve Hjønnevåg
---
 mm/page_alloc.c | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 3bac76ae4b30..348ca037b055 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -206,6 +206,7 @@ static char * const zone_names[MAX_NR_ZONES] = {
 
 int min_free_kbytes = 1024;
 int user_min_free_kbytes = -1;
+int min_free_order_shift = 1;
 
 static unsigned long __meminitdata nr_kernel_pages;
 static unsigned long __meminitdata nr_all_pages;
@@ -1712,7 +1713,7 @@ static bool __zone_watermark_ok(struct zone *z, int order, unsigned long mark,
 		free_pages -= z->free_area[o].nr_free << o;
 
 		/* Require fewer higher order pages to be free */
-		min >>= 1;
+		min >>= min_free_order_shift;
 
 		if (free_pages <= min)
 			return false;
--
cgit v1.2.3
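
To see what the shift changes, here is a minimal standalone sketch (ordinary
userspace C, not kernel code and not part of the patch) that models how the
per-order minimum used by __zone_watermark_ok() decays as the order grows.
The helper name order_min and the base watermark of 1024 pages are assumptions
chosen only for illustration; with the default shift of 1 the requirement
halves at each order, while a larger shift lets it fall off faster.

/*
 * Sketch: how the per-order free-page minimum decays for different
 * values of a min_free_order_shift-style tunable.  All numbers are
 * illustrative, not taken from a real zone.
 */
#include <stdio.h>

/*
 * Minimum free pages still required once only blocks of at least
 * 'order' pages are counted, starting from a base watermark 'mark'.
 */
static unsigned long order_min(unsigned long mark, int order, int shift)
{
	unsigned long min = mark;
	int o;

	for (o = 0; o < order; o++)
		min >>= shift;	/* require fewer higher-order pages */
	return min;
}

int main(void)
{
	const unsigned long mark = 1024;	/* assumed base watermark, in pages */
	int shift, order;

	for (shift = 1; shift <= 3; shift++) {
		printf("min_free_order_shift = %d:\n", shift);
		for (order = 0; order <= 4; order++)
			printf("  order %d -> min %4lu pages\n",
			       order, order_min(mark, order, shift));
	}
	return 0;
}

For an order-2 allocation with a 1024-page watermark, a shift of 1 still
demands 256 free pages in large-enough blocks, whereas a shift of 3 demands
only 16, which is why raising the shift makes the watermark check less
aggressive on swapless systems.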