author		Nick Piggin <npiggin@suse.de>	2006-09-25 23:31:27 -0700
committer	Linus Torvalds <torvalds@g5.osdl.org>	2006-09-26 08:48:48 -0700
commit		408d85441cd5a9bd6bc851d677a10c605ed8db5f (patch)
tree		547ba9fbd4000585f14e9fcdd3cf539cde58a25a /mm
parent		6ddab3b9ebebc88bfdd8107c64f12d7e4480c559 (diff)
[PATCH] oom: use unreclaimable info
__alloc_pages currently starts shooting if page reclaim has failed to free up swap_cluster_max pages in one run through the priorities. This is not always a good indicator on its own, so make use of the all_unreclaimable logic as well: don't consider going OOM until all zones we're interested in are unreclaimable.

Signed-off-by: Nick Piggin <npiggin@suse.de>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
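For illustration only, here is a minimal userspace C sketch of the decision the patch encodes; the struct layout, function name, and zone values are hypothetical stand-ins, not kernel code. Reclaim starts by assuming every zone is unreclaimable, clears that assumption as soon as it finds one zone it can still scan, and only lets the OOM path proceed if the assumption survives the whole loop.

/* Illustrative userspace sketch of the patch's logic (names are hypothetical). */
#include <stdio.h>

struct zone {
	const char *name;
	int all_unreclaimable;	/* zone already declared hopeless by reclaim */
};

/* Returns 1 if at least one zone of interest is still reclaimable,
 * i.e. the caller should keep reclaiming instead of declaring OOM. */
static int any_zone_still_reclaimable(const struct zone *zones, int nr)
{
	int all_unreclaimable = 1;	/* mirrors sc->all_unreclaimable = 1 */
	int i;

	for (i = 0; i < nr; i++) {
		if (zones[i].all_unreclaimable)
			continue;	/* skip zones reclaim has given up on */
		all_unreclaimable = 0;	/* at least one zone is still worth scanning */
	}
	return !all_unreclaimable;
}

int main(void)
{
	struct zone zones[] = {
		{ "DMA",    1 },
		{ "Normal", 0 },	/* still reclaimable: don't OOM yet */
	};

	if (any_zone_still_reclaimable(zones, 2))
		printf("some zone is still reclaimable: keep trying, do not OOM\n");
	else
		printf("all zones unreclaimable: OOM killer may be justified\n");
	return 0;
}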
Diffstat (limited to 'mm')
-rw-r--r--	mm/vmscan.c	8
1 file changed, 8 insertions(+), 0 deletions(-)
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 16180587fd7..ba18d0c36b8 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -62,6 +62,8 @@ struct scan_control {
int swap_cluster_max;
int swappiness;
+
+ int all_unreclaimable;
};
/*
@@ -925,6 +927,7 @@ static unsigned long shrink_zones(int priority, struct zone **zones,
unsigned long nr_reclaimed = 0;
int i;
+ sc->all_unreclaimable = 1;
for (i = 0; zones[i] != NULL; i++) {
struct zone *zone = zones[i];
@@ -941,6 +944,8 @@ static unsigned long shrink_zones(int priority, struct zone **zones,
if (zone->all_unreclaimable && priority != DEF_PRIORITY)
continue; /* Let kswapd poll it */
+ sc->all_unreclaimable = 0;
+
nr_reclaimed += shrink_zone(priority, zone, sc);
}
return nr_reclaimed;
@@ -1021,6 +1026,9 @@ unsigned long try_to_free_pages(struct zone **zones, gfp_t gfp_mask)
if (sc.nr_scanned && priority < DEF_PRIORITY - 2)
blk_congestion_wait(WRITE, HZ/10);
}
+ /* top priority shrink_caches still had more to do? don't OOM, then */
+ if (!sc.all_unreclaimable)
+ ret = 1;
out:
for (i = 0; zones[i] != 0; i++) {
struct zone *zone = zones[i];