author    Wu Fengguang <fengguang.wu@intel.com>    2009-06-16 15:32:29 -0700
committer Linus Torvalds <torvalds@linux-foundation.org>    2009-06-16 19:47:39 -0700
commit    6e08a369ee10b361ac1cdcdf4fabd420fd08beb3 (patch)
tree      9dbf870cad025b64781d9051b6680a8a23927e5a
parent    56e49d218890f49b0057710a4b6fef31f5ffbfec (diff)
vmscan: cleanup the scan batching code
The vmscan batching logic is convoluted.  Move it into a standalone
function nr_scan_try_batch() and document it.  No behavior change.

Signed-off-by: Wu Fengguang <fengguang.wu@intel.com>
Acked-by: Rik van Riel <riel@redhat.com>
Cc: Nick Piggin <npiggin@suse.de>
Cc: Christoph Lameter <cl@linux-foundation.org>
Acked-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Acked-by: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
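The batching behavior is easy to try outside the kernel. The sketch below
(illustrative only, not part of the patch) copies nr_scan_try_batch() from
the mm/vmscan.c hunk and drives it with a series of small requests; the
threshold of 32 matches the kernel's SWAP_CLUSTER_MAX default, while the
harness itself, the request size of 13, and the loop count are arbitrary
choices for demonstration.

#include <stdio.h>

/* Copied from the patch: small nr_to_scan requests are deposited in
 * *nr_saved_scan, and nothing is scanned until the deposit reaches
 * swap_cluster_max, at which point the whole batch is returned. */
static unsigned long nr_scan_try_batch(unsigned long nr_to_scan,
				       unsigned long *nr_saved_scan,
				       unsigned long swap_cluster_max)
{
	unsigned long nr;

	*nr_saved_scan += nr_to_scan;
	nr = *nr_saved_scan;

	if (nr >= swap_cluster_max)
		*nr_saved_scan = 0;	/* batch released: reset the deposit */
	else
		nr = 0;			/* keep saving, scan nothing yet */

	return nr;
}

int main(void)
{
	unsigned long saved = 0;
	int i;

	/* Feed six small requests; 32 mirrors SWAP_CLUSTER_MAX. */
	for (i = 0; i < 6; i++) {
		unsigned long nr = nr_scan_try_batch(13, &saved, 32);
		printf("request 13 -> scan %lu (saved %lu)\n", nr, saved);
	}
	return 0;
}

Two calls out of three return 0 while the deposit grows; every third call
crosses the threshold and returns the full 39 accumulated pages, so the
scanner always works in chunks of at least swap_cluster_max.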
-rw-r--r--  include/linux/mmzone.h |  4
-rw-r--r--  mm/page_alloc.c        |  2
-rw-r--r--  mm/vmscan.c            | 39
-rw-r--r--  mm/vmstat.c            |  8

4 files changed, 35 insertions(+), 18 deletions(-)
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index dd8487f0442..db976b9f879 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -334,9 +334,9 @@ struct zone {
 
 	/* Fields commonly accessed by the page reclaim scanner */
 	spinlock_t		lru_lock;
-	struct {
+	struct zone_lru {
 		struct list_head list;
-		unsigned long nr_scan;
+		unsigned long nr_saved_scan;	/* accumulated for batching */
 	} lru[NR_LRU_LISTS];
 
 	struct zone_reclaim_stat reclaim_stat;
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 131655cdb6b..e5b8f628d16 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -3657,7 +3657,7 @@ static void __paginginit free_area_init_core(struct pglist_data *pgdat,
 		zone_pcp_init(zone);
 		for_each_lru(l) {
 			INIT_LIST_HEAD(&zone->lru[l].list);
-			zone->lru[l].nr_scan = 0;
+			zone->lru[l].nr_saved_scan = 0;
 		}
 		zone->reclaim_stat.recent_rotated[0] = 0;
 		zone->reclaim_stat.recent_rotated[1] = 0;
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 9673437a545..d4da097533c 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -1492,6 +1492,26 @@ static void get_scan_ratio(struct zone *zone, struct scan_control *sc,
 	percent[1] = 100 - percent[0];
 }
 
+/*
+ * Smallish @nr_to_scan's are deposited in @nr_saved_scan,
+ * until we collected @swap_cluster_max pages to scan.
+ */
+static unsigned long nr_scan_try_batch(unsigned long nr_to_scan,
+				       unsigned long *nr_saved_scan,
+				       unsigned long swap_cluster_max)
+{
+	unsigned long nr;
+
+	*nr_saved_scan += nr_to_scan;
+	nr = *nr_saved_scan;
+
+	if (nr >= swap_cluster_max)
+		*nr_saved_scan = 0;
+	else
+		nr = 0;
+
+	return nr;
+}
 /*
  * This is a basic per-zone page freer.  Used by both kswapd and direct reclaim.
  */
@@ -1517,14 +1537,11 @@ static void shrink_zone(int priority, struct zone *zone,
 			scan >>= priority;
 			scan = (scan * percent[file]) / 100;
 		}
-		if (scanning_global_lru(sc)) {
-			zone->lru[l].nr_scan += scan;
-			nr[l] = zone->lru[l].nr_scan;
-			if (nr[l] >= swap_cluster_max)
-				zone->lru[l].nr_scan = 0;
-			else
-				nr[l] = 0;
-		} else
+		if (scanning_global_lru(sc))
+			nr[l] = nr_scan_try_batch(scan,
+						  &zone->lru[l].nr_saved_scan,
+						  swap_cluster_max);
+		else
 			nr[l] = scan;
 	}
 
@@ -2124,11 +2141,11 @@ static void shrink_all_zones(unsigned long nr_pages, int prio,
 						l == LRU_ACTIVE_FILE))
 				continue;
 
-			zone->lru[l].nr_scan += (lru_pages >> prio) + 1;
-			if (zone->lru[l].nr_scan >= nr_pages || pass > 3) {
+			zone->lru[l].nr_saved_scan += (lru_pages >> prio) + 1;
+			if (zone->lru[l].nr_saved_scan >= nr_pages || pass > 3) {
 				unsigned long nr_to_scan;
 
-				zone->lru[l].nr_scan = 0;
+				zone->lru[l].nr_saved_scan = 0;
 				nr_to_scan = min(nr_pages, lru_pages);
 				nr_reclaimed += shrink_list(l, nr_to_scan, zone,
 								sc, prio);
diff --git a/mm/vmstat.c b/mm/vmstat.c
index 415110772c7..84c05555691 100644
--- a/mm/vmstat.c
+++ b/mm/vmstat.c
@@ -718,10 +718,10 @@ static void zoneinfo_show_print(struct seq_file *m, pg_data_t *pgdat,
 		   low_wmark_pages(zone),
 		   high_wmark_pages(zone),
 		   zone->pages_scanned,
-		   zone->lru[LRU_ACTIVE_ANON].nr_scan,
-		   zone->lru[LRU_INACTIVE_ANON].nr_scan,
-		   zone->lru[LRU_ACTIVE_FILE].nr_scan,
-		   zone->lru[LRU_INACTIVE_FILE].nr_scan,
+		   zone->lru[LRU_ACTIVE_ANON].nr_saved_scan,
+		   zone->lru[LRU_INACTIVE_ANON].nr_saved_scan,
+		   zone->lru[LRU_ACTIVE_FILE].nr_saved_scan,
+		   zone->lru[LRU_INACTIVE_FILE].nr_saved_scan,
 		   zone->spanned_pages,
 		   zone->present_pages);
 
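A worked example of when the batching matters (illustrative numbers, not
from the patch): shrink_zone() derives the per-list request as
scan >>= priority, and reclaim starts at DEF_PRIORITY, i.e. 12. Ignoring
the anon/file percentage split, an LRU list of 100,000 pages yields
100000 >> 12 = 24 pages per pass, below a swap_cluster_max of 32, so
nr_scan_try_batch() deposits the request and returns 0; the next pass
raises the deposit to 48 and releases one 48-page batch. A list of a
million pages yields 244 per pass and is never deferred.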