author    Christoph Lameter <clameter@sgi.com>   2006-06-30 01:55:39 -0700
committer Linus Torvalds <torvalds@g5.osdl.org> 2006-06-30 11:25:35 -0700
commit    b1e7a8fd854d2f895730e82137400012b509650e
tree      9fba87ff6b0146ebd4ee5bc7d5f0c8b037dbb3ad /mm
parent    df849a1529c106f7460e51479ca78fe07b07dc8c
[PATCH] zoned vm counters: conversion of nr_dirty to per zone counter
This makes nr_dirty a per zone counter. Looping over all processors is
avoided during writeback state determination.

The counter aggregation for nr_dirty had to be undone in the NFS layer
since we summed up the page counts from multiple zones. Someone more
familiar with NFS should probably review what I have done.

[akpm@osdl.org: bugfix]
Signed-off-by: Christoph Lameter <clameter@sgi.com>
Cc: Trond Myklebust <trond.myklebust@fys.uio.no>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
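The win is in how the counter is read. read_page_state() summed a per-CPU
page_state structure across all online processors on every call, whereas
global_page_state() returns an already-folded global total that the
per-zone counters feed. A minimal userspace sketch of the contrast; all
names here (demo_*, NR_DEMO_CPUS) are invented for illustration and are
not kernel code:

/*
 * Illustrative only: contrasts an O(nr_cpus) read (the old
 * read_page_state() scheme) with an O(1) read of a folded global
 * counter (the global_page_state() scheme this patch moves to).
 */
#include <stdio.h>

#define NR_DEMO_CPUS 4

/* Old scheme: every CPU keeps its own dirty-page count. */
static unsigned long percpu_nr_dirty[NR_DEMO_CPUS];

static unsigned long demo_read_page_state(void)
{
	unsigned long sum = 0;

	/* A read must visit every processor's counter. */
	for (int cpu = 0; cpu < NR_DEMO_CPUS; cpu++)
		sum += percpu_nr_dirty[cpu];
	return sum;
}

/* New scheme: per-zone deltas are folded into one global total. */
static long demo_vm_stat_nr_dirty;

static unsigned long demo_global_page_state(void)
{
	return demo_vm_stat_nr_dirty;	/* single load, no CPU loop */
}

int main(void)
{
	percpu_nr_dirty[0] = 3;
	percpu_nr_dirty[2] = 5;
	demo_vm_stat_nr_dirty = 8;

	printf("old: %lu, new: %lu\n",
	       demo_read_page_state(), demo_global_page_state());
	return 0;
}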
Diffstat (limited to 'mm')
-rw-r--r--  mm/page-writeback.c | 11 ++++++-----
-rw-r--r--  mm/page_alloc.c     |  2 +-
-rw-r--r--  mm/vmstat.c         |  2 +-
3 files changed, 8 insertions(+), 7 deletions(-)
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index 0faacfe1890..da854783009 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -109,7 +109,7 @@ struct writeback_state
 static void get_writeback_state(struct writeback_state *wbs)
 {
-	wbs->nr_dirty = read_page_state(nr_dirty);
+	wbs->nr_dirty = global_page_state(NR_FILE_DIRTY);
 	wbs->nr_unstable = read_page_state(nr_unstable);
 	wbs->nr_mapped = global_page_state(NR_FILE_MAPPED) +
 			global_page_state(NR_ANON_PAGES);
@@ -641,7 +641,8 @@ int __set_page_dirty_nobuffers(struct page *page)
 		if (mapping2) { /* Race with truncate? */
 			BUG_ON(mapping2 != mapping);
 			if (mapping_cap_account_dirty(mapping))
-				inc_page_state(nr_dirty);
+				__inc_zone_page_state(page,
+							NR_FILE_DIRTY);
 			radix_tree_tag_set(&mapping->page_tree,
 				page_index(page), PAGECACHE_TAG_DIRTY);
 		}
@@ -728,9 +729,9 @@ int test_clear_page_dirty(struct page *page)
 			radix_tree_tag_clear(&mapping->page_tree,
 						page_index(page),
 						PAGECACHE_TAG_DIRTY);
-			write_unlock_irqrestore(&mapping->tree_lock, flags);
 			if (mapping_cap_account_dirty(mapping))
-				dec_page_state(nr_dirty);
+				__dec_zone_page_state(page, NR_FILE_DIRTY);
+			write_unlock_irqrestore(&mapping->tree_lock, flags);
 			return 1;
 		}
 		write_unlock_irqrestore(&mapping->tree_lock, flags);
@@ -761,7 +762,7 @@ int clear_page_dirty_for_io(struct page *page)
 	if (mapping) {
 		if (TestClearPageDirty(page)) {
 			if (mapping_cap_account_dirty(mapping))
-				dec_page_state(nr_dirty);
+				dec_zone_page_state(page, NR_FILE_DIRTY);
 			return 1;
 		}
 		return 0;
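A detail worth noting in the hunks above: in test_clear_page_dirty() the
accounting moves before write_unlock_irqrestore(), so the non-interrupt-safe
__dec_zone_page_state() still runs with the tree_lock held and interrupts
disabled, while clear_page_dirty_for_io(), which holds no such lock here,
uses the interrupt-safe dec_zone_page_state(). A hedged sketch of how the
safe wrapper relates to the __ variant; the real implementation lives in
mm/vmstat.c and may differ in detail:

/* Sketch only: the interrupt-safe form brackets the unprotected
 * __ variant with local interrupt disabling. */
void dec_zone_page_state(struct page *page, enum zone_stat_item item)
{
	unsigned long flags;

	local_irq_save(flags);
	__dec_zone_page_state(page, item);
	local_irq_restore(flags);
}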
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index ed3f2a7b407..c2b9aa4acc4 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -1314,7 +1314,7 @@ void show_free_areas(void)
"unstable:%lu free:%u slab:%lu mapped:%lu pagetables:%lu\n",
active,
inactive,
- ps.nr_dirty,
+ global_page_state(NR_FILE_DIRTY),
ps.nr_writeback,
ps.nr_unstable,
nr_free_pages(),
diff --git a/mm/vmstat.c b/mm/vmstat.c
index 292a35fe56c..1982fb533a4 100644
--- a/mm/vmstat.c
+++ b/mm/vmstat.c
@@ -400,9 +400,9 @@ static char *vmstat_text[] = {
"nr_file_pages",
"nr_slab",
"nr_page_table_pages",
+ "nr_dirty",
/* Page state */
- "nr_dirty",
"nr_writeback",
"nr_unstable",