author      Nick Piggin <>  2008-07-25 19:45:30 -0700
committer   Linus Torvalds <>  2008-07-26 12:00:06 -0700
commit      e286781d5f2e9c846e012a39653a166e9d31777d (patch)
tree        14958fe6d8f3e0459c96c68b3034ea2433ab85ac /mm/filemap.c
parent      47feff2c8eefe85099f87c43d3096855f0085ca0 (diff)
mm: speculative page references
If we can be sure that elevating the page_count on a pagecache page will pin it, we can speculatively run this operation, and subsequently check to see if we hit the right page rather than relying on holding a lock or otherwise pinning a reference to the page.

This can be done if get_page/put_page behaves consistently throughout the whole tree (ie. if we "get" the page after it has been used for something else, we must be able to free it with a put_page).

Actually, there is a period where the count behaves differently: when the page is free or if it is a constituent page of a compound page. We need an atomic_inc_not_zero operation to ensure we don't try to grab the page in either case.

This patch introduces the core locking protocol to the pagecache (ie. adds page_cache_get_speculative, and tweaks some update-side code to make it work).

Thanks to Hugh for pointing out an improvement to the algorithm setting page_count to zero when we have control of all references, in order to hold off speculative getters.

[ fix migration_entry_wait()]
[ fix add_to_page_cache]
[ repair a comment]
Signed-off-by: Nick Piggin <>
Cc: Jeff Garzik <>
Cc: Benjamin Herrenschmidt <>
Cc: Paul Mackerras <>
Cc: Hugh Dickins <>
Cc: "Paul E. McKenney" <>
Reviewed-by: Peter Zijlstra <>
Signed-off-by: Daisuke Nishimura <>
Signed-off-by: KAMEZAWA Hiroyuki <>
Signed-off-by: KOSAKI Motohiro <>
Signed-off-by: Hugh Dickins <>
Acked-by: Nick Piggin <>
Signed-off-by: Andrew Morton <>
Signed-off-by: Linus Torvalds <>
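For readers unfamiliar with the protocol the message describes, the read-side sequence is roughly: (1) find the page without holding the tree lock, (2) try to take a reference with an inc-not-zero operation, and (3) re-check that the pinned page is still the one that was looked up, retrying otherwise. Below is a minimal user-space C analogue of the get/put discipline, not the kernel implementation; struct page, page_get_unless_zero() and page_put() here are illustrative stand-ins for the kernel's page reference helpers.

#include <stdatomic.h>
#include <stdbool.h>

struct page {
        atomic_int count;       /* stand-in for the kernel's page reference count */
        void *mapping;          /* stand-in for page->mapping */
        unsigned long index;    /* stand-in for page->index */
};

/*
 * Take a reference only if the count is currently non-zero.  A page whose
 * count is zero is free (or has been "frozen" by an updater that owns all
 * references, per Hugh's refinement above), so speculative getters back off.
 */
static bool page_get_unless_zero(struct page *page)
{
        int old = atomic_load(&page->count);

        while (old != 0) {
                if (atomic_compare_exchange_weak(&page->count, &old, old + 1))
                        return true;    /* reference taken; page cannot be freed under us */
        }
        return false;                   /* count hit zero; caller must retry the lookup */
}

static void page_put(struct page *page)
{
        atomic_fetch_sub(&page->count, 1);      /* the real put helper frees at zero */
}

The point of the inc-not-zero primitive is that elevating the count is only safe once it is known to be non-zero; a plain increment could "resurrect" a page that is already being freed or that is a constituent page of a compound page.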
Diffstat (limited to 'mm/filemap.c')
1 files changed, 18 insertions, 14 deletions
diff --git a/mm/filemap.c b/mm/filemap.c
index 2d3ec1ffc66..4e182a9a14c 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -442,39 +442,43 @@ int filemap_write_and_wait_range(struct address_space *mapping,
- * add_to_page_cache - add newly allocated pagecache pages
+ * add_to_page_cache_locked - add a locked page to the pagecache
  * @page: page to add
  * @mapping: the page's address_space
  * @offset: page index
  * @gfp_mask: page allocation mode
  *
- * This function is used to add newly allocated pagecache pages;
- * the page is new, so we can just run SetPageLocked() against it.
- * The other page state flags were set by rmqueue().
- *
+ * This function is used to add a page to the pagecache. It must be locked.
  * This function does not add the page to the LRU. The caller must do that.
  */
-int add_to_page_cache(struct page *page, struct address_space *mapping,
+int add_to_page_cache_locked(struct page *page, struct address_space *mapping,
 		pgoff_t offset, gfp_t gfp_mask)
 {
-	int error = mem_cgroup_cache_charge(page, current->mm,
+	int error;
+
+	VM_BUG_ON(!PageLocked(page));
+
+	error = mem_cgroup_cache_charge(page, current->mm,
 					gfp_mask & ~__GFP_HIGHMEM);
 	if (error)
 		goto out;
 
 	error = radix_tree_preload(gfp_mask & ~__GFP_HIGHMEM);
 	if (error == 0) {
+		page_cache_get(page);
+		page->mapping = mapping;
+		page->index = offset;
+
 		error = radix_tree_insert(&mapping->page_tree, offset, page);
-		if (!error) {
-			page_cache_get(page);
-			SetPageLocked(page);
-			page->mapping = mapping;
-			page->index = offset;
+		if (likely(!error)) {
 			__inc_zone_page_state(page, NR_FILE_PAGES);
-		} else
+		} else {
+			page->mapping = NULL;
+			page_cache_release(page);
+		}
@@ -483,7 +487,7 @@ int add_to_page_cache(struct page *page, struct address_space *mapping,
 	return error;
 }
-EXPORT_SYMBOL(add_to_page_cache);
+EXPORT_SYMBOL(add_to_page_cache_locked);
 
 int add_to_page_cache_lru(struct page *page, struct address_space *mapping,
 				pgoff_t offset, gfp_t gfp_mask)
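The reordering in the first hunk above is the update-side half of the protocol: the new page's reference is taken and page->mapping/page->index are set before radix_tree_insert(), and both are undone if the insert fails. The lookup side then pins the page speculatively and re-checks those fields. Continuing the hedged user-space sketch from above, with find_page_in_tree() as a hypothetical unlocked lookup helper (the kernel walks the radix tree under RCU instead):

/* Hypothetical unlocked lookup into whatever tree indexes the pages. */
struct page *find_page_in_tree(void *mapping, unsigned long index);

struct page *speculative_find(void *mapping, unsigned long index)
{
        struct page *page;

        for (;;) {
                page = find_page_in_tree(mapping, index);       /* no lock held */
                if (page == NULL)
                        return NULL;

                if (!page_get_unless_zero(page))
                        continue;               /* page is being freed; look it up again */

                /*
                 * We now hold a reference, so the page cannot be freed or
                 * reused under us; verify it is still the page we wanted.
                 */
                if (page->mapping == mapping && page->index == index)
                        return page;

                page_put(page);                 /* raced with removal or reuse; retry */
        }
}

This is also why the failure path in add_to_page_cache_locked() clears page->mapping and drops its reference when radix_tree_insert() fails: a speculative getter that pinned the page in that window will see the NULL mapping in its re-check and release the page.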