author     Andy Whitcroft <apw@canonical.com>     2010-07-29 09:46:41 +0100
committer  John Rigby <john.rigby@linaro.org>     2010-12-13 16:42:41 -0700
commit     591ece97eb95bdd59679820fce3cf63c1717a5df (patch)
tree       1c004069ce91367dc637b0955c597676e4e09894 /mm
parent     65e5830634fc3626a0500794f87cf86f24f2ae20 (diff)
UBUNTU: SAUCE: add tracing for user initiated readahead requests
Track pages which undergo readahead and, for each, record whether it was actually consumed, via either a read or a fault into a map. This allows userspace readahead applications (such as ureadahead) to track which pages in core at the end of a boot are actually required and to generate an optimal readahead pack. It also allows pack adjustment and optimisation in parallel with readahead, so the pack can evolve and remain accurate as userspace paths change.

The status of the pages is reported back via the mincore() call using a newly allocated bit.

Signed-off-by: Andy Whitcroft <apw@canonical.com>
Acked-by: Stefan Bader <stefan.bader@canonical.com>
Signed-off-by: Leann Ogasawara <leann.ogasawara@canonical.com>
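For illustration only, and not part of the patch: a minimal userspace sketch of how the new state might be read back. It assumes the readahead-unused flag is reported in bit 7 of each mincore() vector byte for cached pages, as in the mm/mincore.c hunk below; everything else (file handling, output format) is hypothetical.

/*
 * Hypothetical example: map a file read-only and report, per page,
 * whether it is resident and whether it still carries the assumed
 * readahead-unused state (bit 7 of the mincore() vector byte).
 */
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/mman.h>
#include <sys/stat.h>
#include <unistd.h>

int main(int argc, char **argv)
{
	struct stat st;
	unsigned char *vec;
	void *map;
	long page_size = sysconf(_SC_PAGESIZE);
	size_t pages, i;
	int fd;

	if (argc != 2) {
		fprintf(stderr, "usage: %s <file>\n", argv[0]);
		return 1;
	}

	fd = open(argv[1], O_RDONLY);
	if (fd < 0) {
		perror("open");
		return 1;
	}
	if (fstat(fd, &st) < 0 || st.st_size == 0) {
		fprintf(stderr, "%s: cannot stat, or file is empty\n", argv[1]);
		return 1;
	}

	map = mmap(NULL, st.st_size, PROT_READ, MAP_SHARED, fd, 0);
	if (map == MAP_FAILED) {
		perror("mmap");
		return 1;
	}

	pages = ((size_t)st.st_size + page_size - 1) / page_size;
	vec = malloc(pages);
	if (!vec || mincore(map, st.st_size, vec) < 0) {
		perror("mincore");
		return 1;
	}

	for (i = 0; i < pages; i++)
		printf("page %zu: resident=%d readahead-unused=%d\n",
		       i, vec[i] & 0x01, (vec[i] & 0x80) ? 1 : 0);

	free(vec);
	munmap(map, st.st_size);
	close(fd);
	return 0;
}

Pages which are resident but still flagged readahead-unused at the end of a boot were read ahead without ever being consumed, and are candidates for removal from the readahead pack.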
Diffstat (limited to 'mm')
-rw-r--r--  mm/filemap.c    3
-rw-r--r--  mm/memory.c     7
-rw-r--r--  mm/mincore.c    2
-rw-r--r--  mm/readahead.c  1
4 files changed, 12 insertions, 1 deletion
diff --git a/mm/filemap.c b/mm/filemap.c
index ea89840fc65f..d0fa9c7aac94 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -1202,6 +1202,9 @@ int file_read_actor(read_descriptor_t *desc, struct page *page,
 	if (size > count)
 		size = count;
 
+	if (PageReadaheadUnused(page))
+		ClearPageReadaheadUnused(page);
+
 	/*
 	 * Faults on the destination of a read are common, so do it before
 	 * taking the kmap.
diff --git a/mm/memory.c b/mm/memory.c
index 02e48aa0ed13..7dedad0a8634 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -2952,10 +2952,15 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
 	else
 		VM_BUG_ON(!PageLocked(vmf.page));
 
+	page = vmf.page;
+
+	/* Mark the page as used on fault. */
+	if (PageReadaheadUnused(page))
+		ClearPageReadaheadUnused(page);
+
 	/*
 	 * Should we do an early C-O-W break?
 	 */
-	page = vmf.page;
 	if (flags & FAULT_FLAG_WRITE) {
 		if (!(vma->vm_flags & VM_SHARED)) {
 			anon = 1;
diff --git a/mm/mincore.c b/mm/mincore.c
index 9ac42dc6d7b6..a4e573a2d607 100644
--- a/mm/mincore.c
+++ b/mm/mincore.c
@@ -77,6 +77,8 @@ static unsigned char mincore_page(struct address_space *mapping, pgoff_t pgoff)
 	page = find_get_page(mapping, pgoff);
 	if (page) {
 		present = PageUptodate(page);
+		if (present)
+			present |= (PageReadaheadUnused(page) << 7);
 		page_cache_release(page);
 	}
 
diff --git a/mm/readahead.c b/mm/readahead.c
index 77506a291a2d..6948b9297b91 100644
--- a/mm/readahead.c
+++ b/mm/readahead.c
@@ -181,6 +181,7 @@ __do_page_cache_readahead(struct address_space *mapping, struct file *filp,
 		list_add(&page->lru, &page_pool);
 		if (page_idx == nr_to_read - lookahead_size)
 			SetPageReadahead(page);
+		SetPageReadaheadUnused(page);
 		ret++;
 	}
 