author     Andy Whitcroft <apw@canonical.com>       2010-07-29 09:46:41 +0100
committer  John Rigby <john.rigby@linaro.org>       2011-03-16 15:46:21 -0600
commit     51eb41bee6a1a4fa4da8b840944000cfbcc45fdf (patch)
tree       a7334d23c1aa38cde161ad2cd25ff764f82bec6e /mm
parent     43779a3029536338bd599ed614fa4fe6f30dee0d (diff)
UBUNTU: SAUCE: add tracing for user initiated readahead requests
Track pages which undergo readahead and, for each, record whether it was actually consumed, either via a read or by being faulted into a mapping. This allows userspace readahead applications (such as ureadahead) to track which pages in core at the end of a boot are actually required and to generate an optimal readahead pack. It also allows pack adjustment and optimisation in parallel with readahead, letting the pack evolve and stay accurate as userspace paths change. The status of the pages is reported back via the mincore() call using a newly allocated bit.

Signed-off-by: Andy Whitcroft <apw@canonical.com>
Acked-by: Stefan Bader <stefan.bader@canonical.com>
Signed-off-by: Leann Ogasawara <leann.ogasawara@canonical.com>
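A minimal userspace sketch (not part of the patch) of how a tool such as ureadahead could read the new bit back. The 0x80 mask follows from the `<< 7` shift in the mm/mincore.c hunk below; the file name, error handling and output format are illustrative assumptions only.

	/* Sketch: query mincore() for the "readahead but never used" bit
	 * exposed by this patch.  Assumes a kernel carrying the patch;
	 * on a stock kernel bit 7 is never set. */
	#define _DEFAULT_SOURCE		/* for the mincore() declaration in glibc */
	#include <stdio.h>
	#include <stdlib.h>
	#include <fcntl.h>
	#include <unistd.h>
	#include <sys/mman.h>
	#include <sys/stat.h>

	int main(int argc, char **argv)
	{
		const char *path = argc > 1 ? argv[1] : "/etc/fstab"; /* example file */
		long pagesize = sysconf(_SC_PAGESIZE);
		struct stat st;
		int fd = open(path, O_RDONLY);

		if (fd < 0 || fstat(fd, &st) < 0 || st.st_size == 0)
			return 1;

		void *map = mmap(NULL, st.st_size, PROT_READ, MAP_SHARED, fd, 0);
		if (map == MAP_FAILED)
			return 1;

		size_t pages = (st.st_size + pagesize - 1) / pagesize;
		unsigned char *vec = calloc(pages, 1);
		if (!vec || mincore(map, st.st_size, vec) < 0)
			return 1;

		for (size_t i = 0; i < pages; i++) {
			int present = vec[i] & 0x01;	/* page resident in core */
			int unused  = vec[i] & 0x80;	/* read ahead, never consumed */
			printf("page %zu: present=%d readahead-unused=%d\n",
			       i, !!present, !!unused);
		}
		return 0;
	}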
Diffstat (limited to 'mm')
-rw-r--r--   mm/filemap.c     3
-rw-r--r--   mm/memory.c      7
-rw-r--r--   mm/mincore.c     2
-rw-r--r--   mm/readahead.c   1
4 files changed, 12 insertions, 1 deletion
diff --git a/mm/filemap.c b/mm/filemap.c
index 83a45d35468b..4fdc9668d4ed 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -1211,6 +1211,9 @@ int file_read_actor(read_descriptor_t *desc, struct page *page,
 	if (size > count)
 		size = count;
+	if (PageReadaheadUnused(page))
+		ClearPageReadaheadUnused(page);
+
 	/*
 	 * Faults on the destination of a read are common, so do it before
 	 * taking the kmap.
diff --git a/mm/memory.c b/mm/memory.c
index 5823698c2b71..42ed735e9f08 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -3030,10 +3030,15 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
 	else
 		VM_BUG_ON(!PageLocked(vmf.page));
+	page = vmf.page;
+
+	/* Mark the page as used on fault. */
+	if (PageReadaheadUnused(page))
+		ClearPageReadaheadUnused(page);
+
 	/*
 	 * Should we do an early C-O-W break?
 	 */
-	page = vmf.page;
 	if (flags & FAULT_FLAG_WRITE) {
 		if (!(vma->vm_flags & VM_SHARED)) {
 			anon = 1;
diff --git a/mm/mincore.c b/mm/mincore.c
index a4e6b9d75c76..e7b61de86f7b 100644
--- a/mm/mincore.c
+++ b/mm/mincore.c
@@ -77,6 +77,8 @@ static unsigned char mincore_page(struct address_space *mapping, pgoff_t pgoff)
 	page = find_get_page(mapping, pgoff);
 	if (page) {
 		present = PageUptodate(page);
+		if (present)
+			present |= (PageReadaheadUnused(page) << 7);
 		page_cache_release(page);
 	}
diff --git a/mm/readahead.c b/mm/readahead.c
index 77506a291a2d..6948b9297b91 100644
--- a/mm/readahead.c
+++ b/mm/readahead.c
@@ -181,6 +181,7 @@ __do_page_cache_readahead(struct address_space *mapping, struct file *filp,
 		list_add(&page->lru, &page_pool);
 		if (page_idx == nr_to_read - lookahead_size)
 			SetPageReadahead(page);
+		SetPageReadaheadUnused(page);
 		ret++;
 	}