author	Linus Torvalds <torvalds@linux-foundation.org>	2009-02-19 13:09:32 -0800
committer	Linus Torvalds <torvalds@linux-foundation.org>	2009-02-19 13:09:32 -0800
commit	620565ef5f35f4196e5081417db381c16b0ae791 (patch)
tree	6bfbcd9c26de367a64177392e9eb0dc553094f36 /fs
parent	a5e753638874b9caabde01c8774e1a39e31cb614 (diff)
parent	27e88bf6af7d42adf790f7b2ed7d65475f191cf2 (diff)
Merge branch 'for-linus' of git://oss.sgi.com/xfs/xfs
* 'for-linus' of git://oss.sgi.com/xfs/xfs:
  Revert "[XFS] remove old vmap cache"
  Revert "[XFS] use scalable vmap API"
Diffstat (limited to 'fs')
-rw-r--r--	fs/xfs/linux-2.6/xfs_buf.c	79
1 file changed, 76 insertions(+), 3 deletions(-)
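For context, the two reverted patches had converted XFS's multi-page buffer mapping from vmap()/vunmap() to the scalable vm_map_ram()/vm_unmap_ram() API; this merge puts the old path back, together with a small deferred-vunmap cache. The snippet below is only an illustrative sketch of the two call styles, using the 2.6.29-era signatures visible in the hunks that follow; pages and page_count are hypothetical stand-ins for the xfs_buf fields.

	/* scalable API the reverts move away from: map an array of pages
	 * into contiguous kernel virtual space, then release the mapping */
	void *addr = vm_map_ram(pages, page_count, -1 /* any NUMA node */, PAGE_KERNEL);
	if (addr)
		vm_unmap_ram(addr, page_count);

	/* classic API the reverts restore: same kind of mapping, but torn
	 * down with vunmap(), which this patch batches through the
	 * free_address()/purge_addresses() cache added below */
	addr = vmap(pages, page_count, VM_MAP, PAGE_KERNEL);
	if (addr)
		vunmap(addr);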
diff --git a/fs/xfs/linux-2.6/xfs_buf.c b/fs/xfs/linux-2.6/xfs_buf.c
index d71dc44e21e..cb329edc925 100644
--- a/fs/xfs/linux-2.6/xfs_buf.c
+++ b/fs/xfs/linux-2.6/xfs_buf.c
@@ -166,6 +166,75 @@ test_page_region(
 }
 
 /*
+ * Mapping of multi-page buffers into contiguous virtual space
+ */
+
+typedef struct a_list {
+	void		*vm_addr;
+	struct a_list	*next;
+} a_list_t;
+
+static a_list_t		*as_free_head;
+static int		as_list_len;
+static DEFINE_SPINLOCK(as_lock);
+
+/*
+ * Try to batch vunmaps because they are costly.
+ */
+STATIC void
+free_address(
+	void		*addr)
+{
+	a_list_t	*aentry;
+
+#ifdef CONFIG_XEN
+	/*
+	 * Xen needs to be able to make sure it can get an exclusive
+	 * RO mapping of pages it wants to turn into a pagetable. If
+	 * a newly allocated page is also still being vmap()ed by xfs,
+	 * it will cause pagetable construction to fail. This is a
+	 * quick workaround to always eagerly unmap pages so that Xen
+	 * is happy.
+	 */
+	vunmap(addr);
+	return;
+#endif
+
+	aentry = kmalloc(sizeof(a_list_t), GFP_NOWAIT);
+	if (likely(aentry)) {
+		spin_lock(&as_lock);
+		aentry->next = as_free_head;
+		aentry->vm_addr = addr;
+		as_free_head = aentry;
+		as_list_len++;
+		spin_unlock(&as_lock);
+	} else {
+		vunmap(addr);
+	}
+}
+
+STATIC void
+purge_addresses(void)
+{
+	a_list_t	*aentry, *old;
+
+	if (as_free_head == NULL)
+		return;
+
+	spin_lock(&as_lock);
+	aentry = as_free_head;
+	as_free_head = NULL;
+	as_list_len = 0;
+	spin_unlock(&as_lock);
+
+	while ((old = aentry) != NULL) {
+		vunmap(aentry->vm_addr);
+		aentry = aentry->next;
+		kfree(old);
+	}
+}
+
+/*
  * Internal xfs_buf_t object manipulation
  */
 
@@ -264,7 +333,7 @@ xfs_buf_free(
 	uint		i;
 
 	if ((bp->b_flags & XBF_MAPPED) && (bp->b_page_count > 1))
-		vm_unmap_ram(bp->b_addr - bp->b_offset, bp->b_page_count);
+		free_address(bp->b_addr - bp->b_offset);
 
 	for (i = 0; i < bp->b_page_count; i++) {
 		struct page	*page = bp->b_pages[i];
@@ -386,8 +455,10 @@ _xfs_buf_map_pages(
 		bp->b_addr = page_address(bp->b_pages[0]) + bp->b_offset;
 		bp->b_flags |= XBF_MAPPED;
 	} else if (flags & XBF_MAPPED) {
-		bp->b_addr = vm_map_ram(bp->b_pages, bp->b_page_count,
-					-1, PAGE_KERNEL);
+		if (as_list_len > 64)
+			purge_addresses();
+		bp->b_addr = vmap(bp->b_pages, bp->b_page_count,
+					VM_MAP, PAGE_KERNEL);
 		if (unlikely(bp->b_addr == NULL))
 			return -ENOMEM;
 		bp->b_addr += bp->b_offset;
@@ -1672,6 +1743,8 @@ xfsbufd(
 			count++;
 		}
 
+		if (as_list_len > 0)
+			purge_addresses();
 		if (count)
 			blk_run_address_space(target->bt_mapping);
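Taken together, the restored hunks give the buffer cache three touch points for the deferred-unmap list. The sketch below is not part of the patch and simplifies the call sites, but shows the lifecycle the hunks above implement.

	/* 1. map (_xfs_buf_map_pages): trim the cache before building a new mapping */
	if (as_list_len > 64)
		purge_addresses();		/* vunmap() everything queued so far */
	bp->b_addr = vmap(bp->b_pages, bp->b_page_count, VM_MAP, PAGE_KERNEL);

	/* 2. free (xfs_buf_free): queue the address instead of unmapping inline;
	 *    free_address() falls back to an immediate vunmap() if kmalloc fails */
	free_address(bp->b_addr - bp->b_offset);

	/* 3. background (xfsbufd): drain whatever accumulated between writeback passes */
	if (as_list_len > 0)
		purge_addresses();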