author     Benjamin Herrenschmidt <benh@kernel.crashing.org>   2008-07-14 14:29:49 +1000
committer  Benjamin Herrenschmidt <benh@kernel.crashing.org>   2008-07-14 14:29:49 +1000
commit     11c2d8174ed3dc4f1971564732689b4a39129702 (patch)
tree       ac00daa548ea8ac24ae7a5c8062312e335ab9858 /mm
parent     cde274c0c789404df8ece3f9e7d6506caf0127e2 (diff)
parent     bce7f793daec3e65ec5c5705d2457b81fe7b5725 (diff)
Merge commit 'origin/HEAD' into test-merge
Manual fixup of include/asm-powerpc/pgtable-ppc64.h
Diffstat (limited to 'mm')
-rw-r--r--  mm/allocpercpu.c     |  2
-rw-r--r--  mm/memory.c          | 16
-rw-r--r--  mm/mempolicy.c       |  6
-rw-r--r--  mm/migrate.c         |  2
-rw-r--r--  mm/page_alloc.c      |  1
-rw-r--r--  mm/slub.c            | 18
-rw-r--r--  mm/sparse-vmemmap.c  |  2
7 files changed, 36 insertions, 11 deletions
diff --git a/mm/allocpercpu.c b/mm/allocpercpu.c
index f4026bae6ee..05f2b4009cc 100644
--- a/mm/allocpercpu.c
+++ b/mm/allocpercpu.c
@@ -1,7 +1,7 @@
/*
* linux/mm/allocpercpu.c
*
- * Separated from slab.c August 11, 2006 Christoph Lameter <clameter@sgi.com>
+ * Separated from slab.c August 11, 2006 Christoph Lameter
*/
#include <linux/mm.h>
#include <linux/module.h>
diff --git a/mm/memory.c b/mm/memory.c
index d14b251a25a..2302d228fe0 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -1151,7 +1151,7 @@ int get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
* be processed until returning to user space.
*/
if (unlikely(test_tsk_thread_flag(tsk, TIF_MEMDIE)))
- return -ENOMEM;
+ return i ? i : -ENOMEM;
if (write)
foll_flags |= FOLL_WRITE;
@@ -1697,8 +1697,19 @@ static int do_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
struct page *dirty_page = NULL;
old_page = vm_normal_page(vma, address, orig_pte);
- if (!old_page)
+ if (!old_page) {
+ /*
+ * VM_MIXEDMAP !pfn_valid() case
+ *
+ * We should not cow pages in a shared writeable mapping.
+ * Just mark the pages writable as we can't do any dirty
+ * accounting on raw pfn maps.
+ */
+ if ((vma->vm_flags & (VM_WRITE|VM_SHARED)) ==
+ (VM_WRITE|VM_SHARED))
+ goto reuse;
goto gotten;
+ }
/*
* Take out anonymous pages first, anonymous shared vmas are
@@ -1751,6 +1762,7 @@ static int do_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
}
if (reuse) {
+reuse:
flush_cache_page(vma, address, pte_pfn(orig_pte));
entry = pte_mkyoung(orig_pte);
entry = maybe_mkwrite(pte_mkdirty(entry), vma);
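For context, the new reuse path covers write faults on shared, writable mappings of raw pfns. Below is a rough sketch of how a driver might create such a VM_MIXEDMAP mapping; my_dev_mmap() and MY_DEV_PHYS_BASE are hypothetical, and vm_insert_mixed() is used on the assumption that it is available in this tree:

#include <linux/mm.h>
#include <linux/fs.h>

#define MY_DEV_PHYS_BASE 0xfd000000UL	/* hypothetical device memory base */

static int my_dev_mmap(struct file *file, struct vm_area_struct *vma)
{
	unsigned long size = vma->vm_end - vma->vm_start;
	unsigned long pfn = MY_DEV_PHYS_BASE >> PAGE_SHIFT;
	unsigned long off;
	int ret;

	vma->vm_flags |= VM_MIXEDMAP;

	/* Insert raw pfns (no struct page).  For a MAP_SHARED, PROT_WRITE
	 * mapping like this, a write fault can take the reuse: path above
	 * rather than being COWed. */
	for (off = 0; off < size; off += PAGE_SIZE) {
		ret = vm_insert_mixed(vma, vma->vm_start + off,
				      pfn + (off >> PAGE_SHIFT));
		if (ret)
			return ret;
	}
	return 0;
}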
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index a37a5034f63..c94e58b192c 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -729,7 +729,11 @@ static long do_get_mempolicy(int *policy, nodemask_t *nmask,
} else {
*policy = pol == &default_policy ? MPOL_DEFAULT :
pol->mode;
- *policy |= pol->flags;
+ /*
+ * Internal mempolicy flags must be masked off before exposing
+ * the policy to userspace.
+ */
+ *policy |= (pol->flags & MPOL_MODE_FLAGS);
}
if (vma) {
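From userspace, the effect is that get_mempolicy(2) now reports only the mode plus the optional mode flags. A small, illustrative check is sketched below (not part of this patch); MODE_FLAG_BITS mirrors MPOL_MODE_FLAGS from include/linux/mempolicy.h, and the program links against libnuma (-lnuma) for get_mempolicy():

#include <numaif.h>
#include <stdio.h>

/* Mirrors include/linux/mempolicy.h; older numaif.h may lack these. */
#ifndef MPOL_F_STATIC_NODES
#define MPOL_F_STATIC_NODES	(1 << 15)
#define MPOL_F_RELATIVE_NODES	(1 << 14)
#endif
#define MODE_FLAG_BITS	(MPOL_F_STATIC_NODES | MPOL_F_RELATIVE_NODES)

int main(void)
{
	int policy;

	if (get_mempolicy(&policy, NULL, 0, NULL, 0) != 0) {
		perror("get_mempolicy");
		return 1;
	}
	/* Only the user-visible mode flags may be set here; internal
	 * flag bits are masked off by the kernel with this fix. */
	printf("mode=%d mode-flags=%#x\n",
	       policy & ~MODE_FLAG_BITS, policy & MODE_FLAG_BITS);
	return 0;
}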
diff --git a/mm/migrate.c b/mm/migrate.c
index 112bcaeaa10..55bd355d170 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -9,7 +9,7 @@
* IWAMOTO Toshihiro <iwamoto@valinux.co.jp>
* Hirokazu Takahashi <taka@valinux.co.jp>
* Dave Hansen <haveblue@us.ibm.com>
- * Christoph Lameter <clameter@sgi.com>
+ * Christoph Lameter
*/
#include <linux/migrate.h>
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 2f552955a02..f32fae3121f 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -2328,7 +2328,6 @@ static void build_zonelists(pg_data_t *pgdat)
static void build_zonelist_cache(pg_data_t *pgdat)
{
pgdat->node_zonelists[0].zlcache_ptr = NULL;
- pgdat->node_zonelists[1].zlcache_ptr = NULL;
}
#endif /* CONFIG_NUMA */
diff --git a/mm/slub.c b/mm/slub.c
index 0987d1cd943..315c392253c 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -5,7 +5,7 @@
* The allocator synchronizes using per slab locks and only
* uses a centralized lock to manage a pool of partial slabs.
*
- * (C) 2007 SGI, Christoph Lameter <clameter@sgi.com>
+ * (C) 2007 SGI, Christoph Lameter
*/
#include <linux/mm.h>
@@ -1628,9 +1628,11 @@ static __always_inline void *slab_alloc(struct kmem_cache *s,
void **object;
struct kmem_cache_cpu *c;
unsigned long flags;
+ unsigned int objsize;
local_irq_save(flags);
c = get_cpu_slab(s, smp_processor_id());
+ objsize = c->objsize;
if (unlikely(!c->freelist || !node_match(c, node)))
object = __slab_alloc(s, gfpflags, node, addr, c);
@@ -1643,7 +1645,7 @@ static __always_inline void *slab_alloc(struct kmem_cache *s,
local_irq_restore(flags);
if (unlikely((gfpflags & __GFP_ZERO) && object))
- memset(object, 0, c->objsize);
+ memset(object, 0, objsize);
return object;
}
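The hunk above copies c->objsize into a local while interrupts are still disabled, so the later memset() for __GFP_ZERO no longer reads through the per-cpu pointer after local_irq_restore(). A generic sketch of that pattern follows (not this patch's code; my_counter and read_my_counter() are hypothetical, and __get_cpu_var() is assumed to be the per-cpu accessor in this tree):

#include <linux/percpu.h>
#include <linux/irqflags.h>

static DEFINE_PER_CPU(unsigned int, my_counter);

static unsigned int read_my_counter(void)
{
	unsigned long flags;
	unsigned int snapshot;

	local_irq_save(flags);
	/* Values needed after irqs come back on are copied to locals:
	 * once local_irq_restore() runs, this CPU's per-cpu state may
	 * change, or the task may move to another CPU. */
	snapshot = __get_cpu_var(my_counter);
	local_irq_restore(flags);

	return snapshot;
}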
@@ -2995,8 +2997,6 @@ void __init kmem_cache_init(void)
create_kmalloc_cache(&kmalloc_caches[1],
"kmalloc-96", 96, GFP_KERNEL);
caches++;
- }
- if (KMALLOC_MIN_SIZE <= 128) {
create_kmalloc_cache(&kmalloc_caches[2],
"kmalloc-192", 192, GFP_KERNEL);
caches++;
@@ -3026,6 +3026,16 @@ void __init kmem_cache_init(void)
for (i = 8; i < KMALLOC_MIN_SIZE; i += 8)
size_index[(i - 1) / 8] = KMALLOC_SHIFT_LOW;
+ if (KMALLOC_MIN_SIZE == 128) {
+ /*
+ * The 192 byte sized cache is not used if the alignment
+ * is 128 byte. Redirect kmalloc to use the 256 byte cache
+ * instead.
+ */
+ for (i = 128 + 8; i <= 192; i += 8)
+ size_index[(i - 1) / 8] = 8;
+ }
+
slab_state = UP;
/* Provide the correct kmalloc names now that the caches are up */
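The redirection relies on slub's kmalloc index table: for sizes up to 192 bytes the cache index is size_index[(size - 1) / 8], and index 8 selects the 256-byte (2^8) cache. A toy userspace model of just that arithmetic (illustrative only, not the kernel code):

#include <stdio.h>

static unsigned char size_index[24];	/* entry n covers sizes 8n+1 .. 8n+8 */

static void setup_size_index(int kmalloc_min_size)
{
	int i;

	/* Default: sizes 129..192 map to the 192-byte cache (index 2). */
	for (i = 16; i < 24; i++)
		size_index[i] = 2;

	/* The patch's loop: with 128-byte minimum alignment the 192-byte
	 * cache is never created, so point 136..192 at kmalloc-256. */
	if (kmalloc_min_size == 128)
		for (i = 128 + 8; i <= 192; i += 8)
			size_index[(i - 1) / 8] = 8;
}

int main(void)
{
	setup_size_index(128);
	/* kmalloc(160): size_index[19] == 8, i.e. the 256-byte cache. */
	printf("kmalloc(160) uses cache index %d\n",
	       size_index[(160 - 1) / 8]);
	return 0;
}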
diff --git a/mm/sparse-vmemmap.c b/mm/sparse-vmemmap.c
index 99c4f36eb8a..a91b5f8fcaf 100644
--- a/mm/sparse-vmemmap.c
+++ b/mm/sparse-vmemmap.c
@@ -1,7 +1,7 @@
/*
* Virtual Memory Map support
*
- * (C) 2007 sgi. Christoph Lameter <clameter@sgi.com>.
+ * (C) 2007 sgi. Christoph Lameter.
*
* Virtual memory maps allow VM primitives pfn_to_page, page_to_pfn,
* virt_to_page, page_address() to be implemented as a base offset