Diffstat (limited to 'arch/powerpc/mm')
 arch/powerpc/mm/Makefile             |  9 ++++++---
 arch/powerpc/mm/fault.c              | 12 ++++++++++--
 arch/powerpc/mm/hash_native_64.c     | 13 +++++++++++--
 arch/powerpc/mm/init_64.c            |  2 ++
 arch/powerpc/mm/mmu_context_nohash.c | 19 ++++++++++++-------
 arch/powerpc/mm/numa.c               |  2 ++
 arch/powerpc/mm/slb.c                |  2 +-
 7 files changed, 44 insertions(+), 15 deletions(-)
diff --git a/arch/powerpc/mm/Makefile b/arch/powerpc/mm/Makefile
index b746f4ca4209..2d2192e48de7 100644
--- a/arch/powerpc/mm/Makefile
+++ b/arch/powerpc/mm/Makefile
@@ -2,6 +2,8 @@
# Makefile for the linux ppc-specific parts of the memory manager.
#
+subdir-ccflags-$(CONFIG_PPC_WERROR) := -Werror
+
ifeq ($(CONFIG_PPC64),y)
EXTRA_CFLAGS += -mno-minimal-toc
endif
@@ -11,10 +13,11 @@ obj-y := fault.o mem.o pgtable.o gup.o \
pgtable_$(CONFIG_WORD_SIZE).o
obj-$(CONFIG_PPC_MMU_NOHASH) += mmu_context_nohash.o tlb_nohash.o \
tlb_nohash_low.o
-hash-$(CONFIG_PPC_NATIVE) := hash_native_64.o
-obj-$(CONFIG_PPC64) += hash_utils_64.o \
+obj-$(CONFIG_PPC64) += mmap_64.o
+hash64-$(CONFIG_PPC_NATIVE) := hash_native_64.o
+obj-$(CONFIG_PPC_STD_MMU_64) += hash_utils_64.o \
slb_low.o slb.o stab.o \
- mmap_64.o $(hash-y)
+ mmap_64.o $(hash64-y)
obj-$(CONFIG_PPC_STD_MMU_32) += ppc_mmu_32.o
obj-$(CONFIG_PPC_STD_MMU) += hash_low_$(CONFIG_WORD_SIZE).o \
tlb_hash$(CONFIG_WORD_SIZE).o \
diff --git a/arch/powerpc/mm/fault.c b/arch/powerpc/mm/fault.c
index 76993941cac9..830bef0a1131 100644
--- a/arch/powerpc/mm/fault.c
+++ b/arch/powerpc/mm/fault.c
@@ -29,6 +29,7 @@
#include <linux/module.h>
#include <linux/kprobes.h>
#include <linux/kdebug.h>
+#include <linux/perf_counter.h>
#include <asm/firmware.h>
#include <asm/page.h>
@@ -170,6 +171,8 @@ int __kprobes do_page_fault(struct pt_regs *regs, unsigned long address,
die("Weird page fault", regs, SIGSEGV);
}
+ perf_swcounter_event(PERF_COUNT_SW_PAGE_FAULTS, 1, 0, regs, address);
+
/* When running in the kernel we expect faults to occur only to
* addresses in user space. All other faults represent errors in the
* kernel and should generate an OOPS. Unfortunately, in the case of an
@@ -299,7 +302,7 @@ good_area:
* the fault.
*/
survive:
- ret = handle_mm_fault(mm, vma, address, is_write);
+ ret = handle_mm_fault(mm, vma, address, is_write ? FAULT_FLAG_WRITE : 0);
if (unlikely(ret & VM_FAULT_ERROR)) {
if (ret & VM_FAULT_OOM)
goto out_of_memory;
@@ -309,6 +312,8 @@ good_area:
}
if (ret & VM_FAULT_MAJOR) {
current->maj_flt++;
+ perf_swcounter_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, 0,
+ regs, address);
#ifdef CONFIG_PPC_SMLPAR
if (firmware_has_feature(FW_FEATURE_CMO)) {
preempt_disable();
@@ -316,8 +321,11 @@ good_area:
preempt_enable();
}
#endif
- } else
+ } else {
current->min_flt++;
+ perf_swcounter_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, 0,
+ regs, address);
+ }
up_read(&mm->mmap_sem);
return 0;
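
The fault.c hunks do two things: handle_mm_fault() now takes a flags bitmask as its fourth argument instead of a bare write boolean, and the major/minor fault accounting gains perf software-counter events alongside the existing maj_flt/min_flt bookkeeping. A minimal sketch of the combined pattern follows; fault_and_account() is a hypothetical helper name, the real code lives inline in do_page_fault() as shown above.

/* Sketch only: condensed from the hunks above, error paths omitted.
 * fault_and_account() is a hypothetical helper, not kernel code. */
static int fault_and_account(struct mm_struct *mm,
			     struct vm_area_struct *vma,
			     unsigned long address, int is_write,
			     struct pt_regs *regs)
{
	int ret;

	/* The flags word replaces the old bare is_write argument. */
	ret = handle_mm_fault(mm, vma, address,
			      is_write ? FAULT_FLAG_WRITE : 0);

	if (ret & VM_FAULT_MAJOR) {
		current->maj_flt++;	/* fault required I/O */
		perf_swcounter_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, 0,
				     regs, address);
	} else {
		current->min_flt++;	/* resolved without I/O */
		perf_swcounter_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, 0,
				     regs, address);
	}
	return ret;
}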
diff --git a/arch/powerpc/mm/hash_native_64.c b/arch/powerpc/mm/hash_native_64.c
index 34e5c0b219b9..056d23a1b105 100644
--- a/arch/powerpc/mm/hash_native_64.c
+++ b/arch/powerpc/mm/hash_native_64.c
@@ -27,6 +27,7 @@
#include <asm/cputable.h>
#include <asm/udbg.h>
#include <asm/kexec.h>
+#include <asm/ppc-opcode.h>
#ifdef DEBUG_LOW
#define DBG_LOW(fmt...) udbg_printf(fmt)
@@ -49,14 +50,21 @@ static inline void __tlbie(unsigned long va, int psize, int ssize)
case MMU_PAGE_4K:
va &= ~0xffful;
va |= ssize << 8;
- asm volatile("tlbie %0,0" : : "r" (va) : "memory");
+ asm volatile(ASM_MMU_FTR_IFCLR("tlbie %0,0", PPC_TLBIE(%1,%0),
+ %2)
+ : : "r" (va), "r"(0), "i" (MMU_FTR_TLBIE_206)
+ : "memory");
break;
default:
penc = mmu_psize_defs[psize].penc;
va &= ~((1ul << mmu_psize_defs[psize].shift) - 1);
va |= penc << 12;
va |= ssize << 8;
- asm volatile("tlbie %0,1" : : "r" (va) : "memory");
+ va |= 1; /* L */
+ asm volatile(ASM_MMU_FTR_IFCLR("tlbie %0,1", PPC_TLBIE(%1,%0),
+ %2)
+ : : "r" (va), "r"(0), "i" (MMU_FTR_TLBIE_206)
+ : "memory");
break;
}
}
@@ -80,6 +88,7 @@ static inline void __tlbiel(unsigned long va, int psize, int ssize)
va &= ~((1ul << mmu_psize_defs[psize].shift) - 1);
va |= penc << 12;
va |= ssize << 8;
+ va |= 1; /* L */
asm volatile(".long 0x7c000224 | (%0 << 11) | (1 << 21)"
: : "r"(va) : "memory");
break;
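
The __tlbie() change accounts for the ISA 2.06 form of the tlbie instruction: the large-page indicator moves into the VA (the new "va |= 1" L bit) and the instruction takes an extra register operand, emitted via PPC_TLBIE(). Rather than branching at runtime, the kernel patches the correct encoding in at boot with the ASM_MMU_FTR_IFCLR() feature-fixup macro: if MMU_FTR_TLBIE_206 is clear, the old "tlbie %0,0" text is kept, otherwise the alternative sequence is copied over it. A stripped-down sketch of the idiom, where tlbie_4k() is a hypothetical wrapper mirroring the 4K case above:

/* Sketch of the boot-time feature-fixup idiom (hypothetical wrapper). */
static inline void tlbie_4k(unsigned long va, int ssize)
{
	va &= ~0xffful;		/* 4K page: clear the offset bits */
	va |= ssize << 8;	/* segment size field */

	/* Keep the pre-2.06 encoding if MMU_FTR_TLBIE_206 is clear,
	 * else patch in the 2.06 form with its extra RS operand. */
	asm volatile(ASM_MMU_FTR_IFCLR("tlbie %0,0",
				       PPC_TLBIE(%1,%0),
				       %2)
		     : : "r" (va), "r" (0), "i" (MMU_FTR_TLBIE_206)
		     : "memory");
}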
diff --git a/arch/powerpc/mm/init_64.c b/arch/powerpc/mm/init_64.c
index 3e6a6543f53a..68a821add28d 100644
--- a/arch/powerpc/mm/init_64.c
+++ b/arch/powerpc/mm/init_64.c
@@ -66,6 +66,7 @@
#include "mmu_decl.h"
+#ifdef CONFIG_PPC_STD_MMU_64
#if PGTABLE_RANGE > USER_VSID_RANGE
#warning Limited user VSID range means pagetable space is wasted
#endif
@@ -73,6 +74,7 @@
#if (TASK_SIZE_USER64 < PGTABLE_RANGE) && (TASK_SIZE_USER64 < USER_VSID_RANGE)
#warning TASK_SIZE is smaller than it needs to be.
#endif
+#endif /* CONFIG_PPC_STD_MMU_64 */
phys_addr_t memstart_addr = ~0;
phys_addr_t kernstart_addr;
diff --git a/arch/powerpc/mm/mmu_context_nohash.c b/arch/powerpc/mm/mmu_context_nohash.c
index 030d0005b4d2..8343986809c0 100644
--- a/arch/powerpc/mm/mmu_context_nohash.c
+++ b/arch/powerpc/mm/mmu_context_nohash.c
@@ -46,7 +46,7 @@ static unsigned int next_context, nr_free_contexts;
static unsigned long *context_map;
static unsigned long *stale_map[NR_CPUS];
static struct mm_struct **context_mm;
-static spinlock_t context_lock = SPIN_LOCK_UNLOCKED;
+static DEFINE_SPINLOCK(context_lock);
#define CTX_MAP_SIZE \
(sizeof(unsigned long) * (last_context / BITS_PER_LONG + 1))
@@ -73,7 +73,6 @@ static unsigned int steal_context_smp(unsigned int id)
struct mm_struct *mm;
unsigned int cpu, max;
- again:
max = last_context - first_context;
/* Attempt to free next_context first and then loop until we manage */
@@ -108,7 +107,9 @@ static unsigned int steal_context_smp(unsigned int id)
spin_unlock(&context_lock);
cpu_relax();
spin_lock(&context_lock);
- goto again;
+
+ /* This will cause the caller to try again */
+ return MMU_NO_CONTEXT;
}
#endif /* CONFIG_SMP */
@@ -194,6 +195,8 @@ void switch_mmu_context(struct mm_struct *prev, struct mm_struct *next)
WARN_ON(prev->context.active < 1);
prev->context.active--;
}
+
+ again:
#endif /* CONFIG_SMP */
/* If we already have a valid assigned context, skip all that */
@@ -212,7 +215,8 @@ void switch_mmu_context(struct mm_struct *prev, struct mm_struct *next)
#ifdef CONFIG_SMP
if (num_online_cpus() > 1) {
id = steal_context_smp(id);
- goto stolen;
+ if (id == MMU_NO_CONTEXT)
+ goto again;
}
#endif /* CONFIG_SMP */
id = steal_context_up(id);
@@ -272,6 +276,7 @@ int init_new_context(struct task_struct *t, struct mm_struct *mm)
*/
void destroy_context(struct mm_struct *mm)
{
+ unsigned long flags;
unsigned int id;
if (mm->context.id == MMU_NO_CONTEXT)
@@ -279,18 +284,18 @@ void destroy_context(struct mm_struct *mm)
WARN_ON(mm->context.active != 0);
- spin_lock(&context_lock);
+ spin_lock_irqsave(&context_lock, flags);
id = mm->context.id;
if (id != MMU_NO_CONTEXT) {
__clear_bit(id, context_map);
mm->context.id = MMU_NO_CONTEXT;
#ifdef DEBUG_MAP_CONSISTENCY
mm->context.active = 0;
- context_mm[id] = NULL;
#endif
+ context_mm[id] = NULL;
nr_free_contexts++;
}
- spin_unlock(&context_lock);
+ spin_unlock_irqrestore(&context_lock, flags);
}
#ifdef CONFIG_SMP
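
Three related fixes land in mmu_context_nohash.c. The deprecated SPIN_LOCK_UNLOCKED initializer is replaced by DEFINE_SPINLOCK(); context_lock is now taken with spin_lock_irqsave() in destroy_context() so it is safe regardless of the interrupt state at the call site; and steal_context_smp() no longer loops internally via "goto again": after dropping and retaking the lock, its view of the context map may be stale, so it returns MMU_NO_CONTEXT and lets the caller restart from scratch. A condensed sketch of the caller-side retry protocol, where next_free_context() is a hypothetical placeholder for the allocation logic:

	/* Condensed pseudologic from switch_mmu_context() above;
	 * next_free_context() is a hypothetical placeholder. */
again:
	id = next_free_context();
	if (id == MMU_NO_CONTEXT) {
		if (num_online_cpus() > 1) {
			id = steal_context_smp(id);
			if (id == MMU_NO_CONTEXT)
				goto again;	/* map changed while unlocked */
		} else {
			id = steal_context_up(id);
		}
	}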
diff --git a/arch/powerpc/mm/numa.c b/arch/powerpc/mm/numa.c
index 9047145095aa..b037d95eeadc 100644
--- a/arch/powerpc/mm/numa.c
+++ b/arch/powerpc/mm/numa.c
@@ -981,6 +981,8 @@ void __init do_init_bootmem(void)
mark_reserved_regions_for_nid(nid);
sparse_memory_present_with_active_regions(nid);
}
+
+ init_bootmem_done = 1;
}
void __init paging_init(void)
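
The numa.c hunk sets init_bootmem_done at the end of the NUMA do_init_bootmem(), matching what the non-NUMA path in mem.c already does; early allocators key off that flag to decide whether bootmem is usable yet. A sketch of the idea, with early_alloc() as a hypothetical stand-in for the real early page-table and stack allocators:

	/* Sketch: early allocators pick a backend based on the flag.
	 * early_alloc() is a hypothetical stand-in. */
	static void *early_alloc(unsigned long size)
	{
		if (init_bootmem_done)
			return alloc_bootmem(size);	/* bootmem is up */
		return __va(lmb_alloc(size, size));	/* fall back to LMB */
	}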
diff --git a/arch/powerpc/mm/slb.c b/arch/powerpc/mm/slb.c
index 89497fb04280..3b52c80e5e33 100644
--- a/arch/powerpc/mm/slb.c
+++ b/arch/powerpc/mm/slb.c
@@ -2,7 +2,7 @@
* PowerPC64 SLB support.
*
* Copyright (C) 2004 David Gibson <dwg@au.ibm.com>, IBM
- * Based on earlier code writteh by:
+ * Based on earlier code written by:
* Dave Engebretsen and Mike Corrigan {engebret|mikejc}@us.ibm.com
* Copyright (c) 2001 Dave Engebretsen
* Copyright (C) 2002 Anton Blanchard <anton@au.ibm.com>, IBM