Diffstat (limited to 'arch/powerpc/mm')
-rw-r--r--  arch/powerpc/mm/hash_low_32.S      34
-rw-r--r--  arch/powerpc/mm/hash_low_64.S      31
-rw-r--r--  arch/powerpc/mm/hash_native_64.c    2
-rw-r--r--  arch/powerpc/mm/hash_utils_64.c    84
-rw-r--r--  arch/powerpc/mm/lmb.c              43
-rw-r--r--  arch/powerpc/mm/mem.c               6
-rw-r--r--  arch/powerpc/mm/mmu_context_32.c    2
-rw-r--r--  arch/powerpc/mm/mmu_context_64.c    3
-rw-r--r--  arch/powerpc/mm/numa.c              8
-rw-r--r--  arch/powerpc/mm/ppc_mmu_32.c       16
-rw-r--r--  arch/powerpc/mm/slb.c              32
-rw-r--r--  arch/powerpc/mm/slb_low.S          17
-rw-r--r--  arch/powerpc/mm/stab.c              4
-rw-r--r--  arch/powerpc/mm/tlb_32.c            6
-rw-r--r--  arch/powerpc/mm/tlb_64.c            5
15 files changed, 185 insertions, 108 deletions
diff --git a/arch/powerpc/mm/hash_low_32.S b/arch/powerpc/mm/hash_low_32.S
index ea469eefa14..94255beeecd 100644
--- a/arch/powerpc/mm/hash_low_32.S
+++ b/arch/powerpc/mm/hash_low_32.S
@@ -74,12 +74,6 @@ _GLOBAL(hash_page_sync)
*/
.text
_GLOBAL(hash_page)
-#ifdef CONFIG_PPC64BRIDGE
- mfmsr r0
- clrldi r0,r0,1 /* make sure it's in 32-bit mode */
- MTMSRD(r0)
- isync
-#endif
tophys(r7,0) /* gets -KERNELBASE into r7 */
#ifdef CONFIG_SMP
addis r8,r7,mmu_hash_lock@h
@@ -285,7 +279,6 @@ Hash_base = 0xc0180000
Hash_bits = 12 /* e.g. 256kB hash table */
Hash_msk = (((1 << Hash_bits) - 1) * 64)
-#ifndef CONFIG_PPC64BRIDGE
/* defines for the PTE format for 32-bit PPCs */
#define PTE_SIZE 8
#define PTEG_SIZE 64
@@ -299,21 +292,6 @@ Hash_msk = (((1 << Hash_bits) - 1) * 64)
#define SET_V(r) oris r,r,PTE_V@h
#define CLR_V(r,t) rlwinm r,r,0,1,31
-#else
-/* defines for the PTE format for 64-bit PPCs */
-#define PTE_SIZE 16
-#define PTEG_SIZE 128
-#define LG_PTEG_SIZE 7
-#define LDPTEu ldu
-#define STPTE std
-#define CMPPTE cmpd
-#define PTE_H 2
-#define PTE_V 1
-#define TST_V(r) andi. r,r,PTE_V
-#define SET_V(r) ori r,r,PTE_V
-#define CLR_V(r,t) li t,PTE_V; andc r,r,t
-#endif /* CONFIG_PPC64BRIDGE */
-
#define HASH_LEFT 31-(LG_PTEG_SIZE+Hash_bits-1)
#define HASH_RIGHT 31-LG_PTEG_SIZE
@@ -331,14 +309,8 @@ BEGIN_FTR_SECTION
END_FTR_SECTION_IFSET(CPU_FTR_NEED_COHERENT)
/* Construct the high word of the PPC-style PTE (r5) */
-#ifndef CONFIG_PPC64BRIDGE
rlwinm r5,r3,7,1,24 /* put VSID in 0x7fffff80 bits */
rlwimi r5,r4,10,26,31 /* put in API (abbrev page index) */
-#else /* CONFIG_PPC64BRIDGE */
- clrlwi r3,r3,8 /* reduce vsid to 24 bits */
- sldi r5,r3,12 /* shift vsid into position */
- rlwimi r5,r4,16,20,24 /* put in API (abbrev page index) */
-#endif /* CONFIG_PPC64BRIDGE */
SET_V(r5) /* set V (valid) bit */
/* Get the address of the primary PTE group in the hash table (r3) */
@@ -516,14 +488,8 @@ _GLOBAL(flush_hash_pages)
add r3,r3,r0 /* note code below trims to 24 bits */
/* Construct the high word of the PPC-style PTE (r11) */
-#ifndef CONFIG_PPC64BRIDGE
rlwinm r11,r3,7,1,24 /* put VSID in 0x7fffff80 bits */
rlwimi r11,r4,10,26,31 /* put in API (abbrev page index) */
-#else /* CONFIG_PPC64BRIDGE */
- clrlwi r3,r3,8 /* reduce vsid to 24 bits */
- sldi r11,r3,12 /* shift vsid into position */
- rlwimi r11,r4,16,20,24 /* put in API (abbrev page index) */
-#endif /* CONFIG_PPC64BRIDGE */
SET_V(r11) /* set V (valid) bit */
#ifdef CONFIG_SMP
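
(Aside: the rlwinm/rlwimi pair retained above builds the high word of a classic 32-bit hashed PTE from the VSID and the effective address. Below is a minimal C model of that computation; build_pte_hi is a made-up name, and PTE_V is assumed to be the MSB of the high word, 0x80000000, matching the 32-bit defines this patch keeps.)

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* Assumption: the valid bit is the MSB of the PTE high word,
 * as in the 32-bit PTE defines kept by this patch. */
#define PTE_V 0x80000000u

/*
 * C model of:
 *   rlwinm r5,r3,7,1,24    - put VSID in the 0x7fffff80 field
 *   rlwimi r5,r4,10,26,31  - insert API (top 6 bits of page index)
 *   SET_V(r5)              - set the valid bit
 * with r3 = VSID and r4 = effective address.
 */
static uint32_t build_pte_hi(uint32_t vsid, uint32_t ea)
{
        uint32_t hi = (vsid << 7) & 0x7fffff80u; /* VSID field */

        hi |= (ea >> 22) & 0x3fu;   /* abbreviated page index */
        return hi | PTE_V;          /* valid bit */
}

int main(void)
{
        printf("0x%08" PRIx32 "\n", build_pte_hi(0x123456, 0xc0001234));
        return 0;
}
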
diff --git a/arch/powerpc/mm/hash_low_64.S b/arch/powerpc/mm/hash_low_64.S
index e0d02c4a261..52e91423895 100644
--- a/arch/powerpc/mm/hash_low_64.S
+++ b/arch/powerpc/mm/hash_low_64.S
@@ -136,6 +136,7 @@ _GLOBAL(__hash_page_4K)
and r0,r0,r4 /* _PAGE_RW & _PAGE_DIRTY ->r0 bit 30*/
andc r0,r30,r0 /* r0 = pte & ~r0 */
rlwimi r3,r0,32-1,31,31 /* Insert result into PP lsb */
+ ori r3,r3,HPTE_R_C /* Always add "C" bit for perf. */
/* We eventually do the icache sync here (maybe inline that
* code rather than call a C function...)
@@ -368,6 +369,7 @@ _GLOBAL(__hash_page_4K)
rlwinm r30,r4,32-9+7,31-7,31-7 /* _PAGE_RW -> _PAGE_DIRTY */
or r30,r30,r31
ori r30,r30,_PAGE_BUSY | _PAGE_ACCESSED | _PAGE_HASHPTE
+ oris r30,r30,_PAGE_COMBO@h
/* Write the linux PTE atomically (setting busy) */
stdcx. r30,0,r6
bne- 1b
@@ -400,6 +402,7 @@ _GLOBAL(__hash_page_4K)
and r0,r0,r4 /* _PAGE_RW & _PAGE_DIRTY ->r0 bit 30*/
andc r0,r30,r0 /* r0 = pte & ~r0 */
rlwimi r3,r0,32-1,31,31 /* Insert result into PP lsb */
+ ori r3,r3,HPTE_R_C /* Always add "C" bit for perf. */
/* We eventually do the icache sync here (maybe inline that
* code rather than call a C function...)
@@ -426,6 +429,14 @@ END_FTR_SECTION(CPU_FTR_NOEXECUTE|CPU_FTR_COHERENT_ICACHE, CPU_FTR_NOEXECUTE)
andi. r0,r31,_PAGE_HASHPTE
li r26,0 /* Default hidx */
beq htab_insert_pte
+
+ /*
+ * Check if the pte was already inserted into the hash table
+ * as a 64k HW page, and invalidate the 64k HPTE if so.
+ */
+ andis. r0,r31,_PAGE_COMBO@h
+ beq htab_inval_old_hpte
+
ld r6,STK_PARM(r6)(r1)
ori r26,r6,0x8000 /* Load the hidx mask */
ld r26,0(r26)
@@ -496,6 +507,19 @@ _GLOBAL(htab_call_hpte_remove)
/* Try all again */
b htab_insert_pte
+ /*
+ * Call out to C code to invalidate a 64k HW HPTE that is
+ * useless now that the segment has been switched to 4k pages.
+ */
+htab_inval_old_hpte:
+ mr r3,r29 /* virtual addr */
+ mr r4,r31 /* PTE.pte */
+ li r5,0 /* PTE.hidx */
+ li r6,MMU_PAGE_64K /* psize */
+ ld r7,STK_PARM(r8)(r1) /* local */
+ bl .flush_hash_page
+ b htab_insert_pte
+
htab_bail_ok:
li r3,0
b htab_bail
@@ -636,6 +660,12 @@ _GLOBAL(__hash_page_64K)
* is changing this PTE anyway and might hash it.
*/
bne- ht64_bail_ok
+BEGIN_FTR_SECTION
+ /* Check if PTE has the cache-inhibit bit set */
+ andi. r0,r31,_PAGE_NO_CACHE
+ /* If so, bail out and refault as a 4k page */
+ bne- ht64_bail_ok
+END_FTR_SECTION_IFCLR(CPU_FTR_CI_LARGE_PAGE)
/* Prepare new PTE value (turn access RW into DIRTY, then
* add BUSY,HASHPTE and ACCESSED)
*/
@@ -671,6 +701,7 @@ _GLOBAL(__hash_page_64K)
and r0,r0,r4 /* _PAGE_RW & _PAGE_DIRTY ->r0 bit 30*/
andc r0,r30,r0 /* r0 = pte & ~r0 */
rlwimi r3,r0,32-1,31,31 /* Insert result into PP lsb */
+ ori r3,r3,HPTE_R_C /* Always add "C" bit for perf. */
/* We eventually do the icache sync here (maybe inline that
* code rather than call a C function...)
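
The hash_low_64.S hunks do three related things: pre-set the HPTE "C" (changed) bit on insert, mark PTEs hashed as 4k sub-pages with _PAGE_COMBO, and flush a stale 64k HPTE before a 4k insert. A rough, self-contained C sketch of that control flow follows; the flag values and helper names here are invented for illustration:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Illustrative flag values only; the real ones live in the
 * 64k-pages pgtable headers. */
#define _PAGE_NO_CACHE 0x1u
#define _PAGE_HASHPTE  0x2u
#define _PAGE_COMBO    0x4u   /* "hashed as 4k sub-pages" marker */

/* Stand-in for the CPU_FTR_CI_LARGE_PAGE feature test. */
static bool ci_large_page = false;

/* Stand-in for the flush_hash_page() call the asm makes. */
static void flush_old_64k_hpte(unsigned long va)
{
        printf("flushing stale 64k HPTE for va %#lx\n", va);
}

/* __hash_page_64K's new early-out: without CI large pages, a
 * non-cacheable PTE is refused so the fault retries as 4k. */
static int hash_page_64k(unsigned long va, uint64_t pte)
{
        (void)va;
        if (!ci_large_page && (pte & _PAGE_NO_CACHE))
                return -1;      /* bail out; caller demotes to 4k */
        /* ... normal 64k HPTE insertion would follow ... */
        return 0;
}

/* __hash_page_4K's new step: a PTE hashed earlier as a true 64k
 * page (_PAGE_HASHPTE set, _PAGE_COMBO clear) gets its old HPTE
 * flushed before the 4k insert. */
static int hash_page_4k(unsigned long va, uint64_t *pte)
{
        if ((*pte & _PAGE_HASHPTE) && !(*pte & _PAGE_COMBO))
                flush_old_64k_hpte(va);
        *pte |= _PAGE_COMBO;    /* now hashed as 4k sub-pages */
        /* ... 4k HPTE insertion would follow ... */
        return 0;
}

int main(void)
{
        uint64_t pte = _PAGE_HASHPTE | _PAGE_NO_CACHE;

        if (hash_page_64k(0xd0000000ul, pte) < 0)
                hash_page_4k(0xd0000000ul, &pte);
        return 0;
}
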
diff --git a/arch/powerpc/mm/hash_native_64.c b/arch/powerpc/mm/hash_native_64.c
index 994856e55b7..a0f3cbd00d3 100644
--- a/arch/powerpc/mm/hash_native_64.c
+++ b/arch/powerpc/mm/hash_native_64.c
@@ -238,7 +238,7 @@ static long native_hpte_updatepp(unsigned long slot, unsigned long newpp,
DBG_LOW(" -> hit\n");
/* Update the HPTE */
hptep->r = (hptep->r & ~(HPTE_R_PP | HPTE_R_N)) |
- (newpp & (HPTE_R_PP | HPTE_R_N));
+ (newpp & (HPTE_R_PP | HPTE_R_N | HPTE_R_C));
native_unlock_hpte(hptep);
}
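
The one-line change above widens the mask so that a "C" bit supplied in newpp survives native_hpte_updatepp() instead of being stripped along with the non-protection bits. A tiny standalone illustration; treat the HPTE_R_* values as illustrative:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* Illustrative values; the real HPTE_R_* constants are in the
 * powerpc MMU headers. */
#define HPTE_R_PP 0x3ull
#define HPTE_R_N  0x4ull
#define HPTE_R_C  0x80ull

/* Before the patch, the C bit was masked out of newpp; now it is
 * carried into the HPTE along with the protection bits. */
static uint64_t update_hpte_r(uint64_t r, uint64_t newpp)
{
        return (r & ~(HPTE_R_PP | HPTE_R_N)) |
               (newpp & (HPTE_R_PP | HPTE_R_N | HPTE_R_C));
}

int main(void)
{
        /* A pre-set C bit in newpp survives the update. */
        printf("%#" PRIx64 "\n", update_hpte_r(0x1200, 0x2 | HPTE_R_C));
        return 0;
}
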
diff --git a/arch/powerpc/mm/hash_utils_64.c b/arch/powerpc/mm/hash_utils_64.c
index c006d903963..d03fd2b4445 100644
--- a/arch/powerpc/mm/hash_utils_64.c
+++ b/arch/powerpc/mm/hash_utils_64.c
@@ -92,10 +92,15 @@ unsigned long htab_size_bytes;
unsigned long htab_hash_mask;
int mmu_linear_psize = MMU_PAGE_4K;
int mmu_virtual_psize = MMU_PAGE_4K;
+int mmu_vmalloc_psize = MMU_PAGE_4K;
+int mmu_io_psize = MMU_PAGE_4K;
#ifdef CONFIG_HUGETLB_PAGE
int mmu_huge_psize = MMU_PAGE_16M;
unsigned int HPAGE_SHIFT;
#endif
+#ifdef CONFIG_PPC_64K_PAGES
+int mmu_ci_restrictions;
+#endif
/* There are definitions of page sizes arrays to be used when none
* is provided by the firmware.
@@ -308,20 +313,31 @@ static void __init htab_init_page_sizes(void)
else if (mmu_psize_defs[MMU_PAGE_1M].shift)
mmu_linear_psize = MMU_PAGE_1M;
+#ifdef CONFIG_PPC_64K_PAGES
/*
* Pick a size for the ordinary pages. Default is 4K, we support
- * 64K if cache inhibited large pages are supported by the
- * processor
+ * 64K for user mappings and vmalloc if supported by the processor.
+ * We only use 64k for ioremap if the processor
+ * (and firmware) support cache-inhibited large pages.
+ * If not, we use 4k and set mmu_ci_restrictions so that
+ * hash_page knows to switch processes that use cache-inhibited
+ * mappings to 4k pages.
*/
-#ifdef CONFIG_PPC_64K_PAGES
- if (mmu_psize_defs[MMU_PAGE_64K].shift &&
- cpu_has_feature(CPU_FTR_CI_LARGE_PAGE))
+ if (mmu_psize_defs[MMU_PAGE_64K].shift) {
mmu_virtual_psize = MMU_PAGE_64K;
+ mmu_vmalloc_psize = MMU_PAGE_64K;
+ if (cpu_has_feature(CPU_FTR_CI_LARGE_PAGE))
+ mmu_io_psize = MMU_PAGE_64K;
+ else
+ mmu_ci_restrictions = 1;
+ }
#endif
- printk(KERN_INFO "Page orders: linear mapping = %d, others = %d\n",
+ printk(KERN_DEBUG "Page orders: linear mapping = %d, "
+ "virtual = %d, io = %d\n",
mmu_psize_defs[mmu_linear_psize].shift,
- mmu_psize_defs[mmu_virtual_psize].shift);
+ mmu_psize_defs[mmu_virtual_psize].shift,
+ mmu_psize_defs[mmu_io_psize].shift);
#ifdef CONFIG_HUGETLB_PAGE
/* Init large page size. Currently, we pick 16M or 1M depending
@@ -556,6 +572,7 @@ int hash_page(unsigned long ea, unsigned long access, unsigned long trap)
pte_t *ptep;
cpumask_t tmp;
int rc, user_region = 0, local = 0;
+ int psize;
DBG_LOW("hash_page(ea=%016lx, access=%lx, trap=%lx\n",
ea, access, trap);
@@ -575,10 +592,15 @@ int hash_page(unsigned long ea, unsigned long access, unsigned long trap)
return 1;
}
vsid = get_vsid(mm->context.id, ea);
+ psize = mm->context.user_psize;
break;
case VMALLOC_REGION_ID:
mm = &init_mm;
vsid = get_kernel_vsid(ea);
+ if (ea < VMALLOC_END)
+ psize = mmu_vmalloc_psize;
+ else
+ psize = mmu_io_psize;
break;
default:
/* Not a valid range
@@ -629,7 +651,40 @@ int hash_page(unsigned long ea, unsigned long access, unsigned long trap)
#ifndef CONFIG_PPC_64K_PAGES
rc = __hash_page_4K(ea, access, vsid, ptep, trap, local);
#else
- if (mmu_virtual_psize == MMU_PAGE_64K)
+ if (mmu_ci_restrictions) {
+ /* If this PTE is non-cacheable, switch to 4k */
+ if (psize == MMU_PAGE_64K &&
+ (pte_val(*ptep) & _PAGE_NO_CACHE)) {
+ if (user_region) {
+ psize = MMU_PAGE_4K;
+ mm->context.user_psize = MMU_PAGE_4K;
+ mm->context.sllp = SLB_VSID_USER |
+ mmu_psize_defs[MMU_PAGE_4K].sllp;
+ } else if (ea < VMALLOC_END) {
+ /*
+ * some driver did a non-cacheable mapping
+ * in vmalloc space, so switch vmalloc
+ * to 4k pages
+ */
+ printk(KERN_ALERT "Reducing vmalloc segment "
+ "to 4kB pages because of "
+ "non-cacheable mapping\n");
+ psize = mmu_vmalloc_psize = MMU_PAGE_4K;
+ }
+ }
+ if (user_region) {
+ if (psize != get_paca()->context.user_psize) {
+ get_paca()->context = mm->context;
+ slb_flush_and_rebolt();
+ }
+ } else if (get_paca()->vmalloc_sllp !=
+ mmu_psize_defs[mmu_vmalloc_psize].sllp) {
+ get_paca()->vmalloc_sllp =
+ mmu_psize_defs[mmu_vmalloc_psize].sllp;
+ slb_flush_and_rebolt();
+ }
+ }
+ if (psize == MMU_PAGE_64K)
rc = __hash_page_64K(ea, access, vsid, ptep, trap, local);
else
rc = __hash_page_4K(ea, access, vsid, ptep, trap, local);
@@ -681,7 +736,18 @@ void hash_preload(struct mm_struct *mm, unsigned long ea,
#ifndef CONFIG_PPC_64K_PAGES
__hash_page_4K(ea, access, vsid, ptep, trap, local);
#else
- if (mmu_virtual_psize == MMU_PAGE_64K)
+ if (mmu_ci_restrictions) {
+ /* If this PTE is non-cacheable, switch to 4k */
+ if (mm->context.user_psize == MMU_PAGE_64K &&
+ (pte_val(*ptep) & _PAGE_NO_CACHE)) {
+ mm->context.user_psize = MMU_PAGE_4K;
+ mm->context.sllp = SLB_VSID_USER |
+ mmu_psize_defs[MMU_PAGE_4K].sllp;
+ get_paca()->context = mm->context;
+ slb_flush_and_rebolt();
+ }
+ }
+ if (mm->context.user_psize == MMU_PAGE_64K)
__hash_page_64K(ea, access, vsid, ptep, trap, local);
else
__hash_page_4K(ea, access, vsid, ptep, trap, local);
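
Taken together, the hash_utils_64.c hunks implement one policy: prefer 64K segments where the hardware allows it, and demote a segment to 4k the first time a cache-inhibited mapping faults on a CPU without CPU_FTR_CI_LARGE_PAGE. A condensed standalone sketch of the boot-time selection; pick_page_sizes() is not a real kernel function:

#include <stdbool.h>
#include <stdio.h>

enum { MMU_PAGE_4K, MMU_PAGE_64K };

static int mmu_virtual_psize = MMU_PAGE_4K;
static int mmu_vmalloc_psize = MMU_PAGE_4K;
static int mmu_io_psize      = MMU_PAGE_4K;
static int mmu_ci_restrictions;

/* Condenses the hunk in htab_init_page_sizes(). */
static void pick_page_sizes(bool have_64k, bool ci_large_page)
{
        if (!have_64k)
                return;                       /* 4k everywhere */
        mmu_virtual_psize = MMU_PAGE_64K;     /* user mappings */
        mmu_vmalloc_psize = MMU_PAGE_64K;     /* vmalloc segment */
        if (ci_large_page)
                mmu_io_psize = MMU_PAGE_64K;  /* ioremap too */
        else
                mmu_ci_restrictions = 1;      /* demote at fault time */
}

int main(void)
{
        pick_page_sizes(true, false);
        printf("virtual=%d vmalloc=%d io=%d ci_restrictions=%d\n",
               mmu_virtual_psize, mmu_vmalloc_psize,
               mmu_io_psize, mmu_ci_restrictions);
        return 0;
}
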
diff --git a/arch/powerpc/mm/lmb.c b/arch/powerpc/mm/lmb.c
index 417d5851855..8b6f522655a 100644
--- a/arch/powerpc/mm/lmb.c
+++ b/arch/powerpc/mm/lmb.c
@@ -89,20 +89,25 @@ static long __init lmb_regions_adjacent(struct lmb_region *rgn,
return lmb_addrs_adjacent(base1, size1, base2, size2);
}
-/* Assumption: base addr of region 1 < base addr of region 2 */
-static void __init lmb_coalesce_regions(struct lmb_region *rgn,
- unsigned long r1, unsigned long r2)
+static void __init lmb_remove_region(struct lmb_region *rgn, unsigned long r)
{
unsigned long i;
- rgn->region[r1].size += rgn->region[r2].size;
- for (i=r2; i < rgn->cnt-1; i++) {
- rgn->region[i].base = rgn->region[i+1].base;
- rgn->region[i].size = rgn->region[i+1].size;
+ for (i = r; i < rgn->cnt - 1; i++) {
+ rgn->region[i].base = rgn->region[i + 1].base;
+ rgn->region[i].size = rgn->region[i + 1].size;
}
rgn->cnt--;
}
+/* Assumption: base addr of region 1 < base addr of region 2 */
+static void __init lmb_coalesce_regions(struct lmb_region *rgn,
+ unsigned long r1, unsigned long r2)
+{
+ rgn->region[r1].size += rgn->region[r2].size;
+ lmb_remove_region(rgn, r2);
+}
+
/* This routine called with relocation disabled. */
void __init lmb_init(void)
{
@@ -294,17 +299,16 @@ unsigned long __init lmb_end_of_DRAM(void)
return (lmb.memory.region[idx].base + lmb.memory.region[idx].size);
}
-/*
- * Truncate the lmb list to memory_limit if it's set
- * You must call lmb_analyze() after this.
- */
+/* You must call lmb_analyze() after this. */
void __init lmb_enforce_memory_limit(unsigned long memory_limit)
{
unsigned long i, limit;
+ struct lmb_property *p;
if (! memory_limit)
return;
+ /* Truncate the lmb regions to satisfy the memory limit. */
limit = memory_limit;
for (i = 0; i < lmb.memory.cnt; i++) {
if (limit > lmb.memory.region[i].size) {
@@ -316,4 +320,21 @@ void __init lmb_enforce_memory_limit(unsigned long memory_limit)
lmb.memory.cnt = i + 1;
break;
}
+
+ lmb.rmo_size = lmb.memory.region[0].size;
+
+ /* And truncate any reserves above the limit also. */
+ for (i = 0; i < lmb.reserved.cnt; i++) {
+ p = &lmb.reserved.region[i];
+
+ if (p->base > memory_limit)
+ p->size = 0;
+ else if ((p->base + p->size) > memory_limit)
+ p->size = memory_limit - p->base;
+
+ if (p->size == 0) {
+ lmb_remove_region(&lmb.reserved, i);
+ i--;
+ }
+ }
}
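
The lmb.c rework factors the copy-down loop out of lmb_coalesce_regions() into lmb_remove_region() and reuses it to drop reserved regions that end up empty after clipping against the memory limit. A standalone model of that logic with simplified types:

#include <stdio.h>

struct region { unsigned long base, size; };
struct region_list { unsigned long cnt; struct region region[8]; };

/* Remove entry r by shifting the tail down, as lmb_remove_region()
 * now does. */
static void remove_region(struct region_list *rgn, unsigned long r)
{
        unsigned long i;

        for (i = r; i < rgn->cnt - 1; i++)
                rgn->region[i] = rgn->region[i + 1];
        rgn->cnt--;
}

/* Clip reserves against the limit, dropping any that become empty,
 * mirroring the new loop in lmb_enforce_memory_limit(). */
static void enforce_limit(struct region_list *reserved, unsigned long limit)
{
        unsigned long i;

        for (i = 0; i < reserved->cnt; i++) {
                struct region *p = &reserved->region[i];

                if (p->base > limit)
                        p->size = 0;                 /* entirely above */
                else if (p->base + p->size > limit)
                        p->size = limit - p->base;   /* clip the tail */

                if (p->size == 0)
                        remove_region(reserved, i--); /* compact list */
        }
}

int main(void)
{
        struct region_list r = { 2, { { 0x100, 0x100 }, { 0x400, 0x100 } } };

        enforce_limit(&r, 0x180);
        printf("cnt=%lu size0=%#lx\n", r.cnt, r.region[0].size);
        return 0;
}
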
diff --git a/arch/powerpc/mm/mem.c b/arch/powerpc/mm/mem.c
index 741dd8802d4..69f3b9a20be 100644
--- a/arch/powerpc/mm/mem.c
+++ b/arch/powerpc/mm/mem.c
@@ -299,9 +299,9 @@ void __init paging_init(void)
kmap_prot = PAGE_KERNEL;
#endif /* CONFIG_HIGHMEM */
- printk(KERN_INFO "Top of RAM: 0x%lx, Total RAM: 0x%lx\n",
+ printk(KERN_DEBUG "Top of RAM: 0x%lx, Total RAM: 0x%lx\n",
top_of_ram, total_ram);
- printk(KERN_INFO "Memory hole size: %ldMB\n",
+ printk(KERN_DEBUG "Memory hole size: %ldMB\n",
(top_of_ram - total_ram) >> 20);
/*
* All pages are DMA-able so we put them all in the DMA zone.
@@ -380,7 +380,7 @@ void __init mem_init(void)
totalhigh_pages++;
}
totalram_pages += totalhigh_pages;
- printk(KERN_INFO "High memory: %luk\n",
+ printk(KERN_DEBUG "High memory: %luk\n",
totalhigh_pages << (PAGE_SHIFT-10));
}
#endif /* CONFIG_HIGHMEM */
diff --git a/arch/powerpc/mm/mmu_context_32.c b/arch/powerpc/mm/mmu_context_32.c
index a8816e0f6a8..e326e4249e1 100644
--- a/arch/powerpc/mm/mmu_context_32.c
+++ b/arch/powerpc/mm/mmu_context_32.c
@@ -30,7 +30,7 @@
#include <asm/mmu_context.h>
#include <asm/tlbflush.h>
-mm_context_t next_mmu_context;
+unsigned long next_mmu_context;
unsigned long context_map[LAST_CONTEXT / BITS_PER_LONG + 1];
#ifdef FEW_CONTEXTS
atomic_t nr_free_contexts;
diff --git a/arch/powerpc/mm/mmu_context_64.c b/arch/powerpc/mm/mmu_context_64.c
index 714a84dd8d5..65d18dca266 100644
--- a/arch/powerpc/mm/mmu_context_64.c
+++ b/arch/powerpc/mm/mmu_context_64.c
@@ -49,6 +49,9 @@ again:
}
mm->context.id = index;
+ mm->context.user_psize = mmu_virtual_psize;
+ mm->context.sllp = SLB_VSID_USER |
+ mmu_psize_defs[mmu_virtual_psize].sllp;
return 0;
}
diff --git a/arch/powerpc/mm/numa.c b/arch/powerpc/mm/numa.c
index 092355f3739..aa98cb3b59d 100644
--- a/arch/powerpc/mm/numa.c
+++ b/arch/powerpc/mm/numa.c
@@ -487,9 +487,9 @@ static void __init setup_nonnuma(void)
unsigned long total_ram = lmb_phys_mem_size();
unsigned int i;
- printk(KERN_INFO "Top of RAM: 0x%lx, Total RAM: 0x%lx\n",
+ printk(KERN_DEBUG "Top of RAM: 0x%lx, Total RAM: 0x%lx\n",
top_of_ram, total_ram);
- printk(KERN_INFO "Memory hole size: %ldMB\n",
+ printk(KERN_DEBUG "Memory hole size: %ldMB\n",
(top_of_ram - total_ram) >> 20);
for (i = 0; i < lmb.memory.cnt; ++i)
@@ -507,7 +507,7 @@ void __init dump_numa_cpu_topology(void)
return;
for_each_online_node(node) {
- printk(KERN_INFO "Node %d CPUs:", node);
+ printk(KERN_DEBUG "Node %d CPUs:", node);
count = 0;
/*
@@ -543,7 +543,7 @@ static void __init dump_numa_memory_topology(void)
for_each_online_node(node) {
unsigned long i;
- printk(KERN_INFO "Node %d Memory:", node);
+ printk(KERN_DEBUG "Node %d Memory:", node);
count = 0;
diff --git a/arch/powerpc/mm/ppc_mmu_32.c b/arch/powerpc/mm/ppc_mmu_32.c
index ed7fcfe5fd3..2ed43a493b3 100644
--- a/arch/powerpc/mm/ppc_mmu_32.c
+++ b/arch/powerpc/mm/ppc_mmu_32.c
@@ -42,18 +42,14 @@ unsigned long _SDR1;
union ubat { /* BAT register values to be loaded */
BAT bat;
-#ifdef CONFIG_PPC64BRIDGE
- u64 word[2];
-#else
u32 word[2];
-#endif
-} BATS[4][2]; /* 4 pairs of IBAT, DBAT */
+} BATS[8][2]; /* 8 pairs of IBAT, DBAT */
struct batrange { /* stores address ranges mapped by BATs */
unsigned long start;
unsigned long limit;
unsigned long phys;
-} bat_addrs[4];
+} bat_addrs[8];
/*
* Return PA for this VA if it is mapped by a BAT, or 0
@@ -190,7 +186,7 @@ void hash_preload(struct mm_struct *mm, unsigned long ea,
return;
pmd = pmd_offset(pgd_offset(mm, ea), ea);
if (!pmd_none(*pmd))
- add_hash_page(mm->context, ea, pmd_val(*pmd));
+ add_hash_page(mm->context.id, ea, pmd_val(*pmd));
}
/*
@@ -220,15 +216,9 @@ void __init MMU_init_hw(void)
if ( ppc_md.progress ) ppc_md.progress("hash:enter", 0x105);
-#ifdef CONFIG_PPC64BRIDGE
-#define LG_HPTEG_SIZE 7 /* 128 bytes per HPTEG */
-#define SDR1_LOW_BITS (lg_n_hpteg - 11)
-#define MIN_N_HPTEG 2048 /* min 256kB hash table */
-#else
#define LG_HPTEG_SIZE 6 /* 64 bytes per HPTEG */
#define SDR1_LOW_BITS ((n_hpteg - 1) >> 10)
#define MIN_N_HPTEG 1024 /* min 64kB hash table */
-#endif
/*
* Allow 1 HPTE (1/8 HPTEG) for each page of memory.
diff --git a/arch/powerpc/mm/slb.c b/arch/powerpc/mm/slb.c
index ffc8ed4de62..6a8bf6c6000 100644
--- a/arch/powerpc/mm/slb.c
+++ b/arch/powerpc/mm/slb.c
@@ -60,19 +60,19 @@ static inline void create_slbe(unsigned long ea, unsigned long flags,
: "memory" );
}
-static void slb_flush_and_rebolt(void)
+void slb_flush_and_rebolt(void)
{
/* If you change this make sure you change SLB_NUM_BOLTED
* appropriately too. */
- unsigned long linear_llp, virtual_llp, lflags, vflags;
+ unsigned long linear_llp, vmalloc_llp, lflags, vflags;
unsigned long ksp_esid_data;
WARN_ON(!irqs_disabled());
linear_llp = mmu_psize_defs[mmu_linear_psize].sllp;
- virtual_llp = mmu_psize_defs[mmu_virtual_psize].sllp;
+ vmalloc_llp = mmu_psize_defs[mmu_vmalloc_psize].sllp;
lflags = SLB_VSID_KERNEL | linear_llp;
- vflags = SLB_VSID_KERNEL | virtual_llp;
+ vflags = SLB_VSID_KERNEL | vmalloc_llp;
ksp_esid_data = mk_esid_data(get_paca()->kstack, 2);
if ((ksp_esid_data & ESID_MASK) == PAGE_OFFSET)
@@ -122,9 +122,6 @@ void switch_slb(struct task_struct *tsk, struct mm_struct *mm)
get_paca()->slb_cache_ptr = 0;
get_paca()->context = mm->context;
-#ifdef CONFIG_PPC_64K_PAGES
- get_paca()->pgdir = mm->pgd;
-#endif /* CONFIG_PPC_64K_PAGES */
/*
* preload some userspace segments into the SLB.
@@ -167,11 +164,10 @@ static inline void patch_slb_encoding(unsigned int *insn_addr,
void slb_initialize(void)
{
- unsigned long linear_llp, virtual_llp;
+ unsigned long linear_llp, vmalloc_llp, io_llp;
static int slb_encoding_inited;
extern unsigned int *slb_miss_kernel_load_linear;
- extern unsigned int *slb_miss_kernel_load_virtual;
- extern unsigned int *slb_miss_user_load_normal;
+ extern unsigned int *slb_miss_kernel_load_io;
#ifdef CONFIG_HUGETLB_PAGE
extern unsigned int *slb_miss_user_load_huge;
unsigned long huge_llp;
@@ -181,18 +177,19 @@ void slb_initialize(void)
/* Prepare our SLB miss handler based on our page size */
linear_llp = mmu_psize_defs[mmu_linear_psize].sllp;
- virtual_llp = mmu_psize_defs[mmu_virtual_psize].sllp;
+ io_llp = mmu_psize_defs[mmu_io_psize].sllp;
+ vmalloc_llp = mmu_psize_defs[mmu_vmalloc_psize].sllp;
+ get_paca()->vmalloc_sllp = SLB_VSID_KERNEL | vmalloc_llp;
+
if (!slb_encoding_inited) {
slb_encoding_inited = 1;
patch_slb_encoding(slb_miss_kernel_load_linear,
SLB_VSID_KERNEL | linear_llp);
- patch_slb_encoding(slb_miss_kernel_load_virtual,
- SLB_VSID_KERNEL | virtual_llp);
- patch_slb_encoding(slb_miss_user_load_normal,
- SLB_VSID_USER | virtual_llp);
+ patch_slb_encoding(slb_miss_kernel_load_io,
+ SLB_VSID_KERNEL | io_llp);
DBG("SLB: linear LLP = %04x\n", linear_llp);
- DBG("SLB: virtual LLP = %04x\n", virtual_llp);
+ DBG("SLB: io LLP = %04x\n", io_llp);
#ifdef CONFIG_HUGETLB_PAGE
patch_slb_encoding(slb_miss_user_load_huge,
SLB_VSID_USER | huge_llp);
@@ -207,7 +204,7 @@ void slb_initialize(void)
unsigned long lflags, vflags;
lflags = SLB_VSID_KERNEL | linear_llp;
- vflags = SLB_VSID_KERNEL | virtual_llp;
+ vflags = SLB_VSID_KERNEL | vmalloc_llp;
/* Invalidate the entire SLB (even slot 0) & all the ERATS */
asm volatile("isync":::"memory");
@@ -215,7 +212,6 @@ void slb_initialize(void)
asm volatile("isync; slbia; isync":::"memory");
create_slbe(PAGE_OFFSET, lflags, 0);
- /* VMALLOC space has 4K pages always for now */
create_slbe(VMALLOC_START, vflags, 1);
/* We don't bolt the stack for the time being - we're in boot,
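
For context on the patch_slb_encoding() call sites above: the SLB miss handler is assembled with placeholder "li" instructions, and at boot the 16-bit immediate field of each is rewritten with the flags chosen for the detected page sizes. A sketch of such a patcher, not the verbatim kernel function; the icache flush is stubbed here:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* Stub standing in for the kernel's flush_icache_range(). */
static void flush_icache_range(unsigned long start, unsigned long stop)
{
        (void)start; (void)stop;
}

/* Overwrite the immediate field of a pre-located instruction,
 * keeping the opcode and register fields intact. */
static void patch_immediate(uint32_t *insn, uint16_t flags)
{
        *insn = (*insn & 0xffff0000u) | flags;
        flush_icache_range((unsigned long)insn,
                           (unsigned long)insn + sizeof(*insn));
}

int main(void)
{
        uint32_t insn = 0x39600000u;  /* li r11,0 */

        patch_immediate(&insn, 0x0490);  /* example flags value */
        printf("patched insn: %#" PRIx32 "\n", insn);
        return 0;
}
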
diff --git a/arch/powerpc/mm/slb_low.S b/arch/powerpc/mm/slb_low.S
index abfaabf667b..8548dcf8ef8 100644
--- a/arch/powerpc/mm/slb_low.S
+++ b/arch/powerpc/mm/slb_low.S
@@ -59,10 +59,19 @@ _GLOBAL(slb_miss_kernel_load_linear)
li r11,0
b slb_finish_load
-1: /* vmalloc/ioremap mapping encoding bits, the "li" instruction below
+1: /* vmalloc/ioremap mapping encoding bits, the "li" instructions below
* will be patched by the kernel at boot
*/
-_GLOBAL(slb_miss_kernel_load_virtual)
+BEGIN_FTR_SECTION
+ /* check whether this is in vmalloc or ioremap space */
+ clrldi r11,r10,48
+ cmpldi r11,(VMALLOC_SIZE >> 28) - 1
+ bgt 5f
+ lhz r11,PACAVMALLOCSLLP(r13)
+ b slb_finish_load
+5:
+END_FTR_SECTION_IFCLR(CPU_FTR_CI_LARGE_PAGE)
+_GLOBAL(slb_miss_kernel_load_io)
li r11,0
b slb_finish_load
@@ -96,9 +105,7 @@ _GLOBAL(slb_miss_user_load_huge)
1:
#endif /* CONFIG_HUGETLB_PAGE */
-_GLOBAL(slb_miss_user_load_normal)
- li r11,0
-
+ lhz r11,PACACONTEXTSLLP(r13)
2:
ld r9,PACACONTEXTID(r13)
rldimi r10,r9,USER_ESID_BITS,0
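
The new feature section classifies a kernel address as vmalloc or ioremap by the low 16 bits of its ESID (clrldi r11,r10,48, assuming r10 holds EA >> 28 as the surrounding handler sets up), comparing against the number of 256MB segments in the vmalloc area; vmalloc addresses load PACAVMALLOCSLLP while ioremap ones fall through to the patched "li". The same test in C, as a model only, with VMALLOC_START/VMALLOC_SIZE values chosen for the example:

#include <stdbool.h>
#include <stdio.h>

/* Example span: 8 x 256MB segments of vmalloc space. */
#define VMALLOC_SIZE (8ull << 28)

static bool segment_is_vmalloc(unsigned long long ea)
{
        unsigned long long esid_low = (ea >> 28) & 0xffff;

        return esid_low <= (VMALLOC_SIZE >> 28) - 1;
}

int main(void)
{
        unsigned long long vmalloc_start = 0xd000000000000000ull; /* example */

        printf("vmalloc? %d\n", segment_is_vmalloc(vmalloc_start));
        printf("vmalloc? %d\n",
               segment_is_vmalloc(vmalloc_start + VMALLOC_SIZE));
        return 0;
}
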
diff --git a/arch/powerpc/mm/stab.c b/arch/powerpc/mm/stab.c
index 4a9291d9fef..691320c90b7 100644
--- a/arch/powerpc/mm/stab.c
+++ b/arch/powerpc/mm/stab.c
@@ -200,10 +200,6 @@ void switch_stab(struct task_struct *tsk, struct mm_struct *mm)
__get_cpu_var(stab_cache_ptr) = 0;
-#ifdef CONFIG_PPC_64K_PAGES
- get_paca()->pgdir = mm->pgd;
-#endif /* CONFIG_PPC_64K_PAGES */
-
/* Now preload some entries for the new task */
if (test_tsk_thread_flag(tsk, TIF_32BIT))
unmapped_base = TASK_UNMAPPED_BASE_USER32;
diff --git a/arch/powerpc/mm/tlb_32.c b/arch/powerpc/mm/tlb_32.c
index ad580f3742e..02eb23e036d 100644
--- a/arch/powerpc/mm/tlb_32.c
+++ b/arch/powerpc/mm/tlb_32.c
@@ -42,7 +42,7 @@ void flush_hash_entry(struct mm_struct *mm, pte_t *ptep, unsigned long addr)
if (Hash != 0) {
ptephys = __pa(ptep) & PAGE_MASK;
- flush_hash_pages(mm->context, addr, ptephys, 1);
+ flush_hash_pages(mm->context.id, addr, ptephys, 1);
}
}
@@ -102,7 +102,7 @@ static void flush_range(struct mm_struct *mm, unsigned long start,
pmd_t *pmd;
unsigned long pmd_end;
int count;
- unsigned int ctx = mm->context;
+ unsigned int ctx = mm->context.id;
if (Hash == 0) {
_tlbia();
@@ -172,7 +172,7 @@ void flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr)
mm = (vmaddr < TASK_SIZE)? vma->vm_mm: &init_mm;
pmd = pmd_offset(pgd_offset(mm, vmaddr), vmaddr);
if (!pmd_none(*pmd))
- flush_hash_pages(mm->context, vmaddr, pmd_val(*pmd), 1);
+ flush_hash_pages(mm->context.id, vmaddr, pmd_val(*pmd), 1);
FINISH_FLUSH;
}
diff --git a/arch/powerpc/mm/tlb_64.c b/arch/powerpc/mm/tlb_64.c
index f734b11566c..e7449b068c8 100644
--- a/arch/powerpc/mm/tlb_64.c
+++ b/arch/powerpc/mm/tlb_64.c
@@ -131,7 +131,7 @@ void hpte_update(struct mm_struct *mm, unsigned long addr,
{
struct ppc64_tlb_batch *batch = &__get_cpu_var(ppc64_tlb_batch);
unsigned long vsid;
- unsigned int psize = mmu_virtual_psize;
+ unsigned int psize;
int i;
i = batch->index;
@@ -148,7 +148,8 @@ void hpte_update(struct mm_struct *mm, unsigned long addr,
#else
BUG();
#endif
- }
+ } else
+ psize = pte_pagesize_index(pte);
/*
* This can happen when we are in the middle of a TLB batch and