author     Linus Torvalds <torvalds@linux-foundation.org>   2012-10-06 03:16:12 +0900
committer  Linus Torvalds <torvalds@linux-foundation.org>   2012-10-06 03:16:12 +0900
commit     5f3d2f2e1a63679cf1c4a4210f2f1cc2f335bef6 (patch)
tree       9189bd6c81fe5f982a7ae45d2f3d900176658509 /arch/powerpc/mm
parent     283dbd82055eb70ff3b469f812d9c695f18c9641 (diff)
parent     d900bd7366463fd96a907b2c212242e2b68b27d8 (diff)
Merge branch 'next' of git://git.kernel.org/pub/scm/linux/kernel/git/benh/powerpc
Pull powerpc updates from Benjamin Herrenschmidt:
 "Some highlights in addition to the usual batch of fixes:

   - 64TB address space support for 64-bit processes by Aneesh Kumar

   - Gavin Shan did a major cleanup & re-organization of our EEH
     support code (IBM fancy PCI error handling & recovery
     infrastructure) which paves the way for supporting different
     platform backends, along with some rework of the PCIe code for the
     PowerNV platform in order to remove home made resource allocations
     and instead use the generic code (which is possible after some
     small improvements to it done by Gavin).

   - Uprobes support by Ananth N Mavinakayanahalli

   - A pile of embedded updates from Freescale folks, including new SoC
     and board supports, more KVM stuff including preparing for 64-bit
     BookE KVM support, ePAPR 1.1 updates, etc..."

Fixup trivial conflicts in drivers/scsi/ipr.c

* 'next' of git://git.kernel.org/pub/scm/linux/kernel/git/benh/powerpc: (146 commits)
  powerpc/iommu: Fix multiple issues with IOMMU pools code
  powerpc: Fix VMX fix for memcpy case
  driver/mtd:IFC NAND:Initialise internal SRAM before any write
  powerpc/fsl-pci: use 'Header Type' to identify PCIE mode
  powerpc/eeh: Don't release eeh_mutex in eeh_phb_pe_get
  powerpc: Remove tlb batching hack for nighthawk
  powerpc: Set paca->data_offset = 0 for boot cpu
  powerpc/perf: Sample only if SIAR-Valid bit is set in P7+
  powerpc/fsl-pci: fix warning when CONFIG_SWIOTLB is disabled
  powerpc/mpc85xx: Update interrupt handling for IFC controller
  powerpc/85xx: Enable USB support in p1023rds_defconfig
  powerpc/smp: Do not disable IPI interrupts during suspend
  powerpc/eeh: Fix crash on converting OF node to edev
  powerpc/eeh: Lock module while handling EEH event
  powerpc/kprobe: Don't emulate store when kprobe stwu r1
  powerpc/kprobe: Complete kprobe and migrate exception frame
  powerpc/kprobe: Introduce a new thread flag
  powerpc: Remove unused __get_user64() and __put_user64()
  powerpc/eeh: Global mutex to protect PE tree
  powerpc/eeh: Remove EEH PE for normal PCI hotplug
  ...
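The recurring theme in the mm changes below is the switch from passing a full
virtual address ("va") to passing a virtual page number ("vpn"), because with
the enlarged VSID space needed for 64TB the full virtual address no longer
fits in an unsigned long. A minimal C sketch of that encoding (not kernel
code; SID_SHIFT = 28 and VPN_SHIFT = 12 are assumed, matching a 256MB segment
and a 4K base page):

	#include <stdint.h>

	#define SID_SHIFT	28	/* 256MB segment */
	#define VPN_SHIFT	12	/* smallest base page size */

	/* Sketch of the ea/vsid -> vpn mapping used throughout the series:
	 * drop the low VPN_SHIFT bits of the address and prepend the VSID. */
	static uint64_t hpt_vpn_sketch(uint64_t ea, uint64_t vsid)
	{
		uint64_t seg_off = ea & ((1ULL << SID_SHIFT) - 1); /* offset inside the segment */

		return (vsid << (SID_SHIFT - VPN_SHIFT)) | (seg_off >> VPN_SHIFT);
	}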
Diffstat (limited to 'arch/powerpc/mm')
-rw-r--r--  arch/powerpc/mm/fault.c               |   1
-rw-r--r--  arch/powerpc/mm/hash_low_64.S         |  97
-rw-r--r--  arch/powerpc/mm/hash_native_64.c      | 192
-rw-r--r--  arch/powerpc/mm/hash_utils_64.c       |  48
-rw-r--r--  arch/powerpc/mm/hugetlbpage-hash64.c  |  15
-rw-r--r--  arch/powerpc/mm/init_64.c             |   1
-rw-r--r--  arch/powerpc/mm/mem.c                 |   5
-rw-r--r--  arch/powerpc/mm/mmu_context_hash64.c  |  10
-rw-r--r--  arch/powerpc/mm/pgtable_64.c          |  13
-rw-r--r--  arch/powerpc/mm/slb_low.S             |  62
-rw-r--r--  arch/powerpc/mm/slice.c               | 112
-rw-r--r--  arch/powerpc/mm/stab.c                |   3
-rw-r--r--  arch/powerpc/mm/subpage-prot.c        |   6
-rw-r--r--  arch/powerpc/mm/tlb_hash64.c          |  11
-rw-r--r--  arch/powerpc/mm/tlb_low_64e.S         |  18
15 files changed, 357 insertions, 237 deletions
diff --git a/arch/powerpc/mm/fault.c b/arch/powerpc/mm/fault.c
index e5f028b5794..5495ebe983a 100644
--- a/arch/powerpc/mm/fault.c
+++ b/arch/powerpc/mm/fault.c
@@ -133,6 +133,7 @@ static int do_sigbus(struct pt_regs *regs, unsigned long address)
up_read(&current->mm->mmap_sem);
if (user_mode(regs)) {
+ current->thread.trap_nr = BUS_ADRERR;
info.si_signo = SIGBUS;
info.si_errno = 0;
info.si_code = BUS_ADRERR;
diff --git a/arch/powerpc/mm/hash_low_64.S b/arch/powerpc/mm/hash_low_64.S
index 602aeb06d29..56585086413 100644
--- a/arch/powerpc/mm/hash_low_64.S
+++ b/arch/powerpc/mm/hash_low_64.S
@@ -63,7 +63,7 @@ _GLOBAL(__hash_page_4K)
/* Save non-volatile registers.
* r31 will hold "old PTE"
* r30 is "new PTE"
- * r29 is "va"
+ * r29 is vpn
* r28 is a hash value
* r27 is hashtab mask (maybe dynamic patched instead ?)
*/
@@ -111,10 +111,10 @@ BEGIN_FTR_SECTION
cmpdi r9,0 /* check segment size */
bne 3f
END_MMU_FTR_SECTION_IFSET(MMU_FTR_1T_SEGMENT)
- /* Calc va and put it in r29 */
- rldicr r29,r5,28,63-28
- rldicl r3,r3,0,36
- or r29,r3,r29
+ /* Calc vpn and put it in r29 */
+ sldi r29,r5,SID_SHIFT - VPN_SHIFT
+ rldicl r28,r3,64 - VPN_SHIFT,64 - (SID_SHIFT - VPN_SHIFT)
+ or r29,r28,r29
/* Calculate hash value for primary slot and store it in r28 */
rldicl r5,r5,0,25 /* vsid & 0x0000007fffffffff */
@@ -122,14 +122,19 @@ END_MMU_FTR_SECTION_IFSET(MMU_FTR_1T_SEGMENT)
xor r28,r5,r0
b 4f
-3: /* Calc VA and hash in r29 and r28 for 1T segment */
- sldi r29,r5,40 /* vsid << 40 */
- clrldi r3,r3,24 /* ea & 0xffffffffff */
+3: /* Calc vpn and put it in r29 */
+ sldi r29,r5,SID_SHIFT_1T - VPN_SHIFT
+ rldicl r28,r3,64 - VPN_SHIFT,64 - (SID_SHIFT_1T - VPN_SHIFT)
+ or r29,r28,r29
+
+ /*
+ * calculate hash value for primary slot and
+ * store it in r28 for 1T segment
+ */
rldic r28,r5,25,25 /* (vsid << 25) & 0x7fffffffff */
clrldi r5,r5,40 /* vsid & 0xffffff */
rldicl r0,r3,64-12,36 /* (ea >> 12) & 0xfffffff */
xor r28,r28,r5
- or r29,r3,r29 /* VA */
xor r28,r28,r0 /* hash */
/* Convert linux PTE bits into HW equivalents */
@@ -185,7 +190,7 @@ htab_insert_pte:
/* Call ppc_md.hpte_insert */
ld r6,STK_PARAM(R4)(r1) /* Retrieve new pp bits */
- mr r4,r29 /* Retrieve va */
+ mr r4,r29 /* Retrieve vpn */
li r7,0 /* !bolted, !secondary */
li r8,MMU_PAGE_4K /* page size */
ld r9,STK_PARAM(R9)(r1) /* segment size */
@@ -208,7 +213,7 @@ _GLOBAL(htab_call_hpte_insert1)
/* Call ppc_md.hpte_insert */
ld r6,STK_PARAM(R4)(r1) /* Retrieve new pp bits */
- mr r4,r29 /* Retrieve va */
+ mr r4,r29 /* Retrieve vpn */
li r7,HPTE_V_SECONDARY /* !bolted, secondary */
li r8,MMU_PAGE_4K /* page size */
ld r9,STK_PARAM(R9)(r1) /* segment size */
@@ -278,7 +283,7 @@ htab_modify_pte:
add r3,r0,r3 /* add slot idx */
/* Call ppc_md.hpte_updatepp */
- mr r5,r29 /* va */
+ mr r5,r29 /* vpn */
li r6,MMU_PAGE_4K /* page size */
ld r7,STK_PARAM(R9)(r1) /* segment size */
ld r8,STK_PARAM(R8)(r1) /* get "local" param */
@@ -339,7 +344,7 @@ _GLOBAL(__hash_page_4K)
/* Save non-volatile registers.
* r31 will hold "old PTE"
* r30 is "new PTE"
- * r29 is "va"
+ * r29 is vpn
* r28 is a hash value
* r27 is hashtab mask (maybe dynamic patched instead ?)
* r26 is the hidx mask
@@ -394,10 +399,14 @@ BEGIN_FTR_SECTION
cmpdi r9,0 /* check segment size */
bne 3f
END_MMU_FTR_SECTION_IFSET(MMU_FTR_1T_SEGMENT)
- /* Calc va and put it in r29 */
- rldicr r29,r5,28,63-28 /* r29 = (vsid << 28) */
- rldicl r3,r3,0,36 /* r3 = (ea & 0x0fffffff) */
- or r29,r3,r29 /* r29 = va */
+ /* Calc vpn and put it in r29 */
+ sldi r29,r5,SID_SHIFT - VPN_SHIFT
+ /*
+ * clrldi r3,r3,64 - SID_SHIFT --> ea & 0xfffffff
+ * srdi r28,r3,VPN_SHIFT
+ */
+ rldicl r28,r3,64 - VPN_SHIFT,64 - (SID_SHIFT - VPN_SHIFT)
+ or r29,r28,r29
/* Calculate hash value for primary slot and store it in r28 */
rldicl r5,r5,0,25 /* vsid & 0x0000007fffffffff */
@@ -405,14 +414,23 @@ END_MMU_FTR_SECTION_IFSET(MMU_FTR_1T_SEGMENT)
xor r28,r5,r0
b 4f
-3: /* Calc VA and hash in r29 and r28 for 1T segment */
- sldi r29,r5,40 /* vsid << 40 */
- clrldi r3,r3,24 /* ea & 0xffffffffff */
+3: /* Calc vpn and put it in r29 */
+ sldi r29,r5,SID_SHIFT_1T - VPN_SHIFT
+ /*
+ * clrldi r3,r3,64 - SID_SHIFT_1T --> ea & 0xffffffffff
+ * srdi r28,r3,VPN_SHIFT
+ */
+ rldicl r28,r3,64 - VPN_SHIFT,64 - (SID_SHIFT_1T - VPN_SHIFT)
+ or r29,r28,r29
+
+ /*
+ * Calculate hash value for primary slot and
+ * store it in r28 for 1T segment
+ */
rldic r28,r5,25,25 /* (vsid << 25) & 0x7fffffffff */
clrldi r5,r5,40 /* vsid & 0xffffff */
rldicl r0,r3,64-12,36 /* (ea >> 12) & 0xfffffff */
xor r28,r28,r5
- or r29,r3,r29 /* VA */
xor r28,r28,r0 /* hash */
/* Convert linux PTE bits into HW equivalents */
@@ -488,7 +506,7 @@ htab_special_pfn:
/* Call ppc_md.hpte_insert */
ld r6,STK_PARAM(R4)(r1) /* Retrieve new pp bits */
- mr r4,r29 /* Retrieve va */
+ mr r4,r29 /* Retrieve vpn */
li r7,0 /* !bolted, !secondary */
li r8,MMU_PAGE_4K /* page size */
ld r9,STK_PARAM(R9)(r1) /* segment size */
@@ -515,7 +533,7 @@ _GLOBAL(htab_call_hpte_insert1)
/* Call ppc_md.hpte_insert */
ld r6,STK_PARAM(R4)(r1) /* Retrieve new pp bits */
- mr r4,r29 /* Retrieve va */
+ mr r4,r29 /* Retrieve vpn */
li r7,HPTE_V_SECONDARY /* !bolted, secondary */
li r8,MMU_PAGE_4K /* page size */
ld r9,STK_PARAM(R9)(r1) /* segment size */
@@ -547,7 +565,7 @@ _GLOBAL(htab_call_hpte_remove)
* useless now that the segment has been switched to 4k pages.
*/
htab_inval_old_hpte:
- mr r3,r29 /* virtual addr */
+ mr r3,r29 /* vpn */
mr r4,r31 /* PTE.pte */
li r5,0 /* PTE.hidx */
li r6,MMU_PAGE_64K /* psize */
@@ -620,7 +638,7 @@ htab_modify_pte:
add r3,r0,r3 /* add slot idx */
/* Call ppc_md.hpte_updatepp */
- mr r5,r29 /* va */
+ mr r5,r29 /* vpn */
li r6,MMU_PAGE_4K /* page size */
ld r7,STK_PARAM(R9)(r1) /* segment size */
ld r8,STK_PARAM(R8)(r1) /* get "local" param */
@@ -676,7 +694,7 @@ _GLOBAL(__hash_page_64K)
/* Save non-volatile registers.
* r31 will hold "old PTE"
* r30 is "new PTE"
- * r29 is "va"
+ * r29 is vpn
* r28 is a hash value
* r27 is hashtab mask (maybe dynamic patched instead ?)
*/
@@ -729,10 +747,10 @@ BEGIN_FTR_SECTION
cmpdi r9,0 /* check segment size */
bne 3f
END_MMU_FTR_SECTION_IFSET(MMU_FTR_1T_SEGMENT)
- /* Calc va and put it in r29 */
- rldicr r29,r5,28,63-28
- rldicl r3,r3,0,36
- or r29,r3,r29
+ /* Calc vpn and put it in r29 */
+ sldi r29,r5,SID_SHIFT - VPN_SHIFT
+ rldicl r28,r3,64 - VPN_SHIFT,64 - (SID_SHIFT - VPN_SHIFT)
+ or r29,r28,r29
/* Calculate hash value for primary slot and store it in r28 */
rldicl r5,r5,0,25 /* vsid & 0x0000007fffffffff */
@@ -740,14 +758,19 @@ END_MMU_FTR_SECTION_IFSET(MMU_FTR_1T_SEGMENT)
xor r28,r5,r0
b 4f
-3: /* Calc VA and hash in r29 and r28 for 1T segment */
- sldi r29,r5,40 /* vsid << 40 */
- clrldi r3,r3,24 /* ea & 0xffffffffff */
+3: /* Calc vpn and put it in r29 */
+ sldi r29,r5,SID_SHIFT_1T - VPN_SHIFT
+ rldicl r28,r3,64 - VPN_SHIFT,64 - (SID_SHIFT_1T - VPN_SHIFT)
+ or r29,r28,r29
+
+ /*
+ * calculate hash value for primary slot and
+ * store it in r28 for 1T segment
+ */
rldic r28,r5,25,25 /* (vsid << 25) & 0x7fffffffff */
clrldi r5,r5,40 /* vsid & 0xffffff */
rldicl r0,r3,64-16,40 /* (ea >> 16) & 0xffffff */
xor r28,r28,r5
- or r29,r3,r29 /* VA */
xor r28,r28,r0 /* hash */
/* Convert linux PTE bits into HW equivalents */
@@ -806,7 +829,7 @@ ht64_insert_pte:
/* Call ppc_md.hpte_insert */
ld r6,STK_PARAM(R4)(r1) /* Retrieve new pp bits */
- mr r4,r29 /* Retrieve va */
+ mr r4,r29 /* Retrieve vpn */
li r7,0 /* !bolted, !secondary */
li r8,MMU_PAGE_64K
ld r9,STK_PARAM(R9)(r1) /* segment size */
@@ -829,7 +852,7 @@ _GLOBAL(ht64_call_hpte_insert1)
/* Call ppc_md.hpte_insert */
ld r6,STK_PARAM(R4)(r1) /* Retrieve new pp bits */
- mr r4,r29 /* Retrieve va */
+ mr r4,r29 /* Retrieve vpn */
li r7,HPTE_V_SECONDARY /* !bolted, secondary */
li r8,MMU_PAGE_64K
ld r9,STK_PARAM(R9)(r1) /* segment size */
@@ -899,7 +922,7 @@ ht64_modify_pte:
add r3,r0,r3 /* add slot idx */
/* Call ppc_md.hpte_updatepp */
- mr r5,r29 /* va */
+ mr r5,r29 /* vpn */
li r6,MMU_PAGE_64K
ld r7,STK_PARAM(R9)(r1) /* segment size */
ld r8,STK_PARAM(R8)(r1) /* get "local" param */
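For readers following the masks in the comments above, here is a C rendition
of the primary hash the rewritten assembly computes. It is a sketch built
from those comments (SID_SHIFT = 28 and SID_SHIFT_1T = 40 are assumptions),
not a copy of any kernel function:

	#include <stdint.h>

	#define SID_SHIFT	28	/* 256MB segment */
	#define SID_SHIFT_1T	40	/* 1TB segment */

	/* "shift" is the page-size shift, e.g. 12 for 4K or 16 for 64K pages. */
	static uint64_t primary_hash_sketch(uint64_t vsid, uint64_t ea,
					    unsigned int shift, int is_1t_segment)
	{
		if (!is_1t_segment)
			/* vsid & 0x7fffffffff  ^  page index within the 256MB segment */
			return (vsid & 0x7fffffffffULL) ^
			       ((ea >> shift) & ((1ULL << (SID_SHIFT - shift)) - 1));

		/* (vsid << 25) & 0x7fffffffff  ^  vsid & 0xffffff  ^  page index */
		return ((vsid << 25) & 0x7fffffffffULL) ^
		       (vsid & 0xffffffULL) ^
		       ((ea >> shift) & ((1ULL << (SID_SHIFT_1T - shift)) - 1));
	}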
diff --git a/arch/powerpc/mm/hash_native_64.c b/arch/powerpc/mm/hash_native_64.c
index 90039bc6411..ffc1e00f7a2 100644
--- a/arch/powerpc/mm/hash_native_64.c
+++ b/arch/powerpc/mm/hash_native_64.c
@@ -14,10 +14,10 @@
#include <linux/spinlock.h>
#include <linux/bitops.h>
+#include <linux/of.h>
#include <linux/threads.h>
#include <linux/smp.h>
-#include <asm/abs_addr.h>
#include <asm/machdep.h>
#include <asm/mmu.h>
#include <asm/mmu_context.h>
@@ -39,22 +39,35 @@
DEFINE_RAW_SPINLOCK(native_tlbie_lock);
-static inline void __tlbie(unsigned long va, int psize, int ssize)
+static inline void __tlbie(unsigned long vpn, int psize, int ssize)
{
+ unsigned long va;
unsigned int penc;
- /* clear top 16 bits, non SLS segment */
+ /*
+ * We need 14 to 65 bits of va for a tlbie of 4K page
+ * With vpn we ignore the lower VPN_SHIFT bits already.
+ * And top two bits are already ignored because we can
+ * only accommodate 76 bits in a 64 bit vpn with a VPN_SHIFT
+ * of 12.
+ */
+ va = vpn << VPN_SHIFT;
+ /*
+ * clear top 16 bits of 64bit va, non SLS segment
+ * Older versions of the architecture (2.02 and earlier) require the
+ * masking of the top 16 bits.
+ */
va &= ~(0xffffULL << 48);
switch (psize) {
case MMU_PAGE_4K:
- va &= ~0xffful;
va |= ssize << 8;
asm volatile(ASM_FTR_IFCLR("tlbie %0,0", PPC_TLBIE(%1,%0), %2)
: : "r" (va), "r"(0), "i" (CPU_FTR_ARCH_206)
: "memory");
break;
default:
+ /* We need 14 to 14 + i bits of va */
penc = mmu_psize_defs[psize].penc;
va &= ~((1ul << mmu_psize_defs[psize].shift) - 1);
va |= penc << 12;
@@ -67,21 +80,28 @@ static inline void __tlbie(unsigned long va, int psize, int ssize)
}
}
-static inline void __tlbiel(unsigned long va, int psize, int ssize)
+static inline void __tlbiel(unsigned long vpn, int psize, int ssize)
{
+ unsigned long va;
unsigned int penc;
- /* clear top 16 bits, non SLS segment */
+ /* VPN_SHIFT can be at most 12 */
+ va = vpn << VPN_SHIFT;
+ /*
+ * clear top 16 bits of 64 bit va, non SLS segment
+ * Older versions of the architecture (2.02 and earlier) require the
+ * masking of the top 16 bits.
+ */
va &= ~(0xffffULL << 48);
switch (psize) {
case MMU_PAGE_4K:
- va &= ~0xffful;
va |= ssize << 8;
asm volatile(".long 0x7c000224 | (%0 << 11) | (0 << 21)"
: : "r"(va) : "memory");
break;
default:
+ /* We need 14 to 14 + i bits of va */
penc = mmu_psize_defs[psize].penc;
va &= ~((1ul << mmu_psize_defs[psize].shift) - 1);
va |= penc << 12;
@@ -94,7 +114,7 @@ static inline void __tlbiel(unsigned long va, int psize, int ssize)
}
-static inline void tlbie(unsigned long va, int psize, int ssize, int local)
+static inline void tlbie(unsigned long vpn, int psize, int ssize, int local)
{
unsigned int use_local = local && mmu_has_feature(MMU_FTR_TLBIEL);
int lock_tlbie = !mmu_has_feature(MMU_FTR_LOCKLESS_TLBIE);
@@ -105,10 +125,10 @@ static inline void tlbie(unsigned long va, int psize, int ssize, int local)
raw_spin_lock(&native_tlbie_lock);
asm volatile("ptesync": : :"memory");
if (use_local) {
- __tlbiel(va, psize, ssize);
+ __tlbiel(vpn, psize, ssize);
asm volatile("ptesync": : :"memory");
} else {
- __tlbie(va, psize, ssize);
+ __tlbie(vpn, psize, ssize);
asm volatile("eieio; tlbsync; ptesync": : :"memory");
}
if (lock_tlbie && !use_local)
@@ -134,7 +154,7 @@ static inline void native_unlock_hpte(struct hash_pte *hptep)
clear_bit_unlock(HPTE_LOCK_BIT, word);
}
-static long native_hpte_insert(unsigned long hpte_group, unsigned long va,
+static long native_hpte_insert(unsigned long hpte_group, unsigned long vpn,
unsigned long pa, unsigned long rflags,
unsigned long vflags, int psize, int ssize)
{
@@ -143,9 +163,9 @@ static long native_hpte_insert(unsigned long hpte_group, unsigned long va,
int i;
if (!(vflags & HPTE_V_BOLTED)) {
- DBG_LOW(" insert(group=%lx, va=%016lx, pa=%016lx,"
+ DBG_LOW(" insert(group=%lx, vpn=%016lx, pa=%016lx,"
" rflags=%lx, vflags=%lx, psize=%d)\n",
- hpte_group, va, pa, rflags, vflags, psize);
+ hpte_group, vpn, pa, rflags, vflags, psize);
}
for (i = 0; i < HPTES_PER_GROUP; i++) {
@@ -163,7 +183,7 @@ static long native_hpte_insert(unsigned long hpte_group, unsigned long va,
if (i == HPTES_PER_GROUP)
return -1;
- hpte_v = hpte_encode_v(va, psize, ssize) | vflags | HPTE_V_VALID;
+ hpte_v = hpte_encode_v(vpn, psize, ssize) | vflags | HPTE_V_VALID;
hpte_r = hpte_encode_r(pa, psize) | rflags;
if (!(vflags & HPTE_V_BOLTED)) {
@@ -225,17 +245,17 @@ static long native_hpte_remove(unsigned long hpte_group)
}
static long native_hpte_updatepp(unsigned long slot, unsigned long newpp,
- unsigned long va, int psize, int ssize,
+ unsigned long vpn, int psize, int ssize,
int local)
{
struct hash_pte *hptep = htab_address + slot;
unsigned long hpte_v, want_v;
int ret = 0;
- want_v = hpte_encode_v(va, psize, ssize);
+ want_v = hpte_encode_v(vpn, psize, ssize);
- DBG_LOW(" update(va=%016lx, avpnv=%016lx, hash=%016lx, newpp=%x)",
- va, want_v & HPTE_V_AVPN, slot, newpp);
+ DBG_LOW(" update(vpn=%016lx, avpnv=%016lx, group=%lx, newpp=%lx)",
+ vpn, want_v & HPTE_V_AVPN, slot, newpp);
native_lock_hpte(hptep);
@@ -254,12 +274,12 @@ static long native_hpte_updatepp(unsigned long slot, unsigned long newpp,
native_unlock_hpte(hptep);
/* Ensure it is out of the tlb too. */
- tlbie(va, psize, ssize, local);
+ tlbie(vpn, psize, ssize, local);
return ret;
}
-static long native_hpte_find(unsigned long va, int psize, int ssize)
+static long native_hpte_find(unsigned long vpn, int psize, int ssize)
{
struct hash_pte *hptep;
unsigned long hash;
@@ -267,8 +287,8 @@ static long native_hpte_find(unsigned long va, int psize, int ssize)
long slot;
unsigned long want_v, hpte_v;
- hash = hpt_hash(va, mmu_psize_defs[psize].shift, ssize);
- want_v = hpte_encode_v(va, psize, ssize);
+ hash = hpt_hash(vpn, mmu_psize_defs[psize].shift, ssize);
+ want_v = hpte_encode_v(vpn, psize, ssize);
/* Bolted mappings are only ever in the primary group */
slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
@@ -295,14 +315,15 @@ static long native_hpte_find(unsigned long va, int psize, int ssize)
static void native_hpte_updateboltedpp(unsigned long newpp, unsigned long ea,
int psize, int ssize)
{
- unsigned long vsid, va;
+ unsigned long vpn;
+ unsigned long vsid;
long slot;
struct hash_pte *hptep;
vsid = get_kernel_vsid(ea, ssize);
- va = hpt_va(ea, vsid, ssize);
+ vpn = hpt_vpn(ea, vsid, ssize);
- slot = native_hpte_find(va, psize, ssize);
+ slot = native_hpte_find(vpn, psize, ssize);
if (slot == -1)
panic("could not find page to bolt\n");
hptep = htab_address + slot;
@@ -312,10 +333,10 @@ static void native_hpte_updateboltedpp(unsigned long newpp, unsigned long ea,
(newpp & (HPTE_R_PP | HPTE_R_N));
/* Ensure it is out of the tlb too. */
- tlbie(va, psize, ssize, 0);
+ tlbie(vpn, psize, ssize, 0);
}
-static void native_hpte_invalidate(unsigned long slot, unsigned long va,
+static void native_hpte_invalidate(unsigned long slot, unsigned long vpn,
int psize, int ssize, int local)
{
struct hash_pte *hptep = htab_address + slot;
@@ -325,9 +346,9 @@ static void native_hpte_invalidate(unsigned long slot, unsigned long va,
local_irq_save(flags);
- DBG_LOW(" invalidate(va=%016lx, hash: %x)\n", va, slot);
+ DBG_LOW(" invalidate(vpn=%016lx, hash: %lx)\n", vpn, slot);
- want_v = hpte_encode_v(va, psize, ssize);
+ want_v = hpte_encode_v(vpn, psize, ssize);
native_lock_hpte(hptep);
hpte_v = hptep->v;
@@ -339,7 +360,7 @@ static void native_hpte_invalidate(unsigned long slot, unsigned long va,
hptep->v = 0;
/* Invalidate the TLB */
- tlbie(va, psize, ssize, local);
+ tlbie(vpn, psize, ssize, local);
local_irq_restore(flags);
}
@@ -349,11 +370,12 @@ static void native_hpte_invalidate(unsigned long slot, unsigned long va,
#define LP_MASK(i) ((0xFF >> (i)) << LP_SHIFT)
static void hpte_decode(struct hash_pte *hpte, unsigned long slot,
- int *psize, int *ssize, unsigned long *va)
+ int *psize, int *ssize, unsigned long *vpn)
{
+ unsigned long avpn, pteg, vpi;
unsigned long hpte_r = hpte->r;
unsigned long hpte_v = hpte->v;
- unsigned long avpn;
+ unsigned long vsid, seg_off;
int i, size, shift, penc;
if (!(hpte_v & HPTE_V_LARGE))
@@ -380,32 +402,38 @@ static void hpte_decode(struct hash_pte *hpte, unsigned long slot,
}
/* This works for all page sizes, and for 256M and 1T segments */
+ *ssize = hpte_v >> HPTE_V_SSIZE_SHIFT;
shift = mmu_psize_defs[size].shift;
- avpn = (HPTE_V_AVPN_VAL(hpte_v) & ~mmu_psize_defs[size].avpnm) << 23;
-
- if (shift < 23) {
- unsigned long vpi, vsid, pteg;
- pteg = slot / HPTES_PER_GROUP;
- if (hpte_v & HPTE_V_SECONDARY)
- pteg = ~pteg;
- switch (hpte_v >> HPTE_V_SSIZE_SHIFT) {
- case MMU_SEGSIZE_256M:
- vpi = ((avpn >> 28) ^ pteg) & htab_hash_mask;
- break;
- case MMU_SEGSIZE_1T:
- vsid = avpn >> 40;
+ avpn = (HPTE_V_AVPN_VAL(hpte_v) & ~mmu_psize_defs[size].avpnm);
+ pteg = slot / HPTES_PER_GROUP;
+ if (hpte_v & HPTE_V_SECONDARY)
+ pteg = ~pteg;
+
+ switch (*ssize) {
+ case MMU_SEGSIZE_256M:
+ /* We only have 28 - 23 bits of seg_off in avpn */
+ seg_off = (avpn & 0x1f) << 23;
+ vsid = avpn >> 5;
+ /* We can find more bits from the pteg value */
+ if (shift < 23) {
+ vpi = (vsid ^ pteg) & htab_hash_mask;
+ seg_off |= vpi << shift;
+ }
+ *vpn = vsid << (SID_SHIFT - VPN_SHIFT) | seg_off >> VPN_SHIFT;
+ case MMU_SEGSIZE_1T:
+ /* We only have 40 - 23 bits of seg_off in avpn */
+ seg_off = (avpn & 0x1ffff) << 23;
+ vsid = avpn >> 17;
+ if (shift < 23) {
vpi = (vsid ^ (vsid << 25) ^ pteg) & htab_hash_mask;
- break;
- default:
- avpn = vpi = size = 0;
+ seg_off |= vpi << shift;
}
- avpn |= (vpi << mmu_psize_defs[size].shift);
+ *vpn = vsid << (SID_SHIFT_1T - VPN_SHIFT) | seg_off >> VPN_SHIFT;
+ default:
+ *vpn = size = 0;
}
-
- *va = avpn;
*psize = size;
- *ssize = hpte_v >> HPTE_V_SSIZE_SHIFT;
}
/*
@@ -418,9 +446,10 @@ static void hpte_decode(struct hash_pte *hpte, unsigned long slot,
*/
static void native_hpte_clear(void)
{
+ unsigned long vpn = 0;
unsigned long slot, slots, flags;
struct hash_pte *hptep = htab_address;
- unsigned long hpte_v, va;
+ unsigned long hpte_v;
unsigned long pteg_count;
int psize, ssize;
@@ -448,9 +477,9 @@ static void native_hpte_clear(void)
* already hold the native_tlbie_lock.
*/
if (hpte_v & HPTE_V_VALID) {
- hpte_decode(hptep, slot, &psize, &ssize, &va);
+ hpte_decode(hptep, slot, &psize, &ssize, &vpn);
hptep->v = 0;
- __tlbie(va, psize, ssize);
+ __tlbie(vpn, psize, ssize);
}
}
@@ -465,7 +494,8 @@ static void native_hpte_clear(void)
*/
static void native_flush_hash_range(unsigned long number, int local)
{
- unsigned long va, hash, index, hidx, shift, slot;
+ unsigned long vpn;
+ unsigned long hash, index, hidx, shift, slot;
struct hash_pte *hptep;
unsigned long hpte_v;
unsigned long want_v;
@@ -479,18 +509,18 @@ static void native_flush_hash_range(unsigned long number, int local)
local_irq_save(flags);
for (i = 0; i < number; i++) {
- va = batch->vaddr[i];
+ vpn = batch->vpn[i];
pte = batch->pte[i];
- pte_iterate_hashed_subpages(pte, psize, va, index, shift) {
- hash = hpt_hash(va, shift, ssize);
+ pte_iterate_hashed_subpages(pte, psize, vpn, index, shift) {
+ hash = hpt_hash(vpn, shift, ssize);
hidx = __rpte_to_hidx(pte, index);
if (hidx & _PTEIDX_SECONDARY)
hash = ~hash;
slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
slot += hidx & _PTEIDX_GROUP_IX;
hptep = htab_address + slot;
- want_v = hpte_encode_v(va, psize, ssize);
+ want_v = hpte_encode_v(vpn, psize, ssize);
native_lock_hpte(hptep);
hpte_v = hptep->v;
if (!HPTE_V_COMPARE(hpte_v, want_v) ||
@@ -505,12 +535,12 @@ static void native_flush_hash_range(unsigned long number, int local)
mmu_psize_defs[psize].tlbiel && local) {
asm volatile("ptesync":::"memory");
for (i = 0; i < number; i++) {
- va = batch->vaddr[i];
+ vpn = batch->vpn[i];
pte = batch->pte[i];
- pte_iterate_hashed_subpages(pte, psize, va, index,
- shift) {
- __tlbiel(va, psize, ssize);
+ pte_iterate_hashed_subpages(pte, psize,
+ vpn, index, shift) {
+ __tlbiel(vpn, psize, ssize);
} pte_iterate_hashed_end();
}
asm volatile("ptesync":::"memory");
@@ -522,12 +552,12 @@ static void native_flush_hash_range(unsigned long number, int local)
asm volatile("ptesync":::"memory");
for (i = 0; i < number; i++) {
- va = batch->vaddr[i];
+ vpn = batch->vpn[i];
pte = batch->pte[i];
- pte_iterate_hashed_subpages(pte, psize, va, index,
- shift) {
- __tlbie(va, psize, ssize);
+ pte_iterate_hashed_subpages(pte, psize,
+ vpn, index, shift) {
+ __tlbie(vpn, psize, ssize);
} pte_iterate_hashed_end();
}
asm volatile("eieio; tlbsync; ptesync":::"memory");
@@ -539,29 +569,6 @@ static void native_flush_hash_range(unsigned long number, int local)
local_irq_restore(flags);
}
-#ifdef CONFIG_PPC_PSERIES
-/* Disable TLB batching on nighthawk */
-static inline int tlb_batching_enabled(void)
-{
- struct device_node *root = of_find_node_by_path("/");
- int enabled = 1;
-
- if (root) {
- const char *model = of_get_property(root, "model", NULL);
- if (model && !strcmp(model, "IBM,9076-N81"))
- enabled = 0;
- of_node_put(root);
- }
-
- return enabled;
-}
-#else
-static inline int tlb_batching_enabled(void)
-{
- return 1;
-}
-#endif
-
void __init hpte_init_native(void)
{
ppc_md.hpte_invalidate = native_hpte_invalidate;
@@ -570,6 +577,5 @@ void __init hpte_init_native(void)
ppc_md.hpte_insert = native_hpte_insert;
ppc_md.hpte_remove = native_hpte_remove;
ppc_md.hpte_clear_all = native_hpte_clear;
- if (tlb_batching_enabled())
- ppc_md.flush_hash_range = native_flush_hash_range;
+ ppc_md.flush_hash_range = native_flush_hash_range;
}
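The new __tlbie()/__tlbiel() above rebuild the hardware operand from the vpn
rather than taking a va. A hedged sketch of the 4K-page case, with
VPN_SHIFT = 12 assumed and the names purely illustrative:

	#include <stdint.h>

	#define VPN_SHIFT	12	/* assumed, matches the 4K base page */

	/* Only bits 14..65 of the virtual address matter to the tlbie, so
	 * shifting the vpn back up and clearing the top 16 bits (required by
	 * ISA versions 2.02 and earlier) is enough before inserting the
	 * segment-size field. */
	static uint64_t tlbie_operand_4k_sketch(uint64_t vpn, unsigned int ssize)
	{
		uint64_t va = vpn << VPN_SHIFT;

		va &= ~(0xffffULL << 48);	/* clear top 16 bits */
		va |= (uint64_t)ssize << 8;	/* segment size for 4K pages */
		return va;
	}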
diff --git a/arch/powerpc/mm/hash_utils_64.c b/arch/powerpc/mm/hash_utils_64.c
index 377e5cbedbb..3a292be2e07 100644
--- a/arch/powerpc/mm/hash_utils_64.c
+++ b/arch/powerpc/mm/hash_utils_64.c
@@ -43,7 +43,6 @@
#include <asm/uaccess.h>
#include <asm/machdep.h>
#include <asm/prom.h>
-#include <asm/abs_addr.h>
#include <asm/tlbflush.h>
#include <asm/io.h>
#include <asm/eeh.h>
@@ -192,18 +191,18 @@ int htab_bolt_mapping(unsigned long vstart, unsigned long vend,
vaddr += step, paddr += step) {
unsigned long hash, hpteg;
unsigned long vsid = get_kernel_vsid(vaddr, ssize);
- unsigned long va = hpt_va(vaddr, vsid, ssize);
+ unsigned long vpn = hpt_vpn(vaddr, vsid, ssize);
unsigned long tprot = prot;
/* Make kernel text executable */
if (overlaps_kernel_text(vaddr, vaddr + step))
tprot &= ~HPTE_R_N;
- hash = hpt_hash(va, shift, ssize);
+ hash = hpt_hash(vpn, shift, ssize);
hpteg = ((hash & htab_hash_mask) * HPTES_PER_GROUP);
BUG_ON(!ppc_md.hpte_insert);
- ret = ppc_md.hpte_insert(hpteg, va, paddr, tprot,
+ ret = ppc_md.hpte_insert(hpteg, vpn, paddr, tprot,
HPTE_V_BOLTED, psize, ssize);
if (ret < 0)
@@ -651,7 +650,7 @@ static void __init htab_initialize(void)
DBG("Hash table allocated at %lx, size: %lx\n", table,
htab_size_bytes);
- htab_address = abs_to_virt(table);
+ htab_address = __va(table);
/* htab absolute addr + encoded htabsize */
_SDR1 = table + __ilog2(pteg_count) - 11;
@@ -804,16 +803,19 @@ unsigned int hash_page_do_lazy_icache(unsigned int pp, pte_t pte, int trap)
#ifdef CONFIG_PPC_MM_SLICES
unsigned int get_paca_psize(unsigned long addr)
{
- unsigned long index, slices;
+ u64 lpsizes;
+ unsigned char *hpsizes;
+ unsigned long index, mask_index;
if (addr < SLICE_LOW_TOP) {
- slices = get_paca()->context.low_slices_psize;
+ lpsizes = get_paca()->context.low_slices_psize;
index = GET_LOW_SLICE_INDEX(addr);
- } else {
- slices = get_paca()->context.high_slices_psize;
- index = GET_HIGH_SLICE_INDEX(addr);
+ return (lpsizes >> (index * 4)) & 0xF;
}
- return (slices >> (index * 4)) & 0xF;
+ hpsizes = get_paca()->context.high_slices_psize;
+ index = GET_HIGH_SLICE_INDEX(addr);
+ mask_index = index & 0x1;
+ return (hpsizes[index >> 1] >> (mask_index * 4)) & 0xF;
}
#else
@@ -1153,21 +1155,21 @@ void hash_preload(struct mm_struct *mm, unsigned long ea,
/* WARNING: This is called from hash_low_64.S, if you change this prototype,
* do not forget to update the assembly call site !
*/
-void flush_hash_page(unsigned long va, real_pte_t pte, int psize, int ssize,
+void flush_hash_page(unsigned long vpn, real_pte_t pte, int psize, int ssize,
int local)
{
unsigned long hash, index, shift, hidx, slot;
- DBG_LOW("flush_hash_page(va=%016lx)\n", va);
- pte_iterate_hashed_subpages(pte, psize, va, index, shift) {
- hash = hpt_hash(va, shift, ssize);
+ DBG_LOW("flush_hash_page(vpn=%016lx)\n", vpn);
+ pte_iterate_hashed_subpages(pte, psize, vpn, index, shift) {
+ hash = hpt_hash(vpn, shift, ssize);
hidx = __rpte_to_hidx(pte, index);
if (hidx & _PTEIDX_SECONDARY)
hash = ~hash;
slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
slot += hidx & _PTEIDX_GROUP_IX;
DBG_LOW(" sub %ld: hash=%lx, hidx=%lx\n", index, slot, hidx);
- ppc_md.hpte_invalidate(slot, va, psize, ssize, local);
+ ppc_md.hpte_invalidate(slot, vpn, psize, ssize, local);
} pte_iterate_hashed_end();
}
@@ -1181,7 +1183,7 @@ void flush_hash_range(unsigned long number, int local)
&__get_cpu_var(ppc64_tlb_batch);
for (i = 0; i < number; i++)
- flush_hash_page(batch->vaddr[i], batch->pte[i],
+ flush_hash_page(batch->vpn[i], batch->pte[i],
batch->psize, batch->ssize, local);
}
}
@@ -1208,14 +1210,14 @@ static void kernel_map_linear_page(unsigned long vaddr, unsigned long lmi)
{
unsigned long hash, hpteg;
unsigned long vsid = get_kernel_vsid(vaddr, mmu_kernel_ssize);
- unsigned long va = hpt_va(vaddr, vsid, mmu_kernel_ssize);
+ unsigned long vpn = hpt_vpn(vaddr, vsid, mmu_kernel_ssize);
unsigned long mode = htab_convert_pte_flags(PAGE_KERNEL);
int ret;
- hash = hpt_hash(va, PAGE_SHIFT, mmu_kernel_ssize);
+ hash = hpt_hash(vpn, PAGE_SHIFT, mmu_kernel_ssize);
hpteg = ((hash & htab_hash_mask) * HPTES_PER_GROUP);
- ret = ppc_md.hpte_insert(hpteg, va, __pa(vaddr),
+ ret = ppc_md.hpte_insert(hpteg, vpn, __pa(vaddr),
mode, HPTE_V_BOLTED,
mmu_linear_psize, mmu_kernel_ssize);
BUG_ON (ret < 0);
@@ -1229,9 +1231,9 @@ static void kernel_unmap_linear_page(unsigned long vaddr, unsigned long lmi)
{
unsigned long hash, hidx, slot;
unsigned long vsid = get_kernel_vsid(vaddr, mmu_kernel_ssize);
- unsigned long va = hpt_va(vaddr, vsid, mmu_kernel_ssize);
+ unsigned long vpn = hpt_vpn(vaddr, vsid, mmu_kernel_ssize);
- hash = hpt_hash(va, PAGE_SHIFT, mmu_kernel_ssize);
+ hash = hpt_hash(vpn, PAGE_SHIFT, mmu_kernel_ssize);
spin_lock(&linear_map_hash_lock);
BUG_ON(!(linear_map_hash_slots[lmi] & 0x80));
hidx = linear_map_hash_slots[lmi] & 0x7f;
@@ -1241,7 +1243,7 @@ static void kernel_unmap_linear_page(unsigned long vaddr, unsigned long lmi)
hash = ~hash;
slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
slot += hidx & _PTEIDX_GROUP_IX;
- ppc_md.hpte_invalidate(slot, va, mmu_linear_psize, mmu_kernel_ssize, 0);
+ ppc_md.hpte_invalidate(slot, vpn, mmu_linear_psize, mmu_kernel_ssize, 0);
}
void kernel_map_pages(struct page *page, int numpages, int enable)
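get_paca_psize() above illustrates the new slice bookkeeping: low slices keep
their 4-bit page-size fields packed in a u64, while the much larger 64TB
high-slice range moves to a byte array holding two 4-bit fields per byte. A
standalone sketch of that lookup (the slice geometry constants are
assumptions matching this series, not quoted definitions):

	#include <stdint.h>

	#define SLICE_LOW_SHIFT		28
	#define SLICE_HIGH_SHIFT	40
	#define SLICE_LOW_TOP		0x100000000ULL

	static unsigned int slice_psize_sketch(uint64_t addr,
					       uint64_t low_slices_psize,
					       const unsigned char *high_slices_psize)
	{
		uint64_t index, mask_index;

		if (addr < SLICE_LOW_TOP) {		/* 256MB slices below 4GB */
			index = addr >> SLICE_LOW_SHIFT;	/* GET_LOW_SLICE_INDEX() */
			return (low_slices_psize >> (index * 4)) & 0xf;
		}
		index = addr >> SLICE_HIGH_SHIFT;	/* GET_HIGH_SLICE_INDEX() */
		mask_index = index & 1;			/* which nibble of the byte */
		return (high_slices_psize[index >> 1] >> (mask_index * 4)) & 0xf;
	}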
diff --git a/arch/powerpc/mm/hugetlbpage-hash64.c b/arch/powerpc/mm/hugetlbpage-hash64.c
index cc5c273086c..cecad348f60 100644
--- a/arch/powerpc/mm/hugetlbpage-hash64.c
+++ b/arch/powerpc/mm/hugetlbpage-hash64.c
@@ -18,14 +18,15 @@ int __hash_page_huge(unsigned long ea, unsigned long access, unsigned long vsid,
pte_t *ptep, unsigned long trap, int local, int ssize,
unsigned int shift, unsigned int mmu_psize)
{
+ unsigned long vpn;
unsigned long old_pte, new_pte;
- unsigned long va, rflags, pa, sz;
+ unsigned long rflags, pa, sz;
long slot;
BUG_ON(shift != mmu_psize_defs[mmu_psize].shift);
/* Search the Linux page table for a match with va */
- va = hpt_va(ea, vsid, ssize);
+ vpn = hpt_vpn(ea, vsid, ssize);
/* At this point, we have a pte (old_pte) which can be used to build
* or update an HPTE. There are 2 cases:
@@ -69,19 +70,19 @@ int __hash_page_huge(unsigned long ea, unsigned long access, unsigned long vsid,
/* There MIGHT be an HPTE for this pte */
unsigned long hash, slot;
- hash = hpt_hash(va, shift, ssize);
+ hash = hpt_hash(vpn, shift, ssize);
if (old_pte & _PAGE_F_SECOND)
hash = ~hash;
slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
slot += (old_pte & _PAGE_F_GIX) >> 12;
- if (ppc_md.hpte_updatepp(slot, rflags, va, mmu_psize,
+ if (ppc_md.hpte_updatepp(slot, rflags, vpn, mmu_psize,
ssize, local) == -1)
old_pte &= ~_PAGE_HPTEFLAGS;
}
if (likely(!(old_pte & _PAGE_HASHPTE))) {
- unsigned long hash = hpt_hash(va, shift, ssize);
+ unsigned long hash = hpt_hash(vpn, shift, ssize);
unsigned long hpte_group;
pa = pte_pfn(__pte(old_pte)) << PAGE_SHIFT;
@@ -101,14 +102,14 @@ repeat:
_PAGE_COHERENT | _PAGE_GUARDED));
/* Insert into the hash table, primary slot */
- slot = ppc_md.hpte_insert(hpte_group, va, pa, rflags, 0,
+ slot = ppc_md.hpte_insert(hpte_group, vpn, pa, rflags, 0,
mmu_psize, ssize);
/* Primary is full, try the secondary */
if (unlikely(slot == -1)) {
hpte_group = ((~hash & htab_hash_mask) *
HPTES_PER_GROUP) & ~0x7UL;
- slot = ppc_md.hpte_insert(hpte_group, va, pa, rflags,
+ slot = ppc_md.hpte_insert(hpte_group, vpn, pa, rflags,
HPTE_V_SECONDARY,
mmu_psize, ssize);
if (slot == -1) {
diff --git a/arch/powerpc/mm/init_64.c b/arch/powerpc/mm/init_64.c
index 620b7acd2fd..95a45293e5a 100644
--- a/arch/powerpc/mm/init_64.c
+++ b/arch/powerpc/mm/init_64.c
@@ -62,7 +62,6 @@
#include <asm/cputable.h>
#include <asm/sections.h>
#include <asm/iommu.h>
-#include <asm/abs_addr.h>
#include <asm/vdso.h>
#include "mmu_decl.h"
diff --git a/arch/powerpc/mm/mem.c b/arch/powerpc/mm/mem.c
index fbdad0e3929..0dba5066c22 100644
--- a/arch/powerpc/mm/mem.c
+++ b/arch/powerpc/mm/mem.c
@@ -62,7 +62,7 @@
int init_bootmem_done;
int mem_init_done;
-phys_addr_t memory_limit;
+unsigned long long memory_limit;
#ifdef CONFIG_HIGHMEM
pte_t *kmap_pte;
@@ -300,8 +300,7 @@ void __init mem_init(void)
unsigned long reservedpages = 0, codesize, initsize, datasize, bsssize;
#ifdef CONFIG_SWIOTLB
- if (ppc_swiotlb_enable)
- swiotlb_init(1);
+ swiotlb_init(0);
#endif
num_physpages = memblock_phys_mem_size() >> PAGE_SHIFT;
diff --git a/arch/powerpc/mm/mmu_context_hash64.c b/arch/powerpc/mm/mmu_context_hash64.c
index 40677aa0190..40bc5b0ace5 100644
--- a/arch/powerpc/mm/mmu_context_hash64.c
+++ b/arch/powerpc/mm/mmu_context_hash64.c
@@ -30,11 +30,13 @@ static DEFINE_SPINLOCK(mmu_context_lock);
static DEFINE_IDA(mmu_context_ida);
/*
- * The proto-VSID space has 2^35 - 1 segments available for user mappings.
- * Each segment contains 2^28 bytes. Each context maps 2^44 bytes,
- * so we can support 2^19-1 contexts (19 == 35 + 28 - 44).
+ * 256MB segment
+ * The proto-VSID space has 2^(CONTEXT_BITS + USER_ESID_BITS) - 1 segments
+ * available for user mappings. Each segment contains 2^28 bytes. Each
+ * context maps 2^46 bytes (64TB) so we can support 2^19-1 contexts
+ * (19 == 37 + 28 - 46).
*/
-#define MAX_CONTEXT ((1UL << 19) - 1)
+#define MAX_CONTEXT ((1UL << CONTEXT_BITS) - 1)
int __init_new_context(void)
{
diff --git a/arch/powerpc/mm/pgtable_64.c b/arch/powerpc/mm/pgtable_64.c
index 249a0631c4d..e212a271c7a 100644
--- a/arch/powerpc/mm/pgtable_64.c
+++ b/arch/powerpc/mm/pgtable_64.c
@@ -51,13 +51,22 @@
#include <asm/processor.h>
#include <asm/cputable.h>
#include <asm/sections.h>
-#include <asm/abs_addr.h>
#include <asm/firmware.h>
#include "mmu_decl.h"
-unsigned long ioremap_bot = IOREMAP_BASE;
+/* Some sanity checking */
+#if TASK_SIZE_USER64 > PGTABLE_RANGE
+#error TASK_SIZE_USER64 exceeds pagetable range
+#endif
+
+#ifdef CONFIG_PPC_STD_MMU_64
+#if TASK_SIZE_USER64 > (1UL << (USER_ESID_BITS + SID_SHIFT))
+#error TASK_SIZE_USER64 exceeds user VSID range
+#endif
+#endif
+unsigned long ioremap_bot = IOREMAP_BASE;
#ifdef CONFIG_PPC_MMU_NOHASH
static void *early_alloc_pgtable(unsigned long size)
diff --git a/arch/powerpc/mm/slb_low.S b/arch/powerpc/mm/slb_low.S
index b9ee79ce220..1a16ca22775 100644
--- a/arch/powerpc/mm/slb_low.S
+++ b/arch/powerpc/mm/slb_low.S
@@ -56,6 +56,12 @@ _GLOBAL(slb_allocate_realmode)
*/
_GLOBAL(slb_miss_kernel_load_linear)
li r11,0
+ li r9,0x1
+ /*
+ * for 1T we shift 12 bits more. slb_finish_load_1T will do
+ * the necessary adjustment
+ */
+ rldimi r10,r9,(CONTEXT_BITS + USER_ESID_BITS),0
BEGIN_FTR_SECTION
b slb_finish_load
END_MMU_FTR_SECTION_IFCLR(MMU_FTR_1T_SEGMENT)
@@ -85,6 +91,12 @@ _GLOBAL(slb_miss_kernel_load_vmemmap)
_GLOBAL(slb_miss_kernel_load_io)
li r11,0
6:
+ li r9,0x1
+ /*
+ * for 1T we shift 12 bits more. slb_finish_load_1T will do
+ * the necessary adjustment
+ */
+ rldimi r10,r9,(CONTEXT_BITS + USER_ESID_BITS),0
BEGIN_FTR_SECTION
b slb_finish_load
END_MMU_FTR_SECTION_IFCLR(MMU_FTR_1T_SEGMENT)
@@ -108,17 +120,31 @@ END_MMU_FTR_SECTION_IFCLR(MMU_FTR_1T_SEGMENT)
* between 4k and 64k standard page size
*/
#ifdef CONFIG_PPC_MM_SLICES
+ /* r10 have esid */
cmpldi r10,16
-
- /* Get the slice index * 4 in r11 and matching slice size mask in r9 */
- ld r9,PACALOWSLICESPSIZE(r13)
- sldi r11,r10,2
+ /* below SLICE_LOW_TOP */
blt 5f
- ld r9,PACAHIGHSLICEPSIZE(r13)
- srdi r11,r10,(SLICE_HIGH_SHIFT - SLICE_LOW_SHIFT - 2)
- andi. r11,r11,0x3c
+ /*
+ * Handle hpsizes,
+ * r9 is get_paca()->context.high_slices_psize[index], r11 is mask_index
+ */
+ srdi r11,r10,(SLICE_HIGH_SHIFT - SLICE_LOW_SHIFT + 1) /* index */
+ addi r9,r11,PACAHIGHSLICEPSIZE
+ lbzx r9,r13,r9 /* r9 is hpsizes[r11] */
+ /* r11 = (r10 >> (SLICE_HIGH_SHIFT - SLICE_LOW_SHIFT)) & 0x1 */
+ rldicl r11,r10,(64 - (SLICE_HIGH_SHIFT - SLICE_LOW_SHIFT)),63
+ b 6f
-5: /* Extract the psize and multiply to get an array offset */
+5:
+ /*
+ * Handle lpsizes
+ * r9 is get_paca()->context.low_slices_psize, r11 is index
+ */
+ ld r9,PACALOWSLICESPSIZE(r13)
+ mr r11,r10
+6:
+ sldi r11,r11,2 /* index * 4 */
+ /* Extract the psize and multiply to get an array offset */
srd r9,r9,r11
andi. r9,r9,0xf
mulli r9,r9,MMUPSIZEDEFSIZE
@@ -209,7 +235,11 @@ _GLOBAL(slb_allocate_user)
*/
slb_finish_load:
ASM_VSID_SCRAMBLE(r10,r9,256M)
- rldimi r11,r10,SLB_VSID_SHIFT,16 /* combine VSID and flags */
+ /*
+ * bits above VSID_BITS_256M need to be ignored from r10
+ * also combine VSID and flags
+ */
+ rldimi r11,r10,SLB_VSID_SHIFT,(64 - (SLB_VSID_SHIFT + VSID_BITS_256M))
/* r3 = EA, r11 = VSID data */
/*
@@ -252,10 +282,10 @@ _GLOBAL(slb_compare_rr_to_size)
bge 1f
/* still room in the slb cache */
- sldi r11,r3,1 /* r11 = offset * sizeof(u16) */
- rldicl r10,r10,36,28 /* get low 16 bits of the ESID */
- add r11,r11,r13 /* r11 = (u16 *)paca + offset */
- sth r10,PACASLBCACHE(r11) /* paca->slb_cache[offset] = esid */
+ sldi r11,r3,2 /* r11 = offset * sizeof(u32) */
+ srdi r10,r10,28 /* get the 36 bits of the ESID */
+ add r11,r11,r13 /* r11 = (u32 *)paca + offset */
+ stw r10,PACASLBCACHE(r11) /* paca->slb_cache[offset] = esid */
addi r3,r3,1 /* offset++ */
b 2f
1: /* offset >= SLB_CACHE_ENTRIES */
@@ -273,7 +303,11 @@ _GLOBAL(slb_compare_rr_to_size)
slb_finish_load_1T:
srdi r10,r10,40-28 /* get 1T ESID */
ASM_VSID_SCRAMBLE(r10,r9,1T)
- rldimi r11,r10,SLB_VSID_SHIFT_1T,16 /* combine VSID and flags */
+ /*
+ * bits above VSID_BITS_1T need to be ignored from r10
+ * also combine VSID and flags
+ */
+ rldimi r11,r10,SLB_VSID_SHIFT_1T,(64 - (SLB_VSID_SHIFT_1T + VSID_BITS_1T))
li r10,MMU_SEGSIZE_1T
rldimi r11,r10,SLB_VSID_SSIZE_SHIFT,0 /* insert segment size */
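The two rldimi adjustments above combine the scrambled VSID with the SLB
flags while discarding any bits left above the (now wider) VSID width.
Roughly, in C, with the widths and shifts below being assumptions about this
layout rather than quoted definitions:

	#include <stdint.h>

	#define SLB_VSID_SHIFT		12
	#define SLB_VSID_SHIFT_1T	24
	#define VSID_BITS_256M		38	/* assumed post-series width */
	#define VSID_BITS_1T		26	/* assumed post-series width */

	/* Sketch of what the adjusted rldimi achieves: keep only the valid
	 * VSID bits from the scramble and merge them with the SLB flags. */
	static uint64_t slb_vsid_data_sketch(uint64_t scrambled_vsid,
					     uint64_t flags, int is_1t)
	{
		unsigned int shift = is_1t ? SLB_VSID_SHIFT_1T : SLB_VSID_SHIFT;
		unsigned int bits  = is_1t ? VSID_BITS_1T : VSID_BITS_256M;

		return flags | ((scrambled_vsid & ((1ULL << bits) - 1)) << shift);
	}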
diff --git a/arch/powerpc/mm/slice.c b/arch/powerpc/mm/slice.c
index 73709f7ce92..5829d2a950d 100644
--- a/arch/powerpc/mm/slice.c
+++ b/arch/powerpc/mm/slice.c
@@ -34,6 +34,11 @@
#include <asm/mmu.h>
#include <asm/spu.h>
+/* some sanity checks */
+#if (PGTABLE_RANGE >> 43) > SLICE_MASK_SIZE
+#error PGTABLE_RANGE exceeds slice_mask high_slices size
+#endif
+
static DEFINE_SPINLOCK(slice_convert_lock);
@@ -42,7 +47,7 @@ int _slice_debug = 1;
static void slice_print_mask(const char *label, struct slice_mask mask)
{
- char *p, buf[16 + 3 + 16 + 1];
+ char *p, buf[16 + 3 + 64 + 1];
int i;
if (!_slice_debug)
@@ -54,7 +59,7 @@ static void slice_print_mask(const char *label, struct slice_mask mask)
*(p++) = '-';
*(p++) = ' ';
for (i = 0; i < SLICE_NUM_HIGH; i++)
- *(p++) = (mask.high_slices & (1 << i)) ? '1' : '0';
+ *(p++) = (mask.high_slices & (1ul << i)) ? '1' : '0';
*(p++) = 0;
printk(KERN_DEBUG "%s:%s\n", label, buf);
@@ -84,8 +89,8 @@ static struct slice_mask slice_range_to_mask(unsigned long start,
}
if ((start + len) > SLICE_LOW_TOP)
- ret.high_slices = (1u << (GET_HIGH_SLICE_INDEX(end) + 1))
- - (1u << GET_HIGH_SLICE_INDEX(start));
+ ret.high_slices = (1ul << (GET_HIGH_SLICE_INDEX(end) + 1))
+ - (1ul << GET_HIGH_SLICE_INDEX(start));
return ret;
}
@@ -135,26 +140,31 @@ static struct slice_mask slice_mask_for_free(struct mm_struct *mm)
for (i = 0; i < SLICE_NUM_HIGH; i++)
if (!slice_high_has_vma(mm, i))
- ret.high_slices |= 1u << i;
+ ret.high_slices |= 1ul << i;
return ret;
}
static struct slice_mask slice_mask_for_size(struct mm_struct *mm, int psize)
{
+ unsigned char *hpsizes;
+ int index, mask_index;
struct slice_mask ret = { 0, 0 };
unsigned long i;
- u64 psizes;
+ u64 lpsizes;
- psizes = mm->context.low_slices_psize;
+ lpsizes = mm->context.low_slices_psize;
for (i = 0; i < SLICE_NUM_LOW; i++)
- if (((psizes >> (i * 4)) & 0xf) == psize)
+ if (((lpsizes >> (i * 4)) & 0xf) == psize)
ret.low_slices |= 1u << i;
- psizes = mm->context.high_slices_psize;
- for (i = 0; i < SLICE_NUM_HIGH; i++)
- if (((psizes >> (i * 4)) & 0xf) == psize)
- ret.high_slices |= 1u << i;
+ hpsizes = mm->context.high_slices_psize;
+ for (i = 0; i < SLICE_NUM_HIGH; i++) {
+ mask_index = i & 0x1;
+ index = i >> 1;
+ if (((hpsizes[index] >> (mask_index * 4)) & 0xf) == psize)
+ ret.high_slices |= 1ul << i;
+ }
return ret;
}
@@ -183,8 +193,10 @@ static void slice_flush_segments(void *parm)
static void slice_convert(struct mm_struct *mm, struct slice_mask mask, int psize)
{
+ int index, mask_index;
/* Write the new slice psize bits */
- u64 lpsizes, hpsizes;
+ unsigned char *hpsizes;
+ u64 lpsizes;
unsigned long i, flags;
slice_dbg("slice_convert(mm=%p, psize=%d)\n", mm, psize);
@@ -201,14 +213,18 @@ static void slice_convert(struct mm_struct *mm, struct slice_mask mask, int psiz
lpsizes = (lpsizes & ~(0xful << (i * 4))) |
(((unsigned long)psize) << (i * 4));
- hpsizes = mm->context.high_slices_psize;
- for (i = 0; i < SLICE_NUM_HIGH; i++)
- if (mask.high_slices & (1u << i))
- hpsizes = (hpsizes & ~(0xful << (i * 4))) |
- (((unsigned long)psize) << (i * 4));
-
+ /* Assign the value back */
mm->context.low_slices_psize = lpsizes;
- mm->context.high_slices_psize = hpsizes;
+
+ hpsizes = mm->context.high_slices_psize;
+ for (i = 0; i < SLICE_NUM_HIGH; i++) {
+ mask_index = i & 0x1;
+ index = i >> 1;
+ if (mask.high_slices & (1ul << i))
+ hpsizes[index] = (hpsizes[index] &
+ ~(0xf << (mask_index * 4))) |
+ (((unsigned long)psize) << (mask_index * 4));
+ }
slice_dbg(" lsps=%lx, hsps=%lx\n",
mm->context.low_slices_psize,
@@ -587,18 +603,19 @@ unsigned long arch_get_unmapped_area_topdown(struct file *filp,
unsigned int get_slice_psize(struct mm_struct *mm, unsigned long addr)
{
- u64 psizes;
- int index;
+ unsigned char *hpsizes;
+ int index, mask_index;
if (addr < SLICE_LOW_TOP) {
- psizes = mm->context.low_slices_psize;
+ u64 lpsizes;
+ lpsizes = mm->context.low_slices_psize;
index = GET_LOW_SLICE_INDEX(addr);
- } else {
- psizes = mm->context.high_slices_psize;
- index = GET_HIGH_SLICE_INDEX(addr);
+ return (lpsizes >> (index * 4)) & 0xf;
}
-
- return (psizes >> (index * 4)) & 0xf;
+ hpsizes = mm->context.high_slices_psize;
+ index = GET_HIGH_SLICE_INDEX(addr);
+ mask_index = index & 0x1;
+ return (hpsizes[index >> 1] >> (mask_index * 4)) & 0xf;
}
EXPORT_SYMBOL_GPL(get_slice_psize);
@@ -618,7 +635,9 @@ EXPORT_SYMBOL_GPL(get_slice_psize);
*/
void slice_set_user_psize(struct mm_struct *mm, unsigned int psize)
{
- unsigned long flags, lpsizes, hpsizes;
+ int index, mask_index;
+ unsigned char *hpsizes;
+ unsigned long flags, lpsizes;
unsigned int old_psize;
int i;
@@ -639,15 +658,21 @@ void slice_set_user_psize(struct mm_struct *mm, unsigned int psize)
if (((lpsizes >> (i * 4)) & 0xf) == old_psize)
lpsizes = (lpsizes & ~(0xful << (i * 4))) |
(((unsigned long)psize) << (i * 4));
+ /* Assign the value back */
+ mm->context.low_slices_psize = lpsizes;
hpsizes = mm->context.high_slices_psize;
- for (i = 0; i < SLICE_NUM_HIGH; i++)
- if (((hpsizes >> (i * 4)) & 0xf) == old_psize)
- hpsizes = (hpsizes & ~(0xful << (i * 4))) |
- (((unsigned long)psize) << (i * 4));
+ for (i = 0; i < SLICE_NUM_HIGH; i++) {
+ mask_index = i & 0x1;
+ index = i >> 1;
+ if (((hpsizes[index] >> (mask_index * 4)) & 0xf) == old_psize)
+ hpsizes[index] = (hpsizes[index] &
+ ~(0xf << (mask_index * 4))) |
+ (((unsigned long)psize) << (mask_index * 4));
+ }
+
+
- mm->context.low_slices_psize = lpsizes;
- mm->context.high_slices_psize = hpsizes;
slice_dbg(" lsps=%lx, hsps=%lx\n",
mm->context.low_slices_psize,
@@ -660,18 +685,27 @@ void slice_set_user_psize(struct mm_struct *mm, unsigned int psize)
void slice_set_psize(struct mm_struct *mm, unsigned long address,
unsigned int psize)
{
+ unsigned char *hpsizes;
unsigned long i, flags;
- u64 *p;
+ u64 *lpsizes;
spin_lock_irqsave(&slice_convert_lock, flags);
if (address < SLICE_LOW_TOP) {
i = GET_LOW_SLICE_INDEX(address);
- p = &mm->context.low_slices_psize;
+ lpsizes = &mm->context.low_slices_psize;
+ *lpsizes = (*lpsizes & ~(0xful << (i * 4))) |
+ ((unsigned long) psize << (i * 4));
} else {
+ int index, mask_index;
i = GET_HIGH_SLICE_INDEX(address);
- p = &mm->context.high_slices_psize;
+ hpsizes = mm->context.high_slices_psize;
+ mask_index = i & 0x1;
+ index = i >> 1;
+ hpsizes[index] = (hpsizes[index] &
+ ~(0xf << (mask_index * 4))) |
+ (((unsigned long)psize) << (mask_index * 4));
}
- *p = (*p & ~(0xful << (i * 4))) | ((unsigned long) psize << (i * 4));
+
spin_unlock_irqrestore(&slice_convert_lock, flags);
#ifdef CONFIG_SPU_BASE
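The slice.c changes above repeatedly update one 4-bit page-size field in the
byte-packed high_slices_psize array. A small sketch of that write path (a
hypothetical helper, not a kernel function), mirroring the read path shown
earlier:

	static void set_high_slice_psize_sketch(unsigned char *hpsizes,
						unsigned long i, unsigned int psize)
	{
		unsigned long mask_index = i & 1;	/* low or high nibble */
		unsigned long index = i >> 1;		/* byte holding slice i */

		hpsizes[index] = (hpsizes[index] & ~(0xf << (mask_index * 4))) |
				 ((psize & 0xf) << (mask_index * 4));
	}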
diff --git a/arch/powerpc/mm/stab.c b/arch/powerpc/mm/stab.c
index 9106ebb118f..3f8efa6f299 100644
--- a/arch/powerpc/mm/stab.c
+++ b/arch/powerpc/mm/stab.c
@@ -20,7 +20,6 @@
#include <asm/paca.h>
#include <asm/cputable.h>
#include <asm/prom.h>
-#include <asm/abs_addr.h>
struct stab_entry {
unsigned long esid_data;
@@ -257,7 +256,7 @@ void __init stabs_alloc(void)
memset((void *)newstab, 0, HW_PAGE_SIZE);
paca[cpu].stab_addr = newstab;
- paca[cpu].stab_real = virt_to_abs(newstab);
+ paca[cpu].stab_real = __pa(newstab);
printk(KERN_INFO "Segment table for CPU %d at 0x%llx "
"virtual, 0x%llx absolute\n",
cpu, paca[cpu].stab_addr, paca[cpu].stab_real);
diff --git a/arch/powerpc/mm/subpage-prot.c b/arch/powerpc/mm/subpage-prot.c
index e4f8f1fc81a..7c415ddde94 100644
--- a/arch/powerpc/mm/subpage-prot.c
+++ b/arch/powerpc/mm/subpage-prot.c
@@ -95,7 +95,8 @@ static void subpage_prot_clear(unsigned long addr, unsigned long len)
struct mm_struct *mm = current->mm;
struct subpage_prot_table *spt = &mm->context.spt;
u32 **spm, *spp;
- int i, nw;
+ unsigned long i;
+ size_t nw;
unsigned long next, limit;
down_write(&mm->mmap_sem);
@@ -144,7 +145,8 @@ long sys_subpage_prot(unsigned long addr, unsigned long len, u32 __user *map)
struct mm_struct *mm = current->mm;
struct subpage_prot_table *spt = &mm->context.spt;
u32 **spm, *spp;
- int i, nw;
+ unsigned long i;
+ size_t nw;
unsigned long next, limit;
int err;
diff --git a/arch/powerpc/mm/tlb_hash64.c b/arch/powerpc/mm/tlb_hash64.c
index 31f18207970..ae758b3ff72 100644
--- a/arch/powerpc/mm/tlb_hash64.c
+++ b/arch/powerpc/mm/tlb_hash64.c
@@ -42,8 +42,9 @@ DEFINE_PER_CPU(struct ppc64_tlb_batch, ppc64_tlb_batch);
void hpte_need_flush(struct mm_struct *mm, unsigned long addr,
pte_t *ptep, unsigned long pte, int huge)
{
+ unsigned long vpn;
struct ppc64_tlb_batch *batch = &get_cpu_var(ppc64_tlb_batch);
- unsigned long vsid, vaddr;
+ unsigned long vsid;
unsigned int psize;
int ssize;
real_pte_t rpte;
@@ -86,7 +87,7 @@ void hpte_need_flush(struct mm_struct *mm, unsigned long addr,
vsid = get_kernel_vsid(addr, mmu_kernel_ssize);
ssize = mmu_kernel_ssize;
}
- vaddr = hpt_va(addr, vsid, ssize);
+ vpn = hpt_vpn(addr, vsid, ssize);
rpte = __real_pte(__pte(pte), ptep);
/*
@@ -96,7 +97,7 @@ void hpte_need_flush(struct mm_struct *mm, unsigned long addr,
* and decide to use local invalidates instead...
*/
if (!batch->active) {
- flush_hash_page(vaddr, rpte, psize, ssize, 0);
+ flush_hash_page(vpn, rpte, psize, ssize, 0);
put_cpu_var(ppc64_tlb_batch);
return;
}
@@ -122,7 +123,7 @@ void hpte_need_flush(struct mm_struct *mm, unsigned long addr,
batch->ssize = ssize;
}
batch->pte[i] = rpte;
- batch->vaddr[i] = vaddr;
+ batch->vpn[i] = vpn;
batch->index = ++i;
if (i >= PPC64_TLB_BATCH_NR)
__flush_tlb_pending(batch);
@@ -146,7 +147,7 @@ void __flush_tlb_pending(struct ppc64_tlb_batch *batch)
if (cpumask_equal(mm_cpumask(batch->mm), tmp))
local = 1;
if (i == 1)
- flush_hash_page(batch->vaddr[0], batch->pte[0],
+ flush_hash_page(batch->vpn[0], batch->pte[0],
batch->psize, batch->ssize, local);
else
flush_hash_range(i, local);
diff --git a/arch/powerpc/mm/tlb_low_64e.S b/arch/powerpc/mm/tlb_low_64e.S
index f09d48e3268..b4113bf8635 100644
--- a/arch/powerpc/mm/tlb_low_64e.S
+++ b/arch/powerpc/mm/tlb_low_64e.S
@@ -20,6 +20,8 @@
#include <asm/pgtable.h>
#include <asm/exception-64e.h>
#include <asm/ppc-opcode.h>
+#include <asm/kvm_asm.h>
+#include <asm/kvm_booke_hv_asm.h>
#ifdef CONFIG_PPC_64K_PAGES
#define VPTE_PMD_SHIFT (PTE_INDEX_SIZE+1)
@@ -37,12 +39,18 @@
* *
**********************************************************************/
-.macro tlb_prolog_bolted addr
- mtspr SPRN_SPRG_TLB_SCRATCH,r13
+.macro tlb_prolog_bolted intnum addr
+ mtspr SPRN_SPRG_GEN_SCRATCH,r13
mfspr r13,SPRN_SPRG_PACA
std r10,PACA_EXTLB+EX_TLB_R10(r13)
mfcr r10
std r11,PACA_EXTLB+EX_TLB_R11(r13)
+#ifdef CONFIG_KVM_BOOKE_HV
+BEGIN_FTR_SECTION
+ mfspr r11, SPRN_SRR1
+END_FTR_SECTION_IFSET(CPU_FTR_EMB_HV)
+#endif
+ DO_KVM \intnum, SPRN_SRR1
std r16,PACA_EXTLB+EX_TLB_R16(r13)
mfspr r16,\addr /* get faulting address */
std r14,PACA_EXTLB+EX_TLB_R14(r13)
@@ -61,12 +69,12 @@
ld r15,PACA_EXTLB+EX_TLB_R15(r13)
TLB_MISS_RESTORE_STATS_BOLTED
ld r16,PACA_EXTLB+EX_TLB_R16(r13)
- mfspr r13,SPRN_SPRG_TLB_SCRATCH
+ mfspr r13,SPRN_SPRG_GEN_SCRATCH
.endm
/* Data TLB miss */
START_EXCEPTION(data_tlb_miss_bolted)
- tlb_prolog_bolted SPRN_DEAR
+ tlb_prolog_bolted BOOKE_INTERRUPT_DTLB_MISS SPRN_DEAR
/* We need _PAGE_PRESENT and _PAGE_ACCESSED set */
@@ -214,7 +222,7 @@ itlb_miss_fault_bolted:
/* Instruction TLB miss */
START_EXCEPTION(instruction_tlb_miss_bolted)
- tlb_prolog_bolted SPRN_SRR0
+ tlb_prolog_bolted BOOKE_INTERRUPT_ITLB_MISS SPRN_SRR0
rldicl. r10,r16,64-PGTABLE_EADDR_SIZE,PGTABLE_EADDR_SIZE+4
srdi r15,r16,60 /* get region */