path: root/arch/powerpc/mm/slb.c
author    Paul Mackerras <paulus@samba.org>  2006-06-15 10:45:18 +1000
committer Paul Mackerras <paulus@samba.org>  2006-06-15 10:45:18 +1000
commit    bf72aeba2ffef599d1d386425c9e46b82be657cd (patch)
tree      ead8e5111dbcfa22e156999d1bb8a96e50f06fef /arch/powerpc/mm/slb.c
parent    31925323b1b51bb65db729e029472a8b1f635b7d (diff)
powerpc: Use 64k pages without needing cache-inhibited large pages
Some POWER5+ machines can do 64k hardware pages for normal memory but not for cache-inhibited pages. This patch lets us use 64k hardware pages for most user processes on such machines (assuming the kernel has been configured with CONFIG_PPC_64K_PAGES=y). User processes start out using 64k pages and get switched to 4k pages if they use any non-cacheable mappings.

With this, we use 64k pages for the vmalloc region and 4k pages for the imalloc region. If anything creates a non-cacheable mapping in the vmalloc region, the vmalloc region will get switched to 4k pages. I don't know of any driver other than the DRM that would do this, though, and these machines don't have AGP.

When a region gets switched from 64k pages to 4k pages, we do not have to clear out all the 64k HPTEs from the hash table immediately. We use the _PAGE_COMBO bit in the Linux PTE to indicate whether the page was hashed in as a 64k page or a set of 4k pages. If hash_page is trying to insert a 4k page for a Linux PTE and it sees that it has already been inserted as a 64k page, it first invalidates the 64k HPTE before inserting the 4k HPTE. The hash invalidation routines also use the _PAGE_COMBO bit, to determine whether to look for a 64k HPTE or a set of 4k HPTEs to remove. With those two changes, we can tolerate a mix of 4k and 64k HPTEs in the hash table, and they will all get removed when the address space is torn down.

Signed-off-by: Paul Mackerras <paulus@samba.org>
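For readers unfamiliar with the mechanism, the following is a minimal, self-contained C sketch of the _PAGE_COMBO handling described in the message. The flag values and the helpers invalidate_64k_hpte() and insert_4k_hpte() are illustrative stand-ins, not the actual arch/powerpc routines:

#include <stdio.h>

/* Simplified PTE flags -- the bit positions here are illustrative only. */
#define _PAGE_HASHPTE 0x1UL  /* PTE has an entry in the hash table */
#define _PAGE_COMBO   0x2UL  /* that entry is a set of 4k HPTEs, not one 64k HPTE */

/* Stand-ins for the real HPTE manipulation routines. */
static void invalidate_64k_hpte(unsigned long ea)
{
	printf("invalidating stale 64k HPTE for ea=0x%lx\n", ea);
}

static void insert_4k_hpte(unsigned long ea)
{
	printf("inserting 4k HPTE for ea=0x%lx\n", ea);
}

/*
 * Sketch of the hash_page behaviour the commit message describes:
 * before inserting a 4k HPTE for a PTE that was previously hashed in
 * as a 64k page, the stale 64k HPTE must be invalidated first.
 */
static void hash_page_4k(unsigned long *pte, unsigned long ea)
{
	if ((*pte & _PAGE_HASHPTE) && !(*pte & _PAGE_COMBO))
		invalidate_64k_hpte(ea);

	insert_4k_hpte(ea);

	/* Record that this PTE is now backed by 4k HPTEs, so the
	 * invalidation path knows what to look for at teardown. */
	*pte |= _PAGE_HASHPTE | _PAGE_COMBO;
}

int main(void)
{
	unsigned long pte = _PAGE_HASHPTE;  /* previously hashed as 64k */
	hash_page_4k(&pte, 0x10000000UL);   /* flushes the 64k HPTE first */
	hash_page_4k(&pte, 0x10000000UL);   /* second fault: no 64k flush */
	return 0;
}

The design choice this illustrates is lazy invalidation: demoting a region from 64k to 4k pages does not scan the hash table; stale 64k HPTEs are flushed only when the corresponding PTE is next faulted in as 4k pages or when the address space is torn down.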
Diffstat (limited to 'arch/powerpc/mm/slb.c')
-rw-r--r--  arch/powerpc/mm/slb.c  29
1 file changed, 14 insertions, 15 deletions
diff --git a/arch/powerpc/mm/slb.c b/arch/powerpc/mm/slb.c
index 2cc61736fee..6a8bf6c6000 100644
--- a/arch/powerpc/mm/slb.c
+++ b/arch/powerpc/mm/slb.c
@@ -60,19 +60,19 @@ static inline void create_slbe(unsigned long ea, unsigned long flags,
: "memory" );
}
-static void slb_flush_and_rebolt(void)
+void slb_flush_and_rebolt(void)
{
/* If you change this make sure you change SLB_NUM_BOLTED
* appropriately too. */
- unsigned long linear_llp, virtual_llp, lflags, vflags;
+ unsigned long linear_llp, vmalloc_llp, lflags, vflags;
unsigned long ksp_esid_data;
WARN_ON(!irqs_disabled());
linear_llp = mmu_psize_defs[mmu_linear_psize].sllp;
- virtual_llp = mmu_psize_defs[mmu_virtual_psize].sllp;
+ vmalloc_llp = mmu_psize_defs[mmu_vmalloc_psize].sllp;
lflags = SLB_VSID_KERNEL | linear_llp;
- vflags = SLB_VSID_KERNEL | virtual_llp;
+ vflags = SLB_VSID_KERNEL | vmalloc_llp;
ksp_esid_data = mk_esid_data(get_paca()->kstack, 2);
if ((ksp_esid_data & ESID_MASK) == PAGE_OFFSET)
@@ -164,11 +164,10 @@ static inline void patch_slb_encoding(unsigned int *insn_addr,
void slb_initialize(void)
{
- unsigned long linear_llp, virtual_llp;
+ unsigned long linear_llp, vmalloc_llp, io_llp;
static int slb_encoding_inited;
extern unsigned int *slb_miss_kernel_load_linear;
- extern unsigned int *slb_miss_kernel_load_virtual;
- extern unsigned int *slb_miss_user_load_normal;
+ extern unsigned int *slb_miss_kernel_load_io;
#ifdef CONFIG_HUGETLB_PAGE
extern unsigned int *slb_miss_user_load_huge;
unsigned long huge_llp;
@@ -178,18 +177,19 @@ void slb_initialize(void)
/* Prepare our SLB miss handler based on our page size */
linear_llp = mmu_psize_defs[mmu_linear_psize].sllp;
- virtual_llp = mmu_psize_defs[mmu_virtual_psize].sllp;
+ io_llp = mmu_psize_defs[mmu_io_psize].sllp;
+ vmalloc_llp = mmu_psize_defs[mmu_vmalloc_psize].sllp;
+ get_paca()->vmalloc_sllp = SLB_VSID_KERNEL | vmalloc_llp;
+
if (!slb_encoding_inited) {
slb_encoding_inited = 1;
patch_slb_encoding(slb_miss_kernel_load_linear,
SLB_VSID_KERNEL | linear_llp);
- patch_slb_encoding(slb_miss_kernel_load_virtual,
- SLB_VSID_KERNEL | virtual_llp);
- patch_slb_encoding(slb_miss_user_load_normal,
- SLB_VSID_USER | virtual_llp);
+ patch_slb_encoding(slb_miss_kernel_load_io,
+ SLB_VSID_KERNEL | io_llp);
DBG("SLB: linear LLP = %04x\n", linear_llp);
- DBG("SLB: virtual LLP = %04x\n", virtual_llp);
+ DBG("SLB: io LLP = %04x\n", io_llp);
#ifdef CONFIG_HUGETLB_PAGE
patch_slb_encoding(slb_miss_user_load_huge,
SLB_VSID_USER | huge_llp);
@@ -204,7 +204,7 @@ void slb_initialize(void)
unsigned long lflags, vflags;
lflags = SLB_VSID_KERNEL | linear_llp;
- vflags = SLB_VSID_KERNEL | virtual_llp;
+ vflags = SLB_VSID_KERNEL | vmalloc_llp;
/* Invalidate the entire SLB (even slot 0) & all the ERATS */
asm volatile("isync":::"memory");
@@ -212,7 +212,6 @@ void slb_initialize(void)
asm volatile("isync; slbia; isync":::"memory");
create_slbe(PAGE_OFFSET, lflags, 0);
- /* VMALLOC space has 4K pages always for now */
create_slbe(VMALLOC_START, vflags, 1);
/* We don't bolt the stack for the time being - we're in boot,