author     Benjamin Herrenschmidt <benh@kernel.crashing.org>  2009-07-23 23:15:28 +0000
committer  Benjamin Herrenschmidt <benh@kernel.crashing.org>  2009-08-20 10:24:56 +1000
commit     c7cc58a1ad8dfe3c199d3b6ce50412b86dd3edaf
tree       1d1ded72de81743ddd1306677d64757136972402
parent     cf54dc7cd4f9aab55cd3e1794b0b74c3c88cd1a0
powerpc/mm: Rework & cleanup page table freeing code path
This patch started out as just adding a hook to page table flushing, but
pulling on that string brought out a whole bunch of issues, so it now does
that and more:

 - We now make the RCU batching of page freeing SMP-only, as I believe it
   was intended initially. We make a few more things compile to nothing on
   !CONFIG_SMP.

 - Some macros are turned into functions, though that forced me to
   out-of-line a few things due to unsolvable include dependencies.
   However, it's probably better that way anyway; it's not -that- critical
   a code path.

 - 32-bit didn't call pte_free_finish() from tlb_flush(), which means it
   wouldn't push the batch out to RCU for delayed freeing when a bunch of
   page tables had been freed; they would just sit there until the batch
   got full.

64-bit BookE will use this hook to maintain the virtually linear page
tables or the indirect entries in the TLB when using the HW loader.

Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
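(Illustration only, not part of the commit: a minimal sketch of what the
now out-of-lined tlb_flush() plausibly looks like, modelled on the inline
64-bit version removed in the diff below. The exact file it moved to is
not visible in this header-limited view, so the placement is an
assumption.)

	/* Sketch: based on the removed inline 64-bit tlb_flush() below.
	 * The real out-of-lined function lives somewhere under
	 * arch/powerpc/mm/ (assumed, not shown in this diffstat view).
	 */
	void tlb_flush(struct mmu_gather *tlb)
	{
		struct ppc64_tlb_batch *tlbbatch = &__get_cpu_var(ppc64_tlb_batch);

		/* Flush any pending hash invalidations first, so no CPU can
		 * reach a soon-to-be-freed page through a stale TLB entry. */
		if (tlbbatch->index)
			__flush_tlb_pending(tlbbatch);

		/* Push freed page tables out to RCU for delayed freeing --
		 * the step the 32-bit path previously skipped. */
		pte_free_finish();
	}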
Diffstat (limited to 'arch/powerpc/include/asm/tlb.h')
-rw-r--r--  arch/powerpc/include/asm/tlb.h  38
1 file changed, 3 insertions(+), 35 deletions(-)
diff --git a/arch/powerpc/include/asm/tlb.h b/arch/powerpc/include/asm/tlb.h
index e20ff7541f3..e2b428b0f7b 100644
--- a/arch/powerpc/include/asm/tlb.h
+++ b/arch/powerpc/include/asm/tlb.h
@@ -25,57 +25,25 @@
#include <linux/pagemap.h>
-struct mmu_gather;
-
#define tlb_start_vma(tlb, vma) do { } while (0)
#define tlb_end_vma(tlb, vma) do { } while (0)
-#if !defined(CONFIG_PPC_STD_MMU)
-
-#define tlb_flush(tlb) flush_tlb_mm((tlb)->mm)
-
-#elif defined(__powerpc64__)
-
-extern void pte_free_finish(void);
-
-static inline void tlb_flush(struct mmu_gather *tlb)
-{
- struct ppc64_tlb_batch *tlbbatch = &__get_cpu_var(ppc64_tlb_batch);
-
- /* If there's a TLB batch pending, then we must flush it because the
- * pages are going to be freed and we really don't want to have a CPU
- * access a freed page because it has a stale TLB
- */
- if (tlbbatch->index)
- __flush_tlb_pending(tlbbatch);
-
- pte_free_finish();
-}
-
-#else
-
extern void tlb_flush(struct mmu_gather *tlb);
-#endif
-
/* Get the generic bits... */
#include <asm-generic/tlb.h>
-#if !defined(CONFIG_PPC_STD_MMU) || defined(__powerpc64__)
-
-#define __tlb_remove_tlb_entry(tlb, pte, address) do { } while (0)
-
-#else
extern void flush_hash_entry(struct mm_struct *mm, pte_t *ptep,
unsigned long address);
static inline void __tlb_remove_tlb_entry(struct mmu_gather *tlb, pte_t *ptep,
- unsigned long address)
+ unsigned long address)
{
+#ifdef CONFIG_PPC_STD_MMU_32
if (pte_val(*ptep) & _PAGE_HASHPTE)
flush_hash_entry(tlb->mm, ptep, address);
+#endif
}
-#endif
#endif /* __KERNEL__ */
#endif /* __ASM_POWERPC_TLB_H */
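(Also for illustration, not part of the commit: the __tlb_remove_tlb_entry()
hook kept above is invoked by the generic mmu_gather code pulled in via
<asm-generic/tlb.h>. Roughly, the generic wrapper of that era looked like
the following -- a sketch from memory, not quoted from this tree:)

	#define tlb_remove_tlb_entry(tlb, ptep, address)		\
		do {							\
			tlb->need_flush = 1;				\
			__tlb_remove_tlb_entry(tlb, ptep, address);	\
		} while (0)

On CONFIG_PPC_STD_MMU_32 the hook flushes the hash-table entry for the PTE
before the underlying page can be reused; on every other configuration the
#ifdef makes it compile to nothing, matching the old no-op macro it replaces.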