Diffstat (limited to 'arch/powerpc/mm/mem.c')
-rw-r--r--  arch/powerpc/mm/mem.c  51
1 file changed, 19 insertions(+), 32 deletions(-)
diff --git a/arch/powerpc/mm/mem.c b/arch/powerpc/mm/mem.c
index 0a64fffabee1..20394e52fe27 100644
--- a/arch/powerpc/mm/mem.c
+++ b/arch/powerpc/mm/mem.c
@@ -246,35 +246,19 @@ static int __init mark_nonram_nosave(void)
}
#endif
-static bool zone_limits_final;
-
/*
- * The memory zones past TOP_ZONE are managed by generic mm code.
- * These should be set to zero since that's what every other
- * architecture does.
+ * Zones usage:
+ *
+ * We set up ZONE_DMA to be 31 bits on all platforms and ZONE_NORMAL to be
+ * everything else. GFP_DMA32 page allocations automatically fall back to
+ * ZONE_DMA.
+ *
+ * By using 31 bits unconditionally, we can exploit ARCH_ZONE_DMA_BITS to
+ * inform the generic DMA mapping code. 32-bit-only devices (if not handled
+ * by an IOMMU anyway) will take a first dip into ZONE_NORMAL and otherwise
+ * be served from ZONE_DMA.
*/
-static unsigned long max_zone_pfns[MAX_NR_ZONES] = {
- [0 ... TOP_ZONE ] = ~0UL,
- [TOP_ZONE + 1 ... MAX_NR_ZONES - 1] = 0
-};
-
-/*
- * Restrict the specified zone and all more restrictive zones
- * to be below the specified pfn. May not be called after
- * paging_init().
- */
-void __init limit_zone_pfn(enum zone_type zone, unsigned long pfn_limit)
-{
- int i;
-
- if (WARN_ON(zone_limits_final))
- return;
-
- for (i = zone; i >= 0; i--) {
- if (max_zone_pfns[i] > pfn_limit)
- max_zone_pfns[i] = pfn_limit;
- }
-}
+static unsigned long max_zone_pfns[MAX_NR_ZONES];
/*
* Find the least restrictive zone that is entirely below the
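
The comment added in the hunk above describes the intended layout: a 31-bit ZONE_DMA, ZONE_NORMAL for everything else, and GFP_DMA32 allocations falling back to ZONE_DMA. As an editorial illustration (not part of the patch), the standalone C sketch below shows how a 31-bit ARCH_ZONE_DMA_BITS value compares against a device's DMA mask; the if/else decision is an assumption modelled on the comment's description of the fallback, not code taken from this file, and DMA_BIT_MASK is redefined locally so the sketch builds in userspace.

/*
 * Illustration only, not from this patch: compare a device DMA mask against
 * a 31-bit ARCH_ZONE_DMA_BITS limit.  DMA_BIT_MASK mirrors the kernel macro;
 * the branch mimics the zone choice described in the comment above.
 */
#include <stdio.h>

#define DMA_BIT_MASK(n)		(((n) == 64) ? ~0ULL : ((1ULL << (n)) - 1))
#define ARCH_ZONE_DMA_BITS	31	/* assumed value implied by the 31-bit ZONE_DMA */

int main(void)
{
	unsigned long long dev_mask = DMA_BIT_MASK(32);	/* a 32-bit only device */

	if (dev_mask <= DMA_BIT_MASK(ARCH_ZONE_DMA_BITS))
		printf("allocate directly from ZONE_DMA\n");
	else
		printf("dip into ZONE_NORMAL first, fall back to ZONE_DMA\n");
	return 0;
}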
@@ -324,11 +308,14 @@ void __init paging_init(void)
printk(KERN_DEBUG "Memory hole size: %ldMB\n",
(long int)((top_of_ram - total_ram) >> 20));
+#ifdef CONFIG_ZONE_DMA
+ max_zone_pfns[ZONE_DMA] = min(max_low_pfn, 0x7fffffffUL >> PAGE_SHIFT);
+#endif
+ max_zone_pfns[ZONE_NORMAL] = max_low_pfn;
#ifdef CONFIG_HIGHMEM
- limit_zone_pfn(ZONE_NORMAL, lowmem_end_addr >> PAGE_SHIFT);
+ max_zone_pfns[ZONE_HIGHMEM] = max_pfn;
#endif
- limit_zone_pfn(TOP_ZONE, top_of_ram >> PAGE_SHIFT);
- zone_limits_final = true;
+
free_area_init_nodes(max_zone_pfns);
mark_nonram_nosave();
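
For concreteness, here is a small userspace sketch (again not part of the patch) of the arithmetic the new paging_init() code performs. PAGE_SHIFT and max_low_pfn are stand-in assumptions for a machine with 4K pages, 4 GiB of lowmem and no highmem; the kernel derives the real values at boot.

/*
 * Illustration only: the zone-limit assignments from the hunk above, redone
 * in userspace with made-up values.
 */
#include <stdio.h>

#define PAGE_SHIFT	12			/* assumed: 4K pages */
enum zone_type { ZONE_DMA, ZONE_NORMAL, ZONE_HIGHMEM, MAX_NR_ZONES };

int main(void)
{
	unsigned long max_zone_pfns[MAX_NR_ZONES] = { 0 };
	unsigned long max_low_pfn = 0x100000;	/* assumed: 4 GiB / 4K pages */
	unsigned long dma_limit = 0x7fffffffUL >> PAGE_SHIFT;

	max_zone_pfns[ZONE_DMA] = max_low_pfn < dma_limit ? max_low_pfn : dma_limit;
	max_zone_pfns[ZONE_NORMAL] = max_low_pfn;

	/* prints 0x7ffff (just under 2 GiB) and 0x100000 (4 GiB) */
	printf("ZONE_DMA    ends at pfn 0x%lx\n", max_zone_pfns[ZONE_DMA]);
	printf("ZONE_NORMAL ends at pfn 0x%lx\n", max_zone_pfns[ZONE_NORMAL]);
	return 0;
}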
@@ -503,7 +490,7 @@ EXPORT_SYMBOL(flush_icache_user_range);
void update_mmu_cache(struct vm_area_struct *vma, unsigned long address,
pte_t *ptep)
{
-#ifdef CONFIG_PPC_STD_MMU
+#ifdef CONFIG_PPC_BOOK3S
/*
* We don't need to worry about _PAGE_PRESENT here because we are
* called with either mm->page_table_lock held or ptl lock held
@@ -541,7 +528,7 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long address,
}
hash_preload(vma->vm_mm, address, is_exec, trap);
-#endif /* CONFIG_PPC_STD_MMU */
+#endif /* CONFIG_PPC_BOOK3S */
#if (defined(CONFIG_PPC_BOOK3E_64) || defined(CONFIG_PPC_FSL_BOOK3E)) \
&& defined(CONFIG_HUGETLB_PAGE)
if (is_vm_hugetlb_page(vma))