author     Stefan Agner <stefan@agner.ch>                 2015-08-13 00:01:52 +0100
committer  Russell King <rmk+kernel@arm.linux.org.uk>     2015-08-18 14:00:29 +0100
commit     a5f4c561b3b19a9bc43a81da6382b0098ebbc1fb (patch)
tree       80905f24141bfc42c6506f65168d0188a2060b69 /arch/arm/mm
parent     efaa6e266ba70439da00e7f1c8a218e243ae140a (diff)
ARM: 8415/1: early fixmap support for earlycon
Add early fixmap support, initially to provide a permanent, fixed mapping for the early console. A temporary, early pte is created which is migrated to a permanent mapping in paging_init. This is also needed since the attributes may change as the memory types are initialized.

The 3MiB range of fixmap spans two pte tables, but currently only one pte is created for early fixmap support.

Re-add FIX_KMAP_BEGIN to the index calculation in highmem.c since the index for kmap does not start at zero anymore. This reverts 4221e2e6b316 ("ARM: 8031/1: fixmap: remove FIX_KMAP_BEGIN and FIX_KMAP_END") to some extent.

Cc: Mark Salter <msalter@redhat.com>
Cc: Kees Cook <keescook@chromium.org>
Cc: Laura Abbott <lauraa@codeaurora.org>
Cc: Arnd Bergmann <arnd@arndb.de>
Cc: Ard Biesheuvel <ard.biesheuvel@linaro.org>
Signed-off-by: Rob Herring <robh@kernel.org>
Signed-off-by: Stefan Agner <stefan@agner.ch>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
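Why re-adding FIX_KMAP_BEGIN matters can be seen with a small standalone model of the index arithmetic. The sketch below is an illustration only, not kernel code: the constants (the 3 MiB window at 0xffc00000, 4 KiB pages, KM_TYPE_NR = 16) and the name FIX_EARLYCON_MEM_BASE for the permanent early-console slot are assumptions chosen to mirror the layout described above, and fix_to_virt() follows the asm-generic convention of counting indices down from FIXADDR_TOP. Once a permanent slot occupies index 0, the old "type + KM_TYPE_NR * cpu" calculation would hand kmap_atomic() the same virtual page as that permanent mapping, which is what the FIX_KMAP_BEGIN offset in highmem.c avoids.

/*
 * Standalone model of the fixmap index arithmetic this patch touches.
 * NOT kernel code: the constants below are assumptions mirroring the
 * ARM layout described in the commit message (3 MiB fixmap window at
 * 0xffc00000, 4 KiB pages, 16 kmap slots per CPU). The authoritative
 * definitions live in arch/arm/include/asm/fixmap.h and kmap_types.h.
 */
#include <stdio.h>

#define PAGE_SHIFT      12
#define FIXADDR_START   0xffc00000UL
#define FIXADDR_END     0xfff00000UL            /* 3 MiB window */
#define FIXADDR_TOP     (FIXADDR_END - (1UL << PAGE_SHIFT))

#define KM_TYPE_NR      16                      /* assumed kmap slots per CPU */

/*
 * With a permanent slot (the early console mapping in this example)
 * placed ahead of the kmap area, FIX_KMAP_BEGIN is no longer zero.
 */
enum fixed_addresses {
	FIX_EARLYCON_MEM_BASE,
	__end_of_permanent_fixed_addresses,
	FIX_KMAP_BEGIN = __end_of_permanent_fixed_addresses,
};

/* asm-generic/fixmap.h convention: indices count down from FIXADDR_TOP. */
static unsigned long fix_to_virt(unsigned int idx)
{
	return FIXADDR_TOP - ((unsigned long)idx << PAGE_SHIFT);
}

int main(void)
{
	unsigned int type = 0, cpu = 0;
	unsigned int old_idx = type + KM_TYPE_NR * cpu;                  /* pre-patch */
	unsigned int new_idx = FIX_KMAP_BEGIN + type + KM_TYPE_NR * cpu; /* patched   */

	printf("permanent earlycon slot: %#lx\n",
	       fix_to_virt(FIX_EARLYCON_MEM_BASE));
	printf("kmap cpu%u/type%u without FIX_KMAP_BEGIN: idx %u -> %#lx (collides)\n",
	       cpu, type, old_idx, fix_to_virt(old_idx));
	printf("kmap cpu%u/type%u with    FIX_KMAP_BEGIN: idx %u -> %#lx\n",
	       cpu, type, new_idx, fix_to_virt(new_idx));
	return 0;
}

Built with any C compiler, the model prints the permanent slot and the CPU 0 kmap slot landing on the same virtual page when the offset is omitted, and on distinct pages once FIX_KMAP_BEGIN is added, matching the three one-line changes to highmem.c below.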
Diffstat (limited to 'arch/arm/mm')
-rw-r--r--  arch/arm/mm/highmem.c  |  6
-rw-r--r--  arch/arm/mm/mmu.c      | 88
2 files changed, 84 insertions(+), 10 deletions(-)
diff --git a/arch/arm/mm/highmem.c b/arch/arm/mm/highmem.c
index ee8dfa793989..9df5f09585ca 100644
--- a/arch/arm/mm/highmem.c
+++ b/arch/arm/mm/highmem.c
@@ -79,7 +79,7 @@ void *kmap_atomic(struct page *page)
type = kmap_atomic_idx_push();
- idx = type + KM_TYPE_NR * smp_processor_id();
+ idx = FIX_KMAP_BEGIN + type + KM_TYPE_NR * smp_processor_id();
vaddr = __fix_to_virt(idx);
#ifdef CONFIG_DEBUG_HIGHMEM
/*
@@ -106,7 +106,7 @@ void __kunmap_atomic(void *kvaddr)
if (kvaddr >= (void *)FIXADDR_START) {
type = kmap_atomic_idx();
- idx = type + KM_TYPE_NR * smp_processor_id();
+ idx = FIX_KMAP_BEGIN + type + KM_TYPE_NR * smp_processor_id();
if (cache_is_vivt())
__cpuc_flush_dcache_area((void *)vaddr, PAGE_SIZE);
@@ -138,7 +138,7 @@ void *kmap_atomic_pfn(unsigned long pfn)
return page_address(page);
type = kmap_atomic_idx_push();
- idx = type + KM_TYPE_NR * smp_processor_id();
+ idx = FIX_KMAP_BEGIN + type + KM_TYPE_NR * smp_processor_id();
vaddr = __fix_to_virt(idx);
#ifdef CONFIG_DEBUG_HIGHMEM
BUG_ON(!pte_none(get_fixmap_pte(vaddr)));
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
index 6ca7d9aa896f..fb9e817d08bb 100644
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -357,6 +357,47 @@ const struct mem_type *get_mem_type(unsigned int type)
}
EXPORT_SYMBOL(get_mem_type);
+static pte_t *(*pte_offset_fixmap)(pmd_t *dir, unsigned long addr);
+
+static pte_t bm_pte[PTRS_PER_PTE + PTE_HWTABLE_PTRS]
+ __aligned(PTE_HWTABLE_OFF + PTE_HWTABLE_SIZE) __initdata;
+
+static pte_t * __init pte_offset_early_fixmap(pmd_t *dir, unsigned long addr)
+{
+ return &bm_pte[pte_index(addr)];
+}
+
+static pte_t *pte_offset_late_fixmap(pmd_t *dir, unsigned long addr)
+{
+ return pte_offset_kernel(dir, addr);
+}
+
+static inline pmd_t * __init fixmap_pmd(unsigned long addr)
+{
+ pgd_t *pgd = pgd_offset_k(addr);
+ pud_t *pud = pud_offset(pgd, addr);
+ pmd_t *pmd = pmd_offset(pud, addr);
+
+ return pmd;
+}
+
+void __init early_fixmap_init(void)
+{
+ pmd_t *pmd;
+
+ /*
+ * The early fixmap range spans multiple pmds, for which
+ * we are not prepared:
+ */
+ BUILD_BUG_ON((__fix_to_virt(__end_of_permanent_fixed_addresses) >> PMD_SHIFT)
+ != FIXADDR_TOP >> PMD_SHIFT);
+
+ pmd = fixmap_pmd(FIXADDR_TOP);
+ pmd_populate_kernel(&init_mm, pmd, bm_pte);
+
+ pte_offset_fixmap = pte_offset_early_fixmap;
+}
+
/*
* To avoid TLB flush broadcasts, this uses local_flush_tlb_kernel_range().
* As a result, this can only be called with preemption disabled, as under
@@ -365,7 +406,7 @@ EXPORT_SYMBOL(get_mem_type);
void __set_fixmap(enum fixed_addresses idx, phys_addr_t phys, pgprot_t prot)
{
unsigned long vaddr = __fix_to_virt(idx);
- pte_t *pte = pte_offset_kernel(pmd_off_k(vaddr), vaddr);
+ pte_t *pte = pte_offset_fixmap(pmd_off_k(vaddr), vaddr);
/* Make sure fixmap region does not exceed available allocation. */
BUILD_BUG_ON(FIXADDR_START + (__end_of_fixed_addresses * PAGE_SIZE) >
@@ -855,7 +896,7 @@ static void __init create_mapping(struct map_desc *md)
}
if ((md->type == MT_DEVICE || md->type == MT_ROM) &&
- md->virtual >= PAGE_OFFSET &&
+ md->virtual >= PAGE_OFFSET && md->virtual < FIXADDR_START &&
(md->virtual < VMALLOC_START || md->virtual >= VMALLOC_END)) {
pr_warn("BUG: mapping for 0x%08llx at 0x%08lx out of vmalloc space\n",
(long long)__pfn_to_phys((u64)md->pfn), md->virtual);
@@ -1213,10 +1254,10 @@ void __init arm_mm_memblock_reserve(void)
/*
* Set up the device mappings. Since we clear out the page tables for all
- * mappings above VMALLOC_START, we will remove any debug device mappings.
- * This means you have to be careful how you debug this function, or any
- * called function. This means you can't use any function or debugging
- * method which may touch any device, otherwise the kernel _will_ crash.
+ * mappings above VMALLOC_START, except early fixmap, we might remove debug
+ * device mappings. This means earlycon can be used to debug this function.
+ * Any other function or debugging method which may touch any device _will_
+ * crash the kernel.
*/
static void __init devicemaps_init(const struct machine_desc *mdesc)
{
@@ -1231,7 +1272,10 @@ static void __init devicemaps_init(const struct machine_desc *mdesc)
early_trap_init(vectors);
- for (addr = VMALLOC_START; addr; addr += PMD_SIZE)
+ /*
+ * Clear page table except top pmd used by early fixmaps
+ */
+ for (addr = VMALLOC_START; addr < (FIXADDR_TOP & PMD_MASK); addr += PMD_SIZE)
pmd_clear(pmd_off_k(addr));
/*
@@ -1483,6 +1527,35 @@ void __init early_paging_init(const struct machine_desc *mdesc)
#endif
+static void __init early_fixmap_shutdown(void)
+{
+ int i;
+ unsigned long va = fix_to_virt(__end_of_permanent_fixed_addresses - 1);
+
+ pte_offset_fixmap = pte_offset_late_fixmap;
+ pmd_clear(fixmap_pmd(va));
+ local_flush_tlb_kernel_page(va);
+
+ for (i = 0; i < __end_of_permanent_fixed_addresses; i++) {
+ pte_t *pte;
+ struct map_desc map;
+
+ map.virtual = fix_to_virt(i);
+ pte = pte_offset_early_fixmap(pmd_off_k(map.virtual), map.virtual);
+
+ /* Only i/o device mappings are supported ATM */
+ if (pte_none(*pte) ||
+ (pte_val(*pte) & L_PTE_MT_MASK) != L_PTE_MT_DEV_SHARED)
+ continue;
+
+ map.pfn = pte_pfn(*pte);
+ map.type = MT_DEVICE;
+ map.length = PAGE_SIZE;
+
+ create_mapping(&map);
+ }
+}
+
/*
* paging_init() sets up the page tables, initialises the zone memory
* maps, and sets up the zero page, bad page and bad page tables.
@@ -1495,6 +1568,7 @@ void __init paging_init(const struct machine_desc *mdesc)
prepare_page_table();
map_lowmem();
dma_contiguous_remap();
+ early_fixmap_shutdown();
devicemaps_init(mdesc);
kmap_init();
tcm_init();