commit 6a87ef22a8b54095478561f9f2664c40c5c76d98
Author:    Mark Brown <broonie@linaro.org>  2014-07-23 13:16:53 +0100
Committer: Mark Brown <broonie@linaro.org>  2014-07-23 13:16:53 +0100
Tree:      66292f45032cad8d8978a8d53b51b398fc91a000
Parent:    6c45113800d819f87b0af7f80d627d0208e10a6c
Parent:    a4121f52920d3588e475336bf076915899e51509

    Merge branch 'linux-linaro-lsk-v3.14' into linux-linaro-lsk-v3.14-android (lsk-v3.14-android-preview-14.07)

 Documentation/arm64/booting.txt |  10
 arch/arm64/include/asm/page.h   |   9
 arch/arm64/kernel/head.S        |  78
 arch/arm64/kernel/vmlinux.lds.S |   7
 arch/arm64/mm/cache.S           |  11
 arch/arm64/mm/init.c            |  12
 arch/arm64/mm/proc.S            |   6
 7 files changed, 82 insertions(+), 51 deletions(-)
diff --git a/Documentation/arm64/booting.txt b/Documentation/arm64/booting.txt
index a9691cc48fe3..beb754e87c65 100644
--- a/Documentation/arm64/booting.txt
+++ b/Documentation/arm64/booting.txt
@@ -111,8 +111,14 @@ Before jumping into the kernel, the following conditions must be met:
- Caches, MMUs
The MMU must be off.
Instruction cache may be on or off.
- Data cache must be off and invalidated.
- External caches (if present) must be configured and disabled.
+ The address range corresponding to the loaded kernel image must be
+ cleaned to the PoC. In the presence of a system cache or other
+ coherent masters with caches enabled, this will typically require
+ cache maintenance by VA rather than set/way operations.
+ System caches which respect the architected cache maintenance by VA
+ operations must be configured and may be enabled.
+ System caches which do not respect architected cache maintenance by VA
+ operations (not recommended) must be configured and disabled.
- Architected timers
CNTFRQ must be programmed with the timer frequency and CNTVOFF must
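
To make the new requirement concrete, here is a minimal loader-side sketch of cleaning the loaded image to the PoC by VA rather than by set/way (illustration only; the helper names are hypothetical and the code assumes GCC-style inline assembly on bare-metal AArch64):

#include <stdint.h>

/* CTR_EL0.DminLine (bits [19:16]) is log2 of the words per D-cache line. */
static inline uint64_t dcache_line_bytes(void)
{
        uint64_t ctr;

        asm volatile("mrs %0, ctr_el0" : "=r" (ctr));
        return 4UL << ((ctr >> 16) & 0xf);
}

/* Clean [start, end) to the Point of Coherency, one line at a time. */
static void clean_range_to_poc(uintptr_t start, uintptr_t end)
{
        uint64_t stride = dcache_line_bytes();
        uintptr_t addr;

        for (addr = start & ~(stride - 1); addr < end; addr += stride)
                asm volatile("dc cvac, %0" : : "r" (addr) : "memory");
        asm volatile("dsb sy" : : : "memory");
}
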
diff --git a/arch/arm64/include/asm/page.h b/arch/arm64/include/asm/page.h
index 46bf66628b6a..a6331e6a92b5 100644
--- a/arch/arm64/include/asm/page.h
+++ b/arch/arm64/include/asm/page.h
@@ -31,6 +31,15 @@
/* We do define AT_SYSINFO_EHDR but don't use the gate mechanism */
#define __HAVE_ARCH_GATE_AREA 1
+/*
+ * The idmap and swapper page tables need some space reserved in the kernel
+ * image. The idmap only requires a pgd and a next level table to (section) map
+ * the kernel, while the swapper also maps the FDT and requires an additional
+ * table to map an early UART. See __create_page_tables for more information.
+ */
+#define SWAPPER_DIR_SIZE (3 * PAGE_SIZE)
+#define IDMAP_DIR_SIZE (2 * PAGE_SIZE)
+
#ifndef __ASSEMBLY__
#ifdef CONFIG_ARM64_64K_PAGES
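
A rough worked example of the sizing in the new comment (assumptions: 4KB granule, 2MB section mappings, each pgd entry spanning 1GB through a single next-level table; the helper is hypothetical):

#include <stdint.h>

/*
 * Number of next-level table pages needed to section-map [start, end)
 * when each pgd entry covers pgd_span bytes (illustration only).
 */
static uint64_t tables_for_range(uint64_t start, uint64_t end, uint64_t pgd_span)
{
        return (end - 1) / pgd_span - start / pgd_span + 1;
}

The kernel image fits in one such span, so the idmap needs a pgd page plus one table (2 pages, IDMAP_DIR_SIZE); the swapper's early UART mapping may land in a different span and can need one more (3 pages, SWAPPER_DIR_SIZE).
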
diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S
index 5627d9e69b3c..05a104c59510 100644
--- a/arch/arm64/kernel/head.S
+++ b/arch/arm64/kernel/head.S
@@ -26,6 +26,7 @@
#include <asm/assembler.h>
#include <asm/ptrace.h>
#include <asm/asm-offsets.h>
+#include <asm/cache.h>
#include <asm/cputype.h>
#include <asm/memory.h>
#include <asm/thread_info.h>
@@ -34,29 +35,17 @@
#include <asm/page.h>
#include <asm/virt.h>
-/*
- * swapper_pg_dir is the virtual address of the initial page table. We place
- * the page tables 3 * PAGE_SIZE below KERNEL_RAM_VADDR. The idmap_pg_dir has
- * 2 pages and is placed below swapper_pg_dir.
- */
#define KERNEL_RAM_VADDR (PAGE_OFFSET + TEXT_OFFSET)
#if (KERNEL_RAM_VADDR & 0xfffff) != 0x80000
#error KERNEL_RAM_VADDR must start at 0xXXX80000
#endif
-#define SWAPPER_DIR_SIZE (3 * PAGE_SIZE)
-#define IDMAP_DIR_SIZE (2 * PAGE_SIZE)
-
- .globl swapper_pg_dir
- .equ swapper_pg_dir, KERNEL_RAM_VADDR - SWAPPER_DIR_SIZE
-
- .globl idmap_pg_dir
- .equ idmap_pg_dir, swapper_pg_dir - IDMAP_DIR_SIZE
-
- .macro pgtbl, ttb0, ttb1, phys
- add \ttb1, \phys, #TEXT_OFFSET - SWAPPER_DIR_SIZE
- sub \ttb0, \ttb1, #IDMAP_DIR_SIZE
+ .macro pgtbl, ttb0, ttb1, virt_to_phys
+ ldr \ttb1, =swapper_pg_dir
+ ldr \ttb0, =idmap_pg_dir
+ add \ttb1, \ttb1, \virt_to_phys
+ add \ttb0, \ttb0, \virt_to_phys
.endm
#ifdef CONFIG_ARM64_64K_PAGES
@@ -229,7 +218,11 @@ ENTRY(set_cpu_boot_mode_flag)
cmp w20, #BOOT_CPU_MODE_EL2
b.ne 1f
add x1, x1, #4
-1: str w20, [x1] // This CPU has booted in EL1
+1: dc cvac, x1 // Clean potentially dirty cache line
+ dsb sy
+ str w20, [x1] // This CPU has booted in EL1
+ dc civac, x1 // Clean&invalidate potentially stale cache line
+ dsb sy
ret
ENDPROC(set_cpu_boot_mode_flag)
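
A rough C rendering of the new sequence (hypothetical helpers, illustration only): the store to __boot_cpu_mode is made with the MMU and data cache off, so any dirty line covering the flag is cleaned first (an eviction must not land on top of the uncached store), and the line is cleaned and invalidated afterwards so later cacheable readers do not see a stale copy.

static inline void dc_cvac(const void *p)
{
        asm volatile("dc cvac, %0" : : "r" (p) : "memory");
        asm volatile("dsb sy" : : : "memory");
}

static inline void dc_civac(const void *p)
{
        asm volatile("dc civac, %0" : : "r" (p) : "memory");
        asm volatile("dsb sy" : : : "memory");
}

static void set_boot_mode_flag(unsigned int *flag, unsigned int mode)
{
        dc_cvac(flag);          /* drop any dirty line before the uncached store */
        *flag = mode;           /* the real store happens with MMU/caches off */
        dc_civac(flag);         /* remove stale copies for later cacheable readers */
}
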
@@ -240,8 +233,9 @@ ENDPROC(set_cpu_boot_mode_flag)
* This is not in .bss, because we set it sufficiently early that the boot-time
* zeroing of .bss would clobber it.
*/
- .pushsection .data
+ .pushsection .data..cacheline_aligned
ENTRY(__boot_cpu_mode)
+ .align L1_CACHE_SHIFT
.long BOOT_CPU_MODE_EL2
.long 0
.popsection
@@ -298,7 +292,7 @@ ENTRY(secondary_startup)
mov x23, x0 // x23=current cpu_table
cbz x23, __error_p // invalid processor (x23=0)?
- pgtbl x25, x26, x24 // x25=TTBR0, x26=TTBR1
+ pgtbl x25, x26, x28 // x25=TTBR0, x26=TTBR1
ldr x12, [x23, #CPU_INFO_SETUP]
add x12, x12, x28 // __virt_to_phys
blr x12 // initialise processor
@@ -389,26 +383,18 @@ ENDPROC(__calc_phys_offset)
* Preserves: tbl, flags
* Corrupts: phys, start, end, pstate
*/
- .macro create_block_map, tbl, flags, phys, start, end, idmap=0
+ .macro create_block_map, tbl, flags, phys, start, end
lsr \phys, \phys, #BLOCK_SHIFT
- .if \idmap
- and \start, \phys, #PTRS_PER_PTE - 1 // table index
- .else
lsr \start, \start, #BLOCK_SHIFT
and \start, \start, #PTRS_PER_PTE - 1 // table index
- .endif
orr \phys, \flags, \phys, lsl #BLOCK_SHIFT // table entry
- .ifnc \start,\end
lsr \end, \end, #BLOCK_SHIFT
and \end, \end, #PTRS_PER_PTE - 1 // table end index
- .endif
9999: str \phys, [\tbl, \start, lsl #3] // store the entry
- .ifnc \start,\end
add \start, \start, #1 // next entry
add \phys, \phys, #BLOCK_SIZE // next block
cmp \start, \end
b.ls 9999b
- .endif
.endm
/*
@@ -420,7 +406,16 @@ ENDPROC(__calc_phys_offset)
* - UART mapping if CONFIG_EARLY_PRINTK is enabled (TTBR1)
*/
__create_page_tables:
- pgtbl x25, x26, x24 // idmap_pg_dir and swapper_pg_dir addresses
+ pgtbl x25, x26, x28 // idmap_pg_dir and swapper_pg_dir addresses
+ mov x27, lr
+
+ /*
+ * Invalidate the idmap and swapper page tables to avoid potential
+ * dirty cache lines being evicted.
+ */
+ mov x0, x25
+ add x1, x26, #SWAPPER_DIR_SIZE
+ bl __inval_cache_range
/*
* Clear the idmap and swapper page tables.
@@ -440,9 +435,13 @@ __create_page_tables:
* Create the identity mapping.
*/
add x0, x25, #PAGE_SIZE // section table address
- adr x3, __turn_mmu_on // virtual/physical address
+ ldr x3, =KERNEL_START
+ add x3, x3, x28 // __pa(KERNEL_START)
create_pgd_entry x25, x0, x3, x5, x6
- create_block_map x0, x7, x3, x5, x5, idmap=1
+ ldr x6, =KERNEL_END
+ mov x5, x3 // __pa(KERNEL_START)
+ add x6, x6, x28 // __pa(KERNEL_END)
+ create_block_map x0, x7, x3, x5, x6
/*
* Map the kernel image (starting with PHYS_OFFSET).
@@ -450,7 +449,7 @@ __create_page_tables:
add x0, x26, #PAGE_SIZE // section table address
mov x5, #PAGE_OFFSET
create_pgd_entry x26, x0, x5, x3, x6
- ldr x6, =KERNEL_END - 1
+ ldr x6, =KERNEL_END
mov x3, x24 // phys offset
create_block_map x0, x7, x3, x5, x6
@@ -479,6 +478,17 @@ __create_page_tables:
add x0, x26, #2 * PAGE_SIZE // section table address
create_pgd_entry x26, x0, x5, x6, x7
#endif
+
+ /*
+ * Since the page tables have been populated with non-cacheable
+ * accesses (MMU disabled), invalidate the idmap and swapper page
+ * tables again to remove any speculatively loaded cache lines.
+ */
+ mov x0, x25
+ add x1, x26, #SWAPPER_DIR_SIZE
+ bl __inval_cache_range
+
+ mov lr, x27
ret
ENDPROC(__create_page_tables)
.ltorg
@@ -488,7 +498,7 @@ ENDPROC(__create_page_tables)
__switch_data:
.quad __mmap_switched
.quad __bss_start // x6
- .quad _end // x7
+ .quad __bss_stop // x7
.quad processor_id // x4
.quad __fdt_pointer // x5
.quad memstart_addr // x6
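
The overall shape of the updated __create_page_tables, sketched in C for readability (illustration only; it treats the assembly routines as if they had C prototypes): the tables are written while the MMU is off, so the region is invalidated before population to drop dirty lines that could be evicted on top of the new entries, and again afterwards to drop lines speculatively fetched while the tables were being written.

#include <asm/page.h>

extern char idmap_pg_dir[], swapper_pg_dir[];
void __inval_cache_range(void *start, void *end);   /* see arch/arm64/mm/cache.S */

static void create_page_tables_outline(void)
{
        void *start = idmap_pg_dir;
        void *end = swapper_pg_dir + SWAPPER_DIR_SIZE;

        /* Drop potentially dirty lines before the MMU-off table writes. */
        __inval_cache_range(start, end);

        /*
         * ... zero the tables and populate the idmap, kernel, FDT and
         * (optional) early-UART mappings, all with the MMU still off ...
         */

        /* Drop lines speculatively loaded while the tables were written. */
        __inval_cache_range(start, end);
}
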
diff --git a/arch/arm64/kernel/vmlinux.lds.S b/arch/arm64/kernel/vmlinux.lds.S
index 4ba7a55b49c7..51258bc4cb08 100644
--- a/arch/arm64/kernel/vmlinux.lds.S
+++ b/arch/arm64/kernel/vmlinux.lds.S
@@ -104,6 +104,13 @@ SECTIONS
_edata = .;
BSS_SECTION(0, 0, 0)
+
+ . = ALIGN(PAGE_SIZE);
+ idmap_pg_dir = .;
+ . += IDMAP_DIR_SIZE;
+ swapper_pg_dir = .;
+ . += SWAPPER_DIR_SIZE;
+
_end = .;
STABS_DEBUG
diff --git a/arch/arm64/mm/cache.S b/arch/arm64/mm/cache.S
index 9307cd5ffaf6..fda756875fa6 100644
--- a/arch/arm64/mm/cache.S
+++ b/arch/arm64/mm/cache.S
@@ -30,7 +30,7 @@
*
* Corrupted registers: x0-x7, x9-x11
*/
-ENTRY(__flush_dcache_all)
+__flush_dcache_all:
dsb sy // ensure ordering with previous memory accesses
mrs x0, clidr_el1 // read clidr
and x3, x0, #0x7000000 // extract loc from clidr
@@ -168,6 +168,14 @@ ENTRY(__flush_dcache_area)
ENDPROC(__flush_dcache_area)
/*
+ * __inval_cache_range(start, end)
+ * - start - start address of region
+ * - end - end address of region
+ */
+ENTRY(__inval_cache_range)
+ /* FALLTHROUGH */
+
+/*
* __dma_inv_range(start, end)
* - start - virtual start address of region
* - end - virtual end address of region
@@ -190,6 +198,7 @@ __dma_inv_range:
b.lo 2b
dsb sy
ret
+ENDPROC(__inval_cache_range)
ENDPROC(__dma_inv_range)
/*
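
__inval_cache_range shares __dma_inv_range's loop. In rough C form (illustration only, with a fixed 64-byte line for brevity; the real code reads the line size from CTR_EL0), invalidate-by-VA over a range looks like this, with partial first and last lines cleaned as well as invalidated so neighbouring data sharing those lines is not lost:

#include <stdint.h>

#define LINE    64UL

static void inval_cache_range_sketch(uintptr_t start, uintptr_t end)
{
        uintptr_t addr;

        if (start & (LINE - 1))         /* partial first line: clean+invalidate */
                asm volatile("dc civac, %0" : : "r" (start & ~(LINE - 1)) : "memory");
        if (end & (LINE - 1))           /* partial last line: clean+invalidate */
                asm volatile("dc civac, %0" : : "r" (end & ~(LINE - 1)) : "memory");

        /* Whole lines in between can simply be invalidated. */
        for (addr = (start + LINE - 1) & ~(LINE - 1);
             addr < (end & ~(LINE - 1)); addr += LINE)
                asm volatile("dc ivac, %0" : : "r" (addr) : "memory");

        asm volatile("dsb sy" : : : "memory");
}
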
diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c
index 88627c450a6c..c872988940db 100644
--- a/arch/arm64/mm/init.c
+++ b/arch/arm64/mm/init.c
@@ -128,20 +128,16 @@ void __init arm64_memblock_init(void)
{
u64 *reserve_map, base, size;
- /* Register the kernel text, kernel data and initrd with memblock */
+ /*
+ * Register the kernel text, kernel data, initrd, and initial
+ * pagetables with memblock.
+ */
memblock_reserve(__pa(_text), _end - _text);
#ifdef CONFIG_BLK_DEV_INITRD
if (initrd_start)
memblock_reserve(__virt_to_phys(initrd_start), initrd_end - initrd_start);
#endif
- /*
- * Reserve the page tables. These are already in use,
- * and can only be in node 0.
- */
- memblock_reserve(__pa(swapper_pg_dir), SWAPPER_DIR_SIZE);
- memblock_reserve(__pa(idmap_pg_dir), IDMAP_DIR_SIZE);
-
/* Reserve the dtb region */
memblock_reserve(virt_to_phys(initial_boot_params),
be32_to_cpu(initial_boot_params->totalsize));
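
With idmap_pg_dir and swapper_pg_dir now emitted by the linker script between BSS and _end, the existing memblock_reserve(__pa(_text), _end - _text) already covers them, which is why the two explicit reservations can be dropped. A hypothetical sanity check (illustration only, not part of the patch) would be:

#include <linux/bug.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/sections.h>

/* The page directories must fall inside [_text, _end), which
 * arm64_memblock_init() reserves as a single region. */
static void __init check_pgdir_placement(void)
{
        BUG_ON((unsigned long)idmap_pg_dir < (unsigned long)_text);
        BUG_ON((unsigned long)swapper_pg_dir + SWAPPER_DIR_SIZE >
               (unsigned long)_end);
}
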
diff --git a/arch/arm64/mm/proc.S b/arch/arm64/mm/proc.S
index 1333e6f9a8e5..e0ef63cd05dc 100644
--- a/arch/arm64/mm/proc.S
+++ b/arch/arm64/mm/proc.S
@@ -173,12 +173,6 @@ ENDPROC(cpu_do_switch_mm)
* value of the SCTLR_EL1 register.
*/
ENTRY(__cpu_setup)
- /*
- * Preserve the link register across the function call.
- */
- mov x28, lr
- bl __flush_dcache_all
- mov lr, x28
ic iallu // I+BTB cache invalidate
tlbi vmalle1is // invalidate I + D TLBs
dsb sy