Diffstat (limited to 'arch/arm/kernel')
-rw-r--r--  arch/arm/kernel/armksyms.c             3
-rw-r--r--  arch/arm/kernel/asm-offsets.c         23
-rw-r--r--  arch/arm/kernel/devtree.c              6
-rw-r--r--  arch/arm/kernel/dma.c                 14
-rw-r--r--  arch/arm/kernel/entry-armv.S          14
-rw-r--r--  arch/arm/kernel/entry-common.S        38
-rw-r--r--  arch/arm/kernel/entry-ftrace.S        75
-rw-r--r--  arch/arm/kernel/ftrace.c              51
-rw-r--r--  arch/arm/kernel/head-nommu.S         301
-rw-r--r--  arch/arm/kernel/head.S                 2
-rw-r--r--  arch/arm/kernel/hw_breakpoint.c       78
-rw-r--r--  arch/arm/kernel/irq.c                 10
-rw-r--r--  arch/arm/kernel/machine_kexec.c       36
-rw-r--r--  arch/arm/kernel/paravirt.c             4
-rw-r--r--  arch/arm/kernel/perf_event_v6.c       18
-rw-r--r--  arch/arm/kernel/perf_event_v7.c       18
-rw-r--r--  arch/arm/kernel/perf_event_xscale.c   24
-rw-r--r--  arch/arm/kernel/process.c             12
-rw-r--r--  arch/arm/kernel/ptrace.c              10
-rw-r--r--  arch/arm/kernel/setup.c                9
-rw-r--r--  arch/arm/kernel/signal.c              14
-rw-r--r--  arch/arm/kernel/smp.c                 18
-rw-r--r--  arch/arm/kernel/swp_emulate.c         30
-rw-r--r--  arch/arm/kernel/sys_arm.c              2
-rw-r--r--  arch/arm/kernel/sys_oabi-compat.c      4
-rw-r--r--  arch/arm/kernel/time.c                15
-rw-r--r--  arch/arm/kernel/topology.c             6
-rw-r--r--  arch/arm/kernel/traps.c               63
-rw-r--r--  arch/arm/kernel/vmlinux-xip.lds.S    171
-rw-r--r--  arch/arm/kernel/vmlinux.lds.S        179
-rw-r--r--  arch/arm/kernel/vmlinux.lds.h        137
31 files changed, 635 insertions, 750 deletions
diff --git a/arch/arm/kernel/armksyms.c b/arch/arm/kernel/armksyms.c
index 783fbb4de5f9..8fa2dc21d332 100644
--- a/arch/arm/kernel/armksyms.c
+++ b/arch/arm/kernel/armksyms.c
@@ -167,9 +167,6 @@ EXPORT_SYMBOL(_find_next_bit_be);
#endif
#ifdef CONFIG_FUNCTION_TRACER
-#ifdef CONFIG_OLD_MCOUNT
-EXPORT_SYMBOL(mcount);
-#endif
EXPORT_SYMBOL(__gnu_mcount_nc);
#endif
diff --git a/arch/arm/kernel/asm-offsets.c b/arch/arm/kernel/asm-offsets.c
index f369ece99958..3968d6c22455 100644
--- a/arch/arm/kernel/asm-offsets.c
+++ b/arch/arm/kernel/asm-offsets.c
@@ -38,30 +38,19 @@
#error Sorry, your compiler targets APCS-26 but this kernel requires APCS-32
#endif
/*
- * GCC 3.0, 3.1: general bad code generation.
- * GCC 3.2.0: incorrect function argument offset calculation.
- * GCC 3.2.x: miscompiles NEW_AUX_ENT in fs/binfmt_elf.c
- * (http://gcc.gnu.org/PR8896) and incorrect structure
- * initialisation in fs/jffs2/erase.c
* GCC 4.8.0-4.8.2: https://gcc.gnu.org/bugzilla/show_bug.cgi?id=58854
* miscompiles find_get_entry(), and can result in EXT3 and EXT4
* filesystem corruption (possibly other FS too).
*/
-#ifdef __GNUC__
-#if (__GNUC__ == 3 && __GNUC_MINOR__ < 3)
-#error Your compiler is too buggy; it is known to miscompile kernels.
-#error Known good compilers: 3.3, 4.x
-#endif
-#if GCC_VERSION >= 40800 && GCC_VERSION < 40803
+#if defined(GCC_VERSION) && GCC_VERSION >= 40800 && GCC_VERSION < 40803
#error Your compiler is too buggy; it is known to miscompile kernels
#error and result in filesystem corruption and oopses.
#endif
-#endif
int main(void)
{
DEFINE(TSK_ACTIVE_MM, offsetof(struct task_struct, active_mm));
-#ifdef CONFIG_CC_STACKPROTECTOR
+#ifdef CONFIG_STACKPROTECTOR
DEFINE(TSK_STACK_CANARY, offsetof(struct task_struct, stack_canary));
#endif
BLANK();
@@ -194,9 +183,11 @@ int main(void)
DEFINE(MPU_RNG_INFO_USED, offsetof(struct mpu_rgn_info, used));
DEFINE(MPU_RNG_SIZE, sizeof(struct mpu_rgn));
- DEFINE(MPU_RGN_DRBAR, offsetof(struct mpu_rgn, drbar));
- DEFINE(MPU_RGN_DRSR, offsetof(struct mpu_rgn, drsr));
- DEFINE(MPU_RGN_DRACR, offsetof(struct mpu_rgn, dracr));
+ DEFINE(MPU_RGN_DRBAR, offsetof(struct mpu_rgn, drbar));
+ DEFINE(MPU_RGN_DRSR, offsetof(struct mpu_rgn, drsr));
+ DEFINE(MPU_RGN_DRACR, offsetof(struct mpu_rgn, dracr));
+ DEFINE(MPU_RGN_PRBAR, offsetof(struct mpu_rgn, prbar));
+ DEFINE(MPU_RGN_PRLAR, offsetof(struct mpu_rgn, prlar));
#endif
return 0;
}
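
The DEFINE() entries above are how assembly such as head-nommu.S gets C
structure offsets by name. A minimal sketch of the mechanism, using the
DEFINE macro as in include/linux/kbuild.h and an illustrative struct layout
(the real struct mpu_rgn lives in arch/arm/include/asm/mpu.h):

    #include <linux/types.h>
    #include <linux/stddef.h>

    struct mpu_rgn {
            u32 drbar, drsr, dracr;         /* PMSAv7 registers */
            u32 prbar, prlar;               /* PMSAv8 registers */
    };

    #define DEFINE(sym, val) \
            asm volatile("\n.ascii \"->" #sym " %0 " #val "\"" : : "i" (val))

    void common(void)
    {
            /* The compiler embeds "->MPU_RGN_PRBAR <value>" markers in the
             * generated assembly; the kbuild scripts turn each marker into
             * a #define in include/generated/asm-offsets.h, which the
             * assembly files then use. */
            DEFINE(MPU_RGN_PRBAR, offsetof(struct mpu_rgn, prbar));
            DEFINE(MPU_RGN_PRLAR, offsetof(struct mpu_rgn, prlar));
    }
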
diff --git a/arch/arm/kernel/devtree.c b/arch/arm/kernel/devtree.c
index ecaa68dd1af5..e3057c1b55b9 100644
--- a/arch/arm/kernel/devtree.c
+++ b/arch/arm/kernel/devtree.c
@@ -12,7 +12,6 @@
#include <linux/export.h>
#include <linux/errno.h>
#include <linux/types.h>
-#include <linux/bootmem.h>
#include <linux/memblock.h>
#include <linux/of.h>
#include <linux/of_fdt.h>
@@ -87,14 +86,11 @@ void __init arm_dt_init_cpu_maps(void)
if (!cpus)
return;
- for_each_child_of_node(cpus, cpu) {
+ for_each_of_cpu_node(cpu) {
const __be32 *cell;
int prop_bytes;
u32 hwid;
- if (of_node_cmp(cpu->type, "cpu"))
- continue;
-
pr_debug(" * %pOF...\n", cpu);
/*
* A device tree containing CPU nodes with missing "reg"
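
This conversion swaps an open-coded walk of /cpus for the generic CPU-node
iterator. A sketch of the before and after shapes, where cpus is the /cpus
node looked up earlier and do_cpu() is a hypothetical stand-in for the
per-node work:

    struct device_node *cpu;

    /* Before: walk the children of /cpus and filter on device_type. */
    for_each_child_of_node(cpus, cpu) {
            if (of_node_cmp(cpu->type, "cpu"))
                    continue;
            do_cpu(cpu);
    }

    /* After: the iterator locates and filters CPU nodes itself, so
     * callers stop depending on the device_type property directly. */
    for_each_of_cpu_node(cpu)
            do_cpu(cpu);
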
diff --git a/arch/arm/kernel/dma.c b/arch/arm/kernel/dma.c
index e651c4d0a0d9..6739d37c2bc5 100644
--- a/arch/arm/kernel/dma.c
+++ b/arch/arm/kernel/dma.c
@@ -276,21 +276,9 @@ static int proc_dma_show(struct seq_file *m, void *v)
return 0;
}
-static int proc_dma_open(struct inode *inode, struct file *file)
-{
- return single_open(file, proc_dma_show, NULL);
-}
-
-static const struct file_operations proc_dma_operations = {
- .open = proc_dma_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
-};
-
static int __init proc_dma_init(void)
{
- proc_create("dma", 0, NULL, &proc_dma_operations);
+ proc_create_single("dma", 0, NULL, proc_dma_show);
return 0;
}
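
proc_create_single() folds the single_open() boilerplate into procfs itself;
the same pattern is applied to swp_emulate.c later in this series. A minimal
sketch of the idiom, with invented names for illustration:

    #include <linux/proc_fs.h>
    #include <linux/seq_file.h>

    static int example_show(struct seq_file *m, void *v)
    {
            seq_puts(m, "example\n");       /* stand-in body */
            return 0;
    }

    static int __init example_init(void)
    {
            /* One call replaces the open() wrapper and the
             * file_operations table; procfs supplies seq_read,
             * seq_lseek and single_release internally. */
            if (!proc_create_single("example", 0, NULL, example_show))
                    return -ENOMEM;
            return 0;
    }
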
diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S
index 1752033b0070..e85a3af9ddeb 100644
--- a/arch/arm/kernel/entry-armv.S
+++ b/arch/arm/kernel/entry-armv.S
@@ -22,7 +22,7 @@
#include <asm/glue-df.h>
#include <asm/glue-pf.h>
#include <asm/vfpmacros.h>
-#ifndef CONFIG_MULTI_IRQ_HANDLER
+#ifndef CONFIG_GENERIC_IRQ_MULTI_HANDLER
#include <mach/entry-macro.S>
#endif
#include <asm/thread_notify.h>
@@ -39,7 +39,7 @@
* Interrupt handling.
*/
.macro irq_handler
-#ifdef CONFIG_MULTI_IRQ_HANDLER
+#ifdef CONFIG_GENERIC_IRQ_MULTI_HANDLER
ldr r1, =handle_arch_irq
mov r0, sp
badr lr, 9997f
@@ -791,7 +791,7 @@ ENTRY(__switch_to)
ldr r6, [r2, #TI_CPU_DOMAIN]
#endif
switch_tls r1, r4, r5, r3, r7
-#if defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_SMP)
+#if defined(CONFIG_STACKPROTECTOR) && !defined(CONFIG_SMP)
ldr r7, [r2, #TI_TASK]
ldr r8, =__stack_chk_guard
.if (TSK_STACK_CANARY > IMM12_MASK)
@@ -807,7 +807,7 @@ ENTRY(__switch_to)
ldr r0, =thread_notify_head
mov r1, #THREAD_NOTIFY_SWITCH
bl atomic_notifier_call_chain
-#if defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_SMP)
+#if defined(CONFIG_STACKPROTECTOR) && !defined(CONFIG_SMP)
str r7, [r8]
#endif
THUMB( mov ip, r4 )
@@ -1226,9 +1226,3 @@ vector_addrexcptn:
.globl cr_alignment
cr_alignment:
.space 4
-
-#ifdef CONFIG_MULTI_IRQ_HANDLER
- .globl handle_arch_irq
-handle_arch_irq:
- .space 4
-#endif
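
The handle_arch_irq storage deleted above now comes from the generic IRQ
layer. Roughly what kernel/irq/handle.c provides once the architecture
selects CONFIG_GENERIC_IRQ_MULTI_HANDLER (simplified):

    void (*handle_arch_irq)(struct pt_regs *) __ro_after_init;

    int __init set_handle_irq(void (*handle_irq)(struct pt_regs *))
    {
            /* First registration wins, as in the ARM-private copy
             * removed from irq.c below. */
            if (handle_arch_irq)
                    return -EBUSY;

            handle_arch_irq = handle_irq;
            return 0;
    }
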
diff --git a/arch/arm/kernel/entry-common.S b/arch/arm/kernel/entry-common.S
index 20df608bf343..0465d65d23de 100644
--- a/arch/arm/kernel/entry-common.S
+++ b/arch/arm/kernel/entry-common.S
@@ -39,14 +39,16 @@ saved_pc .req lr
.section .entry.text,"ax",%progbits
.align 5
-#if !(IS_ENABLED(CONFIG_TRACE_IRQFLAGS) || IS_ENABLED(CONFIG_CONTEXT_TRACKING))
+#if !(IS_ENABLED(CONFIG_TRACE_IRQFLAGS) || IS_ENABLED(CONFIG_CONTEXT_TRACKING) || \
+ IS_ENABLED(CONFIG_DEBUG_RSEQ))
/*
* This is the fast syscall return path. We do as little as possible here,
* such as avoiding writing r0 to the stack. We only use this path if we
- * have tracing and context tracking disabled - the overheads from those
- * features make this path too inefficient.
+ * have tracing, context tracking and rseq debug disabled - the overheads
+ * from those features make this path too inefficient.
*/
ret_fast_syscall:
+__ret_fast_syscall:
UNWIND(.fnstart )
UNWIND(.cantunwind )
disable_irq_notrace @ disable interrupts
@@ -71,14 +73,21 @@ fast_work_pending:
/* fall through to work_pending */
#else
/*
- * The "replacement" ret_fast_syscall for when tracing or context tracking
- * is enabled. As we will need to call out to some C functions, we save
- * r0 first to avoid needing to save registers around each C function call.
+ * The "replacement" ret_fast_syscall for when tracing, context tracking,
+ * or rseq debug is enabled. As we will need to call out to some C functions,
+ * we save r0 first to avoid needing to save registers around each C function
+ * call.
*/
ret_fast_syscall:
+__ret_fast_syscall:
UNWIND(.fnstart )
UNWIND(.cantunwind )
str r0, [sp, #S_R0 + S_OFF]! @ save returned r0
+#if IS_ENABLED(CONFIG_DEBUG_RSEQ)
+ /* do_rseq_syscall needs interrupts enabled. */
+ mov r0, sp @ 'regs'
+ bl do_rseq_syscall
+#endif
disable_irq_notrace @ disable interrupts
ldr r2, [tsk, #TI_ADDR_LIMIT]
cmp r2, #TASK_SIZE
@@ -113,6 +122,12 @@ ENDPROC(ret_fast_syscall)
*/
ENTRY(ret_to_user)
ret_slow_syscall:
+#if IS_ENABLED(CONFIG_DEBUG_RSEQ)
+ /* do_rseq_syscall needs interrupts enabled. */
+ enable_irq_notrace @ enable interrupts
+ mov r0, sp @ 'regs'
+ bl do_rseq_syscall
+#endif
disable_irq_notrace @ disable interrupts
ENTRY(ret_to_user_from_irq)
ldr r2, [tsk, #TI_ADDR_LIMIT]
@@ -242,7 +257,7 @@ local_restart:
tst r10, #_TIF_SYSCALL_WORK @ are we tracing syscalls?
bne __sys_trace
- invoke_syscall tbl, scno, r10, ret_fast_syscall
+ invoke_syscall tbl, scno, r10, __ret_fast_syscall
add r1, sp, #S_OFF
2: cmp scno, #(__ARM_NR_BASE - __NR_SYSCALL_BASE)
@@ -281,16 +296,15 @@ __sys_trace:
cmp scno, #-1 @ skip the syscall?
bne 2b
add sp, sp, #S_OFF @ restore stack
- b ret_slow_syscall
-__sys_trace_return:
- str r0, [sp, #S_R0 + S_OFF]! @ save returned r0
+__sys_trace_return_nosave:
+ enable_irq_notrace
mov r0, sp
bl syscall_trace_exit
b ret_slow_syscall
-__sys_trace_return_nosave:
- enable_irq_notrace
+__sys_trace_return:
+ str r0, [sp, #S_R0 + S_OFF]! @ save returned r0
mov r0, sp
bl syscall_trace_exit
b ret_slow_syscall
diff --git a/arch/arm/kernel/entry-ftrace.S b/arch/arm/kernel/entry-ftrace.S
index efcd9f25a14b..0be69e551a64 100644
--- a/arch/arm/kernel/entry-ftrace.S
+++ b/arch/arm/kernel/entry-ftrace.S
@@ -15,23 +15,8 @@
* start of every function. In mcount, apart from the function's address (in
* lr), we need to get hold of the function's caller's address.
*
- * Older GCCs (pre-4.4) inserted a call to a routine called mcount like this:
- *
- * bl mcount
- *
- * These versions have the limitation that in order for the mcount routine to
- * be able to determine the function's caller's address, an APCS-style frame
- * pointer (which is set up with something like the code below) is required.
- *
- * mov ip, sp
- * push {fp, ip, lr, pc}
- * sub fp, ip, #4
- *
- * With EABI, these frame pointers are not available unless -mapcs-frame is
- * specified, and if building as Thumb-2, not even then.
- *
- * Newer GCCs (4.4+) solve this problem by introducing a new version of mcount,
- * with call sites like:
+ * Newer GCCs (4.4+) solve this problem by using a version of mcount with call
+ * sites like:
*
* push {lr}
* bl __gnu_mcount_nc
@@ -46,17 +31,10 @@
* allows it to be clobbered in subroutines and doesn't use it to hold
* parameters.)
*
- * When using dynamic ftrace, we patch out the mcount call by a "mov r0, r0"
- * for the mcount case, and a "pop {lr}" for the __gnu_mcount_nc case (see
- * arch/arm/kernel/ftrace.c).
+ * When using dynamic ftrace, we patch out the mcount call by a "pop {lr}"
+ * instead of the __gnu_mcount_nc call (see arch/arm/kernel/ftrace.c).
*/
-#ifndef CONFIG_OLD_MCOUNT
-#if (__GNUC__ < 4 || (__GNUC__ == 4 && __GNUC_MINOR__ < 4))
-#error Ftrace requires CONFIG_FRAME_POINTER=y with GCC older than 4.4.0.
-#endif
-#endif
-
.macro mcount_adjust_addr rd, rn
bic \rd, \rn, #1 @ clear the Thumb bit if present
sub \rd, \rd, #MCOUNT_INSN_SIZE
@@ -209,51 +187,6 @@ ftrace_graph_call\suffix:
mcount_exit
.endm
-#ifdef CONFIG_OLD_MCOUNT
-/*
- * mcount
- */
-
-.macro mcount_enter
- stmdb sp!, {r0-r3, lr}
-.endm
-
-.macro mcount_get_lr reg
- ldr \reg, [fp, #-4]
-.endm
-
-.macro mcount_exit
- ldr lr, [fp, #-4]
- ldmia sp!, {r0-r3, pc}
-.endm
-
-ENTRY(mcount)
-#ifdef CONFIG_DYNAMIC_FTRACE
- stmdb sp!, {lr}
- ldr lr, [fp, #-4]
- ldmia sp!, {pc}
-#else
- __mcount _old
-#endif
-ENDPROC(mcount)
-
-#ifdef CONFIG_DYNAMIC_FTRACE
-ENTRY(ftrace_caller_old)
- __ftrace_caller _old
-ENDPROC(ftrace_caller_old)
-#endif
-
-#ifdef CONFIG_FUNCTION_GRAPH_TRACER
-ENTRY(ftrace_graph_caller_old)
- __ftrace_graph_caller
-ENDPROC(ftrace_graph_caller_old)
-#endif
-
-.purgem mcount_enter
-.purgem mcount_get_lr
-.purgem mcount_exit
-#endif
-
/*
* __gnu_mcount_nc
*/
diff --git a/arch/arm/kernel/ftrace.c b/arch/arm/kernel/ftrace.c
index 5617932a83df..0142fcfcc3d3 100644
--- a/arch/arm/kernel/ftrace.c
+++ b/arch/arm/kernel/ftrace.c
@@ -47,30 +47,6 @@ void arch_ftrace_update_code(int command)
stop_machine(__ftrace_modify_code, &command, NULL);
}
-#ifdef CONFIG_OLD_MCOUNT
-#define OLD_MCOUNT_ADDR ((unsigned long) mcount)
-#define OLD_FTRACE_ADDR ((unsigned long) ftrace_caller_old)
-
-#define OLD_NOP 0xe1a00000 /* mov r0, r0 */
-
-static unsigned long ftrace_nop_replace(struct dyn_ftrace *rec)
-{
- return rec->arch.old_mcount ? OLD_NOP : NOP;
-}
-
-static unsigned long adjust_address(struct dyn_ftrace *rec, unsigned long addr)
-{
- if (!rec->arch.old_mcount)
- return addr;
-
- if (addr == MCOUNT_ADDR)
- addr = OLD_MCOUNT_ADDR;
- else if (addr == FTRACE_ADDR)
- addr = OLD_FTRACE_ADDR;
-
- return addr;
-}
-#else
static unsigned long ftrace_nop_replace(struct dyn_ftrace *rec)
{
return NOP;
@@ -80,7 +56,6 @@ static unsigned long adjust_address(struct dyn_ftrace *rec, unsigned long addr)
{
return addr;
}
-#endif
int ftrace_arch_code_modify_prepare(void)
{
@@ -150,15 +125,6 @@ int ftrace_update_ftrace_func(ftrace_func_t func)
}
#endif
-#ifdef CONFIG_OLD_MCOUNT
- if (!ret) {
- pc = (unsigned long)&ftrace_call_old;
- new = ftrace_call_replace(pc, (unsigned long)func);
-
- ret = ftrace_modify_code(pc, 0, new, false);
- }
-#endif
-
return ret;
}
@@ -203,16 +169,6 @@ int ftrace_make_nop(struct module *mod,
new = ftrace_nop_replace(rec);
ret = ftrace_modify_code(ip, old, new, true);
-#ifdef CONFIG_OLD_MCOUNT
- if (ret == -EINVAL && addr == MCOUNT_ADDR) {
- rec->arch.old_mcount = true;
-
- old = ftrace_call_replace(ip, adjust_address(rec, addr));
- new = ftrace_nop_replace(rec);
- ret = ftrace_modify_code(ip, old, new, true);
- }
-#endif
-
return ret;
}
@@ -290,13 +246,6 @@ static int ftrace_modify_graph_caller(bool enable)
#endif
-#ifdef CONFIG_OLD_MCOUNT
- if (!ret)
- ret = __ftrace_modify_caller(&ftrace_graph_call_old,
- ftrace_graph_caller_old,
- enable);
-#endif
-
return ret;
}
diff --git a/arch/arm/kernel/head-nommu.S b/arch/arm/kernel/head-nommu.S
index 2e38f85b757a..ec29de250076 100644
--- a/arch/arm/kernel/head-nommu.S
+++ b/arch/arm/kernel/head-nommu.S
@@ -53,7 +53,11 @@ ENTRY(stext)
THUMB(1: )
#endif
- setmode PSR_F_BIT | PSR_I_BIT | SVC_MODE, r9 @ ensure svc mode
+#ifdef CONFIG_ARM_VIRT_EXT
+ bl __hyp_stub_install
+#endif
+ @ ensure svc mode and all interrupts masked
+ safe_svcmode_maskall r9
@ and irqs disabled
#if defined(CONFIG_CPU_CP15)
mrc p15, 0, r9, c0, c0 @ get processor id
@@ -68,14 +72,6 @@ ENTRY(stext)
beq __error_p @ yes, error 'p'
#ifdef CONFIG_ARM_MPU
- /* Calculate the size of a region covering just the kernel */
- ldr r5, =PLAT_PHYS_OFFSET @ Region start: PHYS_OFFSET
- ldr r6, =(_end) @ Cover whole kernel
- sub r6, r6, r5 @ Minimum size of region to map
- clz r6, r6 @ Region size must be 2^N...
- rsb r6, r6, #31 @ ...so round up region size
- lsl r6, r6, #MPU_RSR_SZ @ Put size in right field
- orr r6, r6, #(1 << MPU_RSR_EN) @ Set region enabled bit
bl __setup_mpu
#endif
@@ -83,8 +79,8 @@ ENTRY(stext)
ldr r12, [r10, #PROCINFO_INITFUNC]
add r12, r12, r10
ret r12
-1: bl __after_proc_init
- b __mmap_switched
+1: ldr lr, =__mmap_switched
+ b __after_proc_init
ENDPROC(stext)
#ifdef CONFIG_SMP
@@ -97,7 +93,11 @@ ENTRY(secondary_startup)
* the processor type - there is no need to check the machine type
* as it has already been validated by the primary processor.
*/
- setmode PSR_F_BIT | PSR_I_BIT | SVC_MODE, r9
+#ifdef CONFIG_ARM_VIRT_EXT
+ bl __hyp_stub_install_secondary
+#endif
+ safe_svcmode_maskall r9
+
#ifndef CONFIG_CPU_CP15
ldr r9, =CONFIG_PROCESSOR_ID
#else
@@ -110,8 +110,6 @@ ENTRY(secondary_startup)
ldr r7, __secondary_data
#ifdef CONFIG_ARM_MPU
- /* Use MPU region info supplied by __cpu_up */
- ldr r6, [r7] @ get secondary_data.mpu_rgn_info
bl __secondary_setup_mpu @ Initialize the MPU
#endif
@@ -133,12 +131,45 @@ __secondary_data:
/*
* Set the Control Register and Read the process ID.
*/
+ .text
__after_proc_init:
+#ifdef CONFIG_ARM_MPU
+M_CLASS(movw r12, #:lower16:BASEADDR_V7M_SCB)
+M_CLASS(movt r12, #:upper16:BASEADDR_V7M_SCB)
+M_CLASS(ldr r3, [r12, 0x50])
+AR_CLASS(mrc p15, 0, r3, c0, c1, 4) @ Read ID_MMFR0
+ and r3, r3, #(MMFR0_PMSA) @ PMSA field
+ teq r3, #(MMFR0_PMSAv7) @ PMSA v7
+ beq 1f
+ teq r3, #(MMFR0_PMSAv8) @ PMSA v8
+ /*
+ * Memory region attributes for PMSAv8:
+ *
+ * n = AttrIndx[2:0]
+ * n MAIR
+ * DEVICE_nGnRnE 000 00000000
+ * NORMAL 001 11111111
+ */
+ ldreq r3, =PMSAv8_MAIR(0x00, PMSAv8_RGN_DEVICE_nGnRnE) | \
+ PMSAv8_MAIR(0xff, PMSAv8_RGN_NORMAL)
+AR_CLASS(mcreq p15, 0, r3, c10, c2, 0) @ MAIR 0
+M_CLASS(streq r3, [r12, #PMSAv8_MAIR0])
+ moveq r3, #0
+AR_CLASS(mcreq p15, 0, r3, c10, c2, 1) @ MAIR 1
+M_CLASS(streq r3, [r12, #PMSAv8_MAIR1])
+
+1:
+#endif
#ifdef CONFIG_CPU_CP15
/*
* CP15 system control register value returned in r0 from
* the CPU init function.
*/
+
+#ifdef CONFIG_ARM_MPU
+ biceq r0, r0, #CR_BR @ Disable the 'default mem-map'
+ orreq r0, r0, #CR_M @ Set SCTRL.M (MPU on)
+#endif
#if defined(CONFIG_ALIGNMENT_TRAP) && __LINUX_ARM_ARCH__ < 6
orr r0, r0, #CR_A
#else
@@ -154,7 +185,15 @@ __after_proc_init:
bic r0, r0, #CR_I
#endif
mcr p15, 0, r0, c1, c0, 0 @ write control reg
+ instr_sync
#elif defined (CONFIG_CPU_V7M)
+#ifdef CONFIG_ARM_MPU
+ ldreq r3, [r12, MPU_CTRL]
+ biceq r3, #MPU_CTRL_PRIVDEFENA
+ orreq r3, #MPU_CTRL_ENABLE
+ streq r3, [r12, MPU_CTRL]
+ isb
+#endif
/* For V7M systems we want to modify the CCR similarly to the SCTLR */
#ifdef CONFIG_CPU_DCACHE_DISABLE
bic r0, r0, #V7M_SCB_CCR_DC
@@ -165,9 +204,7 @@ __after_proc_init:
#ifdef CONFIG_CPU_ICACHE_DISABLE
bic r0, r0, #V7M_SCB_CCR_IC
#endif
- movw r3, #:lower16:(BASEADDR_V7M_SCB + V7M_SCB_CCR)
- movt r3, #:upper16:(BASEADDR_V7M_SCB + V7M_SCB_CCR)
- str r0, [r3]
+ str r0, [r12, V7M_SCB_CCR]
#endif /* CONFIG_CPU_CP15 elif CONFIG_CPU_V7M */
ret lr
ENDPROC(__after_proc_init)
@@ -184,7 +221,7 @@ ENDPROC(__after_proc_init)
.endm
/* Setup a single MPU region, either D or I side (D-side for unified) */
-.macro setup_region bar, acr, sr, side = MPU_DATA_SIDE, unused
+.macro setup_region bar, acr, sr, side = PMSAv7_DATA_SIDE, unused
mcr p15, 0, \bar, c6, c1, (0 + \side) @ I/DRBAR
mcr p15, 0, \acr, c6, c1, (4 + \side) @ I/DRACR
mcr p15, 0, \sr, c6, c1, (2 + \side) @ I/DRSR
@@ -192,14 +229,14 @@ ENDPROC(__after_proc_init)
#else
.macro set_region_nr tmp, rgnr, base
mov \tmp, \rgnr
- str \tmp, [\base, #MPU_RNR]
+ str \tmp, [\base, #PMSAv7_RNR]
.endm
.macro setup_region bar, acr, sr, unused, base
lsl \acr, \acr, #16
orr \acr, \acr, \sr
- str \bar, [\base, #MPU_RBAR]
- str \acr, [\base, #MPU_RASR]
+ str \bar, [\base, #PMSAv7_RBAR]
+ str \acr, [\base, #PMSAv7_RASR]
.endm
#endif
@@ -210,8 +247,9 @@ ENDPROC(__after_proc_init)
* Region 2: Normal, Shared, cacheable for RAM. From PHYS_OFFSET, size from r6
* Region 3: Normal, shared, inaccessible from PL0 to protect the vectors page
*
- * r6: Value to be written to DRSR (and IRSR if required) for MPU_RAM_REGION
+ * r6: Value to be written to DRSR (and IRSR if required) for PMSAv7_RAM_REGION
*/
+ __HEAD
ENTRY(__setup_mpu)
@@ -223,7 +261,22 @@ AR_CLASS(mrc p15, 0, r0, c0, c1, 4) @ Read ID_MMFR0
M_CLASS(ldr r0, [r12, 0x50])
and r0, r0, #(MMFR0_PMSA) @ PMSA field
teq r0, #(MMFR0_PMSAv7) @ PMSA v7
- bxne lr
+ beq __setup_pmsa_v7
+ teq r0, #(MMFR0_PMSAv8) @ PMSA v8
+ beq __setup_pmsa_v8
+
+ ret lr
+ENDPROC(__setup_mpu)
+
+ENTRY(__setup_pmsa_v7)
+ /* Calculate the size of a region covering just the kernel */
+ ldr r5, =PLAT_PHYS_OFFSET @ Region start: PHYS_OFFSET
+ ldr r6, =(_end) @ Cover whole kernel
+ sub r6, r6, r5 @ Minimum size of region to map
+ clz r6, r6 @ Region size must be 2^N...
+ rsb r6, r6, #31 @ ...so round up region size
+ lsl r6, r6, #PMSAv7_RSR_SZ @ Put size in right field
+ orr r6, r6, #(1 << PMSAv7_RSR_EN) @ Set region enabled bit
/* Determine whether the D/I-side memory map is unified. We set the
* flags here and continue to use them for the rest of this function */
@@ -234,77 +287,189 @@ M_CLASS(ldr r0, [r12, #MPU_TYPE])
tst r0, #MPUIR_nU @ MPUIR_nU = 0 for unified
/* Setup second region first to free up r6 */
- set_region_nr r0, #MPU_RAM_REGION, r12
+ set_region_nr r0, #PMSAv7_RAM_REGION, r12
isb
/* Full access from PL0, PL1, shared for CONFIG_SMP, cacheable */
ldr r0, =PLAT_PHYS_OFFSET @ RAM starts at PHYS_OFFSET
- ldr r5,=(MPU_AP_PL1RW_PL0RW | MPU_RGN_NORMAL)
+ ldr r5,=(PMSAv7_AP_PL1RW_PL0RW | PMSAv7_RGN_NORMAL)
- setup_region r0, r5, r6, MPU_DATA_SIDE, r12 @ PHYS_OFFSET, shared, enabled
+ setup_region r0, r5, r6, PMSAv7_DATA_SIDE, r12 @ PHYS_OFFSET, shared, enabled
beq 1f @ Memory-map not unified
- setup_region r0, r5, r6, MPU_INSTR_SIDE, r12 @ PHYS_OFFSET, shared, enabled
+ setup_region r0, r5, r6, PMSAv7_INSTR_SIDE, r12 @ PHYS_OFFSET, shared, enabled
1: isb
/* First/background region */
- set_region_nr r0, #MPU_BG_REGION, r12
+ set_region_nr r0, #PMSAv7_BG_REGION, r12
isb
/* Execute Never, strongly ordered, inaccessible to PL0, rw PL1 */
mov r0, #0 @ BG region starts at 0x0
- ldr r5,=(MPU_ACR_XN | MPU_RGN_STRONGLY_ORDERED | MPU_AP_PL1RW_PL0NA)
- mov r6, #MPU_RSR_ALL_MEM @ 4GB region, enabled
+ ldr r5,=(PMSAv7_ACR_XN | PMSAv7_RGN_STRONGLY_ORDERED | PMSAv7_AP_PL1RW_PL0NA)
+ mov r6, #PMSAv7_RSR_ALL_MEM @ 4GB region, enabled
- setup_region r0, r5, r6, MPU_DATA_SIDE, r12 @ 0x0, BG region, enabled
+ setup_region r0, r5, r6, PMSAv7_DATA_SIDE, r12 @ 0x0, BG region, enabled
beq 2f @ Memory-map not unified
- setup_region r0, r5, r6, MPU_INSTR_SIDE r12 @ 0x0, BG region, enabled
+ setup_region r0, r5, r6, PMSAv7_INSTR_SIDE r12 @ 0x0, BG region, enabled
2: isb
#ifdef CONFIG_XIP_KERNEL
- set_region_nr r0, #MPU_ROM_REGION, r12
+ set_region_nr r0, #PMSAv7_ROM_REGION, r12
isb
- ldr r5,=(MPU_AP_PL1RO_PL0NA | MPU_RGN_NORMAL)
+ ldr r5,=(PMSAv7_AP_PL1RO_PL0NA | PMSAv7_RGN_NORMAL)
ldr r0, =CONFIG_XIP_PHYS_ADDR @ ROM start
ldr r6, =(_exiprom) @ ROM end
sub r6, r6, r0 @ Minimum size of region to map
clz r6, r6 @ Region size must be 2^N...
rsb r6, r6, #31 @ ...so round up region size
- lsl r6, r6, #MPU_RSR_SZ @ Put size in right field
- orr r6, r6, #(1 << MPU_RSR_EN) @ Set region enabled bit
+ lsl r6, r6, #PMSAv7_RSR_SZ @ Put size in right field
+ orr r6, r6, #(1 << PMSAv7_RSR_EN) @ Set region enabled bit
- setup_region r0, r5, r6, MPU_DATA_SIDE, r12 @ XIP_PHYS_ADDR, shared, enabled
+ setup_region r0, r5, r6, PMSAv7_DATA_SIDE, r12 @ XIP_PHYS_ADDR, shared, enabled
beq 3f @ Memory-map not unified
- setup_region r0, r5, r6, MPU_INSTR_SIDE, r12 @ XIP_PHYS_ADDR, shared, enabled
+ setup_region r0, r5, r6, PMSAv7_INSTR_SIDE, r12 @ XIP_PHYS_ADDR, shared, enabled
3: isb
#endif
+ ret lr
+ENDPROC(__setup_pmsa_v7)
+
+ENTRY(__setup_pmsa_v8)
+ mov r0, #0
+AR_CLASS(mcr p15, 0, r0, c6, c2, 1) @ PRSEL
+M_CLASS(str r0, [r12, #PMSAv8_RNR])
+ isb
+
+#ifdef CONFIG_XIP_KERNEL
+ ldr r5, =CONFIG_XIP_PHYS_ADDR @ ROM start
+ ldr r6, =(_exiprom) @ ROM end
+ sub r6, r6, #1
+ bic r6, r6, #(PMSAv8_MINALIGN - 1)
+
+ orr r5, r5, #(PMSAv8_AP_PL1RW_PL0NA | PMSAv8_RGN_SHARED)
+ orr r6, r6, #(PMSAv8_LAR_IDX(PMSAv8_RGN_NORMAL) | PMSAv8_LAR_EN)
+
+AR_CLASS(mcr p15, 0, r5, c6, c8, 0) @ PRBAR0
+AR_CLASS(mcr p15, 0, r6, c6, c8, 1) @ PRLAR0
+M_CLASS(str r5, [r12, #PMSAv8_RBAR_A(0)])
+M_CLASS(str r6, [r12, #PMSAv8_RLAR_A(0)])
+#endif
- /* Enable the MPU */
-AR_CLASS(mrc p15, 0, r0, c1, c0, 0) @ Read SCTLR
-AR_CLASS(bic r0, r0, #CR_BR) @ Disable the 'default mem-map'
-AR_CLASS(orr r0, r0, #CR_M) @ Set SCTRL.M (MPU on)
-AR_CLASS(mcr p15, 0, r0, c1, c0, 0) @ Enable MPU
+ ldr r5, =KERNEL_START
+ ldr r6, =KERNEL_END
+ sub r6, r6, #1
+ bic r6, r6, #(PMSAv8_MINALIGN - 1)
-M_CLASS(ldr r0, [r12, #MPU_CTRL])
-M_CLASS(bic r0, #MPU_CTRL_PRIVDEFENA)
-M_CLASS(orr r0, #MPU_CTRL_ENABLE)
-M_CLASS(str r0, [r12, #MPU_CTRL])
+ orr r5, r5, #(PMSAv8_AP_PL1RW_PL0NA | PMSAv8_RGN_SHARED)
+ orr r6, r6, #(PMSAv8_LAR_IDX(PMSAv8_RGN_NORMAL) | PMSAv8_LAR_EN)
+
+AR_CLASS(mcr p15, 0, r5, c6, c8, 4) @ PRBAR1
+AR_CLASS(mcr p15, 0, r6, c6, c8, 5) @ PRLAR1
+M_CLASS(str r5, [r12, #PMSAv8_RBAR_A(1)])
+M_CLASS(str r6, [r12, #PMSAv8_RLAR_A(1)])
+
+ /* Setup Background: 0x0 - min(KERNEL_START, XIP_PHYS_ADDR) */
+#ifdef CONFIG_XIP_KERNEL
+ ldr r6, =KERNEL_START
+ ldr r5, =CONFIG_XIP_PHYS_ADDR
+ cmp r6, r5
+ movcs r6, r5
+#else
+ ldr r6, =KERNEL_START
+#endif
+ cmp r6, #0
+ beq 1f
+
+ mov r5, #0
+ sub r6, r6, #1
+ bic r6, r6, #(PMSAv8_MINALIGN - 1)
+
+ orr r5, r5, #(PMSAv8_AP_PL1RW_PL0NA | PMSAv8_RGN_SHARED | PMSAv8_BAR_XN)
+ orr r6, r6, #(PMSAv8_LAR_IDX(PMSAv8_RGN_DEVICE_nGnRnE) | PMSAv8_LAR_EN)
+
+AR_CLASS(mcr p15, 0, r5, c6, c9, 0) @ PRBAR2
+AR_CLASS(mcr p15, 0, r6, c6, c9, 1) @ PRLAR2
+M_CLASS(str r5, [r12, #PMSAv8_RBAR_A(2)])
+M_CLASS(str r6, [r12, #PMSAv8_RLAR_A(2)])
+
+1:
+ /* Setup Background: max(KERNEL_END, _exiprom) - 0xffffffff */
+#ifdef CONFIG_XIP_KERNEL
+ ldr r5, =KERNEL_END
+ ldr r6, =(_exiprom)
+ cmp r5, r6
+ movcc r5, r6
+#else
+ ldr r5, =KERNEL_END
+#endif
+ mov r6, #0xffffffff
+ bic r6, r6, #(PMSAv8_MINALIGN - 1)
+
+ orr r5, r5, #(PMSAv8_AP_PL1RW_PL0NA | PMSAv8_RGN_SHARED | PMSAv8_BAR_XN)
+ orr r6, r6, #(PMSAv8_LAR_IDX(PMSAv8_RGN_DEVICE_nGnRnE) | PMSAv8_LAR_EN)
+
+AR_CLASS(mcr p15, 0, r5, c6, c9, 4) @ PRBAR3
+AR_CLASS(mcr p15, 0, r6, c6, c9, 5) @ PRLAR3
+M_CLASS(str r5, [r12, #PMSAv8_RBAR_A(3)])
+M_CLASS(str r6, [r12, #PMSAv8_RLAR_A(3)])
+
+#ifdef CONFIG_XIP_KERNEL
+ /* Setup Background: min(_exiprom, KERNEL_END) - max(KERNEL_START, XIP_PHYS_ADDR) */
+ ldr r5, =(_exiprom)
+ ldr r6, =KERNEL_END
+ cmp r5, r6
+ movcs r5, r6
+
+ ldr r6, =KERNEL_START
+ ldr r0, =CONFIG_XIP_PHYS_ADDR
+ cmp r6, r0
+ movcc r6, r0
+
+ sub r6, r6, #1
+ bic r6, r6, #(PMSAv8_MINALIGN - 1)
+
+ orr r5, r5, #(PMSAv8_AP_PL1RW_PL0NA | PMSAv8_RGN_SHARED | PMSAv8_BAR_XN)
+ orr r6, r6, #(PMSAv8_LAR_IDX(PMSAv8_RGN_DEVICE_nGnRnE) | PMSAv8_LAR_EN)
+
+#ifdef CONFIG_CPU_V7M
+ /* There is no alias for n == 4 */
+ mov r0, #4
+ str r0, [r12, #PMSAv8_RNR] @ PRSEL
isb
+ str r5, [r12, #PMSAv8_RBAR_A(0)]
+ str r6, [r12, #PMSAv8_RLAR_A(0)]
+#else
+ mcr p15, 0, r5, c6, c10, 1 @ PRBAR4
+ mcr p15, 0, r6, c6, c10, 2 @ PRLAR4
+#endif
+#endif
ret lr
-ENDPROC(__setup_mpu)
+ENDPROC(__setup_pmsa_v8)
#ifdef CONFIG_SMP
/*
* r6: pointer at mpu_rgn_info
*/
+ .text
ENTRY(__secondary_setup_mpu)
+ /* Use MPU region info supplied by __cpu_up */
+ ldr r6, [r7] @ get secondary_data.mpu_rgn_info
+
/* Probe for v7 PMSA compliance */
mrc p15, 0, r0, c0, c1, 4 @ Read ID_MMFR0
and r0, r0, #(MMFR0_PMSA) @ PMSA field
teq r0, #(MMFR0_PMSAv7) @ PMSA v7
- bne __error_p
+ beq __secondary_setup_pmsa_v7
+ teq r0, #(MMFR0_PMSAv8) @ PMSA v8
+ beq __secondary_setup_pmsa_v8
+ b __error_p
+ENDPROC(__secondary_setup_mpu)
+/*
+ * r6: pointer at mpu_rgn_info
+ */
+ENTRY(__secondary_setup_pmsa_v7)
/* Determine whether the D/I-side memory map is unified. We set the
* flags here and continue to use them for the rest of this function */
mrc p15, 0, r0, c0, c0, 4 @ MPUIR
@@ -328,25 +493,45 @@ ENTRY(__secondary_setup_mpu)
ldr r6, [r3, #MPU_RGN_DRSR]
ldr r5, [r3, #MPU_RGN_DRACR]
- setup_region r0, r5, r6, MPU_DATA_SIDE
+ setup_region r0, r5, r6, PMSAv7_DATA_SIDE
beq 2f
- setup_region r0, r5, r6, MPU_INSTR_SIDE
+ setup_region r0, r5, r6, PMSAv7_INSTR_SIDE
2: isb
mrc p15, 0, r0, c0, c0, 4 @ Reevaluate the MPUIR
cmp r4, #0
bgt 1b
- /* Enable the MPU */
- mrc p15, 0, r0, c1, c0, 0 @ Read SCTLR
- bic r0, r0, #CR_BR @ Disable the 'default mem-map'
- orr r0, r0, #CR_M @ Set SCTRL.M (MPU on)
- mcr p15, 0, r0, c1, c0, 0 @ Enable MPU
+ ret lr
+ENDPROC(__secondary_setup_pmsa_v7)
+
+ENTRY(__secondary_setup_pmsa_v8)
+ ldr r4, [r6, #MPU_RNG_INFO_USED]
+#ifndef CONFIG_XIP_KERNEL
+ add r4, r4, #1
+#endif
+ mov r5, #MPU_RNG_SIZE
+ add r3, r6, #MPU_RNG_INFO_RNGS
+ mla r3, r4, r5, r3
+
+1:
+ sub r3, r3, #MPU_RNG_SIZE
+ sub r4, r4, #1
+
+ mcr p15, 0, r4, c6, c2, 1 @ PRSEL
isb
- ret lr
-ENDPROC(__secondary_setup_mpu)
+ ldr r5, [r3, #MPU_RGN_PRBAR]
+ ldr r6, [r3, #MPU_RGN_PRLAR]
+
+ mcr p15, 0, r5, c6, c3, 0 @ PRBAR
+ mcr p15, 0, r6, c6, c3, 1 @ PRLAR
+
+ cmp r4, #0
+ bgt 1b
+ ret lr
+ENDPROC(__secondary_setup_pmsa_v8)
#endif /* CONFIG_SMP */
#endif /* CONFIG_ARM_MPU */
#include "head-common.S"
diff --git a/arch/arm/kernel/head.S b/arch/arm/kernel/head.S
index 6b1148cafffd..4485d0404514 100644
--- a/arch/arm/kernel/head.S
+++ b/arch/arm/kernel/head.S
@@ -398,7 +398,7 @@ ENTRY(secondary_startup)
ldmia r4, {r5, r7, r12} @ address to jump to after
sub lr, r4, r5 @ mmu has been enabled
add r3, r7, lr
- ldrd r4, [r3, #0] @ get secondary_data.pgdir
+ ldrd r4, r5, [r3, #0] @ get secondary_data.pgdir
ARM_BE8(eor r4, r4, r5) @ Swap r5 and r4 in BE:
ARM_BE8(eor r5, r4, r5) @ it can be done in 3 steps
ARM_BE8(eor r4, r4, r5) @ without using a temp reg.
diff --git a/arch/arm/kernel/hw_breakpoint.c b/arch/arm/kernel/hw_breakpoint.c
index 629e25152c0d..1d5fbf1d1c67 100644
--- a/arch/arm/kernel/hw_breakpoint.c
+++ b/arch/arm/kernel/hw_breakpoint.c
@@ -456,14 +456,13 @@ static int get_hbp_len(u8 hbp_len)
/*
* Check whether bp virtual address is in kernel space.
*/
-int arch_check_bp_in_kernelspace(struct perf_event *bp)
+int arch_check_bp_in_kernelspace(struct arch_hw_breakpoint *hw)
{
unsigned int len;
unsigned long va;
- struct arch_hw_breakpoint *info = counter_arch_bp(bp);
- va = info->address;
- len = get_hbp_len(info->ctrl.len);
+ va = hw->address;
+ len = get_hbp_len(hw->ctrl.len);
return (va >= TASK_SIZE) && ((va + len - 1) >= TASK_SIZE);
}
@@ -518,42 +517,42 @@ int arch_bp_generic_fields(struct arch_hw_breakpoint_ctrl ctrl,
/*
* Construct an arch_hw_breakpoint from a perf_event.
*/
-static int arch_build_bp_info(struct perf_event *bp)
+static int arch_build_bp_info(struct perf_event *bp,
+ const struct perf_event_attr *attr,
+ struct arch_hw_breakpoint *hw)
{
- struct arch_hw_breakpoint *info = counter_arch_bp(bp);
-
/* Type */
- switch (bp->attr.bp_type) {
+ switch (attr->bp_type) {
case HW_BREAKPOINT_X:
- info->ctrl.type = ARM_BREAKPOINT_EXECUTE;
+ hw->ctrl.type = ARM_BREAKPOINT_EXECUTE;
break;
case HW_BREAKPOINT_R:
- info->ctrl.type = ARM_BREAKPOINT_LOAD;
+ hw->ctrl.type = ARM_BREAKPOINT_LOAD;
break;
case HW_BREAKPOINT_W:
- info->ctrl.type = ARM_BREAKPOINT_STORE;
+ hw->ctrl.type = ARM_BREAKPOINT_STORE;
break;
case HW_BREAKPOINT_RW:
- info->ctrl.type = ARM_BREAKPOINT_LOAD | ARM_BREAKPOINT_STORE;
+ hw->ctrl.type = ARM_BREAKPOINT_LOAD | ARM_BREAKPOINT_STORE;
break;
default:
return -EINVAL;
}
/* Len */
- switch (bp->attr.bp_len) {
+ switch (attr->bp_len) {
case HW_BREAKPOINT_LEN_1:
- info->ctrl.len = ARM_BREAKPOINT_LEN_1;
+ hw->ctrl.len = ARM_BREAKPOINT_LEN_1;
break;
case HW_BREAKPOINT_LEN_2:
- info->ctrl.len = ARM_BREAKPOINT_LEN_2;
+ hw->ctrl.len = ARM_BREAKPOINT_LEN_2;
break;
case HW_BREAKPOINT_LEN_4:
- info->ctrl.len = ARM_BREAKPOINT_LEN_4;
+ hw->ctrl.len = ARM_BREAKPOINT_LEN_4;
break;
case HW_BREAKPOINT_LEN_8:
- info->ctrl.len = ARM_BREAKPOINT_LEN_8;
- if ((info->ctrl.type != ARM_BREAKPOINT_EXECUTE)
+ hw->ctrl.len = ARM_BREAKPOINT_LEN_8;
+ if ((hw->ctrl.type != ARM_BREAKPOINT_EXECUTE)
&& max_watchpoint_len >= 8)
break;
default:
@@ -566,24 +565,24 @@ static int arch_build_bp_info(struct perf_event *bp)
* by the hardware and must be aligned to the appropriate number of
* bytes.
*/
- if (info->ctrl.type == ARM_BREAKPOINT_EXECUTE &&
- info->ctrl.len != ARM_BREAKPOINT_LEN_2 &&
- info->ctrl.len != ARM_BREAKPOINT_LEN_4)
+ if (hw->ctrl.type == ARM_BREAKPOINT_EXECUTE &&
+ hw->ctrl.len != ARM_BREAKPOINT_LEN_2 &&
+ hw->ctrl.len != ARM_BREAKPOINT_LEN_4)
return -EINVAL;
/* Address */
- info->address = bp->attr.bp_addr;
+ hw->address = attr->bp_addr;
/* Privilege */
- info->ctrl.privilege = ARM_BREAKPOINT_USER;
- if (arch_check_bp_in_kernelspace(bp))
- info->ctrl.privilege |= ARM_BREAKPOINT_PRIV;
+ hw->ctrl.privilege = ARM_BREAKPOINT_USER;
+ if (arch_check_bp_in_kernelspace(hw))
+ hw->ctrl.privilege |= ARM_BREAKPOINT_PRIV;
/* Enabled? */
- info->ctrl.enabled = !bp->attr.disabled;
+ hw->ctrl.enabled = !attr->disabled;
/* Mismatch */
- info->ctrl.mismatch = 0;
+ hw->ctrl.mismatch = 0;
return 0;
}
@@ -591,9 +590,10 @@ static int arch_build_bp_info(struct perf_event *bp)
/*
* Validate the arch-specific HW Breakpoint register settings.
*/
-int arch_validate_hwbkpt_settings(struct perf_event *bp)
+int hw_breakpoint_arch_parse(struct perf_event *bp,
+ const struct perf_event_attr *attr,
+ struct arch_hw_breakpoint *hw)
{
- struct arch_hw_breakpoint *info = counter_arch_bp(bp);
int ret = 0;
u32 offset, alignment_mask = 0x3;
@@ -602,14 +602,14 @@ int arch_validate_hwbkpt_settings(struct perf_event *bp)
return -ENODEV;
/* Build the arch_hw_breakpoint. */
- ret = arch_build_bp_info(bp);
+ ret = arch_build_bp_info(bp, attr, hw);
if (ret)
goto out;
/* Check address alignment. */
- if (info->ctrl.len == ARM_BREAKPOINT_LEN_8)
+ if (hw->ctrl.len == ARM_BREAKPOINT_LEN_8)
alignment_mask = 0x7;
- offset = info->address & alignment_mask;
+ offset = hw->address & alignment_mask;
switch (offset) {
case 0:
/* Aligned */
@@ -617,19 +617,19 @@ int arch_validate_hwbkpt_settings(struct perf_event *bp)
case 1:
case 2:
/* Allow halfword watchpoints and breakpoints. */
- if (info->ctrl.len == ARM_BREAKPOINT_LEN_2)
+ if (hw->ctrl.len == ARM_BREAKPOINT_LEN_2)
break;
case 3:
/* Allow single byte watchpoint. */
- if (info->ctrl.len == ARM_BREAKPOINT_LEN_1)
+ if (hw->ctrl.len == ARM_BREAKPOINT_LEN_1)
break;
default:
ret = -EINVAL;
goto out;
}
- info->address &= ~alignment_mask;
- info->ctrl.len <<= offset;
+ hw->address &= ~alignment_mask;
+ hw->ctrl.len <<= offset;
if (is_default_overflow_handler(bp)) {
/*
@@ -640,7 +640,7 @@ int arch_validate_hwbkpt_settings(struct perf_event *bp)
return -EINVAL;
/* We don't allow mismatch breakpoints in kernel space. */
- if (arch_check_bp_in_kernelspace(bp))
+ if (arch_check_bp_in_kernelspace(hw))
return -EPERM;
/*
@@ -655,8 +655,8 @@ int arch_validate_hwbkpt_settings(struct perf_event *bp)
* reports them.
*/
if (!debug_exception_updates_fsr() &&
- (info->ctrl.type == ARM_BREAKPOINT_LOAD ||
- info->ctrl.type == ARM_BREAKPOINT_STORE))
+ (hw->ctrl.type == ARM_BREAKPOINT_LOAD ||
+ hw->ctrl.type == ARM_BREAKPOINT_STORE))
return -EINVAL;
}
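
These prototype changes track a cross-architecture cleanup: instead of
mutating counter_arch_bp(bp) in place, the arch hook now fills in a
caller-provided arch_hw_breakpoint. Roughly how the core consumes the new
hooks (kernel/events/hw_breakpoint.c, simplified):

    static int hw_breakpoint_parse(struct perf_event *bp,
                                   const struct perf_event_attr *attr,
                                   struct arch_hw_breakpoint *hw)
    {
            int err;

            err = hw_breakpoint_arch_parse(bp, attr, hw);
            if (err)
                    return err;

            if (arch_check_bp_in_kernelspace(hw)) {
                    if (attr->exclude_kernel)
                            return -EINVAL;
                    /* Unprivileged users may not plant breakpoints in
                     * the kernel, to avoid trap recursion attacks. */
                    if (!capable(CAP_SYS_ADMIN))
                            return -EPERM;
            }

            return 0;
    }
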
diff --git a/arch/arm/kernel/irq.c b/arch/arm/kernel/irq.c
index ece04a457486..9908dacf9229 100644
--- a/arch/arm/kernel/irq.c
+++ b/arch/arm/kernel/irq.c
@@ -102,16 +102,6 @@ void __init init_IRQ(void)
uniphier_cache_init();
}
-#ifdef CONFIG_MULTI_IRQ_HANDLER
-void __init set_handle_irq(void (*handle_irq)(struct pt_regs *))
-{
- if (handle_arch_irq)
- return;
-
- handle_arch_irq = handle_irq;
-}
-#endif
-
#ifdef CONFIG_SPARSE_IRQ
int __init arch_probe_nr_irqs(void)
{
diff --git a/arch/arm/kernel/machine_kexec.c b/arch/arm/kernel/machine_kexec.c
index 6b38d7a634c1..dd2eb5f76b9f 100644
--- a/arch/arm/kernel/machine_kexec.c
+++ b/arch/arm/kernel/machine_kexec.c
@@ -83,7 +83,7 @@ void machine_crash_nonpanic_core(void *unused)
{
struct pt_regs regs;
- crash_setup_regs(&regs, NULL);
+ crash_setup_regs(&regs, get_irq_regs());
printk(KERN_DEBUG "CPU %u will stop doing anything useful since another CPU has crashed\n",
smp_processor_id());
crash_save_cpu(&regs, smp_processor_id());
@@ -95,6 +95,27 @@ void machine_crash_nonpanic_core(void *unused)
cpu_relax();
}
+void crash_smp_send_stop(void)
+{
+ static int cpus_stopped;
+ unsigned long msecs;
+
+ if (cpus_stopped)
+ return;
+
+ atomic_set(&waiting_for_crash_ipi, num_online_cpus() - 1);
+ smp_call_function(machine_crash_nonpanic_core, NULL, false);
+ msecs = 1000; /* Wait at most a second for the other cpus to stop */
+ while ((atomic_read(&waiting_for_crash_ipi) > 0) && msecs) {
+ mdelay(1);
+ msecs--;
+ }
+ if (atomic_read(&waiting_for_crash_ipi) > 0)
+ pr_warn("Non-crashing CPUs did not react to IPI\n");
+
+ cpus_stopped = 1;
+}
+
static void machine_kexec_mask_interrupts(void)
{
unsigned int i;
@@ -120,19 +141,8 @@ static void machine_kexec_mask_interrupts(void)
void machine_crash_shutdown(struct pt_regs *regs)
{
- unsigned long msecs;
-
local_irq_disable();
-
- atomic_set(&waiting_for_crash_ipi, num_online_cpus() - 1);
- smp_call_function(machine_crash_nonpanic_core, NULL, false);
- msecs = 1000; /* Wait at most a second for the other cpus to stop */
- while ((atomic_read(&waiting_for_crash_ipi) > 0) && msecs) {
- mdelay(1);
- msecs--;
- }
- if (atomic_read(&waiting_for_crash_ipi) > 0)
- pr_warn("Non-crashing CPUs did not react to IPI\n");
+ crash_smp_send_stop();
crash_save_cpu(regs, smp_processor_id());
machine_kexec_mask_interrupts();
diff --git a/arch/arm/kernel/paravirt.c b/arch/arm/kernel/paravirt.c
index 53f371ed4568..75c158b0353f 100644
--- a/arch/arm/kernel/paravirt.c
+++ b/arch/arm/kernel/paravirt.c
@@ -21,5 +21,5 @@
struct static_key paravirt_steal_enabled;
struct static_key paravirt_steal_rq_enabled;
-struct pv_time_ops pv_time_ops;
-EXPORT_SYMBOL_GPL(pv_time_ops);
+struct paravirt_patch_template pv_ops;
+EXPORT_SYMBOL_GPL(pv_ops);
diff --git a/arch/arm/kernel/perf_event_v6.c b/arch/arm/kernel/perf_event_v6.c
index 1d7061a38922..1ae99deeec54 100644
--- a/arch/arm/kernel/perf_event_v6.c
+++ b/arch/arm/kernel/perf_event_v6.c
@@ -233,7 +233,7 @@ armv6_pmcr_counter_has_overflowed(unsigned long pmcr,
return ret;
}
-static inline u32 armv6pmu_read_counter(struct perf_event *event)
+static inline u64 armv6pmu_read_counter(struct perf_event *event)
{
struct hw_perf_event *hwc = &event->hw;
int counter = hwc->idx;
@@ -251,7 +251,7 @@ static inline u32 armv6pmu_read_counter(struct perf_event *event)
return value;
}
-static inline void armv6pmu_write_counter(struct perf_event *event, u32 value)
+static inline void armv6pmu_write_counter(struct perf_event *event, u64 value)
{
struct hw_perf_event *hwc = &event->hw;
int counter = hwc->idx;
@@ -303,12 +303,10 @@ static void armv6pmu_enable_event(struct perf_event *event)
}
static irqreturn_t
-armv6pmu_handle_irq(int irq_num,
- void *dev)
+armv6pmu_handle_irq(struct arm_pmu *cpu_pmu)
{
unsigned long pmcr = armv6_pmcr_read();
struct perf_sample_data data;
- struct arm_pmu *cpu_pmu = (struct arm_pmu *)dev;
struct pmu_hw_events *cpuc = this_cpu_ptr(cpu_pmu->hw_events);
struct pt_regs *regs;
int idx;
@@ -413,6 +411,12 @@ armv6pmu_get_event_idx(struct pmu_hw_events *cpuc,
}
}
+static void armv6pmu_clear_event_idx(struct pmu_hw_events *cpuc,
+ struct perf_event *event)
+{
+ clear_bit(event->hw.idx, cpuc->used_mask);
+}
+
static void armv6pmu_disable_event(struct perf_event *event)
{
unsigned long val, mask, evt, flags;
@@ -493,11 +497,11 @@ static void armv6pmu_init(struct arm_pmu *cpu_pmu)
cpu_pmu->read_counter = armv6pmu_read_counter;
cpu_pmu->write_counter = armv6pmu_write_counter;
cpu_pmu->get_event_idx = armv6pmu_get_event_idx;
+ cpu_pmu->clear_event_idx = armv6pmu_clear_event_idx;
cpu_pmu->start = armv6pmu_start;
cpu_pmu->stop = armv6pmu_stop;
cpu_pmu->map_event = armv6_map_event;
cpu_pmu->num_events = 3;
- cpu_pmu->max_period = (1LLU << 32) - 1;
}
static int armv6_1136_pmu_init(struct arm_pmu *cpu_pmu)
@@ -544,11 +548,11 @@ static int armv6mpcore_pmu_init(struct arm_pmu *cpu_pmu)
cpu_pmu->read_counter = armv6pmu_read_counter;
cpu_pmu->write_counter = armv6pmu_write_counter;
cpu_pmu->get_event_idx = armv6pmu_get_event_idx;
+ cpu_pmu->clear_event_idx = armv6pmu_clear_event_idx;
cpu_pmu->start = armv6pmu_start;
cpu_pmu->stop = armv6pmu_stop;
cpu_pmu->map_event = armv6mpcore_map_event;
cpu_pmu->num_events = 3;
- cpu_pmu->max_period = (1LLU << 32) - 1;
return 0;
}
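
The handle_irq prototype change here (and in the v7 and XScale files below)
matches a change in the shared arm_pmu core: a single generic interrupt
handler recovers the struct arm_pmu and hands it straight to the
implementation hook. Roughly (drivers/perf/arm_pmu.c, simplified):

    static irqreturn_t armpmu_dispatch_irq(int irq, void *dev)
    {
            struct arm_pmu *armpmu = *(struct arm_pmu **)dev;
            u64 start_clock, finish_clock;
            irqreturn_t ret;

            if (WARN_ON_ONCE(!armpmu))
                    return IRQ_NONE;

            start_clock = sched_clock();
            ret = armpmu->handle_irq(armpmu);   /* no (irq, dev) re-cast */
            finish_clock = sched_clock();

            perf_sample_event_took(finish_clock - start_clock);
            return ret;
    }
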
diff --git a/arch/arm/kernel/perf_event_v7.c b/arch/arm/kernel/perf_event_v7.c
index 870b66c1e4ef..a4fb0f8b8f84 100644
--- a/arch/arm/kernel/perf_event_v7.c
+++ b/arch/arm/kernel/perf_event_v7.c
@@ -743,7 +743,7 @@ static inline void armv7_pmnc_select_counter(int idx)
isb();
}
-static inline u32 armv7pmu_read_counter(struct perf_event *event)
+static inline u64 armv7pmu_read_counter(struct perf_event *event)
{
struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
struct hw_perf_event *hwc = &event->hw;
@@ -763,7 +763,7 @@ static inline u32 armv7pmu_read_counter(struct perf_event *event)
return value;
}
-static inline void armv7pmu_write_counter(struct perf_event *event, u32 value)
+static inline void armv7pmu_write_counter(struct perf_event *event, u64 value)
{
struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
struct hw_perf_event *hwc = &event->hw;
@@ -946,11 +946,10 @@ static void armv7pmu_disable_event(struct perf_event *event)
raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
}
-static irqreturn_t armv7pmu_handle_irq(int irq_num, void *dev)
+static irqreturn_t armv7pmu_handle_irq(struct arm_pmu *cpu_pmu)
{
u32 pmnc;
struct perf_sample_data data;
- struct arm_pmu *cpu_pmu = (struct arm_pmu *)dev;
struct pmu_hw_events *cpuc = this_cpu_ptr(cpu_pmu->hw_events);
struct pt_regs *regs;
int idx;
@@ -1059,6 +1058,12 @@ static int armv7pmu_get_event_idx(struct pmu_hw_events *cpuc,
return -EAGAIN;
}
+static void armv7pmu_clear_event_idx(struct pmu_hw_events *cpuc,
+ struct perf_event *event)
+{
+ clear_bit(event->hw.idx, cpuc->used_mask);
+}
+
/*
* Add an event filter to a given event. This will only work for PMUv2 PMUs.
*/
@@ -1168,10 +1173,10 @@ static void armv7pmu_init(struct arm_pmu *cpu_pmu)
cpu_pmu->read_counter = armv7pmu_read_counter;
cpu_pmu->write_counter = armv7pmu_write_counter;
cpu_pmu->get_event_idx = armv7pmu_get_event_idx;
+ cpu_pmu->clear_event_idx = armv7pmu_clear_event_idx;
cpu_pmu->start = armv7pmu_start;
cpu_pmu->stop = armv7pmu_stop;
cpu_pmu->reset = armv7pmu_reset;
- cpu_pmu->max_period = (1LLU << 32) - 1;
};
static void armv7_read_num_pmnc_events(void *info)
@@ -1639,6 +1644,7 @@ static void krait_pmu_clear_event_idx(struct pmu_hw_events *cpuc,
bool venum_event = EVENT_VENUM(hwc->config_base);
bool krait_event = EVENT_CPU(hwc->config_base);
+ armv7pmu_clear_event_idx(cpuc, event);
if (venum_event || krait_event) {
bit = krait_event_to_bit(event, region, group);
clear_bit(bit, cpuc->used_mask);
@@ -1968,6 +1974,7 @@ static void scorpion_pmu_clear_event_idx(struct pmu_hw_events *cpuc,
bool venum_event = EVENT_VENUM(hwc->config_base);
bool scorpion_event = EVENT_CPU(hwc->config_base);
+ armv7pmu_clear_event_idx(cpuc, event);
if (venum_event || scorpion_event) {
bit = scorpion_event_to_bit(event, region, group);
clear_bit(bit, cpuc->used_mask);
@@ -2031,6 +2038,7 @@ static struct platform_driver armv7_pmu_driver = {
.driver = {
.name = "armv7-pmu",
.of_match_table = armv7_pmu_of_device_ids,
+ .suppress_bind_attrs = true,
},
.probe = armv7_pmu_device_probe,
};
diff --git a/arch/arm/kernel/perf_event_xscale.c b/arch/arm/kernel/perf_event_xscale.c
index fcf218da660e..f6cdcacfb96d 100644
--- a/arch/arm/kernel/perf_event_xscale.c
+++ b/arch/arm/kernel/perf_event_xscale.c
@@ -142,11 +142,10 @@ xscale1_pmnc_counter_has_overflowed(unsigned long pmnc,
}
static irqreturn_t
-xscale1pmu_handle_irq(int irq_num, void *dev)
+xscale1pmu_handle_irq(struct arm_pmu *cpu_pmu)
{
unsigned long pmnc;
struct perf_sample_data data;
- struct arm_pmu *cpu_pmu = (struct arm_pmu *)dev;
struct pmu_hw_events *cpuc = this_cpu_ptr(cpu_pmu->hw_events);
struct pt_regs *regs;
int idx;
@@ -293,6 +292,12 @@ xscale1pmu_get_event_idx(struct pmu_hw_events *cpuc,
}
}
+static void xscalepmu_clear_event_idx(struct pmu_hw_events *cpuc,
+ struct perf_event *event)
+{
+ clear_bit(event->hw.idx, cpuc->used_mask);
+}
+
static void xscale1pmu_start(struct arm_pmu *cpu_pmu)
{
unsigned long flags, val;
@@ -317,7 +322,7 @@ static void xscale1pmu_stop(struct arm_pmu *cpu_pmu)
raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
}
-static inline u32 xscale1pmu_read_counter(struct perf_event *event)
+static inline u64 xscale1pmu_read_counter(struct perf_event *event)
{
struct hw_perf_event *hwc = &event->hw;
int counter = hwc->idx;
@@ -338,7 +343,7 @@ static inline u32 xscale1pmu_read_counter(struct perf_event *event)
return val;
}
-static inline void xscale1pmu_write_counter(struct perf_event *event, u32 val)
+static inline void xscale1pmu_write_counter(struct perf_event *event, u64 val)
{
struct hw_perf_event *hwc = &event->hw;
int counter = hwc->idx;
@@ -371,11 +376,11 @@ static int xscale1pmu_init(struct arm_pmu *cpu_pmu)
cpu_pmu->read_counter = xscale1pmu_read_counter;
cpu_pmu->write_counter = xscale1pmu_write_counter;
cpu_pmu->get_event_idx = xscale1pmu_get_event_idx;
+ cpu_pmu->clear_event_idx = xscalepmu_clear_event_idx;
cpu_pmu->start = xscale1pmu_start;
cpu_pmu->stop = xscale1pmu_stop;
cpu_pmu->map_event = xscale_map_event;
cpu_pmu->num_events = 3;
- cpu_pmu->max_period = (1LLU << 32) - 1;
return 0;
}
@@ -489,11 +494,10 @@ xscale2_pmnc_counter_has_overflowed(unsigned long of_flags,
}
static irqreturn_t
-xscale2pmu_handle_irq(int irq_num, void *dev)
+xscale2pmu_handle_irq(struct arm_pmu *cpu_pmu)
{
unsigned long pmnc, of_flags;
struct perf_sample_data data;
- struct arm_pmu *cpu_pmu = (struct arm_pmu *)dev;
struct pmu_hw_events *cpuc = this_cpu_ptr(cpu_pmu->hw_events);
struct pt_regs *regs;
int idx;
@@ -681,7 +685,7 @@ static void xscale2pmu_stop(struct arm_pmu *cpu_pmu)
raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
}
-static inline u32 xscale2pmu_read_counter(struct perf_event *event)
+static inline u64 xscale2pmu_read_counter(struct perf_event *event)
{
struct hw_perf_event *hwc = &event->hw;
int counter = hwc->idx;
@@ -708,7 +712,7 @@ static inline u32 xscale2pmu_read_counter(struct perf_event *event)
return val;
}
-static inline void xscale2pmu_write_counter(struct perf_event *event, u32 val)
+static inline void xscale2pmu_write_counter(struct perf_event *event, u64 val)
{
struct hw_perf_event *hwc = &event->hw;
int counter = hwc->idx;
@@ -741,11 +745,11 @@ static int xscale2pmu_init(struct arm_pmu *cpu_pmu)
cpu_pmu->read_counter = xscale2pmu_read_counter;
cpu_pmu->write_counter = xscale2pmu_write_counter;
cpu_pmu->get_event_idx = xscale2pmu_get_event_idx;
+ cpu_pmu->clear_event_idx = xscalepmu_clear_event_idx;
cpu_pmu->start = xscale2pmu_start;
cpu_pmu->stop = xscale2pmu_stop;
cpu_pmu->map_event = xscale_map_event;
cpu_pmu->num_events = 5;
- cpu_pmu->max_period = (1LLU << 32) - 1;
return 0;
}
diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c
index 1523cb18b109..82ab015bf42b 100644
--- a/arch/arm/kernel/process.c
+++ b/arch/arm/kernel/process.c
@@ -39,7 +39,7 @@
#include <asm/tls.h>
#include <asm/vdso.h>
-#ifdef CONFIG_CC_STACKPROTECTOR
+#ifdef CONFIG_STACKPROTECTOR
#include <linux/stackprotector.h>
unsigned long __stack_chk_guard __read_mostly;
EXPORT_SYMBOL(__stack_chk_guard);
@@ -330,15 +330,15 @@ unsigned long arch_randomize_brk(struct mm_struct *mm)
* atomic helpers. Insert it into the gate_vma so that it is visible
* through ptrace and /proc/<pid>/mem.
*/
-static struct vm_area_struct gate_vma = {
- .vm_start = 0xffff0000,
- .vm_end = 0xffff0000 + PAGE_SIZE,
- .vm_flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC,
-};
+static struct vm_area_struct gate_vma;
static int __init gate_vma_init(void)
{
+ vma_init(&gate_vma, NULL);
gate_vma.vm_page_prot = PAGE_READONLY_EXEC;
+ gate_vma.vm_start = 0xffff0000;
+ gate_vma.vm_end = 0xffff0000 + PAGE_SIZE;
+ gate_vma.vm_flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC;
return 0;
}
arch_initcall(gate_vma_init);
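
The gate_vma rework is needed because vm_area_structs are now expected to be
initialized through vma_init(), which installs a dummy vm_ops so the mm code
can drop NULL checks; a plain static initializer would leave vm_ops NULL.
Roughly what the helper does (include/linux/mm.h, simplified):

    static inline void vma_init(struct vm_area_struct *vma, struct mm_struct *mm)
    {
            static const struct vm_operations_struct dummy_vm_ops = {};

            vma->vm_mm = mm;
            vma->vm_ops = &dummy_vm_ops;    /* never NULL after init */
            INIT_LIST_HEAD(&vma->anon_vma_chain);
    }
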
diff --git a/arch/arm/kernel/ptrace.c b/arch/arm/kernel/ptrace.c
index 7724b0f661b3..6fa5b6387556 100644
--- a/arch/arm/kernel/ptrace.c
+++ b/arch/arm/kernel/ptrace.c
@@ -203,14 +203,8 @@ void ptrace_disable(struct task_struct *child)
*/
void ptrace_break(struct task_struct *tsk, struct pt_regs *regs)
{
- siginfo_t info;
-
- info.si_signo = SIGTRAP;
- info.si_errno = 0;
- info.si_code = TRAP_BRKPT;
- info.si_addr = (void __user *)instruction_pointer(regs);
-
- force_sig_info(SIGTRAP, &info, tsk);
+ force_sig_fault(SIGTRAP, TRAP_BRKPT,
+ (void __user *)instruction_pointer(regs), tsk);
}
static int break_trap(struct pt_regs *regs, unsigned int instr)
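
force_sig_fault() builds the siginfo internally, removing a class of
partially-initialized siginfo bugs that the open-coded version risked.
Roughly its effect (kernel/signal.c, simplified, ignoring the ia64-only
extra arguments):

    int force_sig_fault(int sig, int code, void __user *addr,
                        struct task_struct *t)
    {
            struct siginfo info;

            clear_siginfo(&info);   /* no uninitialized padding leaks */
            info.si_signo = sig;
            info.si_errno = 0;
            info.si_code  = code;
            info.si_addr  = addr;
            return force_sig_info(info.si_signo, &info, t);
    }
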
diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c
index cd46a595422c..375b13f7e780 100644
--- a/arch/arm/kernel/setup.c
+++ b/arch/arm/kernel/setup.c
@@ -16,7 +16,6 @@
#include <linux/utsname.h>
#include <linux/initrd.h>
#include <linux/console.h>
-#include <linux/bootmem.h>
#include <linux/seq_file.h>
#include <linux/screen_info.h>
#include <linux/of_platform.h>
@@ -764,7 +763,7 @@ int __init arm_add_memory(u64 start, u64 size)
else
size -= aligned_start - start;
-#ifndef CONFIG_ARCH_PHYS_ADDR_T_64BIT
+#ifndef CONFIG_PHYS_ADDR_T_64BIT
if (aligned_start > ULONG_MAX) {
pr_crit("Ignoring memory at 0x%08llx outside 32-bit physical address space\n",
(long long)start);
@@ -867,7 +866,7 @@ static void __init request_standard_resources(const struct machine_desc *mdesc)
*/
boot_alias_start = phys_to_idmap(start);
if (arm_has_idmap_alias() && boot_alias_start != IDMAP_INVALID_ADDR) {
- res = memblock_virt_alloc(sizeof(*res), 0);
+ res = memblock_alloc(sizeof(*res), SMP_CACHE_BYTES);
res->name = "System RAM (boot alias)";
res->start = boot_alias_start;
res->end = phys_to_idmap(end);
@@ -875,7 +874,7 @@ static void __init request_standard_resources(const struct machine_desc *mdesc)
request_resource(&iomem_resource, res);
}
- res = memblock_virt_alloc(sizeof(*res), 0);
+ res = memblock_alloc(sizeof(*res), SMP_CACHE_BYTES);
res->name = "System RAM";
res->start = start;
res->end = end;
@@ -1155,7 +1154,7 @@ void __init setup_arch(char **cmdline_p)
reserve_crashkernel();
-#ifdef CONFIG_MULTI_IRQ_HANDLER
+#ifdef CONFIG_GENERIC_IRQ_MULTI_HANDLER
handle_arch_irq = mdesc->handle_irq;
#endif
diff --git a/arch/arm/kernel/signal.c b/arch/arm/kernel/signal.c
index a8155fdf352d..b908382b69ff 100644
--- a/arch/arm/kernel/signal.c
+++ b/arch/arm/kernel/signal.c
@@ -549,6 +549,12 @@ static void handle_signal(struct ksignal *ksig, struct pt_regs *regs)
int ret;
/*
+ * Increment event counter and perform fixup for the pre-signal
+ * frame.
+ */
+ rseq_signal_deliver(ksig, regs);
+
+ /*
* Set up the stack frame
*/
if (ksig->ka.sa.sa_flags & SA_SIGINFO)
@@ -668,6 +674,7 @@ do_work_pending(struct pt_regs *regs, unsigned int thread_flags, int syscall)
} else {
clear_thread_flag(TIF_NOTIFY_RESUME);
tracehook_notify_resume(regs);
+ rseq_handle_notify_resume(NULL, regs);
}
}
local_irq_disable();
@@ -711,3 +718,10 @@ asmlinkage void addr_limit_check_failed(void)
{
addr_limit_user_check();
}
+
+#ifdef CONFIG_DEBUG_RSEQ
+asmlinkage void do_rseq_syscall(struct pt_regs *regs)
+{
+ rseq_syscall(regs);
+}
+#endif
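
do_rseq_syscall() is the C entry point for the new hooks added to
entry-common.S earlier in this series. With CONFIG_DEBUG_RSEQ,
rseq_syscall() kills any task that issues a system call from inside a
registered rseq critical section, since that is a userspace bug. Roughly
(kernel/rseq.c, simplified):

    void rseq_syscall(struct pt_regs *regs)
    {
            unsigned long ip = instruction_pointer(regs);
            struct task_struct *t = current;
            struct rseq_cs rseq_cs;

            if (!t->rseq)
                    return;         /* task never registered rseq */
            if (!access_ok(VERIFY_READ, t->rseq, sizeof(*t->rseq)) ||
                rseq_get_rseq_cs(t, &rseq_cs) || in_rseq_cs(ip, &rseq_cs))
                    force_sig(SIGSEGV, t);
    }
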
diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
index 82b879db32ee..3bf82232b1be 100644
--- a/arch/arm/kernel/smp.c
+++ b/arch/arm/kernel/smp.c
@@ -266,8 +266,6 @@ int __cpu_disable(void)
flush_cache_louis();
local_flush_tlb_all();
- clear_tasks_mm_cpumask(cpu);
-
return 0;
}
@@ -285,6 +283,7 @@ void __cpu_die(unsigned int cpu)
}
pr_debug("CPU%u: shutdown\n", cpu);
+ clear_tasks_mm_cpumask(cpu);
/*
* platform_cpu_kill() is generally expected to do the powering off
* and/or cutting of clocks to the dying CPU. Optionally, this may
@@ -725,6 +724,21 @@ void smp_send_stop(void)
pr_warn("SMP: failed to stop secondary CPUs\n");
}
+/* If panic() is called on two CPUs at the same time and one of them
+ * reaches panic_smp_self_stop() before the other calls
+ * crash_smp_send_stop(), the first CPU can no longer take the stop IPI,
+ * stays marked online, and kdump fails. So provide our own
+ * panic_smp_self_stop() that also does set_cpu_online(smp_processor_id(), false).
+ */
+void panic_smp_self_stop(void)
+{
+ pr_debug("CPU %u will stop doing anything useful since another CPU has paniced\n",
+ smp_processor_id());
+ set_cpu_online(smp_processor_id(), false);
+ while (1)
+ cpu_relax();
+}
+
/*
* not supported here
*/
diff --git a/arch/arm/kernel/swp_emulate.c b/arch/arm/kernel/swp_emulate.c
index 3bda08bee674..a188d5e8ab7f 100644
--- a/arch/arm/kernel/swp_emulate.c
+++ b/arch/arm/kernel/swp_emulate.c
@@ -91,18 +91,6 @@ static int proc_status_show(struct seq_file *m, void *v)
seq_printf(m, "Last process:\t\t%d\n", previous_pid);
return 0;
}
-
-static int proc_status_open(struct inode *inode, struct file *file)
-{
- return single_open(file, proc_status_show, PDE_DATA(inode));
-}
-
-static const struct file_operations proc_status_fops = {
- .open = proc_status_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
-};
#endif
/*
@@ -110,21 +98,20 @@ static const struct file_operations proc_status_fops = {
*/
static void set_segfault(struct pt_regs *regs, unsigned long addr)
{
- siginfo_t info;
+ int si_code;
down_read(&current->mm->mmap_sem);
if (find_vma(current->mm, addr) == NULL)
- info.si_code = SEGV_MAPERR;
+ si_code = SEGV_MAPERR;
else
- info.si_code = SEGV_ACCERR;
+ si_code = SEGV_ACCERR;
up_read(&current->mm->mmap_sem);
- info.si_signo = SIGSEGV;
- info.si_errno = 0;
- info.si_addr = (void *) instruction_pointer(regs);
-
pr_debug("SWP{B} emulation: access caused memory abort!\n");
- arm_notify_die("Illegal memory access", regs, &info, 0, 0);
+ arm_notify_die("Illegal memory access", regs,
+ SIGSEGV, si_code,
+ (void __user *)instruction_pointer(regs),
+ 0, 0);
abtcounter++;
}
@@ -260,7 +247,8 @@ static int __init swp_emulation_init(void)
return 0;
#ifdef CONFIG_PROC_FS
- if (!proc_create("cpu/swp_emulation", S_IRUGO, NULL, &proc_status_fops))
+ if (!proc_create_single("cpu/swp_emulation", S_IRUGO, NULL,
+ proc_status_show))
return -ENOMEM;
#endif /* CONFIG_PROC_FS */
diff --git a/arch/arm/kernel/sys_arm.c b/arch/arm/kernel/sys_arm.c
index 3151f5623d0e..bdf7514204ab 100644
--- a/arch/arm/kernel/sys_arm.c
+++ b/arch/arm/kernel/sys_arm.c
@@ -35,5 +35,5 @@
asmlinkage long sys_arm_fadvise64_64(int fd, int advice,
loff_t offset, loff_t len)
{
- return sys_fadvise64_64(fd, offset, len, advice);
+ return ksys_fadvise64_64(fd, offset, len, advice);
}
diff --git a/arch/arm/kernel/sys_oabi-compat.c b/arch/arm/kernel/sys_oabi-compat.c
index a87684532327..40da0872170f 100644
--- a/arch/arm/kernel/sys_oabi-compat.c
+++ b/arch/arm/kernel/sys_oabi-compat.c
@@ -287,7 +287,7 @@ asmlinkage long sys_oabi_epoll_wait(int epfd,
return -EINVAL;
if (!access_ok(VERIFY_WRITE, events, sizeof(*events) * maxevents))
return -EFAULT;
- kbuf = kmalloc(sizeof(*kbuf) * maxevents, GFP_KERNEL);
+ kbuf = kmalloc_array(maxevents, sizeof(*kbuf), GFP_KERNEL);
if (!kbuf)
return -ENOMEM;
fs = get_fs();
@@ -328,7 +328,7 @@ asmlinkage long sys_oabi_semtimedop(int semid,
return -EINVAL;
if (!access_ok(VERIFY_READ, tsops, sizeof(*tsops) * nsops))
return -EFAULT;
- sops = kmalloc(sizeof(*sops) * nsops, GFP_KERNEL);
+ sops = kmalloc_array(nsops, sizeof(*sops), GFP_KERNEL);
if (!sops)
return -ENOMEM;
err = 0;
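
kmalloc_array() is preferred over an open-coded sizeof(*x) * n because the
multiplication can overflow when n is user-controlled, as maxevents and
nsops are here; the helper returns NULL instead of allocating a short
buffer. Roughly (include/linux/slab.h, simplified):

    static inline void *kmalloc_array(size_t n, size_t size, gfp_t flags)
    {
            if (size != 0 && n > SIZE_MAX / size)
                    return NULL;            /* n * size would wrap */
            return kmalloc(n * size, flags);
    }
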
diff --git a/arch/arm/kernel/time.c b/arch/arm/kernel/time.c
index cf2701cb0de8..078b259ead4e 100644
--- a/arch/arm/kernel/time.c
+++ b/arch/arm/kernel/time.c
@@ -83,29 +83,18 @@ static void dummy_clock_access(struct timespec64 *ts)
}
static clock_access_fn __read_persistent_clock = dummy_clock_access;
-static clock_access_fn __read_boot_clock = dummy_clock_access;
void read_persistent_clock64(struct timespec64 *ts)
{
__read_persistent_clock(ts);
}
-void read_boot_clock64(struct timespec64 *ts)
-{
- __read_boot_clock(ts);
-}
-
-int __init register_persistent_clock(clock_access_fn read_boot,
- clock_access_fn read_persistent)
+int __init register_persistent_clock(clock_access_fn read_persistent)
{
/* Only allow the clockaccess functions to be registered once */
- if (__read_persistent_clock == dummy_clock_access &&
- __read_boot_clock == dummy_clock_access) {
- if (read_boot)
- __read_boot_clock = read_boot;
+ if (__read_persistent_clock == dummy_clock_access) {
if (read_persistent)
__read_persistent_clock = read_persistent;
-
return 0;
}
diff --git a/arch/arm/kernel/topology.c b/arch/arm/kernel/topology.c
index 24ac3cab411d..60e375ce1ab2 100644
--- a/arch/arm/kernel/topology.c
+++ b/arch/arm/kernel/topology.c
@@ -94,12 +94,6 @@ static void __init parse_dt_topology(void)
__cpu_capacity = kcalloc(nr_cpu_ids, sizeof(*__cpu_capacity),
GFP_NOWAIT);
- cn = of_find_node_by_path("/cpus");
- if (!cn) {
- pr_err("No CPU information found in DT\n");
- return;
- }
-
for_each_possible_cpu(cpu) {
const u32 *rate;
int len;
diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c
index 5e3633c24e63..2d668cff8ef4 100644
--- a/arch/arm/kernel/traps.c
+++ b/arch/arm/kernel/traps.c
@@ -19,6 +19,7 @@
#include <linux/uaccess.h>
#include <linux/hardirq.h>
#include <linux/kdebug.h>
+#include <linux/kprobes.h>
#include <linux/module.h>
#include <linux/kexec.h>
#include <linux/bug.h>
@@ -364,13 +365,14 @@ void die(const char *str, struct pt_regs *regs, int err)
}
void arm_notify_die(const char *str, struct pt_regs *regs,
- struct siginfo *info, unsigned long err, unsigned long trap)
+ int signo, int si_code, void __user *addr,
+ unsigned long err, unsigned long trap)
{
if (user_mode(regs)) {
current->thread.error_code = err;
current->thread.trap_no = trap;
- force_sig_info(info->si_signo, info, current);
+ force_sig_fault(signo, si_code, addr, current);
} else {
die(str, regs, err);
}
@@ -417,7 +419,8 @@ void unregister_undef_hook(struct undef_hook *hook)
raw_spin_unlock_irqrestore(&undef_lock, flags);
}
-static int call_undef_hook(struct pt_regs *regs, unsigned int instr)
+static nokprobe_inline
+int call_undef_hook(struct pt_regs *regs, unsigned int instr)
{
struct undef_hook *hook;
unsigned long flags;
@@ -436,7 +439,6 @@ static int call_undef_hook(struct pt_regs *regs, unsigned int instr)
asmlinkage void do_undefinstr(struct pt_regs *regs)
{
unsigned int instr;
- siginfo_t info;
void __user *pc;
pc = (void __user *)instruction_pointer(regs);
@@ -482,14 +484,10 @@ die_sig:
dump_instr(KERN_INFO, regs);
}
#endif
-
- info.si_signo = SIGILL;
- info.si_errno = 0;
- info.si_code = ILL_ILLOPC;
- info.si_addr = pc;
-
- arm_notify_die("Oops - undefined instruction", regs, &info, 0, 6);
+ arm_notify_die("Oops - undefined instruction", regs,
+ SIGILL, ILL_ILLOPC, pc, 0, 6);
}
+NOKPROBE_SYMBOL(do_undefinstr)
/*
* Handle FIQ similarly to NMI on x86 systems.
@@ -535,8 +533,6 @@ asmlinkage void bad_mode(struct pt_regs *regs, int reason)
static int bad_syscall(int n, struct pt_regs *regs)
{
- siginfo_t info;
-
if ((current->personality & PER_MASK) != PER_LINUX) {
send_sig(SIGSEGV, current, 1);
return regs->ARM_r0;
@@ -550,13 +546,10 @@ static int bad_syscall(int n, struct pt_regs *regs)
}
#endif
- info.si_signo = SIGILL;
- info.si_errno = 0;
- info.si_code = ILL_ILLTRP;
- info.si_addr = (void __user *)instruction_pointer(regs) -
- (thumb_mode(regs) ? 2 : 4);
-
- arm_notify_die("Oops - bad syscall", regs, &info, n, 0);
+ arm_notify_die("Oops - bad syscall", regs, SIGILL, ILL_ILLTRP,
+ (void __user *)instruction_pointer(regs) -
+ (thumb_mode(regs) ? 2 : 4),
+ n, 0);
return regs->ARM_r0;
}
@@ -602,19 +595,13 @@ do_cache_op(unsigned long start, unsigned long end, int flags)
#define NR(x) ((__ARM_NR_##x) - __ARM_NR_BASE)
asmlinkage int arm_syscall(int no, struct pt_regs *regs)
{
- siginfo_t info;
-
if ((no >> 16) != (__ARM_NR_BASE >> 16))
return bad_syscall(no, regs);
switch (no & 0xffff) {
case 0: /* branch through 0 */
- info.si_signo = SIGSEGV;
- info.si_errno = 0;
- info.si_code = SEGV_MAPERR;
- info.si_addr = NULL;
-
- arm_notify_die("branch through zero", regs, &info, 0, 0);
+ arm_notify_die("branch through zero", regs,
+ SIGSEGV, SEGV_MAPERR, NULL, 0, 0);
return 0;
case NR(breakpoint): /* SWI BREAK_POINT */
@@ -682,13 +669,10 @@ asmlinkage int arm_syscall(int no, struct pt_regs *regs)
}
}
#endif
- info.si_signo = SIGILL;
- info.si_errno = 0;
- info.si_code = ILL_ILLTRP;
- info.si_addr = (void __user *)instruction_pointer(regs) -
- (thumb_mode(regs) ? 2 : 4);
-
- arm_notify_die("Oops - bad syscall(2)", regs, &info, no, 0);
+ arm_notify_die("Oops - bad syscall(2)", regs, SIGILL, ILL_ILLTRP,
+ (void __user *)instruction_pointer(regs) -
+ (thumb_mode(regs) ? 2 : 4),
+ no, 0);
return 0;
}
@@ -738,7 +722,6 @@ asmlinkage void
baddataabort(int code, unsigned long instr, struct pt_regs *regs)
{
unsigned long addr = instruction_pointer(regs);
- siginfo_t info;
#ifdef CONFIG_DEBUG_USER
if (user_debug & UDBG_BADABORT) {
@@ -749,12 +732,8 @@ baddataabort(int code, unsigned long instr, struct pt_regs *regs)
}
#endif
- info.si_signo = SIGILL;
- info.si_errno = 0;
- info.si_code = ILL_ILLOPC;
- info.si_addr = (void __user *)addr;
-
- arm_notify_die("unknown data abort code", regs, &info, instr, 0);
+ arm_notify_die("unknown data abort code", regs,
+ SIGILL, ILL_ILLOPC, (void __user *)addr, instr, 0);
}
void __readwrite_bug(const char *fn)
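
The traps.c conversion threads signo/si_code/addr through arm_notify_die() so that every call site drops its hand-built siginfo_t; force_sig_fault() fills the relevant fields itself, which among other things avoids shipping uninitialized siginfo padding to userspace. (The nokprobe_inline/NOKPROBE_SYMBOL additions independently blacklist the undefined-instruction path from kprobes, since probing it would recurse.) A before/after sketch of one call site:

#include <linux/sched/signal.h>

static void report_undef_sketch(void __user *pc)
{
	/* before: manual siginfo_t setup at every site
	 *
	 *	siginfo_t info;
	 *	info.si_signo = SIGILL;
	 *	info.si_errno = 0;
	 *	info.si_code  = ILL_ILLOPC;
	 *	info.si_addr  = pc;
	 *	force_sig_info(info.si_signo, &info, current);
	 *
	 * after: one helper call fills and delivers the signal */
	force_sig_fault(SIGILL, ILL_ILLOPC, pc, current);
}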
diff --git a/arch/arm/kernel/vmlinux-xip.lds.S b/arch/arm/kernel/vmlinux-xip.lds.S
index 12b87591eb7c..8c74037ade22 100644
--- a/arch/arm/kernel/vmlinux-xip.lds.S
+++ b/arch/arm/kernel/vmlinux-xip.lds.S
@@ -13,40 +13,10 @@
#include <asm/cache.h>
#include <asm/thread_info.h>
#include <asm/memory.h>
+#include <asm/mpu.h>
#include <asm/page.h>
-#define PROC_INFO \
- . = ALIGN(4); \
- VMLINUX_SYMBOL(__proc_info_begin) = .; \
- *(.proc.info.init) \
- VMLINUX_SYMBOL(__proc_info_end) = .;
-
-#define IDMAP_TEXT \
- ALIGN_FUNCTION(); \
- VMLINUX_SYMBOL(__idmap_text_start) = .; \
- *(.idmap.text) \
- VMLINUX_SYMBOL(__idmap_text_end) = .; \
- . = ALIGN(PAGE_SIZE); \
- VMLINUX_SYMBOL(__hyp_idmap_text_start) = .; \
- *(.hyp.idmap.text) \
- VMLINUX_SYMBOL(__hyp_idmap_text_end) = .;
-
-#ifdef CONFIG_HOTPLUG_CPU
-#define ARM_CPU_DISCARD(x)
-#define ARM_CPU_KEEP(x) x
-#else
-#define ARM_CPU_DISCARD(x) x
-#define ARM_CPU_KEEP(x)
-#endif
-
-#if (defined(CONFIG_SMP_ON_UP) && !defined(CONFIG_DEBUG_SPINLOCK)) || \
- defined(CONFIG_GENERIC_BUG)
-#define ARM_EXIT_KEEP(x) x
-#define ARM_EXIT_DISCARD(x)
-#else
-#define ARM_EXIT_KEEP(x)
-#define ARM_EXIT_DISCARD(x) x
-#endif
+#include "vmlinux.lds.h"
OUTPUT_ARCH(arm)
ENTRY(stext)
@@ -69,20 +39,9 @@ SECTIONS
* unwind sections get included.
*/
/DISCARD/ : {
- *(.ARM.exidx.exit.text)
- *(.ARM.extab.exit.text)
- ARM_CPU_DISCARD(*(.ARM.exidx.cpuexit.text))
- ARM_CPU_DISCARD(*(.ARM.extab.cpuexit.text))
- ARM_EXIT_DISCARD(EXIT_TEXT)
- ARM_EXIT_DISCARD(EXIT_DATA)
- EXIT_CALL
-#ifndef CONFIG_MMU
- *(.text.fixup)
- *(__ex_table)
-#endif
+ ARM_DISCARD
*(.alt.smp.init)
- *(.discard)
- *(.discard.*)
+ *(.pv_table)
}
. = XIP_VIRT_ADDR(CONFIG_XIP_PHYS_ADDR);
@@ -95,22 +54,7 @@ SECTIONS
.text : { /* Real text segment */
_stext = .; /* Text and read-only data */
- IDMAP_TEXT
- __entry_text_start = .;
- *(.entry.text)
- __entry_text_end = .;
- IRQENTRY_TEXT
- TEXT_TEXT
- SCHED_TEXT
- CPUIDLE_TEXT
- LOCK_TEXT
- KPROBES_TEXT
- *(.gnu.warning)
- *(.glue_7)
- *(.glue_7t)
- . = ALIGN(4);
- *(.got) /* Global offset table */
- ARM_CPU_KEEP(PROC_INFO)
+ ARM_TEXT
}
RO_DATA(PAGE_SIZE)
@@ -118,53 +62,19 @@ SECTIONS
. = ALIGN(4);
__ex_table : AT(ADDR(__ex_table) - LOAD_OFFSET) {
__start___ex_table = .;
-#ifdef CONFIG_MMU
- *(__ex_table)
-#endif
+ ARM_MMU_KEEP(*(__ex_table))
__stop___ex_table = .;
}
#ifdef CONFIG_ARM_UNWIND
- /*
- * Stack unwinding tables
- */
- . = ALIGN(8);
- .ARM.unwind_idx : {
- __start_unwind_idx = .;
- *(.ARM.exidx*)
- __stop_unwind_idx = .;
- }
- .ARM.unwind_tab : {
- __start_unwind_tab = .;
- *(.ARM.extab*)
- __stop_unwind_tab = .;
- }
+ ARM_UNWIND_SECTIONS
#endif
NOTES
_etext = .; /* End of text and rodata section */
- /*
- * The vectors and stubs are relocatable code, and the
- * only thing that matters is their relative offsets
- */
- __vectors_start = .;
- .vectors 0xffff0000 : AT(__vectors_start) {
- *(.vectors)
- }
- . = __vectors_start + SIZEOF(.vectors);
- __vectors_end = .;
-
- __stubs_start = .;
- .stubs ADDR(.vectors) + 0x1000 : AT(__stubs_start) {
- *(.stubs)
- }
- . = __stubs_start + SIZEOF(.stubs);
- __stubs_end = .;
-
- PROVIDE(vector_fiq_offset = vector_fiq - ADDR(.vectors));
-
+ ARM_VECTORS
INIT_TEXT_SECTION(8)
.exit.text : {
ARM_EXIT_KEEP(EXIT_TEXT)
@@ -186,7 +96,6 @@ SECTIONS
INIT_SETUP(16)
INIT_CALLS
CON_INITCALL
- SECURITY_INITCALL
INIT_RAM_FS
}
@@ -223,6 +132,10 @@ SECTIONS
PERCPU_SECTION(L1_CACHE_BYTES)
#endif
+#ifdef CONFIG_HAVE_TCM
+ ARM_TCM
+#endif
+
/*
* End of copied data. We need a dummy section to get its LMA.
* Also located before final ALIGN() as trailing padding is not stored
@@ -234,64 +147,10 @@ SECTIONS
. = ALIGN(PAGE_SIZE);
__init_end = .;
-#ifdef CONFIG_HAVE_TCM
- /*
- * We align everything to a page boundary so we can
- * free it after init has commenced and TCM contents have
- * been copied to its destination.
- */
- .tcm_start : {
- . = ALIGN(PAGE_SIZE);
- __tcm_start = .;
- __itcm_start = .;
- }
-
- /*
- * Link these to the ITCM RAM
- * Put VMA to the TCM address and LMA to the common RAM
- * and we'll upload the contents from RAM to TCM and free
- * the used RAM after that.
- */
- .text_itcm ITCM_OFFSET : AT(__itcm_start)
- {
- __sitcm_text = .;
- *(.tcm.text)
- *(.tcm.rodata)
- . = ALIGN(4);
- __eitcm_text = .;
- }
-
- /*
- * Reset the dot pointer, this is needed to create the
- * relative __dtcm_start below (to be used as extern in code).
- */
- . = ADDR(.tcm_start) + SIZEOF(.tcm_start) + SIZEOF(.text_itcm);
-
- .dtcm_start : {
- __dtcm_start = .;
- }
-
- /* TODO: add remainder of ITCM as well, that can be used for data! */
- .data_dtcm DTCM_OFFSET : AT(__dtcm_start)
- {
- . = ALIGN(4);
- __sdtcm_data = .;
- *(.tcm.data)
- . = ALIGN(4);
- __edtcm_data = .;
- }
-
- /* Reset the dot pointer or the linker gets confused */
- . = ADDR(.dtcm_start) + SIZEOF(.data_dtcm);
-
- /* End marker for freeing TCM copy in linked object */
- .tcm_end : AT(ADDR(.dtcm_start) + SIZEOF(.data_dtcm)){
- . = ALIGN(PAGE_SIZE);
- __tcm_end = .;
- }
-#endif
-
BSS_SECTION(0, 0, 8)
+#ifdef CONFIG_ARM_MPU
+ . = ALIGN(PMSAv8_MINALIGN);
+#endif
_end = .;
STABS_DEBUG
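
Both linker scripts also gain . = ALIGN(PMSAv8_MINALIGN); around the image bounds when CONFIG_ARM_MPU is set. PMSAv8 MPU region base/limit registers ignore the low address bits, so region boundaries must sit on the minimum granule; padding _end lets an MPU region cover the kernel image exactly. A hedged sketch of the arithmetic, with the granule value assumed from asm/mpu.h:

#include <linux/kernel.h>	/* ALIGN() */

#define PMSAV8_MINALIGN_SKETCH	64	/* assumed value of PMSAv8_MINALIGN */

static inline unsigned long mpu_region_limit(unsigned long image_end)
{
	/* inclusive limit of an MPU region ending at the padded _end */
	return ALIGN(image_end, PMSAV8_MINALIGN_SKETCH) - 1;
}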
diff --git a/arch/arm/kernel/vmlinux.lds.S b/arch/arm/kernel/vmlinux.lds.S
index 84a1ae3ce46e..23150c0f0f4d 100644
--- a/arch/arm/kernel/vmlinux.lds.S
+++ b/arch/arm/kernel/vmlinux.lds.S
@@ -12,46 +12,11 @@
#include <asm/cache.h>
#include <asm/thread_info.h>
#include <asm/memory.h>
+#include <asm/mpu.h>
#include <asm/page.h>
#include <asm/pgtable.h>
-#define PROC_INFO \
- . = ALIGN(4); \
- VMLINUX_SYMBOL(__proc_info_begin) = .; \
- *(.proc.info.init) \
- VMLINUX_SYMBOL(__proc_info_end) = .;
-
-#define HYPERVISOR_TEXT \
- VMLINUX_SYMBOL(__hyp_text_start) = .; \
- *(.hyp.text) \
- VMLINUX_SYMBOL(__hyp_text_end) = .;
-
-#define IDMAP_TEXT \
- ALIGN_FUNCTION(); \
- VMLINUX_SYMBOL(__idmap_text_start) = .; \
- *(.idmap.text) \
- VMLINUX_SYMBOL(__idmap_text_end) = .; \
- . = ALIGN(PAGE_SIZE); \
- VMLINUX_SYMBOL(__hyp_idmap_text_start) = .; \
- *(.hyp.idmap.text) \
- VMLINUX_SYMBOL(__hyp_idmap_text_end) = .;
-
-#ifdef CONFIG_HOTPLUG_CPU
-#define ARM_CPU_DISCARD(x)
-#define ARM_CPU_KEEP(x) x
-#else
-#define ARM_CPU_DISCARD(x) x
-#define ARM_CPU_KEEP(x)
-#endif
-
-#if (defined(CONFIG_SMP_ON_UP) && !defined(CONFIG_DEBUG_SPINLOCK)) || \
- defined(CONFIG_GENERIC_BUG) || defined(CONFIG_JUMP_LABEL)
-#define ARM_EXIT_KEEP(x) x
-#define ARM_EXIT_DISCARD(x)
-#else
-#define ARM_EXIT_KEEP(x)
-#define ARM_EXIT_DISCARD(x) x
-#endif
+#include "vmlinux.lds.h"
OUTPUT_ARCH(arm)
ENTRY(stext)
@@ -74,22 +39,10 @@ SECTIONS
* unwind sections get included.
*/
/DISCARD/ : {
- *(.ARM.exidx.exit.text)
- *(.ARM.extab.exit.text)
- ARM_CPU_DISCARD(*(.ARM.exidx.cpuexit.text))
- ARM_CPU_DISCARD(*(.ARM.extab.cpuexit.text))
- ARM_EXIT_DISCARD(EXIT_TEXT)
- ARM_EXIT_DISCARD(EXIT_DATA)
- EXIT_CALL
-#ifndef CONFIG_MMU
- *(.text.fixup)
- *(__ex_table)
-#endif
+ ARM_DISCARD
#ifndef CONFIG_SMP_ON_UP
*(.alt.smp.init)
#endif
- *(.discard)
- *(.discard.*)
}
. = PAGE_OFFSET + TEXT_OFFSET;
@@ -102,26 +55,12 @@ SECTIONS
. = ALIGN(1<<SECTION_SHIFT);
#endif
+#ifdef CONFIG_ARM_MPU
+ . = ALIGN(PMSAv8_MINALIGN);
+#endif
.text : { /* Real text segment */
_stext = .; /* Text and read-only data */
- IDMAP_TEXT
- __entry_text_start = .;
- *(.entry.text)
- __entry_text_end = .;
- IRQENTRY_TEXT
- SOFTIRQENTRY_TEXT
- TEXT_TEXT
- SCHED_TEXT
- CPUIDLE_TEXT
- LOCK_TEXT
- HYPERVISOR_TEXT
- KPROBES_TEXT
- *(.gnu.warning)
- *(.glue_7)
- *(.glue_7t)
- . = ALIGN(4);
- *(.got) /* Global offset table */
- ARM_CPU_KEEP(PROC_INFO)
+ ARM_TEXT
}
#ifdef CONFIG_DEBUG_ALIGN_RODATA
@@ -134,27 +73,12 @@ SECTIONS
. = ALIGN(4);
__ex_table : AT(ADDR(__ex_table) - LOAD_OFFSET) {
__start___ex_table = .;
-#ifdef CONFIG_MMU
- *(__ex_table)
-#endif
+ ARM_MMU_KEEP(*(__ex_table))
__stop___ex_table = .;
}
#ifdef CONFIG_ARM_UNWIND
- /*
- * Stack unwinding tables
- */
- . = ALIGN(8);
- .ARM.unwind_idx : {
- __start_unwind_idx = .;
- *(.ARM.exidx*)
- __stop_unwind_idx = .;
- }
- .ARM.unwind_tab : {
- __start_unwind_tab = .;
- *(.ARM.extab*)
- __stop_unwind_tab = .;
- }
+ ARM_UNWIND_SECTIONS
#endif
NOTES
@@ -166,26 +90,7 @@ SECTIONS
#endif
__init_begin = .;
- /*
- * The vectors and stubs are relocatable code, and the
- * only thing that matters is their relative offsets
- */
- __vectors_start = .;
- .vectors 0xffff0000 : AT(__vectors_start) {
- *(.vectors)
- }
- . = __vectors_start + SIZEOF(.vectors);
- __vectors_end = .;
-
- __stubs_start = .;
- .stubs ADDR(.vectors) + 0x1000 : AT(__stubs_start) {
- *(.stubs)
- }
- . = __stubs_start + SIZEOF(.stubs);
- __stubs_end = .;
-
- PROVIDE(vector_fiq_offset = vector_fiq - ADDR(.vectors));
-
+ ARM_VECTORS
INIT_TEXT_SECTION(8)
.exit.text : {
ARM_EXIT_KEEP(EXIT_TEXT)
@@ -226,6 +131,10 @@ SECTIONS
PERCPU_SECTION(L1_CACHE_BYTES)
#endif
+#ifdef CONFIG_HAVE_TCM
+ ARM_TCM
+#endif
+
#ifdef CONFIG_STRICT_KERNEL_RWX
. = ALIGN(1<<SECTION_SHIFT);
#else
@@ -237,64 +146,10 @@ SECTIONS
RW_DATA_SECTION(L1_CACHE_BYTES, PAGE_SIZE, THREAD_SIZE)
_edata = .;
-#ifdef CONFIG_HAVE_TCM
- /*
- * We align everything to a page boundary so we can
- * free it after init has commenced and TCM contents have
- * been copied to its destination.
- */
- .tcm_start : {
- . = ALIGN(PAGE_SIZE);
- __tcm_start = .;
- __itcm_start = .;
- }
-
- /*
- * Link these to the ITCM RAM
- * Put VMA to the TCM address and LMA to the common RAM
- * and we'll upload the contents from RAM to TCM and free
- * the used RAM after that.
- */
- .text_itcm ITCM_OFFSET : AT(__itcm_start)
- {
- __sitcm_text = .;
- *(.tcm.text)
- *(.tcm.rodata)
- . = ALIGN(4);
- __eitcm_text = .;
- }
-
- /*
- * Reset the dot pointer, this is needed to create the
- * relative __dtcm_start below (to be used as extern in code).
- */
- . = ADDR(.tcm_start) + SIZEOF(.tcm_start) + SIZEOF(.text_itcm);
-
- .dtcm_start : {
- __dtcm_start = .;
- }
-
- /* TODO: add remainder of ITCM as well, that can be used for data! */
- .data_dtcm DTCM_OFFSET : AT(__dtcm_start)
- {
- . = ALIGN(4);
- __sdtcm_data = .;
- *(.tcm.data)
- . = ALIGN(4);
- __edtcm_data = .;
- }
-
- /* Reset the dot pointer or the linker gets confused */
- . = ADDR(.dtcm_start) + SIZEOF(.data_dtcm);
-
- /* End marker for freeing TCM copy in linked object */
- .tcm_end : AT(ADDR(.dtcm_start) + SIZEOF(.data_dtcm)){
- . = ALIGN(PAGE_SIZE);
- __tcm_end = .;
- }
-#endif
-
BSS_SECTION(0, 0, 0)
+#ifdef CONFIG_ARM_MPU
+ . = ALIGN(PMSAv8_MINALIGN);
+#endif
_end = .;
STABS_DEBUG
diff --git a/arch/arm/kernel/vmlinux.lds.h b/arch/arm/kernel/vmlinux.lds.h
new file mode 100644
index 000000000000..8247bc15addc
--- /dev/null
+++ b/arch/arm/kernel/vmlinux.lds.h
@@ -0,0 +1,137 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifdef CONFIG_HOTPLUG_CPU
+#define ARM_CPU_DISCARD(x)
+#define ARM_CPU_KEEP(x) x
+#else
+#define ARM_CPU_DISCARD(x) x
+#define ARM_CPU_KEEP(x)
+#endif
+
+#if (defined(CONFIG_SMP_ON_UP) && !defined(CONFIG_DEBUG_SPINLOCK)) || \
+ defined(CONFIG_GENERIC_BUG) || defined(CONFIG_JUMP_LABEL)
+#define ARM_EXIT_KEEP(x) x
+#define ARM_EXIT_DISCARD(x)
+#else
+#define ARM_EXIT_KEEP(x)
+#define ARM_EXIT_DISCARD(x) x
+#endif
+
+#ifdef CONFIG_MMU
+#define ARM_MMU_KEEP(x) x
+#define ARM_MMU_DISCARD(x)
+#else
+#define ARM_MMU_KEEP(x)
+#define ARM_MMU_DISCARD(x) x
+#endif
+
+#define PROC_INFO \
+ . = ALIGN(4); \
+ __proc_info_begin = .; \
+ *(.proc.info.init) \
+ __proc_info_end = .;
+
+#define HYPERVISOR_TEXT \
+ __hyp_text_start = .; \
+ *(.hyp.text) \
+ __hyp_text_end = .;
+
+#define IDMAP_TEXT \
+ ALIGN_FUNCTION(); \
+ __idmap_text_start = .; \
+ *(.idmap.text) \
+ __idmap_text_end = .; \
+ . = ALIGN(PAGE_SIZE); \
+ __hyp_idmap_text_start = .; \
+ *(.hyp.idmap.text) \
+ __hyp_idmap_text_end = .;
+
+#define ARM_DISCARD \
+ *(.ARM.exidx.exit.text) \
+ *(.ARM.extab.exit.text) \
+ *(.ARM.exidx.text.exit) \
+ *(.ARM.extab.text.exit) \
+ ARM_CPU_DISCARD(*(.ARM.exidx.cpuexit.text)) \
+ ARM_CPU_DISCARD(*(.ARM.extab.cpuexit.text)) \
+ ARM_EXIT_DISCARD(EXIT_TEXT) \
+ ARM_EXIT_DISCARD(EXIT_DATA) \
+ EXIT_CALL \
+ ARM_MMU_DISCARD(*(.text.fixup)) \
+ ARM_MMU_DISCARD(*(__ex_table)) \
+ *(.discard) \
+ *(.discard.*)
+
+#define ARM_TEXT \
+ IDMAP_TEXT \
+ __entry_text_start = .; \
+ *(.entry.text) \
+ __entry_text_end = .; \
+ IRQENTRY_TEXT \
+ SOFTIRQENTRY_TEXT \
+ TEXT_TEXT \
+ SCHED_TEXT \
+ CPUIDLE_TEXT \
+ LOCK_TEXT \
+ HYPERVISOR_TEXT \
+ KPROBES_TEXT \
+ *(.gnu.warning) \
+ *(.glue_7) \
+ *(.glue_7t) \
+ . = ALIGN(4); \
+ *(.got) /* Global offset table */ \
+ ARM_CPU_KEEP(PROC_INFO)
+
+/* Stack unwinding tables */
+#define ARM_UNWIND_SECTIONS \
+ . = ALIGN(8); \
+ .ARM.unwind_idx : { \
+ __start_unwind_idx = .; \
+ *(.ARM.exidx*) \
+ __stop_unwind_idx = .; \
+ } \
+ .ARM.unwind_tab : { \
+ __start_unwind_tab = .; \
+ *(.ARM.extab*) \
+ __stop_unwind_tab = .; \
+ }
+
+/*
+ * The vectors and stubs are relocatable code, and the
+ * only thing that matters is their relative offsets
+ */
+#define ARM_VECTORS \
+ __vectors_start = .; \
+ .vectors 0xffff0000 : AT(__vectors_start) { \
+ *(.vectors) \
+ } \
+ . = __vectors_start + SIZEOF(.vectors); \
+ __vectors_end = .; \
+ \
+ __stubs_start = .; \
+ .stubs ADDR(.vectors) + 0x1000 : AT(__stubs_start) { \
+ *(.stubs) \
+ } \
+ . = __stubs_start + SIZEOF(.stubs); \
+ __stubs_end = .; \
+ \
+ PROVIDE(vector_fiq_offset = vector_fiq - ADDR(.vectors));
+
+#define ARM_TCM \
+ __itcm_start = ALIGN(4); \
+ .text_itcm ITCM_OFFSET : AT(__itcm_start - LOAD_OFFSET) { \
+ __sitcm_text = .; \
+ *(.tcm.text) \
+ *(.tcm.rodata) \
+ . = ALIGN(4); \
+ __eitcm_text = .; \
+ } \
+ . = __itcm_start + SIZEOF(.text_itcm); \
+ \
+ __dtcm_start = .; \
+ .data_dtcm DTCM_OFFSET : AT(__dtcm_start - LOAD_OFFSET) { \
+ __sdtcm_data = .; \
+ *(.tcm.data) \
+ . = ALIGN(4); \
+ __edtcm_data = .; \
+ } \
+ . = __dtcm_start + SIZEOF(.data_dtcm);
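
The new header deduplicates what the two linker scripts previously carried as private copies, and it is built around a keep/discard macro dual: for each config axis exactly one macro of a pair expands to its argument, so a section body and the /DISCARD/ list can both name the same input section without #ifdef noise inside the script. A minimal sketch of the pattern in plain preprocessor terms (CONFIG_FEATURE and the FEATURE_* names are illustrative):

#ifdef CONFIG_FEATURE
#define FEATURE_KEEP(x)		x
#define FEATURE_DISCARD(x)
#else
#define FEATURE_KEEP(x)
#define FEATURE_DISCARD(x)	x
#endif

/*
 * A section body then writes FEATURE_KEEP(*(.some.section)) and the
 * /DISCARD/ list writes FEATURE_DISCARD(*(.some.section)); the input
 * section lands in exactly one of the two places, as with
 * ARM_MMU_KEEP(*(__ex_table)) above.
 */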