Diffstat (limited to 'arch/arm/kernel')
-rw-r--r--  arch/arm/kernel/Makefile           |   9
-rw-r--r--  arch/arm/kernel/asm-offsets.c      |  15
-rw-r--r--  arch/arm/kernel/crash_dump.c       |   2
-rw-r--r--  arch/arm/kernel/devtree.c          |   2
-rw-r--r--  arch/arm/kernel/entry-armv.S       | 109
-rw-r--r--  arch/arm/kernel/entry-common.S     |  46
-rw-r--r--  arch/arm/kernel/etm.c              | 660
-rw-r--r--  arch/arm/kernel/fiq.c              |  19
-rw-r--r--  arch/arm/kernel/head.S             |  15
-rw-r--r--  arch/arm/kernel/hw_breakpoint.c    |   7
-rw-r--r--  arch/arm/kernel/hyp-stub.S         |   2
-rw-r--r--  arch/arm/kernel/irq.c              |   2
-rw-r--r--  arch/arm/kernel/kprobes-common.c   |  19
-rw-r--r--  arch/arm/kernel/kprobes-thumb.c    |  20
-rw-r--r--  arch/arm/kernel/kprobes.c          |   9
-rw-r--r--  arch/arm/kernel/machine_kexec.c    |  25
-rw-r--r--  arch/arm/kernel/module.c           |  65
-rw-r--r--  arch/arm/kernel/perf_event.c       |  41
-rw-r--r--  arch/arm/kernel/perf_event_cpu.c   | 181
-rw-r--r--  arch/arm/kernel/perf_event_v7.c    |  57
-rw-r--r--  arch/arm/kernel/process.c          |  53
-rw-r--r--  arch/arm/kernel/psci.c             |  49
-rw-r--r--  arch/arm/kernel/psci_smp.c         |  84
-rw-r--r--  arch/arm/kernel/relocate_kernel.S  |   8
-rw-r--r--  arch/arm/kernel/sched_clock.c      |  13
-rw-r--r--  arch/arm/kernel/setup.c            |  55
-rw-r--r--  arch/arm/kernel/signal.c           |  72
-rw-r--r--  arch/arm/kernel/signal.h           |  12
-rw-r--r--  arch/arm/kernel/sigreturn_codes.S  |  80
-rw-r--r--  arch/arm/kernel/sleep.S            |  33
-rw-r--r--  arch/arm/kernel/smp.c              |  32
-rw-r--r--  arch/arm/kernel/smp_scu.c          |  14
-rw-r--r--  arch/arm/kernel/smp_tlb.c          |  18
-rw-r--r--  arch/arm/kernel/smp_twd.c          |  26
-rw-r--r--  arch/arm/kernel/stacktrace.c       |  20
-rw-r--r--  arch/arm/kernel/topology.c         | 135
-rw-r--r--  arch/arm/kernel/traps.c            |  80
-rw-r--r--  arch/arm/kernel/vmlinux.lds.S      |  17
38 files changed, 1085 insertions(+), 1021 deletions(-)
diff --git a/arch/arm/kernel/Makefile b/arch/arm/kernel/Makefile
index 5f3338eacad2..65160d5fe850 100644
--- a/arch/arm/kernel/Makefile
+++ b/arch/arm/kernel/Makefile
@@ -17,13 +17,13 @@ CFLAGS_REMOVE_return_address.o = -pg
obj-y := elf.o entry-armv.o entry-common.o irq.o opcodes.o \
process.o ptrace.o return_address.o sched_clock.o \
- setup.o signal.o stacktrace.o sys_arm.o time.o traps.o
+ setup.o signal.o sigreturn_codes.o \
+ stacktrace.o sys_arm.o time.o traps.o
obj-$(CONFIG_ATAGS) += atags_parse.o
obj-$(CONFIG_ATAGS_PROC) += atags_proc.o
obj-$(CONFIG_DEPRECATED_PARAM_STRUCT) += atags_compat.o
-obj-$(CONFIG_OC_ETM) += etm.o
obj-$(CONFIG_CPU_IDLE) += cpuidle.o
obj-$(CONFIG_ISA_DMA_API) += dma.o
obj-$(CONFIG_FIQ) += fiq.o fiqasm.o
@@ -82,6 +82,9 @@ obj-$(CONFIG_DEBUG_LL) += debug.o
obj-$(CONFIG_EARLY_PRINTK) += early_printk.o
obj-$(CONFIG_ARM_VIRT_EXT) += hyp-stub.o
-obj-$(CONFIG_ARM_PSCI) += psci.o
+ifeq ($(CONFIG_ARM_PSCI),y)
+obj-y += psci.o
+obj-$(CONFIG_SMP) += psci_smp.o
+endif
extra-y := $(head-y) vmlinux.lds
diff --git a/arch/arm/kernel/asm-offsets.c b/arch/arm/kernel/asm-offsets.c
index ee68cce6b48e..776d9186e9c1 100644
--- a/arch/arm/kernel/asm-offsets.c
+++ b/arch/arm/kernel/asm-offsets.c
@@ -168,6 +168,7 @@ int main(void)
DEFINE(VCPU_FIQ_REGS, offsetof(struct kvm_vcpu, arch.regs.fiq_regs));
DEFINE(VCPU_PC, offsetof(struct kvm_vcpu, arch.regs.usr_regs.ARM_pc));
DEFINE(VCPU_CPSR, offsetof(struct kvm_vcpu, arch.regs.usr_regs.ARM_cpsr));
+ DEFINE(VCPU_HCR, offsetof(struct kvm_vcpu, arch.hcr));
DEFINE(VCPU_IRQ_LINES, offsetof(struct kvm_vcpu, arch.irq_lines));
DEFINE(VCPU_HSR, offsetof(struct kvm_vcpu, arch.fault.hsr));
DEFINE(VCPU_HxFAR, offsetof(struct kvm_vcpu, arch.fault.hxfar));
@@ -175,13 +176,13 @@ int main(void)
DEFINE(VCPU_HYP_PC, offsetof(struct kvm_vcpu, arch.fault.hyp_pc));
#ifdef CONFIG_KVM_ARM_VGIC
DEFINE(VCPU_VGIC_CPU, offsetof(struct kvm_vcpu, arch.vgic_cpu));
- DEFINE(VGIC_CPU_HCR, offsetof(struct vgic_cpu, vgic_hcr));
- DEFINE(VGIC_CPU_VMCR, offsetof(struct vgic_cpu, vgic_vmcr));
- DEFINE(VGIC_CPU_MISR, offsetof(struct vgic_cpu, vgic_misr));
- DEFINE(VGIC_CPU_EISR, offsetof(struct vgic_cpu, vgic_eisr));
- DEFINE(VGIC_CPU_ELRSR, offsetof(struct vgic_cpu, vgic_elrsr));
- DEFINE(VGIC_CPU_APR, offsetof(struct vgic_cpu, vgic_apr));
- DEFINE(VGIC_CPU_LR, offsetof(struct vgic_cpu, vgic_lr));
+ DEFINE(VGIC_V2_CPU_HCR, offsetof(struct vgic_cpu, vgic_v2.vgic_hcr));
+ DEFINE(VGIC_V2_CPU_VMCR, offsetof(struct vgic_cpu, vgic_v2.vgic_vmcr));
+ DEFINE(VGIC_V2_CPU_MISR, offsetof(struct vgic_cpu, vgic_v2.vgic_misr));
+ DEFINE(VGIC_V2_CPU_EISR, offsetof(struct vgic_cpu, vgic_v2.vgic_eisr));
+ DEFINE(VGIC_V2_CPU_ELRSR, offsetof(struct vgic_cpu, vgic_v2.vgic_elrsr));
+ DEFINE(VGIC_V2_CPU_APR, offsetof(struct vgic_cpu, vgic_v2.vgic_apr));
+ DEFINE(VGIC_V2_CPU_LR, offsetof(struct vgic_cpu, vgic_v2.vgic_lr));
DEFINE(VGIC_CPU_NR_LR, offsetof(struct vgic_cpu, nr_lr));
#ifdef CONFIG_KVM_ARM_TIMER
DEFINE(VCPU_TIMER_CNTV_CTL, offsetof(struct kvm_vcpu, arch.timer_cpu.cntv_ctl));
diff --git a/arch/arm/kernel/crash_dump.c b/arch/arm/kernel/crash_dump.c
index 90c50d4b43f7..5d1286d51154 100644
--- a/arch/arm/kernel/crash_dump.c
+++ b/arch/arm/kernel/crash_dump.c
@@ -39,7 +39,7 @@ ssize_t copy_oldmem_page(unsigned long pfn, char *buf,
if (!csize)
return 0;
- vaddr = ioremap(pfn << PAGE_SHIFT, PAGE_SIZE);
+ vaddr = ioremap(__pfn_to_phys(pfn), PAGE_SIZE);
if (!vaddr)
return -ENOMEM;
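
[Note: the crash_dump.c hunk above replaces an open-coded `pfn << PAGE_SHIFT` with `__pfn_to_phys()`. A minimal user-space sketch of why that matters on LPAE systems follows; `pfn_to_phys` and the constants below are illustrative stand-ins, not the kernel's own definitions.]

/*
 * Illustrative sketch only: with 32-bit arithmetic, shifting the pfn left
 * by PAGE_SHIFT truncates physical addresses at or above 4GB (LPAE),
 * whereas a helper that widens to a 64-bit phys_addr_t first does not.
 */
#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12
typedef uint64_t phys_addr_t;

/* hypothetical stand-in for the kernel's __pfn_to_phys() */
static phys_addr_t pfn_to_phys(unsigned long pfn)
{
	return (phys_addr_t)pfn << PAGE_SHIFT;
}

int main(void)
{
	unsigned long pfn = 0x100000;	/* page frame sitting at the 4GB boundary */
	uint32_t truncated = (uint32_t)(pfn << PAGE_SHIFT);	/* 32-bit result */

	printf("truncated: %#x\n", truncated);			/* wraps to 0 */
	printf("widened:   %#llx\n", (unsigned long long)pfn_to_phys(pfn));
	return 0;
}
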
diff --git a/arch/arm/kernel/devtree.c b/arch/arm/kernel/devtree.c
index 5859c8bc727c..a44e7d11ab02 100644
--- a/arch/arm/kernel/devtree.c
+++ b/arch/arm/kernel/devtree.c
@@ -212,7 +212,7 @@ struct machine_desc * __init setup_machine_fdt(unsigned int dt_phys)
}
if (!mdesc_best) {
const char *prop;
- long size;
+ int size;
early_print("\nError: unrecognized/unsupported "
"device tree compatible list:\n[ ");
diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S
index 582b405befc5..45a68d6bb2a3 100644
--- a/arch/arm/kernel/entry-armv.S
+++ b/arch/arm/kernel/entry-armv.S
@@ -192,6 +192,7 @@ __dabt_svc:
svc_entry
mov r2, sp
dabt_helper
+ THUMB( ldr r5, [sp, #S_PSR] ) @ potentially updated CPSR
svc_exit r5 @ return from exception
UNWIND(.fnend )
ENDPROC(__dabt_svc)
@@ -415,9 +416,8 @@ __und_usr:
bne __und_usr_thumb
sub r4, r2, #4 @ ARM instr at LR - 4
1: ldrt r0, [r4]
-#ifdef CONFIG_CPU_ENDIAN_BE8
- rev r0, r0 @ little endian instruction
-#endif
+ ARM_BE8(rev r0, r0) @ little endian instruction
+
@ r0 = 32-bit ARM instruction which caused the exception
@ r2 = PC value for the following instruction (:= regs->ARM_pc)
@ r4 = PC value for the faulting instruction
@@ -741,6 +741,18 @@ ENDPROC(__switch_to)
#endif
.endm
+ .macro kuser_pad, sym, size
+ .if (. - \sym) & 3
+ .rept 4 - (. - \sym) & 3
+ .byte 0
+ .endr
+ .endif
+ .rept (\size - (. - \sym)) / 4
+ .word 0xe7fddef1
+ .endr
+ .endm
+
+#ifdef CONFIG_KUSER_HELPERS
.align 5
.globl __kuser_helper_start
__kuser_helper_start:
@@ -831,18 +843,13 @@ kuser_cmpxchg64_fixup:
#error "incoherent kernel configuration"
#endif
- /* pad to next slot */
- .rept (16 - (. - __kuser_cmpxchg64)/4)
- .word 0
- .endr
-
- .align 5
+ kuser_pad __kuser_cmpxchg64, 64
__kuser_memory_barrier: @ 0xffff0fa0
smp_dmb arm
usr_ret lr
- .align 5
+ kuser_pad __kuser_memory_barrier, 32
__kuser_cmpxchg: @ 0xffff0fc0
@@ -915,13 +922,14 @@ kuser_cmpxchg32_fixup:
#endif
- .align 5
+ kuser_pad __kuser_cmpxchg, 32
__kuser_get_tls: @ 0xffff0fe0
ldr r0, [pc, #(16 - 8)] @ read TLS, set in kuser_get_tls_init
usr_ret lr
mrc p15, 0, r0, c13, c0, 3 @ 0xffff0fe8 hardware TLS code
- .rep 4
+ kuser_pad __kuser_get_tls, 16
+ .rep 3
.word 0 @ 0xffff0ff0 software TLS value, then
.endr @ pad up to __kuser_helper_version
@@ -931,14 +939,16 @@ __kuser_helper_version: @ 0xffff0ffc
.globl __kuser_helper_end
__kuser_helper_end:
+#endif
+
THUMB( .thumb )
/*
* Vector stubs.
*
- * This code is copied to 0xffff0200 so we can use branches in the
- * vectors, rather than ldr's. Note that this code must not
- * exceed 0x300 bytes.
+ * This code is copied to 0xffff1000 so we can use branches in the
+ * vectors, rather than ldr's. Note that this code must not exceed
+ * a page size.
*
* Common stub entry macro:
* Enter in IRQ mode, spsr = SVC/USR CPSR, lr = SVC/USR PC
@@ -985,8 +995,17 @@ ENDPROC(vector_\name)
1:
.endm
- .globl __stubs_start
+ .section .stubs, "ax", %progbits
__stubs_start:
+ @ This must be the first word
+ .word vector_swi
+
+vector_rst:
+ ARM( swi SYS_ERROR0 )
+ THUMB( svc #0 )
+ THUMB( nop )
+ b vector_und
+
/*
* Interrupt dispatcher
*/
@@ -1081,6 +1100,16 @@ __stubs_start:
.align 5
/*=============================================================================
+ * Address exception handler
+ *-----------------------------------------------------------------------------
+ * These aren't too critical.
+ * (they're not supposed to happen, and won't happen in 32-bit data mode).
+ */
+
+vector_addrexcptn:
+ b vector_addrexcptn
+
+/*=============================================================================
* Undefined FIQs
*-----------------------------------------------------------------------------
* Enter in FIQ mode, spsr = ANY CPSR, lr = ANY PC
@@ -1093,45 +1122,19 @@ __stubs_start:
vector_fiq:
subs pc, lr, #4
-/*=============================================================================
- * Address exception handler
- *-----------------------------------------------------------------------------
- * These aren't too critical.
- * (they're not supposed to happen, and won't happen in 32-bit data mode).
- */
+ .globl vector_fiq_offset
+ .equ vector_fiq_offset, vector_fiq
-vector_addrexcptn:
- b vector_addrexcptn
-
-/*
- * We group all the following data together to optimise
- * for CPUs with separate I & D caches.
- */
- .align 5
-
-.LCvswi:
- .word vector_swi
-
- .globl __stubs_end
-__stubs_end:
-
- .equ stubs_offset, __vectors_start + 0x200 - __stubs_start
-
- .globl __vectors_start
+ .section .vectors, "ax", %progbits
__vectors_start:
- ARM( swi SYS_ERROR0 )
- THUMB( svc #0 )
- THUMB( nop )
- W(b) vector_und + stubs_offset
- W(ldr) pc, .LCvswi + stubs_offset
- W(b) vector_pabt + stubs_offset
- W(b) vector_dabt + stubs_offset
- W(b) vector_addrexcptn + stubs_offset
- W(b) vector_irq + stubs_offset
- W(b) vector_fiq + stubs_offset
-
- .globl __vectors_end
-__vectors_end:
+ W(b) vector_rst
+ W(b) vector_und
+ W(ldr) pc, __vectors_start + 0x1000
+ W(b) vector_pabt
+ W(b) vector_dabt
+ W(b) vector_addrexcptn
+ W(b) vector_irq
+ W(b) vector_fiq
.data
diff --git a/arch/arm/kernel/entry-common.S b/arch/arm/kernel/entry-common.S
index bc5bc0a97131..11d68917d3b1 100644
--- a/arch/arm/kernel/entry-common.S
+++ b/arch/arm/kernel/entry-common.S
@@ -362,6 +362,16 @@ ENTRY(vector_swi)
str r0, [sp, #S_OLD_R0] @ Save OLD_R0
zero_fp
+#ifdef CONFIG_ALIGNMENT_TRAP
+ ldr ip, __cr_alignment
+ ldr ip, [ip]
+ mcr p15, 0, ip, c1, c0 @ update control register
+#endif
+
+ enable_irq
+ ct_user_exit
+ get_thread_info tsk
+
/*
* Get the system call number.
*/
@@ -375,13 +385,11 @@ ENTRY(vector_swi)
#ifdef CONFIG_ARM_THUMB
tst r8, #PSR_T_BIT
movne r10, #0 @ no thumb OABI emulation
- ldreq r10, [lr, #-4] @ get SWI instruction
+ USER( ldreq r10, [lr, #-4] ) @ get SWI instruction
#else
- ldr r10, [lr, #-4] @ get SWI instruction
-#endif
-#ifdef CONFIG_CPU_ENDIAN_BE8
- rev r10, r10 @ little endian instruction
+ USER( ldr r10, [lr, #-4] ) @ get SWI instruction
#endif
+ ARM_BE8(rev r10, r10) @ little endian instruction
#elif defined(CONFIG_AEABI)
@@ -392,22 +400,13 @@ ENTRY(vector_swi)
/* Legacy ABI only, possibly thumb mode. */
tst r8, #PSR_T_BIT @ this is SPSR from save_user_regs
addne scno, r7, #__NR_SYSCALL_BASE @ put OS number in
- ldreq scno, [lr, #-4]
+ USER( ldreq scno, [lr, #-4] )
#else
/* Legacy ABI only. */
- ldr scno, [lr, #-4] @ get SWI instruction
-#endif
-
-#ifdef CONFIG_ALIGNMENT_TRAP
- ldr ip, __cr_alignment
- ldr ip, [ip]
- mcr p15, 0, ip, c1, c0 @ update control register
+ USER( ldr scno, [lr, #-4] ) @ get SWI instruction
#endif
- enable_irq
- ct_user_exit
- get_thread_info tsk
adr tbl, sys_call_table @ load syscall table pointer
#if defined(CONFIG_OABI_COMPAT)
@@ -442,6 +441,21 @@ local_restart:
eor r0, scno, #__NR_SYSCALL_BASE @ put OS number back
bcs arm_syscall
b sys_ni_syscall @ not private func
+
+#if defined(CONFIG_OABI_COMPAT) || !defined(CONFIG_AEABI)
+ /*
+ * We failed to handle a fault trying to access the page
+ * containing the swi instruction, but we're not really in a
+ * position to return -EFAULT. Instead, return back to the
+ * instruction and re-enter the user fault handling path trying
+ * to page it in. This will likely result in sending SEGV to the
+ * current task.
+ */
+9001:
+ sub lr, lr, #4
+ str lr, [sp, #S_PC]
+ b ret_fast_syscall
+#endif
ENDPROC(vector_swi)
/*
diff --git a/arch/arm/kernel/etm.c b/arch/arm/kernel/etm.c
deleted file mode 100644
index 8ff0ecdc637f..000000000000
--- a/arch/arm/kernel/etm.c
+++ /dev/null
@@ -1,660 +0,0 @@
-/*
- * linux/arch/arm/kernel/etm.c
- *
- * Driver for ARM's Embedded Trace Macrocell and Embedded Trace Buffer.
- *
- * Copyright (C) 2009 Nokia Corporation.
- * Alexander Shishkin
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/types.h>
-#include <linux/io.h>
-#include <linux/sysrq.h>
-#include <linux/device.h>
-#include <linux/clk.h>
-#include <linux/amba/bus.h>
-#include <linux/fs.h>
-#include <linux/uaccess.h>
-#include <linux/miscdevice.h>
-#include <linux/vmalloc.h>
-#include <linux/mutex.h>
-#include <linux/module.h>
-#include <asm/hardware/coresight.h>
-#include <asm/sections.h>
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Alexander Shishkin");
-
-/*
- * ETM tracer state
- */
-struct tracectx {
- unsigned int etb_bufsz;
- void __iomem *etb_regs;
- void __iomem *etm_regs;
- unsigned long flags;
- int ncmppairs;
- int etm_portsz;
- struct device *dev;
- struct clk *emu_clk;
- struct mutex mutex;
-};
-
-static struct tracectx tracer;
-
-static inline bool trace_isrunning(struct tracectx *t)
-{
- return !!(t->flags & TRACER_RUNNING);
-}
-
-static int etm_setup_address_range(struct tracectx *t, int n,
- unsigned long start, unsigned long end, int exclude, int data)
-{
- u32 flags = ETMAAT_ARM | ETMAAT_IGNCONTEXTID | ETMAAT_NSONLY | \
- ETMAAT_NOVALCMP;
-
- if (n < 1 || n > t->ncmppairs)
- return -EINVAL;
-
- /* comparators and ranges are numbered starting with 1 as opposed
- * to bits in a word */
- n--;
-
- if (data)
- flags |= ETMAAT_DLOADSTORE;
- else
- flags |= ETMAAT_IEXEC;
-
- /* first comparator for the range */
- etm_writel(t, flags, ETMR_COMP_ACC_TYPE(n * 2));
- etm_writel(t, start, ETMR_COMP_VAL(n * 2));
-
- /* second comparator is right next to it */
- etm_writel(t, flags, ETMR_COMP_ACC_TYPE(n * 2 + 1));
- etm_writel(t, end, ETMR_COMP_VAL(n * 2 + 1));
-
- flags = exclude ? ETMTE_INCLEXCL : 0;
- etm_writel(t, flags | (1 << n), ETMR_TRACEENCTRL);
-
- return 0;
-}
-
-static int trace_start(struct tracectx *t)
-{
- u32 v;
- unsigned long timeout = TRACER_TIMEOUT;
-
- etb_unlock(t);
-
- etb_writel(t, 0, ETBR_FORMATTERCTRL);
- etb_writel(t, 1, ETBR_CTRL);
-
- etb_lock(t);
-
- /* configure etm */
- v = ETMCTRL_OPTS | ETMCTRL_PROGRAM | ETMCTRL_PORTSIZE(t->etm_portsz);
-
- if (t->flags & TRACER_CYCLE_ACC)
- v |= ETMCTRL_CYCLEACCURATE;
-
- etm_unlock(t);
-
- etm_writel(t, v, ETMR_CTRL);
-
- while (!(etm_readl(t, ETMR_CTRL) & ETMCTRL_PROGRAM) && --timeout)
- ;
- if (!timeout) {
- dev_dbg(t->dev, "Waiting for progbit to assert timed out\n");
- etm_lock(t);
- return -EFAULT;
- }
-
- etm_setup_address_range(t, 1, (unsigned long)_stext,
- (unsigned long)_etext, 0, 0);
- etm_writel(t, 0, ETMR_TRACEENCTRL2);
- etm_writel(t, 0, ETMR_TRACESSCTRL);
- etm_writel(t, 0x6f, ETMR_TRACEENEVT);
-
- v &= ~ETMCTRL_PROGRAM;
- v |= ETMCTRL_PORTSEL;
-
- etm_writel(t, v, ETMR_CTRL);
-
- timeout = TRACER_TIMEOUT;
- while (etm_readl(t, ETMR_CTRL) & ETMCTRL_PROGRAM && --timeout)
- ;
- if (!timeout) {
- dev_dbg(t->dev, "Waiting for progbit to deassert timed out\n");
- etm_lock(t);
- return -EFAULT;
- }
-
- etm_lock(t);
-
- t->flags |= TRACER_RUNNING;
-
- return 0;
-}
-
-static int trace_stop(struct tracectx *t)
-{
- unsigned long timeout = TRACER_TIMEOUT;
-
- etm_unlock(t);
-
- etm_writel(t, 0x440, ETMR_CTRL);
- while (!(etm_readl(t, ETMR_CTRL) & ETMCTRL_PROGRAM) && --timeout)
- ;
- if (!timeout) {
- dev_dbg(t->dev, "Waiting for progbit to assert timed out\n");
- etm_lock(t);
- return -EFAULT;
- }
-
- etm_lock(t);
-
- etb_unlock(t);
- etb_writel(t, ETBFF_MANUAL_FLUSH, ETBR_FORMATTERCTRL);
-
- timeout = TRACER_TIMEOUT;
- while (etb_readl(t, ETBR_FORMATTERCTRL) &
- ETBFF_MANUAL_FLUSH && --timeout)
- ;
- if (!timeout) {
- dev_dbg(t->dev, "Waiting for formatter flush to commence "
- "timed out\n");
- etb_lock(t);
- return -EFAULT;
- }
-
- etb_writel(t, 0, ETBR_CTRL);
-
- etb_lock(t);
-
- t->flags &= ~TRACER_RUNNING;
-
- return 0;
-}
-
-static int etb_getdatalen(struct tracectx *t)
-{
- u32 v;
- int rp, wp;
-
- v = etb_readl(t, ETBR_STATUS);
-
- if (v & 1)
- return t->etb_bufsz;
-
- rp = etb_readl(t, ETBR_READADDR);
- wp = etb_readl(t, ETBR_WRITEADDR);
-
- if (rp > wp) {
- etb_writel(t, 0, ETBR_READADDR);
- etb_writel(t, 0, ETBR_WRITEADDR);
-
- return 0;
- }
-
- return wp - rp;
-}
-
-/* sysrq+v will always stop the running trace and leave it at that */
-static void etm_dump(void)
-{
- struct tracectx *t = &tracer;
- u32 first = 0;
- int length;
-
- if (!t->etb_regs) {
- printk(KERN_INFO "No tracing hardware found\n");
- return;
- }
-
- if (trace_isrunning(t))
- trace_stop(t);
-
- etb_unlock(t);
-
- length = etb_getdatalen(t);
-
- if (length == t->etb_bufsz)
- first = etb_readl(t, ETBR_WRITEADDR);
-
- etb_writel(t, first, ETBR_READADDR);
-
- printk(KERN_INFO "Trace buffer contents length: %d\n", length);
- printk(KERN_INFO "--- ETB buffer begin ---\n");
- for (; length; length--)
- printk("%08x", cpu_to_be32(etb_readl(t, ETBR_READMEM)));
- printk(KERN_INFO "\n--- ETB buffer end ---\n");
-
- /* deassert the overflow bit */
- etb_writel(t, 1, ETBR_CTRL);
- etb_writel(t, 0, ETBR_CTRL);
-
- etb_writel(t, 0, ETBR_TRIGGERCOUNT);
- etb_writel(t, 0, ETBR_READADDR);
- etb_writel(t, 0, ETBR_WRITEADDR);
-
- etb_lock(t);
-}
-
-static void sysrq_etm_dump(int key)
-{
- dev_dbg(tracer.dev, "Dumping ETB buffer\n");
- etm_dump();
-}
-
-static struct sysrq_key_op sysrq_etm_op = {
- .handler = sysrq_etm_dump,
- .help_msg = "etm-buffer-dump(v)",
- .action_msg = "etm",
-};
-
-static int etb_open(struct inode *inode, struct file *file)
-{
- if (!tracer.etb_regs)
- return -ENODEV;
-
- file->private_data = &tracer;
-
- return nonseekable_open(inode, file);
-}
-
-static ssize_t etb_read(struct file *file, char __user *data,
- size_t len, loff_t *ppos)
-{
- int total, i;
- long length;
- struct tracectx *t = file->private_data;
- u32 first = 0;
- u32 *buf;
-
- mutex_lock(&t->mutex);
-
- if (trace_isrunning(t)) {
- length = 0;
- goto out;
- }
-
- etb_unlock(t);
-
- total = etb_getdatalen(t);
- if (total == t->etb_bufsz)
- first = etb_readl(t, ETBR_WRITEADDR);
-
- etb_writel(t, first, ETBR_READADDR);
-
- length = min(total * 4, (int)len);
- buf = vmalloc(length);
-
- dev_dbg(t->dev, "ETB buffer length: %d\n", total);
- dev_dbg(t->dev, "ETB status reg: %x\n", etb_readl(t, ETBR_STATUS));
- for (i = 0; i < length / 4; i++)
- buf[i] = etb_readl(t, ETBR_READMEM);
-
- /* the only way to deassert overflow bit in ETB status is this */
- etb_writel(t, 1, ETBR_CTRL);
- etb_writel(t, 0, ETBR_CTRL);
-
- etb_writel(t, 0, ETBR_WRITEADDR);
- etb_writel(t, 0, ETBR_READADDR);
- etb_writel(t, 0, ETBR_TRIGGERCOUNT);
-
- etb_lock(t);
-
- length -= copy_to_user(data, buf, length);
- vfree(buf);
-
-out:
- mutex_unlock(&t->mutex);
-
- return length;
-}
-
-static int etb_release(struct inode *inode, struct file *file)
-{
- /* there's nothing to do here, actually */
- return 0;
-}
-
-static const struct file_operations etb_fops = {
- .owner = THIS_MODULE,
- .read = etb_read,
- .open = etb_open,
- .release = etb_release,
- .llseek = no_llseek,
-};
-
-static struct miscdevice etb_miscdev = {
- .name = "tracebuf",
- .minor = 0,
- .fops = &etb_fops,
-};
-
-static int etb_probe(struct amba_device *dev, const struct amba_id *id)
-{
- struct tracectx *t = &tracer;
- int ret = 0;
-
- ret = amba_request_regions(dev, NULL);
- if (ret)
- goto out;
-
- t->etb_regs = ioremap_nocache(dev->res.start, resource_size(&dev->res));
- if (!t->etb_regs) {
- ret = -ENOMEM;
- goto out_release;
- }
-
- amba_set_drvdata(dev, t);
-
- etb_miscdev.parent = &dev->dev;
-
- ret = misc_register(&etb_miscdev);
- if (ret)
- goto out_unmap;
-
- t->emu_clk = clk_get(&dev->dev, "emu_src_ck");
- if (IS_ERR(t->emu_clk)) {
- dev_dbg(&dev->dev, "Failed to obtain emu_src_ck.\n");
- return -EFAULT;
- }
-
- clk_enable(t->emu_clk);
-
- etb_unlock(t);
- t->etb_bufsz = etb_readl(t, ETBR_DEPTH);
- dev_dbg(&dev->dev, "Size: %x\n", t->etb_bufsz);
-
- /* make sure trace capture is disabled */
- etb_writel(t, 0, ETBR_CTRL);
- etb_writel(t, 0x1000, ETBR_FORMATTERCTRL);
- etb_lock(t);
-
- dev_dbg(&dev->dev, "ETB AMBA driver initialized.\n");
-
-out:
- return ret;
-
-out_unmap:
- amba_set_drvdata(dev, NULL);
- iounmap(t->etb_regs);
-
-out_release:
- amba_release_regions(dev);
-
- return ret;
-}
-
-static int etb_remove(struct amba_device *dev)
-{
- struct tracectx *t = amba_get_drvdata(dev);
-
- amba_set_drvdata(dev, NULL);
-
- iounmap(t->etb_regs);
- t->etb_regs = NULL;
-
- clk_disable(t->emu_clk);
- clk_put(t->emu_clk);
-
- amba_release_regions(dev);
-
- return 0;
-}
-
-static struct amba_id etb_ids[] = {
- {
- .id = 0x0003b907,
- .mask = 0x0007ffff,
- },
- { 0, 0 },
-};
-
-static struct amba_driver etb_driver = {
- .drv = {
- .name = "etb",
- .owner = THIS_MODULE,
- },
- .probe = etb_probe,
- .remove = etb_remove,
- .id_table = etb_ids,
-};
-
-/* use a sysfs file "trace_running" to start/stop tracing */
-static ssize_t trace_running_show(struct kobject *kobj,
- struct kobj_attribute *attr,
- char *buf)
-{
- return sprintf(buf, "%x\n", trace_isrunning(&tracer));
-}
-
-static ssize_t trace_running_store(struct kobject *kobj,
- struct kobj_attribute *attr,
- const char *buf, size_t n)
-{
- unsigned int value;
- int ret;
-
- if (sscanf(buf, "%u", &value) != 1)
- return -EINVAL;
-
- mutex_lock(&tracer.mutex);
- ret = value ? trace_start(&tracer) : trace_stop(&tracer);
- mutex_unlock(&tracer.mutex);
-
- return ret ? : n;
-}
-
-static struct kobj_attribute trace_running_attr =
- __ATTR(trace_running, 0644, trace_running_show, trace_running_store);
-
-static ssize_t trace_info_show(struct kobject *kobj,
- struct kobj_attribute *attr,
- char *buf)
-{
- u32 etb_wa, etb_ra, etb_st, etb_fc, etm_ctrl, etm_st;
- int datalen;
-
- etb_unlock(&tracer);
- datalen = etb_getdatalen(&tracer);
- etb_wa = etb_readl(&tracer, ETBR_WRITEADDR);
- etb_ra = etb_readl(&tracer, ETBR_READADDR);
- etb_st = etb_readl(&tracer, ETBR_STATUS);
- etb_fc = etb_readl(&tracer, ETBR_FORMATTERCTRL);
- etb_lock(&tracer);
-
- etm_unlock(&tracer);
- etm_ctrl = etm_readl(&tracer, ETMR_CTRL);
- etm_st = etm_readl(&tracer, ETMR_STATUS);
- etm_lock(&tracer);
-
- return sprintf(buf, "Trace buffer len: %d\nComparator pairs: %d\n"
- "ETBR_WRITEADDR:\t%08x\n"
- "ETBR_READADDR:\t%08x\n"
- "ETBR_STATUS:\t%08x\n"
- "ETBR_FORMATTERCTRL:\t%08x\n"
- "ETMR_CTRL:\t%08x\n"
- "ETMR_STATUS:\t%08x\n",
- datalen,
- tracer.ncmppairs,
- etb_wa,
- etb_ra,
- etb_st,
- etb_fc,
- etm_ctrl,
- etm_st
- );
-}
-
-static struct kobj_attribute trace_info_attr =
- __ATTR(trace_info, 0444, trace_info_show, NULL);
-
-static ssize_t trace_mode_show(struct kobject *kobj,
- struct kobj_attribute *attr,
- char *buf)
-{
- return sprintf(buf, "%d %d\n",
- !!(tracer.flags & TRACER_CYCLE_ACC),
- tracer.etm_portsz);
-}
-
-static ssize_t trace_mode_store(struct kobject *kobj,
- struct kobj_attribute *attr,
- const char *buf, size_t n)
-{
- unsigned int cycacc, portsz;
-
- if (sscanf(buf, "%u %u", &cycacc, &portsz) != 2)
- return -EINVAL;
-
- mutex_lock(&tracer.mutex);
- if (cycacc)
- tracer.flags |= TRACER_CYCLE_ACC;
- else
- tracer.flags &= ~TRACER_CYCLE_ACC;
-
- tracer.etm_portsz = portsz & 0x0f;
- mutex_unlock(&tracer.mutex);
-
- return n;
-}
-
-static struct kobj_attribute trace_mode_attr =
- __ATTR(trace_mode, 0644, trace_mode_show, trace_mode_store);
-
-static int etm_probe(struct amba_device *dev, const struct amba_id *id)
-{
- struct tracectx *t = &tracer;
- int ret = 0;
-
- if (t->etm_regs) {
- dev_dbg(&dev->dev, "ETM already initialized\n");
- ret = -EBUSY;
- goto out;
- }
-
- ret = amba_request_regions(dev, NULL);
- if (ret)
- goto out;
-
- t->etm_regs = ioremap_nocache(dev->res.start, resource_size(&dev->res));
- if (!t->etm_regs) {
- ret = -ENOMEM;
- goto out_release;
- }
-
- amba_set_drvdata(dev, t);
-
- mutex_init(&t->mutex);
- t->dev = &dev->dev;
- t->flags = TRACER_CYCLE_ACC;
- t->etm_portsz = 1;
-
- etm_unlock(t);
- (void)etm_readl(t, ETMMR_PDSR);
- /* dummy first read */
- (void)etm_readl(&tracer, ETMMR_OSSRR);
-
- t->ncmppairs = etm_readl(t, ETMR_CONFCODE) & 0xf;
- etm_writel(t, 0x440, ETMR_CTRL);
- etm_lock(t);
-
- ret = sysfs_create_file(&dev->dev.kobj,
- &trace_running_attr.attr);
- if (ret)
- goto out_unmap;
-
- /* failing to create any of these two is not fatal */
- ret = sysfs_create_file(&dev->dev.kobj, &trace_info_attr.attr);
- if (ret)
- dev_dbg(&dev->dev, "Failed to create trace_info in sysfs\n");
-
- ret = sysfs_create_file(&dev->dev.kobj, &trace_mode_attr.attr);
- if (ret)
- dev_dbg(&dev->dev, "Failed to create trace_mode in sysfs\n");
-
- dev_dbg(t->dev, "ETM AMBA driver initialized.\n");
-
-out:
- return ret;
-
-out_unmap:
- amba_set_drvdata(dev, NULL);
- iounmap(t->etm_regs);
-
-out_release:
- amba_release_regions(dev);
-
- return ret;
-}
-
-static int etm_remove(struct amba_device *dev)
-{
- struct tracectx *t = amba_get_drvdata(dev);
-
- amba_set_drvdata(dev, NULL);
-
- iounmap(t->etm_regs);
- t->etm_regs = NULL;
-
- amba_release_regions(dev);
-
- sysfs_remove_file(&dev->dev.kobj, &trace_running_attr.attr);
- sysfs_remove_file(&dev->dev.kobj, &trace_info_attr.attr);
- sysfs_remove_file(&dev->dev.kobj, &trace_mode_attr.attr);
-
- return 0;
-}
-
-static struct amba_id etm_ids[] = {
- {
- .id = 0x0003b921,
- .mask = 0x0007ffff,
- },
- { 0, 0 },
-};
-
-static struct amba_driver etm_driver = {
- .drv = {
- .name = "etm",
- .owner = THIS_MODULE,
- },
- .probe = etm_probe,
- .remove = etm_remove,
- .id_table = etm_ids,
-};
-
-static int __init etm_init(void)
-{
- int retval;
-
- retval = amba_driver_register(&etb_driver);
- if (retval) {
- printk(KERN_ERR "Failed to register etb\n");
- return retval;
- }
-
- retval = amba_driver_register(&etm_driver);
- if (retval) {
- amba_driver_unregister(&etb_driver);
- printk(KERN_ERR "Failed to probe etm\n");
- return retval;
- }
-
- /* not being able to install this handler is not fatal */
- (void)register_sysrq_key('v', &sysrq_etm_op);
-
- return 0;
-}
-
-device_initcall(etm_init);
-
diff --git a/arch/arm/kernel/fiq.c b/arch/arm/kernel/fiq.c
index 2adda11f712f..25442f451148 100644
--- a/arch/arm/kernel/fiq.c
+++ b/arch/arm/kernel/fiq.c
@@ -47,6 +47,11 @@
#include <asm/irq.h>
#include <asm/traps.h>
+#define FIQ_OFFSET ({ \
+ extern void *vector_fiq_offset; \
+ (unsigned)&vector_fiq_offset; \
+ })
+
static unsigned long no_fiq_insn;
/* Default reacquire function
@@ -80,13 +85,16 @@ int show_fiq_list(struct seq_file *p, int prec)
void set_fiq_handler(void *start, unsigned int length)
{
#if defined(CONFIG_CPU_USE_DOMAINS)
- memcpy((void *)0xffff001c, start, length);
+ void *base = (void *)0xffff0000;
#else
- memcpy(vectors_page + 0x1c, start, length);
+ void *base = vectors_page;
#endif
- flush_icache_range(0xffff001c, 0xffff001c + length);
+ unsigned offset = FIQ_OFFSET;
+
+ memcpy(base + offset, start, length);
+ flush_icache_range(0xffff0000 + offset, 0xffff0000 + offset + length);
if (!vectors_high())
- flush_icache_range(0x1c, 0x1c + length);
+ flush_icache_range(offset, offset + length);
}
int claim_fiq(struct fiq_handler *f)
@@ -144,6 +152,7 @@ EXPORT_SYMBOL(disable_fiq);
void __init init_FIQ(int start)
{
- no_fiq_insn = *(unsigned long *)0xffff001c;
+ unsigned offset = FIQ_OFFSET;
+ no_fiq_insn = *(unsigned long *)(0xffff0000 + offset);
fiq_start = start;
}
diff --git a/arch/arm/kernel/head.S b/arch/arm/kernel/head.S
index 8bac553fe213..2d1bf8c6353d 100644
--- a/arch/arm/kernel/head.S
+++ b/arch/arm/kernel/head.S
@@ -77,6 +77,7 @@
__HEAD
ENTRY(stext)
+ ARM_BE8(setend be ) @ ensure we are in BE8 mode
THUMB( adr r9, BSYM(1f) ) @ Kernel is always entered in ARM.
THUMB( bx r9 ) @ If this is a Thumb-2 kernel,
@@ -109,7 +110,7 @@ ENTRY(stext)
sub r4, r3, r4 @ (PHYS_OFFSET - PAGE_OFFSET)
add r8, r8, r4 @ PHYS_OFFSET
#else
- ldr r8, =PHYS_OFFSET @ always constant in this case
+ ldr r8, =PLAT_PHYS_OFFSET @ always constant in this case
#endif
/*
@@ -342,7 +343,6 @@ __turn_mmu_on_loc:
.long __turn_mmu_on_end
#if defined(CONFIG_SMP)
- __CPUINIT
ENTRY(secondary_startup)
/*
* Common entry point for secondary CPUs.
@@ -351,6 +351,9 @@ ENTRY(secondary_startup)
* the processor type - there is no need to check the machine type
* as it has already been validated by the primary processor.
*/
+
+ ARM_BE8(setend be) @ ensure we are in BE8 mode
+
#ifdef CONFIG_ARM_VIRT_EXT
bl __hyp_stub_install_secondary
#endif
@@ -584,8 +587,10 @@ __fixup_a_pv_table:
b 2f
1: add r7, r3
ldrh ip, [r7, #2]
+ARM_BE8(rev16 ip, ip)
and ip, 0x8f00
orr ip, r6 @ mask in offset bits 31-24
+ARM_BE8(rev16 ip, ip)
strh ip, [r7, #2]
2: cmp r4, r5
ldrcc r7, [r4], #4 @ use branch for delay slot
@@ -594,8 +599,14 @@ __fixup_a_pv_table:
#else
b 2f
1: ldr ip, [r7, r3]
+#ifdef CONFIG_CPU_ENDIAN_BE8
+ @ in BE8, we load data in BE, but instructions still in LE
+ bic ip, ip, #0xff000000
+ orr ip, ip, r6, lsl#24
+#else
bic ip, ip, #0x000000ff
orr ip, ip, r6 @ mask in offset bits 31-24
+#endif
str ip, [r7, r3]
2: cmp r4, r5
ldrcc r7, [r4], #4 @ use branch for delay slot
diff --git a/arch/arm/kernel/hw_breakpoint.c b/arch/arm/kernel/hw_breakpoint.c
index 1fd749ee4a1b..7eee611b6ee5 100644
--- a/arch/arm/kernel/hw_breakpoint.c
+++ b/arch/arm/kernel/hw_breakpoint.c
@@ -29,6 +29,7 @@
#include <linux/hw_breakpoint.h>
#include <linux/smp.h>
#include <linux/cpu_pm.h>
+#include <linux/coresight.h>
#include <asm/cacheflush.h>
#include <asm/cputype.h>
@@ -36,7 +37,6 @@
#include <asm/hw_breakpoint.h>
#include <asm/kdebug.h>
#include <asm/traps.h>
-#include <asm/hardware/coresight.h>
/* Breakpoint currently in use for each BRP. */
static DEFINE_PER_CPU(struct perf_event *, bp_on_reg[ARM_MAX_BRP]);
@@ -975,7 +975,7 @@ static void reset_ctrl_regs(void *unused)
* Unconditionally clear the OS lock by writing a value
* other than CS_LAR_KEY to the access register.
*/
- ARM_DBG_WRITE(c1, c0, 4, ~CS_LAR_KEY);
+ ARM_DBG_WRITE(c1, c0, 4, ~CORESIGHT_UNLOCK);
isb();
/*
@@ -1049,7 +1049,8 @@ static struct notifier_block dbg_cpu_pm_nb = {
static void __init pm_init(void)
{
- cpu_pm_register_notifier(&dbg_cpu_pm_nb);
+ if (has_ossr)
+ cpu_pm_register_notifier(&dbg_cpu_pm_nb);
}
#else
static inline void pm_init(void)
diff --git a/arch/arm/kernel/hyp-stub.S b/arch/arm/kernel/hyp-stub.S
index 1315c4ccfa56..dbe21107945a 100644
--- a/arch/arm/kernel/hyp-stub.S
+++ b/arch/arm/kernel/hyp-stub.S
@@ -153,6 +153,8 @@ THUMB( orr r7, #(1 << 30) ) @ HSCTLR.TE
mrc p15, 4, r7, c14, c1, 0 @ CNTHCTL
orr r7, r7, #3 @ PL1PCEN | PL1PCTEN
mcr p15, 4, r7, c14, c1, 0 @ CNTHCTL
+ mov r7, #0
+ mcrr p15, 4, r7, r7, c14 @ CNTVOFF
1:
#endif
diff --git a/arch/arm/kernel/irq.c b/arch/arm/kernel/irq.c
index 9723d17b8f38..1e782bdeee49 100644
--- a/arch/arm/kernel/irq.c
+++ b/arch/arm/kernel/irq.c
@@ -163,7 +163,7 @@ static bool migrate_one_irq(struct irq_desc *desc)
c = irq_data_get_irq_chip(d);
if (!c->irq_set_affinity)
pr_debug("IRQ%u: unable to set affinity\n", d->irq);
- else if (c->irq_set_affinity(d, affinity, true) == IRQ_SET_MASK_OK && ret)
+ else if (c->irq_set_affinity(d, affinity, false) == IRQ_SET_MASK_OK && ret)
cpumask_copy(d->affinity, affinity);
return ret;
diff --git a/arch/arm/kernel/kprobes-common.c b/arch/arm/kernel/kprobes-common.c
index 18a76282970e..380c20fb9c85 100644
--- a/arch/arm/kernel/kprobes-common.c
+++ b/arch/arm/kernel/kprobes-common.c
@@ -14,6 +14,7 @@
#include <linux/kernel.h>
#include <linux/kprobes.h>
#include <asm/system_info.h>
+#include <asm/opcodes.h>
#include "kprobes.h"
@@ -305,7 +306,8 @@ kprobe_decode_ldmstm(kprobe_opcode_t insn, struct arch_specific_insn *asi)
if (handler) {
/* We can emulate the instruction in (possibly) modified form */
- asi->insn[0] = (insn & 0xfff00000) | (rn << 16) | reglist;
+ asi->insn[0] = __opcode_to_mem_arm((insn & 0xfff00000) |
+ (rn << 16) | reglist);
asi->insn_handler = handler;
return INSN_GOOD;
}
@@ -334,13 +336,14 @@ prepare_emulated_insn(kprobe_opcode_t insn, struct arch_specific_insn *asi,
#ifdef CONFIG_THUMB2_KERNEL
if (thumb) {
u16 *thumb_insn = (u16 *)asi->insn;
- thumb_insn[1] = 0x4770; /* Thumb bx lr */
- thumb_insn[2] = 0x4770; /* Thumb bx lr */
+ /* Thumb bx lr */
+ thumb_insn[1] = __opcode_to_mem_thumb16(0x4770);
+ thumb_insn[2] = __opcode_to_mem_thumb16(0x4770);
return insn;
}
- asi->insn[1] = 0xe12fff1e; /* ARM bx lr */
+ asi->insn[1] = __opcode_to_mem_arm(0xe12fff1e); /* ARM bx lr */
#else
- asi->insn[1] = 0xe1a0f00e; /* mov pc, lr */
+ asi->insn[1] = __opcode_to_mem_arm(0xe1a0f00e); /* mov pc, lr */
#endif
/* Make an ARM instruction unconditional */
if (insn < 0xe0000000)
@@ -360,12 +363,12 @@ set_emulated_insn(kprobe_opcode_t insn, struct arch_specific_insn *asi,
if (thumb) {
u16 *ip = (u16 *)asi->insn;
if (is_wide_instruction(insn))
- *ip++ = insn >> 16;
- *ip++ = insn;
+ *ip++ = __opcode_to_mem_thumb16(insn >> 16);
+ *ip++ = __opcode_to_mem_thumb16(insn);
return;
}
#endif
- asi->insn[0] = insn;
+ asi->insn[0] = __opcode_to_mem_arm(insn);
}
/*
diff --git a/arch/arm/kernel/kprobes-thumb.c b/arch/arm/kernel/kprobes-thumb.c
index 6123daf397a7..b82e798983c4 100644
--- a/arch/arm/kernel/kprobes-thumb.c
+++ b/arch/arm/kernel/kprobes-thumb.c
@@ -163,9 +163,9 @@ t32_decode_ldmstm(kprobe_opcode_t insn, struct arch_specific_insn *asi)
enum kprobe_insn ret = kprobe_decode_ldmstm(insn, asi);
/* Fixup modified instruction to have halfwords in correct order...*/
- insn = asi->insn[0];
- ((u16 *)asi->insn)[0] = insn >> 16;
- ((u16 *)asi->insn)[1] = insn & 0xffff;
+ insn = __mem_to_opcode_arm(asi->insn[0]);
+ ((u16 *)asi->insn)[0] = __opcode_to_mem_thumb16(insn >> 16);
+ ((u16 *)asi->insn)[1] = __opcode_to_mem_thumb16(insn & 0xffff);
return ret;
}
@@ -1153,7 +1153,7 @@ t16_decode_hiregs(kprobe_opcode_t insn, struct arch_specific_insn *asi)
{
insn &= ~0x00ff;
insn |= 0x001; /* Set Rdn = R1 and Rm = R0 */
- ((u16 *)asi->insn)[0] = insn;
+ ((u16 *)asi->insn)[0] = __opcode_to_mem_thumb16(insn);
asi->insn_handler = t16_emulate_hiregs;
return INSN_GOOD;
}
@@ -1182,8 +1182,10 @@ t16_decode_push(kprobe_opcode_t insn, struct arch_specific_insn *asi)
* and call it with R9=SP and LR in the register list represented
* by R8.
*/
- ((u16 *)asi->insn)[0] = 0xe929; /* 1st half STMDB R9!,{} */
- ((u16 *)asi->insn)[1] = insn & 0x1ff; /* 2nd half (register list) */
+ /* 1st half STMDB R9!,{} */
+ ((u16 *)asi->insn)[0] = __opcode_to_mem_thumb16(0xe929);
+ /* 2nd half (register list) */
+ ((u16 *)asi->insn)[1] = __opcode_to_mem_thumb16(insn & 0x1ff);
asi->insn_handler = t16_emulate_push;
return INSN_GOOD;
}
@@ -1232,8 +1234,10 @@ t16_decode_pop(kprobe_opcode_t insn, struct arch_specific_insn *asi)
* and call it with R9=SP and PC in the register list represented
* by R8.
*/
- ((u16 *)asi->insn)[0] = 0xe8b9; /* 1st half LDMIA R9!,{} */
- ((u16 *)asi->insn)[1] = insn & 0x1ff; /* 2nd half (register list) */
+ /* 1st half LDMIA R9!,{} */
+ ((u16 *)asi->insn)[0] = __opcode_to_mem_thumb16(0xe8b9);
+ /* 2nd half (register list) */
+ ((u16 *)asi->insn)[1] = __opcode_to_mem_thumb16(insn & 0x1ff);
asi->insn_handler = insn & 0x100 ? t16_emulate_pop_pc
: t16_emulate_pop_nopc;
return INSN_GOOD;
diff --git a/arch/arm/kernel/kprobes.c b/arch/arm/kernel/kprobes.c
index 170e9f34003f..1c6ece51781c 100644
--- a/arch/arm/kernel/kprobes.c
+++ b/arch/arm/kernel/kprobes.c
@@ -26,6 +26,7 @@
#include <linux/stop_machine.h>
#include <linux/stringify.h>
#include <asm/traps.h>
+#include <asm/opcodes.h>
#include <asm/cacheflush.h>
#include "kprobes.h"
@@ -62,10 +63,10 @@ int __kprobes arch_prepare_kprobe(struct kprobe *p)
#ifdef CONFIG_THUMB2_KERNEL
thumb = true;
addr &= ~1; /* Bit 0 would normally be set to indicate Thumb code */
- insn = ((u16 *)addr)[0];
+ insn = __mem_to_opcode_thumb16(((u16 *)addr)[0]);
if (is_wide_instruction(insn)) {
- insn <<= 16;
- insn |= ((u16 *)addr)[1];
+ u16 inst2 = __mem_to_opcode_thumb16(((u16 *)addr)[1]);
+ insn = __opcode_thumb32_compose(insn, inst2);
decode_insn = thumb32_kprobe_decode_insn;
} else
decode_insn = thumb16_kprobe_decode_insn;
@@ -73,7 +74,7 @@ int __kprobes arch_prepare_kprobe(struct kprobe *p)
thumb = false;
if (addr & 0x3)
return -EINVAL;
- insn = *p->addr;
+ insn = __mem_to_opcode_arm(*p->addr);
decode_insn = arm_kprobe_decode_insn;
#endif
diff --git a/arch/arm/kernel/machine_kexec.c b/arch/arm/kernel/machine_kexec.c
index 4fb074c446bf..70ae735dec53 100644
--- a/arch/arm/kernel/machine_kexec.c
+++ b/arch/arm/kernel/machine_kexec.c
@@ -14,10 +14,11 @@
#include <asm/pgalloc.h>
#include <asm/mmu_context.h>
#include <asm/cacheflush.h>
+#include <asm/fncpy.h>
#include <asm/mach-types.h>
#include <asm/system_misc.h>
-extern const unsigned char relocate_new_kernel[];
+extern void relocate_new_kernel(void);
extern const unsigned int relocate_new_kernel_size;
extern unsigned long kexec_start_address;
@@ -73,6 +74,7 @@ void machine_crash_nonpanic_core(void *unused)
crash_save_cpu(&regs, smp_processor_id());
flush_cache_all();
+ set_cpu_online(smp_processor_id(), false);
atomic_dec(&waiting_for_crash_ipi);
while (1)
cpu_relax();
@@ -132,6 +134,8 @@ void machine_kexec(struct kimage *image)
{
unsigned long page_list;
unsigned long reboot_code_buffer_phys;
+ unsigned long reboot_entry = (unsigned long)relocate_new_kernel;
+ unsigned long reboot_entry_phys;
void *reboot_code_buffer;
if (num_online_cpus() > 1) {
@@ -155,16 +159,23 @@ void machine_kexec(struct kimage *image)
/* copy our kernel relocation code to the control code page */
- memcpy(reboot_code_buffer,
- relocate_new_kernel, relocate_new_kernel_size);
+ reboot_entry = fncpy(reboot_code_buffer,
+ reboot_entry,
+ relocate_new_kernel_size);
+ reboot_entry_phys = (unsigned long)reboot_entry +
+ (reboot_code_buffer_phys - (unsigned long)reboot_code_buffer);
-
- flush_icache_range((unsigned long) reboot_code_buffer,
- (unsigned long) reboot_code_buffer + KEXEC_CONTROL_PAGE_SIZE);
printk(KERN_INFO "Bye!\n");
if (kexec_reinit)
kexec_reinit();
- soft_restart(reboot_code_buffer_phys);
+ soft_restart(reboot_entry_phys);
+}
+
+void arch_crash_save_vmcoreinfo(void)
+{
+#ifdef CONFIG_ARM_LPAE
+ VMCOREINFO_CONFIG(ARM_LPAE);
+#endif
}
diff --git a/arch/arm/kernel/module.c b/arch/arm/kernel/module.c
index 1e9be5d25e56..1705ee80d097 100644
--- a/arch/arm/kernel/module.c
+++ b/arch/arm/kernel/module.c
@@ -24,6 +24,7 @@
#include <asm/sections.h>
#include <asm/smp_plat.h>
#include <asm/unwind.h>
+#include <asm/opcodes.h>
#ifdef CONFIG_XIP_KERNEL
/*
@@ -60,6 +61,7 @@ apply_relocate(Elf32_Shdr *sechdrs, const char *strtab, unsigned int symindex,
Elf32_Sym *sym;
const char *symname;
s32 offset;
+ u32 tmp;
#ifdef CONFIG_THUMB2_KERNEL
u32 upper, lower, sign, j1, j2;
#endif
@@ -95,7 +97,8 @@ apply_relocate(Elf32_Shdr *sechdrs, const char *strtab, unsigned int symindex,
case R_ARM_PC24:
case R_ARM_CALL:
case R_ARM_JUMP24:
- offset = (*(u32 *)loc & 0x00ffffff) << 2;
+ offset = __mem_to_opcode_arm(*(u32 *)loc);
+ offset = (offset & 0x00ffffff) << 2;
if (offset & 0x02000000)
offset -= 0x04000000;
@@ -111,9 +114,10 @@ apply_relocate(Elf32_Shdr *sechdrs, const char *strtab, unsigned int symindex,
}
offset >>= 2;
+ offset &= 0x00ffffff;
- *(u32 *)loc &= 0xff000000;
- *(u32 *)loc |= offset & 0x00ffffff;
+ *(u32 *)loc &= __opcode_to_mem_arm(0xff000000);
+ *(u32 *)loc |= __opcode_to_mem_arm(offset);
break;
case R_ARM_V4BX:
@@ -121,8 +125,8 @@ apply_relocate(Elf32_Shdr *sechdrs, const char *strtab, unsigned int symindex,
* other bits to re-code instruction as
* MOV PC,Rm.
*/
- *(u32 *)loc &= 0xf000000f;
- *(u32 *)loc |= 0x01a0f000;
+ *(u32 *)loc &= __opcode_to_mem_arm(0xf000000f);
+ *(u32 *)loc |= __opcode_to_mem_arm(0x01a0f000);
break;
case R_ARM_PREL31:
@@ -132,7 +136,7 @@ apply_relocate(Elf32_Shdr *sechdrs, const char *strtab, unsigned int symindex,
case R_ARM_MOVW_ABS_NC:
case R_ARM_MOVT_ABS:
- offset = *(u32 *)loc;
+ offset = tmp = __mem_to_opcode_arm(*(u32 *)loc);
offset = ((offset & 0xf0000) >> 4) | (offset & 0xfff);
offset = (offset ^ 0x8000) - 0x8000;
@@ -140,16 +144,18 @@ apply_relocate(Elf32_Shdr *sechdrs, const char *strtab, unsigned int symindex,
if (ELF32_R_TYPE(rel->r_info) == R_ARM_MOVT_ABS)
offset >>= 16;
- *(u32 *)loc &= 0xfff0f000;
- *(u32 *)loc |= ((offset & 0xf000) << 4) |
- (offset & 0x0fff);
+ tmp &= 0xfff0f000;
+ tmp |= ((offset & 0xf000) << 4) |
+ (offset & 0x0fff);
+
+ *(u32 *)loc = __opcode_to_mem_arm(tmp);
break;
#ifdef CONFIG_THUMB2_KERNEL
case R_ARM_THM_CALL:
case R_ARM_THM_JUMP24:
- upper = *(u16 *)loc;
- lower = *(u16 *)(loc + 2);
+ upper = __mem_to_opcode_thumb16(*(u16 *)loc);
+ lower = __mem_to_opcode_thumb16(*(u16 *)(loc + 2));
/*
* 25 bit signed address range (Thumb-2 BL and B.W
@@ -198,17 +204,20 @@ apply_relocate(Elf32_Shdr *sechdrs, const char *strtab, unsigned int symindex,
sign = (offset >> 24) & 1;
j1 = sign ^ (~(offset >> 23) & 1);
j2 = sign ^ (~(offset >> 22) & 1);
- *(u16 *)loc = (u16)((upper & 0xf800) | (sign << 10) |
+ upper = (u16)((upper & 0xf800) | (sign << 10) |
((offset >> 12) & 0x03ff));
- *(u16 *)(loc + 2) = (u16)((lower & 0xd000) |
- (j1 << 13) | (j2 << 11) |
- ((offset >> 1) & 0x07ff));
+ lower = (u16)((lower & 0xd000) |
+ (j1 << 13) | (j2 << 11) |
+ ((offset >> 1) & 0x07ff));
+
+ *(u16 *)loc = __opcode_to_mem_thumb16(upper);
+ *(u16 *)(loc + 2) = __opcode_to_mem_thumb16(lower);
break;
case R_ARM_THM_MOVW_ABS_NC:
case R_ARM_THM_MOVT_ABS:
- upper = *(u16 *)loc;
- lower = *(u16 *)(loc + 2);
+ upper = __mem_to_opcode_thumb16(*(u16 *)loc);
+ lower = __mem_to_opcode_thumb16(*(u16 *)(loc + 2));
/*
* MOVT/MOVW instructions encoding in Thumb-2:
@@ -229,12 +238,14 @@ apply_relocate(Elf32_Shdr *sechdrs, const char *strtab, unsigned int symindex,
if (ELF32_R_TYPE(rel->r_info) == R_ARM_THM_MOVT_ABS)
offset >>= 16;
- *(u16 *)loc = (u16)((upper & 0xfbf0) |
- ((offset & 0xf000) >> 12) |
- ((offset & 0x0800) >> 1));
- *(u16 *)(loc + 2) = (u16)((lower & 0x8f00) |
- ((offset & 0x0700) << 4) |
- (offset & 0x00ff));
+ upper = (u16)((upper & 0xfbf0) |
+ ((offset & 0xf000) >> 12) |
+ ((offset & 0x0800) >> 1));
+ lower = (u16)((lower & 0x8f00) |
+ ((offset & 0x0700) << 4) |
+ (offset & 0x00ff));
+ *(u16 *)loc = __opcode_to_mem_thumb16(upper);
+ *(u16 *)(loc + 2) = __opcode_to_mem_thumb16(lower);
break;
#endif
@@ -296,6 +307,10 @@ int module_finalize(const Elf32_Ehdr *hdr, const Elf_Shdr *sechdrs,
maps[ARM_SEC_EXIT].unw_sec = s;
else if (strcmp(".ARM.exidx.devexit.text", secname) == 0)
maps[ARM_SEC_DEVEXIT].unw_sec = s;
+ else if (strcmp(".ARM.exidx.text.unlikely", secname) == 0)
+ maps[ARM_SEC_UNLIKELY].unw_sec = s;
+ else if (strcmp(".ARM.exidx.text.hot", secname) == 0)
+ maps[ARM_SEC_HOT].unw_sec = s;
else if (strcmp(".init.text", secname) == 0)
maps[ARM_SEC_INIT].txt_sec = s;
else if (strcmp(".devinit.text", secname) == 0)
@@ -306,6 +321,10 @@ int module_finalize(const Elf32_Ehdr *hdr, const Elf_Shdr *sechdrs,
maps[ARM_SEC_EXIT].txt_sec = s;
else if (strcmp(".devexit.text", secname) == 0)
maps[ARM_SEC_DEVEXIT].txt_sec = s;
+ else if (strcmp(".text.unlikely", secname) == 0)
+ maps[ARM_SEC_UNLIKELY].txt_sec = s;
+ else if (strcmp(".text.hot", secname) == 0)
+ maps[ARM_SEC_HOT].txt_sec = s;
}
for (i = 0; i < ARM_SEC_MAX; i++)
diff --git a/arch/arm/kernel/perf_event.c b/arch/arm/kernel/perf_event.c
index 8c3094d0f7b7..b41749fe56dc 100644
--- a/arch/arm/kernel/perf_event.c
+++ b/arch/arm/kernel/perf_event.c
@@ -12,6 +12,7 @@
*/
#define pr_fmt(fmt) "hw perfevents: " fmt
+#include <linux/cpumask.h>
#include <linux/kernel.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
@@ -53,7 +54,12 @@ armpmu_map_cache_event(const unsigned (*cache_map)
static int
armpmu_map_hw_event(const unsigned (*event_map)[PERF_COUNT_HW_MAX], u64 config)
{
- int mapping = (*event_map)[config];
+ int mapping;
+
+ if (config >= PERF_COUNT_HW_MAX)
+ return -ENOENT;
+
+ mapping = (*event_map)[config];
return mapping == HW_OP_UNSUPPORTED ? -ENOENT : mapping;
}
@@ -81,6 +87,9 @@ armpmu_map_event(struct perf_event *event,
return armpmu_map_cache_event(cache_map, config);
case PERF_TYPE_RAW:
return armpmu_map_raw_event(raw_event_mask, config);
+ default:
+ if (event->attr.type >= PERF_TYPE_MAX)
+ return armpmu_map_raw_event(raw_event_mask, config);
}
return -ENOENT;
@@ -158,6 +167,8 @@ armpmu_stop(struct perf_event *event, int flags)
struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
struct hw_perf_event *hwc = &event->hw;
+ if (!cpumask_test_cpu(smp_processor_id(), &armpmu->valid_cpus))
+ return;
/*
* ARM pmu always has to update the counter, so ignore
* PERF_EF_UPDATE, see comments in armpmu_start().
@@ -174,6 +185,8 @@ static void armpmu_start(struct perf_event *event, int flags)
struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
struct hw_perf_event *hwc = &event->hw;
+ if (!cpumask_test_cpu(smp_processor_id(), &armpmu->valid_cpus))
+ return;
/*
* ARM pmu always has to reprogram the period, so ignore
* PERF_EF_RELOAD, see the comment below.
@@ -201,6 +214,9 @@ armpmu_del(struct perf_event *event, int flags)
struct hw_perf_event *hwc = &event->hw;
int idx = hwc->idx;
+ if (!cpumask_test_cpu(smp_processor_id(), &armpmu->valid_cpus))
+ return;
+
armpmu_stop(event, PERF_EF_UPDATE);
hw_events->events[idx] = NULL;
clear_bit(idx, hw_events->used_mask);
@@ -217,6 +233,10 @@ armpmu_add(struct perf_event *event, int flags)
int idx;
int err = 0;
+ /* An event following a process won't be stopped earlier */
+ if (!cpumask_test_cpu(smp_processor_id(), &armpmu->valid_cpus))
+ return 0;
+
perf_pmu_disable(event->pmu);
/* If we don't have a space for the counter then finish early. */
@@ -253,6 +273,9 @@ validate_event(struct pmu_hw_events *hw_events,
struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
struct pmu *leader_pmu = event->group_leader->pmu;
+ if (is_software_event(event))
+ return 1;
+
if (event->pmu != leader_pmu || event->state < PERF_EVENT_STATE_OFF)
return 1;
@@ -295,11 +318,18 @@ static irqreturn_t armpmu_dispatch_irq(int irq, void *dev)
struct arm_pmu *armpmu = (struct arm_pmu *) dev;
struct platform_device *plat_device = armpmu->plat_device;
struct arm_pmu_platdata *plat = dev_get_platdata(&plat_device->dev);
+ int ret;
+ u64 start_clock, finish_clock;
+ start_clock = sched_clock();
if (plat && plat->handle_irq)
- return plat->handle_irq(irq, dev, armpmu->handle_irq);
+ ret = plat->handle_irq(irq, dev, armpmu->handle_irq);
else
- return armpmu->handle_irq(irq, dev);
+ ret = armpmu->handle_irq(irq, dev);
+ finish_clock = sched_clock();
+
+ perf_sample_event_took(finish_clock - start_clock);
+ return ret;
}
static void
@@ -416,6 +446,10 @@ static int armpmu_event_init(struct perf_event *event)
int err = 0;
atomic_t *active_events = &armpmu->active_events;
+ if (event->cpu != -1 &&
+ !cpumask_test_cpu(event->cpu, &armpmu->valid_cpus))
+ return -ENOENT;
+
/* does not support taken branch sampling */
if (has_branch_stack(event))
return -EOPNOTSUPP;
@@ -569,6 +603,7 @@ perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs)
return;
}
+ perf_callchain_store(entry, regs->ARM_pc);
tail = (struct frame_tail __user *)regs->ARM_fp - 1;
while ((entry->nr < PERF_MAX_STACK_DEPTH) &&
diff --git a/arch/arm/kernel/perf_event_cpu.c b/arch/arm/kernel/perf_event_cpu.c
index 1f2740e3dbc0..e0665b871f5b 100644
--- a/arch/arm/kernel/perf_event_cpu.c
+++ b/arch/arm/kernel/perf_event_cpu.c
@@ -19,6 +19,7 @@
#define pr_fmt(fmt) "CPU PMU: " fmt
#include <linux/bitmap.h>
+#include <linux/cpu_pm.h>
#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/of.h>
@@ -31,33 +32,36 @@
#include <asm/pmu.h>
/* Set at runtime when we know what CPU type we are. */
-static struct arm_pmu *cpu_pmu;
+static DEFINE_PER_CPU(struct arm_pmu *, cpu_pmu);
static DEFINE_PER_CPU(struct perf_event * [ARMPMU_MAX_HWEVENTS], hw_events);
static DEFINE_PER_CPU(unsigned long [BITS_TO_LONGS(ARMPMU_MAX_HWEVENTS)], used_mask);
static DEFINE_PER_CPU(struct pmu_hw_events, cpu_hw_events);
+static DEFINE_PER_CPU(struct cpupmu_regs, cpu_pmu_regs);
+
/*
* Despite the names, these two functions are CPU-specific and are used
* by the OProfile/perf code.
*/
const char *perf_pmu_name(void)
{
- if (!cpu_pmu)
+ struct arm_pmu *pmu = per_cpu(cpu_pmu, 0);
+ if (!pmu)
return NULL;
- return cpu_pmu->name;
+ return pmu->name;
}
EXPORT_SYMBOL_GPL(perf_pmu_name);
int perf_num_counters(void)
{
- int max_events = 0;
+ struct arm_pmu *pmu = per_cpu(cpu_pmu, 0);
- if (cpu_pmu != NULL)
- max_events = cpu_pmu->num_events;
+ if (!pmu)
+ return 0;
- return max_events;
+ return pmu->num_events;
}
EXPORT_SYMBOL_GPL(perf_num_counters);
@@ -75,11 +79,13 @@ static void cpu_pmu_free_irq(struct arm_pmu *cpu_pmu)
{
int i, irq, irqs;
struct platform_device *pmu_device = cpu_pmu->plat_device;
+ int cpu = -1;
irqs = min(pmu_device->num_resources, num_possible_cpus());
for (i = 0; i < irqs; ++i) {
- if (!cpumask_test_and_clear_cpu(i, &cpu_pmu->active_irqs))
+ cpu = cpumask_next(cpu, &cpu_pmu->valid_cpus);
+ if (!cpumask_test_and_clear_cpu(cpu, &cpu_pmu->active_irqs))
continue;
irq = platform_get_irq(pmu_device, i);
if (irq >= 0)
@@ -91,6 +97,7 @@ static int cpu_pmu_request_irq(struct arm_pmu *cpu_pmu, irq_handler_t handler)
{
int i, err, irq, irqs;
struct platform_device *pmu_device = cpu_pmu->plat_device;
+ int cpu = -1;
if (!pmu_device)
return -ENODEV;
@@ -103,6 +110,7 @@ static int cpu_pmu_request_irq(struct arm_pmu *cpu_pmu, irq_handler_t handler)
for (i = 0; i < irqs; ++i) {
err = 0;
+ cpu = cpumask_next(cpu, &cpu_pmu->valid_cpus);
irq = platform_get_irq(pmu_device, i);
if (irq < 0)
continue;
@@ -112,7 +120,7 @@ static int cpu_pmu_request_irq(struct arm_pmu *cpu_pmu, irq_handler_t handler)
* assume that we're running on a uniprocessor machine and
* continue. Otherwise, continue without this interrupt.
*/
- if (irq_set_affinity(irq, cpumask_of(i)) && irqs > 1) {
+ if (irq_set_affinity(irq, cpumask_of(cpu)) && irqs > 1) {
pr_warning("unable to set irq affinity (irq=%d, cpu=%u)\n",
irq, i);
continue;
@@ -126,7 +134,7 @@ static int cpu_pmu_request_irq(struct arm_pmu *cpu_pmu, irq_handler_t handler)
return err;
}
- cpumask_set_cpu(i, &cpu_pmu->active_irqs);
+ cpumask_set_cpu(cpu, &cpu_pmu->active_irqs);
}
return 0;
@@ -135,7 +143,7 @@ static int cpu_pmu_request_irq(struct arm_pmu *cpu_pmu, irq_handler_t handler)
static void cpu_pmu_init(struct arm_pmu *cpu_pmu)
{
int cpu;
- for_each_possible_cpu(cpu) {
+ for_each_cpu_mask(cpu, cpu_pmu->valid_cpus) {
struct pmu_hw_events *events = &per_cpu(cpu_hw_events, cpu);
events->events = per_cpu(hw_events, cpu);
events->used_mask = per_cpu(used_mask, cpu);
@@ -148,7 +156,7 @@ static void cpu_pmu_init(struct arm_pmu *cpu_pmu)
/* Ensure the PMU has sane values out of reset. */
if (cpu_pmu->reset)
- on_each_cpu(cpu_pmu->reset, cpu_pmu, 1);
+ on_each_cpu_mask(&cpu_pmu->valid_cpus, cpu_pmu->reset, cpu_pmu, 1);
}
/*
@@ -160,21 +168,46 @@ static void cpu_pmu_init(struct arm_pmu *cpu_pmu)
static int __cpuinit cpu_pmu_notify(struct notifier_block *b,
unsigned long action, void *hcpu)
{
+ struct arm_pmu *pmu = per_cpu(cpu_pmu, (long)hcpu);
+
if ((action & ~CPU_TASKS_FROZEN) != CPU_STARTING)
return NOTIFY_DONE;
- if (cpu_pmu && cpu_pmu->reset)
- cpu_pmu->reset(cpu_pmu);
+ if (pmu && pmu->reset)
+ pmu->reset(pmu);
else
return NOTIFY_DONE;
return NOTIFY_OK;
}
+static int cpu_pmu_pm_notify(struct notifier_block *b,
+ unsigned long action, void *hcpu)
+{
+ int cpu = smp_processor_id();
+ struct arm_pmu *pmu = per_cpu(cpu_pmu, cpu);
+ struct cpupmu_regs *pmuregs = &per_cpu(cpu_pmu_regs, cpu);
+
+ if (!pmu)
+ return NOTIFY_DONE;
+
+ if (action == CPU_PM_ENTER && pmu->save_regs) {
+ pmu->save_regs(pmu, pmuregs);
+ } else if (action == CPU_PM_EXIT && pmu->restore_regs) {
+ pmu->restore_regs(pmu, pmuregs);
+ }
+
+ return NOTIFY_OK;
+}
+
static struct notifier_block __cpuinitdata cpu_pmu_hotplug_notifier = {
.notifier_call = cpu_pmu_notify,
};
+static struct notifier_block __cpuinitdata cpu_pmu_pm_notifier = {
+ .notifier_call = cpu_pmu_pm_notify,
+};
+
/*
* PMU platform driver and devicetree bindings.
*/
@@ -201,51 +234,44 @@ static struct platform_device_id cpu_pmu_plat_device_ids[] = {
static int probe_current_pmu(struct arm_pmu *pmu)
{
int cpu = get_cpu();
- unsigned long implementor = read_cpuid_implementor();
- unsigned long part_number = read_cpuid_part_number();
int ret = -ENODEV;
pr_info("probing PMU on CPU %d\n", cpu);
+ switch (read_cpuid_part()) {
/* ARM Ltd CPUs. */
- if (implementor == ARM_CPU_IMP_ARM) {
- switch (part_number) {
- case ARM_CPU_PART_ARM1136:
- case ARM_CPU_PART_ARM1156:
- case ARM_CPU_PART_ARM1176:
- ret = armv6pmu_init(pmu);
- break;
- case ARM_CPU_PART_ARM11MPCORE:
- ret = armv6mpcore_pmu_init(pmu);
- break;
- case ARM_CPU_PART_CORTEX_A8:
- ret = armv7_a8_pmu_init(pmu);
- break;
- case ARM_CPU_PART_CORTEX_A9:
- ret = armv7_a9_pmu_init(pmu);
- break;
- case ARM_CPU_PART_CORTEX_A5:
- ret = armv7_a5_pmu_init(pmu);
- break;
- case ARM_CPU_PART_CORTEX_A15:
- ret = armv7_a15_pmu_init(pmu);
- break;
- case ARM_CPU_PART_CORTEX_A7:
- ret = armv7_a7_pmu_init(pmu);
- break;
- }
- /* Intel CPUs [xscale]. */
- } else if (implementor == ARM_CPU_IMP_INTEL) {
- switch (xscale_cpu_arch_version()) {
- case ARM_CPU_XSCALE_ARCH_V1:
- ret = xscale1pmu_init(pmu);
- break;
- case ARM_CPU_XSCALE_ARCH_V2:
- ret = xscale2pmu_init(pmu);
- break;
+ case ARM_CPU_PART_ARM1136:
+ case ARM_CPU_PART_ARM1156:
+ case ARM_CPU_PART_ARM1176:
+ ret = armv6pmu_init(pmu);
+ break;
+ case ARM_CPU_PART_ARM11MPCORE:
+ ret = armv6mpcore_pmu_init(pmu);
+ break;
+ case ARM_CPU_PART_CORTEX_A8:
+ ret = armv7_a8_pmu_init(pmu);
+ break;
+ case ARM_CPU_PART_CORTEX_A9:
+ ret = armv7_a9_pmu_init(pmu);
+ break;
+
+ default:
+ if (read_cpuid_implementor() == ARM_CPU_IMP_INTEL) {
+ switch (xscale_cpu_arch_version()) {
+ case ARM_CPU_XSCALE_ARCH_V1:
+ ret = xscale1pmu_init(pmu);
+ break;
+ case ARM_CPU_XSCALE_ARCH_V2:
+ ret = xscale2pmu_init(pmu);
+ break;
+ }
}
+ break;
}
+ /* assume PMU support all the CPUs in this case */
+ cpumask_setall(&pmu->valid_cpus);
+
put_cpu();
return ret;
}
@@ -253,15 +279,10 @@ static int probe_current_pmu(struct arm_pmu *pmu)
static int cpu_pmu_device_probe(struct platform_device *pdev)
{
const struct of_device_id *of_id;
- int (*init_fn)(struct arm_pmu *);
struct device_node *node = pdev->dev.of_node;
struct arm_pmu *pmu;
- int ret = -ENODEV;
-
- if (cpu_pmu) {
- pr_info("attempt to register multiple PMU devices!");
- return -ENOSPC;
- }
+ int ret = 0;
+ int cpu;
pmu = kzalloc(sizeof(struct arm_pmu), GFP_KERNEL);
if (!pmu) {
@@ -270,8 +291,28 @@ static int cpu_pmu_device_probe(struct platform_device *pdev)
}
if (node && (of_id = of_match_node(cpu_pmu_of_device_ids, pdev->dev.of_node))) {
- init_fn = of_id->data;
- ret = init_fn(pmu);
+ smp_call_func_t init_fn = (smp_call_func_t)of_id->data;
+ struct device_node *ncluster;
+ int cluster = -1;
+ cpumask_t sibling_mask;
+
+ ncluster = of_parse_phandle(node, "cluster", 0);
+ if (ncluster) {
+ int len;
+ const u32 *hwid;
+ hwid = of_get_property(ncluster, "reg", &len);
+ if (hwid && len == 4)
+ cluster = be32_to_cpup(hwid);
+ }
+ /* set sibling mask to all cpu mask if socket is not specified */
+ if (cluster == -1 ||
+ cluster_to_logical_mask(cluster, &sibling_mask))
+ cpumask_setall(&sibling_mask);
+
+ smp_call_function_any(&sibling_mask, init_fn, pmu, 1);
+
+ /* now set the valid_cpus after init */
+ cpumask_copy(&pmu->valid_cpus, &sibling_mask);
} else {
ret = probe_current_pmu(pmu);
}
@@ -281,10 +322,12 @@ static int cpu_pmu_device_probe(struct platform_device *pdev)
goto out_free;
}
- cpu_pmu = pmu;
- cpu_pmu->plat_device = pdev;
- cpu_pmu_init(cpu_pmu);
- ret = armpmu_register(cpu_pmu, PERF_TYPE_RAW);
+ for_each_cpu_mask(cpu, pmu->valid_cpus)
+ per_cpu(cpu_pmu, cpu) = pmu;
+
+ pmu->plat_device = pdev;
+ cpu_pmu_init(pmu);
+ ret = armpmu_register(pmu, -1);
if (!ret)
return 0;
@@ -313,9 +356,17 @@ static int __init register_pmu_driver(void)
if (err)
return err;
+ err = cpu_pm_register_notifier(&cpu_pmu_pm_notifier);
+ if (err) {
+ unregister_cpu_notifier(&cpu_pmu_hotplug_notifier);
+ return err;
+ }
+
err = platform_driver_register(&cpu_pmu_driver);
- if (err)
+ if (err) {
+ cpu_pm_unregister_notifier(&cpu_pmu_pm_notifier);
unregister_cpu_notifier(&cpu_pmu_hotplug_notifier);
+ }
return err;
}
diff --git a/arch/arm/kernel/perf_event_v7.c b/arch/arm/kernel/perf_event_v7.c
index 039cffb053a7..654db5030c31 100644
--- a/arch/arm/kernel/perf_event_v7.c
+++ b/arch/arm/kernel/perf_event_v7.c
@@ -950,6 +950,51 @@ static void armv7_pmnc_dump_regs(struct arm_pmu *cpu_pmu)
}
#endif
+static void armv7pmu_save_regs(struct arm_pmu *cpu_pmu,
+ struct cpupmu_regs *regs)
+{
+ unsigned int cnt;
+ asm volatile("mrc p15, 0, %0, c9, c12, 0" : "=r" (regs->pmc));
+ if (!(regs->pmc & ARMV7_PMNC_E))
+ return;
+
+ asm volatile("mrc p15, 0, %0, c9, c12, 1" : "=r" (regs->pmcntenset));
+ asm volatile("mrc p15, 0, %0, c9, c14, 0" : "=r" (regs->pmuseren));
+ asm volatile("mrc p15, 0, %0, c9, c14, 1" : "=r" (regs->pmintenset));
+ asm volatile("mrc p15, 0, %0, c9, c13, 0" : "=r" (regs->pmxevtcnt[0]));
+ for (cnt = ARMV7_IDX_COUNTER0;
+ cnt <= ARMV7_IDX_COUNTER_LAST(cpu_pmu); cnt++) {
+ armv7_pmnc_select_counter(cnt);
+ asm volatile("mrc p15, 0, %0, c9, c13, 1"
+ : "=r"(regs->pmxevttype[cnt]));
+ asm volatile("mrc p15, 0, %0, c9, c13, 2"
+ : "=r"(regs->pmxevtcnt[cnt]));
+ }
+ return;
+}
+
+static void armv7pmu_restore_regs(struct arm_pmu *cpu_pmu,
+ struct cpupmu_regs *regs)
+{
+ unsigned int cnt;
+ if (!(regs->pmc & ARMV7_PMNC_E))
+ return;
+
+ asm volatile("mcr p15, 0, %0, c9, c12, 1" : : "r" (regs->pmcntenset));
+ asm volatile("mcr p15, 0, %0, c9, c14, 0" : : "r" (regs->pmuseren));
+ asm volatile("mcr p15, 0, %0, c9, c14, 1" : : "r" (regs->pmintenset));
+ asm volatile("mcr p15, 0, %0, c9, c13, 0" : : "r" (regs->pmxevtcnt[0]));
+ for (cnt = ARMV7_IDX_COUNTER0;
+ cnt <= ARMV7_IDX_COUNTER_LAST(cpu_pmu); cnt++) {
+ armv7_pmnc_select_counter(cnt);
+ asm volatile("mcr p15, 0, %0, c9, c13, 1"
+ : : "r"(regs->pmxevttype[cnt]));
+ asm volatile("mcr p15, 0, %0, c9, c13, 2"
+ : : "r"(regs->pmxevtcnt[cnt]));
+ }
+ asm volatile("mcr p15, 0, %0, c9, c12, 0" : : "r" (regs->pmc));
+}
+
static void armv7pmu_enable_event(struct perf_event *event)
{
unsigned long flags;
@@ -1223,6 +1268,8 @@ static void armv7pmu_init(struct arm_pmu *cpu_pmu)
cpu_pmu->start = armv7pmu_start;
cpu_pmu->stop = armv7pmu_stop;
cpu_pmu->reset = armv7pmu_reset;
+ cpu_pmu->save_regs = armv7pmu_save_regs;
+ cpu_pmu->restore_regs = armv7pmu_restore_regs;
cpu_pmu->max_period = (1LLU << 32) - 1;
};
@@ -1240,7 +1287,7 @@ static u32 armv7_read_num_pmnc_events(void)
static int armv7_a8_pmu_init(struct arm_pmu *cpu_pmu)
{
armv7pmu_init(cpu_pmu);
- cpu_pmu->name = "ARMv7 Cortex-A8";
+ cpu_pmu->name = "ARMv7_Cortex_A8";
cpu_pmu->map_event = armv7_a8_map_event;
cpu_pmu->num_events = armv7_read_num_pmnc_events();
return 0;
@@ -1249,7 +1296,7 @@ static int armv7_a8_pmu_init(struct arm_pmu *cpu_pmu)
static int armv7_a9_pmu_init(struct arm_pmu *cpu_pmu)
{
armv7pmu_init(cpu_pmu);
- cpu_pmu->name = "ARMv7 Cortex-A9";
+ cpu_pmu->name = "ARMv7_Cortex_A9";
cpu_pmu->map_event = armv7_a9_map_event;
cpu_pmu->num_events = armv7_read_num_pmnc_events();
return 0;
@@ -1258,7 +1305,7 @@ static int armv7_a9_pmu_init(struct arm_pmu *cpu_pmu)
static int armv7_a5_pmu_init(struct arm_pmu *cpu_pmu)
{
armv7pmu_init(cpu_pmu);
- cpu_pmu->name = "ARMv7 Cortex-A5";
+ cpu_pmu->name = "ARMv7_Cortex_A5";
cpu_pmu->map_event = armv7_a5_map_event;
cpu_pmu->num_events = armv7_read_num_pmnc_events();
return 0;
@@ -1267,7 +1314,7 @@ static int armv7_a5_pmu_init(struct arm_pmu *cpu_pmu)
static int armv7_a15_pmu_init(struct arm_pmu *cpu_pmu)
{
armv7pmu_init(cpu_pmu);
- cpu_pmu->name = "ARMv7 Cortex-A15";
+ cpu_pmu->name = "ARMv7_Cortex_A15";
cpu_pmu->map_event = armv7_a15_map_event;
cpu_pmu->num_events = armv7_read_num_pmnc_events();
cpu_pmu->set_event_filter = armv7pmu_set_event_filter;
@@ -1277,7 +1324,7 @@ static int armv7_a15_pmu_init(struct arm_pmu *cpu_pmu)
static int armv7_a7_pmu_init(struct arm_pmu *cpu_pmu)
{
armv7pmu_init(cpu_pmu);
- cpu_pmu->name = "ARMv7 Cortex-A7";
+ cpu_pmu->name = "ARMv7_Cortex_A7";
cpu_pmu->map_event = armv7_a7_map_event;
cpu_pmu->num_events = armv7_read_num_pmnc_events();
cpu_pmu->set_event_filter = armv7pmu_set_event_filter;
diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c
index 6e8931ccf13e..ac4c2e5e17e4 100644
--- a/arch/arm/kernel/process.c
+++ b/arch/arm/kernel/process.c
@@ -408,6 +408,7 @@ EXPORT_SYMBOL(dump_fpu);
unsigned long get_wchan(struct task_struct *p)
{
struct stackframe frame;
+ unsigned long stack_page;
int count = 0;
if (!p || p == current || p->state == TASK_RUNNING)
return 0;
@@ -416,9 +417,11 @@ unsigned long get_wchan(struct task_struct *p)
frame.sp = thread_saved_sp(p);
frame.lr = 0; /* recovered from the stack */
frame.pc = thread_saved_pc(p);
+ stack_page = (unsigned long)task_stack_page(p);
do {
- int ret = unwind_frame(&frame);
- if (ret < 0)
+ if (frame.sp < stack_page ||
+ frame.sp >= stack_page + THREAD_SIZE ||
+ unwind_frame(&frame) < 0)
return 0;
if (!in_sched_functions(frame.pc))
return frame.pc;
@@ -433,10 +436,11 @@ unsigned long arch_randomize_brk(struct mm_struct *mm)
}
#ifdef CONFIG_MMU
+#ifdef CONFIG_KUSER_HELPERS
/*
* The vectors page is always readable from user space for the
- * atomic helpers and the signal restart code. Insert it into the
- * gate_vma so that it is visible through ptrace and /proc/<pid>/mem.
+ * atomic helpers. Insert it into the gate_vma so that it is visible
+ * through ptrace and /proc/<pid>/mem.
*/
static struct vm_area_struct gate_vma = {
.vm_start = 0xffff0000,
@@ -465,9 +469,48 @@ int in_gate_area_no_mm(unsigned long addr)
{
return in_gate_area(NULL, addr);
}
+#define is_gate_vma(vma) ((vma) == &gate_vma)
+#else
+#define is_gate_vma(vma) 0
+#endif
const char *arch_vma_name(struct vm_area_struct *vma)
{
- return (vma == &gate_vma) ? "[vectors]" : NULL;
+ return is_gate_vma(vma) ? "[vectors]" :
+ (vma->vm_mm && vma->vm_start == vma->vm_mm->context.sigpage) ?
+ "[sigpage]" : NULL;
+}
+
+static struct page *signal_page;
+extern struct page *get_signal_page(void);
+
+int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
+{
+ struct mm_struct *mm = current->mm;
+ unsigned long addr;
+ int ret;
+
+ if (!signal_page)
+ signal_page = get_signal_page();
+ if (!signal_page)
+ return -ENOMEM;
+
+ down_write(&mm->mmap_sem);
+ addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, 0);
+ if (IS_ERR_VALUE(addr)) {
+ ret = addr;
+ goto up_fail;
+ }
+
+ ret = install_special_mapping(mm, addr, PAGE_SIZE,
+ VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC,
+ &signal_page);
+
+ if (ret == 0)
+ mm->context.sigpage = addr;
+
+ up_fail:
+ up_write(&mm->mmap_sem);
+ return ret;
}
#endif
diff --git a/arch/arm/kernel/psci.c b/arch/arm/kernel/psci.c
index 36531643cc2c..0daf4f252284 100644
--- a/arch/arm/kernel/psci.c
+++ b/arch/arm/kernel/psci.c
@@ -17,6 +17,7 @@
#include <linux/init.h>
#include <linux/of.h>
+#include <linux/string.h>
#include <asm/compiler.h>
#include <asm/errno.h>
@@ -26,6 +27,11 @@
struct psci_operations psci_ops;
+/* Type of psci support. Currently can only be enabled or disabled */
+#define PSCI_SUP_DISABLED 0
+#define PSCI_SUP_ENABLED 1
+
+static unsigned int psci;
static int (*invoke_psci_fn)(u32, u32, u32, u32);
enum psci_function {
@@ -42,6 +48,7 @@ static u32 psci_function_id[PSCI_FN_MAX];
#define PSCI_RET_EOPNOTSUPP -1
#define PSCI_RET_EINVAL -2
#define PSCI_RET_EPERM -3
+#define PSCI_RET_EALREADYON -4
static int psci_to_linux_errno(int errno)
{
@@ -54,6 +61,8 @@ static int psci_to_linux_errno(int errno)
return -EINVAL;
case PSCI_RET_EPERM:
return -EPERM;
+ case PSCI_RET_EALREADYON:
+ return -EAGAIN;
};
return -EINVAL;
@@ -158,15 +167,18 @@ static const struct of_device_id psci_of_match[] __initconst = {
{},
};
-static int __init psci_init(void)
+void __init psci_init(void)
{
struct device_node *np;
const char *method;
u32 id;
+ if (psci == PSCI_SUP_DISABLED)
+ return;
+
np = of_find_matching_node(NULL, psci_of_match);
if (!np)
- return 0;
+ return;
pr_info("probing function IDs from device-tree\n");
@@ -206,6 +218,35 @@ static int __init psci_init(void)
out_put_node:
of_node_put(np);
- return 0;
+ return;
+}
+
+int __init psci_probe(void)
+{
+	struct device_node *np = NULL;
+ int ret = -ENODEV;
+
+ if (psci == PSCI_SUP_ENABLED) {
+ np = of_find_matching_node(NULL, psci_of_match);
+ if (np)
+ ret = 0;
+ }
+
+ of_node_put(np);
+ return ret;
+}
+
+static int __init early_psci(char *val)
+{
+ int ret = 0;
+
+ if (strcmp(val, "enable") == 0)
+ psci = PSCI_SUP_ENABLED;
+ else if (strcmp(val, "disable") == 0)
+ psci = PSCI_SUP_DISABLED;
+ else
+ ret = -EINVAL;
+
+ return ret;
}
-early_initcall(psci_init);
+early_param("psci", early_psci);
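
The psci.c hunk above adds PSCI_RET_EALREADYON and maps it to -EAGAIN in psci_to_linux_errno(). A stand-alone model of that translation table; it is a user-space sketch using <errno.h> constants rather than the kernel's definitions:

#include <errno.h>
#include <stdio.h>

/* Firmware return codes, as defined in the patch. */
#define PSCI_RET_SUCCESS	 0
#define PSCI_RET_EOPNOTSUPP	-1
#define PSCI_RET_EINVAL		-2
#define PSCI_RET_EPERM		-3
#define PSCI_RET_EALREADYON	-4

static int psci_to_linux_errno(int err)
{
	switch (err) {
	case PSCI_RET_SUCCESS:
		return 0;
	case PSCI_RET_EOPNOTSUPP:
		return -EOPNOTSUPP;
	case PSCI_RET_EINVAL:
		return -EINVAL;
	case PSCI_RET_EPERM:
		return -EPERM;
	case PSCI_RET_EALREADYON:
		return -EAGAIN;	/* new in this patch: CPU is already powered on */
	}
	return -EINVAL;
}

int main(void)
{
	int code;

	for (code = 0; code >= -5; code--)
		printf("firmware %2d -> linux %d\n", code, psci_to_linux_errno(code));
	return 0;
}
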
diff --git a/arch/arm/kernel/psci_smp.c b/arch/arm/kernel/psci_smp.c
new file mode 100644
index 000000000000..23a11424c568
--- /dev/null
+++ b/arch/arm/kernel/psci_smp.c
@@ -0,0 +1,84 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * Copyright (C) 2012 ARM Limited
+ *
+ * Author: Will Deacon <will.deacon@arm.com>
+ */
+
+#include <linux/init.h>
+#include <linux/irqchip/arm-gic.h>
+#include <linux/smp.h>
+#include <linux/of.h>
+
+#include <asm/psci.h>
+#include <asm/smp_plat.h>
+
+/*
+ * psci_smp assumes that the following is true about PSCI:
+ *
+ * cpu_suspend Suspend the execution on a CPU
+ * @state we don't currently describe affinity levels, so just pass 0.
+ * @entry_point the first instruction to be executed on return
+ * returns 0 success, < 0 on failure
+ *
+ * cpu_off Power down a CPU
+ * @state we don't currently describe affinity levels, so just pass 0.
+ * no return on successful call
+ *
+ * cpu_on Power up a CPU
+ * @cpuid cpuid of target CPU, as from MPIDR
+ * @entry_point the first instruction to be executed on return
+ * returns 0 success, < 0 on failure
+ *
+ * migrate Migrate the context to a different CPU
+ * @cpuid cpuid of target CPU, as from MPIDR
+ * returns 0 success, < 0 on failure
+ *
+ */
+
+extern void secondary_startup(void);
+
+static int __cpuinit psci_boot_secondary(unsigned int cpu,
+ struct task_struct *idle)
+{
+ if (psci_ops.cpu_on)
+ return psci_ops.cpu_on(cpu_logical_map(cpu),
+ __pa(secondary_startup));
+ return -ENODEV;
+}
+
+#ifdef CONFIG_HOTPLUG_CPU
+void __ref psci_cpu_die(unsigned int cpu)
+{
+ const struct psci_power_state ps = {
+ .type = PSCI_POWER_STATE_TYPE_POWER_DOWN,
+ };
+
+ if (psci_ops.cpu_off)
+ psci_ops.cpu_off(ps);
+
+ /* We should never return */
+ panic("psci: cpu %d failed to shutdown\n", cpu);
+}
+#else
+#define psci_cpu_die NULL
+#endif
+
+bool __init psci_smp_available(void)
+{
+ /* is cpu_on available at least? */
+ return (psci_ops.cpu_on != NULL);
+}
+
+struct smp_operations __initdata psci_smp_ops = {
+ .smp_boot_secondary = psci_boot_secondary,
+ .cpu_die = psci_cpu_die,
+};
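
setup_arch() (see the setup.c hunk further down) only installs psci_smp_ops when psci_smp_available() reports that cpu_on was discovered, and otherwise falls back to the machine descriptor's ops. A simplified, self-contained model of that selection logic, with stub types standing in for the kernel's struct smp_operations and machine descriptor:

#include <stdbool.h>
#include <stdio.h>

/* Stub stand-ins for the kernel structures involved. */
struct smp_operations { const char *name; };

static struct smp_operations psci_smp_ops = { "psci" };
static struct smp_operations mach_smp_ops = { "machine-specific" };

/* Pretend cpu_on was (or was not) found under the DT "psci" node. */
static bool psci_smp_available(void) { return true; }

static const struct smp_operations *pick_smp_ops(const struct smp_operations *mdesc_ops)
{
	/* Prefer PSCI when the firmware advertises CPU_ON, as in setup_arch(). */
	if (psci_smp_available())
		return &psci_smp_ops;
	return mdesc_ops;	/* fall back to the machine descriptor's ops */
}

int main(void)
{
	printf("selected smp_ops: %s\n", pick_smp_ops(&mach_smp_ops)->name);
	return 0;
}
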
diff --git a/arch/arm/kernel/relocate_kernel.S b/arch/arm/kernel/relocate_kernel.S
index d0cdedf4864d..95858966d84e 100644
--- a/arch/arm/kernel/relocate_kernel.S
+++ b/arch/arm/kernel/relocate_kernel.S
@@ -2,10 +2,12 @@
* relocate_kernel.S - put the kernel image in place to boot
*/
+#include <linux/linkage.h>
#include <asm/kexec.h>
- .globl relocate_new_kernel
-relocate_new_kernel:
+ .align 3 /* not needed for this code, but keeps fncpy() happy */
+
+ENTRY(relocate_new_kernel)
ldr r0,kexec_indirection_page
ldr r1,kexec_start_address
@@ -79,6 +81,8 @@ kexec_mach_type:
kexec_boot_atags:
.long 0x0
+ENDPROC(relocate_new_kernel)
+
relocate_new_kernel_end:
.globl relocate_new_kernel_size
diff --git a/arch/arm/kernel/sched_clock.c b/arch/arm/kernel/sched_clock.c
index e8edcaa0e432..a57cc5d33540 100644
--- a/arch/arm/kernel/sched_clock.c
+++ b/arch/arm/kernel/sched_clock.c
@@ -51,10 +51,11 @@ static inline u64 notrace cyc_to_ns(u64 cyc, u32 mult, u32 shift)
return (cyc * mult) >> shift;
}
-static unsigned long long notrace cyc_to_sched_clock(u32 cyc, u32 mask)
+static unsigned long long notrace sched_clock_32(void)
{
u64 epoch_ns;
u32 epoch_cyc;
+ u32 cyc;
if (cd.suspended)
return cd.epoch_ns;
@@ -73,7 +74,9 @@ static unsigned long long notrace cyc_to_sched_clock(u32 cyc, u32 mask)
smp_rmb();
} while (epoch_cyc != cd.epoch_cyc_copy);
- return epoch_ns + cyc_to_ns((cyc - epoch_cyc) & mask, cd.mult, cd.shift);
+ cyc = read_sched_clock();
+ cyc = (cyc - epoch_cyc) & sched_clock_mask;
+ return epoch_ns + cyc_to_ns(cyc, cd.mult, cd.shift);
}
/*
@@ -165,12 +168,6 @@ void __init setup_sched_clock(u32 (*read)(void), int bits, unsigned long rate)
pr_debug("Registered %pF as sched_clock source\n", read);
}
-static unsigned long long notrace sched_clock_32(void)
-{
- u32 cyc = read_sched_clock();
- return cyc_to_sched_clock(cyc, sched_clock_mask);
-}
-
unsigned long long __read_mostly (*sched_clock_func)(void) = sched_clock_32;
unsigned long long notrace sched_clock(void)
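
The sched_clock.c change folds the counter read into sched_clock_32() itself; the returned value is still the epoch-based, wrap-safe computation epoch_ns + cyc_to_ns((cyc - epoch_cyc) & mask, mult, shift). A stand-alone numeric model of that arithmetic; the counter width, mult and shift values below are made up for illustration:

#include <stdint.h>
#include <stdio.h>

static uint64_t cyc_to_ns(uint64_t cyc, uint32_t mult, uint32_t shift)
{
	return (cyc * mult) >> shift;
}

int main(void)
{
	/* Example parameters: a 32-bit counter scaled by an illustrative mult/shift. */
	const uint32_t mask  = 0xffffffffu;
	const uint32_t mult  = 1000;	/* illustrative */
	const uint32_t shift = 0;	/* illustrative */

	uint64_t epoch_ns  = 5000000000ull;	/* ns accumulated at the last epoch update */
	uint32_t epoch_cyc = 0xfffffff0u;	/* counter value at that update */
	uint32_t cyc       = 0x00000010u;	/* counter has since wrapped */

	/* (cyc - epoch_cyc) & mask handles the wrap: 0x10 - 0xfffffff0 = 0x20 cycles. */
	uint64_t ns = epoch_ns + cyc_to_ns((cyc - epoch_cyc) & mask, mult, shift);

	printf("sched_clock ~= %llu ns\n", (unsigned long long)ns);
	return 0;
}
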
diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c
index b4b1d397592b..29beb8c76560 100644
--- a/arch/arm/kernel/setup.c
+++ b/arch/arm/kernel/setup.c
@@ -37,6 +37,7 @@
#include <asm/cputype.h>
#include <asm/elf.h>
#include <asm/procinfo.h>
+#include <asm/psci.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/smp_plat.h>
@@ -261,6 +262,19 @@ static int cpu_has_aliasing_icache(unsigned int arch)
int aliasing_icache;
unsigned int id_reg, num_sets, line_size;
+#ifdef CONFIG_BIG_LITTLE
+ /*
+ * We expect a combination of Cortex-A15 and Cortex-A7 cores.
+ * A7 = VIPT aliasing I-cache
+ * A15 = PIPT (non-aliasing) I-cache
+ * To cater for this discrepancy, let's assume aliasing I-cache
+ * all the time. This means unneeded extra work on the A15 but
+ * only ptrace is affected which is not performance critical.
+ */
+ if ((read_cpuid_id() & 0xff0ffff0) == 0x410fc0f0)
+ return 1;
+#endif
+
/* PIPT caches never alias. */
if (icache_is_pipt())
return 0;
@@ -530,6 +544,7 @@ void __init dump_machine_table(void)
int __init arm_add_memory(phys_addr_t start, phys_addr_t size)
{
struct membank *bank = &meminfo.bank[meminfo.nr_banks];
+ u64 aligned_start;
if (meminfo.nr_banks >= NR_BANKS) {
printk(KERN_CRIT "NR_BANKS too low, "
@@ -542,10 +557,16 @@ int __init arm_add_memory(phys_addr_t start, phys_addr_t size)
* Size is appropriately rounded down, start is rounded up.
*/
size -= start & ~PAGE_MASK;
- bank->start = PAGE_ALIGN(start);
+ aligned_start = PAGE_ALIGN(start);
+
+#ifndef CONFIG_ARCH_PHYS_ADDR_T_64BIT
+ if (aligned_start > ULONG_MAX) {
+ printk(KERN_CRIT "Ignoring memory at 0x%08llx outside "
+ "32-bit physical address space\n", (long long)start);
+ return -EINVAL;
+ }
-#ifndef CONFIG_ARM_LPAE
- if (bank->start + size < bank->start) {
+ if (aligned_start + size > ULONG_MAX) {
printk(KERN_CRIT "Truncating memory at 0x%08llx to fit in "
"32-bit physical address space\n", (long long)start);
/*
@@ -553,10 +574,25 @@ int __init arm_add_memory(phys_addr_t start, phys_addr_t size)
* 32 bits, we use ULONG_MAX as the upper limit rather than 4GB.
* This means we lose a page after masking.
*/
- size = ULONG_MAX - bank->start;
+ size = ULONG_MAX - aligned_start;
}
#endif
+ if (aligned_start < PHYS_OFFSET) {
+ if (aligned_start + size <= PHYS_OFFSET) {
+ pr_info("Ignoring memory below PHYS_OFFSET: 0x%08llx-0x%08llx\n",
+ aligned_start, aligned_start + size);
+ return -EINVAL;
+ }
+
+ pr_info("Ignoring memory below PHYS_OFFSET: 0x%08llx-0x%08llx\n",
+ aligned_start, (u64)PHYS_OFFSET);
+
+ size -= PHYS_OFFSET - aligned_start;
+ aligned_start = PHYS_OFFSET;
+ }
+
+ bank->start = aligned_start;
bank->size = size & ~(phys_addr_t)(PAGE_SIZE - 1);
/*
@@ -796,9 +832,15 @@ void __init setup_arch(char **cmdline_p)
unflatten_device_tree();
arm_dt_init_cpu_maps();
+ psci_init();
#ifdef CONFIG_SMP
if (is_smp()) {
- smp_set_ops(mdesc->smp);
+ if (!mdesc->smp_init || !mdesc->smp_init()) {
+ if (psci_smp_available())
+ smp_set_ops(&psci_smp_ops);
+ else if (mdesc->smp)
+ smp_set_ops(mdesc->smp);
+ }
smp_init_cpus();
}
#endif
@@ -872,6 +914,9 @@ static const char *hwcap_str[] = {
"vfpv4",
"idiva",
"idivt",
+ "vfpd32",
+ "lpae",
+ "evtstrm",
NULL
};
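
The arm_add_memory() rework above page-aligns the bank start, rejects banks that lie entirely below PHYS_OFFSET or above the 32-bit limit, and truncates banks that straddle either boundary. A user-space model of that clamping arithmetic; the PAGE_SIZE, PHYS_OFFSET and test bank values are illustrative:

#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE	4096ull
#define PHYS_OFFSET	0x80000000ull	/* illustrative RAM base */
#define ADDR_LIMIT	0xffffffffull	/* no LPAE: 32-bit physical addresses */

/* Returns 0 and writes the clamped bank, or -1 if the bank is dropped. */
static int add_memory(uint64_t start, uint64_t size,
		      uint64_t *bank_start, uint64_t *bank_size)
{
	uint64_t aligned_start;

	/* Round the start up to a page boundary; shrink the size accordingly. */
	size -= start & (PAGE_SIZE - 1);
	aligned_start = (start + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1);

	if (aligned_start > ADDR_LIMIT)
		return -1;				/* entirely above 4 GiB */
	if (aligned_start + size > ADDR_LIMIT)
		size = ADDR_LIMIT - aligned_start;	/* truncate at 4 GiB */

	if (aligned_start < PHYS_OFFSET) {
		if (aligned_start + size <= PHYS_OFFSET)
			return -1;			/* entirely below PHYS_OFFSET */
		size -= PHYS_OFFSET - aligned_start;
		aligned_start = PHYS_OFFSET;		/* clamp to PHYS_OFFSET */
	}

	*bank_start = aligned_start;
	*bank_size  = size & ~(PAGE_SIZE - 1);
	return 0;
}

int main(void)
{
	uint64_t s, sz;

	if (!add_memory(0x7ffff000ull, 0x40000000ull, &s, &sz))
		printf("bank: 0x%llx + 0x%llx\n",
		       (unsigned long long)s, (unsigned long long)sz);
	return 0;
}
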
diff --git a/arch/arm/kernel/signal.c b/arch/arm/kernel/signal.c
index 296786bdbb73..3c23086dc8e2 100644
--- a/arch/arm/kernel/signal.c
+++ b/arch/arm/kernel/signal.c
@@ -8,6 +8,7 @@
* published by the Free Software Foundation.
*/
#include <linux/errno.h>
+#include <linux/random.h>
#include <linux/signal.h>
#include <linux/personality.h>
#include <linux/uaccess.h>
@@ -15,35 +16,14 @@
#include <asm/elf.h>
#include <asm/cacheflush.h>
+#include <asm/traps.h>
#include <asm/ucontext.h>
#include <asm/unistd.h>
#include <asm/vfp.h>
-#include "signal.h"
+extern const unsigned long sigreturn_codes[7];
-/*
- * For ARM syscalls, we encode the syscall number into the instruction.
- */
-#define SWI_SYS_SIGRETURN (0xef000000|(__NR_sigreturn)|(__NR_OABI_SYSCALL_BASE))
-#define SWI_SYS_RT_SIGRETURN (0xef000000|(__NR_rt_sigreturn)|(__NR_OABI_SYSCALL_BASE))
-
-/*
- * With EABI, the syscall number has to be loaded into r7.
- */
-#define MOV_R7_NR_SIGRETURN (0xe3a07000 | (__NR_sigreturn - __NR_SYSCALL_BASE))
-#define MOV_R7_NR_RT_SIGRETURN (0xe3a07000 | (__NR_rt_sigreturn - __NR_SYSCALL_BASE))
-
-/*
- * For Thumb syscalls, we pass the syscall number via r7. We therefore
- * need two 16-bit instructions.
- */
-#define SWI_THUMB_SIGRETURN (0xdf00 << 16 | 0x2700 | (__NR_sigreturn - __NR_SYSCALL_BASE))
-#define SWI_THUMB_RT_SIGRETURN (0xdf00 << 16 | 0x2700 | (__NR_rt_sigreturn - __NR_SYSCALL_BASE))
-
-const unsigned long sigreturn_codes[7] = {
- MOV_R7_NR_SIGRETURN, SWI_SYS_SIGRETURN, SWI_THUMB_SIGRETURN,
- MOV_R7_NR_RT_SIGRETURN, SWI_SYS_RT_SIGRETURN, SWI_THUMB_RT_SIGRETURN,
-};
+static unsigned long signal_return_offset;
#ifdef CONFIG_CRUNCH
static int preserve_crunch_context(struct crunch_sigframe __user *frame)
@@ -396,13 +376,19 @@ setup_return(struct pt_regs *regs, struct ksignal *ksig,
__put_user(sigreturn_codes[idx+1], rc+1))
return 1;
+#ifdef CONFIG_MMU
if (cpsr & MODE32_BIT) {
+ struct mm_struct *mm = current->mm;
/*
- * 32-bit code can use the new high-page
- * signal return code support.
+ * 32-bit code can use the signal return page
+ * except when the MPU has protected the vectors
+ * page from PL0
*/
- retcode = KERN_SIGRETURN_CODE + (idx << 2) + thumb;
- } else {
+ retcode = mm->context.sigpage + signal_return_offset +
+ (idx << 2) + thumb;
+ } else
+#endif
+ {
/*
* Ensure that the instruction cache sees
* the return code written onto the stack.
@@ -603,3 +589,33 @@ do_work_pending(struct pt_regs *regs, unsigned int thread_flags, int syscall)
} while (thread_flags & _TIF_WORK_MASK);
return 0;
}
+
+struct page *get_signal_page(void)
+{
+ unsigned long ptr;
+ unsigned offset;
+ struct page *page;
+ void *addr;
+
+ page = alloc_pages(GFP_KERNEL, 0);
+
+ if (!page)
+ return NULL;
+
+ addr = page_address(page);
+
+ /* Give the signal return code some randomness */
+ offset = 0x200 + (get_random_int() & 0x7fc);
+ signal_return_offset = offset;
+
+ /*
+	 * Copy the signal return handlers into the signal page, and
+	 * remember the offset so setup_return() can point sigreturn at them.
+ */
+ memcpy(addr + offset, sigreturn_codes, sizeof(sigreturn_codes));
+
+ ptr = (unsigned long)addr + offset;
+ flush_icache_range(ptr, ptr + sizeof(sigreturn_codes));
+
+ return page;
+}
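
get_signal_page() above allocates a fresh page, copies the sigreturn_codes snippets to a randomized, word-aligned offset, and remembers that offset for setup_return(). A user-space sketch of the same layout decisions, using mmap() in place of alloc_pages() and rand() in place of get_random_int(); the snippet contents are placeholders:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/mman.h>

#define PAGE_SIZE 4096

/* Placeholder snippet: seven 32-bit words, like sigreturn_codes[]. */
static const unsigned long snippets[7] = { 1, 2, 3, 4, 5, 6, 0 };

int main(void)
{
	void *page;
	unsigned offset;

	page = mmap(NULL, PAGE_SIZE, PROT_READ | PROT_WRITE,
		    MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (page == MAP_FAILED)
		return 1;

	/*
	 * 0x200 + (random & 0x7fc): always word aligned, always at least
	 * 0x200 into the page, and at most 0x9fc, leaving room for the
	 * snippets before the end of the page.
	 */
	srand(1);
	offset = 0x200 + (rand() & 0x7fc);
	memcpy((char *)page + offset, snippets, sizeof(snippets));

	printf("snippets copied at page offset 0x%x (end 0x%zx)\n",
	       offset, offset + sizeof(snippets));
	return 0;
}
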
diff --git a/arch/arm/kernel/signal.h b/arch/arm/kernel/signal.h
deleted file mode 100644
index 5ff067b7c752..000000000000
--- a/arch/arm/kernel/signal.h
+++ /dev/null
@@ -1,12 +0,0 @@
-/*
- * linux/arch/arm/kernel/signal.h
- *
- * Copyright (C) 2005-2009 Russell King.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-#define KERN_SIGRETURN_CODE (CONFIG_VECTORS_BASE + 0x00000500)
-
-extern const unsigned long sigreturn_codes[7];
diff --git a/arch/arm/kernel/sigreturn_codes.S b/arch/arm/kernel/sigreturn_codes.S
new file mode 100644
index 000000000000..3c5d0f2170fd
--- /dev/null
+++ b/arch/arm/kernel/sigreturn_codes.S
@@ -0,0 +1,80 @@
+/*
+ * sigreturn_codes.S - code snippets for sigreturn syscalls
+ *
+ * Created by: Victor Kamensky, 2013-08-13
+ * Copyright: (C) 2013 Linaro Limited
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <asm/unistd.h>
+
+/*
+ * For ARM syscalls, we encode the syscall number into the instruction.
+ * With EABI, the syscall number has to be loaded into r7. As a result,
+ * the ARM syscall sequence snippet consists of a mov and an svc in
+ * .arm encoding.
+ *
+ * For Thumb syscalls, we pass the syscall number via r7. We therefore
+ * need two 16-bit instructions in .thumb encoding.
+ *
+ * Note that the sigreturn_codes snippets are not executed in place;
+ * the kernel copies them to the appropriate location. The code in
+ * arch/arm/kernel/signal.c is very sensitive to the layout of these
+ * snippets.
+ */
+
+#if __LINUX_ARM_ARCH__ <= 4
+ /*
+	 * Note that we manually set the minimum arch version that provides
+	 * the Thumb opcodes required here when building for earlier arch
+	 * versions. It is fine for this object to be combined with lower
+	 * arch variants, since these code snippets are only used as data.
+ */
+ .arch armv4t
+#endif
+
+ .section .rodata
+ .global sigreturn_codes
+ .type sigreturn_codes, #object
+
+ .arm
+
+sigreturn_codes:
+
+ /* ARM sigreturn syscall code snippet */
+ mov r7, #(__NR_sigreturn - __NR_SYSCALL_BASE)
+ swi #(__NR_sigreturn)|(__NR_OABI_SYSCALL_BASE)
+
+ /* Thumb sigreturn syscall code snippet */
+ .thumb
+ movs r7, #(__NR_sigreturn - __NR_SYSCALL_BASE)
+ swi #0
+
+ /* ARM sigreturn_rt syscall code snippet */
+ .arm
+ mov r7, #(__NR_rt_sigreturn - __NR_SYSCALL_BASE)
+ swi #(__NR_rt_sigreturn)|(__NR_OABI_SYSCALL_BASE)
+
+ /* Thumb sigreturn_rt syscall code snippet */
+ .thumb
+ movs r7, #(__NR_rt_sigreturn - __NR_SYSCALL_BASE)
+ swi #0
+
+ /*
+	 * Note on the additional space: setup_return() in signal.c always
+	 * copies two words, regardless of whether it is the Thumb case or
+	 * not, so we need an additional word after the real last entry.
+ */
+ .arm
+ .space 4
+
+ .size sigreturn_codes, . - sigreturn_codes
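
The trailing .space 4 matters because setup_return() always copies two consecutive words from sigreturn_codes[] (see the __put_user pair in the signal.c hunk above). With the array ordering retained from the old signal.c table, the starting index runs up to 5 for the Thumb rt_sigreturn case, so the copy touches index 6, hence the seven-word array. A small model of that two-word copy; the 0xAAAA…/0xBBBB… values are placeholders, not real opcodes, and the index values follow the layout described in the comments above:

#include <stdio.h>

/*
 * Model of sigreturn_codes[]: three snippet pairs plus one padding word,
 * mirroring the layout in sigreturn_codes.S above.
 */
static const unsigned long sigreturn_codes[7] = {
	0xAAAA0001, 0xAAAA0002,		/* ARM sigreturn:   mov r7 + svc  */
	0xBBBB0001,			/* Thumb sigreturn: movs + swi    */
	0xAAAA0003, 0xAAAA0004,		/* ARM rt_sigreturn               */
	0xBBBB0002,			/* Thumb rt_sigreturn             */
	/* index 6 is the .space 4 padding word (implicitly zero here)   */
};

int main(void)
{
	/* Starting index for each case: ARM=0/3, Thumb=2/5. */
	static const unsigned idx[4] = { 0, 2, 3, 5 };
	unsigned i;

	for (i = 0; i < 4; i++)
		printf("idx %u: copy words [%u]=0x%08lx and [%u]=0x%08lx\n",
		       idx[i], idx[i], sigreturn_codes[idx[i]],
		       idx[i] + 1, sigreturn_codes[idx[i] + 1]);
	return 0;
}
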
diff --git a/arch/arm/kernel/sleep.S b/arch/arm/kernel/sleep.S
index 987dcf33415c..baf4d28213a5 100644
--- a/arch/arm/kernel/sleep.S
+++ b/arch/arm/kernel/sleep.S
@@ -4,6 +4,7 @@
#include <asm/assembler.h>
#include <asm/glue-cache.h>
#include <asm/glue-proc.h>
+#include "entry-header.S"
.text
/*
@@ -30,9 +31,8 @@ ENTRY(__cpu_suspend)
mov r2, r5 @ virtual SP
ldr r3, =sleep_save_sp
#ifdef CONFIG_SMP
- ALT_SMP(mrc p15, 0, lr, c0, c0, 5)
- ALT_UP(mov lr, #0)
- and lr, lr, #15
+ get_thread_info r5
+ ldr lr, [r5, #TI_CPU] @ cpu logical index
add r3, r3, lr, lsl #2
#endif
bl __cpu_suspend_save
@@ -81,11 +81,15 @@ ENDPROC(cpu_resume_after_mmu)
.data
.align
ENTRY(cpu_resume)
+ARM_BE8(setend be) @ ensure we are in BE mode
#ifdef CONFIG_SMP
+ mov r1, #0 @ fall-back logical index for UP
+ ALT_SMP(mrc p15, 0, r0, c0, c0, 5)
+ ALT_UP_B(1f)
+ bic r0, #0xff000000
+ bl cpu_logical_index @ return logical index in r1
+1:
adr r0, sleep_save_sp
- ALT_SMP(mrc p15, 0, r1, c0, c0, 5)
- ALT_UP(mov r1, #0)
- and r1, r1, #15
ldr r0, [r0, r1, lsl #2] @ stack phys addr
#else
ldr r0, sleep_save_sp @ stack phys addr
@@ -102,3 +106,20 @@ sleep_save_sp:
.rept CONFIG_NR_CPUS
.long 0 @ preserve stack phys ptr here
.endr
+
+#ifdef CONFIG_SMP
+cpu_logical_index:
+ adr r3, cpu_map_ptr
+ ldr r2, [r3]
+ add r3, r3, r2 @ virt_to_phys(__cpu_logical_map)
+ mov r1, #0
+1:
+ ldr r2, [r3, r1, lsl #2]
+ cmp r2, r0
+ moveq pc, lr
+ add r1, r1, #1
+ b 1b
+
+cpu_map_ptr:
+ .long __cpu_logical_map - .
+#endif
diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
index 5919eb451bb9..d6e3d1ca4ddf 100644
--- a/arch/arm/kernel/smp.c
+++ b/arch/arm/kernel/smp.c
@@ -46,6 +46,9 @@
#include <asm/virt.h>
#include <asm/mach/arch.h>
+#define CREATE_TRACE_POINTS
+#include <trace/events/arm-ipi.h>
+
/*
* as from 2.5, kernels no longer have an init_tasks structure
* so we need some other way of telling a new secondary core
@@ -57,7 +60,7 @@ struct secondary_data secondary_data;
* control for which core is the next to come out of the secondary
* boot "holding pen"
*/
-volatile int __cpuinitdata pen_release = -1;
+volatile int pen_release = -1;
enum ipi_msg_type {
IPI_WAKEUP,
@@ -66,6 +69,7 @@ enum ipi_msg_type {
IPI_CALL_FUNC,
IPI_CALL_FUNC_SINGLE,
IPI_CPU_STOP,
+ IPI_COMPLETION,
};
static DECLARE_COMPLETION(cpu_running);
@@ -87,8 +91,8 @@ int __cpuinit __cpu_up(unsigned int cpu, struct task_struct *idle)
* its stack and the page tables.
*/
secondary_data.stack = task_stack_page(idle) + THREAD_START_SP;
- secondary_data.pgdir = virt_to_phys(idmap_pgd);
- secondary_data.swapper_pg_dir = virt_to_phys(swapper_pg_dir);
+ secondary_data.pgdir = virt_to_idmap(idmap_pgd);
+ secondary_data.swapper_pg_dir = virt_to_idmap(swapper_pg_dir);
__cpuc_flush_dcache_area(&secondary_data, sizeof(secondary_data));
outer_clean_range(__pa(&secondary_data), __pa(&secondary_data + 1));
@@ -463,6 +467,7 @@ static const char *ipi_types[NR_IPI] = {
S(IPI_CALL_FUNC, "Function call interrupts"),
S(IPI_CALL_FUNC_SINGLE, "Single function call interrupts"),
S(IPI_CPU_STOP, "CPU stop interrupts"),
+ S(IPI_COMPLETION, "completion interrupts"),
};
void show_ipi_list(struct seq_file *p, int prec)
@@ -588,6 +593,19 @@ static void ipi_cpu_stop(unsigned int cpu)
cpu_relax();
}
+static DEFINE_PER_CPU(struct completion *, cpu_completion);
+
+int register_ipi_completion(struct completion *completion, int cpu)
+{
+ per_cpu(cpu_completion, cpu) = completion;
+ return IPI_COMPLETION;
+}
+
+static void ipi_complete(unsigned int cpu)
+{
+ complete(per_cpu(cpu_completion, cpu));
+}
+
/*
* Main handler for inter-processor interrupts
*/
@@ -604,6 +622,7 @@ void handle_IPI(int ipinr, struct pt_regs *regs)
if (ipinr < NR_IPI)
__inc_irq_stat(cpu, ipi_irqs[ipinr]);
+ trace_arm_ipi_entry(ipinr);
switch (ipinr) {
case IPI_WAKEUP:
break;
@@ -638,11 +657,18 @@ void handle_IPI(int ipinr, struct pt_regs *regs)
irq_exit();
break;
+ case IPI_COMPLETION:
+ irq_enter();
+ ipi_complete(cpu);
+ irq_exit();
+ break;
+
default:
printk(KERN_CRIT "CPU%u: Unknown IPI message 0x%x\n",
cpu, ipinr);
break;
}
+ trace_arm_ipi_exit(ipinr);
set_irq_regs(old_regs);
}
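
register_ipi_completion() above parks a struct completion pointer in a per-CPU slot so that the target CPU's IPI_COMPLETION handler can complete it. A user-space analogue of that hand-off, using a pthread mutex/condvar pair as the "completion" and a worker thread as the remote CPU; build with -pthread:

#include <pthread.h>
#include <stdio.h>

/* Minimal "completion" built from a mutex/condvar pair. */
struct completion {
	pthread_mutex_t lock;
	pthread_cond_t  wait;
	int done;
};

#define NR_CPUS 4
static struct completion *cpu_completion[NR_CPUS];	/* per-"CPU" slot */

static void complete(struct completion *c)
{
	pthread_mutex_lock(&c->lock);
	c->done = 1;
	pthread_cond_signal(&c->wait);
	pthread_mutex_unlock(&c->lock);
}

static void wait_for_completion(struct completion *c)
{
	pthread_mutex_lock(&c->lock);
	while (!c->done)
		pthread_cond_wait(&c->wait, &c->lock);
	pthread_mutex_unlock(&c->lock);
}

/* Stands in for the IPI_COMPLETION handler running on the target CPU. */
static void *remote_cpu(void *arg)
{
	int cpu = *(int *)arg;

	complete(cpu_completion[cpu]);
	return NULL;
}

int main(void)
{
	struct completion c = { PTHREAD_MUTEX_INITIALIZER,
				PTHREAD_COND_INITIALIZER, 0 };
	pthread_t t;
	int cpu = 1;

	cpu_completion[cpu] = &c;	/* register_ipi_completion() analogue */
	pthread_create(&t, NULL, remote_cpu, &cpu);
	wait_for_completion(&c);	/* caller waits for the remote side   */
	pthread_join(t, NULL);
	puts("completion received from cpu 1");
	return 0;
}
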
diff --git a/arch/arm/kernel/smp_scu.c b/arch/arm/kernel/smp_scu.c
index 5bc1a63284e3..1aafa0d785eb 100644
--- a/arch/arm/kernel/smp_scu.c
+++ b/arch/arm/kernel/smp_scu.c
@@ -28,7 +28,7 @@
*/
unsigned int __init scu_get_core_count(void __iomem *scu_base)
{
- unsigned int ncores = __raw_readl(scu_base + SCU_CONFIG);
+ unsigned int ncores = readl_relaxed(scu_base + SCU_CONFIG);
return (ncores & 0x03) + 1;
}
@@ -42,19 +42,19 @@ void scu_enable(void __iomem *scu_base)
#ifdef CONFIG_ARM_ERRATA_764369
/* Cortex-A9 only */
if ((read_cpuid_id() & 0xff0ffff0) == 0x410fc090) {
- scu_ctrl = __raw_readl(scu_base + 0x30);
+ scu_ctrl = readl_relaxed(scu_base + 0x30);
if (!(scu_ctrl & 1))
- __raw_writel(scu_ctrl | 0x1, scu_base + 0x30);
+ writel_relaxed(scu_ctrl | 0x1, scu_base + 0x30);
}
#endif
- scu_ctrl = __raw_readl(scu_base + SCU_CTRL);
+ scu_ctrl = readl_relaxed(scu_base + SCU_CTRL);
/* already enabled? */
if (scu_ctrl & 1)
return;
scu_ctrl |= 1;
- __raw_writel(scu_ctrl, scu_base + SCU_CTRL);
+ writel_relaxed(scu_ctrl, scu_base + SCU_CTRL);
/*
* Ensure that the data accessed by CPU0 before the SCU was
@@ -80,9 +80,9 @@ int scu_power_mode(void __iomem *scu_base, unsigned int mode)
if (mode > 3 || mode == 1 || cpu > 3)
return -EINVAL;
- val = __raw_readb(scu_base + SCU_CPU_STATUS + cpu) & ~0x03;
+ val = readb_relaxed(scu_base + SCU_CPU_STATUS + cpu) & ~0x03;
val |= mode;
- __raw_writeb(val, scu_base + SCU_CPU_STATUS + cpu);
+ writeb_relaxed(val, scu_base + SCU_CPU_STATUS + cpu);
return 0;
}
diff --git a/arch/arm/kernel/smp_tlb.c b/arch/arm/kernel/smp_tlb.c
index 9a52a07aa40e..a98b62dca2fa 100644
--- a/arch/arm/kernel/smp_tlb.c
+++ b/arch/arm/kernel/smp_tlb.c
@@ -103,7 +103,7 @@ static void broadcast_tlb_a15_erratum(void)
static void broadcast_tlb_mm_a15_erratum(struct mm_struct *mm)
{
- int cpu, this_cpu;
+ int this_cpu;
cpumask_t mask = { CPU_BITS_NONE };
if (!erratum_a15_798181())
@@ -111,21 +111,7 @@ static void broadcast_tlb_mm_a15_erratum(struct mm_struct *mm)
dummy_flush_tlb_a15_erratum();
this_cpu = get_cpu();
- for_each_online_cpu(cpu) {
- if (cpu == this_cpu)
- continue;
- /*
- * We only need to send an IPI if the other CPUs are running
- * the same ASID as the one being invalidated. There is no
- * need for locking around the active_asids check since the
- * switch_mm() function has at least one dmb() (as required by
- * this workaround) in case a context switch happens on
- * another CPU after the condition below.
- */
- if (atomic64_read(&mm->context.id) ==
- atomic64_read(&per_cpu(active_asids, cpu)))
- cpumask_set_cpu(cpu, &mask);
- }
+ a15_erratum_get_cpumask(this_cpu, mm, &mask);
smp_call_function_many(&mask, ipi_flush_tlb_a15_erratum, NULL, 1);
put_cpu();
}
diff --git a/arch/arm/kernel/smp_twd.c b/arch/arm/kernel/smp_twd.c
index 90525d9d290b..4971ccf012ca 100644
--- a/arch/arm/kernel/smp_twd.c
+++ b/arch/arm/kernel/smp_twd.c
@@ -45,7 +45,7 @@ static void twd_set_mode(enum clock_event_mode mode,
case CLOCK_EVT_MODE_PERIODIC:
ctrl = TWD_TIMER_CONTROL_ENABLE | TWD_TIMER_CONTROL_IT_ENABLE
| TWD_TIMER_CONTROL_PERIODIC;
- __raw_writel(DIV_ROUND_CLOSEST(twd_timer_rate, HZ),
+ writel_relaxed(DIV_ROUND_CLOSEST(twd_timer_rate, HZ),
twd_base + TWD_TIMER_LOAD);
break;
case CLOCK_EVT_MODE_ONESHOT:
@@ -58,18 +58,18 @@ static void twd_set_mode(enum clock_event_mode mode,
ctrl = 0;
}
- __raw_writel(ctrl, twd_base + TWD_TIMER_CONTROL);
+ writel_relaxed(ctrl, twd_base + TWD_TIMER_CONTROL);
}
static int twd_set_next_event(unsigned long evt,
struct clock_event_device *unused)
{
- unsigned long ctrl = __raw_readl(twd_base + TWD_TIMER_CONTROL);
+ unsigned long ctrl = readl_relaxed(twd_base + TWD_TIMER_CONTROL);
ctrl |= TWD_TIMER_CONTROL_ENABLE;
- __raw_writel(evt, twd_base + TWD_TIMER_COUNTER);
- __raw_writel(ctrl, twd_base + TWD_TIMER_CONTROL);
+ writel_relaxed(evt, twd_base + TWD_TIMER_COUNTER);
+ writel_relaxed(ctrl, twd_base + TWD_TIMER_CONTROL);
return 0;
}
@@ -82,8 +82,8 @@ static int twd_set_next_event(unsigned long evt,
*/
static int twd_timer_ack(void)
{
- if (__raw_readl(twd_base + TWD_TIMER_INTSTAT)) {
- __raw_writel(1, twd_base + TWD_TIMER_INTSTAT);
+ if (readl_relaxed(twd_base + TWD_TIMER_INTSTAT)) {
+ writel_relaxed(1, twd_base + TWD_TIMER_INTSTAT);
return 1;
}
@@ -120,7 +120,7 @@ static int twd_rate_change(struct notifier_block *nb,
* changing cpu.
*/
if (flags == POST_RATE_CHANGE)
- smp_call_function(twd_update_frequency,
+ on_each_cpu(twd_update_frequency,
(void *)&cnd->new_rate, 1);
return NOTIFY_OK;
@@ -209,15 +209,15 @@ static void __cpuinit twd_calibrate_rate(void)
waitjiffies += 5;
/* enable, no interrupt or reload */
- __raw_writel(0x1, twd_base + TWD_TIMER_CONTROL);
+ writel_relaxed(0x1, twd_base + TWD_TIMER_CONTROL);
/* maximum value */
- __raw_writel(0xFFFFFFFFU, twd_base + TWD_TIMER_COUNTER);
+ writel_relaxed(0xFFFFFFFFU, twd_base + TWD_TIMER_COUNTER);
while (get_jiffies_64() < waitjiffies)
udelay(10);
- count = __raw_readl(twd_base + TWD_TIMER_COUNTER);
+ count = readl_relaxed(twd_base + TWD_TIMER_COUNTER);
twd_timer_rate = (0xFFFFFFFFU - count) * (HZ / 5);
@@ -275,7 +275,7 @@ static int __cpuinit twd_timer_setup(struct clock_event_device *clk)
* bother with the below.
*/
if (per_cpu(percpu_setup_called, cpu)) {
- __raw_writel(0, twd_base + TWD_TIMER_CONTROL);
+ writel_relaxed(0, twd_base + TWD_TIMER_CONTROL);
clockevents_register_device(*__this_cpu_ptr(twd_evt));
enable_percpu_irq(clk->irq, 0);
return 0;
@@ -288,7 +288,7 @@ static int __cpuinit twd_timer_setup(struct clock_event_device *clk)
* The following is done once per CPU the first time .setup() is
* called.
*/
- __raw_writel(0, twd_base + TWD_TIMER_CONTROL);
+ writel_relaxed(0, twd_base + TWD_TIMER_CONTROL);
clk->name = "local_timer";
clk->features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT |
diff --git a/arch/arm/kernel/stacktrace.c b/arch/arm/kernel/stacktrace.c
index 00f79e59985b..6582c4adc182 100644
--- a/arch/arm/kernel/stacktrace.c
+++ b/arch/arm/kernel/stacktrace.c
@@ -31,7 +31,7 @@ int notrace unwind_frame(struct stackframe *frame)
high = ALIGN(low, THREAD_SIZE);
/* check current frame pointer is within bounds */
- if (fp < (low + 12) || fp + 4 >= high)
+ if (fp < low + 12 || fp > high - 4)
return -EINVAL;
/* restore the registers from the stack frame */
@@ -83,13 +83,16 @@ static int save_trace(struct stackframe *frame, void *d)
return trace->nr_entries >= trace->max_entries;
}
-void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
+/* This must be noinline so that our skip calculation works correctly */
+static noinline void __save_stack_trace(struct task_struct *tsk,
+ struct stack_trace *trace, unsigned int nosched)
{
struct stack_trace_data data;
struct stackframe frame;
data.trace = trace;
data.skip = trace->skip;
+ data.no_sched_functions = nosched;
if (tsk != current) {
#ifdef CONFIG_SMP
@@ -102,7 +105,6 @@ void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
trace->entries[trace->nr_entries++] = ULONG_MAX;
return;
#else
- data.no_sched_functions = 1;
frame.fp = thread_saved_fp(tsk);
frame.sp = thread_saved_sp(tsk);
frame.lr = 0; /* recovered from the stack */
@@ -111,11 +113,12 @@ void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
} else {
register unsigned long current_sp asm ("sp");
- data.no_sched_functions = 0;
+		/* We don't want this function or its caller in the trace */
+ data.skip += 2;
frame.fp = (unsigned long)__builtin_frame_address(0);
frame.sp = current_sp;
frame.lr = (unsigned long)__builtin_return_address(0);
- frame.pc = (unsigned long)save_stack_trace_tsk;
+ frame.pc = (unsigned long)__save_stack_trace;
}
walk_stackframe(&frame, save_trace, &data);
@@ -123,9 +126,14 @@ void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
trace->entries[trace->nr_entries++] = ULONG_MAX;
}
+void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
+{
+ __save_stack_trace(tsk, trace, 1);
+}
+
void save_stack_trace(struct stack_trace *trace)
{
- save_stack_trace_tsk(current, trace);
+ __save_stack_trace(current, trace, 0);
}
EXPORT_SYMBOL_GPL(save_stack_trace);
#endif
diff --git a/arch/arm/kernel/topology.c b/arch/arm/kernel/topology.c
index c5a59546a256..677da58d9e88 100644
--- a/arch/arm/kernel/topology.c
+++ b/arch/arm/kernel/topology.c
@@ -23,6 +23,7 @@
#include <linux/slab.h>
#include <asm/cputype.h>
+#include <asm/smp_plat.h>
#include <asm/topology.h>
/*
@@ -289,6 +290,140 @@ void store_cpu_topology(unsigned int cpuid)
cpu_topology[cpuid].socket_id, mpidr);
}
+
+#ifdef CONFIG_SCHED_HMP
+
+static const char * const little_cores[] = {
+ "arm,cortex-a7",
+ NULL,
+};
+
+static bool is_little_cpu(struct device_node *cn)
+{
+ const char * const *lc;
+ for (lc = little_cores; *lc; lc++)
+ if (of_device_is_compatible(cn, *lc))
+ return true;
+ return false;
+}
+
+void __init arch_get_fast_and_slow_cpus(struct cpumask *fast,
+ struct cpumask *slow)
+{
+ struct device_node *cn = NULL;
+ int cpu;
+
+ cpumask_clear(fast);
+ cpumask_clear(slow);
+
+ /*
+ * Use the config options if they are given. This helps testing
+ * HMP scheduling on systems without a big.LITTLE architecture.
+ */
+ if (strlen(CONFIG_HMP_FAST_CPU_MASK) && strlen(CONFIG_HMP_SLOW_CPU_MASK)) {
+ if (cpulist_parse(CONFIG_HMP_FAST_CPU_MASK, fast))
+ WARN(1, "Failed to parse HMP fast cpu mask!\n");
+ if (cpulist_parse(CONFIG_HMP_SLOW_CPU_MASK, slow))
+ WARN(1, "Failed to parse HMP slow cpu mask!\n");
+ return;
+ }
+
+ /*
+ * Else, parse device tree for little cores.
+ */
+ while ((cn = of_find_node_by_type(cn, "cpu"))) {
+
+ const u32 *mpidr;
+ int len;
+
+ mpidr = of_get_property(cn, "reg", &len);
+ if (!mpidr || len != 4) {
+ pr_err("* %s missing reg property\n", cn->full_name);
+ continue;
+ }
+
+ cpu = get_logical_index(be32_to_cpup(mpidr));
+ if (cpu == -EINVAL) {
+ pr_err("couldn't get logical index for mpidr %x\n",
+ be32_to_cpup(mpidr));
+ break;
+ }
+
+ if (is_little_cpu(cn))
+ cpumask_set_cpu(cpu, slow);
+ else
+ cpumask_set_cpu(cpu, fast);
+ }
+
+ if (!cpumask_empty(fast) && !cpumask_empty(slow))
+ return;
+
+ /*
+	 * We didn't find both big and little cores, so call all cores fast;
+	 * this keeps the system running, with all cores treated equally.
+ */
+ cpumask_setall(fast);
+ cpumask_clear(slow);
+}
+
+struct cpumask hmp_slow_cpu_mask;
+
+void __init arch_get_hmp_domains(struct list_head *hmp_domains_list)
+{
+ struct cpumask hmp_fast_cpu_mask;
+ struct hmp_domain *domain;
+
+ arch_get_fast_and_slow_cpus(&hmp_fast_cpu_mask, &hmp_slow_cpu_mask);
+
+ /*
+ * Initialize hmp_domains
+ * Must be ordered with respect to compute capacity.
+ * Fastest domain at head of list.
+ */
+	if (!cpumask_empty(&hmp_slow_cpu_mask)) {
+ domain = (struct hmp_domain *)
+ kmalloc(sizeof(struct hmp_domain), GFP_KERNEL);
+ cpumask_copy(&domain->possible_cpus, &hmp_slow_cpu_mask);
+ cpumask_and(&domain->cpus, cpu_online_mask, &domain->possible_cpus);
+ list_add(&domain->hmp_domains, hmp_domains_list);
+ }
+ domain = (struct hmp_domain *)
+ kmalloc(sizeof(struct hmp_domain), GFP_KERNEL);
+ cpumask_copy(&domain->possible_cpus, &hmp_fast_cpu_mask);
+ cpumask_and(&domain->cpus, cpu_online_mask, &domain->possible_cpus);
+ list_add(&domain->hmp_domains, hmp_domains_list);
+}
+#endif /* CONFIG_SCHED_HMP */
+
+
+/*
+ * cluster_to_logical_mask - return cpu logical mask of CPUs in a cluster
+ * @socket_id: cluster HW identifier
+ * @cluster_mask: the cpumask location to be initialized, modified by the
+ * function only if return value == 0
+ *
+ * Return:
+ *
+ * 0 on success
+ * -EINVAL if cluster_mask is NULL or there is no record matching socket_id
+ */
+int cluster_to_logical_mask(unsigned int socket_id, cpumask_t *cluster_mask)
+{
+ int cpu;
+
+ if (!cluster_mask)
+ return -EINVAL;
+
+ for_each_online_cpu(cpu)
+ if (socket_id == topology_physical_package_id(cpu)) {
+ cpumask_copy(cluster_mask, topology_core_cpumask(cpu));
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
/*
 * init_cpu_topology is called at boot when only one cpu is running,
 * which prevents simultaneous write access to the cpu_topology array
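
cluster_to_logical_mask() above lets the PMU probe code (see the perf_event_cpu.c hunk earlier) turn a DT cluster id into the set of logical CPUs belonging to that cluster. A user-space model of the lookup, using a plain bitmask and a fixed logical-cpu-to-socket table; the table contents are illustrative:

#include <stdio.h>

#define NR_CPUS 8

/* Illustrative mapping: CPUs 0-3 in cluster 0, CPUs 4-7 in cluster 1. */
static const unsigned cpu_socket_id[NR_CPUS] = { 0, 0, 0, 0, 1, 1, 1, 1 };

/* Returns 0 and fills *mask with the CPUs of socket_id, or -1 if none match. */
static int cluster_to_logical_mask(unsigned socket_id, unsigned long *mask)
{
	unsigned cpu;
	unsigned long m = 0;

	if (!mask)
		return -1;

	for (cpu = 0; cpu < NR_CPUS; cpu++)
		if (cpu_socket_id[cpu] == socket_id)
			m |= 1ul << cpu;

	if (!m)
		return -1;
	*mask = m;
	return 0;
}

int main(void)
{
	unsigned long mask;

	if (!cluster_to_logical_mask(1, &mask))
		printf("cluster 1 -> cpu mask 0x%lx\n", mask);	/* 0xf0 */
	return 0;
}
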
diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c
index 18b32e8e4497..b4fd850c34b2 100644
--- a/arch/arm/kernel/traps.c
+++ b/arch/arm/kernel/traps.c
@@ -34,10 +34,15 @@
#include <asm/unwind.h>
#include <asm/tls.h>
#include <asm/system_misc.h>
-
-#include "signal.h"
-
-static const char *handler[]= { "prefetch abort", "data abort", "address exception", "interrupt" };
+#include <asm/opcodes.h>
+
+static const char *handler[]= {
+ "prefetch abort",
+ "data abort",
+ "address exception",
+ "interrupt",
+ "undefined instruction",
+};
void *vectors_page;
@@ -343,15 +348,17 @@ void arm_notify_die(const char *str, struct pt_regs *regs,
int is_valid_bugaddr(unsigned long pc)
{
#ifdef CONFIG_THUMB2_KERNEL
- unsigned short bkpt;
+ u16 bkpt;
+ u16 insn = __opcode_to_mem_thumb16(BUG_INSTR_VALUE);
#else
- unsigned long bkpt;
+ u32 bkpt;
+ u32 insn = __opcode_to_mem_arm(BUG_INSTR_VALUE);
#endif
if (probe_kernel_address((unsigned *)pc, bkpt))
return 0;
- return bkpt == BUG_INSTR_VALUE;
+ return bkpt == insn;
}
#endif
@@ -404,25 +411,28 @@ asmlinkage void __exception do_undefinstr(struct pt_regs *regs)
if (processor_mode(regs) == SVC_MODE) {
#ifdef CONFIG_THUMB2_KERNEL
if (thumb_mode(regs)) {
- instr = ((u16 *)pc)[0];
+ instr = __mem_to_opcode_thumb16(((u16 *)pc)[0]);
if (is_wide_instruction(instr)) {
- instr <<= 16;
- instr |= ((u16 *)pc)[1];
+ u16 inst2;
+ inst2 = __mem_to_opcode_thumb16(((u16 *)pc)[1]);
+ instr = __opcode_thumb32_compose(instr, inst2);
}
} else
#endif
- instr = *(u32 *) pc;
+ instr = __mem_to_opcode_arm(*(u32 *) pc);
} else if (thumb_mode(regs)) {
if (get_user(instr, (u16 __user *)pc))
goto die_sig;
+ instr = __mem_to_opcode_thumb16(instr);
if (is_wide_instruction(instr)) {
unsigned int instr2;
if (get_user(instr2, (u16 __user *)pc+1))
goto die_sig;
- instr <<= 16;
- instr |= instr2;
+ instr2 = __mem_to_opcode_thumb16(instr2);
+ instr = __opcode_thumb32_compose(instr, instr2);
}
} else if (get_user(instr, (u32 __user *)pc)) {
 		goto die_sig;
+	} else {
+		instr = __mem_to_opcode_arm(instr);
 	}
@@ -800,47 +810,55 @@ void __init trap_init(void)
return;
}
-static void __init kuser_get_tls_init(unsigned long vectors)
+#ifdef CONFIG_KUSER_HELPERS
+static void __init kuser_init(void *vectors)
{
+ extern char __kuser_helper_start[], __kuser_helper_end[];
+ int kuser_sz = __kuser_helper_end - __kuser_helper_start;
+
+ memcpy(vectors + 0x1000 - kuser_sz, __kuser_helper_start, kuser_sz);
+
/*
* vectors + 0xfe0 = __kuser_get_tls
* vectors + 0xfe8 = hardware TLS instruction at 0xffff0fe8
*/
if (tls_emu || has_tls_reg)
- memcpy((void *)vectors + 0xfe0, (void *)vectors + 0xfe8, 4);
+ memcpy(vectors + 0xfe0, vectors + 0xfe8, 4);
}
+#else
+static void __init kuser_init(void *vectors)
+{
+}
+#endif
void __init early_trap_init(void *vectors_base)
{
unsigned long vectors = (unsigned long)vectors_base;
extern char __stubs_start[], __stubs_end[];
extern char __vectors_start[], __vectors_end[];
- extern char __kuser_helper_start[], __kuser_helper_end[];
- int kuser_sz = __kuser_helper_end - __kuser_helper_start;
+ unsigned i;
vectors_page = vectors_base;
/*
+ * Poison the vectors page with an undefined instruction. This
+ * instruction is chosen to be undefined for both ARM and Thumb
+ * ISAs. The Thumb version is an undefined instruction with a
+ * branch back to the undefined instruction.
+ */
+ for (i = 0; i < PAGE_SIZE / sizeof(u32); i++)
+ ((u32 *)vectors_base)[i] = 0xe7fddef1;
+
+ /*
* Copy the vectors, stubs and kuser helpers (in entry-armv.S)
* into the vector page, mapped at 0xffff0000, and ensure these
* are visible to the instruction stream.
*/
memcpy((void *)vectors, __vectors_start, __vectors_end - __vectors_start);
- memcpy((void *)vectors + 0x200, __stubs_start, __stubs_end - __stubs_start);
- memcpy((void *)vectors + 0x1000 - kuser_sz, __kuser_helper_start, kuser_sz);
+ memcpy((void *)vectors + 0x1000, __stubs_start, __stubs_end - __stubs_start);
- /*
- * Do processor specific fixups for the kuser helpers
- */
- kuser_get_tls_init(vectors);
-
- /*
- * Copy signal return handlers into the vector page, and
- * set sigreturn to be a pointer to these.
- */
- memcpy((void *)(vectors + KERN_SIGRETURN_CODE - CONFIG_VECTORS_BASE),
- sigreturn_codes, sizeof(sigreturn_codes));
+ kuser_init(vectors_base);
- flush_icache_range(vectors, vectors + PAGE_SIZE);
+ flush_icache_range(vectors, vectors + PAGE_SIZE * 2);
modify_domain(DOMAIN_USER, DOMAIN_CLIENT);
}
diff --git a/arch/arm/kernel/vmlinux.lds.S b/arch/arm/kernel/vmlinux.lds.S
index a871b8e00fca..33f2ea32f5a0 100644
--- a/arch/arm/kernel/vmlinux.lds.S
+++ b/arch/arm/kernel/vmlinux.lds.S
@@ -152,6 +152,23 @@ SECTIONS
. = ALIGN(PAGE_SIZE);
__init_begin = .;
#endif
+ /*
+ * The vectors and stubs are relocatable code, and the
+ * only thing that matters is their relative offsets
+ */
+ __vectors_start = .;
+ .vectors 0 : AT(__vectors_start) {
+ *(.vectors)
+ }
+ . = __vectors_start + SIZEOF(.vectors);
+ __vectors_end = .;
+
+ __stubs_start = .;
+ .stubs 0x1000 : AT(__stubs_start) {
+ *(.stubs)
+ }
+ . = __stubs_start + SIZEOF(.stubs);
+ __stubs_end = .;
INIT_TEXT_SECTION(8)
.exit.text : {