Diffstat (limited to 'arch/arm/kernel')
-rw-r--r--  arch/arm/kernel/Makefile             5
-rw-r--r--  arch/arm/kernel/asm-offsets.c       15
-rw-r--r--  arch/arm/kernel/devtree.c            2
-rw-r--r--  arch/arm/kernel/entry-armv.S       103
-rw-r--r--  arch/arm/kernel/fiq.c               19
-rw-r--r--  arch/arm/kernel/perf_event.c        11
-rw-r--r--  arch/arm/kernel/perf_event_cpu.c    64
-rw-r--r--  arch/arm/kernel/process.c           46
-rw-r--r--  arch/arm/kernel/psci.c             197
-rw-r--r--  arch/arm/kernel/psci_smp.c         116
-rw-r--r--  arch/arm/kernel/setup.c              7
-rw-r--r--  arch/arm/kernel/signal.c            52
-rw-r--r--  arch/arm/kernel/signal.h            12
-rw-r--r--  arch/arm/kernel/smp.c                4
-rw-r--r--  arch/arm/kernel/smp_tlb.c           18
-rw-r--r--  arch/arm/kernel/smp_twd.c            2
-rw-r--r--  arch/arm/kernel/traps.c             46
-rw-r--r--  arch/arm/kernel/vmlinux.lds.S       17
18 files changed, 534 insertions, 202 deletions
diff --git a/arch/arm/kernel/Makefile b/arch/arm/kernel/Makefile
index 5f3338eacad2..dd9d90ab65d0 100644
--- a/arch/arm/kernel/Makefile
+++ b/arch/arm/kernel/Makefile
@@ -82,6 +82,9 @@ obj-$(CONFIG_DEBUG_LL) += debug.o
obj-$(CONFIG_EARLY_PRINTK) += early_printk.o
obj-$(CONFIG_ARM_VIRT_EXT) += hyp-stub.o
-obj-$(CONFIG_ARM_PSCI) += psci.o
+ifeq ($(CONFIG_ARM_PSCI),y)
+obj-y += psci.o
+obj-$(CONFIG_SMP) += psci_smp.o
+endif
extra-y := $(head-y) vmlinux.lds
diff --git a/arch/arm/kernel/asm-offsets.c b/arch/arm/kernel/asm-offsets.c
index ee68cce6b48e..776d9186e9c1 100644
--- a/arch/arm/kernel/asm-offsets.c
+++ b/arch/arm/kernel/asm-offsets.c
@@ -168,6 +168,7 @@ int main(void)
DEFINE(VCPU_FIQ_REGS, offsetof(struct kvm_vcpu, arch.regs.fiq_regs));
DEFINE(VCPU_PC, offsetof(struct kvm_vcpu, arch.regs.usr_regs.ARM_pc));
DEFINE(VCPU_CPSR, offsetof(struct kvm_vcpu, arch.regs.usr_regs.ARM_cpsr));
+ DEFINE(VCPU_HCR, offsetof(struct kvm_vcpu, arch.hcr));
DEFINE(VCPU_IRQ_LINES, offsetof(struct kvm_vcpu, arch.irq_lines));
DEFINE(VCPU_HSR, offsetof(struct kvm_vcpu, arch.fault.hsr));
DEFINE(VCPU_HxFAR, offsetof(struct kvm_vcpu, arch.fault.hxfar));
@@ -175,13 +176,13 @@ int main(void)
DEFINE(VCPU_HYP_PC, offsetof(struct kvm_vcpu, arch.fault.hyp_pc));
#ifdef CONFIG_KVM_ARM_VGIC
DEFINE(VCPU_VGIC_CPU, offsetof(struct kvm_vcpu, arch.vgic_cpu));
- DEFINE(VGIC_CPU_HCR, offsetof(struct vgic_cpu, vgic_hcr));
- DEFINE(VGIC_CPU_VMCR, offsetof(struct vgic_cpu, vgic_vmcr));
- DEFINE(VGIC_CPU_MISR, offsetof(struct vgic_cpu, vgic_misr));
- DEFINE(VGIC_CPU_EISR, offsetof(struct vgic_cpu, vgic_eisr));
- DEFINE(VGIC_CPU_ELRSR, offsetof(struct vgic_cpu, vgic_elrsr));
- DEFINE(VGIC_CPU_APR, offsetof(struct vgic_cpu, vgic_apr));
- DEFINE(VGIC_CPU_LR, offsetof(struct vgic_cpu, vgic_lr));
+ DEFINE(VGIC_V2_CPU_HCR, offsetof(struct vgic_cpu, vgic_v2.vgic_hcr));
+ DEFINE(VGIC_V2_CPU_VMCR, offsetof(struct vgic_cpu, vgic_v2.vgic_vmcr));
+ DEFINE(VGIC_V2_CPU_MISR, offsetof(struct vgic_cpu, vgic_v2.vgic_misr));
+ DEFINE(VGIC_V2_CPU_EISR, offsetof(struct vgic_cpu, vgic_v2.vgic_eisr));
+ DEFINE(VGIC_V2_CPU_ELRSR, offsetof(struct vgic_cpu, vgic_v2.vgic_elrsr));
+ DEFINE(VGIC_V2_CPU_APR, offsetof(struct vgic_cpu, vgic_v2.vgic_apr));
+ DEFINE(VGIC_V2_CPU_LR, offsetof(struct vgic_cpu, vgic_v2.vgic_lr));
DEFINE(VGIC_CPU_NR_LR, offsetof(struct vgic_cpu, nr_lr));
#ifdef CONFIG_KVM_ARM_TIMER
DEFINE(VCPU_TIMER_CNTV_CTL, offsetof(struct kvm_vcpu, arch.timer_cpu.cntv_ctl));
diff --git a/arch/arm/kernel/devtree.c b/arch/arm/kernel/devtree.c
index 5859c8bc727c..a44e7d11ab02 100644
--- a/arch/arm/kernel/devtree.c
+++ b/arch/arm/kernel/devtree.c
@@ -212,7 +212,7 @@ struct machine_desc * __init setup_machine_fdt(unsigned int dt_phys)
}
if (!mdesc_best) {
const char *prop;
- long size;
+ int size;
early_print("\nError: unrecognized/unsupported "
"device tree compatible list:\n[ ");
diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S
index 582b405befc5..d43c7e54ec6c 100644
--- a/arch/arm/kernel/entry-armv.S
+++ b/arch/arm/kernel/entry-armv.S
@@ -741,6 +741,18 @@ ENDPROC(__switch_to)
#endif
.endm
+ .macro kuser_pad, sym, size
+ .if (. - \sym) & 3
+ .rept 4 - (. - \sym) & 3
+ .byte 0
+ .endr
+ .endif
+ .rept (\size - (. - \sym)) / 4
+ .word 0xe7fddef1
+ .endr
+ .endm
+
+#ifdef CONFIG_KUSER_HELPERS
.align 5
.globl __kuser_helper_start
__kuser_helper_start:
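
Note: the kuser_pad macro above replaces the old ad-hoc ".rept" padding so
that every helper keeps its fixed, ABI-visible entry address. A rough C model
of the padding it emits (a sketch; the names are illustrative, not kernel
code):

    /* bytes already used in a slot -> number of 0xe7fddef1 poison words */
    unsigned kuser_pad_words(unsigned used, unsigned slot_size)
    {
        used = (used + 3) & ~3u;        /* zero bytes up to a word boundary */
        return (slot_size - used) / 4;  /* rest is filled with poison words */
    }

With the slot sizes used below (64, 32, 32, 16), the helpers stay at their
documented entry points 0xffff0fa0, 0xffff0fc0 and 0xffff0fe0, with
__kuser_cmpxchg64 at 0xffff0f60 by the same arithmetic.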
@@ -831,18 +843,13 @@ kuser_cmpxchg64_fixup:
#error "incoherent kernel configuration"
#endif
- /* pad to next slot */
- .rept (16 - (. - __kuser_cmpxchg64)/4)
- .word 0
- .endr
-
- .align 5
+ kuser_pad __kuser_cmpxchg64, 64
__kuser_memory_barrier: @ 0xffff0fa0
smp_dmb arm
usr_ret lr
- .align 5
+ kuser_pad __kuser_memory_barrier, 32
__kuser_cmpxchg: @ 0xffff0fc0
@@ -915,13 +922,14 @@ kuser_cmpxchg32_fixup:
#endif
- .align 5
+ kuser_pad __kuser_cmpxchg, 32
__kuser_get_tls: @ 0xffff0fe0
ldr r0, [pc, #(16 - 8)] @ read TLS, set in kuser_get_tls_init
usr_ret lr
mrc p15, 0, r0, c13, c0, 3 @ 0xffff0fe8 hardware TLS code
- .rep 4
+ kuser_pad __kuser_get_tls, 16
+ .rep 3
.word 0 @ 0xffff0ff0 software TLS value, then
.endr @ pad up to __kuser_helper_version
@@ -931,14 +939,16 @@ __kuser_helper_version: @ 0xffff0ffc
.globl __kuser_helper_end
__kuser_helper_end:
+#endif
+
THUMB( .thumb )
/*
* Vector stubs.
*
- * This code is copied to 0xffff0200 so we can use branches in the
- * vectors, rather than ldr's. Note that this code must not
- * exceed 0x300 bytes.
+ * This code is copied to 0xffff1000 so we can use branches in the
+ * vectors, rather than ldr's. Note that this code must not exceed
+ * a page size.
*
* Common stub entry macro:
* Enter in IRQ mode, spsr = SVC/USR CPSR, lr = SVC/USR PC
@@ -985,8 +995,17 @@ ENDPROC(vector_\name)
1:
.endm
- .globl __stubs_start
+ .section .stubs, "ax", %progbits
__stubs_start:
+ @ This must be the first word
+ .word vector_swi
+
+vector_rst:
+ ARM( swi SYS_ERROR0 )
+ THUMB( svc #0 )
+ THUMB( nop )
+ b vector_und
+
/*
* Interrupt dispatcher
*/
@@ -1081,6 +1100,16 @@ __stubs_start:
.align 5
/*=============================================================================
+ * Address exception handler
+ *-----------------------------------------------------------------------------
+ * These aren't too critical.
+ * (they're not supposed to happen, and won't happen in 32-bit data mode).
+ */
+
+vector_addrexcptn:
+ b vector_addrexcptn
+
+/*=============================================================================
* Undefined FIQs
*-----------------------------------------------------------------------------
* Enter in FIQ mode, spsr = ANY CPSR, lr = ANY PC
@@ -1093,45 +1122,19 @@ __stubs_start:
vector_fiq:
subs pc, lr, #4
-/*=============================================================================
- * Address exception handler
- *-----------------------------------------------------------------------------
- * These aren't too critical.
- * (they're not supposed to happen, and won't happen in 32-bit data mode).
- */
-
-vector_addrexcptn:
- b vector_addrexcptn
-
-/*
- * We group all the following data together to optimise
- * for CPUs with separate I & D caches.
- */
- .align 5
-
-.LCvswi:
- .word vector_swi
-
- .globl __stubs_end
-__stubs_end:
-
- .equ stubs_offset, __vectors_start + 0x200 - __stubs_start
+ .globl vector_fiq_offset
+ .equ vector_fiq_offset, vector_fiq
- .globl __vectors_start
+ .section .vectors, "ax", %progbits
__vectors_start:
- ARM( swi SYS_ERROR0 )
- THUMB( svc #0 )
- THUMB( nop )
- W(b) vector_und + stubs_offset
- W(ldr) pc, .LCvswi + stubs_offset
- W(b) vector_pabt + stubs_offset
- W(b) vector_dabt + stubs_offset
- W(b) vector_addrexcptn + stubs_offset
- W(b) vector_irq + stubs_offset
- W(b) vector_fiq + stubs_offset
-
- .globl __vectors_end
-__vectors_end:
+ W(b) vector_rst
+ W(b) vector_und
+ W(ldr) pc, __vectors_start + 0x1000
+ W(b) vector_pabt
+ W(b) vector_dabt
+ W(b) vector_addrexcptn
+ W(b) vector_irq
+ W(b) vector_fiq
.data
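
Note on the new layout: the .vectors section is copied to 0xffff0000 and the
.stubs section to 0xffff1000 (see early_trap_init in traps.c further down).
The SWI entry uses an ldr rather than a branch because vector_swi stays in
.text and is not copied, so the table instead loads its absolute address from
the first word of the stubs page ("This must be the first word" above). A
sketch of the addressing arithmetic:

    /* "W(ldr) pc, __vectors_start + 0x1000" sits at vector offset 0x8,  */
    /* so the literal offset = 0x1000 - (0x8 + 8) = 0xff0 bytes, well    */
    /* inside the +/-4KB range of a PC-relative ldr.                     */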
diff --git a/arch/arm/kernel/fiq.c b/arch/arm/kernel/fiq.c
index 2adda11f712f..25442f451148 100644
--- a/arch/arm/kernel/fiq.c
+++ b/arch/arm/kernel/fiq.c
@@ -47,6 +47,11 @@
#include <asm/irq.h>
#include <asm/traps.h>
+#define FIQ_OFFSET ({ \
+ extern void *vector_fiq_offset; \
+ (unsigned)&vector_fiq_offset; \
+ })
+
static unsigned long no_fiq_insn;
/* Default reacquire function
@@ -80,13 +85,16 @@ int show_fiq_list(struct seq_file *p, int prec)
void set_fiq_handler(void *start, unsigned int length)
{
#if defined(CONFIG_CPU_USE_DOMAINS)
- memcpy((void *)0xffff001c, start, length);
+ void *base = (void *)0xffff0000;
#else
- memcpy(vectors_page + 0x1c, start, length);
+ void *base = vectors_page;
#endif
- flush_icache_range(0xffff001c, 0xffff001c + length);
+ unsigned offset = FIQ_OFFSET;
+
+ memcpy(base + offset, start, length);
+ flush_icache_range(0xffff0000 + offset, 0xffff0000 + offset + length);
if (!vectors_high())
- flush_icache_range(0x1c, 0x1c + length);
+ flush_icache_range(offset, offset + length);
}
int claim_fiq(struct fiq_handler *f)
@@ -144,6 +152,7 @@ EXPORT_SYMBOL(disable_fiq);
void __init init_FIQ(int start)
{
- no_fiq_insn = *(unsigned long *)0xffff001c;
+ unsigned offset = FIQ_OFFSET;
+ no_fiq_insn = *(unsigned long *)(0xffff0000 + offset);
fiq_start = start;
}
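
Note: FIQ_OFFSET relies on a common kernel idiom for reading an assembler
symbol's value from C: the symbol has no storage, so it is declared as an
extern object and its "address" taken. Because the .stubs section is linked
at VMA 0x1000 (see the vmlinux.lds.S change below), vector_fiq_offset already
includes the stubs-page offset, which is why adding it to 0xffff0000 lands in
the page copied to 0xffff1000. A minimal sketch of the idiom, using a
hypothetical symbol name:

    extern char some_absolute_symbol;   /* defined with .equ in assembly */
    unsigned long value = (unsigned long)&some_absolute_symbol;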
diff --git a/arch/arm/kernel/perf_event.c b/arch/arm/kernel/perf_event.c
index 8c3094d0f7b7..e19edc6f2d15 100644
--- a/arch/arm/kernel/perf_event.c
+++ b/arch/arm/kernel/perf_event.c
@@ -53,7 +53,12 @@ armpmu_map_cache_event(const unsigned (*cache_map)
static int
armpmu_map_hw_event(const unsigned (*event_map)[PERF_COUNT_HW_MAX], u64 config)
{
- int mapping = (*event_map)[config];
+ int mapping;
+
+ if (config >= PERF_COUNT_HW_MAX)
+ return -ENOENT;
+
+ mapping = (*event_map)[config];
return mapping == HW_OP_UNSUPPORTED ? -ENOENT : mapping;
}
@@ -253,6 +258,9 @@ validate_event(struct pmu_hw_events *hw_events,
struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
struct pmu *leader_pmu = event->group_leader->pmu;
+ if (is_software_event(event))
+ return 1;
+
if (event->pmu != leader_pmu || event->state < PERF_EVENT_STATE_OFF)
return 1;
@@ -569,6 +577,7 @@ perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs)
return;
}
+ perf_callchain_store(entry, regs->ARM_pc);
tail = (struct frame_tail __user *)regs->ARM_fp - 1;
while ((entry->nr < PERF_MAX_STACK_DEPTH) &&
diff --git a/arch/arm/kernel/perf_event_cpu.c b/arch/arm/kernel/perf_event_cpu.c
index 1f2740e3dbc0..0e9609657c79 100644
--- a/arch/arm/kernel/perf_event_cpu.c
+++ b/arch/arm/kernel/perf_event_cpu.c
@@ -201,49 +201,39 @@ static struct platform_device_id cpu_pmu_plat_device_ids[] = {
static int probe_current_pmu(struct arm_pmu *pmu)
{
int cpu = get_cpu();
- unsigned long implementor = read_cpuid_implementor();
- unsigned long part_number = read_cpuid_part_number();
int ret = -ENODEV;
pr_info("probing PMU on CPU %d\n", cpu);
+ switch (read_cpuid_part()) {
/* ARM Ltd CPUs. */
- if (implementor == ARM_CPU_IMP_ARM) {
- switch (part_number) {
- case ARM_CPU_PART_ARM1136:
- case ARM_CPU_PART_ARM1156:
- case ARM_CPU_PART_ARM1176:
- ret = armv6pmu_init(pmu);
- break;
- case ARM_CPU_PART_ARM11MPCORE:
- ret = armv6mpcore_pmu_init(pmu);
- break;
- case ARM_CPU_PART_CORTEX_A8:
- ret = armv7_a8_pmu_init(pmu);
- break;
- case ARM_CPU_PART_CORTEX_A9:
- ret = armv7_a9_pmu_init(pmu);
- break;
- case ARM_CPU_PART_CORTEX_A5:
- ret = armv7_a5_pmu_init(pmu);
- break;
- case ARM_CPU_PART_CORTEX_A15:
- ret = armv7_a15_pmu_init(pmu);
- break;
- case ARM_CPU_PART_CORTEX_A7:
- ret = armv7_a7_pmu_init(pmu);
- break;
- }
- /* Intel CPUs [xscale]. */
- } else if (implementor == ARM_CPU_IMP_INTEL) {
- switch (xscale_cpu_arch_version()) {
- case ARM_CPU_XSCALE_ARCH_V1:
- ret = xscale1pmu_init(pmu);
- break;
- case ARM_CPU_XSCALE_ARCH_V2:
- ret = xscale2pmu_init(pmu);
- break;
+ case ARM_CPU_PART_ARM1136:
+ case ARM_CPU_PART_ARM1156:
+ case ARM_CPU_PART_ARM1176:
+ ret = armv6pmu_init(pmu);
+ break;
+ case ARM_CPU_PART_ARM11MPCORE:
+ ret = armv6mpcore_pmu_init(pmu);
+ break;
+ case ARM_CPU_PART_CORTEX_A8:
+ ret = armv7_a8_pmu_init(pmu);
+ break;
+ case ARM_CPU_PART_CORTEX_A9:
+ ret = armv7_a9_pmu_init(pmu);
+ break;
+ case ARM_CPU_PART_CORTEX_A5:
+ ret = armv7_a5_pmu_init(pmu);
+ break;
+ case ARM_CPU_PART_CORTEX_A15:
+ ret = armv7_a15_pmu_init(pmu);
+ break;
+ case ARM_CPU_PART_CORTEX_A7:
+ ret = armv7_a7_pmu_init(pmu);
+ break;
+
+ default:
+ if (read_cpuid_implementor() == ARM_CPU_IMP_INTEL) {
+ switch (xscale_cpu_arch_version()) {
+ case ARM_CPU_XSCALE_ARCH_V1:
+ ret = xscale1pmu_init(pmu);
+ break;
+ case ARM_CPU_XSCALE_ARCH_V2:
+ ret = xscale2pmu_init(pmu);
+ break;
+ }
}
+ break;
}
put_cpu();
diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c
index 6e8931ccf13e..5bc2615268d4 100644
--- a/arch/arm/kernel/process.c
+++ b/arch/arm/kernel/process.c
@@ -433,10 +433,11 @@ unsigned long arch_randomize_brk(struct mm_struct *mm)
}
#ifdef CONFIG_MMU
+#ifdef CONFIG_KUSER_HELPERS
/*
* The vectors page is always readable from user space for the
- * atomic helpers and the signal restart code. Insert it into the
- * gate_vma so that it is visible through ptrace and /proc/<pid>/mem.
+ * atomic helpers. Insert it into the gate_vma so that it is visible
+ * through ptrace and /proc/<pid>/mem.
*/
static struct vm_area_struct gate_vma = {
.vm_start = 0xffff0000,
@@ -465,9 +466,48 @@ int in_gate_area_no_mm(unsigned long addr)
{
return in_gate_area(NULL, addr);
}
+#define is_gate_vma(vma) ((vma) == &gate_vma)
+#else
+#define is_gate_vma(vma) 0
+#endif
const char *arch_vma_name(struct vm_area_struct *vma)
{
- return (vma == &gate_vma) ? "[vectors]" : NULL;
+ return is_gate_vma(vma) ? "[vectors]" :
+ (vma->vm_mm && vma->vm_start == vma->vm_mm->context.sigpage) ?
+ "[sigpage]" : NULL;
+}
+
+static struct page *signal_page;
+extern struct page *get_signal_page(void);
+
+int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
+{
+ struct mm_struct *mm = current->mm;
+ unsigned long addr;
+ int ret;
+
+ if (!signal_page)
+ signal_page = get_signal_page();
+ if (!signal_page)
+ return -ENOMEM;
+
+ down_write(&mm->mmap_sem);
+ addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, 0);
+ if (IS_ERR_VALUE(addr)) {
+ ret = addr;
+ goto up_fail;
+ }
+
+ ret = install_special_mapping(mm, addr, PAGE_SIZE,
+ VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC,
+ &signal_page);
+
+ if (ret == 0)
+ mm->context.sigpage = addr;
+
+ up_fail:
+ up_write(&mm->mmap_sem);
+ return ret;
}
#endif
diff --git a/arch/arm/kernel/psci.c b/arch/arm/kernel/psci.c
index 36531643cc2c..f73891b6b730 100644
--- a/arch/arm/kernel/psci.c
+++ b/arch/arm/kernel/psci.c
@@ -17,63 +17,58 @@
#include <linux/init.h>
#include <linux/of.h>
+#include <linux/reboot.h>
+#include <linux/pm.h>
+#include <uapi/linux/psci.h>
#include <asm/compiler.h>
#include <asm/errno.h>
#include <asm/opcodes-sec.h>
#include <asm/opcodes-virt.h>
#include <asm/psci.h>
+#include <asm/system_misc.h>
struct psci_operations psci_ops;
static int (*invoke_psci_fn)(u32, u32, u32, u32);
+typedef int (*psci_initcall_t)(const struct device_node *);
enum psci_function {
PSCI_FN_CPU_SUSPEND,
PSCI_FN_CPU_ON,
PSCI_FN_CPU_OFF,
PSCI_FN_MIGRATE,
+ PSCI_FN_AFFINITY_INFO,
+ PSCI_FN_MIGRATE_INFO_TYPE,
PSCI_FN_MAX,
};
static u32 psci_function_id[PSCI_FN_MAX];
-#define PSCI_RET_SUCCESS 0
-#define PSCI_RET_EOPNOTSUPP -1
-#define PSCI_RET_EINVAL -2
-#define PSCI_RET_EPERM -3
-
static int psci_to_linux_errno(int errno)
{
switch (errno) {
case PSCI_RET_SUCCESS:
return 0;
- case PSCI_RET_EOPNOTSUPP:
+ case PSCI_RET_NOT_SUPPORTED:
return -EOPNOTSUPP;
- case PSCI_RET_EINVAL:
+ case PSCI_RET_INVALID_PARAMS:
return -EINVAL;
- case PSCI_RET_EPERM:
+ case PSCI_RET_DENIED:
return -EPERM;
};
return -EINVAL;
}
-#define PSCI_POWER_STATE_ID_MASK 0xffff
-#define PSCI_POWER_STATE_ID_SHIFT 0
-#define PSCI_POWER_STATE_TYPE_MASK 0x1
-#define PSCI_POWER_STATE_TYPE_SHIFT 16
-#define PSCI_POWER_STATE_AFFL_MASK 0x3
-#define PSCI_POWER_STATE_AFFL_SHIFT 24
-
static u32 psci_power_state_pack(struct psci_power_state state)
{
- return ((state.id & PSCI_POWER_STATE_ID_MASK)
- << PSCI_POWER_STATE_ID_SHIFT) |
- ((state.type & PSCI_POWER_STATE_TYPE_MASK)
- << PSCI_POWER_STATE_TYPE_SHIFT) |
- ((state.affinity_level & PSCI_POWER_STATE_AFFL_MASK)
- << PSCI_POWER_STATE_AFFL_SHIFT);
+ return ((state.id << PSCI_0_2_POWER_STATE_ID_SHIFT)
+ & PSCI_0_2_POWER_STATE_ID_MASK) |
+ ((state.type << PSCI_0_2_POWER_STATE_TYPE_SHIFT)
+ & PSCI_0_2_POWER_STATE_TYPE_MASK) |
+ ((state.affinity_level << PSCI_0_2_POWER_STATE_AFFL_SHIFT)
+ & PSCI_0_2_POWER_STATE_AFFL_MASK);
}
/*
@@ -110,6 +105,14 @@ static noinline int __invoke_psci_fn_smc(u32 function_id, u32 arg0, u32 arg1,
return function_id;
}
+static int psci_get_version(void)
+{
+ int err;
+
+ err = invoke_psci_fn(PSCI_0_2_FN_PSCI_VERSION, 0, 0, 0);
+ return err;
+}
+
static int psci_cpu_suspend(struct psci_power_state state,
unsigned long entry_point)
{
@@ -153,26 +156,36 @@ static int psci_migrate(unsigned long cpuid)
return psci_to_linux_errno(err);
}
-static const struct of_device_id psci_of_match[] __initconst = {
- { .compatible = "arm,psci", },
- {},
-};
+static int psci_affinity_info(unsigned long target_affinity,
+ unsigned long lowest_affinity_level)
+{
+ int err;
+ u32 fn;
-static int __init psci_init(void)
+ fn = psci_function_id[PSCI_FN_AFFINITY_INFO];
+ err = invoke_psci_fn(fn, target_affinity, lowest_affinity_level, 0);
+ return err;
+}
+
+static int psci_migrate_info_type(void)
{
- struct device_node *np;
- const char *method;
- u32 id;
+ int err;
+ u32 fn;
- np = of_find_matching_node(NULL, psci_of_match);
- if (!np)
- return 0;
+ fn = psci_function_id[PSCI_FN_MIGRATE_INFO_TYPE];
+ err = invoke_psci_fn(fn, 0, 0, 0);
+ return err;
+}
+
+static int get_set_conduit_method(struct device_node *np)
+{
+ const char *method;
- pr_info("probing function IDs from device-tree\n");
+ pr_info("probing for conduit method from DT.\n");
if (of_property_read_string(np, "method", &method)) {
- pr_warning("missing \"method\" property\n");
- goto out_put_node;
+ pr_warn("missing \"method\" property\n");
+ return -ENXIO;
}
if (!strcmp("hvc", method)) {
@@ -180,10 +193,99 @@ static int __init psci_init(void)
} else if (!strcmp("smc", method)) {
invoke_psci_fn = __invoke_psci_fn_smc;
} else {
- pr_warning("invalid \"method\" property: %s\n", method);
+ pr_warn("invalid \"method\" property: %s\n", method);
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static void psci_sys_reset(enum reboot_mode reboot_mode, const char *cmd)
+{
+ invoke_psci_fn(PSCI_0_2_FN_SYSTEM_RESET, 0, 0, 0);
+}
+
+static void psci_sys_poweroff(void)
+{
+ invoke_psci_fn(PSCI_0_2_FN_SYSTEM_OFF, 0, 0, 0);
+}
+
+/*
+ * PSCI Function IDs for v0.2+ are well defined so use
+ * standard values.
+ */
+static int psci_0_2_init(struct device_node *np)
+{
+ int err, ver;
+
+ err = get_set_conduit_method(np);
+
+ if (err)
+ goto out_put_node;
+
+ ver = psci_get_version();
+
+ if (ver == PSCI_RET_NOT_SUPPORTED) {
+ /* PSCI v0.2 mandates implementation of PSCI_VERSION. */
+ pr_err("PSCI firmware does not comply with the v0.2 spec.\n");
+ err = -EOPNOTSUPP;
goto out_put_node;
+ } else {
+ pr_info("PSCIv%d.%d detected in firmware.\n",
+ PSCI_VERSION_MAJOR(ver),
+ PSCI_VERSION_MINOR(ver));
+
+ if (PSCI_VERSION_MAJOR(ver) == 0 &&
+ PSCI_VERSION_MINOR(ver) < 2) {
+ err = -EINVAL;
+ pr_err("Conflicting PSCI version detected.\n");
+ goto out_put_node;
+ }
}
+ pr_info("Using standard PSCI v0.2 function IDs\n");
+ psci_function_id[PSCI_FN_CPU_SUSPEND] = PSCI_0_2_FN_CPU_SUSPEND;
+ psci_ops.cpu_suspend = psci_cpu_suspend;
+
+ psci_function_id[PSCI_FN_CPU_OFF] = PSCI_0_2_FN_CPU_OFF;
+ psci_ops.cpu_off = psci_cpu_off;
+
+ psci_function_id[PSCI_FN_CPU_ON] = PSCI_0_2_FN_CPU_ON;
+ psci_ops.cpu_on = psci_cpu_on;
+
+ psci_function_id[PSCI_FN_MIGRATE] = PSCI_0_2_FN_MIGRATE;
+ psci_ops.migrate = psci_migrate;
+
+ psci_function_id[PSCI_FN_AFFINITY_INFO] = PSCI_0_2_FN_AFFINITY_INFO;
+ psci_ops.affinity_info = psci_affinity_info;
+
+ psci_function_id[PSCI_FN_MIGRATE_INFO_TYPE] =
+ PSCI_0_2_FN_MIGRATE_INFO_TYPE;
+ psci_ops.migrate_info_type = psci_migrate_info_type;
+
+ arm_pm_restart = psci_sys_reset;
+
+ pm_power_off = psci_sys_poweroff;
+
+out_put_node:
+ of_node_put(np);
+ return err;
+}
+
+/*
+ * PSCI < v0.2 gets its PSCI Function IDs via DT.
+ */
+static int psci_0_1_init(struct device_node *np)
+{
+ u32 id;
+ int err;
+
+ err = get_set_conduit_method(np);
+
+ if (err)
+ goto out_put_node;
+
+ pr_info("Using PSCI v0.1 Function IDs from DT\n");
+
if (!of_property_read_u32(np, "cpu_suspend", &id)) {
psci_function_id[PSCI_FN_CPU_SUSPEND] = id;
psci_ops.cpu_suspend = psci_cpu_suspend;
@@ -206,6 +308,25 @@ static int __init psci_init(void)
out_put_node:
of_node_put(np);
- return 0;
+ return err;
+}
+
+static const struct of_device_id psci_of_match[] __initconst = {
+ { .compatible = "arm,psci", .data = psci_0_1_init},
+ { .compatible = "arm,psci-0.2", .data = psci_0_2_init},
+ {},
+};
+
+int __init psci_init(void)
+{
+ struct device_node *np;
+ const struct of_device_id *matched_np;
+ psci_initcall_t init_fn;
+
+ np = of_find_matching_node_and_match(NULL, psci_of_match, &matched_np);
+ if (!np)
+ return -ENODEV;
+
+ init_fn = (psci_initcall_t)matched_np->data;
+ return init_fn(np);
}
-early_initcall(psci_init);
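
Note: a worked example of psci_power_state_pack() under the v0.2 field layout
from uapi/linux/psci.h (state ID in bits [15:0], state type in bit 16,
affinity level in bits [25:24]); the values below are purely illustrative:

    struct psci_power_state s = {
        .id = 2, .type = 1 /* power down */, .affinity_level = 1,
    };
    /* packs to (2 << 0) | (1 << 16) | (1 << 24) == 0x01010002 */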
diff --git a/arch/arm/kernel/psci_smp.c b/arch/arm/kernel/psci_smp.c
new file mode 100644
index 000000000000..fc1dbd258255
--- /dev/null
+++ b/arch/arm/kernel/psci_smp.c
@@ -0,0 +1,116 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * Copyright (C) 2012 ARM Limited
+ *
+ * Author: Will Deacon <will.deacon@arm.com>
+ */
+
+#include <linux/init.h>
+#include <linux/smp.h>
+#include <linux/of.h>
+#include <linux/delay.h>
+#include <uapi/linux/psci.h>
+
+#include <asm/psci.h>
+#include <asm/smp_plat.h>
+
+/*
+ * psci_smp assumes that the following is true about PSCI:
+ *
+ * cpu_suspend Suspend the execution on a CPU
+ * @state we don't currently describe affinity levels, so just pass 0.
+ * @entry_point the first instruction to be executed on return
+ * returns 0 success, < 0 on failure
+ *
+ * cpu_off Power down a CPU
+ * @state we don't currently describe affinity levels, so just pass 0.
+ * no return on successful call
+ *
+ * cpu_on Power up a CPU
+ * @cpuid cpuid of target CPU, as from MPIDR
+ * @entry_point the first instruction to be executed on return
+ * returns 0 success, < 0 on failure
+ *
+ * migrate Migrate the context to a different CPU
+ * @cpuid cpuid of target CPU, as from MPIDR
+ * returns 0 success, < 0 on failure
+ *
+ */
+
+extern void secondary_startup(void);
+
+static int __cpuinit psci_boot_secondary(unsigned int cpu,
+ struct task_struct *idle)
+{
+ if (psci_ops.cpu_on)
+ return psci_ops.cpu_on(cpu_logical_map(cpu),
+ __pa(secondary_startup));
+ return -ENODEV;
+}
+
+#ifdef CONFIG_HOTPLUG_CPU
+void __ref psci_cpu_die(unsigned int cpu)
+{
+ const struct psci_power_state ps = {
+ .type = PSCI_POWER_STATE_TYPE_POWER_DOWN,
+ };
+
+ if (psci_ops.cpu_off)
+ psci_ops.cpu_off(ps);
+
+ /* We should never return */
+ panic("psci: cpu %d failed to shutdown\n", cpu);
+}
+
+int __ref psci_cpu_kill(unsigned int cpu)
+{
+ int err, i;
+
+ if (!psci_ops.affinity_info)
+ return 1;
+ /*
+ * cpu_kill could race with cpu_die and we can
+ * potentially end up declaring this cpu undead
+ * while it is dying. So, try again a few times.
+ */
+
+ for (i = 0; i < 10; i++) {
+ err = psci_ops.affinity_info(cpu_logical_map(cpu), 0);
+ if (err == PSCI_0_2_AFFINITY_LEVEL_OFF) {
+ pr_info("CPU%d killed.\n", cpu);
+ return 1;
+ }
+
+ msleep(10);
+ pr_info("Retrying again to check for CPU kill\n");
+ }
+
+ pr_warn("CPU%d may not have shut down cleanly (AFFINITY_INFO reports %d)\n",
+ cpu, err);
+ /* Make platform_cpu_kill() fail. */
+ return 0;
+}
+
+#endif
+
+bool __init psci_smp_available(void)
+{
+ /* is cpu_on available at least? */
+ return (psci_ops.cpu_on != NULL);
+}
+
+struct smp_operations __initdata psci_smp_ops = {
+ .smp_boot_secondary = psci_boot_secondary,
+#ifdef CONFIG_HOTPLUG_CPU
+ .cpu_die = psci_cpu_die,
+ .cpu_kill = psci_cpu_kill,
+#endif
+};
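
Note: CPU teardown above is a two-sided handshake. The dying CPU calls
psci_cpu_die(), which invokes cpu_off and must never return; a surviving CPU
calls psci_cpu_kill(), which polls AFFINITY_INFO until the firmware reports
the core as OFF -- at most 10 polls 10 ms apart, i.e. roughly a 100 ms upper
bound before it gives up and makes platform_cpu_kill() fail.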
diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c
index b4b1d397592b..860952438bd3 100644
--- a/arch/arm/kernel/setup.c
+++ b/arch/arm/kernel/setup.c
@@ -37,6 +37,7 @@
#include <asm/cputype.h>
#include <asm/elf.h>
#include <asm/procinfo.h>
+#include <asm/psci.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/smp_plat.h>
@@ -796,9 +797,13 @@ void __init setup_arch(char **cmdline_p)
unflatten_device_tree();
arm_dt_init_cpu_maps();
+ psci_init();
#ifdef CONFIG_SMP
if (is_smp()) {
- smp_set_ops(mdesc->smp);
+ if (psci_smp_available())
+ smp_set_ops(&psci_smp_ops);
+ else if (mdesc->smp)
+ smp_set_ops(mdesc->smp);
smp_init_cpus();
}
#endif
diff --git a/arch/arm/kernel/signal.c b/arch/arm/kernel/signal.c
index 296786bdbb73..5a42c12767af 100644
--- a/arch/arm/kernel/signal.c
+++ b/arch/arm/kernel/signal.c
@@ -8,6 +8,7 @@
* published by the Free Software Foundation.
*/
#include <linux/errno.h>
+#include <linux/random.h>
#include <linux/signal.h>
#include <linux/personality.h>
#include <linux/uaccess.h>
@@ -15,12 +16,11 @@
#include <asm/elf.h>
#include <asm/cacheflush.h>
+#include <asm/traps.h>
#include <asm/ucontext.h>
#include <asm/unistd.h>
#include <asm/vfp.h>
-#include "signal.h"
-
/*
* For ARM syscalls, we encode the syscall number into the instruction.
*/
@@ -40,11 +40,13 @@
#define SWI_THUMB_SIGRETURN (0xdf00 << 16 | 0x2700 | (__NR_sigreturn - __NR_SYSCALL_BASE))
#define SWI_THUMB_RT_SIGRETURN (0xdf00 << 16 | 0x2700 | (__NR_rt_sigreturn - __NR_SYSCALL_BASE))
-const unsigned long sigreturn_codes[7] = {
+static const unsigned long sigreturn_codes[7] = {
MOV_R7_NR_SIGRETURN, SWI_SYS_SIGRETURN, SWI_THUMB_SIGRETURN,
MOV_R7_NR_RT_SIGRETURN, SWI_SYS_RT_SIGRETURN, SWI_THUMB_RT_SIGRETURN,
};
+static unsigned long signal_return_offset;
+
#ifdef CONFIG_CRUNCH
static int preserve_crunch_context(struct crunch_sigframe __user *frame)
{
@@ -396,13 +398,19 @@ setup_return(struct pt_regs *regs, struct ksignal *ksig,
__put_user(sigreturn_codes[idx+1], rc+1))
return 1;
+#ifdef CONFIG_MMU
if (cpsr & MODE32_BIT) {
+ struct mm_struct *mm = current->mm;
/*
- * 32-bit code can use the new high-page
- * signal return code support.
+ * 32-bit code can use the signal return page
+ * except when the MPU has protected the vectors
+ * page from PL0
*/
- retcode = KERN_SIGRETURN_CODE + (idx << 2) + thumb;
- } else {
+ retcode = mm->context.sigpage + signal_return_offset +
+ (idx << 2) + thumb;
+ } else
+#endif
+ {
/*
* Ensure that the instruction cache sees
* the return code written onto the stack.
@@ -603,3 +611,33 @@ do_work_pending(struct pt_regs *regs, unsigned int thread_flags, int syscall)
} while (thread_flags & _TIF_WORK_MASK);
return 0;
}
+
+struct page *get_signal_page(void)
+{
+ unsigned long ptr;
+ unsigned offset;
+ struct page *page;
+ void *addr;
+
+ page = alloc_pages(GFP_KERNEL, 0);
+
+ if (!page)
+ return NULL;
+
+ addr = page_address(page);
+
+ /* Give the signal return code some randomness */
+ offset = 0x200 + (get_random_int() & 0x7fc);
+ signal_return_offset = offset;
+
+ /*
+ * Copy signal return handlers into the vector page, and
+ * set sigreturn to be a pointer to these.
+ */
+ memcpy(addr + offset, sigreturn_codes, sizeof(sigreturn_codes));
+
+ ptr = (unsigned long)addr + offset;
+ flush_icache_range(ptr, ptr + sizeof(sigreturn_codes));
+
+ return page;
+}
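
Note on the offset randomisation: get_random_int() & 0x7fc keeps bits [10:2],
so the offset is always word-aligned and 0x200 + (get_random_int() & 0x7fc)
ranges over [0x200, 0x9fc] in 4-byte steps (512 possible positions).
sizeof(sigreturn_codes) is 28 bytes, so the copy ends no later than
0x9fc + 28 = 0xa18, safely inside the page.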
diff --git a/arch/arm/kernel/signal.h b/arch/arm/kernel/signal.h
deleted file mode 100644
index 5ff067b7c752..000000000000
--- a/arch/arm/kernel/signal.h
+++ /dev/null
@@ -1,12 +0,0 @@
-/*
- * linux/arch/arm/kernel/signal.h
- *
- * Copyright (C) 2005-2009 Russell King.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-#define KERN_SIGRETURN_CODE (CONFIG_VECTORS_BASE + 0x00000500)
-
-extern const unsigned long sigreturn_codes[7];
diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
index 5919eb451bb9..5a8ad2c8eda0 100644
--- a/arch/arm/kernel/smp.c
+++ b/arch/arm/kernel/smp.c
@@ -87,8 +87,8 @@ int __cpuinit __cpu_up(unsigned int cpu, struct task_struct *idle)
* its stack and the page tables.
*/
secondary_data.stack = task_stack_page(idle) + THREAD_START_SP;
- secondary_data.pgdir = virt_to_phys(idmap_pgd);
- secondary_data.swapper_pg_dir = virt_to_phys(swapper_pg_dir);
+ secondary_data.pgdir = virt_to_idmap(idmap_pgd);
+ secondary_data.swapper_pg_dir = virt_to_idmap(swapper_pg_dir);
__cpuc_flush_dcache_area(&secondary_data, sizeof(secondary_data));
outer_clean_range(__pa(&secondary_data), __pa(&secondary_data + 1));
diff --git a/arch/arm/kernel/smp_tlb.c b/arch/arm/kernel/smp_tlb.c
index 9a52a07aa40e..a98b62dca2fa 100644
--- a/arch/arm/kernel/smp_tlb.c
+++ b/arch/arm/kernel/smp_tlb.c
@@ -103,7 +103,7 @@ static void broadcast_tlb_a15_erratum(void)
static void broadcast_tlb_mm_a15_erratum(struct mm_struct *mm)
{
- int cpu, this_cpu;
+ int this_cpu;
cpumask_t mask = { CPU_BITS_NONE };
if (!erratum_a15_798181())
@@ -111,21 +111,7 @@ static void broadcast_tlb_mm_a15_erratum(struct mm_struct *mm)
dummy_flush_tlb_a15_erratum();
this_cpu = get_cpu();
- for_each_online_cpu(cpu) {
- if (cpu == this_cpu)
- continue;
- /*
- * We only need to send an IPI if the other CPUs are running
- * the same ASID as the one being invalidated. There is no
- * need for locking around the active_asids check since the
- * switch_mm() function has at least one dmb() (as required by
- * this workaround) in case a context switch happens on
- * another CPU after the condition below.
- */
- if (atomic64_read(&mm->context.id) ==
- atomic64_read(&per_cpu(active_asids, cpu)))
- cpumask_set_cpu(cpu, &mask);
- }
+ a15_erratum_get_cpumask(this_cpu, mm, &mask);
smp_call_function_many(&mask, ipi_flush_tlb_a15_erratum, NULL, 1);
put_cpu();
}
diff --git a/arch/arm/kernel/smp_twd.c b/arch/arm/kernel/smp_twd.c
index 90525d9d290b..f6fd1d4398c6 100644
--- a/arch/arm/kernel/smp_twd.c
+++ b/arch/arm/kernel/smp_twd.c
@@ -120,7 +120,7 @@ static int twd_rate_change(struct notifier_block *nb,
* changing cpu.
*/
if (flags == POST_RATE_CHANGE)
- smp_call_function(twd_update_frequency,
+ on_each_cpu(twd_update_frequency,
(void *)&cnd->new_rate, 1);
return NOTIFY_OK;
diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c
index 18b32e8e4497..6b9567e19bdc 100644
--- a/arch/arm/kernel/traps.c
+++ b/arch/arm/kernel/traps.c
@@ -35,8 +35,6 @@
#include <asm/tls.h>
#include <asm/system_misc.h>
-#include "signal.h"
-
static const char *handler[]= { "prefetch abort", "data abort", "address exception", "interrupt" };
void *vectors_page;
@@ -800,47 +798,55 @@ void __init trap_init(void)
return;
}
-static void __init kuser_get_tls_init(unsigned long vectors)
+#ifdef CONFIG_KUSER_HELPERS
+static void __init kuser_init(void *vectors)
{
+ extern char __kuser_helper_start[], __kuser_helper_end[];
+ int kuser_sz = __kuser_helper_end - __kuser_helper_start;
+
+ memcpy(vectors + 0x1000 - kuser_sz, __kuser_helper_start, kuser_sz);
+
/*
* vectors + 0xfe0 = __kuser_get_tls
* vectors + 0xfe8 = hardware TLS instruction at 0xffff0fe8
*/
if (tls_emu || has_tls_reg)
- memcpy((void *)vectors + 0xfe0, (void *)vectors + 0xfe8, 4);
+ memcpy(vectors + 0xfe0, vectors + 0xfe8, 4);
}
+#else
+static void __init kuser_init(void *vectors)
+{
+}
+#endif
void __init early_trap_init(void *vectors_base)
{
unsigned long vectors = (unsigned long)vectors_base;
extern char __stubs_start[], __stubs_end[];
extern char __vectors_start[], __vectors_end[];
- extern char __kuser_helper_start[], __kuser_helper_end[];
- int kuser_sz = __kuser_helper_end - __kuser_helper_start;
+ unsigned i;
vectors_page = vectors_base;
/*
+ * Poison the vectors page with an undefined instruction. This
+ * instruction is chosen to be undefined for both ARM and Thumb
+ * ISAs. The Thumb version is an undefined instruction with a
+ * branch back to the undefined instruction.
+ */
+ for (i = 0; i < PAGE_SIZE / sizeof(u32); i++)
+ ((u32 *)vectors_base)[i] = 0xe7fddef1;
+
+ /*
* Copy the vectors, stubs and kuser helpers (in entry-armv.S)
* into the vector page, mapped at 0xffff0000, and ensure these
* are visible to the instruction stream.
*/
memcpy((void *)vectors, __vectors_start, __vectors_end - __vectors_start);
- memcpy((void *)vectors + 0x200, __stubs_start, __stubs_end - __stubs_start);
- memcpy((void *)vectors + 0x1000 - kuser_sz, __kuser_helper_start, kuser_sz);
+ memcpy((void *)vectors + 0x1000, __stubs_start, __stubs_end - __stubs_start);
- /*
- * Do processor specific fixups for the kuser helpers
- */
- kuser_get_tls_init(vectors);
-
- /*
- * Copy signal return handlers into the vector page, and
- * set sigreturn to be a pointer to these.
- */
- memcpy((void *)(vectors + KERN_SIGRETURN_CODE - CONFIG_VECTORS_BASE),
- sigreturn_codes, sizeof(sigreturn_codes));
+ kuser_init(vectors_base);
- flush_icache_range(vectors, vectors + PAGE_SIZE);
+ flush_icache_range(vectors, vectors + PAGE_SIZE * 2);
modify_domain(DOMAIN_USER, DOMAIN_CLIENT);
}
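
Note: a sketch of why 0xe7fddef1 works as a poison value in both instruction
sets, assuming little-endian byte order. As an ARM word it falls in the
architecturally undefined (udf) space; fetched as Thumb it decodes as two
halfwords:

    0xdef1      @ udf #0xf1  - permanently undefined, traps
    0xe7fd      @ b . - 2    - branches back to the udf

so a stray jump anywhere into the vectors page faults instead of running wild.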
diff --git a/arch/arm/kernel/vmlinux.lds.S b/arch/arm/kernel/vmlinux.lds.S
index a871b8e00fca..33f2ea32f5a0 100644
--- a/arch/arm/kernel/vmlinux.lds.S
+++ b/arch/arm/kernel/vmlinux.lds.S
@@ -152,6 +152,23 @@ SECTIONS
. = ALIGN(PAGE_SIZE);
__init_begin = .;
#endif
+ /*
+ * The vectors and stubs are relocatable code, and the
+ * only thing that matters is their relative offsets
+ */
+ __vectors_start = .;
+ .vectors 0 : AT(__vectors_start) {
+ *(.vectors)
+ }
+ . = __vectors_start + SIZEOF(.vectors);
+ __vectors_end = .;
+
+ __stubs_start = .;
+ .stubs 0x1000 : AT(__stubs_start) {
+ *(.stubs)
+ }
+ . = __stubs_start + SIZEOF(.stubs);
+ __stubs_end = .;
INIT_TEXT_SECTION(8)
.exit.text : {