Diffstat (limited to 'arch/arm/kernel')
-rw-r--r--  arch/arm/kernel/Makefile             |  2
-rw-r--r--  arch/arm/kernel/debug.S              | 19
-rw-r--r--  arch/arm/kernel/elf.c                | 17
-rw-r--r--  arch/arm/kernel/etm.c                |  4
-rw-r--r--  arch/arm/kernel/hw_breakpoint.c      |  7
-rw-r--r--  arch/arm/kernel/kprobes-decode.c     | 10
-rw-r--r--  arch/arm/kernel/perf_event.c         | 33
-rw-r--r--  arch/arm/kernel/perf_event_v6.c      |  2
-rw-r--r--  arch/arm/kernel/perf_event_v7.c      | 26
-rw-r--r--  arch/arm/kernel/perf_event_xscale.c  |  4
-rw-r--r--  arch/arm/kernel/process.c            |  2
-rw-r--r--  arch/arm/kernel/sleep.S              | 14
-rw-r--r--  arch/arm/kernel/swp_emulate.c        |  2
-rw-r--r--  arch/arm/kernel/traps.c              |  3
14 files changed, 91 insertions, 54 deletions
diff --git a/arch/arm/kernel/Makefile b/arch/arm/kernel/Makefile
index 74554f1742d..8d95446150a 100644
--- a/arch/arm/kernel/Makefile
+++ b/arch/arm/kernel/Makefile
@@ -29,7 +29,7 @@ obj-$(CONFIG_MODULES) += armksyms.o module.o
obj-$(CONFIG_ARTHUR) += arthur.o
obj-$(CONFIG_ISA_DMA) += dma-isa.o
obj-$(CONFIG_PCI) += bios32.o isa.o
-obj-$(CONFIG_PM) += sleep.o
+obj-$(CONFIG_PM_SLEEP) += sleep.o
obj-$(CONFIG_HAVE_SCHED_CLOCK) += sched_clock.o
obj-$(CONFIG_SMP) += smp.o smp_tlb.o
obj-$(CONFIG_HAVE_ARM_SCU) += smp_scu.o
diff --git a/arch/arm/kernel/debug.S b/arch/arm/kernel/debug.S
index d2d983be096..bcd66e00bdb 100644
--- a/arch/arm/kernel/debug.S
+++ b/arch/arm/kernel/debug.S
@@ -25,7 +25,7 @@
.macro addruart, rp, rv
.endm
-#if defined(CONFIG_CPU_V6) || defined(CONFIG_CPU_V6K)
+#if defined(CONFIG_CPU_V6) || defined(CONFIG_CPU_V6K) || defined(CONFIG_CPU_V7)
.macro senduart, rd, rx
mcr p14, 0, \rd, c0, c5, 0
@@ -49,23 +49,6 @@
1002:
.endm
-#elif defined(CONFIG_CPU_V7)
-
- .macro senduart, rd, rx
- mcr p14, 0, \rd, c0, c5, 0
- .endm
-
- .macro busyuart, rd, rx
-busy: mrc p14, 0, pc, c0, c1, 0
- bcs busy
- .endm
-
- .macro waituart, rd, rx
-wait: mrc p14, 0, pc, c0, c1, 0
- bcs wait
-
- .endm
-
#elif defined(CONFIG_CPU_XSCALE)
.macro senduart, rd, rx
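[Note: with this change ARMv7 shares the ARMv6/V6K DCC macros above. For reference, the send/busy-wait protocol those macros implement, as an illustrative C sketch — dcc_putc() is not part of the patch, and it only compiles for ARM targets. DBGDSCR bit 29 (0x20000000, the mask tested by the V6 busyuart macro) is the TXfull flag.]

/* Illustrative sketch, not part of the patch: write one character
 * over the Debug Communications Channel by polling DBGDSCR.TXfull,
 * then storing to DBGDTRTX. */
static void dcc_putc(char c)
{
	unsigned int dscr;

	do {
		asm volatile("mrc p14, 0, %0, c0, c1, 0" : "=r" (dscr));
	} while (dscr & (1u << 29));		/* TXfull: transmitter busy */

	asm volatile("mcr p14, 0, %0, c0, c5, 0" : : "r" (c));
}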
diff --git a/arch/arm/kernel/elf.c b/arch/arm/kernel/elf.c
index d4a0da1e48f..9b05c6a0dce 100644
--- a/arch/arm/kernel/elf.c
+++ b/arch/arm/kernel/elf.c
@@ -40,15 +40,22 @@ EXPORT_SYMBOL(elf_check_arch);
void elf_set_personality(const struct elf32_hdr *x)
{
unsigned int eflags = x->e_flags;
- unsigned int personality = PER_LINUX_32BIT;
+ unsigned int personality = current->personality & ~PER_MASK;
+
+ /*
+ * We only support Linux ELF executables, so always set the
+ * personality to LINUX.
+ */
+ personality |= PER_LINUX;
/*
* APCS-26 is only valid for OABI executables
*/
- if ((eflags & EF_ARM_EABI_MASK) == EF_ARM_EABI_UNKNOWN) {
- if (eflags & EF_ARM_APCS_26)
- personality = PER_LINUX;
- }
+ if ((eflags & EF_ARM_EABI_MASK) == EF_ARM_EABI_UNKNOWN &&
+ (eflags & EF_ARM_APCS_26))
+ personality &= ~ADDR_LIMIT_32BIT;
+ else
+ personality |= ADDR_LIMIT_32BIT;
set_personality(personality);
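[Note: distilled into a standalone C sketch, the new flag handling looks like this. new_personality() is illustrative, not a kernel function; the constants carry the values defined in linux/personality.h and the ARM ELF headers.]

#define PER_LINUX		0x0000
#define PER_MASK		0x00ff
#define ADDR_LIMIT_32BIT	0x0800000
#define EF_ARM_EABI_MASK	0xff000000
#define EF_ARM_EABI_UNKNOWN	0x00000000
#define EF_ARM_APCS_26		0x00000008

/* Keep the non-personality bits (e.g. ADDR_NO_RANDOMIZE), force the
 * base personality to PER_LINUX, and only drop ADDR_LIMIT_32BIT for
 * APCS-26 OABI binaries. */
static unsigned int new_personality(unsigned int old, unsigned int eflags)
{
	unsigned int p = (old & ~PER_MASK) | PER_LINUX;

	if ((eflags & EF_ARM_EABI_MASK) == EF_ARM_EABI_UNKNOWN &&
	    (eflags & EF_ARM_APCS_26))
		p &= ~ADDR_LIMIT_32BIT;		/* 26-bit address space */
	else
		p |= ADDR_LIMIT_32BIT;

	return p;
}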
diff --git a/arch/arm/kernel/etm.c b/arch/arm/kernel/etm.c
index 052b509e2d5..1bec8b5f22f 100644
--- a/arch/arm/kernel/etm.c
+++ b/arch/arm/kernel/etm.c
@@ -338,7 +338,7 @@ static struct miscdevice etb_miscdev = {
.fops = &etb_fops,
};
-static int __init etb_probe(struct amba_device *dev, const struct amba_id *id)
+static int __devinit etb_probe(struct amba_device *dev, const struct amba_id *id)
{
struct tracectx *t = &tracer;
int ret = 0;
@@ -530,7 +530,7 @@ static ssize_t trace_mode_store(struct kobject *kobj,
static struct kobj_attribute trace_mode_attr =
__ATTR(trace_mode, 0644, trace_mode_show, trace_mode_store);
-static int __init etm_probe(struct amba_device *dev, const struct amba_id *id)
+static int __devinit etm_probe(struct amba_device *dev, const struct amba_id *id)
{
struct tracectx *t = &tracer;
int ret = 0;
diff --git a/arch/arm/kernel/hw_breakpoint.c b/arch/arm/kernel/hw_breakpoint.c
index 8dbc126f715..87acc25d7a3 100644
--- a/arch/arm/kernel/hw_breakpoint.c
+++ b/arch/arm/kernel/hw_breakpoint.c
@@ -868,6 +868,13 @@ static void reset_ctrl_regs(void *info)
*/
asm volatile("mcr p14, 0, %0, c1, c0, 4" : : "r" (0));
isb();
+
+ /*
+ * Clear any configured vector-catch events before
+ * enabling monitor mode.
+ */
+ asm volatile("mcr p14, 0, %0, c0, c7, 0" : : "r" (0));
+ isb();
}
if (enable_monitor_mode())
diff --git a/arch/arm/kernel/kprobes-decode.c b/arch/arm/kernel/kprobes-decode.c
index 8f6ed43861f..23891317dc4 100644
--- a/arch/arm/kernel/kprobes-decode.c
+++ b/arch/arm/kernel/kprobes-decode.c
@@ -594,7 +594,8 @@ static void __kprobes emulate_ldr(struct kprobe *p, struct pt_regs *regs)
long cpsr = regs->ARM_cpsr;
fnr.dr = insnslot_llret_3arg_rflags(rnv, 0, rmv, cpsr, i_fn);
- regs->uregs[rn] = fnr.r0; /* Save Rn in case of writeback. */
+ if (rn != 15)
+ regs->uregs[rn] = fnr.r0; /* Save Rn in case of writeback. */
rdv = fnr.r1;
if (rd == 15) {
@@ -622,10 +623,11 @@ static void __kprobes emulate_str(struct kprobe *p, struct pt_regs *regs)
long rdv = (rd == 15) ? iaddr + str_pc_offset : regs->uregs[rd];
long rnv = (rn == 15) ? iaddr + 8 : regs->uregs[rn];
long rmv = regs->uregs[rm]; /* rm/rmv may be invalid, don't care. */
+ long rnv_wb;
- /* Save Rn in case of writeback. */
- regs->uregs[rn] =
- insnslot_3arg_rflags(rnv, rdv, rmv, regs->ARM_cpsr, i_fn);
+ rnv_wb = insnslot_3arg_rflags(rnv, rdv, rmv, regs->ARM_cpsr, i_fn);
+ if (rn != 15)
+ regs->uregs[rn] = rnv_wb; /* Save Rn in case of writeback. */
}
static void __kprobes emulate_mrrc(struct kprobe *p, struct pt_regs *regs)
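[Note: both hunks above apply the same guard. The reason, sketched as a hypothetical helper the file does not actually define: regs->uregs[15] is the saved PC that the kprobes core uses to resume execution, so an emulated LDR/STR whose base register Rn is the PC must never write its result back there.]

/* Illustrative kernel-context sketch: base-register writeback for an
 * emulated LDR/STR is only legal when Rn is a real register. */
static inline void writeback_rn(struct pt_regs *regs, int rn, long val)
{
	if (rn != 15)
		regs->uregs[rn] = val;	/* skip r15: saved PC */
}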
diff --git a/arch/arm/kernel/perf_event.c b/arch/arm/kernel/perf_event.c
index 22e194eb853..979da3947f4 100644
--- a/arch/arm/kernel/perf_event.c
+++ b/arch/arm/kernel/perf_event.c
@@ -79,6 +79,7 @@ struct arm_pmu {
void (*write_counter)(int idx, u32 val);
void (*start)(void);
void (*stop)(void);
+ void (*reset)(void *);
const unsigned (*cache_map)[PERF_COUNT_HW_CACHE_MAX]
[PERF_COUNT_HW_CACHE_OP_MAX]
[PERF_COUNT_HW_CACHE_RESULT_MAX];
@@ -204,11 +205,9 @@ armpmu_event_set_period(struct perf_event *event,
static u64
armpmu_event_update(struct perf_event *event,
struct hw_perf_event *hwc,
- int idx)
+ int idx, int overflow)
{
- int shift = 64 - 32;
- s64 prev_raw_count, new_raw_count;
- u64 delta;
+ u64 delta, prev_raw_count, new_raw_count;
again:
prev_raw_count = local64_read(&hwc->prev_count);
@@ -218,8 +217,13 @@ again:
new_raw_count) != prev_raw_count)
goto again;
- delta = (new_raw_count << shift) - (prev_raw_count << shift);
- delta >>= shift;
+ new_raw_count &= armpmu->max_period;
+ prev_raw_count &= armpmu->max_period;
+
+ if (overflow)
+ delta = armpmu->max_period - prev_raw_count + new_raw_count + 1;
+ else
+ delta = new_raw_count - prev_raw_count;
local64_add(delta, &event->count);
local64_sub(delta, &hwc->period_left);
@@ -236,7 +240,7 @@ armpmu_read(struct perf_event *event)
if (hwc->idx < 0)
return;
- armpmu_event_update(event, hwc, hwc->idx);
+ armpmu_event_update(event, hwc, hwc->idx, 0);
}
static void
@@ -254,7 +258,7 @@ armpmu_stop(struct perf_event *event, int flags)
if (!(hwc->state & PERF_HES_STOPPED)) {
armpmu->disable(hwc, hwc->idx);
barrier(); /* why? */
- armpmu_event_update(event, hwc, hwc->idx);
+ armpmu_event_update(event, hwc, hwc->idx, 0);
hwc->state |= PERF_HES_STOPPED | PERF_HES_UPTODATE;
}
}
@@ -624,6 +628,19 @@ static struct pmu pmu = {
#include "perf_event_v6.c"
#include "perf_event_v7.c"
+/*
+ * Ensure the PMU has sane values out of reset.
+ * This requires SMP to be available, so exists as a separate initcall.
+ */
+static int __init
+armpmu_reset(void)
+{
+ if (armpmu && armpmu->reset)
+ return on_each_cpu(armpmu->reset, NULL, 1);
+ return 0;
+}
+arch_initcall(armpmu_reset);
+
static int __init
init_hw_perf_events(void)
{
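[Note: the new delta arithmetic in armpmu_event_update() can be checked in isolation. A minimal userspace sketch, assuming a 32-bit counter — counter_delta() and main() are illustrative, not kernel code.]

#include <stdint.h>
#include <stdio.h>

/* Wraparound-aware delta: max_period is the counter's all-ones value.
 * On overflow, count from prev up to max_period, add one for the wrap
 * to zero, then add the new reading. */
static uint64_t counter_delta(uint64_t prev, uint64_t new,
			      uint64_t max_period, int overflow)
{
	prev &= max_period;
	new  &= max_period;

	if (overflow)
		return max_period - prev + new + 1;
	else
		return new - prev;
}

int main(void)
{
	uint64_t max = (1ULL << 32) - 1;

	/* 15 counts to reach 0xffffffff, 1 for the wrap, 16 more: 32 */
	printf("%llu\n", (unsigned long long)
	       counter_delta(0xfffffff0, 0x10, max, 1));
	return 0;
}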
diff --git a/arch/arm/kernel/perf_event_v6.c b/arch/arm/kernel/perf_event_v6.c
index 6fc2d228db5..f1e8dd94afe 100644
--- a/arch/arm/kernel/perf_event_v6.c
+++ b/arch/arm/kernel/perf_event_v6.c
@@ -474,7 +474,7 @@ armv6pmu_handle_irq(int irq_num,
continue;
hwc = &event->hw;
- armpmu_event_update(event, hwc, idx);
+ armpmu_event_update(event, hwc, idx, 1);
data.period = event->hw.last_period;
if (!armpmu_event_set_period(event, hwc, idx))
continue;
diff --git a/arch/arm/kernel/perf_event_v7.c b/arch/arm/kernel/perf_event_v7.c
index 2e1402556fa..4960686afb5 100644
--- a/arch/arm/kernel/perf_event_v7.c
+++ b/arch/arm/kernel/perf_event_v7.c
@@ -466,6 +466,7 @@ static inline unsigned long armv7_pmnc_read(void)
static inline void armv7_pmnc_write(unsigned long val)
{
val &= ARMV7_PMNC_MASK;
+ isb();
asm volatile("mcr p15, 0, %0, c9, c12, 0" : : "r"(val));
}
@@ -502,6 +503,7 @@ static inline int armv7_pmnc_select_counter(unsigned int idx)
val = (idx - ARMV7_EVENT_CNT_TO_CNTx) & ARMV7_SELECT_MASK;
asm volatile("mcr p15, 0, %0, c9, c12, 5" : : "r" (val));
+ isb();
return idx;
}
@@ -780,7 +782,7 @@ static irqreturn_t armv7pmu_handle_irq(int irq_num, void *dev)
continue;
hwc = &event->hw;
- armpmu_event_update(event, hwc, idx);
+ armpmu_event_update(event, hwc, idx, 1);
data.period = event->hw.last_period;
if (!armpmu_event_set_period(event, hwc, idx))
continue;
@@ -847,6 +849,18 @@ static int armv7pmu_get_event_idx(struct cpu_hw_events *cpuc,
}
}
+static void armv7pmu_reset(void *info)
+{
+ u32 idx, nb_cnt = armpmu->num_events;
+
+ /* The counter and interrupt enable registers are unknown at reset. */
+ for (idx = 1; idx < nb_cnt; ++idx)
+ armv7pmu_disable_event(NULL, idx);
+
+ /* Initialize & Reset PMNC: C and P bits */
+ armv7_pmnc_write(ARMV7_PMNC_P | ARMV7_PMNC_C);
+}
+
static struct arm_pmu armv7pmu = {
.handle_irq = armv7pmu_handle_irq,
.enable = armv7pmu_enable_event,
@@ -856,17 +870,15 @@ static struct arm_pmu armv7pmu = {
.get_event_idx = armv7pmu_get_event_idx,
.start = armv7pmu_start,
.stop = armv7pmu_stop,
+ .reset = armv7pmu_reset,
.raw_event_mask = 0xFF,
.max_period = (1LLU << 32) - 1,
};
-static u32 __init armv7_reset_read_pmnc(void)
+static u32 __init armv7_read_num_pmnc_events(void)
{
u32 nb_cnt;
- /* Initialize & Reset PMNC: C and P bits */
- armv7_pmnc_write(ARMV7_PMNC_P | ARMV7_PMNC_C);
-
/* Read the nb of CNTx counters supported from PMNC */
nb_cnt = (armv7_pmnc_read() >> ARMV7_PMNC_N_SHIFT) & ARMV7_PMNC_N_MASK;
@@ -880,7 +892,7 @@ static const struct arm_pmu *__init armv7_a8_pmu_init(void)
armv7pmu.name = "ARMv7 Cortex-A8";
armv7pmu.cache_map = &armv7_a8_perf_cache_map;
armv7pmu.event_map = &armv7_a8_perf_map;
- armv7pmu.num_events = armv7_reset_read_pmnc();
+ armv7pmu.num_events = armv7_read_num_pmnc_events();
return &armv7pmu;
}
@@ -890,7 +902,7 @@ static const struct arm_pmu *__init armv7_a9_pmu_init(void)
armv7pmu.name = "ARMv7 Cortex-A9";
armv7pmu.cache_map = &armv7_a9_perf_cache_map;
armv7pmu.event_map = &armv7_a9_perf_map;
- armv7pmu.num_events = armv7_reset_read_pmnc();
+ armv7pmu.num_events = armv7_read_num_pmnc_events();
return &armv7pmu;
}
#else
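[Note: the two added isb() calls enforce ordering around the CP15 PMU accesses — the one before the PMNC write synchronizes the instruction stream before counters are enabled or disabled, and the one after the PMSELR write makes the counter selection visible before any banked counter access. An illustrative ARMv7-only sketch of that second, select-then-access pattern; pmu_read_event_counter() is not part of the patch.]

/* Illustrative sketch: read the event counter selected via PMSELR
 * (register names per the ARMv7-A Architecture Reference Manual). */
static inline unsigned int pmu_read_event_counter(unsigned int idx)
{
	unsigned int val;

	/* PMSELR: select counter idx... */
	asm volatile("mcr p15, 0, %0, c9, c12, 5" : : "r" (idx));
	/* ...and synchronize so the read below uses the new selection */
	asm volatile("isb");
	/* PMXEVCNTR: read the selected event counter */
	asm volatile("mrc p15, 0, %0, c9, c13, 2" : "=r" (val));

	return val;
}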
diff --git a/arch/arm/kernel/perf_event_xscale.c b/arch/arm/kernel/perf_event_xscale.c
index 28cd3b025bc..39affbe4fdb 100644
--- a/arch/arm/kernel/perf_event_xscale.c
+++ b/arch/arm/kernel/perf_event_xscale.c
@@ -246,7 +246,7 @@ xscale1pmu_handle_irq(int irq_num, void *dev)
continue;
hwc = &event->hw;
- armpmu_event_update(event, hwc, idx);
+ armpmu_event_update(event, hwc, idx, 1);
data.period = event->hw.last_period;
if (!armpmu_event_set_period(event, hwc, idx))
continue;
@@ -578,7 +578,7 @@ xscale2pmu_handle_irq(int irq_num, void *dev)
continue;
hwc = &event->hw;
- armpmu_event_update(event, hwc, idx);
+ armpmu_event_update(event, hwc, idx, 1);
data.period = event->hw.last_period;
if (!armpmu_event_set_period(event, hwc, idx))
continue;
diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c
index 94bbedbed63..5e1e5419722 100644
--- a/arch/arm/kernel/process.c
+++ b/arch/arm/kernel/process.c
@@ -372,6 +372,8 @@ copy_thread(unsigned long clone_flags, unsigned long stack_start,
if (clone_flags & CLONE_SETTLS)
thread->tp_value = regs->ARM_r3;
+ thread_notify(THREAD_NOTIFY_COPY, thread);
+
return 0;
}
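[Note: THREAD_NOTIFY_COPY lets subsystems fix up per-thread state at the point where copy_thread() duplicates a task. A hedged sketch of a consumer, modelled on the VFP notifier in arch/arm/vfp/vfpmodule.c — the foo_* names are hypothetical; the notifier API itself is arch/arm/include/asm/thread_notify.h.]

#include <linux/init.h>
#include <linux/notifier.h>
#include <asm/thread_notify.h>

static int foo_thread_notify(struct notifier_block *nb,
			     unsigned long cmd, void *t)
{
	if (cmd == THREAD_NOTIFY_COPY) {
		struct thread_info *thread = t;
		/* copy_thread() has just duplicated the parent's
		 * thread_info; fix up per-thread state in *thread here. */
	}

	return NOTIFY_DONE;
}

static struct notifier_block foo_thread_notifier = {
	.notifier_call	= foo_thread_notify,
};

static int __init foo_init(void)
{
	thread_register_notifier(&foo_thread_notifier);
	return 0;
}
core_initcall(foo_init);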
diff --git a/arch/arm/kernel/sleep.S b/arch/arm/kernel/sleep.S
index bfad698a02e..6398ead9d1c 100644
--- a/arch/arm/kernel/sleep.S
+++ b/arch/arm/kernel/sleep.S
@@ -119,11 +119,19 @@ ENTRY(cpu_resume)
#else
ldr r0, sleep_save_sp @ stack phys addr
#endif
- msr cpsr_c, #PSR_I_BIT | PSR_F_BIT | SVC_MODE @ set SVC, irqs off
+ setmode PSR_I_BIT | PSR_F_BIT | SVC_MODE, r1 @ set SVC, irqs off
#ifdef MULTI_CPU
- ldmia r0!, {r1, sp, lr, pc} @ load v:p, stack, return fn, resume fn
+ @ load v:p, stack, return fn, resume fn
+ ARM( ldmia r0!, {r1, sp, lr, pc} )
+THUMB( ldmia r0!, {r1, r2, r3, r4} )
+THUMB( mov sp, r2 )
+THUMB( mov lr, r3 )
+THUMB( bx r4 )
#else
- ldmia r0!, {r1, sp, lr} @ load v:p, stack, return fn
+ @ load v:p, stack, return fn
+ ARM( ldmia r0!, {r1, sp, lr} )
+THUMB( ldmia r0!, {r1, r2, lr} )
+THUMB( mov sp, r2 )
b cpu_do_resume
#endif
ENDPROC(cpu_resume)
diff --git a/arch/arm/kernel/swp_emulate.c b/arch/arm/kernel/swp_emulate.c
index 7a576092291..40ee7e5045e 100644
--- a/arch/arm/kernel/swp_emulate.c
+++ b/arch/arm/kernel/swp_emulate.c
@@ -158,7 +158,7 @@ static int emulate_swpX(unsigned int address, unsigned int *data,
if (res == 0) {
/*
- * Barrier also required between aquiring a lock for a
+ * Barrier also required between acquiring a lock for a
* protected resource and accessing the resource. Inserted for
* same reason as above.
*/
diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c
index f0000e188c8..3b54ad19d48 100644
--- a/arch/arm/kernel/traps.c
+++ b/arch/arm/kernel/traps.c
@@ -410,8 +410,7 @@ static int bad_syscall(int n, struct pt_regs *regs)
struct thread_info *thread = current_thread_info();
siginfo_t info;
- if (current->personality != PER_LINUX &&
- current->personality != PER_LINUX_32BIT &&
+ if ((current->personality & PER_MASK) != PER_LINUX &&
thread->exec_domain->handler) {
thread->exec_domain->handler(n, regs);
return regs->ARM_r0;