Diffstat (limited to 'arch/arm/kernel')
-rw-r--r--  arch/arm/kernel/Makefile              |   20
-rw-r--r--  arch/arm/kernel/armksyms.c            |    1
-rw-r--r--  arch/arm/kernel/bios32.c              |   76
-rw-r--r--  arch/arm/kernel/cpuidle.c             |   21
-rw-r--r--  arch/arm/kernel/crunch-bits.S         |  305
-rw-r--r--  arch/arm/kernel/crunch.c              |   88
-rw-r--r--  arch/arm/kernel/debug.S               |   26
-rw-r--r--  arch/arm/kernel/ecard.c               | 1231
-rw-r--r--  arch/arm/kernel/ecard.h               |   69
-rw-r--r--  arch/arm/kernel/elf.c                 |    1
-rw-r--r--  arch/arm/kernel/entry-armv.S          |    8
-rw-r--r--  arch/arm/kernel/entry-common.S        |   27
-rw-r--r--  arch/arm/kernel/fiq.c                 |    2
-rw-r--r--  arch/arm/kernel/ftrace.c              |  100
-rw-r--r--  arch/arm/kernel/head-nommu.S          |    2
-rw-r--r--  arch/arm/kernel/head.S                |   18
-rw-r--r--  arch/arm/kernel/hw_breakpoint.c       |    1
-rw-r--r--  arch/arm/kernel/insn.c                |   62
-rw-r--r--  arch/arm/kernel/insn.h                |   29
-rw-r--r--  arch/arm/kernel/irq.c                 |    6
-rw-r--r--  arch/arm/kernel/jump_label.c          |   39
-rw-r--r--  arch/arm/kernel/kprobes-common.c      |    1
-rw-r--r--  arch/arm/kernel/kprobes.c             |   88
-rw-r--r--  arch/arm/kernel/machine_kexec.c       |   27
-rw-r--r--  arch/arm/kernel/patch.c               |   75
-rw-r--r--  arch/arm/kernel/patch.h               |    7
-rw-r--r--  arch/arm/kernel/perf_event.c          |   52
-rw-r--r--  arch/arm/kernel/perf_event_v6.c       |   22
-rw-r--r--  arch/arm/kernel/perf_event_v7.c       |  184
-rw-r--r--  arch/arm/kernel/perf_event_xscale.c   |   20
-rw-r--r--  arch/arm/kernel/process.c             |   69
-rw-r--r--  arch/arm/kernel/ptrace.c              |   36
-rw-r--r--  arch/arm/kernel/sched_clock.c         |   18
-rw-r--r--  arch/arm/kernel/setup.c               |   22
-rw-r--r--  arch/arm/kernel/signal.c              |   29
-rw-r--r--  arch/arm/kernel/sleep.S               |    1
-rw-r--r--  arch/arm/kernel/smp.c                 |   77
-rw-r--r--  arch/arm/kernel/smp_tlb.c             |   20
-rw-r--r--  arch/arm/kernel/smp_twd.c             |  127
-rw-r--r--  arch/arm/kernel/tcm.c                 |    1
-rw-r--r--  arch/arm/kernel/thumbee.c             |    1
-rw-r--r--  arch/arm/kernel/time.c                |    4
-rw-r--r--  arch/arm/kernel/traps.c               |   26
-rw-r--r--  arch/arm/kernel/vmlinux.lds.S         |   10
44 files changed, 922 insertions(+), 2127 deletions(-)
diff --git a/arch/arm/kernel/Makefile b/arch/arm/kernel/Makefile
index 43b740d0e374..7b787d642af4 100644
--- a/arch/arm/kernel/Makefile
+++ b/arch/arm/kernel/Makefile
@@ -7,6 +7,8 @@ AFLAGS_head.o := -DTEXT_OFFSET=$(TEXT_OFFSET)
ifdef CONFIG_FUNCTION_TRACER
CFLAGS_REMOVE_ftrace.o = -pg
+CFLAGS_REMOVE_insn.o = -pg
+CFLAGS_REMOVE_patch.o = -pg
endif
CFLAGS_REMOVE_return_address.o = -pg
@@ -14,30 +16,29 @@ CFLAGS_REMOVE_return_address.o = -pg
# Object file lists.
obj-y := elf.o entry-armv.o entry-common.o irq.o opcodes.o \
- process.o ptrace.o return_address.o setup.o signal.o \
- sys_arm.o stacktrace.o time.o traps.o
+ process.o ptrace.o return_address.o sched_clock.o \
+ setup.o signal.o stacktrace.o sys_arm.o time.o traps.o
obj-$(CONFIG_DEPRECATED_PARAM_STRUCT) += compat.o
obj-$(CONFIG_LEDS) += leds.o
obj-$(CONFIG_OC_ETM) += etm.o
-
+obj-$(CONFIG_CPU_IDLE) += cpuidle.o
obj-$(CONFIG_ISA_DMA_API) += dma.o
-obj-$(CONFIG_ARCH_ACORN) += ecard.o
obj-$(CONFIG_FIQ) += fiq.o fiqasm.o
obj-$(CONFIG_MODULES) += armksyms.o module.o
obj-$(CONFIG_ARTHUR) += arthur.o
obj-$(CONFIG_ISA_DMA) += dma-isa.o
obj-$(CONFIG_PCI) += bios32.o isa.o
obj-$(CONFIG_ARM_CPU_SUSPEND) += sleep.o suspend.o
-obj-$(CONFIG_HAVE_SCHED_CLOCK) += sched_clock.o
obj-$(CONFIG_SMP) += smp.o smp_tlb.o
obj-$(CONFIG_HAVE_ARM_SCU) += smp_scu.o
obj-$(CONFIG_HAVE_ARM_TWD) += smp_twd.o
-obj-$(CONFIG_DYNAMIC_FTRACE) += ftrace.o
-obj-$(CONFIG_FUNCTION_GRAPH_TRACER) += ftrace.o
+obj-$(CONFIG_DYNAMIC_FTRACE) += ftrace.o insn.o
+obj-$(CONFIG_FUNCTION_GRAPH_TRACER) += ftrace.o insn.o
+obj-$(CONFIG_JUMP_LABEL) += jump_label.o insn.o patch.o
obj-$(CONFIG_KEXEC) += machine_kexec.o relocate_kernel.o
-obj-$(CONFIG_KPROBES) += kprobes.o kprobes-common.o
+obj-$(CONFIG_KPROBES) += kprobes.o kprobes-common.o patch.o
ifdef CONFIG_THUMB2_KERNEL
obj-$(CONFIG_KPROBES) += kprobes-thumb.o
else
@@ -62,9 +63,6 @@ obj-$(CONFIG_SWP_EMULATE) += swp_emulate.o
CFLAGS_swp_emulate.o := -Wa,-march=armv7-a
obj-$(CONFIG_HAVE_HW_BREAKPOINT) += hw_breakpoint.o
-obj-$(CONFIG_CRUNCH) += crunch.o crunch-bits.o
-AFLAGS_crunch-bits.o := -Wa,-mcpu=ep9312
-
obj-$(CONFIG_CPU_XSCALE) += xscale-cp0.o
obj-$(CONFIG_CPU_XSC3) += xscale-cp0.o
obj-$(CONFIG_CPU_MOHAWK) += xscale-cp0.o
diff --git a/arch/arm/kernel/armksyms.c b/arch/arm/kernel/armksyms.c
index 5b0bce61eb69..b57c75e0b01f 100644
--- a/arch/arm/kernel/armksyms.c
+++ b/arch/arm/kernel/armksyms.c
@@ -18,7 +18,6 @@
#include <linux/io.h>
#include <asm/checksum.h>
-#include <asm/system.h>
#include <asm/ftrace.h>
/*
diff --git a/arch/arm/kernel/bios32.c b/arch/arm/kernel/bios32.c
index f58ba3589908..ede5f7741c42 100644
--- a/arch/arm/kernel/bios32.c
+++ b/arch/arm/kernel/bios32.c
@@ -16,7 +16,6 @@
#include <asm/mach/pci.h>
static int debug_pci;
-static int use_firmware;
/*
* We can't use pci_find_device() here since we are
@@ -295,34 +294,11 @@ static inline int pdev_bad_for_parity(struct pci_dev *dev)
}
/*
- * Adjust the device resources from bus-centric to Linux-centric.
- */
-static void __devinit
-pdev_fixup_device_resources(struct pci_sys_data *root, struct pci_dev *dev)
-{
- resource_size_t offset;
- int i;
-
- for (i = 0; i < PCI_NUM_RESOURCES; i++) {
- if (dev->resource[i].start == 0)
- continue;
- if (dev->resource[i].flags & IORESOURCE_MEM)
- offset = root->mem_offset;
- else
- offset = root->io_offset;
-
- dev->resource[i].start += offset;
- dev->resource[i].end += offset;
- }
-}
-
-/*
* pcibios_fixup_bus - Called after each bus is probed,
* but before its children are examined.
*/
void pcibios_fixup_bus(struct pci_bus *bus)
{
- struct pci_sys_data *root = bus->sysdata;
struct pci_dev *dev;
u16 features = PCI_COMMAND_SERR | PCI_COMMAND_PARITY | PCI_COMMAND_FAST_BACK;
@@ -333,8 +309,6 @@ void pcibios_fixup_bus(struct pci_bus *bus)
list_for_each_entry(dev, &bus->devices, bus_list) {
u16 status;
- pdev_fixup_device_resources(root, dev);
-
pci_read_config_word(dev, PCI_STATUS, &status);
/*
@@ -400,43 +374,6 @@ EXPORT_SYMBOL(pcibios_fixup_bus);
#endif
/*
- * Convert from Linux-centric to bus-centric addresses for bridge devices.
- */
-void
-pcibios_resource_to_bus(struct pci_dev *dev, struct pci_bus_region *region,
- struct resource *res)
-{
- struct pci_sys_data *root = dev->sysdata;
- unsigned long offset = 0;
-
- if (res->flags & IORESOURCE_IO)
- offset = root->io_offset;
- if (res->flags & IORESOURCE_MEM)
- offset = root->mem_offset;
-
- region->start = res->start - offset;
- region->end = res->end - offset;
-}
-EXPORT_SYMBOL(pcibios_resource_to_bus);
-
-void __devinit
-pcibios_bus_to_resource(struct pci_dev *dev, struct resource *res,
- struct pci_bus_region *region)
-{
- struct pci_sys_data *root = dev->sysdata;
- unsigned long offset = 0;
-
- if (res->flags & IORESOURCE_IO)
- offset = root->io_offset;
- if (res->flags & IORESOURCE_MEM)
- offset = root->mem_offset;
-
- res->start = region->start + offset;
- res->end = region->end + offset;
-}
-EXPORT_SYMBOL(pcibios_bus_to_resource);
-
-/*
* Swizzle the device pin each time we cross a bridge.
* This might update pin and returns the slot number.
*/
@@ -497,10 +434,10 @@ static void __init pcibios_init_hw(struct hw_pci *hw)
if (ret > 0) {
if (list_empty(&sys->resources)) {
- pci_add_resource(&sys->resources,
- &ioport_resource);
- pci_add_resource(&sys->resources,
- &iomem_resource);
+ pci_add_resource_offset(&sys->resources,
+ &ioport_resource, sys->io_offset);
+ pci_add_resource_offset(&sys->resources,
+ &iomem_resource, sys->mem_offset);
}
sys->bus = hw->scan(nr, sys);
@@ -525,6 +462,7 @@ void __init pci_common_init(struct hw_pci *hw)
INIT_LIST_HEAD(&hw->buses);
+ pci_add_flags(PCI_REASSIGN_ALL_RSRC);
if (hw->preinit)
hw->preinit();
pcibios_init_hw(hw);
@@ -536,7 +474,7 @@ void __init pci_common_init(struct hw_pci *hw)
list_for_each_entry(sys, &hw->buses, node) {
struct pci_bus *bus = sys->bus;
- if (!use_firmware) {
+ if (!pci_has_flag(PCI_PROBE_ONLY)) {
/*
* Size the bridge windows.
*/
@@ -573,7 +511,7 @@ char * __init pcibios_setup(char *str)
debug_pci = 1;
return NULL;
} else if (!strcmp(str, "firmware")) {
- use_firmware = 1;
+ pci_add_flags(PCI_PROBE_ONLY);
return NULL;
}
return str;
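
Note: the bios32.c hunks above drop the hand-rolled bus-to-CPU resource fixups
(pdev_fixup_device_resources, pcibios_resource_to_bus, pcibios_bus_to_resource)
in favour of pci_add_resource_offset(), so the PCI core performs the address
translation itself, and they route the old "pci=firmware" option through the
generic PCI_PROBE_ONLY flag. A minimal sketch of the resulting host-bridge
setup pattern follows; the function name, window names and addresses are
invented for illustration and are not part of this patch.

#include <linux/pci.h>
#include <asm/mach/pci.h>

/* Hypothetical PCI windows for an imaginary ARM host bridge. */
static struct resource my_pci_io = {
	.name	= "PCI I/O",
	.start	= 0x0000,
	.end	= 0xffff,
	.flags	= IORESOURCE_IO,
};

static struct resource my_pci_mem = {
	.name	= "PCI memory",
	.start	= 0x40000000,		/* CPU address of the window */
	.end	= 0x4fffffff,
	.flags	= IORESOURCE_MEM,
};

static int __init my_pci_setup(int nr, struct pci_sys_data *sys)
{
	sys->io_offset	= 0;
	sys->mem_offset	= 0x40000000;	/* CPU addr = bus addr + offset */

	/* The core now applies these offsets; no per-device fixup needed. */
	pci_add_resource_offset(&sys->resources, &my_pci_io, sys->io_offset);
	pci_add_resource_offset(&sys->resources, &my_pci_mem, sys->mem_offset);

	return 1;			/* one bus to scan */
}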
diff --git a/arch/arm/kernel/cpuidle.c b/arch/arm/kernel/cpuidle.c
new file mode 100644
index 000000000000..89545f6c8403
--- /dev/null
+++ b/arch/arm/kernel/cpuidle.c
@@ -0,0 +1,21 @@
+/*
+ * Copyright 2012 Linaro Ltd.
+ *
+ * The code contained herein is licensed under the GNU General Public
+ * License. You may obtain a copy of the GNU General Public License
+ * Version 2 or later at the following locations:
+ *
+ * http://www.opensource.org/licenses/gpl-license.html
+ * http://www.gnu.org/copyleft/gpl.html
+ */
+
+#include <linux/cpuidle.h>
+#include <asm/proc-fns.h>
+
+int arm_cpuidle_simple_enter(struct cpuidle_device *dev,
+ struct cpuidle_driver *drv, int index)
+{
+ cpu_do_idle();
+
+ return index;
+}
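
Note: arm_cpuidle_simple_enter() above gives ARM platforms a shared entry hook
for the basic WFI idle state. A hypothetical platform driver might wire it up
as its shallowest state roughly as follows; the driver name and latency
numbers are invented, and the prototype is assumed to be exported via
asm/cpuidle.h, which is outside the arch/arm/kernel diffstat shown here.

#include <linux/cpuidle.h>
#include <linux/module.h>
#include <asm/cpuidle.h>	/* assumed to declare arm_cpuidle_simple_enter() */

static struct cpuidle_driver my_soc_idle_driver = {
	.name		= "my_soc_idle",
	.owner		= THIS_MODULE,
	.states[0]	= {
		.enter			= arm_cpuidle_simple_enter,
		.exit_latency		= 1,
		.target_residency	= 1,
		.flags			= CPUIDLE_FLAG_TIME_VALID,
		.name			= "WFI",
		.desc			= "ARM wait-for-interrupt",
	},
	.state_count	= 1,
};

static int __init my_soc_idle_init(void)
{
	/* Per-CPU cpuidle_device registration is omitted from this sketch. */
	return cpuidle_register_driver(&my_soc_idle_driver);
}
device_initcall(my_soc_idle_init);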
diff --git a/arch/arm/kernel/crunch-bits.S b/arch/arm/kernel/crunch-bits.S
deleted file mode 100644
index 0ec9bb48fab9..000000000000
--- a/arch/arm/kernel/crunch-bits.S
+++ /dev/null
@@ -1,305 +0,0 @@
-/*
- * arch/arm/kernel/crunch-bits.S
- * Cirrus MaverickCrunch context switching and handling
- *
- * Copyright (C) 2006 Lennert Buytenhek <buytenh@wantstofly.org>
- *
- * Shamelessly stolen from the iWMMXt code by Nicolas Pitre, which is
- * Copyright (c) 2003-2004, MontaVista Software, Inc.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#include <linux/linkage.h>
-#include <asm/ptrace.h>
-#include <asm/thread_info.h>
-#include <asm/asm-offsets.h>
-#include <mach/ep93xx-regs.h>
-
-/*
- * We can't use hex constants here due to a bug in gas.
- */
-#define CRUNCH_MVDX0 0
-#define CRUNCH_MVDX1 8
-#define CRUNCH_MVDX2 16
-#define CRUNCH_MVDX3 24
-#define CRUNCH_MVDX4 32
-#define CRUNCH_MVDX5 40
-#define CRUNCH_MVDX6 48
-#define CRUNCH_MVDX7 56
-#define CRUNCH_MVDX8 64
-#define CRUNCH_MVDX9 72
-#define CRUNCH_MVDX10 80
-#define CRUNCH_MVDX11 88
-#define CRUNCH_MVDX12 96
-#define CRUNCH_MVDX13 104
-#define CRUNCH_MVDX14 112
-#define CRUNCH_MVDX15 120
-#define CRUNCH_MVAX0L 128
-#define CRUNCH_MVAX0M 132
-#define CRUNCH_MVAX0H 136
-#define CRUNCH_MVAX1L 140
-#define CRUNCH_MVAX1M 144
-#define CRUNCH_MVAX1H 148
-#define CRUNCH_MVAX2L 152
-#define CRUNCH_MVAX2M 156
-#define CRUNCH_MVAX2H 160
-#define CRUNCH_MVAX3L 164
-#define CRUNCH_MVAX3M 168
-#define CRUNCH_MVAX3H 172
-#define CRUNCH_DSPSC 176
-
-#define CRUNCH_SIZE 184
-
- .text
-
-/*
- * Lazy switching of crunch coprocessor context
- *
- * r10 = struct thread_info pointer
- * r9 = ret_from_exception
- * lr = undefined instr exit
- *
- * called from prefetch exception handler with interrupts disabled
- */
-ENTRY(crunch_task_enable)
- ldr r8, =(EP93XX_APB_VIRT_BASE + 0x00130000) @ syscon addr
-
- ldr r1, [r8, #0x80]
- tst r1, #0x00800000 @ access to crunch enabled?
- movne pc, lr @ if so no business here
- mov r3, #0xaa @ unlock syscon swlock
- str r3, [r8, #0xc0]
- orr r1, r1, #0x00800000 @ enable access to crunch
- str r1, [r8, #0x80]
-
- ldr r3, =crunch_owner
- add r0, r10, #TI_CRUNCH_STATE @ get task crunch save area
- ldr r2, [sp, #60] @ current task pc value
- ldr r1, [r3] @ get current crunch owner
- str r0, [r3] @ this task now owns crunch
- sub r2, r2, #4 @ adjust pc back
- str r2, [sp, #60]
-
- ldr r2, [r8, #0x80]
- mov r2, r2 @ flush out enable (@@@)
-
- teq r1, #0 @ test for last ownership
- mov lr, r9 @ normal exit from exception
- beq crunch_load @ no owner, skip save
-
-crunch_save:
- cfstr64 mvdx0, [r1, #CRUNCH_MVDX0] @ save 64b registers
- cfstr64 mvdx1, [r1, #CRUNCH_MVDX1]
- cfstr64 mvdx2, [r1, #CRUNCH_MVDX2]
- cfstr64 mvdx3, [r1, #CRUNCH_MVDX3]
- cfstr64 mvdx4, [r1, #CRUNCH_MVDX4]
- cfstr64 mvdx5, [r1, #CRUNCH_MVDX5]
- cfstr64 mvdx6, [r1, #CRUNCH_MVDX6]
- cfstr64 mvdx7, [r1, #CRUNCH_MVDX7]
- cfstr64 mvdx8, [r1, #CRUNCH_MVDX8]
- cfstr64 mvdx9, [r1, #CRUNCH_MVDX9]
- cfstr64 mvdx10, [r1, #CRUNCH_MVDX10]
- cfstr64 mvdx11, [r1, #CRUNCH_MVDX11]
- cfstr64 mvdx12, [r1, #CRUNCH_MVDX12]
- cfstr64 mvdx13, [r1, #CRUNCH_MVDX13]
- cfstr64 mvdx14, [r1, #CRUNCH_MVDX14]
- cfstr64 mvdx15, [r1, #CRUNCH_MVDX15]
-
-#ifdef __ARMEB__
-#error fix me for ARMEB
-#endif
-
- cfmv32al mvfx0, mvax0 @ save 72b accumulators
- cfstr32 mvfx0, [r1, #CRUNCH_MVAX0L]
- cfmv32am mvfx0, mvax0
- cfstr32 mvfx0, [r1, #CRUNCH_MVAX0M]
- cfmv32ah mvfx0, mvax0
- cfstr32 mvfx0, [r1, #CRUNCH_MVAX0H]
- cfmv32al mvfx0, mvax1
- cfstr32 mvfx0, [r1, #CRUNCH_MVAX1L]
- cfmv32am mvfx0, mvax1
- cfstr32 mvfx0, [r1, #CRUNCH_MVAX1M]
- cfmv32ah mvfx0, mvax1
- cfstr32 mvfx0, [r1, #CRUNCH_MVAX1H]
- cfmv32al mvfx0, mvax2
- cfstr32 mvfx0, [r1, #CRUNCH_MVAX2L]
- cfmv32am mvfx0, mvax2
- cfstr32 mvfx0, [r1, #CRUNCH_MVAX2M]
- cfmv32ah mvfx0, mvax2
- cfstr32 mvfx0, [r1, #CRUNCH_MVAX2H]
- cfmv32al mvfx0, mvax3
- cfstr32 mvfx0, [r1, #CRUNCH_MVAX3L]
- cfmv32am mvfx0, mvax3
- cfstr32 mvfx0, [r1, #CRUNCH_MVAX3M]
- cfmv32ah mvfx0, mvax3
- cfstr32 mvfx0, [r1, #CRUNCH_MVAX3H]
-
- cfmv32sc mvdx0, dspsc @ save status word
- cfstr64 mvdx0, [r1, #CRUNCH_DSPSC]
-
- teq r0, #0 @ anything to load?
- cfldr64eq mvdx0, [r1, #CRUNCH_MVDX0] @ mvdx0 was clobbered
- moveq pc, lr
-
-crunch_load:
- cfldr64 mvdx0, [r0, #CRUNCH_DSPSC] @ load status word
- cfmvsc32 dspsc, mvdx0
-
- cfldr32 mvfx0, [r0, #CRUNCH_MVAX0L] @ load 72b accumulators
- cfmval32 mvax0, mvfx0
- cfldr32 mvfx0, [r0, #CRUNCH_MVAX0M]
- cfmvam32 mvax0, mvfx0
- cfldr32 mvfx0, [r0, #CRUNCH_MVAX0H]
- cfmvah32 mvax0, mvfx0
- cfldr32 mvfx0, [r0, #CRUNCH_MVAX1L]
- cfmval32 mvax1, mvfx0
- cfldr32 mvfx0, [r0, #CRUNCH_MVAX1M]
- cfmvam32 mvax1, mvfx0
- cfldr32 mvfx0, [r0, #CRUNCH_MVAX1H]
- cfmvah32 mvax1, mvfx0
- cfldr32 mvfx0, [r0, #CRUNCH_MVAX2L]
- cfmval32 mvax2, mvfx0
- cfldr32 mvfx0, [r0, #CRUNCH_MVAX2M]
- cfmvam32 mvax2, mvfx0
- cfldr32 mvfx0, [r0, #CRUNCH_MVAX2H]
- cfmvah32 mvax2, mvfx0
- cfldr32 mvfx0, [r0, #CRUNCH_MVAX3L]
- cfmval32 mvax3, mvfx0
- cfldr32 mvfx0, [r0, #CRUNCH_MVAX3M]
- cfmvam32 mvax3, mvfx0
- cfldr32 mvfx0, [r0, #CRUNCH_MVAX3H]
- cfmvah32 mvax3, mvfx0
-
- cfldr64 mvdx0, [r0, #CRUNCH_MVDX0] @ load 64b registers
- cfldr64 mvdx1, [r0, #CRUNCH_MVDX1]
- cfldr64 mvdx2, [r0, #CRUNCH_MVDX2]
- cfldr64 mvdx3, [r0, #CRUNCH_MVDX3]
- cfldr64 mvdx4, [r0, #CRUNCH_MVDX4]
- cfldr64 mvdx5, [r0, #CRUNCH_MVDX5]
- cfldr64 mvdx6, [r0, #CRUNCH_MVDX6]
- cfldr64 mvdx7, [r0, #CRUNCH_MVDX7]
- cfldr64 mvdx8, [r0, #CRUNCH_MVDX8]
- cfldr64 mvdx9, [r0, #CRUNCH_MVDX9]
- cfldr64 mvdx10, [r0, #CRUNCH_MVDX10]
- cfldr64 mvdx11, [r0, #CRUNCH_MVDX11]
- cfldr64 mvdx12, [r0, #CRUNCH_MVDX12]
- cfldr64 mvdx13, [r0, #CRUNCH_MVDX13]
- cfldr64 mvdx14, [r0, #CRUNCH_MVDX14]
- cfldr64 mvdx15, [r0, #CRUNCH_MVDX15]
-
- mov pc, lr
-
-/*
- * Back up crunch regs to save area and disable access to them
- * (mainly for gdb or sleep mode usage)
- *
- * r0 = struct thread_info pointer of target task or NULL for any
- */
-ENTRY(crunch_task_disable)
- stmfd sp!, {r4, r5, lr}
-
- mrs ip, cpsr
- orr r2, ip, #PSR_I_BIT @ disable interrupts
- msr cpsr_c, r2
-
- ldr r4, =(EP93XX_APB_VIRT_BASE + 0x00130000) @ syscon addr
-
- ldr r3, =crunch_owner
- add r2, r0, #TI_CRUNCH_STATE @ get task crunch save area
- ldr r1, [r3] @ get current crunch owner
- teq r1, #0 @ any current owner?
- beq 1f @ no: quit
- teq r0, #0 @ any owner?
- teqne r1, r2 @ or specified one?
- bne 1f @ no: quit
-
- ldr r5, [r4, #0x80] @ enable access to crunch
- mov r2, #0xaa
- str r2, [r4, #0xc0]
- orr r5, r5, #0x00800000
- str r5, [r4, #0x80]
-
- mov r0, #0 @ nothing to load
- str r0, [r3] @ no more current owner
- ldr r2, [r4, #0x80] @ flush out enable (@@@)
- mov r2, r2
- bl crunch_save
-
- mov r2, #0xaa @ disable access to crunch
- str r2, [r4, #0xc0]
- bic r5, r5, #0x00800000
- str r5, [r4, #0x80]
- ldr r5, [r4, #0x80] @ flush out enable (@@@)
- mov r5, r5
-
-1: msr cpsr_c, ip @ restore interrupt mode
- ldmfd sp!, {r4, r5, pc}
-
-/*
- * Copy crunch state to given memory address
- *
- * r0 = struct thread_info pointer of target task
- * r1 = memory address where to store crunch state
- *
- * this is called mainly in the creation of signal stack frames
- */
-ENTRY(crunch_task_copy)
- mrs ip, cpsr
- orr r2, ip, #PSR_I_BIT @ disable interrupts
- msr cpsr_c, r2
-
- ldr r3, =crunch_owner
- add r2, r0, #TI_CRUNCH_STATE @ get task crunch save area
- ldr r3, [r3] @ get current crunch owner
- teq r2, r3 @ does this task own it...
- beq 1f
-
- @ current crunch values are in the task save area
- msr cpsr_c, ip @ restore interrupt mode
- mov r0, r1
- mov r1, r2
- mov r2, #CRUNCH_SIZE
- b memcpy
-
-1: @ this task owns crunch regs -- grab a copy from there
- mov r0, #0 @ nothing to load
- mov r3, lr @ preserve return address
- bl crunch_save
- msr cpsr_c, ip @ restore interrupt mode
- mov pc, r3
-
-/*
- * Restore crunch state from given memory address
- *
- * r0 = struct thread_info pointer of target task
- * r1 = memory address where to get crunch state from
- *
- * this is used to restore crunch state when unwinding a signal stack frame
- */
-ENTRY(crunch_task_restore)
- mrs ip, cpsr
- orr r2, ip, #PSR_I_BIT @ disable interrupts
- msr cpsr_c, r2
-
- ldr r3, =crunch_owner
- add r2, r0, #TI_CRUNCH_STATE @ get task crunch save area
- ldr r3, [r3] @ get current crunch owner
- teq r2, r3 @ does this task own it...
- beq 1f
-
- @ this task doesn't own crunch regs -- use its save area
- msr cpsr_c, ip @ restore interrupt mode
- mov r0, r2
- mov r2, #CRUNCH_SIZE
- b memcpy
-
-1: @ this task owns crunch regs -- load them directly
- mov r0, r1
- mov r1, #0 @ nothing to save
- mov r3, lr @ preserve return address
- bl crunch_load
- msr cpsr_c, ip @ restore interrupt mode
- mov pc, r3
diff --git a/arch/arm/kernel/crunch.c b/arch/arm/kernel/crunch.c
deleted file mode 100644
index 25ef223ba7f3..000000000000
--- a/arch/arm/kernel/crunch.c
+++ /dev/null
@@ -1,88 +0,0 @@
-/*
- * arch/arm/kernel/crunch.c
- * Cirrus MaverickCrunch context switching and handling
- *
- * Copyright (C) 2006 Lennert Buytenhek <buytenh@wantstofly.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#include <linux/module.h>
-#include <linux/types.h>
-#include <linux/kernel.h>
-#include <linux/signal.h>
-#include <linux/sched.h>
-#include <linux/init.h>
-#include <linux/io.h>
-#include <mach/ep93xx-regs.h>
-#include <asm/thread_notify.h>
-
-struct crunch_state *crunch_owner;
-
-void crunch_task_release(struct thread_info *thread)
-{
- local_irq_disable();
- if (crunch_owner == &thread->crunchstate)
- crunch_owner = NULL;
- local_irq_enable();
-}
-
-static int crunch_enabled(u32 devcfg)
-{
- return !!(devcfg & EP93XX_SYSCON_DEVCFG_CPENA);
-}
-
-static int crunch_do(struct notifier_block *self, unsigned long cmd, void *t)
-{
- struct thread_info *thread = (struct thread_info *)t;
- struct crunch_state *crunch_state;
- u32 devcfg;
-
- crunch_state = &thread->crunchstate;
-
- switch (cmd) {
- case THREAD_NOTIFY_FLUSH:
- memset(crunch_state, 0, sizeof(*crunch_state));
-
- /*
- * FALLTHROUGH: Ensure we don't try to overwrite our newly
- * initialised state information on the first fault.
- */
-
- case THREAD_NOTIFY_EXIT:
- crunch_task_release(thread);
- break;
-
- case THREAD_NOTIFY_SWITCH:
- devcfg = __raw_readl(EP93XX_SYSCON_DEVCFG);
- if (crunch_enabled(devcfg) || crunch_owner == crunch_state) {
- /*
- * We don't use ep93xx_syscon_swlocked_write() here
- * because we are on the context switch path and
- * preemption is already disabled.
- */
- devcfg ^= EP93XX_SYSCON_DEVCFG_CPENA;
- __raw_writel(0xaa, EP93XX_SYSCON_SWLOCK);
- __raw_writel(devcfg, EP93XX_SYSCON_DEVCFG);
- }
- break;
- }
-
- return NOTIFY_DONE;
-}
-
-static struct notifier_block crunch_notifier_block = {
- .notifier_call = crunch_do,
-};
-
-static int __init crunch_init(void)
-{
- thread_register_notifier(&crunch_notifier_block);
- elf_hwcap |= HWCAP_CRUNCH;
-
- return 0;
-}
-
-late_initcall(crunch_init);
diff --git a/arch/arm/kernel/debug.S b/arch/arm/kernel/debug.S
index 204e2160cfcc..c45522c36787 100644
--- a/arch/arm/kernel/debug.S
+++ b/arch/arm/kernel/debug.S
@@ -10,6 +10,7 @@
* 32-bit debugging code
*/
#include <linux/linkage.h>
+#include <asm/assembler.h>
.text
@@ -100,7 +101,7 @@
#endif /* CONFIG_CPU_V6 */
-#else
+#elif !defined(CONFIG_DEBUG_SEMIHOSTING)
#include <mach/debug-macro.S>
#endif /* CONFIG_DEBUG_ICEDCC */
@@ -155,6 +156,8 @@ hexbuf: .space 16
.ltorg
+#ifndef CONFIG_DEBUG_SEMIHOSTING
+
ENTRY(printascii)
addruart_current r3, r1, r2
b 2f
@@ -177,3 +180,24 @@ ENTRY(printch)
mov r0, #0
b 1b
ENDPROC(printch)
+
+#else
+
+ENTRY(printascii)
+ mov r1, r0
+ mov r0, #0x04 @ SYS_WRITE0
+ ARM( svc #0x123456 )
+ THUMB( svc #0xab )
+ mov pc, lr
+ENDPROC(printascii)
+
+ENTRY(printch)
+ adr r1, hexbuf
+ strb r0, [r1]
+ mov r0, #0x03 @ SYS_WRITEC
+ ARM( svc #0x123456 )
+ THUMB( svc #0xab )
+ mov pc, lr
+ENDPROC(printch)
+
+#endif
diff --git a/arch/arm/kernel/ecard.c b/arch/arm/kernel/ecard.c
deleted file mode 100644
index 4dd0edab6a65..000000000000
--- a/arch/arm/kernel/ecard.c
+++ /dev/null
@@ -1,1231 +0,0 @@
-/*
- * linux/arch/arm/kernel/ecard.c
- *
- * Copyright 1995-2001 Russell King
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * Find all installed expansion cards, and handle interrupts from them.
- *
- * Created from information from Acorns RiscOS3 PRMs
- *
- * 08-Dec-1996 RMK Added code for the 9'th expansion card - the ether
- * podule slot.
- * 06-May-1997 RMK Added blacklist for cards whose loader doesn't work.
- * 12-Sep-1997 RMK Created new handling of interrupt enables/disables
- * - cards can now register their own routine to control
- * interrupts (recommended).
- * 29-Sep-1997 RMK Expansion card interrupt hardware not being re-enabled
- * on reset from Linux. (Caused cards not to respond
- * under RiscOS without hard reset).
- * 15-Feb-1998 RMK Added DMA support
- * 12-Sep-1998 RMK Added EASI support
- * 10-Jan-1999 RMK Run loaders in a simulated RISC OS environment.
- * 17-Apr-1999 RMK Support for EASI Type C cycles.
- */
-#define ECARD_C
-
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/types.h>
-#include <linux/sched.h>
-#include <linux/interrupt.h>
-#include <linux/completion.h>
-#include <linux/reboot.h>
-#include <linux/mm.h>
-#include <linux/slab.h>
-#include <linux/proc_fs.h>
-#include <linux/seq_file.h>
-#include <linux/device.h>
-#include <linux/init.h>
-#include <linux/mutex.h>
-#include <linux/kthread.h>
-#include <linux/io.h>
-
-#include <asm/dma.h>
-#include <asm/ecard.h>
-#include <mach/hardware.h>
-#include <asm/irq.h>
-#include <asm/mmu_context.h>
-#include <asm/mach/irq.h>
-#include <asm/tlbflush.h>
-
-#include "ecard.h"
-
-#ifndef CONFIG_ARCH_RPC
-#define HAVE_EXPMASK
-#endif
-
-struct ecard_request {
- void (*fn)(struct ecard_request *);
- ecard_t *ec;
- unsigned int address;
- unsigned int length;
- unsigned int use_loader;
- void *buffer;
- struct completion *complete;
-};
-
-struct expcard_blacklist {
- unsigned short manufacturer;
- unsigned short product;
- const char *type;
-};
-
-static ecard_t *cards;
-static ecard_t *slot_to_expcard[MAX_ECARDS];
-static unsigned int ectcr;
-#ifdef HAS_EXPMASK
-static unsigned int have_expmask;
-#endif
-
-/* List of descriptions of cards which don't have an extended
- * identification, or chunk directories containing a description.
- */
-static struct expcard_blacklist __initdata blacklist[] = {
- { MANU_ACORN, PROD_ACORN_ETHER1, "Acorn Ether1" }
-};
-
-asmlinkage extern int
-ecard_loader_reset(unsigned long base, loader_t loader);
-asmlinkage extern int
-ecard_loader_read(int off, unsigned long base, loader_t loader);
-
-static inline unsigned short ecard_getu16(unsigned char *v)
-{
- return v[0] | v[1] << 8;
-}
-
-static inline signed long ecard_gets24(unsigned char *v)
-{
- return v[0] | v[1] << 8 | v[2] << 16 | ((v[2] & 0x80) ? 0xff000000 : 0);
-}
-
-static inline ecard_t *slot_to_ecard(unsigned int slot)
-{
- return slot < MAX_ECARDS ? slot_to_expcard[slot] : NULL;
-}
-
-/* ===================== Expansion card daemon ======================== */
-/*
- * Since the loader programs on the expansion cards need to be run
- * in a specific environment, create a separate task with this
- * environment up, and pass requests to this task as and when we
- * need to.
- *
- * This should allow 99% of loaders to be called from Linux.
- *
- * From a security standpoint, we trust the card vendors. This
- * may be a misplaced trust.
- */
-static void ecard_task_reset(struct ecard_request *req)
-{
- struct expansion_card *ec = req->ec;
- struct resource *res;
-
- res = ec->slot_no == 8
- ? &ec->resource[ECARD_RES_MEMC]
- : ec->easi
- ? &ec->resource[ECARD_RES_EASI]
- : &ec->resource[ECARD_RES_IOCSYNC];
-
- ecard_loader_reset(res->start, ec->loader);
-}
-
-static void ecard_task_readbytes(struct ecard_request *req)
-{
- struct expansion_card *ec = req->ec;
- unsigned char *buf = req->buffer;
- unsigned int len = req->length;
- unsigned int off = req->address;
-
- if (ec->slot_no == 8) {
- void __iomem *base = (void __iomem *)
- ec->resource[ECARD_RES_MEMC].start;
-
- /*
- * The card maintains an index which increments the address
- * into a 4096-byte page on each access. We need to keep
- * track of the counter.
- */
- static unsigned int index;
- unsigned int page;
-
- page = (off >> 12) * 4;
- if (page > 256 * 4)
- return;
-
- off &= 4095;
-
- /*
- * If we are reading offset 0, or our current index is
- * greater than the offset, reset the hardware index counter.
- */
- if (off == 0 || index > off) {
- writeb(0, base);
- index = 0;
- }
-
- /*
- * Increment the hardware index counter until we get to the
- * required offset. The read bytes are discarded.
- */
- while (index < off) {
- readb(base + page);
- index += 1;
- }
-
- while (len--) {
- *buf++ = readb(base + page);
- index += 1;
- }
- } else {
- unsigned long base = (ec->easi
- ? &ec->resource[ECARD_RES_EASI]
- : &ec->resource[ECARD_RES_IOCSYNC])->start;
- void __iomem *pbase = (void __iomem *)base;
-
- if (!req->use_loader || !ec->loader) {
- off *= 4;
- while (len--) {
- *buf++ = readb(pbase + off);
- off += 4;
- }
- } else {
- while(len--) {
- /*
- * The following is required by some
- * expansion card loader programs.
- */
- *(unsigned long *)0x108 = 0;
- *buf++ = ecard_loader_read(off++, base,
- ec->loader);
- }
- }
- }
-
-}
-
-static DECLARE_WAIT_QUEUE_HEAD(ecard_wait);
-static struct ecard_request *ecard_req;
-static DEFINE_MUTEX(ecard_mutex);
-
-/*
- * Set up the expansion card daemon's page tables.
- */
-static void ecard_init_pgtables(struct mm_struct *mm)
-{
- struct vm_area_struct vma;
-
- /* We want to set up the page tables for the following mapping:
- * Virtual Physical
- * 0x03000000 0x03000000
- * 0x03010000 unmapped
- * 0x03210000 0x03210000
- * 0x03400000 unmapped
- * 0x08000000 0x08000000
- * 0x10000000 unmapped
- *
- * FIXME: we don't follow this 100% yet.
- */
- pgd_t *src_pgd, *dst_pgd;
-
- src_pgd = pgd_offset(mm, (unsigned long)IO_BASE);
- dst_pgd = pgd_offset(mm, IO_START);
-
- memcpy(dst_pgd, src_pgd, sizeof(pgd_t) * (IO_SIZE / PGDIR_SIZE));
-
- src_pgd = pgd_offset(mm, (unsigned long)EASI_BASE);
- dst_pgd = pgd_offset(mm, EASI_START);
-
- memcpy(dst_pgd, src_pgd, sizeof(pgd_t) * (EASI_SIZE / PGDIR_SIZE));
-
- vma.vm_mm = mm;
-
- flush_tlb_range(&vma, IO_START, IO_START + IO_SIZE);
- flush_tlb_range(&vma, EASI_START, EASI_START + EASI_SIZE);
-}
-
-static int ecard_init_mm(void)
-{
- struct mm_struct * mm = mm_alloc();
- struct mm_struct *active_mm = current->active_mm;
-
- if (!mm)
- return -ENOMEM;
-
- current->mm = mm;
- current->active_mm = mm;
- activate_mm(active_mm, mm);
- mmdrop(active_mm);
- ecard_init_pgtables(mm);
- return 0;
-}
-
-static int
-ecard_task(void * unused)
-{
- /*
- * Allocate a mm. We're not a lazy-TLB kernel task since we need
- * to set page table entries where the user space would be. Note
- * that this also creates the page tables. Failure is not an
- * option here.
- */
- if (ecard_init_mm())
- panic("kecardd: unable to alloc mm\n");
-
- while (1) {
- struct ecard_request *req;
-
- wait_event_interruptible(ecard_wait, ecard_req != NULL);
-
- req = xchg(&ecard_req, NULL);
- if (req != NULL) {
- req->fn(req);
- complete(req->complete);
- }
- }
-}
-
-/*
- * Wake the expansion card daemon to action our request.
- *
- * FIXME: The test here is not sufficient to detect if the
- * kcardd is running.
- */
-static void ecard_call(struct ecard_request *req)
-{
- DECLARE_COMPLETION_ONSTACK(completion);
-
- req->complete = &completion;
-
- mutex_lock(&ecard_mutex);
- ecard_req = req;
- wake_up(&ecard_wait);
-
- /*
- * Now wait for kecardd to run.
- */
- wait_for_completion(&completion);
- mutex_unlock(&ecard_mutex);
-}
-
-/* ======================= Mid-level card control ===================== */
-
-static void
-ecard_readbytes(void *addr, ecard_t *ec, int off, int len, int useld)
-{
- struct ecard_request req;
-
- req.fn = ecard_task_readbytes;
- req.ec = ec;
- req.address = off;
- req.length = len;
- req.use_loader = useld;
- req.buffer = addr;
-
- ecard_call(&req);
-}
-
-int ecard_readchunk(struct in_chunk_dir *cd, ecard_t *ec, int id, int num)
-{
- struct ex_chunk_dir excd;
- int index = 16;
- int useld = 0;
-
- if (!ec->cid.cd)
- return 0;
-
- while(1) {
- ecard_readbytes(&excd, ec, index, 8, useld);
- index += 8;
- if (c_id(&excd) == 0) {
- if (!useld && ec->loader) {
- useld = 1;
- index = 0;
- continue;
- }
- return 0;
- }
- if (c_id(&excd) == 0xf0) { /* link */
- index = c_start(&excd);
- continue;
- }
- if (c_id(&excd) == 0x80) { /* loader */
- if (!ec->loader) {
- ec->loader = kmalloc(c_len(&excd),
- GFP_KERNEL);
- if (ec->loader)
- ecard_readbytes(ec->loader, ec,
- (int)c_start(&excd),
- c_len(&excd), useld);
- else
- return 0;
- }
- continue;
- }
- if (c_id(&excd) == id && num-- == 0)
- break;
- }
-
- if (c_id(&excd) & 0x80) {
- switch (c_id(&excd) & 0x70) {
- case 0x70:
- ecard_readbytes((unsigned char *)excd.d.string, ec,
- (int)c_start(&excd), c_len(&excd),
- useld);
- break;
- case 0x00:
- break;
- }
- }
- cd->start_offset = c_start(&excd);
- memcpy(cd->d.string, excd.d.string, 256);
- return 1;
-}
-
-/* ======================= Interrupt control ============================ */
-
-static void ecard_def_irq_enable(ecard_t *ec, int irqnr)
-{
-#ifdef HAS_EXPMASK
- if (irqnr < 4 && have_expmask) {
- have_expmask |= 1 << irqnr;
- __raw_writeb(have_expmask, EXPMASK_ENABLE);
- }
-#endif
-}
-
-static void ecard_def_irq_disable(ecard_t *ec, int irqnr)
-{
-#ifdef HAS_EXPMASK
- if (irqnr < 4 && have_expmask) {
- have_expmask &= ~(1 << irqnr);
- __raw_writeb(have_expmask, EXPMASK_ENABLE);
- }
-#endif
-}
-
-static int ecard_def_irq_pending(ecard_t *ec)
-{
- return !ec->irqmask || readb(ec->irqaddr) & ec->irqmask;
-}
-
-static void ecard_def_fiq_enable(ecard_t *ec, int fiqnr)
-{
- panic("ecard_def_fiq_enable called - impossible");
-}
-
-static void ecard_def_fiq_disable(ecard_t *ec, int fiqnr)
-{
- panic("ecard_def_fiq_disable called - impossible");
-}
-
-static int ecard_def_fiq_pending(ecard_t *ec)
-{
- return !ec->fiqmask || readb(ec->fiqaddr) & ec->fiqmask;
-}
-
-static expansioncard_ops_t ecard_default_ops = {
- ecard_def_irq_enable,
- ecard_def_irq_disable,
- ecard_def_irq_pending,
- ecard_def_fiq_enable,
- ecard_def_fiq_disable,
- ecard_def_fiq_pending
-};
-
-/*
- * Enable and disable interrupts from expansion cards.
- * (interrupts are disabled for these functions).
- *
- * They are not meant to be called directly, but via enable/disable_irq.
- */
-static void ecard_irq_unmask(struct irq_data *d)
-{
- ecard_t *ec = slot_to_ecard(d->irq - 32);
-
- if (ec) {
- if (!ec->ops)
- ec->ops = &ecard_default_ops;
-
- if (ec->claimed && ec->ops->irqenable)
- ec->ops->irqenable(ec, d->irq);
- else
- printk(KERN_ERR "ecard: rejecting request to "
- "enable IRQs for %d\n", d->irq);
- }
-}
-
-static void ecard_irq_mask(struct irq_data *d)
-{
- ecard_t *ec = slot_to_ecard(d->irq - 32);
-
- if (ec) {
- if (!ec->ops)
- ec->ops = &ecard_default_ops;
-
- if (ec->ops && ec->ops->irqdisable)
- ec->ops->irqdisable(ec, d->irq);
- }
-}
-
-static struct irq_chip ecard_chip = {
- .name = "ECARD",
- .irq_ack = ecard_irq_mask,
- .irq_mask = ecard_irq_mask,
- .irq_unmask = ecard_irq_unmask,
-};
-
-void ecard_enablefiq(unsigned int fiqnr)
-{
- ecard_t *ec = slot_to_ecard(fiqnr);
-
- if (ec) {
- if (!ec->ops)
- ec->ops = &ecard_default_ops;
-
- if (ec->claimed && ec->ops->fiqenable)
- ec->ops->fiqenable(ec, fiqnr);
- else
- printk(KERN_ERR "ecard: rejecting request to "
- "enable FIQs for %d\n", fiqnr);
- }
-}
-
-void ecard_disablefiq(unsigned int fiqnr)
-{
- ecard_t *ec = slot_to_ecard(fiqnr);
-
- if (ec) {
- if (!ec->ops)
- ec->ops = &ecard_default_ops;
-
- if (ec->ops->fiqdisable)
- ec->ops->fiqdisable(ec, fiqnr);
- }
-}
-
-static void ecard_dump_irq_state(void)
-{
- ecard_t *ec;
-
- printk("Expansion card IRQ state:\n");
-
- for (ec = cards; ec; ec = ec->next) {
- if (ec->slot_no == 8)
- continue;
-
- printk(" %d: %sclaimed, ",
- ec->slot_no, ec->claimed ? "" : "not ");
-
- if (ec->ops && ec->ops->irqpending &&
- ec->ops != &ecard_default_ops)
- printk("irq %spending\n",
- ec->ops->irqpending(ec) ? "" : "not ");
- else
- printk("irqaddr %p, mask = %02X, status = %02X\n",
- ec->irqaddr, ec->irqmask, readb(ec->irqaddr));
- }
-}
-
-static void ecard_check_lockup(struct irq_desc *desc)
-{
- static unsigned long last;
- static int lockup;
-
- /*
- * If the timer interrupt has not run since the last million
- * unrecognised expansion card interrupts, then there is
- * something seriously wrong. Disable the expansion card
- * interrupts so at least we can continue.
- *
- * Maybe we ought to start a timer to re-enable them some time
- * later?
- */
- if (last == jiffies) {
- lockup += 1;
- if (lockup > 1000000) {
- printk(KERN_ERR "\nInterrupt lockup detected - "
- "disabling all expansion card interrupts\n");
-
- desc->irq_data.chip->irq_mask(&desc->irq_data);
- ecard_dump_irq_state();
- }
- } else
- lockup = 0;
-
- /*
- * If we did not recognise the source of this interrupt,
- * warn the user, but don't flood the user with these messages.
- */
- if (!last || time_after(jiffies, last + 5*HZ)) {
- last = jiffies;
- printk(KERN_WARNING "Unrecognised interrupt from backplane\n");
- ecard_dump_irq_state();
- }
-}
-
-static void
-ecard_irq_handler(unsigned int irq, struct irq_desc *desc)
-{
- ecard_t *ec;
- int called = 0;
-
- desc->irq_data.chip->irq_mask(&desc->irq_data);
- for (ec = cards; ec; ec = ec->next) {
- int pending;
-
- if (!ec->claimed || ec->irq == NO_IRQ || ec->slot_no == 8)
- continue;
-
- if (ec->ops && ec->ops->irqpending)
- pending = ec->ops->irqpending(ec);
- else
- pending = ecard_default_ops.irqpending(ec);
-
- if (pending) {
- generic_handle_irq(ec->irq);
- called ++;
- }
- }
- desc->irq_data.chip->irq_unmask(&desc->irq_data);
-
- if (called == 0)
- ecard_check_lockup(desc);
-}
-
-#ifdef HAS_EXPMASK
-static unsigned char priority_masks[] =
-{
- 0xf0, 0xf1, 0xf3, 0xf7, 0xff, 0xff, 0xff, 0xff
-};
-
-static unsigned char first_set[] =
-{
- 0x00, 0x00, 0x01, 0x00, 0x02, 0x00, 0x01, 0x00,
- 0x03, 0x00, 0x01, 0x00, 0x02, 0x00, 0x01, 0x00
-};
-
-static void
-ecard_irqexp_handler(unsigned int irq, struct irq_desc *desc)
-{
- const unsigned int statusmask = 15;
- unsigned int status;
-
- status = __raw_readb(EXPMASK_STATUS) & statusmask;
- if (status) {
- unsigned int slot = first_set[status];
- ecard_t *ec = slot_to_ecard(slot);
-
- if (ec->claimed) {
- /*
- * this ugly code is so that we can operate a
- * prioritorising system:
- *
- * Card 0 highest priority
- * Card 1
- * Card 2
- * Card 3 lowest priority
- *
- * Serial cards should go in 0/1, ethernet/scsi in 2/3
- * otherwise you will lose serial data at high speeds!
- */
- generic_handle_irq(ec->irq);
- } else {
- printk(KERN_WARNING "card%d: interrupt from unclaimed "
- "card???\n", slot);
- have_expmask &= ~(1 << slot);
- __raw_writeb(have_expmask, EXPMASK_ENABLE);
- }
- } else
- printk(KERN_WARNING "Wild interrupt from backplane (masks)\n");
-}
-
-static int __init ecard_probeirqhw(void)
-{
- ecard_t *ec;
- int found;
-
- __raw_writeb(0x00, EXPMASK_ENABLE);
- __raw_writeb(0xff, EXPMASK_STATUS);
- found = (__raw_readb(EXPMASK_STATUS) & 15) == 0;
- __raw_writeb(0xff, EXPMASK_ENABLE);
-
- if (found) {
- printk(KERN_DEBUG "Expansion card interrupt "
- "management hardware found\n");
-
- /* for each card present, set a bit to '1' */
- have_expmask = 0x80000000;
-
- for (ec = cards; ec; ec = ec->next)
- have_expmask |= 1 << ec->slot_no;
-
- __raw_writeb(have_expmask, EXPMASK_ENABLE);
- }
-
- return found;
-}
-#else
-#define ecard_irqexp_handler NULL
-#define ecard_probeirqhw() (0)
-#endif
-
-static void __iomem *__ecard_address(ecard_t *ec, card_type_t type, card_speed_t speed)
-{
- void __iomem *address = NULL;
- int slot = ec->slot_no;
-
- if (ec->slot_no == 8)
- return ECARD_MEMC8_BASE;
-
- ectcr &= ~(1 << slot);
-
- switch (type) {
- case ECARD_MEMC:
- if (slot < 4)
- address = ECARD_MEMC_BASE + (slot << 14);
- break;
-
- case ECARD_IOC:
- if (slot < 4)
- address = ECARD_IOC_BASE + (slot << 14);
- else
- address = ECARD_IOC4_BASE + ((slot - 4) << 14);
- if (address)
- address += speed << 19;
- break;
-
- case ECARD_EASI:
- address = ECARD_EASI_BASE + (slot << 24);
- if (speed == ECARD_FAST)
- ectcr |= 1 << slot;
- break;
-
- default:
- break;
- }
-
-#ifdef IOMD_ECTCR
- iomd_writeb(ectcr, IOMD_ECTCR);
-#endif
- return address;
-}
-
-static int ecard_prints(struct seq_file *m, ecard_t *ec)
-{
- seq_printf(m, " %d: %s ", ec->slot_no, ec->easi ? "EASI" : " ");
-
- if (ec->cid.id == 0) {
- struct in_chunk_dir incd;
-
- seq_printf(m, "[%04X:%04X] ",
- ec->cid.manufacturer, ec->cid.product);
-
- if (!ec->card_desc && ec->cid.cd &&
- ecard_readchunk(&incd, ec, 0xf5, 0)) {
- ec->card_desc = kmalloc(strlen(incd.d.string)+1, GFP_KERNEL);
-
- if (ec->card_desc)
- strcpy((char *)ec->card_desc, incd.d.string);
- }
-
- seq_printf(m, "%s\n", ec->card_desc ? ec->card_desc : "*unknown*");
- } else
- seq_printf(m, "Simple card %d\n", ec->cid.id);
-
- return 0;
-}
-
-static int ecard_devices_proc_show(struct seq_file *m, void *v)
-{
- ecard_t *ec = cards;
-
- while (ec) {
- ecard_prints(m, ec);
- ec = ec->next;
- }
- return 0;
-}
-
-static int ecard_devices_proc_open(struct inode *inode, struct file *file)
-{
- return single_open(file, ecard_devices_proc_show, NULL);
-}
-
-static const struct file_operations bus_ecard_proc_fops = {
- .owner = THIS_MODULE,
- .open = ecard_devices_proc_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
-};
-
-static struct proc_dir_entry *proc_bus_ecard_dir = NULL;
-
-static void ecard_proc_init(void)
-{
- proc_bus_ecard_dir = proc_mkdir("bus/ecard", NULL);
- proc_create("devices", 0, proc_bus_ecard_dir, &bus_ecard_proc_fops);
-}
-
-#define ec_set_resource(ec,nr,st,sz) \
- do { \
- (ec)->resource[nr].name = dev_name(&ec->dev); \
- (ec)->resource[nr].start = st; \
- (ec)->resource[nr].end = (st) + (sz) - 1; \
- (ec)->resource[nr].flags = IORESOURCE_MEM; \
- } while (0)
-
-static void __init ecard_free_card(struct expansion_card *ec)
-{
- int i;
-
- for (i = 0; i < ECARD_NUM_RESOURCES; i++)
- if (ec->resource[i].flags)
- release_resource(&ec->resource[i]);
-
- kfree(ec);
-}
-
-static struct expansion_card *__init ecard_alloc_card(int type, int slot)
-{
- struct expansion_card *ec;
- unsigned long base;
- int i;
-
- ec = kzalloc(sizeof(ecard_t), GFP_KERNEL);
- if (!ec) {
- ec = ERR_PTR(-ENOMEM);
- goto nomem;
- }
-
- ec->slot_no = slot;
- ec->easi = type == ECARD_EASI;
- ec->irq = NO_IRQ;
- ec->fiq = NO_IRQ;
- ec->dma = NO_DMA;
- ec->ops = &ecard_default_ops;
-
- dev_set_name(&ec->dev, "ecard%d", slot);
- ec->dev.parent = NULL;
- ec->dev.bus = &ecard_bus_type;
- ec->dev.dma_mask = &ec->dma_mask;
- ec->dma_mask = (u64)0xffffffff;
- ec->dev.coherent_dma_mask = ec->dma_mask;
-
- if (slot < 4) {
- ec_set_resource(ec, ECARD_RES_MEMC,
- PODSLOT_MEMC_BASE + (slot << 14),
- PODSLOT_MEMC_SIZE);
- base = PODSLOT_IOC0_BASE + (slot << 14);
- } else
- base = PODSLOT_IOC4_BASE + ((slot - 4) << 14);
-
-#ifdef CONFIG_ARCH_RPC
- if (slot < 8) {
- ec_set_resource(ec, ECARD_RES_EASI,
- PODSLOT_EASI_BASE + (slot << 24),
- PODSLOT_EASI_SIZE);
- }
-
- if (slot == 8) {
- ec_set_resource(ec, ECARD_RES_MEMC, NETSLOT_BASE, NETSLOT_SIZE);
- } else
-#endif
-
- for (i = 0; i <= ECARD_RES_IOCSYNC - ECARD_RES_IOCSLOW; i++)
- ec_set_resource(ec, i + ECARD_RES_IOCSLOW,
- base + (i << 19), PODSLOT_IOC_SIZE);
-
- for (i = 0; i < ECARD_NUM_RESOURCES; i++) {
- if (ec->resource[i].flags &&
- request_resource(&iomem_resource, &ec->resource[i])) {
- dev_err(&ec->dev, "resource(s) not available\n");
- ec->resource[i].end -= ec->resource[i].start;
- ec->resource[i].start = 0;
- ec->resource[i].flags = 0;
- }
- }
-
- nomem:
- return ec;
-}
-
-static ssize_t ecard_show_irq(struct device *dev, struct device_attribute *attr, char *buf)
-{
- struct expansion_card *ec = ECARD_DEV(dev);
- return sprintf(buf, "%u\n", ec->irq);
-}
-
-static ssize_t ecard_show_dma(struct device *dev, struct device_attribute *attr, char *buf)
-{
- struct expansion_card *ec = ECARD_DEV(dev);
- return sprintf(buf, "%u\n", ec->dma);
-}
-
-static ssize_t ecard_show_resources(struct device *dev, struct device_attribute *attr, char *buf)
-{
- struct expansion_card *ec = ECARD_DEV(dev);
- char *str = buf;
- int i;
-
- for (i = 0; i < ECARD_NUM_RESOURCES; i++)
- str += sprintf(str, "%08x %08x %08lx\n",
- ec->resource[i].start,
- ec->resource[i].end,
- ec->resource[i].flags);
-
- return str - buf;
-}
-
-static ssize_t ecard_show_vendor(struct device *dev, struct device_attribute *attr, char *buf)
-{
- struct expansion_card *ec = ECARD_DEV(dev);
- return sprintf(buf, "%u\n", ec->cid.manufacturer);
-}
-
-static ssize_t ecard_show_device(struct device *dev, struct device_attribute *attr, char *buf)
-{
- struct expansion_card *ec = ECARD_DEV(dev);
- return sprintf(buf, "%u\n", ec->cid.product);
-}
-
-static ssize_t ecard_show_type(struct device *dev, struct device_attribute *attr, char *buf)
-{
- struct expansion_card *ec = ECARD_DEV(dev);
- return sprintf(buf, "%s\n", ec->easi ? "EASI" : "IOC");
-}
-
-static struct device_attribute ecard_dev_attrs[] = {
- __ATTR(device, S_IRUGO, ecard_show_device, NULL),
- __ATTR(dma, S_IRUGO, ecard_show_dma, NULL),
- __ATTR(irq, S_IRUGO, ecard_show_irq, NULL),
- __ATTR(resource, S_IRUGO, ecard_show_resources, NULL),
- __ATTR(type, S_IRUGO, ecard_show_type, NULL),
- __ATTR(vendor, S_IRUGO, ecard_show_vendor, NULL),
- __ATTR_NULL,
-};
-
-
-int ecard_request_resources(struct expansion_card *ec)
-{
- int i, err = 0;
-
- for (i = 0; i < ECARD_NUM_RESOURCES; i++) {
- if (ecard_resource_end(ec, i) &&
- !request_mem_region(ecard_resource_start(ec, i),
- ecard_resource_len(ec, i),
- ec->dev.driver->name)) {
- err = -EBUSY;
- break;
- }
- }
-
- if (err) {
- while (i--)
- if (ecard_resource_end(ec, i))
- release_mem_region(ecard_resource_start(ec, i),
- ecard_resource_len(ec, i));
- }
- return err;
-}
-EXPORT_SYMBOL(ecard_request_resources);
-
-void ecard_release_resources(struct expansion_card *ec)
-{
- int i;
-
- for (i = 0; i < ECARD_NUM_RESOURCES; i++)
- if (ecard_resource_end(ec, i))
- release_mem_region(ecard_resource_start(ec, i),
- ecard_resource_len(ec, i));
-}
-EXPORT_SYMBOL(ecard_release_resources);
-
-void ecard_setirq(struct expansion_card *ec, const struct expansion_card_ops *ops, void *irq_data)
-{
- ec->irq_data = irq_data;
- barrier();
- ec->ops = ops;
-}
-EXPORT_SYMBOL(ecard_setirq);
-
-void __iomem *ecardm_iomap(struct expansion_card *ec, unsigned int res,
- unsigned long offset, unsigned long maxsize)
-{
- unsigned long start = ecard_resource_start(ec, res);
- unsigned long end = ecard_resource_end(ec, res);
-
- if (offset > (end - start))
- return NULL;
-
- start += offset;
- if (maxsize && end - start > maxsize)
- end = start + maxsize;
-
- return devm_ioremap(&ec->dev, start, end - start);
-}
-EXPORT_SYMBOL(ecardm_iomap);
-
-/*
- * Probe for an expansion card.
- *
- * If bit 1 of the first byte of the card is set, then the
- * card does not exist.
- */
-static int __init
-ecard_probe(int slot, card_type_t type)
-{
- ecard_t **ecp;
- ecard_t *ec;
- struct ex_ecid cid;
- void __iomem *addr;
- int i, rc;
-
- ec = ecard_alloc_card(type, slot);
- if (IS_ERR(ec)) {
- rc = PTR_ERR(ec);
- goto nomem;
- }
-
- rc = -ENODEV;
- if ((addr = __ecard_address(ec, type, ECARD_SYNC)) == NULL)
- goto nodev;
-
- cid.r_zero = 1;
- ecard_readbytes(&cid, ec, 0, 16, 0);
- if (cid.r_zero)
- goto nodev;
-
- ec->cid.id = cid.r_id;
- ec->cid.cd = cid.r_cd;
- ec->cid.is = cid.r_is;
- ec->cid.w = cid.r_w;
- ec->cid.manufacturer = ecard_getu16(cid.r_manu);
- ec->cid.product = ecard_getu16(cid.r_prod);
- ec->cid.country = cid.r_country;
- ec->cid.irqmask = cid.r_irqmask;
- ec->cid.irqoff = ecard_gets24(cid.r_irqoff);
- ec->cid.fiqmask = cid.r_fiqmask;
- ec->cid.fiqoff = ecard_gets24(cid.r_fiqoff);
- ec->fiqaddr =
- ec->irqaddr = addr;
-
- if (ec->cid.is) {
- ec->irqmask = ec->cid.irqmask;
- ec->irqaddr += ec->cid.irqoff;
- ec->fiqmask = ec->cid.fiqmask;
- ec->fiqaddr += ec->cid.fiqoff;
- } else {
- ec->irqmask = 1;
- ec->fiqmask = 4;
- }
-
- for (i = 0; i < ARRAY_SIZE(blacklist); i++)
- if (blacklist[i].manufacturer == ec->cid.manufacturer &&
- blacklist[i].product == ec->cid.product) {
- ec->card_desc = blacklist[i].type;
- break;
- }
-
- /*
- * hook the interrupt handlers
- */
- if (slot < 8) {
- ec->irq = 32 + slot;
- irq_set_chip_and_handler(ec->irq, &ecard_chip,
- handle_level_irq);
- set_irq_flags(ec->irq, IRQF_VALID);
- }
-
- if (slot == 8)
- ec->irq = 11;
-#ifdef CONFIG_ARCH_RPC
- /* On RiscPC, only first two slots have DMA capability */
- if (slot < 2)
- ec->dma = 2 + slot;
-#endif
-
- for (ecp = &cards; *ecp; ecp = &(*ecp)->next);
-
- *ecp = ec;
- slot_to_expcard[slot] = ec;
-
- device_register(&ec->dev);
-
- return 0;
-
- nodev:
- ecard_free_card(ec);
- nomem:
- return rc;
-}
-
-/*
- * Initialise the expansion card system.
- * Locate all hardware - interrupt management and
- * actual cards.
- */
-static int __init ecard_init(void)
-{
- struct task_struct *task;
- int slot, irqhw;
-
- task = kthread_run(ecard_task, NULL, "kecardd");
- if (IS_ERR(task)) {
- printk(KERN_ERR "Ecard: unable to create kernel thread: %ld\n",
- PTR_ERR(task));
- return PTR_ERR(task);
- }
-
- printk("Probing expansion cards\n");
-
- for (slot = 0; slot < 8; slot ++) {
- if (ecard_probe(slot, ECARD_EASI) == -ENODEV)
- ecard_probe(slot, ECARD_IOC);
- }
-
- ecard_probe(8, ECARD_IOC);
-
- irqhw = ecard_probeirqhw();
-
- irq_set_chained_handler(IRQ_EXPANSIONCARD,
- irqhw ? ecard_irqexp_handler : ecard_irq_handler);
-
- ecard_proc_init();
-
- return 0;
-}
-
-subsys_initcall(ecard_init);
-
-/*
- * ECARD "bus"
- */
-static const struct ecard_id *
-ecard_match_device(const struct ecard_id *ids, struct expansion_card *ec)
-{
- int i;
-
- for (i = 0; ids[i].manufacturer != 65535; i++)
- if (ec->cid.manufacturer == ids[i].manufacturer &&
- ec->cid.product == ids[i].product)
- return ids + i;
-
- return NULL;
-}
-
-static int ecard_drv_probe(struct device *dev)
-{
- struct expansion_card *ec = ECARD_DEV(dev);
- struct ecard_driver *drv = ECARD_DRV(dev->driver);
- const struct ecard_id *id;
- int ret;
-
- id = ecard_match_device(drv->id_table, ec);
-
- ec->claimed = 1;
- ret = drv->probe(ec, id);
- if (ret)
- ec->claimed = 0;
- return ret;
-}
-
-static int ecard_drv_remove(struct device *dev)
-{
- struct expansion_card *ec = ECARD_DEV(dev);
- struct ecard_driver *drv = ECARD_DRV(dev->driver);
-
- drv->remove(ec);
- ec->claimed = 0;
-
- /*
- * Restore the default operations. We ensure that the
- * ops are set before we change the data.
- */
- ec->ops = &ecard_default_ops;
- barrier();
- ec->irq_data = NULL;
-
- return 0;
-}
-
-/*
- * Before rebooting, we must make sure that the expansion card is in a
- * sensible state, so it can be re-detected. This means that the first
- * page of the ROM must be visible. We call the expansion cards reset
- * handler, if any.
- */
-static void ecard_drv_shutdown(struct device *dev)
-{
- struct expansion_card *ec = ECARD_DEV(dev);
- struct ecard_driver *drv = ECARD_DRV(dev->driver);
- struct ecard_request req;
-
- if (dev->driver) {
- if (drv->shutdown)
- drv->shutdown(ec);
- ec->claimed = 0;
- }
-
- /*
- * If this card has a loader, call the reset handler.
- */
- if (ec->loader) {
- req.fn = ecard_task_reset;
- req.ec = ec;
- ecard_call(&req);
- }
-}
-
-int ecard_register_driver(struct ecard_driver *drv)
-{
- drv->drv.bus = &ecard_bus_type;
-
- return driver_register(&drv->drv);
-}
-
-void ecard_remove_driver(struct ecard_driver *drv)
-{
- driver_unregister(&drv->drv);
-}
-
-static int ecard_match(struct device *_dev, struct device_driver *_drv)
-{
- struct expansion_card *ec = ECARD_DEV(_dev);
- struct ecard_driver *drv = ECARD_DRV(_drv);
- int ret;
-
- if (drv->id_table) {
- ret = ecard_match_device(drv->id_table, ec) != NULL;
- } else {
- ret = ec->cid.id == drv->id;
- }
-
- return ret;
-}
-
-struct bus_type ecard_bus_type = {
- .name = "ecard",
- .dev_attrs = ecard_dev_attrs,
- .match = ecard_match,
- .probe = ecard_drv_probe,
- .remove = ecard_drv_remove,
- .shutdown = ecard_drv_shutdown,
-};
-
-static int ecard_bus_init(void)
-{
- return bus_register(&ecard_bus_type);
-}
-
-postcore_initcall(ecard_bus_init);
-
-EXPORT_SYMBOL(ecard_readchunk);
-EXPORT_SYMBOL(ecard_register_driver);
-EXPORT_SYMBOL(ecard_remove_driver);
-EXPORT_SYMBOL(ecard_bus_type);
diff --git a/arch/arm/kernel/ecard.h b/arch/arm/kernel/ecard.h
deleted file mode 100644
index 4642d436be2a..000000000000
--- a/arch/arm/kernel/ecard.h
+++ /dev/null
@@ -1,69 +0,0 @@
-/*
- * ecard.h
- *
- * Copyright 2007 Russell King
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-/* Definitions internal to ecard.c - for it's use only!!
- *
- * External expansion card header as read from the card
- */
-struct ex_ecid {
- unsigned char r_irq:1;
- unsigned char r_zero:1;
- unsigned char r_fiq:1;
- unsigned char r_id:4;
- unsigned char r_a:1;
-
- unsigned char r_cd:1;
- unsigned char r_is:1;
- unsigned char r_w:2;
- unsigned char r_r1:4;
-
- unsigned char r_r2:8;
-
- unsigned char r_prod[2];
-
- unsigned char r_manu[2];
-
- unsigned char r_country;
-
- unsigned char r_fiqmask;
- unsigned char r_fiqoff[3];
-
- unsigned char r_irqmask;
- unsigned char r_irqoff[3];
-};
-
-/*
- * Chunk directory entry as read from the card
- */
-struct ex_chunk_dir {
- unsigned char r_id;
- unsigned char r_len[3];
- unsigned long r_start;
- union {
- char string[256];
- char data[1];
- } d;
-#define c_id(x) ((x)->r_id)
-#define c_len(x) ((x)->r_len[0]|((x)->r_len[1]<<8)|((x)->r_len[2]<<16))
-#define c_start(x) ((x)->r_start)
-};
-
-typedef enum ecard_type { /* Cards address space */
- ECARD_IOC,
- ECARD_MEMC,
- ECARD_EASI
-} card_type_t;
-
-typedef enum { /* Speed for ECARD_IOC space */
- ECARD_SLOW = 0,
- ECARD_MEDIUM = 1,
- ECARD_FAST = 2,
- ECARD_SYNC = 3
-} card_speed_t;
diff --git a/arch/arm/kernel/elf.c b/arch/arm/kernel/elf.c
index ddba41d1fcf1..d0d1e83150c9 100644
--- a/arch/arm/kernel/elf.c
+++ b/arch/arm/kernel/elf.c
@@ -3,6 +3,7 @@
#include <linux/personality.h>
#include <linux/binfmts.h>
#include <linux/elf.h>
+#include <asm/system_info.h>
int elf_check_arch(const struct elf32_hdr *x)
{
diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S
index 3a456c6c7005..7fd3ad048da9 100644
--- a/arch/arm/kernel/entry-armv.S
+++ b/arch/arm/kernel/entry-armv.S
@@ -15,16 +15,19 @@
* that causes it to save wrong values... Be aware!
*/
+#include <asm/assembler.h>
#include <asm/memory.h>
#include <asm/glue-df.h>
#include <asm/glue-pf.h>
#include <asm/vfpmacros.h>
+#ifndef CONFIG_MULTI_IRQ_HANDLER
#include <mach/entry-macro.S>
+#endif
#include <asm/thread_notify.h>
#include <asm/unwind.h>
#include <asm/unistd.h>
#include <asm/tls.h>
-#include <asm/system.h>
+#include <asm/system_info.h>
#include "entry-header.S"
#include <asm/entry-macro-multi.S>
@@ -790,7 +793,7 @@ __kuser_cmpxchg64: @ 0xffff0f60
smp_dmb arm
rsbs r0, r3, #0 @ set returned val and C flag
ldmfd sp!, {r4, r5, r6, r7}
- bx lr
+ usr_ret lr
#elif !defined(CONFIG_SMP)
@@ -1101,7 +1104,6 @@ __stubs_start:
* get out of that mode without clobbering one register.
*/
vector_fiq:
- disable_fiq
subs pc, lr, #4
/*=============================================================================
diff --git a/arch/arm/kernel/entry-common.S b/arch/arm/kernel/entry-common.S
index b2a27b6b0046..54ee265dd819 100644
--- a/arch/arm/kernel/entry-common.S
+++ b/arch/arm/kernel/entry-common.S
@@ -10,9 +10,15 @@
#include <asm/unistd.h>
#include <asm/ftrace.h>
-#include <mach/entry-macro.S>
#include <asm/unwind.h>
+#ifdef CONFIG_NEED_RET_TO_USER
+#include <mach/entry-macro.S>
+#else
+ .macro arch_ret_to_user, tmp1, tmp2
+ .endm
+#endif
+
#include "entry-header.S"
@@ -87,7 +93,7 @@ ENTRY(ret_from_fork)
get_thread_info tsk
ldr r1, [tsk, #TI_FLAGS] @ check for syscall tracing
mov why, #1
- tst r1, #_TIF_SYSCALL_TRACE @ are we tracing syscalls?
+ tst r1, #_TIF_SYSCALL_WORK @ are we tracing syscalls?
beq ret_slow_syscall
mov r1, sp
mov r0, #1 @ trace exit [IP = 1]
@@ -149,6 +155,11 @@ ENDPROC(ret_from_fork)
#endif
#endif
+.macro mcount_adjust_addr rd, rn
+ bic \rd, \rn, #1 @ clear the Thumb bit if present
+ sub \rd, \rd, #MCOUNT_INSN_SIZE
+.endm
+
.macro __mcount suffix
mcount_enter
ldr r0, =ftrace_trace_function
@@ -173,8 +184,7 @@ ENDPROC(ret_from_fork)
mcount_exit
1: mcount_get_lr r1 @ lr of instrumented func
- mov r0, lr @ instrumented function
- sub r0, r0, #MCOUNT_INSN_SIZE
+ mcount_adjust_addr r0, lr @ instrumented function
adr lr, BSYM(2f)
mov pc, r2
2: mcount_exit
@@ -184,8 +194,7 @@ ENDPROC(ret_from_fork)
mcount_enter
mcount_get_lr r1 @ lr of instrumented func
- mov r0, lr @ instrumented function
- sub r0, r0, #MCOUNT_INSN_SIZE
+ mcount_adjust_addr r0, lr @ instrumented function
.globl ftrace_call\suffix
ftrace_call\suffix:
@@ -205,11 +214,11 @@ ftrace_graph_call\suffix:
#ifdef CONFIG_DYNAMIC_FTRACE
@ called from __ftrace_caller, saved in mcount_enter
ldr r1, [sp, #16] @ instrumented routine (func)
+ mcount_adjust_addr r1, r1
#else
@ called from __mcount, untouched in lr
- mov r1, lr @ instrumented routine (func)
+ mcount_adjust_addr r1, lr @ instrumented routine (func)
#endif
- sub r1, r1, #MCOUNT_INSN_SIZE
mov r2, fp @ frame pointer
bl prepare_ftrace_return
mcount_exit
@@ -443,7 +452,7 @@ ENTRY(vector_swi)
1:
#endif
- tst r10, #_TIF_SYSCALL_TRACE @ are we tracing syscalls?
+ tst r10, #_TIF_SYSCALL_WORK @ are we tracing syscalls?
bne __sys_trace
cmp scno, #NR_syscalls @ check upper syscall limit
diff --git a/arch/arm/kernel/fiq.c b/arch/arm/kernel/fiq.c
index 4c164ece5891..c32f8456aa09 100644
--- a/arch/arm/kernel/fiq.c
+++ b/arch/arm/kernel/fiq.c
@@ -42,9 +42,9 @@
#include <linux/seq_file.h>
#include <asm/cacheflush.h>
+#include <asm/cp15.h>
#include <asm/fiq.h>
#include <asm/irq.h>
-#include <asm/system.h>
#include <asm/traps.h>
static unsigned long no_fiq_insn;
diff --git a/arch/arm/kernel/ftrace.c b/arch/arm/kernel/ftrace.c
index c0062ad1e847..df0bf0c8cb79 100644
--- a/arch/arm/kernel/ftrace.c
+++ b/arch/arm/kernel/ftrace.c
@@ -16,10 +16,13 @@
#include <linux/uaccess.h>
#include <asm/cacheflush.h>
+#include <asm/opcodes.h>
#include <asm/ftrace.h>
+#include "insn.h"
+
#ifdef CONFIG_THUMB2_KERNEL
-#define NOP 0xeb04f85d /* pop.w {lr} */
+#define NOP 0xf85deb04 /* pop.w {lr} */
#else
#define NOP 0xe8bd4000 /* pop {lr} */
#endif
@@ -60,76 +63,31 @@ static unsigned long adjust_address(struct dyn_ftrace *rec, unsigned long addr)
}
#endif
-#ifdef CONFIG_THUMB2_KERNEL
-static unsigned long ftrace_gen_branch(unsigned long pc, unsigned long addr,
- bool link)
-{
- unsigned long s, j1, j2, i1, i2, imm10, imm11;
- unsigned long first, second;
- long offset;
-
- offset = (long)addr - (long)(pc + 4);
- if (offset < -16777216 || offset > 16777214) {
- WARN_ON_ONCE(1);
- return 0;
- }
-
- s = (offset >> 24) & 0x1;
- i1 = (offset >> 23) & 0x1;
- i2 = (offset >> 22) & 0x1;
- imm10 = (offset >> 12) & 0x3ff;
- imm11 = (offset >> 1) & 0x7ff;
-
- j1 = (!i1) ^ s;
- j2 = (!i2) ^ s;
-
- first = 0xf000 | (s << 10) | imm10;
- second = 0x9000 | (j1 << 13) | (j2 << 11) | imm11;
- if (link)
- second |= 1 << 14;
-
- return (second << 16) | first;
-}
-#else
-static unsigned long ftrace_gen_branch(unsigned long pc, unsigned long addr,
- bool link)
-{
- unsigned long opcode = 0xea000000;
- long offset;
-
- if (link)
- opcode |= 1 << 24;
-
- offset = (long)addr - (long)(pc + 8);
- if (unlikely(offset < -33554432 || offset > 33554428)) {
- /* Can't generate branches that far (from ARM ARM). Ftrace
- * doesn't generate branches outside of kernel text.
- */
- WARN_ON_ONCE(1);
- return 0;
- }
-
- offset = (offset >> 2) & 0x00ffffff;
-
- return opcode | offset;
-}
-#endif
-
static unsigned long ftrace_call_replace(unsigned long pc, unsigned long addr)
{
- return ftrace_gen_branch(pc, addr, true);
+ return arm_gen_branch_link(pc, addr);
}
static int ftrace_modify_code(unsigned long pc, unsigned long old,
- unsigned long new)
+ unsigned long new, bool validate)
{
unsigned long replaced;
- if (probe_kernel_read(&replaced, (void *)pc, MCOUNT_INSN_SIZE))
- return -EFAULT;
+ if (IS_ENABLED(CONFIG_THUMB2_KERNEL)) {
+ old = __opcode_to_mem_thumb32(old);
+ new = __opcode_to_mem_thumb32(new);
+ } else {
+ old = __opcode_to_mem_arm(old);
+ new = __opcode_to_mem_arm(new);
+ }
- if (replaced != old)
- return -EINVAL;
+ if (validate) {
+ if (probe_kernel_read(&replaced, (void *)pc, MCOUNT_INSN_SIZE))
+ return -EFAULT;
+
+ if (replaced != old)
+ return -EINVAL;
+ }
if (probe_kernel_write((void *)pc, &new, MCOUNT_INSN_SIZE))
return -EPERM;
@@ -141,23 +99,21 @@ static int ftrace_modify_code(unsigned long pc, unsigned long old,
int ftrace_update_ftrace_func(ftrace_func_t func)
{
- unsigned long pc, old;
+ unsigned long pc;
unsigned long new;
int ret;
pc = (unsigned long)&ftrace_call;
- memcpy(&old, &ftrace_call, MCOUNT_INSN_SIZE);
new = ftrace_call_replace(pc, (unsigned long)func);
- ret = ftrace_modify_code(pc, old, new);
+ ret = ftrace_modify_code(pc, 0, new, false);
#ifdef CONFIG_OLD_MCOUNT
if (!ret) {
pc = (unsigned long)&ftrace_call_old;
- memcpy(&old, &ftrace_call_old, MCOUNT_INSN_SIZE);
new = ftrace_call_replace(pc, (unsigned long)func);
- ret = ftrace_modify_code(pc, old, new);
+ ret = ftrace_modify_code(pc, 0, new, false);
}
#endif
@@ -172,7 +128,7 @@ int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
old = ftrace_nop_replace(rec);
new = ftrace_call_replace(ip, adjust_address(rec, addr));
- return ftrace_modify_code(rec->ip, old, new);
+ return ftrace_modify_code(rec->ip, old, new, true);
}
int ftrace_make_nop(struct module *mod,
@@ -185,7 +141,7 @@ int ftrace_make_nop(struct module *mod,
old = ftrace_call_replace(ip, adjust_address(rec, addr));
new = ftrace_nop_replace(rec);
- ret = ftrace_modify_code(ip, old, new);
+ ret = ftrace_modify_code(ip, old, new, true);
#ifdef CONFIG_OLD_MCOUNT
if (ret == -EINVAL && addr == MCOUNT_ADDR) {
@@ -193,7 +149,7 @@ int ftrace_make_nop(struct module *mod,
old = ftrace_call_replace(ip, adjust_address(rec, addr));
new = ftrace_nop_replace(rec);
- ret = ftrace_modify_code(ip, old, new);
+ ret = ftrace_modify_code(ip, old, new, true);
}
#endif
@@ -249,12 +205,12 @@ static int __ftrace_modify_caller(unsigned long *callsite,
{
unsigned long caller_fn = (unsigned long) func;
unsigned long pc = (unsigned long) callsite;
- unsigned long branch = ftrace_gen_branch(pc, caller_fn, false);
+ unsigned long branch = arm_gen_branch(pc, caller_fn);
unsigned long nop = 0xe1a00000; /* mov r0, r0 */
unsigned long old = enable ? nop : branch;
unsigned long new = enable ? branch : nop;
- return ftrace_modify_code(pc, old, new);
+ return ftrace_modify_code(pc, old, new, true);
}
static int ftrace_modify_graph_caller(bool enable)
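Note on the NOP constant change above: with the new insn/opcodes helpers, ftrace_modify_code() keeps instructions in canonical form and only converts them to memory order (via __opcode_to_mem_thumb32()/__opcode_to_mem_arm()) right before reading or writing kernel text. A minimal user-space sketch of the little-endian Thumb-2 conversion, using the pop.w {lr} NOP from this diff (to_mem_thumb32() here is a stand-in for the kernel macro, not part of the patch):

#include <stdio.h>
#include <stdint.h>

/* Swap the two 16-bit halfwords, as __opcode_to_mem_thumb32() does on
 * a little-endian kernel. */
static uint32_t to_mem_thumb32(uint32_t insn)
{
	return (insn << 16) | (insn >> 16);
}

int main(void)
{
	/* canonical 0xf85deb04 (pop.w {lr}) is stored as 0xeb04f85d */
	printf("0x%08x\n", to_mem_thumb32(0xf85deb04));
	return 0;
}

This is why the Thumb-2 NOP #define flips from 0xeb04f85d (memory order) to 0xf85deb04 (canonical order) in this patch.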
diff --git a/arch/arm/kernel/head-nommu.S b/arch/arm/kernel/head-nommu.S
index d46f25968bec..278cfc144f44 100644
--- a/arch/arm/kernel/head-nommu.S
+++ b/arch/arm/kernel/head-nommu.S
@@ -17,8 +17,8 @@
#include <asm/assembler.h>
#include <asm/ptrace.h>
#include <asm/asm-offsets.h>
+#include <asm/cp15.h>
#include <asm/thread_info.h>
-#include <asm/system.h>
/*
* Kernel startup entry point.
diff --git a/arch/arm/kernel/head.S b/arch/arm/kernel/head.S
index 14e277d2ff91..3bf0c7f8b043 100644
--- a/arch/arm/kernel/head.S
+++ b/arch/arm/kernel/head.S
@@ -15,12 +15,12 @@
#include <linux/init.h>
#include <asm/assembler.h>
+#include <asm/cp15.h>
#include <asm/domain.h>
#include <asm/ptrace.h>
#include <asm/asm-offsets.h>
#include <asm/memory.h>
#include <asm/thread_info.h>
-#include <asm/system.h>
#include <asm/pgtable.h>
#ifdef CONFIG_DEBUG_LL
@@ -99,6 +99,14 @@ ENTRY(stext)
THUMB( it eq ) @ force fixup-able long branch encoding
beq __error_p @ yes, error 'p'
+#ifdef CONFIG_ARM_LPAE
+ mrc p15, 0, r3, c0, c1, 4 @ read ID_MMFR0
+ and r3, r3, #0xf @ extract VMSA support
+ cmp r3, #5 @ long-descriptor translation table format?
+ THUMB( it lo ) @ force fixup-able long branch encoding
+ blo __error_p @ only classic page table format
+#endif
+
#ifndef CONFIG_XIP_KERNEL
adr r3, 2f
ldmia r3, {r4, r8}
@@ -257,7 +265,7 @@ __create_page_tables:
str r6, [r3]
#ifdef CONFIG_DEBUG_LL
-#ifndef CONFIG_DEBUG_ICEDCC
+#if !defined(CONFIG_DEBUG_ICEDCC) && !defined(CONFIG_DEBUG_SEMIHOSTING)
/*
* Map in IO space for serial debugging.
* This allows debug messages to be output
@@ -289,10 +297,10 @@ __create_page_tables:
cmp r0, r6
blo 1b
-#else /* CONFIG_DEBUG_ICEDCC */
- /* we don't need any serial debugging mappings for ICEDCC */
+#else /* CONFIG_DEBUG_ICEDCC || CONFIG_DEBUG_SEMIHOSTING */
+ /* we don't need any serial debugging mappings */
ldr r7, [r10, #PROCINFO_IO_MMUFLAGS] @ io_mmuflags
-#endif /* !CONFIG_DEBUG_ICEDCC */
+#endif
#if defined(CONFIG_ARCH_NETWINDER) || defined(CONFIG_ARCH_CATS)
/*
diff --git a/arch/arm/kernel/hw_breakpoint.c b/arch/arm/kernel/hw_breakpoint.c
index d6a95ef9131d..ba386bd94107 100644
--- a/arch/arm/kernel/hw_breakpoint.c
+++ b/arch/arm/kernel/hw_breakpoint.c
@@ -34,7 +34,6 @@
#include <asm/current.h>
#include <asm/hw_breakpoint.h>
#include <asm/kdebug.h>
-#include <asm/system.h>
#include <asm/traps.h>
/* Breakpoint currently in use for each BRP. */
diff --git a/arch/arm/kernel/insn.c b/arch/arm/kernel/insn.c
new file mode 100644
index 000000000000..b760340b7014
--- /dev/null
+++ b/arch/arm/kernel/insn.c
@@ -0,0 +1,62 @@
+#include <linux/bug.h>
+#include <linux/kernel.h>
+#include <asm/opcodes.h>
+
+static unsigned long
+__arm_gen_branch_thumb2(unsigned long pc, unsigned long addr, bool link)
+{
+ unsigned long s, j1, j2, i1, i2, imm10, imm11;
+ unsigned long first, second;
+ long offset;
+
+ offset = (long)addr - (long)(pc + 4);
+ if (offset < -16777216 || offset > 16777214) {
+ WARN_ON_ONCE(1);
+ return 0;
+ }
+
+ s = (offset >> 24) & 0x1;
+ i1 = (offset >> 23) & 0x1;
+ i2 = (offset >> 22) & 0x1;
+ imm10 = (offset >> 12) & 0x3ff;
+ imm11 = (offset >> 1) & 0x7ff;
+
+ j1 = (!i1) ^ s;
+ j2 = (!i2) ^ s;
+
+ first = 0xf000 | (s << 10) | imm10;
+ second = 0x9000 | (j1 << 13) | (j2 << 11) | imm11;
+ if (link)
+ second |= 1 << 14;
+
+ return __opcode_thumb32_compose(first, second);
+}
+
+static unsigned long
+__arm_gen_branch_arm(unsigned long pc, unsigned long addr, bool link)
+{
+ unsigned long opcode = 0xea000000;
+ long offset;
+
+ if (link)
+ opcode |= 1 << 24;
+
+ offset = (long)addr - (long)(pc + 8);
+ if (unlikely(offset < -33554432 || offset > 33554428)) {
+ WARN_ON_ONCE(1);
+ return 0;
+ }
+
+ offset = (offset >> 2) & 0x00ffffff;
+
+ return opcode | offset;
+}
+
+unsigned long
+__arm_gen_branch(unsigned long pc, unsigned long addr, bool link)
+{
+ if (IS_ENABLED(CONFIG_THUMB2_KERNEL))
+ return __arm_gen_branch_thumb2(pc, addr, link);
+ else
+ return __arm_gen_branch_arm(pc, addr, link);
+}
diff --git a/arch/arm/kernel/insn.h b/arch/arm/kernel/insn.h
new file mode 100644
index 000000000000..e96065da4dae
--- /dev/null
+++ b/arch/arm/kernel/insn.h
@@ -0,0 +1,29 @@
+#ifndef __ASM_ARM_INSN_H
+#define __ASM_ARM_INSN_H
+
+static inline unsigned long
+arm_gen_nop(void)
+{
+#ifdef CONFIG_THUMB2_KERNEL
+ return 0xf3af8000; /* nop.w */
+#else
+ return 0xe1a00000; /* mov r0, r0 */
+#endif
+}
+
+unsigned long
+__arm_gen_branch(unsigned long pc, unsigned long addr, bool link);
+
+static inline unsigned long
+arm_gen_branch(unsigned long pc, unsigned long addr)
+{
+ return __arm_gen_branch(pc, addr, false);
+}
+
+static inline unsigned long
+arm_gen_branch_link(unsigned long pc, unsigned long addr)
+{
+ return __arm_gen_branch(pc, addr, true);
+}
+
+#endif
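The helpers above centralise the branch generation that ftrace previously open-coded. For the ARM (non-Thumb) case the encoding is simply a 24-bit word offset from pc + 8, as a quick stand-alone sketch shows (example addresses are made up; gen_arm_branch() mirrors __arm_gen_branch_arm() and is not part of the patch):

#include <stdio.h>

static unsigned long gen_arm_branch(unsigned long pc, unsigned long addr, int link)
{
	unsigned long opcode = 0xea000000;	/* B; BL additionally sets bit 24 */
	long offset = (long)addr - (long)(pc + 8);	/* PC reads as pc + 8 in ARM state */

	if (link)
		opcode |= 1 << 24;
	if (offset < -33554432 || offset > 33554428)
		return 0;			/* beyond the +/-32MB branch range */

	return opcode | ((offset >> 2) & 0x00ffffff);
}

int main(void)
{
	/* a call site at 0x8000 branching forward to 0x8100 */
	printf("0x%08lx\n", gen_arm_branch(0x8000, 0x8100, 0));	/* 0xea00003e */
	return 0;
}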
diff --git a/arch/arm/kernel/irq.c b/arch/arm/kernel/irq.c
index 3efd82cc95f0..71ccdbfed662 100644
--- a/arch/arm/kernel/irq.c
+++ b/arch/arm/kernel/irq.c
@@ -36,7 +36,6 @@
#include <linux/proc_fs.h>
#include <asm/exception.h>
-#include <asm/system.h>
#include <asm/mach/arch.h>
#include <asm/mach/irq.h>
#include <asm/mach/time.h>
@@ -181,10 +180,7 @@ void migrate_irqs(void)
local_irq_save(flags);
for_each_irq_desc(i, desc) {
- bool affinity_broken = false;
-
- if (!desc)
- continue;
+ bool affinity_broken;
raw_spin_lock(&desc->lock);
affinity_broken = migrate_one_irq(desc);
diff --git a/arch/arm/kernel/jump_label.c b/arch/arm/kernel/jump_label.c
new file mode 100644
index 000000000000..4ce4f789446d
--- /dev/null
+++ b/arch/arm/kernel/jump_label.c
@@ -0,0 +1,39 @@
+#include <linux/kernel.h>
+#include <linux/jump_label.h>
+
+#include "insn.h"
+#include "patch.h"
+
+#ifdef HAVE_JUMP_LABEL
+
+static void __arch_jump_label_transform(struct jump_entry *entry,
+ enum jump_label_type type,
+ bool is_static)
+{
+ void *addr = (void *)entry->code;
+ unsigned int insn;
+
+ if (type == JUMP_LABEL_ENABLE)
+ insn = arm_gen_branch(entry->code, entry->target);
+ else
+ insn = arm_gen_nop();
+
+ if (is_static)
+ __patch_text(addr, insn);
+ else
+ patch_text(addr, insn);
+}
+
+void arch_jump_label_transform(struct jump_entry *entry,
+ enum jump_label_type type)
+{
+ __arch_jump_label_transform(entry, type, false);
+}
+
+void arch_jump_label_transform_static(struct jump_entry *entry,
+ enum jump_label_type type)
+{
+ __arch_jump_label_transform(entry, type, true);
+}
+
+#endif
diff --git a/arch/arm/kernel/kprobes-common.c b/arch/arm/kernel/kprobes-common.c
index a5394fb4e4e0..18a76282970e 100644
--- a/arch/arm/kernel/kprobes-common.c
+++ b/arch/arm/kernel/kprobes-common.c
@@ -13,6 +13,7 @@
#include <linux/kernel.h>
#include <linux/kprobes.h>
+#include <asm/system_info.h>
#include "kprobes.h"
diff --git a/arch/arm/kernel/kprobes.c b/arch/arm/kernel/kprobes.c
index 129c1163248b..4dd41fc9e235 100644
--- a/arch/arm/kernel/kprobes.c
+++ b/arch/arm/kernel/kprobes.c
@@ -29,6 +29,7 @@
#include <asm/cacheflush.h>
#include "kprobes.h"
+#include "patch.h"
#define MIN_STACK_SIZE(addr) \
min((unsigned long)MAX_STACK_SIZE, \
@@ -103,57 +104,33 @@ int __kprobes arch_prepare_kprobe(struct kprobe *p)
return 0;
}
-#ifdef CONFIG_THUMB2_KERNEL
-
-/*
- * For a 32-bit Thumb breakpoint spanning two memory words we need to take
- * special precautions to insert the breakpoint atomically, especially on SMP
- * systems. This is achieved by calling this arming function using stop_machine.
- */
-static int __kprobes set_t32_breakpoint(void *addr)
-{
- ((u16 *)addr)[0] = KPROBE_THUMB32_BREAKPOINT_INSTRUCTION >> 16;
- ((u16 *)addr)[1] = KPROBE_THUMB32_BREAKPOINT_INSTRUCTION & 0xffff;
- flush_insns(addr, 2*sizeof(u16));
- return 0;
-}
-
void __kprobes arch_arm_kprobe(struct kprobe *p)
{
- uintptr_t addr = (uintptr_t)p->addr & ~1; /* Remove any Thumb flag */
-
- if (!is_wide_instruction(p->opcode)) {
- *(u16 *)addr = KPROBE_THUMB16_BREAKPOINT_INSTRUCTION;
- flush_insns(addr, sizeof(u16));
- } else if (addr & 2) {
- /* A 32-bit instruction spanning two words needs special care */
- stop_machine(set_t32_breakpoint, (void *)addr, &cpu_online_map);
+ unsigned int brkp;
+ void *addr;
+
+ if (IS_ENABLED(CONFIG_THUMB2_KERNEL)) {
+ /* Remove any Thumb flag */
+ addr = (void *)((uintptr_t)p->addr & ~1);
+
+ if (is_wide_instruction(p->opcode))
+ brkp = KPROBE_THUMB32_BREAKPOINT_INSTRUCTION;
+ else
+ brkp = KPROBE_THUMB16_BREAKPOINT_INSTRUCTION;
} else {
- /* Word aligned 32-bit instruction can be written atomically */
- u32 bkp = KPROBE_THUMB32_BREAKPOINT_INSTRUCTION;
-#ifndef __ARMEB__ /* Swap halfwords for little-endian */
- bkp = (bkp >> 16) | (bkp << 16);
-#endif
- *(u32 *)addr = bkp;
- flush_insns(addr, sizeof(u32));
- }
-}
+ kprobe_opcode_t insn = p->opcode;
-#else /* !CONFIG_THUMB2_KERNEL */
+ addr = p->addr;
+ brkp = KPROBE_ARM_BREAKPOINT_INSTRUCTION;
-void __kprobes arch_arm_kprobe(struct kprobe *p)
-{
- kprobe_opcode_t insn = p->opcode;
- kprobe_opcode_t brkp = KPROBE_ARM_BREAKPOINT_INSTRUCTION;
- if (insn >= 0xe0000000)
- brkp |= 0xe0000000; /* Unconditional instruction */
- else
- brkp |= insn & 0xf0000000; /* Copy condition from insn */
- *p->addr = brkp;
- flush_insns(p->addr, sizeof(p->addr[0]));
-}
+ if (insn >= 0xe0000000)
+ brkp |= 0xe0000000; /* Unconditional instruction */
+ else
+ brkp |= insn & 0xf0000000; /* Copy condition from insn */
+ }
-#endif /* !CONFIG_THUMB2_KERNEL */
+ patch_text(addr, brkp);
+}
/*
* The actual disarming is done here on each CPU and synchronized using
@@ -166,31 +143,16 @@ void __kprobes arch_arm_kprobe(struct kprobe *p)
int __kprobes __arch_disarm_kprobe(void *p)
{
struct kprobe *kp = p;
-#ifdef CONFIG_THUMB2_KERNEL
- u16 *addr = (u16 *)((uintptr_t)kp->addr & ~1);
- kprobe_opcode_t insn = kp->opcode;
- unsigned int len;
+ void *addr = (void *)((uintptr_t)kp->addr & ~1);
- if (is_wide_instruction(insn)) {
- ((u16 *)addr)[0] = insn>>16;
- ((u16 *)addr)[1] = insn;
- len = 2*sizeof(u16);
- } else {
- ((u16 *)addr)[0] = insn;
- len = sizeof(u16);
- }
- flush_insns(addr, len);
+ __patch_text(addr, kp->opcode);
-#else /* !CONFIG_THUMB2_KERNEL */
- *kp->addr = kp->opcode;
- flush_insns(kp->addr, sizeof(kp->addr[0]));
-#endif
return 0;
}
void __kprobes arch_disarm_kprobe(struct kprobe *p)
{
- stop_machine(__arch_disarm_kprobe, p, &cpu_online_map);
+ stop_machine(__arch_disarm_kprobe, p, cpu_online_mask);
}
void __kprobes arch_remove_kprobe(struct kprobe *p)
diff --git a/arch/arm/kernel/machine_kexec.c b/arch/arm/kernel/machine_kexec.c
index 764bd456d84f..dfcdb9f7c126 100644
--- a/arch/arm/kernel/machine_kexec.c
+++ b/arch/arm/kernel/machine_kexec.c
@@ -7,12 +7,13 @@
#include <linux/delay.h>
#include <linux/reboot.h>
#include <linux/io.h>
+#include <linux/irq.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/mmu_context.h>
#include <asm/cacheflush.h>
#include <asm/mach-types.h>
-#include <asm/system.h>
+#include <asm/system_misc.h>
extern const unsigned char relocate_new_kernel[];
extern const unsigned int relocate_new_kernel_size;
@@ -53,6 +54,29 @@ void machine_crash_nonpanic_core(void *unused)
cpu_relax();
}
+static void machine_kexec_mask_interrupts(void)
+{
+ unsigned int i;
+ struct irq_desc *desc;
+
+ for_each_irq_desc(i, desc) {
+ struct irq_chip *chip;
+
+ chip = irq_desc_get_chip(desc);
+ if (!chip)
+ continue;
+
+ if (chip->irq_eoi && irqd_irq_inprogress(&desc->irq_data))
+ chip->irq_eoi(&desc->irq_data);
+
+ if (chip->irq_mask)
+ chip->irq_mask(&desc->irq_data);
+
+ if (chip->irq_disable && !irqd_irq_disabled(&desc->irq_data))
+ chip->irq_disable(&desc->irq_data);
+ }
+}
+
void machine_crash_shutdown(struct pt_regs *regs)
{
unsigned long msecs;
@@ -70,6 +94,7 @@ void machine_crash_shutdown(struct pt_regs *regs)
printk(KERN_WARNING "Non-crashing CPUs did not react to IPI\n");
crash_save_cpu(regs, smp_processor_id());
+ machine_kexec_mask_interrupts();
printk(KERN_INFO "Loading crashdump kernel...\n");
}
diff --git a/arch/arm/kernel/patch.c b/arch/arm/kernel/patch.c
new file mode 100644
index 000000000000..07314af47733
--- /dev/null
+++ b/arch/arm/kernel/patch.c
@@ -0,0 +1,75 @@
+#include <linux/kernel.h>
+#include <linux/kprobes.h>
+#include <linux/stop_machine.h>
+
+#include <asm/cacheflush.h>
+#include <asm/smp_plat.h>
+#include <asm/opcodes.h>
+
+#include "patch.h"
+
+struct patch {
+ void *addr;
+ unsigned int insn;
+};
+
+void __kprobes __patch_text(void *addr, unsigned int insn)
+{
+ bool thumb2 = IS_ENABLED(CONFIG_THUMB2_KERNEL);
+ int size;
+
+ if (thumb2 && __opcode_is_thumb16(insn)) {
+ *(u16 *)addr = __opcode_to_mem_thumb16(insn);
+ size = sizeof(u16);
+ } else if (thumb2 && ((uintptr_t)addr & 2)) {
+ u16 first = __opcode_thumb32_first(insn);
+ u16 second = __opcode_thumb32_second(insn);
+ u16 *addrh = addr;
+
+ addrh[0] = __opcode_to_mem_thumb16(first);
+ addrh[1] = __opcode_to_mem_thumb16(second);
+
+ size = sizeof(u32);
+ } else {
+ if (thumb2)
+ insn = __opcode_to_mem_thumb32(insn);
+ else
+ insn = __opcode_to_mem_arm(insn);
+
+ *(u32 *)addr = insn;
+ size = sizeof(u32);
+ }
+
+ flush_icache_range((uintptr_t)(addr),
+ (uintptr_t)(addr) + size);
+}
+
+static int __kprobes patch_text_stop_machine(void *data)
+{
+ struct patch *patch = data;
+
+ __patch_text(patch->addr, patch->insn);
+
+ return 0;
+}
+
+void __kprobes patch_text(void *addr, unsigned int insn)
+{
+ struct patch patch = {
+ .addr = addr,
+ .insn = insn,
+ };
+
+ if (cache_ops_need_broadcast()) {
+ stop_machine(patch_text_stop_machine, &patch, cpu_online_mask);
+ } else {
+ bool straddles_word = IS_ENABLED(CONFIG_THUMB2_KERNEL)
+ && __opcode_is_thumb32(insn)
+ && ((uintptr_t)addr & 2);
+
+ if (straddles_word)
+ stop_machine(patch_text_stop_machine, &patch, NULL);
+ else
+ __patch_text(addr, insn);
+ }
+}
diff --git a/arch/arm/kernel/patch.h b/arch/arm/kernel/patch.h
new file mode 100644
index 000000000000..b4731f2dac38
--- /dev/null
+++ b/arch/arm/kernel/patch.h
@@ -0,0 +1,7 @@
+#ifndef _ARM_KERNEL_PATCH_H
+#define _ARM_KERNEL_PATCH_H
+
+void patch_text(void *addr, unsigned int insn);
+void __patch_text(void *addr, unsigned int insn);
+
+#endif
diff --git a/arch/arm/kernel/perf_event.c b/arch/arm/kernel/perf_event.c
index 5bb91bf3d47f..186c8cb982c5 100644
--- a/arch/arm/kernel/perf_event.c
+++ b/arch/arm/kernel/perf_event.c
@@ -180,7 +180,7 @@ armpmu_event_set_period(struct perf_event *event,
u64
armpmu_event_update(struct perf_event *event,
struct hw_perf_event *hwc,
- int idx, int overflow)
+ int idx)
{
struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
u64 delta, prev_raw_count, new_raw_count;
@@ -193,13 +193,7 @@ again:
new_raw_count) != prev_raw_count)
goto again;
- new_raw_count &= armpmu->max_period;
- prev_raw_count &= armpmu->max_period;
-
- if (overflow)
- delta = armpmu->max_period - prev_raw_count + new_raw_count + 1;
- else
- delta = new_raw_count - prev_raw_count;
+ delta = (new_raw_count - prev_raw_count) & armpmu->max_period;
local64_add(delta, &event->count);
local64_sub(delta, &hwc->period_left);
@@ -216,7 +210,7 @@ armpmu_read(struct perf_event *event)
if (hwc->idx < 0)
return;
- armpmu_event_update(event, hwc, hwc->idx, 0);
+ armpmu_event_update(event, hwc, hwc->idx);
}
static void
@@ -232,7 +226,7 @@ armpmu_stop(struct perf_event *event, int flags)
if (!(hwc->state & PERF_HES_STOPPED)) {
armpmu->disable(hwc, hwc->idx);
barrier(); /* why? */
- armpmu_event_update(event, hwc, hwc->idx, 0);
+ armpmu_event_update(event, hwc, hwc->idx);
hwc->state |= PERF_HES_STOPPED | PERF_HES_UPTODATE;
}
}
@@ -518,7 +512,13 @@ __hw_perf_event_init(struct perf_event *event)
hwc->config_base |= (unsigned long)mapping;
if (!hwc->sample_period) {
- hwc->sample_period = armpmu->max_period;
+ /*
+ * For non-sampling runs, limit the sample_period to half
+ * of the counter width. That way, the new counter value
+ * is far less likely to overtake the previous one unless
+ * you have some serious IRQ latency issues.
+ */
+ hwc->sample_period = armpmu->max_period >> 1;
hwc->last_period = hwc->sample_period;
local64_set(&hwc->period_left, hwc->sample_period);
}
@@ -539,6 +539,10 @@ static int armpmu_event_init(struct perf_event *event)
int err = 0;
atomic_t *active_events = &armpmu->active_events;
+ /* does not support taken branch sampling */
+ if (has_branch_stack(event))
+ return -EOPNOTSUPP;
+
if (armpmu->map_event(event) == -ENOENT)
return -ENOENT;
@@ -680,6 +684,28 @@ static void __init cpu_pmu_init(struct arm_pmu *armpmu)
}
/*
+ * PMU hardware loses all context when a CPU goes offline.
+ * When a CPU is hotplugged back in, since some hardware registers are
+ * UNKNOWN at reset, the PMU must be explicitly reset to avoid reading
+ * junk values out of them.
+ */
+static int __cpuinit pmu_cpu_notify(struct notifier_block *b,
+ unsigned long action, void *hcpu)
+{
+ if ((action & ~CPU_TASKS_FROZEN) != CPU_STARTING)
+ return NOTIFY_DONE;
+
+ if (cpu_pmu && cpu_pmu->reset)
+ cpu_pmu->reset(NULL);
+
+ return NOTIFY_OK;
+}
+
+static struct notifier_block __cpuinitdata pmu_cpu_notifier = {
+ .notifier_call = pmu_cpu_notify,
+};
+
+/*
* CPU PMU identification and registration.
*/
static int __init
@@ -712,6 +738,9 @@ init_hw_perf_events(void)
case 0xC0F0: /* Cortex-A15 */
cpu_pmu = armv7_a15_pmu_init();
break;
+ case 0xC070: /* Cortex-A7 */
+ cpu_pmu = armv7_a7_pmu_init();
+ break;
}
/* Intel CPUs [xscale]. */
} else if (0x69 == implementor) {
@@ -730,6 +759,7 @@ init_hw_perf_events(void)
pr_info("enabled with %s PMU driver, %d counters available\n",
cpu_pmu->name, cpu_pmu->num_events);
cpu_pmu_init(cpu_pmu);
+ register_cpu_notifier(&pmu_cpu_notifier);
armpmu_register(cpu_pmu, "cpu", PERF_TYPE_RAW);
} else {
pr_info("no hardware support available\n");
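The armpmu_event_update() change above drops the 'overflow' argument because masking the unsigned difference with max_period already yields the correct delta whether or not the counter wrapped. A small illustrative example (max_period is assumed to be the all-ones value of a 32-bit counter; this is not kernel code):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t max_period = 0xffffffffULL;
	uint64_t prev = 0xfffffff0ULL;	/* shortly before the counter wraps */
	uint64_t new = 0x00000010ULL;	/* read back after the wrap */

	/* same expression as the new armpmu_event_update() */
	uint64_t delta = (new - prev) & max_period;

	printf("delta = %llu\n", (unsigned long long)delta);	/* 32 */
	return 0;
}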
diff --git a/arch/arm/kernel/perf_event_v6.c b/arch/arm/kernel/perf_event_v6.c
index 533be9930ec2..b78af0cc6ef3 100644
--- a/arch/arm/kernel/perf_event_v6.c
+++ b/arch/arm/kernel/perf_event_v6.c
@@ -467,23 +467,6 @@ armv6pmu_enable_event(struct hw_perf_event *hwc,
raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
}
-static int counter_is_active(unsigned long pmcr, int idx)
-{
- unsigned long mask = 0;
- if (idx == ARMV6_CYCLE_COUNTER)
- mask = ARMV6_PMCR_CCOUNT_IEN;
- else if (idx == ARMV6_COUNTER0)
- mask = ARMV6_PMCR_COUNT0_IEN;
- else if (idx == ARMV6_COUNTER1)
- mask = ARMV6_PMCR_COUNT1_IEN;
-
- if (mask)
- return pmcr & mask;
-
- WARN_ONCE(1, "invalid counter number (%d)\n", idx);
- return 0;
-}
-
static irqreturn_t
armv6pmu_handle_irq(int irq_num,
void *dev)
@@ -513,7 +496,8 @@ armv6pmu_handle_irq(int irq_num,
struct perf_event *event = cpuc->events[idx];
struct hw_perf_event *hwc;
- if (!counter_is_active(pmcr, idx))
+ /* Ignore if we don't have an event. */
+ if (!event)
continue;
/*
@@ -524,7 +508,7 @@ armv6pmu_handle_irq(int irq_num,
continue;
hwc = &event->hw;
- armpmu_event_update(event, hwc, idx, 1);
+ armpmu_event_update(event, hwc, idx);
data.period = event->hw.last_period;
if (!armpmu_event_set_period(event, hwc, idx))
continue;
diff --git a/arch/arm/kernel/perf_event_v7.c b/arch/arm/kernel/perf_event_v7.c
index 460bbbb6b885..00755d82e2f2 100644
--- a/arch/arm/kernel/perf_event_v7.c
+++ b/arch/arm/kernel/perf_event_v7.c
@@ -469,6 +469,20 @@ static const unsigned armv7_a5_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
},
},
+ [C(NODE)] = {
+ [C(OP_READ)] = {
+ [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
+ [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
+ },
+ [C(OP_WRITE)] = {
+ [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
+ [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
+ },
+ [C(OP_PREFETCH)] = {
+ [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
+ [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
+ },
+ },
};
/*
@@ -579,6 +593,144 @@ static const unsigned armv7_a15_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
},
},
+ [C(NODE)] = {
+ [C(OP_READ)] = {
+ [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
+ [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
+ },
+ [C(OP_WRITE)] = {
+ [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
+ [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
+ },
+ [C(OP_PREFETCH)] = {
+ [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
+ [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
+ },
+ },
+};
+
+/*
+ * Cortex-A7 HW events mapping
+ */
+static const unsigned armv7_a7_perf_map[PERF_COUNT_HW_MAX] = {
+ [PERF_COUNT_HW_CPU_CYCLES] = ARMV7_PERFCTR_CPU_CYCLES,
+ [PERF_COUNT_HW_INSTRUCTIONS] = ARMV7_PERFCTR_INSTR_EXECUTED,
+ [PERF_COUNT_HW_CACHE_REFERENCES] = ARMV7_PERFCTR_L1_DCACHE_ACCESS,
+ [PERF_COUNT_HW_CACHE_MISSES] = ARMV7_PERFCTR_L1_DCACHE_REFILL,
+ [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = ARMV7_PERFCTR_PC_WRITE,
+ [PERF_COUNT_HW_BRANCH_MISSES] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
+ [PERF_COUNT_HW_BUS_CYCLES] = ARMV7_PERFCTR_BUS_CYCLES,
+ [PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = HW_OP_UNSUPPORTED,
+ [PERF_COUNT_HW_STALLED_CYCLES_BACKEND] = HW_OP_UNSUPPORTED,
+};
+
+static const unsigned armv7_a7_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
+ [PERF_COUNT_HW_CACHE_OP_MAX]
+ [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
+ [C(L1D)] = {
+ /*
+ * The performance counters don't differentiate between read
+ * and write accesses/misses so this isn't strictly correct,
+ * but it's the best we can do. Writes and reads get
+ * combined.
+ */
+ [C(OP_READ)] = {
+ [C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_DCACHE_ACCESS,
+ [C(RESULT_MISS)] = ARMV7_PERFCTR_L1_DCACHE_REFILL,
+ },
+ [C(OP_WRITE)] = {
+ [C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_DCACHE_ACCESS,
+ [C(RESULT_MISS)] = ARMV7_PERFCTR_L1_DCACHE_REFILL,
+ },
+ [C(OP_PREFETCH)] = {
+ [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
+ [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
+ },
+ },
+ [C(L1I)] = {
+ [C(OP_READ)] = {
+ [C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_ICACHE_ACCESS,
+ [C(RESULT_MISS)] = ARMV7_PERFCTR_L1_ICACHE_REFILL,
+ },
+ [C(OP_WRITE)] = {
+ [C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_ICACHE_ACCESS,
+ [C(RESULT_MISS)] = ARMV7_PERFCTR_L1_ICACHE_REFILL,
+ },
+ [C(OP_PREFETCH)] = {
+ [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
+ [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
+ },
+ },
+ [C(LL)] = {
+ [C(OP_READ)] = {
+ [C(RESULT_ACCESS)] = ARMV7_PERFCTR_L2_CACHE_ACCESS,
+ [C(RESULT_MISS)] = ARMV7_PERFCTR_L2_CACHE_REFILL,
+ },
+ [C(OP_WRITE)] = {
+ [C(RESULT_ACCESS)] = ARMV7_PERFCTR_L2_CACHE_ACCESS,
+ [C(RESULT_MISS)] = ARMV7_PERFCTR_L2_CACHE_REFILL,
+ },
+ [C(OP_PREFETCH)] = {
+ [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
+ [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
+ },
+ },
+ [C(DTLB)] = {
+ [C(OP_READ)] = {
+ [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
+ [C(RESULT_MISS)] = ARMV7_PERFCTR_DTLB_REFILL,
+ },
+ [C(OP_WRITE)] = {
+ [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
+ [C(RESULT_MISS)] = ARMV7_PERFCTR_DTLB_REFILL,
+ },
+ [C(OP_PREFETCH)] = {
+ [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
+ [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
+ },
+ },
+ [C(ITLB)] = {
+ [C(OP_READ)] = {
+ [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
+ [C(RESULT_MISS)] = ARMV7_PERFCTR_ITLB_REFILL,
+ },
+ [C(OP_WRITE)] = {
+ [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
+ [C(RESULT_MISS)] = ARMV7_PERFCTR_ITLB_REFILL,
+ },
+ [C(OP_PREFETCH)] = {
+ [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
+ [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
+ },
+ },
+ [C(BPU)] = {
+ [C(OP_READ)] = {
+ [C(RESULT_ACCESS)] = ARMV7_PERFCTR_PC_BRANCH_PRED,
+ [C(RESULT_MISS)] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
+ },
+ [C(OP_WRITE)] = {
+ [C(RESULT_ACCESS)] = ARMV7_PERFCTR_PC_BRANCH_PRED,
+ [C(RESULT_MISS)] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
+ },
+ [C(OP_PREFETCH)] = {
+ [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
+ [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
+ },
+ },
+ [C(NODE)] = {
+ [C(OP_READ)] = {
+ [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
+ [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
+ },
+ [C(OP_WRITE)] = {
+ [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
+ [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
+ },
+ [C(OP_PREFETCH)] = {
+ [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
+ [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
+ },
+ },
};
/*
@@ -781,6 +933,11 @@ static inline int armv7_pmnc_disable_intens(int idx)
counter = ARMV7_IDX_TO_COUNTER(idx);
asm volatile("mcr p15, 0, %0, c9, c14, 2" : : "r" (BIT(counter)));
+ isb();
+ /* Clear the overflow flag in case an interrupt is pending. */
+ asm volatile("mcr p15, 0, %0, c9, c12, 3" : : "r" (BIT(counter)));
+ isb();
+
return idx;
}
@@ -927,6 +1084,10 @@ static irqreturn_t armv7pmu_handle_irq(int irq_num, void *dev)
struct perf_event *event = cpuc->events[idx];
struct hw_perf_event *hwc;
+ /* Ignore if we don't have an event. */
+ if (!event)
+ continue;
+
/*
* We have a single interrupt for all counters. Check that
* each counter has overflowed before we process it.
@@ -935,7 +1096,7 @@ static irqreturn_t armv7pmu_handle_irq(int irq_num, void *dev)
continue;
hwc = &event->hw;
- armpmu_event_update(event, hwc, idx, 1);
+ armpmu_event_update(event, hwc, idx);
data.period = event->hw.last_period;
if (!armpmu_event_set_period(event, hwc, idx))
continue;
@@ -1067,6 +1228,12 @@ static int armv7_a15_map_event(struct perf_event *event)
&armv7_a15_perf_cache_map, 0xFF);
}
+static int armv7_a7_map_event(struct perf_event *event)
+{
+ return map_cpu_event(event, &armv7_a7_perf_map,
+ &armv7_a7_perf_cache_map, 0xFF);
+}
+
static struct arm_pmu armv7pmu = {
.handle_irq = armv7pmu_handle_irq,
.enable = armv7pmu_enable_event,
@@ -1127,6 +1294,16 @@ static struct arm_pmu *__init armv7_a15_pmu_init(void)
armv7pmu.set_event_filter = armv7pmu_set_event_filter;
return &armv7pmu;
}
+
+static struct arm_pmu *__init armv7_a7_pmu_init(void)
+{
+ armv7pmu.id = ARM_PERF_PMU_ID_CA7;
+ armv7pmu.name = "ARMv7 Cortex-A7";
+ armv7pmu.map_event = armv7_a7_map_event;
+ armv7pmu.num_events = armv7_read_num_pmnc_events();
+ armv7pmu.set_event_filter = armv7pmu_set_event_filter;
+ return &armv7pmu;
+}
#else
static struct arm_pmu *__init armv7_a8_pmu_init(void)
{
@@ -1147,4 +1324,9 @@ static struct arm_pmu *__init armv7_a15_pmu_init(void)
{
return NULL;
}
+
+static struct arm_pmu *__init armv7_a7_pmu_init(void)
+{
+ return NULL;
+}
#endif /* CONFIG_CPU_V7 */
diff --git a/arch/arm/kernel/perf_event_xscale.c b/arch/arm/kernel/perf_event_xscale.c
index 3b99d8269829..71a21e6712f5 100644
--- a/arch/arm/kernel/perf_event_xscale.c
+++ b/arch/arm/kernel/perf_event_xscale.c
@@ -255,11 +255,14 @@ xscale1pmu_handle_irq(int irq_num, void *dev)
struct perf_event *event = cpuc->events[idx];
struct hw_perf_event *hwc;
+ if (!event)
+ continue;
+
if (!xscale1_pmnc_counter_has_overflowed(pmnc, idx))
continue;
hwc = &event->hw;
- armpmu_event_update(event, hwc, idx, 1);
+ armpmu_event_update(event, hwc, idx);
data.period = event->hw.last_period;
if (!armpmu_event_set_period(event, hwc, idx))
continue;
@@ -592,11 +595,14 @@ xscale2pmu_handle_irq(int irq_num, void *dev)
struct perf_event *event = cpuc->events[idx];
struct hw_perf_event *hwc;
- if (!xscale2_pmnc_counter_has_overflowed(pmnc, idx))
+ if (!event)
+ continue;
+
+ if (!xscale2_pmnc_counter_has_overflowed(of_flags, idx))
continue;
hwc = &event->hw;
- armpmu_event_update(event, hwc, idx, 1);
+ armpmu_event_update(event, hwc, idx);
data.period = event->hw.last_period;
if (!armpmu_event_set_period(event, hwc, idx))
continue;
@@ -663,7 +669,7 @@ xscale2pmu_enable_event(struct hw_perf_event *hwc, int idx)
static void
xscale2pmu_disable_event(struct hw_perf_event *hwc, int idx)
{
- unsigned long flags, ien, evtsel;
+ unsigned long flags, ien, evtsel, of_flags;
struct pmu_hw_events *events = cpu_pmu->get_hw_events();
ien = xscale2pmu_read_int_enable();
@@ -672,26 +678,31 @@ xscale2pmu_disable_event(struct hw_perf_event *hwc, int idx)
switch (idx) {
case XSCALE_CYCLE_COUNTER:
ien &= ~XSCALE2_CCOUNT_INT_EN;
+ of_flags = XSCALE2_CCOUNT_OVERFLOW;
break;
case XSCALE_COUNTER0:
ien &= ~XSCALE2_COUNT0_INT_EN;
evtsel &= ~XSCALE2_COUNT0_EVT_MASK;
evtsel |= XSCALE_PERFCTR_UNUSED << XSCALE2_COUNT0_EVT_SHFT;
+ of_flags = XSCALE2_COUNT0_OVERFLOW;
break;
case XSCALE_COUNTER1:
ien &= ~XSCALE2_COUNT1_INT_EN;
evtsel &= ~XSCALE2_COUNT1_EVT_MASK;
evtsel |= XSCALE_PERFCTR_UNUSED << XSCALE2_COUNT1_EVT_SHFT;
+ of_flags = XSCALE2_COUNT1_OVERFLOW;
break;
case XSCALE_COUNTER2:
ien &= ~XSCALE2_COUNT2_INT_EN;
evtsel &= ~XSCALE2_COUNT2_EVT_MASK;
evtsel |= XSCALE_PERFCTR_UNUSED << XSCALE2_COUNT2_EVT_SHFT;
+ of_flags = XSCALE2_COUNT2_OVERFLOW;
break;
case XSCALE_COUNTER3:
ien &= ~XSCALE2_COUNT3_INT_EN;
evtsel &= ~XSCALE2_COUNT3_EVT_MASK;
evtsel |= XSCALE_PERFCTR_UNUSED << XSCALE2_COUNT3_EVT_SHFT;
+ of_flags = XSCALE2_COUNT3_OVERFLOW;
break;
default:
WARN_ONCE(1, "invalid counter number (%d)\n", idx);
@@ -701,6 +712,7 @@ xscale2pmu_disable_event(struct hw_perf_event *hwc, int idx)
raw_spin_lock_irqsave(&events->pmu_lock, flags);
xscale2pmu_write_event_select(evtsel);
xscale2pmu_write_int_enable(ien);
+ xscale2pmu_write_overflow_flags(of_flags);
raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
}
diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c
index 971d65c253a9..2b7b017a20cd 100644
--- a/arch/arm/kernel/process.c
+++ b/arch/arm/kernel/process.c
@@ -35,7 +35,6 @@
#include <asm/cacheflush.h>
#include <asm/leds.h>
#include <asm/processor.h>
-#include <asm/system.h>
#include <asm/thread_notify.h>
#include <asm/stacktrace.h>
#include <asm/mach/time.h>
@@ -61,8 +60,6 @@ extern void setup_mm_for_reboot(void);
static volatile int hlt_counter;
-#include <mach/system.h>
-
void disable_hlt(void)
{
hlt_counter++;
@@ -181,13 +178,17 @@ void cpu_idle_wait(void)
EXPORT_SYMBOL_GPL(cpu_idle_wait);
/*
- * This is our default idle handler. We need to disable
- * interrupts here to ensure we don't miss a wakeup call.
+ * This is our default idle handler.
*/
+
+void (*arm_pm_idle)(void);
+
static void default_idle(void)
{
- if (!need_resched())
- arch_idle();
+ if (arm_pm_idle)
+ arm_pm_idle();
+ else
+ cpu_do_idle();
local_irq_enable();
}
@@ -215,6 +216,10 @@ void cpu_idle(void)
cpu_die();
#endif
+ /*
+ * We need to disable interrupts here
+ * to ensure we don't miss a wakeup call.
+ */
local_irq_disable();
#ifdef CONFIG_PL310_ERRATA_769419
wmb();
@@ -222,26 +227,23 @@ void cpu_idle(void)
if (hlt_counter) {
local_irq_enable();
cpu_relax();
- } else {
+ } else if (!need_resched()) {
stop_critical_timings();
if (cpuidle_idle_call())
pm_idle();
start_critical_timings();
/*
- * This will eventually be removed - pm_idle
- * functions should always return with IRQs
- * enabled.
+ * pm_idle functions must always
+ * return with IRQs enabled.
*/
WARN_ON(irqs_disabled());
+ } else
local_irq_enable();
- }
}
leds_event(led_idle_end);
rcu_idle_exit();
tick_nohz_idle_exit();
- preempt_enable_no_resched();
- schedule();
- preempt_disable();
+ schedule_preempt_disabled();
}
}
@@ -526,22 +528,39 @@ unsigned long arch_randomize_brk(struct mm_struct *mm)
#ifdef CONFIG_MMU
/*
* The vectors page is always readable from user space for the
- * atomic helpers and the signal restart code. Let's declare a mapping
- * for it so it is visible through ptrace and /proc/<pid>/mem.
+ * atomic helpers and the signal restart code. Insert it into the
+ * gate_vma so that it is visible through ptrace and /proc/<pid>/mem.
*/
+static struct vm_area_struct gate_vma;
+
+static int __init gate_vma_init(void)
+{
+ gate_vma.vm_start = 0xffff0000;
+ gate_vma.vm_end = 0xffff0000 + PAGE_SIZE;
+ gate_vma.vm_page_prot = PAGE_READONLY_EXEC;
+ gate_vma.vm_flags = VM_READ | VM_EXEC |
+ VM_MAYREAD | VM_MAYEXEC;
+ return 0;
+}
+arch_initcall(gate_vma_init);
+
+struct vm_area_struct *get_gate_vma(struct mm_struct *mm)
+{
+ return &gate_vma;
+}
+
+int in_gate_area(struct mm_struct *mm, unsigned long addr)
+{
+ return (addr >= gate_vma.vm_start) && (addr < gate_vma.vm_end);
+}
-int vectors_user_mapping(void)
+int in_gate_area_no_mm(unsigned long addr)
{
- struct mm_struct *mm = current->mm;
- return install_special_mapping(mm, 0xffff0000, PAGE_SIZE,
- VM_READ | VM_EXEC |
- VM_MAYREAD | VM_MAYEXEC |
- VM_ALWAYSDUMP | VM_RESERVED,
- NULL);
+ return in_gate_area(NULL, addr);
}
const char *arch_vma_name(struct vm_area_struct *vma)
{
- return (vma->vm_start == 0xffff0000) ? "[vectors]" : NULL;
+ return (vma == &gate_vma) ? "[vectors]" : NULL;
}
#endif
diff --git a/arch/arm/kernel/ptrace.c b/arch/arm/kernel/ptrace.c
index 483727ad6892..80abafb9bf33 100644
--- a/arch/arm/kernel/ptrace.c
+++ b/arch/arm/kernel/ptrace.c
@@ -23,9 +23,9 @@
#include <linux/perf_event.h>
#include <linux/hw_breakpoint.h>
#include <linux/regset.h>
+#include <linux/audit.h>
#include <asm/pgtable.h>
-#include <asm/system.h>
#include <asm/traps.h>
#define REG_PC 15
@@ -256,7 +256,7 @@ static int ptrace_read_user(struct task_struct *tsk, unsigned long off,
{
unsigned long tmp;
- if (off & 3 || off >= sizeof(struct user))
+ if (off & 3)
return -EIO;
tmp = 0;
@@ -268,6 +268,8 @@ static int ptrace_read_user(struct task_struct *tsk, unsigned long off,
tmp = tsk->mm->end_code;
else if (off < sizeof(struct pt_regs))
tmp = get_user_reg(tsk, off >> 2);
+ else if (off >= sizeof(struct user))
+ return -EIO;
return put_user(tmp, ret);
}
@@ -699,10 +701,13 @@ static int vfp_set(struct task_struct *target,
{
int ret;
struct thread_info *thread = task_thread_info(target);
- struct vfp_hard_struct new_vfp = thread->vfpstate.hard;
+ struct vfp_hard_struct new_vfp;
const size_t user_fpregs_offset = offsetof(struct user_vfp, fpregs);
const size_t user_fpscr_offset = offsetof(struct user_vfp, fpscr);
+ vfp_sync_hwstate(thread);
+ new_vfp = thread->vfpstate.hard;
+
ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
&new_vfp.fpregs,
user_fpregs_offset,
@@ -723,9 +728,8 @@ static int vfp_set(struct task_struct *target,
if (ret)
return ret;
- vfp_sync_hwstate(thread);
- thread->vfpstate.hard = new_vfp;
vfp_flush_hwstate(thread);
+ thread->vfpstate.hard = new_vfp;
return 0;
}
@@ -902,15 +906,16 @@ long arch_ptrace(struct task_struct *child, long request,
return ret;
}
+#ifdef __ARMEB__
+#define AUDIT_ARCH_NR AUDIT_ARCH_ARMEB
+#else
+#define AUDIT_ARCH_NR AUDIT_ARCH_ARM
+#endif
+
asmlinkage int syscall_trace(int why, struct pt_regs *regs, int scno)
{
unsigned long ip;
- if (!test_thread_flag(TIF_SYSCALL_TRACE))
- return scno;
- if (!(current->ptrace & PT_PTRACED))
- return scno;
-
/*
* Save IP. IP is used to denote syscall entry/exit:
* IP = 0 -> entry, = 1 -> exit
@@ -918,6 +923,17 @@ asmlinkage int syscall_trace(int why, struct pt_regs *regs, int scno)
ip = regs->ARM_ip;
regs->ARM_ip = why;
+ if (!ip)
+ audit_syscall_exit(regs);
+ else
+ audit_syscall_entry(AUDIT_ARCH_NR, scno, regs->ARM_r0,
+ regs->ARM_r1, regs->ARM_r2, regs->ARM_r3);
+
+ if (!test_thread_flag(TIF_SYSCALL_TRACE))
+ return scno;
+ if (!(current->ptrace & PT_PTRACED))
+ return scno;
+
current_thread_info()->syscall = scno;
/* the 0x80 provides a way for the tracing parent to distinguish
diff --git a/arch/arm/kernel/sched_clock.c b/arch/arm/kernel/sched_clock.c
index 5416c7c12528..27d186abbc06 100644
--- a/arch/arm/kernel/sched_clock.c
+++ b/arch/arm/kernel/sched_clock.c
@@ -10,6 +10,7 @@
#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/sched.h>
+#include <linux/syscore_ops.h>
#include <linux/timer.h>
#include <asm/sched_clock.h>
@@ -164,3 +165,20 @@ void __init sched_clock_postinit(void)
sched_clock_poll(sched_clock_timer.data);
}
+
+static int sched_clock_suspend(void)
+{
+ sched_clock_poll(sched_clock_timer.data);
+ return 0;
+}
+
+static struct syscore_ops sched_clock_ops = {
+ .suspend = sched_clock_suspend,
+};
+
+static int __init sched_clock_syscore_init(void)
+{
+ register_syscore_ops(&sched_clock_ops);
+ return 0;
+}
+device_initcall(sched_clock_syscore_init);
diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c
index 129fbd55bde8..b91411371ae1 100644
--- a/arch/arm/kernel/setup.c
+++ b/arch/arm/kernel/setup.c
@@ -21,7 +21,6 @@
#include <linux/init.h>
#include <linux/kexec.h>
#include <linux/of_fdt.h>
-#include <linux/crash_dump.h>
#include <linux/root_dev.h>
#include <linux/cpu.h>
#include <linux/interrupt.h>
@@ -34,6 +33,7 @@
#include <linux/sort.h>
#include <asm/unified.h>
+#include <asm/cp15.h>
#include <asm/cpu.h>
#include <asm/cputype.h>
#include <asm/elf.h>
@@ -45,12 +45,13 @@
#include <asm/cacheflush.h>
#include <asm/cachetype.h>
#include <asm/tlbflush.h>
-#include <asm/system.h>
#include <asm/prom.h>
#include <asm/mach/arch.h>
#include <asm/mach/irq.h>
#include <asm/mach/time.h>
+#include <asm/system_info.h>
+#include <asm/system_misc.h>
#include <asm/traps.h>
#include <asm/unwind.h>
#include <asm/memblock.h>
@@ -160,7 +161,7 @@ static struct resource mem_res[] = {
.flags = IORESOURCE_MEM
},
{
- .name = "Kernel text",
+ .name = "Kernel code",
.start = 0,
.end = 0,
.flags = IORESOURCE_MEM
@@ -427,6 +428,20 @@ void cpu_init(void)
: "r14");
}
+int __cpu_logical_map[NR_CPUS];
+
+void __init smp_setup_processor_id(void)
+{
+ int i;
+ u32 cpu = is_smp() ? read_cpuid_mpidr() & 0xff : 0;
+
+ cpu_logical_map(0) = cpu;
+ for (i = 1; i < NR_CPUS; ++i)
+ cpu_logical_map(i) = i == cpu ? 0 : i;
+
+ printk(KERN_INFO "Booting Linux on physical CPU %d\n", cpu);
+}
+
static void __init setup_processor(void)
{
struct proc_info_list *list;
@@ -961,7 +976,6 @@ void __init setup_arch(char **cmdline_p)
conswitchp = &dummy_con;
#endif
#endif
- early_trap_init();
if (mdesc->init_early)
mdesc->init_early();
diff --git a/arch/arm/kernel/signal.c b/arch/arm/kernel/signal.c
index 0340224cf73c..7cb532fc8aa4 100644
--- a/arch/arm/kernel/signal.c
+++ b/arch/arm/kernel/signal.c
@@ -66,12 +66,13 @@ const unsigned long syscall_restart_code[2] = {
*/
asmlinkage int sys_sigsuspend(int restart, unsigned long oldmask, old_sigset_t mask)
{
- mask &= _BLOCKABLE;
- spin_lock_irq(&current->sighand->siglock);
+ sigset_t blocked;
+
current->saved_sigmask = current->blocked;
- siginitset(&current->blocked, mask);
- recalc_sigpending();
- spin_unlock_irq(&current->sighand->siglock);
+
+ mask &= _BLOCKABLE;
+ siginitset(&blocked, mask);
+ set_current_blocked(&blocked);
current->state = TASK_INTERRUPTIBLE;
schedule();
@@ -227,6 +228,8 @@ static int restore_vfp_context(struct vfp_sigframe __user *frame)
if (magic != VFP_MAGIC || size != VFP_STORAGE_SIZE)
return -EINVAL;
+ vfp_flush_hwstate(thread);
+
/*
* Copy the floating point registers. There can be unused
* registers see asm/hwcap.h for details.
@@ -251,9 +254,6 @@ static int restore_vfp_context(struct vfp_sigframe __user *frame)
__get_user_error(h->fpinst, &frame->ufp_exc.fpinst, err);
__get_user_error(h->fpinst2, &frame->ufp_exc.fpinst2, err);
- if (!err)
- vfp_flush_hwstate(thread);
-
return err ? -EFAULT : 0;
}
@@ -281,10 +281,7 @@ static int restore_sigframe(struct pt_regs *regs, struct sigframe __user *sf)
err = __copy_from_user(&set, &sf->uc.uc_sigmask, sizeof(set));
if (err == 0) {
sigdelsetmask(&set, ~_BLOCKABLE);
- spin_lock_irq(&current->sighand->siglock);
- current->blocked = set;
- recalc_sigpending();
- spin_unlock_irq(&current->sighand->siglock);
+ set_current_blocked(&set);
}
__get_user_error(regs->ARM_r0, &sf->uc.uc_mcontext.arm_r0, err);
@@ -637,13 +634,7 @@ handle_signal(unsigned long sig, struct k_sigaction *ka,
/*
* Block the signal if we were successful.
*/
- spin_lock_irq(&tsk->sighand->siglock);
- sigorsets(&tsk->blocked, &tsk->blocked,
- &ka->sa.sa_mask);
- if (!(ka->sa.sa_flags & SA_NODEFER))
- sigaddset(&tsk->blocked, sig);
- recalc_sigpending();
- spin_unlock_irq(&tsk->sighand->siglock);
+ block_sigmask(ka, sig);
return 0;
}
diff --git a/arch/arm/kernel/sleep.S b/arch/arm/kernel/sleep.S
index 1f268bda4552..987dcf33415c 100644
--- a/arch/arm/kernel/sleep.S
+++ b/arch/arm/kernel/sleep.S
@@ -4,7 +4,6 @@
#include <asm/assembler.h>
#include <asm/glue-cache.h>
#include <asm/glue-proc.h>
-#include <asm/system.h>
.text
/*
diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
index 57db122a4f62..addbbe8028c2 100644
--- a/arch/arm/kernel/smp.c
+++ b/arch/arm/kernel/smp.c
@@ -58,6 +58,8 @@ enum ipi_msg_type {
IPI_CPU_STOP,
};
+static DECLARE_COMPLETION(cpu_running);
+
int __cpuinit __cpu_up(unsigned int cpu)
{
struct cpuinfo_arm *ci = &per_cpu(cpu_data, cpu);
@@ -98,20 +100,12 @@ int __cpuinit __cpu_up(unsigned int cpu)
*/
ret = boot_secondary(cpu, idle);
if (ret == 0) {
- unsigned long timeout;
-
/*
* CPU was successfully started, wait for it
* to come online or time out.
*/
- timeout = jiffies + HZ;
- while (time_before(jiffies, timeout)) {
- if (cpu_online(cpu))
- break;
-
- udelay(10);
- barrier();
- }
+ wait_for_completion_timeout(&cpu_running,
+ msecs_to_jiffies(1000));
if (!cpu_online(cpu)) {
pr_crit("CPU%u: failed to come online\n", cpu);
@@ -233,20 +227,6 @@ void __ref cpu_die(void)
}
#endif /* CONFIG_HOTPLUG_CPU */
-int __cpu_logical_map[NR_CPUS];
-
-void __init smp_setup_processor_id(void)
-{
- int i;
- u32 cpu = is_smp() ? read_cpuid_mpidr() & 0xff : 0;
-
- cpu_logical_map(0) = cpu;
- for (i = 1; i < NR_CPUS; ++i)
- cpu_logical_map(i) = i == cpu ? 0 : i;
-
- printk(KERN_INFO "Booting Linux on physical CPU %d\n", cpu);
-}
-
/*
* Called by both boot and secondaries to move global data into
* per-processor storage.
@@ -260,6 +240,8 @@ static void __cpuinit smp_store_cpu_info(unsigned int cpuid)
store_cpu_topology(cpuid);
}
+static void percpu_timer_setup(void);
+
/*
* This is the secondary CPU boot entry. We're using this CPUs
* idle thread stack, but a set of temporary page tables.
@@ -300,22 +282,16 @@ asmlinkage void __cpuinit secondary_start_kernel(void)
/*
* OK, now it's safe to let the boot CPU continue. Wait for
* the CPU migration code to notice that the CPU is online
- * before we continue.
+ * before we continue - which happens after __cpu_up returns.
*/
set_cpu_online(cpu, true);
+ complete(&cpu_running);
/*
* Setup the percpu timer for this CPU.
*/
percpu_timer_setup();
- while (!cpu_active(cpu))
- cpu_relax();
-
- /*
- * cpu_active bit is set, so it's safe to enable interrupts
- * now.
- */
local_irq_enable();
local_fiq_enable();
@@ -373,7 +349,7 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
* re-initialize the map in platform_smp_prepare_cpus() if
* present != possible (e.g. physical hotplug).
*/
- init_cpu_present(&cpu_possible_map);
+ init_cpu_present(cpu_possible_mask);
/*
* Initialise the SCU if there are more than one CPU
@@ -443,9 +419,7 @@ static DEFINE_PER_CPU(struct clock_event_device, percpu_clockevent);
static void ipi_timer(void)
{
struct clock_event_device *evt = &__get_cpu_var(percpu_clockevent);
- irq_enter();
evt->event_handler(evt);
- irq_exit();
}
#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
@@ -475,7 +449,20 @@ static void __cpuinit broadcast_timer_setup(struct clock_event_device *evt)
clockevents_register_device(evt);
}
-void __cpuinit percpu_timer_setup(void)
+static struct local_timer_ops *lt_ops;
+
+#ifdef CONFIG_LOCAL_TIMERS
+int local_timer_register(struct local_timer_ops *ops)
+{
+ if (lt_ops)
+ return -EBUSY;
+
+ lt_ops = ops;
+ return 0;
+}
+#endif
+
+static void __cpuinit percpu_timer_setup(void)
{
unsigned int cpu = smp_processor_id();
struct clock_event_device *evt = &per_cpu(percpu_clockevent, cpu);
@@ -483,7 +470,7 @@ void __cpuinit percpu_timer_setup(void)
evt->cpumask = cpumask_of(cpu);
evt->broadcast = smp_timer_broadcast;
- if (local_timer_setup(evt))
+ if (!lt_ops || lt_ops->setup(evt))
broadcast_timer_setup(evt);
}
@@ -498,7 +485,8 @@ static void percpu_timer_stop(void)
unsigned int cpu = smp_processor_id();
struct clock_event_device *evt = &per_cpu(percpu_clockevent, cpu);
- local_timer_stop(evt);
+ if (lt_ops)
+ lt_ops->stop(evt);
}
#endif
@@ -548,7 +536,9 @@ void handle_IPI(int ipinr, struct pt_regs *regs)
switch (ipinr) {
case IPI_TIMER:
+ irq_enter();
ipi_timer();
+ irq_exit();
break;
case IPI_RESCHEDULE:
@@ -556,15 +546,21 @@ void handle_IPI(int ipinr, struct pt_regs *regs)
break;
case IPI_CALL_FUNC:
+ irq_enter();
generic_smp_call_function_interrupt();
+ irq_exit();
break;
case IPI_CALL_FUNC_SINGLE:
+ irq_enter();
generic_smp_call_function_single_interrupt();
+ irq_exit();
break;
case IPI_CPU_STOP:
+ irq_enter();
ipi_cpu_stop(cpu);
+ irq_exit();
break;
default:
@@ -585,8 +581,9 @@ void smp_send_stop(void)
unsigned long timeout;
if (num_online_cpus() > 1) {
- cpumask_t mask = cpu_online_map;
- cpu_clear(smp_processor_id(), mask);
+ struct cpumask mask;
+ cpumask_copy(&mask, cpu_online_mask);
+ cpumask_clear_cpu(smp_processor_id(), &mask);
smp_cross_call(&mask, IPI_CPU_STOP);
}
diff --git a/arch/arm/kernel/smp_tlb.c b/arch/arm/kernel/smp_tlb.c
index 7dcb35285be7..02c5d2ce23bf 100644
--- a/arch/arm/kernel/smp_tlb.c
+++ b/arch/arm/kernel/smp_tlb.c
@@ -13,18 +13,6 @@
#include <asm/smp_plat.h>
#include <asm/tlbflush.h>
-static void on_each_cpu_mask(void (*func)(void *), void *info, int wait,
- const struct cpumask *mask)
-{
- preempt_disable();
-
- smp_call_function_many(mask, func, info, wait);
- if (cpumask_test_cpu(smp_processor_id(), mask))
- func(info);
-
- preempt_enable();
-}
-
/**********************************************************************/
/*
@@ -87,7 +75,7 @@ void flush_tlb_all(void)
void flush_tlb_mm(struct mm_struct *mm)
{
if (tlb_ops_need_broadcast())
- on_each_cpu_mask(ipi_flush_tlb_mm, mm, 1, mm_cpumask(mm));
+ on_each_cpu_mask(mm_cpumask(mm), ipi_flush_tlb_mm, mm, 1);
else
local_flush_tlb_mm(mm);
}
@@ -98,7 +86,8 @@ void flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr)
struct tlb_args ta;
ta.ta_vma = vma;
ta.ta_start = uaddr;
- on_each_cpu_mask(ipi_flush_tlb_page, &ta, 1, mm_cpumask(vma->vm_mm));
+ on_each_cpu_mask(mm_cpumask(vma->vm_mm), ipi_flush_tlb_page,
+ &ta, 1);
} else
local_flush_tlb_page(vma, uaddr);
}
@@ -121,7 +110,8 @@ void flush_tlb_range(struct vm_area_struct *vma,
ta.ta_vma = vma;
ta.ta_start = start;
ta.ta_end = end;
- on_each_cpu_mask(ipi_flush_tlb_range, &ta, 1, mm_cpumask(vma->vm_mm));
+ on_each_cpu_mask(mm_cpumask(vma->vm_mm), ipi_flush_tlb_range,
+ &ta, 1);
} else
local_flush_tlb_range(vma, start, end);
}
diff --git a/arch/arm/kernel/smp_twd.c b/arch/arm/kernel/smp_twd.c
index c8e938553d47..fef42b21cecb 100644
--- a/arch/arm/kernel/smp_twd.c
+++ b/arch/arm/kernel/smp_twd.c
@@ -18,20 +18,23 @@
#include <linux/smp.h>
#include <linux/jiffies.h>
#include <linux/clockchips.h>
-#include <linux/irq.h>
+#include <linux/interrupt.h>
#include <linux/io.h>
+#include <linux/of_irq.h>
+#include <linux/of_address.h>
#include <asm/smp_twd.h>
#include <asm/localtimer.h>
#include <asm/hardware/gic.h>
/* set up by the platform code */
-void __iomem *twd_base;
+static void __iomem *twd_base;
static struct clk *twd_clk;
static unsigned long twd_timer_rate;
static struct clock_event_device __percpu **twd_evt;
+static int twd_ppi;
static void twd_set_mode(enum clock_event_mode mode,
struct clock_event_device *clk)
@@ -77,7 +80,7 @@ static int twd_set_next_event(unsigned long evt,
* If a local timer interrupt has occurred, acknowledge and return 1.
* Otherwise, return 0.
*/
-int twd_timer_ack(void)
+static int twd_timer_ack(void)
{
if (__raw_readl(twd_base + TWD_TIMER_INTSTAT)) {
__raw_writel(1, twd_base + TWD_TIMER_INTSTAT);
@@ -87,7 +90,7 @@ int twd_timer_ack(void)
return 0;
}
-void twd_timer_stop(struct clock_event_device *clk)
+static void twd_timer_stop(struct clock_event_device *clk)
{
twd_set_mode(CLOCK_EVT_MODE_UNUSED, clk);
disable_percpu_irq(clk->irq);
@@ -129,7 +132,7 @@ static struct notifier_block twd_cpufreq_nb = {
static int twd_cpufreq_init(void)
{
- if (!IS_ERR(twd_clk))
+ if (twd_evt && *__this_cpu_ptr(twd_evt) && !IS_ERR(twd_clk))
return cpufreq_register_notifier(&twd_cpufreq_nb,
CPUFREQ_TRANSITION_NOTIFIER);
@@ -222,28 +225,10 @@ static struct clk *twd_get_clock(void)
/*
* Setup the local clock events for a CPU.
*/
-void __cpuinit twd_timer_setup(struct clock_event_device *clk)
+static int __cpuinit twd_timer_setup(struct clock_event_device *clk)
{
struct clock_event_device **this_cpu_clk;
- if (!twd_evt) {
- int err;
-
- twd_evt = alloc_percpu(struct clock_event_device *);
- if (!twd_evt) {
- pr_err("twd: can't allocate memory\n");
- return;
- }
-
- err = request_percpu_irq(clk->irq, twd_handler,
- "twd", twd_evt);
- if (err) {
- pr_err("twd: can't register interrupt %d (%d)\n",
- clk->irq, err);
- return;
- }
- }
-
if (!twd_clk)
twd_clk = twd_get_clock();
@@ -252,12 +237,15 @@ void __cpuinit twd_timer_setup(struct clock_event_device *clk)
else
twd_calibrate_rate();
+ __raw_writel(0, twd_base + TWD_TIMER_CONTROL);
+
clk->name = "local_timer";
clk->features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT |
CLOCK_EVT_FEAT_C3STOP;
clk->rating = 350;
clk->set_mode = twd_set_mode;
clk->set_next_event = twd_set_next_event;
+ clk->irq = twd_ppi;
this_cpu_clk = __this_cpu_ptr(twd_evt);
*this_cpu_clk = clk;
@@ -265,4 +253,95 @@ void __cpuinit twd_timer_setup(struct clock_event_device *clk)
clockevents_config_and_register(clk, twd_timer_rate,
0xf, 0xffffffff);
enable_percpu_irq(clk->irq, 0);
+
+ return 0;
+}
+
+static struct local_timer_ops twd_lt_ops __cpuinitdata = {
+ .setup = twd_timer_setup,
+ .stop = twd_timer_stop,
+};
+
+static int __init twd_local_timer_common_register(void)
+{
+ int err;
+
+ twd_evt = alloc_percpu(struct clock_event_device *);
+ if (!twd_evt) {
+ err = -ENOMEM;
+ goto out_free;
+ }
+
+ err = request_percpu_irq(twd_ppi, twd_handler, "twd", twd_evt);
+ if (err) {
+ pr_err("twd: can't register interrupt %d (%d)\n", twd_ppi, err);
+ goto out_free;
+ }
+
+ err = local_timer_register(&twd_lt_ops);
+ if (err)
+ goto out_irq;
+
+ return 0;
+
+out_irq:
+ free_percpu_irq(twd_ppi, twd_evt);
+out_free:
+ iounmap(twd_base);
+ twd_base = NULL;
+ free_percpu(twd_evt);
+
+ return err;
+}
+
+int __init twd_local_timer_register(struct twd_local_timer *tlt)
+{
+ if (twd_base || twd_evt)
+ return -EBUSY;
+
+ twd_ppi = tlt->res[1].start;
+
+ twd_base = ioremap(tlt->res[0].start, resource_size(&tlt->res[0]));
+ if (!twd_base)
+ return -ENOMEM;
+
+ return twd_local_timer_common_register();
+}
+
+#ifdef CONFIG_OF
+const static struct of_device_id twd_of_match[] __initconst = {
+ { .compatible = "arm,cortex-a9-twd-timer", },
+ { .compatible = "arm,cortex-a5-twd-timer", },
+ { .compatible = "arm,arm11mp-twd-timer", },
+ { },
+};
+
+void __init twd_local_timer_of_register(void)
+{
+ struct device_node *np;
+ int err;
+
+ np = of_find_matching_node(NULL, twd_of_match);
+ if (!np) {
+ err = -ENODEV;
+ goto out;
+ }
+
+ twd_ppi = irq_of_parse_and_map(np, 0);
+ if (!twd_ppi) {
+ err = -EINVAL;
+ goto out;
+ }
+
+ twd_base = of_iomap(np, 0);
+ if (!twd_base) {
+ err = -ENOMEM;
+ goto out;
+ }
+
+ err = twd_local_timer_common_register();
+
+out:
+ WARN(err, "twd_local_timer_of_register failed (%d)\n", err);
}
+#endif
diff --git a/arch/arm/kernel/tcm.c b/arch/arm/kernel/tcm.c
index 01ec453bb924..30ae6bb4a310 100644
--- a/arch/arm/kernel/tcm.c
+++ b/arch/arm/kernel/tcm.c
@@ -16,6 +16,7 @@
#include <asm/cputype.h>
#include <asm/mach/map.h>
#include <asm/memory.h>
+#include <asm/system_info.h>
#include "tcm.h"
static struct gen_pool *tcm_pool;
diff --git a/arch/arm/kernel/thumbee.c b/arch/arm/kernel/thumbee.c
index 9cb7aaca159f..aab899764053 100644
--- a/arch/arm/kernel/thumbee.c
+++ b/arch/arm/kernel/thumbee.c
@@ -20,6 +20,7 @@
#include <linux/kernel.h>
#include <linux/init.h>
+#include <asm/system_info.h>
#include <asm/thread_notify.h>
/*
diff --git a/arch/arm/kernel/time.c b/arch/arm/kernel/time.c
index 8c57dd3680e9..fe31b22f18fd 100644
--- a/arch/arm/kernel/time.c
+++ b/arch/arm/kernel/time.c
@@ -25,8 +25,6 @@
#include <linux/timer.h>
#include <linux/irq.h>
-#include <linux/mc146818rtc.h>
-
#include <asm/leds.h>
#include <asm/thread_info.h>
#include <asm/sched_clock.h>
@@ -149,8 +147,6 @@ void __init time_init(void)
{
system_timer = machine_desc->timer;
system_timer->init();
-#ifdef CONFIG_HAVE_SCHED_CLOCK
sched_clock_postinit();
-#endif
}
diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c
index 99a572702509..778454750a6c 100644
--- a/arch/arm/kernel/traps.c
+++ b/arch/arm/kernel/traps.c
@@ -29,11 +29,11 @@
#include <linux/atomic.h>
#include <asm/cacheflush.h>
#include <asm/exception.h>
-#include <asm/system.h>
#include <asm/unistd.h>
#include <asm/traps.h>
#include <asm/unwind.h>
#include <asm/tls.h>
+#include <asm/system_misc.h>
#include "signal.h"
@@ -227,6 +227,11 @@ void show_stack(struct task_struct *tsk, unsigned long *sp)
#else
#define S_SMP ""
#endif
+#ifdef CONFIG_THUMB2_KERNEL
+#define S_ISA " THUMB2"
+#else
+#define S_ISA " ARM"
+#endif
static int __die(const char *str, int err, struct thread_info *thread, struct pt_regs *regs)
{
@@ -234,8 +239,8 @@ static int __die(const char *str, int err, struct thread_info *thread, struct pt
static int die_counter;
int ret;
- printk(KERN_EMERG "Internal error: %s: %x [#%d]" S_PREEMPT S_SMP "\n",
- str, err, ++die_counter);
+ printk(KERN_EMERG "Internal error: %s: %x [#%d]" S_PREEMPT S_SMP
+ S_ISA "\n", str, err, ++die_counter);
/* trap and error numbers are mostly meaningless on ARM */
ret = notify_die(DIE_OOPS, str, regs, err, tsk->thread.trap_no, SIGSEGV);
@@ -266,6 +271,7 @@ void die(const char *str, struct pt_regs *regs, int err)
{
struct thread_info *thread = current_thread_info();
int ret;
+ enum bug_trap_type bug_type = BUG_TRAP_TYPE_NONE;
oops_enter();
@@ -273,7 +279,9 @@ void die(const char *str, struct pt_regs *regs, int err)
console_verbose();
bust_spinlocks(1);
if (!user_mode(regs))
- report_bug(regs->ARM_pc, regs);
+ bug_type = report_bug(regs->ARM_pc, regs);
+ if (bug_type != BUG_TRAP_TYPE_NONE)
+ str = "Oops - BUG";
ret = __die(str, err, thread, regs);
if (regs && kexec_should_crash(thread->task))
@@ -781,18 +789,16 @@ static void __init kuser_get_tls_init(unsigned long vectors)
memcpy((void *)vectors + 0xfe0, (void *)vectors + 0xfe8, 4);
}
-void __init early_trap_init(void)
+void __init early_trap_init(void *vectors_base)
{
-#if defined(CONFIG_CPU_USE_DOMAINS)
- unsigned long vectors = CONFIG_VECTORS_BASE;
-#else
- unsigned long vectors = (unsigned long)vectors_page;
-#endif
+ unsigned long vectors = (unsigned long)vectors_base;
extern char __stubs_start[], __stubs_end[];
extern char __vectors_start[], __vectors_end[];
extern char __kuser_helper_start[], __kuser_helper_end[];
int kuser_sz = __kuser_helper_end - __kuser_helper_start;
+ vectors_page = vectors_base;
+
/*
* Copy the vectors, stubs and kuser helpers (in entry-armv.S)
* into the vector page, mapped at 0xffff0000, and ensure these
diff --git a/arch/arm/kernel/vmlinux.lds.S b/arch/arm/kernel/vmlinux.lds.S
index f76e75548670..43a31fb06318 100644
--- a/arch/arm/kernel/vmlinux.lds.S
+++ b/arch/arm/kernel/vmlinux.lds.S
@@ -4,11 +4,13 @@
*/
#include <asm-generic/vmlinux.lds.h>
+#include <asm/cache.h>
#include <asm/thread_info.h>
#include <asm/memory.h>
#include <asm/page.h>
#define PROC_INFO \
+ . = ALIGN(4); \
VMLINUX_SYMBOL(__proc_info_begin) = .; \
*(.proc.info.init) \
VMLINUX_SYMBOL(__proc_info_end) = .;
@@ -181,7 +183,7 @@ SECTIONS
}
#endif
- PERCPU_SECTION(32)
+ PERCPU_SECTION(L1_CACHE_BYTES)
#ifdef CONFIG_XIP_KERNEL
__data_loc = ALIGN(4); /* location in binary */
@@ -212,13 +214,13 @@ SECTIONS
#endif
NOSAVE_DATA
- CACHELINE_ALIGNED_DATA(32)
- READ_MOSTLY_DATA(32)
+ CACHELINE_ALIGNED_DATA(L1_CACHE_BYTES)
+ READ_MOSTLY_DATA(L1_CACHE_BYTES)
/*
* The exception fixup table (might need resorting at runtime)
*/
- . = ALIGN(32);
+ . = ALIGN(4);
__start___ex_table = .;
#ifdef CONFIG_MMU
*(__ex_table)