Diffstat (limited to 'arch/blackfin/kernel')
-rw-r--r--  arch/blackfin/kernel/Makefile               |   1
-rw-r--r--  arch/blackfin/kernel/bfin_dma_5xx.c         |  10
-rw-r--r--  arch/blackfin/kernel/bfin_gpio.c            |  20
-rw-r--r--  arch/blackfin/kernel/cplb-mpu/cplbinit.c    |   9
-rw-r--r--  arch/blackfin/kernel/cplb-mpu/cplbmgr.c     |  54
-rw-r--r--  arch/blackfin/kernel/cplb-nompu/cplbinit.c  |   9
-rw-r--r--  arch/blackfin/kernel/dma-mapping.c          |   2
-rw-r--r--  arch/blackfin/kernel/entry.S                |   8
-rw-r--r--  arch/blackfin/kernel/ftrace-entry.S         |  23
-rw-r--r--  arch/blackfin/kernel/ftrace.c               |   6
-rw-r--r--  arch/blackfin/kernel/init_task.c            |   2
-rw-r--r--  arch/blackfin/kernel/kgdb.c                 |  10
-rw-r--r--  arch/blackfin/kernel/nmi.c                  | 299
-rw-r--r--  arch/blackfin/kernel/process.c              |   7
-rw-r--r--  arch/blackfin/kernel/ptrace.c               | 360
-rw-r--r--  arch/blackfin/kernel/setup.c                |  34
-rw-r--r--  arch/blackfin/kernel/signal.c               |  24
-rw-r--r--  arch/blackfin/kernel/time-ts.c              | 205
-rw-r--r--  arch/blackfin/kernel/traps.c                |  32
-rw-r--r--  arch/blackfin/kernel/vmlinux.lds.S          |  70
20 files changed, 819 insertions, 366 deletions
diff --git a/arch/blackfin/kernel/Makefile b/arch/blackfin/kernel/Makefile
index a8ddbc8ed5a..346a421f156 100644
--- a/arch/blackfin/kernel/Makefile
+++ b/arch/blackfin/kernel/Makefile
@@ -25,6 +25,7 @@ obj-$(CONFIG_CPLB_INFO) += cplbinfo.o
obj-$(CONFIG_MODULES) += module.o
obj-$(CONFIG_KGDB) += kgdb.o
obj-$(CONFIG_KGDB_TESTS) += kgdb_test.o
+obj-$(CONFIG_NMI_WATCHDOG) += nmi.o
obj-$(CONFIG_EARLY_PRINTK) += early_printk.o
obj-$(CONFIG_EARLY_PRINTK) += shadow_console.o
obj-$(CONFIG_STACKTRACE) += stacktrace.o
diff --git a/arch/blackfin/kernel/bfin_dma_5xx.c b/arch/blackfin/kernel/bfin_dma_5xx.c
index 924c00286ba..26403d1c9e6 100644
--- a/arch/blackfin/kernel/bfin_dma_5xx.c
+++ b/arch/blackfin/kernel/bfin_dma_5xx.c
@@ -91,7 +91,7 @@ late_initcall(proc_dma_init);
*/
int request_dma(unsigned int channel, const char *device_id)
{
- pr_debug("request_dma() : BEGIN \n");
+ pr_debug("request_dma() : BEGIN\n");
if (device_id == NULL)
printk(KERN_WARNING "request_dma(%u): no device_id given\n", channel);
@@ -107,7 +107,7 @@ int request_dma(unsigned int channel, const char *device_id)
#endif
if (atomic_cmpxchg(&dma_ch[channel].chan_status, 0, 1)) {
- pr_debug("DMA CHANNEL IN USE \n");
+ pr_debug("DMA CHANNEL IN USE\n");
return -EBUSY;
}
@@ -131,7 +131,7 @@ int request_dma(unsigned int channel, const char *device_id)
* you have to request DMA, before doing any operations on
* descriptor/channel
*/
- pr_debug("request_dma() : END \n");
+ pr_debug("request_dma() : END\n");
return 0;
}
EXPORT_SYMBOL(request_dma);
@@ -171,7 +171,7 @@ static void clear_dma_buffer(unsigned int channel)
void free_dma(unsigned int channel)
{
- pr_debug("freedma() : BEGIN \n");
+ pr_debug("freedma() : BEGIN\n");
BUG_ON(channel >= MAX_DMA_CHANNELS ||
!atomic_read(&dma_ch[channel].chan_status));
@@ -185,7 +185,7 @@ void free_dma(unsigned int channel)
/* Clear the DMA Variable in the Channel */
atomic_set(&dma_ch[channel].chan_status, 0);
- pr_debug("freedma() : END \n");
+ pr_debug("freedma() : END\n");
}
EXPORT_SYMBOL(free_dma);
diff --git a/arch/blackfin/kernel/bfin_gpio.c b/arch/blackfin/kernel/bfin_gpio.c
index a174596cc00..e35e20f00d9 100644
--- a/arch/blackfin/kernel/bfin_gpio.c
+++ b/arch/blackfin/kernel/bfin_gpio.c
@@ -1289,44 +1289,50 @@ __initcall(gpio_register_proc);
#endif
#ifdef CONFIG_GPIOLIB
-int bfin_gpiolib_direction_input(struct gpio_chip *chip, unsigned gpio)
+static int bfin_gpiolib_direction_input(struct gpio_chip *chip, unsigned gpio)
{
return bfin_gpio_direction_input(gpio);
}
-int bfin_gpiolib_direction_output(struct gpio_chip *chip, unsigned gpio, int level)
+static int bfin_gpiolib_direction_output(struct gpio_chip *chip, unsigned gpio, int level)
{
return bfin_gpio_direction_output(gpio, level);
}
-int bfin_gpiolib_get_value(struct gpio_chip *chip, unsigned gpio)
+static int bfin_gpiolib_get_value(struct gpio_chip *chip, unsigned gpio)
{
return bfin_gpio_get_value(gpio);
}
-void bfin_gpiolib_set_value(struct gpio_chip *chip, unsigned gpio, int value)
+static void bfin_gpiolib_set_value(struct gpio_chip *chip, unsigned gpio, int value)
{
return bfin_gpio_set_value(gpio, value);
}
-int bfin_gpiolib_gpio_request(struct gpio_chip *chip, unsigned gpio)
+static int bfin_gpiolib_gpio_request(struct gpio_chip *chip, unsigned gpio)
{
return bfin_gpio_request(gpio, chip->label);
}
-void bfin_gpiolib_gpio_free(struct gpio_chip *chip, unsigned gpio)
+static void bfin_gpiolib_gpio_free(struct gpio_chip *chip, unsigned gpio)
{
return bfin_gpio_free(gpio);
}
+static int bfin_gpiolib_gpio_to_irq(struct gpio_chip *chip, unsigned gpio)
+{
+ return gpio + GPIO_IRQ_BASE;
+}
+
static struct gpio_chip bfin_chip = {
- .label = "Blackfin-GPIOlib",
+ .label = "BFIN-GPIO",
.direction_input = bfin_gpiolib_direction_input,
.get = bfin_gpiolib_get_value,
.direction_output = bfin_gpiolib_direction_output,
.set = bfin_gpiolib_set_value,
.request = bfin_gpiolib_gpio_request,
.free = bfin_gpiolib_gpio_free,
+ .to_irq = bfin_gpiolib_gpio_to_irq,
.base = 0,
.ngpio = MAX_BLACKFIN_GPIOS,
};
diff --git a/arch/blackfin/kernel/cplb-mpu/cplbinit.c b/arch/blackfin/kernel/cplb-mpu/cplbinit.c
index 8d42b9e50df..30fd6417f06 100644
--- a/arch/blackfin/kernel/cplb-mpu/cplbinit.c
+++ b/arch/blackfin/kernel/cplb-mpu/cplbinit.c
@@ -64,6 +64,15 @@ void __init generate_cplb_tables_cpu(unsigned int cpu)
icplb_tbl[cpu][i_i++].data = i_data | (addr == 0 ? CPLB_USER_RD : 0);
}
+#ifdef CONFIG_ROMKERNEL
+ /* Cover kernel XIP flash area */
+ addr = CONFIG_ROM_BASE & ~(4 * 1024 * 1024 - 1);
+ dcplb_tbl[cpu][i_d].addr = addr;
+ dcplb_tbl[cpu][i_d++].data = d_data | CPLB_USER_RD;
+ icplb_tbl[cpu][i_i].addr = addr;
+ icplb_tbl[cpu][i_i++].data = i_data | CPLB_USER_RD;
+#endif
+
/* Cover L1 memory. One 4M area for code and data each is enough. */
#if L1_DATA_A_LENGTH > 0 || L1_DATA_B_LENGTH > 0
dcplb_tbl[cpu][i_d].addr = get_l1_data_a_start_cpu(cpu);
diff --git a/arch/blackfin/kernel/cplb-mpu/cplbmgr.c b/arch/blackfin/kernel/cplb-mpu/cplbmgr.c
index 930c01c0681..87b25b1b30e 100644
--- a/arch/blackfin/kernel/cplb-mpu/cplbmgr.c
+++ b/arch/blackfin/kernel/cplb-mpu/cplbmgr.c
@@ -31,6 +31,12 @@ int nr_dcplb_miss[NR_CPUS], nr_icplb_miss[NR_CPUS];
int nr_icplb_supv_miss[NR_CPUS], nr_dcplb_prot[NR_CPUS];
int nr_cplb_flush[NR_CPUS];
+#ifdef CONFIG_EXCPT_IRQ_SYSC_L1
+#define MGR_ATTR __attribute__((l1_text))
+#else
+#define MGR_ATTR
+#endif
+
/*
* Given the contents of the status register, return the index of the
* CPLB that caused the fault.
@@ -59,7 +65,7 @@ static int icplb_rr_index[NR_CPUS], dcplb_rr_index[NR_CPUS];
/*
* Find an ICPLB entry to be evicted and return its index.
*/
-static int evict_one_icplb(unsigned int cpu)
+MGR_ATTR static int evict_one_icplb(unsigned int cpu)
{
int i;
for (i = first_switched_icplb; i < MAX_CPLBS; i++)
@@ -74,7 +80,7 @@ static int evict_one_icplb(unsigned int cpu)
return i;
}
-static int evict_one_dcplb(unsigned int cpu)
+MGR_ATTR static int evict_one_dcplb(unsigned int cpu)
{
int i;
for (i = first_switched_dcplb; i < MAX_CPLBS; i++)
@@ -89,7 +95,7 @@ static int evict_one_dcplb(unsigned int cpu)
return i;
}
-static noinline int dcplb_miss(unsigned int cpu)
+MGR_ATTR static noinline int dcplb_miss(unsigned int cpu)
{
unsigned long addr = bfin_read_DCPLB_FAULT_ADDR();
int status = bfin_read_DCPLB_STATUS();
@@ -114,10 +120,15 @@ static noinline int dcplb_miss(unsigned int cpu)
d_data = L2_DMEMORY;
} else if (addr >= physical_mem_end) {
if (addr >= ASYNC_BANK0_BASE && addr < ASYNC_BANK3_BASE + ASYNC_BANK3_SIZE) {
- addr &= ~(4 * 1024 * 1024 - 1);
- d_data &= ~PAGE_SIZE_4KB;
- d_data |= PAGE_SIZE_4MB;
- d_data |= CPLB_USER_RD | CPLB_USER_WR;
+ mask = current_rwx_mask[cpu];
+ if (mask) {
+ int page = (addr - (ASYNC_BANK0_BASE - _ramend)) >> PAGE_SHIFT;
+ int idx = page >> 5;
+ int bit = 1 << (page & 31);
+
+ if (mask[idx] & bit)
+ d_data |= CPLB_USER_RD;
+ }
} else if (addr >= BOOT_ROM_START && addr < BOOT_ROM_START + BOOT_ROM_LENGTH
&& (status & (FAULT_RW | FAULT_USERSUPV)) == FAULT_USERSUPV) {
addr &= ~(1 * 1024 * 1024 - 1);
@@ -126,7 +137,9 @@ static noinline int dcplb_miss(unsigned int cpu)
} else
return CPLB_PROT_VIOL;
} else if (addr >= _ramend) {
- d_data |= CPLB_USER_RD | CPLB_USER_WR;
+ d_data |= CPLB_USER_RD | CPLB_USER_WR;
+ if (reserved_mem_dcache_on)
+ d_data |= CPLB_L1_CHBL;
} else {
mask = current_rwx_mask[cpu];
if (mask) {
@@ -156,7 +169,7 @@ static noinline int dcplb_miss(unsigned int cpu)
return 0;
}
-static noinline int icplb_miss(unsigned int cpu)
+MGR_ATTR static noinline int icplb_miss(unsigned int cpu)
{
unsigned long addr = bfin_read_ICPLB_FAULT_ADDR();
int status = bfin_read_ICPLB_STATUS();
@@ -204,10 +217,19 @@ static noinline int icplb_miss(unsigned int cpu)
i_data = L2_IMEMORY;
} else if (addr >= physical_mem_end) {
if (addr >= ASYNC_BANK0_BASE && addr < ASYNC_BANK3_BASE + ASYNC_BANK3_SIZE) {
- addr &= ~(4 * 1024 * 1024 - 1);
- i_data &= ~PAGE_SIZE_4KB;
- i_data |= PAGE_SIZE_4MB;
- i_data |= CPLB_USER_RD;
+ if (!(status & FAULT_USERSUPV)) {
+ unsigned long *mask = current_rwx_mask[cpu];
+
+ if (mask) {
+ int page = (addr - (ASYNC_BANK0_BASE - _ramend)) >> PAGE_SHIFT;
+ int idx = page >> 5;
+ int bit = 1 << (page & 31);
+
+ mask += 2 * page_mask_nelts;
+ if (mask[idx] & bit)
+ i_data |= CPLB_USER_RD;
+ }
+ }
} else if (addr >= BOOT_ROM_START && addr < BOOT_ROM_START + BOOT_ROM_LENGTH
&& (status & FAULT_USERSUPV)) {
addr &= ~(1 * 1024 * 1024 - 1);
@@ -217,6 +239,8 @@ static noinline int icplb_miss(unsigned int cpu)
return CPLB_PROT_VIOL;
} else if (addr >= _ramend) {
i_data |= CPLB_USER_RD;
+ if (reserved_mem_icache_on)
+ i_data |= CPLB_L1_CHBL;
} else {
/*
* Two cases to distinguish - a supervisor access must
@@ -251,7 +275,7 @@ static noinline int icplb_miss(unsigned int cpu)
return 0;
}
-static noinline int dcplb_protection_fault(unsigned int cpu)
+MGR_ATTR static noinline int dcplb_protection_fault(unsigned int cpu)
{
int status = bfin_read_DCPLB_STATUS();
@@ -271,7 +295,7 @@ static noinline int dcplb_protection_fault(unsigned int cpu)
return CPLB_PROT_VIOL;
}
-int cplb_hdr(int seqstat, struct pt_regs *regs)
+MGR_ATTR int cplb_hdr(int seqstat, struct pt_regs *regs)
{
int cause = seqstat & 0x3f;
unsigned int cpu = raw_smp_processor_id();
diff --git a/arch/blackfin/kernel/cplb-nompu/cplbinit.c b/arch/blackfin/kernel/cplb-nompu/cplbinit.c
index 282a7919821..bfe75af4e8b 100644
--- a/arch/blackfin/kernel/cplb-nompu/cplbinit.c
+++ b/arch/blackfin/kernel/cplb-nompu/cplbinit.c
@@ -56,6 +56,15 @@ void __init generate_cplb_tables_cpu(unsigned int cpu)
i_tbl[i_i++].data = SDRAM_IGENERIC | PAGE_SIZE_4MB;
}
+#ifdef CONFIG_ROMKERNEL
+ /* Cover kernel XIP flash area */
+ addr = CONFIG_ROM_BASE & ~(4 * 1024 * 1024 - 1);
+ d_tbl[i_d].addr = addr;
+ d_tbl[i_d++].data = SDRAM_DGENERIC | PAGE_SIZE_4MB;
+ i_tbl[i_i].addr = addr;
+ i_tbl[i_i++].data = SDRAM_IGENERIC | PAGE_SIZE_4MB;
+#endif
+
/* Cover L1 memory. One 4M area for code and data each is enough. */
if (cpu == 0) {
if (L1_DATA_A_LENGTH || L1_DATA_B_LENGTH) {
diff --git a/arch/blackfin/kernel/dma-mapping.c b/arch/blackfin/kernel/dma-mapping.c
index e937f323d82..04ddcfeb798 100644
--- a/arch/blackfin/kernel/dma-mapping.c
+++ b/arch/blackfin/kernel/dma-mapping.c
@@ -116,7 +116,7 @@ EXPORT_SYMBOL(dma_free_coherent);
void __dma_sync(dma_addr_t addr, size_t size,
enum dma_data_direction dir)
{
- _dma_sync(addr, size, dir);
+ __dma_sync_inline(addr, size, dir);
}
EXPORT_SYMBOL(__dma_sync);
diff --git a/arch/blackfin/kernel/entry.S b/arch/blackfin/kernel/entry.S
index f27dc2292e1..686478f5f66 100644
--- a/arch/blackfin/kernel/entry.S
+++ b/arch/blackfin/kernel/entry.S
@@ -44,7 +44,7 @@ ENTRY(_ret_from_fork)
sti r4;
#endif /* CONFIG_IPIPE */
SP += -12;
- call _schedule_tail;
+ pseudo_long_call _schedule_tail, p5;
SP += 12;
r0 = [sp + PT_IPEND];
cc = bittst(r0,1);
@@ -79,7 +79,7 @@ ENTRY(_sys_vfork)
r0 += 24;
[--sp] = rets;
SP += -12;
- call _bfin_vfork;
+ pseudo_long_call _bfin_vfork, p2;
SP += 12;
rets = [sp++];
rts;
@@ -90,7 +90,7 @@ ENTRY(_sys_clone)
r0 += 24;
[--sp] = rets;
SP += -12;
- call _bfin_clone;
+ pseudo_long_call _bfin_clone, p2;
SP += 12;
rets = [sp++];
rts;
@@ -101,7 +101,7 @@ ENTRY(_sys_rt_sigreturn)
r0 += 24;
[--sp] = rets;
SP += -12;
- call _do_rt_sigreturn;
+ pseudo_long_call _do_rt_sigreturn, p2;
SP += 12;
rets = [sp++];
rts;
diff --git a/arch/blackfin/kernel/ftrace-entry.S b/arch/blackfin/kernel/ftrace-entry.S
index 76dd4fbcd17..d66446b572c 100644
--- a/arch/blackfin/kernel/ftrace-entry.S
+++ b/arch/blackfin/kernel/ftrace-entry.S
@@ -1,7 +1,7 @@
/*
* mcount and friends -- ftrace stuff
*
- * Copyright (C) 2009 Analog Devices Inc.
+ * Copyright (C) 2009-2010 Analog Devices Inc.
* Licensed under the GPL-2 or later.
*/
@@ -21,6 +21,15 @@
* function will be waiting there. mmmm pie.
*/
ENTRY(__mcount)
+#ifdef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
+ /* optional micro optimization: return if stopped */
+ p1.l = _function_trace_stop;
+ p1.h = _function_trace_stop;
+ r3 = [p1];
+ cc = r3 == 0;
+ if ! cc jump _ftrace_stub (bp);
+#endif
+
/* save third function arg early so we can do testing below */
[--sp] = r2;
@@ -106,9 +115,12 @@ ENTRY(_ftrace_graph_caller)
[--sp] = r1;
[--sp] = rets;
- /* prepare_ftrace_return(unsigned long *parent, unsigned long self_addr) */
- r0 = sp;
- r1 = rets;
+ /* prepare_ftrace_return(parent, self_addr, frame_pointer) */
+ r0 = sp; /* unsigned long *parent */
+ r1 = rets; /* unsigned long self_addr */
+#ifdef CONFIG_HAVE_FUNCTION_GRAPH_FP_TEST
+ r2 = fp; /* unsigned long frame_pointer */
+#endif
r0 += 16; /* skip the 4 local regs on stack */
r1 += -MCOUNT_INSN_SIZE;
call _prepare_ftrace_return;
@@ -127,6 +139,9 @@ ENTRY(_return_to_handler)
[--sp] = r1;
/* get original return address */
+#ifdef CONFIG_HAVE_FUNCTION_GRAPH_FP_TEST
+ r0 = fp; /* Blackfin is sane, so omit this */
+#endif
call _ftrace_return_to_handler;
rets = r0;
diff --git a/arch/blackfin/kernel/ftrace.c b/arch/blackfin/kernel/ftrace.c
index f2c85ac6f2d..a61d948ea92 100644
--- a/arch/blackfin/kernel/ftrace.c
+++ b/arch/blackfin/kernel/ftrace.c
@@ -16,7 +16,8 @@
* Hook the return address and push it in the stack of return addrs
* in current thread info.
*/
-void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr)
+void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr,
+ unsigned long frame_pointer)
{
struct ftrace_graph_ent trace;
unsigned long return_hooker = (unsigned long)&return_to_handler;
@@ -24,7 +25,8 @@ void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr)
if (unlikely(atomic_read(&current->tracing_graph_pause)))
return;
- if (ftrace_push_return_trace(*parent, self_addr, &trace.depth, 0) == -EBUSY)
+ if (ftrace_push_return_trace(*parent, self_addr, &trace.depth,
+ frame_pointer) == -EBUSY)
return;
trace.func = self_addr;
diff --git a/arch/blackfin/kernel/init_task.c b/arch/blackfin/kernel/init_task.c
index 118c5b9deda..d3970e8acd1 100644
--- a/arch/blackfin/kernel/init_task.c
+++ b/arch/blackfin/kernel/init_task.c
@@ -28,5 +28,5 @@ EXPORT_SYMBOL(init_task);
* "init_task" linker map entry.
*/
union thread_union init_thread_union
- __attribute__ ((__section__(".init_task.data"))) = {
+ __init_task_data = {
INIT_THREAD_INFO(init_task)};
diff --git a/arch/blackfin/kernel/kgdb.c b/arch/blackfin/kernel/kgdb.c
index 34c7c3ed2c9..2c501ceb1e5 100644
--- a/arch/blackfin/kernel/kgdb.c
+++ b/arch/blackfin/kernel/kgdb.c
@@ -145,7 +145,7 @@ void gdb_regs_to_pt_regs(unsigned long *gdb_regs, struct pt_regs *regs)
#endif
}
-struct hw_breakpoint {
+static struct hw_breakpoint {
unsigned int occupied:1;
unsigned int skip:1;
unsigned int enabled:1;
@@ -155,7 +155,7 @@ struct hw_breakpoint {
unsigned int addr;
} breakinfo[HW_WATCHPOINT_NUM];
-int bfin_set_hw_break(unsigned long addr, int len, enum kgdb_bptype type)
+static int bfin_set_hw_break(unsigned long addr, int len, enum kgdb_bptype type)
{
int breakno;
int bfin_type;
@@ -202,7 +202,7 @@ int bfin_set_hw_break(unsigned long addr, int len, enum kgdb_bptype type)
return -ENOSPC;
}
-int bfin_remove_hw_break(unsigned long addr, int len, enum kgdb_bptype type)
+static int bfin_remove_hw_break(unsigned long addr, int len, enum kgdb_bptype type)
{
int breakno;
int bfin_type;
@@ -230,7 +230,7 @@ int bfin_remove_hw_break(unsigned long addr, int len, enum kgdb_bptype type)
return 0;
}
-void bfin_remove_all_hw_break(void)
+static void bfin_remove_all_hw_break(void)
{
int breakno;
@@ -242,7 +242,7 @@ void bfin_remove_all_hw_break(void)
breakinfo[breakno].type = TYPE_DATA_WATCHPOINT;
}
-void bfin_correct_hw_break(void)
+static void bfin_correct_hw_break(void)
{
int breakno;
unsigned int wpiactl = 0;
diff --git a/arch/blackfin/kernel/nmi.c b/arch/blackfin/kernel/nmi.c
new file mode 100644
index 00000000000..0b5f72f17fd
--- /dev/null
+++ b/arch/blackfin/kernel/nmi.c
@@ -0,0 +1,299 @@
+/*
+ * Blackfin nmi_watchdog Driver
+ *
+ * Originally based on bfin_wdt.c
+ * Copyright 2010-2010 Analog Devices Inc.
+ * Graff Yang <graf.yang@analog.com>
+ *
+ * Enter bugs at http://blackfin.uclinux.org/
+ *
+ * Licensed under the GPL-2 or later.
+ */
+
+#include <linux/bitops.h>
+#include <linux/hardirq.h>
+#include <linux/sysdev.h>
+#include <linux/pm.h>
+#include <linux/nmi.h>
+#include <linux/smp.h>
+#include <linux/timer.h>
+#include <asm/blackfin.h>
+#include <asm/atomic.h>
+#include <asm/cacheflush.h>
+#include <asm/bfin_watchdog.h>
+
+#define DRV_NAME "nmi-wdt"
+
+#define NMI_WDT_TIMEOUT 5 /* 5 seconds */
+#define NMI_CHECK_TIMEOUT (4 * HZ) /* 4 seconds in jiffies */
+static int nmi_wdt_cpu = 1;
+
+static unsigned int timeout = NMI_WDT_TIMEOUT;
+static int nmi_active;
+
+static unsigned short wdoga_ctl;
+static unsigned int wdoga_cnt;
+static struct corelock_slot saved_corelock;
+static atomic_t nmi_touched[NR_CPUS];
+static struct timer_list ntimer;
+
+enum {
+ COREA_ENTER_NMI = 0,
+ COREA_EXIT_NMI,
+ COREB_EXIT_NMI,
+
+ NMI_EVENT_NR,
+};
+static unsigned long nmi_event __attribute__ ((__section__(".l2.bss")));
+
+/* we are in NMI, non-atomic bit ops are safe */
+static inline void set_nmi_event(int event)
+{
+ __set_bit(event, &nmi_event);
+}
+
+static inline void wait_nmi_event(int event)
+{
+ while (!test_bit(event, &nmi_event))
+ barrier();
+ __clear_bit(event, &nmi_event);
+}
+
+static inline void send_corea_nmi(void)
+{
+ wdoga_ctl = bfin_read_WDOGA_CTL();
+ wdoga_cnt = bfin_read_WDOGA_CNT();
+
+ bfin_write_WDOGA_CTL(WDEN_DISABLE);
+ bfin_write_WDOGA_CNT(0);
+ bfin_write_WDOGA_CTL(WDEN_ENABLE | ICTL_NMI);
+}
+
+static inline void restore_corea_nmi(void)
+{
+ bfin_write_WDOGA_CTL(WDEN_DISABLE);
+ bfin_write_WDOGA_CTL(WDOG_EXPIRED | WDEN_DISABLE | ICTL_NONE);
+
+ bfin_write_WDOGA_CNT(wdoga_cnt);
+ bfin_write_WDOGA_CTL(wdoga_ctl);
+}
+
+static inline void save_corelock(void)
+{
+ saved_corelock = corelock;
+ corelock.lock = 0;
+}
+
+static inline void restore_corelock(void)
+{
+ corelock = saved_corelock;
+}
+
+
+static inline void nmi_wdt_keepalive(void)
+{
+ bfin_write_WDOGB_STAT(0);
+}
+
+static inline void nmi_wdt_stop(void)
+{
+ bfin_write_WDOGB_CTL(WDEN_DISABLE);
+}
+
+/* before calling this function, you must stop the WDT */
+static inline void nmi_wdt_clear(void)
+{
+ /* clear TRO bit, disable event generation */
+ bfin_write_WDOGB_CTL(WDOG_EXPIRED | WDEN_DISABLE | ICTL_NONE);
+}
+
+static inline void nmi_wdt_start(void)
+{
+ bfin_write_WDOGB_CTL(WDEN_ENABLE | ICTL_NMI);
+}
+
+static inline int nmi_wdt_running(void)
+{
+ return ((bfin_read_WDOGB_CTL() & WDEN_MASK) != WDEN_DISABLE);
+}
+
+static inline int nmi_wdt_set_timeout(unsigned long t)
+{
+ u32 cnt, max_t, sclk;
+ int run;
+
+ sclk = get_sclk();
+ max_t = -1 / sclk;
+ cnt = t * sclk;
+ if (t > max_t) {
+ pr_warning("NMI: timeout value is too large\n");
+ return -EINVAL;
+ }
+
+ run = nmi_wdt_running();
+ nmi_wdt_stop();
+ bfin_write_WDOGB_CNT(cnt);
+ if (run)
+ nmi_wdt_start();
+
+ timeout = t;
+
+ return 0;
+}
+
+int check_nmi_wdt_touched(void)
+{
+ unsigned int this_cpu = smp_processor_id();
+ unsigned int cpu;
+
+ cpumask_t mask = cpu_online_map;
+
+ if (!atomic_read(&nmi_touched[this_cpu]))
+ return 0;
+
+ atomic_set(&nmi_touched[this_cpu], 0);
+
+ cpu_clear(this_cpu, mask);
+ for_each_cpu_mask(cpu, mask) {
+ invalidate_dcache_range((unsigned long)(&nmi_touched[cpu]),
+ (unsigned long)(&nmi_touched[cpu]));
+ if (!atomic_read(&nmi_touched[cpu]))
+ return 0;
+ atomic_set(&nmi_touched[cpu], 0);
+ }
+
+ return 1;
+}
+
+static void nmi_wdt_timer(unsigned long data)
+{
+ if (check_nmi_wdt_touched())
+ nmi_wdt_keepalive();
+
+ mod_timer(&ntimer, jiffies + NMI_CHECK_TIMEOUT);
+}
+
+static int __init init_nmi_wdt(void)
+{
+ nmi_wdt_set_timeout(timeout);
+ nmi_wdt_start();
+ nmi_active = true;
+
+ init_timer(&ntimer);
+ ntimer.function = nmi_wdt_timer;
+ ntimer.expires = jiffies + NMI_CHECK_TIMEOUT;
+ add_timer(&ntimer);
+
+ pr_info("nmi_wdt: initialized: timeout=%d sec\n", timeout);
+ return 0;
+}
+device_initcall(init_nmi_wdt);
+
+void touch_nmi_watchdog(void)
+{
+ atomic_set(&nmi_touched[smp_processor_id()], 1);
+}
+
+/* Suspend/resume support */
+#ifdef CONFIG_PM
+static int nmi_wdt_suspend(struct sys_device *dev, pm_message_t state)
+{
+ nmi_wdt_stop();
+ return 0;
+}
+
+static int nmi_wdt_resume(struct sys_device *dev)
+{
+ if (nmi_active)
+ nmi_wdt_start();
+ return 0;
+}
+
+static struct sysdev_class nmi_sysclass = {
+ .name = DRV_NAME,
+ .resume = nmi_wdt_resume,
+ .suspend = nmi_wdt_suspend,
+};
+
+static struct sys_device device_nmi_wdt = {
+ .id = 0,
+ .cls = &nmi_sysclass,
+};
+
+static int __init init_nmi_wdt_sysfs(void)
+{
+ int error;
+
+ if (!nmi_active)
+ return 0;
+
+ error = sysdev_class_register(&nmi_sysclass);
+ if (!error)
+ error = sysdev_register(&device_nmi_wdt);
+ return error;
+}
+late_initcall(init_nmi_wdt_sysfs);
+
+#endif /* CONFIG_PM */
+
+
+asmlinkage notrace void do_nmi(struct pt_regs *fp)
+{
+ unsigned int cpu = smp_processor_id();
+ nmi_enter();
+
+ cpu_pda[cpu].__nmi_count += 1;
+
+ if (cpu == nmi_wdt_cpu) {
+ /* CoreB goes here first */
+
+ /* reload the WDOG_STAT */
+ nmi_wdt_keepalive();
+
+ /* clear nmi interrupt for CoreB */
+ nmi_wdt_stop();
+ nmi_wdt_clear();
+
+ /* trigger NMI interrupt of CoreA */
+ send_corea_nmi();
+
+ /* wait for CoreA to enter NMI */
+ wait_nmi_event(COREA_ENTER_NMI);
+
+ /* recover WDOGA's settings */
+ restore_corea_nmi();
+
+ save_corelock();
+
+ /* corelock is saved/cleared, CoreA is dumping messages */
+
+ wait_nmi_event(COREA_EXIT_NMI);
+ } else {
+ /* OK, CoreA entered NMI */
+ set_nmi_event(COREA_ENTER_NMI);
+ }
+
+ pr_emerg("\nNMI Watchdog detected LOCKUP, dump for CPU %d\n", cpu);
+ dump_bfin_process(fp);
+ dump_bfin_mem(fp);
+ show_regs(fp);
+ dump_bfin_trace_buffer();
+ show_stack(current, (unsigned long *)fp);
+
+ if (cpu == nmi_wdt_cpu) {
+ pr_emerg("This fault is not recoverable, sorry!\n");
+
+ /* CoreA dump finished, restore the corelock */
+ restore_corelock();
+
+ set_nmi_event(COREB_EXIT_NMI);
+ } else {
+ /* CoreA dump finished, notify CoreB that we are done */
+ set_nmi_event(COREA_EXIT_NMI);
+
+ /* synchronize with CoreB */
+ wait_nmi_event(COREB_EXIT_NMI);
+ }
+
+ nmi_exit();
+}
diff --git a/arch/blackfin/kernel/process.c b/arch/blackfin/kernel/process.c
index b56b0e485e0..29705cec91d 100644
--- a/arch/blackfin/kernel/process.c
+++ b/arch/blackfin/kernel/process.c
@@ -98,13 +98,6 @@ void cpu_idle(void)
}
}
-/* Fill in the fpu structure for a core dump. */
-
-int dump_fpu(struct pt_regs *regs, elf_fpregset_t * fpregs)
-{
- return 1;
-}
-
/*
* This gets run with P1 containing the
* function to call, and R1 containing
diff --git a/arch/blackfin/kernel/ptrace.c b/arch/blackfin/kernel/ptrace.c
index 65567dc4b9f..43eb969405d 100644
--- a/arch/blackfin/kernel/ptrace.c
+++ b/arch/blackfin/kernel/ptrace.c
@@ -1,6 +1,6 @@
/*
* linux/kernel/ptrace.c is by Ross Biro 1/23/92, edited by Linus Torvalds
- * these modifications are Copyright 2004-2009 Analog Devices Inc.
+ * these modifications are Copyright 2004-2010 Analog Devices Inc.
*
* Licensed under the GPL-2
*/
@@ -9,10 +9,13 @@
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/smp.h>
+#include <linux/elf.h>
#include <linux/errno.h>
#include <linux/ptrace.h>
#include <linux/user.h>
+#include <linux/regset.h>
#include <linux/signal.h>
+#include <linux/tracehook.h>
#include <linux/uaccess.h>
#include <asm/page.h>
@@ -25,90 +28,57 @@
#include <asm/cacheflush.h>
#include <asm/mem_map.h>
-#define TEXT_OFFSET 0
/*
* does not yet catch signals sent when the child dies.
* in exit.c or in signal.c.
*/
-/* determines which bits in the SYSCFG reg the user has access to. */
-/* 1 = access 0 = no access */
-#define SYSCFG_MASK 0x0007 /* SYSCFG reg */
-/* sets the trace bits. */
-#define TRACE_BITS 0x0001
-
-/* Find the stack offset for a register, relative to thread.esp0. */
-#define PT_REG(reg) ((long)&((struct pt_regs *)0)->reg)
-
-/*
- * Get the address of the live pt_regs for the specified task.
- * These are saved onto the top kernel stack when the process
- * is not running.
- *
- * Note: if a user thread is execve'd from kernel space, the
- * kernel stack will not be empty on entry to the kernel, so
- * ptracing these tasks will fail.
- */
-static inline struct pt_regs *get_user_regs(struct task_struct *task)
-{
- return (struct pt_regs *)
- ((unsigned long)task_stack_page(task) +
- (THREAD_SIZE - sizeof(struct pt_regs)));
-}
-
-/*
- * Get all user integer registers.
- */
-static inline int ptrace_getregs(struct task_struct *tsk, void __user *uregs)
-{
- struct pt_regs regs;
- memcpy(&regs, get_user_regs(tsk), sizeof(regs));
- regs.usp = tsk->thread.usp;
- return copy_to_user(uregs, &regs, sizeof(struct pt_regs)) ? -EFAULT : 0;
-}
-
-/* Mapping from PT_xxx to the stack offset at which the register is
- * saved. Notice that usp has no stack-slot and needs to be treated
- * specially (see get_reg/put_reg below).
- */
-
/*
* Get contents of register REGNO in task TASK.
*/
-static inline long get_reg(struct task_struct *task, int regno)
+static inline long
+get_reg(struct task_struct *task, long regno, unsigned long __user *datap)
{
- unsigned char *reg_ptr;
+ long tmp;
+ struct pt_regs *regs = task_pt_regs(task);
- struct pt_regs *regs =
- (struct pt_regs *)((unsigned long)task_stack_page(task) +
- (THREAD_SIZE - sizeof(struct pt_regs)));
- reg_ptr = (char *)regs;
+ if (regno & 3 || regno > PT_LAST_PSEUDO || regno < 0)
+ return -EIO;
switch (regno) {
+ case PT_TEXT_ADDR:
+ tmp = task->mm->start_code;
+ break;
+ case PT_TEXT_END_ADDR:
+ tmp = task->mm->end_code;
+ break;
+ case PT_DATA_ADDR:
+ tmp = task->mm->start_data;
+ break;
case PT_USP:
- return task->thread.usp;
+ tmp = task->thread.usp;
+ break;
default:
- if (regno <= 216)
- return *(long *)(reg_ptr + regno);
+ if (regno < sizeof(*regs)) {
+ void *reg_ptr = regs;
+ tmp = *(long *)(reg_ptr + regno);
+ } else
+ return -EIO;
}
- /* slight mystery ... never seems to come here but kernel misbehaves without this code! */
- printk(KERN_WARNING "Request to get for unknown register %d\n", regno);
- return 0;
+ return put_user(tmp, datap);
}
/*
* Write contents of register REGNO in task TASK.
*/
static inline int
-put_reg(struct task_struct *task, int regno, unsigned long data)
+put_reg(struct task_struct *task, long regno, unsigned long data)
{
- char *reg_ptr;
+ struct pt_regs *regs = task_pt_regs(task);
- struct pt_regs *regs =
- (struct pt_regs *)((unsigned long)task_stack_page(task) +
- (THREAD_SIZE - sizeof(struct pt_regs)));
- reg_ptr = (char *)regs;
+ if (regno & 3 || regno > PT_LAST_PSEUDO || regno < 0)
+ return -EIO;
switch (regno) {
case PT_PC:
@@ -125,10 +95,18 @@ put_reg(struct task_struct *task, int regno, unsigned long data)
regs->usp = data;
task->thread.usp = data;
break;
+ case PT_SYSCFG: /* don't let userspace screw with this */
+ if ((data & ~1) != 0x6)
+ pr_warning("ptrace: ignore syscfg write of %#lx\n", data);
+ break; /* regs->syscfg = data; break; */
default:
- if (regno <= 216)
- *(long *)(reg_ptr + regno) = data;
+ if (regno < sizeof(*regs)) {
+ void *reg_offset = regs;
+ *(long *)(reg_offset + regno) = data;
+ }
+ /* Ignore writes to pseudo registers */
}
+
return 0;
}
@@ -160,24 +138,98 @@ static inline int is_user_addr_valid(struct task_struct *child,
return -EIO;
}
-void ptrace_enable(struct task_struct *child)
+/*
+ * retrieve the contents of Blackfin userspace general registers
+ */
+static int genregs_get(struct task_struct *target,
+ const struct user_regset *regset,
+ unsigned int pos, unsigned int count,
+ void *kbuf, void __user *ubuf)
{
- unsigned long tmp;
- tmp = get_reg(child, PT_SYSCFG) | (TRACE_BITS);
- put_reg(child, PT_SYSCFG, tmp);
+ struct pt_regs *regs = task_pt_regs(target);
+ int ret;
+
+ /* This sucks ... */
+ regs->usp = target->thread.usp;
+
+ ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
+ regs, 0, sizeof(*regs));
+ if (ret < 0)
+ return ret;
+
+ return user_regset_copyout_zero(&pos, &count, &kbuf, &ubuf,
+ sizeof(*regs), -1);
}
/*
- * Called by kernel/ptrace.c when detaching..
- *
- * Make sure the single step bit is not set.
+ * update the contents of the Blackfin userspace general registers
+ */
+static int genregs_set(struct task_struct *target,
+ const struct user_regset *regset,
+ unsigned int pos, unsigned int count,
+ const void *kbuf, const void __user *ubuf)
+{
+ struct pt_regs *regs = task_pt_regs(target);
+ int ret;
+
+ /* Don't let people set SYSCFG (it's at the end of pt_regs) */
+ ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
+ regs, 0, PT_SYSCFG);
+ if (ret < 0)
+ return ret;
+
+ /* This sucks ... */
+ target->thread.usp = regs->usp;
+ /* regs->retx = regs->pc; */
+
+ return user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf,
+ PT_SYSCFG, -1);
+}
+
+/*
+ * Define the register sets available on the Blackfin under Linux
*/
-void ptrace_disable(struct task_struct *child)
+enum bfin_regset {
+ REGSET_GENERAL,
+};
+
+static const struct user_regset bfin_regsets[] = {
+ [REGSET_GENERAL] = {
+ .core_note_type = NT_PRSTATUS,
+ .n = sizeof(struct pt_regs) / sizeof(long),
+ .size = sizeof(long),
+ .align = sizeof(long),
+ .get = genregs_get,
+ .set = genregs_set,
+ },
+};
+
+static const struct user_regset_view user_bfin_native_view = {
+ .name = "Blackfin",
+ .e_machine = EM_BLACKFIN,
+ .regsets = bfin_regsets,
+ .n = ARRAY_SIZE(bfin_regsets),
+};
+
+const struct user_regset_view *task_user_regset_view(struct task_struct *task)
+{
+ return &user_bfin_native_view;
+}
+
+void user_enable_single_step(struct task_struct *child)
+{
+ struct pt_regs *regs = task_pt_regs(child);
+ regs->syscfg |= SYSCFG_SSSTEP;
+
+ set_tsk_thread_flag(child, TIF_SINGLESTEP);
+}
+
+void user_disable_single_step(struct task_struct *child)
{
- unsigned long tmp;
- /* make sure the single step bit is not set. */
- tmp = get_reg(child, PT_SYSCFG) & ~TRACE_BITS;
- put_reg(child, PT_SYSCFG, tmp);
+ struct pt_regs *regs = task_pt_regs(child);
+ regs->syscfg &= ~SYSCFG_SSSTEP;
+
+ clear_tsk_thread_flag(child, TIF_SINGLESTEP);
}
long arch_ptrace(struct task_struct *child, long request, long addr, long data)
@@ -240,40 +292,6 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
break;
}
- /* read the word at location addr in the USER area. */
- case PTRACE_PEEKUSR:
- {
- unsigned long tmp;
- ret = -EIO;
- tmp = 0;
- if ((addr & 3) || (addr > (sizeof(struct pt_regs) + 16))) {
- printk(KERN_WARNING "ptrace error : PEEKUSR : temporarily returning "
- "0 - %x sizeof(pt_regs) is %lx\n",
- (int)addr, sizeof(struct pt_regs));
- break;
- }
- if (addr == sizeof(struct pt_regs)) {
- /* PT_TEXT_ADDR */
- tmp = child->mm->start_code + TEXT_OFFSET;
- } else if (addr == (sizeof(struct pt_regs) + 4)) {
- /* PT_TEXT_END_ADDR */
- tmp = child->mm->end_code;
- } else if (addr == (sizeof(struct pt_regs) + 8)) {
- /* PT_DATA_ADDR */
- tmp = child->mm->start_data;
-#ifdef CONFIG_BINFMT_ELF_FDPIC
- } else if (addr == (sizeof(struct pt_regs) + 12)) {
- goto case_PTRACE_GETFDPIC_EXEC;
- } else if (addr == (sizeof(struct pt_regs) + 16)) {
- goto case_PTRACE_GETFDPIC_INTERP;
-#endif
- } else {
- tmp = get_reg(child, addr);
- }
- ret = put_user(tmp, datap);
- break;
- }
-
#ifdef CONFIG_BINFMT_ELF_FDPIC
case PTRACE_GETFDPIC: {
unsigned long tmp = 0;
@@ -336,78 +354,36 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
break;
}
- case PTRACE_POKEUSR: /* write the word at location addr in the USER area */
- ret = -EIO;
- if ((addr & 3) || (addr > (sizeof(struct pt_regs) + 16))) {
- printk(KERN_WARNING "ptrace error : POKEUSR: temporarily returning 0\n");
- break;
- }
-
- if (addr >= (sizeof(struct pt_regs))) {
- ret = 0;
- break;
- }
- if (addr == PT_SYSCFG) {
- data &= SYSCFG_MASK;
- data |= get_reg(child, PT_SYSCFG);
+ case PTRACE_PEEKUSR:
+ switch (addr) {
+#ifdef CONFIG_BINFMT_ELF_FDPIC /* backwards compat */
+ case PT_FDPIC_EXEC: goto case_PTRACE_GETFDPIC_EXEC;
+ case PT_FDPIC_INTERP: goto case_PTRACE_GETFDPIC_INTERP;
+#endif
+ default:
+ ret = get_reg(child, addr, datap);
}
- ret = put_reg(child, addr, data);
+ pr_debug("ptrace: PEEKUSR reg %li with %#lx = %i\n", addr, data, ret);
break;
- case PTRACE_SYSCALL: /* continue and stop at next (return from) syscall */
- case PTRACE_CONT: /* restart after signal. */
- pr_debug("ptrace: syscall/cont\n");
-
- ret = -EIO;
- if (!valid_signal(data))
- break;
- if (request == PTRACE_SYSCALL)
- set_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
- else
- clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
- child->exit_code = data;
- ptrace_disable(child);
- pr_debug("ptrace: before wake_up_process\n");
- wake_up_process(child);
- ret = 0;
- break;
-
- /*
- * make the child exit. Best I can do is send it a sigkill.
- * perhaps it should be put in the status that it wants to
- * exit.
- */
- case PTRACE_KILL:
- ret = 0;
- if (child->exit_state == EXIT_ZOMBIE) /* already dead */
- break;
- child->exit_code = SIGKILL;
- ptrace_disable(child);
- wake_up_process(child);
- break;
-
- case PTRACE_SINGLESTEP: /* set the trap flag. */
- pr_debug("ptrace: single step\n");
- ret = -EIO;
- if (!valid_signal(data))
- break;
- clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
- ptrace_enable(child);
- child->exit_code = data;
- wake_up_process(child);
- ret = 0;
+ case PTRACE_POKEUSR:
+ ret = put_reg(child, addr, data);
+ pr_debug("ptrace: POKEUSR reg %li with %li = %i\n", addr, data, ret);
break;
case PTRACE_GETREGS:
- /* Get all gp regs from the child. */
- ret = ptrace_getregs(child, datap);
- break;
+ pr_debug("ptrace: PTRACE_GETREGS\n");
+ return copy_regset_to_user(child, &user_bfin_native_view,
+ REGSET_GENERAL,
+ 0, sizeof(struct pt_regs),
+ (void __user *)data);
case PTRACE_SETREGS:
- printk(KERN_WARNING "ptrace: SETREGS: **** NOT IMPLEMENTED ***\n");
- /* Set all gp regs in the child. */
- ret = 0;
- break;
+ pr_debug("ptrace: PTRACE_SETREGS\n");
+ return copy_regset_from_user(child, &user_bfin_native_view,
+ REGSET_GENERAL,
+ 0, sizeof(struct pt_regs),
+ (const void __user *)data);
default:
ret = ptrace_request(child, request, addr, data);
@@ -417,27 +393,21 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
return ret;
}
-asmlinkage void syscall_trace(void)
+asmlinkage int syscall_trace_enter(struct pt_regs *regs)
{
- if (!test_thread_flag(TIF_SYSCALL_TRACE))
- return;
-
- if (!(current->ptrace & PT_PTRACED))
- return;
-
- /* the 0x80 provides a way for the tracing parent to distinguish
- * between a syscall stop and SIGTRAP delivery
- */
- ptrace_notify(SIGTRAP | ((current->ptrace & PT_TRACESYSGOOD)
- ? 0x80 : 0));
-
- /*
- * this isn't the same as continuing with a signal, but it will do
- * for normal use. strace only continues with a signal if the
- * stopping signal is not SIGTRAP. -brl
- */
- if (current->exit_code) {
- send_sig(current->exit_code, current, 1);
- current->exit_code = 0;
- }
+ int ret = 0;
+
+ if (test_thread_flag(TIF_SYSCALL_TRACE))
+ ret = tracehook_report_syscall_entry(regs);
+
+ return ret;
+}
+
+asmlinkage void syscall_trace_leave(struct pt_regs *regs)
+{
+ int step;
+
+ step = test_thread_flag(TIF_SINGLESTEP);
+ if (step || test_thread_flag(TIF_SYSCALL_TRACE))
+ tracehook_report_syscall_exit(regs, step);
}
diff --git a/arch/blackfin/kernel/setup.c b/arch/blackfin/kernel/setup.c
index 95448ae9c43..8e2efceb364 100644
--- a/arch/blackfin/kernel/setup.c
+++ b/arch/blackfin/kernel/setup.c
@@ -220,6 +220,16 @@ void __init bfin_relocate_l1_mem(void)
memcpy(_stext_l2, _l2_lma, l2_len);
}
+#ifdef CONFIG_ROMKERNEL
+void __init bfin_relocate_xip_data(void)
+{
+ early_shadow_stamp();
+
+ memcpy(_sdata, _data_lma, (unsigned long)_data_len - THREAD_SIZE + sizeof(struct thread_info));
+ memcpy(_sinitdata, _init_data_lma, (unsigned long)_init_data_len);
+}
+#endif
+
/* add_memory_region to memmap */
static void __init add_memory_region(unsigned long long start,
unsigned long long size, int type)
@@ -504,7 +514,7 @@ static __init void memory_setup(void)
#endif
unsigned long max_mem;
- _rambase = (unsigned long)_stext;
+ _rambase = CONFIG_BOOT_LOAD;
_ramstart = (unsigned long)_end;
if (DMA_UNCACHED_REGION > (_ramend - _ramstart)) {
@@ -597,7 +607,12 @@ static __init void memory_setup(void)
}
#ifdef CONFIG_MPU
+#if defined(CONFIG_ROMFS_ON_MTD) && defined(CONFIG_MTD_ROM)
+ page_mask_nelts = (((_ramend + ASYNC_BANK3_BASE + ASYNC_BANK3_SIZE -
+ ASYNC_BANK0_BASE) >> PAGE_SHIFT) + 31) / 32;
+#else
page_mask_nelts = ((_ramend >> PAGE_SHIFT) + 31) / 32;
+#endif
page_mask_order = get_order(3 * page_mask_nelts * sizeof(long));
#endif
@@ -630,7 +645,7 @@ static __init void memory_setup(void)
__bss_start, __bss_stop,
_sdata, _edata,
(void *)&init_thread_union,
- (void *)((int)(&init_thread_union) + 0x2000),
+ (void *)((int)(&init_thread_union) + THREAD_SIZE),
__init_begin, __init_end,
(void *)_ramstart, (void *)memory_end
#ifdef CONFIG_MTD_UCLINUX
@@ -792,10 +807,17 @@ static inline int __init get_mem_size(void)
BUG();
}
+__attribute__((weak))
+void __init native_machine_early_platform_add_devices(void)
+{
+}
+
void __init setup_arch(char **cmdline_p)
{
unsigned long sclk, cclk;
+ native_machine_early_platform_add_devices();
+
enable_shadow_console();
/* Check to make sure we are running on the right processor */
@@ -1217,10 +1239,10 @@ static int show_cpuinfo(struct seq_file *m, void *v)
dsup_banks, BFIN_DSUBBANKS, BFIN_DWAYS,
BFIN_DLINES);
#ifdef __ARCH_SYNC_CORE_DCACHE
- seq_printf(m, "SMP Dcache Flushes\t: %lu\n\n", cpudata->dcache_invld_count);
+ seq_printf(m, "SMP Dcache Flushes\t: %lu\n\n", dcache_invld_count[cpu_num]);
#endif
#ifdef __ARCH_SYNC_CORE_ICACHE
- seq_printf(m, "SMP Icache Flushes\t: %lu\n\n", cpudata->icache_invld_count);
+ seq_printf(m, "SMP Icache Flushes\t: %lu\n\n", icache_invld_count[cpu_num]);
#endif
if (cpu_num != num_possible_cpus() - 1)
@@ -1249,8 +1271,8 @@ static int show_cpuinfo(struct seq_file *m, void *v)
seq_printf(m, "board memory\t: %ld kB (0x%p -> 0x%p)\n",
physical_mem_end >> 10, (void *)0, (void *)physical_mem_end);
seq_printf(m, "kernel memory\t: %d kB (0x%p -> 0x%p)\n",
- ((int)memory_end - (int)_stext) >> 10,
- _stext,
+ ((int)memory_end - (int)_rambase) >> 10,
+ (void *)_rambase,
(void *)memory_end);
seq_printf(m, "\n");
diff --git a/arch/blackfin/kernel/signal.c b/arch/blackfin/kernel/signal.c
index e0fd63e9e38..d536f35d1f4 100644
--- a/arch/blackfin/kernel/signal.c
+++ b/arch/blackfin/kernel/signal.c
@@ -1,5 +1,5 @@
/*
- * Copyright 2004-2009 Analog Devices Inc.
+ * Copyright 2004-2010 Analog Devices Inc.
*
* Licensed under the GPL-2 or later
*/
@@ -17,6 +17,7 @@
#include <asm/cacheflush.h>
#include <asm/ucontext.h>
#include <asm/fixed_code.h>
+#include <asm/syscall.h>
#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))
@@ -50,6 +51,9 @@ rt_restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc, int *p
unsigned long usp = 0;
int err = 0;
+ /* Always make any pending restarted system calls return -EINTR */
+ current_thread_info()->restart_block.fn = do_no_restart_syscall;
+
#define RESTORE(x) err |= __get_user(regs->x, &sc->sc_##x)
/* restore passed registers */
@@ -206,16 +210,6 @@ setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t * info,
regs->r1 = (unsigned long)(&frame->info);
regs->r2 = (unsigned long)(&frame->uc);
- /*
- * Clear the trace flag when entering the signal handler, but
- * notify any tracer that was single-stepping it. The tracer
- * may want to single-step inside the handler too.
- */
- if (regs->syscfg & TRACE_BITS) {
- regs->syscfg &= ~TRACE_BITS;
- ptrace_notify(SIGTRAP);
- }
-
return 0;
give_sigsegv:
@@ -247,6 +241,11 @@ handle_restart(struct pt_regs *regs, struct k_sigaction *ka, int has_handler)
regs->r0 = regs->orig_r0;
regs->pc -= 2;
break;
+
+ case -ERESTART_RESTARTBLOCK:
+ regs->p0 = __NR_restart_syscall;
+ regs->pc -= 2;
+ break;
}
}
@@ -315,6 +314,9 @@ asmlinkage void do_signal(struct pt_regs *regs)
* clear the TIF_RESTORE_SIGMASK flag */
if (test_thread_flag(TIF_RESTORE_SIGMASK))
clear_thread_flag(TIF_RESTORE_SIGMASK);
+
+ tracehook_signal_handler(signr, &info, &ka, regs,
+ test_thread_flag(TIF_SINGLESTEP));
}
return;
diff --git a/arch/blackfin/kernel/time-ts.c b/arch/blackfin/kernel/time-ts.c
index 17c38c5b5b2..cb7a01d4f00 100644
--- a/arch/blackfin/kernel/time-ts.c
+++ b/arch/blackfin/kernel/time-ts.c
@@ -21,6 +21,7 @@
#include <asm/blackfin.h>
#include <asm/time.h>
#include <asm/gptimers.h>
+#include <asm/nmi.h>
/* Accelerators for sched_clock()
* convert from cycles(64bits) => nanoseconds (64bits)
@@ -50,7 +51,11 @@
static notrace cycle_t bfin_read_cycles(struct clocksource *cs)
{
+#ifdef CONFIG_CPU_FREQ
return __bfin_cycles_off + (get_cycles() << __bfin_cycles_mod);
+#else
+ return get_cycles();
+#endif
}
static struct clocksource bfin_cs_cycles = {
@@ -132,7 +137,6 @@ static int __init bfin_cs_gptimer0_init(void)
# define bfin_cs_gptimer0_init()
#endif
-
#if defined(CONFIG_GPTMR0_CLOCKSOURCE) || defined(CONFIG_CYCLES_CLOCKSOURCE)
/* prefer to use cycles since it has higher rating */
notrace unsigned long long sched_clock(void)
@@ -145,47 +149,8 @@ notrace unsigned long long sched_clock(void)
}
#endif
-#ifdef CONFIG_CORE_TIMER_IRQ_L1
-__attribute__((l1_text))
-#endif
-irqreturn_t timer_interrupt(int irq, void *dev_id);
-
-static int bfin_timer_set_next_event(unsigned long, \
- struct clock_event_device *);
-
-static void bfin_timer_set_mode(enum clock_event_mode, \
- struct clock_event_device *);
-
-static struct clock_event_device clockevent_bfin = {
-#if defined(CONFIG_TICKSOURCE_GPTMR0)
- .name = "bfin_gptimer0",
- .rating = 300,
- .irq = IRQ_TIMER0,
-#else
- .name = "bfin_core_timer",
- .rating = 350,
- .irq = IRQ_CORETMR,
-#endif
- .shift = 32,
- .features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT,
- .set_next_event = bfin_timer_set_next_event,
- .set_mode = bfin_timer_set_mode,
-};
-
-static struct irqaction bfin_timer_irq = {
-#if defined(CONFIG_TICKSOURCE_GPTMR0)
- .name = "Blackfin GPTimer0",
-#else
- .name = "Blackfin CoreTimer",
-#endif
- .flags = IRQF_DISABLED | IRQF_TIMER | \
- IRQF_IRQPOLL | IRQF_PERCPU,
- .handler = timer_interrupt,
- .dev_id = &clockevent_bfin,
-};
-
#if defined(CONFIG_TICKSOURCE_GPTMR0)
-static int bfin_timer_set_next_event(unsigned long cycles,
+static int bfin_gptmr0_set_next_event(unsigned long cycles,
struct clock_event_device *evt)
{
disable_gptimers(TIMER0bit);
@@ -196,7 +161,7 @@ static int bfin_timer_set_next_event(unsigned long cycles,
return 0;
}
-static void bfin_timer_set_mode(enum clock_event_mode mode,
+static void bfin_gptmr0_set_mode(enum clock_event_mode mode,
struct clock_event_device *evt)
{
switch (mode) {
@@ -224,25 +189,65 @@ static void bfin_timer_set_mode(enum clock_event_mode mode,
}
}
-static void bfin_timer_ack(void)
+static void bfin_gptmr0_ack(void)
{
set_gptimer_status(TIMER_GROUP1, TIMER_STATUS_TIMIL0);
}
-static void __init bfin_timer_init(void)
+static void __init bfin_gptmr0_init(void)
{
disable_gptimers(TIMER0bit);
}
-static unsigned long __init bfin_clockevent_check(void)
+#ifdef CONFIG_CORE_TIMER_IRQ_L1
+__attribute__((l1_text))
+#endif
+irqreturn_t bfin_gptmr0_interrupt(int irq, void *dev_id)
{
- setup_irq(IRQ_TIMER0, &bfin_timer_irq);
- return get_sclk();
+ struct clock_event_device *evt = dev_id;
+ smp_mb();
+ evt->event_handler(evt);
+ bfin_gptmr0_ack();
+ return IRQ_HANDLED;
}
-#else /* CONFIG_TICKSOURCE_CORETMR */
+static struct irqaction gptmr0_irq = {
+ .name = "Blackfin GPTimer0",
+ .flags = IRQF_DISABLED | IRQF_TIMER | \
+ IRQF_IRQPOLL | IRQF_PERCPU,
+ .handler = bfin_gptmr0_interrupt,
+};
-static int bfin_timer_set_next_event(unsigned long cycles,
+static struct clock_event_device clockevent_gptmr0 = {
+ .name = "bfin_gptimer0",
+ .rating = 300,
+ .irq = IRQ_TIMER0,
+ .shift = 32,
+ .features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT,
+ .set_next_event = bfin_gptmr0_set_next_event,
+ .set_mode = bfin_gptmr0_set_mode,
+};
+
+static void __init bfin_gptmr0_clockevent_init(struct clock_event_device *evt)
+{
+ unsigned long clock_tick;
+
+ clock_tick = get_sclk();
+ evt->mult = div_sc(clock_tick, NSEC_PER_SEC, evt->shift);
+ evt->max_delta_ns = clockevent_delta2ns(-1, evt);
+ evt->min_delta_ns = clockevent_delta2ns(100, evt);
+
+ evt->cpumask = cpumask_of(0);
+
+ clockevents_register_device(evt);
+}
+#endif /* CONFIG_TICKSOURCE_GPTMR0 */
+
+#if defined(CONFIG_TICKSOURCE_CORETMR)
+/* per-cpu local core timer */
+static DEFINE_PER_CPU(struct clock_event_device, coretmr_events);
+
+static int bfin_coretmr_set_next_event(unsigned long cycles,
struct clock_event_device *evt)
{
bfin_write_TCNTL(TMPWR);
@@ -253,7 +258,7 @@ static int bfin_timer_set_next_event(unsigned long cycles,
return 0;
}
-static void bfin_timer_set_mode(enum clock_event_mode mode,
+static void bfin_coretmr_set_mode(enum clock_event_mode mode,
struct clock_event_device *evt)
{
switch (mode) {
@@ -285,19 +290,13 @@ static void bfin_timer_set_mode(enum clock_event_mode mode,
}
}
-static void bfin_timer_ack(void)
-{
-}
-
-static void __init bfin_timer_init(void)
+void bfin_coretmr_init(void)
{
/* power up the timer, but don't enable it just yet */
bfin_write_TCNTL(TMPWR);
CSYNC();
- /*
- * the TSCALE prescaler counter.
- */
+ /* the TSCALE prescaler counter. */
bfin_write_TSCALE(TIME_SCALE - 1);
bfin_write_TPERIOD(0);
bfin_write_TCOUNT(0);
@@ -305,48 +304,54 @@ static void __init bfin_timer_init(void)
CSYNC();
}
-static unsigned long __init bfin_clockevent_check(void)
-{
- setup_irq(IRQ_CORETMR, &bfin_timer_irq);
- return get_cclk() / TIME_SCALE;
-}
-
-void __init setup_core_timer(void)
+#ifdef CONFIG_CORE_TIMER_IRQ_L1
+__attribute__((l1_text))
+#endif
+irqreturn_t bfin_coretmr_interrupt(int irq, void *dev_id)
{
- bfin_timer_init();
- bfin_timer_set_mode(CLOCK_EVT_MODE_PERIODIC, NULL);
-}
-#endif /* CONFIG_TICKSOURCE_GPTMR0 */
+ int cpu = smp_processor_id();
+ struct clock_event_device *evt = &per_cpu(coretmr_events, cpu);
-/*
- * timer_interrupt() needs to keep up the real-time clock,
- * as well as call the "do_timer()" routine every clocktick
- */
-irqreturn_t timer_interrupt(int irq, void *dev_id)
-{
- struct clock_event_device *evt = dev_id;
smp_mb();
evt->event_handler(evt);
- bfin_timer_ack();
- return IRQ_HANDLED;
-}
-
-static int __init bfin_clockevent_init(void)
-{
- unsigned long timer_clk;
- timer_clk = bfin_clockevent_check();
+ touch_nmi_watchdog();
- bfin_timer_init();
+ return IRQ_HANDLED;
+}
- clockevent_bfin.mult = div_sc(timer_clk, NSEC_PER_SEC, clockevent_bfin.shift);
- clockevent_bfin.max_delta_ns = clockevent_delta2ns(-1, &clockevent_bfin);
- clockevent_bfin.min_delta_ns = clockevent_delta2ns(100, &clockevent_bfin);
- clockevent_bfin.cpumask = cpumask_of(0);
- clockevents_register_device(&clockevent_bfin);
+static struct irqaction coretmr_irq = {
+ .name = "Blackfin CoreTimer",
+ .flags = IRQF_DISABLED | IRQF_TIMER | \
+ IRQF_IRQPOLL | IRQF_PERCPU,
+ .handler = bfin_coretmr_interrupt,
+};
- return 0;
+void bfin_coretmr_clockevent_init(void)
+{
+ unsigned long clock_tick;
+ unsigned int cpu = smp_processor_id();
+ struct clock_event_device *evt = &per_cpu(coretmr_events, cpu);
+
+ evt->name = "bfin_core_timer";
+ evt->rating = 350;
+ evt->irq = -1;
+ evt->shift = 32;
+ evt->features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT;
+ evt->set_next_event = bfin_coretmr_set_next_event;
+ evt->set_mode = bfin_coretmr_set_mode;
+
+ clock_tick = get_cclk() / TIME_SCALE;
+ evt->mult = div_sc(clock_tick, NSEC_PER_SEC, evt->shift);
+ evt->max_delta_ns = clockevent_delta2ns(-1, evt);
+ evt->min_delta_ns = clockevent_delta2ns(100, evt);
+
+ evt->cpumask = cpumask_of(cpu);
+
+ clockevents_register_device(evt);
}
+#endif /* CONFIG_TICKSOURCE_CORETMR */
+
void __init time_init(void)
{
@@ -370,5 +375,21 @@ void __init time_init(void)
bfin_cs_cycles_init();
bfin_cs_gptimer0_init();
- bfin_clockevent_init();
+
+#if defined(CONFIG_TICKSOURCE_CORETMR)
+ bfin_coretmr_init();
+ setup_irq(IRQ_CORETMR, &coretmr_irq);
+ bfin_coretmr_clockevent_init();
+#endif
+
+#if defined(CONFIG_TICKSOURCE_GPTMR0)
+ bfin_gptmr0_init();
+ setup_irq(IRQ_TIMER0, &gptmr0_irq);
+ gptmr0_irq.dev_id = &clockevent_gptmr0;
+ bfin_gptmr0_clockevent_init(&clockevent_gptmr0);
+#endif
+
+#if !defined(CONFIG_TICKSOURCE_CORETMR) && !defined(CONFIG_TICKSOURCE_GPTMR0)
+# error at least one clock event device is required
+#endif
}
diff --git a/arch/blackfin/kernel/traps.c b/arch/blackfin/kernel/traps.c
index d3cbcd6bd98..ba70c4bc269 100644
--- a/arch/blackfin/kernel/traps.c
+++ b/arch/blackfin/kernel/traps.c
@@ -138,6 +138,12 @@ static void decode_address(char *buf, unsigned long address)
if (!mm)
continue;
+ if (!down_read_trylock(&mm->mmap_sem)) {
+ if (!in_atomic)
+ mmput(mm);
+ continue;
+ }
+
for (n = rb_first(&mm->mm_rb); n; n = rb_next(n)) {
struct vm_area_struct *vma;
@@ -177,6 +183,7 @@ static void decode_address(char *buf, unsigned long address)
sprintf(buf, "[ %s vma:0x%lx-0x%lx]",
name, vma->vm_start, vma->vm_end);
+ up_read(&mm->mmap_sem);
if (!in_atomic)
mmput(mm);
@@ -186,11 +193,16 @@ static void decode_address(char *buf, unsigned long address)
goto done;
}
}
+
+ up_read(&mm->mmap_sem);
if (!in_atomic)
mmput(mm);
}
- /* we were unable to find this address anywhere */
+ /*
+ * we were unable to find this address anywhere,
+ * or some MMs were skipped because they were in use.
+ */
sprintf(buf, "/* kernel dynamic memory */");
done:
@@ -248,9 +260,7 @@ asmlinkage notrace void trap_c(struct pt_regs *fp)
#ifdef CONFIG_DEBUG_BFIN_HWTRACE_ON
int j;
#endif
-#ifdef CONFIG_DEBUG_HUNT_FOR_ZERO
unsigned int cpu = raw_smp_processor_id();
-#endif
const char *strerror = NULL;
int sig = 0;
siginfo_t info;
@@ -639,7 +649,17 @@ asmlinkage notrace void trap_c(struct pt_regs *fp)
{
info.si_signo = sig;
info.si_errno = 0;
- info.si_addr = (void __user *)fp->pc;
+ switch (trapnr) {
+ case VEC_CPLB_VL:
+ case VEC_MISALI_D:
+ case VEC_CPLB_M:
+ case VEC_CPLB_MHIT:
+ info.si_addr = (void __user *)cpu_pda[cpu].dcplb_fault_addr;
+ break;
+ default:
+ info.si_addr = (void __user *)fp->pc;
+ break;
+ }
force_sig_info(sig, &info, current);
}
@@ -712,7 +732,7 @@ static void decode_instruction(unsigned short *address)
verbose_printk("RTE");
else if (opcode == 0x0025)
verbose_printk("EMUEXCPT");
- else if (opcode == 0x0040 && opcode <= 0x0047)
+ else if (opcode >= 0x0040 && opcode <= 0x0047)
verbose_printk("STI R%i", opcode & 7);
else if (opcode >= 0x0050 && opcode <= 0x0057)
verbose_printk("JUMP (P%i)", opcode & 7);
@@ -1096,7 +1116,7 @@ void dump_bfin_mem(struct pt_regs *fp)
/* And the last RETI points to the current userspace context */
if ((fp + 1)->pc >= current->mm->start_code &&
(fp + 1)->pc <= current->mm->end_code) {
- verbose_printk(KERN_NOTICE "It might be better to look around here : \n");
+ verbose_printk(KERN_NOTICE "It might be better to look around here :\n");
verbose_printk(KERN_NOTICE "-------------------------------------------\n");
show_regs(fp + 1);
verbose_printk(KERN_NOTICE "-------------------------------------------\n");
diff --git a/arch/blackfin/kernel/vmlinux.lds.S b/arch/blackfin/kernel/vmlinux.lds.S
index 66799e763dc..984c7817239 100644
--- a/arch/blackfin/kernel/vmlinux.lds.S
+++ b/arch/blackfin/kernel/vmlinux.lds.S
@@ -15,7 +15,12 @@ _jiffies = _jiffies_64;
SECTIONS
{
+#ifdef CONFIG_RAMKERNEL
. = CONFIG_BOOT_LOAD;
+#else
+ . = CONFIG_ROM_BASE;
+#endif
+
/* Neither the text, ro_data or bss section need to be aligned
* So pack them back to back
*/
@@ -31,6 +36,12 @@ SECTIONS
LOCK_TEXT
IRQENTRY_TEXT
KPROBES_TEXT
+#ifdef CONFIG_ROMKERNEL
+ __sinittext = .;
+ INIT_TEXT
+ __einittext = .;
+ EXIT_TEXT
+#endif
*(.text.*)
*(.fixup)
@@ -50,8 +61,14 @@ SECTIONS
/* Just in case the first read only is a 32-bit access */
RO_DATA(4)
+ __rodata_end = .;
+#ifdef CONFIG_ROMKERNEL
+ . = CONFIG_BOOT_LOAD;
+ .bss : AT(__rodata_end)
+#else
.bss :
+#endif
{
. = ALIGN(4);
___bss_start = .;
@@ -67,7 +84,11 @@ SECTIONS
___bss_stop = .;
}
+#if defined(CONFIG_ROMKERNEL)
+ .data : AT(LOADADDR(.bss) + SIZEOF(.bss))
+#else
.data :
+#endif
{
__sdata = .;
/* This gets done first, so the glob doesn't suck it in */
@@ -94,6 +115,8 @@ SECTIONS
__edata = .;
}
+ __data_lma = LOADADDR(.data);
+ __data_len = SIZEOF(.data);
/* The init section should be last, so when we free it, it goes into
* the general memory pool, and (hopefully) will decrease fragmentation
@@ -103,25 +126,58 @@ SECTIONS
. = ALIGN(PAGE_SIZE);
___init_begin = .;
+#ifdef CONFIG_RAMKERNEL
INIT_TEXT_SECTION(PAGE_SIZE)
- . = ALIGN(16);
- INIT_DATA_SECTION(16)
- PERCPU(4)
- /* we have to discard exit text and such at runtime, not link time, to
+ /* We have to discard exit text and such at runtime, not link time, to
* handle embedded cross-section references (alt instructions, bug
- * table, eh_frame, etc...)
+ * table, eh_frame, etc...). We need all of our .text up front and
+ * .data after it for PCREL call issues.
*/
.exit.text :
{
EXIT_TEXT
}
+
+ . = ALIGN(16);
+ INIT_DATA_SECTION(16)
+ PERCPU(4)
+
.exit.data :
{
EXIT_DATA
}
.text_l1 L1_CODE_START : AT(LOADADDR(.exit.data) + SIZEOF(.exit.data))
+#else
+ .init.data : AT(__data_lma + __data_len)
+ {
+ __sinitdata = .;
+ INIT_DATA
+ INIT_SETUP(16)
+ INIT_CALLS
+ CON_INITCALL
+ SECURITY_INITCALL
+ INIT_RAM_FS
+
+ . = ALIGN(4);
+ ___per_cpu_load = .;
+ ___per_cpu_start = .;
+ *(.data.percpu.first)
+ *(.data.percpu.page_aligned)
+ *(.data.percpu)
+ *(.data.percpu.shared_aligned)
+ ___per_cpu_end = .;
+
+ EXIT_DATA
+ __einitdata = .;
+ }
+ __init_data_lma = LOADADDR(.init.data);
+ __init_data_len = SIZEOF(.init.data);
+ __init_data_end = .;
+
+ .text_l1 L1_CODE_START : AT(__init_data_lma + __init_data_len)
+#endif
{
. = ALIGN(4);
__stext_l1 = .;
@@ -202,7 +258,11 @@ SECTIONS
/* Force trailing alignment of our init section so that when we
* free our init memory, we don't leave behind a partial page.
*/
+#ifdef CONFIG_RAMKERNEL
. = __l2_lma + __l2_len;
+#else
+ . = __init_data_end;
+#endif
. = ALIGN(PAGE_SIZE);
___init_end = .;