Diffstat (limited to 'arch/blackfin/kernel')
-rw-r--r--  arch/blackfin/kernel/Makefile                |   1
-rw-r--r--  arch/blackfin/kernel/bfin_dma_5xx.c          |  29
-rw-r--r--  arch/blackfin/kernel/bfin_gpio.c             |   6
-rw-r--r--  arch/blackfin/kernel/bfin_ksyms.c            |   1
-rw-r--r--  arch/blackfin/kernel/cplb-mpu/cplbinit.c     |  10
-rw-r--r--  arch/blackfin/kernel/cplb-mpu/cplbmgr.c      |  36
-rw-r--r--  arch/blackfin/kernel/cplb-nompu/cplbinit.c   |  23
-rw-r--r--  arch/blackfin/kernel/ipipe.c                 |   7
-rw-r--r--  arch/blackfin/kernel/irqchip.c               | 114
-rw-r--r--  arch/blackfin/kernel/kgdb.c                  | 297
-rw-r--r--  arch/blackfin/kernel/mcount.S                |  70
-rw-r--r--  arch/blackfin/kernel/process.c               | 174
-rw-r--r--  arch/blackfin/kernel/ptrace.c                |   1
-rw-r--r--  arch/blackfin/kernel/setup.c                 | 207
-rw-r--r--  arch/blackfin/kernel/sys_bfin.c              |   1
-rw-r--r--  arch/blackfin/kernel/traps.c                 | 103
16 files changed, 526 insertions(+), 554 deletions(-)
diff --git a/arch/blackfin/kernel/Makefile b/arch/blackfin/kernel/Makefile
index 3731088e181..141d9281e4b 100644
--- a/arch/blackfin/kernel/Makefile
+++ b/arch/blackfin/kernel/Makefile
@@ -20,7 +20,6 @@ obj-$(CONFIG_FUNCTION_GRAPH_TRACER) += ftrace.o
CFLAGS_REMOVE_ftrace.o = -pg
obj-$(CONFIG_IPIPE) += ipipe.o
-obj-$(CONFIG_IPIPE_TRACE_MCOUNT) += mcount.o
obj-$(CONFIG_BFIN_GPTIMERS) += gptimers.o
obj-$(CONFIG_CPLB_INFO) += cplbinfo.o
obj-$(CONFIG_MODULES) += module.o
diff --git a/arch/blackfin/kernel/bfin_dma_5xx.c b/arch/blackfin/kernel/bfin_dma_5xx.c
index e0bf8cc0690..9f9b8281665 100644
--- a/arch/blackfin/kernel/bfin_dma_5xx.c
+++ b/arch/blackfin/kernel/bfin_dma_5xx.c
@@ -253,32 +253,31 @@ void __init early_dma_memcpy(void *pdst, const void *psrc, size_t size)
BUG_ON(src % 4);
BUG_ON(size % 4);
- /* Force a sync in case a previous config reset on this channel
- * occurred. This is needed so subsequent writes to DMA registers
- * are not spuriously lost/corrupted.
- */
- __builtin_bfin_ssync();
-
src_ch = 0;
/* Find an available memDMA channel */
while (1) {
- if (!src_ch || src_ch == (struct dma_register *)MDMA_S1_NEXT_DESC_PTR) {
- dst_ch = (struct dma_register *)MDMA_D0_NEXT_DESC_PTR;
- src_ch = (struct dma_register *)MDMA_S0_NEXT_DESC_PTR;
- } else {
+ if (src_ch == (struct dma_register *)MDMA_S0_NEXT_DESC_PTR) {
dst_ch = (struct dma_register *)MDMA_D1_NEXT_DESC_PTR;
src_ch = (struct dma_register *)MDMA_S1_NEXT_DESC_PTR;
+ } else {
+ dst_ch = (struct dma_register *)MDMA_D0_NEXT_DESC_PTR;
+ src_ch = (struct dma_register *)MDMA_S0_NEXT_DESC_PTR;
}
- if (!bfin_read16(&src_ch->cfg)) {
+ if (!bfin_read16(&src_ch->cfg))
+ break;
+ else if (bfin_read16(&dst_ch->irq_status) & DMA_DONE) {
+ bfin_write16(&src_ch->cfg, 0);
break;
- } else {
- if (bfin_read16(&src_ch->irq_status) & DMA_DONE)
- bfin_write16(&src_ch->cfg, 0);
}
-
}
+ /* Force a sync in case a previous config reset on this channel
+ * occurred. This is needed so subsequent writes to DMA registers
+ * are not spuriously lost/corrupted.
+ */
+ __builtin_bfin_ssync();
+
/* Destination */
bfin_write32(&dst_ch->start_addr, dst);
bfin_write16(&dst_ch->x_count, size >> 2);
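
Illustrative aside, not part of the patch above: the reworked loop ping-pongs between the two memory-DMA streams, taking the first one whose source config register reads back zero, and reclaiming a stream whose previous transfer has already signalled DMA_DONE. A minimal, self-contained sketch of that selection logic with invented stand-ins for the MMR reads:

#include <stdbool.h>
#include <stdio.h>

/* Toy model of the channel-selection loop: two memDMA streams, each
 * considered busy while its config register is non-zero; a stream whose
 * last transfer finished (DMA_DONE) is reclaimed by clearing its config.
 * The struct and values are invented for illustration only. */
struct toy_stream { unsigned short cfg; bool done; };

static int pick_stream(struct toy_stream s[2])
{
    int idx = 0;
    for (;;) {
        if (!s[idx].cfg)
            return idx;         /* stream is free */
        if (s[idx].done) {
            s[idx].cfg = 0;     /* reclaim finished stream */
            return idx;
        }
        idx ^= 1;               /* try the other stream */
    }
}

int main(void)
{
    struct toy_stream s[2] = { { 0x85, false }, { 0x85, true } };
    printf("selected stream %d\n", pick_stream(s));  /* prints 1 */
    return 0;
}
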
diff --git a/arch/blackfin/kernel/bfin_gpio.c b/arch/blackfin/kernel/bfin_gpio.c
index beffa00a93c..6b944627137 100644
--- a/arch/blackfin/kernel/bfin_gpio.c
+++ b/arch/blackfin/kernel/bfin_gpio.c
@@ -686,14 +686,12 @@ void bfin_gpio_pm_hibernate_restore(void)
*port_fer[bank] = gpio_bank_saved[bank].fer;
#endif
gpio_array[bank]->inen = gpio_bank_saved[bank].inen;
+ gpio_array[bank]->data_set = gpio_bank_saved[bank].data
+ & gpio_bank_saved[bank].dir;
gpio_array[bank]->dir = gpio_bank_saved[bank].dir;
gpio_array[bank]->polar = gpio_bank_saved[bank].polar;
gpio_array[bank]->edge = gpio_bank_saved[bank].edge;
gpio_array[bank]->both = gpio_bank_saved[bank].both;
-
- gpio_array[bank]->data_set = gpio_bank_saved[bank].data
- | gpio_bank_saved[bank].dir;
-
gpio_array[bank]->maska = gpio_bank_saved[bank].maska;
}
AWA_DUMMY_READ(maska);
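
Illustrative aside, not part of the patch above: the reordering pre-loads the data latch only for pins that will be driven as outputs (data & dir), and does so before the direction register is restored, so outputs come back at their saved level instead of being OR'd with the direction mask as before. A tiny illustration with made-up register values:

#include <stdio.h>

/* Made-up values; shows which bits end up in the data latch before the
 * direction register is written. Only bits set in 'dir' (outputs) are
 * taken from the saved data. */
int main(void)
{
    unsigned int saved_data = 0xA5F0, saved_dir = 0xFF00;
    unsigned int data_set = saved_data & saved_dir;  /* 0xA500: outputs only */
    printf("data_set = 0x%04x, then dir = 0x%04x\n", data_set, saved_dir);
    return 0;
}
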
diff --git a/arch/blackfin/kernel/bfin_ksyms.c b/arch/blackfin/kernel/bfin_ksyms.c
index aa05e638fb7..ed8392c117e 100644
--- a/arch/blackfin/kernel/bfin_ksyms.c
+++ b/arch/blackfin/kernel/bfin_ksyms.c
@@ -10,6 +10,7 @@
#include <linux/uaccess.h>
#include <asm/cacheflush.h>
+#include <asm/io.h>
/* Allow people to have their own Blackfin exception handler in a module */
EXPORT_SYMBOL(bfin_return_from_exception);
diff --git a/arch/blackfin/kernel/cplb-mpu/cplbinit.c b/arch/blackfin/kernel/cplb-mpu/cplbinit.c
index c006a44527b..36193eed9a1 100644
--- a/arch/blackfin/kernel/cplb-mpu/cplbinit.c
+++ b/arch/blackfin/kernel/cplb-mpu/cplbinit.c
@@ -46,13 +46,13 @@ void __init generate_cplb_tables_cpu(unsigned int cpu)
printk(KERN_INFO "MPU: setting up cplb tables with memory protection\n");
-#ifdef CONFIG_BFIN_ICACHE
+#ifdef CONFIG_BFIN_EXTMEM_ICACHEABLE
i_cache = CPLB_L1_CHBL | ANOMALY_05000158_WORKAROUND;
#endif
-#ifdef CONFIG_BFIN_DCACHE
+#ifdef CONFIG_BFIN_EXTMEM_DCACHEABLE
d_cache = CPLB_L1_CHBL;
-#ifdef CONFIG_BFIN_WT
+#ifdef CONFIG_BFIN_EXTMEM_WRITETHROUGH
d_cache |= CPLB_L1_AOW | CPLB_WT;
#endif
#endif
@@ -91,9 +91,9 @@ void __init generate_cplb_tables_cpu(unsigned int cpu)
/* Cover L2 memory */
#if L2_LENGTH > 0
dcplb_tbl[cpu][i_d].addr = L2_START;
- dcplb_tbl[cpu][i_d++].data = L2_DMEMORY | PAGE_SIZE_1MB;
+ dcplb_tbl[cpu][i_d++].data = L2_DMEMORY;
icplb_tbl[cpu][i_i].addr = L2_START;
- icplb_tbl[cpu][i_i++].data = L2_IMEMORY | PAGE_SIZE_1MB;
+ icplb_tbl[cpu][i_i++].data = L2_IMEMORY;
#endif
first_mask_dcplb = i_d;
diff --git a/arch/blackfin/kernel/cplb-mpu/cplbmgr.c b/arch/blackfin/kernel/cplb-mpu/cplbmgr.c
index 784923e52a9..bcdfe9b0b71 100644
--- a/arch/blackfin/kernel/cplb-mpu/cplbmgr.c
+++ b/arch/blackfin/kernel/cplb-mpu/cplbmgr.c
@@ -150,15 +150,19 @@ static noinline int dcplb_miss(unsigned int cpu)
nr_dcplb_miss[cpu]++;
d_data = CPLB_SUPV_WR | CPLB_VALID | CPLB_DIRTY | PAGE_SIZE_4KB;
-#ifdef CONFIG_BFIN_DCACHE
+#ifdef CONFIG_BFIN_EXTMEM_DCACHEABLE
if (bfin_addr_dcacheable(addr)) {
d_data |= CPLB_L1_CHBL | ANOMALY_05000158_WORKAROUND;
-#ifdef CONFIG_BFIN_WT
+# ifdef CONFIG_BFIN_EXTMEM_WRITETHROUGH
d_data |= CPLB_L1_AOW | CPLB_WT;
-#endif
+# endif
}
#endif
- if (addr >= physical_mem_end) {
+
+ if (L2_LENGTH && addr >= L2_START && addr < L2_START + L2_LENGTH) {
+ addr = L2_START;
+ d_data = L2_DMEMORY;
+ } else if (addr >= physical_mem_end) {
if (addr >= ASYNC_BANK0_BASE && addr < ASYNC_BANK3_BASE + ASYNC_BANK3_SIZE
&& (status & FAULT_USERSUPV)) {
addr &= ~0x3fffff;
@@ -235,7 +239,7 @@ static noinline int icplb_miss(unsigned int cpu)
i_data = CPLB_VALID | CPLB_PORTPRIO | PAGE_SIZE_4KB;
-#ifdef CONFIG_BFIN_ICACHE
+#ifdef CONFIG_BFIN_EXTMEM_ICACHEABLE
/*
* Normal RAM, and possibly the reserved memory area, are
* cacheable.
@@ -245,7 +249,10 @@ static noinline int icplb_miss(unsigned int cpu)
i_data |= CPLB_L1_CHBL | ANOMALY_05000158_WORKAROUND;
#endif
- if (addr >= physical_mem_end) {
+ if (L2_LENGTH && addr >= L2_START && addr < L2_START + L2_LENGTH) {
+ addr = L2_START;
+ i_data = L2_IMEMORY;
+ } else if (addr >= physical_mem_end) {
if (addr >= BOOT_ROM_START && addr < BOOT_ROM_START + BOOT_ROM_LENGTH
&& (status & FAULT_USERSUPV)) {
addr &= ~(1 * 1024 * 1024 - 1);
@@ -365,13 +372,18 @@ void set_mask_dcplbs(unsigned long *masks, unsigned int cpu)
local_irq_save_hw(flags);
current_rwx_mask[cpu] = masks;
- d_data = CPLB_SUPV_WR | CPLB_VALID | CPLB_DIRTY | PAGE_SIZE_4KB;
-#ifdef CONFIG_BFIN_DCACHE
- d_data |= CPLB_L1_CHBL;
-#ifdef CONFIG_BFIN_WT
- d_data |= CPLB_L1_AOW | CPLB_WT;
-#endif
+ if (L2_LENGTH && addr >= L2_START && addr < L2_START + L2_LENGTH) {
+ addr = L2_START;
+ d_data = L2_DMEMORY;
+ } else {
+ d_data = CPLB_SUPV_WR | CPLB_VALID | CPLB_DIRTY | PAGE_SIZE_4KB;
+#ifdef CONFIG_BFIN_EXTMEM_DCACHEABLE
+ d_data |= CPLB_L1_CHBL;
+# ifdef CONFIG_BFIN_EXTMEM_WRITETHROUGH
+ d_data |= CPLB_L1_AOW | CPLB_WT;
+# endif
#endif
+ }
disable_dcplb();
for (i = first_mask_dcplb; i < first_switched_dcplb; i++) {
diff --git a/arch/blackfin/kernel/cplb-nompu/cplbinit.c b/arch/blackfin/kernel/cplb-nompu/cplbinit.c
index d6c067782e6..685f160a5a3 100644
--- a/arch/blackfin/kernel/cplb-nompu/cplbinit.c
+++ b/arch/blackfin/kernel/cplb-nompu/cplbinit.c
@@ -72,13 +72,24 @@ void __init generate_cplb_tables_cpu(unsigned int cpu)
}
/* Cover L1 memory. One 4M area for code and data each is enough. */
- if (L1_DATA_A_LENGTH || L1_DATA_B_LENGTH) {
- d_tbl[i_d].addr = L1_DATA_A_START;
- d_tbl[i_d++].data = L1_DMEMORY | PAGE_SIZE_4MB;
+ if (cpu == 0) {
+ if (L1_DATA_A_LENGTH || L1_DATA_B_LENGTH) {
+ d_tbl[i_d].addr = L1_DATA_A_START;
+ d_tbl[i_d++].data = L1_DMEMORY | PAGE_SIZE_4MB;
+ }
+ i_tbl[i_i].addr = L1_CODE_START;
+ i_tbl[i_i++].data = L1_IMEMORY | PAGE_SIZE_4MB;
}
- i_tbl[i_i].addr = L1_CODE_START;
- i_tbl[i_i++].data = L1_IMEMORY | PAGE_SIZE_4MB;
-
+#ifdef CONFIG_SMP
+ else {
+ if (L1_DATA_A_LENGTH || L1_DATA_B_LENGTH) {
+ d_tbl[i_d].addr = COREB_L1_DATA_A_START;
+ d_tbl[i_d++].data = L1_DMEMORY | PAGE_SIZE_4MB;
+ }
+ i_tbl[i_i].addr = COREB_L1_CODE_START;
+ i_tbl[i_i++].data = L1_IMEMORY | PAGE_SIZE_4MB;
+ }
+#endif
first_switched_dcplb = i_d;
first_switched_icplb = i_i;
diff --git a/arch/blackfin/kernel/ipipe.c b/arch/blackfin/kernel/ipipe.c
index d8cde1fc5cb..b8d22034b9a 100644
--- a/arch/blackfin/kernel/ipipe.c
+++ b/arch/blackfin/kernel/ipipe.c
@@ -52,7 +52,7 @@ EXPORT_SYMBOL(__ipipe_freq_scale);
atomic_t __ipipe_irq_lvdepth[IVG15 + 1];
-unsigned long __ipipe_irq_lvmask = __all_masked_irq_flags;
+unsigned long __ipipe_irq_lvmask = bfin_no_irqs;
EXPORT_SYMBOL(__ipipe_irq_lvmask);
static void __ipipe_ack_irq(unsigned irq, struct irq_desc *desc)
@@ -342,8 +342,3 @@ void ___ipipe_sync_pipeline(unsigned long syncmask)
}
EXPORT_SYMBOL(show_stack);
-
-#ifdef CONFIG_IPIPE_TRACE_MCOUNT
-void notrace _mcount(void);
-EXPORT_SYMBOL(_mcount);
-#endif /* CONFIG_IPIPE_TRACE_MCOUNT */
diff --git a/arch/blackfin/kernel/irqchip.c b/arch/blackfin/kernel/irqchip.c
index 6e31e935bb3..4b5fd36187d 100644
--- a/arch/blackfin/kernel/irqchip.c
+++ b/arch/blackfin/kernel/irqchip.c
@@ -38,38 +38,15 @@
#include <asm/pda.h>
static atomic_t irq_err_count;
-static spinlock_t irq_controller_lock;
-
-/*
- * Dummy mask/unmask handler
- */
-void dummy_mask_unmask_irq(unsigned int irq)
-{
-}
-
void ack_bad_irq(unsigned int irq)
{
atomic_inc(&irq_err_count);
printk(KERN_ERR "IRQ: spurious interrupt %d\n", irq);
}
-static struct irq_chip bad_chip = {
- .ack = dummy_mask_unmask_irq,
- .mask = dummy_mask_unmask_irq,
- .unmask = dummy_mask_unmask_irq,
-};
-
-static int bad_stats;
static struct irq_desc bad_irq_desc = {
- .status = IRQ_DISABLED,
- .chip = &bad_chip,
.handle_irq = handle_bad_irq,
- .depth = 1,
.lock = __SPIN_LOCK_UNLOCKED(irq_desc->lock),
- .kstat_irqs = &bad_stats,
-#ifdef CONFIG_SMP
- .affinity = CPU_MASK_ALL
-#endif
};
#ifdef CONFIG_CPUMASK_OFFSTACK
@@ -77,6 +54,7 @@ static struct irq_desc bad_irq_desc = {
#error "Blackfin architecture does not support CONFIG_CPUMASK_OFFSTACK."
#endif
+#ifdef CONFIG_PROC_FS
int show_interrupts(struct seq_file *p, void *v)
{
int i = *(loff_t *) v, j;
@@ -108,50 +86,29 @@ int show_interrupts(struct seq_file *p, void *v)
}
return 0;
}
-
-/*
- * do_IRQ handles all hardware IRQs. Decoded IRQs should not
- * come via this function. Instead, they should provide their
- * own 'handler'
- */
-#ifdef CONFIG_DO_IRQ_L1
-__attribute__((l1_text))
-#endif
-asmlinkage void asm_do_IRQ(unsigned int irq, struct pt_regs *regs)
-{
- struct pt_regs *old_regs;
- struct irq_desc *desc = irq_desc + irq;
-#ifndef CONFIG_IPIPE
- unsigned short pending, other_ints;
#endif
- old_regs = set_irq_regs(regs);
- /*
- * Some hardware gives randomly wrong interrupts. Rather
- * than crashing, do something sensible.
- */
- if (irq >= NR_IRQS)
- desc = &bad_irq_desc;
-
- irq_enter();
#ifdef CONFIG_DEBUG_STACKOVERFLOW
+static void check_stack_overflow(int irq)
+{
/* Debugging check for stack overflow: is there less than STACK_WARN free? */
- {
- long sp;
-
- sp = __get_SP() & (THREAD_SIZE-1);
+ long sp = __get_SP() & (THREAD_SIZE - 1);
- if (unlikely(sp < (sizeof(struct thread_info) + STACK_WARN))) {
- dump_stack();
- printk(KERN_EMERG "%s: possible stack overflow while handling irq %i "
- " only %ld bytes free\n",
- __func__, irq, sp - sizeof(struct thread_info));
- }
+ if (unlikely(sp < (sizeof(struct thread_info) + STACK_WARN))) {
+ dump_stack();
+ pr_emerg("irq%i: possible stack overflow only %ld bytes free\n",
+ irq, sp - sizeof(struct thread_info));
}
+}
+#else
+static inline void check_stack_overflow(int irq) { }
#endif
- generic_handle_irq(irq);
#ifndef CONFIG_IPIPE
+static void maybe_lower_to_irq14(void)
+{
+ unsigned short pending, other_ints;
+
/*
* If we're the only interrupt running (ignoring IRQ15 which
* is for syscalls), lower our priority to IRQ14 so that
@@ -165,7 +122,38 @@ asmlinkage void asm_do_IRQ(unsigned int irq, struct pt_regs *regs)
other_ints = pending & (pending - 1);
if (other_ints == 0)
lower_to_irq14();
-#endif /* !CONFIG_IPIPE */
+}
+#else
+static inline void maybe_lower_to_irq14(void) { }
+#endif
+
+/*
+ * do_IRQ handles all hardware IRQs. Decoded IRQs should not
+ * come via this function. Instead, they should provide their
+ * own 'handler'
+ */
+#ifdef CONFIG_DO_IRQ_L1
+__attribute__((l1_text))
+#endif
+asmlinkage void asm_do_IRQ(unsigned int irq, struct pt_regs *regs)
+{
+ struct pt_regs *old_regs = set_irq_regs(regs);
+
+ irq_enter();
+
+ check_stack_overflow(irq);
+
+ /*
+ * Some hardware gives randomly wrong interrupts. Rather
+ * than crashing, do something sensible.
+ */
+ if (irq >= NR_IRQS)
+ handle_bad_irq(irq, &bad_irq_desc);
+ else
+ generic_handle_irq(irq);
+
+ maybe_lower_to_irq14();
+
irq_exit();
set_irq_regs(old_regs);
@@ -173,14 +161,6 @@ asmlinkage void asm_do_IRQ(unsigned int irq, struct pt_regs *regs)
void __init init_IRQ(void)
{
- struct irq_desc *desc;
- int irq;
-
- spin_lock_init(&irq_controller_lock);
- for (irq = 0, desc = irq_desc; irq < NR_IRQS; irq++, desc++) {
- *desc = bad_irq_desc;
- }
-
init_arch_irq();
#ifdef CONFIG_DEBUG_BFIN_HWTRACE_EXPAND
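
Illustrative aside, not part of the patch above: the check_stack_overflow() helper factored out of asm_do_IRQ() relies on the kernel stack and thread_info sharing one THREAD_SIZE-aligned block, so masking the stack pointer with THREAD_SIZE - 1 yields the bytes still free above thread_info. A self-contained sketch with invented sizes:

#include <stdio.h>

/* Invented sizes; the real values come from the Blackfin thread_info
 * layout. stack_low() returns 1 when less than STACK_WARN bytes remain
 * above thread_info. */
#define THREAD_SIZE      8192
#define STACK_WARN       1024
#define THREAD_INFO_SIZE  128

static int stack_low(unsigned long sp)
{
    long left = sp & (THREAD_SIZE - 1);
    return left < (THREAD_INFO_SIZE + STACK_WARN);
}

int main(void)
{
    /* plenty of room vs. nearly exhausted */
    printf("%d %d\n", stack_low(0x1000F00), stack_low(0x1000100));  /* 0 1 */
    return 0;
}
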
diff --git a/arch/blackfin/kernel/kgdb.c b/arch/blackfin/kernel/kgdb.c
index da28f796ad7..cce79d05b90 100644
--- a/arch/blackfin/kernel/kgdb.c
+++ b/arch/blackfin/kernel/kgdb.c
@@ -34,15 +34,6 @@ int gdb_bfin_vector = -1;
#error change the definition of slavecpulocks
#endif
-#define IN_MEM(addr, size, l1_addr, l1_size) \
-({ \
- unsigned long __addr = (unsigned long)(addr); \
- (l1_size && __addr >= l1_addr && __addr + (size) <= l1_addr + l1_size); \
-})
-#define ASYNC_BANK_SIZE \
- (ASYNC_BANK0_SIZE + ASYNC_BANK1_SIZE + \
- ASYNC_BANK2_SIZE + ASYNC_BANK3_SIZE)
-
void pt_regs_to_gdb_regs(unsigned long *gdb_regs, struct pt_regs *regs)
{
gdb_regs[BFIN_R0] = regs->r0;
@@ -463,41 +454,88 @@ static int hex(char ch)
static int validate_memory_access_address(unsigned long addr, int size)
{
- int cpu = raw_smp_processor_id();
-
- if (size < 0)
+ if (size < 0 || addr == 0)
return -EFAULT;
- if (addr >= 0x1000 && (addr + size) <= physical_mem_end)
- return 0;
- if (addr >= SYSMMR_BASE)
- return 0;
- if (IN_MEM(addr, size, ASYNC_BANK0_BASE, ASYNC_BANK_SIZE))
- return 0;
- if (cpu == 0) {
- if (IN_MEM(addr, size, L1_SCRATCH_START, L1_SCRATCH_LENGTH))
- return 0;
- if (IN_MEM(addr, size, L1_CODE_START, L1_CODE_LENGTH))
- return 0;
- if (IN_MEM(addr, size, L1_DATA_A_START, L1_DATA_A_LENGTH))
- return 0;
- if (IN_MEM(addr, size, L1_DATA_B_START, L1_DATA_B_LENGTH))
- return 0;
-#ifdef CONFIG_SMP
- } else if (cpu == 1) {
- if (IN_MEM(addr, size, COREB_L1_SCRATCH_START, L1_SCRATCH_LENGTH))
+ return bfin_mem_access_type(addr, size);
+}
+
+static int bfin_probe_kernel_read(char *dst, char *src, int size)
+{
+ unsigned long lsrc = (unsigned long)src;
+ int mem_type;
+
+ mem_type = validate_memory_access_address(lsrc, size);
+ if (mem_type < 0)
+ return mem_type;
+
+ if (lsrc >= SYSMMR_BASE) {
+ if (size == 2 && lsrc % 2 == 0) {
+ u16 mmr = bfin_read16(src);
+ memcpy(dst, &mmr, sizeof(mmr));
return 0;
- if (IN_MEM(addr, size, COREB_L1_CODE_START, L1_CODE_LENGTH))
+ } else if (size == 4 && lsrc % 4 == 0) {
+ u32 mmr = bfin_read32(src);
+ memcpy(dst, &mmr, sizeof(mmr));
return 0;
- if (IN_MEM(addr, size, COREB_L1_DATA_A_START, L1_DATA_A_LENGTH))
+ }
+ } else {
+ switch (mem_type) {
+ case BFIN_MEM_ACCESS_CORE:
+ case BFIN_MEM_ACCESS_CORE_ONLY:
+ return probe_kernel_read(dst, src, size);
+ /* XXX: should support IDMA here with SMP */
+ case BFIN_MEM_ACCESS_DMA:
+ if (dma_memcpy(dst, src, size))
+ return 0;
+ break;
+ case BFIN_MEM_ACCESS_ITEST:
+ if (isram_memcpy(dst, src, size))
+ return 0;
+ break;
+ }
+ }
+
+ return -EFAULT;
+}
+
+static int bfin_probe_kernel_write(char *dst, char *src, int size)
+{
+ unsigned long ldst = (unsigned long)dst;
+ int mem_type;
+
+ mem_type = validate_memory_access_address(ldst, size);
+ if (mem_type < 0)
+ return mem_type;
+
+ if (ldst >= SYSMMR_BASE) {
+ if (size == 2 && ldst % 2 == 0) {
+ u16 mmr;
+ memcpy(&mmr, src, sizeof(mmr));
+ bfin_write16(dst, mmr);
return 0;
- if (IN_MEM(addr, size, COREB_L1_DATA_B_START, L1_DATA_B_LENGTH))
+ } else if (size == 4 && ldst % 4 == 0) {
+ u32 mmr;
+ memcpy(&mmr, src, sizeof(mmr));
+ bfin_write32(dst, mmr);
return 0;
-#endif
+ }
+ } else {
+ switch (mem_type) {
+ case BFIN_MEM_ACCESS_CORE:
+ case BFIN_MEM_ACCESS_CORE_ONLY:
+ return probe_kernel_write(dst, src, size);
+ /* XXX: should support IDMA here with SMP */
+ case BFIN_MEM_ACCESS_DMA:
+ if (dma_memcpy(dst, src, size))
+ return 0;
+ break;
+ case BFIN_MEM_ACCESS_ITEST:
+ if (isram_memcpy(dst, src, size))
+ return 0;
+ break;
+ }
}
- if (IN_MEM(addr, size, L2_START, L2_LENGTH))
- return 0;
-
return -EFAULT;
}
@@ -509,14 +547,6 @@ int kgdb_mem2hex(char *mem, char *buf, int count)
{
char *tmp;
int err;
- unsigned char *pch;
- unsigned short mmr16;
- unsigned long mmr32;
- int cpu = raw_smp_processor_id();
-
- err = validate_memory_access_address((unsigned long)mem, count);
- if (err)
- return err;
/*
* We use the upper half of buf as an intermediate buffer for the
@@ -524,44 +554,7 @@ int kgdb_mem2hex(char *mem, char *buf, int count)
*/
tmp = buf + count;
- if ((unsigned int)mem >= SYSMMR_BASE) { /*access MMR registers*/
- switch (count) {
- case 2:
- if ((unsigned int)mem % 2 == 0) {
- mmr16 = *(unsigned short *)mem;
- pch = (unsigned char *)&mmr16;
- *tmp++ = *pch++;
- *tmp++ = *pch++;
- tmp -= 2;
- } else
- err = -EFAULT;
- break;
- case 4:
- if ((unsigned int)mem % 4 == 0) {
- mmr32 = *(unsigned long *)mem;
- pch = (unsigned char *)&mmr32;
- *tmp++ = *pch++;
- *tmp++ = *pch++;
- *tmp++ = *pch++;
- *tmp++ = *pch++;
- tmp -= 4;
- } else
- err = -EFAULT;
- break;
- default:
- err = -EFAULT;
- }
- } else if ((cpu == 0 && IN_MEM(mem, count, L1_CODE_START, L1_CODE_LENGTH))
-#ifdef CONFIG_SMP
- || (cpu == 1 && IN_MEM(mem, count, COREB_L1_CODE_START, L1_CODE_LENGTH))
-#endif
- ) {
- /* access L1 instruction SRAM*/
- if (dma_memcpy(tmp, mem, count) == NULL)
- err = -EFAULT;
- } else
- err = probe_kernel_read(tmp, mem, count);
-
+ err = bfin_probe_kernel_read(tmp, mem, count);
if (!err) {
while (count > 0) {
buf = pack_hex_byte(buf, *tmp);
@@ -582,13 +575,8 @@ int kgdb_mem2hex(char *mem, char *buf, int count)
*/
int kgdb_ebin2mem(char *buf, char *mem, int count)
{
- char *tmp_old;
- char *tmp_new;
- unsigned short *mmr16;
- unsigned long *mmr32;
- int err;
+ char *tmp_old, *tmp_new;
int size;
- int cpu = raw_smp_processor_id();
tmp_old = tmp_new = buf;
@@ -601,41 +589,7 @@ int kgdb_ebin2mem(char *buf, char *mem, int count)
tmp_old++;
}
- err = validate_memory_access_address((unsigned long)mem, size);
- if (err)
- return err;
-
- if ((unsigned int)mem >= SYSMMR_BASE) { /*access MMR registers*/
- switch (size) {
- case 2:
- if ((unsigned int)mem % 2 == 0) {
- mmr16 = (unsigned short *)buf;
- *(unsigned short *)mem = *mmr16;
- } else
- err = -EFAULT;
- break;
- case 4:
- if ((unsigned int)mem % 4 == 0) {
- mmr32 = (unsigned long *)buf;
- *(unsigned long *)mem = *mmr32;
- } else
- err = -EFAULT;
- break;
- default:
- err = -EFAULT;
- }
- } else if ((cpu == 0 && IN_MEM(mem, count, L1_CODE_START, L1_CODE_LENGTH))
-#ifdef CONFIG_SMP
- || (cpu == 1 && IN_MEM(mem, count, COREB_L1_CODE_START, L1_CODE_LENGTH))
-#endif
- ) {
- /* access L1 instruction SRAM */
- if (dma_memcpy(mem, buf, size) == NULL)
- err = -EFAULT;
- } else
- err = probe_kernel_write(mem, buf, size);
-
- return err;
+ return bfin_probe_kernel_write(mem, buf, count);
}
/*
@@ -645,16 +599,7 @@ int kgdb_ebin2mem(char *buf, char *mem, int count)
*/
int kgdb_hex2mem(char *buf, char *mem, int count)
{
- char *tmp_raw;
- char *tmp_hex;
- unsigned short *mmr16;
- unsigned long *mmr32;
- int err;
- int cpu = raw_smp_processor_id();
-
- err = validate_memory_access_address((unsigned long)mem, count);
- if (err)
- return err;
+ char *tmp_raw, *tmp_hex;
/*
* We use the upper half of buf as an intermediate buffer for the
@@ -669,39 +614,18 @@ int kgdb_hex2mem(char *buf, char *mem, int count)
*tmp_raw |= hex(*tmp_hex--) << 4;
}
- if ((unsigned int)mem >= SYSMMR_BASE) { /*access MMR registers*/
- switch (count) {
- case 2:
- if ((unsigned int)mem % 2 == 0) {
- mmr16 = (unsigned short *)tmp_raw;
- *(unsigned short *)mem = *mmr16;
- } else
- err = -EFAULT;
- break;
- case 4:
- if ((unsigned int)mem % 4 == 0) {
- mmr32 = (unsigned long *)tmp_raw;
- *(unsigned long *)mem = *mmr32;
- } else
- err = -EFAULT;
- break;
- default:
- err = -EFAULT;
- }
- } else if ((cpu == 0 && IN_MEM(mem, count, L1_CODE_START, L1_CODE_LENGTH))
-#ifdef CONFIG_SMP
- || (cpu == 1 && IN_MEM(mem, count, COREB_L1_CODE_START, L1_CODE_LENGTH))
-#endif
- ) {
- /* access L1 instruction SRAM */
- if (dma_memcpy(mem, tmp_raw, count) == NULL)
- err = -EFAULT;
- } else
- err = probe_kernel_write(mem, tmp_raw, count);
-
- return err;
+ return bfin_probe_kernel_write(mem, tmp_raw, count);
}
+#define IN_MEM(addr, size, l1_addr, l1_size) \
+({ \
+ unsigned long __addr = (unsigned long)(addr); \
+ (l1_size && __addr >= l1_addr && __addr + (size) <= l1_addr + l1_size); \
+})
+#define ASYNC_BANK_SIZE \
+ (ASYNC_BANK0_SIZE + ASYNC_BANK1_SIZE + \
+ ASYNC_BANK2_SIZE + ASYNC_BANK3_SIZE)
+
int kgdb_validate_break_address(unsigned long addr)
{
int cpu = raw_smp_processor_id();
@@ -724,46 +648,17 @@ int kgdb_validate_break_address(unsigned long addr)
int kgdb_arch_set_breakpoint(unsigned long addr, char *saved_instr)
{
- int err;
- int cpu = raw_smp_processor_id();
-
- if ((cpu == 0 && IN_MEM(addr, BREAK_INSTR_SIZE, L1_CODE_START, L1_CODE_LENGTH))
-#ifdef CONFIG_SMP
- || (cpu == 1 && IN_MEM(addr, BREAK_INSTR_SIZE, COREB_L1_CODE_START, L1_CODE_LENGTH))
-#endif
- ) {
- /* access L1 instruction SRAM */
- if (dma_memcpy(saved_instr, (void *)addr, BREAK_INSTR_SIZE)
- == NULL)
- return -EFAULT;
-
- if (dma_memcpy((void *)addr, arch_kgdb_ops.gdb_bpt_instr,
- BREAK_INSTR_SIZE) == NULL)
- return -EFAULT;
-
- return 0;
- } else {
- err = probe_kernel_read(saved_instr, (char *)addr,
- BREAK_INSTR_SIZE);
- if (err)
- return err;
-
- return probe_kernel_write((char *)addr,
- arch_kgdb_ops.gdb_bpt_instr, BREAK_INSTR_SIZE);
- }
+ int err = bfin_probe_kernel_read(saved_instr, (char *)addr,
+ BREAK_INSTR_SIZE);
+ if (err)
+ return err;
+ return bfin_probe_kernel_write((char *)addr, arch_kgdb_ops.gdb_bpt_instr,
+ BREAK_INSTR_SIZE);
}
int kgdb_arch_remove_breakpoint(unsigned long addr, char *bundle)
{
- if (IN_MEM(addr, BREAK_INSTR_SIZE, L1_CODE_START, L1_CODE_LENGTH)) {
- /* access L1 instruction SRAM */
- if (dma_memcpy((void *)addr, bundle, BREAK_INSTR_SIZE) == NULL)
- return -EFAULT;
-
- return 0;
- } else
- return probe_kernel_write((char *)addr,
- (char *)bundle, BREAK_INSTR_SIZE);
+ return bfin_probe_kernel_write((char *)addr, bundle, BREAK_INSTR_SIZE);
}
int kgdb_arch_init(void)
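
Illustrative aside, not part of the patch above: in the new bfin_probe_kernel_read()/bfin_probe_kernel_write() helpers, system MMRs are only touched with naturally aligned 16- or 32-bit accesses; everything else is routed through probe_kernel_*, dma_memcpy() or isram_memcpy() according to bfin_mem_access_type(). A sketch of just the alignment rule, using an arbitrary MMR-style address:

#include <stdbool.h>
#include <stdio.h>

/* Mirrors the MMR branch only: a 16-bit access must be 2-byte aligned,
 * a 32-bit access 4-byte aligned; any other size is rejected. */
static bool mmr_access_ok(unsigned long addr, int size)
{
    return (size == 2 && addr % 2 == 0) || (size == 4 && addr % 4 == 0);
}

int main(void)
{
    unsigned long addr = 0xFFC00014;        /* arbitrary MMR-style address */
    printf("%d %d %d\n",
           mmr_access_ok(addr, 2),          /* 1 */
           mmr_access_ok(addr, 4),          /* 1 */
           mmr_access_ok(addr + 1, 2));     /* 0: misaligned */
    return 0;
}
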
diff --git a/arch/blackfin/kernel/mcount.S b/arch/blackfin/kernel/mcount.S
deleted file mode 100644
index edcfb3865f4..00000000000
--- a/arch/blackfin/kernel/mcount.S
+++ /dev/null
@@ -1,70 +0,0 @@
-/*
- * linux/arch/blackfin/mcount.S
- *
- * Copyright (C) 2006 Analog Devices Inc.
- *
- * 2007/04/12 Save index, length, modify and base registers. --rpm
- */
-
-#include <linux/linkage.h>
-#include <asm/blackfin.h>
-
-.text
-
-.align 4 /* just in case */
-
-ENTRY(__mcount)
- [--sp] = i0;
- [--sp] = i1;
- [--sp] = i2;
- [--sp] = i3;
- [--sp] = l0;
- [--sp] = l1;
- [--sp] = l2;
- [--sp] = l3;
- [--sp] = m0;
- [--sp] = m1;
- [--sp] = m2;
- [--sp] = m3;
- [--sp] = b0;
- [--sp] = b1;
- [--sp] = b2;
- [--sp] = b3;
- [--sp] = ( r7:0, p5:0 );
- [--sp] = ASTAT;
-
- p1.L = _ipipe_trace_enable;
- p1.H = _ipipe_trace_enable;
- r7 = [p1];
- CC = r7 == 0;
- if CC jump out;
- link 0x10;
- r0 = 0x0;
- [sp + 0xc] = r0; /* v */
- r0 = 0x0; /* type: IPIPE_TRACE_FN */
- r1 = rets;
- p0 = [fp]; /* p0: Prior FP */
- r2 = [p0 + 4]; /* r2: Prior RETS */
- call ___ipipe_trace;
- unlink;
-out:
- ASTAT = [sp++];
- ( r7:0, p5:0 ) = [sp++];
- b3 = [sp++];
- b2 = [sp++];
- b1 = [sp++];
- b0 = [sp++];
- m3 = [sp++];
- m2 = [sp++];
- m1 = [sp++];
- m0 = [sp++];
- l3 = [sp++];
- l2 = [sp++];
- l1 = [sp++];
- l0 = [sp++];
- i3 = [sp++];
- i2 = [sp++];
- i1 = [sp++];
- i0 = [sp++];
- rts;
-ENDPROC(__mcount)
diff --git a/arch/blackfin/kernel/process.c b/arch/blackfin/kernel/process.c
index 30d0843ed70..9da36bab7cc 100644
--- a/arch/blackfin/kernel/process.c
+++ b/arch/blackfin/kernel/process.c
@@ -160,6 +160,29 @@ pid_t kernel_thread(int (*fn) (void *), void *arg, unsigned long flags)
}
EXPORT_SYMBOL(kernel_thread);
+/*
+ * Do necessary setup to start up a newly executed thread.
+ *
+ * pass the data segment into user programs if it exists,
+ * it can't hurt anything as far as I can tell
+ */
+void start_thread(struct pt_regs *regs, unsigned long new_ip, unsigned long new_sp)
+{
+ set_fs(USER_DS);
+ regs->pc = new_ip;
+ if (current->mm)
+ regs->p5 = current->mm->start_data;
+#ifdef CONFIG_SMP
+ task_thread_info(current)->l1_task_info.stack_start =
+ (void *)current->mm->context.stack_start;
+ task_thread_info(current)->l1_task_info.lowest_sp = (void *)new_sp;
+ memcpy(L1_SCRATCH_TASK_INFO, &task_thread_info(current)->l1_task_info,
+ sizeof(*L1_SCRATCH_TASK_INFO));
+#endif
+ wrusp(new_sp);
+}
+EXPORT_SYMBOL_GPL(start_thread);
+
void flush_thread(void)
{
}
@@ -321,6 +344,87 @@ void finish_atomic_sections (struct pt_regs *regs)
}
}
+static inline
+int in_mem(unsigned long addr, unsigned long size,
+ unsigned long start, unsigned long end)
+{
+ return addr >= start && addr + size <= end;
+}
+static inline
+int in_mem_const_off(unsigned long addr, unsigned long size, unsigned long off,
+ unsigned long const_addr, unsigned long const_size)
+{
+ return const_size &&
+ in_mem(addr, size, const_addr + off, const_addr + const_size);
+}
+static inline
+int in_mem_const(unsigned long addr, unsigned long size,
+ unsigned long const_addr, unsigned long const_size)
+{
+ return in_mem_const_off(addr, size, 0, const_addr, const_size);
+}
+#define IN_ASYNC(bnum, bctlnum) \
+({ \
+ (bfin_read_EBIU_AMGCTL() & 0xe) < ((bnum + 1) << 1) ? -EFAULT : \
+ bfin_read_EBIU_AMBCTL##bctlnum() & B##bnum##RDYEN ? -EFAULT : \
+ BFIN_MEM_ACCESS_CORE; \
+})
+
+int bfin_mem_access_type(unsigned long addr, unsigned long size)
+{
+ int cpu = raw_smp_processor_id();
+
+ /* Check that things do not wrap around */
+ if (addr > ULONG_MAX - size)
+ return -EFAULT;
+
+ if (in_mem(addr, size, FIXED_CODE_START, physical_mem_end))
+ return BFIN_MEM_ACCESS_CORE;
+
+ if (in_mem_const(addr, size, L1_CODE_START, L1_CODE_LENGTH))
+ return cpu == 0 ? BFIN_MEM_ACCESS_ITEST : BFIN_MEM_ACCESS_IDMA;
+ if (in_mem_const(addr, size, L1_SCRATCH_START, L1_SCRATCH_LENGTH))
+ return cpu == 0 ? BFIN_MEM_ACCESS_CORE_ONLY : -EFAULT;
+ if (in_mem_const(addr, size, L1_DATA_A_START, L1_DATA_A_LENGTH))
+ return cpu == 0 ? BFIN_MEM_ACCESS_CORE : BFIN_MEM_ACCESS_IDMA;
+ if (in_mem_const(addr, size, L1_DATA_B_START, L1_DATA_B_LENGTH))
+ return cpu == 0 ? BFIN_MEM_ACCESS_CORE : BFIN_MEM_ACCESS_IDMA;
+#ifdef COREB_L1_CODE_START
+ if (in_mem_const(addr, size, COREB_L1_CODE_START, COREB_L1_CODE_LENGTH))
+ return cpu == 1 ? BFIN_MEM_ACCESS_ITEST : BFIN_MEM_ACCESS_IDMA;
+ if (in_mem_const(addr, size, COREB_L1_SCRATCH_START, L1_SCRATCH_LENGTH))
+ return cpu == 1 ? BFIN_MEM_ACCESS_CORE_ONLY : -EFAULT;
+ if (in_mem_const(addr, size, COREB_L1_DATA_A_START, COREB_L1_DATA_A_LENGTH))
+ return cpu == 1 ? BFIN_MEM_ACCESS_CORE : BFIN_MEM_ACCESS_IDMA;
+ if (in_mem_const(addr, size, COREB_L1_DATA_B_START, COREB_L1_DATA_B_LENGTH))
+ return cpu == 1 ? BFIN_MEM_ACCESS_CORE : BFIN_MEM_ACCESS_IDMA;
+#endif
+ if (in_mem_const(addr, size, L2_START, L2_LENGTH))
+ return BFIN_MEM_ACCESS_CORE;
+
+ if (addr >= SYSMMR_BASE)
+ return BFIN_MEM_ACCESS_CORE_ONLY;
+
+ /* We can't read EBIU banks that aren't enabled or we end up hanging
+ * on the access to the async space.
+ */
+ if (in_mem_const(addr, size, ASYNC_BANK0_BASE, ASYNC_BANK0_SIZE))
+ return IN_ASYNC(0, 0);
+ if (in_mem_const(addr, size, ASYNC_BANK1_BASE, ASYNC_BANK1_SIZE))
+ return IN_ASYNC(1, 0);
+ if (in_mem_const(addr, size, ASYNC_BANK2_BASE, ASYNC_BANK2_SIZE))
+ return IN_ASYNC(2, 1);
+ if (in_mem_const(addr, size, ASYNC_BANK3_BASE, ASYNC_BANK3_SIZE))
+ return IN_ASYNC(3, 1);
+
+ if (in_mem_const(addr, size, BOOT_ROM_START, BOOT_ROM_LENGTH))
+ return BFIN_MEM_ACCESS_CORE;
+ if (in_mem_const(addr, size, L1_ROM_START, L1_ROM_LENGTH))
+ return BFIN_MEM_ACCESS_DMA;
+
+ return -EFAULT;
+}
+
#if defined(CONFIG_ACCESS_CHECK)
#ifdef CONFIG_ACCESS_OK_L1
__attribute__((l1_text))
@@ -330,51 +434,61 @@ int _access_ok(unsigned long addr, unsigned long size)
{
if (size == 0)
return 1;
- if (addr > (addr + size))
+ /* Check that things do not wrap around */
+ if (addr > ULONG_MAX - size)
return 0;
if (segment_eq(get_fs(), KERNEL_DS))
return 1;
#ifdef CONFIG_MTD_UCLINUX
- if (addr >= memory_start && (addr + size) <= memory_end)
- return 1;
- if (addr >= memory_mtd_end && (addr + size) <= physical_mem_end)
+ if (1)
+#else
+ if (0)
+#endif
+ {
+ if (in_mem(addr, size, memory_start, memory_end))
+ return 1;
+ if (in_mem(addr, size, memory_mtd_end, physical_mem_end))
+ return 1;
+# ifndef CONFIG_ROMFS_ON_MTD
+ if (0)
+# endif
+ /* For XIP, allow user space to use pointers within the ROMFS. */
+ if (in_mem(addr, size, memory_mtd_start, memory_mtd_end))
+ return 1;
+ } else {
+ if (in_mem(addr, size, memory_start, physical_mem_end))
+ return 1;
+ }
+
+ if (in_mem(addr, size, (unsigned long)__init_begin, (unsigned long)__init_end))
return 1;
-#ifdef CONFIG_ROMFS_ON_MTD
- /* For XIP, allow user space to use pointers within the ROMFS. */
- if (addr >= memory_mtd_start && (addr + size) <= memory_mtd_end)
+ if (in_mem_const(addr, size, L1_CODE_START, L1_CODE_LENGTH))
return 1;
-#endif
-#else
- if (addr >= memory_start && (addr + size) <= physical_mem_end)
+ if (in_mem_const_off(addr, size, _etext_l1 - _stext_l1, L1_CODE_START, L1_CODE_LENGTH))
return 1;
-#endif
- if (addr >= (unsigned long)__init_begin &&
- addr + size <= (unsigned long)__init_end)
+ if (in_mem_const_off(addr, size, _ebss_l1 - _sdata_l1, L1_DATA_A_START, L1_DATA_A_LENGTH))
return 1;
- if (addr >= get_l1_scratch_start()
- && addr + size <= get_l1_scratch_start() + L1_SCRATCH_LENGTH)
+ if (in_mem_const_off(addr, size, _ebss_b_l1 - _sdata_b_l1, L1_DATA_B_START, L1_DATA_B_LENGTH))
return 1;
-#if L1_CODE_LENGTH != 0
- if (addr >= get_l1_code_start() + (_etext_l1 - _stext_l1)
- && addr + size <= get_l1_code_start() + L1_CODE_LENGTH)
+#ifdef COREB_L1_CODE_START
+ if (in_mem_const(addr, size, COREB_L1_CODE_START, COREB_L1_CODE_LENGTH))
return 1;
-#endif
-#if L1_DATA_A_LENGTH != 0
- if (addr >= get_l1_data_a_start() + (_ebss_l1 - _sdata_l1)
- && addr + size <= get_l1_data_a_start() + L1_DATA_A_LENGTH)
+ if (in_mem_const(addr, size, COREB_L1_SCRATCH_START, L1_SCRATCH_LENGTH))
return 1;
-#endif
-#if L1_DATA_B_LENGTH != 0
- if (addr >= get_l1_data_b_start() + (_ebss_b_l1 - _sdata_b_l1)
- && addr + size <= get_l1_data_b_start() + L1_DATA_B_LENGTH)
+ if (in_mem_const(addr, size, COREB_L1_DATA_A_START, COREB_L1_DATA_A_LENGTH))
return 1;
-#endif
-#if L2_LENGTH != 0
- if (addr >= L2_START + (_ebss_l2 - _stext_l2)
- && addr + size <= L2_START + L2_LENGTH)
+ if (in_mem_const(addr, size, COREB_L1_DATA_B_START, COREB_L1_DATA_B_LENGTH))
return 1;
#endif
+ if (in_mem_const_off(addr, size, _ebss_l2 - _stext_l2, L2_START, L2_LENGTH))
+ return 1;
+
+ if (in_mem_const(addr, size, BOOT_ROM_START, BOOT_ROM_LENGTH))
+ return 1;
+ if (in_mem_const(addr, size, L1_ROM_START, L1_ROM_LENGTH))
+ return 1;
+
return 0;
}
EXPORT_SYMBOL(_access_ok);
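
Illustrative aside, not part of the patch above: the in_mem()/in_mem_const() helpers added here treat a zero-length region as never matching and require the whole [addr, addr + size) span to fit inside the region. A standalone sketch with arbitrary addresses:

#include <stdio.h>

/* Same shape as the kernel helpers; the addresses below are arbitrary
 * examples, not real Blackfin memory-map values. */
static int in_mem(unsigned long addr, unsigned long size,
                  unsigned long start, unsigned long end)
{
    return addr >= start && addr + size <= end;
}

static int in_mem_const(unsigned long addr, unsigned long size,
                        unsigned long const_addr, unsigned long const_size)
{
    return const_size &&
           in_mem(addr, size, const_addr, const_addr + const_size);
}

int main(void)
{
    printf("%d\n", in_mem_const(0xFF800100, 4, 0xFF800000, 0x4000));  /* 1 */
    printf("%d\n", in_mem_const(0xFF800100, 4, 0xFF800000, 0));       /* 0 */
    return 0;
}
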
diff --git a/arch/blackfin/kernel/ptrace.c b/arch/blackfin/kernel/ptrace.c
index d76618db50d..6a387eec6b6 100644
--- a/arch/blackfin/kernel/ptrace.c
+++ b/arch/blackfin/kernel/ptrace.c
@@ -31,7 +31,6 @@
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/smp.h>
-#include <linux/smp_lock.h>
#include <linux/errno.h>
#include <linux/ptrace.h>
#include <linux/user.h>
diff --git a/arch/blackfin/kernel/setup.c b/arch/blackfin/kernel/setup.c
index 6454babdfaf..6225edae488 100644
--- a/arch/blackfin/kernel/setup.c
+++ b/arch/blackfin/kernel/setup.c
@@ -117,15 +117,49 @@ void __cpuinit bfin_setup_caches(unsigned int cpu)
*/
#ifdef CONFIG_BFIN_ICACHE
printk(KERN_INFO "Instruction Cache Enabled for CPU%u\n", cpu);
+ printk(KERN_INFO " External memory:"
+# ifdef CONFIG_BFIN_EXTMEM_ICACHEABLE
+ " cacheable"
+# else
+ " uncacheable"
+# endif
+ " in instruction cache\n");
+ if (L2_LENGTH)
+ printk(KERN_INFO " L2 SRAM :"
+# ifdef CONFIG_BFIN_L2_ICACHEABLE
+ " cacheable"
+# else
+ " uncacheable"
+# endif
+ " in instruction cache\n");
+
+#else
+ printk(KERN_INFO "Instruction Cache Disabled for CPU%u\n", cpu);
#endif
+
#ifdef CONFIG_BFIN_DCACHE
- printk(KERN_INFO "Data Cache Enabled for CPU%u"
-# if defined CONFIG_BFIN_WB
- " (write-back)"
-# elif defined CONFIG_BFIN_WT
- " (write-through)"
+ printk(KERN_INFO "Data Cache Enabled for CPU%u\n", cpu);
+ printk(KERN_INFO " External memory:"
+# if defined CONFIG_BFIN_EXTMEM_WRITEBACK
+ " cacheable (write-back)"
+# elif defined CONFIG_BFIN_EXTMEM_WRITETHROUGH
+ " cacheable (write-through)"
+# else
+ " uncacheable"
+# endif
+ " in data cache\n");
+ if (L2_LENGTH)
+ printk(KERN_INFO " L2 SRAM :"
+# if defined CONFIG_BFIN_L2_WRITEBACK
+ " cacheable (write-back)"
+# elif defined CONFIG_BFIN_L2_WRITETHROUGH
+ " cacheable (write-through)"
+# else
+ " uncacheable"
# endif
- "\n", cpu);
+ " in data cache\n");
+#else
+ printk(KERN_INFO "Data Cache Disabled for CPU%u\n", cpu);
#endif
}
@@ -134,7 +168,6 @@ void __cpuinit bfin_setup_cpudata(unsigned int cpu)
struct blackfin_cpudata *cpudata = &per_cpu(cpu_data, cpu);
cpudata->idle = current;
- cpudata->loops_per_jiffy = loops_per_jiffy;
cpudata->imemctl = bfin_read_IMEM_CONTROL();
cpudata->dmemctl = bfin_read_DMEM_CONTROL();
}
@@ -374,13 +407,14 @@ static void __init print_memory_map(char *who)
bfin_memmap.map[i].addr + bfin_memmap.map[i].size);
switch (bfin_memmap.map[i].type) {
case BFIN_MEMMAP_RAM:
- printk("(usable)\n");
- break;
+ printk(KERN_CONT "(usable)\n");
+ break;
case BFIN_MEMMAP_RESERVED:
- printk("(reserved)\n");
- break;
- default: printk("type %lu\n", bfin_memmap.map[i].type);
- break;
+ printk(KERN_CONT "(reserved)\n");
+ break;
+ default:
+ printk(KERN_CONT "type %lu\n", bfin_memmap.map[i].type);
+ break;
}
}
}
@@ -443,9 +477,11 @@ static __init void parse_cmdline_early(char *cmdline_p)
} else if (!memcmp(to, "clkin_hz=", 9)) {
to += 9;
early_init_clkin_hz(to);
+#ifdef CONFIG_EARLY_PRINTK
} else if (!memcmp(to, "earlyprintk=", 12)) {
to += 12;
setup_early_printk(to);
+#endif
} else if (!memcmp(to, "memmap=", 7)) {
to += 7;
parse_memmap(to);
@@ -516,7 +552,7 @@ static __init void memory_setup(void)
&& ((unsigned long *)mtd_phys)[1] == ROMSB_WORD1)
mtd_size =
PAGE_ALIGN(be32_to_cpu(((unsigned long *)mtd_phys)[2]));
-# if (defined(CONFIG_BFIN_ICACHE) && ANOMALY_05000263)
+# if (defined(CONFIG_BFIN_EXTMEM_ICACHEABLE) && ANOMALY_05000263)
/* Due to a Hardware Anomaly we need to limit the size of usable
* instruction memory to max 60MB, 56 if HUNT_FOR_ZERO is on
* 05000263 - Hardware loop corrupted when taking an ICPLB exception
@@ -531,20 +567,26 @@ static __init void memory_setup(void)
# endif /* ANOMALY_05000263 */
# endif /* CONFIG_ROMFS_FS */
- memory_end -= mtd_size;
-
- if (mtd_size == 0) {
- console_init();
- panic("Don't boot kernel without rootfs attached.");
+ /* Since the default MTD_UCLINUX has no magic number, we just blindly
+ * read 8 past the end of the kernel's image, and look at it.
+ * When no image is attached, mtd_size is set to a random number
+ * Do some basic sanity checks before operating on things
+ */
+ if (mtd_size == 0 || memory_end <= mtd_size) {
+ pr_emerg("Could not find valid ram mtd attached.\n");
+ } else {
+ memory_end -= mtd_size;
+
+ /* Relocate MTD image to the top of memory after the uncached memory area */
+ uclinux_ram_map.phys = memory_mtd_start = memory_end;
+ uclinux_ram_map.size = mtd_size;
+ pr_info("Found mtd parition at 0x%p, (len=0x%lx), moving to 0x%p\n",
+ _end, mtd_size, (void *)memory_mtd_start);
+ dma_memcpy((void *)uclinux_ram_map.phys, _end, uclinux_ram_map.size);
}
-
- /* Relocate MTD image to the top of memory after the uncached memory area */
- uclinux_ram_map.phys = memory_mtd_start = memory_end;
- uclinux_ram_map.size = mtd_size;
- dma_memcpy((void *)uclinux_ram_map.phys, _end, uclinux_ram_map.size);
#endif /* CONFIG_MTD_UCLINUX */
-#if (defined(CONFIG_BFIN_ICACHE) && ANOMALY_05000263)
+#if (defined(CONFIG_BFIN_EXTMEM_ICACHEABLE) && ANOMALY_05000263)
/* Due to a Hardware Anomaly we need to limit the size of usable
* instruction memory to max 60MB, 56 if HUNT_FOR_ZERO is on
* 05000263 - Hardware loop corrupted when taking an ICPLB exception
@@ -578,19 +620,19 @@ static __init void memory_setup(void)
printk(KERN_INFO "Kernel Managed Memory: %ldMB\n", _ramend >> 20);
printk(KERN_INFO "Memory map:\n"
- KERN_INFO " fixedcode = 0x%p-0x%p\n"
- KERN_INFO " text = 0x%p-0x%p\n"
- KERN_INFO " rodata = 0x%p-0x%p\n"
- KERN_INFO " bss = 0x%p-0x%p\n"
- KERN_INFO " data = 0x%p-0x%p\n"
- KERN_INFO " stack = 0x%p-0x%p\n"
- KERN_INFO " init = 0x%p-0x%p\n"
- KERN_INFO " available = 0x%p-0x%p\n"
+ " fixedcode = 0x%p-0x%p\n"
+ " text = 0x%p-0x%p\n"
+ " rodata = 0x%p-0x%p\n"
+ " bss = 0x%p-0x%p\n"
+ " data = 0x%p-0x%p\n"
+ " stack = 0x%p-0x%p\n"
+ " init = 0x%p-0x%p\n"
+ " available = 0x%p-0x%p\n"
#ifdef CONFIG_MTD_UCLINUX
- KERN_INFO " rootfs = 0x%p-0x%p\n"
+ " rootfs = 0x%p-0x%p\n"
#endif
#if DMA_UNCACHED_REGION > 0
- KERN_INFO " DMA Zone = 0x%p-0x%p\n"
+ " DMA Zone = 0x%p-0x%p\n"
#endif
, (void *)FIXED_CODE_START, (void *)FIXED_CODE_END,
_stext, _etext,
@@ -764,6 +806,11 @@ void __init setup_arch(char **cmdline_p)
{
unsigned long sclk, cclk;
+ /* Check to make sure we are running on the right processor */
+ if (unlikely(CPUID != bfin_cpuid()))
+ printk(KERN_ERR "ERROR: Not running on ADSP-%s: unknown CPUID 0x%04x Rev 0.%d\n",
+ CPU, bfin_cpuid(), bfin_revid());
+
#ifdef CONFIG_DUMMY_CONSOLE
conswitchp = &dummy_con;
#endif
@@ -778,14 +825,17 @@ void __init setup_arch(char **cmdline_p)
memcpy(boot_command_line, command_line, COMMAND_LINE_SIZE);
boot_command_line[COMMAND_LINE_SIZE - 1] = '\0';
- /* setup memory defaults from the user config */
- physical_mem_end = 0;
- _ramend = get_mem_size() * 1024 * 1024;
-
memset(&bfin_memmap, 0, sizeof(bfin_memmap));
+ /* If the user does not specify things on the command line, use
+ * what the bootloader set things up as
+ */
+ physical_mem_end = 0;
parse_cmdline_early(&command_line[0]);
+ if (_ramend == 0)
+ _ramend = get_mem_size() * 1024 * 1024;
+
if (physical_mem_end == 0)
physical_mem_end = _ramend;
@@ -815,20 +865,13 @@ void __init setup_arch(char **cmdline_p)
#endif
printk(KERN_INFO "Hardware Trace ");
if (bfin_read_TBUFCTL() & 0x1)
- printk("Active ");
+ printk(KERN_CONT "Active ");
else
- printk("Off ");
+ printk(KERN_CONT "Off ");
if (bfin_read_TBUFCTL() & 0x2)
- printk("and Enabled\n");
+ printk(KERN_CONT "and Enabled\n");
else
- printk("and Disabled\n");
-
-#if defined(CONFIG_CHR_DEV_FLASH) || defined(CONFIG_BLK_DEV_FLASH)
- /* we need to initialize the Flashrom device here since we might
- * do things with flash early on in the boot
- */
- flash_probe();
-#endif
+ printk(KERN_CONT "and Disabled\n");
printk(KERN_INFO "Boot Mode: %i\n", bfin_read_SYSCR() & 0xF);
@@ -837,7 +880,8 @@ void __init setup_arch(char **cmdline_p)
defined(CONFIG_BF538) || defined(CONFIG_BF539)
_bfin_swrst = bfin_read_SWRST();
#else
- _bfin_swrst = bfin_read_SYSCR();
+ /* Clear boot mode field */
+ _bfin_swrst = bfin_read_SYSCR() & ~0xf;
#endif
#ifdef CONFIG_DEBUG_DOUBLEFAULT_PRINT
@@ -875,10 +919,7 @@ void __init setup_arch(char **cmdline_p)
else
printk(KERN_INFO "Compiled for ADSP-%s Rev 0.%d\n", CPU, bfin_compiled_revid());
- if (unlikely(CPUID != bfin_cpuid()))
- printk(KERN_ERR "ERROR: Not running on ADSP-%s: unknown CPUID 0x%04x Rev 0.%d\n",
- CPU, bfin_cpuid(), bfin_revid());
- else {
+ if (likely(CPUID == bfin_cpuid())) {
if (bfin_revid() != bfin_compiled_revid()) {
if (bfin_compiled_revid() == -1)
printk(KERN_ERR "Warning: Compiled for Rev none, but running on Rev %d\n",
@@ -895,10 +936,6 @@ void __init setup_arch(char **cmdline_p)
CPU, bfin_revid());
}
- /* We can't run on BF548-0.1 due to ANOMALY 05000448 */
- if (bfin_cpuid() == 0x27de && bfin_revid() == 1)
- panic("You can't run on this processor due to 05000448");
-
printk(KERN_INFO "Blackfin Linux support by http://blackfin.uclinux.org/\n");
printk(KERN_INFO "Processor Speed: %lu MHz core clock and %lu MHz System Clock\n",
@@ -1121,9 +1158,9 @@ static int show_cpuinfo(struct seq_file *m, void *v)
sclk/1000000, sclk%1000000);
seq_printf(m, "bogomips\t: %lu.%02lu\n"
"Calibration\t: %lu loops\n",
- (cpudata->loops_per_jiffy * HZ) / 500000,
- ((cpudata->loops_per_jiffy * HZ) / 5000) % 100,
- (cpudata->loops_per_jiffy * HZ));
+ (loops_per_jiffy * HZ) / 500000,
+ ((loops_per_jiffy * HZ) / 5000) % 100,
+ (loops_per_jiffy * HZ));
/* Check Cache configuration */
switch (cpudata->dmemctl & (1 << DMC0_P | 1 << DMC1_P)) {
@@ -1157,16 +1194,25 @@ static int show_cpuinfo(struct seq_file *m, void *v)
icache_size = 0;
seq_printf(m, "cache size\t: %d KB(L1 icache) "
- "%d KB(L1 dcache%s) %d KB(L2 cache)\n",
- icache_size, dcache_size,
-#if defined CONFIG_BFIN_WB
- "-wb"
-#elif defined CONFIG_BFIN_WT
- "-wt"
-#endif
- "", 0);
-
+ "%d KB(L1 dcache) %d KB(L2 cache)\n",
+ icache_size, dcache_size, 0);
seq_printf(m, "%s\n", cache);
+ seq_printf(m, "external memory\t: "
+#if defined(CONFIG_BFIN_EXTMEM_ICACHEABLE)
+ "cacheable"
+#else
+ "uncacheable"
+#endif
+ " in instruction cache\n");
+ seq_printf(m, "external memory\t: "
+#if defined(CONFIG_BFIN_EXTMEM_WRITEBACK)
+ "cacheable (write-back)"
+#elif defined(CONFIG_BFIN_EXTMEM_WRITETHROUGH)
+ "cacheable (write-through)"
+#else
+ "uncacheable"
+#endif
+ " in data cache\n");
if (icache_size)
seq_printf(m, "icache setup\t: %d Sub-banks/%d Ways, %d Lines/Way\n",
@@ -1239,8 +1285,25 @@ static int show_cpuinfo(struct seq_file *m, void *v)
if (cpu_num != num_possible_cpus() - 1)
return 0;
- if (L2_LENGTH)
+ if (L2_LENGTH) {
seq_printf(m, "L2 SRAM\t\t: %dKB\n", L2_LENGTH/0x400);
+ seq_printf(m, "L2 SRAM\t\t: "
+#if defined(CONFIG_BFIN_L2_ICACHEABLE)
+ "cacheable"
+#else
+ "uncacheable"
+#endif
+ " in instruction cache\n");
+ seq_printf(m, "L2 SRAM\t\t: "
+#if defined(CONFIG_BFIN_L2_WRITEBACK)
+ "cacheable (write-back)"
+#elif defined(CONFIG_BFIN_L2_WRITETHROUGH)
+ "cacheable (write-through)"
+#else
+ "uncacheable"
+#endif
+ " in data cache\n");
+ }
seq_printf(m, "board name\t: %s\n", bfin_board_name);
seq_printf(m, "board memory\t: %ld kB (0x%p -> 0x%p)\n",
physical_mem_end >> 10, (void *)0, (void *)physical_mem_end);
diff --git a/arch/blackfin/kernel/sys_bfin.c b/arch/blackfin/kernel/sys_bfin.c
index a8f1329c15a..3da60fb13ce 100644
--- a/arch/blackfin/kernel/sys_bfin.c
+++ b/arch/blackfin/kernel/sys_bfin.c
@@ -29,7 +29,6 @@
* 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*/
-#include <linux/smp_lock.h>
#include <linux/spinlock.h>
#include <linux/sem.h>
#include <linux/msg.h>
diff --git a/arch/blackfin/kernel/traps.c b/arch/blackfin/kernel/traps.c
index d279552fe9b..bf2b2d1f8ae 100644
--- a/arch/blackfin/kernel/traps.c
+++ b/arch/blackfin/kernel/traps.c
@@ -37,6 +37,7 @@
#include <asm/traps.h>
#include <asm/cacheflush.h>
#include <asm/cplb.h>
+#include <asm/dma.h>
#include <asm/blackfin.h>
#include <asm/irq_handler.h>
#include <linux/irq.h>
@@ -211,7 +212,7 @@ asmlinkage void double_fault_c(struct pt_regs *fp)
console_verbose();
oops_in_progress = 1;
#ifdef CONFIG_DEBUG_VERBOSE
- printk(KERN_EMERG "\n" KERN_EMERG "Double Fault\n");
+ printk(KERN_EMERG "Double Fault\n");
#ifdef CONFIG_DEBUG_DOUBLEFAULT_PRINT
if (((long)fp->seqstat & SEQSTAT_EXCAUSE) == VEC_UNCOV) {
unsigned int cpu = smp_processor_id();
@@ -569,11 +570,12 @@ asmlinkage void trap_c(struct pt_regs *fp)
if (kernel_mode_regs(fp) || (current && !current->mm)) {
console_verbose();
oops_in_progress = 1;
- if (strerror)
- verbose_printk(strerror);
}
if (sig != SIGTRAP) {
+ if (strerror)
+ verbose_printk(strerror);
+
dump_bfin_process(fp);
dump_bfin_mem(fp);
show_regs(fp);
@@ -582,15 +584,14 @@ asmlinkage void trap_c(struct pt_regs *fp)
#ifndef CONFIG_DEBUG_BFIN_NO_KERN_HWTRACE
if (trapnr == VEC_CPLB_I_M || trapnr == VEC_CPLB_M)
verbose_printk(KERN_NOTICE "No trace since you do not have "
- "CONFIG_DEBUG_BFIN_NO_KERN_HWTRACE enabled\n"
- KERN_NOTICE "\n");
+ "CONFIG_DEBUG_BFIN_NO_KERN_HWTRACE enabled\n\n");
else
#endif
dump_bfin_trace_buffer();
if (oops_in_progress) {
/* Dump the current kernel stack */
- verbose_printk(KERN_NOTICE "\n" KERN_NOTICE "Kernel Stack\n");
+ verbose_printk(KERN_NOTICE "Kernel Stack\n");
show_stack(current, NULL);
print_modules();
#ifndef CONFIG_ACCESS_CHECK
@@ -619,7 +620,9 @@ asmlinkage void trap_c(struct pt_regs *fp)
force_sig_info(sig, &info, current);
}
- if (ANOMALY_05000461 && trapnr == VEC_HWERR && !access_ok(VERIFY_READ, fp->pc, 8))
+ if ((ANOMALY_05000461 && trapnr == VEC_HWERR && !access_ok(VERIFY_READ, fp->pc, 8)) ||
+ (ANOMALY_05000281 && trapnr == VEC_HWERR) ||
+ (ANOMALY_05000189 && (trapnr == VEC_CPLB_I_VL || trapnr == VEC_CPLB_VL)))
fp->pc = SAFE_USER_INSTRUCTION;
traps_done:
@@ -636,57 +639,30 @@ asmlinkage void trap_c(struct pt_regs *fp)
*/
static bool get_instruction(unsigned short *val, unsigned short *address)
{
-
- unsigned long addr;
-
- addr = (unsigned long)address;
+ unsigned long addr = (unsigned long)address;
/* Check for odd addresses */
if (addr & 0x1)
return false;
- /* Check that things do not wrap around */
- if (addr > (addr + 2))
+ /* MMR region will never have instructions */
+ if (addr >= SYSMMR_BASE)
return false;
- /*
- * Since we are in exception context, we need to do a little address checking
- * We need to make sure we are only accessing valid memory, and
- * we don't read something in the async space that can hang forever
- */
- if ((addr >= FIXED_CODE_START && (addr + 2) <= physical_mem_end) ||
-#if L2_LENGTH != 0
- (addr >= L2_START && (addr + 2) <= (L2_START + L2_LENGTH)) ||
-#endif
- (addr >= BOOT_ROM_START && (addr + 2) <= (BOOT_ROM_START + BOOT_ROM_LENGTH)) ||
-#if L1_DATA_A_LENGTH != 0
- (addr >= L1_DATA_A_START && (addr + 2) <= (L1_DATA_A_START + L1_DATA_A_LENGTH)) ||
-#endif
-#if L1_DATA_B_LENGTH != 0
- (addr >= L1_DATA_B_START && (addr + 2) <= (L1_DATA_B_START + L1_DATA_B_LENGTH)) ||
-#endif
- (addr >= L1_SCRATCH_START && (addr + 2) <= (L1_SCRATCH_START + L1_SCRATCH_LENGTH)) ||
- (!(bfin_read_EBIU_AMBCTL0() & B0RDYEN) &&
- addr >= ASYNC_BANK0_BASE && (addr + 2) <= (ASYNC_BANK0_BASE + ASYNC_BANK0_SIZE)) ||
- (!(bfin_read_EBIU_AMBCTL0() & B1RDYEN) &&
- addr >= ASYNC_BANK1_BASE && (addr + 2) <= (ASYNC_BANK1_BASE + ASYNC_BANK1_SIZE)) ||
- (!(bfin_read_EBIU_AMBCTL1() & B2RDYEN) &&
- addr >= ASYNC_BANK2_BASE && (addr + 2) <= (ASYNC_BANK2_BASE + ASYNC_BANK1_SIZE)) ||
- (!(bfin_read_EBIU_AMBCTL1() & B3RDYEN) &&
- addr >= ASYNC_BANK3_BASE && (addr + 2) <= (ASYNC_BANK3_BASE + ASYNC_BANK1_SIZE))) {
- *val = *address;
- return true;
- }
-
-#if L1_CODE_LENGTH != 0
- if (addr >= L1_CODE_START && (addr + 2) <= (L1_CODE_START + L1_CODE_LENGTH)) {
- isram_memcpy(val, address, 2);
- return true;
+ switch (bfin_mem_access_type(addr, 2)) {
+ case BFIN_MEM_ACCESS_CORE:
+ case BFIN_MEM_ACCESS_CORE_ONLY:
+ *val = *address;
+ return true;
+ case BFIN_MEM_ACCESS_DMA:
+ dma_memcpy(val, address, 2);
+ return true;
+ case BFIN_MEM_ACCESS_ITEST:
+ isram_memcpy(val, address, 2);
+ return true;
+ default: /* invalid access */
+ return false;
}
-#endif
-
-
- return false;
}
/*
@@ -932,7 +908,7 @@ void show_stack(struct task_struct *task, unsigned long *stack)
ret_addr = 0;
if (!j && i % 8 == 0)
- printk("\n" KERN_NOTICE "%p:",addr);
+ printk(KERN_NOTICE "%p:",addr);
/* if it is an odd address, or zero, just skip it */
if (*addr & 0x1 || !*addr)
@@ -1022,9 +998,9 @@ void dump_bfin_process(struct pt_regs *fp)
printk(KERN_NOTICE "CPU = %d\n", current_thread_info()->cpu);
if (!((unsigned long)current->mm & 0x3) && (unsigned long)current->mm >= FIXED_CODE_START)
- verbose_printk(KERN_NOTICE "TEXT = 0x%p-0x%p DATA = 0x%p-0x%p\n"
- KERN_NOTICE " BSS = 0x%p-0x%p USER-STACK = 0x%p\n"
- KERN_NOTICE "\n",
+ verbose_printk(KERN_NOTICE
+ "TEXT = 0x%p-0x%p DATA = 0x%p-0x%p\n"
+ " BSS = 0x%p-0x%p USER-STACK = 0x%p\n\n",
(void *)current->mm->start_code,
(void *)current->mm->end_code,
(void *)current->mm->start_data,
@@ -1035,8 +1011,8 @@ void dump_bfin_process(struct pt_regs *fp)
else
verbose_printk(KERN_NOTICE "invalid mm\n");
} else
- verbose_printk(KERN_NOTICE "\n" KERN_NOTICE
- "No Valid process in current context\n");
+ verbose_printk(KERN_NOTICE
+ "No Valid process in current context\n");
#endif
}
@@ -1054,7 +1030,7 @@ void dump_bfin_mem(struct pt_regs *fp)
addr < (unsigned short *)((unsigned long)erraddr & ~0xF) + 0x10;
addr++) {
if (!((unsigned long)addr & 0xF))
- verbose_printk("\n" KERN_NOTICE "0x%p: ", addr);
+ verbose_printk(KERN_NOTICE "0x%p: ", addr);
if (!get_instruction(&val, addr)) {
val = 0;
@@ -1082,9 +1058,9 @@ void dump_bfin_mem(struct pt_regs *fp)
oops_in_progress)){
verbose_printk(KERN_NOTICE "Looks like this was a deferred error - sorry\n");
#ifndef CONFIG_DEBUG_HWERR
- verbose_printk(KERN_NOTICE "The remaining message may be meaningless\n"
- KERN_NOTICE "You should enable CONFIG_DEBUG_HWERR to get a"
- " better idea where it came from\n");
+ verbose_printk(KERN_NOTICE
+"The remaining message may be meaningless\n"
+"You should enable CONFIG_DEBUG_HWERR to get a better idea where it came from\n");
#else
/* If we are handling only one peripheral interrupt
* and current mm and pid are valid, and the last error
@@ -1140,9 +1116,10 @@ void show_regs(struct pt_regs *fp)
verbose_printk(KERN_NOTICE "%s", linux_banner);
- verbose_printk(KERN_NOTICE "\n" KERN_NOTICE "SEQUENCER STATUS:\t\t%s\n", print_tainted());
+ verbose_printk(KERN_NOTICE "\nSEQUENCER STATUS:\t\t%s\n",
+ print_tainted());
verbose_printk(KERN_NOTICE " SEQSTAT: %08lx IPEND: %04lx SYSCFG: %04lx\n",
- (long)fp->seqstat, fp->ipend, fp->syscfg);
+ (long)fp->seqstat, fp->ipend, fp->syscfg);
if ((fp->seqstat & SEQSTAT_EXCAUSE) == VEC_HWERR) {
verbose_printk(KERN_NOTICE " HWERRCAUSE: 0x%lx\n",
(fp->seqstat & SEQSTAT_HWERRCAUSE) >> 14);
@@ -1210,7 +1187,7 @@ unlock:
verbose_printk(KERN_NOTICE "ICPLB_FAULT_ADDR: %s\n", buf);
}
- verbose_printk(KERN_NOTICE "\n" KERN_NOTICE "PROCESSOR STATE:\n");
+ verbose_printk(KERN_NOTICE "PROCESSOR STATE:\n");
verbose_printk(KERN_NOTICE " R0 : %08lx R1 : %08lx R2 : %08lx R3 : %08lx\n",
fp->r0, fp->r1, fp->r2, fp->r3);
verbose_printk(KERN_NOTICE " R4 : %08lx R5 : %08lx R6 : %08lx R7 : %08lx\n",