Diffstat (limited to 'arch/arc')
 -rw-r--r--  arch/arc/include/asm/delay.h     |  5
 -rw-r--r--  arch/arc/include/asm/ptrace.h    |  2
 -rw-r--r--  arch/arc/include/asm/sections.h  |  1
 -rw-r--r--  arch/arc/include/asm/spinlock.h  |  9
 -rw-r--r--  arch/arc/include/asm/syscall.h   |  5
 -rw-r--r--  arch/arc/include/asm/uaccess.h   |  4
 -rw-r--r--  arch/arc/kernel/entry.S          |  4
 -rw-r--r--  arch/arc/kernel/head.S           |  7
 -rw-r--r--  arch/arc/kernel/irq.c            |  3
 -rw-r--r--  arch/arc/kernel/ptrace.c         |  2
 -rw-r--r--  arch/arc/kernel/setup.c          |  3
 -rw-r--r--  arch/arc/kernel/signal.c         | 25
 -rw-r--r--  arch/arc/kernel/unaligned.c      |  6
 -rw-r--r--  arch/arc/lib/strchr-700.S        | 10
 -rw-r--r--  arch/arc/mm/fault.c              |  6
 15 files changed, 58 insertions(+), 34 deletions(-)
diff --git a/arch/arc/include/asm/delay.h b/arch/arc/include/asm/delay.h
index 442ce5d0f709..43de30256981 100644
--- a/arch/arc/include/asm/delay.h
+++ b/arch/arc/include/asm/delay.h
@@ -53,11 +53,10 @@ static inline void __udelay(unsigned long usecs)
{
unsigned long loops;
- /* (long long) cast ensures 64 bit MPY - real or emulated
+ /* (u64) cast ensures 64 bit MPY - real or emulated
* HZ * 4295 is pre-evaluated by gcc - hence only 2 mpy ops
*/
- loops = ((long long)(usecs * 4295 * HZ) *
- (long long)(loops_per_jiffy)) >> 32;
+ loops = ((u64) usecs * 4295 * HZ * loops_per_jiffy) >> 32;
__delay(loops);
}
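
The subtlety in this hunk: the old (long long) cast was applied only after usecs * 4295 * HZ had already been evaluated (and possibly wrapped) in 32-bit arithmetic; casting the first operand to u64 keeps the whole left-to-right chain in 64-bit math. A standalone sketch of the wrap, with hypothetical HZ and loops_per_jiffy values:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint32_t usecs = 20000, HZ = 100, loops_per_jiffy = 500000;

        /* old: usecs * 4295 * HZ is a 32-bit product; for 20 ms at
         * HZ=100 the true value 8,590,000,000 wraps to 65,408 */
        uint64_t loops_old = ((int64_t)(usecs * 4295 * HZ) *
                              (int64_t)loops_per_jiffy) >> 32;

        /* new: the cast on the first operand widens the whole chain */
        uint64_t loops_new =
            ((uint64_t)usecs * 4295 * HZ * loops_per_jiffy) >> 32;

        printf("old=%llu new=%llu\n",      /* old=7 new=1000007 */
               (unsigned long long)loops_old,
               (unsigned long long)loops_new);
        return 0;
    }
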
diff --git a/arch/arc/include/asm/ptrace.h b/arch/arc/include/asm/ptrace.h
index 6179de7e07c2..2046a89a57cf 100644
--- a/arch/arc/include/asm/ptrace.h
+++ b/arch/arc/include/asm/ptrace.h
@@ -52,12 +52,14 @@ struct pt_regs {
/*to distinguish bet excp, syscall, irq */
union {
+ struct {
#ifdef CONFIG_CPU_BIG_ENDIAN
/* so that assembly code is same for LE/BE */
unsigned long orig_r8:16, event:16;
#else
unsigned long event:16, orig_r8:16;
#endif
+ };
long orig_r8_word;
};
};
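
The union fix deserves a note: in C, every member of a union starts at offset zero, and that includes each of the two bit-fields, so event and orig_r8 aliased each other (and the low half of orig_r8_word) instead of packing into one word as the assembly expects. Wrapping them in an anonymous struct restores the intended layout; the entry.S hunk further down switches stw to st so the full overlaid word is written in one go. A host-side sketch of the aliasing (GCC semantics, little-endian output assumed):

    #include <stdio.h>

    /* broken layout: in a union each bit-field member begins at bit 0 */
    union before {
        unsigned long event:16, orig_r8:16;
        long orig_r8_word;
    };

    /* fixed layout: the anonymous struct (C11 / GNU C) keeps the two
     * halves consecutive, together overlaying orig_r8_word */
    union after {
        struct {
            unsigned long event:16, orig_r8:16;
        };
        long orig_r8_word;
    };

    int main(void)
    {
        union before b = { .orig_r8_word = 0 };
        union after  a = { .orig_r8_word = 0 };

        b.event = 0x10; b.orig_r8 = 0x20;
        a.event = 0x10; a.orig_r8 = 0x20;

        printf("before=0x%lx after=0x%lx\n",
               b.orig_r8_word, a.orig_r8_word);  /* 0x20 vs 0x200010 */
        return 0;
    }
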
diff --git a/arch/arc/include/asm/sections.h b/arch/arc/include/asm/sections.h
index 6fc1159dfefe..764f1e3ba752 100644
--- a/arch/arc/include/asm/sections.h
+++ b/arch/arc/include/asm/sections.h
@@ -11,7 +11,6 @@
#include <asm-generic/sections.h>
-extern char _int_vec_base_lds[];
extern char __arc_dccm_base[];
extern char __dtb_start[];
diff --git a/arch/arc/include/asm/spinlock.h b/arch/arc/include/asm/spinlock.h
index f158197ac5b0..b6a8c2dfbe6e 100644
--- a/arch/arc/include/asm/spinlock.h
+++ b/arch/arc/include/asm/spinlock.h
@@ -45,7 +45,14 @@ static inline int arch_spin_trylock(arch_spinlock_t *lock)
static inline void arch_spin_unlock(arch_spinlock_t *lock)
{
- lock->slock = __ARCH_SPIN_LOCK_UNLOCKED__;
+ unsigned int tmp = __ARCH_SPIN_LOCK_UNLOCKED__;
+
+ __asm__ __volatile__(
+ " ex %0, [%1] \n"
+ : "+r" (tmp)
+ : "r"(&(lock->slock))
+ : "memory");
+
smp_mb();
}
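
Context for the unlock change: the acquire side in this same header already takes the lock with the ARC EX (atomic exchange) instruction, so a plain store on release was the only non-EX access to slock; the patch makes release go through the same atomic primitive. Roughly what the acquire path looks like around this kernel version (quoted from memory, treat as a sketch):

    static inline void arch_spin_lock(arch_spinlock_t *lock)
    {
        unsigned int tmp = __ARCH_SPIN_LOCK_LOCKED__;

        __asm__ __volatile__(
        "1:  ex  %0, [%1]      \n"   /* atomically swap tmp <-> slock */
        "    breq  %0, %2, 1b  \n"   /* already locked: spin          */
        : "+&r" (tmp)
        : "r"(&(lock->slock)), "ir"(__ARCH_SPIN_LOCK_LOCKED__)
        : "memory");

        smp_mb();
    }
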
diff --git a/arch/arc/include/asm/syscall.h b/arch/arc/include/asm/syscall.h
index 33ab3048e9b2..29de09804306 100644
--- a/arch/arc/include/asm/syscall.h
+++ b/arch/arc/include/asm/syscall.h
@@ -18,7 +18,7 @@ static inline long
syscall_get_nr(struct task_struct *task, struct pt_regs *regs)
{
if (user_mode(regs) && in_syscall(regs))
- return regs->orig_r8;
+ return regs->r8;
else
return -1;
}
@@ -26,8 +26,7 @@ syscall_get_nr(struct task_struct *task, struct pt_regs *regs)
static inline void
syscall_rollback(struct task_struct *task, struct pt_regs *regs)
{
- /* XXX: I can't fathom how pt_regs->r8 will be clobbered ? */
- regs->r8 = regs->orig_r8;
+ regs->r0 = regs->orig_r0;
}
static inline long
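
Both hunks follow from the ARC syscall ABI: r8 carries the syscall number while r0 carries the first argument on entry and the return value on exit. Reporting the number therefore means reading the live r8, and rolling a syscall back means repairing r0 from its saved copy, orig_r0. A hedged fragment showing how a restart consumes this (modelled on the usual signal-return path; details illustrative):

    /* sketch: undo an -ERESTART* return and re-run the trap insn */
    if (in_syscall(regs) && regs->r0 == -ERESTARTNOINTR) {
        regs->r0 = regs->orig_r0;   /* r0 was clobbered by the retval */
        regs->ret -= 4;             /* back PC up to the trap insn    */
    }
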
diff --git a/arch/arc/include/asm/uaccess.h b/arch/arc/include/asm/uaccess.h
index 32420824375b..30c9baffa96f 100644
--- a/arch/arc/include/asm/uaccess.h
+++ b/arch/arc/include/asm/uaccess.h
@@ -43,7 +43,7 @@
* Because it essentially checks if buffer end is within limit and @len is
* non-ngeative, which implies that buffer start will be within limit too.
*
- * The reason for rewriting being, for majorit yof cases, @len is generally
+ * The reason for rewriting being, for majority of cases, @len is generally
* compile time constant, causing first sub-expression to be compile time
* subsumed.
*
@@ -53,7 +53,7 @@
*
*/
#define __user_ok(addr, sz) (((sz) <= TASK_SIZE) && \
- (((addr)+(sz)) <= get_fs()))
+ ((addr) <= (get_fs() - (sz))))
#define __access_ok(addr, sz) (unlikely(__kernel_ok) || \
likely(__user_ok((addr), (sz))))
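
The __user_ok rewrite closes a 32-bit wraparound: with the old form, a huge addr could make addr + sz wrap past zero and slip under the limit. The new form is overflow-free, because sz <= TASK_SIZE <= get_fs() guarantees get_fs() - sz cannot underflow. A standalone demonstration (TASK_SIZE value hypothetical):

    #include <stdint.h>
    #include <stdio.h>

    #define TASK_SIZE 0x60000000u   /* hypothetical user/kernel split */

    int main(void)
    {
        uint32_t limit = TASK_SIZE;              /* get_fs(), user mode */
        uint32_t addr = 0xfffff000u, sz = 0x2000u;

        /* old: addr + sz wraps to 0x1000 and wrongly passes */
        printf("old=%d\n", sz <= TASK_SIZE && addr + sz <= limit);

        /* new: no intermediate result can wrap */
        printf("new=%d\n", sz <= TASK_SIZE && addr <= limit - sz);
        return 0;
    }
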
diff --git a/arch/arc/kernel/entry.S b/arch/arc/kernel/entry.S
index 0c6d664d4a83..6dbe359c760d 100644
--- a/arch/arc/kernel/entry.S
+++ b/arch/arc/kernel/entry.S
@@ -498,7 +498,7 @@ tracesys_exit:
trap_with_param:
; stop_pc info by gdb needs this info
- stw orig_r8_IS_BRKPT, [sp, PT_orig_r8]
+ st orig_r8_IS_BRKPT, [sp, PT_orig_r8]
mov r0, r12
lr r1, [efa]
@@ -723,7 +723,7 @@ not_exception:
; things to what they were, before returning from L2 context
;----------------------------------------------------------------
- ldw r9, [sp, PT_orig_r8] ; get orig_r8 to make sure it is
+ ld r9, [sp, PT_orig_r8] ; get orig_r8 to make sure it is
brne r9, orig_r8_IS_IRQ2, 149f ; infact a L2 ISR ret path
ld r9, [sp, PT_status32] ; get statu32_l2 (saved in pt_regs)
diff --git a/arch/arc/kernel/head.S b/arch/arc/kernel/head.S
index 006dec3fc353..0f944f024513 100644
--- a/arch/arc/kernel/head.S
+++ b/arch/arc/kernel/head.S
@@ -27,11 +27,16 @@ stext:
; Don't clobber r0-r4 yet. It might have bootloader provided info
;-------------------------------------------------------------------
+ sr @_int_vec_base_lds, [AUX_INTR_VEC_BASE]
+
#ifdef CONFIG_SMP
; Only Boot (Master) proceeds. Others wait in platform dependent way
; IDENTITY Reg [ 3 2 1 0 ]
; (cpu-id) ^^^ => Zero for UP ARC700
; => #Core-ID if SMP (Master 0)
+ ; Note that non-boot CPUs might not land here if halt-on-reset and
+ ; instead breathe life from @first_lines_of_secondary, but we still
+ ; need to make sure only boot cpu takes this path.
GET_CPU_ID r5
cmp r5, 0
jnz arc_platform_smp_wait_to_boot
@@ -96,6 +101,8 @@ stext:
first_lines_of_secondary:
+ sr @_int_vec_base_lds, [AUX_INTR_VEC_BASE]
+
; setup per-cpu idle task as "current" on this CPU
ld r0, [@secondary_idle_tsk]
SET_CURR_TASK_ON_CPU r0, r1
diff --git a/arch/arc/kernel/irq.c b/arch/arc/kernel/irq.c
index 8115fa531575..a199471ce01e 100644
--- a/arch/arc/kernel/irq.c
+++ b/arch/arc/kernel/irq.c
@@ -24,7 +24,6 @@
* -Needed for each CPU (hence not foldable into init_IRQ)
*
* what it does ?
- * -setup Vector Table Base Reg - in case Linux not linked at 0x8000_0000
* -Disable all IRQs (on CPU side)
* -Optionally, setup the High priority Interrupts as Level 2 IRQs
*/
@@ -32,8 +31,6 @@ void __cpuinit arc_init_IRQ(void)
{
int level_mask = 0;
- write_aux_reg(AUX_INTR_VEC_BASE, _int_vec_base_lds);
-
/* Disable all IRQs: enable them as devices request */
write_aux_reg(AUX_IENABLE, 0);
diff --git a/arch/arc/kernel/ptrace.c b/arch/arc/kernel/ptrace.c
index c6a81c58d0f3..0851604bb9cd 100644
--- a/arch/arc/kernel/ptrace.c
+++ b/arch/arc/kernel/ptrace.c
@@ -92,7 +92,7 @@ static int genregs_set(struct task_struct *target,
REG_IN_CHUNK(scratch, callee, ptregs); /* pt_regs[bta..orig_r8] */
REG_IN_CHUNK(callee, efa, cregs); /* callee_regs[r25..r13] */
REG_IGNORE_ONE(efa); /* efa update invalid */
- REG_IN_ONE(stop_pc, &ptregs->ret); /* stop_pc: PC update */
+ REG_IGNORE_ONE(stop_pc); /* PC updated via @ret */
return ret;
}
diff --git a/arch/arc/kernel/setup.c b/arch/arc/kernel/setup.c
index b2b3731dd1e9..2d7786b69a8a 100644
--- a/arch/arc/kernel/setup.c
+++ b/arch/arc/kernel/setup.c
@@ -47,10 +47,7 @@ void __cpuinit read_arc_build_cfg_regs(void)
READ_BCR(AUX_IDENTITY, cpu->core);
cpu->timers = read_aux_reg(ARC_REG_TIMERS_BCR);
-
cpu->vec_base = read_aux_reg(AUX_INTR_VEC_BASE);
- if (cpu->vec_base == 0)
- cpu->vec_base = (unsigned int)_int_vec_base_lds;
READ_BCR(ARC_REG_D_UNCACH_BCR, uncached_space);
cpu->uncached_base = uncached_space.start << 24;
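
The head.S, irq.c and setup.c hunks are one logical change: the interrupt vector base is now programmed in early boot assembly, on the boot CPU and on (halt-on-reset) secondaries alike, rather than deferred to arc_init_IRQ(); the zero-value fallback in setup.c becomes dead code because the aux register is always valid by the time it is read back. The removed C call and the added sr instruction are the same auxiliary-register store; write_aux_reg() expands to roughly the following (constraint letters illustrative, not a verbatim quote of arcregs.h):

    #define write_aux_reg(reg, val)          \
        __asm__ __volatile__(                \
        "    sr   %0, [%1]   \n"             \
        :                                    \
        : "r"(val), "i"(reg))   /* same store head.S now does earlier */
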
diff --git a/arch/arc/kernel/signal.c b/arch/arc/kernel/signal.c
index ee6ef2f60a28..7e95e1a86510 100644
--- a/arch/arc/kernel/signal.c
+++ b/arch/arc/kernel/signal.c
@@ -101,7 +101,6 @@ SYSCALL_DEFINE0(rt_sigreturn)
{
struct rt_sigframe __user *sf;
unsigned int magic;
- int err;
struct pt_regs *regs = current_pt_regs();
/* Always make any pending restarted system calls return -EINTR */
@@ -119,15 +118,16 @@ SYSCALL_DEFINE0(rt_sigreturn)
if (!access_ok(VERIFY_READ, sf, sizeof(*sf)))
goto badframe;
- err = restore_usr_regs(regs, sf);
- err |= __get_user(magic, &sf->sigret_magic);
- if (err)
+ if (__get_user(magic, &sf->sigret_magic))
goto badframe;
if (unlikely(is_do_ss_needed(magic)))
if (restore_altstack(&sf->uc.uc_stack))
goto badframe;
+ if (restore_usr_regs(regs, sf))
+ goto badframe;
+
/* Don't restart from sigreturn */
syscall_wont_restart(regs);
@@ -191,6 +191,15 @@ setup_rt_frame(int signo, struct k_sigaction *ka, siginfo_t *info,
return 1;
/*
+ * w/o SA_SIGINFO, struct ucontext is partially populated (only
+ * uc_mcontext/uc_sigmask) for kernel's normal user state preservation
+ * during signal handler execution. This works for SA_SIGINFO as well
+ * although the semantics are now overloaded (the same reg state can be
+ * inspected by userland: but are they allowed to fiddle with it ?
+ */
+ err |= stash_usr_regs(sf, regs, set);
+
+ /*
* SA_SIGINFO requires 3 args to signal handler:
* #1: sig-no (common to any handler)
* #2: struct siginfo
@@ -213,14 +222,6 @@ setup_rt_frame(int signo, struct k_sigaction *ka, siginfo_t *info,
magic = MAGIC_SIGALTSTK;
}
- /*
- * w/o SA_SIGINFO, struct ucontext is partially populated (only
- * uc_mcontext/uc_sigmask) for kernel's normal user state preservation
- * during signal handler execution. This works for SA_SIGINFO as well
- * although the semantics are now overloaded (the same reg state can be
- * inspected by userland: but are they allowed to fiddle with it ?
- */
- err |= stash_usr_regs(sf, regs, set);
err |= __put_user(magic, &sf->sigret_magic);
if (err)
return err;
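
Two ordering fixes here. In rt_sigreturn(), user registers are restored only after the magic and the altstack have been read back successfully, so a bad frame is rejected before regs is modified. In setup_rt_frame(), the stash moves above the SA_SIGINFO block because that block repurposes registers as handler arguments; with the old order the snapshot captured already-clobbered values. Illustrative fragment (the argument setup itself is outside this hunk):

    /* fixed order: snapshot pristine state, then clobber with args */
    err |= stash_usr_regs(sf, regs, set);
    if (ka->sa.sa_flags & SA_SIGINFO) {
        regs->r1 = (unsigned long)&sf->info;   /* handler arg #2 */
        regs->r2 = (unsigned long)&sf->uc;     /* handler arg #3 */
    }
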
diff --git a/arch/arc/kernel/unaligned.c b/arch/arc/kernel/unaligned.c
index 4cd81633febd..116d3e09b5b5 100644
--- a/arch/arc/kernel/unaligned.c
+++ b/arch/arc/kernel/unaligned.c
@@ -233,6 +233,12 @@ int misaligned_fixup(unsigned long address, struct pt_regs *regs,
regs->status32 &= ~STATUS_DE_MASK;
} else {
regs->ret += state.instr_len;
+
+ /* handle zero-overhead-loop */
+ if ((regs->ret == regs->lp_end) && (regs->lp_count)) {
+ regs->ret = regs->lp_start;
+ regs->lp_count--;
+ }
}
return 0;
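
Background for the fixup: ARC zero-overhead loops are driven by the LP_START/LP_END/LP_COUNT registers; whenever the next PC would equal LP_END with LP_COUNT non-zero, the hardware jumps to LP_START and decrements LP_COUNT without any branch instruction. Since the emulation advances ret by hand, it must replay that rule, or a misaligned access sitting last in a loop body would fall straight out of the loop. The added logic, as an equivalent helper:

    /* sketch of the hardware sequencing rule being emulated */
    static unsigned long next_pc(unsigned long pc, unsigned long instr_len,
                                 struct pt_regs *regs)
    {
        unsigned long next = pc + instr_len;

        if (next == regs->lp_end && regs->lp_count) {
            regs->lp_count--;
            return regs->lp_start;   /* wrap to loop top, no branch */
        }
        return next;
    }
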
diff --git a/arch/arc/lib/strchr-700.S b/arch/arc/lib/strchr-700.S
index 99c10475d477..9c548c7cf001 100644
--- a/arch/arc/lib/strchr-700.S
+++ b/arch/arc/lib/strchr-700.S
@@ -39,9 +39,18 @@ ARC_ENTRY strchr
ld.a r2,[r0,4]
sub r12,r6,r7
bic r12,r12,r6
+#ifdef __LITTLE_ENDIAN__
and r7,r12,r4
breq r7,0,.Loop ; For speed, we want this branch to be unaligned.
b .Lfound_char ; Likewise this one.
+#else
+ and r12,r12,r4
+ breq r12,0,.Loop ; For speed, we want this branch to be unaligned.
+ lsr_s r12,r12,7
+ bic r2,r7,r6
+ b.d .Lfound_char_b
+ and_s r2,r2,r12
+#endif
; /* We require this code address to be unaligned for speed... */
.Laligned:
ld_s r2,[r0]
@@ -95,6 +104,7 @@ ARC_ENTRY strchr
lsr r7,r7,7
bic r2,r7,r6
+.Lfound_char_b:
norm r2,r2
sub_s r0,r0,4
asr_s r2,r2,3
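
The routine scans a word at a time with the classic zero-byte detector that the sub/bic/and sequence implements; the catch is that the detector is only exact at the least significant matching byte, and bytes above a hit can carry false positives from borrow propagation. Little-endian scans from the least significant end, so that never mattered; big-endian scans from the most significant end, hence the extra lsr/bic/and cleanup this patch adds. The trick itself, in portable C:

    #include <stdint.h>
    #include <stdio.h>

    /* bit 7 of each byte is set if that byte of x is zero -- exact for
     * the lowest such byte, with possible false positives above it */
    static uint32_t zero_byte_mask(uint32_t x)
    {
        return (x - 0x01010101u) & ~x & 0x80808080u;
    }

    int main(void)
    {
        uint32_t word = 0x61626364u;            /* bytes 'a' 'b' 'c' 'd' */
        uint32_t xored = word ^ 0x63636363u;    /* zero where byte == 'c' */

        /* prints 0x00808000: a true hit at the 'c' byte plus a false
         * positive one byte above it -- harmless when searching from
         * the bottom, fatal when searching from the top */
        printf("0x%08x\n", zero_byte_mask(xored));
        return 0;
    }
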
diff --git a/arch/arc/mm/fault.c b/arch/arc/mm/fault.c
index 689ffd86d5e9..331a0846628e 100644
--- a/arch/arc/mm/fault.c
+++ b/arch/arc/mm/fault.c
@@ -16,7 +16,7 @@
#include <linux/kdebug.h>
#include <asm/pgalloc.h>
-static int handle_vmalloc_fault(struct mm_struct *mm, unsigned long address)
+static int handle_vmalloc_fault(unsigned long address)
{
/*
* Synchronize this task's top level page-table
@@ -26,7 +26,7 @@ static int handle_vmalloc_fault(struct mm_struct *mm, unsigned long address)
pud_t *pud, *pud_k;
pmd_t *pmd, *pmd_k;
- pgd = pgd_offset_fast(mm, address);
+ pgd = pgd_offset_fast(current->active_mm, address);
pgd_k = pgd_offset_k(address);
if (!pgd_present(*pgd_k))
@@ -72,7 +72,7 @@ void do_page_fault(struct pt_regs *regs, int write, unsigned long address,
* nothing more.
*/
if (address >= VMALLOC_START && address <= VMALLOC_END) {
- ret = handle_vmalloc_fault(mm, address);
+ ret = handle_vmalloc_fault(address);
if (unlikely(ret))
goto bad_area_nosemaphore;
else
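
Dropping the mm argument is more than a cleanup: do_page_fault() derives its local mm from tsk->mm, which is NULL when the fault is taken in a kernel thread, yet kernel threads can legitimately touch vmalloc addresses. current->active_mm (the mm borrowed from the last user task) is always valid, and its page tables are the ones that need the kernel pgd entry synced in. A minimal illustration of the distinction (hypothetical check, not from the patch):

    struct mm_struct *own  = current->mm;        /* NULL in a kthread */
    struct mm_struct *walk = current->active_mm; /* never NULL        */

    BUG_ON(walk == NULL);   /* every running task borrows *some* mm */
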