Diffstat (limited to 'arch/mips/kernel')
-rw-r--r--  arch/mips/kernel/Makefile          1
-rw-r--r--  arch/mips/kernel/cpu-probe.c      62
-rw-r--r--  arch/mips/kernel/genex.S           8
-rw-r--r--  arch/mips/kernel/i8259.c           4
-rw-r--r--  arch/mips/kernel/irixsig.c        63
-rw-r--r--  arch/mips/kernel/linux32.c        14
-rw-r--r--  arch/mips/kernel/process.c       282
-rw-r--r--  arch/mips/kernel/scall32-o32.S    15
-rw-r--r--  arch/mips/kernel/scall64-64.S      4
-rw-r--r--  arch/mips/kernel/scall64-n32.S     8
-rw-r--r--  arch/mips/kernel/scall64-o32.S     6
-rw-r--r--  arch/mips/kernel/setup.c         439
-rw-r--r--  arch/mips/kernel/signal.c          8
-rw-r--r--  arch/mips/kernel/signal32.c        7
-rw-r--r--  arch/mips/kernel/smp-mt.c          2
-rw-r--r--  arch/mips/kernel/smtc-asm.S        2
-rw-r--r--  arch/mips/kernel/stacktrace.c     85
-rw-r--r--  arch/mips/kernel/syscall.c        60
-rw-r--r--  arch/mips/kernel/sysirix.c        22
-rw-r--r--  arch/mips/kernel/time.c           14
-rw-r--r--  arch/mips/kernel/traps.c         116
-rw-r--r--  arch/mips/kernel/vpe.c             6
22 files changed, 715 insertions, 513 deletions
diff --git a/arch/mips/kernel/Makefile b/arch/mips/kernel/Makefile
index 881c467c6982..cd9cec9e39e9 100644
--- a/arch/mips/kernel/Makefile
+++ b/arch/mips/kernel/Makefile
@@ -11,6 +11,7 @@ obj-y += cpu-probe.o branch.o entry.o genex.o irq.o process.o \
binfmt_irix-objs := irixelf.o irixinv.o irixioctl.o irixsig.o \
irix5sys.o sysirix.o
+obj-$(CONFIG_STACKTRACE) += stacktrace.o
obj-$(CONFIG_MODULES) += mips_ksyms.o module.o
obj-$(CONFIG_APM) += apm.o
diff --git a/arch/mips/kernel/cpu-probe.c b/arch/mips/kernel/cpu-probe.c
index aa2caa67299a..9fbf8430c849 100644
--- a/arch/mips/kernel/cpu-probe.c
+++ b/arch/mips/kernel/cpu-probe.c
@@ -38,15 +38,40 @@ static void r3081_wait(void)
static void r39xx_wait(void)
{
- unsigned long cfg = read_c0_conf();
- write_c0_conf(cfg | TX39_CONF_HALT);
+ local_irq_disable();
+ if (!need_resched())
+ write_c0_conf(read_c0_conf() | TX39_CONF_HALT);
+ local_irq_enable();
}
+/*
+ * There is a race when the WAIT instruction is executed with
+ * interrupts enabled.
+ * But it is implementation-dependent whether the pipeline restarts when
+ * a non-enabled interrupt is requested.
+ */
static void r4k_wait(void)
{
- __asm__(".set\tmips3\n\t"
- "wait\n\t"
- ".set\tmips0");
+ __asm__(" .set mips3 \n"
+ " wait \n"
+ " .set mips0 \n");
+}
+
+/*
+ * This variant is preferable as it allows testing need_resched and going to
+ * sleep depending on the outcome atomically. Unfortunately the "It is
+ * implementation-dependent whether the pipeline restarts when a non-enabled
+ * interrupt is requested" restriction in the MIPS32/MIPS64 architecture makes
+ * using this version a gamble.
+ */
+static void r4k_wait_irqoff(void)
+{
+ local_irq_disable();
+ if (!need_resched())
+ __asm__(" .set mips3 \n"
+ " wait \n"
+ " .set mips0 \n");
+ local_irq_enable();
}
/* The Au1xxx wait is available only if using 32khz counter or
@@ -56,17 +81,17 @@ int allow_au1k_wait;
static void au1k_wait(void)
{
/* using the wait instruction makes CP0 counter unusable */
- __asm__(".set mips3\n\t"
- "cache 0x14, 0(%0)\n\t"
- "cache 0x14, 32(%0)\n\t"
- "sync\n\t"
- "nop\n\t"
- "wait\n\t"
- "nop\n\t"
- "nop\n\t"
- "nop\n\t"
- "nop\n\t"
- ".set mips0\n\t"
+ __asm__(" .set mips3 \n"
+ " cache 0x14, 0(%0) \n"
+ " cache 0x14, 32(%0) \n"
+ " sync \n"
+ " nop \n"
+ " wait \n"
+ " nop \n"
+ " nop \n"
+ " nop \n"
+ " nop \n"
+ " .set mips0 \n"
: : "r" (au1k_wait));
}
@@ -111,7 +136,6 @@ static inline void check_wait(void)
case CPU_NEVADA:
case CPU_RM7000:
case CPU_RM9000:
- case CPU_TX49XX:
case CPU_4KC:
case CPU_4KEC:
case CPU_4KSC:
@@ -125,6 +149,10 @@ static inline void check_wait(void)
cpu_wait = r4k_wait;
printk(" available.\n");
break;
+ case CPU_TX49XX:
+ cpu_wait = r4k_wait_irqoff;
+ printk(" available.\n");
+ break;
case CPU_AU1000:
case CPU_AU1100:
case CPU_AU1500:
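The TX49xx change above moves that CPU to r4k_wait_irqoff(), which tests need_resched() with interrupts already disabled. A rough, hedged sketch of the race the new comments describe (not kernel code; platform_wait_insn() is an invented stand-in for the WAIT instruction):

#include <linux/irqflags.h>	/* local_irq_disable()/local_irq_enable() */
#include <linux/sched.h>	/* need_resched() */

extern void platform_wait_insn(void);	/* hypothetical wrapper around WAIT */

static void racy_idle(void)
{
	/* Interrupts are enabled: an IRQ can set TIF_NEED_RESCHED ... */
	if (!need_resched())
		/* ... right here, and we go to sleep on stale information. */
		platform_wait_insn();
}

static void race_free_idle(void)
{
	local_irq_disable();
	if (!need_resched())
		/*
		 * Safe only if WAIT resumes when a masked interrupt is
		 * requested, which is the implementation-dependent behaviour
		 * the comment above warns about.
		 */
		platform_wait_insn();
	local_irq_enable();
}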
diff --git a/arch/mips/kernel/genex.S b/arch/mips/kernel/genex.S
index 37fda3dcdfc5..af6ef2fd8300 100644
--- a/arch/mips/kernel/genex.S
+++ b/arch/mips/kernel/genex.S
@@ -220,8 +220,8 @@ NESTED(except_vec_vi_handler, 0, sp)
CLI
TRACE_IRQS_OFF
move a0, sp
- jalr v0
- j ret_from_irq
+ PTR_LA ra, ret_from_irq
+ jr v0
END(except_vec_vi_handler)
/*
@@ -349,8 +349,8 @@ NESTED(nmi_handler, PT_SIZE, sp)
.set at
__BUILD_\verbose \exception
move a0, sp
- jal do_\handler
- j ret_from_exception
+ PTR_LA ra, ret_from_exception
+ j do_\handler
END(handle_\exception)
.endm
diff --git a/arch/mips/kernel/i8259.c b/arch/mips/kernel/i8259.c
index ea36c8e8852c..48e3418c217b 100644
--- a/arch/mips/kernel/i8259.c
+++ b/arch/mips/kernel/i8259.c
@@ -302,11 +302,11 @@ static struct irqaction irq2 = {
};
static struct resource pic1_io_resource = {
- .name = "pic1", .start = 0x20, .end = 0x3f, .flags = IORESOURCE_BUSY
+ .name = "pic1", .start = 0x20, .end = 0x21, .flags = IORESOURCE_BUSY
};
static struct resource pic2_io_resource = {
- .name = "pic2", .start = 0xa0, .end = 0xbf, .flags = IORESOURCE_BUSY
+ .name = "pic2", .start = 0xa0, .end = 0xa1, .flags = IORESOURCE_BUSY
};
/*
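For context on the resource shrink above (background knowledge, not something the patch itself states): each 8259A decodes exactly two I/O ports, so reserving base and base+1 covers the whole chip:

/*
 * 8259A register pairs:
 *   master PIC: 0x20 (command), 0x21 (interrupt mask / data)
 *   slave  PIC: 0xa0 (command), 0xa1 (interrupt mask / data)
 * The old 0x20-0x3f and 0xa0-0xbf ranges claimed ports the PICs never decode.
 */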
diff --git a/arch/mips/kernel/irixsig.c b/arch/mips/kernel/irixsig.c
index 676e868d26fb..2132485caa74 100644
--- a/arch/mips/kernel/irixsig.c
+++ b/arch/mips/kernel/irixsig.c
@@ -17,6 +17,7 @@
#include <asm/ptrace.h>
#include <asm/uaccess.h>
+#include <asm/unistd.h>
#undef DEBUG_SIG
@@ -172,11 +173,12 @@ static inline int handle_signal(unsigned long sig, siginfo_t *info,
return ret;
}
-asmlinkage int do_irix_signal(sigset_t *oldset, struct pt_regs *regs)
+void do_irix_signal(struct pt_regs *regs)
{
struct k_sigaction ka;
siginfo_t info;
int signr;
+ sigset_t *oldset;
/*
* We want the common case to go fast, which is why we may in certain
@@ -184,19 +186,28 @@ asmlinkage int do_irix_signal(sigset_t *oldset, struct pt_regs *regs)
* if so.
*/
if (!user_mode(regs))
- return 1;
+ return;
- if (try_to_freeze())
- goto no_signal;
-
- if (!oldset)
+ if (test_thread_flag(TIF_RESTORE_SIGMASK))
+ oldset = &current->saved_sigmask;
+ else
oldset = &current->blocked;
signr = get_signal_to_deliver(&info, &ka, regs, NULL);
- if (signr > 0)
- return handle_signal(signr, &info, &ka, oldset, regs);
+ if (signr > 0) {
+ /* Whee! Actually deliver the signal. */
+ if (handle_signal(signr, &info, &ka, oldset, regs) == 0) {
+ /* a signal was successfully delivered; the saved
+ * sigmask will have been stored in the signal frame,
+ * and will be restored by sigreturn, so we can simply
+ * clear the TIF_RESTORE_SIGMASK flag */
+ if (test_thread_flag(TIF_RESTORE_SIGMASK))
+ clear_thread_flag(TIF_RESTORE_SIGMASK);
+ }
+
+ return;
+ }
-no_signal:
/*
* Whose code doesn't conform to the restartable syscall convention
* dies here!!! The li instruction, a single machine instruction,
@@ -208,8 +219,22 @@ no_signal:
regs->regs[2] == ERESTARTNOINTR) {
regs->cp0_epc -= 8;
}
+ if (regs->regs[2] == ERESTART_RESTARTBLOCK) {
+ regs->regs[2] = __NR_restart_syscall;
+ regs->regs[7] = regs->regs[26];
+ regs->cp0_epc -= 4;
+ }
+ regs->regs[0] = 0; /* Don't deal with this again. */
+ }
+
+ /*
+ * If there's no signal to deliver, we just put the saved sigmask
+ * back
+ */
+ if (test_thread_flag(TIF_RESTORE_SIGMASK)) {
+ clear_thread_flag(TIF_RESTORE_SIGMASK);
+ sigprocmask(SIG_SETMASK, &current->saved_sigmask, NULL);
}
- return 0;
}
asmlinkage void
@@ -298,6 +323,9 @@ struct sigact_irix5 {
int _unused0[2];
};
+#define SIG_SETMASK32 256 /* Goodie from SGI for BSD compatibility:
+ set only the low 32 bit of the sigset. */
+
#ifdef DEBUG_SIG
static inline void dump_sigact_irix5(struct sigact_irix5 *p)
{
@@ -413,7 +441,7 @@ asmlinkage int irix_sigprocmask(int how, irix_sigset_t __user *new,
asmlinkage int irix_sigsuspend(struct pt_regs *regs)
{
- sigset_t saveset, newset;
+ sigset_t newset;
sigset_t __user *uset;
uset = (sigset_t __user *) regs->regs[4];
@@ -422,18 +450,15 @@ asmlinkage int irix_sigsuspend(struct pt_regs *regs)
sigdelsetmask(&newset, ~_BLOCKABLE);
spin_lock_irq(&current->sighand->siglock);
- saveset = current->blocked;
+ current->saved_sigmask = current->blocked;
current->blocked = newset;
recalc_sigpending();
spin_unlock_irq(&current->sighand->siglock);
- regs->regs[2] = -EINTR;
- while (1) {
- current->state = TASK_INTERRUPTIBLE;
- schedule();
- if (do_irix_signal(&saveset, regs))
- return -EINTR;
- }
+ current->state = TASK_INTERRUPTIBLE;
+ schedule();
+ set_thread_flag(TIF_RESTORE_SIGMASK);
+ return -ERESTARTNOHAND;
}
/* hate hate hate... */
diff --git a/arch/mips/kernel/linux32.c b/arch/mips/kernel/linux32.c
index 450ac592da57..53f4171fc188 100644
--- a/arch/mips/kernel/linux32.c
+++ b/arch/mips/kernel/linux32.c
@@ -77,6 +77,8 @@ int cp_compat_stat(struct kstat *stat, struct compat_stat __user *statbuf)
memset(&tmp, 0, sizeof(tmp));
tmp.st_dev = new_encode_dev(stat->dev);
tmp.st_ino = stat->ino;
+ if (sizeof(tmp.st_ino) < sizeof(stat->ino) && tmp.st_ino != stat->ino)
+ return -EOVERFLOW;
tmp.st_mode = stat->mode;
tmp.st_nlink = stat->nlink;
SET_UID(tmp.st_uid, stat->uid);
@@ -991,7 +993,7 @@ struct sysctl_args32
unsigned int __unused[4];
};
-#ifdef CONFIG_SYSCTL
+#ifdef CONFIG_SYSCTL_SYSCALL
asmlinkage long sys32_sysctl(struct sysctl_args32 __user *args)
{
@@ -1032,14 +1034,14 @@ asmlinkage long sys32_sysctl(struct sysctl_args32 __user *args)
return error;
}
-#endif /* CONFIG_SYSCTL */
+#endif /* CONFIG_SYSCTL_SYSCALL */
asmlinkage long sys32_newuname(struct new_utsname __user * name)
{
int ret = 0;
down_read(&uts_sem);
- if (copy_to_user(name,&system_utsname,sizeof *name))
+ if (copy_to_user(name, utsname(), sizeof *name))
ret = -EFAULT;
up_read(&uts_sem);
@@ -1296,9 +1298,3 @@ _sys32_clone(nabi_no_regargs struct pt_regs regs)
return do_fork(clone_flags, newsp, &regs, 0,
parent_tidptr, child_tidptr);
}
-
-extern asmlinkage void sys_set_thread_area(u32 addr);
-asmlinkage void sys32_set_thread_area(u32 addr)
-{
- sys_set_thread_area(AA(addr));
-}
diff --git a/arch/mips/kernel/process.c b/arch/mips/kernel/process.c
index 7ab67f786bfe..045d987bc683 100644
--- a/arch/mips/kernel/process.c
+++ b/arch/mips/kernel/process.c
@@ -40,6 +40,7 @@
#include <asm/elf.h>
#include <asm/isadep.h>
#include <asm/inst.h>
+#include <asm/stacktrace.h>
#ifdef CONFIG_MIPS_MT_SMTC
#include <asm/mipsmtregs.h>
extern void smtc_idle_loop_hook(void);
@@ -273,104 +274,107 @@ long kernel_thread(int (*fn)(void *), void *arg, unsigned long flags)
return do_fork(flags | CLONE_VM | CLONE_UNTRACED, 0, &regs, 0, NULL, NULL);
}
-static struct mips_frame_info {
- void *func;
- unsigned long func_size;
- int frame_size;
- int pc_offset;
-} *schedule_frame, mfinfo[64];
-static int mfinfo_num;
+/*
+ * Stack frame info derived from a function's prologue (see get_frame_info).
+ */
+struct mips_frame_info {
+ void *func;
+ unsigned long func_size;
+ int frame_size;
+ int pc_offset;
+};
-static int __init get_frame_info(struct mips_frame_info *info)
+static inline int is_ra_save_ins(union mips_instruction *ip)
{
- int i;
- void *func = info->func;
- union mips_instruction *ip = (union mips_instruction *)func;
+ /* sw / sd $ra, offset($sp) */
+ return (ip->i_format.opcode == sw_op || ip->i_format.opcode == sd_op) &&
+ ip->i_format.rs == 29 &&
+ ip->i_format.rt == 31;
+}
+
+static inline int is_jal_jalr_jr_ins(union mips_instruction *ip)
+{
+ if (ip->j_format.opcode == jal_op)
+ return 1;
+ if (ip->r_format.opcode != spec_op)
+ return 0;
+ return ip->r_format.func == jalr_op || ip->r_format.func == jr_op;
+}
+
+static inline int is_sp_move_ins(union mips_instruction *ip)
+{
+ /* addiu/daddiu sp,sp,-imm */
+ if (ip->i_format.rs != 29 || ip->i_format.rt != 29)
+ return 0;
+ if (ip->i_format.opcode == addiu_op || ip->i_format.opcode == daddiu_op)
+ return 1;
+ return 0;
+}
+
+static int get_frame_info(struct mips_frame_info *info)
+{
+ union mips_instruction *ip = info->func;
+ unsigned max_insns = info->func_size / sizeof(union mips_instruction);
+ unsigned i;
+
info->pc_offset = -1;
info->frame_size = 0;
- for (i = 0; i < 128; i++, ip++) {
- /* if jal, jalr, jr, stop. */
- if (ip->j_format.opcode == jal_op ||
- (ip->r_format.opcode == spec_op &&
- (ip->r_format.func == jalr_op ||
- ip->r_format.func == jr_op)))
- break;
- if (info->func_size && i >= info->func_size / 4)
+ if (!ip)
+ goto err;
+
+ if (max_insns == 0)
+ max_insns = 128U; /* unknown function size */
+ max_insns = min(128U, max_insns);
+
+ for (i = 0; i < max_insns; i++, ip++) {
+
+ if (is_jal_jalr_jr_ins(ip))
break;
- if (
-#ifdef CONFIG_32BIT
- ip->i_format.opcode == addiu_op &&
-#endif
-#ifdef CONFIG_64BIT
- ip->i_format.opcode == daddiu_op &&
-#endif
- ip->i_format.rs == 29 &&
- ip->i_format.rt == 29) {
- /* addiu/daddiu sp,sp,-imm */
- if (info->frame_size)
- continue;
- info->frame_size = - ip->i_format.simmediate;
+ if (!info->frame_size) {
+ if (is_sp_move_ins(ip))
+ info->frame_size = - ip->i_format.simmediate;
+ continue;
}
-
- if (
-#ifdef CONFIG_32BIT
- ip->i_format.opcode == sw_op &&
-#endif
-#ifdef CONFIG_64BIT
- ip->i_format.opcode == sd_op &&
-#endif
- ip->i_format.rs == 29 &&
- ip->i_format.rt == 31) {
- /* sw / sd $ra, offset($sp) */
- if (info->pc_offset != -1)
- continue;
+ if (info->pc_offset == -1 && is_ra_save_ins(ip)) {
info->pc_offset =
ip->i_format.simmediate / sizeof(long);
+ break;
}
}
- if (info->pc_offset == -1 || info->frame_size == 0) {
- if (func == schedule)
- printk("Can't analyze prologue code at %p\n", func);
- info->pc_offset = -1;
- info->frame_size = 0;
- }
-
- return 0;
+ if (info->frame_size && info->pc_offset >= 0) /* nested */
+ return 0;
+ if (info->pc_offset < 0) /* leaf */
+ return 1;
+ /* prologue seems bogus... */
+err:
+ return -1;
}
+static struct mips_frame_info schedule_mfi __read_mostly;
+
static int __init frame_info_init(void)
{
- int i;
+ unsigned long size = 0;
#ifdef CONFIG_KALLSYMS
+ unsigned long ofs;
char *modname;
char namebuf[KSYM_NAME_LEN + 1];
- unsigned long start, size, ofs;
- extern char __sched_text_start[], __sched_text_end[];
- extern char __lock_text_start[], __lock_text_end[];
-
- start = (unsigned long)__sched_text_start;
- for (i = 0; i < ARRAY_SIZE(mfinfo); i++) {
- if (start == (unsigned long)schedule)
- schedule_frame = &mfinfo[i];
- if (!kallsyms_lookup(start, &size, &ofs, &modname, namebuf))
- break;
- mfinfo[i].func = (void *)(start + ofs);
- mfinfo[i].func_size = size;
- start += size - ofs;
- if (start >= (unsigned long)__lock_text_end)
- break;
- if (start == (unsigned long)__sched_text_end)
- start = (unsigned long)__lock_text_start;
- }
-#else
- mfinfo[0].func = schedule;
- schedule_frame = &mfinfo[0];
+
+ kallsyms_lookup((unsigned long)schedule, &size, &ofs, &modname, namebuf);
#endif
- for (i = 0; i < ARRAY_SIZE(mfinfo) && mfinfo[i].func; i++)
- get_frame_info(&mfinfo[i]);
+ schedule_mfi.func = schedule;
+ schedule_mfi.func_size = size;
+
+ get_frame_info(&schedule_mfi);
+
+ /*
+ * Without schedule() frame info, the results given by
+ * thread_saved_pc() and get_wchan() are not reliable.
+ */
+ if (schedule_mfi.pc_offset < 0)
+ printk("Can't analyze schedule() prologue at %p\n", schedule);
- mfinfo_num = i;
return 0;
}
@@ -386,54 +390,112 @@ unsigned long thread_saved_pc(struct task_struct *tsk)
/* New born processes are a special case */
if (t->reg31 == (unsigned long) ret_from_fork)
return t->reg31;
-
- if (!schedule_frame || schedule_frame->pc_offset < 0)
+ if (schedule_mfi.pc_offset < 0)
return 0;
- return ((unsigned long *)t->reg29)[schedule_frame->pc_offset];
+ return ((unsigned long *)t->reg29)[schedule_mfi.pc_offset];
}
-/* get_wchan - a maintenance nightmare^W^Wpain in the ass ... */
-unsigned long get_wchan(struct task_struct *p)
+
+#ifdef CONFIG_KALLSYMS
+/* used by show_backtrace() */
+unsigned long unwind_stack(struct task_struct *task, unsigned long *sp,
+ unsigned long pc, unsigned long *ra)
{
unsigned long stack_page;
- unsigned long pc;
-#ifdef CONFIG_KALLSYMS
- unsigned long frame;
-#endif
+ struct mips_frame_info info;
+ char *modname;
+ char namebuf[KSYM_NAME_LEN + 1];
+ unsigned long size, ofs;
+ int leaf;
+ extern void ret_from_irq(void);
+ extern void ret_from_exception(void);
- if (!p || p == current || p->state == TASK_RUNNING)
+ stack_page = (unsigned long)task_stack_page(task);
+ if (!stack_page)
return 0;
- stack_page = (unsigned long)task_stack_page(p);
- if (!stack_page || !mfinfo_num)
+ /*
+ * If we reached the bottom of interrupt context,
+ * return saved pc in pt_regs.
+ */
+ if (pc == (unsigned long)ret_from_irq ||
+ pc == (unsigned long)ret_from_exception) {
+ struct pt_regs *regs;
+ if (*sp >= stack_page &&
+ *sp + sizeof(*regs) <= stack_page + THREAD_SIZE - 32) {
+ regs = (struct pt_regs *)*sp;
+ pc = regs->cp0_epc;
+ if (__kernel_text_address(pc)) {
+ *sp = regs->regs[29];
+ *ra = regs->regs[31];
+ return pc;
+ }
+ }
+ return 0;
+ }
+ if (!kallsyms_lookup(pc, &size, &ofs, &modname, namebuf))
+ return 0;
+ /*
+ * Return ra if an exception occurred at the first instruction
+ */
+ if (unlikely(ofs == 0)) {
+ pc = *ra;
+ *ra = 0;
+ return pc;
+ }
+
+ info.func = (void *)(pc - ofs);
+ info.func_size = ofs; /* analyze from start to ofs */
+ leaf = get_frame_info(&info);
+ if (leaf < 0)
+ return 0;
+
+ if (*sp < stack_page ||
+ *sp + info.frame_size > stack_page + THREAD_SIZE - 32)
return 0;
- pc = thread_saved_pc(p);
+ if (leaf)
+ /*
+ * In some extreme cases get_frame_info() can wrongly
+ * consider a nested function to be a leaf one.
+ * In such cases, avoid always returning the
+ * same value.
+ */
+ pc = pc != *ra ? *ra : 0;
+ else
+ pc = ((unsigned long *)(*sp))[info.pc_offset];
+
+ *sp += info.frame_size;
+ *ra = 0;
+ return __kernel_text_address(pc) ? pc : 0;
+}
+#endif
+
+/*
+ * get_wchan - a maintenance nightmare^W^Wpain in the ass ...
+ */
+unsigned long get_wchan(struct task_struct *task)
+{
+ unsigned long pc = 0;
#ifdef CONFIG_KALLSYMS
- if (!in_sched_functions(pc))
- return pc;
+ unsigned long sp;
+ unsigned long ra = 0;
+#endif
- frame = p->thread.reg29 + schedule_frame->frame_size;
- do {
- int i;
+ if (!task || task == current || task->state == TASK_RUNNING)
+ goto out;
+ if (!task_stack_page(task))
+ goto out;
- if (frame < stack_page || frame > stack_page + THREAD_SIZE - 32)
- return 0;
+ pc = thread_saved_pc(task);
- for (i = mfinfo_num - 1; i >= 0; i--) {
- if (pc >= (unsigned long) mfinfo[i].func)
- break;
- }
- if (i < 0)
- break;
+#ifdef CONFIG_KALLSYMS
+ sp = task->thread.reg29 + schedule_mfi.frame_size;
- pc = ((unsigned long *)frame)[mfinfo[i].pc_offset];
- if (!mfinfo[i].frame_size)
- break;
- frame += mfinfo[i].frame_size;
- } while (in_sched_functions(pc));
+ while (in_sched_functions(pc))
+ pc = unwind_stack(task, &sp, pc, &ra);
#endif
+out:
return pc;
}
-
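The rewritten get_frame_info() above recognises a prologue by two instruction shapes. A hedged, self-contained illustration of what is_sp_move_ins()/is_ra_save_ins() match, using plain MIPS I-type bit fields instead of union mips_instruction (the helper names below are invented):

#include <stdint.h>

/* MIPS I-type layout: [31:26] opcode, [25:21] rs, [20:16] rt, [15:0] simm */
#define OP(i)	(((i) >> 26) & 0x3f)
#define RS(i)	(((i) >> 21) & 0x1f)
#define RT(i)	(((i) >> 16) & 0x1f)
#define SIMM(i)	((int16_t)((i) & 0xffff))

/* sw/sd $ra, offset($sp): opcode 0x2b (sw) or 0x3f (sd), rs = $sp(29), rt = $ra(31) */
static int looks_like_ra_save(uint32_t insn)
{
	return (OP(insn) == 0x2b || OP(insn) == 0x3f) &&
	       RS(insn) == 29 && RT(insn) == 31;
}

/* addiu/daddiu $sp, $sp, -frame: opcode 0x09 (addiu) or 0x19 (daddiu) */
static int looks_like_sp_adjust(uint32_t insn)
{
	/* Unlike the kernel helper, this also insists on a negative immediate. */
	return (OP(insn) == 0x09 || OP(insn) == 0x19) &&
	       RS(insn) == 29 && RT(insn) == 29 && SIMM(insn) < 0;
}

For a typical 32-bit prologue such as "addiu sp,sp,-32; sw ra,28(sp)", get_frame_info() records frame_size = 32 and pc_offset = 28 / sizeof(long) = 7.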
diff --git a/arch/mips/kernel/scall32-o32.S b/arch/mips/kernel/scall32-o32.S
index ba1bcd83c7d3..61362e6fa9ec 100644
--- a/arch/mips/kernel/scall32-o32.S
+++ b/arch/mips/kernel/scall32-o32.S
@@ -28,18 +28,7 @@
NESTED(handle_sys, PT_SIZE, sp)
.set noat
SAVE_SOME
-#ifdef CONFIG_TRACE_IRQFLAGS
- TRACE_IRQS_ON
-#ifdef CONFIG_64BIT
- LONG_L $8, PT_R8(sp)
- LONG_L $9, PT_R9(sp)
-#endif
- LONG_L $7, PT_R7(sp)
- LONG_L $6, PT_R6(sp)
- LONG_L $5, PT_R5(sp)
- LONG_L $4, PT_R4(sp)
- LONG_L $2, PT_R2(sp)
-#endif
+ TRACE_IRQS_ON_RELOAD
STI
.set at
@@ -662,6 +651,8 @@ einval: li v0, -EINVAL
sys sys_tee 4
sys sys_vmsplice 4
sys sys_move_pages 6
+ sys sys_set_robust_list 2
+ sys sys_get_robust_list 3
.endm
/* We pre-compute the number of _instruction_ bytes needed to
diff --git a/arch/mips/kernel/scall64-64.S b/arch/mips/kernel/scall64-64.S
index 939e172db953..6c7b5ed0ea6e 100644
--- a/arch/mips/kernel/scall64-64.S
+++ b/arch/mips/kernel/scall64-64.S
@@ -34,7 +34,7 @@ NESTED(handle_sys64, PT_SIZE, sp)
*/
.set noat
SAVE_SOME
- TRACE_IRQS_ON
+ TRACE_IRQS_ON_RELOAD
STI
.set at
#endif
@@ -466,3 +466,5 @@ sys_call_table:
PTR sys_tee /* 5265 */
PTR sys_vmsplice
PTR sys_move_pages
+ PTR sys_set_robust_list
+ PTR sys_get_robust_list
diff --git a/arch/mips/kernel/scall64-n32.S b/arch/mips/kernel/scall64-n32.S
index 98abbc5a9f13..6d9f18727ac5 100644
--- a/arch/mips/kernel/scall64-n32.S
+++ b/arch/mips/kernel/scall64-n32.S
@@ -33,7 +33,7 @@ NESTED(handle_sysn32, PT_SIZE, sp)
#ifndef CONFIG_MIPS32_O32
.set noat
SAVE_SOME
- TRACE_IRQS_ON
+ TRACE_IRQS_ON_RELOAD
STI
.set at
#endif
@@ -247,7 +247,7 @@ EXPORT(sysn32_call_table)
PTR sys_capset
PTR sys32_rt_sigpending /* 6125 */
PTR compat_sys_rt_sigtimedwait
- PTR sys_rt_sigqueueinfo
+ PTR sys32_rt_sigqueueinfo
PTR sysn32_rt_sigsuspend
PTR sys32_sigaltstack
PTR compat_sys_utime /* 6130 */
@@ -390,5 +390,7 @@ EXPORT(sysn32_call_table)
PTR sys_splice
PTR sys_sync_file_range
PTR sys_tee
- PTR sys_vmsplice /* 6271 */
+ PTR sys_vmsplice /* 6270 */
PTR sys_move_pages
+ PTR compat_sys_set_robust_list
+ PTR compat_sys_get_robust_list
diff --git a/arch/mips/kernel/scall64-o32.S b/arch/mips/kernel/scall64-o32.S
index 505c9ee54009..2e6d0673163e 100644
--- a/arch/mips/kernel/scall64-o32.S
+++ b/arch/mips/kernel/scall64-o32.S
@@ -28,7 +28,7 @@
NESTED(handle_sys, PT_SIZE, sp)
.set noat
SAVE_SOME
- TRACE_IRQS_ON
+ TRACE_IRQS_ON_RELOAD
STI
.set at
ld t1, PT_EPC(sp) # skip syscall on return
@@ -498,7 +498,7 @@ sys_call_table:
PTR sys_mknodat /* 4290 */
PTR sys_fchownat
PTR compat_sys_futimesat
- PTR compat_sys_newfstatat
+ PTR sys_newfstatat
PTR sys_unlinkat
PTR sys_renameat /* 4295 */
PTR sys_linkat
@@ -514,4 +514,6 @@ sys_call_table:
PTR sys_tee
PTR sys_vmsplice
PTR compat_sys_move_pages
+ PTR compat_sys_set_robust_list
+ PTR compat_sys_get_robust_list /* 4310 */
.size sys_call_table,.-sys_call_table
diff --git a/arch/mips/kernel/setup.c b/arch/mips/kernel/setup.c
index 8c2b596a136f..fdbb508661c5 100644
--- a/arch/mips/kernel/setup.c
+++ b/arch/mips/kernel/setup.c
@@ -10,29 +10,15 @@
* Copyright (C) 1999 Silicon Graphics, Inc.
* Copyright (C) 2000 2001, 2002 Maciej W. Rozycki
*/
-#include <linux/errno.h>
#include <linux/init.h>
#include <linux/ioport.h>
-#include <linux/sched.h>
-#include <linux/kernel.h>
-#include <linux/mm.h>
#include <linux/module.h>
-#include <linux/stddef.h>
-#include <linux/string.h>
-#include <linux/unistd.h>
-#include <linux/slab.h>
-#include <linux/user.h>
-#include <linux/utsname.h>
-#include <linux/a.out.h>
#include <linux/screen_info.h>
#include <linux/bootmem.h>
#include <linux/initrd.h>
-#include <linux/major.h>
-#include <linux/kdev_t.h>
#include <linux/root_dev.h>
#include <linux/highmem.h>
#include <linux/console.h>
-#include <linux/mmzone.h>
#include <linux/pfn.h>
#include <asm/addrspace.h>
@@ -96,6 +82,12 @@ void __init add_memory_region(phys_t start, phys_t size, long type)
int x = boot_mem_map.nr_map;
struct boot_mem_map_entry *prev = boot_mem_map.map + x - 1;
+ /* Sanity check */
+ if (start + size < start) {
+ printk("Trying to add an invalid memory region, skipped\n");
+ return;
+ }
+
/*
* Try to merge with previous entry if any. This is far less than
* perfect but is sufficient for most real world cases.
@@ -143,167 +135,132 @@ static void __init print_memory_map(void)
}
}
-static inline void parse_cmdline_early(void)
+/*
+ * Manage initrd
+ */
+#ifdef CONFIG_BLK_DEV_INITRD
+
+static int __init rd_start_early(char *p)
{
- char c = ' ', *to = command_line, *from = saved_command_line;
- unsigned long start_at, mem_size;
- int len = 0;
- int usermem = 0;
+ unsigned long start = memparse(p, &p);
- printk("Determined physical RAM map:\n");
- print_memory_map();
+#ifdef CONFIG_64BIT
+ /* HACK: Guess if the sign extension was forgotten */
+ if (start > 0x0000000080000000 && start < 0x00000000ffffffff)
+ start |= 0xffffffff00000000UL;
+#endif
+ initrd_start = start;
+ initrd_end += start;
- for (;;) {
- /*
- * "mem=XXX[kKmM]" defines a memory region from
- * 0 to <XXX>, overriding the determined size.
- * "mem=XXX[KkmM]@YYY[KkmM]" defines a memory region from
- * <YYY> to <YYY>+<XXX>, overriding the determined size.
- */
- if (c == ' ' && !memcmp(from, "mem=", 4)) {
- if (to != command_line)
- to--;
- /*
- * If a user specifies memory size, we
- * blow away any automatically generated
- * size.
- */
- if (usermem == 0) {
- boot_mem_map.nr_map = 0;
- usermem = 1;
- }
- mem_size = memparse(from + 4, &from);
- if (*from == '@')
- start_at = memparse(from + 1, &from);
- else
- start_at = 0;
- add_memory_region(start_at, mem_size, BOOT_MEM_RAM);
- }
- c = *(from++);
- if (!c)
- break;
- if (CL_SIZE <= ++len)
- break;
- *(to++) = c;
- }
- *to = '\0';
+ return 0;
+}
+early_param("rd_start", rd_start_early);
- if (usermem) {
- printk("User-defined physical RAM map:\n");
- print_memory_map();
- }
+static int __init rd_size_early(char *p)
+{
+ initrd_end += memparse(p, &p);
+
+ return 0;
}
+early_param("rd_size", rd_size_early);
-static inline int parse_rd_cmdline(unsigned long* rd_start, unsigned long* rd_end)
+static unsigned long __init init_initrd(void)
{
+ unsigned long tmp, end, size;
+ u32 *initrd_header;
+
+ ROOT_DEV = Root_RAM0;
+
/*
- * "rd_start=0xNNNNNNNN" defines the memory address of an initrd
- * "rd_size=0xNN" it's size
+ * Board specific code or command line parser should have
+ * already set up initrd_start and initrd_end. In these cases
+ * perform sanity checks and use them if all looks good.
*/
- unsigned long start = 0;
- unsigned long size = 0;
- unsigned long end;
- char cmd_line[CL_SIZE];
- char *start_str;
- char *size_str;
- char *tmp;
-
- strcpy(cmd_line, command_line);
- *command_line = 0;
- tmp = cmd_line;
- /* Ignore "rd_start=" strings in other parameters. */
- start_str = strstr(cmd_line, "rd_start=");
- if (start_str && start_str != cmd_line && *(start_str - 1) != ' ')
- start_str = strstr(start_str, " rd_start=");
- while (start_str) {
- if (start_str != cmd_line)
- strncat(command_line, tmp, start_str - tmp);
- start = memparse(start_str + 9, &start_str);
- tmp = start_str + 1;
- start_str = strstr(start_str, " rd_start=");
+ size = initrd_end - initrd_start;
+ if (initrd_end == 0 || size == 0) {
+ initrd_start = 0;
+ initrd_end = 0;
+ } else
+ return initrd_end;
+
+ end = (unsigned long)&_end;
+ tmp = PAGE_ALIGN(end) - sizeof(u32) * 2;
+ if (tmp < end)
+ tmp += PAGE_SIZE;
+
+ initrd_header = (u32 *)tmp;
+ if (initrd_header[0] == 0x494E5244) {
+ initrd_start = (unsigned long)&initrd_header[2];
+ initrd_end = initrd_start + initrd_header[1];
}
- if (*tmp)
- strcat(command_line, tmp);
-
- strcpy(cmd_line, command_line);
- *command_line = 0;
- tmp = cmd_line;
- /* Ignore "rd_size" strings in other parameters. */
- size_str = strstr(cmd_line, "rd_size=");
- if (size_str && size_str != cmd_line && *(size_str - 1) != ' ')
- size_str = strstr(size_str, " rd_size=");
- while (size_str) {
- if (size_str != cmd_line)
- strncat(command_line, tmp, size_str - tmp);
- size = memparse(size_str + 8, &size_str);
- tmp = size_str + 1;
- size_str = strstr(size_str, " rd_size=");
+ return initrd_end;
+}
+
+static void __init finalize_initrd(void)
+{
+ unsigned long size = initrd_end - initrd_start;
+
+ if (size == 0) {
+ printk(KERN_INFO "Initrd not found or empty");
+ goto disable;
+ }
+ if (CPHYSADDR(initrd_end) > PFN_PHYS(max_low_pfn)) {
+ printk("Initrd extends beyond end of memory");
+ goto disable;
}
- if (*tmp)
- strcat(command_line, tmp);
-#ifdef CONFIG_64BIT
- /* HACK: Guess if the sign extension was forgotten */
- if (start > 0x0000000080000000 && start < 0x00000000ffffffff)
- start |= 0xffffffff00000000UL;
+ reserve_bootmem(CPHYSADDR(initrd_start), size);
+ initrd_below_start_ok = 1;
+
+ printk(KERN_INFO "Initial ramdisk at: 0x%lx (%lu bytes)\n",
+ initrd_start, size);
+ return;
+disable:
+ printk(" - disabling initrd\n");
+ initrd_start = 0;
+ initrd_end = 0;
+}
+
+#else /* !CONFIG_BLK_DEV_INITRD */
+
+#define init_initrd() 0
+#define finalize_initrd() do {} while (0)
+
#endif
- end = start + size;
- if (start && end) {
- *rd_start = start;
- *rd_end = end;
- return 1;
- }
- return 0;
+/*
+ * Initialize the bootmem allocator. It also sets up initrd-related data
+ * if needed.
+ */
+#ifdef CONFIG_SGI_IP27
+
+static void __init bootmem_init(void)
+{
+ init_initrd();
+ finalize_initrd();
}
-#define MAXMEM HIGHMEM_START
-#define MAXMEM_PFN PFN_DOWN(MAXMEM)
+#else /* !CONFIG_SGI_IP27 */
-static inline void bootmem_init(void)
+static void __init bootmem_init(void)
{
- unsigned long start_pfn;
- unsigned long reserved_end = (unsigned long)&_end;
-#ifndef CONFIG_SGI_IP27
- unsigned long first_usable_pfn;
+ unsigned long reserved_end;
+ unsigned long highest = 0;
+ unsigned long mapstart = -1UL;
unsigned long bootmap_size;
int i;
-#endif
-#ifdef CONFIG_BLK_DEV_INITRD
- int initrd_reserve_bootmem = 0;
-
- /* Board specific code should have set up initrd_start and initrd_end */
- ROOT_DEV = Root_RAM0;
- if (parse_rd_cmdline(&initrd_start, &initrd_end)) {
- reserved_end = max(reserved_end, initrd_end);
- initrd_reserve_bootmem = 1;
- } else {
- unsigned long tmp;
- u32 *initrd_header;
-
- tmp = ((reserved_end + PAGE_SIZE-1) & PAGE_MASK) - sizeof(u32) * 2;
- if (tmp < reserved_end)
- tmp += PAGE_SIZE;
- initrd_header = (u32 *)tmp;
- if (initrd_header[0] == 0x494E5244) {
- initrd_start = (unsigned long)&initrd_header[2];
- initrd_end = initrd_start + initrd_header[1];
- reserved_end = max(reserved_end, initrd_end);
- initrd_reserve_bootmem = 1;
- }
- }
-#endif /* CONFIG_BLK_DEV_INITRD */
/*
- * Partially used pages are not usable - thus
- * we are rounding upwards.
+ * Init any data related to initrd. It's a nop if INITRD is
+ * not selected. Once that is done we can determine the lower bound
+ * of usable memory.
*/
- start_pfn = PFN_UP(CPHYSADDR(reserved_end));
+ reserved_end = init_initrd();
+ reserved_end = PFN_UP(CPHYSADDR(max(reserved_end, (unsigned long)&_end)));
-#ifndef CONFIG_SGI_IP27
- /* Find the highest page frame number we have available. */
- max_pfn = 0;
- first_usable_pfn = -1UL;
+ /*
+ * Find the highest page frame number we have available.
+ */
for (i = 0; i < boot_mem_map.nr_map; i++) {
unsigned long start, end;
@@ -312,56 +269,38 @@ static inline void bootmem_init(void)
start = PFN_UP(boot_mem_map.map[i].addr);
end = PFN_DOWN(boot_mem_map.map[i].addr
- + boot_mem_map.map[i].size);
+ + boot_mem_map.map[i].size);
- if (start >= end)
+ if (end > highest)
+ highest = end;
+ if (end <= reserved_end)
continue;
- if (end > max_pfn)
- max_pfn = end;
- if (start < first_usable_pfn) {
- if (start > start_pfn) {
- first_usable_pfn = start;
- } else if (end > start_pfn) {
- first_usable_pfn = start_pfn;
- }
- }
+ if (start >= mapstart)
+ continue;
+ mapstart = max(reserved_end, start);
}
/*
* Determine low and high memory ranges
*/
- max_low_pfn = max_pfn;
- if (max_low_pfn > MAXMEM_PFN) {
- max_low_pfn = MAXMEM_PFN;
-#ifndef CONFIG_HIGHMEM
- /* Maximum memory usable is what is directly addressable */
- printk(KERN_WARNING "Warning only %ldMB will be used.\n",
- MAXMEM >> 20);
- printk(KERN_WARNING "Use a HIGHMEM enabled kernel.\n");
+ if (highest > PFN_DOWN(HIGHMEM_START)) {
+#ifdef CONFIG_HIGHMEM
+ highstart_pfn = PFN_DOWN(HIGHMEM_START);
+ highend_pfn = highest;
#endif
+ highest = PFN_DOWN(HIGHMEM_START);
}
-#ifdef CONFIG_HIGHMEM
/*
- * Crude, we really should make a better attempt at detecting
- * highstart_pfn
+ * Initialize the boot-time allocator with low memory only.
*/
- highstart_pfn = highend_pfn = max_pfn;
- if (max_pfn > MAXMEM_PFN) {
- highstart_pfn = MAXMEM_PFN;
- printk(KERN_NOTICE "%ldMB HIGHMEM available.\n",
- (highend_pfn - highstart_pfn) >> (20 - PAGE_SHIFT));
- }
-#endif
-
- /* Initialize the boot-time allocator with low memory only. */
- bootmap_size = init_bootmem(first_usable_pfn, max_low_pfn);
+ bootmap_size = init_bootmem(mapstart, highest);
/*
* Register fully available low RAM pages with the bootmem allocator.
*/
for (i = 0; i < boot_mem_map.nr_map; i++) {
- unsigned long curr_pfn, last_pfn, size;
+ unsigned long start, end, size;
/*
* Reserve usable memory.
@@ -369,85 +308,50 @@ static inline void bootmem_init(void)
if (boot_mem_map.map[i].type != BOOT_MEM_RAM)
continue;
- /*
- * We are rounding up the start address of usable memory:
- */
- curr_pfn = PFN_UP(boot_mem_map.map[i].addr);
- if (curr_pfn >= max_low_pfn)
- continue;
- if (curr_pfn < start_pfn)
- curr_pfn = start_pfn;
-
- /*
- * ... and at the end of the usable range downwards:
- */
- last_pfn = PFN_DOWN(boot_mem_map.map[i].addr
+ start = PFN_UP(boot_mem_map.map[i].addr);
+ end = PFN_DOWN(boot_mem_map.map[i].addr
+ boot_mem_map.map[i].size);
-
- if (last_pfn > max_low_pfn)
- last_pfn = max_low_pfn;
-
/*
- * Only register lowmem part of lowmem segment with bootmem.
+ * We are rounding up the start address of usable memory
+ * and rounding the end of the usable range downwards.
*/
- size = last_pfn - curr_pfn;
- if (curr_pfn > PFN_DOWN(HIGHMEM_START))
- continue;
- if (curr_pfn + size - 1 > PFN_DOWN(HIGHMEM_START))
- size = PFN_DOWN(HIGHMEM_START) - curr_pfn;
- if (!size)
+ if (start >= max_low_pfn)
continue;
+ if (start < reserved_end)
+ start = reserved_end;
+ if (end > max_low_pfn)
+ end = max_low_pfn;
/*
- * ... finally, did all the rounding and playing
- * around just make the area go away?
+ * ... finally, is the area going away?
*/
- if (last_pfn <= curr_pfn)
+ if (end <= start)
continue;
+ size = end - start;
/* Register lowmem ranges */
- free_bootmem(PFN_PHYS(curr_pfn), PFN_PHYS(size));
- memory_present(0, curr_pfn, curr_pfn + size - 1);
+ free_bootmem(PFN_PHYS(start), size << PAGE_SHIFT);
+ memory_present(0, start, end);
}
- /* Reserve the bootmap memory. */
- reserve_bootmem(PFN_PHYS(first_usable_pfn), bootmap_size);
-#endif /* CONFIG_SGI_IP27 */
-
-#ifdef CONFIG_BLK_DEV_INITRD
- initrd_below_start_ok = 1;
- if (initrd_start) {
- unsigned long initrd_size = ((unsigned char *)initrd_end) -
- ((unsigned char *)initrd_start);
- const int width = sizeof(long) * 2;
-
- printk("Initial ramdisk at: 0x%p (%lu bytes)\n",
- (void *)initrd_start, initrd_size);
-
- if (CPHYSADDR(initrd_end) > PFN_PHYS(max_low_pfn)) {
- printk("initrd extends beyond end of memory "
- "(0x%0*Lx > 0x%0*Lx)\ndisabling initrd\n",
- width,
- (unsigned long long) CPHYSADDR(initrd_end),
- width,
- (unsigned long long) PFN_PHYS(max_low_pfn));
- initrd_start = initrd_end = 0;
- initrd_reserve_bootmem = 0;
- }
+ /*
+ * Reserve the bootmap memory.
+ */
+ reserve_bootmem(PFN_PHYS(mapstart), bootmap_size);
- if (initrd_reserve_bootmem)
- reserve_bootmem(CPHYSADDR(initrd_start), initrd_size);
- }
-#endif /* CONFIG_BLK_DEV_INITRD */
+ /*
+ * Reserve initrd memory if needed.
+ */
+ finalize_initrd();
}
+#endif /* CONFIG_SGI_IP27 */
+
/*
* arch_mem_init - initialize memory management subsystem
*
* o plat_mem_setup() detects the memory configuration and will record detected
* memory areas using add_memory_region.
- * o parse_cmdline_early() parses the command line for mem= options which,
- * iff detected, will override the results of the automatic detection.
*
* At this stage the memory configuration of the system is known to the
* kernel but generic memory management system is still entirely uninitialized.
@@ -465,25 +369,59 @@ static inline void bootmem_init(void)
* initialization hook for anything else was introduced.
*/
-extern void plat_mem_setup(void);
+static int usermem __initdata = 0;
+
+static int __init early_parse_mem(char *p)
+{
+ unsigned long start, size;
+
+ /*
+ * If a user specifies memory size, we
+ * blow away any automatically generated
+ * size.
+ */
+ if (usermem == 0) {
+ boot_mem_map.nr_map = 0;
+ usermem = 1;
+ }
+ start = 0;
+ size = memparse(p, &p);
+ if (*p == '@')
+ start = memparse(p + 1, &p);
+
+ add_memory_region(start, size, BOOT_MEM_RAM);
+ return 0;
+}
+early_param("mem", early_parse_mem);
static void __init arch_mem_init(char **cmdline_p)
{
+ extern void plat_mem_setup(void);
+
/* call board setup routine */
plat_mem_setup();
+ printk("Determined physical RAM map:\n");
+ print_memory_map();
+
strlcpy(command_line, arcs_cmdline, sizeof(command_line));
strlcpy(saved_command_line, command_line, COMMAND_LINE_SIZE);
*cmdline_p = command_line;
- parse_cmdline_early();
+ parse_early_param();
+
+ if (usermem) {
+ printk("User-defined physical RAM map:\n");
+ print_memory_map();
+ }
+
bootmem_init();
sparse_init();
paging_init();
}
-static inline void resource_init(void)
+static void __init resource_init(void)
{
int i;
@@ -504,10 +442,10 @@ static inline void resource_init(void)
start = boot_mem_map.map[i].addr;
end = boot_mem_map.map[i].addr + boot_mem_map.map[i].size - 1;
- if (start >= MAXMEM)
+ if (start >= HIGHMEM_START)
continue;
- if (end >= MAXMEM)
- end = MAXMEM - 1;
+ if (end >= HIGHMEM_START)
+ end = HIGHMEM_START - 1;
res = alloc_bootmem(sizeof(struct resource));
switch (boot_mem_map.map[i].type) {
@@ -536,9 +474,6 @@ static inline void resource_init(void)
}
}
-#undef MAXMEM
-#undef MAXMEM_PFN
-
void __init setup_arch(char **cmdline_p)
{
cpu_probe();
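A hedged worked example of what the new early_parse_mem() handler above does with a typical command line (the values are purely illustrative):

/*
 * "mem=96M"      ->  memparse() yields size = 0x06000000, start stays 0
 * "mem=96M@16M"  ->  size = 0x06000000, start = 0x01000000
 *
 * Either way the handler calls add_memory_region(start, size, BOOT_MEM_RAM);
 * the first mem= option seen also clears boot_mem_map.nr_map, so a
 * user-supplied map replaces the firmware-detected one.
 */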
diff --git a/arch/mips/kernel/signal.c b/arch/mips/kernel/signal.c
index 6b4d9be31615..b9d358e05214 100644
--- a/arch/mips/kernel/signal.c
+++ b/arch/mips/kernel/signal.c
@@ -424,15 +424,11 @@ void do_signal(struct pt_regs *regs)
if (!user_mode(regs))
return;
- if (try_to_freeze())
- goto no_signal;
-
if (test_thread_flag(TIF_RESTORE_SIGMASK))
oldset = &current->saved_sigmask;
else
oldset = &current->blocked;
-
signr = get_signal_to_deliver(&info, &ka, regs, NULL);
if (signr > 0) {
/* Whee! Actually deliver the signal. */
@@ -446,9 +442,10 @@ void do_signal(struct pt_regs *regs)
if (test_thread_flag(TIF_RESTORE_SIGMASK))
clear_thread_flag(TIF_RESTORE_SIGMASK);
}
+
+ return;
}
-no_signal:
/*
* Whose code doesn't conform to the restartable syscall convention
* dies here!!! The li instruction, a single machine instruction,
@@ -466,6 +463,7 @@ no_signal:
regs->regs[7] = regs->regs[26];
regs->cp0_epc -= 4;
}
+ regs->regs[0] = 0; /* Don't deal with this again. */
}
/*
diff --git a/arch/mips/kernel/signal32.c b/arch/mips/kernel/signal32.c
index f32a22997c3d..c86a5ddff050 100644
--- a/arch/mips/kernel/signal32.c
+++ b/arch/mips/kernel/signal32.c
@@ -815,9 +815,6 @@ void do_signal32(struct pt_regs *regs)
if (!user_mode(regs))
return;
- if (try_to_freeze())
- goto no_signal;
-
if (test_thread_flag(TIF_RESTORE_SIGMASK))
oldset = &current->saved_sigmask;
else
@@ -836,9 +833,10 @@ void do_signal32(struct pt_regs *regs)
if (test_thread_flag(TIF_RESTORE_SIGMASK))
clear_thread_flag(TIF_RESTORE_SIGMASK);
}
+
+ return;
}
-no_signal:
/*
* Whose code doesn't conform to the restartable syscall convention
* dies here!!! The li instruction, a single machine instruction,
@@ -856,6 +854,7 @@ no_signal:
regs->regs[7] = regs->regs[26];
regs->cp0_epc -= 4;
}
+ regs->regs[0] = 0; /* Don't deal with this again. */
}
/*
diff --git a/arch/mips/kernel/smp-mt.c b/arch/mips/kernel/smp-mt.c
index 93429a4d3012..766253c44f3f 100644
--- a/arch/mips/kernel/smp-mt.c
+++ b/arch/mips/kernel/smp-mt.c
@@ -203,7 +203,7 @@ void plat_smp_setup(void)
write_vpe_c0_config( read_c0_config());
/* make sure there are no software interrupts pending */
- write_vpe_c0_cause(read_vpe_c0_cause() & ~(C_SW1|C_SW0));
+ write_vpe_c0_cause(0);
/* Propagate Config7 */
write_vpe_c0_config7(read_c0_config7());
diff --git a/arch/mips/kernel/smtc-asm.S b/arch/mips/kernel/smtc-asm.S
index 4cc3dea36612..76cb31d57482 100644
--- a/arch/mips/kernel/smtc-asm.S
+++ b/arch/mips/kernel/smtc-asm.S
@@ -8,7 +8,7 @@
#include <asm/regdef.h>
#include <asm/asmmacro.h>
#include <asm/stackframe.h>
-#include <asm/stackframe.h>
+#include <asm/irqflags.h>
/*
* "Software Interrupt" linkage.
diff --git a/arch/mips/kernel/stacktrace.c b/arch/mips/kernel/stacktrace.c
new file mode 100644
index 000000000000..4aabe526a68e
--- /dev/null
+++ b/arch/mips/kernel/stacktrace.c
@@ -0,0 +1,85 @@
+/*
+ * arch/mips/kernel/stacktrace.c
+ *
+ * Stack trace management functions
+ *
+ * Copyright (C) 2006 Atsushi Nemoto <anemo@mba.ocn.ne.jp>
+ */
+#include <linux/sched.h>
+#include <linux/stacktrace.h>
+#include <asm/stacktrace.h>
+
+/*
+ * Save stack-backtrace addresses into a stack_trace buffer:
+ */
+static void save_raw_context_stack(struct stack_trace *trace,
+ unsigned long reg29)
+{
+ unsigned long *sp = (unsigned long *)reg29;
+ unsigned long addr;
+
+ while (!kstack_end(sp)) {
+ addr = *sp++;
+ if (__kernel_text_address(addr)) {
+ if (trace->skip > 0)
+ trace->skip--;
+ else
+ trace->entries[trace->nr_entries++] = addr;
+ if (trace->nr_entries >= trace->max_entries)
+ break;
+ }
+ }
+}
+
+static void save_context_stack(struct stack_trace *trace,
+ struct task_struct *task, struct pt_regs *regs)
+{
+ unsigned long sp = regs->regs[29];
+#ifdef CONFIG_KALLSYMS
+ unsigned long ra = regs->regs[31];
+ unsigned long pc = regs->cp0_epc;
+
+ if (raw_show_trace || !__kernel_text_address(pc)) {
+ unsigned long stack_page =
+ (unsigned long)task_stack_page(task);
+ if (stack_page && sp >= stack_page &&
+ sp <= stack_page + THREAD_SIZE - 32)
+ save_raw_context_stack(trace, sp);
+ return;
+ }
+ do {
+ if (trace->skip > 0)
+ trace->skip--;
+ else
+ trace->entries[trace->nr_entries++] = pc;
+ if (trace->nr_entries >= trace->max_entries)
+ break;
+ pc = unwind_stack(task, &sp, pc, &ra);
+ } while (pc);
+#else
+ save_raw_context_stack(trace, sp);
+#endif
+}
+
+/*
+ * Save stack-backtrace addresses into a stack_trace buffer.
+ */
+void save_stack_trace(struct stack_trace *trace, struct task_struct *task)
+{
+ struct pt_regs dummyregs;
+ struct pt_regs *regs = &dummyregs;
+
+ WARN_ON(trace->nr_entries || !trace->max_entries);
+
+ if (task && task != current) {
+ regs->regs[29] = task->thread.reg29;
+ regs->regs[31] = 0;
+ regs->cp0_epc = task->thread.reg31;
+ } else {
+ if (!task)
+ task = current;
+ prepare_frametrace(regs);
+ }
+
+ save_context_stack(trace, task, regs);
+}
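A hedged usage sketch for the new file, matching the two-argument save_stack_trace() it defines (dump_task_trace() is an invented caller, not part of the patch):

#include <linux/kernel.h>	/* ARRAY_SIZE, printk */
#include <linux/sched.h>	/* struct task_struct */
#include <linux/stacktrace.h>

static void dump_task_trace(struct task_struct *tsk)
{
	unsigned long entries[16];
	struct stack_trace trace = {
		.nr_entries	= 0,
		.max_entries	= ARRAY_SIZE(entries),
		.entries	= entries,
		.skip		= 0,
	};
	unsigned int i;

	save_stack_trace(&trace, tsk);
	for (i = 0; i < trace.nr_entries; i++)
		printk(" [<%lx>]\n", entries[i]);
}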
diff --git a/arch/mips/kernel/syscall.c b/arch/mips/kernel/syscall.c
index 0721314db657..26e1a7e78d13 100644
--- a/arch/mips/kernel/syscall.c
+++ b/arch/mips/kernel/syscall.c
@@ -231,7 +231,7 @@ out:
*/
asmlinkage int sys_uname(struct old_utsname __user * name)
{
- if (name && !copy_to_user(name, &system_utsname, sizeof (*name)))
+ if (name && !copy_to_user(name, utsname(), sizeof (*name)))
return 0;
return -EFAULT;
}
@@ -248,22 +248,27 @@ asmlinkage int sys_olduname(struct oldold_utsname __user * name)
if (!access_ok(VERIFY_WRITE,name,sizeof(struct oldold_utsname)))
return -EFAULT;
- error = __copy_to_user(&name->sysname,&system_utsname.sysname,__OLD_UTS_LEN);
- error -= __put_user(0,name->sysname+__OLD_UTS_LEN);
- error -= __copy_to_user(&name->nodename,&system_utsname.nodename,__OLD_UTS_LEN);
- error -= __put_user(0,name->nodename+__OLD_UTS_LEN);
- error -= __copy_to_user(&name->release,&system_utsname.release,__OLD_UTS_LEN);
- error -= __put_user(0,name->release+__OLD_UTS_LEN);
- error -= __copy_to_user(&name->version,&system_utsname.version,__OLD_UTS_LEN);
- error -= __put_user(0,name->version+__OLD_UTS_LEN);
- error -= __copy_to_user(&name->machine,&system_utsname.machine,__OLD_UTS_LEN);
- error = __put_user(0,name->machine+__OLD_UTS_LEN);
+ error = __copy_to_user(&name->sysname, &utsname()->sysname,
+ __OLD_UTS_LEN);
+ error -= __put_user(0, name->sysname + __OLD_UTS_LEN);
+ error -= __copy_to_user(&name->nodename, &utsname()->nodename,
+ __OLD_UTS_LEN);
+ error -= __put_user(0, name->nodename + __OLD_UTS_LEN);
+ error -= __copy_to_user(&name->release, &utsname()->release,
+ __OLD_UTS_LEN);
+ error -= __put_user(0, name->release + __OLD_UTS_LEN);
+ error -= __copy_to_user(&name->version, &utsname()->version,
+ __OLD_UTS_LEN);
+ error -= __put_user(0, name->version + __OLD_UTS_LEN);
+ error -= __copy_to_user(&name->machine, &utsname()->machine,
+ __OLD_UTS_LEN);
+ error = __put_user(0, name->machine + __OLD_UTS_LEN);
error = error ? -EFAULT : 0;
return error;
}
-void sys_set_thread_area(unsigned long addr)
+asmlinkage int sys_set_thread_area(unsigned long addr)
{
struct thread_info *ti = task_thread_info(current);
@@ -271,6 +276,8 @@ void sys_set_thread_area(unsigned long addr)
/* If some future MIPS implementation has this register in hardware,
* we will need to update it here (and in context switches). */
+
+ return 0;
}
asmlinkage int _sys_sysmips(int cmd, long arg1, int arg2, int arg3)
@@ -399,3 +406,32 @@ asmlinkage void bad_stack(void)
{
do_exit(SIGSEGV);
}
+
+/*
+ * Do a system call from kernel instead of calling sys_execve so we
+ * end up with proper pt_regs.
+ */
+int kernel_execve(const char *filename, char *const argv[], char *const envp[])
+{
+ register unsigned long __a0 asm("$4") = (unsigned long) filename;
+ register unsigned long __a1 asm("$5") = (unsigned long) argv;
+ register unsigned long __a2 asm("$6") = (unsigned long) envp;
+ register unsigned long __a3 asm("$7");
+ unsigned long __v0;
+
+ __asm__ volatile (" \n"
+ " .set noreorder \n"
+ " li $2, %5 # __NR_execve \n"
+ " syscall \n"
+ " move %0, $2 \n"
+ " .set reorder \n"
+ : "=&r" (__v0), "=r" (__a3)
+ : "r" (__a0), "r" (__a1), "r" (__a2), "i" (__NR_execve)
+ : "$2", "$8", "$9", "$10", "$11", "$12", "$13", "$14", "$15", "$24",
+ "memory");
+
+ if (__a3 == 0)
+ return __v0;
+
+ return -__v0;
+}
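kernel_execve() above leans on the MIPS syscall convention: the number goes in $v0 ($2), arguments in $a0-$a3, the result comes back in $v0, and $a3 ($7) acts as the error flag (zero on success, otherwise $v0 holds a positive errno). A hedged sketch of the same convention, patterned on the inline asm above; getpid is used only because it takes no arguments:

#include <asm/unistd.h>		/* __NR_getpid */

static inline long raw_getpid(void)
{
	register unsigned long v0 asm("$2");
	register unsigned long a3 asm("$7");

	__asm__ volatile(
	"	li	$2, %2			\n"
	"	syscall				\n"
	: "=r" (v0), "=r" (a3)
	: "i" (__NR_getpid)
	: "$8", "$9", "$10", "$11", "$12", "$13", "$14", "$15", "$24",
	  "memory");

	return a3 ? -(long)v0 : (long)v0;
}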
diff --git a/arch/mips/kernel/sysirix.c b/arch/mips/kernel/sysirix.c
index 1137dd6ea7aa..93c74fefff76 100644
--- a/arch/mips/kernel/sysirix.c
+++ b/arch/mips/kernel/sysirix.c
@@ -884,7 +884,7 @@ asmlinkage int irix_getdomainname(char __user *name, int len)
down_read(&uts_sem);
if (len > __NEW_UTS_LEN)
len = __NEW_UTS_LEN;
- err = copy_to_user(name, system_utsname.domainname, len) ? -EFAULT : 0;
+ err = copy_to_user(name, utsname()->domainname, len) ? -EFAULT : 0;
up_read(&uts_sem);
return err;
@@ -1127,11 +1127,11 @@ struct iuname {
asmlinkage int irix_uname(struct iuname __user *buf)
{
down_read(&uts_sem);
- if (copy_from_user(system_utsname.sysname, buf->sysname, 65)
- || copy_from_user(system_utsname.nodename, buf->nodename, 65)
- || copy_from_user(system_utsname.release, buf->release, 65)
- || copy_from_user(system_utsname.version, buf->version, 65)
- || copy_from_user(system_utsname.machine, buf->machine, 65)) {
+ if (copy_from_user(utsname()->sysname, buf->sysname, 65)
+ || copy_from_user(utsname()->nodename, buf->nodename, 65)
+ || copy_from_user(utsname()->release, buf->release, 65)
+ || copy_from_user(utsname()->version, buf->version, 65)
+ || copy_from_user(utsname()->machine, buf->machine, 65)) {
return -EFAULT;
}
up_read(&uts_sem);
@@ -1739,12 +1739,13 @@ struct irix_dirent32_callback {
#define ROUND_UP32(x) (((x)+sizeof(u32)-1) & ~(sizeof(u32)-1))
static int irix_filldir32(void *__buf, const char *name,
- int namlen, loff_t offset, ino_t ino, unsigned int d_type)
+ int namlen, loff_t offset, u64 ino, unsigned int d_type)
{
struct irix_dirent32 __user *dirent;
struct irix_dirent32_callback *buf = __buf;
unsigned short reclen = ROUND_UP32(NAME_OFFSET32(dirent) + namlen + 1);
int err = 0;
+ u32 d_ino;
#ifdef DEBUG_GETDENTS
printk("\nirix_filldir32[reclen<%d>namlen<%d>count<%d>]",
@@ -1753,12 +1754,15 @@ static int irix_filldir32(void *__buf, const char *name,
buf->error = -EINVAL; /* only used if we fail.. */
if (reclen > buf->count)
return -EINVAL;
+ d_ino = ino;
+ if (sizeof(d_ino) < sizeof(ino) && d_ino != ino)
+ return -EOVERFLOW;
dirent = buf->previous;
if (dirent)
err = __put_user(offset, &dirent->d_off);
dirent = buf->current_dir;
err |= __put_user(dirent, &buf->previous);
- err |= __put_user(ino, &dirent->d_ino);
+ err |= __put_user(d_ino, &dirent->d_ino);
err |= __put_user(reclen, &dirent->d_reclen);
err |= copy_to_user((char __user *)dirent->d_name, name, namlen) ? -EFAULT : 0;
err |= __put_user(0, &dirent->d_name[namlen]);
@@ -1837,7 +1841,7 @@ struct irix_dirent64_callback {
#define ROUND_UP64(x) (((x)+sizeof(u64)-1) & ~(sizeof(u64)-1))
static int irix_filldir64(void *__buf, const char *name,
- int namlen, loff_t offset, ino_t ino, unsigned int d_type)
+ int namlen, loff_t offset, u64 ino, unsigned int d_type)
{
struct irix_dirent64 __user *dirent;
struct irix_dirent64_callback * buf = __buf;
diff --git a/arch/mips/kernel/time.c b/arch/mips/kernel/time.c
index 170cb67f4ede..845c7e55505d 100644
--- a/arch/mips/kernel/time.c
+++ b/arch/mips/kernel/time.c
@@ -47,8 +47,6 @@
/*
* forward reference
*/
-extern volatile unsigned long wall_jiffies;
-
DEFINE_SPINLOCK(rtc_lock);
/*
@@ -159,7 +157,6 @@ void (*mips_hpt_init)(unsigned int);
void do_gettimeofday(struct timeval *tv)
{
unsigned long seq;
- unsigned long lost;
unsigned long usec, sec;
unsigned long max_ntp_tick;
@@ -168,8 +165,6 @@ void do_gettimeofday(struct timeval *tv)
usec = do_gettimeoffset();
- lost = jiffies - wall_jiffies;
-
/*
* If time_adjust is negative then NTP is slowing the clock
* so make sure not to go into next possible interval.
@@ -178,11 +173,7 @@ void do_gettimeofday(struct timeval *tv)
if (unlikely(time_adjust < 0)) {
max_ntp_tick = (USEC_PER_SEC / HZ) - tickadj;
usec = min(usec, max_ntp_tick);
-
- if (lost)
- usec += lost * max_ntp_tick;
- } else if (unlikely(lost))
- usec += lost * (USEC_PER_SEC / HZ);
+ }
sec = xtime.tv_sec;
usec += (xtime.tv_nsec / 1000);
@@ -217,7 +208,6 @@ int do_settimeofday(struct timespec *tv)
* made, and then undo it!
*/
nsec -= do_gettimeoffset() * NSEC_PER_USEC;
- nsec -= (jiffies - wall_jiffies) * tick_nsec;
wtm_sec = wall_to_monotonic.tv_sec + (xtime.tv_sec - sec);
wtm_nsec = wall_to_monotonic.tv_nsec + (xtime.tv_nsec - nsec);
@@ -434,7 +424,7 @@ irqreturn_t timer_interrupt(int irq, void *dev_id, struct pt_regs *regs)
/*
* call the generic timer interrupt handling
*/
- do_timer(regs);
+ do_timer(1);
/*
* If we have an externally synchronized Linux clock, then update
diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c
index 954a198494ef..b7292a56d4cd 100644
--- a/arch/mips/kernel/traps.c
+++ b/arch/mips/kernel/traps.c
@@ -20,6 +20,7 @@
#include <linux/spinlock.h>
#include <linux/kallsyms.h>
#include <linux/bootmem.h>
+#include <linux/interrupt.h>
#include <asm/bootinfo.h>
#include <asm/branch.h>
@@ -40,6 +41,7 @@
#include <asm/mmu_context.h>
#include <asm/watch.h>
#include <asm/types.h>
+#include <asm/stacktrace.h>
extern asmlinkage void handle_int(void);
extern asmlinkage void handle_tlbm(void);
@@ -72,28 +74,62 @@ void (*board_nmi_handler_setup)(void);
void (*board_ejtag_handler_setup)(void);
void (*board_bind_eic_interrupt)(int irq, int regset);
-/*
- * These constant is for searching for possible module text segments.
- * MODULE_RANGE is a guess of how much space is likely to be vmalloced.
- */
-#define MODULE_RANGE (8*1024*1024)
+
+static void show_raw_backtrace(unsigned long reg29)
+{
+ unsigned long *sp = (unsigned long *)reg29;
+ unsigned long addr;
+
+ printk("Call Trace:");
+#ifdef CONFIG_KALLSYMS
+ printk("\n");
+#endif
+ while (!kstack_end(sp)) {
+ addr = *sp++;
+ if (__kernel_text_address(addr))
+ print_ip_sym(addr);
+ }
+ printk("\n");
+}
+
+#ifdef CONFIG_KALLSYMS
+int raw_show_trace;
+static int __init set_raw_show_trace(char *str)
+{
+ raw_show_trace = 1;
+ return 1;
+}
+__setup("raw_show_trace", set_raw_show_trace);
+#endif
+
+static void show_backtrace(struct task_struct *task, struct pt_regs *regs)
+{
+ unsigned long sp = regs->regs[29];
+ unsigned long ra = regs->regs[31];
+ unsigned long pc = regs->cp0_epc;
+
+ if (raw_show_trace || !__kernel_text_address(pc)) {
+ show_raw_backtrace(sp);
+ return;
+ }
+ printk("Call Trace:\n");
+ do {
+ print_ip_sym(pc);
+ pc = unwind_stack(task, &sp, pc, &ra);
+ } while (pc);
+ printk("\n");
+}
/*
* This routine abuses get_user()/put_user() to reference pointers
* with at least a bit of error checking ...
*/
-void show_stack(struct task_struct *task, unsigned long *sp)
+static void show_stacktrace(struct task_struct *task, struct pt_regs *regs)
{
const int field = 2 * sizeof(unsigned long);
long stackdata;
int i;
-
- if (!sp) {
- if (task && task != current)
- sp = (unsigned long *) task->thread.reg29;
- else
- sp = (unsigned long *) &sp;
- }
+ unsigned long *sp = (unsigned long *)regs->regs[29];
printk("Stack :");
i = 0;
@@ -114,32 +150,26 @@ void show_stack(struct task_struct *task, unsigned long *sp)
i++;
}
printk("\n");
+ show_backtrace(task, regs);
}
-void show_trace(struct task_struct *task, unsigned long *stack)
+void show_stack(struct task_struct *task, unsigned long *sp)
{
- const int field = 2 * sizeof(unsigned long);
- unsigned long addr;
-
- if (!stack) {
- if (task && task != current)
- stack = (unsigned long *) task->thread.reg29;
- else
- stack = (unsigned long *) &stack;
- }
-
- printk("Call Trace:");
-#ifdef CONFIG_KALLSYMS
- printk("\n");
-#endif
- while (!kstack_end(stack)) {
- addr = *stack++;
- if (__kernel_text_address(addr)) {
- printk(" [<%0*lx>] ", field, addr);
- print_symbol("%s\n", addr);
+ struct pt_regs regs;
+ if (sp) {
+ regs.regs[29] = (unsigned long)sp;
+ regs.regs[31] = 0;
+ regs.cp0_epc = 0;
+ } else {
+ if (task && task != current) {
+ regs.regs[29] = task->thread.reg29;
+ regs.regs[31] = 0;
+ regs.cp0_epc = task->thread.reg31;
+ } else {
+ prepare_frametrace(&regs);
}
}
- printk("\n");
+ show_stacktrace(task, &regs);
}
/*
@@ -147,9 +177,10 @@ void show_trace(struct task_struct *task, unsigned long *stack)
*/
void dump_stack(void)
{
- unsigned long stack;
+ struct pt_regs regs;
- show_trace(current, &stack);
+ prepare_frametrace(&regs);
+ show_backtrace(current, &regs);
}
EXPORT_SYMBOL(dump_stack);
@@ -268,8 +299,7 @@ void show_registers(struct pt_regs *regs)
print_modules();
printk("Process %s (pid: %d, threadinfo=%p, task=%p)\n",
current->comm, current->pid, current_thread_info(), current);
- show_stack(current, (long *) regs->regs[29]);
- show_trace(current, (long *) regs->regs[29]);
+ show_stacktrace(current, regs);
show_code((unsigned int *) regs->cp0_epc);
printk("\n");
}
@@ -292,6 +322,16 @@ NORET_TYPE void ATTRIB_NORET die(const char * str, struct pt_regs * regs)
printk("%s[#%d]:\n", str, ++die_counter);
show_registers(regs);
spin_unlock_irq(&die_lock);
+
+ if (in_interrupt())
+ panic("Fatal exception in interrupt");
+
+ if (panic_on_oops) {
+ printk(KERN_EMERG "Fatal exception: panic in 5 seconds\n");
+ ssleep(5);
+ panic("Fatal exception");
+ }
+
do_exit(SIGSEGV);
}
diff --git a/arch/mips/kernel/vpe.c b/arch/mips/kernel/vpe.c
index 9ee0ec2cd067..51ddd2166898 100644
--- a/arch/mips/kernel/vpe.c
+++ b/arch/mips/kernel/vpe.c
@@ -768,10 +768,16 @@ int vpe_run(struct vpe * v)
*/
write_tc_c0_tcbind((read_tc_c0_tcbind() & ~TCBIND_CURVPE) | v->minor);
+ write_vpe_c0_vpeconf0(read_vpe_c0_vpeconf0() & ~(VPECONF0_VPA));
+
+ back_to_back_c0_hazard();
+
/* Set up the XTC bit in vpeconf0 to point at our tc */
write_vpe_c0_vpeconf0( (read_vpe_c0_vpeconf0() & ~(VPECONF0_XTC))
| (t->index << VPECONF0_XTC_SHIFT));
+ back_to_back_c0_hazard();
+
/* enable this VPE */
write_vpe_c0_vpeconf0(read_vpe_c0_vpeconf0() | VPECONF0_VPA);