path: root/arch/mips/kernel
Diffstat (limited to 'arch/mips/kernel')
-rw-r--r--  arch/mips/kernel/Makefile        |  10
-rw-r--r--  arch/mips/kernel/cevt-bcm1480.c  | 153
-rw-r--r--  arch/mips/kernel/cevt-gt641xx.c  | 142
-rw-r--r--  arch/mips/kernel/cevt-r4k.c      | 290
-rw-r--r--  arch/mips/kernel/cevt-sb1250.c   | 152
-rw-r--r--  arch/mips/kernel/cevt-txx9.c     | 171
-rw-r--r--  arch/mips/kernel/cpu-probe.c     |   5
-rw-r--r--  arch/mips/kernel/csrc-bcm1480.c  |  54
-rw-r--r--  arch/mips/kernel/csrc-sb1250.c   |  70
-rw-r--r--  arch/mips/kernel/genex.S         |   2
-rw-r--r--  arch/mips/kernel/head.S          |  20
-rw-r--r--  arch/mips/kernel/i8253.c         |  25
-rw-r--r--  arch/mips/kernel/irixelf.c       |  16
-rw-r--r--  arch/mips/kernel/irixsig.c       |  11
-rw-r--r--  arch/mips/kernel/irq-rm7000.c    |   2
-rw-r--r--  arch/mips/kernel/irq-rm9000.c    |   2
-rw-r--r--  arch/mips/kernel/irq_cpu.c       |   2
-rw-r--r--  arch/mips/kernel/linux32.c       |   2
-rw-r--r--  arch/mips/kernel/module.c        |   2
-rw-r--r--  arch/mips/kernel/proc.c          |   2
-rw-r--r--  arch/mips/kernel/ptrace.c        |  22
-rw-r--r--  arch/mips/kernel/ptrace32.c      |   4
-rw-r--r--  arch/mips/kernel/scall64-n32.S   |   2
-rw-r--r--  arch/mips/kernel/smtc.c          |  57
-rw-r--r--  arch/mips/kernel/syscall.c       |  11
-rw-r--r--  arch/mips/kernel/sysirix.c       |   4
-rw-r--r--  arch/mips/kernel/time.c          | 385
-rw-r--r--  arch/mips/kernel/traps.c         | 266
-rw-r--r--  arch/mips/kernel/vmlinux.lds.S   |  47
-rw-r--r--  arch/mips/kernel/vpe.c           |  45
30 files changed, 1373 insertions(+), 603 deletions(-)
diff --git a/arch/mips/kernel/Makefile b/arch/mips/kernel/Makefile
index a2689f93c16..b551535b7e4 100644
--- a/arch/mips/kernel/Makefile
+++ b/arch/mips/kernel/Makefile
@@ -8,6 +8,14 @@ obj-y += cpu-probe.o branch.o entry.o genex.o irq.o process.o \
ptrace.o reset.o semaphore.o setup.o signal.o syscall.o \
time.o topology.o traps.o unaligned.o
+obj-$(CONFIG_CEVT_BCM1480) += cevt-bcm1480.o
+obj-$(CONFIG_CEVT_R4K) += cevt-r4k.o
+obj-$(CONFIG_CEVT_GT641XX) += cevt-gt641xx.o
+obj-$(CONFIG_CEVT_SB1250) += cevt-sb1250.o
+obj-$(CONFIG_CEVT_TXX9) += cevt-txx9.o
+obj-$(CONFIG_CSRC_BCM1480) += csrc-bcm1480.o
+obj-$(CONFIG_CSRC_SB1250) += csrc-sb1250.o
+
binfmt_irix-objs := irixelf.o irixinv.o irixioctl.o irixsig.o \
irix5sys.o sysirix.o
@@ -71,7 +79,7 @@ obj-$(CONFIG_PCSPEAKER) += pcspeaker.o
obj-$(CONFIG_KEXEC) += machine_kexec.o relocate_kernel.o
obj-$(CONFIG_EARLY_PRINTK) += early_printk.o
-CFLAGS_cpu-bugs64.o = $(shell if $(CC) $(CFLAGS) -Wa,-mdaddi -c -o /dev/null -xc /dev/null >/dev/null 2>&1; then echo "-DHAVE_AS_SET_DADDI"; fi)
+CFLAGS_cpu-bugs64.o = $(shell if $(CC) $(KBUILD_CFLAGS) -Wa,-mdaddi -c -o /dev/null -xc /dev/null >/dev/null 2>&1; then echo "-DHAVE_AS_SET_DADDI"; fi)
obj-$(CONFIG_HAVE_STD_PC_SERIAL_PORT) += 8250-platform.o
diff --git a/arch/mips/kernel/cevt-bcm1480.c b/arch/mips/kernel/cevt-bcm1480.c
new file mode 100644
index 00000000000..0a57f86945f
--- /dev/null
+++ b/arch/mips/kernel/cevt-bcm1480.c
@@ -0,0 +1,153 @@
+/*
+ * Copyright (C) 2000,2001,2004 Broadcom Corporation
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+#include <linux/clockchips.h>
+#include <linux/interrupt.h>
+#include <linux/percpu.h>
+
+#include <asm/addrspace.h>
+#include <asm/io.h>
+#include <asm/time.h>
+
+#include <asm/sibyte/bcm1480_regs.h>
+#include <asm/sibyte/sb1250_regs.h>
+#include <asm/sibyte/bcm1480_int.h>
+#include <asm/sibyte/bcm1480_scd.h>
+
+#include <asm/sibyte/sb1250.h>
+
+#define IMR_IP2_VAL K_BCM1480_INT_MAP_I0
+#define IMR_IP3_VAL K_BCM1480_INT_MAP_I1
+#define IMR_IP4_VAL K_BCM1480_INT_MAP_I2
+
+/*
+ * The general purpose timer ticks at 1 MHz independently of
+ * the rest of the system.
+ */
+static void sibyte_set_mode(enum clock_event_mode mode,
+ struct clock_event_device *evt)
+{
+ unsigned int cpu = smp_processor_id();
+ void __iomem *cfg, *init;
+
+ cfg = IOADDR(A_SCD_TIMER_REGISTER(cpu, R_SCD_TIMER_CFG));
+ init = IOADDR(A_SCD_TIMER_REGISTER(cpu, R_SCD_TIMER_INIT));
+
+ switch (mode) {
+ case CLOCK_EVT_MODE_PERIODIC:
+ __raw_writeq(0, cfg);
+ __raw_writeq((V_SCD_TIMER_FREQ / HZ) - 1, init);
+ __raw_writeq(M_SCD_TIMER_ENABLE | M_SCD_TIMER_MODE_CONTINUOUS,
+ cfg);
+ break;
+
+ case CLOCK_EVT_MODE_ONESHOT:
+ /* Stop the timer until we actually program a shot */
+ case CLOCK_EVT_MODE_SHUTDOWN:
+ __raw_writeq(0, cfg);
+ break;
+
+ case CLOCK_EVT_MODE_UNUSED: /* shuddup gcc */
+ case CLOCK_EVT_MODE_RESUME:
+ ;
+ }
+}
+
+static int sibyte_next_event(unsigned long delta, struct clock_event_device *cd)
+{
+ unsigned int cpu = smp_processor_id();
+ void __iomem *cfg, *init;
+
+ cfg = IOADDR(A_SCD_TIMER_REGISTER(cpu, R_SCD_TIMER_CFG));
+ init = IOADDR(A_SCD_TIMER_REGISTER(cpu, R_SCD_TIMER_INIT));
+
+ __raw_writeq(0, cfg);
+ __raw_writeq(delta - 1, init);
+ __raw_writeq(M_SCD_TIMER_ENABLE, cfg);
+
+ return 0;
+}
+
+static irqreturn_t sibyte_counter_handler(int irq, void *dev_id)
+{
+ unsigned int cpu = smp_processor_id();
+ struct clock_event_device *cd = dev_id;
+ void __iomem *cfg;
+ unsigned long tmode;
+
+ if (cd->mode == CLOCK_EVT_MODE_PERIODIC)
+ tmode = M_SCD_TIMER_ENABLE | M_SCD_TIMER_MODE_CONTINUOUS;
+ else
+ tmode = 0;
+
+ /* ACK interrupt */
+ cfg = IOADDR(A_SCD_TIMER_REGISTER(cpu, R_SCD_TIMER_CFG));
+ ____raw_writeq(tmode, cfg);
+
+ cd->event_handler(cd);
+
+ return IRQ_HANDLED;
+}
+
+static DEFINE_PER_CPU(struct clock_event_device, sibyte_hpt_clockevent);
+static DEFINE_PER_CPU(struct irqaction, sibyte_hpt_irqaction);
+static DEFINE_PER_CPU(char [18], sibyte_hpt_name);
+
+void __cpuinit sb1480_clockevent_init(void)
+{
+ unsigned int cpu = smp_processor_id();
+ unsigned int irq = K_BCM1480_INT_TIMER_0 + cpu;
+ struct irqaction *action = &per_cpu(sibyte_hpt_irqaction, cpu);
+ struct clock_event_device *cd = &per_cpu(sibyte_hpt_clockevent, cpu);
+ unsigned char *name = per_cpu(sibyte_hpt_name, cpu);
+
+ BUG_ON(cpu > 3); /* Only have 4 general purpose timers */
+
+ sprintf(name, "bcm1480-counter-%d", cpu);
+ cd->name = name;
+ cd->features = CLOCK_EVT_FEAT_PERIODIC |
+ CLOCK_EVT_FEAT_ONESHOT;
+ clockevent_set_clock(cd, V_SCD_TIMER_FREQ);
+ cd->max_delta_ns = clockevent_delta2ns(0x7fffff, cd);
+ cd->min_delta_ns = clockevent_delta2ns(2, cd);
+ cd->rating = 200;
+ cd->irq = irq;
+ cd->cpumask = cpumask_of_cpu(cpu);
+ cd->set_next_event = sibyte_next_event;
+ cd->set_mode = sibyte_set_mode;
+ clockevents_register_device(cd);
+
+ bcm1480_mask_irq(cpu, irq);
+
+ /*
+ * Map the timer interrupt to IP[4] of this cpu
+ */
+ __raw_writeq(IMR_IP4_VAL,
+ IOADDR(A_BCM1480_IMR_REGISTER(cpu,
+ R_BCM1480_IMR_INTERRUPT_MAP_BASE_H) + (irq << 3)));
+
+ bcm1480_unmask_irq(cpu, irq);
+
+ action->handler = sibyte_counter_handler;
+ action->flags = IRQF_DISABLED | IRQF_PERCPU;
+ action->mask = cpumask_of_cpu(cpu);
+ action->name = name;
+ action->dev_id = cd;
+
+ irq_set_affinity(irq, cpumask_of_cpu(cpu));
+ setup_irq(irq, action);
+}
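
Per the comment above, the SCD general-purpose timer ticks at 1 MHz, and the 0x7fffff maximum passed to clockevent_delta2ns() matches its 23-bit count field, so the bounds work out to roughly 8.4 seconds maximum and 2 microseconds minimum per shot. A small userspace sketch of that arithmetic (the constants are taken from the code above, nothing else is from the patch):

    #include <stdio.h>

    #define SCD_TIMER_HZ   1000000UL   /* 1 MHz, per V_SCD_TIMER_FREQ */
    #define SCD_TIMER_MAX  0x7fffffUL  /* 23-bit count field */

    int main(void)
    {
            /* one tick = 1 / 1 MHz = 1000 ns */
            unsigned long long ns_per_tick = 1000000000ULL / SCD_TIMER_HZ;

            printf("max one-shot: %llu ns (~%llu s)\n",
                   SCD_TIMER_MAX * ns_per_tick,
                   SCD_TIMER_MAX * ns_per_tick / 1000000000ULL);
            printf("min one-shot: %llu ns\n", 2 * ns_per_tick);
            return 0;
    }
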
diff --git a/arch/mips/kernel/cevt-gt641xx.c b/arch/mips/kernel/cevt-gt641xx.c
new file mode 100644
index 00000000000..c36772631fe
--- /dev/null
+++ b/arch/mips/kernel/cevt-gt641xx.c
@@ -0,0 +1,142 @@
+/*
+ * GT641xx clockevent routines.
+ *
+ * Copyright (C) 2007 Yoichi Yuasa <yoichi_yuasa@tripeaks.co.jp>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+#include <linux/clockchips.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/spinlock.h>
+
+#include <asm/gt64120.h>
+#include <asm/time.h>
+
+#include <irq.h>
+
+static DEFINE_SPINLOCK(gt641xx_timer_lock);
+static unsigned int gt641xx_base_clock;
+
+void gt641xx_set_base_clock(unsigned int clock)
+{
+ gt641xx_base_clock = clock;
+}
+
+int gt641xx_timer0_state(void)
+{
+ if (GT_READ(GT_TC0_OFS))
+ return 0;
+
+ GT_WRITE(GT_TC0_OFS, gt641xx_base_clock / HZ);
+ GT_WRITE(GT_TC_CONTROL_OFS, GT_TC_CONTROL_ENTC0_MSK);
+
+ return 1;
+}
+
+static int gt641xx_timer0_set_next_event(unsigned long delta,
+ struct clock_event_device *evt)
+{
+ u32 ctrl;
+
+ spin_lock(&gt641xx_timer_lock);
+
+ ctrl = GT_READ(GT_TC_CONTROL_OFS);
+ ctrl &= ~(GT_TC_CONTROL_ENTC0_MSK | GT_TC_CONTROL_SELTC0_MSK);
+ ctrl |= GT_TC_CONTROL_ENTC0_MSK;
+
+ GT_WRITE(GT_TC0_OFS, delta);
+ GT_WRITE(GT_TC_CONTROL_OFS, ctrl);
+
+ spin_unlock(&gt641xx_timer_lock);
+
+ return 0;
+}
+
+static void gt641xx_timer0_set_mode(enum clock_event_mode mode,
+ struct clock_event_device *evt)
+{
+ u32 ctrl;
+
+ spin_lock(&gt641xx_timer_lock);
+
+ ctrl = GT_READ(GT_TC_CONTROL_OFS);
+ ctrl &= ~(GT_TC_CONTROL_ENTC0_MSK | GT_TC_CONTROL_SELTC0_MSK);
+
+ switch (mode) {
+ case CLOCK_EVT_MODE_PERIODIC:
+ ctrl |= GT_TC_CONTROL_ENTC0_MSK | GT_TC_CONTROL_SELTC0_MSK;
+ break;
+ case CLOCK_EVT_MODE_ONESHOT:
+ ctrl |= GT_TC_CONTROL_ENTC0_MSK;
+ break;
+ default:
+ break;
+ }
+
+ GT_WRITE(GT_TC_CONTROL_OFS, ctrl);
+
+ spin_unlock(&gt641xx_timer_lock);
+}
+
+static void gt641xx_timer0_event_handler(struct clock_event_device *dev)
+{
+}
+
+static struct clock_event_device gt641xx_timer0_clockevent = {
+ .name = "gt641xx-timer0",
+ .features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT,
+ .cpumask = CPU_MASK_CPU0,
+ .irq = GT641XX_TIMER0_IRQ,
+ .set_next_event = gt641xx_timer0_set_next_event,
+ .set_mode = gt641xx_timer0_set_mode,
+ .event_handler = gt641xx_timer0_event_handler,
+};
+
+static irqreturn_t gt641xx_timer0_interrupt(int irq, void *dev_id)
+{
+ struct clock_event_device *cd = &gt641xx_timer0_clockevent;
+
+ cd->event_handler(cd);
+
+ return IRQ_HANDLED;
+}
+
+static struct irqaction gt641xx_timer0_irqaction = {
+ .handler = gt641xx_timer0_interrupt,
+ .flags = IRQF_DISABLED | IRQF_PERCPU,
+ .name = "gt641xx_timer0",
+};
+
+static int __init gt641xx_timer0_clockevent_init(void)
+{
+ struct clock_event_device *cd;
+
+ if (!gt641xx_base_clock)
+ return 0;
+
+ GT_WRITE(GT_TC0_OFS, gt641xx_base_clock / HZ);
+
+ cd = &gt641xx_timer0_clockevent;
+ cd->rating = 200 + gt641xx_base_clock / 10000000;
+ clockevent_set_clock(cd, gt641xx_base_clock);
+ cd->max_delta_ns = clockevent_delta2ns(0x7fffffff, cd);
+ cd->min_delta_ns = clockevent_delta2ns(0x300, cd);
+
+ clockevents_register_device(&gt641xx_timer0_clockevent);
+
+ return setup_irq(GT641XX_TIMER0_IRQ, &gt641xx_timer0_irqaction);
+}
+arch_initcall(gt641xx_timer0_clockevent_init);
diff --git a/arch/mips/kernel/cevt-r4k.c b/arch/mips/kernel/cevt-r4k.c
new file mode 100644
index 00000000000..bab935a3d74
--- /dev/null
+++ b/arch/mips/kernel/cevt-r4k.c
@@ -0,0 +1,290 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2007 MIPS Technologies, Inc.
+ * Copyright (C) 2007 Ralf Baechle <ralf@linux-mips.org>
+ */
+#include <linux/clockchips.h>
+#include <linux/interrupt.h>
+#include <linux/percpu.h>
+
+#include <asm/smtc_ipi.h>
+#include <asm/time.h>
+
+static int mips_next_event(unsigned long delta,
+ struct clock_event_device *evt)
+{
+ unsigned int cnt;
+ int res;
+
+#ifdef CONFIG_MIPS_MT_SMTC
+ {
+ unsigned long flags, vpflags;
+ local_irq_save(flags);
+ vpflags = dvpe();
+#endif
+ cnt = read_c0_count();
+ cnt += delta;
+ write_c0_compare(cnt);
+ res = ((int)(read_c0_count() - cnt) > 0) ? -ETIME : 0;
+#ifdef CONFIG_MIPS_MT_SMTC
+ evpe(vpflags);
+ local_irq_restore(flags);
+ }
+#endif
+ return res;
+}
+
+static void mips_set_mode(enum clock_event_mode mode,
+ struct clock_event_device *evt)
+{
+ /* Nothing to do ... */
+}
+
+static DEFINE_PER_CPU(struct clock_event_device, mips_clockevent_device);
+static int cp0_timer_irq_installed;
+
+/*
+ * Timer ack for an R4k-compatible timer of a known frequency.
+ */
+static void c0_timer_ack(void)
+{
+ write_c0_compare(read_c0_compare());
+}
+
+/*
+ * Possibly handle a performance counter interrupt.
+ * Return true if the timer interrupt should not be checked
+ */
+static inline int handle_perf_irq(int r2)
+{
+ /*
+ * The performance counter overflow interrupt may be shared with the
+ * timer interrupt (cp0_perfcount_irq < 0). If it is and a
+ * performance counter has overflowed (perf_irq() == IRQ_HANDLED)
+ * and we can't reliably determine if a counter interrupt has also
+ * happened (!r2) then don't check for a timer interrupt.
+ */
+ return (cp0_perfcount_irq < 0) &&
+ perf_irq() == IRQ_HANDLED &&
+ !r2;
+}
+
+static irqreturn_t c0_compare_interrupt(int irq, void *dev_id)
+{
+ const int r2 = cpu_has_mips_r2;
+ struct clock_event_device *cd;
+ int cpu = smp_processor_id();
+
+ /*
+ * Suckage alert:
+ * Before R2 of the architecture there was no way to see if a
+ * performance counter interrupt was pending, so we have to run
+ * the performance counter interrupt handler anyway.
+ */
+ if (handle_perf_irq(r2))
+ goto out;
+
+ /*
+ * The same applies to performance counter interrupts. But with the
+ * above we now know that the reason we got here must be a timer
+ * interrupt. Being the paranoiacs we are we check anyway.
+ */
+ if (!r2 || (read_c0_cause() & (1 << 30))) {
+ c0_timer_ack();
+#ifdef CONFIG_MIPS_MT_SMTC
+ if (cpu_data[cpu].vpe_id)
+ goto out;
+ cpu = 0;
+#endif
+ cd = &per_cpu(mips_clockevent_device, cpu);
+ cd->event_handler(cd);
+ }
+
+out:
+ return IRQ_HANDLED;
+}
+
+static struct irqaction c0_compare_irqaction = {
+ .handler = c0_compare_interrupt,
+#ifdef CONFIG_MIPS_MT_SMTC
+ .flags = IRQF_DISABLED,
+#else
+ .flags = IRQF_DISABLED | IRQF_PERCPU,
+#endif
+ .name = "timer",
+};
+
+#ifdef CONFIG_MIPS_MT_SMTC
+DEFINE_PER_CPU(struct clock_event_device, smtc_dummy_clockevent_device);
+
+static void smtc_set_mode(enum clock_event_mode mode,
+ struct clock_event_device *evt)
+{
+}
+
+static void mips_broadcast(cpumask_t mask)
+{
+ unsigned int cpu;
+
+ for_each_cpu_mask(cpu, mask)
+ smtc_send_ipi(cpu, SMTC_CLOCK_TICK, 0);
+}
+
+static void setup_smtc_dummy_clockevent_device(void)
+{
+ //uint64_t mips_freq = mips_hpt_^frequency;
+ unsigned int cpu = smp_processor_id();
+ struct clock_event_device *cd;
+
+ cd = &per_cpu(smtc_dummy_clockevent_device, cpu);
+
+ cd->name = "SMTC";
+ cd->features = CLOCK_EVT_FEAT_DUMMY;
+
+ /* Calculate the min / max delta */
+ cd->mult = 0; //div_sc((unsigned long) mips_freq, NSEC_PER_SEC, 32);
+ cd->shift = 0; //32;
+ cd->max_delta_ns = 0; //clockevent_delta2ns(0x7fffffff, cd);
+ cd->min_delta_ns = 0; //clockevent_delta2ns(0x30, cd);
+
+ cd->rating = 200;
+ cd->irq = 17; //-1;
+// if (cpu)
+// cd->cpumask = CPU_MASK_ALL; // cpumask_of_cpu(cpu);
+// else
+ cd->cpumask = cpumask_of_cpu(cpu);
+
+ cd->set_mode = smtc_set_mode;
+
+ cd->broadcast = mips_broadcast;
+
+ clockevents_register_device(cd);
+}
+#endif
+
+static void mips_event_handler(struct clock_event_device *dev)
+{
+}
+
+/*
+ * FIXME: This doesn't hold for the relocated E9000 compare interrupt.
+ */
+static int c0_compare_int_pending(void)
+{
+ return (read_c0_cause() >> cp0_compare_irq) & 0x100;
+}
+
+static int c0_compare_int_usable(void)
+{
+ unsigned int delta;
+ unsigned int cnt;
+
+ /*
+ * IP7 already pending? Try to clear it by acking the timer.
+ */
+ if (c0_compare_int_pending()) {
+ write_c0_compare(read_c0_count());
+ irq_disable_hazard();
+ if (c0_compare_int_pending())
+ return 0;
+ }
+
+ for (delta = 0x10; delta <= 0x400000; delta <<= 1) {
+ cnt = read_c0_count();
+ cnt += delta;
+ write_c0_compare(cnt);
+ irq_disable_hazard();
+ if ((int)(read_c0_count() - cnt) < 0)
+ break;
+ /* increase delta if the timer was already expired */
+ }
+
+ while ((int)(read_c0_count() - cnt) <= 0)
+ ; /* Wait for expiry */
+
+ if (!c0_compare_int_pending())
+ return 0;
+
+ write_c0_compare(read_c0_count());
+ irq_disable_hazard();
+ if (c0_compare_int_pending())
+ return 0;
+
+ /*
+ * Feels like a real count / compare timer.
+ */
+ return 1;
+}
+
+void __cpuinit mips_clockevent_init(void)
+{
+ uint64_t mips_freq = mips_hpt_frequency;
+ unsigned int cpu = smp_processor_id();
+ struct clock_event_device *cd;
+ unsigned int irq;
+
+ if (!cpu_has_counter || !mips_hpt_frequency)
+ return;
+
+#ifdef CONFIG_MIPS_MT_SMTC
+ setup_smtc_dummy_clockevent_device();
+
+ /*
+ * On SMTC we only register VPE0's compare interrupt as clockevent
+ * device.
+ */
+ if (cpu)
+ return;
+#endif
+
+ if (!c0_compare_int_usable())
+ return;
+
+ /*
+ * With vectored interrupts things are getting platform specific.
+ * get_c0_compare_int is a hook to allow a platform to return the
+ * interrupt number of its liking.
+ */
+ irq = MIPS_CPU_IRQ_BASE + cp0_compare_irq;
+ if (get_c0_compare_int)
+ irq = get_c0_compare_int();
+
+ cd = &per_cpu(mips_clockevent_device, cpu);
+
+ cd->name = "MIPS";
+ cd->features = CLOCK_EVT_FEAT_ONESHOT;
+
+ /* Calculate the min / max delta */
+ cd->mult = div_sc((unsigned long) mips_freq, NSEC_PER_SEC, 32);
+ cd->shift = 32;
+ cd->max_delta_ns = clockevent_delta2ns(0x7fffffff, cd);
+ cd->min_delta_ns = clockevent_delta2ns(0x300, cd);
+
+ cd->rating = 300;
+ cd->irq = irq;
+#ifdef CONFIG_MIPS_MT_SMTC
+ cd->cpumask = CPU_MASK_ALL;
+#else
+ cd->cpumask = cpumask_of_cpu(cpu);
+#endif
+ cd->set_next_event = mips_next_event;
+ cd->set_mode = mips_set_mode;
+ cd->event_handler = mips_event_handler;
+
+ clockevents_register_device(cd);
+
+ if (cp0_timer_irq_installed)
+ return;
+
+ cp0_timer_irq_installed = 1;
+
+#ifdef CONFIG_MIPS_MT_SMTC
+#define CPUCTR_IMASKBIT (0x100 << cp0_compare_irq)
+ setup_irq_smtc(irq, &c0_compare_irqaction, CPUCTR_IMASKBIT);
+#else
+ setup_irq(irq, &c0_compare_irqaction);
+#endif
+}
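
Both mips_next_event() and c0_compare_int_usable() above rely on the signed difference (int)(read_c0_count() - cnt) to decide whether a programmed Compare value already lies in the past; subtracting the free-running 32-bit Count first and only then interpreting the result as signed keeps the test correct across counter wrap-around. A stand-alone sketch of the idiom, with made-up counter values:

    #include <stdio.h>

    /* Nonzero if 'deadline' has already passed for a free-running 32-bit
     * counter currently at 'now', even when the counter has wrapped. */
    static int deadline_passed(unsigned int now, unsigned int deadline)
    {
            return (int)(now - deadline) >= 0;
    }

    int main(void)
    {
            /* deadline just before the wrap, current value just after it */
            unsigned int deadline = 0xfffffff0u;
            unsigned int now = 0x00000010u;

            printf("%d\n", deadline_passed(now, deadline)); /* 1: passed */
            printf("%d\n", deadline_passed(deadline, now)); /* 0: still ahead */
            return 0;
    }
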
diff --git a/arch/mips/kernel/cevt-sb1250.c b/arch/mips/kernel/cevt-sb1250.c
new file mode 100644
index 00000000000..63ac3ad462b
--- /dev/null
+++ b/arch/mips/kernel/cevt-sb1250.c
@@ -0,0 +1,152 @@
+/*
+ * Copyright (C) 2000, 2001 Broadcom Corporation
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+#include <linux/clockchips.h>
+#include <linux/interrupt.h>
+#include <linux/percpu.h>
+
+#include <asm/addrspace.h>
+#include <asm/io.h>
+#include <asm/time.h>
+
+#include <asm/sibyte/sb1250.h>
+#include <asm/sibyte/sb1250_regs.h>
+#include <asm/sibyte/sb1250_int.h>
+#include <asm/sibyte/sb1250_scd.h>
+
+#define IMR_IP2_VAL K_INT_MAP_I0
+#define IMR_IP3_VAL K_INT_MAP_I1
+#define IMR_IP4_VAL K_INT_MAP_I2
+
+/*
+ * The general purpose timer ticks at 1 MHz independently of
+ * the rest of the system.
+ */
+static void sibyte_set_mode(enum clock_event_mode mode,
+ struct clock_event_device *evt)
+{
+ unsigned int cpu = smp_processor_id();
+ void __iomem *cfg, *init;
+
+ cfg = IOADDR(A_SCD_TIMER_REGISTER(cpu, R_SCD_TIMER_CFG));
+ init = IOADDR(A_SCD_TIMER_REGISTER(cpu, R_SCD_TIMER_INIT));
+
+ switch (mode) {
+ case CLOCK_EVT_MODE_PERIODIC:
+ __raw_writeq(0, cfg);
+ __raw_writeq((V_SCD_TIMER_FREQ / HZ) - 1, init);
+ __raw_writeq(M_SCD_TIMER_ENABLE | M_SCD_TIMER_MODE_CONTINUOUS,
+ cfg);
+ break;
+
+ case CLOCK_EVT_MODE_ONESHOT:
+ /* Stop the timer until we actually program a shot */
+ case CLOCK_EVT_MODE_SHUTDOWN:
+ __raw_writeq(0, cfg);
+ break;
+
+ case CLOCK_EVT_MODE_UNUSED: /* shuddup gcc */
+ case CLOCK_EVT_MODE_RESUME:
+ ;
+ }
+}
+
+static int sibyte_next_event(unsigned long delta, struct clock_event_device *cd)
+{
+ unsigned int cpu = smp_processor_id();
+ void __iomem *cfg, *init;
+
+ cfg = IOADDR(A_SCD_TIMER_REGISTER(cpu, R_SCD_TIMER_CFG));
+ init = IOADDR(A_SCD_TIMER_REGISTER(cpu, R_SCD_TIMER_INIT));
+
+ __raw_writeq(0, cfg);
+ __raw_writeq(delta - 1, init);
+ __raw_writeq(M_SCD_TIMER_ENABLE, cfg);
+
+ return 0;
+}
+
+static irqreturn_t sibyte_counter_handler(int irq, void *dev_id)
+{
+ unsigned int cpu = smp_processor_id();
+ struct clock_event_device *cd = dev_id;
+ void __iomem *cfg;
+ unsigned long tmode;
+
+ if (cd->mode == CLOCK_EVT_MODE_PERIODIC)
+ tmode = M_SCD_TIMER_ENABLE | M_SCD_TIMER_MODE_CONTINUOUS;
+ else
+ tmode = 0;
+
+ /* ACK interrupt */
+ cfg = IOADDR(A_SCD_TIMER_REGISTER(cpu, R_SCD_TIMER_CFG));
+ ____raw_writeq(tmode, cfg);
+
+ cd->event_handler(cd);
+
+ return IRQ_HANDLED;
+}
+
+static DEFINE_PER_CPU(struct clock_event_device, sibyte_hpt_clockevent);
+static DEFINE_PER_CPU(struct irqaction, sibyte_hpt_irqaction);
+static DEFINE_PER_CPU(char [18], sibyte_hpt_name);
+
+void __cpuinit sb1250_clockevent_init(void)
+{
+ unsigned int cpu = smp_processor_id();
+ unsigned int irq = K_INT_TIMER_0 + cpu;
+ struct irqaction *action = &per_cpu(sibyte_hpt_irqaction, cpu);
+ struct clock_event_device *cd = &per_cpu(sibyte_hpt_clockevent, cpu);
+ unsigned char *name = per_cpu(sibyte_hpt_name, cpu);
+
+ /* Only have 4 general purpose timers, and we use last one as hpt */
+ BUG_ON(cpu > 2);
+
+ sprintf(name, "sb1250-counter-%d", cpu);
+ cd->name = name;
+ cd->features = CLOCK_EVT_FEAT_PERIODIC |
+ CLOCK_EVT_FEAT_ONESHOT;
+ clockevent_set_clock(cd, V_SCD_TIMER_FREQ);
+ cd->max_delta_ns = clockevent_delta2ns(0x7fffff, cd);
+ cd->min_delta_ns = clockevent_delta2ns(2, cd);
+ cd->rating = 200;
+ cd->irq = irq;
+ cd->cpumask = cpumask_of_cpu(cpu);
+ cd->set_next_event = sibyte_next_event;
+ cd->set_mode = sibyte_set_mode;
+ clockevents_register_device(cd);
+
+ sb1250_mask_irq(cpu, irq);
+
+ /*
+ * Map the timer interrupt to IP[4] of this cpu
+ */
+ __raw_writeq(IMR_IP4_VAL,
+ IOADDR(A_IMR_REGISTER(cpu, R_IMR_INTERRUPT_MAP_BASE) +
+ (irq << 3)));
+
+ sb1250_unmask_irq(cpu, irq);
+
+ action->handler = sibyte_counter_handler;
+ action->flags = IRQF_DISABLED | IRQF_PERCPU;
+ action->mask = cpumask_of_cpu(cpu);
+ action->name = name;
+ action->dev_id = cd;
+
+ irq_set_affinity(irq, cpumask_of_cpu(cpu));
+ setup_irq(irq, action);
+}
diff --git a/arch/mips/kernel/cevt-txx9.c b/arch/mips/kernel/cevt-txx9.c
new file mode 100644
index 00000000000..795cb8fb0d7
--- /dev/null
+++ b/arch/mips/kernel/cevt-txx9.c
@@ -0,0 +1,171 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Based on linux/arch/mips/kernel/cevt-r4k.c,
+ * linux/arch/mips/jmr3927/rbhma3100/setup.c
+ *
+ * Copyright 2001 MontaVista Software Inc.
+ * Copyright (C) 2000-2001 Toshiba Corporation
+ * Copyright (C) 2007 MIPS Technologies, Inc.
+ * Copyright (C) 2007 Ralf Baechle <ralf@linux-mips.org>
+ */
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <asm/time.h>
+#include <asm/txx9tmr.h>
+
+#define TCR_BASE (TXx9_TMTCR_CCDE | TXx9_TMTCR_CRE | TXx9_TMTCR_TMODE_ITVL)
+#define TIMER_CCD 0 /* 1/2 */
+#define TIMER_CLK(imclk) ((imclk) / (2 << TIMER_CCD))
+
+static struct txx9_tmr_reg __iomem *txx9_cs_tmrptr;
+
+static cycle_t txx9_cs_read(void)
+{
+ return __raw_readl(&txx9_cs_tmrptr->trr);
+}
+
+/* Use 1 bit smaller width to use full bits in that width */
+#define TXX9_CLOCKSOURCE_BITS (TXX9_TIMER_BITS - 1)
+
+static struct clocksource txx9_clocksource = {
+ .name = "TXx9",
+ .rating = 200,
+ .read = txx9_cs_read,
+ .mask = CLOCKSOURCE_MASK(TXX9_CLOCKSOURCE_BITS),
+ .flags = CLOCK_SOURCE_IS_CONTINUOUS,
+};
+
+void __init txx9_clocksource_init(unsigned long baseaddr,
+ unsigned int imbusclk)
+{
+ struct txx9_tmr_reg __iomem *tmrptr;
+
+ clocksource_set_clock(&txx9_clocksource, TIMER_CLK(imbusclk));
+ clocksource_register(&txx9_clocksource);
+
+ tmrptr = ioremap(baseaddr, sizeof(struct txx9_tmr_reg));
+ __raw_writel(TCR_BASE, &tmrptr->tcr);
+ __raw_writel(0, &tmrptr->tisr);
+ __raw_writel(TIMER_CCD, &tmrptr->ccdr);
+ __raw_writel(TXx9_TMITMR_TZCE, &tmrptr->itmr);
+ __raw_writel(1 << TXX9_CLOCKSOURCE_BITS, &tmrptr->cpra);
+ __raw_writel(TCR_BASE | TXx9_TMTCR_TCE, &tmrptr->tcr);
+ txx9_cs_tmrptr = tmrptr;
+}
+
+static struct txx9_tmr_reg __iomem *txx9_tmrptr;
+
+static void txx9tmr_stop_and_clear(struct txx9_tmr_reg __iomem *tmrptr)
+{
+ /* stop and reset counter */
+ __raw_writel(TCR_BASE, &tmrptr->tcr);
+ /* clear pending interrupt */
+ __raw_writel(0, &tmrptr->tisr);
+}
+
+static void txx9tmr_set_mode(enum clock_event_mode mode,
+ struct clock_event_device *evt)
+{
+ struct txx9_tmr_reg __iomem *tmrptr = txx9_tmrptr;
+
+ txx9tmr_stop_and_clear(tmrptr);
+ switch (mode) {
+ case CLOCK_EVT_MODE_PERIODIC:
+ __raw_writel(TXx9_TMITMR_TIIE | TXx9_TMITMR_TZCE,
+ &tmrptr->itmr);
+ /* start timer */
+ __raw_writel(((u64)(NSEC_PER_SEC / HZ) * evt->mult) >>
+ evt->shift,
+ &tmrptr->cpra);
+ __raw_writel(TCR_BASE | TXx9_TMTCR_TCE, &tmrptr->tcr);
+ break;
+ case CLOCK_EVT_MODE_SHUTDOWN:
+ case CLOCK_EVT_MODE_UNUSED:
+ __raw_writel(0, &tmrptr->itmr);
+ break;
+ case CLOCK_EVT_MODE_ONESHOT:
+ __raw_writel(TXx9_TMITMR_TIIE, &tmrptr->itmr);
+ break;
+ case CLOCK_EVT_MODE_RESUME:
+ __raw_writel(TIMER_CCD, &tmrptr->ccdr);
+ __raw_writel(0, &tmrptr->itmr);
+ break;
+ }
+}
+
+static int txx9tmr_set_next_event(unsigned long delta,
+ struct clock_event_device *evt)
+{
+ struct txx9_tmr_reg __iomem *tmrptr = txx9_tmrptr;
+
+ txx9tmr_stop_and_clear(tmrptr);
+ /* start timer */
+ __raw_writel(delta, &tmrptr->cpra);
+ __raw_writel(TCR_BASE | TXx9_TMTCR_TCE, &tmrptr->tcr);
+ return 0;
+}
+
+static struct clock_event_device txx9tmr_clock_event_device = {
+ .name = "TXx9",
+ .features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT,
+ .rating = 200,
+ .cpumask = CPU_MASK_CPU0,
+ .set_mode = txx9tmr_set_mode,
+ .set_next_event = txx9tmr_set_next_event,
+};
+
+static irqreturn_t txx9tmr_interrupt(int irq, void *dev_id)
+{
+ struct clock_event_device *cd = &txx9tmr_clock_event_device;
+ struct txx9_tmr_reg __iomem *tmrptr = txx9_tmrptr;
+
+ __raw_writel(0, &tmrptr->tisr); /* ack interrupt */
+ cd->event_handler(cd);
+ return IRQ_HANDLED;
+}
+
+static struct irqaction txx9tmr_irq = {
+ .handler = txx9tmr_interrupt,
+ .flags = IRQF_DISABLED | IRQF_PERCPU,
+ .name = "txx9tmr",
+};
+
+void __init txx9_clockevent_init(unsigned long baseaddr, int irq,
+ unsigned int imbusclk)
+{
+ struct clock_event_device *cd = &txx9tmr_clock_event_device;
+ struct txx9_tmr_reg __iomem *tmrptr;
+
+ tmrptr = ioremap(baseaddr, sizeof(struct txx9_tmr_reg));
+ txx9tmr_stop_and_clear(tmrptr);
+ __raw_writel(TIMER_CCD, &tmrptr->ccdr);
+ __raw_writel(0, &tmrptr->itmr);
+ txx9_tmrptr = tmrptr;
+
+ clockevent_set_clock(cd, TIMER_CLK(imbusclk));
+ cd->max_delta_ns =
+ clockevent_delta2ns(0xffffffff >> (32 - TXX9_TIMER_BITS), cd);
+ cd->min_delta_ns = clockevent_delta2ns(0xf, cd);
+ cd->irq = irq;
+ clockevents_register_device(cd);
+ setup_irq(irq, &txx9tmr_irq);
+ printk(KERN_INFO "TXx9: clockevent device at 0x%lx, irq %d\n",
+ baseaddr, irq);
+}
+
+void __init txx9_tmr_init(unsigned long baseaddr)
+{
+ struct txx9_tmr_reg __iomem *tmrptr;
+
+ tmrptr = ioremap(baseaddr, sizeof(struct txx9_tmr_reg));
+ __raw_writel(TXx9_TMTCR_CRE, &tmrptr->tcr);
+ __raw_writel(0, &tmrptr->tisr);
+ __raw_writel(0xffffffff, &tmrptr->cpra);
+ __raw_writel(0, &tmrptr->itmr);
+ __raw_writel(0, &tmrptr->ccdr);
+ __raw_writel(0, &tmrptr->pgmr);
+ iounmap(tmrptr);
+}
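
In periodic mode the compare register is loaded with ((NSEC_PER_SEC / HZ) * evt->mult) >> evt->shift, i.e. one jiffy of nanoseconds converted back into timer ticks through the mult/shift pair set up by clockevent_set_clock(); up to rounding that is simply TIMER_CLK(imbusclk) / HZ. A numeric check under an assumed 100 MHz IM bus clock and HZ = 100 (both values are illustrative, not taken from the patch):

    #include <stdio.h>

    int main(void)
    {
            unsigned int imbusclk = 100000000;       /* assumed */
            unsigned int timer_clk = imbusclk / 2;   /* TIMER_CCD = 0 -> /2 */
            unsigned int hz = 100;                   /* assumed HZ */

            /* mult/shift as clockevent_set_clock() would pick them:
             * mult = (clock << shift) / NSEC_PER_SEC, with the largest
             * shift for which mult still fits in 32 bits (32 for 50 MHz). */
            unsigned int shift = 32;
            unsigned long long mult =
                    ((unsigned long long)timer_clk << shift) / 1000000000ULL;

            unsigned long long cpra = ((1000000000ULL / hz) * mult) >> shift;

            printf("cpra = %llu ticks, TIMER_CLK/HZ = %u\n",
                   cpra, timer_clk / hz);   /* 499999 vs 500000: off by the
                                               truncation in mult */
            return 0;
    }
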
diff --git a/arch/mips/kernel/cpu-probe.c b/arch/mips/kernel/cpu-probe.c
index c8c47a2d197..5c2794391bf 100644
--- a/arch/mips/kernel/cpu-probe.c
+++ b/arch/mips/kernel/cpu-probe.c
@@ -943,6 +943,11 @@ __init void cpu_probe(void)
}
__cpu_name[cpu] = cpu_to_name(c);
+
+ if (cpu_has_mips_r2)
+ c->srsets = ((read_c0_srsctl() >> 26) & 0x0f) + 1;
+ else
+ c->srsets = 1;
}
__init void cpu_report(void)
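
The srsets value added above comes from the HSS field (bits 29:26) of the CP0 SRSCtl register on MIPS R2 cores; HSS holds the highest shadow-set number, so the count is HSS + 1, and pre-R2 cores are simply reported as having one set. A minimal userspace sketch of the decoding:

    #include <stdio.h>

    /* Decode the number of shadow register sets from a raw SRSCtl value.
     * HSS (bits 29:26) is the highest shadow-set number, so count = HSS + 1. */
    static unsigned int srsctl_to_srsets(unsigned int srsctl)
    {
            return ((srsctl >> 26) & 0x0f) + 1;
    }

    int main(void)
    {
            printf("%u\n", srsctl_to_srsets(3u << 26)); /* HSS = 3 -> 4 sets */
            return 0;
    }
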
diff --git a/arch/mips/kernel/csrc-bcm1480.c b/arch/mips/kernel/csrc-bcm1480.c
new file mode 100644
index 00000000000..868745e7184
--- /dev/null
+++ b/arch/mips/kernel/csrc-bcm1480.c
@@ -0,0 +1,54 @@
+/*
+ * Copyright (C) 2000,2001,2004 Broadcom Corporation
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+#include <linux/clocksource.h>
+
+#include <asm/addrspace.h>
+#include <asm/io.h>
+#include <asm/time.h>
+
+#include <asm/sibyte/bcm1480_regs.h>
+#include <asm/sibyte/sb1250_regs.h>
+#include <asm/sibyte/bcm1480_int.h>
+#include <asm/sibyte/bcm1480_scd.h>
+
+#include <asm/sibyte/sb1250.h>
+
+static cycle_t bcm1480_hpt_read(void)
+{
+ return (cycle_t) __raw_readq(IOADDR(A_SCD_ZBBUS_CYCLE_COUNT));
+}
+
+struct clocksource bcm1480_clocksource = {
+ .name = "zbbus-cycles",
+ .rating = 200,
+ .read = bcm1480_hpt_read,
+ .mask = CLOCKSOURCE_MASK(64),
+ .flags = CLOCK_SOURCE_IS_CONTINUOUS,
+};
+
+void __init sb1480_clocksource_init(void)
+{
+ struct clocksource *cs = &bcm1480_clocksource;
+ unsigned int plldiv;
+ unsigned long zbbus;
+
+ plldiv = G_BCM1480_SYS_PLL_DIV(__raw_readq(IOADDR(A_SCD_SYSTEM_CFG)));
+ zbbus = ((plldiv >> 1) * 50000000) + ((plldiv & 1) * 25000000);
+ clocksource_set_clock(cs, zbbus);
+ clocksource_register(cs);
+}
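
The zbbus value computed in sb1480_clocksource_init(), ((plldiv >> 1) * 50000000) + ((plldiv & 1) * 25000000), evaluates to plldiv * 25 MHz for any integer divider; the split into even and odd halves just expresses the half-step granularity of the divider. A quick check with an assumed divider of 33 (the value is illustrative, not from the patch):

    #include <stdio.h>

    int main(void)
    {
            unsigned int plldiv = 33;   /* assumed example divider */
            unsigned long zbbus = ((plldiv >> 1) * 50000000UL) +
                                  ((plldiv & 1) * 25000000UL);

            printf("zbbus = %lu Hz, plldiv * 25 MHz = %lu Hz\n",
                   zbbus, plldiv * 25000000UL);   /* both 825000000 */
            return 0;
    }
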
diff --git a/arch/mips/kernel/csrc-sb1250.c b/arch/mips/kernel/csrc-sb1250.c
new file mode 100644
index 00000000000..92212bbb8e4
--- /dev/null
+++ b/arch/mips/kernel/csrc-sb1250.c
@@ -0,0 +1,70 @@
+/*
+ * Copyright (C) 2000, 2001 Broadcom Corporation
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+#include <linux/clocksource.h>
+
+#include <asm/addrspace.h>
+#include <asm/io.h>
+#include <asm/time.h>
+
+#include <asm/sibyte/sb1250.h>
+#include <asm/sibyte/sb1250_regs.h>
+#include <asm/sibyte/sb1250_int.h>
+#include <asm/sibyte/sb1250_scd.h>
+
+#define SB1250_HPT_NUM 3
+#define SB1250_HPT_VALUE M_SCD_TIMER_CNT /* max value */
+
+/*
+ * The HPT is free running from SB1250_HPT_VALUE down to 0 then starts over
+ * again.
+ */
+static cycle_t sb1250_hpt_read(void)
+{
+ unsigned int count;
+
+ count = G_SCD_TIMER_CNT(__raw_readq(IOADDR(A_SCD_TIMER_REGISTER(SB1250_HPT_NUM, R_SCD_TIMER_CNT))));
+
+ return SB1250_HPT_VALUE - count;
+}
+
+struct clocksource bcm1250_clocksource = {
+ .name = "bcm1250-counter-3",
+ .rating = 200,
+ .read = sb1250_hpt_read,
+ .mask = CLOCKSOURCE_MASK(23),
+ .flags = CLOCK_SOURCE_IS_CONTINUOUS,
+};
+
+void __init sb1250_clocksource_init(void)
+{
+ struct clocksource *cs = &bcm1250_clocksource;
+
+ /* Setup hpt using timer #3 but do not enable irq for it */
+ __raw_writeq(0,
+ IOADDR(A_SCD_TIMER_REGISTER(SB1250_HPT_NUM,
+ R_SCD_TIMER_CFG)));
+ __raw_writeq(SB1250_HPT_VALUE,
+ IOADDR(A_SCD_TIMER_REGISTER(SB1250_HPT_NUM,
+ R_SCD_TIMER_INIT)));
+ __raw_writeq(M_SCD_TIMER_ENABLE | M_SCD_TIMER_MODE_CONTINUOUS,
+ IOADDR(A_SCD_TIMER_REGISTER(SB1250_HPT_NUM,
+ R_SCD_TIMER_CFG)));
+
+ clocksource_set_clock(cs, V_SCD_TIMER_FREQ);
+ clocksource_register(cs);
+}
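
Timer 3 counts down from SB1250_HPT_VALUE to zero, while the clocksource layer expects a value that counts up, so sb1250_hpt_read() returns SB1250_HPT_VALUE - count and the 23-bit mask lets the generic code handle wrap-around. A stand-alone sketch of the same pattern, with the CNT register replaced by a plain variable:

    #include <stdio.h>

    #define HPT_MAX 0x7fffffu   /* 23-bit counter, like SB1250_HPT_VALUE */

    /* stands in for reading the down-counting CNT register */
    static unsigned int hw_count = HPT_MAX - 100;

    static unsigned int hpt_read_up(void)
    {
            return HPT_MAX - hw_count;
    }

    int main(void)
    {
            printf("up-count = %u\n", hpt_read_up());  /* 100 ticks elapsed */
            hw_count -= 50;                            /* hardware ticks down */
            printf("up-count = %u\n", hpt_read_up());  /* 150 */
            return 0;
    }
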
diff --git a/arch/mips/kernel/genex.S b/arch/mips/kernel/genex.S
index c0f19d638b9..e76a76bf0b3 100644
--- a/arch/mips/kernel/genex.S
+++ b/arch/mips/kernel/genex.S
@@ -146,7 +146,7 @@ NESTED(handle_int, PT_SIZE, sp)
and k0, ST0_IEP
bnez k0, 1f
- mfc0 k0, EP0_EPC
+ mfc0 k0, CP0_EPC
.set noreorder
j k0
rfe
diff --git a/arch/mips/kernel/head.S b/arch/mips/kernel/head.S
index e46782b0ebc..23676873106 100644
--- a/arch/mips/kernel/head.S
+++ b/arch/mips/kernel/head.S
@@ -27,16 +27,6 @@
#include <kernel-entry-init.h>
- .macro ARC64_TWIDDLE_PC
-#if defined(CONFIG_ARC64) || defined(CONFIG_MAPPED_KERNEL)
- /* We get launched at a XKPHYS address but the kernel is linked to
- run at a KSEG0 address, so jump there. */
- PTR_LA t0, \@f
- jr t0
-\@:
-#endif
- .endm
-
/*
* inputs are the text nasid in t1, data nasid in t2.
*/
@@ -140,7 +130,7 @@
EXPORT(_stext)
-#ifndef CONFIG_BOOT_RAW
+#ifdef CONFIG_BOOT_RAW
/*
 * Give us a fighting chance of running if execution begins at the
* kernel load address. This is needed because this platform does
@@ -149,13 +139,19 @@ EXPORT(_stext)
__INIT
#endif
+ __INIT_REFOK
+
NESTED(kernel_entry, 16, sp) # kernel entry point
kernel_entry_setup # cpu specific setup
setup_c0_status_pri
- ARC64_TWIDDLE_PC
+ /* We might not get launched at the address the kernel is linked to,
+ so we jump there. */
+ PTR_LA t0, 0f
+ jr t0
+0:
#ifdef CONFIG_MIPS_MT_SMTC
/*
diff --git a/arch/mips/kernel/i8253.c b/arch/mips/kernel/i8253.c
index 5d9830df359..c2d497ceffd 100644
--- a/arch/mips/kernel/i8253.c
+++ b/arch/mips/kernel/i8253.c
@@ -12,8 +12,9 @@
#include <asm/delay.h>
#include <asm/i8253.h>
#include <asm/io.h>
+#include <asm/time.h>
-static DEFINE_SPINLOCK(i8253_lock);
+DEFINE_SPINLOCK(i8253_lock);
/*
* Initialize the PIT timer.
@@ -87,11 +88,10 @@ struct clock_event_device pit_clockevent = {
.features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT,
.set_mode = init_pit_timer,
.set_next_event = pit_next_event,
- .shift = 32,
.irq = 0,
};
-irqreturn_t timer_interrupt(int irq, void *dev_id)
+static irqreturn_t timer_interrupt(int irq, void *dev_id)
{
pit_clockevent.event_handler(&pit_clockevent);
@@ -111,19 +111,20 @@ static struct irqaction irq0 = {
*/
void __init setup_pit_timer(void)
{
+ struct clock_event_device *cd = &pit_clockevent;
+ unsigned int cpu = smp_processor_id();
+
/*
* Start pit with the boot cpu mask and make it global after the
* IO_APIC has been initialized.
*/
- pit_clockevent.cpumask = cpumask_of_cpu(0);
- pit_clockevent.mult = div_sc(CLOCK_TICK_RATE, NSEC_PER_SEC, 32);
- pit_clockevent.max_delta_ns =
- clockevent_delta2ns(0x7FFF, &pit_clockevent);
- pit_clockevent.min_delta_ns =
- clockevent_delta2ns(0xF, &pit_clockevent);
- clockevents_register_device(&pit_clockevent);
-
- irq0.mask = cpumask_of_cpu(0);
+ cd->cpumask = cpumask_of_cpu(cpu);
+ clockevent_set_clock(cd, CLOCK_TICK_RATE);
+ cd->max_delta_ns = clockevent_delta2ns(0x7FFF, cd);
+ cd->min_delta_ns = clockevent_delta2ns(0xF, cd);
+ clockevents_register_device(cd);
+
+ irq0.mask = cpumask_of_cpu(cpu);
setup_irq(0, &irq0);
}
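
setup_pit_timer() now derives mult/shift through clockevent_set_clock(cd, CLOCK_TICK_RATE) instead of the open-coded div_sc(); the helper (added to time.c further down in this patch) searches for the largest shift for which mult = (clock << shift) / NSEC_PER_SEC still fits in 32 bits. A userspace sketch of that search, assuming the usual 1193182 Hz i8253 input clock:

    #include <stdio.h>

    static void set_clock(unsigned int clock,
                          unsigned int *mult, unsigned int *shift)
    {
            unsigned long long temp;
            unsigned int s;

            for (s = 32; s > 0; s--) {
                    temp = (unsigned long long)clock << s;
                    temp /= 1000000000ULL;          /* NSEC_PER_SEC */
                    if ((temp >> 32) == 0)          /* mult fits in 32 bits */
                            break;
            }
            *shift = s;
            *mult = (unsigned int)temp;
    }

    int main(void)
    {
            unsigned int mult, shift;

            set_clock(1193182, &mult, &shift);      /* assumed PIT clock */
            printf("shift=%u mult=%u\n", shift, mult);
            return 0;
    }
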
diff --git a/arch/mips/kernel/irixelf.c b/arch/mips/kernel/irixelf.c
index 8ef5cf4cc42..7852c7cdf29 100644
--- a/arch/mips/kernel/irixelf.c
+++ b/arch/mips/kernel/irixelf.c
@@ -44,11 +44,14 @@
static int load_irix_binary(struct linux_binprm * bprm, struct pt_regs * regs);
static int load_irix_library(struct file *);
static int irix_core_dump(long signr, struct pt_regs * regs,
- struct file *file);
+ struct file *file, unsigned long limit);
static struct linux_binfmt irix_format = {
- NULL, THIS_MODULE, load_irix_binary, load_irix_library,
- irix_core_dump, PAGE_SIZE
+ .module = THIS_MODULE,
+ .load_binary = load_irix_binary,
+ .load_shlib = load_irix_library,
+ .core_dump = irix_core_dump,
+ .min_coredump = PAGE_SIZE,
};
/* Debugging routines. */
@@ -1088,7 +1091,7 @@ end_coredump:
* and then they are actually written out. If we run out of core limit
* we just truncate.
*/
-static int irix_core_dump(long signr, struct pt_regs * regs, struct file *file)
+static int irix_core_dump(long signr, struct pt_regs *regs, struct file *file, unsigned long limit)
{
int has_dumped = 0;
mm_segment_t fs;
@@ -1098,7 +1101,6 @@ static int irix_core_dump(long signr, struct pt_regs * regs, struct file *file)
struct vm_area_struct *vma;
struct elfhdr elf;
off_t offset = 0, dataoff;
- int limit = current->signal->rlim[RLIMIT_CORE].rlim_cur;
int numnote = 3;
struct memelfnote notes[3];
struct elf_prstatus prstatus; /* NT_PRSTATUS */
@@ -1170,8 +1172,8 @@ static int irix_core_dump(long signr, struct pt_regs * regs, struct file *file)
prstatus.pr_sighold = current->blocked.sig[0];
psinfo.pr_pid = prstatus.pr_pid = current->pid;
psinfo.pr_ppid = prstatus.pr_ppid = current->parent->pid;
- psinfo.pr_pgrp = prstatus.pr_pgrp = process_group(current);
- psinfo.pr_sid = prstatus.pr_sid = process_session(current);
+ psinfo.pr_pgrp = prstatus.pr_pgrp = task_pgrp_nr(current);
+ psinfo.pr_sid = prstatus.pr_sid = task_session_nr(current);
if (current->pid == current->tgid) {
/*
* This is the record for the group leader. Add in the
diff --git a/arch/mips/kernel/irixsig.c b/arch/mips/kernel/irixsig.c
index 85c2e389edd..5b10ac133ec 100644
--- a/arch/mips/kernel/irixsig.c
+++ b/arch/mips/kernel/irixsig.c
@@ -24,8 +24,12 @@
#define _BLOCKABLE (~(_S(SIGKILL) | _S(SIGSTOP)))
+#define _IRIX_NSIG 128
+#define _IRIX_NSIG_BPW BITS_PER_LONG
+#define _IRIX_NSIG_WORDS (_IRIX_NSIG / _IRIX_NSIG_BPW)
+
typedef struct {
- unsigned long sig[4];
+ unsigned long sig[_IRIX_NSIG_WORDS];
} irix_sigset_t;
struct sigctx_irix5 {
@@ -426,6 +430,7 @@ asmlinkage int irix_sigprocmask(int how, irix_sigset_t __user *new,
break;
default:
+ spin_unlock_irq(&current->sighand->siglock);
return -EINVAL;
}
recalc_sigpending();
@@ -527,7 +532,7 @@ asmlinkage int irix_sigpoll_sys(unsigned long __user *set,
expire = schedule_timeout_interruptible(expire);
- for (i=0; i<=4; i++)
+ for (i=0; i < _IRIX_NSIG_WORDS; i++)
tmp |= (current->pending.signal.sig[i] & kset.sig[i]);
if (tmp)
@@ -609,7 +614,7 @@ repeat:
p = list_entry(_p, struct task_struct, sibling);
if ((type == IRIX_P_PID) && p->pid != pid)
continue;
- if ((type == IRIX_P_PGID) && process_group(p) != pid)
+ if ((type == IRIX_P_PGID) && task_pgrp_nr(p) != pid)
continue;
if ((p->exit_signal != SIGCHLD))
continue;
diff --git a/arch/mips/kernel/irq-rm7000.c b/arch/mips/kernel/irq-rm7000.c
index 25073288348..971adf6ef4f 100644
--- a/arch/mips/kernel/irq-rm7000.c
+++ b/arch/mips/kernel/irq-rm7000.c
@@ -44,5 +44,5 @@ void __init rm7k_cpu_irq_init(void)
for (i = base; i < base + 4; i++)
set_irq_chip_and_handler(i, &rm7k_irq_controller,
- handle_level_irq);
+ handle_percpu_irq);
}
diff --git a/arch/mips/kernel/irq-rm9000.c b/arch/mips/kernel/irq-rm9000.c
index ae83d2df6f3..7b04583bd80 100644
--- a/arch/mips/kernel/irq-rm9000.c
+++ b/arch/mips/kernel/irq-rm9000.c
@@ -104,5 +104,5 @@ void __init rm9k_cpu_irq_init(void)
rm9000_perfcount_irq = base + 1;
set_irq_chip_and_handler(rm9000_perfcount_irq, &rm9k_perfcounter_irq,
- handle_level_irq);
+ handle_percpu_irq);
}
diff --git a/arch/mips/kernel/irq_cpu.c b/arch/mips/kernel/irq_cpu.c
index 7b66e03b589..0ee2567b780 100644
--- a/arch/mips/kernel/irq_cpu.c
+++ b/arch/mips/kernel/irq_cpu.c
@@ -116,5 +116,5 @@ void __init mips_cpu_irq_init(void)
for (i = irq_base + 2; i < irq_base + 8; i++)
set_irq_chip_and_handler(i, &mips_cpu_irq_controller,
- handle_level_irq);
+ handle_percpu_irq);
}
diff --git a/arch/mips/kernel/linux32.c b/arch/mips/kernel/linux32.c
index d6e01215fb2..2b8ec1102e8 100644
--- a/arch/mips/kernel/linux32.c
+++ b/arch/mips/kernel/linux32.c
@@ -35,12 +35,12 @@
#include <linux/security.h>
#include <linux/compat.h>
#include <linux/vfs.h>
+#include <linux/ipc.h>
#include <net/sock.h>
#include <net/scm.h>
#include <asm/compat-signal.h>
-#include <asm/ipc.h>
#include <asm/sim.h>
#include <asm/uaccess.h>
#include <asm/mmu_context.h>
diff --git a/arch/mips/kernel/module.c b/arch/mips/kernel/module.c
index cb0801437b6..e7ed0ac4853 100644
--- a/arch/mips/kernel/module.c
+++ b/arch/mips/kernel/module.c
@@ -381,7 +381,7 @@ const struct exception_table_entry *search_module_dbetables(unsigned long addr)
return e;
}
-/* Put in dbe list if neccessary. */
+/* Put in dbe list if necessary. */
int module_finalize(const Elf_Ehdr *hdr,
const Elf_Shdr *sechdrs,
struct module *me)
diff --git a/arch/mips/kernel/proc.c b/arch/mips/kernel/proc.c
index efd2d131412..6e6e947cce1 100644
--- a/arch/mips/kernel/proc.c
+++ b/arch/mips/kernel/proc.c
@@ -60,6 +60,8 @@ static int show_cpuinfo(struct seq_file *m, void *v)
cpu_has_dsp ? " dsp" : "",
cpu_has_mipsmt ? " mt" : ""
);
+ seq_printf(m, "shadow register sets\t: %d\n",
+ cpu_data[n].srsets);
sprintf(fmt, "VCE%%c exceptions\t\t: %s\n",
cpu_has_vce ? "%u" : "not available");
diff --git a/arch/mips/kernel/ptrace.c b/arch/mips/kernel/ptrace.c
index 58aa6fec114..35234b92b9a 100644
--- a/arch/mips/kernel/ptrace.c
+++ b/arch/mips/kernel/ptrace.c
@@ -65,13 +65,13 @@ int ptrace_getregs(struct task_struct *child, __s64 __user *data)
regs = task_pt_regs(child);
for (i = 0; i < 32; i++)
- __put_user(regs->regs[i], data + i);
- __put_user(regs->lo, data + EF_LO - EF_R0);
- __put_user(regs->hi, data + EF_HI - EF_R0);
- __put_user(regs->cp0_epc, data + EF_CP0_EPC - EF_R0);
- __put_user(regs->cp0_badvaddr, data + EF_CP0_BADVADDR - EF_R0);
- __put_user(regs->cp0_status, data + EF_CP0_STATUS - EF_R0);
- __put_user(regs->cp0_cause, data + EF_CP0_CAUSE - EF_R0);
+ __put_user((long)regs->regs[i], data + i);
+ __put_user((long)regs->lo, data + EF_LO - EF_R0);
+ __put_user((long)regs->hi, data + EF_HI - EF_R0);
+ __put_user((long)regs->cp0_epc, data + EF_CP0_EPC - EF_R0);
+ __put_user((long)regs->cp0_badvaddr, data + EF_CP0_BADVADDR - EF_R0);
+ __put_user((long)regs->cp0_status, data + EF_CP0_STATUS - EF_R0);
+ __put_user((long)regs->cp0_cause, data + EF_CP0_CAUSE - EF_R0);
return 0;
}
@@ -390,11 +390,11 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
}
case PTRACE_GETREGS:
- ret = ptrace_getregs(child, (__u64 __user *) data);
+ ret = ptrace_getregs(child, (__s64 __user *) data);
break;
case PTRACE_SETREGS:
- ret = ptrace_setregs(child, (__u64 __user *) data);
+ ret = ptrace_setregs(child, (__s64 __user *) data);
break;
case PTRACE_GETFPREGS:
@@ -435,10 +435,6 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
wake_up_process(child);
break;
- case PTRACE_DETACH: /* detach a process that was attached. */
- ret = ptrace_detach(child, data);
- break;
-
case PTRACE_GET_THREAD_AREA:
ret = put_user(task_thread_info(child)->tp_value,
(unsigned long __user *) data);
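
The (long) casts added to ptrace_getregs() matter on a 32-bit kernel: the destination is an __s64 slot, and casting through a signed type first stores 32-bit register values sign-extended, as MIPS keeps 32-bit quantities in 64-bit registers, rather than zero-extended. A small illustration, using int/unsigned int so it behaves the same on any host (the kernel code itself casts to long):

    #include <stdio.h>

    int main(void)
    {
            /* model a 32-bit kernel: 32-bit register, 64-bit ptrace slot */
            unsigned int reg = 0x80000000u;

            long long sign_ext = (int)reg;  /* cast first, as the patch does */
            long long zero_ext = reg;       /* old behaviour */

            printf("sign-extended: 0x%016llx\n", (unsigned long long)sign_ext);
            printf("zero-extended: 0x%016llx\n", (unsigned long long)zero_ext);
            return 0;
    }
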
diff --git a/arch/mips/kernel/ptrace32.c b/arch/mips/kernel/ptrace32.c
index f2bffed94fa..76818be6ba7 100644
--- a/arch/mips/kernel/ptrace32.c
+++ b/arch/mips/kernel/ptrace32.c
@@ -346,11 +346,11 @@ asmlinkage int sys32_ptrace(int request, int pid, int addr, int data)
}
case PTRACE_GETREGS:
- ret = ptrace_getregs(child, (__u64 __user *) (__u64) data);
+ ret = ptrace_getregs(child, (__s64 __user *) (__u64) data);
break;
case PTRACE_SETREGS:
- ret = ptrace_setregs(child, (__u64 __user *) (__u64) data);
+ ret = ptrace_setregs(child, (__s64 __user *) (__u64) data);
break;
case PTRACE_GETFPREGS:
diff --git a/arch/mips/kernel/scall64-n32.S b/arch/mips/kernel/scall64-n32.S
index 118be24224f..01993ec3368 100644
--- a/arch/mips/kernel/scall64-n32.S
+++ b/arch/mips/kernel/scall64-n32.S
@@ -293,7 +293,7 @@ EXPORT(sysn32_call_table)
PTR sys_ni_syscall /* 6170, was get_kernel_syms */
PTR sys_ni_syscall /* was query_module */
PTR sys_quotactl
- PTR sys_nfsservctl
+ PTR compat_sys_nfsservctl
PTR sys_ni_syscall /* res. for getpmsg */
PTR sys_ni_syscall /* 6175 for putpmsg */
PTR sys_ni_syscall /* res. for afs_syscall */
diff --git a/arch/mips/kernel/smtc.c b/arch/mips/kernel/smtc.c
index a8c1a698d58..9c92d42996c 100644
--- a/arch/mips/kernel/smtc.c
+++ b/arch/mips/kernel/smtc.c
@@ -88,11 +88,19 @@ unsigned int smtc_status = 0;
/* Boot command line configuration overrides */
+static int vpe0limit;
static int ipibuffers = 0;
static int nostlb = 0;
static int asidmask = 0;
unsigned long smtc_asid_mask = 0xff;
+static int __init vpe0tcs(char *str)
+{
+ get_option(&str, &vpe0limit);
+
+ return 1;
+}
+
static int __init ipibufs(char *str)
{
get_option(&str, &ipibuffers);
@@ -125,6 +133,7 @@ static int __init asidmask_set(char *str)
return 1;
}
+__setup("vpe0tcs=", vpe0tcs);
__setup("ipibufs=", ipibufs);
__setup("nostlb", stlb_disable);
__setup("asidmask=", asidmask_set);
@@ -340,7 +349,7 @@ static void smtc_tc_setup(int vpe, int tc, int cpu)
void mipsmt_prepare_cpus(void)
{
- int i, vpe, tc, ntc, nvpe, tcpervpe, slop, cpu;
+ int i, vpe, tc, ntc, nvpe, tcpervpe[NR_CPUS], slop, cpu;
unsigned long flags;
unsigned long val;
int nipi;
@@ -401,8 +410,39 @@ void mipsmt_prepare_cpus(void)
ntc = NR_CPUS;
if (tclimit > 0 && ntc > tclimit)
ntc = tclimit;
- tcpervpe = ntc / nvpe;
- slop = ntc % nvpe; /* Residual TCs, < NVPE */
+ slop = ntc % nvpe;
+ for (i = 0; i < nvpe; i++) {
+ tcpervpe[i] = ntc / nvpe;
+ if (slop) {
+ if((slop - i) > 0) tcpervpe[i]++;
+ }
+ }
+ /* Handle command line override for VPE0 */
+ if (vpe0limit > ntc) vpe0limit = ntc;
+ if (vpe0limit > 0) {
+ int slopslop;
+ if (vpe0limit < tcpervpe[0]) {
+ /* Reducing TC count - distribute to others */
+ slop = tcpervpe[0] - vpe0limit;
+ slopslop = slop % (nvpe - 1);
+ tcpervpe[0] = vpe0limit;
+ for (i = 1; i < nvpe; i++) {
+ tcpervpe[i] += slop / (nvpe - 1);
+ if(slopslop && ((slopslop - (i - 1) > 0)))
+ tcpervpe[i]++;
+ }
+ } else if (vpe0limit > tcpervpe[0]) {
+ /* Increasing TC count - steal from others */
+ slop = vpe0limit - tcpervpe[0];
+ slopslop = slop % (nvpe - 1);
+ tcpervpe[0] = vpe0limit;
+ for (i = 1; i < nvpe; i++) {
+ tcpervpe[i] -= slop / (nvpe - 1);
+ if(slopslop && ((slopslop - (i - 1) > 0)))
+ tcpervpe[i]--;
+ }
+ }
+ }
/* Set up shared TLB */
smtc_configure_tlb();
@@ -416,7 +456,7 @@ void mipsmt_prepare_cpus(void)
if (vpe != 0)
printk(", ");
printk("VPE %d: TC", vpe);
- for (i = 0; i < tcpervpe; i++) {
+ for (i = 0; i < tcpervpe[vpe]; i++) {
/*
* TC 0 is bound to VPE 0 at reset,
* and is presumably executing this
@@ -429,15 +469,6 @@ void mipsmt_prepare_cpus(void)
printk(" %d", tc);
tc++;
}
- if (slop) {
- if (tc != 0) {
- smtc_tc_setup(vpe, tc, cpu);
- cpu++;
- }
- printk(" %d", tc);
- tc++;
- slop--;
- }
if (vpe != 0) {
/*
* Clear any stale software interrupts from VPE's Cause
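
The reworked TC assignment first gives every VPE ntc / nvpe TCs and hands the remaining slop TCs to the lowest-numbered VPEs one each; the vpe0tcs= override then moves the difference between vpe0limit and VPE0's share to or from the other VPEs in the same round-robin style. A stand-alone sketch of the even-split step, with ntc = 5 and nvpe = 2 picked purely as an example:

    #include <stdio.h>

    #define MAX_VPES 8

    int main(void)
    {
            int ntc = 5, nvpe = 2;              /* assumed example counts */
            int tcpervpe[MAX_VPES] = { 0 };
            int slop = ntc % nvpe;
            int i;

            for (i = 0; i < nvpe; i++) {
                    tcpervpe[i] = ntc / nvpe;
                    if (slop && (slop - i) > 0) /* first 'slop' VPEs get one more */
                            tcpervpe[i]++;
            }

            for (i = 0; i < nvpe; i++)
                    printf("VPE%d: %d TCs\n", i, tcpervpe[i]); /* 3 and 2 */
            return 0;
    }
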
diff --git a/arch/mips/kernel/syscall.c b/arch/mips/kernel/syscall.c
index 17c4374d220..af1bdc89748 100644
--- a/arch/mips/kernel/syscall.c
+++ b/arch/mips/kernel/syscall.c
@@ -28,11 +28,11 @@
#include <linux/shm.h>
#include <linux/compiler.h>
#include <linux/module.h>
+#include <linux/ipc.h>
#include <asm/branch.h>
#include <asm/cachectl.h>
#include <asm/cacheflush.h>
-#include <asm/ipc.h>
#include <asm/asm-offsets.h>
#include <asm/signal.h>
#include <asm/sim.h>
@@ -73,7 +73,14 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
task_size = STACK_TOP;
+ if (len > task_size)
+ return -ENOMEM;
+
if (flags & MAP_FIXED) {
+ /* Even MAP_FIXED mappings must reside within task_size. */
+ if (task_size - len < addr)
+ return -EINVAL;
+
/*
* We do not accept a shared mapping if it would violate
* cache aliasing constraints.
@@ -83,8 +90,6 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
return addr;
}
- if (len > task_size)
- return -ENOMEM;
do_color_align = 0;
if (filp || (flags & MAP_SHARED))
do_color_align = 1;
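
The new MAP_FIXED check is written as task_size - len < addr rather than addr + len > task_size: once len <= task_size has been established, the subtraction cannot underflow, whereas the addition could wrap for a huge addr and let an out-of-range request through. A small demonstration with made-up 32-bit values:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint32_t task_size = 0x80000000u;   /* illustrative user limit */
            uint32_t addr = 0xfffff000u;        /* bogus MAP_FIXED hint */
            uint32_t len = 0x2000u;

            /* naive check: addr + len wraps to 0x1000 and slips through */
            printf("naive rejects: %d\n", addr + len > task_size);

            /* overflow-safe form used by the patch (len <= task_size holds) */
            printf("safe rejects:  %d\n", task_size - len < addr);
            return 0;
    }
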
diff --git a/arch/mips/kernel/sysirix.c b/arch/mips/kernel/sysirix.c
index ee7790d9deb..4c477c7ff74 100644
--- a/arch/mips/kernel/sysirix.c
+++ b/arch/mips/kernel/sysirix.c
@@ -763,11 +763,11 @@ asmlinkage int irix_setpgrp(int flags)
printk("[%s:%d] setpgrp(%d) ", current->comm, current->pid, flags);
#endif
if(!flags)
- error = process_group(current);
+ error = task_pgrp_nr(current);
else
error = sys_setsid();
#ifdef DEBUG_PROCGRPS
- printk("returning %d\n", process_group(current));
+ printk("returning %d\n", task_pgrp_nr(current));
#endif
return error;
diff --git a/arch/mips/kernel/time.c b/arch/mips/kernel/time.c
index 5892491b40e..3284b9b4eca 100644
--- a/arch/mips/kernel/time.c
+++ b/arch/mips/kernel/time.c
@@ -3,53 +3,31 @@
* Author: Jun Sun, jsun@mvista.com or jsun@junsun.net
* Copyright (c) 2003, 2004 Maciej W. Rozycki
*
- * Common time service routines for MIPS machines. See
- * Documentation/mips/time.README.
+ * Common time service routines for MIPS machines.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*/
+#include <linux/bug.h>
#include <linux/clockchips.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/param.h>
-#include <linux/profile.h>
#include <linux/time.h>
#include <linux/timex.h>
#include <linux/smp.h>
-#include <linux/kernel_stat.h>
#include <linux/spinlock.h>
-#include <linux/interrupt.h>
#include <linux/module.h>
-#include <linux/kallsyms.h>
-#include <asm/bootinfo.h>
-#include <asm/cache.h>
-#include <asm/compiler.h>
-#include <asm/cpu.h>
#include <asm/cpu-features.h>
#include <asm/div64.h>
-#include <asm/sections.h>
#include <asm/smtc_ipi.h>
#include <asm/time.h>
-#include <irq.h>
-
-/*
- * The integer part of the number of usecs per jiffy is taken from tick,
- * but the fractional part is not recorded, so we calculate it using the
- * initial value of HZ. This aids systems where tick isn't really an
- * integer (e.g. for HZ = 128).
- */
-#define USECS_PER_JIFFY TICK_SIZE
-#define USECS_PER_JIFFY_FRAC ((unsigned long)(u32)((1000000ULL << 32) / HZ))
-
-#define TICK_SIZE (tick_nsec / 1000)
-
/*
* forward reference
*/
@@ -72,30 +50,6 @@ int update_persistent_clock(struct timespec now)
return rtc_mips_set_mmss(now.tv_sec);
}
-/* how many counter cycles in a jiffy */
-static unsigned long cycles_per_jiffy __read_mostly;
-
-/*
- * Null timer ack for systems not needing one (e.g. i8254).
- */
-static void null_timer_ack(void) { /* nothing */ }
-
-/*
- * Null high precision timer functions for systems lacking one.
- */
-static cycle_t null_hpt_read(void)
-{
- return 0;
-}
-
-/*
- * Timer ack for an R4k-compatible timer of a known frequency.
- */
-static void c0_timer_ack(void)
-{
- write_c0_compare(read_c0_compare());
-}
-
/*
* High precision timer functions for a R4k-compatible timer.
*/
@@ -105,23 +59,6 @@ static cycle_t c0_hpt_read(void)
}
int (*mips_timer_state)(void);
-void (*mips_timer_ack)(void);
-
-/*
- * local_timer_interrupt() does profiling and process accounting
- * on a per-CPU basis.
- *
- * In UP mode, it is invoked from the (global) timer_interrupt.
- *
- * In SMP mode, it might invoked by per-CPU timer interrupt, or
- * a broadcasted inter-processor interrupt which itself is triggered
- * by the global timer interrupt.
- */
-void local_timer_interrupt(int irq, void *dev_id)
-{
- profile_tick(CPU_PROFILING);
- update_process_times(user_mode(get_irq_regs()));
-}
int null_perf_irq(void)
{
@@ -135,35 +72,6 @@ int (*perf_irq)(void) = null_perf_irq;
EXPORT_SYMBOL(perf_irq);
/*
- * Timer interrupt
- */
-int cp0_compare_irq;
-
-/*
- * Performance counter IRQ or -1 if shared with timer
- */
-int cp0_perfcount_irq;
-EXPORT_SYMBOL_GPL(cp0_perfcount_irq);
-
-/*
- * Possibly handle a performance counter interrupt.
- * Return true if the timer interrupt should not be checked
- */
-static inline int handle_perf_irq(int r2)
-{
- /*
- * The performance counter overflow interrupt may be shared with the
- * timer interrupt (cp0_perfcount_irq < 0). If it is and a
- * performance counter has overflowed (perf_irq() == IRQ_HANDLED)
- * and we can't reliably determine if a counter interrupt has also
- * happened (!r2) then don't check for a timer interrupt.
- */
- return (cp0_perfcount_irq < 0) &&
- perf_irq() == IRQ_HANDLED &&
- !r2;
-}
-
-/*
* time_init() - it does the following things.
*
* 1) plat_time_init() -
@@ -172,14 +80,17 @@ static inline int handle_perf_irq(int r2)
* (only needed if you intended to use cpu counter as timer interrupt
* source)
* 2) calculate a couple of cached variables for later usage
- * 3) plat_timer_setup() -
- * a) (optional) over-write any choices made above by time_init().
- * b) machine specific code should setup the timer irqaction.
- * c) enable the timer interrupt
*/
unsigned int mips_hpt_frequency;
+static struct clocksource clocksource_mips = {
+ .name = "MIPS",
+ .read = c0_hpt_read,
+ .mask = CLOCKSOURCE_MASK(32),
+ .flags = CLOCK_SOURCE_IS_CONTINUOUS,
+};
+
static unsigned int __init calibrate_hpt(void)
{
cycle_t frequency, hpt_start, hpt_end, hpt_count, hz;
@@ -222,258 +133,73 @@ static unsigned int __init calibrate_hpt(void)
return frequency >> log_2_loops;
}
-struct clocksource clocksource_mips = {
- .name = "MIPS",
- .mask = CLOCKSOURCE_MASK(32),
- .flags = CLOCK_SOURCE_IS_CONTINUOUS,
-};
-
-static int mips_next_event(unsigned long delta,
- struct clock_event_device *evt)
-{
- unsigned int cnt;
- int res;
-
-#ifdef CONFIG_MIPS_MT_SMTC
- {
- unsigned long flags, vpflags;
- local_irq_save(flags);
- vpflags = dvpe();
-#endif
- cnt = read_c0_count();
- cnt += delta;
- write_c0_compare(cnt);
- res = ((long)(read_c0_count() - cnt ) > 0) ? -ETIME : 0;
-#ifdef CONFIG_MIPS_MT_SMTC
- evpe(vpflags);
- local_irq_restore(flags);
- }
-#endif
- return res;
-}
-
-static void mips_set_mode(enum clock_event_mode mode,
- struct clock_event_device *evt)
-{
- /* Nothing to do ... */
-}
-
-static DEFINE_PER_CPU(struct clock_event_device, mips_clockevent_device);
-static int cp0_timer_irq_installed;
-
-static irqreturn_t timer_interrupt(int irq, void *dev_id)
-{
- const int r2 = cpu_has_mips_r2;
- struct clock_event_device *cd;
- int cpu = smp_processor_id();
-
- /*
- * Suckage alert:
- * Before R2 of the architecture there was no way to see if a
- * performance counter interrupt was pending, so we have to run
- * the performance counter interrupt handler anyway.
- */
- if (handle_perf_irq(r2))
- goto out;
-
- /*
- * The same applies to performance counter interrupts. But with the
- * above we now know that the reason we got here must be a timer
- * interrupt. Being the paranoiacs we are we check anyway.
- */
- if (!r2 || (read_c0_cause() & (1 << 30))) {
- c0_timer_ack();
-#ifdef CONFIG_MIPS_MT_SMTC
- if (cpu_data[cpu].vpe_id)
- goto out;
- cpu = 0;
-#endif
- cd = &per_cpu(mips_clockevent_device, cpu);
- cd->event_handler(cd);
- }
-
-out:
- return IRQ_HANDLED;
-}
-
-static struct irqaction timer_irqaction = {
- .handler = timer_interrupt,
-#ifdef CONFIG_MIPS_MT_SMTC
- .flags = IRQF_DISABLED,
-#else
- .flags = IRQF_DISABLED | IRQF_PERCPU,
-#endif
- .name = "timer",
-};
-
-static void __init init_mips_clocksource(void)
+void __init clocksource_set_clock(struct clocksource *cs, unsigned int clock)
{
u64 temp;
u32 shift;
- if (!mips_hpt_frequency || clocksource_mips.read == null_hpt_read)
- return;
-
- /* Calclate a somewhat reasonable rating value */
- clocksource_mips.rating = 200 + mips_hpt_frequency / 10000000;
/* Find a shift value */
for (shift = 32; shift > 0; shift--) {
temp = (u64) NSEC_PER_SEC << shift;
- do_div(temp, mips_hpt_frequency);
+ do_div(temp, clock);
if ((temp >> 32) == 0)
break;
}
- clocksource_mips.shift = shift;
- clocksource_mips.mult = (u32)temp;
-
- clocksource_register(&clocksource_mips);
-}
-
-void __init __weak plat_time_init(void)
-{
+ cs->shift = shift;
+ cs->mult = (u32) temp;
}
-void __init __weak plat_timer_setup(struct irqaction *irq)
+void __cpuinit clockevent_set_clock(struct clock_event_device *cd,
+ unsigned int clock)
{
-}
-
-#ifdef CONFIG_MIPS_MT_SMTC
-DEFINE_PER_CPU(struct clock_event_device, smtc_dummy_clockevent_device);
-
-static void smtc_set_mode(enum clock_event_mode mode,
- struct clock_event_device *evt)
-{
-}
-
-int dummycnt[NR_CPUS];
-
-static void mips_broadcast(cpumask_t mask)
-{
- unsigned int cpu;
+ u64 temp;
+ u32 shift;
- for_each_cpu_mask(cpu, mask)
- smtc_send_ipi(cpu, SMTC_CLOCK_TICK, 0);
+ /* Find a shift value */
+ for (shift = 32; shift > 0; shift--) {
+ temp = (u64) clock << shift;
+ do_div(temp, NSEC_PER_SEC);
+ if ((temp >> 32) == 0)
+ break;
+ }
+ cd->shift = shift;
+ cd->mult = (u32) temp;
}
-static void setup_smtc_dummy_clockevent_device(void)
+static void __init init_mips_clocksource(void)
{
-	//uint64_t mips_freq = mips_hpt_frequency;
- unsigned int cpu = smp_processor_id();
- struct clock_event_device *cd;
-
- cd = &per_cpu(smtc_dummy_clockevent_device, cpu);
-
- cd->name = "SMTC";
- cd->features = CLOCK_EVT_FEAT_DUMMY;
-
- /* Calculate the min / max delta */
- cd->mult = 0; //div_sc((unsigned long) mips_freq, NSEC_PER_SEC, 32);
- cd->shift = 0; //32;
- cd->max_delta_ns = 0; //clockevent_delta2ns(0x7fffffff, cd);
- cd->min_delta_ns = 0; //clockevent_delta2ns(0x30, cd);
-
- cd->rating = 200;
- cd->irq = 17; //-1;
-// if (cpu)
-// cd->cpumask = CPU_MASK_ALL; // cpumask_of_cpu(cpu);
-// else
- cd->cpumask = cpumask_of_cpu(cpu);
-
- cd->set_mode = smtc_set_mode;
+	/* Calculate a somewhat reasonable rating value */
+ clocksource_mips.rating = 200 + mips_hpt_frequency / 10000000;
- cd->broadcast = mips_broadcast;
+ clocksource_set_clock(&clocksource_mips, mips_hpt_frequency);
- clockevents_register_device(cd);
+ clocksource_register(&clocksource_mips);
}
-#endif
-static void mips_event_handler(struct clock_event_device *dev)
+void __init __weak plat_time_init(void)
{
}
-void __cpuinit mips_clockevent_init(void)
+/*
+ * This function exists in order to cause an error due to a duplicate
+ * definition if platform code should have its own implementation. The hook
+ * to use instead is plat_time_init, which no longer receives the irqaction
+ * pointer argument: any function which initializes an interrupt timer now
+ * takes care of its own request_irq or setup_irq call, and each
+ * clock_event_device should use its own struct irqaction.
+ */
+void __init plat_timer_setup(void)
{
- uint64_t mips_freq = mips_hpt_frequency;
- unsigned int cpu = smp_processor_id();
- struct clock_event_device *cd;
- unsigned int irq = MIPS_CPU_IRQ_BASE + 7;
-
- if (!cpu_has_counter)
- return;
-
-#ifdef CONFIG_MIPS_MT_SMTC
- setup_smtc_dummy_clockevent_device();
-
- /*
- * On SMTC we only register VPE0's compare interrupt as clockevent
- * device.
- */
- if (cpu)
- return;
-#endif
-
- cd = &per_cpu(mips_clockevent_device, cpu);
-
- cd->name = "MIPS";
- cd->features = CLOCK_EVT_FEAT_ONESHOT;
-
- /* Calculate the min / max delta */
- cd->mult = div_sc((unsigned long) mips_freq, NSEC_PER_SEC, 32);
- cd->shift = 32;
- cd->max_delta_ns = clockevent_delta2ns(0x7fffffff, cd);
- cd->min_delta_ns = clockevent_delta2ns(0x30, cd);
-
- cd->rating = 300;
- cd->irq = irq;
-#ifdef CONFIG_MIPS_MT_SMTC
- cd->cpumask = CPU_MASK_ALL;
-#else
- cd->cpumask = cpumask_of_cpu(cpu);
-#endif
- cd->set_next_event = mips_next_event;
- cd->set_mode = mips_set_mode;
- cd->event_handler = mips_event_handler;
-
- clockevents_register_device(cd);
-
- if (!cp0_timer_irq_installed) {
-#ifdef CONFIG_MIPS_MT_SMTC
-#define CPUCTR_IMASKBIT (0x100 << cp0_compare_irq)
- setup_irq_smtc(irq, &timer_irqaction, CPUCTR_IMASKBIT);
-#else
- setup_irq(irq, &timer_irqaction);
-#endif /* CONFIG_MIPS_MT_SMTC */
- cp0_timer_irq_installed = 1;
- }
+ BUG();
}
void __init time_init(void)
{
plat_time_init();
- /* Choose appropriate high precision timer routines. */
- if (!cpu_has_counter && !clocksource_mips.read)
- /* No high precision timer -- sorry. */
- clocksource_mips.read = null_hpt_read;
- else if (!mips_hpt_frequency && !mips_timer_state) {
- /* A high precision timer of unknown frequency. */
- if (!clocksource_mips.read)
- /* No external high precision timer -- use R4k. */
- clocksource_mips.read = c0_hpt_read;
- } else {
+ if (cpu_has_counter && (mips_hpt_frequency || mips_timer_state)) {
/* We know counter frequency. Or we can get it. */
- if (!clocksource_mips.read) {
- /* No external high precision timer -- use R4k. */
- clocksource_mips.read = c0_hpt_read;
-
- if (!mips_timer_state) {
- /* No external timer interrupt -- use R4k. */
- mips_timer_ack = c0_timer_ack;
- /* Calculate cache parameters. */
- cycles_per_jiffy =
- (mips_hpt_frequency + HZ / 2) / HZ;
- }
- }
if (!mips_hpt_frequency)
mips_hpt_frequency = calibrate_hpt();
@@ -481,29 +207,8 @@ void __init time_init(void)
printk("Using %u.%03u MHz high precision timer.\n",
((mips_hpt_frequency + 500) / 1000) / 1000,
((mips_hpt_frequency + 500) / 1000) % 1000);
-
-#ifdef CONFIG_IRQ_CPU
- setup_irq(MIPS_CPU_IRQ_BASE + 7, &timer_irqaction);
-#endif
+ init_mips_clocksource();
}
- if (!mips_timer_ack)
- /* No timer interrupt ack (e.g. i8254). */
- mips_timer_ack = null_timer_ack;
-
- /*
- * Call board specific timer interrupt setup.
- *
- * this pointer must be setup in machine setup routine.
- *
- * Even if a machine chooses to use a low-level timer interrupt,
- * it still needs to setup the timer_irqaction.
- * In that case, it might be better to set timer_irqaction.handler
- * to be NULL function so that we are sure the high-level code
- * is not invoked accidentally.
- */
- plat_timer_setup(&timer_irqaction);
-
- init_mips_clocksource();
mips_clockevent_init();
}
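
The mult/shift search introduced above in clocksource_set_clock() and clockevent_set_clock() is easier to follow outside kernel context. Here is a minimal user-space sketch of the clocksource variant; the helper name set_clock_mult_shift and the 100 MHz sample frequency are made up for illustration and do not appear in the patch:

#include <stdint.h>
#include <stdio.h>

#define NSEC_PER_SEC 1000000000ULL

/*
 * Same idea as clocksource_set_clock(): pick the largest shift for
 * which (NSEC_PER_SEC << shift) / clock still fits in 32 bits and use
 * the quotient as the multiplier.
 */
static void set_clock_mult_shift(uint32_t clock, uint32_t *mult, uint32_t *shift)
{
	uint64_t temp = 0;
	uint32_t s;

	for (s = 32; s > 0; s--) {
		temp = ((uint64_t)NSEC_PER_SEC << s) / clock;	/* do_div() stand-in */
		if ((temp >> 32) == 0)
			break;
	}
	*shift = s;
	*mult = (uint32_t)temp;
}

int main(void)
{
	uint32_t mult, shift;

	set_clock_mult_shift(100000000, &mult, &shift);	/* 100 MHz example */
	printf("mult=%u shift=%u\n", mult, shift);

	/* The clocksource core converts cycles to ns as (cyc * mult) >> shift. */
	printf("12345 cycles -> %llu ns\n",
	       (unsigned long long)(((uint64_t)12345 * mult) >> shift));
	return 0;
}

clockevent_set_clock() performs the inverse division ((clock << shift) / NSEC_PER_SEC) because a clock event device multiplies nanoseconds to obtain cycles rather than the other way around.
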
diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c
index 632bce1bf42..23e73d0650a 100644
--- a/arch/mips/kernel/traps.c
+++ b/arch/mips/kernel/traps.c
@@ -9,9 +9,10 @@
* Copyright (C) 1999 Silicon Graphics, Inc.
* Kevin D. Kissell, kevink@mips.com and Carsten Langgaard, carstenl@mips.com
* Copyright (C) 2000, 01 MIPS Technologies, Inc.
- * Copyright (C) 2002, 2003, 2004, 2005 Maciej W. Rozycki
+ * Copyright (C) 2002, 2003, 2004, 2005, 2007 Maciej W. Rozycki
*/
#include <linux/bug.h>
+#include <linux/compiler.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/module.h>
@@ -104,7 +105,7 @@ static int __init set_raw_show_trace(char *str)
__setup("raw_show_trace", set_raw_show_trace);
#endif
-static void show_backtrace(struct task_struct *task, struct pt_regs *regs)
+static void show_backtrace(struct task_struct *task, const struct pt_regs *regs)
{
unsigned long sp = regs->regs[29];
unsigned long ra = regs->regs[31];
@@ -126,7 +127,8 @@ static void show_backtrace(struct task_struct *task, struct pt_regs *regs)
* This routine abuses get_user()/put_user() to reference pointers
* with at least a bit of error checking ...
*/
-static void show_stacktrace(struct task_struct *task, struct pt_regs *regs)
+static void show_stacktrace(struct task_struct *task,
+ const struct pt_regs *regs)
{
const int field = 2 * sizeof(unsigned long);
long stackdata;
@@ -203,7 +205,7 @@ static void show_code(unsigned int __user *pc)
}
}
-void show_regs(struct pt_regs *regs)
+static void __show_regs(const struct pt_regs *regs)
{
const int field = 2 * sizeof(unsigned long);
unsigned int cause = regs->cp0_cause;
@@ -299,12 +301,20 @@ void show_regs(struct pt_regs *regs)
cpu_name_string());
}
-void show_registers(struct pt_regs *regs)
+/*
+ * FIXME: really the generic show_regs should take a const pointer argument.
+ */
+void show_regs(struct pt_regs *regs)
{
- show_regs(regs);
+ __show_regs((struct pt_regs *)regs);
+}
+
+void show_registers(const struct pt_regs *regs)
+{
+ __show_regs(regs);
print_modules();
printk("Process %s (pid: %d, threadinfo=%p, task=%p)\n",
- current->comm, current->pid, current_thread_info(), current);
+ current->comm, task_pid_nr(current), current_thread_info(), current);
show_stacktrace(current, regs);
show_code((unsigned int __user *) regs->cp0_epc);
printk("\n");
@@ -312,7 +322,7 @@ void show_registers(struct pt_regs *regs)
static DEFINE_SPINLOCK(die_lock);
-void __noreturn die(const char * str, struct pt_regs * regs)
+void __noreturn die(const char * str, const struct pt_regs * regs)
{
static int die_counter;
#ifdef CONFIG_MIPS_MT_SMTC
@@ -401,7 +411,7 @@ asmlinkage void do_be(struct pt_regs *regs)
}
/*
- * ll/sc emulation
+ * ll/sc, rdhwr, sync emulation
*/
#define OPCODE 0xfc000000
@@ -410,9 +420,11 @@ asmlinkage void do_be(struct pt_regs *regs)
#define OFFSET 0x0000ffff
#define LL 0xc0000000
#define SC 0xe0000000
+#define SPEC0 0x00000000
#define SPEC3 0x7c000000
#define RD 0x0000f800
#define FUNC 0x0000003f
+#define SYNC 0x0000000f
#define RDHWR 0x0000003b
/*
@@ -423,11 +435,10 @@ unsigned long ll_bit;
static struct task_struct *ll_task = NULL;
-static inline void simulate_ll(struct pt_regs *regs, unsigned int opcode)
+static inline int simulate_ll(struct pt_regs *regs, unsigned int opcode)
{
unsigned long value, __user *vaddr;
long offset;
- int signal = 0;
/*
* analyse the ll instruction that just caused a ri exception
@@ -442,14 +453,10 @@ static inline void simulate_ll(struct pt_regs *regs, unsigned int opcode)
vaddr = (unsigned long __user *)
((unsigned long)(regs->regs[(opcode & BASE) >> 21]) + offset);
- if ((unsigned long)vaddr & 3) {
- signal = SIGBUS;
- goto sig;
- }
- if (get_user(value, vaddr)) {
- signal = SIGSEGV;
- goto sig;
- }
+ if ((unsigned long)vaddr & 3)
+ return SIGBUS;
+ if (get_user(value, vaddr))
+ return SIGSEGV;
preempt_disable();
@@ -462,22 +469,16 @@ static inline void simulate_ll(struct pt_regs *regs, unsigned int opcode)
preempt_enable();
- compute_return_epc(regs);
-
regs->regs[(opcode & RT) >> 16] = value;
- return;
-
-sig:
- force_sig(signal, current);
+ return 0;
}
-static inline void simulate_sc(struct pt_regs *regs, unsigned int opcode)
+static inline int simulate_sc(struct pt_regs *regs, unsigned int opcode)
{
unsigned long __user *vaddr;
unsigned long reg;
long offset;
- int signal = 0;
/*
* analyse the sc instruction that just caused a ri exception
@@ -493,34 +494,25 @@ static inline void simulate_sc(struct pt_regs *regs, unsigned int opcode)
((unsigned long)(regs->regs[(opcode & BASE) >> 21]) + offset);
reg = (opcode & RT) >> 16;
- if ((unsigned long)vaddr & 3) {
- signal = SIGBUS;
- goto sig;
- }
+ if ((unsigned long)vaddr & 3)
+ return SIGBUS;
preempt_disable();
if (ll_bit == 0 || ll_task != current) {
- compute_return_epc(regs);
regs->regs[reg] = 0;
preempt_enable();
- return;
+ return 0;
}
preempt_enable();
- if (put_user(regs->regs[reg], vaddr)) {
- signal = SIGSEGV;
- goto sig;
- }
+ if (put_user(regs->regs[reg], vaddr))
+ return SIGSEGV;
- compute_return_epc(regs);
regs->regs[reg] = 1;
- return;
-
-sig:
- force_sig(signal, current);
+ return 0;
}
/*
@@ -530,27 +522,14 @@ sig:
* few processors such as NEC's VR4100 throw reserved instruction exceptions
* instead, so we're doing the emulation thing in both exception handlers.
*/
-static inline int simulate_llsc(struct pt_regs *regs)
+static int simulate_llsc(struct pt_regs *regs, unsigned int opcode)
{
- unsigned int opcode;
-
- if (get_user(opcode, (unsigned int __user *) exception_epc(regs)))
- goto out_sigsegv;
+ if ((opcode & OPCODE) == LL)
+ return simulate_ll(regs, opcode);
+ if ((opcode & OPCODE) == SC)
+ return simulate_sc(regs, opcode);
- if ((opcode & OPCODE) == LL) {
- simulate_ll(regs, opcode);
- return 0;
- }
- if ((opcode & OPCODE) == SC) {
- simulate_sc(regs, opcode);
- return 0;
- }
-
- return -EFAULT; /* Strange things going on ... */
-
-out_sigsegv:
- force_sig(SIGSEGV, current);
- return -EFAULT;
+ return -1; /* Must be something else ... */
}
/*
@@ -558,16 +537,9 @@ out_sigsegv:
* registers not implemented in hardware. The only current use of this
* is the thread area pointer.
*/
-static inline int simulate_rdhwr(struct pt_regs *regs)
+static int simulate_rdhwr(struct pt_regs *regs, unsigned int opcode)
{
struct thread_info *ti = task_thread_info(current);
- unsigned int opcode;
-
- if (get_user(opcode, (unsigned int __user *) exception_epc(regs)))
- goto out_sigsegv;
-
- if (unlikely(compute_return_epc(regs)))
- return -EFAULT;
if ((opcode & OPCODE) == SPEC3 && (opcode & FUNC) == RDHWR) {
int rd = (opcode & RD) >> 11;
@@ -577,16 +549,20 @@ static inline int simulate_rdhwr(struct pt_regs *regs)
regs->regs[rt] = ti->tp_value;
return 0;
default:
- return -EFAULT;
+ return -1;
}
}
/* Not ours. */
- return -EFAULT;
+ return -1;
+}
-out_sigsegv:
- force_sig(SIGSEGV, current);
- return -EFAULT;
+static int simulate_sync(struct pt_regs *regs, unsigned int opcode)
+{
+ if ((opcode & OPCODE) == SPEC0 && (opcode & FUNC) == SYNC)
+ return 0;
+
+ return -1; /* Must be something else ... */
}
asmlinkage void do_ov(struct pt_regs *regs)
@@ -758,16 +734,35 @@ out_sigsegv:
asmlinkage void do_ri(struct pt_regs *regs)
{
- die_if_kernel("Reserved instruction in kernel code", regs);
+ unsigned int __user *epc = (unsigned int __user *)exception_epc(regs);
+ unsigned long old_epc = regs->cp0_epc;
+ unsigned int opcode = 0;
+ int status = -1;
- if (!cpu_has_llsc)
- if (!simulate_llsc(regs))
- return;
+ die_if_kernel("Reserved instruction in kernel code", regs);
- if (!simulate_rdhwr(regs))
+ if (unlikely(compute_return_epc(regs) < 0))
return;
- force_sig(SIGILL, current);
+ if (unlikely(get_user(opcode, epc) < 0))
+ status = SIGSEGV;
+
+ if (!cpu_has_llsc && status < 0)
+ status = simulate_llsc(regs, opcode);
+
+ if (status < 0)
+ status = simulate_rdhwr(regs, opcode);
+
+ if (status < 0)
+ status = simulate_sync(regs, opcode);
+
+ if (status < 0)
+ status = SIGILL;
+
+ if (unlikely(status > 0)) {
+ regs->cp0_epc = old_epc; /* Undo skip-over. */
+ force_sig(status, current);
+ }
}
/*
@@ -799,7 +794,11 @@ static void mt_ase_fp_affinity(void)
asmlinkage void do_cpu(struct pt_regs *regs)
{
+ unsigned int __user *epc;
+ unsigned long old_epc;
+ unsigned int opcode;
unsigned int cpid;
+ int status;
die_if_kernel("do_cpu invoked from kernel context!", regs);
@@ -807,14 +806,32 @@ asmlinkage void do_cpu(struct pt_regs *regs)
switch (cpid) {
case 0:
- if (!cpu_has_llsc)
- if (!simulate_llsc(regs))
- return;
+ epc = (unsigned int __user *)exception_epc(regs);
+ old_epc = regs->cp0_epc;
+ opcode = 0;
+ status = -1;
- if (!simulate_rdhwr(regs))
+ if (unlikely(compute_return_epc(regs) < 0))
return;
- break;
+ if (unlikely(get_user(opcode, epc) < 0))
+ status = SIGSEGV;
+
+ if (!cpu_has_llsc && status < 0)
+ status = simulate_llsc(regs, opcode);
+
+ if (status < 0)
+ status = simulate_rdhwr(regs, opcode);
+
+ if (status < 0)
+ status = SIGILL;
+
+ if (unlikely(status > 0)) {
+ regs->cp0_epc = old_epc; /* Undo skip-over. */
+ force_sig(status, current);
+ }
+
+ return;
case 1:
if (used_math()) /* Using the FPU again. */
@@ -1083,59 +1100,6 @@ void *set_except_vector(int n, void *addr)
return (void *)old_handler;
}
-#ifdef CONFIG_CPU_MIPSR2_SRS
-/*
- * MIPSR2 shadow register set allocation
- * FIXME: SMP...
- */
-
-static struct shadow_registers {
- /*
- * Number of shadow register sets supported
- */
- unsigned long sr_supported;
- /*
- * Bitmap of allocated shadow registers
- */
- unsigned long sr_allocated;
-} shadow_registers;
-
-static void mips_srs_init(void)
-{
- shadow_registers.sr_supported = ((read_c0_srsctl() >> 26) & 0x0f) + 1;
- printk(KERN_INFO "%ld MIPSR2 register sets available\n",
- shadow_registers.sr_supported);
- shadow_registers.sr_allocated = 1; /* Set 0 used by kernel */
-}
-
-int mips_srs_max(void)
-{
- return shadow_registers.sr_supported;
-}
-
-int mips_srs_alloc(void)
-{
- struct shadow_registers *sr = &shadow_registers;
- int set;
-
-again:
- set = find_first_zero_bit(&sr->sr_allocated, sr->sr_supported);
- if (set >= sr->sr_supported)
- return -1;
-
- if (test_and_set_bit(set, &sr->sr_allocated))
- goto again;
-
- return set;
-}
-
-void mips_srs_free(int set)
-{
- struct shadow_registers *sr = &shadow_registers;
-
- clear_bit(set, &sr->sr_allocated);
-}
-
static asmlinkage void do_default_vi(void)
{
show_regs(get_irq_regs());
@@ -1146,6 +1110,7 @@ static void *set_vi_srs_handler(int n, vi_handler_t addr, int srs)
{
unsigned long handler;
unsigned long old_handler = vi_handlers[n];
+ int srssets = current_cpu_data.srsets;
u32 *w;
unsigned char *b;
@@ -1161,7 +1126,7 @@ static void *set_vi_srs_handler(int n, vi_handler_t addr, int srs)
b = (unsigned char *)(ebase + 0x200 + n*VECTORSPACING);
- if (srs >= mips_srs_max())
+ if (srs >= srssets)
panic("Shadow register set %d not supported", srs);
if (cpu_has_veic) {
@@ -1169,7 +1134,7 @@ static void *set_vi_srs_handler(int n, vi_handler_t addr, int srs)
board_bind_eic_interrupt(n, srs);
} else if (cpu_has_vint) {
/* SRSMap is only defined if shadow sets are implemented */
- if (mips_srs_max() > 1)
+ if (srssets > 1)
change_c0_srsmap(0xf << n*4, srs << n*4);
}
@@ -1236,14 +1201,6 @@ void *set_vi_handler(int n, vi_handler_t addr)
return set_vi_srs_handler(n, addr, 0);
}
-#else
-
-static inline void mips_srs_init(void)
-{
-}
-
-#endif /* CONFIG_CPU_MIPSR2_SRS */
-
/*
* This is used by native signal handling
*/
@@ -1319,6 +1276,17 @@ extern void cpu_cache_init(void);
extern void tlb_init(void);
extern void flush_tlb_handlers(void);
+/*
+ * Timer interrupt
+ */
+int cp0_compare_irq;
+
+/*
+ * Performance counter IRQ or -1 if shared with timer
+ */
+int cp0_perfcount_irq;
+EXPORT_SYMBOL_GPL(cp0_perfcount_irq);
+
void __init per_cpu_trap_init(void)
{
unsigned int cpu = smp_processor_id();
@@ -1475,8 +1443,6 @@ void __init trap_init(void)
else
ebase = CAC_BASE;
- mips_srs_init();
-
per_cpu_trap_init();
	/*
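
The do_ri() and do_cpu() rewrites above share one convention: each simulate_*() helper receives the already-fetched opcode and returns -1 if the instruction is not one it handles, 0 if it emulated it, or a signal number on failure; the caller advances the EPC first and rolls it back only when a signal must be delivered. A stripped-down user-space sketch of that dispatch convention follows; the handler names and opcode masks are invented for illustration and are not the kernel's:

#include <signal.h>
#include <stdio.h>

/* Each emulator: -1 = not mine, 0 = handled, >0 = signal to deliver. */
static int emulate_ll_like(unsigned int opcode)
{
	if ((opcode & 0xfc000000) != 0xc0000000)
		return -1;		/* not ours */
	return 0;			/* pretend emulation succeeded */
}

static int emulate_rdhwr_like(unsigned int opcode)
{
	if ((opcode & 0xfc000000) != 0x7c000000)
		return -1;
	return SIGBUS;			/* pretend the access faulted */
}

static int dispatch(unsigned int opcode)
{
	int status = -1;

	if (status < 0)
		status = emulate_ll_like(opcode);
	if (status < 0)
		status = emulate_rdhwr_like(opcode);
	if (status < 0)
		status = SIGILL;	/* nobody claimed the opcode */

	return status;			/* 0, or the signal to raise */
}

int main(void)
{
	printf("%d %d %d\n", dispatch(0xc0000000u),
	       dispatch(0x7c00003bu), dispatch(0x12345678u));
	return 0;
}

Keeping force_sig() out of the helpers and funnelling every failure through a single status value is what makes it cheap to chain further emulations such as the new simulate_sync().
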
diff --git a/arch/mips/kernel/vmlinux.lds.S b/arch/mips/kernel/vmlinux.lds.S
index 84f9a4cc6f2..5fc2398bdb7 100644
--- a/arch/mips/kernel/vmlinux.lds.S
+++ b/arch/mips/kernel/vmlinux.lds.S
@@ -5,6 +5,10 @@
#define mips mips
OUTPUT_ARCH(mips)
ENTRY(kernel_entry)
+PHDRS {
+ text PT_LOAD FLAGS(7); /* RWX */
+ note PT_NOTE FLAGS(4); /* R__ */
+}
jiffies = JIFFIES;
SECTIONS
@@ -22,7 +26,6 @@ SECTIONS
*/
/* . = 0xa800000000300000; */
- /* . = 0xa800000000300000; */
. = 0xffffffff80300000;
#endif
. = LOADADDR;
@@ -32,9 +35,10 @@ SECTIONS
TEXT_TEXT
SCHED_TEXT
LOCK_TEXT
+ KPROBES_TEXT
*(.fixup)
*(.gnu.warning)
- } =0
+ } :text = 0
_etext = .; /* End of text section */
/* Exception table */
@@ -51,25 +55,31 @@ SECTIONS
*(__dbe_table)
__stop___dbe_table = .;
}
+
+ NOTES :text :note
+ .dummy : { *(.dummy) } :text
+
RODATA
/* writeable */
.data : { /* Data */
- . = . + DATAOFFSET; /* for CONFIG_MAPPED_KERNEL */
- /*
- * This ALIGN is needed as a workaround for a bug a gcc bug upto 4.1 which
- * limits the maximum alignment to at most 32kB and results in the following
- * warning:
- *
- * CC arch/mips/kernel/init_task.o
- * arch/mips/kernel/init_task.c:30: warning: alignment of ‘init_thread_union’
- * is greater than maximum object file alignment. Using 32768
- */
- . = ALIGN(_PAGE_SIZE);
- *(.data.init_task)
-
- DATA_DATA
- CONSTRUCTORS
+ . = . + DATAOFFSET; /* for CONFIG_MAPPED_KERNEL */
+ /*
+	 * This ALIGN is needed as a workaround for a gcc bug
+	 * up to 4.1 which limits the maximum alignment
+ * to at most 32kB and results in the following
+ * warning:
+ *
+ * CC arch/mips/kernel/init_task.o
+ * arch/mips/kernel/init_task.c:30: warning: alignment
+ * of ‘init_thread_union’ is greater than maximum
+ * object file alignment. Using 32768
+ */
+ . = ALIGN(_PAGE_SIZE);
+ *(.data.init_task)
+
+ DATA_DATA
+ CONSTRUCTORS
}
_gp = . + 0x8000;
.lit8 : {
@@ -201,7 +211,4 @@ SECTIONS
*(.gptab.bss)
*(.gptab.sbss)
}
- .note : {
- *(.note)
- }
}
diff --git a/arch/mips/kernel/vpe.c b/arch/mips/kernel/vpe.c
index 61b729fa054..38bd33fa2a2 100644
--- a/arch/mips/kernel/vpe.c
+++ b/arch/mips/kernel/vpe.c
@@ -942,8 +942,8 @@ static int vpe_elfload(struct vpe * v)
if (phdr->p_type != PT_LOAD)
continue;
- memcpy((void *)phdr->p_vaddr, (char *)hdr + phdr->p_offset, phdr->p_filesz);
- memset((void *)phdr->p_vaddr + phdr->p_filesz, 0, phdr->p_memsz - phdr->p_filesz);
+ memcpy((void *)phdr->p_paddr, (char *)hdr + phdr->p_offset, phdr->p_filesz);
+ memset((void *)phdr->p_paddr + phdr->p_filesz, 0, phdr->p_memsz - phdr->p_filesz);
phdr++;
}
@@ -1003,6 +1003,7 @@ static void cleanup_tc(struct tc *tc)
write_tc_c0_tcstatus(tmp);
write_tc_c0_tchalt(TCHALT_H);
+ mips_ihb();
/* bind it to anything other than VPE1 */
// write_tc_c0_tcbind(read_tc_c0_tcbind() & ~TCBIND_CURVPE); // | TCBIND_CURVPE
@@ -1235,9 +1236,12 @@ int vpe_free(vpe_handle vpe)
settc(t->index);
write_vpe_c0_vpeconf0(read_vpe_c0_vpeconf0() & ~VPECONF0_VPA);
- /* mark the TC unallocated and halt'ed */
- write_tc_c0_tcstatus(read_tc_c0_tcstatus() & ~TCSTATUS_A);
+ /* halt the TC */
write_tc_c0_tchalt(TCHALT_H);
+ mips_ihb();
+
+ /* mark the TC unallocated */
+ write_tc_c0_tcstatus(read_tc_c0_tcstatus() & ~TCSTATUS_A);
v->state = VPE_STATE_UNUSED;
@@ -1317,7 +1321,8 @@ static void kspd_sp_exit( int sp_id)
}
#endif
-static ssize_t store_kill(struct class_device *dev, const char *buf, size_t len)
+static ssize_t store_kill(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t len)
{
struct vpe *vpe = get_vpe(tclimit);
struct vpe_notifications *not;
@@ -1334,14 +1339,16 @@ static ssize_t store_kill(struct class_device *dev, const char *buf, size_t len)
return len;
}
-static ssize_t show_ntcs(struct class_device *cd, char *buf)
+static ssize_t show_ntcs(struct device *cd, struct device_attribute *attr,
+ char *buf)
{
struct vpe *vpe = get_vpe(tclimit);
return sprintf(buf, "%d\n", vpe->ntcs);
}
-static ssize_t store_ntcs(struct class_device *dev, const char *buf, size_t len)
+static ssize_t store_ntcs(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t len)
{
struct vpe *vpe = get_vpe(tclimit);
unsigned long new;
@@ -1362,13 +1369,13 @@ out_einval:
return -EINVAL;
}
-static struct class_device_attribute vpe_class_attributes[] = {
+static struct device_attribute vpe_class_attributes[] = {
__ATTR(kill, S_IWUSR, NULL, store_kill),
__ATTR(ntcs, S_IRUGO | S_IWUSR, show_ntcs, store_ntcs),
{}
};
-static void vpe_class_device_release(struct class_device *cd)
+static void vpe_device_release(struct device *cd)
{
kfree(cd);
}
@@ -1376,11 +1383,11 @@ static void vpe_class_device_release(struct class_device *cd)
struct class vpe_class = {
.name = "vpe",
.owner = THIS_MODULE,
- .release = vpe_class_device_release,
- .class_dev_attrs = vpe_class_attributes,
+ .dev_release = vpe_device_release,
+ .dev_attrs = vpe_class_attributes,
};
-struct class_device vpe_device;
+struct device vpe_device;
static int __init vpe_module_init(void)
{
@@ -1423,12 +1430,12 @@ static int __init vpe_module_init(void)
goto out_chrdev;
}
- class_device_initialize(&vpe_device);
+ device_initialize(&vpe_device);
vpe_device.class = &vpe_class,
vpe_device.parent = NULL,
- strlcpy(vpe_device.class_id, "vpe1", BUS_ID_SIZE);
+ strlcpy(vpe_device.bus_id, "vpe1", BUS_ID_SIZE);
vpe_device.devt = MKDEV(major, minor);
- err = class_device_add(&vpe_device);
+ err = device_add(&vpe_device);
if (err) {
printk(KERN_ERR "Adding vpe_device failed\n");
goto out_class;
@@ -1530,14 +1537,16 @@ static int __init vpe_module_init(void)
t->pvpe = get_vpe(0); /* set the parent vpe */
}
+ /* halt the TC */
+ write_tc_c0_tchalt(TCHALT_H);
+ mips_ihb();
+
tmp = read_tc_c0_tcstatus();
/* mark not activated and not dynamically allocatable */
tmp &= ~(TCSTATUS_A | TCSTATUS_DA);
tmp |= TCSTATUS_IXMT; /* interrupt exempt */
write_tc_c0_tcstatus(tmp);
-
- write_tc_c0_tchalt(TCHALT_H);
}
}
@@ -1573,7 +1582,7 @@ static void __exit vpe_module_exit(void)
}
}
- class_device_del(&vpe_device);
+ device_del(&vpe_device);
unregister_chrdev(major, module_name);
}
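
The vpe.c hunks above migrate the driver from the old class_device interface to plain struct device. As a reference for the shape of the converted sysfs pieces, here is a generic kernel-style sketch; the foo names and the backing integer are placeholders rather than anything in this driver, and it is meant to be read in kernel context, not built standalone:

#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/stat.h>

static int foo_value;	/* placeholder state exposed through sysfs */

static ssize_t show_foo(struct device *dev, struct device_attribute *attr,
			char *buf)
{
	return sprintf(buf, "%d\n", foo_value);
}

static ssize_t store_foo(struct device *dev, struct device_attribute *attr,
			 const char *buf, size_t len)
{
	char *endp;

	foo_value = simple_strtol(buf, &endp, 0);
	return len;
}

/* With struct device the attributes become a plain array hung off the
 * class through .dev_attrs, and the release hook becomes .dev_release. */
static struct device_attribute foo_attributes[] = {
	__ATTR(foo, S_IRUGO | S_IWUSR, show_foo, store_foo),
	{}
};

The device itself is then brought up with device_initialize() and device_add() and torn down with device_del(), which is exactly the substitution made for vpe_device above.
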