Diffstat (limited to 'arch/sh/kernel')
-rw-r--r--  arch/sh/kernel/process.c           |   3
-rw-r--r--  arch/sh/kernel/time.c              | 172
-rw-r--r--  arch/sh/kernel/timers/timer-tmu.c  | 182
3 files changed, 167 insertions(+), 190 deletions(-)
diff --git a/arch/sh/kernel/process.c b/arch/sh/kernel/process.c
index d755589ba8b..6b4f5748d0b 100644
--- a/arch/sh/kernel/process.c
+++ b/arch/sh/kernel/process.c
@@ -16,6 +16,7 @@
#include <linux/kallsyms.h>
#include <linux/kexec.h>
#include <linux/kdebug.h>
+#include <linux/tick.h>
#include <asm/uaccess.h>
#include <asm/mmu_context.h>
#include <asm/pgalloc.h>
@@ -60,8 +61,10 @@ void cpu_idle(void)
if (!idle)
idle = default_idle;
+ tick_nohz_stop_sched_tick();
while (!need_resched())
idle();
+ tick_nohz_restart_sched_tick();
preempt_enable_no_resched();
schedule();
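
The process.c hunk above hooks the SH idle loop into the generic dynticks (NO_HZ)
machinery: the periodic tick is stopped before the CPU goes idle and restarted once
there is work to do. A minimal sketch of the resulting loop shape, based on the hunk
(simplified; the real function also picks a board-specific idle routine and is not
named like this):

	/* Sketch only -- rough shape of cpu_idle() after this patch. */
	void cpu_idle_sketch(void)
	{
		while (1) {
			void (*idle)(void) = default_idle;

			/* stop the periodic tick while this CPU sleeps */
			tick_nohz_stop_sched_tick();

			while (!need_resched())
				idle();

			/* restart the tick and account the idle period */
			tick_nohz_restart_sched_tick();

			preempt_enable_no_resched();
			schedule();
			preempt_disable();
		}
	}
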
diff --git a/arch/sh/kernel/time.c b/arch/sh/kernel/time.c
index d47e775962e..a3a67d151e5 100644
--- a/arch/sh/kernel/time.c
+++ b/arch/sh/kernel/time.c
@@ -3,7 +3,7 @@
*
* Copyright (C) 1999 Tetsuya Okada & Niibe Yutaka
* Copyright (C) 2000 Philipp Rumpf <prumpf@tux.org>
- * Copyright (C) 2002 - 2006 Paul Mundt
+ * Copyright (C) 2002 - 2007 Paul Mundt
* Copyright (C) 2002 M. R. Brown <mrbrown@linux-sh.org>
*
* Some code taken from i386 version.
@@ -15,6 +15,7 @@
#include <linux/profile.h>
#include <linux/timex.h>
#include <linux/sched.h>
+#include <linux/clockchips.h>
#include <asm/clock.h>
#include <asm/rtc.h>
#include <asm/timer.h>
@@ -38,6 +39,14 @@ static int null_rtc_set_time(const time_t secs)
return 0;
}
+/*
+ * Null high precision timer functions for systems lacking one.
+ */
+static cycle_t null_hpt_read(void)
+{
+ return 0;
+}
+
void (*rtc_sh_get_time)(struct timespec *) = null_rtc_get_time;
int (*rtc_sh_set_time)(const time_t) = null_rtc_set_time;
@@ -101,6 +110,7 @@ int do_settimeofday(struct timespec *tv)
EXPORT_SYMBOL(do_settimeofday);
#endif /* !CONFIG_GENERIC_TIME */
+#ifndef CONFIG_GENERIC_CLOCKEVENTS
/* last time the RTC clock got updated */
static long last_rtc_update;
@@ -138,6 +148,7 @@ void handle_timer_tick(void)
last_rtc_update = xtime.tv_sec - 600;
}
}
+#endif /* !CONFIG_GENERIC_CLOCKEVENTS */
#ifdef CONFIG_PM
int timer_suspend(struct sys_device *dev, pm_message_t state)
@@ -168,136 +179,58 @@ static struct sysdev_class timer_sysclass = {
.resume = timer_resume,
};
-#ifdef CONFIG_NO_IDLE_HZ
-static int timer_dyn_tick_enable(void)
+static int __init timer_init_sysfs(void)
{
- struct dyn_tick_timer *dyn_tick = sys_timer->dyn_tick;
- unsigned long flags;
- int ret = -ENODEV;
-
- if (dyn_tick) {
- spin_lock_irqsave(&dyn_tick->lock, flags);
- ret = 0;
- if (!(dyn_tick->state & DYN_TICK_ENABLED)) {
- ret = dyn_tick->enable();
-
- if (ret == 0)
- dyn_tick->state |= DYN_TICK_ENABLED;
- }
- spin_unlock_irqrestore(&dyn_tick->lock, flags);
- }
+ int ret = sysdev_class_register(&timer_sysclass);
+ if (ret != 0)
+ return ret;
- return ret;
+ sys_timer->dev.cls = &timer_sysclass;
+ return sysdev_register(&sys_timer->dev);
}
+device_initcall(timer_init_sysfs);
-static int timer_dyn_tick_disable(void)
-{
- struct dyn_tick_timer *dyn_tick = sys_timer->dyn_tick;
- unsigned long flags;
- int ret = -ENODEV;
-
- if (dyn_tick) {
- spin_lock_irqsave(&dyn_tick->lock, flags);
- ret = 0;
- if (dyn_tick->state & DYN_TICK_ENABLED) {
- ret = dyn_tick->disable();
-
- if (ret == 0)
- dyn_tick->state &= ~DYN_TICK_ENABLED;
- }
- spin_unlock_irqrestore(&dyn_tick->lock, flags);
- }
-
- return ret;
-}
+void (*board_time_init)(void);
/*
- * Reprogram the system timer for at least the calculated time interval.
- * This function should be called from the idle thread with IRQs disabled,
- * immediately before sleeping.
+ * Shamelessly based on the MIPS and Sparc64 work.
*/
-void timer_dyn_reprogram(void)
-{
- struct dyn_tick_timer *dyn_tick = sys_timer->dyn_tick;
- unsigned long next, seq, flags;
-
- if (!dyn_tick)
- return;
-
- spin_lock_irqsave(&dyn_tick->lock, flags);
- if (dyn_tick->state & DYN_TICK_ENABLED) {
- next = next_timer_interrupt();
- do {
- seq = read_seqbegin(&xtime_lock);
- dyn_tick->reprogram(next - jiffies);
- } while (read_seqretry(&xtime_lock, seq));
- }
- spin_unlock_irqrestore(&dyn_tick->lock, flags);
-}
+static unsigned long timer_ticks_per_nsec_quotient __read_mostly;
+unsigned long sh_hpt_frequency = 0;
+
+#define NSEC_PER_CYC_SHIFT 10
+
+struct clocksource clocksource_sh = {
+ .name = "SuperH",
+ .rating = 200,
+ .mask = CLOCKSOURCE_MASK(32),
+ .read = null_hpt_read,
+ .shift = 16,
+ .flags = CLOCK_SOURCE_IS_CONTINUOUS,
+};
-static ssize_t timer_show_dyn_tick(struct sys_device *dev, char *buf)
+static void __init init_sh_clocksource(void)
{
- return sprintf(buf, "%i\n",
- (sys_timer->dyn_tick->state & DYN_TICK_ENABLED) >> 1);
-}
+ if (!sh_hpt_frequency || clocksource_sh.read == null_hpt_read)
+ return;
-static ssize_t timer_set_dyn_tick(struct sys_device *dev, const char *buf,
- size_t count)
-{
- unsigned int enable = simple_strtoul(buf, NULL, 2);
+ clocksource_sh.mult = clocksource_hz2mult(sh_hpt_frequency,
+ clocksource_sh.shift);
- if (enable)
- timer_dyn_tick_enable();
- else
- timer_dyn_tick_disable();
+ timer_ticks_per_nsec_quotient =
+ clocksource_hz2mult(sh_hpt_frequency, NSEC_PER_CYC_SHIFT);
- return count;
+ clocksource_register(&clocksource_sh);
}
-static SYSDEV_ATTR(dyn_tick, 0644, timer_show_dyn_tick, timer_set_dyn_tick);
-/*
- * dyntick=enable|disable
- */
-static char dyntick_str[4] __initdata = "";
-
-static int __init dyntick_setup(char *str)
+#ifdef CONFIG_GENERIC_TIME
+unsigned long long sched_clock(void)
{
- if (str)
- strlcpy(dyntick_str, str, sizeof(dyntick_str));
- return 1;
+ unsigned long long ticks = clocksource_sh.read();
+ return (ticks * timer_ticks_per_nsec_quotient) >> NSEC_PER_CYC_SHIFT;
}
-
-__setup("dyntick=", dyntick_setup);
-#endif
-
-static int __init timer_init_sysfs(void)
-{
- int ret = sysdev_class_register(&timer_sysclass);
- if (ret != 0)
- return ret;
-
- sys_timer->dev.cls = &timer_sysclass;
- ret = sysdev_register(&sys_timer->dev);
-
-#ifdef CONFIG_NO_IDLE_HZ
- if (ret == 0 && sys_timer->dyn_tick) {
- ret = sysdev_create_file(&sys_timer->dev, &attr_dyn_tick);
-
- /*
- * Turn on dynamic tick after calibrate delay
- * for correct bogomips
- */
- if (ret == 0 && dyntick_str[0] == 'e')
- ret = timer_dyn_tick_enable();
- }
#endif
- return ret;
-}
-device_initcall(timer_init_sysfs);
-
-void (*board_time_init)(void);
-
void __init time_init(void)
{
if (board_time_init)
@@ -316,10 +249,15 @@ void __init time_init(void)
sys_timer = get_sys_timer();
printk(KERN_INFO "Using %s for system timer\n", sys_timer->name);
-#ifdef CONFIG_NO_IDLE_HZ
- if (sys_timer->dyn_tick)
- spin_lock_init(&sys_timer->dyn_tick->lock);
-#endif
+ if (sys_timer->ops->read)
+ clocksource_sh.read = sys_timer->ops->read;
+
+ init_sh_clocksource();
+
+ if (sh_hpt_frequency)
+ printk("Using %lu.%03lu MHz high precision timer.\n",
+ ((sh_hpt_frequency + 500) / 1000) / 1000,
+ ((sh_hpt_frequency + 500) / 1000) % 1000);
#if defined(CONFIG_SH_KGDB)
/*
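
The time.c changes above replace the old NO_IDLE_HZ dyn_tick plumbing with a
GENERIC_TIME clocksource: clocksource_sh is registered with a mult derived from
sh_hpt_frequency, and sched_clock() scales raw timer cycles to nanoseconds with a
precomputed quotient. A worked sketch of that scaling, using a hypothetical 15 MHz
high-precision timer (the real rate comes from clk_get_rate(&tmu1_clk) below):

	/* Hypothetical rate, for illustration only. */
	#define EXAMPLE_HPT_HZ		15000000UL

	static unsigned long long example_cyc2ns(unsigned long long cycles)
	{
		/*
		 * clocksource_hz2mult(hz, shift) is roughly
		 * (NSEC_PER_SEC << shift) / hz, so for 15 MHz and
		 * NSEC_PER_CYC_SHIFT == 10 the quotient is about 68267.
		 */
		unsigned long quotient =
			clocksource_hz2mult(EXAMPLE_HPT_HZ, NSEC_PER_CYC_SHIFT);

		/* 15,000,000 cycles (one second) * 68267 >> 10 is ~1e9 ns */
		return (cycles * quotient) >> NSEC_PER_CYC_SHIFT;
	}
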
diff --git a/arch/sh/kernel/timers/timer-tmu.c b/arch/sh/kernel/timers/timer-tmu.c
index d9e3151c891..2d997e2a5b6 100644
--- a/arch/sh/kernel/timers/timer-tmu.c
+++ b/arch/sh/kernel/timers/timer-tmu.c
@@ -1,7 +1,7 @@
/*
* arch/sh/kernel/timers/timer-tmu.c - TMU Timer Support
*
- * Copyright (C) 2005 Paul Mundt
+ * Copyright (C) 2005 - 2007 Paul Mundt
*
* TMU handling code hacked out of arch/sh/kernel/time.c
*
@@ -18,6 +18,7 @@
#include <linux/kernel.h>
#include <linux/interrupt.h>
#include <linux/seqlock.h>
+#include <linux/clockchips.h>
#include <asm/timer.h>
#include <asm/rtc.h>
#include <asm/io.h>
@@ -25,56 +26,75 @@
#include <asm/clock.h>
#define TMU_TOCR_INIT 0x00
-#define TMU0_TCR_INIT 0x0020
-#define TMU_TSTR_INIT 1
+#define TMU_TCR_INIT 0x0020
-#define TMU0_TCR_CALIB 0x0000
+static int tmu_timer_start(void)
+{
+ ctrl_outb(ctrl_inb(TMU_TSTR) | 0x3, TMU_TSTR);
+ return 0;
+}
-static unsigned long tmu_timer_get_offset(void)
+static void tmu0_timer_set_interval(unsigned long interval, unsigned int reload)
{
- int count;
- static int count_p = 0x7fffffff; /* for the first call after boot */
- static unsigned long jiffies_p = 0;
+ ctrl_outl(interval, TMU0_TCNT);
/*
- * cache volatile jiffies temporarily; we have IRQs turned off.
+ * TCNT reloads from TCOR on underflow, clear it if we don't
+ * intend to auto-reload
*/
- unsigned long jiffies_t;
+ if (reload)
+ ctrl_outl(interval, TMU0_TCOR);
+ else
+ ctrl_outl(0, TMU0_TCOR);
- /* timer count may underflow right here */
- count = ctrl_inl(TMU0_TCNT); /* read the latched count */
+ tmu_timer_start();
+}
- jiffies_t = jiffies;
+static int tmu_timer_stop(void)
+{
+ ctrl_outb(ctrl_inb(TMU_TSTR) & ~0x3, TMU_TSTR);
+ return 0;
+}
- /*
- * avoiding timer inconsistencies (they are rare, but they happen)...
- * there is one kind of problem that must be avoided here:
- * 1. the timer counter underflows
- */
+static cycle_t tmu_timer_read(void)
+{
+ return ~ctrl_inl(TMU1_TCNT);
+}
+
+static int tmu_set_next_event(unsigned long cycles,
+ struct clock_event_device *evt)
+{
+ tmu0_timer_set_interval(cycles, 1);
+ return 0;
+}
- if (jiffies_t == jiffies_p) {
- if (count > count_p) {
- /* the nutcase */
- if (ctrl_inw(TMU0_TCR) & 0x100) { /* Check UNF bit */
- count -= LATCH;
- } else {
- printk("%s (): hardware timer problem?\n",
- __FUNCTION__);
- }
- }
- } else
- jiffies_p = jiffies_t;
-
- count_p = count;
-
- count = ((LATCH-1) - count) * TICK_SIZE;
- count = (count + LATCH/2) / LATCH;
-
- return count;
+static void tmu_set_mode(enum clock_event_mode mode,
+ struct clock_event_device *evt)
+{
+ switch (mode) {
+ case CLOCK_EVT_MODE_PERIODIC:
+ ctrl_outl(ctrl_inl(TMU0_TCNT), TMU0_TCOR);
+ break;
+ case CLOCK_EVT_MODE_ONESHOT:
+ ctrl_outl(0, TMU0_TCOR);
+ break;
+ case CLOCK_EVT_MODE_UNUSED:
+ case CLOCK_EVT_MODE_SHUTDOWN:
+ break;
+ }
}
+static struct clock_event_device tmu0_clockevent = {
+ .name = "tmu0",
+ .shift = 32,
+ .features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT,
+ .set_mode = tmu_set_mode,
+ .set_next_event = tmu_set_next_event,
+};
+
static irqreturn_t tmu_timer_interrupt(int irq, void *dummy)
{
+ struct clock_event_device *evt = &tmu0_clockevent;
unsigned long timer_status;
/* Clear UNF bit */
@@ -82,72 +102,76 @@ static irqreturn_t tmu_timer_interrupt(int irq, void *dummy)
timer_status &= ~0x100;
ctrl_outw(timer_status, TMU0_TCR);
- /*
- * Here we are in the timer irq handler. We just have irqs locally
- * disabled but we don't know if the timer_bh is running on the other
- * CPU. We need to avoid to SMP race with it. NOTE: we don' t need
- * the irq version of write_lock because as just said we have irq
- * locally disabled. -arca
- */
- write_seqlock(&xtime_lock);
- handle_timer_tick();
- write_sequnlock(&xtime_lock);
+ evt->event_handler(evt);
return IRQ_HANDLED;
}
-static struct irqaction tmu_irq = {
- .name = "timer",
+static struct irqaction tmu0_irq = {
+ .name = "periodic timer",
.handler = tmu_timer_interrupt,
.flags = IRQF_DISABLED | IRQF_TIMER | IRQF_IRQPOLL,
.mask = CPU_MASK_NONE,
};
-static void tmu_clk_init(struct clk *clk)
+static void tmu0_clk_init(struct clk *clk)
{
- u8 divisor = TMU0_TCR_INIT & 0x7;
- ctrl_outw(TMU0_TCR_INIT, TMU0_TCR);
+ u8 divisor = TMU_TCR_INIT & 0x7;
+ ctrl_outw(TMU_TCR_INIT, TMU0_TCR);
clk->rate = clk->parent->rate / (4 << (divisor << 1));
}
-static void tmu_clk_recalc(struct clk *clk)
+static void tmu0_clk_recalc(struct clk *clk)
{
u8 divisor = ctrl_inw(TMU0_TCR) & 0x7;
clk->rate = clk->parent->rate / (4 << (divisor << 1));
}
-static struct clk_ops tmu_clk_ops = {
- .init = tmu_clk_init,
- .recalc = tmu_clk_recalc,
+static struct clk_ops tmu0_clk_ops = {
+ .init = tmu0_clk_init,
+ .recalc = tmu0_clk_recalc,
};
static struct clk tmu0_clk = {
.name = "tmu0_clk",
- .ops = &tmu_clk_ops,
+ .ops = &tmu0_clk_ops,
};
-static int tmu_timer_start(void)
+static void tmu1_clk_init(struct clk *clk)
{
- ctrl_outb(TMU_TSTR_INIT, TMU_TSTR);
- return 0;
+ u8 divisor = TMU_TCR_INIT & 0x7;
+ ctrl_outw(divisor, TMU1_TCR);
+ clk->rate = clk->parent->rate / (4 << (divisor << 1));
}
-static int tmu_timer_stop(void)
+static void tmu1_clk_recalc(struct clk *clk)
{
- ctrl_outb(0, TMU_TSTR);
- return 0;
+ u8 divisor = ctrl_inw(TMU1_TCR) & 0x7;
+ clk->rate = clk->parent->rate / (4 << (divisor << 1));
}
+static struct clk_ops tmu1_clk_ops = {
+ .init = tmu1_clk_init,
+ .recalc = tmu1_clk_recalc,
+};
+
+static struct clk tmu1_clk = {
+ .name = "tmu1_clk",
+ .ops = &tmu1_clk_ops,
+};
+
static int tmu_timer_init(void)
{
unsigned long interval;
+ unsigned long frequency;
- setup_irq(CONFIG_SH_TIMER_IRQ, &tmu_irq);
+ setup_irq(CONFIG_SH_TIMER_IRQ, &tmu0_irq);
tmu0_clk.parent = clk_get(NULL, "module_clk");
+ tmu1_clk.parent = clk_get(NULL, "module_clk");
- /* Start TMU0 */
tmu_timer_stop();
+
#if !defined(CONFIG_CPU_SUBTYPE_SH7300) && \
!defined(CONFIG_CPU_SUBTYPE_SH7760) && \
!defined(CONFIG_CPU_SUBTYPE_SH7785)
@@ -155,15 +179,29 @@ static int tmu_timer_init(void)
#endif
clk_register(&tmu0_clk);
+ clk_register(&tmu1_clk);
clk_enable(&tmu0_clk);
+ clk_enable(&tmu1_clk);
- interval = (clk_get_rate(&tmu0_clk) + HZ / 2) / HZ;
- printk(KERN_INFO "Interval = %ld\n", interval);
+ frequency = clk_get_rate(&tmu0_clk);
+ interval = (frequency + HZ / 2) / HZ;
- ctrl_outl(interval, TMU0_TCOR);
- ctrl_outl(interval, TMU0_TCNT);
+ sh_hpt_frequency = clk_get_rate(&tmu1_clk);
+ ctrl_outl(~0, TMU1_TCNT);
+ ctrl_outl(~0, TMU1_TCOR);
- tmu_timer_start();
+ tmu0_timer_set_interval(interval, 1);
+
+ tmu0_clockevent.mult = div_sc(frequency, NSEC_PER_SEC,
+ tmu0_clockevent.shift);
+ tmu0_clockevent.max_delta_ns =
+ clockevent_delta2ns(-1, &tmu0_clockevent);
+ tmu0_clockevent.min_delta_ns =
+ clockevent_delta2ns(1, &tmu0_clockevent);
+
+ tmu0_clockevent.cpumask = cpumask_of_cpu(0);
+
+ clockevents_register_device(&tmu0_clockevent);
return 0;
}
@@ -172,9 +210,7 @@ struct sys_timer_ops tmu_timer_ops = {
.init = tmu_timer_init,
.start = tmu_timer_start,
.stop = tmu_timer_stop,
-#ifndef CONFIG_GENERIC_TIME
- .get_offset = tmu_timer_get_offset,
-#endif
+ .read = tmu_timer_read,
};
struct sys_timer tmu_timer = {
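
In the timer-tmu.c conversion above, TMU0 becomes a clock_event_device (periodic and
one-shot capable) while the free-running TMU1 feeds the clocksource. For one-shot
operation the generic clockevents layer converts a nanosecond delta into TMU0 cycles
using the mult/shift pair set up in tmu_timer_init() and then calls
tmu_set_next_event(). A rough sketch of that conversion, assuming a hypothetical
10 MHz TMU0 input clock (the real rate is clk_get_rate(&tmu0_clk); the kernel itself
uses div_sc()/do_div() for the 64-bit divisions):

	/* Hypothetical TMU0 rate, for illustration only. */
	#define EXAMPLE_TMU0_HZ		10000000ULL

	static unsigned long example_ns2cycles(unsigned long long delta_ns)
	{
		/*
		 * div_sc(freq, NSEC_PER_SEC, 32) == (freq << 32) / 1e9,
		 * i.e. ~42949672 for a 10 MHz clock.
		 */
		u64 mult = (EXAMPLE_TMU0_HZ << 32) / NSEC_PER_SEC;

		/*
		 * e.g. a 1 ms delta -> (1000000 * 42949672) >> 32, which is
		 * ~10000 cycles: one TMU0 period per millisecond at 10 MHz.
		 */
		return (unsigned long)((delta_ns * mult) >> 32);
	}
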