author    Linus Torvalds <torvalds@woody.linux-foundation.org>    2007-10-12 15:39:39 -0700
committer Linus Torvalds <torvalds@woody.linux-foundation.org>    2007-10-12 15:39:39 -0700
commit    57c5b9998ea05a90ebacaa13c45f985ffe09dbe9 (patch)
tree      89ce5149dbba2d9f827f77d35b3b7f8609f85ea9 /arch
parent    a6e3d7dba92e19acffaa36aad962741a762aa8c5 (diff)
parent    ed6fb174eea8869e88d8bc506a55f3ef76fcb7ed (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/tglx/linux-2.6-x86
* git://git.kernel.org/pub/scm/linux/kernel/git/tglx/linux-2.6-x86: (40 commits)
  x86: HPET add another ICH7 PCI id
  x86: HPET force enable ICH5 suspend/resume fix
  x86: HPET force enable for ICH5
  x86: HPET try to activate force detected hpet
  x86: HPET force enable o ICH7 and later
  x86: HPET restructure hpet code for hpet force enable
  clock events: allow replacement of broadcast timer
  i386/x8664: cleanup the shared hpet code
  i386: Remove the useless #ifdef in i8253.h
  ACPI: remove the now unused ifdef code
  jiffies: remove unused macros
  x86_64: cleanup apic.c after clock events switch
  x86_64: remove now unused code
  x86: unify timex.h variants
  x86: kill 8253pit.h
  x86: disable apic timer for AMD C1E enabled CPUs
  x86: Fix irq0 / local apic timer accounting
  x86_64: convert to clock events
  x86_64: Add (not yet used) clock event functions
  x86_64: prepare idle loop for dynamic ticks
  ...
Diffstat (limited to 'arch')
-rw-r--r-- arch/i386/Kconfig | 10
-rw-r--r-- arch/x86/kernel/Makefile_32 | 6
-rw-r--r-- arch/x86/kernel/Makefile_64 | 4
-rw-r--r-- arch/x86/kernel/apic_64.c | 356
-rw-r--r-- arch/x86/kernel/geode_32.c | 4
-rw-r--r-- arch/x86/kernel/hpet.c (renamed from arch/x86/kernel/hpet_32.c) | 250
-rw-r--r-- arch/x86/kernel/hpet_64.c | 493
-rw-r--r-- arch/x86/kernel/i8253.c (renamed from arch/x86/kernel/i8253_32.c) | 4
-rw-r--r-- arch/x86/kernel/i8259_32.c | 1
-rw-r--r-- arch/x86/kernel/i8259_64.c | 46
-rw-r--r-- arch/x86/kernel/mfgpt_32.c | 362
-rw-r--r-- arch/x86/kernel/nmi_32.c | 3
-rw-r--r-- arch/x86/kernel/nmi_64.c | 2
-rw-r--r-- arch/x86/kernel/process_64.c | 4
-rw-r--r-- arch/x86/kernel/quirks.c | 205
-rw-r--r-- arch/x86/kernel/setup_64.c | 34
-rw-r--r-- arch/x86/kernel/smpboot_64.c | 4
-rw-r--r-- arch/x86/kernel/time_32.c | 3
-rw-r--r-- arch/x86/kernel/time_64.c | 177
-rw-r--r-- arch/x86/kernel/tsc_64.c | 93
-rw-r--r-- arch/x86_64/Kconfig | 14
21 files changed, 1091 insertions, 984 deletions
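
Most of the x86_64 changes in this merge follow the pattern the i386 code already uses: describe the hardware timer in a struct clock_event_device, compute its mult and min/max deltas, and hand it to clockevents_register_device(). As orientation before the hunks below, here is a minimal, hypothetical sketch of that pattern against the 2.6.23-era clockevents API; the my_timer_* names and MY_TIMER_FREQ_HZ are placeholders for illustration, not code from this merge:

#include <linux/clockchips.h>
#include <linux/init.h>
#include <linux/smp.h>
#include <linux/time.h>

#define MY_TIMER_FREQ_HZ 2000000	/* hypothetical input clock */

/* Arm the hardware for the next event; 'delta' is in timer ticks. */
static int my_timer_next_event(unsigned long delta,
			       struct clock_event_device *evt)
{
	/* write 'delta' into the hardware comparator register here */
	return 0;
}

static void my_timer_set_mode(enum clock_event_mode mode,
			      struct clock_event_device *evt)
{
	/* switch the hardware between periodic / oneshot / shutdown here */
}

static struct clock_event_device my_clockevent = {
	.name		= "my-timer",
	.features	= CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT,
	.set_mode	= my_timer_set_mode,
	.set_next_event	= my_timer_next_event,
	.shift		= 32,
	.rating		= 100,
	.irq		= -1,
};

static void __init my_timer_register(void)
{
	/* scaled ns -> ticks conversion factor, then the usable delta range */
	my_clockevent.mult = div_sc(MY_TIMER_FREQ_HZ, NSEC_PER_SEC, 32);
	my_clockevent.max_delta_ns =
		clockevent_delta2ns(0x7fffffff, &my_clockevent);
	my_clockevent.min_delta_ns =
		clockevent_delta2ns(0xf, &my_clockevent);
	my_clockevent.cpumask = cpumask_of_cpu(smp_processor_id());
	clockevents_register_device(&my_clockevent);
}

The interrupt handler for such a timer then only calls evt->event_handler(evt); the clockevents core decides whether that drives periodic ticks, one-shot high-resolution timers, or broadcast.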
diff --git a/arch/i386/Kconfig b/arch/i386/Kconfig
index 2d85e4b8730..6bbbc2755e4 100644
--- a/arch/i386/Kconfig
+++ b/arch/i386/Kconfig
@@ -1206,6 +1206,16 @@ config SCx200HR_TIMER
processor goes idle (as is done by the scheduler). The
other workaround is idle=poll boot option.
+config GEODE_MFGPT_TIMER
+ bool "Geode Multi-Function General Purpose Timer (MFGPT) events"
+ depends on MGEODE_LX && GENERIC_TIME && GENERIC_CLOCKEVENTS
+ default y
+ help
+ This driver provides a clock event source based on the MFGPT
+ timer(s) in the CS5535 and CS5536 companion chip for the geode.
+ MFGPTs have a better resolution and max interval than the
+ generic PIT, and are suitable for use as high-res timers.
+
config K8_NB
def_bool y
depends on AGP_AMD64
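
With its dependencies satisfied the new option defaults to y, so a Geode LX kernel built with clock events enabled would normally end up with a line like the following in its .config (shown for illustration; it is not part of the diff):

CONFIG_GEODE_MFGPT_TIMER=y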
diff --git a/arch/x86/kernel/Makefile_32 b/arch/x86/kernel/Makefile_32
index c624193740f..7ff02063b85 100644
--- a/arch/x86/kernel/Makefile_32
+++ b/arch/x86/kernel/Makefile_32
@@ -7,7 +7,7 @@ extra-y := head_32.o init_task_32.o vmlinux.lds
obj-y := process_32.o signal_32.o entry_32.o traps_32.o irq_32.o \
ptrace_32.o time_32.o ioport_32.o ldt_32.o setup_32.o i8259_32.o sys_i386_32.o \
pci-dma_32.o i386_ksyms_32.o i387_32.o bootflag.o e820_32.o\
- quirks.o i8237.o topology.o alternative.o i8253_32.o tsc_32.o
+ quirks.o i8237.o topology.o alternative.o i8253.o tsc_32.o
obj-$(CONFIG_STACKTRACE) += stacktrace.o
obj-y += cpu/
@@ -37,9 +37,9 @@ obj-$(CONFIG_EFI) += efi_32.o efi_stub_32.o
obj-$(CONFIG_DOUBLEFAULT) += doublefault_32.o
obj-$(CONFIG_VM86) += vm86_32.o
obj-$(CONFIG_EARLY_PRINTK) += early_printk.o
-obj-$(CONFIG_HPET_TIMER) += hpet_32.o
+obj-$(CONFIG_HPET_TIMER) += hpet.o
obj-$(CONFIG_K8_NB) += k8.o
-obj-$(CONFIG_MGEODE_LX) += geode_32.o
+obj-$(CONFIG_MGEODE_LX) += geode_32.o mfgpt_32.o
obj-$(CONFIG_VMI) += vmi_32.o vmiclock_32.o
obj-$(CONFIG_PARAVIRT) += paravirt_32.o
diff --git a/arch/x86/kernel/Makefile_64 b/arch/x86/kernel/Makefile_64
index 3ab017a0a3b..43da66213a4 100644
--- a/arch/x86/kernel/Makefile_64
+++ b/arch/x86/kernel/Makefile_64
@@ -8,8 +8,8 @@ obj-y := process_64.o signal_64.o entry_64.o traps_64.o irq_64.o \
ptrace_64.o time_64.o ioport_64.o ldt_64.o setup_64.o i8259_64.o sys_x86_64.o \
x8664_ksyms_64.o i387_64.o syscall_64.o vsyscall_64.o \
setup64.o bootflag.o e820_64.o reboot_64.o quirks.o i8237.o \
- pci-dma_64.o pci-nommu_64.o alternative.o hpet_64.o tsc_64.o bugs_64.o \
- perfctr-watchdog.o
+ pci-dma_64.o pci-nommu_64.o alternative.o hpet.o tsc_64.o bugs_64.o \
+ perfctr-watchdog.o i8253.o
obj-$(CONFIG_STACKTRACE) += stacktrace.o
obj-$(CONFIG_X86_MCE) += mce_64.o therm_throt.o
diff --git a/arch/x86/kernel/apic_64.c b/arch/x86/kernel/apic_64.c
index 925758dbca0..395928de28e 100644
--- a/arch/x86/kernel/apic_64.c
+++ b/arch/x86/kernel/apic_64.c
@@ -25,6 +25,7 @@
#include <linux/sysdev.h>
#include <linux/module.h>
#include <linux/ioport.h>
+#include <linux/clockchips.h>
#include <asm/atomic.h>
#include <asm/smp.h>
@@ -39,12 +40,9 @@
#include <asm/hpet.h>
#include <asm/apic.h>
-int apic_mapped;
int apic_verbosity;
-int apic_runs_main_timer;
-int apic_calibrate_pmtmr __initdata;
-
-int disable_apic_timer __initdata;
+int disable_apic_timer __cpuinitdata;
+static int apic_calibrate_pmtmr __initdata;
/* Local APIC timer works in C2? */
int local_apic_timer_c2_ok;
@@ -56,14 +54,78 @@ static struct resource lapic_resource = {
.flags = IORESOURCE_MEM | IORESOURCE_BUSY,
};
+static unsigned int calibration_result;
+
+static int lapic_next_event(unsigned long delta,
+ struct clock_event_device *evt);
+static void lapic_timer_setup(enum clock_event_mode mode,
+ struct clock_event_device *evt);
+
+static void lapic_timer_broadcast(cpumask_t mask);
+
+static void __setup_APIC_LVTT(unsigned int clocks, int oneshot, int irqen);
+
+static struct clock_event_device lapic_clockevent = {
+ .name = "lapic",
+ .features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT
+ | CLOCK_EVT_FEAT_C3STOP | CLOCK_EVT_FEAT_DUMMY,
+ .shift = 32,
+ .set_mode = lapic_timer_setup,
+ .set_next_event = lapic_next_event,
+ .broadcast = lapic_timer_broadcast,
+ .rating = 100,
+ .irq = -1,
+};
+static DEFINE_PER_CPU(struct clock_event_device, lapic_events);
+
+static int lapic_next_event(unsigned long delta,
+ struct clock_event_device *evt)
+{
+ apic_write(APIC_TMICT, delta);
+ return 0;
+}
+
+static void lapic_timer_setup(enum clock_event_mode mode,
+ struct clock_event_device *evt)
+{
+ unsigned long flags;
+ unsigned int v;
+
+ /* Lapic used as dummy for broadcast ? */
+ if (evt->features & CLOCK_EVT_FEAT_DUMMY)
+ return;
+
+ local_irq_save(flags);
+
+ switch (mode) {
+ case CLOCK_EVT_MODE_PERIODIC:
+ case CLOCK_EVT_MODE_ONESHOT:
+ __setup_APIC_LVTT(calibration_result,
+ mode != CLOCK_EVT_MODE_PERIODIC, 1);
+ break;
+ case CLOCK_EVT_MODE_UNUSED:
+ case CLOCK_EVT_MODE_SHUTDOWN:
+ v = apic_read(APIC_LVTT);
+ v |= (APIC_LVT_MASKED | LOCAL_TIMER_VECTOR);
+ apic_write(APIC_LVTT, v);
+ break;
+ case CLOCK_EVT_MODE_RESUME:
+ /* Nothing to do here */
+ break;
+ }
+
+ local_irq_restore(flags);
+}
+
/*
- * cpu_mask that denotes the CPUs that needs timer interrupt coming in as
- * IPIs in place of local APIC timers
+ * Local APIC timer broadcast function
*/
-static cpumask_t timer_interrupt_broadcast_ipi_mask;
-
-/* Using APIC to generate smp_local_timer_interrupt? */
-int using_apic_timer __read_mostly = 0;
+static void lapic_timer_broadcast(cpumask_t mask)
+{
+#ifdef CONFIG_SMP
+ send_IPI_mask(mask, LOCAL_TIMER_VECTOR);
+#endif
+}
static void apic_pm_activate(void);
@@ -184,7 +246,10 @@ void disconnect_bsp_APIC(int virt_wire_setup)
apic_write(APIC_SPIV, value);
if (!virt_wire_setup) {
- /* For LVT0 make it edge triggered, active high, external and enabled */
+ /*
+ * For LVT0 make it edge triggered, active high,
+ * external and enabled
+ */
value = apic_read(APIC_LVT0);
value &= ~(APIC_MODE_MASK | APIC_SEND_PENDING |
APIC_INPUT_POLARITY | APIC_LVT_REMOTE_IRR |
@@ -420,10 +485,12 @@ void __cpuinit setup_local_APIC (void)
value = apic_read(APIC_LVT0) & APIC_LVT_MASKED;
if (!smp_processor_id() && !value) {
value = APIC_DM_EXTINT;
- apic_printk(APIC_VERBOSE, "enabled ExtINT on CPU#%d\n", smp_processor_id());
+ apic_printk(APIC_VERBOSE, "enabled ExtINT on CPU#%d\n",
+ smp_processor_id());
} else {
value = APIC_DM_EXTINT | APIC_LVT_MASKED;
- apic_printk(APIC_VERBOSE, "masked ExtINT on CPU#%d\n", smp_processor_id());
+ apic_printk(APIC_VERBOSE, "masked ExtINT on CPU#%d\n",
+ smp_processor_id());
}
apic_write(APIC_LVT0, value);
@@ -706,8 +773,8 @@ void __init init_apic_mappings(void)
apic_phys = mp_lapic_addr;
set_fixmap_nocache(FIX_APIC_BASE, apic_phys);
- apic_mapped = 1;
- apic_printk(APIC_VERBOSE,"mapped APIC to %16lx (%16lx)\n", APIC_BASE, apic_phys);
+ apic_printk(APIC_VERBOSE, "mapped APIC to %16lx (%16lx)\n",
+ APIC_BASE, apic_phys);
/* Put local APIC into the resource map. */
lapic_resource.start = apic_phys;
@@ -730,12 +797,14 @@ void __init init_apic_mappings(void)
if (smp_found_config) {
ioapic_phys = mp_ioapics[i].mpc_apicaddr;
} else {
- ioapic_phys = (unsigned long) alloc_bootmem_pages(PAGE_SIZE);
+ ioapic_phys = (unsigned long)
+ alloc_bootmem_pages(PAGE_SIZE);
ioapic_phys = __pa(ioapic_phys);
}
set_fixmap_nocache(idx, ioapic_phys);
- apic_printk(APIC_VERBOSE,"mapped IOAPIC to %016lx (%016lx)\n",
- __fix_to_virt(idx), ioapic_phys);
+ apic_printk(APIC_VERBOSE,
+ "mapped IOAPIC to %016lx (%016lx)\n",
+ __fix_to_virt(idx), ioapic_phys);
idx++;
if (ioapic_res != NULL) {
@@ -758,16 +827,14 @@ void __init init_apic_mappings(void)
* P5 APIC double write bug.
*/
-#define APIC_DIVISOR 16
-
-static void __setup_APIC_LVTT(unsigned int clocks)
+static void __setup_APIC_LVTT(unsigned int clocks, int oneshot, int irqen)
{
unsigned int lvtt_value, tmp_value;
- int cpu = smp_processor_id();
- lvtt_value = APIC_LVT_TIMER_PERIODIC | LOCAL_TIMER_VECTOR;
-
- if (cpu_isset(cpu, timer_interrupt_broadcast_ipi_mask))
+ lvtt_value = LOCAL_TIMER_VECTOR;
+ if (!oneshot)
+ lvtt_value |= APIC_LVT_TIMER_PERIODIC;
+ if (!irqen)
lvtt_value |= APIC_LVT_MASKED;
apic_write(APIC_LVTT, lvtt_value);
@@ -780,44 +847,18 @@ static void __setup_APIC_LVTT(unsigned int clocks)
& ~(APIC_TDR_DIV_1 | APIC_TDR_DIV_TMBASE))
| APIC_TDR_DIV_16);
- apic_write(APIC_TMICT, clocks/APIC_DIVISOR);
+ if (!oneshot)
+ apic_write(APIC_TMICT, clocks);
}
-static void setup_APIC_timer(unsigned int clocks)
+static void setup_APIC_timer(void)
{
- unsigned long flags;
+ struct clock_event_device *levt = &__get_cpu_var(lapic_events);
- local_irq_save(flags);
+ memcpy(levt, &lapic_clockevent, sizeof(*levt));
+ levt->cpumask = cpumask_of_cpu(smp_processor_id());
- /* wait for irq slice */
- if (hpet_address && hpet_use_timer) {
- u32 trigger = hpet_readl(HPET_T0_CMP);
- while (hpet_readl(HPET_T0_CMP) == trigger)
- /* do nothing */ ;
- } else {
- int c1, c2;
- outb_p(0x00, 0x43);
- c2 = inb_p(0x40);
- c2 |= inb_p(0x40) << 8;
- do {
- c1 = c2;
- outb_p(0x00, 0x43);
- c2 = inb_p(0x40);
- c2 |= inb_p(0x40) << 8;
- } while (c2 - c1 < 300);
- }
- __setup_APIC_LVTT(clocks);
- /* Turn off PIT interrupt if we use APIC timer as main timer.
- Only works with the PM timer right now
- TBD fix it for HPET too. */
- if ((pmtmr_ioport != 0) &&
- smp_processor_id() == boot_cpu_id &&
- apic_runs_main_timer == 1 &&
- !cpu_isset(boot_cpu_id, timer_interrupt_broadcast_ipi_mask)) {
- stop_timer_interrupt();
- apic_runs_main_timer++;
- }
- local_irq_restore(flags);
+ clockevents_register_device(levt);
}
/*
@@ -835,17 +876,22 @@ static void setup_APIC_timer(unsigned int clocks)
#define TICK_COUNT 100000000
-static int __init calibrate_APIC_clock(void)
+static void __init calibrate_APIC_clock(void)
{
unsigned apic, apic_start;
unsigned long tsc, tsc_start;
int result;
+
+ local_irq_disable();
+
/*
* Put whatever arbitrary (but long enough) timeout
* value into the APIC clock, we just want to get the
* counter running for calibration.
+ *
+ * No interrupt enable !
*/
- __setup_APIC_LVTT(4000000000);
+ __setup_APIC_LVTT(250000000, 0, 0);
apic_start = apic_read(APIC_TMCCT);
#ifdef CONFIG_X86_PM_TIMER
@@ -867,123 +913,62 @@ static int __init calibrate_APIC_clock(void)
result = (apic_start - apic) * 1000L * tsc_khz /
(tsc - tsc_start);
}
- printk("result %d\n", result);
+ local_irq_enable();
+
+ printk(KERN_DEBUG "APIC timer calibration result %d\n", result);
printk(KERN_INFO "Detected %d.%03d MHz APIC timer.\n",
result / 1000 / 1000, result / 1000 % 1000);
- return result * APIC_DIVISOR / HZ;
-}
+ /* Calculate the scaled math multiplication factor */
+ lapic_clockevent.mult = div_sc(result, NSEC_PER_SEC, 32);
+ lapic_clockevent.max_delta_ns =
+ clockevent_delta2ns(0x7FFFFF, &lapic_clockevent);
+ lapic_clockevent.min_delta_ns =
+ clockevent_delta2ns(0xF, &lapic_clockevent);
-static unsigned int calibration_result;
+ calibration_result = result / HZ;
+}
void __init setup_boot_APIC_clock (void)
{
+ /*
+ * The local apic timer can be disabled via the kernel commandline.
+ * Register the lapic timer as a dummy clock event source on SMP
+ * systems, so the broadcast mechanism is used. On UP systems simply
+ * ignore it.
+ */
if (disable_apic_timer) {
printk(KERN_INFO "Disabling APIC timer\n");
+ /* No broadcast on UP ! */
+ if (num_possible_cpus() > 1)
+ setup_APIC_timer();
return;
}
printk(KERN_INFO "Using local APIC timer interrupts.\n");
- using_apic_timer = 1;
-
- local_irq_disable();
+ calibrate_APIC_clock();
- calibration_result = calibrate_APIC_clock();
/*
- * Now set up the timer for real.
+ * If nmi_watchdog is set to IO_APIC, we need the
+ * PIT/HPET going. Otherwise register lapic as a dummy
+ * device.
*/
- setup_APIC_timer(calibration_result);
+ if (nmi_watchdog != NMI_IO_APIC)
+ lapic_clockevent.features &= ~CLOCK_EVT_FEAT_DUMMY;
+ else
+ printk(KERN_WARNING "APIC timer registered as dummy,"
+ " due to nmi_watchdog=1!\n");
- local_irq_enable();
+ setup_APIC_timer();
}
void __cpuinit setup_secondary_APIC_clock(void)
{
- local_irq_disable(); /* FIXME: Do we need this? --RR */
- setup_APIC_timer(calibration_result);
- local_irq_enable();
+ setup_APIC_timer();
}
-void disable_APIC_timer(void)
-{
- if (using_apic_timer) {
- unsigned long v;
-
- v = apic_read(APIC_LVTT);
- /*
- * When an illegal vector value (0-15) is written to an LVT
- * entry and delivery mode is Fixed, the APIC may signal an
- * illegal vector error, with out regard to whether the mask
- * bit is set or whether an interrupt is actually seen on input.
- *
- * Boot sequence might call this function when the LVTT has
- * '0' vector value. So make sure vector field is set to
- * valid value.
- */
- v |= (APIC_LVT_MASKED | LOCAL_TIMER_VECTOR);
- apic_write(APIC_LVTT, v);
- }
-}
-
-void enable_APIC_timer(void)
-{
- int cpu = smp_processor_id();
-
- if (using_apic_timer &&
- !cpu_isset(cpu, timer_interrupt_broadcast_ipi_mask)) {
- unsigned long v;
-
- v = apic_read(APIC_LVTT);
- apic_write(APIC_LVTT, v & ~APIC_LVT_MASKED);
- }
-}
-
-void switch_APIC_timer_to_ipi(void *cpumask)
-{
- cpumask_t mask = *(cpumask_t *)cpumask;
- int cpu = smp_processor_id();
-
- if (cpu_isset(cpu, mask) &&
- !cpu_isset(cpu, timer_interrupt_broadcast_ipi_mask)) {
- disable_APIC_timer();
- cpu_set(cpu, timer_interrupt_broadcast_ipi_mask);
- }
-}
-EXPORT_SYMBOL(switch_APIC_timer_to_ipi);
-
-void smp_send_timer_broadcast_ipi(void)
-{
- int cpu = smp_processor_id();
- cpumask_t mask;
-
- cpus_and(mask, cpu_online_map, timer_interrupt_broadcast_ipi_mask);
-
- if (cpu_isset(cpu, mask)) {
- cpu_clear(cpu, mask);
- add_pda(apic_timer_irqs, 1);
- smp_local_timer_interrupt();
- }
-
- if (!cpus_empty(mask)) {
- send_IPI_mask(mask, LOCAL_TIMER_VECTOR);
- }
-}
-
-void switch_ipi_to_APIC_timer(void *cpumask)
-{
- cpumask_t mask = *(cpumask_t *)cpumask;
- int cpu = smp_processor_id();
-
- if (cpu_isset(cpu, mask) &&
- cpu_isset(cpu, timer_interrupt_broadcast_ipi_mask)) {
- cpu_clear(cpu, timer_interrupt_broadcast_ipi_mask);
- enable_APIC_timer();
- }
-}
-EXPORT_SYMBOL(switch_ipi_to_APIC_timer);
-
int setup_profiling_timer(unsigned int multiplier)
{
return -EINVAL;
@@ -997,8 +982,6 @@ void setup_APIC_extended_lvt(unsigned char lvt_off, unsigned char vector,
apic_write(reg, v);
}
-#undef APIC_DIVISOR
-
/*
* Local timer interrupt handler. It does both profiling and
* process statistics/rescheduling.
@@ -1011,22 +994,34 @@ void setup_APIC_extended_lvt(unsigned char lvt_off, unsigned char vector,
void smp_local_timer_interrupt(void)
{
- profile_tick(CPU_PROFILING);
-#ifdef CONFIG_SMP
- update_process_times(user_mode(get_irq_regs()));
-#endif
- if (apic_runs_main_timer > 1 && smp_processor_id() == boot_cpu_id)
- main_timer_handler();
+ int cpu = smp_processor_id();
+ struct clock_event_device *evt = &per_cpu(lapic_events, cpu);
+
/*
- * We take the 'long' return path, and there every subsystem
- * grabs the appropriate locks (kernel lock/ irq lock).
+ * Normally we should not be here till LAPIC has been initialized but
+ * in some cases like kdump, its possible that there is a pending LAPIC
+ * timer interrupt from previous kernel's context and is delivered in
+ * new kernel the moment interrupts are enabled.
*
- * We might want to decouple profiling from the 'long path',
- * and do the profiling totally in assembly.
- *
- * Currently this isn't too much of an issue (performance wise),
- * we can take more than 100K local irqs per second on a 100 MHz P5.
+ * Interrupts are enabled early and LAPIC is setup much later, hence
+ * its possible that when we get here evt->event_handler is NULL.
+ * Check for event_handler being NULL and discard the interrupt as
+ * spurious.
+ */
+ if (!evt->event_handler) {
+ printk(KERN_WARNING
+ "Spurious LAPIC timer interrupt on cpu %d\n", cpu);
+ /* Switch it off */
+ lapic_timer_setup(CLOCK_EVT_MODE_SHUTDOWN, evt);
+ return;
+ }
+
+ /*
+ * the NMI deadlock-detector uses this.
*/
+ add_pda(apic_timer_irqs, 1);
+
+ evt->event_handler(evt);
}
/*
@@ -1042,11 +1037,6 @@ void smp_apic_timer_interrupt(struct pt_regs *regs)
struct pt_regs *old_regs = set_irq_regs(regs);
/*
- * the NMI deadlock-detector uses this.
- */
- add_pda(apic_timer_irqs, 1);
-
- /*
* NOTE! We'd better ACK the irq immediately,
* because timer handling can be slow.
*/
@@ -1225,29 +1215,13 @@ static __init int setup_noapictimer(char *str)
disable_apic_timer = 1;
return 1;
}
-
-static __init int setup_apicmaintimer(char *str)
-{
- apic_runs_main_timer = 1;
- nohpet = 1;
- return 1;
-}
-__setup("apicmaintimer", setup_apicmaintimer);
-
-static __init int setup_noapicmaintimer(char *str)
-{
- apic_runs_main_timer = -1;
- return 1;
-}
-__setup("noapicmaintimer", setup_noapicmaintimer);
+__setup("noapictimer", setup_noapictimer);
static __init int setup_apicpmtimer(char *s)
{
apic_calibrate_pmtmr = 1;
notsc_setup(NULL);
- return setup_apicmaintimer(NULL);
+ return 0;
}
__setup("apicpmtimer", setup_apicpmtimer);
-__setup("noapictimer", setup_noapictimer);
-
diff --git a/arch/x86/kernel/geode_32.c b/arch/x86/kernel/geode_32.c
index 41e8aec4c61..f12d8c5d980 100644
--- a/arch/x86/kernel/geode_32.c
+++ b/arch/x86/kernel/geode_32.c
@@ -145,10 +145,14 @@ EXPORT_SYMBOL_GPL(geode_gpio_setup_event);
static int __init geode_southbridge_init(void)
{
+ int timers;
+
if (!is_geode())
return -ENODEV;
init_lbars();
+ timers = geode_mfgpt_detect();
+ printk(KERN_INFO "geode: %d MFGPT timers available.\n", timers);
return 0;
}
diff --git a/arch/x86/kernel/hpet_32.c b/arch/x86/kernel/hpet.c
index 533d4932bc7..f8367074da0 100644
--- a/arch/x86/kernel/hpet_32.c
+++ b/arch/x86/kernel/hpet.c
@@ -1,5 +1,6 @@
#include <linux/clocksource.h>
#include <linux/clockchips.h>
+#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/hpet.h>
#include <linux/init.h>
@@ -7,11 +8,11 @@
#include <linux/pm.h>
#include <linux/delay.h>
+#include <asm/fixmap.h>
#include <asm/hpet.h>
+#include <asm/i8253.h>
#include <asm/io.h>
-extern struct clock_event_device *global_clock_event;
-
#define HPET_MASK CLOCKSOURCE_MASK(32)
#define HPET_SHIFT 22
@@ -22,9 +23,9 @@ extern struct clock_event_device *global_clock_event;
* HPET address is set in acpi/boot.c, when an ACPI entry exists
*/
unsigned long hpet_address;
-static void __iomem * hpet_virt_address;
+static void __iomem *hpet_virt_address;
-static inline unsigned long hpet_readl(unsigned long a)
+unsigned long hpet_readl(unsigned long a)
{
return readl(hpet_virt_address + a);
}
@@ -34,6 +35,36 @@ static inline void hpet_writel(unsigned long d, unsigned long a)
writel(d, hpet_virt_address + a);
}
+#ifdef CONFIG_X86_64
+
+#include <asm/pgtable.h>
+
+static inline void hpet_set_mapping(void)
+{
+ set_fixmap_nocache(FIX_HPET_BASE, hpet_address);
+ __set_fixmap(VSYSCALL_HPET, hpet_address, PAGE_KERNEL_VSYSCALL_NOCACHE);
+ hpet_virt_address = (void __iomem *)fix_to_virt(FIX_HPET_BASE);
+}
+
+static inline void hpet_clear_mapping(void)
+{
+ hpet_virt_address = NULL;
+}
+
+#else
+
+static inline void hpet_set_mapping(void)
+{
+ hpet_virt_address = ioremap_nocache(hpet_address, HPET_MMAP_SIZE);
+}
+
+static inline void hpet_clear_mapping(void)
+{
+ iounmap(hpet_virt_address);
+ hpet_virt_address = NULL;
+}
+#endif
+
/*
* HPET command line enable / disable
*/
@@ -49,6 +80,13 @@ static int __init hpet_setup(char* str)
}
__setup("hpet=", hpet_setup);
+static int __init disable_hpet(char *str)
+{
+ boot_hpet_disable = 1;
+ return 1;
+}
+__setup("nohpet", disable_hpet);
+
static inline int is_hpet_capable(void)
{
return (!boot_hpet_disable && hpet_address);
@@ -83,7 +121,7 @@ static void hpet_reserve_platform_timers(unsigned long id)
memset(&hd, 0, sizeof (hd));
hd.hd_phys_address = hpet_address;
- hd.hd_address = hpet_virt_address;
+ hd.hd_address = hpet;
hd.hd_nirqs = nrtimers;
hd.hd_flags = HPET_DATA_PLATFORM;
hpet_reserve_timer(&hd, 0);
@@ -111,9 +149,9 @@ static void hpet_reserve_platform_timers(unsigned long id) { }
*/
static unsigned long hpet_period;
-static void hpet_set_mode(enum clock_event_mode mode,
+static void hpet_legacy_set_mode(enum clock_event_mode mode,
struct clock_event_device *evt);
-static int hpet_next_event(unsigned long delta,
+static int hpet_legacy_next_event(unsigned long delta,
struct clock_event_device *evt);
/*
@@ -122,10 +160,11 @@ static int hpet_next_event(unsigned long delta,
static struct clock_event_device hpet_clockevent = {
.name = "hpet",
.features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT,
- .set_mode = hpet_set_mode,
- .set_next_event = hpet_next_event,
+ .set_mode = hpet_legacy_set_mode,
+ .set_next_event = hpet_legacy_next_event,
.shift = 32,
.irq = 0,
+ .rating = 50,
};
static void hpet_start_counter(void)
@@ -140,7 +179,18 @@ static void hpet_start_counter(void)
hpet_writel(cfg, HPET_CFG);
}
-static void hpet_enable_int(void)
+static void hpet_resume_device(void)
+{
+ force_hpet_resume();
+}
+
+static void hpet_restart_counter(void)
+{
+ hpet_resume_device();
+ hpet_start_counter();
+}
+
+static void hpet_enable_legacy_int(void)
{
unsigned long cfg = hpet_readl(HPET_CFG);
@@ -149,7 +199,39 @@ static void hpet_enable_int(void)
hpet_legacy_int_enabled = 1;
}
-static void hpet_set_mode(enum clock_event_mode mode,
+static void hpet_legacy_clockevent_register(void)
+{
+ uint64_t hpet_freq;
+
+ /* Start HPET legacy interrupts */
+ hpet_enable_legacy_int();
+
+ /*
+ * The period is a femto seconds value. We need to calculate the
+ * scaled math multiplication factor for nanosecond to hpet tick
+ * conversion.
+ */
+ hpet_freq = 1000000000000000ULL;
+ do_div(hpet_freq, hpet_period);
+ hpet_clockevent.mult = div_sc((unsigned long) hpet_freq,
+ NSEC_PER_SEC, 32);
+ /* Calculate the min / max delta */
+ hpet_clockevent.max_delta_ns = clockevent_delta2ns(0x7FFFFFFF,
+ &hpet_clockevent);
+ hpet_clockevent.min_delta_ns = clockevent_delta2ns(0x30,
+ &hpet_clockevent);
+
+ /*
+ * Start hpet with the boot cpu mask and make it
+ * global after the IO_APIC has been initialized.
+ */
+ hpet_clockevent.cpumask = cpumask_of_cpu(smp_processor_id());
+ clockevents_register_device(&hpet_clockevent);
+ global_clock_event = &hpet_clockevent;
+ printk(KERN_DEBUG "hpet clockevent registered\n");
+}
+
+static void hpet_legacy_set_mode(enum clock_event_mode mode,
struct clock_event_device *evt)
{
unsigned long cfg, cmp, now;
@@ -190,12 +272,12 @@ static void hpet_set_mode(enum clock_event_mode mode,
break;
case CLOCK_EVT_MODE_RESUME:
- hpet_enable_int();
+ hpet_enable_legacy_int();
break;
}
}
-static int hpet_next_event(unsigned long delta,
+static int hpet_legacy_next_event(unsigned long delta,
struct clock_event_device *evt)
{
unsigned long cnt;
@@ -215,6 +297,13 @@ static cycle_t read_hpet(void)
return (cycle_t)hpet_readl(HPET_COUNTER);
}
+#ifdef CONFIG_X86_64
+static cycle_t __vsyscall_fn vread_hpet(void)
+{
+ return readl((const void __iomem *)fix_to_virt(VSYSCALL_HPET) + 0xf0);
+}
+#endif
+
static struct clocksource clocksource_hpet = {
.name = "hpet",
.rating = 250,
@@ -222,61 +311,17 @@ static struct clocksource clocksource_hpet = {
.mask = HPET_MASK,
.shift = HPET_SHIFT,
.flags = CLOCK_SOURCE_IS_CONTINUOUS,
- .resume = hpet_start_counter,
+ .resume = hpet_restart_counter,
+#ifdef CONFIG_X86_64
+ .vread = vread_hpet,
+#endif
};
-/*
- * Try to setup the HPET timer
- */
-int __init hpet_enable(void)
+static int hpet_clocksource_register(void)
{
- unsigned long id;
- uint64_t hpet_freq;
u64 tmp, start, now;
cycle_t t1;
- if (!is_hpet_capable())
- return 0;
-
- hpet_virt_address = ioremap_nocache(hpet_address, HPET_MMAP_SIZE);
-
- /*
- * Read the period and check for a sane value:
- */
- hpet_period = hpet_readl(HPET_PERIOD);
- if (hpet_period < HPET_MIN_PERIOD || hpet_period > HPET_MAX_PERIOD)
- goto out_nohpet;
-
- /*
- * The period is a femto seconds value. We need to calculate the
- * scaled math multiplication factor for nanosecond to hpet tick
- * conversion.
- */
- hpet_freq = 1000000000000000ULL;
- do_div(hpet_freq, hpet_period);
- hpet_clockevent.mult = div_sc((unsigned long) hpet_freq,
- NSEC_PER_SEC, 32);
- /* Calculate the min / max delta */
- hpet_clockevent.max_delta_ns = clockevent_delta2ns(0x7FFFFFFF,
- &hpet_clockevent);
- hpet_clockevent.min_delta_ns = clockevent_delta2ns(0x30,
- &hpet_clockevent);
-
- /*
- * Read the HPET ID register to retrieve the IRQ routing
- * information and the number of channels
- */
- id = hpet_readl(HPET_ID);
-
-#ifdef CONFIG_HPET_EMULATE_RTC
- /*
- * The legacy routing mode needs at least two channels, tick timer
- * and the rtc emulation channel.
- */
- if (!(id & HPET_ID_NUMBER))
- goto out_nohpet;
-#endif
-
/* Start the counter */
hpet_start_counter();
@@ -298,7 +343,7 @@ int __init hpet_enable(void)
if (t1 == read_hpet()) {
printk(KERN_WARNING
"HPET counter not counting. HPET disabled\n");
- goto out_nohpet;
+ return -ENODEV;
}
/* Initialize and register HPET clocksource
@@ -319,27 +364,84 @@ int __init hpet_enable(void)
clocksource_register(&clocksource_hpet);
+ return 0;
+}
+
+/*
+ * Try to setup the HPET timer
+ */
+int __init hpet_enable(void)
+{
+ unsigned long id;
+
+ if (!is_hpet_capable())
+ return 0;
+
+ hpet_set_mapping();
+
+ /*
+ * Read the period and check for a sane value:
+ */
+ hpet_period = hpet_readl(HPET_PERIOD);
+ if (hpet_period < HPET_MIN_PERIOD || hpet_period > HPET_MAX_PERIOD)
+ goto out_nohpet;
+
+ /*
+ * Read the HPET ID register to retrieve the IRQ routing
+ * information and the number of channels
+ */
+ id = hpet_readl(HPET_ID);
+
+#ifdef CONFIG_HPET_EMULATE_RTC
+ /*
+ * The legacy routing mode needs at least two channels, tick timer
+ * and the rtc emulation channel.
+ */
+ if (!(id & HPET_ID_NUMBER))
+ goto out_nohpet;
+#endif
+
+ if (hpet_clocksource_register())
+ goto out_nohpet;
+
if (id & HPET_ID_LEGSUP) {
- hpet_enable_int();
- hpet_reserve_platform_timers(id);
- /*
- * Start hpet with the boot cpu mask and make it
- * global after the IO_APIC has been initialized.
- */
- hpet_clockevent.cpumask = cpumask_of_cpu(smp_processor_id());
- clockevents_register_device(&hpet_clockevent);
- global_clock_event = &hpet_clockevent;
+ hpet_legacy_clockevent_register();
return 1;
}
return 0;
out_nohpet:
- iounmap(hpet_virt_address);
- hpet_virt_address = NULL;
+ hpet_clear_mapping();
boot_hpet_disable = 1;
return 0;
}
+/*
+ * Needs to be late, as the reserve_timer code calls kalloc !
+ *
+ * Not a problem on i386 as hpet_enable is called from late_time_init,
+ * but on x86_64 it is necessary !
+ */
+static __init int hpet_late_init(void)
+{
+ if (boot_hpet_disable)
+ return -ENODEV;
+
+ if (!hpet_address) {
+ if (!force_hpet_address)
+ return -ENODEV;
+
+ hpet_address = force_hpet_address;
+ hpet_enable();
+ if (!hpet_virt_address)
+ return -ENODEV;
+ }
+
+ hpet_reserve_platform_timers(hpet_readl(HPET_ID));
+
+ return 0;
+}
+fs_initcall(hpet_late_init);
#ifdef CONFIG_HPET_EMULATE_RTC
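
The femtosecond arithmetic that hpet_legacy_clockevent_register() performs above is easier to check with concrete numbers. Assuming a typical 14.31818 MHz HPET (a value chosen purely for illustration, not taken from this diff), HPET_PERIOD reads about 69841279 fs, and the registration works out roughly to:

  hpet_freq    = 10^15 / 69841279            ≈ 14318179 Hz
  mult         = (14318179 * 2^32) / 10^9    ≈ 61496110
  max_delta_ns = (0x7FFFFFFF * 2^32) / mult  ≈ 1.5 * 10^11 ns  (~150 s)
  min_delta_ns = (0x30 * 2^32) / mult        ≈ 3.4 * 10^3 ns   (~3.4 us)

so the legacy HPET channel can be programmed anywhere from a few microseconds to a couple of minutes ahead.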
diff --git a/arch/x86/kernel/hpet_64.c b/arch/x86/kernel/hpet_64.c
deleted file mode 100644
index e2d1b912e15..00000000000
--- a/arch/x86/kernel/hpet_64.c
+++ /dev/null
@@ -1,493 +0,0 @@
-#include <linux/kernel.h>
-#include <linux/sched.h>
-#include <linux/init.h>
-#include <linux/mc146818rtc.h>
-#include <linux/time.h>
-#include <linux/clocksource.h>
-#include <linux/ioport.h>
-#include <linux/acpi.h>
-#include <linux/hpet.h>
-#include <asm/pgtable.h>
-#include <asm/vsyscall.h>
-#include <asm/timex.h>
-#include <asm/hpet.h>
-
-#define HPET_MASK 0xFFFFFFFF
-#define HPET_SHIFT 22
-
-/* FSEC = 10^-15 NSEC = 10^-9 */
-#define FSEC_PER_NSEC 1000000
-
-int nohpet __initdata;
-
-unsigned long hpet_address;
-unsigned long hpet_period; /* fsecs / HPET clock */
-unsigned long hpet_tick; /* HPET clocks / interrupt */
-
-int hpet_use_timer; /* Use counter of hpet for time keeping,
- * otherwise PIT
- */
-
-#ifdef CONFIG_HPET
-static __init int late_hpet_init(void)
-{
- struct hpet_data hd;
- unsigned int ntimer;
-
- if (!hpet_address)
- return 0;
-
- memset(&hd, 0, sizeof(hd));
-
- ntimer = hpet_readl(HPET_ID);
- ntimer = (ntimer & HPET_ID_NUMBER) >> HPET_ID_NUMBER_SHIFT;
- ntimer++;
-
- /*
- * Register with driver.
- * Timer0 and Timer1 is used by platform.
- */
- hd.hd_phys_address = hpet_address;
- hd.hd_address = (void __iomem *)fix_to_virt(FIX_HPET_BASE);
- hd.hd_nirqs = ntimer;
- hd.hd_flags = HPET_DATA_PLATFORM;
- hpet_reserve_timer(&hd, 0);
-#ifdef CONFIG_HPET_EMULATE_RTC
- hpet_reserve_timer(&hd, 1);
-#endif
- hd.hd_irq[0] = HPET_LEGACY_8254;
- hd.hd_irq[1] = HPET_LEGACY_RTC;
- if (ntimer > 2) {
- struct hpet *hpet;
- struct hpet_timer *timer;
- int i;
-
- hpet = (struct hpet *) fix_to_virt(FIX_HPET_BASE);
- timer = &hpet->hpet_timers[2];
- for (i = 2; i < ntimer; timer++, i++)
- hd.hd_irq[i] = (timer->hpet_config &
- Tn_INT_ROUTE_CNF_MASK) >>
- Tn_INT_ROUTE_CNF_SHIFT;
-
- }
-
- hpet_alloc(&hd);
- return 0;
-}
-fs_initcall(late_hpet_init);
-#endif
-
-int hpet_timer_stop_set_go(unsigned long tick)
-{
- unsigned int cfg;
-
-/*
- * Stop the timers and reset the main counter.
- */
-
- cfg = hpet_readl(HPET_CFG);
- cfg &= ~(HPET_CFG_ENABLE | HPET_CFG_LEGACY);
- hpet_writel(cfg, HPET_CFG);
- hpet_writel(0, HPET_COUNTER);
- hpet_writel(0, HPET_COUNTER + 4);
-
-/*
- * Set up timer 0, as periodic with first interrupt to happen at hpet_tick,
- * and period also hpet_tick.
- */
- if (hpet_use_timer) {
- hpet_writel(HPET_TN_ENABLE | HPET_TN_PERIODIC | HPET_TN_SETVAL |
- HPET_TN_32BIT, HPET_T0_CFG);
- hpet_writel(hpet_tick, HPET_T0_CMP); /* next interrupt */
- hpet_writel(hpet_tick, HPET_T0_CMP); /* period */
- cfg |= HPET_CFG_LEGACY;
- }
-/*
- * Go!
- */
-
- cfg |= HPET_CFG_ENABLE;
- hpet_writel(cfg, HPET_CFG);
-
- return 0;
-}
-
-static cycle_t read_hpet(void)
-{
- return (cycle_t)hpet_readl(HPET_COUNTER);
-}
-
-static cycle_t __vsyscall_fn vread_hpet(void)
-{
- return readl((void __iomem *)fix_to_virt(VSYSCALL_HPET) + 0xf0);
-}
-
-struct clocksource clocksource_hpet = {
- .name = "hpet",
- .rating = 250,
- .read = read_hpet,
- .mask = (cycle_t)HPET_MASK,
- .mult = 0, /* set below */
- .shift = HPET_SHIFT,
- .flags = CLOCK_SOURCE_IS_CONTINUOUS,
- .vread = vread_hpet,
-};
-
-int __init hpet_arch_init(void)
-{
- unsigned int id;
- u64 tmp;
-
- if (!hpet_address)
- return -1;
- set_fixmap_nocache(FIX_HPET_BASE, hpet_address);
- __set_fixmap(VSYSCALL_HPET, hpet_address, PAGE_KERNEL_VSYSCALL_NOCACHE);
-
-/*
- * Read the period, compute tick and quotient.
- */
-
- id = hpet_readl(HPET_ID);
-
- if (!(id & HPET_ID_VENDOR) || !(id & HPET_ID_NUMBER))
- return -1;
-
- hpet_period = hpet_readl(HPET_PERIOD);
- if (hpet_period < 100000 || hpet_period > 100000000)
- return -1;
-
- hpet_tick = (FSEC_PER_TICK + hpet_period / 2) / hpet_period;
-
- hpet_use_timer = (id & HPET_ID_LEGSUP);
-
- /*
- * hpet period is in femto seconds per cycle
- * so we need to convert this to ns/cyc units
- * aproximated by mult/2^shift
- *
- * fsec/cyc * 1nsec/1000000fsec = nsec/cyc = mult/2^shift
- * fsec/cyc * 1ns/1000000fsec * 2^shift = mult
- * fsec/cyc * 2^shift * 1nsec/1000000fsec = mult
- * (fsec/cyc << shift)/1000000 = mult
- * (hpet_period << shift)/FSEC_PER_NSEC = mult
- */
- tmp = (u64)hpet_period << HPET_SHIFT;
- do_div(tmp, FSEC_PER_NSEC);
- clocksource_hpet.mult = (u32)tmp;
- clocksource_register(&clocksource_hpet);
-
- return hpet_timer_stop_set_go(hpet_tick);
-}
-
-int hpet_reenable(void)
-{
- return hpet_timer_stop_set_go(hpet_tick);
-}
-
-/*
- * calibrate_tsc() calibrates the processor TSC in a very simple way, comparing
- * it to the HPET timer of known frequency.
- */
-
-#define TICK_COUNT 100000000
-#define SMI_THRESHOLD 50000
-#define MAX_TRIES 5
-
-/*
- * Some platforms take periodic SMI interrupts with 5ms duration. Make sure none
- * occurs between the reads of the hpet & TSC.
- */
-static void __init read_hpet_tsc(int *hpet, int *tsc)
-{
- int tsc1, tsc2, hpet1, i;
-
- for (i = 0; i < MAX_TRIES; i++) {
- tsc1 = get_cycles_sync();
- hpet1 = hpet_readl(HPET_COUNTER);
- tsc2 = get_cycles_sync();
- if ((tsc2 - tsc1) < SMI_THRESHOLD)
- break;
- }
- *hpet = hpet1;
- *tsc = tsc2;
-}
-
-unsigned int __init hpet_calibrate_tsc(void)
-{
- int tsc_start, hpet_start;
- int tsc_now, hpet_now;
- unsigned long flags;
-
- local_irq_save(flags);
-
- read_hpet_tsc(&hpet_start, &tsc_start);
-
- do {
- local_irq_disable();
- read_hpet_tsc(&hpet_now, &tsc_now);
- local_irq_restore(flags);
- } while ((tsc_now - tsc_start) < TICK_COUNT &&
- (hpet_now - hpet_start) < TICK_COUNT);
-
- return (tsc_now - tsc_start) * 1000000000L
- / ((hpet_now - hpet_start) * hpet_period / 1000);
-}
-
-#ifdef CONFIG_HPET_EMULATE_RTC
-/* HPET in LegacyReplacement Mode eats up RTC interrupt line. When, HPET
- * is enabled, we support RTC interrupt functionality in software.
- * RTC has 3 kinds of interrupts:
- * 1) Update Interrupt - generate an interrupt, every sec, when RTC clock
- * is updated
- * 2) Alarm Interrupt - generate an interrupt at a specific time of day
- * 3) Periodic Interrupt - generate periodic interrupt, with frequencies
- * 2Hz-8192Hz (2Hz-64Hz for non-root user) (all freqs in powers of 2)
- * (1) and (2) above are implemented using polling at a frequency of
- * 64 Hz. The exact frequency is a tradeoff between accuracy and interrupt
- * overhead. (DEFAULT_RTC_INT_FREQ)
- * For (3), we use interrupts at 64Hz or user specified periodic
- * frequency, whichever is higher.
- */
-#include <linux/rtc.h>
-
-#define DEFAULT_RTC_INT_FREQ 64
-#define RTC_NUM_INTS 1
-
-static unsigned long UIE_on;
-static unsigned long prev_update_sec;
-
-static unsigned long AIE_on;
-static struct rtc_time alarm_time;
-
-static unsigned long PIE_on;
-static unsigned long PIE_freq = DEFAULT_RTC_INT_FREQ;
-static unsigned long PIE_count;
-
-static unsigned long hpet_rtc_int_freq; /* RTC interrupt frequency */
-static unsigned int hpet_t1_cmp; /* cached comparator register */
-
-int is_hpet_enabled(void)
-{
- return hpet_address != 0;
-}
-
-/*
- * Timer 1 for RTC, we do not use periodic interrupt feature,
- * even if HPET supports periodic interrupts on Timer 1.
- * The reason being, to set up a periodic interrupt in HPET, we need to
- * stop the main counter. And if we do that everytime someone diables/enables
- * RTC, we will have adverse effect on main kernel timer running on Timer 0.
- * So, for the time being, simulate the periodic interrupt in software.
- *
- * hpet_rtc_timer_init() is called for the first time and during subsequent
- * interuppts reinit happens through hpet_rtc_timer_reinit().
- */
-int hpet_rtc_timer_init(void)
-{
- unsigned int cfg, cnt;
- unsigned long flags;
-
- if (!is_hpet_enabled())
- return 0;
- /*
- * Set the counter 1 and enable the interrupts.
- */
- if (PIE_on && (PIE_freq > DEFAULT_RTC_INT_FREQ))
- hpet_rtc_int_freq = PIE_freq;
- else
- hpet_rtc_int_freq = DEFAULT_RTC_INT_FREQ;
-
- local_irq_save(flags);
-
- cnt = hpet_readl(HPET_COUNTER);
- cnt += ((hpet_tick*HZ)/hpet_rtc_int_freq);
- hpet_writel(cnt, HPET_T1_CMP);
- hpet_t1_cmp = cnt;
-
- cfg = hpet_readl(HPET_T1_CFG);
- cfg &= ~HPET_TN_PERIODIC;
- cfg |= HPET_TN_ENABLE | HPET_TN_32BIT;
- hpet_writel(cfg, HPET_T1_CFG);
-
- local_irq_restore(flags);
-
- return 1;
-}
-
-static void hpet_rtc_timer_reinit(void)
-{
- unsigned int cfg, cnt, ticks_per_int, lost_ints;
-
- if (unlikely(!(PIE_on | AIE_on | UIE_on))) {
- cfg = hpet_readl(HPET_T1_CFG);
- cfg &= ~HPET_TN_ENABLE;
- hpet_writel(cfg, HPET_T1_CFG);
- return;
- }
-
- if (PIE_on && (PIE_freq > DEFAULT_RTC_INT_FREQ))
- hpet_rtc_int_freq = PIE_freq;
- else
- hpet_rtc_int_freq = DEFAULT_RTC_INT_FREQ;
-
- /* It is more accurate to use the comparator value than current count.*/
- ticks_per_int = hpet_tick * HZ / hpet_rtc_int_freq;
- hpet_t1_cmp += ticks_per_int;
- hpet_writel(hpet_t1_cmp, HPET_T1_CMP);
-
- /*
- * If the interrupt handler was delayed too long, the write above tries
- * to schedule the next interrupt in the past and the hardware would
- * not interrupt until the counter had wrapped around.
- * So we have to check that the comparator wasn't set to a past time.
- */
- cnt = hpet_readl(HPET_COUNTER);
- if (unlikely((int)(cnt - hpet_t1_cmp) > 0)) {
- lost_ints = (cnt - hpet_t1_cmp) / ticks_per_int + 1;
- /* Make sure that, even with the time needed to execute
- * this code, the next scheduled interrupt has been moved
- * back to the future: */
- lost_ints++;
-
- hpet_t1_cmp += lost_ints * ticks_per_int;
- hpet_writel(hpet_t1_cmp, HPET_T1_CMP);
-
- if (PIE_on)
- PIE_count += lost_ints;
-
- if (printk_ratelimit())
- printk(KERN_WARNING "rtc: lost some interrupts at %ldHz.\n",
- hpet_rtc_int_freq);
- }
-}
-
-/*
- * The functions below are called from rtc driver.
- * Return 0 if HPET is not being used.
- * Otherwise do the necessary changes and return 1.
- */
-int hpet_mask_rtc_irq_bit(unsigned long bit_mask)
-{
- if (!is_hpet_enabled())
- return 0;
-
- if (bit_mask & RTC_UIE)
- UIE_on = 0;
- if (bit_mask & RTC_PIE)
- PIE_on = 0;
- if (bit_mask & RTC_AIE)
- AIE_on = 0;
-
- return 1;
-}
-
-int hpet_set_rtc_irq_bit(unsigned long bit_mask)
-{
- int timer_init_reqd = 0;
-
- if (!is_hpet_enabled())
- return 0;
-
- if (!(PIE_on | AIE_on | UIE_on))
- timer_init_reqd = 1;
-
- if (bit_mask & RTC_UIE) {
- UIE_on = 1;
- }
- if (bit_mask & RTC_PIE) {
- PIE_on = 1;
- PIE_count = 0;
- }
- if (bit_mask & RTC_AIE) {
- AIE_on = 1;
- }
-
- if (timer_init_reqd)
- hpet_rtc_timer_init();
-
- return 1;
-}
-
-int hpet_set_alarm_time(unsigned char hrs, unsigned char min, unsigned char sec)
-{
- if (!is_hpet_enabled())
- return 0;
-
- alarm_time.tm_hour = hrs;
- alarm_time.tm_min = min;
- alarm_time.tm_sec = sec;
-
- return 1;
-}
-
-int hpet_set_periodic_freq(unsigned long freq)
-{
- if (!is_hpet_enabled())
- return 0;
-
- PIE_freq = freq;
- PIE_count = 0;
-
- return 1;
-}
-
-int hpet_rtc_dropped_irq(void)
-{
- if (!is_hpet_enabled())
- return 0;
-
- return 1;
-}
-
-irqreturn_t hpet_rtc_interrupt(int irq, void *dev_id)
-{
- struct rtc_time curr_time;
- unsigned long rtc_int_flag = 0;
- int call_rtc_interrupt = 0;
-
- hpet_rtc_timer_reinit();
-
- if (UIE_on | AIE_on) {
- rtc_get_rtc_time(&curr_time);
- }
- if (UIE_on) {
- if (curr_time.tm_sec != prev_update_sec) {
- /* Set update int info, call real rtc int routine */
- call_rtc_interrupt = 1;
- rtc_int_flag = RTC_UF;
- prev_update_sec = curr_time.tm_sec;
- }
- }
- if (PIE_on) {
- PIE_count++;
- if (PIE_count >= hpet_rtc_int_freq/PIE_freq) {
- /* Set periodic int info, call real rtc int routine */
- call_rtc_interrupt = 1;
- rtc_int_flag |= RTC_PF;
- PIE_count = 0;
- }
- }
- if (AIE_on) {
- if ((curr_time.tm_sec == alarm_time.tm_sec) &&
- (curr_time.tm_min == alarm_time.tm_min) &&
- (curr_time.tm_hour == alarm_time.tm_hour)) {
- /* Set alarm int info, call real rtc int routine */
- call_rtc_interrupt = 1;
- rtc_int_flag |= RTC_AF;
- }
- }
- if (call_rtc_interrupt) {
- rtc_int_flag |= (RTC_IRQF | (RTC_NUM_INTS << 8));
- rtc_interrupt(rtc_int_flag, dev_id);
- }
- return IRQ_HANDLED;
-}
-#endif
-
-static int __init nohpet_setup(char *s)
-{
- nohpet = 1;
- return 1;
-}
-
-__setup("nohpet", nohpet_setup);
diff --git a/arch/x86/kernel/i8253_32.c b/arch/x86/kernel/i8253.c
index 6d839f2f1b1..ac15e4cbd9c 100644
--- a/arch/x86/kernel/i8253_32.c
+++ b/arch/x86/kernel/i8253.c
@@ -13,7 +13,6 @@
#include <asm/delay.h>
#include <asm/i8253.h>
#include <asm/io.h>
-#include <asm/timer.h>
DEFINE_SPINLOCK(i8253_lock);
EXPORT_SYMBOL(i8253_lock);
@@ -120,6 +119,7 @@ void __init setup_pit_timer(void)
global_clock_event = &pit_clockevent;
}
+#ifndef CONFIG_X86_64
/*
* Since the PIT overflows every tick, its not very useful
* to just read by itself. So use jiffies to emulate a free
@@ -204,3 +204,5 @@ static int __init init_pit_clocksource(void)
return clocksource_register(&clocksource_pit);
}
arch_initcall(init_pit_clocksource);
+
+#endif
diff --git a/arch/x86/kernel/i8259_32.c b/arch/x86/kernel/i8259_32.c
index 0499cbe9871..679bb33acbf 100644
--- a/arch/x86/kernel/i8259_32.c
+++ b/arch/x86/kernel/i8259_32.c
@@ -10,7 +10,6 @@
#include <linux/sysdev.h>
#include <linux/bitops.h>
-#include <asm/8253pit.h>
#include <asm/atomic.h>
#include <asm/system.h>
#include <asm/io.h>
diff --git a/arch/x86/kernel/i8259_64.c b/arch/x86/kernel/i8259_64.c
index 948cae64609..eb72976cc13 100644
--- a/arch/x86/kernel/i8259_64.c
+++ b/arch/x86/kernel/i8259_64.c
@@ -444,46 +444,6 @@ void __init init_ISA_irqs (void)
}
}
-static void setup_timer_hardware(void)
-{
- outb_p(0x34,0x43); /* binary, mode 2, LSB/MSB, ch 0 */
- udelay(10);
- outb_p(LATCH & 0xff , 0x40); /* LSB */
- udelay(10);
- outb(LATCH >> 8 , 0x40); /* MSB */
-}
-
-static int timer_resume(struct sys_device *dev)
-{
- setup_timer_hardware();
- return 0;
-}
-
-void i8254_timer_resume(void)
-{
- setup_timer_hardware();
-}
-
-static struct sysdev_class timer_sysclass = {
- set_kset_name("timer_pit"),
- .resume = timer_resume,
-};
-
-static struct sys_device device_timer = {
- .id = 0,
- .cls = &timer_sysclass,
-};
-
-static int __init init_timer_sysfs(void)
-{
- int error = sysdev_class_register(&timer_sysclass);
- if (!error)
- error = sysdev_register(&device_timer);
- return error;
-}
-
-device_initcall(init_timer_sysfs);
-
void __init init_IRQ(void)
{
int i;
@@ -533,12 +493,6 @@ void __init init_IRQ(void)
set_intr_gate(SPURIOUS_APIC_VECTOR, spurious_interrupt);
set_intr_gate(ERROR_APIC_VECTOR, error_interrupt);
- /*
- * Set the clock to HZ Hz, we already have a valid
- * vector now:
- */
- setup_timer_hardware();
-
if (!acpi_ioapic)
setup_irq(2, &irq2);
}
diff --git a/arch/x86/kernel/mfgpt_32.c b/arch/x86/kernel/mfgpt_32.c
new file mode 100644
index 00000000000..0ab680f2d9d
--- /dev/null
+++ b/arch/x86/kernel/mfgpt_32.c
@@ -0,0 +1,362 @@
+/*
+ * Driver/API for AMD Geode Multi-Function General Purpose Timers (MFGPT)
+ *
+ * Copyright (C) 2006, Advanced Micro Devices, Inc.
+ * Copyright (C) 2007, Andres Salomon <dilinger@debian.org>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of version 2 of the GNU General Public License
+ * as published by the Free Software Foundation.
+ *
+ * The MFGPTs are documented in AMD Geode CS5536 Companion Device Data Book.
+ */
+
+/*
+ * We are using the 32Khz input clock - its the only one that has the
+ * ranges we find desirable. The following table lists the suitable
+ * divisors and the associated hz, minimum interval
+ * and the maximum interval:
+ *
+ * Divisor Hz Min Delta (S) Max Delta (S)
+ * 1 32000 .0005 2.048
+ * 2 16000 .001 4.096
+ * 4 8000 .002 8.192
+ * 8 4000 .004 16.384
+ * 16 2000 .008 32.768
+ * 32 1000 .016 65.536
+ * 64 500 .032 131.072
+ * 128 250 .064 262.144
+ * 256 125 .128 524.288
+ */
+
+#include <linux/kernel.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <asm/geode.h>
+
+#define F_AVAIL 0x01
+
+static struct mfgpt_timer_t {
+ int flags;
+ struct module *owner;
+} mfgpt_timers[MFGPT_MAX_TIMERS];
+
+/* Selected from the table above */
+
+#define MFGPT_DIVISOR 16
+#define MFGPT_SCALE 4 /* divisor = 2^(scale) */
+#define MFGPT_HZ (32000 / MFGPT_DIVISOR)
+#define MFGPT_PERIODIC (MFGPT_HZ / HZ)
+
+#ifdef CONFIG_GEODE_MFGPT_TIMER
+static int __init mfgpt_timer_setup(void);
+#else
+#define mfgpt_timer_setup() (0)
+#endif
+
+/* Allow for disabling of MFGPTs */
+static int disable;
+static int __init mfgpt_disable(char *s)
+{
+ disable = 1;
+ return 1;
+}
+__setup("nomfgpt", mfgpt_disable);
+
+/*
+ * Check whether any MFGPTs are available for the kernel to use. In most
+ * cases, firmware that uses AMD's VSA code will claim all timers during
+ * bootup; we certainly don't want to take them if they're already in use.
+ * In other cases (such as with VSAless OpenFirmware), the system firmware
+ * leaves timers available for us to use.
+ */
+int __init geode_mfgpt_detect(void)
+{
+ int count = 0, i;
+ u16 val;
+
+ if (disable) {
+ printk(KERN_INFO "geode-mfgpt: Skipping MFGPT setup\n");
+ return 0;
+ }
+
+ for (i = 0; i < MFGPT_MAX_TIMERS; i++) {
+ val = geode_mfgpt_read(i, MFGPT_REG_SETUP);
+ if (!(val & MFGPT_SETUP_SETUP)) {
+ mfgpt_timers[i].flags = F_AVAIL;
+ count++;
+ }
+ }
+
+ /* set up clock event device, if desired */
+ i = mfgpt_timer_setup();
+
+ return count;
+}
+
+int geode_mfgpt_toggle_event(int timer, int cmp, int event, int enable)
+{
+ u32 msr, mask, value, dummy;
+ int shift = (cmp == MFGPT_CMP1) ? 0 : 8;
+
+ if (timer < 0 || timer >= MFGPT_MAX_TIMERS)
+ return -EIO;
+
+ /*
+ * The register maps for these are described in sections 6.17.1.x of
+ * the AMD Geode CS5536 Companion Device Data Book.
+ */
+ switch (event) {
+ case MFGPT_EVENT_RESET:
+ /*
+ * XXX: According to the docs, we cannot reset timers above
+ * 6; that is, resets for 7 and 8 will be ignored. Is this
+ * a problem? -dilinger
+ */
+ msr = MFGPT_NR_MSR;
+ mask = 1 << (timer + 24);
+ break;
+
+ case MFGPT_EVENT_NMI:
+ msr = MFGPT_NR_MSR;
+ mask = 1 << (timer + shift);
+ break;
+
+ case MFGPT_EVENT_IRQ:
+ msr = MFGPT_IRQ_MSR;
+ mask = 1 << (timer + shift);
+ break;
+
+ default:
+ return -EIO;
+ }
+
+ rdmsr(msr, value, dummy);
+
+ if (enable)
+ value |= mask;
+ else
+ value &= ~mask;
+
+ wrmsr(msr, value, dummy);
+ return 0;
+}
+
+int geode_mfgpt_set_irq(int timer, int cmp, int irq, int enable)
+{
+ u32 val, dummy;
+ int offset;
+
+ if (timer < 0 || timer >= MFGPT_MAX_TIMERS)
+ return -EIO;
+
+ if (geode_mfgpt_toggle_event(timer, cmp, MFGPT_EVENT_IRQ, enable))
+ return -EIO;
+
+ rdmsr(MSR_PIC_ZSEL_LOW, val, dummy);
+
+ offset = (timer % 4) * 4;
+
+ val &= ~((0xF << offset) | (0xF << (offset + 16)));
+
+ if (enable) {
+ val |= (irq & 0x0F) << (offset);
+ val |= (irq & 0x0F) << (offset + 16);
+ }
+
+ wrmsr(MSR_PIC_ZSEL_LOW, val, dummy);
+ return 0;
+}
+
+static int mfgpt_get(int timer, struct module *owner)
+{
+ mfgpt_timers[timer].flags &= ~F_AVAIL;
+ mfgpt_timers[timer].owner = owner;
+ printk(KERN_INFO "geode-mfgpt: Registered timer %d\n", timer);
+ return timer;
+}
+
+int geode_mfgpt_alloc_timer(int timer, int domain, struct module *owner)
+{
+ int i;
+
+ if (!geode_get_dev_base(GEODE_DEV_MFGPT))
+ return -ENODEV;
+ if (timer >= MFGPT_MAX_TIMERS)
+ return -EIO;
+
+ if (timer < 0) {
+ /* Try to find an available timer */
+ for (i = 0; i < MFGPT_MAX_TIMERS; i++) {
+ if (mfgpt_timers[i].flags & F_AVAIL)
+ return mfgpt_get(i, owner);
+
+ if (i == 5 && domain == MFGPT_DOMAIN_WORKING)
+ break;
+ }
+ } else {
+ /* If they requested a specific timer, try to honor that */
+ if (mfgpt_timers[timer].flags & F_AVAIL)
+ return mfgpt_get(timer, owner);
+ }
+
+ /* No timers available - too bad */
+ return -1;
+}
+
+
+#ifdef CONFIG_GEODE_MFGPT_TIMER
+
+/*
+ * The MFPGT timers on the CS5536 provide us with suitable timers to use
+ * as clock event sources - not as good as a HPET or APIC, but certainly
+ * better then the PIT. This isn't a general purpose MFGPT driver, but
+ * a simplified one designed specifically to act as a clock event source.
+ * For full details about the MFGPT, please consult the CS5536 data sheet.
+ */
+
+#include <linux/clocksource.h>
+#include <linux/clockchips.h>
+
+static unsigned int mfgpt_tick_mode = CLOCK_EVT_MODE_SHUTDOWN;
+static u16 mfgpt_event_clock;
+
+static int irq = 7;
+static int __init mfgpt_setup(char *str)
+{
+ get_option(&str, &irq);
+ return 1;
+}
+__setup("mfgpt_irq=", mfgpt_setup);
+
+static inline void mfgpt_disable_timer(u16 clock)
+{
+ u16 val = geode_mfgpt_read(clock, MFGPT_REG_SETUP);
+ geode_mfgpt_write(clock, MFGPT_REG_SETUP, val & ~MFGPT_SETUP_CNTEN);
+}
+
+static int mfgpt_next_event(unsigned long, struct clock_event_device *);
+static void mfgpt_set_mode(enum clock_event_mode, struct clock_event_device *);
+
+static struct clock_event_device mfgpt_clockevent = {
+ .name = "mfgpt-timer",
+ .features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT,
+ .set_mode = mfgpt_set_mode,
+ .set_next_event = mfgpt_next_event,
+ .rating = 250,
+ .cpumask = CPU_MASK_ALL,
+ .shift = 32
+};
+
+static inline void mfgpt_start_timer(u16 clock, u16 delta)
+{
+ geode_mfgpt_write(mfgpt_event_clock, MFGPT_REG_CMP2, (u16) delta);
+ geode_mfgpt_write(mfgpt_event_clock, MFGPT_REG_COUNTER, 0);
+
+ geode_mfgpt_write(mfgpt_event_clock, MFGPT_REG_SETUP,
+ MFGPT_SETUP_CNTEN | MFGPT_SETUP_CMP2);
+}
+
+static void mfgpt_set_mode(enum clock_event_mode mode,
+ struct clock_event_device *evt)
+{
+ mfgpt_disable_timer(mfgpt_event_clock);
+
+ if (mode == CLOCK_EVT_MODE_PERIODIC)
+ mfgpt_start_timer(mfgpt_event_clock, MFGPT_PERIODIC);
+
+ mfgpt_tick_mode = mode;
+}
+
+static int mfgpt_next_event(unsigned long delta, struct clock_event_device *evt)
+{
+ mfgpt_start_timer(mfgpt_event_clock, delta);
+ return 0;
+}
+
+/* Assume (foolishly?), that this interrupt was due to our tick */
+
+static irqreturn_t mfgpt_tick(int irq, void *dev_id)
+{
+ if (mfgpt_tick_mode == CLOCK_EVT_MODE_SHUTDOWN)
+ return IRQ_HANDLED;
+
+ /* Turn off the clock */
+ mfgpt_disable_timer(mfgpt_event_clock);
+
+ /* Clear the counter */
+ geode_mfgpt_write(mfgpt_event_clock, MFGPT_REG_COUNTER, 0);
+
+ /* Restart the clock in periodic mode */
+
+ if (mfgpt_tick_mode == CLOCK_EVT_MODE_PERIODIC) {
+ geode_mfgpt_write(mfgpt_event_clock, MFGPT_REG_SETUP,
+ MFGPT_SETUP_CNTEN | MFGPT_SETUP_CMP2);
+ }
+
+ mfgpt_clockevent.event_handler(&mfgpt_clockevent);
+ return IRQ_HANDLED;
+}
+
+static struct irqaction mfgptirq = {
+ .handler = mfgpt_tick,
+ .flags = IRQF_DISABLED | IRQF_NOBALANCING,
+ .mask = CPU_MASK_NONE,
+ .name = "mfgpt-timer"
+};
+
+static int __init mfgpt_timer_setup(void)
+{
+ int timer, ret;
+ u16 val;
+
+ timer = geode_mfgpt_alloc_timer(MFGPT_TIMER_ANY, MFGPT_DOMAIN_WORKING,
+ THIS_MODULE);
+ if (timer < 0) {
+ printk(KERN_ERR
+ "mfgpt-timer: Could not allocate a MFPGT timer\n");
+ return -ENODEV;
+ }
+
+ mfgpt_event_clock = timer;
+ /* Set the clock scale and enable the event mode for CMP2 */
+ val = MFGPT_SCALE | (3 << 8);
+
+ geode_mfgpt_write(mfgpt_event_clock, MFGPT_REG_SETUP, val);
+
+ /* Set up the IRQ on the MFGPT side */
+ if (geode_mfgpt_setup_irq(mfgpt_event_clock, MFGPT_CMP2, irq)) {
+ printk(KERN_ERR "mfgpt-timer: Could not set up IRQ %d\n", irq);
+ return -EIO;
+ }
+
+ /* And register it with the kernel */
+ ret = setup_irq(irq, &mfgptirq);
+
+ if (ret) {
+ printk(KERN_ERR
+ "mfgpt-timer: Unable to set up the interrupt.\n");
+ goto err;
+ }
+
+ /* Set up the clock event */
+ mfgpt_clockevent.mult = div_sc(MFGPT_HZ, NSEC_PER_SEC, 32);
+ mfgpt_clockevent.min_delta_ns = clockevent_delta2ns(0xF,
+ &mfgpt_clockevent);
+ mfgpt_clockevent.max_delta_ns = clockevent_delta2ns(0xFFFE,
+ &mfgpt_clockevent);
+
+ printk(KERN_INFO
+ "mfgpt-timer: registering the MFGT timer as a clock event.\n");
+ clockevents_register_device(&mfgpt_clockevent);
+
+ return 0;
+
+err:
+ geode_mfgpt_release_irq(mfgpt_event_clock, MFGPT_CMP2, irq);
+ printk(KERN_ERR
+ "mfgpt-timer: Unable to set up the MFGPT clock source\n");
+ return -EIO;
+}
+
+#endif
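
To tie the constants in the new driver back to the divisor table in its header comment: with MFGPT_DIVISOR = 16 the clock event math in mfgpt_timer_setup() works out roughly as follows (HZ = 250 is assumed only for the periodic line; the real value depends on the kernel config):

  MFGPT_HZ       = 32000 / 16              = 2000
  MFGPT_PERIODIC = 2000 / 250              = 8 ticks per jiffy
  mult           = (2000 * 2^32) / 10^9    ≈ 8590
  max_delta_ns   = (0xFFFE * 2^32) / 8590  ≈ 3.28 * 10^10 ns  (~32.8 s)
  min_delta_ns   = (0xF * 2^32) / 8590     ≈ 7.5 * 10^6 ns    (~7.5 ms)

which matches the divisor-16 row of the table (minimum interval ~8 ms, maximum ~32.768 s).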
diff --git a/arch/x86/kernel/nmi_32.c b/arch/x86/kernel/nmi_32.c
index c7227e2180f..95d3fc203cf 100644
--- a/arch/x86/kernel/nmi_32.c
+++ b/arch/x86/kernel/nmi_32.c
@@ -353,7 +353,8 @@ __kprobes int nmi_watchdog_tick(struct pt_regs * regs, unsigned reason)
* Take the local apic timer and PIT/HPET into account. We don't
* know which one is active, when we have highres/dyntick on
*/
- sum = per_cpu(irq_stat, cpu).apic_timer_irqs + kstat_cpu(cpu).irqs[0];
+ sum = per_cpu(irq_stat, cpu).apic_timer_irqs +
+ per_cpu(irq_stat, cpu).irq0_irqs;
/* if the none of the timers isn't firing, this cpu isn't doing much */
if (!touched && last_irq_sums[cpu] == sum) {
diff --git a/arch/x86/kernel/nmi_64.c b/arch/x86/kernel/nmi_64.c
index 0ec6d2ddb93..e60ac0da528 100644
--- a/arch/x86/kernel/nmi_64.c
+++ b/arch/x86/kernel/nmi_64.c
@@ -329,7 +329,7 @@ int __kprobes nmi_watchdog_tick(struct pt_regs * regs, unsigned reason)
touched = 1;
}
- sum = read_pda(apic_timer_irqs);
+ sum = read_pda(apic_timer_irqs) + read_pda(irq0_irqs);
if (__get_cpu_var(nmi_touch)) {
__get_cpu_var(nmi_touch) = 0;
touched = 1;
diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
index 98956555450..6f9dbbe65ee 100644
--- a/arch/x86/kernel/process_64.c
+++ b/arch/x86/kernel/process_64.c
@@ -38,6 +38,7 @@
#include <linux/notifier.h>
#include <linux/kprobes.h>
#include <linux/kdebug.h>
+#include <linux/tick.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
@@ -208,6 +209,8 @@ void cpu_idle (void)
if (__get_cpu_var(cpu_idle_state))
__get_cpu_var(cpu_idle_state) = 0;
+ tick_nohz_stop_sched_tick();
+
rmb();
idle = pm_idle;
if (!idle)
@@ -228,6 +231,7 @@ void cpu_idle (void)
__exit_idle();
}
+ tick_nohz_restart_sched_tick();
preempt_enable_no_resched();
schedule();
preempt_disable();
diff --git a/arch/x86/kernel/quirks.c b/arch/x86/kernel/quirks.c
index 6722469c263..d769e204f94 100644
--- a/arch/x86/kernel/quirks.c
+++ b/arch/x86/kernel/quirks.c
@@ -4,6 +4,8 @@
#include <linux/pci.h>
#include <linux/irq.h>
+#include <asm/hpet.h>
+
#if defined(CONFIG_X86_IO_APIC) && defined(CONFIG_SMP) && defined(CONFIG_PCI)
static void __devinit quirk_intel_irqbalance(struct pci_dev *dev)
@@ -47,3 +49,206 @@ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_E7320_MCH, quir
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_E7525_MCH, quirk_intel_irqbalance);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_E7520_MCH, quirk_intel_irqbalance);
#endif
+
+#if defined(CONFIG_HPET_TIMER)
+unsigned long force_hpet_address;
+
+static enum {
+ NONE_FORCE_HPET_RESUME,
+ OLD_ICH_FORCE_HPET_RESUME,
+ ICH_FORCE_HPET_RESUME
+} force_hpet_resume_type;
+
+static void __iomem *rcba_base;
+
+static void ich_force_hpet_resume(void)
+{
+ u32 val;
+
+ if (!force_hpet_address)
+ return;
+
+ if (rcba_base == NULL)
+ BUG();
+
+ /* read the Function Disable register, dword mode only */
+ val = readl(rcba_base + 0x3404);
+ if (!(val & 0x80)) {
+ /* HPET disabled in HPTC. Trying to enable */
+ writel(val | 0x80, rcba_base + 0x3404);
+ }
+
+ val = readl(rcba_base + 0x3404);
+ if (!(val & 0x80))
+ BUG();
+ else
+ printk(KERN_DEBUG "Force enabled HPET at resume\n");
+
+ return;
+}
+
+static void ich_force_enable_hpet(struct pci_dev *dev)
+{
+ u32 val;
+ u32 uninitialized_var(rcba);
+ int err = 0;
+
+ if (hpet_address || force_hpet_address)
+ return;
+
+ pci_read_config_dword(dev, 0xF0, &rcba);
+ rcba &= 0xFFFFC000;
+ if (rcba == 0) {
+ printk(KERN_DEBUG "RCBA disabled. Cannot force enable HPET\n");
+ return;
+ }
+
+ /* use bits 31:14, 16 kB aligned */
+ rcba_base = ioremap_nocache(rcba, 0x4000);
+ if (rcba_base == NULL) {
+ printk(KERN_DEBUG "ioremap failed. Cannot force enable HPET\n");
+ return;
+ }
+
+ /* read the Function Disable register, dword mode only */
+ val = readl(rcba_base + 0x3404);
+
+ if (val & 0x80) {
+ /* HPET is enabled in HPTC. Just not reported by BIOS */
+ val = val & 0x3;
+ force_hpet_address = 0xFED00000 | (val << 12);
+ printk(KERN_DEBUG "Force enabled HPET at base address 0x%lx\n",
+ force_hpet_address);
+ iounmap(rcba_base);
+ return;
+ }
+
+ /* HPET disabled in HPTC. Try to enable it */
+ writel(val | 0x80, rcba_base + 0x3404);
+
+ val = readl(rcba_base + 0x3404);
+ if (!(val & 0x80)) {
+ err = 1;
+ } else {
+ val = val & 0x3;
+ force_hpet_address = 0xFED00000 | (val << 12);
+ }
+
+ if (err) {
+ force_hpet_address = 0;
+ iounmap(rcba_base);
+ printk(KERN_DEBUG "Failed to force enable HPET\n");
+ } else {
+ force_hpet_resume_type = ICH_FORCE_HPET_RESUME;
+ printk(KERN_DEBUG "Force enabled HPET at base address 0x%lx\n",
+ force_hpet_address);
+ }
+}
+
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ESB2_0,
+ ich_force_enable_hpet);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH6_1,
+ ich_force_enable_hpet);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH7_0,
+ ich_force_enable_hpet);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH7_1,
+ ich_force_enable_hpet);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH7_31,
+ ich_force_enable_hpet);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH8_1,
+ ich_force_enable_hpet);
+
+
+static struct pci_dev *cached_dev;
+
+static void old_ich_force_hpet_resume(void)
+{
+ u32 val;
+ u32 uninitialized_var(gen_cntl);
+
+ if (!force_hpet_address || !cached_dev)
+ return;
+
+ pci_read_config_dword(cached_dev, 0xD0, &gen_cntl);
+ gen_cntl &= (~(0x7 << 15));
+ gen_cntl |= (0x4 << 15);
+
+ pci_write_config_dword(cached_dev, 0xD0, gen_cntl);
+ pci_read_config_dword(cached_dev, 0xD0, &gen_cntl);
+ val = gen_cntl >> 15;
+ val &= 0x7;
+ if (val == 0x4)
+ printk(KERN_DEBUG "Force enabled HPET at resume\n");
+ else
+ BUG();
+}
+
+static void old_ich_force_enable_hpet(struct pci_dev *dev)
+{
+ u32 val;
+ u32 uninitialized_var(gen_cntl);
+
+ if (hpet_address || force_hpet_address)
+ return;
+
+ pci_read_config_dword(dev, 0xD0, &gen_cntl);
+ /*
+ * Bit 17 is the HPET enable bit.
+ * Bits 16:15 control the HPET base address.
+ */
+ val = gen_cntl >> 15;
+ val &= 0x7;
+ if (val & 0x4) {
+ val &= 0x3;
+ force_hpet_address = 0xFED00000 | (val << 12);
+ printk(KERN_DEBUG "HPET at base address 0x%lx\n",
+ force_hpet_address);
+ return;
+ }
+
+ /*
+ * HPET is disabled. Try enabling it at 0xFED00000 and check
+ * whether it sticks.
+ */
+ gen_cntl &= (~(0x7 << 15));
+ gen_cntl |= (0x4 << 15);
+ pci_write_config_dword(dev, 0xD0, gen_cntl);
+
+ pci_read_config_dword(dev, 0xD0, &gen_cntl);
+
+ val = gen_cntl >> 15;
+ val &= 0x7;
+ if (val & 0x4) {
+ /* The force enable in GEN_CNTL stuck; read back the base address */
+ val &= 0x3;
+ force_hpet_address = 0xFED00000 | (val << 12);
+ printk(KERN_DEBUG "Force enabled HPET at base address 0x%lx\n",
+ force_hpet_address);
+ cached_dev = dev;
+ force_hpet_resume_type = OLD_ICH_FORCE_HPET_RESUME;
+ return;
+ }
+
+ printk(KERN_DEBUG "Failed to force enable HPET\n");
+}
+
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801EB_0,
+ old_ich_force_enable_hpet);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801EB_12,
+ old_ich_force_enable_hpet);
+
+void force_hpet_resume(void)
+{
+ switch (force_hpet_resume_type) {
+ case ICH_FORCE_HPET_RESUME:
+ return ich_force_hpet_resume();
+
+ case OLD_ICH_FORCE_HPET_RESUME:
+ return old_ich_force_hpet_resume();
+
+ default:
+ break;
+ }
+}
+
+#endif
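
The newer ICH path above reads the HPTC dword at RCBA+0x3404: bit 7 says whether the HPET block is enabled, and bits 1:0 select one of four 4 KiB windows above 0xFED00000. A small userspace sketch of that address decode, assuming the HPTC layout shown in the hunk (illustration only, not the kernel helper):

#include <stdio.h>
#include <stdint.h>

#define HPET_BASE      0xFED00000UL
#define HPTC_ENABLE    0x80u
#define HPTC_ADDR_MASK 0x03u

/* Decode the HPET base address from an HPTC value; 0 means disabled. */
static unsigned long hpet_address_from_hptc(uint32_t hptc)
{
	if (!(hptc & HPTC_ENABLE))
		return 0;	/* HPET still disabled */
	return HPET_BASE | ((unsigned long)(hptc & HPTC_ADDR_MASK) << 12);
}

int main(void)
{
	/* Enable bit set and address select = 2 -> 0xfed02000 */
	printf("0x%lx\n", hpet_address_from_hptc(0x82));
	return 0;
}
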
diff --git a/arch/x86/kernel/setup_64.c b/arch/x86/kernel/setup_64.c
index af838f6b0b7..32054bf5ba4 100644
--- a/arch/x86/kernel/setup_64.c
+++ b/arch/x86/kernel/setup_64.c
@@ -546,6 +546,37 @@ static void __init amd_detect_cmp(struct cpuinfo_x86 *c)
#endif
}
+#define ENABLE_C1E_MASK 0x18000000
+#define CPUID_PROCESSOR_SIGNATURE 1
+#define CPUID_XFAM 0x0ff00000
+#define CPUID_XFAM_K8 0x00000000
+#define CPUID_XFAM_10H 0x00100000
+#define CPUID_XFAM_11H 0x00200000
+#define CPUID_XMOD 0x000f0000
+#define CPUID_XMOD_REV_F 0x00040000
+
+/* AMD systems with C1E don't have a working local APIC timer. Check for that. */
+static __cpuinit int amd_apic_timer_broken(void)
+{
+ u32 lo, hi;
+ u32 eax = cpuid_eax(CPUID_PROCESSOR_SIGNATURE);
+ switch (eax & CPUID_XFAM) {
+ case CPUID_XFAM_K8:
+ if ((eax & CPUID_XMOD) < CPUID_XMOD_REV_F)
+ break;
+ case CPUID_XFAM_10H:
+ case CPUID_XFAM_11H:
+ rdmsr(MSR_K8_ENABLE_C1E, lo, hi);
+ if (lo & ENABLE_C1E_MASK)
+ return 1;
+ break;
+ default:
+ /* err on the side of caution */
+ return 1;
+ }
+ return 0;
+}
+
static void __cpuinit init_amd(struct cpuinfo_x86 *c)
{
unsigned level;
@@ -617,6 +648,9 @@ static void __cpuinit init_amd(struct cpuinfo_x86 *c)
/* Family 10 doesn't support C states in MWAIT so don't use it */
if (c->x86 == 0x10 && !force_mwait)
clear_bit(X86_FEATURE_MWAIT, &c->x86_capability);
+
+ if (amd_apic_timer_broken())
+ disable_apic_timer = 1;
}
static void __cpuinit detect_ht(struct cpuinfo_x86 *c)
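
amd_apic_timer_broken() above boils down to: on K8 revision F and later, family 10h, and family 11h parts, read MSR_K8_ENABLE_C1E and treat the local APIC timer as unusable when either C1E bit is set; unknown families are treated as broken to be safe. The same decision logic, with the CPUID/MSR reads replaced by plain parameters so it compiles in userspace (a sketch, not the kernel function):

#include <stdio.h>
#include <stdint.h>

#define ENABLE_C1E_MASK   0x18000000u
#define CPUID_XFAM        0x0ff00000u
#define CPUID_XFAM_K8     0x00000000u
#define CPUID_XFAM_10H    0x00100000u
#define CPUID_XFAM_11H    0x00200000u
#define CPUID_XMOD        0x000f0000u
#define CPUID_XMOD_REV_F  0x00040000u

/* Returns 1 if the local APIC timer should be considered unusable. */
static int apic_timer_broken(uint32_t cpuid_eax, uint32_t c1e_msr_lo)
{
	switch (cpuid_eax & CPUID_XFAM) {
	case CPUID_XFAM_K8:
		if ((cpuid_eax & CPUID_XMOD) < CPUID_XMOD_REV_F)
			return 0;	/* pre-RevF K8: no C1E issue */
		/* fall through: RevF behaves like family 10h/11h */
	case CPUID_XFAM_10H:
	case CPUID_XFAM_11H:
		return (c1e_msr_lo & ENABLE_C1E_MASK) ? 1 : 0;
	default:
		return 1;	/* unknown family: err on the side of caution */
	}
}

int main(void)
{
	/* RevF K8 signature with C1E enabled in the MSR -> broken (1) */
	printf("%d\n", apic_timer_broken(0x00040f00, ENABLE_C1E_MASK));
	return 0;
}
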
diff --git a/arch/x86/kernel/smpboot_64.c b/arch/x86/kernel/smpboot_64.c
index 32f50783edc..57ccf7cb6b9 100644
--- a/arch/x86/kernel/smpboot_64.c
+++ b/arch/x86/kernel/smpboot_64.c
@@ -223,8 +223,6 @@ void __cpuinit smp_callin(void)
local_irq_disable();
Dprintk("Stack at about %p\n",&cpuid);
- disable_APIC_timer();
-
/*
* Save our processor parameters
*/
@@ -348,8 +346,6 @@ void __cpuinit start_secondary(void)
enable_8259A_irq(0);
}
- enable_APIC_timer();
-
/*
* The sibling maps must be set before turning the online map on for
* this cpu
diff --git a/arch/x86/kernel/time_32.c b/arch/x86/kernel/time_32.c
index 19a6c678d02..56dadfc2f41 100644
--- a/arch/x86/kernel/time_32.c
+++ b/arch/x86/kernel/time_32.c
@@ -157,6 +157,9 @@ EXPORT_SYMBOL(profile_pc);
*/
irqreturn_t timer_interrupt(int irq, void *dev_id)
{
+ /* Keep nmi watchdog up to date */
+ per_cpu(irq_stat, smp_processor_id()).irq0_irqs++;
+
#ifdef CONFIG_X86_IO_APIC
if (timer_ack) {
/*
diff --git a/arch/x86/kernel/time_64.c b/arch/x86/kernel/time_64.c
index 6d48a4e826d..e0134d6c88d 100644
--- a/arch/x86/kernel/time_64.c
+++ b/arch/x86/kernel/time_64.c
@@ -28,11 +28,12 @@
#include <linux/cpu.h>
#include <linux/kallsyms.h>
#include <linux/acpi.h>
+#include <linux/clockchips.h>
+
#ifdef CONFIG_ACPI
#include <acpi/achware.h> /* for PM timer frequency */
#include <acpi/acpi_bus.h>
#endif
-#include <asm/8253pit.h>
#include <asm/i8253.h>
#include <asm/pgtable.h>
#include <asm/vsyscall.h>
@@ -47,12 +48,8 @@
#include <asm/nmi.h>
#include <asm/vgtod.h>
-static char *timename = NULL;
-
DEFINE_SPINLOCK(rtc_lock);
EXPORT_SYMBOL(rtc_lock);
-DEFINE_SPINLOCK(i8253_lock);
-EXPORT_SYMBOL(i8253_lock);
volatile unsigned long __jiffies __section_jiffies = INITIAL_JIFFIES;
@@ -153,45 +150,12 @@ int update_persistent_clock(struct timespec now)
return set_rtc_mmss(now.tv_sec);
}
-void main_timer_handler(void)
+static irqreturn_t timer_event_interrupt(int irq, void *dev_id)
{
-/*
- * Here we are in the timer irq handler. We have irqs locally disabled (so we
- * don't need spin_lock_irqsave()) but we don't know if the timer_bh is running
- * on the other CPU, so we need a lock. We also need to lock the vsyscall
- * variables, because both do_timer() and us change them -arca+vojtech
- */
-
- write_seqlock(&xtime_lock);
+ add_pda(irq0_irqs, 1);
-/*
- * Do the timer stuff.
- */
-
- do_timer(1);
-#ifndef CONFIG_SMP
- update_process_times(user_mode(get_irq_regs()));
-#endif
+ global_clock_event->event_handler(global_clock_event);
-/*
- * In the SMP case we use the local APIC timer interrupt to do the profiling,
- * except when we simulate SMP mode on a uniprocessor system, in that case we
- * have to call the local interrupt handler.
- */
-
- if (!using_apic_timer)
- smp_local_timer_interrupt();
-
- write_sequnlock(&xtime_lock);
-}
-
-static irqreturn_t timer_interrupt(int irq, void *dev_id)
-{
- if (apic_runs_main_timer > 1)
- return IRQ_HANDLED;
- main_timer_handler();
- if (using_apic_timer)
- smp_send_timer_broadcast_ipi();
return IRQ_HANDLED;
}
@@ -292,97 +256,21 @@ static unsigned int __init tsc_calibrate_cpu_khz(void)
return pmc_now * tsc_khz / (tsc_now - tsc_start);
}
-/*
- * pit_calibrate_tsc() uses the speaker output (channel 2) of
- * the PIT. This is better than using the timer interrupt output,
- * because we can read the value of the speaker with just one inb(),
- * where we need three i/o operations for the interrupt channel.
- * We count how many ticks the TSC does in 50 ms.
- */
-
-static unsigned int __init pit_calibrate_tsc(void)
-{
- unsigned long start, end;
- unsigned long flags;
-
- spin_lock_irqsave(&i8253_lock, flags);
-
- outb((inb(0x61) & ~0x02) | 0x01, 0x61);
-
- outb(0xb0, 0x43);
- outb((PIT_TICK_RATE / (1000 / 50)) & 0xff, 0x42);
- outb((PIT_TICK_RATE / (1000 / 50)) >> 8, 0x42);
- start = get_cycles_sync();
- while ((inb(0x61) & 0x20) == 0);
- end = get_cycles_sync();
-
- spin_unlock_irqrestore(&i8253_lock, flags);
-
- return (end - start) / 50;
-}
-
-#define PIT_MODE 0x43
-#define PIT_CH0 0x40
-
-static void __pit_init(int val, u8 mode)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&i8253_lock, flags);
- outb_p(mode, PIT_MODE);
- outb_p(val & 0xff, PIT_CH0); /* LSB */
- outb_p(val >> 8, PIT_CH0); /* MSB */
- spin_unlock_irqrestore(&i8253_lock, flags);
-}
-
-void __init pit_init(void)
-{
- __pit_init(LATCH, 0x34); /* binary, mode 2, LSB/MSB, ch 0 */
-}
-
-void pit_stop_interrupt(void)
-{
- __pit_init(0, 0x30); /* mode 0 */
-}
-
-void stop_timer_interrupt(void)
-{
- char *name;
- if (hpet_address) {
- name = "HPET";
- hpet_timer_stop_set_go(0);
- } else {
- name = "PIT";
- pit_stop_interrupt();
- }
- printk(KERN_INFO "timer: %s interrupt stopped.\n", name);
-}
-
static struct irqaction irq0 = {
- .handler = timer_interrupt,
- .flags = IRQF_DISABLED | IRQF_IRQPOLL,
+ .handler = timer_event_interrupt,
+ .flags = IRQF_DISABLED | IRQF_IRQPOLL | IRQF_NOBALANCING,
.mask = CPU_MASK_NONE,
.name = "timer"
};
void __init time_init(void)
{
- if (nohpet)
- hpet_address = 0;
+ if (!hpet_enable())
+ setup_pit_timer();
- if (hpet_arch_init())
- hpet_address = 0;
+ setup_irq(0, &irq0);
- if (hpet_use_timer) {
- /* set tick_nsec to use the proper rate for HPET */
- tick_nsec = TICK_NSEC_HPET;
- tsc_khz = hpet_calibrate_tsc();
- timename = "HPET";
- } else {
- pit_init();
- tsc_khz = pit_calibrate_tsc();
- timename = "PIT";
- }
+ tsc_calibrate();
cpu_khz = tsc_khz;
if (cpu_has(&boot_cpu_data, X86_FEATURE_CONSTANT_TSC) &&
@@ -398,50 +286,7 @@ void __init time_init(void)
else
vgetcpu_mode = VGETCPU_LSL;
- set_cyc2ns_scale(tsc_khz);
printk(KERN_INFO "time.c: Detected %d.%03d MHz processor.\n",
cpu_khz / 1000, cpu_khz % 1000);
init_tsc_clocksource();
-
- setup_irq(0, &irq0);
-}
-
-/*
- * sysfs support for the timer.
- */
-
-static int timer_suspend(struct sys_device *dev, pm_message_t state)
-{
- return 0;
-}
-
-static int timer_resume(struct sys_device *dev)
-{
- if (hpet_address)
- hpet_reenable();
- else
- i8254_timer_resume();
- return 0;
}
-
-static struct sysdev_class timer_sysclass = {
- .resume = timer_resume,
- .suspend = timer_suspend,
- set_kset_name("timer"),
-};
-
-/* XXX this sysfs stuff should probably go elsewhere later -john */
-static struct sys_device device_timer = {
- .id = 0,
- .cls = &timer_sysclass,
-};
-
-static int time_init_device(void)
-{
- int error = sysdev_class_register(&timer_sysclass);
- if (!error)
- error = sysdev_register(&device_timer);
- return error;
-}
-
-device_initcall(time_init_device);
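
After this rewrite, time_init() on x86_64 mirrors the i386 flow: register an HPET clock event device if possible, otherwise fall back to the PIT, hook IRQ0 to timer_event_interrupt(), then calibrate the TSC. A condensed, compilable sketch of that ordering, with stub functions standing in for the kernel calls (names ending in _stub are illustrative, not kernel APIs):

#include <stdio.h>
#include <stdbool.h>

static bool hpet_enable_stub(void)       { return false; }	/* pretend no HPET */
static void setup_pit_timer_stub(void)   { puts("PIT programmed as clock event"); }
static void setup_irq0_stub(void)        { puts("IRQ0 -> timer_event_interrupt"); }
static unsigned long tsc_calibrate_stub(void) { return 2400000; }	/* kHz */

int main(void)
{
	if (!hpet_enable_stub())
		setup_pit_timer_stub();

	setup_irq0_stub();

	unsigned long tsc_khz = tsc_calibrate_stub();
	printf("Detected %lu.%03lu MHz processor.\n",
	       tsc_khz / 1000, tsc_khz % 1000);
	return 0;
}
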
diff --git a/arch/x86/kernel/tsc_64.c b/arch/x86/kernel/tsc_64.c
index 2a59bde663f..9f22e542c37 100644
--- a/arch/x86/kernel/tsc_64.c
+++ b/arch/x86/kernel/tsc_64.c
@@ -6,7 +6,9 @@
#include <linux/time.h>
#include <linux/acpi.h>
#include <linux/cpufreq.h>
+#include <linux/acpi_pmtmr.h>
+#include <asm/hpet.h>
#include <asm/timex.h>
static int notsc __initdata = 0;
@@ -18,7 +20,7 @@ EXPORT_SYMBOL(tsc_khz);
static unsigned int cyc2ns_scale __read_mostly;
-void set_cyc2ns_scale(unsigned long khz)
+static inline void set_cyc2ns_scale(unsigned long khz)
{
cyc2ns_scale = (NSEC_PER_MSEC << NS_SCALE) / khz;
}
@@ -118,6 +120,95 @@ core_initcall(cpufreq_tsc);
#endif
+#define MAX_RETRIES 5
+#define SMI_TRESHOLD 50000
+
+/*
+ * Read TSC and the reference counters. Take care of SMI disturbance
+ */
+static unsigned long __init tsc_read_refs(unsigned long *pm,
+ unsigned long *hpet)
+{
+ unsigned long t1, t2;
+ int i;
+
+ for (i = 0; i < MAX_RETRIES; i++) {
+ t1 = get_cycles_sync();
+ if (hpet)
+ *hpet = hpet_readl(HPET_COUNTER) & 0xFFFFFFFF;
+ else
+ *pm = acpi_pm_read_early();
+ t2 = get_cycles_sync();
+ if ((t2 - t1) < SMI_TRESHOLD)
+ return t2;
+ }
+ return ULONG_MAX;
+}
+
+/**
+ * tsc_calibrate - calibrate the tsc on boot
+ */
+void __init tsc_calibrate(void)
+{
+ unsigned long flags, tsc1, tsc2, tr1, tr2, pm1, pm2, hpet1, hpet2;
+ int hpet = is_hpet_enabled();
+
+ local_irq_save(flags);
+
+ tsc1 = tsc_read_refs(&pm1, hpet ? &hpet1 : NULL);
+
+ outb((inb(0x61) & ~0x02) | 0x01, 0x61);
+
+ outb(0xb0, 0x43);
+ outb((CLOCK_TICK_RATE / (1000 / 50)) & 0xff, 0x42);
+ outb((CLOCK_TICK_RATE / (1000 / 50)) >> 8, 0x42);
+ tr1 = get_cycles_sync();
+ while ((inb(0x61) & 0x20) == 0);
+ tr2 = get_cycles_sync();
+
+ tsc2 = tsc_read_refs(&pm2, hpet ? &hpet2 : NULL);
+
+ local_irq_restore(flags);
+
+ /*
+ * Preset the result with the raw and inaccurate PIT
+ * calibration value
+ */
+ tsc_khz = (tr2 - tr1) / 50;
+
+ /* Is the HPET or the PM timer available? */
+ if (!hpet && !pm1 && !pm2) {
+ printk(KERN_INFO "TSC calibrated against PIT\n");
+ return;
+ }
+
+ /* Check whether the sampling was disturbed by an SMI */
+ if (tsc1 == ULONG_MAX || tsc2 == ULONG_MAX) {
+ printk(KERN_WARNING "TSC calibration disturbed by SMI, "
+ "using PIT calibration result\n");
+ return;
+ }
+
+ tsc2 = (tsc2 - tsc1) * 1000000L;
+
+ if (hpet) {
+ printk(KERN_INFO "TSC calibrated against HPET\n");
+ if (hpet2 < hpet1)
+ hpet2 += 0x100000000;
+ hpet2 -= hpet1;
+ tsc1 = (hpet2 * hpet_readl(HPET_PERIOD)) / 1000000;
+ } else {
+ printk(KERN_INFO "TSC calibrated against PM_TIMER\n");
+ if (pm2 < pm1)
+ pm2 += ACPI_PM_OVRRUN;
+ pm2 -= pm1;
+ tsc1 = (pm2 * 1000000000) / PMTMR_TICKS_PER_SEC;
+ }
+
+ tsc_khz = tsc2 / tsc1;
+ set_cyc2ns_scale(tsc_khz);
+}
+
/*
* Make an educated guess if the TSC is trustworthy and synchronized
* over all CPUs.
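
The calibration in tsc_calibrate() above reduces to one ratio: convert the HPET or PM-timer delta into nanoseconds, then tsc_khz = tsc_cycles * 10^6 / elapsed_ns (cycles per millisecond). A worked userspace example for the PM-timer branch; the ~50 ms interval and the cycle count are made-up inputs, only the 3.579545 MHz PM-timer rate is taken from the hunk:

#include <stdio.h>
#include <stdint.h>

#define PMTMR_TICKS_PER_SEC 3579545ULL	/* ACPI PM timer rate */

static unsigned long tsc_khz_from_pmtimer(uint64_t tsc_delta, uint64_t pm_ticks)
{
	/* PM-timer ticks -> elapsed nanoseconds */
	uint64_t elapsed_ns = pm_ticks * 1000000000ULL / PMTMR_TICKS_PER_SEC;

	/* cycles per millisecond == kHz */
	return (unsigned long)(tsc_delta * 1000000ULL / elapsed_ns);
}

int main(void)
{
	uint64_t pm_ticks  = PMTMR_TICKS_PER_SEC / 20;	/* ~50 ms window */
	uint64_t tsc_delta = 120000000ULL;		/* ~120M cycles */

	/* prints roughly 2400000 kHz, i.e. a 2.4 GHz TSC */
	printf("tsc_khz = %lu\n", tsc_khz_from_pmtimer(tsc_delta, pm_ticks));
	return 0;
}
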
diff --git a/arch/x86_64/Kconfig b/arch/x86_64/Kconfig
index b1b98e614f7..eb80f5aca54 100644
--- a/arch/x86_64/Kconfig
+++ b/arch/x86_64/Kconfig
@@ -36,6 +36,18 @@ config GENERIC_CMOS_UPDATE
bool
default y
+config CLOCKSOURCE_WATCHDOG
+ bool
+ default y
+
+config GENERIC_CLOCKEVENTS
+ bool
+ default y
+
+config GENERIC_CLOCKEVENTS_BROADCAST
+ bool
+ default y
+
config ZONE_DMA32
bool
default y
@@ -130,6 +142,8 @@ source "init/Kconfig"
menu "Processor type and features"
+source "kernel/time/Kconfig"
+
choice
prompt "Subarchitecture Type"
default X86_PC