Diffstat (limited to 'arch/s390')
-rw-r--r--  arch/s390/appldata/appldata_base.c | 130
-rw-r--r--  arch/s390/include/asm/cputime.h    |   6
-rw-r--r--  arch/s390/include/asm/timer.h      |  51
-rw-r--r--  arch/s390/include/asm/vtimer.h     |  33
-rw-r--r--  arch/s390/kernel/asm-offsets.c     |  10
-rw-r--r--  arch/s390/kernel/entry.S           |  37
-rw-r--r--  arch/s390/kernel/entry.h           |   4
-rw-r--r--  arch/s390/kernel/entry64.S         |  39
-rw-r--r--  arch/s390/kernel/process.c         |   2
-rw-r--r--  arch/s390/kernel/smp.c             |   8
-rw-r--r--  arch/s390/kernel/time.c            |   2
-rw-r--r--  arch/s390/kernel/vtime.c           | 370
-rw-r--r--  arch/s390/lib/delay.c              |   2
13 files changed, 222 insertions, 472 deletions
diff --git a/arch/s390/appldata/appldata_base.c b/arch/s390/appldata/appldata_base.c
index fadefce09962..bae0f402bf2a 100644
--- a/arch/s390/appldata/appldata_base.c
+++ b/arch/s390/appldata/appldata_base.c
@@ -27,7 +27,7 @@
#include <linux/suspend.h>
#include <linux/platform_device.h>
#include <asm/appldata.h>
-#include <asm/timer.h>
+#include <asm/vtimer.h>
#include <asm/uaccess.h>
#include <asm/io.h>
#include <asm/smp.h>
@@ -82,8 +82,7 @@ static struct ctl_table appldata_dir_table[] = {
/*
* Timer
*/
-static DEFINE_PER_CPU(struct vtimer_list, appldata_timer);
-static atomic_t appldata_expire_count = ATOMIC_INIT(0);
+static struct vtimer_list appldata_timer;
static DEFINE_SPINLOCK(appldata_timer_lock);
static int appldata_interval = APPLDATA_CPU_INTERVAL;
@@ -113,10 +112,7 @@ static LIST_HEAD(appldata_ops_list);
*/
static void appldata_timer_function(unsigned long data)
{
- if (atomic_dec_and_test(&appldata_expire_count)) {
- atomic_set(&appldata_expire_count, num_online_cpus());
- queue_work(appldata_wq, (struct work_struct *) data);
- }
+ queue_work(appldata_wq, (struct work_struct *) data);
}
/*
@@ -129,7 +125,6 @@ static void appldata_work_fn(struct work_struct *work)
struct list_head *lh;
struct appldata_ops *ops;
- get_online_cpus();
mutex_lock(&appldata_ops_mutex);
list_for_each(lh, &appldata_ops_list) {
ops = list_entry(lh, struct appldata_ops, list);
@@ -138,7 +133,6 @@ static void appldata_work_fn(struct work_struct *work)
}
}
mutex_unlock(&appldata_ops_mutex);
- put_online_cpus();
}
/*
@@ -166,20 +160,6 @@ int appldata_diag(char record_nr, u16 function, unsigned long buffer,
/****************************** /proc stuff **********************************/
-/*
- * appldata_mod_vtimer_wrap()
- *
- * wrapper function for mod_virt_timer(), because smp_call_function_single()
- * accepts only one parameter.
- */
-static void __appldata_mod_vtimer_wrap(void *p) {
- struct {
- struct vtimer_list *timer;
- u64 expires;
- } *args = p;
- mod_virt_timer_periodic(args->timer, args->expires);
-}
-
#define APPLDATA_ADD_TIMER 0
#define APPLDATA_DEL_TIMER 1
#define APPLDATA_MOD_TIMER 2
@@ -190,49 +170,28 @@ static void __appldata_mod_vtimer_wrap(void *p) {
* Add, delete or modify virtual timers on all online cpus.
* The caller needs to get the appldata_timer_lock spinlock.
*/
-static void
-__appldata_vtimer_setup(int cmd)
+static void __appldata_vtimer_setup(int cmd)
{
- u64 per_cpu_interval;
- int i;
+ u64 timer_interval = (u64) appldata_interval * 1000 * TOD_MICRO;
switch (cmd) {
case APPLDATA_ADD_TIMER:
if (appldata_timer_active)
break;
- per_cpu_interval = (u64) (appldata_interval*1000 /
- num_online_cpus()) * TOD_MICRO;
- for_each_online_cpu(i) {
- per_cpu(appldata_timer, i).expires = per_cpu_interval;
- smp_call_function_single(i, add_virt_timer_periodic,
- &per_cpu(appldata_timer, i),
- 1);
- }
+ appldata_timer.expires = timer_interval;
+ add_virt_timer_periodic(&appldata_timer);
appldata_timer_active = 1;
break;
case APPLDATA_DEL_TIMER:
- for_each_online_cpu(i)
- del_virt_timer(&per_cpu(appldata_timer, i));
+ del_virt_timer(&appldata_timer);
if (!appldata_timer_active)
break;
appldata_timer_active = 0;
- atomic_set(&appldata_expire_count, num_online_cpus());
break;
case APPLDATA_MOD_TIMER:
- per_cpu_interval = (u64) (appldata_interval*1000 /
- num_online_cpus()) * TOD_MICRO;
if (!appldata_timer_active)
break;
- for_each_online_cpu(i) {
- struct {
- struct vtimer_list *timer;
- u64 expires;
- } args;
- args.timer = &per_cpu(appldata_timer, i);
- args.expires = per_cpu_interval;
- smp_call_function_single(i, __appldata_mod_vtimer_wrap,
- &args, 1);
- }
+ mod_virt_timer_periodic(&appldata_timer, timer_interval);
}
}
@@ -263,14 +222,12 @@ appldata_timer_handler(ctl_table *ctl, int write,
len = *lenp;
if (copy_from_user(buf, buffer, len > sizeof(buf) ? sizeof(buf) : len))
return -EFAULT;
- get_online_cpus();
spin_lock(&appldata_timer_lock);
if (buf[0] == '1')
__appldata_vtimer_setup(APPLDATA_ADD_TIMER);
else if (buf[0] == '0')
__appldata_vtimer_setup(APPLDATA_DEL_TIMER);
spin_unlock(&appldata_timer_lock);
- put_online_cpus();
out:
*lenp = len;
*ppos += len;
@@ -303,20 +260,17 @@ appldata_interval_handler(ctl_table *ctl, int write,
goto out;
}
len = *lenp;
- if (copy_from_user(buf, buffer, len > sizeof(buf) ? sizeof(buf) : len)) {
+ if (copy_from_user(buf, buffer, len > sizeof(buf) ? sizeof(buf) : len))
return -EFAULT;
- }
interval = 0;
sscanf(buf, "%i", &interval);
if (interval <= 0)
return -EINVAL;
- get_online_cpus();
spin_lock(&appldata_timer_lock);
appldata_interval = interval;
__appldata_vtimer_setup(APPLDATA_MOD_TIMER);
spin_unlock(&appldata_timer_lock);
- put_online_cpus();
out:
*lenp = len;
*ppos += len;
@@ -483,14 +437,12 @@ static int appldata_freeze(struct device *dev)
int rc;
struct list_head *lh;
- get_online_cpus();
spin_lock(&appldata_timer_lock);
if (appldata_timer_active) {
__appldata_vtimer_setup(APPLDATA_DEL_TIMER);
appldata_timer_suspended = 1;
}
spin_unlock(&appldata_timer_lock);
- put_online_cpus();
mutex_lock(&appldata_ops_mutex);
list_for_each(lh, &appldata_ops_list) {
@@ -514,14 +466,12 @@ static int appldata_restore(struct device *dev)
int rc;
struct list_head *lh;
- get_online_cpus();
spin_lock(&appldata_timer_lock);
if (appldata_timer_suspended) {
__appldata_vtimer_setup(APPLDATA_ADD_TIMER);
appldata_timer_suspended = 0;
}
spin_unlock(&appldata_timer_lock);
- put_online_cpus();
mutex_lock(&appldata_ops_mutex);
list_for_each(lh, &appldata_ops_list) {
@@ -565,53 +515,6 @@ static struct platform_driver appldata_pdrv = {
/******************************* init / exit *********************************/
-static void __cpuinit appldata_online_cpu(int cpu)
-{
- init_virt_timer(&per_cpu(appldata_timer, cpu));
- per_cpu(appldata_timer, cpu).function = appldata_timer_function;
- per_cpu(appldata_timer, cpu).data = (unsigned long)
- &appldata_work;
- atomic_inc(&appldata_expire_count);
- spin_lock(&appldata_timer_lock);
- __appldata_vtimer_setup(APPLDATA_MOD_TIMER);
- spin_unlock(&appldata_timer_lock);
-}
-
-static void __cpuinit appldata_offline_cpu(int cpu)
-{
- del_virt_timer(&per_cpu(appldata_timer, cpu));
- if (atomic_dec_and_test(&appldata_expire_count)) {
- atomic_set(&appldata_expire_count, num_online_cpus());
- queue_work(appldata_wq, &appldata_work);
- }
- spin_lock(&appldata_timer_lock);
- __appldata_vtimer_setup(APPLDATA_MOD_TIMER);
- spin_unlock(&appldata_timer_lock);
-}
-
-static int __cpuinit appldata_cpu_notify(struct notifier_block *self,
- unsigned long action,
- void *hcpu)
-{
- switch (action) {
- case CPU_ONLINE:
- case CPU_ONLINE_FROZEN:
- appldata_online_cpu((long) hcpu);
- break;
- case CPU_DEAD:
- case CPU_DEAD_FROZEN:
- appldata_offline_cpu((long) hcpu);
- break;
- default:
- break;
- }
- return NOTIFY_OK;
-}
-
-static struct notifier_block __cpuinitdata appldata_nb = {
- .notifier_call = appldata_cpu_notify,
-};
-
/*
* appldata_init()
*
@@ -619,7 +522,10 @@ static struct notifier_block __cpuinitdata appldata_nb = {
*/
static int __init appldata_init(void)
{
- int i, rc;
+ int rc;
+
+ appldata_timer.function = appldata_timer_function;
+ appldata_timer.data = (unsigned long) &appldata_work;
rc = platform_driver_register(&appldata_pdrv);
if (rc)
@@ -637,14 +543,6 @@ static int __init appldata_init(void)
goto out_device;
}
- get_online_cpus();
- for_each_online_cpu(i)
- appldata_online_cpu(i);
- put_online_cpus();
-
- /* Register cpu hotplug notifier */
- register_hotcpu_notifier(&appldata_nb);
-
appldata_sysctl_header = register_sysctl_table(appldata_dir_table);
return 0;
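
[Note: with the per-CPU timers gone, a driver wanting the appldata pattern only needs one statically allocated vtimer whose callback queues work; no CPU hotplug notifier is required. A minimal sketch against the reworked interface — sample_timer, sample_work, sample_timer_fn and sample_work_fn are hypothetical names, not part of the patch:

#include <linux/workqueue.h>
#include <asm/vtimer.h>

static struct vtimer_list sample_timer;
static struct work_struct sample_work;

static void sample_work_fn(struct work_struct *work)
{
	/* collect/record data in process context */
}

static void sample_timer_fn(unsigned long data)
{
	/* timer callback runs from the expiry path; defer the real work */
	queue_work(system_wq, (struct work_struct *) data);
}

static void sample_timer_start(u64 interval)
{
	INIT_WORK(&sample_work, sample_work_fn);
	init_virt_timer(&sample_timer);		/* initialize list linkage */
	sample_timer.function = sample_timer_fn;
	sample_timer.data = (unsigned long) &sample_work;
	sample_timer.expires = interval;	/* CPU-timer units */
	add_virt_timer_periodic(&sample_timer);
}
]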
diff --git a/arch/s390/include/asm/cputime.h b/arch/s390/include/asm/cputime.h
index 357ea7b9714e..8709bdef233c 100644
--- a/arch/s390/include/asm/cputime.h
+++ b/arch/s390/include/asm/cputime.h
@@ -168,9 +168,11 @@ struct s390_idle_data {
int nohz_delay;
unsigned int sequence;
unsigned long long idle_count;
- unsigned long long idle_enter;
- unsigned long long idle_exit;
unsigned long long idle_time;
+ unsigned long long clock_idle_enter;
+ unsigned long long clock_idle_exit;
+ unsigned long long timer_idle_enter;
+ unsigned long long timer_idle_exit;
};
DECLARE_PER_CPU(struct s390_idle_data, s390_idle);
diff --git a/arch/s390/include/asm/timer.h b/arch/s390/include/asm/timer.h
deleted file mode 100644
index 15d647901e5c..000000000000
--- a/arch/s390/include/asm/timer.h
+++ /dev/null
@@ -1,51 +0,0 @@
-/*
- * include/asm-s390/timer.h
- *
- * (C) Copyright IBM Corp. 2003,2006
- * Virtual CPU timer
- *
- * Author: Jan Glauber (jang@de.ibm.com)
- */
-
-#ifndef _ASM_S390_TIMER_H
-#define _ASM_S390_TIMER_H
-
-#include <linux/timer.h>
-
-#define VTIMER_MAX_SLICE (0x7ffffffffffff000LL)
-
-struct vtimer_list {
- struct list_head entry;
-
- int cpu;
- __u64 expires;
- __u64 interval;
-
- void (*function)(unsigned long);
- unsigned long data;
-};
-
-/* the vtimer value will wrap after ca. 71 years */
-struct vtimer_queue {
- struct list_head list;
- spinlock_t lock;
- __u64 timer; /* last programmed timer */
- __u64 elapsed; /* elapsed time of timer expire values */
- __u64 idle_enter; /* cpu timer on idle enter */
- __u64 idle_exit; /* cpu timer on idle exit */
-};
-
-extern void init_virt_timer(struct vtimer_list *timer);
-extern void add_virt_timer(void *new);
-extern void add_virt_timer_periodic(void *new);
-extern int mod_virt_timer(struct vtimer_list *timer, __u64 expires);
-extern int mod_virt_timer_periodic(struct vtimer_list *timer, __u64 expires);
-extern int del_virt_timer(struct vtimer_list *timer);
-
-extern void init_cpu_vtimer(void);
-extern void vtime_init(void);
-
-extern void vtime_stop_cpu(void);
-extern void vtime_start_leave(void);
-
-#endif /* _ASM_S390_TIMER_H */
diff --git a/arch/s390/include/asm/vtimer.h b/arch/s390/include/asm/vtimer.h
new file mode 100644
index 000000000000..bfe25d513ad2
--- /dev/null
+++ b/arch/s390/include/asm/vtimer.h
@@ -0,0 +1,33 @@
+/*
+ * Copyright IBM Corp. 2003, 2012
+ * Virtual CPU timer
+ *
+ * Author(s): Jan Glauber <jan.glauber@de.ibm.com>
+ */
+
+#ifndef _ASM_S390_TIMER_H
+#define _ASM_S390_TIMER_H
+
+#define VTIMER_MAX_SLICE (0x7fffffffffffffffULL)
+
+struct vtimer_list {
+ struct list_head entry;
+ u64 expires;
+ u64 interval;
+ void (*function)(unsigned long);
+ unsigned long data;
+};
+
+extern void init_virt_timer(struct vtimer_list *timer);
+extern void add_virt_timer(struct vtimer_list *timer);
+extern void add_virt_timer_periodic(struct vtimer_list *timer);
+extern int mod_virt_timer(struct vtimer_list *timer, u64 expires);
+extern int mod_virt_timer_periodic(struct vtimer_list *timer, u64 expires);
+extern int del_virt_timer(struct vtimer_list *timer);
+
+extern void init_cpu_vtimer(void);
+extern void vtime_init(void);
+
+extern void vtime_stop_cpu(void);
+
+#endif /* _ASM_S390_TIMER_H */
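
[Note: the header above is the whole of the new interface — no cpu field, no vtimer_queue, and expiries are plain u64 CPU-timer deltas. A hedged lifecycle sketch of a one-shot timer; oneshot and oneshot_fn are illustrative, and the 4096 factor assumes the usual s390 convention that bit 51 of the TOD/CPU timer equals one microsecond:

#include <asm/vtimer.h>

static struct vtimer_list oneshot;

static void oneshot_fn(unsigned long data)
{
	/* one-shot: not re-armed unless ->interval is nonzero */
}

static void oneshot_demo(void)
{
	init_virt_timer(&oneshot);		/* INIT_LIST_HEAD on ->entry */
	oneshot.function = oneshot_fn;
	oneshot.expires = 500ULL * 4096;	/* ~500 us of consumed cpu time */
	add_virt_timer(&oneshot);

	/* reprogram; returns 1 if the timer was still pending */
	mod_virt_timer(&oneshot, 1000ULL * 4096);

	/* cancel; returns 1 if the timer had not yet fired */
	del_virt_timer(&oneshot);
}
]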
diff --git a/arch/s390/kernel/asm-offsets.c b/arch/s390/kernel/asm-offsets.c
index 0e974ddd156b..45ef1a7b08f9 100644
--- a/arch/s390/kernel/asm-offsets.c
+++ b/arch/s390/kernel/asm-offsets.c
@@ -9,7 +9,6 @@
#include <linux/kbuild.h>
#include <linux/sched.h>
#include <asm/cputime.h>
-#include <asm/timer.h>
#include <asm/vdso.h>
#include <asm/pgtable.h>
@@ -72,11 +71,10 @@ int main(void)
DEFINE(__CLOCK_REALTIME_RES, MONOTONIC_RES_NSEC);
BLANK();
/* idle data offsets */
- DEFINE(__IDLE_ENTER, offsetof(struct s390_idle_data, idle_enter));
- DEFINE(__IDLE_EXIT, offsetof(struct s390_idle_data, idle_exit));
- /* vtimer queue offsets */
- DEFINE(__VQ_IDLE_ENTER, offsetof(struct vtimer_queue, idle_enter));
- DEFINE(__VQ_IDLE_EXIT, offsetof(struct vtimer_queue, idle_exit));
+ DEFINE(__CLOCK_IDLE_ENTER, offsetof(struct s390_idle_data, clock_idle_enter));
+ DEFINE(__CLOCK_IDLE_EXIT, offsetof(struct s390_idle_data, clock_idle_exit));
+ DEFINE(__TIMER_IDLE_ENTER, offsetof(struct s390_idle_data, timer_idle_enter));
+ DEFINE(__TIMER_IDLE_EXIT, offsetof(struct s390_idle_data, timer_idle_exit));
/* lowcore offsets */
DEFINE(__LC_EXT_PARAMS, offsetof(struct _lowcore, ext_params));
DEFINE(__LC_EXT_CPU_ADDR, offsetof(struct _lowcore, ext_cpu_addr));
diff --git a/arch/s390/kernel/entry.S b/arch/s390/kernel/entry.S
index 2c0eff488875..870bad6d56fc 100644
--- a/arch/s390/kernel/entry.S
+++ b/arch/s390/kernel/entry.S
@@ -616,17 +616,13 @@ ext_skip:
* Load idle PSW. The second "half" of this function is in cleanup_idle.
*/
ENTRY(psw_idle)
- st %r4,__SF_EMPTY(%r15)
+ st %r3,__SF_EMPTY(%r15)
basr %r1,0
la %r1,psw_idle_lpsw+4-.(%r1)
st %r1,__SF_EMPTY+4(%r15)
oi __SF_EMPTY+4(%r15),0x80
- la %r1,.Lvtimer_max-psw_idle_lpsw-4(%r1)
- stck __IDLE_ENTER(%r2)
- ltr %r5,%r5
- stpt __VQ_IDLE_ENTER(%r3)
- jz psw_idle_lpsw
- spt 0(%r1)
+ stck __CLOCK_IDLE_ENTER(%r2)
+ stpt __TIMER_IDLE_ENTER(%r2)
psw_idle_lpsw:
lpsw __SF_EMPTY(%r15)
br %r14
@@ -885,33 +881,28 @@ cleanup_io_restore_insn:
cleanup_idle:
# copy interrupt clock & cpu timer
- mvc __IDLE_EXIT(8,%r2),__LC_INT_CLOCK
- mvc __VQ_IDLE_EXIT(8,%r3),__LC_ASYNC_ENTER_TIMER
+ mvc __CLOCK_IDLE_EXIT(8,%r2),__LC_INT_CLOCK
+ mvc __TIMER_IDLE_EXIT(8,%r2),__LC_ASYNC_ENTER_TIMER
chi %r11,__LC_SAVE_AREA_ASYNC
je 0f
- mvc __IDLE_EXIT(8,%r2),__LC_MCCK_CLOCK
- mvc __VQ_IDLE_EXIT(8,%r3),__LC_MCCK_ENTER_TIMER
+ mvc __CLOCK_IDLE_EXIT(8,%r2),__LC_MCCK_CLOCK
+ mvc __TIMER_IDLE_EXIT(8,%r2),__LC_MCCK_ENTER_TIMER
0: # check if stck has been executed
cl %r9,BASED(cleanup_idle_insn)
jhe 1f
- mvc __IDLE_ENTER(8,%r2),__IDLE_EXIT(%r2)
- mvc __VQ_IDLE_ENTER(8,%r3),__VQ_IDLE_EXIT(%r3)
- j 2f
-1: # check if the cpu timer has been reprogrammed
- ltr %r5,%r5
- jz 2f
- spt __VQ_IDLE_ENTER(%r3)
-2: # account system time going idle
+ mvc __CLOCK_IDLE_ENTER(8,%r2),__CLOCK_IDLE_EXIT(%r2)
+ mvc __TIMER_IDLE_ENTER(8,%r2),__TIMER_IDLE_EXIT(%r2)
+1: # account system time going idle
lm %r9,%r10,__LC_STEAL_TIMER
- ADD64 %r9,%r10,__IDLE_ENTER(%r2)
+ ADD64 %r9,%r10,__CLOCK_IDLE_ENTER(%r2)
SUB64 %r9,%r10,__LC_LAST_UPDATE_CLOCK
stm %r9,%r10,__LC_STEAL_TIMER
- mvc __LC_LAST_UPDATE_CLOCK(8),__IDLE_EXIT(%r2)
+ mvc __LC_LAST_UPDATE_CLOCK(8),__CLOCK_IDLE_EXIT(%r2)
lm %r9,%r10,__LC_SYSTEM_TIMER
ADD64 %r9,%r10,__LC_LAST_UPDATE_TIMER
- SUB64 %r9,%r10,__VQ_IDLE_ENTER(%r3)
+ SUB64 %r9,%r10,__TIMER_IDLE_ENTER(%r2)
stm %r9,%r10,__LC_SYSTEM_TIMER
- mvc __LC_LAST_UPDATE_TIMER(8),__VQ_IDLE_EXIT(%r3)
+ mvc __LC_LAST_UPDATE_TIMER(8),__TIMER_IDLE_EXIT(%r2)
# prepare return psw
n %r8,BASED(cleanup_idle_wait) # clear wait state bit
l %r9,24(%r11) # return from psw_idle
diff --git a/arch/s390/kernel/entry.h b/arch/s390/kernel/entry.h
index f66a229ab0b3..a5f4dc42a5db 100644
--- a/arch/s390/kernel/entry.h
+++ b/arch/s390/kernel/entry.h
@@ -5,7 +5,6 @@
#include <linux/signal.h>
#include <asm/ptrace.h>
#include <asm/cputime.h>
-#include <asm/timer.h>
extern void (*pgm_check_table[128])(struct pt_regs *);
extern void *restart_stack;
@@ -17,8 +16,7 @@ void io_int_handler(void);
void mcck_int_handler(void);
void restart_int_handler(void);
void restart_call_handler(void);
-void psw_idle(struct s390_idle_data *, struct vtimer_queue *,
- unsigned long, int);
+void psw_idle(struct s390_idle_data *, unsigned long);
asmlinkage long do_syscall_trace_enter(struct pt_regs *regs);
asmlinkage void do_syscall_trace_exit(struct pt_regs *regs);
diff --git a/arch/s390/kernel/entry64.S b/arch/s390/kernel/entry64.S
index 1983c22a8a99..349b7eeb348a 100644
--- a/arch/s390/kernel/entry64.S
+++ b/arch/s390/kernel/entry64.S
@@ -642,15 +642,11 @@ ext_skip:
* Load idle PSW. The second "half" of this function is in cleanup_idle.
*/
ENTRY(psw_idle)
- stg %r4,__SF_EMPTY(%r15)
+ stg %r3,__SF_EMPTY(%r15)
larl %r1,psw_idle_lpsw+4
stg %r1,__SF_EMPTY+8(%r15)
- larl %r1,.Lvtimer_max
- STCK __IDLE_ENTER(%r2)
- ltr %r5,%r5
- stpt __VQ_IDLE_ENTER(%r3)
- jz psw_idle_lpsw
- spt 0(%r1)
+ STCK __CLOCK_IDLE_ENTER(%r2)
+ stpt __TIMER_IDLE_ENTER(%r2)
psw_idle_lpsw:
lpswe __SF_EMPTY(%r15)
br %r14
@@ -918,33 +914,28 @@ cleanup_io_restore_insn:
cleanup_idle:
# copy interrupt clock & cpu timer
- mvc __IDLE_EXIT(8,%r2),__LC_INT_CLOCK
- mvc __VQ_IDLE_EXIT(8,%r3),__LC_ASYNC_ENTER_TIMER
+ mvc __CLOCK_IDLE_EXIT(8,%r2),__LC_INT_CLOCK
+ mvc __TIMER_IDLE_EXIT(8,%r2),__LC_ASYNC_ENTER_TIMER
cghi %r11,__LC_SAVE_AREA_ASYNC
je 0f
- mvc __IDLE_EXIT(8,%r2),__LC_MCCK_CLOCK
- mvc __VQ_IDLE_EXIT(8,%r3),__LC_MCCK_ENTER_TIMER
+ mvc __CLOCK_IDLE_EXIT(8,%r2),__LC_MCCK_CLOCK
+ mvc __TIMER_IDLE_EXIT(8,%r2),__LC_MCCK_ENTER_TIMER
0: # check if stck & stpt have been executed
clg %r9,BASED(cleanup_idle_insn)
jhe 1f
- mvc __IDLE_ENTER(8,%r2),__IDLE_EXIT(%r2)
- mvc __VQ_IDLE_ENTER(8,%r3),__VQ_IDLE_EXIT(%r3)
- j 2f
-1: # check if the cpu timer has been reprogrammed
- ltr %r5,%r5
- jz 2f
- spt __VQ_IDLE_ENTER(%r3)
-2: # account system time going idle
+ mvc __CLOCK_IDLE_ENTER(8,%r2),__CLOCK_IDLE_EXIT(%r2)
+ mvc __TIMER_IDLE_ENTER(8,%r2),__TIMER_IDLE_EXIT(%r2)
+1: # account system time going idle
lg %r9,__LC_STEAL_TIMER
- alg %r9,__IDLE_ENTER(%r2)
+ alg %r9,__CLOCK_IDLE_ENTER(%r2)
slg %r9,__LC_LAST_UPDATE_CLOCK
stg %r9,__LC_STEAL_TIMER
- mvc __LC_LAST_UPDATE_CLOCK(8),__IDLE_EXIT(%r2)
+ mvc __LC_LAST_UPDATE_CLOCK(8),__CLOCK_IDLE_EXIT(%r2)
lg %r9,__LC_SYSTEM_TIMER
alg %r9,__LC_LAST_UPDATE_TIMER
- slg %r9,__VQ_IDLE_ENTER(%r3)
+ slg %r9,__TIMER_IDLE_ENTER(%r2)
stg %r9,__LC_SYSTEM_TIMER
- mvc __LC_LAST_UPDATE_TIMER(8),__VQ_IDLE_EXIT(%r3)
+ mvc __LC_LAST_UPDATE_TIMER(8),__TIMER_IDLE_EXIT(%r2)
# prepare return psw
nihh %r8,0xfffd # clear wait state bit
lg %r9,48(%r11) # return from psw_idle
@@ -960,8 +951,6 @@ cleanup_idle_insn:
.quad __critical_start
.Lcritical_length:
.quad __critical_end - __critical_start
-.Lvtimer_max:
- .quad 0x7fffffffffffffff
#if defined(CONFIG_KVM) || defined(CONFIG_KVM_MODULE)
diff --git a/arch/s390/kernel/process.c b/arch/s390/kernel/process.c
index 7efbfa53d659..733175373a4c 100644
--- a/arch/s390/kernel/process.c
+++ b/arch/s390/kernel/process.c
@@ -25,8 +25,8 @@
#include <linux/module.h>
#include <asm/io.h>
#include <asm/processor.h>
+#include <asm/vtimer.h>
#include <asm/irq.h>
-#include <asm/timer.h>
#include <asm/nmi.h>
#include <asm/smp.h>
#include <asm/switch_to.h>
diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c
index eeb441bbddae..5481da80926a 100644
--- a/arch/s390/kernel/smp.c
+++ b/arch/s390/kernel/smp.c
@@ -38,7 +38,7 @@
#include <asm/setup.h>
#include <asm/irq.h>
#include <asm/tlbflush.h>
-#include <asm/timer.h>
+#include <asm/vtimer.h>
#include <asm/lowcore.h>
#include <asm/sclp.h>
#include <asm/vdso.h>
@@ -917,7 +917,7 @@ static ssize_t show_idle_count(struct device *dev,
do {
sequence = ACCESS_ONCE(idle->sequence);
idle_count = ACCESS_ONCE(idle->idle_count);
- if (ACCESS_ONCE(idle->idle_enter))
+ if (ACCESS_ONCE(idle->clock_idle_enter))
idle_count++;
} while ((sequence & 1) || (idle->sequence != sequence));
return sprintf(buf, "%llu\n", idle_count);
@@ -935,8 +935,8 @@ static ssize_t show_idle_time(struct device *dev,
now = get_clock();
sequence = ACCESS_ONCE(idle->sequence);
idle_time = ACCESS_ONCE(idle->idle_time);
- idle_enter = ACCESS_ONCE(idle->idle_enter);
- idle_exit = ACCESS_ONCE(idle->idle_exit);
+ idle_enter = ACCESS_ONCE(idle->clock_idle_enter);
+ idle_exit = ACCESS_ONCE(idle->clock_idle_exit);
} while ((sequence & 1) || (idle->sequence != sequence));
idle_time += idle_enter ? ((idle_exit ? : now) - idle_enter) : 0;
return sprintf(buf, "%llu\n", idle_time >> 12);
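
[Note: both sysfs readers use idle->sequence as a hand-rolled sequence counter: the writer (vtime_stop_cpu) increments it around updates, and readers retry while it is odd or has changed underneath them. Restated as a standalone helper, assuming the fields from the patched s390_idle_data:

/* Lockless read of the idle time, retrying while an update is in
 * flight (odd sequence) or the sequence moved during the reads. */
static unsigned long long read_idle_time(struct s390_idle_data *idle)
{
	unsigned long long now, idle_time, idle_enter, idle_exit;
	unsigned int sequence;

	do {
		now = get_clock();
		sequence = ACCESS_ONCE(idle->sequence);
		idle_time = ACCESS_ONCE(idle->idle_time);
		idle_enter = ACCESS_ONCE(idle->clock_idle_enter);
		idle_exit = ACCESS_ONCE(idle->clock_idle_exit);
	} while ((sequence & 1) || (idle->sequence != sequence));
	/* add the current idle period if the cpu is idle right now */
	return idle_time + (idle_enter ? ((idle_exit ? : now) - idle_enter) : 0);
}
]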
diff --git a/arch/s390/kernel/time.c b/arch/s390/kernel/time.c
index 30cf3bdc9b77..dcec960fc724 100644
--- a/arch/s390/kernel/time.c
+++ b/arch/s390/kernel/time.c
@@ -44,7 +44,7 @@
#include <asm/vdso.h>
#include <asm/irq.h>
#include <asm/irq_regs.h>
-#include <asm/timer.h>
+#include <asm/vtimer.h>
#include <asm/etr.h>
#include <asm/cio.h>
#include "entry.h"
diff --git a/arch/s390/kernel/vtime.c b/arch/s390/kernel/vtime.c
index 39ebff506946..4fc97b40a6e1 100644
--- a/arch/s390/kernel/vtime.c
+++ b/arch/s390/kernel/vtime.c
@@ -1,71 +1,82 @@
/*
- * arch/s390/kernel/vtime.c
* Virtual cpu timer based timer functions.
*
- * S390 version
- * Copyright (C) 2004 IBM Deutschland Entwicklung GmbH, IBM Corporation
+ * Copyright IBM Corp. 2004, 2012
* Author(s): Jan Glauber <jan.glauber@de.ibm.com>
*/
-#include <linux/module.h>
+#include <linux/kernel_stat.h>
+#include <linux/notifier.h>
+#include <linux/kprobes.h>
+#include <linux/export.h>
#include <linux/kernel.h>
-#include <linux/time.h>
-#include <linux/delay.h>
-#include <linux/init.h>
-#include <linux/smp.h>
-#include <linux/types.h>
#include <linux/timex.h>
-#include <linux/notifier.h>
-#include <linux/kernel_stat.h>
-#include <linux/rcupdate.h>
-#include <linux/posix-timers.h>
+#include <linux/types.h>
+#include <linux/time.h>
#include <linux/cpu.h>
-#include <linux/kprobes.h>
+#include <linux/smp.h>
-#include <asm/timer.h>
#include <asm/irq_regs.h>
#include <asm/cputime.h>
+#include <asm/vtimer.h>
#include <asm/irq.h>
#include "entry.h"
-static DEFINE_PER_CPU(struct vtimer_queue, virt_cpu_timer);
+static void virt_timer_expire(void);
DEFINE_PER_CPU(struct s390_idle_data, s390_idle);
-static inline __u64 get_vtimer(void)
+static LIST_HEAD(virt_timer_list);
+static DEFINE_SPINLOCK(virt_timer_lock);
+static atomic64_t virt_timer_current;
+static atomic64_t virt_timer_elapsed;
+
+static inline u64 get_vtimer(void)
{
- __u64 timer;
+ u64 timer;
- asm volatile("STPT %0" : "=m" (timer));
+ asm volatile("stpt %0" : "=m" (timer));
return timer;
}
-static inline void set_vtimer(__u64 expires)
+static inline void set_vtimer(u64 expires)
{
- __u64 timer;
+ u64 timer;
- asm volatile (" STPT %0\n" /* Store current cpu timer value */
- " SPT %1" /* Set new value immediately afterwards */
- : "=m" (timer) : "m" (expires) );
+ asm volatile(
+ " stpt %0\n" /* Store current cpu timer value */
+ " spt %1" /* Set new value imm. afterwards */
+ : "=m" (timer) : "m" (expires));
S390_lowcore.system_timer += S390_lowcore.last_update_timer - timer;
S390_lowcore.last_update_timer = expires;
}
+static inline int virt_timer_forward(u64 elapsed)
+{
+ BUG_ON(!irqs_disabled());
+
+ if (list_empty(&virt_timer_list))
+ return 0;
+ elapsed = atomic64_add_return(elapsed, &virt_timer_elapsed);
+ return elapsed >= atomic64_read(&virt_timer_current);
+}
+
/*
* Update process times based on virtual cpu times stored by entry.S
* to the lowcore fields user_timer, system_timer & steal_clock.
*/
-static void do_account_vtime(struct task_struct *tsk, int hardirq_offset)
+static int do_account_vtime(struct task_struct *tsk, int hardirq_offset)
{
struct thread_info *ti = task_thread_info(tsk);
- __u64 timer, clock, user, system, steal;
+ u64 timer, clock, user, system, steal;
timer = S390_lowcore.last_update_timer;
clock = S390_lowcore.last_update_clock;
- asm volatile (" STPT %0\n" /* Store current cpu timer value */
- " STCK %1" /* Store current tod clock value */
- : "=m" (S390_lowcore.last_update_timer),
- "=m" (S390_lowcore.last_update_clock) );
+ asm volatile(
+ " stpt %0\n" /* Store current cpu timer value */
+ " stck %1" /* Store current tod clock value */
+ : "=m" (S390_lowcore.last_update_timer),
+ "=m" (S390_lowcore.last_update_clock));
S390_lowcore.system_timer += timer - S390_lowcore.last_update_timer;
S390_lowcore.steal_timer += S390_lowcore.last_update_clock - clock;
@@ -84,6 +95,8 @@ static void do_account_vtime(struct task_struct *tsk, int hardirq_offset)
S390_lowcore.steal_timer = 0;
account_steal_time(steal);
}
+
+ return virt_timer_forward(user + system);
}
void account_vtime(struct task_struct *prev, struct task_struct *next)
@@ -101,7 +114,8 @@ void account_vtime(struct task_struct *prev, struct task_struct *next)
void account_process_tick(struct task_struct *tsk, int user_tick)
{
- do_account_vtime(tsk, HARDIRQ_OFFSET);
+ if (do_account_vtime(tsk, HARDIRQ_OFFSET))
+ virt_timer_expire();
}
/*
@@ -111,7 +125,7 @@ void account_process_tick(struct task_struct *tsk, int user_tick)
void account_system_vtime(struct task_struct *tsk)
{
struct thread_info *ti = task_thread_info(tsk);
- __u64 timer, system;
+ u64 timer, system;
timer = S390_lowcore.last_update_timer;
S390_lowcore.last_update_timer = get_vtimer();
@@ -121,13 +135,14 @@ void account_system_vtime(struct task_struct *tsk)
S390_lowcore.steal_timer -= system;
ti->system_timer = S390_lowcore.system_timer;
account_system_time(tsk, 0, system, system);
+
+ virt_timer_forward(system);
}
EXPORT_SYMBOL_GPL(account_system_vtime);
void __kprobes vtime_stop_cpu(void)
{
struct s390_idle_data *idle = &__get_cpu_var(s390_idle);
- struct vtimer_queue *vq = &__get_cpu_var(virt_cpu_timer);
unsigned long long idle_time;
unsigned long psw_mask;
@@ -141,7 +156,7 @@ void __kprobes vtime_stop_cpu(void)
idle->nohz_delay = 0;
/* Call the assembler magic in entry.S */
- psw_idle(idle, vq, psw_mask, !list_empty(&vq->list));
+ psw_idle(idle, psw_mask);
/* Reenable preemption tracer. */
start_critical_timings();
@@ -149,9 +164,9 @@ void __kprobes vtime_stop_cpu(void)
/* Account time spent with enabled wait psw loaded as idle time. */
idle->sequence++;
smp_wmb();
- idle_time = idle->idle_exit - idle->idle_enter;
+ idle_time = idle->clock_idle_exit - idle->clock_idle_enter;
+ idle->clock_idle_enter = idle->clock_idle_exit = 0ULL;
idle->idle_time += idle_time;
- idle->idle_enter = idle->idle_exit = 0ULL;
idle->idle_count++;
account_idle_time(idle_time);
smp_wmb();
@@ -167,10 +182,10 @@ cputime64_t s390_get_idle_time(int cpu)
do {
now = get_clock();
sequence = ACCESS_ONCE(idle->sequence);
- idle_enter = ACCESS_ONCE(idle->idle_enter);
- idle_exit = ACCESS_ONCE(idle->idle_exit);
+ idle_enter = ACCESS_ONCE(idle->clock_idle_enter);
+ idle_exit = ACCESS_ONCE(idle->clock_idle_exit);
} while ((sequence & 1) || (idle->sequence != sequence));
- return idle_enter ? ((idle_exit ? : now) - idle_enter) : 0;
+ return idle_enter ? ((idle_exit ?: now) - idle_enter) : 0;
}
/*
@@ -179,11 +194,11 @@ cputime64_t s390_get_idle_time(int cpu)
*/
static void list_add_sorted(struct vtimer_list *timer, struct list_head *head)
{
- struct vtimer_list *event;
+ struct vtimer_list *tmp;
- list_for_each_entry(event, head, entry) {
- if (event->expires > timer->expires) {
- list_add_tail(&timer->entry, &event->entry);
+ list_for_each_entry(tmp, head, entry) {
+ if (tmp->expires > timer->expires) {
+ list_add_tail(&timer->entry, &tmp->entry);
return;
}
}
@@ -191,82 +206,45 @@ static void list_add_sorted(struct vtimer_list *timer, struct list_head *head)
}
/*
- * Do the callback functions of expired vtimer events.
- * Called from within the interrupt handler.
- */
-static void do_callbacks(struct list_head *cb_list)
-{
- struct vtimer_queue *vq;
- struct vtimer_list *event, *tmp;
-
- if (list_empty(cb_list))
- return;
-
- vq = &__get_cpu_var(virt_cpu_timer);
-
- list_for_each_entry_safe(event, tmp, cb_list, entry) {
- list_del_init(&event->entry);
- (event->function)(event->data);
- if (event->interval) {
- /* Recharge interval timer */
- event->expires = event->interval + vq->elapsed;
- spin_lock(&vq->lock);
- list_add_sorted(event, &vq->list);
- spin_unlock(&vq->lock);
- }
- }
-}
-
-/*
- * Handler for the virtual CPU timer.
+ * Handler for expired virtual CPU timer.
*/
-static void do_cpu_timer_interrupt(struct ext_code ext_code,
- unsigned int param32, unsigned long param64)
+static void virt_timer_expire(void)
{
- struct vtimer_queue *vq;
- struct vtimer_list *event, *tmp;
- struct list_head cb_list; /* the callback queue */
- __u64 elapsed, next;
-
- kstat_cpu(smp_processor_id()).irqs[EXTINT_TMR]++;
- INIT_LIST_HEAD(&cb_list);
- vq = &__get_cpu_var(virt_cpu_timer);
-
- /* walk timer list, fire all expired events */
- spin_lock(&vq->lock);
-
- elapsed = vq->elapsed + (vq->timer - S390_lowcore.async_enter_timer);
- BUG_ON((s64) elapsed < 0);
- vq->elapsed = 0;
- list_for_each_entry_safe(event, tmp, &vq->list, entry) {
- if (event->expires < elapsed)
+ struct vtimer_list *timer, *tmp;
+ unsigned long elapsed;
+ LIST_HEAD(cb_list);
+
+ /* walk timer list, fire all expired timers */
+ spin_lock(&virt_timer_lock);
+ elapsed = atomic64_read(&virt_timer_elapsed);
+ list_for_each_entry_safe(timer, tmp, &virt_timer_list, entry) {
+ if (timer->expires < elapsed)
/* move expired timer to the callback queue */
- list_move_tail(&event->entry, &cb_list);
+ list_move_tail(&timer->entry, &cb_list);
else
- event->expires -= elapsed;
+ timer->expires -= elapsed;
}
- spin_unlock(&vq->lock);
-
- do_callbacks(&cb_list);
-
- /* next event is first in list */
- next = VTIMER_MAX_SLICE;
- spin_lock(&vq->lock);
- if (!list_empty(&vq->list)) {
- event = list_first_entry(&vq->list, struct vtimer_list, entry);
- next = event->expires;
+ if (!list_empty(&virt_timer_list)) {
+ timer = list_first_entry(&virt_timer_list,
+ struct vtimer_list, entry);
+ atomic64_set(&virt_timer_current, timer->expires);
+ }
+ atomic64_sub(elapsed, &virt_timer_elapsed);
+ spin_unlock(&virt_timer_lock);
+
+ /* Do callbacks and recharge periodic timers */
+ list_for_each_entry_safe(timer, tmp, &cb_list, entry) {
+ list_del_init(&timer->entry);
+ timer->function(timer->data);
+ if (timer->interval) {
+ /* Recharge interval timer */
+ timer->expires = timer->interval +
+ atomic64_read(&virt_timer_elapsed);
+ spin_lock(&virt_timer_lock);
+ list_add_sorted(timer, &virt_timer_list);
+ spin_unlock(&virt_timer_lock);
+ }
}
- spin_unlock(&vq->lock);
- /*
- * To improve precision add the time spent by the
- * interrupt handler to the elapsed time.
- * Note: CPU timer counts down and we got an interrupt,
- * the current content is negative
- */
- elapsed = S390_lowcore.async_enter_timer - get_vtimer();
- set_vtimer(next - elapsed);
- vq->timer = next - elapsed;
- vq->elapsed = elapsed;
}
void init_virt_timer(struct vtimer_list *timer)
@@ -278,179 +256,108 @@ EXPORT_SYMBOL(init_virt_timer);
static inline int vtimer_pending(struct vtimer_list *timer)
{
- return (!list_empty(&timer->entry));
+ return !list_empty(&timer->entry);
}
-/*
- * this function should only run on the specified CPU
- */
static void internal_add_vtimer(struct vtimer_list *timer)
{
- struct vtimer_queue *vq;
- unsigned long flags;
- __u64 left, expires;
-
- vq = &per_cpu(virt_cpu_timer, timer->cpu);
- spin_lock_irqsave(&vq->lock, flags);
-
- BUG_ON(timer->cpu != smp_processor_id());
-
- if (list_empty(&vq->list)) {
- /* First timer on this cpu, just program it. */
- list_add(&timer->entry, &vq->list);
- set_vtimer(timer->expires);
- vq->timer = timer->expires;
- vq->elapsed = 0;
+ if (list_empty(&virt_timer_list)) {
+ /* First timer, just program it. */
+ atomic64_set(&virt_timer_current, timer->expires);
+ atomic64_set(&virt_timer_elapsed, 0);
+ list_add(&timer->entry, &virt_timer_list);
} else {
- /* Check progress of old timers. */
- expires = timer->expires;
- left = get_vtimer();
- if (likely((s64) expires < (s64) left)) {
+ /* Update timer against current base. */
+ timer->expires += atomic64_read(&virt_timer_elapsed);
+ if (likely((s64) timer->expires <
+ (s64) atomic64_read(&virt_timer_current)))
/* The new timer expires before the current timer. */
- set_vtimer(expires);
- vq->elapsed += vq->timer - left;
- vq->timer = expires;
- } else {
- vq->elapsed += vq->timer - left;
- vq->timer = left;
- }
- /* Insert new timer into per cpu list. */
- timer->expires += vq->elapsed;
- list_add_sorted(timer, &vq->list);
+ atomic64_set(&virt_timer_current, timer->expires);
+ /* Insert new timer into the list. */
+ list_add_sorted(timer, &virt_timer_list);
}
-
- spin_unlock_irqrestore(&vq->lock, flags);
- /* release CPU acquired in prepare_vtimer or mod_virt_timer() */
- put_cpu();
}
-static inline void prepare_vtimer(struct vtimer_list *timer)
+static void __add_vtimer(struct vtimer_list *timer, int periodic)
{
- BUG_ON(!timer->function);
- BUG_ON(!timer->expires || timer->expires > VTIMER_MAX_SLICE);
- BUG_ON(vtimer_pending(timer));
- timer->cpu = get_cpu();
+ unsigned long flags;
+
+ timer->interval = periodic ? timer->expires : 0;
+ spin_lock_irqsave(&virt_timer_lock, flags);
+ internal_add_vtimer(timer);
+ spin_unlock_irqrestore(&virt_timer_lock, flags);
}
/*
* add_virt_timer - add an oneshot virtual CPU timer
*/
-void add_virt_timer(void *new)
+void add_virt_timer(struct vtimer_list *timer)
{
- struct vtimer_list *timer;
-
- timer = (struct vtimer_list *)new;
- prepare_vtimer(timer);
- timer->interval = 0;
- internal_add_vtimer(timer);
+ __add_vtimer(timer, 0);
}
EXPORT_SYMBOL(add_virt_timer);
/*
* add_virt_timer_int - add an interval virtual CPU timer
*/
-void add_virt_timer_periodic(void *new)
+void add_virt_timer_periodic(struct vtimer_list *timer)
{
- struct vtimer_list *timer;
-
- timer = (struct vtimer_list *)new;
- prepare_vtimer(timer);
- timer->interval = timer->expires;
- internal_add_vtimer(timer);
+ __add_vtimer(timer, 1);
}
EXPORT_SYMBOL(add_virt_timer_periodic);
-static int __mod_vtimer(struct vtimer_list *timer, __u64 expires, int periodic)
+static int __mod_vtimer(struct vtimer_list *timer, u64 expires, int periodic)
{
- struct vtimer_queue *vq;
unsigned long flags;
- int cpu;
+ int rc;
BUG_ON(!timer->function);
- BUG_ON(!expires || expires > VTIMER_MAX_SLICE);
if (timer->expires == expires && vtimer_pending(timer))
return 1;
-
- cpu = get_cpu();
- vq = &per_cpu(virt_cpu_timer, cpu);
-
- /* disable interrupts before test if timer is pending */
- spin_lock_irqsave(&vq->lock, flags);
-
- /* if timer isn't pending add it on the current CPU */
- if (!vtimer_pending(timer)) {
- spin_unlock_irqrestore(&vq->lock, flags);
-
- if (periodic)
- timer->interval = expires;
- else
- timer->interval = 0;
- timer->expires = expires;
- timer->cpu = cpu;
- internal_add_vtimer(timer);
- return 0;
- }
-
- /* check if we run on the right CPU */
- BUG_ON(timer->cpu != cpu);
-
- list_del_init(&timer->entry);
+ spin_lock_irqsave(&virt_timer_lock, flags);
+ rc = vtimer_pending(timer);
+ if (rc)
+ list_del_init(&timer->entry);
+ timer->interval = periodic ? expires : 0;
timer->expires = expires;
- if (periodic)
- timer->interval = expires;
-
- /* the timer can't expire anymore so we can release the lock */
- spin_unlock_irqrestore(&vq->lock, flags);
internal_add_vtimer(timer);
- return 1;
+ spin_unlock_irqrestore(&virt_timer_lock, flags);
+ return rc;
}
/*
- * If we change a pending timer the function must be called on the CPU
- * where the timer is running on.
- *
* returns whether it has modified a pending timer (1) or not (0)
*/
-int mod_virt_timer(struct vtimer_list *timer, __u64 expires)
+int mod_virt_timer(struct vtimer_list *timer, u64 expires)
{
return __mod_vtimer(timer, expires, 0);
}
EXPORT_SYMBOL(mod_virt_timer);
/*
- * If we change a pending timer the function must be called on the CPU
- * where the timer is running on.
- *
* returns whether it has modified a pending timer (1) or not (0)
*/
-int mod_virt_timer_periodic(struct vtimer_list *timer, __u64 expires)
+int mod_virt_timer_periodic(struct vtimer_list *timer, u64 expires)
{
return __mod_vtimer(timer, expires, 1);
}
EXPORT_SYMBOL(mod_virt_timer_periodic);
/*
- * delete a virtual timer
+ * Delete a virtual timer.
*
* returns whether the deleted timer was pending (1) or not (0)
*/
int del_virt_timer(struct vtimer_list *timer)
{
unsigned long flags;
- struct vtimer_queue *vq;
- /* check if timer is pending */
if (!vtimer_pending(timer))
return 0;
-
- vq = &per_cpu(virt_cpu_timer, timer->cpu);
- spin_lock_irqsave(&vq->lock, flags);
-
- /* we don't interrupt a running timer, just let it expire! */
+ spin_lock_irqsave(&virt_timer_lock, flags);
list_del_init(&timer->entry);
-
- spin_unlock_irqrestore(&vq->lock, flags);
+ spin_unlock_irqrestore(&virt_timer_lock, flags);
return 1;
}
EXPORT_SYMBOL(del_virt_timer);
@@ -458,20 +365,10 @@ EXPORT_SYMBOL(del_virt_timer);
/*
* Start the virtual CPU timer on the current CPU.
*/
-void init_cpu_vtimer(void)
+void __cpuinit init_cpu_vtimer(void)
{
- struct vtimer_queue *vq;
-
- /* initialize per cpu vtimer structure */
- vq = &__get_cpu_var(virt_cpu_timer);
- INIT_LIST_HEAD(&vq->list);
- spin_lock_init(&vq->lock);
-
- /* enable cpu timer interrupts */
- __ctl_set_bit(0,10);
-
/* set initial cpu timer */
- set_vtimer(0x7fffffffffffffffULL);
+ set_vtimer(VTIMER_MAX_SLICE);
}
static int __cpuinit s390_nohz_notify(struct notifier_block *self,
@@ -493,12 +390,7 @@ static int __cpuinit s390_nohz_notify(struct notifier_block *self,
void __init vtime_init(void)
{
- /* request the cpu timer external interrupt */
- if (register_external_interrupt(0x1005, do_cpu_timer_interrupt))
- panic("Couldn't request external interrupt 0x1005");
-
/* Enable cpu timer interrupts on the boot cpu. */
init_cpu_vtimer();
cpu_notifier(s390_nohz_notify, 0);
}
-
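
[Note: taken together, the vtime.c rework trades per-CPU queues and the 0x1005 external interrupt for one spinlocked global list plus two atomic64 counters: virt_timer_elapsed accumulates consumed CPU time and virt_timer_current caches the earliest relative expiry, so the hot path is one add and one compare. A hypothetical user-space model of that bookkeeping, with plain integers instead of atomics and no locking:

#include <stdio.h>

/* Expiries are kept relative to a growing "elapsed" base; forwarding
 * time is a single add, expiry detection a single compare, and the
 * base is rebased (zeroed here, atomic64_sub in the kernel) whenever
 * expired timers are processed. */
static unsigned long long elapsed;	/* models virt_timer_elapsed */
static unsigned long long current_exp;	/* models virt_timer_current */

static int timer_forward(unsigned long long delta)
{
	elapsed += delta;
	return elapsed >= current_exp;	/* nonzero: run expiry handling */
}

static void timer_expire(unsigned long long next)
{
	current_exp = next;	/* rebased earliest pending expiry */
	elapsed = 0;		/* consumed time has been accounted */
}

int main(void)
{
	current_exp = 1000;
	printf("%d\n", timer_forward(400));	/* 0: 400 < 1000 */
	printf("%d\n", timer_forward(700));	/* 1: 1100 >= 1000 */
	timer_expire(500);			/* next timer 500 away */
	printf("%d\n", timer_forward(100));	/* 0: 100 < 500 */
	return 0;
}
]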
diff --git a/arch/s390/lib/delay.c b/arch/s390/lib/delay.c
index 7d7c3750f438..42d0cf89121d 100644
--- a/arch/s390/lib/delay.c
+++ b/arch/s390/lib/delay.c
@@ -12,8 +12,8 @@
#include <linux/module.h>
#include <linux/irqflags.h>
#include <linux/interrupt.h>
+#include <asm/vtimer.h>
#include <asm/div64.h>
-#include <asm/timer.h>
void __delay(unsigned long loops)
{