Diffstat (limited to 'arch/s390')
-rw-r--r--  arch/s390/Kconfig                        |    4
-rw-r--r--  arch/s390/hypfs/inode.c                  |    1
-rw-r--r--  arch/s390/include/asm/atomic.h           |    7
-rw-r--r--  arch/s390/include/asm/byteorder.h        |   91
-rw-r--r--  arch/s390/include/asm/chpid.h            |    2
-rw-r--r--  arch/s390/include/asm/chsc.h             |    1
-rw-r--r--  arch/s390/include/asm/cmb.h              |    3
-rw-r--r--  arch/s390/include/asm/cpu.h              |    7
-rw-r--r--  arch/s390/include/asm/cputime.h          |   42
-rw-r--r--  arch/s390/include/asm/dasd.h             |    2
-rw-r--r--  arch/s390/include/asm/elf.h              |   16
-rw-r--r--  arch/s390/include/asm/kvm.h              |    2
-rw-r--r--  arch/s390/include/asm/lowcore.h          |   49
-rw-r--r--  arch/s390/include/asm/posix_types.h      |    4
-rw-r--r--  arch/s390/include/asm/ptrace.h           |    7
-rw-r--r--  arch/s390/include/asm/qeth.h             |    1
-rw-r--r--  arch/s390/include/asm/s390_rdev.h        |   15
-rw-r--r--  arch/s390/include/asm/schid.h            |    2
-rw-r--r--  arch/s390/include/asm/swab.h             |   91
-rw-r--r--  arch/s390/include/asm/system.h           |    4
-rw-r--r--  arch/s390/include/asm/thread_info.h      |    2
-rw-r--r--  arch/s390/include/asm/timer.h            |   16
-rw-r--r--  arch/s390/include/asm/topology.h         |    2
-rw-r--r--  arch/s390/include/asm/types.h            |    6
-rw-r--r--  arch/s390/include/asm/vdso.h             |   15
-rw-r--r--  arch/s390/kernel/asm-offsets.c           |    5
-rw-r--r--  arch/s390/kernel/compat_wrapper.S        |   42
-rw-r--r--  arch/s390/kernel/entry.S                 |    5
-rw-r--r--  arch/s390/kernel/entry.h                 |   18
-rw-r--r--  arch/s390/kernel/entry64.S               |   50
-rw-r--r--  arch/s390/kernel/head64.S                |    2
-rw-r--r--  arch/s390/kernel/init_task.c             |    1
-rw-r--r--  arch/s390/kernel/kprobes.c               |    9
-rw-r--r--  arch/s390/kernel/process.c               |   52
-rw-r--r--  arch/s390/kernel/s390_ext.c              |    2
-rw-r--r--  arch/s390/kernel/setup.c                 |    2
-rw-r--r--  arch/s390/kernel/signal.c                |   19
-rw-r--r--  arch/s390/kernel/smp.c                   |   41
-rw-r--r--  arch/s390/kernel/sys_s390.c              |   54
-rw-r--r--  arch/s390/kernel/syscalls.S              |   24
-rw-r--r--  arch/s390/kernel/time.c                  |    8
-rw-r--r--  arch/s390/kernel/topology.c              |    5
-rw-r--r--  arch/s390/kernel/vdso.c                  |  124
-rw-r--r--  arch/s390/kernel/vdso32/gettimeofday.S   |    4
-rw-r--r--  arch/s390/kernel/vdso64/clock_getres.S   |    5
-rw-r--r--  arch/s390/kernel/vdso64/clock_gettime.S  |   39
-rw-r--r--  arch/s390/kernel/vtime.c                 |  490
-rw-r--r--  arch/s390/kvm/diag.c                     |    2
-rw-r--r--  arch/s390/kvm/interrupt.c                |    6
-rw-r--r--  arch/s390/kvm/kvm-s390.c                 |   41
-rw-r--r--  arch/s390/kvm/priv.c                     |    2
-rw-r--r--  arch/s390/mm/init.c                      |    2

52 files changed, 830 insertions(+), 616 deletions(-)
diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
index 8152fefc97b..6b0a3538dc6 100644
--- a/arch/s390/Kconfig
+++ b/arch/s390/Kconfig
@@ -77,12 +77,14 @@ mainmenu "Linux Kernel Configuration"
config S390
def_bool y
select USE_GENERIC_SMP_HELPERS if SMP
+ select HAVE_SYSCALL_WRAPPERS
select HAVE_FUNCTION_TRACER
select HAVE_OPROFILE
select HAVE_KPROBES
select HAVE_KRETPROBES
select HAVE_KVM if 64BIT
select HAVE_ARCH_TRACEHOOK
+ select INIT_ALL_POSSIBLE
source "init/Kconfig"
@@ -298,7 +300,7 @@ config WARN_STACK
This option enables the compiler options -mwarn-framesize and
-mwarn-dynamicstack. If the compiler supports these options it
will generate warnings for function which either use alloca or
- create a stack frame bigger then CONFIG_WARN_STACK_SIZE.
+ create a stack frame bigger than CONFIG_WARN_STACK_SIZE.
Say N if you are unsure.
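
[ Note: the new HAVE_SYSCALL_WRAPPERS select enables the SYSCALL_DEFINEx/
SYSCALL_ALIAS machinery used by the handler conversions further down
(sys_fork, sys_sigsuspend, sys_ipc, ...). Roughly, on a wrapper architecture
the macro splits each handler into an inline body plus a stub that takes
every argument as a full register-width long and casts it down, so a 31-bit
compat caller cannot leave stale bits in the upper register halves. A
hand-expanded sketch for one of the converted calls; simplified, the real
macros live in include/linux/syscalls.h:

static inline long SYSC_s390_personality(unsigned long personality)
{
	/* ... actual system call body, as in sys_s390.c below ... */
	return 0;
}

/* Stub: each argument arrives as a register-width long and is cast
 * to its declared type before the body runs. */
asmlinkage long SyS_s390_personality(long personality)
{
	return SYSC_s390_personality((unsigned long) personality);
}

/* The syscall table entry sys_s390_personality aliases the stub. */
SYSCALL_ALIAS(sys_s390_personality, SyS_s390_personality);

The same pattern appears spelled out by hand for sys_s390_fallocate near the
end of sys_s390.c, where the loff_t argument needs special treatment. ]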
diff --git a/arch/s390/hypfs/inode.c b/arch/s390/hypfs/inode.c
index 9d4f8e6c080..5a805df216b 100644
--- a/arch/s390/hypfs/inode.c
+++ b/arch/s390/hypfs/inode.c
@@ -106,7 +106,6 @@ static struct inode *hypfs_make_inode(struct super_block *sb, int mode)
ret->i_mode = mode;
ret->i_uid = hypfs_info->uid;
ret->i_gid = hypfs_info->gid;
- ret->i_blocks = 0;
ret->i_atime = ret->i_mtime = ret->i_ctime = CURRENT_TIME;
if (mode & S_IFDIR)
ret->i_nlink = 2;
diff --git a/arch/s390/include/asm/atomic.h b/arch/s390/include/asm/atomic.h
index 2d184655bc5..de432f2de2d 100644
--- a/arch/s390/include/asm/atomic.h
+++ b/arch/s390/include/asm/atomic.h
@@ -2,6 +2,7 @@
#define __ARCH_S390_ATOMIC__
#include <linux/compiler.h>
+#include <linux/types.h>
/*
* include/asm-s390/atomic.h
@@ -23,9 +24,6 @@
* S390 uses 'Compare And Swap' for atomicity in SMP enviroment
*/
-typedef struct {
- int counter;
-} __attribute__ ((aligned (4))) atomic_t;
#define ATOMIC_INIT(i) { (i) }
#ifdef __KERNEL__
@@ -149,9 +147,6 @@ static __inline__ int atomic_add_unless(atomic_t *v, int a, int u)
#undef __CS_LOOP
#ifdef __s390x__
-typedef struct {
- long long counter;
-} __attribute__ ((aligned (8))) atomic64_t;
#define ATOMIC64_INIT(i) { (i) }
#if __GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ > 2)
diff --git a/arch/s390/include/asm/byteorder.h b/arch/s390/include/asm/byteorder.h
index 8bcf277c846..a332e59e26f 100644
--- a/arch/s390/include/asm/byteorder.h
+++ b/arch/s390/include/asm/byteorder.h
@@ -1,95 +1,6 @@
#ifndef _S390_BYTEORDER_H
#define _S390_BYTEORDER_H
-/*
- * include/asm-s390/byteorder.h
- *
- * S390 version
- * Copyright (C) 1999 IBM Deutschland Entwicklung GmbH, IBM Corporation
- * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
- */
-
-#include <asm/types.h>
-
-#define __BIG_ENDIAN
-
-#ifndef __s390x__
-# define __SWAB_64_THRU_32__
-#endif
-
-#ifdef __s390x__
-static inline __u64 __arch_swab64p(const __u64 *x)
-{
- __u64 result;
-
- asm volatile("lrvg %0,%1" : "=d" (result) : "m" (*x));
- return result;
-}
-#define __arch_swab64p __arch_swab64p
-
-static inline __u64 __arch_swab64(__u64 x)
-{
- __u64 result;
-
- asm volatile("lrvgr %0,%1" : "=d" (result) : "d" (x));
- return result;
-}
-#define __arch_swab64 __arch_swab64
-
-static inline void __arch_swab64s(__u64 *x)
-{
- *x = __arch_swab64p(x);
-}
-#define __arch_swab64s __arch_swab64s
-#endif /* __s390x__ */
-
-static inline __u32 __arch_swab32p(const __u32 *x)
-{
- __u32 result;
-
- asm volatile(
-#ifndef __s390x__
- " icm %0,8,3(%1)\n"
- " icm %0,4,2(%1)\n"
- " icm %0,2,1(%1)\n"
- " ic %0,0(%1)"
- : "=&d" (result) : "a" (x), "m" (*x) : "cc");
-#else /* __s390x__ */
- " lrv %0,%1"
- : "=d" (result) : "m" (*x));
-#endif /* __s390x__ */
- return result;
-}
-#define __arch_swab32p __arch_swab32p
-
-#ifdef __s390x__
-static inline __u32 __arch_swab32(__u32 x)
-{
- __u32 result;
-
- asm volatile("lrvr %0,%1" : "=d" (result) : "d" (x));
- return result;
-}
-#define __arch_swab32 __arch_swab32
-#endif /* __s390x__ */
-
-static inline __u16 __arch_swab16p(const __u16 *x)
-{
- __u16 result;
-
- asm volatile(
-#ifndef __s390x__
- " icm %0,2,1(%1)\n"
- " ic %0,0(%1)\n"
- : "=&d" (result) : "a" (x), "m" (*x) : "cc");
-#else /* __s390x__ */
- " lrvh %0,%1"
- : "=d" (result) : "m" (*x));
-#endif /* __s390x__ */
- return result;
-}
-#define __arch_swab16p __arch_swab16p
-
-#include <linux/byteorder.h>
+#include <linux/byteorder/big_endian.h>
#endif /* _S390_BYTEORDER_H */
diff --git a/arch/s390/include/asm/chpid.h b/arch/s390/include/asm/chpid.h
index dfe3c7f3439..fc71d8a6709 100644
--- a/arch/s390/include/asm/chpid.h
+++ b/arch/s390/include/asm/chpid.h
@@ -9,7 +9,7 @@
#define _ASM_S390_CHPID_H _ASM_S390_CHPID_H
#include <linux/string.h>
-#include <asm/types.h>
+#include <linux/types.h>
#define __MAX_CHPID 255
diff --git a/arch/s390/include/asm/chsc.h b/arch/s390/include/asm/chsc.h
index d38d0cf62d4..807997f7414 100644
--- a/arch/s390/include/asm/chsc.h
+++ b/arch/s390/include/asm/chsc.h
@@ -8,6 +8,7 @@
#ifndef _ASM_CHSC_H
#define _ASM_CHSC_H
+#include <linux/types.h>
#include <asm/chpid.h>
#include <asm/schid.h>
diff --git a/arch/s390/include/asm/cmb.h b/arch/s390/include/asm/cmb.h
index 50196857d27..39ae0329479 100644
--- a/arch/s390/include/asm/cmb.h
+++ b/arch/s390/include/asm/cmb.h
@@ -1,5 +1,8 @@
#ifndef S390_CMB_H
#define S390_CMB_H
+
+#include <linux/types.h>
+
/**
* struct cmbdata - channel measurement block data for user space
* @size: size of the stored data
diff --git a/arch/s390/include/asm/cpu.h b/arch/s390/include/asm/cpu.h
index e5a6a9ba3ad..d60a2eefb17 100644
--- a/arch/s390/include/asm/cpu.h
+++ b/arch/s390/include/asm/cpu.h
@@ -14,7 +14,6 @@
struct s390_idle_data {
spinlock_t lock;
- unsigned int in_idle;
unsigned long long idle_count;
unsigned long long idle_enter;
unsigned long long idle_time;
@@ -22,12 +21,12 @@ struct s390_idle_data {
DECLARE_PER_CPU(struct s390_idle_data, s390_idle);
-void s390_idle_leave(void);
+void vtime_start_cpu(void);
static inline void s390_idle_check(void)
{
- if ((&__get_cpu_var(s390_idle))->in_idle)
- s390_idle_leave();
+ if ((&__get_cpu_var(s390_idle))->idle_enter != 0ULL)
+ vtime_start_cpu();
}
#endif /* _ASM_S390_CPU_H_ */
diff --git a/arch/s390/include/asm/cputime.h b/arch/s390/include/asm/cputime.h
index 133ce054fc8..521726430af 100644
--- a/arch/s390/include/asm/cputime.h
+++ b/arch/s390/include/asm/cputime.h
@@ -11,7 +11,7 @@
#include <asm/div64.h>
-/* We want to use micro-second resolution. */
+/* We want to use full resolution of the CPU timer: 2**-12 micro-seconds. */
typedef unsigned long long cputime_t;
typedef unsigned long long cputime64_t;
@@ -53,9 +53,9 @@ __div(unsigned long long n, unsigned int base)
#define cputime_ge(__a, __b) ((__a) >= (__b))
#define cputime_lt(__a, __b) ((__a) < (__b))
#define cputime_le(__a, __b) ((__a) <= (__b))
-#define cputime_to_jiffies(__ct) (__div((__ct), 1000000 / HZ))
+#define cputime_to_jiffies(__ct) (__div((__ct), 4096000000ULL / HZ))
#define cputime_to_scaled(__ct) (__ct)
-#define jiffies_to_cputime(__hz) ((cputime_t)(__hz) * (1000000 / HZ))
+#define jiffies_to_cputime(__hz) ((cputime_t)(__hz) * (4096000000ULL / HZ))
#define cputime64_zero (0ULL)
#define cputime64_add(__a, __b) ((__a) + (__b))
@@ -64,7 +64,7 @@ __div(unsigned long long n, unsigned int base)
static inline u64
cputime64_to_jiffies64(cputime64_t cputime)
{
- do_div(cputime, 1000000 / HZ);
+ do_div(cputime, 4096000000ULL / HZ);
return cputime;
}
@@ -74,13 +74,13 @@ cputime64_to_jiffies64(cputime64_t cputime)
static inline unsigned int
cputime_to_msecs(const cputime_t cputime)
{
- return __div(cputime, 1000);
+ return __div(cputime, 4096000);
}
static inline cputime_t
msecs_to_cputime(const unsigned int m)
{
- return (cputime_t) m * 1000;
+ return (cputime_t) m * 4096000;
}
/*
@@ -89,13 +89,13 @@ msecs_to_cputime(const unsigned int m)
static inline unsigned int
cputime_to_secs(const cputime_t cputime)
{
- return __div(cputime, 1000000);
+ return __div(cputime, 2048000000) >> 1;
}
static inline cputime_t
secs_to_cputime(const unsigned int s)
{
- return (cputime_t) s * 1000000;
+ return (cputime_t) s * 4096000000ULL;
}
/*
@@ -104,7 +104,7 @@ secs_to_cputime(const unsigned int s)
static inline cputime_t
timespec_to_cputime(const struct timespec *value)
{
- return value->tv_nsec / 1000 + (u64) value->tv_sec * 1000000;
+ return value->tv_nsec * 4096 / 1000 + (u64) value->tv_sec * 4096000000ULL;
}
static inline void
@@ -114,12 +114,12 @@ cputime_to_timespec(const cputime_t cputime, struct timespec *value)
register_pair rp;
rp.pair = cputime >> 1;
- asm ("dr %0,%1" : "+d" (rp) : "d" (1000000 >> 1));
- value->tv_nsec = rp.subreg.even * 1000;
+ asm ("dr %0,%1" : "+d" (rp) : "d" (2048000000UL));
+ value->tv_nsec = rp.subreg.even * 1000 / 4096;
value->tv_sec = rp.subreg.odd;
#else
- value->tv_nsec = (cputime % 1000000) * 1000;
- value->tv_sec = cputime / 1000000;
+ value->tv_nsec = (cputime % 4096000000ULL) * 1000 / 4096;
+ value->tv_sec = cputime / 4096000000ULL;
#endif
}
@@ -131,7 +131,7 @@ cputime_to_timespec(const cputime_t cputime, struct timespec *value)
static inline cputime_t
timeval_to_cputime(const struct timeval *value)
{
- return value->tv_usec + (u64) value->tv_sec * 1000000;
+ return value->tv_usec * 4096 + (u64) value->tv_sec * 4096000000ULL;
}
static inline void
@@ -141,12 +141,12 @@ cputime_to_timeval(const cputime_t cputime, struct timeval *value)
register_pair rp;
rp.pair = cputime >> 1;
- asm ("dr %0,%1" : "+d" (rp) : "d" (1000000 >> 1));
- value->tv_usec = rp.subreg.even;
+ asm ("dr %0,%1" : "+d" (rp) : "d" (2048000000UL));
+ value->tv_usec = rp.subreg.even / 4096;
value->tv_sec = rp.subreg.odd;
#else
- value->tv_usec = cputime % 1000000;
- value->tv_sec = cputime / 1000000;
+ value->tv_usec = cputime % 4096000000ULL;
+ value->tv_sec = cputime / 4096000000ULL;
#endif
}
@@ -156,13 +156,13 @@ cputime_to_timeval(const cputime_t cputime, struct timeval *value)
static inline clock_t
cputime_to_clock_t(cputime_t cputime)
{
- return __div(cputime, 1000000 / USER_HZ);
+ return __div(cputime, 4096000000ULL / USER_HZ);
}
static inline cputime_t
clock_t_to_cputime(unsigned long x)
{
- return (cputime_t) x * (1000000 / USER_HZ);
+ return (cputime_t) x * (4096000000ULL / USER_HZ);
}
/*
@@ -171,7 +171,7 @@ clock_t_to_cputime(unsigned long x)
static inline clock_t
cputime64_to_clock_t(cputime64_t cputime)
{
- return __div(cputime, 1000000 / USER_HZ);
+ return __div(cputime, 4096000000ULL / USER_HZ);
}
#endif /* _S390_CPUTIME_H */
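
[ Note: all the constants above change because cputime_t now counts native
CPU-timer units of 2**-12 microseconds instead of microseconds: 1 us = 4096
units, so 1 ms = 4096000 and 1 s = 4096000000 units. A quick sanity check of
the arithmetic in illustrative user-space C, not kernel code; note that
cputime_to_secs() divides by 2048000000 and then shifts right once,
presumably so the divisor stays below 2**31 for the dr-based __div() helper
on 31-bit:

#include <assert.h>

int main(void)
{
	unsigned long long one_sec = 4096000000ULL;	/* 1 s in 2**-12 us units */

	assert(one_sec == 4096ULL * 1000000);		/* 1 us = 4096 units */
	assert((one_sec / 2048000000ULL) >> 1 == 1);	/* cputime_to_secs() */
	assert(one_sec / (4096000000ULL / 100) == 100);	/* cputime_to_clock_t(),
							   assuming USER_HZ=100 */
	return 0;
}
]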
diff --git a/arch/s390/include/asm/dasd.h b/arch/s390/include/asm/dasd.h
index 55b2b80cdf6..e2db6f16d9c 100644
--- a/arch/s390/include/asm/dasd.h
+++ b/arch/s390/include/asm/dasd.h
@@ -14,6 +14,7 @@
#ifndef DASD_H
#define DASD_H
+#include <linux/types.h>
#include <linux/ioctl.h>
#define DASD_IOCTL_LETTER 'D'
@@ -78,6 +79,7 @@ typedef struct dasd_information2_t {
#define DASD_FEATURE_USEDIAG 0x02
#define DASD_FEATURE_INITIAL_ONLINE 0x04
#define DASD_FEATURE_ERPLOG 0x08
+#define DASD_FEATURE_FAILFAST 0x10
#define DASD_PARTN_BITS 2
diff --git a/arch/s390/include/asm/elf.h b/arch/s390/include/asm/elf.h
index d480f39d65e..74d0bbb7d95 100644
--- a/arch/s390/include/asm/elf.h
+++ b/arch/s390/include/asm/elf.h
@@ -172,14 +172,14 @@ extern char elf_platform[];
#ifndef __s390x__
#define SET_PERSONALITY(ex) set_personality(PER_LINUX)
#else /* __s390x__ */
-#define SET_PERSONALITY(ex) \
-do { \
- if (current->personality != PER_LINUX32) \
- set_personality(PER_LINUX); \
- if ((ex).e_ident[EI_CLASS] == ELFCLASS32) \
- set_thread_flag(TIF_31BIT); \
- else \
- clear_thread_flag(TIF_31BIT); \
+#define SET_PERSONALITY(ex) \
+do { \
+ if (personality(current->personality) != PER_LINUX32) \
+ set_personality(PER_LINUX); \
+ if ((ex).e_ident[EI_CLASS] == ELFCLASS32) \
+ set_thread_flag(TIF_31BIT); \
+ else \
+ clear_thread_flag(TIF_31BIT); \
} while (0)
#endif /* __s390x__ */
diff --git a/arch/s390/include/asm/kvm.h b/arch/s390/include/asm/kvm.h
index d74002f9579..e1f54654e3a 100644
--- a/arch/s390/include/asm/kvm.h
+++ b/arch/s390/include/asm/kvm.h
@@ -13,7 +13,7 @@
* Author(s): Carsten Otte <cotte@de.ibm.com>
* Christian Borntraeger <borntraeger@de.ibm.com>
*/
-#include <asm/types.h>
+#include <linux/types.h>
/* for KVM_GET_IRQCHIP and KVM_SET_IRQCHIP */
struct kvm_pic_state {
diff --git a/arch/s390/include/asm/lowcore.h b/arch/s390/include/asm/lowcore.h
index 0bc51d52a89..ffdef5fe858 100644
--- a/arch/s390/include/asm/lowcore.h
+++ b/arch/s390/include/asm/lowcore.h
@@ -67,11 +67,11 @@
#define __LC_SYNC_ENTER_TIMER 0x248
#define __LC_ASYNC_ENTER_TIMER 0x250
#define __LC_EXIT_TIMER 0x258
-#define __LC_LAST_UPDATE_TIMER 0x260
-#define __LC_USER_TIMER 0x268
-#define __LC_SYSTEM_TIMER 0x270
-#define __LC_LAST_UPDATE_CLOCK 0x278
-#define __LC_STEAL_CLOCK 0x280
+#define __LC_USER_TIMER 0x260
+#define __LC_SYSTEM_TIMER 0x268
+#define __LC_STEAL_TIMER 0x270
+#define __LC_LAST_UPDATE_TIMER 0x278
+#define __LC_LAST_UPDATE_CLOCK 0x280
#define __LC_RETURN_MCCK_PSW 0x288
#define __LC_KERNEL_STACK 0xC40
#define __LC_THREAD_INFO 0xC44
@@ -89,11 +89,11 @@
#define __LC_SYNC_ENTER_TIMER 0x250
#define __LC_ASYNC_ENTER_TIMER 0x258
#define __LC_EXIT_TIMER 0x260
-#define __LC_LAST_UPDATE_TIMER 0x268
-#define __LC_USER_TIMER 0x270
-#define __LC_SYSTEM_TIMER 0x278
-#define __LC_LAST_UPDATE_CLOCK 0x280
-#define __LC_STEAL_CLOCK 0x288
+#define __LC_USER_TIMER 0x268
+#define __LC_SYSTEM_TIMER 0x270
+#define __LC_STEAL_TIMER 0x278
+#define __LC_LAST_UPDATE_TIMER 0x280
+#define __LC_LAST_UPDATE_CLOCK 0x288
#define __LC_RETURN_MCCK_PSW 0x290
#define __LC_KERNEL_STACK 0xD40
#define __LC_THREAD_INFO 0xD48
@@ -106,8 +106,10 @@
#define __LC_IPLDEV 0xDB8
#define __LC_CURRENT 0xDD8
#define __LC_INT_CLOCK 0xDE8
+#define __LC_VDSO_PER_CPU 0xE38
#endif /* __s390x__ */
+#define __LC_PASTE 0xE40
#define __LC_PANIC_MAGIC 0xE00
#ifndef __s390x__
@@ -252,11 +254,11 @@ struct _lowcore
__u64 sync_enter_timer; /* 0x248 */
__u64 async_enter_timer; /* 0x250 */
__u64 exit_timer; /* 0x258 */
- __u64 last_update_timer; /* 0x260 */
- __u64 user_timer; /* 0x268 */
- __u64 system_timer; /* 0x270 */
- __u64 last_update_clock; /* 0x278 */
- __u64 steal_clock; /* 0x280 */
+ __u64 user_timer; /* 0x260 */
+ __u64 system_timer; /* 0x268 */
+ __u64 steal_timer; /* 0x270 */
+ __u64 last_update_timer; /* 0x278 */
+ __u64 last_update_clock; /* 0x280 */
psw_t return_mcck_psw; /* 0x288 */
__u8 pad8[0xc00-0x290]; /* 0x290 */
@@ -343,11 +345,11 @@ struct _lowcore
__u64 sync_enter_timer; /* 0x250 */
__u64 async_enter_timer; /* 0x258 */
__u64 exit_timer; /* 0x260 */
- __u64 last_update_timer; /* 0x268 */
- __u64 user_timer; /* 0x270 */
- __u64 system_timer; /* 0x278 */
- __u64 last_update_clock; /* 0x280 */
- __u64 steal_clock; /* 0x288 */
+ __u64 user_timer; /* 0x268 */
+ __u64 system_timer; /* 0x270 */
+ __u64 steal_timer; /* 0x278 */
+ __u64 last_update_timer; /* 0x280 */
+ __u64 last_update_clock; /* 0x288 */
psw_t return_mcck_psw; /* 0x290 */
__u8 pad8[0xc00-0x2a0]; /* 0x2a0 */
/* System info area */
@@ -381,7 +383,12 @@ struct _lowcore
/* whether the kernel died with panic() or not */
__u32 panic_magic; /* 0xe00 */
- __u8 pad13[0x11b8-0xe04]; /* 0xe04 */
+ /* Per cpu primary space access list */
+ __u8 pad_0xe04[0xe3c-0xe04]; /* 0xe04 */
+ __u32 vdso_per_cpu_data; /* 0xe3c */
+ __u32 paste[16]; /* 0xe40 */
+
+ __u8 pad13[0x11b8-0xe80]; /* 0xe80 */
/* 64 bit extparam used for pfault, diag 250 etc */
__u64 ext_params2; /* 0x11B8 */
diff --git a/arch/s390/include/asm/posix_types.h b/arch/s390/include/asm/posix_types.h
index 397d93fba3a..8cc113f9252 100644
--- a/arch/s390/include/asm/posix_types.h
+++ b/arch/s390/include/asm/posix_types.h
@@ -68,11 +68,7 @@ typedef unsigned short __kernel_old_dev_t;
#endif /* __s390x__ */
typedef struct {
-#if defined(__KERNEL__) || defined(__USE_ALL)
int val[2];
-#else /* !defined(__KERNEL__) && !defined(__USE_ALL)*/
- int __val[2];
-#endif /* !defined(__KERNEL__) && !defined(__USE_ALL)*/
} __kernel_fsid_t;
diff --git a/arch/s390/include/asm/ptrace.h b/arch/s390/include/asm/ptrace.h
index 5396f9f1226..8920025c3c0 100644
--- a/arch/s390/include/asm/ptrace.h
+++ b/arch/s390/include/asm/ptrace.h
@@ -272,12 +272,15 @@ typedef struct
#define PSW_ASC_SECONDARY 0x0000800000000000UL
#define PSW_ASC_HOME 0x0000C00000000000UL
-extern long psw_user32_bits;
-
#endif /* __s390x__ */
+#ifdef __KERNEL__
extern long psw_kernel_bits;
extern long psw_user_bits;
+#ifdef CONFIG_64BIT
+extern long psw_user32_bits;
+#endif
+#endif
/* This macro merges a NEW PSW mask specified by the user into
the currently active PSW mask CURRENT, modifying only those
diff --git a/arch/s390/include/asm/qeth.h b/arch/s390/include/asm/qeth.h
index 930d378ef75..06cbd1e8c94 100644
--- a/arch/s390/include/asm/qeth.h
+++ b/arch/s390/include/asm/qeth.h
@@ -10,6 +10,7 @@
*/
#ifndef __ASM_S390_QETH_IOCTL_H__
#define __ASM_S390_QETH_IOCTL_H__
+#include <linux/types.h>
#include <linux/ioctl.h>
#define SIOC_QETH_ARP_SET_NO_ENTRIES (SIOCDEVPRIVATE)
diff --git a/arch/s390/include/asm/s390_rdev.h b/arch/s390/include/asm/s390_rdev.h
deleted file mode 100644
index 6fa20442a48..00000000000
--- a/arch/s390/include/asm/s390_rdev.h
+++ /dev/null
@@ -1,15 +0,0 @@
-/*
- * include/asm-s390/ccwdev.h
- *
- * Copyright (C) 2002,2005 IBM Deutschland Entwicklung GmbH, IBM Corporation
- * Author(s): Cornelia Huck <cornelia.huck@de.ibm.com>
- * Carsten Otte <cotte@de.ibm.com>
- *
- * Interface for s390 root device
- */
-
-#ifndef _S390_RDEV_H_
-#define _S390_RDEV_H_
-extern struct device *s390_root_dev_register(const char *);
-extern void s390_root_dev_unregister(struct device *);
-#endif /* _S390_RDEV_H_ */
diff --git a/arch/s390/include/asm/schid.h b/arch/s390/include/asm/schid.h
index 825503cf3dc..3e4d401b4e4 100644
--- a/arch/s390/include/asm/schid.h
+++ b/arch/s390/include/asm/schid.h
@@ -1,6 +1,8 @@
#ifndef ASM_SCHID_H
#define ASM_SCHID_H
+#include <linux/types.h>
+
struct subchannel_id {
__u32 cssid : 8;
__u32 : 4;
diff --git a/arch/s390/include/asm/swab.h b/arch/s390/include/asm/swab.h
new file mode 100644
index 00000000000..eb18dc1f327
--- /dev/null
+++ b/arch/s390/include/asm/swab.h
@@ -0,0 +1,91 @@
+#ifndef _S390_SWAB_H
+#define _S390_SWAB_H
+
+/*
+ * include/asm-s390/swab.h
+ *
+ * S390 version
+ * Copyright (C) 1999 IBM Deutschland Entwicklung GmbH, IBM Corporation
+ * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
+ */
+
+#include <linux/types.h>
+
+#ifndef __s390x__
+# define __SWAB_64_THRU_32__
+#endif
+
+#ifdef __s390x__
+static inline __u64 __arch_swab64p(const __u64 *x)
+{
+ __u64 result;
+
+ asm volatile("lrvg %0,%1" : "=d" (result) : "m" (*x));
+ return result;
+}
+#define __arch_swab64p __arch_swab64p
+
+static inline __u64 __arch_swab64(__u64 x)
+{
+ __u64 result;
+
+ asm volatile("lrvgr %0,%1" : "=d" (result) : "d" (x));
+ return result;
+}
+#define __arch_swab64 __arch_swab64
+
+static inline void __arch_swab64s(__u64 *x)
+{
+ *x = __arch_swab64p(x);
+}
+#define __arch_swab64s __arch_swab64s
+#endif /* __s390x__ */
+
+static inline __u32 __arch_swab32p(const __u32 *x)
+{
+ __u32 result;
+
+ asm volatile(
+#ifndef __s390x__
+ " icm %0,8,3(%1)\n"
+ " icm %0,4,2(%1)\n"
+ " icm %0,2,1(%1)\n"
+ " ic %0,0(%1)"
+ : "=&d" (result) : "a" (x), "m" (*x) : "cc");
+#else /* __s390x__ */
+ " lrv %0,%1"
+ : "=d" (result) : "m" (*x));
+#endif /* __s390x__ */
+ return result;
+}
+#define __arch_swab32p __arch_swab32p
+
+#ifdef __s390x__
+static inline __u32 __arch_swab32(__u32 x)
+{
+ __u32 result;
+
+ asm volatile("lrvr %0,%1" : "=d" (result) : "d" (x));
+ return result;
+}
+#define __arch_swab32 __arch_swab32
+#endif /* __s390x__ */
+
+static inline __u16 __arch_swab16p(const __u16 *x)
+{
+ __u16 result;
+
+ asm volatile(
+#ifndef __s390x__
+ " icm %0,2,1(%1)\n"
+ " ic %0,0(%1)\n"
+ : "=&d" (result) : "a" (x), "m" (*x) : "cc");
+#else /* __s390x__ */
+ " lrvh %0,%1"
+ : "=d" (result) : "m" (*x));
+#endif /* __s390x__ */
+ return result;
+}
+#define __arch_swab16p __arch_swab16p
+
+#endif /* _S390_SWAB_H */
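
[ Note: the byte-swap helpers move verbatim from byteorder.h into the new
asm/swab.h as part of the tree-wide byteorder split; asm/byteorder.h shrinks
to the single linux/byteorder/big_endian.h include above. On 64-bit the
load-reversed instructions (lrvh/lrv/lrvg) perform the whole swap during the
load. For reference, what lrvg computes, expressed in portable C -- the
kernel's generic fallback lives in include/linux/swab.h:

static inline unsigned long long swab64_generic(unsigned long long x)
{
	return ((x & 0x00000000000000ffULL) << 56) |
	       ((x & 0x000000000000ff00ULL) << 40) |
	       ((x & 0x0000000000ff0000ULL) << 24) |
	       ((x & 0x00000000ff000000ULL) <<  8) |
	       ((x & 0x000000ff00000000ULL) >>  8) |
	       ((x & 0x0000ff0000000000ULL) >> 24) |
	       ((x & 0x00ff000000000000ULL) >> 40) |
	       ((x & 0xff00000000000000ULL) >> 56);
}
]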
diff --git a/arch/s390/include/asm/system.h b/arch/s390/include/asm/system.h
index 024ef42ed6d..3a8b26eb1f2 100644
--- a/arch/s390/include/asm/system.h
+++ b/arch/s390/include/asm/system.h
@@ -99,7 +99,7 @@ static inline void restore_access_regs(unsigned int *acrs)
prev = __switch_to(prev,next); \
} while (0)
-extern void account_vtime(struct task_struct *);
+extern void account_vtime(struct task_struct *, struct task_struct *);
extern void account_tick_vtime(struct task_struct *);
extern void account_system_vtime(struct task_struct *);
@@ -121,7 +121,7 @@ static inline void cmma_init(void) { }
#define finish_arch_switch(prev) do { \
set_fs(current->thread.mm_segment); \
- account_vtime(prev); \
+ account_vtime(prev, current); \
} while (0)
#define nop() asm volatile("nop")
diff --git a/arch/s390/include/asm/thread_info.h b/arch/s390/include/asm/thread_info.h
index c1eaf9604da..c544aa52453 100644
--- a/arch/s390/include/asm/thread_info.h
+++ b/arch/s390/include/asm/thread_info.h
@@ -47,6 +47,8 @@ struct thread_info {
unsigned int cpu; /* current CPU */
int preempt_count; /* 0 => preemptable, <0 => BUG */
struct restart_block restart_block;
+ __u64 user_timer;
+ __u64 system_timer;
};
/*
diff --git a/arch/s390/include/asm/timer.h b/arch/s390/include/asm/timer.h
index 61705d60f99..e4bcab739c1 100644
--- a/arch/s390/include/asm/timer.h
+++ b/arch/s390/include/asm/timer.h
@@ -23,20 +23,18 @@ struct vtimer_list {
__u64 expires;
__u64 interval;
- spinlock_t lock;
- unsigned long magic;
-
void (*function)(unsigned long);
unsigned long data;
};
-/* the offset value will wrap after ca. 71 years */
+/* the vtimer value will wrap after ca. 71 years */
struct vtimer_queue {
struct list_head list;
spinlock_t lock;
- __u64 to_expire; /* current event expire time */
- __u64 offset; /* list offset to zero */
- __u64 idle; /* temp var for idle */
+ __u64 timer; /* last programmed timer */
+ __u64 elapsed; /* elapsed time of timer expire values */
+ __u64 idle; /* temp var for idle */
+ int do_spt; /* =1: reprogram cpu timer in idle */
};
extern void init_virt_timer(struct vtimer_list *timer);
@@ -48,8 +46,8 @@ extern int del_virt_timer(struct vtimer_list *timer);
extern void init_cpu_vtimer(void);
extern void vtime_init(void);
-extern void vtime_start_cpu_timer(void);
-extern void vtime_stop_cpu_timer(void);
+extern void vtime_stop_cpu(void);
+extern void vtime_start_leave(void);
#endif /* __KERNEL__ */
diff --git a/arch/s390/include/asm/topology.h b/arch/s390/include/asm/topology.h
index d96c9164345..c93eb50e1d0 100644
--- a/arch/s390/include/asm/topology.h
+++ b/arch/s390/include/asm/topology.h
@@ -6,10 +6,12 @@
#define mc_capable() (1)
cpumask_t cpu_coregroup_map(unsigned int cpu);
+const struct cpumask *cpu_coregroup_mask(unsigned int cpu);
extern cpumask_t cpu_core_map[NR_CPUS];
#define topology_core_siblings(cpu) (cpu_core_map[cpu])
+#define topology_core_cpumask(cpu) (&cpu_core_map[cpu])
int topology_set_cpu_management(int fc);
void topology_schedule_update(void);
diff --git a/arch/s390/include/asm/types.h b/arch/s390/include/asm/types.h
index 41c54765613..3dc3fc22881 100644
--- a/arch/s390/include/asm/types.h
+++ b/arch/s390/include/asm/types.h
@@ -9,11 +9,7 @@
#ifndef _S390_TYPES_H
#define _S390_TYPES_H
-#ifndef __s390x__
-# include <asm-generic/int-ll64.h>
-#else
-# include <asm-generic/int-l64.h>
-#endif
+#include <asm-generic/int-ll64.h>
#ifndef __ASSEMBLY__
diff --git a/arch/s390/include/asm/vdso.h b/arch/s390/include/asm/vdso.h
index a44f4fe16a3..7bdd7c8ebc9 100644
--- a/arch/s390/include/asm/vdso.h
+++ b/arch/s390/include/asm/vdso.h
@@ -12,9 +12,9 @@
#ifndef __ASSEMBLY__
/*
- * Note about this structure:
+ * Note about the vdso_data and vdso_per_cpu_data structures:
*
- * NEVER USE THIS IN USERSPACE CODE DIRECTLY. The layout of this
+ * NEVER USE THEM IN USERSPACE CODE DIRECTLY. The layout of the
* structure is supposed to be known only to the function in the vdso
* itself and may change without notice.
*/
@@ -28,10 +28,21 @@ struct vdso_data {
__u64 wtom_clock_nsec; /* 0x28 */
__u32 tz_minuteswest; /* Minutes west of Greenwich 0x30 */
__u32 tz_dsttime; /* Type of dst correction 0x34 */
+ __u32 ectg_available;
+};
+
+struct vdso_per_cpu_data {
+ __u64 ectg_timer_base;
+ __u64 ectg_user_time;
};
extern struct vdso_data *vdso_data;
+#ifdef CONFIG_64BIT
+int vdso_alloc_per_cpu(int cpu, struct _lowcore *lowcore);
+void vdso_free_per_cpu(int cpu, struct _lowcore *lowcore);
+#endif
+
#endif /* __ASSEMBLY__ */
#endif /* __KERNEL__ */
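
[ Note: the two fields of the new vdso_per_cpu_data structure back an
ECTG-based fast path for per-thread CPU time: the entry64.S hunk below
snapshots the CPU timer and the accumulated user time into this per-cpu page
with a single 16-byte mvc on every exit to user space, which is also why the
lowcore hunk above moves user_timer to directly behind exit_timer. Since the
CPU timer counts down, user space can then derive its own CPU time without a
system call. A hypothetical C rendering of what the vdso's ECTG sequence
achieves -- sketch only, the real code is assembler in vdso64/, where ECTG
does the read-and-subtract as one uninterruptible operation:

/* uses struct vdso_per_cpu_data from the hunk above */
static unsigned long long user_cputime(const struct vdso_per_cpu_data *p,
				       unsigned long long cpu_timer_now)
{
	/* ectg_timer_base was captured at the last kernel exit; the
	 * timer decrements, so the difference is the user time spent
	 * since then */
	return p->ectg_user_time + (p->ectg_timer_base - cpu_timer_now);
}
]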
diff --git a/arch/s390/kernel/asm-offsets.c b/arch/s390/kernel/asm-offsets.c
index e641f60bac9..67a60016bab 100644
--- a/arch/s390/kernel/asm-offsets.c
+++ b/arch/s390/kernel/asm-offsets.c
@@ -48,6 +48,11 @@ int main(void)
DEFINE(__VDSO_WTOM_SEC, offsetof(struct vdso_data, wtom_clock_sec));
DEFINE(__VDSO_WTOM_NSEC, offsetof(struct vdso_data, wtom_clock_nsec));
DEFINE(__VDSO_TIMEZONE, offsetof(struct vdso_data, tz_minuteswest));
+ DEFINE(__VDSO_ECTG_OK, offsetof(struct vdso_data, ectg_available));
+ DEFINE(__VDSO_ECTG_BASE,
+ offsetof(struct vdso_per_cpu_data, ectg_timer_base));
+ DEFINE(__VDSO_ECTG_USER,
+ offsetof(struct vdso_per_cpu_data, ectg_user_time));
/* constants used by the vdso */
DEFINE(CLOCK_REALTIME, CLOCK_REALTIME);
DEFINE(CLOCK_MONOTONIC, CLOCK_MONOTONIC);
diff --git a/arch/s390/kernel/compat_wrapper.S b/arch/s390/kernel/compat_wrapper.S
index fc2c97197a5..62c706eb0de 100644
--- a/arch/s390/kernel/compat_wrapper.S
+++ b/arch/s390/kernel/compat_wrapper.S
@@ -547,7 +547,7 @@ sys32_setdomainname_wrapper:
.globl sys32_newuname_wrapper
sys32_newuname_wrapper:
llgtr %r2,%r2 # struct new_utsname *
- jg s390x_newuname # branch to system call
+ jg sys_s390_newuname # branch to system call
.globl compat_sys_adjtimex_wrapper
compat_sys_adjtimex_wrapper:
@@ -615,7 +615,7 @@ sys32_sysfs_wrapper:
.globl sys32_personality_wrapper
sys32_personality_wrapper:
llgfr %r2,%r2 # unsigned long
- jg s390x_personality # branch to system call
+ jg sys_s390_personality # branch to system call
.globl sys32_setfsuid16_wrapper
sys32_setfsuid16_wrapper:
@@ -1767,3 +1767,41 @@ sys_dup3_wrapper:
sys_epoll_create1_wrapper:
lgfr %r2,%r2 # int
jg sys_epoll_create1 # branch to system call
+
+ .globl sys32_readahead_wrapper
+sys32_readahead_wrapper:
+ lgfr %r2,%r2 # int
+ llgfr %r3,%r3 # u32
+ llgfr %r4,%r4 # u32
+ lgfr %r5,%r5 # s32
+ jg sys32_readahead # branch to system call
+
+ .globl sys32_sendfile64_wrapper
+sys32_sendfile64_wrapper:
+ lgfr %r2,%r2 # int
+ lgfr %r3,%r3 # int
+ llgtr %r4,%r4 # compat_loff_t *
+ lgfr %r5,%r5 # s32
+ jg sys32_sendfile64 # branch to system call
+
+ .globl sys_tkill_wrapper
+sys_tkill_wrapper:
+ lgfr %r2,%r2 # pid_t
+ lgfr %r3,%r3 # int
+ jg sys_tkill # branch to system call
+
+ .globl sys_tgkill_wrapper
+sys_tgkill_wrapper:
+ lgfr %r2,%r2 # pid_t
+ lgfr %r3,%r3 # pid_t
+ lgfr %r4,%r4 # int
+ jg sys_tgkill # branch to system call
+
+ .globl compat_sys_keyctl_wrapper
+compat_sys_keyctl_wrapper:
+ llgfr %r2,%r2 # u32
+ llgfr %r3,%r3 # u32
+ llgfr %r4,%r4 # u32
+ llgfr %r5,%r5 # u32
+ llgfr %r6,%r6 # u32
+ jg compat_sys_keyctl # branch to system call
diff --git a/arch/s390/kernel/entry.S b/arch/s390/kernel/entry.S
index 55de521aef7..1268aa2991b 100644
--- a/arch/s390/kernel/entry.S
+++ b/arch/s390/kernel/entry.S
@@ -583,8 +583,8 @@ kernel_per:
.globl io_int_handler
io_int_handler:
- stpt __LC_ASYNC_ENTER_TIMER
stck __LC_INT_CLOCK
+ stpt __LC_ASYNC_ENTER_TIMER
SAVE_ALL_BASE __LC_SAVE_AREA+16
SAVE_ALL_ASYNC __LC_IO_OLD_PSW,__LC_SAVE_AREA+16
CREATE_STACK_FRAME __LC_IO_OLD_PSW,__LC_SAVE_AREA+16
@@ -723,8 +723,8 @@ io_notify_resume:
.globl ext_int_handler
ext_int_handler:
- stpt __LC_ASYNC_ENTER_TIMER
stck __LC_INT_CLOCK
+ stpt __LC_ASYNC_ENTER_TIMER
SAVE_ALL_BASE __LC_SAVE_AREA+16
SAVE_ALL_ASYNC __LC_EXT_OLD_PSW,__LC_SAVE_AREA+16
CREATE_STACK_FRAME __LC_EXT_OLD_PSW,__LC_SAVE_AREA+16
@@ -750,6 +750,7 @@ __critical_end:
.globl mcck_int_handler
mcck_int_handler:
+ stck __LC_INT_CLOCK
spt __LC_CPU_TIMER_SAVE_AREA # revalidate cpu timer
lm %r0,%r15,__LC_GPREGS_SAVE_AREA # revalidate gprs
SAVE_ALL_BASE __LC_SAVE_AREA+32
diff --git a/arch/s390/kernel/entry.h b/arch/s390/kernel/entry.h
index 6b1896345ed..950c59c6688 100644
--- a/arch/s390/kernel/entry.h
+++ b/arch/s390/kernel/entry.h
@@ -30,23 +30,23 @@ struct fadvise64_64_args;
struct old_sigaction;
struct sel_arg_struct;
-long sys_pipe(unsigned long __user *fildes);
long sys_mmap2(struct mmap_arg_struct __user *arg);
-long old_mmap(struct mmap_arg_struct __user *arg);
+long sys_s390_old_mmap(struct mmap_arg_struct __user *arg);
long sys_ipc(uint call, int first, unsigned long second,
unsigned long third, void __user *ptr);
-long s390x_newuname(struct new_utsname __user *name);
-long s390x_personality(unsigned long personality);
-long s390_fadvise64(int fd, u32 offset_high, u32 offset_low,
+long sys_s390_newuname(struct new_utsname __user *name);
+long sys_s390_personality(unsigned long personality);
+long sys_s390_fadvise64(int fd, u32 offset_high, u32 offset_low,
size_t len, int advice);
-long s390_fadvise64_64(struct fadvise64_64_args __user *args);
-long s390_fallocate(int fd, int mode, loff_t offset, u32 len_high, u32 len_low);
+long sys_s390_fadvise64_64(struct fadvise64_64_args __user *args);
+long sys_s390_fallocate(int fd, int mode, loff_t offset, u32 len_high,
+ u32 len_low);
long sys_fork(void);
long sys_clone(void);
long sys_vfork(void);
void execve_tail(void);
long sys_execve(void);
-int sys_sigsuspend(int history0, int history1, old_sigset_t mask);
+long sys_sigsuspend(int history0, int history1, old_sigset_t mask);
long sys_sigaction(int sig, const struct old_sigaction __user *act,
struct old_sigaction __user *oact);
long sys_sigaltstack(const stack_t __user *uss, stack_t __user *uoss);
@@ -54,7 +54,5 @@ long sys_sigreturn(void);
long sys_rt_sigreturn(void);
long sys32_sigreturn(void);
long sys32_rt_sigreturn(void);
-long old_select(struct sel_arg_struct __user *arg);
-long sys_ptrace(long request, long pid, long addr, long data);
#endif /* _ENTRY_H */
diff --git a/arch/s390/kernel/entry64.S b/arch/s390/kernel/entry64.S
index 16bb4fd1a40..c6fbde13971 100644
--- a/arch/s390/kernel/entry64.S
+++ b/arch/s390/kernel/entry64.S
@@ -177,8 +177,11 @@ _TIF_WORK_INT = (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_NEED_RESCHED | \
.if !\sync
ni \psworg+1,0xfd # clear wait state bit
.endif
- lmg %r0,%r15,SP_R0(%r15) # load gprs 0-15 of user
+ lg %r14,__LC_VDSO_PER_CPU
+ lmg %r0,%r13,SP_R0(%r15) # load gprs 0-13 of user
stpt __LC_EXIT_TIMER
+ mvc __VDSO_ECTG_BASE(16,%r14),__LC_EXIT_TIMER
+ lmg %r14,%r15,SP_R14(%r15) # load grps 14-15 of user
lpswe \psworg # back to caller
.endm
@@ -559,8 +562,8 @@ kernel_per:
*/
.globl io_int_handler
io_int_handler:
- stpt __LC_ASYNC_ENTER_TIMER
stck __LC_INT_CLOCK
+ stpt __LC_ASYNC_ENTER_TIMER
SAVE_ALL_BASE __LC_SAVE_AREA+32
SAVE_ALL_ASYNC __LC_IO_OLD_PSW,__LC_SAVE_AREA+32
CREATE_STACK_FRAME __LC_IO_OLD_PSW,__LC_SAVE_AREA+32
@@ -721,8 +724,8 @@ io_notify_resume:
*/
.globl ext_int_handler
ext_int_handler:
- stpt __LC_ASYNC_ENTER_TIMER
stck __LC_INT_CLOCK
+ stpt __LC_ASYNC_ENTER_TIMER
SAVE_ALL_BASE __LC_SAVE_AREA+32
SAVE_ALL_ASYNC __LC_EXT_OLD_PSW,__LC_SAVE_AREA+32
CREATE_STACK_FRAME __LC_EXT_OLD_PSW,__LC_SAVE_AREA+32
@@ -746,6 +749,7 @@ __critical_end:
*/
.globl mcck_int_handler
mcck_int_handler:
+ stck __LC_INT_CLOCK
la %r1,4095 # revalidate r1
spt __LC_CPU_TIMER_SAVE_AREA-4095(%r1) # revalidate cpu timer
lmg %r0,%r15,__LC_GPREGS_SAVE_AREA-4095(%r1)# revalidate gprs
@@ -979,23 +983,23 @@ cleanup_sysc_return:
cleanup_sysc_leave:
clc 8(8,%r12),BASED(cleanup_sysc_leave_insn)
- je 2f
- mvc __LC_EXIT_TIMER(8),__LC_ASYNC_ENTER_TIMER
+ je 3f
clc 8(8,%r12),BASED(cleanup_sysc_leave_insn+8)
- je 2f
- mvc __LC_RETURN_PSW(16),SP_PSW(%r15)
+ jhe 0f
+ mvc __LC_EXIT_TIMER(8),__LC_ASYNC_ENTER_TIMER
+0: mvc __LC_RETURN_PSW(16),SP_PSW(%r15)
cghi %r12,__LC_MCK_OLD_PSW
- jne 0f
+ jne 1f
mvc __LC_SAVE_AREA+64(32),SP_R12(%r15)
- j 1f
-0: mvc __LC_SAVE_AREA+32(32),SP_R12(%r15)
-1: lmg %r0,%r11,SP_R0(%r15)
+ j 2f
+1: mvc __LC_SAVE_AREA+32(32),SP_R12(%r15)
+2: lmg %r0,%r11,SP_R0(%r15)
lg %r15,SP_R15(%r15)
-2: la %r12,__LC_RETURN_PSW
+3: la %r12,__LC_RETURN_PSW
br %r14
cleanup_sysc_leave_insn:
.quad sysc_done - 4
- .quad sysc_done - 8
+ .quad sysc_done - 16
cleanup_io_return:
mvc __LC_RETURN_PSW(8),0(%r12)
@@ -1005,23 +1009,23 @@ cleanup_io_return:
cleanup_io_leave:
clc 8(8,%r12),BASED(cleanup_io_leave_insn)
- je 2f
- mvc __LC_EXIT_TIMER(8),__LC_ASYNC_ENTER_TIMER
+ je 3f
clc 8(8,%r12),BASED(cleanup_io_leave_insn+8)
- je 2f
- mvc __LC_RETURN_PSW(16),SP_PSW(%r15)
+ jhe 0f
+ mvc __LC_EXIT_TIMER(8),__LC_ASYNC_ENTER_TIMER
+0: mvc __LC_RETURN_PSW(16),SP_PSW(%r15)
cghi %r12,__LC_MCK_OLD_PSW
- jne 0f
+ jne 1f
mvc __LC_SAVE_AREA+64(32),SP_R12(%r15)
- j 1f
-0: mvc __LC_SAVE_AREA+32(32),SP_R12(%r15)
-1: lmg %r0,%r11,SP_R0(%r15)
+ j 2f
+1: mvc __LC_SAVE_AREA+32(32),SP_R12(%r15)
+2: lmg %r0,%r11,SP_R0(%r15)
lg %r15,SP_R15(%r15)
-2: la %r12,__LC_RETURN_PSW
+3: la %r12,__LC_RETURN_PSW
br %r14
cleanup_io_leave_insn:
.quad io_done - 4
- .quad io_done - 8
+ .quad io_done - 16
/*
* Integer constants
diff --git a/arch/s390/kernel/head64.S b/arch/s390/kernel/head64.S
index 3ccd36b24b8..f9f70aa1524 100644
--- a/arch/s390/kernel/head64.S
+++ b/arch/s390/kernel/head64.S
@@ -87,6 +87,8 @@ startup_continue:
lg %r12,.Lparmaddr-.LPG1(%r13) # pointer to parameter area
# move IPL device to lowcore
mvc __LC_IPLDEV(4),IPL_DEVICE+4-PARMAREA(%r12)
+ lghi %r0,__LC_PASTE
+ stg %r0,__LC_VDSO_PER_CPU
#
# Setup stack
#
diff --git a/arch/s390/kernel/init_task.c b/arch/s390/kernel/init_task.c
index e8071684361..7db95c0b869 100644
--- a/arch/s390/kernel/init_task.c
+++ b/arch/s390/kernel/init_task.c
@@ -16,7 +16,6 @@
#include <asm/uaccess.h>
#include <asm/pgtable.h>
-static struct fs_struct init_fs = INIT_FS;
static struct signal_struct init_signals = INIT_SIGNALS(init_signals);
static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand);
struct mm_struct init_mm = INIT_MM(init_mm);
diff --git a/arch/s390/kernel/kprobes.c b/arch/s390/kernel/kprobes.c
index 569079ec4ff..a01cf0284db 100644
--- a/arch/s390/kernel/kprobes.c
+++ b/arch/s390/kernel/kprobes.c
@@ -218,9 +218,10 @@ void __kprobes arch_disarm_kprobe(struct kprobe *p)
void __kprobes arch_remove_kprobe(struct kprobe *p)
{
- mutex_lock(&kprobe_mutex);
- free_insn_slot(p->ainsn.insn, 0);
- mutex_unlock(&kprobe_mutex);
+ if (p->ainsn.insn) {
+ free_insn_slot(p->ainsn.insn, 0);
+ p->ainsn.insn = NULL;
+ }
}
static void __kprobes prepare_singlestep(struct kprobe *p, struct pt_regs *regs)
@@ -381,7 +382,7 @@ static int __kprobes trampoline_probe_handler(struct kprobe *p,
/*
* It is possible to have multiple instances associated with a given
* task either because an multiple functions in the call path
- * have a return probe installed on them, and/or more then one return
+ * have a return probe installed on them, and/or more than one return
* return probe was registered for a target function.
*
* We can handle this because:
diff --git a/arch/s390/kernel/process.c b/arch/s390/kernel/process.c
index 04f8c67a610..5cd38a90e64 100644
--- a/arch/s390/kernel/process.c
+++ b/arch/s390/kernel/process.c
@@ -38,6 +38,8 @@
#include <linux/utsname.h>
#include <linux/tick.h>
#include <linux/elfcore.h>
+#include <linux/kernel_stat.h>
+#include <linux/syscalls.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/system.h>
@@ -45,7 +47,6 @@
#include <asm/processor.h>
#include <asm/irq.h>
#include <asm/timer.h>
-#include <asm/cpu.h>
#include "entry.h"
asmlinkage void ret_from_fork(void) asm ("ret_from_fork");
@@ -75,36 +76,6 @@ unsigned long thread_saved_pc(struct task_struct *tsk)
return sf->gprs[8];
}
-DEFINE_PER_CPU(struct s390_idle_data, s390_idle) = {
- .lock = __SPIN_LOCK_UNLOCKED(s390_idle.lock)
-};
-
-static int s390_idle_enter(void)
-{
- struct s390_idle_data *idle;
-
- idle = &__get_cpu_var(s390_idle);
- spin_lock(&idle->lock);
- idle->idle_count++;
- idle->in_idle = 1;
- idle->idle_enter = get_clock();
- spin_unlock(&idle->lock);
- vtime_stop_cpu_timer();
- return NOTIFY_OK;
-}
-
-void s390_idle_leave(void)
-{
- struct s390_idle_data *idle;
-
- vtime_start_cpu_timer();
- idle = &__get_cpu_var(s390_idle);
- spin_lock(&idle->lock);
- idle->idle_time += get_clock() - idle->idle_enter;
- idle->in_idle = 0;
- spin_unlock(&idle->lock);
-}
-
extern void s390_handle_mcck(void);
/*
* The idle loop on a S390...
@@ -117,10 +88,6 @@ static void default_idle(void)
local_irq_enable();
return;
}
- if (s390_idle_enter() == NOTIFY_BAD) {
- local_irq_enable();
- return;
- }
#ifdef CONFIG_HOTPLUG_CPU
if (cpu_is_offline(smp_processor_id())) {
preempt_enable_no_resched();
@@ -130,7 +97,6 @@ static void default_idle(void)
local_mcck_disable();
if (test_thread_flag(TIF_MCCK_PENDING)) {
local_mcck_enable();
- s390_idle_leave();
local_irq_enable();
s390_handle_mcck();
return;
@@ -138,9 +104,9 @@ static void default_idle(void)
trace_hardirqs_on();
/* Don't trace preempt off for idle. */
stop_critical_timings();
- /* Wait for external, I/O or machine check interrupt. */
- __load_psw_mask(psw_kernel_bits | PSW_MASK_WAIT |
- PSW_MASK_IO | PSW_MASK_EXT);
+ /* Stop virtual timer and halt the cpu. */
+ vtime_stop_cpu();
+ /* Reenable preemption tracer. */
start_critical_timings();
}
@@ -260,13 +226,13 @@ int copy_thread(int nr, unsigned long clone_flags, unsigned long new_stackp,
return 0;
}
-asmlinkage long sys_fork(void)
+SYSCALL_DEFINE0(fork)
{
struct pt_regs *regs = task_pt_regs(current);
return do_fork(SIGCHLD, regs->gprs[15], regs, 0, NULL, NULL);
}
-asmlinkage long sys_clone(void)
+SYSCALL_DEFINE0(clone)
{
struct pt_regs *regs = task_pt_regs(current);
unsigned long clone_flags;
@@ -293,7 +259,7 @@ asmlinkage long sys_clone(void)
* do not have enough call-clobbered registers to hold all
* the information you need.
*/
-asmlinkage long sys_vfork(void)
+SYSCALL_DEFINE0(vfork)
{
struct pt_regs *regs = task_pt_regs(current);
return do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD,
@@ -313,7 +279,7 @@ asmlinkage void execve_tail(void)
/*
* sys_execve() executes a new program.
*/
-asmlinkage long sys_execve(void)
+SYSCALL_DEFINE0(execve)
{
struct pt_regs *regs = task_pt_regs(current);
char *filename;
diff --git a/arch/s390/kernel/s390_ext.c b/arch/s390/kernel/s390_ext.c
index e019b419efc..a0d2d55d7fb 100644
--- a/arch/s390/kernel/s390_ext.c
+++ b/arch/s390/kernel/s390_ext.c
@@ -119,8 +119,8 @@ void do_extint(struct pt_regs *regs, unsigned short code)
struct pt_regs *old_regs;
old_regs = set_irq_regs(regs);
- irq_enter();
s390_idle_check();
+ irq_enter();
if (S390_lowcore.int_clock >= S390_lowcore.clock_comparator)
/* Serve timer interrupts first. */
clock_comparator_work();
diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c
index b7a1efd5522..d825f4950e4 100644
--- a/arch/s390/kernel/setup.c
+++ b/arch/s390/kernel/setup.c
@@ -427,6 +427,8 @@ setup_lowcore(void)
/* enable extended save area */
__ctl_set_bit(14, 29);
}
+#else
+ lc->vdso_per_cpu_data = (unsigned long) &lc->paste[0];
#endif
set_prefix((u32)(unsigned long) lc);
}
diff --git a/arch/s390/kernel/signal.c b/arch/s390/kernel/signal.c
index 8e6812a2267..3cf74c3ccb6 100644
--- a/arch/s390/kernel/signal.c
+++ b/arch/s390/kernel/signal.c
@@ -25,6 +25,7 @@
#include <linux/personality.h>
#include <linux/binfmts.h>
#include <linux/tracehook.h>
+#include <linux/syscalls.h>
#include <asm/ucontext.h>
#include <asm/uaccess.h>
#include <asm/lowcore.h>
@@ -53,8 +54,7 @@ typedef struct
/*
* Atomically swap in the new signal mask, and wait for a signal.
*/
-asmlinkage int
-sys_sigsuspend(int history0, int history1, old_sigset_t mask)
+SYSCALL_DEFINE3(sigsuspend, int, history0, int, history1, old_sigset_t, mask)
{
mask &= _BLOCKABLE;
spin_lock_irq(&current->sighand->siglock);
@@ -70,9 +70,8 @@ sys_sigsuspend(int history0, int history1, old_sigset_t mask)
return -ERESTARTNOHAND;
}
-asmlinkage long
-sys_sigaction(int sig, const struct old_sigaction __user *act,
- struct old_sigaction __user *oact)
+SYSCALL_DEFINE3(sigaction, int, sig, const struct old_sigaction __user *, act,
+ struct old_sigaction __user *, oact)
{
struct k_sigaction new_ka, old_ka;
int ret;
@@ -102,15 +101,13 @@ sys_sigaction(int sig, const struct old_sigaction __user *act,
return ret;
}
-asmlinkage long
-sys_sigaltstack(const stack_t __user *uss, stack_t __user *uoss)
+SYSCALL_DEFINE2(sigaltstack, const stack_t __user *, uss,
+ stack_t __user *, uoss)
{
struct pt_regs *regs = task_pt_regs(current);
return do_sigaltstack(uss, uoss, regs->gprs[15]);
}
-
-
/* Returns non-zero on fault. */
static int save_sigregs(struct pt_regs *regs, _sigregs __user *sregs)
{
@@ -164,7 +161,7 @@ static int restore_sigregs(struct pt_regs *regs, _sigregs __user *sregs)
return 0;
}
-asmlinkage long sys_sigreturn(void)
+SYSCALL_DEFINE0(sigreturn)
{
struct pt_regs *regs = task_pt_regs(current);
sigframe __user *frame = (sigframe __user *)regs->gprs[15];
@@ -191,7 +188,7 @@ badframe:
return 0;
}
-asmlinkage long sys_rt_sigreturn(void)
+SYSCALL_DEFINE0(rt_sigreturn)
{
struct pt_regs *regs = task_pt_regs(current);
rt_sigframe __user *frame = (rt_sigframe __user *)regs->gprs[15];
diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c
index 6fc78541dc5..2d337cbb932 100644
--- a/arch/s390/kernel/smp.c
+++ b/arch/s390/kernel/smp.c
@@ -47,6 +47,7 @@
#include <asm/lowcore.h>
#include <asm/sclp.h>
#include <asm/cpu.h>
+#include <asm/vdso.h>
#include "entry.h"
/*
@@ -55,12 +56,6 @@
struct _lowcore *lowcore_ptr[NR_CPUS];
EXPORT_SYMBOL(lowcore_ptr);
-cpumask_t cpu_online_map = CPU_MASK_NONE;
-EXPORT_SYMBOL(cpu_online_map);
-
-cpumask_t cpu_possible_map = CPU_MASK_ALL;
-EXPORT_SYMBOL(cpu_possible_map);
-
static struct task_struct *current_set[NR_CPUS];
static u8 smp_cpu_type;
@@ -506,6 +501,9 @@ static int __cpuinit smp_alloc_lowcore(int cpu)
goto out;
lowcore->extended_save_area_addr = (u32) save_area;
}
+#else
+ if (vdso_alloc_per_cpu(cpu, lowcore))
+ goto out;
#endif
lowcore_ptr[cpu] = lowcore;
return 0;
@@ -528,6 +526,8 @@ static void smp_free_lowcore(int cpu)
#ifndef CONFIG_64BIT
if (MACHINE_HAS_IEEE)
free_page((unsigned long) lowcore->extended_save_area_addr);
+#else
+ vdso_free_per_cpu(cpu, lowcore);
#endif
free_page(lowcore->panic_stack - PAGE_SIZE);
free_pages(lowcore->async_stack - ASYNC_SIZE, ASYNC_ORDER);
@@ -670,6 +670,7 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
lowcore = (void *) __get_free_pages(GFP_KERNEL | GFP_DMA, lc_order);
panic_stack = __get_free_page(GFP_KERNEL);
async_stack = __get_free_pages(GFP_KERNEL, ASYNC_ORDER);
+ BUG_ON(!lowcore || !panic_stack || !async_stack);
#ifndef CONFIG_64BIT
if (MACHINE_HAS_IEEE)
save_area = get_zeroed_page(GFP_KERNEL);
@@ -683,6 +684,9 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
#ifndef CONFIG_64BIT
if (MACHINE_HAS_IEEE)
lowcore->extended_save_area_addr = (u32) save_area;
+#else
+ if (vdso_alloc_per_cpu(smp_processor_id(), lowcore))
+ BUG();
#endif
set_prefix((u32)(unsigned long) lowcore);
local_mcck_enable();
@@ -851,9 +855,11 @@ static ssize_t show_idle_count(struct sys_device *dev,
unsigned long long idle_count;
idle = &per_cpu(s390_idle, dev->id);
- spin_lock_irq(&idle->lock);
+ spin_lock(&idle->lock);
idle_count = idle->idle_count;
- spin_unlock_irq(&idle->lock);
+ if (idle->idle_enter)
+ idle_count++;
+ spin_unlock(&idle->lock);
return sprintf(buf, "%llu\n", idle_count);
}
static SYSDEV_ATTR(idle_count, 0444, show_idle_count, NULL);
@@ -862,18 +868,17 @@ static ssize_t show_idle_time(struct sys_device *dev,
struct sysdev_attribute *attr, char *buf)
{
struct s390_idle_data *idle;
- unsigned long long new_time;
+ unsigned long long now, idle_time, idle_enter;
idle = &per_cpu(s390_idle, dev->id);
- spin_lock_irq(&idle->lock);
- if (idle->in_idle) {
- new_time = get_clock();
- idle->idle_time += new_time - idle->idle_enter;
- idle->idle_enter = new_time;
- }
- new_time = idle->idle_time;
- spin_unlock_irq(&idle->lock);
- return sprintf(buf, "%llu\n", new_time >> 12);
+ spin_lock(&idle->lock);
+ now = get_clock();
+ idle_time = idle->idle_time;
+ idle_enter = idle->idle_enter;
+ if (idle_enter != 0ULL && idle_enter < now)
+ idle_time += now - idle_enter;
+ spin_unlock(&idle->lock);
+ return sprintf(buf, "%llu\n", idle_time >> 12);
}
static SYSDEV_ATTR(idle_time_us, 0444, show_idle_time, NULL);
diff --git a/arch/s390/kernel/sys_s390.c b/arch/s390/kernel/sys_s390.c
index 4fe952e557a..c7ae4b17e0e 100644
--- a/arch/s390/kernel/sys_s390.c
+++ b/arch/s390/kernel/sys_s390.c
@@ -29,6 +29,7 @@
#include <linux/personality.h>
#include <linux/unistd.h>
#include <linux/ipc.h>
+#include <linux/syscalls.h>
#include <asm/uaccess.h>
#include "entry.h"
@@ -74,7 +75,7 @@ struct mmap_arg_struct {
unsigned long offset;
};
-asmlinkage long sys_mmap2(struct mmap_arg_struct __user *arg)
+SYSCALL_DEFINE1(mmap2, struct mmap_arg_struct __user *, arg)
{
struct mmap_arg_struct a;
int error = -EFAULT;
@@ -86,7 +87,7 @@ out:
return error;
}
-asmlinkage long old_mmap(struct mmap_arg_struct __user *arg)
+SYSCALL_DEFINE1(s390_old_mmap, struct mmap_arg_struct __user *, arg)
{
struct mmap_arg_struct a;
long error = -EFAULT;
@@ -103,32 +104,13 @@ out:
return error;
}
-#ifndef CONFIG_64BIT
-struct sel_arg_struct {
- unsigned long n;
- fd_set __user *inp, *outp, *exp;
- struct timeval __user *tvp;
-};
-
-asmlinkage long old_select(struct sel_arg_struct __user *arg)
-{
- struct sel_arg_struct a;
-
- if (copy_from_user(&a, arg, sizeof(a)))
- return -EFAULT;
- /* sys_select() does the appropriate kernel locking */
- return sys_select(a.n, a.inp, a.outp, a.exp, a.tvp);
-
-}
-#endif /* CONFIG_64BIT */
-
/*
* sys_ipc() is the de-multiplexer for the SysV IPC calls..
*
* This is really horribly ugly.
*/
-asmlinkage long sys_ipc(uint call, int first, unsigned long second,
- unsigned long third, void __user *ptr)
+SYSCALL_DEFINE5(ipc, uint, call, int, first, unsigned long, second,
+ unsigned long, third, void __user *, ptr)
{
struct ipc_kludge tmp;
int ret;
@@ -194,7 +176,7 @@ asmlinkage long sys_ipc(uint call, int first, unsigned long second,
}
#ifdef CONFIG_64BIT
-asmlinkage long s390x_newuname(struct new_utsname __user *name)
+SYSCALL_DEFINE1(s390_newuname, struct new_utsname __user *, name)
{
int ret = sys_newuname(name);
@@ -205,7 +187,7 @@ asmlinkage long s390x_newuname(struct new_utsname __user *name)
return ret;
}
-asmlinkage long s390x_personality(unsigned long personality)
+SYSCALL_DEFINE1(s390_personality, unsigned long, personality)
{
int ret;
@@ -224,15 +206,13 @@ asmlinkage long s390x_personality(unsigned long personality)
*/
#ifndef CONFIG_64BIT
-asmlinkage long
-s390_fadvise64(int fd, u32 offset_high, u32 offset_low, size_t len, int advice)
+SYSCALL_DEFINE5(s390_fadvise64, int, fd, u32, offset_high, u32, offset_low,
+ size_t, len, int, advice)
{
return sys_fadvise64(fd, (u64) offset_high << 32 | offset_low,
len, advice);
}
-#endif
-
struct fadvise64_64_args {
int fd;
long long offset;
@@ -240,8 +220,7 @@ struct fadvise64_64_args {
int advice;
};
-asmlinkage long
-s390_fadvise64_64(struct fadvise64_64_args __user *args)
+SYSCALL_DEFINE1(s390_fadvise64_64, struct fadvise64_64_args __user *, args)
{
struct fadvise64_64_args a;
@@ -250,7 +229,6 @@ s390_fadvise64_64(struct fadvise64_64_args __user *args)
return sys_fadvise64_64(a.fd, a.offset, a.len, a.advice);
}
-#ifndef CONFIG_64BIT
/*
* This is a wrapper to call sys_fallocate(). For 31 bit s390 the last
* 64 bit argument "len" is split into the upper and lower 32 bits. The
@@ -263,9 +241,19 @@ s390_fadvise64_64(struct fadvise64_64_args __user *args)
* to
* %r2: fd, %r3: mode, %r4/%r5: offset, 96(%r15)-103(%r15): len
*/
-asmlinkage long s390_fallocate(int fd, int mode, loff_t offset,
+SYSCALL_DEFINE(s390_fallocate)(int fd, int mode, loff_t offset,
u32 len_high, u32 len_low)
{
return sys_fallocate(fd, mode, offset, ((u64)len_high << 32) | len_low);
}
+#ifdef CONFIG_HAVE_SYSCALL_WRAPPERS
+asmlinkage long SyS_s390_fallocate(long fd, long mode, loff_t offset,
+ long len_high, long len_low)
+{
+ return SYSC_s390_fallocate((int) fd, (int) mode, offset,
+ (u32) len_high, (u32) len_low);
+}
+SYSCALL_ALIAS(sys_s390_fallocate, SyS_s390_fallocate);
+#endif
+
#endif
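
[ Note: the comment above sys_s390_fallocate describes the 31-bit calling
convention: with %r2-%r5 already taken by fd, mode and the split offset, the
64-bit len arrives as two 32-bit halves on the stack, and the wrapper glues
them back together. The reassembly in isolation, as illustrative C:

#include <stdint.h>

static uint64_t join_len(uint32_t len_high, uint32_t len_low)
{
	return ((uint64_t) len_high << 32) | len_low;
}

/* e.g. join_len(0x1, 0x80000000) == 0x180000000 (6 GiB) */
]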
diff --git a/arch/s390/kernel/syscalls.S b/arch/s390/kernel/syscalls.S
index 2d61787949d..fe5b25a988a 100644
--- a/arch/s390/kernel/syscalls.S
+++ b/arch/s390/kernel/syscalls.S
@@ -98,7 +98,7 @@ SYSCALL(sys_uselib,sys_uselib,sys32_uselib_wrapper)
SYSCALL(sys_swapon,sys_swapon,sys32_swapon_wrapper)
SYSCALL(sys_reboot,sys_reboot,sys32_reboot_wrapper)
SYSCALL(sys_ni_syscall,sys_ni_syscall,old32_readdir_wrapper) /* old readdir syscall */
-SYSCALL(old_mmap,old_mmap,old32_mmap_wrapper) /* 90 */
+SYSCALL(sys_s390_old_mmap,sys_s390_old_mmap,old32_mmap_wrapper) /* 90 */
SYSCALL(sys_munmap,sys_munmap,sys32_munmap_wrapper)
SYSCALL(sys_truncate,sys_truncate,sys32_truncate_wrapper)
SYSCALL(sys_ftruncate,sys_ftruncate,sys32_ftruncate_wrapper)
@@ -130,7 +130,7 @@ SYSCALL(sys_fsync,sys_fsync,sys32_fsync_wrapper)
SYSCALL(sys_sigreturn,sys_sigreturn,sys32_sigreturn)
SYSCALL(sys_clone,sys_clone,sys32_clone) /* 120 */
SYSCALL(sys_setdomainname,sys_setdomainname,sys32_setdomainname_wrapper)
-SYSCALL(sys_newuname,s390x_newuname,sys32_newuname_wrapper)
+SYSCALL(sys_newuname,sys_s390_newuname,sys32_newuname_wrapper)
NI_SYSCALL /* modify_ldt for i386 */
SYSCALL(sys_adjtimex,sys_adjtimex,compat_sys_adjtimex_wrapper)
SYSCALL(sys_mprotect,sys_mprotect,sys32_mprotect_wrapper) /* 125 */
@@ -144,7 +144,7 @@ SYSCALL(sys_getpgid,sys_getpgid,sys32_getpgid_wrapper)
SYSCALL(sys_fchdir,sys_fchdir,sys32_fchdir_wrapper)
SYSCALL(sys_bdflush,sys_bdflush,sys32_bdflush_wrapper)
SYSCALL(sys_sysfs,sys_sysfs,sys32_sysfs_wrapper) /* 135 */
-SYSCALL(sys_personality,s390x_personality,sys32_personality_wrapper)
+SYSCALL(sys_personality,sys_s390_personality,sys32_personality_wrapper)
NI_SYSCALL /* for afs_syscall */
SYSCALL(sys_setfsuid16,sys_ni_syscall,sys32_setfsuid16_wrapper) /* old setfsuid16 syscall */
SYSCALL(sys_setfsgid16,sys_ni_syscall,sys32_setfsgid16_wrapper) /* old setfsgid16 syscall */
@@ -194,7 +194,7 @@ SYSCALL(sys_chown16,sys_ni_syscall,sys32_chown16_wrapper) /* old chown16 syscall
SYSCALL(sys_getcwd,sys_getcwd,sys32_getcwd_wrapper)
SYSCALL(sys_capget,sys_capget,sys32_capget_wrapper)
SYSCALL(sys_capset,sys_capset,sys32_capset_wrapper) /* 185 */
-SYSCALL(sys_sigaltstack,sys_sigaltstack,sys32_sigaltstack)
+SYSCALL(sys_sigaltstack,sys_sigaltstack,sys32_sigaltstack_wrapper)
SYSCALL(sys_sendfile,sys_sendfile64,sys32_sendfile_wrapper)
NI_SYSCALL /* streams1 */
NI_SYSCALL /* streams2 */
@@ -230,8 +230,8 @@ SYSCALL(sys_mincore,sys_mincore,sys32_mincore_wrapper)
SYSCALL(sys_madvise,sys_madvise,sys32_madvise_wrapper)
SYSCALL(sys_getdents64,sys_getdents64,sys32_getdents64_wrapper) /* 220 */
SYSCALL(sys_fcntl64,sys_ni_syscall,compat_sys_fcntl64_wrapper)
-SYSCALL(sys_readahead,sys_readahead,sys32_readahead)
-SYSCALL(sys_sendfile64,sys_ni_syscall,sys32_sendfile64)
+SYSCALL(sys_readahead,sys_readahead,sys32_readahead_wrapper)
+SYSCALL(sys_sendfile64,sys_ni_syscall,sys32_sendfile64_wrapper)
SYSCALL(sys_setxattr,sys_setxattr,sys32_setxattr_wrapper)
SYSCALL(sys_lsetxattr,sys_lsetxattr,sys32_lsetxattr_wrapper) /* 225 */
SYSCALL(sys_fsetxattr,sys_fsetxattr,sys32_fsetxattr_wrapper)
@@ -245,11 +245,11 @@ SYSCALL(sys_removexattr,sys_removexattr,sys32_removexattr_wrapper)
SYSCALL(sys_lremovexattr,sys_lremovexattr,sys32_lremovexattr_wrapper)
SYSCALL(sys_fremovexattr,sys_fremovexattr,sys32_fremovexattr_wrapper) /* 235 */
SYSCALL(sys_gettid,sys_gettid,sys_gettid)
-SYSCALL(sys_tkill,sys_tkill,sys_tkill)
+SYSCALL(sys_tkill,sys_tkill,sys_tkill_wrapper)
SYSCALL(sys_futex,sys_futex,compat_sys_futex_wrapper)
SYSCALL(sys_sched_setaffinity,sys_sched_setaffinity,sys32_sched_setaffinity_wrapper)
SYSCALL(sys_sched_getaffinity,sys_sched_getaffinity,sys32_sched_getaffinity_wrapper) /* 240 */
-SYSCALL(sys_tgkill,sys_tgkill,sys_tgkill)
+SYSCALL(sys_tgkill,sys_tgkill,sys_tgkill_wrapper)
NI_SYSCALL /* reserved for TUX */
SYSCALL(sys_io_setup,sys_io_setup,sys32_io_setup_wrapper)
SYSCALL(sys_io_destroy,sys_io_destroy,sys32_io_destroy_wrapper)
@@ -261,7 +261,7 @@ SYSCALL(sys_epoll_create,sys_epoll_create,sys_epoll_create_wrapper)
SYSCALL(sys_epoll_ctl,sys_epoll_ctl,sys_epoll_ctl_wrapper) /* 250 */
SYSCALL(sys_epoll_wait,sys_epoll_wait,sys_epoll_wait_wrapper)
SYSCALL(sys_set_tid_address,sys_set_tid_address,sys32_set_tid_address_wrapper)
-SYSCALL(s390_fadvise64,sys_fadvise64_64,sys32_fadvise64_wrapper)
+SYSCALL(sys_s390_fadvise64,sys_fadvise64_64,sys32_fadvise64_wrapper)
SYSCALL(sys_timer_create,sys_timer_create,sys32_timer_create_wrapper)
SYSCALL(sys_timer_settime,sys_timer_settime,sys32_timer_settime_wrapper) /* 255 */
SYSCALL(sys_timer_gettime,sys_timer_gettime,sys32_timer_gettime_wrapper)
@@ -272,7 +272,7 @@ SYSCALL(sys_clock_gettime,sys_clock_gettime,sys32_clock_gettime_wrapper) /* 260
SYSCALL(sys_clock_getres,sys_clock_getres,sys32_clock_getres_wrapper)
SYSCALL(sys_clock_nanosleep,sys_clock_nanosleep,sys32_clock_nanosleep_wrapper)
NI_SYSCALL /* reserved for vserver */
-SYSCALL(s390_fadvise64_64,sys_ni_syscall,sys32_fadvise64_64_wrapper)
+SYSCALL(sys_s390_fadvise64_64,sys_ni_syscall,sys32_fadvise64_64_wrapper)
SYSCALL(sys_statfs64,sys_statfs64,compat_sys_statfs64_wrapper)
SYSCALL(sys_fstatfs64,sys_fstatfs64,compat_sys_fstatfs64_wrapper)
SYSCALL(sys_remap_file_pages,sys_remap_file_pages,sys32_remap_file_pages_wrapper)
@@ -288,7 +288,7 @@ SYSCALL(sys_mq_getsetattr,sys_mq_getsetattr,compat_sys_mq_getsetattr_wrapper)
SYSCALL(sys_kexec_load,sys_kexec_load,compat_sys_kexec_load_wrapper)
SYSCALL(sys_add_key,sys_add_key,compat_sys_add_key_wrapper)
SYSCALL(sys_request_key,sys_request_key,compat_sys_request_key_wrapper)
-SYSCALL(sys_keyctl,sys_keyctl,compat_sys_keyctl) /* 280 */
+SYSCALL(sys_keyctl,sys_keyctl,compat_sys_keyctl_wrapper) /* 280 */
SYSCALL(sys_waitid,sys_waitid,compat_sys_waitid_wrapper)
SYSCALL(sys_ioprio_set,sys_ioprio_set,sys_ioprio_set_wrapper)
SYSCALL(sys_ioprio_get,sys_ioprio_get,sys_ioprio_get_wrapper)
@@ -322,7 +322,7 @@ NI_SYSCALL /* 310 sys_move_pages */
SYSCALL(sys_getcpu,sys_getcpu,sys_getcpu_wrapper)
SYSCALL(sys_epoll_pwait,sys_epoll_pwait,compat_sys_epoll_pwait_wrapper)
SYSCALL(sys_utimes,sys_utimes,compat_sys_utimes_wrapper)
-SYSCALL(s390_fallocate,sys_fallocate,sys_fallocate_wrapper)
+SYSCALL(sys_s390_fallocate,sys_fallocate,sys_fallocate_wrapper)
SYSCALL(sys_utimensat,sys_utimensat,compat_sys_utimensat_wrapper) /* 315 */
SYSCALL(sys_signalfd,sys_signalfd,compat_sys_signalfd_wrapper)
NI_SYSCALL /* 317 old sys_timer_fd */
diff --git a/arch/s390/kernel/time.c b/arch/s390/kernel/time.c
index 5be981a36c3..fc468cae446 100644
--- a/arch/s390/kernel/time.c
+++ b/arch/s390/kernel/time.c
@@ -160,7 +160,7 @@ void init_cpu_timer(void)
cd->min_delta_ns = 1;
cd->max_delta_ns = LONG_MAX;
cd->rating = 400;
- cd->cpumask = cpumask_of_cpu(cpu);
+ cd->cpumask = cpumask_of(cpu);
cd->set_next_event = s390_next_event;
cd->set_mode = s390_set_mode;
@@ -399,8 +399,10 @@ static struct workqueue_struct *time_sync_wq;
static void __init time_init_wq(void)
{
- if (!time_sync_wq)
- time_sync_wq = create_singlethread_workqueue("timesync");
+ if (time_sync_wq)
+ return;
+ time_sync_wq = create_singlethread_workqueue("timesync");
+ stop_machine_create();
}
/*
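The cpumask change tracks the generic clockevents API of this era: cd->cpumask became a const struct cpumask pointer, and cpumask_of(cpu) returns a pointer into a static map instead of copying a full cpumask_t by value. A hedged sketch of registering a per-cpu clock event device under the new API (field subset assumed from the hunk above):

    #include <linux/clockchips.h>
    #include <linux/cpumask.h>

    static void register_percpu_clockevent_sketch(struct clock_event_device *cd,
                                                  int cpu)
    {
            cd->rating  = 400;
            cd->cpumask = cpumask_of(cpu);  /* pointer, no stack copy of the mask */
            clockevents_register_device(cd);
    }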
diff --git a/arch/s390/kernel/topology.c b/arch/s390/kernel/topology.c
index 90e9ba11eba..cc362c9ea8f 100644
--- a/arch/s390/kernel/topology.c
+++ b/arch/s390/kernel/topology.c
@@ -97,6 +97,11 @@ cpumask_t cpu_coregroup_map(unsigned int cpu)
return mask;
}
+const struct cpumask *cpu_coregroup_mask(unsigned int cpu)
+{
+ return &cpu_core_map[cpu];
+}
+
static void add_cpus_to_core(struct tl_cpu *tl_cpu, struct core_info *core)
{
unsigned int cpu;
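cpu_coregroup_mask() is the pointer-returning counterpart of cpu_coregroup_map(): instead of handing back a cpumask_t by value (whose size grows with NR_CPUS), it returns a pointer into the existing cpu_core_map array. A hedged sketch of a consumer (helper name invented):

    #include <linux/cpumask.h>

    const struct cpumask *cpu_coregroup_mask(unsigned int cpu); /* from the hunk above */

    static int core_sibling_count_sketch(unsigned int cpu)
    {
            /* counts bits through the returned pointer; no cpumask_t copy */
            return cpumask_weight(cpu_coregroup_mask(cpu));
    }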
diff --git a/arch/s390/kernel/vdso.c b/arch/s390/kernel/vdso.c
index 10a6ccef441..690e1781968 100644
--- a/arch/s390/kernel/vdso.c
+++ b/arch/s390/kernel/vdso.c
@@ -31,9 +31,6 @@
#include <asm/sections.h>
#include <asm/vdso.h>
-/* Max supported size for symbol names */
-#define MAX_SYMNAME 64
-
#if defined(CONFIG_32BIT) || defined(CONFIG_COMPAT)
extern char vdso32_start, vdso32_end;
static void *vdso32_kbase = &vdso32_start;
@@ -71,6 +68,119 @@ static union {
struct vdso_data *vdso_data = &vdso_data_store.data;
/*
+ * Set up the vdso data page.

+ */
+static void vdso_init_data(struct vdso_data *vd)
+{
+ unsigned int facility_list;
+
+ facility_list = stfl();
+ vd->ectg_available = switch_amode && (facility_list & 1);
+}
+
+#ifdef CONFIG_64BIT
+/*
+ * Set up the per cpu vdso data page.
+ */
+static void vdso_init_per_cpu_data(int cpu, struct vdso_per_cpu_data *vpcd)
+{
+}
+
+/*
+ * Allocate/free per cpu vdso data.
+ */
+#ifdef CONFIG_64BIT
+#define SEGMENT_ORDER 2
+#else
+#define SEGMENT_ORDER 1
+#endif
+
+int vdso_alloc_per_cpu(int cpu, struct _lowcore *lowcore)
+{
+ unsigned long segment_table, page_table, page_frame;
+ u32 *psal, *aste;
+ int i;
+
+ lowcore->vdso_per_cpu_data = __LC_PASTE;
+
+ if (!switch_amode || !vdso_enabled)
+ return 0;
+
+ segment_table = __get_free_pages(GFP_KERNEL, SEGMENT_ORDER);
+ page_table = get_zeroed_page(GFP_KERNEL | GFP_DMA);
+ page_frame = get_zeroed_page(GFP_KERNEL);
+ if (!segment_table || !page_table || !page_frame)
+ goto out;
+
+ clear_table((unsigned long *) segment_table, _SEGMENT_ENTRY_EMPTY,
+ PAGE_SIZE << SEGMENT_ORDER);
+ clear_table((unsigned long *) page_table, _PAGE_TYPE_EMPTY,
+ 256*sizeof(unsigned long));
+
+ *(unsigned long *) segment_table = _SEGMENT_ENTRY + page_table;
+ *(unsigned long *) page_table = _PAGE_RO + page_frame;
+
+ psal = (u32 *) (page_table + 256*sizeof(unsigned long));
+ aste = psal + 32;
+
+ for (i = 4; i < 32; i += 4)
+ psal[i] = 0x80000000;
+
+ lowcore->paste[4] = (u32)(addr_t) psal;
+ psal[0] = 0x20000000;
+ psal[2] = (u32)(addr_t) aste;
+ *(unsigned long *) (aste + 2) = segment_table +
+ _ASCE_TABLE_LENGTH + _ASCE_USER_BITS + _ASCE_TYPE_SEGMENT;
+ aste[4] = (u32)(addr_t) psal;
+ lowcore->vdso_per_cpu_data = page_frame;
+
+ vdso_init_per_cpu_data(cpu, (struct vdso_per_cpu_data *) page_frame);
+ return 0;
+
+out:
+ free_page(page_frame);
+ free_page(page_table);
+ free_pages(segment_table, SEGMENT_ORDER);
+ return -ENOMEM;
+}
+
+#ifdef CONFIG_HOTPLUG_CPU
+void vdso_free_per_cpu(int cpu, struct _lowcore *lowcore)
+{
+ unsigned long segment_table, page_table, page_frame;
+ u32 *psal, *aste;
+
+ if (!switch_amode || !vdso_enabled)
+ return;
+
+ psal = (u32 *)(addr_t) lowcore->paste[4];
+ aste = (u32 *)(addr_t) psal[2];
+ segment_table = *(unsigned long *)(aste + 2) & PAGE_MASK;
+ page_table = *(unsigned long *) segment_table;
+ page_frame = *(unsigned long *) page_table;
+
+ free_page(page_frame);
+ free_page(page_table);
+ free_pages(segment_table, SEGMENT_ORDER);
+}
+#endif /* CONFIG_HOTPLUG_CPU */
+
+static void __vdso_init_cr5(void *dummy)
+{
+ unsigned long cr5;
+
+ cr5 = offsetof(struct _lowcore, paste);
+ __ctl_load(cr5, 5, 5);
+}
+
+static void vdso_init_cr5(void)
+{
+ if (switch_amode && vdso_enabled)
+ on_each_cpu(__vdso_init_cr5, NULL, 1);
+}
+#endif /* CONFIG_64BIT */
+
+/*
* This is called from binfmt_elf, we create the special vma for the
* vDSO and insert it into the mm struct tree
*/
@@ -172,6 +282,9 @@ static int __init vdso_init(void)
{
int i;
+ if (!vdso_enabled)
+ return 0;
+ vdso_init_data(vdso_data);
#if defined(CONFIG_32BIT) || defined(CONFIG_COMPAT)
/* Calculate the size of the 32 bit vDSO */
vdso32_pages = ((&vdso32_end - &vdso32_start
@@ -208,6 +321,11 @@ static int __init vdso_init(void)
}
vdso64_pagelist[vdso64_pages - 1] = virt_to_page(vdso_data);
vdso64_pagelist[vdso64_pages] = NULL;
+#ifndef CONFIG_SMP
+ if (vdso_alloc_per_cpu(0, &S390_lowcore))
+ BUG();
+#endif
+ vdso_init_cr5();
#endif /* CONFIG_64BIT */
get_page(virt_to_page(vdso_data));
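vdso_alloc_per_cpu() builds a private segment table → page table → data page chain for each cpu and hangs it off the lowcore PASTE/ASTE entries so the per-cpu page is reachable in access-register mode. Note the error path: because free_page(0) and free_pages(0, order) are no-ops in Linux, a single out: label can unconditionally free all three allocations. A hedged sketch of that allocate-all-or-free-all idiom in isolation (names invented):

    #include <linux/errno.h>
    #include <linux/gfp.h>

    static int alloc_three_or_none_sketch(void)
    {
            unsigned long seg, pgt, frame;

            seg   = __get_free_pages(GFP_KERNEL, 2);        /* 4 pages */
            pgt   = get_zeroed_page(GFP_KERNEL | GFP_DMA);
            frame = get_zeroed_page(GFP_KERNEL);
            if (!seg || !pgt || !frame)
                    goto out;
            /* ... wire the tables together as above ... */
            return 0;
    out:
            /* freeing address 0 is a no-op, so one label covers all cases */
            free_page(frame);
            free_page(pgt);
            free_pages(seg, 2);
            return -ENOMEM;
    }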
diff --git a/arch/s390/kernel/vdso32/gettimeofday.S b/arch/s390/kernel/vdso32/gettimeofday.S
index c32f29c3d70..ad8acfc949f 100644
--- a/arch/s390/kernel/vdso32/gettimeofday.S
+++ b/arch/s390/kernel/vdso32/gettimeofday.S
@@ -13,10 +13,6 @@
#include <asm/asm-offsets.h>
#include <asm/unistd.h>
-#include <asm/vdso.h>
-#include <asm/asm-offsets.h>
-#include <asm/unistd.h>
-
.text
.align 4
.globl __kernel_gettimeofday
diff --git a/arch/s390/kernel/vdso64/clock_getres.S b/arch/s390/kernel/vdso64/clock_getres.S
index 488e31a3c0e..9ce8caafdb4 100644
--- a/arch/s390/kernel/vdso64/clock_getres.S
+++ b/arch/s390/kernel/vdso64/clock_getres.S
@@ -22,7 +22,12 @@ __kernel_clock_getres:
cghi %r2,CLOCK_REALTIME
je 0f
cghi %r2,CLOCK_MONOTONIC
+ je 0f
+ cghi %r2,-2 /* CLOCK_THREAD_CPUTIME_ID for this thread */
jne 2f
+ larl %r5,_vdso_data
+ icm %r0,15,__LC_ECTG_OK(%r5)
+ jz 2f
0: ltgr %r3,%r3
jz 1f /* res == NULL */
larl %r1,3f
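The added branch teaches __kernel_clock_getres about CLOCK_THREAD_CPUTIME_ID (-2 denotes the calling thread), but only claims it when the per-cpu ECTG data is usable; otherwise control falls through to the real system call. The dispatch, restated as a hedged C sketch (constants per the asm comments):

    /* Hedged sketch of the control flow above; return 0 = answered in the vdso. */
    static long clock_getres_dispatch_sketch(long clk, int ectg_ok)
    {
            if (clk == 0 || clk == 1)       /* CLOCK_REALTIME, CLOCK_MONOTONIC */
                    return 0;
            if (clk == -2 && ectg_ok)       /* CLOCK_THREAD_CPUTIME_ID, this thread */
                    return 0;
            return -1;                      /* fall back to the clock_getres syscall */
    }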
diff --git a/arch/s390/kernel/vdso64/clock_gettime.S b/arch/s390/kernel/vdso64/clock_gettime.S
index 738a410b7eb..79dbfee831e 100644
--- a/arch/s390/kernel/vdso64/clock_gettime.S
+++ b/arch/s390/kernel/vdso64/clock_gettime.S
@@ -22,8 +22,10 @@ __kernel_clock_gettime:
larl %r5,_vdso_data
cghi %r2,CLOCK_REALTIME
je 4f
+ cghi %r2,-2 /* CLOCK_THREAD_CPUTIME_ID for this thread */
+ je 9f
cghi %r2,CLOCK_MONOTONIC
- jne 9f
+ jne 12f
/* CLOCK_MONOTONIC */
ltgr %r3,%r3
@@ -42,7 +44,7 @@ __kernel_clock_gettime:
alg %r0,__VDSO_WTOM_SEC(%r5)
clg %r4,__VDSO_UPD_COUNT(%r5) /* check update counter */
jne 0b
- larl %r5,10f
+ larl %r5,13f
1: clg %r1,0(%r5)
jl 2f
slg %r1,0(%r5)
@@ -68,7 +70,7 @@ __kernel_clock_gettime:
lg %r0,__VDSO_XTIME_SEC(%r5)
clg %r4,__VDSO_UPD_COUNT(%r5) /* check update counter */
jne 5b
- larl %r5,10f
+ larl %r5,13f
6: clg %r1,0(%r5)
jl 7f
slg %r1,0(%r5)
@@ -79,11 +81,38 @@ __kernel_clock_gettime:
8: lghi %r2,0
br %r14
+ /* CLOCK_THREAD_CPUTIME_ID for this thread */
+9: icm %r0,15,__VDSO_ECTG_OK(%r5)
+ jz 12f
+ ear %r2,%a4
+ llilh %r4,0x0100
+ sar %a4,%r4
+ lghi %r4,0
+ sacf 512 /* Magic ectg instruction */
+ .insn ssf,0xc80100000000,__VDSO_ECTG_BASE(4),__VDSO_ECTG_USER(4),4
+ sacf 0
+ sar %a4,%r2
+ algr %r1,%r0 /* r1 = cputime as TOD value */
+ mghi %r1,1000 /* convert to nanoseconds */
+ srlg %r1,%r1,12 /* r1 = cputime in nanosec */
+ lgr %r4,%r1
+ larl %r5,13f
+ srlg %r1,%r1,9 /* divide by 1000000000 */
+ mlg %r0,8(%r5)
+ srlg %r0,%r0,11 /* r0 = tv_sec */
+ stg %r0,0(%r3)
+ msg %r0,0(%r5) /* calculate tv_nsec */
+ slgr %r4,%r0 /* r4 = tv_nsec */
+ stg %r4,8(%r3)
+ lghi %r2,0
+ br %r14
+
/* Fallback to system call */
-9: lghi %r1,__NR_clock_gettime
+12: lghi %r1,__NR_clock_gettime
svc 0
br %r14
-10: .quad 1000000000
+13: .quad 1000000000
+14: .quad 19342813113834067
.cfi_endproc
.size __kernel_clock_gettime,.-__kernel_clock_gettime
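The CLOCK_THREAD_CPUTIME_ID path converts the ECTG result from TOD units to nanoseconds (bit 51 of the TOD format is one microsecond, i.e. 4096 units per microsecond, so ns = tod * 1000 >> 12) and then splits seconds from nanoseconds without a divide instruction: 19342813113834067 is ceil(2^84 / 10^9), so tv_sec = ((ns >> 9) * C) >> (64 + 11). A hedged userspace check of the constant, using GCC's unsigned __int128 for the 128-bit product that mlg produces:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            const uint64_t C  = 19342813113834067ULL;   /* ceil(2^84 / 1e9) */
            const uint64_t ns = 123456789123456789ULL;  /* arbitrary test value */

            /* mirror the asm: srlg 9, mlg (keep high 64 bits), srlg 11 */
            uint64_t hi  = (uint64_t)(((unsigned __int128)(ns >> 9) * C) >> 64);
            uint64_t sec = hi >> 11;

            printf("%llu == %llu\n", (unsigned long long)sec,
                   (unsigned long long)(ns / 1000000000ULL));
            return 0;
    }

tv_nsec is then recomputed as ns - tv_sec * 10^9 (the msg/slgr pair), so rounding in the multiply-shift cannot leak into the nanosecond field.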
diff --git a/arch/s390/kernel/vtime.c b/arch/s390/kernel/vtime.c
index 75a6e62ea97..ecf0304e61c 100644
--- a/arch/s390/kernel/vtime.c
+++ b/arch/s390/kernel/vtime.c
@@ -23,19 +23,43 @@
#include <asm/s390_ext.h>
#include <asm/timer.h>
#include <asm/irq_regs.h>
+#include <asm/cpu.h>
static ext_int_info_t ext_int_info_timer;
+
static DEFINE_PER_CPU(struct vtimer_queue, virt_cpu_timer);
+DEFINE_PER_CPU(struct s390_idle_data, s390_idle) = {
+ .lock = __SPIN_LOCK_UNLOCKED(s390_idle.lock)
+};
+
+static inline __u64 get_vtimer(void)
+{
+ __u64 timer;
+
+ asm volatile("STPT %0" : "=m" (timer));
+ return timer;
+}
+
+static inline void set_vtimer(__u64 expires)
+{
+ __u64 timer;
+
+ asm volatile (" STPT %0\n" /* Store current cpu timer value */
+	"	SPT %1"	/* Set new value immediately afterwards */
+ : "=m" (timer) : "m" (expires) );
+ S390_lowcore.system_timer += S390_lowcore.last_update_timer - timer;
+ S390_lowcore.last_update_timer = expires;
+}
+
/*
* Update process times based on virtual cpu times stored by entry.S
* to the lowcore fields user_timer, system_timer & steal_clock.
*/
-void account_process_tick(struct task_struct *tsk, int user_tick)
+static void do_account_vtime(struct task_struct *tsk, int hardirq_offset)
{
- cputime_t cputime;
- __u64 timer, clock;
- int rcu_user_flag;
+ struct thread_info *ti = task_thread_info(tsk);
+ __u64 timer, clock, user, system, steal;
timer = S390_lowcore.last_update_timer;
clock = S390_lowcore.last_update_clock;
@@ -44,50 +68,41 @@ void account_process_tick(struct task_struct *tsk, int user_tick)
: "=m" (S390_lowcore.last_update_timer),
"=m" (S390_lowcore.last_update_clock) );
S390_lowcore.system_timer += timer - S390_lowcore.last_update_timer;
- S390_lowcore.steal_clock += S390_lowcore.last_update_clock - clock;
-
- cputime = S390_lowcore.user_timer >> 12;
- rcu_user_flag = cputime != 0;
- S390_lowcore.user_timer -= cputime << 12;
- S390_lowcore.steal_clock -= cputime << 12;
- account_user_time(tsk, cputime);
-
- cputime = S390_lowcore.system_timer >> 12;
- S390_lowcore.system_timer -= cputime << 12;
- S390_lowcore.steal_clock -= cputime << 12;
- account_system_time(tsk, HARDIRQ_OFFSET, cputime);
-
- cputime = S390_lowcore.steal_clock;
- if ((__s64) cputime > 0) {
- cputime >>= 12;
- S390_lowcore.steal_clock -= cputime << 12;
- account_steal_time(tsk, cputime);
+ S390_lowcore.steal_timer += S390_lowcore.last_update_clock - clock;
+
+ user = S390_lowcore.user_timer - ti->user_timer;
+ S390_lowcore.steal_timer -= user;
+ ti->user_timer = S390_lowcore.user_timer;
+ account_user_time(tsk, user, user);
+
+ system = S390_lowcore.system_timer - ti->system_timer;
+ S390_lowcore.steal_timer -= system;
+ ti->system_timer = S390_lowcore.system_timer;
+ account_system_time(tsk, hardirq_offset, system, system);
+
+ steal = S390_lowcore.steal_timer;
+ if ((s64) steal > 0) {
+ S390_lowcore.steal_timer = 0;
+ account_steal_time(steal);
}
}
-/*
- * Update process times based on virtual cpu times stored by entry.S
- * to the lowcore fields user_timer, system_timer & steal_clock.
- */
-void account_vtime(struct task_struct *tsk)
+void account_vtime(struct task_struct *prev, struct task_struct *next)
{
- cputime_t cputime;
- __u64 timer;
-
- timer = S390_lowcore.last_update_timer;
- asm volatile (" STPT %0" /* Store current cpu timer value */
- : "=m" (S390_lowcore.last_update_timer) );
- S390_lowcore.system_timer += timer - S390_lowcore.last_update_timer;
-
- cputime = S390_lowcore.user_timer >> 12;
- S390_lowcore.user_timer -= cputime << 12;
- S390_lowcore.steal_clock -= cputime << 12;
- account_user_time(tsk, cputime);
+ struct thread_info *ti;
+
+ do_account_vtime(prev, 0);
+ ti = task_thread_info(prev);
+ ti->user_timer = S390_lowcore.user_timer;
+ ti->system_timer = S390_lowcore.system_timer;
+ ti = task_thread_info(next);
+ S390_lowcore.user_timer = ti->user_timer;
+ S390_lowcore.system_timer = ti->system_timer;
+}
- cputime = S390_lowcore.system_timer >> 12;
- S390_lowcore.system_timer -= cputime << 12;
- S390_lowcore.steal_clock -= cputime << 12;
- account_system_time(tsk, 0, cputime);
+void account_process_tick(struct task_struct *tsk, int user_tick)
+{
+ do_account_vtime(tsk, HARDIRQ_OFFSET);
}
/*
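do_account_vtime() above switches from draining the lowcore counters in 4096-unit chunks to delta accounting: the lowcore fields accumulate monotonically, per-thread snapshots in thread_info record what has already been reported, and whatever is left of the wall-clock delta after subtracting the cpu-timer deltas is steal time. The scheme, reduced to a hedged sketch (field names borrowed from the hunk):

    #include <stdint.h>

    struct acct_snap { uint64_t user_timer, system_timer; };

    static void account_deltas_sketch(struct acct_snap *ti,
                                      uint64_t lc_user, uint64_t lc_system,
                                      uint64_t *lc_steal)
    {
            uint64_t user   = lc_user - ti->user_timer;      /* new user time */
            uint64_t system = lc_system - ti->system_timer;  /* new system time */

            ti->user_timer   = lc_user;      /* advance the snapshots */
            ti->system_timer = lc_system;

            /* the clock delta not explained by cpu time is steal time */
            *lc_steal -= user + system;
    }

On a task switch, account_vtime() additionally reloads the lowcore baselines from the incoming task's snapshots, so later deltas are attributed to the right thread.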
@@ -96,80 +111,131 @@ void account_vtime(struct task_struct *tsk)
*/
void account_system_vtime(struct task_struct *tsk)
{
- cputime_t cputime;
- __u64 timer;
+ struct thread_info *ti = task_thread_info(tsk);
+ __u64 timer, system;
timer = S390_lowcore.last_update_timer;
- asm volatile (" STPT %0" /* Store current cpu timer value */
- : "=m" (S390_lowcore.last_update_timer) );
+ S390_lowcore.last_update_timer = get_vtimer();
S390_lowcore.system_timer += timer - S390_lowcore.last_update_timer;
- cputime = S390_lowcore.system_timer >> 12;
- S390_lowcore.system_timer -= cputime << 12;
- S390_lowcore.steal_clock -= cputime << 12;
- account_system_time(tsk, 0, cputime);
+ system = S390_lowcore.system_timer - ti->system_timer;
+ S390_lowcore.steal_timer -= system;
+ ti->system_timer = S390_lowcore.system_timer;
+ account_system_time(tsk, 0, system, system);
}
EXPORT_SYMBOL_GPL(account_system_vtime);
-static inline void set_vtimer(__u64 expires)
-{
- __u64 timer;
-
- asm volatile (" STPT %0\n" /* Store current cpu timer value */
- " SPT %1" /* Set new value immediatly afterwards */
- : "=m" (timer) : "m" (expires) );
- S390_lowcore.system_timer += S390_lowcore.last_update_timer - timer;
- S390_lowcore.last_update_timer = expires;
-
- /* store expire time for this CPU timer */
- __get_cpu_var(virt_cpu_timer).to_expire = expires;
-}
-
-void vtime_start_cpu_timer(void)
+void vtime_start_cpu(void)
{
- struct vtimer_queue *vt_list;
-
- vt_list = &__get_cpu_var(virt_cpu_timer);
-
- /* CPU timer interrupt is pending, don't reprogramm it */
- if (vt_list->idle & 1LL<<63)
- return;
+ struct s390_idle_data *idle = &__get_cpu_var(s390_idle);
+ struct vtimer_queue *vq = &__get_cpu_var(virt_cpu_timer);
+ __u64 idle_time, expires;
+
+ /* Account time spent with enabled wait psw loaded as idle time. */
+ idle_time = S390_lowcore.int_clock - idle->idle_enter;
+ account_idle_time(idle_time);
+ S390_lowcore.last_update_clock = S390_lowcore.int_clock;
+
+ /* Account system time spent going idle. */
+ S390_lowcore.system_timer += S390_lowcore.last_update_timer - vq->idle;
+ S390_lowcore.last_update_timer = S390_lowcore.async_enter_timer;
+
+ /* Restart vtime CPU timer */
+ if (vq->do_spt) {
+ /* Program old expire value but first save progress. */
+ expires = vq->idle - S390_lowcore.async_enter_timer;
+ expires += get_vtimer();
+ set_vtimer(expires);
+ } else {
+ /* Don't account the CPU timer delta while the cpu was idle. */
+ vq->elapsed -= vq->idle - S390_lowcore.async_enter_timer;
+ }
- if (!list_empty(&vt_list->list))
- set_vtimer(vt_list->idle);
+ spin_lock(&idle->lock);
+ idle->idle_time += idle_time;
+ idle->idle_enter = 0ULL;
+ idle->idle_count++;
+ spin_unlock(&idle->lock);
}
-void vtime_stop_cpu_timer(void)
+void vtime_stop_cpu(void)
{
- struct vtimer_queue *vt_list;
-
- vt_list = &__get_cpu_var(virt_cpu_timer);
-
- /* nothing to do */
- if (list_empty(&vt_list->list)) {
- vt_list->idle = VTIMER_MAX_SLICE;
- goto fire;
+ struct s390_idle_data *idle = &__get_cpu_var(s390_idle);
+ struct vtimer_queue *vq = &__get_cpu_var(virt_cpu_timer);
+ psw_t psw;
+
+ /* Wait for external, I/O or machine check interrupt. */
+ psw.mask = psw_kernel_bits | PSW_MASK_WAIT | PSW_MASK_IO | PSW_MASK_EXT;
+
+ /* Check if the CPU timer needs to be reprogrammed. */
+ if (vq->do_spt) {
+ __u64 vmax = VTIMER_MAX_SLICE;
+ /*
+ * The inline assembly is equivalent to
+ * vq->idle = get_cpu_timer();
+ * set_cpu_timer(VTIMER_MAX_SLICE);
+ * idle->idle_enter = get_clock();
+ * __load_psw_mask(psw_kernel_bits | PSW_MASK_WAIT |
+ * PSW_MASK_IO | PSW_MASK_EXT);
+ * The difference is that the inline assembly makes sure that
+	 * the last three instructions are stpt, stck and lpsw in that
+ * order. This is done to increase the precision.
+ */
+ asm volatile(
+#ifndef CONFIG_64BIT
+ " basr 1,0\n"
+ "0: ahi 1,1f-0b\n"
+ " st 1,4(%2)\n"
+#else /* CONFIG_64BIT */
+ " larl 1,1f\n"
+ " stg 1,8(%2)\n"
+#endif /* CONFIG_64BIT */
+ " stpt 0(%4)\n"
+ " spt 0(%5)\n"
+ " stck 0(%3)\n"
+#ifndef CONFIG_64BIT
+ " lpsw 0(%2)\n"
+#else /* CONFIG_64BIT */
+ " lpswe 0(%2)\n"
+#endif /* CONFIG_64BIT */
+ "1:"
+ : "=m" (idle->idle_enter), "=m" (vq->idle)
+ : "a" (&psw), "a" (&idle->idle_enter),
+ "a" (&vq->idle), "a" (&vmax), "m" (vmax), "m" (psw)
+ : "memory", "cc", "1");
+ } else {
+ /*
+ * The inline assembly is equivalent to
+ * vq->idle = get_cpu_timer();
+ * idle->idle_enter = get_clock();
+ * __load_psw_mask(psw_kernel_bits | PSW_MASK_WAIT |
+ * PSW_MASK_IO | PSW_MASK_EXT);
+ * The difference is that the inline assembly makes sure that
+	 * the last three instructions are stpt, stck and lpsw in that
+ * order. This is done to increase the precision.
+ */
+ asm volatile(
+#ifndef CONFIG_64BIT
+ " basr 1,0\n"
+ "0: ahi 1,1f-0b\n"
+ " st 1,4(%2)\n"
+#else /* CONFIG_64BIT */
+ " larl 1,1f\n"
+ " stg 1,8(%2)\n"
+#endif /* CONFIG_64BIT */
+ " stpt 0(%4)\n"
+ " stck 0(%3)\n"
+#ifndef CONFIG_64BIT
+ " lpsw 0(%2)\n"
+#else /* CONFIG_64BIT */
+ " lpswe 0(%2)\n"
+#endif /* CONFIG_64BIT */
+ "1:"
+ : "=m" (idle->idle_enter), "=m" (vq->idle)
+ : "a" (&psw), "a" (&idle->idle_enter),
+ "a" (&vq->idle), "m" (psw)
+ : "memory", "cc", "1");
}
-
- /* store the actual expire value */
- asm volatile ("STPT %0" : "=m" (vt_list->idle));
-
- /*
- * If the CPU timer is negative we don't reprogramm
- * it because we will get instantly an interrupt.
- */
- if (vt_list->idle & 1LL<<63)
- return;
-
- vt_list->offset += vt_list->to_expire - vt_list->idle;
-
- /*
- * We cannot halt the CPU timer, we just write a value that
- * nearly never expires (only after 71 years) and re-write
- * the stored expire value if we continue the timer
- */
- fire:
- set_vtimer(VTIMER_MAX_SLICE);
}
/*
@@ -195,30 +261,23 @@ static void list_add_sorted(struct vtimer_list *timer, struct list_head *head)
*/
static void do_callbacks(struct list_head *cb_list)
{
- struct vtimer_queue *vt_list;
+ struct vtimer_queue *vq;
struct vtimer_list *event, *tmp;
- void (*fn)(unsigned long);
- unsigned long data;
if (list_empty(cb_list))
return;
- vt_list = &__get_cpu_var(virt_cpu_timer);
+ vq = &__get_cpu_var(virt_cpu_timer);
list_for_each_entry_safe(event, tmp, cb_list, entry) {
- fn = event->function;
- data = event->data;
- fn(data);
-
- if (!event->interval)
- /* delete one shot timer */
- list_del_init(&event->entry);
- else {
- /* move interval timer back to list */
- spin_lock(&vt_list->lock);
- list_del_init(&event->entry);
- list_add_sorted(event, &vt_list->list);
- spin_unlock(&vt_list->lock);
+ list_del_init(&event->entry);
+ (event->function)(event->data);
+ if (event->interval) {
+ /* Recharge interval timer */
+ event->expires = event->interval + vq->elapsed;
+ spin_lock(&vq->lock);
+ list_add_sorted(event, &vq->list);
+ spin_unlock(&vq->lock);
}
}
}
@@ -228,64 +287,57 @@ static void do_callbacks(struct list_head *cb_list)
*/
static void do_cpu_timer_interrupt(__u16 error_code)
{
- __u64 next, delta;
- struct vtimer_queue *vt_list;
+ struct vtimer_queue *vq;
struct vtimer_list *event, *tmp;
- struct list_head *ptr;
- /* the callback queue */
- struct list_head cb_list;
+ struct list_head cb_list; /* the callback queue */
+ __u64 elapsed, next;
INIT_LIST_HEAD(&cb_list);
- vt_list = &__get_cpu_var(virt_cpu_timer);
+ vq = &__get_cpu_var(virt_cpu_timer);
/* walk timer list, fire all expired events */
- spin_lock(&vt_list->lock);
-
- if (vt_list->to_expire < VTIMER_MAX_SLICE)
- vt_list->offset += vt_list->to_expire;
-
- list_for_each_entry_safe(event, tmp, &vt_list->list, entry) {
- if (event->expires > vt_list->offset)
- /* found first unexpired event, leave */
- break;
-
- /* re-charge interval timer, we have to add the offset */
- if (event->interval)
- event->expires = event->interval + vt_list->offset;
-
- /* move expired timer to the callback queue */
- list_move_tail(&event->entry, &cb_list);
+ spin_lock(&vq->lock);
+
+ elapsed = vq->elapsed + (vq->timer - S390_lowcore.async_enter_timer);
+ BUG_ON((s64) elapsed < 0);
+ vq->elapsed = 0;
+ list_for_each_entry_safe(event, tmp, &vq->list, entry) {
+ if (event->expires < elapsed)
+ /* move expired timer to the callback queue */
+ list_move_tail(&event->entry, &cb_list);
+ else
+ event->expires -= elapsed;
}
- spin_unlock(&vt_list->lock);
+ spin_unlock(&vq->lock);
+
+ vq->do_spt = list_empty(&cb_list);
do_callbacks(&cb_list);
/* next event is first in list */
- spin_lock(&vt_list->lock);
- if (!list_empty(&vt_list->list)) {
- ptr = vt_list->list.next;
- event = list_entry(ptr, struct vtimer_list, entry);
- next = event->expires - vt_list->offset;
-
- /* add the expired time from this interrupt handler
- * and the callback functions
- */
- asm volatile ("STPT %0" : "=m" (delta));
- delta = 0xffffffffffffffffLL - delta + 1;
- vt_list->offset += delta;
- next -= delta;
- } else {
- vt_list->offset = 0;
- next = VTIMER_MAX_SLICE;
- }
- spin_unlock(&vt_list->lock);
- set_vtimer(next);
+ next = VTIMER_MAX_SLICE;
+ spin_lock(&vq->lock);
+ if (!list_empty(&vq->list)) {
+ event = list_first_entry(&vq->list, struct vtimer_list, entry);
+ next = event->expires;
+ } else
+ vq->do_spt = 0;
+ spin_unlock(&vq->lock);
+	/*
+	 * To improve precision, add the time spent in the
+	 * interrupt handler to the elapsed time.
+	 * Note: the CPU timer counts down and we got an
+	 * interrupt, so the current content is negative.
+	 */
+ elapsed = S390_lowcore.async_enter_timer - get_vtimer();
+ set_vtimer(next - elapsed);
+ vq->timer = next - elapsed;
+ vq->elapsed = elapsed;
}
void init_virt_timer(struct vtimer_list *timer)
{
timer->function = NULL;
INIT_LIST_HEAD(&timer->entry);
- spin_lock_init(&timer->lock);
}
EXPORT_SYMBOL(init_virt_timer);
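Queued expiry values are now kept relative to the last interrupt rather than measured against an absolute running offset: do_cpu_timer_interrupt() computes how much cpu time elapsed, fires everything below that mark, and rebases the survivors by subtracting it. A hedged toy version of the rebase step:

    #include <stddef.h>
    #include <stdint.h>

    /* returns how many timers fired; survivors are rebased in place */
    static size_t expire_relative_sketch(uint64_t *expires, size_t n,
                                         uint64_t elapsed)
    {
            size_t fired = 0;

            for (size_t i = 0; i < n; i++) {
                    if (expires[i] < elapsed)
                            fired++;                 /* would move to cb_list */
                    else
                            expires[i] -= elapsed;   /* rebase to "now" */
            }
            return fired;
    }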
@@ -299,44 +351,40 @@ static inline int vtimer_pending(struct vtimer_list *timer)
*/
static void internal_add_vtimer(struct vtimer_list *timer)
{
+ struct vtimer_queue *vq;
unsigned long flags;
- __u64 done;
- struct vtimer_list *event;
- struct vtimer_queue *vt_list;
+ __u64 left, expires;
- vt_list = &per_cpu(virt_cpu_timer, timer->cpu);
- spin_lock_irqsave(&vt_list->lock, flags);
+ vq = &per_cpu(virt_cpu_timer, timer->cpu);
+ spin_lock_irqsave(&vq->lock, flags);
BUG_ON(timer->cpu != smp_processor_id());
- /* if list is empty we only have to set the timer */
- if (list_empty(&vt_list->list)) {
- /* reset the offset, this may happen if the last timer was
- * just deleted by mod_virt_timer and the interrupt
- * didn't happen until here
- */
- vt_list->offset = 0;
- goto fire;
+ if (list_empty(&vq->list)) {
+ /* First timer on this cpu, just program it. */
+ list_add(&timer->entry, &vq->list);
+ set_vtimer(timer->expires);
+ vq->timer = timer->expires;
+ vq->elapsed = 0;
+ } else {
+ /* Check progress of old timers. */
+ expires = timer->expires;
+ left = get_vtimer();
+ if (likely((s64) expires < (s64) left)) {
+ /* The new timer expires before the current timer. */
+ set_vtimer(expires);
+ vq->elapsed += vq->timer - left;
+ vq->timer = expires;
+ } else {
+ vq->elapsed += vq->timer - left;
+ vq->timer = left;
+ }
+ /* Insert new timer into per cpu list. */
+ timer->expires += vq->elapsed;
+ list_add_sorted(timer, &vq->list);
}
- /* save progress */
- asm volatile ("STPT %0" : "=m" (done));
-
- /* calculate completed work */
- done = vt_list->to_expire - done + vt_list->offset;
- vt_list->offset = 0;
-
- list_for_each_entry(event, &vt_list->list, entry)
- event->expires -= done;
-
- fire:
- list_add_sorted(timer, &vt_list->list);
-
- /* get first element, which is the next vtimer slice */
- event = list_entry(vt_list->list.next, struct vtimer_list, entry);
-
- set_vtimer(event->expires);
- spin_unlock_irqrestore(&vt_list->lock, flags);
+ spin_unlock_irqrestore(&vq->lock, flags);
/* release CPU acquired in prepare_vtimer or mod_virt_timer() */
put_cpu();
}
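Because the queue is only rebased from the interrupt handler, a timer added in between must be biased by the progress the current slice has already made: vq->elapsed accumulates the consumed part of the programmed slice, and the new timer is queued at expires + vq->elapsed so it sorts correctly against the rebased entries. A hedged sketch of that bias computation (the set_vtimer() call is elided):

    #include <stdint.h>

    struct vq_sketch { uint64_t timer, elapsed; };

    /* returns the expiry value to insert into the sorted per-cpu list */
    static uint64_t bias_new_timer_sketch(struct vq_sketch *vq,
                                          uint64_t left,     /* get_vtimer() */
                                          uint64_t expires)
    {
            vq->elapsed += vq->timer - left;  /* progress of the running slice */
            if ((int64_t)expires < (int64_t)left)
                    vq->timer = expires;      /* new timer fires first: reprogram */
            else
                    vq->timer = left;
            return expires + vq->elapsed;
    }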
@@ -381,14 +429,15 @@ EXPORT_SYMBOL(add_virt_timer_periodic);
* If we change a pending timer the function must be called on the CPU
* where the timer is running on, e.g. by smp_call_function_single()
*
- * The original mod_timer adds the timer if it is not pending. For compatibility
- * we do the same. The timer will be added on the current CPU as a oneshot timer.
+ * The original mod_timer adds the timer if it is not pending. For
+ * compatibility we do the same. The timer will be added on the current
+ * CPU as a one-shot timer.
*
* returns whether it has modified a pending timer (1) or not (0)
*/
int mod_virt_timer(struct vtimer_list *timer, __u64 expires)
{
- struct vtimer_queue *vt_list;
+ struct vtimer_queue *vq;
unsigned long flags;
int cpu;
@@ -404,17 +453,17 @@ int mod_virt_timer(struct vtimer_list *timer, __u64 expires)
return 1;
cpu = get_cpu();
- vt_list = &per_cpu(virt_cpu_timer, cpu);
+ vq = &per_cpu(virt_cpu_timer, cpu);
/* check if we run on the right CPU */
BUG_ON(timer->cpu != cpu);
/* disable interrupts before test if timer is pending */
- spin_lock_irqsave(&vt_list->lock, flags);
+ spin_lock_irqsave(&vq->lock, flags);
/* if timer isn't pending add it on the current CPU */
if (!vtimer_pending(timer)) {
- spin_unlock_irqrestore(&vt_list->lock, flags);
+ spin_unlock_irqrestore(&vq->lock, flags);
/* we do not activate an interval timer with mod_virt_timer */
timer->interval = 0;
timer->expires = expires;
@@ -431,7 +480,7 @@ int mod_virt_timer(struct vtimer_list *timer, __u64 expires)
timer->interval = expires;
/* the timer can't expire anymore so we can release the lock */
- spin_unlock_irqrestore(&vt_list->lock, flags);
+ spin_unlock_irqrestore(&vq->lock, flags);
internal_add_vtimer(timer);
return 1;
}
@@ -445,25 +494,19 @@ EXPORT_SYMBOL(mod_virt_timer);
int del_virt_timer(struct vtimer_list *timer)
{
unsigned long flags;
- struct vtimer_queue *vt_list;
+ struct vtimer_queue *vq;
/* check if timer is pending */
if (!vtimer_pending(timer))
return 0;
- vt_list = &per_cpu(virt_cpu_timer, timer->cpu);
- spin_lock_irqsave(&vt_list->lock, flags);
+ vq = &per_cpu(virt_cpu_timer, timer->cpu);
+ spin_lock_irqsave(&vq->lock, flags);
/* we don't interrupt a running timer, just let it expire! */
list_del_init(&timer->entry);
- /* last timer removed */
- if (list_empty(&vt_list->list)) {
- vt_list->to_expire = 0;
- vt_list->offset = 0;
- }
-
- spin_unlock_irqrestore(&vt_list->lock, flags);
+ spin_unlock_irqrestore(&vq->lock, flags);
return 1;
}
EXPORT_SYMBOL(del_virt_timer);
@@ -473,24 +516,23 @@ EXPORT_SYMBOL(del_virt_timer);
*/
void init_cpu_vtimer(void)
{
- struct vtimer_queue *vt_list;
+ struct thread_info *ti = current_thread_info();
+ struct vtimer_queue *vq;
+
+ S390_lowcore.user_timer = ti->user_timer;
+ S390_lowcore.system_timer = ti->system_timer;
/* kick the virtual timer */
- S390_lowcore.exit_timer = VTIMER_MAX_SLICE;
- S390_lowcore.last_update_timer = VTIMER_MAX_SLICE;
- asm volatile ("SPT %0" : : "m" (S390_lowcore.last_update_timer));
asm volatile ("STCK %0" : "=m" (S390_lowcore.last_update_clock));
+ asm volatile ("STPT %0" : "=m" (S390_lowcore.last_update_timer));
+
+ /* initialize per cpu vtimer structure */
+ vq = &__get_cpu_var(virt_cpu_timer);
+ INIT_LIST_HEAD(&vq->list);
+ spin_lock_init(&vq->lock);
/* enable cpu timer interrupts */
__ctl_set_bit(0,10);
-
- vt_list = &__get_cpu_var(virt_cpu_timer);
- INIT_LIST_HEAD(&vt_list->list);
- spin_lock_init(&vt_list->lock);
- vt_list->to_expire = 0;
- vt_list->offset = 0;
- vt_list->idle = 0;
-
}
void __init vtime_init(void)
diff --git a/arch/s390/kvm/diag.c b/arch/s390/kvm/diag.c
index a0775e1f08d..8300309698f 100644
--- a/arch/s390/kvm/diag.c
+++ b/arch/s390/kvm/diag.c
@@ -47,7 +47,7 @@ static int __diag_ipl_functions(struct kvm_vcpu *vcpu)
vcpu->run->s390_reset_flags |= KVM_S390_RESET_IPL;
vcpu->run->s390_reset_flags |= KVM_S390_RESET_CPU_INIT;
vcpu->run->exit_reason = KVM_EXIT_S390_RESET;
- VCPU_EVENT(vcpu, 3, "requesting userspace resets %lx",
+ VCPU_EVENT(vcpu, 3, "requesting userspace resets %llx",
vcpu->run->s390_reset_flags);
return -EREMOTE;
}
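The %lx → %llx changes here and in the interrupt.c and priv.c hunks below appear to follow the conversion of the s390 __u64/__s64 types to the common int-ll64 definitions, under which __u64 is unsigned long long even on 64-bit builds, so printing it with %lx draws format warnings. The rule, as a hedged one-function sketch:

    #include <stdio.h>

    typedef unsigned long long sketch_u64;   /* int-ll64 style __u64 (assumed) */

    static void print_flags_sketch(sketch_u64 flags)
    {
            printf("requesting userspace resets %llx\n", flags);  /* not %lx */
    }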
diff --git a/arch/s390/kvm/interrupt.c b/arch/s390/kvm/interrupt.c
index 2960702b482..f4fe28a2521 100644
--- a/arch/s390/kvm/interrupt.c
+++ b/arch/s390/kvm/interrupt.c
@@ -160,7 +160,7 @@ static void __do_deliver_interrupt(struct kvm_vcpu *vcpu,
break;
case KVM_S390_INT_VIRTIO:
- VCPU_EVENT(vcpu, 4, "interrupt: virtio parm:%x,parm64:%lx",
+ VCPU_EVENT(vcpu, 4, "interrupt: virtio parm:%x,parm64:%llx",
inti->ext.ext_params, inti->ext.ext_params2);
vcpu->stat.deliver_virtio_interrupt++;
rc = put_guest_u16(vcpu, __LC_EXT_INT_CODE, 0x2603);
@@ -360,7 +360,7 @@ int kvm_s390_handle_wait(struct kvm_vcpu *vcpu)
vcpu->arch.ckc_timer.expires = jiffies + sltime;
add_timer(&vcpu->arch.ckc_timer);
- VCPU_EVENT(vcpu, 5, "enabled wait timer:%lx jiffies", sltime);
+ VCPU_EVENT(vcpu, 5, "enabled wait timer:%llx jiffies", sltime);
no_timer:
spin_lock_bh(&vcpu->arch.local_int.float_int->lock);
spin_lock_bh(&vcpu->arch.local_int.lock);
@@ -491,7 +491,7 @@ int kvm_s390_inject_vm(struct kvm *kvm,
switch (s390int->type) {
case KVM_S390_INT_VIRTIO:
- VM_EVENT(kvm, 5, "inject: virtio parm:%x,parm64:%lx",
+ VM_EVENT(kvm, 5, "inject: virtio parm:%x,parm64:%llx",
s390int->parm, s390int->parm64);
inti->type = s390int->type;
inti->ext.ext_params = s390int->parm;
diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
index 8b00eb2ddf5..be8497186b9 100644
--- a/arch/s390/kvm/kvm-s390.c
+++ b/arch/s390/kvm/kvm-s390.c
@@ -113,8 +113,6 @@ long kvm_arch_dev_ioctl(struct file *filp,
int kvm_dev_ioctl_check_extension(long ext)
{
switch (ext) {
- case KVM_CAP_USER_MEMORY:
- return 1;
default:
return 0;
}
@@ -185,8 +183,6 @@ struct kvm *kvm_arch_create_vm(void)
debug_register_view(kvm->arch.dbf, &debug_sprintf_view);
VM_EVENT(kvm, 3, "%s", "vm created");
- try_module_get(THIS_MODULE);
-
return kvm;
out_nodbf:
free_page((unsigned long)(kvm->arch.sca));
@@ -196,13 +192,33 @@ out_nokvm:
return ERR_PTR(rc);
}
+void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
+{
+ VCPU_EVENT(vcpu, 3, "%s", "free cpu");
+ free_page((unsigned long)(vcpu->arch.sie_block));
+ kvm_vcpu_uninit(vcpu);
+ kfree(vcpu);
+}
+
+static void kvm_free_vcpus(struct kvm *kvm)
+{
+ unsigned int i;
+
+ for (i = 0; i < KVM_MAX_VCPUS; ++i) {
+ if (kvm->vcpus[i]) {
+ kvm_arch_vcpu_destroy(kvm->vcpus[i]);
+ kvm->vcpus[i] = NULL;
+ }
+ }
+}
+
void kvm_arch_destroy_vm(struct kvm *kvm)
{
- debug_unregister(kvm->arch.dbf);
+ kvm_free_vcpus(kvm);
kvm_free_physmem(kvm);
free_page((unsigned long)(kvm->arch.sca));
+ debug_unregister(kvm->arch.dbf);
kfree(kvm);
- module_put(THIS_MODULE);
}
/* Section: vcpu related */
@@ -213,8 +229,7 @@ int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
{
- /* kvm common code refers to this, but does'nt call it */
- BUG();
+	/* Nothing to do */
}
void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
@@ -308,8 +323,6 @@ struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
VM_EVENT(kvm, 3, "create cpu %d at %p, sie block at %p", id, vcpu,
vcpu->arch.sie_block);
- try_module_get(THIS_MODULE);
-
return vcpu;
out_free_cpu:
kfree(vcpu);
@@ -317,14 +330,6 @@ out_nomem:
return ERR_PTR(rc);
}
-void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
-{
- VCPU_EVENT(vcpu, 3, "%s", "destroy cpu");
- free_page((unsigned long)(vcpu->arch.sie_block));
- kfree(vcpu);
- module_put(THIS_MODULE);
-}
-
int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
{
/* kvm common code refers to this, but never calls it */
diff --git a/arch/s390/kvm/priv.c b/arch/s390/kvm/priv.c
index cce40ff2913..3605df45dd4 100644
--- a/arch/s390/kvm/priv.c
+++ b/arch/s390/kvm/priv.c
@@ -118,7 +118,7 @@ static int handle_store_cpu_address(struct kvm_vcpu *vcpu)
goto out;
}
- VCPU_EVENT(vcpu, 5, "storing cpu address to %lx", useraddr);
+ VCPU_EVENT(vcpu, 5, "storing cpu address to %llx", useraddr);
out:
return 0;
}
diff --git a/arch/s390/mm/init.c b/arch/s390/mm/init.c
index 158b0d6d704..f0258ca3b17 100644
--- a/arch/s390/mm/init.c
+++ b/arch/s390/mm/init.c
@@ -183,7 +183,7 @@ int arch_add_memory(int nid, u64 start, u64 size)
rc = vmem_add_mapping(start, size);
if (rc)
return rc;
- rc = __add_pages(zone, PFN_DOWN(start), PFN_DOWN(size));
+ rc = __add_pages(nid, zone, PFN_DOWN(start), PFN_DOWN(size));
if (rc)
vmem_remove_mapping(start, size);
return rc;