Diffstat (limited to 'arch/parisc/include/asm')
-rw-r--r--  arch/parisc/include/asm/asm-offsets.h      1
-rw-r--r--  arch/parisc/include/asm/atomic.h          10
-rw-r--r--  arch/parisc/include/asm/bug.h              4
-rw-r--r--  arch/parisc/include/asm/elf.h              1
-rw-r--r--  arch/parisc/include/asm/fcntl.h            5
-rw-r--r--  arch/parisc/include/asm/ftrace.h          14
-rw-r--r--  arch/parisc/include/asm/spinlock.h        64
-rw-r--r--  arch/parisc/include/asm/spinlock_types.h  12
8 files changed, 62 insertions(+), 49 deletions(-)
diff --git a/arch/parisc/include/asm/asm-offsets.h b/arch/parisc/include/asm/asm-offsets.h
new file mode 100644
index 00000000000..d370ee36a18
--- /dev/null
+++ b/arch/parisc/include/asm/asm-offsets.h
@@ -0,0 +1 @@
+#include <generated/asm-offsets.h>
diff --git a/arch/parisc/include/asm/atomic.h b/arch/parisc/include/asm/atomic.h
index 8bc9e96699b..716634d1f54 100644
--- a/arch/parisc/include/asm/atomic.h
+++ b/arch/parisc/include/asm/atomic.h
@@ -27,19 +27,19 @@
# define ATOMIC_HASH_SIZE 4
# define ATOMIC_HASH(a) (&(__atomic_hash[ (((unsigned long) (a))/L1_CACHE_BYTES) & (ATOMIC_HASH_SIZE-1) ]))
-extern raw_spinlock_t __atomic_hash[ATOMIC_HASH_SIZE] __lock_aligned;
+extern arch_spinlock_t __atomic_hash[ATOMIC_HASH_SIZE] __lock_aligned;
/* Can't use raw_spin_lock_irq because of #include problems, so
* this is the substitute */
#define _atomic_spin_lock_irqsave(l,f) do { \
- raw_spinlock_t *s = ATOMIC_HASH(l); \
+ arch_spinlock_t *s = ATOMIC_HASH(l); \
local_irq_save(f); \
- __raw_spin_lock(s); \
+ arch_spin_lock(s); \
} while(0)
#define _atomic_spin_unlock_irqrestore(l,f) do { \
- raw_spinlock_t *s = ATOMIC_HASH(l); \
- __raw_spin_unlock(s); \
+ arch_spinlock_t *s = ATOMIC_HASH(l); \
+ arch_spin_unlock(s); \
local_irq_restore(f); \
} while(0)
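
[Annotation] PA-RISC has no atomic read-modify-write instruction beyond ldcw, so
generic atomics are emulated by hashing the variable's address onto a small
array of spinlocks. A minimal sketch of how these macros back an atomic
operation (the function name is illustrative; the real ops later in this
header follow the same shape):

static inline void sketch_atomic_add(int i, atomic_t *v)
{
        unsigned long flags;

        /* ATOMIC_HASH(v) maps v's cache line to one of the 4 hashed locks */
        _atomic_spin_lock_irqsave(v, flags);
        v->counter += i;
        _atomic_spin_unlock_irqrestore(v, flags);
}
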
diff --git a/arch/parisc/include/asm/bug.h b/arch/parisc/include/asm/bug.h
index 8cfc553fc83..75e46c557a1 100644
--- a/arch/parisc/include/asm/bug.h
+++ b/arch/parisc/include/asm/bug.h
@@ -32,14 +32,14 @@
"\t.popsection" \
: : "i" (__FILE__), "i" (__LINE__), \
"i" (0), "i" (sizeof(struct bug_entry)) ); \
- for(;;) ; \
+ unreachable(); \
} while(0)
#else
#define BUG() \
do { \
asm volatile(PARISC_BUG_BREAK_ASM : : ); \
- for(;;) ; \
+ unreachable(); \
} while(0)
#endif
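
[Annotation] unreachable() tells the compiler that control never gets past the
trap, which suppresses spurious "missing return" warnings and lets it discard
the dead loop. A sketch of the idea, not the exact <linux/compiler.h>
definition:

/* Sketch only: GCC 4.5+ provides __builtin_unreachable(); older
 * compilers fall back to something like the old infinite loop. */
#if defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 5))
# define sketch_unreachable() __builtin_unreachable()
#else
# define sketch_unreachable() do { } while (1)
#endif
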
diff --git a/arch/parisc/include/asm/elf.h b/arch/parisc/include/asm/elf.h
index 9c802eb4be8..19f6cb1a4a1 100644
--- a/arch/parisc/include/asm/elf.h
+++ b/arch/parisc/include/asm/elf.h
@@ -328,7 +328,6 @@ struct pt_regs; /* forward declaration... */
such function. */
#define ELF_PLAT_INIT(_r, load_addr) _r->gr[23] = 0
-#define USE_ELF_CORE_DUMP
#define ELF_EXEC_PAGESIZE 4096
/* This is the location that an ET_DYN program is loaded if exec'ed. Typical
diff --git a/arch/parisc/include/asm/fcntl.h b/arch/parisc/include/asm/fcntl.h
index 1e1c824764e..f357fc693c8 100644
--- a/arch/parisc/include/asm/fcntl.h
+++ b/arch/parisc/include/asm/fcntl.h
@@ -1,14 +1,13 @@
#ifndef _PARISC_FCNTL_H
#define _PARISC_FCNTL_H
-/* open/fcntl - O_SYNC is only implemented on blocks devices and on files
- located on an ext2 file system */
#define O_APPEND 000000010
#define O_BLKSEEK 000000100 /* HPUX only */
#define O_CREAT 000000400 /* not fcntl */
#define O_EXCL 000002000 /* not fcntl */
#define O_LARGEFILE 000004000
-#define O_SYNC 000100000
+#define __O_SYNC 000100000
+#define O_SYNC (__O_SYNC|O_DSYNC)
#define O_NONBLOCK 000200004 /* HPUX has separate NDELAY & NONBLOCK */
#define O_NOCTTY 000400000 /* not fcntl */
#define O_DSYNC 001000000 /* HPUX only */
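
[Annotation] The split gives the header POSIX-correct semantics: O_DSYNC syncs
file data only, while O_SYNC (data plus metadata) is defined as a superset
containing the O_DSYNC bit, so code that tests only the O_DSYNC bit still
honours the weaker guarantee for O_SYNC openers. A hypothetical userspace
illustration (the function is ours, not from the patch):

#include <fcntl.h>
#include <assert.h>

int open_for_logging(const char *path)
{
        /* O_SYNC = __O_SYNC | O_DSYNC, so the data-sync bit is
         * always present whenever full sync was requested. */
        assert((O_SYNC & O_DSYNC) == O_DSYNC);

        /* Data-integrity sync on every write is enough for a log. */
        return open(path, O_WRONLY | O_CREAT | O_APPEND | O_DSYNC, 0644);
}
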
diff --git a/arch/parisc/include/asm/ftrace.h b/arch/parisc/include/asm/ftrace.h
index 2fa05dd6aee..72c0fafaa03 100644
--- a/arch/parisc/include/asm/ftrace.h
+++ b/arch/parisc/include/asm/ftrace.h
@@ -20,6 +20,20 @@ struct ftrace_ret_stack {
* Defined in entry.S
*/
extern void return_to_handler(void);
+
+
+extern unsigned long return_address(unsigned int);
+
+#define HAVE_ARCH_CALLER_ADDR
+
+#define CALLER_ADDR0 ((unsigned long)__builtin_return_address(0))
+#define CALLER_ADDR1 return_address(1)
+#define CALLER_ADDR2 return_address(2)
+#define CALLER_ADDR3 return_address(3)
+#define CALLER_ADDR4 return_address(4)
+#define CALLER_ADDR5 return_address(5)
+#define CALLER_ADDR6 return_address(6)
+
#endif /* __ASSEMBLY__ */
#endif /* _ASM_PARISC_FTRACE_H */
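
[Annotation] __builtin_return_address(n) is only dependable for n == 0 on
parisc, so the deeper CALLER_ADDRn levels are routed through the arch unwinder
via return_address(). A hypothetical consumer, sketched the way tracers use
these macros:

#include <linux/kernel.h>
#include <linux/ftrace.h>

static void sketch_report_callers(void)
{
        /* CALLER_ADDR0 is our own return address; CALLER_ADDR1/2
         * come from the arch unwinder via return_address(). */
        printk(KERN_DEBUG "called from %pS <- %pS <- %pS\n",
               (void *)CALLER_ADDR0, (void *)CALLER_ADDR1,
               (void *)CALLER_ADDR2);
}
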
diff --git a/arch/parisc/include/asm/spinlock.h b/arch/parisc/include/asm/spinlock.h
index fae03e136fa..74036f436a3 100644
--- a/arch/parisc/include/asm/spinlock.h
+++ b/arch/parisc/include/asm/spinlock.h
@@ -5,17 +5,17 @@
#include <asm/processor.h>
#include <asm/spinlock_types.h>
-static inline int __raw_spin_is_locked(raw_spinlock_t *x)
+static inline int arch_spin_is_locked(arch_spinlock_t *x)
{
volatile unsigned int *a = __ldcw_align(x);
return *a == 0;
}
-#define __raw_spin_lock(lock) __raw_spin_lock_flags(lock, 0)
-#define __raw_spin_unlock_wait(x) \
- do { cpu_relax(); } while (__raw_spin_is_locked(x))
+#define arch_spin_lock(lock) arch_spin_lock_flags(lock, 0)
+#define arch_spin_unlock_wait(x) \
+ do { cpu_relax(); } while (arch_spin_is_locked(x))
-static inline void __raw_spin_lock_flags(raw_spinlock_t *x,
+static inline void arch_spin_lock_flags(arch_spinlock_t *x,
unsigned long flags)
{
volatile unsigned int *a;
@@ -33,7 +33,7 @@ static inline void __raw_spin_lock_flags(raw_spinlock_t *x,
mb();
}
-static inline void __raw_spin_unlock(raw_spinlock_t *x)
+static inline void arch_spin_unlock(arch_spinlock_t *x)
{
volatile unsigned int *a;
mb();
@@ -42,7 +42,7 @@ static inline void __raw_spin_unlock(raw_spinlock_t *x)
mb();
}
-static inline int __raw_spin_trylock(raw_spinlock_t *x)
+static inline int arch_spin_trylock(arch_spinlock_t *x)
{
volatile unsigned int *a;
int ret;
@@ -69,38 +69,38 @@ static inline int __raw_spin_trylock(raw_spinlock_t *x)
/* Note that we have to ensure interrupts are disabled in case we're
* interrupted by some other code that wants to grab the same read lock */
-static __inline__ void __raw_read_lock(raw_rwlock_t *rw)
+static __inline__ void arch_read_lock(arch_rwlock_t *rw)
{
unsigned long flags;
local_irq_save(flags);
- __raw_spin_lock_flags(&rw->lock, flags);
+ arch_spin_lock_flags(&rw->lock, flags);
rw->counter++;
- __raw_spin_unlock(&rw->lock);
+ arch_spin_unlock(&rw->lock);
local_irq_restore(flags);
}
/* Note that we have to ensure interrupts are disabled in case we're
* interrupted by some other code that wants to grab the same read lock */
-static __inline__ void __raw_read_unlock(raw_rwlock_t *rw)
+static __inline__ void arch_read_unlock(arch_rwlock_t *rw)
{
unsigned long flags;
local_irq_save(flags);
- __raw_spin_lock_flags(&rw->lock, flags);
+ arch_spin_lock_flags(&rw->lock, flags);
rw->counter--;
- __raw_spin_unlock(&rw->lock);
+ arch_spin_unlock(&rw->lock);
local_irq_restore(flags);
}
/* Note that we have to ensure interrupts are disabled in case we're
* interrupted by some other code that wants to grab the same read lock */
-static __inline__ int __raw_read_trylock(raw_rwlock_t *rw)
+static __inline__ int arch_read_trylock(arch_rwlock_t *rw)
{
unsigned long flags;
retry:
local_irq_save(flags);
- if (__raw_spin_trylock(&rw->lock)) {
+ if (arch_spin_trylock(&rw->lock)) {
rw->counter++;
- __raw_spin_unlock(&rw->lock);
+ arch_spin_unlock(&rw->lock);
local_irq_restore(flags);
return 1;
}
@@ -111,7 +111,7 @@ static __inline__ int __raw_read_trylock(raw_rwlock_t *rw)
return 0;
/* Wait until we have a realistic chance at the lock */
- while (__raw_spin_is_locked(&rw->lock) && rw->counter >= 0)
+ while (arch_spin_is_locked(&rw->lock) && rw->counter >= 0)
cpu_relax();
goto retry;
@@ -119,15 +119,15 @@ static __inline__ int __raw_read_trylock(raw_rwlock_t *rw)
/* Note that we have to ensure interrupts are disabled in case we're
* interrupted by some other code that wants to read_trylock() this lock */
-static __inline__ void __raw_write_lock(raw_rwlock_t *rw)
+static __inline__ void arch_write_lock(arch_rwlock_t *rw)
{
unsigned long flags;
retry:
local_irq_save(flags);
- __raw_spin_lock_flags(&rw->lock, flags);
+ arch_spin_lock_flags(&rw->lock, flags);
if (rw->counter != 0) {
- __raw_spin_unlock(&rw->lock);
+ arch_spin_unlock(&rw->lock);
local_irq_restore(flags);
while (rw->counter != 0)
@@ -141,27 +141,27 @@ retry:
local_irq_restore(flags);
}
-static __inline__ void __raw_write_unlock(raw_rwlock_t *rw)
+static __inline__ void arch_write_unlock(arch_rwlock_t *rw)
{
rw->counter = 0;
- __raw_spin_unlock(&rw->lock);
+ arch_spin_unlock(&rw->lock);
}
/* Note that we have to ensure interrupts are disabled in case we're
* interrupted by some other code that wants to read_trylock() this lock */
-static __inline__ int __raw_write_trylock(raw_rwlock_t *rw)
+static __inline__ int arch_write_trylock(arch_rwlock_t *rw)
{
unsigned long flags;
int result = 0;
local_irq_save(flags);
- if (__raw_spin_trylock(&rw->lock)) {
+ if (arch_spin_trylock(&rw->lock)) {
if (rw->counter == 0) {
rw->counter = -1;
result = 1;
} else {
/* Read-locked. Oh well. */
- __raw_spin_unlock(&rw->lock);
+ arch_spin_unlock(&rw->lock);
}
}
local_irq_restore(flags);
@@ -173,7 +173,7 @@ static __inline__ int __raw_write_trylock(raw_rwlock_t *rw)
* read_can_lock - would read_trylock() succeed?
* @lock: the rwlock in question.
*/
-static __inline__ int __raw_read_can_lock(raw_rwlock_t *rw)
+static __inline__ int arch_read_can_lock(arch_rwlock_t *rw)
{
return rw->counter >= 0;
}
@@ -182,16 +182,16 @@ static __inline__ int __raw_read_can_lock(raw_rwlock_t *rw)
* write_can_lock - would write_trylock() succeed?
* @lock: the rwlock in question.
*/
-static __inline__ int __raw_write_can_lock(raw_rwlock_t *rw)
+static __inline__ int arch_write_can_lock(arch_rwlock_t *rw)
{
return !rw->counter;
}
-#define __raw_read_lock_flags(lock, flags) __raw_read_lock(lock)
-#define __raw_write_lock_flags(lock, flags) __raw_write_lock(lock)
+#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
+#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)
-#define _raw_spin_relax(lock) cpu_relax()
-#define _raw_read_relax(lock) cpu_relax()
-#define _raw_write_relax(lock) cpu_relax()
+#define arch_spin_relax(lock) cpu_relax()
+#define arch_read_relax(lock) cpu_relax()
+#define arch_write_relax(lock) cpu_relax()
#endif /* __ASM_SPINLOCK_H */
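
[Annotation] The rwlock here is built from the spinlock above plus a counter:
counter >= 0 is the number of active readers and -1 means write-locked.
Readers take rw->lock only long enough to adjust the counter, while a writer
waits for the counter to reach zero, marks it -1, and keeps rw->lock held
until arch_write_unlock(). A hypothetical caller showing the intended
protocol (these entry points are normally reached through the generic
read_lock()/write_lock() wrappers, not called directly):

static arch_rwlock_t stats_lock = __ARCH_RW_LOCK_UNLOCKED;
static unsigned long stats[16];

static unsigned long read_stat(int i)
{
        unsigned long v;

        arch_read_lock(&stats_lock);    /* counter++, rw->lock dropped again */
        v = stats[i];
        arch_read_unlock(&stats_lock);  /* counter-- */
        return v;
}

static void update_stat(int i, unsigned long v)
{
        arch_write_lock(&stats_lock);   /* waits for counter == 0, then
                                           sets it to -1 and keeps rw->lock */
        stats[i] = v;
        arch_write_unlock(&stats_lock); /* counter = 0, release rw->lock */
}
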
diff --git a/arch/parisc/include/asm/spinlock_types.h b/arch/parisc/include/asm/spinlock_types.h
index 3f72f47cf4b..8c373aa28a8 100644
--- a/arch/parisc/include/asm/spinlock_types.h
+++ b/arch/parisc/include/asm/spinlock_types.h
@@ -4,18 +4,18 @@
typedef struct {
#ifdef CONFIG_PA20
volatile unsigned int slock;
-# define __RAW_SPIN_LOCK_UNLOCKED { 1 }
+# define __ARCH_SPIN_LOCK_UNLOCKED { 1 }
#else
volatile unsigned int lock[4];
-# define __RAW_SPIN_LOCK_UNLOCKED { { 1, 1, 1, 1 } }
+# define __ARCH_SPIN_LOCK_UNLOCKED { { 1, 1, 1, 1 } }
#endif
-} raw_spinlock_t;
+} arch_spinlock_t;
typedef struct {
- raw_spinlock_t lock;
+ arch_spinlock_t lock;
volatile int counter;
-} raw_rwlock_t;
+} arch_rwlock_t;
-#define __RAW_RW_LOCK_UNLOCKED { __RAW_SPIN_LOCK_UNLOCKED, 0 }
+#define __ARCH_RW_LOCK_UNLOCKED { __ARCH_SPIN_LOCK_UNLOCKED, 0 }
#endif
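
[Annotation] The two layouts exist because the PA1.x ldcw instruction only
works on a 16-byte-aligned word, so pre-PA2.0 kernels over-allocate four words
and pick the aligned one at runtime; PA2.0 (CONFIG_PA20) can use the coherent
variant on a naturally aligned word, hence the single slock. A sketch of the
alignment trick, roughly what the kernel's __ldcw_align() does (the constant
name and helper are ours):

#define SKETCH_LDCW_ALIGNMENT 16

/* Round up into the lock[4] array to the first 16-byte boundary;
 * only meaningful for the !CONFIG_PA20 layout above. */
static inline volatile unsigned int *sketch_ldcw_align(arch_spinlock_t *x)
{
        unsigned long a = (unsigned long)&x->lock[0];

        a = (a + SKETCH_LDCW_ALIGNMENT - 1) & ~(SKETCH_LDCW_ALIGNMENT - 1);
        return (volatile unsigned int *)a;
}
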