author     Thomas Gleixner <tglx@linutronix.de>    2009-11-09 15:21:34 +0000
committer  Thomas Gleixner <tglx@linutronix.de>    2009-11-13 20:53:28 +0100
commit     6beb000923882f6204ea2cfcd932e568e900803f (patch)
tree       1849bb1e7493bbf5e538d74cec18c6d6f4b16ced /kernel/spinlock.c
parent     156171c71a0dc4bce12b4408bb1591f8fe32dc1a (diff)
locking: Make inlining decision Kconfig based
commit 892a7c67 (locking: Allow arch-inlined spinlocks) implements the
selection of which lock functions are inlined based on defines in
arch/.../spinlock.h:

  #define __always_inline__LOCK_FUNCTION

Despite the name __always_inline__*, the lock functions can be built out
of line depending on config options. Also, if the arch does not set some
inline defines, the generic code might set them, again depending on
config options.

This makes it unnecessarily hard to figure out when and which lock
functions are inlined. Aside from that, it makes it much harder and
messier for -rt to manipulate the lock functions.

Convert the inlining decision to CONFIG switches. Each lock function is
inlined depending on CONFIG_INLINE_*. The configs implement the existing
dependencies. The architecture code can select ARCH_INLINE_* to signal
that it wants the corresponding lock function inlined. ARCH_INLINE_* is
necessary as Kconfig ignores "depends on" restrictions when a config
element is selected.

No functional change.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
LKML-Reference: <20091109151428.504477141@linutronix.de>
Acked-by: Heiko Carstens <heiko.carstens@de.ibm.com>
Reviewed-by: Ingo Molnar <mingo@elte.hu>
Acked-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
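For illustration, a minimal sketch of the resulting pattern for one lock
function. The out-of-line side is taken from the diff below; the inline
side and its header location (spinlock_api_smp.h) are assumptions based
on the convention the changelog describes, not part of this patch:

  /* include/linux/spinlock_api_smp.h (assumed location of the inline side) */
  #ifdef CONFIG_INLINE_SPIN_LOCK
  /* callers pick up the inline body instead of the exported function */
  #define _spin_lock(lock) __spin_lock(lock)
  #endif

  /* kernel/spinlock.c: out-of-line version, built only when inlining is off */
  #ifndef CONFIG_INLINE_SPIN_LOCK
  void __lockfunc _spin_lock(spinlock_t *lock)
  {
  	__spin_lock(lock);
  }
  EXPORT_SYMBOL(_spin_lock);
  #endif

Each CONFIG_INLINE_* switch would in turn be derived in Kconfig from the
corresponding ARCH_INLINE_* selection plus the existing debug/lockbreak
dependencies.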
Diffstat (limited to 'kernel/spinlock.c')
-rw-r--r--  kernel/spinlock.c  56
1 file changed, 28 insertions(+), 28 deletions(-)
diff --git a/kernel/spinlock.c b/kernel/spinlock.c
index 5ddab730cb2f..235a9579a875 100644
--- a/kernel/spinlock.c
+++ b/kernel/spinlock.c
@@ -21,7 +21,7 @@
#include <linux/debug_locks.h>
#include <linux/module.h>
-#ifndef _spin_trylock
+#ifndef CONFIG_INLINE_SPIN_TRYLOCK
int __lockfunc _spin_trylock(spinlock_t *lock)
{
return __spin_trylock(lock);
@@ -29,7 +29,7 @@ int __lockfunc _spin_trylock(spinlock_t *lock)
EXPORT_SYMBOL(_spin_trylock);
#endif
-#ifndef _read_trylock
+#ifndef CONFIG_INLINE_READ_TRYLOCK
int __lockfunc _read_trylock(rwlock_t *lock)
{
return __read_trylock(lock);
@@ -37,7 +37,7 @@ int __lockfunc _read_trylock(rwlock_t *lock)
EXPORT_SYMBOL(_read_trylock);
#endif
-#ifndef _write_trylock
+#ifndef CONFIG_INLINE_WRITE_TRYLOCK
int __lockfunc _write_trylock(rwlock_t *lock)
{
return __write_trylock(lock);
@@ -52,7 +52,7 @@ EXPORT_SYMBOL(_write_trylock);
*/
#if !defined(CONFIG_GENERIC_LOCKBREAK) || defined(CONFIG_DEBUG_LOCK_ALLOC)
-#ifndef _read_lock
+#ifndef CONFIG_INLINE_READ_LOCK
void __lockfunc _read_lock(rwlock_t *lock)
{
__read_lock(lock);
@@ -60,7 +60,7 @@ void __lockfunc _read_lock(rwlock_t *lock)
EXPORT_SYMBOL(_read_lock);
#endif
-#ifndef _spin_lock_irqsave
+#ifndef CONFIG_INLINE_SPIN_LOCK_IRQSAVE
unsigned long __lockfunc _spin_lock_irqsave(spinlock_t *lock)
{
return __spin_lock_irqsave(lock);
@@ -68,7 +68,7 @@ unsigned long __lockfunc _spin_lock_irqsave(spinlock_t *lock)
EXPORT_SYMBOL(_spin_lock_irqsave);
#endif
-#ifndef _spin_lock_irq
+#ifndef CONFIG_INLINE_SPIN_LOCK_IRQ
void __lockfunc _spin_lock_irq(spinlock_t *lock)
{
__spin_lock_irq(lock);
@@ -76,7 +76,7 @@ void __lockfunc _spin_lock_irq(spinlock_t *lock)
EXPORT_SYMBOL(_spin_lock_irq);
#endif
-#ifndef _spin_lock_bh
+#ifndef CONFIG_INLINE_SPIN_LOCK_BH
void __lockfunc _spin_lock_bh(spinlock_t *lock)
{
__spin_lock_bh(lock);
@@ -84,7 +84,7 @@ void __lockfunc _spin_lock_bh(spinlock_t *lock)
EXPORT_SYMBOL(_spin_lock_bh);
#endif
-#ifndef _read_lock_irqsave
+#ifndef CONFIG_INLINE_READ_LOCK_IRQSAVE
unsigned long __lockfunc _read_lock_irqsave(rwlock_t *lock)
{
return __read_lock_irqsave(lock);
@@ -92,7 +92,7 @@ unsigned long __lockfunc _read_lock_irqsave(rwlock_t *lock)
EXPORT_SYMBOL(_read_lock_irqsave);
#endif
-#ifndef _read_lock_irq
+#ifndef CONFIG_INLINE_READ_LOCK_IRQ
void __lockfunc _read_lock_irq(rwlock_t *lock)
{
__read_lock_irq(lock);
@@ -100,7 +100,7 @@ void __lockfunc _read_lock_irq(rwlock_t *lock)
EXPORT_SYMBOL(_read_lock_irq);
#endif
-#ifndef _read_lock_bh
+#ifndef CONFIG_INLINE_READ_LOCK_BH
void __lockfunc _read_lock_bh(rwlock_t *lock)
{
__read_lock_bh(lock);
@@ -108,7 +108,7 @@ void __lockfunc _read_lock_bh(rwlock_t *lock)
EXPORT_SYMBOL(_read_lock_bh);
#endif
-#ifndef _write_lock_irqsave
+#ifndef CONFIG_INLINE_WRITE_LOCK_IRQSAVE
unsigned long __lockfunc _write_lock_irqsave(rwlock_t *lock)
{
return __write_lock_irqsave(lock);
@@ -116,7 +116,7 @@ unsigned long __lockfunc _write_lock_irqsave(rwlock_t *lock)
EXPORT_SYMBOL(_write_lock_irqsave);
#endif
-#ifndef _write_lock_irq
+#ifndef CONFIG_INLINE_WRITE_LOCK_IRQ
void __lockfunc _write_lock_irq(rwlock_t *lock)
{
__write_lock_irq(lock);
@@ -124,7 +124,7 @@ void __lockfunc _write_lock_irq(rwlock_t *lock)
EXPORT_SYMBOL(_write_lock_irq);
#endif
-#ifndef _write_lock_bh
+#ifndef CONFIG_INLINE_WRITE_LOCK_BH
void __lockfunc _write_lock_bh(rwlock_t *lock)
{
__write_lock_bh(lock);
@@ -132,7 +132,7 @@ void __lockfunc _write_lock_bh(rwlock_t *lock)
EXPORT_SYMBOL(_write_lock_bh);
#endif
-#ifndef _spin_lock
+#ifndef CONFIG_INLINE_SPIN_LOCK
void __lockfunc _spin_lock(spinlock_t *lock)
{
__spin_lock(lock);
@@ -140,7 +140,7 @@ void __lockfunc _spin_lock(spinlock_t *lock)
EXPORT_SYMBOL(_spin_lock);
#endif
-#ifndef _write_lock
+#ifndef CONFIG_INLINE_WRITE_LOCK
void __lockfunc _write_lock(rwlock_t *lock)
{
__write_lock(lock);
@@ -272,7 +272,7 @@ EXPORT_SYMBOL(_spin_lock_nest_lock);
#endif
-#ifndef _spin_unlock
+#ifndef CONFIG_INLINE_SPIN_UNLOCK
void __lockfunc _spin_unlock(spinlock_t *lock)
{
__spin_unlock(lock);
@@ -280,7 +280,7 @@ void __lockfunc _spin_unlock(spinlock_t *lock)
EXPORT_SYMBOL(_spin_unlock);
#endif
-#ifndef _write_unlock
+#ifndef CONFIG_INLINE_WRITE_UNLOCK
void __lockfunc _write_unlock(rwlock_t *lock)
{
__write_unlock(lock);
@@ -288,7 +288,7 @@ void __lockfunc _write_unlock(rwlock_t *lock)
EXPORT_SYMBOL(_write_unlock);
#endif
-#ifndef _read_unlock
+#ifndef CONFIG_INLINE_READ_UNLOCK
void __lockfunc _read_unlock(rwlock_t *lock)
{
__read_unlock(lock);
@@ -296,7 +296,7 @@ void __lockfunc _read_unlock(rwlock_t *lock)
EXPORT_SYMBOL(_read_unlock);
#endif
-#ifndef _spin_unlock_irqrestore
+#ifndef CONFIG_INLINE_SPIN_UNLOCK_IRQRESTORE
void __lockfunc _spin_unlock_irqrestore(spinlock_t *lock, unsigned long flags)
{
__spin_unlock_irqrestore(lock, flags);
@@ -304,7 +304,7 @@ void __lockfunc _spin_unlock_irqrestore(spinlock_t *lock, unsigned long flags)
EXPORT_SYMBOL(_spin_unlock_irqrestore);
#endif
-#ifndef _spin_unlock_irq
+#ifndef CONFIG_INLINE_SPIN_UNLOCK_IRQ
void __lockfunc _spin_unlock_irq(spinlock_t *lock)
{
__spin_unlock_irq(lock);
@@ -312,7 +312,7 @@ void __lockfunc _spin_unlock_irq(spinlock_t *lock)
EXPORT_SYMBOL(_spin_unlock_irq);
#endif
-#ifndef _spin_unlock_bh
+#ifndef CONFIG_INLINE_SPIN_UNLOCK_BH
void __lockfunc _spin_unlock_bh(spinlock_t *lock)
{
__spin_unlock_bh(lock);
@@ -320,7 +320,7 @@ void __lockfunc _spin_unlock_bh(spinlock_t *lock)
EXPORT_SYMBOL(_spin_unlock_bh);
#endif
-#ifndef _read_unlock_irqrestore
+#ifndef CONFIG_INLINE_READ_UNLOCK_IRQRESTORE
void __lockfunc _read_unlock_irqrestore(rwlock_t *lock, unsigned long flags)
{
__read_unlock_irqrestore(lock, flags);
@@ -328,7 +328,7 @@ void __lockfunc _read_unlock_irqrestore(rwlock_t *lock, unsigned long flags)
EXPORT_SYMBOL(_read_unlock_irqrestore);
#endif
-#ifndef _read_unlock_irq
+#ifndef CONFIG_INLINE_READ_UNLOCK_IRQ
void __lockfunc _read_unlock_irq(rwlock_t *lock)
{
__read_unlock_irq(lock);
@@ -336,7 +336,7 @@ void __lockfunc _read_unlock_irq(rwlock_t *lock)
EXPORT_SYMBOL(_read_unlock_irq);
#endif
-#ifndef _read_unlock_bh
+#ifndef CONFIG_INLINE_READ_UNLOCK_BH
void __lockfunc _read_unlock_bh(rwlock_t *lock)
{
__read_unlock_bh(lock);
@@ -344,7 +344,7 @@ void __lockfunc _read_unlock_bh(rwlock_t *lock)
EXPORT_SYMBOL(_read_unlock_bh);
#endif
-#ifndef _write_unlock_irqrestore
+#ifndef CONFIG_INLINE_WRITE_UNLOCK_IRQRESTORE
void __lockfunc _write_unlock_irqrestore(rwlock_t *lock, unsigned long flags)
{
__write_unlock_irqrestore(lock, flags);
@@ -352,7 +352,7 @@ void __lockfunc _write_unlock_irqrestore(rwlock_t *lock, unsigned long flags)
EXPORT_SYMBOL(_write_unlock_irqrestore);
#endif
-#ifndef _write_unlock_irq
+#ifndef CONFIG_INLINE_WRITE_UNLOCK_IRQ
void __lockfunc _write_unlock_irq(rwlock_t *lock)
{
__write_unlock_irq(lock);
@@ -360,7 +360,7 @@ void __lockfunc _write_unlock_irq(rwlock_t *lock)
EXPORT_SYMBOL(_write_unlock_irq);
#endif
-#ifndef _write_unlock_bh
+#ifndef CONFIG_INLINE_WRITE_UNLOCK_BH
void __lockfunc _write_unlock_bh(rwlock_t *lock)
{
__write_unlock_bh(lock);
@@ -368,7 +368,7 @@ void __lockfunc _write_unlock_bh(rwlock_t *lock)
EXPORT_SYMBOL(_write_unlock_bh);
#endif
-#ifndef _spin_trylock_bh
+#ifndef CONFIG_INLINE_SPIN_TRYLOCK_BH
int __lockfunc _spin_trylock_bh(spinlock_t *lock)
{
return __spin_trylock_bh(lock);