From 5b3f683e694a835f5dfdab06102be1a50604c3b7 Mon Sep 17 00:00:00 2001
From: Philipp Hachtmann
Date: Mon, 7 Apr 2014 18:25:23 +0200
Subject: s390/spinlock: cleanup spinlock code

Improve the spinlock code in several aspects:
 - Have _raw_compare_and_swap return true if the operation has been
   successful instead of returning the old value.
 - Remove the "volatile" from arch_spinlock_t and arch_rwlock_t
 - Rename 'owner_cpu' to 'lock'
 - Add helper functions arch_spin_trylock_once / arch_spin_tryrelease_once

[ Martin Schwidefsky: patch breakdown and code beautification ]

Signed-off-by: Philipp Hachtmann
Signed-off-by: Martin Schwidefsky
---
 arch/s390/lib/spinlock.c | 55 +++++++++++++++++++++++-------------------------
 1 file changed, 26 insertions(+), 29 deletions(-)

(limited to 'arch/s390/lib')

diff --git a/arch/s390/lib/spinlock.c b/arch/s390/lib/spinlock.c
index f709983f41f8..4a3b33b2dbb9 100644
--- a/arch/s390/lib/spinlock.c
+++ b/arch/s390/lib/spinlock.c
@@ -31,22 +31,21 @@ void arch_spin_lock_wait(arch_spinlock_t *lp)
 	unsigned int owner;
 
 	while (1) {
-		owner = lp->owner_cpu;
+		owner = lp->lock;
 		if (!owner || smp_vcpu_scheduled(~owner)) {
 			for (count = spin_retry; count > 0; count--) {
 				if (arch_spin_is_locked(lp))
 					continue;
-				if (_raw_compare_and_swap(&lp->owner_cpu, 0,
-							  cpu) == 0)
+				if (_raw_compare_and_swap(&lp->lock, 0, cpu))
 					return;
 			}
 			if (MACHINE_IS_LPAR)
 				continue;
 		}
-		owner = lp->owner_cpu;
+		owner = lp->lock;
 		if (owner)
 			smp_yield_cpu(~owner);
-		if (_raw_compare_and_swap(&lp->owner_cpu, 0, cpu) == 0)
+		if (_raw_compare_and_swap(&lp->lock, 0, cpu))
 			return;
 	}
 }
@@ -60,57 +59,55 @@ void arch_spin_lock_wait_flags(arch_spinlock_t *lp, unsigned long flags)
 
 	local_irq_restore(flags);
 	while (1) {
-		owner = lp->owner_cpu;
+		owner = lp->lock;
 		if (!owner || smp_vcpu_scheduled(~owner)) {
 			for (count = spin_retry; count > 0; count--) {
 				if (arch_spin_is_locked(lp))
 					continue;
 				local_irq_disable();
-				if (_raw_compare_and_swap(&lp->owner_cpu, 0,
-							  cpu) == 0)
+				if (_raw_compare_and_swap(&lp->lock, 0, cpu))
 					return;
 				local_irq_restore(flags);
 			}
 			if (MACHINE_IS_LPAR)
 				continue;
 		}
-		owner = lp->owner_cpu;
+		owner = lp->lock;
 		if (owner)
 			smp_yield_cpu(~owner);
 		local_irq_disable();
-		if (_raw_compare_and_swap(&lp->owner_cpu, 0, cpu) == 0)
+		if (_raw_compare_and_swap(&lp->lock, 0, cpu))
 			return;
 		local_irq_restore(flags);
 	}
 }
 EXPORT_SYMBOL(arch_spin_lock_wait_flags);
 
+void arch_spin_relax(arch_spinlock_t *lp)
+{
+	unsigned int cpu = lp->lock;
+	if (cpu != 0) {
+		if (MACHINE_IS_VM || MACHINE_IS_KVM ||
+		    !smp_vcpu_scheduled(~cpu))
+			smp_yield_cpu(~cpu);
+	}
+}
+EXPORT_SYMBOL(arch_spin_relax);
+
 int arch_spin_trylock_retry(arch_spinlock_t *lp)
 {
-	unsigned int cpu = ~smp_processor_id();
 	int count;
 
 	for (count = spin_retry; count > 0; count--) {
 		if (arch_spin_is_locked(lp))
 			continue;
-		if (_raw_compare_and_swap(&lp->owner_cpu, 0, cpu) == 0)
+		if (arch_spin_trylock_once(lp))
 			return 1;
 	}
 	return 0;
 }
 EXPORT_SYMBOL(arch_spin_trylock_retry);
 
-void arch_spin_relax(arch_spinlock_t *lock)
-{
-	unsigned int cpu = lock->owner_cpu;
-	if (cpu != 0) {
-		if (MACHINE_IS_VM || MACHINE_IS_KVM ||
-		    !smp_vcpu_scheduled(~cpu))
-			smp_yield_cpu(~cpu);
-	}
-}
-EXPORT_SYMBOL(arch_spin_relax);
-
 void _raw_read_lock_wait(arch_rwlock_t *rw)
 {
 	unsigned int old;
@@ -124,7 +121,7 @@ void _raw_read_lock_wait(arch_rwlock_t *rw)
 		if (!arch_read_can_lock(rw))
 			continue;
 		old = rw->lock & 0x7fffffffU;
-		if (_raw_compare_and_swap(&rw->lock, old, old + 1) == old)
+		if (_raw_compare_and_swap(&rw->lock, old, old + 1))
 			return;
 	}
 }
@@ -145,7 +142,7 @@ void _raw_read_lock_wait_flags(arch_rwlock_t *rw, unsigned long flags)
 			continue;
 		old = rw->lock & 0x7fffffffU;
 		local_irq_disable();
-		if (_raw_compare_and_swap(&rw->lock, old, old + 1) == old)
+		if (_raw_compare_and_swap(&rw->lock, old, old + 1))
 			return;
 	}
 }
@@ -160,7 +157,7 @@ int _raw_read_trylock_retry(arch_rwlock_t *rw)
 		if (!arch_read_can_lock(rw))
 			continue;
 		old = rw->lock & 0x7fffffffU;
-		if (_raw_compare_and_swap(&rw->lock, old, old + 1) == old)
+		if (_raw_compare_and_swap(&rw->lock, old, old + 1))
 			return 1;
 	}
 	return 0;
@@ -178,7 +175,7 @@ void _raw_write_lock_wait(arch_rwlock_t *rw)
 		}
 		if (!arch_write_can_lock(rw))
 			continue;
-		if (_raw_compare_and_swap(&rw->lock, 0, 0x80000000) == 0)
+		if (_raw_compare_and_swap(&rw->lock, 0, 0x80000000))
 			return;
 	}
 }
@@ -197,7 +194,7 @@ void _raw_write_lock_wait_flags(arch_rwlock_t *rw, unsigned long flags)
 		if (!arch_write_can_lock(rw))
 			continue;
 		local_irq_disable();
-		if (_raw_compare_and_swap(&rw->lock, 0, 0x80000000) == 0)
+		if (_raw_compare_and_swap(&rw->lock, 0, 0x80000000))
 			return;
 	}
 }
@@ -210,7 +207,7 @@ int _raw_write_trylock_retry(arch_rwlock_t *rw)
 	while (count-- > 0) {
 		if (!arch_write_can_lock(rw))
 			continue;
-		if (_raw_compare_and_swap(&rw->lock, 0, 0x80000000) == 0)
+		if (_raw_compare_and_swap(&rw->lock, 0, 0x80000000))
 			return 1;
 	}
 	return 0;
--
cgit v1.2.3
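
Note: the hunks above show arch_spin_trylock_retry switching to the new
arch_spin_trylock_once helper, but the helper itself is added in the spinlock
header rather than under arch/s390/lib, so its body is not visible in this
diff. As a rough sketch only (the exact implementation in the header may
differ), it could look like the following, given that the lock value is the
inverted CPU id and that _raw_compare_and_swap now returns true on success:

/*
 * Hypothetical sketch -- not part of the patch above. Assumes the lock
 * word holds the inverted CPU id of the owner, as in the old
 * arch_spin_trylock_retry, and that _raw_compare_and_swap reports success
 * as a boolean.
 */
static inline int arch_spin_trylock_once(arch_spinlock_t *lp)
{
	unsigned int cpu = ~smp_processor_id();

	/* Take the lock only if it is currently free (lock word == 0). */
	return _raw_compare_and_swap(&lp->lock, 0, cpu);
}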