author     Will Deacon <will.deacon@arm.com>	2015-10-07 15:10:38 +0100
committer  Ingo Molnar <mingo@kernel.org>	2015-10-20 10:17:23 +0200
commit     6e490b0106a2118ee4c37c37847454a5c2dc6e32
tree       32e3984d00d96945d2a8d48dc6cd0b04959aa0b6 /arch/arm
parent     a1a2ab2ff7040bdc9a31bfe7173913971f7d723b
ARM, locking/atomics: Implement _relaxed variants of atomic[64]_{inc,dec}
Now that the core code supports acquire/release/relaxed versions of the
atomic_inc family, implement only the _relaxed flavours in the ARM
backend so that we get all of the others for free.

Signed-off-by: Will Deacon <will.deacon@arm.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Acked-by: Davidlohr Bueso <dbueso@suse.de>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Link: http://lkml.kernel.org/r/1444227038-12533-1-git-send-email-will.deacon@arm.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
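How the "for free" part works: when an architecture supplies only the
_relaxed flavour of an operation, the generic layer in <linux/atomic.h>
synthesizes the acquire, release, and fully ordered forms by wrapping it
in barriers. The sketch below is paraphrased from the generic fallbacks
of that era, not copied verbatim from the kernel source; details may
differ slightly.

/*
 * Paraphrased sketch of the generic fallbacks in <linux/atomic.h>:
 * given only op##_relaxed(), build the other ordering variants by
 * adding the appropriate smp_mb barriers around the relaxed op.
 */
#define __atomic_op_acquire(op, args...)				\
({									\
	typeof(op##_relaxed(args)) __ret = op##_relaxed(args);		\
	smp_mb__after_atomic();		/* acquire: fence after op */	\
	__ret;								\
})

#define __atomic_op_release(op, args...)				\
({									\
	smp_mb__before_atomic();	/* release: fence before op */	\
	op##_relaxed(args);						\
})

#define __atomic_op_fence(op, args...)					\
({									\
	typeof(op##_relaxed(args)) __ret;				\
	smp_mb__before_atomic();					\
	__ret = op##_relaxed(args);					\
	smp_mb__after_atomic();						\
	__ret;								\
})

/*
 * So once ARM defines only atomic_inc_return_relaxed(), the core can
 * provide the fully ordered version itself:
 */
#ifndef atomic_inc_return
#define atomic_inc_return(...)	__atomic_op_fence(atomic_inc_return, __VA_ARGS__)
#endif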
Diffstat (limited to 'arch/arm')
-rw-r--r--	arch/arm/include/asm/atomic.h	8
1 file changed, 4 insertions, 4 deletions
diff --git a/arch/arm/include/asm/atomic.h b/arch/arm/include/asm/atomic.h
index 2bf80afb7841..9e10c4567eb4 100644
--- a/arch/arm/include/asm/atomic.h
+++ b/arch/arm/include/asm/atomic.h
@@ -210,8 +210,8 @@ ATOMIC_OP(xor, ^=, eor)
 
 #define atomic_inc_and_test(v)	(atomic_add_return(1, v) == 0)
 #define atomic_dec_and_test(v)	(atomic_sub_return(1, v) == 0)
-#define atomic_inc_return(v)	(atomic_add_return(1, v))
-#define atomic_dec_return(v)	(atomic_sub_return(1, v))
+#define atomic_inc_return_relaxed(v)	(atomic_add_return_relaxed(1, v))
+#define atomic_dec_return_relaxed(v)	(atomic_sub_return_relaxed(1, v))
 #define atomic_sub_and_test(i, v) (atomic_sub_return(i, v) == 0)
 
 #define atomic_add_negative(i,v) (atomic_add_return(i, v) < 0)
@@ -442,11 +442,11 @@ static inline int atomic64_add_unless(atomic64_t *v, long long a, long long u)
 
 #define atomic64_add_negative(a, v)	(atomic64_add_return((a), (v)) < 0)
 #define atomic64_inc(v)	atomic64_add(1LL, (v))
-#define atomic64_inc_return(v)	atomic64_add_return(1LL, (v))
+#define atomic64_inc_return_relaxed(v)	atomic64_add_return_relaxed(1LL, (v))
 #define atomic64_inc_and_test(v)	(atomic64_inc_return(v) == 0)
 #define atomic64_sub_and_test(a, v)	(atomic64_sub_return((a), (v)) == 0)
 #define atomic64_dec(v)	atomic64_sub(1LL, (v))
-#define atomic64_dec_return(v)	atomic64_sub_return(1LL, (v))
+#define atomic64_dec_return_relaxed(v)	atomic64_sub_return_relaxed(1LL, (v))
 #define atomic64_dec_and_test(v)	(atomic64_dec_return((v)) == 0)
 #define atomic64_inc_not_zero(v)	atomic64_add_unless((v), 1LL, 0LL)
 
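Callers see no change from this patch: atomic_inc_return() and friends
keep their fully ordered semantics on ARM; they are simply assembled by
the core header from the _relaxed definitions above rather than defined
by the architecture. A hypothetical caller, with names invented purely
for illustration:

static atomic_t active = ATOMIC_INIT(0);

void track_enter(void)
{
	/* Fully ordered; built by the core from atomic_inc_return_relaxed() */
	if (atomic_inc_return(&active) == 1)
		first_user_init();	/* hypothetical helper */
}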