path: root/arch/arm64
author	Linus Torvalds <torvalds@linux-foundation.org>	2014-10-13 15:48:00 +0200
committer	Linus Torvalds <torvalds@linux-foundation.org>	2014-10-13 15:48:00 +0200
commit	dbb885fecc1b1b35e93416bedd24d21bd20f60ed (patch)
tree	9aa92bcc4e3d3594eba0ba85d72b878d85f35a59 /arch/arm64
parent	d6dd50e07c5bec00db2005969b1a01f8ca3d25ef (diff)
parent	2291059c852706c6f5ffb400366042b7625066cd (diff)
Merge branch 'locking-arch-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull arch atomic cleanups from Ingo Molnar:
 "This is a series kept separate from the main locking tree, which
  cleans up and improves various details in the atomics type handling:

   - Remove the unused atomic_or_long() method

   - Consolidate and compress atomic ops implementations between
     architectures, to reduce linecount and to make it easier to add
     new ops.

   - Rewrite generic atomic support to only require cmpxchg() from an
     architecture - generate all other methods from that"

* 'locking-arch-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (23 commits)
  locking,arch: Use ACCESS_ONCE() instead of cast to volatile in atomic_read()
  locking, mips: Fix atomics
  locking, sparc64: Fix atomics
  locking,arch: Rewrite generic atomic support
  locking,arch,xtensa: Fold atomic_ops
  locking,arch,sparc: Fold atomic_ops
  locking,arch,sh: Fold atomic_ops
  locking,arch,powerpc: Fold atomic_ops
  locking,arch,parisc: Fold atomic_ops
  locking,arch,mn10300: Fold atomic_ops
  locking,arch,mips: Fold atomic_ops
  locking,arch,metag: Fold atomic_ops
  locking,arch,m68k: Fold atomic_ops
  locking,arch,m32r: Fold atomic_ops
  locking,arch,ia64: Fold atomic_ops
  locking,arch,hexagon: Fold atomic_ops
  locking,arch,cris: Fold atomic_ops
  locking,arch,avr32: Fold atomic_ops
  locking,arch,arm64: Fold atomic_ops
  locking,arch,arm: Fold atomic_ops
  ...
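[Editorial note] The third cleanup above (generate all other atomic methods from cmpxchg()) follows a simple read/compute/retry pattern. Below is a minimal userspace sketch of that pattern, not the actual asm-generic/atomic.h code: the kernel's cmpxchg() is stood in for by GCC's __sync_val_compare_and_swap() builtin, and atomic_t is a local stand-in type.

#include <stdio.h>

typedef struct { int counter; } atomic_t;

/* Stand-in for the kernel's cmpxchg(): if *ptr == old, store new;
 * in all cases return the value *ptr held before the attempt. */
static inline int cmpxchg_stub(int *ptr, int old, int new)
{
	return __sync_val_compare_and_swap(ptr, old, new);
}

/* Generate an op from cmpxchg(): read the counter, compute the new
 * value, and retry until no other CPU changed the counter under us. */
#define ATOMIC_OP_RETURN(op, c_op)					\
static inline int atomic_##op##_return(int i, atomic_t *v)		\
{									\
	int c, old;							\
									\
	c = v->counter;							\
	while ((old = cmpxchg_stub(&v->counter, c, c c_op i)) != c)	\
		c = old;						\
									\
	return c c_op i;						\
}

ATOMIC_OP_RETURN(add, +)
ATOMIC_OP_RETURN(sub, -)

int main(void)
{
	atomic_t v = { 40 };

	printf("%d\n", atomic_add_return(2, &v));	/* 42 */
	printf("%d\n", atomic_sub_return(2, &v));	/* 40 */
	return 0;
}

The loop only terminates once the compare-and-swap observed exactly the value the new result was computed from, so the stored value is always derived from an unmodified counter.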
Diffstat (limited to 'arch/arm64')
-rw-r--r--  arch/arm64/include/asm/atomic.h | 201
1 file changed, 82 insertions(+), 119 deletions(-)
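[Editorial note] The folded macros in the diff below are mechanical: expanding ATOMIC_OPS(add, add) with the new ATOMIC_OP/ATOMIC_OP_RETURN definitions reproduces the open-coded atomic_add()/atomic_add_return() that the patch removes. Roughly, the expansion is as follows (kernel-internal code shown only to illustrate the macro expansion; it needs the surrounding kernel definitions of atomic_t and smp_mb() to build):

static inline void atomic_add(int i, atomic_t *v)
{
	unsigned long tmp;
	int result;

	/* load-exclusive / store-exclusive retry loop, no ordering */
	asm volatile("// atomic_add\n"
"1:	ldxr	%w0, %2\n"
"	add	%w0, %w0, %w3\n"
"	stxr	%w1, %w0, %2\n"
"	cbnz	%w1, 1b"
	: "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
	: "Ir" (i));
}

static inline int atomic_add_return(int i, atomic_t *v)
{
	unsigned long tmp;
	int result;

	/* store-release (stlxr) plus smp_mb() for full ordering */
	asm volatile("// atomic_add_return\n"
"1:	ldxr	%w0, %2\n"
"	add	%w0, %w0, %w3\n"
"	stlxr	%w1, %w0, %2\n"
"	cbnz	%w1, 1b"
	: "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
	: "Ir" (i)
	: "memory");

	smp_mb();
	return result;
}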
diff --git a/arch/arm64/include/asm/atomic.h b/arch/arm64/include/asm/atomic.h
index 65f1569ac96e..7047051ded40 100644
--- a/arch/arm64/include/asm/atomic.h
+++ b/arch/arm64/include/asm/atomic.h
@@ -35,7 +35,7 @@
* strex/ldrex monitor on some implementations. The reason we can use it for
* atomic_set() is the clrex or dummy strex done on every exception return.
*/
-#define atomic_read(v) (*(volatile int *)&(v)->counter)
+#define atomic_read(v) ACCESS_ONCE((v)->counter)
#define atomic_set(v,i) (((v)->counter) = (i))
/*
@@ -43,69 +43,51 @@
* store exclusive to ensure that these are atomic. We may loop
* to ensure that the update happens.
*/
-static inline void atomic_add(int i, atomic_t *v)
-{
- unsigned long tmp;
- int result;
-
- asm volatile("// atomic_add\n"
-"1: ldxr %w0, %2\n"
-" add %w0, %w0, %w3\n"
-" stxr %w1, %w0, %2\n"
-" cbnz %w1, 1b"
- : "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
- : "Ir" (i));
-}
-
-static inline int atomic_add_return(int i, atomic_t *v)
-{
- unsigned long tmp;
- int result;
-
- asm volatile("// atomic_add_return\n"
-"1: ldxr %w0, %2\n"
-" add %w0, %w0, %w3\n"
-" stlxr %w1, %w0, %2\n"
-" cbnz %w1, 1b"
- : "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
- : "Ir" (i)
- : "memory");
-
- smp_mb();
- return result;
-}
-
-static inline void atomic_sub(int i, atomic_t *v)
-{
- unsigned long tmp;
- int result;
- asm volatile("// atomic_sub\n"
-"1: ldxr %w0, %2\n"
-" sub %w0, %w0, %w3\n"
-" stxr %w1, %w0, %2\n"
-" cbnz %w1, 1b"
- : "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
- : "Ir" (i));
+#define ATOMIC_OP(op, asm_op) \
+static inline void atomic_##op(int i, atomic_t *v) \
+{ \
+ unsigned long tmp; \
+ int result; \
+ \
+ asm volatile("// atomic_" #op "\n" \
+"1: ldxr %w0, %2\n" \
+" " #asm_op " %w0, %w0, %w3\n" \
+" stxr %w1, %w0, %2\n" \
+" cbnz %w1, 1b" \
+ : "=&r" (result), "=&r" (tmp), "+Q" (v->counter) \
+ : "Ir" (i)); \
+} \
+
+#define ATOMIC_OP_RETURN(op, asm_op) \
+static inline int atomic_##op##_return(int i, atomic_t *v) \
+{ \
+ unsigned long tmp; \
+ int result; \
+ \
+ asm volatile("// atomic_" #op "_return\n" \
+"1: ldxr %w0, %2\n" \
+" " #asm_op " %w0, %w0, %w3\n" \
+" stlxr %w1, %w0, %2\n" \
+" cbnz %w1, 1b" \
+ : "=&r" (result), "=&r" (tmp), "+Q" (v->counter) \
+ : "Ir" (i) \
+ : "memory"); \
+ \
+ smp_mb(); \
+ return result; \
}
-static inline int atomic_sub_return(int i, atomic_t *v)
-{
- unsigned long tmp;
- int result;
+#define ATOMIC_OPS(op, asm_op) \
+ ATOMIC_OP(op, asm_op) \
+ ATOMIC_OP_RETURN(op, asm_op)
- asm volatile("// atomic_sub_return\n"
-"1: ldxr %w0, %2\n"
-" sub %w0, %w0, %w3\n"
-" stlxr %w1, %w0, %2\n"
-" cbnz %w1, 1b"
- : "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
- : "Ir" (i)
- : "memory");
+ATOMIC_OPS(add, add)
+ATOMIC_OPS(sub, sub)
- smp_mb();
- return result;
-}
+#undef ATOMIC_OPS
+#undef ATOMIC_OP_RETURN
+#undef ATOMIC_OP
static inline int atomic_cmpxchg(atomic_t *ptr, int old, int new)
{
@@ -157,72 +139,53 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
*/
#define ATOMIC64_INIT(i) { (i) }
-#define atomic64_read(v) (*(volatile long *)&(v)->counter)
+#define atomic64_read(v) ACCESS_ONCE((v)->counter)
#define atomic64_set(v,i) (((v)->counter) = (i))
-static inline void atomic64_add(u64 i, atomic64_t *v)
-{
- long result;
- unsigned long tmp;
-
- asm volatile("// atomic64_add\n"
-"1: ldxr %0, %2\n"
-" add %0, %0, %3\n"
-" stxr %w1, %0, %2\n"
-" cbnz %w1, 1b"
- : "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
- : "Ir" (i));
+#define ATOMIC64_OP(op, asm_op) \
+static inline void atomic64_##op(long i, atomic64_t *v) \
+{ \
+ long result; \
+ unsigned long tmp; \
+ \
+ asm volatile("// atomic64_" #op "\n" \
+"1: ldxr %0, %2\n" \
+" " #asm_op " %0, %0, %3\n" \
+" stxr %w1, %0, %2\n" \
+" cbnz %w1, 1b" \
+ : "=&r" (result), "=&r" (tmp), "+Q" (v->counter) \
+ : "Ir" (i)); \
+} \
+
+#define ATOMIC64_OP_RETURN(op, asm_op) \
+static inline long atomic64_##op##_return(long i, atomic64_t *v) \
+{ \
+ long result; \
+ unsigned long tmp; \
+ \
+ asm volatile("// atomic64_" #op "_return\n" \
+"1: ldxr %0, %2\n" \
+" " #asm_op " %0, %0, %3\n" \
+" stlxr %w1, %0, %2\n" \
+" cbnz %w1, 1b" \
+ : "=&r" (result), "=&r" (tmp), "+Q" (v->counter) \
+ : "Ir" (i) \
+ : "memory"); \
+ \
+ smp_mb(); \
+ return result; \
}
-static inline long atomic64_add_return(long i, atomic64_t *v)
-{
- long result;
- unsigned long tmp;
+#define ATOMIC64_OPS(op, asm_op) \
+ ATOMIC64_OP(op, asm_op) \
+ ATOMIC64_OP_RETURN(op, asm_op)
- asm volatile("// atomic64_add_return\n"
-"1: ldxr %0, %2\n"
-" add %0, %0, %3\n"
-" stlxr %w1, %0, %2\n"
-" cbnz %w1, 1b"
- : "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
- : "Ir" (i)
- : "memory");
+ATOMIC64_OPS(add, add)
+ATOMIC64_OPS(sub, sub)
- smp_mb();
- return result;
-}
-
-static inline void atomic64_sub(u64 i, atomic64_t *v)
-{
- long result;
- unsigned long tmp;
-
- asm volatile("// atomic64_sub\n"
-"1: ldxr %0, %2\n"
-" sub %0, %0, %3\n"
-" stxr %w1, %0, %2\n"
-" cbnz %w1, 1b"
- : "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
- : "Ir" (i));
-}
-
-static inline long atomic64_sub_return(long i, atomic64_t *v)
-{
- long result;
- unsigned long tmp;
-
- asm volatile("// atomic64_sub_return\n"
-"1: ldxr %0, %2\n"
-" sub %0, %0, %3\n"
-" stlxr %w1, %0, %2\n"
-" cbnz %w1, 1b"
- : "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
- : "Ir" (i)
- : "memory");
-
- smp_mb();
- return result;
-}
+#undef ATOMIC64_OPS
+#undef ATOMIC64_OP_RETURN
+#undef ATOMIC64_OP
static inline long atomic64_cmpxchg(atomic64_t *ptr, long old, long new)
{