author    Nicolas Pitre <nicolas.pitre@linaro.org>  2011-06-19 23:36:03 -0400
committer Nicolas Pitre <nico@fluxnic.net>          2011-06-28 15:47:47 -0400
commit    40fb79c8a88625504857d44de1bc89dc0341e618 (patch)
tree      1f0e417a1f1c80fcaa79729f1a4b29e96f01fed2 /Documentation
parent    37b8304642c7f91df54888955c373ae89b577fcc (diff)
ARM: add a kuser_cmpxchg64 user space helper
Some user space applications are designed around the ability to perform
atomic operations on 64 bit values.  Since this is natively possible
only with ARMv6k and above, let's provide a new kuser helper to perform
the operation with kernel supervision on pre ARMv6k hardware.

Signed-off-by: Nicolas Pitre <nicolas.pitre@linaro.org>
Tested-by: Dave Martin <dave.martin@linaro.org>
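For reference, the native route on ARMv6K and later is the ldrexd/strexd
exclusive access pair; a rough sketch of such a compare-and-swap loop
(register assignments are illustrative, with the old value in r0-r1, the
new value in r6-r7, and the target pointer in r2):

1:	ldrexd	r4, r5, [r2]		@ load current 64-bit value exclusively
	teq	r4, r0			@ compare low words
	teqeq	r5, r1			@ ... then high words
	bne	2f			@ mismatch: no exchange
	strexd	r3, r6, r7, [r2]	@ try to store r6-r7; r3 = status
	teq	r3, #0			@ 0 means the store went through
	bne	1b			@ exclusivity lost: retry
2: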
Diffstat (limited to 'Documentation')
-rw-r--r--  Documentation/arm/kernel_user_helpers.txt | 64
1 file changed, 64 insertions(+), 0 deletions(-)
diff --git a/Documentation/arm/kernel_user_helpers.txt b/Documentation/arm/kernel_user_helpers.txt
index 0c33f72d187..a17df9f91d1 100644
--- a/Documentation/arm/kernel_user_helpers.txt
+++ b/Documentation/arm/kernel_user_helpers.txt
@@ -201,3 +201,67 @@ typedef void (__kuser_dmb_t)(void);
Notes:
- Valid only if __kuser_helper_version >= 3 (from kernel version 2.6.15).
+
+kuser_cmpxchg64
+---------------
+
+Location: 0xffff0f60
+
+Reference prototype:
+
+ int __kuser_cmpxchg64(const int64_t *oldval,
+                       const int64_t *newval,
+                       volatile int64_t *ptr);
+
+Input:
+
+ r0 = pointer to oldval
+ r1 = pointer to newval
+ r2 = pointer to target value
+ lr = return address
+
+Output:
+
+ r0 = success code (zero or non-zero)
+ C flag = set if r0 == 0, clear if r0 != 0
+
+Clobbered registers:
+
+ r3, lr, flags
+
+Definition:
+
+ Atomically store the 64-bit value pointed to by newval in *ptr only if
+ *ptr is equal to the 64-bit value pointed to by oldval.  Return zero
+ if *ptr was changed, or non-zero if no exchange happened.
+
+ The C flag is also set if *ptr was changed so that assembly callers
+ can branch on it directly instead of testing r0 first, as shown in
+ the sketch below.
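+
+ For instance, an assembly caller can retry a failed exchange by
+ branching on the C flag alone, with no explicit test of r0.  A minimal
+ sketch (the label, the use of r3 for the entry address, and the "blx",
+ which needs ARMv5T or later, are all illustrative):
+
+	ldr	r3, =0xffff0f60		@ __kuser_cmpxchg64 entry point
+1:	@ ... reload the oldval buffer from *ptr, recompute newval ...
+	blx	r3			@ r0 = &oldval, r1 = &newval, r2 = ptr
+	bcc	1b			@ C clear: no exchange happened, retry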
+
+Usage example:
+
+typedef int (__kuser_cmpxchg64_t)(const int64_t *oldval,
+                                  const int64_t *newval,
+                                  volatile int64_t *ptr);
+#define __kuser_cmpxchg64 (*(__kuser_cmpxchg64_t *)0xffff0f60)
+
+int64_t atomic_add64(volatile int64_t *ptr, int64_t val)
+{
+	int64_t old, new;
+
+	do {
+		old = *ptr;
+		new = old + val;
+	} while (__kuser_cmpxchg64(&old, &new, ptr));
+
+	return new;
+}
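+
+The same retry pattern extends to other 64-bit atomic primitives built
+on this helper.  As a further illustrative example, an unconditional
+64-bit swap returning the previous value:
+
+int64_t atomic_swap64(volatile int64_t *ptr, int64_t new)
+{
+	int64_t old;
+
+	do {
+		old = *ptr;
+	} while (__kuser_cmpxchg64(&old, &new, ptr));
+
+	return old;
+}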
+
+Notes:
+
+ - This routine already includes memory barriers as needed.
+
+ - Due to the length of this sequence, this helper spans 2 conventional
+   kuser "slots"; therefore 0xffff0f80 is not used as a valid entry
+   point.
+
+ - Valid only if __kuser_helper_version >= 5 (from kernel version 3.1).
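+
+ Since this helper is only present from version 5 onward, code meant to
+ run on older kernels should test __kuser_helper_version (located at
+ 0xffff0ffc, described at the beginning of this document) before calling
+ it.  A minimal sketch (the function name is illustrative):
+
+#define __kuser_helper_version (*(int32_t *)0xffff0ffc)
+
+int have_kuser_cmpxchg64(void)
+{
+	/* kuser_cmpxchg64 is valid only if the helper version is >= 5 */
+	return __kuser_helper_version >= 5;
+}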