/*
 * rwsem.h: R/W semaphores implemented using CAS
 *
 * Written by David S. Miller (davem@redhat.com), 2001.
 * Derived from asm-i386/rwsem.h
 */
#ifndef _SPARC64_RWSEM_H
#define _SPARC64_RWSEM_H

#ifndef _LINUX_RWSEM_H
#error "please don't include asm/rwsem.h directly, use linux/rwsem.h instead"
#endif

#ifdef __KERNEL__

#include <linux/list.h>
#include <linux/spinlock.h>

struct rwsem_waiter;

struct rw_semaphore {
	signed long count;
#define RWSEM_UNLOCKED_VALUE		0x00000000L
#define RWSEM_ACTIVE_BIAS		0x00000001L
#define RWSEM_ACTIVE_MASK		0xffffffffL
#define RWSEM_WAITING_BIAS		(-RWSEM_ACTIVE_MASK-1)
#define RWSEM_ACTIVE_READ_BIAS		RWSEM_ACTIVE_BIAS
#define RWSEM_ACTIVE_WRITE_BIAS		(RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS)
	spinlock_t		wait_lock;
	struct list_head	wait_list;
#ifdef CONFIG_DEBUG_LOCK_ALLOC
	struct lockdep_map	dep_map;
#endif
};
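
/*
 * State encoding of "count": the low 32 bits (RWSEM_ACTIVE_MASK) hold
 * the number of active lockers, and the upper bits go negative once a
 * writer owns the lock or tasks are queued on wait_list:
 *
 *	0				unlocked
 *	N  (1..0xffffffff)		N readers active
 *	RWSEM_ACTIVE_WRITE_BIAS		one writer active, nobody waiting
 *	< 0 with zero active mask	waiters queued, no active lockers
 */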

#ifdef CONFIG_DEBUG_LOCK_ALLOC
# define __RWSEM_DEP_MAP_INIT(lockname) , .dep_map = { .name = #lockname }
#else
# define __RWSEM_DEP_MAP_INIT(lockname)
#endif

#define __RWSEM_INITIALIZER(name) \
{ RWSEM_UNLOCKED_VALUE, __SPIN_LOCK_UNLOCKED((name).wait_lock), \
  LIST_HEAD_INIT((name).wait_list) __RWSEM_DEP_MAP_INIT(name) }

#define DECLARE_RWSEM(name) \
	struct rw_semaphore name = __RWSEM_INITIALIZER(name)
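
/*
 * Sketch of typical usage (hypothetical caller; real code goes through
 * the generic down_read()/up_read() and friends in linux/rwsem.h, which
 * wrap the __-prefixed primitives below):
 *
 *	static DECLARE_RWSEM(example_rwsem);
 *
 *	down_read(&example_rwsem);
 *	... read-side critical section, may run concurrently ...
 *	up_read(&example_rwsem);
 *
 *	down_write(&example_rwsem);
 *	... write-side critical section, fully exclusive ...
 *	up_write(&example_rwsem);
 */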

extern struct rw_semaphore *rwsem_down_read_failed(struct rw_semaphore *sem);
extern struct rw_semaphore *rwsem_down_write_failed(struct rw_semaphore *sem);
extern struct rw_semaphore *rwsem_wake(struct rw_semaphore *sem);
extern struct rw_semaphore *rwsem_downgrade_wake(struct rw_semaphore *sem);

extern void __init_rwsem(struct rw_semaphore *sem, const char *name,
			 struct lock_class_key *key);

#define init_rwsem(sem)					\
do {							\
	static struct lock_class_key __key;		\
							\
	__init_rwsem((sem), #sem, &__key);		\
} while (0)

/*
 * lock for reading
 */
static inline void __down_read(struct rw_semaphore *sem)
{
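	/*
	 * The increment adds our reader bias; a non-positive result
	 * means a writer holds the lock or is queued, so take the
	 * slow path and sleep.
	 */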
	if (unlikely(atomic64_inc_return((atomic64_t *)(&sem->count)) <= 0L))
		rwsem_down_read_failed(sem);
}

static inline int __down_read_trylock(struct rw_semaphore *sem)
{
	long tmp;

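	/*
	 * Retry the CAS as long as no writer is active or queued
	 * (count still non-negative); give up without sleeping once
	 * the count goes negative.
	 */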
	while ((tmp = sem->count) >= 0L) {
		if (tmp == cmpxchg(&sem->count, tmp,
				   tmp + RWSEM_ACTIVE_READ_BIAS)) {
			return 1;
		}
	}
	return 0;
}

/*
 * lock for writing
 */
static inline void __down_write_nested(struct rw_semaphore *sem, int subclass)
{
	long tmp;

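	/*
	 * Add the writer bias; the only result that means we took the
	 * lock uncontended is exactly RWSEM_ACTIVE_WRITE_BIAS.  Any
	 * other value means readers, a writer, or waiters beat us.
	 */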
	tmp = atomic64_add_return(RWSEM_ACTIVE_WRITE_BIAS,
				  (atomic64_t *)(&sem->count));
	if (unlikely(tmp != RWSEM_ACTIVE_WRITE_BIAS))
		rwsem_down_write_failed(sem);
}

static inline void __down_write(struct rw_semaphore *sem)
{
	__down_write_nested(sem, 0);
}

static inline int __down_write_trylock(struct rw_semaphore *sem)
{
	long tmp;

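	/*
	 * A single CAS: the write lock can only be taken here if the
	 * semaphore is completely idle (count == RWSEM_UNLOCKED_VALUE).
	 */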
	tmp = cmpxchg(&sem->count, RWSEM_UNLOCKED_VALUE,
		      RWSEM_ACTIVE_WRITE_BIAS);
	return tmp == RWSEM_UNLOCKED_VALUE;
}

/*
 * unlock after reading
 */
static inline void __up_read(struct rw_semaphore *sem)
{
	long tmp;

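	/*
	 * Drop our reader bias.  If the result is negative (waiters
	 * are queued) and the active count in the low 32 bits just
	 * hit zero, we were the last active locker: wake a waiter.
	 */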
	tmp = atomic64_dec_return((atomic64_t *)(&sem->count));
	if (unlikely(tmp < -1L && (tmp & RWSEM_ACTIVE_MASK) == 0L))
		rwsem_wake(sem);
}

/*
 * unlock after writing
 */
static inline void __up_write(struct rw_semaphore *sem)
{
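	/*
	 * Subtracting the writer bias returns the count to zero when
	 * nobody is waiting; a negative result means tasks queued up
	 * while we held the lock, so hand it off via rwsem_wake().
	 */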
	if (unlikely(atomic64_sub_return(RWSEM_ACTIVE_WRITE_BIAS,
					 (atomic64_t *)(&sem->count)) < 0L))
		rwsem_wake(sem);
}

/*
 * implement atomic add functionality
 */
static inline void rwsem_atomic_add(long delta, struct rw_semaphore *sem)
{
	atomic64_add(delta, (atomic64_t *)(&sem->count));
}

/*
 * downgrade write lock to read lock
 */
static inline void __downgrade_write(struct rw_semaphore *sem)
{
	long tmp;

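	/*
	 * Adding -RWSEM_WAITING_BIAS turns RWSEM_ACTIVE_WRITE_BIAS
	 * (== RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS) into plain
	 * RWSEM_ACTIVE_BIAS: we become a single active reader.  A
	 * still-negative result means waiters remain queued; wake the
	 * waiting readers, which can now share the lock with us.
	 */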
	tmp = atomic64_add_return(-RWSEM_WAITING_BIAS, (atomic64_t *)(&sem->count));
	if (tmp < 0L)
		rwsem_downgrade_wake(sem);
}

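/*
 * rwsem_atomic_add() above and rwsem_atomic_update() below are the
 * hooks the generic slow-path code in lib/rwsem.c uses to adjust the
 * count while it manipulates wait_list under wait_lock.
 */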
/*
 * implement exchange and add functionality
 */
static inline long rwsem_atomic_update(long delta, struct rw_semaphore *sem)
{
	return atomic64_add_return(delta, (atomic64_t *)(&sem->count));
}

static inline int rwsem_is_locked(struct rw_semaphore *sem)
{
	return (sem->count != 0);
}

#endif /* __KERNEL__ */

#endif /* _SPARC64_RWSEM_H */