blob: 8f2f94d53434af179ae1be20a1573d3e0df026f4 [file] [log] [blame]
Linus Torvalds1da177e2005-04-16 15:20:36 -07001/*
2 * atomic32.c: 32-bit atomic_t implementation
3 *
4 * Copyright (C) 2004 Keith M Wesolowski
Kyle McMartin6197fe42007-05-29 02:51:13 -07005 * Copyright (C) 2007 Kyle McMartin
Linus Torvalds1da177e2005-04-16 15:20:36 -07006 *
7 * Based on asm-parisc/atomic.h Copyright (C) 2000 Philipp Rumpf
8 */
9
Arun Sharma600634972011-07-26 16:09:06 -070010#include <linux/atomic.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070011#include <linux/spinlock.h>
12#include <linux/module.h>
13
#ifdef CONFIG_SMP
/*
 * On SMP, atomics on a given address are serialized by one of a small
 * pool of spinlocks, selected by hashing the address (low bits above
 * the cache-line-ish granularity of >>8).
 */
#define ATOMIC_HASH_SIZE	4
#define ATOMIC_HASH(a)	(&__atomic_hash[(((unsigned long)a)>>8) & (ATOMIC_HASH_SIZE-1)])

spinlock_t __atomic_hash[ATOMIC_HASH_SIZE] = {
	[0 ... (ATOMIC_HASH_SIZE-1)] = __SPIN_LOCK_UNLOCKED(__atomic_hash)
};

#else /* SMP */

/* On UP a single dummy lock suffices: taking it only disables IRQs. */
static DEFINE_SPINLOCK(dummy);
#define ATOMIC_HASH_SIZE	1
#define ATOMIC_HASH(a)	(&dummy)

#endif /* SMP */
29
30int __atomic_add_return(int i, atomic_t *v)
31{
32 int ret;
33 unsigned long flags;
34 spin_lock_irqsave(ATOMIC_HASH(v), flags);
35
36 ret = (v->counter += i);
37
38 spin_unlock_irqrestore(ATOMIC_HASH(v), flags);
39 return ret;
40}
Nick Piggin4a6dae62005-11-13 16:07:24 -080041EXPORT_SYMBOL(__atomic_add_return);
42
Andreas Larsson96920742014-11-05 15:52:08 +010043int atomic_xchg(atomic_t *v, int new)
44{
45 int ret;
46 unsigned long flags;
47
48 spin_lock_irqsave(ATOMIC_HASH(v), flags);
49 ret = v->counter;
50 v->counter = new;
51 spin_unlock_irqrestore(ATOMIC_HASH(v), flags);
52 return ret;
53}
54EXPORT_SYMBOL(atomic_xchg);
55
Nick Piggin4a6dae62005-11-13 16:07:24 -080056int atomic_cmpxchg(atomic_t *v, int old, int new)
57{
58 int ret;
59 unsigned long flags;
60
61 spin_lock_irqsave(ATOMIC_HASH(v), flags);
62 ret = v->counter;
63 if (likely(ret == old))
64 v->counter = new;
65
66 spin_unlock_irqrestore(ATOMIC_HASH(v), flags);
67 return ret;
68}
Robert Reif74e61de2007-03-26 19:10:43 -070069EXPORT_SYMBOL(atomic_cmpxchg);
Linus Torvalds1da177e2005-04-16 15:20:36 -070070
Stephen Rothwell678624e402011-07-27 12:49:44 -070071int __atomic_add_unless(atomic_t *v, int a, int u)
Nick Piggin8426e1f2005-11-13 16:07:25 -080072{
73 int ret;
74 unsigned long flags;
75
76 spin_lock_irqsave(ATOMIC_HASH(v), flags);
77 ret = v->counter;
78 if (ret != u)
79 v->counter += a;
80 spin_unlock_irqrestore(ATOMIC_HASH(v), flags);
Josip Rodina61b5822011-08-04 02:47:40 -070081 return ret;
Nick Piggin8426e1f2005-11-13 16:07:25 -080082}
Stephen Rothwell678624e402011-07-27 12:49:44 -070083EXPORT_SYMBOL(__atomic_add_unless);
Nick Piggin8426e1f2005-11-13 16:07:25 -080084
Nick Piggin8426e1f2005-11-13 16:07:25 -080085/* Atomic operations are already serializing */
Linus Torvalds1da177e2005-04-16 15:20:36 -070086void atomic_set(atomic_t *v, int i)
87{
88 unsigned long flags;
Nick Piggin4a6dae62005-11-13 16:07:24 -080089
Linus Torvalds1da177e2005-04-16 15:20:36 -070090 spin_lock_irqsave(ATOMIC_HASH(v), flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -070091 v->counter = i;
Linus Torvalds1da177e2005-04-16 15:20:36 -070092 spin_unlock_irqrestore(ATOMIC_HASH(v), flags);
93}
Linus Torvalds1da177e2005-04-16 15:20:36 -070094EXPORT_SYMBOL(atomic_set);
David S. Miller8a8b8362006-12-17 16:18:47 -080095
/*
 * OR @mask into *@addr under the hash lock.  Returns the masked bits
 * as they were beforehand (nonzero iff any were already set).
 */
unsigned long ___set_bit(unsigned long *addr, unsigned long mask)
{
	unsigned long irqflags;
	unsigned long prev;

	spin_lock_irqsave(ATOMIC_HASH(addr), irqflags);
	prev = *addr;
	*addr = prev | mask;
	spin_unlock_irqrestore(ATOMIC_HASH(addr), irqflags);

	return prev & mask;
}
EXPORT_SYMBOL(___set_bit);
108
/*
 * Clear the @mask bits in *@addr under the hash lock.  Returns the
 * masked bits as they were beforehand (nonzero iff any were set).
 */
unsigned long ___clear_bit(unsigned long *addr, unsigned long mask)
{
	unsigned long irqflags;
	unsigned long prev;

	spin_lock_irqsave(ATOMIC_HASH(addr), irqflags);
	prev = *addr;
	*addr = prev & ~mask;
	spin_unlock_irqrestore(ATOMIC_HASH(addr), irqflags);

	return prev & mask;
}
EXPORT_SYMBOL(___clear_bit);
121
/*
 * Toggle the @mask bits in *@addr under the hash lock.  Returns the
 * masked bits as they were beforehand.
 */
unsigned long ___change_bit(unsigned long *addr, unsigned long mask)
{
	unsigned long irqflags;
	unsigned long prev;

	spin_lock_irqsave(ATOMIC_HASH(addr), irqflags);
	prev = *addr;
	*addr = prev ^ mask;
	spin_unlock_irqrestore(ATOMIC_HASH(addr), irqflags);

	return prev & mask;
}
EXPORT_SYMBOL(___change_bit);
Kyle McMartin6197fe42007-05-29 02:51:13 -0700134
135unsigned long __cmpxchg_u32(volatile u32 *ptr, u32 old, u32 new)
136{
137 unsigned long flags;
138 u32 prev;
139
Andrew Morton1fb88122007-05-31 01:19:24 -0700140 spin_lock_irqsave(ATOMIC_HASH(ptr), flags);
Kyle McMartin6197fe42007-05-29 02:51:13 -0700141 if ((prev = *ptr) == old)
142 *ptr = new;
Andrew Morton1fb88122007-05-31 01:19:24 -0700143 spin_unlock_irqrestore(ATOMIC_HASH(ptr), flags);
Kyle McMartin6197fe42007-05-29 02:51:13 -0700144
145 return (unsigned long)prev;
146}
147EXPORT_SYMBOL(__cmpxchg_u32);
Andreas Larsson96920742014-11-05 15:52:08 +0100148
149unsigned long __xchg_u32(volatile u32 *ptr, u32 new)
150{
151 unsigned long flags;
152 u32 prev;
153
154 spin_lock_irqsave(ATOMIC_HASH(ptr), flags);
155 prev = *ptr;
156 *ptr = new;
157 spin_unlock_irqrestore(ATOMIC_HASH(ptr), flags);
158
159 return (unsigned long)prev;
160}
161EXPORT_SYMBOL(__xchg_u32);