/*
 *  S390 version
 *    Copyright IBM Corp. 1999
 *    Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
 *
 *  Derived from "include/asm-i386/bitops.h"
 *    Copyright (C) 1992, Linus Torvalds
 *
 */

#ifndef _S390_BITOPS_H
#define _S390_BITOPS_H

#ifndef _LINUX_BITOPS_H
#error only <linux/bitops.h> can be included directly
#endif

#include <linux/compiler.h>

/*
 * 32 bit bitops format:
 * bit 0 is the LSB of *addr; bit 31 is the MSB of *addr;
 * bit 32 is the LSB of *(addr+4). That combined with the
 * big endian byte order on S390 gives the following bit
 * order in memory:
 *    1f 1e 1d 1c 1b 1a 19 18 17 16 15 14 13 12 11 10 \
 *    0f 0e 0d 0c 0b 0a 09 08 07 06 05 04 03 02 01 00
 * after that follows the next long with bit numbers
 *    3f 3e 3d 3c 3b 3a 39 38 37 36 35 34 33 32 31 30
 *    2f 2e 2d 2c 2b 2a 29 28 27 26 25 24 23 22 21 20
 * The reason for this bit ordering is the fact that
 * in the architecture independent code bit operations
 * of the form "flags |= (1 << bitnr)" are used INTERMIXED
 * with operations of the form "set_bit(bitnr, flags)".
 *
 * 64 bit bitops format:
 * bit 0 is the LSB of *addr; bit 63 is the MSB of *addr;
 * bit 64 is the LSB of *(addr+8). That combined with the
 * big endian byte order on S390 gives the following bit
 * order in memory:
 *    3f 3e 3d 3c 3b 3a 39 38 37 36 35 34 33 32 31 30
 *    2f 2e 2d 2c 2b 2a 29 28 27 26 25 24 23 22 21 20
 *    1f 1e 1d 1c 1b 1a 19 18 17 16 15 14 13 12 11 10
 *    0f 0e 0d 0c 0b 0a 09 08 07 06 05 04 03 02 01 00
 * after that follows the next long with bit numbers
 *    7f 7e 7d 7c 7b 7a 79 78 77 76 75 74 73 72 71 70
 *    6f 6e 6d 6c 6b 6a 69 68 67 66 65 64 63 62 61 60
 *    5f 5e 5d 5c 5b 5a 59 58 57 56 55 54 53 52 51 50
 *    4f 4e 4d 4c 4b 4a 49 48 47 46 45 44 43 42 41 40
 * The reason for this bit ordering is the fact that
 * in the architecture independent code bit operations
 * of the form "flags |= (1 << bitnr)" are used INTERMIXED
 * with operations of the form "set_bit(bitnr, flags)".
 */
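
/*
 * Illustrative example (not part of this header's interface): with the
 * layout above, the byte inside a word that holds bit "nr" is found by
 * XORing the bit number with (__BITOPS_WORDSIZE - 8) before shifting,
 * exactly as the non-atomic helpers below do. On a 64 bit kernel:
 *
 *	nr = 0;
 *	byte = (nr ^ (64 - 8)) >> 3;	yields byte 7, the least
 *					significant byte of the big
 *					endian long
 *
 * so set_bit(0, addr) and "*addr |= 1UL << 0" modify the same storage bit.
 */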

/* bitmap tables from arch/s390/kernel/bitmap.c */
extern const char _oi_bitmap[];
extern const char _ni_bitmap[];
extern const char _zb_findmap[];
extern const char _sb_findmap[];

#ifndef CONFIG_64BIT

#define __BITOPS_ALIGN		3
#define __BITOPS_WORDSIZE	32
#define __BITOPS_OR		"or"
#define __BITOPS_AND		"nr"
#define __BITOPS_XOR		"xr"

#define __BITOPS_LOOP(__old, __new, __addr, __val, __op_string)	\
	asm volatile(							\
		"	l	%0,%2\n"				\
		"0:	lr	%1,%0\n"				\
		__op_string "	%1,%3\n"				\
		"	cs	%0,%1,%2\n"				\
		"	jl	0b"					\
		: "=&d" (__old), "=&d" (__new),				\
		  "=Q" (*(unsigned long *) __addr)			\
		: "d" (__val), "Q" (*(unsigned long *) __addr)		\
		: "cc");

#else /* CONFIG_64BIT */

#define __BITOPS_ALIGN		7
#define __BITOPS_WORDSIZE	64
#define __BITOPS_OR		"ogr"
#define __BITOPS_AND		"ngr"
#define __BITOPS_XOR		"xgr"

#define __BITOPS_LOOP(__old, __new, __addr, __val, __op_string)	\
	asm volatile(							\
		"	lg	%0,%2\n"				\
		"0:	lgr	%1,%0\n"				\
		__op_string "	%1,%3\n"				\
		"	csg	%0,%1,%2\n"				\
		"	jl	0b"					\
		: "=&d" (__old), "=&d" (__new),				\
		  "=Q" (*(unsigned long *) __addr)			\
		: "d" (__val), "Q" (*(unsigned long *) __addr)		\
		: "cc");

#endif /* CONFIG_64BIT */

#define __BITOPS_WORDS(bits) (((bits)+__BITOPS_WORDSIZE-1)/__BITOPS_WORDSIZE)
#define __BITOPS_BARRIER() asm volatile("" : : : "memory")
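
/*
 * Illustrative sketch (not part of this header): __BITOPS_LOOP is the
 * usual compare-and-swap retry loop. In plain C it corresponds roughly to
 *
 *	old = *addr;
 *	for (;;) {
 *		new = old <op> val;
 *		prev = compare_and_swap(addr, old, new);
 *		if (prev == old)
 *			break;
 *		old = prev;
 *	}
 *
 * where compare_and_swap stands for the CS/CSG instruction, which already
 * reloads the current memory value into %0 when the compare fails, so the
 * asm only has to branch back and retry.
 */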

#ifdef CONFIG_SMP
/*
 * SMP safe set_bit routine based on compare and swap (CS)
 */
static inline void set_bit_cs(unsigned long nr, volatile unsigned long *ptr)
{
	unsigned long addr, old, new, mask;

	addr = (unsigned long) ptr;
	/* calculate address for CS */
	addr += (nr ^ (nr & (__BITOPS_WORDSIZE - 1))) >> 3;
	/* make OR mask */
	mask = 1UL << (nr & (__BITOPS_WORDSIZE - 1));
	/* Do the atomic update. */
	__BITOPS_LOOP(old, new, addr, mask, __BITOPS_OR);
}

/*
 * SMP safe clear_bit routine based on compare and swap (CS)
 */
static inline void clear_bit_cs(unsigned long nr, volatile unsigned long *ptr)
{
	unsigned long addr, old, new, mask;

	addr = (unsigned long) ptr;
	/* calculate address for CS */
	addr += (nr ^ (nr & (__BITOPS_WORDSIZE - 1))) >> 3;
	/* make AND mask */
	mask = ~(1UL << (nr & (__BITOPS_WORDSIZE - 1)));
	/* Do the atomic update. */
	__BITOPS_LOOP(old, new, addr, mask, __BITOPS_AND);
}

/*
 * SMP safe change_bit routine based on compare and swap (CS)
 */
static inline void change_bit_cs(unsigned long nr, volatile unsigned long *ptr)
{
	unsigned long addr, old, new, mask;

	addr = (unsigned long) ptr;
	/* calculate address for CS */
	addr += (nr ^ (nr & (__BITOPS_WORDSIZE - 1))) >> 3;
	/* make XOR mask */
	mask = 1UL << (nr & (__BITOPS_WORDSIZE - 1));
	/* Do the atomic update. */
	__BITOPS_LOOP(old, new, addr, mask, __BITOPS_XOR);
}

/*
 * SMP safe test_and_set_bit routine based on compare and swap (CS)
 */
static inline int
test_and_set_bit_cs(unsigned long nr, volatile unsigned long *ptr)
{
	unsigned long addr, old, new, mask;

	addr = (unsigned long) ptr;
	/* calculate address for CS */
	addr += (nr ^ (nr & (__BITOPS_WORDSIZE - 1))) >> 3;
	/* make OR/test mask */
	mask = 1UL << (nr & (__BITOPS_WORDSIZE - 1));
	/* Do the atomic update. */
	__BITOPS_LOOP(old, new, addr, mask, __BITOPS_OR);
	__BITOPS_BARRIER();
	return (old & mask) != 0;
}

/*
 * SMP safe test_and_clear_bit routine based on compare and swap (CS)
 */
static inline int
test_and_clear_bit_cs(unsigned long nr, volatile unsigned long *ptr)
{
	unsigned long addr, old, new, mask;

	addr = (unsigned long) ptr;
	/* calculate address for CS */
	addr += (nr ^ (nr & (__BITOPS_WORDSIZE - 1))) >> 3;
	/* make AND/test mask */
	mask = ~(1UL << (nr & (__BITOPS_WORDSIZE - 1)));
	/* Do the atomic update. */
	__BITOPS_LOOP(old, new, addr, mask, __BITOPS_AND);
	__BITOPS_BARRIER();
	return (old ^ new) != 0;
}

/*
 * SMP safe test_and_change_bit routine based on compare and swap (CS)
 */
static inline int
test_and_change_bit_cs(unsigned long nr, volatile unsigned long *ptr)
{
	unsigned long addr, old, new, mask;

	addr = (unsigned long) ptr;
	/* calculate address for CS */
	addr += (nr ^ (nr & (__BITOPS_WORDSIZE - 1))) >> 3;
	/* make XOR/test mask */
	mask = 1UL << (nr & (__BITOPS_WORDSIZE - 1));
	/* Do the atomic update. */
	__BITOPS_LOOP(old, new, addr, mask, __BITOPS_XOR);
	__BITOPS_BARRIER();
	return (old & mask) != 0;
}
#endif /* CONFIG_SMP */

/*
 * fast, non-SMP set_bit routine
 */
static inline void __set_bit(unsigned long nr, volatile unsigned long *ptr)
{
	unsigned long addr;

	addr = (unsigned long) ptr + ((nr ^ (__BITOPS_WORDSIZE - 8)) >> 3);
	asm volatile(
		"	oc	%O0(1,%R0),%1"
		: "=Q" (*(char *) addr) : "Q" (_oi_bitmap[nr & 7]) : "cc" );
}

static inline void
__constant_set_bit(const unsigned long nr, volatile unsigned long *ptr)
{
	unsigned long addr;

	addr = ((unsigned long) ptr) + ((nr ^ (__BITOPS_WORDSIZE - 8)) >> 3);
	*(unsigned char *) addr |= 1 << (nr & 7);
}

#define set_bit_simple(nr,addr) \
(__builtin_constant_p((nr)) ? \
 __constant_set_bit((nr),(addr)) : \
 __set_bit((nr),(addr)) )

/*
 * fast, non-SMP clear_bit routine
 */
static inline void
__clear_bit(unsigned long nr, volatile unsigned long *ptr)
{
	unsigned long addr;

	addr = (unsigned long) ptr + ((nr ^ (__BITOPS_WORDSIZE - 8)) >> 3);
	asm volatile(
		"	nc	%O0(1,%R0),%1"
		: "=Q" (*(char *) addr) : "Q" (_ni_bitmap[nr & 7]) : "cc" );
}

static inline void
__constant_clear_bit(const unsigned long nr, volatile unsigned long *ptr)
{
	unsigned long addr;

	addr = ((unsigned long) ptr) + ((nr ^ (__BITOPS_WORDSIZE - 8)) >> 3);
	*(unsigned char *) addr &= ~(1 << (nr & 7));
}

#define clear_bit_simple(nr,addr) \
(__builtin_constant_p((nr)) ? \
 __constant_clear_bit((nr),(addr)) : \
 __clear_bit((nr),(addr)) )

/*
 * fast, non-SMP change_bit routine
 */
static inline void __change_bit(unsigned long nr, volatile unsigned long *ptr)
{
	unsigned long addr;

	addr = (unsigned long) ptr + ((nr ^ (__BITOPS_WORDSIZE - 8)) >> 3);
	asm volatile(
		"	xc	%O0(1,%R0),%1"
		: "=Q" (*(char *) addr) : "Q" (_oi_bitmap[nr & 7]) : "cc" );
}

static inline void
__constant_change_bit(const unsigned long nr, volatile unsigned long *ptr)
{
	unsigned long addr;

	addr = ((unsigned long) ptr) + ((nr ^ (__BITOPS_WORDSIZE - 8)) >> 3);
	*(unsigned char *) addr ^= 1 << (nr & 7);
}

#define change_bit_simple(nr,addr) \
(__builtin_constant_p((nr)) ? \
 __constant_change_bit((nr),(addr)) : \
 __change_bit((nr),(addr)) )

/*
 * fast, non-SMP test_and_set_bit routine
 */
static inline int
test_and_set_bit_simple(unsigned long nr, volatile unsigned long *ptr)
{
	unsigned long addr;
	unsigned char ch;

	addr = (unsigned long) ptr + ((nr ^ (__BITOPS_WORDSIZE - 8)) >> 3);
	ch = *(unsigned char *) addr;
	asm volatile(
		"	oc	%O0(1,%R0),%1"
		: "=Q" (*(char *) addr) : "Q" (_oi_bitmap[nr & 7])
		: "cc", "memory");
	return (ch >> (nr & 7)) & 1;
}
#define __test_and_set_bit(X,Y)		test_and_set_bit_simple(X,Y)

/*
 * fast, non-SMP test_and_clear_bit routine
 */
static inline int
test_and_clear_bit_simple(unsigned long nr, volatile unsigned long *ptr)
{
	unsigned long addr;
	unsigned char ch;

	addr = (unsigned long) ptr + ((nr ^ (__BITOPS_WORDSIZE - 8)) >> 3);
	ch = *(unsigned char *) addr;
	asm volatile(
		"	nc	%O0(1,%R0),%1"
		: "=Q" (*(char *) addr) : "Q" (_ni_bitmap[nr & 7])
		: "cc", "memory");
	return (ch >> (nr & 7)) & 1;
}
#define __test_and_clear_bit(X,Y)	test_and_clear_bit_simple(X,Y)

/*
 * fast, non-SMP test_and_change_bit routine
 */
static inline int
test_and_change_bit_simple(unsigned long nr, volatile unsigned long *ptr)
{
	unsigned long addr;
	unsigned char ch;

	addr = (unsigned long) ptr + ((nr ^ (__BITOPS_WORDSIZE - 8)) >> 3);
	ch = *(unsigned char *) addr;
	asm volatile(
		"	xc	%O0(1,%R0),%1"
		: "=Q" (*(char *) addr) : "Q" (_oi_bitmap[nr & 7])
		: "cc", "memory");
	return (ch >> (nr & 7)) & 1;
}
#define __test_and_change_bit(X,Y)	test_and_change_bit_simple(X,Y)

#ifdef CONFIG_SMP
#define set_bit             set_bit_cs
#define clear_bit           clear_bit_cs
#define change_bit          change_bit_cs
#define test_and_set_bit    test_and_set_bit_cs
#define test_and_clear_bit  test_and_clear_bit_cs
#define test_and_change_bit test_and_change_bit_cs
#else
#define set_bit             set_bit_simple
#define clear_bit           clear_bit_simple
#define change_bit          change_bit_simple
#define test_and_set_bit    test_and_set_bit_simple
#define test_and_clear_bit  test_and_clear_bit_simple
#define test_and_change_bit test_and_change_bit_simple
#endif
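
/*
 * Illustrative usage sketch (hypothetical caller, not part of this header):
 *
 *	DECLARE_BITMAP(map, 128);
 *
 *	bitmap_zero(map, 128);
 *	set_bit(3, map);			atomic on SMP kernels
 *	if (test_and_clear_bit(3, map))
 *		...				bit 3 was set, now cleared
 *	__set_bit(5, map);			non-atomic variant
 *
 * With CONFIG_SMP the names above resolve to the compare-and-swap based
 * *_cs routines, otherwise to the *_simple routines.
 */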


/*
 * This routine doesn't need to be atomic.
 */

static inline int __test_bit(unsigned long nr, const volatile unsigned long *ptr)
{
	unsigned long addr;
	unsigned char ch;

	addr = (unsigned long) ptr + ((nr ^ (__BITOPS_WORDSIZE - 8)) >> 3);
	ch = *(volatile unsigned char *) addr;
	return (ch >> (nr & 7)) & 1;
}

static inline int
__constant_test_bit(unsigned long nr, const volatile unsigned long *addr) {
	return (((volatile char *) addr)
		[(nr^(__BITOPS_WORDSIZE-8))>>3] & (1<<(nr&7))) != 0;
}

#define test_bit(nr,addr) \
(__builtin_constant_p((nr)) ? \
 __constant_test_bit((nr),(addr)) : \
 __test_bit((nr),(addr)) )

/*
 * Optimized find bit helper functions.
 */

/**
 * __ffz_word_loop - find byte offset of first long != -1UL
 * @addr: pointer to array of unsigned long
 * @size: size of the array in bits
 */
static inline unsigned long __ffz_word_loop(const unsigned long *addr,
					    unsigned long size)
{
	typedef struct { long _[__BITOPS_WORDS(size)]; } addrtype;
	unsigned long bytes = 0;

	asm volatile(
#ifndef CONFIG_64BIT
		"	ahi	%1,-1\n"
		"	sra	%1,5\n"
		"	jz	1f\n"
		"0:	c	%2,0(%0,%3)\n"
		"	jne	1f\n"
		"	la	%0,4(%0)\n"
		"	brct	%1,0b\n"
		"1:\n"
#else
		"	aghi	%1,-1\n"
		"	srag	%1,%1,6\n"
		"	jz	1f\n"
		"0:	cg	%2,0(%0,%3)\n"
		"	jne	1f\n"
		"	la	%0,8(%0)\n"
		"	brct	%1,0b\n"
		"1:\n"
#endif
		: "+&a" (bytes), "+&d" (size)
		: "d" (-1UL), "a" (addr), "m" (*(addrtype *) addr)
		: "cc" );
	return bytes;
}

/**
 * __ffs_word_loop - find byte offset of first long != 0UL
 * @addr: pointer to array of unsigned long
 * @size: size of the array in bits
 */
static inline unsigned long __ffs_word_loop(const unsigned long *addr,
					    unsigned long size)
{
	typedef struct { long _[__BITOPS_WORDS(size)]; } addrtype;
	unsigned long bytes = 0;

	asm volatile(
#ifndef CONFIG_64BIT
		"	ahi	%1,-1\n"
		"	sra	%1,5\n"
		"	jz	1f\n"
		"0:	c	%2,0(%0,%3)\n"
		"	jne	1f\n"
		"	la	%0,4(%0)\n"
		"	brct	%1,0b\n"
		"1:\n"
#else
		"	aghi	%1,-1\n"
		"	srag	%1,%1,6\n"
		"	jz	1f\n"
		"0:	cg	%2,0(%0,%3)\n"
		"	jne	1f\n"
		"	la	%0,8(%0)\n"
		"	brct	%1,0b\n"
		"1:\n"
#endif
		: "+&a" (bytes), "+&a" (size)
		: "d" (0UL), "a" (addr), "m" (*(addrtype *) addr)
		: "cc" );
	return bytes;
}

/**
 * __ffz_word - add number of the first unset bit
 * @nr: base value the bit number is added to
 * @word: the word that is searched for unset bits
 */
static inline unsigned long __ffz_word(unsigned long nr, unsigned long word)
{
#ifdef CONFIG_64BIT
	if ((word & 0xffffffff) == 0xffffffff) {
		word >>= 32;
		nr += 32;
	}
#endif
	if ((word & 0xffff) == 0xffff) {
		word >>= 16;
		nr += 16;
	}
	if ((word & 0xff) == 0xff) {
		word >>= 8;
		nr += 8;
	}
	return nr + _zb_findmap[(unsigned char) word];
}

/**
 * __ffs_word - add number of the first set bit
 * @nr: base value the bit number is added to
 * @word: the word that is searched for set bits
 */
static inline unsigned long __ffs_word(unsigned long nr, unsigned long word)
{
#ifdef CONFIG_64BIT
	if ((word & 0xffffffff) == 0) {
		word >>= 32;
		nr += 32;
	}
#endif
	if ((word & 0xffff) == 0) {
		word >>= 16;
		nr += 16;
	}
	if ((word & 0xff) == 0) {
		word >>= 8;
		nr += 8;
	}
	return nr + _sb_findmap[(unsigned char) word];
}


/**
 * __load_ulong_be - load big endian unsigned long
 * @p: pointer to array of unsigned long
 * @offset: byte offset of source value in the array
 */
static inline unsigned long __load_ulong_be(const unsigned long *p,
					    unsigned long offset)
{
	p = (unsigned long *)((unsigned long) p + offset);
	return *p;
}

/**
 * __load_ulong_le - load little endian unsigned long
 * @p: pointer to array of unsigned long
 * @offset: byte offset of source value in the array
 */
static inline unsigned long __load_ulong_le(const unsigned long *p,
					    unsigned long offset)
{
	unsigned long word;

	p = (unsigned long *)((unsigned long) p + offset);
#ifndef CONFIG_64BIT
	asm volatile(
		"	ic	%0,%O1(%R1)\n"
		"	icm	%0,2,%O1+1(%R1)\n"
		"	icm	%0,4,%O1+2(%R1)\n"
		"	icm	%0,8,%O1+3(%R1)"
		: "=&d" (word) : "Q" (*p) : "cc");
#else
	asm volatile(
		"	lrvg	%0,%1"
		: "=d" (word) : "m" (*p) );
#endif
	return word;
}

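/*
 * Illustrative example (not part of this header): for the byte sequence
 * 01 02 03 04 05 06 07 08 at offset 0 of a 64 bit array,
 *
 *	__load_ulong_be(p, 0) returns 0x0102030405060708
 *	__load_ulong_le(p, 0) returns 0x0807060504030201
 *
 * i.e. the _le variant byte-swaps the big endian storage so that the
 * generic ffz/ffs helpers can be reused for little endian bitmaps.
 */
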
/*
 * The various find bit functions.
 */

/*
 * ffz - find first zero in word.
 * @word: The word to search
 *
 * Undefined if no zero exists, so code should check against ~0UL first.
 */
static inline unsigned long ffz(unsigned long word)
{
	return __ffz_word(0, word);
}

/**
 * __ffs - find first bit in word.
 * @word: The word to search
 *
 * Undefined if no bit exists, so code should check against 0 first.
 */
static inline unsigned long __ffs (unsigned long word)
{
	return __ffs_word(0, word);
}

/**
 * ffs - find first bit set
 * @x: the word to search
 *
 * This is defined the same way as
 * the libc and compiler builtin ffs routines, therefore
 * differs in spirit from the above ffz (man ffs).
 */
static inline int ffs(int x)
{
	if (!x)
		return 0;
	return __ffs_word(1, x);
}
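
/*
 * Illustrative examples (not part of this header):
 *
 *	ffz(0xffUL)	returns 8	first zero bit above the low byte
 *	__ffs(0x40UL)	returns 6	lowest set bit, 0 based
 *	ffs(0x40)	returns 7	libc convention, 1 based; ffs(0) is 0
 */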

/**
 * find_first_zero_bit - find the first zero bit in a memory region
 * @addr: The address to start the search at
 * @size: The maximum size to search
 *
 * Returns the bit-number of the first zero bit, not the number of the byte
 * containing a bit.
 */
static inline unsigned long find_first_zero_bit(const unsigned long *addr,
						unsigned long size)
{
	unsigned long bytes, bits;

	if (!size)
		return 0;
	bytes = __ffz_word_loop(addr, size);
	bits = __ffz_word(bytes*8, __load_ulong_be(addr, bytes));
	return (bits < size) ? bits : size;
}
#define find_first_zero_bit find_first_zero_bit

/**
 * find_first_bit - find the first set bit in a memory region
 * @addr: The address to start the search at
 * @size: The maximum size to search
 *
 * Returns the bit-number of the first set bit, not the number of the byte
 * containing a bit.
 */
static inline unsigned long find_first_bit(const unsigned long * addr,
					   unsigned long size)
{
	unsigned long bytes, bits;

	if (!size)
		return 0;
	bytes = __ffs_word_loop(addr, size);
	bits = __ffs_word(bytes*8, __load_ulong_be(addr, bytes));
	return (bits < size) ? bits : size;
}
#define find_first_bit find_first_bit

/*
 * Big endian variant which starts bit counting from the left using
 * the flogr (find leftmost one) instruction.
 */
static inline unsigned long __flo_word(unsigned long nr, unsigned long val)
{
	register unsigned long bit asm("2") = val;
	register unsigned long out asm("3");

	asm volatile (
		"	.insn	rre,0xb9830000,%[bit],%[bit]\n"
		: [bit] "+d" (bit), [out] "=d" (out) : : "cc");
	return nr + bit;
}

/*
 * 64 bit special left bitops format:
 * order in memory:
 *    00 01 02 03 04 05 06 07 08 09 0a 0b 0c 0d 0e 0f
 *    10 11 12 13 14 15 16 17 18 19 1a 1b 1c 1d 1e 1f
 *    20 21 22 23 24 25 26 27 28 29 2a 2b 2c 2d 2e 2f
 *    30 31 32 33 34 35 36 37 38 39 3a 3b 3c 3d 3e 3f
 * after that follows the next long with bit numbers
 *    40 41 42 43 44 45 46 47 48 49 4a 4b 4c 4d 4e 4f
 *    50 51 52 53 54 55 56 57 58 59 5a 5b 5c 5d 5e 5f
 *    60 61 62 63 64 65 66 67 68 69 6a 6b 6c 6d 6e 6f
 *    70 71 72 73 74 75 76 77 78 79 7a 7b 7c 7d 7e 7f
 * The reason for this bit ordering is the fact that
 * the hardware sets bits in a bitmap starting at bit 0
 * and we don't want to scan the bitmap from the 'wrong
 * end'.
 */
static inline unsigned long find_first_bit_left(const unsigned long *addr,
						unsigned long size)
{
	unsigned long bytes, bits;

	if (!size)
		return 0;
	bytes = __ffs_word_loop(addr, size);
	bits = __flo_word(bytes * 8, __load_ulong_be(addr, bytes));
	return (bits < size) ? bits : size;
}

static inline int find_next_bit_left(const unsigned long *addr,
				     unsigned long size,
				     unsigned long offset)
{
	const unsigned long *p;
	unsigned long bit, set;

	if (offset >= size)
		return size;
	bit = offset & (__BITOPS_WORDSIZE - 1);
	offset -= bit;
	size -= offset;
	p = addr + offset / __BITOPS_WORDSIZE;
	if (bit) {
		/* mask off the leftmost "bit" bits already searched */
		set = __flo_word(0, *p & (~0UL >> bit));
		if (set >= size)
			return size + offset;
		if (set < __BITOPS_WORDSIZE)
			return set + offset;
		offset += __BITOPS_WORDSIZE;
		size -= __BITOPS_WORDSIZE;
		p++;
	}
	return offset + find_first_bit_left(p, size);
}

#define for_each_set_bit_left(bit, addr, size)				\
	for ((bit) = find_first_bit_left((addr), (size));		\
	     (bit) < (size);						\
	     (bit) = find_next_bit_left((addr), (size), (bit) + 1))

/* same as for_each_set_bit() but use bit as value to start with */
#define for_each_set_bit_left_cont(bit, addr, size)			\
	for ((bit) = find_next_bit_left((addr), (size), (bit));	\
	     (bit) < (size);						\
	     (bit) = find_next_bit_left((addr), (size), (bit) + 1))

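/*
 * Illustrative usage sketch (hypothetical caller, not part of this header):
 *
 *	unsigned long bit;
 *
 *	for_each_set_bit_left(bit, bitmap, bits)
 *		handle(bit);
 *
 * Here "bitmap" and "handle" are placeholders; bit 0 is the leftmost
 * (most significant) bit of the first word, matching hardware that fills
 * a bitmap starting at bit 0.
 */
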
/**
 * find_next_zero_bit - find the first zero bit in a memory region
 * @addr: The address to base the search on
 * @offset: The bitnumber to start searching at
 * @size: The maximum size to search
 */
static inline int find_next_zero_bit (const unsigned long * addr,
				      unsigned long size,
				      unsigned long offset)
{
	const unsigned long *p;
	unsigned long bit, set;

	if (offset >= size)
		return size;
	bit = offset & (__BITOPS_WORDSIZE - 1);
	offset -= bit;
	size -= offset;
	p = addr + offset / __BITOPS_WORDSIZE;
	if (bit) {
		/*
		 * __ffz_word returns __BITOPS_WORDSIZE
		 * if no zero bit is present in the word.
		 */
		set = __ffz_word(bit, *p >> bit);
		if (set >= size)
			return size + offset;
		if (set < __BITOPS_WORDSIZE)
			return set + offset;
		offset += __BITOPS_WORDSIZE;
		size -= __BITOPS_WORDSIZE;
		p++;
	}
	return offset + find_first_zero_bit(p, size);
}
#define find_next_zero_bit find_next_zero_bit

/**
 * find_next_bit - find the first set bit in a memory region
 * @addr: The address to base the search on
 * @offset: The bitnumber to start searching at
 * @size: The maximum size to search
 */
static inline int find_next_bit (const unsigned long * addr,
				 unsigned long size,
				 unsigned long offset)
{
	const unsigned long *p;
	unsigned long bit, set;

	if (offset >= size)
		return size;
	bit = offset & (__BITOPS_WORDSIZE - 1);
	offset -= bit;
	size -= offset;
	p = addr + offset / __BITOPS_WORDSIZE;
	if (bit) {
		/*
		 * __ffs_word returns __BITOPS_WORDSIZE
		 * if no one bit is present in the word.
		 */
		set = __ffs_word(0, *p & (~0UL << bit));
		if (set >= size)
			return size + offset;
		if (set < __BITOPS_WORDSIZE)
			return set + offset;
		offset += __BITOPS_WORDSIZE;
		size -= __BITOPS_WORDSIZE;
		p++;
	}
	return offset + find_first_bit(p, size);
}
#define find_next_bit find_next_bit

/*
 * Every architecture must define this function. It's the fastest
 * way of searching a 140-bit bitmap where the first 100 bits are
 * unlikely to be set. It's guaranteed that at least one of the 140
 * bits is cleared.
 */
static inline int sched_find_first_bit(unsigned long *b)
{
	return find_first_bit(b, 140);
}

#include <asm-generic/bitops/fls.h>
#include <asm-generic/bitops/__fls.h>
#include <asm-generic/bitops/fls64.h>

#include <asm-generic/bitops/hweight.h>
#include <asm-generic/bitops/lock.h>

/*
 * ATTENTION: intel byte ordering convention for ext2 and minix !!
 * bit 0 is the LSB of addr; bit 31 is the MSB of addr;
 * bit 32 is the LSB of (addr+4).
 * That combined with the little endian byte order of Intel gives the
 * following bit order in memory:
 *    07 06 05 04 03 02 01 00 15 14 13 12 11 10 09 08 \
 *    23 22 21 20 19 18 17 16 31 30 29 28 27 26 25 24
 */

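/*
 * Illustrative example (not part of this header): with this numbering,
 * bit "nr" lives in byte "nr >> 3" of the buffer. For a buffer that
 * starts with the bytes 0xff 0x3f (remaining bytes zero),
 *
 *	find_first_zero_bit_le(vaddr, size)
 *
 * returns 14, because bits 0-13 are set and bit 6 of the second byte,
 * i.e. bit 14, is the first zero bit.
 */
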
static inline int find_first_zero_bit_le(void *vaddr, unsigned int size)
{
	unsigned long bytes, bits;

	if (!size)
		return 0;
	bytes = __ffz_word_loop(vaddr, size);
	bits = __ffz_word(bytes*8, __load_ulong_le(vaddr, bytes));
	return (bits < size) ? bits : size;
}
#define find_first_zero_bit_le find_first_zero_bit_le

static inline int find_next_zero_bit_le(void *vaddr, unsigned long size,
					unsigned long offset)
{
	unsigned long *addr = vaddr, *p;
	unsigned long bit, set;

	if (offset >= size)
		return size;
	bit = offset & (__BITOPS_WORDSIZE - 1);
	offset -= bit;
	size -= offset;
	p = addr + offset / __BITOPS_WORDSIZE;
	if (bit) {
		/*
		 * __ffz_word returns __BITOPS_WORDSIZE
		 * if no zero bit is present in the word.
		 */
		set = __ffz_word(bit, __load_ulong_le(p, 0) >> bit);
		if (set >= size)
			return size + offset;
		if (set < __BITOPS_WORDSIZE)
			return set + offset;
		offset += __BITOPS_WORDSIZE;
		size -= __BITOPS_WORDSIZE;
		p++;
	}
	return offset + find_first_zero_bit_le(p, size);
}
#define find_next_zero_bit_le find_next_zero_bit_le

static inline unsigned long find_first_bit_le(void *vaddr, unsigned long size)
{
	unsigned long bytes, bits;

	if (!size)
		return 0;
	bytes = __ffs_word_loop(vaddr, size);
	bits = __ffs_word(bytes*8, __load_ulong_le(vaddr, bytes));
	return (bits < size) ? bits : size;
}
#define find_first_bit_le find_first_bit_le

static inline int find_next_bit_le(void *vaddr, unsigned long size,
				   unsigned long offset)
{
	unsigned long *addr = vaddr, *p;
	unsigned long bit, set;

	if (offset >= size)
		return size;
	bit = offset & (__BITOPS_WORDSIZE - 1);
	offset -= bit;
	size -= offset;
	p = addr + offset / __BITOPS_WORDSIZE;
	if (bit) {
		/*
		 * __ffs_word returns __BITOPS_WORDSIZE
		 * if no one bit is present in the word.
		 */
		set = __ffs_word(0, __load_ulong_le(p, 0) & (~0UL << bit));
		if (set >= size)
			return size + offset;
		if (set < __BITOPS_WORDSIZE)
			return set + offset;
		offset += __BITOPS_WORDSIZE;
		size -= __BITOPS_WORDSIZE;
		p++;
	}
	return offset + find_first_bit_le(p, size);
}
#define find_next_bit_le find_next_bit_le

#include <asm-generic/bitops/le.h>

#include <asm-generic/bitops/ext2-atomic-setbit.h>

#endif /* _S390_BITOPS_H */