Diffstat (limited to 'arch/blackfin/include/asm/bitops.h')
-rw-r--r--  arch/blackfin/include/asm/bitops.h  218
1 files changed, 218 insertions, 0 deletions
diff --git a/arch/blackfin/include/asm/bitops.h b/arch/blackfin/include/asm/bitops.h
new file mode 100644
index 00000000000..b39a175c79c
--- /dev/null
+++ b/arch/blackfin/include/asm/bitops.h
@@ -0,0 +1,218 @@
+#ifndef _BLACKFIN_BITOPS_H
+#define _BLACKFIN_BITOPS_H
+
+/*
+ * Copyright 1992, Linus Torvalds.
+ */
+
+#include <linux/compiler.h>
+#include <asm/byteorder.h> /* swab32 */
+#include <asm/system.h> /* save_flags */
+
+#ifdef __KERNEL__
+
+#ifndef _LINUX_BITOPS_H
+#error only <linux/bitops.h> can be included directly
+#endif
+
+#include <asm-generic/bitops/ffs.h>
+#include <asm-generic/bitops/__ffs.h>
+#include <asm-generic/bitops/sched.h>
+#include <asm-generic/bitops/ffz.h>
+
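+/*
+ * Atomic bit operations.  This port implements "atomic" by briefly
+ * disabling local interrupts around a read-modify-write of the 32-bit
+ * word that contains the bit.  The double-underscore variants
+ * (__set_bit() etc.) skip the irq save/restore and are only safe when
+ * the caller already guarantees exclusion.
+ */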
+static __inline__ void set_bit(int nr, volatile unsigned long *addr)
+{
+ int *a = (int *)addr;
+ int mask;
+ unsigned long flags;
+
+ a += nr >> 5;
+ mask = 1 << (nr & 0x1f);
+ local_irq_save(flags);
+ *a |= mask;
+ local_irq_restore(flags);
+}
+
+static __inline__ void __set_bit(int nr, volatile unsigned long *addr)
+{
+ int *a = (int *)addr;
+ int mask;
+
+ a += nr >> 5;
+ mask = 1 << (nr & 0x1f);
+ *a |= mask;
+}
+
+/*
+ * clear_bit() doesn't provide any barrier for the compiler.
+ */
+#define smp_mb__before_clear_bit() barrier()
+#define smp_mb__after_clear_bit() barrier()
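+
+/*
+ * Illustrative sketch (not part of this header): code that publishes an
+ * update and then clears a "busy" flag would pair the barrier above with
+ * clear_bit(), e.g.
+ *
+ *   shared->data = new_value;
+ *   smp_mb__before_clear_bit();
+ *   clear_bit(BUSY_BIT, &shared->flags);
+ *
+ * "shared", "new_value" and BUSY_BIT are hypothetical names used only
+ * for this example.
+ */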
+
+static __inline__ void clear_bit(int nr, volatile unsigned long *addr)
+{
+ int *a = (int *)addr;
+ int mask;
+ unsigned long flags;
+
+ a += nr >> 5;
+ mask = 1 << (nr & 0x1f);
+ local_irq_save(flags);
+ *a &= ~mask;
+ local_irq_restore(flags);
+}
+
+static __inline__ void __clear_bit(int nr, volatile unsigned long *addr)
+{
+ int *a = (int *)addr;
+ int mask;
+
+ a += nr >> 5;
+ mask = 1 << (nr & 0x1f);
+ *a &= ~mask;
+}
+
+static __inline__ void change_bit(int nr, volatile unsigned long *addr)
+{
+ int *a = (int *)addr;
+ int mask;
+ unsigned long flags;
+
+ a += nr >> 5;
+ mask = 1 << (nr & 0x1f);
+ local_irq_save(flags);
+ *a ^= mask;
+ local_irq_restore(flags);
+}
+
+static __inline__ void __change_bit(int nr, volatile unsigned long *addr)
+{
+ int *a = (int *)addr;
+ int mask;
+
+ a += nr >> 5;
+ mask = 1 << (nr & 0x1f);
+ *a ^= mask;
+}
+
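+/*
+ * test_and_set_bit()/test_and_clear_bit()/test_and_change_bit() return
+ * the previous value of the bit (1 if it was set, 0 otherwise) and then
+ * set, clear or toggle it; interrupts are disabled across the
+ * read-modify-write so the pair cannot be split on the local CPU.
+ */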
+static __inline__ int test_and_set_bit(int nr, volatile unsigned long *addr)
+{
+ int mask, retval;
+ volatile unsigned int *a = (volatile unsigned int *)addr;
+ unsigned long flags;
+
+ a += nr >> 5;
+ mask = 1 << (nr & 0x1f);
+ local_irq_save(flags);
+ retval = (mask & *a) != 0;
+ *a |= mask;
+ local_irq_restore(flags);
+
+ return retval;
+}
+
+static __inline__ int __test_and_set_bit(int nr, volatile unsigned long *addr)
+{
+ int mask, retval;
+ volatile unsigned int *a = (volatile unsigned int *)addr;
+
+ a += nr >> 5;
+ mask = 1 << (nr & 0x1f);
+ retval = (mask & *a) != 0;
+ *a |= mask;
+ return retval;
+}
+
+static __inline__ int test_and_clear_bit(int nr, volatile unsigned long *addr)
+{
+ int mask, retval;
+ volatile unsigned int *a = (volatile unsigned int *)addr;
+ unsigned long flags;
+
+ a += nr >> 5;
+ mask = 1 << (nr & 0x1f);
+ local_irq_save(flags);
+ retval = (mask & *a) != 0;
+ *a &= ~mask;
+ local_irq_restore(flags);
+
+ return retval;
+}
+
+static __inline__ int __test_and_clear_bit(int nr, volatile unsigned long *addr)
+{
+ int mask, retval;
+ volatile unsigned int *a = (volatile unsigned int *)addr;
+
+ a += nr >> 5;
+ mask = 1 << (nr & 0x1f);
+ retval = (mask & *a) != 0;
+ *a &= ~mask;
+ return retval;
+}
+
+static __inline__ int test_and_change_bit(int nr, volatile unsigned long *addr)
+{
+ int mask, retval;
+ volatile unsigned int *a = (volatile unsigned int *)addr;
+ unsigned long flags;
+
+ a += nr >> 5;
+ mask = 1 << (nr & 0x1f);
+ local_irq_save(flags);
+ retval = (mask & *a) != 0;
+ *a ^= mask;
+ local_irq_restore(flags);
+ return retval;
+}
+
+static __inline__ int __test_and_change_bit(int nr,
+ volatile unsigned long *addr)
+{
+ int mask, retval;
+ volatile unsigned int *a = (volatile unsigned int *)addr;
+
+ a += nr >> 5;
+ mask = 1 << (nr & 0x1f);
+ retval = (mask & *a) != 0;
+ *a ^= mask;
+ return retval;
+}
+
+/*
+ * These routines don't need to be atomic: testing a bit is a single
+ * read of the containing word.
+ */
+static __inline__ int __constant_test_bit(int nr, const void *addr)
+{
+ return ((1UL << (nr & 31)) &
+ (((const volatile unsigned int *)addr)[nr >> 5])) != 0;
+}
+
+static __inline__ int __test_bit(int nr, const void *addr)
+{
+ int *a = (int *)addr;
+ int mask;
+
+ a += nr >> 5;
+ mask = 1 << (nr & 0x1f);
+ return ((mask & *a) != 0);
+}
+
+#define test_bit(nr,addr) \
+(__builtin_constant_p(nr) ? \
+ __constant_test_bit((nr),(addr)) : \
+ __test_bit((nr),(addr)))
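+
+/*
+ * test_bit() picks __constant_test_bit() when the bit number is known at
+ * compile time and __test_bit() otherwise; both are plain, non-atomic
+ * reads.  A minimal usage sketch ("state", READY_BIT and handle_ready()
+ * are hypothetical names, not part of this header):
+ *
+ *   static unsigned long state[1];
+ *
+ *   set_bit(READY_BIT, state);
+ *   if (test_bit(READY_BIT, state))
+ *      handle_ready();
+ */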
+
+#include <asm-generic/bitops/find.h>
+#include <asm-generic/bitops/hweight.h>
+#include <asm-generic/bitops/lock.h>
+
+#include <asm-generic/bitops/ext2-atomic.h>
+#include <asm-generic/bitops/ext2-non-atomic.h>
+
+#include <asm-generic/bitops/minix.h>
+
+#endif /* __KERNEL__ */
+
+#include <asm-generic/bitops/fls.h>
+#include <asm-generic/bitops/fls64.h>
+
+#endif /* _BLACKFIN_BITOPS_H */