summary | refs | log | tree | commit | diff
path: root/arch/m32r/lib/ashxdi3.S
diff options
context:
space:
mode:
author    Linus Torvalds <torvalds@ppc970.osdl.org>  2005-04-16 15:20:36 -0700
committer Linus Torvalds <torvalds@ppc970.osdl.org>  2005-04-16 15:20:36 -0700
commit1da177e4c3f41524e886b7f1b8a0c1fc7321cac2 (patch)
tree0bba044c4ce775e45a88a51686b5d9f90697ea9d /arch/m32r/lib/ashxdi3.S
download  configs-2.6.12-rc2.tar.gz
Linux-2.6.12-rc2 (tag: v2.6.12-rc2)
Initial git repository build. I'm not bothering with the full history, even though we have it. We can create a separate "historical" git archive of that later if we want to, and in the meantime it's about 3.2GB when imported into git - space that would just make the early git days unnecessarily complicated, when we don't have a lot of good infrastructure for it. Let it rip!
Diffstat (limited to 'arch/m32r/lib/ashxdi3.S')
-rw-r--r--  arch/m32r/lib/ashxdi3.S  297
1 file changed, 297 insertions, 0 deletions
diff --git a/arch/m32r/lib/ashxdi3.S b/arch/m32r/lib/ashxdi3.S
new file mode 100644
index 00000000000..78effca9d97
--- /dev/null
+++ b/arch/m32r/lib/ashxdi3.S
@@ -0,0 +1,297 @@
+/*
+ * linux/arch/m32r/lib/ashxdi3.S
+ *
+ * Copyright (C) 2001,2002 Hiroyuki Kondo, and Hirokazu Takata
+ *
+ */
+/* $Id$ */
+
+#include <linux/config.h>
+
+;
+; input (r0,r1) src
+; input r2 shift val
+; r3 scratch
+; output (r0,r1)
+;
+; Word order of the 64-bit value in (r0,r1) depends on byte order:
+; big-endian builds keep the high word in r0 and the low word in r1;
+; little-endian builds are the reverse. The four #ifdef variants below
+; implement the same three libgcc helpers for each combination of
+; endianness and dual-issue capability.
+;
+
+#ifdef CONFIG_ISA_DUAL_ISSUE
+
+#ifndef __LITTLE_ENDIAN__
+
+; __ashrdi3 — 64-bit arithmetic shift right (dual-issue, big-endian:
+; r0 = high word, r1 = low word).
+; In:  (r0,r1) = value, r2 = shift count (0..63 expected by libgcc callers)
+; Out: (r0,r1) = value >> r2 (sign-propagating); clobbers r2, r3, condition bit.
+; '||' issues two short instructions in parallel; both read pre-update operands.
+ .text
+ .align 4
+ .globl __ashrdi3
+__ashrdi3:
+ cmpz r2 || ldi r3, #32 ; C = (shift == 0); r3 = 32
+ jc r14 || cmpu r2, r3 ; shift==0: return unchanged; C = (shift < 32)
+ bc 1f ; shift < 32 -> cross-word case
+ ; case 32 =< shift: low = high >> (shift-32), high = sign fill
+ mv r1, r0 || srai r0, #31
+ addi r2, #-32
+ sra r1, r2
+ jmp r14
+ .fillinsn
+1: ; case shift <32: low = (low >> s) | (high << (32-s)), high = high >> s
+ mv r3, r0 || srl r1, r2
+ sra r0, r2 || neg r2, r2 ; r2 = -s; NOTE(review): relies on the shifter using only the low bits of the count so -s acts as 32-s — confirm per m32r manual
+ sll r3, r2
+ or r1, r3 || jmp r14
+
+; __ashldi3 / __lshldi3 — 64-bit shift left (identical for arithmetic and
+; logical). Dual-issue, big-endian: r0 = high word, r1 = low word.
+; In:  (r0,r1) = value, r2 = shift count
+; Out: (r0,r1) = value << r2; clobbers r2, r3, condition bit.
+ .align 4
+ .globl __ashldi3
+ .globl __lshldi3
+__ashldi3:
+__lshldi3:
+ cmpz r2 || ldi r3, #32 ; C = (shift == 0); r3 = 32
+ jc r14 || cmpu r2, r3 ; shift==0: return unchanged; C = (shift < 32)
+ bc 1f
+ ; case 32 =< shift: high = low << (shift-32), low = 0
+ mv r0, r1 || addi r2, #-32
+ sll r0, r2 || ldi r1, #0
+ jmp r14
+ .fillinsn
+1: ; case shift <32: high = (high << s) | (low >> (32-s)), low = low << s
+ mv r3, r1 || sll r0, r2
+ sll r1, r2 || neg r2, r2 ; r2 = -s, used as 32-s by the shifter (count presumably masked — confirm)
+ srl r3, r2
+ or r0, r3 || jmp r14
+
+; __lshrdi3 — 64-bit logical shift right (zero fill). Dual-issue,
+; big-endian: r0 = high word, r1 = low word.
+; In:  (r0,r1) = value, r2 = shift count
+; Out: (r0,r1) = value >> r2 (unsigned); clobbers r2, r3, condition bit.
+ .align 4
+ .globl __lshrdi3
+__lshrdi3:
+ cmpz r2 || ldi r3, #32 ; C = (shift == 0); r3 = 32
+ jc r14 || cmpu r2, r3 ; shift==0: return unchanged; C = (shift < 32)
+ bc 1f
+ ; case 32 =< shift: low = high >> (shift-32), high = 0
+ mv r1, r0 || addi r2, #-32
+ ldi r0, #0 || srl r1, r2
+ jmp r14
+ .fillinsn
+1: ; case shift <32: low = (low >> s) | (high << (32-s)), high = high >> s
+ mv r3, r0 || srl r1, r2
+ srl r0, r2 || neg r2, r2 ; r2 = -s, used as 32-s by the shifter (count presumably masked — confirm)
+ sll r3, r2
+ or r1, r3 || jmp r14
+
+#else /* LITTLE_ENDIAN */
+
+; __ashrdi3 — 64-bit arithmetic shift right (dual-issue, little-endian:
+; r0 = low word, r1 = high word).
+; In:  (r0,r1) = value, r2 = shift count
+; Out: (r0,r1) = value >> r2 (sign-propagating); clobbers r2, r3, condition bit.
+ .text
+ .align 4
+ .globl __ashrdi3
+__ashrdi3:
+ cmpz r2 || ldi r3, #32 ; C = (shift == 0); r3 = 32
+ jc r14 || cmpu r2, r3 ; shift==0: return unchanged; C = (shift < 32)
+ bc 1f
+ ; case 32 =< shift: low = high >> (shift-32), high = sign fill
+ mv r0, r1 || srai r1, #31
+ addi r2, #-32
+ sra r0, r2
+ jmp r14
+ .fillinsn
+1: ; case shift <32: low = (low >> s) | (high << (32-s)), high = high >> s
+ mv r3, r1 || srl r0, r2
+ sra r1, r2 || neg r2, r2 ; r2 = -s, used as 32-s by the shifter (count presumably masked — confirm)
+ sll r3, r2
+ or r0, r3 || jmp r14
+
+; __ashldi3 / __lshldi3 — 64-bit shift left (identical for arithmetic and
+; logical). Dual-issue, little-endian: r0 = low word, r1 = high word.
+; In:  (r0,r1) = value, r2 = shift count
+; Out: (r0,r1) = value << r2; clobbers r2, r3, condition bit.
+ .align 4
+ .globl __ashldi3
+ .globl __lshldi3
+__ashldi3:
+__lshldi3:
+ cmpz r2 || ldi r3, #32 ; C = (shift == 0); r3 = 32
+ jc r14 || cmpu r2, r3 ; shift==0: return unchanged; C = (shift < 32)
+ bc 1f
+ ; case 32 =< shift: high = low << (shift-32), low = 0
+ mv r1, r0 || addi r2, #-32
+ sll r1, r2 || ldi r0, #0
+ jmp r14
+ .fillinsn
+1: ; case shift <32: high = (high << s) | (low >> (32-s)), low = low << s
+ mv r3, r0 || sll r1, r2
+ sll r0, r2 || neg r2, r2 ; r2 = -s, used as 32-s by the shifter (count presumably masked — confirm)
+ srl r3, r2
+ or r1, r3 || jmp r14
+
+; __lshrdi3 — 64-bit logical shift right (zero fill). Dual-issue,
+; little-endian: r0 = low word, r1 = high word.
+; In:  (r0,r1) = value, r2 = shift count
+; Out: (r0,r1) = value >> r2 (unsigned); clobbers r2, r3, condition bit.
+ .align 4
+ .globl __lshrdi3
+__lshrdi3:
+ cmpz r2 || ldi r3, #32 ; C = (shift == 0); r3 = 32
+ jc r14 || cmpu r2, r3 ; shift==0: return unchanged; C = (shift < 32)
+ bc 1f
+ ; case 32 =< shift: low = high >> (shift-32), high = 0
+ mv r0, r1 || addi r2, #-32
+ ldi r1, #0 || srl r0, r2
+ jmp r14
+ .fillinsn
+1: ; case shift <32: low = (low >> s) | (high << (32-s)), high = high >> s
+ mv r3, r1 || srl r0, r2
+ srl r1, r2 || neg r2, r2 ; r2 = -s, used as 32-s by the shifter (count presumably masked — confirm)
+ sll r3, r2
+ or r0, r3 || jmp r14
+
+#endif
+
+#else /* not CONFIG_ISA_DUAL_ISSUE */
+
+#ifndef __LITTLE_ENDIAN__
+
+; __ashrdi3 — 64-bit arithmetic shift right (single-issue ISA, big-endian:
+; r0 = high word, r1 = low word).
+; In:  (r0,r1) = value, r2 = shift count
+; Out: (r0,r1) = value >> r2 (sign-propagating); clobbers r2, r3, condition bit.
+ .text
+ .align 4
+ .globl __ashrdi3
+__ashrdi3:
+ beqz r2, 2f ; shift == 0: return value unchanged
+ cmpui r2, #32 ; C = (shift < 32)
+ bc 1f
+ ; case 32 =< shift: low = high >> (shift-32), high = sign fill
+ mv r1, r0
+ srai r0, #31
+ addi r2, #-32
+ sra r1, r2
+ jmp r14
+ .fillinsn
+1: ; case shift <32: low = (low >> s) | (high << (32-s)), high = high >> s
+ mv r3, r0
+ srl r1, r2
+ sra r0, r2
+ neg r2, r2 ; r2 = -s, used as 32-s by the shifter (count presumably masked — confirm)
+ sll r3, r2
+ or r1, r3
+ .fillinsn
+2:
+ jmp r14
+
+; __ashldi3 / __lshldi3 — 64-bit shift left (identical for arithmetic and
+; logical). Single-issue ISA, big-endian: r0 = high word, r1 = low word.
+; In:  (r0,r1) = value, r2 = shift count
+; Out: (r0,r1) = value << r2; clobbers r2, r3, condition bit.
+ .align 4
+ .globl __ashldi3
+ .globl __lshldi3
+__ashldi3:
+__lshldi3:
+ beqz r2, 2f ; shift == 0: return value unchanged
+ cmpui r2, #32 ; C = (shift < 32)
+ bc 1f
+ ; case 32 =< shift: high = low << (shift-32), low = 0
+ mv r0, r1
+ addi r2, #-32
+ sll r0, r2
+ ldi r1, #0
+ jmp r14
+ .fillinsn
+1: ; case shift <32: high = (high << s) | (low >> (32-s)), low = low << s
+ mv r3, r1
+ sll r0, r2
+ sll r1, r2
+ neg r2, r2 ; r2 = -s, used as 32-s by the shifter (count presumably masked — confirm)
+ srl r3, r2
+ or r0, r3
+ .fillinsn
+2:
+ jmp r14
+
+; __lshrdi3 — 64-bit logical shift right (zero fill). Single-issue ISA,
+; big-endian: r0 = high word, r1 = low word.
+; In:  (r0,r1) = value, r2 = shift count
+; Out: (r0,r1) = value >> r2 (unsigned); clobbers r2, r3, condition bit.
+ .align 4
+ .globl __lshrdi3
+__lshrdi3:
+ beqz r2, 2f ; shift == 0: return value unchanged
+ cmpui r2, #32 ; C = (shift < 32)
+ bc 1f
+ ; case 32 =< shift: low = high >> (shift-32), high = 0
+ mv r1, r0
+ ldi r0, #0
+ addi r2, #-32
+ srl r1, r2
+ jmp r14
+ .fillinsn
+1: ; case shift <32: low = (low >> s) | (high << (32-s)), high = high >> s
+ mv r3, r0
+ srl r1, r2
+ srl r0, r2
+ neg r2, r2 ; r2 = -s, used as 32-s by the shifter (count presumably masked — confirm)
+ sll r3, r2
+ or r1, r3
+ .fillinsn
+2:
+ jmp r14
+
+#else
+
+; __ashrdi3 — 64-bit arithmetic shift right (single-issue ISA, little-endian:
+; r0 = low word, r1 = high word).
+; In:  (r0,r1) = value, r2 = shift count
+; Out: (r0,r1) = value >> r2 (sign-propagating); clobbers r2, r3, condition bit.
+ .text
+ .align 4
+ .globl __ashrdi3
+__ashrdi3:
+ beqz r2, 2f ; shift == 0: return value unchanged
+ cmpui r2, #32 ; C = (shift < 32)
+ bc 1f
+ ; case 32 =< shift: low = high >> (shift-32), high = sign fill
+ mv r0, r1
+ srai r1, #31
+ addi r2, #-32
+ sra r0, r2
+ jmp r14
+ .fillinsn
+1: ; case shift <32: low = (low >> s) | (high << (32-s)), high = high >> s
+ mv r3, r1
+ srl r0, r2
+ sra r1, r2
+ neg r2, r2 ; r2 = -s, used as 32-s by the shifter (count presumably masked — confirm)
+ sll r3, r2
+ or r0, r3
+ .fillinsn
+2:
+ jmp r14
+
+; __ashldi3 / __lshldi3 — 64-bit shift left (identical for arithmetic and
+; logical). Single-issue ISA, little-endian: r0 = low word, r1 = high word.
+; In:  (r0,r1) = value, r2 = shift count
+; Out: (r0,r1) = value << r2; clobbers r2, r3, condition bit.
+ .align 4
+ .globl __ashldi3
+ .globl __lshldi3
+__ashldi3:
+__lshldi3:
+ beqz r2, 2f ; shift == 0: return value unchanged
+ cmpui r2, #32 ; C = (shift < 32)
+ bc 1f
+ ; case 32 =< shift: high = low << (shift-32), low = 0
+ mv r1, r0
+ addi r2, #-32
+ sll r1, r2
+ ldi r0, #0
+ jmp r14
+ .fillinsn
+1: ; case shift <32: high = (high << s) | (low >> (32-s)), low = low << s
+ mv r3, r0
+ sll r1, r2
+ sll r0, r2
+ neg r2, r2 ; r2 = -s, used as 32-s by the shifter (count presumably masked — confirm)
+ srl r3, r2
+ or r1, r3
+ .fillinsn
+2:
+ jmp r14
+
+; __lshrdi3 — 64-bit logical shift right (zero fill). Single-issue ISA,
+; little-endian: r0 = low word, r1 = high word.
+; In:  (r0,r1) = value, r2 = shift count
+; Out: (r0,r1) = value >> r2 (unsigned); clobbers r2, r3, condition bit.
+ .align 4
+ .globl __lshrdi3
+__lshrdi3:
+ beqz r2, 2f ; shift == 0: return value unchanged
+ cmpui r2, #32 ; C = (shift < 32)
+ bc 1f
+ ; case 32 =< shift: low = high >> (shift-32), high = 0
+ mv r0, r1
+ ldi r1, #0
+ addi r2, #-32
+ srl r0, r2
+ jmp r14
+ .fillinsn
+1: ; case shift <32: low = (low >> s) | (high << (32-s)), high = high >> s
+ mv r3, r1
+ srl r0, r2
+ srl r1, r2
+ neg r2, r2 ; r2 = -s, used as 32-s by the shifter (count presumably masked — confirm)
+ sll r3, r2
+ or r0, r3
+ .fillinsn
+2:
+ jmp r14
+
+#endif
+
+#endif /* not CONFIG_ISA_DUAL_ISSUE */
+
+ .end
+