author    Thomas Gleixner <tglx@linutronix.de>  2007-10-11 11:16:31 +0200
committer Thomas Gleixner <tglx@linutronix.de>  2007-10-11 11:16:31 +0200
commit    da957e111bb0c189a4a3bf8a00caaecb59ed94ca (patch)
tree      6916075fdd3e28869dcd3dfa2cf160a74d1cb02e /arch/x86/math-emu/mul_Xsig.S
parent    2ec1df4130c60d1eb49dc0fa0ed15858fede6b05 (diff)
i386: move math-emu
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'arch/x86/math-emu/mul_Xsig.S')
-rw-r--r--  arch/x86/math-emu/mul_Xsig.S | 176
1 file changed, 176 insertions(+), 0 deletions(-)
diff --git a/arch/x86/math-emu/mul_Xsig.S b/arch/x86/math-emu/mul_Xsig.S
new file mode 100644
index 00000000000..717785a53eb
--- /dev/null
+++ b/arch/x86/math-emu/mul_Xsig.S
@@ -0,0 +1,176 @@
+/*---------------------------------------------------------------------------+
+ | mul_Xsig.S |
+ | |
+ | Multiply a 12 byte fixed point number by another fixed point number. |
+ | |
+ | Copyright (C) 1992,1994,1995 |
+ | W. Metzenthen, 22 Parker St, Ormond, Vic 3163, |
+ | Australia. E-mail billm@jacobi.maths.monash.edu.au |
+ | |
+ | Call from C as: |
+ | void mul32_Xsig(Xsig *x, unsigned b) |
+ | |
+ | void mul64_Xsig(Xsig *x, unsigned long long *b) |
+ | |
+ | void mul_Xsig_Xsig(Xsig *x, unsigned *b) |
+ | |
+ | The result is neither rounded nor normalized, and the ls bit or so may |
+ | be wrong. |
+ | |
+ +---------------------------------------------------------------------------*/
+ .file "mul_Xsig.S"
+
+
+#include "fpu_emu.h"
+
+.text
+ENTRY(mul32_Xsig)
+ pushl %ebp
+ movl %esp,%ebp
+ subl $16,%esp
+ pushl %esi
+
+ movl PARAM1,%esi
+ movl PARAM2,%ecx
+
+ xor %eax,%eax
+ movl %eax,-4(%ebp)
+ movl %eax,-8(%ebp)
+
+ movl (%esi),%eax /* lsl of Xsig */
+ mull %ecx /* b, the 32-bit multiplier */
+ movl %edx,-12(%ebp)
+
+ movl 4(%esi),%eax /* midl of Xsig */
+ mull %ecx /* b, the 32-bit multiplier */
+ addl %eax,-12(%ebp)
+ adcl %edx,-8(%ebp)
+ adcl $0,-4(%ebp)
+
+ movl 8(%esi),%eax /* msl of Xsig */
+ mull %ecx /* b, the 32-bit multiplier */
+ addl %eax,-8(%ebp)
+ adcl %edx,-4(%ebp)
+
+ movl -12(%ebp),%eax
+ movl %eax,(%esi)
+ movl -8(%ebp),%eax
+ movl %eax,4(%esi)
+ movl -4(%ebp),%eax
+ movl %eax,8(%esi)
+
+ popl %esi
+ leave
+ ret
+
+
+ENTRY(mul64_Xsig)
+ pushl %ebp
+ movl %esp,%ebp
+ subl $16,%esp
+ pushl %esi
+
+ movl PARAM1,%esi
+ movl PARAM2,%ecx
+
+ xor %eax,%eax
+ movl %eax,-4(%ebp)
+ movl %eax,-8(%ebp)
+
+ movl (%esi),%eax /* lsl of Xsig */
+ mull 4(%ecx) /* msl of b */
+ movl %edx,-12(%ebp)
+
+ movl 4(%esi),%eax /* midl of Xsig */
+ mull (%ecx) /* lsl of b */
+ addl %edx,-12(%ebp)
+ adcl $0,-8(%ebp)
+ adcl $0,-4(%ebp)
+
+ movl 4(%esi),%eax /* midl of Xsig */
+ mull 4(%ecx) /* msl of b */
+ addl %eax,-12(%ebp)
+ adcl %edx,-8(%ebp)
+ adcl $0,-4(%ebp)
+
+ movl 8(%esi),%eax /* msl of Xsig */
+ mull (%ecx) /* lsl of b */
+ addl %eax,-12(%ebp)
+ adcl %edx,-8(%ebp)
+ adcl $0,-4(%ebp)
+
+ movl 8(%esi),%eax /* msl of Xsig */
+ mull 4(%ecx) /* msl of b */
+ addl %eax,-8(%ebp)
+ adcl %edx,-4(%ebp)
+
+ movl -12(%ebp),%eax
+ movl %eax,(%esi)
+ movl -8(%ebp),%eax
+ movl %eax,4(%esi)
+ movl -4(%ebp),%eax
+ movl %eax,8(%esi)
+
+ popl %esi
+ leave
+ ret
+
+
+
+ENTRY(mul_Xsig_Xsig)
+ pushl %ebp
+ movl %esp,%ebp
+ subl $16,%esp
+ pushl %esi
+
+ movl PARAM1,%esi
+ movl PARAM2,%ecx
+
+ xor %eax,%eax
+ movl %eax,-4(%ebp)
+ movl %eax,-8(%ebp)
+
+ movl (%esi),%eax /* lsl of Xsig */
+ mull 8(%ecx) /* msl of b */
+ movl %edx,-12(%ebp)
+
+ movl 4(%esi),%eax /* midl of Xsig */
+ mull 4(%ecx) /* midl of b */
+ addl %edx,-12(%ebp)
+ adcl $0,-8(%ebp)
+ adcl $0,-4(%ebp)
+
+ movl 8(%esi),%eax /* msl of Xsig */
+ mull (%ecx) /* lsl of b */
+ addl %edx,-12(%ebp)
+ adcl $0,-8(%ebp)
+ adcl $0,-4(%ebp)
+
+ movl 4(%esi),%eax /* midl of Xsig */
+ mull 8(%ecx) /* msl of b */
+ addl %eax,-12(%ebp)
+ adcl %edx,-8(%ebp)
+ adcl $0,-4(%ebp)
+
+ movl 8(%esi),%eax /* msl of Xsig */
+ mull 4(%ecx) /* midl of b */
+ addl %eax,-12(%ebp)
+ adcl %edx,-8(%ebp)
+ adcl $0,-4(%ebp)
+
+ movl 8(%esi),%eax /* msl of Xsig */
+ mull 8(%ecx) /* msl of b */
+ addl %eax,-8(%ebp)
+ adcl %edx,-4(%ebp)
+
+ movl -12(%ebp),%edx
+ movl %edx,(%esi)
+ movl -8(%ebp),%edx
+ movl %edx,4(%esi)
+ movl -4(%ebp),%edx
+ movl %edx,8(%esi)
+
+ popl %esi
+ leave
+ ret
+
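
Note: the three routines in this file all compute a truncated product of the emulator's
96-bit extended significand. As a cross-check, here is a minimal, portable C sketch (not
kernel code) of what mul32_Xsig does, assuming the Xsig layout from fpu_emu.h: three
32-bit words stored least significant word first. The field names lsw/midw/msw follow
that header, but the function name mul32_xsig_ref is illustrative only. mul32_Xsig forms
the 128-bit product of the 96-bit significand and the 32-bit multiplier and keeps its top
96 bits; only the low 32 bits of the product are discarded.

    #include <stdint.h>

    /* Assumed 96-bit significand layout, least significant word first,
     * mirroring the Xsig typedef in fpu_emu.h. */
    typedef struct {
        uint32_t lsw;   /* bits  0..31 */
        uint32_t midw;  /* bits 32..63 */
        uint32_t msw;   /* bits 64..95 */
    } Xsig;

    /* Reference for mul32_Xsig: 96-bit x times 32-bit b is a 128-bit product;
     * keep the top 96 bits (product >> 32), as the mull/adc chain above does. */
    static void mul32_xsig_ref(Xsig *x, uint32_t b)
    {
        uint64_t p0 = (uint64_t)x->lsw  * b;   /* only its high half is kept */
        uint64_t p1 = (uint64_t)x->midw * b;
        uint64_t p2 = (uint64_t)x->msw  * b;

        uint64_t acc = (p0 >> 32) + (uint32_t)p1;              /* product bits 32..63  */
        x->lsw  = (uint32_t)acc;
        acc     = (acc >> 32) + (p1 >> 32) + (uint32_t)p2;     /* product bits 64..95  */
        x->midw = (uint32_t)acc;
        x->msw  = (uint32_t)((acc >> 32) + (p2 >> 32));        /* product bits 96..127 */
    }

Feeding the same inputs to this sketch and to the assembly and comparing the three output
words is a straightforward way to sanity-check the routine.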
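
mul64_Xsig does the same for a 64-bit multiplier passed by pointer: conceptually a
160-bit product of which the top 96 bits are kept (product >> 64). Unlike the 32-bit
case, the lowest partial product (lsw times the low word of b) is never formed and the
low halves of the next two are thrown away, so carries out of those terms are lost; that
is the "ls bit or so may be wrong" caveat from the header. A hedged C sketch under the
same assumed Xsig layout (mul64_xsig_ref is an illustrative name, not a kernel symbol):

    #include <stdint.h>

    typedef struct { uint32_t lsw, midw, msw; } Xsig;  /* assumed layout, LS word first */

    /* Reference for mul64_Xsig: 96-bit x times 64-bit *b, keeping the top 96 bits
     * (product >> 64).  The lsw*b_lo term is never formed and the low halves of
     * lsw*b_hi and midw*b_lo are discarded, just as in the assembly. */
    static void mul64_xsig_ref(Xsig *x, const uint64_t *b)
    {
        uint32_t b_lo = (uint32_t)*b, b_hi = (uint32_t)(*b >> 32);
        uint64_t acc;
        uint32_t r0, r1, r2;

        /* result word 0: high halves of the 2^32-weight partial products
         * plus the low halves of the 2^64-weight ones */
        acc  = ((uint64_t)x->lsw  * b_hi) >> 32;
        acc += ((uint64_t)x->midw * b_lo) >> 32;
        acc += (uint32_t)((uint64_t)x->midw * b_hi);
        acc += (uint32_t)((uint64_t)x->msw  * b_lo);
        r0 = (uint32_t)acc;

        /* result word 1: carries, high halves of the 2^64-weight products,
         * and the low half of the 2^96-weight product */
        acc  = (acc >> 32);
        acc += ((uint64_t)x->midw * b_hi) >> 32;
        acc += ((uint64_t)x->msw  * b_lo) >> 32;
        acc += (uint32_t)((uint64_t)x->msw  * b_hi);
        r1 = (uint32_t)acc;

        /* result word 2: carry plus the high half of the 2^96-weight product */
        r2 = (uint32_t)((acc >> 32) + (((uint64_t)x->msw * b_hi) >> 32));

        x->lsw = r0;  x->midw = r1;  x->msw = r2;
    }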
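
mul_Xsig_Xsig is the full 96x96-bit case, keeping the top 96 bits of the 192-bit product
(product >> 96). The prototype in the header comment says "unsigned *b", but the code
indexes b exactly like a second Xsig, which is how the sketch below (again only a
reference under the assumed layout) treats it. The three lowest partial products are
skipped entirely and the low halves of the three middle ones are discarded:

    #include <stdint.h>

    typedef struct { uint32_t lsw, midw, msw; } Xsig;  /* assumed layout, LS word first */

    /* Reference for mul_Xsig_Xsig: 96-bit x times 96-bit *b, keeping the top 96
     * bits of the 192-bit product (product >> 96). */
    static void mul_xsig_xsig_ref(Xsig *x, const Xsig *b)
    {
        uint64_t acc;
        uint32_t r0, r1, r2;

        /* result word 0: high halves of the 2^64-weight partial products
         * plus the low halves of the 2^96-weight ones */
        acc  = ((uint64_t)x->lsw  * b->msw)  >> 32;
        acc += ((uint64_t)x->midw * b->midw) >> 32;
        acc += ((uint64_t)x->msw  * b->lsw)  >> 32;
        acc += (uint32_t)((uint64_t)x->midw * b->msw);
        acc += (uint32_t)((uint64_t)x->msw  * b->midw);
        r0 = (uint32_t)acc;

        /* result word 1: carries, high halves of the 2^96-weight products,
         * and the low half of the 2^128-weight product */
        acc  = (acc >> 32);
        acc += ((uint64_t)x->midw * b->msw)  >> 32;
        acc += ((uint64_t)x->msw  * b->midw) >> 32;
        acc += (uint32_t)((uint64_t)x->msw  * b->msw);
        r1 = (uint32_t)acc;

        /* result word 2: carry plus the high half of the 2^128-weight product */
        r2 = (uint32_t)((acc >> 32) + (((uint64_t)x->msw * b->msw) >> 32));

        x->lsw = r0;  x->midw = r1;  x->msw = r2;
    }

Since every dropped partial product could only have added carries upward, these routines
(and the sketches) can undershoot the exact truncated product by a few units in the last
place but never overshoot, consistent with the header's note that the result is neither
rounded nor normalized.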