From 1da177e4c3f41524e886b7f1b8a0c1fc7321cac2 Mon Sep 17 00:00:00 2001 From: Linus Torvalds Date: Sat, 16 Apr 2005 15:20:36 -0700 Subject: Linux-2.6.12-rc2 Initial git repository build. I'm not bothering with the full history, even though we have it. We can create a separate "historical" git archive of that later if we want to, and in the meantime it's about 3.2GB when imported into git - space that would just make the early git days unnecessarily complicated, when we don't have a lot of good infrastructure for it. Let it rip! --- arch/arm26/lib/Makefile | 26 ++ arch/arm26/lib/ashldi3.c | 61 +++ arch/arm26/lib/ashrdi3.c | 61 +++ arch/arm26/lib/backtrace.S | 145 +++++++ arch/arm26/lib/changebit.S | 28 ++ arch/arm26/lib/clearbit.S | 31 ++ arch/arm26/lib/copy_page.S | 62 +++ arch/arm26/lib/csumipv6.S | 32 ++ arch/arm26/lib/csumpartial.S | 130 ++++++ arch/arm26/lib/csumpartialcopy.S | 52 +++ arch/arm26/lib/csumpartialcopygeneric.S | 352 ++++++++++++++++ arch/arm26/lib/csumpartialcopyuser.S | 115 +++++ arch/arm26/lib/delay.S | 57 +++ arch/arm26/lib/ecard.S | 41 ++ arch/arm26/lib/findbit.S | 67 +++ arch/arm26/lib/floppydma.S | 32 ++ arch/arm26/lib/gcclib.h | 21 + arch/arm26/lib/getuser.S | 112 +++++ arch/arm26/lib/io-acorn.S | 71 ++++ arch/arm26/lib/io-readsb.S | 116 ++++++ arch/arm26/lib/io-readsl.S | 78 ++++ arch/arm26/lib/io-readsw.S | 107 +++++ arch/arm26/lib/io-writesb.S | 122 ++++++ arch/arm26/lib/io-writesl.S | 56 +++ arch/arm26/lib/io-writesw.S | 127 ++++++ arch/arm26/lib/kbd.c | 279 +++++++++++++ arch/arm26/lib/lib1funcs.S | 314 ++++++++++++++ arch/arm26/lib/longlong.h | 184 ++++++++ arch/arm26/lib/lshrdi3.c | 61 +++ arch/arm26/lib/memchr.S | 25 ++ arch/arm26/lib/memcpy.S | 318 ++++++++++++++ arch/arm26/lib/memset.S | 80 ++++ arch/arm26/lib/memzero.S | 80 ++++ arch/arm26/lib/muldi3.c | 77 ++++ arch/arm26/lib/putuser.S | 109 +++++ arch/arm26/lib/setbit.S | 29 ++ arch/arm26/lib/strchr.S | 25 ++ arch/arm26/lib/strrchr.S | 25 ++ arch/arm26/lib/testchangebit.S | 29 ++ arch/arm26/lib/testclearbit.S | 29 ++ arch/arm26/lib/testsetbit.S | 29 ++ arch/arm26/lib/uaccess-kernel.S | 173 ++++++++ arch/arm26/lib/uaccess-user.S | 718 ++++++++++++++++++++++++++++++++ arch/arm26/lib/ucmpdi2.c | 51 +++ arch/arm26/lib/udivdi3.c | 242 +++++++++++ 45 files changed, 4979 insertions(+) create mode 100644 arch/arm26/lib/Makefile create mode 100644 arch/arm26/lib/ashldi3.c create mode 100644 arch/arm26/lib/ashrdi3.c create mode 100644 arch/arm26/lib/backtrace.S create mode 100644 arch/arm26/lib/changebit.S create mode 100644 arch/arm26/lib/clearbit.S create mode 100644 arch/arm26/lib/copy_page.S create mode 100644 arch/arm26/lib/csumipv6.S create mode 100644 arch/arm26/lib/csumpartial.S create mode 100644 arch/arm26/lib/csumpartialcopy.S create mode 100644 arch/arm26/lib/csumpartialcopygeneric.S create mode 100644 arch/arm26/lib/csumpartialcopyuser.S create mode 100644 arch/arm26/lib/delay.S create mode 100644 arch/arm26/lib/ecard.S create mode 100644 arch/arm26/lib/findbit.S create mode 100644 arch/arm26/lib/floppydma.S create mode 100644 arch/arm26/lib/gcclib.h create mode 100644 arch/arm26/lib/getuser.S create mode 100644 arch/arm26/lib/io-acorn.S create mode 100644 arch/arm26/lib/io-readsb.S create mode 100644 arch/arm26/lib/io-readsl.S create mode 100644 arch/arm26/lib/io-readsw.S create mode 100644 arch/arm26/lib/io-writesb.S create mode 100644 arch/arm26/lib/io-writesl.S create mode 100644 arch/arm26/lib/io-writesw.S create mode 100644 arch/arm26/lib/kbd.c create mode 100644 arch/arm26/lib/lib1funcs.S 
create mode 100644 arch/arm26/lib/longlong.h create mode 100644 arch/arm26/lib/lshrdi3.c create mode 100644 arch/arm26/lib/memchr.S create mode 100644 arch/arm26/lib/memcpy.S create mode 100644 arch/arm26/lib/memset.S create mode 100644 arch/arm26/lib/memzero.S create mode 100644 arch/arm26/lib/muldi3.c create mode 100644 arch/arm26/lib/putuser.S create mode 100644 arch/arm26/lib/setbit.S create mode 100644 arch/arm26/lib/strchr.S create mode 100644 arch/arm26/lib/strrchr.S create mode 100644 arch/arm26/lib/testchangebit.S create mode 100644 arch/arm26/lib/testclearbit.S create mode 100644 arch/arm26/lib/testsetbit.S create mode 100644 arch/arm26/lib/uaccess-kernel.S create mode 100644 arch/arm26/lib/uaccess-user.S create mode 100644 arch/arm26/lib/ucmpdi2.c create mode 100644 arch/arm26/lib/udivdi3.c (limited to 'arch/arm26/lib') diff --git a/arch/arm26/lib/Makefile b/arch/arm26/lib/Makefile new file mode 100644 index 00000000000..6df2b793d36 --- /dev/null +++ b/arch/arm26/lib/Makefile @@ -0,0 +1,26 @@ +# +# linux/arch/arm26/lib/Makefile +# +# Copyright (C) 1995-2000 Russell King +# + +lib-y := backtrace.o changebit.o csumipv6.o csumpartial.o \ + csumpartialcopy.o csumpartialcopyuser.o clearbit.o \ + copy_page.o delay.o findbit.o memchr.o memcpy.o \ + memset.o memzero.o setbit.o \ + strchr.o strrchr.o testchangebit.o \ + testclearbit.o testsetbit.o getuser.o \ + putuser.o ashldi3.o ashrdi3.o lshrdi3.o muldi3.o \ + ucmpdi2.o udivdi3.o lib1funcs.o ecard.o io-acorn.o \ + floppydma.o io-readsb.o io-writesb.o io-writesl.o \ + uaccess-kernel.o uaccess-user.o io-readsw.o \ + io-writesw.o io-readsl.o ecard.o io-acorn.o \ + floppydma.o + +lib-n := + +lib-$(CONFIG_VT)+= kbd.o + +csumpartialcopy.o: csumpartialcopygeneric.S +csumpartialcopyuser.o: csumpartialcopygeneric.S + diff --git a/arch/arm26/lib/ashldi3.c b/arch/arm26/lib/ashldi3.c new file mode 100644 index 00000000000..130f5a83966 --- /dev/null +++ b/arch/arm26/lib/ashldi3.c @@ -0,0 +1,61 @@ +/* More subroutines needed by GCC output code on some machines. */ +/* Compile this one with gcc. */ +/* Copyright (C) 1989, 92-98, 1999 Free Software Foundation, Inc. + +This file is part of GNU CC. + +GNU CC is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; either version 2, or (at your option) +any later version. + +GNU CC is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License +along with GNU CC; see the file COPYING. If not, write to +the Free Software Foundation, 59 Temple Place - Suite 330, +Boston, MA 02111-1307, USA. */ + +/* As a special exception, if you link this library with other files, + some of which are compiled with GCC, to produce an executable, + this library does not by itself cause the resulting executable + to be covered by the GNU General Public License. + This exception does not however invalidate any other reasons why + the executable file might be covered by the GNU General Public License. + */ +/* support functions required by the kernel. 
based on code from gcc-2.95.3 */ +/* I Molton 29/07/01 */ + +#include "gcclib.h" + +DItype +__ashldi3 (DItype u, word_type b) +{ + DIunion w; + word_type bm; + DIunion uu; + + if (b == 0) + return u; + + uu.ll = u; + + bm = (sizeof (SItype) * BITS_PER_UNIT) - b; + if (bm <= 0) + { + w.s.low = 0; + w.s.high = (USItype)uu.s.low << -bm; + } + else + { + USItype carries = (USItype)uu.s.low >> bm; + w.s.low = (USItype)uu.s.low << b; + w.s.high = ((USItype)uu.s.high << b) | carries; + } + + return w.ll; +} + diff --git a/arch/arm26/lib/ashrdi3.c b/arch/arm26/lib/ashrdi3.c new file mode 100644 index 00000000000..71625d218f8 --- /dev/null +++ b/arch/arm26/lib/ashrdi3.c @@ -0,0 +1,61 @@ +/* More subroutines needed by GCC output code on some machines. */ +/* Compile this one with gcc. */ +/* Copyright (C) 1989, 92-98, 1999 Free Software Foundation, Inc. + +This file is part of GNU CC. + +GNU CC is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; either version 2, or (at your option) +any later version. + +GNU CC is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License +along with GNU CC; see the file COPYING. If not, write to +the Free Software Foundation, 59 Temple Place - Suite 330, +Boston, MA 02111-1307, USA. */ + +/* As a special exception, if you link this library with other files, + some of which are compiled with GCC, to produce an executable, + this library does not by itself cause the resulting executable + to be covered by the GNU General Public License. + This exception does not however invalidate any other reasons why + the executable file might be covered by the GNU General Public License. + */ +/* support functions required by the kernel. based on code from gcc-2.95.3 */ +/* I Molton 29/07/01 */ + +#include "gcclib.h" + +DItype +__ashrdi3 (DItype u, word_type b) +{ + DIunion w; + word_type bm; + DIunion uu; + + if (b == 0) + return u; + + uu.ll = u; + + bm = (sizeof (SItype) * BITS_PER_UNIT) - b; + if (bm <= 0) + { + /* w.s.high = 1..1 or 0..0 */ + w.s.high = uu.s.high >> (sizeof (SItype) * BITS_PER_UNIT - 1); + w.s.low = uu.s.high >> -bm; + } + else + { + USItype carries = (USItype)uu.s.high << bm; + w.s.high = uu.s.high >> b; + w.s.low = ((USItype)uu.s.low >> b) | carries; + } + + return w.ll; +} diff --git a/arch/arm26/lib/backtrace.S b/arch/arm26/lib/backtrace.S new file mode 100644 index 00000000000..d793fe4339f --- /dev/null +++ b/arch/arm26/lib/backtrace.S @@ -0,0 +1,145 @@ +/* + * linux/arch/arm26/lib/backtrace.S + * + * Copyright (C) 1995, 1996 Russell King + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ +#include +#include +#include + .text + +@ fp is 0 or stack frame + +#define frame r4 +#define next r5 +#define save r6 +#define mask r7 +#define offset r8 + +ENTRY(__backtrace) + mov r1, #0x10 + mov r0, fp + +ENTRY(c_backtrace) + +#ifdef CONFIG_NO_FRAME_POINTER + mov pc, lr +#else + + stmfd sp!, {r4 - r8, lr} @ Save an extra register so we have a location... 
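+@ The walker below follows the APCS frame chain: [frame] holds the saved
+@ pc, [frame, #-4] the lr, [frame, #-8] the sp and [frame, #-12] the
+@ caller's fp. 'mask' (0xfc000003) is used both to reject frame pointers
+@ that would raise address exceptions and to clear the 26-bit PSR/mode
+@ bits from saved pc/lr values before they are printed.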
+ mov mask, #0xfc000003 + tst mask, r0 + movne r0, #0 + movs frame, r0 +1: moveq r0, #-2 + LOADREGS(eqfd, sp!, {r4 - r8, pc}) + +2: stmfd sp!, {pc} @ calculate offset of PC in STMIA instruction + ldr r0, [sp], #4 + adr r1, 2b - 4 + sub offset, r0, r1 + +3: tst frame, mask @ Check for address exceptions... + bne 1b + +1001: ldr next, [frame, #-12] @ get fp +1002: ldr r2, [frame, #-4] @ get lr +1003: ldr r3, [frame, #0] @ get pc + sub save, r3, offset @ Correct PC for prefetching + bic save, save, mask +1004: ldr r1, [save, #0] @ get instruction at function + mov r1, r1, lsr #10 + ldr r3, .Ldsi+4 + teq r1, r3 + subeq save, save, #4 + adr r0, .Lfe + mov r1, save + bic r2, r2, mask + bl printk @ print pc and link register + + ldr r0, [frame, #-8] @ get sp + sub r0, r0, #4 +1005: ldr r1, [save, #4] @ get instruction at function+4 + mov r3, r1, lsr #10 + ldr r2, .Ldsi+4 + teq r3, r2 @ Check for stmia sp!, {args} + addeq save, save, #4 @ next instruction + bleq .Ldumpstm + + sub r0, frame, #16 +1006: ldr r1, [save, #4] @ Get 'stmia sp!, {rlist, fp, ip, lr, pc}' instruction + mov r3, r1, lsr #10 + ldr r2, .Ldsi + teq r3, r2 + bleq .Ldumpstm + + teq frame, next + movne frame, next + teqne frame, #0 + bne 3b + LOADREGS(fd, sp!, {r4 - r8, pc}) + +/* + * Fixup for LDMDB + */ + .section .fixup,"ax" + .align 0 +1007: ldr r0, =.Lbad + mov r1, frame + bl printk + LOADREGS(fd, sp!, {r4 - r8, pc}) + .ltorg + .previous + + .section __ex_table,"a" + .align 3 + .long 1001b, 1007b + .long 1002b, 1007b + .long 1003b, 1007b + .long 1004b, 1007b + .long 1005b, 1007b + .long 1006b, 1007b + .previous + +#define instr r4 +#define reg r5 +#define stack r6 + +.Ldumpstm: stmfd sp!, {instr, reg, stack, r7, lr} + mov stack, r0 + mov instr, r1 + mov reg, #9 + mov r7, #0 +1: mov r3, #1 + tst instr, r3, lsl reg + beq 2f + add r7, r7, #1 + teq r7, #4 + moveq r7, #0 + moveq r3, #'\n' + movne r3, #' ' + ldr r2, [stack], #-4 + mov r1, reg + adr r0, .Lfp + bl printk +2: subs reg, reg, #1 + bpl 1b + teq r7, #0 + adrne r0, .Lcr + blne printk + mov r0, stack + LOADREGS(fd, sp!, {instr, reg, stack, r7, pc}) + +.Lfe: .asciz "Function entered at [<%p>] from [<%p>]\n" +.Lfp: .asciz " r%d = %08X%c" +.Lcr: .asciz "\n" +.Lbad: .asciz "Backtrace aborted due to bad frame pointer <%p>\n" + .align +.Ldsi: .word 0x00e92dd8 >> 2 + .word 0x00e92d00 >> 2 + +#endif diff --git a/arch/arm26/lib/changebit.S b/arch/arm26/lib/changebit.S new file mode 100644 index 00000000000..1b6a077be5a --- /dev/null +++ b/arch/arm26/lib/changebit.S @@ -0,0 +1,28 @@ +/* + * linux/arch/arm26/lib/changebit.S + * + * Copyright (C) 1995-1996 Russell King + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ */ +#include +#include + .text + +/* Purpose : Function to change a bit + * Prototype: int change_bit(int bit, void *addr) + */ +ENTRY(_change_bit_be) + eor r0, r0, #0x18 @ big endian byte ordering +ENTRY(_change_bit_le) + and r2, r0, #7 + mov r3, #1 + mov r3, r3, lsl r2 + save_and_disable_irqs ip, r2 + ldrb r2, [r1, r0, lsr #3] + eor r2, r2, r3 + strb r2, [r1, r0, lsr #3] + restore_irqs ip + RETINSTR(mov,pc,lr) diff --git a/arch/arm26/lib/clearbit.S b/arch/arm26/lib/clearbit.S new file mode 100644 index 00000000000..0a895b0c759 --- /dev/null +++ b/arch/arm26/lib/clearbit.S @@ -0,0 +1,31 @@ +/* + * linux/arch/arm26/lib/clearbit.S + * + * Copyright (C) 1995-1996 Russell King + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ +#include +#include + .text + +/* + * Purpose : Function to clear a bit + * Prototype: int clear_bit(int bit, void *addr) + */ +ENTRY(_clear_bit_be) + eor r0, r0, #0x18 @ big endian byte ordering +ENTRY(_clear_bit_le) + and r2, r0, #7 + mov r3, #1 + mov r3, r3, lsl r2 + save_and_disable_irqs ip, r2 + ldrb r2, [r1, r0, lsr #3] + bic r2, r2, r3 + strb r2, [r1, r0, lsr #3] + restore_irqs ip + RETINSTR(mov,pc,lr) + + diff --git a/arch/arm26/lib/copy_page.S b/arch/arm26/lib/copy_page.S new file mode 100644 index 00000000000..2d79ee12ea1 --- /dev/null +++ b/arch/arm26/lib/copy_page.S @@ -0,0 +1,62 @@ +/* + * linux/arch/arm26/lib/copypage.S + * + * Copyright (C) 1995-1999 Russell King + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * ASM optimised string functions + */ +#include +#include +#include + + .text + .align 5 +/* + * ARMv3 optimised copy_user_page + * + * FIXME: rmk do we need to handle cache stuff... + * FIXME: im is this right on ARM26? + */ +ENTRY(__copy_user_page) + stmfd sp!, {r4, lr} @ 2 + mov r2, #PAGE_SZ/64 @ 1 + ldmia r1!, {r3, r4, ip, lr} @ 4+1 +1: stmia r0!, {r3, r4, ip, lr} @ 4 + ldmia r1!, {r3, r4, ip, lr} @ 4+1 + stmia r0!, {r3, r4, ip, lr} @ 4 + ldmia r1!, {r3, r4, ip, lr} @ 4+1 + stmia r0!, {r3, r4, ip, lr} @ 4 + ldmia r1!, {r3, r4, ip, lr} @ 4 + subs r2, r2, #1 @ 1 + stmia r0!, {r3, r4, ip, lr} @ 4 + ldmneia r1!, {r3, r4, ip, lr} @ 4 + bne 1b @ 1 + LOADREGS(fd, sp!, {r4, pc}) @ 3 + + .align 5 +/* + * ARMv3 optimised clear_user_page + * + * FIXME: rmk do we need to handle cache stuff... + */ +ENTRY(__clear_user_page) + str lr, [sp, #-4]! + mov r1, #PAGE_SZ/64 @ 1 + mov r2, #0 @ 1 + mov r3, #0 @ 1 + mov ip, #0 @ 1 + mov lr, #0 @ 1 +1: stmia r0!, {r2, r3, ip, lr} @ 4 + stmia r0!, {r2, r3, ip, lr} @ 4 + stmia r0!, {r2, r3, ip, lr} @ 4 + stmia r0!, {r2, r3, ip, lr} @ 4 + subs r1, r1, #1 @ 1 + bne 1b @ 1 + ldr pc, [sp], #4 + + .section ".init.text", #alloc, #execinstr + diff --git a/arch/arm26/lib/csumipv6.S b/arch/arm26/lib/csumipv6.S new file mode 100644 index 00000000000..62831155acd --- /dev/null +++ b/arch/arm26/lib/csumipv6.S @@ -0,0 +1,32 @@ +/* + * linux/arch/arm26/lib/csumipv6.S + * + * Copyright (C) 1995-1998 Russell King + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ +#include +#include + + .text + +ENTRY(__csum_ipv6_magic) + str lr, [sp, #-4]! 
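+@ r0/r1 point at the two 16-byte IPv6 addresses (each loaded as four
+@ words below), r2/r3 carry len and proto in the usual csum_ipv6_magic
+@ argument order, and the fifth argument (the initial checksum) is
+@ fetched from [sp, #4] once lr has been pushed; everything is folded
+@ together with add/adc so the carries accumulate into the result.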
+ adds ip, r2, r3 + ldmia r1, {r1 - r3, lr} + adcs ip, ip, r1 + adcs ip, ip, r2 + adcs ip, ip, r3 + adcs ip, ip, lr + ldmia r0, {r0 - r3} + adcs r0, ip, r0 + adcs r0, r0, r1 + adcs r0, r0, r2 + ldr r2, [sp, #4] + adcs r0, r0, r3 + adcs r0, r0, r2 + adcs r0, r0, #0 + LOADREGS(fd, sp!, {pc}) + diff --git a/arch/arm26/lib/csumpartial.S b/arch/arm26/lib/csumpartial.S new file mode 100644 index 00000000000..e53e7109e62 --- /dev/null +++ b/arch/arm26/lib/csumpartial.S @@ -0,0 +1,130 @@ +/* + * linux/arch/arm26/lib/csumpartial.S + * + * Copyright (C) 1995-1998 Russell King + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ +#include +#include + + .text + +/* + * Function: __u32 csum_partial(const char *src, int len, __u32 sum) + * Params : r0 = buffer, r1 = len, r2 = checksum + * Returns : r0 = new checksum + */ + +buf .req r0 +len .req r1 +sum .req r2 +td0 .req r3 +td1 .req r4 @ save before use +td2 .req r5 @ save before use +td3 .req lr + +.zero: mov r0, sum + add sp, sp, #4 + ldr pc, [sp], #4 + + /* + * Handle 0 to 7 bytes, with any alignment of source and + * destination pointers. Note that when we get here, C = 0 + */ +.less8: teq len, #0 @ check for zero count + beq .zero + + /* we must have at least one byte. */ + tst buf, #1 @ odd address? + ldrneb td0, [buf], #1 + subne len, len, #1 + adcnes sum, sum, td0, lsl #byte(1) + +.less4: tst len, #6 + beq .less8_byte + + /* we are now half-word aligned */ + +.less8_wordlp: +#if __LINUX_ARM_ARCH__ >= 4 + ldrh td0, [buf], #2 + sub len, len, #2 +#else + ldrb td0, [buf], #1 + ldrb td3, [buf], #1 + sub len, len, #2 + orr td0, td0, td3, lsl #8 +#endif + adcs sum, sum, td0 + tst len, #6 + bne .less8_wordlp + +.less8_byte: tst len, #1 @ odd number of bytes + ldrneb td0, [buf], #1 @ include last byte + adcnes sum, sum, td0, lsl #byte(0) @ update checksum + +.done: adc r0, sum, #0 @ collect up the last carry + ldr td0, [sp], #4 + tst td0, #1 @ check buffer alignment + movne td0, r0, lsl #8 @ rotate checksum by 8 bits + orrne r0, td0, r0, lsr #24 + ldr pc, [sp], #4 @ return + +.not_aligned: tst buf, #1 @ odd address + ldrneb td0, [buf], #1 @ make even + subne len, len, #1 + adcnes sum, sum, td0, lsl #byte(1) @ update checksum + + tst buf, #2 @ 32-bit aligned? +#if __LINUX_ARM_ARCH__ >= 4 + ldrneh td0, [buf], #2 @ make 32-bit aligned + subne len, len, #2 +#else + ldrneb td0, [buf], #1 + ldrneb ip, [buf], #1 + subne len, len, #2 + orrne td0, td0, ip, lsl #8 +#endif + adcnes sum, sum, td0 @ update checksum + mov pc, lr + +ENTRY(csum_partial) + stmfd sp!, {buf, lr} + cmp len, #8 @ Ensure that we have at least + blo .less8 @ 8 bytes to copy. 
+ + adds sum, sum, #0 @ C = 0 + tst buf, #3 @ Test destination alignment + blne .not_aligned @ aligh destination, return here + +1: bics ip, len, #31 + beq 3f + + stmfd sp!, {r4 - r5} +2: ldmia buf!, {td0, td1, td2, td3} + adcs sum, sum, td0 + adcs sum, sum, td1 + adcs sum, sum, td2 + adcs sum, sum, td3 + ldmia buf!, {td0, td1, td2, td3} + adcs sum, sum, td0 + adcs sum, sum, td1 + adcs sum, sum, td2 + adcs sum, sum, td3 + sub ip, ip, #32 + teq ip, #0 + bne 2b + ldmfd sp!, {r4 - r5} + +3: tst len, #0x1c @ should not change C + beq .less4 + +4: ldr td0, [buf], #4 + sub len, len, #4 + adcs sum, sum, td0 + tst len, #0x1c + bne 4b + b .less4 diff --git a/arch/arm26/lib/csumpartialcopy.S b/arch/arm26/lib/csumpartialcopy.S new file mode 100644 index 00000000000..a1c4b5fdd49 --- /dev/null +++ b/arch/arm26/lib/csumpartialcopy.S @@ -0,0 +1,52 @@ +/* + * linux/arch/arm26/lib/csumpartialcopy.S + * + * Copyright (C) 1995-1998 Russell King + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ +#include +#include + + .text + +/* Function: __u32 csum_partial_copy_nocheck(const char *src, char *dst, int len, __u32 sum) + * Params : r0 = src, r1 = dst, r2 = len, r3 = checksum + * Returns : r0 = new checksum + */ + + .macro save_regs + stmfd sp!, {r1, r4 - r8, fp, ip, lr, pc} + .endm + + .macro load_regs,flags + LOADREGS(\flags,fp,{r1, r4 - r8, fp, sp, pc}) + .endm + + .macro load1b, reg1 + ldrb \reg1, [r0], #1 + .endm + + .macro load2b, reg1, reg2 + ldrb \reg1, [r0], #1 + ldrb \reg2, [r0], #1 + .endm + + .macro load1l, reg1 + ldr \reg1, [r0], #4 + .endm + + .macro load2l, reg1, reg2 + ldr \reg1, [r0], #4 + ldr \reg2, [r0], #4 + .endm + + .macro load4l, reg1, reg2, reg3, reg4 + ldmia r0!, {\reg1, \reg2, \reg3, \reg4} + .endm + +#define FN_ENTRY ENTRY(csum_partial_copy_nocheck) + +#include "csumpartialcopygeneric.S" diff --git a/arch/arm26/lib/csumpartialcopygeneric.S b/arch/arm26/lib/csumpartialcopygeneric.S new file mode 100644 index 00000000000..5249c3ad11d --- /dev/null +++ b/arch/arm26/lib/csumpartialcopygeneric.S @@ -0,0 +1,352 @@ +/* + * linux/arch/arm26/lib/csumpartialcopygeneric.S + * + * Copyright (C) 1995-2001 Russell King + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * JMA 01/06/03 Commented out some shl0s; probobly irrelevant to arm26 + * + */ + +/* + * unsigned int + * csum_partial_copy_xxx(const char *src, char *dst, int len, int sum, ) + * r0 = src, r1 = dst, r2 = len, r3 = sum + * Returns : r0 = checksum + * + * Note that 'tst' and 'teq' preserve the carry flag. + */ + +/* Quick hack */ + .macro save_regs + stmfd sp!, {r1, r4 - r8, fp, ip, lr, pc} + .endm + +/* end Quick Hack */ + +src .req r0 +dst .req r1 +len .req r2 +sum .req r3 + +.zero: mov r0, sum + load_regs ea + + /* + * Align an unaligned destination pointer. We know that + * we have >= 8 bytes here, so we don't need to check + * the length. Note that the source pointer hasn't been + * aligned yet. 
+ */ +.dst_unaligned: tst dst, #1 + beq .dst_16bit + + load1b ip + sub len, len, #1 + adcs sum, sum, ip, lsl #byte(1) @ update checksum + strb ip, [dst], #1 + tst dst, #2 + moveq pc, lr @ dst is now 32bit aligned + +.dst_16bit: load2b r8, ip + sub len, len, #2 + adcs sum, sum, r8, lsl #byte(0) + strb r8, [dst], #1 + adcs sum, sum, ip, lsl #byte(1) + strb ip, [dst], #1 + mov pc, lr @ dst is now 32bit aligned + + /* + * Handle 0 to 7 bytes, with any alignment of source and + * destination pointers. Note that when we get here, C = 0 + */ +.less8: teq len, #0 @ check for zero count + beq .zero + + /* we must have at least one byte. */ + tst dst, #1 @ dst 16-bit aligned + beq .less8_aligned + + /* Align dst */ + load1b ip + sub len, len, #1 + adcs sum, sum, ip, lsl #byte(1) @ update checksum + strb ip, [dst], #1 + tst len, #6 + beq .less8_byteonly + +1: load2b r8, ip + sub len, len, #2 + adcs sum, sum, r8, lsl #byte(0) + strb r8, [dst], #1 + adcs sum, sum, ip, lsl #byte(1) + strb ip, [dst], #1 +.less8_aligned: tst len, #6 + bne 1b +.less8_byteonly: + tst len, #1 + beq .done + load1b r8 + adcs sum, sum, r8, lsl #byte(0) @ update checksum + strb r8, [dst], #1 + b .done + +FN_ENTRY + mov ip, sp + save_regs + sub fp, ip, #4 + + cmp len, #8 @ Ensure that we have at least + blo .less8 @ 8 bytes to copy. + + adds sum, sum, #0 @ C = 0 + tst dst, #3 @ Test destination alignment + blne .dst_unaligned @ align destination, return here + + /* + * Ok, the dst pointer is now 32bit aligned, and we know + * that we must have more than 4 bytes to copy. Note + * that C contains the carry from the dst alignment above. + */ + + tst src, #3 @ Test source alignment + bne .src_not_aligned + + /* Routine for src & dst aligned */ + + bics ip, len, #15 + beq 2f + +1: load4l r4, r5, r6, r7 + stmia dst!, {r4, r5, r6, r7} + adcs sum, sum, r4 + adcs sum, sum, r5 + adcs sum, sum, r6 + adcs sum, sum, r7 + sub ip, ip, #16 + teq ip, #0 + bne 1b + +2: ands ip, len, #12 + beq 4f + tst ip, #8 + beq 3f + load2l r4, r5 + stmia dst!, {r4, r5} + adcs sum, sum, r4 + adcs sum, sum, r5 + tst ip, #4 + beq 4f + +3: load1l r4 + str r4, [dst], #4 + adcs sum, sum, r4 + +4: ands len, len, #3 + beq .done + load1l r4 + tst len, #2 +/* mov r5, r4, lsr #byte(0) +FIXME? 0 Shift anyhow! +*/ + beq .exit + adcs sum, sum, r4, push #16 + strb r5, [dst], #1 + mov r5, r4, lsr #byte(1) + strb r5, [dst], #1 + mov r5, r4, lsr #byte(2) +.exit: tst len, #1 + strneb r5, [dst], #1 + andne r5, r5, #255 + adcnes sum, sum, r5, lsl #byte(0) + + /* + * If the dst pointer was not 16-bit aligned, we + * need to rotate the checksum here to get around + * the inefficient byte manipulations in the + * architecture independent code. 
+ */ +.done: adc r0, sum, #0 + ldr sum, [sp, #0] @ dst + tst sum, #1 + movne sum, r0, lsl #8 + orrne r0, sum, r0, lsr #24 + load_regs ea + +.src_not_aligned: + adc sum, sum, #0 @ include C from dst alignment + and ip, src, #3 + bic src, src, #3 + load1l r5 + cmp ip, #2 + beq .src2_aligned + bhi .src3_aligned + mov r4, r5, pull #8 @ C = 0 + bics ip, len, #15 + beq 2f +1: load4l r5, r6, r7, r8 + orr r4, r4, r5, push #24 + mov r5, r5, pull #8 + orr r5, r5, r6, push #24 + mov r6, r6, pull #8 + orr r6, r6, r7, push #24 + mov r7, r7, pull #8 + orr r7, r7, r8, push #24 + stmia dst!, {r4, r5, r6, r7} + adcs sum, sum, r4 + adcs sum, sum, r5 + adcs sum, sum, r6 + adcs sum, sum, r7 + mov r4, r8, pull #8 + sub ip, ip, #16 + teq ip, #0 + bne 1b +2: ands ip, len, #12 + beq 4f + tst ip, #8 + beq 3f + load2l r5, r6 + orr r4, r4, r5, push #24 + mov r5, r5, pull #8 + orr r5, r5, r6, push #24 + stmia dst!, {r4, r5} + adcs sum, sum, r4 + adcs sum, sum, r5 + mov r4, r6, pull #8 + tst ip, #4 + beq 4f +3: load1l r5 + orr r4, r4, r5, push #24 + str r4, [dst], #4 + adcs sum, sum, r4 + mov r4, r5, pull #8 +4: ands len, len, #3 + beq .done +/* mov r5, r4, lsr #byte(0) +FIXME? 0 Shift anyhow +*/ + tst len, #2 + beq .exit + adcs sum, sum, r4, push #16 + strb r5, [dst], #1 + mov r5, r4, lsr #byte(1) + strb r5, [dst], #1 + mov r5, r4, lsr #byte(2) + b .exit + +.src2_aligned: mov r4, r5, pull #16 + adds sum, sum, #0 + bics ip, len, #15 + beq 2f +1: load4l r5, r6, r7, r8 + orr r4, r4, r5, push #16 + mov r5, r5, pull #16 + orr r5, r5, r6, push #16 + mov r6, r6, pull #16 + orr r6, r6, r7, push #16 + mov r7, r7, pull #16 + orr r7, r7, r8, push #16 + stmia dst!, {r4, r5, r6, r7} + adcs sum, sum, r4 + adcs sum, sum, r5 + adcs sum, sum, r6 + adcs sum, sum, r7 + mov r4, r8, pull #16 + sub ip, ip, #16 + teq ip, #0 + bne 1b +2: ands ip, len, #12 + beq 4f + tst ip, #8 + beq 3f + load2l r5, r6 + orr r4, r4, r5, push #16 + mov r5, r5, pull #16 + orr r5, r5, r6, push #16 + stmia dst!, {r4, r5} + adcs sum, sum, r4 + adcs sum, sum, r5 + mov r4, r6, pull #16 + tst ip, #4 + beq 4f +3: load1l r5 + orr r4, r4, r5, push #16 + str r4, [dst], #4 + adcs sum, sum, r4 + mov r4, r5, pull #16 +4: ands len, len, #3 + beq .done +/* mov r5, r4, lsr #byte(0) +FIXME? 0 Shift anyhow +*/ + tst len, #2 + beq .exit + adcs sum, sum, r4 + strb r5, [dst], #1 + mov r5, r4, lsr #byte(1) + strb r5, [dst], #1 + tst len, #1 + beq .done + load1b r5 + b .exit + +.src3_aligned: mov r4, r5, pull #24 + adds sum, sum, #0 + bics ip, len, #15 + beq 2f +1: load4l r5, r6, r7, r8 + orr r4, r4, r5, push #8 + mov r5, r5, pull #24 + orr r5, r5, r6, push #8 + mov r6, r6, pull #24 + orr r6, r6, r7, push #8 + mov r7, r7, pull #24 + orr r7, r7, r8, push #8 + stmia dst!, {r4, r5, r6, r7} + adcs sum, sum, r4 + adcs sum, sum, r5 + adcs sum, sum, r6 + adcs sum, sum, r7 + mov r4, r8, pull #24 + sub ip, ip, #16 + teq ip, #0 + bne 1b +2: ands ip, len, #12 + beq 4f + tst ip, #8 + beq 3f + load2l r5, r6 + orr r4, r4, r5, push #8 + mov r5, r5, pull #24 + orr r5, r5, r6, push #8 + stmia dst!, {r4, r5} + adcs sum, sum, r4 + adcs sum, sum, r5 + mov r4, r6, pull #24 + tst ip, #4 + beq 4f +3: load1l r5 + orr r4, r4, r5, push #8 + str r4, [dst], #4 + adcs sum, sum, r4 + mov r4, r5, pull #24 +4: ands len, len, #3 + beq .done +/* mov r5, r4, lsr #byte(0) +FIXME? 0 Shift anyhow +*/ + tst len, #2 + beq .exit + strb r5, [dst], #1 + adcs sum, sum, r4 + load1l r4 +/* mov r5, r4, lsr #byte(0) +FIXME? 
0 Shift anyhow +*/ + strb r5, [dst], #1 + adcs sum, sum, r4, push #24 + mov r5, r4, lsr #byte(1) + b .exit diff --git a/arch/arm26/lib/csumpartialcopyuser.S b/arch/arm26/lib/csumpartialcopyuser.S new file mode 100644 index 00000000000..5b821188e47 --- /dev/null +++ b/arch/arm26/lib/csumpartialcopyuser.S @@ -0,0 +1,115 @@ +/* + * linux/arch/arm26/lib/csumpartialcopyuser.S + * + * Copyright (C) 1995-1998 Russell King + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ +#include +#include +#include +#include +#include + + .text + + .macro save_regs + stmfd sp!, {r1 - r2, r4 - r9, fp, ip, lr, pc} + mov r9, sp, lsr #13 + mov r9, r9, lsl #13 + ldr r9, [r9, #TSK_ADDR_LIMIT] + mov r9, r9, lsr #24 + .endm + + .macro load_regs,flags + ldm\flags fp, {r1, r2, r4-r9, fp, sp, pc}^ + .endm + + .macro load1b, reg1 + tst r9, #0x01 +9999: ldreqbt \reg1, [r0], #1 + ldrneb \reg1, [r0], #1 + .section __ex_table, "a" + .align 3 + .long 9999b, 6001f + .previous + .endm + + .macro load2b, reg1, reg2 + tst r9, #0x01 +9999: ldreqbt \reg1, [r0], #1 + ldrneb \reg1, [r0], #1 +9998: ldreqbt \reg2, [r0], #1 + ldrneb \reg2, [r0], #1 + .section __ex_table, "a" + .long 9999b, 6001f + .long 9998b, 6001f + .previous + .endm + + .macro load1l, reg1 + tst r9, #0x01 +9999: ldreqt \reg1, [r0], #4 + ldrne \reg1, [r0], #4 + .section __ex_table, "a" + .align 3 + .long 9999b, 6001f + .previous + .endm + + .macro load2l, reg1, reg2 + tst r9, #0x01 + ldmneia r0!, {\reg1, \reg2} +9999: ldreqt \reg1, [r0], #4 +9998: ldreqt \reg2, [r0], #4 + .section __ex_table, "a" + .long 9999b, 6001f + .long 9998b, 6001f + .previous + .endm + + .macro load4l, reg1, reg2, reg3, reg4 + tst r9, #0x01 + ldmneia r0!, {\reg1, \reg2, \reg3, \reg4} +9999: ldreqt \reg1, [r0], #4 +9998: ldreqt \reg2, [r0], #4 +9997: ldreqt \reg3, [r0], #4 +9996: ldreqt \reg4, [r0], #4 + .section __ex_table, "a" + .long 9999b, 6001f + .long 9998b, 6001f + .long 9997b, 6001f + .long 9996b, 6001f + .previous + .endm + +/* + * unsigned int + * csum_partial_copy_from_user(const char *src, char *dst, int len, int sum, int *err_ptr) + * r0 = src, r1 = dst, r2 = len, r3 = sum, [sp] = *err_ptr + * Returns : r0 = checksum, [[sp, #0], #0] = 0 or -EFAULT + */ + +#define FN_ENTRY ENTRY(csum_partial_copy_from_user) + +#include "csumpartialcopygeneric.S" + +/* + * FIXME: minor buglet here + * We don't return the checksum for the data present in the buffer. To do + * so properly, we would have to add in whatever registers were loaded before + * the fault, which, with the current asm above is not predictable. + */ + .align 4 +6001: mov r4, #-EFAULT + ldr r5, [fp, #4] @ *err_ptr + str r4, [r5] + ldmia sp, {r1, r2} @ retrieve dst, len + add r2, r2, r1 + mov r0, #0 @ zero the buffer +6002: teq r2, r1 + strneb r0, [r1], #1 + bne 6002b + load_regs ea diff --git a/arch/arm26/lib/delay.S b/arch/arm26/lib/delay.S new file mode 100644 index 00000000000..66f2b68e1b1 --- /dev/null +++ b/arch/arm26/lib/delay.S @@ -0,0 +1,57 @@ +/* + * linux/arch/arm26/lib/delay.S + * + * Copyright (C) 1995, 1996 Russell King + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ */ +#include +#include + .text + +LC0: .word loops_per_jiffy + +/* + * 0 <= r0 <= 2000 + */ +ENTRY(udelay) + mov r2, #0x6800 + orr r2, r2, #0x00db + mul r1, r0, r2 + ldr r2, LC0 + ldr r2, [r2] + mov r1, r1, lsr #11 + mov r2, r2, lsr #11 + mul r0, r1, r2 + movs r0, r0, lsr #6 + RETINSTR(moveq,pc,lr) + +/* + * loops = (r0 * 0x10c6 * 100 * loops_per_jiffy) / 2^32 + * + * Oh, if only we had a cycle counter... + */ + +@ Delay routine +ENTRY(__delay) + subs r0, r0, #1 +#if 0 + RETINSTR(movls,pc,lr) + subs r0, r0, #1 + RETINSTR(movls,pc,lr) + subs r0, r0, #1 + RETINSTR(movls,pc,lr) + subs r0, r0, #1 + RETINSTR(movls,pc,lr) + subs r0, r0, #1 + RETINSTR(movls,pc,lr) + subs r0, r0, #1 + RETINSTR(movls,pc,lr) + subs r0, r0, #1 + RETINSTR(movls,pc,lr) + subs r0, r0, #1 +#endif + bhi __delay + RETINSTR(mov,pc,lr) diff --git a/arch/arm26/lib/ecard.S b/arch/arm26/lib/ecard.S new file mode 100644 index 00000000000..b4633150f01 --- /dev/null +++ b/arch/arm26/lib/ecard.S @@ -0,0 +1,41 @@ +/* + * linux/arch/arm26/lib/ecard.S + * + * Copyright (C) 1995, 1996 Russell King + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ +#include /* for CONFIG_CPU_nn */ +#include +#include +#include + +#define CPSR2SPSR(rt) + +@ Purpose: call an expansion card loader to read bytes. +@ Proto : char read_loader(int offset, char *card_base, char *loader); +@ Returns: byte read + +ENTRY(ecard_loader_read) + stmfd sp!, {r4 - r12, lr} + mov r11, r1 + mov r1, r0 + CPSR2SPSR(r0) + mov lr, pc + mov pc, r2 + LOADREGS(fd, sp!, {r4 - r12, pc}) + +@ Purpose: call an expansion card loader to reset the card +@ Proto : void read_loader(int card_base, char *loader); +@ Returns: byte read + +ENTRY(ecard_loader_reset) + stmfd sp!, {r4 - r12, lr} + mov r11, r0 + CPSR2SPSR(r0) + mov lr, pc + add pc, r1, #8 + LOADREGS(fd, sp!, {r4 - r12, pc}) + diff --git a/arch/arm26/lib/findbit.S b/arch/arm26/lib/findbit.S new file mode 100644 index 00000000000..26f67cccc37 --- /dev/null +++ b/arch/arm26/lib/findbit.S @@ -0,0 +1,67 @@ +/* + * linux/arch/arm/lib/findbit.S + * + * Copyright (C) 1995-2000 Russell King + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * 16th March 2001 - John Ripley + * Fixed so that "size" is an exclusive not an inclusive quantity. + * All users of these functions expect exclusive sizes, and may + * also call with zero size. + * Reworked by rmk. + */ +#include +#include + .text + +/* + * Purpose : Find a 'zero' bit + * Prototype: int find_first_zero_bit(void *addr, unsigned int maxbit); + */ +ENTRY(_find_first_zero_bit_le) + teq r1, #0 + beq 3f + mov r2, #0 +1: ldrb r3, [r0, r2, lsr #3] + eors r3, r3, #0xff @ invert bits + bne .found @ any now set - found zero bit + add r2, r2, #8 @ next bit pointer +2: cmp r2, r1 @ any more? 
+ blo 1b +3: mov r0, r1 @ no free bits + RETINSTR(mov,pc,lr) + +/* + * Purpose : Find next 'zero' bit + * Prototype: int find_next_zero_bit(void *addr, unsigned int maxbit, int offset) + */ +ENTRY(_find_next_zero_bit_le) + teq r1, #0 + beq 2b + ands ip, r2, #7 + beq 1b @ If new byte, goto old routine + ldrb r3, [r0, r2, lsr #3] + eor r3, r3, #0xff @ now looking for a 1 bit + movs r3, r3, lsr ip @ shift off unused bits + bne .found + orr r2, r2, #7 @ if zero, then no bits here + add r2, r2, #1 @ align bit pointer + b 2b @ loop for next bit + +/* + * One or more bits in the LSB of r3 are assumed to be set. + */ +.found: tst r3, #0x0f + addeq r2, r2, #4 + movne r3, r3, lsl #4 + tst r3, #0x30 + addeq r2, r2, #2 + movne r3, r3, lsl #2 + tst r3, #0x40 + addeq r2, r2, #1 + mov r0, r2 + RETINSTR(mov,pc,lr) + diff --git a/arch/arm26/lib/floppydma.S b/arch/arm26/lib/floppydma.S new file mode 100644 index 00000000000..e99ebbb2035 --- /dev/null +++ b/arch/arm26/lib/floppydma.S @@ -0,0 +1,32 @@ +/* + * linux/arch/arm26/lib/floppydma.S + * + * Copyright (C) 1995, 1996 Russell King + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ +#include +#include + .text + + .global floppy_fiqin_end +ENTRY(floppy_fiqin_start) + subs r9, r9, #1 + ldrgtb r12, [r11, #-4] + ldrleb r12, [r11], #0 + strb r12, [r10], #1 + subs pc, lr, #4 +floppy_fiqin_end: + + .global floppy_fiqout_end +ENTRY(floppy_fiqout_start) + subs r9, r9, #1 + ldrgeb r12, [r10], #1 + movlt r12, #0 + strleb r12, [r11], #0 + subles pc, lr, #4 + strb r12, [r11, #-4] + subs pc, lr, #4 +floppy_fiqout_end: diff --git a/arch/arm26/lib/gcclib.h b/arch/arm26/lib/gcclib.h new file mode 100644 index 00000000000..9895e78904b --- /dev/null +++ b/arch/arm26/lib/gcclib.h @@ -0,0 +1,21 @@ +/* gcclib.h -- definitions for various functions 'borrowed' from gcc-2.95.3 */ +/* I Molton 29/07/01 */ + +#define BITS_PER_UNIT 8 +#define SI_TYPE_SIZE (sizeof (SItype) * BITS_PER_UNIT) + +typedef unsigned int UQItype __attribute__ ((mode (QI))); +typedef int SItype __attribute__ ((mode (SI))); +typedef unsigned int USItype __attribute__ ((mode (SI))); +typedef int DItype __attribute__ ((mode (DI))); +typedef int word_type __attribute__ ((mode (__word__))); +typedef unsigned int UDItype __attribute__ ((mode (DI))); + +struct DIstruct {SItype low, high;}; + +typedef union +{ + struct DIstruct s; + DItype ll; +} DIunion; + diff --git a/arch/arm26/lib/getuser.S b/arch/arm26/lib/getuser.S new file mode 100644 index 00000000000..e6d59b33485 --- /dev/null +++ b/arch/arm26/lib/getuser.S @@ -0,0 +1,112 @@ +/* + * linux/arch/arm26/lib/getuser.S + * + * Copyright (C) 2001 Russell King + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * Idea from x86 version, (C) Copyright 1998 Linus Torvalds + * + * These functions have a non-standard call interface to make them more + * efficient, especially as they return an error value in addition to + * the "real" return value. + * + * __get_user_X + * + * Inputs: r0 contains the address + * Outputs: r0 is the error code + * r1, r2 contains the zero-extended value + * lr corrupted + * + * No other registers must be altered. (see include/asm-arm/uaccess.h + * for specific ASM register usage). + * + * Note that ADDR_LIMIT is either 0 or 0xc0000000. 
+ * Note also that it is intended that __get_user_bad is not global. + */ +#include +#include +#include + + .global __get_user_1 +__get_user_1: + bic r1, sp, #0x1f00 + bic r1, r1, #0x00ff + str lr, [sp, #-4]! + ldr r1, [r1, #TI_ADDR_LIMIT] + sub r1, r1, #1 + cmp r0, r1 + bge __get_user_bad + cmp r0, #0x02000000 +1: ldrlsbt r1, [r0] + ldrgeb r1, [r0] + mov r0, #0 + ldmfd sp!, {pc}^ + + .global __get_user_2 +__get_user_2: + bic r2, sp, #0x1f00 + bic r2, r2, #0x00ff + str lr, [sp, #-4]! + ldr r2, [r2, #TI_ADDR_LIMIT] + sub r2, r2, #2 + cmp r0, r2 + bge __get_user_bad + cmp r0, #0x02000000 +2: ldrlsbt r1, [r0], #1 +3: ldrlsbt r2, [r0] + ldrgeb r1, [r0], #1 + ldrgeb r2, [r0] + orr r1, r1, r2, lsl #8 + mov r0, #0 + ldmfd sp!, {pc}^ + + .global __get_user_4 +__get_user_4: + bic r1, sp, #0x1f00 + bic r1, r1, #0x00ff + str lr, [sp, #-4]! + ldr r1, [r1, #TI_ADDR_LIMIT] + sub r1, r1, #4 + cmp r0, r1 + bge __get_user_bad + cmp r0, #0x02000000 +4: ldrlst r1, [r0] + ldrge r1, [r0] + mov r0, #0 + ldmfd sp!, {pc}^ + + .global __get_user_8 +__get_user_8: + bic r2, sp, #0x1f00 + bic r2, r2, #0x00ff + str lr, [sp, #-4]! + ldr r2, [r2, #TI_ADDR_LIMIT] + sub r2, r2, #8 + cmp r0, r2 + bge __get_user_bad_8 + cmp r0, #0x02000000 +5: ldrlst r1, [r0], #4 +6: ldrlst r2, [r0] + ldrge r1, [r0], #4 + ldrge r2, [r0] + mov r0, #0 + ldmfd sp!, {pc}^ + +__get_user_bad_8: + mov r2, #0 +__get_user_bad: + mov r1, #0 + mov r0, #-EFAULT + ldmfd sp!, {pc}^ + +.section __ex_table, "a" + .long 1b, __get_user_bad + .long 2b, __get_user_bad + .long 3b, __get_user_bad + .long 4b, __get_user_bad + .long 5b, __get_user_bad_8 + .long 6b, __get_user_bad_8 +.previous diff --git a/arch/arm26/lib/io-acorn.S b/arch/arm26/lib/io-acorn.S new file mode 100644 index 00000000000..f6c3e30b1b4 --- /dev/null +++ b/arch/arm26/lib/io-acorn.S @@ -0,0 +1,71 @@ +/* + * linux/arch/arm26/lib/io-acorn.S + * + * Copyright (C) 1995, 1996 Russell King + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ +#include /* for CONFIG_CPU_nn */ +#include +#include +#include + + .text + .align + + .equ diff_pcio_base, PCIO_BASE - IO_BASE + + .macro outw2 rd + mov r8, \rd, lsl #16 + orr r8, r8, r8, lsr #16 + str r8, [r3, r0, lsl #2] + mov r8, \rd, lsr #16 + orr r8, r8, r8, lsl #16 + str r8, [r3, r0, lsl #2] + .endm + + .macro inw2 rd, mask, temp + ldr \rd, [r0] + and \rd, \rd, \mask + ldr \temp, [r0] + orr \rd, \rd, \temp, lsl #16 + .endm + + .macro addr rd + tst \rd, #0x80000000 + mov \rd, \rd, lsl #2 + add \rd, \rd, #IO_BASE + addeq \rd, \rd, #diff_pcio_base + .endm + +.iosl_warning: + .ascii "<4>insl/outsl not implemented, called from %08lX\0" + .align + +/* + * These make no sense on Acorn machines. + * Print a warning message. 
+ */ +ENTRY(insl) +ENTRY(outsl) + adr r0, .iosl_warning + mov r1, lr + b printk + +@ Purpose: write a memc register +@ Proto : void memc_write(int register, int value); +@ Returns: nothing + +ENTRY(memc_write) + cmp r0, #7 + RETINSTR(movgt,pc,lr) + mov r0, r0, lsl #17 + mov r1, r1, lsl #15 + mov r1, r1, lsr #17 + orr r0, r0, r1, lsl #2 + add r0, r0, #0x03600000 + strb r0, [r0] + RETINSTR(mov,pc,lr) + diff --git a/arch/arm26/lib/io-readsb.S b/arch/arm26/lib/io-readsb.S new file mode 100644 index 00000000000..4c4d99c0585 --- /dev/null +++ b/arch/arm26/lib/io-readsb.S @@ -0,0 +1,116 @@ +/* + * linux/arch/arm26/lib/io-readsb.S + * + * Copyright (C) 1995-2000 Russell King + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ +#include +#include +#include + +.insb_align: rsb ip, ip, #4 + cmp ip, r2 + movgt ip, r2 + cmp ip, #2 + ldrb r3, [r0] + strb r3, [r1], #1 + ldrgeb r3, [r0] + strgeb r3, [r1], #1 + ldrgtb r3, [r0] + strgtb r3, [r1], #1 + subs r2, r2, ip + bne .insb_aligned + +ENTRY(__raw_readsb) + teq r2, #0 @ do we have to check for the zero len? + moveq pc, lr + ands ip, r1, #3 + bne .insb_align + +.insb_aligned: stmfd sp!, {r4 - r6, lr} + + subs r2, r2, #16 + bmi .insb_no_16 + +.insb_16_lp: ldrb r3, [r0] + ldrb r4, [r0] + orr r3, r3, r4, lsl #8 + ldrb r4, [r0] + orr r3, r3, r4, lsl #16 + ldrb r4, [r0] + orr r3, r3, r4, lsl #24 + ldrb r4, [r0] + ldrb r5, [r0] + orr r4, r4, r5, lsl #8 + ldrb r5, [r0] + orr r4, r4, r5, lsl #16 + ldrb r5, [r0] + orr r4, r4, r5, lsl #24 + ldrb r5, [r0] + ldrb r6, [r0] + orr r5, r5, r6, lsl #8 + ldrb r6, [r0] + orr r5, r5, r6, lsl #16 + ldrb r6, [r0] + orr r5, r5, r6, lsl #24 + ldrb r6, [r0] + ldrb ip, [r0] + orr r6, r6, ip, lsl #8 + ldrb ip, [r0] + orr r6, r6, ip, lsl #16 + ldrb ip, [r0] + orr r6, r6, ip, lsl #24 + stmia r1!, {r3 - r6} + + subs r2, r2, #16 + bpl .insb_16_lp + + tst r2, #15 + LOADREGS(eqfd, sp!, {r4 - r6, pc}) + +.insb_no_16: tst r2, #8 + beq .insb_no_8 + + ldrb r3, [r0] + ldrb r4, [r0] + orr r3, r3, r4, lsl #8 + ldrb r4, [r0] + orr r3, r3, r4, lsl #16 + ldrb r4, [r0] + orr r3, r3, r4, lsl #24 + ldrb r4, [r0] + ldrb r5, [r0] + orr r4, r4, r5, lsl #8 + ldrb r5, [r0] + orr r4, r4, r5, lsl #16 + ldrb r5, [r0] + orr r4, r4, r5, lsl #24 + stmia r1!, {r3, r4} + +.insb_no_8: tst r2, #4 + beq .insb_no_4 + + ldrb r3, [r0] + ldrb r4, [r0] + orr r3, r3, r4, lsl #8 + ldrb r4, [r0] + orr r3, r3, r4, lsl #16 + ldrb r4, [r0] + orr r3, r3, r4, lsl #24 + str r3, [r1], #4 + +.insb_no_4: ands r2, r2, #3 + LOADREGS(eqfd, sp!, {r4 - r6, pc}) + + cmp r2, #2 + ldrb r3, [r0] + strb r3, [r1], #1 + ldrgeb r3, [r0] + strgeb r3, [r1], #1 + ldrgtb r3, [r0] + strgtb r3, [r1] + + LOADREGS(fd, sp!, {r4 - r6, pc}) diff --git a/arch/arm26/lib/io-readsl.S b/arch/arm26/lib/io-readsl.S new file mode 100644 index 00000000000..7be208bd23c --- /dev/null +++ b/arch/arm26/lib/io-readsl.S @@ -0,0 +1,78 @@ +/* + * linux/arch/arm26/lib/io-readsl.S + * + * Copyright (C) 1995-2000 Russell King + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ +#include +#include +#include + +/* + * Note that some reads can be aligned on half-word boundaries. + */ +ENTRY(__raw_readsl) + teq r2, #0 @ do we have to check for the zero len? 
+ moveq pc, lr + ands ip, r1, #3 + bne 2f + +1: ldr r3, [r0] + str r3, [r1], #4 + subs r2, r2, #1 + bne 1b + mov pc, lr + +2: cmp ip, #2 + ldr ip, [r0] + blt 4f + bgt 6f + + strb ip, [r1], #1 + mov ip, ip, lsr #8 + strb ip, [r1], #1 + mov ip, ip, lsr #8 +3: subs r2, r2, #1 + ldrne r3, [r0] + orrne ip, ip, r3, lsl #16 + strne ip, [r1], #4 + movne ip, r3, lsr #16 + bne 3b + strb ip, [r1], #1 + mov ip, ip, lsr #8 + strb ip, [r1], #1 + mov pc, lr + +4: strb ip, [r1], #1 + mov ip, ip, lsr #8 + strb ip, [r1], #1 + mov ip, ip, lsr #8 + strb ip, [r1], #1 + mov ip, ip, lsr #8 +5: subs r2, r2, #1 + ldrne r3, [r0] + orrne ip, ip, r3, lsl #8 + strne ip, [r1], #4 + movne ip, r3, lsr #24 + bne 5b + strb ip, [r1], #1 + mov pc, lr + +6: strb ip, [r1], #1 + mov ip, ip, lsr #8 +7: subs r2, r2, #1 + ldrne r3, [r0] + orrne ip, ip, r3, lsl #24 + strne ip, [r1], #4 + movne ip, r3, lsr #8 + bne 7b + strb ip, [r1], #1 + mov ip, ip, lsr #8 + strb ip, [r1], #1 + mov ip, ip, lsr #8 + strb ip, [r1], #1 + mov pc, lr + diff --git a/arch/arm26/lib/io-readsw.S b/arch/arm26/lib/io-readsw.S new file mode 100644 index 00000000000..c65c1f28fcf --- /dev/null +++ b/arch/arm26/lib/io-readsw.S @@ -0,0 +1,107 @@ +/* + * linux/arch/arm26/lib/io-readsw.S + * + * Copyright (C) 1995-2000 Russell King + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ +#include +#include +#include + +.insw_bad_alignment: + adr r0, .insw_bad_align_msg + mov r2, lr + b panic +.insw_bad_align_msg: + .asciz "insw: bad buffer alignment (0x%p, lr=0x%08lX)\n" + .align + +.insw_align: tst r1, #1 + bne .insw_bad_alignment + + ldr r3, [r0] + strb r3, [r1], #1 + mov r3, r3, lsr #8 + strb r3, [r1], #1 + + subs r2, r2, #1 + RETINSTR(moveq, pc, lr) + +ENTRY(__raw_readsw) + teq r2, #0 @ do we have to check for the zero len? + moveq pc, lr + tst r1, #3 + bne .insw_align + +.insw_aligned: mov ip, #0xff + orr ip, ip, ip, lsl #8 + stmfd sp!, {r4, r5, r6, lr} + + subs r2, r2, #8 + bmi .no_insw_8 + +.insw_8_lp: ldr r3, [r0] + and r3, r3, ip + ldr r4, [r0] + orr r3, r3, r4, lsl #16 + + ldr r4, [r0] + and r4, r4, ip + ldr r5, [r0] + orr r4, r4, r5, lsl #16 + + ldr r5, [r0] + and r5, r5, ip + ldr r6, [r0] + orr r5, r5, r6, lsl #16 + + ldr r6, [r0] + and r6, r6, ip + ldr lr, [r0] + orr r6, r6, lr, lsl #16 + + stmia r1!, {r3 - r6} + + subs r2, r2, #8 + bpl .insw_8_lp + + tst r2, #7 + LOADREGS(eqfd, sp!, {r4, r5, r6, pc}) + +.no_insw_8: tst r2, #4 + beq .no_insw_4 + + ldr r3, [r0] + and r3, r3, ip + ldr r4, [r0] + orr r3, r3, r4, lsl #16 + + ldr r4, [r0] + and r4, r4, ip + ldr r5, [r0] + orr r4, r4, r5, lsl #16 + + stmia r1!, {r3, r4} + +.no_insw_4: tst r2, #2 + beq .no_insw_2 + + ldr r3, [r0] + and r3, r3, ip + ldr r4, [r0] + orr r3, r3, r4, lsl #16 + + str r3, [r1], #4 + +.no_insw_2: tst r2, #1 + ldrne r3, [r0] + strneb r3, [r1], #1 + movne r3, r3, lsr #8 + strneb r3, [r1] + + LOADREGS(fd, sp!, {r4, r5, r6, pc}) + + diff --git a/arch/arm26/lib/io-writesb.S b/arch/arm26/lib/io-writesb.S new file mode 100644 index 00000000000..16251b4d510 --- /dev/null +++ b/arch/arm26/lib/io-writesb.S @@ -0,0 +1,122 @@ +/* + * linux/arch/arm26/lib/io-writesb.S + * + * Copyright (C) 1995-2000 Russell King + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ */ +#include +#include +#include + +.outsb_align: rsb ip, ip, #4 + cmp ip, r2 + movgt ip, r2 + cmp ip, #2 + ldrb r3, [r1], #1 + strb r3, [r0] + ldrgeb r3, [r1], #1 + strgeb r3, [r0] + ldrgtb r3, [r1], #1 + strgtb r3, [r0] + subs r2, r2, ip + bne .outsb_aligned + +ENTRY(__raw_writesb) + teq r2, #0 @ do we have to check for the zero len? + moveq pc, lr + ands ip, r1, #3 + bne .outsb_align + +.outsb_aligned: stmfd sp!, {r4 - r6, lr} + + subs r2, r2, #16 + bmi .outsb_no_16 + +.outsb_16_lp: ldmia r1!, {r3 - r6} + + strb r3, [r0] + mov r3, r3, lsr #8 + strb r3, [r0] + mov r3, r3, lsr #8 + strb r3, [r0] + mov r3, r3, lsr #8 + strb r3, [r0] + + strb r4, [r0] + mov r4, r4, lsr #8 + strb r4, [r0] + mov r4, r4, lsr #8 + strb r4, [r0] + mov r4, r4, lsr #8 + strb r4, [r0] + + strb r5, [r0] + mov r5, r5, lsr #8 + strb r5, [r0] + mov r5, r5, lsr #8 + strb r5, [r0] + mov r5, r5, lsr #8 + strb r5, [r0] + + strb r6, [r0] + mov r6, r6, lsr #8 + strb r6, [r0] + mov r6, r6, lsr #8 + strb r6, [r0] + mov r6, r6, lsr #8 + strb r6, [r0] + + subs r2, r2, #16 + bpl .outsb_16_lp + + tst r2, #15 + LOADREGS(eqfd, sp!, {r4 - r6, pc}) + +.outsb_no_16: tst r2, #8 + beq .outsb_no_8 + + ldmia r1!, {r3, r4} + + strb r3, [r0] + mov r3, r3, lsr #8 + strb r3, [r0] + mov r3, r3, lsr #8 + strb r3, [r0] + mov r3, r3, lsr #8 + strb r3, [r0] + + strb r4, [r0] + mov r4, r4, lsr #8 + strb r4, [r0] + mov r4, r4, lsr #8 + strb r4, [r0] + mov r4, r4, lsr #8 + strb r4, [r0] + +.outsb_no_8: tst r2, #4 + beq .outsb_no_4 + + ldr r3, [r1], #4 + strb r3, [r0] + mov r3, r3, lsr #8 + strb r3, [r0] + mov r3, r3, lsr #8 + strb r3, [r0] + mov r3, r3, lsr #8 + strb r3, [r0] + +.outsb_no_4: ands r2, r2, #3 + LOADREGS(eqfd, sp!, {r4 - r6, pc}) + + cmp r2, #2 + ldrb r3, [r1], #1 + strb r3, [r0] + ldrgeb r3, [r1], #1 + strgeb r3, [r0] + ldrgtb r3, [r1] + strgtb r3, [r0] + + LOADREGS(fd, sp!, {r4 - r6, pc}) diff --git a/arch/arm26/lib/io-writesl.S b/arch/arm26/lib/io-writesl.S new file mode 100644 index 00000000000..4d6049b16e7 --- /dev/null +++ b/arch/arm26/lib/io-writesl.S @@ -0,0 +1,56 @@ +/* + * linux/arch/arm26/lib/io-writesl.S + * + * Copyright (C) 1995-2000 Russell King + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ +#include +#include +#include + +ENTRY(__raw_writesl) + teq r2, #0 @ do we have to check for the zero len? + moveq pc, lr + ands ip, r1, #3 + bne 2f + +1: ldr r3, [r1], #4 + str r3, [r0] + subs r2, r2, #1 + bne 1b + mov pc, lr + +2: bic r1, r1, #3 + cmp ip, #2 + ldr r3, [r1], #4 + bgt 4f + blt 5f + +3: mov ip, r3, lsr #16 + ldr r3, [r1], #4 + orr ip, ip, r3, lsl #16 + str ip, [r0] + subs r2, r2, #1 + bne 3b + mov pc, lr + +4: mov ip, r3, lsr #24 + ldr r3, [r1], #4 + orr ip, ip, r3, lsl #8 + str ip, [r0] + subs r2, r2, #1 + bne 4b + mov pc, lr + +5: mov ip, r3, lsr #8 + ldr r3, [r1], #4 + orr ip, ip, r3, lsl #24 + str ip, [r0] + subs r2, r2, #1 + bne 5b + mov pc, lr + + diff --git a/arch/arm26/lib/io-writesw.S b/arch/arm26/lib/io-writesw.S new file mode 100644 index 00000000000..a24f891f6b1 --- /dev/null +++ b/arch/arm26/lib/io-writesw.S @@ -0,0 +1,127 @@ +/* + * linux/arch/arm26/lib/io-writesw.S + * + * Copyright (C) 1995-2000 Russell King + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ */ +#include +#include +#include + +.outsw_bad_alignment: + adr r0, .outsw_bad_align_msg + mov r2, lr + b panic +.outsw_bad_align_msg: + .asciz "outsw: bad buffer alignment (0x%p, lr=0x%08lX)\n" + .align + +.outsw_align: tst r1, #1 + bne .outsw_bad_alignment + + add r1, r1, #2 + + ldr r3, [r1, #-4] + mov r3, r3, lsr #16 + orr r3, r3, r3, lsl #16 + str r3, [r0] + subs r2, r2, #1 + RETINSTR(moveq, pc, lr) + +ENTRY(__raw_writesw) + teq r2, #0 @ do we have to check for the zero len? + moveq pc, lr + tst r1, #3 + bne .outsw_align + +.outsw_aligned: stmfd sp!, {r4, r5, r6, lr} + + subs r2, r2, #8 + bmi .no_outsw_8 + +.outsw_8_lp: ldmia r1!, {r3, r4, r5, r6} + + mov ip, r3, lsl #16 + orr ip, ip, ip, lsr #16 + str ip, [r0] + + mov ip, r3, lsr #16 + orr ip, ip, ip, lsl #16 + str ip, [r0] + + mov ip, r4, lsl #16 + orr ip, ip, ip, lsr #16 + str ip, [r0] + + mov ip, r4, lsr #16 + orr ip, ip, ip, lsl #16 + str ip, [r0] + + mov ip, r5, lsl #16 + orr ip, ip, ip, lsr #16 + str ip, [r0] + + mov ip, r5, lsr #16 + orr ip, ip, ip, lsl #16 + str ip, [r0] + + mov ip, r6, lsl #16 + orr ip, ip, ip, lsr #16 + str ip, [r0] + + mov ip, r6, lsr #16 + orr ip, ip, ip, lsl #16 + str ip, [r0] + + subs r2, r2, #8 + bpl .outsw_8_lp + + tst r2, #7 + LOADREGS(eqfd, sp!, {r4, r5, r6, pc}) + +.no_outsw_8: tst r2, #4 + beq .no_outsw_4 + + ldmia r1!, {r3, r4} + + mov ip, r3, lsl #16 + orr ip, ip, ip, lsr #16 + str ip, [r0] + + mov ip, r3, lsr #16 + orr ip, ip, ip, lsl #16 + str ip, [r0] + + mov ip, r4, lsl #16 + orr ip, ip, ip, lsr #16 + str ip, [r0] + + mov ip, r4, lsr #16 + orr ip, ip, ip, lsl #16 + str ip, [r0] + +.no_outsw_4: tst r2, #2 + beq .no_outsw_2 + + ldr r3, [r1], #4 + + mov ip, r3, lsl #16 + orr ip, ip, ip, lsr #16 + str ip, [r0] + + mov ip, r3, lsr #16 + orr ip, ip, ip, lsl #16 + str ip, [r0] + +.no_outsw_2: tst r2, #1 + + ldrne r3, [r1] + + movne ip, r3, lsl #16 + orrne ip, ip, ip, lsr #16 + strne ip, [r0] + + LOADREGS(fd, sp!, {r4, r5, r6, pc}) diff --git a/arch/arm26/lib/kbd.c b/arch/arm26/lib/kbd.c new file mode 100644 index 00000000000..22d2c93aaf1 --- /dev/null +++ b/arch/arm26/lib/kbd.c @@ -0,0 +1,279 @@ +#include +#include +//#include +#include + +/* + * Translation of escaped scancodes to keycodes. + * This is now user-settable. + * The keycodes 1-88,96-111,119 are fairly standard, and + * should probably not be changed - changing might confuse X. + * X also interprets scancode 0x5d (KEY_Begin). + * + * For 1-88 keycode equals scancode. + */ + +#define E0_KPENTER 96 +#define E0_RCTRL 97 +#define E0_KPSLASH 98 +#define E0_PRSCR 99 +#define E0_RALT 100 +#define E0_BREAK 101 /* (control-pause) */ +#define E0_HOME 102 +#define E0_UP 103 +#define E0_PGUP 104 +#define E0_LEFT 105 +#define E0_RIGHT 106 +#define E0_END 107 +#define E0_DOWN 108 +#define E0_PGDN 109 +#define E0_INS 110 +#define E0_DEL 111 + +/* for USB 106 keyboard */ +#define E0_YEN 124 +#define E0_BACKSLASH 89 + + +#define E1_PAUSE 119 + +/* + * The keycodes below are randomly located in 89-95,112-118,120-127. + * They could be thrown away (and all occurrences below replaced by 0), + * but that would force many users to use the `setkeycodes' utility, where + * they needed not before. It does not matter that there are duplicates, as + * long as no duplication occurs for any single keyboard. + */ +#define SC_LIM 89 + +#define FOCUS_PF1 85 /* actual code! 
*/ +#define FOCUS_PF2 89 +#define FOCUS_PF3 90 +#define FOCUS_PF4 91 +#define FOCUS_PF5 92 +#define FOCUS_PF6 93 +#define FOCUS_PF7 94 +#define FOCUS_PF8 95 +#define FOCUS_PF9 120 +#define FOCUS_PF10 121 +#define FOCUS_PF11 122 +#define FOCUS_PF12 123 + +#define JAP_86 124 +/* tfj@olivia.ping.dk: + * The four keys are located over the numeric keypad, and are + * labelled A1-A4. It's an rc930 keyboard, from + * Regnecentralen/RC International, Now ICL. + * Scancodes: 59, 5a, 5b, 5c. + */ +#define RGN1 124 +#define RGN2 125 +#define RGN3 126 +#define RGN4 127 + +static unsigned char high_keys[128 - SC_LIM] = { + RGN1, RGN2, RGN3, RGN4, 0, 0, 0, /* 0x59-0x5f */ + 0, 0, 0, 0, 0, 0, 0, 0, /* 0x60-0x67 */ + 0, 0, 0, 0, 0, FOCUS_PF11, 0, FOCUS_PF12, /* 0x68-0x6f */ + 0, 0, 0, FOCUS_PF2, FOCUS_PF9, 0, 0, FOCUS_PF3, /* 0x70-0x77 */ + FOCUS_PF4, FOCUS_PF5, FOCUS_PF6, FOCUS_PF7, /* 0x78-0x7b */ + FOCUS_PF8, JAP_86, FOCUS_PF10, 0 /* 0x7c-0x7f */ +}; + +/* BTC */ +#define E0_MACRO 112 +/* LK450 */ +#define E0_F13 113 +#define E0_F14 114 +#define E0_HELP 115 +#define E0_DO 116 +#define E0_F17 117 +#define E0_KPMINPLUS 118 +/* + * My OmniKey generates e0 4c for the "OMNI" key and the + * right alt key does nada. [kkoller@nyx10.cs.du.edu] + */ +#define E0_OK 124 +/* + * New microsoft keyboard is rumoured to have + * e0 5b (left window button), e0 5c (right window button), + * e0 5d (menu button). [or: LBANNER, RBANNER, RMENU] + * [or: Windows_L, Windows_R, TaskMan] + */ +#define E0_MSLW 125 +#define E0_MSRW 126 +#define E0_MSTM 127 + +static unsigned char e0_keys[128] = { + 0, 0, 0, 0, 0, 0, 0, 0, /* 0x00-0x07 */ + 0, 0, 0, 0, 0, 0, 0, 0, /* 0x08-0x0f */ + 0, 0, 0, 0, 0, 0, 0, 0, /* 0x10-0x17 */ + 0, 0, 0, 0, E0_KPENTER, E0_RCTRL, 0, 0, /* 0x18-0x1f */ + 0, 0, 0, 0, 0, 0, 0, 0, /* 0x20-0x27 */ + 0, 0, 0, 0, 0, 0, 0, 0, /* 0x28-0x2f */ + 0, 0, 0, 0, 0, E0_KPSLASH, 0, E0_PRSCR, /* 0x30-0x37 */ + E0_RALT, 0, 0, 0, 0, E0_F13, E0_F14, E0_HELP, /* 0x38-0x3f */ + E0_DO, E0_F17, 0, 0, 0, 0, E0_BREAK, E0_HOME, /* 0x40-0x47 */ + E0_UP, E0_PGUP, 0, E0_LEFT, E0_OK, E0_RIGHT, E0_KPMINPLUS, E0_END, /* 0x48-0x4f */ + E0_DOWN, E0_PGDN, E0_INS, E0_DEL, 0, 0, 0, 0, /* 0x50-0x57 */ + 0, 0, 0, E0_MSLW, E0_MSRW, E0_MSTM, 0, 0, /* 0x58-0x5f */ + 0, 0, 0, 0, 0, 0, 0, 0, /* 0x60-0x67 */ + 0, 0, 0, 0, 0, 0, 0, E0_MACRO, /* 0x68-0x6f */ + //0, 0, 0, 0, 0, 0, 0, 0, /* 0x70-0x77 */ + 0, 0, 0, 0, 0, E0_BACKSLASH, 0, 0, /* 0x70-0x77 */ + 0, 0, 0, E0_YEN, 0, 0, 0, 0 /* 0x78-0x7f */ +}; + +static int gen_setkeycode(unsigned int scancode, unsigned int keycode) +{ + if (scancode < SC_LIM || scancode > 255 || keycode > 127) + return -EINVAL; + if (scancode < 128) + high_keys[scancode - SC_LIM] = keycode; + else + e0_keys[scancode - 128] = keycode; + return 0; +} + +static int gen_getkeycode(unsigned int scancode) +{ + return + (scancode < SC_LIM || scancode > 255) ? -EINVAL : + (scancode < + 128) ? high_keys[scancode - SC_LIM] : e0_keys[scancode - 128]; +} + +static int +gen_translate(unsigned char scancode, unsigned char *keycode, char raw_mode) +{ + static int prev_scancode; + + /* special prefix scancodes.. */ + if (scancode == 0xe0 || scancode == 0xe1) { + prev_scancode = scancode; + return 0; + } + + /* 0xFF is sent by a few keyboards, ignore it. 
0x00 is error */ + if (scancode == 0x00 || scancode == 0xff) { + prev_scancode = 0; + return 0; + } + + scancode &= 0x7f; + + if (prev_scancode) { + /* + * usually it will be 0xe0, but a Pause key generates + * e1 1d 45 e1 9d c5 when pressed, and nothing when released + */ + if (prev_scancode != 0xe0) { + if (prev_scancode == 0xe1 && scancode == 0x1d) { + prev_scancode = 0x100; + return 0; + } + else if (prev_scancode == 0x100 + && scancode == 0x45) { + *keycode = E1_PAUSE; + prev_scancode = 0; + } else { +#ifdef KBD_REPORT_UNKN + if (!raw_mode) + printk(KERN_INFO + "keyboard: unknown e1 escape sequence\n"); +#endif + prev_scancode = 0; + return 0; + } + } else { + prev_scancode = 0; + /* + * The keyboard maintains its own internal caps lock and + * num lock statuses. In caps lock mode E0 AA precedes make + * code and E0 2A follows break code. In num lock mode, + * E0 2A precedes make code and E0 AA follows break code. + * We do our own book-keeping, so we will just ignore these. + */ + /* + * For my keyboard there is no caps lock mode, but there are + * both Shift-L and Shift-R modes. The former mode generates + * E0 2A / E0 AA pairs, the latter E0 B6 / E0 36 pairs. + * So, we should also ignore the latter. - aeb@cwi.nl + */ + if (scancode == 0x2a || scancode == 0x36) + return 0; + + if (e0_keys[scancode]) + *keycode = e0_keys[scancode]; + else { +#ifdef KBD_REPORT_UNKN + if (!raw_mode) + printk(KERN_INFO + "keyboard: unknown scancode e0 %02x\n", + scancode); +#endif + return 0; + } + } + } else if (scancode >= SC_LIM) { + /* This happens with the FOCUS 9000 keyboard + Its keys PF1..PF12 are reported to generate + 55 73 77 78 79 7a 7b 7c 74 7e 6d 6f + Moreover, unless repeated, they do not generate + key-down events, so we have to zero up_flag below */ + /* Also, Japanese 86/106 keyboards are reported to + generate 0x73 and 0x7d for \ - and \ | respectively. */ + /* Also, some Brazilian keyboard is reported to produce + 0x73 and 0x7e for \ ? and KP-dot, respectively. 
*/ + + *keycode = high_keys[scancode - SC_LIM]; + + if (!*keycode) { + if (!raw_mode) { +#ifdef KBD_REPORT_UNKN + printk(KERN_INFO + "keyboard: unrecognized scancode (%02x)" + " - ignored\n", scancode); +#endif + } + return 0; + } + } else + *keycode = scancode; + return 1; +} + +static char gen_unexpected_up(unsigned char keycode) +{ + /* unexpected, but this can happen: maybe this was a key release for a + FOCUS 9000 PF key; if we want to see it, we have to clear up_flag */ + if (keycode >= SC_LIM || keycode == 85) + return 0; + else + return 0200; +} + +/* + * These are the default mappings + */ +int (*k_setkeycode)(unsigned int, unsigned int) = gen_setkeycode; +int (*k_getkeycode)(unsigned int) = gen_getkeycode; +int (*k_translate)(unsigned char, unsigned char *, char) = gen_translate; +char (*k_unexpected_up)(unsigned char) = gen_unexpected_up; +void (*k_leds)(unsigned char); + +/* Simple translation table for the SysRq keys */ + +#ifdef CONFIG_MAGIC_SYSRQ +static unsigned char gen_sysrq_xlate[128] = + "\000\0331234567890-=\177\t" /* 0x00 - 0x0f */ + "qwertyuiop[]\r\000as" /* 0x10 - 0x1f */ + "dfghjkl;'`\000\\zxcv" /* 0x20 - 0x2f */ + "bnm,./\000*\000 \000\201\202\203\204\205" /* 0x30 - 0x3f */ + "\206\207\210\211\212\000\000789-456+1" /* 0x40 - 0x4f */ + "230\177\000\000\213\214\000\000\000\000\000\000\000\000\000\000" /* 0x50 - 0x5f */ + "\r\000/"; /* 0x60 - 0x6f */ + +unsigned char *k_sysrq_xlate = gen_sysrq_xlate; +int k_sysrq_key = 0x54; +#endif diff --git a/arch/arm26/lib/lib1funcs.S b/arch/arm26/lib/lib1funcs.S new file mode 100644 index 00000000000..b8f9518db87 --- /dev/null +++ b/arch/arm26/lib/lib1funcs.S @@ -0,0 +1,314 @@ +@ libgcc1 routines for ARM cpu. +@ Division routines, written by Richard Earnshaw, (rearnsha@armltd.co.uk) + +/* Copyright (C) 1995, 1996, 1998 Free Software Foundation, Inc. + +This file is free software; you can redistribute it and/or modify it +under the terms of the GNU General Public License as published by the +Free Software Foundation; either version 2, or (at your option) any +later version. + +In addition to the permissions in the GNU General Public License, the +Free Software Foundation gives you unlimited permission to link the +compiled version of this file with other programs, and to distribute +those programs without any restriction coming from the use of this +file. (The General Public License restrictions do apply in other +respects; for example, they cover modification of the file, and +distribution when not linked into another program.) + +This file is distributed in the hope that it will be useful, but +WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +General Public License for more details. + +You should have received a copy of the GNU General Public License +along with this program; see the file COPYING. If not, write to +the Free Software Foundation, 59 Temple Place - Suite 330, +Boston, MA 02111-1307, USA. */ + +/* As a special exception, if you link this library with other files, + some of which are compiled with GCC, to produce an executable, + this library does not by itself cause the resulting executable + to be covered by the GNU General Public License. + This exception does not however invalidate any other reasons why + the executable file might be covered by the GNU General Public License. 
+ */ +/* This code is derived from gcc 2.95.3 */ +/* I Molton 29/07/01 */ + +#include +#include +#include +#include + +#define RET movs +#define RETc(x) mov##x##s +#define RETCOND ^ + +dividend .req r0 +divisor .req r1 +result .req r2 +overdone .req r2 +curbit .req r3 +ip .req r12 +sp .req r13 +lr .req r14 +pc .req r15 + +ENTRY(__udivsi3) + cmp divisor, #0 + beq Ldiv0 + mov curbit, #1 + mov result, #0 + cmp dividend, divisor + bcc Lgot_result_udivsi3 +1: + @ Unless the divisor is very big, shift it up in multiples of + @ four bits, since this is the amount of unwinding in the main + @ division loop. Continue shifting until the divisor is + @ larger than the dividend. + cmp divisor, #0x10000000 + cmpcc divisor, dividend + movcc divisor, divisor, lsl #4 + movcc curbit, curbit, lsl #4 + bcc 1b + +2: + @ For very big divisors, we must shift it a bit at a time, or + @ we will be in danger of overflowing. + cmp divisor, #0x80000000 + cmpcc divisor, dividend + movcc divisor, divisor, lsl #1 + movcc curbit, curbit, lsl #1 + bcc 2b + +3: + @ Test for possible subtractions, and note which bits + @ are done in the result. On the final pass, this may subtract + @ too much from the dividend, but the result will be ok, since the + @ "bit" will have been shifted out at the bottom. + cmp dividend, divisor + subcs dividend, dividend, divisor + orrcs result, result, curbit + cmp dividend, divisor, lsr #1 + subcs dividend, dividend, divisor, lsr #1 + orrcs result, result, curbit, lsr #1 + cmp dividend, divisor, lsr #2 + subcs dividend, dividend, divisor, lsr #2 + orrcs result, result, curbit, lsr #2 + cmp dividend, divisor, lsr #3 + subcs dividend, dividend, divisor, lsr #3 + orrcs result, result, curbit, lsr #3 + cmp dividend, #0 @ Early termination? + movnes curbit, curbit, lsr #4 @ No, any more bits to do? + movne divisor, divisor, lsr #4 + bne 3b +Lgot_result_udivsi3: + mov r0, result + RET pc, lr + +Ldiv0: + str lr, [sp, #-4]! + bl __div0 + mov r0, #0 @ about as wrong as it could be + ldmia sp!, {pc}RETCOND + +/* __umodsi3 ----------------------- */ + +ENTRY(__umodsi3) + cmp divisor, #0 + beq Ldiv0 + mov curbit, #1 + cmp dividend, divisor + RETc(cc) pc, lr +1: + @ Unless the divisor is very big, shift it up in multiples of + @ four bits, since this is the amount of unwinding in the main + @ division loop. Continue shifting until the divisor is + @ larger than the dividend. + cmp divisor, #0x10000000 + cmpcc divisor, dividend + movcc divisor, divisor, lsl #4 + movcc curbit, curbit, lsl #4 + bcc 1b + +2: + @ For very big divisors, we must shift it a bit at a time, or + @ we will be in danger of overflowing. + cmp divisor, #0x80000000 + cmpcc divisor, dividend + movcc divisor, divisor, lsl #1 + movcc curbit, curbit, lsl #1 + bcc 2b + +3: + @ Test for possible subtractions. On the final pass, this may + @ subtract too much from the dividend, so keep track of which + @ subtractions are done, we can fix them up afterwards... + mov overdone, #0 + cmp dividend, divisor + subcs dividend, dividend, divisor + cmp dividend, divisor, lsr #1 + subcs dividend, dividend, divisor, lsr #1 + orrcs overdone, overdone, curbit, ror #1 + cmp dividend, divisor, lsr #2 + subcs dividend, dividend, divisor, lsr #2 + orrcs overdone, overdone, curbit, ror #2 + cmp dividend, divisor, lsr #3 + subcs dividend, dividend, divisor, lsr #3 + orrcs overdone, overdone, curbit, ror #3 + mov ip, curbit + cmp dividend, #0 @ Early termination? + movnes curbit, curbit, lsr #4 @ No, any more bits to do? 
+ movne divisor, divisor, lsr #4 + bne 3b + + @ Any subtractions that we should not have done will be recorded in + @ the top three bits of "overdone". Exactly which were not needed + @ are governed by the position of the bit, stored in ip. + @ If we terminated early, because dividend became zero, + @ then none of the below will match, since the bit in ip will not be + @ in the bottom nibble. + ands overdone, overdone, #0xe0000000 + RETc(eq) pc, lr @ No fixups needed + tst overdone, ip, ror #3 + addne dividend, dividend, divisor, lsr #3 + tst overdone, ip, ror #2 + addne dividend, dividend, divisor, lsr #2 + tst overdone, ip, ror #1 + addne dividend, dividend, divisor, lsr #1 + RET pc, lr + +ENTRY(__divsi3) + eor ip, dividend, divisor @ Save the sign of the result. + mov curbit, #1 + mov result, #0 + cmp divisor, #0 + rsbmi divisor, divisor, #0 @ Loops below use unsigned. + beq Ldiv0 + cmp dividend, #0 + rsbmi dividend, dividend, #0 + cmp dividend, divisor + bcc Lgot_result_divsi3 + +1: + @ Unless the divisor is very big, shift it up in multiples of + @ four bits, since this is the amount of unwinding in the main + @ division loop. Continue shifting until the divisor is + @ larger than the dividend. + cmp divisor, #0x10000000 + cmpcc divisor, dividend + movcc divisor, divisor, lsl #4 + movcc curbit, curbit, lsl #4 + bcc 1b + +2: + @ For very big divisors, we must shift it a bit at a time, or + @ we will be in danger of overflowing. + cmp divisor, #0x80000000 + cmpcc divisor, dividend + movcc divisor, divisor, lsl #1 + movcc curbit, curbit, lsl #1 + bcc 2b + +3: + @ Test for possible subtractions, and note which bits + @ are done in the result. On the final pass, this may subtract + @ too much from the dividend, but the result will be ok, since the + @ "bit" will have been shifted out at the bottom. + cmp dividend, divisor + subcs dividend, dividend, divisor + orrcs result, result, curbit + cmp dividend, divisor, lsr #1 + subcs dividend, dividend, divisor, lsr #1 + orrcs result, result, curbit, lsr #1 + cmp dividend, divisor, lsr #2 + subcs dividend, dividend, divisor, lsr #2 + orrcs result, result, curbit, lsr #2 + cmp dividend, divisor, lsr #3 + subcs dividend, dividend, divisor, lsr #3 + orrcs result, result, curbit, lsr #3 + cmp dividend, #0 @ Early termination? + movnes curbit, curbit, lsr #4 @ No, any more bits to do? + movne divisor, divisor, lsr #4 + bne 3b +Lgot_result_divsi3: + mov r0, result + cmp ip, #0 + rsbmi r0, r0, #0 + RET pc, lr + +ENTRY(__modsi3) + mov curbit, #1 + cmp divisor, #0 + rsbmi divisor, divisor, #0 @ Loops below use unsigned. + beq Ldiv0 + @ Need to save the sign of the dividend, unfortunately, we need + @ ip later on; this is faster than pushing lr and using that. + str dividend, [sp, #-4]! + cmp dividend, #0 + rsbmi dividend, dividend, #0 + cmp dividend, divisor + bcc Lgot_result_modsi3 + +1: + @ Unless the divisor is very big, shift it up in multiples of + @ four bits, since this is the amount of unwinding in the main + @ division loop. Continue shifting until the divisor is + @ larger than the dividend. + cmp divisor, #0x10000000 + cmpcc divisor, dividend + movcc divisor, divisor, lsl #4 + movcc curbit, curbit, lsl #4 + bcc 1b + +2: + @ For very big divisors, we must shift it a bit at a time, or + @ we will be in danger of overflowing. + cmp divisor, #0x80000000 + cmpcc divisor, dividend + movcc divisor, divisor, lsl #1 + movcc curbit, curbit, lsl #1 + bcc 2b + +3: + @ Test for possible subtractions. 
On the final pass, this may + @ subtract too much from the dividend, so keep track of which + @ subtractions are done, we can fix them up afterwards... + mov overdone, #0 + cmp dividend, divisor + subcs dividend, dividend, divisor + cmp dividend, divisor, lsr #1 + subcs dividend, dividend, divisor, lsr #1 + orrcs overdone, overdone, curbit, ror #1 + cmp dividend, divisor, lsr #2 + subcs dividend, dividend, divisor, lsr #2 + orrcs overdone, overdone, curbit, ror #2 + cmp dividend, divisor, lsr #3 + subcs dividend, dividend, divisor, lsr #3 + orrcs overdone, overdone, curbit, ror #3 + mov ip, curbit + cmp dividend, #0 @ Early termination? + movnes curbit, curbit, lsr #4 @ No, any more bits to do? + movne divisor, divisor, lsr #4 + bne 3b + + @ Any subtractions that we should not have done will be recorded in + @ the top three bits of "overdone". Exactly which were not needed + @ are governed by the position of the bit, stored in ip. + @ If we terminated early, because dividend became zero, + @ then none of the below will match, since the bit in ip will not be + @ in the bottom nibble. + ands overdone, overdone, #0xe0000000 + beq Lgot_result_modsi3 + tst overdone, ip, ror #3 + addne dividend, dividend, divisor, lsr #3 + tst overdone, ip, ror #2 + addne dividend, dividend, divisor, lsr #2 + tst overdone, ip, ror #1 + addne dividend, dividend, divisor, lsr #1 +Lgot_result_modsi3: + ldr ip, [sp], #4 + cmp ip, #0 + rsbmi dividend, dividend, #0 + RET pc, lr diff --git a/arch/arm26/lib/longlong.h b/arch/arm26/lib/longlong.h new file mode 100644 index 00000000000..05ec1abd6a2 --- /dev/null +++ b/arch/arm26/lib/longlong.h @@ -0,0 +1,184 @@ +/* longlong.h -- based on code from gcc-2.95.3 + + definitions for mixed size 32/64 bit arithmetic. + Copyright (C) 1991, 92, 94, 95, 96, 1997, 1998 Free Software Foundation, Inc. + + This definition file is free software; you can redistribute it + and/or modify it under the terms of the GNU General Public + License as published by the Free Software Foundation; either + version 2, or (at your option) any later version. + + This definition file is distributed in the hope that it will be + useful, but WITHOUT ANY WARRANTY; without even the implied + warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + See the GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 59 Temple Place - Suite 330, + Boston, MA 02111-1307, USA. */ + +/* Borrowed from GCC 2.95.3, I Molton 29/07/01 */ + +#ifndef SI_TYPE_SIZE +#define SI_TYPE_SIZE 32 +#endif + +#define __BITS4 (SI_TYPE_SIZE / 4) +#define __ll_B (1L << (SI_TYPE_SIZE / 2)) +#define __ll_lowpart(t) ((USItype) (t) % __ll_B) +#define __ll_highpart(t) ((USItype) (t) / __ll_B) + +/* Define auxiliary asm macros. + + 1) umul_ppmm(high_prod, low_prod, multipler, multiplicand) + multiplies two USItype integers MULTIPLER and MULTIPLICAND, + and generates a two-part USItype product in HIGH_PROD and + LOW_PROD. + + 2) __umulsidi3(a,b) multiplies two USItype integers A and B, + and returns a UDItype product. This is just a variant of umul_ppmm. + + 3) udiv_qrnnd(quotient, remainder, high_numerator, low_numerator, + denominator) divides a two-word unsigned integer, composed by the + integers HIGH_NUMERATOR and LOW_NUMERATOR, by DENOMINATOR and + places the quotient in QUOTIENT and the remainder in REMAINDER. + HIGH_NUMERATOR must be less than DENOMINATOR for correct operation. 
+ If, in addition, the most significant bit of DENOMINATOR must be 1, + then the pre-processor symbol UDIV_NEEDS_NORMALIZATION is defined to 1. + + 4) sdiv_qrnnd(quotient, remainder, high_numerator, low_numerator, + denominator). Like udiv_qrnnd but the numbers are signed. The + quotient is rounded towards 0. + + 5) count_leading_zeros(count, x) counts the number of zero-bits from + the msb to the first non-zero bit. This is the number of steps X + needs to be shifted left to set the msb. Undefined for X == 0. + + 6) add_ssaaaa(high_sum, low_sum, high_addend_1, low_addend_1, + high_addend_2, low_addend_2) adds two two-word unsigned integers, + composed by HIGH_ADDEND_1 and LOW_ADDEND_1, and HIGH_ADDEND_2 and + LOW_ADDEND_2 respectively. The result is placed in HIGH_SUM and + LOW_SUM. Overflow (i.e. carry out) is not stored anywhere, and is + lost. + + 7) sub_ddmmss(high_difference, low_difference, high_minuend, + low_minuend, high_subtrahend, low_subtrahend) subtracts two + two-word unsigned integers, composed by HIGH_MINUEND_1 and + LOW_MINUEND_1, and HIGH_SUBTRAHEND_2 and LOW_SUBTRAHEND_2 + respectively. The result is placed in HIGH_DIFFERENCE and + LOW_DIFFERENCE. Overflow (i.e. carry out) is not stored anywhere, + and is lost. + + If any of these macros are left undefined for a particular CPU, + C macros are used. */ + +#if defined (__arm__) +#define add_ssaaaa(sh, sl, ah, al, bh, bl) \ + __asm__ ("adds %1, %4, %5 \n\ + adc %0, %2, %3" \ + : "=r" ((USItype) (sh)), \ + "=&r" ((USItype) (sl)) \ + : "%r" ((USItype) (ah)), \ + "rI" ((USItype) (bh)), \ + "%r" ((USItype) (al)), \ + "rI" ((USItype) (bl))) +#define sub_ddmmss(sh, sl, ah, al, bh, bl) \ + __asm__ ("subs %1, %4, %5 \n\ + sbc %0, %2, %3" \ + : "=r" ((USItype) (sh)), \ + "=&r" ((USItype) (sl)) \ + : "r" ((USItype) (ah)), \ + "rI" ((USItype) (bh)), \ + "r" ((USItype) (al)), \ + "rI" ((USItype) (bl))) +#define umul_ppmm(xh, xl, a, b) \ +{register USItype __t0, __t1, __t2; \ + __asm__ ("%@ Inlined umul_ppmm \n\ + mov %2, %5, lsr #16 \n\ + mov %0, %6, lsr #16 \n\ + bic %3, %5, %2, lsl #16 \n\ + bic %4, %6, %0, lsl #16 \n\ + mul %1, %3, %4 \n\ + mul %4, %2, %4 \n\ + mul %3, %0, %3 \n\ + mul %0, %2, %0 \n\ + adds %3, %4, %3 \n\ + addcs %0, %0, #65536 \n\ + adds %1, %1, %3, lsl #16 \n\ + adc %0, %0, %3, lsr #16" \ + : "=&r" ((USItype) (xh)), \ + "=r" ((USItype) (xl)), \ + "=&r" (__t0), "=&r" (__t1), "=r" (__t2) \ + : "r" ((USItype) (a)), \ + "r" ((USItype) (b)));} +#define UMUL_TIME 20 +#define UDIV_TIME 100 +#endif /* __arm__ */ + +#define __umulsidi3(u, v) \ + ({DIunion __w; \ + umul_ppmm (__w.s.high, __w.s.low, u, v); \ + __w.ll; }) + +#define __udiv_qrnnd_c(q, r, n1, n0, d) \ + do { \ + USItype __d1, __d0, __q1, __q0; \ + USItype __r1, __r0, __m; \ + __d1 = __ll_highpart (d); \ + __d0 = __ll_lowpart (d); \ + \ + __r1 = (n1) % __d1; \ + __q1 = (n1) / __d1; \ + __m = (USItype) __q1 * __d0; \ + __r1 = __r1 * __ll_B | __ll_highpart (n0); \ + if (__r1 < __m) \ + { \ + __q1--, __r1 += (d); \ + if (__r1 >= (d)) /* i.e. 
we didn't get carry when adding to __r1 */\ + if (__r1 < __m) \ + __q1--, __r1 += (d); \ + } \ + __r1 -= __m; \ + \ + __r0 = __r1 % __d1; \ + __q0 = __r1 / __d1; \ + __m = (USItype) __q0 * __d0; \ + __r0 = __r0 * __ll_B | __ll_lowpart (n0); \ + if (__r0 < __m) \ + { \ + __q0--, __r0 += (d); \ + if (__r0 >= (d)) \ + if (__r0 < __m) \ + __q0--, __r0 += (d); \ + } \ + __r0 -= __m; \ + \ + (q) = (USItype) __q1 * __ll_B | __q0; \ + (r) = __r0; \ + } while (0) + +#define UDIV_NEEDS_NORMALIZATION 1 +#define udiv_qrnnd __udiv_qrnnd_c + +extern const UQItype __clz_tab[]; +#define count_leading_zeros(count, x) \ + do { \ + USItype __xr = (x); \ + USItype __a; \ + \ + if (SI_TYPE_SIZE <= 32) \ + { \ + __a = __xr < ((USItype)1<<2*__BITS4) \ + ? (__xr < ((USItype)1<<__BITS4) ? 0 : __BITS4) \ + : (__xr < ((USItype)1<<3*__BITS4) ? 2*__BITS4 : 3*__BITS4); \ + } \ + else \ + { \ + for (__a = SI_TYPE_SIZE - 8; __a > 0; __a -= 8) \ + if (((__xr >> __a) & 0xff) != 0) \ + break; \ + } \ + \ + (count) = SI_TYPE_SIZE - (__clz_tab[__xr >> __a] + __a); \ + } while (0) diff --git a/arch/arm26/lib/lshrdi3.c b/arch/arm26/lib/lshrdi3.c new file mode 100644 index 00000000000..b666f1bad45 --- /dev/null +++ b/arch/arm26/lib/lshrdi3.c @@ -0,0 +1,61 @@ +/* More subroutines needed by GCC output code on some machines. */ +/* Compile this one with gcc. */ +/* Copyright (C) 1989, 92-98, 1999 Free Software Foundation, Inc. + +This file is part of GNU CC. + +GNU CC is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; either version 2, or (at your option) +any later version. + +GNU CC is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License +along with GNU CC; see the file COPYING. If not, write to +the Free Software Foundation, 59 Temple Place - Suite 330, +Boston, MA 02111-1307, USA. */ + +/* As a special exception, if you link this library with other files, + some of which are compiled with GCC, to produce an executable, + this library does not by itself cause the resulting executable + to be covered by the GNU General Public License. + This exception does not however invalidate any other reasons why + the executable file might be covered by the GNU General Public License. + */ +/* support functions required by the kernel. based on code from gcc-2.95.3 */ +/* I Molton 29/07/01 */ + +#include "gcclib.h" + +DItype +__lshrdi3 (DItype u, word_type b) +{ + DIunion w; + word_type bm; + DIunion uu; + + if (b == 0) + return u; + + uu.ll = u; + + bm = (sizeof (SItype) * BITS_PER_UNIT) - b; + if (bm <= 0) + { + w.s.high = 0; + w.s.low = (USItype)uu.s.high >> -bm; + } + else + { + USItype carries = (USItype)uu.s.high << bm; + w.s.high = (USItype)uu.s.high >> b; + w.s.low = ((USItype)uu.s.low >> b) | carries; + } + + return w.ll; +} + diff --git a/arch/arm26/lib/memchr.S b/arch/arm26/lib/memchr.S new file mode 100644 index 00000000000..34e7c14c08a --- /dev/null +++ b/arch/arm26/lib/memchr.S @@ -0,0 +1,25 @@ +/* + * linux/arch/arm26/lib/memchr.S + * + * Copyright (C) 1995-2000 Russell King + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ * + * ASM optimised string functions + */ +#include +#include + + .text + .align 5 +ENTRY(memchr) +1: subs r2, r2, #1 + bmi 2f + ldrb r3, [r0], #1 + teq r3, r1 + bne 1b + sub r0, r0, #1 +2: movne r0, #0 + RETINSTR(mov,pc,lr) diff --git a/arch/arm26/lib/memcpy.S b/arch/arm26/lib/memcpy.S new file mode 100644 index 00000000000..3f719e41206 --- /dev/null +++ b/arch/arm26/lib/memcpy.S @@ -0,0 +1,318 @@ +/* + * linux/arch/arm26/lib/memcpy.S + * + * Copyright (C) 1995-1999 Russell King + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * ASM optimised string functions + */ +#include +#include + + .text + +#define ENTER \ + mov ip,sp ;\ + stmfd sp!,{r4-r9,fp,ip,lr,pc} ;\ + sub fp,ip,#4 + +#define EXIT \ + LOADREGS(ea, fp, {r4 - r9, fp, sp, pc}) + +#define EXITEQ \ + LOADREGS(eqea, fp, {r4 - r9, fp, sp, pc}) + +/* + * Prototype: void memcpy(void *to,const void *from,unsigned long n); + * ARM3: cant use memcopy here!!! + */ +ENTRY(memcpy) +ENTRY(memmove) + ENTER + cmp r1, r0 + bcc 19f + subs r2, r2, #4 + blt 6f + ands ip, r0, #3 + bne 7f + ands ip, r1, #3 + bne 8f + +1: subs r2, r2, #8 + blt 5f + subs r2, r2, #0x14 + blt 3f +2: ldmia r1!,{r3 - r9, ip} + stmia r0!,{r3 - r9, ip} + subs r2, r2, #32 + bge 2b + cmn r2, #16 + ldmgeia r1!, {r3 - r6} + stmgeia r0!, {r3 - r6} + subge r2, r2, #0x10 +3: adds r2, r2, #0x14 +4: ldmgeia r1!, {r3 - r5} + stmgeia r0!, {r3 - r5} + subges r2, r2, #12 + bge 4b +5: adds r2, r2, #8 + blt 6f + subs r2, r2, #4 + ldrlt r3, [r1], #4 + ldmgeia r1!, {r4, r5} + strlt r3, [r0], #4 + stmgeia r0!, {r4, r5} + subge r2, r2, #4 + +6: adds r2, r2, #4 + EXITEQ + cmp r2, #2 + ldrb r3, [r1], #1 + ldrgeb r4, [r1], #1 + ldrgtb r5, [r1], #1 + strb r3, [r0], #1 + strgeb r4, [r0], #1 + strgtb r5, [r0], #1 + EXIT + +7: rsb ip, ip, #4 + cmp ip, #2 + ldrb r3, [r1], #1 + ldrgeb r4, [r1], #1 + ldrgtb r5, [r1], #1 + strb r3, [r0], #1 + strgeb r4, [r0], #1 + strgtb r5, [r0], #1 + subs r2, r2, ip + blt 6b + ands ip, r1, #3 + beq 1b + +8: bic r1, r1, #3 + ldr r7, [r1], #4 + cmp ip, #2 + bgt 15f + beq 11f + cmp r2, #12 + blt 10f + sub r2, r2, #12 +9: mov r3, r7, pull #8 + ldmia r1!, {r4 - r7} + orr r3, r3, r4, push #24 + mov r4, r4, pull #8 + orr r4, r4, r5, push #24 + mov r5, r5, pull #8 + orr r5, r5, r6, push #24 + mov r6, r6, pull #8 + orr r6, r6, r7, push #24 + stmia r0!, {r3 - r6} + subs r2, r2, #16 + bge 9b + adds r2, r2, #12 + blt 100f +10: mov r3, r7, pull #8 + ldr r7, [r1], #4 + subs r2, r2, #4 + orr r3, r3, r7, push #24 + str r3, [r0], #4 + bge 10b +100: sub r1, r1, #3 + b 6b + +11: cmp r2, #12 + blt 13f /* */ + sub r2, r2, #12 +12: mov r3, r7, pull #16 + ldmia r1!, {r4 - r7} + orr r3, r3, r4, push #16 + mov r4, r4, pull #16 + orr r4, r4, r5, push #16 + mov r5, r5, pull #16 + orr r5, r5, r6, push #16 + mov r6, r6, pull #16 + orr r6, r6, r7, push #16 + stmia r0!, {r3 - r6} + subs r2, r2, #16 + bge 12b + adds r2, r2, #12 + blt 14f +13: mov r3, r7, pull #16 + ldr r7, [r1], #4 + subs r2, r2, #4 + orr r3, r3, r7, push #16 + str r3, [r0], #4 + bge 13b +14: sub r1, r1, #2 + b 6b + +15: cmp r2, #12 + blt 17f + sub r2, r2, #12 +16: mov r3, r7, pull #24 + ldmia r1!, {r4 - r7} + orr r3, r3, r4, push #8 + mov r4, r4, pull #24 + orr r4, r4, r5, push #8 + mov r5, r5, pull #24 + orr r5, r5, r6, push #8 + mov r6, r6, pull #24 + orr r6, r6, r7, push #8 + stmia r0!, {r3 - r6} + subs r2, r2, #16 + bge 16b + adds r2, r2, #12 + blt 18f +17: mov r3, r7, pull #24 + 
ldr r7, [r1], #4 + subs r2, r2, #4 + orr r3, r3, r7, push #8 + str r3, [r0], #4 + bge 17b +18: sub r1, r1, #1 + b 6b + + +19: add r1, r1, r2 + add r0, r0, r2 + subs r2, r2, #4 + blt 24f + ands ip, r0, #3 + bne 25f + ands ip, r1, #3 + bne 26f + +20: subs r2, r2, #8 + blt 23f + subs r2, r2, #0x14 + blt 22f +21: ldmdb r1!, {r3 - r9, ip} + stmdb r0!, {r3 - r9, ip} + subs r2, r2, #32 + bge 21b +22: cmn r2, #16 + ldmgedb r1!, {r3 - r6} + stmgedb r0!, {r3 - r6} + subge r2, r2, #16 + adds r2, r2, #20 + ldmgedb r1!, {r3 - r5} + stmgedb r0!, {r3 - r5} + subge r2, r2, #12 +23: adds r2, r2, #8 + blt 24f + subs r2, r2, #4 + ldrlt r3, [r1, #-4]! + ldmgedb r1!, {r4, r5} + strlt r3, [r0, #-4]! + stmgedb r0!, {r4, r5} + subge r2, r2, #4 + +24: adds r2, r2, #4 + EXITEQ + cmp r2, #2 + ldrb r3, [r1, #-1]! + ldrgeb r4, [r1, #-1]! + ldrgtb r5, [r1, #-1]! + strb r3, [r0, #-1]! + strgeb r4, [r0, #-1]! + strgtb r5, [r0, #-1]! + EXIT + +25: cmp ip, #2 + ldrb r3, [r1, #-1]! + ldrgeb r4, [r1, #-1]! + ldrgtb r5, [r1, #-1]! + strb r3, [r0, #-1]! + strgeb r4, [r0, #-1]! + strgtb r5, [r0, #-1]! + subs r2, r2, ip + blt 24b + ands ip, r1, #3 + beq 20b + +26: bic r1, r1, #3 + ldr r3, [r1], #0 + cmp ip, #2 + blt 34f + beq 30f + cmp r2, #12 + blt 28f + sub r2, r2, #12 +27: mov r7, r3, push #8 + ldmdb r1!, {r3, r4, r5, r6} + orr r7, r7, r6, pull #24 + mov r6, r6, push #8 + orr r6, r6, r5, pull #24 + mov r5, r5, push #8 + orr r5, r5, r4, pull #24 + mov r4, r4, push #8 + orr r4, r4, r3, pull #24 + stmdb r0!, {r4, r5, r6, r7} + subs r2, r2, #16 + bge 27b + adds r2, r2, #12 + blt 29f +28: mov ip, r3, push #8 + ldr r3, [r1, #-4]! + subs r2, r2, #4 + orr ip, ip, r3, pull #24 + str ip, [r0, #-4]! + bge 28b +29: add r1, r1, #3 + b 24b + +30: cmp r2, #12 + blt 32f + sub r2, r2, #12 +31: mov r7, r3, push #16 + ldmdb r1!, {r3, r4, r5, r6} + orr r7, r7, r6, pull #16 + mov r6, r6, push #16 + orr r6, r6, r5, pull #16 + mov r5, r5, push #16 + orr r5, r5, r4, pull #16 + mov r4, r4, push #16 + orr r4, r4, r3, pull #16 + stmdb r0!, {r4, r5, r6, r7} + subs r2, r2, #16 + bge 31b + adds r2, r2, #12 + blt 33f +32: mov ip, r3, push #16 + ldr r3, [r1, #-4]! + subs r2, r2, #4 + orr ip, ip, r3, pull #16 + str ip, [r0, #-4]! + bge 32b +33: add r1, r1, #2 + b 24b + +34: cmp r2, #12 + blt 36f + sub r2, r2, #12 +35: mov r7, r3, push #24 + ldmdb r1!, {r3, r4, r5, r6} + orr r7, r7, r6, pull #8 + mov r6, r6, push #24 + orr r6, r6, r5, pull #8 + mov r5, r5, push #24 + orr r5, r5, r4, pull #8 + mov r4, r4, push #24 + orr r4, r4, r3, pull #8 + stmdb r0!, {r4, r5, r6, r7} + subs r2, r2, #16 + bge 35b + adds r2, r2, #12 + blt 37f +36: mov ip, r3, push #24 + ldr r3, [r1, #-4]! + subs r2, r2, #4 + orr ip, ip, r3, pull #8 + str ip, [r0, #-4]! + bge 36b +37: add r1, r1, #1 + b 24b + + .align diff --git a/arch/arm26/lib/memset.S b/arch/arm26/lib/memset.S new file mode 100644 index 00000000000..aedec10b58f --- /dev/null +++ b/arch/arm26/lib/memset.S @@ -0,0 +1,80 @@ +/* + * linux/arch/arm26/lib/memset.S + * + * Copyright (C) 1995-2000 Russell King + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * ASM optimised string functions + */ +#include +#include + + .text + .align 5 + .word 0 + +1: subs r2, r2, #4 @ 1 do we have enough + blt 5f @ 1 bytes to align with? 
+ cmp r3, #2 @ 1 + strltb r1, [r0], #1 @ 1 + strleb r1, [r0], #1 @ 1 + strb r1, [r0], #1 @ 1 + add r2, r2, r3 @ 1 (r2 = r2 - (4 - r3)) +/* + * The pointer is now aligned and the length is adjusted. Try doing the + * memzero again. + */ + +ENTRY(memset) + ands r3, r0, #3 @ 1 unaligned? + bne 1b @ 1 +/* + * we know that the pointer in r0 is aligned to a word boundary. + */ + orr r1, r1, r1, lsl #8 + orr r1, r1, r1, lsl #16 + mov r3, r1 + cmp r2, #16 + blt 4f +/* + * We need an extra register for this loop - save the return address and + * use the LR + */ + str lr, [sp, #-4]! + mov ip, r1 + mov lr, r1 + +2: subs r2, r2, #64 + stmgeia r0!, {r1, r3, ip, lr} @ 64 bytes at a time. + stmgeia r0!, {r1, r3, ip, lr} + stmgeia r0!, {r1, r3, ip, lr} + stmgeia r0!, {r1, r3, ip, lr} + bgt 2b + LOADREGS(eqfd, sp!, {pc}) @ Now <64 bytes to go. +/* + * No need to correct the count; we're only testing bits from now on + */ + tst r2, #32 + stmneia r0!, {r1, r3, ip, lr} + stmneia r0!, {r1, r3, ip, lr} + tst r2, #16 + stmneia r0!, {r1, r3, ip, lr} + ldr lr, [sp], #4 + +4: tst r2, #8 + stmneia r0!, {r1, r3} + tst r2, #4 + strne r1, [r0], #4 +/* + * When we get here, we've got less than 4 bytes to zero. We + * may have an unaligned pointer as well. + */ +5: tst r2, #2 + strneb r1, [r0], #1 + strneb r1, [r0], #1 + tst r2, #1 + strneb r1, [r0], #1 + RETINSTR(mov,pc,lr) diff --git a/arch/arm26/lib/memzero.S b/arch/arm26/lib/memzero.S new file mode 100644 index 00000000000..cc5bf686006 --- /dev/null +++ b/arch/arm26/lib/memzero.S @@ -0,0 +1,80 @@ +/* + * linux/arch/arm26/lib/memzero.S + * + * Copyright (C) 1995-2000 Russell King + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ +#include +#include + + .text + .align 5 + .word 0 +/* + * Align the pointer in r0. r3 contains the number of bytes that we are + * mis-aligned by, and r1 is the number of bytes. If r1 < 4, then we + * don't bother; we use byte stores instead. + */ +1: subs r1, r1, #4 @ 1 do we have enough + blt 5f @ 1 bytes to align with? + cmp r3, #2 @ 1 + strltb r2, [r0], #1 @ 1 + strleb r2, [r0], #1 @ 1 + strb r2, [r0], #1 @ 1 + add r1, r1, r3 @ 1 (r1 = r1 - (4 - r3)) +/* + * The pointer is now aligned and the length is adjusted. Try doing the + * memzero again. + */ + +ENTRY(__memzero) + mov r2, #0 @ 1 + ands r3, r0, #3 @ 1 unaligned? + bne 1b @ 1 +/* + * r3 = 0, and we know that the pointer in r0 is aligned to a word boundary. + */ + cmp r1, #16 @ 1 we can skip this chunk if we + blt 4f @ 1 have < 16 bytes +/* + * We need an extra register for this loop - save the return address and + * use the LR + */ + str lr, [sp, #-4]! @ 1 + mov ip, r2 @ 1 + mov lr, r2 @ 1 + +3: subs r1, r1, #64 @ 1 write 32 bytes out per loop + stmgeia r0!, {r2, r3, ip, lr} @ 4 + stmgeia r0!, {r2, r3, ip, lr} @ 4 + stmgeia r0!, {r2, r3, ip, lr} @ 4 + stmgeia r0!, {r2, r3, ip, lr} @ 4 + bgt 3b @ 1 + LOADREGS(eqfd, sp!, {pc}) @ 1/2 quick exit +/* + * No need to correct the count; we're only testing bits from now on + */ + tst r1, #32 @ 1 + stmneia r0!, {r2, r3, ip, lr} @ 4 + stmneia r0!, {r2, r3, ip, lr} @ 4 + tst r1, #16 @ 1 16 bytes or more? + stmneia r0!, {r2, r3, ip, lr} @ 4 + ldr lr, [sp], #4 @ 1 + +4: tst r1, #8 @ 1 8 bytes or more? + stmneia r0!, {r2, r3} @ 2 + tst r1, #4 @ 1 4 bytes or more? + strne r2, [r0], #4 @ 1 +/* + * When we get here, we've got less than 4 bytes to zero. We + * may have an unaligned pointer as well. 
+ */ +5: tst r1, #2 @ 1 2 bytes or more? + strneb r2, [r0], #1 @ 1 + strneb r2, [r0], #1 @ 1 + tst r1, #1 @ 1 a byte left over + strneb r2, [r0], #1 @ 1 + RETINSTR(mov,pc,lr) @ 1 diff --git a/arch/arm26/lib/muldi3.c b/arch/arm26/lib/muldi3.c new file mode 100644 index 00000000000..44d611b1cfd --- /dev/null +++ b/arch/arm26/lib/muldi3.c @@ -0,0 +1,77 @@ +/* More subroutines needed by GCC output code on some machines. */ +/* Compile this one with gcc. */ +/* Copyright (C) 1989, 92-98, 1999 Free Software Foundation, Inc. + +This file is part of GNU CC. + +GNU CC is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; either version 2, or (at your option) +any later version. + +GNU CC is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License +along with GNU CC; see the file COPYING. If not, write to +the Free Software Foundation, 59 Temple Place - Suite 330, +Boston, MA 02111-1307, USA. */ + +/* As a special exception, if you link this library with other files, + some of which are compiled with GCC, to produce an executable, + this library does not by itself cause the resulting executable + to be covered by the GNU General Public License. + This exception does not however invalidate any other reasons why + the executable file might be covered by the GNU General Public License. + */ +/* support functions required by the kernel. based on code from gcc-2.95.3 */ +/* I Molton 29/07/01 */ + +#include "gcclib.h" + +#define umul_ppmm(xh, xl, a, b) \ +{register USItype __t0, __t1, __t2; \ + __asm__ ("%@ Inlined umul_ppmm \n\ + mov %2, %5, lsr #16 \n\ + mov %0, %6, lsr #16 \n\ + bic %3, %5, %2, lsl #16 \n\ + bic %4, %6, %0, lsl #16 \n\ + mul %1, %3, %4 \n\ + mul %4, %2, %4 \n\ + mul %3, %0, %3 \n\ + mul %0, %2, %0 \n\ + adds %3, %4, %3 \n\ + addcs %0, %0, #65536 \n\ + adds %1, %1, %3, lsl #16 \n\ + adc %0, %0, %3, lsr #16" \ + : "=&r" ((USItype) (xh)), \ + "=r" ((USItype) (xl)), \ + "=&r" (__t0), "=&r" (__t1), "=r" (__t2) \ + : "r" ((USItype) (a)), \ + "r" ((USItype) (b)));} + + +#define __umulsidi3(u, v) \ + ({DIunion __w; \ + umul_ppmm (__w.s.high, __w.s.low, u, v); \ + __w.ll; }) + + +DItype +__muldi3 (DItype u, DItype v) +{ + DIunion w; + DIunion uu, vv; + + uu.ll = u, + vv.ll = v; + + w.ll = __umulsidi3 (uu.s.low, vv.s.low); + w.s.high += ((USItype) uu.s.low * (USItype) vv.s.high + + (USItype) uu.s.high * (USItype) vv.s.low); + + return w.ll; +} + diff --git a/arch/arm26/lib/putuser.S b/arch/arm26/lib/putuser.S new file mode 100644 index 00000000000..87588cbe46a --- /dev/null +++ b/arch/arm26/lib/putuser.S @@ -0,0 +1,109 @@ +/* + * linux/arch/arm26/lib/putuser.S + * + * Copyright (C) 2001 Russell King + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * Idea from x86 version, (C) Copyright 1998 Linus Torvalds + * + * These functions have a non-standard call interface to make + * them more efficient, especially as they return an error + * value in addition to the "real" return value. 
+ * + * __put_user_X + * + * Inputs: r0 contains the address + * r1, r2 contains the value + * Outputs: r0 is the error code + * lr corrupted + * + * No other registers must be altered. (see include/asm-arm/uaccess.h + * for specific ASM register usage). + * + * Note that ADDR_LIMIT is either 0 or 0xc0000000 + * Note also that it is intended that __put_user_bad is not global. + */ +#include +#include +#include + + .global __put_user_1 +__put_user_1: + bic r2, sp, #0x1f00 + bic r2, r2, #0x00ff + str lr, [sp, #-4]! + ldr r2, [r2, #TI_ADDR_LIMIT] + sub r2, r2, #1 + cmp r0, r2 + bge __put_user_bad +1: cmp r0, #0x02000000 + strlsbt r1, [r0] + strgeb r1, [r0] + mov r0, #0 + ldmfd sp!, {pc}^ + + .global __put_user_2 +__put_user_2: + bic r2, sp, #0x1f00 + bic r2, r2, #0x00ff + str lr, [sp, #-4]! + ldr r2, [r2, #TI_ADDR_LIMIT] + sub r2, r2, #2 + cmp r0, r2 + bge __put_user_bad +2: cmp r0, #0x02000000 + strlsbt r1, [r0], #1 + strgeb r1, [r0], #1 + mov r1, r1, lsr #8 +3: strlsbt r1, [r0] + strgeb r1, [r0] + mov r0, #0 + ldmfd sp!, {pc}^ + + .global __put_user_4 +__put_user_4: + bic r2, sp, #0x1f00 + bic r2, r2, #0x00ff + str lr, [sp, #-4]! + ldr r2, [r2, #TI_ADDR_LIMIT] + sub r2, r2, #4 + cmp r0, r2 +4: bge __put_user_bad + cmp r0, #0x02000000 + strlst r1, [r0] + strge r1, [r0] + mov r0, #0 + ldmfd sp!, {pc}^ + + .global __put_user_8 +__put_user_8: + bic ip, sp, #0x1f00 + bic ip, ip, #0x00ff + str lr, [sp, #-4]! + ldr ip, [ip, #TI_ADDR_LIMIT] + sub ip, ip, #8 + cmp r0, ip + bge __put_user_bad + cmp r0, #0x02000000 +5: strlst r1, [r0], #4 +6: strlst r2, [r0] + strge r1, [r0], #4 + strge r2, [r0] + mov r0, #0 + ldmfd sp!, {pc}^ + +__put_user_bad: + mov r0, #-EFAULT + mov pc, lr + +.section __ex_table, "a" + .long 1b, __put_user_bad + .long 2b, __put_user_bad + .long 3b, __put_user_bad + .long 4b, __put_user_bad + .long 5b, __put_user_bad + .long 6b, __put_user_bad +.previous diff --git a/arch/arm26/lib/setbit.S b/arch/arm26/lib/setbit.S new file mode 100644 index 00000000000..e180c1a1b2f --- /dev/null +++ b/arch/arm26/lib/setbit.S @@ -0,0 +1,29 @@ +/* + * linux/arch/arm26/lib/setbit.S + * + * Copyright (C) 1995-1996 Russell King + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ +#include +#include + .text + +/* + * Purpose : Function to set a bit + * Prototype: int set_bit(int bit, void *addr) + */ +ENTRY(_set_bit_be) + eor r0, r0, #0x18 @ big endian byte ordering +ENTRY(_set_bit_le) + and r2, r0, #7 + mov r3, #1 + mov r3, r3, lsl r2 + save_and_disable_irqs ip, r2 + ldrb r2, [r1, r0, lsr #3] + orr r2, r2, r3 + strb r2, [r1, r0, lsr #3] + restore_irqs ip + RETINSTR(mov,pc,lr) diff --git a/arch/arm26/lib/strchr.S b/arch/arm26/lib/strchr.S new file mode 100644 index 00000000000..ecfff21aa7c --- /dev/null +++ b/arch/arm26/lib/strchr.S @@ -0,0 +1,25 @@ +/* + * linux/arch/arm26/lib/strchr.S + * + * Copyright (C) 1995-2000 Russell King + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ * + * ASM optimised string functions + */ +#include +#include + + .text + .align 5 +ENTRY(strchr) +1: ldrb r2, [r0], #1 + teq r2, r1 + teqne r2, #0 + bne 1b + teq r2, #0 + moveq r0, #0 + subne r0, r0, #1 + RETINSTR(mov,pc,lr) diff --git a/arch/arm26/lib/strrchr.S b/arch/arm26/lib/strrchr.S new file mode 100644 index 00000000000..db43b28e78d --- /dev/null +++ b/arch/arm26/lib/strrchr.S @@ -0,0 +1,25 @@ +/* + * linux/arch/arm26/lib/strrchr.S + * + * Copyright (C) 1995-2000 Russell King + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * ASM optimised string functions + */ +#include +#include + + .text + .align 5 +ENTRY(strrchr) + mov r3, #0 +1: ldrb r2, [r0], #1 + teq r2, r1 + subeq r3, r0, #1 + teq r2, #0 + bne 1b + mov r0, r3 + RETINSTR(mov,pc,lr) diff --git a/arch/arm26/lib/testchangebit.S b/arch/arm26/lib/testchangebit.S new file mode 100644 index 00000000000..17049a2d93a --- /dev/null +++ b/arch/arm26/lib/testchangebit.S @@ -0,0 +1,29 @@ +/* + * linux/arch/arm26/lib/testchangebit.S + * + * Copyright (C) 1995-1996 Russell King + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ +#include +#include + .text + +ENTRY(_test_and_change_bit_be) + eor r0, r0, #0x18 @ big endian byte ordering +ENTRY(_test_and_change_bit_le) + add r1, r1, r0, lsr #3 + and r3, r0, #7 + mov r0, #1 + save_and_disable_irqs ip, r2 + ldrb r2, [r1] + tst r2, r0, lsl r3 + eor r2, r2, r0, lsl r3 + strb r2, [r1] + restore_irqs ip + moveq r0, #0 + RETINSTR(mov,pc,lr) + + diff --git a/arch/arm26/lib/testclearbit.S b/arch/arm26/lib/testclearbit.S new file mode 100644 index 00000000000..2506bd743ab --- /dev/null +++ b/arch/arm26/lib/testclearbit.S @@ -0,0 +1,29 @@ +/* + * linux/arch/arm26/lib/testclearbit.S + * + * Copyright (C) 1995-1996 Russell King + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ +#include +#include + .text + +ENTRY(_test_and_clear_bit_be) + eor r0, r0, #0x18 @ big endian byte ordering +ENTRY(_test_and_clear_bit_le) + add r1, r1, r0, lsr #3 @ Get byte offset + and r3, r0, #7 @ Get bit offset + mov r0, #1 + save_and_disable_irqs ip, r2 + ldrb r2, [r1] + tst r2, r0, lsl r3 + bic r2, r2, r0, lsl r3 + strb r2, [r1] + restore_irqs ip + moveq r0, #0 + RETINSTR(mov,pc,lr) + + diff --git a/arch/arm26/lib/testsetbit.S b/arch/arm26/lib/testsetbit.S new file mode 100644 index 00000000000..f827de64b22 --- /dev/null +++ b/arch/arm26/lib/testsetbit.S @@ -0,0 +1,29 @@ +/* + * linux/arch/arm26/lib/testsetbit.S + * + * Copyright (C) 1995-1996 Russell King + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ */ +#include +#include + .text + +ENTRY(_test_and_set_bit_be) + eor r0, r0, #0x18 @ big endian byte ordering +ENTRY(_test_and_set_bit_le) + add r1, r1, r0, lsr #3 @ Get byte offset + and r3, r0, #7 @ Get bit offset + mov r0, #1 + save_and_disable_irqs ip, r2 + ldrb r2, [r1] + tst r2, r0, lsl r3 + orr r2, r2, r0, lsl r3 + strb r2, [r1] + restore_irqs ip + moveq r0, #0 + RETINSTR(mov,pc,lr) + + diff --git a/arch/arm26/lib/uaccess-kernel.S b/arch/arm26/lib/uaccess-kernel.S new file mode 100644 index 00000000000..3950a1f6bc9 --- /dev/null +++ b/arch/arm26/lib/uaccess-kernel.S @@ -0,0 +1,173 @@ +/* + * linux/arch/arm26/lib/uaccess-kernel.S + * + * Copyright (C) 1998 Russell King + * + * Note! Some code fragments found in here have a special calling + * convention - they are not APCS compliant! + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ +#include +#include + + .text + +//FIXME - surely this can be done in C not asm, removing the problem of keeping C and asm in sync? (this is a struct uaccess_t) + + .globl uaccess_kernel +uaccess_kernel: + .word uaccess_kernel_put_byte + .word uaccess_kernel_get_byte + .word uaccess_kernel_put_half + .word uaccess_kernel_get_half + .word uaccess_kernel_put_word + .word uaccess_kernel_get_word + .word uaccess_kernel_put_dword + .word uaccess_kernel_copy + .word uaccess_kernel_copy + .word uaccess_kernel_clear + .word uaccess_kernel_strncpy + .word uaccess_kernel_strnlen + +@ In : r0 = x, r1 = addr, r2 = error +@ Out: r2 = error +uaccess_kernel_put_byte: + stmfd sp!, {lr} + strb r0, [r1] + ldmfd sp!, {pc}^ + +@ In : r0 = x, r1 = addr, r2 = error +@ Out: r2 = error +uaccess_kernel_put_half: + stmfd sp!, {lr} + strb r0, [r1] + mov r0, r0, lsr #8 + strb r0, [r1, #1] + ldmfd sp!, {pc}^ + +@ In : r0 = x, r1 = addr, r2 = error +@ Out: r2 = error +uaccess_kernel_put_word: + stmfd sp!, {lr} + str r0, [r1] + ldmfd sp!, {pc}^ + +@ In : r0 = x, r1 = addr, r2 = error +@ Out: r2 = error +uaccess_kernel_put_dword: + stmfd sp!, {lr} + str r0, [r1], #4 + str r0, [r1], #0 + ldmfd sp!, {pc}^ + +@ In : r0 = addr, r1 = error +@ Out: r0 = x, r1 = error +uaccess_kernel_get_byte: + stmfd sp!, {lr} + ldrb r0, [r0] + ldmfd sp!, {pc}^ + +@ In : r0 = addr, r1 = error +@ Out: r0 = x, r1 = error +uaccess_kernel_get_half: + stmfd sp!, {lr} + ldr r0, [r0] + mov r0, r0, lsl #16 + mov r0, r0, lsr #16 + ldmfd sp!, {pc}^ + +@ In : r0 = addr, r1 = error +@ Out: r0 = x, r1 = error +uaccess_kernel_get_word: + stmfd sp!, {lr} + ldr r0, [r0] + ldmfd sp!, {pc}^ + + +/* Prototype: int uaccess_kernel_copy(void *to, const char *from, size_t n) + * Purpose : copy a block to kernel memory from kernel memory + * Params : to - kernel memory + * : from - kernel memory + * : n - number of bytes to copy + * Returns : Number of bytes NOT copied. 
+ */ +uaccess_kernel_copy: + stmfd sp!, {lr} + bl memcpy + mov r0, #0 + ldmfd sp!, {pc}^ + +/* Prototype: int uaccess_kernel_clear(void *addr, size_t sz) + * Purpose : clear some kernel memory + * Params : addr - kernel memory address to clear + * : sz - number of bytes to clear + * Returns : number of bytes NOT cleared + */ +uaccess_kernel_clear: + stmfd sp!, {lr} + mov r2, #0 + cmp r1, #4 + blt 2f + ands ip, r0, #3 + beq 1f + cmp ip, #1 + strb r2, [r0], #1 + strleb r2, [r0], #1 + strltb r2, [r0], #1 + rsb ip, ip, #4 + sub r1, r1, ip @ 7 6 5 4 3 2 1 +1: subs r1, r1, #8 @ -1 -2 -3 -4 -5 -6 -7 + bmi 2f + str r2, [r0], #4 + str r2, [r0], #4 + b 1b +2: adds r1, r1, #4 @ 3 2 1 0 -1 -2 -3 + strpl r2, [r0], #4 + tst r1, #2 @ 1x 1x 0x 0x 1x 1x 0x + strneb r2, [r0], #1 + strneb r2, [r0], #1 + tst r1, #1 @ x1 x0 x1 x0 x1 x0 x1 + strneb r2, [r0], #1 + mov r0, #0 + ldmfd sp!, {pc}^ + +/* Prototype: size_t uaccess_kernel_strncpy(char *dst, char *src, size_t len) + * Purpose : copy a string from kernel memory to kernel memory + * Params : dst - kernel memory destination + * : src - kernel memory source + * : len - maximum length of string + * Returns : number of characters copied + */ +uaccess_kernel_strncpy: + stmfd sp!, {lr} + mov ip, r2 +1: subs r2, r2, #1 + bmi 2f + ldrb r3, [r1], #1 + strb r3, [r0], #1 + teq r3, #0 + bne 1b +2: subs r0, ip, r2 + ldmfd sp!, {pc}^ + +/* Prototype: int uaccess_kernel_strlen(char *str, long n) + * Purpose : get length of a string in kernel memory + * Params : str - address of string in kernel memory + * Returns : length of string *including terminator*, + * or zero on exception, or n + 1 if too long + */ +uaccess_kernel_strnlen: + stmfd sp!, {lr} + mov r2, r0 +1: ldrb r1, [r0], #1 + teq r1, #0 + beq 2f + subs r1, r1, #1 + bne 1b + add r0, r0, #1 +2: sub r0, r0, r2 + ldmfd sp!, {pc}^ + diff --git a/arch/arm26/lib/uaccess-user.S b/arch/arm26/lib/uaccess-user.S new file mode 100644 index 00000000000..130b8f28610 --- /dev/null +++ b/arch/arm26/lib/uaccess-user.S @@ -0,0 +1,718 @@ +/* + * linux/arch/arm26/lib/uaccess-user.S + * + * Copyright (C) 1995, 1996,1997,1998 Russell King + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * Routines to block copy data to/from user memory + * These are highly optimised both for the 4k page size + * and for various alignments. + */ +#include +#include +#include +#include + + .text + +//FIXME - surely this can be done in C not asm, removing the problem of keeping C and asm in sync? 
(this is a struct uaccess_t) + .globl uaccess_user +uaccess_user: + .word uaccess_user_put_byte + .word uaccess_user_get_byte + .word uaccess_user_put_half + .word uaccess_user_get_half + .word uaccess_user_put_word + .word uaccess_user_get_word + .word uaccess_user_put_dword + .word uaccess_user_copy_from_user + .word uaccess_user_copy_to_user + .word uaccess_user_clear_user + .word uaccess_user_strncpy_from_user + .word uaccess_user_strnlen_user + + +@ In : r0 = x, r1 = addr, r2 = error +@ Out: r2 = error +uaccess_user_put_byte: + stmfd sp!, {lr} +USER( strbt r0, [r1]) + ldmfd sp!, {pc}^ + +@ In : r0 = x, r1 = addr, r2 = error +@ Out: r2 = error +uaccess_user_put_half: + stmfd sp!, {lr} +USER( strbt r0, [r1], #1) + mov r0, r0, lsr #8 +USER( strbt r0, [r1]) + ldmfd sp!, {pc}^ + +@ In : r0 = x, r1 = addr, r2 = error +@ Out: r2 = error +uaccess_user_put_word: + stmfd sp!, {lr} +USER( strt r0, [r1]) + ldmfd sp!, {pc}^ + +@ In : r0 = x, r1 = addr, r2 = error +@ Out: r2 = error +uaccess_user_put_dword: + stmfd sp!, {lr} +USER( strt r0, [r1], #4) +USER( strt r0, [r1], #0) + ldmfd sp!, {pc}^ + +9001: mov r2, #-EFAULT + ldmfd sp!, {pc}^ + + +@ In : r0 = addr, r1 = error +@ Out: r0 = x, r1 = error +uaccess_user_get_byte: + stmfd sp!, {lr} +USER( ldrbt r0, [r0]) + ldmfd sp!, {pc}^ + +@ In : r0 = addr, r1 = error +@ Out: r0 = x, r1 = error +uaccess_user_get_half: + stmfd sp!, {lr} +USER( ldrt r0, [r0]) + mov r0, r0, lsl #16 + mov r0, r0, lsr #16 + ldmfd sp!, {pc}^ + +@ In : r0 = addr, r1 = error +@ Out: r0 = x, r1 = error +uaccess_user_get_word: + stmfd sp!, {lr} +USER( ldrt r0, [r0]) + ldmfd sp!, {pc}^ + +9001: mov r1, #-EFAULT + ldmfd sp!, {pc}^ + +/* Prototype: int uaccess_user_copy_to_user(void *to, const char *from, size_t n) + * Purpose : copy a block to user memory from kernel memory + * Params : to - user memory + * : from - kernel memory + * : n - number of bytes to copy + * Returns : Number of bytes NOT copied. + */ + +.c2u_dest_not_aligned: + rsb ip, ip, #4 + cmp ip, #2 + ldrb r3, [r1], #1 +USER( strbt r3, [r0], #1) @ May fault + ldrgeb r3, [r1], #1 +USER( strgebt r3, [r0], #1) @ May fault + ldrgtb r3, [r1], #1 +USER( strgtbt r3, [r0], #1) @ May fault + sub r2, r2, ip + b .c2u_dest_aligned + +ENTRY(uaccess_user_copy_to_user) + stmfd sp!, {r2, r4 - r7, lr} + cmp r2, #4 + blt .c2u_not_enough + ands ip, r0, #3 + bne .c2u_dest_not_aligned +.c2u_dest_aligned: + + ands ip, r1, #3 + bne .c2u_src_not_aligned +/* + * Seeing as there has to be at least 8 bytes to copy, we can + * copy one word, and force a user-mode page fault... + */ + +.c2u_0fupi: subs r2, r2, #4 + addmi ip, r2, #4 + bmi .c2u_0nowords + ldr r3, [r1], #4 +USER( strt r3, [r0], #4) @ May fault + mov ip, r0, lsl #32 - PAGE_SHIFT @ On each page, use a ld/st??t instruction + rsb ip, ip, #0 + movs ip, ip, lsr #32 - PAGE_SHIFT + beq .c2u_0fupi +/* + * ip = max no. 
of bytes to copy before needing another "strt" insn + */ + cmp r2, ip + movlt ip, r2 + sub r2, r2, ip + subs ip, ip, #32 + blt .c2u_0rem8lp + +.c2u_0cpy8lp: ldmia r1!, {r3 - r6} + stmia r0!, {r3 - r6} @ Shouldnt fault + ldmia r1!, {r3 - r6} + stmia r0!, {r3 - r6} @ Shouldnt fault + subs ip, ip, #32 + bpl .c2u_0cpy8lp +.c2u_0rem8lp: cmn ip, #16 + ldmgeia r1!, {r3 - r6} + stmgeia r0!, {r3 - r6} @ Shouldnt fault + tst ip, #8 + ldmneia r1!, {r3 - r4} + stmneia r0!, {r3 - r4} @ Shouldnt fault + tst ip, #4 + ldrne r3, [r1], #4 + strnet r3, [r0], #4 @ Shouldnt fault + ands ip, ip, #3 + beq .c2u_0fupi +.c2u_0nowords: teq ip, #0 + beq .c2u_finished +.c2u_nowords: cmp ip, #2 + ldrb r3, [r1], #1 +USER( strbt r3, [r0], #1) @ May fault + ldrgeb r3, [r1], #1 +USER( strgebt r3, [r0], #1) @ May fault + ldrgtb r3, [r1], #1 +USER( strgtbt r3, [r0], #1) @ May fault + b .c2u_finished + +.c2u_not_enough: + movs ip, r2 + bne .c2u_nowords +.c2u_finished: mov r0, #0 + LOADREGS(fd,sp!,{r2, r4 - r7, pc}) + +.c2u_src_not_aligned: + bic r1, r1, #3 + ldr r7, [r1], #4 + cmp ip, #2 + bgt .c2u_3fupi + beq .c2u_2fupi +.c2u_1fupi: subs r2, r2, #4 + addmi ip, r2, #4 + bmi .c2u_1nowords + mov r3, r7, pull #8 + ldr r7, [r1], #4 + orr r3, r3, r7, push #24 +USER( strt r3, [r0], #4) @ May fault + mov ip, r0, lsl #32 - PAGE_SHIFT + rsb ip, ip, #0 + movs ip, ip, lsr #32 - PAGE_SHIFT + beq .c2u_1fupi + cmp r2, ip + movlt ip, r2 + sub r2, r2, ip + subs ip, ip, #16 + blt .c2u_1rem8lp + +.c2u_1cpy8lp: mov r3, r7, pull #8 + ldmia r1!, {r4 - r7} + orr r3, r3, r4, push #24 + mov r4, r4, pull #8 + orr r4, r4, r5, push #24 + mov r5, r5, pull #8 + orr r5, r5, r6, push #24 + mov r6, r6, pull #8 + orr r6, r6, r7, push #24 + stmia r0!, {r3 - r6} @ Shouldnt fault + subs ip, ip, #16 + bpl .c2u_1cpy8lp +.c2u_1rem8lp: tst ip, #8 + movne r3, r7, pull #8 + ldmneia r1!, {r4, r7} + orrne r3, r3, r4, push #24 + movne r4, r4, pull #8 + orrne r4, r4, r7, push #24 + stmneia r0!, {r3 - r4} @ Shouldnt fault + tst ip, #4 + movne r3, r7, pull #8 + ldrne r7, [r1], #4 + orrne r3, r3, r7, push #24 + strnet r3, [r0], #4 @ Shouldnt fault + ands ip, ip, #3 + beq .c2u_1fupi +.c2u_1nowords: mov r3, r7, lsr #byte(1) + teq ip, #0 + beq .c2u_finished + cmp ip, #2 +USER( strbt r3, [r0], #1) @ May fault + movge r3, r7, lsr #byte(2) +USER( strgebt r3, [r0], #1) @ May fault + movgt r3, r7, lsr #byte(3) +USER( strgtbt r3, [r0], #1) @ May fault + b .c2u_finished + +.c2u_2fupi: subs r2, r2, #4 + addmi ip, r2, #4 + bmi .c2u_2nowords + mov r3, r7, pull #16 + ldr r7, [r1], #4 + orr r3, r3, r7, push #16 +USER( strt r3, [r0], #4) @ May fault + mov ip, r0, lsl #32 - PAGE_SHIFT + rsb ip, ip, #0 + movs ip, ip, lsr #32 - PAGE_SHIFT + beq .c2u_2fupi + cmp r2, ip + movlt ip, r2 + sub r2, r2, ip + subs ip, ip, #16 + blt .c2u_2rem8lp + +.c2u_2cpy8lp: mov r3, r7, pull #16 + ldmia r1!, {r4 - r7} + orr r3, r3, r4, push #16 + mov r4, r4, pull #16 + orr r4, r4, r5, push #16 + mov r5, r5, pull #16 + orr r5, r5, r6, push #16 + mov r6, r6, pull #16 + orr r6, r6, r7, push #16 + stmia r0!, {r3 - r6} @ Shouldnt fault + subs ip, ip, #16 + bpl .c2u_2cpy8lp +.c2u_2rem8lp: tst ip, #8 + movne r3, r7, pull #16 + ldmneia r1!, {r4, r7} + orrne r3, r3, r4, push #16 + movne r4, r4, pull #16 + orrne r4, r4, r7, push #16 + stmneia r0!, {r3 - r4} @ Shouldnt fault + tst ip, #4 + movne r3, r7, pull #16 + ldrne r7, [r1], #4 + orrne r3, r3, r7, push #16 + strnet r3, [r0], #4 @ Shouldnt fault + ands ip, ip, #3 + beq .c2u_2fupi +.c2u_2nowords: mov r3, r7, lsr #byte(2) + teq ip, #0 + beq .c2u_finished + cmp ip, #2 
+USER( strbt r3, [r0], #1) @ May fault + movge r3, r7, lsr #byte(3) +USER( strgebt r3, [r0], #1) @ May fault + ldrgtb r3, [r1], #0 +USER( strgtbt r3, [r0], #1) @ May fault + b .c2u_finished + +.c2u_3fupi: subs r2, r2, #4 + addmi ip, r2, #4 + bmi .c2u_3nowords + mov r3, r7, pull #24 + ldr r7, [r1], #4 + orr r3, r3, r7, push #8 +USER( strt r3, [r0], #4) @ May fault + mov ip, r0, lsl #32 - PAGE_SHIFT + rsb ip, ip, #0 + movs ip, ip, lsr #32 - PAGE_SHIFT + beq .c2u_3fupi + cmp r2, ip + movlt ip, r2 + sub r2, r2, ip + subs ip, ip, #16 + blt .c2u_3rem8lp + +.c2u_3cpy8lp: mov r3, r7, pull #24 + ldmia r1!, {r4 - r7} + orr r3, r3, r4, push #8 + mov r4, r4, pull #24 + orr r4, r4, r5, push #8 + mov r5, r5, pull #24 + orr r5, r5, r6, push #8 + mov r6, r6, pull #24 + orr r6, r6, r7, push #8 + stmia r0!, {r3 - r6} @ Shouldnt fault + subs ip, ip, #16 + bpl .c2u_3cpy8lp +.c2u_3rem8lp: tst ip, #8 + movne r3, r7, pull #24 + ldmneia r1!, {r4, r7} + orrne r3, r3, r4, push #8 + movne r4, r4, pull #24 + orrne r4, r4, r7, push #8 + stmneia r0!, {r3 - r4} @ Shouldnt fault + tst ip, #4 + movne r3, r7, pull #24 + ldrne r7, [r1], #4 + orrne r3, r3, r7, push #8 + strnet r3, [r0], #4 @ Shouldnt fault + ands ip, ip, #3 + beq .c2u_3fupi +.c2u_3nowords: mov r3, r7, lsr #byte(3) + teq ip, #0 + beq .c2u_finished + cmp ip, #2 +USER( strbt r3, [r0], #1) @ May fault + ldrgeb r3, [r1], #1 +USER( strgebt r3, [r0], #1) @ May fault + ldrgtb r3, [r1], #0 +USER( strgtbt r3, [r0], #1) @ May fault + b .c2u_finished + + .section .fixup,"ax" + .align 0 +9001: LOADREGS(fd,sp!, {r0, r4 - r7, pc}) + .previous + +/* Prototype: unsigned long uaccess_user_copy_from_user(void *to,const void *from,unsigned long n); + * Purpose : copy a block from user memory to kernel memory + * Params : to - kernel memory + * : from - user memory + * : n - number of bytes to copy + * Returns : Number of bytes NOT copied. + */ +.cfu_dest_not_aligned: + rsb ip, ip, #4 + cmp ip, #2 +USER( ldrbt r3, [r1], #1) @ May fault + strb r3, [r0], #1 +USER( ldrgebt r3, [r1], #1) @ May fault + strgeb r3, [r0], #1 +USER( ldrgtbt r3, [r1], #1) @ May fault + strgtb r3, [r0], #1 + sub r2, r2, ip + b .cfu_dest_aligned + +ENTRY(uaccess_user_copy_from_user) + stmfd sp!, {r0, r2, r4 - r7, lr} + cmp r2, #4 + blt .cfu_not_enough + ands ip, r0, #3 + bne .cfu_dest_not_aligned +.cfu_dest_aligned: + ands ip, r1, #3 + bne .cfu_src_not_aligned +/* + * Seeing as there has to be at least 8 bytes to copy, we can + * copy one word, and force a user-mode page fault... + */ + +.cfu_0fupi: subs r2, r2, #4 + addmi ip, r2, #4 + bmi .cfu_0nowords +USER( ldrt r3, [r1], #4) + str r3, [r0], #4 + mov ip, r1, lsl #32 - PAGE_SHIFT @ On each page, use a ld/st??t instruction + rsb ip, ip, #0 + movs ip, ip, lsr #32 - PAGE_SHIFT + beq .cfu_0fupi +/* + * ip = max no. 
of bytes to copy before needing another "strt" insn + */ + cmp r2, ip + movlt ip, r2 + sub r2, r2, ip + subs ip, ip, #32 + blt .cfu_0rem8lp + +.cfu_0cpy8lp: ldmia r1!, {r3 - r6} @ Shouldnt fault + stmia r0!, {r3 - r6} + ldmia r1!, {r3 - r6} @ Shouldnt fault + stmia r0!, {r3 - r6} + subs ip, ip, #32 + bpl .cfu_0cpy8lp +.cfu_0rem8lp: cmn ip, #16 + ldmgeia r1!, {r3 - r6} @ Shouldnt fault + stmgeia r0!, {r3 - r6} + tst ip, #8 + ldmneia r1!, {r3 - r4} @ Shouldnt fault + stmneia r0!, {r3 - r4} + tst ip, #4 + ldrnet r3, [r1], #4 @ Shouldnt fault + strne r3, [r0], #4 + ands ip, ip, #3 + beq .cfu_0fupi +.cfu_0nowords: teq ip, #0 + beq .cfu_finished +.cfu_nowords: cmp ip, #2 +USER( ldrbt r3, [r1], #1) @ May fault + strb r3, [r0], #1 +USER( ldrgebt r3, [r1], #1) @ May fault + strgeb r3, [r0], #1 +USER( ldrgtbt r3, [r1], #1) @ May fault + strgtb r3, [r0], #1 + b .cfu_finished + +.cfu_not_enough: + movs ip, r2 + bne .cfu_nowords +.cfu_finished: mov r0, #0 + add sp, sp, #8 + LOADREGS(fd,sp!,{r4 - r7, pc}) + +.cfu_src_not_aligned: + bic r1, r1, #3 +USER( ldrt r7, [r1], #4) @ May fault + cmp ip, #2 + bgt .cfu_3fupi + beq .cfu_2fupi +.cfu_1fupi: subs r2, r2, #4 + addmi ip, r2, #4 + bmi .cfu_1nowords + mov r3, r7, pull #8 +USER( ldrt r7, [r1], #4) @ May fault + orr r3, r3, r7, push #24 + str r3, [r0], #4 + mov ip, r1, lsl #32 - PAGE_SHIFT + rsb ip, ip, #0 + movs ip, ip, lsr #32 - PAGE_SHIFT + beq .cfu_1fupi + cmp r2, ip + movlt ip, r2 + sub r2, r2, ip + subs ip, ip, #16 + blt .cfu_1rem8lp + +.cfu_1cpy8lp: mov r3, r7, pull #8 + ldmia r1!, {r4 - r7} @ Shouldnt fault + orr r3, r3, r4, push #24 + mov r4, r4, pull #8 + orr r4, r4, r5, push #24 + mov r5, r5, pull #8 + orr r5, r5, r6, push #24 + mov r6, r6, pull #8 + orr r6, r6, r7, push #24 + stmia r0!, {r3 - r6} + subs ip, ip, #16 + bpl .cfu_1cpy8lp +.cfu_1rem8lp: tst ip, #8 + movne r3, r7, pull #8 + ldmneia r1!, {r4, r7} @ Shouldnt fault + orrne r3, r3, r4, push #24 + movne r4, r4, pull #8 + orrne r4, r4, r7, push #24 + stmneia r0!, {r3 - r4} + tst ip, #4 + movne r3, r7, pull #8 +USER( ldrnet r7, [r1], #4) @ May fault + orrne r3, r3, r7, push #24 + strne r3, [r0], #4 + ands ip, ip, #3 + beq .cfu_1fupi +.cfu_1nowords: mov r3, r7, lsr #byte(1) + teq ip, #0 + beq .cfu_finished + cmp ip, #2 + strb r3, [r0], #1 + movge r3, r7, lsr #byte(2) + strgeb r3, [r0], #1 + movgt r3, r7, lsr #byte(3) + strgtb r3, [r0], #1 + b .cfu_finished + +.cfu_2fupi: subs r2, r2, #4 + addmi ip, r2, #4 + bmi .cfu_2nowords + mov r3, r7, pull #16 +USER( ldrt r7, [r1], #4) @ May fault + orr r3, r3, r7, push #16 + str r3, [r0], #4 + mov ip, r1, lsl #32 - PAGE_SHIFT + rsb ip, ip, #0 + movs ip, ip, lsr #32 - PAGE_SHIFT + beq .cfu_2fupi + cmp r2, ip + movlt ip, r2 + sub r2, r2, ip + subs ip, ip, #16 + blt .cfu_2rem8lp + +.cfu_2cpy8lp: mov r3, r7, pull #16 + ldmia r1!, {r4 - r7} @ Shouldnt fault + orr r3, r3, r4, push #16 + mov r4, r4, pull #16 + orr r4, r4, r5, push #16 + mov r5, r5, pull #16 + orr r5, r5, r6, push #16 + mov r6, r6, pull #16 + orr r6, r6, r7, push #16 + stmia r0!, {r3 - r6} + subs ip, ip, #16 + bpl .cfu_2cpy8lp +.cfu_2rem8lp: tst ip, #8 + movne r3, r7, pull #16 + ldmneia r1!, {r4, r7} @ Shouldnt fault + orrne r3, r3, r4, push #16 + movne r4, r4, pull #16 + orrne r4, r4, r7, push #16 + stmneia r0!, {r3 - r4} + tst ip, #4 + movne r3, r7, pull #16 +USER( ldrnet r7, [r1], #4) @ May fault + orrne r3, r3, r7, push #16 + strne r3, [r0], #4 + ands ip, ip, #3 + beq .cfu_2fupi +.cfu_2nowords: mov r3, r7, lsr #byte(2) + teq ip, #0 + beq .cfu_finished + cmp ip, #2 + strb r3, [r0], #1 + movge 
r3, r7, lsr #byte(3) + strgeb r3, [r0], #1 +USER( ldrgtbt r3, [r1], #0) @ May fault + strgtb r3, [r0], #1 + b .cfu_finished + +.cfu_3fupi: subs r2, r2, #4 + addmi ip, r2, #4 + bmi .cfu_3nowords + mov r3, r7, pull #24 +USER( ldrt r7, [r1], #4) @ May fault + orr r3, r3, r7, push #8 + str r3, [r0], #4 + mov ip, r1, lsl #32 - PAGE_SHIFT + rsb ip, ip, #0 + movs ip, ip, lsr #32 - PAGE_SHIFT + beq .cfu_3fupi + cmp r2, ip + movlt ip, r2 + sub r2, r2, ip + subs ip, ip, #16 + blt .cfu_3rem8lp + +.cfu_3cpy8lp: mov r3, r7, pull #24 + ldmia r1!, {r4 - r7} @ Shouldnt fault + orr r3, r3, r4, push #8 + mov r4, r4, pull #24 + orr r4, r4, r5, push #8 + mov r5, r5, pull #24 + orr r5, r5, r6, push #8 + mov r6, r6, pull #24 + orr r6, r6, r7, push #8 + stmia r0!, {r3 - r6} + subs ip, ip, #16 + bpl .cfu_3cpy8lp +.cfu_3rem8lp: tst ip, #8 + movne r3, r7, pull #24 + ldmneia r1!, {r4, r7} @ Shouldnt fault + orrne r3, r3, r4, push #8 + movne r4, r4, pull #24 + orrne r4, r4, r7, push #8 + stmneia r0!, {r3 - r4} + tst ip, #4 + movne r3, r7, pull #24 +USER( ldrnet r7, [r1], #4) @ May fault + orrne r3, r3, r7, push #8 + strne r3, [r0], #4 + ands ip, ip, #3 + beq .cfu_3fupi +.cfu_3nowords: mov r3, r7, lsr #byte(3) + teq ip, #0 + beq .cfu_finished + cmp ip, #2 + strb r3, [r0], #1 +USER( ldrgebt r3, [r1], #1) @ May fault + strgeb r3, [r0], #1 +USER( ldrgtbt r3, [r1], #1) @ May fault + strgtb r3, [r0], #1 + b .cfu_finished + + .section .fixup,"ax" + .align 0 + /* + * We took an exception. r0 contains a pointer to + * the byte not copied. + */ +9001: ldr r2, [sp], #4 @ void *to + sub r2, r0, r2 @ bytes copied + ldr r1, [sp], #4 @ unsigned long count + subs r4, r1, r2 @ bytes left to copy + movne r1, r4 + blne __memzero + mov r0, r4 + LOADREGS(fd,sp!, {r4 - r7, pc}) + .previous + +/* Prototype: int uaccess_user_clear_user(void *addr, size_t sz) + * Purpose : clear some user memory + * Params : addr - user memory address to clear + * : sz - number of bytes to clear + * Returns : number of bytes NOT cleared + */ +ENTRY(uaccess_user_clear_user) + stmfd sp!, {r1, lr} + mov r2, #0 + cmp r1, #4 + blt 2f + ands ip, r0, #3 + beq 1f + cmp ip, #2 +USER( strbt r2, [r0], #1) +USER( strlebt r2, [r0], #1) +USER( strltbt r2, [r0], #1) + rsb ip, ip, #4 + sub r1, r1, ip @ 7 6 5 4 3 2 1 +1: subs r1, r1, #8 @ -1 -2 -3 -4 -5 -6 -7 +USER( strplt r2, [r0], #4) +USER( strplt r2, [r0], #4) + bpl 1b + adds r1, r1, #4 @ 3 2 1 0 -1 -2 -3 +USER( strplt r2, [r0], #4) +2: tst r1, #2 @ 1x 1x 0x 0x 1x 1x 0x +USER( strnebt r2, [r0], #1) +USER( strnebt r2, [r0], #1) + tst r1, #1 @ x1 x0 x1 x0 x1 x0 x1 +USER( strnebt r2, [r0], #1) + mov r0, #0 + LOADREGS(fd,sp!, {r1, pc}) + + .section .fixup,"ax" + .align 0 +9001: LOADREGS(fd,sp!, {r0, pc}) + .previous + +/* + * Copy a string from user space to kernel space. 
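+ * (only the loads from the user-space source via r1 are wrapped in USER()
+ *  and may fault; the kernel-space destination at r0 is written with a
+ *  plain strb)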
+ * r0 = dst, r1 = src, r2 = byte length + * returns the number of characters copied (strlen of copied string), + * -EFAULT on exception, or "len" if we fill the whole buffer + */ +ENTRY(uaccess_user_strncpy_from_user) + save_lr + mov ip, r1 +1: subs r2, r2, #1 +USER( ldrplbt r3, [r1], #1) + bmi 2f + strb r3, [r0], #1 + teq r3, #0 + bne 1b + sub r1, r1, #1 @ take NUL character out of count +2: sub r0, r1, ip + restore_pc + + .section .fixup,"ax" + .align 0 +9001: mov r3, #0 + strb r3, [r0, #0] @ null terminate + mov r0, #-EFAULT + restore_pc + .previous + +/* Prototype: unsigned long uaccess_user_strnlen_user(const char *str, long n) + * Purpose : get length of a string in user memory + * Params : str - address of string in user memory + * Returns : length of string *including terminator* + * or zero on exception, or n + 1 if too long + */ +ENTRY(uaccess_user_strnlen_user) + save_lr + mov r2, r0 +1: +USER( ldrbt r3, [r0], #1) + teq r3, #0 + beq 2f + subs r1, r1, #1 + bne 1b + add r0, r0, #1 +2: sub r0, r0, r2 + restore_pc + + .section .fixup,"ax" + .align 0 +9001: mov r0, #0 + restore_pc + .previous + diff --git a/arch/arm26/lib/ucmpdi2.c b/arch/arm26/lib/ucmpdi2.c new file mode 100644 index 00000000000..6c6ae63efa0 --- /dev/null +++ b/arch/arm26/lib/ucmpdi2.c @@ -0,0 +1,51 @@ +/* More subroutines needed by GCC output code on some machines. */ +/* Compile this one with gcc. */ +/* Copyright (C) 1989, 92-98, 1999 Free Software Foundation, Inc. + +This file is part of GNU CC. + +GNU CC is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; either version 2, or (at your option) +any later version. + +GNU CC is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License +along with GNU CC; see the file COPYING. If not, write to +the Free Software Foundation, 59 Temple Place - Suite 330, +Boston, MA 02111-1307, USA. */ + +/* As a special exception, if you link this library with other files, + some of which are compiled with GCC, to produce an executable, + this library does not by itself cause the resulting executable + to be covered by the GNU General Public License. + This exception does not however invalidate any other reasons why + the executable file might be covered by the GNU General Public License. + */ +/* support functions required by the kernel. based on code from gcc-2.95.3 */ +/* I Molton 29/07/01 */ + +#include "gcclib.h" + +word_type +__ucmpdi2 (DItype a, DItype b) +{ + DIunion au, bu; + + au.ll = a, bu.ll = b; + + if ((USItype) au.s.high < (USItype) bu.s.high) + return 0; + else if ((USItype) au.s.high > (USItype) bu.s.high) + return 2; + if ((USItype) au.s.low < (USItype) bu.s.low) + return 0; + else if ((USItype) au.s.low > (USItype) bu.s.low) + return 2; + return 1; +} + diff --git a/arch/arm26/lib/udivdi3.c b/arch/arm26/lib/udivdi3.c new file mode 100644 index 00000000000..d25195f673f --- /dev/null +++ b/arch/arm26/lib/udivdi3.c @@ -0,0 +1,242 @@ +/* More subroutines needed by GCC output code on some machines. */ +/* Compile this one with gcc. */ +/* Copyright (C) 1989, 92-98, 1999 Free Software Foundation, Inc. + +This file is part of GNU CC. 
+ +GNU CC is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; either version 2, or (at your option) +any later version. + +GNU CC is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License +along with GNU CC; see the file COPYING. If not, write to +the Free Software Foundation, 59 Temple Place - Suite 330, +Boston, MA 02111-1307, USA. */ + +/* As a special exception, if you link this library with other files, + some of which are compiled with GCC, to produce an executable, + this library does not by itself cause the resulting executable + to be covered by the GNU General Public License. + This exception does not however invalidate any other reasons why + the executable file might be covered by the GNU General Public License. + */ +/* support functions required by the kernel. based on code from gcc-2.95.3 */ +/* I Molton 29/07/01 */ + +#include "gcclib.h" +#include "longlong.h" + +static const UQItype __clz_tab[] = +{ + 0,1,2,2,3,3,3,3,4,4,4,4,4,4,4,4,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5, + 6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6, + 7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7, + 7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7, + 8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8, + 8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8, + 8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8, + 8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8, +}; + +UDItype +__udivmoddi4 (UDItype n, UDItype d, UDItype *rp) +{ + DIunion ww; + DIunion nn, dd; + DIunion rr; + USItype d0, d1, n0, n1, n2; + USItype q0, q1; + USItype b, bm; + + nn.ll = n; + dd.ll = d; + + d0 = dd.s.low; + d1 = dd.s.high; + n0 = nn.s.low; + n1 = nn.s.high; + + if (d1 == 0) + { + if (d0 > n1) + { + /* 0q = nn / 0D */ + + count_leading_zeros (bm, d0); + + if (bm != 0) + { + /* Normalize, i.e. make the most significant bit of the + denominator set. */ + + d0 = d0 << bm; + n1 = (n1 << bm) | (n0 >> (SI_TYPE_SIZE - bm)); + n0 = n0 << bm; + } + + udiv_qrnnd (q0, n0, n1, n0, d0); + q1 = 0; + + /* Remainder in n0 >> bm. */ + } + else + { + /* qq = NN / 0d */ + + if (d0 == 0) + d0 = 1 / d0; /* Divide intentionally by zero. */ + + count_leading_zeros (bm, d0); + + if (bm == 0) + { + /* From (n1 >= d0) /\ (the most significant bit of d0 is set), + conclude (the most significant bit of n1 is set) /\ (the + leading quotient digit q1 = 1). + + This special case is necessary, not an optimization. + (Shifts counts of SI_TYPE_SIZE are undefined.) */ + + n1 -= d0; + q1 = 1; + } + else + { + /* Normalize. */ + + b = SI_TYPE_SIZE - bm; + + d0 = d0 << bm; + n2 = n1 >> b; + n1 = (n1 << bm) | (n0 >> b); + n0 = n0 << bm; + + udiv_qrnnd (q1, n1, n2, n1, d0); + } + + /* n1 != d0... */ + + udiv_qrnnd (q0, n0, n1, n0, d0); + + /* Remainder in n0 >> bm. */ + } + + if (rp != 0) + { + rr.s.low = n0 >> bm; + rr.s.high = 0; + *rp = rr.ll; + } + } + else + { + if (d1 > n1) + { + /* 00 = nn / DD */ + + q0 = 0; + q1 = 0; + + /* Remainder in n1n0. 
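+	      (the divisor is larger than the dividend here, so the quotient is
+	       zero and the whole dividend n1n0 is handed back as the remainder)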
*/
+	  if (rp != 0)
+	    {
+	      rr.s.low = n0;
+	      rr.s.high = n1;
+	      *rp = rr.ll;
+	    }
+	}
+      else
+	{
+	  /* 0q = NN / dd */
+
+	  count_leading_zeros (bm, d1);
+	  if (bm == 0)
+	    {
+	      /* From (n1 >= d1) /\ (the most significant bit of d1 is set),
+		 conclude (the most significant bit of n1 is set) /\ (the
+		 quotient digit q0 = 0 or 1).
+
+		 This special case is necessary, not an optimization.  */
+
+	      /* The condition on the next line takes advantage of that
+		 n1 >= d1 (true due to program flow).  */
+	      if (n1 > d1 || n0 >= d0)
+		{
+		  q0 = 1;
+		  sub_ddmmss (n1, n0, n1, n0, d1, d0);
+		}
+	      else
+		q0 = 0;
+
+	      q1 = 0;
+
+	      if (rp != 0)
+		{
+		  rr.s.low = n0;
+		  rr.s.high = n1;
+		  *rp = rr.ll;
+		}
+	    }
+	  else
+	    {
+	      USItype m1, m0;
+	      /* Normalize.  */
+
+	      b = SI_TYPE_SIZE - bm;
+
+	      d1 = (d1 << bm) | (d0 >> b);
+	      d0 = d0 << bm;
+	      n2 = n1 >> b;
+	      n1 = (n1 << bm) | (n0 >> b);
+	      n0 = n0 << bm;
+
+	      udiv_qrnnd (q0, n1, n2, n1, d1);
+	      umul_ppmm (m1, m0, q0, d0);
+
+	      if (m1 > n1 || (m1 == n1 && m0 > n0))
+		{
+		  q0--;
+		  sub_ddmmss (m1, m0, m1, m0, d1, d0);
+		}
+
+	      q1 = 0;
+
+	      /* Remainder in (n1n0 - m1m0) >> bm.  */
+	      if (rp != 0)
+		{
+		  sub_ddmmss (n1, n0, n1, n0, m1, m0);
+		  rr.s.low = (n1 << b) | (n0 >> bm);
+		  rr.s.high = n1 >> bm;
+		  *rp = rr.ll;
+		}
+	    }
+	}
+    }
+
+  ww.s.low = q0;
+  ww.s.high = q1;
+  return ww.ll;
+}
+
+UDItype
+__udivdi3 (UDItype n, UDItype d)
+{
+  return __udivmoddi4 (n, d, (UDItype *) 0);
+}
+
+UDItype
+__umoddi3 (UDItype u, UDItype v)
+{
+  UDItype w;
+
+  (void) __udivmoddi4 (u, v, &w);
+
+  return w;
+}
+
--
cgit v1.2.3
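
The libgcc-derived helpers added above (__ucmpdi2, __udivmoddi4, __udivdi3, __umoddi3) are not normally called directly by kernel C code; the compiler emits calls to them whenever 64-bit unsigned comparison, division or modulo appears in C on this 32-bit target. The sketch below is illustrative only and not part of the patch: it assumes a hosted C environment, and the type alias, sample values and output are invented for the example.

/* Illustrative sketch (not part of the patch): how the 64-bit helpers
   defined above map onto ordinary C operations on a 32-bit target.
   The type alias and sample values are invented for the example. */
#include <stdio.h>

typedef unsigned long long u64;		/* plays the role of UDItype */

int main(void)
{
	u64 n = 0x0123456789abcdefULL;	/* dividend */
	u64 d = 0x0000000100000001ULL;	/* divisor with a non-zero high word */

	/* With no native 64-bit divide instruction, the compiler typically
	   lowers these two expressions to calls to __udivdi3 and __umoddi3,
	   both of which are thin wrappers around __udivmoddi4 (quotient and
	   remainder come out of a single pass over the operands). */
	u64 q = n / d;
	u64 r = n % d;

	/* An unsigned 64-bit comparison may be lowered to __ucmpdi2, whose
	   result encodes the ordering: 0 = less, 1 = equal, 2 = greater. */
	int n_less_than_d = (n < d);

	printf("q = %#llx  r = %#llx  n < d: %d\n", q, r, n_less_than_d);
	return 0;
}

Because __udivmoddi4 produces the quotient and the remainder together, __umoddi3 simply discards the returned quotient and hands back the remainder written through its third argument, and __udivdi3 passes a null remainder pointer.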