author     Russell King <rmk@dyn-67.arm.linux.org.uk>  2006-06-21 20:38:17 +0100
committer  Russell King <rmk+kernel@arm.linux.org.uk>  2006-06-28 17:59:46 +0100
commit     9641c7cc5a7f6d5c9dc9b43eea4e5f8c3c08c94e (patch)
tree       5ff57feabe0538d58404ec4918b0d94d75f69846
parent     002547b4f86c27bfac5bae344b723d250857be6b (diff)
[ARM] nommu: uaccess tweaks
MMUless systems have only one address space for all threads, so both the
usual access_ok() checks and the exception handling do not make much sense.

Hence, discard the fixup and exception tables at link time, use
memcpy/memset for the user copy/clearing functions, and define the
permission check macros to be constants.

Some of this patch was derived from the equivalent patch by Hyok S. Choi.

Signed-off-by: Hyok S. Choi <hyok.choi@samsung.com>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
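As a rough illustration of what the nommu definitions in this patch amount to, the
standalone userspace sketch below (not part of the patch) models the new behaviour:
access_ok() becomes constant-true and the copy helpers reduce to memcpy() reporting
zero bytes uncopied. The macro bodies mirror the patch; main() and the buffer names
are purely hypothetical.

/*
 * Userspace model of the nommu uaccess shortcuts (illustrative only).
 */
#include <stdio.h>
#include <string.h>

/* nommu: every range is "ok", so access_ok() is constant-true */
#define __range_ok(addr, size)		(0)
#define access_ok(type, addr, size)	(__range_ok(addr, size) == 0)

/* nommu: user copies degenerate to memcpy and report 0 bytes left */
#define __copy_from_user(to, from, n)	(memcpy((to), (from), (n)), 0UL)

int main(void)
{
	char src[] = "hello";
	char dst[sizeof(src)];

	if (access_ok(0, src, sizeof(src))) {	/* always taken on nommu */
		unsigned long left = __copy_from_user(dst, src, sizeof(src));
		printf("uncopied bytes: %lu, dst: %s\n", left, dst);
	}
	return 0;
}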
-rw-r--r--  arch/arm/kernel/armksyms.c       7
-rw-r--r--  arch/arm/kernel/vmlinux.lds.S    8
-rw-r--r--  arch/arm/lib/Makefile           13
-rw-r--r--  include/asm-arm/uaccess.h      139
4 files changed, 108 insertions, 59 deletions
diff --git a/arch/arm/kernel/armksyms.c b/arch/arm/kernel/armksyms.c
index f8bb7abd3e9b..da69e660574b 100644
--- a/arch/arm/kernel/armksyms.c
+++ b/arch/arm/kernel/armksyms.c
@@ -109,11 +109,13 @@ EXPORT_SYMBOL(memchr);
EXPORT_SYMBOL(__memzero);
/* user mem (segment) */
+EXPORT_SYMBOL(__strnlen_user);
+EXPORT_SYMBOL(__strncpy_from_user);
+
+#ifdef CONFIG_MMU
EXPORT_SYMBOL(__copy_from_user);
EXPORT_SYMBOL(__copy_to_user);
EXPORT_SYMBOL(__clear_user);
-EXPORT_SYMBOL(__strnlen_user);
-EXPORT_SYMBOL(__strncpy_from_user);
EXPORT_SYMBOL(__get_user_1);
EXPORT_SYMBOL(__get_user_2);
@@ -123,6 +125,7 @@ EXPORT_SYMBOL(__put_user_1);
EXPORT_SYMBOL(__put_user_2);
EXPORT_SYMBOL(__put_user_4);
EXPORT_SYMBOL(__put_user_8);
+#endif
/* crypto hash */
EXPORT_SYMBOL(sha_transform);
diff --git a/arch/arm/kernel/vmlinux.lds.S b/arch/arm/kernel/vmlinux.lds.S
index 2b254e88595c..2df9688a7028 100644
--- a/arch/arm/kernel/vmlinux.lds.S
+++ b/arch/arm/kernel/vmlinux.lds.S
@@ -80,6 +80,10 @@ SECTIONS
*(.exit.text)
*(.exit.data)
*(.exitcall.exit)
+#ifndef CONFIG_MMU
+ *(.fixup)
+ *(__ex_table)
+#endif
}
.text : { /* Real text segment */
@@ -87,7 +91,9 @@ SECTIONS
*(.text)
SCHED_TEXT
LOCK_TEXT
+#ifdef CONFIG_MMU
*(.fixup)
+#endif
*(.gnu.warning)
*(.rodata)
*(.rodata.*)
@@ -142,7 +148,9 @@ SECTIONS
*/
. = ALIGN(32);
__start___ex_table = .;
+#ifdef CONFIG_MMU
*(__ex_table)
+#endif
__stop___ex_table = .;
/*
diff --git a/arch/arm/lib/Makefile b/arch/arm/lib/Makefile
index 7b726b627ea5..30351cd4560d 100644
--- a/arch/arm/lib/Makefile
+++ b/arch/arm/lib/Makefile
@@ -6,28 +6,31 @@
lib-y := backtrace.o changebit.o csumipv6.o csumpartial.o \
csumpartialcopy.o csumpartialcopyuser.o clearbit.o \
- copy_page.o delay.o findbit.o memchr.o memcpy.o \
+ delay.o findbit.o memchr.o memcpy.o \
memmove.o memset.o memzero.o setbit.o \
strncpy_from_user.o strnlen_user.o \
strchr.o strrchr.o \
testchangebit.o testclearbit.o testsetbit.o \
- getuser.o putuser.o clear_user.o \
ashldi3.o ashrdi3.o lshrdi3.o muldi3.o \
ucmpdi2.o lib1funcs.o div64.o sha1.o \
io-readsb.o io-writesb.o io-readsl.o io-writesl.o
+mmu-y := clear_user.o copy_page.o getuser.o putuser.o
+
# the code in uaccess.S is not preemption safe and
# probably faster on ARMv3 only
ifeq ($(CONFIG_PREEMPT),y)
- lib-y += copy_from_user.o copy_to_user.o
+ mmu-y += copy_from_user.o copy_to_user.o
else
ifneq ($(CONFIG_CPU_32v3),y)
- lib-y += copy_from_user.o copy_to_user.o
+ mmu-y += copy_from_user.o copy_to_user.o
else
- lib-y += uaccess.o
+ mmu-y += uaccess.o
endif
endif
+lib-$(CONFIG_MMU) += $(mmu-y)
+
ifeq ($(CONFIG_CPU_32v3),y)
lib-y += io-readsw-armv3.o io-writesw-armv3.o
else
diff --git a/include/asm-arm/uaccess.h b/include/asm-arm/uaccess.h
index f909dc75301a..87aba57a66c4 100644
--- a/include/asm-arm/uaccess.h
+++ b/include/asm-arm/uaccess.h
@@ -41,15 +41,24 @@ struct exception_table_entry
extern int fixup_exception(struct pt_regs *regs);
/*
+ * These two are intentionally not defined anywhere - if the kernel
+ * code generates any references to them, that's a bug.
+ */
+extern int __get_user_bad(void);
+extern int __put_user_bad(void);
+
+/*
* Note that this is actually 0x1,0000,0000
*/
#define KERNEL_DS 0x00000000
-#define USER_DS TASK_SIZE
-
#define get_ds() (KERNEL_DS)
+
+#ifdef CONFIG_MMU
+
+#define USER_DS TASK_SIZE
#define get_fs() (current_thread_info()->addr_limit)
-static inline void set_fs (mm_segment_t fs)
+static inline void set_fs(mm_segment_t fs)
{
current_thread_info()->addr_limit = fs;
modify_domain(DOMAIN_KERNEL, fs ? DOMAIN_CLIENT : DOMAIN_MANAGER);
@@ -75,8 +84,6 @@ static inline void set_fs (mm_segment_t fs)
: "cc"); \
flag; })
-#define access_ok(type,addr,size) (__range_ok(addr,size) == 0)
-
/*
* Single-value transfer routines. They automatically use the right
* size if we just have the right pointer type. Note that the functions
@@ -87,20 +94,10 @@ static inline void set_fs (mm_segment_t fs)
* fixup code, but there are a few places where it intrudes on the
* main code path. When we only write to user space, there is no
* problem.
- *
- * The "__xxx" versions of the user access functions do not verify the
- * address space - it must have been done previously with a separate
- * "access_ok()" call.
- *
- * The "xxx_error" versions set the third argument to EFAULT if an
- * error occurs, and leave it unchanged on success. Note that these
- * versions are void (ie, don't return a value as such).
*/
-
extern int __get_user_1(void *);
extern int __get_user_2(void *);
extern int __get_user_4(void *);
-extern int __get_user_bad(void);
#define __get_user_x(__r2,__p,__e,__s,__i...) \
__asm__ __volatile__ ( \
@@ -131,6 +128,74 @@ extern int __get_user_bad(void);
__e; \
})
+extern int __put_user_1(void *, unsigned int);
+extern int __put_user_2(void *, unsigned int);
+extern int __put_user_4(void *, unsigned int);
+extern int __put_user_8(void *, unsigned long long);
+
+#define __put_user_x(__r2,__p,__e,__s) \
+ __asm__ __volatile__ ( \
+ __asmeq("%0", "r0") __asmeq("%2", "r2") \
+ "bl __put_user_" #__s \
+ : "=&r" (__e) \
+ : "0" (__p), "r" (__r2) \
+ : "ip", "lr", "cc")
+
+#define put_user(x,p) \
+ ({ \
+ const register typeof(*(p)) __r2 asm("r2") = (x); \
+ const register typeof(*(p)) __user *__p asm("r0") = (p);\
+ register int __e asm("r0"); \
+ switch (sizeof(*(__p))) { \
+ case 1: \
+ __put_user_x(__r2, __p, __e, 1); \
+ break; \
+ case 2: \
+ __put_user_x(__r2, __p, __e, 2); \
+ break; \
+ case 4: \
+ __put_user_x(__r2, __p, __e, 4); \
+ break; \
+ case 8: \
+ __put_user_x(__r2, __p, __e, 8); \
+ break; \
+ default: __e = __put_user_bad(); break; \
+ } \
+ __e; \
+ })
+
+#else /* CONFIG_MMU */
+
+/*
+ * uClinux has only one addr space, so has simplified address limits.
+ */
+#define USER_DS KERNEL_DS
+
+#define segment_eq(a,b) (1)
+#define __addr_ok(addr) (1)
+#define __range_ok(addr,size) (0)
+#define get_fs() (KERNEL_DS)
+
+static inline void set_fs(mm_segment_t fs)
+{
+}
+
+#define get_user(x,p) __get_user(x,p)
+#define put_user(x,p) __put_user(x,p)
+
+#endif /* CONFIG_MMU */
+
+#define access_ok(type,addr,size) (__range_ok(addr,size) == 0)
+
+/*
+ * The "__xxx" versions of the user access functions do not verify the
+ * address space - it must have been done previously with a separate
+ * "access_ok()" call.
+ *
+ * The "xxx_error" versions set the third argument to EFAULT if an
+ * error occurs, and leave it unchanged on success. Note that these
+ * versions are void (ie, don't return a value as such).
+ */
#define __get_user(x,ptr) \
({ \
long __gu_err = 0; \
@@ -212,43 +277,6 @@ do { \
: "r" (addr), "i" (-EFAULT) \
: "cc")
-extern int __put_user_1(void *, unsigned int);
-extern int __put_user_2(void *, unsigned int);
-extern int __put_user_4(void *, unsigned int);
-extern int __put_user_8(void *, unsigned long long);
-extern int __put_user_bad(void);
-
-#define __put_user_x(__r2,__p,__e,__s) \
- __asm__ __volatile__ ( \
- __asmeq("%0", "r0") __asmeq("%2", "r2") \
- "bl __put_user_" #__s \
- : "=&r" (__e) \
- : "0" (__p), "r" (__r2) \
- : "ip", "lr", "cc")
-
-#define put_user(x,p) \
- ({ \
- const register typeof(*(p)) __r2 asm("r2") = (x); \
- const register typeof(*(p)) __user *__p asm("r0") = (p);\
- register int __e asm("r0"); \
- switch (sizeof(*(__p))) { \
- case 1: \
- __put_user_x(__r2, __p, __e, 1); \
- break; \
- case 2: \
- __put_user_x(__r2, __p, __e, 2); \
- break; \
- case 4: \
- __put_user_x(__r2, __p, __e, 4); \
- break; \
- case 8: \
- __put_user_x(__r2, __p, __e, 8); \
- break; \
- default: __e = __put_user_bad(); break; \
- } \
- __e; \
- })
-
#define __put_user(x,ptr) \
({ \
long __pu_err = 0; \
@@ -354,9 +382,16 @@ do { \
: "cc")
+#ifdef CONFIG_MMU
extern unsigned long __copy_from_user(void *to, const void __user *from, unsigned long n);
extern unsigned long __copy_to_user(void __user *to, const void *from, unsigned long n);
extern unsigned long __clear_user(void __user *addr, unsigned long n);
+#else
+#define __copy_from_user(to,from,n) (memcpy(to, (void __force *)from, n), 0)
+#define __copy_to_user(to,from,n) (memcpy((void __force *)to, from, n), 0)
+#define __clear_user(addr,n) (memset((void __force *)addr, 0, n), 0)
+#endif
+
extern unsigned long __strncpy_from_user(char *to, const char __user *from, unsigned long count);
extern unsigned long __strnlen_user(const char __user *s, long n);
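The split keeps the calling convention documented in the uaccess.h comment above
(one access_ok() check, then the unchecked __xxx accessors) working on both MMU and
nommu builds; on nommu the check is constant-true and the accessors compile to plain
loads and stores. A minimal kernel-style sketch of that pattern, assuming the 2.6-era
uaccess API; the struct and function names are hypothetical:

#include <linux/errno.h>
#include <asm/uaccess.h>

/* Hypothetical argument block living in user memory. */
struct demo_args {
	int in;
	int out;
};

static int demo_copy_args(struct demo_args __user *uarg)
{
	int val;

	/* One range check up front... */
	if (!access_ok(VERIFY_WRITE, uarg, sizeof(*uarg)))
		return -EFAULT;

	/* ...then the unchecked single-value accessors. */
	if (__get_user(val, &uarg->in))
		return -EFAULT;
	if (__put_user(val, &uarg->out))
		return -EFAULT;

	return 0;
}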