author     Robin Murphy <robin.murphy@arm.com>    2018-02-05 15:34:18 +0000
committer  Alex Shi <alex.shi@linaro.org>         2018-02-26 15:22:15 +0800
commit     8508fac0ac76446b0b29c400121a14aa3a6ac5d6 (patch)
tree       7c69ebe0e18425d1c5e045cfa70c79196c6fd231
parent     1197d391b9b26ab55d69dffe10c6326389af3a93 (diff)
arm64: Make USER_DS an inclusive limit
commit 51369e398d0d upstream.

Currently, USER_DS represents an exclusive limit while KERNEL_DS is
inclusive. In order to do some clever trickery for speculation-safe
masking, we need them both to behave equivalently - there aren't enough
bits to make KERNEL_DS exclusive, so we have precisely one option. This
also happens to correct a longstanding false negative for a range
ending on the very top byte of kernel memory.

Mark Rutland points out that we've actually got the semantics of
addresses vs. segments muddled up in most of the places we need to
amend, so shuffle the {USER,KERNEL}_DS definitions around such that we
can correct those properly instead of just pasting "-1"s everywhere.

Signed-off-by: Robin Murphy <robin.murphy@arm.com>
Signed-off-by: Will Deacon <will.deacon@arm.com>
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
(cherry picked from commit 83b20dff71ea949431cf57c6aebaaf7ebd5c1991)
Signed-off-by: Alex Shi <alex.shi@linaro.org>

Conflicts:
	force replace __range_ok and add asm/processor.h
	  in arch/arm64/include/asm/uaccess.h
	using old macro TI_ADDR_LIMIT instead of TSK_TI_ADDR_LIMIT
	  in arch/arm64/kernel/entry.S
	manual change USER_DS to TASK_SIZE in arch/arm64/mm/fault.c
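As a reading aid (not part of the patch), the semantic change can be modelled
in plain C. The sketch below uses 128-bit arithmetic to stand in for the
65-bit test the commit message describes; range_ok_old() and range_ok_new()
are hypothetical names for illustration, not kernel functions.

#include <stdbool.h>
#include <stdint.h>

/* Illustrative model only, not kernel code. */

/*
 * Old test: addr + size must not exceed the limit, so the limit address
 * itself is never accessible (exclusive semantics).
 */
static bool range_ok_old(uint64_t addr, uint64_t size, uint64_t limit)
{
	return (unsigned __int128)addr + size <= limit;
}

/*
 * New test: the limit is the last accessible address (inclusive semantics),
 * so the range may end exactly one byte past it.
 */
static bool range_ok_new(uint64_t addr, uint64_t size, uint64_t limit)
{
	return (unsigned __int128)addr + size <= (unsigned __int128)limit + 1;
}

With KERNEL_DS == ~0UL, a range ending on the very top byte of memory has
addr + size == 2^64: range_ok_old() rejects it (the longstanding false
negative mentioned above), while range_ok_new() accepts it. USER_DS
correspondingly moves from TASK_SIZE_64 to TASK_SIZE_64 - 1 so that both
segments use the same inclusive convention.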
-rw-r--r--  arch/arm64/include/asm/processor.h  |  3
-rw-r--r--  arch/arm64/include/asm/uaccess.h    | 46
-rw-r--r--  arch/arm64/kernel/entry.S           |  2
-rw-r--r--  arch/arm64/mm/fault.c               |  2
4 files changed, 32 insertions, 21 deletions
diff --git a/arch/arm64/include/asm/processor.h b/arch/arm64/include/asm/processor.h
index 4258f4d594b5..5917147af0c4 100644
--- a/arch/arm64/include/asm/processor.h
+++ b/arch/arm64/include/asm/processor.h
@@ -21,6 +21,9 @@
#define TASK_SIZE_64 (UL(1) << VA_BITS)
+#define KERNEL_DS UL(-1)
+#define USER_DS (TASK_SIZE_64 - 1)
+
#ifndef __ASSEMBLY__
/*
diff --git a/arch/arm64/include/asm/uaccess.h b/arch/arm64/include/asm/uaccess.h
index 09c9b5983050..7b1eb49f4772 100644
--- a/arch/arm64/include/asm/uaccess.h
+++ b/arch/arm64/include/asm/uaccess.h
@@ -36,6 +36,7 @@
#include <asm/errno.h>
#include <asm/memory.h>
#include <asm/compiler.h>
+#include <asm/processor.h>
#define VERIFY_READ 0
#define VERIFY_WRITE 1
@@ -62,10 +63,7 @@ struct exception_table_entry
extern int fixup_exception(struct pt_regs *regs);
-#define KERNEL_DS (-1UL)
#define get_ds() (KERNEL_DS)
-
-#define USER_DS TASK_SIZE_64
#define get_fs() (current_thread_info()->addr_limit)
static inline void set_fs(mm_segment_t fs)
@@ -90,22 +88,32 @@ static inline void set_fs(mm_segment_t fs)
* Returns 1 if the range is valid, 0 otherwise.
*
* This is equivalent to the following test:
- * (u65)addr + (u65)size <= current->addr_limit
- *
- * This needs 65-bit arithmetic.
+ * (u65)addr + (u65)size <= (u65)current->addr_limit + 1
*/
-#define __range_ok(addr, size) \
-({ \
- unsigned long __addr = (unsigned long __force)(addr); \
- unsigned long flag, roksum; \
- __chk_user_ptr(addr); \
- asm("adds %1, %1, %3; ccmp %1, %4, #2, cc; cset %0, ls" \
- : "=&r" (flag), "=&r" (roksum) \
- : "1" (__addr), "Ir" (size), \
- "r" (current_thread_info()->addr_limit) \
- : "cc"); \
- flag; \
-})
+static inline unsigned long __range_ok(unsigned long addr, unsigned long size)
+{
+ unsigned long limit = current_thread_info()->addr_limit;
+
+ __chk_user_ptr(addr);
+ asm volatile(
+ // A + B <= C + 1 for all A,B,C, in four easy steps:
+ // 1: X = A + B; X' = X % 2^64
+ " adds %0, %0, %2\n"
+ // 2: Set C = 0 if X > 2^64, to guarantee X' > C in step 4
+ " csel %1, xzr, %1, hi\n"
+ // 3: Set X' = ~0 if X >= 2^64. For X == 2^64, this decrements X'
+ // to compensate for the carry flag being set in step 4. For
+ // X > 2^64, X' merely has to remain nonzero, which it does.
+ " csinv %0, %0, xzr, cc\n"
+ // 4: For X < 2^64, this gives us X' - C - 1 <= 0, where the -1
+ // comes from the carry in being clear. Otherwise, we are
+ // testing X' - C == 0, subject to the previous adjustments.
+ " sbcs xzr, %0, %1\n"
+ " cset %0, ls\n"
+ : "+r" (addr), "+r" (limit) : "Ir" (size) : "cc");
+
+ return addr;
+}
/*
* When dealing with data aborts, watchpoints, or instruction traps we may end
@@ -114,7 +122,7 @@ static inline void set_fs(mm_segment_t fs)
*/
#define untagged_addr(addr) sign_extend64(addr, 55)
-#define access_ok(type, addr, size) __range_ok(addr, size)
+#define access_ok(type, addr, size) __range_ok((unsigned long)(addr), size)
#define user_addr_max get_fs
#define _ASM_EXTABLE(from, to) \
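For reference, the step-by-step comments in the new __range_ok() above all
boil down to the 65-bit predicate A + B <= C + 1. The sketch below (again
hypothetical, not part of the patch) evaluates that predicate with GCC/Clang
__int128 arithmetic and exercises the three cases the comments distinguish;
range_ok_model() and the VA_BITS=39 value used for USER_DS are assumptions
for illustration only.

#include <assert.h>
#include <stdint.h>

/*
 * Hypothetical model of the 65-bit test that the adds/csel/csinv/sbcs/cset
 * sequence computes: addr + size <= limit + 1, with no 64-bit wrap-around.
 */
static int range_ok_model(uint64_t addr, uint64_t size, uint64_t limit)
{
	return (unsigned __int128)addr + size <= (unsigned __int128)limit + 1;
}

int main(void)
{
	const uint64_t user_ds = (1UL << 39) - 1;	/* TASK_SIZE_64 - 1, assuming VA_BITS=39 */
	const uint64_t kernel_ds = ~0UL;

	/* X < 2^64: an ordinary user range well below the limit. */
	assert(range_ok_model(0x1000, 0x1000, user_ds));

	/* X == 2^64: a range ending on the very top byte of kernel memory. */
	assert(range_ok_model(~0UL - 0xff, 0x100, kernel_ds));

	/* X > 2^64: a wrapping range must be rejected whatever the limit. */
	assert(!range_ok_model(~0UL, 0x200, kernel_ds));

	return 0;
}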
diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S
index 478f0feb200f..6915697b4eb0 100644
--- a/arch/arm64/kernel/entry.S
+++ b/arch/arm64/kernel/entry.S
@@ -103,7 +103,7 @@
/* Save the task's original addr_limit and set USER_DS (TASK_SIZE_64) */
ldr x20, [tsk, #TI_ADDR_LIMIT]
str x20, [sp, #S_ORIG_ADDR_LIMIT]
- mov x20, #TASK_SIZE_64
+ mov x20, #USER_DS
str x20, [tsk, #TI_ADDR_LIMIT]
/* No need to reset PSTATE.UAO, hardware's already set it to 0 for us */
.endif /* \el == 0 */
diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c
index 403fe9e57135..4df70c9fd762 100644
--- a/arch/arm64/mm/fault.c
+++ b/arch/arm64/mm/fault.c
@@ -332,7 +332,7 @@ static int __kprobes do_page_fault(unsigned long addr, unsigned int esr,
mm_flags |= FAULT_FLAG_WRITE;
}
- if (is_permission_fault(esr) && (addr < USER_DS)) {
+ if (is_permission_fault(esr) && (addr < TASK_SIZE)) {
/* regs->orig_addr_limit may be 0 if we entered from EL0 */
if (regs->orig_addr_limit == KERNEL_DS)
die("Accessing user space memory with fs=KERNEL_DS", regs, esr);