author    Dmitry Shmidt <dimitrysh@google.com>  2017-01-09 10:12:53 -0800
committer Dmitry Shmidt <dimitrysh@google.com>  2017-01-09 10:12:53 -0800
commit    cf0533acdaa9f10777759a36ce3e387019dda9a3
tree      3c4fcaf56ac379c2a480645a928f7492bb862539
parent    712517177d3f0df8ff8e402241d0f5a69208a95b
parent    d5ed6f6f5f92d98f1f4329ff1c5a19fbb0f25550
Merge remote-tracking branch 'common/android-4.4' into android-4.4.y
 -rw-r--r--  android/configs/android-recommended.cfg  |   1
 -rw-r--r--  arch/arm64/Kconfig                        |   2
 -rw-r--r--  arch/arm64/include/asm/alternative.h      |  70
 -rw-r--r--  arch/arm64/include/asm/assembler.h        |  17
 -rw-r--r--  arch/arm64/include/asm/barrier.h          |   3
 -rw-r--r--  arch/arm64/include/asm/futex.h            |   3
 -rw-r--r--  arch/arm64/include/asm/mmu_context.h      |   6
 -rw-r--r--  arch/arm64/include/asm/ptrace.h           |   2
 -rw-r--r--  arch/arm64/include/asm/thread_info.h      |   2
 -rw-r--r--  arch/arm64/include/asm/uaccess.h          | 108
 -rw-r--r--  arch/arm64/kernel/armv8_deprecated.c      |   1
 -rw-r--r--  arch/arm64/kernel/asm-offsets.c           |   6
 -rw-r--r--  arch/arm64/kernel/entry.S                 |  18
 -rw-r--r--  arch/arm64/lib/clear_user.S               |   3
 -rw-r--r--  arch/arm64/lib/copy_from_user.S           |   3
 -rw-r--r--  arch/arm64/lib/copy_in_user.S             |   3
 -rw-r--r--  arch/arm64/lib/copy_to_user.S             |   3
 -rw-r--r--  arch/arm64/mm/cache.S                     |   6
 -rw-r--r--  arch/arm64/mm/fault.c                     |   8
 -rw-r--r--  arch/arm64/xen/hypercall.S                |   8
 -rw-r--r--  init/Kconfig                              |   1
 -rw-r--r--  net/socket.c                              |   2
 22 files changed, 155 insertions(+), 121 deletions(-)
diff --git a/android/configs/android-recommended.cfg b/android/configs/android-recommended.cfg
index 3fd0b13488a1..70aaae17ad29 100644
--- a/android/configs/android-recommended.cfg
+++ b/android/configs/android-recommended.cfg
@@ -8,6 +8,7 @@
# CONFIG_VT is not set
CONFIG_ANDROID_TIMED_GPIO=y
CONFIG_ARM_KERNMEM_PERMS=y
+CONFIG_ARM64_SW_TTBR0_PAN=y
CONFIG_BACKLIGHT_LCD_SUPPORT=y
CONFIG_BLK_DEV_LOOP=y
CONFIG_BLK_DEV_RAM=y
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index 47058d0871d9..77bea9751a31 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -716,7 +716,7 @@ config SETEND_EMULATION
endif
config ARM64_SW_TTBR0_PAN
- bool "Emulate Priviledged Access Never using TTBR0_EL1 switching"
+ bool "Emulate Privileged Access Never using TTBR0_EL1 switching"
help
Enabling this option prevents the kernel from accessing
user-space memory directly by pointing TTBR0_EL1 to a reserved
diff --git a/arch/arm64/include/asm/alternative.h b/arch/arm64/include/asm/alternative.h
index 8746ff6abd77..55101bd86b98 100644
--- a/arch/arm64/include/asm/alternative.h
+++ b/arch/arm64/include/asm/alternative.h
@@ -2,6 +2,7 @@
#define __ASM_ALTERNATIVE_H
#include <asm/cpufeature.h>
+#include <asm/insn.h>
#ifndef __ASSEMBLY__
@@ -90,34 +91,55 @@ void apply_alternatives(void *start, size_t length);
.endm
/*
- * Begin an alternative code sequence.
+ * Alternative sequences
+ *
+ * The code for the case where the capability is not present will be
+ * assembled and linked as normal. There are no restrictions on this
+ * code.
+ *
+ * The code for the case where the capability is present will be
+ * assembled into a special section to be used for dynamic patching.
+ * Code for that case must:
+ *
+ * 1. Be exactly the same length (in bytes) as the default code
+ * sequence.
*
- * The code that follows this macro will be assembled and linked as
- * normal. There are no restrictions on this code.
+ * 2. Not contain a branch target that is used outside of the
+ * alternative sequence it is defined in (branches into an
+ * alternative sequence are not fixed up).
+ */
+
+/*
+ * Begin an alternative code sequence.
*/
.macro alternative_if_not cap
+ .set .Lasm_alt_mode, 0
.pushsection .altinstructions, "a"
altinstruction_entry 661f, 663f, \cap, 662f-661f, 664f-663f
.popsection
661:
.endm
+.macro alternative_if cap
+ .set .Lasm_alt_mode, 1
+ .pushsection .altinstructions, "a"
+ altinstruction_entry 663f, 661f, \cap, 664f-663f, 662f-661f
+ .popsection
+ .pushsection .altinstr_replacement, "ax"
+ .align 2 /* So GAS knows label 661 is suitably aligned */
+661:
+.endm
+
/*
- * Provide the alternative code sequence.
- *
- * The code that follows this macro is assembled into a special
- * section to be used for dynamic patching. Code that follows this
- * macro must:
- *
- * 1. Be exactly the same length (in bytes) as the default code
- * sequence.
- *
- * 2. Not contain a branch target that is used outside of the
- * alternative sequence it is defined in (branches into an
- * alternative sequence are not fixed up).
+ * Provide the other half of the alternative code sequence.
*/
.macro alternative_else
-662: .pushsection .altinstr_replacement, "ax"
+662:
+ .if .Lasm_alt_mode==0
+ .pushsection .altinstr_replacement, "ax"
+ .else
+ .popsection
+ .endif
663:
.endm
@@ -125,11 +147,25 @@ void apply_alternatives(void *start, size_t length);
* Complete an alternative code sequence.
*/
.macro alternative_endif
-664: .popsection
+664:
+ .if .Lasm_alt_mode==0
+ .popsection
+ .endif
.org . - (664b-663b) + (662b-661b)
.org . - (662b-661b) + (664b-663b)
.endm
+/*
+ * Provides a trivial alternative or default sequence consisting solely
+ * of NOPs. The number of NOPs is chosen automatically to match the
+ * previous case.
+ */
+.macro alternative_else_nop_endif
+alternative_else
+ nops (662b-661b) / AARCH64_INSN_SIZE
+alternative_endif
+.endm
+
#define _ALTERNATIVE_CFG(insn1, insn2, cap, cfg, ...) \
alternative_insn insn1, insn2, cap, IS_ENABLED(cfg)
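
Beyond the asm-side macros, the same header's C interface keeps the ALTERNATIVE() string macro (built on the _ALTERNATIVE_CFG definition above), which follows the same contract the new comment spells out: the patched sequence must be exactly as long as the default one. A minimal sketch, assuming kernel context with <asm/alternative.h> and <asm/cpufeature.h> available; the capability used here is chosen purely for illustration:

/*
 * Illustrative only: "nop" is emitted by default and patched to "yield"
 * at boot if the named capability is detected. Both are single 4-byte
 * instructions, satisfying the equal-length rule described above.
 */
static inline void relax_if_cap(void)
{
	asm volatile(ALTERNATIVE("nop", "yield", ARM64_HAS_PAN));
}
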
diff --git a/arch/arm64/include/asm/assembler.h b/arch/arm64/include/asm/assembler.h
index aeb4554b3af3..d8855ca6068a 100644
--- a/arch/arm64/include/asm/assembler.h
+++ b/arch/arm64/include/asm/assembler.h
@@ -108,6 +108,15 @@
.endm
/*
+ * NOP sequence
+ */
+ .macro nops, num
+ .rept \num
+ nop
+ .endr
+ .endm
+
+/*
* Emit an entry into the exception table
*/
.macro _asm_extable, from, to
@@ -383,15 +392,11 @@ alternative_endif
*/
.macro post_ttbr0_update_workaround
#ifdef CONFIG_CAVIUM_ERRATUM_27456
-alternative_if_not ARM64_WORKAROUND_CAVIUM_27456
- nop
- nop
- nop
-alternative_else
+alternative_if ARM64_WORKAROUND_CAVIUM_27456
ic iallu
dsb nsh
isb
-alternative_endif
+alternative_else_nop_endif
#endif
.endm
diff --git a/arch/arm64/include/asm/barrier.h b/arch/arm64/include/asm/barrier.h
index 9622eb48f894..c5dbc5cb8f10 100644
--- a/arch/arm64/include/asm/barrier.h
+++ b/arch/arm64/include/asm/barrier.h
@@ -20,6 +20,9 @@
#ifndef __ASSEMBLY__
+#define __nops(n) ".rept " #n "\nnop\n.endr\n"
+#define nops(n) asm volatile(__nops(n))
+
#define sev() asm volatile("sev" : : : "memory")
#define wfe() asm volatile("wfe" : : : "memory")
#define wfi() asm volatile("wfi" : : : "memory")
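
The new __nops(n)/nops(n) helpers give C code a way to emit an exact number of NOP instructions, mirroring the nops assembler macro added to assembler.h in this same merge. A minimal sketch, assuming kernel context with <asm/barrier.h> included:

/*
 * nops(3) expands to a single asm volatile statement whose template is
 * ".rept 3\nnop\n.endr\n", so the assembler emits exactly three NOPs
 * inline; there are no operands and no clobbers.
 */
static inline void emit_three_nops(void)
{
	nops(3);
}
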
diff --git a/arch/arm64/include/asm/futex.h b/arch/arm64/include/asm/futex.h
index 71dfa3b42313..85c4a8981d47 100644
--- a/arch/arm64/include/asm/futex.h
+++ b/arch/arm64/include/asm/futex.h
@@ -21,10 +21,7 @@
#include <linux/futex.h>
#include <linux/uaccess.h>
-#include <asm/alternative.h>
-#include <asm/cpufeature.h>
#include <asm/errno.h>
-#include <asm/sysreg.h>
#define __futex_atomic_op(insn, ret, oldval, uaddr, tmp, oparg) \
do { \
diff --git a/arch/arm64/include/asm/mmu_context.h b/arch/arm64/include/asm/mmu_context.h
index 4a32fd5f101d..e53d30c6f779 100644
--- a/arch/arm64/include/asm/mmu_context.h
+++ b/arch/arm64/include/asm/mmu_context.h
@@ -218,9 +218,11 @@ switch_mm(struct mm_struct *prev, struct mm_struct *next,
* Update the saved TTBR0_EL1 of the scheduled-in task as the previous
* value may have not been initialised yet (activate_mm caller) or the
* ASID has changed since the last run (following the context switch
- * of another thread of the same process).
+ * of another thread of the same process). Avoid setting the reserved
+ * TTBR0_EL1 to swapper_pg_dir (init_mm; e.g. via idle_task_exit).
*/
- update_saved_ttbr0(tsk, next);
+ if (next != &init_mm)
+ update_saved_ttbr0(tsk, next);
}
#define deactivate_mm(tsk,mm) do { } while (0)
diff --git a/arch/arm64/include/asm/ptrace.h b/arch/arm64/include/asm/ptrace.h
index b7b4ff488951..7f94755089e2 100644
--- a/arch/arm64/include/asm/ptrace.h
+++ b/arch/arm64/include/asm/ptrace.h
@@ -21,8 +21,6 @@
#include <uapi/asm/ptrace.h>
-#define _PSR_PAN_BIT 22
-
/* Current Exception Level values, as contained in CurrentEL */
#define CurrentEL_EL1 (1 << 2)
#define CurrentEL_EL2 (2 << 2)
diff --git a/arch/arm64/include/asm/thread_info.h b/arch/arm64/include/asm/thread_info.h
index b3325a9cb90f..794d22603f04 100644
--- a/arch/arm64/include/asm/thread_info.h
+++ b/arch/arm64/include/asm/thread_info.h
@@ -47,10 +47,10 @@ typedef unsigned long mm_segment_t;
struct thread_info {
unsigned long flags; /* low level flags */
mm_segment_t addr_limit; /* address limit */
+ struct task_struct *task; /* main task structure */
#ifdef CONFIG_ARM64_SW_TTBR0_PAN
u64 ttbr0; /* saved TTBR0_EL1 */
#endif
- struct task_struct *task; /* main task structure */
int preempt_count; /* 0 => preemptable, <0 => bug */
int cpu; /* cpu */
};
diff --git a/arch/arm64/include/asm/uaccess.h b/arch/arm64/include/asm/uaccess.h
index c37c064d7cdd..efafdf39cb3b 100644
--- a/arch/arm64/include/asm/uaccess.h
+++ b/arch/arm64/include/asm/uaccess.h
@@ -18,6 +18,10 @@
#ifndef __ASM_UACCESS_H
#define __ASM_UACCESS_H
+#include <asm/alternative.h>
+#include <asm/kernel-pgtable.h>
+#include <asm/sysreg.h>
+
#ifndef __ASSEMBLY__
/*
@@ -26,11 +30,8 @@
#include <linux/string.h>
#include <linux/thread_info.h>
-#include <asm/alternative.h>
#include <asm/cpufeature.h>
-#include <asm/kernel-pgtable.h>
#include <asm/ptrace.h>
-#include <asm/sysreg.h>
#include <asm/errno.h>
#include <asm/memory.h>
#include <asm/compiler.h>
@@ -130,7 +131,7 @@ static inline void set_fs(mm_segment_t fs)
* User access enabling/disabling.
*/
#ifdef CONFIG_ARM64_SW_TTBR0_PAN
-static inline void uaccess_ttbr0_disable(void)
+static inline void __uaccess_ttbr0_disable(void)
{
unsigned long ttbr;
@@ -140,7 +141,7 @@ static inline void uaccess_ttbr0_disable(void)
isb();
}
-static inline void uaccess_ttbr0_enable(void)
+static inline void __uaccess_ttbr0_enable(void)
{
unsigned long flags;
@@ -154,30 +155,44 @@ static inline void uaccess_ttbr0_enable(void)
isb();
local_irq_restore(flags);
}
+
+static inline bool uaccess_ttbr0_disable(void)
+{
+ if (!system_uses_ttbr0_pan())
+ return false;
+ __uaccess_ttbr0_disable();
+ return true;
+}
+
+static inline bool uaccess_ttbr0_enable(void)
+{
+ if (!system_uses_ttbr0_pan())
+ return false;
+ __uaccess_ttbr0_enable();
+ return true;
+}
#else
-static inline void uaccess_ttbr0_disable(void)
+static inline bool uaccess_ttbr0_disable(void)
{
+ return false;
}
-static inline void uaccess_ttbr0_enable(void)
+static inline bool uaccess_ttbr0_enable(void)
{
+ return false;
}
#endif
#define __uaccess_disable(alt) \
do { \
- if (system_uses_ttbr0_pan()) \
- uaccess_ttbr0_disable(); \
- else \
+ if (!uaccess_ttbr0_disable()) \
asm(ALTERNATIVE("nop", SET_PSTATE_PAN(1), alt, \
CONFIG_ARM64_PAN)); \
} while (0)
#define __uaccess_enable(alt) \
do { \
- if (system_uses_ttbr0_pan()) \
- uaccess_ttbr0_enable(); \
- else \
+ if (!uaccess_ttbr0_enable()) \
asm(ALTERNATIVE("nop", SET_PSTATE_PAN(0), alt, \
CONFIG_ARM64_PAN)); \
} while (0)
@@ -407,69 +422,62 @@ extern __must_check long strnlen_user(const char __user *str, long n);
#else /* __ASSEMBLY__ */
-#include <asm/alternative.h>
#include <asm/assembler.h>
-#include <asm/kernel-pgtable.h>
/*
* User access enabling/disabling macros.
*/
- .macro uaccess_ttbr0_disable, tmp1
+#ifdef CONFIG_ARM64_SW_TTBR0_PAN
+ .macro __uaccess_ttbr0_disable, tmp1
mrs \tmp1, ttbr1_el1 // swapper_pg_dir
add \tmp1, \tmp1, #SWAPPER_DIR_SIZE // reserved_ttbr0 at the end of swapper_pg_dir
msr ttbr0_el1, \tmp1 // set reserved TTBR0_EL1
isb
.endm
- .macro uaccess_ttbr0_enable, tmp1
+ .macro __uaccess_ttbr0_enable, tmp1
get_thread_info \tmp1
- ldr \tmp1, [\tmp1, #TI_TTBR0] // load saved TTBR0_EL1
+ ldr \tmp1, [\tmp1, #TSK_TI_TTBR0] // load saved TTBR0_EL1
msr ttbr0_el1, \tmp1 // set the non-PAN TTBR0_EL1
isb
.endm
+ .macro uaccess_ttbr0_disable, tmp1
+alternative_if_not ARM64_HAS_PAN
+ __uaccess_ttbr0_disable \tmp1
+alternative_else_nop_endif
+ .endm
+
+ .macro uaccess_ttbr0_enable, tmp1, tmp2
+alternative_if_not ARM64_HAS_PAN
+ save_and_disable_irq \tmp2 // avoid preemption
+ __uaccess_ttbr0_enable \tmp1
+ restore_irq \tmp2
+alternative_else_nop_endif
+ .endm
+#else
+ .macro uaccess_ttbr0_disable, tmp1
+ .endm
+
+ .macro uaccess_ttbr0_enable, tmp1, tmp2
+ .endm
+#endif
+
/*
* These macros are no-ops when UAO is present.
*/
.macro uaccess_disable_not_uao, tmp1
-#ifdef CONFIG_ARM64_SW_TTBR0_PAN
-alternative_if_not ARM64_HAS_PAN
uaccess_ttbr0_disable \tmp1
-alternative_else
- nop
- nop
- nop
- nop
-alternative_endif
-#endif
-alternative_if_not ARM64_ALT_PAN_NOT_UAO
- nop
-alternative_else
+alternative_if ARM64_ALT_PAN_NOT_UAO
SET_PSTATE_PAN(1)
-alternative_endif
+alternative_else_nop_endif
.endm
.macro uaccess_enable_not_uao, tmp1, tmp2
-#ifdef CONFIG_ARM64_SW_TTBR0_PAN
-alternative_if_not ARM64_HAS_PAN
- save_and_disable_irq \tmp2 // avoid preemption
- uaccess_ttbr0_enable \tmp1
- restore_irq \tmp2
-alternative_else
- nop
- nop
- nop
- nop
- nop
- nop
- nop
-alternative_endif
-#endif
-alternative_if_not ARM64_ALT_PAN_NOT_UAO
- nop
-alternative_else
+ uaccess_ttbr0_enable \tmp1, \tmp2
+alternative_if ARM64_ALT_PAN_NOT_UAO
SET_PSTATE_PAN(0)
-alternative_endif
+alternative_else_nop_endif
.endm
#endif /* __ASSEMBLY__ */
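
The net effect of the uaccess.h rework: uaccess_ttbr0_enable()/uaccess_ttbr0_disable() now report whether SW TTBR0 PAN handled the transition, so __uaccess_enable/__uaccess_disable collapse the old if/else into a single negated call in front of the ALTERNATIVE-patched PSTATE.PAN write. A sketch of that pattern as a plain function, assuming kernel context; the wrapper name is illustrative, not part of the patch:

/*
 * When CONFIG_ARM64_SW_TTBR0_PAN is active and hardware PAN is not in
 * use, uaccess_ttbr0_enable() restores the task's TTBR0_EL1 and returns
 * true, so the PSTATE.PAN clear is skipped; otherwise it returns false
 * and the runtime-patched instruction clears PAN where supported.
 */
static inline void sketch_uaccess_enable(void)
{
	if (!uaccess_ttbr0_enable())
		asm(ALTERNATIVE("nop", SET_PSTATE_PAN(0),
				ARM64_ALT_PAN_NOT_UAO, CONFIG_ARM64_PAN));
}
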
diff --git a/arch/arm64/kernel/armv8_deprecated.c b/arch/arm64/kernel/armv8_deprecated.c
index 562f7ddb158b..74dc6c1d97ee 100644
--- a/arch/arm64/kernel/armv8_deprecated.c
+++ b/arch/arm64/kernel/armv8_deprecated.c
@@ -14,7 +14,6 @@
#include <linux/slab.h>
#include <linux/sysctl.h>
-#include <asm/alternative.h>
#include <asm/cpufeature.h>
#include <asm/insn.h>
#include <asm/opcodes.h>
diff --git a/arch/arm64/kernel/asm-offsets.c b/arch/arm64/kernel/asm-offsets.c
index d0ec987dba5b..40ef661bd3a5 100644
--- a/arch/arm64/kernel/asm-offsets.c
+++ b/arch/arm64/kernel/asm-offsets.c
@@ -36,11 +36,11 @@ int main(void)
DEFINE(TI_FLAGS, offsetof(struct thread_info, flags));
DEFINE(TI_PREEMPT, offsetof(struct thread_info, preempt_count));
DEFINE(TI_ADDR_LIMIT, offsetof(struct thread_info, addr_limit));
-#ifdef CONFIG_ARM64_SW_TTBR0_PAN
- DEFINE(TI_TTBR0, offsetof(struct thread_info, ttbr0));
-#endif
DEFINE(TI_TASK, offsetof(struct thread_info, task));
DEFINE(TI_CPU, offsetof(struct thread_info, cpu));
+#ifdef CONFIG_ARM64_SW_TTBR0_PAN
+ DEFINE(TSK_TI_TTBR0, offsetof(struct thread_info, ttbr0));
+#endif
BLANK();
DEFINE(THREAD_CPU_CONTEXT, offsetof(struct task_struct, thread.cpu_context));
BLANK();
diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S
index eb185c93dfc6..d6f10ab36c15 100644
--- a/arch/arm64/kernel/entry.S
+++ b/arch/arm64/kernel/entry.S
@@ -121,11 +121,9 @@
* feature as all TTBR0_EL1 accesses are disabled, not just those to
* user mappings.
*/
-alternative_if_not ARM64_HAS_PAN
- nop
-alternative_else
+alternative_if ARM64_HAS_PAN
b 1f // skip TTBR0 PAN
-alternative_endif
+alternative_else_nop_endif
.if \el != 0
mrs x21, ttbr0_el1
@@ -135,7 +133,7 @@ alternative_endif
and x23, x23, #~PSR_PAN_BIT // Clear the emulated PAN in the saved SPSR
.endif
- uaccess_ttbr0_disable x21
+ __uaccess_ttbr0_disable x21
1:
#endif
@@ -184,17 +182,15 @@ alternative_endif
* Restore access to TTBR0_EL1. If returning to EL0, no need for SPSR
* PAN bit checking.
*/
-alternative_if_not ARM64_HAS_PAN
- nop
-alternative_else
+alternative_if ARM64_HAS_PAN
b 2f // skip TTBR0 PAN
-alternative_endif
+alternative_else_nop_endif
.if \el != 0
- tbnz x22, #_PSR_PAN_BIT, 1f // Skip re-enabling TTBR0 access if previously disabled
+ tbnz x22, #22, 1f // Skip re-enabling TTBR0 access if the PSR_PAN_BIT is set
.endif
- uaccess_ttbr0_enable x0
+ __uaccess_ttbr0_enable x0
.if \el == 0
/*
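
Why the tbnz operand becomes a literal #22: the private _PSR_PAN_BIT bit-number define is removed from <asm/ptrace.h> in this merge, and the UAPI PSR_PAN_BIT that remains is a mask rather than a bit index, while tbnz takes the bit number. The equivalent C-side test on a saved SPSR value, as a sketch (kernel context assumed):

/*
 * PSR_PAN_BIT in the UAPI <asm/ptrace.h> is the mask 0x00400000, i.e.
 * (1 << 22), which is why the assembly above tests bit number 22.
 */
static inline bool spsr_pan_was_set(unsigned long spsr)
{
	return spsr & PSR_PAN_BIT;
}
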
diff --git a/arch/arm64/lib/clear_user.S b/arch/arm64/lib/clear_user.S
index 08b5f18ba604..d7150e30438a 100644
--- a/arch/arm64/lib/clear_user.S
+++ b/arch/arm64/lib/clear_user.S
@@ -17,9 +17,6 @@
*/
#include <linux/linkage.h>
-#include <asm/assembler.h>
-#include <asm/cpufeature.h>
-#include <asm/sysreg.h>
#include <asm/uaccess.h>
.text
diff --git a/arch/arm64/lib/copy_from_user.S b/arch/arm64/lib/copy_from_user.S
index 6505ec81f1da..90154f3f7f2a 100644
--- a/arch/arm64/lib/copy_from_user.S
+++ b/arch/arm64/lib/copy_from_user.S
@@ -16,10 +16,7 @@
#include <linux/linkage.h>
-#include <asm/assembler.h>
#include <asm/cache.h>
-#include <asm/cpufeature.h>
-#include <asm/sysreg.h>
#include <asm/uaccess.h>
/*
diff --git a/arch/arm64/lib/copy_in_user.S b/arch/arm64/lib/copy_in_user.S
index 9b04ff3ab610..718b1c4e2f85 100644
--- a/arch/arm64/lib/copy_in_user.S
+++ b/arch/arm64/lib/copy_in_user.S
@@ -18,10 +18,7 @@
#include <linux/linkage.h>
-#include <asm/assembler.h>
#include <asm/cache.h>
-#include <asm/cpufeature.h>
-#include <asm/sysreg.h>
#include <asm/uaccess.h>
/*
diff --git a/arch/arm64/lib/copy_to_user.S b/arch/arm64/lib/copy_to_user.S
index 8077e4f34d56..e99e31c9acac 100644
--- a/arch/arm64/lib/copy_to_user.S
+++ b/arch/arm64/lib/copy_to_user.S
@@ -16,10 +16,7 @@
#include <linux/linkage.h>
-#include <asm/assembler.h>
#include <asm/cache.h>
-#include <asm/cpufeature.h>
-#include <asm/sysreg.h>
#include <asm/uaccess.h>
/*
diff --git a/arch/arm64/mm/cache.S b/arch/arm64/mm/cache.S
index 07d7352d7c38..3be2cda5dbda 100644
--- a/arch/arm64/mm/cache.S
+++ b/arch/arm64/mm/cache.S
@@ -23,6 +23,7 @@
#include <asm/assembler.h>
#include <asm/cpufeature.h>
#include <asm/alternative.h>
+#include <asm/uaccess.h>
/*
* flush_icache_range(start,end)
@@ -48,6 +49,7 @@ ENTRY(flush_icache_range)
* - end - virtual end address of region
*/
ENTRY(__flush_cache_user_range)
+ uaccess_ttbr0_enable x2, x3
dcache_line_size x2, x3
sub x3, x2, #1
bic x4, x0, x3
@@ -69,10 +71,12 @@ USER(9f, ic ivau, x4 ) // invalidate I line PoU
dsb ish
isb
mov x0, #0
+1:
+ uaccess_ttbr0_disable x1
ret
9:
mov x0, #-EFAULT
- ret
+ b 1b
ENDPROC(flush_icache_range)
ENDPROC(__flush_cache_user_range)
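
The cache.S change opens the TTBR0 uaccess window before touching user addresses and routes both the success and fault paths through a single exit that closes it again, so a faulting "ic ivau" can no longer return with user access still enabled. A C-level analogue of that single-exit shape, as a sketch (kernel context assumed); do_cmo_that_may_fault() is a hypothetical stand-in for the maintenance loop:

/*
 * Both outcomes funnel through the same uaccess_ttbr0_disable() call,
 * mirroring the new "b 1b" fault path in __flush_cache_user_range().
 */
static long sketch_flush_user_range(void __user *start, void __user *end)
{
	long ret = 0;

	uaccess_ttbr0_enable();
	if (do_cmo_that_may_fault(start, end))	/* hypothetical helper */
		ret = -EFAULT;
	uaccess_ttbr0_disable();
	return ret;
}
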
diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c
index 5dcd0f8bbec0..2c0a80ca3536 100644
--- a/arch/arm64/mm/fault.c
+++ b/arch/arm64/mm/fault.c
@@ -486,10 +486,10 @@ static const struct fault_info {
{ do_bad, SIGBUS, 0, "unknown 17" },
{ do_bad, SIGBUS, 0, "unknown 18" },
{ do_bad, SIGBUS, 0, "unknown 19" },
- { do_bad, SIGBUS, 0, "synchronous external abort (translation table walk)" },
- { do_bad, SIGBUS, 0, "synchronous external abort (translation table walk)" },
- { do_bad, SIGBUS, 0, "synchronous external abort (translation table walk)" },
- { do_bad, SIGBUS, 0, "synchronous external abort (translation table walk)" },
+ { do_bad, SIGBUS, 0, "synchronous abort (translation table walk)" },
+ { do_bad, SIGBUS, 0, "synchronous abort (translation table walk)" },
+ { do_bad, SIGBUS, 0, "synchronous abort (translation table walk)" },
+ { do_bad, SIGBUS, 0, "synchronous abort (translation table walk)" },
{ do_bad, SIGBUS, 0, "synchronous parity error" },
{ do_bad, SIGBUS, 0, "unknown 25" },
{ do_bad, SIGBUS, 0, "unknown 26" },
diff --git a/arch/arm64/xen/hypercall.S b/arch/arm64/xen/hypercall.S
index 6d6e4af1a4bf..b96db5dafec4 100644
--- a/arch/arm64/xen/hypercall.S
+++ b/arch/arm64/xen/hypercall.S
@@ -90,7 +90,6 @@ ENTRY(privcmd_call)
mov x2, x3
mov x3, x4
mov x4, x5
-#ifdef CONFIG_ARM64_SW_TTBR0_PAN
/*
* Privcmd calls are issued by the userspace. The kernel needs to
* enable access to TTBR0_EL1 as the hypervisor would issue stage 1
@@ -99,15 +98,12 @@ ENTRY(privcmd_call)
* need the explicit uaccess_enable/disable if the TTBR0 PAN emulation
* is enabled (it implies that hardware UAO and PAN disabled).
*/
- uaccess_enable_not_uao x6, x7
-#endif
+ uaccess_ttbr0_enable x6, x7
hvc XEN_IMM
-#ifdef CONFIG_ARM64_SW_TTBR0_PAN
/*
* Disable userspace access from kernel once the hyp call completed.
*/
- uaccess_disable_not_uao x6
-#endif
+ uaccess_ttbr0_disable x6
ret
ENDPROC(privcmd_call);
diff --git a/init/Kconfig b/init/Kconfig
index acb6645ffda5..445af1262134 100644
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -395,6 +395,7 @@ endchoice
config SCHED_WALT
bool "Support window based load tracking"
depends on SMP
+ depends on FAIR_GROUP_SCHED
help
This feature will allow the scheduler to maintain a tunable window
based set of metrics for tasks and runqueues. These metrics can be
diff --git a/net/socket.c b/net/socket.c
index 66d984ac2991..1f69e48b1501 100644
--- a/net/socket.c
+++ b/net/socket.c
@@ -524,7 +524,7 @@ int sockfs_setattr(struct dentry *dentry, struct iattr *iattr)
{
int err = simple_setattr(dentry, iattr);
- if (!err) {
+ if (!err && (iattr->ia_valid & ATTR_UID)) {
struct socket *sock = SOCKET_I(d_inode(dentry));
sock->sk->sk_uid = iattr->ia_uid;
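
The net/socket.c hunk makes sockfs_setattr() copy the new owner into sk_uid only when the caller is actually changing the UID (ATTR_UID set in ia_valid), rather than on every successful setattr. From userspace that path is reached by chown()/fchown() on a socket descriptor; a chmod() alone now leaves sk_uid untouched. A small userspace sketch, with an illustrative function name:

#include <sys/types.h>
#include <unistd.h>

/* fchown() on a socket fd reaches sockfs_setattr() with ATTR_UID set,
 * which is the case the patched condition still updates sk_uid for. */
int retag_socket_owner(int sockfd, uid_t new_owner)
{
	return fchown(sockfd, new_owner, (gid_t)-1);	/* group unchanged */
}
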