Diffstat (limited to 'arch/arm64')
-rw-r--r--  arch/arm64/include/asm/kvm_emulate.h    8
-rw-r--r--  arch/arm64/kernel/debug-monitors.c     21
-rw-r--r--  arch/arm64/kernel/head.S                5
-rw-r--r--  arch/arm64/kernel/perf_event.c          3
-rw-r--r--  arch/arm64/kernel/ptrace.c              6
-rw-r--r--  arch/arm64/kernel/setup.c               4
-rw-r--r--  arch/arm64/kernel/suspend.c            10
-rw-r--r--  arch/arm64/kvm/guest.c                  2
-rw-r--r--  arch/arm64/kvm/inject_fault.c           2
-rw-r--r--  arch/arm64/mm/mmu.c                     3
-rw-r--r--  arch/arm64/mm/pageattr.c                3
-rw-r--r--  arch/arm64/mm/proc-macros.S            12
-rw-r--r--  arch/arm64/mm/proc.S                    5
-rw-r--r--  arch/arm64/net/bpf_jit.h                3
-rw-r--r--  arch/arm64/net/bpf_jit_comp.c          54
15 files changed, 103 insertions, 38 deletions
diff --git a/arch/arm64/include/asm/kvm_emulate.h b/arch/arm64/include/asm/kvm_emulate.h
index 17e92f05b1fe..3ca894ecf699 100644
--- a/arch/arm64/include/asm/kvm_emulate.h
+++ b/arch/arm64/include/asm/kvm_emulate.h
@@ -99,11 +99,13 @@ static inline void vcpu_set_thumb(struct kvm_vcpu *vcpu)
*vcpu_cpsr(vcpu) |= COMPAT_PSR_T_BIT;
}
+/*
+ * vcpu_reg should always be passed a register number coming from a
+ * read of ESR_EL2. Otherwise, it may give the wrong result on AArch32
+ * with banked registers.
+ */
static inline unsigned long *vcpu_reg(const struct kvm_vcpu *vcpu, u8 reg_num)
{
- if (vcpu_mode_is_32bit(vcpu))
- return vcpu_reg32(vcpu, reg_num);
-
return (unsigned long *)&vcpu_gp_regs(vcpu)->regs.regs[reg_num];
}
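A rough illustration of the distinction the new comment draws (the helper below is hypothetical and not part of this patch): register numbers decoded from a read of ESR_EL2 already use the AArch64 view and can go straight to vcpu_reg(), while a register number taken from an AArch32 instruction must go through vcpu_reg32() so the banked copy for the current mode is picked up.

/* Hypothetical helper, only to illustrate the rule stated above. */
static inline void vcpu_set_lr(struct kvm_vcpu *vcpu, unsigned long val)
{
	if (vcpu_mode_is_32bit(vcpu))
		*vcpu_reg32(vcpu, 14) = val;	/* AArch32 LR, banked per mode */
	else
		*vcpu_reg(vcpu, 30) = val;	/* AArch64 LR (x30) */
}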
diff --git a/arch/arm64/kernel/debug-monitors.c b/arch/arm64/kernel/debug-monitors.c
index 70654d843d9b..0d1d675f2cce 100644
--- a/arch/arm64/kernel/debug-monitors.c
+++ b/arch/arm64/kernel/debug-monitors.c
@@ -184,20 +184,21 @@ static void clear_regs_spsr_ss(struct pt_regs *regs)
/* EL1 Single Step Handler hooks */
static LIST_HEAD(step_hook);
-static DEFINE_RWLOCK(step_hook_lock);
+static DEFINE_SPINLOCK(step_hook_lock);
void register_step_hook(struct step_hook *hook)
{
- write_lock(&step_hook_lock);
- list_add(&hook->node, &step_hook);
- write_unlock(&step_hook_lock);
+ spin_lock(&step_hook_lock);
+ list_add_rcu(&hook->node, &step_hook);
+ spin_unlock(&step_hook_lock);
}
void unregister_step_hook(struct step_hook *hook)
{
- write_lock(&step_hook_lock);
- list_del(&hook->node);
- write_unlock(&step_hook_lock);
+ spin_lock(&step_hook_lock);
+ list_del_rcu(&hook->node);
+ spin_unlock(&step_hook_lock);
+ synchronize_rcu();
}
/*
@@ -211,15 +212,15 @@ static int call_step_hook(struct pt_regs *regs, unsigned int esr)
struct step_hook *hook;
int retval = DBG_HOOK_ERROR;
- read_lock(&step_hook_lock);
+ rcu_read_lock();
- list_for_each_entry(hook, &step_hook, node) {
+ list_for_each_entry_rcu(hook, &step_hook, node) {
retval = hook->fn(regs, esr);
if (retval == DBG_HOOK_HANDLED)
break;
}
- read_unlock(&step_hook_lock);
+ rcu_read_unlock();
return retval;
}
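The change above is the standard rwlock-to-RCU conversion for a hook list that is walked from the single-step exception handler: writers stay serialised by a spinlock, readers only take rcu_read_lock(), and unregistration waits in synchronize_rcu() so a hook is not freed while a reader may still see it. A minimal sketch of the pattern with placeholder names (my_hook, my_hook_list) rather than the real debug-monitors symbols:

#include <linux/errno.h>
#include <linux/list.h>
#include <linux/rcupdate.h>
#include <linux/spinlock.h>

struct my_hook {
	struct list_head node;
	int (*fn)(void *data);
};

static LIST_HEAD(my_hook_list);
static DEFINE_SPINLOCK(my_hook_lock);		/* serialises writers only */

void my_hook_register(struct my_hook *hook)
{
	spin_lock(&my_hook_lock);
	list_add_rcu(&hook->node, &my_hook_list);
	spin_unlock(&my_hook_lock);
}

void my_hook_unregister(struct my_hook *hook)
{
	spin_lock(&my_hook_lock);
	list_del_rcu(&hook->node);
	spin_unlock(&my_hook_lock);
	synchronize_rcu();		/* readers are done before the caller frees hook */
}

int my_hook_call(void *data)
{
	struct my_hook *hook;
	int ret = -ENOENT;

	rcu_read_lock();		/* readers never block the writers above */
	list_for_each_entry_rcu(hook, &my_hook_list, node) {
		ret = hook->fn(data);
		if (ret == 0)
			break;
	}
	rcu_read_unlock();
	return ret;
}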
diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S
index 36aa31ff2c06..cc7435c9676e 100644
--- a/arch/arm64/kernel/head.S
+++ b/arch/arm64/kernel/head.S
@@ -566,9 +566,14 @@ CPU_LE( movk x0, #0x30d0, lsl #16 ) // Clear EE and E0E on LE systems
#endif
/* EL2 debug */
+ mrs x0, id_aa64dfr0_el1 // Check ID_AA64DFR0_EL1 PMUVer
+ sbfx x0, x0, #8, #4
+ cmp x0, #1
+ b.lt 4f // Skip if no PMU present
mrs x0, pmcr_el0 // Disable debug access traps
ubfx x0, x0, #11, #5 // to EL2 and allow access to
msr mdcr_el2, x0 // all PMU counters from EL1
+4:
/* Stage-2 translation */
msr vttbr_el2, xzr
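For reference, a hedged C rendering of what the new sbfx/cmp/b.lt sequence computes (kernel u64/s64 types assumed): ID_AA64DFR0_EL1.PMUVer lives in bits [11:8] and is extracted with sign extension, so both 0 (no PMU) and 0xf (IMPLEMENTATION DEFINED, which sign-extends to -1) fail the >= 1 test and the pmcr_el0/mdcr_el2 accesses are skipped.

/* Illustrative only; the real check is the assembly added above. */
static inline bool pmuv3_present(u64 id_aa64dfr0)
{
	s64 pmuver = (s64)(id_aa64dfr0 << 52) >> 60;	/* sbfx x0, x0, #8, #4 */

	return pmuver >= 1;	/* b.lt 4f taken when 0 (none) or -1 (IMP DEF) */
}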
diff --git a/arch/arm64/kernel/perf_event.c b/arch/arm64/kernel/perf_event.c
index fd26e57e769a..c4cb2596ede6 100644
--- a/arch/arm64/kernel/perf_event.c
+++ b/arch/arm64/kernel/perf_event.c
@@ -1242,9 +1242,6 @@ static void armv8pmu_reset(void *info)
/* Initialize & Reset PMNC: C and P bits. */
armv8pmu_pmcr_write(ARMV8_PMCR_P | ARMV8_PMCR_C);
-
- /* Disable access from userspace. */
- asm volatile("msr pmuserenr_el0, %0" :: "r" (0));
}
static int armv8_pmuv3_map_event(struct perf_event *event)
diff --git a/arch/arm64/kernel/ptrace.c b/arch/arm64/kernel/ptrace.c
index d882b833dbdb..608ac6aa497b 100644
--- a/arch/arm64/kernel/ptrace.c
+++ b/arch/arm64/kernel/ptrace.c
@@ -58,6 +58,12 @@
*/
void ptrace_disable(struct task_struct *child)
{
+ /*
+ * This would be better off in core code, but PTRACE_DETACH has
+ * grown its fair share of arch-specific warts and changing it
+ * is likely to cause regressions on obscure architectures.
+ */
+ user_disable_single_step(child);
}
#ifdef CONFIG_HAVE_HW_BREAKPOINT
diff --git a/arch/arm64/kernel/setup.c b/arch/arm64/kernel/setup.c
index 74753132c3ac..bbdb53b87e13 100644
--- a/arch/arm64/kernel/setup.c
+++ b/arch/arm64/kernel/setup.c
@@ -523,6 +523,10 @@ static int c_show(struct seq_file *m, void *v)
seq_printf(m, "processor\t: %d\n", i);
#endif
+ seq_printf(m, "BogoMIPS\t: %lu.%02lu\n",
+ loops_per_jiffy / (500000UL/HZ),
+ loops_per_jiffy / (5000UL/HZ) % 100);
+
/*
* Dump out the common processor features in a single line.
* Userspace should read the hwcaps with getauxval(AT_HWCAP)
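The two new printf arguments are plain fixed-point integer arithmetic: loops_per_jiffy / (500000/HZ) is the integer part of lpj * HZ / 500000, and loops_per_jiffy / (5000/HZ) % 100 supplies two decimal places. A small stand-alone check with made-up example values (hz and lpj below stand in for HZ and loops_per_jiffy):

#include <stdio.h>

int main(void)
{
	unsigned long hz = 100, lpj = 1234567;	/* example values only */

	/* prints "BogoMIPS	: 246.91", i.e. 1234567 * 100 / 500000 = 246.9134 */
	printf("BogoMIPS\t: %lu.%02lu\n",
	       lpj / (500000UL / hz),
	       lpj / (5000UL / hz) % 100);
	return 0;
}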
diff --git a/arch/arm64/kernel/suspend.c b/arch/arm64/kernel/suspend.c
index 53f1f8dccf6c..357418137db7 100644
--- a/arch/arm64/kernel/suspend.c
+++ b/arch/arm64/kernel/suspend.c
@@ -1,3 +1,4 @@
+#include <linux/ftrace.h>
#include <linux/percpu.h>
#include <linux/slab.h>
#include <asm/cacheflush.h>
@@ -71,6 +72,13 @@ int cpu_suspend(unsigned long arg, int (*fn)(unsigned long))
local_dbg_save(flags);
/*
+ * Function graph tracer state gets inconsistent when the kernel
+ * calls functions that never return (aka suspend finishers), hence
+ * disable graph tracing during their execution.
+ */
+ pause_graph_tracing();
+
+ /*
* mm context saved on the stack, it will be restored when
* the cpu comes out of reset through the identity mapped
* page tables, so that the thread address space is properly
@@ -111,6 +119,8 @@ int cpu_suspend(unsigned long arg, int (*fn)(unsigned long))
hw_breakpoint_restore(NULL);
}
+ unpause_graph_tracing();
+
/*
* Restore pstate flags. OS lock and mdscr have been already
* restored, so from this point onwards, debugging is fully
diff --git a/arch/arm64/kvm/guest.c b/arch/arm64/kvm/guest.c
index 9535bd555d1d..d4e04d2237c4 100644
--- a/arch/arm64/kvm/guest.c
+++ b/arch/arm64/kvm/guest.c
@@ -184,7 +184,7 @@ static int get_timer_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
u64 val;
val = kvm_arm_timer_get_reg(vcpu, reg->id);
- return copy_to_user(uaddr, &val, KVM_REG_SIZE(reg->id));
+ return copy_to_user(uaddr, &val, KVM_REG_SIZE(reg->id)) ? -EFAULT : 0;
}
/**
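The one-liner above works because copy_to_user() returns the number of bytes it failed to copy (0 on success), not an errno, so the result must be translated before it is handed back along the KVM_GET_ONE_REG ioctl path. A minimal sketch of the idiom (hypothetical helper name):

static int put_user_u64(void __user *uaddr, u64 val)
{
	if (copy_to_user(uaddr, &val, sizeof(val)))
		return -EFAULT;		/* some bytes could not be copied */
	return 0;
}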
diff --git a/arch/arm64/kvm/inject_fault.c b/arch/arm64/kvm/inject_fault.c
index 85c57158dcd9..648112e90ed5 100644
--- a/arch/arm64/kvm/inject_fault.c
+++ b/arch/arm64/kvm/inject_fault.c
@@ -48,7 +48,7 @@ static void prepare_fault32(struct kvm_vcpu *vcpu, u32 mode, u32 vect_offset)
/* Note: These now point to the banked copies */
*vcpu_spsr(vcpu) = new_spsr_value;
- *vcpu_reg(vcpu, 14) = *vcpu_pc(vcpu) + return_offset;
+ *vcpu_reg32(vcpu, 14) = *vcpu_pc(vcpu) + return_offset;
/* Branch to exception vector */
if (sctlr & (1 << 13))
diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
index 5b8b664422d3..cb34eb8bbb9d 100644
--- a/arch/arm64/mm/mmu.c
+++ b/arch/arm64/mm/mmu.c
@@ -450,6 +450,9 @@ void __init paging_init(void)
empty_zero_page = virt_to_page(zero_page);
+ /* Ensure the zero page is visible to the page table walker */
+ dsb(ishst);
+
/*
* TTBR0 is only used for the identity mapping at this stage. Make it
* point to zero page to avoid speculatively fetching new entries.
diff --git a/arch/arm64/mm/pageattr.c b/arch/arm64/mm/pageattr.c
index e47ed1c5dce1..545710f854f8 100644
--- a/arch/arm64/mm/pageattr.c
+++ b/arch/arm64/mm/pageattr.c
@@ -57,6 +57,9 @@ static int change_memory_common(unsigned long addr, int numpages,
if (end < MODULES_VADDR || end >= MODULES_END)
return -EINVAL;
+ if (!numpages)
+ return 0;
+
data.set_mask = set_mask;
data.clear_mask = clear_mask;
diff --git a/arch/arm64/mm/proc-macros.S b/arch/arm64/mm/proc-macros.S
index 4c4d93c4bf65..d69dffffaa89 100644
--- a/arch/arm64/mm/proc-macros.S
+++ b/arch/arm64/mm/proc-macros.S
@@ -62,3 +62,15 @@
bfi \valreg, \tmpreg, #TCR_T0SZ_OFFSET, #TCR_TxSZ_WIDTH
#endif
.endm
+
+/*
+ * reset_pmuserenr_el0 - reset PMUSERENR_EL0 if PMUv3 present
+ */
+ .macro reset_pmuserenr_el0, tmpreg
+ mrs \tmpreg, id_aa64dfr0_el1 // Check ID_AA64DFR0_EL1 PMUVer
+ sbfx \tmpreg, \tmpreg, #8, #4
+ cmp \tmpreg, #1 // Skip if no PMU present
+ b.lt 9000f
+ msr pmuserenr_el0, xzr // Disable PMU access from EL0
+9000:
+ .endm
diff --git a/arch/arm64/mm/proc.S b/arch/arm64/mm/proc.S
index cdd754e19b9b..d253908a988d 100644
--- a/arch/arm64/mm/proc.S
+++ b/arch/arm64/mm/proc.S
@@ -165,6 +165,7 @@ ENTRY(cpu_do_resume)
*/
ubfx x11, x11, #1, #1
msr oslar_el1, x11
+ reset_pmuserenr_el0 x0 // Disable PMU access from EL0
mov x0, x12
dsb nsh // Make sure local tlb invalidation completed
isb
@@ -202,7 +203,9 @@ ENTRY(__cpu_setup)
mov x0, #3 << 20
msr cpacr_el1, x0 // Enable FP/ASIMD
- msr mdscr_el1, xzr // Reset mdscr_el1
+ mov x0, #1 << 12 // Reset mdscr_el1 and disable
+ msr mdscr_el1, x0 // access to the DCC from EL0
+ reset_pmuserenr_el0 x0 // Disable PMU access from EL0
/*
* Memory region attributes for LPAE:
*
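As a hedged reference for the mdscr_el1 change above: instead of clearing the whole register, __cpu_setup now writes a single bit, which (assuming the usual MDSCR_EL1 layout) is bit 12, the TDCC trap that stops EL0 from poking the Debug Communications Channel registers.

/* Assumed for illustration: MDSCR_EL1.TDCC is bit 12. */
#define MDSCR_EL1_TDCC	(1UL << 12)	/* trap EL0 access to the DCC */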
diff --git a/arch/arm64/net/bpf_jit.h b/arch/arm64/net/bpf_jit.h
index 98a26ce82d26..aee5637ea436 100644
--- a/arch/arm64/net/bpf_jit.h
+++ b/arch/arm64/net/bpf_jit.h
@@ -1,7 +1,7 @@
/*
* BPF JIT compiler for ARM64
*
- * Copyright (C) 2014 Zi Shen Lim <zlim.lnx@gmail.com>
+ * Copyright (C) 2014-2015 Zi Shen Lim <zlim.lnx@gmail.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
@@ -35,6 +35,7 @@
aarch64_insn_gen_comp_branch_imm(0, offset, Rt, A64_VARIANT(sf), \
AARCH64_INSN_BRANCH_COMP_##type)
#define A64_CBZ(sf, Rt, imm19) A64_COMP_BRANCH(sf, Rt, (imm19) << 2, ZERO)
+#define A64_CBNZ(sf, Rt, imm19) A64_COMP_BRANCH(sf, Rt, (imm19) << 2, NONZERO)
/* Conditional branch (immediate) */
#define A64_COND_BRANCH(cond, offset) \
diff --git a/arch/arm64/net/bpf_jit_comp.c b/arch/arm64/net/bpf_jit_comp.c
index c047598b09e0..6217f80702d2 100644
--- a/arch/arm64/net/bpf_jit_comp.c
+++ b/arch/arm64/net/bpf_jit_comp.c
@@ -1,7 +1,7 @@
/*
* BPF JIT compiler for ARM64
*
- * Copyright (C) 2014 Zi Shen Lim <zlim.lnx@gmail.com>
+ * Copyright (C) 2014-2015 Zi Shen Lim <zlim.lnx@gmail.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
@@ -225,6 +225,17 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx)
u8 jmp_cond;
s32 jmp_offset;
+#define check_imm(bits, imm) do { \
+ if ((((imm) > 0) && ((imm) >> (bits))) || \
+ (((imm) < 0) && (~(imm) >> (bits)))) { \
+ pr_info("[%2d] imm=%d(0x%x) out of range\n", \
+ i, imm, imm); \
+ return -EINVAL; \
+ } \
+} while (0)
+#define check_imm19(imm) check_imm(19, imm)
+#define check_imm26(imm) check_imm(26, imm)
+
switch (code) {
/* dst = src */
case BPF_ALU | BPF_MOV | BPF_X:
@@ -258,15 +269,33 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx)
break;
case BPF_ALU | BPF_DIV | BPF_X:
case BPF_ALU64 | BPF_DIV | BPF_X:
- emit(A64_UDIV(is64, dst, dst, src), ctx);
- break;
case BPF_ALU | BPF_MOD | BPF_X:
case BPF_ALU64 | BPF_MOD | BPF_X:
- ctx->tmp_used = 1;
- emit(A64_UDIV(is64, tmp, dst, src), ctx);
- emit(A64_MUL(is64, tmp, tmp, src), ctx);
- emit(A64_SUB(is64, dst, dst, tmp), ctx);
+ {
+ const u8 r0 = bpf2a64[BPF_REG_0];
+
+ /* if (src == 0) return 0 */
+ jmp_offset = 3; /* skip ahead to else path */
+ check_imm19(jmp_offset);
+ emit(A64_CBNZ(is64, src, jmp_offset), ctx);
+ emit(A64_MOVZ(1, r0, 0, 0), ctx);
+ jmp_offset = epilogue_offset(ctx);
+ check_imm26(jmp_offset);
+ emit(A64_B(jmp_offset), ctx);
+ /* else */
+ switch (BPF_OP(code)) {
+ case BPF_DIV:
+ emit(A64_UDIV(is64, dst, dst, src), ctx);
+ break;
+ case BPF_MOD:
+ ctx->tmp_used = 1;
+ emit(A64_UDIV(is64, tmp, dst, src), ctx);
+ emit(A64_MUL(is64, tmp, tmp, src), ctx);
+ emit(A64_SUB(is64, dst, dst, tmp), ctx);
+ break;
+ }
break;
+ }
case BPF_ALU | BPF_LSH | BPF_X:
case BPF_ALU64 | BPF_LSH | BPF_X:
emit(A64_LSLV(is64, dst, dst, src), ctx);
@@ -393,17 +422,6 @@ emit_bswap_uxt:
emit(A64_ASR(is64, dst, dst, imm), ctx);
break;
-#define check_imm(bits, imm) do { \
- if ((((imm) > 0) && ((imm) >> (bits))) || \
- (((imm) < 0) && (~(imm) >> (bits)))) { \
- pr_info("[%2d] imm=%d(0x%x) out of range\n", \
- i, imm, imm); \
- return -EINVAL; \
- } \
-} while (0)
-#define check_imm19(imm) check_imm(19, imm)
-#define check_imm26(imm) check_imm(26, imm)
-
/* JUMP off */
case BPF_JMP | BPF_JA:
jmp_offset = bpf2a64_offset(i + off, i, ctx);
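A hedged C model of what the new instruction sequence implements (it mirrors the emitted A64_CBNZ/A64_MOVZ/A64_B and UDIV/MUL/SUB, it is not the JIT code itself): when the divisor is zero the generated code branches to the epilogue with R0 = 0, so the whole BPF program returns 0; otherwise BPF_DIV keeps dst / src and BPF_MOD computes the remainder as dst - (dst / src) * src.

/* Hypothetical model only; kernel u64/bool types assumed. */
struct divmod_result {
	bool exited;	/* true: jumped to the epilogue, program returns r0 */
	u64  r0;
	u64  dst;
};

static struct divmod_result bpf_div_mod_x(u64 dst, u64 src, bool is_mod)
{
	struct divmod_result res = { .exited = false };

	if (src == 0) {			/* A64_CBNZ(is64, src, 3) not taken  */
		res.exited = true;
		res.r0 = 0;		/* A64_MOVZ(1, r0, 0, 0)             */
		return res;		/* A64_B(epilogue_offset(ctx))       */
	}

	if (is_mod)			/* UDIV tmp; MUL tmp; SUB dst        */
		res.dst = dst - (dst / src) * src;
	else				/* UDIV dst, dst, src                */
		res.dst = dst / src;

	return res;
}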