Diffstat (limited to 'arch/arm64/kernel')
-rw-r--r--  arch/arm64/kernel/perf_event.c   10
-rw-r--r--  arch/arm64/kernel/process.c      18
-rw-r--r--  arch/arm64/kernel/ptrace.c       73
-rw-r--r--  arch/arm64/kernel/setup.c        12
-rw-r--r--  arch/arm64/kernel/smp.c           4
-rw-r--r--  arch/arm64/kernel/vdso.c         20
6 files changed, 76 insertions, 61 deletions
diff --git a/arch/arm64/kernel/perf_event.c b/arch/arm64/kernel/perf_event.c
index ecbf2d81ec5c..c76c7241125b 100644
--- a/arch/arm64/kernel/perf_event.c
+++ b/arch/arm64/kernel/perf_event.c
@@ -613,17 +613,11 @@ enum armv8_pmuv3_perf_types {
ARMV8_PMUV3_PERFCTR_BUS_ACCESS = 0x19,
ARMV8_PMUV3_PERFCTR_MEM_ERROR = 0x1A,
ARMV8_PMUV3_PERFCTR_BUS_CYCLES = 0x1D,
-
- /*
- * This isn't an architected event.
- * We detect this event number and use the cycle counter instead.
- */
- ARMV8_PMUV3_PERFCTR_CPU_CYCLES = 0xFF,
};
/* PMUv3 HW events mapping. */
static const unsigned armv8_pmuv3_perf_map[PERF_COUNT_HW_MAX] = {
- [PERF_COUNT_HW_CPU_CYCLES] = ARMV8_PMUV3_PERFCTR_CPU_CYCLES,
+ [PERF_COUNT_HW_CPU_CYCLES] = ARMV8_PMUV3_PERFCTR_CLOCK_CYCLES,
[PERF_COUNT_HW_INSTRUCTIONS] = ARMV8_PMUV3_PERFCTR_INSTR_EXECUTED,
[PERF_COUNT_HW_CACHE_REFERENCES] = ARMV8_PMUV3_PERFCTR_L1_DCACHE_ACCESS,
[PERF_COUNT_HW_CACHE_MISSES] = ARMV8_PMUV3_PERFCTR_L1_DCACHE_REFILL,
@@ -1106,7 +1100,7 @@ static int armv8pmu_get_event_idx(struct pmu_hw_events *cpuc,
unsigned long evtype = event->config_base & ARMV8_EVTYPE_EVENT;
/* Always place a cycle counter into the cycle counter. */
- if (evtype == ARMV8_PMUV3_PERFCTR_CPU_CYCLES) {
+ if (evtype == ARMV8_PMUV3_PERFCTR_CLOCK_CYCLES) {
if (test_and_set_bit(ARMV8_IDX_CYCLE_COUNTER, cpuc->used_mask))
return -EAGAIN;
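
For callers nothing changes: PERF_COUNT_HW_CPU_CYCLES is still steered into the dedicated cycle counter, it just maps to the architected event rather than the fake 0xFF one. A minimal userspace sketch of how that mapping is exercised through perf_event_open(2); the sketch is not part of the patch, and the raw syscall is used only because glibc ships no wrapper:

#include <linux/perf_event.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/syscall.h>
#include <unistd.h>

int main(void)
{
	struct perf_event_attr attr;
	uint64_t cycles;
	int fd;

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = PERF_TYPE_HARDWARE;
	attr.config = PERF_COUNT_HW_CPU_CYCLES;	/* mapped by the PMU driver, see armv8_pmuv3_perf_map */
	attr.disabled = 1;
	attr.exclude_kernel = 1;

	/* No glibc wrapper for perf_event_open, so go through syscall(2). */
	fd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
	if (fd < 0) {
		perror("perf_event_open");
		return 1;
	}

	ioctl(fd, PERF_EVENT_IOC_ENABLE, 0);
	for (volatile int i = 0; i < 1000000; i++)
		;	/* some work to count */
	ioctl(fd, PERF_EVENT_IOC_DISABLE, 0);

	if (read(fd, &cycles, sizeof(cycles)) == sizeof(cycles))
		printf("cycles: %llu\n", (unsigned long long)cycles);
	close(fd);
	return 0;
}
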
diff --git a/arch/arm64/kernel/process.c b/arch/arm64/kernel/process.c
index f22965ea1cfc..e04cebdbb47f 100644
--- a/arch/arm64/kernel/process.c
+++ b/arch/arm64/kernel/process.c
@@ -310,24 +310,6 @@ struct task_struct *__switch_to(struct task_struct *prev,
}
/*
- * Fill in the task's elfregs structure for a core dump.
- */
-int dump_task_regs(struct task_struct *t, elf_gregset_t *elfregs)
-{
- elf_core_copy_regs(elfregs, task_pt_regs(t));
- return 1;
-}
-
-/*
- * fill in the fpe structure for a core dump...
- */
-int dump_fpu (struct pt_regs *regs, struct user_fp *fp)
-{
- return 0;
-}
-EXPORT_SYMBOL(dump_fpu);
-
-/*
* Shuffle the argument into the correct register before calling the
* thread function. x1 is the thread argument, x2 is the pointer to
* the thread function, and x3 points to the exit function.
diff --git a/arch/arm64/kernel/ptrace.c b/arch/arm64/kernel/ptrace.c
index 2ea3968367c2..6e1e77f1831c 100644
--- a/arch/arm64/kernel/ptrace.c
+++ b/arch/arm64/kernel/ptrace.c
@@ -234,28 +234,33 @@ static int ptrace_hbp_fill_attr_ctrl(unsigned int note_type,
struct arch_hw_breakpoint_ctrl ctrl,
struct perf_event_attr *attr)
{
- int err, len, type;
+ int err, len, type, disabled = !ctrl.enabled;
- err = arch_bp_generic_fields(ctrl, &len, &type);
- if (err)
- return err;
-
- switch (note_type) {
- case NT_ARM_HW_BREAK:
- if ((type & HW_BREAKPOINT_X) != type)
- return -EINVAL;
- break;
- case NT_ARM_HW_WATCH:
- if ((type & HW_BREAKPOINT_RW) != type)
+ if (disabled) {
+ len = 0;
+ type = HW_BREAKPOINT_EMPTY;
+ } else {
+ err = arch_bp_generic_fields(ctrl, &len, &type);
+ if (err)
+ return err;
+
+ switch (note_type) {
+ case NT_ARM_HW_BREAK:
+ if ((type & HW_BREAKPOINT_X) != type)
+ return -EINVAL;
+ break;
+ case NT_ARM_HW_WATCH:
+ if ((type & HW_BREAKPOINT_RW) != type)
+ return -EINVAL;
+ break;
+ default:
return -EINVAL;
- break;
- default:
- return -EINVAL;
+ }
}
attr->bp_len = len;
attr->bp_type = type;
- attr->disabled = !ctrl.enabled;
+ attr->disabled = disabled;
return 0;
}
@@ -372,7 +377,7 @@ static int ptrace_hbp_set_addr(unsigned int note_type,
#define PTRACE_HBP_ADDR_SZ sizeof(u64)
#define PTRACE_HBP_CTRL_SZ sizeof(u32)
-#define PTRACE_HBP_REG_OFF sizeof(u32)
+#define PTRACE_HBP_PAD_SZ sizeof(u32)
static int hw_break_get(struct task_struct *target,
const struct user_regset *regset,
@@ -380,7 +385,7 @@ static int hw_break_get(struct task_struct *target,
void *kbuf, void __user *ubuf)
{
unsigned int note_type = regset->core_note_type;
- int ret, idx = 0, offset = PTRACE_HBP_REG_OFF, limit;
+ int ret, idx = 0, offset, limit;
u32 info, ctrl;
u64 addr;
@@ -389,11 +394,20 @@ static int hw_break_get(struct task_struct *target,
if (ret)
return ret;
- ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, &info, 0, 4);
+ ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, &info, 0,
+ sizeof(info));
+ if (ret)
+ return ret;
+
+ /* Pad */
+ offset = offsetof(struct user_hwdebug_state, pad);
+ ret = user_regset_copyout_zero(&pos, &count, &kbuf, &ubuf, offset,
+ offset + PTRACE_HBP_PAD_SZ);
if (ret)
return ret;
/* (address, ctrl) registers */
+ offset = offsetof(struct user_hwdebug_state, dbg_regs);
limit = regset->n * regset->size;
while (count && offset < limit) {
ret = ptrace_hbp_get_addr(note_type, target, idx, &addr);
@@ -413,6 +427,13 @@ static int hw_break_get(struct task_struct *target,
if (ret)
return ret;
offset += PTRACE_HBP_CTRL_SZ;
+
+ ret = user_regset_copyout_zero(&pos, &count, &kbuf, &ubuf,
+ offset,
+ offset + PTRACE_HBP_PAD_SZ);
+ if (ret)
+ return ret;
+ offset += PTRACE_HBP_PAD_SZ;
idx++;
}
@@ -425,12 +446,13 @@ static int hw_break_set(struct task_struct *target,
const void *kbuf, const void __user *ubuf)
{
unsigned int note_type = regset->core_note_type;
- int ret, idx = 0, offset = PTRACE_HBP_REG_OFF, limit;
+ int ret, idx = 0, offset, limit;
u32 ctrl;
u64 addr;
- /* Resource info */
- ret = user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf, 0, 4);
+ /* Resource info and pad */
+ offset = offsetof(struct user_hwdebug_state, dbg_regs);
+ ret = user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf, 0, offset);
if (ret)
return ret;
@@ -454,6 +476,13 @@ static int hw_break_set(struct task_struct *target,
if (ret)
return ret;
offset += PTRACE_HBP_CTRL_SZ;
+
+ ret = user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf,
+ offset,
+ offset + PTRACE_HBP_PAD_SZ);
+ if (ret)
+ return ret;
+ offset += PTRACE_HBP_PAD_SZ;
idx++;
}
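
The offsetof()-based walk above follows the padded regset layout that NT_ARM_HW_BREAK and NT_ARM_HW_WATCH expose: a 32-bit info word plus 32 bits of padding, followed by (addr, ctrl, pad) triples. The struct below is an illustrative stand-in written from that description, not a copy of the uapi header; it just prints the offsets hw_break_get() now steps through:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Illustrative mirror of the layout walked by hw_break_get()/hw_break_set(). */
struct hwdebug_slot {
	uint64_t addr;		/* PTRACE_HBP_ADDR_SZ */
	uint32_t ctrl;		/* PTRACE_HBP_CTRL_SZ */
	uint32_t pad;		/* per-slot pad, zero-filled on copyout */
};

struct hwdebug_layout {
	uint32_t dbg_info;	/* copied out with sizeof(info) */
	uint32_t pad;		/* zero-filled: PTRACE_HBP_PAD_SZ */
	struct hwdebug_slot dbg_regs[16];
};

int main(void)
{
	printf("dbg_regs offset: %zu\n", offsetof(struct hwdebug_layout, dbg_regs));	/* 8 */
	printf("per-slot stride: %zu\n", sizeof(struct hwdebug_slot));			/* 16 */
	return 0;
}
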
diff --git a/arch/arm64/kernel/setup.c b/arch/arm64/kernel/setup.c
index 48ffb9fb3fe3..7665a9bfdb1e 100644
--- a/arch/arm64/kernel/setup.c
+++ b/arch/arm64/kernel/setup.c
@@ -170,7 +170,19 @@ static void __init setup_machine_fdt(phys_addr_t dt_phys)
void __init early_init_dt_add_memory_arch(u64 base, u64 size)
{
+ base &= PAGE_MASK;
size &= PAGE_MASK;
+ if (base + size < PHYS_OFFSET) {
+ pr_warning("Ignoring memory block 0x%llx - 0x%llx\n",
+ base, base + size);
+ return;
+ }
+ if (base < PHYS_OFFSET) {
+ pr_warning("Ignoring memory range 0x%llx - 0x%llx\n",
+ base, PHYS_OFFSET);
+ size -= PHYS_OFFSET - base;
+ base = PHYS_OFFSET;
+ }
memblock_add(base, size);
}
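
The clamping above drops DT memory that lies entirely below PHYS_OFFSET and trims blocks that straddle it. A standalone sketch of the same arithmetic; the page mask and PHYS_OFFSET values below are invented for illustration:

#include <stdio.h>
#include <stdint.h>

#define PAGE_MASK_SKETCH	(~((uint64_t)0xfff))		/* 4K pages, illustrative */
#define PHYS_OFFSET_SKETCH	((uint64_t)0x80000000)		/* illustrative RAM start */

static void add_memory(uint64_t base, uint64_t size)
{
	base &= PAGE_MASK_SKETCH;
	size &= PAGE_MASK_SKETCH;
	if (base + size < PHYS_OFFSET_SKETCH) {
		printf("ignoring block %#llx - %#llx\n",
		       (unsigned long long)base, (unsigned long long)(base + size));
		return;
	}
	if (base < PHYS_OFFSET_SKETCH) {
		printf("trimming range %#llx - %#llx\n",
		       (unsigned long long)base, (unsigned long long)PHYS_OFFSET_SKETCH);
		size -= PHYS_OFFSET_SKETCH - base;
		base = PHYS_OFFSET_SKETCH;
	}
	printf("memblock_add(%#llx, %#llx)\n",
	       (unsigned long long)base, (unsigned long long)size);
}

int main(void)
{
	add_memory(0x40000000, 0x10000000);	/* entirely below PHYS_OFFSET: ignored */
	add_memory(0x7ff00000, 0x00200000);	/* straddles: becomes (0x80000000, 0x100000) */
	return 0;
}
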
diff --git a/arch/arm64/kernel/smp.c b/arch/arm64/kernel/smp.c
index b711525be21f..538300f2273d 100644
--- a/arch/arm64/kernel/smp.c
+++ b/arch/arm64/kernel/smp.c
@@ -46,7 +46,6 @@
#include <asm/sections.h>
#include <asm/tlbflush.h>
#include <asm/ptrace.h>
-#include <asm/mmu_context.h>
/*
* as from 2.5, kernels no longer have an init_tasks structure
@@ -212,8 +211,7 @@ asmlinkage void __cpuinit secondary_start_kernel(void)
* before we continue.
*/
set_cpu_online(cpu, true);
- while (!cpu_active(cpu))
- cpu_relax();
+ complete(&cpu_running);
/*
* OK, it's off to the idle thread for us
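
The busy-wait on cpu_active() is replaced by signalling a completion, so the CPU running __cpu_up() can sleep until the secondary reports in (the wait side lives outside this hunk). As a rough userspace analogue of that handshake, with a POSIX semaphore standing in for the kernel completion (illustration only, not kernel code):

#include <pthread.h>
#include <semaphore.h>
#include <stdio.h>

static sem_t cpu_running;	/* plays the role of the completion */

static void *secondary(void *arg)
{
	/* ... per-CPU bring-up ... */
	printf("secondary: online\n");
	sem_post(&cpu_running);		/* complete(&cpu_running) */
	/* ... enter idle loop ... */
	return NULL;
}

int main(void)
{
	pthread_t t;

	sem_init(&cpu_running, 0, 0);
	pthread_create(&t, NULL, secondary, NULL);
	sem_wait(&cpu_running);		/* boot CPU sleeps here instead of busy-polling */
	printf("boot cpu: secondary is up\n");
	pthread_join(&t, NULL);
	return 0;
}
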
diff --git a/arch/arm64/kernel/vdso.c b/arch/arm64/kernel/vdso.c
index 17948fc7d663..ba457943a16b 100644
--- a/arch/arm64/kernel/vdso.c
+++ b/arch/arm64/kernel/vdso.c
@@ -28,6 +28,7 @@
#include <linux/sched.h>
#include <linux/signal.h>
#include <linux/slab.h>
+#include <linux/timekeeper_internal.h>
#include <linux/vmalloc.h>
#include <asm/cacheflush.h>
@@ -222,11 +223,10 @@ struct vm_area_struct *get_gate_vma(struct mm_struct *mm)
/*
* Update the vDSO data page to keep in sync with kernel timekeeping.
*/
-void update_vsyscall(struct timespec *ts, struct timespec *wtm,
- struct clocksource *clock, u32 mult)
+void update_vsyscall(struct timekeeper *tk)
{
struct timespec xtime_coarse;
- u32 use_syscall = strcmp(clock->name, "arch_sys_counter");
+ u32 use_syscall = strcmp(tk->clock->name, "arch_sys_counter");
++vdso_data->tb_seq_count;
smp_wmb();
@@ -237,13 +237,13 @@ void update_vsyscall(struct timespec *ts, struct timespec *wtm,
vdso_data->xtime_coarse_nsec = xtime_coarse.tv_nsec;
if (!use_syscall) {
- vdso_data->cs_cycle_last = clock->cycle_last;
- vdso_data->xtime_clock_sec = ts->tv_sec;
- vdso_data->xtime_clock_nsec = ts->tv_nsec;
- vdso_data->cs_mult = mult;
- vdso_data->cs_shift = clock->shift;
- vdso_data->wtm_clock_sec = wtm->tv_sec;
- vdso_data->wtm_clock_nsec = wtm->tv_nsec;
+ vdso_data->cs_cycle_last = tk->clock->cycle_last;
+ vdso_data->xtime_clock_sec = tk->xtime_sec;
+ vdso_data->xtime_clock_nsec = tk->xtime_nsec >> tk->shift;
+ vdso_data->cs_mult = tk->mult;
+ vdso_data->cs_shift = tk->shift;
+ vdso_data->wtm_clock_sec = tk->wall_to_monotonic.tv_sec;
+ vdso_data->wtm_clock_nsec = tk->wall_to_monotonic.tv_nsec;
}
smp_wmb();
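
Since struct timekeeper keeps xtime_nsec left-shifted by tk->shift, the ">> tk->shift" converts it back to plain nanoseconds before it is published. A standalone sketch of the clocksource arithmetic those vdso_data fields feed; the numbers are invented and the real vDSO reader is assembly, this only shows the formula ns = base_ns + ((cycles - cycle_last) * mult >> shift):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* Values a vDSO data page might publish (invented for illustration). */
	uint64_t cs_cycle_last    = 1000000;
	uint64_t xtime_clock_sec  = 1350000000;
	uint64_t xtime_clock_nsec = 250000000;	/* already >> cs_shift, i.e. plain ns */
	uint32_t cs_mult          = 416666667;	/* clocksource mult (invented) */
	uint32_t cs_shift         = 24;

	uint64_t now_cycles = 1024000;		/* pretend counter read */

	/* Standard clocksource arithmetic. */
	uint64_t ns = xtime_clock_nsec +
		      (((now_cycles - cs_cycle_last) * cs_mult) >> cs_shift);
	uint64_t sec = xtime_clock_sec + ns / 1000000000ULL;

	ns %= 1000000000ULL;
	printf("%llu.%09llu\n", (unsigned long long)sec, (unsigned long long)ns);
	return 0;
}
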