path: root/arch/riscv/kernel
author    Linus Torvalds <torvalds@linux-foundation.org>  2020-12-18 10:43:07 -0800
committer Linus Torvalds <torvalds@linux-foundation.org>  2020-12-18 10:43:07 -0800
commit    e2ae634014d3a8839a99f8897b3f6346a133a33b (patch)
tree      c33b6df0010a651f0ec890f60a7fdfd6216f7964 /arch/riscv/kernel
parent    a409ed156a90093a03fe6a93721ddf4c591eac87 (diff)
parent    7d95a88f9254b711a3a95106fc73f6a3a9866a40 (diff)
Merge tag 'riscv-for-linus-5.11-mw0' of git://git.kernel.org/pub/scm/linux/kernel/git/riscv/linux
Pull RISC-V updates from Palmer Dabbelt:
 "We have a handful of new kernel features for 5.11:

   - Support for the contiguous memory allocator.
   - Support for IRQ Time Accounting
   - Support for stack tracing
   - Support for strict /dev/mem
   - Support for kernel section protection

  I'm being a bit conservative on the cutoff for this round due to the
  timing, so this is all the new development I'm going to take for this
  cycle (even if some of it probably normally would have been OK).
  There are, however, some fixes on the list that I will likely be
  sending along either later this week or early next week.

  There is one issue in here: one of my test configurations
  (PREEMPT{,_DEBUG}=y) fails to boot on QEMU 5.0.0 (from April) as of
  the .text.init alignment patch. With any luck we'll sort out the
  issue, but given how many bugs get fixed all over the place and how
  unrelated those features seem, my guess is that we're just running
  into something that's been lurking for a while and has already been
  fixed in the newer QEMU (though I wouldn't be surprised if it's one
  of these implicit assumptions we have in the boot flow). If it was
  hardware I'd be strongly inclined to look more closely, but given
  that users can upgrade their simulators I'm less worried about it"

* tag 'riscv-for-linus-5.11-mw0' of git://git.kernel.org/pub/scm/linux/kernel/git/riscv/linux:
  arm64: Use the generic devmem_is_allowed()
  arm: Use the generic devmem_is_allowed()
  RISC-V: Use the new generic devmem_is_allowed()
  lib: Add a generic version of devmem_is_allowed()
  riscv: Fixed kernel test robot warning
  riscv: kernel: Drop unused clean rule
  riscv: provide memmove implementation
  RISC-V: Move dynamic relocation section under __init
  RISC-V: Protect all kernel sections including init early
  RISC-V: Align the .init.text section
  RISC-V: Initialize SBI early
  riscv: Enable ARCH_STACKWALK
  riscv: Make stack walk callback consistent with generic code
  riscv: Cleanup stacktrace
  riscv: Add HAVE_IRQ_TIME_ACCOUNTING
  riscv: Enable CMA support
  riscv: Ignore Image.* and loader.bin
  riscv: Clean up boot dir
  riscv: Fix compressed Image formats build
  RISC-V: Add kernel image sections to the resource tree
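
As a hedged illustration of the "Support for stack tracing" item: with riscv selecting ARCH_STACKWALK, the architecture only provides arch_stack_walk() and callers go through the generic stack_trace_save() helpers instead of filling a struct stack_trace by hand. The sketch below is illustrative only and not part of this merge; dump_current_stack() is a hypothetical caller.

/*
 * Illustrative sketch, not part of this merge: with ARCH_STACKWALK
 * selected, riscv implements arch_stack_walk() and consumers use the
 * generic stack_trace_save() API rather than struct stack_trace.
 */
#include <linux/kernel.h>
#include <linux/printk.h>
#include <linux/stacktrace.h>

static void dump_current_stack(void)
{
	unsigned long entries[16];
	unsigned int i, nr;

	/* Collect up to 16 return addresses from the current task. */
	nr = stack_trace_save(entries, ARRAY_SIZE(entries), 0);

	for (i = 0; i < nr; i++)
		pr_info("  %pS\n", (void *)entries[i]);
}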
Diffstat (limited to 'arch/riscv/kernel')
-rw-r--r--  arch/riscv/kernel/Makefile          |   2
-rw-r--r--  arch/riscv/kernel/asm-offsets.c     |   2
-rw-r--r--  arch/riscv/kernel/head.S            |   1
-rw-r--r--  arch/riscv/kernel/perf_callchain.c  |  10
-rw-r--r--  arch/riscv/kernel/riscv_ksyms.c     |   2
-rw-r--r--  arch/riscv/kernel/setup.c           | 179
-rw-r--r--  arch/riscv/kernel/stacktrace.c      |  62
-rw-r--r--  arch/riscv/kernel/vmlinux.lds.S     |  63
8 files changed, 232 insertions(+), 89 deletions(-)
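
The stacktrace.c and perf_callchain.c hunks below also flip the walk_stackframe() callback convention to match the generic stack_trace_consume_fn: the cookie now comes first, the PC second, and returning true means keep walking (previously true meant stop). A minimal sketch of a callback under the new convention; count_frames() and its 16-frame limit are hypothetical, not part of the patch.

/*
 * Illustrative sketch, not part of this merge.  Under the new
 * convention a walk_stackframe() callback takes (cookie, pc) and
 * returns true to continue the walk, false to stop it.
 */
static bool count_frames(void *cookie, unsigned long pc)
{
	unsigned int *nr = cookie;

	/* Stop the walk once 16 frames have been counted. */
	return ++(*nr) < 16;
}

Such a callback would be passed as, e.g., walk_stackframe(NULL, NULL, count_frames, &nr), which walks the current task when both task and regs are NULL.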
diff --git a/arch/riscv/kernel/Makefile b/arch/riscv/kernel/Makefile
index fa896c5f7ccb..f6caf4d9ca15 100644
--- a/arch/riscv/kernel/Makefile
+++ b/arch/riscv/kernel/Makefile
@@ -56,5 +56,3 @@ obj-$(CONFIG_KGDB) += kgdb.o
obj-$(CONFIG_JUMP_LABEL) += jump_label.o
obj-$(CONFIG_EFI) += efi.o
-
-clean:
diff --git a/arch/riscv/kernel/asm-offsets.c b/arch/riscv/kernel/asm-offsets.c
index db203442c08f..b79ffa3561fd 100644
--- a/arch/riscv/kernel/asm-offsets.c
+++ b/arch/riscv/kernel/asm-offsets.c
@@ -11,6 +11,8 @@
#include <asm/thread_info.h>
#include <asm/ptrace.h>
+void asm_offsets(void);
+
void asm_offsets(void)
{
OFFSET(TASK_THREAD_RA, task_struct, thread.ra);
diff --git a/arch/riscv/kernel/head.S b/arch/riscv/kernel/head.S
index 7e849797c9c3..16e9941900c4 100644
--- a/arch/riscv/kernel/head.S
+++ b/arch/riscv/kernel/head.S
@@ -182,7 +182,6 @@ setup_trap_vector:
END(_start)
- __INIT
ENTRY(_start_kernel)
/* Mask all interrupts */
csrw CSR_IE, zero
diff --git a/arch/riscv/kernel/perf_callchain.c b/arch/riscv/kernel/perf_callchain.c
index cf190197a22f..0bb1854dce83 100644
--- a/arch/riscv/kernel/perf_callchain.c
+++ b/arch/riscv/kernel/perf_callchain.c
@@ -4,11 +4,7 @@
#include <linux/perf_event.h>
#include <linux/uaccess.h>
-/* Kernel callchain */
-struct stackframe {
- unsigned long fp;
- unsigned long ra;
-};
+#include <asm/stacktrace.h>
/*
* Get the return address for a single stackframe and return a pointer to the
@@ -74,13 +70,11 @@ void perf_callchain_user(struct perf_callchain_entry_ctx *entry,
fp = user_backtrace(entry, fp, 0);
}
-bool fill_callchain(unsigned long pc, void *entry)
+static bool fill_callchain(void *entry, unsigned long pc)
{
return perf_callchain_store(entry, pc);
}
-void notrace walk_stackframe(struct task_struct *task,
- struct pt_regs *regs, bool (*fn)(unsigned long, void *), void *arg);
void perf_callchain_kernel(struct perf_callchain_entry_ctx *entry,
struct pt_regs *regs)
{
diff --git a/arch/riscv/kernel/riscv_ksyms.c b/arch/riscv/kernel/riscv_ksyms.c
index 450492e1cb4e..5ab1c7e1a6ed 100644
--- a/arch/riscv/kernel/riscv_ksyms.c
+++ b/arch/riscv/kernel/riscv_ksyms.c
@@ -11,5 +11,7 @@
*/
EXPORT_SYMBOL(memset);
EXPORT_SYMBOL(memcpy);
+EXPORT_SYMBOL(memmove);
EXPORT_SYMBOL(__memset);
EXPORT_SYMBOL(__memcpy);
+EXPORT_SYMBOL(__memmove);
diff --git a/arch/riscv/kernel/setup.c b/arch/riscv/kernel/setup.c
index 117f3212a8e4..1d85e9bf783c 100644
--- a/arch/riscv/kernel/setup.c
+++ b/arch/riscv/kernel/setup.c
@@ -4,6 +4,8 @@
* Chen Liqin <liqin.chen@sunplusct.com>
* Lennox Wu <lennox.wu@sunplusct.com>
* Copyright (C) 2012 Regents of the University of California
+ * Copyright (C) 2020 FORTH-ICS/CARV
+ * Nick Kossifidis <mick@ics.forth.gr>
*/
#include <linux/init.h>
@@ -22,6 +24,7 @@
#include <asm/cpu_ops.h>
#include <asm/early_ioremap.h>
#include <asm/setup.h>
+#include <asm/set_memory.h>
#include <asm/sections.h>
#include <asm/sbi.h>
#include <asm/tlbflush.h>
@@ -51,6 +54,163 @@ atomic_t hart_lottery __section(".sdata");
unsigned long boot_cpu_hartid;
static DEFINE_PER_CPU(struct cpu, cpu_devices);
+/*
+ * Place kernel memory regions on the resource tree so that
+ * kexec-tools can retrieve them from /proc/iomem. While there
+ * also add "System RAM" regions for compatibility with other
+ * archs, and the rest of the known regions for completeness.
+ */
+static struct resource code_res = { .name = "Kernel code", };
+static struct resource data_res = { .name = "Kernel data", };
+static struct resource rodata_res = { .name = "Kernel rodata", };
+static struct resource bss_res = { .name = "Kernel bss", };
+
+static int __init add_resource(struct resource *parent,
+ struct resource *res)
+{
+ int ret = 0;
+
+ ret = insert_resource(parent, res);
+ if (ret < 0) {
+ pr_err("Failed to add a %s resource at %llx\n",
+ res->name, (unsigned long long) res->start);
+ return ret;
+ }
+
+ return 1;
+}
+
+static int __init add_kernel_resources(struct resource *res)
+{
+ int ret = 0;
+
+ /*
+ * The memory region of the kernel image is continuous and
+ * was reserved on setup_bootmem, find it here and register
+ * it as a resource, then register the various segments of
+ * the image as child nodes
+ */
+ if (!(res->start <= code_res.start && res->end >= data_res.end))
+ return 0;
+
+ res->name = "Kernel image";
+ res->flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;
+
+ /*
+ * We removed a part of this region on setup_bootmem so
+ * we need to expand the resource for the bss to fit in.
+ */
+ res->end = bss_res.end;
+
+ ret = add_resource(&iomem_resource, res);
+ if (ret < 0)
+ return ret;
+
+ ret = add_resource(res, &code_res);
+ if (ret < 0)
+ return ret;
+
+ ret = add_resource(res, &rodata_res);
+ if (ret < 0)
+ return ret;
+
+ ret = add_resource(res, &data_res);
+ if (ret < 0)
+ return ret;
+
+ ret = add_resource(res, &bss_res);
+
+ return ret;
+}
+
+static void __init init_resources(void)
+{
+ struct memblock_region *region = NULL;
+ struct resource *res = NULL;
+ int ret = 0;
+
+ code_res.start = __pa_symbol(_text);
+ code_res.end = __pa_symbol(_etext) - 1;
+ code_res.flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;
+
+ rodata_res.start = __pa_symbol(__start_rodata);
+ rodata_res.end = __pa_symbol(__end_rodata) - 1;
+ rodata_res.flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;
+
+ data_res.start = __pa_symbol(_data);
+ data_res.end = __pa_symbol(_edata) - 1;
+ data_res.flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;
+
+ bss_res.start = __pa_symbol(__bss_start);
+ bss_res.end = __pa_symbol(__bss_stop) - 1;
+ bss_res.flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;
+
+ /*
+ * Start by adding the reserved regions, if they overlap
+ * with /memory regions, insert_resource later on will take
+ * care of it.
+ */
+ for_each_reserved_mem_region(region) {
+ res = memblock_alloc(sizeof(struct resource), SMP_CACHE_BYTES);
+ if (!res)
+ panic("%s: Failed to allocate %zu bytes\n", __func__,
+ sizeof(struct resource));
+
+ res->name = "Reserved";
+ res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;
+ res->start = __pfn_to_phys(memblock_region_reserved_base_pfn(region));
+ res->end = __pfn_to_phys(memblock_region_reserved_end_pfn(region)) - 1;
+
+ ret = add_kernel_resources(res);
+ if (ret < 0)
+ goto error;
+ else if (ret)
+ continue;
+
+ /*
+ * Ignore any other reserved regions within
+ * system memory.
+ */
+ if (memblock_is_memory(res->start))
+ continue;
+
+ ret = add_resource(&iomem_resource, res);
+ if (ret < 0)
+ goto error;
+ }
+
+ /* Add /memory regions to the resource tree */
+ for_each_mem_region(region) {
+ res = memblock_alloc(sizeof(struct resource), SMP_CACHE_BYTES);
+ if (!res)
+ panic("%s: Failed to allocate %zu bytes\n", __func__,
+ sizeof(struct resource));
+
+ if (unlikely(memblock_is_nomap(region))) {
+ res->name = "Reserved";
+ res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;
+ } else {
+ res->name = "System RAM";
+ res->flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;
+ }
+
+ res->start = __pfn_to_phys(memblock_region_memory_base_pfn(region));
+ res->end = __pfn_to_phys(memblock_region_memory_end_pfn(region)) - 1;
+
+ ret = add_resource(&iomem_resource, res);
+ if (ret < 0)
+ goto error;
+ }
+
+ return;
+
+ error:
+ memblock_free((phys_addr_t) res, sizeof(struct resource));
+ /* Better an empty resource tree than an inconsistent one */
+ release_child_resources(&iomem_resource);
+}
+
+
static void __init parse_dtb(void)
{
/* Early scan of device tree from init memory */
@@ -81,6 +241,7 @@ void __init setup_arch(char **cmdline_p)
efi_init();
setup_bootmem();
paging_init();
+ init_resources();
#if IS_ENABLED(CONFIG_BUILTIN_DTB)
unflatten_and_copy_device_tree();
#else
@@ -90,6 +251,11 @@ void __init setup_arch(char **cmdline_p)
pr_err("No DTB found in kernel mappings\n");
#endif
+ if (IS_ENABLED(CONFIG_RISCV_SBI))
+ sbi_init();
+
+ if (IS_ENABLED(CONFIG_STRICT_KERNEL_RWX))
+ protect_kernel_text_data();
#ifdef CONFIG_SWIOTLB
swiotlb_init(1);
#endif
@@ -98,10 +264,6 @@ void __init setup_arch(char **cmdline_p)
kasan_init();
#endif
-#if IS_ENABLED(CONFIG_RISCV_SBI)
- sbi_init();
-#endif
-
#ifdef CONFIG_SMP
setup_smp();
#endif
@@ -123,3 +285,12 @@ static int __init topology_init(void)
return 0;
}
subsys_initcall(topology_init);
+
+void free_initmem(void)
+{
+ unsigned long init_begin = (unsigned long)__init_begin;
+ unsigned long init_end = (unsigned long)__init_end;
+
+ set_memory_rw_nx(init_begin, (init_end - init_begin) >> PAGE_SHIFT);
+ free_initmem_default(POISON_FREE_INITMEM);
+}
diff --git a/arch/riscv/kernel/stacktrace.c b/arch/riscv/kernel/stacktrace.c
index 595342910c3f..48b870a685b3 100644
--- a/arch/riscv/kernel/stacktrace.c
+++ b/arch/riscv/kernel/stacktrace.c
@@ -12,17 +12,14 @@
#include <linux/stacktrace.h>
#include <linux/ftrace.h>
+#include <asm/stacktrace.h>
+
register unsigned long sp_in_global __asm__("sp");
#ifdef CONFIG_FRAME_POINTER
-struct stackframe {
- unsigned long fp;
- unsigned long ra;
-};
-
void notrace walk_stackframe(struct task_struct *task, struct pt_regs *regs,
- bool (*fn)(unsigned long, void *), void *arg)
+ bool (*fn)(void *, unsigned long), void *arg)
{
unsigned long fp, sp, pc;
@@ -46,7 +43,7 @@ void notrace walk_stackframe(struct task_struct *task, struct pt_regs *regs,
unsigned long low, high;
struct stackframe *frame;
- if (unlikely(!__kernel_text_address(pc) || fn(pc, arg)))
+ if (unlikely(!__kernel_text_address(pc) || !fn(arg, pc)))
break;
/* Validate frame pointer */
@@ -66,7 +63,7 @@ void notrace walk_stackframe(struct task_struct *task, struct pt_regs *regs,
#else /* !CONFIG_FRAME_POINTER */
void notrace walk_stackframe(struct task_struct *task,
- struct pt_regs *regs, bool (*fn)(unsigned long, void *), void *arg)
+ struct pt_regs *regs, bool (*fn)(void *, unsigned long), void *arg)
{
unsigned long sp, pc;
unsigned long *ksp;
@@ -88,7 +85,7 @@ void notrace walk_stackframe(struct task_struct *task,
ksp = (unsigned long *)sp;
while (!kstack_end(ksp)) {
- if (__kernel_text_address(pc) && unlikely(fn(pc, arg)))
+ if (__kernel_text_address(pc) && unlikely(!fn(arg, pc)))
break;
pc = (*ksp++) - 0x4;
}
@@ -96,13 +93,12 @@ void notrace walk_stackframe(struct task_struct *task,
#endif /* CONFIG_FRAME_POINTER */
-
-static bool print_trace_address(unsigned long pc, void *arg)
+static bool print_trace_address(void *arg, unsigned long pc)
{
const char *loglvl = arg;
print_ip_sym(loglvl, pc);
- return false;
+ return true;
}
void show_stack(struct task_struct *task, unsigned long *sp, const char *loglvl)
@@ -111,14 +107,14 @@ void show_stack(struct task_struct *task, unsigned long *sp, const char *loglvl)
walk_stackframe(task, NULL, print_trace_address, (void *)loglvl);
}
-static bool save_wchan(unsigned long pc, void *arg)
+static bool save_wchan(void *arg, unsigned long pc)
{
if (!in_sched_functions(pc)) {
unsigned long *p = arg;
*p = pc;
- return true;
+ return false;
}
- return false;
+ return true;
}
unsigned long get_wchan(struct task_struct *task)
@@ -130,42 +126,12 @@ unsigned long get_wchan(struct task_struct *task)
return pc;
}
-
#ifdef CONFIG_STACKTRACE
-static bool __save_trace(unsigned long pc, void *arg, bool nosched)
-{
- struct stack_trace *trace = arg;
-
- if (unlikely(nosched && in_sched_functions(pc)))
- return false;
- if (unlikely(trace->skip > 0)) {
- trace->skip--;
- return false;
- }
-
- trace->entries[trace->nr_entries++] = pc;
- return (trace->nr_entries >= trace->max_entries);
-}
-
-static bool save_trace(unsigned long pc, void *arg)
-{
- return __save_trace(pc, arg, false);
-}
-
-/*
- * Save stack-backtrace addresses into a stack_trace buffer.
- */
-void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
-{
- walk_stackframe(tsk, NULL, save_trace, trace);
-}
-EXPORT_SYMBOL_GPL(save_stack_trace_tsk);
-
-void save_stack_trace(struct stack_trace *trace)
+void arch_stack_walk(stack_trace_consume_fn consume_entry, void *cookie,
+ struct task_struct *task, struct pt_regs *regs)
{
- save_stack_trace_tsk(NULL, trace);
+ walk_stackframe(task, regs, consume_entry, cookie);
}
-EXPORT_SYMBOL_GPL(save_stack_trace);
#endif /* CONFIG_STACKTRACE */
diff --git a/arch/riscv/kernel/vmlinux.lds.S b/arch/riscv/kernel/vmlinux.lds.S
index 3ffbd6cbdb86..de03cb22d0e9 100644
--- a/arch/riscv/kernel/vmlinux.lds.S
+++ b/arch/riscv/kernel/vmlinux.lds.S
@@ -29,8 +29,30 @@ SECTIONS
HEAD_TEXT_SECTION
. = ALIGN(PAGE_SIZE);
+ .text : {
+ _text = .;
+ _stext = .;
+ TEXT_TEXT
+ SCHED_TEXT
+ CPUIDLE_TEXT
+ LOCK_TEXT
+ KPROBES_TEXT
+ ENTRY_TEXT
+ IRQENTRY_TEXT
+ SOFTIRQENTRY_TEXT
+ *(.fixup)
+ _etext = .;
+ }
+
+ . = ALIGN(SECTION_ALIGN);
__init_begin = .;
- INIT_TEXT_SECTION(PAGE_SIZE)
+ __init_text_begin = .;
+ .init.text : AT(ADDR(.init.text) - LOAD_OFFSET) ALIGN(SECTION_ALIGN) { \
+ _sinittext = .; \
+ INIT_TEXT \
+ _einittext = .; \
+ }
+
. = ALIGN(8);
__soc_early_init_table : {
__soc_early_init_table_start = .;
@@ -47,35 +69,28 @@ SECTIONS
{
EXIT_TEXT
}
+
+ __init_text_end = .;
+ . = ALIGN(SECTION_ALIGN);
+#ifdef CONFIG_EFI
+ . = ALIGN(PECOFF_SECTION_ALIGNMENT);
+ __pecoff_text_end = .;
+#endif
+ /* Start of init data section */
+ __init_data_begin = .;
+ INIT_DATA_SECTION(16)
.exit.data :
{
EXIT_DATA
}
PERCPU_SECTION(L1_CACHE_BYTES)
- __init_end = .;
- . = ALIGN(SECTION_ALIGN);
- .text : {
- _text = .;
- _stext = .;
- TEXT_TEXT
- SCHED_TEXT
- CPUIDLE_TEXT
- LOCK_TEXT
- KPROBES_TEXT
- ENTRY_TEXT
- IRQENTRY_TEXT
- SOFTIRQENTRY_TEXT
- *(.fixup)
- _etext = .;
+ .rel.dyn : {
+ *(.rel.dyn*)
}
-#ifdef CONFIG_EFI
- . = ALIGN(PECOFF_SECTION_ALIGNMENT);
- __pecoff_text_end = .;
-#endif
-
- INIT_DATA_SECTION(16)
+ __init_data_end = .;
+ __init_end = .;
/* Start of data section */
_sdata = .;
@@ -105,10 +120,6 @@ SECTIONS
BSS_SECTION(PAGE_SIZE, PAGE_SIZE, 0)
- .rel.dyn : {
- *(.rel.dyn*)
- }
-
#ifdef CONFIG_EFI
. = ALIGN(PECOFF_SECTION_ALIGNMENT);
__pecoff_data_virt_size = ABSOLUTE(. - __pecoff_text_end);