path: root/arch/s390/kernel
author	Heiko Carstens <heiko.carstens@de.ibm.com>	2012-10-05 16:52:18 +0200
committer	Martin Schwidefsky <schwidefsky@de.ibm.com>	2012-10-09 14:17:01 +0200
commit	c972cc60c23f5a6309292bfcc91a441743ba027e (patch)
tree	04f8fc1d4e78979d28a897a90bba5c3321555967 /arch/s390/kernel
parent	021d48be48481821f6e3f53028915c0571874135 (diff)
s390/vmalloc: have separate modules area
Add a special module area on top of the vmalloc area, which may only be used for modules and BPF JIT generated code. This makes sure that inter-module branches always happen without a trampoline; in addition, having all the code within a 2GB frame is branch prediction unit friendly.

Signed-off-by: Heiko Carstens <heiko.carstens@de.ibm.com>
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
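For illustration only (not part of this patch): a minimal user-space sketch of the resulting split, where the 2GB MODULES_LEN and the 4TB vmax are assumed values chosen to make the arithmetic concrete.

#include <stdio.h>

int main(void)
{
	unsigned long modules_len  = 1UL << 31;                   /* assumed 2GB module area */
	unsigned long vmax         = 1UL << 42;                   /* assumed 3-level layout, 4TB */
	unsigned long vmalloc_size = (128UL << 30) - modules_len; /* default from this patch */

	/* Same arithmetic as the setup.c hunk below. */
	unsigned long modules_end   = vmax;
	unsigned long modules_vaddr = modules_end - modules_len;
	unsigned long vmalloc_end   = modules_vaddr;
	unsigned long vmalloc_start = vmax - vmalloc_size;

	printf("modules: %#lx - %#lx\n", modules_vaddr, modules_end);
	printf("vmalloc: %#lx - %#lx\n", vmalloc_start, vmalloc_end);
	return 0;
}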
Diffstat (limited to 'arch/s390/kernel')
-rw-r--r--	arch/s390/kernel/module.c	11
-rw-r--r--	arch/s390/kernel/setup.c	13
2 files changed, 22 insertions, 2 deletions
diff --git a/arch/s390/kernel/module.c b/arch/s390/kernel/module.c
index 46412b1d7e1..4610deafd95 100644
--- a/arch/s390/kernel/module.c
+++ b/arch/s390/kernel/module.c
@@ -44,6 +44,17 @@
#define PLT_ENTRY_SIZE 20
#endif /* CONFIG_64BIT */
+#ifdef CONFIG_64BIT
+void *module_alloc(unsigned long size)
+{
+	if (PAGE_ALIGN(size) > MODULES_LEN)
+		return NULL;
+	return __vmalloc_node_range(size, 1, MODULES_VADDR, MODULES_END,
+				    GFP_KERNEL, PAGE_KERNEL, -1,
+				    __builtin_return_address(0));
+}
+#endif
+
/* Free memory returned from module_alloc */
void module_free(struct module *mod, void *module_region)
{
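Hypothetical caller sketch (not part of this patch), showing how a user of the new area, such as the BPF JIT mentioned in the commit message, might obtain memory through module_alloc(); the helper name is made up for illustration.

#include <linux/moduleloader.h>

static void *alloc_generated_code(unsigned long size)
{
	/*
	 * module_alloc() now hands out memory between MODULES_VADDR and
	 * MODULES_END, so generated code stays within branch range of
	 * other modules; requests larger than MODULES_LEN return NULL.
	 */
	return module_alloc(size);
}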
diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c
index bfb48f18169..b1f2be9aaaa 100644
--- a/arch/s390/kernel/setup.c
+++ b/arch/s390/kernel/setup.c
@@ -105,6 +105,11 @@ EXPORT_SYMBOL(VMALLOC_END);
struct page *vmemmap;
EXPORT_SYMBOL(vmemmap);
+#ifdef CONFIG_64BIT
+unsigned long MODULES_VADDR;
+unsigned long MODULES_END;
+#endif
+
/* An array with a pointer to the lowcore of every CPU. */
struct _lowcore *lowcore_ptr[NR_CPUS];
EXPORT_SYMBOL(lowcore_ptr);
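MODULES_VADDR and MODULES_END are run-time variables rather than compile-time constants because the address space layout is only decided in setup_memory_end() below. A hypothetical helper (not in this patch) consuming them might look like this:

#ifdef CONFIG_64BIT
static inline int addr_in_module_area(unsigned long addr)
{
	/* MODULES_VADDR/MODULES_END are set up in setup_memory_end(). */
	return addr >= MODULES_VADDR && addr < MODULES_END;
}
#endif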
@@ -544,19 +549,23 @@ static void __init setup_memory_end(void)
	/* Choose kernel address space layout: 2, 3, or 4 levels. */
#ifdef CONFIG_64BIT
-	vmalloc_size = VMALLOC_END ?: 128UL << 30;
+	vmalloc_size = VMALLOC_END ?: (128UL << 30) - MODULES_LEN;
	tmp = (memory_end ?: real_memory_size) / PAGE_SIZE;
	tmp = tmp * (sizeof(struct page) + PAGE_SIZE) + vmalloc_size;
	if (tmp <= (1UL << 42))
		vmax = 1UL << 42;	/* 3-level kernel page table */
	else
		vmax = 1UL << 53;	/* 4-level kernel page table */
+	/* module area is at the end of the kernel address space. */
+	MODULES_END = vmax;
+	MODULES_VADDR = MODULES_END - MODULES_LEN;
+	VMALLOC_END = MODULES_VADDR;
#else
	vmalloc_size = VMALLOC_END ?: 96UL << 20;
	vmax = 1UL << 31;	/* 2-level kernel page table */
-#endif
	/* vmalloc area is at the end of the kernel address space. */
	VMALLOC_END = vmax;
+#endif
	VMALLOC_START = vmax - vmalloc_size;
	/* Split remaining virtual space between 1:1 mapping & vmemmap array */
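For illustration (not part of the patch): the level selection above as a stand-alone calculation, with 4KB pages, a 64-byte struct page, a 2GB MODULES_LEN and 1TB of real memory all assumed to make the numbers concrete.

#include <stdio.h>

int main(void)
{
	unsigned long page_size    = 4096;       /* assumed 4KB pages */
	unsigned long page_struct  = 64;         /* assumed sizeof(struct page) */
	unsigned long modules_len  = 1UL << 31;  /* assumed 2GB module area */
	unsigned long memory_end   = 1UL << 40;  /* assumed 1TB of real memory */
	unsigned long vmalloc_size = (128UL << 30) - modules_len;
	unsigned long tmp, vmax;

	/* Mirror the hunk above: 1:1 mapping + vmemmap array + vmalloc must fit. */
	tmp = memory_end / page_size;
	tmp = tmp * (page_struct + page_size) + vmalloc_size;
	vmax = (tmp <= (1UL << 42)) ? 1UL << 42 : 1UL << 53;

	printf("vmax = %#lx -> %s kernel page table\n",
	       vmax, vmax == (1UL << 42) ? "3-level" : "4-level");
	return 0;
}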