path: root/arch/s390/mm/init.c
author		Heiko Carstens <heiko.carstens@de.ibm.com>	2006-07-01 04:36:31 -0700
committer	Linus Torvalds <torvalds@g5.osdl.org>	2006-07-01 09:56:04 -0700
commit		d882b172512758703ff8d9efb96505eaaee48d2e (patch)
tree		64b61d69b4af1395b80446fbc61f0488fbaeedb3 /arch/s390/mm/init.c
parent		a581c2a4697ee264699b364399b73477af408e00 (diff)
[PATCH] s390: put sys_call_table into .rodata section and write protect it
Put s390's syscall tables into the .rodata section and write protect this
section to prevent misuse of it.

Suggested by Arjan van de Ven <arjan@infradead.org>.

Cc: Arjan van de Ven <arjan@infradead.org>
Cc: Martin Schwidefsky <schwidefsky@de.ibm.com>
Signed-off-by: Heiko Carstens <heiko.carstens@de.ibm.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
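For context on the mechanism: putting the syscall table in .rodata normally just means declaring it const so the linker groups it between the __start_rodata and __end_rodata section symbols; the change below then makes paging_init() map every page frame in that range with a read-only pte instead of PAGE_KERNEL. A minimal sketch of that page-protection decision, reusing the PFN_DOWN/PFN_UP helpers and section symbols the patch relies on (the rodata_prot() helper is purely illustrative; the patch itself open-codes the test inside the pte setup loops):

#include <linux/pfn.h>		/* PFN_DOWN(), PFN_UP() */
#include <asm/sections.h>	/* __start_rodata, __end_rodata */
#include <asm/pgtable.h>	/* PAGE_KERNEL, _PAGE_RO, __pgprot() */

/* Illustrative helper: pick the protection for one page frame. */
static pgprot_t rodata_prot(unsigned long pfn)
{
	unsigned long ro_start_pfn = PFN_DOWN((unsigned long)&__start_rodata);
	unsigned long ro_end_pfn = PFN_UP((unsigned long)&__end_rodata);

	/* Frames backing [__start_rodata, __end_rodata) become read-only;
	 * everything else keeps the normal read-write kernel mapping. */
	if (pfn >= ro_start_pfn && pfn < ro_end_pfn)
		return __pgprot(_PAGE_RO);
	return PAGE_KERNEL;
}

/* Usage inside the pte setup loop would then be:
 *	pte = pfn_pte(pfn, rodata_prot(pfn));
 */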
Diffstat (limited to 'arch/s390/mm/init.c')
-rw-r--r--  arch/s390/mm/init.c  |  35
1 files changed, 21 insertions, 14 deletions
diff --git a/arch/s390/mm/init.c b/arch/s390/mm/init.c
index 81dce185f836..eb6ebfef134a 100644
--- a/arch/s390/mm/init.c
+++ b/arch/s390/mm/init.c
@@ -23,6 +23,7 @@
#include <linux/init.h>
#include <linux/pagemap.h>
#include <linux/bootmem.h>
+#include <linux/pfn.h>
#include <asm/processor.h>
#include <asm/system.h>
@@ -33,6 +34,7 @@
#include <asm/lowcore.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
+#include <asm/sections.h>
DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
@@ -89,17 +91,6 @@ void show_mem(void)
printk("%d pages swap cached\n",cached);
}
-/* References to section boundaries */
-
-extern unsigned long _text;
-extern unsigned long _etext;
-extern unsigned long _edata;
-extern unsigned long __bss_start;
-extern unsigned long _end;
-
-extern unsigned long __init_begin;
-extern unsigned long __init_end;
-
extern unsigned long __initdata zholes_size[];
/*
* paging_init() sets up the page tables
@@ -116,6 +107,10 @@ void __init paging_init(void)
unsigned long pfn = 0;
unsigned long pgdir_k = (__pa(swapper_pg_dir) & PAGE_MASK) | _KERNSEG_TABLE;
static const int ssm_mask = 0x04000000L;
+	unsigned long ro_start_pfn, ro_end_pfn;
+
+	ro_start_pfn = PFN_DOWN((unsigned long)&__start_rodata);
+	ro_end_pfn = PFN_UP((unsigned long)&__end_rodata);
/* unmap whole virtual address space */
@@ -143,7 +138,10 @@ void __init paging_init(void)
pg_dir++;
for (tmp = 0 ; tmp < PTRS_PER_PTE ; tmp++,pg_table++) {
-			pte = pfn_pte(pfn, PAGE_KERNEL);
+			if (pfn >= ro_start_pfn && pfn < ro_end_pfn)
+				pte = pfn_pte(pfn, __pgprot(_PAGE_RO));
+			else
+				pte = pfn_pte(pfn, PAGE_KERNEL);
if (pfn >= max_low_pfn)
pte_clear(&init_mm, 0, &pte);
set_pte(pg_table, pte);
@@ -175,6 +173,7 @@ void __init paging_init(void)
}
#else /* CONFIG_64BIT */
+
void __init paging_init(void)
{
pgd_t * pg_dir;
@@ -186,13 +185,15 @@ void __init paging_init(void)
unsigned long pgdir_k = (__pa(swapper_pg_dir) & PAGE_MASK) |
_KERN_REGION_TABLE;
static const int ssm_mask = 0x04000000L;
-
unsigned long zones_size[MAX_NR_ZONES];
unsigned long dma_pfn, high_pfn;
+	unsigned long ro_start_pfn, ro_end_pfn;
memset(zones_size, 0, sizeof(zones_size));
dma_pfn = MAX_DMA_ADDRESS >> PAGE_SHIFT;
high_pfn = max_low_pfn;
+	ro_start_pfn = PFN_DOWN((unsigned long)&__start_rodata);
+	ro_end_pfn = PFN_UP((unsigned long)&__end_rodata);
if (dma_pfn > high_pfn)
zones_size[ZONE_DMA] = high_pfn;
@@ -231,7 +232,10 @@ void __init paging_init(void)
pmd_populate_kernel(&init_mm, pm_dir, pt_dir);
for (k = 0 ; k < PTRS_PER_PTE ; k++,pt_dir++) {
- pte = pfn_pte(pfn, PAGE_KERNEL);
+ if (pfn >= ro_start_pfn && pfn < ro_end_pfn)
+ pte = pfn_pte(pfn, __pgprot(_PAGE_RO));
+ else
+ pte = pfn_pte(pfn, PAGE_KERNEL);
if (pfn >= max_low_pfn) {
pte_clear(&init_mm, 0, &pte);
continue;
@@ -282,6 +286,9 @@ void __init mem_init(void)
reservedpages << (PAGE_SHIFT-10),
datasize >>10,
initsize >> 10);
+ printk("Write protected kernel read-only data: %#lx - %#lx\n",
+ (unsigned long)&__start_rodata,
+ PFN_ALIGN((unsigned long)&__end_rodata) - 1);
}
void free_initmem(void)
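
A note on the boundary arithmetic used above: PFN_DOWN() and PFN_UP() from <linux/pfn.h> round an address down, respectively up, to a page-frame number, so the protected range is widened to whole pages covering [__start_rodata, __end_rodata), while PFN_ALIGN() in the new mem_init() message rounds the end address up to the next page boundary before subtracting one. A self-contained sketch with an assumed 4 KB page size and made-up addresses standing in for the real section symbols:

#include <stdio.h>

#define PAGE_SHIFT	12			/* assume 4 KB pages, as on s390 */
#define PAGE_SIZE	(1UL << PAGE_SHIFT)
#define PAGE_MASK	(~(PAGE_SIZE - 1))

/* Definitions matching include/linux/pfn.h */
#define PFN_DOWN(x)	((x) >> PAGE_SHIFT)
#define PFN_UP(x)	(((x) + PAGE_SIZE - 1) >> PAGE_SHIFT)
#define PFN_ALIGN(x)	(((unsigned long)(x) + (PAGE_SIZE - 1)) & PAGE_MASK)

int main(void)
{
	unsigned long start = 0x00100234UL;	/* hypothetical &__start_rodata */
	unsigned long end   = 0x00105678UL;	/* hypothetical &__end_rodata   */

	/* Page frames 0x100 through 0x105 inclusive end up mapped read-only. */
	printf("ro pfns: %#lx - %#lx\n", PFN_DOWN(start), PFN_UP(end) - 1);

	/* Same shape as the "Write protected ..." message added to mem_init(). */
	printf("range:   %#lx - %#lx\n", start, PFN_ALIGN(end) - 1);
	return 0;
}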