/*
 *  linux/arch/x86_64/mm/init.c
 *
 *  Copyright (C) 1995  Linus Torvalds
 *  Copyright (C) 2000  Pavel Machek <pavel@ucw.cz>
 *  Copyright (C) 2002,2003 Andi Kleen <ak@suse.de>
 */

#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/initrd.h>
#include <linux/pagemap.h>
#include <linux/bootmem.h>
#include <linux/memblock.h>
#include <linux/proc_fs.h>
#include <linux/pci.h>
#include <linux/pfn.h>
#include <linux/poison.h>
#include <linux/dma-mapping.h>
#include <linux/module.h>
#include <linux/memory.h>
#include <linux/memory_hotplug.h>
#include <linux/nmi.h>
#include <linux/gfp.h>

#include <asm/processor.h>
#include <asm/bios_ebda.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/dma.h>
#include <asm/fixmap.h>
#include <asm/e820.h>
#include <asm/apic.h>
#include <asm/tlb.h>
#include <asm/mmu_context.h>
#include <asm/proto.h>
#include <asm/smp.h>
#include <asm/sections.h>
#include <asm/kdebug.h>
#include <asm/numa.h>
#include <asm/cacheflush.h>
#include <asm/init.h>
#include <asm/uv/uv.h>
#include <asm/setup.h>

static int __init parse_direct_gbpages_off(char *arg)
{
	direct_gbpages = 0;
	return 0;
}
early_param("nogbpages", parse_direct_gbpages_off);

static int __init parse_direct_gbpages_on(char *arg)
{
	direct_gbpages = 1;
	return 0;
}
early_param("gbpages", parse_direct_gbpages_on);

/*
 * NOTE: pagetable_init() allocates all the fixmap pagetables contiguously
 * in physical space, so we can cache the location of the first one and
 * move around without checking the pgd every time.
 */

pteval_t __supported_pte_mask __read_mostly = ~_PAGE_IOMAP;
EXPORT_SYMBOL_GPL(__supported_pte_mask);

int force_personality32;

/*
 * noexec32=on|off
 * Control the non-executable heap for 32-bit processes.
 * To control the stack too, use noexec=off.
 *
 * on	PROT_READ does not imply PROT_EXEC for 32-bit processes (default)
 * off	PROT_READ implies PROT_EXEC
 */
static int __init nonx32_setup(char *str)
{
	if (!strcmp(str, "on"))
		force_personality32 &= ~READ_IMPLIES_EXEC;
	else if (!strcmp(str, "off"))
		force_personality32 |= READ_IMPLIES_EXEC;
	return 1;
}
__setup("noexec32=", nonx32_setup);

/*
 * When memory is added or removed, make sure all process mm's have
 * suitable PGD entries in the local PGD-level page.
 */
void sync_global_pgds(unsigned long start, unsigned long end)
{
	unsigned long address;

	for (address = start; address <= end; address += PGDIR_SIZE) {
		const pgd_t *pgd_ref = pgd_offset_k(address);
		struct page *page;

		if (pgd_none(*pgd_ref))
			continue;

		spin_lock(&pgd_lock);
		list_for_each_entry(page, &pgd_list, lru) {
			pgd_t *pgd;
			spinlock_t *pgt_lock;

			pgd = (pgd_t *)page_address(page) + pgd_index(address);
			/* the pgt_lock is only needed on Xen */
			pgt_lock = &pgd_page_get_mm(page)->page_table_lock;
			spin_lock(pgt_lock);

			if (pgd_none(*pgd))
				set_pgd(pgd, *pgd_ref);
			else
				BUG_ON(pgd_page_vaddr(*pgd)
				       != pgd_page_vaddr(*pgd_ref));

			spin_unlock(pgt_lock);
		}
		spin_unlock(&pgd_lock);
	}
}
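
/*
 * Why the above is needed (a summary of the code, not extra mechanism):
 * on x86-64 the kernel half of the PGD is not shared between processes;
 * each pgd page carries its own copies of the kernel entries.  When the
 * init_mm page tables grow a new PGD entry -- as
 * kernel_physical_mapping_init() or vmemmap_populate() below may do --
 * that entry has to be copied into every pgd on pgd_list, which is what
 * sync_global_pgds() does under pgd_lock.
 */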

/*
 * NOTE: This function is marked __ref because it calls the __init function
 * alloc_bootmem_pages(). Doing so is safe ONLY while after_bootmem == 0.
 */
static __ref void *spp_getpage(void)
{
	void *ptr;

	if (after_bootmem)
		ptr = (void *) get_zeroed_page(GFP_ATOMIC | __GFP_NOTRACK);
	else
		ptr = alloc_bootmem_pages(PAGE_SIZE);

	if (!ptr || ((unsigned long)ptr & ~PAGE_MASK)) {
		panic("set_pte_phys: cannot allocate page data %s\n",
			after_bootmem ? "after bootmem" : "");
	}

	pr_debug("spp_getpage %p\n", ptr);

	return ptr;
}

static pud_t *fill_pud(pgd_t *pgd, unsigned long vaddr)
{
	if (pgd_none(*pgd)) {
		pud_t *pud = (pud_t *)spp_getpage();
		pgd_populate(&init_mm, pgd, pud);
		if (pud != pud_offset(pgd, 0))
			printk(KERN_ERR "PAGETABLE BUG #00! %p <-> %p\n",
			       pud, pud_offset(pgd, 0));
	}
	return pud_offset(pgd, vaddr);
}

static pmd_t *fill_pmd(pud_t *pud, unsigned long vaddr)
{
	if (pud_none(*pud)) {
		pmd_t *pmd = (pmd_t *) spp_getpage();
		pud_populate(&init_mm, pud, pmd);
		if (pmd != pmd_offset(pud, 0))
			printk(KERN_ERR "PAGETABLE BUG #01! %p <-> %p\n",
			       pmd, pmd_offset(pud, 0));
	}
	return pmd_offset(pud, vaddr);
}

static pte_t *fill_pte(pmd_t *pmd, unsigned long vaddr)
{
	if (pmd_none(*pmd)) {
		pte_t *pte = (pte_t *) spp_getpage();
		pmd_populate_kernel(&init_mm, pmd, pte);
		if (pte != pte_offset_kernel(pmd, 0))
			printk(KERN_ERR "PAGETABLE BUG #02!\n");
	}
	return pte_offset_kernel(pmd, vaddr);
}

void set_pte_vaddr_pud(pud_t *pud_page, unsigned long vaddr, pte_t new_pte)
{
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	pud = pud_page + pud_index(vaddr);
	pmd = fill_pmd(pud, vaddr);
	pte = fill_pte(pmd, vaddr);

	set_pte(pte, new_pte);

	/*
	 * It's enough to flush this one mapping.
	 * (PGE mappings get flushed as well)
	 */
	__flush_tlb_one(vaddr);
}

void set_pte_vaddr(unsigned long vaddr, pte_t pteval)
{
	pgd_t *pgd;
	pud_t *pud_page;

	pr_debug("set_pte_vaddr %lx to %lx\n", vaddr, native_pte_val(pteval));

	pgd = pgd_offset_k(vaddr);
	if (pgd_none(*pgd)) {
		printk(KERN_ERR
			"PGD FIXMAP MISSING, it should be setup in head.S!\n");
		return;
	}
	pud_page = (pud_t *)pgd_page_vaddr(*pgd);
	set_pte_vaddr_pud(pud_page, vaddr, pteval);
}

pmd_t * __init populate_extra_pmd(unsigned long vaddr)
{
	pgd_t *pgd;
	pud_t *pud;

	pgd = pgd_offset_k(vaddr);
	pud = fill_pud(pgd, vaddr);
	return fill_pmd(pud, vaddr);
}

pte_t * __init populate_extra_pte(unsigned long vaddr)
{
	pmd_t *pmd;

	pmd = populate_extra_pmd(vaddr);
	return fill_pte(pmd, vaddr);
}
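
/*
 * Example use of the helpers above (a sketch; actual callers live
 * outside this file, and vaddr/phys here are hypothetical):
 *
 *	pte_t *pte = populate_extra_pte(vaddr);
 *	set_pte(pte, pfn_pte(phys >> PAGE_SHIFT, PAGE_KERNEL));
 *
 * builds any missing PUD/PMD/PTE levels for vaddr in init_mm via
 * spp_getpage() and returns the pte slot to fill in.
 */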
249
Thomas Gleixner31eedd82008-02-15 17:29:12 +0100250/*
Jack Steiner3a9e1892008-07-01 14:45:32 -0500251 * Create large page table mappings for a range of physical addresses.
252 */
253static void __init __init_extra_mapping(unsigned long phys, unsigned long size,
254 pgprot_t prot)
255{
256 pgd_t *pgd;
257 pud_t *pud;
258 pmd_t *pmd;
259
260 BUG_ON((phys & ~PMD_MASK) || (size & ~PMD_MASK));
261 for (; size; phys += PMD_SIZE, size -= PMD_SIZE) {
262 pgd = pgd_offset_k((unsigned long)__va(phys));
263 if (pgd_none(*pgd)) {
264 pud = (pud_t *) spp_getpage();
265 set_pgd(pgd, __pgd(__pa(pud) | _KERNPG_TABLE |
266 _PAGE_USER));
267 }
268 pud = pud_offset(pgd, (unsigned long)__va(phys));
269 if (pud_none(*pud)) {
270 pmd = (pmd_t *) spp_getpage();
271 set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE |
272 _PAGE_USER));
273 }
274 pmd = pmd_offset(pud, phys);
275 BUG_ON(!pmd_none(*pmd));
276 set_pmd(pmd, __pmd(phys | pgprot_val(prot)));
277 }
278}
279
280void __init init_extra_mapping_wb(unsigned long phys, unsigned long size)
281{
282 __init_extra_mapping(phys, size, PAGE_KERNEL_LARGE);
283}
284
285void __init init_extra_mapping_uc(unsigned long phys, unsigned long size)
286{
287 __init_extra_mapping(phys, size, PAGE_KERNEL_LARGE_NOCACHE);
288}
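
/*
 * Example use (a sketch -- the callers are elsewhere, e.g. mapping
 * chipset/MMR ranges early, and mmr_base here is hypothetical):
 *
 *	init_extra_mapping_uc(mmr_base, PMD_SIZE);
 *
 * installs an uncached 2MB kernel mapping of mmr_base at
 * __va(mmr_base).  Both helpers require phys and size to be
 * PMD-aligned; see the BUG_ON() above.
 */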

/*
 * The head.S code sets up the kernel high mapping:
 *
 *   from __START_KERNEL_map to __START_KERNEL_map + size (== _end - _text)
 *
 * phys_addr holds the negative offset to the kernel, which is added
 * to the compile time generated pmds. This results in invalid pmds up
 * to the point where we hit the physaddr 0 mapping.
 *
 * We limit the mappings to the region from _text to _brk_end.  _brk_end
 * is rounded up to the 2MB boundary. This catches the invalid pmds as
 * well, as they are located before _text:
 */
void __init cleanup_highmap(void)
{
	unsigned long vaddr = __START_KERNEL_map;
	unsigned long vaddr_end = __START_KERNEL_map + (max_pfn_mapped << PAGE_SHIFT);
	unsigned long end = roundup((unsigned long)_brk_end, PMD_SIZE) - 1;
	pmd_t *pmd = level2_kernel_pgt;

	for (; vaddr + PMD_SIZE - 1 < vaddr_end; pmd++, vaddr += PMD_SIZE) {
		if (pmd_none(*pmd))
			continue;
		if (vaddr < (unsigned long) _text || vaddr > end)
			set_pmd(pmd, __pmd(0));
	}
}
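
/*
 * Worked example (numbers purely illustrative): head.S maps the whole
 * kernel-image window with 2MB pmds in level2_kernel_pgt.  If _text
 * sits at __START_KERNEL_map + 0x200000 and _brk_end rounds up to
 * __START_KERNEL_map + 0x1600000, every pmd covering addresses below
 * _text or above that rounded _brk_end is cleared here, leaving only
 * the pmds that actually back the kernel image.
 */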

static __ref void *alloc_low_page(unsigned long *phys)
{
	unsigned long pfn = pgt_buf_end++;
	void *adr;

	if (after_bootmem) {
		adr = (void *)get_zeroed_page(GFP_ATOMIC | __GFP_NOTRACK);
		*phys = __pa(adr);

		return adr;
	}

	if (pfn >= pgt_buf_top)
		panic("alloc_low_page: ran out of memory");

	adr = early_memremap(pfn * PAGE_SIZE, PAGE_SIZE);
	clear_page(adr);
	*phys = pfn * PAGE_SIZE;
	return adr;
}

static __ref void *map_low_page(void *virt)
{
	void *adr;
	unsigned long phys, left;

	if (after_bootmem)
		return virt;

	phys = __pa(virt);
	left = phys & (PAGE_SIZE - 1);
	adr = early_memremap(phys & PAGE_MASK, PAGE_SIZE);
	adr = (void *)(((unsigned long)adr) | left);

	return adr;
}

static __ref void unmap_low_page(void *adr)
{
	if (after_bootmem)
		return;

	early_iounmap((void *)((unsigned long)adr & PAGE_MASK), PAGE_SIZE);
}
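
/*
 * Note on the helpers above: before bootmem is up, page-table pages
 * come out of the pgt_buf_end..pgt_buf_top window (reserved by the
 * early mapping-setup code -- an assumption, since the reservation
 * happens outside this file), and that window is not necessarily
 * covered by the linear mapping yet.  Hence the early_memremap()/
 * early_iounmap() pairs instead of plain __va().
 */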

static unsigned long __meminit
phys_pte_init(pte_t *pte_page, unsigned long addr, unsigned long end,
	      pgprot_t prot)
{
	unsigned pages = 0;
	unsigned long last_map_addr = end;
	int i;

	pte_t *pte = pte_page + pte_index(addr);

	for (i = pte_index(addr); i < PTRS_PER_PTE; i++, addr += PAGE_SIZE, pte++) {

		if (addr >= end) {
			if (!after_bootmem) {
				for (; i < PTRS_PER_PTE; i++, pte++)
					set_pte(pte, __pte(0));
			}
			break;
		}

		/*
		 * We will re-use the existing mapping.
		 * Xen for example has some special requirements, like mapping
		 * pagetable pages as RO. So assume whoever pre-set up
		 * these mappings knew what they were doing.
		 */
		if (pte_val(*pte)) {
			pages++;
			continue;
		}

		if (0)
			printk("   pte=%p addr=%lx pte=%016lx\n",
			       pte, addr, pfn_pte(addr >> PAGE_SHIFT, PAGE_KERNEL).pte);
		pages++;
		set_pte(pte, pfn_pte(addr >> PAGE_SHIFT, prot));
		last_map_addr = (addr & PAGE_MASK) + PAGE_SIZE;
	}

	update_page_count(PG_LEVEL_4K, pages);

	return last_map_addr;
}

static unsigned long __meminit
phys_pmd_init(pmd_t *pmd_page, unsigned long address, unsigned long end,
	      unsigned long page_size_mask, pgprot_t prot)
{
	unsigned long pages = 0, next;
	unsigned long last_map_addr = end;

	int i = pmd_index(address);

	for (; i < PTRS_PER_PMD; i++, address = next) {
		unsigned long pte_phys;
		pmd_t *pmd = pmd_page + pmd_index(address);
		pte_t *pte;
		pgprot_t new_prot = prot;

		if (address >= end) {
			if (!after_bootmem) {
				for (; i < PTRS_PER_PMD; i++, pmd++)
					set_pmd(pmd, __pmd(0));
			}
			break;
		}

		next = (address & PMD_MASK) + PMD_SIZE;

		if (pmd_val(*pmd)) {
			if (!pmd_large(*pmd)) {
				spin_lock(&init_mm.page_table_lock);
				pte = map_low_page((pte_t *)pmd_page_vaddr(*pmd));
				last_map_addr = phys_pte_init(pte, address,
								end, prot);
				unmap_low_page(pte);
				spin_unlock(&init_mm.page_table_lock);
				continue;
			}
			/*
			 * If we are ok with PG_LEVEL_2M mapping, then we will
			 * use the existing mapping.
			 *
			 * Otherwise, we will split the large page mapping but
			 * use the same existing protection bits except for
			 * large page, so that we don't violate Intel's TLB
			 * Application note (317080) which says, while changing
			 * the page sizes, new and old translations should
			 * not differ with respect to page frame and
			 * attributes.
			 */
			if (page_size_mask & (1 << PG_LEVEL_2M)) {
				last_map_addr = next;
				continue;
			}
			new_prot = pte_pgprot(pte_clrhuge(*(pte_t *)pmd));
		}

		if (page_size_mask & (1<<PG_LEVEL_2M)) {
			pages++;
			spin_lock(&init_mm.page_table_lock);
			set_pte((pte_t *)pmd,
				pfn_pte(address >> PAGE_SHIFT,
					__pgprot(pgprot_val(prot) | _PAGE_PSE)));
			spin_unlock(&init_mm.page_table_lock);
			last_map_addr = next;
			continue;
		}

		pte = alloc_low_page(&pte_phys);
		last_map_addr = phys_pte_init(pte, address, end, new_prot);
		unmap_low_page(pte);

		spin_lock(&init_mm.page_table_lock);
		pmd_populate_kernel(&init_mm, pmd, __va(pte_phys));
		spin_unlock(&init_mm.page_table_lock);
	}
	update_page_count(PG_LEVEL_2M, pages);
	return last_map_addr;
}
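
/*
 * Decision summary for phys_pmd_init() (restating the code above, not
 * adding to it):
 *
 *	existing 4KB-backed pmd  -> descend and reuse the pte page
 *	existing 2MB pmd, 2MB ok -> keep it, just advance last_map_addr
 *	existing 2MB pmd, not ok -> split it, preserving the old protections
 *	empty pmd, 2MB allowed   -> set_pte() a _PAGE_PSE entry directly
 *	empty pmd, 4KB only      -> alloc_low_page() a pte page and fill it
 */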

static unsigned long __meminit
phys_pud_init(pud_t *pud_page, unsigned long addr, unsigned long end,
	      unsigned long page_size_mask)
{
	unsigned long pages = 0, next;
	unsigned long last_map_addr = end;
	int i = pud_index(addr);

	for (; i < PTRS_PER_PUD; i++, addr = next) {
		unsigned long pmd_phys;
		pud_t *pud = pud_page + pud_index(addr);
		pmd_t *pmd;
		pgprot_t prot = PAGE_KERNEL;

		if (addr >= end)
			break;

		next = (addr & PUD_MASK) + PUD_SIZE;

		if (!after_bootmem && !e820_any_mapped(addr, next, 0)) {
			set_pud(pud, __pud(0));
			continue;
		}

		if (pud_val(*pud)) {
			if (!pud_large(*pud)) {
				pmd = map_low_page(pmd_offset(pud, 0));
				last_map_addr = phys_pmd_init(pmd, addr, end,
							 page_size_mask, prot);
				unmap_low_page(pmd);
				__flush_tlb_all();
				continue;
			}
			/*
			 * If we are ok with PG_LEVEL_1G mapping, then we will
			 * use the existing mapping.
			 *
			 * Otherwise, we will split the gbpage mapping but use
			 * the same existing protection bits except for large
			 * page, so that we don't violate Intel's TLB
			 * Application note (317080) which says, while changing
			 * the page sizes, new and old translations should
			 * not differ with respect to page frame and
			 * attributes.
			 */
			if (page_size_mask & (1 << PG_LEVEL_1G)) {
				last_map_addr = next;
				continue;
			}
			prot = pte_pgprot(pte_clrhuge(*(pte_t *)pud));
		}

		if (page_size_mask & (1<<PG_LEVEL_1G)) {
			pages++;
			spin_lock(&init_mm.page_table_lock);
			set_pte((pte_t *)pud,
				pfn_pte(addr >> PAGE_SHIFT, PAGE_KERNEL_LARGE));
			spin_unlock(&init_mm.page_table_lock);
			last_map_addr = next;
			continue;
		}

		pmd = alloc_low_page(&pmd_phys);
		last_map_addr = phys_pmd_init(pmd, addr, end, page_size_mask,
					      prot);
		unmap_low_page(pmd);

		spin_lock(&init_mm.page_table_lock);
		pud_populate(&init_mm, pud, __va(pmd_phys));
		spin_unlock(&init_mm.page_table_lock);
	}
	__flush_tlb_all();

	update_page_count(PG_LEVEL_1G, pages);

	return last_map_addr;
}

unsigned long __meminit
kernel_physical_mapping_init(unsigned long start,
			     unsigned long end,
			     unsigned long page_size_mask)
{
	bool pgd_changed = false;
	unsigned long next, last_map_addr = end;
	unsigned long addr;

	start = (unsigned long)__va(start);
	end = (unsigned long)__va(end);
	addr = start;

	for (; start < end; start = next) {
		pgd_t *pgd = pgd_offset_k(start);
		unsigned long pud_phys;
		pud_t *pud;

		next = (start + PGDIR_SIZE) & PGDIR_MASK;
		if (next > end)
			next = end;

		if (pgd_val(*pgd)) {
			pud = map_low_page((pud_t *)pgd_page_vaddr(*pgd));
			last_map_addr = phys_pud_init(pud, __pa(start),
						 __pa(end), page_size_mask);
			unmap_low_page(pud);
			continue;
		}

		pud = alloc_low_page(&pud_phys);
		last_map_addr = phys_pud_init(pud, __pa(start), __pa(next),
						 page_size_mask);
		unmap_low_page(pud);

		spin_lock(&init_mm.page_table_lock);
		pgd_populate(&init_mm, pgd, __va(pud_phys));
		spin_unlock(&init_mm.page_table_lock);
		pgd_changed = true;
	}

	if (pgd_changed)
		sync_global_pgds(addr, end);

	__flush_tlb_all();

	return last_map_addr;
}
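
/*
 * Call sketch (the caller sits outside this file and the range is
 * illustrative): init_memory_mapping() hands us a physical range and a
 * page_size_mask, e.g.
 *
 *	kernel_physical_mapping_init(0, 512UL << 20, 1 << PG_LEVEL_2M);
 *
 * maps the first 512MB at __va() using 2MB pages where possible and
 * returns the last physical address actually mapped.
 */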

#ifndef CONFIG_NUMA
void __init initmem_init(void)
{
	memblock_set_node(0, (phys_addr_t)ULLONG_MAX, 0);
}
#endif

void __init paging_init(void)
{
	sparse_memory_present_with_active_regions(MAX_NUMNODES);
	sparse_init();

	/*
	 * Clear the default N_NORMAL_MEMORY state for node 0.
	 * Note: don't use nodes_clear() here; when NUMA support is not
	 * compiled in, that really clears the state, and a later
	 * node_set_state() will not set it back.
	 */
	node_clear_state(0, N_NORMAL_MEMORY);

	zone_sizes_init();
}

/*
 * Memory hotplug specific functions
 */
#ifdef CONFIG_MEMORY_HOTPLUG
/*
 * After memory hotplug the variables max_pfn, max_low_pfn and high_memory need
 * updating.
 */
static void update_end_of_memory_vars(u64 start, u64 size)
{
	unsigned long end_pfn = PFN_UP(start + size);

	if (end_pfn > max_pfn) {
		max_pfn = end_pfn;
		max_low_pfn = end_pfn;
		high_memory = (void *)__va(max_pfn * PAGE_SIZE - 1) + 1;
	}
}

/*
 * Memory is always added to the NORMAL zone. This means you will never get
 * additional DMA/DMA32 memory.
 */
int arch_add_memory(int nid, u64 start, u64 size)
{
	struct pglist_data *pgdat = NODE_DATA(nid);
	struct zone *zone = pgdat->node_zones + ZONE_NORMAL;
	unsigned long last_mapped_pfn, start_pfn = start >> PAGE_SHIFT;
	unsigned long nr_pages = size >> PAGE_SHIFT;
	int ret;

	last_mapped_pfn = init_memory_mapping(start, start + size);
	if (last_mapped_pfn > max_pfn_mapped)
		max_pfn_mapped = last_mapped_pfn;

	ret = __add_pages(nid, zone, start_pfn, nr_pages);
	WARN_ON_ONCE(ret);

	/* update max_pfn, max_low_pfn and high_memory */
	update_end_of_memory_vars(start, size);

	return ret;
}
EXPORT_SYMBOL_GPL(arch_add_memory);
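
/*
 * Hotplug call path (a sketch; the generic side lives in
 * mm/memory_hotplug.c, so treat the entry point as an assumption):
 * add_memory(nid, start, size) ends up here, we extend the direct
 * mapping with init_memory_mapping(), hand the pfn range to
 * __add_pages() for ZONE_NORMAL, and finally push max_pfn/high_memory
 * out via update_end_of_memory_vars().
 */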

#endif /* CONFIG_MEMORY_HOTPLUG */

static struct kcore_list kcore_vsyscall;

void __init mem_init(void)
{
	long codesize, reservedpages, datasize, initsize;
	unsigned long absent_pages;

	pci_iommu_alloc();

	/* clear_bss() already cleared the empty_zero_page */

	reservedpages = 0;

	/* this will put all low memory onto the freelists */
#ifdef CONFIG_NUMA
	totalram_pages = numa_free_all_bootmem();
#else
	totalram_pages = free_all_bootmem();
#endif

	absent_pages = absent_pages_in_range(0, max_pfn);
	reservedpages = max_pfn - totalram_pages - absent_pages;
	after_bootmem = 1;

	codesize = (unsigned long) &_etext - (unsigned long) &_text;
	datasize = (unsigned long) &_edata - (unsigned long) &_etext;
	initsize = (unsigned long) &__init_end - (unsigned long) &__init_begin;

	/* Register memory areas for /proc/kcore */
	kclist_add(&kcore_vsyscall, (void *)VSYSCALL_START,
			 VSYSCALL_END - VSYSCALL_START, KCORE_OTHER);

	printk(KERN_INFO "Memory: %luk/%luk available (%ldk kernel code, "
			 "%ldk absent, %ldk reserved, %ldk data, %ldk init)\n",
		nr_free_pages() << (PAGE_SHIFT-10),
		max_pfn << (PAGE_SHIFT-10),
		codesize >> 10,
		absent_pages << (PAGE_SHIFT-10),
		reservedpages << (PAGE_SHIFT-10),
		datasize >> 10,
		initsize >> 10);
}

#ifdef CONFIG_DEBUG_RODATA
const int rodata_test_data = 0xC3;
EXPORT_SYMBOL_GPL(rodata_test_data);

int kernel_set_to_readonly;

void set_kernel_text_rw(void)
{
	unsigned long start = PFN_ALIGN(_text);
	unsigned long end = PFN_ALIGN(__stop___ex_table);

	if (!kernel_set_to_readonly)
		return;

	pr_debug("Set kernel text: %lx - %lx for read write\n",
		 start, end);

	/*
	 * Make the kernel identity mapping for text RW. Kernel text
	 * mapping will always be RO. Refer to the comment in
	 * static_protections() in pageattr.c
	 */
	set_memory_rw(start, (end - start) >> PAGE_SHIFT);
}

void set_kernel_text_ro(void)
{
	unsigned long start = PFN_ALIGN(_text);
	unsigned long end = PFN_ALIGN(__stop___ex_table);

	if (!kernel_set_to_readonly)
		return;

	pr_debug("Set kernel text: %lx - %lx for read only\n",
		 start, end);

	/*
	 * Set the kernel identity mapping for text RO.
	 */
	set_memory_ro(start, (end - start) >> PAGE_SHIFT);
}

void mark_rodata_ro(void)
{
	unsigned long start = PFN_ALIGN(_text);
	unsigned long rodata_start =
		((unsigned long)__start_rodata + PAGE_SIZE - 1) & PAGE_MASK;
	unsigned long end = (unsigned long) &__end_rodata_hpage_align;
	unsigned long text_end = PAGE_ALIGN((unsigned long) &__stop___ex_table);
	unsigned long rodata_end = PAGE_ALIGN((unsigned long) &__end_rodata);
	unsigned long data_start = (unsigned long) &_sdata;

	printk(KERN_INFO "Write protecting the kernel read-only data: %luk\n",
	       (end - start) >> 10);
	set_memory_ro(start, (end - start) >> PAGE_SHIFT);

	kernel_set_to_readonly = 1;

	/*
	 * The rodata section (but not the kernel text!) should also be
	 * not-executable.
	 */
	set_memory_nx(rodata_start, (end - rodata_start) >> PAGE_SHIFT);

	rodata_test();

#ifdef CONFIG_CPA_DEBUG
	printk(KERN_INFO "Testing CPA: undo %lx-%lx\n", start, end);
	set_memory_rw(start, (end-start) >> PAGE_SHIFT);

	printk(KERN_INFO "Testing CPA: again\n");
	set_memory_ro(start, (end-start) >> PAGE_SHIFT);
#endif

	free_init_pages("unused kernel memory",
			(unsigned long) page_address(virt_to_page(text_end)),
			(unsigned long)
			 page_address(virt_to_page(rodata_start)));
	free_init_pages("unused kernel memory",
			(unsigned long) page_address(virt_to_page(rodata_end)),
			(unsigned long) page_address(virt_to_page(data_start)));
}

#endif
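
/*
 * Net effect of mark_rodata_ro() (a summary of the code above):
 *
 *	_text .. __end_rodata_hpage_align	read-only
 *	__start_rodata .. __end_rodata_hpage_align	also non-executable
 *
 * and the alignment padding pages between the exception table and
 * rodata, and between rodata and _sdata, are freed back to the page
 * allocator.
 */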

int kern_addr_valid(unsigned long addr)
{
	unsigned long above = ((long)addr) >> __VIRTUAL_MASK_SHIFT;
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	if (above != 0 && above != -1UL)
		return 0;

	pgd = pgd_offset_k(addr);
	if (pgd_none(*pgd))
		return 0;

	pud = pud_offset(pgd, addr);
	if (pud_none(*pud))
		return 0;

	pmd = pmd_offset(pud, addr);
	if (pmd_none(*pmd))
		return 0;

	if (pmd_large(*pmd))
		return pfn_valid(pmd_pfn(*pmd));

	pte = pte_offset_kernel(pmd, addr);
	if (pte_none(*pte))
		return 0;

	return pfn_valid(pte_pfn(*pte));
}
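
/*
 * For example: kern_addr_valid() walks the four levels by hand, so a
 * 2MB direct-mapping address succeeds at the pmd_large() check without
 * a pte walk, while a non-canonical address (one whose bits above
 * __VIRTUAL_MASK_SHIFT are neither all zero nor all one) is rejected
 * up front by the "above" test.
 */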

/*
 * A pseudo VMA to allow ptrace access for the vsyscall page. This only
 * covers the 64-bit vsyscall page now. 32-bit has a real VMA now and does
 * not need special handling anymore:
 */
static struct vm_area_struct gate_vma = {
	.vm_start	= VSYSCALL_START,
	.vm_end		= VSYSCALL_START + (VSYSCALL_MAPPED_PAGES * PAGE_SIZE),
	.vm_page_prot	= PAGE_READONLY_EXEC,
	.vm_flags	= VM_READ | VM_EXEC
};
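
/*
 * With this pseudo-VMA, e.g. a ptrace peek of a vsyscall address in
 * another task can work: get_gate_vma() below hands the access path a
 * VMA with VM_READ|VM_EXEC even though no real vma covers the vsyscall
 * page.
 */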

struct vm_area_struct *get_gate_vma(struct mm_struct *mm)
{
#ifdef CONFIG_IA32_EMULATION
	if (!mm || mm->context.ia32_compat)
		return NULL;
#endif
	return &gate_vma;
}

int in_gate_area(struct mm_struct *mm, unsigned long addr)
{
	struct vm_area_struct *vma = get_gate_vma(mm);

	if (!vma)
		return 0;

	return (addr >= vma->vm_start) && (addr < vma->vm_end);
}

/*
 * Use this when you have no reliable mm, typically from interrupt
 * context. It is less reliable than using a task's mm and may give
 * false positives.
 */
int in_gate_area_no_mm(unsigned long addr)
{
	return (addr >= VSYSCALL_START) && (addr < VSYSCALL_END);
}

const char *arch_vma_name(struct vm_area_struct *vma)
{
	if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso)
		return "[vdso]";
	if (vma == &gate_vma)
		return "[vsyscall]";
	return NULL;
}
#ifdef CONFIG_X86_UV
unsigned long memory_block_size_bytes(void)
{
	if (is_uv_system()) {
		printk(KERN_INFO "UV: memory block size 2GB\n");
		return 2UL * 1024 * 1024 * 1024;
	}
	return MIN_MEMORY_BLOCK_SIZE;
}
#endif
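
/*
 * This feeds the memory-hotplug sysfs layer (an assumption about the
 * caller, which lives outside this file): on UV systems each
 * /sys/devices/system/memory/memoryN block then spans 2GB instead of
 * the default MIN_MEMORY_BLOCK_SIZE.
 */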

#ifdef CONFIG_SPARSEMEM_VMEMMAP
/*
 * Initialise the sparsemem vmemmap using huge-pages at the PMD level.
 */
static long __meminitdata addr_start, addr_end;
static void __meminitdata *p_start, *p_end;
static int __meminitdata node_start;
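
/*
 * addr_start/addr_end and p_start/p_end remember the last contiguous
 * run of vmemmap backing, so that vmemmap_populate() below can coalesce
 * its "[%lx-%lx] PMD -> [%p-%p]" debug lines instead of printing one
 * per 2MB PMD; vmemmap_populate_print_last() flushes the final run.
 */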

int __meminit
vmemmap_populate(struct page *start_page, unsigned long size, int node)
{
	unsigned long addr = (unsigned long)start_page;
	unsigned long end = (unsigned long)(start_page + size);
	unsigned long next;
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;

	for (; addr < end; addr = next) {
		void *p = NULL;

		pgd = vmemmap_pgd_populate(addr, node);
		if (!pgd)
			return -ENOMEM;

		pud = vmemmap_pud_populate(pgd, addr, node);
		if (!pud)
			return -ENOMEM;

		if (!cpu_has_pse) {
			next = (addr + PAGE_SIZE) & PAGE_MASK;
			pmd = vmemmap_pmd_populate(pud, addr, node);

			if (!pmd)
				return -ENOMEM;

			p = vmemmap_pte_populate(pmd, addr, node);

			if (!p)
				return -ENOMEM;

			addr_end = addr + PAGE_SIZE;
			p_end = p + PAGE_SIZE;
		} else {
			next = pmd_addr_end(addr, end);

			pmd = pmd_offset(pud, addr);
			if (pmd_none(*pmd)) {
				pte_t entry;

				p = vmemmap_alloc_block_buf(PMD_SIZE, node);
				if (!p)
					return -ENOMEM;

				entry = pfn_pte(__pa(p) >> PAGE_SHIFT,
						PAGE_KERNEL_LARGE);
				set_pmd(pmd, __pmd(pte_val(entry)));

				/* check to see if we have contiguous blocks */
				if (p_end != p || node_start != node) {
					if (p_start)
						printk(KERN_DEBUG " [%lx-%lx] PMD -> [%p-%p] on node %d\n",
						       addr_start, addr_end-1, p_start, p_end-1, node_start);
					addr_start = addr;
					node_start = node;
					p_start = p;
				}

				addr_end = addr + PMD_SIZE;
				p_end = p + PMD_SIZE;
			} else
				vmemmap_verify((pte_t *)pmd, node, addr, next);
		}
	}
	sync_global_pgds((unsigned long)start_page, end);
	return 0;
}

void __meminit vmemmap_populate_print_last(void)
{
	if (p_start) {
		printk(KERN_DEBUG " [%lx-%lx] PMD -> [%p-%p] on node %d\n",
			addr_start, addr_end-1, p_start, p_end-1, node_start);
		p_start = NULL;
		p_end = NULL;
		node_start = 0;
	}
}
#endif