/*
 * Copyright 2005, Paul Mackerras, IBM Corporation.
 * Copyright 2009, Benjamin Herrenschmidt, IBM Corporation.
 * Copyright 2015-2016, Aneesh Kumar K.V, IBM Corporation.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/sched.h>
#include <asm/pgalloc.h>
#include <asm/tlb.h>

#include "mmu_decl.h"

#ifdef CONFIG_SPARSEMEM_VMEMMAP
| 19 | /* |
| 20 | * On hash-based CPUs, the vmemmap is bolted in the hash table. |
| 21 | * |
| 22 | */ |
| 23 | int __meminit vmemmap_create_mapping(unsigned long start, |
| 24 | unsigned long page_size, |
| 25 | unsigned long phys) |
| 26 | { |
| 27 | int rc = htab_bolt_mapping(start, start + page_size, phys, |
| 28 | pgprot_val(PAGE_KERNEL), |
| 29 | mmu_vmemmap_psize, mmu_kernel_ssize); |
| 30 | if (rc < 0) { |
| 31 | int rc2 = htab_remove_mapping(start, start + page_size, |
| 32 | mmu_vmemmap_psize, |
| 33 | mmu_kernel_ssize); |
| 34 | BUG_ON(rc2 && (rc2 != -ENOENT)); |
| 35 | } |
| 36 | return rc; |
| 37 | } |

#ifdef CONFIG_MEMORY_HOTPLUG
| 40 | void vmemmap_remove_mapping(unsigned long start, |
| 41 | unsigned long page_size) |
| 42 | { |
| 43 | int rc = htab_remove_mapping(start, start + page_size, |
| 44 | mmu_vmemmap_psize, |
| 45 | mmu_kernel_ssize); |
| 46 | BUG_ON((rc < 0) && (rc != -ENOENT)); |
| 47 | WARN_ON(rc == -ENOENT); |
| 48 | } |
#endif
#endif /* CONFIG_SPARSEMEM_VMEMMAP */

/*
 * map_kernel_page currently only called by __ioremap
 * map_kernel_page adds an entry to the ioremap page table
 * and adds an entry to the HPT, possibly bolting it
 *
 * @ea:    kernel effective (virtual) address to map
 * @pa:    physical address to map it to
 * @flags: pte protection bits (passed raw to __pgprot())
 *
 * Returns 0 on success, -ENOMEM if a page-table level (or the bolted
 * hash entry, in the early-boot path) could not be allocated.
 */
int map_kernel_page(unsigned long ea, unsigned long pa, unsigned long flags)
{
	pgd_t *pgdp;
	pud_t *pudp;
	pmd_t *pmdp;
	pte_t *ptep;

	/* Sanity: user address space must fit inside the hash pagetable range. */
	BUILD_BUG_ON(TASK_SIZE_USER64 > H_PGTABLE_RANGE);
	if (slab_is_available()) {
		/*
		 * Normal path: allocators are up, so walk (and populate)
		 * the kernel page tables down to the PTE and install it.
		 */
		pgdp = pgd_offset_k(ea);
		pudp = pud_alloc(&init_mm, pgdp, ea);
		if (!pudp)
			return -ENOMEM;
		pmdp = pmd_alloc(&init_mm, pudp, ea);
		if (!pmdp)
			return -ENOMEM;
		ptep = pte_alloc_kernel(pmdp, ea);
		if (!ptep)
			return -ENOMEM;
		set_pte_at(&init_mm, ea, ptep, pfn_pte(pa >> PAGE_SHIFT,
						       __pgprot(flags)));
	} else {
		/*
		 * If the mm subsystem is not fully up, we cannot create a
		 * linux page table entry for this mapping.  Simply bolt an
		 * entry in the hardware page table.
		 *
		 */
		if (htab_bolt_mapping(ea, ea + PAGE_SIZE, pa, flags,
				      mmu_io_psize, mmu_kernel_ssize)) {
			printk(KERN_ERR "Failed to do bolted mapping IO "
			       "memory at %016lx !\n", pa);
			return -ENOMEM;
		}
	}

	/* Make the new mapping visible to other CPUs before callers use it. */
	smp_wmb();
	return 0;
}