/*
 * Page fault handler for SH with an MMU.
 *
 * Copyright (C) 1999  Niibe Yutaka
 * Copyright (C) 2003 - 2009  Paul Mundt
 *
 * Based on linux/arch/i386/mm/fault.c:
 *  Copyright (C) 1995  Linus Torvalds
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/hardirq.h>
#include <linux/kprobes.h>
#include <linux/perf_event.h>
#include <asm/io_trapped.h>
#include <asm/mmu_context.h>
#include <asm/tlbflush.h>
#include <asm/traps.h>

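/*
 * Give any registered kprobes fault handler a chance to claim the fault
 * first. This only applies to kernel-mode faults; preemption is disabled
 * around kprobe_running(), which inspects per-CPU kprobe state.
 */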
static inline int notify_page_fault(struct pt_regs *regs, int trap)
{
	int ret = 0;

	if (kprobes_built_in() && !user_mode(regs)) {
		preempt_disable();
		if (kprobe_running() && kprobe_fault_handler(regs, trap))
			ret = 1;
		preempt_enable();
	}

	return ret;
}

/*
 * This is useful to dump out the page tables associated with
 * 'addr' in mm 'mm'.
 */
static void show_pte(struct mm_struct *mm, unsigned long addr)
{
	pgd_t *pgd;

	if (mm)
		pgd = mm->pgd;
	else
		pgd = get_TTB();

	printk(KERN_ALERT "pgd = %p\n", pgd);
	pgd += pgd_index(addr);
	printk(KERN_ALERT "[%08lx] *pgd=%0*Lx", addr,
	       sizeof(*pgd) * 2, (u64)pgd_val(*pgd));

	do {
		pud_t *pud;
		pmd_t *pmd;
		pte_t *pte;

		if (pgd_none(*pgd))
			break;

		if (pgd_bad(*pgd)) {
			printk("(bad)");
			break;
		}

		pud = pud_offset(pgd, addr);
		if (PTRS_PER_PUD != 1)
			printk(", *pud=%0*Lx", sizeof(*pud) * 2,
			       (u64)pud_val(*pud));

		if (pud_none(*pud))
			break;

		if (pud_bad(*pud)) {
			printk("(bad)");
			break;
		}

		pmd = pmd_offset(pud, addr);
		if (PTRS_PER_PMD != 1)
			printk(", *pmd=%0*Lx", sizeof(*pmd) * 2,
			       (u64)pmd_val(*pmd));

		if (pmd_none(*pmd))
			break;

		if (pmd_bad(*pmd)) {
			printk("(bad)");
			break;
		}

		/* We must not map this if we have highmem enabled */
		if (PageHighMem(pfn_to_page(pmd_val(*pmd) >> PAGE_SHIFT)))
			break;

		pte = pte_offset_kernel(pmd, addr);
		printk(", *pte=%0*Lx", sizeof(*pte) * 2, (u64)pte_val(*pte));
	} while (0);

	printk("\n");
}

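/*
 * Copy the kernel mappings for 'address' from the reference page table
 * (init_mm.pgd) into the supplied pgd, down to the pmd level. Returns
 * the synchronised pmd, or NULL if the fault cannot be resolved here.
 */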
static inline pmd_t *vmalloc_sync_one(pgd_t *pgd, unsigned long address)
{
	unsigned index = pgd_index(address);
	pgd_t *pgd_k;
	pud_t *pud, *pud_k;
	pmd_t *pmd, *pmd_k;

	pgd += index;
	pgd_k = init_mm.pgd + index;

	if (!pgd_present(*pgd_k))
		return NULL;

	pud = pud_offset(pgd, address);
	pud_k = pud_offset(pgd_k, address);
	if (!pud_present(*pud_k))
		return NULL;

	if (!pud_present(*pud))
		set_pud(pud, *pud_k);

	pmd = pmd_offset(pud, address);
	pmd_k = pmd_offset(pud_k, address);
	if (!pmd_present(*pmd_k))
		return NULL;

	if (!pmd_present(*pmd))
		set_pmd(pmd, *pmd_k);
	else {
		/*
		 * The page tables are fully synchronised so there must
		 * be another reason for the fault. Return NULL here to
		 * signal that we have not taken care of the fault.
		 */
		BUG_ON(pmd_page(*pmd) != pmd_page(*pmd_k));
		return NULL;
	}

	return pmd_k;
}

/*
 * Handle a fault on the vmalloc or module mapping area
 */
static noinline int vmalloc_fault(unsigned long address)
{
	pgd_t *pgd_k;
	pmd_t *pmd_k;
	pte_t *pte_k;

	/* Make sure we are in vmalloc/module/P3 area: */
	if (!(address >= P3SEG && address < P3_ADDR_MAX))
		return -1;

	/*
	 * Synchronize this task's top level page-table
	 * with the 'reference' page table.
	 *
	 * Do _not_ use "current" here. We might be inside
	 * an interrupt in the middle of a task switch..
	 */
	pgd_k = get_TTB();
	pmd_k = vmalloc_sync_one(pgd_k, address);
	if (!pmd_k)
		return -1;

	pte_k = pte_offset_kernel(pmd_k, address);
	if (!pte_present(*pte_k))
		return -1;

	return 0;
}

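/*
 * Anything at or above TASK_SIZE lies in the kernel half of the address
 * space and is handled without consulting the current task's mm.
 */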
static int fault_in_kernel_space(unsigned long address)
{
	return address >= TASK_SIZE;
}

/*
 * This routine handles page faults. It determines the address,
 * and the problem, and then passes it off to one of the appropriate
 * routines.
 */
asmlinkage void __kprobes do_page_fault(struct pt_regs *regs,
					unsigned long writeaccess,
					unsigned long address)
{
	unsigned long vec;
	struct task_struct *tsk;
	struct mm_struct *mm;
	struct vm_area_struct *vma;
	int si_code;
	int fault;
	siginfo_t info;

	tsk = current;
	mm = tsk->mm;
	si_code = SEGV_MAPERR;
	vec = lookup_exception_vector();

	/*
	 * We fault-in kernel-space virtual memory on-demand. The
	 * 'reference' page table is init_mm.pgd.
	 *
	 * NOTE! We MUST NOT take any locks for this case. We may
	 * be in an interrupt or a critical region, and should
	 * only copy the information from the master page table,
	 * nothing more.
	 */
	if (unlikely(fault_in_kernel_space(address))) {
		if (vmalloc_fault(address) >= 0)
			return;
		if (notify_page_fault(regs, vec))
			return;

		goto bad_area_nosemaphore;
	}

	if (unlikely(notify_page_fault(regs, vec)))
		return;

	/* Only enable interrupts if they were on before the fault */
	if ((regs->sr & SR_IMASK) != SR_IMASK)
		local_irq_enable();

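	/* Account the fault with the perf software event infrastructure. */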
	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);

	/*
	 * If we're in an interrupt, have no user context or are running
	 * in an atomic region then we must not take the fault:
	 */
	if (in_atomic() || !mm)
		goto no_context;

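	/* Take mmap_sem for reading so the VMA layout is stable below. */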
	down_read(&mm->mmap_sem);

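	/*
	 * Find the VMA covering the faulting address. An address just
	 * below a VM_GROWSDOWN vma may be a legitimate stack access, so
	 * try to expand the stack before giving up.
	 */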
	vma = find_vma(mm, address);
	if (!vma)
		goto bad_area;
	if (vma->vm_start <= address)
		goto good_area;
	if (!(vma->vm_flags & VM_GROWSDOWN))
		goto bad_area;
	if (expand_stack(vma, address))
		goto bad_area;

	/*
	 * Ok, we have a good vm_area for this memory access, so
	 * we can handle it..
	 */
good_area:
	si_code = SEGV_ACCERR;
	if (writeaccess) {
		if (!(vma->vm_flags & VM_WRITE))
			goto bad_area;
	} else {
		if (!(vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE)))
			goto bad_area;
	}

	/*
	 * If for any reason at all we couldn't handle the fault,
	 * make sure we exit gracefully rather than endlessly redo
	 * the fault.
	 */
	fault = handle_mm_fault(mm, vma, address, writeaccess ? FAULT_FLAG_WRITE : 0);
	if (unlikely(fault & VM_FAULT_ERROR)) {
		if (fault & VM_FAULT_OOM)
			goto out_of_memory;
		else if (fault & VM_FAULT_SIGBUS)
			goto do_sigbus;
		BUG();
	}
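	/* Update the task's fault statistics and notify perf. */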
	if (fault & VM_FAULT_MAJOR) {
		tsk->maj_flt++;
		perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1,
			      regs, address);
	} else {
		tsk->min_flt++;
		perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1,
			      regs, address);
	}

	up_read(&mm->mmap_sem);
	return;

	/*
	 * Something tried to access memory that isn't in our memory map..
	 * Fix it, but check if it's kernel or user first..
	 */
bad_area:
	up_read(&mm->mmap_sem);

bad_area_nosemaphore:
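	/* User mode access: deliver SIGSEGV with the faulting address. */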
	if (user_mode(regs)) {
		info.si_signo = SIGSEGV;
		info.si_errno = 0;
		info.si_code = si_code;
		info.si_addr = (void *)address;
		force_sig_info(SIGSEGV, &info, tsk);
		return;
	}

no_context:
	/* Are we prepared to handle this kernel fault? */
	if (fixup_exception(regs))
		return;

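	/* The access may be to a device region emulated via trapped I/O. */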
	if (handle_trapped_io(regs, address))
		return;
	/*
	 * Oops. The kernel tried to access some bad page. We'll have to
	 * terminate things with extreme prejudice.
	 */

	bust_spinlocks(1);

	if (oops_may_print()) {
		printk(KERN_ALERT
		       "Unable to handle kernel %s at virtual address %08lx\n",
		       (address < PAGE_SIZE) ? "NULL pointer dereference" :
		       "paging request", address);

		show_pte(mm, address);
	}

	die("Oops", regs, writeaccess);
	bust_spinlocks(0);
	do_exit(SIGKILL);

	/*
	 * We ran out of memory, or some other thing happened to us that made
	 * us unable to handle the page fault gracefully.
	 */
out_of_memory:
	up_read(&mm->mmap_sem);
	if (!user_mode(regs))
		goto no_context;
	pagefault_out_of_memory();
	return;

do_sigbus:
	up_read(&mm->mmap_sem);

	/*
	 * Send a sigbus, regardless of whether we were in kernel
	 * or user mode.
	 */
	info.si_signo = SIGBUS;
	info.si_errno = 0;
	info.si_code = BUS_ADRERR;
	info.si_addr = (void *)address;
	force_sig_info(SIGBUS, &info, tsk);

	/* Kernel mode? Handle exceptions or die */
	if (!user_mode(regs))
		goto no_context;
}

/*
 * Called with interrupts disabled. Returns 0 if the TLB miss was
 * handled here, or nonzero so that the generic page fault path can
 * take over.
 */
asmlinkage int __kprobes
handle_tlbmiss(struct pt_regs *regs, unsigned long writeaccess,
	       unsigned long address)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	pte_t entry;

	/*
	 * We don't take page faults for P1, P2, and parts of P4, these
	 * are always mapped, whether it be due to legacy behaviour in
	 * 29-bit mode, or due to PMB configuration in 32-bit mode.
	 */
	if (address >= P3SEG && address < P3_ADDR_MAX) {
		pgd = pgd_offset_k(address);
	} else {
		if (unlikely(address >= TASK_SIZE || !current->mm))
			return 1;

		pgd = pgd_offset(current->mm, address);
	}

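	/*
	 * Walk the page tables in software. Any level that is missing
	 * or invalid means this is not a simple TLB reload, so punt to
	 * the full fault handler by returning nonzero.
	 */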
	pud = pud_offset(pgd, address);
	if (pud_none_or_clear_bad(pud))
		return 1;
	pmd = pmd_offset(pud, address);
	if (pmd_none_or_clear_bad(pmd))
		return 1;
	pte = pte_offset_kernel(pmd, address);
	entry = *pte;
	if (unlikely(pte_none(entry) || pte_not_present(entry)))
		return 1;
	if (unlikely(writeaccess && !pte_write(entry)))
		return 1;

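	/*
	 * The access is permitted by the PTE: mark it young (and dirty
	 * for a write) before reloading it into the TLB.
	 */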
	if (writeaccess)
		entry = pte_mkdirty(entry);
	entry = pte_mkyoung(entry);

	set_pte(pte, entry);

#if defined(CONFIG_CPU_SH4) && !defined(CONFIG_SMP)
	/*
	 * SH-4 does not set MMUCR.RC to the corresponding TLB entry in
	 * the case of an initial page write exception, so we need to
	 * flush it in order to avoid potential TLB entry duplication.
	 */
	if (writeaccess == 2)
		local_flush_tlb_one(get_asid(), address & PAGE_MASK);
#endif

	update_mmu_cache(NULL, address, pte);

	return 0;
}