/*
 *	fs/proc/vmcore.c Interface for accessing the crash
 *				 dump from the system's previous life.
 *	Heavily borrowed from fs/proc/kcore.c
 *	Created by: Hariprasad Nellitheertha (hari@in.ibm.com)
 *	Copyright (C) IBM Corporation, 2004. All rights reserved
 *
 */

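/*
 * Overview (summary of what this file implements): after a crash, the
 * kdump kernel boots with elfcorehdr= pointing at ELF core headers
 * prepared by the previous kernel. This file parses those headers,
 * merges the PT_NOTE segments (typically one per crashed CPU) into a
 * single note segment, and exposes the old kernel's memory as a
 * virtual ELF core file at /proc/vmcore. Reads are served from
 * elfcorebuf for the header region and from old memory, via
 * read_from_oldmem(), for everything past it.
 */
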
#include <linux/mm.h>
#include <linux/kcore.h>
#include <linux/user.h>
#include <linux/elf.h>
#include <linux/elfcore.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/highmem.h>
#include <linux/printk.h>
#include <linux/bootmem.h>
#include <linux/init.h>
#include <linux/crash_dump.h>
#include <linux/list.h>
#include <asm/uaccess.h>
#include <asm/io.h>
#include "internal.h"

/* List representing chunks of contiguous memory areas and their offsets in
 * vmcore file.
 */
static LIST_HEAD(vmcore_list);

/* Stores the pointer to the buffer containing kernel ELF core headers. */
static char *elfcorebuf;
static size_t elfcorebuf_sz;
static size_t elfcorebuf_sz_orig;

/* Total size of vmcore file. */
static u64 vmcore_size;

static struct proc_dir_entry *proc_vmcore = NULL;

/*
 * Returns > 0 for RAM pages, 0 for non-RAM pages, < 0 on error.
 * The called function has to take care of module refcounting.
 */
static int (*oldmem_pfn_is_ram)(unsigned long pfn);

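/*
 * Registration of the pfn_is_ram hook. A single slot is provided; a
 * hypervisor-specific driver (the Xen balloon driver is believed to be
 * the in-tree user) can register a callback so that pages known to
 * contain no data, such as ballooned-out pages, are zero-filled rather
 * than read. A minimal sketch of a backend, with hypothetical names:
 *
 *	static int my_pfn_is_ram(unsigned long pfn)
 *	{
 *		return my_hypervisor_page_is_populated(pfn);
 *	}
 *
 *	rc = register_oldmem_pfn_is_ram(&my_pfn_is_ram);
 */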
int register_oldmem_pfn_is_ram(int (*fn)(unsigned long pfn))
{
	if (oldmem_pfn_is_ram)
		return -EBUSY;
	oldmem_pfn_is_ram = fn;
	return 0;
}
EXPORT_SYMBOL_GPL(register_oldmem_pfn_is_ram);

void unregister_oldmem_pfn_is_ram(void)
{
	oldmem_pfn_is_ram = NULL;
	wmb();
}
EXPORT_SYMBOL_GPL(unregister_oldmem_pfn_is_ram);

static int pfn_is_ram(unsigned long pfn)
{
	int (*fn)(unsigned long pfn);
	/* pfn is ram unless fn() checks pagetype */
	int ret = 1;

	/*
	 * Ask hypervisor if the pfn is really ram.
	 * A ballooned page contains no data and reading from such a page
	 * will cause high load in the hypervisor.
	 */
	fn = oldmem_pfn_is_ram;
	if (fn)
		ret = fn(pfn);

	return ret;
}

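/*
 * A read may start at any offset within a page and span several pages,
 * so the loop below splits the request at page boundaries. Pages the
 * hypervisor reports as non-RAM are zero-filled; everything else is
 * copied by the architecture-specific copy_oldmem_page().
 */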
/* Reads up to @count bytes from the oldmem device, starting at *@ppos. */
static ssize_t read_from_oldmem(char *buf, size_t count,
				u64 *ppos, int userbuf)
{
	unsigned long pfn, offset;
	size_t nr_bytes;
	ssize_t read = 0, tmp;

	if (!count)
		return 0;

	offset = (unsigned long)(*ppos % PAGE_SIZE);
	pfn = (unsigned long)(*ppos / PAGE_SIZE);

	do {
		if (count > (PAGE_SIZE - offset))
			nr_bytes = PAGE_SIZE - offset;
		else
			nr_bytes = count;

		/* If pfn is not ram, return zeros for sparse dump files */
		if (pfn_is_ram(pfn) == 0)
			memset(buf, 0, nr_bytes);
		else {
			tmp = copy_oldmem_page(pfn, buf, nr_bytes,
						offset, userbuf);
			if (tmp < 0)
				return tmp;
		}
		*ppos += nr_bytes;
		count -= nr_bytes;
		buf += nr_bytes;
		read += nr_bytes;
		++pfn;
		offset = 0;
	} while (count);

	return read;
}

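/*
 * Resulting file layout served by read_vmcore():
 *
 *	[0, elfcorebuf_sz)	massaged ELF headers, copied from elfcorebuf
 *	[elfcorebuf_sz, ...)	note chunks followed by PT_LOAD memory
 *				chunks, each described by a struct vmcore
 *				on vmcore_list and read from old memory
 */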
/* Reads from the ELF headers and then the crash dump. Returns a negative
 * value on error, otherwise the number of bytes read.
 */
static ssize_t read_vmcore(struct file *file, char __user *buffer,
				size_t buflen, loff_t *fpos)
{
	ssize_t acc = 0, tmp;
	size_t tsz;
	u64 start;
	struct vmcore *m = NULL;

	if (buflen == 0 || *fpos >= vmcore_size)
		return 0;

	/* trim buflen to not go beyond EOF */
	if (buflen > vmcore_size - *fpos)
		buflen = vmcore_size - *fpos;

	/* Read ELF core header */
	if (*fpos < elfcorebuf_sz) {
		tsz = elfcorebuf_sz - *fpos;
		if (buflen < tsz)
			tsz = buflen;
		if (copy_to_user(buffer, elfcorebuf + *fpos, tsz))
			return -EFAULT;
		buflen -= tsz;
		*fpos += tsz;
		buffer += tsz;
		acc += tsz;

		/* leave now if filled buffer already */
		if (buflen == 0)
			return acc;
	}

	list_for_each_entry(m, &vmcore_list, list) {
		if (*fpos < m->offset + m->size) {
			tsz = m->offset + m->size - *fpos;
			if (buflen < tsz)
				tsz = buflen;
			start = m->paddr + *fpos - m->offset;
			tmp = read_from_oldmem(buffer, tsz, &start, 1);
			if (tmp < 0)
				return tmp;
			buflen -= tsz;
			*fpos += tsz;
			buffer += tsz;
			acc += tsz;

			/* leave now if filled buffer already */
			if (buflen == 0)
				return acc;
		}
	}

	return acc;
}

static const struct file_operations proc_vmcore_operations = {
	.read		= read_vmcore,
	.llseek		= default_llseek,
};

static struct vmcore* __init get_new_element(void)
{
	return kzalloc(sizeof(struct vmcore), GFP_KERNEL);
}

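/*
 * The total vmcore size is the (page-aligned) size of the massaged ELF
 * headers plus the p_memsz of every remaining program header, i.e. the
 * merged note segment and each PT_LOAD segment.
 */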
static u64 __init get_vmcore_size_elf64(char *elfptr, size_t elfsz)
{
	int i;
	u64 size;
	Elf64_Ehdr *ehdr_ptr;
	Elf64_Phdr *phdr_ptr;

	ehdr_ptr = (Elf64_Ehdr *)elfptr;
	phdr_ptr = (Elf64_Phdr*)(elfptr + sizeof(Elf64_Ehdr));
	size = elfsz;
	for (i = 0; i < ehdr_ptr->e_phnum; i++) {
		size += phdr_ptr->p_memsz;
		phdr_ptr++;
	}
	return size;
}

static u64 __init get_vmcore_size_elf32(char *elfptr, size_t elfsz)
{
	int i;
	u64 size;
	Elf32_Ehdr *ehdr_ptr;
	Elf32_Phdr *phdr_ptr;

	ehdr_ptr = (Elf32_Ehdr *)elfptr;
	phdr_ptr = (Elf32_Phdr*)(elfptr + sizeof(Elf32_Ehdr));
	size = elfsz;
	for (i = 0; i < ehdr_ptr->e_phnum; i++) {
		size += phdr_ptr->p_memsz;
		phdr_ptr++;
	}
	return size;
}

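/*
 * Note merging, step by step: for every PT_NOTE segment left behind by
 * the old kernel (typically one per crashed CPU), copy the notes from
 * old memory, walk the Elf64_Nhdr entries (name and descriptor sizes
 * are padded to 4 bytes) to compute the real payload size, and queue
 * that region on vmcore_list. Then write one merged PT_NOTE header
 * covering all of them, drop the now-redundant headers, and shrink and
 * page-align the header buffer.
 */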
/* Merges all the PT_NOTE headers into one. */
static int __init merge_note_headers_elf64(char *elfptr, size_t *elfsz,
						struct list_head *vc_list)
{
	int i, nr_ptnote=0, rc=0;
	char *tmp;
	Elf64_Ehdr *ehdr_ptr;
	Elf64_Phdr phdr, *phdr_ptr;
	Elf64_Nhdr *nhdr_ptr;
	u64 phdr_sz = 0, note_off;

	ehdr_ptr = (Elf64_Ehdr *)elfptr;
	phdr_ptr = (Elf64_Phdr*)(elfptr + sizeof(Elf64_Ehdr));
	for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) {
		int j;
		void *notes_section;
		struct vmcore *new;
		u64 offset, max_sz, sz, real_sz = 0;
		if (phdr_ptr->p_type != PT_NOTE)
			continue;
		nr_ptnote++;
		max_sz = phdr_ptr->p_memsz;
		offset = phdr_ptr->p_offset;
		notes_section = kmalloc(max_sz, GFP_KERNEL);
		if (!notes_section)
			return -ENOMEM;
		rc = read_from_oldmem(notes_section, max_sz, &offset, 0);
		if (rc < 0) {
			kfree(notes_section);
			return rc;
		}
		nhdr_ptr = notes_section;
		for (j = 0; j < max_sz; j += sz) {
			if (nhdr_ptr->n_namesz == 0)
				break;
			sz = sizeof(Elf64_Nhdr) +
				((nhdr_ptr->n_namesz + 3) & ~3) +
				((nhdr_ptr->n_descsz + 3) & ~3);
			real_sz += sz;
			nhdr_ptr = (Elf64_Nhdr*)((char*)nhdr_ptr + sz);
		}

		/* Add this contiguous chunk of notes section to vmcore list. */
		new = get_new_element();
		if (!new) {
			kfree(notes_section);
			return -ENOMEM;
		}
		new->paddr = phdr_ptr->p_offset;
		new->size = real_sz;
		list_add_tail(&new->list, vc_list);
		phdr_sz += real_sz;
		kfree(notes_section);
	}

	/* Prepare merged PT_NOTE program header. */
	phdr.p_type = PT_NOTE;
	phdr.p_flags = 0;
	note_off = sizeof(Elf64_Ehdr) +
			(ehdr_ptr->e_phnum - nr_ptnote +1) * sizeof(Elf64_Phdr);
	phdr.p_offset = note_off;
	phdr.p_vaddr = phdr.p_paddr = 0;
	phdr.p_filesz = phdr.p_memsz = phdr_sz;
	phdr.p_align = 0;

	/* Add merged PT_NOTE program header. */
	tmp = elfptr + sizeof(Elf64_Ehdr);
	memcpy(tmp, &phdr, sizeof(phdr));
	tmp += sizeof(phdr);

	/* Remove unwanted PT_NOTE program headers. */
	i = (nr_ptnote - 1) * sizeof(Elf64_Phdr);
	*elfsz = *elfsz - i;
	memmove(tmp, tmp+i, ((*elfsz)-sizeof(Elf64_Ehdr)-sizeof(Elf64_Phdr)));
	memset(elfptr + *elfsz, 0, i);
	*elfsz = roundup(*elfsz, PAGE_SIZE);

	/* Modify e_phnum to reflect merged headers. */
	ehdr_ptr->e_phnum = ehdr_ptr->e_phnum - nr_ptnote + 1;

	return 0;
}

/* Merges all the PT_NOTE headers into one. */
static int __init merge_note_headers_elf32(char *elfptr, size_t *elfsz,
						struct list_head *vc_list)
{
	int i, nr_ptnote=0, rc=0;
	char *tmp;
	Elf32_Ehdr *ehdr_ptr;
	Elf32_Phdr phdr, *phdr_ptr;
	Elf32_Nhdr *nhdr_ptr;
	u64 phdr_sz = 0, note_off;

	ehdr_ptr = (Elf32_Ehdr *)elfptr;
	phdr_ptr = (Elf32_Phdr*)(elfptr + sizeof(Elf32_Ehdr));
	for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) {
		int j;
		void *notes_section;
		struct vmcore *new;
		u64 offset, max_sz, sz, real_sz = 0;
		if (phdr_ptr->p_type != PT_NOTE)
			continue;
		nr_ptnote++;
		max_sz = phdr_ptr->p_memsz;
		offset = phdr_ptr->p_offset;
		notes_section = kmalloc(max_sz, GFP_KERNEL);
		if (!notes_section)
			return -ENOMEM;
		rc = read_from_oldmem(notes_section, max_sz, &offset, 0);
		if (rc < 0) {
			kfree(notes_section);
			return rc;
		}
		nhdr_ptr = notes_section;
		for (j = 0; j < max_sz; j += sz) {
			if (nhdr_ptr->n_namesz == 0)
				break;
			sz = sizeof(Elf32_Nhdr) +
				((nhdr_ptr->n_namesz + 3) & ~3) +
				((nhdr_ptr->n_descsz + 3) & ~3);
			real_sz += sz;
			nhdr_ptr = (Elf32_Nhdr*)((char*)nhdr_ptr + sz);
		}

		/* Add this contiguous chunk of notes section to vmcore list. */
		new = get_new_element();
		if (!new) {
			kfree(notes_section);
			return -ENOMEM;
		}
		new->paddr = phdr_ptr->p_offset;
		new->size = real_sz;
		list_add_tail(&new->list, vc_list);
		phdr_sz += real_sz;
		kfree(notes_section);
	}

	/* Prepare merged PT_NOTE program header. */
	phdr.p_type = PT_NOTE;
	phdr.p_flags = 0;
	note_off = sizeof(Elf32_Ehdr) +
			(ehdr_ptr->e_phnum - nr_ptnote +1) * sizeof(Elf32_Phdr);
	phdr.p_offset = note_off;
	phdr.p_vaddr = phdr.p_paddr = 0;
	phdr.p_filesz = phdr.p_memsz = phdr_sz;
	phdr.p_align = 0;

	/* Add merged PT_NOTE program header. */
	tmp = elfptr + sizeof(Elf32_Ehdr);
	memcpy(tmp, &phdr, sizeof(phdr));
	tmp += sizeof(phdr);

	/* Remove unwanted PT_NOTE program headers. */
	i = (nr_ptnote - 1) * sizeof(Elf32_Phdr);
	*elfsz = *elfsz - i;
	memmove(tmp, tmp+i, ((*elfsz)-sizeof(Elf32_Ehdr)-sizeof(Elf32_Phdr)));
	memset(elfptr + *elfsz, 0, i);
	*elfsz = roundup(*elfsz, PAGE_SIZE);

	/* Modify e_phnum to reflect merged headers. */
	ehdr_ptr->e_phnum = ehdr_ptr->e_phnum - nr_ptnote + 1;

	return 0;
}

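/*
 * In the headers handed over by the old kernel, p_offset of a PT_LOAD
 * entry holds the physical address of the segment; that is why
 * new->paddr is taken from p_offset below. Each PT_LOAD chunk is queued
 * on vmcore_list and its p_offset is then rewritten to the offset the
 * segment will occupy within the virtual /proc/vmcore file.
 */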
/* Add memory chunks represented by program headers to vmcore list. Also update
 * the new offset fields of exported program headers. */
static int __init process_ptload_program_headers_elf64(char *elfptr,
						size_t elfsz,
						struct list_head *vc_list)
{
	int i;
	Elf64_Ehdr *ehdr_ptr;
	Elf64_Phdr *phdr_ptr;
	loff_t vmcore_off;
	struct vmcore *new;

	ehdr_ptr = (Elf64_Ehdr *)elfptr;
	phdr_ptr = (Elf64_Phdr*)(elfptr + sizeof(Elf64_Ehdr)); /* PT_NOTE hdr */

	/* First program header is PT_NOTE header. */
	vmcore_off = elfsz +
			phdr_ptr->p_memsz; /* Note sections */

	for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) {
		if (phdr_ptr->p_type != PT_LOAD)
			continue;

		/* Add this contiguous chunk of memory to vmcore list. */
		new = get_new_element();
		if (!new)
			return -ENOMEM;
		new->paddr = phdr_ptr->p_offset;
		new->size = phdr_ptr->p_memsz;
		list_add_tail(&new->list, vc_list);

		/* Update the program header offset. */
		phdr_ptr->p_offset = vmcore_off;
		vmcore_off = vmcore_off + phdr_ptr->p_memsz;
	}
	return 0;
}

static int __init process_ptload_program_headers_elf32(char *elfptr,
						size_t elfsz,
						struct list_head *vc_list)
{
	int i;
	Elf32_Ehdr *ehdr_ptr;
	Elf32_Phdr *phdr_ptr;
	loff_t vmcore_off;
	struct vmcore *new;

	ehdr_ptr = (Elf32_Ehdr *)elfptr;
	phdr_ptr = (Elf32_Phdr*)(elfptr + sizeof(Elf32_Ehdr)); /* PT_NOTE hdr */

	/* First program header is PT_NOTE header. */
	vmcore_off = elfsz +
			phdr_ptr->p_memsz; /* Note sections */

	for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) {
		if (phdr_ptr->p_type != PT_LOAD)
			continue;

		/* Add this contiguous chunk of memory to vmcore list. */
		new = get_new_element();
		if (!new)
			return -ENOMEM;
		new->paddr = phdr_ptr->p_offset;
		new->size = phdr_ptr->p_memsz;
		list_add_tail(&new->list, vc_list);

		/* Update the program header offset */
		phdr_ptr->p_offset = vmcore_off;
		vmcore_off = vmcore_off + phdr_ptr->p_memsz;
	}
	return 0;
}

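/*
 * vmcore_list is walked in order, so chunk offsets come out strictly
 * increasing; read_vmcore() relies on this when scanning the list for
 * the chunk that covers *fpos.
 */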
/* Sets offset fields of vmcore elements. */
static void __init set_vmcore_list_offsets(size_t elfsz,
						struct list_head *vc_list)
{
	loff_t vmcore_off;
	struct vmcore *m;

	/* Skip Elf header and program headers. */
	vmcore_off = elfsz;

	list_for_each_entry(m, vc_list, list) {
		m->offset = vmcore_off;
		vmcore_off += m->size;
	}
}

static void free_elfcorebuf(void)
{
	free_pages((unsigned long)elfcorebuf, get_order(elfcorebuf_sz_orig));
	elfcorebuf = NULL;
}

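/*
 * Parsing happens in order: read the ELF header from old memory,
 * sanity-check it, copy the whole header block (ehdr plus program
 * headers) into page-backed elfcorebuf, merge the PT_NOTE headers,
 * queue the PT_LOAD chunks, and assign file offsets to everything on
 * vmcore_list. elfcorebuf_sz_orig remembers the allocation size so
 * free_elfcorebuf() frees the right number of pages even after the
 * merge shrinks elfcorebuf_sz.
 */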
static int __init parse_crash_elf64_headers(void)
{
	int rc=0;
	Elf64_Ehdr ehdr;
	u64 addr;

	addr = elfcorehdr_addr;

	/* Read ELF header */
	rc = read_from_oldmem((char*)&ehdr, sizeof(Elf64_Ehdr), &addr, 0);
	if (rc < 0)
		return rc;

	/* Do some basic verification. */
	if (memcmp(ehdr.e_ident, ELFMAG, SELFMAG) != 0 ||
		(ehdr.e_type != ET_CORE) ||
		!vmcore_elf64_check_arch(&ehdr) ||
		ehdr.e_ident[EI_CLASS] != ELFCLASS64 ||
		ehdr.e_ident[EI_VERSION] != EV_CURRENT ||
		ehdr.e_version != EV_CURRENT ||
		ehdr.e_ehsize != sizeof(Elf64_Ehdr) ||
		ehdr.e_phentsize != sizeof(Elf64_Phdr) ||
		ehdr.e_phnum == 0) {
		pr_warn("Warning: Core image elf header is not sane\n");
		return -EINVAL;
	}

	/* Read in all ELF headers. */
	elfcorebuf_sz_orig = sizeof(Elf64_Ehdr) +
				ehdr.e_phnum * sizeof(Elf64_Phdr);
	elfcorebuf_sz = elfcorebuf_sz_orig;
	elfcorebuf = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
					      get_order(elfcorebuf_sz_orig));
	if (!elfcorebuf)
		return -ENOMEM;
	addr = elfcorehdr_addr;
	rc = read_from_oldmem(elfcorebuf, elfcorebuf_sz_orig, &addr, 0);
	if (rc < 0)
		goto fail;

	/* Merge all PT_NOTE headers into one. */
	rc = merge_note_headers_elf64(elfcorebuf, &elfcorebuf_sz, &vmcore_list);
	if (rc)
		goto fail;
	rc = process_ptload_program_headers_elf64(elfcorebuf, elfcorebuf_sz,
							&vmcore_list);
	if (rc)
		goto fail;
	set_vmcore_list_offsets(elfcorebuf_sz, &vmcore_list);
	return 0;
fail:
	free_elfcorebuf();
	return rc;
}

static int __init parse_crash_elf32_headers(void)
{
	int rc=0;
	Elf32_Ehdr ehdr;
	u64 addr;

	addr = elfcorehdr_addr;

	/* Read ELF header */
	rc = read_from_oldmem((char*)&ehdr, sizeof(Elf32_Ehdr), &addr, 0);
	if (rc < 0)
		return rc;

	/* Do some basic verification. */
	if (memcmp(ehdr.e_ident, ELFMAG, SELFMAG) != 0 ||
		(ehdr.e_type != ET_CORE) ||
		!elf_check_arch(&ehdr) ||
		ehdr.e_ident[EI_CLASS] != ELFCLASS32 ||
		ehdr.e_ident[EI_VERSION] != EV_CURRENT ||
		ehdr.e_version != EV_CURRENT ||
		ehdr.e_ehsize != sizeof(Elf32_Ehdr) ||
		ehdr.e_phentsize != sizeof(Elf32_Phdr) ||
		ehdr.e_phnum == 0) {
		pr_warn("Warning: Core image elf header is not sane\n");
		return -EINVAL;
	}

	/* Read in all ELF headers. */
	elfcorebuf_sz_orig = sizeof(Elf32_Ehdr) + ehdr.e_phnum * sizeof(Elf32_Phdr);
	elfcorebuf_sz = elfcorebuf_sz_orig;
	elfcorebuf = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
					      get_order(elfcorebuf_sz_orig));
	if (!elfcorebuf)
		return -ENOMEM;
	addr = elfcorehdr_addr;
	rc = read_from_oldmem(elfcorebuf, elfcorebuf_sz_orig, &addr, 0);
	if (rc < 0)
		goto fail;

	/* Merge all PT_NOTE headers into one. */
	rc = merge_note_headers_elf32(elfcorebuf, &elfcorebuf_sz, &vmcore_list);
	if (rc)
		goto fail;
	rc = process_ptload_program_headers_elf32(elfcorebuf, elfcorebuf_sz,
								&vmcore_list);
	if (rc)
		goto fail;
	set_vmcore_list_offsets(elfcorebuf_sz, &vmcore_list);
	return 0;
fail:
	free_elfcorebuf();
	return rc;
}

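/*
 * Only e_ident[] is read up front: that prefix is enough to tell a
 * 64-bit core from a 32-bit one before the class-specific parser runs.
 */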
static int __init parse_crash_elf_headers(void)
{
	unsigned char e_ident[EI_NIDENT];
	u64 addr;
	int rc=0;

	addr = elfcorehdr_addr;
	rc = read_from_oldmem(e_ident, EI_NIDENT, &addr, 0);
	if (rc < 0)
		return rc;
	if (memcmp(e_ident, ELFMAG, SELFMAG) != 0) {
		pr_warn("Warning: Core image elf header not found\n");
		return -EINVAL;
	}

	if (e_ident[EI_CLASS] == ELFCLASS64) {
		rc = parse_crash_elf64_headers();
		if (rc)
			return rc;

		/* Determine vmcore size. */
		vmcore_size = get_vmcore_size_elf64(elfcorebuf, elfcorebuf_sz);
	} else if (e_ident[EI_CLASS] == ELFCLASS32) {
		rc = parse_crash_elf32_headers();
		if (rc)
			return rc;

		/* Determine vmcore size. */
		vmcore_size = get_vmcore_size_elf32(elfcorebuf, elfcorebuf_sz);
	} else {
		pr_warn("Warning: Core image elf header is not sane\n");
		return -EINVAL;
	}
	return 0;
}

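/*
 * Once vmcore_init() has registered the proc entry, userspace in the
 * kdump environment can save the dump with plain reads, for example
 * (illustrative usage, not part of this file):
 *
 *	cp /proc/vmcore /var/crash/vmcore
 *
 * or with a filtering tool such as makedumpfile.
 */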
/* Init function for vmcore module. */
static int __init vmcore_init(void)
{
	int rc = 0;

	/* If elfcorehdr= has been passed in cmdline, then capture the dump. */
	if (!(is_vmcore_usable()))
		return rc;
	rc = parse_crash_elf_headers();
	if (rc) {
		pr_warn("Kdump: vmcore not initialized\n");
		return rc;
	}

	proc_vmcore = proc_create("vmcore", S_IRUSR, NULL, &proc_vmcore_operations);
	if (proc_vmcore)
		proc_vmcore->size = vmcore_size;
	return 0;
}
module_init(vmcore_init)

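/*
 * vmcore_cleanup() undoes vmcore_init(): it removes the proc entry,
 * empties vmcore_list and frees the header buffer. It is exported for
 * architecture code that may invalidate the saved crash dump at
 * runtime; which callers do so is not visible from this file.
 */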
/* Cleanup function for vmcore module. */
void vmcore_cleanup(void)
{
	struct list_head *pos, *next;

	if (proc_vmcore) {
		proc_remove(proc_vmcore);
		proc_vmcore = NULL;
	}

	/* clear the vmcore list. */
	list_for_each_safe(pos, next, &vmcore_list) {
		struct vmcore *m;

		m = list_entry(pos, struct vmcore, list);
		list_del(&m->list);
		kfree(m);
	}
	free_elfcorebuf();
}
EXPORT_SYMBOL_GPL(vmcore_cleanup);