blob: 6eb8df76e17c5d0289e79d1a94f9bffb1d71fa56 [file] [log] [blame]
Linus Torvalds1da177e2005-04-16 15:20:36 -07001/*
2 * linux/fs/binfmt_elf.c
3 *
4 * These are the functions used to load ELF format executables as used
5 * on SVr4 machines. Information on the format may be found in the book
6 * "UNIX SYSTEM V RELEASE 4 Programmers Guide: Ansi C and Programming Support
7 * Tools".
8 *
9 * Copyright 1993, 1994: Eric Youngdale (ericy@cais.com).
10 */
11
12#include <linux/module.h>
13#include <linux/kernel.h>
14#include <linux/fs.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070015#include <linux/mm.h>
16#include <linux/mman.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070017#include <linux/errno.h>
18#include <linux/signal.h>
19#include <linux/binfmts.h>
20#include <linux/string.h>
21#include <linux/file.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070022#include <linux/slab.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070023#include <linux/personality.h>
24#include <linux/elfcore.h>
25#include <linux/init.h>
26#include <linux/highuid.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070027#include <linux/compiler.h>
28#include <linux/highmem.h>
29#include <linux/pagemap.h>
Denys Vlasenko2aa362c2012-10-04 17:15:36 -070030#include <linux/vmalloc.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070031#include <linux/security.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070032#include <linux/random.h>
Jesper Juhlf4e5cc22006-06-23 02:05:35 -070033#include <linux/elf.h>
Alexey Dobriyan7e80d0d2007-05-08 00:28:59 -070034#include <linux/utsname.h>
Daisuke HATAYAMA088e7af2010-03-05 13:44:06 -080035#include <linux/coredump.h>
Frederic Weisbecker6fac4822012-11-13 14:20:55 +010036#include <linux/sched.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070037#include <asm/uaccess.h>
38#include <asm/param.h>
39#include <asm/page.h>
40
Denys Vlasenko2aa362c2012-10-04 17:15:36 -070041#ifndef user_long_t
42#define user_long_t long
43#endif
Denys Vlasenko49ae4d42012-10-04 17:15:35 -070044#ifndef user_siginfo_t
45#define user_siginfo_t siginfo_t
46#endif
47
Al Viro71613c32012-10-20 22:00:48 -040048static int load_elf_binary(struct linux_binprm *bprm);
Jesper Juhlf4e5cc22006-06-23 02:05:35 -070049static int load_elf_library(struct file *);
Andrew Mortonbb1ad822008-01-30 13:31:07 +010050static unsigned long elf_map(struct file *, unsigned long, struct elf_phdr *,
51 int, int, unsigned long);
Linus Torvalds1da177e2005-04-16 15:20:36 -070052
Linus Torvalds1da177e2005-04-16 15:20:36 -070053/*
54 * If we don't support core dumping, then supply a NULL so we
55 * don't even try.
56 */
Christoph Hellwig698ba7b2009-12-15 16:47:37 -080057#ifdef CONFIG_ELF_CORE
Masami Hiramatsuf6151df2009-12-17 15:27:16 -080058static int elf_core_dump(struct coredump_params *cprm);
Linus Torvalds1da177e2005-04-16 15:20:36 -070059#else
60#define elf_core_dump NULL
61#endif
62
/*
 * Segments are aligned to ELF_EXEC_PAGESIZE, which may be larger than
 * the hardware page size; use the larger of the two for all ELF
 * page-rounding arithmetic below.
 */
#if ELF_EXEC_PAGESIZE > PAGE_SIZE
#define ELF_MIN_ALIGN	ELF_EXEC_PAGESIZE
#else
#define ELF_MIN_ALIGN	PAGE_SIZE
#endif

/* Arch may override this; NOTE(review): presumably the e_flags value
   written into core dump ELF headers — confirm against elf_core_dump(). */
#ifndef ELF_CORE_EFLAGS
#define ELF_CORE_EFLAGS	0
#endif

/* Round down / fractional offset / round up, relative to ELF_MIN_ALIGN. */
#define ELF_PAGESTART(_v) ((_v) & ~(unsigned long)(ELF_MIN_ALIGN-1))
#define ELF_PAGEOFFSET(_v) ((_v) & (ELF_MIN_ALIGN-1))
#define ELF_PAGEALIGN(_v) (((_v) + ELF_MIN_ALIGN - 1) & ~(ELF_MIN_ALIGN - 1))
/*
 * Registration record hooking this loader into the binfmt machinery.
 * elf_core_dump may be NULL when CONFIG_ELF_CORE is not set (see above).
 */
static struct linux_binfmt elf_format = {
	.module		= THIS_MODULE,
	.load_binary	= load_elf_binary,
	.load_shlib	= load_elf_library,
	.core_dump	= elf_core_dump,
	.min_coredump	= ELF_EXEC_PAGESIZE,
};
84
/* An address (or error value) that cannot live in the user address space. */
#define BAD_ADDR(x) ((unsigned long)(x) >= TASK_SIZE)
Linus Torvalds1da177e2005-04-16 15:20:36 -070086
87static int set_brk(unsigned long start, unsigned long end)
88{
89 start = ELF_PAGEALIGN(start);
90 end = ELF_PAGEALIGN(end);
91 if (end > start) {
92 unsigned long addr;
Linus Torvaldse4eb1ff2012-04-20 15:35:40 -070093 addr = vm_brk(start, end - start);
Linus Torvalds1da177e2005-04-16 15:20:36 -070094 if (BAD_ADDR(addr))
95 return addr;
96 }
97 current->mm->start_brk = current->mm->brk = end;
98 return 0;
99}
100
Linus Torvalds1da177e2005-04-16 15:20:36 -0700101/* We need to explicitly zero any fractional pages
102 after the data section (i.e. bss). This would
103 contain the junk from the file that should not
Jesper Juhlf4e5cc22006-06-23 02:05:35 -0700104 be in memory
105 */
Linus Torvalds1da177e2005-04-16 15:20:36 -0700106static int padzero(unsigned long elf_bss)
107{
108 unsigned long nbyte;
109
110 nbyte = ELF_PAGEOFFSET(elf_bss);
111 if (nbyte) {
112 nbyte = ELF_MIN_ALIGN - nbyte;
113 if (clear_user((void __user *) elf_bss, nbyte))
114 return -EFAULT;
115 }
116 return 0;
117}
118
/* Let's use some macros to make this stack manipulation a little clearer */
#ifdef CONFIG_STACK_GROWSUP
/* Stack grows toward higher addresses: STACK_ALLOC returns the old sp
   and then advances it; STACK_ROUND rounds *up* to 16 bytes. */
#define STACK_ADD(sp, items) ((elf_addr_t __user *)(sp) + (items))
#define STACK_ROUND(sp, items) \
	((15 + (unsigned long) ((sp) + (items))) &~ 15UL)
#define STACK_ALLOC(sp, len) ({ \
	elf_addr_t __user *old_sp = (elf_addr_t __user *)sp; sp += len; \
	old_sp; })
#else
/* Usual case: stack grows down; STACK_ROUND keeps sp 16-byte aligned
   and STACK_ALLOC returns the new, lower sp. */
#define STACK_ADD(sp, items) ((elf_addr_t __user *)(sp) - (items))
#define STACK_ROUND(sp, items) \
	(((unsigned long) (sp - items)) &~ 15UL)
#define STACK_ALLOC(sp, len) ({ sp -= len ; sp; })
#endif
133
Nathan Lynch483fad12008-07-22 04:48:46 +1000134#ifndef ELF_BASE_PLATFORM
135/*
136 * AT_BASE_PLATFORM indicates the "real" hardware/microarchitecture.
137 * If the arch defines ELF_BASE_PLATFORM (in asm/elf.h), the value
138 * will be copied to the user stack in the same manner as AT_PLATFORM.
139 */
140#define ELF_BASE_PLATFORM NULL
141#endif
142
/*
 * create_elf_tables - build the new program's initial user stack.
 *
 * Copies the platform strings and a 16-byte AT_RANDOM seed onto the
 * stack, fills current->mm->saved_auxv with the auxiliary vector, then
 * writes argc, the argv/envp pointer arrays and the auxv itself into
 * user memory at bprm->p.
 *
 * @bprm:             binary parameters (argc/envc, stack pointer, ...)
 * @exec:             ELF header of the binary being executed (AT_PHDR etc.)
 * @load_addr:        load address of the binary, used for AT_PHDR
 * @interp_load_addr: load address of the interpreter, used for AT_BASE
 *
 * Returns 0 on success; -EFAULT on a failed user access, -EINVAL on a
 * malformed argv/envp string.
 */
static int
create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec,
		unsigned long load_addr, unsigned long interp_load_addr)
{
	unsigned long p = bprm->p;
	int argc = bprm->argc;
	int envc = bprm->envc;
	elf_addr_t __user *argv;
	elf_addr_t __user *envp;
	elf_addr_t __user *sp;
	elf_addr_t __user *u_platform;
	elf_addr_t __user *u_base_platform;
	elf_addr_t __user *u_rand_bytes;
	const char *k_platform = ELF_PLATFORM;
	const char *k_base_platform = ELF_BASE_PLATFORM;
	unsigned char k_rand_bytes[16];
	int items;
	elf_addr_t *elf_info;
	int ei_index = 0;
	const struct cred *cred = current_cred();
	struct vm_area_struct *vma;

	/*
	 * In some cases (e.g. Hyper-Threading), we want to avoid L1
	 * evictions by the processes running on the same package. One
	 * thing we can do is to shuffle the initial stack for them.
	 */

	p = arch_align_stack(p);

	/*
	 * If this architecture has a platform capability string, copy it
	 * to userspace.  In some cases (Sparc), this info is impossible
	 * for userspace to get any other way, in others (i386) it is
	 * merely difficult.
	 */
	u_platform = NULL;
	if (k_platform) {
		size_t len = strlen(k_platform) + 1;

		u_platform = (elf_addr_t __user *)STACK_ALLOC(p, len);
		if (__copy_to_user(u_platform, k_platform, len))
			return -EFAULT;
	}

	/*
	 * If this architecture has a "base" platform capability
	 * string, copy it to userspace.
	 */
	u_base_platform = NULL;
	if (k_base_platform) {
		size_t len = strlen(k_base_platform) + 1;

		u_base_platform = (elf_addr_t __user *)STACK_ALLOC(p, len);
		if (__copy_to_user(u_base_platform, k_base_platform, len))
			return -EFAULT;
	}

	/*
	 * Generate 16 random bytes for userspace PRNG seeding.
	 */
	get_random_bytes(k_rand_bytes, sizeof(k_rand_bytes));
	u_rand_bytes = (elf_addr_t __user *)
		       STACK_ALLOC(p, sizeof(k_rand_bytes));
	if (__copy_to_user(u_rand_bytes, k_rand_bytes, sizeof(k_rand_bytes)))
		return -EFAULT;

	/* Create the ELF interpreter info */
	elf_info = (elf_addr_t *)current->mm->saved_auxv;
	/* update AT_VECTOR_SIZE_BASE if the number of NEW_AUX_ENT() changes */
#define NEW_AUX_ENT(id, val) \
	do { \
		elf_info[ei_index++] = id; \
		elf_info[ei_index++] = val; \
	} while (0)

#ifdef ARCH_DLINFO
	/*
	 * ARCH_DLINFO must come first so PPC can do its special alignment of
	 * AUXV.
	 * update AT_VECTOR_SIZE_ARCH if the number of NEW_AUX_ENT() in
	 * ARCH_DLINFO changes
	 */
	ARCH_DLINFO;
#endif
	NEW_AUX_ENT(AT_HWCAP, ELF_HWCAP);
	NEW_AUX_ENT(AT_PAGESZ, ELF_EXEC_PAGESIZE);
	NEW_AUX_ENT(AT_CLKTCK, CLOCKS_PER_SEC);
	NEW_AUX_ENT(AT_PHDR, load_addr + exec->e_phoff);
	NEW_AUX_ENT(AT_PHENT, sizeof(struct elf_phdr));
	NEW_AUX_ENT(AT_PHNUM, exec->e_phnum);
	NEW_AUX_ENT(AT_BASE, interp_load_addr);
	NEW_AUX_ENT(AT_FLAGS, 0);
	NEW_AUX_ENT(AT_ENTRY, exec->e_entry);
	/* IDs are translated into the caller's user namespace view. */
	NEW_AUX_ENT(AT_UID, from_kuid_munged(cred->user_ns, cred->uid));
	NEW_AUX_ENT(AT_EUID, from_kuid_munged(cred->user_ns, cred->euid));
	NEW_AUX_ENT(AT_GID, from_kgid_munged(cred->user_ns, cred->gid));
	NEW_AUX_ENT(AT_EGID, from_kgid_munged(cred->user_ns, cred->egid));
	NEW_AUX_ENT(AT_SECURE, security_bprm_secureexec(bprm));
	NEW_AUX_ENT(AT_RANDOM, (elf_addr_t)(unsigned long)u_rand_bytes);
#ifdef ELF_HWCAP2
	NEW_AUX_ENT(AT_HWCAP2, ELF_HWCAP2);
#endif
	NEW_AUX_ENT(AT_EXECFN, bprm->exec);
	if (k_platform) {
		NEW_AUX_ENT(AT_PLATFORM,
			    (elf_addr_t)(unsigned long)u_platform);
	}
	if (k_base_platform) {
		NEW_AUX_ENT(AT_BASE_PLATFORM,
			    (elf_addr_t)(unsigned long)u_base_platform);
	}
	if (bprm->interp_flags & BINPRM_FLAGS_EXECFD) {
		NEW_AUX_ENT(AT_EXECFD, bprm->interp_data);
	}
#undef NEW_AUX_ENT
	/* AT_NULL is zero; clear the rest too */
	memset(&elf_info[ei_index], 0,
	       sizeof current->mm->saved_auxv - ei_index * sizeof elf_info[0]);

	/* And advance past the AT_NULL entry. */
	ei_index += 2;

	sp = STACK_ADD(p, ei_index);

	/* argv pointers + NULL, envp pointers + NULL, plus argc itself */
	items = (argc + 1) + (envc + 1) + 1;
	bprm->p = STACK_ROUND(sp, items);

	/* Point sp at the lowest address on the stack */
#ifdef CONFIG_STACK_GROWSUP
	sp = (elf_addr_t __user *)bprm->p - items - ei_index;
	bprm->exec = (unsigned long)sp; /* XXX: PARISC HACK */
#else
	sp = (elf_addr_t __user *)bprm->p;
#endif


	/*
	 * Grow the stack manually; some architectures have a limit on how
	 * far ahead a user-space access may be in order to grow the stack.
	 */
	vma = find_extend_vma(current->mm, bprm->p);
	if (!vma)
		return -EFAULT;

	/* Now, let's put argc (and argv, envp if appropriate) on the stack */
	if (__put_user(argc, sp++))
		return -EFAULT;
	argv = sp;
	envp = argv + argc + 1;

	/* Populate argv and envp */
	p = current->mm->arg_end = current->mm->arg_start;
	while (argc-- > 0) {
		size_t len;
		if (__put_user((elf_addr_t)p, argv++))
			return -EFAULT;
		len = strnlen_user((void __user *)p, MAX_ARG_STRLEN);
		if (!len || len > MAX_ARG_STRLEN)
			return -EINVAL;
		p += len;
	}
	if (__put_user(0, argv))
		return -EFAULT;
	current->mm->arg_end = current->mm->env_start = p;
	while (envc-- > 0) {
		size_t len;
		if (__put_user((elf_addr_t)p, envp++))
			return -EFAULT;
		len = strnlen_user((void __user *)p, MAX_ARG_STRLEN);
		if (!len || len > MAX_ARG_STRLEN)
			return -EINVAL;
		p += len;
	}
	if (__put_user(0, envp))
		return -EFAULT;
	current->mm->env_end = p;

	/* Put the elf_info on the stack in the right place. */
	sp = (elf_addr_t __user *)envp + 1;
	if (copy_to_user(sp, elf_info, ei_index * sizeof(elf_addr_t)))
		return -EFAULT;
	return 0;
}
327
James Hoganc07380b2011-05-09 10:58:40 +0100328#ifndef elf_map
329
/*
 * elf_map - mmap one PT_LOAD segment of an ELF image.
 *
 * addr/size/off are rounded so the mapping starts and ends on
 * ELF_MIN_ALIGN boundaries while still covering p_vaddr..p_vaddr+p_filesz.
 *
 * When @total_size is non-zero (only for the first segment of an image),
 * the whole image extent is mapped at once so address-space randomization
 * cannot place later segments on top of other mappings, and the unused
 * tail beyond this segment is immediately unmapped again.
 *
 * Returns the mapped address, or an error address (check with BAD_ADDR).
 */
static unsigned long elf_map(struct file *filep, unsigned long addr,
		struct elf_phdr *eppnt, int prot, int type,
		unsigned long total_size)
{
	unsigned long map_addr;
	unsigned long size = eppnt->p_filesz + ELF_PAGEOFFSET(eppnt->p_vaddr);
	unsigned long off = eppnt->p_offset - ELF_PAGEOFFSET(eppnt->p_vaddr);
	addr = ELF_PAGESTART(addr);
	size = ELF_PAGEALIGN(size);

	/* mmap() will return -EINVAL if given a zero size, but a
	 * segment with zero filesize is perfectly valid */
	if (!size)
		return addr;

	/*
	* total_size is the size of the ELF (interpreter) image.
	* The _first_ mmap needs to know the full size, otherwise
	* randomization might put this image into an overlapping
	* position with the ELF binary image. (since size < total_size)
	* So we first map the 'big' image - and unmap the remainder at
	* the end. (which unmap is needed for ELF images with holes.)
	*/
	if (total_size) {
		total_size = ELF_PAGEALIGN(total_size);
		map_addr = vm_mmap(filep, addr, total_size, prot, type, off);
		if (!BAD_ADDR(map_addr))
			vm_munmap(map_addr+size, total_size-size);
	} else
		map_addr = vm_mmap(filep, addr, size, prot, type, off);

	return(map_addr);
}
363
James Hoganc07380b2011-05-09 10:58:40 +0100364#endif /* !elf_map */
365
Jiri Kosinacc503c12008-01-30 13:31:07 +0100366static unsigned long total_mapping_size(struct elf_phdr *cmds, int nr)
367{
368 int i, first_idx = -1, last_idx = -1;
369
370 for (i = 0; i < nr; i++) {
371 if (cmds[i].p_type == PT_LOAD) {
372 last_idx = i;
373 if (first_idx == -1)
374 first_idx = i;
375 }
376 }
377 if (first_idx == -1)
378 return 0;
379
380 return cmds[last_idx].p_vaddr + cmds[last_idx].p_memsz -
381 ELF_PAGESTART(cmds[first_idx].p_vaddr);
382}
383
384
/* This is much more generalized than the library routine read function,
   so we keep this separate.  Technically the library read function
   is only provided so that we can read a.out libraries that have
   an ELF header */

/*
 * load_elf_interp - map the ELF interpreter (e.g. the dynamic linker).
 *
 * Validates and reads the interpreter's program headers, mmaps every
 * PT_LOAD segment (passing total_size only for the first map, see
 * elf_map()), zero-pads the tail of the file image and extends the bss
 * with vm_brk().
 *
 * @interp_elf_ex:   interpreter's ELF header, already read by the caller
 * @interpreter:     open file for the interpreter binary
 * @interp_map_addr: out: address of the first segment mapping
 * @no_base:         non-zero means ET_DYN segments get no preset base
 *                   (load_addr = -vaddr, so the first mmap chooses it)
 *
 * Returns the interpreter's load bias on success, or an error value /
 * bad address that callers must test with BAD_ADDR().
 */
static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
		struct file *interpreter, unsigned long *interp_map_addr,
		unsigned long no_base)
{
	struct elf_phdr *elf_phdata;
	struct elf_phdr *eppnt;
	unsigned long load_addr = 0;
	int load_addr_set = 0;
	unsigned long last_bss = 0, elf_bss = 0;
	unsigned long error = ~0UL;
	unsigned long total_size;
	int retval, i, size;

	/* First of all, some simple consistency checks */
	if (interp_elf_ex->e_type != ET_EXEC &&
	    interp_elf_ex->e_type != ET_DYN)
		goto out;
	if (!elf_check_arch(interp_elf_ex))
		goto out;
	if (!interpreter->f_op->mmap)
		goto out;

	/*
	 * If the size of this structure has changed, then punt, since
	 * we will be doing the wrong thing.
	 */
	if (interp_elf_ex->e_phentsize != sizeof(struct elf_phdr))
		goto out;
	if (interp_elf_ex->e_phnum < 1 ||
		interp_elf_ex->e_phnum > 65536U / sizeof(struct elf_phdr))
		goto out;

	/* Now read in all of the header information */
	size = sizeof(struct elf_phdr) * interp_elf_ex->e_phnum;
	if (size > ELF_MIN_ALIGN)
		goto out;
	elf_phdata = kmalloc(size, GFP_KERNEL);
	if (!elf_phdata)
		goto out;

	retval = kernel_read(interpreter, interp_elf_ex->e_phoff,
			     (char *)elf_phdata, size);
	error = -EIO;
	if (retval != size) {
		if (retval < 0)
			error = retval;
		goto out_close;
	}

	total_size = total_mapping_size(elf_phdata, interp_elf_ex->e_phnum);
	if (!total_size) {
		error = -EINVAL;
		goto out_close;
	}

	eppnt = elf_phdata;
	for (i = 0; i < interp_elf_ex->e_phnum; i++, eppnt++) {
		if (eppnt->p_type == PT_LOAD) {
			int elf_type = MAP_PRIVATE | MAP_DENYWRITE;
			int elf_prot = 0;
			unsigned long vaddr = 0;
			unsigned long k, map_addr;

			/* Translate segment PF_* flags to mmap PROT_* bits. */
			if (eppnt->p_flags & PF_R)
		    		elf_prot = PROT_READ;
			if (eppnt->p_flags & PF_W)
				elf_prot |= PROT_WRITE;
			if (eppnt->p_flags & PF_X)
				elf_prot |= PROT_EXEC;
			vaddr = eppnt->p_vaddr;
			if (interp_elf_ex->e_type == ET_EXEC || load_addr_set)
				elf_type |= MAP_FIXED;
			else if (no_base && interp_elf_ex->e_type == ET_DYN)
				load_addr = -vaddr;

			/* total_size is passed only to the very first map. */
			map_addr = elf_map(interpreter, load_addr + vaddr,
					eppnt, elf_prot, elf_type, total_size);
			total_size = 0;
			if (!*interp_map_addr)
				*interp_map_addr = map_addr;
			error = map_addr;
			if (BAD_ADDR(map_addr))
				goto out_close;

			if (!load_addr_set &&
			    interp_elf_ex->e_type == ET_DYN) {
				load_addr = map_addr - ELF_PAGESTART(vaddr);
				load_addr_set = 1;
			}

			/*
			 * Check to see if the section's size will overflow the
			 * allowed task size. Note that p_filesz must always be
			 * <= p_memsize so it's only necessary to check p_memsz.
			 */
			k = load_addr + eppnt->p_vaddr;
			if (BAD_ADDR(k) ||
			    eppnt->p_filesz > eppnt->p_memsz ||
			    eppnt->p_memsz > TASK_SIZE ||
			    TASK_SIZE - eppnt->p_memsz < k) {
				error = -ENOMEM;
				goto out_close;
			}

			/*
			 * Find the end of the file mapping for this phdr, and
			 * keep track of the largest address we see for this.
			 */
			k = load_addr + eppnt->p_vaddr + eppnt->p_filesz;
			if (k > elf_bss)
				elf_bss = k;

			/*
			 * Do the same thing for the memory mapping - between
			 * elf_bss and last_bss is the bss section.
			 */
			k = load_addr + eppnt->p_memsz + eppnt->p_vaddr;
			if (k > last_bss)
				last_bss = k;
		}
	}

	if (last_bss > elf_bss) {
		/*
		 * Now fill out the bss section.  First pad the last page up
		 * to the page boundary, and then perform a mmap to make sure
		 * that there are zero-mapped pages up to and including the
		 * last bss page.
		 */
		if (padzero(elf_bss)) {
			error = -EFAULT;
			goto out_close;
		}

		/* What we have mapped so far */
		elf_bss = ELF_PAGESTART(elf_bss + ELF_MIN_ALIGN - 1);

		/* Map the last of the bss segment */
		error = vm_brk(elf_bss, last_bss - elf_bss);
		if (BAD_ADDR(error))
			goto out_close;
	}

	error = load_addr;

out_close:
	kfree(elf_phdata);
out:
	return error;
}
540
Linus Torvalds1da177e2005-04-16 15:20:36 -0700541/*
542 * These are the functions used to load ELF style executables and shared
543 * libraries. There is no binary dependent code anywhere else.
544 */
545
Andi Kleen913bd902006-03-25 16:29:09 +0100546#ifndef STACK_RND_MASK
James Bottomleyd1cabd62007-03-16 13:38:35 -0800547#define STACK_RND_MASK (0x7ff >> (PAGE_SHIFT - 12)) /* 8MB of VA */
Andi Kleen913bd902006-03-25 16:29:09 +0100548#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -0700549
550static unsigned long randomize_stack_top(unsigned long stack_top)
551{
Hector Marco-Gisbert14a3e0c2015-02-14 09:33:50 -0800552 unsigned long random_variable = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700553
Andi Kleenc16b63e02006-09-26 10:52:28 +0200554 if ((current->flags & PF_RANDOMIZE) &&
555 !(current->personality & ADDR_NO_RANDOMIZE)) {
Hector Marco-Gisbert14a3e0c2015-02-14 09:33:50 -0800556 random_variable = (unsigned long) get_random_int();
557 random_variable &= STACK_RND_MASK;
Andi Kleen913bd902006-03-25 16:29:09 +0100558 random_variable <<= PAGE_SHIFT;
559 }
Linus Torvalds1da177e2005-04-16 15:20:36 -0700560#ifdef CONFIG_STACK_GROWSUP
Andi Kleen913bd902006-03-25 16:29:09 +0100561 return PAGE_ALIGN(stack_top) + random_variable;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700562#else
Andi Kleen913bd902006-03-25 16:29:09 +0100563 return PAGE_ALIGN(stack_top) - random_variable;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700564#endif
565}
566
Al Viro71613c32012-10-20 22:00:48 -0400567static int load_elf_binary(struct linux_binprm *bprm)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700568{
569 struct file *interpreter = NULL; /* to shut gcc up */
570 unsigned long load_addr = 0, load_bias = 0;
571 int load_addr_set = 0;
572 char * elf_interpreter = NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700573 unsigned long error;
Jesper Juhlf4e5cc22006-06-23 02:05:35 -0700574 struct elf_phdr *elf_ppnt, *elf_phdata;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700575 unsigned long elf_bss, elf_brk;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700576 int retval, i;
577 unsigned int size;
Jiri Kosinacc503c12008-01-30 13:31:07 +0100578 unsigned long elf_entry;
579 unsigned long interp_load_addr = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700580 unsigned long start_code, end_code, start_data, end_data;
David Daney1a530a62011-03-22 16:34:48 -0700581 unsigned long reloc_func_desc __maybe_unused = 0;
David Rientjes8de61e62006-12-06 20:40:16 -0800582 int executable_stack = EXSTACK_DEFAULT;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700583 unsigned long def_flags = 0;
Al Viro71613c32012-10-20 22:00:48 -0400584 struct pt_regs *regs = current_pt_regs();
Linus Torvalds1da177e2005-04-16 15:20:36 -0700585 struct {
586 struct elfhdr elf_ex;
587 struct elfhdr interp_elf_ex;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700588 } *loc;
589
590 loc = kmalloc(sizeof(*loc), GFP_KERNEL);
591 if (!loc) {
592 retval = -ENOMEM;
593 goto out_ret;
594 }
595
596 /* Get the exec-header */
Jesper Juhlf4e5cc22006-06-23 02:05:35 -0700597 loc->elf_ex = *((struct elfhdr *)bprm->buf);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700598
599 retval = -ENOEXEC;
600 /* First of all, some simple consistency checks */
601 if (memcmp(loc->elf_ex.e_ident, ELFMAG, SELFMAG) != 0)
602 goto out;
603
604 if (loc->elf_ex.e_type != ET_EXEC && loc->elf_ex.e_type != ET_DYN)
605 goto out;
606 if (!elf_check_arch(&loc->elf_ex))
607 goto out;
Al Viro72c2d532013-09-22 16:27:52 -0400608 if (!bprm->file->f_op->mmap)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700609 goto out;
610
611 /* Now read in all of the header information */
Linus Torvalds1da177e2005-04-16 15:20:36 -0700612 if (loc->elf_ex.e_phentsize != sizeof(struct elf_phdr))
613 goto out;
614 if (loc->elf_ex.e_phnum < 1 ||
615 loc->elf_ex.e_phnum > 65536U / sizeof(struct elf_phdr))
616 goto out;
617 size = loc->elf_ex.e_phnum * sizeof(struct elf_phdr);
618 retval = -ENOMEM;
Jesper Juhlf4e5cc22006-06-23 02:05:35 -0700619 elf_phdata = kmalloc(size, GFP_KERNEL);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700620 if (!elf_phdata)
621 goto out;
622
Jesper Juhlf4e5cc22006-06-23 02:05:35 -0700623 retval = kernel_read(bprm->file, loc->elf_ex.e_phoff,
624 (char *)elf_phdata, size);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700625 if (retval != size) {
626 if (retval >= 0)
627 retval = -EIO;
628 goto out_free_ph;
629 }
630
Linus Torvalds1da177e2005-04-16 15:20:36 -0700631 elf_ppnt = elf_phdata;
632 elf_bss = 0;
633 elf_brk = 0;
634
635 start_code = ~0UL;
636 end_code = 0;
637 start_data = 0;
638 end_data = 0;
639
640 for (i = 0; i < loc->elf_ex.e_phnum; i++) {
641 if (elf_ppnt->p_type == PT_INTERP) {
642 /* This is the program interpreter used for
643 * shared libraries - for now assume that this
644 * is an a.out format binary
645 */
Linus Torvalds1da177e2005-04-16 15:20:36 -0700646 retval = -ENOEXEC;
647 if (elf_ppnt->p_filesz > PATH_MAX ||
648 elf_ppnt->p_filesz < 2)
Al Viroe7b9b552009-03-29 16:31:16 -0400649 goto out_free_ph;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700650
651 retval = -ENOMEM;
Jesper Juhl792db3a2006-01-09 20:54:45 -0800652 elf_interpreter = kmalloc(elf_ppnt->p_filesz,
Jesper Juhlf4e5cc22006-06-23 02:05:35 -0700653 GFP_KERNEL);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700654 if (!elf_interpreter)
Al Viroe7b9b552009-03-29 16:31:16 -0400655 goto out_free_ph;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700656
657 retval = kernel_read(bprm->file, elf_ppnt->p_offset,
Jesper Juhlf4e5cc22006-06-23 02:05:35 -0700658 elf_interpreter,
659 elf_ppnt->p_filesz);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700660 if (retval != elf_ppnt->p_filesz) {
661 if (retval >= 0)
662 retval = -EIO;
663 goto out_free_interp;
664 }
665 /* make sure path is NULL terminated */
666 retval = -ENOEXEC;
667 if (elf_interpreter[elf_ppnt->p_filesz - 1] != '\0')
668 goto out_free_interp;
669
Linus Torvalds1da177e2005-04-16 15:20:36 -0700670 interpreter = open_exec(elf_interpreter);
671 retval = PTR_ERR(interpreter);
672 if (IS_ERR(interpreter))
673 goto out_free_interp;
Alexey Dobriyan1fb84492007-01-26 00:57:16 -0800674
675 /*
676 * If the binary is not readable then enforce
677 * mm->dumpable = 0 regardless of the interpreter's
678 * permissions.
679 */
Al Viro1b5d7832011-06-19 12:49:47 -0400680 would_dump(bprm, interpreter);
Alexey Dobriyan1fb84492007-01-26 00:57:16 -0800681
Maciej W. Rozyckib9b99f72015-10-26 15:48:19 +0000682 /* Get the exec headers */
683 retval = kernel_read(interpreter, 0,
684 (void *)&loc->interp_elf_ex,
685 sizeof(loc->interp_elf_ex));
686 if (retval != sizeof(loc->interp_elf_ex)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700687 if (retval >= 0)
688 retval = -EIO;
689 goto out_free_dentry;
690 }
691
Linus Torvalds1da177e2005-04-16 15:20:36 -0700692 break;
693 }
694 elf_ppnt++;
695 }
696
697 elf_ppnt = elf_phdata;
698 for (i = 0; i < loc->elf_ex.e_phnum; i++, elf_ppnt++)
699 if (elf_ppnt->p_type == PT_GNU_STACK) {
700 if (elf_ppnt->p_flags & PF_X)
701 executable_stack = EXSTACK_ENABLE_X;
702 else
703 executable_stack = EXSTACK_DISABLE_X;
704 break;
705 }
Linus Torvalds1da177e2005-04-16 15:20:36 -0700706
707 /* Some simple consistency checks for the interpreter */
708 if (elf_interpreter) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700709 retval = -ELIBBAD;
Andi Kleend20894a2008-02-08 04:21:54 -0800710 /* Not an ELF interpreter */
711 if (memcmp(loc->interp_elf_ex.e_ident, ELFMAG, SELFMAG) != 0)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700712 goto out_free_dentry;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700713 /* Verify the interpreter has a valid arch */
Andi Kleend20894a2008-02-08 04:21:54 -0800714 if (!elf_check_arch(&loc->interp_elf_ex))
Linus Torvalds1da177e2005-04-16 15:20:36 -0700715 goto out_free_dentry;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700716 }
717
Linus Torvalds1da177e2005-04-16 15:20:36 -0700718 /* Flush all traces of the currently running executable */
719 retval = flush_old_exec(bprm);
720 if (retval)
721 goto out_free_dentry;
722
Linus Torvalds1da177e2005-04-16 15:20:36 -0700723 /* OK, This is the point of no return */
Linus Torvalds1da177e2005-04-16 15:20:36 -0700724 current->mm->def_flags = def_flags;
725
726 /* Do this immediately, since STACK_TOP as used in setup_arg_pages
727 may depend on the personality. */
Martin Schwidefsky0b592682008-10-16 15:39:57 +0200728 SET_PERSONALITY(loc->elf_ex);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700729 if (elf_read_implies_exec(loc->elf_ex, executable_stack))
730 current->personality |= READ_IMPLIES_EXEC;
731
Jesper Juhlf4e5cc22006-06-23 02:05:35 -0700732 if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700733 current->flags |= PF_RANDOMIZE;
Linus Torvalds221af7f2010-01-28 22:14:42 -0800734
735 setup_new_exec(bprm);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700736
737 /* Do this so that we can load the interpreter, if need be. We will
738 change some of these later */
Linus Torvalds1da177e2005-04-16 15:20:36 -0700739 retval = setup_arg_pages(bprm, randomize_stack_top(STACK_TOP),
740 executable_stack);
741 if (retval < 0) {
742 send_sig(SIGKILL, current, 0);
743 goto out_free_dentry;
744 }
745
Linus Torvalds1da177e2005-04-16 15:20:36 -0700746 current->mm->start_stack = bprm->p;
747
André Goddard Rosaaf901ca2009-11-14 13:09:05 -0200748 /* Now we do a little grungy work by mmapping the ELF image into
Jiri Kosinacc503c12008-01-30 13:31:07 +0100749 the correct location in memory. */
Jesper Juhlf4e5cc22006-06-23 02:05:35 -0700750 for(i = 0, elf_ppnt = elf_phdata;
751 i < loc->elf_ex.e_phnum; i++, elf_ppnt++) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700752 int elf_prot = 0, elf_flags;
753 unsigned long k, vaddr;
Michael Davidson3d262152015-04-14 15:47:38 -0700754 unsigned long total_size = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700755
756 if (elf_ppnt->p_type != PT_LOAD)
757 continue;
758
759 if (unlikely (elf_brk > elf_bss)) {
760 unsigned long nbyte;
761
762 /* There was a PT_LOAD segment with p_memsz > p_filesz
763 before this one. Map anonymous pages, if needed,
764 and clear the area. */
Mikael Petterssonf670d0e2011-01-12 17:00:02 -0800765 retval = set_brk(elf_bss + load_bias,
766 elf_brk + load_bias);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700767 if (retval) {
768 send_sig(SIGKILL, current, 0);
769 goto out_free_dentry;
770 }
771 nbyte = ELF_PAGEOFFSET(elf_bss);
772 if (nbyte) {
773 nbyte = ELF_MIN_ALIGN - nbyte;
774 if (nbyte > elf_brk - elf_bss)
775 nbyte = elf_brk - elf_bss;
776 if (clear_user((void __user *)elf_bss +
777 load_bias, nbyte)) {
778 /*
779 * This bss-zeroing can fail if the ELF
Jesper Juhlf4e5cc22006-06-23 02:05:35 -0700780 * file specifies odd protections. So
Linus Torvalds1da177e2005-04-16 15:20:36 -0700781 * we don't check the return value
782 */
783 }
784 }
785 }
786
Jesper Juhlf4e5cc22006-06-23 02:05:35 -0700787 if (elf_ppnt->p_flags & PF_R)
788 elf_prot |= PROT_READ;
789 if (elf_ppnt->p_flags & PF_W)
790 elf_prot |= PROT_WRITE;
791 if (elf_ppnt->p_flags & PF_X)
792 elf_prot |= PROT_EXEC;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700793
Jesper Juhlf4e5cc22006-06-23 02:05:35 -0700794 elf_flags = MAP_PRIVATE | MAP_DENYWRITE | MAP_EXECUTABLE;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700795
796 vaddr = elf_ppnt->p_vaddr;
797 if (loc->elf_ex.e_type == ET_EXEC || load_addr_set) {
798 elf_flags |= MAP_FIXED;
799 } else if (loc->elf_ex.e_type == ET_DYN) {
Jesper Juhlf4e5cc22006-06-23 02:05:35 -0700800 /* Try and get dynamic programs out of the way of the
801 * default mmap base, as well as whatever program they
802 * might try to exec. This is because the brk will
803 * follow the loader, and is not movable. */
David Daneye39f5602012-01-10 15:10:21 -0800804#ifdef CONFIG_ARCH_BINFMT_ELF_RANDOMIZE_PIE
Jiri Kosinaa3defbe2011-11-02 13:37:41 -0700805 /* Memory randomization might have been switched off
Jiri Kosinac1d025e2013-04-30 15:27:45 -0700806 * in runtime via sysctl or explicit setting of
807 * personality flags.
Jiri Kosinaa3defbe2011-11-02 13:37:41 -0700808 * If that is the case, retain the original non-zero
809 * load_bias value in order to establish proper
810 * non-randomized mappings.
811 */
812 if (current->flags & PF_RANDOMIZE)
813 load_bias = 0;
814 else
815 load_bias = ELF_PAGESTART(ELF_ET_DYN_BASE - vaddr);
Jiri Kosinacc503c12008-01-30 13:31:07 +0100816#else
Linus Torvalds90cb28e2007-01-06 13:28:21 -0800817 load_bias = ELF_PAGESTART(ELF_ET_DYN_BASE - vaddr);
Jiri Kosinacc503c12008-01-30 13:31:07 +0100818#endif
Michael Davidson3d262152015-04-14 15:47:38 -0700819 total_size = total_mapping_size(elf_phdata,
820 loc->elf_ex.e_phnum);
821 if (!total_size) {
Andrew Morton2e4d4522015-05-28 15:44:24 -0700822 retval = -EINVAL;
Michael Davidson3d262152015-04-14 15:47:38 -0700823 goto out_free_dentry;
824 }
Linus Torvalds1da177e2005-04-16 15:20:36 -0700825 }
826
Jesper Juhlf4e5cc22006-06-23 02:05:35 -0700827 error = elf_map(bprm->file, load_bias + vaddr, elf_ppnt,
Michael Davidson3d262152015-04-14 15:47:38 -0700828 elf_prot, elf_flags, total_size);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700829 if (BAD_ADDR(error)) {
830 send_sig(SIGKILL, current, 0);
Alexey Kuznetsovb140f2512007-05-08 00:31:57 -0700831 retval = IS_ERR((void *)error) ?
832 PTR_ERR((void*)error) : -EINVAL;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700833 goto out_free_dentry;
834 }
835
836 if (!load_addr_set) {
837 load_addr_set = 1;
838 load_addr = (elf_ppnt->p_vaddr - elf_ppnt->p_offset);
839 if (loc->elf_ex.e_type == ET_DYN) {
840 load_bias += error -
841 ELF_PAGESTART(load_bias + vaddr);
842 load_addr += load_bias;
843 reloc_func_desc = load_bias;
844 }
845 }
846 k = elf_ppnt->p_vaddr;
Jesper Juhlf4e5cc22006-06-23 02:05:35 -0700847 if (k < start_code)
848 start_code = k;
849 if (start_data < k)
850 start_data = k;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700851
852 /*
853 * Check to see if the section's size will overflow the
854 * allowed task size. Note that p_filesz must always be
855 * <= p_memsz so it is only necessary to check p_memsz.
856 */
Chuck Ebbertce510592006-07-03 00:24:14 -0700857 if (BAD_ADDR(k) || elf_ppnt->p_filesz > elf_ppnt->p_memsz ||
Linus Torvalds1da177e2005-04-16 15:20:36 -0700858 elf_ppnt->p_memsz > TASK_SIZE ||
859 TASK_SIZE - elf_ppnt->p_memsz < k) {
Jesper Juhlf4e5cc22006-06-23 02:05:35 -0700860 /* set_brk can never work. Avoid overflows. */
Linus Torvalds1da177e2005-04-16 15:20:36 -0700861 send_sig(SIGKILL, current, 0);
Alexey Kuznetsovb140f2512007-05-08 00:31:57 -0700862 retval = -EINVAL;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700863 goto out_free_dentry;
864 }
865
866 k = elf_ppnt->p_vaddr + elf_ppnt->p_filesz;
867
868 if (k > elf_bss)
869 elf_bss = k;
870 if ((elf_ppnt->p_flags & PF_X) && end_code < k)
871 end_code = k;
872 if (end_data < k)
873 end_data = k;
874 k = elf_ppnt->p_vaddr + elf_ppnt->p_memsz;
875 if (k > elf_brk)
876 elf_brk = k;
877 }
878
879 loc->elf_ex.e_entry += load_bias;
880 elf_bss += load_bias;
881 elf_brk += load_bias;
882 start_code += load_bias;
883 end_code += load_bias;
884 start_data += load_bias;
885 end_data += load_bias;
886
887 /* Calling set_brk effectively mmaps the pages that we need
888 * for the bss and break sections. We must do this before
889 * mapping in the interpreter, to make sure it doesn't wind
890 * up getting placed where the bss needs to go.
891 */
892 retval = set_brk(elf_bss, elf_brk);
893 if (retval) {
894 send_sig(SIGKILL, current, 0);
895 goto out_free_dentry;
896 }
akpm@osdl.org6de50512005-10-11 08:29:08 -0700897 if (likely(elf_bss != elf_brk) && unlikely(padzero(elf_bss))) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700898 send_sig(SIGSEGV, current, 0);
899 retval = -EFAULT; /* Nobody gets to see this, but.. */
900 goto out_free_dentry;
901 }
902
903 if (elf_interpreter) {
Alan Cox6eec4822012-10-04 17:13:42 -0700904 unsigned long interp_map_addr = 0;
Jiri Kosinacc503c12008-01-30 13:31:07 +0100905
Andi Kleend20894a2008-02-08 04:21:54 -0800906 elf_entry = load_elf_interp(&loc->interp_elf_ex,
907 interpreter,
908 &interp_map_addr,
909 load_bias);
910 if (!IS_ERR((void *)elf_entry)) {
911 /*
912 * load_elf_interp() returns relocation
913 * adjustment
914 */
915 interp_load_addr = elf_entry;
916 elf_entry += loc->interp_elf_ex.e_entry;
Jiri Kosinacc503c12008-01-30 13:31:07 +0100917 }
Linus Torvalds1da177e2005-04-16 15:20:36 -0700918 if (BAD_ADDR(elf_entry)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700919 force_sig(SIGSEGV, current);
Chuck Ebbertce510592006-07-03 00:24:14 -0700920 retval = IS_ERR((void *)elf_entry) ?
921 (int)elf_entry : -EINVAL;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700922 goto out_free_dentry;
923 }
924 reloc_func_desc = interp_load_addr;
925
926 allow_write_access(interpreter);
927 fput(interpreter);
928 kfree(elf_interpreter);
929 } else {
930 elf_entry = loc->elf_ex.e_entry;
Suresh Siddha5342fba2006-02-26 04:18:28 +0100931 if (BAD_ADDR(elf_entry)) {
Chuck Ebbertce510592006-07-03 00:24:14 -0700932 force_sig(SIGSEGV, current);
933 retval = -EINVAL;
Suresh Siddha5342fba2006-02-26 04:18:28 +0100934 goto out_free_dentry;
935 }
Linus Torvalds1da177e2005-04-16 15:20:36 -0700936 }
937
938 kfree(elf_phdata);
939
Linus Torvalds1da177e2005-04-16 15:20:36 -0700940 set_binfmt(&elf_format);
941
Benjamin Herrenschmidt547ee842005-04-16 15:24:35 -0700942#ifdef ARCH_HAS_SETUP_ADDITIONAL_PAGES
Martin Schwidefskyfc5243d2008-12-25 13:38:35 +0100943 retval = arch_setup_additional_pages(bprm, !!elf_interpreter);
Benjamin Herrenschmidt547ee842005-04-16 15:24:35 -0700944 if (retval < 0) {
945 send_sig(SIGKILL, current, 0);
Roland McGrath18c8baf2005-04-28 15:17:19 -0700946 goto out;
Benjamin Herrenschmidt547ee842005-04-16 15:24:35 -0700947 }
948#endif /* ARCH_HAS_SETUP_ADDITIONAL_PAGES */
949
David Howellsa6f76f22008-11-14 10:39:24 +1100950 install_exec_creds(bprm);
Ollie Wildb6a2fea2007-07-19 01:48:16 -0700951 retval = create_elf_tables(bprm, &loc->elf_ex,
Jesper Juhlf4e5cc22006-06-23 02:05:35 -0700952 load_addr, interp_load_addr);
Ollie Wildb6a2fea2007-07-19 01:48:16 -0700953 if (retval < 0) {
954 send_sig(SIGKILL, current, 0);
955 goto out;
956 }
Linus Torvalds1da177e2005-04-16 15:20:36 -0700957 /* N.B. passed_fileno might not be initialized? */
Linus Torvalds1da177e2005-04-16 15:20:36 -0700958 current->mm->end_code = end_code;
959 current->mm->start_code = start_code;
960 current->mm->start_data = start_data;
961 current->mm->end_data = end_data;
962 current->mm->start_stack = bprm->p;
963
Jiri Kosinac1d171a2008-01-30 13:30:40 +0100964#ifdef arch_randomize_brk
Jiri Kosina4471a672011-04-14 15:22:09 -0700965 if ((current->flags & PF_RANDOMIZE) && (randomize_va_space > 1)) {
Jiri Kosinac1d171a2008-01-30 13:30:40 +0100966 current->mm->brk = current->mm->start_brk =
967 arch_randomize_brk(current->mm);
Jiri Kosina4471a672011-04-14 15:22:09 -0700968#ifdef CONFIG_COMPAT_BRK
969 current->brk_randomized = 1;
970#endif
971 }
Jiri Kosinac1d171a2008-01-30 13:30:40 +0100972#endif
973
Linus Torvalds1da177e2005-04-16 15:20:36 -0700974 if (current->personality & MMAP_PAGE_ZERO) {
975 /* Why this, you ask??? Well SVr4 maps page 0 as read-only,
976 and some applications "depend" upon this behavior.
977 Since we do not have the power to recompile these, we
Jesper Juhlf4e5cc22006-06-23 02:05:35 -0700978 emulate the SVr4 behavior. Sigh. */
Linus Torvalds6be5ceb2012-04-20 17:13:58 -0700979 error = vm_mmap(NULL, 0, PAGE_SIZE, PROT_READ | PROT_EXEC,
Linus Torvalds1da177e2005-04-16 15:20:36 -0700980 MAP_FIXED | MAP_PRIVATE, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700981 }
982
983#ifdef ELF_PLAT_INIT
984 /*
985 * The ABI may specify that certain registers be set up in special
986 * ways (on i386 %edx is the address of a DT_FINI function, for
987 * example. In addition, it may also specify (eg, PowerPC64 ELF)
988 * that the e_entry field is the address of the function descriptor
989 * for the startup routine, rather than the address of the startup
990 * routine itself. This macro performs whatever initialization to
991 * the regs structure is required as well as any relocations to the
992 * function descriptor entries when executing dynamically links apps.
993 */
994 ELF_PLAT_INIT(regs, reloc_func_desc);
995#endif
996
997 start_thread(regs, elf_entry, bprm->p);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700998 retval = 0;
999out:
1000 kfree(loc);
1001out_ret:
1002 return retval;
1003
1004 /* error cleanup */
1005out_free_dentry:
1006 allow_write_access(interpreter);
1007 if (interpreter)
1008 fput(interpreter);
1009out_free_interp:
Jesper Juhlf99d49a2005-11-07 01:01:34 -08001010 kfree(elf_interpreter);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001011out_free_ph:
1012 kfree(elf_phdata);
1013 goto out;
1014}
1015
1016/* This is really simpleminded and specialized - we are loading an
1017 a.out library that is given an ELF header. */
static int load_elf_library(struct file *file)
{
	struct elf_phdr *elf_phdata;
	struct elf_phdr *eppnt;
	unsigned long elf_bss, bss, len;
	int retval, error, i, j;
	struct elfhdr elf_ex;

	/* Read and sanity-check the ELF header of the library image. */
	error = -ENOEXEC;
	retval = kernel_read(file, 0, (char *)&elf_ex, sizeof(elf_ex));
	if (retval != sizeof(elf_ex))
		goto out;

	if (memcmp(elf_ex.e_ident, ELFMAG, SELFMAG) != 0)
		goto out;

	/* First of all, some simple consistency checks */
	if (elf_ex.e_type != ET_EXEC || elf_ex.e_phnum > 2 ||
	    !elf_check_arch(&elf_ex) || !file->f_op->mmap)
		goto out;

	/* Now read in all of the header information */

	j = sizeof(struct elf_phdr) * elf_ex.e_phnum;
	/* j < ELF_MIN_ALIGN because elf_ex.e_phnum <= 2 */

	error = -ENOMEM;
	elf_phdata = kmalloc(j, GFP_KERNEL);
	if (!elf_phdata)
		goto out;

	eppnt = elf_phdata;
	error = -ENOEXEC;
	retval = kernel_read(file, elf_ex.e_phoff, (char *)eppnt, j);
	if (retval != j)
		goto out_free_ph;

	/* Exactly one PT_LOAD segment is supported for such libraries. */
	for (j = 0, i = 0; i<elf_ex.e_phnum; i++)
		if ((eppnt + i)->p_type == PT_LOAD)
			j++;
	if (j != 1)
		goto out_free_ph;

	while (eppnt->p_type != PT_LOAD)
		eppnt++;

	/* Now use mmap to map the library into memory. */
	error = vm_mmap(file,
			ELF_PAGESTART(eppnt->p_vaddr),
			(eppnt->p_filesz +
			 ELF_PAGEOFFSET(eppnt->p_vaddr)),
			PROT_READ | PROT_WRITE | PROT_EXEC,
			MAP_FIXED | MAP_PRIVATE | MAP_DENYWRITE,
			(eppnt->p_offset -
			 ELF_PAGEOFFSET(eppnt->p_vaddr)));
	/* On mismatch 'error' holds the vm_mmap() result, not -ENOEXEC. */
	if (error != ELF_PAGESTART(eppnt->p_vaddr))
		goto out_free_ph;

	/* Zero the tail of the last file-backed page (start of bss). */
	elf_bss = eppnt->p_vaddr + eppnt->p_filesz;
	if (padzero(elf_bss)) {
		error = -EFAULT;
		goto out_free_ph;
	}

	/* Extend with anonymous pages when p_memsz exceeds the file data. */
	len = ELF_PAGESTART(eppnt->p_filesz + eppnt->p_vaddr +
			    ELF_MIN_ALIGN - 1);
	bss = eppnt->p_memsz + eppnt->p_vaddr;
	if (bss > len)
		vm_brk(len, bss - len);	/* NOTE(review): return value unchecked */
	error = 0;

out_free_ph:
	kfree(elf_phdata);
out:
	return error;
}
1094
Christoph Hellwig698ba7b2009-12-15 16:47:37 -08001095#ifdef CONFIG_ELF_CORE
Linus Torvalds1da177e2005-04-16 15:20:36 -07001096/*
1097 * ELF core dumper
1098 *
1099 * Modelled on fs/exec.c:aout_core_dump()
1100 * Jeremy Fitzhardinge <jeremy@sw.oz.au>
1101 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001102
1103/*
Jason Baron909af762012-03-23 15:02:51 -07001104 * The purpose of always_dump_vma() is to make sure that special kernel mappings
1105 * that are useful for post-mortem analysis are included in every core dump.
1106 * In that way we ensure that the core dump is fully interpretable later
1107 * without matching up the same kernel and hardware config to see what PC values
1108 * meant. These special mappings include - vDSO, vsyscall, and other
1109 * architecture specific mappings
1110 */
1111static bool always_dump_vma(struct vm_area_struct *vma)
1112{
1113 /* Any vsyscall mappings? */
1114 if (vma == get_gate_vma(vma->vm_mm))
1115 return true;
1116 /*
1117 * arch_vma_name() returns non-NULL for special architecture mappings,
1118 * such as vDSO sections.
1119 */
1120 if (arch_vma_name(vma))
1121 return true;
1122
1123 return false;
1124}
1125
1126/*
Roland McGrath82df3972007-10-16 23:27:02 -07001127 * Decide what to dump of a segment, part, all or none.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001128 */
/*
 * Returns how many bytes of @vma go into the core dump: 0 (skip it),
 * PAGE_SIZE (just the first page, for ELF-header probing) or the whole
 * VMA, driven by the MMF_DUMP_* filter bits passed in @mm_flags.
 */
static unsigned long vma_dump_size(struct vm_area_struct *vma,
				   unsigned long mm_flags)
{
#define FILTER(type)	(mm_flags & (1UL << MMF_DUMP_##type))

	/* always dump the vdso and vsyscall sections */
	if (always_dump_vma(vma))
		goto whole;

	/* Explicitly excluded via madvise(MADV_DONTDUMP)/VM_DONTDUMP. */
	if (vma->vm_flags & VM_DONTDUMP)
		return 0;

	/* Hugetlb memory check */
	if (vma->vm_flags & VM_HUGETLB) {
		if ((vma->vm_flags & VM_SHARED) && FILTER(HUGETLB_SHARED))
			goto whole;
		if (!(vma->vm_flags & VM_SHARED) && FILTER(HUGETLB_PRIVATE))
			goto whole;
		return 0;
	}

	/* Do not dump I/O mapped devices or special mappings */
	if (vma->vm_flags & VM_IO)
		return 0;

	/* By default, dump shared memory if mapped from an anonymous file. */
	if (vma->vm_flags & VM_SHARED) {
		/* i_nlink == 0 means an unlinked ("anonymous") file. */
		if (file_inode(vma->vm_file)->i_nlink == 0 ?
		    FILTER(ANON_SHARED) : FILTER(MAPPED_SHARED))
			goto whole;
		return 0;
	}

	/* Dump segments that have been written to.  */
	if (vma->anon_vma && FILTER(ANON_PRIVATE))
		goto whole;
	if (vma->vm_file == NULL)
		return 0;

	if (FILTER(MAPPED_PRIVATE))
		goto whole;

	/*
	 * If this looks like the beginning of a DSO or executable mapping,
	 * check for an ELF header.  If we find one, dump the first page to
	 * aid in determining what was mapped here.
	 */
	if (FILTER(ELF_HEADERS) &&
	    vma->vm_pgoff == 0 && (vma->vm_flags & VM_READ)) {
		u32 __user *header = (u32 __user *) vma->vm_start;
		u32 word;
		mm_segment_t fs = get_fs();
		/*
		 * Doing it this way gets the constant folded by GCC.
		 */
		union {
			u32 cmp;
			char elfmag[SELFMAG];
		} magic;
		BUILD_BUG_ON(SELFMAG != sizeof word);
		magic.elfmag[EI_MAG0] = ELFMAG0;
		magic.elfmag[EI_MAG1] = ELFMAG1;
		magic.elfmag[EI_MAG2] = ELFMAG2;
		magic.elfmag[EI_MAG3] = ELFMAG3;
		/*
		 * Switch to the user "segment" for get_user(),
		 * then put back what elf_core_dump() had in place.
		 */
		set_fs(USER_DS);
		if (unlikely(get_user(word, header)))
			word = 0;
		set_fs(fs);
		if (word == magic.cmp)
			return PAGE_SIZE;
	}

#undef	FILTER

	return 0;

whole:
	return vma->vm_end - vma->vm_start;
}
1212
/* An ELF note in memory */
struct memelfnote
{
	const char *name;	/* note name, e.g. "CORE" */
	int type;		/* note type, e.g. NT_PRSTATUS, NT_AUXV */
	unsigned int datasz;	/* payload size in bytes */
	void *data;		/* payload; storage is not owned here */
};
1221
1222static int notesize(struct memelfnote *en)
1223{
1224 int sz;
1225
1226 sz = sizeof(struct elf_note);
1227 sz += roundup(strlen(en->name) + 1, 4);
1228 sz += roundup(en->datasz, 4);
1229
1230 return sz;
1231}
1232
Al Viroecc8c772013-10-05 15:32:35 -04001233static int writenote(struct memelfnote *men, struct coredump_params *cprm)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001234{
1235 struct elf_note en;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001236 en.n_namesz = strlen(men->name) + 1;
1237 en.n_descsz = men->datasz;
1238 en.n_type = men->type;
1239
Al Viroecc8c772013-10-05 15:32:35 -04001240 return dump_emit(cprm, &en, sizeof(en)) &&
Al Viro22a8cb82013-10-08 11:05:01 -04001241 dump_emit(cprm, men->name, en.n_namesz) && dump_align(cprm, 4) &&
1242 dump_emit(cprm, men->data, men->datasz) && dump_align(cprm, 4);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001243}
Linus Torvalds1da177e2005-04-16 15:20:36 -07001244
Roland McGrath3aba4812008-01-30 13:31:44 +01001245static void fill_elf_header(struct elfhdr *elf, int segs,
Zhang Yanfeid3330cf2013-02-21 16:44:20 -08001246 u16 machine, u32 flags)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001247{
Cyrill Gorcunov6970c8e2008-04-29 01:01:18 -07001248 memset(elf, 0, sizeof(*elf));
1249
Linus Torvalds1da177e2005-04-16 15:20:36 -07001250 memcpy(elf->e_ident, ELFMAG, SELFMAG);
1251 elf->e_ident[EI_CLASS] = ELF_CLASS;
1252 elf->e_ident[EI_DATA] = ELF_DATA;
1253 elf->e_ident[EI_VERSION] = EV_CURRENT;
1254 elf->e_ident[EI_OSABI] = ELF_OSABI;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001255
1256 elf->e_type = ET_CORE;
Roland McGrath3aba4812008-01-30 13:31:44 +01001257 elf->e_machine = machine;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001258 elf->e_version = EV_CURRENT;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001259 elf->e_phoff = sizeof(struct elfhdr);
Roland McGrath3aba4812008-01-30 13:31:44 +01001260 elf->e_flags = flags;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001261 elf->e_ehsize = sizeof(struct elfhdr);
1262 elf->e_phentsize = sizeof(struct elf_phdr);
1263 elf->e_phnum = segs;
Cyrill Gorcunov6970c8e2008-04-29 01:01:18 -07001264
Linus Torvalds1da177e2005-04-16 15:20:36 -07001265 return;
1266}
1267
Andrew Morton8d6b5eee2006-09-25 23:32:04 -07001268static void fill_elf_note_phdr(struct elf_phdr *phdr, int sz, loff_t offset)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001269{
1270 phdr->p_type = PT_NOTE;
1271 phdr->p_offset = offset;
1272 phdr->p_vaddr = 0;
1273 phdr->p_paddr = 0;
1274 phdr->p_filesz = sz;
1275 phdr->p_memsz = 0;
1276 phdr->p_flags = 0;
1277 phdr->p_align = 0;
1278 return;
1279}
1280
1281static void fill_note(struct memelfnote *note, const char *name, int type,
1282 unsigned int sz, void *data)
1283{
1284 note->name = name;
1285 note->type = type;
1286 note->datasz = sz;
1287 note->data = data;
1288 return;
1289}
1290
/*
 * fill up all the fields in prstatus from the given task struct, except
 * registers which need to be filled up separately.  @signr is the signal
 * that triggered the dump; it becomes both pr_cursig and si_signo.
 */
static void fill_prstatus(struct elf_prstatus *prstatus,
		struct task_struct *p, long signr)
{
	prstatus->pr_info.si_signo = prstatus->pr_cursig = signr;
	prstatus->pr_sigpend = p->pending.signal.sig[0];
	prstatus->pr_sighold = p->blocked.sig[0];
	/* RCU protects the real_parent pointer while its pid is read. */
	rcu_read_lock();
	prstatus->pr_ppid = task_pid_vnr(rcu_dereference(p->real_parent));
	rcu_read_unlock();
	prstatus->pr_pid = task_pid_vnr(p);
	prstatus->pr_pgrp = task_pgrp_vnr(p);
	prstatus->pr_sid = task_session_vnr(p);
	if (thread_group_leader(p)) {
		struct task_cputime cputime;

		/*
		 * This is the record for the group leader.  It shows the
		 * group-wide total, not its individual thread total.
		 */
		thread_group_cputime(p, &cputime);
		cputime_to_timeval(cputime.utime, &prstatus->pr_utime);
		cputime_to_timeval(cputime.stime, &prstatus->pr_stime);
	} else {
		cputime_t utime, stime;

		/* A non-leader thread reports only its own CPU time. */
		task_cputime(p, &utime, &stime);
		cputime_to_timeval(utime, &prstatus->pr_utime);
		cputime_to_timeval(stime, &prstatus->pr_stime);
	}
	cputime_to_timeval(p->signal->cutime, &prstatus->pr_cutime);
	cputime_to_timeval(p->signal->cstime, &prstatus->pr_cstime);
}
1327
/*
 * Fill the NT_PRPSINFO record for @p: command line (copied from the
 * process's user-space argument area in @mm), ids, state letter and
 * credentials.  Returns 0, or -EFAULT if the argument area could not
 * be copied from user space.
 */
static int fill_psinfo(struct elf_prpsinfo *psinfo, struct task_struct *p,
		       struct mm_struct *mm)
{
	const struct cred *cred;
	unsigned int i, len;

	/* first copy the parameters from user space */
	memset(psinfo, 0, sizeof(struct elf_prpsinfo));

	len = mm->arg_end - mm->arg_start;
	if (len >= ELF_PRARGSZ)
		len = ELF_PRARGSZ-1;
	if (copy_from_user(&psinfo->pr_psargs,
		           (const char __user *)mm->arg_start, len))
		return -EFAULT;
	/* Arguments are NUL-separated; join them with spaces for display. */
	for(i = 0; i < len; i++)
		if (psinfo->pr_psargs[i] == 0)
			psinfo->pr_psargs[i] = ' ';
	psinfo->pr_psargs[len] = 0;

	rcu_read_lock();
	psinfo->pr_ppid = task_pid_vnr(rcu_dereference(p->real_parent));
	rcu_read_unlock();
	psinfo->pr_pid = task_pid_vnr(p);
	psinfo->pr_pgrp = task_pgrp_vnr(p);
	psinfo->pr_sid = task_session_vnr(p);

	/* Map the task state bit to the classic one-letter code. */
	i = p->state ? ffz(~p->state) + 1 : 0;
	psinfo->pr_state = i;
	psinfo->pr_sname = (i > 5) ? '.' : "RSDTZW"[i];
	psinfo->pr_zomb = psinfo->pr_sname == 'Z';
	psinfo->pr_nice = task_nice(p);
	psinfo->pr_flag = p->flags;
	/* Credentials are RCU-protected; ids are munged into the cred's ns. */
	rcu_read_lock();
	cred = __task_cred(p);
	SET_UID(psinfo->pr_uid, from_kuid_munged(cred->user_ns, cred->uid));
	SET_GID(psinfo->pr_gid, from_kgid_munged(cred->user_ns, cred->gid));
	rcu_read_unlock();
	strncpy(psinfo->pr_fname, p->comm, sizeof(psinfo->pr_fname));

	return 0;
}
1370
Roland McGrath3aba4812008-01-30 13:31:44 +01001371static void fill_auxv_note(struct memelfnote *note, struct mm_struct *mm)
1372{
1373 elf_addr_t *auxv = (elf_addr_t *) mm->saved_auxv;
1374 int i = 0;
1375 do
1376 i += 2;
1377 while (auxv[i - 2] != AT_NULL);
1378 fill_note(note, "CORE", NT_AUXV, i * sizeof(elf_addr_t), auxv);
1379}
1380
Denys Vlasenko49ae4d42012-10-04 17:15:35 -07001381static void fill_siginfo_note(struct memelfnote *note, user_siginfo_t *csigdata,
Al Viroce395962013-10-13 17:23:53 -04001382 const siginfo_t *siginfo)
Denys Vlasenko49ae4d42012-10-04 17:15:35 -07001383{
1384 mm_segment_t old_fs = get_fs();
1385 set_fs(KERNEL_DS);
1386 copy_siginfo_to_user((user_siginfo_t __user *) csigdata, siginfo);
1387 set_fs(old_fs);
1388 fill_note(note, "CORE", NT_SIGINFO, sizeof(*csigdata), csigdata);
1389}
1390
Denys Vlasenko2aa362c2012-10-04 17:15:36 -07001391#define MAX_FILE_NOTE_SIZE (4*1024*1024)
1392/*
1393 * Format of NT_FILE note:
1394 *
1395 * long count -- how many files are mapped
1396 * long page_size -- units for file_ofs
1397 * array of [COUNT] elements of
1398 * long start
1399 * long end
1400 * long file_ofs
1401 * followed by COUNT filenames in ASCII: "FILE1" NUL "FILE2" NUL...
1402 */
/*
 * Build the NT_FILE note listing all file-backed mappings, in the layout
 * documented above.  The buffer is vmalloc()ed here and handed out via
 * note->data (presumably freed later by the dump teardown code — not
 * visible in this chunk).  Returns 0, -EINVAL if the note would exceed
 * MAX_FILE_NOTE_SIZE, or -ENOMEM.
 */
static int fill_files_note(struct memelfnote *note)
{
	struct vm_area_struct *vma;
	unsigned count, size, names_ofs, remaining, n;
	user_long_t *data;
	user_long_t *start_end_ofs;
	char *name_base, *name_curpos;

	/* *Estimated* file count and total data size needed */
	count = current->mm->map_count;
	size = count * 64;

	/* Filenames are packed after the 2 header longs + 3 longs per file. */
	names_ofs = (2 + 3 * count) * sizeof(data[0]);
 alloc:
	if (size >= MAX_FILE_NOTE_SIZE) /* paranoia check */
		return -EINVAL;
	size = round_up(size, PAGE_SIZE);
	data = vmalloc(size);
	if (!data)
		return -ENOMEM;

	start_end_ofs = data + 2;
	name_base = name_curpos = ((char *)data) + names_ofs;
	remaining = size - names_ofs;
	count = 0;
	for (vma = current->mm->mmap; vma != NULL; vma = vma->vm_next) {
		struct file *file;
		const char *filename;

		file = vma->vm_file;
		if (!file)
			continue;
		filename = d_path(&file->f_path, name_curpos, remaining);
		if (IS_ERR(filename)) {
			/* Out of string space: grow the buffer and restart. */
			if (PTR_ERR(filename) == -ENAMETOOLONG) {
				vfree(data);
				size = size * 5 / 4;
				goto alloc;
			}
			continue;
		}

		/* d_path() fills at the end, move name down */
		/* n = strlen(filename) + 1: */
		n = (name_curpos + remaining) - filename;
		remaining = filename - name_curpos;
		memmove(name_curpos, filename, n);
		name_curpos += n;

		*start_end_ofs++ = vma->vm_start;
		*start_end_ofs++ = vma->vm_end;
		*start_end_ofs++ = vma->vm_pgoff;
		count++;
	}

	/* Now we know exact count of files, can store it */
	data[0] = count;
	data[1] = PAGE_SIZE;
	/*
	 * Count usually is less than current->mm->map_count,
	 * we need to move filenames down.
	 */
	n = current->mm->map_count - count;
	if (n != 0) {
		unsigned shift_bytes = n * 3 * sizeof(data[0]);
		memmove(name_base - shift_bytes, name_base,
			name_curpos - name_base);
		name_curpos -= shift_bytes;
	}

	size = name_curpos - (char *)data;
	fill_note(note, "CORE", NT_FILE, size, data);
	return 0;
}
1477
Roland McGrath4206d3a2008-01-30 13:31:45 +01001478#ifdef CORE_DUMP_USE_REGSET
1479#include <linux/regset.h>
1480
/* Per-thread note data; one entry per task in the dumped process. */
struct elf_thread_core_info {
	struct elf_thread_core_info *next;	/* next thread in the list */
	struct task_struct *task;		/* thread this entry describes */
	struct elf_prstatus prstatus;		/* NT_PRSTATUS payload */
	struct memelfnote notes[0];		/* one note slot per regset */
};
1487
/* All state for building the note segment of a regset-based core dump. */
struct elf_note_info {
	struct elf_thread_core_info *thread;	/* per-thread note list */
	struct memelfnote psinfo;		/* NT_PRPSINFO */
	struct memelfnote signote;		/* NT_SIGINFO */
	struct memelfnote auxv;			/* NT_AUXV */
	struct memelfnote files;		/* NT_FILE */
	user_siginfo_t csigdata;		/* backing store for signote */
	size_t size;				/* running total of note bytes */
	int thread_notes;			/* note slots per thread */
};
1498
Roland McGrathd31472b2008-03-04 14:28:30 -08001499/*
1500 * When a regset has a writeback hook, we call it on each thread before
1501 * dumping user memory. On register window machines, this makes sure the
1502 * user memory backing the register data is up to date before we read it.
1503 */
1504static void do_thread_regset_writeback(struct task_struct *task,
1505 const struct user_regset *regset)
1506{
1507 if (regset->writeback)
1508 regset->writeback(task, regset, 1);
1509}
1510
/*
 * Architectures may override how the register portion of elf_prstatus
 * is sized and located (e.g. for compat layouts); these are the
 * identity defaults used when no override is defined.
 */
#ifndef PR_REG_SIZE
/* Size of the register data copied into pr_reg. */
#define PR_REG_SIZE(S) sizeof(S)
#endif

#ifndef PRSTATUS_SIZE
/* Size of the whole NT_PRSTATUS note payload. */
#define PRSTATUS_SIZE(S) sizeof(S)
#endif

#ifndef PR_REG_PTR
/* Destination pointer for regset 0 (general registers). */
#define PR_REG_PTR(S) (&((S)->pr_reg))
#endif

#ifndef SET_PR_FPVALID
/* Mark that an FPU note accompanies this prstatus. */
#define SET_PR_FPVALID(S, V) ((S)->pr_fpvalid = (V))
#endif
1526
/*
 * Fill in the NT_PRSTATUS note plus one note per additional regset for
 * the single thread @t.  @signr is the signal causing the dump; @total
 * is incremented by the size of every note emitted.  Returns 1 on
 * success, 0 on allocation failure (caller unwinds via free_note_info()).
 */
static int fill_thread_core_info(struct elf_thread_core_info *t,
				 const struct user_regset_view *view,
				 long signr, size_t *total)
{
	unsigned int i;

	/*
	 * NT_PRSTATUS is the one special case, because the regset data
	 * goes into the pr_reg field inside the note contents, rather
	 * than being the whole note contents. We fill the rest in here.
	 * We assume that regset 0 is NT_PRSTATUS.
	 */
	fill_prstatus(&t->prstatus, t->task, signr);
	/* Copy regset 0 (general registers) directly into pr_reg. */
	(void) view->regsets[0].get(t->task, &view->regsets[0],
				    0, PR_REG_SIZE(t->prstatus.pr_reg),
				    PR_REG_PTR(&t->prstatus), NULL);

	/* This note points into t itself; nothing extra to free later. */
	fill_note(&t->notes[0], "CORE", NT_PRSTATUS,
		  PRSTATUS_SIZE(t->prstatus), &t->prstatus);
	*total += notesize(&t->notes[0]);

	do_thread_regset_writeback(t->task, &view->regsets[0]);

	/*
	 * Each other regset might generate a note too. For each regset
	 * that has no core_note_type or is inactive, we leave t->notes[i]
	 * all zero and we'll know to skip writing it later.
	 */
	for (i = 1; i < view->n; ++i) {
		const struct user_regset *regset = &view->regsets[i];
		do_thread_regset_writeback(t->task, regset);
		if (regset->core_note_type && regset->get &&
		    (!regset->active || regset->active(t->task, regset))) {
			int ret;
			size_t size = regset->n * regset->size;
			/* On success, ownership of this buffer moves to
			 * t->notes[i].data and free_note_info() frees it. */
			void *data = kmalloc(size, GFP_KERNEL);
			if (unlikely(!data))
				return 0;
			ret = regset->get(t->task, regset,
					  0, size, data, NULL);
			if (unlikely(ret))
				kfree(data);	/* regset failed: skip its note */
			else {
				if (regset->core_note_type != NT_PRFPREG)
					fill_note(&t->notes[i], "LINUX",
						  regset->core_note_type,
						  size, data);
				else {
					/* FPU note also flags pr_fpvalid. */
					SET_PR_FPVALID(&t->prstatus, 1);
					fill_note(&t->notes[i], "CORE",
						  NT_PRFPREG, size, data);
				}
				*total += notesize(&t->notes[i]);
			}
		}
	}

	return 1;
}
1586
/*
 * Collect all non-memory information for the dump (regset flavor): one
 * elf_thread_core_info per thread with the dump leader kept at the head
 * of the list, the process-wide psinfo/siginfo/auxv/files notes, and
 * the ELF file header.  Returns 1 on success, 0 on failure; partial
 * state is released by the caller through free_note_info().
 */
static int fill_note_info(struct elfhdr *elf, int phdrs,
			  struct elf_note_info *info,
			  const siginfo_t *siginfo, struct pt_regs *regs)
{
	struct task_struct *dump_task = current;
	const struct user_regset_view *view = task_user_regset_view(dump_task);
	struct elf_thread_core_info *t;
	struct elf_prpsinfo *psinfo;
	struct core_thread *ct;
	unsigned int i;

	info->size = 0;
	info->thread = NULL;

	psinfo = kmalloc(sizeof(*psinfo), GFP_KERNEL);
	if (psinfo == NULL) {
		info->psinfo.data = NULL; /* So we don't free this wrongly */
		return 0;
	}

	fill_note(&info->psinfo, "CORE", NT_PRPSINFO, sizeof(*psinfo), psinfo);

	/*
	 * Figure out how many notes we're going to need for each thread.
	 */
	info->thread_notes = 0;
	for (i = 0; i < view->n; ++i)
		if (view->regsets[i].core_note_type != 0)
			++info->thread_notes;

	/*
	 * Sanity check.  We rely on regset 0 being in NT_PRSTATUS,
	 * since it is our one special case.
	 */
	if (unlikely(info->thread_notes == 0) ||
	    unlikely(view->regsets[0].core_note_type != NT_PRSTATUS)) {
		WARN_ON(1);
		return 0;
	}

	/*
	 * Initialize the ELF file header.
	 */
	fill_elf_header(elf, phdrs,
			view->e_machine, view->e_flags);

	/*
	 * Allocate a structure for each thread: the fixed part plus one
	 * memelfnote slot per counted regset.
	 */
	for (ct = &dump_task->mm->core_state->dumper; ct; ct = ct->next) {
		t = kzalloc(offsetof(struct elf_thread_core_info,
				     notes[info->thread_notes]),
			    GFP_KERNEL);
		if (unlikely(!t))
			return 0;

		t->task = ct->task;
		if (ct->task == dump_task || !info->thread) {
			t->next = info->thread;
			info->thread = t;
		} else {
			/*
			 * Make sure to keep the original task at
			 * the head of the list.
			 */
			t->next = info->thread->next;
			info->thread->next = t;
		}
	}

	/*
	 * Now fill in each thread's information.
	 */
	for (t = info->thread; t != NULL; t = t->next)
		if (!fill_thread_core_info(t, view, siginfo->si_signo, &info->size))
			return 0;

	/*
	 * Fill in the process-wide notes.
	 */
	fill_psinfo(psinfo, dump_task->group_leader, dump_task->mm);
	info->size += notesize(&info->psinfo);

	fill_siginfo_note(&info->signote, &info->csigdata, siginfo);
	info->size += notesize(&info->signote);

	fill_auxv_note(&info->auxv, current->mm);
	info->size += notesize(&info->auxv);

	/* NT_FILE is best-effort: failure to build it is not fatal. */
	if (fill_files_note(&info->files) == 0)
		info->size += notesize(&info->files);

	return 1;
}
1681
/* Total size in bytes of all notes, as accumulated by fill_note_info(). */
static size_t get_note_info_size(struct elf_note_info *info)
{
	return info->size;
}
1686
1687/*
1688 * Write all the notes for each thread. When writing the first thread, the
1689 * process-wide notes are interleaved after the first thread-specific note.
1690 */
1691static int write_note_info(struct elf_note_info *info,
Al Viroecc8c772013-10-05 15:32:35 -04001692 struct coredump_params *cprm)
Roland McGrath4206d3a2008-01-30 13:31:45 +01001693{
1694 bool first = 1;
1695 struct elf_thread_core_info *t = info->thread;
1696
1697 do {
1698 int i;
1699
Al Viroecc8c772013-10-05 15:32:35 -04001700 if (!writenote(&t->notes[0], cprm))
Roland McGrath4206d3a2008-01-30 13:31:45 +01001701 return 0;
1702
Al Viroecc8c772013-10-05 15:32:35 -04001703 if (first && !writenote(&info->psinfo, cprm))
Roland McGrath4206d3a2008-01-30 13:31:45 +01001704 return 0;
Al Viroecc8c772013-10-05 15:32:35 -04001705 if (first && !writenote(&info->signote, cprm))
Denys Vlasenko49ae4d42012-10-04 17:15:35 -07001706 return 0;
Al Viroecc8c772013-10-05 15:32:35 -04001707 if (first && !writenote(&info->auxv, cprm))
Roland McGrath4206d3a2008-01-30 13:31:45 +01001708 return 0;
Dan Aloni72023652013-09-30 13:45:02 -07001709 if (first && info->files.data &&
Al Viroecc8c772013-10-05 15:32:35 -04001710 !writenote(&info->files, cprm))
Denys Vlasenko2aa362c2012-10-04 17:15:36 -07001711 return 0;
Roland McGrath4206d3a2008-01-30 13:31:45 +01001712
1713 for (i = 1; i < info->thread_notes; ++i)
1714 if (t->notes[i].data &&
Al Viroecc8c772013-10-05 15:32:35 -04001715 !writenote(&t->notes[i], cprm))
Roland McGrath4206d3a2008-01-30 13:31:45 +01001716 return 0;
1717
1718 first = 0;
1719 t = t->next;
1720 } while (t);
1721
1722 return 1;
1723}
1724
1725static void free_note_info(struct elf_note_info *info)
1726{
1727 struct elf_thread_core_info *threads = info->thread;
1728 while (threads) {
1729 unsigned int i;
1730 struct elf_thread_core_info *t = threads;
1731 threads = t->next;
1732 WARN_ON(t->notes[0].data && t->notes[0].data != &t->prstatus);
1733 for (i = 1; i < info->thread_notes; ++i)
1734 kfree(t->notes[i].data);
1735 kfree(t);
1736 }
1737 kfree(info->psinfo.data);
Denys Vlasenko2aa362c2012-10-04 17:15:36 -07001738 vfree(info->files.data);
Roland McGrath4206d3a2008-01-30 13:31:45 +01001739}
1740
1741#else
1742
/* Here is the structure in which status of each thread is captured. */
struct elf_thread_status
{
	struct list_head list;		/* link in elf_note_info.thread_list */
	struct elf_prstatus prstatus;	/* NT_PRSTATUS */
	elf_fpregset_t fpu;		/* NT_PRFPREG */
	struct task_struct *thread;	/* thread being described */
#ifdef ELF_CORE_COPY_XFPREGS
	elf_fpxregset_t xfpu;		/* ELF_CORE_XFPREG_TYPE */
#endif
	struct memelfnote notes[3];	/* prstatus, fpu, xfpu (at most) */
	int num_notes;			/* how many of notes[] are valid */
};
1756
1757/*
1758 * In order to add the specific thread information for the elf file format,
Jesper Juhlf4e5cc22006-06-23 02:05:35 -07001759 * we need to keep a linked list of every threads pr_status and then create
1760 * a single section for them in the final core file.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001761 */
1762static int elf_dump_thread_status(long signr, struct elf_thread_status *t)
1763{
1764 int sz = 0;
1765 struct task_struct *p = t->thread;
1766 t->num_notes = 0;
1767
1768 fill_prstatus(&t->prstatus, p, signr);
1769 elf_core_copy_task_regs(p, &t->prstatus.pr_reg);
1770
Jesper Juhlf4e5cc22006-06-23 02:05:35 -07001771 fill_note(&t->notes[0], "CORE", NT_PRSTATUS, sizeof(t->prstatus),
1772 &(t->prstatus));
Linus Torvalds1da177e2005-04-16 15:20:36 -07001773 t->num_notes++;
1774 sz += notesize(&t->notes[0]);
1775
Jesper Juhlf4e5cc22006-06-23 02:05:35 -07001776 if ((t->prstatus.pr_fpvalid = elf_core_copy_task_fpregs(p, NULL,
1777 &t->fpu))) {
1778 fill_note(&t->notes[1], "CORE", NT_PRFPREG, sizeof(t->fpu),
1779 &(t->fpu));
Linus Torvalds1da177e2005-04-16 15:20:36 -07001780 t->num_notes++;
1781 sz += notesize(&t->notes[1]);
1782 }
1783
1784#ifdef ELF_CORE_COPY_XFPREGS
1785 if (elf_core_copy_task_xfpregs(p, &t->xfpu)) {
Mark Nelson5b20cd82007-10-16 23:25:39 -07001786 fill_note(&t->notes[2], "LINUX", ELF_CORE_XFPREG_TYPE,
1787 sizeof(t->xfpu), &t->xfpu);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001788 t->num_notes++;
1789 sz += notesize(&t->notes[2]);
1790 }
1791#endif
1792 return sz;
1793}
1794
/*
 * Everything collected for a core dump when CORE_DUMP_USE_REGSET is
 * not in effect; filled by fill_note_info(), freed by free_note_info().
 */
struct elf_note_info {
	struct memelfnote *notes;	/* array of up to 8 process notes */
	struct memelfnote *notes_files;	/* NT_FILE entry in notes, or NULL */
	struct elf_prstatus *prstatus;	/* NT_PRSTATUS */
	struct elf_prpsinfo *psinfo;	/* NT_PRPSINFO */
	struct list_head thread_list;	/* struct elf_thread_status nodes */
	elf_fpregset_t *fpu;		/* NT_PRFPREG backing store */
#ifdef ELF_CORE_COPY_XFPREGS
	elf_fpxregset_t *xfpu;		/* ELF_CORE_XFPREG_TYPE backing store */
#endif
	user_siginfo_t csigdata;	/* backing store for the siginfo note */
	int thread_status_size;		/* total bytes of thread status notes */
	int numnote;			/* valid entries in notes[] */
};
1809
/*
 * Zero @info and allocate its fixed-size pieces.  Returns 1 on success,
 * 0 on allocation failure; partial allocations are left for the
 * caller's free_note_info() to release (kfree(NULL) is a no-op).
 */
static int elf_note_info_init(struct elf_note_info *info)
{
	memset(info, 0, sizeof(*info));
	INIT_LIST_HEAD(&info->thread_list);

	/* Allocate space for ELF notes */
	/* 8 slots cover the at most 7 notes fill_note_info() can emit:
	 * prstatus, psinfo, siginfo, auxv, files, fpu, xfpu. */
	info->notes = kmalloc(8 * sizeof(struct memelfnote), GFP_KERNEL);
	if (!info->notes)
		return 0;
	info->psinfo = kmalloc(sizeof(*info->psinfo), GFP_KERNEL);
	if (!info->psinfo)
		return 0;
	info->prstatus = kmalloc(sizeof(*info->prstatus), GFP_KERNEL);
	if (!info->prstatus)
		return 0;
	info->fpu = kmalloc(sizeof(*info->fpu), GFP_KERNEL);
	if (!info->fpu)
		return 0;
#ifdef ELF_CORE_COPY_XFPREGS
	info->xfpu = kmalloc(sizeof(*info->xfpu), GFP_KERNEL);
	if (!info->xfpu)
		return 0;
#endif
	return 1;
}
Roland McGrath3aba4812008-01-30 13:31:44 +01001835
/*
 * Collect the non-memory information for the dump (non-regset flavor):
 * per-thread status list, the process notes, FPU state, and the ELF
 * header.  Returns 1 on success, 0 on failure; partial state is
 * released by the caller through free_note_info().
 */
static int fill_note_info(struct elfhdr *elf, int phdrs,
			  struct elf_note_info *info,
			  const siginfo_t *siginfo, struct pt_regs *regs)
{
	struct list_head *t;
	struct core_thread *ct;
	struct elf_thread_status *ets;

	if (!elf_note_info_init(info))
		return 0;

	/* One status node per thread, excluding the dumping thread
	 * itself (the list starts at dumper.next). */
	for (ct = current->mm->core_state->dumper.next;
	     ct; ct = ct->next) {
		ets = kzalloc(sizeof(*ets), GFP_KERNEL);
		if (!ets)
			return 0;

		ets->thread = ct->task;
		list_add(&ets->list, &info->thread_list);
	}

	/* Capture each thread's registers and note sizes. */
	list_for_each(t, &info->thread_list) {
		int sz;

		ets = list_entry(t, struct elf_thread_status, list);
		sz = elf_dump_thread_status(siginfo->si_signo, ets);
		info->thread_status_size += sz;
	}
	/* now collect the dump for the current */
	memset(info->prstatus, 0, sizeof(*info->prstatus));
	fill_prstatus(info->prstatus, current, siginfo->si_signo);
	elf_core_copy_regs(&info->prstatus->pr_reg, regs);

	/* Set up header */
	fill_elf_header(elf, phdrs, ELF_ARCH, ELF_CORE_EFLAGS);

	/*
	 * Set up the notes in similar form to SVR4 core dumps made
	 * with info from their /proc.
	 */

	fill_note(info->notes + 0, "CORE", NT_PRSTATUS,
		  sizeof(*info->prstatus), info->prstatus);
	fill_psinfo(info->psinfo, current->group_leader, current->mm);
	fill_note(info->notes + 1, "CORE", NT_PRPSINFO,
		  sizeof(*info->psinfo), info->psinfo);

	fill_siginfo_note(info->notes + 2, &info->csigdata, siginfo);
	fill_auxv_note(info->notes + 3, current->mm);
	info->numnote = 4;

	/* NT_FILE is best-effort; remember its slot so free_note_info()
	 * can vfree the data it allocated. */
	if (fill_files_note(info->notes + info->numnote) == 0) {
		info->notes_files = info->notes + info->numnote;
		info->numnote++;
	}

	/* Try to dump the FPU. */
	info->prstatus->pr_fpvalid = elf_core_copy_task_fpregs(current, regs,
							       info->fpu);
	if (info->prstatus->pr_fpvalid)
		fill_note(info->notes + info->numnote++,
			  "CORE", NT_PRFPREG, sizeof(*info->fpu), info->fpu);
#ifdef ELF_CORE_COPY_XFPREGS
	if (elf_core_copy_task_xfpregs(current, info->xfpu))
		fill_note(info->notes + info->numnote++,
			  "LINUX", ELF_CORE_XFPREG_TYPE,
			  sizeof(*info->xfpu), info->xfpu);
#endif

	return 1;
}
1907
1908static size_t get_note_info_size(struct elf_note_info *info)
1909{
1910 int sz = 0;
1911 int i;
1912
1913 for (i = 0; i < info->numnote; i++)
1914 sz += notesize(info->notes + i);
1915
1916 sz += info->thread_status_size;
1917
1918 return sz;
1919}
1920
1921static int write_note_info(struct elf_note_info *info,
Al Viroecc8c772013-10-05 15:32:35 -04001922 struct coredump_params *cprm)
Roland McGrath3aba4812008-01-30 13:31:44 +01001923{
1924 int i;
1925 struct list_head *t;
1926
1927 for (i = 0; i < info->numnote; i++)
Al Viroecc8c772013-10-05 15:32:35 -04001928 if (!writenote(info->notes + i, cprm))
Roland McGrath3aba4812008-01-30 13:31:44 +01001929 return 0;
1930
1931 /* write out the thread status notes section */
1932 list_for_each(t, &info->thread_list) {
1933 struct elf_thread_status *tmp =
1934 list_entry(t, struct elf_thread_status, list);
1935
1936 for (i = 0; i < tmp->num_notes; i++)
Al Viroecc8c772013-10-05 15:32:35 -04001937 if (!writenote(&tmp->notes[i], cprm))
Roland McGrath3aba4812008-01-30 13:31:44 +01001938 return 0;
1939 }
1940
1941 return 1;
1942}
1943
1944static void free_note_info(struct elf_note_info *info)
1945{
1946 while (!list_empty(&info->thread_list)) {
1947 struct list_head *tmp = info->thread_list.next;
1948 list_del(tmp);
1949 kfree(list_entry(tmp, struct elf_thread_status, list));
1950 }
1951
Dan Aloni72023652013-09-30 13:45:02 -07001952 /* Free data possibly allocated by fill_files_note(): */
1953 if (info->notes_files)
1954 vfree(info->notes_files->data);
Denys Vlasenko2aa362c2012-10-04 17:15:36 -07001955
Roland McGrath3aba4812008-01-30 13:31:44 +01001956 kfree(info->prstatus);
1957 kfree(info->psinfo);
1958 kfree(info->notes);
1959 kfree(info->fpu);
1960#ifdef ELF_CORE_COPY_XFPREGS
1961 kfree(info->xfpu);
1962#endif
1963}
1964
Roland McGrath4206d3a2008-01-30 13:31:45 +01001965#endif
1966
Roland McGrathf47aef52007-01-26 00:56:49 -08001967static struct vm_area_struct *first_vma(struct task_struct *tsk,
1968 struct vm_area_struct *gate_vma)
1969{
1970 struct vm_area_struct *ret = tsk->mm->mmap;
1971
1972 if (ret)
1973 return ret;
1974 return gate_vma;
1975}
1976/*
1977 * Helper function for iterating across a vma list. It ensures that the caller
1978 * will visit `gate_vma' prior to terminating the search.
1979 */
1980static struct vm_area_struct *next_vma(struct vm_area_struct *this_vma,
1981 struct vm_area_struct *gate_vma)
1982{
1983 struct vm_area_struct *ret;
1984
1985 ret = this_vma->vm_next;
1986 if (ret)
1987 return ret;
1988 if (this_vma == gate_vma)
1989 return NULL;
1990 return gate_vma;
1991}
1992
Daisuke HATAYAMA8d9032b2010-03-05 13:44:10 -08001993static void fill_extnum_info(struct elfhdr *elf, struct elf_shdr *shdr4extnum,
1994 elf_addr_t e_shoff, int segs)
1995{
1996 elf->e_shoff = e_shoff;
1997 elf->e_shentsize = sizeof(*shdr4extnum);
1998 elf->e_shnum = 1;
1999 elf->e_shstrndx = SHN_UNDEF;
2000
2001 memset(shdr4extnum, 0, sizeof(*shdr4extnum));
2002
2003 shdr4extnum->sh_type = SHT_NULL;
2004 shdr4extnum->sh_size = elf->e_shnum;
2005 shdr4extnum->sh_link = elf->e_shstrndx;
2006 shdr4extnum->sh_info = segs;
2007}
2008
2009static size_t elf_core_vma_data_size(struct vm_area_struct *gate_vma,
2010 unsigned long mm_flags)
2011{
2012 struct vm_area_struct *vma;
2013 size_t size = 0;
2014
2015 for (vma = first_vma(current, gate_vma); vma != NULL;
2016 vma = next_vma(vma, gate_vma))
2017 size += vma_dump_size(vma, mm_flags);
2018 return size;
2019}
2020
/*
 * Actual dumper
 *
 * This is a two-pass process; first we find the offsets of the bits,
 * and then they are actually written out.  If we run out of core limit
 * we just truncate.
 */
static int elf_core_dump(struct coredump_params *cprm)
{
	int has_dumped = 0;
	mm_segment_t fs;
	int segs;
	struct vm_area_struct *vma, *gate_vma;
	struct elfhdr *elf = NULL;
	loff_t offset = 0, dataoff;
	struct elf_note_info info = { };
	struct elf_phdr *phdr4note = NULL;
	struct elf_shdr *shdr4extnum = NULL;
	Elf_Half e_phnum;
	elf_addr_t e_shoff;

	/*
	 * We no longer stop all VM operations.
	 *
	 * This is because those proceses that could possibly change map_count
	 * or the mmap / vma pages are now blocked in do_exit on current
	 * finishing this core dump.
	 *
	 * Only ptrace can touch these memory addresses, but it doesn't change
	 * the map_count or the pages allocated. So no possibility of crashing
	 * exists while dumping the mm->vm_next areas to the core file.
	 */

	/* alloc memory for large data structures: too large to be on stack */
	elf = kmalloc(sizeof(*elf), GFP_KERNEL);
	if (!elf)
		goto out;
	/*
	 * The number of segs are recored into ELF header as 16bit value.
	 * Please check DEFAULT_MAX_MAP_COUNT definition when you modify here.
	 */
	segs = current->mm->map_count;
	segs += elf_core_extra_phdrs();

	/* The gate vma (if any) gets its own segment too. */
	gate_vma = get_gate_vma(current->mm);
	if (gate_vma != NULL)
		segs++;

	/* for notes section */
	segs++;

	/* If segs > PN_XNUM(0xffff), then e_phnum overflows. To avoid
	 * this, kernel supports extended numbering. Have a look at
	 * include/linux/elf.h for further information. */
	e_phnum = segs > PN_XNUM ? PN_XNUM : segs;

	/*
	 * Collect all the non-memory information about the process for the
	 * notes.  This also sets up the file header.
	 */
	if (!fill_note_info(elf, e_phnum, &info, cprm->siginfo, cprm->regs))
		goto cleanup;

	has_dumped = 1;

	fs = get_fs();
	set_fs(KERNEL_DS);

	/* Pass 1: compute file offsets for every piece. */
	offset += sizeof(*elf);				/* Elf header */
	offset += segs * sizeof(struct elf_phdr);	/* Program headers */

	/* Write notes phdr entry */
	{
		size_t sz = get_note_info_size(&info);

		sz += elf_coredump_extra_notes_size();

		phdr4note = kmalloc(sizeof(*phdr4note), GFP_KERNEL);
		if (!phdr4note)
			goto end_coredump;

		fill_elf_note_phdr(phdr4note, sz, offset);
		offset += sz;
	}

	/* Memory dump data starts on a page boundary. */
	dataoff = offset = roundup(offset, ELF_EXEC_PAGESIZE);

	offset += elf_core_vma_data_size(gate_vma, cprm->mm_flags);
	offset += elf_core_extra_data_size();
	e_shoff = offset;

	if (e_phnum == PN_XNUM) {
		shdr4extnum = kmalloc(sizeof(*shdr4extnum), GFP_KERNEL);
		if (!shdr4extnum)
			goto end_coredump;
		fill_extnum_info(elf, shdr4extnum, e_shoff, segs);
	}

	offset = dataoff;

	/* Pass 2: emit the file, starting with the ELF header. */
	if (!dump_emit(cprm, elf, sizeof(*elf)))
		goto end_coredump;

	if (!dump_emit(cprm, phdr4note, sizeof(*phdr4note)))
		goto end_coredump;

	/* Write program headers for segments dump */
	for (vma = first_vma(current, gate_vma); vma != NULL;
	     vma = next_vma(vma, gate_vma)) {
		struct elf_phdr phdr;

		phdr.p_type = PT_LOAD;
		phdr.p_offset = offset;
		phdr.p_vaddr = vma->vm_start;
		phdr.p_paddr = 0;
		/* p_filesz may be less than p_memsz: undumped tail. */
		phdr.p_filesz = vma_dump_size(vma, cprm->mm_flags);
		phdr.p_memsz = vma->vm_end - vma->vm_start;
		offset += phdr.p_filesz;
		phdr.p_flags = vma->vm_flags & VM_READ ? PF_R : 0;
		if (vma->vm_flags & VM_WRITE)
			phdr.p_flags |= PF_W;
		if (vma->vm_flags & VM_EXEC)
			phdr.p_flags |= PF_X;
		phdr.p_align = ELF_EXEC_PAGESIZE;

		if (!dump_emit(cprm, &phdr, sizeof(phdr)))
			goto end_coredump;
	}

	if (!elf_core_write_extra_phdrs(cprm, offset))
		goto end_coredump;

	/* write out the notes section */
	if (!write_note_info(&info, cprm))
		goto end_coredump;

	if (elf_coredump_extra_notes_write(cprm))
		goto end_coredump;

	/* Align to page */
	if (!dump_skip(cprm, dataoff - cprm->written))
		goto end_coredump;

	/* Dump each vma's memory page by page. */
	for (vma = first_vma(current, gate_vma); vma != NULL;
	     vma = next_vma(vma, gate_vma)) {
		unsigned long addr;
		unsigned long end;

		end = vma->vm_start + vma_dump_size(vma, cprm->mm_flags);

		for (addr = vma->vm_start; addr < end; addr += PAGE_SIZE) {
			struct page *page;
			int stop;

			/* Pages we cannot get are written as holes. */
			page = get_dump_page(addr);
			if (page) {
				void *kaddr = kmap(page);
				stop = !dump_emit(cprm, kaddr, PAGE_SIZE);
				kunmap(page);
				page_cache_release(page);
			} else
				stop = !dump_skip(cprm, PAGE_SIZE);
			if (stop)
				goto end_coredump;
		}
	}

	if (!elf_core_write_extra_data(cprm))
		goto end_coredump;

	if (e_phnum == PN_XNUM) {
		if (!dump_emit(cprm, shdr4extnum, sizeof(*shdr4extnum)))
			goto end_coredump;
	}

end_coredump:
	set_fs(fs);

cleanup:
	free_note_info(&info);
	kfree(shdr4extnum);
	kfree(phdr4note);
	kfree(elf);
out:
	return has_dumped;
}
2207
Christoph Hellwig698ba7b2009-12-15 16:47:37 -08002208#endif /* CONFIG_ELF_CORE */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002209
/* Module init: register the ELF loader with the binfmt core. */
static int __init init_elf_binfmt(void)
{
	register_binfmt(&elf_format);
	return 0;
}
2215
/* Module exit: unregister the ELF loader. */
static void __exit exit_elf_binfmt(void)
{
	/* Remove the ELF loader. */
	unregister_binfmt(&elf_format);
}
2221
/* Registered via core_initcall rather than module_init. */
core_initcall(init_elf_binfmt);
module_exit(exit_elf_binfmt);
MODULE_LICENSE("GPL");