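/*
 * Memory-map reporting for /proc/<pid>: this file implements the maps,
 * smaps, clear_refs, pagemap and (under CONFIG_NUMA) numa_maps
 * interfaces, built on the seq_file iterators below.
 */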
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/huge_mm.h>
#include <linux/mount.h>
#include <linux/seq_file.h>
#include <linux/highmem.h>
#include <linux/ptrace.h>
#include <linux/slab.h>
#include <linux/pagemap.h>
#include <linux/mempolicy.h>
#include <linux/rmap.h>
#include <linux/swap.h>
#include <linux/swapops.h>

#include <asm/elf.h>
#include <asm/uaccess.h>
#include <asm/tlbflush.h>
#include "internal.h"

void task_mem(struct seq_file *m, struct mm_struct *mm)
{
	unsigned long data, text, lib, swap;
	unsigned long hiwater_vm, total_vm, hiwater_rss, total_rss;

	/*
	 * Note: to minimize their overhead, mm maintains hiwater_vm and
	 * hiwater_rss only when about to *lower* total_vm or rss.  Any
	 * collector of these hiwater stats must therefore get total_vm
	 * and rss too, which will usually be the higher.  Barriers? not
	 * worth the effort, such snapshots can always be inconsistent.
	 */
	hiwater_vm = total_vm = mm->total_vm;
	if (hiwater_vm < mm->hiwater_vm)
		hiwater_vm = mm->hiwater_vm;
	hiwater_rss = total_rss = get_mm_rss(mm);
	if (hiwater_rss < mm->hiwater_rss)
		hiwater_rss = mm->hiwater_rss;

	data = mm->total_vm - mm->shared_vm - mm->stack_vm;
	text = (PAGE_ALIGN(mm->end_code) - (mm->start_code & PAGE_MASK)) >> 10;
	lib = (mm->exec_vm << (PAGE_SHIFT-10)) - text;
	swap = get_mm_counter(mm, MM_SWAPENTS);
	seq_printf(m,
		"VmPeak:\t%8lu kB\n"
		"VmSize:\t%8lu kB\n"
		"VmLck:\t%8lu kB\n"
		"VmPin:\t%8lu kB\n"
		"VmHWM:\t%8lu kB\n"
		"VmRSS:\t%8lu kB\n"
		"VmData:\t%8lu kB\n"
		"VmStk:\t%8lu kB\n"
		"VmExe:\t%8lu kB\n"
		"VmLib:\t%8lu kB\n"
		"VmPTE:\t%8lu kB\n"
		"VmSwap:\t%8lu kB\n",
		hiwater_vm << (PAGE_SHIFT-10),
		total_vm << (PAGE_SHIFT-10),
		mm->locked_vm << (PAGE_SHIFT-10),
		mm->pinned_vm << (PAGE_SHIFT-10),
		hiwater_rss << (PAGE_SHIFT-10),
		total_rss << (PAGE_SHIFT-10),
		data << (PAGE_SHIFT-10),
		mm->stack_vm << (PAGE_SHIFT-10), text, lib,
		(PTRS_PER_PTE*sizeof(pte_t)*mm->nr_ptes) >> 10,
		swap << (PAGE_SHIFT-10));
}
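/*
 * task_mem() backs the memory lines of /proc/<pid>/status; with the
 * format strings above the output looks roughly like this (values are
 * illustrative, not taken from a real process):
 *
 *	VmPeak:	    8584 kB
 *	VmSize:	    8584 kB
 *	VmLck:	       0 kB
 *	VmPin:	       0 kB
 *	VmHWM:	    1252 kB
 *	VmRSS:	    1252 kB
 *	VmData:	     468 kB
 *	VmStk:	     136 kB
 *	VmExe:	      44 kB
 *	VmLib:	    1948 kB
 *	VmPTE:	      20 kB
 *	VmSwap:	       0 kB
 */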

unsigned long task_vsize(struct mm_struct *mm)
{
	return PAGE_SIZE * mm->total_vm;
}

unsigned long task_statm(struct mm_struct *mm,
			 unsigned long *shared, unsigned long *text,
			 unsigned long *data, unsigned long *resident)
{
	*shared = get_mm_counter(mm, MM_FILEPAGES);
	*text = (PAGE_ALIGN(mm->end_code) - (mm->start_code & PAGE_MASK))
								>> PAGE_SHIFT;
	*data = mm->total_vm - mm->shared_vm;
	*resident = *shared + get_mm_counter(mm, MM_ANONPAGES);
	return mm->total_vm;
}

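/*
 * pad_len_spaces() pads a maps line out to a fixed column before the
 * name field: 25 + sizeof(void *) * 6 characters, i.e. column 73 on
 * 64-bit kernels and 49 on 32-bit ones.
 */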
static void pad_len_spaces(struct seq_file *m, int len)
{
	len = 25 + sizeof(void*) * 6 - len;
	if (len < 1)
		len = 1;
	seq_printf(m, "%*c", len, ' ');
}

#ifdef CONFIG_NUMA
/*
 * These functions are for numa_maps but called in generic **maps seq_file
 * ->start(), ->stop() ops.
 *
 * numa_maps scans all vmas under mmap_sem and checks their mempolicy.
 * Each mempolicy object is controlled by reference counting.  The problem
 * here is how to avoid accessing a dead mempolicy object.
 *
 * Because we're holding mmap_sem while reading the seq_file, it's safe to
 * access each vma's mempolicy: no vma will drop its reference to a
 * mempolicy while we hold the semaphore.
 *
 * A task's mempolicy (task->mempolicy) has different behavior.  It is set
 * and replaced under mmap_sem but unrefed and cleared under task_lock().
 * So, without task_lock(), we cannot trust get_vma_policy() because we
 * cannot guarantee the task never exits under us.  But taking task_lock()
 * around get_vma_policy() causes a lock-order problem.
 *
 * To access task->mempolicy without a lock, we take a reference on the
 * object task->mempolicy points to and remember it.  This guarantees that
 * task->mempolicy points to a live object (or is NULL) whenever numa_maps
 * accesses it.
 */
static void hold_task_mempolicy(struct proc_maps_private *priv)
{
	struct task_struct *task = priv->task;

	task_lock(task);
	priv->task_mempolicy = task->mempolicy;
	mpol_get(priv->task_mempolicy);
	task_unlock(task);
}
static void release_task_mempolicy(struct proc_maps_private *priv)
{
	mpol_put(priv->task_mempolicy);
}
#else
static void hold_task_mempolicy(struct proc_maps_private *priv)
{
}
static void release_task_mempolicy(struct proc_maps_private *priv)
{
}
#endif

static void seq_print_vma_name(struct seq_file *m, struct vm_area_struct *vma)
{
	const char __user *name = vma_get_anon_name(vma);
	struct mm_struct *mm = vma->vm_mm;

	unsigned long page_start_vaddr;
	unsigned long page_offset;
	unsigned long num_pages;
	unsigned long max_len = NAME_MAX;
	int i;

	page_start_vaddr = (unsigned long)name & PAGE_MASK;
	page_offset = (unsigned long)name - page_start_vaddr;
	num_pages = DIV_ROUND_UP(page_offset + max_len, PAGE_SIZE);

	seq_puts(m, "[anon:");

	for (i = 0; i < num_pages; i++) {
		int len;
		int write_len;
		const char *kaddr;
		long pages_pinned;
		struct page *page;

		pages_pinned = get_user_pages(current, mm, page_start_vaddr,
				1, 0, 0, &page, NULL);
		if (pages_pinned < 1) {
			seq_puts(m, "<fault>]");
			return;
		}

		kaddr = (const char *)kmap(page);
		len = min(max_len, PAGE_SIZE - page_offset);
		write_len = strnlen(kaddr + page_offset, len);
		seq_write(m, kaddr + page_offset, write_len);
		kunmap(page);
		put_page(page);

		/* if strnlen hit a null terminator then we're done */
		if (write_len != len)
			break;

		max_len -= len;
		page_offset = 0;
		page_start_vaddr += PAGE_SIZE;
	}

	seq_putc(m, ']');
}

static void vma_stop(struct proc_maps_private *priv, struct vm_area_struct *vma)
{
	if (vma && vma != priv->tail_vma) {
		struct mm_struct *mm = vma->vm_mm;
		release_task_mempolicy(priv);
		up_read(&mm->mmap_sem);
		mmput(mm);
	}
}

static void *m_start(struct seq_file *m, loff_t *pos)
{
	struct proc_maps_private *priv = m->private;
	unsigned long last_addr = m->version;
	struct mm_struct *mm;
	struct vm_area_struct *vma, *tail_vma = NULL;
	loff_t l = *pos;

	/* Clear the per-syscall fields in priv */
	priv->task = NULL;
	priv->tail_vma = NULL;

	/*
	 * We remember last_addr rather than next_addr so that we hit the
	 * mmap_cache most of the time.  last_addr is zero at the beginning
	 * and after an lseek, and -1 once the end of the vmas has been
	 * reached.
	 */

	if (last_addr == -1UL)
		return NULL;

	priv->task = get_pid_task(priv->pid, PIDTYPE_PID);
	if (!priv->task)
		return ERR_PTR(-ESRCH);

	mm = mm_access(priv->task, PTRACE_MODE_READ);
	if (!mm || IS_ERR(mm))
		return mm;
	down_read(&mm->mmap_sem);

	tail_vma = get_gate_vma(priv->task->mm);
	priv->tail_vma = tail_vma;
	hold_task_mempolicy(priv);
	/* Start with the last addr hint */
	vma = find_vma(mm, last_addr);
	if (last_addr && vma) {
		vma = vma->vm_next;
		goto out;
	}

	/*
	 * Check that the vma index is within range and scan
	 * sequentially up to it.
	 */
	vma = NULL;
	if ((unsigned long)l < mm->map_count) {
		vma = mm->mmap;
		while (l-- && vma)
			vma = vma->vm_next;
		goto out;
	}

	if (l != mm->map_count)
		tail_vma = NULL; /* After gate vma */

out:
	if (vma)
		return vma;

	release_task_mempolicy(priv);
	/* End of vmas has been reached */
	m->version = (tail_vma != NULL)? 0: -1UL;
	up_read(&mm->mmap_sem);
	mmput(mm);
	return tail_vma;
}

static void *m_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct proc_maps_private *priv = m->private;
	struct vm_area_struct *vma = v;
	struct vm_area_struct *tail_vma = priv->tail_vma;

	(*pos)++;
	if (vma && (vma != tail_vma) && vma->vm_next)
		return vma->vm_next;
	vma_stop(priv, vma);
	return (vma != tail_vma)? tail_vma: NULL;
}

static void m_stop(struct seq_file *m, void *v)
{
	struct proc_maps_private *priv = m->private;
	struct vm_area_struct *vma = v;

	if (!IS_ERR(vma))
		vma_stop(priv, vma);
	if (priv->task)
		put_task_struct(priv->task);
}

static int do_maps_open(struct inode *inode, struct file *file,
			const struct seq_operations *ops)
{
	struct proc_maps_private *priv;
	int ret = -ENOMEM;
	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (priv) {
		priv->pid = proc_pid(inode);
		ret = seq_open(file, ops);
		if (!ret) {
			struct seq_file *m = file->private_data;
			m->private = priv;
		} else {
			kfree(priv);
		}
	}
	return ret;
}

static void
show_map_vma(struct seq_file *m, struct vm_area_struct *vma, int is_pid)
{
	struct mm_struct *mm = vma->vm_mm;
	struct file *file = vma->vm_file;
	struct proc_maps_private *priv = m->private;
	struct task_struct *task = priv->task;
	vm_flags_t flags = vma->vm_flags;
	unsigned long ino = 0;
	unsigned long long pgoff = 0;
	unsigned long start, end;
	dev_t dev = 0;
	int len;
	const char *name = NULL;

	if (file) {
		struct inode *inode = file_inode(vma->vm_file);
		dev = inode->i_sb->s_dev;
		ino = inode->i_ino;
		pgoff = ((loff_t)vma->vm_pgoff) << PAGE_SHIFT;
	}

	/* We don't show the stack guard page in /proc/maps */
	start = vma->vm_start;
	if (stack_guard_page_start(vma, start))
		start += PAGE_SIZE;
	end = vma->vm_end;
	if (stack_guard_page_end(vma, end))
		end -= PAGE_SIZE;

	seq_printf(m, "%08lx-%08lx %c%c%c%c %08llx %02x:%02x %lu %n",
			start,
			end,
			flags & VM_READ ? 'r' : '-',
			flags & VM_WRITE ? 'w' : '-',
			flags & VM_EXEC ? 'x' : '-',
			flags & VM_MAYSHARE ? 's' : 'p',
			pgoff,
			MAJOR(dev), MINOR(dev), ino, &len);

	/*
	 * Print the dentry name for named mappings, and a
	 * special [heap] marker for the heap:
	 */
	if (file) {
		pad_len_spaces(m, len);
		seq_path(m, &file->f_path, "\n");
		goto done;
	}

	name = arch_vma_name(vma);
	if (!name) {
		pid_t tid;

		if (!mm) {
			name = "[vdso]";
			goto done;
		}

		if (vma->vm_start <= mm->brk &&
		    vma->vm_end >= mm->start_brk) {
			name = "[heap]";
			goto done;
		}

		tid = vm_is_stack(task, vma, is_pid);

		if (tid != 0) {
			/*
			 * Thread stack in /proc/PID/task/TID/maps or
			 * the main process stack.
			 */
			if (!is_pid || (vma->vm_start <= mm->start_stack &&
			    vma->vm_end >= mm->start_stack)) {
				name = "[stack]";
			} else {
				/* Thread stack in /proc/PID/maps */
				pad_len_spaces(m, len);
				seq_printf(m, "[stack:%d]", tid);
			}
			goto done;
		}

		if (vma_get_anon_name(vma)) {
			pad_len_spaces(m, len);
			seq_print_vma_name(m, vma);
		}
	}

done:
	if (name) {
		pad_len_spaces(m, len);
		seq_puts(m, name);
	}
	seq_putc(m, '\n');
}
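/*
 * A line produced by show_map_vma() looks like this (illustrative
 * example, not real output):
 *
 *	08048000-08056000 r-xp 00000000 03:0c 64593	/usr/sbin/gpm
 *
 * i.e. start-end, permissions (with 's'hared or 'p'rivate), file
 * offset, major:minor device, inode, then the pathname or a
 * [heap]/[stack]/[anon:...] marker as decided above.
 */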

static int show_map(struct seq_file *m, void *v, int is_pid)
{
	struct vm_area_struct *vma = v;
	struct proc_maps_private *priv = m->private;
	struct task_struct *task = priv->task;

	show_map_vma(m, vma, is_pid);

	if (m->count < m->size)  /* vma is copied successfully */
		m->version = (vma != get_gate_vma(task->mm))
			? vma->vm_start : 0;
	return 0;
}

static int show_pid_map(struct seq_file *m, void *v)
{
	return show_map(m, v, 1);
}

static int show_tid_map(struct seq_file *m, void *v)
{
	return show_map(m, v, 0);
}

static const struct seq_operations proc_pid_maps_op = {
	.start	= m_start,
	.next	= m_next,
	.stop	= m_stop,
	.show	= show_pid_map
};

static const struct seq_operations proc_tid_maps_op = {
	.start	= m_start,
	.next	= m_next,
	.stop	= m_stop,
	.show	= show_tid_map
};

static int pid_maps_open(struct inode *inode, struct file *file)
{
	return do_maps_open(inode, file, &proc_pid_maps_op);
}

static int tid_maps_open(struct inode *inode, struct file *file)
{
	return do_maps_open(inode, file, &proc_tid_maps_op);
}

const struct file_operations proc_pid_maps_operations = {
	.open		= pid_maps_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release_private,
};

const struct file_operations proc_tid_maps_operations = {
	.open		= tid_maps_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release_private,
};

/*
 * Proportional Set Size (PSS): my share of RSS.
 *
 * PSS of a process is the count of pages it has in memory, where each
 * page is divided by the number of processes sharing it.  So if a
 * process has 1000 pages all to itself, and 1000 shared with one other
 * process, its PSS will be 1500.
 *
 * To keep (accumulated) division errors low, we adopt a 64-bit
 * fixed-point pss counter, so (pss >> PSS_SHIFT) is the real byte
 * count.
 *
 * A shift of 12 before division means (assuming a 4K page size):
 *	- 1M pages shared three ways add up to at most 8KB of error;
 *	- mapcounts up to 2^24, or 16M, are supported;
 *	- PSS values up to 2^52 bytes, or 4PB, are supported.
 */
#define PSS_SHIFT 12

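/*
 * Worked example of the fixed-point accounting (a sketch, not code from
 * this file): for the 1000 private + 1000 two-way-shared pages mentioned
 * above, smaps_pte_entry() below accumulates
 *
 *	pss  = 1000 * (PAGE_SIZE << PSS_SHIFT)		   private pages
 *	     + 1000 * (PAGE_SIZE << PSS_SHIFT) / 2	   mapcount == 2
 *
 * so pss >> PSS_SHIFT == 1500 * PAGE_SIZE, reported as 6000 kB with
 * 4K pages.
 */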
#ifdef CONFIG_PROC_PAGE_MONITOR
struct mem_size_stats {
	struct vm_area_struct *vma;
	unsigned long resident;
	unsigned long shared_clean;
	unsigned long shared_dirty;
	unsigned long private_clean;
	unsigned long private_dirty;
	unsigned long referenced;
	unsigned long anonymous;
	unsigned long anonymous_thp;
	unsigned long swap;
	unsigned long nonlinear;
	u64 pss;
};

static void smaps_pte_entry(pte_t ptent, unsigned long addr,
		unsigned long ptent_size, struct mm_walk *walk)
{
	struct mem_size_stats *mss = walk->private;
	struct vm_area_struct *vma = mss->vma;
	pgoff_t pgoff = linear_page_index(vma, addr);
	struct page *page = NULL;
	int mapcount;

	if (pte_present(ptent)) {
		page = vm_normal_page(vma, addr, ptent);
	} else if (is_swap_pte(ptent)) {
		swp_entry_t swpent = pte_to_swp_entry(ptent);

		if (!non_swap_entry(swpent))
			mss->swap += ptent_size;
		else if (is_migration_entry(swpent))
			page = migration_entry_to_page(swpent);
	} else if (pte_file(ptent)) {
		if (pte_to_pgoff(ptent) != pgoff)
			mss->nonlinear += ptent_size;
	}

	if (!page)
		return;

	if (PageAnon(page))
		mss->anonymous += ptent_size;

	if (page->index != pgoff)
		mss->nonlinear += ptent_size;

	mss->resident += ptent_size;
	/* Accumulate the size in pages that have been accessed. */
	if (pte_young(ptent) || PageReferenced(page))
		mss->referenced += ptent_size;
	mapcount = page_mapcount(page);
	if (mapcount >= 2) {
		if (pte_dirty(ptent) || PageDirty(page))
			mss->shared_dirty += ptent_size;
		else
			mss->shared_clean += ptent_size;
		mss->pss += (ptent_size << PSS_SHIFT) / mapcount;
	} else {
		if (pte_dirty(ptent) || PageDirty(page))
			mss->private_dirty += ptent_size;
		else
			mss->private_clean += ptent_size;
		mss->pss += (ptent_size << PSS_SHIFT);
	}
}

static int smaps_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
			   struct mm_walk *walk)
{
	struct mem_size_stats *mss = walk->private;
	struct vm_area_struct *vma = mss->vma;
	pte_t *pte;
	spinlock_t *ptl;

	if (pmd_trans_huge_lock(pmd, vma) == 1) {
		smaps_pte_entry(*(pte_t *)pmd, addr, HPAGE_PMD_SIZE, walk);
		spin_unlock(&walk->mm->page_table_lock);
		mss->anonymous_thp += HPAGE_PMD_SIZE;
		return 0;
	}

	if (pmd_trans_unstable(pmd))
		return 0;
	/*
	 * The mmap_sem held all the way back in m_start() is what
	 * keeps khugepaged out of here and from collapsing things
	 * in here.
	 */
	pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
	for (; addr != end; pte++, addr += PAGE_SIZE)
		smaps_pte_entry(*pte, addr, PAGE_SIZE, walk);
	pte_unmap_unlock(pte - 1, ptl);
	cond_resched();
	return 0;
}

static void show_smap_vma_flags(struct seq_file *m, struct vm_area_struct *vma)
{
	/*
	 * Don't forget to update Documentation/ on changes.
	 */
	static const char mnemonics[BITS_PER_LONG][2] = {
		/*
		 * In case we meet a flag we don't know about.
		 */
		[0 ... (BITS_PER_LONG-1)] = "??",

		[ilog2(VM_READ)]	= "rd",
		[ilog2(VM_WRITE)]	= "wr",
		[ilog2(VM_EXEC)]	= "ex",
		[ilog2(VM_SHARED)]	= "sh",
		[ilog2(VM_MAYREAD)]	= "mr",
		[ilog2(VM_MAYWRITE)]	= "mw",
		[ilog2(VM_MAYEXEC)]	= "me",
		[ilog2(VM_MAYSHARE)]	= "ms",
		[ilog2(VM_GROWSDOWN)]	= "gd",
		[ilog2(VM_PFNMAP)]	= "pf",
		[ilog2(VM_DENYWRITE)]	= "dw",
		[ilog2(VM_LOCKED)]	= "lo",
		[ilog2(VM_IO)]		= "io",
		[ilog2(VM_SEQ_READ)]	= "sr",
		[ilog2(VM_RAND_READ)]	= "rr",
		[ilog2(VM_DONTCOPY)]	= "dc",
		[ilog2(VM_DONTEXPAND)]	= "de",
		[ilog2(VM_ACCOUNT)]	= "ac",
		[ilog2(VM_NORESERVE)]	= "nr",
		[ilog2(VM_HUGETLB)]	= "ht",
		[ilog2(VM_NONLINEAR)]	= "nl",
		[ilog2(VM_ARCH_1)]	= "ar",
		[ilog2(VM_DONTDUMP)]	= "dd",
		[ilog2(VM_MIXEDMAP)]	= "mm",
		[ilog2(VM_HUGEPAGE)]	= "hg",
		[ilog2(VM_NOHUGEPAGE)]	= "nh",
		[ilog2(VM_MERGEABLE)]	= "mg",
	};
	size_t i;

	seq_puts(m, "VmFlags: ");
	for (i = 0; i < BITS_PER_LONG; i++) {
		if (vma->vm_flags & (1UL << i)) {
			seq_printf(m, "%c%c ",
				   mnemonics[i][0], mnemonics[i][1]);
		}
	}
	seq_putc(m, '\n');
}
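/*
 * Illustrative example: a private, readable, executable mapping with
 * the usual VM_MAY* bits set would be rendered (in bit order) as
 *
 *	VmFlags: rd ex mr mw me
 *
 * using the two-letter mnemonics from the table above.
 */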

static int show_smap(struct seq_file *m, void *v, int is_pid)
{
	struct proc_maps_private *priv = m->private;
	struct task_struct *task = priv->task;
	struct vm_area_struct *vma = v;
	struct mem_size_stats mss;
	struct mm_walk smaps_walk = {
		.pmd_entry = smaps_pte_range,
		.mm = vma->vm_mm,
		.private = &mss,
	};

	memset(&mss, 0, sizeof mss);
	mss.vma = vma;
	/* mmap_sem is held in m_start */
	if (vma->vm_mm && !is_vm_hugetlb_page(vma))
		walk_page_range(vma->vm_start, vma->vm_end, &smaps_walk);

	show_map_vma(m, vma, is_pid);

	seq_printf(m,
		   "Size:           %8lu kB\n"
		   "Rss:            %8lu kB\n"
		   "Pss:            %8lu kB\n"
		   "Shared_Clean:   %8lu kB\n"
		   "Shared_Dirty:   %8lu kB\n"
		   "Private_Clean:  %8lu kB\n"
		   "Private_Dirty:  %8lu kB\n"
		   "Referenced:     %8lu kB\n"
		   "Anonymous:      %8lu kB\n"
		   "AnonHugePages:  %8lu kB\n"
		   "Swap:           %8lu kB\n"
		   "KernelPageSize: %8lu kB\n"
		   "MMUPageSize:    %8lu kB\n"
		   "Locked:         %8lu kB\n",
		   (vma->vm_end - vma->vm_start) >> 10,
		   mss.resident >> 10,
		   (unsigned long)(mss.pss >> (10 + PSS_SHIFT)),
		   mss.shared_clean  >> 10,
		   mss.shared_dirty  >> 10,
		   mss.private_clean >> 10,
		   mss.private_dirty >> 10,
		   mss.referenced >> 10,
		   mss.anonymous >> 10,
		   mss.anonymous_thp >> 10,
		   mss.swap >> 10,
		   vma_kernel_pagesize(vma) >> 10,
		   vma_mmu_pagesize(vma) >> 10,
		   (vma->vm_flags & VM_LOCKED) ?
			(unsigned long)(mss.pss >> (10 + PSS_SHIFT)) : 0);

	if (vma->vm_flags & VM_NONLINEAR)
		seq_printf(m, "Nonlinear:      %8lu kB\n",
			   mss.nonlinear >> 10);

	show_smap_vma_flags(m, vma);

	if (vma_get_anon_name(vma)) {
		seq_puts(m, "Name:           ");
		seq_print_vma_name(m, vma);
		seq_putc(m, '\n');
	}

	if (m->count < m->size)  /* vma is copied successfully */
		m->version = (vma != get_gate_vma(task->mm))
			? vma->vm_start : 0;
	return 0;
}
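/*
 * Together with the maps header line, show_smap() emits a block like
 * the following for each vma (illustrative values only):
 *
 *	Size:               1084 kB
 *	Rss:                 892 kB
 *	Pss:                 374 kB
 *	Shared_Clean:        892 kB
 *	Shared_Dirty:          0 kB
 *	Private_Clean:         0 kB
 *	Private_Dirty:         0 kB
 *	Referenced:          892 kB
 *	Anonymous:             0 kB
 *	AnonHugePages:         0 kB
 *	Swap:                  0 kB
 *	KernelPageSize:        4 kB
 *	MMUPageSize:           4 kB
 *	Locked:                0 kB
 *	VmFlags: rd ex mr mw me
 */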

static int show_pid_smap(struct seq_file *m, void *v)
{
	return show_smap(m, v, 1);
}

static int show_tid_smap(struct seq_file *m, void *v)
{
	return show_smap(m, v, 0);
}

static const struct seq_operations proc_pid_smaps_op = {
	.start	= m_start,
	.next	= m_next,
	.stop	= m_stop,
	.show	= show_pid_smap
};

static const struct seq_operations proc_tid_smaps_op = {
	.start	= m_start,
	.next	= m_next,
	.stop	= m_stop,
	.show	= show_tid_smap
};

static int pid_smaps_open(struct inode *inode, struct file *file)
{
	return do_maps_open(inode, file, &proc_pid_smaps_op);
}

static int tid_smaps_open(struct inode *inode, struct file *file)
{
	return do_maps_open(inode, file, &proc_tid_smaps_op);
}

const struct file_operations proc_pid_smaps_operations = {
	.open		= pid_smaps_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release_private,
};

const struct file_operations proc_tid_smaps_operations = {
	.open		= tid_smaps_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release_private,
};

static int clear_refs_pte_range(pmd_t *pmd, unsigned long addr,
				unsigned long end, struct mm_walk *walk)
{
	struct vm_area_struct *vma = walk->private;
	pte_t *pte, ptent;
	spinlock_t *ptl;
	struct page *page;

	split_huge_page_pmd(vma, addr, pmd);
	if (pmd_trans_unstable(pmd))
		return 0;

	pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
	for (; addr != end; pte++, addr += PAGE_SIZE) {
		ptent = *pte;
		if (!pte_present(ptent))
			continue;

		page = vm_normal_page(vma, addr, ptent);
		if (!page)
			continue;

		/* Clear accessed and referenced bits. */
		ptep_test_and_clear_young(vma, addr, pte);
		ClearPageReferenced(page);
	}
	pte_unmap_unlock(pte - 1, ptl);
	cond_resched();
	return 0;
}

#define CLEAR_REFS_ALL			1
#define CLEAR_REFS_ANON			2
#define CLEAR_REFS_MAPPED		3
#define CLEAR_REFS_MM_HIWATER_RSS	5

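/*
 * Typical usage from userspace (a sketch; any shell will do):
 *
 *	echo 1 > /proc/$PID/clear_refs	  # clear refs on all pages
 *	echo 2 > /proc/$PID/clear_refs	  # anonymous pages only
 *	echo 3 > /proc/$PID/clear_refs	  # file-mapped pages only
 *	echo 5 > /proc/$PID/clear_refs	  # reset peak RSS ("VmHWM")
 *
 * The per-value semantics are documented in clear_refs_write() below.
 */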
static ssize_t clear_refs_write(struct file *file, const char __user *buf,
				size_t count, loff_t *ppos)
{
	struct task_struct *task;
	char buffer[PROC_NUMBUF];
	struct mm_struct *mm;
	struct vm_area_struct *vma;
	int type;
	int rv;

	memset(buffer, 0, sizeof(buffer));
	if (count > sizeof(buffer) - 1)
		count = sizeof(buffer) - 1;
	if (copy_from_user(buffer, buf, count))
		return -EFAULT;
	rv = kstrtoint(strstrip(buffer), 10, &type);
	if (rv < 0)
		return rv;
	if ((type < CLEAR_REFS_ALL || type > CLEAR_REFS_MAPPED) &&
	    type != CLEAR_REFS_MM_HIWATER_RSS)
		return -EINVAL;
	task = get_proc_task(file_inode(file));
	if (!task)
		return -ESRCH;
	mm = get_task_mm(task);
	if (mm) {
		struct mm_walk clear_refs_walk = {
			.pmd_entry = clear_refs_pte_range,
			.mm = mm,
		};

		if (type == CLEAR_REFS_MM_HIWATER_RSS) {
			/*
			 * Writing 5 to /proc/pid/clear_refs resets the peak
			 * resident set size to this mm's current rss value.
			 */
			down_write(&mm->mmap_sem);
			reset_mm_hiwater_rss(mm);
			up_write(&mm->mmap_sem);
			goto out_mm;
		}

		down_read(&mm->mmap_sem);
		for (vma = mm->mmap; vma; vma = vma->vm_next) {
			clear_refs_walk.private = vma;
			if (is_vm_hugetlb_page(vma))
				continue;
			/*
			 * Writing 1 to /proc/pid/clear_refs affects all pages.
			 *
			 * Writing 2 to /proc/pid/clear_refs only affects
			 * anonymous pages.
			 *
			 * Writing 3 to /proc/pid/clear_refs only affects file
			 * mapped pages.
			 */
			if (type == CLEAR_REFS_ANON && vma->vm_file)
				continue;
			if (type == CLEAR_REFS_MAPPED && !vma->vm_file)
				continue;
			walk_page_range(vma->vm_start, vma->vm_end,
					&clear_refs_walk);
		}
		flush_tlb_mm(mm);
		up_read(&mm->mmap_sem);
out_mm:
		mmput(mm);
	}
	put_task_struct(task);

	return count;
}

const struct file_operations proc_clear_refs_operations = {
	.write		= clear_refs_write,
	.llseek		= noop_llseek,
};

typedef struct {
	u64 pme;
} pagemap_entry_t;

struct pagemapread {
	int pos, len;		/* units: PM_ENTRY_BYTES, not bytes */
	pagemap_entry_t *buffer;
};

#define PAGEMAP_WALK_SIZE	(PMD_SIZE)
#define PAGEMAP_WALK_MASK	(PMD_MASK)

#define PM_ENTRY_BYTES      sizeof(pagemap_entry_t)
#define PM_STATUS_BITS      3
#define PM_STATUS_OFFSET    (64 - PM_STATUS_BITS)
#define PM_STATUS_MASK      (((1LL << PM_STATUS_BITS) - 1) << PM_STATUS_OFFSET)
#define PM_STATUS(nr)       (((nr) << PM_STATUS_OFFSET) & PM_STATUS_MASK)
#define PM_PSHIFT_BITS      6
#define PM_PSHIFT_OFFSET    (PM_STATUS_OFFSET - PM_PSHIFT_BITS)
#define PM_PSHIFT_MASK      (((1LL << PM_PSHIFT_BITS) - 1) << PM_PSHIFT_OFFSET)
#define PM_PSHIFT(x)        (((u64) (x) << PM_PSHIFT_OFFSET) & PM_PSHIFT_MASK)
#define PM_PFRAME_MASK      ((1LL << PM_PSHIFT_OFFSET) - 1)
#define PM_PFRAME(x)        ((x) & PM_PFRAME_MASK)

#define PM_PRESENT          PM_STATUS(4LL)
#define PM_SWAP             PM_STATUS(2LL)
#define PM_FILE             PM_STATUS(1LL)
#define PM_NOT_PRESENT      PM_PSHIFT(PAGE_SHIFT)
#define PM_END_OF_BUFFER    1

static inline pagemap_entry_t make_pme(u64 val)
{
	return (pagemap_entry_t) { .pme = val };
}

static int add_to_pagemap(unsigned long addr, pagemap_entry_t *pme,
			  struct pagemapread *pm)
{
	pm->buffer[pm->pos++] = *pme;
	if (pm->pos >= pm->len)
		return PM_END_OF_BUFFER;
	return 0;
}

static int pagemap_pte_hole(unsigned long start, unsigned long end,
			    struct mm_walk *walk)
{
	struct pagemapread *pm = walk->private;
	unsigned long addr;
	int err = 0;
	pagemap_entry_t pme = make_pme(PM_NOT_PRESENT);

	for (addr = start; addr < end; addr += PAGE_SIZE) {
		err = add_to_pagemap(addr, &pme, pm);
		if (err)
			break;
	}
	return err;
}

static void pte_to_pagemap_entry(pagemap_entry_t *pme,
		struct vm_area_struct *vma, unsigned long addr, pte_t pte)
{
	u64 frame, flags;
	struct page *page = NULL;

	if (pte_present(pte)) {
		frame = pte_pfn(pte);
		flags = PM_PRESENT;
		page = vm_normal_page(vma, addr, pte);
	} else if (is_swap_pte(pte)) {
		swp_entry_t entry = pte_to_swp_entry(pte);

		frame = swp_type(entry) |
			(swp_offset(entry) << MAX_SWAPFILES_SHIFT);
		flags = PM_SWAP;
		if (is_migration_entry(entry))
			page = migration_entry_to_page(entry);
	} else {
		*pme = make_pme(PM_NOT_PRESENT);
		return;
	}

	if (page && !PageAnon(page))
		flags |= PM_FILE;

	*pme = make_pme(PM_PFRAME(frame) | PM_PSHIFT(PAGE_SHIFT) | flags);
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static void thp_pmd_to_pagemap_entry(pagemap_entry_t *pme,
		pmd_t pmd, int offset)
{
	/*
	 * Currently a pmd for a thp is always present, because a thp
	 * cannot be swapped out, migrated, or HWPOISONed (it is split in
	 * such cases instead).  This if-check just prepares for a future
	 * implementation.
	 */
	if (pmd_present(pmd))
		*pme = make_pme(PM_PFRAME(pmd_pfn(pmd) + offset)
				| PM_PSHIFT(PAGE_SHIFT) | PM_PRESENT);
	else
		*pme = make_pme(PM_NOT_PRESENT);
}
#else
static inline void thp_pmd_to_pagemap_entry(pagemap_entry_t *pme,
		pmd_t pmd, int offset)
{
}
#endif

static int pagemap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
			     struct mm_walk *walk)
{
	struct vm_area_struct *vma;
	struct pagemapread *pm = walk->private;
	pte_t *pte;
	int err = 0;
	pagemap_entry_t pme = make_pme(PM_NOT_PRESENT);

	/* find the first VMA at or above 'addr' */
	vma = find_vma(walk->mm, addr);
	if (vma && pmd_trans_huge_lock(pmd, vma) == 1) {
		for (; addr != end; addr += PAGE_SIZE) {
			unsigned long offset;

			offset = (addr & ~PAGEMAP_WALK_MASK) >>
					PAGE_SHIFT;
			thp_pmd_to_pagemap_entry(&pme, *pmd, offset);
			err = add_to_pagemap(addr, &pme, pm);
			if (err)
				break;
		}
		spin_unlock(&walk->mm->page_table_lock);
		return err;
	}

	if (pmd_trans_unstable(pmd))
		return 0;
	for (; addr != end; addr += PAGE_SIZE) {

		/* check to see if we've left 'vma' behind
		 * and need a new, higher one */
		if (vma && (addr >= vma->vm_end)) {
			vma = find_vma(walk->mm, addr);
			pme = make_pme(PM_NOT_PRESENT);
		}

		/* check that 'vma' actually covers this address,
		 * and that it isn't a huge page vma */
		if (vma && (vma->vm_start <= addr) &&
		    !is_vm_hugetlb_page(vma)) {
			pte = pte_offset_map(pmd, addr);
			pte_to_pagemap_entry(&pme, vma, addr, *pte);
			/* unmap before userspace copy */
			pte_unmap(pte);
		}
		err = add_to_pagemap(addr, &pme, pm);
		if (err)
			return err;
	}

	cond_resched();

	return err;
}

#ifdef CONFIG_HUGETLB_PAGE
static void huge_pte_to_pagemap_entry(pagemap_entry_t *pme,
					pte_t pte, int offset)
{
	if (pte_present(pte))
		*pme = make_pme(PM_PFRAME(pte_pfn(pte) + offset)
				| PM_PSHIFT(PAGE_SHIFT) | PM_PRESENT);
	else
		*pme = make_pme(PM_NOT_PRESENT);
}

/* This function walks within one hugetlb entry in a single call */
static int pagemap_hugetlb_range(pte_t *pte, unsigned long hmask,
				 unsigned long addr, unsigned long end,
				 struct mm_walk *walk)
{
	struct pagemapread *pm = walk->private;
	int err = 0;
	pagemap_entry_t pme;

	for (; addr != end; addr += PAGE_SIZE) {
		int offset = (addr & ~hmask) >> PAGE_SHIFT;
		huge_pte_to_pagemap_entry(&pme, *pte, offset);
		err = add_to_pagemap(addr, &pme, pm);
		if (err)
			return err;
	}

	cond_resched();

	return err;
}
#endif /* HUGETLB_PAGE */

1069 * /proc/pid/pagemap - an array mapping virtual pages to pfns
1070 *
Hans Rosenfeldf16278c2008-03-21 18:46:59 -05001071 * For each page in the address space, this file contains one 64-bit entry
1072 * consisting of the following:
1073 *
Konstantin Khlebnikov052fb0d2012-05-31 16:26:19 -07001074 * Bits 0-54 page frame number (PFN) if present
Hans Rosenfeldf16278c2008-03-21 18:46:59 -05001075 * Bits 0-4 swap type if swapped
Konstantin Khlebnikov052fb0d2012-05-31 16:26:19 -07001076 * Bits 5-54 swap offset if swapped
Hans Rosenfeldf16278c2008-03-21 18:46:59 -05001077 * Bits 55-60 page shift (page size = 1<<page shift)
Konstantin Khlebnikov052fb0d2012-05-31 16:26:19 -07001078 * Bit 61 page is file-page or shared-anon
Hans Rosenfeldf16278c2008-03-21 18:46:59 -05001079 * Bit 62 page swapped
1080 * Bit 63 page present
1081 *
1082 * If the page is not present but in swap, then the PFN contains an
1083 * encoding of the swap file number and the page's offset into the
1084 * swap. Unmapped pages return a null PFN. This allows determining
Matt Mackall85863e42008-02-04 22:29:04 -08001085 * precisely which pages are mapped (or in swap) and comparing mapped
1086 * pages between processes.
1087 *
1088 * Efficient users of this interface will use /proc/pid/maps to
1089 * determine which areas of memory are actually mapped and llseek to
1090 * skip over unmapped regions.
1091 */
1092static ssize_t pagemap_read(struct file *file, char __user *buf,
1093 size_t count, loff_t *ppos)
1094{
Al Viro496ad9a2013-01-23 17:07:38 -05001095 struct task_struct *task = get_proc_task(file_inode(file));
Matt Mackall85863e42008-02-04 22:29:04 -08001096 struct mm_struct *mm;
1097 struct pagemapread pm;
Matt Mackall85863e42008-02-04 22:29:04 -08001098 int ret = -ESRCH;
Alexey Dobriyanee1e6ab2008-07-21 14:21:36 -07001099 struct mm_walk pagemap_walk = {};
Andrew Morton5d7e0d22008-07-05 01:02:01 -07001100 unsigned long src;
1101 unsigned long svpfn;
1102 unsigned long start_vaddr;
1103 unsigned long end_vaddr;
KAMEZAWA Hiroyukid82ef022010-04-02 09:11:29 +09001104 int copied = 0;
Matt Mackall85863e42008-02-04 22:29:04 -08001105
1106 if (!task)
1107 goto out;
1108
Matt Mackall85863e42008-02-04 22:29:04 -08001109 ret = -EINVAL;
1110 /* file position must be aligned */
Thomas Tuttleaae86792008-06-05 22:46:31 -07001111 if ((*ppos % PM_ENTRY_BYTES) || (count % PM_ENTRY_BYTES))
Marcelo Tosattifb393802008-03-13 12:32:35 -07001112 goto out_task;
Matt Mackall85863e42008-02-04 22:29:04 -08001113
1114 ret = 0;
Vitaly Mayatskikh08161782009-04-30 15:08:18 -07001115 if (!count)
1116 goto out_task;
1117
yonghua zhengf30d87b2013-08-13 16:01:03 -07001118 pm.len = (PAGEMAP_WALK_SIZE >> PAGE_SHIFT);
1119 pm.buffer = kmalloc(pm.len * PM_ENTRY_BYTES, GFP_TEMPORARY);
Andrew Morton5d7e0d22008-07-05 01:02:01 -07001120 ret = -ENOMEM;
KAMEZAWA Hiroyukid82ef022010-04-02 09:11:29 +09001121 if (!pm.buffer)
KOSAKI Motohiro98bc93e2011-05-26 16:25:53 -07001122 goto out_task;
1123
Cong Wange7dcd992012-05-31 16:26:17 -07001124 mm = mm_access(task, PTRACE_MODE_READ);
KOSAKI Motohiro98bc93e2011-05-26 16:25:53 -07001125 ret = PTR_ERR(mm);
1126 if (!mm || IS_ERR(mm))
1127 goto out_free;
Matt Mackall85863e42008-02-04 22:29:04 -08001128
Andrew Morton5d7e0d22008-07-05 01:02:01 -07001129 pagemap_walk.pmd_entry = pagemap_pte_range;
1130 pagemap_walk.pte_hole = pagemap_pte_hole;
Naoya Horiguchi1a5cb812010-05-24 14:32:12 -07001131#ifdef CONFIG_HUGETLB_PAGE
Naoya Horiguchi5dc37642009-12-14 18:00:01 -08001132 pagemap_walk.hugetlb_entry = pagemap_hugetlb_range;
Naoya Horiguchi1a5cb812010-05-24 14:32:12 -07001133#endif
Andrew Morton5d7e0d22008-07-05 01:02:01 -07001134 pagemap_walk.mm = mm;
1135 pagemap_walk.private = &pm;
Matt Mackall85863e42008-02-04 22:29:04 -08001136
Andrew Morton5d7e0d22008-07-05 01:02:01 -07001137 src = *ppos;
1138 svpfn = src / PM_ENTRY_BYTES;
1139 start_vaddr = svpfn << PAGE_SHIFT;
1140 end_vaddr = TASK_SIZE_OF(task);
Matt Mackall85863e42008-02-04 22:29:04 -08001141
Andrew Morton5d7e0d22008-07-05 01:02:01 -07001142 /* watch out for wraparound */
1143 if (svpfn > TASK_SIZE_OF(task) >> PAGE_SHIFT)
1144 start_vaddr = end_vaddr;
1145
1146 /*
1147 * The odds are that this will stop walking way
1148 * before end_vaddr, because the length of the
1149 * user buffer is tracked in "pm", and the walk
1150 * will stop when we hit the end of the buffer.
1151 */
KAMEZAWA Hiroyukid82ef022010-04-02 09:11:29 +09001152 ret = 0;
1153 while (count && (start_vaddr < end_vaddr)) {
1154 int len;
1155 unsigned long end;
Matt Mackall85863e42008-02-04 22:29:04 -08001156
KAMEZAWA Hiroyukid82ef022010-04-02 09:11:29 +09001157 pm.pos = 0;
Naoya Horiguchiea251c12010-11-24 12:57:13 -08001158 end = (start_vaddr + PAGEMAP_WALK_SIZE) & PAGEMAP_WALK_MASK;
KAMEZAWA Hiroyukid82ef022010-04-02 09:11:29 +09001159 /* overflow ? */
1160 if (end < start_vaddr || end > end_vaddr)
1161 end = end_vaddr;
1162 down_read(&mm->mmap_sem);
1163 ret = walk_page_range(start_vaddr, end, &pagemap_walk);
1164 up_read(&mm->mmap_sem);
1165 start_vaddr = end;
1166
1167 len = min(count, PM_ENTRY_BYTES * pm.pos);
Dan Carpenter309361e02010-04-06 13:45:39 +03001168 if (copy_to_user(buf, pm.buffer, len)) {
KAMEZAWA Hiroyukid82ef022010-04-02 09:11:29 +09001169 ret = -EFAULT;
KOSAKI Motohiro98bc93e2011-05-26 16:25:53 -07001170 goto out_mm;
KAMEZAWA Hiroyukid82ef022010-04-02 09:11:29 +09001171 }
1172 copied += len;
1173 buf += len;
1174 count -= len;
Matt Mackall85863e42008-02-04 22:29:04 -08001175 }
KAMEZAWA Hiroyukid82ef022010-04-02 09:11:29 +09001176 *ppos += copied;
1177 if (!ret || ret == PM_END_OF_BUFFER)
1178 ret = copied;
1179
Marcelo Tosattifb393802008-03-13 12:32:35 -07001180out_mm:
1181 mmput(mm);
KOSAKI Motohiro98bc93e2011-05-26 16:25:53 -07001182out_free:
1183 kfree(pm.buffer);
Matt Mackall85863e42008-02-04 22:29:04 -08001184out_task:
1185 put_task_struct(task);
1186out:
1187 return ret;
1188}

static int pagemap_open(struct inode *inode, struct file *file)
{
	/*
	 * Do not disclose physical addresses to unprivileged userspace
	 * (closes a rowhammer attack vector).
	 */
	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;
	return 0;
}

const struct file_operations proc_pagemap_operations = {
	.llseek		= mem_lseek, /* borrow this */
	.read		= pagemap_read,
	.open		= pagemap_open,
};
#endif /* CONFIG_PROC_PAGE_MONITOR */

#ifdef CONFIG_NUMA

struct numa_maps {
	struct vm_area_struct *vma;
	unsigned long pages;
	unsigned long anon;
	unsigned long active;
	unsigned long writeback;
	unsigned long mapcount_max;
	unsigned long dirty;
	unsigned long swapcache;
	unsigned long node[MAX_NUMNODES];
};

struct numa_maps_private {
	struct proc_maps_private proc_maps;
	struct numa_maps md;
};

static void gather_stats(struct page *page, struct numa_maps *md, int pte_dirty,
			unsigned long nr_pages)
{
	int count = page_mapcount(page);

	md->pages += nr_pages;
	if (pte_dirty || PageDirty(page))
		md->dirty += nr_pages;

	if (PageSwapCache(page))
		md->swapcache += nr_pages;

	if (PageActive(page) || PageUnevictable(page))
		md->active += nr_pages;

	if (PageWriteback(page))
		md->writeback += nr_pages;

	if (PageAnon(page))
		md->anon += nr_pages;

	if (count > md->mapcount_max)
		md->mapcount_max = count;

	md->node[page_to_nid(page)] += nr_pages;
}

static struct page *can_gather_numa_stats(pte_t pte, struct vm_area_struct *vma,
		unsigned long addr)
{
	struct page *page;
	int nid;

	if (!pte_present(pte))
		return NULL;

	page = vm_normal_page(vma, addr, pte);
	if (!page)
		return NULL;

	if (PageReserved(page))
		return NULL;

	nid = page_to_nid(page);
	if (!node_isset(nid, node_states[N_MEMORY]))
		return NULL;

	return page;
}

static int gather_pte_stats(pmd_t *pmd, unsigned long addr,
		unsigned long end, struct mm_walk *walk)
{
	struct numa_maps *md;
	spinlock_t *ptl;
	pte_t *orig_pte;
	pte_t *pte;

	md = walk->private;

	if (pmd_trans_huge_lock(pmd, md->vma) == 1) {
		pte_t huge_pte = *(pte_t *)pmd;
		struct page *page;

		page = can_gather_numa_stats(huge_pte, md->vma, addr);
		if (page)
			gather_stats(page, md, pte_dirty(huge_pte),
				     HPAGE_PMD_SIZE/PAGE_SIZE);
		spin_unlock(&walk->mm->page_table_lock);
		return 0;
	}

	if (pmd_trans_unstable(pmd))
		return 0;
	orig_pte = pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
	do {
		struct page *page = can_gather_numa_stats(*pte, md->vma, addr);
		if (!page)
			continue;
		gather_stats(page, md, pte_dirty(*pte), 1);

	} while (pte++, addr += PAGE_SIZE, addr != end);
	pte_unmap_unlock(orig_pte, ptl);
	return 0;
}
#ifdef CONFIG_HUGETLB_PAGE
static int gather_hugetbl_stats(pte_t *pte, unsigned long hmask,
		unsigned long addr, unsigned long end, struct mm_walk *walk)
{
	struct numa_maps *md;
	struct page *page;

	if (pte_none(*pte))
		return 0;

	page = pte_page(*pte);
	if (!page)
		return 0;

	md = walk->private;
	gather_stats(page, md, pte_dirty(*pte), 1);
	return 0;
}

#else
static int gather_hugetbl_stats(pte_t *pte, unsigned long hmask,
		unsigned long addr, unsigned long end, struct mm_walk *walk)
{
	return 0;
}
#endif

/*
 * Display pages allocated per node and memory policy via /proc.
 */
static int show_numa_map(struct seq_file *m, void *v, int is_pid)
{
	struct numa_maps_private *numa_priv = m->private;
	struct proc_maps_private *proc_priv = &numa_priv->proc_maps;
	struct vm_area_struct *vma = v;
	struct numa_maps *md = &numa_priv->md;
	struct file *file = vma->vm_file;
	struct task_struct *task = proc_priv->task;
	struct mm_struct *mm = vma->vm_mm;
	struct mm_walk walk = {};
	struct mempolicy *pol;
	int n;
	char buffer[50];

	if (!mm)
		return 0;

	/* Ensure we start with an empty set of numa_maps statistics. */
	memset(md, 0, sizeof(*md));

	md->vma = vma;

	walk.hugetlb_entry = gather_hugetbl_stats;
	walk.pmd_entry = gather_pte_stats;
	walk.private = md;
	walk.mm = mm;

	pol = get_vma_policy(task, vma, vma->vm_start);
	mpol_to_str(buffer, sizeof(buffer), pol);
	mpol_cond_put(pol);

	seq_printf(m, "%08lx %s", vma->vm_start, buffer);

	if (file) {
		seq_printf(m, " file=");
		seq_path(m, &file->f_path, "\n\t= ");
	} else if (vma->vm_start <= mm->brk && vma->vm_end >= mm->start_brk) {
		seq_printf(m, " heap");
	} else {
		pid_t tid = vm_is_stack(task, vma, is_pid);
		if (tid != 0) {
			/*
			 * Thread stack in /proc/PID/task/TID/maps or
			 * the main process stack.
			 */
			if (!is_pid || (vma->vm_start <= mm->start_stack &&
			    vma->vm_end >= mm->start_stack))
				seq_printf(m, " stack");
			else
				seq_printf(m, " stack:%d", tid);
		}
	}

	if (is_vm_hugetlb_page(vma))
		seq_printf(m, " huge");

	walk_page_range(vma->vm_start, vma->vm_end, &walk);

	if (!md->pages)
		goto out;

	if (md->anon)
		seq_printf(m, " anon=%lu", md->anon);

	if (md->dirty)
		seq_printf(m, " dirty=%lu", md->dirty);

	if (md->pages != md->anon && md->pages != md->dirty)
		seq_printf(m, " mapped=%lu", md->pages);

	if (md->mapcount_max > 1)
		seq_printf(m, " mapmax=%lu", md->mapcount_max);

	if (md->swapcache)
		seq_printf(m, " swapcache=%lu", md->swapcache);

	if (md->active < md->pages && !is_vm_hugetlb_page(vma))
		seq_printf(m, " active=%lu", md->active);

	if (md->writeback)
		seq_printf(m, " writeback=%lu", md->writeback);

	for_each_node_state(n, N_MEMORY)
		if (md->node[n])
			seq_printf(m, " N%d=%lu", n, md->node[n]);
out:
	seq_putc(m, '\n');

	if (m->count < m->size)
		m->version = (vma != proc_priv->tail_vma) ? vma->vm_start : 0;
	return 0;
}
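/*
 * An illustrative numa_maps line assembled by the seq_printf calls
 * above (values invented):
 *
 *	2aaaaac2f000 default file=/lib64/libc-2.11.so mapped=12 mapmax=3 N0=12
 *
 * i.e. start address, mempolicy, then only the counters that are
 * non-zero for this vma.
 */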

static int show_pid_numa_map(struct seq_file *m, void *v)
{
	return show_numa_map(m, v, 1);
}

static int show_tid_numa_map(struct seq_file *m, void *v)
{
	return show_numa_map(m, v, 0);
}

static const struct seq_operations proc_pid_numa_maps_op = {
	.start	= m_start,
	.next	= m_next,
	.stop	= m_stop,
	.show	= show_pid_numa_map,
};

static const struct seq_operations proc_tid_numa_maps_op = {
	.start	= m_start,
	.next	= m_next,
	.stop	= m_stop,
	.show	= show_tid_numa_map,
};

static int numa_maps_open(struct inode *inode, struct file *file,
			  const struct seq_operations *ops)
{
	struct numa_maps_private *priv;
	int ret = -ENOMEM;
	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (priv) {
		priv->proc_maps.pid = proc_pid(inode);
		ret = seq_open(file, ops);
		if (!ret) {
			struct seq_file *m = file->private_data;
			m->private = priv;
		} else {
			kfree(priv);
		}
	}
	return ret;
}

static int pid_numa_maps_open(struct inode *inode, struct file *file)
{
	return numa_maps_open(inode, file, &proc_pid_numa_maps_op);
}

static int tid_numa_maps_open(struct inode *inode, struct file *file)
{
	return numa_maps_open(inode, file, &proc_tid_numa_maps_op);
}

const struct file_operations proc_pid_numa_maps_operations = {
	.open		= pid_numa_maps_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release_private,
};

const struct file_operations proc_tid_numa_maps_operations = {
	.open		= tid_numa_maps_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release_private,
};
#endif /* CONFIG_NUMA */