/**
 * @file buffer_sync.c
 *
 * @remark Copyright 2002-2009 OProfile authors
 * @remark Read the file COPYING
 *
 * @author John Levon <levon@movementarian.org>
 * @author Barry Kasindorf
 * @author Robert Richter <robert.richter@amd.com>
 *
 * This is the core of the buffer management. Each
 * CPU buffer is processed and entered into the
 * global event buffer. Such processing is necessary
 * in several circumstances, mentioned below.
 *
 * The processing does the job of converting the
 * transitory EIP value into a persistent dentry/offset
 * value that the profiler can record at its leisure.
 *
 * See fs/dcookies.c for a description of the dentry/offset
 * objects.
 */

#include <linux/file.h>
#include <linux/mm.h>
#include <linux/workqueue.h>
#include <linux/notifier.h>
#include <linux/dcookies.h>
#include <linux/profile.h>
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/oprofile.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/gfp.h>

#include "oprofile_stats.h"
#include "event_buffer.h"
#include "cpu_buffer.h"
#include "buffer_sync.h"

static LIST_HEAD(dying_tasks);
static LIST_HEAD(dead_tasks);
static cpumask_var_t marked_cpus;
static DEFINE_SPINLOCK(task_mortuary);
static void process_task_mortuary(void);

/* Take ownership of the task struct and place it on the
 * list for processing. Only after two full buffer syncs
 * does the task eventually get freed, because by then
 * we are sure we will not reference it again.
 * Can be invoked from softirq via RCU callback due to
 * call_rcu() of the task struct, hence the _irqsave.
 */
static int
task_free_notify(struct notifier_block *self, unsigned long val, void *data)
{
	unsigned long flags;
	struct task_struct *task = data;
	spin_lock_irqsave(&task_mortuary, flags);
	list_add(&task->tasks, &dying_tasks);
	spin_unlock_irqrestore(&task_mortuary, flags);
	return NOTIFY_OK;
}


/* The task is on its way out. A sync of the buffer means we can catch
 * any remaining samples for this task.
 */
static int
task_exit_notify(struct notifier_block *self, unsigned long val, void *data)
{
	/* To avoid latency problems, we only process the current CPU,
	 * hoping that most samples for the task are on this CPU
	 */
	sync_buffer(raw_smp_processor_id());
	return 0;
}


/* The task is about to try a do_munmap(). We peek at what it's going to
 * do, and if it's an executable region, process the samples first, so
 * we don't lose any. This does not have to be exact, it's a QoI issue
 * only.
 */
static int
munmap_notify(struct notifier_block *self, unsigned long val, void *data)
{
	unsigned long addr = (unsigned long)data;
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *mpnt;

	down_read(&mm->mmap_sem);

	mpnt = find_vma(mm, addr);
	if (mpnt && mpnt->vm_file && (mpnt->vm_flags & VM_EXEC)) {
		up_read(&mm->mmap_sem);
		/* To avoid latency problems, we only process the current CPU,
		 * hoping that most samples for the task are on this CPU
		 */
		sync_buffer(raw_smp_processor_id());
		return 0;
	}

	up_read(&mm->mmap_sem);
	return 0;
}


/* We need to be told about new modules so we don't attribute to a previously
 * loaded module, or drop the samples on the floor.
 */
static int
module_load_notify(struct notifier_block *self, unsigned long val, void *data)
{
#ifdef CONFIG_MODULES
	if (val != MODULE_STATE_COMING)
		return 0;

	/* FIXME: should we process all CPU buffers ? */
	mutex_lock(&buffer_mutex);
	add_event_entry(ESCAPE_CODE);
	add_event_entry(MODULE_LOADED_CODE);
	mutex_unlock(&buffer_mutex);
#endif
	return 0;
}


static struct notifier_block task_free_nb = {
	.notifier_call = task_free_notify,
};

static struct notifier_block task_exit_nb = {
	.notifier_call = task_exit_notify,
};

static struct notifier_block munmap_nb = {
	.notifier_call = munmap_notify,
};

static struct notifier_block module_load_nb = {
	.notifier_call = module_load_notify,
};

static void free_all_tasks(void)
{
	/* make sure we don't leak task structs */
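	/* Calling this twice is deliberate: the first pass moves dying_tasks
	 * onto dead_tasks, the second pass actually frees them.
	 */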
	process_task_mortuary();
	process_task_mortuary();
}

int sync_start(void)
{
	int err;

	if (!zalloc_cpumask_var(&marked_cpus, GFP_KERNEL))
		return -ENOMEM;

	err = task_handoff_register(&task_free_nb);
	if (err)
		goto out1;
	err = profile_event_register(PROFILE_TASK_EXIT, &task_exit_nb);
	if (err)
		goto out2;
	err = profile_event_register(PROFILE_MUNMAP, &munmap_nb);
	if (err)
		goto out3;
	err = register_module_notifier(&module_load_nb);
	if (err)
		goto out4;

	start_cpu_work();

out:
	return err;
out4:
	profile_event_unregister(PROFILE_MUNMAP, &munmap_nb);
out3:
	profile_event_unregister(PROFILE_TASK_EXIT, &task_exit_nb);
out2:
	task_handoff_unregister(&task_free_nb);
	free_all_tasks();
out1:
	free_cpumask_var(marked_cpus);
	goto out;
}


void sync_stop(void)
{
	end_cpu_work();
	unregister_module_notifier(&module_load_nb);
	profile_event_unregister(PROFILE_MUNMAP, &munmap_nb);
	profile_event_unregister(PROFILE_TASK_EXIT, &task_exit_nb);
	task_handoff_unregister(&task_free_nb);
	barrier();			/* do all of the above first */

	flush_cpu_work();

	free_all_tasks();
	free_cpumask_var(marked_cpus);
}


/* Optimisation. We can manage without taking the dcookie sem
 * because we cannot reach this code without at least one
 * dcookie user still being registered (namely, the reader
 * of the event buffer). */
static inline unsigned long fast_get_dcookie(const struct path *path)
{
	unsigned long cookie;

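	/* DCACHE_COOKIE means a dcookie already exists for this dentry and
	 * its value is simply the dentry's address, so skip get_dcookie().
	 */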
	if (path->dentry->d_flags & DCACHE_COOKIE)
		return (unsigned long)path->dentry;
	get_dcookie(path, &cookie);
	return cookie;
}


/* Look up the dcookie for the task's mm->exe_file,
 * which corresponds loosely to "application name". This is
 * not strictly necessary but allows oprofile to associate
 * shared-library samples with particular applications
 */
static unsigned long get_exec_dcookie(struct mm_struct *mm)
{
	unsigned long cookie = NO_COOKIE;
	struct file *exe_file;

	if (!mm)
		goto done;

	exe_file = get_mm_exe_file(mm);
	if (!exe_file)
		goto done;

	cookie = fast_get_dcookie(&exe_file->f_path);
	fput(exe_file);
done:
	return cookie;
}


/* Convert the EIP value of a sample into a persistent dentry/offset
 * pair that can then be added to the global event buffer. We make
 * sure to do this lookup before a mm->mmap modification happens so
 * we don't lose track.
 *
 * The caller must ensure the mm is not nil (ie: not a kernel thread).
 */
static unsigned long
lookup_dcookie(struct mm_struct *mm, unsigned long addr, off_t *offset)
{
	unsigned long cookie = NO_COOKIE;
	struct vm_area_struct *vma;

	down_read(&mm->mmap_sem);
	for (vma = find_vma(mm, addr); vma; vma = vma->vm_next) {

		if (addr < vma->vm_start || addr >= vma->vm_end)
			continue;

		if (vma->vm_file) {
			cookie = fast_get_dcookie(&vma->vm_file->f_path);
			*offset = (vma->vm_pgoff << PAGE_SHIFT) + addr -
				vma->vm_start;
		} else {
			/* must be an anonymous map */
			*offset = addr;
		}

		break;
	}

	if (!vma)
		cookie = INVALID_COOKIE;
	up_read(&mm->mmap_sem);

	return cookie;
}

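/* dcookie of the file most recently recorded in the event buffer; a
 * COOKIE_SWITCH note is emitted whenever a sample comes from a different file.
 */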
static unsigned long last_cookie = INVALID_COOKIE;

static void add_cpu_switch(int i)
{
	add_event_entry(ESCAPE_CODE);
	add_event_entry(CPU_SWITCH_CODE);
	add_event_entry(i);
	last_cookie = INVALID_COOKIE;
}

static void add_kernel_ctx_switch(unsigned int in_kernel)
{
	add_event_entry(ESCAPE_CODE);
	if (in_kernel)
		add_event_entry(KERNEL_ENTER_SWITCH_CODE);
	else
		add_event_entry(KERNEL_EXIT_SWITCH_CODE);
}

static void
add_user_ctx_switch(struct task_struct const *task, unsigned long cookie)
{
	add_event_entry(ESCAPE_CODE);
	add_event_entry(CTX_SWITCH_CODE);
	add_event_entry(task->pid);
	add_event_entry(cookie);
	/* Another code for daemon back-compat */
	add_event_entry(ESCAPE_CODE);
	add_event_entry(CTX_TGID_CODE);
	add_event_entry(task->tgid);
}


static void add_cookie_switch(unsigned long cookie)
{
	add_event_entry(ESCAPE_CODE);
	add_event_entry(COOKIE_SWITCH_CODE);
	add_event_entry(cookie);
}


static void add_trace_begin(void)
{
	add_event_entry(ESCAPE_CODE);
	add_event_entry(TRACE_BEGIN_CODE);
}

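/* Flush an extended data record attached to an escape entry: an escape
 * code, the record's code word, a dcookie-relative offset for the pc
 * (or the raw pc when no mapping is found), then any remaining payload.
 */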
static void add_data(struct op_entry *entry, struct mm_struct *mm)
{
	unsigned long code, pc, val;
	unsigned long cookie;
	off_t offset;

	if (!op_cpu_buffer_get_data(entry, &code))
		return;
	if (!op_cpu_buffer_get_data(entry, &pc))
		return;
	if (!op_cpu_buffer_get_size(entry))
		return;

	if (mm) {
		cookie = lookup_dcookie(mm, pc, &offset);

		if (cookie == NO_COOKIE)
			offset = pc;
		if (cookie == INVALID_COOKIE) {
			atomic_inc(&oprofile_stats.sample_lost_no_mapping);
			offset = pc;
		}
		if (cookie != last_cookie) {
			add_cookie_switch(cookie);
			last_cookie = cookie;
		}
	} else
		offset = pc;

	add_event_entry(ESCAPE_CODE);
	add_event_entry(code);
	add_event_entry(offset);	/* Offset from Dcookie */

	while (op_cpu_buffer_get_data(entry, &val))
		add_event_entry(val);
}

static inline void add_sample_entry(unsigned long offset, unsigned long event)
{
	add_event_entry(offset);
	add_event_entry(event);
}


/*
 * Add a sample to the global event buffer. If possible the
 * sample is converted into a persistent dentry/offset pair
 * for later lookup from userspace. Return 0 on failure.
 */
static int
add_sample(struct mm_struct *mm, struct op_sample *s, int in_kernel)
{
	unsigned long cookie;
	off_t offset;

	if (in_kernel) {
		add_sample_entry(s->eip, s->event);
		return 1;
	}

	/* add userspace sample */

	if (!mm) {
		atomic_inc(&oprofile_stats.sample_lost_no_mm);
		return 0;
	}

	cookie = lookup_dcookie(mm, s->eip, &offset);

	if (cookie == INVALID_COOKIE) {
		atomic_inc(&oprofile_stats.sample_lost_no_mapping);
		return 0;
	}

	if (cookie != last_cookie) {
		add_cookie_switch(cookie);
		last_cookie = cookie;
	}

	add_sample_entry(offset, s->event);

	return 1;
}


static void release_mm(struct mm_struct *mm)
{
	if (!mm)
		return;
	mmput(mm);
}

static inline int is_code(unsigned long val)
{
	return val == ESCAPE_CODE;
}


/* Move tasks along towards death. Any tasks on dead_tasks
 * will definitely have no remaining references in any
 * CPU buffers at this point, because we use two lists,
 * and to have reached the list, it must have gone through
 * one full sync already.
 */
static void process_task_mortuary(void)
{
	unsigned long flags;
	LIST_HEAD(local_dead_tasks);
	struct task_struct *task;
	struct task_struct *ttask;

	spin_lock_irqsave(&task_mortuary, flags);

	list_splice_init(&dead_tasks, &local_dead_tasks);
	list_splice_init(&dying_tasks, &dead_tasks);

	spin_unlock_irqrestore(&task_mortuary, flags);

	list_for_each_entry_safe(task, ttask, &local_dead_tasks, tasks) {
		list_del(&task->tasks);
		free_task(task);
	}
}


static void mark_done(int cpu)
{
	int i;

	cpumask_set_cpu(cpu, marked_cpus);

	for_each_online_cpu(i) {
		if (!cpumask_test_cpu(i, marked_cpus))
			return;
	}

	/* All CPUs have been processed at least once,
	 * we can process the mortuary once
	 */
	process_task_mortuary();

	cpumask_clear(marked_cpus);
}


/* FIXME: this is not sufficient if we implement syscall barrier backtrace
 * traversal; the code switches to sb_sample_start at the first kernel
 * enter/exit switch, so we would need a fifth state and some special
 * handling in sync_buffer().
 */
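/* Decoder states while draining one CPU buffer:
 *   sb_bt_ignore    - drop backtrace entries after a sample failed to map
 *   sb_buffer_start - nothing seen yet; samples are ignored until the
 *                     first kernel/user switch arrives
 *   sb_bt_start     - inside a backtrace started by a TRACE_BEGIN note
 *   sb_sample_start - normal sample processing
 */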
typedef enum {
	sb_bt_ignore = -2,
	sb_buffer_start,
	sb_bt_start,
	sb_sample_start,
} sync_buffer_state;

/* Sync one of the CPU's buffers into the global event buffer.
 * Here we need to go through each batch of samples punctuated
 * by context switch notes, taking the task's mmap_sem and doing
 * lookup in task->mm->mmap to convert EIP into dcookie/offset
 * value.
 */
void sync_buffer(int cpu)
{
	struct mm_struct *mm = NULL;
	struct mm_struct *oldmm;
	unsigned long val;
	struct task_struct *new;
	unsigned long cookie = 0;
	int in_kernel = 1;
	sync_buffer_state state = sb_buffer_start;
	unsigned int i;
	unsigned long available;
	unsigned long flags;
	struct op_entry entry;
	struct op_sample *sample;

	mutex_lock(&buffer_mutex);

	add_cpu_switch(cpu);

	op_cpu_buffer_reset(cpu);
	available = op_cpu_buffer_entries(cpu);

	for (i = 0; i < available; ++i) {
		sample = op_cpu_buffer_read_entry(&entry, cpu);
		if (!sample)
			break;

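		/* Escape-coded entries carry control notes (trace begin,
		 * kernel/user and task switches, extended data) rather
		 * than samples.
		 */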
		if (is_code(sample->eip)) {
			flags = sample->event;
			if (flags & TRACE_BEGIN) {
				state = sb_bt_start;
				add_trace_begin();
			}
			if (flags & KERNEL_CTX_SWITCH) {
				/* kernel/userspace switch */
				in_kernel = flags & IS_KERNEL;
				if (state == sb_buffer_start)
					state = sb_sample_start;
				add_kernel_ctx_switch(flags & IS_KERNEL);
			}
			if (flags & USER_CTX_SWITCH
			    && op_cpu_buffer_get_data(&entry, &val)) {
				/* userspace context switch */
				new = (struct task_struct *)val;
				oldmm = mm;
				release_mm(oldmm);
				mm = get_task_mm(new);
				if (mm != oldmm)
					cookie = get_exec_dcookie(mm);
				add_user_ctx_switch(new, cookie);
			}
			if (op_cpu_buffer_get_size(&entry))
				add_data(&entry, mm);
			continue;
		}

		if (state < sb_bt_start)
			/* ignore sample */
			continue;

		if (add_sample(mm, sample, in_kernel))
			continue;

		/* ignore backtraces if failed to add a sample */
		if (state == sb_bt_start) {
			state = sb_bt_ignore;
			atomic_inc(&oprofile_stats.bt_lost_no_mapping);
		}
	}
	release_mm(mm);

	mark_done(cpu);

	mutex_unlock(&buffer_mutex);
}

/* Add a buffer's worth of data directly to the kernel event buffer.
 * The buffer is assumed to be circular: entries are taken from index
 * start up to (but not including) index stop, wrapping at index max.
 */
void oprofile_put_buff(unsigned long *buf, unsigned int start,
		       unsigned int stop, unsigned int max)
{
	int i;

	i = start;

	mutex_lock(&buffer_mutex);
	while (i != stop) {
		add_event_entry(buf[i++]);

		if (i >= max)
			i = 0;
	}

	mutex_unlock(&buffer_mutex);
}