/*
 * Performance events core code:
 *
 *  Copyright (C) 2008 Thomas Gleixner <tglx@linutronix.de>
 *  Copyright (C) 2008-2011 Red Hat, Inc., Ingo Molnar
 *  Copyright (C) 2008-2011 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
 *  Copyright  ©  2009 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
 *
 * For licensing details see kernel-base/COPYING
 */

#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/cpu.h>
#include <linux/smp.h>
#include <linux/idr.h>
#include <linux/file.h>
#include <linux/poll.h>
#include <linux/slab.h>
#include <linux/hash.h>
#include <linux/sysfs.h>
#include <linux/dcache.h>
#include <linux/percpu.h>
#include <linux/ptrace.h>
#include <linux/reboot.h>
#include <linux/vmstat.h>
#include <linux/device.h>
#include <linux/vmalloc.h>
#include <linux/hardirq.h>
#include <linux/rculist.h>
#include <linux/uaccess.h>
#include <linux/suspend.h>
#include <linux/syscalls.h>
#include <linux/anon_inodes.h>
#include <linux/kernel_stat.h>
#include <linux/perf_event.h>
#include <linux/ftrace_event.h>
#include <linux/hw_breakpoint.h>

#include "internal.h"

#include <asm/irq_regs.h>

struct remote_function_call {
	struct task_struct	*p;
	int			(*func)(void *info);
	void			*info;
	int			ret;
};

static void remote_function(void *data)
{
	struct remote_function_call *tfc = data;
	struct task_struct *p = tfc->p;

	if (p) {
		tfc->ret = -EAGAIN;
		if (task_cpu(p) != smp_processor_id() || !task_curr(p))
			return;
	}

	tfc->ret = tfc->func(tfc->info);
}

/**
 * task_function_call - call a function on the cpu on which a task runs
 * @p:		the task to evaluate
 * @func:	the function to be called
 * @info:	the function call argument
 *
 * Calls the function @func when the task is currently running. This might
 * be on the current CPU, which just calls the function directly.
 *
 * returns: @func return value, or
 *	    -ESRCH  - when the process isn't running
 *	    -EAGAIN - when the process moved away
 */
static int
task_function_call(struct task_struct *p, int (*func) (void *info), void *info)
{
	struct remote_function_call data = {
		.p	= p,
		.func	= func,
		.info	= info,
		.ret	= -ESRCH, /* No such (running) process */
	};

	if (task_curr(p))
		smp_call_function_single(task_cpu(p), remote_function, &data, 1);

	return data.ret;
}
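
/*
 * Usage sketch (illustrative, not part of the original file): a caller
 * that wants per-task state sampled on the CPU where @p is running could
 * do the following, where do_sample() and struct sample are hypothetical:
 *
 *	static int do_sample(void *info)
 *	{
 *		struct sample *s = info;
 *		s->cpu = smp_processor_id();
 *		return 0;
 *	}
 *
 *	err = task_function_call(p, do_sample, &s);
 *
 * -ESRCH means @p was not running at all; -EAGAIN means it migrated
 * between the task_curr() check and the IPI, so callers typically retry.
 */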

/**
 * cpu_function_call - call a function on a remote cpu
 * @cpu:	the cpu to run @func on
 * @func:	the function to be called
 * @info:	the function call argument
 *
 * Calls the function @func on the remote cpu.
 *
 * returns: @func return value or -ENXIO when the cpu is offline
 */
static int cpu_function_call(int cpu, int (*func) (void *info), void *info)
{
	struct remote_function_call data = {
		.p	= NULL,
		.func	= func,
		.info	= info,
		.ret	= -ENXIO, /* No such CPU */
	};

	smp_call_function_single(cpu, remote_function, &data, 1);

	return data.ret;
}

#define PERF_FLAG_ALL (PERF_FLAG_FD_NO_GROUP |\
		       PERF_FLAG_FD_OUTPUT  |\
		       PERF_FLAG_PID_CGROUP)

enum event_type_t {
	EVENT_FLEXIBLE = 0x1,
	EVENT_PINNED = 0x2,
	EVENT_ALL = EVENT_FLEXIBLE | EVENT_PINNED,
};

/*
 * perf_sched_events : >0 events exist
 * perf_cgroup_events: >0 per-cpu cgroup events exist on this cpu
 */
struct jump_label_key perf_sched_events __read_mostly;
static DEFINE_PER_CPU(atomic_t, perf_cgroup_events);

static atomic_t nr_mmap_events __read_mostly;
static atomic_t nr_comm_events __read_mostly;
static atomic_t nr_task_events __read_mostly;

static LIST_HEAD(pmus);
static DEFINE_MUTEX(pmus_lock);
static struct srcu_struct pmus_srcu;

/*
 * perf event paranoia level:
 *  -1 - not paranoid at all
 *   0 - disallow raw tracepoint access for unpriv
 *   1 - disallow cpu events for unpriv
 *   2 - disallow kernel profiling for unpriv
 */
int sysctl_perf_event_paranoid __read_mostly = 1;
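
/*
 * Illustrative note (not part of the original file): the level is exposed
 * via sysctl, so an administrator can tighten it at run time:
 *
 *	# echo 2 > /proc/sys/kernel/perf_event_paranoid
 */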

/* Minimum for 512 kiB + 1 user control page */
int sysctl_perf_event_mlock __read_mostly = 512 + (PAGE_SIZE / 1024); /* 'free' kiB per user */

/*
 * max perf event sample rate
 */
#define DEFAULT_MAX_SAMPLE_RATE	100000
int sysctl_perf_event_sample_rate __read_mostly = DEFAULT_MAX_SAMPLE_RATE;
static int max_samples_per_tick __read_mostly =
	DIV_ROUND_UP(DEFAULT_MAX_SAMPLE_RATE, HZ);
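
/*
 * Worked example (illustrative): with HZ == 1000 the default budget is
 * DIV_ROUND_UP(100000, 1000) == 100 samples per tick; with HZ == 250 it
 * is 400. perf_proc_update_handler() below recomputes this whenever
 * sysctl_perf_event_sample_rate is written.
 */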

int perf_proc_update_handler(struct ctl_table *table, int write,
		void __user *buffer, size_t *lenp,
		loff_t *ppos)
{
	int ret = proc_dointvec(table, write, buffer, lenp, ppos);

	if (ret || !write)
		return ret;

	max_samples_per_tick = DIV_ROUND_UP(sysctl_perf_event_sample_rate, HZ);

	return 0;
}

static atomic64_t perf_event_id;

static void cpu_ctx_sched_out(struct perf_cpu_context *cpuctx,
			      enum event_type_t event_type);

static void cpu_ctx_sched_in(struct perf_cpu_context *cpuctx,
			     enum event_type_t event_type,
			     struct task_struct *task);

static void update_context_time(struct perf_event_context *ctx);
static u64 perf_event_time(struct perf_event *event);

void __weak perf_event_print_debug(void) { }

extern __weak const char *perf_pmu_name(void)
{
	return "pmu";
}

static inline u64 perf_clock(void)
{
	return local_clock();
}

static inline struct perf_cpu_context *
__get_cpu_context(struct perf_event_context *ctx)
{
	return this_cpu_ptr(ctx->pmu->pmu_cpu_context);
}

static void perf_ctx_lock(struct perf_cpu_context *cpuctx,
			  struct perf_event_context *ctx)
{
	raw_spin_lock(&cpuctx->ctx.lock);
	if (ctx)
		raw_spin_lock(&ctx->lock);
}

static void perf_ctx_unlock(struct perf_cpu_context *cpuctx,
			    struct perf_event_context *ctx)
{
	if (ctx)
		raw_spin_unlock(&ctx->lock);
	raw_spin_unlock(&cpuctx->ctx.lock);
}
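
/*
 * Illustrative pairing (not part of the original file): the cpuctx lock
 * is always taken before the task context lock and released after it,
 * giving every caller one fixed nesting order:
 *
 *	perf_ctx_lock(cpuctx, cpuctx->task_ctx);
 *	...	both cpuctx->ctx.lock and the task ctx->lock held here
 *	perf_ctx_unlock(cpuctx, cpuctx->task_ctx);
 */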

#ifdef CONFIG_CGROUP_PERF

/*
 * Must ensure cgroup is pinned (css_get) before calling
 * this function. In other words, we cannot call this function
 * if there is no cgroup event for the current CPU context.
 */
static inline struct perf_cgroup *
perf_cgroup_from_task(struct task_struct *task)
{
	return container_of(task_subsys_state(task, perf_subsys_id),
			struct perf_cgroup, css);
}

static inline bool
perf_cgroup_match(struct perf_event *event)
{
	struct perf_event_context *ctx = event->ctx;
	struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);

	return !event->cgrp || event->cgrp == cpuctx->cgrp;
}

static inline void perf_get_cgroup(struct perf_event *event)
{
	css_get(&event->cgrp->css);
}

static inline void perf_put_cgroup(struct perf_event *event)
{
	css_put(&event->cgrp->css);
}

static inline void perf_detach_cgroup(struct perf_event *event)
{
	perf_put_cgroup(event);
	event->cgrp = NULL;
}

static inline int is_cgroup_event(struct perf_event *event)
{
	return event->cgrp != NULL;
}

static inline u64 perf_cgroup_event_time(struct perf_event *event)
{
	struct perf_cgroup_info *t;

	t = per_cpu_ptr(event->cgrp->info, event->cpu);
	return t->time;
}

static inline void __update_cgrp_time(struct perf_cgroup *cgrp)
{
	struct perf_cgroup_info *info;
	u64 now;

	now = perf_clock();

	info = this_cpu_ptr(cgrp->info);

	info->time += now - info->timestamp;
	info->timestamp = now;
}

static inline void update_cgrp_time_from_cpuctx(struct perf_cpu_context *cpuctx)
{
	struct perf_cgroup *cgrp_out = cpuctx->cgrp;
	if (cgrp_out)
		__update_cgrp_time(cgrp_out);
}

static inline void update_cgrp_time_from_event(struct perf_event *event)
{
	struct perf_cgroup *cgrp;

	/*
	 * ensure we access cgroup data only when needed and
	 * when we know the cgroup is pinned (css_get)
	 */
	if (!is_cgroup_event(event))
		return;

	cgrp = perf_cgroup_from_task(current);
	/*
	 * Do not update time when cgroup is not active
	 */
	if (cgrp == event->cgrp)
		__update_cgrp_time(event->cgrp);
}

static inline void
perf_cgroup_set_timestamp(struct task_struct *task,
			  struct perf_event_context *ctx)
{
	struct perf_cgroup *cgrp;
	struct perf_cgroup_info *info;

	/*
	 * ctx->lock held by caller
	 * ensure we do not access cgroup data
	 * unless we have the cgroup pinned (css_get)
	 */
	if (!task || !ctx->nr_cgroups)
		return;

	cgrp = perf_cgroup_from_task(task);
	info = this_cpu_ptr(cgrp->info);
	info->timestamp = ctx->timestamp;
}

#define PERF_CGROUP_SWOUT	0x1 /* cgroup switch out every event */
#define PERF_CGROUP_SWIN	0x2 /* cgroup switch in events based on task */

/*
 * reschedule events based on the cgroup constraint of task.
 *
 * mode SWOUT : schedule out everything
 * mode SWIN : schedule in based on cgroup for next
 */
void perf_cgroup_switch(struct task_struct *task, int mode)
{
	struct perf_cpu_context *cpuctx;
	struct pmu *pmu;
	unsigned long flags;

	/*
	 * disable interrupts to avoid getting nr_cgroup
	 * changes via __perf_event_disable(). Also
	 * avoids preemption.
	 */
	local_irq_save(flags);

	/*
	 * we reschedule only in the presence of cgroup
	 * constrained events.
	 */
	rcu_read_lock();

	list_for_each_entry_rcu(pmu, &pmus, entry) {
		cpuctx = this_cpu_ptr(pmu->pmu_cpu_context);

		/*
		 * perf_cgroup_events says at least one
		 * context on this CPU has cgroup events.
		 *
		 * ctx->nr_cgroups reports the number of cgroup
		 * events for a context.
		 */
		if (cpuctx->ctx.nr_cgroups > 0) {
			perf_ctx_lock(cpuctx, cpuctx->task_ctx);
			perf_pmu_disable(cpuctx->ctx.pmu);

			if (mode & PERF_CGROUP_SWOUT) {
				cpu_ctx_sched_out(cpuctx, EVENT_ALL);
				/*
				 * must not be done before ctxswout due
				 * to event_filter_match() in event_sched_out()
				 */
				cpuctx->cgrp = NULL;
			}

			if (mode & PERF_CGROUP_SWIN) {
				WARN_ON_ONCE(cpuctx->cgrp);
				/*
				 * set cgrp before ctxsw in to
				 * allow event_filter_match() to not
				 * have to pass task around
				 */
				cpuctx->cgrp = perf_cgroup_from_task(task);
				cpu_ctx_sched_in(cpuctx, EVENT_ALL, task);
			}
			perf_pmu_enable(cpuctx->ctx.pmu);
			perf_ctx_unlock(cpuctx, cpuctx->task_ctx);
		}
	}

	rcu_read_unlock();

	local_irq_restore(flags);
}

static inline void perf_cgroup_sched_out(struct task_struct *task)
{
	perf_cgroup_switch(task, PERF_CGROUP_SWOUT);
}

static inline void perf_cgroup_sched_in(struct task_struct *task)
{
	perf_cgroup_switch(task, PERF_CGROUP_SWIN);
}

static inline int perf_cgroup_connect(int fd, struct perf_event *event,
				      struct perf_event_attr *attr,
				      struct perf_event *group_leader)
{
	struct perf_cgroup *cgrp;
	struct cgroup_subsys_state *css;
	struct file *file;
	int ret = 0, fput_needed;

	file = fget_light(fd, &fput_needed);
	if (!file)
		return -EBADF;

	css = cgroup_css_from_dir(file, perf_subsys_id);
	if (IS_ERR(css)) {
		ret = PTR_ERR(css);
		goto out;
	}

	cgrp = container_of(css, struct perf_cgroup, css);
	event->cgrp = cgrp;

	/* must be done before we fput() the file */
	perf_get_cgroup(event);

	/*
	 * all events in a group must monitor
	 * the same cgroup because a task belongs
	 * to only one perf cgroup at a time
	 */
	if (group_leader && group_leader->cgrp != cgrp) {
		perf_detach_cgroup(event);
		ret = -EINVAL;
	}
out:
	fput_light(file, fput_needed);
	return ret;
}
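
/*
 * Userspace-side sketch (illustrative, not part of the original file):
 * the @fd resolved above is a file descriptor of a perf_event cgroup
 * directory, passed via the pid argument of perf_event_open() together
 * with PERF_FLAG_PID_CGROUP; cgroup events are per-cpu, so a valid cpu
 * is also required ("mygrp" is a hypothetical cgroup name):
 *
 *	int cfd = open("/sys/fs/cgroup/perf_event/mygrp", O_RDONLY);
 *	int pfd = syscall(__NR_perf_event_open, &attr, cfd, cpu, -1,
 *			  PERF_FLAG_PID_CGROUP);
 */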

static inline void
perf_cgroup_set_shadow_time(struct perf_event *event, u64 now)
{
	struct perf_cgroup_info *t;
	t = per_cpu_ptr(event->cgrp->info, event->cpu);
	event->shadow_ctx_time = now - t->timestamp;
}

static inline void
perf_cgroup_defer_enabled(struct perf_event *event)
{
	/*
	 * when the current task's perf cgroup does not match
	 * the event's, we need to remember to call the
	 * perf_mark_enable() function the first time a task with
	 * a matching perf cgroup is scheduled in.
	 */
	if (is_cgroup_event(event) && !perf_cgroup_match(event))
		event->cgrp_defer_enabled = 1;
}

static inline void
perf_cgroup_mark_enabled(struct perf_event *event,
			 struct perf_event_context *ctx)
{
	struct perf_event *sub;
	u64 tstamp = perf_event_time(event);

	if (!event->cgrp_defer_enabled)
		return;

	event->cgrp_defer_enabled = 0;

	event->tstamp_enabled = tstamp - event->total_time_enabled;
	list_for_each_entry(sub, &event->sibling_list, group_entry) {
		if (sub->state >= PERF_EVENT_STATE_INACTIVE) {
			sub->tstamp_enabled = tstamp - sub->total_time_enabled;
			sub->cgrp_defer_enabled = 0;
		}
	}
}
#else /* !CONFIG_CGROUP_PERF */

static inline bool
perf_cgroup_match(struct perf_event *event)
{
	return true;
}

static inline void perf_detach_cgroup(struct perf_event *event)
{}

static inline int is_cgroup_event(struct perf_event *event)
{
	return 0;
}

static inline u64 perf_cgroup_event_cgrp_time(struct perf_event *event)
{
	return 0;
}

static inline void update_cgrp_time_from_event(struct perf_event *event)
{
}

static inline void update_cgrp_time_from_cpuctx(struct perf_cpu_context *cpuctx)
{
}

static inline void perf_cgroup_sched_out(struct task_struct *task)
{
}

static inline void perf_cgroup_sched_in(struct task_struct *task)
{
}

static inline int perf_cgroup_connect(pid_t pid, struct perf_event *event,
				      struct perf_event_attr *attr,
				      struct perf_event *group_leader)
{
	return -EINVAL;
}

static inline void
perf_cgroup_set_timestamp(struct task_struct *task,
			  struct perf_event_context *ctx)
{
}

void
perf_cgroup_switch(struct task_struct *task, struct task_struct *next)
{
}

static inline void
perf_cgroup_set_shadow_time(struct perf_event *event, u64 now)
{
}

static inline u64 perf_cgroup_event_time(struct perf_event *event)
{
	return 0;
}

static inline void
perf_cgroup_defer_enabled(struct perf_event *event)
{
}

static inline void
perf_cgroup_mark_enabled(struct perf_event *event,
			 struct perf_event_context *ctx)
{
}
#endif

void perf_pmu_disable(struct pmu *pmu)
{
	int *count = this_cpu_ptr(pmu->pmu_disable_count);
	if (!(*count)++)
		pmu->pmu_disable(pmu);
}

void perf_pmu_enable(struct pmu *pmu)
{
	int *count = this_cpu_ptr(pmu->pmu_disable_count);
	if (!--(*count))
		pmu->pmu_enable(pmu);
}
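
/*
 * Nesting sketch (illustrative, not part of the original file): the
 * per-cpu pmu_disable_count makes these calls reference counted, so only
 * the outermost pair reaches the hardware callbacks:
 *
 *	perf_pmu_disable(pmu);	count 0 -> 1, pmu->pmu_disable() runs
 *	perf_pmu_disable(pmu);	count 1 -> 2, no callback
 *	perf_pmu_enable(pmu);	count 2 -> 1, no callback
 *	perf_pmu_enable(pmu);	count 1 -> 0, pmu->pmu_enable() runs
 */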

static DEFINE_PER_CPU(struct list_head, rotation_list);

/*
 * perf_pmu_rotate_start() and perf_rotate_context() are fully serialized
 * because they're strictly cpu affine and rotate_start is called with IRQs
 * disabled, while rotate_context is called from IRQ context.
 */
static void perf_pmu_rotate_start(struct pmu *pmu)
{
	struct perf_cpu_context *cpuctx = this_cpu_ptr(pmu->pmu_cpu_context);
	struct list_head *head = &__get_cpu_var(rotation_list);

	WARN_ON(!irqs_disabled());

	if (list_empty(&cpuctx->rotation_list))
		list_add(&cpuctx->rotation_list, head);
}

static void get_ctx(struct perf_event_context *ctx)
{
	WARN_ON(!atomic_inc_not_zero(&ctx->refcount));
}

static void put_ctx(struct perf_event_context *ctx)
{
	if (atomic_dec_and_test(&ctx->refcount)) {
		if (ctx->parent_ctx)
			put_ctx(ctx->parent_ctx);
		if (ctx->task)
			put_task_struct(ctx->task);
		kfree_rcu(ctx, rcu_head);
	}
}

static void unclone_ctx(struct perf_event_context *ctx)
{
	if (ctx->parent_ctx) {
		put_ctx(ctx->parent_ctx);
		ctx->parent_ctx = NULL;
	}
}

static u32 perf_event_pid(struct perf_event *event, struct task_struct *p)
{
	/*
	 * only top level events have the pid namespace they were created in
	 */
	if (event->parent)
		event = event->parent;

	return task_tgid_nr_ns(p, event->ns);
}

static u32 perf_event_tid(struct perf_event *event, struct task_struct *p)
{
	/*
	 * only top level events have the pid namespace they were created in
	 */
	if (event->parent)
		event = event->parent;

	return task_pid_nr_ns(p, event->ns);
}

/*
 * If we inherit events we want to return the parent event id
 * to userspace.
 */
static u64 primary_event_id(struct perf_event *event)
{
	u64 id = event->id;

	if (event->parent)
		id = event->parent->id;

	return id;
}

/*
 * Get the perf_event_context for a task and lock it.
 * This has to cope with the fact that until it is locked,
 * the context could get moved to another task.
 */
static struct perf_event_context *
perf_lock_task_context(struct task_struct *task, int ctxn, unsigned long *flags)
{
	struct perf_event_context *ctx;

	rcu_read_lock();
retry:
	ctx = rcu_dereference(task->perf_event_ctxp[ctxn]);
	if (ctx) {
		/*
		 * If this context is a clone of another, it might
		 * get swapped for another underneath us by
		 * perf_event_task_sched_out, though the
		 * rcu_read_lock() protects us from any context
		 * getting freed. Lock the context and check if it
		 * got swapped before we could get the lock, and retry
		 * if so. If we locked the right context, then it
		 * can't get swapped on us any more.
		 */
		raw_spin_lock_irqsave(&ctx->lock, *flags);
		if (ctx != rcu_dereference(task->perf_event_ctxp[ctxn])) {
			raw_spin_unlock_irqrestore(&ctx->lock, *flags);
			goto retry;
		}

		if (!atomic_inc_not_zero(&ctx->refcount)) {
			raw_spin_unlock_irqrestore(&ctx->lock, *flags);
			ctx = NULL;
		}
	}
	rcu_read_unlock();
	return ctx;
}

/*
 * Get the context for a task and increment its pin_count so it
 * can't get swapped to another task. This also increments its
 * reference count so that the context can't get freed.
 */
static struct perf_event_context *
perf_pin_task_context(struct task_struct *task, int ctxn)
{
	struct perf_event_context *ctx;
	unsigned long flags;

	ctx = perf_lock_task_context(task, ctxn, &flags);
	if (ctx) {
		++ctx->pin_count;
		raw_spin_unlock_irqrestore(&ctx->lock, flags);
	}
	return ctx;
}

static void perf_unpin_context(struct perf_event_context *ctx)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&ctx->lock, flags);
	--ctx->pin_count;
	raw_spin_unlock_irqrestore(&ctx->lock, flags);
}

/*
 * Update the record of the current time in a context.
 */
static void update_context_time(struct perf_event_context *ctx)
{
	u64 now = perf_clock();

	ctx->time += now - ctx->timestamp;
	ctx->timestamp = now;
}

static u64 perf_event_time(struct perf_event *event)
{
	struct perf_event_context *ctx = event->ctx;

	if (is_cgroup_event(event))
		return perf_cgroup_event_time(event);

	return ctx ? ctx->time : 0;
}

/*
 * Update the total_time_enabled and total_time_running fields for an event.
 * The caller of this function needs to hold the ctx->lock.
 */
static void update_event_times(struct perf_event *event)
{
	struct perf_event_context *ctx = event->ctx;
	u64 run_end;

	if (event->state < PERF_EVENT_STATE_INACTIVE ||
	    event->group_leader->state < PERF_EVENT_STATE_INACTIVE)
		return;
	/*
	 * in cgroup mode, time_enabled represents
	 * the time the event was enabled AND active
	 * tasks were in the monitored cgroup. This is
	 * independent of the activity of the context as
	 * there may be a mix of cgroup and non-cgroup events.
	 *
	 * That is why we treat cgroup events differently
	 * here.
	 */
	if (is_cgroup_event(event))
		run_end = perf_event_time(event);
	else if (ctx->is_active)
		run_end = ctx->time;
	else
		run_end = event->tstamp_stopped;

	event->total_time_enabled = run_end - event->tstamp_enabled;

	if (event->state == PERF_EVENT_STATE_INACTIVE)
		run_end = event->tstamp_stopped;
	else
		run_end = perf_event_time(event);

	event->total_time_running = run_end - event->tstamp_running;
}
789
Peter Zijlstra96c21a42010-05-11 16:19:10 +0200790/*
791 * Update total_time_enabled and total_time_running for all events in a group.
792 */
793static void update_group_times(struct perf_event *leader)
794{
795 struct perf_event *event;
796
797 update_event_times(leader);
798 list_for_each_entry(event, &leader->sibling_list, group_entry)
799 update_event_times(event);
800}
801
Frederic Weisbecker889ff012010-01-09 20:04:47 +0100802static struct list_head *
803ctx_group_list(struct perf_event *event, struct perf_event_context *ctx)
804{
805 if (event->attr.pinned)
806 return &ctx->pinned_groups;
807 else
808 return &ctx->flexible_groups;
809}

/*
 * Add an event to the lists for its context.
 * Must be called with ctx->mutex and ctx->lock held.
 */
static void
list_add_event(struct perf_event *event, struct perf_event_context *ctx)
{
	WARN_ON_ONCE(event->attach_state & PERF_ATTACH_CONTEXT);
	event->attach_state |= PERF_ATTACH_CONTEXT;

	/*
	 * If we're a stand alone event or group leader, we go to the context
	 * list, group events are kept attached to the group so that
	 * perf_group_detach can, at all times, locate all siblings.
	 */
	if (event->group_leader == event) {
		struct list_head *list;

		if (is_software_event(event))
			event->group_flags |= PERF_GROUP_SOFTWARE;

		list = ctx_group_list(event, ctx);
		list_add_tail(&event->group_entry, list);
	}

	if (is_cgroup_event(event))
		ctx->nr_cgroups++;

	list_add_rcu(&event->event_entry, &ctx->event_list);
	if (!ctx->nr_events)
		perf_pmu_rotate_start(ctx->pmu);
	ctx->nr_events++;
	if (event->attr.inherit_stat)
		ctx->nr_stat++;
}

/*
 * Called at perf_event creation and when events are attached/detached from a
 * group.
 */
static void perf_event__read_size(struct perf_event *event)
{
	int entry = sizeof(u64); /* value */
	int size = 0;
	int nr = 1;

	if (event->attr.read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
		size += sizeof(u64);

	if (event->attr.read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
		size += sizeof(u64);

	if (event->attr.read_format & PERF_FORMAT_ID)
		entry += sizeof(u64);

	if (event->attr.read_format & PERF_FORMAT_GROUP) {
		nr += event->group_leader->nr_siblings;
		size += sizeof(u64);
	}

	size += entry * nr;
	event->read_size = size;
}
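
/*
 * Worked example (illustrative): for a leader with two siblings and
 * read_format == PERF_FORMAT_GROUP | PERF_FORMAT_ID, entry is 16 bytes
 * (value + id), nr is 3 and the group header adds one u64, so
 * read_size = 8 + 3 * 16 = 56 bytes returned by each read().
 */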

static void perf_event__header_size(struct perf_event *event)
{
	struct perf_sample_data *data;
	u64 sample_type = event->attr.sample_type;
	u16 size = 0;

	perf_event__read_size(event);

	if (sample_type & PERF_SAMPLE_IP)
		size += sizeof(data->ip);

	if (sample_type & PERF_SAMPLE_ADDR)
		size += sizeof(data->addr);

	if (sample_type & PERF_SAMPLE_PERIOD)
		size += sizeof(data->period);

	if (sample_type & PERF_SAMPLE_READ)
		size += event->read_size;

	event->header_size = size;
}

static void perf_event__id_header_size(struct perf_event *event)
{
	struct perf_sample_data *data;
	u64 sample_type = event->attr.sample_type;
	u16 size = 0;

	if (sample_type & PERF_SAMPLE_TID)
		size += sizeof(data->tid_entry);

	if (sample_type & PERF_SAMPLE_TIME)
		size += sizeof(data->time);

	if (sample_type & PERF_SAMPLE_ID)
		size += sizeof(data->id);

	if (sample_type & PERF_SAMPLE_STREAM_ID)
		size += sizeof(data->stream_id);

	if (sample_type & PERF_SAMPLE_CPU)
		size += sizeof(data->cpu_entry);

	event->id_header_size = size;
}

static void perf_group_attach(struct perf_event *event)
{
	struct perf_event *group_leader = event->group_leader, *pos;

	/*
	 * We can have double attach due to group movement in perf_event_open.
	 */
	if (event->attach_state & PERF_ATTACH_GROUP)
		return;

	event->attach_state |= PERF_ATTACH_GROUP;

	if (group_leader == event)
		return;

	if (group_leader->group_flags & PERF_GROUP_SOFTWARE &&
	    !is_software_event(event))
		group_leader->group_flags &= ~PERF_GROUP_SOFTWARE;

	list_add_tail(&event->group_entry, &group_leader->sibling_list);
	group_leader->nr_siblings++;

	perf_event__header_size(group_leader);

	list_for_each_entry(pos, &group_leader->sibling_list, group_entry)
		perf_event__header_size(pos);
}

/*
 * Remove an event from the lists for its context.
 * Must be called with ctx->mutex and ctx->lock held.
 */
static void
list_del_event(struct perf_event *event, struct perf_event_context *ctx)
{
	struct perf_cpu_context *cpuctx;
	/*
	 * We can have double detach due to exit/hot-unplug + close.
	 */
	if (!(event->attach_state & PERF_ATTACH_CONTEXT))
		return;

	event->attach_state &= ~PERF_ATTACH_CONTEXT;

	if (is_cgroup_event(event)) {
		ctx->nr_cgroups--;
		cpuctx = __get_cpu_context(ctx);
		/*
		 * if there are no more cgroup events
		 * then clear cgrp to avoid stale pointer
		 * in update_cgrp_time_from_cpuctx()
		 */
		if (!ctx->nr_cgroups)
			cpuctx->cgrp = NULL;
	}

	ctx->nr_events--;
	if (event->attr.inherit_stat)
		ctx->nr_stat--;

	list_del_rcu(&event->event_entry);

	if (event->group_leader == event)
		list_del_init(&event->group_entry);

	update_group_times(event);

	/*
	 * If event was in error state, then keep it
	 * that way, otherwise bogus counts will be
	 * returned on read(). The only way to get out
	 * of error state is by explicit re-enabling
	 * of the event
	 */
	if (event->state > PERF_EVENT_STATE_OFF)
		event->state = PERF_EVENT_STATE_OFF;
}

static void perf_group_detach(struct perf_event *event)
{
	struct perf_event *sibling, *tmp;
	struct list_head *list = NULL;

	/*
	 * We can have double detach due to exit/hot-unplug + close.
	 */
	if (!(event->attach_state & PERF_ATTACH_GROUP))
		return;

	event->attach_state &= ~PERF_ATTACH_GROUP;

	/*
	 * If this is a sibling, remove it from its group.
	 */
	if (event->group_leader != event) {
		list_del_init(&event->group_entry);
		event->group_leader->nr_siblings--;
		goto out;
	}

	if (!list_empty(&event->group_entry))
		list = &event->group_entry;

	/*
	 * If this was a group event with sibling events then
	 * upgrade the siblings to singleton events by adding them
	 * to whatever list we are on.
	 */
	list_for_each_entry_safe(sibling, tmp, &event->sibling_list, group_entry) {
		if (list)
			list_move_tail(&sibling->group_entry, list);
		sibling->group_leader = sibling;

		/* Inherit group flags from the previous leader */
		sibling->group_flags = event->group_flags;
	}

out:
	perf_event__header_size(event->group_leader);

	list_for_each_entry(tmp, &event->group_leader->sibling_list, group_entry)
		perf_event__header_size(tmp);
}

static inline int
event_filter_match(struct perf_event *event)
{
	return (event->cpu == -1 || event->cpu == smp_processor_id())
	    && perf_cgroup_match(event);
}

static void
event_sched_out(struct perf_event *event,
		  struct perf_cpu_context *cpuctx,
		  struct perf_event_context *ctx)
{
	u64 tstamp = perf_event_time(event);
	u64 delta;
	/*
	 * An event which could not be activated because of
	 * filter mismatch still needs to have its timings
	 * maintained, otherwise bogus information is returned
	 * via read() for time_enabled, time_running:
	 */
	if (event->state == PERF_EVENT_STATE_INACTIVE
	    && !event_filter_match(event)) {
		delta = tstamp - event->tstamp_stopped;
		event->tstamp_running += delta;
		event->tstamp_stopped = tstamp;
	}

	if (event->state != PERF_EVENT_STATE_ACTIVE)
		return;

	event->state = PERF_EVENT_STATE_INACTIVE;
	if (event->pending_disable) {
		event->pending_disable = 0;
		event->state = PERF_EVENT_STATE_OFF;
	}
	event->tstamp_stopped = tstamp;
	event->pmu->del(event, 0);
	event->oncpu = -1;

	if (!is_software_event(event))
		cpuctx->active_oncpu--;
	ctx->nr_active--;
	if (event->attr.exclusive || !cpuctx->active_oncpu)
		cpuctx->exclusive = 0;
}

static void
group_sched_out(struct perf_event *group_event,
		struct perf_cpu_context *cpuctx,
		struct perf_event_context *ctx)
{
	struct perf_event *event;
	int state = group_event->state;

	event_sched_out(group_event, cpuctx, ctx);

	/*
	 * Schedule out siblings (if any):
	 */
	list_for_each_entry(event, &group_event->sibling_list, group_entry)
		event_sched_out(event, cpuctx, ctx);

	if (state == PERF_EVENT_STATE_ACTIVE && group_event->attr.exclusive)
		cpuctx->exclusive = 0;
}

/*
 * Cross CPU call to remove a performance event
 *
 * We disable the event on the hardware level first. After that we
 * remove it from the context list.
 */
static int __perf_remove_from_context(void *info)
{
	struct perf_event *event = info;
	struct perf_event_context *ctx = event->ctx;
	struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);

	raw_spin_lock(&ctx->lock);
	event_sched_out(event, cpuctx, ctx);
	list_del_event(event, ctx);
	if (!ctx->nr_events && cpuctx->task_ctx == ctx) {
		ctx->is_active = 0;
		cpuctx->task_ctx = NULL;
	}
	raw_spin_unlock(&ctx->lock);

	return 0;
}

/*
 * Remove the event from a task's (or a CPU's) list of events.
 *
 * CPU events are removed with an smp call. For task events we only
 * call when the task is on a CPU.
 *
 * If event->ctx is a cloned context, callers must make sure that
 * every task struct that event->ctx->task could possibly point to
 * remains valid. This is OK when called from perf_release since
 * that only calls us on the top-level context, which can't be a clone.
 * When called from perf_event_exit_task, it's OK because the
 * context has been detached from its task.
 */
static void perf_remove_from_context(struct perf_event *event)
{
	struct perf_event_context *ctx = event->ctx;
	struct task_struct *task = ctx->task;

	lockdep_assert_held(&ctx->mutex);

	if (!task) {
		/*
		 * Per cpu events are removed via an smp call and
		 * the removal is always successful.
		 */
		cpu_function_call(event->cpu, __perf_remove_from_context, event);
		return;
	}

retry:
	if (!task_function_call(task, __perf_remove_from_context, event))
		return;

	raw_spin_lock_irq(&ctx->lock);
	/*
	 * If we failed to find a running task, but find the context active now
	 * that we've acquired the ctx->lock, retry.
	 */
	if (ctx->is_active) {
		raw_spin_unlock_irq(&ctx->lock);
		goto retry;
	}

	/*
	 * Since the task isn't running, it's safe to remove the event;
	 * holding the ctx->lock ensures the task won't get scheduled in.
	 */
	list_del_event(event, ctx);
	raw_spin_unlock_irq(&ctx->lock);
}

/*
 * Cross CPU call to disable a performance event
 */
static int __perf_event_disable(void *info)
{
	struct perf_event *event = info;
	struct perf_event_context *ctx = event->ctx;
	struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);

	/*
	 * If this is a per-task event, need to check whether this
	 * event's task is the current task on this cpu.
	 *
	 * Can trigger due to concurrent perf_event_context_sched_out()
	 * flipping contexts around.
	 */
	if (ctx->task && cpuctx->task_ctx != ctx)
		return -EINVAL;

	raw_spin_lock(&ctx->lock);

	/*
	 * If the event is on, turn it off.
	 * If it is in error state, leave it in error state.
	 */
	if (event->state >= PERF_EVENT_STATE_INACTIVE) {
		update_context_time(ctx);
		update_cgrp_time_from_event(event);
		update_group_times(event);
		if (event == event->group_leader)
			group_sched_out(event, cpuctx, ctx);
		else
			event_sched_out(event, cpuctx, ctx);
		event->state = PERF_EVENT_STATE_OFF;
	}

	raw_spin_unlock(&ctx->lock);

	return 0;
}

/*
 * Disable an event.
 *
 * If event->ctx is a cloned context, callers must make sure that
 * every task struct that event->ctx->task could possibly point to
 * remains valid. This condition is satisfied when called through
 * perf_event_for_each_child or perf_event_for_each because they
 * hold the top-level event's child_mutex, so any descendant that
 * goes to exit will block in sync_child_event.
 * When called from perf_pending_event it's OK because event->ctx
 * is the current context on this CPU and preemption is disabled,
 * hence we can't get into perf_event_task_sched_out for this context.
 */
void perf_event_disable(struct perf_event *event)
{
	struct perf_event_context *ctx = event->ctx;
	struct task_struct *task = ctx->task;

	if (!task) {
		/*
		 * Disable the event on the cpu that it's on
		 */
		cpu_function_call(event->cpu, __perf_event_disable, event);
		return;
	}

retry:
	if (!task_function_call(task, __perf_event_disable, event))
		return;

	raw_spin_lock_irq(&ctx->lock);
	/*
	 * If the event is still active, we need to retry the cross-call.
	 */
	if (event->state == PERF_EVENT_STATE_ACTIVE) {
		raw_spin_unlock_irq(&ctx->lock);
		/*
		 * Reload the task pointer, it might have been changed by
		 * a concurrent perf_event_context_sched_out().
		 */
		task = ctx->task;
		goto retry;
	}

	/*
	 * Since we have the lock this context can't be scheduled
	 * in, so we can change the state safely.
	 */
	if (event->state == PERF_EVENT_STATE_INACTIVE) {
		update_group_times(event);
		event->state = PERF_EVENT_STATE_OFF;
	}
	raw_spin_unlock_irq(&ctx->lock);
}

static void perf_set_shadow_time(struct perf_event *event,
				 struct perf_event_context *ctx,
				 u64 tstamp)
{
	/*
	 * use the correct time source for the time snapshot
	 *
	 * We could get by without this by leveraging the
	 * fact that to get to this function, the caller
	 * has most likely already called update_context_time()
	 * and update_cgrp_time_xx() and thus both timestamps
	 * are identical (or very close). Given that tstamp is
	 * already adjusted for cgroup, we could say that:
	 *     tstamp - ctx->timestamp
	 * is equivalent to
	 *     tstamp - cgrp->timestamp.
	 *
	 * Then, in perf_output_read(), the calculation would
	 * work with no changes because:
	 * - event is guaranteed scheduled in
	 * - no scheduled out in between
	 * - thus the timestamp would be the same
	 *
	 * But this is a bit hairy.
	 *
	 * So instead, we have an explicit cgroup call to remain
	 * within the time source all along. We believe it
	 * is cleaner and simpler to understand.
	 */
	if (is_cgroup_event(event))
		perf_cgroup_set_shadow_time(event, tstamp);
	else
		event->shadow_ctx_time = tstamp - ctx->timestamp;
}

#define MAX_INTERRUPTS (~0ULL)

static void perf_log_throttle(struct perf_event *event, int enable);

static int
event_sched_in(struct perf_event *event,
		 struct perf_cpu_context *cpuctx,
		 struct perf_event_context *ctx)
{
	u64 tstamp = perf_event_time(event);

	if (event->state <= PERF_EVENT_STATE_OFF)
		return 0;

	event->state = PERF_EVENT_STATE_ACTIVE;
	event->oncpu = smp_processor_id();

	/*
	 * Unthrottle events, since we scheduled we might have missed several
	 * ticks already, also for a heavily scheduling task there is little
	 * guarantee it'll get a tick in a timely manner.
	 */
	if (unlikely(event->hw.interrupts == MAX_INTERRUPTS)) {
		perf_log_throttle(event, 1);
		event->hw.interrupts = 0;
	}

	/*
	 * The new state must be visible before we turn it on in the hardware:
	 */
	smp_wmb();

	if (event->pmu->add(event, PERF_EF_START)) {
		event->state = PERF_EVENT_STATE_INACTIVE;
		event->oncpu = -1;
		return -EAGAIN;
	}

	event->tstamp_running += tstamp - event->tstamp_stopped;

	perf_set_shadow_time(event, ctx, tstamp);

	if (!is_software_event(event))
		cpuctx->active_oncpu++;
	ctx->nr_active++;

	if (event->attr.exclusive)
		cpuctx->exclusive = 1;

	return 0;
}

static int
group_sched_in(struct perf_event *group_event,
	       struct perf_cpu_context *cpuctx,
	       struct perf_event_context *ctx)
{
	struct perf_event *event, *partial_group = NULL;
	struct pmu *pmu = group_event->pmu;
	u64 now = ctx->time;
	bool simulate = false;

	if (group_event->state == PERF_EVENT_STATE_OFF)
		return 0;

	pmu->start_txn(pmu);

	if (event_sched_in(group_event, cpuctx, ctx)) {
		pmu->cancel_txn(pmu);
		return -EAGAIN;
	}

	/*
	 * Schedule in siblings as one group (if any):
	 */
	list_for_each_entry(event, &group_event->sibling_list, group_entry) {
		if (event_sched_in(event, cpuctx, ctx)) {
			partial_group = event;
			goto group_error;
		}
	}

	if (!pmu->commit_txn(pmu))
		return 0;

group_error:
	/*
	 * Groups can be scheduled in as one unit only, so undo any
	 * partial group before returning:
	 * The events up to the failed event are scheduled out normally,
	 * tstamp_stopped will be updated.
	 *
	 * The failed events and the remaining siblings need to have
	 * their timings updated as if they had gone through event_sched_in()
	 * and event_sched_out(). This is required to get consistent timings
	 * across the group. This also takes care of the case where the group
	 * could never be scheduled by ensuring tstamp_stopped is set to mark
	 * the time the event was actually stopped, such that time delta
	 * calculation in update_event_times() is correct.
	 */
	list_for_each_entry(event, &group_event->sibling_list, group_entry) {
		if (event == partial_group)
			simulate = true;

		if (simulate) {
			event->tstamp_running += now - event->tstamp_stopped;
			event->tstamp_stopped = now;
		} else {
			event_sched_out(event, cpuctx, ctx);
		}
	}
	event_sched_out(group_event, cpuctx, ctx);

	pmu->cancel_txn(pmu);

	return -EAGAIN;
}
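
/*
 * Transaction sketch (illustrative, not part of the original file): the
 * pattern above turns a group into one all-or-nothing schedulability
 * check on the PMU:
 *
 *	pmu->start_txn(pmu);		begin batching ->add() calls
 *	event_sched_in(leader);		->add() may only queue the event
 *	event_sched_in(siblings...);
 *	if (!pmu->commit_txn(pmu))	0 on success: every counter fits
 *		return 0;
 *	pmu->cancel_txn(pmu);		failure: undo the whole batch
 */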
1436
1437/*
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001438 * Work out whether we can put this event group on the CPU now.
1439 */
1440static int group_can_go_on(struct perf_event *event,
1441 struct perf_cpu_context *cpuctx,
1442 int can_add_hw)
1443{
1444 /*
1445 * Groups consisting entirely of software events can always go on.
1446 */
Frederic Weisbeckerd6f962b2010-01-10 01:25:51 +01001447 if (event->group_flags & PERF_GROUP_SOFTWARE)
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001448 return 1;
1449 /*
1450 * If an exclusive group is already on, no other hardware
1451 * events can go on.
1452 */
1453 if (cpuctx->exclusive)
1454 return 0;
1455 /*
1456 * If this group is exclusive and there are already
1457 * events on the CPU, it can't go on.
1458 */
1459 if (event->attr.exclusive && cpuctx->active_oncpu)
1460 return 0;
1461 /*
1462 * Otherwise, try to add it if all previous groups were able
1463 * to go on.
1464 */
1465 return can_add_hw;
1466}
1467
1468static void add_event_to_ctx(struct perf_event *event,
1469 struct perf_event_context *ctx)
1470{
Stephane Eranian41587552011-01-03 18:20:01 +02001471 u64 tstamp = perf_event_time(event);
1472
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001473 list_add_event(event, ctx);
Peter Zijlstra8a495422010-05-27 15:47:49 +02001474 perf_group_attach(event);
Stephane Eranian41587552011-01-03 18:20:01 +02001475 event->tstamp_enabled = tstamp;
1476 event->tstamp_running = tstamp;
1477 event->tstamp_stopped = tstamp;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001478}
1479
Peter Zijlstra2c29ef02011-04-09 21:17:44 +02001480static void task_ctx_sched_out(struct perf_event_context *ctx);
1481static void
1482ctx_sched_in(struct perf_event_context *ctx,
1483 struct perf_cpu_context *cpuctx,
1484 enum event_type_t event_type,
1485 struct task_struct *task);
Peter Zijlstrafe4b04f2011-02-02 13:19:09 +01001486
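/*
 * Note: pinned groups are scheduled in before flexible ones, and the
 * CPU context before the task context, so pinned events get first
 * claim on the hardware.
 */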
Peter Zijlstradce58552011-04-09 21:17:46 +02001487static void perf_event_sched_in(struct perf_cpu_context *cpuctx,
1488 struct perf_event_context *ctx,
1489 struct task_struct *task)
1490{
1491 cpu_ctx_sched_in(cpuctx, EVENT_PINNED, task);
1492 if (ctx)
1493 ctx_sched_in(ctx, cpuctx, EVENT_PINNED, task);
1494 cpu_ctx_sched_in(cpuctx, EVENT_FLEXIBLE, task);
1495 if (ctx)
1496 ctx_sched_in(ctx, cpuctx, EVENT_FLEXIBLE, task);
1497}
1498
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001499/*
1500 * Cross CPU call to install and enable a performance event
1501 *
1502 * Must be called with ctx->mutex held
1503 */
Peter Zijlstrafe4b04f2011-02-02 13:19:09 +01001504static int __perf_install_in_context(void *info)
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001505{
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001506 struct perf_event *event = info;
1507 struct perf_event_context *ctx = event->ctx;
Peter Zijlstra108b02c2010-09-06 14:32:03 +02001508 struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);
Peter Zijlstra2c29ef02011-04-09 21:17:44 +02001509 struct perf_event_context *task_ctx = cpuctx->task_ctx;
1510 struct task_struct *task = current;
1511
Peter Zijlstrab58f6b02011-06-07 00:23:28 +02001512 perf_ctx_lock(cpuctx, task_ctx);
Peter Zijlstra2c29ef02011-04-09 21:17:44 +02001513 perf_pmu_disable(cpuctx->ctx.pmu);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001514
1515 /*
Peter Zijlstra2c29ef02011-04-09 21:17:44 +02001516 * If there was an active task_ctx, schedule it out.
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001517 */
Peter Zijlstrab58f6b02011-06-07 00:23:28 +02001518 if (task_ctx)
Peter Zijlstra2c29ef02011-04-09 21:17:44 +02001519 task_ctx_sched_out(task_ctx);
Peter Zijlstrab58f6b02011-06-07 00:23:28 +02001520
1521 /*
1522 * If the context we're installing events in is not the
1523 * active task_ctx, flip them.
1524 */
1525 if (ctx->task && task_ctx != ctx) {
1526 if (task_ctx)
1527 raw_spin_unlock(&task_ctx->lock);
1528 raw_spin_lock(&ctx->lock);
1529 task_ctx = ctx;
1530 }
1531
1532 if (task_ctx) {
1533 cpuctx->task_ctx = task_ctx;
Peter Zijlstra2c29ef02011-04-09 21:17:44 +02001534 task = task_ctx->task;
1535 }
Peter Zijlstrab58f6b02011-06-07 00:23:28 +02001536
Peter Zijlstra2c29ef02011-04-09 21:17:44 +02001537 cpu_ctx_sched_out(cpuctx, EVENT_ALL);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001538
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001539 update_context_time(ctx);
Stephane Eraniane5d13672011-02-14 11:20:01 +02001540 /*
1541 * update cgrp time only if current cgrp
1542 * matches event->cgrp. Must be done before
1543 * calling add_event_to_ctx()
1544 */
1545 update_cgrp_time_from_event(event);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001546
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001547 add_event_to_ctx(event, ctx);
1548
1549 /*
Peter Zijlstra2c29ef02011-04-09 21:17:44 +02001550 * Schedule everything back in
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001551 */
Peter Zijlstradce58552011-04-09 21:17:46 +02001552 perf_event_sched_in(cpuctx, task_ctx, task);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001553
Peter Zijlstra2c29ef02011-04-09 21:17:44 +02001554 perf_pmu_enable(cpuctx->ctx.pmu);
1555 perf_ctx_unlock(cpuctx, task_ctx);
Peter Zijlstrafe4b04f2011-02-02 13:19:09 +01001556
1557 return 0;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001558}
1559
1560/*
1561 * Attach a performance event to a context
1562 *
1563 * First we add the event to the list with the hardware enable bit
1564 * in event->hw_config cleared.
1565 *
1566 * If the event is attached to a task which is on a CPU we use a smp
1567 * call to enable it in the task context. The task might have been
1568 * scheduled away, but we check this in the smp call again.
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001569 */
1570static void
1571perf_install_in_context(struct perf_event_context *ctx,
1572 struct perf_event *event,
1573 int cpu)
1574{
1575 struct task_struct *task = ctx->task;
1576
Peter Zijlstrafe4b04f2011-02-02 13:19:09 +01001577 lockdep_assert_held(&ctx->mutex);
1578
Peter Zijlstrac3f00c72010-08-18 14:37:15 +02001579 event->ctx = ctx;
1580
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001581 if (!task) {
1582 /*
1583 * Per cpu events are installed via an smp call and
André Goddard Rosaaf901ca2009-11-14 13:09:05 -02001584 * the install is always successful.
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001585 */
Peter Zijlstrafe4b04f2011-02-02 13:19:09 +01001586 cpu_function_call(cpu, __perf_install_in_context, event);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001587 return;
1588 }
1589
1590retry:
Peter Zijlstrafe4b04f2011-02-02 13:19:09 +01001591 if (!task_function_call(task, __perf_install_in_context, event))
1592 return;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001593
Thomas Gleixnere625cce2009-11-17 18:02:06 +01001594 raw_spin_lock_irq(&ctx->lock);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001595 /*
Peter Zijlstrafe4b04f2011-02-02 13:19:09 +01001596 * If we failed to find a running task, but find the context active now
1597 * that we've acquired the ctx->lock, retry.
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001598 */
Peter Zijlstrafe4b04f2011-02-02 13:19:09 +01001599 if (ctx->is_active) {
Thomas Gleixnere625cce2009-11-17 18:02:06 +01001600 raw_spin_unlock_irq(&ctx->lock);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001601 goto retry;
1602 }
1603
1604 /*
Peter Zijlstrafe4b04f2011-02-02 13:19:09 +01001605 * Since the task isn't running, it's safe to add the event; our
 1606 * holding the ctx->lock ensures the task won't get scheduled in.
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001607 */
Peter Zijlstrafe4b04f2011-02-02 13:19:09 +01001608 add_event_to_ctx(event, ctx);
Thomas Gleixnere625cce2009-11-17 18:02:06 +01001609 raw_spin_unlock_irq(&ctx->lock);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001610}
1611
1612/*
 1613 * Put an event into inactive state and update time fields.
1614 * Enabling the leader of a group effectively enables all
1615 * the group members that aren't explicitly disabled, so we
1616 * have to update their ->tstamp_enabled also.
1617 * Note: this works for group members as well as group leaders
1618 * since the non-leader members' sibling_lists will be empty.
1619 */
1620static void __perf_event_mark_enabled(struct perf_event *event,
1621 struct perf_event_context *ctx)
1622{
1623 struct perf_event *sub;
Stephane Eranian41587552011-01-03 18:20:01 +02001624 u64 tstamp = perf_event_time(event);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001625
1626 event->state = PERF_EVENT_STATE_INACTIVE;
Stephane Eranian41587552011-01-03 18:20:01 +02001627 event->tstamp_enabled = tstamp - event->total_time_enabled;
Peter Zijlstra9ed60602010-06-11 17:36:35 +02001628 list_for_each_entry(sub, &event->sibling_list, group_entry) {
Stephane Eranian41587552011-01-03 18:20:01 +02001629 if (sub->state >= PERF_EVENT_STATE_INACTIVE)
1630 sub->tstamp_enabled = tstamp - sub->total_time_enabled;
Peter Zijlstra9ed60602010-06-11 17:36:35 +02001631 }
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001632}
1633
1634/*
1635 * Cross CPU call to enable a performance event
1636 */
Peter Zijlstrafe4b04f2011-02-02 13:19:09 +01001637static int __perf_event_enable(void *info)
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001638{
1639 struct perf_event *event = info;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001640 struct perf_event_context *ctx = event->ctx;
1641 struct perf_event *leader = event->group_leader;
Peter Zijlstra108b02c2010-09-06 14:32:03 +02001642 struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001643 int err;
1644
Peter Zijlstrafe4b04f2011-02-02 13:19:09 +01001645 if (WARN_ON_ONCE(!ctx->is_active))
1646 return -EINVAL;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001647
Thomas Gleixnere625cce2009-11-17 18:02:06 +01001648 raw_spin_lock(&ctx->lock);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001649 update_context_time(ctx);
1650
1651 if (event->state >= PERF_EVENT_STATE_INACTIVE)
1652 goto unlock;
Stephane Eraniane5d13672011-02-14 11:20:01 +02001653
1654 /*
1655 * set current task's cgroup time reference point
1656 */
Stephane Eranian3f7cce32011-02-18 14:40:01 +02001657 perf_cgroup_set_timestamp(current, ctx);
Stephane Eraniane5d13672011-02-14 11:20:01 +02001658
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001659 __perf_event_mark_enabled(event, ctx);
1660
Stephane Eraniane5d13672011-02-14 11:20:01 +02001661 if (!event_filter_match(event)) {
1662 if (is_cgroup_event(event))
1663 perf_cgroup_defer_enabled(event);
Peter Zijlstraf4c41762009-12-16 17:55:54 +01001664 goto unlock;
Stephane Eraniane5d13672011-02-14 11:20:01 +02001665 }
Peter Zijlstraf4c41762009-12-16 17:55:54 +01001666
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001667 /*
1668 * If the event is in a group and isn't the group leader,
1669 * then don't put it on unless the group is on.
1670 */
1671 if (leader != event && leader->state != PERF_EVENT_STATE_ACTIVE)
1672 goto unlock;
1673
1674 if (!group_can_go_on(event, cpuctx, 1)) {
1675 err = -EEXIST;
1676 } else {
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001677 if (event == leader)
Peter Zijlstra6e377382010-02-11 13:21:58 +01001678 err = group_sched_in(event, cpuctx, ctx);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001679 else
Peter Zijlstra6e377382010-02-11 13:21:58 +01001680 err = event_sched_in(event, cpuctx, ctx);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001681 }
1682
1683 if (err) {
1684 /*
1685 * If this event can't go on and it's part of a
1686 * group, then the whole group has to come off.
1687 */
1688 if (leader != event)
1689 group_sched_out(leader, cpuctx, ctx);
1690 if (leader->attr.pinned) {
1691 update_group_times(leader);
1692 leader->state = PERF_EVENT_STATE_ERROR;
1693 }
1694 }
1695
Peter Zijlstra9ed60602010-06-11 17:36:35 +02001696unlock:
Thomas Gleixnere625cce2009-11-17 18:02:06 +01001697 raw_spin_unlock(&ctx->lock);
Peter Zijlstrafe4b04f2011-02-02 13:19:09 +01001698
1699 return 0;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001700}
1701
1702/*
 1703 * Enable an event.
1704 *
1705 * If event->ctx is a cloned context, callers must make sure that
1706 * every task struct that event->ctx->task could possibly point to
1707 * remains valid. This condition is satisfied when called through
1708 * perf_event_for_each_child or perf_event_for_each as described
1709 * for perf_event_disable.
1710 */
Frederic Weisbecker44234ad2009-12-09 09:25:48 +01001711void perf_event_enable(struct perf_event *event)
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001712{
1713 struct perf_event_context *ctx = event->ctx;
1714 struct task_struct *task = ctx->task;
1715
1716 if (!task) {
1717 /*
1718 * Enable the event on the cpu that it's on
1719 */
Peter Zijlstrafe4b04f2011-02-02 13:19:09 +01001720 cpu_function_call(event->cpu, __perf_event_enable, event);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001721 return;
1722 }
1723
Thomas Gleixnere625cce2009-11-17 18:02:06 +01001724 raw_spin_lock_irq(&ctx->lock);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001725 if (event->state >= PERF_EVENT_STATE_INACTIVE)
1726 goto out;
1727
1728 /*
1729 * If the event is in error state, clear that first.
1730 * That way, if we see the event in error state below, we
1731 * know that it has gone back into error state, as distinct
1732 * from the task having been scheduled away before the
1733 * cross-call arrived.
1734 */
1735 if (event->state == PERF_EVENT_STATE_ERROR)
1736 event->state = PERF_EVENT_STATE_OFF;
1737
Peter Zijlstra9ed60602010-06-11 17:36:35 +02001738retry:
Peter Zijlstrafe4b04f2011-02-02 13:19:09 +01001739 if (!ctx->is_active) {
1740 __perf_event_mark_enabled(event, ctx);
1741 goto out;
1742 }
1743
Thomas Gleixnere625cce2009-11-17 18:02:06 +01001744 raw_spin_unlock_irq(&ctx->lock);
Peter Zijlstrafe4b04f2011-02-02 13:19:09 +01001745
1746 if (!task_function_call(task, __perf_event_enable, event))
1747 return;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001748
Thomas Gleixnere625cce2009-11-17 18:02:06 +01001749 raw_spin_lock_irq(&ctx->lock);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001750
1751 /*
1752 * If the context is active and the event is still off,
1753 * we need to retry the cross-call.
1754 */
Peter Zijlstrafe4b04f2011-02-02 13:19:09 +01001755 if (ctx->is_active && event->state == PERF_EVENT_STATE_OFF) {
1756 /*
1757 * task could have been flipped by a concurrent
1758 * perf_event_context_sched_out()
1759 */
1760 task = ctx->task;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001761 goto retry;
Peter Zijlstrafe4b04f2011-02-02 13:19:09 +01001762 }
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001763
Peter Zijlstra9ed60602010-06-11 17:36:35 +02001764out:
Thomas Gleixnere625cce2009-11-17 18:02:06 +01001765 raw_spin_unlock_irq(&ctx->lock);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001766}
1767
Avi Kivity26ca5c12011-06-29 18:42:37 +03001768int perf_event_refresh(struct perf_event *event, int refresh)
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001769{
1770 /*
1771 * not supported on inherited events
1772 */
Franck Bui-Huu2e939d12010-11-23 16:21:44 +01001773 if (event->attr.inherit || !is_sampling_event(event))
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001774 return -EINVAL;
1775
1776 atomic_add(refresh, &event->event_limit);
1777 perf_event_enable(event);
1778
1779 return 0;
1780}
Avi Kivity26ca5c12011-06-29 18:42:37 +03001781EXPORT_SYMBOL_GPL(perf_event_refresh);
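/*
 * Minimal usage sketch (hypothetical caller, not part of this file):
 * re-arm a sampling event for @n more overflows once its event_limit
 * has been exhausted:
 *
 *	if (perf_event_refresh(event, n))
 *		pr_warn("perf: refresh refused (inherited or non-sampling event)\n");
 */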
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001782
Frederic Weisbecker5b0311e2010-01-17 11:59:13 +01001783static void ctx_sched_out(struct perf_event_context *ctx,
1784 struct perf_cpu_context *cpuctx,
1785 enum event_type_t event_type)
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001786{
1787 struct perf_event *event;
Peter Zijlstradb24d332011-04-09 21:17:45 +02001788 int is_active = ctx->is_active;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001789
Peter Zijlstradb24d332011-04-09 21:17:45 +02001790 ctx->is_active &= ~event_type;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001791 if (likely(!ctx->nr_events))
Peter Zijlstrafacc4302011-04-09 21:17:42 +02001792 return;
1793
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001794 update_context_time(ctx);
Stephane Eraniane5d13672011-02-14 11:20:01 +02001795 update_cgrp_time_from_cpuctx(cpuctx);
Frederic Weisbecker5b0311e2010-01-17 11:59:13 +01001796 if (!ctx->nr_active)
Peter Zijlstrafacc4302011-04-09 21:17:42 +02001797 return;
Frederic Weisbecker5b0311e2010-01-17 11:59:13 +01001798
Peter Zijlstra075e0b02011-04-09 21:17:40 +02001799 perf_pmu_disable(ctx->pmu);
Peter Zijlstradb24d332011-04-09 21:17:45 +02001800 if ((is_active & EVENT_PINNED) && (event_type & EVENT_PINNED)) {
Frederic Weisbecker889ff012010-01-09 20:04:47 +01001801 list_for_each_entry(event, &ctx->pinned_groups, group_entry)
1802 group_sched_out(event, cpuctx, ctx);
Peter Zijlstra9ed60602010-06-11 17:36:35 +02001803 }
Frederic Weisbecker889ff012010-01-09 20:04:47 +01001804
Peter Zijlstradb24d332011-04-09 21:17:45 +02001805 if ((is_active & EVENT_FLEXIBLE) && (event_type & EVENT_FLEXIBLE)) {
Frederic Weisbecker889ff012010-01-09 20:04:47 +01001806 list_for_each_entry(event, &ctx->flexible_groups, group_entry)
Xiao Guangrong8c9ed8e2009-09-25 13:51:17 +08001807 group_sched_out(event, cpuctx, ctx);
Peter Zijlstra9ed60602010-06-11 17:36:35 +02001808 }
Peter Zijlstra1b9a6442010-09-07 18:32:22 +02001809 perf_pmu_enable(ctx->pmu);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001810}
1811
1812/*
1813 * Test whether two contexts are equivalent, i.e. whether they
1814 * have both been cloned from the same version of the same context
1815 * and they both have the same number of enabled events.
1816 * If the number of enabled events is the same, then the set
1817 * of enabled events should be the same, because these are both
1818 * inherited contexts, therefore we can't access individual events
1819 * in them directly with an fd; we can only enable/disable all
1820 * events via prctl, or enable/disable all events in a family
1821 * via ioctl, which will have the same effect on both contexts.
1822 */
1823static int context_equiv(struct perf_event_context *ctx1,
1824 struct perf_event_context *ctx2)
1825{
1826 return ctx1->parent_ctx && ctx1->parent_ctx == ctx2->parent_ctx
1827 && ctx1->parent_gen == ctx2->parent_gen
1828 && !ctx1->pin_count && !ctx2->pin_count;
1829}
1830
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001831static void __perf_event_sync_stat(struct perf_event *event,
1832 struct perf_event *next_event)
1833{
1834 u64 value;
1835
1836 if (!event->attr.inherit_stat)
1837 return;
1838
1839 /*
1840 * Update the event value, we cannot use perf_event_read()
1841 * because we're in the middle of a context switch and have IRQs
 1842 * disabled, which upsets smp_call_function_single(); however,
 1843 * we know the event must be on the current CPU, therefore we
1844 * don't need to use it.
1845 */
1846 switch (event->state) {
1847 case PERF_EVENT_STATE_ACTIVE:
Peter Zijlstra3dbebf12009-11-20 22:19:52 +01001848 event->pmu->read(event);
1849 /* fall-through */
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001850
1851 case PERF_EVENT_STATE_INACTIVE:
1852 update_event_times(event);
1853 break;
1854
1855 default:
1856 break;
1857 }
1858
1859 /*
1860 * In order to keep per-task stats reliable we need to flip the event
1861 * values when we flip the contexts.
1862 */
Peter Zijlstrae7850592010-05-21 14:43:08 +02001863 value = local64_read(&next_event->count);
1864 value = local64_xchg(&event->count, value);
1865 local64_set(&next_event->count, value);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001866
1867 swap(event->total_time_enabled, next_event->total_time_enabled);
1868 swap(event->total_time_running, next_event->total_time_running);
1869
1870 /*
1871 * Since we swizzled the values, update the user visible data too.
1872 */
1873 perf_event_update_userpage(event);
1874 perf_event_update_userpage(next_event);
1875}
1876
1877#define list_next_entry(pos, member) \
1878 list_entry(pos->member.next, typeof(*pos), member)
1879
1880static void perf_event_sync_stat(struct perf_event_context *ctx,
1881 struct perf_event_context *next_ctx)
1882{
1883 struct perf_event *event, *next_event;
1884
1885 if (!ctx->nr_stat)
1886 return;
1887
Peter Zijlstra02ffdbc2009-11-20 22:19:50 +01001888 update_context_time(ctx);
1889
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001890 event = list_first_entry(&ctx->event_list,
1891 struct perf_event, event_entry);
1892
1893 next_event = list_first_entry(&next_ctx->event_list,
1894 struct perf_event, event_entry);
1895
1896 while (&event->event_entry != &ctx->event_list &&
1897 &next_event->event_entry != &next_ctx->event_list) {
1898
1899 __perf_event_sync_stat(event, next_event);
1900
1901 event = list_next_entry(event, event_entry);
1902 next_event = list_next_entry(next_event, event_entry);
1903 }
1904}
1905
Peter Zijlstrafe4b04f2011-02-02 13:19:09 +01001906static void perf_event_context_sched_out(struct task_struct *task, int ctxn,
1907 struct task_struct *next)
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001908{
Peter Zijlstra8dc85d52010-09-02 16:50:03 +02001909 struct perf_event_context *ctx = task->perf_event_ctxp[ctxn];
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001910 struct perf_event_context *next_ctx;
1911 struct perf_event_context *parent;
Peter Zijlstra108b02c2010-09-06 14:32:03 +02001912 struct perf_cpu_context *cpuctx;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001913 int do_switch = 1;
1914
Peter Zijlstra108b02c2010-09-06 14:32:03 +02001915 if (likely(!ctx))
1916 return;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001917
Peter Zijlstra108b02c2010-09-06 14:32:03 +02001918 cpuctx = __get_cpu_context(ctx);
1919 if (!cpuctx->task_ctx)
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001920 return;
1921
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001922 rcu_read_lock();
1923 parent = rcu_dereference(ctx->parent_ctx);
Peter Zijlstra8dc85d52010-09-02 16:50:03 +02001924 next_ctx = next->perf_event_ctxp[ctxn];
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001925 if (parent && next_ctx &&
1926 rcu_dereference(next_ctx->parent_ctx) == parent) {
1927 /*
1928 * Looks like the two contexts are clones, so we might be
1929 * able to optimize the context switch. We lock both
1930 * contexts and check that they are clones under the
1931 * lock (including re-checking that neither has been
1932 * uncloned in the meantime). It doesn't matter which
1933 * order we take the locks because no other cpu could
1934 * be trying to lock both of these tasks.
1935 */
Thomas Gleixnere625cce2009-11-17 18:02:06 +01001936 raw_spin_lock(&ctx->lock);
1937 raw_spin_lock_nested(&next_ctx->lock, SINGLE_DEPTH_NESTING);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001938 if (context_equiv(ctx, next_ctx)) {
1939 /*
1940 * XXX do we need a memory barrier of sorts
 1941 * wrt the rcu_dereference() of perf_event_ctxp
1942 */
Peter Zijlstra8dc85d52010-09-02 16:50:03 +02001943 task->perf_event_ctxp[ctxn] = next_ctx;
1944 next->perf_event_ctxp[ctxn] = ctx;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001945 ctx->task = next;
1946 next_ctx->task = task;
1947 do_switch = 0;
1948
1949 perf_event_sync_stat(ctx, next_ctx);
1950 }
Thomas Gleixnere625cce2009-11-17 18:02:06 +01001951 raw_spin_unlock(&next_ctx->lock);
1952 raw_spin_unlock(&ctx->lock);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001953 }
1954 rcu_read_unlock();
1955
1956 if (do_switch) {
Peter Zijlstrafacc4302011-04-09 21:17:42 +02001957 raw_spin_lock(&ctx->lock);
Frederic Weisbecker5b0311e2010-01-17 11:59:13 +01001958 ctx_sched_out(ctx, cpuctx, EVENT_ALL);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001959 cpuctx->task_ctx = NULL;
Peter Zijlstrafacc4302011-04-09 21:17:42 +02001960 raw_spin_unlock(&ctx->lock);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001961 }
1962}
1963
Peter Zijlstra8dc85d52010-09-02 16:50:03 +02001964#define for_each_task_context_nr(ctxn) \
1965 for ((ctxn) = 0; (ctxn) < perf_nr_task_contexts; (ctxn)++)
1966
1967/*
1968 * Called from scheduler to remove the events of the current task,
1969 * with interrupts disabled.
1970 *
1971 * We stop each event and update the event value in event->count.
1972 *
1973 * This does not protect us against NMI, but disable()
1974 * sets the disabled bit in the control field of event _before_
 1975 * accessing the event control register. If an NMI hits, then it will
1976 * not restart the event.
1977 */
Peter Zijlstra82cd6de2010-10-14 17:57:23 +02001978void __perf_event_task_sched_out(struct task_struct *task,
1979 struct task_struct *next)
Peter Zijlstra8dc85d52010-09-02 16:50:03 +02001980{
1981 int ctxn;
1982
Peter Zijlstra8dc85d52010-09-02 16:50:03 +02001983 for_each_task_context_nr(ctxn)
1984 perf_event_context_sched_out(task, ctxn, next);
Stephane Eraniane5d13672011-02-14 11:20:01 +02001985
1986 /*
1987 * if cgroup events exist on this CPU, then we need
1988 * to check if we have to switch out PMU state.
 1989 * cgroup events are system-wide mode only
1990 */
1991 if (atomic_read(&__get_cpu_var(perf_cgroup_events)))
1992 perf_cgroup_sched_out(task);
Peter Zijlstra8dc85d52010-09-02 16:50:03 +02001993}
1994
Peter Zijlstra04dc2db2011-04-09 21:17:43 +02001995static void task_ctx_sched_out(struct perf_event_context *ctx)
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001996{
Peter Zijlstra108b02c2010-09-06 14:32:03 +02001997 struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001998
1999 if (!cpuctx->task_ctx)
2000 return;
2001
2002 if (WARN_ON_ONCE(ctx != cpuctx->task_ctx))
2003 return;
2004
Peter Zijlstra04dc2db2011-04-09 21:17:43 +02002005 ctx_sched_out(ctx, cpuctx, EVENT_ALL);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002006 cpuctx->task_ctx = NULL;
2007}
2008
2009/*
2010 * Called with IRQs disabled
2011 */
Frederic Weisbecker5b0311e2010-01-17 11:59:13 +01002012static void cpu_ctx_sched_out(struct perf_cpu_context *cpuctx,
2013 enum event_type_t event_type)
2014{
2015 ctx_sched_out(&cpuctx->ctx, cpuctx, event_type);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002016}
2017
2018static void
Frederic Weisbecker5b0311e2010-01-17 11:59:13 +01002019ctx_pinned_sched_in(struct perf_event_context *ctx,
Peter Zijlstra6e377382010-02-11 13:21:58 +01002020 struct perf_cpu_context *cpuctx)
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002021{
2022 struct perf_event *event;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002023
Frederic Weisbecker889ff012010-01-09 20:04:47 +01002024 list_for_each_entry(event, &ctx->pinned_groups, group_entry) {
2025 if (event->state <= PERF_EVENT_STATE_OFF)
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002026 continue;
Stephane Eranian5632ab12011-01-03 18:20:01 +02002027 if (!event_filter_match(event))
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002028 continue;
2029
Stephane Eraniane5d13672011-02-14 11:20:01 +02002030 /* may need to reset tstamp_enabled */
2031 if (is_cgroup_event(event))
2032 perf_cgroup_mark_enabled(event, ctx);
2033
Xiao Guangrong8c9ed8e2009-09-25 13:51:17 +08002034 if (group_can_go_on(event, cpuctx, 1))
Peter Zijlstra6e377382010-02-11 13:21:58 +01002035 group_sched_in(event, cpuctx, ctx);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002036
2037 /*
2038 * If this pinned group hasn't been scheduled,
2039 * put it in error state.
2040 */
2041 if (event->state == PERF_EVENT_STATE_INACTIVE) {
2042 update_group_times(event);
2043 event->state = PERF_EVENT_STATE_ERROR;
2044 }
2045 }
Frederic Weisbecker5b0311e2010-01-17 11:59:13 +01002046}
2047
2048static void
2049ctx_flexible_sched_in(struct perf_event_context *ctx,
Peter Zijlstra6e377382010-02-11 13:21:58 +01002050 struct perf_cpu_context *cpuctx)
Frederic Weisbecker5b0311e2010-01-17 11:59:13 +01002051{
2052 struct perf_event *event;
2053 int can_add_hw = 1;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002054
Frederic Weisbecker889ff012010-01-09 20:04:47 +01002055 list_for_each_entry(event, &ctx->flexible_groups, group_entry) {
2056 /* Ignore events in OFF or ERROR state */
2057 if (event->state <= PERF_EVENT_STATE_OFF)
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002058 continue;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002059 /*
2060 * Listen to the 'cpu' scheduling filter constraint
2061 * of events:
2062 */
Stephane Eranian5632ab12011-01-03 18:20:01 +02002063 if (!event_filter_match(event))
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002064 continue;
2065
Stephane Eraniane5d13672011-02-14 11:20:01 +02002066 /* may need to reset tstamp_enabled */
2067 if (is_cgroup_event(event))
2068 perf_cgroup_mark_enabled(event, ctx);
2069
Peter Zijlstra9ed60602010-06-11 17:36:35 +02002070 if (group_can_go_on(event, cpuctx, can_add_hw)) {
Peter Zijlstra6e377382010-02-11 13:21:58 +01002071 if (group_sched_in(event, cpuctx, ctx))
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002072 can_add_hw = 0;
Peter Zijlstra9ed60602010-06-11 17:36:35 +02002073 }
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002074 }
Frederic Weisbecker5b0311e2010-01-17 11:59:13 +01002075}
2076
2077static void
2078ctx_sched_in(struct perf_event_context *ctx,
2079 struct perf_cpu_context *cpuctx,
Stephane Eraniane5d13672011-02-14 11:20:01 +02002080 enum event_type_t event_type,
2081 struct task_struct *task)
Frederic Weisbecker5b0311e2010-01-17 11:59:13 +01002082{
Stephane Eraniane5d13672011-02-14 11:20:01 +02002083 u64 now;
Peter Zijlstradb24d332011-04-09 21:17:45 +02002084 int is_active = ctx->is_active;
Stephane Eraniane5d13672011-02-14 11:20:01 +02002085
Peter Zijlstradb24d332011-04-09 21:17:45 +02002086 ctx->is_active |= event_type;
Frederic Weisbecker5b0311e2010-01-17 11:59:13 +01002087 if (likely(!ctx->nr_events))
Peter Zijlstrafacc4302011-04-09 21:17:42 +02002088 return;
Frederic Weisbecker5b0311e2010-01-17 11:59:13 +01002089
Stephane Eraniane5d13672011-02-14 11:20:01 +02002090 now = perf_clock();
2091 ctx->timestamp = now;
Stephane Eranian3f7cce32011-02-18 14:40:01 +02002092 perf_cgroup_set_timestamp(task, ctx);
Frederic Weisbecker5b0311e2010-01-17 11:59:13 +01002093 /*
2094 * First go through the list and put on any pinned groups
2095 * in order to give them the best chance of going on.
2096 */
Peter Zijlstradb24d332011-04-09 21:17:45 +02002097 if (!(is_active & EVENT_PINNED) && (event_type & EVENT_PINNED))
Peter Zijlstra6e377382010-02-11 13:21:58 +01002098 ctx_pinned_sched_in(ctx, cpuctx);
Frederic Weisbecker5b0311e2010-01-17 11:59:13 +01002099
2100 /* Then walk through the lower prio flexible groups */
Peter Zijlstradb24d332011-04-09 21:17:45 +02002101 if (!(is_active & EVENT_FLEXIBLE) && (event_type & EVENT_FLEXIBLE))
Peter Zijlstra6e377382010-02-11 13:21:58 +01002102 ctx_flexible_sched_in(ctx, cpuctx);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002103}
2104
Frederic Weisbecker329c0e02010-01-17 12:56:05 +01002105static void cpu_ctx_sched_in(struct perf_cpu_context *cpuctx,
Stephane Eraniane5d13672011-02-14 11:20:01 +02002106 enum event_type_t event_type,
2107 struct task_struct *task)
Frederic Weisbecker329c0e02010-01-17 12:56:05 +01002108{
2109 struct perf_event_context *ctx = &cpuctx->ctx;
2110
Stephane Eraniane5d13672011-02-14 11:20:01 +02002111 ctx_sched_in(ctx, cpuctx, event_type, task);
Frederic Weisbecker329c0e02010-01-17 12:56:05 +01002112}
2113
Stephane Eraniane5d13672011-02-14 11:20:01 +02002114static void perf_event_context_sched_in(struct perf_event_context *ctx,
2115 struct task_struct *task)
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002116{
Peter Zijlstra8dc85d52010-09-02 16:50:03 +02002117 struct perf_cpu_context *cpuctx;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002118
Peter Zijlstra108b02c2010-09-06 14:32:03 +02002119 cpuctx = __get_cpu_context(ctx);
Frederic Weisbecker329c0e02010-01-17 12:56:05 +01002120 if (cpuctx->task_ctx == ctx)
2121 return;
2122
Peter Zijlstrafacc4302011-04-09 21:17:42 +02002123 perf_ctx_lock(cpuctx, ctx);
Peter Zijlstra1b9a6442010-09-07 18:32:22 +02002124 perf_pmu_disable(ctx->pmu);
Frederic Weisbecker329c0e02010-01-17 12:56:05 +01002125 /*
2126 * We want to keep the following priority order:
2127 * cpu pinned (that don't need to move), task pinned,
2128 * cpu flexible, task flexible.
2129 */
2130 cpu_ctx_sched_out(cpuctx, EVENT_FLEXIBLE);
2131
Peter Zijlstradce58552011-04-09 21:17:46 +02002132 perf_event_sched_in(cpuctx, ctx, task);
Frederic Weisbecker329c0e02010-01-17 12:56:05 +01002133
2134 cpuctx->task_ctx = ctx;
eranian@google.com9b33fa62010-03-10 22:26:05 -08002135
Peter Zijlstrafacc4302011-04-09 21:17:42 +02002136 perf_pmu_enable(ctx->pmu);
2137 perf_ctx_unlock(cpuctx, ctx);
2138
Peter Zijlstrab5ab4cd2010-09-06 16:32:21 +02002139 /*
2140 * Since these rotations are per-cpu, we need to ensure the
2141 * cpu-context we got scheduled on is actually rotating.
2142 */
Peter Zijlstra108b02c2010-09-06 14:32:03 +02002143 perf_pmu_rotate_start(ctx->pmu);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002144}
2145
Peter Zijlstra8dc85d52010-09-02 16:50:03 +02002146/*
2147 * Called from scheduler to add the events of the current task
2148 * with interrupts disabled.
2149 *
2150 * We restore the event value and then enable it.
2151 *
2152 * This does not protect us against NMI, but enable()
2153 * sets the enabled bit in the control field of event _before_
 2154 * accessing the event control register. If an NMI hits, then it will
2155 * keep the event running.
2156 */
Peter Zijlstra82cd6de2010-10-14 17:57:23 +02002157void __perf_event_task_sched_in(struct task_struct *task)
Peter Zijlstra8dc85d52010-09-02 16:50:03 +02002158{
2159 struct perf_event_context *ctx;
2160 int ctxn;
2161
2162 for_each_task_context_nr(ctxn) {
2163 ctx = task->perf_event_ctxp[ctxn];
2164 if (likely(!ctx))
2165 continue;
2166
Stephane Eraniane5d13672011-02-14 11:20:01 +02002167 perf_event_context_sched_in(ctx, task);
Peter Zijlstra8dc85d52010-09-02 16:50:03 +02002168 }
Stephane Eraniane5d13672011-02-14 11:20:01 +02002169 /*
2170 * if cgroup events exist on this CPU, then we need
2171 * to check if we have to switch in PMU state.
 2172 * cgroup events are system-wide mode only
2173 */
2174 if (atomic_read(&__get_cpu_var(perf_cgroup_events)))
2175 perf_cgroup_sched_in(task);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002176}
2177
Peter Zijlstraabd50712010-01-26 18:50:16 +01002178static u64 perf_calculate_period(struct perf_event *event, u64 nsec, u64 count)
2179{
2180 u64 frequency = event->attr.sample_freq;
2181 u64 sec = NSEC_PER_SEC;
2182 u64 divisor, dividend;
2183
2184 int count_fls, nsec_fls, frequency_fls, sec_fls;
2185
2186 count_fls = fls64(count);
2187 nsec_fls = fls64(nsec);
2188 frequency_fls = fls64(frequency);
2189 sec_fls = 30;
2190
2191 /*
2192 * We got @count in @nsec, with a target of sample_freq HZ
2193 * the target period becomes:
2194 *
2195 * @count * 10^9
2196 * period = -------------------
2197 * @nsec * sample_freq
2198 *
2199 */
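/*
 * Worked example (illustrative numbers only): @count = 10000 events
 * over @nsec = 10^7 ns with sample_freq = 1000 gives
 *
 *	period = 10000 * 10^9 / (10^7 * 1000) = 1000
 *
 * i.e. sample once every 1000 events.
 */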
2200
2201 /*
2202 * Reduce accuracy by one bit such that @a and @b converge
2203 * to a similar magnitude.
2204 */
Peter Zijlstrafe4b04f2011-02-02 13:19:09 +01002205#define REDUCE_FLS(a, b) \
Peter Zijlstraabd50712010-01-26 18:50:16 +01002206do { \
2207 if (a##_fls > b##_fls) { \
2208 a >>= 1; \
2209 a##_fls--; \
2210 } else { \
2211 b >>= 1; \
2212 b##_fls--; \
2213 } \
2214} while (0)
2215
2216 /*
2217 * Reduce accuracy until either term fits in a u64, then proceed with
2218 * the other, so that finally we can do a u64/u64 division.
2219 */
2220 while (count_fls + sec_fls > 64 && nsec_fls + frequency_fls > 64) {
2221 REDUCE_FLS(nsec, frequency);
2222 REDUCE_FLS(sec, count);
2223 }
2224
2225 if (count_fls + sec_fls > 64) {
2226 divisor = nsec * frequency;
2227
2228 while (count_fls + sec_fls > 64) {
2229 REDUCE_FLS(count, sec);
2230 divisor >>= 1;
2231 }
2232
2233 dividend = count * sec;
2234 } else {
2235 dividend = count * sec;
2236
2237 while (nsec_fls + frequency_fls > 64) {
2238 REDUCE_FLS(nsec, frequency);
2239 dividend >>= 1;
2240 }
2241
2242 divisor = nsec * frequency;
2243 }
2244
Peter Zijlstraf6ab91a2010-06-04 15:18:01 +02002245 if (!divisor)
2246 return dividend;
2247
Peter Zijlstraabd50712010-01-26 18:50:16 +01002248 return div64_u64(dividend, divisor);
2249}
2250
2251static void perf_adjust_period(struct perf_event *event, u64 nsec, u64 count)
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002252{
2253 struct hw_perf_event *hwc = &event->hw;
Peter Zijlstraf6ab91a2010-06-04 15:18:01 +02002254 s64 period, sample_period;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002255 s64 delta;
2256
Peter Zijlstraabd50712010-01-26 18:50:16 +01002257 period = perf_calculate_period(event, nsec, count);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002258
2259 delta = (s64)(period - hwc->sample_period);
2260 delta = (delta + 7) / 8; /* low pass filter */
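	/*
	 * The 1/8 step above is the low-pass filter: sample_period
	 * converges toward the computed target over several ticks
	 * instead of jumping there at once.
	 */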
2261
2262 sample_period = hwc->sample_period + delta;
2263
2264 if (!sample_period)
2265 sample_period = 1;
2266
2267 hwc->sample_period = sample_period;
Peter Zijlstraabd50712010-01-26 18:50:16 +01002268
Peter Zijlstrae7850592010-05-21 14:43:08 +02002269 if (local64_read(&hwc->period_left) > 8*sample_period) {
Peter Zijlstraa4eaf7f2010-06-16 14:37:10 +02002270 event->pmu->stop(event, PERF_EF_UPDATE);
Peter Zijlstrae7850592010-05-21 14:43:08 +02002271 local64_set(&hwc->period_left, 0);
Peter Zijlstraa4eaf7f2010-06-16 14:37:10 +02002272 event->pmu->start(event, PERF_EF_RELOAD);
Peter Zijlstraabd50712010-01-26 18:50:16 +01002273 }
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002274}
2275
Peter Zijlstrab5ab4cd2010-09-06 16:32:21 +02002276static void perf_ctx_adjust_freq(struct perf_event_context *ctx, u64 period)
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002277{
2278 struct perf_event *event;
2279 struct hw_perf_event *hwc;
Peter Zijlstraabd50712010-01-26 18:50:16 +01002280 u64 interrupts, now;
2281 s64 delta;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002282
Paul Mackerras03541f82009-10-14 16:58:03 +11002283 list_for_each_entry_rcu(event, &ctx->event_list, event_entry) {
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002284 if (event->state != PERF_EVENT_STATE_ACTIVE)
2285 continue;
2286
Stephane Eranian5632ab12011-01-03 18:20:01 +02002287 if (!event_filter_match(event))
Peter Zijlstra5d27c232009-12-17 13:16:32 +01002288 continue;
2289
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002290 hwc = &event->hw;
2291
2292 interrupts = hwc->interrupts;
2293 hwc->interrupts = 0;
2294
2295 /*
2296 * unthrottle events on the tick
2297 */
2298 if (interrupts == MAX_INTERRUPTS) {
2299 perf_log_throttle(event, 1);
Peter Zijlstraa4eaf7f2010-06-16 14:37:10 +02002300 event->pmu->start(event, 0);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002301 }
2302
2303 if (!event->attr.freq || !event->attr.sample_freq)
2304 continue;
2305
Peter Zijlstraabd50712010-01-26 18:50:16 +01002306 event->pmu->read(event);
Peter Zijlstrae7850592010-05-21 14:43:08 +02002307 now = local64_read(&event->count);
Peter Zijlstraabd50712010-01-26 18:50:16 +01002308 delta = now - hwc->freq_count_stamp;
2309 hwc->freq_count_stamp = now;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002310
Peter Zijlstraabd50712010-01-26 18:50:16 +01002311 if (delta > 0)
Peter Zijlstrab5ab4cd2010-09-06 16:32:21 +02002312 perf_adjust_period(event, period, delta);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002313 }
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002314}
2315
2316/*
2317 * Round-robin a context's events:
2318 */
2319static void rotate_ctx(struct perf_event_context *ctx)
2320{
Thomas Gleixnerdddd3372010-11-24 10:05:55 +01002321 /*
 2322 * Rotate the first entry of the non-pinned groups to the end. Rotation
 2323 * might be disabled by the inheritance code.
2324 */
2325 if (!ctx->rotate_disable)
2326 list_rotate_left(&ctx->flexible_groups);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002327}
2328
Peter Zijlstrab5ab4cd2010-09-06 16:32:21 +02002329/*
Peter Zijlstrae9d2b062010-09-17 11:28:50 +02002330 * perf_pmu_rotate_start() and perf_rotate_context() are fully serialized
2331 * because they're strictly cpu affine and rotate_start is called with IRQs
2332 * disabled, while rotate_context is called from IRQ context.
Peter Zijlstrab5ab4cd2010-09-06 16:32:21 +02002333 */
Peter Zijlstrae9d2b062010-09-17 11:28:50 +02002334static void perf_rotate_context(struct perf_cpu_context *cpuctx)
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002335{
Peter Zijlstrae9d2b062010-09-17 11:28:50 +02002336 u64 interval = (u64)cpuctx->jiffies_interval * TICK_NSEC;
Peter Zijlstra8dc85d52010-09-02 16:50:03 +02002337 struct perf_event_context *ctx = NULL;
Peter Zijlstrae9d2b062010-09-17 11:28:50 +02002338 int rotate = 0, remove = 1;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002339
Peter Zijlstrab5ab4cd2010-09-06 16:32:21 +02002340 if (cpuctx->ctx.nr_events) {
Peter Zijlstrae9d2b062010-09-17 11:28:50 +02002341 remove = 0;
Peter Zijlstrab5ab4cd2010-09-06 16:32:21 +02002342 if (cpuctx->ctx.nr_events != cpuctx->ctx.nr_active)
2343 rotate = 1;
2344 }
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002345
Peter Zijlstra8dc85d52010-09-02 16:50:03 +02002346 ctx = cpuctx->task_ctx;
Peter Zijlstrab5ab4cd2010-09-06 16:32:21 +02002347 if (ctx && ctx->nr_events) {
Peter Zijlstrae9d2b062010-09-17 11:28:50 +02002348 remove = 0;
Peter Zijlstrab5ab4cd2010-09-06 16:32:21 +02002349 if (ctx->nr_events != ctx->nr_active)
2350 rotate = 1;
2351 }
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002352
Peter Zijlstrafacc4302011-04-09 21:17:42 +02002353 perf_ctx_lock(cpuctx, cpuctx->task_ctx);
Peter Zijlstra1b9a6442010-09-07 18:32:22 +02002354 perf_pmu_disable(cpuctx->ctx.pmu);
Peter Zijlstrae9d2b062010-09-17 11:28:50 +02002355 perf_ctx_adjust_freq(&cpuctx->ctx, interval);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002356 if (ctx)
Peter Zijlstrae9d2b062010-09-17 11:28:50 +02002357 perf_ctx_adjust_freq(ctx, interval);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002358
Peter Zijlstrad4944a02010-03-08 13:51:20 +01002359 if (!rotate)
Peter Zijlstrab5ab4cd2010-09-06 16:32:21 +02002360 goto done;
Peter Zijlstrad4944a02010-03-08 13:51:20 +01002361
Frederic Weisbecker7defb0f2010-01-17 12:15:31 +01002362 cpu_ctx_sched_out(cpuctx, EVENT_FLEXIBLE);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002363 if (ctx)
Peter Zijlstra04dc2db2011-04-09 21:17:43 +02002364 ctx_sched_out(ctx, cpuctx, EVENT_FLEXIBLE);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002365
2366 rotate_ctx(&cpuctx->ctx);
2367 if (ctx)
2368 rotate_ctx(ctx);
2369
Peter Zijlstradce58552011-04-09 21:17:46 +02002370 perf_event_sched_in(cpuctx, ctx, current);
Peter Zijlstrab5ab4cd2010-09-06 16:32:21 +02002371
2372done:
Peter Zijlstrae9d2b062010-09-17 11:28:50 +02002373 if (remove)
2374 list_del_init(&cpuctx->rotation_list);
Peter Zijlstrab5ab4cd2010-09-06 16:32:21 +02002375
Peter Zijlstrae9d2b062010-09-17 11:28:50 +02002376 perf_pmu_enable(cpuctx->ctx.pmu);
Peter Zijlstrafacc4302011-04-09 21:17:42 +02002377 perf_ctx_unlock(cpuctx, cpuctx->task_ctx);
Peter Zijlstrae9d2b062010-09-17 11:28:50 +02002378}
2379
2380void perf_event_task_tick(void)
2381{
2382 struct list_head *head = &__get_cpu_var(rotation_list);
2383 struct perf_cpu_context *cpuctx, *tmp;
2384
2385 WARN_ON(!irqs_disabled());
2386
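	/* Rotate each context at its own multiple of the timer tick. */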
2387 list_for_each_entry_safe(cpuctx, tmp, head, rotation_list) {
2388 if (cpuctx->jiffies_interval == 1 ||
2389 !(jiffies % cpuctx->jiffies_interval))
2390 perf_rotate_context(cpuctx);
2391 }
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002392}
2393
Frederic Weisbecker889ff012010-01-09 20:04:47 +01002394static int event_enable_on_exec(struct perf_event *event,
2395 struct perf_event_context *ctx)
2396{
2397 if (!event->attr.enable_on_exec)
2398 return 0;
2399
2400 event->attr.enable_on_exec = 0;
2401 if (event->state >= PERF_EVENT_STATE_INACTIVE)
2402 return 0;
2403
2404 __perf_event_mark_enabled(event, ctx);
2405
2406 return 1;
2407}
2408
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002409/*
2410 * Enable all of a task's events that have been marked enable-on-exec.
2411 * This expects task == current.
2412 */
Peter Zijlstra8dc85d52010-09-02 16:50:03 +02002413static void perf_event_enable_on_exec(struct perf_event_context *ctx)
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002414{
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002415 struct perf_event *event;
2416 unsigned long flags;
2417 int enabled = 0;
Frederic Weisbecker889ff012010-01-09 20:04:47 +01002418 int ret;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002419
2420 local_irq_save(flags);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002421 if (!ctx || !ctx->nr_events)
2422 goto out;
2423
Stephane Eraniane566b762011-04-06 02:54:54 +02002424 /*
2425 * We must ctxsw out cgroup events to avoid conflict
2426 * when invoking perf_task_event_sched_in() later on
2427 * in this function. Otherwise we end up trying to
 2428 * switch in cgroup events which are already scheduled
2429 * in.
2430 */
2431 perf_cgroup_sched_out(current);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002432
Thomas Gleixnere625cce2009-11-17 18:02:06 +01002433 raw_spin_lock(&ctx->lock);
Peter Zijlstra04dc2db2011-04-09 21:17:43 +02002434 task_ctx_sched_out(ctx);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002435
Frederic Weisbecker889ff012010-01-09 20:04:47 +01002436 list_for_each_entry(event, &ctx->pinned_groups, group_entry) {
2437 ret = event_enable_on_exec(event, ctx);
2438 if (ret)
2439 enabled = 1;
2440 }
2441
2442 list_for_each_entry(event, &ctx->flexible_groups, group_entry) {
2443 ret = event_enable_on_exec(event, ctx);
2444 if (ret)
2445 enabled = 1;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002446 }
2447
2448 /*
2449 * Unclone this context if we enabled any event.
2450 */
2451 if (enabled)
2452 unclone_ctx(ctx);
2453
Thomas Gleixnere625cce2009-11-17 18:02:06 +01002454 raw_spin_unlock(&ctx->lock);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002455
Stephane Eraniane566b762011-04-06 02:54:54 +02002456 /*
 2457 * Also switches cgroup events back in, if any:
2458 */
Stephane Eraniane5d13672011-02-14 11:20:01 +02002459 perf_event_context_sched_in(ctx, ctx->task);
Peter Zijlstra9ed60602010-06-11 17:36:35 +02002460out:
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002461 local_irq_restore(flags);
2462}
2463
2464/*
2465 * Cross CPU call to read the hardware event
2466 */
2467static void __perf_event_read(void *info)
2468{
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002469 struct perf_event *event = info;
2470 struct perf_event_context *ctx = event->ctx;
Peter Zijlstra108b02c2010-09-06 14:32:03 +02002471 struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002472
2473 /*
2474 * If this is a task context, we need to check whether it is
2475 * the current task context of this cpu. If not it has been
2476 * scheduled out before the smp call arrived. In that case
2477 * event->count would have been updated to a recent sample
2478 * when the event was scheduled out.
2479 */
2480 if (ctx->task && cpuctx->task_ctx != ctx)
2481 return;
2482
Thomas Gleixnere625cce2009-11-17 18:02:06 +01002483 raw_spin_lock(&ctx->lock);
Stephane Eraniane5d13672011-02-14 11:20:01 +02002484 if (ctx->is_active) {
Peter Zijlstra542e72f2011-01-26 15:38:35 +01002485 update_context_time(ctx);
Stephane Eraniane5d13672011-02-14 11:20:01 +02002486 update_cgrp_time_from_event(event);
2487 }
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002488 update_event_times(event);
Peter Zijlstra542e72f2011-01-26 15:38:35 +01002489 if (event->state == PERF_EVENT_STATE_ACTIVE)
2490 event->pmu->read(event);
Thomas Gleixnere625cce2009-11-17 18:02:06 +01002491 raw_spin_unlock(&ctx->lock);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002492}
2493
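/*
 * Count visible to userspace: the event's own count plus whatever its
 * children have accumulated.
 */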
Peter Zijlstrab5e58792010-05-21 14:43:12 +02002494static inline u64 perf_event_count(struct perf_event *event)
2495{
Peter Zijlstrae7850592010-05-21 14:43:08 +02002496 return local64_read(&event->count) + atomic64_read(&event->child_count);
Peter Zijlstrab5e58792010-05-21 14:43:12 +02002497}
2498
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002499static u64 perf_event_read(struct perf_event *event)
2500{
2501 /*
2502 * If event is enabled and currently active on a CPU, update the
2503 * value in the event structure:
2504 */
2505 if (event->state == PERF_EVENT_STATE_ACTIVE) {
2506 smp_call_function_single(event->oncpu,
2507 __perf_event_read, event, 1);
2508 } else if (event->state == PERF_EVENT_STATE_INACTIVE) {
Peter Zijlstra2b8988c2009-11-20 22:19:54 +01002509 struct perf_event_context *ctx = event->ctx;
2510 unsigned long flags;
2511
Thomas Gleixnere625cce2009-11-17 18:02:06 +01002512 raw_spin_lock_irqsave(&ctx->lock, flags);
Stephane Eranianc530ccd2010-10-15 15:26:01 +02002513 /*
2514 * may read while context is not active
 2515 * (e.g., thread is blocked); in that case
2516 * we cannot update context time
2517 */
Stephane Eraniane5d13672011-02-14 11:20:01 +02002518 if (ctx->is_active) {
Stephane Eranianc530ccd2010-10-15 15:26:01 +02002519 update_context_time(ctx);
Stephane Eraniane5d13672011-02-14 11:20:01 +02002520 update_cgrp_time_from_event(event);
2521 }
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002522 update_event_times(event);
Thomas Gleixnere625cce2009-11-17 18:02:06 +01002523 raw_spin_unlock_irqrestore(&ctx->lock, flags);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002524 }
2525
Peter Zijlstrab5e58792010-05-21 14:43:12 +02002526 return perf_event_count(event);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002527}
2528
2529/*
Frederic Weisbecker927c7a92010-07-01 16:20:36 +02002530 * Callchain support
2531 */
2532
2533struct callchain_cpus_entries {
2534 struct rcu_head rcu_head;
2535 struct perf_callchain_entry *cpu_entries[0];
2536};
2537
Frederic Weisbecker7ae07ea2010-08-14 20:45:13 +02002538static DEFINE_PER_CPU(int, callchain_recursion[PERF_NR_CONTEXTS]);
Frederic Weisbecker927c7a92010-07-01 16:20:36 +02002539static atomic_t nr_callchain_events;
2540static DEFINE_MUTEX(callchain_mutex);
2541struct callchain_cpus_entries *callchain_cpus_entries;
2542
2543
2544__weak void perf_callchain_kernel(struct perf_callchain_entry *entry,
2545 struct pt_regs *regs)
2546{
2547}
2548
2549__weak void perf_callchain_user(struct perf_callchain_entry *entry,
2550 struct pt_regs *regs)
2551{
2552}
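/*
 * The two __weak stubs above are overridden by architecture code;
 * on architectures that don't provide them, callchain entries simply
 * stay empty.
 */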
2553
2554static void release_callchain_buffers_rcu(struct rcu_head *head)
2555{
2556 struct callchain_cpus_entries *entries;
2557 int cpu;
2558
2559 entries = container_of(head, struct callchain_cpus_entries, rcu_head);
2560
2561 for_each_possible_cpu(cpu)
2562 kfree(entries->cpu_entries[cpu]);
2563
2564 kfree(entries);
2565}
2566
2567static void release_callchain_buffers(void)
2568{
2569 struct callchain_cpus_entries *entries;
2570
2571 entries = callchain_cpus_entries;
2572 rcu_assign_pointer(callchain_cpus_entries, NULL);
2573 call_rcu(&entries->rcu_head, release_callchain_buffers_rcu);
2574}
2575
2576static int alloc_callchain_buffers(void)
2577{
2578 int cpu;
2579 int size;
2580 struct callchain_cpus_entries *entries;
2581
2582 /*
2583 * We can't use the percpu allocation API for data that can be
2584 * accessed from NMI. Use a temporary manual per cpu allocation
2585 * until that gets sorted out.
2586 */
Eric Dumazet88d4f0d2011-01-25 19:40:51 +01002587 size = offsetof(struct callchain_cpus_entries, cpu_entries[nr_cpu_ids]);
Frederic Weisbecker927c7a92010-07-01 16:20:36 +02002588
2589 entries = kzalloc(size, GFP_KERNEL);
2590 if (!entries)
2591 return -ENOMEM;
2592
Frederic Weisbecker7ae07ea2010-08-14 20:45:13 +02002593 size = sizeof(struct perf_callchain_entry) * PERF_NR_CONTEXTS;
Frederic Weisbecker927c7a92010-07-01 16:20:36 +02002594
2595 for_each_possible_cpu(cpu) {
2596 entries->cpu_entries[cpu] = kmalloc_node(size, GFP_KERNEL,
2597 cpu_to_node(cpu));
2598 if (!entries->cpu_entries[cpu])
2599 goto fail;
2600 }
2601
2602 rcu_assign_pointer(callchain_cpus_entries, entries);
2603
2604 return 0;
2605
2606fail:
2607 for_each_possible_cpu(cpu)
2608 kfree(entries->cpu_entries[cpu]);
2609 kfree(entries);
2610
2611 return -ENOMEM;
2612}
2613
2614static int get_callchain_buffers(void)
2615{
2616 int err = 0;
2617 int count;
2618
2619 mutex_lock(&callchain_mutex);
2620
2621 count = atomic_inc_return(&nr_callchain_events);
2622 if (WARN_ON_ONCE(count < 1)) {
2623 err = -EINVAL;
2624 goto exit;
2625 }
2626
2627 if (count > 1) {
2628 /* If the allocation failed, give up */
2629 if (!callchain_cpus_entries)
2630 err = -ENOMEM;
2631 goto exit;
2632 }
2633
2634 err = alloc_callchain_buffers();
2635 if (err)
2636 release_callchain_buffers();
2637exit:
2638 mutex_unlock(&callchain_mutex);
2639
2640 return err;
2641}
2642
2643static void put_callchain_buffers(void)
2644{
2645 if (atomic_dec_and_mutex_lock(&nr_callchain_events, &callchain_mutex)) {
2646 release_callchain_buffers();
2647 mutex_unlock(&callchain_mutex);
2648 }
2649}
2650
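/*
 * One recursion slot per execution context: 3 = NMI, 2 = hardirq,
 * 1 = softirq, 0 = task. Returns -1 when a callchain is already
 * being recorded in the current context.
 */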
2651static int get_recursion_context(int *recursion)
2652{
2653 int rctx;
2654
2655 if (in_nmi())
2656 rctx = 3;
2657 else if (in_irq())
2658 rctx = 2;
2659 else if (in_softirq())
2660 rctx = 1;
2661 else
2662 rctx = 0;
2663
2664 if (recursion[rctx])
2665 return -1;
2666
2667 recursion[rctx]++;
2668 barrier();
2669
2670 return rctx;
2671}
2672
2673static inline void put_recursion_context(int *recursion, int rctx)
2674{
2675 barrier();
2676 recursion[rctx]--;
2677}
2678
2679static struct perf_callchain_entry *get_callchain_entry(int *rctx)
2680{
2681 int cpu;
2682 struct callchain_cpus_entries *entries;
2683
2684 *rctx = get_recursion_context(__get_cpu_var(callchain_recursion));
2685 if (*rctx == -1)
2686 return NULL;
2687
2688 entries = rcu_dereference(callchain_cpus_entries);
2689 if (!entries)
2690 return NULL;
2691
2692 cpu = smp_processor_id();
2693
2694 return &entries->cpu_entries[cpu][*rctx];
2695}
2696
2697static void
2698put_callchain_entry(int rctx)
2699{
2700 put_recursion_context(__get_cpu_var(callchain_recursion), rctx);
2701}
2702
2703static struct perf_callchain_entry *perf_callchain(struct pt_regs *regs)
2704{
2705 int rctx;
2706 struct perf_callchain_entry *entry;
2707
2708
2709 entry = get_callchain_entry(&rctx);
2710 if (rctx == -1)
2711 return NULL;
2712
2713 if (!entry)
2714 goto exit_put;
2715
2716 entry->nr = 0;
2717
2718 if (!user_mode(regs)) {
2719 perf_callchain_store(entry, PERF_CONTEXT_KERNEL);
2720 perf_callchain_kernel(entry, regs);
2721 if (current->mm)
2722 regs = task_pt_regs(current);
2723 else
2724 regs = NULL;
2725 }
2726
2727 if (regs) {
2728 perf_callchain_store(entry, PERF_CONTEXT_USER);
2729 perf_callchain_user(entry, regs);
2730 }
2731
2732exit_put:
2733 put_callchain_entry(rctx);
2734
2735 return entry;
2736}
2737
2738/*
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002739 * Initialize the perf_event context in a task_struct:
2740 */
Peter Zijlstraeb184472010-09-07 15:55:13 +02002741static void __perf_event_init_context(struct perf_event_context *ctx)
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002742{
Thomas Gleixnere625cce2009-11-17 18:02:06 +01002743 raw_spin_lock_init(&ctx->lock);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002744 mutex_init(&ctx->mutex);
Frederic Weisbecker889ff012010-01-09 20:04:47 +01002745 INIT_LIST_HEAD(&ctx->pinned_groups);
2746 INIT_LIST_HEAD(&ctx->flexible_groups);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002747 INIT_LIST_HEAD(&ctx->event_list);
2748 atomic_set(&ctx->refcount, 1);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002749}
2750
Peter Zijlstraeb184472010-09-07 15:55:13 +02002751static struct perf_event_context *
2752alloc_perf_context(struct pmu *pmu, struct task_struct *task)
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002753{
2754 struct perf_event_context *ctx;
Peter Zijlstraeb184472010-09-07 15:55:13 +02002755
2756 ctx = kzalloc(sizeof(struct perf_event_context), GFP_KERNEL);
2757 if (!ctx)
2758 return NULL;
2759
2760 __perf_event_init_context(ctx);
2761 if (task) {
2762 ctx->task = task;
2763 get_task_struct(task);
2764 }
2765 ctx->pmu = pmu;
2766
2767 return ctx;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002768}
2769
Matt Helsley2ebd4ff2010-09-13 13:01:19 -07002770static struct task_struct *
2771find_lively_task_by_vpid(pid_t vpid)
2772{
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002773 struct task_struct *task;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002774 int err;
2775
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002776 rcu_read_lock();
Matt Helsley2ebd4ff2010-09-13 13:01:19 -07002777 if (!vpid)
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002778 task = current;
2779 else
Matt Helsley2ebd4ff2010-09-13 13:01:19 -07002780 task = find_task_by_vpid(vpid);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002781 if (task)
2782 get_task_struct(task);
2783 rcu_read_unlock();
2784
2785 if (!task)
2786 return ERR_PTR(-ESRCH);
2787
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002788 /* Reuse ptrace permission checks for now. */
2789 err = -EACCES;
2790 if (!ptrace_may_access(task, PTRACE_MODE_READ))
2791 goto errout;
2792
Matt Helsley2ebd4ff2010-09-13 13:01:19 -07002793 return task;
2794errout:
2795 put_task_struct(task);
2796 return ERR_PTR(err);
2797
2798}
2799
Peter Zijlstrafe4b04f2011-02-02 13:19:09 +01002800/*
2801 * Returns a matching context with refcount and pincount.
2802 */
Peter Zijlstra108b02c2010-09-06 14:32:03 +02002803static struct perf_event_context *
Matt Helsley38a81da2010-09-13 13:01:20 -07002804find_get_context(struct pmu *pmu, struct task_struct *task, int cpu)
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002805{
2806 struct perf_event_context *ctx;
2807 struct perf_cpu_context *cpuctx;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002808 unsigned long flags;
Peter Zijlstra8dc85d52010-09-02 16:50:03 +02002809 int ctxn, err;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002810
Oleg Nesterov22a4ec72011-01-18 17:10:08 +01002811 if (!task) {
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002812 /* Must be root to operate on a CPU event: */
2813 if (perf_paranoid_cpu() && !capable(CAP_SYS_ADMIN))
2814 return ERR_PTR(-EACCES);
2815
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002816 /*
 2817 * We could be clever and allow attaching an event to an
 2818 * offline CPU, activating it when the CPU comes up, but
2819 * that's for later.
2820 */
2821 if (!cpu_online(cpu))
2822 return ERR_PTR(-ENODEV);
2823
Peter Zijlstra108b02c2010-09-06 14:32:03 +02002824 cpuctx = per_cpu_ptr(pmu->pmu_cpu_context, cpu);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002825 ctx = &cpuctx->ctx;
2826 get_ctx(ctx);
Peter Zijlstrafe4b04f2011-02-02 13:19:09 +01002827 ++ctx->pin_count;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002828
2829 return ctx;
2830 }
2831
Peter Zijlstra8dc85d52010-09-02 16:50:03 +02002832 err = -EINVAL;
2833 ctxn = pmu->task_ctx_nr;
2834 if (ctxn < 0)
2835 goto errout;
2836
Peter Zijlstra9ed60602010-06-11 17:36:35 +02002837retry:
Peter Zijlstra8dc85d52010-09-02 16:50:03 +02002838 ctx = perf_lock_task_context(task, ctxn, &flags);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002839 if (ctx) {
2840 unclone_ctx(ctx);
Peter Zijlstrafe4b04f2011-02-02 13:19:09 +01002841 ++ctx->pin_count;
Thomas Gleixnere625cce2009-11-17 18:02:06 +01002842 raw_spin_unlock_irqrestore(&ctx->lock, flags);
Peter Zijlstra9137fb22011-04-09 21:17:41 +02002843 } else {
Peter Zijlstraeb184472010-09-07 15:55:13 +02002844 ctx = alloc_perf_context(pmu, task);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002845 err = -ENOMEM;
2846 if (!ctx)
2847 goto errout;
Peter Zijlstraeb184472010-09-07 15:55:13 +02002848
Oleg Nesterovdbe08d82011-01-19 19:22:07 +01002849 err = 0;
2850 mutex_lock(&task->perf_event_mutex);
2851 /*
2852 * If it has already passed perf_event_exit_task(),
2853 * we must see PF_EXITING; perf_event_exit_task() takes this mutex too.
2854 */
2855 if (task->flags & PF_EXITING)
2856 err = -ESRCH;
2857 else if (task->perf_event_ctxp[ctxn])
2858 err = -EAGAIN;
Peter Zijlstrafe4b04f2011-02-02 13:19:09 +01002859 else {
Peter Zijlstra9137fb22011-04-09 21:17:41 +02002860 get_ctx(ctx);
Peter Zijlstrafe4b04f2011-02-02 13:19:09 +01002861 ++ctx->pin_count;
Oleg Nesterovdbe08d82011-01-19 19:22:07 +01002862 rcu_assign_pointer(task->perf_event_ctxp[ctxn], ctx);
Peter Zijlstrafe4b04f2011-02-02 13:19:09 +01002863 }
Oleg Nesterovdbe08d82011-01-19 19:22:07 +01002864 mutex_unlock(&task->perf_event_mutex);
2865
2866 if (unlikely(err)) {
Peter Zijlstra9137fb22011-04-09 21:17:41 +02002867 put_ctx(ctx);
Oleg Nesterovdbe08d82011-01-19 19:22:07 +01002868
2869 if (err == -EAGAIN)
2870 goto retry;
2871 goto errout;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002872 }
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002873 }
2874
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002875 return ctx;
2876
Peter Zijlstra9ed60602010-06-11 17:36:35 +02002877errout:
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002878 return ERR_PTR(err);
2879}
2880
Li Zefan6fb29152009-10-15 11:21:42 +08002881static void perf_event_free_filter(struct perf_event *event);
2882
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002883static void free_event_rcu(struct rcu_head *head)
2884{
2885 struct perf_event *event;
2886
2887 event = container_of(head, struct perf_event, rcu_head);
2888 if (event->ns)
2889 put_pid_ns(event->ns);
Li Zefan6fb29152009-10-15 11:21:42 +08002890 perf_event_free_filter(event);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002891 kfree(event);
2892}
2893
Frederic Weisbecker76369132011-05-19 19:55:04 +02002894static void ring_buffer_put(struct ring_buffer *rb);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002895
2896static void free_event(struct perf_event *event)
2897{
Peter Zijlstrae360adb2010-10-14 14:01:34 +08002898 irq_work_sync(&event->pending);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002899
2900 if (!event->parent) {
Peter Zijlstra82cd6de2010-10-14 17:57:23 +02002901 if (event->attach_state & PERF_ATTACH_TASK)
Stephane Eraniane5d13672011-02-14 11:20:01 +02002902 jump_label_dec(&perf_sched_events);
Eric B Munson3af9e852010-05-18 15:30:49 +01002903 if (event->attr.mmap || event->attr.mmap_data)
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002904 atomic_dec(&nr_mmap_events);
2905 if (event->attr.comm)
2906 atomic_dec(&nr_comm_events);
2907 if (event->attr.task)
2908 atomic_dec(&nr_task_events);
Frederic Weisbecker927c7a92010-07-01 16:20:36 +02002909 if (event->attr.sample_type & PERF_SAMPLE_CALLCHAIN)
2910 put_callchain_buffers();
Peter Zijlstra08309372011-03-03 11:31:20 +01002911 if (is_cgroup_event(event)) {
2912 atomic_dec(&per_cpu(perf_cgroup_events, event->cpu));
2913 jump_label_dec(&perf_sched_events);
2914 }
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002915 }
2916
Frederic Weisbecker76369132011-05-19 19:55:04 +02002917 if (event->rb) {
2918 ring_buffer_put(event->rb);
2919 event->rb = NULL;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002920 }
2921
Stephane Eraniane5d13672011-02-14 11:20:01 +02002922 if (is_cgroup_event(event))
2923 perf_detach_cgroup(event);
2924
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002925 if (event->destroy)
2926 event->destroy(event);
2927
Peter Zijlstra0c67b402010-09-13 11:15:58 +02002928 if (event->ctx)
2929 put_ctx(event->ctx);
2930
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002931 call_rcu(&event->rcu_head, free_event_rcu);
2932}
2933
Arjan van de Venfb0459d2009-09-25 12:25:56 +02002934int perf_event_release_kernel(struct perf_event *event)
2935{
2936 struct perf_event_context *ctx = event->ctx;
2937
2938 WARN_ON_ONCE(ctx->parent_ctx);
Peter Zijlstraa0507c82010-05-06 15:42:53 +02002939 /*
2940 * There are two ways this annotation is useful:
2941 *
2942 * 1) there is a lock recursion from perf_event_exit_task
2943 * see the comment there.
2944 *
2945 * 2) there is a lock-inversion with mmap_sem through
2946 * perf_event_read_group(), which takes faults while
2947 * holding ctx->mutex, however this is called after
2948 * the last filedesc died, so there is no possibility
2949 * to trigger the AB-BA case.
2950 */
2951 mutex_lock_nested(&ctx->mutex, SINGLE_DEPTH_NESTING);
Peter Zijlstra050735b2010-05-11 11:51:53 +02002952 raw_spin_lock_irq(&ctx->lock);
Peter Zijlstra8a495422010-05-27 15:47:49 +02002953 perf_group_detach(event);
Peter Zijlstra050735b2010-05-11 11:51:53 +02002954 raw_spin_unlock_irq(&ctx->lock);
Peter Zijlstrae03a9a52011-04-09 21:17:47 +02002955 perf_remove_from_context(event);
Arjan van de Venfb0459d2009-09-25 12:25:56 +02002956 mutex_unlock(&ctx->mutex);
2957
Arjan van de Venfb0459d2009-09-25 12:25:56 +02002958 free_event(event);
2959
2960 return 0;
2961}
2962EXPORT_SYMBOL_GPL(perf_event_release_kernel);
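/*
 * In-kernel life cycle sketch (illustrative; assumes the
 * perf_event_create_kernel_counter() helper exported elsewhere in this
 * file, whose exact prototype should be checked against this tree):
 *
 *	struct perf_event_attr attr = {
 *		.type	= PERF_TYPE_HARDWARE,
 *		.config	= PERF_COUNT_HW_CPU_CYCLES,
 *		.size	= sizeof(attr),
 *	};
 *	struct perf_event *event;
 *
 *	event = perf_event_create_kernel_counter(&attr, cpu, NULL,
 *						 overflow_handler, NULL);
 *	if (IS_ERR(event))
 *		return PTR_ERR(event);
 *	...
 *	perf_event_release_kernel(event);
 */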
2963
Peter Zijlstraa66a3052009-11-23 11:37:23 +01002964/*
2965 * Called when the last reference to the file is gone.
2966 */
2967static int perf_release(struct inode *inode, struct file *file)
2968{
2969 struct perf_event *event = file->private_data;
Peter Zijlstra88821352010-11-09 19:01:43 +01002970 struct task_struct *owner;
Peter Zijlstraa66a3052009-11-23 11:37:23 +01002971
2972 file->private_data = NULL;
2973
Peter Zijlstra88821352010-11-09 19:01:43 +01002974 rcu_read_lock();
2975 owner = ACCESS_ONCE(event->owner);
2976 /*
2977 * Matches the smp_wmb() in perf_event_exit_task(). If we observe
2978 * !owner it means the list deletion is complete and we can indeed
2979 * free this event, otherwise we need to serialize on
2980 * owner->perf_event_mutex.
2981 */
2982 smp_read_barrier_depends();
2983 if (owner) {
2984 /*
2985 * Since delayed_put_task_struct() also drops the last
2986 * task reference we can safely take a new reference
2987 * while holding the rcu_read_lock().
2988 */
2989 get_task_struct(owner);
2990 }
2991 rcu_read_unlock();
2992
2993 if (owner) {
2994 mutex_lock(&owner->perf_event_mutex);
2995 /*
2996 * We have to re-check the event->owner field, if it is cleared
2997 * we raced with perf_event_exit_task(), acquiring the mutex
2998 * ensured they're done, and we can proceed with freeing the
2999 * event.
3000 */
3001 if (event->owner)
3002 list_del_init(&event->owner_entry);
3003 mutex_unlock(&owner->perf_event_mutex);
3004 put_task_struct(owner);
3005 }
3006
Peter Zijlstraa66a3052009-11-23 11:37:23 +01003007 return perf_event_release_kernel(event);
3008}
3009
Peter Zijlstra59ed4462009-11-20 22:19:55 +01003010u64 perf_event_read_value(struct perf_event *event, u64 *enabled, u64 *running)
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003011{
3012 struct perf_event *child;
3013 u64 total = 0;
3014
Peter Zijlstra59ed4462009-11-20 22:19:55 +01003015 *enabled = 0;
3016 *running = 0;
3017
Peter Zijlstra6f105812009-11-20 22:19:56 +01003018 mutex_lock(&event->child_mutex);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003019 total += perf_event_read(event);
Peter Zijlstra59ed4462009-11-20 22:19:55 +01003020 *enabled += event->total_time_enabled +
3021 atomic64_read(&event->child_total_time_enabled);
3022 *running += event->total_time_running +
3023 atomic64_read(&event->child_total_time_running);
3024
3025 list_for_each_entry(child, &event->child_list, child_list) {
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003026 total += perf_event_read(child);
Peter Zijlstra59ed4462009-11-20 22:19:55 +01003027 *enabled += child->total_time_enabled;
3028 *running += child->total_time_running;
3029 }
Peter Zijlstra6f105812009-11-20 22:19:56 +01003030 mutex_unlock(&event->child_mutex);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003031
3032 return total;
3033}
Arjan van de Venfb0459d2009-09-25 12:25:56 +02003034EXPORT_SYMBOL_GPL(perf_event_read_value);
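/*
 * Usage sketch (illustrative): when the event was time-multiplexed,
 * running < enabled, and the raw count is typically scaled the same
 * way user-space perf does it:
 *
 *	u64 enabled, running, count;
 *
 *	count = perf_event_read_value(event, &enabled, &running);
 *	if (running && running < enabled)
 *		count = div64_u64(count * enabled, running);
 */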
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003035
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003036static int perf_event_read_group(struct perf_event *event,
3037 u64 read_format, char __user *buf)
3038{
3039 struct perf_event *leader = event->group_leader, *sub;
Peter Zijlstra6f105812009-11-20 22:19:56 +01003040 int n = 0, size = 0, ret = -EFAULT;
3041 struct perf_event_context *ctx = leader->ctx;
Peter Zijlstraabf48682009-11-20 22:19:49 +01003042 u64 values[5];
Peter Zijlstra59ed4462009-11-20 22:19:55 +01003043 u64 count, enabled, running;
Peter Zijlstraabf48682009-11-20 22:19:49 +01003044
Peter Zijlstra6f105812009-11-20 22:19:56 +01003045 mutex_lock(&ctx->mutex);
Peter Zijlstra59ed4462009-11-20 22:19:55 +01003046 count = perf_event_read_value(leader, &enabled, &running);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003047
3048 values[n++] = 1 + leader->nr_siblings;
Peter Zijlstra59ed4462009-11-20 22:19:55 +01003049 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
3050 values[n++] = enabled;
3051 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
3052 values[n++] = running;
Peter Zijlstraabf48682009-11-20 22:19:49 +01003053 values[n++] = count;
3054 if (read_format & PERF_FORMAT_ID)
3055 values[n++] = primary_event_id(leader);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003056
3057 size = n * sizeof(u64);
3058
3059 if (copy_to_user(buf, values, size))
Peter Zijlstra6f105812009-11-20 22:19:56 +01003060 goto unlock;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003061
Peter Zijlstra6f105812009-11-20 22:19:56 +01003062 ret = size;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003063
3064 list_for_each_entry(sub, &leader->sibling_list, group_entry) {
Peter Zijlstraabf48682009-11-20 22:19:49 +01003065 n = 0;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003066
Peter Zijlstra59ed4462009-11-20 22:19:55 +01003067 values[n++] = perf_event_read_value(sub, &enabled, &running);
Peter Zijlstraabf48682009-11-20 22:19:49 +01003068 if (read_format & PERF_FORMAT_ID)
3069 values[n++] = primary_event_id(sub);
3070
3071 size = n * sizeof(u64);
3072
Stephane Eranian184d3da2009-11-23 21:40:49 -08003073 if (copy_to_user(buf + ret, values, size)) {
Peter Zijlstra6f105812009-11-20 22:19:56 +01003074 ret = -EFAULT;
3075 goto unlock;
3076 }
Peter Zijlstraabf48682009-11-20 22:19:49 +01003077
3078 ret += size;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003079 }
Peter Zijlstra6f105812009-11-20 22:19:56 +01003080unlock:
3081 mutex_unlock(&ctx->mutex);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003082
Peter Zijlstraabf48682009-11-20 22:19:49 +01003083 return ret;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003084}
3085
3086static int perf_event_read_one(struct perf_event *event,
3087 u64 read_format, char __user *buf)
3088{
Peter Zijlstra59ed4462009-11-20 22:19:55 +01003089 u64 enabled, running;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003090 u64 values[4];
3091 int n = 0;
3092
Peter Zijlstra59ed4462009-11-20 22:19:55 +01003093 values[n++] = perf_event_read_value(event, &enabled, &running);
3094 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
3095 values[n++] = enabled;
3096 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
3097 values[n++] = running;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003098 if (read_format & PERF_FORMAT_ID)
3099 values[n++] = primary_event_id(event);
3100
3101 if (copy_to_user(buf, values, n * sizeof(u64)))
3102 return -EFAULT;
3103
3104 return n * sizeof(u64);
3105}
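/*
 * What user-space sees for a non-group read, assuming read_format is
 * PERF_FORMAT_TOTAL_TIME_ENABLED | PERF_FORMAT_TOTAL_TIME_RUNNING |
 * PERF_FORMAT_ID (a sketch; the struct layout mirrors the values[]
 * array built above):
 *
 *	struct {
 *		__u64 value;
 *		__u64 time_enabled;
 *		__u64 time_running;
 *		__u64 id;
 *	} r;
 *
 *	if (read(perf_fd, &r, sizeof(r)) == sizeof(r))
 *		use r.value, scaled by r.time_enabled / r.time_running;
 */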
3106
3107/*
3108 * Read the performance event - simple non-blocking version for now
3109 */
3110static ssize_t
3111perf_read_hw(struct perf_event *event, char __user *buf, size_t count)
3112{
3113 u64 read_format = event->attr.read_format;
3114 int ret;
3115
3116 /*
3117 * Return end-of-file for a read on an event that is in
3118 * error state (i.e. because it was pinned but it couldn't be
3119 * scheduled on to the CPU at some point).
3120 */
3121 if (event->state == PERF_EVENT_STATE_ERROR)
3122 return 0;
3123
Arnaldo Carvalho de Meloc320c7b2010-10-20 12:50:11 -02003124 if (count < event->read_size)
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003125 return -ENOSPC;
3126
3127 WARN_ON_ONCE(event->ctx->parent_ctx);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003128 if (read_format & PERF_FORMAT_GROUP)
3129 ret = perf_event_read_group(event, read_format, buf);
3130 else
3131 ret = perf_event_read_one(event, read_format, buf);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003132
3133 return ret;
3134}
3135
3136static ssize_t
3137perf_read(struct file *file, char __user *buf, size_t count, loff_t *ppos)
3138{
3139 struct perf_event *event = file->private_data;
3140
3141 return perf_read_hw(event, buf, count);
3142}
3143
3144static unsigned int perf_poll(struct file *file, poll_table *wait)
3145{
3146 struct perf_event *event = file->private_data;
Frederic Weisbecker76369132011-05-19 19:55:04 +02003147 struct ring_buffer *rb;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003148 unsigned int events = POLLHUP;
3149
3150 rcu_read_lock();
Frederic Weisbecker76369132011-05-19 19:55:04 +02003151 rb = rcu_dereference(event->rb);
3152 if (rb)
3153 events = atomic_xchg(&rb->poll, 0);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003154 rcu_read_unlock();
3155
3156 poll_wait(file, &event->waitq, wait);
3157
3158 return events;
3159}
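/*
 * User-space sketch (illustrative; perf_fd comes from
 * perf_event_open(2) and has an mmap()ed ring buffer):
 *
 *	struct pollfd pfd = { .fd = perf_fd, .events = POLLIN };
 *
 *	if (poll(&pfd, 1, -1) > 0 && (pfd.revents & POLLIN))
 *		drain the ring buffer;
 */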
3160
3161static void perf_event_reset(struct perf_event *event)
3162{
3163 (void)perf_event_read(event);
Peter Zijlstrae7850592010-05-21 14:43:08 +02003164 local64_set(&event->count, 0);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003165 perf_event_update_userpage(event);
3166}
3167
3168/*
3169 * Holding the top-level event's child_mutex means that any
3170 * descendant process that has inherited this event will block
3171 * in sync_child_event if it goes to exit, thus satisfying the
3172 * task existence requirements of perf_event_enable/disable.
3173 */
3174static void perf_event_for_each_child(struct perf_event *event,
3175 void (*func)(struct perf_event *))
3176{
3177 struct perf_event *child;
3178
3179 WARN_ON_ONCE(event->ctx->parent_ctx);
3180 mutex_lock(&event->child_mutex);
3181 func(event);
3182 list_for_each_entry(child, &event->child_list, child_list)
3183 func(child);
3184 mutex_unlock(&event->child_mutex);
3185}
3186
3187static void perf_event_for_each(struct perf_event *event,
3188 void (*func)(struct perf_event *))
3189{
3190 struct perf_event_context *ctx = event->ctx;
3191 struct perf_event *sibling;
3192
3193 WARN_ON_ONCE(ctx->parent_ctx);
3194 mutex_lock(&ctx->mutex);
3195 event = event->group_leader;
3196
3197 perf_event_for_each_child(event, func);
3198 func(event);
3199 list_for_each_entry(sibling, &event->sibling_list, group_entry)
3200 perf_event_for_each_child(event, func);
3201 mutex_unlock(&ctx->mutex);
3202}
3203
3204static int perf_event_period(struct perf_event *event, u64 __user *arg)
3205{
3206 struct perf_event_context *ctx = event->ctx;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003207 int ret = 0;
3208 u64 value;
3209
Franck Bui-Huu6c7e5502010-11-23 16:21:43 +01003210 if (!is_sampling_event(event))
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003211 return -EINVAL;
3212
John Blackwoodad0cf342010-09-28 18:03:11 -04003213 if (copy_from_user(&value, arg, sizeof(value)))
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003214 return -EFAULT;
3215
3216 if (!value)
3217 return -EINVAL;
3218
Thomas Gleixnere625cce2009-11-17 18:02:06 +01003219 raw_spin_lock_irq(&ctx->lock);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003220 if (event->attr.freq) {
3221 if (value > sysctl_perf_event_sample_rate) {
3222 ret = -EINVAL;
3223 goto unlock;
3224 }
3225
3226 event->attr.sample_freq = value;
3227 } else {
3228 event->attr.sample_period = value;
3229 event->hw.sample_period = value;
3230 }
3231unlock:
Thomas Gleixnere625cce2009-11-17 18:02:06 +01003232 raw_spin_unlock_irq(&ctx->lock);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003233
3234 return ret;
3235}
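/*
 * User-space sketch (illustrative): the sample period or frequency can
 * be changed on the fly; per the copy_from_user() above, the argument
 * is a pointer to a u64:
 *
 *	__u64 new_period = 100000;
 *
 *	ioctl(perf_fd, PERF_EVENT_IOC_PERIOD, &new_period);
 */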
3236
Peter Zijlstraac9721f2010-05-27 12:54:41 +02003237static const struct file_operations perf_fops;
3238
3239static struct perf_event *perf_fget_light(int fd, int *fput_needed)
3240{
3241 struct file *file;
3242
3243 file = fget_light(fd, fput_needed);
3244 if (!file)
3245 return ERR_PTR(-EBADF);
3246
3247 if (file->f_op != &perf_fops) {
3248 fput_light(file, *fput_needed);
3249 *fput_needed = 0;
3250 return ERR_PTR(-EBADF);
3251 }
3252
3253 return file->private_data;
3254}
3255
3256static int perf_event_set_output(struct perf_event *event,
3257 struct perf_event *output_event);
Li Zefan6fb29152009-10-15 11:21:42 +08003258static int perf_event_set_filter(struct perf_event *event, void __user *arg);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003259
3260static long perf_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
3261{
3262 struct perf_event *event = file->private_data;
3263 void (*func)(struct perf_event *);
3264 u32 flags = arg;
3265
3266 switch (cmd) {
3267 case PERF_EVENT_IOC_ENABLE:
3268 func = perf_event_enable;
3269 break;
3270 case PERF_EVENT_IOC_DISABLE:
3271 func = perf_event_disable;
3272 break;
3273 case PERF_EVENT_IOC_RESET:
3274 func = perf_event_reset;
3275 break;
3276
3277 case PERF_EVENT_IOC_REFRESH:
3278 return perf_event_refresh(event, arg);
3279
3280 case PERF_EVENT_IOC_PERIOD:
3281 return perf_event_period(event, (u64 __user *)arg);
3282
3283 case PERF_EVENT_IOC_SET_OUTPUT:
Peter Zijlstraac9721f2010-05-27 12:54:41 +02003284 {
3285 struct perf_event *output_event = NULL;
3286 int fput_needed = 0;
3287 int ret;
3288
3289 if (arg != -1) {
3290 output_event = perf_fget_light(arg, &fput_needed);
3291 if (IS_ERR(output_event))
3292 return PTR_ERR(output_event);
3293 }
3294
3295 ret = perf_event_set_output(event, output_event);
3296 if (output_event)
3297 fput_light(output_event->filp, fput_needed);
3298
3299 return ret;
3300 }
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003301
Li Zefan6fb29152009-10-15 11:21:42 +08003302 case PERF_EVENT_IOC_SET_FILTER:
3303 return perf_event_set_filter(event, (void __user *)arg);
3304
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003305 default:
3306 return -ENOTTY;
3307 }
3308
3309 if (flags & PERF_IOC_FLAG_GROUP)
3310 perf_event_for_each(event, func);
3311 else
3312 perf_event_for_each_child(event, func);
3313
3314 return 0;
3315}
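/*
 * User-space sketch of PERF_EVENT_IOC_SET_OUTPUT (illustrative):
 * redirect fd_b's records into fd_a's ring buffer so only one mmap()
 * is needed; an argument of -1 detaches the output again:
 *
 *	ioctl(fd_b, PERF_EVENT_IOC_SET_OUTPUT, fd_a);
 *	ioctl(fd_b, PERF_EVENT_IOC_SET_OUTPUT, -1);
 */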
3316
3317int perf_event_task_enable(void)
3318{
3319 struct perf_event *event;
3320
3321 mutex_lock(&current->perf_event_mutex);
3322 list_for_each_entry(event, &current->perf_event_list, owner_entry)
3323 perf_event_for_each_child(event, perf_event_enable);
3324 mutex_unlock(&current->perf_event_mutex);
3325
3326 return 0;
3327}
3328
3329int perf_event_task_disable(void)
3330{
3331 struct perf_event *event;
3332
3333 mutex_lock(&current->perf_event_mutex);
3334 list_for_each_entry(event, &current->perf_event_list, owner_entry)
3335 perf_event_for_each_child(event, perf_event_disable);
3336 mutex_unlock(&current->perf_event_mutex);
3337
3338 return 0;
3339}
3340
3341#ifndef PERF_EVENT_INDEX_OFFSET
3342# define PERF_EVENT_INDEX_OFFSET 0
3343#endif
3344
3345static int perf_event_index(struct perf_event *event)
3346{
Peter Zijlstraa4eaf7f2010-06-16 14:37:10 +02003347 if (event->hw.state & PERF_HES_STOPPED)
3348 return 0;
3349
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003350 if (event->state != PERF_EVENT_STATE_ACTIVE)
3351 return 0;
3352
3353 return event->hw.idx + 1 - PERF_EVENT_INDEX_OFFSET;
3354}
3355
Eric B Munsonc4794292011-06-23 16:34:38 -04003356static void calc_timer_values(struct perf_event *event,
3357 u64 *running,
3358 u64 *enabled)
3359{
3360 u64 now, ctx_time;
3361
3362 now = perf_clock();
3363 ctx_time = event->shadow_ctx_time + now;
3364 *enabled = ctx_time - event->tstamp_enabled;
3365 *running = ctx_time - event->tstamp_running;
3366}
3367
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003368/*
3369 * Callers need to ensure there can be no nesting of this function, otherwise
3370 * the seqlock logic goes bad. We cannot serialize this because the arch
3371 * code calls this from NMI context.
3372 */
3373void perf_event_update_userpage(struct perf_event *event)
3374{
3375 struct perf_event_mmap_page *userpg;
Frederic Weisbecker76369132011-05-19 19:55:04 +02003376 struct ring_buffer *rb;
Eric B Munson0d641202011-06-24 12:26:26 -04003377 u64 enabled, running;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003378
3379 rcu_read_lock();
Eric B Munson0d641202011-06-24 12:26:26 -04003380 /*
3381 * compute total_time_enabled, total_time_running
3382 * based on snapshot values taken when the event
3383 * was last scheduled in.
3384 *
3385 * we cannot simply call update_context_time()
3386 * because of locking issues, as we can be called in
3387 * NMI context
3388 */
3389 calc_timer_values(event, &enabled, &running);
Frederic Weisbecker76369132011-05-19 19:55:04 +02003390 rb = rcu_dereference(event->rb);
3391 if (!rb)
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003392 goto unlock;
3393
Frederic Weisbecker76369132011-05-19 19:55:04 +02003394 userpg = rb->user_page;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003395
3396 /*
3397 * Disable preemption so as not to let the corresponding user-space
3398 * spin too long if we get preempted.
3399 */
3400 preempt_disable();
3401 ++userpg->lock;
3402 barrier();
3403 userpg->index = perf_event_index(event);
Peter Zijlstrab5e58792010-05-21 14:43:12 +02003404 userpg->offset = perf_event_count(event);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003405 if (event->state == PERF_EVENT_STATE_ACTIVE)
Peter Zijlstrae7850592010-05-21 14:43:08 +02003406 userpg->offset -= local64_read(&event->hw.prev_count);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003407
Eric B Munson0d641202011-06-24 12:26:26 -04003408 userpg->time_enabled = enabled +
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003409 atomic64_read(&event->child_total_time_enabled);
3410
Eric B Munson0d641202011-06-24 12:26:26 -04003411 userpg->time_running = running +
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003412 atomic64_read(&event->child_total_time_running);
3413
3414 barrier();
3415 ++userpg->lock;
3416 preempt_enable();
3417unlock:
3418 rcu_read_unlock();
3419}
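/*
 * The matching user-space read side is a seqlock-style retry loop over
 * the mmap()ed control page (a sketch; pg points at the first mapped
 * page):
 *
 *	struct perf_event_mmap_page *pg = base;
 *	__u32 seq, idx;
 *	__s64 offset;
 *
 *	do {
 *		seq = pg->lock;
 *		barrier();
 *		idx = pg->index;
 *		offset = pg->offset;
 *		barrier();
 *	} while (pg->lock != seq);
 *
 * A non-zero idx means the count can be completed from user space,
 * e.g. offset + rdpmc(idx - 1) on x86.
 */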
3420
Peter Zijlstra906010b2009-09-21 16:08:49 +02003421static int perf_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
3422{
3423 struct perf_event *event = vma->vm_file->private_data;
Frederic Weisbecker76369132011-05-19 19:55:04 +02003424 struct ring_buffer *rb;
Peter Zijlstra906010b2009-09-21 16:08:49 +02003425 int ret = VM_FAULT_SIGBUS;
3426
3427 if (vmf->flags & FAULT_FLAG_MKWRITE) {
3428 if (vmf->pgoff == 0)
3429 ret = 0;
3430 return ret;
3431 }
3432
3433 rcu_read_lock();
Frederic Weisbecker76369132011-05-19 19:55:04 +02003434 rb = rcu_dereference(event->rb);
3435 if (!rb)
Peter Zijlstra906010b2009-09-21 16:08:49 +02003436 goto unlock;
3437
3438 if (vmf->pgoff && (vmf->flags & FAULT_FLAG_WRITE))
3439 goto unlock;
3440
Frederic Weisbecker76369132011-05-19 19:55:04 +02003441 vmf->page = perf_mmap_to_page(rb, vmf->pgoff);
Peter Zijlstra906010b2009-09-21 16:08:49 +02003442 if (!vmf->page)
3443 goto unlock;
3444
3445 get_page(vmf->page);
3446 vmf->page->mapping = vma->vm_file->f_mapping;
3447 vmf->page->index = vmf->pgoff;
3448
3449 ret = 0;
3450unlock:
3451 rcu_read_unlock();
3452
3453 return ret;
3454}
3455
Frederic Weisbecker76369132011-05-19 19:55:04 +02003456static void rb_free_rcu(struct rcu_head *rcu_head)
Peter Zijlstra906010b2009-09-21 16:08:49 +02003457{
Frederic Weisbecker76369132011-05-19 19:55:04 +02003458 struct ring_buffer *rb;
Peter Zijlstra906010b2009-09-21 16:08:49 +02003459
Frederic Weisbecker76369132011-05-19 19:55:04 +02003460 rb = container_of(rcu_head, struct ring_buffer, rcu_head);
3461 rb_free(rb);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003462}
3463
Frederic Weisbecker76369132011-05-19 19:55:04 +02003464static struct ring_buffer *ring_buffer_get(struct perf_event *event)
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003465{
Frederic Weisbecker76369132011-05-19 19:55:04 +02003466 struct ring_buffer *rb;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003467
Peter Zijlstraac9721f2010-05-27 12:54:41 +02003468 rcu_read_lock();
Frederic Weisbecker76369132011-05-19 19:55:04 +02003469 rb = rcu_dereference(event->rb);
3470 if (rb) {
3471 if (!atomic_inc_not_zero(&rb->refcount))
3472 rb = NULL;
Peter Zijlstraac9721f2010-05-27 12:54:41 +02003473 }
3474 rcu_read_unlock();
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003475
Frederic Weisbecker76369132011-05-19 19:55:04 +02003476 return rb;
Peter Zijlstraac9721f2010-05-27 12:54:41 +02003477}
3478
Frederic Weisbecker76369132011-05-19 19:55:04 +02003479static void ring_buffer_put(struct ring_buffer *rb)
Peter Zijlstraac9721f2010-05-27 12:54:41 +02003480{
Frederic Weisbecker76369132011-05-19 19:55:04 +02003481 if (!atomic_dec_and_test(&rb->refcount))
Peter Zijlstraac9721f2010-05-27 12:54:41 +02003482 return;
3483
Frederic Weisbecker76369132011-05-19 19:55:04 +02003484 call_rcu(&rb->rcu_head, rb_free_rcu);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003485}
3486
3487static void perf_mmap_open(struct vm_area_struct *vma)
3488{
3489 struct perf_event *event = vma->vm_file->private_data;
3490
3491 atomic_inc(&event->mmap_count);
3492}
3493
3494static void perf_mmap_close(struct vm_area_struct *vma)
3495{
3496 struct perf_event *event = vma->vm_file->private_data;
3497
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003498 if (atomic_dec_and_mutex_lock(&event->mmap_count, &event->mmap_mutex)) {
Frederic Weisbecker76369132011-05-19 19:55:04 +02003499 unsigned long size = perf_data_size(event->rb);
Peter Zijlstraac9721f2010-05-27 12:54:41 +02003500 struct user_struct *user = event->mmap_user;
Frederic Weisbecker76369132011-05-19 19:55:04 +02003501 struct ring_buffer *rb = event->rb;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003502
Peter Zijlstra906010b2009-09-21 16:08:49 +02003503 atomic_long_sub((size >> PAGE_SHIFT) + 1, &user->locked_vm);
Peter Zijlstraac9721f2010-05-27 12:54:41 +02003504 vma->vm_mm->locked_vm -= event->mmap_locked;
Frederic Weisbecker76369132011-05-19 19:55:04 +02003505 rcu_assign_pointer(event->rb, NULL);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003506 mutex_unlock(&event->mmap_mutex);
Peter Zijlstraac9721f2010-05-27 12:54:41 +02003507
Frederic Weisbecker76369132011-05-19 19:55:04 +02003508 ring_buffer_put(rb);
Peter Zijlstraac9721f2010-05-27 12:54:41 +02003509 free_uid(user);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003510 }
3511}
3512
Alexey Dobriyanf0f37e22009-09-27 22:29:37 +04003513static const struct vm_operations_struct perf_mmap_vmops = {
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003514 .open = perf_mmap_open,
3515 .close = perf_mmap_close,
3516 .fault = perf_mmap_fault,
3517 .page_mkwrite = perf_mmap_fault,
3518};
3519
3520static int perf_mmap(struct file *file, struct vm_area_struct *vma)
3521{
3522 struct perf_event *event = file->private_data;
3523 unsigned long user_locked, user_lock_limit;
3524 struct user_struct *user = current_user();
3525 unsigned long locked, lock_limit;
Frederic Weisbecker76369132011-05-19 19:55:04 +02003526 struct ring_buffer *rb;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003527 unsigned long vma_size;
3528 unsigned long nr_pages;
3529 long user_extra, extra;
Peter Zijlstrad57e34f2010-05-28 19:41:35 +02003530 int ret = 0, flags = 0;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003531
Peter Zijlstrac7920612010-05-18 10:33:24 +02003532 /*
3533 * Don't allow mmap() of inherited per-task counters. This would
3534 * create a performance issue due to all children writing to the
Frederic Weisbecker76369132011-05-19 19:55:04 +02003535 * same rb.
Peter Zijlstrac7920612010-05-18 10:33:24 +02003536 */
3537 if (event->cpu == -1 && event->attr.inherit)
3538 return -EINVAL;
3539
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003540 if (!(vma->vm_flags & VM_SHARED))
3541 return -EINVAL;
3542
3543 vma_size = vma->vm_end - vma->vm_start;
3544 nr_pages = (vma_size / PAGE_SIZE) - 1;
3545
3546 /*
Frederic Weisbecker76369132011-05-19 19:55:04 +02003547 * If we have rb pages, ensure they're a power-of-two number, so we
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003548 * can do bitmasks instead of modulo.
3549 */
3550 if (nr_pages != 0 && !is_power_of_2(nr_pages))
3551 return -EINVAL;
3552
3553 if (vma_size != PAGE_SIZE * (1 + nr_pages))
3554 return -EINVAL;
3555
3556 if (vma->vm_pgoff != 0)
3557 return -EINVAL;
3558
3559 WARN_ON_ONCE(event->ctx->parent_ctx);
3560 mutex_lock(&event->mmap_mutex);
Frederic Weisbecker76369132011-05-19 19:55:04 +02003561 if (event->rb) {
3562 if (event->rb->nr_pages == nr_pages)
3563 atomic_inc(&event->rb->refcount);
Peter Zijlstraac9721f2010-05-27 12:54:41 +02003564 else
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003565 ret = -EINVAL;
3566 goto unlock;
3567 }
3568
3569 user_extra = nr_pages + 1;
3570 user_lock_limit = sysctl_perf_event_mlock >> (PAGE_SHIFT - 10);
3571
3572 /*
3573 * Increase the limit linearly with more CPUs:
3574 */
3575 user_lock_limit *= num_online_cpus();
3576
3577 user_locked = atomic_long_read(&user->locked_vm) + user_extra;
3578
3579 extra = 0;
3580 if (user_locked > user_lock_limit)
3581 extra = user_locked - user_lock_limit;
3582
Jiri Slaby78d7d402010-03-05 13:42:54 -08003583 lock_limit = rlimit(RLIMIT_MEMLOCK);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003584 lock_limit >>= PAGE_SHIFT;
3585 locked = vma->vm_mm->locked_vm + extra;
3586
3587 if ((locked > lock_limit) && perf_paranoid_tracepoint_raw() &&
3588 !capable(CAP_IPC_LOCK)) {
3589 ret = -EPERM;
3590 goto unlock;
3591 }
3592
Frederic Weisbecker76369132011-05-19 19:55:04 +02003593 WARN_ON(event->rb);
Peter Zijlstra906010b2009-09-21 16:08:49 +02003594
Peter Zijlstrad57e34f2010-05-28 19:41:35 +02003595 if (vma->vm_flags & VM_WRITE)
Frederic Weisbecker76369132011-05-19 19:55:04 +02003596 flags |= RING_BUFFER_WRITABLE;
Peter Zijlstrad57e34f2010-05-28 19:41:35 +02003597
Vince Weaver4ec83632011-06-01 15:15:36 -04003598 rb = rb_alloc(nr_pages,
3599 event->attr.watermark ? event->attr.wakeup_watermark : 0,
3600 event->cpu, flags);
3601
Frederic Weisbecker76369132011-05-19 19:55:04 +02003602 if (!rb) {
Peter Zijlstraac9721f2010-05-27 12:54:41 +02003603 ret = -ENOMEM;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003604 goto unlock;
Peter Zijlstraac9721f2010-05-27 12:54:41 +02003605 }
Frederic Weisbecker76369132011-05-19 19:55:04 +02003606 rcu_assign_pointer(event->rb, rb);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003607
Peter Zijlstraac9721f2010-05-27 12:54:41 +02003608 atomic_long_add(user_extra, &user->locked_vm);
3609 event->mmap_locked = extra;
3610 event->mmap_user = get_current_user();
3611 vma->vm_mm->locked_vm += event->mmap_locked;
3612
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003613unlock:
Peter Zijlstraac9721f2010-05-27 12:54:41 +02003614 if (!ret)
3615 atomic_inc(&event->mmap_count);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003616 mutex_unlock(&event->mmap_mutex);
3617
3618 vma->vm_flags |= VM_RESERVED;
3619 vma->vm_ops = &perf_mmap_vmops;
3620
3621 return ret;
3622}
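/*
 * User-space sketch (illustrative): the mapping is one control page
 * plus a power-of-two number of data pages, matching the checks above.
 * With 8 data pages:
 *
 *	size_t len = (8 + 1) * sysconf(_SC_PAGESIZE);
 *	void *base = mmap(NULL, len, PROT_READ | PROT_WRITE,
 *			  MAP_SHARED, perf_fd, 0);
 */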
3623
3624static int perf_fasync(int fd, struct file *filp, int on)
3625{
3626 struct inode *inode = filp->f_path.dentry->d_inode;
3627 struct perf_event *event = filp->private_data;
3628 int retval;
3629
3630 mutex_lock(&inode->i_mutex);
3631 retval = fasync_helper(fd, filp, on, &event->fasync);
3632 mutex_unlock(&inode->i_mutex);
3633
3634 if (retval < 0)
3635 return retval;
3636
3637 return 0;
3638}
3639
3640static const struct file_operations perf_fops = {
Arnd Bergmann3326c1c2010-03-23 19:09:33 +01003641 .llseek = no_llseek,
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003642 .release = perf_release,
3643 .read = perf_read,
3644 .poll = perf_poll,
3645 .unlocked_ioctl = perf_ioctl,
3646 .compat_ioctl = perf_ioctl,
3647 .mmap = perf_mmap,
3648 .fasync = perf_fasync,
3649};
3650
3651/*
3652 * Perf event wakeup
3653 *
3654 * If there's data, ensure we set the poll() state and publish everything
3655 * to user-space before waking everybody up.
3656 */
3657
3658void perf_event_wakeup(struct perf_event *event)
3659{
3660 wake_up_all(&event->waitq);
3661
3662 if (event->pending_kill) {
3663 kill_fasync(&event->fasync, SIGIO, event->pending_kill);
3664 event->pending_kill = 0;
3665 }
3666}
3667
Peter Zijlstrae360adb2010-10-14 14:01:34 +08003668static void perf_pending_event(struct irq_work *entry)
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003669{
3670 struct perf_event *event = container_of(entry,
3671 struct perf_event, pending);
3672
3673 if (event->pending_disable) {
3674 event->pending_disable = 0;
3675 __perf_event_disable(event);
3676 }
3677
3678 if (event->pending_wakeup) {
3679 event->pending_wakeup = 0;
3680 perf_event_wakeup(event);
3681 }
3682}
3683
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003684/*
Zhang, Yanmin39447b32010-04-19 13:32:41 +08003685 * We assume KVM is the only user of these callbacks.
3686 * Later on, we might change this to a list if another
3687 * virtualization implementation also needs them.
3688 */
3689struct perf_guest_info_callbacks *perf_guest_cbs;
3690
3691int perf_register_guest_info_callbacks(struct perf_guest_info_callbacks *cbs)
3692{
3693 perf_guest_cbs = cbs;
3694 return 0;
3695}
3696EXPORT_SYMBOL_GPL(perf_register_guest_info_callbacks);
3697
3698int perf_unregister_guest_info_callbacks(struct perf_guest_info_callbacks *cbs)
3699{
3700 perf_guest_cbs = NULL;
3701 return 0;
3702}
3703EXPORT_SYMBOL_GPL(perf_unregister_guest_info_callbacks);
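/*
 * Registration sketch (illustrative; the kvm_* callback names are
 * hypothetical, the field names follow struct
 * perf_guest_info_callbacks):
 *
 *	static struct perf_guest_info_callbacks kvm_guest_cbs = {
 *		.is_in_guest	= kvm_is_in_guest,
 *		.is_user_mode	= kvm_is_user_mode,
 *		.get_guest_ip	= kvm_get_guest_ip,
 *	};
 *
 *	perf_register_guest_info_callbacks(&kvm_guest_cbs);
 */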
3704
Arnaldo Carvalho de Meloc980d102010-12-04 23:02:20 -02003705static void __perf_event_header__init_id(struct perf_event_header *header,
3706 struct perf_sample_data *data,
3707 struct perf_event *event)
Arnaldo Carvalho de Melo6844c092010-12-03 16:36:35 -02003708{
3709 u64 sample_type = event->attr.sample_type;
3710
3711 data->type = sample_type;
3712 header->size += event->id_header_size;
3713
3714 if (sample_type & PERF_SAMPLE_TID) {
3715 /* namespace issues */
3716 data->tid_entry.pid = perf_event_pid(event, current);
3717 data->tid_entry.tid = perf_event_tid(event, current);
3718 }
3719
3720 if (sample_type & PERF_SAMPLE_TIME)
3721 data->time = perf_clock();
3722
3723 if (sample_type & PERF_SAMPLE_ID)
3724 data->id = primary_event_id(event);
3725
3726 if (sample_type & PERF_SAMPLE_STREAM_ID)
3727 data->stream_id = event->id;
3728
3729 if (sample_type & PERF_SAMPLE_CPU) {
3730 data->cpu_entry.cpu = raw_smp_processor_id();
3731 data->cpu_entry.reserved = 0;
3732 }
3733}
3734
Frederic Weisbecker76369132011-05-19 19:55:04 +02003735void perf_event_header__init_id(struct perf_event_header *header,
3736 struct perf_sample_data *data,
3737 struct perf_event *event)
Arnaldo Carvalho de Meloc980d102010-12-04 23:02:20 -02003738{
3739 if (event->attr.sample_id_all)
3740 __perf_event_header__init_id(header, data, event);
3741}
3742
3743static void __perf_event__output_id_sample(struct perf_output_handle *handle,
3744 struct perf_sample_data *data)
3745{
3746 u64 sample_type = data->type;
3747
3748 if (sample_type & PERF_SAMPLE_TID)
3749 perf_output_put(handle, data->tid_entry);
3750
3751 if (sample_type & PERF_SAMPLE_TIME)
3752 perf_output_put(handle, data->time);
3753
3754 if (sample_type & PERF_SAMPLE_ID)
3755 perf_output_put(handle, data->id);
3756
3757 if (sample_type & PERF_SAMPLE_STREAM_ID)
3758 perf_output_put(handle, data->stream_id);
3759
3760 if (sample_type & PERF_SAMPLE_CPU)
3761 perf_output_put(handle, data->cpu_entry);
3762}
3763
Frederic Weisbecker76369132011-05-19 19:55:04 +02003764void perf_event__output_id_sample(struct perf_event *event,
3765 struct perf_output_handle *handle,
3766 struct perf_sample_data *sample)
Arnaldo Carvalho de Meloc980d102010-12-04 23:02:20 -02003767{
3768 if (event->attr.sample_id_all)
3769 __perf_event__output_id_sample(handle, sample);
3770}
3771
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003772static void perf_output_read_one(struct perf_output_handle *handle,
Stephane Eranianeed01522010-10-26 16:08:01 +02003773 struct perf_event *event,
3774 u64 enabled, u64 running)
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003775{
3776 u64 read_format = event->attr.read_format;
3777 u64 values[4];
3778 int n = 0;
3779
Peter Zijlstrab5e58792010-05-21 14:43:12 +02003780 values[n++] = perf_event_count(event);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003781 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
Stephane Eranianeed01522010-10-26 16:08:01 +02003782 values[n++] = enabled +
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003783 atomic64_read(&event->child_total_time_enabled);
3784 }
3785 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
Stephane Eranianeed01522010-10-26 16:08:01 +02003786 values[n++] = running +
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003787 atomic64_read(&event->child_total_time_running);
3788 }
3789 if (read_format & PERF_FORMAT_ID)
3790 values[n++] = primary_event_id(event);
3791
Frederic Weisbecker76369132011-05-19 19:55:04 +02003792 __output_copy(handle, values, n * sizeof(u64));
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003793}
3794
3795/*
3796 * XXX PERF_FORMAT_GROUP vs inherited events seems difficult.
3797 */
3798static void perf_output_read_group(struct perf_output_handle *handle,
Stephane Eranianeed01522010-10-26 16:08:01 +02003799 struct perf_event *event,
3800 u64 enabled, u64 running)
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003801{
3802 struct perf_event *leader = event->group_leader, *sub;
3803 u64 read_format = event->attr.read_format;
3804 u64 values[5];
3805 int n = 0;
3806
3807 values[n++] = 1 + leader->nr_siblings;
3808
3809 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
Stephane Eranianeed01522010-10-26 16:08:01 +02003810 values[n++] = enabled;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003811
3812 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
Stephane Eranianeed01522010-10-26 16:08:01 +02003813 values[n++] = running;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003814
3815 if (leader != event)
3816 leader->pmu->read(leader);
3817
Peter Zijlstrab5e58792010-05-21 14:43:12 +02003818 values[n++] = perf_event_count(leader);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003819 if (read_format & PERF_FORMAT_ID)
3820 values[n++] = primary_event_id(leader);
3821
Frederic Weisbecker76369132011-05-19 19:55:04 +02003822 __output_copy(handle, values, n * sizeof(u64));
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003823
3824 list_for_each_entry(sub, &leader->sibling_list, group_entry) {
3825 n = 0;
3826
3827 if (sub != event)
3828 sub->pmu->read(sub);
3829
Peter Zijlstrab5e58792010-05-21 14:43:12 +02003830 values[n++] = perf_event_count(sub);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003831 if (read_format & PERF_FORMAT_ID)
3832 values[n++] = primary_event_id(sub);
3833
Frederic Weisbecker76369132011-05-19 19:55:04 +02003834 __output_copy(handle, values, n * sizeof(u64));
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003835 }
3836}
3837
Stephane Eranianeed01522010-10-26 16:08:01 +02003838#define PERF_FORMAT_TOTAL_TIMES (PERF_FORMAT_TOTAL_TIME_ENABLED|\
3839 PERF_FORMAT_TOTAL_TIME_RUNNING)
3840
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003841static void perf_output_read(struct perf_output_handle *handle,
3842 struct perf_event *event)
3843{
Eric B Munsonc4794292011-06-23 16:34:38 -04003844 u64 enabled = 0, running = 0;
Stephane Eranianeed01522010-10-26 16:08:01 +02003845 u64 read_format = event->attr.read_format;
3846
3847 /*
3848 * compute total_time_enabled, total_time_running
3849 * based on snapshot values taken when the event
3850 * was last scheduled in.
3851 *
3852 * we cannot simply call update_context_time()
3853 * because of locking issues, as we are called in
3854 * NMI context
3855 */
Eric B Munsonc4794292011-06-23 16:34:38 -04003856 if (read_format & PERF_FORMAT_TOTAL_TIMES)
3857 calc_timer_values(event, &enabled, &running);
Stephane Eranianeed01522010-10-26 16:08:01 +02003858
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003859 if (event->attr.read_format & PERF_FORMAT_GROUP)
Stephane Eranianeed01522010-10-26 16:08:01 +02003860 perf_output_read_group(handle, event, enabled, running);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003861 else
Stephane Eranianeed01522010-10-26 16:08:01 +02003862 perf_output_read_one(handle, event, enabled, running);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003863}
3864
3865void perf_output_sample(struct perf_output_handle *handle,
3866 struct perf_event_header *header,
3867 struct perf_sample_data *data,
3868 struct perf_event *event)
3869{
3870 u64 sample_type = data->type;
3871
3872 perf_output_put(handle, *header);
3873
3874 if (sample_type & PERF_SAMPLE_IP)
3875 perf_output_put(handle, data->ip);
3876
3877 if (sample_type & PERF_SAMPLE_TID)
3878 perf_output_put(handle, data->tid_entry);
3879
3880 if (sample_type & PERF_SAMPLE_TIME)
3881 perf_output_put(handle, data->time);
3882
3883 if (sample_type & PERF_SAMPLE_ADDR)
3884 perf_output_put(handle, data->addr);
3885
3886 if (sample_type & PERF_SAMPLE_ID)
3887 perf_output_put(handle, data->id);
3888
3889 if (sample_type & PERF_SAMPLE_STREAM_ID)
3890 perf_output_put(handle, data->stream_id);
3891
3892 if (sample_type & PERF_SAMPLE_CPU)
3893 perf_output_put(handle, data->cpu_entry);
3894
3895 if (sample_type & PERF_SAMPLE_PERIOD)
3896 perf_output_put(handle, data->period);
3897
3898 if (sample_type & PERF_SAMPLE_READ)
3899 perf_output_read(handle, event);
3900
3901 if (sample_type & PERF_SAMPLE_CALLCHAIN) {
3902 if (data->callchain) {
3903 int size = 1;
3904
3905 size += data->callchain->nr;
3907
3908 size *= sizeof(u64);
3909
Frederic Weisbecker76369132011-05-19 19:55:04 +02003910 __output_copy(handle, data->callchain, size);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003911 } else {
3912 u64 nr = 0;
3913 perf_output_put(handle, nr);
3914 }
3915 }
3916
3917 if (sample_type & PERF_SAMPLE_RAW) {
3918 if (data->raw) {
3919 perf_output_put(handle, data->raw->size);
Frederic Weisbecker76369132011-05-19 19:55:04 +02003920 __output_copy(handle, data->raw->data,
3921 data->raw->size);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003922 } else {
3923 struct {
3924 u32 size;
3925 u32 data;
3926 } raw = {
3927 .size = sizeof(u32),
3928 .data = 0,
3929 };
3930 perf_output_put(handle, raw);
3931 }
3932 }
Peter Zijlstraa7ac67e2011-06-27 16:47:16 +02003933
3934 if (!event->attr.watermark) {
3935 int wakeup_events = event->attr.wakeup_events;
3936
3937 if (wakeup_events) {
3938 struct ring_buffer *rb = handle->rb;
3939 int events = local_inc_return(&rb->events);
3940
3941 if (events >= wakeup_events) {
3942 local_sub(wakeup_events, &rb->events);
3943 local_inc(&rb->wakeup);
3944 }
3945 }
3946 }
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003947}
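/*
 * User-space sketch (illustrative): the wakeup accounting above is
 * driven by the event attr, e.g. to wake poll() every 16 samples
 * rather than on a buffer watermark:
 *
 *	attr.watermark	   = 0;
 *	attr.wakeup_events = 16;
 */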
3948
3949void perf_prepare_sample(struct perf_event_header *header,
3950 struct perf_sample_data *data,
3951 struct perf_event *event,
3952 struct pt_regs *regs)
3953{
3954 u64 sample_type = event->attr.sample_type;
3955
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003956 header->type = PERF_RECORD_SAMPLE;
Arnaldo Carvalho de Meloc320c7b2010-10-20 12:50:11 -02003957 header->size = sizeof(*header) + event->header_size;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003958
3959 header->misc = 0;
3960 header->misc |= perf_misc_flags(regs);
3961
Arnaldo Carvalho de Meloc980d102010-12-04 23:02:20 -02003962 __perf_event_header__init_id(header, data, event);
Arnaldo Carvalho de Melo6844c092010-12-03 16:36:35 -02003963
Arnaldo Carvalho de Meloc320c7b2010-10-20 12:50:11 -02003964 if (sample_type & PERF_SAMPLE_IP)
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003965 data->ip = perf_instruction_pointer(regs);
3966
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003967 if (sample_type & PERF_SAMPLE_CALLCHAIN) {
3968 int size = 1;
3969
3970 data->callchain = perf_callchain(regs);
3971
3972 if (data->callchain)
3973 size += data->callchain->nr;
3974
3975 header->size += size * sizeof(u64);
3976 }
3977
3978 if (sample_type & PERF_SAMPLE_RAW) {
3979 int size = sizeof(u32);
3980
3981 if (data->raw)
3982 size += data->raw->size;
3983 else
3984 size += sizeof(u32);
3985
3986 WARN_ON_ONCE(size & (sizeof(u64)-1));
3987 header->size += size;
3988 }
3989}
3990
Peter Zijlstraa8b0ca12011-06-27 14:41:57 +02003991static void perf_event_output(struct perf_event *event,
Ingo Molnarcdd6c482009-09-21 12:02:48 +02003992 struct perf_sample_data *data,
3993 struct pt_regs *regs)
3994{
3995 struct perf_output_handle handle;
3996 struct perf_event_header header;
3997
Frederic Weisbecker927c7a92010-07-01 16:20:36 +02003998 /* protect the callchain buffers */
3999 rcu_read_lock();
4000
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004001 perf_prepare_sample(&header, data, event, regs);
4002
Peter Zijlstraa7ac67e2011-06-27 16:47:16 +02004003 if (perf_output_begin(&handle, event, header.size))
Frederic Weisbecker927c7a92010-07-01 16:20:36 +02004004 goto exit;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004005
4006 perf_output_sample(&handle, &header, data, event);
4007
4008 perf_output_end(&handle);
Frederic Weisbecker927c7a92010-07-01 16:20:36 +02004009
4010exit:
4011 rcu_read_unlock();
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004012}
4013
4014/*
4015 * read event_id
4016 */
4017
4018struct perf_read_event {
4019 struct perf_event_header header;
4020
4021 u32 pid;
4022 u32 tid;
4023};
4024
4025static void
4026perf_event_read_event(struct perf_event *event,
4027 struct task_struct *task)
4028{
4029 struct perf_output_handle handle;
Arnaldo Carvalho de Meloc980d102010-12-04 23:02:20 -02004030 struct perf_sample_data sample;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004031 struct perf_read_event read_event = {
4032 .header = {
4033 .type = PERF_RECORD_READ,
4034 .misc = 0,
Arnaldo Carvalho de Meloc320c7b2010-10-20 12:50:11 -02004035 .size = sizeof(read_event) + event->read_size,
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004036 },
4037 .pid = perf_event_pid(event, task),
4038 .tid = perf_event_tid(event, task),
4039 };
4040 int ret;
4041
Arnaldo Carvalho de Meloc980d102010-12-04 23:02:20 -02004042 perf_event_header__init_id(&read_event.header, &sample, event);
Peter Zijlstraa7ac67e2011-06-27 16:47:16 +02004043 ret = perf_output_begin(&handle, event, read_event.header.size);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004044 if (ret)
4045 return;
4046
4047 perf_output_put(&handle, read_event);
4048 perf_output_read(&handle, event);
Arnaldo Carvalho de Meloc980d102010-12-04 23:02:20 -02004049 perf_event__output_id_sample(event, &handle, &sample);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004050
4051 perf_output_end(&handle);
4052}
4053
4054/*
4055 * task tracking -- fork/exit
4056 *
Eric B Munson3af9e852010-05-18 15:30:49 +01004057 * enabled by: attr.comm | attr.mmap | attr.mmap_data | attr.task
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004058 */
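/*
 * User-space sketch (illustrative): these records are requested per
 * event via the attr bits named above:
 *
 *	attr.task	= 1;	(fork and exit records)
 *	attr.comm	= 1;	(comm change records)
 *	attr.mmap	= 1;	(executable mmap records)
 *	attr.mmap_data	= 1;	(data mmap records)
 */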
4059
4060struct perf_task_event {
4061 struct task_struct *task;
4062 struct perf_event_context *task_ctx;
4063
4064 struct {
4065 struct perf_event_header header;
4066
4067 u32 pid;
4068 u32 ppid;
4069 u32 tid;
4070 u32 ptid;
4071 u64 time;
4072 } event_id;
4073};
4074
4075static void perf_event_task_output(struct perf_event *event,
4076 struct perf_task_event *task_event)
4077{
4078 struct perf_output_handle handle;
Arnaldo Carvalho de Meloc980d102010-12-04 23:02:20 -02004079 struct perf_sample_data sample;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004080 struct task_struct *task = task_event->task;
Arnaldo Carvalho de Meloc980d102010-12-04 23:02:20 -02004081 int ret, size = task_event->event_id.header.size;
Mike Galbraith8bb39f92010-03-26 11:11:33 +01004082
Arnaldo Carvalho de Meloc980d102010-12-04 23:02:20 -02004083 perf_event_header__init_id(&task_event->event_id.header, &sample, event);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004084
Arnaldo Carvalho de Meloc980d102010-12-04 23:02:20 -02004085 ret = perf_output_begin(&handle, event,
Peter Zijlstraa7ac67e2011-06-27 16:47:16 +02004086 task_event->event_id.header.size);
Peter Zijlstraef607772010-05-18 10:50:41 +02004087 if (ret)
Arnaldo Carvalho de Meloc980d102010-12-04 23:02:20 -02004088 goto out;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004089
4090 task_event->event_id.pid = perf_event_pid(event, task);
4091 task_event->event_id.ppid = perf_event_pid(event, current);
4092
4093 task_event->event_id.tid = perf_event_tid(event, task);
4094 task_event->event_id.ptid = perf_event_tid(event, current);
4095
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004096 perf_output_put(&handle, task_event->event_id);
4097
Arnaldo Carvalho de Meloc980d102010-12-04 23:02:20 -02004098 perf_event__output_id_sample(event, &handle, &sample);
4099
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004100 perf_output_end(&handle);
Arnaldo Carvalho de Meloc980d102010-12-04 23:02:20 -02004101out:
4102 task_event->event_id.header.size = size;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004103}
4104
4105static int perf_event_task_match(struct perf_event *event)
4106{
Peter Zijlstra6f93d0a2010-02-14 11:12:04 +01004107 if (event->state < PERF_EVENT_STATE_INACTIVE)
Peter Zijlstra22e19082010-01-18 09:12:32 +01004108 return 0;
4109
Stephane Eranian5632ab12011-01-03 18:20:01 +02004110 if (!event_filter_match(event))
Peter Zijlstra5d27c232009-12-17 13:16:32 +01004111 return 0;
4112
Eric B Munson3af9e852010-05-18 15:30:49 +01004113 if (event->attr.comm || event->attr.mmap ||
4114 event->attr.mmap_data || event->attr.task)
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004115 return 1;
4116
4117 return 0;
4118}
4119
4120static void perf_event_task_ctx(struct perf_event_context *ctx,
4121 struct perf_task_event *task_event)
4122{
4123 struct perf_event *event;
4124
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004125 list_for_each_entry_rcu(event, &ctx->event_list, event_entry) {
4126 if (perf_event_task_match(event))
4127 perf_event_task_output(event, task_event);
4128 }
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004129}
4130
4131static void perf_event_task_event(struct perf_task_event *task_event)
4132{
4133 struct perf_cpu_context *cpuctx;
Peter Zijlstra8dc85d52010-09-02 16:50:03 +02004134 struct perf_event_context *ctx;
Peter Zijlstra108b02c2010-09-06 14:32:03 +02004135 struct pmu *pmu;
Peter Zijlstra8dc85d52010-09-02 16:50:03 +02004136 int ctxn;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004137
Peter Zijlstrad6ff86c2009-11-20 22:19:46 +01004138 rcu_read_lock();
Peter Zijlstra108b02c2010-09-06 14:32:03 +02004139 list_for_each_entry_rcu(pmu, &pmus, entry) {
Peter Zijlstra41945f62010-09-16 19:17:24 +02004140 cpuctx = get_cpu_ptr(pmu->pmu_cpu_context);
Peter Zijlstra51676952010-12-07 14:18:20 +01004141 if (cpuctx->active_pmu != pmu)
4142 goto next;
Peter Zijlstra108b02c2010-09-06 14:32:03 +02004143 perf_event_task_ctx(&cpuctx->ctx, task_event);
Peter Zijlstra8dc85d52010-09-02 16:50:03 +02004144
4145 ctx = task_event->task_ctx;
4146 if (!ctx) {
4147 ctxn = pmu->task_ctx_nr;
4148 if (ctxn < 0)
Peter Zijlstra41945f62010-09-16 19:17:24 +02004149 goto next;
Peter Zijlstra8dc85d52010-09-02 16:50:03 +02004150 ctx = rcu_dereference(current->perf_event_ctxp[ctxn]);
4151 }
4152 if (ctx)
4153 perf_event_task_ctx(ctx, task_event);
Peter Zijlstra41945f62010-09-16 19:17:24 +02004154next:
4155 put_cpu_ptr(pmu->pmu_cpu_context);
Peter Zijlstra108b02c2010-09-06 14:32:03 +02004156 }
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004157 rcu_read_unlock();
4158}
4159
4160static void perf_event_task(struct task_struct *task,
4161 struct perf_event_context *task_ctx,
4162 int new)
4163{
4164 struct perf_task_event task_event;
4165
4166 if (!atomic_read(&nr_comm_events) &&
4167 !atomic_read(&nr_mmap_events) &&
4168 !atomic_read(&nr_task_events))
4169 return;
4170
4171 task_event = (struct perf_task_event){
4172 .task = task,
4173 .task_ctx = task_ctx,
4174 .event_id = {
4175 .header = {
4176 .type = new ? PERF_RECORD_FORK : PERF_RECORD_EXIT,
4177 .misc = 0,
4178 .size = sizeof(task_event.event_id),
4179 },
4180 /* .pid */
4181 /* .ppid */
4182 /* .tid */
4183 /* .ptid */
Peter Zijlstra6f93d0a2010-02-14 11:12:04 +01004184 .time = perf_clock(),
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004185 },
4186 };
4187
4188 perf_event_task_event(&task_event);
4189}
4190
4191void perf_event_fork(struct task_struct *task)
4192{
4193 perf_event_task(task, NULL, 1);
4194}
4195
4196/*
4197 * comm tracking
4198 */
4199
4200struct perf_comm_event {
4201 struct task_struct *task;
4202 char *comm;
4203 int comm_size;
4204
4205 struct {
4206 struct perf_event_header header;
4207
4208 u32 pid;
4209 u32 tid;
4210 } event_id;
4211};
4212
4213static void perf_event_comm_output(struct perf_event *event,
4214 struct perf_comm_event *comm_event)
4215{
4216 struct perf_output_handle handle;
Arnaldo Carvalho de Meloc980d102010-12-04 23:02:20 -02004217 struct perf_sample_data sample;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004218 int size = comm_event->event_id.header.size;
Arnaldo Carvalho de Meloc980d102010-12-04 23:02:20 -02004219 int ret;
4220
4221 perf_event_header__init_id(&comm_event->event_id.header, &sample, event);
4222 ret = perf_output_begin(&handle, event,
Peter Zijlstraa7ac67e2011-06-27 16:47:16 +02004223 comm_event->event_id.header.size);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004224
4225 if (ret)
Arnaldo Carvalho de Meloc980d102010-12-04 23:02:20 -02004226 goto out;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004227
4228 comm_event->event_id.pid = perf_event_pid(event, comm_event->task);
4229 comm_event->event_id.tid = perf_event_tid(event, comm_event->task);
4230
4231 perf_output_put(&handle, comm_event->event_id);
Frederic Weisbecker76369132011-05-19 19:55:04 +02004232 __output_copy(&handle, comm_event->comm,
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004233 comm_event->comm_size);
Arnaldo Carvalho de Meloc980d102010-12-04 23:02:20 -02004234
4235 perf_event__output_id_sample(event, &handle, &sample);
4236
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004237 perf_output_end(&handle);
Arnaldo Carvalho de Meloc980d102010-12-04 23:02:20 -02004238out:
4239 comm_event->event_id.header.size = size;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004240}
4241
4242static int perf_event_comm_match(struct perf_event *event)
4243{
Peter Zijlstra6f93d0a2010-02-14 11:12:04 +01004244 if (event->state < PERF_EVENT_STATE_INACTIVE)
Peter Zijlstra22e19082010-01-18 09:12:32 +01004245 return 0;
4246
Stephane Eranian5632ab12011-01-03 18:20:01 +02004247 if (!event_filter_match(event))
Peter Zijlstra5d27c232009-12-17 13:16:32 +01004248 return 0;
4249
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004250 if (event->attr.comm)
4251 return 1;
4252
4253 return 0;
4254}
4255
4256static void perf_event_comm_ctx(struct perf_event_context *ctx,
4257 struct perf_comm_event *comm_event)
4258{
4259 struct perf_event *event;
4260
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004261 list_for_each_entry_rcu(event, &ctx->event_list, event_entry) {
4262 if (perf_event_comm_match(event))
4263 perf_event_comm_output(event, comm_event);
4264 }
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004265}
4266
4267static void perf_event_comm_event(struct perf_comm_event *comm_event)
4268{
4269 struct perf_cpu_context *cpuctx;
4270 struct perf_event_context *ctx;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004271 char comm[TASK_COMM_LEN];
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004272 unsigned int size;
Peter Zijlstra108b02c2010-09-06 14:32:03 +02004273 struct pmu *pmu;
Peter Zijlstra8dc85d52010-09-02 16:50:03 +02004274 int ctxn;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004275
4276 memset(comm, 0, sizeof(comm));
Márton Németh96b02d72009-11-21 23:10:15 +01004277 strlcpy(comm, comm_event->task->comm, sizeof(comm));
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004278 size = ALIGN(strlen(comm)+1, sizeof(u64));
4279
4280 comm_event->comm = comm;
4281 comm_event->comm_size = size;
4282
4283 comm_event->event_id.header.size = sizeof(comm_event->event_id) + size;
Peter Zijlstraf6595f32009-11-20 22:19:47 +01004284 rcu_read_lock();
Peter Zijlstra108b02c2010-09-06 14:32:03 +02004285 list_for_each_entry_rcu(pmu, &pmus, entry) {
Peter Zijlstra41945f62010-09-16 19:17:24 +02004286 cpuctx = get_cpu_ptr(pmu->pmu_cpu_context);
Peter Zijlstra51676952010-12-07 14:18:20 +01004287 if (cpuctx->active_pmu != pmu)
4288 goto next;
Peter Zijlstra108b02c2010-09-06 14:32:03 +02004289 perf_event_comm_ctx(&cpuctx->ctx, comm_event);
Peter Zijlstra8dc85d52010-09-02 16:50:03 +02004290
4291 ctxn = pmu->task_ctx_nr;
4292 if (ctxn < 0)
Peter Zijlstra41945f62010-09-16 19:17:24 +02004293 goto next;
Peter Zijlstra8dc85d52010-09-02 16:50:03 +02004294
4295 ctx = rcu_dereference(current->perf_event_ctxp[ctxn]);
4296 if (ctx)
4297 perf_event_comm_ctx(ctx, comm_event);
Peter Zijlstra41945f62010-09-16 19:17:24 +02004298next:
4299 put_cpu_ptr(pmu->pmu_cpu_context);
Peter Zijlstra108b02c2010-09-06 14:32:03 +02004300 }
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004301 rcu_read_unlock();
4302}
4303
4304void perf_event_comm(struct task_struct *task)
4305{
4306 struct perf_comm_event comm_event;
Peter Zijlstra8dc85d52010-09-02 16:50:03 +02004307 struct perf_event_context *ctx;
4308 int ctxn;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004309
Peter Zijlstra8dc85d52010-09-02 16:50:03 +02004310 for_each_task_context_nr(ctxn) {
4311 ctx = task->perf_event_ctxp[ctxn];
4312 if (!ctx)
4313 continue;
4314
4315 perf_event_enable_on_exec(ctx);
4316 }
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004317
4318 if (!atomic_read(&nr_comm_events))
4319 return;
4320
4321 comm_event = (struct perf_comm_event){
4322 .task = task,
4323 /* .comm */
4324 /* .comm_size */
4325 .event_id = {
4326 .header = {
4327 .type = PERF_RECORD_COMM,
4328 .misc = 0,
4329 /* .size */
4330 },
4331 /* .pid */
4332 /* .tid */
4333 },
4334 };
4335
4336 perf_event_comm_event(&comm_event);
4337}
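
The nr_comm_events check above gates all of this: it only goes non-zero once some consumer opens an event with attr.comm set. A minimal userspace sketch of such a consumer, assuming the usual perf_event_open() ABI of this era (not part of this file):

#include <linux/perf_event.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

static int open_comm_tracking_event(pid_t pid)
{
	struct perf_event_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.size   = sizeof(attr);
	attr.type   = PERF_TYPE_SOFTWARE;
	attr.config = PERF_COUNT_SW_TASK_CLOCK;
	attr.comm   = 1;	/* emit PERF_RECORD_COMM on exec()/prctl() */

	/* no glibc wrapper for perf_event_open in this era */
	return syscall(__NR_perf_event_open, &attr, pid, -1, -1, 0);
}

The records themselves arrive through the fd's mmap()ed ring buffer.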
4338
4339/*
4340 * mmap tracking
4341 */
4342
4343struct perf_mmap_event {
4344 struct vm_area_struct *vma;
4345
4346 const char *file_name;
4347 int file_size;
4348
4349 struct {
4350 struct perf_event_header header;
4351
4352 u32 pid;
4353 u32 tid;
4354 u64 start;
4355 u64 len;
4356 u64 pgoff;
4357 } event_id;
4358};
4359
4360static void perf_event_mmap_output(struct perf_event *event,
4361 struct perf_mmap_event *mmap_event)
4362{
4363 struct perf_output_handle handle;
Arnaldo Carvalho de Meloc980d102010-12-04 23:02:20 -02004364 struct perf_sample_data sample;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004365 int size = mmap_event->event_id.header.size;
Arnaldo Carvalho de Meloc980d102010-12-04 23:02:20 -02004366 int ret;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004367
Arnaldo Carvalho de Meloc980d102010-12-04 23:02:20 -02004368 perf_event_header__init_id(&mmap_event->event_id.header, &sample, event);
4369 ret = perf_output_begin(&handle, event,
Peter Zijlstraa7ac67e2011-06-27 16:47:16 +02004370 mmap_event->event_id.header.size);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004371 if (ret)
Arnaldo Carvalho de Meloc980d102010-12-04 23:02:20 -02004372 goto out;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004373
4374 mmap_event->event_id.pid = perf_event_pid(event, current);
4375 mmap_event->event_id.tid = perf_event_tid(event, current);
4376
4377 perf_output_put(&handle, mmap_event->event_id);
Frederic Weisbecker76369132011-05-19 19:55:04 +02004378 __output_copy(&handle, mmap_event->file_name,
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004379 mmap_event->file_size);
Arnaldo Carvalho de Meloc980d102010-12-04 23:02:20 -02004380
4381 perf_event__output_id_sample(event, &handle, &sample);
4382
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004383 perf_output_end(&handle);
Arnaldo Carvalho de Meloc980d102010-12-04 23:02:20 -02004384out:
4385 mmap_event->event_id.header.size = size;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004386}
4387
4388static int perf_event_mmap_match(struct perf_event *event,
Eric B Munson3af9e852010-05-18 15:30:49 +01004389 struct perf_mmap_event *mmap_event,
4390 int executable)
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004391{
Peter Zijlstra6f93d0a2010-02-14 11:12:04 +01004392 if (event->state < PERF_EVENT_STATE_INACTIVE)
Peter Zijlstra22e19082010-01-18 09:12:32 +01004393 return 0;
4394
Stephane Eranian5632ab12011-01-03 18:20:01 +02004395 if (!event_filter_match(event))
Peter Zijlstra5d27c232009-12-17 13:16:32 +01004396 return 0;
4397
Eric B Munson3af9e852010-05-18 15:30:49 +01004398 if ((!executable && event->attr.mmap_data) ||
4399 (executable && event->attr.mmap))
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004400 return 1;
4401
4402 return 0;
4403}
4404
4405static void perf_event_mmap_ctx(struct perf_event_context *ctx,
Eric B Munson3af9e852010-05-18 15:30:49 +01004406 struct perf_mmap_event *mmap_event,
4407 int executable)
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004408{
4409 struct perf_event *event;
4410
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004411 list_for_each_entry_rcu(event, &ctx->event_list, event_entry) {
Eric B Munson3af9e852010-05-18 15:30:49 +01004412 if (perf_event_mmap_match(event, mmap_event, executable))
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004413 perf_event_mmap_output(event, mmap_event);
4414 }
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004415}
4416
4417static void perf_event_mmap_event(struct perf_mmap_event *mmap_event)
4418{
4419 struct perf_cpu_context *cpuctx;
4420 struct perf_event_context *ctx;
4421 struct vm_area_struct *vma = mmap_event->vma;
4422 struct file *file = vma->vm_file;
4423 unsigned int size;
4424 char tmp[16];
4425 char *buf = NULL;
4426 const char *name;
Peter Zijlstra108b02c2010-09-06 14:32:03 +02004427 struct pmu *pmu;
Peter Zijlstra8dc85d52010-09-02 16:50:03 +02004428 int ctxn;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004429
4430 memset(tmp, 0, sizeof(tmp));
4431
4432 if (file) {
4433 /*
Frederic Weisbecker76369132011-05-19 19:55:04 +02004434 * d_path() works from the end of the buffer backwards, so we
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004435 * need to add enough zero bytes after the string to handle
4436 * the 64bit alignment we do later.
4437 */
4438 buf = kzalloc(PATH_MAX + sizeof(u64), GFP_KERNEL);
4439 if (!buf) {
4440 name = strncpy(tmp, "//enomem", sizeof(tmp));
4441 goto got_name;
4442 }
4443 name = d_path(&file->f_path, buf, PATH_MAX);
4444 if (IS_ERR(name)) {
4445 name = strncpy(tmp, "//toolong", sizeof(tmp));
4446 goto got_name;
4447 }
4448 } else {
4449 if (arch_vma_name(mmap_event->vma)) {
4450 name = strncpy(tmp, arch_vma_name(mmap_event->vma),
4451 sizeof(tmp));
4452 goto got_name;
4453 }
4454
4455 if (!vma->vm_mm) {
4456 name = strncpy(tmp, "[vdso]", sizeof(tmp));
4457 goto got_name;
Eric B Munson3af9e852010-05-18 15:30:49 +01004458 } else if (vma->vm_start <= vma->vm_mm->start_brk &&
4459 vma->vm_end >= vma->vm_mm->brk) {
4460 name = strncpy(tmp, "[heap]", sizeof(tmp));
4461 goto got_name;
4462 } else if (vma->vm_start <= vma->vm_mm->start_stack &&
4463 vma->vm_end >= vma->vm_mm->start_stack) {
4464 name = strncpy(tmp, "[stack]", sizeof(tmp));
4465 goto got_name;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004466 }
4467
4468 name = strncpy(tmp, "//anon", sizeof(tmp));
4469 goto got_name;
4470 }
4471
4472got_name:
4473 size = ALIGN(strlen(name)+1, sizeof(u64));
4474
4475 mmap_event->file_name = name;
4476 mmap_event->file_size = size;
4477
4478 mmap_event->event_id.header.size = sizeof(mmap_event->event_id) + size;
4479
Peter Zijlstraf6d9dd22009-11-20 22:19:48 +01004480 rcu_read_lock();
Peter Zijlstra108b02c2010-09-06 14:32:03 +02004481 list_for_each_entry_rcu(pmu, &pmus, entry) {
Peter Zijlstra41945f62010-09-16 19:17:24 +02004482 cpuctx = get_cpu_ptr(pmu->pmu_cpu_context);
Peter Zijlstra51676952010-12-07 14:18:20 +01004483 if (cpuctx->active_pmu != pmu)
4484 goto next;
Peter Zijlstra108b02c2010-09-06 14:32:03 +02004485 perf_event_mmap_ctx(&cpuctx->ctx, mmap_event,
4486 vma->vm_flags & VM_EXEC);
Peter Zijlstra8dc85d52010-09-02 16:50:03 +02004487
4488 ctxn = pmu->task_ctx_nr;
4489 if (ctxn < 0)
Peter Zijlstra41945f62010-09-16 19:17:24 +02004490 goto next;
Peter Zijlstra8dc85d52010-09-02 16:50:03 +02004491
4492 ctx = rcu_dereference(current->perf_event_ctxp[ctxn]);
4493 if (ctx) {
4494 perf_event_mmap_ctx(ctx, mmap_event,
4495 vma->vm_flags & VM_EXEC);
4496 }
Peter Zijlstra41945f62010-09-16 19:17:24 +02004497next:
4498 put_cpu_ptr(pmu->pmu_cpu_context);
Peter Zijlstra108b02c2010-09-06 14:32:03 +02004499 }
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004500 rcu_read_unlock();
4501
4502 kfree(buf);
4503}
4504
Eric B Munson3af9e852010-05-18 15:30:49 +01004505void perf_event_mmap(struct vm_area_struct *vma)
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004506{
4507 struct perf_mmap_event mmap_event;
4508
4509 if (!atomic_read(&nr_mmap_events))
4510 return;
4511
4512 mmap_event = (struct perf_mmap_event){
4513 .vma = vma,
4514 /* .file_name */
4515 /* .file_size */
4516 .event_id = {
4517 .header = {
4518 .type = PERF_RECORD_MMAP,
Zhang, Yanmin39447b32010-04-19 13:32:41 +08004519 .misc = PERF_RECORD_MISC_USER,
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004520 /* .size */
4521 },
4522 /* .pid */
4523 /* .tid */
4524 .start = vma->vm_start,
4525 .len = vma->vm_end - vma->vm_start,
Peter Zijlstra3a0304e2010-02-26 10:33:41 +01004526 .pgoff = (u64)vma->vm_pgoff << PAGE_SHIFT,
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004527 },
4528 };
4529
4530 perf_event_mmap_event(&mmap_event);
4531}
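
The event_id struct in perf_mmap_event doubles as the on-the-wire layout, so the record a profiler pulls out of the ring buffer can be mirrored directly. A hedged userspace-side sketch (the trailing sample_id fields only appear when attr.sample_id_all is set):

#include <linux/perf_event.h>
#include <linux/types.h>

/* mirrors the event_id layout built in perf_event_mmap() above */
struct mmap_record {
	struct perf_event_header header;	/* .type == PERF_RECORD_MMAP */
	__u32 pid, tid;
	__u64 start;		/* vma->vm_start */
	__u64 len;		/* vma->vm_end - vma->vm_start */
	__u64 pgoff;		/* file offset in bytes */
	char  filename[];	/* NUL-padded to an 8-byte boundary */
};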
4532
4533/*
4534 * IRQ throttle logging
4535 */
4536
4537static void perf_log_throttle(struct perf_event *event, int enable)
4538{
4539 struct perf_output_handle handle;
Arnaldo Carvalho de Meloc980d102010-12-04 23:02:20 -02004540 struct perf_sample_data sample;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004541 int ret;
4542
4543 struct {
4544 struct perf_event_header header;
4545 u64 time;
4546 u64 id;
4547 u64 stream_id;
4548 } throttle_event = {
4549 .header = {
4550 .type = PERF_RECORD_THROTTLE,
4551 .misc = 0,
4552 .size = sizeof(throttle_event),
4553 },
4554 .time = perf_clock(),
4555 .id = primary_event_id(event),
4556 .stream_id = event->id,
4557 };
4558
4559 if (enable)
4560 throttle_event.header.type = PERF_RECORD_UNTHROTTLE;
4561
Arnaldo Carvalho de Meloc980d102010-12-04 23:02:20 -02004562 perf_event_header__init_id(&throttle_event.header, &sample, event);
4563
4564 ret = perf_output_begin(&handle, event,
Peter Zijlstraa7ac67e2011-06-27 16:47:16 +02004565 throttle_event.header.size);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004566 if (ret)
4567 return;
4568
4569 perf_output_put(&handle, throttle_event);
Arnaldo Carvalho de Meloc980d102010-12-04 23:02:20 -02004570 perf_event__output_id_sample(event, &handle, &sample);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004571 perf_output_end(&handle);
4572}
4573
4574/*
4575 * Generic event overflow handling, sampling.
4576 */
4577
Peter Zijlstraa8b0ca12011-06-27 14:41:57 +02004578static int __perf_event_overflow(struct perf_event *event,
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004579 int throttle, struct perf_sample_data *data,
4580 struct pt_regs *regs)
4581{
4582 int events = atomic_read(&event->event_limit);
4583 struct hw_perf_event *hwc = &event->hw;
4584 int ret = 0;
4585
Peter Zijlstra96398822010-11-24 18:55:29 +01004586 /*
4587 * Non-sampling counters might still use the PMI to fold short
4588 * hardware counters; ignore those.
4589 */
4590 if (unlikely(!is_sampling_event(event)))
4591 return 0;
4592
Peter Zijlstra163ec432011-02-16 11:22:34 +01004593 if (unlikely(hwc->interrupts >= max_samples_per_tick)) {
4594 if (throttle) {
4595 hwc->interrupts = MAX_INTERRUPTS;
4596 perf_log_throttle(event, 0);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004597 ret = 1;
4598 }
Peter Zijlstra163ec432011-02-16 11:22:34 +01004599 } else
4600 hwc->interrupts++;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004601
4602 if (event->attr.freq) {
4603 u64 now = perf_clock();
Peter Zijlstraabd50712010-01-26 18:50:16 +01004604 s64 delta = now - hwc->freq_time_stamp;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004605
Peter Zijlstraabd50712010-01-26 18:50:16 +01004606 hwc->freq_time_stamp = now;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004607
Peter Zijlstraabd50712010-01-26 18:50:16 +01004608 if (delta > 0 && delta < 2*TICK_NSEC)
4609 perf_adjust_period(event, delta, hwc->last_period);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004610 }
4611
4612 /*
4613 * XXX event_limit might not quite work as expected on inherited
4614 * events
4615 */
4616
4617 event->pending_kill = POLL_IN;
4618 if (events && atomic_dec_and_test(&event->event_limit)) {
4619 ret = 1;
4620 event->pending_kill = POLL_HUP;
Peter Zijlstraa8b0ca12011-06-27 14:41:57 +02004621 event->pending_disable = 1;
4622 irq_work_queue(&event->pending);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004623 }
4624
Peter Zijlstra453f19e2009-11-20 22:19:43 +01004625 if (event->overflow_handler)
Peter Zijlstraa8b0ca12011-06-27 14:41:57 +02004626 event->overflow_handler(event, data, regs);
Peter Zijlstra453f19e2009-11-20 22:19:43 +01004627 else
Peter Zijlstraa8b0ca12011-06-27 14:41:57 +02004628 perf_event_output(event, data, regs);
Peter Zijlstra453f19e2009-11-20 22:19:43 +01004629
Peter Zijlstraf506b3d2011-05-26 17:02:53 +02004630 if (event->fasync && event->pending_kill) {
Peter Zijlstraa8b0ca12011-06-27 14:41:57 +02004631 event->pending_wakeup = 1;
4632 irq_work_queue(&event->pending);
Peter Zijlstraf506b3d2011-05-26 17:02:53 +02004633 }
4634
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004635 return ret;
4636}
4637
Peter Zijlstraa8b0ca12011-06-27 14:41:57 +02004638int perf_event_overflow(struct perf_event *event,
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004639 struct perf_sample_data *data,
4640 struct pt_regs *regs)
4641{
Peter Zijlstraa8b0ca12011-06-27 14:41:57 +02004642 return __perf_event_overflow(event, 1, data, regs);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004643}
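
The event_limit/POLL_HUP path above is what backs finite-overflow ("refresh") usage from userspace. A sketch of the consumer side, under the usual assumptions about this era's ABI (PERF_EVENT_IOC_REFRESH arms event_limit; the fasync hookup makes each overflow wakeup deliver SIGIO):

#include <fcntl.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/perf_event.h>

static void arm_overflow_limit(int perf_fd, int nr_overflows)
{
	fcntl(perf_fd, F_SETOWN, getpid());
	fcntl(perf_fd, F_SETFL, O_ASYNC);	/* SIGIO per overflow wakeup */
	ioctl(perf_fd, PERF_EVENT_IOC_REFRESH, nr_overflows);
	/* after nr_overflows overflows the event disables itself and the
	 * final wakeup reports hang-up, matching POLL_HUP set above */
}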
4644
4645/*
4646 * Generic software event infrastructure
4647 */
4648
Peter Zijlstrab28ab832010-09-06 14:48:15 +02004649struct swevent_htable {
4650 struct swevent_hlist *swevent_hlist;
4651 struct mutex hlist_mutex;
4652 int hlist_refcount;
4653
4654 /* Recursion avoidance in each context */
4655 int recursion[PERF_NR_CONTEXTS];
4656};
4657
4658static DEFINE_PER_CPU(struct swevent_htable, swevent_htable);
4659
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004660/*
4661 * We directly increment event->count and keep a second value in
4662 * event->hw.period_left to count intervals. This period value
4663 * is kept in the range [-sample_period, 0] so that we can use the
4664 * sign as a trigger.
4665 */
4666
4667static u64 perf_swevent_set_period(struct perf_event *event)
4668{
4669 struct hw_perf_event *hwc = &event->hw;
4670 u64 period = hwc->last_period;
4671 u64 nr, offset;
4672 s64 old, val;
4673
4674 hwc->last_period = hwc->sample_period;
4675
4676again:
Peter Zijlstrae7850592010-05-21 14:43:08 +02004677 old = val = local64_read(&hwc->period_left);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004678 if (val < 0)
4679 return 0;
4680
4681 nr = div64_u64(period + val, period);
4682 offset = nr * period;
4683 val -= offset;
Peter Zijlstrae7850592010-05-21 14:43:08 +02004684 if (local64_cmpxchg(&hwc->period_left, old, val) != old)
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004685 goto again;
4686
4687 return nr;
4688}
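
A worked example of the arithmetic above, as a standalone check (illustrative only): with period = 100 and period_left having overshot to +30, one overflow is accounted and period_left rewinds to -70, i.e. 70 events remain until the next overflow.

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t period = 100;
	int64_t  val    = 30;	/* period_left after crossing zero */

	uint64_t nr     = (period + val) / period;	/* div64_u64(...) */
	int64_t  offset = nr * period;

	val -= offset;
	printf("overflows=%llu, new period_left=%lld\n",
	       (unsigned long long)nr, (long long)val);	/* 1, -70 */
	return 0;
}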
4689
Peter Zijlstra0cff7842009-11-20 22:19:44 +01004690static void perf_swevent_overflow(struct perf_event *event, u64 overflow,
Peter Zijlstraa8b0ca12011-06-27 14:41:57 +02004691 struct perf_sample_data *data,
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004692 struct pt_regs *regs)
4693{
4694 struct hw_perf_event *hwc = &event->hw;
4695 int throttle = 0;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004696
4697 data->period = event->hw.last_period;
Peter Zijlstra0cff7842009-11-20 22:19:44 +01004698 if (!overflow)
4699 overflow = perf_swevent_set_period(event);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004700
4701 if (hwc->interrupts == MAX_INTERRUPTS)
4702 return;
4703
4704 for (; overflow; overflow--) {
Peter Zijlstraa8b0ca12011-06-27 14:41:57 +02004705 if (__perf_event_overflow(event, throttle,
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004706 data, regs)) {
4707 /*
4708 * We inhibit the overflow from happening when
4709 * hwc->interrupts == MAX_INTERRUPTS.
4710 */
4711 break;
4712 }
4713 throttle = 1;
4714 }
4715}
4716
Peter Zijlstraa4eaf7f2010-06-16 14:37:10 +02004717static void perf_swevent_event(struct perf_event *event, u64 nr,
Peter Zijlstraa8b0ca12011-06-27 14:41:57 +02004718 struct perf_sample_data *data,
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004719 struct pt_regs *regs)
4720{
4721 struct hw_perf_event *hwc = &event->hw;
4722
Peter Zijlstrae7850592010-05-21 14:43:08 +02004723 local64_add(nr, &event->count);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004724
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004725 if (!regs)
4726 return;
4727
Franck Bui-Huu6c7e5502010-11-23 16:21:43 +01004728 if (!is_sampling_event(event))
Peter Zijlstra0cff7842009-11-20 22:19:44 +01004729 return;
4730
4731 if (nr == 1 && hwc->sample_period == 1 && !event->attr.freq)
Peter Zijlstraa8b0ca12011-06-27 14:41:57 +02004732 return perf_swevent_overflow(event, 1, data, regs);
Peter Zijlstra0cff7842009-11-20 22:19:44 +01004733
Peter Zijlstrae7850592010-05-21 14:43:08 +02004734 if (local64_add_negative(nr, &hwc->period_left))
Peter Zijlstra0cff7842009-11-20 22:19:44 +01004735 return;
4736
Peter Zijlstraa8b0ca12011-06-27 14:41:57 +02004737 perf_swevent_overflow(event, 0, data, regs);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004738}
4739
Frederic Weisbeckerf5ffe022009-11-23 15:42:34 +01004740static int perf_exclude_event(struct perf_event *event,
4741 struct pt_regs *regs)
4742{
Peter Zijlstraa4eaf7f2010-06-16 14:37:10 +02004743 if (event->hw.state & PERF_HES_STOPPED)
Frederic Weisbecker91b2f482011-03-07 21:27:08 +01004744 return 1;
Peter Zijlstraa4eaf7f2010-06-16 14:37:10 +02004745
Frederic Weisbeckerf5ffe022009-11-23 15:42:34 +01004746 if (regs) {
4747 if (event->attr.exclude_user && user_mode(regs))
4748 return 1;
4749
4750 if (event->attr.exclude_kernel && !user_mode(regs))
4751 return 1;
4752 }
4753
4754 return 0;
4755}
4756
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004757static int perf_swevent_match(struct perf_event *event,
4758 enum perf_type_id type,
Li Zefan6fb29152009-10-15 11:21:42 +08004759 u32 event_id,
4760 struct perf_sample_data *data,
4761 struct pt_regs *regs)
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004762{
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004763 if (event->attr.type != type)
4764 return 0;
Frederic Weisbeckerf5ffe022009-11-23 15:42:34 +01004765
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004766 if (event->attr.config != event_id)
4767 return 0;
4768
Frederic Weisbeckerf5ffe022009-11-23 15:42:34 +01004769 if (perf_exclude_event(event, regs))
4770 return 0;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004771
4772 return 1;
4773}
4774
Frederic Weisbecker76e1d902010-04-05 15:35:57 +02004775static inline u64 swevent_hash(u64 type, u32 event_id)
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004776{
Frederic Weisbecker76e1d902010-04-05 15:35:57 +02004777 u64 val = event_id | (type << 32);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004778
Frederic Weisbecker76e1d902010-04-05 15:35:57 +02004779 return hash_64(val, SWEVENT_HLIST_BITS);
4780}
4781
Frederic Weisbecker49f135e2010-05-20 10:17:46 +02004782static inline struct hlist_head *
4783__find_swevent_head(struct swevent_hlist *hlist, u64 type, u32 event_id)
Frederic Weisbecker76e1d902010-04-05 15:35:57 +02004784{
Frederic Weisbecker49f135e2010-05-20 10:17:46 +02004785 u64 hash = swevent_hash(type, event_id);
Frederic Weisbecker76e1d902010-04-05 15:35:57 +02004786
Frederic Weisbecker49f135e2010-05-20 10:17:46 +02004787 return &hlist->heads[hash];
4788}
4789
4790/* For the read side: used by events when they trigger */
4791static inline struct hlist_head *
Peter Zijlstrab28ab832010-09-06 14:48:15 +02004792find_swevent_head_rcu(struct swevent_htable *swhash, u64 type, u32 event_id)
Frederic Weisbecker49f135e2010-05-20 10:17:46 +02004793{
4794 struct swevent_hlist *hlist;
Frederic Weisbecker76e1d902010-04-05 15:35:57 +02004795
Peter Zijlstrab28ab832010-09-06 14:48:15 +02004796 hlist = rcu_dereference(swhash->swevent_hlist);
Frederic Weisbecker76e1d902010-04-05 15:35:57 +02004797 if (!hlist)
4798 return NULL;
4799
Frederic Weisbecker49f135e2010-05-20 10:17:46 +02004800 return __find_swevent_head(hlist, type, event_id);
4801}
4802
4803/* For the event head insertion and removal in the hlist */
4804static inline struct hlist_head *
Peter Zijlstrab28ab832010-09-06 14:48:15 +02004805find_swevent_head(struct swevent_htable *swhash, struct perf_event *event)
Frederic Weisbecker49f135e2010-05-20 10:17:46 +02004806{
4807 struct swevent_hlist *hlist;
4808 u32 event_id = event->attr.config;
4809 u64 type = event->attr.type;
4810
4811 /*
4812 * Event scheduling is always serialized against hlist allocation
4813 * and release, which makes the protected version suitable here.
4814 * The context lock guarantees that.
4815 */
Peter Zijlstrab28ab832010-09-06 14:48:15 +02004816 hlist = rcu_dereference_protected(swhash->swevent_hlist,
Frederic Weisbecker49f135e2010-05-20 10:17:46 +02004817 lockdep_is_held(&event->ctx->lock));
4818 if (!hlist)
4819 return NULL;
4820
4821 return __find_swevent_head(hlist, type, event_id);
Frederic Weisbecker76e1d902010-04-05 15:35:57 +02004822}
4823
4824static void do_perf_sw_event(enum perf_type_id type, u32 event_id,
Peter Zijlstraa8b0ca12011-06-27 14:41:57 +02004825 u64 nr,
Frederic Weisbecker76e1d902010-04-05 15:35:57 +02004826 struct perf_sample_data *data,
4827 struct pt_regs *regs)
4828{
Peter Zijlstrab28ab832010-09-06 14:48:15 +02004829 struct swevent_htable *swhash = &__get_cpu_var(swevent_htable);
Frederic Weisbecker76e1d902010-04-05 15:35:57 +02004830 struct perf_event *event;
4831 struct hlist_node *node;
4832 struct hlist_head *head;
4833
Frederic Weisbecker76e1d902010-04-05 15:35:57 +02004834 rcu_read_lock();
Peter Zijlstrab28ab832010-09-06 14:48:15 +02004835 head = find_swevent_head_rcu(swhash, type, event_id);
Frederic Weisbecker76e1d902010-04-05 15:35:57 +02004836 if (!head)
4837 goto end;
4838
4839 hlist_for_each_entry_rcu(event, node, head, hlist_entry) {
Li Zefan6fb29152009-10-15 11:21:42 +08004840 if (perf_swevent_match(event, type, event_id, data, regs))
Peter Zijlstraa8b0ca12011-06-27 14:41:57 +02004841 perf_swevent_event(event, nr, data, regs);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004842 }
Frederic Weisbecker76e1d902010-04-05 15:35:57 +02004843end:
4844 rcu_read_unlock();
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004845}
4846
Peter Zijlstra4ed7c922009-11-23 11:37:29 +01004847int perf_swevent_get_recursion_context(void)
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004848{
Peter Zijlstrab28ab832010-09-06 14:48:15 +02004849 struct swevent_htable *swhash = &__get_cpu_var(swevent_htable);
Frederic Weisbeckerce71b9d2009-11-22 05:26:55 +01004850
Peter Zijlstrab28ab832010-09-06 14:48:15 +02004851 return get_recursion_context(swhash->recursion);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004852}
Ingo Molnar645e8cc2009-11-22 12:20:19 +01004853EXPORT_SYMBOL_GPL(perf_swevent_get_recursion_context);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004854
Jesper Juhlfa9f90b2010-11-28 21:39:34 +01004855inline void perf_swevent_put_recursion_context(int rctx)
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004856{
Peter Zijlstrab28ab832010-09-06 14:48:15 +02004857 struct swevent_htable *swhash = &__get_cpu_var(swevent_htable);
Frederic Weisbecker927c7a92010-07-01 16:20:36 +02004858
Peter Zijlstrab28ab832010-09-06 14:48:15 +02004859 put_recursion_context(swhash->recursion, rctx);
Frederic Weisbeckerce71b9d2009-11-22 05:26:55 +01004860}
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004861
Peter Zijlstraa8b0ca12011-06-27 14:41:57 +02004862void __perf_sw_event(u32 event_id, u64 nr, struct pt_regs *regs, u64 addr)
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004863{
Ingo Molnara4234bf2009-11-23 10:57:59 +01004864 struct perf_sample_data data;
Peter Zijlstra4ed7c922009-11-23 11:37:29 +01004865 int rctx;
4866
Peter Zijlstra1c024eca2010-05-19 14:02:22 +02004867 preempt_disable_notrace();
Peter Zijlstra4ed7c922009-11-23 11:37:29 +01004868 rctx = perf_swevent_get_recursion_context();
4869 if (rctx < 0)
4870 return;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004871
Peter Zijlstradc1d6282010-03-03 15:55:04 +01004872 perf_sample_data_init(&data, addr);
Ingo Molnara4234bf2009-11-23 10:57:59 +01004873
Peter Zijlstraa8b0ca12011-06-27 14:41:57 +02004874 do_perf_sw_event(PERF_TYPE_SOFTWARE, event_id, nr, &data, regs);
Peter Zijlstra4ed7c922009-11-23 11:37:29 +01004875
4876 perf_swevent_put_recursion_context(rctx);
Peter Zijlstra1c024eca2010-05-19 14:02:22 +02004877 preempt_enable_notrace();
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004878}
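
Callers reach this entry point through the perf_sw_event() wrapper from include/linux/perf_event.h; for instance, the architecture fault handlers of this era account page faults roughly like this (kernel-side sketch, not standalone):

	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);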
4879
4880static void perf_swevent_read(struct perf_event *event)
4881{
4882}
4883
Peter Zijlstraa4eaf7f2010-06-16 14:37:10 +02004884static int perf_swevent_add(struct perf_event *event, int flags)
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004885{
Peter Zijlstrab28ab832010-09-06 14:48:15 +02004886 struct swevent_htable *swhash = &__get_cpu_var(swevent_htable);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004887 struct hw_perf_event *hwc = &event->hw;
Frederic Weisbecker76e1d902010-04-05 15:35:57 +02004888 struct hlist_head *head;
4889
Franck Bui-Huu6c7e5502010-11-23 16:21:43 +01004890 if (is_sampling_event(event)) {
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004891 hwc->last_period = hwc->sample_period;
4892 perf_swevent_set_period(event);
4893 }
Frederic Weisbecker76e1d902010-04-05 15:35:57 +02004894
Peter Zijlstraa4eaf7f2010-06-16 14:37:10 +02004895 hwc->state = !(flags & PERF_EF_START);
4896
Peter Zijlstrab28ab832010-09-06 14:48:15 +02004897 head = find_swevent_head(swhash, event);
Frederic Weisbecker76e1d902010-04-05 15:35:57 +02004898 if (WARN_ON_ONCE(!head))
4899 return -EINVAL;
4900
4901 hlist_add_head_rcu(&event->hlist_entry, head);
4902
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004903 return 0;
4904}
4905
Peter Zijlstraa4eaf7f2010-06-16 14:37:10 +02004906static void perf_swevent_del(struct perf_event *event, int flags)
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004907{
Frederic Weisbecker76e1d902010-04-05 15:35:57 +02004908 hlist_del_rcu(&event->hlist_entry);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02004909}
4910
Peter Zijlstraa4eaf7f2010-06-16 14:37:10 +02004911static void perf_swevent_start(struct perf_event *event, int flags)
Peter Zijlstrac6df8d52010-06-03 11:21:20 +02004912{
Peter Zijlstraa4eaf7f2010-06-16 14:37:10 +02004913 event->hw.state = 0;
Peter Zijlstrac6df8d52010-06-03 11:21:20 +02004914}
4915
Peter Zijlstraa4eaf7f2010-06-16 14:37:10 +02004916static void perf_swevent_stop(struct perf_event *event, int flags)
Peter Zijlstrac6df8d52010-06-03 11:21:20 +02004917{
Peter Zijlstraa4eaf7f2010-06-16 14:37:10 +02004918 event->hw.state = PERF_HES_STOPPED;
Peter Zijlstrac6df8d52010-06-03 11:21:20 +02004919}
4920
Frederic Weisbecker49f135e2010-05-20 10:17:46 +02004921/* Deref the hlist from the update side */
4922static inline struct swevent_hlist *
Peter Zijlstrab28ab832010-09-06 14:48:15 +02004923swevent_hlist_deref(struct swevent_htable *swhash)
Frederic Weisbecker49f135e2010-05-20 10:17:46 +02004924{
Peter Zijlstrab28ab832010-09-06 14:48:15 +02004925 return rcu_dereference_protected(swhash->swevent_hlist,
4926 lockdep_is_held(&swhash->hlist_mutex));
Frederic Weisbecker49f135e2010-05-20 10:17:46 +02004927}
4928
Peter Zijlstrab28ab832010-09-06 14:48:15 +02004929static void swevent_hlist_release(struct swevent_htable *swhash)
Frederic Weisbecker76e1d902010-04-05 15:35:57 +02004930{
Peter Zijlstrab28ab832010-09-06 14:48:15 +02004931 struct swevent_hlist *hlist = swevent_hlist_deref(swhash);
Frederic Weisbecker76e1d902010-04-05 15:35:57 +02004932
Frederic Weisbecker49f135e2010-05-20 10:17:46 +02004933 if (!hlist)
Frederic Weisbecker76e1d902010-04-05 15:35:57 +02004934 return;
4935
Peter Zijlstrab28ab832010-09-06 14:48:15 +02004936 rcu_assign_pointer(swhash->swevent_hlist, NULL);
Lai Jiangshanfa4bbc42011-03-18 12:08:29 +08004937 kfree_rcu(hlist, rcu_head);
Frederic Weisbecker76e1d902010-04-05 15:35:57 +02004938}
4939
4940static void swevent_hlist_put_cpu(struct perf_event *event, int cpu)
4941{
Peter Zijlstrab28ab832010-09-06 14:48:15 +02004942 struct swevent_htable *swhash = &per_cpu(swevent_htable, cpu);
Frederic Weisbecker76e1d902010-04-05 15:35:57 +02004943
Peter Zijlstrab28ab832010-09-06 14:48:15 +02004944 mutex_lock(&swhash->hlist_mutex);
Frederic Weisbecker76e1d902010-04-05 15:35:57 +02004945
Peter Zijlstrab28ab832010-09-06 14:48:15 +02004946 if (!--swhash->hlist_refcount)
4947 swevent_hlist_release(swhash);
Frederic Weisbecker76e1d902010-04-05 15:35:57 +02004948
Peter Zijlstrab28ab832010-09-06 14:48:15 +02004949 mutex_unlock(&swhash->hlist_mutex);
Frederic Weisbecker76e1d902010-04-05 15:35:57 +02004950}
4951
4952static void swevent_hlist_put(struct perf_event *event)
4953{
4954 int cpu;
4955
4956 if (event->cpu != -1) {
4957 swevent_hlist_put_cpu(event, event->cpu);
4958 return;
4959 }
4960
4961 for_each_possible_cpu(cpu)
4962 swevent_hlist_put_cpu(event, cpu);
4963}
4964
4965static int swevent_hlist_get_cpu(struct perf_event *event, int cpu)
4966{
Peter Zijlstrab28ab832010-09-06 14:48:15 +02004967 struct swevent_htable *swhash = &per_cpu(swevent_htable, cpu);
Frederic Weisbecker76e1d902010-04-05 15:35:57 +02004968 int err = 0;
4969
Peter Zijlstrab28ab832010-09-06 14:48:15 +02004970 mutex_lock(&swhash->hlist_mutex);
Frederic Weisbecker76e1d902010-04-05 15:35:57 +02004971
Peter Zijlstrab28ab832010-09-06 14:48:15 +02004972 if (!swevent_hlist_deref(swhash) && cpu_online(cpu)) {
Frederic Weisbecker76e1d902010-04-05 15:35:57 +02004973 struct swevent_hlist *hlist;
4974
4975 hlist = kzalloc(sizeof(*hlist), GFP_KERNEL);
4976 if (!hlist) {
4977 err = -ENOMEM;
4978 goto exit;
4979 }
Peter Zijlstrab28ab832010-09-06 14:48:15 +02004980 rcu_assign_pointer(swhash->swevent_hlist, hlist);
Frederic Weisbecker76e1d902010-04-05 15:35:57 +02004981 }
Peter Zijlstrab28ab832010-09-06 14:48:15 +02004982 swhash->hlist_refcount++;
Peter Zijlstra9ed60602010-06-11 17:36:35 +02004983exit:
Peter Zijlstrab28ab832010-09-06 14:48:15 +02004984 mutex_unlock(&swhash->hlist_mutex);
Frederic Weisbecker76e1d902010-04-05 15:35:57 +02004985
4986 return err;
4987}
4988
4989static int swevent_hlist_get(struct perf_event *event)
4990{
4991 int err;
4992 int cpu, failed_cpu;
4993
4994 if (event->cpu != -1)
4995 return swevent_hlist_get_cpu(event, event->cpu);
4996
4997 get_online_cpus();
4998 for_each_possible_cpu(cpu) {
4999 err = swevent_hlist_get_cpu(event, cpu);
5000 if (err) {
5001 failed_cpu = cpu;
5002 goto fail;
5003 }
5004 }
5005 put_online_cpus();
5006
5007 return 0;
Peter Zijlstra9ed60602010-06-11 17:36:35 +02005008fail:
Frederic Weisbecker76e1d902010-04-05 15:35:57 +02005009 for_each_possible_cpu(cpu) {
5010 if (cpu == failed_cpu)
5011 break;
5012 swevent_hlist_put_cpu(event, cpu);
5013 }
5014
5015 put_online_cpus();
5016 return err;
5017}
5018
Jason Barond430d3d2011-03-16 17:29:47 -04005019struct jump_label_key perf_swevent_enabled[PERF_COUNT_SW_MAX];
Frederic Weisbecker95476b62010-04-14 23:42:18 +02005020
Peter Zijlstrab0a873e2010-06-11 13:35:08 +02005021static void sw_perf_event_destroy(struct perf_event *event)
5022{
5023 u64 event_id = event->attr.config;
5024
5025 WARN_ON(event->parent);
5026
Peter Zijlstra7e54a5a2010-10-14 22:32:45 +02005027 jump_label_dec(&perf_swevent_enabled[event_id]);
Peter Zijlstrab0a873e2010-06-11 13:35:08 +02005028 swevent_hlist_put(event);
5029}
5030
5031static int perf_swevent_init(struct perf_event *event)
5032{
5033 int event_id = event->attr.config;
5034
5035 if (event->attr.type != PERF_TYPE_SOFTWARE)
5036 return -ENOENT;
5037
5038 switch (event_id) {
5039 case PERF_COUNT_SW_CPU_CLOCK:
5040 case PERF_COUNT_SW_TASK_CLOCK:
5041 return -ENOENT;
5042
5043 default:
5044 break;
5045 }
5046
Dan Carpenterce677832010-10-24 21:50:42 +02005047 if (event_id >= PERF_COUNT_SW_MAX)
Peter Zijlstrab0a873e2010-06-11 13:35:08 +02005048 return -ENOENT;
5049
5050 if (!event->parent) {
5051 int err;
5052
5053 err = swevent_hlist_get(event);
5054 if (err)
5055 return err;
5056
Peter Zijlstra7e54a5a2010-10-14 22:32:45 +02005057 jump_label_inc(&perf_swevent_enabled[event_id]);
Peter Zijlstrab0a873e2010-06-11 13:35:08 +02005058 event->destroy = sw_perf_event_destroy;
5059 }
5060
5061 return 0;
5062}
5063
5064static struct pmu perf_swevent = {
Peter Zijlstra89a1e182010-09-07 17:34:50 +02005065 .task_ctx_nr = perf_sw_context,
5066
Peter Zijlstrab0a873e2010-06-11 13:35:08 +02005067 .event_init = perf_swevent_init,
Peter Zijlstraa4eaf7f2010-06-16 14:37:10 +02005068 .add = perf_swevent_add,
5069 .del = perf_swevent_del,
5070 .start = perf_swevent_start,
5071 .stop = perf_swevent_stop,
Peter Zijlstra1c024eca2010-05-19 14:02:22 +02005072 .read = perf_swevent_read,
Peter Zijlstra1c024eca2010-05-19 14:02:22 +02005073};
Frederic Weisbecker95476b62010-04-14 23:42:18 +02005074
Peter Zijlstrab0a873e2010-06-11 13:35:08 +02005075#ifdef CONFIG_EVENT_TRACING
5076
Peter Zijlstra1c024eca2010-05-19 14:02:22 +02005077static int perf_tp_filter_match(struct perf_event *event,
Frederic Weisbecker95476b62010-04-14 23:42:18 +02005078 struct perf_sample_data *data)
5079{
5080 void *record = data->raw->data;
5081
5082 if (likely(!event->filter) || filter_match_preds(event->filter, record))
5083 return 1;
5084 return 0;
5085}
5086
Peter Zijlstra1c024eca2010-05-19 14:02:22 +02005087static int perf_tp_event_match(struct perf_event *event,
5088 struct perf_sample_data *data,
5089 struct pt_regs *regs)
5090{
Frederic Weisbeckera0f7d0f2011-03-07 21:27:09 +01005091 if (event->hw.state & PERF_HES_STOPPED)
5092 return 0;
Peter Zijlstra580d6072010-05-20 20:54:31 +02005093 /*
5094 * All tracepoints are from kernel-space.
5095 */
5096 if (event->attr.exclude_kernel)
Peter Zijlstra1c024eca2010-05-19 14:02:22 +02005097 return 0;
5098
5099 if (!perf_tp_filter_match(event, data))
5100 return 0;
5101
5102 return 1;
5103}
5104
5105void perf_tp_event(u64 addr, u64 count, void *record, int entry_size,
Peter Zijlstraecc55f82010-05-21 15:11:34 +02005106 struct pt_regs *regs, struct hlist_head *head, int rctx)
Peter Zijlstra1c024eca2010-05-19 14:02:22 +02005107{
5108 struct perf_sample_data data;
5109 struct perf_event *event;
5110 struct hlist_node *node;
5111
5112 struct perf_raw_record raw = {
5113 .size = entry_size,
5114 .data = record,
5115 };
5116
5117 perf_sample_data_init(&data, addr);
5118 data.raw = &raw;
5119
Peter Zijlstra1c024eca2010-05-19 14:02:22 +02005120 hlist_for_each_entry_rcu(event, node, head, hlist_entry) {
5121 if (perf_tp_event_match(event, &data, regs))
Peter Zijlstraa8b0ca12011-06-27 14:41:57 +02005122 perf_swevent_event(event, count, &data, regs);
Peter Zijlstra1c024eca2010-05-19 14:02:22 +02005123 }
Peter Zijlstraecc55f82010-05-21 15:11:34 +02005124
5125 perf_swevent_put_recursion_context(rctx);
Peter Zijlstra1c024eca2010-05-19 14:02:22 +02005126}
5127EXPORT_SYMBOL_GPL(perf_tp_event);
5128
Ingo Molnarcdd6c482009-09-21 12:02:48 +02005129static void tp_perf_event_destroy(struct perf_event *event)
5130{
Peter Zijlstra1c024eca2010-05-19 14:02:22 +02005131 perf_trace_destroy(event);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02005132}
5133
Peter Zijlstrab0a873e2010-06-11 13:35:08 +02005134static int perf_tp_event_init(struct perf_event *event)
Ingo Molnarcdd6c482009-09-21 12:02:48 +02005135{
Frederic Weisbecker76e1d902010-04-05 15:35:57 +02005136 int err;
5137
Peter Zijlstrab0a873e2010-06-11 13:35:08 +02005138 if (event->attr.type != PERF_TYPE_TRACEPOINT)
5139 return -ENOENT;
5140
Peter Zijlstra1c024eca2010-05-19 14:02:22 +02005141 err = perf_trace_init(event);
5142 if (err)
Peter Zijlstrab0a873e2010-06-11 13:35:08 +02005143 return err;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02005144
5145 event->destroy = tp_perf_event_destroy;
5146
Peter Zijlstrab0a873e2010-06-11 13:35:08 +02005147 return 0;
5148}
5149
5150static struct pmu perf_tracepoint = {
Peter Zijlstra89a1e182010-09-07 17:34:50 +02005151 .task_ctx_nr = perf_sw_context,
5152
Peter Zijlstrab0a873e2010-06-11 13:35:08 +02005153 .event_init = perf_tp_event_init,
Peter Zijlstraa4eaf7f2010-06-16 14:37:10 +02005154 .add = perf_trace_add,
5155 .del = perf_trace_del,
5156 .start = perf_swevent_start,
5157 .stop = perf_swevent_stop,
Peter Zijlstrab0a873e2010-06-11 13:35:08 +02005158 .read = perf_swevent_read,
Peter Zijlstrab0a873e2010-06-11 13:35:08 +02005159};
5160
5161static inline void perf_tp_register(void)
5162{
Peter Zijlstra2e80a822010-11-17 23:17:36 +01005163 perf_pmu_register(&perf_tracepoint, "tracepoint", PERF_TYPE_TRACEPOINT);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02005164}
Li Zefan6fb29152009-10-15 11:21:42 +08005165
5166static int perf_event_set_filter(struct perf_event *event, void __user *arg)
5167{
5168 char *filter_str;
5169 int ret;
5170
5171 if (event->attr.type != PERF_TYPE_TRACEPOINT)
5172 return -EINVAL;
5173
5174 filter_str = strndup_user(arg, PAGE_SIZE);
5175 if (IS_ERR(filter_str))
5176 return PTR_ERR(filter_str);
5177
5178 ret = ftrace_profile_set_filter(event, event->attr.config, filter_str);
5179
5180 kfree(filter_str);
5181 return ret;
5182}
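
Userspace drives ftrace_profile_set_filter() via the PERF_EVENT_IOC_SET_FILTER ioctl on a tracepoint event fd. A minimal sketch (the string uses the ftrace predicate language; the example predicate is illustrative):

#include <sys/ioctl.h>
#include <linux/perf_event.h>

static int set_tracepoint_filter(int perf_fd)
{
	/* returns -EINVAL for non-tracepoint events, as enforced above */
	return ioctl(perf_fd, PERF_EVENT_IOC_SET_FILTER,
		     "common_pid != 0");
}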
5183
5184static void perf_event_free_filter(struct perf_event *event)
5185{
5186 ftrace_profile_free_filter(event);
5187}
5188
Ingo Molnarcdd6c482009-09-21 12:02:48 +02005189#else
Li Zefan6fb29152009-10-15 11:21:42 +08005190
Peter Zijlstrab0a873e2010-06-11 13:35:08 +02005191static inline void perf_tp_register(void)
Ingo Molnarcdd6c482009-09-21 12:02:48 +02005192{
Ingo Molnarcdd6c482009-09-21 12:02:48 +02005193}
Li Zefan6fb29152009-10-15 11:21:42 +08005194
5195static int perf_event_set_filter(struct perf_event *event, void __user *arg)
5196{
5197 return -ENOENT;
5198}
5199
5200static void perf_event_free_filter(struct perf_event *event)
5201{
5202}
5203
Li Zefan07b139c2009-12-21 14:27:35 +08005204#endif /* CONFIG_EVENT_TRACING */
Ingo Molnarcdd6c482009-09-21 12:02:48 +02005205
Frederic Weisbecker24f1e32c2009-09-09 19:22:48 +02005206#ifdef CONFIG_HAVE_HW_BREAKPOINT
Frederic Weisbeckerf5ffe022009-11-23 15:42:34 +01005207void perf_bp_event(struct perf_event *bp, void *data)
Frederic Weisbecker24f1e32c2009-09-09 19:22:48 +02005208{
Frederic Weisbeckerf5ffe022009-11-23 15:42:34 +01005209 struct perf_sample_data sample;
5210 struct pt_regs *regs = data;
5211
Peter Zijlstradc1d6282010-03-03 15:55:04 +01005212 perf_sample_data_init(&sample, bp->attr.bp_addr);
Frederic Weisbeckerf5ffe022009-11-23 15:42:34 +01005213
Peter Zijlstraa4eaf7f2010-06-16 14:37:10 +02005214 if (!bp->hw.state && !perf_exclude_event(bp, regs))
Peter Zijlstraa8b0ca12011-06-27 14:41:57 +02005215 perf_swevent_event(bp, 1, &sample, regs);
Frederic Weisbecker24f1e32c2009-09-09 19:22:48 +02005216}
5217#endif
5218
Peter Zijlstrab0a873e2010-06-11 13:35:08 +02005219/*
5220 * hrtimer based swevent callback
5221 */
Ingo Molnarcdd6c482009-09-21 12:02:48 +02005222
Peter Zijlstrab0a873e2010-06-11 13:35:08 +02005223static enum hrtimer_restart perf_swevent_hrtimer(struct hrtimer *hrtimer)
Ingo Molnarcdd6c482009-09-21 12:02:48 +02005224{
Peter Zijlstrab0a873e2010-06-11 13:35:08 +02005225 enum hrtimer_restart ret = HRTIMER_RESTART;
5226 struct perf_sample_data data;
5227 struct pt_regs *regs;
5228 struct perf_event *event;
5229 u64 period;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02005230
Peter Zijlstrab0a873e2010-06-11 13:35:08 +02005231 event = container_of(hrtimer, struct perf_event, hw.hrtimer);
Peter Zijlstraba3dd362011-02-15 12:41:46 +01005232
5233 if (event->state != PERF_EVENT_STATE_ACTIVE)
5234 return HRTIMER_NORESTART;
5235
Peter Zijlstrab0a873e2010-06-11 13:35:08 +02005236 event->pmu->read(event);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02005237
Peter Zijlstrab0a873e2010-06-11 13:35:08 +02005238 perf_sample_data_init(&data, 0);
5239 data.period = event->hw.last_period;
5240 regs = get_irq_regs();
5241
5242 if (regs && !perf_exclude_event(event, regs)) {
5243 if (!(event->attr.exclude_idle && current->pid == 0))
Peter Zijlstraa8b0ca12011-06-27 14:41:57 +02005244 if (perf_event_overflow(event, &data, regs))
Peter Zijlstrab0a873e2010-06-11 13:35:08 +02005245 ret = HRTIMER_NORESTART;
5246 }
5247
5248 period = max_t(u64, 10000, event->hw.sample_period);
5249 hrtimer_forward_now(hrtimer, ns_to_ktime(period));
5250
5251 return ret;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02005252}
5253
Peter Zijlstrab0a873e2010-06-11 13:35:08 +02005254static void perf_swevent_start_hrtimer(struct perf_event *event)
Ingo Molnarcdd6c482009-09-21 12:02:48 +02005255{
Peter Zijlstrab0a873e2010-06-11 13:35:08 +02005256 struct hw_perf_event *hwc = &event->hw;
Franck Bui-Huu5d508e82010-11-23 16:21:45 +01005257 s64 period;
5258
5259 if (!is_sampling_event(event))
5260 return;
Peter Zijlstrab0a873e2010-06-11 13:35:08 +02005261
Franck Bui-Huu5d508e82010-11-23 16:21:45 +01005262 period = local64_read(&hwc->period_left);
5263 if (period) {
5264 if (period < 0)
5265 period = 10000;
Peter Zijlstrafa407f32010-06-24 12:35:12 +02005266
Franck Bui-Huu5d508e82010-11-23 16:21:45 +01005267 local64_set(&hwc->period_left, 0);
5268 } else {
5269 period = max_t(u64, 10000, hwc->sample_period);
5270 }
5271 __hrtimer_start_range_ns(&hwc->hrtimer,
Peter Zijlstrab0a873e2010-06-11 13:35:08 +02005272 ns_to_ktime(period), 0,
Peter Zijlstrab5ab4cd2010-09-06 16:32:21 +02005273 HRTIMER_MODE_REL_PINNED, 0);
Peter Zijlstrab0a873e2010-06-11 13:35:08 +02005274}
5275
5276static void perf_swevent_cancel_hrtimer(struct perf_event *event)
5277{
5278 struct hw_perf_event *hwc = &event->hw;
5279
Franck Bui-Huu6c7e5502010-11-23 16:21:43 +01005280 if (is_sampling_event(event)) {
Peter Zijlstrab0a873e2010-06-11 13:35:08 +02005281 ktime_t remaining = hrtimer_get_remaining(&hwc->hrtimer);
Peter Zijlstrafa407f32010-06-24 12:35:12 +02005282 local64_set(&hwc->period_left, ktime_to_ns(remaining));
Peter Zijlstrab0a873e2010-06-11 13:35:08 +02005283
5284 hrtimer_cancel(&hwc->hrtimer);
5285 }
5286}
5287
Peter Zijlstraba3dd362011-02-15 12:41:46 +01005288static void perf_swevent_init_hrtimer(struct perf_event *event)
5289{
5290 struct hw_perf_event *hwc = &event->hw;
5291
5292 if (!is_sampling_event(event))
5293 return;
5294
5295 hrtimer_init(&hwc->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
5296 hwc->hrtimer.function = perf_swevent_hrtimer;
5297
5298 /*
5299 * Since hrtimers have a fixed rate, we can do a static freq->period
5300 * mapping and avoid the whole period adjust feedback stuff.
5301 */
5302 if (event->attr.freq) {
5303 long freq = event->attr.sample_freq;
5304
5305 event->attr.sample_period = NSEC_PER_SEC / freq;
5306 hwc->sample_period = event->attr.sample_period;
5307 local64_set(&hwc->period_left, hwc->sample_period);
5308 event->attr.freq = 0;
5309 }
5310}
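
For example, attr.sample_freq = 4000 maps statically to attr.sample_period = NSEC_PER_SEC / 4000 = 250000, i.e. the hrtimer fires every 250 us regardless of load; clearing attr.freq afterwards is what keeps the generic frequency-adjustment code from ever touching the period again.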
5311
Peter Zijlstrab0a873e2010-06-11 13:35:08 +02005312/*
5313 * Software event: cpu wall time clock
5314 */
5315
5316static void cpu_clock_event_update(struct perf_event *event)
5317{
Peter Zijlstrab0a873e2010-06-11 13:35:08 +02005318 s64 prev;
5319 u64 now;
5320
Peter Zijlstraa4eaf7f2010-06-16 14:37:10 +02005321 now = local_clock();
Peter Zijlstrab0a873e2010-06-11 13:35:08 +02005322 prev = local64_xchg(&event->hw.prev_count, now);
5323 local64_add(now - prev, &event->count);
5324}
5325
Peter Zijlstraa4eaf7f2010-06-16 14:37:10 +02005326static void cpu_clock_event_start(struct perf_event *event, int flags)
Peter Zijlstrab0a873e2010-06-11 13:35:08 +02005327{
Peter Zijlstraa4eaf7f2010-06-16 14:37:10 +02005328 local64_set(&event->hw.prev_count, local_clock());
Peter Zijlstrab0a873e2010-06-11 13:35:08 +02005329 perf_swevent_start_hrtimer(event);
Peter Zijlstraa4eaf7f2010-06-16 14:37:10 +02005330}
5331
5332static void cpu_clock_event_stop(struct perf_event *event, int flags)
5333{
5334 perf_swevent_cancel_hrtimer(event);
5335 cpu_clock_event_update(event);
5336}
5337
5338static int cpu_clock_event_add(struct perf_event *event, int flags)
5339{
5340 if (flags & PERF_EF_START)
5341 cpu_clock_event_start(event, flags);
Peter Zijlstrab0a873e2010-06-11 13:35:08 +02005342
5343 return 0;
5344}
5345
Peter Zijlstraa4eaf7f2010-06-16 14:37:10 +02005346static void cpu_clock_event_del(struct perf_event *event, int flags)
Peter Zijlstrab0a873e2010-06-11 13:35:08 +02005347{
Peter Zijlstraa4eaf7f2010-06-16 14:37:10 +02005348 cpu_clock_event_stop(event, flags);
Peter Zijlstrab0a873e2010-06-11 13:35:08 +02005349}
5350
5351static void cpu_clock_event_read(struct perf_event *event)
5352{
5353 cpu_clock_event_update(event);
5354}
5355
5356static int cpu_clock_event_init(struct perf_event *event)
5357{
5358 if (event->attr.type != PERF_TYPE_SOFTWARE)
5359 return -ENOENT;
5360
5361 if (event->attr.config != PERF_COUNT_SW_CPU_CLOCK)
5362 return -ENOENT;
5363
Peter Zijlstraba3dd362011-02-15 12:41:46 +01005364 perf_swevent_init_hrtimer(event);
5365
Peter Zijlstrab0a873e2010-06-11 13:35:08 +02005366 return 0;
5367}
5368
5369static struct pmu perf_cpu_clock = {
Peter Zijlstra89a1e182010-09-07 17:34:50 +02005370 .task_ctx_nr = perf_sw_context,
5371
Peter Zijlstrab0a873e2010-06-11 13:35:08 +02005372 .event_init = cpu_clock_event_init,
Peter Zijlstraa4eaf7f2010-06-16 14:37:10 +02005373 .add = cpu_clock_event_add,
5374 .del = cpu_clock_event_del,
5375 .start = cpu_clock_event_start,
5376 .stop = cpu_clock_event_stop,
Peter Zijlstrab0a873e2010-06-11 13:35:08 +02005377 .read = cpu_clock_event_read,
5378};
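
End to end, this pmu is what a plain wall-clock software counter resolves to. A self-contained userspace sketch (assumed usage; error handling elided) that opens PERF_COUNT_SW_CPU_CLOCK on the calling task and reads the nanoseconds accumulated by cpu_clock_event_update():

#include <linux/perf_event.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

int main(void)
{
	struct perf_event_attr attr;
	uint64_t count;
	volatile long i;
	int fd;

	memset(&attr, 0, sizeof(attr));
	attr.size   = sizeof(attr);
	attr.type   = PERF_TYPE_SOFTWARE;
	attr.config = PERF_COUNT_SW_CPU_CLOCK;

	fd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
	if (fd < 0)
		return 1;

	for (i = 0; i < 100000000; i++)	/* burn some cpu time */
		;

	if (read(fd, &count, sizeof(count)) == sizeof(count))
		printf("cpu-clock: %llu ns\n", (unsigned long long)count);
	return 0;
}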
5379
5380/*
5381 * Software event: task time clock
5382 */
5383
5384static void task_clock_event_update(struct perf_event *event, u64 now)
5385{
5386 u64 prev;
5387 s64 delta;
5388
5389 prev = local64_xchg(&event->hw.prev_count, now);
5390 delta = now - prev;
5391 local64_add(delta, &event->count);
5392}
5393
Peter Zijlstraa4eaf7f2010-06-16 14:37:10 +02005394static void task_clock_event_start(struct perf_event *event, int flags)
Peter Zijlstrab0a873e2010-06-11 13:35:08 +02005395{
Peter Zijlstraa4eaf7f2010-06-16 14:37:10 +02005396 local64_set(&event->hw.prev_count, event->ctx->time);
Peter Zijlstrab0a873e2010-06-11 13:35:08 +02005397 perf_swevent_start_hrtimer(event);
Peter Zijlstraa4eaf7f2010-06-16 14:37:10 +02005398}
5399
5400static void task_clock_event_stop(struct perf_event *event, int flags)
5401{
5402 perf_swevent_cancel_hrtimer(event);
5403 task_clock_event_update(event, event->ctx->time);
5404}
5405
5406static int task_clock_event_add(struct perf_event *event, int flags)
5407{
5408 if (flags & PERF_EF_START)
5409 task_clock_event_start(event, flags);
Peter Zijlstrab0a873e2010-06-11 13:35:08 +02005410
5411 return 0;
5412}
5413
Peter Zijlstraa4eaf7f2010-06-16 14:37:10 +02005414static void task_clock_event_del(struct perf_event *event, int flags)
Peter Zijlstrab0a873e2010-06-11 13:35:08 +02005415{
Peter Zijlstraa4eaf7f2010-06-16 14:37:10 +02005416 task_clock_event_stop(event, PERF_EF_UPDATE);
Peter Zijlstrab0a873e2010-06-11 13:35:08 +02005417}
5418
5419static void task_clock_event_read(struct perf_event *event)
5420{
Peter Zijlstra768a06e2011-02-22 16:52:24 +01005421 u64 now = perf_clock();
5422 u64 delta = now - event->ctx->timestamp;
5423 u64 time = event->ctx->time + delta;
Peter Zijlstrab0a873e2010-06-11 13:35:08 +02005424
5425 task_clock_event_update(event, time);
5426}
5427
5428static int task_clock_event_init(struct perf_event *event)
5429{
5430 if (event->attr.type != PERF_TYPE_SOFTWARE)
5431 return -ENOENT;
5432
5433 if (event->attr.config != PERF_COUNT_SW_TASK_CLOCK)
5434 return -ENOENT;
5435
Peter Zijlstraba3dd362011-02-15 12:41:46 +01005436 perf_swevent_init_hrtimer(event);
5437
Peter Zijlstrab0a873e2010-06-11 13:35:08 +02005438 return 0;
5439}
5440
5441static struct pmu perf_task_clock = {
Peter Zijlstra89a1e182010-09-07 17:34:50 +02005442 .task_ctx_nr = perf_sw_context,
5443
Peter Zijlstrab0a873e2010-06-11 13:35:08 +02005444 .event_init = task_clock_event_init,
Peter Zijlstraa4eaf7f2010-06-16 14:37:10 +02005445 .add = task_clock_event_add,
5446 .del = task_clock_event_del,
5447 .start = task_clock_event_start,
5448 .stop = task_clock_event_stop,
Peter Zijlstrab0a873e2010-06-11 13:35:08 +02005449 .read = task_clock_event_read,
5450};
5451
Peter Zijlstraad5133b2010-06-15 12:22:39 +02005452static void perf_pmu_nop_void(struct pmu *pmu)
5453{
5454}
5455
5456static int perf_pmu_nop_int(struct pmu *pmu)
5457{
5458 return 0;
5459}
5460
5461static void perf_pmu_start_txn(struct pmu *pmu)
5462{
5463 perf_pmu_disable(pmu);
5464}
5465
5466static int perf_pmu_commit_txn(struct pmu *pmu)
5467{
5468 perf_pmu_enable(pmu);
5469 return 0;
5470}
5471
5472static void perf_pmu_cancel_txn(struct pmu *pmu)
5473{
5474 perf_pmu_enable(pmu);
5475}
5476
Peter Zijlstra8dc85d52010-09-02 16:50:03 +02005477/*
5478 * Ensures all contexts with the same task_ctx_nr have the same
5479 * pmu_cpu_context too.
5480 */
5481static void *find_pmu_context(int ctxn)
5482{
5483 struct pmu *pmu;
5484
5485 if (ctxn < 0)
5486 return NULL;
5487
5488 list_for_each_entry(pmu, &pmus, entry) {
5489 if (pmu->task_ctx_nr == ctxn)
5490 return pmu->pmu_cpu_context;
5491 }
5492
5493 return NULL;
5494}
5495
Peter Zijlstra51676952010-12-07 14:18:20 +01005496static void update_pmu_context(struct pmu *pmu, struct pmu *old_pmu)
Peter Zijlstra8dc85d52010-09-02 16:50:03 +02005497{
Peter Zijlstra51676952010-12-07 14:18:20 +01005498 int cpu;
5499
5500 for_each_possible_cpu(cpu) {
5501 struct perf_cpu_context *cpuctx;
5502
5503 cpuctx = per_cpu_ptr(pmu->pmu_cpu_context, cpu);
5504
5505 if (cpuctx->active_pmu == old_pmu)
5506 cpuctx->active_pmu = pmu;
5507 }
5508}
5509
5510static void free_pmu_context(struct pmu *pmu)
5511{
5512 struct pmu *i;
Peter Zijlstra8dc85d52010-09-02 16:50:03 +02005513
5514 mutex_lock(&pmus_lock);
5515 /*
5516 * Like a real lame refcount.
5517 */
Peter Zijlstra51676952010-12-07 14:18:20 +01005518 list_for_each_entry(i, &pmus, entry) {
5519 if (i->pmu_cpu_context == pmu->pmu_cpu_context) {
5520 update_pmu_context(i, pmu);
Peter Zijlstra8dc85d52010-09-02 16:50:03 +02005521 goto out;
Peter Zijlstra51676952010-12-07 14:18:20 +01005522 }
Peter Zijlstra8dc85d52010-09-02 16:50:03 +02005523 }
5524
Peter Zijlstra51676952010-12-07 14:18:20 +01005525 free_percpu(pmu->pmu_cpu_context);
Peter Zijlstra8dc85d52010-09-02 16:50:03 +02005526out:
5527 mutex_unlock(&pmus_lock);
5528}
Peter Zijlstra2e80a822010-11-17 23:17:36 +01005529static struct idr pmu_idr;
Peter Zijlstra8dc85d52010-09-02 16:50:03 +02005530
Peter Zijlstraabe43402010-11-17 23:17:37 +01005531static ssize_t
5532type_show(struct device *dev, struct device_attribute *attr, char *page)
5533{
5534 struct pmu *pmu = dev_get_drvdata(dev);
5535
5536 return snprintf(page, PAGE_SIZE-1, "%d\n", pmu->type);
5537}
5538
5539static struct device_attribute pmu_dev_attrs[] = {
5540 __ATTR_RO(type),
5541 __ATTR_NULL,
5542};
5543
5544static int pmu_bus_running;
5545static struct bus_type pmu_bus = {
5546 .name = "event_source",
5547 .dev_attrs = pmu_dev_attrs,
5548};
5549
5550static void pmu_dev_release(struct device *dev)
5551{
5552 kfree(dev);
5553}
5554
5555static int pmu_dev_alloc(struct pmu *pmu)
5556{
5557 int ret = -ENOMEM;
5558
5559 pmu->dev = kzalloc(sizeof(struct device), GFP_KERNEL);
5560 if (!pmu->dev)
5561 goto out;
5562
5563 device_initialize(pmu->dev);
5564 ret = dev_set_name(pmu->dev, "%s", pmu->name);
5565 if (ret)
5566 goto free_dev;
5567
5568 dev_set_drvdata(pmu->dev, pmu);
5569 pmu->dev->bus = &pmu_bus;
5570 pmu->dev->release = pmu_dev_release;
5571 ret = device_add(pmu->dev);
5572 if (ret)
5573 goto free_dev;
5574
5575out:
5576 return ret;
5577
5578free_dev:
5579 put_device(pmu->dev);
5580 goto out;
5581}
5582
Peter Zijlstra547e9fd2011-01-19 12:51:39 +01005583static struct lock_class_key cpuctx_mutex;
Peter Zijlstrafacc4302011-04-09 21:17:42 +02005584static struct lock_class_key cpuctx_lock;
Peter Zijlstra547e9fd2011-01-19 12:51:39 +01005585
Peter Zijlstra2e80a822010-11-17 23:17:36 +01005586int perf_pmu_register(struct pmu *pmu, char *name, int type)
Peter Zijlstrab0a873e2010-06-11 13:35:08 +02005587{
Peter Zijlstra108b02c2010-09-06 14:32:03 +02005588 int cpu, ret;
Peter Zijlstra33696fc2010-06-14 08:49:00 +02005589
Peter Zijlstrab0a873e2010-06-11 13:35:08 +02005590 mutex_lock(&pmus_lock);
Peter Zijlstra33696fc2010-06-14 08:49:00 +02005591 ret = -ENOMEM;
5592 pmu->pmu_disable_count = alloc_percpu(int);
5593 if (!pmu->pmu_disable_count)
5594 goto unlock;
Peter Zijlstraad5133b2010-06-15 12:22:39 +02005595
Peter Zijlstra2e80a822010-11-17 23:17:36 +01005596 pmu->type = -1;
5597 if (!name)
5598 goto skip_type;
5599 pmu->name = name;
5600
5601 if (type < 0) {
5602 int err = idr_pre_get(&pmu_idr, GFP_KERNEL);
5603 if (!err)
5604 goto free_pdc;
5605
5606 err = idr_get_new_above(&pmu_idr, pmu, PERF_TYPE_MAX, &type);
5607 if (err) {
5608 ret = err;
5609 goto free_pdc;
5610 }
5611 }
5612 pmu->type = type;
5613
Peter Zijlstraabe43402010-11-17 23:17:37 +01005614 if (pmu_bus_running) {
5615 ret = pmu_dev_alloc(pmu);
5616 if (ret)
5617 goto free_idr;
5618 }
5619
Peter Zijlstra2e80a822010-11-17 23:17:36 +01005620skip_type:
Peter Zijlstra8dc85d52010-09-02 16:50:03 +02005621 pmu->pmu_cpu_context = find_pmu_context(pmu->task_ctx_nr);
5622 if (pmu->pmu_cpu_context)
5623 goto got_cpu_context;
5624
Peter Zijlstra108b02c2010-09-06 14:32:03 +02005625 pmu->pmu_cpu_context = alloc_percpu(struct perf_cpu_context);
5626 if (!pmu->pmu_cpu_context)
Peter Zijlstraabe43402010-11-17 23:17:37 +01005627 goto free_dev;
Peter Zijlstra108b02c2010-09-06 14:32:03 +02005628
5629 for_each_possible_cpu(cpu) {
5630 struct perf_cpu_context *cpuctx;
5631
5632 cpuctx = per_cpu_ptr(pmu->pmu_cpu_context, cpu);
Peter Zijlstraeb184472010-09-07 15:55:13 +02005633 __perf_event_init_context(&cpuctx->ctx);
Peter Zijlstra547e9fd2011-01-19 12:51:39 +01005634 lockdep_set_class(&cpuctx->ctx.mutex, &cpuctx_mutex);
Peter Zijlstrafacc4302011-04-09 21:17:42 +02005635 lockdep_set_class(&cpuctx->ctx.lock, &cpuctx_lock);
Peter Zijlstrab04243e2010-09-17 11:28:48 +02005636 cpuctx->ctx.type = cpu_context;
Peter Zijlstra108b02c2010-09-06 14:32:03 +02005637 cpuctx->ctx.pmu = pmu;
Peter Zijlstrae9d2b062010-09-17 11:28:50 +02005638 cpuctx->jiffies_interval = 1;
5639 INIT_LIST_HEAD(&cpuctx->rotation_list);
Peter Zijlstra51676952010-12-07 14:18:20 +01005640 cpuctx->active_pmu = pmu;
Peter Zijlstra108b02c2010-09-06 14:32:03 +02005641 }
5642
Peter Zijlstra8dc85d52010-09-02 16:50:03 +02005643got_cpu_context:
Peter Zijlstraad5133b2010-06-15 12:22:39 +02005644 if (!pmu->start_txn) {
5645 if (pmu->pmu_enable) {
5646 /*
5647 * If we have pmu_enable/pmu_disable calls, install
5648 * transaction stubs that use them to try to batch
5649 * hardware accesses.
5650 */
5651 pmu->start_txn = perf_pmu_start_txn;
5652 pmu->commit_txn = perf_pmu_commit_txn;
5653 pmu->cancel_txn = perf_pmu_cancel_txn;
5654 } else {
5655 pmu->start_txn = perf_pmu_nop_void;
5656 pmu->commit_txn = perf_pmu_nop_int;
5657 pmu->cancel_txn = perf_pmu_nop_void;
5658 }
5659 }
5660
5661 if (!pmu->pmu_enable) {
5662 pmu->pmu_enable = perf_pmu_nop_void;
5663 pmu->pmu_disable = perf_pmu_nop_void;
5664 }
5665
Peter Zijlstrab0a873e2010-06-11 13:35:08 +02005666 list_add_rcu(&pmu->entry, &pmus);
Peter Zijlstra33696fc2010-06-14 08:49:00 +02005667 ret = 0;
5668unlock:
Peter Zijlstrab0a873e2010-06-11 13:35:08 +02005669 mutex_unlock(&pmus_lock);
5670
Peter Zijlstra33696fc2010-06-14 08:49:00 +02005671 return ret;
Peter Zijlstra108b02c2010-09-06 14:32:03 +02005672
Peter Zijlstraabe43402010-11-17 23:17:37 +01005673free_dev:
5674 device_del(pmu->dev);
5675 put_device(pmu->dev);
5676
Peter Zijlstra2e80a822010-11-17 23:17:36 +01005677free_idr:
5678 if (pmu->type >= PERF_TYPE_MAX)
5679 idr_remove(&pmu_idr, pmu->type);
5680
Peter Zijlstra108b02c2010-09-06 14:32:03 +02005681free_pdc:
5682 free_percpu(pmu->pmu_disable_count);
5683 goto unlock;
Peter Zijlstrab0a873e2010-06-11 13:35:08 +02005684}
5685
5686void perf_pmu_unregister(struct pmu *pmu)
5687{
5688 mutex_lock(&pmus_lock);
5689 list_del_rcu(&pmu->entry);
5690 mutex_unlock(&pmus_lock);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02005691
5692 /*
Peter Zijlstracde8e882010-09-13 11:06:55 +02005693 * We dereference the pmu list under both SRCU and regular RCU, so
5694 * synchronize against both of those.
Ingo Molnarcdd6c482009-09-21 12:02:48 +02005695 */
Peter Zijlstrab0a873e2010-06-11 13:35:08 +02005696 synchronize_srcu(&pmus_srcu);
Peter Zijlstracde8e882010-09-13 11:06:55 +02005697 synchronize_rcu();
Ingo Molnarcdd6c482009-09-21 12:02:48 +02005698
Peter Zijlstra33696fc2010-06-14 08:49:00 +02005699 free_percpu(pmu->pmu_disable_count);
Peter Zijlstra2e80a822010-11-17 23:17:36 +01005700 if (pmu->type >= PERF_TYPE_MAX)
5701 idr_remove(&pmu_idr, pmu->type);
Peter Zijlstraabe43402010-11-17 23:17:37 +01005702 device_del(pmu->dev);
5703 put_device(pmu->dev);
Peter Zijlstra51676952010-12-07 14:18:20 +01005704 free_pmu_context(pmu);
Peter Zijlstrab0a873e2010-06-11 13:35:08 +02005705}
Ingo Molnarcdd6c482009-09-21 12:02:48 +02005706
Peter Zijlstrab0a873e2010-06-11 13:35:08 +02005707struct pmu *perf_init_event(struct perf_event *event)
Ingo Molnarcdd6c482009-09-21 12:02:48 +02005708{
Peter Zijlstra51b0fe32010-06-11 13:35:57 +02005709 struct pmu *pmu = NULL;
Peter Zijlstrab0a873e2010-06-11 13:35:08 +02005710 int idx;
Lin Ming940c5b22011-02-27 21:13:31 +08005711 int ret;
Frederic Weisbecker76e1d902010-04-05 15:35:57 +02005712
Peter Zijlstrab0a873e2010-06-11 13:35:08 +02005713 idx = srcu_read_lock(&pmus_srcu);
Peter Zijlstra2e80a822010-11-17 23:17:36 +01005714
5715 rcu_read_lock();
5716 pmu = idr_find(&pmu_idr, event->attr.type);
5717 rcu_read_unlock();
Lin Ming940c5b22011-02-27 21:13:31 +08005718 if (pmu) {
Mark Rutland7e5b2a02011-08-11 12:31:20 +01005719 event->pmu = pmu;
Lin Ming940c5b22011-02-27 21:13:31 +08005720 ret = pmu->event_init(event);
5721 if (ret)
5722 pmu = ERR_PTR(ret);
Peter Zijlstra2e80a822010-11-17 23:17:36 +01005723 goto unlock;
Lin Ming940c5b22011-02-27 21:13:31 +08005724 }
Peter Zijlstra2e80a822010-11-17 23:17:36 +01005725
Peter Zijlstrab0a873e2010-06-11 13:35:08 +02005726 list_for_each_entry_rcu(pmu, &pmus, entry) {
Mark Rutland7e5b2a02011-08-11 12:31:20 +01005727 event->pmu = pmu;
Lin Ming940c5b22011-02-27 21:13:31 +08005728 ret = pmu->event_init(event);
Peter Zijlstrab0a873e2010-06-11 13:35:08 +02005729 if (!ret)
Peter Zijlstrae5f4d332010-09-10 17:38:06 +02005730 goto unlock;
Frederic Weisbecker76e1d902010-04-05 15:35:57 +02005731
Peter Zijlstrab0a873e2010-06-11 13:35:08 +02005732 if (ret != -ENOENT) {
5733 pmu = ERR_PTR(ret);
Peter Zijlstrae5f4d332010-09-10 17:38:06 +02005734 goto unlock;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02005735 }
Ingo Molnarcdd6c482009-09-21 12:02:48 +02005736 }
Peter Zijlstrae5f4d332010-09-10 17:38:06 +02005737 pmu = ERR_PTR(-ENOENT);
5738unlock:
Peter Zijlstrab0a873e2010-06-11 13:35:08 +02005739 srcu_read_unlock(&pmus_srcu, idx);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02005740
5741 return pmu;
5742}
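/*
 * Editor's sketch (not part of the original file): the event_init
 * contract that perf_init_event() above relies on.  A pmu declines an
 * event it does not recognise with -ENOENT, which keeps the
 * list_for_each_entry_rcu() walk going; any other error code aborts
 * the lookup.  All "sketch_*" names below are hypothetical.
 */
static int sketch_event_init(struct perf_event *event)
{
	/* event->pmu is already set by perf_init_event() */
	if (event->attr.type != event->pmu->type)
		return -ENOENT;		/* not ours; try the next pmu */

	if (event->attr.exclude_hv)
		return -EOPNOTSUPP;	/* ours, but unsupported; give up */

	return 0;
}

static int  sketch_add(struct perf_event *event, int flags)   { return 0; }
static void sketch_del(struct perf_event *event, int flags)   { }
static void sketch_start(struct perf_event *event, int flags) { }
static void sketch_stop(struct perf_event *event, int flags)  { }
static void sketch_read(struct perf_event *event)             { }

static struct pmu sketch_pmu = {
	.task_ctx_nr	= perf_sw_context,
	.event_init	= sketch_event_init,
	.add		= sketch_add,
	.del		= sketch_del,
	.start		= sketch_start,
	.stop		= sketch_stop,
	.read		= sketch_read,
};

/* type -1 asks perf_pmu_register() to allocate a dynamic id in pmu_idr */
static int __init sketch_pmu_init(void)
{
	return perf_pmu_register(&sketch_pmu, "sketch", -1);
}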
5743
5744/*
5745 * Allocate and initialize an event structure
5746 */
5747static struct perf_event *
Peter Zijlstrac3f00c72010-08-18 14:37:15 +02005748perf_event_alloc(struct perf_event_attr *attr, int cpu,
Peter Zijlstrad580ff82010-10-14 17:43:23 +02005749 struct task_struct *task,
5750 struct perf_event *group_leader,
5751 struct perf_event *parent_event,
Avi Kivity4dc0da82011-06-29 18:42:35 +03005752 perf_overflow_handler_t overflow_handler,
5753 void *context)
Ingo Molnarcdd6c482009-09-21 12:02:48 +02005754{
Peter Zijlstra51b0fe32010-06-11 13:35:57 +02005755 struct pmu *pmu;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02005756 struct perf_event *event;
5757 struct hw_perf_event *hwc;
5758 long err;
5759
Oleg Nesterov66832eb2011-01-18 17:10:32 +01005760 if ((unsigned)cpu >= nr_cpu_ids) {
5761 if (!task || cpu != -1)
5762 return ERR_PTR(-EINVAL);
5763 }
5764
Peter Zijlstrac3f00c72010-08-18 14:37:15 +02005765 event = kzalloc(sizeof(*event), GFP_KERNEL);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02005766 if (!event)
5767 return ERR_PTR(-ENOMEM);
5768
5769 /*
5770 * Single events are their own group leaders, with an
5771 * empty sibling list:
5772 */
5773 if (!group_leader)
5774 group_leader = event;
5775
5776 mutex_init(&event->child_mutex);
5777 INIT_LIST_HEAD(&event->child_list);
5778
5779 INIT_LIST_HEAD(&event->group_entry);
5780 INIT_LIST_HEAD(&event->event_entry);
5781 INIT_LIST_HEAD(&event->sibling_list);
5782 init_waitqueue_head(&event->waitq);
Peter Zijlstrae360adb2010-10-14 14:01:34 +08005783 init_irq_work(&event->pending, perf_pending_event);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02005784
5785 mutex_init(&event->mmap_mutex);
5786
5787 event->cpu = cpu;
5788 event->attr = *attr;
5789 event->group_leader = group_leader;
5790 event->pmu = NULL;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02005791 event->oncpu = -1;
5792
5793 event->parent = parent_event;
5794
5795 event->ns = get_pid_ns(current->nsproxy->pid_ns);
5796 event->id = atomic64_inc_return(&perf_event_id);
5797
5798 event->state = PERF_EVENT_STATE_INACTIVE;
5799
Peter Zijlstrad580ff82010-10-14 17:43:23 +02005800 if (task) {
5801 event->attach_state = PERF_ATTACH_TASK;
5802#ifdef CONFIG_HAVE_HW_BREAKPOINT
5803 /*
5804 * hw_breakpoint is a bit difficult here...
5805 */
5806 if (attr->type == PERF_TYPE_BREAKPOINT)
5807 event->hw.bp_target = task;
5808#endif
5809 }
5810
Avi Kivity4dc0da82011-06-29 18:42:35 +03005811 if (!overflow_handler && parent_event) {
Frederic Weisbeckerb326e952009-12-05 09:44:31 +01005812 overflow_handler = parent_event->overflow_handler;
Avi Kivity4dc0da82011-06-29 18:42:35 +03005813 context = parent_event->overflow_handler_context;
5814 }
Oleg Nesterov66832eb2011-01-18 17:10:32 +01005815
Frederic Weisbeckerb326e952009-12-05 09:44:31 +01005816 event->overflow_handler = overflow_handler;
Avi Kivity4dc0da82011-06-29 18:42:35 +03005817 event->overflow_handler_context = context;
Frederic Weisbecker97eaf532009-10-18 15:33:50 +02005818
Ingo Molnarcdd6c482009-09-21 12:02:48 +02005819 if (attr->disabled)
5820 event->state = PERF_EVENT_STATE_OFF;
5821
5822 pmu = NULL;
5823
5824 hwc = &event->hw;
5825 hwc->sample_period = attr->sample_period;
5826 if (attr->freq && attr->sample_freq)
5827 hwc->sample_period = 1;
5828 hwc->last_period = hwc->sample_period;
5829
Peter Zijlstrae7850592010-05-21 14:43:08 +02005830 local64_set(&hwc->period_left, hwc->sample_period);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02005831
5832 /*
5833 * we currently do not support PERF_FORMAT_GROUP on inherited events
5834 */
5835 if (attr->inherit && (attr->read_format & PERF_FORMAT_GROUP))
5836 goto done;
5837
Peter Zijlstrab0a873e2010-06-11 13:35:08 +02005838 pmu = perf_init_event(event);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02005839
Ingo Molnarcdd6c482009-09-21 12:02:48 +02005840done:
5841 err = 0;
5842 if (!pmu)
5843 err = -EINVAL;
5844 else if (IS_ERR(pmu))
5845 err = PTR_ERR(pmu);
5846
5847 if (err) {
5848 if (event->ns)
5849 put_pid_ns(event->ns);
5850 kfree(event);
5851 return ERR_PTR(err);
5852 }
5853
Ingo Molnarcdd6c482009-09-21 12:02:48 +02005854 if (!event->parent) {
Peter Zijlstra82cd6de2010-10-14 17:57:23 +02005855 if (event->attach_state & PERF_ATTACH_TASK)
Stephane Eraniane5d13672011-02-14 11:20:01 +02005856 jump_label_inc(&perf_sched_events);
Eric B Munson3af9e852010-05-18 15:30:49 +01005857 if (event->attr.mmap || event->attr.mmap_data)
Ingo Molnarcdd6c482009-09-21 12:02:48 +02005858 atomic_inc(&nr_mmap_events);
5859 if (event->attr.comm)
5860 atomic_inc(&nr_comm_events);
5861 if (event->attr.task)
5862 atomic_inc(&nr_task_events);
Frederic Weisbecker927c7a92010-07-01 16:20:36 +02005863 if (event->attr.sample_type & PERF_SAMPLE_CALLCHAIN) {
5864 err = get_callchain_buffers();
5865 if (err) {
5866 free_event(event);
5867 return ERR_PTR(err);
5868 }
5869 }
Ingo Molnarcdd6c482009-09-21 12:02:48 +02005870 }
5871
5872 return event;
5873}
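/*
 * Editor's note on the hwc setup above: with attr.freq set, the period
 * starts at 1 and is re-estimated on each interrupt to approach
 * attr.sample_freq; otherwise attr.sample_period is used as-is.  A
 * hypothetical userspace sketch of the two sampling modes (userspace
 * build; needs <string.h> and <linux/perf_event.h>):
 */
static void sampling_attr(struct perf_event_attr *attr, int use_freq)
{
	memset(attr, 0, sizeof(*attr));
	attr->size = sizeof(*attr);
	attr->type = PERF_TYPE_HARDWARE;
	attr->config = PERF_COUNT_HW_CPU_CYCLES;

	if (use_freq) {
		attr->freq = 1;
		attr->sample_freq = 4000;	/* aim for ~4000 samples/s */
	} else {
		attr->sample_period = 1000000;	/* fixed: every 1M cycles */
	}
}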
5874
5875static int perf_copy_attr(struct perf_event_attr __user *uattr,
5876 struct perf_event_attr *attr)
5877{
5878 u32 size;
5879 int ret;
5880
5881 if (!access_ok(VERIFY_WRITE, uattr, PERF_ATTR_SIZE_VER0))
5882 return -EFAULT;
5883
5884 /*
5885 * zero the full structure, so that a short copy leaves the rest zeroed.
5886 */
5887 memset(attr, 0, sizeof(*attr));
5888
5889 ret = get_user(size, &uattr->size);
5890 if (ret)
5891 return ret;
5892
5893 if (size > PAGE_SIZE) /* silly large */
5894 goto err_size;
5895
5896 if (!size) /* abi compat */
5897 size = PERF_ATTR_SIZE_VER0;
5898
5899 if (size < PERF_ATTR_SIZE_VER0)
5900 goto err_size;
5901
5902 /*
5903 * If we're handed a bigger struct than we know of,
5904 * ensure all the unknown bits are 0 - i.e. new
5905 * user-space does not rely on any kernel feature
5906 * extensions we don't know about yet.
5907 */
5908 if (size > sizeof(*attr)) {
5909 unsigned char __user *addr;
5910 unsigned char __user *end;
5911 unsigned char val;
5912
5913 addr = (void __user *)uattr + sizeof(*attr);
5914 end = (void __user *)uattr + size;
5915
5916 for (; addr < end; addr++) {
5917 ret = get_user(val, addr);
5918 if (ret)
5919 return ret;
5920 if (val)
5921 goto err_size;
5922 }
5923 size = sizeof(*attr);
5924 }
5925
5926 ret = copy_from_user(attr, uattr, size);
5927 if (ret)
5928 return -EFAULT;
5929
Mahesh Salgaonkarcd757642010-01-30 10:25:18 +05305930 if (attr->__reserved_1)
Ingo Molnarcdd6c482009-09-21 12:02:48 +02005931 return -EINVAL;
5932
5933 if (attr->sample_type & ~(PERF_SAMPLE_MAX-1))
5934 return -EINVAL;
5935
5936 if (attr->read_format & ~(PERF_FORMAT_MAX-1))
5937 return -EINVAL;
5938
5939out:
5940 return ret;
5941
5942err_size:
5943 put_user(sizeof(*attr), &uattr->size);
5944 ret = -E2BIG;
5945 goto out;
5946}
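/*
 * Editor's sketch of the userspace side of the ABI dance above: pass
 * attr.size = sizeof(attr).  A kernel that finds nonzero bytes past
 * its own sizeof(*attr) fails with E2BIG and writes the size it does
 * understand back into attr.size, so a retry reads only the prefix it
 * knows and silently ignores the newer fields.  Hypothetical helper
 * (userspace; needs <unistd.h>, <errno.h>, <sys/syscall.h>):
 */
static int open_event_compat(struct perf_event_attr *attr, pid_t pid,
			     int cpu, int group_fd, unsigned long flags)
{
	int fd;

	attr->size = sizeof(*attr);
	fd = syscall(__NR_perf_event_open, attr, pid, cpu, group_fd, flags);
	if (fd < 0 && errno == E2BIG) {
		/* attr->size now holds the kernel's idea of the struct */
		fd = syscall(__NR_perf_event_open, attr, pid, cpu,
			     group_fd, flags);
	}
	return fd;
}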
5947
Peter Zijlstraac9721f2010-05-27 12:54:41 +02005948static int
5949perf_event_set_output(struct perf_event *event, struct perf_event *output_event)
Ingo Molnarcdd6c482009-09-21 12:02:48 +02005950{
Frederic Weisbecker76369132011-05-19 19:55:04 +02005951 struct ring_buffer *rb = NULL, *old_rb = NULL;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02005952 int ret = -EINVAL;
5953
Peter Zijlstraac9721f2010-05-27 12:54:41 +02005954 if (!output_event)
Ingo Molnarcdd6c482009-09-21 12:02:48 +02005955 goto set;
5956
Peter Zijlstraac9721f2010-05-27 12:54:41 +02005957 /* don't allow circular references */
5958 if (event == output_event)
Ingo Molnarcdd6c482009-09-21 12:02:48 +02005959 goto out;
5960
Peter Zijlstra0f139302010-05-20 14:35:15 +02005961 /*
5962 * Don't allow cross-cpu buffers
5963 */
5964 if (output_event->cpu != event->cpu)
5965 goto out;
5966
5967 /*
Frederic Weisbecker76369132011-05-19 19:55:04 +02005968 * If it's not a per-cpu rb, it must be the same task.
Peter Zijlstra0f139302010-05-20 14:35:15 +02005969 */
5970 if (output_event->cpu == -1 && output_event->ctx != event->ctx)
5971 goto out;
5972
Ingo Molnarcdd6c482009-09-21 12:02:48 +02005973set:
5974 mutex_lock(&event->mmap_mutex);
Peter Zijlstraac9721f2010-05-27 12:54:41 +02005975 /* Can't redirect output if we've got an active mmap() */
5976 if (atomic_read(&event->mmap_count))
5977 goto unlock;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02005978
Peter Zijlstraac9721f2010-05-27 12:54:41 +02005979 if (output_event) {
Frederic Weisbecker76369132011-05-19 19:55:04 +02005980 /* get the rb we want to redirect to */
5981 rb = ring_buffer_get(output_event);
5982 if (!rb)
Peter Zijlstraac9721f2010-05-27 12:54:41 +02005983 goto unlock;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02005984 }
5985
Frederic Weisbecker76369132011-05-19 19:55:04 +02005986 old_rb = event->rb;
5987 rcu_assign_pointer(event->rb, rb);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02005988 ret = 0;
Peter Zijlstraac9721f2010-05-27 12:54:41 +02005989unlock:
5990 mutex_unlock(&event->mmap_mutex);
5991
Frederic Weisbecker76369132011-05-19 19:55:04 +02005992 if (old_rb)
5993 ring_buffer_put(old_rb);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02005994out:
Ingo Molnarcdd6c482009-09-21 12:02:48 +02005995 return ret;
5996}
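/*
 * Editor's sketch: besides PERF_FLAG_FD_OUTPUT at open time,
 * perf_event_set_output() is reached from userspace through
 * ioctl(PERF_EVENT_IOC_SET_OUTPUT), subject to the checks above (same
 * cpu, or same context for cpu == -1 events, and no active mmap() on
 * the event being redirected).  Hypothetical userspace helper (needs
 * <sys/ioctl.h> and <linux/perf_event.h>):
 */
static int redirect_output(int fd, int leader_fd)
{
	/* samples from fd will now land in leader_fd's ring buffer */
	return ioctl(fd, PERF_EVENT_IOC_SET_OUTPUT, leader_fd);
}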
5997
5998/**
5999 * sys_perf_event_open - open a performance event, associate it with a task/cpu
6000 *
6001 * @attr_uptr: event_id type attributes for monitoring/sampling
6002 * @pid: target pid
6003 * @cpu: target cpu
6004 * @group_fd: group leader event fd
6005 */
6006SYSCALL_DEFINE5(perf_event_open,
6007 struct perf_event_attr __user *, attr_uptr,
6008 pid_t, pid, int, cpu, int, group_fd, unsigned long, flags)
6009{
Peter Zijlstrab04243e2010-09-17 11:28:48 +02006010 struct perf_event *group_leader = NULL, *output_event = NULL;
6011 struct perf_event *event, *sibling;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02006012 struct perf_event_attr attr;
6013 struct perf_event_context *ctx;
6014 struct file *event_file = NULL;
6015 struct file *group_file = NULL;
Matt Helsley38a81da2010-09-13 13:01:20 -07006016 struct task_struct *task = NULL;
Peter Zijlstra89a1e182010-09-07 17:34:50 +02006017 struct pmu *pmu;
Al Viroea635c62010-05-26 17:40:29 -04006018 int event_fd;
Peter Zijlstrab04243e2010-09-17 11:28:48 +02006019 int move_group = 0;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02006020 int fput_needed = 0;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02006021 int err;
6022
6023 /* for future expandability... */
Stephane Eraniane5d13672011-02-14 11:20:01 +02006024 if (flags & ~PERF_FLAG_ALL)
Ingo Molnarcdd6c482009-09-21 12:02:48 +02006025 return -EINVAL;
6026
6027 err = perf_copy_attr(attr_uptr, &attr);
6028 if (err)
6029 return err;
6030
6031 if (!attr.exclude_kernel) {
6032 if (perf_paranoid_kernel() && !capable(CAP_SYS_ADMIN))
6033 return -EACCES;
6034 }
6035
6036 if (attr.freq) {
6037 if (attr.sample_freq > sysctl_perf_event_sample_rate)
6038 return -EINVAL;
6039 }
6040
Stephane Eraniane5d13672011-02-14 11:20:01 +02006041 /*
6042 * In cgroup mode, the pid argument is used to pass the fd
6043 * opened to the cgroup directory in cgroupfs. The cpu argument
6044 * designates the cpu on which to monitor threads from that
6045 * cgroup.
6046 */
6047 if ((flags & PERF_FLAG_PID_CGROUP) && (pid == -1 || cpu == -1))
6048 return -EINVAL;
6049
Al Viroea635c62010-05-26 17:40:29 -04006050 event_fd = get_unused_fd_flags(O_RDWR);
6051 if (event_fd < 0)
6052 return event_fd;
6053
Peter Zijlstraac9721f2010-05-27 12:54:41 +02006054 if (group_fd != -1) {
6055 group_leader = perf_fget_light(group_fd, &fput_needed);
6056 if (IS_ERR(group_leader)) {
6057 err = PTR_ERR(group_leader);
Stephane Eraniand14b12d2010-09-17 11:28:47 +02006058 goto err_fd;
Peter Zijlstraac9721f2010-05-27 12:54:41 +02006059 }
6060 group_file = group_leader->filp;
6061 if (flags & PERF_FLAG_FD_OUTPUT)
6062 output_event = group_leader;
6063 if (flags & PERF_FLAG_FD_NO_GROUP)
6064 group_leader = NULL;
6065 }
6066
Stephane Eraniane5d13672011-02-14 11:20:01 +02006067 if (pid != -1 && !(flags & PERF_FLAG_PID_CGROUP)) {
Peter Zijlstrac6be5a52010-10-14 16:59:46 +02006068 task = find_lively_task_by_vpid(pid);
6069 if (IS_ERR(task)) {
6070 err = PTR_ERR(task);
6071 goto err_group_fd;
6072 }
6073 }
6074
Avi Kivity4dc0da82011-06-29 18:42:35 +03006075 event = perf_event_alloc(&attr, cpu, task, group_leader, NULL,
6076 NULL, NULL);
Stephane Eraniand14b12d2010-09-17 11:28:47 +02006077 if (IS_ERR(event)) {
6078 err = PTR_ERR(event);
Peter Zijlstrac6be5a52010-10-14 16:59:46 +02006079 goto err_task;
Stephane Eraniand14b12d2010-09-17 11:28:47 +02006080 }
6081
Stephane Eraniane5d13672011-02-14 11:20:01 +02006082 if (flags & PERF_FLAG_PID_CGROUP) {
6083 err = perf_cgroup_connect(pid, event, &attr, group_leader);
6084 if (err)
6085 goto err_alloc;
Peter Zijlstra08309372011-03-03 11:31:20 +01006086 /*
6087 * one more event:
6088 * - that has a cgroup constraint on event->cpu
6089 * - that may need work on context switch
6090 */
6091 atomic_inc(&per_cpu(perf_cgroup_events, event->cpu));
6092 jump_label_inc(&perf_sched_events);
Stephane Eraniane5d13672011-02-14 11:20:01 +02006093 }
6094
Ingo Molnarcdd6c482009-09-21 12:02:48 +02006095 /*
Peter Zijlstra89a1e182010-09-07 17:34:50 +02006096 * Special case software events and allow them to be part of
6097 * any hardware group.
6098 */
6099 pmu = event->pmu;
Peter Zijlstrab04243e2010-09-17 11:28:48 +02006100
6101 if (group_leader &&
6102 (is_software_event(event) != is_software_event(group_leader))) {
6103 if (is_software_event(event)) {
6104 /*
6105 * If event and group_leader are not both software
6106 * events, and event is, then the group leader is not.
6107 *
6108 * Allow the addition of software events to !software
6109 * groups, this is safe because software events never
6110 * fail to schedule.
6111 */
6112 pmu = group_leader->pmu;
6113 } else if (is_software_event(group_leader) &&
6114 (group_leader->group_flags & PERF_GROUP_SOFTWARE)) {
6115 /*
6116 * In case the group is a pure software group, and we
6117 * try to add a hardware event, move the whole group to
6118 * the hardware context.
6119 */
6120 move_group = 1;
6121 }
6122 }
Peter Zijlstra89a1e182010-09-07 17:34:50 +02006123
6124 /*
6125 * Get the target context (task or percpu):
6126 */
Matt Helsley38a81da2010-09-13 13:01:20 -07006127 ctx = find_get_context(pmu, task, cpu);
Peter Zijlstra89a1e182010-09-07 17:34:50 +02006128 if (IS_ERR(ctx)) {
6129 err = PTR_ERR(ctx);
Peter Zijlstrac6be5a52010-10-14 16:59:46 +02006130 goto err_alloc;
Peter Zijlstra89a1e182010-09-07 17:34:50 +02006131 }
6132
Peter Zijlstrafd1edb32011-03-28 13:13:56 +02006133 if (task) {
6134 put_task_struct(task);
6135 task = NULL;
6136 }
6137
Ingo Molnarcdd6c482009-09-21 12:02:48 +02006138 /*
6139 * Look up the group leader (we will attach this event to it):
6140 */
Peter Zijlstraac9721f2010-05-27 12:54:41 +02006141 if (group_leader) {
Ingo Molnarcdd6c482009-09-21 12:02:48 +02006142 err = -EINVAL;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02006143
Ingo Molnarcdd6c482009-09-21 12:02:48 +02006144 /*
6145 * Do not allow a recursive hierarchy (this new sibling
6146 * becoming part of another group-sibling):
6147 */
6148 if (group_leader->group_leader != group_leader)
Peter Zijlstrac3f00c72010-08-18 14:37:15 +02006149 goto err_context;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02006150 /*
6151 * Do not allow attaching to a group in a different
6152 * task or CPU context:
6153 */
Peter Zijlstrab04243e2010-09-17 11:28:48 +02006154 if (move_group) {
6155 if (group_leader->ctx->type != ctx->type)
6156 goto err_context;
6157 } else {
6158 if (group_leader->ctx != ctx)
6159 goto err_context;
6160 }
6161
Ingo Molnarcdd6c482009-09-21 12:02:48 +02006162 /*
6163 * Only a group leader can be exclusive or pinned
6164 */
6165 if (attr.exclusive || attr.pinned)
Peter Zijlstrac3f00c72010-08-18 14:37:15 +02006166 goto err_context;
Peter Zijlstraac9721f2010-05-27 12:54:41 +02006167 }
6168
6169 if (output_event) {
6170 err = perf_event_set_output(event, output_event);
6171 if (err)
Peter Zijlstrac3f00c72010-08-18 14:37:15 +02006172 goto err_context;
Peter Zijlstraac9721f2010-05-27 12:54:41 +02006173 }
Ingo Molnarcdd6c482009-09-21 12:02:48 +02006174
Al Viroea635c62010-05-26 17:40:29 -04006175 event_file = anon_inode_getfile("[perf_event]", &perf_fops, event, O_RDWR);
6176 if (IS_ERR(event_file)) {
6177 err = PTR_ERR(event_file);
Peter Zijlstrac3f00c72010-08-18 14:37:15 +02006178 goto err_context;
Al Viroea635c62010-05-26 17:40:29 -04006179 }
Ingo Molnarcdd6c482009-09-21 12:02:48 +02006180
Peter Zijlstrab04243e2010-09-17 11:28:48 +02006181 if (move_group) {
6182 struct perf_event_context *gctx = group_leader->ctx;
6183
6184 mutex_lock(&gctx->mutex);
Peter Zijlstrafe4b04f2011-02-02 13:19:09 +01006185 perf_remove_from_context(group_leader);
Peter Zijlstrab04243e2010-09-17 11:28:48 +02006186 list_for_each_entry(sibling, &group_leader->sibling_list,
6187 group_entry) {
Peter Zijlstrafe4b04f2011-02-02 13:19:09 +01006188 perf_remove_from_context(sibling);
Peter Zijlstrab04243e2010-09-17 11:28:48 +02006189 put_ctx(gctx);
6190 }
6191 mutex_unlock(&gctx->mutex);
6192 put_ctx(gctx);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02006193 }
6194
Ingo Molnarcdd6c482009-09-21 12:02:48 +02006195 event->filp = event_file;
6196 WARN_ON_ONCE(ctx->parent_ctx);
6197 mutex_lock(&ctx->mutex);
Peter Zijlstrab04243e2010-09-17 11:28:48 +02006198
6199 if (move_group) {
6200 perf_install_in_context(ctx, group_leader, cpu);
6201 get_ctx(ctx);
6202 list_for_each_entry(sibling, &group_leader->sibling_list,
6203 group_entry) {
6204 perf_install_in_context(ctx, sibling, cpu);
6205 get_ctx(ctx);
6206 }
6207 }
6208
Ingo Molnarcdd6c482009-09-21 12:02:48 +02006209 perf_install_in_context(ctx, event, cpu);
6210 ++ctx->generation;
Peter Zijlstrafe4b04f2011-02-02 13:19:09 +01006211 perf_unpin_context(ctx);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02006212 mutex_unlock(&ctx->mutex);
6213
6214 event->owner = current;
Peter Zijlstra88821352010-11-09 19:01:43 +01006215
Ingo Molnarcdd6c482009-09-21 12:02:48 +02006216 mutex_lock(&current->perf_event_mutex);
6217 list_add_tail(&event->owner_entry, &current->perf_event_list);
6218 mutex_unlock(&current->perf_event_mutex);
6219
Peter Zijlstra8a495422010-05-27 15:47:49 +02006220 /*
Arnaldo Carvalho de Meloc320c7b2010-10-20 12:50:11 -02006221 * Precalculate sample_data sizes
6222 */
6223 perf_event__header_size(event);
Arnaldo Carvalho de Melo6844c092010-12-03 16:36:35 -02006224 perf_event__id_header_size(event);
Arnaldo Carvalho de Meloc320c7b2010-10-20 12:50:11 -02006225
6226 /*
Peter Zijlstra8a495422010-05-27 15:47:49 +02006227 * Drop the reference on the group_event after placing the
6228 * new event on the sibling_list. This ensures destruction
6229 * of the group leader will find the pointer to itself in
6230 * perf_group_detach().
6231 */
Ingo Molnarcdd6c482009-09-21 12:02:48 +02006232 fput_light(group_file, fput_needed);
Al Viroea635c62010-05-26 17:40:29 -04006233 fd_install(event_fd, event_file);
6234 return event_fd;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02006235
Peter Zijlstrac3f00c72010-08-18 14:37:15 +02006236err_context:
Peter Zijlstrafe4b04f2011-02-02 13:19:09 +01006237 perf_unpin_context(ctx);
Al Viroea635c62010-05-26 17:40:29 -04006238 put_ctx(ctx);
Peter Zijlstrac6be5a52010-10-14 16:59:46 +02006239err_alloc:
6240 free_event(event);
Peter Zijlstrae7d0bc02010-10-14 16:54:51 +02006241err_task:
6242 if (task)
6243 put_task_struct(task);
Peter Zijlstra89a1e182010-09-07 17:34:50 +02006244err_group_fd:
6245 fput_light(group_file, fput_needed);
Al Viroea635c62010-05-26 17:40:29 -04006246err_fd:
6247 put_unused_fd(event_fd);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02006248 return err;
6249}
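/*
 * Editor's sketch of the counting-mode path through the syscall above:
 * open a hardware counter on the calling thread, run some work, read
 * the 64-bit count.  Userspace code (needs <string.h>, <unistd.h>,
 * <sys/ioctl.h>, <sys/syscall.h>, <linux/perf_event.h>); error
 * handling trimmed for brevity.
 */
static long long count_instructions(void (*work)(void))
{
	struct perf_event_attr attr;
	long long count = -1;
	int fd;

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = PERF_TYPE_HARDWARE;
	attr.config = PERF_COUNT_HW_INSTRUCTIONS;
	attr.disabled = 1;		/* start OFF, enable explicitly */
	attr.exclude_kernel = 1;	/* sidesteps the paranoid check above */

	/* pid = 0, cpu = -1: this thread, any cpu; no group, no flags */
	fd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
	if (fd < 0)
		return -1;

	ioctl(fd, PERF_EVENT_IOC_RESET, 0);
	ioctl(fd, PERF_EVENT_IOC_ENABLE, 0);
	work();
	ioctl(fd, PERF_EVENT_IOC_DISABLE, 0);

	read(fd, &count, sizeof(count));
	close(fd);
	return count;
}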
6250
Arjan van de Venfb0459d2009-09-25 12:25:56 +02006251/**
6252 * perf_event_create_kernel_counter
6253 *
6254 * @attr: attributes of the counter to create
6255 * @cpu: cpu to which the counter is bound
Matt Helsley38a81da2010-09-13 13:01:20 -07006256 * @task: task to profile (NULL for percpu)
Arjan van de Venfb0459d2009-09-25 12:25:56 +02006257 */
6258struct perf_event *
6259perf_event_create_kernel_counter(struct perf_event_attr *attr, int cpu,
Matt Helsley38a81da2010-09-13 13:01:20 -07006260 struct task_struct *task,
Avi Kivity4dc0da82011-06-29 18:42:35 +03006261 perf_overflow_handler_t overflow_handler,
6262 void *context)
Arjan van de Venfb0459d2009-09-25 12:25:56 +02006263{
Arjan van de Venfb0459d2009-09-25 12:25:56 +02006264 struct perf_event_context *ctx;
Peter Zijlstrac3f00c72010-08-18 14:37:15 +02006265 struct perf_event *event;
Arjan van de Venfb0459d2009-09-25 12:25:56 +02006266 int err;
6267
6268 /*
6269 * Get the target context (task or percpu):
6270 */
6271
Avi Kivity4dc0da82011-06-29 18:42:35 +03006272 event = perf_event_alloc(attr, cpu, task, NULL, NULL,
6273 overflow_handler, context);
Frederic Weisbeckerc6567f62009-11-26 05:35:41 +01006274 if (IS_ERR(event)) {
6275 err = PTR_ERR(event);
Peter Zijlstrac3f00c72010-08-18 14:37:15 +02006276 goto err;
6277 }
6278
Matt Helsley38a81da2010-09-13 13:01:20 -07006279 ctx = find_get_context(event->pmu, task, cpu);
Arjan van de Venfb0459d2009-09-25 12:25:56 +02006280 if (IS_ERR(ctx)) {
6281 err = PTR_ERR(ctx);
Peter Zijlstrac3f00c72010-08-18 14:37:15 +02006282 goto err_free;
Frederic Weisbeckerc6567f62009-11-26 05:35:41 +01006283 }
Arjan van de Venfb0459d2009-09-25 12:25:56 +02006284
6285 event->filp = NULL;
6286 WARN_ON_ONCE(ctx->parent_ctx);
6287 mutex_lock(&ctx->mutex);
6288 perf_install_in_context(ctx, event, cpu);
6289 ++ctx->generation;
Peter Zijlstrafe4b04f2011-02-02 13:19:09 +01006290 perf_unpin_context(ctx);
Arjan van de Venfb0459d2009-09-25 12:25:56 +02006291 mutex_unlock(&ctx->mutex);
6292
Arjan van de Venfb0459d2009-09-25 12:25:56 +02006293 return event;
6294
Peter Zijlstrac3f00c72010-08-18 14:37:15 +02006295err_free:
6296 free_event(event);
6297err:
Frederic Weisbeckerc6567f62009-11-26 05:35:41 +01006298 return ERR_PTR(err);
Arjan van de Venfb0459d2009-09-25 12:25:56 +02006299}
6300EXPORT_SYMBOL_GPL(perf_event_create_kernel_counter);
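/*
 * Editor's sketch of an in-kernel user of the export above, loosely
 * modelled on the watchdog/hw_breakpoint pattern.  The overflow
 * handler is invoked from PMU interrupt (possibly NMI) context, so it
 * must not sleep.  "sketch_*" names are hypothetical, and the
 * three-argument handler signature assumes the perf_overflow_handler_t
 * of this era (after the nmi parameter was dropped).
 */
static void sketch_overflow(struct perf_event *event,
			    struct perf_sample_data *data,
			    struct pt_regs *regs)
{
	/* e.g. bump a per-cpu statistic; nothing that can sleep */
}

static struct perf_event *sketch_cycles_counter(int cpu)
{
	struct perf_event_attr attr = {
		.type		= PERF_TYPE_HARDWARE,
		.config		= PERF_COUNT_HW_CPU_CYCLES,
		.size		= sizeof(attr),
		.sample_period	= 1000000,
		.pinned		= 1,	/* must always be on the PMU */
	};

	/* task == NULL: a per-cpu counter bound to @cpu */
	return perf_event_create_kernel_counter(&attr, cpu, NULL,
						sketch_overflow, NULL);
}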
6301
Ingo Molnarcdd6c482009-09-21 12:02:48 +02006302static void sync_child_event(struct perf_event *child_event,
6303 struct task_struct *child)
6304{
6305 struct perf_event *parent_event = child_event->parent;
6306 u64 child_val;
6307
6308 if (child_event->attr.inherit_stat)
6309 perf_event_read_event(child_event, child);
6310
Peter Zijlstrab5e58792010-05-21 14:43:12 +02006311 child_val = perf_event_count(child_event);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02006312
6313 /*
6314 * Add back the child's count to the parent's count:
6315 */
Peter Zijlstraa6e6dea2010-05-21 14:27:58 +02006316 atomic64_add(child_val, &parent_event->child_count);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02006317 atomic64_add(child_event->total_time_enabled,
6318 &parent_event->child_total_time_enabled);
6319 atomic64_add(child_event->total_time_running,
6320 &parent_event->child_total_time_running);
6321
6322 /*
6323 * Remove this event from the parent's list
6324 */
6325 WARN_ON_ONCE(parent_event->ctx->parent_ctx);
6326 mutex_lock(&parent_event->child_mutex);
6327 list_del_init(&child_event->child_list);
6328 mutex_unlock(&parent_event->child_mutex);
6329
6330 /*
6331 * Release the parent event, if this was the last
6332 * reference to it.
6333 */
6334 fput(parent_event->filp);
6335}
6336
6337static void
6338__perf_event_exit_task(struct perf_event *child_event,
6339 struct perf_event_context *child_ctx,
6340 struct task_struct *child)
6341{
Peter Zijlstra38b435b2011-03-15 14:37:10 +01006342 if (child_event->parent) {
6343 raw_spin_lock_irq(&child_ctx->lock);
6344 perf_group_detach(child_event);
6345 raw_spin_unlock_irq(&child_ctx->lock);
6346 }
Ingo Molnarcdd6c482009-09-21 12:02:48 +02006347
Peter Zijlstrafe4b04f2011-02-02 13:19:09 +01006348 perf_remove_from_context(child_event);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02006349
Ingo Molnarcdd6c482009-09-21 12:02:48 +02006350 /*
Peter Zijlstra38b435b2011-03-15 14:37:10 +01006351 * It can happen that the parent exits first, and has events
Ingo Molnarcdd6c482009-09-21 12:02:48 +02006352 * that are still around due to the child reference. These
Peter Zijlstra38b435b2011-03-15 14:37:10 +01006353 * events need to be zapped.
Ingo Molnarcdd6c482009-09-21 12:02:48 +02006354 */
Peter Zijlstra38b435b2011-03-15 14:37:10 +01006355 if (child_event->parent) {
Ingo Molnarcdd6c482009-09-21 12:02:48 +02006356 sync_child_event(child_event, child);
6357 free_event(child_event);
6358 }
6359}
6360
Peter Zijlstra8dc85d52010-09-02 16:50:03 +02006361static void perf_event_exit_task_context(struct task_struct *child, int ctxn)
Ingo Molnarcdd6c482009-09-21 12:02:48 +02006362{
6363 struct perf_event *child_event, *tmp;
6364 struct perf_event_context *child_ctx;
6365 unsigned long flags;
6366
Peter Zijlstra8dc85d52010-09-02 16:50:03 +02006367 if (likely(!child->perf_event_ctxp[ctxn])) {
Ingo Molnarcdd6c482009-09-21 12:02:48 +02006368 perf_event_task(child, NULL, 0);
6369 return;
6370 }
6371
6372 local_irq_save(flags);
6373 /*
6374 * We can't reschedule here because interrupts are disabled,
6375 * and either child is current or it is a task that can't be
6376 * scheduled, so we are now safe from rescheduling changing
6377 * our context.
6378 */
Oleg Nesterov806839b2011-01-21 18:45:47 +01006379 child_ctx = rcu_dereference_raw(child->perf_event_ctxp[ctxn]);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02006380
6381 /*
6382 * Take the context lock here so that if find_get_context is
6383 * reading child->perf_event_ctxp, we wait until it has
6384 * incremented the context's refcount before we do put_ctx below.
6385 */
Thomas Gleixnere625cce2009-11-17 18:02:06 +01006386 raw_spin_lock(&child_ctx->lock);
Peter Zijlstra04dc2db2011-04-09 21:17:43 +02006387 task_ctx_sched_out(child_ctx);
Peter Zijlstra8dc85d52010-09-02 16:50:03 +02006388 child->perf_event_ctxp[ctxn] = NULL;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02006389 /*
6390 * If this context is a clone; unclone it so it can't get
6391 * swapped to another process while we're removing all
6392 * the events from it.
6393 */
6394 unclone_ctx(child_ctx);
Peter Zijlstra5e942bb2009-11-23 11:37:26 +01006395 update_context_time(child_ctx);
Thomas Gleixnere625cce2009-11-17 18:02:06 +01006396 raw_spin_unlock_irqrestore(&child_ctx->lock, flags);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02006397
6398 /*
6399 * Report the task dead after unscheduling the events so that we
6400 * won't get any samples after PERF_RECORD_EXIT. We can however still
6401 * get a few PERF_RECORD_READ events.
6402 */
6403 perf_event_task(child, child_ctx, 0);
6404
6405 /*
6406 * We can recurse on the same lock type through:
6407 *
6408 * __perf_event_exit_task()
6409 * sync_child_event()
6410 * fput(parent_event->filp)
6411 * perf_release()
6412 * mutex_lock(&ctx->mutex)
6413 *
6414 * But since it's the parent context it won't be the same instance.
6415 */
Peter Zijlstraa0507c82010-05-06 15:42:53 +02006416 mutex_lock(&child_ctx->mutex);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02006417
6418again:
Frederic Weisbecker889ff012010-01-09 20:04:47 +01006419 list_for_each_entry_safe(child_event, tmp, &child_ctx->pinned_groups,
6420 group_entry)
6421 __perf_event_exit_task(child_event, child_ctx, child);
6422
6423 list_for_each_entry_safe(child_event, tmp, &child_ctx->flexible_groups,
Ingo Molnarcdd6c482009-09-21 12:02:48 +02006424 group_entry)
6425 __perf_event_exit_task(child_event, child_ctx, child);
6426
6427 /*
6428 * If the last event was a group event, it will have appended all
6429 * its siblings to the list, but we obtained 'tmp' before that, so it
6430 * still points to the list head terminating the iteration.
6431 */
Frederic Weisbecker889ff012010-01-09 20:04:47 +01006432 if (!list_empty(&child_ctx->pinned_groups) ||
6433 !list_empty(&child_ctx->flexible_groups))
Ingo Molnarcdd6c482009-09-21 12:02:48 +02006434 goto again;
6435
6436 mutex_unlock(&child_ctx->mutex);
6437
6438 put_ctx(child_ctx);
6439}
6440
Peter Zijlstra8dc85d52010-09-02 16:50:03 +02006441/*
6442 * When a child task exits, feed back event values to parent events.
6443 */
6444void perf_event_exit_task(struct task_struct *child)
6445{
Peter Zijlstra88821352010-11-09 19:01:43 +01006446 struct perf_event *event, *tmp;
Peter Zijlstra8dc85d52010-09-02 16:50:03 +02006447 int ctxn;
6448
Peter Zijlstra88821352010-11-09 19:01:43 +01006449 mutex_lock(&child->perf_event_mutex);
6450 list_for_each_entry_safe(event, tmp, &child->perf_event_list,
6451 owner_entry) {
6452 list_del_init(&event->owner_entry);
6453
6454 /*
6455 * Ensure the list deletion is visible before we clear
6456 * the owner, closes a race against perf_release() where
6457 * we need to serialize on the owner->perf_event_mutex.
6458 */
6459 smp_wmb();
6460 event->owner = NULL;
6461 }
6462 mutex_unlock(&child->perf_event_mutex);
6463
Peter Zijlstra8dc85d52010-09-02 16:50:03 +02006464 for_each_task_context_nr(ctxn)
6465 perf_event_exit_task_context(child, ctxn);
6466}
6467
Frederic Weisbecker889ff012010-01-09 20:04:47 +01006468static void perf_free_event(struct perf_event *event,
6469 struct perf_event_context *ctx)
6470{
6471 struct perf_event *parent = event->parent;
6472
6473 if (WARN_ON_ONCE(!parent))
6474 return;
6475
6476 mutex_lock(&parent->child_mutex);
6477 list_del_init(&event->child_list);
6478 mutex_unlock(&parent->child_mutex);
6479
6480 fput(parent->filp);
6481
Peter Zijlstra8a495422010-05-27 15:47:49 +02006482 perf_group_detach(event);
Frederic Weisbecker889ff012010-01-09 20:04:47 +01006483 list_del_event(event, ctx);
6484 free_event(event);
6485}
6486
Ingo Molnarcdd6c482009-09-21 12:02:48 +02006487/*
6488 * free an unexposed, unused context as created by inheritance by
Peter Zijlstra8dc85d52010-09-02 16:50:03 +02006489 * perf_event_init_task below, used by fork() in case of failure.
Ingo Molnarcdd6c482009-09-21 12:02:48 +02006490 */
6491void perf_event_free_task(struct task_struct *task)
6492{
Peter Zijlstra8dc85d52010-09-02 16:50:03 +02006493 struct perf_event_context *ctx;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02006494 struct perf_event *event, *tmp;
Peter Zijlstra8dc85d52010-09-02 16:50:03 +02006495 int ctxn;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02006496
Peter Zijlstra8dc85d52010-09-02 16:50:03 +02006497 for_each_task_context_nr(ctxn) {
6498 ctx = task->perf_event_ctxp[ctxn];
6499 if (!ctx)
6500 continue;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02006501
Peter Zijlstra8dc85d52010-09-02 16:50:03 +02006502 mutex_lock(&ctx->mutex);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02006503again:
Peter Zijlstra8dc85d52010-09-02 16:50:03 +02006504 list_for_each_entry_safe(event, tmp, &ctx->pinned_groups,
6505 group_entry)
6506 perf_free_event(event, ctx);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02006507
Peter Zijlstra8dc85d52010-09-02 16:50:03 +02006508 list_for_each_entry_safe(event, tmp, &ctx->flexible_groups,
6509 group_entry)
6510 perf_free_event(event, ctx);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02006511
Peter Zijlstra8dc85d52010-09-02 16:50:03 +02006512 if (!list_empty(&ctx->pinned_groups) ||
6513 !list_empty(&ctx->flexible_groups))
6514 goto again;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02006515
Peter Zijlstra8dc85d52010-09-02 16:50:03 +02006516 mutex_unlock(&ctx->mutex);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02006517
Peter Zijlstra8dc85d52010-09-02 16:50:03 +02006518 put_ctx(ctx);
6519 }
Ingo Molnarcdd6c482009-09-21 12:02:48 +02006520}
6521
Peter Zijlstra4e231c72010-09-09 21:01:59 +02006522void perf_event_delayed_put(struct task_struct *task)
6523{
6524 int ctxn;
6525
6526 for_each_task_context_nr(ctxn)
6527 WARN_ON_ONCE(task->perf_event_ctxp[ctxn]);
6528}
6529
Peter Zijlstra97dee4f2010-09-07 15:35:33 +02006530/*
6531 * inherit an event from parent task to child task:
6532 */
6533static struct perf_event *
6534inherit_event(struct perf_event *parent_event,
6535 struct task_struct *parent,
6536 struct perf_event_context *parent_ctx,
6537 struct task_struct *child,
6538 struct perf_event *group_leader,
6539 struct perf_event_context *child_ctx)
6540{
6541 struct perf_event *child_event;
Peter Zijlstracee010e2010-09-10 12:51:54 +02006542 unsigned long flags;
Peter Zijlstra97dee4f2010-09-07 15:35:33 +02006543
6544 /*
6545 * Instead of creating recursive hierarchies of events,
6546 * we link inherited events back to the original parent,
6547 * which is guaranteed to have a filp that we use as the
6548 * reference count:
6549 */
6550 if (parent_event->parent)
6551 parent_event = parent_event->parent;
6552
6553 child_event = perf_event_alloc(&parent_event->attr,
6554 parent_event->cpu,
Peter Zijlstrad580ff82010-10-14 17:43:23 +02006555 child,
Peter Zijlstra97dee4f2010-09-07 15:35:33 +02006556 group_leader, parent_event,
Avi Kivity4dc0da82011-06-29 18:42:35 +03006557 NULL, NULL);
Peter Zijlstra97dee4f2010-09-07 15:35:33 +02006558 if (IS_ERR(child_event))
6559 return child_event;
6560 get_ctx(child_ctx);
6561
6562 /*
6563 * Make the child state follow the state of the parent event,
6564 * not its attr.disabled bit. We hold the parent's mutex,
6565 * so we won't race with perf_event_{en, dis}able_family.
6566 */
6567 if (parent_event->state >= PERF_EVENT_STATE_INACTIVE)
6568 child_event->state = PERF_EVENT_STATE_INACTIVE;
6569 else
6570 child_event->state = PERF_EVENT_STATE_OFF;
6571
6572 if (parent_event->attr.freq) {
6573 u64 sample_period = parent_event->hw.sample_period;
6574 struct hw_perf_event *hwc = &child_event->hw;
6575
6576 hwc->sample_period = sample_period;
6577 hwc->last_period = sample_period;
6578
6579 local64_set(&hwc->period_left, sample_period);
6580 }
6581
6582 child_event->ctx = child_ctx;
6583 child_event->overflow_handler = parent_event->overflow_handler;
Avi Kivity4dc0da82011-06-29 18:42:35 +03006584 child_event->overflow_handler_context
6585 = parent_event->overflow_handler_context;
Peter Zijlstra97dee4f2010-09-07 15:35:33 +02006586
6587 /*
Thomas Gleixner614b6782010-12-03 16:24:32 -02006588 * Precalculate sample_data sizes
6589 */
6590 perf_event__header_size(child_event);
Arnaldo Carvalho de Melo6844c092010-12-03 16:36:35 -02006591 perf_event__id_header_size(child_event);
Thomas Gleixner614b6782010-12-03 16:24:32 -02006592
6593 /*
Peter Zijlstra97dee4f2010-09-07 15:35:33 +02006594 * Link it up in the child's context:
6595 */
Peter Zijlstracee010e2010-09-10 12:51:54 +02006596 raw_spin_lock_irqsave(&child_ctx->lock, flags);
Peter Zijlstra97dee4f2010-09-07 15:35:33 +02006597 add_event_to_ctx(child_event, child_ctx);
Peter Zijlstracee010e2010-09-10 12:51:54 +02006598 raw_spin_unlock_irqrestore(&child_ctx->lock, flags);
Peter Zijlstra97dee4f2010-09-07 15:35:33 +02006599
6600 /*
6601 * Get a reference to the parent filp - we will fput it
6602 * when the child event exits. This is safe to do because
6603 * we are in the parent and we know that the filp still
6604 * exists and has a nonzero count:
6605 */
6606 atomic_long_inc(&parent_event->filp->f_count);
6607
6608 /*
6609 * Link this into the parent event's child list
6610 */
6611 WARN_ON_ONCE(parent_event->ctx->parent_ctx);
6612 mutex_lock(&parent_event->child_mutex);
6613 list_add_tail(&child_event->child_list, &parent_event->child_list);
6614 mutex_unlock(&parent_event->child_mutex);
6615
6616 return child_event;
6617}
6618
6619static int inherit_group(struct perf_event *parent_event,
6620 struct task_struct *parent,
6621 struct perf_event_context *parent_ctx,
6622 struct task_struct *child,
6623 struct perf_event_context *child_ctx)
6624{
6625 struct perf_event *leader;
6626 struct perf_event *sub;
6627 struct perf_event *child_ctr;
6628
6629 leader = inherit_event(parent_event, parent, parent_ctx,
6630 child, NULL, child_ctx);
6631 if (IS_ERR(leader))
6632 return PTR_ERR(leader);
6633 list_for_each_entry(sub, &parent_event->sibling_list, group_entry) {
6634 child_ctr = inherit_event(sub, parent, parent_ctx,
6635 child, leader, child_ctx);
6636 if (IS_ERR(child_ctr))
6637 return PTR_ERR(child_ctr);
6638 }
6639 return 0;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02006640}
6641
Frederic Weisbecker889ff012010-01-09 20:04:47 +01006642static int
6643inherit_task_group(struct perf_event *event, struct task_struct *parent,
6644 struct perf_event_context *parent_ctx,
Peter Zijlstra8dc85d52010-09-02 16:50:03 +02006645 struct task_struct *child, int ctxn,
Frederic Weisbecker889ff012010-01-09 20:04:47 +01006646 int *inherited_all)
6647{
6648 int ret;
Peter Zijlstra8dc85d52010-09-02 16:50:03 +02006649 struct perf_event_context *child_ctx;
Frederic Weisbecker889ff012010-01-09 20:04:47 +01006650
6651 if (!event->attr.inherit) {
6652 *inherited_all = 0;
6653 return 0;
6654 }
6655
Peter Zijlstrafe4b04f2011-02-02 13:19:09 +01006656 child_ctx = child->perf_event_ctxp[ctxn];
Frederic Weisbecker889ff012010-01-09 20:04:47 +01006657 if (!child_ctx) {
6658 /*
6659 * This is executed from the parent task context, so
6660 * inherit events that have been marked for cloning.
6661 * First allocate and initialize a context for the
6662 * child.
6663 */
6664
Peter Zijlstraeb184472010-09-07 15:55:13 +02006665 child_ctx = alloc_perf_context(event->pmu, child);
Frederic Weisbecker889ff012010-01-09 20:04:47 +01006666 if (!child_ctx)
6667 return -ENOMEM;
6668
Peter Zijlstra8dc85d52010-09-02 16:50:03 +02006669 child->perf_event_ctxp[ctxn] = child_ctx;
Frederic Weisbecker889ff012010-01-09 20:04:47 +01006670 }
6671
6672 ret = inherit_group(event, parent, parent_ctx,
6673 child, child_ctx);
6674
6675 if (ret)
6676 *inherited_all = 0;
6677
6678 return ret;
6679}
6680
Ingo Molnarcdd6c482009-09-21 12:02:48 +02006681/*
6682 * Initialize the perf_event context in task_struct
6683 */
Peter Zijlstra8dc85d52010-09-02 16:50:03 +02006684int perf_event_init_context(struct task_struct *child, int ctxn)
Ingo Molnarcdd6c482009-09-21 12:02:48 +02006685{
Frederic Weisbecker889ff012010-01-09 20:04:47 +01006686 struct perf_event_context *child_ctx, *parent_ctx;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02006687 struct perf_event_context *cloned_ctx;
6688 struct perf_event *event;
6689 struct task_struct *parent = current;
6690 int inherited_all = 1;
Thomas Gleixnerdddd3372010-11-24 10:05:55 +01006691 unsigned long flags;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02006692 int ret = 0;
6693
Peter Zijlstra8dc85d52010-09-02 16:50:03 +02006694 if (likely(!parent->perf_event_ctxp[ctxn]))
Ingo Molnarcdd6c482009-09-21 12:02:48 +02006695 return 0;
6696
6697 /*
Ingo Molnarcdd6c482009-09-21 12:02:48 +02006698 * If the parent's context is a clone, pin it so it won't get
6699 * swapped under us.
6700 */
Peter Zijlstra8dc85d52010-09-02 16:50:03 +02006701 parent_ctx = perf_pin_task_context(parent, ctxn);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02006702
6703 /*
6704 * No need to check if parent_ctx != NULL here; since we saw
6705 * it non-NULL earlier, the only reason for it to become NULL
6706 * is if we exit, and since we're currently in the middle of
6707 * a fork we can't be exiting at the same time.
6708 */
6709
6710 /*
6711 * Lock the parent list. No need to lock the child - not PID
6712 * hashed yet and not running, so nobody can access it.
6713 */
6714 mutex_lock(&parent_ctx->mutex);
6715
6716 /*
6717 * We don't have to disable NMIs - we are only looking at
6718 * the list, not manipulating it:
6719 */
Frederic Weisbecker889ff012010-01-09 20:04:47 +01006720 list_for_each_entry(event, &parent_ctx->pinned_groups, group_entry) {
Peter Zijlstra8dc85d52010-09-02 16:50:03 +02006721 ret = inherit_task_group(event, parent, parent_ctx,
6722 child, ctxn, &inherited_all);
Frederic Weisbecker889ff012010-01-09 20:04:47 +01006723 if (ret)
Ingo Molnarcdd6c482009-09-21 12:02:48 +02006724 break;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02006725 }
6726
Thomas Gleixnerdddd3372010-11-24 10:05:55 +01006727 /*
6728 * We can't hold ctx->lock when iterating the ->flexible_groups list due
6729 * to allocations, but we need to prevent rotation because
6730 * rotate_ctx() will change the list from interrupt context.
6731 */
6732 raw_spin_lock_irqsave(&parent_ctx->lock, flags);
6733 parent_ctx->rotate_disable = 1;
6734 raw_spin_unlock_irqrestore(&parent_ctx->lock, flags);
6735
Frederic Weisbecker889ff012010-01-09 20:04:47 +01006736 list_for_each_entry(event, &parent_ctx->flexible_groups, group_entry) {
Peter Zijlstra8dc85d52010-09-02 16:50:03 +02006737 ret = inherit_task_group(event, parent, parent_ctx,
6738 child, ctxn, &inherited_all);
Frederic Weisbecker889ff012010-01-09 20:04:47 +01006739 if (ret)
6740 break;
6741 }
6742
Thomas Gleixnerdddd3372010-11-24 10:05:55 +01006743 raw_spin_lock_irqsave(&parent_ctx->lock, flags);
6744 parent_ctx->rotate_disable = 0;
Thomas Gleixnerdddd3372010-11-24 10:05:55 +01006745
Peter Zijlstra8dc85d52010-09-02 16:50:03 +02006746 child_ctx = child->perf_event_ctxp[ctxn];
Frederic Weisbecker889ff012010-01-09 20:04:47 +01006747
Peter Zijlstra05cbaa22009-12-30 16:00:35 +01006748 if (child_ctx && inherited_all) {
Ingo Molnarcdd6c482009-09-21 12:02:48 +02006749 /*
6750 * Mark the child context as a clone of the parent
6751 * context, or of whatever the parent is a clone of.
Peter Zijlstrac5ed5142011-01-17 13:45:37 +01006752 *
6753 * Note that if the parent is a clone, the holding of
6754 * parent_ctx->lock prevents it from being uncloned.
Ingo Molnarcdd6c482009-09-21 12:02:48 +02006755 */
Peter Zijlstrac5ed5142011-01-17 13:45:37 +01006756 cloned_ctx = parent_ctx->parent_ctx;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02006757 if (cloned_ctx) {
6758 child_ctx->parent_ctx = cloned_ctx;
6759 child_ctx->parent_gen = parent_ctx->parent_gen;
6760 } else {
6761 child_ctx->parent_ctx = parent_ctx;
6762 child_ctx->parent_gen = parent_ctx->generation;
6763 }
6764 get_ctx(child_ctx->parent_ctx);
6765 }
6766
Peter Zijlstrac5ed5142011-01-17 13:45:37 +01006767 raw_spin_unlock_irqrestore(&parent_ctx->lock, flags);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02006768 mutex_unlock(&parent_ctx->mutex);
6769
6770 perf_unpin_context(parent_ctx);
Peter Zijlstrafe4b04f2011-02-02 13:19:09 +01006771 put_ctx(parent_ctx);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02006772
6773 return ret;
6774}
6775
Peter Zijlstra8dc85d52010-09-02 16:50:03 +02006776/*
6777 * Initialize the perf_event context in task_struct
6778 */
6779int perf_event_init_task(struct task_struct *child)
6780{
6781 int ctxn, ret;
6782
Oleg Nesterov8550d7c2011-01-19 19:22:28 +01006783 memset(child->perf_event_ctxp, 0, sizeof(child->perf_event_ctxp));
6784 mutex_init(&child->perf_event_mutex);
6785 INIT_LIST_HEAD(&child->perf_event_list);
6786
Peter Zijlstra8dc85d52010-09-02 16:50:03 +02006787 for_each_task_context_nr(ctxn) {
6788 ret = perf_event_init_context(child, ctxn);
6789 if (ret)
6790 return ret;
6791 }
6792
6793 return 0;
6794}
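/*
 * Editor's sketch of what the inheritance machinery above means for
 * userspace: with attr.inherit set, counts from forked children are
 * folded back into the parent event (see sync_child_event() above),
 * so a single read() covers the whole process tree.  Hypothetical
 * userspace snippet (needs <string.h>, <sys/syscall.h>,
 * <linux/perf_event.h>):
 */
static int open_inherited_counter(pid_t pid)
{
	struct perf_event_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = PERF_TYPE_HARDWARE;
	attr.config = PERF_COUNT_HW_CPU_CYCLES;
	attr.inherit = 1;	/* children created later are counted too */

	/*
	 * Note the constraint in perf_event_alloc(): inherit does not
	 * combine with PERF_FORMAT_GROUP in attr.read_format.
	 */
	return syscall(__NR_perf_event_open, &attr, pid, -1, -1, 0);
}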
6795
Paul Mackerras220b1402010-03-10 20:45:52 +11006796static void __init perf_event_init_all_cpus(void)
6797{
Peter Zijlstrab28ab832010-09-06 14:48:15 +02006798 struct swevent_htable *swhash;
Paul Mackerras220b1402010-03-10 20:45:52 +11006799 int cpu;
Paul Mackerras220b1402010-03-10 20:45:52 +11006800
6801 for_each_possible_cpu(cpu) {
Peter Zijlstrab28ab832010-09-06 14:48:15 +02006802 swhash = &per_cpu(swevent_htable, cpu);
6803 mutex_init(&swhash->hlist_mutex);
Peter Zijlstrae9d2b062010-09-17 11:28:50 +02006804 INIT_LIST_HEAD(&per_cpu(rotation_list, cpu));
Paul Mackerras220b1402010-03-10 20:45:52 +11006805 }
6806}
6807
Ingo Molnarcdd6c482009-09-21 12:02:48 +02006808static void __cpuinit perf_event_init_cpu(int cpu)
6809{
Peter Zijlstra108b02c2010-09-06 14:32:03 +02006810 struct swevent_htable *swhash = &per_cpu(swevent_htable, cpu);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02006811
Peter Zijlstrab28ab832010-09-06 14:48:15 +02006812 mutex_lock(&swhash->hlist_mutex);
Peter Zijlstra144060f2011-08-01 12:49:14 +02006813 if (swhash->hlist_refcount > 0 && !swhash->swevent_hlist) {
Frederic Weisbecker76e1d902010-04-05 15:35:57 +02006814 struct swevent_hlist *hlist;
6815
Peter Zijlstrab28ab832010-09-06 14:48:15 +02006816 hlist = kzalloc_node(sizeof(*hlist), GFP_KERNEL, cpu_to_node(cpu));
6817 WARN_ON(!hlist);
6818 rcu_assign_pointer(swhash->swevent_hlist, hlist);
Frederic Weisbecker76e1d902010-04-05 15:35:57 +02006819 }
Peter Zijlstrab28ab832010-09-06 14:48:15 +02006820 mutex_unlock(&swhash->hlist_mutex);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02006821}
6822
Peter Zijlstrac2774432010-12-08 15:29:02 +01006823#if defined CONFIG_HOTPLUG_CPU || defined CONFIG_KEXEC
Peter Zijlstrae9d2b062010-09-17 11:28:50 +02006824static void perf_pmu_rotate_stop(struct pmu *pmu)
Ingo Molnarcdd6c482009-09-21 12:02:48 +02006825{
Peter Zijlstrae9d2b062010-09-17 11:28:50 +02006826 struct perf_cpu_context *cpuctx = this_cpu_ptr(pmu->pmu_cpu_context);
6827
6828 WARN_ON(!irqs_disabled());
6829
6830 list_del_init(&cpuctx->rotation_list);
6831}
6832
Peter Zijlstra108b02c2010-09-06 14:32:03 +02006833static void __perf_event_exit_context(void *__info)
Ingo Molnarcdd6c482009-09-21 12:02:48 +02006834{
Peter Zijlstra108b02c2010-09-06 14:32:03 +02006835 struct perf_event_context *ctx = __info;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02006836 struct perf_event *event, *tmp;
6837
Peter Zijlstra108b02c2010-09-06 14:32:03 +02006838 perf_pmu_rotate_stop(ctx->pmu);
Peter Zijlstrab5ab4cd2010-09-06 16:32:21 +02006839
Frederic Weisbecker889ff012010-01-09 20:04:47 +01006840 list_for_each_entry_safe(event, tmp, &ctx->pinned_groups, group_entry)
Peter Zijlstrafe4b04f2011-02-02 13:19:09 +01006841 __perf_remove_from_context(event);
Frederic Weisbecker889ff012010-01-09 20:04:47 +01006842 list_for_each_entry_safe(event, tmp, &ctx->flexible_groups, group_entry)
Peter Zijlstrafe4b04f2011-02-02 13:19:09 +01006843 __perf_remove_from_context(event);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02006844}
Peter Zijlstra108b02c2010-09-06 14:32:03 +02006845
6846static void perf_event_exit_cpu_context(int cpu)
6847{
6848 struct perf_event_context *ctx;
6849 struct pmu *pmu;
6850 int idx;
6851
6852 idx = srcu_read_lock(&pmus_srcu);
6853 list_for_each_entry_rcu(pmu, &pmus, entry) {
Peter Zijlstra917bdd12010-09-17 11:28:49 +02006854 ctx = &per_cpu_ptr(pmu->pmu_cpu_context, cpu)->ctx;
Peter Zijlstra108b02c2010-09-06 14:32:03 +02006855
6856 mutex_lock(&ctx->mutex);
6857 smp_call_function_single(cpu, __perf_event_exit_context, ctx, 1);
6858 mutex_unlock(&ctx->mutex);
6859 }
6860 srcu_read_unlock(&pmus_srcu, idx);
Peter Zijlstra108b02c2010-09-06 14:32:03 +02006861}
6862
Ingo Molnarcdd6c482009-09-21 12:02:48 +02006863static void perf_event_exit_cpu(int cpu)
6864{
Peter Zijlstrab28ab832010-09-06 14:48:15 +02006865 struct swevent_htable *swhash = &per_cpu(swevent_htable, cpu);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02006866
Peter Zijlstrab28ab832010-09-06 14:48:15 +02006867 mutex_lock(&swhash->hlist_mutex);
6868 swevent_hlist_release(swhash);
6869 mutex_unlock(&swhash->hlist_mutex);
Frederic Weisbecker76e1d902010-04-05 15:35:57 +02006870
Peter Zijlstra108b02c2010-09-06 14:32:03 +02006871 perf_event_exit_cpu_context(cpu);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02006872}
6873#else
6874static inline void perf_event_exit_cpu(int cpu) { }
6875#endif
6876
Peter Zijlstrac2774432010-12-08 15:29:02 +01006877static int
6878perf_reboot(struct notifier_block *notifier, unsigned long val, void *v)
6879{
6880 int cpu;
6881
6882 for_each_online_cpu(cpu)
6883 perf_event_exit_cpu(cpu);
6884
6885 return NOTIFY_OK;
6886}
6887
6888/*
6889 * Run the perf reboot notifier at the very last possible moment so that
6890 * the generic watchdog code runs as long as possible.
6891 */
6892static struct notifier_block perf_reboot_notifier = {
6893 .notifier_call = perf_reboot,
6894 .priority = INT_MIN,
6895};
6896
Ingo Molnarcdd6c482009-09-21 12:02:48 +02006897static int __cpuinit
6898perf_cpu_notify(struct notifier_block *self, unsigned long action, void *hcpu)
6899{
6900 unsigned int cpu = (long)hcpu;
6901
Peter Zijlstra144060f2011-08-01 12:49:14 +02006902 /*
6903 * Ignore the suspend/resume action; the perf_pm_notifier will
6904 * take care of that.
6905 */
6906 if (action & CPU_TASKS_FROZEN)
6907 return NOTIFY_OK;
6908
6909 switch (action) {
Ingo Molnarcdd6c482009-09-21 12:02:48 +02006910
6911 case CPU_UP_PREPARE:
Peter Zijlstra5e116372010-06-11 13:35:08 +02006912 case CPU_DOWN_FAILED:
Ingo Molnarcdd6c482009-09-21 12:02:48 +02006913 perf_event_init_cpu(cpu);
6914 break;
6915
Peter Zijlstra5e116372010-06-11 13:35:08 +02006916 case CPU_UP_CANCELED:
Ingo Molnarcdd6c482009-09-21 12:02:48 +02006917 case CPU_DOWN_PREPARE:
Ingo Molnarcdd6c482009-09-21 12:02:48 +02006918 perf_event_exit_cpu(cpu);
6919 break;
6920
6921 default:
6922 break;
6923 }
6924
6925 return NOTIFY_OK;
6926}
6927
Peter Zijlstra144060f2011-08-01 12:49:14 +02006928static void perf_pm_resume_cpu(void *unused)
6929{
6930 struct perf_cpu_context *cpuctx;
6931 struct perf_event_context *ctx;
6932 struct pmu *pmu;
6933 int idx;
6934
6935 idx = srcu_read_lock(&pmus_srcu);
6936 list_for_each_entry_rcu(pmu, &pmus, entry) {
6937 cpuctx = this_cpu_ptr(pmu->pmu_cpu_context);
6938 ctx = cpuctx->task_ctx;
6939
6940 perf_ctx_lock(cpuctx, ctx);
6941 perf_pmu_disable(cpuctx->ctx.pmu);
6942
6943 cpu_ctx_sched_out(cpuctx, EVENT_ALL);
6944 if (ctx)
6945 ctx_sched_out(ctx, cpuctx, EVENT_ALL);
6946
6947 perf_pmu_enable(cpuctx->ctx.pmu);
6948 perf_ctx_unlock(cpuctx, ctx);
6949 }
6950 srcu_read_unlock(&pmus_srcu, idx);
6951}
6952
6953static void perf_pm_suspend_cpu(void *unused)
6954{
6955 struct perf_cpu_context *cpuctx;
6956 struct perf_event_context *ctx;
6957 struct pmu *pmu;
6958 int idx;
6959
6960 idx = srcu_read_lock(&pmus_srcu);
6961 list_for_each_entry_rcu(pmu, &pmus, entry) {
6962 cpuctx = this_cpu_ptr(pmu->pmu_cpu_context);
6963 ctx = cpuctx->task_ctx;
6964
6965 perf_ctx_lock(cpuctx, ctx);
6966 perf_pmu_disable(cpuctx->ctx.pmu);
6967
6968 perf_event_sched_in(cpuctx, ctx, current);
6969
6970 perf_pmu_enable(cpuctx->ctx.pmu);
6971 perf_ctx_unlock(cpuctx, ctx);
6972 }
6973 srcu_read_unlock(&pmus_srcu, idx);
6974}
6975
6976static int perf_resume(void)
6977{
6978 get_online_cpus();
6979 smp_call_function(perf_pm_resume_cpu, NULL, 1);
6980 put_online_cpus();
6981
6982 return NOTIFY_OK;
6983}
6984
6985static int perf_suspend(void)
6986{
6987 get_online_cpus();
6988 smp_call_function(perf_pm_suspend_cpu, NULL, 1);
6989 put_online_cpus();
6990
6991 return NOTIFY_OK;
6992}
6993
6994static int perf_pm(struct notifier_block *self, unsigned long action, void *ptr)
6995{
6996 switch (action) {
6997 case PM_POST_HIBERNATION:
6998 case PM_POST_SUSPEND:
6999 return perf_resume();
7000 case PM_HIBERNATION_PREPARE:
7001 case PM_SUSPEND_PREPARE:
7002 return perf_suspend();
7003 default:
7004 return NOTIFY_DONE;
7005 }
7006}
7007
7008static struct notifier_block perf_pm_notifier = {
7009 .notifier_call = perf_pm,
7010};
7011
Ingo Molnarcdd6c482009-09-21 12:02:48 +02007012void __init perf_event_init(void)
7013{
Jason Wessel3c502e72010-11-04 17:33:01 -05007014 int ret;
7015
Peter Zijlstra2e80a822010-11-17 23:17:36 +01007016 idr_init(&pmu_idr);
7017
Paul Mackerras220b1402010-03-10 20:45:52 +11007018 perf_event_init_all_cpus();
Peter Zijlstrab0a873e2010-06-11 13:35:08 +02007019 init_srcu_struct(&pmus_srcu);
Peter Zijlstra2e80a822010-11-17 23:17:36 +01007020 perf_pmu_register(&perf_swevent, "software", PERF_TYPE_SOFTWARE);
7021 perf_pmu_register(&perf_cpu_clock, NULL, -1);
7022 perf_pmu_register(&perf_task_clock, NULL, -1);
Peter Zijlstrab0a873e2010-06-11 13:35:08 +02007023 perf_tp_register();
7024 perf_cpu_notifier(perf_cpu_notify);
Peter Zijlstrac2774432010-12-08 15:29:02 +01007025 register_reboot_notifier(&perf_reboot_notifier);
Peter Zijlstra144060f2011-08-01 12:49:14 +02007026 register_pm_notifier(&perf_pm_notifier);
Jason Wessel3c502e72010-11-04 17:33:01 -05007027
7028 ret = init_hw_breakpoint();
7029 WARN(ret, "hw_breakpoint initialization failed with: %d", ret);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02007030}
Peter Zijlstraabe43402010-11-17 23:17:37 +01007031
7032static int __init perf_event_sysfs_init(void)
7033{
7034 struct pmu *pmu;
7035 int ret;
7036
7037 mutex_lock(&pmus_lock);
7038
7039 ret = bus_register(&pmu_bus);
7040 if (ret)
7041 goto unlock;
7042
7043 list_for_each_entry(pmu, &pmus, entry) {
7044 if (!pmu->name || pmu->type < 0)
7045 continue;
7046
7047 ret = pmu_dev_alloc(pmu);
7048 WARN(ret, "Failed to register pmu: %s, reason %d\n", pmu->name, ret);
7049 }
7050 pmu_bus_running = 1;
7051 ret = 0;
7052
7053unlock:
7054 mutex_unlock(&pmus_lock);
7055
7056 return ret;
7057}
7058device_initcall(perf_event_sysfs_init);
Stephane Eraniane5d13672011-02-14 11:20:01 +02007059
7060#ifdef CONFIG_CGROUP_PERF
7061static struct cgroup_subsys_state *perf_cgroup_create(
7062 struct cgroup_subsys *ss, struct cgroup *cont)
7063{
7064 struct perf_cgroup *jc;
Stephane Eraniane5d13672011-02-14 11:20:01 +02007065
Li Zefan1b15d052011-03-03 14:26:06 +08007066 jc = kzalloc(sizeof(*jc), GFP_KERNEL);
Stephane Eraniane5d13672011-02-14 11:20:01 +02007067 if (!jc)
7068 return ERR_PTR(-ENOMEM);
7069
Stephane Eraniane5d13672011-02-14 11:20:01 +02007070 jc->info = alloc_percpu(struct perf_cgroup_info);
7071 if (!jc->info) {
7072 kfree(jc);
7073 return ERR_PTR(-ENOMEM);
7074 }
7075
Stephane Eraniane5d13672011-02-14 11:20:01 +02007076 return &jc->css;
7077}
7078
7079static void perf_cgroup_destroy(struct cgroup_subsys *ss,
7080 struct cgroup *cont)
7081{
7082 struct perf_cgroup *jc;
7083 jc = container_of(cgroup_subsys_state(cont, perf_subsys_id),
7084 struct perf_cgroup, css);
7085 free_percpu(jc->info);
7086 kfree(jc);
7087}
7088
7089static int __perf_cgroup_move(void *info)
7090{
7091 struct task_struct *task = info;
7092 perf_cgroup_switch(task, PERF_CGROUP_SWOUT | PERF_CGROUP_SWIN);
7093 return 0;
7094}
7095
Peter Zijlstra74c355f2011-05-30 16:48:06 +02007096static void
7097perf_cgroup_attach_task(struct cgroup *cgrp, struct task_struct *task)
Stephane Eraniane5d13672011-02-14 11:20:01 +02007098{
7099 task_function_call(task, __perf_cgroup_move, task);
7100}
7101
Stephane Eraniane5d13672011-02-14 11:20:01 +02007102static void perf_cgroup_exit(struct cgroup_subsys *ss, struct cgroup *cgrp,
7103 struct cgroup *old_cgrp, struct task_struct *task)
7104{
7105 /*
7106 * cgroup_exit() is called in the copy_process() failure path.
7107 * Ignore this case since the task hasn't run yet; this avoids
7108 * trying to poke half-freed task state from generic code.
7109 */
7110 if (!(task->flags & PF_EXITING))
7111 return;
7112
Peter Zijlstra74c355f2011-05-30 16:48:06 +02007113 perf_cgroup_attach_task(cgrp, task);
Stephane Eraniane5d13672011-02-14 11:20:01 +02007114}
7115
7116struct cgroup_subsys perf_subsys = {
Ingo Molnare7e7ee22011-05-04 08:42:29 +02007117 .name = "perf_event",
7118 .subsys_id = perf_subsys_id,
7119 .create = perf_cgroup_create,
7120 .destroy = perf_cgroup_destroy,
7121 .exit = perf_cgroup_exit,
Peter Zijlstra74c355f2011-05-30 16:48:06 +02007122 .attach_task = perf_cgroup_attach_task,
Stephane Eraniane5d13672011-02-14 11:20:01 +02007123};
7124#endif /* CONFIG_CGROUP_PERF */