/*
 * Performance events x86 architecture code
 *
 *  Copyright (C) 2008 Thomas Gleixner <tglx@linutronix.de>
 *  Copyright (C) 2008-2009 Red Hat, Inc., Ingo Molnar
 *  Copyright (C) 2009 Jaswinder Singh Rajput
 *  Copyright (C) 2009 Advanced Micro Devices, Inc., Robert Richter
 *  Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
 *  Copyright (C) 2009 Intel Corporation, <markus.t.metzger@intel.com>
 *  Copyright (C) 2009 Google, Inc., Stephane Eranian
 *
 *  For licencing details see kernel-base/COPYING
 */

#include <linux/perf_event.h>
#include <linux/capability.h>
#include <linux/notifier.h>
#include <linux/hardirq.h>
#include <linux/kprobes.h>
#include <linux/module.h>
#include <linux/kdebug.h>
#include <linux/sched.h>
#include <linux/uaccess.h>
#include <linux/slab.h>
#include <linux/cpu.h>
#include <linux/bitops.h>

#include <asm/apic.h>
#include <asm/stacktrace.h>
#include <asm/nmi.h>
#include <asm/compat.h>
#include <asm/smp.h>
#include <asm/alternative.h>

#include "perf_event.h"

#if 0
#undef wrmsrl
#define wrmsrl(msr, val)                                        \
do {                                                            \
        trace_printk("wrmsrl(%lx, %lx)\n", (unsigned long)(msr),\
                        (unsigned long)(val));                  \
        native_write_msr((msr), (u32)((u64)(val)),              \
                        (u32)((u64)(val) >> 32));               \
} while (0)
#endif

struct x86_pmu x86_pmu __read_mostly;

DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events) = {
        .enabled = 1,
};

u64 __read_mostly hw_cache_event_ids
                                [PERF_COUNT_HW_CACHE_MAX]
                                [PERF_COUNT_HW_CACHE_OP_MAX]
                                [PERF_COUNT_HW_CACHE_RESULT_MAX];
u64 __read_mostly hw_cache_extra_regs
                                [PERF_COUNT_HW_CACHE_MAX]
                                [PERF_COUNT_HW_CACHE_OP_MAX]
                                [PERF_COUNT_HW_CACHE_RESULT_MAX];

/*
 * Propagate event elapsed time into the generic event.
 * Can only be executed on the CPU where the event is active.
 * Returns the delta events processed.
 */
u64 x86_perf_event_update(struct perf_event *event)
{
        struct hw_perf_event *hwc = &event->hw;
        int shift = 64 - x86_pmu.cntval_bits;
        u64 prev_raw_count, new_raw_count;
        int idx = hwc->idx;
        s64 delta;

        if (idx == X86_PMC_IDX_FIXED_BTS)
                return 0;

        /*
         * Careful: an NMI might modify the previous event value.
         *
         * Our tactic to handle this is to first atomically read and
         * exchange a new raw count - then add that new-prev delta
         * count to the generic event atomically:
         */
again:
        prev_raw_count = local64_read(&hwc->prev_count);
        rdmsrl(hwc->event_base, new_raw_count);

        if (local64_cmpxchg(&hwc->prev_count, prev_raw_count,
                                        new_raw_count) != prev_raw_count)
                goto again;

        /*
         * Now we have the new raw value and have updated the prev
         * timestamp already. We can now calculate the elapsed delta
         * (event-)time and add that to the generic event.
         *
         * Careful, not all hw sign-extends above the physical width
         * of the count.
         */
        delta = (new_raw_count << shift) - (prev_raw_count << shift);
        delta >>= shift;

        local64_add(delta, &event->count);
        local64_sub(delta, &hwc->period_left);

        return new_raw_count;
}

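/*
 * Illustrative note (added here, not part of the original source): the
 * shift-left/arithmetic-shift-right pair above sign-extends the raw
 * counter difference to the full 64 bits.  For example, assuming
 * 48-bit counters (cntval_bits == 48, so shift == 16), a wrap from
 * prev_raw_count == 0xffffffffffff to new_raw_count == 0x5 gives
 *
 *      delta = ((0x5 << 16) - (0xffffffffffff << 16)) >> 16 = 6
 *
 * i.e. six increments, rather than a huge bogus positive value.
 */
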
/*
 * Find and validate any extra registers to set up.
 */
static int x86_pmu_extra_regs(u64 config, struct perf_event *event)
{
        struct hw_perf_event_extra *reg;
        struct extra_reg *er;

        reg = &event->hw.extra_reg;

        if (!x86_pmu.extra_regs)
                return 0;

        for (er = x86_pmu.extra_regs; er->msr; er++) {
                if (er->event != (config & er->config_mask))
                        continue;
                if (event->attr.config1 & ~er->valid_mask)
                        return -EINVAL;

                reg->idx = er->idx;
                reg->config = event->attr.config1;
                reg->reg = er->msr;
                break;
        }
        return 0;
}

static atomic_t active_events;
static DEFINE_MUTEX(pmc_reserve_mutex);

#ifdef CONFIG_X86_LOCAL_APIC

static bool reserve_pmc_hardware(void)
{
        int i;

        for (i = 0; i < x86_pmu.num_counters; i++) {
                if (!reserve_perfctr_nmi(x86_pmu_event_addr(i)))
                        goto perfctr_fail;
        }

        for (i = 0; i < x86_pmu.num_counters; i++) {
                if (!reserve_evntsel_nmi(x86_pmu_config_addr(i)))
                        goto eventsel_fail;
        }

        return true;

eventsel_fail:
        for (i--; i >= 0; i--)
                release_evntsel_nmi(x86_pmu_config_addr(i));

        i = x86_pmu.num_counters;

perfctr_fail:
        for (i--; i >= 0; i--)
                release_perfctr_nmi(x86_pmu_event_addr(i));

        return false;
}

static void release_pmc_hardware(void)
{
        int i;

        for (i = 0; i < x86_pmu.num_counters; i++) {
                release_perfctr_nmi(x86_pmu_event_addr(i));
                release_evntsel_nmi(x86_pmu_config_addr(i));
        }
}

#else

static bool reserve_pmc_hardware(void) { return true; }
static void release_pmc_hardware(void) {}

#endif

static bool check_hw_exists(void)
{
        u64 val, val_new = 0;
        int i, reg, ret = 0;

        /*
         * Check to see if the BIOS enabled any of the counters, if so
         * complain and bail.
         */
        for (i = 0; i < x86_pmu.num_counters; i++) {
                reg = x86_pmu_config_addr(i);
                ret = rdmsrl_safe(reg, &val);
                if (ret)
                        goto msr_fail;
                if (val & ARCH_PERFMON_EVENTSEL_ENABLE)
                        goto bios_fail;
        }

        if (x86_pmu.num_counters_fixed) {
                reg = MSR_ARCH_PERFMON_FIXED_CTR_CTRL;
                ret = rdmsrl_safe(reg, &val);
                if (ret)
                        goto msr_fail;
                for (i = 0; i < x86_pmu.num_counters_fixed; i++) {
                        if (val & (0x03 << i*4))
                                goto bios_fail;
                }
        }

        /*
         * Now write a value and read it back to see if it matches,
         * this is needed to detect certain hardware emulators (qemu/kvm)
         * that don't trap on the MSR access and always return 0s.
         */
        val = 0xabcdUL;
        ret = checking_wrmsrl(x86_pmu_event_addr(0), val);
        ret |= rdmsrl_safe(x86_pmu_event_addr(0), &val_new);
        if (ret || val != val_new)
                goto msr_fail;

        return true;

bios_fail:
        /*
         * We still allow the PMU driver to operate:
         */
        printk(KERN_CONT "Broken BIOS detected, complain to your hardware vendor.\n");
        printk(KERN_ERR FW_BUG "the BIOS has corrupted hw-PMU resources (MSR %x is %Lx)\n", reg, val);

        return true;

msr_fail:
        printk(KERN_CONT "Broken PMU hardware detected, using software events only.\n");

        return false;
}

static void hw_perf_event_destroy(struct perf_event *event)
{
        if (atomic_dec_and_mutex_lock(&active_events, &pmc_reserve_mutex)) {
                release_pmc_hardware();
                release_ds_buffers();
                mutex_unlock(&pmc_reserve_mutex);
        }
}

static inline int x86_pmu_initialized(void)
{
        return x86_pmu.handle_irq != NULL;
}

static inline int
set_ext_hw_attr(struct hw_perf_event *hwc, struct perf_event *event)
{
        struct perf_event_attr *attr = &event->attr;
        unsigned int cache_type, cache_op, cache_result;
        u64 config, val;

        config = attr->config;

        cache_type = (config >> 0) & 0xff;
        if (cache_type >= PERF_COUNT_HW_CACHE_MAX)
                return -EINVAL;

        cache_op = (config >> 8) & 0xff;
        if (cache_op >= PERF_COUNT_HW_CACHE_OP_MAX)
                return -EINVAL;

        cache_result = (config >> 16) & 0xff;
        if (cache_result >= PERF_COUNT_HW_CACHE_RESULT_MAX)
                return -EINVAL;

        val = hw_cache_event_ids[cache_type][cache_op][cache_result];

        if (val == 0)
                return -ENOENT;

        if (val == -1)
                return -EINVAL;

        hwc->config |= val;
        attr->config1 = hw_cache_extra_regs[cache_type][cache_op][cache_result];
        return x86_pmu_extra_regs(val, event);
}

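/*
 * Illustrative note (added here, not part of the original source): the
 * generic cache event encoding unpacked above packs three byte-wide
 * fields into attr->config, roughly:
 *
 *      config = cache_type | (cache_op << 8) | (cache_result << 16)
 *
 * e.g. config == 0x10002 selects cache_type 0x02 (LL), cache_op 0x00
 * (READ) and cache_result 0x01 (MISS), i.e. last-level-cache read
 * misses, assuming the usual PERF_COUNT_HW_CACHE_* numbering.
 */
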
int x86_setup_perfctr(struct perf_event *event)
{
        struct perf_event_attr *attr = &event->attr;
        struct hw_perf_event *hwc = &event->hw;
        u64 config;

        if (!is_sampling_event(event)) {
                hwc->sample_period = x86_pmu.max_period;
                hwc->last_period = hwc->sample_period;
                local64_set(&hwc->period_left, hwc->sample_period);
        } else {
                /*
                 * If we have a PMU initialized but no APIC
                 * interrupts, we cannot sample hardware
                 * events (user-space has to fall back and
                 * sample via a hrtimer based software event):
                 */
                if (!x86_pmu.apic)
                        return -EOPNOTSUPP;
        }

        /*
         * Do not allow config1 (extended registers) to propagate,
         * there's no sane user-space generalization yet:
         */
        if (attr->type == PERF_TYPE_RAW)
                return 0;

        if (attr->type == PERF_TYPE_HW_CACHE)
                return set_ext_hw_attr(hwc, event);

        if (attr->config >= x86_pmu.max_events)
                return -EINVAL;

        /*
         * The generic map:
         */
        config = x86_pmu.event_map(attr->config);

        if (config == 0)
                return -ENOENT;

        if (config == -1LL)
                return -EINVAL;

        /*
         * Branch tracing:
         */
        if (attr->config == PERF_COUNT_HW_BRANCH_INSTRUCTIONS &&
            !attr->freq && hwc->sample_period == 1) {
                /* BTS is not supported by this architecture. */
                if (!x86_pmu.bts_active)
                        return -EOPNOTSUPP;

                /* BTS is currently only allowed for user-mode. */
                if (!attr->exclude_kernel)
                        return -EOPNOTSUPP;
        }

        hwc->config |= config;

        return 0;
}

int x86_pmu_hw_config(struct perf_event *event)
{
        if (event->attr.precise_ip) {
                int precise = 0;

                /* Support for constant skid */
                if (x86_pmu.pebs_active) {
                        precise++;

                        /* Support for IP fixup */
                        if (x86_pmu.lbr_nr)
                                precise++;
                }

                if (event->attr.precise_ip > precise)
                        return -EOPNOTSUPP;
        }

        /*
         * Generate PMC IRQs:
         * (keep 'enabled' bit clear for now)
         */
        event->hw.config = ARCH_PERFMON_EVENTSEL_INT;

        /*
         * Count user and OS events unless requested not to
         */
        if (!event->attr.exclude_user)
                event->hw.config |= ARCH_PERFMON_EVENTSEL_USR;
        if (!event->attr.exclude_kernel)
                event->hw.config |= ARCH_PERFMON_EVENTSEL_OS;

        if (event->attr.type == PERF_TYPE_RAW)
                event->hw.config |= event->attr.config & X86_RAW_EVENT_MASK;

        return x86_setup_perfctr(event);
}

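/*
 * Illustrative note (added here, not part of the original source): per
 * the precise_ip check above, precise_ip == 1 is only accepted when
 * PEBS is active (constant skid), and precise_ip == 2 additionally
 * requires LBR entries (x86_pmu.lbr_nr) so the sampled IP can be
 * fixed up; anything higher is rejected with -EOPNOTSUPP.
 */
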
/*
 * Setup the hardware configuration for a given attr_type
 */
static int __x86_pmu_event_init(struct perf_event *event)
{
        int err;

        if (!x86_pmu_initialized())
                return -ENODEV;

        err = 0;
        if (!atomic_inc_not_zero(&active_events)) {
                mutex_lock(&pmc_reserve_mutex);
                if (atomic_read(&active_events) == 0) {
                        if (!reserve_pmc_hardware())
                                err = -EBUSY;
                        else
                                reserve_ds_buffers();
                }
                if (!err)
                        atomic_inc(&active_events);
                mutex_unlock(&pmc_reserve_mutex);
        }
        if (err)
                return err;

        event->destroy = hw_perf_event_destroy;

        event->hw.idx = -1;
        event->hw.last_cpu = -1;
        event->hw.last_tag = ~0ULL;

        /* mark unused */
        event->hw.extra_reg.idx = EXTRA_REG_NONE;

        return x86_pmu.hw_config(event);
}

void x86_pmu_disable_all(void)
{
        struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
        int idx;

        for (idx = 0; idx < x86_pmu.num_counters; idx++) {
                u64 val;

                if (!test_bit(idx, cpuc->active_mask))
                        continue;
                rdmsrl(x86_pmu_config_addr(idx), val);
                if (!(val & ARCH_PERFMON_EVENTSEL_ENABLE))
                        continue;
                val &= ~ARCH_PERFMON_EVENTSEL_ENABLE;
                wrmsrl(x86_pmu_config_addr(idx), val);
        }
}

static void x86_pmu_disable(struct pmu *pmu)
{
        struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);

        if (!x86_pmu_initialized())
                return;

        if (!cpuc->enabled)
                return;

        cpuc->n_added = 0;
        cpuc->enabled = 0;
        barrier();

        x86_pmu.disable_all();
}

void x86_pmu_enable_all(int added)
{
        struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
        int idx;

        for (idx = 0; idx < x86_pmu.num_counters; idx++) {
                struct hw_perf_event *hwc = &cpuc->events[idx]->hw;

                if (!test_bit(idx, cpuc->active_mask))
                        continue;

                __x86_pmu_enable_event(hwc, ARCH_PERFMON_EVENTSEL_ENABLE);
        }
}

static struct pmu pmu;

static inline int is_x86_event(struct perf_event *event)
{
        return event->pmu == &pmu;
}

int x86_schedule_events(struct cpu_hw_events *cpuc, int n, int *assign)
{
        struct event_constraint *c, *constraints[X86_PMC_IDX_MAX];
        unsigned long used_mask[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
        int i, j, w, wmax, num = 0;
        struct hw_perf_event *hwc;

        bitmap_zero(used_mask, X86_PMC_IDX_MAX);

        for (i = 0; i < n; i++) {
                c = x86_pmu.get_event_constraints(cpuc, cpuc->event_list[i]);
                constraints[i] = c;
        }

        /*
         * fastpath, try to reuse previous register
         */
        for (i = 0; i < n; i++) {
                hwc = &cpuc->event_list[i]->hw;
                c = constraints[i];

                /* never assigned */
                if (hwc->idx == -1)
                        break;

                /* constraint still honored */
                if (!test_bit(hwc->idx, c->idxmsk))
                        break;

                /* not already used */
                if (test_bit(hwc->idx, used_mask))
                        break;

                __set_bit(hwc->idx, used_mask);
                if (assign)
                        assign[i] = hwc->idx;
        }
        if (i == n)
                goto done;

        /*
         * begin slow path
         */

        bitmap_zero(used_mask, X86_PMC_IDX_MAX);

        /*
         * weight = number of possible counters
         *
         * 1    = most constrained, only works on one counter
         * wmax = least constrained, works on any counter
         *
         * assign events to counters starting with most
         * constrained events.
         */
        wmax = x86_pmu.num_counters;

        /*
         * when fixed event counters are present,
         * wmax is incremented by 1 to account
         * for one more choice
         */
        if (x86_pmu.num_counters_fixed)
                wmax++;

        for (w = 1, num = n; num && w <= wmax; w++) {
                /* for each event */
                for (i = 0; num && i < n; i++) {
                        c = constraints[i];
                        hwc = &cpuc->event_list[i]->hw;

                        if (c->weight != w)
                                continue;

                        for_each_set_bit(j, c->idxmsk, X86_PMC_IDX_MAX) {
                                if (!test_bit(j, used_mask))
                                        break;
                        }

                        if (j == X86_PMC_IDX_MAX)
                                break;

                        __set_bit(j, used_mask);

                        if (assign)
                                assign[i] = j;
                        num--;
                }
        }
done:
        /*
         * scheduling failed or is just a simulation,
         * free resources if necessary
         */
        if (!assign || num) {
                for (i = 0; i < n; i++) {
                        if (x86_pmu.put_event_constraints)
                                x86_pmu.put_event_constraints(cpuc, cpuc->event_list[i]);
                }
        }
        return num ? -ENOSPC : 0;
}

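/*
 * Illustrative note (added here, not part of the original source): the
 * slow path above is a greedy assignment ordered by constraint weight.
 * E.g. with two events where event A may only use counter 0 (weight 1)
 * and event B may use counters 0-3 (weight 4), A is placed at w == 1
 * and takes counter 0, then B is placed at w == 4 on the first free
 * counter, so both get scheduled; placing B first could have stolen
 * counter 0 and forced a spurious failure.
 */
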
/*
 * dogrp: true if must collect siblings events (group)
 * returns total number of events and error code
 */
static int collect_events(struct cpu_hw_events *cpuc, struct perf_event *leader, bool dogrp)
{
        struct perf_event *event;
        int n, max_count;

        max_count = x86_pmu.num_counters + x86_pmu.num_counters_fixed;

        /* current number of events already accepted */
        n = cpuc->n_events;

        if (is_x86_event(leader)) {
                if (n >= max_count)
                        return -ENOSPC;
                cpuc->event_list[n] = leader;
                n++;
        }
        if (!dogrp)
                return n;

        list_for_each_entry(event, &leader->sibling_list, group_entry) {
                if (!is_x86_event(event) ||
                    event->state <= PERF_EVENT_STATE_OFF)
                        continue;

                if (n >= max_count)
                        return -ENOSPC;

                cpuc->event_list[n] = event;
                n++;
        }
        return n;
}

static inline void x86_assign_hw_event(struct perf_event *event,
                                struct cpu_hw_events *cpuc, int i)
{
        struct hw_perf_event *hwc = &event->hw;

        hwc->idx = cpuc->assign[i];
        hwc->last_cpu = smp_processor_id();
        hwc->last_tag = ++cpuc->tags[i];

        if (hwc->idx == X86_PMC_IDX_FIXED_BTS) {
                hwc->config_base = 0;
                hwc->event_base = 0;
        } else if (hwc->idx >= X86_PMC_IDX_FIXED) {
                hwc->config_base = MSR_ARCH_PERFMON_FIXED_CTR_CTRL;
                hwc->event_base = MSR_ARCH_PERFMON_FIXED_CTR0 + (hwc->idx - X86_PMC_IDX_FIXED);
        } else {
                hwc->config_base = x86_pmu_config_addr(hwc->idx);
                hwc->event_base = x86_pmu_event_addr(hwc->idx);
        }
}

static inline int match_prev_assignment(struct hw_perf_event *hwc,
                                        struct cpu_hw_events *cpuc,
                                        int i)
{
        return hwc->idx == cpuc->assign[i] &&
                hwc->last_cpu == smp_processor_id() &&
                hwc->last_tag == cpuc->tags[i];
}

static void x86_pmu_start(struct perf_event *event, int flags);

static void x86_pmu_enable(struct pmu *pmu)
{
        struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
        struct perf_event *event;
        struct hw_perf_event *hwc;
        int i, added = cpuc->n_added;

        if (!x86_pmu_initialized())
                return;

        if (cpuc->enabled)
                return;

        if (cpuc->n_added) {
                int n_running = cpuc->n_events - cpuc->n_added;
                /*
                 * apply assignment obtained either from
                 * hw_perf_group_sched_in() or x86_pmu_enable()
                 *
                 * step1: save events moving to new counters
                 * step2: reprogram moved events into new counters
                 */
                for (i = 0; i < n_running; i++) {
                        event = cpuc->event_list[i];
                        hwc = &event->hw;

                        /*
                         * we can avoid reprogramming counter if:
                         * - assigned same counter as last time
                         * - running on same CPU as last time
                         * - no other event has used the counter since
                         */
                        if (hwc->idx == -1 ||
                            match_prev_assignment(hwc, cpuc, i))
                                continue;

                        /*
                         * Ensure we don't accidentally enable a stopped
                         * counter simply because we rescheduled.
                         */
                        if (hwc->state & PERF_HES_STOPPED)
                                hwc->state |= PERF_HES_ARCH;

                        x86_pmu_stop(event, PERF_EF_UPDATE);
                }

                for (i = 0; i < cpuc->n_events; i++) {
                        event = cpuc->event_list[i];
                        hwc = &event->hw;

                        if (!match_prev_assignment(hwc, cpuc, i))
                                x86_assign_hw_event(event, cpuc, i);
                        else if (i < n_running)
                                continue;

                        if (hwc->state & PERF_HES_ARCH)
                                continue;

                        x86_pmu_start(event, PERF_EF_RELOAD);
                }
                cpuc->n_added = 0;
                perf_events_lapic_init();
        }

        cpuc->enabled = 1;
        barrier();

        x86_pmu.enable_all(added);
}

static DEFINE_PER_CPU(u64 [X86_PMC_IDX_MAX], pmc_prev_left);

/*
 * Set the next IRQ period, based on the hwc->period_left value.
 * To be called with the event disabled in hw:
 */
int x86_perf_event_set_period(struct perf_event *event)
{
        struct hw_perf_event *hwc = &event->hw;
        s64 left = local64_read(&hwc->period_left);
        s64 period = hwc->sample_period;
        int ret = 0, idx = hwc->idx;

        if (idx == X86_PMC_IDX_FIXED_BTS)
                return 0;

        /*
         * If we are way outside a reasonable range then just skip forward:
         */
        if (unlikely(left <= -period)) {
                left = period;
                local64_set(&hwc->period_left, left);
                hwc->last_period = period;
                ret = 1;
        }

        if (unlikely(left <= 0)) {
                left += period;
                local64_set(&hwc->period_left, left);
                hwc->last_period = period;
                ret = 1;
        }
        /*
         * Quirk: certain CPUs don't like it if just 1 hw_event is left:
         */
        if (unlikely(left < 2))
                left = 2;

        if (left > x86_pmu.max_period)
                left = x86_pmu.max_period;

        per_cpu(pmc_prev_left[idx], smp_processor_id()) = left;

        /*
         * The hw event starts counting from this event offset,
         * mark it to be able to extract future deltas:
         */
        local64_set(&hwc->prev_count, (u64)-left);

        wrmsrl(hwc->event_base, (u64)(-left) & x86_pmu.cntval_mask);

        /*
         * Due to erratum on certain CPUs we need
         * a second write to be sure the register
         * is updated properly
         */
        if (x86_pmu.perfctr_second_write) {
                wrmsrl(hwc->event_base,
                        (u64)(-left) & x86_pmu.cntval_mask);
        }

        perf_event_update_userpage(event);

        return ret;
}

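/*
 * Illustrative note (added here, not part of the original source): the
 * counter is programmed with the negated period so that it overflows
 * after 'left' increments.  E.g. with left == 100000 and 48-bit
 * counters, (u64)(-left) & cntval_mask == 0xfffffffe7960 is written;
 * 100000 increments later the counter wraps and raises the PMI.
 */
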
void x86_pmu_enable_event(struct perf_event *event)
{
        if (__this_cpu_read(cpu_hw_events.enabled))
                __x86_pmu_enable_event(&event->hw,
                                       ARCH_PERFMON_EVENTSEL_ENABLE);
}

/*
 * Add a single event to the PMU.
 *
 * The event is added to the group of enabled events
 * but only if it can be scheduled with existing events.
 */
static int x86_pmu_add(struct perf_event *event, int flags)
{
        struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
        struct hw_perf_event *hwc;
        int assign[X86_PMC_IDX_MAX];
        int n, n0, ret;

        hwc = &event->hw;

        perf_pmu_disable(event->pmu);
        n0 = cpuc->n_events;
        ret = n = collect_events(cpuc, event, false);
        if (ret < 0)
                goto out;

        hwc->state = PERF_HES_UPTODATE | PERF_HES_STOPPED;
        if (!(flags & PERF_EF_START))
                hwc->state |= PERF_HES_ARCH;

        /*
         * If group events scheduling transaction was started,
         * skip the schedulability test here, it will be performed
         * at commit time (->commit_txn) as a whole
         */
        if (cpuc->group_flag & PERF_EVENT_TXN)
                goto done_collect;

        ret = x86_pmu.schedule_events(cpuc, n, assign);
        if (ret)
                goto out;
        /*
         * copy new assignment, now we know it is possible
         * will be used by hw_perf_enable()
         */
        memcpy(cpuc->assign, assign, n*sizeof(int));

done_collect:
        cpuc->n_events = n;
        cpuc->n_added += n - n0;
        cpuc->n_txn += n - n0;

        ret = 0;
out:
        perf_pmu_enable(event->pmu);
        return ret;
}

static void x86_pmu_start(struct perf_event *event, int flags)
{
        struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
        int idx = event->hw.idx;

        if (WARN_ON_ONCE(!(event->hw.state & PERF_HES_STOPPED)))
                return;

        if (WARN_ON_ONCE(idx == -1))
                return;

        if (flags & PERF_EF_RELOAD) {
                WARN_ON_ONCE(!(event->hw.state & PERF_HES_UPTODATE));
                x86_perf_event_set_period(event);
        }

        event->hw.state = 0;

        cpuc->events[idx] = event;
        __set_bit(idx, cpuc->active_mask);
        __set_bit(idx, cpuc->running);
        x86_pmu.enable(event);
        perf_event_update_userpage(event);
}

void perf_event_print_debug(void)
{
        u64 ctrl, status, overflow, pmc_ctrl, pmc_count, prev_left, fixed;
        u64 pebs;
        struct cpu_hw_events *cpuc;
        unsigned long flags;
        int cpu, idx;

        if (!x86_pmu.num_counters)
                return;

        local_irq_save(flags);

        cpu = smp_processor_id();
        cpuc = &per_cpu(cpu_hw_events, cpu);

        if (x86_pmu.version >= 2) {
                rdmsrl(MSR_CORE_PERF_GLOBAL_CTRL, ctrl);
                rdmsrl(MSR_CORE_PERF_GLOBAL_STATUS, status);
                rdmsrl(MSR_CORE_PERF_GLOBAL_OVF_CTRL, overflow);
                rdmsrl(MSR_ARCH_PERFMON_FIXED_CTR_CTRL, fixed);
                rdmsrl(MSR_IA32_PEBS_ENABLE, pebs);

                pr_info("\n");
                pr_info("CPU#%d: ctrl:       %016llx\n", cpu, ctrl);
                pr_info("CPU#%d: status:     %016llx\n", cpu, status);
                pr_info("CPU#%d: overflow:   %016llx\n", cpu, overflow);
                pr_info("CPU#%d: fixed:      %016llx\n", cpu, fixed);
                pr_info("CPU#%d: pebs:       %016llx\n", cpu, pebs);
        }
        pr_info("CPU#%d: active:     %016llx\n", cpu, *(u64 *)cpuc->active_mask);

        for (idx = 0; idx < x86_pmu.num_counters; idx++) {
                rdmsrl(x86_pmu_config_addr(idx), pmc_ctrl);
                rdmsrl(x86_pmu_event_addr(idx), pmc_count);

                prev_left = per_cpu(pmc_prev_left[idx], cpu);

                pr_info("CPU#%d:   gen-PMC%d ctrl:  %016llx\n",
                        cpu, idx, pmc_ctrl);
                pr_info("CPU#%d:   gen-PMC%d count: %016llx\n",
                        cpu, idx, pmc_count);
                pr_info("CPU#%d:   gen-PMC%d left:  %016llx\n",
                        cpu, idx, prev_left);
        }
        for (idx = 0; idx < x86_pmu.num_counters_fixed; idx++) {
                rdmsrl(MSR_ARCH_PERFMON_FIXED_CTR0 + idx, pmc_count);

                pr_info("CPU#%d: fixed-PMC%d count: %016llx\n",
                        cpu, idx, pmc_count);
        }
        local_irq_restore(flags);
}

void x86_pmu_stop(struct perf_event *event, int flags)
{
        struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
        struct hw_perf_event *hwc = &event->hw;

        if (__test_and_clear_bit(hwc->idx, cpuc->active_mask)) {
                x86_pmu.disable(event);
                cpuc->events[hwc->idx] = NULL;
                WARN_ON_ONCE(hwc->state & PERF_HES_STOPPED);
                hwc->state |= PERF_HES_STOPPED;
        }

        if ((flags & PERF_EF_UPDATE) && !(hwc->state & PERF_HES_UPTODATE)) {
                /*
                 * Drain the remaining delta count out of an event
                 * that we are disabling:
                 */
                x86_perf_event_update(event);
                hwc->state |= PERF_HES_UPTODATE;
        }
}

static void x86_pmu_del(struct perf_event *event, int flags)
{
        struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
        int i;

        /*
         * If we're called during a txn, we don't need to do anything.
         * The events never got scheduled and ->cancel_txn will truncate
         * the event_list.
         */
        if (cpuc->group_flag & PERF_EVENT_TXN)
                return;

        x86_pmu_stop(event, PERF_EF_UPDATE);

        for (i = 0; i < cpuc->n_events; i++) {
                if (event == cpuc->event_list[i]) {

                        if (x86_pmu.put_event_constraints)
                                x86_pmu.put_event_constraints(cpuc, event);

                        while (++i < cpuc->n_events)
                                cpuc->event_list[i-1] = cpuc->event_list[i];

                        --cpuc->n_events;
                        break;
                }
        }
        perf_event_update_userpage(event);
}

int x86_pmu_handle_irq(struct pt_regs *regs)
{
        struct perf_sample_data data;
        struct cpu_hw_events *cpuc;
        struct perf_event *event;
        int idx, handled = 0;
        u64 val;

        perf_sample_data_init(&data, 0);

        cpuc = &__get_cpu_var(cpu_hw_events);

        /*
         * Some chipsets need to unmask the LVTPC in a particular spot
         * inside the nmi handler.  As a result, the unmasking was pushed
         * into all the nmi handlers.
         *
         * This generic handler doesn't seem to have any issues where the
         * unmasking occurs so it was left at the top.
         */
        apic_write(APIC_LVTPC, APIC_DM_NMI);

        for (idx = 0; idx < x86_pmu.num_counters; idx++) {
                if (!test_bit(idx, cpuc->active_mask)) {
                        /*
                         * Though we deactivated the counter some cpus
                         * might still deliver spurious interrupts still
                         * in flight. Catch them:
                         */
                        if (__test_and_clear_bit(idx, cpuc->running))
                                handled++;
                        continue;
                }

                event = cpuc->events[idx];

                val = x86_perf_event_update(event);
                if (val & (1ULL << (x86_pmu.cntval_bits - 1)))
                        continue;

                /*
                 * event overflow
                 */
                handled++;
                data.period = event->hw.last_period;

                if (!x86_perf_event_set_period(event))
                        continue;

                if (perf_event_overflow(event, &data, regs))
                        x86_pmu_stop(event, 0);
        }

        if (handled)
                inc_irq_stat(apic_perf_irqs);

        return handled;
}

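/*
 * Illustrative note (added here, not part of the original source): the
 * test against bit (cntval_bits - 1) above distinguishes overflowed
 * counters.  A counter that has not yet overflowed still holds the
 * "negative" value programmed by x86_perf_event_set_period(), so its
 * top implemented bit is set; once it wraps, that bit clears and the
 * event is treated as having overflowed.
 */
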
void perf_events_lapic_init(void)
{
        if (!x86_pmu.apic || !x86_pmu_initialized())
                return;

        /*
         * Always use NMI for PMU
         */
        apic_write(APIC_LVTPC, APIC_DM_NMI);
}

static int __kprobes
perf_event_nmi_handler(unsigned int cmd, struct pt_regs *regs)
{
        if (!atomic_read(&active_events))
                return NMI_DONE;

        return x86_pmu.handle_irq(regs);
}

struct event_constraint emptyconstraint;
struct event_constraint unconstrained;

static int __cpuinit
x86_pmu_notifier(struct notifier_block *self, unsigned long action, void *hcpu)
{
        unsigned int cpu = (long)hcpu;
        struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
        int ret = NOTIFY_OK;

        switch (action & ~CPU_TASKS_FROZEN) {
        case CPU_UP_PREPARE:
                cpuc->kfree_on_online = NULL;
                if (x86_pmu.cpu_prepare)
                        ret = x86_pmu.cpu_prepare(cpu);
                break;

        case CPU_STARTING:
                if (x86_pmu.cpu_starting)
                        x86_pmu.cpu_starting(cpu);
                break;

        case CPU_ONLINE:
                kfree(cpuc->kfree_on_online);
                break;

        case CPU_DYING:
                if (x86_pmu.cpu_dying)
                        x86_pmu.cpu_dying(cpu);
                break;

        case CPU_UP_CANCELED:
        case CPU_DEAD:
                if (x86_pmu.cpu_dead)
                        x86_pmu.cpu_dead(cpu);
                break;

        default:
                break;
        }

        return ret;
}

static void __init pmu_check_apic(void)
{
        if (cpu_has_apic)
                return;

        x86_pmu.apic = 0;
        pr_info("no APIC, boot with the \"lapic\" boot parameter to force-enable it.\n");
        pr_info("no hardware sampling interrupt available.\n");
}

Yinghai Ludda99112011-01-21 15:30:01 -08001124static int __init init_hw_perf_events(void)
Jaswinder Singh Rajputb56a3802009-02-27 18:09:09 +05301125{
Peter Zijlstrab622d642010-02-01 15:36:30 +01001126 struct event_constraint *c;
Robert Richter72eae042009-04-29 12:47:10 +02001127 int err;
1128
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001129 pr_info("Performance Events: ");
Ingo Molnar1123e3a2009-05-29 11:25:09 +02001130
Jaswinder Singh Rajputb56a3802009-02-27 18:09:09 +05301131 switch (boot_cpu_data.x86_vendor) {
1132 case X86_VENDOR_INTEL:
Robert Richter72eae042009-04-29 12:47:10 +02001133 err = intel_pmu_init();
Jaswinder Singh Rajputb56a3802009-02-27 18:09:09 +05301134 break;
Jaswinder Singh Rajputf87ad352009-02-27 20:15:14 +05301135 case X86_VENDOR_AMD:
Robert Richter72eae042009-04-29 12:47:10 +02001136 err = amd_pmu_init();
Jaswinder Singh Rajputf87ad352009-02-27 20:15:14 +05301137 break;
Robert Richter41389602009-04-29 12:47:00 +02001138 default:
Peter Zijlstra004417a2010-11-25 18:38:29 +01001139 return 0;
Jaswinder Singh Rajputb56a3802009-02-27 18:09:09 +05301140 }
Ingo Molnar1123e3a2009-05-29 11:25:09 +02001141 if (err != 0) {
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001142 pr_cont("no PMU driver, software events only.\n");
Peter Zijlstra004417a2010-11-25 18:38:29 +01001143 return 0;
Ingo Molnar1123e3a2009-05-29 11:25:09 +02001144 }
Jaswinder Singh Rajputb56a3802009-02-27 18:09:09 +05301145
Cyrill Gorcunov12558032009-12-10 19:56:34 +03001146 pmu_check_apic();
1147
Don Zickus33c6d6a2010-11-22 16:55:23 -05001148 /* sanity check that the hardware exists or is emulated */
Peter Zijlstra44072042010-12-08 15:56:23 +01001149 if (!check_hw_exists())
Peter Zijlstra004417a2010-11-25 18:38:29 +01001150 return 0;
Don Zickus33c6d6a2010-11-22 16:55:23 -05001151
Ingo Molnar1123e3a2009-05-29 11:25:09 +02001152 pr_cont("%s PMU driver.\n", x86_pmu.name);
Robert Richterfaa28ae2009-04-29 12:47:13 +02001153
Peter Zijlstra3c447802010-03-04 21:49:01 +01001154 if (x86_pmu.quirks)
1155 x86_pmu.quirks();
1156
Robert Richter948b1bb2010-03-29 18:36:50 +02001157 if (x86_pmu.num_counters > X86_PMC_MAX_GENERIC) {
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001158 WARN(1, KERN_ERR "hw perf events %d > max(%d), clipping!",
Robert Richter948b1bb2010-03-29 18:36:50 +02001159 x86_pmu.num_counters, X86_PMC_MAX_GENERIC);
1160 x86_pmu.num_counters = X86_PMC_MAX_GENERIC;
Ingo Molnar241771e2008-12-03 10:39:53 +01001161 }
Robert Richter948b1bb2010-03-29 18:36:50 +02001162 x86_pmu.intel_ctrl = (1 << x86_pmu.num_counters) - 1;
Ingo Molnar241771e2008-12-03 10:39:53 +01001163
Robert Richter948b1bb2010-03-29 18:36:50 +02001164 if (x86_pmu.num_counters_fixed > X86_PMC_MAX_FIXED) {
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001165 WARN(1, KERN_ERR "hw perf events fixed %d > max(%d), clipping!",
Robert Richter948b1bb2010-03-29 18:36:50 +02001166 x86_pmu.num_counters_fixed, X86_PMC_MAX_FIXED);
1167 x86_pmu.num_counters_fixed = X86_PMC_MAX_FIXED;
Ingo Molnar703e9372008-12-17 10:51:15 +01001168 }
Ingo Molnar241771e2008-12-03 10:39:53 +01001169
Robert Richterd6dc0b42010-03-17 12:49:13 +01001170 x86_pmu.intel_ctrl |=
Robert Richter948b1bb2010-03-29 18:36:50 +02001171 ((1LL << x86_pmu.num_counters_fixed)-1) << X86_PMC_IDX_FIXED;
Ingo Molnar862a1a52008-12-17 13:09:20 +01001172
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001173 perf_events_lapic_init();
Don Zickus9c48f1c2011-09-30 15:06:21 -04001174 register_nmi_handler(NMI_LOCAL, perf_event_nmi_handler, 0, "PMI");
Ingo Molnar1123e3a2009-05-29 11:25:09 +02001175
Peter Zijlstra63b14642010-01-22 16:32:17 +01001176 unconstrained = (struct event_constraint)
Robert Richter948b1bb2010-03-29 18:36:50 +02001177 __EVENT_CONSTRAINT(0, (1ULL << x86_pmu.num_counters) - 1,
1178 0, x86_pmu.num_counters);
Peter Zijlstra63b14642010-01-22 16:32:17 +01001179
Peter Zijlstrab622d642010-02-01 15:36:30 +01001180 if (x86_pmu.event_constraints) {
1181 for_each_event_constraint(c, x86_pmu.event_constraints) {
Robert Richtera098f442010-03-30 11:28:21 +02001182 if (c->cmask != X86_RAW_EVENT_MASK)
Peter Zijlstrab622d642010-02-01 15:36:30 +01001183 continue;
1184
Robert Richter948b1bb2010-03-29 18:36:50 +02001185 c->idxmsk64 |= (1ULL << x86_pmu.num_counters) - 1;
1186 c->weight += x86_pmu.num_counters;
Peter Zijlstrab622d642010-02-01 15:36:30 +01001187 }
1188 }
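	/*
	 * Illustrative example: fixed-counter constraints are declared with
	 * X86_RAW_EVENT_MASK as their cmask, so the loop above widens them
	 * to also allow every generic counter.  Assuming 4 generic counters,
	 * a constraint whose idxmsk64 held only the bit for fixed counter 0
	 * (bit X86_PMC_IDX_FIXED) gains bits 0-3 and its weight grows from
	 * 1 to 5.
	 */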
1189
Ingo Molnar57c0c152009-09-21 12:20:38 +02001190 pr_info("... version: %d\n", x86_pmu.version);
Robert Richter948b1bb2010-03-29 18:36:50 +02001191 pr_info("... bit width: %d\n", x86_pmu.cntval_bits);
1192 pr_info("... generic registers: %d\n", x86_pmu.num_counters);
1193 pr_info("... value mask: %016Lx\n", x86_pmu.cntval_mask);
Ingo Molnar57c0c152009-09-21 12:20:38 +02001194 pr_info("... max period: %016Lx\n", x86_pmu.max_period);
Robert Richter948b1bb2010-03-29 18:36:50 +02001195 pr_info("... fixed-purpose events: %d\n", x86_pmu.num_counters_fixed);
Robert Richterd6dc0b42010-03-17 12:49:13 +01001196 pr_info("... event mask: %016Lx\n", x86_pmu.intel_ctrl);
Peter Zijlstra3f6da392010-03-05 13:01:18 +01001197
Peter Zijlstra2e80a822010-11-17 23:17:36 +01001198 perf_pmu_register(&pmu, "cpu", PERF_TYPE_RAW);
Peter Zijlstra3f6da392010-03-05 13:01:18 +01001199 perf_cpu_notifier(x86_pmu_notifier);
Peter Zijlstra004417a2010-11-25 18:38:29 +01001200
1201 return 0;
Ingo Molnar241771e2008-12-03 10:39:53 +01001202}
Peter Zijlstra004417a2010-11-25 18:38:29 +01001203early_initcall(init_hw_perf_events);
Ingo Molnar621a01e2008-12-11 12:46:46 +01001204
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001205static inline void x86_pmu_read(struct perf_event *event)
Ingo Molnaree060942008-12-13 09:00:03 +01001206{
Peter Zijlstracc2ad4b2010-03-02 20:18:39 +01001207 x86_perf_event_update(event);
Ingo Molnaree060942008-12-13 09:00:03 +01001208}
1209
Lin Ming4d1c52b2010-04-23 13:56:12 +08001210/*
1211 * Start group events scheduling transaction
1212 * Set the flag to make pmu::enable() not perform the
 1213 * schedulability test; it will be performed at commit time
1214 */
Peter Zijlstra51b0fe32010-06-11 13:35:57 +02001215static void x86_pmu_start_txn(struct pmu *pmu)
Lin Ming4d1c52b2010-04-23 13:56:12 +08001216{
Peter Zijlstra33696fc2010-06-14 08:49:00 +02001217 perf_pmu_disable(pmu);
Tejun Heo0a3aee02010-12-18 16:28:55 +01001218 __this_cpu_or(cpu_hw_events.group_flag, PERF_EVENT_TXN);
1219 __this_cpu_write(cpu_hw_events.n_txn, 0);
Lin Ming4d1c52b2010-04-23 13:56:12 +08001220}
1221
1222/*
1223 * Stop group events scheduling transaction
1224 * Clear the flag and pmu::enable() will perform the
1225 * schedulability test.
1226 */
Peter Zijlstra51b0fe32010-06-11 13:35:57 +02001227static void x86_pmu_cancel_txn(struct pmu *pmu)
Lin Ming4d1c52b2010-04-23 13:56:12 +08001228{
Tejun Heo0a3aee02010-12-18 16:28:55 +01001229 __this_cpu_and(cpu_hw_events.group_flag, ~PERF_EVENT_TXN);
Stephane Eranian90151c32010-05-25 16:23:10 +02001230 /*
1231 * Truncate the collected events.
1232 */
Tejun Heo0a3aee02010-12-18 16:28:55 +01001233 __this_cpu_sub(cpu_hw_events.n_added, __this_cpu_read(cpu_hw_events.n_txn));
1234 __this_cpu_sub(cpu_hw_events.n_events, __this_cpu_read(cpu_hw_events.n_txn));
Peter Zijlstra33696fc2010-06-14 08:49:00 +02001235 perf_pmu_enable(pmu);
Lin Ming4d1c52b2010-04-23 13:56:12 +08001236}
1237
1238/*
1239 * Commit group events scheduling transaction
1240 * Perform the group schedulability test as a whole
1241 * Return 0 if success
1242 */
Peter Zijlstra51b0fe32010-06-11 13:35:57 +02001243static int x86_pmu_commit_txn(struct pmu *pmu)
Lin Ming4d1c52b2010-04-23 13:56:12 +08001244{
1245 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
1246 int assign[X86_PMC_IDX_MAX];
1247 int n, ret;
1248
1249 n = cpuc->n_events;
1250
1251 if (!x86_pmu_initialized())
1252 return -EAGAIN;
1253
1254 ret = x86_pmu.schedule_events(cpuc, n, assign);
1255 if (ret)
1256 return ret;
1257
1258 /*
 1259 * copy the new assignment; now that we know it is possible
 1260 * it will be used by hw_perf_enable()
1261 */
1262 memcpy(cpuc->assign, assign, n*sizeof(int));
1263
Peter Zijlstra8d2cacb2010-05-25 17:49:05 +02001264 cpuc->group_flag &= ~PERF_EVENT_TXN;
Peter Zijlstra33696fc2010-06-14 08:49:00 +02001265 perf_pmu_enable(pmu);
Lin Ming4d1c52b2010-04-23 13:56:12 +08001266 return 0;
1267}
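
#if 0
/*
 * Illustrative sketch, not part of this driver: roughly how the generic
 * perf core is expected to drive the three transaction hooks above when
 * scheduling an event group.  The real call sites live in
 * kernel/events/core.c and may differ in detail.
 */
static int group_sched_in_sketch(struct pmu *pmu, struct perf_event *leader)
{
	struct perf_event *sibling;

	pmu->start_txn(pmu);				/* x86_pmu_start_txn() */

	if (pmu->add(leader, PERF_EF_START))		/* collect, no test yet */
		goto cancel;

	list_for_each_entry(sibling, &leader->sibling_list, group_entry) {
		if (pmu->add(sibling, PERF_EF_START))
			goto cancel;
	}

	if (!pmu->commit_txn(pmu))			/* one schedulability test */
		return 0;
cancel:
	pmu->cancel_txn(pmu);				/* x86_pmu_cancel_txn() */
	return -EAGAIN;
}
#endif
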
Stephane Eraniancd8a38d2011-06-06 16:57:08 +02001268/*
1269 * a fake_cpuc is used to validate event groups. Due to
1270 * the extra reg logic, we need to also allocate a fake
1271 * per_core and per_cpu structure. Otherwise, group events
1272 * using extra reg may conflict without the kernel being
1273 * able to catch this when the last event gets added to
1274 * the group.
1275 */
1276static void free_fake_cpuc(struct cpu_hw_events *cpuc)
1277{
1278 kfree(cpuc->shared_regs);
1279 kfree(cpuc);
1280}
1281
1282static struct cpu_hw_events *allocate_fake_cpuc(void)
1283{
1284 struct cpu_hw_events *cpuc;
1285 int cpu = raw_smp_processor_id();
1286
1287 cpuc = kzalloc(sizeof(*cpuc), GFP_KERNEL);
1288 if (!cpuc)
1289 return ERR_PTR(-ENOMEM);
1290
 1291	/* only needed if we have extra_regs */
1292 if (x86_pmu.extra_regs) {
1293 cpuc->shared_regs = allocate_shared_regs(cpu);
1294 if (!cpuc->shared_regs)
1295 goto error;
1296 }
1297 return cpuc;
1298error:
1299 free_fake_cpuc(cpuc);
1300 return ERR_PTR(-ENOMEM);
1301}
Lin Ming4d1c52b2010-04-23 13:56:12 +08001302
Stephane Eranian1da53e02010-01-18 10:58:01 +02001303/*
Peter Zijlstraca037702010-03-02 19:52:12 +01001304 * validate that we can schedule this event
1305 */
1306static int validate_event(struct perf_event *event)
1307{
1308 struct cpu_hw_events *fake_cpuc;
1309 struct event_constraint *c;
1310 int ret = 0;
1311
Stephane Eraniancd8a38d2011-06-06 16:57:08 +02001312 fake_cpuc = allocate_fake_cpuc();
1313 if (IS_ERR(fake_cpuc))
1314 return PTR_ERR(fake_cpuc);
Peter Zijlstraca037702010-03-02 19:52:12 +01001315
1316 c = x86_pmu.get_event_constraints(fake_cpuc, event);
1317
1318 if (!c || !c->weight)
1319 ret = -ENOSPC;
1320
1321 if (x86_pmu.put_event_constraints)
1322 x86_pmu.put_event_constraints(fake_cpuc, event);
1323
Stephane Eraniancd8a38d2011-06-06 16:57:08 +02001324 free_fake_cpuc(fake_cpuc);
Peter Zijlstraca037702010-03-02 19:52:12 +01001325
1326 return ret;
1327}
1328
1329/*
Stephane Eranian1da53e02010-01-18 10:58:01 +02001330 * validate a single event group
1331 *
 1332 * validation includes:
Ingo Molnar184f4122010-01-27 08:39:39 +01001333 * - check events are compatible with each other
1334 * - events do not compete for the same counter
1335 * - number of events <= number of counters
Stephane Eranian1da53e02010-01-18 10:58:01 +02001336 *
1337 * validation ensures the group can be loaded onto the
1338 * PMU if it was the only group available.
1339 */
Peter Zijlstrafe9081c2009-10-08 11:56:07 +02001340static int validate_group(struct perf_event *event)
1341{
Stephane Eranian1da53e02010-01-18 10:58:01 +02001342 struct perf_event *leader = event->group_leader;
Peter Zijlstra502568d2010-01-22 14:35:46 +01001343 struct cpu_hw_events *fake_cpuc;
Stephane Eraniancd8a38d2011-06-06 16:57:08 +02001344 int ret = -ENOSPC, n;
Peter Zijlstrafe9081c2009-10-08 11:56:07 +02001345
Stephane Eraniancd8a38d2011-06-06 16:57:08 +02001346 fake_cpuc = allocate_fake_cpuc();
1347 if (IS_ERR(fake_cpuc))
1348 return PTR_ERR(fake_cpuc);
Stephane Eranian1da53e02010-01-18 10:58:01 +02001349 /*
1350 * the event is not yet connected with its
 1351 * siblings; therefore we must first collect
1352 * existing siblings, then add the new event
1353 * before we can simulate the scheduling
1354 */
Peter Zijlstra502568d2010-01-22 14:35:46 +01001355 n = collect_events(fake_cpuc, leader, true);
Stephane Eranian1da53e02010-01-18 10:58:01 +02001356 if (n < 0)
Stephane Eraniancd8a38d2011-06-06 16:57:08 +02001357 goto out;
Peter Zijlstrafe9081c2009-10-08 11:56:07 +02001358
Peter Zijlstra502568d2010-01-22 14:35:46 +01001359 fake_cpuc->n_events = n;
1360 n = collect_events(fake_cpuc, event, false);
Stephane Eranian1da53e02010-01-18 10:58:01 +02001361 if (n < 0)
Stephane Eraniancd8a38d2011-06-06 16:57:08 +02001362 goto out;
Peter Zijlstrafe9081c2009-10-08 11:56:07 +02001363
Peter Zijlstra502568d2010-01-22 14:35:46 +01001364 fake_cpuc->n_events = n;
Stephane Eranian1da53e02010-01-18 10:58:01 +02001365
Cyrill Gorcunova0727382010-03-11 19:54:39 +03001366 ret = x86_pmu.schedule_events(fake_cpuc, n, NULL);
Peter Zijlstra502568d2010-01-22 14:35:46 +01001367
Peter Zijlstra502568d2010-01-22 14:35:46 +01001368out:
Stephane Eraniancd8a38d2011-06-06 16:57:08 +02001369 free_fake_cpuc(fake_cpuc);
Peter Zijlstra502568d2010-01-22 14:35:46 +01001370 return ret;
Peter Zijlstrafe9081c2009-10-08 11:56:07 +02001371}
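
/*
 * Worked example (illustrative): if every event in a group needs a
 * generic counter and the group contains more events than
 * x86_pmu.num_counters, x86_pmu.schedule_events() above can find no
 * assignment, so opening the last sibling of that group fails
 * (with -ENOSPC here).
 */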
1372
Yinghai Ludda99112011-01-21 15:30:01 -08001373static int x86_pmu_event_init(struct perf_event *event)
Ingo Molnar621a01e2008-12-11 12:46:46 +01001374{
Peter Zijlstra51b0fe32010-06-11 13:35:57 +02001375 struct pmu *tmp;
Ingo Molnar621a01e2008-12-11 12:46:46 +01001376 int err;
1377
Peter Zijlstrab0a873e2010-06-11 13:35:08 +02001378 switch (event->attr.type) {
1379 case PERF_TYPE_RAW:
1380 case PERF_TYPE_HARDWARE:
1381 case PERF_TYPE_HW_CACHE:
1382 break;
1383
1384 default:
1385 return -ENOENT;
1386 }
1387
1388 err = __x86_pmu_event_init(event);
Peter Zijlstrafe9081c2009-10-08 11:56:07 +02001389 if (!err) {
Stephane Eranian81130702010-01-21 17:39:01 +02001390 /*
 1391 * we temporarily connect the event to its pmu
1392 * such that validate_group() can classify
1393 * it as an x86 event using is_x86_event()
1394 */
1395 tmp = event->pmu;
1396 event->pmu = &pmu;
1397
Peter Zijlstrafe9081c2009-10-08 11:56:07 +02001398 if (event->group_leader != event)
1399 err = validate_group(event);
Peter Zijlstraca037702010-03-02 19:52:12 +01001400 else
1401 err = validate_event(event);
Stephane Eranian81130702010-01-21 17:39:01 +02001402
1403 event->pmu = tmp;
Peter Zijlstrafe9081c2009-10-08 11:56:07 +02001404 }
Peter Zijlstraa1792cd2009-09-09 10:04:47 +02001405 if (err) {
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001406 if (event->destroy)
1407 event->destroy(event);
Peter Zijlstraa1792cd2009-09-09 10:04:47 +02001408 }
Ingo Molnar621a01e2008-12-11 12:46:46 +01001409
Peter Zijlstrab0a873e2010-06-11 13:35:08 +02001410 return err;
Ingo Molnar621a01e2008-12-11 12:46:46 +01001411}
Peter Zijlstrad7d59fb2009-03-30 19:07:15 +02001412
Peter Zijlstrab0a873e2010-06-11 13:35:08 +02001413static struct pmu pmu = {
Peter Zijlstraa4eaf7f2010-06-16 14:37:10 +02001414 .pmu_enable = x86_pmu_enable,
1415 .pmu_disable = x86_pmu_disable,
1416
Peter Zijlstrab0a873e2010-06-11 13:35:08 +02001417 .event_init = x86_pmu_event_init,
Peter Zijlstraa4eaf7f2010-06-16 14:37:10 +02001418
1419 .add = x86_pmu_add,
1420 .del = x86_pmu_del,
Peter Zijlstrab0a873e2010-06-11 13:35:08 +02001421 .start = x86_pmu_start,
1422 .stop = x86_pmu_stop,
1423 .read = x86_pmu_read,
Peter Zijlstraa4eaf7f2010-06-16 14:37:10 +02001424
Peter Zijlstrab0a873e2010-06-11 13:35:08 +02001425 .start_txn = x86_pmu_start_txn,
1426 .cancel_txn = x86_pmu_cancel_txn,
1427 .commit_txn = x86_pmu_commit_txn,
1428};
1429
Peter Zijlstrad7d59fb2009-03-30 19:07:15 +02001430/*
1431 * callchain support
1432 */
1433
Peter Zijlstrad7d59fb2009-03-30 19:07:15 +02001434static int backtrace_stack(void *data, char *name)
1435{
Ingo Molnar038e8362009-06-15 09:57:59 +02001436 return 0;
Peter Zijlstrad7d59fb2009-03-30 19:07:15 +02001437}
1438
1439static void backtrace_address(void *data, unsigned long addr, int reliable)
1440{
1441 struct perf_callchain_entry *entry = data;
1442
Frederic Weisbecker70791ce2010-06-29 19:34:05 +02001443 perf_callchain_store(entry, addr);
Peter Zijlstrad7d59fb2009-03-30 19:07:15 +02001444}
1445
1446static const struct stacktrace_ops backtrace_ops = {
Peter Zijlstrad7d59fb2009-03-30 19:07:15 +02001447 .stack = backtrace_stack,
1448 .address = backtrace_address,
Frederic Weisbecker06d65bd2009-12-17 05:40:34 +01001449 .walk_stack = print_context_stack_bp,
Peter Zijlstrad7d59fb2009-03-30 19:07:15 +02001450};
1451
Frederic Weisbecker56962b42010-06-30 23:03:51 +02001452void
1453perf_callchain_kernel(struct perf_callchain_entry *entry, struct pt_regs *regs)
Peter Zijlstrad7d59fb2009-03-30 19:07:15 +02001454{
Frederic Weisbecker927c7a92010-07-01 16:20:36 +02001455 if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) {
 1456		/* TODO: We don't support guest OS callchains yet */
Peter Zijlstraed805262010-08-20 14:30:41 +02001457 return;
Frederic Weisbecker927c7a92010-07-01 16:20:36 +02001458 }
1459
Frederic Weisbecker70791ce2010-06-29 19:34:05 +02001460 perf_callchain_store(entry, regs->ip);
Peter Zijlstrad7d59fb2009-03-30 19:07:15 +02001461
Namhyung Kime8e999c2011-03-18 11:40:06 +09001462 dump_trace(NULL, regs, NULL, 0, &backtrace_ops, entry);
Peter Zijlstrad7d59fb2009-03-30 19:07:15 +02001463}
1464
Torok Edwin257ef9d2010-03-17 12:07:16 +02001465#ifdef CONFIG_COMPAT
1466static inline int
1467perf_callchain_user32(struct pt_regs *regs, struct perf_callchain_entry *entry)
Peter Zijlstra74193ef2009-06-15 13:07:24 +02001468{
Torok Edwin257ef9d2010-03-17 12:07:16 +02001469 /* 32-bit process in 64-bit kernel. */
1470 struct stack_frame_ia32 frame;
1471 const void __user *fp;
Peter Zijlstra74193ef2009-06-15 13:07:24 +02001472
Torok Edwin257ef9d2010-03-17 12:07:16 +02001473 if (!test_thread_flag(TIF_IA32))
1474 return 0;
Peter Zijlstra74193ef2009-06-15 13:07:24 +02001475
Torok Edwin257ef9d2010-03-17 12:07:16 +02001476 fp = compat_ptr(regs->bp);
1477 while (entry->nr < PERF_MAX_STACK_DEPTH) {
1478 unsigned long bytes;
1479 frame.next_frame = 0;
1480 frame.return_address = 0;
1481
1482 bytes = copy_from_user_nmi(&frame, fp, sizeof(frame));
1483 if (bytes != sizeof(frame))
1484 break;
1485
1486 if (fp < compat_ptr(regs->sp))
1487 break;
1488
Frederic Weisbecker70791ce2010-06-29 19:34:05 +02001489 perf_callchain_store(entry, frame.return_address);
Torok Edwin257ef9d2010-03-17 12:07:16 +02001490 fp = compat_ptr(frame.next_frame);
1491 }
1492 return 1;
Peter Zijlstrad7d59fb2009-03-30 19:07:15 +02001493}
Torok Edwin257ef9d2010-03-17 12:07:16 +02001494#else
1495static inline int
1496perf_callchain_user32(struct pt_regs *regs, struct perf_callchain_entry *entry)
1497{
1498 return 0;
1499}
1500#endif
Peter Zijlstrad7d59fb2009-03-30 19:07:15 +02001501
Frederic Weisbecker56962b42010-06-30 23:03:51 +02001502void
1503perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs)
Peter Zijlstrad7d59fb2009-03-30 19:07:15 +02001504{
1505 struct stack_frame frame;
1506 const void __user *fp;
1507
Frederic Weisbecker927c7a92010-07-01 16:20:36 +02001508 if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) {
 1509		/* TODO: We don't support guest OS callchains yet */
Peter Zijlstraed805262010-08-20 14:30:41 +02001510 return;
Frederic Weisbecker927c7a92010-07-01 16:20:36 +02001511 }
Ingo Molnar5a6cec32009-05-29 11:25:09 +02001512
Peter Zijlstra74193ef2009-06-15 13:07:24 +02001513 fp = (void __user *)regs->bp;
Peter Zijlstrad7d59fb2009-03-30 19:07:15 +02001514
Frederic Weisbecker70791ce2010-06-29 19:34:05 +02001515 perf_callchain_store(entry, regs->ip);
Peter Zijlstrad7d59fb2009-03-30 19:07:15 +02001516
Andrey Vagin20afc602011-08-30 12:32:36 +04001517 if (!current->mm)
1518 return;
1519
Torok Edwin257ef9d2010-03-17 12:07:16 +02001520 if (perf_callchain_user32(regs, entry))
1521 return;
1522
Peter Zijlstraf9188e02009-06-18 22:20:52 +02001523 while (entry->nr < PERF_MAX_STACK_DEPTH) {
Torok Edwin257ef9d2010-03-17 12:07:16 +02001524 unsigned long bytes;
Ingo Molnar038e8362009-06-15 09:57:59 +02001525 frame.next_frame = NULL;
Peter Zijlstrad7d59fb2009-03-30 19:07:15 +02001526 frame.return_address = 0;
1527
Torok Edwin257ef9d2010-03-17 12:07:16 +02001528 bytes = copy_from_user_nmi(&frame, fp, sizeof(frame));
1529 if (bytes != sizeof(frame))
Peter Zijlstrad7d59fb2009-03-30 19:07:15 +02001530 break;
1531
Ingo Molnar5a6cec32009-05-29 11:25:09 +02001532 if ((unsigned long)fp < regs->sp)
Peter Zijlstrad7d59fb2009-03-30 19:07:15 +02001533 break;
1534
Frederic Weisbecker70791ce2010-06-29 19:34:05 +02001535 perf_callchain_store(entry, frame.return_address);
Ingo Molnar038e8362009-06-15 09:57:59 +02001536 fp = frame.next_frame;
Peter Zijlstrad7d59fb2009-03-30 19:07:15 +02001537 }
1538}
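
/*
 * Stack layout assumed by the user-space walkers above (illustrative):
 * with frame pointers enabled, each frame starts with the caller's saved
 * frame pointer followed by the return address, which is what
 * struct stack_frame and struct stack_frame_ia32 mirror:
 *
 *	fp --> saved caller fp		(frame.next_frame)
 *	       return address		(frame.return_address)
 *
 * Binaries built without frame pointers break this chain, so the walk
 * simply terminates early.
 */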
1539
Zhang, Yanmin39447b32010-04-19 13:32:41 +08001540unsigned long perf_instruction_pointer(struct pt_regs *regs)
1541{
1542 unsigned long ip;
Zhang, Yanmindcf46b92010-04-20 10:13:58 +08001543
Zhang, Yanmin39447b32010-04-19 13:32:41 +08001544 if (perf_guest_cbs && perf_guest_cbs->is_in_guest())
1545 ip = perf_guest_cbs->get_guest_ip();
1546 else
1547 ip = instruction_pointer(regs);
Zhang, Yanmindcf46b92010-04-20 10:13:58 +08001548
Zhang, Yanmin39447b32010-04-19 13:32:41 +08001549 return ip;
1550}
1551
1552unsigned long perf_misc_flags(struct pt_regs *regs)
1553{
1554 int misc = 0;
Zhang, Yanmindcf46b92010-04-20 10:13:58 +08001555
Zhang, Yanmin39447b32010-04-19 13:32:41 +08001556 if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) {
Zhang, Yanmindcf46b92010-04-20 10:13:58 +08001557 if (perf_guest_cbs->is_user_mode())
1558 misc |= PERF_RECORD_MISC_GUEST_USER;
1559 else
1560 misc |= PERF_RECORD_MISC_GUEST_KERNEL;
1561 } else {
1562 if (user_mode(regs))
1563 misc |= PERF_RECORD_MISC_USER;
1564 else
1565 misc |= PERF_RECORD_MISC_KERNEL;
1566 }
1567
Zhang, Yanmin39447b32010-04-19 13:32:41 +08001568 if (regs->flags & PERF_EFLAGS_EXACT)
Peter Zijlstraab608342010-04-08 23:03:20 +02001569 misc |= PERF_RECORD_MISC_EXACT_IP;
Zhang, Yanmin39447b32010-04-19 13:32:41 +08001570
1571 return misc;
1572}