/*
 * Performance events x86 architecture code
 *
 *  Copyright (C) 2008 Thomas Gleixner <tglx@linutronix.de>
 *  Copyright (C) 2008-2009 Red Hat, Inc., Ingo Molnar
 *  Copyright (C) 2009 Jaswinder Singh Rajput
 *  Copyright (C) 2009 Advanced Micro Devices, Inc., Robert Richter
 *  Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
 *  Copyright (C) 2009 Intel Corporation, <markus.t.metzger@intel.com>
 *  Copyright (C) 2009 Google, Inc., Stephane Eranian
 *
 *  For licensing details see kernel-base/COPYING
 */

#include <linux/perf_event.h>
#include <linux/capability.h>
#include <linux/notifier.h>
#include <linux/hardirq.h>
#include <linux/kprobes.h>
#include <linux/module.h>
#include <linux/kdebug.h>
#include <linux/sched.h>
#include <linux/uaccess.h>
#include <linux/slab.h>
#include <linux/highmem.h>
#include <linux/cpu.h>
#include <linux/bitops.h>

#include <asm/apic.h>
#include <asm/stacktrace.h>
#include <asm/nmi.h>

static u64 perf_event_mask __read_mostly;

/* The maximal number of PEBS events: */
#define MAX_PEBS_EVENTS	4

/* The size of a BTS record in bytes: */
#define BTS_RECORD_SIZE	24

/* The size of a per-cpu BTS buffer in bytes: */
#define BTS_BUFFER_SIZE	(BTS_RECORD_SIZE * 2048)

/* The BTS overflow threshold in bytes from the end of the buffer: */
#define BTS_OVFL_TH	(BTS_RECORD_SIZE * 128)

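/*
 * A minimal sketch of the layout these constants assume: a BTS record
 * on 64-bit hardware is three u64 fields (branch-from, branch-to,
 * flags), hence a BTS_RECORD_SIZE of 24. The buffer above therefore
 * holds 2048 records, and the interrupt threshold is armed 128 records
 * before the end of the buffer.
 */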

/*
 * Bits in the debugctlmsr controlling branch tracing.
 */
#define X86_DEBUGCTL_TR			(1 << 6)
#define X86_DEBUGCTL_BTS		(1 << 7)
#define X86_DEBUGCTL_BTINT		(1 << 8)
#define X86_DEBUGCTL_BTS_OFF_OS		(1 << 9)
#define X86_DEBUGCTL_BTS_OFF_USR	(1 << 10)

/*
 * A debug store configuration.
 *
 * We only support architectures that use 64bit fields.
 */
struct debug_store {
	u64	bts_buffer_base;
	u64	bts_index;
	u64	bts_absolute_maximum;
	u64	bts_interrupt_threshold;
	u64	pebs_buffer_base;
	u64	pebs_index;
	u64	pebs_absolute_maximum;
	u64	pebs_interrupt_threshold;
	u64	pebs_event_reset[MAX_PEBS_EVENTS];
};

struct event_constraint {
	union {
		unsigned long	idxmsk[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
		u64		idxmsk64;
	};
	u64	code;
	u64	cmask;
	int	weight;
};

struct amd_nb {
	int nb_id;  /* NorthBridge id */
	int refcnt; /* reference count */
	struct perf_event *owners[X86_PMC_IDX_MAX];
	struct event_constraint event_constraints[X86_PMC_IDX_MAX];
};

struct cpu_hw_events {
	struct perf_event	*events[X86_PMC_IDX_MAX]; /* in counter order */
	unsigned long		active_mask[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
	unsigned long		interrupts;
	int			enabled;
	struct debug_store	*ds;

	int			n_events;
	int			n_added;
	int			assign[X86_PMC_IDX_MAX]; /* event to counter assignment */
	u64			tags[X86_PMC_IDX_MAX];
	struct perf_event	*event_list[X86_PMC_IDX_MAX]; /* in enabled order */
	struct amd_nb		*amd_nb;
};

#define __EVENT_CONSTRAINT(c, n, m, w) {\
	{ .idxmsk64 = (n) },		\
	.code = (c),			\
	.cmask = (m),			\
	.weight = (w),			\
}

#define EVENT_CONSTRAINT(c, n, m)	\
	__EVENT_CONSTRAINT(c, n, m, HWEIGHT(n))

#define INTEL_EVENT_CONSTRAINT(c, n)	\
	EVENT_CONSTRAINT(c, n, INTEL_ARCH_EVTSEL_MASK)

#define FIXED_EVENT_CONSTRAINT(c, n)	\
	EVENT_CONSTRAINT(c, (1ULL << (32+n)), INTEL_ARCH_FIXED_MASK)

#define EVENT_CONSTRAINT_END		\
	EVENT_CONSTRAINT(0, 0, 0)

#define for_each_event_constraint(e, c)	\
	for ((e) = (c); (e)->cmask; (e)++)
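
/*
 * Illustrative use of the constraint macros (event code and index mask
 * are hypothetical):
 *
 *	INTEL_EVENT_CONSTRAINT(0xc1, 0x3)
 *
 * expands to { .idxmsk64 = 0x3, .code = 0xc1,
 * .cmask = INTEL_ARCH_EVTSEL_MASK, .weight = 2 }, i.e. event 0xc1 may
 * only run on generic counters 0 and 1. The weight (HWEIGHT of the
 * index mask) is what the scheduler in x86_schedule_events() sorts on.
 */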

/*
 * struct x86_pmu - generic x86 pmu
 */
struct x86_pmu {
	const char	*name;
	int		version;
	int		(*handle_irq)(struct pt_regs *);
	void		(*disable_all)(void);
	void		(*enable_all)(void);
	void		(*enable)(struct perf_event *);
	void		(*disable)(struct perf_event *);
	unsigned	eventsel;
	unsigned	perfctr;
	u64		(*event_map)(int);
	u64		(*raw_event)(u64);
	int		max_events;
	int		num_events;
	int		num_events_fixed;
	int		event_bits;
	u64		event_mask;
	int		apic;
	u64		max_period;
	u64		intel_ctrl;
	void		(*enable_bts)(u64 config);
	void		(*disable_bts)(void);

	struct event_constraint *
			(*get_event_constraints)(struct cpu_hw_events *cpuc,
						 struct perf_event *event);

	void		(*put_event_constraints)(struct cpu_hw_events *cpuc,
						 struct perf_event *event);
	struct event_constraint *event_constraints;

	void		(*cpu_prepare)(int cpu);
	void		(*cpu_starting)(int cpu);
	void		(*cpu_dying)(int cpu);
	void		(*cpu_dead)(int cpu);
};

static struct x86_pmu x86_pmu __read_mostly;

static DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events) = {
	.enabled = 1,
};

static int x86_perf_event_set_period(struct perf_event *event);

/*
 * Generalized hw caching related hw_event table, filled
 * in on a per model basis. A value of 0 means
 * 'not supported', -1 means 'hw_event makes no sense on
 * this CPU', any other value means the raw hw_event
 * ID.
 */

#define C(x) PERF_COUNT_HW_CACHE_##x

static u64 __read_mostly hw_cache_event_ids
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX];

/*
 * Propagate event elapsed time into the generic event.
 * Can only be executed on the CPU where the event is active.
 * Returns the delta events processed.
 */
static u64
x86_perf_event_update(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	int shift = 64 - x86_pmu.event_bits;
	u64 prev_raw_count, new_raw_count;
	int idx = hwc->idx;
	s64 delta;

	if (idx == X86_PMC_IDX_FIXED_BTS)
		return 0;

	/*
	 * Careful: an NMI might modify the previous event value.
	 *
	 * Our tactic to handle this is to first atomically read and
	 * exchange a new raw count - then add that new-prev delta
	 * count to the generic event atomically:
	 */
again:
	prev_raw_count = atomic64_read(&hwc->prev_count);
	rdmsrl(hwc->event_base + idx, new_raw_count);

	if (atomic64_cmpxchg(&hwc->prev_count, prev_raw_count,
					new_raw_count) != prev_raw_count)
		goto again;

	/*
	 * Now we have the new raw value and have updated the prev
	 * timestamp already. We can now calculate the elapsed delta
	 * (event-)time and add that to the generic event.
	 *
	 * Careful, not all hw sign-extends above the physical width
	 * of the count.
	 */
	delta = (new_raw_count << shift) - (prev_raw_count << shift);
	delta >>= shift;

	atomic64_add(delta, &event->count);
	atomic64_sub(delta, &hwc->period_left);

	return new_raw_count;
}
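
/*
 * Worked example of the shift dance above (numbers illustrative): with
 * event_bits == 40 we get shift == 24. Say the counter was last read at
 * 0xffffffffff (-1 in 40 bits) and now reads 0xf after wrapping:
 * (0xf << 24) - (0xffffffffff << 24) == 0x10000000, and the arithmetic
 * right shift by 24 gives a delta of 16 - exactly the 16 increments
 * that occurred, unharmed by the 40-bit wrap-around.
 */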

static atomic_t active_events;
static DEFINE_MUTEX(pmc_reserve_mutex);

static bool reserve_pmc_hardware(void)
{
#ifdef CONFIG_X86_LOCAL_APIC
	int i;

	if (nmi_watchdog == NMI_LOCAL_APIC)
		disable_lapic_nmi_watchdog();

	for (i = 0; i < x86_pmu.num_events; i++) {
		if (!reserve_perfctr_nmi(x86_pmu.perfctr + i))
			goto perfctr_fail;
	}

	for (i = 0; i < x86_pmu.num_events; i++) {
		if (!reserve_evntsel_nmi(x86_pmu.eventsel + i))
			goto eventsel_fail;
	}
#endif

	return true;

#ifdef CONFIG_X86_LOCAL_APIC
eventsel_fail:
	for (i--; i >= 0; i--)
		release_evntsel_nmi(x86_pmu.eventsel + i);

	i = x86_pmu.num_events;

perfctr_fail:
	for (i--; i >= 0; i--)
		release_perfctr_nmi(x86_pmu.perfctr + i);

	if (nmi_watchdog == NMI_LOCAL_APIC)
		enable_lapic_nmi_watchdog();

	return false;
#endif
}

static void release_pmc_hardware(void)
{
#ifdef CONFIG_X86_LOCAL_APIC
	int i;

	for (i = 0; i < x86_pmu.num_events; i++) {
		release_perfctr_nmi(x86_pmu.perfctr + i);
		release_evntsel_nmi(x86_pmu.eventsel + i);
	}

	if (nmi_watchdog == NMI_LOCAL_APIC)
		enable_lapic_nmi_watchdog();
#endif
}

static inline bool bts_available(void)
{
	return x86_pmu.enable_bts != NULL;
}

static void init_debug_store_on_cpu(int cpu)
{
	struct debug_store *ds = per_cpu(cpu_hw_events, cpu).ds;

	if (!ds)
		return;

	wrmsr_on_cpu(cpu, MSR_IA32_DS_AREA,
		     (u32)((u64)(unsigned long)ds),
		     (u32)((u64)(unsigned long)ds >> 32));
}

static void fini_debug_store_on_cpu(int cpu)
{
	if (!per_cpu(cpu_hw_events, cpu).ds)
		return;

	wrmsr_on_cpu(cpu, MSR_IA32_DS_AREA, 0, 0);
}

static void release_bts_hardware(void)
{
	int cpu;

	if (!bts_available())
		return;

	get_online_cpus();

	for_each_online_cpu(cpu)
		fini_debug_store_on_cpu(cpu);

	for_each_possible_cpu(cpu) {
		struct debug_store *ds = per_cpu(cpu_hw_events, cpu).ds;

		if (!ds)
			continue;

		per_cpu(cpu_hw_events, cpu).ds = NULL;

		kfree((void *)(unsigned long)ds->bts_buffer_base);
		kfree(ds);
	}

	put_online_cpus();
}

static int reserve_bts_hardware(void)
{
	int cpu, err = 0;

	if (!bts_available())
		return 0;

	get_online_cpus();

	for_each_possible_cpu(cpu) {
		struct debug_store *ds;
		void *buffer;

		err = -ENOMEM;
		buffer = kzalloc(BTS_BUFFER_SIZE, GFP_KERNEL);
		if (unlikely(!buffer))
			break;

		ds = kzalloc(sizeof(*ds), GFP_KERNEL);
		if (unlikely(!ds)) {
			kfree(buffer);
			break;
		}

		ds->bts_buffer_base = (u64)(unsigned long)buffer;
		ds->bts_index = ds->bts_buffer_base;
		ds->bts_absolute_maximum =
			ds->bts_buffer_base + BTS_BUFFER_SIZE;
		ds->bts_interrupt_threshold =
			ds->bts_absolute_maximum - BTS_OVFL_TH;

		per_cpu(cpu_hw_events, cpu).ds = ds;
		err = 0;
	}

	if (err)
		release_bts_hardware();
	else {
		for_each_online_cpu(cpu)
			init_debug_store_on_cpu(cpu);
	}

	put_online_cpus();

	return err;
}

static void hw_perf_event_destroy(struct perf_event *event)
{
	if (atomic_dec_and_mutex_lock(&active_events, &pmc_reserve_mutex)) {
		release_pmc_hardware();
		release_bts_hardware();
		mutex_unlock(&pmc_reserve_mutex);
	}
}

static inline int x86_pmu_initialized(void)
{
	return x86_pmu.handle_irq != NULL;
}

static inline int
set_ext_hw_attr(struct hw_perf_event *hwc, struct perf_event_attr *attr)
{
	unsigned int cache_type, cache_op, cache_result;
	u64 config, val;

	config = attr->config;

	cache_type = (config >> 0) & 0xff;
	if (cache_type >= PERF_COUNT_HW_CACHE_MAX)
		return -EINVAL;

	cache_op = (config >> 8) & 0xff;
	if (cache_op >= PERF_COUNT_HW_CACHE_OP_MAX)
		return -EINVAL;

	cache_result = (config >> 16) & 0xff;
	if (cache_result >= PERF_COUNT_HW_CACHE_RESULT_MAX)
		return -EINVAL;

	val = hw_cache_event_ids[cache_type][cache_op][cache_result];

	if (val == 0)
		return -ENOENT;

	if (val == -1)
		return -EINVAL;

	hwc->config |= val;

	return 0;
}
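
/*
 * For reference, the attr->config layout unpacked above is the generic
 * perf ABI encoding: cache type in bits 0-7, op in bits 8-15, result in
 * bits 16-23. L1D read misses, for example, would be requested as:
 *
 *	C(L1D) | (C(OP_READ) << 8) | (C(RESULT_MISS) << 16)
 *
 * and resolved via hw_cache_event_ids[] into a model-specific raw event.
 */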

/*
 * Setup the hardware configuration for a given attr_type
 */
static int __hw_perf_event_init(struct perf_event *event)
{
	struct perf_event_attr *attr = &event->attr;
	struct hw_perf_event *hwc = &event->hw;
	u64 config;
	int err;

	if (!x86_pmu_initialized())
		return -ENODEV;

	err = 0;
	if (!atomic_inc_not_zero(&active_events)) {
		mutex_lock(&pmc_reserve_mutex);
		if (atomic_read(&active_events) == 0) {
			if (!reserve_pmc_hardware())
				err = -EBUSY;
			else
				err = reserve_bts_hardware();
		}
		if (!err)
			atomic_inc(&active_events);
		mutex_unlock(&pmc_reserve_mutex);
	}
	if (err)
		return err;

	event->destroy = hw_perf_event_destroy;

	/*
	 * Generate PMC IRQs:
	 * (keep 'enabled' bit clear for now)
	 */
	hwc->config = ARCH_PERFMON_EVENTSEL_INT;

	hwc->idx = -1;
	hwc->last_cpu = -1;
	hwc->last_tag = ~0ULL;

	/*
	 * Count user and OS events unless requested not to.
	 */
	if (!attr->exclude_user)
		hwc->config |= ARCH_PERFMON_EVENTSEL_USR;
	if (!attr->exclude_kernel)
		hwc->config |= ARCH_PERFMON_EVENTSEL_OS;

	if (!hwc->sample_period) {
		hwc->sample_period = x86_pmu.max_period;
		hwc->last_period = hwc->sample_period;
		atomic64_set(&hwc->period_left, hwc->sample_period);
	} else {
		/*
		 * If we have a PMU initialized but no APIC
		 * interrupts, we cannot sample hardware
		 * events (user-space has to fall back and
		 * sample via a hrtimer based software event):
		 */
		if (!x86_pmu.apic)
			return -EOPNOTSUPP;
	}

	/*
	 * Raw hw_event types provide the config in the hw_event structure
	 */
	if (attr->type == PERF_TYPE_RAW) {
		hwc->config |= x86_pmu.raw_event(attr->config);
		if ((hwc->config & ARCH_PERFMON_EVENTSEL_ANY) &&
		    perf_paranoid_cpu() && !capable(CAP_SYS_ADMIN))
			return -EACCES;
		return 0;
	}

	if (attr->type == PERF_TYPE_HW_CACHE)
		return set_ext_hw_attr(hwc, attr);

	if (attr->config >= x86_pmu.max_events)
		return -EINVAL;

	/*
	 * The generic map:
	 */
	config = x86_pmu.event_map(attr->config);

	if (config == 0)
		return -ENOENT;

	if (config == -1LL)
		return -EINVAL;

	/*
	 * Branch tracing:
	 */
	if ((attr->config == PERF_COUNT_HW_BRANCH_INSTRUCTIONS) &&
	    (hwc->sample_period == 1)) {
		/* BTS is not supported by this architecture. */
		if (!bts_available())
			return -EOPNOTSUPP;

		/* BTS is currently only allowed for user-mode. */
		if (hwc->config & ARCH_PERFMON_EVENTSEL_OS)
			return -EOPNOTSUPP;
	}

	hwc->config |= config;

	return 0;
}

static void x86_pmu_disable_all(void)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	int idx;

	for (idx = 0; idx < x86_pmu.num_events; idx++) {
		u64 val;

		if (!test_bit(idx, cpuc->active_mask))
			continue;
		rdmsrl(x86_pmu.eventsel + idx, val);
		if (!(val & ARCH_PERFMON_EVENTSEL_ENABLE))
			continue;
		val &= ~ARCH_PERFMON_EVENTSEL_ENABLE;
		wrmsrl(x86_pmu.eventsel + idx, val);
	}
}

void hw_perf_disable(void)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);

	if (!x86_pmu_initialized())
		return;

	if (!cpuc->enabled)
		return;

	cpuc->n_added = 0;
	cpuc->enabled = 0;
	barrier();

	x86_pmu.disable_all();
}

static void x86_pmu_enable_all(void)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	int idx;

	for (idx = 0; idx < x86_pmu.num_events; idx++) {
		struct perf_event *event = cpuc->events[idx];
		u64 val;

		if (!test_bit(idx, cpuc->active_mask))
			continue;

		val = event->hw.config;
		val |= ARCH_PERFMON_EVENTSEL_ENABLE;
		wrmsrl(x86_pmu.eventsel + idx, val);
	}
}

static const struct pmu pmu;

static inline int is_x86_event(struct perf_event *event)
{
	return event->pmu == &pmu;
}

static int x86_schedule_events(struct cpu_hw_events *cpuc, int n, int *assign)
{
	struct event_constraint *c, *constraints[X86_PMC_IDX_MAX];
	unsigned long used_mask[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
	int i, j, w, wmax, num = 0;
	struct hw_perf_event *hwc;

	bitmap_zero(used_mask, X86_PMC_IDX_MAX);

	for (i = 0; i < n; i++) {
		c = x86_pmu.get_event_constraints(cpuc, cpuc->event_list[i]);
		constraints[i] = c;
	}

	/*
	 * fastpath, try to reuse previous register
	 */
	for (i = 0; i < n; i++) {
		hwc = &cpuc->event_list[i]->hw;
		c = constraints[i];

		/* never assigned */
		if (hwc->idx == -1)
			break;

		/* constraint still honored */
		if (!test_bit(hwc->idx, c->idxmsk))
			break;

		/* not already used */
		if (test_bit(hwc->idx, used_mask))
			break;

		__set_bit(hwc->idx, used_mask);
		if (assign)
			assign[i] = hwc->idx;
	}
	if (i == n)
		goto done;

	/*
	 * begin slow path
	 */

	bitmap_zero(used_mask, X86_PMC_IDX_MAX);

	/*
	 * weight = number of possible counters
	 *
	 * 1    = most constrained, only works on one counter
	 * wmax = least constrained, works on any counter
	 *
	 * assign events to counters starting with most
	 * constrained events.
	 */
	wmax = x86_pmu.num_events;

	/*
	 * when fixed event counters are present,
	 * wmax is incremented by 1 to account
	 * for one more choice
	 */
	if (x86_pmu.num_events_fixed)
		wmax++;

	for (w = 1, num = n; num && w <= wmax; w++) {
		/* for each event */
		for (i = 0; num && i < n; i++) {
			c = constraints[i];
			hwc = &cpuc->event_list[i]->hw;

			if (c->weight != w)
				continue;

			for_each_set_bit(j, c->idxmsk, X86_PMC_IDX_MAX) {
				if (!test_bit(j, used_mask))
					break;
			}

			if (j == X86_PMC_IDX_MAX)
				break;

			__set_bit(j, used_mask);

			if (assign)
				assign[i] = j;
			num--;
		}
	}
done:
	/*
	 * scheduling failed or is just a simulation,
	 * free resources if necessary
	 */
	if (!assign || num) {
		for (i = 0; i < n; i++) {
			if (x86_pmu.put_event_constraints)
				x86_pmu.put_event_constraints(cpuc, cpuc->event_list[i]);
		}
	}
	return num ? -ENOSPC : 0;
}
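
/*
 * Illustrative slow-path run (constraints made up): event A may run on
 * counter 0 only (weight 1), event B on counters 0-1 (weight 2). The
 * w == 1 pass pins A to counter 0; the w == 2 pass finds counter 0 used
 * and places B on counter 1. Scheduling B first could have grabbed
 * counter 0 and left A unschedulable - hence most-constrained-first.
 */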

/*
 * dogrp: true if we must also collect the leader's sibling events (group)
 * returns total number of events and error code
 */
static int collect_events(struct cpu_hw_events *cpuc, struct perf_event *leader, bool dogrp)
{
	struct perf_event *event;
	int n, max_count;

	max_count = x86_pmu.num_events + x86_pmu.num_events_fixed;

	/* current number of events already accepted */
	n = cpuc->n_events;

	if (is_x86_event(leader)) {
		if (n >= max_count)
			return -ENOSPC;
		cpuc->event_list[n] = leader;
		n++;
	}
	if (!dogrp)
		return n;

	list_for_each_entry(event, &leader->sibling_list, group_entry) {
		if (!is_x86_event(event) ||
		    event->state <= PERF_EVENT_STATE_OFF)
			continue;

		if (n >= max_count)
			return -ENOSPC;

		cpuc->event_list[n] = event;
		n++;
	}
	return n;
}

static inline void x86_assign_hw_event(struct perf_event *event,
				struct cpu_hw_events *cpuc, int i)
{
	struct hw_perf_event *hwc = &event->hw;

	hwc->idx = cpuc->assign[i];
	hwc->last_cpu = smp_processor_id();
	hwc->last_tag = ++cpuc->tags[i];

	if (hwc->idx == X86_PMC_IDX_FIXED_BTS) {
		hwc->config_base = 0;
		hwc->event_base	= 0;
	} else if (hwc->idx >= X86_PMC_IDX_FIXED) {
		hwc->config_base = MSR_ARCH_PERFMON_FIXED_CTR_CTRL;
		/*
		 * We set it so that event_base + idx in wrmsr/rdmsr maps to
		 * MSR_ARCH_PERFMON_FIXED_CTR0 ... CTR2:
		 */
		hwc->event_base =
			MSR_ARCH_PERFMON_FIXED_CTR0 - X86_PMC_IDX_FIXED;
	} else {
		hwc->config_base = x86_pmu.eventsel;
		hwc->event_base  = x86_pmu.perfctr;
	}
}

static inline int match_prev_assignment(struct hw_perf_event *hwc,
					struct cpu_hw_events *cpuc,
					int i)
{
	return hwc->idx == cpuc->assign[i] &&
		hwc->last_cpu == smp_processor_id() &&
		hwc->last_tag == cpuc->tags[i];
}
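
/*
 * The tag comparison above is what makes counter reuse detectable:
 * tags[i] is bumped every time x86_assign_hw_event() hands out slot i,
 * so an event whose idx and CPU still match, but whose counter has been
 * programmed for somebody else in the meantime, fails the last_tag test
 * and gets reprogrammed instead of silently counting stale state.
 */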

static int x86_pmu_start(struct perf_event *event);
static void x86_pmu_stop(struct perf_event *event);

void hw_perf_enable(void)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	struct perf_event *event;
	struct hw_perf_event *hwc;
	int i;

	if (!x86_pmu_initialized())
		return;

	if (cpuc->enabled)
		return;

	if (cpuc->n_added) {
		int n_running = cpuc->n_events - cpuc->n_added;
		/*
		 * apply assignment obtained either from
		 * hw_perf_group_sched_in() or x86_pmu_enable()
		 *
		 * step1: save events moving to new counters
		 * step2: reprogram moved events into new counters
		 */
		for (i = 0; i < n_running; i++) {
			event = cpuc->event_list[i];
			hwc = &event->hw;

			/*
			 * we can avoid reprogramming counter if:
			 * - assigned same counter as last time
			 * - running on same CPU as last time
			 * - no other event has used the counter since
			 */
			if (hwc->idx == -1 ||
			    match_prev_assignment(hwc, cpuc, i))
				continue;

			x86_pmu_stop(event);
		}

		for (i = 0; i < cpuc->n_events; i++) {
			event = cpuc->event_list[i];
			hwc = &event->hw;

			if (!match_prev_assignment(hwc, cpuc, i))
				x86_assign_hw_event(event, cpuc, i);
			else if (i < n_running)
				continue;

			x86_pmu_start(event);
		}
		cpuc->n_added = 0;
		perf_events_lapic_init();
	}

	cpuc->enabled = 1;
	barrier();

	x86_pmu.enable_all();
}

static inline void __x86_pmu_enable_event(struct hw_perf_event *hwc)
{
	(void)checking_wrmsrl(hwc->config_base + hwc->idx,
			      hwc->config | ARCH_PERFMON_EVENTSEL_ENABLE);
}

static inline void x86_pmu_disable_event(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	(void)checking_wrmsrl(hwc->config_base + hwc->idx, hwc->config);
}

static DEFINE_PER_CPU(u64 [X86_PMC_IDX_MAX], pmc_prev_left);

/*
 * Set the next IRQ period, based on the hwc->period_left value.
 * To be called with the event disabled in hw:
 */
static int
x86_perf_event_set_period(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	s64 left = atomic64_read(&hwc->period_left);
	s64 period = hwc->sample_period;
	int err, ret = 0, idx = hwc->idx;

	if (idx == X86_PMC_IDX_FIXED_BTS)
		return 0;

	/*
	 * If we are way outside a reasonable range then just skip forward:
	 */
	if (unlikely(left <= -period)) {
		left = period;
		atomic64_set(&hwc->period_left, left);
		hwc->last_period = period;
		ret = 1;
	}

	if (unlikely(left <= 0)) {
		left += period;
		atomic64_set(&hwc->period_left, left);
		hwc->last_period = period;
		ret = 1;
	}
	/*
	 * Quirk: certain CPUs don't like it if just 1 hw_event is left:
	 */
	if (unlikely(left < 2))
		left = 2;

	if (left > x86_pmu.max_period)
		left = x86_pmu.max_period;

	per_cpu(pmc_prev_left[idx], smp_processor_id()) = left;

	/*
	 * The hw event starts counting from this event offset,
	 * mark it to be able to extract future deltas:
	 */
	atomic64_set(&hwc->prev_count, (u64)-left);

	err = checking_wrmsrl(hwc->event_base + idx,
			     (u64)(-left) & x86_pmu.event_mask);

	perf_event_update_userpage(event);

	return ret;
}
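
/*
 * E.g. (illustrative) with left == 2000 the counter is written with
 * (u64)-2000 masked to the valid counter width, so the up-counter
 * overflows and raises its interrupt after exactly 2000 increments;
 * prev_count records the same start value for x86_perf_event_update()
 * to diff against.
 */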

static void x86_pmu_enable_event(struct perf_event *event)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	if (cpuc->enabled)
		__x86_pmu_enable_event(&event->hw);
}

/*
 * activate a single event
 *
 * The event is added to the group of enabled events
 * but only if it can be scheduled with existing events.
 *
 * Called with PMU disabled. If successful and return value 1,
 * then guaranteed to call perf_enable() and hw_perf_enable()
 */
static int x86_pmu_enable(struct perf_event *event)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	struct hw_perf_event *hwc;
	int assign[X86_PMC_IDX_MAX];
	int n, n0, ret;

	hwc = &event->hw;

	n0 = cpuc->n_events;
	n = collect_events(cpuc, event, false);
	if (n < 0)
		return n;

	ret = x86_schedule_events(cpuc, n, assign);
	if (ret)
		return ret;
	/*
	 * copy new assignment; now that we know it is possible
	 * it will be used by hw_perf_enable()
	 */
	memcpy(cpuc->assign, assign, n*sizeof(int));

	cpuc->n_events = n;
	cpuc->n_added += n - n0;

	return 0;
}

static int x86_pmu_start(struct perf_event *event)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	int idx = event->hw.idx;

	if (idx == -1)
		return -EAGAIN;

	x86_perf_event_set_period(event);
	cpuc->events[idx] = event;
	__set_bit(idx, cpuc->active_mask);
	x86_pmu.enable(event);
	perf_event_update_userpage(event);

	return 0;
}

static void x86_pmu_unthrottle(struct perf_event *event)
{
	int ret = x86_pmu_start(event);
	WARN_ON_ONCE(ret);
}

void perf_event_print_debug(void)
{
	u64 ctrl, status, overflow, pmc_ctrl, pmc_count, prev_left, fixed;
	struct cpu_hw_events *cpuc;
	unsigned long flags;
	int cpu, idx;

	if (!x86_pmu.num_events)
		return;

	local_irq_save(flags);

	cpu = smp_processor_id();
	cpuc = &per_cpu(cpu_hw_events, cpu);

	if (x86_pmu.version >= 2) {
		rdmsrl(MSR_CORE_PERF_GLOBAL_CTRL, ctrl);
		rdmsrl(MSR_CORE_PERF_GLOBAL_STATUS, status);
		rdmsrl(MSR_CORE_PERF_GLOBAL_OVF_CTRL, overflow);
		rdmsrl(MSR_ARCH_PERFMON_FIXED_CTR_CTRL, fixed);

		pr_info("\n");
		pr_info("CPU#%d: ctrl:       %016llx\n", cpu, ctrl);
		pr_info("CPU#%d: status:     %016llx\n", cpu, status);
		pr_info("CPU#%d: overflow:   %016llx\n", cpu, overflow);
		pr_info("CPU#%d: fixed:      %016llx\n", cpu, fixed);
	}
	pr_info("CPU#%d: active:     %016llx\n", cpu, *(u64 *)cpuc->active_mask);

	for (idx = 0; idx < x86_pmu.num_events; idx++) {
		rdmsrl(x86_pmu.eventsel + idx, pmc_ctrl);
		rdmsrl(x86_pmu.perfctr + idx, pmc_count);

		prev_left = per_cpu(pmc_prev_left[idx], cpu);

		pr_info("CPU#%d:   gen-PMC%d ctrl:  %016llx\n",
			cpu, idx, pmc_ctrl);
		pr_info("CPU#%d:   gen-PMC%d count: %016llx\n",
			cpu, idx, pmc_count);
		pr_info("CPU#%d:   gen-PMC%d left:  %016llx\n",
			cpu, idx, prev_left);
	}
	for (idx = 0; idx < x86_pmu.num_events_fixed; idx++) {
		rdmsrl(MSR_ARCH_PERFMON_FIXED_CTR0 + idx, pmc_count);

		pr_info("CPU#%d: fixed-PMC%d count: %016llx\n",
			cpu, idx, pmc_count);
	}
	local_irq_restore(flags);
}

static void x86_pmu_stop(struct perf_event *event)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	struct hw_perf_event *hwc = &event->hw;
	int idx = hwc->idx;

	if (!__test_and_clear_bit(idx, cpuc->active_mask))
		return;

	x86_pmu.disable(event);

	/*
	 * Drain the remaining delta count out of an event
	 * that we are disabling:
	 */
	x86_perf_event_update(event);

	cpuc->events[idx] = NULL;
}

static void x86_pmu_disable(struct perf_event *event)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	int i;

	x86_pmu_stop(event);

	for (i = 0; i < cpuc->n_events; i++) {
		if (event == cpuc->event_list[i]) {

			if (x86_pmu.put_event_constraints)
				x86_pmu.put_event_constraints(cpuc, event);

			while (++i < cpuc->n_events)
				cpuc->event_list[i-1] = cpuc->event_list[i];

			--cpuc->n_events;
			break;
		}
	}
	perf_event_update_userpage(event);
}

static int x86_pmu_handle_irq(struct pt_regs *regs)
{
	struct perf_sample_data data;
	struct cpu_hw_events *cpuc;
	struct perf_event *event;
	struct hw_perf_event *hwc;
	int idx, handled = 0;
	u64 val;

	perf_sample_data_init(&data, 0);

	cpuc = &__get_cpu_var(cpu_hw_events);

	for (idx = 0; idx < x86_pmu.num_events; idx++) {
		if (!test_bit(idx, cpuc->active_mask))
			continue;

		event = cpuc->events[idx];
		hwc = &event->hw;

		val = x86_perf_event_update(event);
		if (val & (1ULL << (x86_pmu.event_bits - 1)))
			continue;

		/*
		 * event overflow
		 */
		handled = 1;
		data.period = event->hw.last_period;

		if (!x86_perf_event_set_period(event))
			continue;

		if (perf_event_overflow(event, 1, &data, regs))
			x86_pmu_stop(event);
	}

	if (handled)
		inc_irq_stat(apic_perf_irqs);

	return handled;
}
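
/*
 * The sign-bit test in x86_pmu_handle_irq() above exploits the -left
 * programming scheme: a counter that has not yet overflowed is still
 * counting up through large values with its top valid bit set, so a set
 * bit means "no overflow here" and the counter is skipped.
 */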
Robert Richter39d81ea2009-04-29 12:47:05 +02001126
Peter Zijlstrab6276f32009-04-06 11:45:03 +02001127void smp_perf_pending_interrupt(struct pt_regs *regs)
1128{
1129 irq_enter();
1130 ack_APIC_irq();
1131 inc_irq_stat(apic_pending_irqs);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001132 perf_event_do_pending();
Peter Zijlstrab6276f32009-04-06 11:45:03 +02001133 irq_exit();
1134}
1135
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001136void set_perf_event_pending(void)
Peter Zijlstrab6276f32009-04-06 11:45:03 +02001137{
Ingo Molnar04da8a42009-08-11 10:40:08 +02001138#ifdef CONFIG_X86_LOCAL_APIC
Peter Zijlstra7d428962009-09-23 11:03:37 +02001139 if (!x86_pmu.apic || !x86_pmu_initialized())
1140 return;
1141
Peter Zijlstrab6276f32009-04-06 11:45:03 +02001142 apic->send_IPI_self(LOCAL_PENDING_VECTOR);
Ingo Molnar04da8a42009-08-11 10:40:08 +02001143#endif
Peter Zijlstrab6276f32009-04-06 11:45:03 +02001144}
1145
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001146void perf_events_lapic_init(void)
Ingo Molnar241771e2008-12-03 10:39:53 +01001147{
Ingo Molnar04da8a42009-08-11 10:40:08 +02001148#ifdef CONFIG_X86_LOCAL_APIC
1149 if (!x86_pmu.apic || !x86_pmu_initialized())
Ingo Molnar241771e2008-12-03 10:39:53 +01001150 return;
Robert Richter85cf9db2009-04-29 12:47:20 +02001151
Ingo Molnar241771e2008-12-03 10:39:53 +01001152 /*
Yong Wangc323d952009-05-29 13:28:35 +08001153 * Always use NMI for PMU
Ingo Molnar241771e2008-12-03 10:39:53 +01001154 */
Yong Wangc323d952009-05-29 13:28:35 +08001155 apic_write(APIC_LVTPC, APIC_DM_NMI);
Ingo Molnar04da8a42009-08-11 10:40:08 +02001156#endif
Ingo Molnar241771e2008-12-03 10:39:53 +01001157}
1158
1159static int __kprobes
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001160perf_event_nmi_handler(struct notifier_block *self,
Ingo Molnar241771e2008-12-03 10:39:53 +01001161 unsigned long cmd, void *__args)
1162{
1163 struct die_args *args = __args;
1164 struct pt_regs *regs;
1165
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001166 if (!atomic_read(&active_events))
Peter Zijlstra63a809a2009-05-01 12:23:17 +02001167 return NOTIFY_DONE;
1168
Peter Zijlstrab0f3f282009-03-05 18:08:27 +01001169 switch (cmd) {
1170 case DIE_NMI:
1171 case DIE_NMI_IPI:
1172 break;
1173
1174 default:
Ingo Molnar241771e2008-12-03 10:39:53 +01001175 return NOTIFY_DONE;
Peter Zijlstrab0f3f282009-03-05 18:08:27 +01001176 }
Ingo Molnar241771e2008-12-03 10:39:53 +01001177
1178 regs = args->regs;
1179
Ingo Molnar04da8a42009-08-11 10:40:08 +02001180#ifdef CONFIG_X86_LOCAL_APIC
Ingo Molnar241771e2008-12-03 10:39:53 +01001181 apic_write(APIC_LVTPC, APIC_DM_NMI);
Ingo Molnar04da8a42009-08-11 10:40:08 +02001182#endif
Peter Zijlstraa4016a72009-05-14 14:52:17 +02001183 /*
1184 * Can't rely on the handled return value to say it was our NMI, two
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001185 * events could trigger 'simultaneously' raising two back-to-back NMIs.
Peter Zijlstraa4016a72009-05-14 14:52:17 +02001186 *
1187 * If the first NMI handles both, the latter will be empty and daze
1188 * the CPU.
1189 */
Yong Wanga3288102009-06-03 13:12:55 +08001190 x86_pmu.handle_irq(regs);
Ingo Molnar241771e2008-12-03 10:39:53 +01001191
Peter Zijlstraa4016a72009-05-14 14:52:17 +02001192 return NOTIFY_STOP;
Ingo Molnar241771e2008-12-03 10:39:53 +01001193}
1194
Peter Zijlstraf22f54f2010-02-26 12:05:05 +01001195static __read_mostly struct notifier_block perf_event_nmi_notifier = {
1196 .notifier_call = perf_event_nmi_handler,
1197 .next = NULL,
1198 .priority = 1
1199};
1200
Peter Zijlstra63b14642010-01-22 16:32:17 +01001201static struct event_constraint unconstrained;
Stephane Eranian38331f62010-02-08 17:17:01 +02001202static struct event_constraint emptyconstraint;
Peter Zijlstra63b14642010-01-22 16:32:17 +01001203
static struct event_constraint *
x86_get_event_constraints(struct cpu_hw_events *cpuc, struct perf_event *event)
{
	struct event_constraint *c;

	if (x86_pmu.event_constraints) {
		for_each_event_constraint(c, x86_pmu.event_constraints) {
			if ((event->hw.config & c->cmask) == c->code)
				return c;
		}
	}

	return &unconstrained;
}

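/*
 * Activate/deactivate a single event on this CPU: update its state and
 * timestamps, and keep the cpuctx exclusive/active accounting consistent.
 * Non-x86 (software) events are enabled/disabled through their own pmu.
 */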
static int x86_event_sched_in(struct perf_event *event,
			  struct perf_cpu_context *cpuctx)
{
	int ret = 0;

	event->state = PERF_EVENT_STATE_ACTIVE;
	event->oncpu = smp_processor_id();
	event->tstamp_running += event->ctx->time - event->tstamp_stopped;

	if (!is_x86_event(event))
		ret = event->pmu->enable(event);

	if (!ret && !is_software_event(event))
		cpuctx->active_oncpu++;

	if (!ret && event->attr.exclusive)
		cpuctx->exclusive = 1;

	return ret;
}

static void x86_event_sched_out(struct perf_event *event,
			    struct perf_cpu_context *cpuctx)
{
	event->state = PERF_EVENT_STATE_INACTIVE;
	event->oncpu = -1;

	if (!is_x86_event(event))
		event->pmu->disable(event);

	event->tstamp_running -= event->ctx->time - event->tstamp_stopped;

	if (!is_software_event(event))
		cpuctx->active_oncpu--;

	if (event->attr.exclusive || !cpuctx->active_oncpu)
		cpuctx->exclusive = 0;
}

/*
 * Called to enable a whole group of events.
 * Returns 1 if the group was enabled, or -EAGAIN if it could not be.
 *
 * Assumes the caller has disabled interrupts and frozen the PMU with
 * hw_perf_save_disable.  On success (return value 1), the caller is
 * guaranteed to call perf_enable() and hw_perf_enable() afterwards.
 */
int hw_perf_group_sched_in(struct perf_event *leader,
	       struct perf_cpu_context *cpuctx,
	       struct perf_event_context *ctx)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	struct perf_event *sub;
	int assign[X86_PMC_IDX_MAX];
	int n0, n1, ret;

	/* n0 = total number of events */
	n0 = collect_events(cpuc, leader, true);
	if (n0 < 0)
		return n0;

	ret = x86_schedule_events(cpuc, n0, assign);
	if (ret)
		return ret;

	ret = x86_event_sched_in(leader, cpuctx);
	if (ret)
		return ret;

	n1 = 1;
	list_for_each_entry(sub, &leader->sibling_list, group_entry) {
		if (sub->state > PERF_EVENT_STATE_OFF) {
			ret = x86_event_sched_in(sub, cpuctx);
			if (ret)
				goto undo;
			++n1;
		}
	}
	/*
	 * Copy the new assignment now that we know it is possible;
	 * it will be consumed by hw_perf_enable().
	 */
	memcpy(cpuc->assign, assign, n0*sizeof(int));

	cpuc->n_events	= n0;
	cpuc->n_added  += n1;
	ctx->nr_active += n1;

	/*
	 * Return 1 to signal success and that the events are active.
	 * This is not quite true, because we defer actual activation
	 * until hw_perf_enable(), but this way we ensure the caller
	 * won't try to enable the events individually.
	 */
	return 1;
undo:
	x86_event_sched_out(leader, cpuctx);
	n0 = 1;
	list_for_each_entry(sub, &leader->sibling_list, group_entry) {
		if (sub->state == PERF_EVENT_STATE_ACTIVE) {
			x86_event_sched_out(sub, cpuctx);
			if (++n0 == n1)
				break;
		}
	}
	return ret;
}

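/*
 * Vendor-specific PMU drivers, included directly so that they can use the
 * static helpers defined above.
 */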
#include "perf_event_amd.c"
#include "perf_event_p6.c"
#include "perf_event_intel.c"

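/*
 * CPU hotplug callback: forward each hotplug phase to the corresponding
 * x86_pmu hook, if the active PMU driver provides one.
 */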
static int __cpuinit
x86_pmu_notifier(struct notifier_block *self, unsigned long action, void *hcpu)
{
	unsigned int cpu = (long)hcpu;

	switch (action & ~CPU_TASKS_FROZEN) {
	case CPU_UP_PREPARE:
		if (x86_pmu.cpu_prepare)
			x86_pmu.cpu_prepare(cpu);
		break;

	case CPU_STARTING:
		if (x86_pmu.cpu_starting)
			x86_pmu.cpu_starting(cpu);
		break;

	case CPU_DYING:
		if (x86_pmu.cpu_dying)
			x86_pmu.cpu_dying(cpu);
		break;

	case CPU_DEAD:
		if (x86_pmu.cpu_dead)
			x86_pmu.cpu_dead(cpu);
		break;

	default:
		break;
	}

	return NOTIFY_OK;
}

static void __init pmu_check_apic(void)
{
	if (cpu_has_apic)
		return;

	x86_pmu.apic = 0;
	pr_info("no APIC, boot with the \"lapic\" boot parameter to force-enable it.\n");
	pr_info("no hardware sampling interrupt available.\n");
}

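/*
 * Boot-time setup: probe the vendor PMU driver, clamp the counter counts
 * to the architectural maxima, compute the global event mask, hook up the
 * lapic/NMI/hotplug machinery and initialize the generic constraints.
 */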
void __init init_hw_perf_events(void)
{
	struct event_constraint *c;
	int err;

	pr_info("Performance Events: ");

	switch (boot_cpu_data.x86_vendor) {
	case X86_VENDOR_INTEL:
		err = intel_pmu_init();
		break;
	case X86_VENDOR_AMD:
		err = amd_pmu_init();
		break;
	default:
		return;
	}
	if (err != 0) {
		pr_cont("no PMU driver, software events only.\n");
		return;
	}

	pmu_check_apic();

	pr_cont("%s PMU driver.\n", x86_pmu.name);

	if (x86_pmu.num_events > X86_PMC_MAX_GENERIC) {
		WARN(1, KERN_ERR "hw perf events %d > max(%d), clipping!",
		     x86_pmu.num_events, X86_PMC_MAX_GENERIC);
		x86_pmu.num_events = X86_PMC_MAX_GENERIC;
	}
	perf_event_mask = (1 << x86_pmu.num_events) - 1;
	perf_max_events = x86_pmu.num_events;

	if (x86_pmu.num_events_fixed > X86_PMC_MAX_FIXED) {
		WARN(1, KERN_ERR "hw perf events fixed %d > max(%d), clipping!",
		     x86_pmu.num_events_fixed, X86_PMC_MAX_FIXED);
		x86_pmu.num_events_fixed = X86_PMC_MAX_FIXED;
	}

	perf_event_mask |=
		((1LL << x86_pmu.num_events_fixed)-1) << X86_PMC_IDX_FIXED;
	x86_pmu.intel_ctrl = perf_event_mask;

	perf_events_lapic_init();
	register_die_notifier(&perf_event_nmi_notifier);

	unconstrained = (struct event_constraint)
		__EVENT_CONSTRAINT(0, (1ULL << x86_pmu.num_events) - 1,
				   0, x86_pmu.num_events);

	if (x86_pmu.event_constraints) {
		for_each_event_constraint(c, x86_pmu.event_constraints) {
			if (c->cmask != INTEL_ARCH_FIXED_MASK)
				continue;

			c->idxmsk64 |= (1ULL << x86_pmu.num_events) - 1;
			c->weight += x86_pmu.num_events;
		}
	}

	pr_info("... version:                %d\n",     x86_pmu.version);
	pr_info("... bit width:              %d\n",     x86_pmu.event_bits);
	pr_info("... generic registers:      %d\n",     x86_pmu.num_events);
	pr_info("... value mask:             %016Lx\n", x86_pmu.event_mask);
	pr_info("... max period:             %016Lx\n", x86_pmu.max_period);
	pr_info("... fixed-purpose events:   %d\n",     x86_pmu.num_events_fixed);
	pr_info("... event mask:             %016Lx\n", perf_event_mask);

	perf_cpu_notifier(x86_pmu_notifier);
}

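/*
 * ->read() callback: fold the current hardware counter value into the
 * generic event count.
 */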
static inline void x86_pmu_read(struct perf_event *event)
{
	x86_perf_event_update(event);
}

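/*
 * The x86 pmu: entry points called by the generic perf_event core.
 */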
static const struct pmu pmu = {
	.enable		= x86_pmu_enable,
	.disable	= x86_pmu_disable,
	.start		= x86_pmu_start,
	.stop		= x86_pmu_stop,
	.read		= x86_pmu_read,
	.unthrottle	= x86_pmu_unthrottle,
};

/*
 * validate a single event group
 *
 * validation includes:
 *	- check events are compatible with each other
 *	- events do not compete for the same counter
 *	- number of events <= number of counters
 *
 * validation ensures the group can be loaded onto the
 * PMU if it was the only group available.
 */
static int validate_group(struct perf_event *event)
{
	struct perf_event *leader = event->group_leader;
	struct cpu_hw_events *fake_cpuc;
	int ret, n;

	ret = -ENOMEM;
	fake_cpuc = kmalloc(sizeof(*fake_cpuc), GFP_KERNEL | __GFP_ZERO);
	if (!fake_cpuc)
		goto out;

	/*
	 * The event is not yet connected with its siblings, therefore
	 * we must first collect the existing siblings, then add the new
	 * event before we can simulate the scheduling.
	 */
	ret = -ENOSPC;
	n = collect_events(fake_cpuc, leader, true);
	if (n < 0)
		goto out_free;

	fake_cpuc->n_events = n;
	n = collect_events(fake_cpuc, event, false);
	if (n < 0)
		goto out_free;

	fake_cpuc->n_events = n;

	ret = x86_schedule_events(fake_cpuc, n, NULL);

out_free:
	kfree(fake_cpuc);
out:
	return ret;
}

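/*
 * Initialize a new event's hardware state and hand back the x86 pmu.
 * Group members are first scheduled against a fake cpuc to make sure
 * the whole group could fit on the PMU.
 */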
const struct pmu *hw_perf_event_init(struct perf_event *event)
{
	const struct pmu *tmp;
	int err;

	err = __hw_perf_event_init(event);
	if (!err) {
		/*
		 * we temporarily connect the event to its pmu
		 * such that validate_group() can classify
		 * it as an x86 event using is_x86_event()
		 */
		tmp = event->pmu;
		event->pmu = &pmu;

		if (event->group_leader != event)
			err = validate_group(event);

		event->pmu = tmp;
	}
	if (err) {
		if (event->destroy)
			event->destroy(event);
		return ERR_PTR(err);
	}

	return &pmu;
}

/*
 * callchain support
 */

static inline
void callchain_store(struct perf_callchain_entry *entry, u64 ip)
{
	if (entry->nr < PERF_MAX_STACK_DEPTH)
		entry->ip[entry->nr++] = ip;
}

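/*
 * One callchain buffer per context level: an NMI can hit while the IRQ
 * entry is being filled in, so the two must not share storage.
 */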
static DEFINE_PER_CPU(struct perf_callchain_entry, pmc_irq_entry);
static DEFINE_PER_CPU(struct perf_callchain_entry, pmc_nmi_entry);

static void
backtrace_warning_symbol(void *data, char *msg, unsigned long symbol)
{
	/* Ignore warnings */
}

static void backtrace_warning(void *data, char *msg)
{
	/* Ignore warnings */
}

static int backtrace_stack(void *data, char *name)
{
	return 0;
}

static void backtrace_address(void *data, unsigned long addr, int reliable)
{
	struct perf_callchain_entry *entry = data;

	if (reliable)
		callchain_store(entry, addr);
}

static const struct stacktrace_ops backtrace_ops = {
	.warning	= backtrace_warning,
	.warning_symbol	= backtrace_warning_symbol,
	.stack		= backtrace_stack,
	.address	= backtrace_address,
	.walk_stack	= print_context_stack_bp,
};

#include "../dumpstack.h"

static void
perf_callchain_kernel(struct pt_regs *regs, struct perf_callchain_entry *entry)
{
	callchain_store(entry, PERF_CONTEXT_KERNEL);
	callchain_store(entry, regs->ip);

	dump_trace(NULL, regs, NULL, regs->bp, &backtrace_ops, entry);
}

/*
 * best effort, GUP based copy_from_user() that assumes IRQ or NMI context
 */
static unsigned long
copy_from_user_nmi(void *to, const void __user *from, unsigned long n)
{
	unsigned long offset, addr = (unsigned long)from;
	int type = in_nmi() ? KM_NMI : KM_IRQ0;
	unsigned long size, len = 0;
	struct page *page;
	void *map;
	int ret;

	do {
		ret = __get_user_pages_fast(addr, 1, 0, &page);
		if (!ret)
			break;

		offset = addr & (PAGE_SIZE - 1);
		size = min(PAGE_SIZE - offset, n - len);

		map = kmap_atomic(page, type);
		memcpy(to, map+offset, size);
		kunmap_atomic(map, type);
		put_page(page);

		len  += size;
		to   += size;
		addr += size;

	} while (len < n);

	return len;
}

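/*
 * Returns non-zero only when a complete stack frame could be copied in
 * from user space.
 */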
static int copy_stack_frame(const void __user *fp, struct stack_frame *frame)
{
	unsigned long bytes;

	bytes = copy_from_user_nmi(frame, fp, sizeof(*frame));

	return bytes == sizeof(*frame);
}

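/*
 * Walk the user-space frame-pointer chain: copy in one stack_frame at a
 * time and stop on a copy failure or when the frame pointer drops below
 * the stack pointer.
 */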
static void
perf_callchain_user(struct pt_regs *regs, struct perf_callchain_entry *entry)
{
	struct stack_frame frame;
	const void __user *fp;

	if (!user_mode(regs))
		regs = task_pt_regs(current);

	fp = (void __user *)regs->bp;

	callchain_store(entry, PERF_CONTEXT_USER);
	callchain_store(entry, regs->ip);

	while (entry->nr < PERF_MAX_STACK_DEPTH) {
		frame.next_frame     = NULL;
		frame.return_address = 0;

		if (!copy_stack_frame(fp, &frame))
			break;

		if ((unsigned long)fp < regs->sp)
			break;

		callchain_store(entry, frame.return_address);
		fp = frame.next_frame;
	}
}

static void
perf_do_callchain(struct pt_regs *regs, struct perf_callchain_entry *entry)
{
	int is_user;

	if (!regs)
		return;

	is_user = user_mode(regs);

	if (is_user && current->state != TASK_RUNNING)
		return;

	if (!is_user)
		perf_callchain_kernel(regs, entry);

	if (current->mm)
		perf_callchain_user(regs, entry);
}

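/*
 * Pick the per-context callchain buffer (NMI vs. IRQ), reset it and
 * fill it in from @regs.
 */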
struct perf_callchain_entry *perf_callchain(struct pt_regs *regs)
{
	struct perf_callchain_entry *entry;

	if (in_nmi())
		entry = &__get_cpu_var(pmc_nmi_entry);
	else
		entry = &__get_cpu_var(pmc_irq_entry);

	entry->nr = 0;

	perf_do_callchain(regs, entry);

	return entry;
}

#ifdef CONFIG_EVENT_TRACING
void perf_arch_fetch_caller_regs(struct pt_regs *regs, unsigned long ip, int skip)
{
	regs->ip = ip;
	/*
	 * perf_arch_fetch_caller_regs adds another call; we need to
	 * increment the skip level.
	 */
	regs->bp = rewind_frame_pointer(skip + 1);
	regs->cs = __KERNEL_CS;
	local_save_flags(regs->flags);
}
#endif