#ifndef _ASM_X86_PERF_EVENT_H
#define _ASM_X86_PERF_EVENT_H

/*
 * Performance event hw details:
 */

#define X86_PMC_MAX_GENERIC				8
#define X86_PMC_MAX_FIXED				3

#define X86_PMC_IDX_GENERIC				0
#define X86_PMC_IDX_FIXED				32
#define X86_PMC_IDX_MAX					64

#define MSR_ARCH_PERFMON_PERFCTR0			0xc1
#define MSR_ARCH_PERFMON_PERFCTR1			0xc2

#define MSR_ARCH_PERFMON_EVENTSEL0			0x186
#define MSR_ARCH_PERFMON_EVENTSEL1			0x187

#define ARCH_PERFMON_EVENTSEL0_ENABLE			(1 << 22)
#define ARCH_PERFMON_EVENTSEL_INT			(1 << 20)
#define ARCH_PERFMON_EVENTSEL_OS			(1 << 17)
#define ARCH_PERFMON_EVENTSEL_USR			(1 << 16)

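/*
 * Illustrative sketch (not part of this header): programming generic
 * counter 0 by composing the bits above. The event code 0x3c with unit
 * mask 0x00 is the architectural UNHALTED_CORE_CYCLES encoding (also
 * available below as ARCH_PERFMON_UNHALTED_CORE_CYCLES_SEL/_UMASK);
 * wrmsrl() is the usual MSR accessor from <asm/msr.h>, and the function
 * name is made up for this example.
 */
#if 0
static void example_program_counter0(void)
{
	u64 config = 0x3c				/* event select      */
		   | (0x00 << 8)			/* unit mask         */
		   | ARCH_PERFMON_EVENTSEL_USR		/* count in ring 3   */
		   | ARCH_PERFMON_EVENTSEL_OS		/* count in ring 0   */
		   | ARCH_PERFMON_EVENTSEL_INT		/* PMI on overflow   */
		   | ARCH_PERFMON_EVENTSEL0_ENABLE;	/* start counting    */

	wrmsrl(MSR_ARCH_PERFMON_EVENTSEL0, config);
}
#endif
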
/*
 * Masks for the fields of a raw event config; INTEL_ARCH_EVENT_MASK
 * covers the event select and the unit mask together:
 */

#define INTEL_ARCH_EVTSEL_MASK		0x000000FFULL
#define INTEL_ARCH_UNIT_MASK		0x0000FF00ULL
#define INTEL_ARCH_EDGE_MASK		0x00040000ULL
#define INTEL_ARCH_INV_MASK		0x00800000ULL
#define INTEL_ARCH_CNT_MASK		0xFF000000ULL
#define INTEL_ARCH_EVENT_MASK	(INTEL_ARCH_UNIT_MASK|INTEL_ARCH_EVTSEL_MASK)

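/*
 * Illustrative sketch: pulling the individual fields back out of a raw
 * config value with the masks above. The helper names are made up; the
 * shifts mirror the architectural EVENTSEL layout (unit mask at bit 8).
 */
#if 0
static unsigned int example_event_select(u64 config)
{
	return config & INTEL_ARCH_EVTSEL_MASK;
}

static unsigned int example_unit_mask(u64 config)
{
	return (config & INTEL_ARCH_UNIT_MASK) >> 8;
}

static bool example_uses_filters(u64 config)
{
	/* true if any of the edge/inv/cnt-mask filters are active */
	return config & (INTEL_ARCH_EDGE_MASK |
			 INTEL_ARCH_INV_MASK  |
			 INTEL_ARCH_CNT_MASK);
}
#endif
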
/*
 * Filter mask used to validate fixed-counter events.
 * The following filters disqualify an event from running on a fixed counter:
 *  - inv
 *  - edge
 *  - cnt-mask
 * The other filters are supported by fixed counters.
 * The any-thread option is supported starting with v3.
 */
#define INTEL_ARCH_FIXED_MASK \
	(INTEL_ARCH_CNT_MASK| \
	 INTEL_ARCH_INV_MASK| \
	 INTEL_ARCH_EDGE_MASK|\
	 INTEL_ARCH_UNIT_MASK|\
	 INTEL_ARCH_EVENT_MASK)

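/*
 * Illustrative sketch: deciding whether an event may run on a fixed
 * counter by masking its config with INTEL_ARCH_FIXED_MASK and comparing
 * against the architectural encodings (0x00c0 = INSTR_RETIRED.ANY,
 * 0x003c = UNHALTED_CORE_CYCLES, per the Intel SDM). The X86_PMC_IDX_*
 * constants are defined further down; the function name is made up.
 */
#if 0
static int example_fixed_counter_idx(u64 config)
{
	switch (config & INTEL_ARCH_FIXED_MASK) {
	case 0x00c0:	/* INSTR_RETIRED.ANY    -> FIXED_CTR0 */
		return X86_PMC_IDX_FIXED_INSTRUCTIONS;
	case 0x003c:	/* UNHALTED_CORE_CYCLES -> FIXED_CTR1 */
		return X86_PMC_IDX_FIXED_CPU_CYCLES;
	default:	/* inv/edge/cnt-mask set, or no fixed equivalent */
		return -1;
	}
}
#endif
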
#define ARCH_PERFMON_UNHALTED_CORE_CYCLES_SEL		0x3c
#define ARCH_PERFMON_UNHALTED_CORE_CYCLES_UMASK		(0x00 << 8)
#define ARCH_PERFMON_UNHALTED_CORE_CYCLES_INDEX		0
#define ARCH_PERFMON_UNHALTED_CORE_CYCLES_PRESENT \
		(1 << (ARCH_PERFMON_UNHALTED_CORE_CYCLES_INDEX))

#define ARCH_PERFMON_BRANCH_MISSES_RETIRED		6

/*
 * Intel "Architectural Performance Monitoring" CPUID
 * detection/enumeration details:
 */
union cpuid10_eax {
	struct {
		unsigned int version_id:8;
		unsigned int num_events:8;
		unsigned int bit_width:8;
		unsigned int mask_length:8;
	} split;
	unsigned int full;
};

union cpuid10_edx {
	struct {
		unsigned int num_events_fixed:4;
		unsigned int reserved:28;
	} split;
	unsigned int full;
};

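/*
 * Illustrative sketch: enumerating the PMU with the unions above.
 * Assumes cpuid() from <asm/processor.h> and pr_info() from
 * <linux/kernel.h>; the function name is made up. Note the inverted
 * sense of the ebx bits: a clear bit means the event is available.
 */
#if 0
static void example_detect_arch_perfmon(void)
{
	union cpuid10_eax eax;
	union cpuid10_edx edx;
	unsigned int ebx, ecx;

	cpuid(10, &eax.full, &ebx, &ecx, &edx.full);

	if (!eax.split.version_id)
		return;		/* no architectural perfmon at all */

	/* the fixed-counter count is meaningful from version 2 on: */
	pr_info("v%d, %d generic counters of %d bits, %d fixed counters\n",
		eax.split.version_id, eax.split.num_events,
		eax.split.bit_width, edx.split.num_events_fixed);

	if (!(ebx & ARCH_PERFMON_UNHALTED_CORE_CYCLES_PRESENT))
		pr_info("UNHALTED_CORE_CYCLES event is supported\n");
}
#endif
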
/*
 * Fixed-purpose performance events:
 */

/*
 * All 3 fixed-mode PMCs are configured via this single MSR:
 */
#define MSR_ARCH_PERFMON_FIXED_CTR_CTRL		0x38d

/*
 * The counts are available in three separate MSRs:
 */

/* Instr_Retired.Any: */
#define MSR_ARCH_PERFMON_FIXED_CTR0		0x309
#define X86_PMC_IDX_FIXED_INSTRUCTIONS		(X86_PMC_IDX_FIXED + 0)

/* CPU_CLK_Unhalted.Core: */
#define MSR_ARCH_PERFMON_FIXED_CTR1		0x30a
#define X86_PMC_IDX_FIXED_CPU_CYCLES		(X86_PMC_IDX_FIXED + 1)

/* CPU_CLK_Unhalted.Ref: */
#define MSR_ARCH_PERFMON_FIXED_CTR2		0x30b
#define X86_PMC_IDX_FIXED_BUS_CYCLES		(X86_PMC_IDX_FIXED + 2)

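/*
 * Illustrative sketch: each fixed-mode PMC owns a 4-bit field in the
 * control MSR above, counter i at bits [4*i+3:4*i]. Within a field,
 * bits 0-1 select the ring level (1 = OS, 2 = USR, 3 = both), bit 2 is
 * the any-thread option (v3), and bit 3 raises a PMI on overflow
 * (layout per the Intel SDM). rdmsrl()/wrmsrl() come from <asm/msr.h>;
 * the function name is made up.
 */
#if 0
static void example_enable_fixed_ctr0(void)
{
	u64 ctrl;

	rdmsrl(MSR_ARCH_PERFMON_FIXED_CTR_CTRL, ctrl);
	ctrl &= ~0xfULL;		/* clear FIXED_CTR0's field  */
	ctrl |= 0x3 | (1 << 3);		/* OS+USR, PMI on overflow   */
	wrmsrl(MSR_ARCH_PERFMON_FIXED_CTR_CTRL, ctrl);
}
#endif
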
/*
 * We model BTS tracing as another fixed-mode PMC.
 *
 * We choose a value in the middle of the fixed event range, since lower
 * values are used by actual fixed events and higher values are used
 * to indicate other overflow conditions in the PERF_GLOBAL_STATUS MSR.
 */
#define X86_PMC_IDX_FIXED_BTS			(X86_PMC_IDX_FIXED + 16)

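/*
 * Illustrative sketch: the index space doubles as the bit layout of
 * status bitmaps such as MSR_CORE_PERF_GLOBAL_STATUS (0x38e, not
 * defined in this header): generic counter n sits at bit n, fixed
 * counter n at bit 32 + n, and X86_PMC_IDX_MAX (64) bounds the bitmap.
 * The function name is made up.
 */
#if 0
static int example_counter_overflowed(u64 global_status, int idx)
{
	return (global_status >> idx) & 1;
}
#endif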

#ifdef CONFIG_PERF_EVENTS
extern void init_hw_perf_events(void);
extern void perf_events_lapic_init(void);

#define PERF_EVENT_INDEX_OFFSET			0

#else
static inline void init_hw_perf_events(void)		{ }
static inline void perf_events_lapic_init(void)	{ }
#endif
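
/*
 * Illustrative sketch: thanks to the empty stubs above, arch setup code
 * can call these hooks unconditionally, whether or not CONFIG_PERF_EVENTS
 * is enabled. The call site below is hypothetical.
 */
#if 0
static void example_arch_setup(void)
{
	init_hw_perf_events();		/* probe and register the PMU      */
	perf_events_lapic_init();	/* route the PMI through the APIC  */
}
#endif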

#endif /* _ASM_X86_PERF_EVENT_H */