/*
 *  Performance counters:
 *
 *   Copyright(C) 2008, Thomas Gleixner <tglx@linutronix.de>
 *   Copyright(C) 2008, Red Hat, Inc., Ingo Molnar
 *
 *  Data type definitions, declarations, prototypes.
 *
 *  Started by: Thomas Gleixner and Ingo Molnar
 *
 *  For licensing details see kernel-base/COPYING
 */
#ifndef _LINUX_PERF_COUNTER_H
#define _LINUX_PERF_COUNTER_H

#include <asm/atomic.h>

#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/rculist.h>
#include <linux/rcupdate.h>
#include <linux/spinlock.h>

struct task_struct;

/*
 * User-space ABI bits:
 */

/*
 * Generalized performance counter event types, used by the hw_event.type
 * parameter of the sys_perf_counter_open() syscall:
 */
enum hw_event_types {
	/*
	 * Common hardware events, generalized by the kernel:
	 */
	PERF_COUNT_CYCLES		=  0,
	PERF_COUNT_INSTRUCTIONS		=  1,
	PERF_COUNT_CACHE_REFERENCES	=  2,
	PERF_COUNT_CACHE_MISSES		=  3,
	PERF_COUNT_BRANCH_INSTRUCTIONS	=  4,
	PERF_COUNT_BRANCH_MISSES	=  5,

	/*
	 * Special "software" counters provided by the kernel, even if
	 * the hardware does not support performance counters. These
	 * counters measure various physical and software events of the
	 * kernel (and allow profiling them as well):
	 */
	PERF_COUNT_CPU_CLOCK		= -1,
	PERF_COUNT_TASK_CLOCK		= -2,
	/*
	 * Future software events:
	 */
	/* PERF_COUNT_PAGE_FAULTS	= -3,
	   PERF_COUNT_CONTEXT_SWITCHES	= -4, */
};
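
/*
 * A rough usage note: hw_event.type carries one of the values above;
 * negative values select the kernel-provided software counters, and
 * when the 'raw' bit in struct perf_counter_hw_event (below) is set,
 * 'type' is instead interpreted as a raw, CPU-specific event code.
 */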

/*
 * IRQ-notification data record type:
 */
enum perf_counter_record_type {
	PERF_RECORD_SIMPLE		= 0,
	PERF_RECORD_IRQ			= 1,
	PERF_RECORD_GROUP		= 2,
};

/*
 * Hardware event to monitor via a performance monitoring counter:
 */
struct perf_counter_hw_event {
	s64			type;

	u64			irq_period;
	u32			record_type;

	u32			disabled     :  1, /* off by default */
				nmi	     :  1, /* NMI sampling   */
				raw	     :  1, /* raw event type */
				__reserved_1 : 29;

	u64			__reserved_2;
};
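
/*
 * A minimal usage sketch (assuming the sys_perf_counter_open()
 * prototype of this patch series: hw_event_uptr, pid, cpu, group_fd):
 *
 *	struct perf_counter_hw_event hw_event = {
 *		.type		= PERF_COUNT_INSTRUCTIONS,
 *		.record_type	= PERF_RECORD_SIMPLE,
 *	};
 *
 *	fd = sys_perf_counter_open(&hw_event, 0, -1, -1);
 *
 * where pid 0 means "current task", cpu -1 means "any CPU" and
 * group_fd -1 starts a new counter group.
 */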

/*
 * Kernel-internal data types:
 */

/**
 * struct hw_perf_counter - performance counter hardware details:
 */
struct hw_perf_counter {
#ifdef CONFIG_PERF_COUNTERS
	u64			config;
	unsigned long		config_base;
	unsigned long		counter_base;
	int			nmi;
	unsigned int		idx;
	atomic64_t		prev_count;
	u64			irq_period;
	atomic64_t		period_left;
#endif
};
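
/*
 * Bookkeeping sketch: prev_count shadows the last raw value read from
 * the hardware counter, so updates can be accumulated as 64-bit deltas
 * even when the hardware register is narrower, while period_left
 * tracks how much of irq_period remains until the next overflow
 * interrupt.
 */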

/*
 * Hardcoded buffer length limit for now, for IRQ-fed events:
 */
#define PERF_DATA_BUFLEN	2048

/**
 * struct perf_data - performance counter IRQ data sampling ...
 */
struct perf_data {
	int			len;
	int			rd_idx;
	int			overrun;
	u8			data[PERF_DATA_BUFLEN];
};
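
/*
 * Rough life cycle of a perf_data buffer: the IRQ path appends records
 * at offset 'len' (bumping 'overrun' instead once the buffer is full),
 * while read() drains bytes from 'rd_idx'. A sketch of the consumer
 * side:
 *
 *	count = min(count, (size_t)usrdata->len);
 *	if (copy_to_user(buf, usrdata->data + usrdata->rd_idx, count))
 *		return -EFAULT;
 *	usrdata->len -= count;
 *	usrdata->rd_idx = usrdata->len ? usrdata->rd_idx + count : 0;
 */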

struct perf_counter;

/**
 * struct hw_perf_counter_ops - performance counter hw ops
 */
struct hw_perf_counter_ops {
	void (*hw_perf_counter_enable)	(struct perf_counter *counter);
	void (*hw_perf_counter_disable)	(struct perf_counter *counter);
	void (*hw_perf_counter_read)	(struct perf_counter *counter);
};
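
/*
 * Architecture code supplies one instance of these ops per counter
 * type and hands it to the core via hw_perf_counter_init(). A
 * hypothetical backend (names invented for illustration) might look
 * like:
 *
 *	static const struct hw_perf_counter_ops x86_perf_counter_ops = {
 *		.hw_perf_counter_enable		= x86_perf_counter_enable,
 *		.hw_perf_counter_disable	= x86_perf_counter_disable,
 *		.hw_perf_counter_read		= x86_perf_counter_read,
 *	};
 */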

/**
 * enum perf_counter_active_state - the states of a counter
 */
enum perf_counter_active_state {
	PERF_COUNTER_STATE_OFF		= -1,
	PERF_COUNTER_STATE_INACTIVE	=  0,
	PERF_COUNTER_STATE_ACTIVE	=  1,
};
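
/*
 * Intended meaning of the states: OFF is a disabled counter (e.g.
 * created with hw_event.disabled set, or switched off via
 * perf_counter_task_disable()), INACTIVE is an enabled counter that
 * is not currently scheduled onto a hardware counter, and ACTIVE is
 * one currently counting on a CPU.
 */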

/**
 * struct perf_counter - performance counter kernel representation:
 */
struct perf_counter {
#ifdef CONFIG_PERF_COUNTERS
	struct list_head		list_entry;
	struct list_head		sibling_list;
	struct perf_counter		*group_leader;
	const struct hw_perf_counter_ops *hw_ops;

	enum perf_counter_active_state	state;
	atomic64_t			count;

	struct perf_counter_hw_event	hw_event;
	struct hw_perf_counter		hw;

	struct perf_counter_context	*ctx;
	struct task_struct		*task;

	/*
	 * Protect attach/detach:
	 */
	struct mutex			mutex;

	int				oncpu;
	int				cpu;

	/* read() / irq related data */
	wait_queue_head_t		waitq;
	/* optional: for NMIs */
	int				wakeup_pending;
	struct perf_data		*irqdata;
	struct perf_data		*usrdata;
	struct perf_data		data[2];
#endif
};
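
/*
 * The two perf_data buffers in data[] form a double-buffering scheme
 * (a sketch of the intent): the IRQ handler fills 'irqdata' while
 * read() drains 'usrdata'; once user space has emptied 'usrdata', the
 * core swaps the two pointers so freshly IRQ-fed data becomes
 * readable.
 */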

/**
 * struct perf_counter_context - counter context structure
 *
 * Used as a container for task counters and CPU counters as well:
 */
struct perf_counter_context {
#ifdef CONFIG_PERF_COUNTERS
	/*
	 * Protect the list of counters:
	 */
	spinlock_t		lock;

	struct list_head	counter_list;
	int			nr_counters;
	int			nr_active;
	struct task_struct	*task;
#endif
};

/**
 * struct perf_cpu_context - per cpu counter context structure
 */
struct perf_cpu_context {
	struct perf_counter_context	ctx;
	struct perf_counter_context	*task_ctx;
	int				active_oncpu;
	int				max_pertask;
};
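
/*
 * Roughly: each CPU owns one of these; 'ctx' holds the per-CPU
 * counters, 'task_ctx' points at the context of the task currently
 * running on that CPU (if it has counters), 'active_oncpu' counts the
 * counters currently live on the PMU, and 'max_pertask' bounds how
 * many hardware counters task contexts may occupy.
 */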

/*
 * Set by architecture code:
 */
extern int perf_max_counters;

#ifdef CONFIG_PERF_COUNTERS
extern const struct hw_perf_counter_ops *
hw_perf_counter_init(struct perf_counter *counter);

extern void perf_counter_task_sched_in(struct task_struct *task, int cpu);
extern void perf_counter_task_sched_out(struct task_struct *task, int cpu);
extern void perf_counter_task_tick(struct task_struct *task, int cpu);
extern void perf_counter_init_task(struct task_struct *task);
extern void perf_counter_notify(struct pt_regs *regs);
extern void perf_counter_print_debug(void);
extern u64 hw_perf_save_disable(void);
extern void hw_perf_restore(u64 ctrl);
extern int perf_counter_task_disable(void);
extern int perf_counter_task_enable(void);
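
/*
 * hw_perf_save_disable() globally disables the PMU and returns its
 * previous control state; hw_perf_restore() re-enables it. The usual
 * pairing (sketch):
 *
 *	u64 perf_flags;
 *
 *	perf_flags = hw_perf_save_disable();
 *	... manipulate counter state ...
 *	hw_perf_restore(perf_flags);
 */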

#else
static inline void
perf_counter_task_sched_in(struct task_struct *task, int cpu)	{ }
static inline void
perf_counter_task_sched_out(struct task_struct *task, int cpu)	{ }
static inline void
perf_counter_task_tick(struct task_struct *task, int cpu)	{ }
static inline void perf_counter_init_task(struct task_struct *task)	{ }
static inline void perf_counter_notify(struct pt_regs *regs)		{ }
static inline void perf_counter_print_debug(void)			{ }
static inline void hw_perf_restore(u64 ctrl)				{ }
static inline u64 hw_perf_save_disable(void)		{ return 0; }
static inline int perf_counter_task_disable(void)	{ return -EINVAL; }
static inline int perf_counter_task_enable(void)	{ return -EINVAL; }
#endif

#endif /* _LINUX_PERF_COUNTER_H */