/*
 * linux/arch/arm/include/asm/pmu.h
 *
 * Copyright (C) 2009 picoChip Designs Ltd, Jamie Iles
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 */

#ifndef __ARM_PMU_H__
#define __ARM_PMU_H__

#include <linux/interrupt.h>
#include <linux/perf_event.h>

/*
 * struct arm_pmu_platdata - ARM PMU platform data
 *
 * @handle_irq: an optional handler which will be called from the
 *	interrupt and passed the address of the low level handler,
 *	and can be used to implement any platform specific handling
 *	before or after calling it.
 * @runtime_resume: an optional handler which will be called by the
 *	runtime PM framework following a call to pm_runtime_get().
 *	Note that if pm_runtime_get() is called more than once in
 *	succession this handler will only be called once.
 * @runtime_suspend: an optional handler which will be called by the
 *	runtime PM framework following a call to pm_runtime_put().
 *	Note that if pm_runtime_get() is called more than once in
 *	succession this handler will only be called following the
 *	final call to pm_runtime_put() that actually disables the
 *	hardware.
 */
struct arm_pmu_platdata {
	irqreturn_t (*handle_irq)(int irq, void *dev,
				  irq_handler_t pmu_handler);
	int (*runtime_resume)(struct device *dev);
	int (*runtime_suspend)(struct device *dev);
};
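
/*
 * Usage sketch (illustrative only, not part of this interface): a board
 * file could supply runtime PM hooks for its PMU as below.  All
 * "myboard_*" names are placeholders.
 *
 *	static int myboard_pmu_runtime_resume(struct device *dev)
 *	{
 *		// e.g. enable the PMU's clock or power domain here
 *		return 0;
 *	}
 *
 *	static int myboard_pmu_runtime_suspend(struct device *dev)
 *	{
 *		// e.g. gate the PMU's clock or power domain here
 *		return 0;
 *	}
 *
 *	static struct arm_pmu_platdata myboard_pmu_platdata = {
 *		.runtime_resume		= myboard_pmu_runtime_resume,
 *		.runtime_suspend	= myboard_pmu_runtime_suspend,
 *	};
 *
 * The structure would then be attached as the PMU platform_device's
 * dev.platform_data (or via platform_device_add_data()).
 */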

#ifdef CONFIG_HW_PERF_EVENTS

/* The events for a given PMU register set. */
struct pmu_hw_events {
	/*
	 * The events that are active on the PMU for the given index.
	 */
	struct perf_event **events;

	/*
	 * A 1 bit for an index indicates that the counter is being used for
	 * an event. A 0 means that the counter can be used.
	 */
	unsigned long *used_mask;

	/*
	 * Hardware lock to serialize accesses to PMU registers. Needed for the
	 * read/modify/write sequences.
	 */
	raw_spinlock_t pmu_lock;
};
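
/*
 * Illustration (a sketch, not a required pattern): a PMU back-end's
 * get_event_idx() callback would typically claim a free counter by
 * atomically setting its bit in used_mask, e.g.:
 *
 *	static int mypmu_get_event_idx(struct pmu_hw_events *hw_events,
 *				       struct perf_event *event)
 *	{
 *		int idx;
 *
 *		for (idx = 0; idx < MYPMU_NUM_COUNTERS; idx++) {
 *			if (!test_and_set_bit(idx, hw_events->used_mask))
 *				return idx;
 *		}
 *
 *		return -EAGAIN;		// all counters busy
 *	}
 *
 * "mypmu" and MYPMU_NUM_COUNTERS are placeholders for a real back-end's
 * naming and counter count.
 */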

struct arm_pmu {
	struct pmu	pmu;
	cpumask_t	active_irqs;
	char		*name;
	irqreturn_t	(*handle_irq)(int irq_num, void *dev);
	void		(*enable)(struct perf_event *event);
	void		(*disable)(struct perf_event *event);
	int		(*get_event_idx)(struct pmu_hw_events *hw_events,
					 struct perf_event *event);
	int		(*set_event_filter)(struct hw_perf_event *evt,
					    struct perf_event_attr *attr);
	u32		(*read_counter)(struct perf_event *event);
	void		(*write_counter)(struct perf_event *event, u32 val);
	void		(*start)(struct arm_pmu *);
	void		(*stop)(struct arm_pmu *);
	void		(*reset)(void *);
	int		(*request_irq)(struct arm_pmu *, irq_handler_t handler);
	void		(*free_irq)(struct arm_pmu *);
	int		(*map_event)(struct perf_event *event);
	int		num_events;
	atomic_t	active_events;
	struct mutex	reserve_mutex;
	u64		max_period;
	struct platform_device	*plat_device;
	struct pmu_hw_events	*(*get_hw_events)(void);
};

#define to_arm_pmu(p) (container_of(p, struct arm_pmu, pmu))

extern const struct dev_pm_ops armpmu_dev_pm_ops;

int armpmu_register(struct arm_pmu *armpmu, int type);
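
/*
 * Registration sketch (assumed shape; details vary per back-end): a CPU
 * PMU driver fills in a struct arm_pmu with its callbacks and counter
 * information, then hands it to armpmu_register(), e.g.:
 *
 *	static struct arm_pmu mypmu = {
 *		.name		= "mypmu",
 *		.handle_irq	= mypmu_handle_irq,
 *		.enable		= mypmu_enable_event,
 *		.disable	= mypmu_disable_event,
 *		.read_counter	= mypmu_read_counter,
 *		.write_counter	= mypmu_write_counter,
 *		.get_event_idx	= mypmu_get_event_idx,
 *		.start		= mypmu_start,
 *		.stop		= mypmu_stop,
 *		.map_event	= mypmu_map_event,
 *		.num_events	= 4,
 *		.max_period	= (1LLU << 32) - 1,
 *	};
 *
 *	err = armpmu_register(&mypmu, PERF_TYPE_RAW);
 *
 * All "mypmu_*" symbols above are placeholders, not functions provided by
 * this header; the type argument selects the perf event type under which
 * the PMU is registered.
 */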

u64 armpmu_event_update(struct perf_event *event);

int armpmu_event_set_period(struct perf_event *event);

int armpmu_map_event(struct perf_event *event,
		     const unsigned (*event_map)[PERF_COUNT_HW_MAX],
		     const unsigned (*cache_map)[PERF_COUNT_HW_CACHE_MAX]
						[PERF_COUNT_HW_CACHE_OP_MAX]
						[PERF_COUNT_HW_CACHE_RESULT_MAX],
		     u32 raw_event_mask);
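
/*
 * Mapping sketch (illustrative, mirroring how back-ends commonly use this
 * helper): a back-end's map_event() callback usually just forwards to
 * armpmu_map_event() with its own generic-event and cache maps plus a raw
 * event mask, e.g.:
 *
 *	static int mypmu_map_event(struct perf_event *event)
 *	{
 *		return armpmu_map_event(event, &mypmu_perf_map,
 *					&mypmu_perf_cache_map, 0xFF);
 *	}
 *
 * where mypmu_perf_map and mypmu_perf_cache_map are placeholder tables of
 * the types given in the prototype above, and 0xFF is an example raw
 * event mask.
 */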

#endif /* CONFIG_HW_PERF_EVENTS */

#endif /* __ARM_PMU_H__ */