blob: 0cd7824ca762575367ac136eabd5beb6538e5dd0 [file] [log] [blame]
Jamie Iles0f4f0672010-02-02 20:23:15 +01001/*
2 * linux/arch/arm/include/asm/pmu.h
3 *
4 * Copyright (C) 2009 picoChip Designs Ltd, Jamie Iles
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 *
10 */
11
12#ifndef __ARM_PMU_H__
13#define __ARM_PMU_H__
14
Rabin Vincent0e25a5c2011-02-08 09:24:36 +053015#include <linux/interrupt.h>
Mark Rutland0ce47082011-05-19 10:07:57 +010016#include <linux/perf_event.h>
Rabin Vincent0e25a5c2011-02-08 09:24:36 +053017
/*
 * struct arm_pmu_platdata - ARM PMU platform data
 *
 * @handle_irq: an optional handler which will be called from the
 *	interrupt and passed the address of the low level handler,
 *	and can be used to implement any platform specific handling
 *	before or after calling it.
 * @runtime_resume: an optional handler which will be called by the
 *	runtime PM framework following a call to pm_runtime_get().
 *	Note that if pm_runtime_get() is called more than once in
 *	succession this handler will only be called once.
 * @runtime_suspend: an optional handler which will be called by the
 *	runtime PM framework following a call to pm_runtime_put().
 *	Note that if pm_runtime_get() is called more than once in
 *	succession this handler will only be called following the
 *	final call to pm_runtime_put() that actually disables the
 *	hardware.
 */
struct arm_pmu_platdata {
	irqreturn_t (*handle_irq)(int irq, void *dev,
				  irq_handler_t pmu_handler);
	int	(*runtime_resume)(struct device *dev);
	int	(*runtime_suspend)(struct device *dev);
};
42
Mark Rutland0ce47082011-05-19 10:07:57 +010043#ifdef CONFIG_HW_PERF_EVENTS
44
/* The events for a given PMU register set. */
struct pmu_hw_events {
	/*
	 * The events that are active on the PMU for the given index.
	 * Indexed by counter number; sized by the owning PMU
	 * (see arm_pmu::num_events).
	 */
	struct perf_event **events;

	/*
	 * A 1 bit for an index indicates that the counter is being used for
	 * an event. A 0 means that the counter can be used.
	 */
	unsigned long *used_mask;

	/*
	 * Hardware lock to serialize accesses to PMU registers. Needed for the
	 * read/modify/write sequences.
	 */
	raw_spinlock_t pmu_lock;
};
64
/*
 * Snapshot of per-CPU PMU register state, filled in and consumed by the
 * arm_pmu save_regs/restore_regs callbacks (e.g. across a power
 * transition). Field names mirror the CP15 PMU registers (PMCR,
 * PMCNTENSET, PMUSERENR, PMINTENSET, PMXEVTYPERn, PMXEVCNTRn) —
 * NOTE(review): confirm against the save_regs/restore_regs
 * implementations, which are not visible in this header.
 */
struct cpupmu_regs {
	u32 pmc;		/* control register (presumably PMCR) */
	u32 pmcntenset;		/* counter-enable set state */
	u32 pmuseren;		/* user-mode enable state */
	u32 pmintenset;		/* overflow-interrupt enable state */
	u32 pmxevttype[8];	/* per-counter event type selections */
	u32 pmxevtcnt[8];	/* per-counter event count values */
};
73
/*
 * struct arm_pmu - per-implementation ARM PMU descriptor.
 *
 * Embeds the core perf 'struct pmu' (recover the container with
 * to_arm_pmu()) and adds the hardware-specific operations and state
 * an ARM PMU driver must supply.
 */
struct arm_pmu {
	struct pmu	pmu;			/* core perf PMU; must stay first for to_arm_pmu() container_of */
	cpumask_t	active_irqs;		/* IRQs currently requested by this PMU */
	cpumask_t	valid_cpus;		/* CPUs this PMU instance can monitor */
	char		*name;			/* human-readable PMU name */
	irqreturn_t	(*handle_irq)(int irq_num, void *dev);	/* low-level overflow IRQ handler */
	void		(*enable)(struct perf_event *event);	/* start counting @event on hardware */
	void		(*disable)(struct perf_event *event);	/* stop counting @event */
	/* Pick a free counter index for @event; see pmu_hw_events::used_mask. */
	int		(*get_event_idx)(struct pmu_hw_events *hw_events,
					 struct perf_event *event);
	/* Apply attr-derived filtering (e.g. exclude bits) to @evt. */
	int		(*set_event_filter)(struct hw_perf_event *evt,
					    struct perf_event_attr *attr);
	u32		(*read_counter)(struct perf_event *event);	/* read raw hardware count */
	void		(*write_counter)(struct perf_event *event, u32 val);	/* program raw hardware count */
	void		(*start)(struct arm_pmu *);	/* globally start the PMU */
	void		(*stop)(struct arm_pmu *);	/* globally stop the PMU */
	void		(*reset)(void *);		/* reset PMU state (void * for cross-call use — verify at callers) */
	int		(*request_irq)(struct arm_pmu *, irq_handler_t handler);	/* claim PMU IRQ(s) */
	void		(*free_irq)(struct arm_pmu *);	/* release PMU IRQ(s) */
	/* Map a generic perf event to a hardware event number; <0 if unsupported. */
	int		(*map_event)(struct perf_event *event);
	void		(*save_regs)(struct arm_pmu *, struct cpupmu_regs *);	/* snapshot PMU registers */
	void		(*restore_regs)(struct arm_pmu *, struct cpupmu_regs *);	/* reload saved registers */
	int		num_events;		/* number of hardware counters */
	atomic_t	active_events;		/* events currently scheduled on this PMU */
	struct mutex	reserve_mutex;		/* serializes hardware reservation/IRQ setup */
	u64		max_period;		/* maximum sample period a counter can hold */
	struct platform_device	*plat_device;	/* backing platform device, if any */
	struct pmu_hw_events	*(*get_hw_events)(void);	/* per-CPU counter bookkeeping */
};
103
/* Recover the wrapping arm_pmu from the embedded core 'struct pmu'. */
#define to_arm_pmu(p) (container_of(p, struct arm_pmu, pmu))

/* Runtime PM ops shared by ARM PMU platform drivers. */
extern const struct dev_pm_ops armpmu_dev_pm_ops;

/* Register @armpmu with the perf core under event source @type. */
int armpmu_register(struct arm_pmu *armpmu, int type);

/* Read the hardware counter and fold the delta into the event count. */
u64 armpmu_event_update(struct perf_event *event);

/* Program the counter for the event's next sample period. */
int armpmu_event_set_period(struct perf_event *event);

/*
 * Translate a perf event to a hardware event number using the supplied
 * generic-event and cache-event tables, or validate a raw event against
 * @raw_event_mask. Returns the hardware event number, or <0 on error.
 */
int armpmu_map_event(struct perf_event *event,
		     const unsigned (*event_map)[PERF_COUNT_HW_MAX],
		     const unsigned (*cache_map)[PERF_COUNT_HW_CACHE_MAX]
						[PERF_COUNT_HW_CACHE_OP_MAX]
						[PERF_COUNT_HW_CACHE_RESULT_MAX],
		     u32 raw_event_mask);
Mark Rutland0ce47082011-05-19 10:07:57 +0100121#endif /* CONFIG_HW_PERF_EVENTS */
122
Jamie Iles0f4f0672010-02-02 20:23:15 +0100123#endif /* __ARM_PMU_H__ */