Jamie Iles | 0f4f067 | 2010-02-02 20:23:15 +0100 | [diff] [blame] | 1 | /* |
| 2 | * linux/arch/arm/include/asm/pmu.h |
| 3 | * |
| 4 | * Copyright (C) 2009 picoChip Designs Ltd, Jamie Iles |
| 5 | * |
| 6 | * This program is free software; you can redistribute it and/or modify |
| 7 | * it under the terms of the GNU General Public License version 2 as |
| 8 | * published by the Free Software Foundation. |
| 9 | * |
| 10 | */ |
| 11 | |
| 12 | #ifndef __ARM_PMU_H__ |
| 13 | #define __ARM_PMU_H__ |
| 14 | |
Rabin Vincent | 0e25a5c | 2011-02-08 09:24:36 +0530 | [diff] [blame] | 15 | #include <linux/interrupt.h> |
Mark Rutland | 0ce4708 | 2011-05-19 10:07:57 +0100 | [diff] [blame] | 16 | #include <linux/perf_event.h> |
Rabin Vincent | 0e25a5c | 2011-02-08 09:24:36 +0530 | [diff] [blame] | 17 | |
/**
 * struct arm_pmu_platdata - ARM PMU platform data
 *
 * @handle_irq: an optional handler which will be called from the
 *	interrupt and passed the address of the low level handler,
 *	and can be used to implement any platform specific handling
 *	before or after calling it.
 * @runtime_resume: an optional handler which will be called by the
 *	runtime PM framework following a call to pm_runtime_get().
 *	Note that if pm_runtime_get() is called more than once in
 *	succession this handler will only be called once.
 * @runtime_suspend: an optional handler which will be called by the
 *	runtime PM framework following a call to pm_runtime_put().
 *	Note that if pm_runtime_get() is called more than once in
 *	succession this handler will only be called following the
 *	final call to pm_runtime_put() that actually disables the
 *	hardware.
 */
struct arm_pmu_platdata {
	irqreturn_t (*handle_irq)(int irq, void *dev,
				  irq_handler_t pmu_handler);
	int (*runtime_resume)(struct device *dev);
	int (*runtime_suspend)(struct device *dev);
};
| 42 | |
Mark Rutland | 0ce4708 | 2011-05-19 10:07:57 +0100 | [diff] [blame] | 43 | #ifdef CONFIG_HW_PERF_EVENTS |
| 44 | |
/* The events for a given PMU register set. */
struct pmu_hw_events {
	/*
	 * The events that are active on the PMU for the given index.
	 * Indexed by counter number; a NULL slot means the counter is
	 * free (see used_mask below).
	 */
	struct perf_event **events;

	/*
	 * A 1 bit for an index indicates that the counter is being used for
	 * an event. A 0 means that the counter can be used.
	 */
	unsigned long *used_mask;

	/*
	 * Hardware lock to serialize accesses to PMU registers. Needed for the
	 * read/modify/write sequences.
	 */
	raw_spinlock_t pmu_lock;
};
| 64 | |
/**
 * struct arm_pmu - ARM-specific PMU descriptor wrapping the generic
 * perf &struct pmu with hardware access callbacks.
 *
 * @pmu:		the embedded generic perf PMU; see to_arm_pmu().
 * @active_irqs:	mask of CPUs whose PMU IRQ is currently requested.
 * @name:		human-readable PMU name.
 * @handle_irq:		top-level PMU interrupt handler.
 * @enable:		enable counting for @event on the hardware.
 * @disable:		stop counting for @event.
 * @get_event_idx:	allocate a counter index for @event from @hw_events.
 * @set_event_filter:	program event filtering (e.g. privilege levels) from
 *			@attr; NOTE(review): optional on some implementations —
 *			confirm against the users of this struct.
 * @read_counter:	read the current raw counter value for @event.
 * @write_counter:	write @val to the counter backing @event.
 * @start:		start the whole PMU.
 * @stop:		stop the whole PMU.
 * @reset:		reset the PMU state (void * arg for use via
 *			on_each_cpu() — presumably; verify at call sites).
 * @request_irq:	claim the PMU interrupt(s), routing to @handler.
 * @free_irq:		release the PMU interrupt(s).
 * @map_event:		map a generic perf event config to a hardware event
 *			number; negative return means unsupported.
 * @num_events:		number of hardware counters available.
 * @active_events:	count of currently active events (reservation
 *			refcount, guarded by @reserve_mutex).
 * @reserve_mutex:	serializes hardware reservation/release.
 * @max_period:		maximum period a counter can count before overflow.
 * @plat_device:	the platform device providing this PMU.
 * @get_hw_events:	return the per-CPU &struct pmu_hw_events in use.
 */
struct arm_pmu {
	struct pmu	pmu;
	cpumask_t	active_irqs;
	char		*name;
	irqreturn_t	(*handle_irq)(int irq_num, void *dev);
	void		(*enable)(struct perf_event *event);
	void		(*disable)(struct perf_event *event);
	int		(*get_event_idx)(struct pmu_hw_events *hw_events,
					 struct perf_event *event);
	int		(*set_event_filter)(struct hw_perf_event *evt,
					    struct perf_event_attr *attr);
	u32		(*read_counter)(struct perf_event *event);
	void		(*write_counter)(struct perf_event *event, u32 val);
	void		(*start)(struct arm_pmu *);
	void		(*stop)(struct arm_pmu *);
	void		(*reset)(void *);
	int		(*request_irq)(struct arm_pmu *, irq_handler_t handler);
	void		(*free_irq)(struct arm_pmu *);
	int		(*map_event)(struct perf_event *event);
	int		num_events;
	atomic_t	active_events;
	struct mutex	reserve_mutex;
	u64		max_period;
	struct platform_device	*plat_device;
	struct pmu_hw_events	*(*get_hw_events)(void);
};
| 91 | |
| 92 | #define to_arm_pmu(p) (container_of(p, struct arm_pmu, pmu)) |
| 93 | |
/* Runtime PM ops shared by ARM PMU platform drivers. */
extern const struct dev_pm_ops armpmu_dev_pm_ops;

/* Register @armpmu with the perf core as PMU type @type. */
int armpmu_register(struct arm_pmu *armpmu, int type);

/*
 * Read the hardware counter for @event and fold the delta into the
 * perf event count; returns the new total (presumably — confirm
 * against the definition in perf_event.c).
 */
u64 armpmu_event_update(struct perf_event *event);

/* Program the sample period for @event into its hardware counter. */
int armpmu_event_set_period(struct perf_event *event);

/*
 * Translate @event's generic config into a hardware event number using
 * the per-implementation @event_map / @cache_map tables and
 * @raw_event_mask for raw events; negative return means unsupported.
 */
int armpmu_map_event(struct perf_event *event,
		     const unsigned (*event_map)[PERF_COUNT_HW_MAX],
		     const unsigned (*cache_map)[PERF_COUNT_HW_CACHE_MAX]
						[PERF_COUNT_HW_CACHE_OP_MAX]
						[PERF_COUNT_HW_CACHE_RESULT_MAX],
		     u32 raw_event_mask);
| 108 | |
Mark Rutland | 0ce4708 | 2011-05-19 10:07:57 +0100 | [diff] [blame] | 109 | #endif /* CONFIG_HW_PERF_EVENTS */ |
| 110 | |
Jamie Iles | 0f4f067 | 2010-02-02 20:23:15 +0100 | [diff] [blame] | 111 | #endif /* __ARM_PMU_H__ */ |