path: root/arch/powerpc/perf
Diffstat (limited to 'arch/powerpc/perf')
-rw-r--r--  arch/powerpc/perf/core-book3s.c  | 73
-rw-r--r--  arch/powerpc/perf/mpc7450-pmu.c  |  5
-rw-r--r--  arch/powerpc/perf/power4-pmu.c   |  2
-rw-r--r--  arch/powerpc/perf/power5+-pmu.c  |  2
-rw-r--r--  arch/powerpc/perf/power5-pmu.c   |  2
-rw-r--r--  arch/powerpc/perf/power6-pmu.c   |  2
-rw-r--r--  arch/powerpc/perf/power7-pmu.c   |  2
-rw-r--r--  arch/powerpc/perf/power8-pmu.c   | 27
-rw-r--r--  arch/powerpc/perf/ppc970-pmu.c   |  2
9 files changed, 82 insertions, 35 deletions
diff --git a/arch/powerpc/perf/core-book3s.c b/arch/powerpc/perf/core-book3s.c
index fe52db2eea6a..b7cd00b0171e 100644
--- a/arch/powerpc/perf/core-book3s.c
+++ b/arch/powerpc/perf/core-book3s.c
@@ -36,7 +36,12 @@ struct cpu_hw_events {
struct perf_event *event[MAX_HWEVENTS];
u64 events[MAX_HWEVENTS];
unsigned int flags[MAX_HWEVENTS];
- unsigned long mmcr[3];
+ /*
+ * The order of the MMCR array is:
+ * - 64-bit, MMCR0, MMCR1, MMCRA, MMCR2
+ * - 32-bit, MMCR0, MMCR1, MMCR2
+ */
+ unsigned long mmcr[4];
struct perf_event *limited_counter[MAX_LIMITED_HWCOUNTERS];
u8 limited_hwidx[MAX_LIMITED_HWCOUNTERS];
u64 alternatives[MAX_HWEVENTS][MAX_EVENT_ALTERNATIVES];
@@ -112,9 +117,9 @@ static bool is_ebb_event(struct perf_event *event) { return false; }
static int ebb_event_check(struct perf_event *event) { return 0; }
static void ebb_event_add(struct perf_event *event) { }
static void ebb_switch_out(unsigned long mmcr0) { }
-static unsigned long ebb_switch_in(bool ebb, unsigned long mmcr0)
+static unsigned long ebb_switch_in(bool ebb, struct cpu_hw_events *cpuhw)
{
- return mmcr0;
+ return cpuhw->mmcr[0];
}
static inline void power_pmu_bhrb_enable(struct perf_event *event) {}
@@ -542,8 +547,10 @@ static void ebb_switch_out(unsigned long mmcr0)
current->thread.mmcr2 = mfspr(SPRN_MMCR2) & MMCR2_USER_MASK;
}
-static unsigned long ebb_switch_in(bool ebb, unsigned long mmcr0)
+static unsigned long ebb_switch_in(bool ebb, struct cpu_hw_events *cpuhw)
{
+ unsigned long mmcr0 = cpuhw->mmcr[0];
+
if (!ebb)
goto out;
@@ -568,7 +575,15 @@ static unsigned long ebb_switch_in(bool ebb, unsigned long mmcr0)
mtspr(SPRN_SIAR, current->thread.siar);
mtspr(SPRN_SIER, current->thread.sier);
mtspr(SPRN_SDAR, current->thread.sdar);
- mtspr(SPRN_MMCR2, current->thread.mmcr2);
+
+ /*
+ * Merge the kernel & user values of MMCR2. The semantics we implement
+ * are that the user MMCR2 can set bits, ie. cause counters to freeze,
+ * but not clear bits. If a task wants to be able to clear bits, ie.
+ * unfreeze counters, it should not set exclude_xxx in its events and
+ * instead manage the MMCR2 entirely by itself.
+ */
+ mtspr(SPRN_MMCR2, cpuhw->mmcr[3] | current->thread.mmcr2);
out:
return mmcr0;
}
@@ -915,6 +930,14 @@ static int check_excludes(struct perf_event **ctrs, unsigned int cflags[],
int i, n, first;
struct perf_event *event;
+ /*
+ * If the PMU we're on supports per event exclude settings then we
+ * don't need to do any of this logic. NB. This assumes no PMU has both
+ * per event exclude and limited PMCs.
+ */
+ if (ppmu->flags & PPMU_ARCH_207S)
+ return 0;
+
n = n_prev + n_new;
if (n <= 1)
return 0;
@@ -1219,28 +1242,31 @@ static void power_pmu_enable(struct pmu *pmu)
}
/*
- * Compute MMCR* values for the new set of events
+ * Clear all MMCR settings and recompute them for the new set of events.
*/
+ memset(cpuhw->mmcr, 0, sizeof(cpuhw->mmcr));
+
if (ppmu->compute_mmcr(cpuhw->events, cpuhw->n_events, hwc_index,
- cpuhw->mmcr)) {
+ cpuhw->mmcr, cpuhw->event)) {
/* shouldn't ever get here */
printk(KERN_ERR "oops compute_mmcr failed\n");
goto out;
}
- /*
- * Add in MMCR0 freeze bits corresponding to the
- * attr.exclude_* bits for the first event.
- * We have already checked that all events have the
- * same values for these bits as the first event.
- */
- event = cpuhw->event[0];
- if (event->attr.exclude_user)
- cpuhw->mmcr[0] |= MMCR0_FCP;
- if (event->attr.exclude_kernel)
- cpuhw->mmcr[0] |= freeze_events_kernel;
- if (event->attr.exclude_hv)
- cpuhw->mmcr[0] |= MMCR0_FCHV;
+ if (!(ppmu->flags & PPMU_ARCH_207S)) {
+ /*
+ * Add in MMCR0 freeze bits corresponding to the attr.exclude_*
+ * bits for the first event. We have already checked that all
+ * events have the same value for these bits as the first event.
+ */
+ event = cpuhw->event[0];
+ if (event->attr.exclude_user)
+ cpuhw->mmcr[0] |= MMCR0_FCP;
+ if (event->attr.exclude_kernel)
+ cpuhw->mmcr[0] |= freeze_events_kernel;
+ if (event->attr.exclude_hv)
+ cpuhw->mmcr[0] |= MMCR0_FCHV;
+ }
/*
* Write the new configuration to MMCR* with the freeze
@@ -1252,6 +1278,8 @@ static void power_pmu_enable(struct pmu *pmu)
mtspr(SPRN_MMCR1, cpuhw->mmcr[1]);
mtspr(SPRN_MMCR0, (cpuhw->mmcr[0] & ~(MMCR0_PMC1CE | MMCR0_PMCjCE))
| MMCR0_FC);
+ if (ppmu->flags & PPMU_ARCH_207S)
+ mtspr(SPRN_MMCR2, cpuhw->mmcr[3]);
/*
* Read off any pre-existing events that need to move
@@ -1307,10 +1335,7 @@ static void power_pmu_enable(struct pmu *pmu)
out_enable:
pmao_restore_workaround(ebb);
- if (ppmu->flags & PPMU_ARCH_207S)
- mtspr(SPRN_MMCR2, 0);
-
- mmcr0 = ebb_switch_in(ebb, cpuhw->mmcr[0]);
+ mmcr0 = ebb_switch_in(ebb, cpuhw);
mb();
if (cpuhw->bhrb_users)
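
The OR in ebb_switch_in() above is what gives the "can set, but not clear" semantics described in the new comment: whatever a task writes into its thread.mmcr2, the freeze bits the kernel computed into cpuhw->mmcr[3] remain set. The following standalone sketch (illustrative only, not part of the patch; it borrows the POWER8 MMCR2_FCP layout added further down in this diff) shows that property:

/* mmcr2_merge_demo.c - illustrative sketch, not kernel code */
#include <stdio.h>
#include <stdint.h>

/* Modelled on the POWER8 MMCR2_FCP(pmc) macro added in power8-pmu.c below */
#define FCP(pmc)	(1ull << (62 - (((pmc) - 1) * 9)))

/* The merge the patch performs: kernel exclude bits are OR'd with the user value */
static uint64_t merge_mmcr2(uint64_t kernel_mmcr2, uint64_t user_mmcr2)
{
	return kernel_mmcr2 | user_mmcr2;
}

int main(void)
{
	uint64_t kernel = FCP(1);	/* kernel sets a freeze bit for PMC1, e.g. from exclude_user */
	uint64_t user_set = FCP(2);	/* user additionally freezes PMC2: takes effect */
	uint64_t user_clear = 0;	/* user tries to clear all freezes: has no effect */

	printf("merge with user_set:   %016llx\n",
	       (unsigned long long)merge_mmcr2(kernel, user_set));
	printf("merge with user_clear: %016llx\n",
	       (unsigned long long)merge_mmcr2(kernel, user_clear));
	/* FCP(1) is set in both results, so the kernel's excludes always hold */
	return 0;
}

As the comment notes, a task that wants to be able to clear freeze bits should not set exclude_* on its events at all and should manage MMCR2 entirely by itself.
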
diff --git a/arch/powerpc/perf/mpc7450-pmu.c b/arch/powerpc/perf/mpc7450-pmu.c
index fe21b515ca44..d115c5635bf3 100644
--- a/arch/powerpc/perf/mpc7450-pmu.c
+++ b/arch/powerpc/perf/mpc7450-pmu.c
@@ -260,8 +260,9 @@ static const u32 pmcsel_mask[N_COUNTER] = {
/*
* Compute MMCR0/1/2 values for a set of events.
*/
-static int mpc7450_compute_mmcr(u64 event[], int n_ev,
- unsigned int hwc[], unsigned long mmcr[])
+static int mpc7450_compute_mmcr(u64 event[], int n_ev, unsigned int hwc[],
+ unsigned long mmcr[],
+ struct perf_event *pevents[])
{
u8 event_index[N_CLASSES][N_COUNTER];
int n_classevent[N_CLASSES];
diff --git a/arch/powerpc/perf/power4-pmu.c b/arch/powerpc/perf/power4-pmu.c
index 9103a1de864d..ce6072fa481b 100644
--- a/arch/powerpc/perf/power4-pmu.c
+++ b/arch/powerpc/perf/power4-pmu.c
@@ -356,7 +356,7 @@ static int p4_get_alternatives(u64 event, unsigned int flags, u64 alt[])
}
static int p4_compute_mmcr(u64 event[], int n_ev,
- unsigned int hwc[], unsigned long mmcr[])
+ unsigned int hwc[], unsigned long mmcr[], struct perf_event *pevents[])
{
unsigned long mmcr0 = 0, mmcr1 = 0, mmcra = 0;
unsigned int pmc, unit, byte, psel, lower;
diff --git a/arch/powerpc/perf/power5+-pmu.c b/arch/powerpc/perf/power5+-pmu.c
index b03b6dc0172d..0526dac66007 100644
--- a/arch/powerpc/perf/power5+-pmu.c
+++ b/arch/powerpc/perf/power5+-pmu.c
@@ -452,7 +452,7 @@ static int power5p_marked_instr_event(u64 event)
}
static int power5p_compute_mmcr(u64 event[], int n_ev,
- unsigned int hwc[], unsigned long mmcr[])
+ unsigned int hwc[], unsigned long mmcr[], struct perf_event *pevents[])
{
unsigned long mmcr1 = 0;
unsigned long mmcra = 0;
diff --git a/arch/powerpc/perf/power5-pmu.c b/arch/powerpc/perf/power5-pmu.c
index 1e8ce423c3af..4dc99f9f7962 100644
--- a/arch/powerpc/perf/power5-pmu.c
+++ b/arch/powerpc/perf/power5-pmu.c
@@ -383,7 +383,7 @@ static int power5_marked_instr_event(u64 event)
}
static int power5_compute_mmcr(u64 event[], int n_ev,
- unsigned int hwc[], unsigned long mmcr[])
+ unsigned int hwc[], unsigned long mmcr[], struct perf_event *pevents[])
{
unsigned long mmcr1 = 0;
unsigned long mmcra = MMCRA_SDAR_DCACHE_MISS | MMCRA_SDAR_ERAT_MISS;
diff --git a/arch/powerpc/perf/power6-pmu.c b/arch/powerpc/perf/power6-pmu.c
index 31128e086fed..9c9d646b68a1 100644
--- a/arch/powerpc/perf/power6-pmu.c
+++ b/arch/powerpc/perf/power6-pmu.c
@@ -175,7 +175,7 @@ static int power6_marked_instr_event(u64 event)
* Assign PMC numbers and compute MMCR1 value for a set of events
*/
static int p6_compute_mmcr(u64 event[], int n_ev,
- unsigned int hwc[], unsigned long mmcr[])
+ unsigned int hwc[], unsigned long mmcr[], struct perf_event *pevents[])
{
unsigned long mmcr1 = 0;
unsigned long mmcra = MMCRA_SDAR_DCACHE_MISS | MMCRA_SDAR_ERAT_MISS;
diff --git a/arch/powerpc/perf/power7-pmu.c b/arch/powerpc/perf/power7-pmu.c
index 56c67bca2f75..5b62f2389290 100644
--- a/arch/powerpc/perf/power7-pmu.c
+++ b/arch/powerpc/perf/power7-pmu.c
@@ -245,7 +245,7 @@ static int power7_marked_instr_event(u64 event)
}
static int power7_compute_mmcr(u64 event[], int n_ev,
- unsigned int hwc[], unsigned long mmcr[])
+ unsigned int hwc[], unsigned long mmcr[], struct perf_event *pevents[])
{
unsigned long mmcr1 = 0;
unsigned long mmcra = MMCRA_SDAR_DCACHE_MISS | MMCRA_SDAR_ERAT_MISS;
diff --git a/arch/powerpc/perf/power8-pmu.c b/arch/powerpc/perf/power8-pmu.c
index 639cd9156585..396351db601b 100644
--- a/arch/powerpc/perf/power8-pmu.c
+++ b/arch/powerpc/perf/power8-pmu.c
@@ -15,6 +15,7 @@
#include <linux/kernel.h>
#include <linux/perf_event.h>
#include <asm/firmware.h>
+#include <asm/cputable.h>
/*
@@ -266,6 +267,11 @@
#define MMCRA_SDAR_MODE_TLB (1ull << 42)
#define MMCRA_IFM_SHIFT 30
+/* Bits in MMCR2 for POWER8 */
+#define MMCR2_FCS(pmc) (1ull << (63 - (((pmc) - 1) * 9)))
+#define MMCR2_FCP(pmc) (1ull << (62 - (((pmc) - 1) * 9)))
+#define MMCR2_FCH(pmc) (1ull << (57 - (((pmc) - 1) * 9)))
+
static inline bool event_is_fab_match(u64 event)
{
@@ -393,9 +399,10 @@ static int power8_get_constraint(u64 event, unsigned long *maskp, unsigned long
}
static int power8_compute_mmcr(u64 event[], int n_ev,
- unsigned int hwc[], unsigned long mmcr[])
+ unsigned int hwc[], unsigned long mmcr[],
+ struct perf_event *pevents[])
{
- unsigned long mmcra, mmcr1, unit, combine, psel, cache, val;
+ unsigned long mmcra, mmcr1, mmcr2, unit, combine, psel, cache, val;
unsigned int pmc, pmc_inuse;
int i;
@@ -410,7 +417,7 @@ static int power8_compute_mmcr(u64 event[], int n_ev,
/* In continous sampling mode, update SDAR on TLB miss */
mmcra = MMCRA_SDAR_MODE_TLB;
- mmcr1 = 0;
+ mmcr1 = mmcr2 = 0;
/* Second pass: assign PMCs, set all MMCR1 fields */
for (i = 0; i < n_ev; ++i) {
@@ -472,6 +479,19 @@ static int power8_compute_mmcr(u64 event[], int n_ev,
mmcra |= val << MMCRA_IFM_SHIFT;
}
+ if (pevents[i]->attr.exclude_user)
+ mmcr2 |= MMCR2_FCP(pmc);
+
+ if (pevents[i]->attr.exclude_hv)
+ mmcr2 |= MMCR2_FCH(pmc);
+
+ if (pevents[i]->attr.exclude_kernel) {
+ if (cpu_has_feature(CPU_FTR_HVMODE))
+ mmcr2 |= MMCR2_FCH(pmc);
+ else
+ mmcr2 |= MMCR2_FCS(pmc);
+ }
+
hwc[i] = pmc - 1;
}
@@ -491,6 +511,7 @@ static int power8_compute_mmcr(u64 event[], int n_ev,
mmcr[1] = mmcr1;
mmcr[2] = mmcra;
+ mmcr[3] = mmcr2;
return 0;
}
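
For reference, the three MMCR2_FC* macros above carve MMCR2 into one 9-bit control field per PMC, with FCS, FCP and FCH at fixed offsets inside each field. A small standalone sketch (illustrative only, not part of the patch) that prints the mask each macro resolves to for PMC1-PMC6:

/* mmcr2_bits.c - illustrative sketch; mirrors the macros added above */
#include <stdio.h>

#define MMCR2_FCS(pmc)	(1ull << (63 - (((pmc) - 1) * 9)))
#define MMCR2_FCP(pmc)	(1ull << (62 - (((pmc) - 1) * 9)))
#define MMCR2_FCH(pmc)	(1ull << (57 - (((pmc) - 1) * 9)))

int main(void)
{
	for (int pmc = 1; pmc <= 6; pmc++)	/* POWER8 exposes PMC1-PMC6 */
		printf("PMC%d: FCS=%016llx FCP=%016llx FCH=%016llx\n", pmc,
		       MMCR2_FCS(pmc), MMCR2_FCP(pmc), MMCR2_FCH(pmc));
	return 0;
}

This also makes the exclude_kernel handling above easier to read: when the kernel itself runs in hypervisor mode (CPU_FTR_HVMODE) the kernel is excluded via FCH, otherwise via FCS.
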
diff --git a/arch/powerpc/perf/ppc970-pmu.c b/arch/powerpc/perf/ppc970-pmu.c
index 20139ceeacf6..8b6a8a36fa38 100644
--- a/arch/powerpc/perf/ppc970-pmu.c
+++ b/arch/powerpc/perf/ppc970-pmu.c
@@ -257,7 +257,7 @@ static int p970_get_alternatives(u64 event, unsigned int flags, u64 alt[])
}
static int p970_compute_mmcr(u64 event[], int n_ev,
- unsigned int hwc[], unsigned long mmcr[])
+ unsigned int hwc[], unsigned long mmcr[], struct perf_event *pevents[])
{
unsigned long mmcr0 = 0, mmcr1 = 0, mmcra = 0;
unsigned int pmc, unit, byte, psel;
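
Taken together, the backend changes are mechanical: every compute_mmcr() implementation gains a struct perf_event *pevents[] parameter, but only the POWER8 (PPMU_ARCH_207S) backend actually consults it; the older PMUs accept and ignore it. The matching prototype change to the compute_mmcr member of struct power_pmu lives outside this directory (in asm/perf_event_server.h) and so does not appear in this diffstat; a sketch of the shape it ends up with, with u64 stubbed for a standalone build:

/* compute_mmcr_shape.c - sketch of the updated callback prototype, not a quote
 * of the header, which is outside the arch/powerpc/perf diffstat shown here.
 */
typedef unsigned long long u64;		/* kernel type, stubbed for this sketch */
struct perf_event;			/* opaque here; defined in linux/perf_event.h */

struct power_pmu_sketch {
	int (*compute_mmcr)(u64 event[], int n_ev,
			    unsigned int hwc[], unsigned long mmcr[],
			    struct perf_event *pevents[]);
	/* ... other callbacks (get_constraint, get_alternatives, ...) omitted */
};
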