Diffstat (limited to 'arch/powerpc/kernel')
-rw-r--r--  arch/powerpc/kernel/Makefile               |  10
-rw-r--r--  arch/powerpc/kernel/cpu_setup_6xx.S        |   3
-rw-r--r--  arch/powerpc/kernel/cpu_setup_fsl_booke.S  |  49
-rw-r--r--  arch/powerpc/kernel/dma.c                  |   6
-rw-r--r--  arch/powerpc/kernel/entry_32.S             | 127
-rw-r--r--  arch/powerpc/kernel/ftrace.c               |   2
-rw-r--r--  arch/powerpc/kernel/head_32.S              |  17
-rw-r--r--  arch/powerpc/kernel/mpc7450-pmu.c          | 417
-rw-r--r--  arch/powerpc/kernel/of_device.c            |   2
-rw-r--r--  arch/powerpc/kernel/perf_counter.c         | 265
-rw-r--r--  arch/powerpc/kernel/power4-pmu.c           |  90
-rw-r--r--  arch/powerpc/kernel/power5+-pmu.c          |  96
-rw-r--r--  arch/powerpc/kernel/power5-pmu.c           |  99
-rw-r--r--  arch/powerpc/kernel/power6-pmu.c           |  73
-rw-r--r--  arch/powerpc/kernel/power7-pmu.c           |  69
-rw-r--r--  arch/powerpc/kernel/ppc970-pmu.c           |  63
-rw-r--r--  arch/powerpc/kernel/process.c              |   2
-rw-r--r--  arch/powerpc/kernel/prom_init.c            |  40
-rw-r--r--  arch/powerpc/kernel/ptrace.c               |  17
-rw-r--r--  arch/powerpc/kernel/ptrace32.c             |   1
-rw-r--r--  arch/powerpc/kernel/rtas.c                 |  69
-rw-r--r--  arch/powerpc/kernel/setup_32.c             |   2
-rw-r--r--  arch/powerpc/kernel/smp.c                  |   3
-rw-r--r--  arch/powerpc/kernel/time.c                 |  25
-rw-r--r--  arch/powerpc/kernel/udbg_16550.c           |   2
-rw-r--r--  arch/powerpc/kernel/vector.S               |   6
26 files changed, 1209 insertions(+), 346 deletions(-)
diff --git a/arch/powerpc/kernel/Makefile b/arch/powerpc/kernel/Makefile
index 612b0c4dc26..b73396b9390 100644
--- a/arch/powerpc/kernel/Makefile
+++ b/arch/powerpc/kernel/Makefile
@@ -4,6 +4,8 @@
CFLAGS_ptrace.o += -DUTS_MACHINE='"$(UTS_MACHINE)"'
+subdir-ccflags-$(CONFIG_PPC_WERROR) := -Werror
+
ifeq ($(CONFIG_PPC64),y)
CFLAGS_prom_init.o += -mno-minimal-toc
endif
@@ -95,9 +97,10 @@ obj64-$(CONFIG_AUDIT) += compat_audit.o
obj-$(CONFIG_DYNAMIC_FTRACE) += ftrace.o
obj-$(CONFIG_FUNCTION_GRAPH_TRACER) += ftrace.o
-obj-$(CONFIG_PERF_COUNTERS) += perf_counter.o power4-pmu.o ppc970-pmu.o \
- power5-pmu.o power5+-pmu.o power6-pmu.o \
- power7-pmu.o
+obj-$(CONFIG_PPC_PERF_CTRS) += perf_counter.o
+obj64-$(CONFIG_PPC_PERF_CTRS) += power4-pmu.o ppc970-pmu.o power5-pmu.o \
+ power5+-pmu.o power6-pmu.o power7-pmu.o
+obj32-$(CONFIG_PPC_PERF_CTRS) += mpc7450-pmu.o
obj-$(CONFIG_8XX_MINIMAL_FPEMU) += softemu8xx.o
@@ -106,6 +109,7 @@ obj-y += iomap.o
endif
obj-$(CONFIG_PPC64) += $(obj64-y)
+obj-$(CONFIG_PPC32) += $(obj32-y)
ifneq ($(CONFIG_XMON)$(CONFIG_KEXEC),)
obj-y += ppc_save_regs.o
diff --git a/arch/powerpc/kernel/cpu_setup_6xx.S b/arch/powerpc/kernel/cpu_setup_6xx.S
index 54f767e31a1..1e9949e6885 100644
--- a/arch/powerpc/kernel/cpu_setup_6xx.S
+++ b/arch/powerpc/kernel/cpu_setup_6xx.S
@@ -239,6 +239,9 @@ END_FTR_SECTION_IFSET(CPU_FTR_L3CR)
ori r11,r11,HID0_SGE | HID0_FOLD | HID0_BHTE
ori r11,r11,HID0_LRSTK | HID0_BTIC
oris r11,r11,HID0_DPM@h
+BEGIN_MMU_FTR_SECTION
+ oris r11,r11,HID0_HIGH_BAT@h
+END_MMU_FTR_SECTION_IFSET(MMU_FTR_USE_HIGH_BATS)
BEGIN_FTR_SECTION
xori r11,r11,HID0_BTIC
END_FTR_SECTION_IFSET(CPU_FTR_NO_BTIC)
diff --git a/arch/powerpc/kernel/cpu_setup_fsl_booke.S b/arch/powerpc/kernel/cpu_setup_fsl_booke.S
index eb4b9adcedb..0adb50ad803 100644
--- a/arch/powerpc/kernel/cpu_setup_fsl_booke.S
+++ b/arch/powerpc/kernel/cpu_setup_fsl_booke.S
@@ -17,6 +17,40 @@
#include <asm/cputable.h>
#include <asm/ppc_asm.h>
+_GLOBAL(__e500_icache_setup)
+ mfspr r0, SPRN_L1CSR1
+ andi. r3, r0, L1CSR1_ICE
+ bnelr /* Already enabled */
+ oris r0, r0, L1CSR1_CPE@h
+ ori r0, r0, (L1CSR1_ICFI | L1CSR1_ICLFR | L1CSR1_ICE)
+ mtspr SPRN_L1CSR1, r0 /* Enable I-Cache */
+ isync
+ blr
+
+_GLOBAL(__e500_dcache_setup)
+ mfspr r0, SPRN_L1CSR0
+ andi. r3, r0, L1CSR0_DCE
+ bnelr /* Already enabled */
+ msync
+ isync
+ li r0, 0
+ mtspr SPRN_L1CSR0, r0 /* Disable */
+ msync
+ isync
+ li r0, (L1CSR0_DCFI | L1CSR0_CLFC)
+ mtspr SPRN_L1CSR0, r0 /* Invalidate */
+ isync
+1: mfspr r0, SPRN_L1CSR0
+ andi. r3, r0, L1CSR0_CLFC
+ bne+ 1b /* Wait for lock bits reset */
+ oris r0, r0, L1CSR0_CPE@h
+ ori r0, r0, L1CSR0_DCE
+ msync
+ isync
+ mtspr SPRN_L1CSR0, r0 /* Enable */
+ isync
+ blr
+
_GLOBAL(__setup_cpu_e200)
/* enable dedicated debug exception handling resources (Debug APU) */
mfspr r3,SPRN_HID0
@@ -25,7 +59,16 @@ _GLOBAL(__setup_cpu_e200)
b __setup_e200_ivors
_GLOBAL(__setup_cpu_e500v1)
_GLOBAL(__setup_cpu_e500v2)
- b __setup_e500_ivors
+ mflr r4
+ bl __e500_icache_setup
+ bl __e500_dcache_setup
+ bl __setup_e500_ivors
+ mtlr r4
+ blr
_GLOBAL(__setup_cpu_e500mc)
- b __setup_e500mc_ivors
-
+ mflr r4
+ bl __e500_icache_setup
+ bl __e500_dcache_setup
+ bl __setup_e500mc_ivors
+ mtlr r4
+ blr
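The new __e500_dcache_setup above is a fixed bring-up sequence: bail out if the D-cache is already enabled, otherwise disable it, flash-invalidate it together with a lock flash-clear, spin until the hardware clears the CLFC bit, and finally enable it with parity checking. A user-space sketch of the same state machine against a simulated L1CSR0 register follows (the bit values are illustrative stand-ins, not the authoritative SPR encoding):

    #include <stdint.h>
    #include <stdio.h>

    /* Illustrative stand-ins for the L1CSR0 bits used in the patch. */
    #define L1CSR0_CPE  0x00010000u   /* cache parity enable    */
    #define L1CSR0_CLFC 0x00000100u   /* lock flash clear       */
    #define L1CSR0_DCFI 0x00000002u   /* cache flash invalidate */
    #define L1CSR0_DCE  0x00000001u   /* d-cache enable         */

    static uint32_t l1csr0;           /* simulated SPR */

    static uint32_t read_l1csr0(void)
    {
            /* The hardware self-clears the invalidate/flash-clear bits. */
            l1csr0 &= ~(L1CSR0_CLFC | L1CSR0_DCFI);
            return l1csr0;
    }

    int main(void)
    {
            if (!(read_l1csr0() & L1CSR0_DCE)) {
                    l1csr0 = 0;                          /* disable    */
                    l1csr0 = L1CSR0_DCFI | L1CSR0_CLFC;  /* invalidate */
                    while (read_l1csr0() & L1CSR0_CLFC)  /* wait       */
                            ;
                    l1csr0 |= L1CSR0_CPE | L1CSR0_DCE;   /* enable     */
            }
            printf("final L1CSR0 = 0x%08x\n", l1csr0);
            return 0;
    }

Note also the mflr/mtlr dance in the callers: __setup_cpu_e500* now makes several bl calls, so it has to preserve its own return address in r4 across them before the final blr.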
diff --git a/arch/powerpc/kernel/dma.c b/arch/powerpc/kernel/dma.c
index 20a60d661ba..ccf129d47d8 100644
--- a/arch/powerpc/kernel/dma.c
+++ b/arch/powerpc/kernel/dma.c
@@ -7,6 +7,7 @@
#include <linux/device.h>
#include <linux/dma-mapping.h>
+#include <linux/lmb.h>
#include <asm/bug.h>
#include <asm/abs_addr.h>
@@ -90,11 +91,10 @@ static void dma_direct_unmap_sg(struct device *dev, struct scatterlist *sg,
static int dma_direct_dma_supported(struct device *dev, u64 mask)
{
#ifdef CONFIG_PPC64
- /* Could be improved to check for memory though it better be
- * done via some global so platforms can set the limit in case
+ /* Could be improved so platforms can set the limit in case
* they have limited DMA windows
*/
- return mask >= DMA_BIT_MASK(32);
+ return mask >= (lmb_end_of_DRAM() - 1);
#else
return 1;
#endif
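The old test only required a device to address 4 GB; the new one requires it to address the last byte of RAM, which is stricter on big-memory machines and more permissive on small ones. A runnable sketch of the comparison (user-space; lmb_end_of_DRAM() is stubbed to a hypothetical 2 GB machine):

    #include <stdint.h>
    #include <stdio.h>

    /* Stub for the kernel's lmb_end_of_DRAM(): first byte past RAM. */
    static uint64_t lmb_end_of_DRAM(void) { return 0x80000000ull; }

    #define DMA_BIT_MASK(n) (((n) == 64) ? ~0ull : ((1ull << (n)) - 1))

    static int dma_direct_dma_supported(uint64_t mask)
    {
            /* Direct DMA must be able to reach every byte of DRAM. */
            return mask >= (lmb_end_of_DRAM() - 1);
    }

    int main(void)
    {
            /* 30-bit mask fails, 31-bit mask passes on this machine. */
            printf("30-bit: %d\n", dma_direct_dma_supported(DMA_BIT_MASK(30)));
            printf("31-bit: %d\n", dma_direct_dma_supported(DMA_BIT_MASK(31)));
            return 0;
    }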
diff --git a/arch/powerpc/kernel/entry_32.S b/arch/powerpc/kernel/entry_32.S
index 4dd38f12915..3cadba60a4b 100644
--- a/arch/powerpc/kernel/entry_32.S
+++ b/arch/powerpc/kernel/entry_32.S
@@ -191,11 +191,49 @@ transfer_to_handler_cont:
mflr r9
lwz r11,0(r9) /* virtual address of handler */
lwz r9,4(r9) /* where to go when done */
+#ifdef CONFIG_TRACE_IRQFLAGS
+ lis r12,reenable_mmu@h
+ ori r12,r12,reenable_mmu@l
+ mtspr SPRN_SRR0,r12
+ mtspr SPRN_SRR1,r10
+ SYNC
+ RFI
+reenable_mmu: /* re-enable mmu so we can */
+ mfmsr r10
+ lwz r12,_MSR(r1)
+ xor r10,r10,r12
+ andi. r10,r10,MSR_EE /* Did EE change? */
+ beq 1f
+
+ /* Save handler and return address into the 2 unused words
+ * of the STACK_FRAME_OVERHEAD (sneak sneak sneak). Everything
+ * else can be recovered from the pt_regs except r3 which for
+ * normal interrupts has been set to pt_regs and for syscalls
+ * is an argument, so we temporarily use ORIG_GPR3 to save it
+ */
+ stw r9,8(r1)
+ stw r11,12(r1)
+ stw r3,ORIG_GPR3(r1)
+ bl trace_hardirqs_off
+ lwz r0,GPR0(r1)
+ lwz r3,ORIG_GPR3(r1)
+ lwz r4,GPR4(r1)
+ lwz r5,GPR5(r1)
+ lwz r6,GPR6(r1)
+ lwz r7,GPR7(r1)
+ lwz r8,GPR8(r1)
+ lwz r9,8(r1)
+ lwz r11,12(r1)
+1: mtctr r11
+ mtlr r9
+ bctr /* jump to handler */
+#else /* CONFIG_TRACE_IRQFLAGS */
mtspr SPRN_SRR0,r11
mtspr SPRN_SRR1,r10
mtlr r9
SYNC
RFI /* jump to handler, enable MMU */
+#endif /* CONFIG_TRACE_IRQFLAGS */
#if defined (CONFIG_6xx) || defined(CONFIG_E500)
4: rlwinm r12,r12,0,~_TLF_NAPPING
@@ -251,6 +289,31 @@ _GLOBAL(DoSyscall)
#ifdef SHOW_SYSCALLS
bl do_show_syscall
#endif /* SHOW_SYSCALLS */
+#ifdef CONFIG_TRACE_IRQFLAGS
+ /* Return from syscalls can (and generally will) hard enable
+ * interrupts. You aren't supposed to call a syscall with
+ * interrupts disabled in the first place. However, to ensure
+ * that we get it right vs. lockdep if it happens, we force
+ * that hard enable here with appropriate tracing if we see
+ * that we have been called with interrupts off
+ */
+ mfmsr r11
+ andi. r12,r11,MSR_EE
+ bne+ 1f
+ /* We came in with interrupts disabled, we enable them now */
+ bl trace_hardirqs_on
+ mfmsr r11
+ lwz r0,GPR0(r1)
+ lwz r3,GPR3(r1)
+ lwz r4,GPR4(r1)
+ ori r11,r11,MSR_EE
+ lwz r5,GPR5(r1)
+ lwz r6,GPR6(r1)
+ lwz r7,GPR7(r1)
+ lwz r8,GPR8(r1)
+ mtmsr r11
+1:
+#endif /* CONFIG_TRACE_IRQFLAGS */
rlwinm r10,r1,0,0,(31-THREAD_SHIFT) /* current_thread_info() */
lwz r11,TI_FLAGS(r10)
andi. r11,r11,_TIF_SYSCALL_T_OR_A
@@ -275,6 +338,7 @@ ret_from_syscall:
rlwinm r12,r1,0,0,(31-THREAD_SHIFT) /* current_thread_info() */
/* disable interrupts so current_thread_info()->flags can't change */
LOAD_MSR_KERNEL(r10,MSR_KERNEL) /* doesn't include MSR_EE */
+ /* Note: We don't bother telling lockdep about it */
SYNC
MTMSRD(r10)
lwz r9,TI_FLAGS(r12)
@@ -288,6 +352,19 @@ ret_from_syscall:
oris r11,r11,0x1000 /* Set SO bit in CR */
stw r11,_CCR(r1)
syscall_exit_cont:
+ lwz r8,_MSR(r1)
+#ifdef CONFIG_TRACE_IRQFLAGS
+ /* If we are going to return from the syscall with interrupts
+ * off, we trace that here. It shouldn't happen, but we
+ * want to catch the bugger if it does, right?
+ */
+ andi. r10,r8,MSR_EE
+ bne+ 1f
+ stw r3,GPR3(r1)
+ bl trace_hardirqs_off
+ lwz r3,GPR3(r1)
+1:
+#endif /* CONFIG_TRACE_IRQFLAGS */
#if defined(CONFIG_4xx) || defined(CONFIG_BOOKE)
/* If the process has its own DBCR0 value, load it up. The internal
debug mode bit tells us that dbcr0 should be loaded. */
@@ -311,7 +388,6 @@ END_FTR_SECTION_IFSET(CPU_FTR_NEED_PAIRED_STWCX)
mtlr r4
mtcr r5
lwz r7,_NIP(r1)
- lwz r8,_MSR(r1)
FIX_SRR1(r8, r0)
lwz r2,GPR2(r1)
lwz r1,GPR1(r1)
@@ -394,7 +470,9 @@ syscall_exit_work:
andi. r0,r9,(_TIF_SYSCALL_T_OR_A|_TIF_SINGLESTEP)
beq ret_from_except
- /* Re-enable interrupts */
+ /* Re-enable interrupts. There is no need to trace that with
+ * lockdep as we are supposed to have IRQs on at this point
+ */
ori r10,r10,MSR_EE
SYNC
MTMSRD(r10)
@@ -705,6 +783,7 @@ ret_from_except:
/* Hard-disable interrupts so that current_thread_info()->flags
* can't change between when we test it and when we return
* from the interrupt. */
+ /* Note: We don't bother telling lockdep about it */
LOAD_MSR_KERNEL(r10,MSR_KERNEL)
SYNC /* Some chip revs have problems here... */
MTMSRD(r10) /* disable interrupts */
@@ -744,11 +823,24 @@ resume_kernel:
beq+ restore
andi. r0,r3,MSR_EE /* interrupts off? */
beq restore /* don't schedule if so */
+#ifdef CONFIG_TRACE_IRQFLAGS
+ /* Lockdep thinks irqs are enabled, we need to call
+ * preempt_schedule_irq with IRQs off, so we inform lockdep
+ * now that we -did- turn them off already
+ */
+ bl trace_hardirqs_off
+#endif
1: bl preempt_schedule_irq
rlwinm r9,r1,0,0,(31-THREAD_SHIFT)
lwz r3,TI_FLAGS(r9)
andi. r0,r3,_TIF_NEED_RESCHED
bne- 1b
+#ifdef CONFIG_TRACE_IRQFLAGS
+ /* And now, to properly rebalance the above, we tell lockdep they
+ * are being turned back on, which will happen when we return
+ */
+ bl trace_hardirqs_on
+#endif
#else
resume_kernel:
#endif /* CONFIG_PREEMPT */
@@ -765,6 +857,28 @@ restore:
stw r6,icache_44x_need_flush@l(r4)
1:
#endif /* CONFIG_44x */
+
+ lwz r9,_MSR(r1)
+#ifdef CONFIG_TRACE_IRQFLAGS
+ /* Lockdep doesn't know about the fact that IRQs are temporarily turned
+ * off in this assembly code while peeking at TI_FLAGS() and such. However
+ * we need to inform it if the exception turned interrupts off, and we
+ * are about to turn them back on.
+ *
+ * The problem here sadly is that we don't know whether the exception was
+ * one that turned interrupts off or not. So we always tell lockdep about
+ * turning them on here when we go back to wherever we came from with EE
+ * on, even if that may mean some redundant calls being tracked. Maybe later
+ * we could encode what the exception did somewhere or test the exception
+ * type in the pt_regs but that sounds overkill
+ */
+ andi. r10,r9,MSR_EE
+ beq 1f
+ bl trace_hardirqs_on
+ lwz r9,_MSR(r1)
+1:
+#endif /* CONFIG_TRACE_IRQFLAGS */
+
lwz r0,GPR0(r1)
lwz r2,GPR2(r1)
REST_4GPRS(3, r1)
@@ -782,7 +896,6 @@ END_FTR_SECTION_IFSET(CPU_FTR_NEED_PAIRED_STWCX)
stwcx. r0,0,r1 /* to clear the reservation */
#if !(defined(CONFIG_4xx) || defined(CONFIG_BOOKE))
- lwz r9,_MSR(r1)
andi. r10,r9,MSR_RI /* check if this exception occurred */
beql nonrecoverable /* at a bad place (MSR:RI = 0) */
@@ -805,7 +918,6 @@ END_FTR_SECTION_IFSET(CPU_FTR_NEED_PAIRED_STWCX)
MTMSRD(r10) /* clear the RI bit */
.globl exc_exit_restart
exc_exit_restart:
- lwz r9,_MSR(r1)
lwz r12,_NIP(r1)
FIX_SRR1(r9,r10)
mtspr SPRN_SRR0,r12
@@ -1035,11 +1147,18 @@ do_work: /* r10 contains MSR_KERNEL here */
beq do_user_signal
do_resched: /* r10 contains MSR_KERNEL here */
+ /* Note: We don't need to inform lockdep that we are enabling
+ * interrupts here. As far as it knows, they are already enabled
+ */
ori r10,r10,MSR_EE
SYNC
MTMSRD(r10) /* hard-enable interrupts */
bl schedule
recheck:
+ /* Note: Nor do we tell it that we are disabling them again.
+ * The disable/enable cycles used to peek at TI_FLAGS aren't
+ * advertised.
+ */
LOAD_MSR_KERNEL(r10,MSR_KERNEL)
SYNC
MTMSRD(r10) /* disable interrupts */
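All of the TRACE_IRQFLAGS additions above enforce one invariant: lockdep's view of the IRQ state must be updated on the correct side of the real MSR[EE] flip — trace_hardirqs_off() after interrupts are truly off, trace_hardirqs_on() before they are truly on. A user-space simulation of that ordering contract (everything besides the trace_hardirqs_* names is a stand-in):

    #include <stdio.h>

    #define MSR_EE 0x8000u          /* external interrupt enable */
    static unsigned int msr;        /* simulated MSR             */
    static int lockdep_irqs_on;     /* simulated lockdep view    */

    static void trace_hardirqs_off(void) { lockdep_irqs_on = 0; }
    static void trace_hardirqs_on(void)  { lockdep_irqs_on = 1; }

    static void hard_irq_disable_traced(void)
    {
            msr &= ~MSR_EE;         /* really disable first... */
            trace_hardirqs_off();   /* ...then tell lockdep    */
    }

    static void hard_irq_enable_traced(void)
    {
            trace_hardirqs_on();    /* tell lockdep first...   */
            msr |= MSR_EE;          /* ...then really enable   */
    }

    int main(void)
    {
            hard_irq_enable_traced();
            printf("EE=%d lockdep=%d\n", !!(msr & MSR_EE), lockdep_irqs_on);
            hard_irq_disable_traced();
            printf("EE=%d lockdep=%d\n", !!(msr & MSR_EE), lockdep_irqs_on);
            return 0;
    }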
diff --git a/arch/powerpc/kernel/ftrace.c b/arch/powerpc/kernel/ftrace.c
index 1b12696cca0..ce1f3e44c24 100644
--- a/arch/powerpc/kernel/ftrace.c
+++ b/arch/powerpc/kernel/ftrace.c
@@ -586,7 +586,7 @@ void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr)
return;
}
- if (ftrace_push_return_trace(old, self_addr, &trace.depth) == -EBUSY) {
+ if (ftrace_push_return_trace(old, self_addr, &trace.depth, 0) == -EBUSY) {
*parent = old;
return;
}
diff --git a/arch/powerpc/kernel/head_32.S b/arch/powerpc/kernel/head_32.S
index 48469463f89..fc213294275 100644
--- a/arch/powerpc/kernel/head_32.S
+++ b/arch/powerpc/kernel/head_32.S
@@ -1124,9 +1124,8 @@ mmu_off:
RFI
/*
- * Use the first pair of BAT registers to map the 1st 16MB
- * of RAM to PAGE_OFFSET. From this point on we can't safely
- * call OF any more.
+ * On 601, we use 3 BATs to map up to 24M of RAM at PAGE_OFFSET
+ * (we keep one for debugging) and on others, we use one 256M BAT.
*/
initial_bats:
lis r11,PAGE_OFFSET@h
@@ -1136,12 +1135,16 @@ initial_bats:
bne 4f
ori r11,r11,4 /* set up BAT registers for 601 */
li r8,0x7f /* valid, block length = 8MB */
- oris r9,r11,0x800000@h /* set up BAT reg for 2nd 8M */
- oris r10,r8,0x800000@h /* set up BAT reg for 2nd 8M */
mtspr SPRN_IBAT0U,r11 /* N.B. 601 has valid bit in */
mtspr SPRN_IBAT0L,r8 /* lower BAT register */
- mtspr SPRN_IBAT1U,r9
- mtspr SPRN_IBAT1L,r10
+ addis r11,r11,0x800000@h
+ addis r8,r8,0x800000@h
+ mtspr SPRN_IBAT1U,r11
+ mtspr SPRN_IBAT1L,r8
+ addis r11,r11,0x800000@h
+ addis r8,r8,0x800000@h
+ mtspr SPRN_IBAT2U,r11
+ mtspr SPRN_IBAT2L,r8
isync
blr
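Concretely, each addis in the 601 path adds 0x00800000 (8 MB) to both the effective-address word and the physical/valid word, yielding three contiguous 8 MB BATs — 24 MB mapped at PAGE_OFFSET, with the fourth BAT kept free for debugging. A quick rendering of the values the code computes (PAGE_OFFSET is assumed to be 0xc0000000; the valid/BL encodings are taken from the patch):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint32_t upper = 0xc0000000u | 4; /* PAGE_OFFSET | 601 valid bit */
            uint32_t lower = 0x7f;            /* phys 0, valid, BL = 8 MB    */

            for (int i = 0; i < 3; i++) {
                    printf("IBAT%dU=0x%08x IBAT%dL=0x%08x\n",
                           i, upper, i, lower);
                    upper += 0x00800000;      /* next 8 MB effective */
                    lower += 0x00800000;      /* next 8 MB physical  */
            }
            return 0;
    }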
diff --git a/arch/powerpc/kernel/mpc7450-pmu.c b/arch/powerpc/kernel/mpc7450-pmu.c
new file mode 100644
index 00000000000..cc466d039af
--- /dev/null
+++ b/arch/powerpc/kernel/mpc7450-pmu.c
@@ -0,0 +1,417 @@
+/*
+ * Performance counter support for MPC7450-family processors.
+ *
+ * Copyright 2008-2009 Paul Mackerras, IBM Corporation.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+#include <linux/string.h>
+#include <linux/perf_counter.h>
+#include <asm/reg.h>
+#include <asm/cputable.h>
+
+#define N_COUNTER 6 /* Number of hardware counters */
+#define MAX_ALT 3 /* Maximum number of event alternative codes */
+
+/*
+ * Bits in event code for MPC7450 family
+ */
+#define PM_THRMULT_MSKS 0x40000
+#define PM_THRESH_SH 12
+#define PM_THRESH_MSK 0x3f
+#define PM_PMC_SH 8
+#define PM_PMC_MSK 7
+#define PM_PMCSEL_MSK 0x7f
+
+/*
+ * Classify events according to how specific their PMC requirements are.
+ * Result is:
+ * 0: can go on any PMC
+ * 1: can go on PMCs 1-4
+ * 2: can go on PMCs 1,2,4
+ * 3: can go on PMCs 1 or 2
+ * 4: can only go on one PMC
+ * -1: event code is invalid
+ */
+#define N_CLASSES 5
+
+static int mpc7450_classify_event(u32 event)
+{
+ int pmc;
+
+ pmc = (event >> PM_PMC_SH) & PM_PMC_MSK;
+ if (pmc) {
+ if (pmc > N_COUNTER)
+ return -1;
+ return 4;
+ }
+ event &= PM_PMCSEL_MSK;
+ if (event <= 1)
+ return 0;
+ if (event <= 7)
+ return 1;
+ if (event <= 13)
+ return 2;
+ if (event <= 22)
+ return 3;
+ return -1;
+}
+
+/*
+ * Events using threshold and possible threshold scale:
+ * code scale? name
+ * 11e N PM_INSTQ_EXCEED_CYC
+ * 11f N PM_ALTV_IQ_EXCEED_CYC
+ * 128 Y PM_DTLB_SEARCH_EXCEED_CYC
+ * 12b Y PM_LD_MISS_EXCEED_L1_CYC
+ * 220 N PM_CQ_EXCEED_CYC
+ * 30c N PM_GPR_RB_EXCEED_CYC
+ * 30d ? PM_FPR_IQ_EXCEED_CYC ?
+ * 311 Y PM_ITLB_SEARCH_EXCEED
+ * 410 N PM_GPR_IQ_EXCEED_CYC
+ */
+
+/*
+ * Return use of threshold and threshold scale bits:
+ * 0 = uses neither, 1 = uses threshold, 2 = uses both
+ */
+static int mpc7450_threshold_use(u32 event)
+{
+ int pmc, sel;
+
+ pmc = (event >> PM_PMC_SH) & PM_PMC_MSK;
+ sel = event & PM_PMCSEL_MSK;
+ switch (pmc) {
+ case 1:
+ if (sel == 0x1e || sel == 0x1f)
+ return 1;
+ if (sel == 0x28 || sel == 0x2b)
+ return 2;
+ break;
+ case 2:
+ if (sel == 0x20)
+ return 1;
+ break;
+ case 3:
+ if (sel == 0xc || sel == 0xd)
+ return 1;
+ if (sel == 0x11)
+ return 2;
+ break;
+ case 4:
+ if (sel == 0x10)
+ return 1;
+ break;
+ }
+ return 0;
+}
+
+/*
+ * Layout of constraint bits:
+ * 33222222222211111111110000000000
+ * 10987654321098765432109876543210
+ * |< >< > < > < ><><><><><><>
+ * TS TV G4 G3 G2P6P5P4P3P2P1
+ *
+ * P1 - P6
+ * 0 - 11: Count of events needing PMC1 .. PMC6
+ *
+ * G2
+ * 12 - 14: Count of events needing PMC1 or PMC2
+ *
+ * G3
+ * 16 - 18: Count of events needing PMC1, PMC2 or PMC4
+ *
+ * G4
+ * 20 - 23: Count of events needing PMC1, PMC2, PMC3 or PMC4
+ *
+ * TV
+ * 24 - 29: Threshold value requested
+ *
+ * TS
+ * 30: Threshold scale value requested
+ */
+
+static u32 pmcbits[N_COUNTER][2] = {
+ { 0x00844002, 0x00111001 }, /* PMC1 mask, value: P1,G2,G3,G4 */
+ { 0x00844008, 0x00111004 }, /* PMC2: P2,G2,G3,G4 */
+ { 0x00800020, 0x00100010 }, /* PMC3: P3,G4 */
+ { 0x00840080, 0x00110040 }, /* PMC4: P4,G3,G4 */
+ { 0x00000200, 0x00000100 }, /* PMC5: P5 */
+ { 0x00000800, 0x00000400 } /* PMC6: P6 */
+};
+
+static u32 classbits[N_CLASSES - 1][2] = {
+ { 0x00000000, 0x00000000 }, /* class 0: no constraint */
+ { 0x00800000, 0x00100000 }, /* class 1: G4 */
+ { 0x00040000, 0x00010000 }, /* class 2: G3 */
+ { 0x00004000, 0x00001000 }, /* class 3: G2 */
+};
+
+static int mpc7450_get_constraint(u64 event, unsigned long *maskp,
+ unsigned long *valp)
+{
+ int pmc, class;
+ u32 mask, value;
+ int thresh, tuse;
+
+ class = mpc7450_classify_event(event);
+ if (class < 0)
+ return -1;
+ if (class == 4) {
+ pmc = ((unsigned int)event >> PM_PMC_SH) & PM_PMC_MSK;
+ mask = pmcbits[pmc - 1][0];
+ value = pmcbits[pmc - 1][1];
+ } else {
+ mask = classbits[class][0];
+ value = classbits[class][1];
+ }
+
+ tuse = mpc7450_threshold_use(event);
+ if (tuse) {
+ thresh = ((unsigned int)event >> PM_THRESH_SH) & PM_THRESH_MSK;
+ mask |= 0x3f << 24;
+ value |= thresh << 24;
+ if (tuse == 2) {
+ mask |= 0x40000000;
+ if ((unsigned int)event & PM_THRMULT_MSKS)
+ value |= 0x40000000;
+ }
+ }
+
+ *maskp = mask;
+ *valp = value;
+ return 0;
+}
+
+static const unsigned int event_alternatives[][MAX_ALT] = {
+ { 0x217, 0x317 }, /* PM_L1_DCACHE_MISS */
+ { 0x418, 0x50f, 0x60f }, /* PM_SNOOP_RETRY */
+ { 0x502, 0x602 }, /* PM_L2_HIT */
+ { 0x503, 0x603 }, /* PM_L3_HIT */
+ { 0x504, 0x604 }, /* PM_L2_ICACHE_MISS */
+ { 0x505, 0x605 }, /* PM_L3_ICACHE_MISS */
+ { 0x506, 0x606 }, /* PM_L2_DCACHE_MISS */
+ { 0x507, 0x607 }, /* PM_L3_DCACHE_MISS */
+ { 0x50a, 0x623 }, /* PM_LD_HIT_L3 */
+ { 0x50b, 0x624 }, /* PM_ST_HIT_L3 */
+ { 0x50d, 0x60d }, /* PM_L2_TOUCH_HIT */
+ { 0x50e, 0x60e }, /* PM_L3_TOUCH_HIT */
+ { 0x512, 0x612 }, /* PM_INT_LOCAL */
+ { 0x513, 0x61d }, /* PM_L2_MISS */
+ { 0x514, 0x61e }, /* PM_L3_MISS */
+};
+
+/*
+ * Scan the alternatives table for a match and return the
+ * index into the alternatives table if found, else -1.
+ */
+static int find_alternative(u32 event)
+{
+ int i, j;
+
+ for (i = 0; i < ARRAY_SIZE(event_alternatives); ++i) {
+ if (event < event_alternatives[i][0])
+ break;
+ for (j = 0; j < MAX_ALT && event_alternatives[i][j]; ++j)
+ if (event == event_alternatives[i][j])
+ return i;
+ }
+ return -1;
+}
+
+static int mpc7450_get_alternatives(u64 event, unsigned int flags, u64 alt[])
+{
+ int i, j, nalt = 1;
+ u32 ae;
+
+ alt[0] = event;
+ nalt = 1;
+ i = find_alternative((u32)event);
+ if (i >= 0) {
+ for (j = 0; j < MAX_ALT; ++j) {
+ ae = event_alternatives[i][j];
+ if (ae && ae != (u32)event)
+ alt[nalt++] = ae;
+ }
+ }
+ return nalt;
+}
+
+/*
+ * Bitmaps of which PMCs each class can use for classes 0 - 3.
+ * Bit i is set if PMC i+1 is usable.
+ */
+static const u8 classmap[N_CLASSES] = {
+ 0x3f, 0x0f, 0x0b, 0x03, 0
+};
+
+/* Bit position and width of each PMCSEL field */
+static const int pmcsel_shift[N_COUNTER] = {
+ 6, 0, 27, 22, 17, 11
+};
+static const u32 pmcsel_mask[N_COUNTER] = {
+ 0x7f, 0x3f, 0x1f, 0x1f, 0x1f, 0x3f
+};
+
+/*
+ * Compute MMCR0/1/2 values for a set of events.
+ */
+static int mpc7450_compute_mmcr(u64 event[], int n_ev,
+ unsigned int hwc[], unsigned long mmcr[])
+{
+ u8 event_index[N_CLASSES][N_COUNTER];
+ int n_classevent[N_CLASSES];
+ int i, j, class, tuse;
+ u32 pmc_inuse = 0, pmc_avail;
+ u32 mmcr0 = 0, mmcr1 = 0, mmcr2 = 0;
+ u32 ev, pmc, thresh;
+
+ if (n_ev > N_COUNTER)
+ return -1;
+
+ /* First pass: count usage in each class */
+ for (i = 0; i < N_CLASSES; ++i)
+ n_classevent[i] = 0;
+ for (i = 0; i < n_ev; ++i) {
+ class = mpc7450_classify_event(event[i]);
+ if (class < 0)
+ return -1;
+ j = n_classevent[class]++;
+ event_index[class][j] = i;
+ }
+
+ /* Second pass: allocate PMCs from most specific event to least */
+ for (class = N_CLASSES - 1; class >= 0; --class) {
+ for (i = 0; i < n_classevent[class]; ++i) {
+ ev = event[event_index[class][i]];
+ if (class == 4) {
+ pmc = (ev >> PM_PMC_SH) & PM_PMC_MSK;
+ if (pmc_inuse & (1 << (pmc - 1)))
+ return -1;
+ } else {
+ /* Find a suitable PMC */
+ pmc_avail = classmap[class] & ~pmc_inuse;
+ if (!pmc_avail)
+ return -1;
+ pmc = ffs(pmc_avail);
+ }
+ pmc_inuse |= 1 << (pmc - 1);
+
+ tuse = mpc7450_threshold_use(ev);
+ if (tuse) {
+ thresh = (ev >> PM_THRESH_SH) & PM_THRESH_MSK;
+ mmcr0 |= thresh << 16;
+ if (tuse == 2 && (ev & PM_THRMULT_MSKS))
+ mmcr2 = 0x80000000;
+ }
+ ev &= pmcsel_mask[pmc - 1];
+ ev <<= pmcsel_shift[pmc - 1];
+ if (pmc <= 2)
+ mmcr0 |= ev;
+ else
+ mmcr1 |= ev;
+ hwc[event_index[class][i]] = pmc - 1;
+ }
+ }
+
+ if (pmc_inuse & 1)
+ mmcr0 |= MMCR0_PMC1CE;
+ if (pmc_inuse & 0x3e)
+ mmcr0 |= MMCR0_PMCnCE;
+
+ /* Return MMCRx values */
+ mmcr[0] = mmcr0;
+ mmcr[1] = mmcr1;
+ mmcr[2] = mmcr2;
+ return 0;
+}
+
+/*
+ * Disable counting by a PMC.
+ * Note that the pmc argument is 0-based here, not 1-based.
+ */
+static void mpc7450_disable_pmc(unsigned int pmc, unsigned long mmcr[])
+{
+ if (pmc <= 1)
+ mmcr[0] &= ~(pmcsel_mask[pmc] << pmcsel_shift[pmc]);
+ else
+ mmcr[1] &= ~(pmcsel_mask[pmc] << pmcsel_shift[pmc]);
+}
+
+static int mpc7450_generic_events[] = {
+ [PERF_COUNT_HW_CPU_CYCLES] = 1,
+ [PERF_COUNT_HW_INSTRUCTIONS] = 2,
+ [PERF_COUNT_HW_CACHE_MISSES] = 0x217, /* PM_L1_DCACHE_MISS */
+ [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = 0x122, /* PM_BR_CMPL */
+ [PERF_COUNT_HW_BRANCH_MISSES] = 0x41c, /* PM_BR_MPRED */
+};
+
+#define C(x) PERF_COUNT_HW_CACHE_##x
+
+/*
+ * Table of generalized cache-related events.
+ * 0 means not supported, -1 means nonsensical, other values
+ * are event codes.
+ */
+static int mpc7450_cache_events[C(MAX)][C(OP_MAX)][C(RESULT_MAX)] = {
+ [C(L1D)] = { /* RESULT_ACCESS RESULT_MISS */
+ [C(OP_READ)] = { 0, 0x225 },
+ [C(OP_WRITE)] = { 0, 0x227 },
+ [C(OP_PREFETCH)] = { 0, 0 },
+ },
+ [C(L1I)] = { /* RESULT_ACCESS RESULT_MISS */
+ [C(OP_READ)] = { 0x129, 0x115 },
+ [C(OP_WRITE)] = { -1, -1 },
+ [C(OP_PREFETCH)] = { 0x634, 0 },
+ },
+ [C(LL)] = { /* RESULT_ACCESS RESULT_MISS */
+ [C(OP_READ)] = { 0, 0 },
+ [C(OP_WRITE)] = { 0, 0 },
+ [C(OP_PREFETCH)] = { 0, 0 },
+ },
+ [C(DTLB)] = { /* RESULT_ACCESS RESULT_MISS */
+ [C(OP_READ)] = { 0, 0x312 },
+ [C(OP_WRITE)] = { -1, -1 },
+ [C(OP_PREFETCH)] = { -1, -1 },
+ },
+ [C(ITLB)] = { /* RESULT_ACCESS RESULT_MISS */
+ [C(OP_READ)] = { 0, 0x223 },
+ [C(OP_WRITE)] = { -1, -1 },
+ [C(OP_PREFETCH)] = { -1, -1 },
+ },
+ [C(BPU)] = { /* RESULT_ACCESS RESULT_MISS */
+ [C(OP_READ)] = { 0x122, 0x41c },
+ [C(OP_WRITE)] = { -1, -1 },
+ [C(OP_PREFETCH)] = { -1, -1 },
+ },
+};
+
+struct power_pmu mpc7450_pmu = {
+ .name = "MPC7450 family",
+ .n_counter = N_COUNTER,
+ .max_alternatives = MAX_ALT,
+ .add_fields = 0x00111555ul,
+ .test_adder = 0x00301000ul,
+ .compute_mmcr = mpc7450_compute_mmcr,
+ .get_constraint = mpc7450_get_constraint,
+ .get_alternatives = mpc7450_get_alternatives,
+ .disable_pmc = mpc7450_disable_pmc,
+ .n_generic = ARRAY_SIZE(mpc7450_generic_events),
+ .generic_events = mpc7450_generic_events,
+ .cache_events = &mpc7450_cache_events,
+};
+
+static int init_mpc7450_pmu(void)
+{
+ if (!cur_cpu_spec->oprofile_cpu_type ||
+ strcmp(cur_cpu_spec->oprofile_cpu_type, "ppc/7450"))
+ return -ENODEV;
+
+ return register_power_pmu(&mpc7450_pmu);
+}
+
+arch_initcall(init_mpc7450_pmu);
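The mask/value pairs above implement the packed-counter trick used throughout these PMU drivers: the constraint word holds several small count fields (P1..P6, G2..G4), each event contributes 1 into the fields for the resources it needs, and the mask bit sitting just above each field catches overflow when a set of events over-commits a resource — this is what the .add_fields and .test_adder members encode for the core checker. A simplified user-space sketch of the idiom (not the exact algorithm in perf_counter.c):

    #include <stdint.h>
    #include <stdio.h>

    /*
     * Each 2-bit field counts events of one class; "mask" marks the
     * bit just above the field, so exceeding the field's capacity
     * sets a mask bit and the combination is rejected.
     */
    static int add_constraint(uint32_t *sum, uint32_t value, uint32_t mask)
    {
            uint32_t nv = *sum + value;

            if (nv & mask)          /* some field over-committed */
                    return -1;
            *sum = nv;
            return 0;
    }

    int main(void)
    {
            uint32_t sum = 0;
            /* Two events that can only go on PMC1 (P1 capacity is 1). */
            uint32_t p1_value = 0x00000001, p1_mask = 0x00000002;

            printf("first event:  %d\n", add_constraint(&sum, p1_value, p1_mask));
            printf("second event: %d\n", add_constraint(&sum, p1_value, p1_mask));
            return 0;
    }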
diff --git a/arch/powerpc/kernel/of_device.c b/arch/powerpc/kernel/of_device.c
index fa983a59c4c..a359cb08e90 100644
--- a/arch/powerpc/kernel/of_device.c
+++ b/arch/powerpc/kernel/of_device.c
@@ -76,7 +76,7 @@ struct of_device *of_device_alloc(struct device_node *np,
dev->dev.archdata.of_node = np;
if (bus_id)
- dev_set_name(&dev->dev, bus_id);
+ dev_set_name(&dev->dev, "%s", bus_id);
else
of_device_make_bus_id(dev);
diff --git a/arch/powerpc/kernel/perf_counter.c b/arch/powerpc/kernel/perf_counter.c
index bb202388170..70e1f57f7dd 100644
--- a/arch/powerpc/kernel/perf_counter.c
+++ b/arch/powerpc/kernel/perf_counter.c
@@ -29,7 +29,7 @@ struct cpu_hw_counters {
struct perf_counter *counter[MAX_HWCOUNTERS];
u64 events[MAX_HWCOUNTERS];
unsigned int flags[MAX_HWCOUNTERS];
- u64 mmcr[3];
+ unsigned long mmcr[3];
struct perf_counter *limited_counter[MAX_LIMITED_HWCOUNTERS];
u8 limited_hwidx[MAX_LIMITED_HWCOUNTERS];
};
@@ -46,6 +46,115 @@ struct power_pmu *ppmu;
*/
static unsigned int freeze_counters_kernel = MMCR0_FCS;
+/*
+ * 32-bit doesn't have MMCRA but does have an MMCR2,
+ * and a few other names are different.
+ */
+#ifdef CONFIG_PPC32
+
+#define MMCR0_FCHV 0
+#define MMCR0_PMCjCE MMCR0_PMCnCE
+
+#define SPRN_MMCRA SPRN_MMCR2
+#define MMCRA_SAMPLE_ENABLE 0
+
+static inline unsigned long perf_ip_adjust(struct pt_regs *regs)
+{
+ return 0;
+}
+static inline void perf_set_pmu_inuse(int inuse) { }
+static inline void perf_get_data_addr(struct pt_regs *regs, u64 *addrp) { }
+static inline u32 perf_get_misc_flags(struct pt_regs *regs)
+{
+ return 0;
+}
+static inline void perf_read_regs(struct pt_regs *regs) { }
+static inline int perf_intr_is_nmi(struct pt_regs *regs)
+{
+ return 0;
+}
+
+#endif /* CONFIG_PPC32 */
+
+/*
+ * Things that are specific to 64-bit implementations.
+ */
+#ifdef CONFIG_PPC64
+
+static inline unsigned long perf_ip_adjust(struct pt_regs *regs)
+{
+ unsigned long mmcra = regs->dsisr;
+
+ if ((mmcra & MMCRA_SAMPLE_ENABLE) && !(ppmu->flags & PPMU_ALT_SIPR)) {
+ unsigned long slot = (mmcra & MMCRA_SLOT) >> MMCRA_SLOT_SHIFT;
+ if (slot > 1)
+ return 4 * (slot - 1);
+ }
+ return 0;
+}
+
+static inline void perf_set_pmu_inuse(int inuse)
+{
+ get_lppaca()->pmcregs_in_use = inuse;
+}
+
+/*
+ * The user wants a data address recorded.
+ * If we're not doing instruction sampling, give them the SDAR
+ * (sampled data address). If we are doing instruction sampling, then
+ * only give them the SDAR if it corresponds to the instruction
+ * pointed to by SIAR; this is indicated by the [POWER6_]MMCRA_SDSYNC
+ * bit in MMCRA.
+ */
+static inline void perf_get_data_addr(struct pt_regs *regs, u64 *addrp)
+{
+ unsigned long mmcra = regs->dsisr;
+ unsigned long sdsync = (ppmu->flags & PPMU_ALT_SIPR) ?
+ POWER6_MMCRA_SDSYNC : MMCRA_SDSYNC;
+
+ if (!(mmcra & MMCRA_SAMPLE_ENABLE) || (mmcra & sdsync))
+ *addrp = mfspr(SPRN_SDAR);
+}
+
+static inline u32 perf_get_misc_flags(struct pt_regs *regs)
+{
+ unsigned long mmcra = regs->dsisr;
+
+ if (TRAP(regs) != 0xf00)
+ return 0; /* not a PMU interrupt */
+
+ if (ppmu->flags & PPMU_ALT_SIPR) {
+ if (mmcra & POWER6_MMCRA_SIHV)
+ return PERF_EVENT_MISC_HYPERVISOR;
+ return (mmcra & POWER6_MMCRA_SIPR) ?
+ PERF_EVENT_MISC_USER : PERF_EVENT_MISC_KERNEL;
+ }
+ if (mmcra & MMCRA_SIHV)
+ return PERF_EVENT_MISC_HYPERVISOR;
+ return (mmcra & MMCRA_SIPR) ? PERF_EVENT_MISC_USER :
+ PERF_EVENT_MISC_KERNEL;
+}
+
+/*
+ * Overload regs->dsisr to store MMCRA so we only need to read it once
+ * on each interrupt.
+ */
+static inline void perf_read_regs(struct pt_regs *regs)
+{
+ regs->dsisr = mfspr(SPRN_MMCRA);
+}
+
+/*
+ * If interrupts were soft-disabled when a PMU interrupt occurs, treat
+ * it as an NMI.
+ */
+static inline int perf_intr_is_nmi(struct pt_regs *regs)
+{
+ return !regs->softe;
+}
+
+#endif /* CONFIG_PPC64 */
+
static void perf_counter_interrupt(struct pt_regs *regs);
void perf_counter_print_debug(void)
@@ -78,12 +187,14 @@ static unsigned long read_pmc(int idx)
case 6:
val = mfspr(SPRN_PMC6);
break;
+#ifdef CONFIG_PPC64
case 7:
val = mfspr(SPRN_PMC7);
break;
case 8:
val = mfspr(SPRN_PMC8);
break;
+#endif /* CONFIG_PPC64 */
default:
printk(KERN_ERR "oops trying to read PMC%d\n", idx);
val = 0;
@@ -115,12 +226,14 @@ static void write_pmc(int idx, unsigned long val)
case 6:
mtspr(SPRN_PMC6, val);
break;
+#ifdef CONFIG_PPC64
case 7:
mtspr(SPRN_PMC7, val);
break;
case 8:
mtspr(SPRN_PMC8, val);
break;
+#endif /* CONFIG_PPC64 */
default:
printk(KERN_ERR "oops trying to write PMC%d\n", idx);
}
@@ -135,15 +248,15 @@ static void write_pmc(int idx, unsigned long val)
static int power_check_constraints(u64 event[], unsigned int cflags[],
int n_ev)
{
- u64 mask, value, nv;
+ unsigned long mask, value, nv;
u64 alternatives[MAX_HWCOUNTERS][MAX_EVENT_ALTERNATIVES];
- u64 amasks[MAX_HWCOUNTERS][MAX_EVENT_ALTERNATIVES];
- u64 avalues[MAX_HWCOUNTERS][MAX_EVENT_ALTERNATIVES];
- u64 smasks[MAX_HWCOUNTERS], svalues[MAX_HWCOUNTERS];
+ unsigned long amasks[MAX_HWCOUNTERS][MAX_EVENT_ALTERNATIVES];
+ unsigned long avalues[MAX_HWCOUNTERS][MAX_EVENT_ALTERNATIVES];
+ unsigned long smasks[MAX_HWCOUNTERS], svalues[MAX_HWCOUNTERS];
int n_alt[MAX_HWCOUNTERS], choice[MAX_HWCOUNTERS];
int i, j;
- u64 addf = ppmu->add_fields;
- u64 tadd = ppmu->test_adder;
+ unsigned long addf = ppmu->add_fields;
+ unsigned long tadd = ppmu->test_adder;
if (n_ev > ppmu->n_counter)
return -1;
@@ -283,7 +396,7 @@ static int check_excludes(struct perf_counter **ctrs, unsigned int cflags[],
static void power_pmu_read(struct perf_counter *counter)
{
- long val, delta, prev;
+ s64 val, delta, prev;
if (!counter->hw.idx)
return;
@@ -403,14 +516,14 @@ static void write_mmcr0(struct cpu_hw_counters *cpuhw, unsigned long mmcr0)
void hw_perf_disable(void)
{
struct cpu_hw_counters *cpuhw;
- unsigned long ret;
unsigned long flags;
+ if (!ppmu)
+ return;
local_irq_save(flags);
cpuhw = &__get_cpu_var(cpu_hw_counters);
- ret = cpuhw->disabled;
- if (!ret) {
+ if (!cpuhw->disabled) {
cpuhw->disabled = 1;
cpuhw->n_added = 0;
@@ -461,6 +574,8 @@ void hw_perf_enable(void)
int n_lim;
int idx;
+ if (!ppmu)
+ return;
local_irq_save(flags);
cpuhw = &__get_cpu_var(cpu_hw_counters);
if (!cpuhw->disabled) {
@@ -479,7 +594,7 @@ void hw_perf_enable(void)
mtspr(SPRN_MMCRA, cpuhw->mmcr[2] & ~MMCRA_SAMPLE_ENABLE);
mtspr(SPRN_MMCR1, cpuhw->mmcr[1]);
if (cpuhw->n_counters == 0)
- get_lppaca()->pmcregs_in_use = 0;
+ perf_set_pmu_inuse(0);
goto out_enable;
}
@@ -512,7 +627,7 @@ void hw_perf_enable(void)
* bit set and set the hardware counters to their initial values.
* Then unfreeze the counters.
*/
- get_lppaca()->pmcregs_in_use = 1;
+ perf_set_pmu_inuse(1);
mtspr(SPRN_MMCRA, cpuhw->mmcr[2] & ~MMCRA_SAMPLE_ENABLE);
mtspr(SPRN_MMCR1, cpuhw->mmcr[1]);
mtspr(SPRN_MMCR0, (cpuhw->mmcr[0] & ~(MMCR0_PMC1CE | MMCR0_PMCjCE))
@@ -626,6 +741,8 @@ int hw_perf_group_sched_in(struct perf_counter *group_leader,
long i, n, n0;
struct perf_counter *sub;
+ if (!ppmu)
+ return 0;
cpuhw = &__get_cpu_var(cpu_hw_counters);
n0 = cpuhw->n_counters;
n = collect_events(group_leader, ppmu->n_counter - n0,
@@ -913,6 +1030,8 @@ const struct pmu *hw_perf_counter_init(struct perf_counter *counter)
case PERF_TYPE_RAW:
ev = counter->attr.config;
break;
+ default:
+ return ERR_PTR(-EINVAL);
}
counter->hw.config_base = ev;
counter->hw.idx = 0;
@@ -1007,13 +1126,12 @@ const struct pmu *hw_perf_counter_init(struct perf_counter *counter)
* things if requested. Note that interrupts are hard-disabled
* here so there is no possibility of being interrupted.
*/
-static void record_and_restart(struct perf_counter *counter, long val,
+static void record_and_restart(struct perf_counter *counter, unsigned long val,
struct pt_regs *regs, int nmi)
{
u64 period = counter->hw.sample_period;
s64 prev, delta, left;
int record = 0;
- u64 addr, mmcra, sdsync;
/* we don't have to worry about interrupts here */
prev = atomic64_read(&counter->hw.prev_count);
@@ -1033,8 +1151,8 @@ static void record_and_restart(struct perf_counter *counter, long val,
left = period;
record = 1;
}
- if (left < 0x80000000L)
- val = 0x80000000L - left;
+ if (left < 0x80000000LL)
+ val = 0x80000000LL - left;
}
/*
@@ -1047,22 +1165,9 @@ static void record_and_restart(struct perf_counter *counter, long val,
.period = counter->hw.last_period,
};
- if (counter->attr.sample_type & PERF_SAMPLE_ADDR) {
- /*
- * The user wants a data address recorded.
- * If we're not doing instruction sampling,
- * give them the SDAR (sampled data address).
- * If we are doing instruction sampling, then only
- * give them the SDAR if it corresponds to the
- * instruction pointed to by SIAR; this is indicated
- * by the [POWER6_]MMCRA_SDSYNC bit in MMCRA.
- */
- mmcra = regs->dsisr;
- sdsync = (ppmu->flags & PPMU_ALT_SIPR) ?
- POWER6_MMCRA_SDSYNC : MMCRA_SDSYNC;
- if (!(mmcra & MMCRA_SAMPLE_ENABLE) || (mmcra & sdsync))
- data.addr = mfspr(SPRN_SDAR);
- }
+ if (counter->attr.sample_type & PERF_SAMPLE_ADDR)
+ perf_get_data_addr(regs, &data.addr);
+
if (perf_counter_overflow(counter, nmi, &data)) {
/*
* Interrupts are coming too fast - throttle them
@@ -1088,25 +1193,12 @@ static void record_and_restart(struct perf_counter *counter, long val,
*/
unsigned long perf_misc_flags(struct pt_regs *regs)
{
- unsigned long mmcra;
-
- if (TRAP(regs) != 0xf00) {
- /* not a PMU interrupt */
- return user_mode(regs) ? PERF_EVENT_MISC_USER :
- PERF_EVENT_MISC_KERNEL;
- }
+ u32 flags = perf_get_misc_flags(regs);
- mmcra = regs->dsisr;
- if (ppmu->flags & PPMU_ALT_SIPR) {
- if (mmcra & POWER6_MMCRA_SIHV)
- return PERF_EVENT_MISC_HYPERVISOR;
- return (mmcra & POWER6_MMCRA_SIPR) ? PERF_EVENT_MISC_USER :
- PERF_EVENT_MISC_KERNEL;
- }
- if (mmcra & MMCRA_SIHV)
- return PERF_EVENT_MISC_HYPERVISOR;
- return (mmcra & MMCRA_SIPR) ? PERF_EVENT_MISC_USER :
- PERF_EVENT_MISC_KERNEL;
+ if (flags)
+ return flags;
+ return user_mode(regs) ? PERF_EVENT_MISC_USER :
+ PERF_EVENT_MISC_KERNEL;
}
/*
@@ -1115,20 +1207,12 @@ unsigned long perf_misc_flags(struct pt_regs *regs)
*/
unsigned long perf_instruction_pointer(struct pt_regs *regs)
{
- unsigned long mmcra;
unsigned long ip;
- unsigned long slot;
if (TRAP(regs) != 0xf00)
return regs->nip; /* not a PMU interrupt */
- ip = mfspr(SPRN_SIAR);
- mmcra = regs->dsisr;
- if ((mmcra & MMCRA_SAMPLE_ENABLE) && !(ppmu->flags & PPMU_ALT_SIPR)) {
- slot = (mmcra & MMCRA_SLOT) >> MMCRA_SLOT_SHIFT;
- if (slot > 1)
- ip += 4 * (slot - 1);
- }
+ ip = mfspr(SPRN_SIAR) + perf_ip_adjust(regs);
return ip;
}
@@ -1140,7 +1224,7 @@ static void perf_counter_interrupt(struct pt_regs *regs)
int i;
struct cpu_hw_counters *cpuhw = &__get_cpu_var(cpu_hw_counters);
struct perf_counter *counter;
- long val;
+ unsigned long val;
int found = 0;
int nmi;
@@ -1148,16 +1232,9 @@ static void perf_counter_interrupt(struct pt_regs *regs)
freeze_limited_counters(cpuhw, mfspr(SPRN_PMC5),
mfspr(SPRN_PMC6));
- /*
- * Overload regs->dsisr to store MMCRA so we only need to read it once.
- */
- regs->dsisr = mfspr(SPRN_MMCRA);
+ perf_read_regs(regs);
- /*
- * If interrupts were soft-disabled when this PMU interrupt
- * occurred, treat it as an NMI.
- */
- nmi = !regs->softe;
+ nmi = perf_intr_is_nmi(regs);
if (nmi)
nmi_enter();
else
@@ -1210,54 +1287,28 @@ void hw_perf_counter_setup(int cpu)
{
struct cpu_hw_counters *cpuhw = &per_cpu(cpu_hw_counters, cpu);
+ if (!ppmu)
+ return;
memset(cpuhw, 0, sizeof(*cpuhw));
cpuhw->mmcr[0] = MMCR0_FC;
}
-extern struct power_pmu power4_pmu;
-extern struct power_pmu ppc970_pmu;
-extern struct power_pmu power5_pmu;
-extern struct power_pmu power5p_pmu;
-extern struct power_pmu power6_pmu;
-extern struct power_pmu power7_pmu;
-
-static int init_perf_counters(void)
+int register_power_pmu(struct power_pmu *pmu)
{
- unsigned long pvr;
-
- /* XXX should get this from cputable */
- pvr = mfspr(SPRN_PVR);
- switch (PVR_VER(pvr)) {
- case PV_POWER4:
- case PV_POWER4p:
- ppmu = &power4_pmu;
- break;
- case PV_970:
- case PV_970FX:
- case PV_970MP:
- ppmu = &ppc970_pmu;
- break;
- case PV_POWER5:
- ppmu = &power5_pmu;
- break;
- case PV_POWER5p:
- ppmu = &power5p_pmu;
- break;
- case 0x3e:
- ppmu = &power6_pmu;
- break;
- case 0x3f:
- ppmu = &power7_pmu;
- break;
- }
+ if (ppmu)
+ return -EBUSY; /* something's already registered */
+
+ ppmu = pmu;
+ pr_info("%s performance monitor hardware support registered\n",
+ pmu->name);
+#ifdef MSR_HV
/*
* Use FCHV to ignore kernel events if MSR.HV is set.
*/
if (mfmsr() & MSR_HV)
freeze_counters_kernel = MMCR0_FCHV;
+#endif /* MSR_HV */
return 0;
}
-
-arch_initcall(init_perf_counters);
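With init_perf_counters() gone, each PMU driver now owns its registration: it matches the CPU via cur_cpu_spec->oprofile_cpu_type instead of raw PVR values and hands its power_pmu to register_power_pmu(), which refuses a second registration. A minimal sketch of the pattern a new backend would follow (the "mycpu" names are hypothetical):

    static struct power_pmu mycpu_pmu = {
            .name       = "MYCPU",
            .n_counter  = 4,
            /* .compute_mmcr, .get_constraint, ... as above */
    };

    static int init_mycpu_pmu(void)
    {
            if (!cur_cpu_spec->oprofile_cpu_type ||
                strcmp(cur_cpu_spec->oprofile_cpu_type, "ppc/mycpu"))
                    return -ENODEV;

            return register_power_pmu(&mycpu_pmu);
    }

    arch_initcall(init_mycpu_pmu);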
diff --git a/arch/powerpc/kernel/power4-pmu.c b/arch/powerpc/kernel/power4-pmu.c
index 07bd308a5fa..3c90a3d9173 100644
--- a/arch/powerpc/kernel/power4-pmu.c
+++ b/arch/powerpc/kernel/power4-pmu.c
@@ -10,7 +10,9 @@
*/
#include <linux/kernel.h>
#include <linux/perf_counter.h>
+#include <linux/string.h>
#include <asm/reg.h>
+#include <asm/cputable.h>
/*
* Bits in event code for POWER4
@@ -179,22 +181,22 @@ static short mmcr1_adder_bits[8] = {
*/
static struct unitinfo {
- u64 value, mask;
- int unit;
- int lowerbit;
+ unsigned long value, mask;
+ int unit;
+ int lowerbit;
} p4_unitinfo[16] = {
- [PM_FPU] = { 0x44000000000000ull, 0x88000000000000ull, PM_FPU, 0 },
- [PM_ISU1] = { 0x20080000000000ull, 0x88000000000000ull, PM_ISU1, 0 },
+ [PM_FPU] = { 0x44000000000000ul, 0x88000000000000ul, PM_FPU, 0 },
+ [PM_ISU1] = { 0x20080000000000ul, 0x88000000000000ul, PM_ISU1, 0 },
[PM_ISU1_ALT] =
- { 0x20080000000000ull, 0x88000000000000ull, PM_ISU1, 0 },
- [PM_IFU] = { 0x02200000000000ull, 0x08820000000000ull, PM_IFU, 41 },
+ { 0x20080000000000ul, 0x88000000000000ul, PM_ISU1, 0 },
+ [PM_IFU] = { 0x02200000000000ul, 0x08820000000000ul, PM_IFU, 41 },
[PM_IFU_ALT] =
- { 0x02200000000000ull, 0x08820000000000ull, PM_IFU, 41 },
- [PM_IDU0] = { 0x10100000000000ull, 0x80840000000000ull, PM_IDU0, 1 },
- [PM_ISU2] = { 0x10140000000000ull, 0x80840000000000ull, PM_ISU2, 0 },
- [PM_LSU0] = { 0x01400000000000ull, 0x08800000000000ull, PM_LSU0, 0 },
- [PM_LSU1] = { 0x00000000000000ull, 0x00010000000000ull, PM_LSU1, 40 },
- [PM_GPS] = { 0x00000000000000ull, 0x00000000000000ull, PM_GPS, 0 }
+ { 0x02200000000000ul, 0x08820000000000ul, PM_IFU, 41 },
+ [PM_IDU0] = { 0x10100000000000ul, 0x80840000000000ul, PM_IDU0, 1 },
+ [PM_ISU2] = { 0x10140000000000ul, 0x80840000000000ul, PM_ISU2, 0 },
+ [PM_LSU0] = { 0x01400000000000ul, 0x08800000000000ul, PM_LSU0, 0 },
+ [PM_LSU1] = { 0x00000000000000ul, 0x00010000000000ul, PM_LSU1, 40 },
+ [PM_GPS] = { 0x00000000000000ul, 0x00000000000000ul, PM_GPS, 0 }
};
static unsigned char direct_marked_event[8] = {
@@ -249,10 +251,11 @@ static int p4_marked_instr_event(u64 event)
return (mask >> (byte * 8 + bit)) & 1;
}
-static int p4_get_constraint(u64 event, u64 *maskp, u64 *valp)
+static int p4_get_constraint(u64 event, unsigned long *maskp,
+ unsigned long *valp)
{
int pmc, byte, unit, lower, sh;
- u64 mask = 0, value = 0;
+ unsigned long mask = 0, value = 0;
int grp = -1;
pmc = (event >> PM_PMC_SH) & PM_PMC_MSK;
@@ -282,14 +285,14 @@ static int p4_get_constraint(u64 event, u64 *maskp, u64 *valp)
value |= p4_unitinfo[unit].value;
sh = p4_unitinfo[unit].lowerbit;
if (sh > 1)
- value |= (u64)lower << sh;
+ value |= (unsigned long)lower << sh;
else if (lower != sh)
return -1;
unit = p4_unitinfo[unit].unit;
/* Set byte lane select field */
mask |= 0xfULL << (28 - 4 * byte);
- value |= (u64)unit << (28 - 4 * byte);
+ value |= (unsigned long)unit << (28 - 4 * byte);
}
if (grp == 0) {
/* increment PMC1/2/5/6 field */
@@ -353,9 +356,9 @@ static int p4_get_alternatives(u64 event, unsigned int flags, u64 alt[])
}
static int p4_compute_mmcr(u64 event[], int n_ev,
- unsigned int hwc[], u64 mmcr[])
+ unsigned int hwc[], unsigned long mmcr[])
{
- u64 mmcr0 = 0, mmcr1 = 0, mmcra = 0;
+ unsigned long mmcr0 = 0, mmcr1 = 0, mmcra = 0;
unsigned int pmc, unit, byte, psel, lower;
unsigned int ttm, grp;
unsigned int pmc_inuse = 0;
@@ -429,9 +432,11 @@ static int p4_compute_mmcr(u64 event[], int n_ev,
return -1;
/* Set TTMxSEL fields. Note, units 1-3 => TTM0SEL codes 0-2 */
- mmcr1 |= (u64)(unituse[3] * 2 + unituse[2]) << MMCR1_TTM0SEL_SH;
- mmcr1 |= (u64)(unituse[7] * 3 + unituse[6] * 2) << MMCR1_TTM1SEL_SH;
- mmcr1 |= (u64)unituse[9] << MMCR1_TTM2SEL_SH;
+ mmcr1 |= (unsigned long)(unituse[3] * 2 + unituse[2])
+ << MMCR1_TTM0SEL_SH;
+ mmcr1 |= (unsigned long)(unituse[7] * 3 + unituse[6] * 2)
+ << MMCR1_TTM1SEL_SH;
+ mmcr1 |= (unsigned long)unituse[9] << MMCR1_TTM2SEL_SH;
/* Set TTCxSEL fields. */
if (unitlower & 0xe)
@@ -456,7 +461,8 @@ static int p4_compute_mmcr(u64 event[], int n_ev,
ttm = unit - 1; /* 2->1, 3->2 */
else
ttm = unit >> 2;
- mmcr1 |= (u64)ttm << (MMCR1_TD_CP_DBG0SEL_SH - 2*byte);
+ mmcr1 |= (unsigned long)ttm
+ << (MMCR1_TD_CP_DBG0SEL_SH - 2 * byte);
}
}
@@ -519,7 +525,7 @@ static int p4_compute_mmcr(u64 event[], int n_ev,
return 0;
}
-static void p4_disable_pmc(unsigned int pmc, u64 mmcr[])
+static void p4_disable_pmc(unsigned int pmc, unsigned long mmcr[])
{
/*
* Setting the PMCxSEL field to 0 disables PMC x.
@@ -583,16 +589,28 @@ static int power4_cache_events[C(MAX)][C(OP_MAX)][C(RESULT_MAX)] = {
},
};
-struct power_pmu power4_pmu = {
- .n_counter = 8,
- .max_alternatives = 5,
- .add_fields = 0x0000001100005555ull,
- .test_adder = 0x0011083300000000ull,
- .compute_mmcr = p4_compute_mmcr,
- .get_constraint = p4_get_constraint,
- .get_alternatives = p4_get_alternatives,
- .disable_pmc = p4_disable_pmc,
- .n_generic = ARRAY_SIZE(p4_generic_events),
- .generic_events = p4_generic_events,
- .cache_events = &power4_cache_events,
+static struct power_pmu power4_pmu = {
+ .name = "POWER4/4+",
+ .n_counter = 8,
+ .max_alternatives = 5,
+ .add_fields = 0x0000001100005555ul,
+ .test_adder = 0x0011083300000000ul,
+ .compute_mmcr = p4_compute_mmcr,
+ .get_constraint = p4_get_constraint,
+ .get_alternatives = p4_get_alternatives,
+ .disable_pmc = p4_disable_pmc,
+ .n_generic = ARRAY_SIZE(p4_generic_events),
+ .generic_events = p4_generic_events,
+ .cache_events = &power4_cache_events,
};
+
+static int init_power4_pmu(void)
+{
+ if (!cur_cpu_spec->oprofile_cpu_type ||
+ strcmp(cur_cpu_spec->oprofile_cpu_type, "ppc64/power4"))
+ return -ENODEV;
+
+ return register_power_pmu(&power4_pmu);
+}
+
+arch_initcall(init_power4_pmu);
diff --git a/arch/powerpc/kernel/power5+-pmu.c b/arch/powerpc/kernel/power5+-pmu.c
index 41e5d2d958d..31918af3e35 100644
--- a/arch/powerpc/kernel/power5+-pmu.c
+++ b/arch/powerpc/kernel/power5+-pmu.c
@@ -10,7 +10,9 @@
*/
#include <linux/kernel.h>
#include <linux/perf_counter.h>
+#include <linux/string.h>
#include <asm/reg.h>
+#include <asm/cputable.h>
/*
* Bits in event code for POWER5+ (POWER5 GS) and POWER5++ (POWER5 GS DD3)
@@ -126,20 +128,21 @@ static const int grsel_shift[8] = {
};
/* Masks and values for using events from the various units */
-static u64 unit_cons[PM_LASTUNIT+1][2] = {
- [PM_FPU] = { 0x3200000000ull, 0x0100000000ull },
- [PM_ISU0] = { 0x0200000000ull, 0x0080000000ull },
- [PM_ISU1] = { 0x3200000000ull, 0x3100000000ull },
- [PM_IFU] = { 0x3200000000ull, 0x2100000000ull },
- [PM_IDU] = { 0x0e00000000ull, 0x0040000000ull },
- [PM_GRS] = { 0x0e00000000ull, 0x0c40000000ull },
+static unsigned long unit_cons[PM_LASTUNIT+1][2] = {
+ [PM_FPU] = { 0x3200000000ul, 0x0100000000ul },
+ [PM_ISU0] = { 0x0200000000ul, 0x0080000000ul },
+ [PM_ISU1] = { 0x3200000000ul, 0x3100000000ul },
+ [PM_IFU] = { 0x3200000000ul, 0x2100000000ul },
+ [PM_IDU] = { 0x0e00000000ul, 0x0040000000ul },
+ [PM_GRS] = { 0x0e00000000ul, 0x0c40000000ul },
};
-static int power5p_get_constraint(u64 event, u64 *maskp, u64 *valp)
+static int power5p_get_constraint(u64 event, unsigned long *maskp,
+ unsigned long *valp)
{
int pmc, byte, unit, sh;
int bit, fmask;
- u64 mask = 0, value = 0;
+ unsigned long mask = 0, value = 0;
pmc = (event >> PM_PMC_SH) & PM_PMC_MSK;
if (pmc) {
@@ -171,17 +174,18 @@ static int power5p_get_constraint(u64 event, u64 *maskp, u64 *valp)
bit = event & 7;
fmask = (bit == 6)? 7: 3;
sh = grsel_shift[bit];
- mask |= (u64)fmask << sh;
- value |= (u64)((event >> PM_GRS_SH) & fmask) << sh;
+ mask |= (unsigned long)fmask << sh;
+ value |= (unsigned long)((event >> PM_GRS_SH) & fmask)
+ << sh;
}
/* Set byte lane select field */
- mask |= 0xfULL << (24 - 4 * byte);
- value |= (u64)unit << (24 - 4 * byte);
+ mask |= 0xfUL << (24 - 4 * byte);
+ value |= (unsigned long)unit << (24 - 4 * byte);
}
if (pmc < 5) {
/* need a counter from PMC1-4 set */
- mask |= 0x8000000000000ull;
- value |= 0x1000000000000ull;
+ mask |= 0x8000000000000ul;
+ value |= 0x1000000000000ul;
}
*maskp = mask;
*valp = value;
@@ -452,10 +456,10 @@ static int power5p_marked_instr_event(u64 event)
}
static int power5p_compute_mmcr(u64 event[], int n_ev,
- unsigned int hwc[], u64 mmcr[])
+ unsigned int hwc[], unsigned long mmcr[])
{
- u64 mmcr1 = 0;
- u64 mmcra = 0;
+ unsigned long mmcr1 = 0;
+ unsigned long mmcra = 0;
unsigned int pmc, unit, byte, psel;
unsigned int ttm;
int i, isbus, bit, grsel;
@@ -517,7 +521,7 @@ static int power5p_compute_mmcr(u64 event[], int n_ev,
continue;
if (ttmuse++)
return -1;
- mmcr1 |= (u64)i << MMCR1_TTM0SEL_SH;
+ mmcr1 |= (unsigned long)i << MMCR1_TTM0SEL_SH;
}
ttmuse = 0;
for (; i <= PM_GRS; ++i) {
@@ -525,7 +529,7 @@ static int power5p_compute_mmcr(u64 event[], int n_ev,
continue;
if (ttmuse++)
return -1;
- mmcr1 |= (u64)(i & 3) << MMCR1_TTM1SEL_SH;
+ mmcr1 |= (unsigned long)(i & 3) << MMCR1_TTM1SEL_SH;
}
if (ttmuse > 1)
return -1;
@@ -540,10 +544,11 @@ static int power5p_compute_mmcr(u64 event[], int n_ev,
unit = PM_ISU0_ALT;
} else if (unit == PM_LSU1 + 1) {
/* select lower word of LSU1 for this byte */
- mmcr1 |= 1ull << (MMCR1_TTM3SEL_SH + 3 - byte);
+ mmcr1 |= 1ul << (MMCR1_TTM3SEL_SH + 3 - byte);
}
ttm = unit >> 2;
- mmcr1 |= (u64)ttm << (MMCR1_TD_CP_DBG0SEL_SH - 2 * byte);
+ mmcr1 |= (unsigned long)ttm
+ << (MMCR1_TD_CP_DBG0SEL_SH - 2 * byte);
}
/* Second pass: assign PMCs, set PMCxSEL and PMCx_ADDER_SEL fields */
@@ -568,7 +573,7 @@ static int power5p_compute_mmcr(u64 event[], int n_ev,
if (isbus && (byte & 2) &&
(psel == 8 || psel == 0x10 || psel == 0x28))
/* add events on higher-numbered bus */
- mmcr1 |= 1ull << (MMCR1_PMC1_ADDER_SEL_SH - pmc);
+ mmcr1 |= 1ul << (MMCR1_PMC1_ADDER_SEL_SH - pmc);
} else {
/* Instructions or run cycles on PMC5/6 */
--pmc;
@@ -576,7 +581,7 @@ static int power5p_compute_mmcr(u64 event[], int n_ev,
if (isbus && unit == PM_GRS) {
bit = psel & 7;
grsel = (event[i] >> PM_GRS_SH) & PM_GRS_MSK;
- mmcr1 |= (u64)grsel << grsel_shift[bit];
+ mmcr1 |= (unsigned long)grsel << grsel_shift[bit];
}
if (power5p_marked_instr_event(event[i]))
mmcra |= MMCRA_SAMPLE_ENABLE;
@@ -599,7 +604,7 @@ static int power5p_compute_mmcr(u64 event[], int n_ev,
return 0;
}
-static void power5p_disable_pmc(unsigned int pmc, u64 mmcr[])
+static void power5p_disable_pmc(unsigned int pmc, unsigned long mmcr[])
{
if (pmc <= 3)
mmcr[1] &= ~(0x7fUL << MMCR1_PMCSEL_SH(pmc));
@@ -654,18 +659,31 @@ static int power5p_cache_events[C(MAX)][C(OP_MAX)][C(RESULT_MAX)] = {
},
};
-struct power_pmu power5p_pmu = {
- .n_counter = 6,
- .max_alternatives = MAX_ALT,
- .add_fields = 0x7000000000055ull,
- .test_adder = 0x3000040000000ull,
- .compute_mmcr = power5p_compute_mmcr,
- .get_constraint = power5p_get_constraint,
- .get_alternatives = power5p_get_alternatives,
- .disable_pmc = power5p_disable_pmc,
- .limited_pmc_event = power5p_limited_pmc_event,
- .flags = PPMU_LIMITED_PMC5_6,
- .n_generic = ARRAY_SIZE(power5p_generic_events),
- .generic_events = power5p_generic_events,
- .cache_events = &power5p_cache_events,
+static struct power_pmu power5p_pmu = {
+ .name = "POWER5+/++",
+ .n_counter = 6,
+ .max_alternatives = MAX_ALT,
+ .add_fields = 0x7000000000055ul,
+ .test_adder = 0x3000040000000ul,
+ .compute_mmcr = power5p_compute_mmcr,
+ .get_constraint = power5p_get_constraint,
+ .get_alternatives = power5p_get_alternatives,
+ .disable_pmc = power5p_disable_pmc,
+ .limited_pmc_event = power5p_limited_pmc_event,
+ .flags = PPMU_LIMITED_PMC5_6,
+ .n_generic = ARRAY_SIZE(power5p_generic_events),
+ .generic_events = power5p_generic_events,
+ .cache_events = &power5p_cache_events,
};
+
+static int init_power5p_pmu(void)
+{
+ if (!cur_cpu_spec->oprofile_cpu_type ||
+ (strcmp(cur_cpu_spec->oprofile_cpu_type, "ppc64/power5+")
+ && strcmp(cur_cpu_spec->oprofile_cpu_type, "ppc64/power5++")))
+ return -ENODEV;
+
+ return register_power_pmu(&power5p_pmu);
+}
+
+arch_initcall(init_power5p_pmu);
diff --git a/arch/powerpc/kernel/power5-pmu.c b/arch/powerpc/kernel/power5-pmu.c
index 05600b66221..867f6f66396 100644
--- a/arch/powerpc/kernel/power5-pmu.c
+++ b/arch/powerpc/kernel/power5-pmu.c
@@ -10,7 +10,9 @@
*/
#include <linux/kernel.h>
#include <linux/perf_counter.h>
+#include <linux/string.h>
#include <asm/reg.h>
+#include <asm/cputable.h>
/*
* Bits in event code for POWER5 (not POWER5++)
@@ -130,20 +132,21 @@ static const int grsel_shift[8] = {
};
/* Masks and values for using events from the various units */
-static u64 unit_cons[PM_LASTUNIT+1][2] = {
- [PM_FPU] = { 0xc0002000000000ull, 0x00001000000000ull },
- [PM_ISU0] = { 0x00002000000000ull, 0x00000800000000ull },
- [PM_ISU1] = { 0xc0002000000000ull, 0xc0001000000000ull },
- [PM_IFU] = { 0xc0002000000000ull, 0x80001000000000ull },
- [PM_IDU] = { 0x30002000000000ull, 0x00000400000000ull },
- [PM_GRS] = { 0x30002000000000ull, 0x30000400000000ull },
+static unsigned long unit_cons[PM_LASTUNIT+1][2] = {
+ [PM_FPU] = { 0xc0002000000000ul, 0x00001000000000ul },
+ [PM_ISU0] = { 0x00002000000000ul, 0x00000800000000ul },
+ [PM_ISU1] = { 0xc0002000000000ul, 0xc0001000000000ul },
+ [PM_IFU] = { 0xc0002000000000ul, 0x80001000000000ul },
+ [PM_IDU] = { 0x30002000000000ul, 0x00000400000000ul },
+ [PM_GRS] = { 0x30002000000000ul, 0x30000400000000ul },
};
-static int power5_get_constraint(u64 event, u64 *maskp, u64 *valp)
+static int power5_get_constraint(u64 event, unsigned long *maskp,
+ unsigned long *valp)
{
int pmc, byte, unit, sh;
int bit, fmask;
- u64 mask = 0, value = 0;
+ unsigned long mask = 0, value = 0;
int grp = -1;
pmc = (event >> PM_PMC_SH) & PM_PMC_MSK;
@@ -178,8 +181,9 @@ static int power5_get_constraint(u64 event, u64 *maskp, u64 *valp)
bit = event & 7;
fmask = (bit == 6)? 7: 3;
sh = grsel_shift[bit];
- mask |= (u64)fmask << sh;
- value |= (u64)((event >> PM_GRS_SH) & fmask) << sh;
+ mask |= (unsigned long)fmask << sh;
+ value |= (unsigned long)((event >> PM_GRS_SH) & fmask)
+ << sh;
}
/*
* Bus events on bytes 0 and 2 can be counted
@@ -188,22 +192,22 @@ static int power5_get_constraint(u64 event, u64 *maskp, u64 *valp)
if (!pmc)
grp = byte & 1;
/* Set byte lane select field */
- mask |= 0xfULL << (24 - 4 * byte);
- value |= (u64)unit << (24 - 4 * byte);
+ mask |= 0xfUL << (24 - 4 * byte);
+ value |= (unsigned long)unit << (24 - 4 * byte);
}
if (grp == 0) {
/* increment PMC1/2 field */
- mask |= 0x200000000ull;
- value |= 0x080000000ull;
+ mask |= 0x200000000ul;
+ value |= 0x080000000ul;
} else if (grp == 1) {
/* increment PMC3/4 field */
- mask |= 0x40000000ull;
- value |= 0x10000000ull;
+ mask |= 0x40000000ul;
+ value |= 0x10000000ul;
}
if (pmc < 5) {
/* need a counter from PMC1-4 set */
- mask |= 0x8000000000000ull;
- value |= 0x1000000000000ull;
+ mask |= 0x8000000000000ul;
+ value |= 0x1000000000000ul;
}
*maskp = mask;
*valp = value;
@@ -383,10 +387,10 @@ static int power5_marked_instr_event(u64 event)
}
static int power5_compute_mmcr(u64 event[], int n_ev,
- unsigned int hwc[], u64 mmcr[])
+ unsigned int hwc[], unsigned long mmcr[])
{
- u64 mmcr1 = 0;
- u64 mmcra = 0;
+ unsigned long mmcr1 = 0;
+ unsigned long mmcra = 0;
unsigned int pmc, unit, byte, psel;
unsigned int ttm, grp;
int i, isbus, bit, grsel;
@@ -457,7 +461,7 @@ static int power5_compute_mmcr(u64 event[], int n_ev,
continue;
if (ttmuse++)
return -1;
- mmcr1 |= (u64)i << MMCR1_TTM0SEL_SH;
+ mmcr1 |= (unsigned long)i << MMCR1_TTM0SEL_SH;
}
ttmuse = 0;
for (; i <= PM_GRS; ++i) {
@@ -465,7 +469,7 @@ static int power5_compute_mmcr(u64 event[], int n_ev,
continue;
if (ttmuse++)
return -1;
- mmcr1 |= (u64)(i & 3) << MMCR1_TTM1SEL_SH;
+ mmcr1 |= (unsigned long)(i & 3) << MMCR1_TTM1SEL_SH;
}
if (ttmuse > 1)
return -1;
@@ -480,10 +484,11 @@ static int power5_compute_mmcr(u64 event[], int n_ev,
unit = PM_ISU0_ALT;
} else if (unit == PM_LSU1 + 1) {
/* select lower word of LSU1 for this byte */
- mmcr1 |= 1ull << (MMCR1_TTM3SEL_SH + 3 - byte);
+ mmcr1 |= 1ul << (MMCR1_TTM3SEL_SH + 3 - byte);
}
ttm = unit >> 2;
- mmcr1 |= (u64)ttm << (MMCR1_TD_CP_DBG0SEL_SH - 2 * byte);
+ mmcr1 |= (unsigned long)ttm
+ << (MMCR1_TD_CP_DBG0SEL_SH - 2 * byte);
}
/* Second pass: assign PMCs, set PMCxSEL and PMCx_ADDER_SEL fields */
@@ -513,7 +518,7 @@ static int power5_compute_mmcr(u64 event[], int n_ev,
--pmc;
if ((psel == 8 || psel == 0x10) && isbus && (byte & 2))
/* add events on higher-numbered bus */
- mmcr1 |= 1ull << (MMCR1_PMC1_ADDER_SEL_SH - pmc);
+ mmcr1 |= 1ul << (MMCR1_PMC1_ADDER_SEL_SH - pmc);
} else {
/* Instructions or run cycles on PMC5/6 */
--pmc;
@@ -521,7 +526,7 @@ static int power5_compute_mmcr(u64 event[], int n_ev,
if (isbus && unit == PM_GRS) {
bit = psel & 7;
grsel = (event[i] >> PM_GRS_SH) & PM_GRS_MSK;
- mmcr1 |= (u64)grsel << grsel_shift[bit];
+ mmcr1 |= (unsigned long)grsel << grsel_shift[bit];
}
if (power5_marked_instr_event(event[i]))
mmcra |= MMCRA_SAMPLE_ENABLE;
@@ -541,7 +546,7 @@ static int power5_compute_mmcr(u64 event[], int n_ev,
return 0;
}
-static void power5_disable_pmc(unsigned int pmc, u64 mmcr[])
+static void power5_disable_pmc(unsigned int pmc, unsigned long mmcr[])
{
if (pmc <= 3)
mmcr[1] &= ~(0x7fUL << MMCR1_PMCSEL_SH(pmc));
@@ -596,16 +601,28 @@ static int power5_cache_events[C(MAX)][C(OP_MAX)][C(RESULT_MAX)] = {
},
};
-struct power_pmu power5_pmu = {
- .n_counter = 6,
- .max_alternatives = MAX_ALT,
- .add_fields = 0x7000090000555ull,
- .test_adder = 0x3000490000000ull,
- .compute_mmcr = power5_compute_mmcr,
- .get_constraint = power5_get_constraint,
- .get_alternatives = power5_get_alternatives,
- .disable_pmc = power5_disable_pmc,
- .n_generic = ARRAY_SIZE(power5_generic_events),
- .generic_events = power5_generic_events,
- .cache_events = &power5_cache_events,
+static struct power_pmu power5_pmu = {
+ .name = "POWER5",
+ .n_counter = 6,
+ .max_alternatives = MAX_ALT,
+ .add_fields = 0x7000090000555ul,
+ .test_adder = 0x3000490000000ul,
+ .compute_mmcr = power5_compute_mmcr,
+ .get_constraint = power5_get_constraint,
+ .get_alternatives = power5_get_alternatives,
+ .disable_pmc = power5_disable_pmc,
+ .n_generic = ARRAY_SIZE(power5_generic_events),
+ .generic_events = power5_generic_events,
+ .cache_events = &power5_cache_events,
};
+
+static int init_power5_pmu(void)
+{
+ if (!cur_cpu_spec->oprofile_cpu_type ||
+ strcmp(cur_cpu_spec->oprofile_cpu_type, "ppc64/power5"))
+ return -ENODEV;
+
+ return register_power_pmu(&power5_pmu);
+}
+
+arch_initcall(init_power5_pmu);
diff --git a/arch/powerpc/kernel/power6-pmu.c b/arch/powerpc/kernel/power6-pmu.c
index 46f74bebcfd..fa21890531d 100644
--- a/arch/powerpc/kernel/power6-pmu.c
+++ b/arch/powerpc/kernel/power6-pmu.c
@@ -10,7 +10,9 @@
*/
#include <linux/kernel.h>
#include <linux/perf_counter.h>
+#include <linux/string.h>
#include <asm/reg.h>
+#include <asm/cputable.h>
/*
* Bits in event code for POWER6
@@ -41,9 +43,9 @@
#define MMCR1_NESTSEL_SH 45
#define MMCR1_NESTSEL_MSK 0x7
#define MMCR1_NESTSEL(m) (((m) >> MMCR1_NESTSEL_SH) & MMCR1_NESTSEL_MSK)
-#define MMCR1_PMC1_LLA ((u64)1 << 44)
-#define MMCR1_PMC1_LLA_VALUE ((u64)1 << 39)
-#define MMCR1_PMC1_ADDR_SEL ((u64)1 << 35)
+#define MMCR1_PMC1_LLA (1ul << 44)
+#define MMCR1_PMC1_LLA_VALUE (1ul << 39)
+#define MMCR1_PMC1_ADDR_SEL (1ul << 35)
#define MMCR1_PMC1SEL_SH 24
#define MMCR1_PMCSEL_SH(n) (MMCR1_PMC1SEL_SH - (n) * 8)
#define MMCR1_PMCSEL_MSK 0xff
@@ -173,10 +175,10 @@ static int power6_marked_instr_event(u64 event)
* Assign PMC numbers and compute MMCR1 value for a set of events
*/
static int p6_compute_mmcr(u64 event[], int n_ev,
- unsigned int hwc[], u64 mmcr[])
+ unsigned int hwc[], unsigned long mmcr[])
{
- u64 mmcr1 = 0;
- u64 mmcra = 0;
+ unsigned long mmcr1 = 0;
+ unsigned long mmcra = 0;
int i;
unsigned int pmc, ev, b, u, s, psel;
unsigned int ttmset = 0;
@@ -215,7 +217,7 @@ static int p6_compute_mmcr(u64 event[], int n_ev,
/* check for conflict on this byte of event bus */
if ((ttmset & (1 << b)) && MMCR1_TTMSEL(mmcr1, b) != u)
return -1;
- mmcr1 |= (u64)u << MMCR1_TTMSEL_SH(b);
+ mmcr1 |= (unsigned long)u << MMCR1_TTMSEL_SH(b);
ttmset |= 1 << b;
if (u == 5) {
/* Nest events have a further mux */
@@ -224,7 +226,7 @@ static int p6_compute_mmcr(u64 event[], int n_ev,
MMCR1_NESTSEL(mmcr1) != s)
return -1;
ttmset |= 0x10;
- mmcr1 |= (u64)s << MMCR1_NESTSEL_SH;
+ mmcr1 |= (unsigned long)s << MMCR1_NESTSEL_SH;
}
if (0x30 <= psel && psel <= 0x3d) {
/* these need the PMCx_ADDR_SEL bits */
@@ -243,7 +245,7 @@ static int p6_compute_mmcr(u64 event[], int n_ev,
if (power6_marked_instr_event(event[i]))
mmcra |= MMCRA_SAMPLE_ENABLE;
if (pmc < 4)
- mmcr1 |= (u64)psel << MMCR1_PMCSEL_SH(pmc);
+ mmcr1 |= (unsigned long)psel << MMCR1_PMCSEL_SH(pmc);
}
mmcr[0] = 0;
if (pmc_inuse & 1)
@@ -265,10 +267,11 @@ static int p6_compute_mmcr(u64 event[], int n_ev,
* 20-23, 24-27, 28-31 ditto for bytes 1, 2, 3
* 32-34 select field: nest (subunit) event selector
*/
-static int p6_get_constraint(u64 event, u64 *maskp, u64 *valp)
+static int p6_get_constraint(u64 event, unsigned long *maskp,
+ unsigned long *valp)
{
int pmc, byte, sh, subunit;
- u64 mask = 0, value = 0;
+ unsigned long mask = 0, value = 0;
pmc = (event >> PM_PMC_SH) & PM_PMC_MSK;
if (pmc) {
@@ -282,11 +285,11 @@ static int p6_get_constraint(u64 event, u64 *maskp, u64 *valp)
byte = (event >> PM_BYTE_SH) & PM_BYTE_MSK;
sh = byte * 4 + (16 - PM_UNIT_SH);
mask |= PM_UNIT_MSKS << sh;
- value |= (u64)(event & PM_UNIT_MSKS) << sh;
+ value |= (unsigned long)(event & PM_UNIT_MSKS) << sh;
if ((event & PM_UNIT_MSKS) == (5 << PM_UNIT_SH)) {
subunit = (event >> PM_SUBUNIT_SH) & PM_SUBUNIT_MSK;
- mask |= (u64)PM_SUBUNIT_MSK << 32;
- value |= (u64)subunit << 32;
+ mask |= (unsigned long)PM_SUBUNIT_MSK << 32;
+ value |= (unsigned long)subunit << 32;
}
}
if (pmc <= 4) {
@@ -458,7 +461,7 @@ static int p6_get_alternatives(u64 event, unsigned int flags, u64 alt[])
return nalt;
}
-static void p6_disable_pmc(unsigned int pmc, u64 mmcr[])
+static void p6_disable_pmc(unsigned int pmc, unsigned long mmcr[])
{
/* Set PMCxSEL to 0 to disable PMCx */
if (pmc <= 3)
@@ -515,18 +518,30 @@ static int power6_cache_events[C(MAX)][C(OP_MAX)][C(RESULT_MAX)] = {
},
};
-struct power_pmu power6_pmu = {
- .n_counter = 6,
- .max_alternatives = MAX_ALT,
- .add_fields = 0x1555,
- .test_adder = 0x3000,
- .compute_mmcr = p6_compute_mmcr,
- .get_constraint = p6_get_constraint,
- .get_alternatives = p6_get_alternatives,
- .disable_pmc = p6_disable_pmc,
- .limited_pmc_event = p6_limited_pmc_event,
- .flags = PPMU_LIMITED_PMC5_6 | PPMU_ALT_SIPR,
- .n_generic = ARRAY_SIZE(power6_generic_events),
- .generic_events = power6_generic_events,
- .cache_events = &power6_cache_events,
+static struct power_pmu power6_pmu = {
+ .name = "POWER6",
+ .n_counter = 6,
+ .max_alternatives = MAX_ALT,
+ .add_fields = 0x1555,
+ .test_adder = 0x3000,
+ .compute_mmcr = p6_compute_mmcr,
+ .get_constraint = p6_get_constraint,
+ .get_alternatives = p6_get_alternatives,
+ .disable_pmc = p6_disable_pmc,
+ .limited_pmc_event = p6_limited_pmc_event,
+ .flags = PPMU_LIMITED_PMC5_6 | PPMU_ALT_SIPR,
+ .n_generic = ARRAY_SIZE(power6_generic_events),
+ .generic_events = power6_generic_events,
+ .cache_events = &power6_cache_events,
};
+
+static int init_power6_pmu(void)
+{
+ if (!cur_cpu_spec->oprofile_cpu_type ||
+ strcmp(cur_cpu_spec->oprofile_cpu_type, "ppc64/power6"))
+ return -ENODEV;
+
+ return register_power_pmu(&power6_pmu);
+}
+
+arch_initcall(init_power6_pmu);
diff --git a/arch/powerpc/kernel/power7-pmu.c b/arch/powerpc/kernel/power7-pmu.c
index b72e7a19d05..018d094d92f 100644
--- a/arch/powerpc/kernel/power7-pmu.c
+++ b/arch/powerpc/kernel/power7-pmu.c
@@ -10,7 +10,9 @@
*/
#include <linux/kernel.h>
#include <linux/perf_counter.h>
+#include <linux/string.h>
#include <asm/reg.h>
+#include <asm/cputable.h>
/*
* Bits in event code for POWER7
@@ -71,10 +73,11 @@
* 0-9: Count of events needing PMC1..PMC5
*/
-static int power7_get_constraint(u64 event, u64 *maskp, u64 *valp)
+static int power7_get_constraint(u64 event, unsigned long *maskp,
+ unsigned long *valp)
{
int pmc, sh;
- u64 mask = 0, value = 0;
+ unsigned long mask = 0, value = 0;
pmc = (event >> PM_PMC_SH) & PM_PMC_MSK;
if (pmc) {
@@ -224,10 +227,10 @@ static int power7_marked_instr_event(u64 event)
}
static int power7_compute_mmcr(u64 event[], int n_ev,
- unsigned int hwc[], u64 mmcr[])
+ unsigned int hwc[], unsigned long mmcr[])
{
- u64 mmcr1 = 0;
- u64 mmcra = 0;
+ unsigned long mmcr1 = 0;
+ unsigned long mmcra = 0;
unsigned int pmc, unit, combine, l2sel, psel;
unsigned int pmc_inuse = 0;
int i;
@@ -265,11 +268,14 @@ static int power7_compute_mmcr(u64 event[], int n_ev,
--pmc;
}
if (pmc <= 3) {
- mmcr1 |= (u64) unit << (MMCR1_TTM0SEL_SH - 4 * pmc);
- mmcr1 |= (u64) combine << (MMCR1_PMC1_COMBINE_SH - pmc);
+ mmcr1 |= (unsigned long) unit
+ << (MMCR1_TTM0SEL_SH - 4 * pmc);
+ mmcr1 |= (unsigned long) combine
+ << (MMCR1_PMC1_COMBINE_SH - pmc);
mmcr1 |= psel << MMCR1_PMCSEL_SH(pmc);
if (unit == 6) /* L2 events */
- mmcr1 |= (u64) l2sel << MMCR1_L2SEL_SH;
+ mmcr1 |= (unsigned long) l2sel
+ << MMCR1_L2SEL_SH;
}
if (power7_marked_instr_event(event[i]))
mmcra |= MMCRA_SAMPLE_ENABLE;
@@ -287,10 +293,10 @@ static int power7_compute_mmcr(u64 event[], int n_ev,
return 0;
}
-static void power7_disable_pmc(unsigned int pmc, u64 mmcr[])
+static void power7_disable_pmc(unsigned int pmc, unsigned long mmcr[])
{
if (pmc <= 3)
- mmcr[1] &= ~(0xffULL << MMCR1_PMCSEL_SH(pmc));
+ mmcr[1] &= ~(0xffUL << MMCR1_PMCSEL_SH(pmc));
}
static int power7_generic_events[] = {
@@ -311,7 +317,7 @@ static int power7_generic_events[] = {
*/
static int power7_cache_events[C(MAX)][C(OP_MAX)][C(RESULT_MAX)] = {
[C(L1D)] = { /* RESULT_ACCESS RESULT_MISS */
- [C(OP_READ)] = { 0x400f0, 0xc880 },
+ [C(OP_READ)] = { 0xc880, 0x400f0 },
[C(OP_WRITE)] = { 0, 0x300f0 },
[C(OP_PREFETCH)] = { 0xd8b8, 0 },
},
@@ -321,8 +327,8 @@ static int power7_cache_events[C(MAX)][C(OP_MAX)][C(RESULT_MAX)] = {
[C(OP_PREFETCH)] = { 0x408a, 0 },
},
[C(LL)] = { /* RESULT_ACCESS RESULT_MISS */
- [C(OP_READ)] = { 0x6080, 0x6084 },
- [C(OP_WRITE)] = { 0x6082, 0x6086 },
+ [C(OP_READ)] = { 0x16080, 0x26080 },
+ [C(OP_WRITE)] = { 0x16082, 0x26082 },
[C(OP_PREFETCH)] = { 0, 0 },
},
[C(DTLB)] = { /* RESULT_ACCESS RESULT_MISS */
@@ -342,16 +348,29 @@ static int power7_cache_events[C(MAX)][C(OP_MAX)][C(RESULT_MAX)] = {
},
};
-struct power_pmu power7_pmu = {
- .n_counter = 6,
- .max_alternatives = MAX_ALT + 1,
- .add_fields = 0x1555ull,
- .test_adder = 0x3000ull,
- .compute_mmcr = power7_compute_mmcr,
- .get_constraint = power7_get_constraint,
- .get_alternatives = power7_get_alternatives,
- .disable_pmc = power7_disable_pmc,
- .n_generic = ARRAY_SIZE(power7_generic_events),
- .generic_events = power7_generic_events,
- .cache_events = &power7_cache_events,
+static struct power_pmu power7_pmu = {
+ .name = "POWER7",
+ .n_counter = 6,
+ .max_alternatives = MAX_ALT + 1,
+ .add_fields = 0x1555ul,
+ .test_adder = 0x3000ul,
+ .compute_mmcr = power7_compute_mmcr,
+ .get_constraint = power7_get_constraint,
+ .get_alternatives = power7_get_alternatives,
+ .disable_pmc = power7_disable_pmc,
+ .flags = PPMU_ALT_SIPR,
+ .n_generic = ARRAY_SIZE(power7_generic_events),
+ .generic_events = power7_generic_events,
+ .cache_events = &power7_cache_events,
};
+
+static int init_power7_pmu(void)
+{
+ if (!cur_cpu_spec->oprofile_cpu_type ||
+ strcmp(cur_cpu_spec->oprofile_cpu_type, "ppc64/power7"))
+ return -ENODEV;
+
+ return register_power_pmu(&power7_pmu);
+}
+
+arch_initcall(init_power7_pmu);
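Note the two corrections folded into power7_cache_events above: the L1D read access/miss codes are swapped to match the RESULT_ACCESS / RESULT_MISS column order, and the LL (last-level cache) codes are corrected. These tables are indexed as [cache][op][result] when the core requests a generic hardware cache event; under the perf_counter ABI of this period, attr.config packs the three indices a byte apart. A sketch of the decode, assuming that packing (the helper name is hypothetical; a zero table entry is taken to mean the combination has no hardware event):

	/* Illustrative decode of a PERF_TYPE_HW_CACHE config into the
	 * indices used by power7_cache_events.  Packing assumed from
	 * the perf_counter ABI of this era, not from this diff. */
	static int decode_cache_config(u64 config, int *cache, int *op, int *result)
	{
		*cache  = config & 0xff;
		*op     = (config >> 8) & 0xff;
		*result = (config >> 16) & 0xff;

		if (*cache >= C(MAX) || *op >= C(OP_MAX) || *result >= C(RESULT_MAX))
			return -EINVAL;
		return 0;	/* caller reads power7_cache_events[*cache][*op][*result] */
	}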
diff --git a/arch/powerpc/kernel/ppc970-pmu.c b/arch/powerpc/kernel/ppc970-pmu.c
index ba0a357a89f..75dccb71a04 100644
--- a/arch/powerpc/kernel/ppc970-pmu.c
+++ b/arch/powerpc/kernel/ppc970-pmu.c
@@ -11,6 +11,7 @@
#include <linux/string.h>
#include <linux/perf_counter.h>
#include <asm/reg.h>
+#include <asm/cputable.h>
/*
* Bits in event code for PPC970
@@ -183,7 +184,7 @@ static int p970_marked_instr_event(u64 event)
}
/* Masks and values for using events from the various units */
-static u64 unit_cons[PM_LASTUNIT+1][2] = {
+static unsigned long unit_cons[PM_LASTUNIT+1][2] = {
[PM_FPU] = { 0xc80000000000ull, 0x040000000000ull },
[PM_VPU] = { 0xc80000000000ull, 0xc40000000000ull },
[PM_ISU] = { 0x080000000000ull, 0x020000000000ull },
@@ -192,10 +193,11 @@ static u64 unit_cons[PM_LASTUNIT+1][2] = {
[PM_STS] = { 0x380000000000ull, 0x310000000000ull },
};
-static int p970_get_constraint(u64 event, u64 *maskp, u64 *valp)
+static int p970_get_constraint(u64 event, unsigned long *maskp,
+ unsigned long *valp)
{
int pmc, byte, unit, sh, spcsel;
- u64 mask = 0, value = 0;
+ unsigned long mask = 0, value = 0;
int grp = -1;
pmc = (event >> PM_PMC_SH) & PM_PMC_MSK;
@@ -222,7 +224,7 @@ static int p970_get_constraint(u64 event, u64 *maskp, u64 *valp)
grp = byte & 1;
/* Set byte lane select field */
mask |= 0xfULL << (28 - 4 * byte);
- value |= (u64)unit << (28 - 4 * byte);
+ value |= (unsigned long)unit << (28 - 4 * byte);
}
if (grp == 0) {
/* increment PMC1/2/5/6 field */
@@ -236,7 +238,7 @@ static int p970_get_constraint(u64 event, u64 *maskp, u64 *valp)
spcsel = (event >> PM_SPCSEL_SH) & PM_SPCSEL_MSK;
if (spcsel) {
mask |= 3ull << 48;
- value |= (u64)spcsel << 48;
+ value |= (unsigned long)spcsel << 48;
}
*maskp = mask;
*valp = value;
@@ -257,9 +259,9 @@ static int p970_get_alternatives(u64 event, unsigned int flags, u64 alt[])
}
static int p970_compute_mmcr(u64 event[], int n_ev,
- unsigned int hwc[], u64 mmcr[])
+ unsigned int hwc[], unsigned long mmcr[])
{
- u64 mmcr0 = 0, mmcr1 = 0, mmcra = 0;
+ unsigned long mmcr0 = 0, mmcr1 = 0, mmcra = 0;
unsigned int pmc, unit, byte, psel;
unsigned int ttm, grp;
unsigned int pmc_inuse = 0;
@@ -320,7 +322,7 @@ static int p970_compute_mmcr(u64 event[], int n_ev,
continue;
ttm = unitmap[i];
++ttmuse[(ttm >> 2) & 1];
- mmcr1 |= (u64)(ttm & ~4) << MMCR1_TTM1SEL_SH;
+ mmcr1 |= (unsigned long)(ttm & ~4) << MMCR1_TTM1SEL_SH;
}
/* Check only one unit per TTMx */
if (ttmuse[0] > 1 || ttmuse[1] > 1)
@@ -340,7 +342,8 @@ static int p970_compute_mmcr(u64 event[], int n_ev,
if (unit == PM_LSU1L && byte >= 2)
mmcr1 |= 1ull << (MMCR1_TTM3SEL_SH + 3 - byte);
}
- mmcr1 |= (u64)ttm << (MMCR1_TD_CP_DBG0SEL_SH - 2 * byte);
+ mmcr1 |= (unsigned long)ttm
+ << (MMCR1_TD_CP_DBG0SEL_SH - 2 * byte);
}
/* Second pass: assign PMCs, set PMCxSEL and PMCx_ADDER_SEL fields */
@@ -386,7 +389,8 @@ static int p970_compute_mmcr(u64 event[], int n_ev,
for (pmc = 0; pmc < 2; ++pmc)
mmcr0 |= pmcsel[pmc] << (MMCR0_PMC1SEL_SH - 7 * pmc);
for (; pmc < 8; ++pmc)
- mmcr1 |= (u64)pmcsel[pmc] << (MMCR1_PMC3SEL_SH - 5 * (pmc - 2));
+ mmcr1 |= (unsigned long)pmcsel[pmc]
+ << (MMCR1_PMC3SEL_SH - 5 * (pmc - 2));
if (pmc_inuse & 1)
mmcr0 |= MMCR0_PMC1CE;
if (pmc_inuse & 0xfe)
@@ -401,7 +405,7 @@ static int p970_compute_mmcr(u64 event[], int n_ev,
return 0;
}
-static void p970_disable_pmc(unsigned int pmc, u64 mmcr[])
+static void p970_disable_pmc(unsigned int pmc, unsigned long mmcr[])
{
int shift, i;
@@ -467,16 +471,29 @@ static int ppc970_cache_events[C(MAX)][C(OP_MAX)][C(RESULT_MAX)] = {
},
};
-struct power_pmu ppc970_pmu = {
- .n_counter = 8,
- .max_alternatives = 2,
- .add_fields = 0x001100005555ull,
- .test_adder = 0x013300000000ull,
- .compute_mmcr = p970_compute_mmcr,
- .get_constraint = p970_get_constraint,
- .get_alternatives = p970_get_alternatives,
- .disable_pmc = p970_disable_pmc,
- .n_generic = ARRAY_SIZE(ppc970_generic_events),
- .generic_events = ppc970_generic_events,
- .cache_events = &ppc970_cache_events,
+static struct power_pmu ppc970_pmu = {
+ .name = "PPC970/FX/MP",
+ .n_counter = 8,
+ .max_alternatives = 2,
+ .add_fields = 0x001100005555ull,
+ .test_adder = 0x013300000000ull,
+ .compute_mmcr = p970_compute_mmcr,
+ .get_constraint = p970_get_constraint,
+ .get_alternatives = p970_get_alternatives,
+ .disable_pmc = p970_disable_pmc,
+ .n_generic = ARRAY_SIZE(ppc970_generic_events),
+ .generic_events = ppc970_generic_events,
+ .cache_events = &ppc970_cache_events,
};
+
+static int init_ppc970_pmu(void)
+{
+ if (!cur_cpu_spec->oprofile_cpu_type ||
+ (strcmp(cur_cpu_spec->oprofile_cpu_type, "ppc64/970")
+ && strcmp(cur_cpu_spec->oprofile_cpu_type, "ppc64/970MP")))
+ return -ENODEV;
+
+ return register_power_pmu(&ppc970_pmu);
+}
+
+arch_initcall(init_ppc970_pmu);
diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
index 3e7135bbe40..892a9f2e6d7 100644
--- a/arch/powerpc/kernel/process.c
+++ b/arch/powerpc/kernel/process.c
@@ -528,7 +528,7 @@ void show_regs(struct pt_regs * regs)
for (i = 0; i < 32; i++) {
if ((i % REGS_PER_LINE) == 0)
- printk("\n" KERN_INFO "GPR%02d: ", i);
+ printk("\nGPR%02d: ", i);
printk(REG " ", regs->gpr[i]);
if (i == LAST_VOLATILE && !FULL_REGS(regs))
break;
diff --git a/arch/powerpc/kernel/prom_init.c b/arch/powerpc/kernel/prom_init.c
index ef6f64950e9..a538824616f 100644
--- a/arch/powerpc/kernel/prom_init.c
+++ b/arch/powerpc/kernel/prom_init.c
@@ -1947,8 +1947,47 @@ static void __init fixup_device_tree_maple(void)
prom_setprop(isa, name, "ranges",
isa_ranges, sizeof(isa_ranges));
}
+
+#define CPC925_MC_START 0xf8000000
+#define CPC925_MC_LENGTH 0x1000000
+/* The memory-controller's "reg" values don't have the right number of cells */
+static void __init fixup_device_tree_maple_memory_controller(void)
+{
+ phandle mc;
+ u32 mc_reg[4];
+ char *name = "/hostbridge@f8000000";
+ struct prom_t *_prom = &RELOC(prom);
+ u32 ac, sc;
+
+ mc = call_prom("finddevice", 1, 1, ADDR(name));
+ if (!PHANDLE_VALID(mc))
+ return;
+
+ if (prom_getproplen(mc, "reg") != 8)
+ return;
+
+ prom_getprop(_prom->root, "#address-cells", &ac, sizeof(ac));
+ prom_getprop(_prom->root, "#size-cells", &sc, sizeof(sc));
+ if ((ac != 2) || (sc != 2))
+ return;
+
+ if (prom_getprop(mc, "reg", mc_reg, sizeof(mc_reg)) == PROM_ERROR)
+ return;
+
+ if (mc_reg[0] != CPC925_MC_START || mc_reg[1] != CPC925_MC_LENGTH)
+ return;
+
+ prom_printf("Fixing up bogus hostbridge on Maple...\n");
+
+ mc_reg[0] = 0x0;
+ mc_reg[1] = CPC925_MC_START;
+ mc_reg[2] = 0x0;
+ mc_reg[3] = CPC925_MC_LENGTH;
+ prom_setprop(mc, name, "reg", mc_reg, sizeof(mc_reg));
+}
#else
#define fixup_device_tree_maple()
+#define fixup_device_tree_maple_memory_controller()
#endif
#ifdef CONFIG_PPC_CHRP
@@ -2189,6 +2228,7 @@ static void __init fixup_device_tree_efika(void)
static void __init fixup_device_tree(void)
{
fixup_device_tree_maple();
+ fixup_device_tree_maple_memory_controller();
fixup_device_tree_chrp();
fixup_device_tree_pmac();
fixup_device_tree_efika();
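The memory-controller fixup rewrites the node's "reg" property to match the root node's two address cells and two size cells. Spelled out with the values from the fixup itself:

	/* With #address-cells = 2 and #size-cells = 2, "reg" reads as
	 * <addr_hi addr_lo size_hi size_lo>.  The bogus two-cell
	 *	reg = <0xf8000000 0x01000000>;
	 * is rewritten to
	 *	reg = <0x0 0xf8000000 0x0 0x01000000>;
	 * i.e. 64-bit address 0xf8000000 with 64-bit length 0x01000000. */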
diff --git a/arch/powerpc/kernel/ptrace.c b/arch/powerpc/kernel/ptrace.c
index 9fa2c7dcd05..ef149880c14 100644
--- a/arch/powerpc/kernel/ptrace.c
+++ b/arch/powerpc/kernel/ptrace.c
@@ -736,15 +736,16 @@ void user_disable_single_step(struct task_struct *task)
{
struct pt_regs *regs = task->thread.regs;
-
-#if defined(CONFIG_BOOKE)
- /* If DAC then do not single step, skip */
- if (task->thread.dabr)
- return;
-#endif
-
if (regs != NULL) {
-#if defined(CONFIG_40x) || defined(CONFIG_BOOKE)
+#if defined(CONFIG_BOOKE)
+ /* If DAC is set, don't clear DBCR0_IDM or MSR_DE */
+ if (task->thread.dabr)
+ task->thread.dbcr0 &= ~(DBCR0_IC | DBCR0_BT);
+ else {
+ task->thread.dbcr0 &= ~(DBCR0_IC | DBCR0_BT | DBCR0_IDM);
+ regs->msr &= ~MSR_DE;
+ }
+#elif defined(CONFIG_40x)
task->thread.dbcr0 &= ~(DBCR0_IC | DBCR0_BT | DBCR0_IDM);
regs->msr &= ~MSR_DE;
#else
diff --git a/arch/powerpc/kernel/ptrace32.c b/arch/powerpc/kernel/ptrace32.c
index 297632cba04..8a6daf4129f 100644
--- a/arch/powerpc/kernel/ptrace32.c
+++ b/arch/powerpc/kernel/ptrace32.c
@@ -21,7 +21,6 @@
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/smp.h>
-#include <linux/smp_lock.h>
#include <linux/errno.h>
#include <linux/ptrace.h>
#include <linux/regset.h>
diff --git a/arch/powerpc/kernel/rtas.c b/arch/powerpc/kernel/rtas.c
index ee4c7609b64..c434823b8c8 100644
--- a/arch/powerpc/kernel/rtas.c
+++ b/arch/powerpc/kernel/rtas.c
@@ -38,9 +38,10 @@
#include <asm/syscalls.h>
#include <asm/smp.h>
#include <asm/atomic.h>
+#include <asm/time.h>
struct rtas_t rtas = {
- .lock = SPIN_LOCK_UNLOCKED
+ .lock = __RAW_SPIN_LOCK_UNLOCKED
};
EXPORT_SYMBOL(rtas);
@@ -67,6 +68,28 @@ unsigned long rtas_rmo_buf;
void (*rtas_flash_term_hook)(int);
EXPORT_SYMBOL(rtas_flash_term_hook);
+/* RTAS uses home-made raw locking instead of spin_lock_irqsave()
+ * because it can be called from within really nasty contexts,
+ * such as having the timebase stopped, which would lock up with
+ * normal locks and spinlock debugging enabled.
+ */
+static unsigned long lock_rtas(void)
+{
+ unsigned long flags;
+
+ local_irq_save(flags);
+ preempt_disable();
+ __raw_spin_lock_flags(&rtas.lock, flags);
+ return flags;
+}
+
+static void unlock_rtas(unsigned long flags)
+{
+ __raw_spin_unlock(&rtas.lock);
+ local_irq_restore(flags);
+ preempt_enable();
+}
+
/*
* call_rtas_display_status and call_rtas_display_status_delay
* are designed only for very early low-level debugging, which
@@ -79,7 +102,7 @@ static void call_rtas_display_status(char c)
if (!rtas.base)
return;
- spin_lock_irqsave(&rtas.lock, s);
+ s = lock_rtas();
args->token = 10;
args->nargs = 1;
@@ -89,7 +112,7 @@ static void call_rtas_display_status(char c)
enter_rtas(__pa(args));
- spin_unlock_irqrestore(&rtas.lock, s);
+ unlock_rtas(s);
}
static void call_rtas_display_status_delay(char c)
@@ -411,8 +434,7 @@ int rtas_call(int token, int nargs, int nret, int *outputs, ...)
if (!rtas.entry || token == RTAS_UNKNOWN_SERVICE)
return -1;
- /* Gotta do something different here, use global lock for now... */
- spin_lock_irqsave(&rtas.lock, s);
+ s = lock_rtas();
rtas_args = &rtas.args;
rtas_args->token = token;
@@ -439,8 +461,7 @@ int rtas_call(int token, int nargs, int nret, int *outputs, ...)
outputs[i] = rtas_args->rets[i+1];
ret = (nret > 0)? rtas_args->rets[0]: 0;
- /* Gotta do something different here, use global lock for now... */
- spin_unlock_irqrestore(&rtas.lock, s);
+ unlock_rtas(s);
if (buff_copy) {
log_error(buff_copy, ERR_TYPE_RTAS_LOG, 0);
@@ -837,7 +858,7 @@ asmlinkage int ppc_rtas(struct rtas_args __user *uargs)
buff_copy = get_errorlog_buffer();
- spin_lock_irqsave(&rtas.lock, flags);
+ flags = lock_rtas();
rtas.args = args;
enter_rtas(__pa(&rtas.args));
@@ -848,7 +869,7 @@ asmlinkage int ppc_rtas(struct rtas_args __user *uargs)
if (args.rets[0] == -1)
errbuf = __fetch_rtas_last_error(buff_copy);
- spin_unlock_irqrestore(&rtas.lock, flags);
+ unlock_rtas(flags);
if (buff_copy) {
if (errbuf)
@@ -951,3 +972,33 @@ int __init early_init_dt_scan_rtas(unsigned long node,
/* break now */
return 1;
}
+
+static raw_spinlock_t timebase_lock;
+static u64 timebase = 0;
+
+void __cpuinit rtas_give_timebase(void)
+{
+ unsigned long flags;
+
+ local_irq_save(flags);
+ hard_irq_disable();
+ __raw_spin_lock(&timebase_lock);
+ rtas_call(rtas_token("freeze-time-base"), 0, 1, NULL);
+ timebase = get_tb();
+ __raw_spin_unlock(&timebase_lock);
+
+ while (timebase)
+ barrier();
+ rtas_call(rtas_token("thaw-time-base"), 0, 1, NULL);
+ local_irq_restore(flags);
+}
+
+void __cpuinit rtas_take_timebase(void)
+{
+ while (!timebase)
+ barrier();
+ __raw_spin_lock(&timebase_lock);
+ set_tb(timebase >> 32, timebase & 0xffffffff);
+ timebase = 0;
+ __raw_spin_unlock(&timebase_lock);
+}
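rtas_give_timebase() and rtas_take_timebase() form a simple handshake: the giving CPU freezes the timebase through RTAS, publishes its value under timebase_lock, and spins until the taker zeroes it before thawing; the taking CPU spins until a value appears, loads it into its own timebase, and clears it. A sketch of how a platform might wire these into its SMP callbacks, assuming the give_timebase/take_timebase hooks in struct smp_ops_t — the hookup itself is not part of this diff:

	/* Illustrative only: hooking the RTAS timebase handoff into a
	 * platform's smp_ops. */
	static struct smp_ops_t example_smp_ops = {
		.give_timebase	= rtas_give_timebase,	/* runs on the boot CPU */
		.take_timebase	= rtas_take_timebase,	/* runs on the new CPU */
		/* ... .probe, .kick_cpu, .message_pass, ... */
	};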
diff --git a/arch/powerpc/kernel/setup_32.c b/arch/powerpc/kernel/setup_32.c
index 1d154248cf4..e1e3059cf34 100644
--- a/arch/powerpc/kernel/setup_32.c
+++ b/arch/powerpc/kernel/setup_32.c
@@ -119,6 +119,8 @@ notrace unsigned long __init early_init(unsigned long dt_ptr)
*/
notrace void __init machine_init(unsigned long dt_ptr)
{
+ lockdep_init();
+
/* Enable early debugging if any specified (see udbg.h) */
udbg_early_init();
diff --git a/arch/powerpc/kernel/smp.c b/arch/powerpc/kernel/smp.c
index 65484b2200b..0b47de07302 100644
--- a/arch/powerpc/kernel/smp.c
+++ b/arch/powerpc/kernel/smp.c
@@ -68,7 +68,8 @@ EXPORT_PER_CPU_SYMBOL(cpu_core_map);
/* SMP operations for this machine */
struct smp_ops_t *smp_ops;
-static volatile unsigned int cpu_callin_map[NR_CPUS];
+/* Can't be static due to PowerMac hackery */
+volatile unsigned int cpu_callin_map[NR_CPUS];
int smt_enabled_at_boot = 1;
diff --git a/arch/powerpc/kernel/time.c b/arch/powerpc/kernel/time.c
index 15391c2ab01..eae4511ceea 100644
--- a/arch/powerpc/kernel/time.c
+++ b/arch/powerpc/kernel/time.c
@@ -53,6 +53,7 @@
#include <linux/posix-timers.h>
#include <linux/irq.h>
#include <linux/delay.h>
+#include <linux/perf_counter.h>
#include <asm/io.h>
#include <asm/processor.h>
@@ -525,6 +526,26 @@ void __init iSeries_time_init_early(void)
}
#endif /* CONFIG_PPC_ISERIES */
+#if defined(CONFIG_PERF_COUNTERS) && defined(CONFIG_PPC32)
+DEFINE_PER_CPU(u8, perf_counter_pending);
+
+void set_perf_counter_pending(void)
+{
+ get_cpu_var(perf_counter_pending) = 1;
+ set_dec(1);
+ put_cpu_var(perf_counter_pending);
+}
+
+#define test_perf_counter_pending() __get_cpu_var(perf_counter_pending)
+#define clear_perf_counter_pending() __get_cpu_var(perf_counter_pending) = 0
+
+#else /* CONFIG_PERF_COUNTERS && CONFIG_PPC32 */
+
+#define test_perf_counter_pending() 0
+#define clear_perf_counter_pending()
+
+#endif /* CONFIG_PERF_COUNTERS && CONFIG_PPC32 */
+
/*
* For iSeries shared processors, we have to let the hypervisor
* set the hardware decrementer. We set a virtual decrementer
@@ -551,6 +572,10 @@ void timer_interrupt(struct pt_regs * regs)
set_dec(DECREMENTER_MAX);
#ifdef CONFIG_PPC32
+ if (test_perf_counter_pending()) {
+ clear_perf_counter_pending();
+ perf_counter_do_pending();
+ }
if (atomic_read(&ppc_n_lost_interrupts) != 0)
do_IRQ(regs);
#endif
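On 32-bit, set_perf_counter_pending() above turns the decrementer into a cheap self-interrupt: it raises the per-CPU flag and programs the decrementer to expire after a single tick, so timer_interrupt() runs almost immediately and drains the pending perf-counter work. The same deferral pattern, reduced to its essentials with hypothetical names:

	/* Illustrative only: defer work from a context where it cannot
	 * run directly to the next decrementer interrupt, which
	 * set_dec(1) makes fire almost at once. */
	static DEFINE_PER_CPU(u8, deferred_work_pending);

	static void kick_deferred_work(void)
	{
		get_cpu_var(deferred_work_pending) = 1;
		set_dec(1);
		put_cpu_var(deferred_work_pending);
	}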
diff --git a/arch/powerpc/kernel/udbg_16550.c b/arch/powerpc/kernel/udbg_16550.c
index 0362a891e54..acb74a17bbb 100644
--- a/arch/powerpc/kernel/udbg_16550.c
+++ b/arch/powerpc/kernel/udbg_16550.c
@@ -219,7 +219,7 @@ void udbg_init_pas_realmode(void)
#ifdef CONFIG_PPC_EARLY_DEBUG_44x
#include <platforms/44x/44x.h>
-static int udbg_44x_as1_flush(void)
+static void udbg_44x_as1_flush(void)
{
if (udbg_comport) {
while ((as1_readb(&udbg_comport->lsr) & LSR_THRE) == 0)
diff --git a/arch/powerpc/kernel/vector.S b/arch/powerpc/kernel/vector.S
index ef36cbbc588..ea4d64644d0 100644
--- a/arch/powerpc/kernel/vector.S
+++ b/arch/powerpc/kernel/vector.S
@@ -80,10 +80,10 @@ _GLOBAL(load_up_altivec)
mtvscr vr0
REST_32VRS(0,r4,r5)
#ifndef CONFIG_SMP
- /* Update last_task_used_math to 'current' */
+ /* Update last_task_used_altivec to 'current' */
subi r4,r5,THREAD /* Back to 'current' */
fromreal(r4)
- PPC_STL r4,ADDROFF(last_task_used_math)(r3)
+ PPC_STL r4,ADDROFF(last_task_used_altivec)(r3)
#endif /* CONFIG_SMP */
/* restore registers and return */
blr
@@ -172,7 +172,7 @@ _GLOBAL(load_up_vsx)
oris r12,r12,MSR_VSX@h
std r12,_MSR(r1)
#ifndef CONFIG_SMP
- /* Update last_task_used_math to 'current' */
+ /* Update last_task_used_vsx to 'current' */
ld r4,PACACURRENT(r13)
std r4,0(r3)
#endif /* CONFIG_SMP */