author	Anton Blanchard <anton@samba.org>	2006-03-27 12:00:45 +1100
committer	Paul Mackerras <paulus@samba.org>	2006-03-29 13:44:16 +1100
commit	15e812ad849e142e3dfc984d33c4d8042389f148 (patch)
tree	e2d50c583457e8c4b2d06eea9846c04d0f5629d7 /arch/powerpc/oprofile/op_model_power4.c
parent	fa465f8c7008c6cab32b05f3f1af57f7c86e8873 (diff)
[PATCH] powerpc: Remove oprofile spinlock backtrace code
Remove the oprofile spinlock backtrace code now that we have proper calltrace support. Also make the MMCRA SIHV and SIPR bits variables, since they may change in future CPUs. Finally, MMCRA should be a 64-bit quantity.
Signed-off-by: Anton Blanchard <anton@samba.org>
Signed-off-by: Paul Mackerras <paulus@samba.org>
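To illustrate why the patch turns the SIHV/SIPR masks into variables rather than hard-coded constants: a minimal sketch, assuming a hypothetical future CPU whose MMCRA layout places these bits elsewhere, could simply override the two globals from its setup path. The bit values and the function name below are illustrative assumptions, not symbols from this patch or the kernel, and the sketch assumes it sits in op_model_power4.c alongside the (static) variables the patch introduces.

/*
 * Hypothetical MMCRA bit positions for an assumed future CPU;
 * not real kernel definitions.
 */
#define MMCRA_SIHV_FUTURE	0x0000040000000000UL
#define MMCRA_SIPR_FUTURE	0x0000020000000000UL

/*
 * Sketch of a per-CPU setup hook that retargets the sample bits
 * introduced by this patch (mmcra_sihv / mmcra_sipr).
 */
static void future_cpu_reg_setup(void)
{
	mmcra_sihv = MMCRA_SIHV_FUTURE;
	mmcra_sipr = MMCRA_SIPR_FUTURE;
}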
Diffstat (limited to 'arch/powerpc/oprofile/op_model_power4.c')
-rw-r--r--	arch/powerpc/oprofile/op_model_power4.c	43
1 file changed, 9 insertions(+), 34 deletions(-)
diff --git a/arch/powerpc/oprofile/op_model_power4.c b/arch/powerpc/oprofile/op_model_power4.c
index 38db2efef3b..4c2beab1fdc 100644
--- a/arch/powerpc/oprofile/op_model_power4.c
+++ b/arch/powerpc/oprofile/op_model_power4.c
@@ -25,18 +25,14 @@ static unsigned long reset_value[OP_MAX_COUNTER];
static int oprofile_running;
static int mmcra_has_sihv;
+/* Unfortunately these bits vary between CPUs */
+static unsigned long mmcra_sihv = MMCRA_SIHV;
+static unsigned long mmcra_sipr = MMCRA_SIPR;
/* mmcr values are set in power4_reg_setup, used in power4_cpu_setup */
static u32 mmcr0_val;
static u64 mmcr1_val;
-static u32 mmcra_val;
-
-/*
- * Since we do not have an NMI, backtracing through spinlocks is
- * only a best guess. In light of this, allow it to be disabled at
- * runtime.
- */
-static int backtrace_spinlocks;
+static u64 mmcra_val;
static void power4_reg_setup(struct op_counter_config *ctr,
struct op_system_config *sys,
@@ -63,8 +59,6 @@ static void power4_reg_setup(struct op_counter_config *ctr,
mmcr1_val = sys->mmcr1;
mmcra_val = sys->mmcra;
- backtrace_spinlocks = sys->backtrace_spinlocks;
-
for (i = 0; i < cur_cpu_spec->num_pmcs; ++i)
reset_value[i] = 0x80000000UL - ctr[i].count;
@@ -197,25 +191,6 @@ static void __attribute_used__ kernel_unknown_bucket(void)
{
}
-static unsigned long check_spinlock_pc(struct pt_regs *regs,
- unsigned long profile_pc)
-{
- unsigned long pc = instruction_pointer(regs);
-
- /*
- * If both the SIAR (sampled instruction) and the perfmon exception
- * occurred in a spinlock region then we account the sample to the
- * calling function. This isnt 100% correct, we really need soft
- * IRQ disable so we always get the perfmon exception at the
- * point at which the SIAR is set.
- */
- if (backtrace_spinlocks && in_lock_functions(pc) &&
- in_lock_functions(profile_pc))
- return regs->link;
- else
- return profile_pc;
-}
-
/*
* On GQ and newer the MMCRA stores the HV and PR bits at the time
* the SIAR was sampled. We use that to work out if the SIAR was sampled in
@@ -228,17 +203,17 @@ static unsigned long get_pc(struct pt_regs *regs)
/* Cant do much about it */
if (!mmcra_has_sihv)
- return check_spinlock_pc(regs, pc);
+ return pc;
mmcra = mfspr(SPRN_MMCRA);
/* Were we in the hypervisor? */
- if (firmware_has_feature(FW_FEATURE_LPAR) && (mmcra & MMCRA_SIHV))
+ if (firmware_has_feature(FW_FEATURE_LPAR) && (mmcra & mmcra_sihv))
/* function descriptor madness */
return *((unsigned long *)hypervisor_bucket);
/* We were in userspace, nothing to do */
- if (mmcra & MMCRA_SIPR)
+ if (mmcra & mmcra_sipr)
return pc;
#ifdef CONFIG_PPC_RTAS
@@ -257,7 +232,7 @@ static unsigned long get_pc(struct pt_regs *regs)
/* function descriptor madness */
return *((unsigned long *)kernel_unknown_bucket);
- return check_spinlock_pc(regs, pc);
+ return pc;
}
static int get_kernel(unsigned long pc)
@@ -268,7 +243,7 @@ static int get_kernel(unsigned long pc)
is_kernel = is_kernel_addr(pc);
} else {
unsigned long mmcra = mfspr(SPRN_MMCRA);
- is_kernel = ((mmcra & MMCRA_SIPR) == 0);
+ is_kernel = ((mmcra & mmcra_sipr) == 0);
}
return is_kernel;
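For reference, a simplified, self-contained sketch of the classification that get_pc() and get_kernel() perform above: the MMCRA sample bits recorded when the SIAR was set tell us whether the sample came from the hypervisor, user space, or the kernel. The bit values below are placeholders (the real ones are CPU-specific, which is the point of the patch) and the helper is not kernel code; it only mirrors the decision order: hypervisor first when running under an LPAR, then user, otherwise kernel.

#include <stdio.h>

/* Illustrative bit positions only; assumed, not the kernel's definitions. */
#define SIHV_BIT 0x10000000UL	/* sample taken in hypervisor state */
#define SIPR_BIT 0x08000000UL	/* sample taken in problem (user) state */

enum sample_ctx { CTX_HYPERVISOR, CTX_USER, CTX_KERNEL };

/* Mirror of the decision order in get_pc(): hypervisor, then user,
 * otherwise the sample is attributed to the kernel. */
static enum sample_ctx classify_sample(unsigned long mmcra, int in_lpar)
{
	if (in_lpar && (mmcra & SIHV_BIT))
		return CTX_HYPERVISOR;
	if (mmcra & SIPR_BIT)
		return CTX_USER;
	return CTX_KERNEL;
}

int main(void)
{
	/* A sample with SIPR set on a non-LPAR system classifies as user (1). */
	printf("%d\n", classify_sample(SIPR_BIT, 0));
	return 0;
}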