aboutsummaryrefslogtreecommitdiff
path: root/kernel
diff options
context:
space:
mode:
authorMathieu Desnoyers <mathieu.desnoyers@efficios.com>2011-03-16 19:05:38 -0400
committerMathieu Desnoyers <mathieu.desnoyers@polymtl.ca>2011-03-16 19:05:38 -0400
commit9689d5a60f9c59ac3f8fdc49b8318ba14b0d6cfc (patch)
treeff3c7d5b80e782ada0c817691006d794ccfff51e /kernel
parent87d0f0c8422790bd8f49d84f37a51c673c65b07c (diff)
downloadlinux-linaro-android-9689d5a60f9c59ac3f8fdc49b8318ba14b0d6cfc.tar.gz
omap-trace-clock-pm-update
omap trace clock pm update Update OMAP trace clock locking. * Differs from omap tree * Signed-off-by: Mathieu Desnoyers <mathieu.desnoyers@polymtl.ca>
Diffstat (limited to 'kernel')
-rw-r--r--kernel/trace/trace-clock-32-to-64.c21
1 file changed, 10 insertions, 11 deletions
diff --git a/kernel/trace/trace-clock-32-to-64.c b/kernel/trace/trace-clock-32-to-64.c
index 1e0a9382ecc..d80255eb288 100644
--- a/kernel/trace/trace-clock-32-to-64.c
+++ b/kernel/trace/trace-clock-32-to-64.c
@@ -97,17 +97,18 @@ static void update_synthetic_tsc(void)
}
/*
- * Should only be called when the synthetic clock is not used.
+ * Should only be called when interrupts are off. Affects only current CPU.
*/
void _trace_clock_write_synthetic_tsc(u64 value)
{
struct synthetic_tsc_struct *cpu_synth;
- int cpu;
+ unsigned int new_index;
- for_each_online_cpu(cpu) {
- cpu_synth = &per_cpu(synthetic_tsc, cpu);
- cpu_synth->tsc[cpu_synth->index].val = value;
- }
+ cpu_synth = &per_cpu(synthetic_tsc, smp_processor_id());
+ new_index = 1 - cpu_synth->index; /* 0 <-> 1 */
+ cpu_synth->tsc[new_index].val = value;
+ barrier();
+ cpu_synth->index = new_index; /* atomic change of index */
}
/* Called from buffer switch : in _any_ context (even NMI) */
@@ -187,7 +188,7 @@ static void prepare_synthetic_tsc(int cpu)
cpu_synth->tsc[0].val = local_count;
cpu_synth->index = 0;
smp_wmb(); /* Writing in data of CPU about to come up */
- init_timer(&per_cpu(tsc_timer, cpu));
+ init_timer_deferrable(&per_cpu(tsc_timer, cpu));
per_cpu(tsc_timer, cpu).function = tsc_timer_fct;
per_cpu(tsc_timer, cpu).expires = jiffies + precalc_expire;
}
@@ -245,10 +246,8 @@ static int __cpuinit hotcpu_callback(struct notifier_block *nb,
spin_unlock(&synthetic_tsc_lock);
break;
#ifdef CONFIG_HOTPLUG_CPU
- case CPU_UP_CANCELED:
- case CPU_UP_CANCELED_FROZEN:
- case CPU_DEAD:
- case CPU_DEAD_FROZEN:
+ case CPU_DOWN_PREPARE:
+ case CPU_DOWN_PREPARE_FROZEN:
spin_lock(&synthetic_tsc_lock);
if (synthetic_tsc_refcount)
disable_synthetic_tsc(hotcpu);