author     Mathieu Desnoyers <mathieu.desnoyers@efficios.com>  2011-03-16 19:05:58 -0400
committer  Mathieu Desnoyers <mathieu.desnoyers@polymtl.ca>    2011-03-16 19:05:58 -0400
commit     710944c4e1f7468dba1a0c4fa36b4cda5846f350 (patch)
tree       76200076cc8da29802f888364a812a473ac9406e /arch/x86/include
parent     d58cc7d529b3944617b1e4453f747b5cdec011c7 (diff)
download   linux-linaro-android-710944c4e1f7468dba1a0c4fa36b4cda5846f350.tar.gz
trace clock: remove extra barriers on x86

Given that a tracer cannot realistically provide accuracy better than the
inaccuracy between the traced action (e.g. an atomic operation) and the
timestamp read, having barriers around the timestamp read is just overkill.
This will speed up tracing.

Signed-off-by: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
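For context, here is a minimal user-space sketch of the pattern the patch removes. This is an illustration, not code from this tree: it assumes get_cycles() boils down to RDTSC and get_cycles_barrier() to an LFENCE-style fence, as the x86 rdtsc_barrier() of this era did.

#include <stdint.h>
#include <x86intrin.h>	/* __rdtsc(), _mm_lfence() */

/*
 * Fenced read: the shape of trace_clock_read64() before this patch.
 * RDTSC may execute out of order with surrounding instructions; the
 * fences pin it down, at a cost of extra cycles on every read.
 */
static inline uint64_t tsc_read_fenced(void)
{
	uint64_t cycles;

	_mm_lfence();
	cycles = __rdtsc();
	_mm_lfence();
	return cycles;
}

/*
 * Unfenced read: the shape after this patch. The timestamp may be
 * taken slightly out of order with neighboring instructions, but that
 * slack is already smaller than the tracer's intrinsic inaccuracy.
 */
static inline uint64_t tsc_read_fast(void)
{
	return __rdtsc();
}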
Diffstat (limited to 'arch/x86/include')
-rw-r--r--  arch/x86/include/asm/trace-clock.h | 12 ++++--------
1 file changed, 4 insertions(+), 8 deletions(-)
diff --git a/arch/x86/include/asm/trace-clock.h b/arch/x86/include/asm/trace-clock.h
index 01bc2f5221c..5f6a36684c9 100644
--- a/arch/x86/include/asm/trace-clock.h
+++ b/arch/x86/include/asm/trace-clock.h
@@ -30,11 +30,9 @@ static inline u32 trace_clock_read32(void)
 {
 	u32 cycles;
 
-	if (likely(trace_clock_is_sync())) {
-		get_cycles_barrier();
+	if (likely(trace_clock_is_sync()))
 		cycles = (u32)get_cycles();	/* only need the 32 LSB */
-		get_cycles_barrier();
-	} else
+	else
 		cycles = (u32)trace_clock_async_tsc_read();
 	return cycles;
 }
@@ -43,11 +41,9 @@ static inline u64 trace_clock_read64(void)
 {
 	u64 cycles;
 
-	if (likely(trace_clock_is_sync())) {
-		get_cycles_barrier();
+	if (likely(trace_clock_is_sync()))
 		cycles = get_cycles();
-		get_cycles_barrier();
-	} else
+	else
 		cycles = trace_clock_async_tsc_read();
 	return cycles;
 }
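To get a feel for what the removed fences cost per read, a throwaway harness (hypothetical, not part of the patch; any x86 compiler providing x86intrin.h should do) can time both paths:

#include <stdio.h>
#include <stdint.h>
#include <x86intrin.h>

#define ITERS 10000000UL

int main(void)
{
	volatile uint64_t sink;
	uint64_t start, end;
	unsigned long i;

	start = __rdtsc();
	for (i = 0; i < ITERS; i++)
		sink = __rdtsc();		/* unfenced, as after this patch */
	end = __rdtsc();
	printf("unfenced: %.1f cycles/read\n", (double)(end - start) / ITERS);

	start = __rdtsc();
	for (i = 0; i < ITERS; i++) {
		_mm_lfence();
		sink = __rdtsc();		/* fenced, as before this patch */
		_mm_lfence();
	}
	end = __rdtsc();
	printf("fenced:   %.1f cycles/read\n", (double)(end - start) / ITERS);

	(void)sink;
	return 0;
}

The exact difference varies by microarchitecture, but the fenced path serializes every read, which is precisely the overhead the patch argues a tracer cannot benefit from.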