Diffstat (limited to 'arch/arm')
-rw-r--r--  arch/arm/Kconfig                               |   1
-rw-r--r--  arch/arm/Kconfig.debug                         |   4
-rw-r--r--  arch/arm/include/asm/a.out-core.h              |   6
-rw-r--r--  arch/arm/include/asm/system.h                  |   2
-rw-r--r--  arch/arm/include/asm/thread_info.h             |   3
-rw-r--r--  arch/arm/include/asm/trace-clock.h             |   1
-rw-r--r--  arch/arm/include/asm/unistd.h                  |   2
-rw-r--r--  arch/arm/include/asm/user.h                    |   2
-rw-r--r--  arch/arm/kernel/entry-common.S                 |  10
-rw-r--r--  arch/arm/kernel/process.c                      |   9
-rw-r--r--  arch/arm/kernel/ptrace.c                       |  34
-rw-r--r--  arch/arm/kernel/traps.c                        |   8
-rw-r--r--  arch/arm/mach-omap2/Makefile                   |   1
-rw-r--r--  arch/arm/mach-omap2/clkt34xx_dpll3m2.c         |   3
-rw-r--r--  arch/arm/mach-omap2/clock34xx.c                |   4
-rw-r--r--  arch/arm/mach-omap2/pm34xx.c                   |  29
-rw-r--r--  arch/arm/mach-omap2/trace-clock.c              | 726
-rw-r--r--  arch/arm/plat-omap/Kconfig                     |   4
-rw-r--r--  arch/arm/plat-omap/counter_32k.c               |   5
-rw-r--r--  arch/arm/plat-omap/include/plat/clock.h        |   2
-rw-r--r--  arch/arm/plat-omap/include/plat/trace-clock.h  | 172
21 files changed, 1015 insertions, 13 deletions
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index a53245a9ef5e..d8cdd7b00acd 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -13,6 +13,7 @@ config ARM
select HAVE_KPROBES if (!XIP_KERNEL && !THUMB2_KERNEL)
select HAVE_KRETPROBES if (HAVE_KPROBES)
select HAVE_FUNCTION_TRACER if (!XIP_KERNEL)
+ select HAVE_LTT_DUMP_TABLES
select HAVE_FTRACE_MCOUNT_RECORD if (!XIP_KERNEL)
select HAVE_DYNAMIC_FTRACE if (!XIP_KERNEL)
select HAVE_FUNCTION_GRAPH_TRACER if (!THUMB2_KERNEL)
diff --git a/arch/arm/Kconfig.debug b/arch/arm/Kconfig.debug
index 494224a9b459..23f8c4b764e2 100644
--- a/arch/arm/Kconfig.debug
+++ b/arch/arm/Kconfig.debug
@@ -147,4 +147,8 @@ config DEBUG_S3C_UART
The uncompressor code port configuration is now handled
by CONFIG_S3C_LOWLEVEL_UART_PORT.
+config DEBUG_TRACE_CLOCK
+ bool "Debug trace clock"
+ depends on HAVE_TRACE_CLOCK
+
endmenu
diff --git a/arch/arm/include/asm/a.out-core.h b/arch/arm/include/asm/a.out-core.h
index 93d04acaa31f..92f10cb5c70c 100644
--- a/arch/arm/include/asm/a.out-core.h
+++ b/arch/arm/include/asm/a.out-core.h
@@ -32,11 +32,7 @@ static inline void aout_dump_thread(struct pt_regs *regs, struct user *dump)
dump->u_dsize = (tsk->mm->brk - tsk->mm->start_data + PAGE_SIZE - 1) >> PAGE_SHIFT;
dump->u_ssize = 0;
- dump->u_debugreg[0] = tsk->thread.debug.bp[0].address;
- dump->u_debugreg[1] = tsk->thread.debug.bp[1].address;
- dump->u_debugreg[2] = tsk->thread.debug.bp[0].insn.arm;
- dump->u_debugreg[3] = tsk->thread.debug.bp[1].insn.arm;
- dump->u_debugreg[4] = tsk->thread.debug.nsaved;
+ memset(dump->u_debugreg, 0, sizeof(dump->u_debugreg));
if (dump->start_stack < 0x04000000)
dump->u_ssize = (0x04000000 - dump->start_stack) >> PAGE_SHIFT;
diff --git a/arch/arm/include/asm/system.h b/arch/arm/include/asm/system.h
index 9a87823642d0..cf7dc925c635 100644
--- a/arch/arm/include/asm/system.h
+++ b/arch/arm/include/asm/system.h
@@ -395,7 +395,7 @@ static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
break;
case 2:
do {
- asm volatile("@ __cmpxchg1\n"
+ asm volatile("@ __cmpxchg2\n"
" ldrexh %1, [%2]\n"
" mov %0, #0\n"
" teq %1, %3\n"
diff --git a/arch/arm/include/asm/thread_info.h b/arch/arm/include/asm/thread_info.h
index 7b5cc8dae06e..1f925b8bcd5f 100644
--- a/arch/arm/include/asm/thread_info.h
+++ b/arch/arm/include/asm/thread_info.h
@@ -129,6 +129,7 @@ extern void vfp_flush_hwstate(struct thread_info *);
/*
* thread information flags:
* TIF_SYSCALL_TRACE - syscall trace active
+ * TIF_KERNEL_TRACE - kernel trace active
* TIF_SIGPENDING - signal pending
* TIF_NEED_RESCHED - rescheduling necessary
* TIF_NOTIFY_RESUME - callback before returning to user
@@ -138,6 +139,7 @@ extern void vfp_flush_hwstate(struct thread_info *);
#define TIF_SIGPENDING 0
#define TIF_NEED_RESCHED 1
#define TIF_NOTIFY_RESUME 2 /* callback before returning to user */
+#define TIF_KERNEL_TRACE 7
#define TIF_SYSCALL_TRACE 8
#define TIF_POLLING_NRFLAG 16
#define TIF_USING_IWMMXT 17
@@ -149,6 +151,7 @@ extern void vfp_flush_hwstate(struct thread_info *);
#define _TIF_SIGPENDING (1 << TIF_SIGPENDING)
#define _TIF_NEED_RESCHED (1 << TIF_NEED_RESCHED)
#define _TIF_NOTIFY_RESUME (1 << TIF_NOTIFY_RESUME)
+#define _TIF_KERNEL_TRACE (1 << TIF_KERNEL_TRACE)
#define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE)
#define _TIF_POLLING_NRFLAG (1 << TIF_POLLING_NRFLAG)
#define _TIF_USING_IWMMXT (1 << TIF_USING_IWMMXT)
diff --git a/arch/arm/include/asm/trace-clock.h b/arch/arm/include/asm/trace-clock.h
new file mode 100644
index 000000000000..8a13b7dedde5
--- /dev/null
+++ b/arch/arm/include/asm/trace-clock.h
@@ -0,0 +1 @@
+#include <plat/trace-clock.h>
diff --git a/arch/arm/include/asm/unistd.h b/arch/arm/include/asm/unistd.h
index c891eb76c0e3..92684d2e9054 100644
--- a/arch/arm/include/asm/unistd.h
+++ b/arch/arm/include/asm/unistd.h
@@ -397,6 +397,8 @@
#define __NR_fanotify_mark (__NR_SYSCALL_BASE+368)
#define __NR_prlimit64 (__NR_SYSCALL_BASE+369)
+#define __NR_syscall_max 370
+
/*
* The following SWIs are ARM private.
*/
diff --git a/arch/arm/include/asm/user.h b/arch/arm/include/asm/user.h
index 05ac4b06876a..35917b3a97f9 100644
--- a/arch/arm/include/asm/user.h
+++ b/arch/arm/include/asm/user.h
@@ -71,7 +71,7 @@ struct user{
/* the registers. */
unsigned long magic; /* To uniquely identify a core file */
char u_comm[32]; /* User command that was responsible */
- int u_debugreg[8];
+ int u_debugreg[8]; /* No longer used */
struct user_fp u_fp; /* FP state */
struct user_fp_struct * u_fp0;/* Used by gdb to help find the values for */
/* the FP registers. */
diff --git a/arch/arm/kernel/entry-common.S b/arch/arm/kernel/entry-common.S
index 1e7b04a40a31..1edf1deadf85 100644
--- a/arch/arm/kernel/entry-common.S
+++ b/arch/arm/kernel/entry-common.S
@@ -43,6 +43,8 @@ ret_fast_syscall:
* Ok, we need to do extra processing, enter the slow path.
*/
fast_work_pending:
+ tst r1, #_TIF_KERNEL_TRACE @ flag can be set asynchronously
+ bne __sys_trace_return
str r0, [sp, #S_R0+S_OFF]! @ returned r0
work_pending:
tst r1, #_TIF_NEED_RESCHED
@@ -85,8 +87,8 @@ ENTRY(ret_from_fork)
get_thread_info tsk
ldr r1, [tsk, #TI_FLAGS] @ check for syscall tracing
mov why, #1
- tst r1, #_TIF_SYSCALL_TRACE @ are we tracing syscalls?
- beq ret_slow_syscall
+ tst r1, #_TIF_SYSCALL_TRACE | _TIF_KERNEL_TRACE
+ beq ret_slow_syscall @ are we tracing syscalls?
mov r1, sp
mov r0, #1 @ trace exit [IP = 1]
bl syscall_trace
@@ -441,8 +443,8 @@ ENTRY(vector_swi)
1:
#endif
- tst r10, #_TIF_SYSCALL_TRACE @ are we tracing syscalls?
- bne __sys_trace
+ tst r10, #_TIF_SYSCALL_TRACE | _TIF_KERNEL_TRACE
+ bne __sys_trace @ are we tracing syscalls?
cmp scno, #NR_syscalls @ check upper syscall limit
adr lr, BSYM(ret_fast_syscall) @ return address
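
The widened mask above lets one tst cover both the ptrace syscall-trace flag and the new kernel-trace flag, so either one sends the syscall through the traced slow path. A C-level equivalent of that test, for illustration only (the flag constants come from asm/thread_info.h):

#include <linux/types.h>
#include <asm/thread_info.h>

/* Illustration only: take the traced slow path if either flag is set. */
static inline bool syscall_is_traced(struct thread_info *ti)
{
	return ti->flags & (_TIF_SYSCALL_TRACE | _TIF_KERNEL_TRACE);
}
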
diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c
index 94bbedbed639..fe2277c5d8cd 100644
--- a/arch/arm/kernel/process.c
+++ b/arch/arm/kernel/process.c
@@ -30,6 +30,7 @@
#include <linux/uaccess.h>
#include <linux/random.h>
#include <linux/hw_breakpoint.h>
+#include <trace/sched.h>
#include <asm/cacheflush.h>
#include <asm/leds.h>
@@ -45,6 +46,8 @@ unsigned long __stack_chk_guard __read_mostly;
EXPORT_SYMBOL(__stack_chk_guard);
#endif
+DEFINE_TRACE(sched_kthread_create);
+
static const char *processor_modes[] = {
"USER_26", "FIQ_26" , "IRQ_26" , "SVC_26" , "UK4_26" , "UK5_26" , "UK6_26" , "UK7_26" ,
"UK8_26" , "UK9_26" , "UK10_26", "UK11_26", "UK12_26", "UK13_26", "UK14_26", "UK15_26",
@@ -442,6 +445,7 @@ asm( ".pushsection .text\n"
pid_t kernel_thread(int (*fn)(void *), void *arg, unsigned long flags)
{
struct pt_regs regs;
+ long pid;
memset(&regs, 0, sizeof(regs));
@@ -452,7 +456,10 @@ pid_t kernel_thread(int (*fn)(void *), void *arg, unsigned long flags)
regs.ARM_pc = (unsigned long)kernel_thread_helper;
regs.ARM_cpsr = regs.ARM_r7 | PSR_I_BIT;
- return do_fork(flags|CLONE_VM|CLONE_UNTRACED, 0, &regs, 0, NULL, NULL);
+ pid = do_fork(flags|CLONE_VM|CLONE_UNTRACED, 0, &regs, 0, NULL, NULL);
+
+ trace_sched_kthread_create(fn, pid);
+ return pid;
}
EXPORT_SYMBOL(kernel_thread);
diff --git a/arch/arm/kernel/ptrace.c b/arch/arm/kernel/ptrace.c
index 2bf27f364d09..03438e9cc069 100644
--- a/arch/arm/kernel/ptrace.c
+++ b/arch/arm/kernel/ptrace.c
@@ -21,10 +21,15 @@
#include <linux/uaccess.h>
#include <linux/perf_event.h>
#include <linux/hw_breakpoint.h>
+#include <linux/module.h>
+#include <linux/marker.h>
+#include <linux/kallsyms.h>
+#include <trace/syscall.h>
#include <asm/pgtable.h>
#include <asm/system.h>
#include <asm/traps.h>
+#include <asm/unistd.h>
#define REG_PC 15
#define REG_PSR 16
@@ -52,6 +57,30 @@
#define BREAKINST_THUMB 0xde01
#endif
+DEFINE_TRACE(syscall_entry);
+DEFINE_TRACE(syscall_exit);
+
+extern unsigned long sys_call_table[];
+
+void ltt_dump_sys_call_table(void *call_data)
+{
+ int i;
+ char namebuf[KSYM_NAME_LEN];
+
+ for (i = 0; i < __NR_syscall_max + 1; i++) {
+ sprint_symbol(namebuf, sys_call_table[i]);
+ __trace_mark(0, syscall_state, sys_call_table, call_data,
+ "id %d address %p symbol %s",
+ i, (void*)sys_call_table[i], namebuf);
+ }
+}
+EXPORT_SYMBOL_GPL(ltt_dump_sys_call_table);
+
+void ltt_dump_idt_table(void *call_data)
+{
+}
+EXPORT_SYMBOL_GPL(ltt_dump_idt_table);
+
struct pt_regs_offset {
const char *name;
int offset;
@@ -788,6 +817,11 @@ asmlinkage int syscall_trace(int why, struct pt_regs *regs, int scno)
{
unsigned long ip;
+ if (!why)
+ trace_syscall_entry(regs, scno);
+ else
+ trace_syscall_exit(regs->ARM_r0);
+
if (!test_thread_flag(TIF_SYSCALL_TRACE))
return scno;
if (!(current->ptrace & PT_PTRACED))
diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c
index 21ac43f1c2d0..41eb77da882a 100644
--- a/arch/arm/kernel/traps.c
+++ b/arch/arm/kernel/traps.c
@@ -23,6 +23,7 @@
#include <linux/kexec.h>
#include <linux/delay.h>
#include <linux/init.h>
+#include <trace/trap.h>
#include <linux/sched.h>
#include <asm/atomic.h>
@@ -35,6 +36,9 @@
#include "signal.h"
+DEFINE_TRACE(trap_entry);
+DEFINE_TRACE(trap_exit);
+
static const char *handler[]= { "prefetch abort", "data abort", "address exception", "interrupt" };
void *vectors_page;
@@ -296,7 +300,11 @@ void arm_notify_die(const char *str, struct pt_regs *regs,
current->thread.error_code = err;
current->thread.trap_no = trap;
+ trace_trap_entry(regs, current->thread.trap_no);
+
force_sig_info(info->si_signo, info, current);
+
+ trace_trap_exit();
} else {
die(str, regs, err);
}
diff --git a/arch/arm/mach-omap2/Makefile b/arch/arm/mach-omap2/Makefile
index 898fffe0e9cb..1d6f14a584fb 100644
--- a/arch/arm/mach-omap2/Makefile
+++ b/arch/arm/mach-omap2/Makefile
@@ -180,6 +180,7 @@ obj-$(CONFIG_MACH_OMAP_3430SDP) += board-3430sdp.o \
hsmmc.o \
board-flash.o
obj-$(CONFIG_MACH_NOKIA_N8X0) += board-n8x0.o
+obj-$(CONFIG_HAVE_TRACE_CLOCK) += trace-clock.o
obj-$(CONFIG_MACH_NOKIA_RM680) += board-rm680.o \
sdram-nokia.o \
hsmmc.o
diff --git a/arch/arm/mach-omap2/clkt34xx_dpll3m2.c b/arch/arm/mach-omap2/clkt34xx_dpll3m2.c
index b2b1e37bb6bb..b10d9efd6db0 100644
--- a/arch/arm/mach-omap2/clkt34xx_dpll3m2.c
+++ b/arch/arm/mach-omap2/clkt34xx_dpll3m2.c
@@ -24,6 +24,7 @@
#include <plat/clock.h>
#include <plat/sram.h>
#include <plat/sdrc.h>
+#include <asm/trace-clock.h>
#include "clock.h"
#include "clock3xxx.h"
@@ -79,6 +80,8 @@ int omap3_core_dpll_m2_set_rate(struct clk *clk, unsigned long rate)
unlock_dll = 1;
}
+ cpu_hz = arm_fck_p->rate;
+
/*
* XXX This only needs to be done when the CPU frequency changes
*/
diff --git a/arch/arm/mach-omap2/clock34xx.c b/arch/arm/mach-omap2/clock34xx.c
index 287abc480924..8971015538ab 100644
--- a/arch/arm/mach-omap2/clock34xx.c
+++ b/arch/arm/mach-omap2/clock34xx.c
@@ -18,6 +18,7 @@
#undef DEBUG
#include <linux/kernel.h>
+#include <linux/module.h>
#include <linux/clk.h>
#include <linux/io.h>
@@ -94,6 +95,9 @@ const struct clkops clkops_omap3430es2_dss_usbhost_wait = {
.find_companion = omap2_clk_dflt_find_companion,
};
+unsigned long long cpu_hz;
+EXPORT_SYMBOL(cpu_hz);
+
/**
* omap3430es2_clk_hsotgusb_find_idlest - return CM_IDLEST info for HSOTGUSB
* @clk: struct clk * being enabled
diff --git a/arch/arm/mach-omap2/pm34xx.c b/arch/arm/mach-omap2/pm34xx.c
index 2f864e4b085d..dcb1dd36c24c 100644
--- a/arch/arm/mach-omap2/pm34xx.c
+++ b/arch/arm/mach-omap2/pm34xx.c
@@ -29,6 +29,7 @@
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/console.h>
+#include <trace/pm.h>
#include <plat/sram.h>
#include "clockdomain.h"
@@ -41,6 +42,8 @@
#include <asm/tlbflush.h>
+#include <asm/trace-clock.h>
+
#include "cm2xxx_3xxx.h"
#include "cm-regbits-34xx.h"
#include "prm-regbits-34xx.h"
@@ -80,6 +83,11 @@ struct power_state {
struct list_head node;
};
+DEFINE_TRACE(pm_idle_entry);
+DEFINE_TRACE(pm_idle_exit);
+DEFINE_TRACE(pm_suspend_entry);
+DEFINE_TRACE(pm_suspend_exit);
+
static LIST_HEAD(pwrst_list);
static void (*_omap_sram_idle)(u32 *addr, int save_state);
@@ -519,8 +527,23 @@ static void omap3_pm_idle(void)
if (omap_irq_pending() || need_resched())
goto out;
+ trace_pm_idle_entry();
+ save_sync_trace_clock();
+
omap_sram_idle();
+ /*
+ * Resyncing the trace clock should ideally be done much sooner. When
+ * we arrive here, there are already some interrupt handlers which have
+ * run before us, using potentially wrong timestamps. This leads
+ * to problems when restarting the clock (and synchronizing on the 32k
+ * clock) if the cycle counter was still active.
+ * resync_trace_clock must ensure that timestamps never ever go
+ * backward.
+ */
+ resync_trace_clock();
+ trace_pm_idle_exit();
+
out:
local_fiq_enable();
local_irq_enable();
@@ -550,7 +573,11 @@ static int omap3_pm_suspend(void)
omap_uart_prepare_suspend();
omap3_intc_suspend();
- omap_sram_idle();
+ trace_pm_suspend_entry();
+ save_sync_trace_clock();
+ omap_sram_idle();
+ resync_trace_clock();
+ trace_pm_suspend_exit();
restore:
/* Restore next_pwrsts */
diff --git a/arch/arm/mach-omap2/trace-clock.c b/arch/arm/mach-omap2/trace-clock.c
new file mode 100644
index 000000000000..3db1cdb8d59d
--- /dev/null
+++ b/arch/arm/mach-omap2/trace-clock.c
@@ -0,0 +1,726 @@
+/*
+ * arch/arm/mach-omap2/trace-clock.c
+ *
+ * Trace clock for ARM OMAP3
+ *
+ * Mathieu Desnoyers <mathieu.desnoyers@polymtl.ca> 2009
+ */
+
+#include <linux/module.h>
+#include <linux/clocksource.h>
+#include <linux/timer.h>
+#include <linux/spinlock.h>
+#include <linux/init.h>
+#include <linux/cpu.h>
+#include <linux/cpufreq.h>
+#include <linux/err.h>
+
+#include <plat/clock.h>
+#include <asm/trace-clock.h>
+#include <asm/pmu.h>
+
+/* depends on CONFIG_OMAP_32K_TIMER */
+/* Need direct access to the clock from arch/arm/mach-omap2/timer-gp.c */
+static struct clocksource *clock;
+
+DEFINE_PER_CPU(struct pm_save_count, pm_save_count);
+EXPORT_PER_CPU_SYMBOL_GPL(pm_save_count);
+
+static void clear_ccnt_ms(unsigned long data);
+
+/* According to timer32k.c, this is a 32768Hz clock, not a 32000Hz clock. */
+#define TIMER_32K_FREQ 32768
+#define TIMER_32K_SHIFT 15
+
+/*
+ * Clear ccnt twice per 31-bit overflow period, or four times per 32-bit period.
+ */
+static u32 clear_ccnt_interval;
+
+static DEFINE_SPINLOCK(trace_clock_lock);
+static int trace_clock_refcount;
+
+static int print_info_done;
+
+static struct platform_device *reserved_pmu;
+
+static u32 get_mul_fact(u64 max_freq, u64 cur_freq)
+{
+ u64 rem;
+
+ BUG_ON(cur_freq == 0);
+ return __iter_div_u64_rem(max_freq << 10, cur_freq, &rem);
+}
+
+/*
+ * Cycle counter management.
+ */
+
+static inline void write_pmnc(u32 val)
+{
+ __asm__ __volatile__ ("mcr p15, 0, %0, c9, c12, 0" : : "r" (val));
+}
+
+static inline u32 read_pmnc(void)
+{
+ u32 val;
+ __asm__ __volatile__ ("mrc p15, 0, %0, c9, c12, 0" : "=r" (val));
+ return val;
+}
+
+static inline void write_ctens(u32 val)
+{
+ __asm__ __volatile__ ("mcr p15, 0, %0, c9, c12, 1" : : "r" (val));
+}
+
+static inline u32 read_ctens(void)
+{
+ u32 val;
+ __asm__ __volatile__ ("mrc p15, 0, %0, c9, c12, 1" : "=r" (val));
+ return val;
+}
+
+static inline void write_intenc(u32 val)
+{
+ __asm__ __volatile__ ("mcr p15, 0, %0, c9, c14, 2" : : "r" (val));
+}
+
+static inline u32 read_intenc(void)
+{
+ u32 val;
+ __asm__ __volatile__ ("mrc p15, 0, %0, c9, c14, 2" : "=r" (val));
+ return val;
+}
+
+static inline void write_useren(u32 val)
+{
+ __asm__ __volatile__ ("mcr p15, 0, %0, c9, c14, 0" : : "r" (val));
+}
+
+static inline u32 read_useren(void)
+{
+ u32 val;
+ __asm__ __volatile__ ("mrc p15, 0, %0, c9, c14, 0" : "=r" (val));
+ return val;
+}
+
+/*
+ * Must disable counter before writing to it.
+ */
+static inline void write_ccnt(u32 val)
+{
+ __asm__ __volatile__ ("mcr p15, 0, %0, c9, c13, 0" : : "r" (val));
+}
+
+/*
+ * Periodic timer handler: clears the ccnt most significant bit each half-period
+ * of the 31-bit overflow period, making sure the ccnt never overflows.
+ */
+static void clear_ccnt_ms(unsigned long data)
+{
+ struct pm_save_count *pm_count;
+ unsigned int cycles;
+ unsigned long flags;
+ int cpu;
+
+ cpu = smp_processor_id();
+ pm_count = &per_cpu(pm_save_count, cpu);
+
+ local_irq_save(flags);
+
+ if (!pm_count->fast_clock_ready)
+ goto end;
+
+ isb(); /* clear the pipeline so we can execute ASAP */
+ write_ctens(read_ctens() & ~(1 << 31)); /* disable counter */
+ cycles = read_ccnt();
+ write_ccnt(cycles & ~(1 << 31));
+ isb();
+ write_ctens(read_ctens() | (1 << 31)); /* enable counter */
+ isb();
+end:
+ local_irq_restore(flags);
+
+ mod_timer_pinned(&pm_count->clear_ccnt_ms_timer,
+ jiffies + clear_ccnt_interval);
+}
+
+/*
+ * Disable interrupts to protect against concurrent IPI save/resync.
+ */
+void save_sync_trace_clock(void)
+{
+ struct pm_save_count *pm_count;
+ unsigned long flags;
+ int cpu;
+
+ local_irq_save(flags);
+ cpu = smp_processor_id();
+ pm_count = &per_cpu(pm_save_count, cpu);
+ raw_spin_lock(&pm_count->lock);
+
+ if (!pm_count->refcount)
+ goto end;
+
+ pm_count->ext_32k = clock->read(clock);
+ pm_count->int_fast_clock = trace_clock_read64();
+end:
+ raw_spin_unlock(&pm_count->lock);
+
+ /*
+ * Only enable slow read after saving the clock values.
+ */
+ barrier();
+ pm_count->fast_clock_ready = 0;
+
+ /*
+ * Disable counter to ensure there is no overflow while we are
+ * keeping track of time with ext. clock.
+ */
+ write_ctens(read_ctens() & ~(1 << 31)); /* disable counter */
+ local_irq_restore(flags);
+}
+
+/*
+ * Called with preemption disabled. Reads the external clock source directly
+ * and returns the corresponding time in the fast clock source time frame.
+ * Called after time is saved and before it is resynced.
+ * Also used to periodically resync the drifting DVFS clock with the external clock.
+ */
+u64 _trace_clock_read_slow(void)
+{
+ struct pm_save_count *pm_count;
+ u64 ref_time;
+ unsigned int count_32k;
+ int cpu;
+
+ cpu = smp_processor_id();
+ pm_count = &per_cpu(pm_save_count, cpu);
+ WARN_ON_ONCE(!pm_count->refcount);
+
+ /*
+ * Set the timer's value MSBs to the same as current 32K timer.
+ */
+ ref_time = pm_count->int_fast_clock;
+ if (!pm_count->init_clock)
+ count_32k = clock->read(clock);
+ else
+ count_32k = pm_count->init_clock;
+
+ /*
+ * Delta computed on 32 bits, then cast to u64. We must be called
+ * often enough that the difference never overflows 32 bits.
+ */
+ ref_time += (u64)(count_32k - pm_count->ext_32k)
+ * (cpu_hz >> TIMER_32K_SHIFT);
+ return ref_time;
+}
+EXPORT_SYMBOL_GPL(_trace_clock_read_slow);
+
+/*
+ * Resynchronize the per-cpu fast clock with the last save_sync values and the
+ * external clock. Called from PM (thread) context and IPI context.
+ */
+void resync_trace_clock(void)
+{
+ struct pm_save_count *pm_count;
+ struct tc_cur_freq *new_cf, *cf;
+ unsigned int new_index, index;
+ u64 ref_time;
+ unsigned long flags;
+ u32 regval;
+ int cpu;
+
+ local_irq_save(flags);
+ cpu = smp_processor_id();
+ pm_count = &per_cpu(pm_save_count, cpu);
+ raw_spin_lock(&pm_count->lock);
+
+ if (!pm_count->refcount)
+ goto end;
+
+ /* Let userspace access performance counter registers */
+ regval = read_useren();
+ regval |= (1 << 0); /* User mode enable */
+ write_useren(regval);
+
+ regval = read_intenc();
+ regval |= (1 << 31); /* CCNT overflow interrupt disable */
+ write_intenc(regval);
+
+ regval = read_pmnc();
+ regval |= (1 << 0); /* Enable all counters */
+ regval &= ~(1 << 3); /* count every cycle */
+ regval &= ~(1 << 5); /* Enable even in non-invasive debug prohib. */
+ write_pmnc(regval);
+
+ ref_time = _trace_clock_read_slow();
+
+ if (pm_count->init_clock)
+ pm_count->init_clock = 0;
+
+ write_ctens(read_ctens() & ~(1 << 31)); /* disable counter */
+ write_ccnt((u32)ref_time & ~(1 << 31));
+ write_ctens(read_ctens() | (1 << 31)); /* enable counter */
+
+ _trace_clock_write_synthetic_tsc(ref_time);
+
+ index = pm_count->index;
+ new_index = 1 - index;
+ cf = &pm_count->cf[index];
+ new_cf = &pm_count->cf[new_index];
+ new_cf->hw_base = ref_time;
+ new_cf->virt_base = ref_time;
+ new_cf->cur_cpu_freq = cpufreq_quick_get(cpu);
+ if (new_cf->cur_cpu_freq == 0)
+ new_cf->cur_cpu_freq = pm_count->max_cpu_freq;
+ new_cf->mul_fact = get_mul_fact(pm_count->max_cpu_freq,
+ new_cf->cur_cpu_freq);
+ new_cf->floor = max(ref_time, cf->floor);
+ barrier();
+ pm_count->index = new_index;
+ barrier(); /* make clock ready before enabling */
+ pm_count->fast_clock_ready = 1;
+
+ /* Delete the resync timer if present; it has just done its job anyway. */
+ if (pm_count->dvfs_count)
+ del_timer(&pm_count->clock_resync_timer);
+ pm_count->dvfs_count = 0;
+
+ if (unlikely(!print_info_done)) {
+ printk(KERN_INFO "Trace clock using cycle counter at %llu HZ\n"
+ "saved 32k clk value 0x%08X, "
+ "saved cycle counter value 0x%016llX\n"
+ "synthetic value (write, read) 0x%016llX, 0x%016llX\n",
+ cpu_hz,
+ pm_count->ext_32k,
+ pm_count->int_fast_clock,
+ ref_time, trace_clock_read64());
+ printk(KERN_INFO "Reference clock used : %s\n", clock->name);
+ print_info_done = 1;
+ }
+end:
+ raw_spin_unlock(&pm_count->lock);
+ local_irq_restore(flags);
+}
+
+/*
+ * Called with IRQ and FIQ off.
+ */
+static void resync_on_32k(struct pm_save_count *pm_count, int cpu,
+ unsigned int cached_freq, int new_freq)
+{
+ struct tc_cur_freq *new_cf, *cf;
+ u64 ref_time;
+ unsigned int new_index, index;
+
+ index = pm_count->index;
+
+ new_index = 1 - index;
+ cf = &pm_count->cf[index];
+ new_cf = &pm_count->cf[new_index];
+ ref_time = _trace_clock_read_slow();
+ new_cf->hw_base = trace_clock_read_synthetic_tsc();
+ new_cf->virt_base = ref_time;
+ if (cached_freq)
+ new_cf->cur_cpu_freq = cf->cur_cpu_freq;
+ else {
+ new_cf->cur_cpu_freq = new_freq;
+ if (new_cf->cur_cpu_freq == 0)
+ new_cf->cur_cpu_freq = pm_count->max_cpu_freq;
+ }
+ new_cf->mul_fact = get_mul_fact(pm_count->max_cpu_freq,
+ new_cf->cur_cpu_freq);
+ new_cf->floor = max((((new_cf->hw_base - cf->hw_base)
+ * cf->mul_fact) >> 10) + cf->virt_base,
+ cf->floor);
+ barrier();
+ pm_count->index = new_index;
+}
+
+/*
+ * Timer to resynchronize with ext. 32k clock after DVFS update (but not too
+ * often if flooded by DVFS updates).
+ * Necessary to deal with drift caused by DVFS updates.
+ * Per-cpu timer added by cpu freq events, single-shot.
+ */
+static void clock_resync_timer_fct(unsigned long data)
+{
+ struct pm_save_count *pm_count;
+ unsigned long flags;
+ int cpu;
+
+ cpu = smp_processor_id();
+ pm_count = &per_cpu(pm_save_count, cpu);
+
+ local_irq_save(flags);
+ local_fiq_disable(); /* disable fiqs for floor value */
+
+ /* Need to resync if we had more than 1 dvfs event in period */
+ if (pm_count->dvfs_count > 1)
+ resync_on_32k(pm_count, cpu, 1, 0);
+ pm_count->dvfs_count = 0;
+
+ local_fiq_enable();
+ local_irq_restore(flags);
+}
+
+static void prepare_timer(int cpu)
+{
+ struct pm_save_count *pm_count;
+
+ pm_count = &per_cpu(pm_save_count, cpu);
+ init_timer_deferrable(&pm_count->clear_ccnt_ms_timer);
+ pm_count->clear_ccnt_ms_timer.function = clear_ccnt_ms;
+ pm_count->clear_ccnt_ms_timer.expires = jiffies + clear_ccnt_interval;
+
+ init_timer_deferrable(&pm_count->clock_resync_timer);
+ pm_count->clock_resync_timer.function = clock_resync_timer_fct;
+}
+
+static void enable_timer(int cpu)
+{
+ struct pm_save_count *pm_count;
+
+ pm_count = &per_cpu(pm_save_count, cpu);
+ add_timer_on(&pm_count->clear_ccnt_ms_timer, cpu);
+}
+
+static void disable_timer_ipi(void *info)
+{
+ save_sync_trace_clock();
+}
+
+static void disable_timer(int cpu)
+{
+ struct pm_save_count *pm_count;
+
+ pm_count = &per_cpu(pm_save_count, cpu);
+ del_timer_sync(&pm_count->clear_ccnt_ms_timer);
+ if (pm_count->dvfs_count)
+ del_timer_sync(&pm_count->clock_resync_timer);
+ smp_call_function_single(cpu, disable_timer_ipi, NULL, 1);
+}
+
+static void resync_ipi(void *info)
+{
+ resync_trace_clock();
+}
+
+void _start_trace_clock(void)
+{
+ struct pm_save_count *pm_count;
+ u32 ext_32k;
+ u64 old_fast_clock;
+ int cpu;
+
+ ext_32k = clock->read(clock);
+ old_fast_clock = per_cpu(pm_save_count, 0).int_fast_clock;
+
+ for_each_online_cpu(cpu) {
+ pm_count = &per_cpu(pm_save_count, cpu);
+ pm_count->ext_32k = ext_32k;
+ pm_count->int_fast_clock = old_fast_clock;
+ pm_count->refcount = 1;
+ pm_count->init_clock = ext_32k;
+ pm_count->dvfs_count = 0;
+ }
+
+ on_each_cpu(resync_ipi, NULL, 1);
+
+ get_synthetic_tsc();
+
+ for_each_online_cpu(cpu) {
+ prepare_timer(cpu);
+ enable_timer(cpu);
+ }
+}
+
+void _stop_trace_clock(void)
+{
+ struct pm_save_count *pm_count;
+ int cpu;
+
+ per_cpu(pm_save_count, 0).int_fast_clock = trace_clock_read64();
+
+ for_each_online_cpu(cpu) {
+ pm_count = &per_cpu(pm_save_count, cpu);
+ disable_timer(cpu);
+ pm_count->refcount = 0;
+ }
+ put_synthetic_tsc();
+}
+
+void start_trace_clock(void)
+{
+ spin_lock(&trace_clock_lock);
+ if (!trace_clock_refcount)
+ goto end;
+ _start_trace_clock();
+end:
+ spin_unlock(&trace_clock_lock);
+}
+
+void stop_trace_clock(void)
+{
+ spin_lock(&trace_clock_lock);
+ if (!trace_clock_refcount)
+ goto end;
+ _stop_trace_clock();
+end:
+ spin_unlock(&trace_clock_lock);
+}
+
+/*
+ * hotcpu_callback - CPU hotplug callback
+ * @nb: notifier block
+ * @action: hotplug action to take
+ * @hcpu: CPU number
+ *
+ * Start/stop timers for trace clock upon cpu hotplug.
+ * Also resync the clock.
+ *
+ * Returns the success/failure of the operation. (NOTIFY_OK, NOTIFY_BAD)
+ */
+static int __cpuinit hotcpu_callback(struct notifier_block *nb,
+ unsigned long action,
+ void *hcpu)
+{
+ struct pm_save_count *pm_count;
+ unsigned int hotcpu = (unsigned long)hcpu;
+ unsigned long flags;
+
+ switch (action) {
+ case CPU_UP_PREPARE:
+ case CPU_UP_PREPARE_FROZEN:
+ spin_lock(&trace_clock_lock);
+ if (trace_clock_refcount) {
+ pm_count = &per_cpu(pm_save_count, hotcpu);
+ local_irq_save(flags);
+ pm_count->ext_32k = clock->read(clock);
+ pm_count->int_fast_clock = trace_clock_read64();
+ local_irq_restore(flags);
+ pm_count->refcount = 1;
+ pm_count->dvfs_count = 0;
+ prepare_timer(hotcpu);
+ }
+ spin_unlock(&trace_clock_lock);
+ break;
+ case CPU_ONLINE:
+ case CPU_ONLINE_FROZEN:
+ spin_lock(&trace_clock_lock);
+ if (trace_clock_refcount) {
+ resync_trace_clock();
+ enable_timer(hotcpu);
+ }
+ spin_unlock(&trace_clock_lock);
+ break;
+#ifdef CONFIG_HOTPLUG_CPU
+ case CPU_DOWN_PREPARE:
+ case CPU_DOWN_PREPARE_FROZEN:
+ spin_lock(&trace_clock_lock);
+ if (trace_clock_refcount)
+ disable_timer(hotcpu);
+ spin_unlock(&trace_clock_lock);
+ break;
+ case CPU_DEAD:
+ case CPU_DEAD_FROZEN:
+ spin_lock(&trace_clock_lock);
+ if (trace_clock_refcount) {
+ pm_count = &per_cpu(pm_save_count, hotcpu);
+ pm_count->refcount = 0;
+ }
+ spin_unlock(&trace_clock_lock);
+ break;
+#endif /* CONFIG_HOTPLUG_CPU */
+ }
+ return NOTIFY_OK;
+}
+
+int get_trace_clock(void)
+{
+ int ret = 0;
+
+ spin_lock(&trace_clock_lock);
+ if (trace_clock_refcount)
+ goto end;
+ reserved_pmu = reserve_pmu(ARM_PMU_DEVICE_CPU);
+ if (IS_ERR_OR_NULL(reserved_pmu) && PTR_ERR(reserved_pmu) != -ENODEV) {
+ ret = -EBUSY;
+ goto end;
+ }
+ trace_clock_refcount++;
+ _start_trace_clock();
+end:
+ spin_unlock(&trace_clock_lock);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(get_trace_clock);
+
+void put_trace_clock(void)
+{
+ spin_lock(&trace_clock_lock);
+ WARN_ON(trace_clock_refcount <= 0);
+ if (trace_clock_refcount != 1)
+ goto end;
+ _stop_trace_clock();
+ release_pmu(reserved_pmu);
+end:
+ trace_clock_refcount--;
+ spin_unlock(&trace_clock_lock);
+}
+EXPORT_SYMBOL_GPL(put_trace_clock);
+
+/*
+ * We do not use the prechange hook to sample two clock values and average them,
+ * because locking with respect to other timers can be difficult to get right.
+ * A bit more imprecision just increases the drift; we have a periodic timer
+ * in place to resynchronize on the 32k clock anyway.
+ */
+static int cpufreq_trace_clock(struct notifier_block *nb,
+ unsigned long val, void *data)
+{
+ struct cpufreq_freqs *freq = data;
+ struct pm_save_count *pm_count;
+ struct tc_cur_freq *new_cf, *cf;
+ unsigned long flags;
+ unsigned int new_index, index;
+ u64 post_val;
+ int cpu;
+
+#if 0 /* debug trace_mark */
+ trace_mark(test, freq_change,
+ "%s cpu %u oldfreq %u newfreq %u const %u",
+ (val != CPUFREQ_POSTCHANGE) ? "prechange" : "postchange",
+ freq->cpu, freq->old, freq->new,
+ (freq->flags & CPUFREQ_CONST_LOOPS) ? 1 : 0);
+#endif
+
+ if (freq->flags & CPUFREQ_CONST_LOOPS)
+ return 0;
+
+ if (val != CPUFREQ_POSTCHANGE)
+ return 0;
+
+ local_irq_save(flags);
+ cpu = smp_processor_id();
+ WARN_ON_ONCE(cpu != freq->cpu);
+ pm_count = &per_cpu(pm_save_count, cpu);
+ raw_spin_lock(&pm_count->lock);
+
+ if (!pm_count->refcount)
+ goto end;
+
+ /*
+ * Disable FIQs to ensure the floor value is indeed the
+ * floor.
+ */
+ local_fiq_disable();
+
+ if (!pm_count->dvfs_count) {
+ resync_on_32k(pm_count, cpu, 0, freq->new);
+ pm_count->clock_resync_timer.expires = jiffies
+ + (TC_RESYNC_PERIOD * HZ / 1000);
+ add_timer_on(&pm_count->clock_resync_timer, cpu);
+ } else {
+ post_val = trace_clock_read_synthetic_tsc();
+ /* disable irqs to ensure we are the only value modifier */
+ index = pm_count->index;
+ new_index = 1 - index;
+ cf = &pm_count->cf[index];
+ new_cf = &pm_count->cf[new_index];
+ new_cf->hw_base = post_val;
+ new_cf->virt_base = (((post_val - cf->hw_base)
+ * cf->mul_fact) >> 10) + cf->virt_base;
+ new_cf->cur_cpu_freq = freq->new;
+ new_cf->mul_fact = get_mul_fact(pm_count->max_cpu_freq,
+ freq->new);
+ new_cf->floor = max((((post_val - cf->hw_base)
+ * cf->mul_fact) >> 10) + cf->virt_base,
+ cf->floor);
+ barrier();
+ pm_count->index = new_index;
+ }
+
+ local_fiq_enable();
+ pm_count->dvfs_count++;
+end:
+ raw_spin_unlock(&pm_count->lock);
+ local_irq_restore(flags);
+ return 0;
+}
+
+static struct notifier_block cpufreq_trace_clock_nb = {
+ .notifier_call = cpufreq_trace_clock,
+};
+
+#ifdef CONFIG_DEBUG_TRACE_CLOCK
+/*
+ * Clock expected to never overflow and never go backward.
+ */
+static DEFINE_PER_CPU(u64, last_clock_value);
+static DEFINE_PER_CPU(u32, last_ccnt_value);
+DEFINE_PER_CPU(unsigned int, last_clock_nest);
+EXPORT_PER_CPU_SYMBOL_GPL(last_clock_nest);
+
+static int tc_print_done;
+
+/*
+ * Called with interrupts disabled.
+ */
+void trace_clock_debug(u64 value)
+{
+ int cpu;
+
+ cpu = smp_processor_id();
+ if (unlikely(per_cpu(last_clock_nest, cpu) != 1))
+ return; /* fiq nesting, don't perform racy check */
+ if (unlikely(!tc_print_done
+ && (per_cpu(last_clock_value, cpu) > value))) {
+ printk(KERN_WARNING "Trace clock going back last %llu new %llu "
+ "diff %llu last_ccnt %u ccnt %u\n",
+ (unsigned long long) per_cpu(last_clock_value, cpu),
+ (unsigned long long) value,
+ (unsigned long long) per_cpu(last_clock_value, cpu)
+ - value,
+ per_cpu(last_ccnt_value, cpu),
+ trace_clock_read32());
+ tc_print_done = 1;
+ }
+ per_cpu(last_clock_value, cpu) = value;
+ per_cpu(last_ccnt_value, cpu) = trace_clock_read32();
+}
+EXPORT_SYMBOL_GPL(trace_clock_debug);
+#endif
+
+static __init int init_trace_clock(void)
+{
+ int cpu, ret;
+ u64 rem;
+
+ ret = init_pmu(ARM_PMU_DEVICE_CPU);
+ if (ret && ret != -ENODEV)
+ return ret;
+ clock = get_clocksource_32k();
+ /*
+ * clear_ccnt_interval is based on the fastest CPU frequency. It is never
+ * recomputed.
+ */
+ clear_ccnt_interval = __iter_div_u64_rem(HZ * (1ULL << 30), cpu_hz,
+ &rem);
+ printk(KERN_INFO "LTTng will clear ccnt top bit every %u jiffies.\n",
+ clear_ccnt_interval);
+ for_each_possible_cpu(cpu) {
+ per_cpu(pm_save_count, cpu).max_cpu_freq =
+ __iter_div_u64_rem(cpu_hz, 1000, &rem);
+ per_cpu(pm_save_count, cpu).lock =
+ __RAW_SPIN_LOCK_UNLOCKED(per_cpu(pm_save_count,
+ cpu).lock);
+ }
+ hotcpu_notifier(hotcpu_callback, 4);
+ cpufreq_register_notifier(&cpufreq_trace_clock_nb,
+ CPUFREQ_TRANSITION_NOTIFIER);
+ return 0;
+}
+__initcall(init_trace_clock);
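
For clarity, a stand-alone sketch of the fixed-point arithmetic used above. It mirrors get_mul_fact() and the virtual-time computation in resync_on_32k()/cpufreq_trace_clock(): mul_fact is (max_freq << 10) / cur_freq, and virtual time is ((now - hw_base) * mul_fact >> 10) + virt_base, clamped by floor so it never goes backward. The frequencies below are made-up values, not taken from the patch.

/*
 * Stand-alone illustration of the cycle-counter scaling math used by the
 * trace clock. Not kernel code: only the arithmetic mirrors the driver.
 */
#include <stdio.h>
#include <stdint.h>

static uint32_t mul_fact(uint64_t max_khz, uint64_t cur_khz)
{
	/* Same Q10 fixed-point factor as get_mul_fact(). */
	return (uint32_t)((max_khz << 10) / cur_khz);
}

static uint64_t virt_time(uint64_t now, uint64_t hw_base,
			  uint64_t virt_base, uint32_t fact, uint64_t floor)
{
	uint64_t t = (((now - hw_base) * fact) >> 10) + virt_base;

	return t > floor ? t : floor;	/* floor keeps time monotonic */
}

int main(void)
{
	uint64_t max_khz = 600000, cur_khz = 300000;	/* hypothetical OPPs */
	uint32_t fact = mul_fact(max_khz, cur_khz);	/* 2.0 in Q10 = 2048 */

	/* 1000 cycles at half speed count as 2000 max-frequency cycles. */
	printf("fact=%u virt=%llu\n", fact,
	       (unsigned long long)virt_time(1000, 0, 0, fact, 0));
	return 0;
}

The effect is that the trace clock always ticks at the maximum CPU frequency: running at half speed doubles the per-cycle increment, and the floor keeps timestamps monotonic across the rebase done at each DVFS transition.
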
diff --git a/arch/arm/plat-omap/Kconfig b/arch/arm/plat-omap/Kconfig
index b6333ae3f92a..99593d71a855 100644
--- a/arch/arm/plat-omap/Kconfig
+++ b/arch/arm/plat-omap/Kconfig
@@ -17,6 +17,10 @@ config ARCH_OMAP1
config ARCH_OMAP2PLUS
bool "TI OMAP2/3/4"
+ select COMMON_CLKDEV
+ select HAVE_TRACE_CLOCK
+ select HAVE_TRACE_CLOCK_32_TO_64
+ select OMAP_32K_TIMER
select CLKDEV_LOOKUP
select OMAP_DM_TIMER
help
diff --git a/arch/arm/plat-omap/counter_32k.c b/arch/arm/plat-omap/counter_32k.c
index 862dda95d61d..8627a5166886 100644
--- a/arch/arm/plat-omap/counter_32k.c
+++ b/arch/arm/plat-omap/counter_32k.c
@@ -107,6 +107,11 @@ static struct clocksource clocksource_32k = {
.flags = CLOCK_SOURCE_IS_CONTINUOUS,
};
+struct clocksource *get_clocksource_32k(void)
+{
+ return &clocksource_32k;
+}
+
/*
* Returns current time from boot in nsecs. It's OK for this to wrap
* around for now, as it's just a relative time stamp.
diff --git a/arch/arm/plat-omap/include/plat/clock.h b/arch/arm/plat-omap/include/plat/clock.h
index 8eb0adab19ea..2ad01f37a3c0 100644
--- a/arch/arm/plat-omap/include/plat/clock.h
+++ b/arch/arm/plat-omap/include/plat/clock.h
@@ -297,4 +297,6 @@ extern const struct clkops clkops_null;
extern struct clk dummy_ck;
+struct clocksource *get_clocksource_32k(void);
+
#endif
diff --git a/arch/arm/plat-omap/include/plat/trace-clock.h b/arch/arm/plat-omap/include/plat/trace-clock.h
new file mode 100644
index 000000000000..7fcdbf98063c
--- /dev/null
+++ b/arch/arm/plat-omap/include/plat/trace-clock.h
@@ -0,0 +1,172 @@
+/*
+ * Copyright (C) 2009 Mathieu Desnoyers
+ *
+ * Trace clock ARM OMAP3 definitions.
+ */
+
+#ifndef _ASM_ARM_TRACE_CLOCK_OMAP3_H
+#define _ASM_ARM_TRACE_CLOCK_OMAP3_H
+
+#include <linux/clk.h>
+#include <linux/timer.h>
+#include <linux/percpu.h>
+#include <plat/clock.h>
+
+/*
+ * Number of hardware clock bits. The higher order bits are expected to be 0.
+ * If the hardware clock source has more than 32 bits, the bits higher than the
+ * 32nd will be truncated by a cast to a 32-bit unsigned. Range: 1 - 32.
+ * (Too few bits would be unrealistic, though, since we depend on the timer to
+ * detect the overflows.)
+ * OMAP3-specific: we clear bit 31 periodically so the counter never overflows.
+ * There is a hardware bug when a CP14 or CP15 access is executed at the same
+ * time a ccnt overflow occurs.
+ *
+ * Siarhei Siamashka <siarhei.siamashka@nokia.com> :
+ * Performance monitoring unit breaks if somebody is accessing CP14/CP15
+ * coprocessor register exactly at the same time as CCNT overflows (regardless
+ * of the fact if generation of interrupts is enabled or not). A workaround
+ * suggested by ARM was to never allow it to overflow and reset it
+ * periodically.
+ */
+#define TC_HW_BITS 31
+
+/* Expected maximum interrupt latency in ms : 15ms, *2 for security */
+#define TC_EXPECTED_INTERRUPT_LATENCY 30
+
+/* Resync with 32k clock each 100ms */
+#define TC_RESYNC_PERIOD 100
+
+struct tc_cur_freq {
+ u64 cur_cpu_freq; /* in khz */
+ /* cur time : (now - base) * (max_freq / cur_freq) + base */
+ u32 mul_fact; /* (max_cpu_freq << 10) / cur_freq */
+ u64 hw_base; /* stamp of last cpufreq change, hw cycles */
+ u64 virt_base; /* same as above, virtual trace clock cycles */
+ u64 floor; /* floor value, so time never goes backward */
+};
+
+/* 32KHz counter per-cpu count save upon PM sleep and cpufreq management */
+struct pm_save_count {
+ struct tc_cur_freq cf[2]; /* rcu-protected */
+ unsigned int index; /* tc_cur_freq current read index */
+ /*
+ * Is fast clock ready to be read ? Read with preemption off. Modified
+ * only by local CPU in thread and interrupt context or by start/stop
+ * when time is not read concurrently.
+ */
+ int fast_clock_ready;
+
+ u64 int_fast_clock;
+ struct timer_list clear_ccnt_ms_timer;
+ struct timer_list clock_resync_timer;
+ u32 ext_32k;
+ int refcount;
+ u32 init_clock;
+ raw_spinlock_t lock; /* spinlock only syncs the refcount */
+ unsigned int dvfs_count; /* Number of DVFS updates in period */
+ /* cpufreq management */
+ u64 max_cpu_freq; /* in khz */
+};
+
+DECLARE_PER_CPU(struct pm_save_count, pm_save_count);
+
+extern u64 trace_clock_read_synthetic_tsc(void);
+extern void _trace_clock_write_synthetic_tsc(u64 value);
+extern unsigned long long cpu_hz;
+
+DECLARE_PER_CPU(int, fast_clock_ready);
+extern u64 _trace_clock_read_slow(void);
+
+/*
+ * ARM OMAP3 timers only return 32-bit values. We need to extend them to a
+ * 64-bit value, which is provided by trace-clock-32-to-64.
+ */
+extern u64 trace_clock_async_tsc_read(void);
+/*
+ * Update done by the architecture upon wakeup.
+ */
+extern void _trace_clock_write_synthetic_tsc(u64 value);
+
+#ifdef CONFIG_DEBUG_TRACE_CLOCK
+DECLARE_PER_CPU(unsigned int, last_clock_nest);
+extern void trace_clock_debug(u64 value);
+#else
+static inline void trace_clock_debug(u64 value)
+{
+}
+#endif
+
+static inline u32 read_ccnt(void)
+{
+ u32 val;
+ __asm__ __volatile__ ("mrc p15, 0, %0, c9, c13, 0" : "=r" (val));
+ return val & ~(1 << TC_HW_BITS);
+}
+
+static inline u32 trace_clock_read32(void)
+{
+ u32 val;
+
+ isb();
+ val = read_ccnt();
+ isb();
+ return val;
+}
+
+static inline u64 trace_clock_read64(void)
+{
+ struct pm_save_count *pm_count;
+ struct tc_cur_freq *cf;
+ u64 val;
+#ifdef CONFIG_DEBUG_TRACE_CLOCK
+ unsigned long flags;
+
+ local_irq_save(flags);
+ per_cpu(last_clock_nest, smp_processor_id())++;
+ barrier();
+#endif
+
+ preempt_disable();
+ pm_count = &per_cpu(pm_save_count, smp_processor_id());
+ if (likely(pm_count->fast_clock_ready)) {
+ cf = &pm_count->cf[ACCESS_ONCE(pm_count->index)];
+ val = max((((trace_clock_read_synthetic_tsc() - cf->hw_base)
+ * cf->mul_fact) >> 10) + cf->virt_base, cf->floor);
+ } else
+ val = _trace_clock_read_slow();
+ trace_clock_debug(val);
+ preempt_enable();
+
+#ifdef CONFIG_DEBUG_TRACE_CLOCK
+ barrier();
+ per_cpu(last_clock_nest, smp_processor_id())--;
+ local_irq_restore(flags);
+#endif
+ return val;
+}
+
+static inline u64 trace_clock_frequency(void)
+{
+ return cpu_hz;
+}
+
+static inline u32 trace_clock_freq_scale(void)
+{
+ return 1;
+}
+
+extern int get_trace_clock(void);
+extern void put_trace_clock(void);
+extern void get_synthetic_tsc(void);
+extern void put_synthetic_tsc(void);
+
+extern void resync_trace_clock(void);
+extern void save_sync_trace_clock(void);
+extern void start_trace_clock(void);
+extern void stop_trace_clock(void);
+
+static inline void set_trace_clock_is_sync(int state)
+{
+}
+#endif /* _ASM_ARM_TRACE_CLOCK_OMAP3_H */
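
For reference, a minimal sketch of how a tracer might consume this clock, using only the API declared in this header (get_trace_clock(), trace_clock_read64(), trace_clock_frequency(), put_trace_clock()). This is an illustration, not part of the patch; a real user such as LTTng holds the clock for the whole tracing session rather than per-event.

/*
 * Minimal consumer sketch, assuming only the API declared above.
 */
#include <linux/kernel.h>
#include <asm/trace-clock.h>

static int example_start_tracing(void)
{
	int ret;

	ret = get_trace_clock();	/* reserves the PMU, starts the clock */
	if (ret)
		return ret;

	pr_info("trace clock at %llu Hz, now=%llu\n",
		trace_clock_frequency(), trace_clock_read64());
	return 0;
}

static void example_stop_tracing(void)
{
	put_trace_clock();		/* drops the refcount, stops the clock */
}
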