diff options
author | Avik Sil <avik.sil@linaro.org> | 2011-03-31 11:06:38 +0000 |
---|---|---|
committer | Avik Sil <avik.sil@linaro.org> | 2011-03-31 11:06:38 +0000 |
commit | ebb688e3183bd5891312bdb8f4e2f520d70b36b6 (patch) | |
tree | c30d1abefaccc8cd1baa4944aae3348668e13bde /arch/arm/plat-omap | |
parent | 8061f3a885ec3538bf405ff3957c205b1ab2aae4 (diff) | |
parent | b2afcd30fff4c24290a63a2497de301864d9726d (diff) |
Merge remote branch 'lttng/2.6.38-lttng-0.247'
Conflicts:
arch/arm/kernel/traps.c
arch/arm/mach-omap2/clock34xx.c
arch/arm/mach-omap2/pm34xx.c
Diffstat (limited to 'arch/arm/plat-omap')
-rw-r--r-- | arch/arm/plat-omap/Kconfig | 3 | ||||
-rw-r--r-- | arch/arm/plat-omap/counter_32k.c | 5 | ||||
-rw-r--r-- | arch/arm/plat-omap/include/plat/clock.h | 2 | ||||
-rw-r--r-- | arch/arm/plat-omap/include/plat/trace-clock.h | 172 |
4 files changed, 182 insertions, 0 deletions
diff --git a/arch/arm/plat-omap/Kconfig b/arch/arm/plat-omap/Kconfig index b6333ae3f92..283f4552d1d 100644 --- a/arch/arm/plat-omap/Kconfig +++ b/arch/arm/plat-omap/Kconfig @@ -19,6 +19,9 @@ config ARCH_OMAP2PLUS bool "TI OMAP2/3/4" select CLKDEV_LOOKUP select OMAP_DM_TIMER + select HAVE_TRACE_CLOCK + select HAVE_TRACE_CLOCK_32_TO_64 + select OMAP_32K_TIMER help "Systems based on OMAP2, OMAP3 or OMAP4" diff --git a/arch/arm/plat-omap/counter_32k.c b/arch/arm/plat-omap/counter_32k.c index f7fed608019..6a489caa43a 100644 --- a/arch/arm/plat-omap/counter_32k.c +++ b/arch/arm/plat-omap/counter_32k.c @@ -107,6 +107,11 @@ static struct clocksource clocksource_32k = { .flags = CLOCK_SOURCE_IS_CONTINUOUS, }; +struct clocksource *get_clocksource_32k(void) +{ + return &clocksource_32k; +} + /* * Returns current time from boot in nsecs. It's OK for this to wrap * around for now, as it's just a relative time stamp. diff --git a/arch/arm/plat-omap/include/plat/clock.h b/arch/arm/plat-omap/include/plat/clock.h index 006e599c661..2a27015b0aa 100644 --- a/arch/arm/plat-omap/include/plat/clock.h +++ b/arch/arm/plat-omap/include/plat/clock.h @@ -311,4 +311,6 @@ extern const struct clkops clkops_null; extern struct clk dummy_ck; +struct clocksource *get_clocksource_32k(void); + #endif diff --git a/arch/arm/plat-omap/include/plat/trace-clock.h b/arch/arm/plat-omap/include/plat/trace-clock.h new file mode 100644 index 00000000000..7fcdbf98063 --- /dev/null +++ b/arch/arm/plat-omap/include/plat/trace-clock.h @@ -0,0 +1,172 @@ +/* + * Copyright (C) 2009 Mathieu Desnoyers + * + * Trace clock ARM OMAP3 definitions. + */ + +#ifndef _ASM_ARM_TRACE_CLOCK_OMAP3_H +#define _ASM_ARM_TRACE_CLOCK_OMAP3_H + +#include <linux/clk.h> +#include <linux/timer.h> +#include <linux/percpu.h> +#include <plat/clock.h> + +/* + * Number of hardware clock bits. The higher order bits are expected to be 0. 
+ * If the hardware clock source has more than 32 bits, the bits higher than the + * 32nd will be truncated by a cast to a 32 bits unsigned. Range : 1 - 32. + * (too few bits would be unrealistic though, since we depend on the timer to + * detect the overflows). + * OMAP3-specific : we clear bit 31 periodically so it never overflows. There + * is a hardware bug with CP14 and CP15 being executed at the same time a ccnt + * overflow occurs. + * + * Siarhei Siamashka <siarhei.siamashka@nokia.com> : + * Performance monitoring unit breaks if somebody is accessing CP14/CP15 + * coprocessor register exactly at the same time as CCNT overflows (regardless + * of the fact if generation of interrupts is enabled or not). A workaround + * suggested by ARM was to never allow it to overflow and reset it + * periodically. + */ +#define TC_HW_BITS 31 + +/* Expected maximum interrupt latency in ms : 15ms, *2 for security */ +#define TC_EXPECTED_INTERRUPT_LATENCY 30 + +/* Resync with 32k clock each 100ms */ +#define TC_RESYNC_PERIOD 100 + +struct tc_cur_freq { + u64 cur_cpu_freq; /* in khz */ + /* cur time : (now - base) * (max_freq / cur_freq) + base */ + u32 mul_fact; /* (max_cpu_freq << 10) / cur_freq */ + u64 hw_base; /* stamp of last cpufreq change, hw cycles */ + u64 virt_base; /* same as above, virtual trace clock cycles */ + u64 floor; /* floor value, so time never go back */ +}; + +/* 32KHz counter per-cpu count save upon PM sleep and cpufreq management */ +struct pm_save_count { + struct tc_cur_freq cf[2]; /* rcu-protected */ + unsigned int index; /* tc_cur_freq current read index */ + /* + * Is fast clock ready to be read ? Read with preemption off. Modified + * only by local CPU in thread and interrupt context or by start/stop + * when time is not read concurrently. 
+ */ + int fast_clock_ready; + + u64 int_fast_clock; + struct timer_list clear_ccnt_ms_timer; + struct timer_list clock_resync_timer; + u32 ext_32k; + int refcount; + u32 init_clock; + raw_spinlock_t lock; /* spinlock only sync the refcount */ + unsigned int dvfs_count; /* Number of DVFS updates in period */ + /* cpufreq management */ + u64 max_cpu_freq; /* in khz */ +}; + +DECLARE_PER_CPU(struct pm_save_count, pm_save_count); + +extern u64 trace_clock_read_synthetic_tsc(void); +extern void _trace_clock_write_synthetic_tsc(u64 value); +extern unsigned long long cpu_hz; + +DECLARE_PER_CPU(int, fast_clock_ready); +extern u64 _trace_clock_read_slow(void); + +/* + * ARM OMAP3 timers only return 32-bits values. We need to extend it to a + * 64-bit value, which is provided by trace-clock-32-to-64. + */ +extern u64 trace_clock_async_tsc_read(void); +/* + * Update done by the architecture upon wakeup. + */ +extern void _trace_clock_write_synthetic_tsc(u64 value); + +#ifdef CONFIG_DEBUG_TRACE_CLOCK +DECLARE_PER_CPU(unsigned int, last_clock_nest); +extern void trace_clock_debug(u64 value); +#else +static inline void trace_clock_debug(u64 value) +{ +} +#endif + +static inline u32 read_ccnt(void) +{ + u32 val; + __asm__ __volatile__ ("mrc p15, 0, %0, c9, c13, 0" : "=r" (val)); + return val & ~(1 << TC_HW_BITS); +} + +static inline u32 trace_clock_read32(void) +{ + u32 val; + + isb(); + val = read_ccnt(); + isb(); + return val; +} + +static inline u64 trace_clock_read64(void) +{ + struct pm_save_count *pm_count; + struct tc_cur_freq *cf; + u64 val; +#ifdef CONFIG_DEBUG_TRACE_CLOCK + unsigned long flags; + + local_irq_save(flags); + per_cpu(last_clock_nest, smp_processor_id())++; + barrier(); +#endif + + preempt_disable(); + pm_count = &per_cpu(pm_save_count, smp_processor_id()); + if (likely(pm_count->fast_clock_ready)) { + cf = &pm_count->cf[ACCESS_ONCE(pm_count->index)]; + val = max((((trace_clock_read_synthetic_tsc() - cf->hw_base) + * cf->mul_fact) >> 10) + cf->virt_base, 
cf->floor); + } else + val = _trace_clock_read_slow(); + trace_clock_debug(val); + preempt_enable(); + +#ifdef CONFIG_DEBUG_TRACE_CLOCK + barrier(); + per_cpu(last_clock_nest, smp_processor_id())--; + local_irq_restore(flags); +#endif + return val; +} + +static inline u64 trace_clock_frequency(void) +{ + return cpu_hz; +} + +static inline u32 trace_clock_freq_scale(void) +{ + return 1; +} + +extern int get_trace_clock(void); +extern void put_trace_clock(void); +extern void get_synthetic_tsc(void); +extern void put_synthetic_tsc(void); + +extern void resync_trace_clock(void); +extern void save_sync_trace_clock(void); +extern void start_trace_clock(void); +extern void stop_trace_clock(void); + +static inline void set_trace_clock_is_sync(int state) +{ +} +#endif /* _ASM_ARM_TRACE_CLOCK_OMAP3_H */ |