path: root/arch/x86/kernel/tsc_sync.c
author	Thomas Gleixner <tglx@linutronix.de>	2016-11-19 13:47:39 +0000
committer	Thomas Gleixner <tglx@linutronix.de>	2016-11-29 19:23:17 +0100
commit	a36f5136814b6a87601220927cb9ad9ecc731e92 (patch)
tree	8f216d647d363e058d3d419d9a95198ad8ded609 /arch/x86/kernel/tsc_sync.c
parent	1d0095feea591bbd94f35d8a98aed746319783e1 (diff)
x86/tsc: Sync test only for the first cpu in a package
If the TSC_ADJUST MSR is available, all CPUs in a package are forced to the
same value. So TSCs cannot be out of sync when the first CPU in the package
was in sync. That allows skipping the sync test for all CPUs except the
first starting CPU in a package.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Reviewed-by: Ingo Molnar <mingo@kernel.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Yinghai Lu <yinghai@kernel.org>
Cc: Borislav Petkov <bp@alien8.de>
Link: http://lkml.kernel.org/r/20161119134017.809901363@linutronix.de
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
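For illustration, here is a minimal user-space sketch (not the kernel code itself) of the skip handshake the patch introduces: the target side either registers for the measurement via start_count or raises skip_test when tsc_store_and_check_tsc_adjust() reports that its package is already forced in sync, and the source side bails out of its wait loop when it sees the skip. C11 atomics stand in for the kernel's atomic_t, and simulate_target()/simulate_source()/package_already_synced() are hypothetical helpers, not functions from the patch.

	/*
	 * Minimal user-space sketch of the skip handshake: the target CPU either
	 * joins the sync test by bumping start_count, or signals via skip_test
	 * that its package already has a verified TSC_ADJUST value, letting the
	 * source CPU return without running the measurement.
	 */
	#include <stdatomic.h>
	#include <stdbool.h>
	#include <stdio.h>

	static atomic_int start_count;
	static atomic_int skip_test;

	/* Stand-in for tsc_store_and_check_tsc_adjust(): true == package already synced. */
	static bool package_already_synced(bool first_cpu_in_package)
	{
		return !first_cpu_in_package;
	}

	/* Stand-in for check_tsc_sync_target() on the starting CPU. */
	static void simulate_target(bool first_cpu_in_package)
	{
		if (package_already_synced(first_cpu_in_package)) {
			/* Tell the source side there is nothing to measure. */
			atomic_fetch_add(&skip_test, 1);
			return;
		}
		/* Otherwise register for the measurement. */
		atomic_fetch_add(&start_count, 1);
	}

	/* Stand-in for check_tsc_sync_source(); returns true if the test would run. */
	static bool simulate_source(int cpus)
	{
		/* Wait for the target to start or to skip the test. */
		while (atomic_load(&start_count) != cpus - 1) {
			if (atomic_load(&skip_test) > 0) {
				atomic_store(&skip_test, 0);
				return false;	/* test skipped */
			}
		}
		return true;	/* both sides would now run the measurement */
	}

	int main(void)
	{
		/* A sibling CPU in an already-synced package skips the test. */
		simulate_target(false);
		printf("sync test ran: %s\n", simulate_source(2) ? "yes" : "no");
		return 0;
	}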
Diffstat (limited to 'arch/x86/kernel/tsc_sync.c')
-rw-r--r--	arch/x86/kernel/tsc_sync.c	37
1 file changed, 28 insertions(+), 9 deletions(-)
diff --git a/arch/x86/kernel/tsc_sync.c b/arch/x86/kernel/tsc_sync.c
index f713e84d1cb4..fdb3b7befc47 100644
--- a/arch/x86/kernel/tsc_sync.c
+++ b/arch/x86/kernel/tsc_sync.c
@@ -59,19 +59,20 @@ void tsc_verify_tsc_adjust(void)
}
#ifndef CONFIG_SMP
-void __init tsc_store_and_check_tsc_adjust(void)
+bool __init tsc_store_and_check_tsc_adjust(void)
{
struct tsc_adjust *ref, *cur = this_cpu_ptr(&tsc_adjust);
s64 bootval;
if (!boot_cpu_has(X86_FEATURE_TSC_ADJUST))
- return;
+ return false;
rdmsrl(MSR_IA32_TSC_ADJUST, bootval);
cur->bootval = bootval;
cur->adjusted = bootval;
cur->nextcheck = jiffies + HZ;
pr_info("TSC ADJUST: Boot CPU0: %lld\n", bootval);
+ return false;
}
#else /* !CONFIG_SMP */
@@ -79,14 +80,14 @@ void __init tsc_store_and_check_tsc_adjust(void)
/*
* Store and check the TSC ADJUST MSR if available
*/
-void tsc_store_and_check_tsc_adjust(void)
+bool tsc_store_and_check_tsc_adjust(void)
{
struct tsc_adjust *ref, *cur = this_cpu_ptr(&tsc_adjust);
unsigned int refcpu, cpu = smp_processor_id();
s64 bootval;
if (!boot_cpu_has(X86_FEATURE_TSC_ADJUST))
- return;
+ return false;
rdmsrl(MSR_IA32_TSC_ADJUST, bootval);
cur->bootval = bootval;
@@ -110,7 +111,7 @@ void tsc_store_and_check_tsc_adjust(void)
*/
cur->adjusted = bootval;
pr_info_once("TSC ADJUST: Boot CPU%u: %lld\n", cpu, bootval);
- return;
+ return false;
}
ref = per_cpu_ptr(&tsc_adjust, refcpu);
@@ -134,6 +135,11 @@ void tsc_store_and_check_tsc_adjust(void)
cur->adjusted = ref->adjusted;
wrmsrl(MSR_IA32_TSC_ADJUST, ref->adjusted);
}
+ /*
+ * We have the TSCs forced to be in sync on this package. Skip sync
+ * test:
+ */
+ return true;
}
/*
@@ -142,6 +148,7 @@ void tsc_store_and_check_tsc_adjust(void)
*/
static atomic_t start_count;
static atomic_t stop_count;
+static atomic_t skip_test;
/*
* We use a raw spinlock in this exceptional case, because
@@ -265,10 +272,16 @@ void check_tsc_sync_source(int cpu)
atomic_set(&stop_count, 0);
/*
- * Wait for the target to arrive:
+ * Wait for the target to start or to skip the test:
*/
- while (atomic_read(&start_count) != cpus-1)
+ while (atomic_read(&start_count) != cpus - 1) {
+ if (atomic_read(&skip_test) > 0) {
+ atomic_set(&skip_test, 0);
+ return;
+ }
cpu_relax();
+ }
+
/*
* Trigger the target to continue into the measurement too:
*/
@@ -318,8 +331,14 @@ void check_tsc_sync_target(void)
if (unsynchronized_tsc() || tsc_clocksource_reliable)
return;
- /* Store and check the TSC ADJUST MSR */
- tsc_store_and_check_tsc_adjust();
+ /*
+ * Store, verify and sanitize the TSC adjust register. If
+ * successful skip the test.
+ */
+ if (tsc_store_and_check_tsc_adjust()) {
+ atomic_inc(&skip_test);
+ return;
+ }
/*
* Register this CPU's participation and wait for the