From e1bd79187f2a8ed916a5c2f004abdf8a80dd8424 Mon Sep 17 00:00:00 2001
From: Reyk Floeter
Date: Wed, 14 Dec 2016 10:31:00 +0000
Subject: Add the TSC timecounter and use it on Skylake machines where the
 HPET is too slow and the invariant TSC more accurate.

The commit includes joint work by mikeb@ kettenis@ and me;
tested for some time by a large group of volunteers.

OK mikeb@ kettenis@
---
 sys/arch/amd64/amd64/identcpu.c | 63 ++++++++++++++++++++++++++++++++++++++++-
 sys/arch/amd64/include/cpu.h    |  3 +-
 2 files changed, 64 insertions(+), 2 deletions(-)

diff --git a/sys/arch/amd64/amd64/identcpu.c b/sys/arch/amd64/amd64/identcpu.c
index f0561675683..d9b1337a5fd 100644
--- a/sys/arch/amd64/amd64/identcpu.c
+++ b/sys/arch/amd64/amd64/identcpu.c
@@ -1,4 +1,4 @@
-/*	$OpenBSD: identcpu.c,v 1.78 2016/10/13 19:36:25 martijn Exp $	*/
+/*	$OpenBSD: identcpu.c,v 1.79 2016/12/14 10:30:59 reyk Exp $	*/
 /*	$NetBSD: identcpu.c,v 1.1 2003/04/26 18:39:28 fvdl Exp $	*/
 
 /*
@@ -39,6 +39,7 @@
 #include <sys/param.h>
 #include <sys/systm.h>
 #include <sys/sysctl.h>
+#include <sys/timetc.h>
 
 #include "vmm.h"
 
@@ -56,6 +57,12 @@ void	cpu_check_vmm_cap(struct cpu_info *);
 char cpu_model[48];
 int cpuspeed;
 
+u_int	tsc_get_timecount(struct timecounter *tc);
+
+struct timecounter tsc_timecounter = {
+	tsc_get_timecount, NULL, ~0u, 0, "tsc", 0, NULL
+};
+
 int amd64_has_xcrypt;
 #ifdef CRYPTO
 int amd64_has_pclmul;
@@ -385,6 +392,7 @@ cpu_tsc_freq_ctr(struct cpu_info *ci)
 	u_int64_t count, last_count, msr;
 
 	if ((ci->ci_flags & CPUF_CONST_TSC) == 0 ||
+	    (ci->ci_flags & CPUF_INVAR_TSC) ||
 	    (cpu_perf_eax & CPUIDEAX_VERID) <= 1 ||
 	    CPUIDEDX_NUM_FC(cpu_perf_edx) <= 1)
 		return (0);
@@ -420,6 +428,40 @@ u_int64_t
 cpu_tsc_freq(struct cpu_info *ci)
 {
 	u_int64_t last_count, count;
+	uint32_t eax, ebx, khz, dummy;
+
+	if (!strcmp(cpu_vendor, "GenuineIntel") &&
+	    cpuid_level >= 0x15) {
+		eax = ebx = khz = dummy = 0;
+		CPUID(0x15, eax, ebx, khz, dummy);
+		khz /= 1000;
+		if (khz == 0) {
+			switch (ci->ci_model) {
+			case 0x4e: /* Skylake mobile */
+			case 0x5e: /* Skylake desktop */
+			case 0x8e: /* Kabylake mobile */
+			case 0x9e: /* Kabylake desktop */
+				khz = 24000; /* 24.0 Mhz */
+				break;
+			case 0x55: /* Skylake X */
+				khz = 25000; /* 25.0 Mhz */
+				break;
+			case 0x5c: /* Atom Goldmont */
+				khz = 19200; /* 19.2 Mhz */
+				break;
+			}
+		}
+		if (ebx == 0 || eax == 0)
+			count = 0;
+		else if ((count = khz * ebx / eax) != 0) {
+			/*
+			 * Using the CPUID-derived frequency increases
+			 * the quality of the TSC time counter.
+			 */
+			tsc_timecounter.tc_quality = 2000;
+			return (count * 1000);
+		}
+	}
 
 	count = cpu_tsc_freq_ctr(ci);
 	if (count != 0)
@@ -432,6 +474,12 @@ cpu_tsc_freq(struct cpu_info *ci)
 	return ((count - last_count) * 10);
 }
 
+u_int
+tsc_get_timecount(struct timecounter *tc)
+{
+	return rdtsc();
+}
+
 void
 identifycpu(struct cpu_info *ci)
 {
@@ -513,6 +561,10 @@ identifycpu(struct cpu_info *ci)
 				ci->ci_flags |= CPUF_CONST_TSC;
 			}
 		}
+
+		/* Check if it's an invariant TSC */
+		if (cpu_apmi_edx & CPUIDEDX_ITSC)
+			ci->ci_flags |= CPUF_INVAR_TSC;
 	}
 
 	ci->ci_tsc_freq = cpu_tsc_freq(ci);
@@ -648,6 +700,15 @@ identifycpu(struct cpu_info *ci)
 #endif
 	}
 
+	if ((ci->ci_flags & CPUF_PRIMARY) &&
+	    (ci->ci_flags & CPUF_CONST_TSC) &&
+	    (ci->ci_flags & CPUF_INVAR_TSC)) {
+		printf("%s: TSC frequency %llu Hz\n",
+		    ci->ci_dev->dv_xname, ci->ci_tsc_freq);
+		tsc_timecounter.tc_frequency = ci->ci_tsc_freq;
+		tc_init(&tsc_timecounter);
+	}
+
 	cpu_topology(ci);
 #if NVMM > 0
 	cpu_check_vmm_cap(ci);
diff --git a/sys/arch/amd64/include/cpu.h b/sys/arch/amd64/include/cpu.h
index 8f044403925..09ce08cc938 100644
--- a/sys/arch/amd64/include/cpu.h
+++ b/sys/arch/amd64/include/cpu.h
@@ -1,4 +1,4 @@
-/*	$OpenBSD: cpu.h,v 1.106 2016/10/13 19:36:25 martijn Exp $	*/
+/*	$OpenBSD: cpu.h,v 1.107 2016/12/14 10:30:59 reyk Exp $	*/
 /*	$NetBSD: cpu.h,v 1.1 2003/04/26 18:39:39 fvdl Exp $	*/
 
 /*-
@@ -200,6 +200,7 @@ struct cpu_info {
 #define CPUF_CONST_TSC		0x0040	/* CPU has constant TSC */
 #define CPUF_USERSEGS_BIT	7	/* CPU has curproc's segments */
 #define CPUF_USERSEGS		(1<<CPUF_USERSEGS_BIT)
+#define CPUF_INVAR_TSC		0x0100	/* CPU has invariant TSC */