author    Marcus Glocker <mglocker@cvs.openbsd.org>    2012-11-10 09:45:06 +0000
committer Marcus Glocker <mglocker@cvs.openbsd.org>    2012-11-10 09:45:06 +0000
commit    fcc13a47ab56f0c004e8ac405ed5af0b7e59e8d0
tree      26636041125bd70a51b56350dc025de6d75f146e
parent    25659e88c95da597f238aea9c5e1cf8748e2e69d
Recent x86 CPUs come with a constant time stamp counter. If that is the
case, we check whether the CPU supports a specific version of the
architectural performance monitoring feature and read the current
frequency from the fixed-function performance counter for unhalted core
cycles.

My initial motivation to implement this was the Soekris net6501-70,
which comes with an Intel Atom E6xx 1.60GHz CPU. It has a constant time
stamp counter plus SpeedStep support and boots at its lowest frequency
of 600MHz. This caused hw.cpuspeed and hw.setperf to reflect the wrong
values.

This diff is joint work with jsg@; the fixed-function performance
counter read code comes from an earlier diff of his.

OK jsg@
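Below is a minimal standalone sketch of the calibration idea described above, not the committed code: enable fixed-function counter 1 (CPU_CLK_UNHALTED.CORE), let it run across a known delay, and scale the cycle count to a frequency. The rdmsr()/wrmsr()/delay() prototypes are stand-ins for the kernel's machine-dependent primitives, and the caller is assumed to have already verified a constant TSC, an architectural performance monitoring version greater than 1, and more than one fixed-function counter, which are exactly the checks the diff performs.

/*
 * Sketch only.  In the real kernel rdmsr(), wrmsr() and delay() are
 * machine-dependent inlines/macros; they are declared here so the
 * fragment is self-contained.
 */
#include <sys/types.h>

#define MSR_PERF_FIXED_CTR1	0x30a		/* CPU_CLK_Unhalted.Core */
#define MSR_PERF_FIXED_CTR_CTRL	0x38d
#define MSR_PERF_FIXED_CTR1_EN	(1 << 4)
#define MSR_PERF_GLOBAL_CTRL	0x38f
#define MSR_PERF_GLOBAL_CTR1_EN	(1ULL << 33)

u_int64_t	rdmsr(u_int32_t);
void		wrmsr(u_int32_t, u_int64_t);
void		delay(int);			/* busy-wait N microseconds */

/* Return the unhalted core frequency in Hz, sampled over 100ms. */
u_int64_t
measure_core_freq(void)
{
	u_int64_t last_count, count, msr;

	/* Enable fixed counter 1 in the fixed and global control MSRs. */
	msr = rdmsr(MSR_PERF_FIXED_CTR_CTRL) | MSR_PERF_FIXED_CTR1_EN;
	wrmsr(MSR_PERF_FIXED_CTR_CTRL, msr);
	msr = rdmsr(MSR_PERF_GLOBAL_CTRL) | MSR_PERF_GLOBAL_CTR1_EN;
	wrmsr(MSR_PERF_GLOBAL_CTRL, msr);

	/* Count unhalted core cycles across a fixed 100ms delay. */
	last_count = rdmsr(MSR_PERF_FIXED_CTR1);
	delay(100000);
	count = rdmsr(MSR_PERF_FIXED_CTR1);

	/* Disable the counter again. */
	msr = rdmsr(MSR_PERF_FIXED_CTR_CTRL) & ~MSR_PERF_FIXED_CTR1_EN;
	wrmsr(MSR_PERF_FIXED_CTR_CTRL, msr);
	msr = rdmsr(MSR_PERF_GLOBAL_CTRL) & ~MSR_PERF_GLOBAL_CTR1_EN;
	wrmsr(MSR_PERF_GLOBAL_CTRL, msr);

	/* 100ms sample period, so multiply by 10 for cycles per second. */
	return (count - last_count) * 10;
}

In the diff itself the same enable/sample/disable sequence appears twice: in amd64's identifycpu() to set ci_tsc_freq, and in i386's calibrate_cyclecounter() (with a one-second delay) to set cpuspeed.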
-rw-r--r--  sys/arch/amd64/amd64/identcpu.c       65
-rw-r--r--  sys/arch/amd64/amd64/locore.S         20
-rw-r--r--  sys/arch/amd64/include/cpu.h           8
-rw-r--r--  sys/arch/amd64/include/specialreg.h   38
-rw-r--r--  sys/arch/i386/i386/locore.s           20
-rw-r--r--  sys/arch/i386/i386/machdep.c          41
-rw-r--r--  sys/arch/i386/include/cpu.h            7
-rw-r--r--  sys/arch/i386/include/specialreg.h    38
-rw-r--r--  sys/arch/i386/isa/clock.c             35
9 files changed, 253 insertions(+), 19 deletions(-)
diff --git a/sys/arch/amd64/amd64/identcpu.c b/sys/arch/amd64/amd64/identcpu.c
index 8812d0c26e1..94bc8392dae 100644
--- a/sys/arch/amd64/amd64/identcpu.c
+++ b/sys/arch/amd64/amd64/identcpu.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: identcpu.c,v 1.42 2012/10/31 03:30:22 jsg Exp $ */
+/* $OpenBSD: identcpu.c,v 1.43 2012/11/10 09:45:05 mglocker Exp $ */
/* $NetBSD: identcpu.c,v 1.1 2003/04/26 18:39:28 fvdl Exp $ */
/*
@@ -159,6 +159,10 @@ const struct {
{ SEFF0EBX_RDSEED, "RDSEED" },
{ SEFF0EBX_ADX, "ADX" },
{ SEFF0EBX_SMAP, "SMAP" },
+}, cpu_cpuid_perf_eax[] = {
+ { CPUIDEAX_VERID, "PERF" },
+}, cpu_cpuid_apmi_edx[] = {
+ { CPUIDEDX_ITSC, "ITSC" },
};
int
@@ -320,7 +324,7 @@ via_update_sensor(void *args)
void
identifycpu(struct cpu_info *ci)
{
- u_int64_t last_tsc;
+ u_int64_t last_count, count, msr;
u_int32_t dummy, val, pnfeatset;
u_int32_t brand[12];
char mycpu_model[48];
@@ -375,9 +379,52 @@ identifycpu(struct cpu_info *ci)
ci->ci_model += ((ci->ci_signature >> 16) & 0x0f) << 4;
}
- last_tsc = rdtsc();
- delay(100000);
- ci->ci_tsc_freq = (rdtsc() - last_tsc) * 10;
+ if (ci->ci_feature_flags && ci->ci_feature_flags & CPUID_TSC) {
+ /* Has TSC, check if it's constant */
+ if (!strcmp(cpu_vendor, "GenuineIntel")) {
+ if ((ci->ci_family == 0x0f && ci->ci_model >= 0x03) ||
+ (ci->ci_family == 0x06 && ci->ci_model >= 0x0e)) {
+ ci->ci_flags |= CPUF_CONST_TSC;
+ }
+ } else if (!strcmp(cpu_vendor, "CentaurHauls")) {
+ /* VIA */
+ if (ci->ci_model >= 0x0f) {
+ ci->ci_flags |= CPUF_CONST_TSC;
+ }
+ } else if (!strcmp(cpu_vendor, "AuthenticAMD")) {
+ if (cpu_apmi_edx & CPUIDEDX_ITSC) {
+ /* Invariant TSC indicates constant TSC on
+ * AMD.
+ */
+ ci->ci_flags |= CPUF_CONST_TSC;
+ }
+ }
+ }
+
+ if ((ci->ci_flags & CPUF_CONST_TSC) &&
+ (cpu_perf_eax & CPUIDEAX_VERID) > 1 &&
+ CPUIDEDX_NUM_FC(cpu_perf_edx) > 1) {
+ msr = rdmsr(MSR_PERF_FIXED_CTR_CTRL) | MSR_PERF_FIXED_CTR1_EN;
+ wrmsr(MSR_PERF_FIXED_CTR_CTRL, msr);
+ msr = rdmsr(MSR_PERF_GLOBAL_CTRL) | MSR_PERF_GLOBAL_CTR1_EN;
+ wrmsr(MSR_PERF_GLOBAL_CTRL, msr);
+
+ last_count = rdmsr(MSR_PERF_FIXED_CTR1);
+ delay(100000);
+ count = rdmsr(MSR_PERF_FIXED_CTR1);
+
+ msr = rdmsr(MSR_PERF_FIXED_CTR_CTRL);
+ msr &= ~MSR_PERF_FIXED_CTR1_EN;
+ wrmsr(MSR_PERF_FIXED_CTR_CTRL, msr);
+ msr = rdmsr(MSR_PERF_GLOBAL_CTRL);
+ msr &= ~MSR_PERF_GLOBAL_CTR1_EN;
+ wrmsr(MSR_PERF_GLOBAL_CTRL, msr);
+ } else {
+ last_count = rdtsc();
+ delay(100000);
+ count = rdtsc();
+ }
+ ci->ci_tsc_freq = (count - last_count) * 10;
amd_cpu_cacheinfo(ci);
@@ -410,6 +457,14 @@ identifycpu(struct cpu_info *ci)
for (i = 0; i < max; i++)
if (ecpu_ecxfeature & cpu_ecpuid_ecxfeatures[i].bit)
printf(",%s", cpu_ecpuid_ecxfeatures[i].str);
+ max = sizeof(cpu_cpuid_perf_eax) / sizeof(cpu_cpuid_perf_eax[0]);
+ for (i = 0; i < max; i++)
+ if (cpu_perf_eax & cpu_cpuid_perf_eax[i].bit)
+ printf(",%s", cpu_cpuid_perf_eax[i].str);
+ max = sizeof(cpu_cpuid_apmi_edx) / sizeof(cpu_cpuid_apmi_edx[0]);
+ for (i = 0; i < max; i++)
+ if (cpu_apmi_edx & cpu_cpuid_apmi_edx[i].bit)
+ printf(",%s", cpu_cpuid_apmi_edx[i].str);
if (cpuid_level >= 0x07) {
/* "Structured Extended Feature Flags" */
diff --git a/sys/arch/amd64/amd64/locore.S b/sys/arch/amd64/amd64/locore.S
index d20c9575481..30d452fe0ab 100644
--- a/sys/arch/amd64/amd64/locore.S
+++ b/sys/arch/amd64/amd64/locore.S
@@ -1,4 +1,4 @@
-/* $OpenBSD: locore.S,v 1.53 2012/09/25 09:58:57 pirofti Exp $ */
+/* $OpenBSD: locore.S,v 1.54 2012/11/10 09:45:05 mglocker Exp $ */
/* $NetBSD: locore.S,v 1.13 2004/03/25 18:33:17 drochner Exp $ */
/*
@@ -170,6 +170,10 @@ _C_LABEL(lapic_isr):
.globl _C_LABEL(cpu_id),_C_LABEL(cpu_vendor), _C_LABEL(cpu_brand_id)
.globl _C_LABEL(cpuid_level),_C_LABEL(cpu_feature)
.globl _C_LABEL(cpu_ecxfeature),_C_LABEL(ecpu_ecxfeature)
+ .globl _C_LABEL(cpu_perf_eax)
+ .globl _C_LABEL(cpu_perf_ebx)
+ .globl _C_LABEL(cpu_perf_edx)
+ .globl _C_LABEL(cpu_apmi_edx)
.globl _C_LABEL(esym),_C_LABEL(boothowto),_C_LABEL(bootdev)
.globl _C_LABEL(bootinfo), _C_LABEL(bootinfo_size), _C_LABEL(atdevbase)
.globl _C_LABEL(proc0paddr),_C_LABEL(PTDpaddr)
@@ -182,6 +186,10 @@ _C_LABEL(cpu_feature): .long 0 # feature flags from 'cpuid'
# instruction
_C_LABEL(cpu_ecxfeature):.long 0 # extended feature flags from 'cpuid'
_C_LABEL(ecpu_ecxfeature):.long 0 # extended ecx feature flags
+_C_LABEL(cpu_perf_eax): .long 0 # arch. perf. mon. flags from 'cpuid'
+_C_LABEL(cpu_perf_ebx): .long 0 # arch. perf. mon. flags from 'cpuid'
+_C_LABEL(cpu_perf_edx): .long 0 # arch. perf. mon. flags from 'cpuid'
+_C_LABEL(cpu_apmi_edx): .long 0 # adv. power mgmt. info. from 'cpuid'
_C_LABEL(cpuid_level): .long -1 # max. level accepted by 'cpuid'
# instruction
_C_LABEL(cpu_vendor): .space 16 # vendor string returned by `cpuid'
@@ -310,11 +318,21 @@ bi_size_ok:
movl %ecx,RELOC(cpu_ecxfeature)
movl %edx,RELOC(cpu_feature)
+ movl $0x0a,%eax
+ cpuid
+ movl %eax,RELOC(_C_LABEL(cpu_perf_eax))
+ movl %ebx,RELOC(_C_LABEL(cpu_perf_ebx))
+ movl %edx,RELOC(_C_LABEL(cpu_perf_edx))
+
movl $0x80000001, %eax
cpuid
andl $CPUID_NXE, %edx /* other bits may clash */
orl %edx, RELOC(cpu_feature)
+ movl $0x80000007,%eax
+ cpuid
+ movl %edx,RELOC(_C_LABEL(cpu_apmi_edx))
+
/* Brand ID is bits 0-7 of %ebx */
andl $255,%ebx
movl %ebx,RELOC(cpu_brand_id)
diff --git a/sys/arch/amd64/include/cpu.h b/sys/arch/amd64/include/cpu.h
index 9ce437af9de..60a095eed9e 100644
--- a/sys/arch/amd64/include/cpu.h
+++ b/sys/arch/amd64/include/cpu.h
@@ -1,4 +1,4 @@
-/* $OpenBSD: cpu.h,v 1.74 2012/10/09 04:40:36 jsg Exp $ */
+/* $OpenBSD: cpu.h,v 1.75 2012/11/10 09:45:05 mglocker Exp $ */
/* $NetBSD: cpu.h,v 1.1 2003/04/26 18:39:39 fvdl Exp $ */
/*-
@@ -138,6 +138,8 @@ struct cpu_info {
#define CPUF_IDENTIFY 0x0010 /* CPU may now identify */
#define CPUF_IDENTIFIED 0x0020 /* CPU has been identified */
+#define CPUF_CONST_TSC 0x0040 /* CPU has constant TSC */
+
#define CPUF_PRESENT 0x1000 /* CPU is present */
#define CPUF_RUNNING 0x2000 /* CPU is running */
#define CPUF_PAUSE 0x4000 /* CPU is paused in DDB */
@@ -258,6 +260,10 @@ extern int biosextmem;
extern int cpu;
extern int cpu_feature;
extern int cpu_ecxfeature;
+extern int cpu_perf_eax;
+extern int cpu_perf_ebx;
+extern int cpu_perf_edx;
+extern int cpu_apmi_edx;
extern int ecpu_ecxfeature;
extern int cpu_id;
extern char cpu_vendor[];
diff --git a/sys/arch/amd64/include/specialreg.h b/sys/arch/amd64/include/specialreg.h
index 142fbbcbd57..f094f14c7ff 100644
--- a/sys/arch/amd64/include/specialreg.h
+++ b/sys/arch/amd64/include/specialreg.h
@@ -1,4 +1,4 @@
-/* $OpenBSD: specialreg.h,v 1.23 2012/10/09 09:16:09 jsg Exp $ */
+/* $OpenBSD: specialreg.h,v 1.24 2012/11/10 09:45:05 mglocker Exp $ */
/* $NetBSD: specialreg.h,v 1.1 2003/04/26 18:39:48 fvdl Exp $ */
/* $NetBSD: x86/specialreg.h,v 1.2 2003/04/25 21:54:30 fvdl Exp $ */
@@ -177,6 +177,27 @@
#define SEFF0EBX_ADX 0x00080000 /* ADCX/ADOX instructions */
#define SEFF0EBX_SMAP 0x00100000 /* Supervisor mode access prevent */
+ /*
+ * "Architectural Performance Monitoring" bits (CPUID function 0x0a):
+ * EAX bits, EBX bits, EDX bits.
+ */
+
+#define CPUIDEAX_VERID 0x000000ff /* Version ID */
+#define CPUIDEAX_NUM_GC(cpuid) (((cpuid) >> 8) & 0x000000ff)
+#define CPUIDEAX_BIT_GC(cpuid) (((cpuid) >> 16) & 0x000000ff)
+#define CPUIDEAX_LEN_EBX(cpuid) (((cpuid) >> 24) & 0x000000ff)
+
+#define CPUIDEBX_EVT_CORE (1 << 0) /* Core cycle */
+#define CPUIDEBX_EVT_INST (1 << 1) /* Instruction retired */
+#define CPUIDEBX_EVT_REFR (1 << 2) /* Reference cycles */
+#define CPUIDEBX_EVT_CACHE_REF (1 << 3) /* Last-level cache ref. */
+#define CPUIDEBX_EVT_CACHE_MIS (1 << 4) /* Last-level cache miss. */
+#define CPUIDEBX_EVT_BRANCH_INST (1 << 5) /* Branch instruction ret. */
+#define CPUIDEBX_EVT_BRANCH_MISP (1 << 6) /* Branch mispredict ret. */
+
+#define CPUIDEDX_NUM_FC(cpuid) (((cpuid) >> 0) & 0x0000001f)
+#define CPUIDEDX_BIT_FC(cpuid) (((cpuid) >> 5) & 0x000000ff)
+
/*
* CPUID "extended features" bits (CPUID function 0x80000001):
* EDX bits, then ECX bits
@@ -214,6 +235,13 @@
#define CPUIDECX_TBM 0x00200000 /* Trailing bit manipulation instruction */
#define CPUIDECX_TOPEXT 0x00400000 /* Topology extensions support */
+/*
+ * "Advanced Power Management Information" bits (CPUID function 0x80000007):
+ * EDX bits.
+ */
+
+#define CPUIDEDX_ITSC (1 << 8) /* Invariant TSC */
+
#define CPUID2FAMILY(cpuid) (((cpuid) >> 8) & 15)
#define CPUID2MODEL(cpuid) (((cpuid) >> 4) & 15)
#define CPUID2STEPPING(cpuid) ((cpuid) & 15)
@@ -310,6 +338,14 @@
#define MSR_MTRRfix4K_F8000 0x26f
#define MSR_CR_PAT 0x277
#define MSR_MTRRdefType 0x2ff
+#define MSR_PERF_FIXED_CTR1 0x30a /* CPU_CLK_Unhalted.Core */
+#define MSR_PERF_FIXED_CTR2 0x30b /* CPU_CLK_Unhalted.Ref */
+#define MSR_PERF_FIXED_CTR_CTRL 0x38d
+#define MSR_PERF_FIXED_CTR1_EN (1 << 4)
+#define MSR_PERF_FIXED_CTR2_EN (1 << 8)
+#define MSR_PERF_GLOBAL_CTRL 0x38f
+#define MSR_PERF_GLOBAL_CTR1_EN (1ULL << 33)
+#define MSR_PERF_GLOBAL_CTR2_EN (1ULL << 34)
#define MSR_MC0_CTL 0x400
#define MSR_MC0_STATUS 0x401
#define MSR_MC0_ADDR 0x402
diff --git a/sys/arch/i386/i386/locore.s b/sys/arch/i386/i386/locore.s
index fccbe7e1a75..f2f42b22202 100644
--- a/sys/arch/i386/i386/locore.s
+++ b/sys/arch/i386/i386/locore.s
@@ -1,4 +1,4 @@
-/* $OpenBSD: locore.s,v 1.143 2012/10/31 03:30:22 jsg Exp $ */
+/* $OpenBSD: locore.s,v 1.144 2012/11/10 09:45:05 mglocker Exp $ */
/* $NetBSD: locore.s,v 1.145 1996/05/03 19:41:19 christos Exp $ */
/*-
@@ -170,6 +170,10 @@
.globl _C_LABEL(ecpu_feature), _C_LABEL(ecpu_ecxfeature)
.globl _C_LABEL(cpu_cache_eax), _C_LABEL(cpu_cache_ebx)
.globl _C_LABEL(cpu_cache_ecx), _C_LABEL(cpu_cache_edx)
+ .globl _C_LABEL(cpu_perf_eax)
+ .globl _C_LABEL(cpu_perf_ebx)
+ .globl _C_LABEL(cpu_perf_edx)
+ .globl _C_LABEL(cpu_apmi_edx)
.globl _C_LABEL(cold), _C_LABEL(cnvmem), _C_LABEL(extmem)
.globl _C_LABEL(esym)
.globl _C_LABEL(boothowto), _C_LABEL(bootdev), _C_LABEL(atdevbase)
@@ -213,6 +217,10 @@ _C_LABEL(cpu_cache_eax):.long 0
_C_LABEL(cpu_cache_ebx):.long 0
_C_LABEL(cpu_cache_ecx):.long 0
_C_LABEL(cpu_cache_edx):.long 0
+_C_LABEL(cpu_perf_eax): .long 0 # arch. perf. mon. flags from 'cpuid'
+_C_LABEL(cpu_perf_ebx): .long 0 # arch. perf. mon. flags from 'cpuid'
+_C_LABEL(cpu_perf_edx): .long 0 # arch. perf. mon. flags from 'cpuid'
+_C_LABEL(cpu_apmi_edx): .long 0 # adv. power management info. 'cpuid'
_C_LABEL(cpu_vendor): .space 16 # vendor string returned by 'cpuid' instruction
_C_LABEL(cpu_brandstr): .space 48 # brand string returned by 'cpuid'
_C_LABEL(cold): .long 1 # cold till we are not
@@ -415,6 +423,12 @@ try586: /* Use the `cpuid' instruction. */
movl %ecx,RELOC(_C_LABEL(cpu_cache_ecx))
movl %edx,RELOC(_C_LABEL(cpu_cache_edx))
+ movl $0x0a,%eax
+ cpuid
+ movl %eax,RELOC(_C_LABEL(cpu_perf_eax))
+ movl %ebx,RELOC(_C_LABEL(cpu_perf_ebx))
+ movl %edx,RELOC(_C_LABEL(cpu_perf_edx))
+
1:
/* Check if brand identification string is supported */
movl $0x80000000,%eax
@@ -445,6 +459,10 @@ try586: /* Use the `cpuid' instruction. */
andl $0x00ffffff,%edx /* Shouldn't be necessary */
movl %edx,RELOC(_C_LABEL(cpu_brandstr))+44
+ movl $0x80000007,%eax
+ cpuid
+ movl %edx,RELOC(_C_LABEL(cpu_apmi_edx))
+
2:
/*
* Finished with old stack; load new %esp now instead of later so we
diff --git a/sys/arch/i386/i386/machdep.c b/sys/arch/i386/i386/machdep.c
index 7795286ea43..00305d9d766 100644
--- a/sys/arch/i386/i386/machdep.c
+++ b/sys/arch/i386/i386/machdep.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: machdep.c,v 1.516 2012/10/31 03:30:22 jsg Exp $ */
+/* $OpenBSD: machdep.c,v 1.517 2012/11/10 09:45:05 mglocker Exp $ */
/* $NetBSD: machdep.c,v 1.214 1996/11/10 03:16:17 thorpej Exp $ */
/*-
@@ -1069,6 +1069,14 @@ const struct cpu_cpuid_feature cpu_seff0_ebxfeatures[] = {
{ SEFF0EBX_SMAP, "SMAP" },
};
+const struct cpu_cpuid_feature i386_cpuid_eaxperf[] = {
+ { CPUIDEAX_VERID, "PERF" },
+};
+
+const struct cpu_cpuid_feature i386_cpuid_edxapmi[] = {
+ { CPUIDEDX_ITSC, "ITSC" },
+};
+
void
winchip_cpu_setup(struct cpu_info *ci)
{
@@ -1838,7 +1846,20 @@ identifycpu(struct cpu_info *ci)
}
if (ci->ci_feature_flags && (ci->ci_feature_flags & CPUID_TSC)) {
- /* Has TSC */
+ /* Has TSC, check if it's constant */
+ switch (vendor) {
+ case CPUVENDOR_INTEL:
+ if ((ci->ci_family == 0x0f && ci->ci_model >= 0x03) ||
+ (ci->ci_family == 0x06 && ci->ci_model >= 0x0e)) {
+ ci->ci_flags |= CPUF_CONST_TSC;
+ }
+ break;
+ case CPUVENDOR_VIA:
+ if (ci->ci_model >= 0x0f) {
+ ci->ci_flags |= CPUF_CONST_TSC;
+ }
+ break;
+ }
calibrate_cyclecounter();
if (cpuspeed > 994) {
int ghz, fr;
@@ -1900,6 +1921,22 @@ identifycpu(struct cpu_info *ci)
numbits++;
}
}
+ for (i = 0; i < nitems(i386_cpuid_eaxperf); i++) {
+ if (cpu_perf_eax &
+ i386_cpuid_eaxperf[i].feature_bit) {
+ printf("%s%s", (numbits == 0 ? "" : ","),
+ i386_cpuid_eaxperf[i].feature_name);
+ numbits++;
+ }
+ }
+ for (i = 0; i < nitems(i386_cpuid_edxapmi); i++) {
+ if (cpu_apmi_edx &
+ i386_cpuid_edxapmi[i].feature_bit) {
+ printf("%s%s", (numbits == 0 ? "" : ","),
+ i386_cpuid_edxapmi[i].feature_name);
+ numbits++;
+ }
+ }
if (cpuid_level >= 0x07) {
u_int dummy;
diff --git a/sys/arch/i386/include/cpu.h b/sys/arch/i386/include/cpu.h
index 003ceea564c..b87b89aee46 100644
--- a/sys/arch/i386/include/cpu.h
+++ b/sys/arch/i386/include/cpu.h
@@ -1,4 +1,4 @@
-/* $OpenBSD: cpu.h,v 1.123 2012/10/09 04:40:36 jsg Exp $ */
+/* $OpenBSD: cpu.h,v 1.124 2012/11/10 09:45:05 mglocker Exp $ */
/* $NetBSD: cpu.h,v 1.35 1996/05/05 19:29:26 christos Exp $ */
/*-
@@ -163,6 +163,7 @@ struct cpu_info {
#define CPUF_SP 0x0004 /* CPU is only processor */
#define CPUF_PRIMARY 0x0008 /* CPU is active primary processor */
#define CPUF_APIC_CD 0x0010 /* CPU has apic configured */
+#define CPUF_CONST_TSC 0x0020 /* CPU has constant TSC */
#define CPUF_PRESENT 0x1000 /* CPU is present */
#define CPUF_RUNNING 0x2000 /* CPU is running */
@@ -319,6 +320,10 @@ extern int cpu_cache_eax;
extern int cpu_cache_ebx;
extern int cpu_cache_ecx;
extern int cpu_cache_edx;
+extern int cpu_perf_eax;
+extern int cpu_perf_ebx;
+extern int cpu_perf_edx;
+extern int cpu_apmi_edx;
/* machdep.c */
extern int cpu_apmhalt;
diff --git a/sys/arch/i386/include/specialreg.h b/sys/arch/i386/include/specialreg.h
index d693449f881..5f5ed76b9b1 100644
--- a/sys/arch/i386/include/specialreg.h
+++ b/sys/arch/i386/include/specialreg.h
@@ -1,4 +1,4 @@
-/* $OpenBSD: specialreg.h,v 1.43 2012/10/09 09:16:09 jsg Exp $ */
+/* $OpenBSD: specialreg.h,v 1.44 2012/11/10 09:45:05 mglocker Exp $ */
/* $NetBSD: specialreg.h,v 1.7 1994/10/27 04:16:26 cgd Exp $ */
/*-
@@ -177,6 +177,27 @@
#define SEFF0EBX_SMAP 0x00100000 /* Supervisor mode access prevent */
/*
+ * "Architectural Performance Monitoring" bits (CPUID function 0x0a):
+ * EAX bits
+ */
+
+#define CPUIDEAX_VERID 0x000000ff
+#define CPUIDEAX_NUM_GC(cpuid) (((cpuid) >> 8) & 0x000000ff)
+#define CPUIDEAX_BIT_GC(cpuid) (((cpuid) >> 16) & 0x000000ff)
+#define CPUIDEAX_LEN_EBX(cpuid) (((cpuid) >> 24) & 0x000000ff)
+
+#define CPUIDEBX_EVT_CORE (1 << 0) /* Core cycle */
+#define CPUIDEBX_EVT_INST (1 << 1) /* Instruction retired */
+#define CPUIDEBX_EVT_REFR (1 << 2) /* Reference cycles */
+#define CPUIDEBX_EVT_CACHE_REF (1 << 3) /* Last-level cache ref. */
+#define CPUIDEBX_EVT_CACHE_MIS (1 << 4) /* Last-level cache miss. */
+#define CPUIDEBX_EVT_BRANCH_INST (1 << 5) /* Branch instruction ret. */
+#define CPUIDEBX_EVT_BRANCH_MISP (1 << 6) /* Branch mispredict ret. */
+
+#define CPUIDEDX_NUM_FC(cpuid) (((cpuid) >> 0) & 0x0000001f)
+#define CPUIDEDX_BIT_FC(cpuid) (((cpuid) >> 5) & 0x000000ff)
+
+/*
* CPUID "extended features" bits (CPUID function 0x80000001):
* EDX bits, then ECX bits
*/
@@ -213,6 +234,13 @@
#define CPUIDECX_TBM 0x00200000 /* Trailing bit manipulation instruction */
#define CPUIDECX_TOPEXT 0x00400000 /* Topology extensions support */
+/*
+ * "Advanced Power Management Information" bits (CPUID function 0x80000007):
+ * EDX bits.
+ */
+
+#define CPUIDEDX_ITSC (1 << 8) /* Invariant TSC */
+
#define CPUID2FAMILY(cpuid) (((cpuid) >> 8) & 15)
#define CPUID2MODEL(cpuid) (((cpuid) >> 4) & 15)
#define CPUID2STEPPING(cpuid) ((cpuid) & 15)
@@ -309,6 +337,14 @@
#define MSR_MTRRfix4K_F8000 0x26f
#define MSR_CR_PAT 0x277
#define MSR_MTRRdefType 0x2ff
+#define MSR_PERF_FIXED_CTR1 0x30a /* CPU_CLK_Unhalted.Core */
+#define MSR_PERF_FIXED_CTR2 0x30b /* CPU_CLK_Unhalted.Ref */
+#define MSR_PERF_FIXED_CTR_CTRL 0x38d
+#define MSR_PERF_FIXED_CTR1_EN (1 << 4)
+#define MSR_PERF_FIXED_CTR2_EN (1 << 8)
+#define MSR_PERF_GLOBAL_CTRL 0x38f
+#define MSR_PERF_GLOBAL_CTR1_EN (1ULL << 33)
+#define MSR_PERF_GLOBAL_CTR2_EN (1ULL << 34)
#define MSR_MC0_CTL 0x400
#define MSR_MC0_STATUS 0x401
#define MSR_MC0_ADDR 0x402
diff --git a/sys/arch/i386/isa/clock.c b/sys/arch/i386/isa/clock.c
index c9ac92eb481..8e674950b6d 100644
--- a/sys/arch/i386/isa/clock.c
+++ b/sys/arch/i386/isa/clock.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: clock.c,v 1.46 2011/07/05 17:11:07 oga Exp $ */
+/* $OpenBSD: clock.c,v 1.47 2012/11/10 09:45:05 mglocker Exp $ */
/* $NetBSD: clock.c,v 1.39 1996/05/12 23:11:54 mycroft Exp $ */
/*-
@@ -376,11 +376,34 @@ i8254_delay(int n)
void
calibrate_cyclecounter(void)
{
- unsigned long long count, last_count;
-
- __asm __volatile("rdtsc" : "=A" (last_count));
- delay(1000000);
- __asm __volatile("rdtsc" : "=A" (count));
+ struct cpu_info *ci;
+ unsigned long long count, last_count, msr;
+
+ ci = curcpu();
+
+ if ((ci->ci_flags & CPUF_CONST_TSC) &&
+ (cpu_perf_eax & CPUIDEAX_VERID) > 1 &&
+ CPUIDEDX_NUM_FC(cpu_perf_edx) > 1) {
+ msr = rdmsr(MSR_PERF_FIXED_CTR_CTRL) | MSR_PERF_FIXED_CTR1_EN;
+ wrmsr(MSR_PERF_FIXED_CTR_CTRL, msr);
+ msr = rdmsr(MSR_PERF_GLOBAL_CTRL) | MSR_PERF_GLOBAL_CTR1_EN;
+ wrmsr(MSR_PERF_GLOBAL_CTRL, msr);
+
+ last_count = rdmsr(MSR_PERF_FIXED_CTR1);
+ delay(1000000);
+ count = rdmsr(MSR_PERF_FIXED_CTR1);
+
+ msr = rdmsr(MSR_PERF_FIXED_CTR_CTRL);
+ msr &= ~MSR_PERF_FIXED_CTR1_EN;
+ wrmsr(MSR_PERF_FIXED_CTR_CTRL, msr);
+ msr = rdmsr(MSR_PERF_GLOBAL_CTRL);
+ msr &= ~MSR_PERF_GLOBAL_CTR1_EN;
+ wrmsr(MSR_PERF_GLOBAL_CTRL, msr);
+ } else {
+ __asm __volatile("rdtsc" : "=A" (last_count));
+ delay(1000000);
+ __asm __volatile("rdtsc" : "=A" (count));
+ }
cpuspeed = ((count - last_count) + 999999) / 1000000;
}