author     Philip Guenther <guenther@cvs.openbsd.org>  2024-04-03 02:01:22 +0000
committer  Philip Guenther <guenther@cvs.openbsd.org>  2024-04-03 02:01:22 +0000
commit     7aa9791fe062d1b2a5c9b93c8f6675a8cbd33f7c
tree       2b28362f63340bfb2fd8f4b8893cfcab4ac50804
parent     fc6017e0de122eabb8fa29e57fd793623b2a579f
Add ci_cpuid_level and ci_vendor holding the per-CPU basic cpuid level
and a numeric mapping of the CPU vendor, both from CPUID(0).  Convert
the general use of strcmp(cpu_vendor) to simple numeric tests of
ci_vendor.  Track the minimum of all CPUs' ci_cpuid_level in the
cpuid_level global and continue to use that for what vmm exposes.

AMD testing help matthieu@ krw@
ok miod@ deraadt@ cheloha@
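A minimal sketch of the conversion pattern, adapted from the tsc.c hunk
further down (illustrative only; the comments are not part of the commit):

	/* Before: re-parse the CPUID(0) vendor string, use the global level. */
	if (strcmp(cpu_vendor, "GenuineIntel") == 0 && cpuid_level >= 0x15) {
		/* Intel-only path */
	}

	/* After: each cpu_info carries a numeric vendor and its own basic level. */
	if (ci->ci_vendor == CPUV_INTEL && ci->ci_cpuid_level >= 0x15) {
		/* Intel-only path */
	}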
-rw-r--r--  sys/arch/amd64/amd64/cacheinfo.c |  6
-rw-r--r--  sys/arch/amd64/amd64/cpu.c       | 39
-rw-r--r--  sys/arch/amd64/amd64/identcpu.c  | 38
-rw-r--r--  sys/arch/amd64/amd64/lapic.c     |  4
-rw-r--r--  sys/arch/amd64/amd64/machdep.c   | 20
-rw-r--r--  sys/arch/amd64/amd64/mtrr.c      |  9
-rw-r--r--  sys/arch/amd64/amd64/pctr.c      |  7
-rw-r--r--  sys/arch/amd64/amd64/tsc.c       |  8
-rw-r--r--  sys/arch/amd64/amd64/ucode.c     |  6
-rw-r--r--  sys/arch/amd64/include/cpu.h     | 12
10 files changed, 94 insertions(+), 55 deletions(-)
diff --git a/sys/arch/amd64/amd64/cacheinfo.c b/sys/arch/amd64/amd64/cacheinfo.c
index acee9fd2d32..216b660941d 100644
--- a/sys/arch/amd64/amd64/cacheinfo.c
+++ b/sys/arch/amd64/amd64/cacheinfo.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: cacheinfo.c,v 1.12 2024/02/03 09:53:15 jsg Exp $ */
+/* $OpenBSD: cacheinfo.c,v 1.13 2024/04/03 02:01:21 guenther Exp $ */
/*
* Copyright (c) 2022 Jonathan Gray <jsg@openbsd.org>
@@ -210,14 +210,14 @@ x86_print_cacheinfo(struct cpu_info *ci)
{
uint64_t msr;
- if (strcmp(cpu_vendor, "GenuineIntel") == 0 &&
+ if (ci->ci_vendor == CPUV_INTEL &&
rdmsr_safe(MSR_MISC_ENABLE, &msr) == 0 &&
(msr & MISC_ENABLE_LIMIT_CPUID_MAXVAL) == 0) {
intel_print_cacheinfo(ci, 4);
return;
}
- if (strcmp(cpu_vendor, "AuthenticAMD") == 0 &&
+ if (ci->ci_vendor == CPUV_AMD &&
(ecpu_ecxfeature & CPUIDECX_TOPEXT)) {
intel_print_cacheinfo(ci, 0x8000001d);
return;
diff --git a/sys/arch/amd64/amd64/cpu.c b/sys/arch/amd64/amd64/cpu.c
index c92d1e8fd41..78bd512da93 100644
--- a/sys/arch/amd64/amd64/cpu.c
+++ b/sys/arch/amd64/amd64/cpu.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: cpu.c,v 1.184 2024/03/17 05:49:41 guenther Exp $ */
+/* $OpenBSD: cpu.c,v 1.185 2024/04/03 02:01:21 guenther Exp $ */
/* $NetBSD: cpu.c,v 1.1 2003/04/26 18:39:26 fvdl Exp $ */
/*-
@@ -150,8 +150,8 @@ void replacemds(void);
extern long _stac;
extern long _clac;
-int cpuid_level = 0; /* cpuid(0).eax */
-char cpu_vendor[16] = { 0 }; /* cpuid(0).e[bdc]x, \0 */
+int cpuid_level = 0; /* MIN cpuid(0).eax */
+char cpu_vendor[16] = { 0 }; /* CPU0's cpuid(0).e[bdc]x, \0 */
int cpu_id = 0; /* cpuid(1).eax */
int cpu_ebxfeature = 0; /* cpuid(1).ebx */
int cpu_ecxfeature = 0; /* cpuid(1).ecx */
@@ -190,7 +190,7 @@ replacemeltdown(void)
struct cpu_info *ci = &cpu_info_primary;
int swapgs_vuln = 0, ibrs = 0, s, ibpb = 0;
- if (strcmp(cpu_vendor, "GenuineIntel") == 0) {
+ if (ci->ci_vendor == CPUV_INTEL) {
int family = ci->ci_family;
int model = ci->ci_model;
@@ -213,7 +213,7 @@ replacemeltdown(void)
}
if (ci->ci_feature_sefflags_edx & SEFF0EDX_IBRS)
ibpb = 1;
- } else if (strcmp(cpu_vendor, "AuthenticAMD") == 0 &&
+ } else if (ci->ci_vendor == CPUV_AMD &&
ci->ci_pnfeatset >= 0x80000008) {
if (ci->ci_feature_amdspec_ebx & CPUIDEBX_IBRS_ALWAYSON) {
ibrs = 2;
@@ -310,7 +310,7 @@ replacemds(void)
return;
replacedone = 1;
- if (strcmp(cpu_vendor, "GenuineIntel") != 0)
+ if (ci->ci_vendor != CPUV_INTEL)
goto notintel; /* VERW only needed on Intel */
if ((ci->ci_feature_sefflags_edx & SEFF0EDX_ARCH_CAP))
@@ -527,7 +527,7 @@ cpu_init_mwait(struct cpu_softc *sc, struct cpu_info *ci)
{
unsigned int smallest, largest, extensions, c_substates;
- if ((cpu_ecxfeature & CPUIDECX_MWAIT) == 0 || cpuid_level < 0x5)
+ if ((cpu_ecxfeature & CPUIDECX_MWAIT) == 0 || ci->ci_cpuid_level < 0x5)
return;
/* get the monitor granularity */
@@ -536,7 +536,7 @@ cpu_init_mwait(struct cpu_softc *sc, struct cpu_info *ci)
largest &= 0xffff;
/* mask out states C6/C7 in 31:24 for CHT45 errata */
- if (strcmp(cpu_vendor, "GenuineIntel") == 0 &&
+ if (ci->ci_vendor == CPUV_INTEL &&
ci->ci_family == 0x06 && ci->ci_model == 0x4c)
cpu_mwait_states &= 0x00ffffff;
@@ -789,7 +789,7 @@ cpu_init(struct cpu_info *ci)
cr4 |= CR4_SMAP;
if (ci->ci_feature_sefflags_ecx & SEFF0ECX_UMIP)
cr4 |= CR4_UMIP;
- if ((cpu_ecxfeature & CPUIDECX_XSAVE) && cpuid_level >= 0xd)
+ if ((cpu_ecxfeature & CPUIDECX_XSAVE) && ci->ci_cpuid_level >= 0xd)
cr4 |= CR4_OSXSAVE;
if (pg_xo)
cr4 |= CR4_PKE;
@@ -797,7 +797,7 @@ cpu_init(struct cpu_info *ci)
cr4 |= CR4_PCIDE;
lcr4(cr4);
- if ((cpu_ecxfeature & CPUIDECX_XSAVE) && cpuid_level >= 0xd) {
+ if ((cpu_ecxfeature & CPUIDECX_XSAVE) && ci->ci_cpuid_level >= 0xd) {
u_int32_t eax, ebx, ecx, edx;
xsave_mask = XFEATURE_X87 | XFEATURE_SSE;
@@ -815,7 +815,7 @@ cpu_init(struct cpu_info *ci)
/* check for xsaves, xsaveopt, and supervisor features */
CPUID_LEAF(0xd, 1, eax, ebx, ecx, edx);
/* Disable XSAVES on AMD family 17h due to Erratum 1386 */
- if (!strcmp(cpu_vendor, "AuthenticAMD") &&
+ if (ci->ci_vendor == CPUV_AMD &&
ci->ci_family == 0x17) {
eax &= ~XSAVE_XSAVES;
}
@@ -1022,6 +1022,15 @@ cpu_hatch(void *v)
struct cpu_info *ci = (struct cpu_info *)v;
int s;
+ {
+ uint32_t vendor[4];
+ int level;
+
+ CPUID(0, level, vendor[0], vendor[2], vendor[1]);
+ vendor[3] = 0;
+ cpu_set_vendor(ci, level, (const char *)vendor);
+ }
+
cpu_init_msrs(ci);
#ifdef DEBUG
@@ -1215,7 +1224,7 @@ cpu_fix_msrs(struct cpu_info *ci)
int family = ci->ci_family;
uint64_t msr, nmsr;
- if (!strcmp(cpu_vendor, "GenuineIntel")) {
+ if (ci->ci_vendor == CPUV_INTEL) {
if ((family > 6 || (family == 6 && ci->ci_model >= 0xd)) &&
rdmsr_safe(MSR_MISC_ENABLE, &msr) == 0 &&
(msr & MISC_ENABLE_FAST_STRINGS) == 0) {
@@ -1241,7 +1250,7 @@ cpu_fix_msrs(struct cpu_info *ci)
}
}
- if (!strcmp(cpu_vendor, "AuthenticAMD")) {
+ if (ci->ci_vendor == CPUV_AMD) {
/* Apply AMD errata */
amd64_errata(ci);
@@ -1286,11 +1295,11 @@ cpu_tsx_disable(struct cpu_info *ci)
uint32_t dummy, sefflags_edx;
/* this runs before identifycpu() populates ci_feature_sefflags_edx */
- if (cpuid_level < 0x07)
+ if (ci->ci_cpuid_level < 0x07)
return;
CPUID_LEAF(0x7, 0, dummy, dummy, dummy, sefflags_edx);
- if (strcmp(cpu_vendor, "GenuineIntel") == 0 &&
+ if (ci->ci_vendor == CPUV_INTEL &&
(sefflags_edx & SEFF0EDX_ARCH_CAP)) {
msr = rdmsr(MSR_ARCH_CAPABILITIES);
if (msr & ARCH_CAP_TSX_CTRL) {
diff --git a/sys/arch/amd64/amd64/identcpu.c b/sys/arch/amd64/amd64/identcpu.c
index c8eb1f23949..9d8fefa6305 100644
--- a/sys/arch/amd64/amd64/identcpu.c
+++ b/sys/arch/amd64/amd64/identcpu.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: identcpu.c,v 1.139 2024/03/17 05:49:41 guenther Exp $ */
+/* $OpenBSD: identcpu.c,v 1.140 2024/04/03 02:01:21 guenther Exp $ */
/* $NetBSD: identcpu.c,v 1.1 2003/04/26 18:39:28 fvdl Exp $ */
/*
@@ -615,17 +615,17 @@ identifycpu(struct cpu_info *ci)
if (ci->ci_feature_flags && ci->ci_feature_flags & CPUID_TSC) {
/* Has TSC, check if it's constant */
- if (!strcmp(cpu_vendor, "GenuineIntel")) {
+ if (ci->ci_vendor == CPUV_INTEL) {
if ((ci->ci_family == 0x0f && ci->ci_model >= 0x03) ||
(ci->ci_family == 0x06 && ci->ci_model >= 0x0e)) {
atomic_setbits_int(&ci->ci_flags, CPUF_CONST_TSC);
}
- } else if (!strcmp(cpu_vendor, "CentaurHauls")) {
+ } else if (ci->ci_vendor == CPUV_VIA) {
/* VIA */
if (ci->ci_model >= 0x0f) {
atomic_setbits_int(&ci->ci_flags, CPUF_CONST_TSC);
}
- } else if (!strcmp(cpu_vendor, "AuthenticAMD")) {
+ } else if (ci->ci_vendor == CPUV_AMD) {
if (cpu_apmi_edx & CPUIDEDX_ITSC) {
/* Invariant TSC indicates constant TSC on AMD */
atomic_setbits_int(&ci->ci_flags, CPUF_CONST_TSC);
@@ -659,9 +659,9 @@ identifycpu(struct cpu_info *ci)
uint64_t level = 0;
uint32_t dummy;
- if (strcmp(cpu_vendor, "AuthenticAMD") == 0) {
+ if (ci->ci_vendor == CPUV_AMD) {
level = rdmsr(MSR_PATCH_LEVEL);
- } else if (strcmp(cpu_vendor, "GenuineIntel") == 0) {
+ } else if (ci->ci_vendor == CPUV_INTEL) {
wrmsr(MSR_BIOS_SIGN, 0);
CPUID(1, dummy, dummy, dummy, dummy);
level = rdmsr(MSR_BIOS_SIGN) >> 32;
@@ -691,7 +691,7 @@ identifycpu(struct cpu_info *ci)
if (cpu_apmi_edx & cpu_cpuid_apmi_edx[i].bit)
printf(",%s", cpu_cpuid_apmi_edx[i].str);
- if (cpuid_level >= 0x07) {
+ if (ci->ci_cpuid_level >= 0x07) {
/* "Structured Extended Feature Flags" */
CPUID_LEAF(0x7, 0, dummy, ci->ci_feature_sefflags_ebx,
ci->ci_feature_sefflags_ecx, ci->ci_feature_sefflags_edx);
@@ -709,14 +709,14 @@ identifycpu(struct cpu_info *ci)
printf(",%s", cpu_seff0_edxfeatures[i].str);
}
- if (!strcmp(cpu_vendor, "GenuineIntel") && cpuid_level >= 0x06) {
+ if (ci->ci_vendor == CPUV_INTEL && ci->ci_cpuid_level >= 0x06) {
CPUID(0x06, ci->ci_feature_tpmflags, dummy, cpu_tpm_ecxflags,
dummy);
for (i = 0; i < nitems(cpu_tpm_eaxfeatures); i++)
if (ci->ci_feature_tpmflags &
cpu_tpm_eaxfeatures[i].bit)
printf(",%s", cpu_tpm_eaxfeatures[i].str);
- } else if (!strcmp(cpu_vendor, "AuthenticAMD")) {
+ } else if (ci->ci_vendor == CPUV_AMD) {
CPUID(0x06, ci->ci_feature_tpmflags, dummy, cpu_tpm_ecxflags,
dummy);
if (ci->ci_family >= 0x12)
@@ -724,7 +724,7 @@ identifycpu(struct cpu_info *ci)
}
/* speculation control features */
- if (!strcmp(cpu_vendor, "AuthenticAMD")) {
+ if (ci->ci_vendor == CPUV_AMD) {
if (ci->ci_pnfeatset >= 0x80000008) {
CPUID(0x80000008, dummy, ci->ci_feature_amdspec_ebx,
dummy, dummy);
@@ -734,7 +734,7 @@ identifycpu(struct cpu_info *ci)
printf(",%s",
cpu_amdspec_ebxfeatures[i].str);
}
- } else if (!strcmp(cpu_vendor, "GenuineIntel") &&
+ } else if (ci->ci_vendor == CPUV_INTEL &&
(ci->ci_feature_sefflags_edx & SEFF0EDX_ARCH_CAP)) {
uint64_t msr = rdmsr(MSR_ARCH_CAPABILITIES);
@@ -744,7 +744,7 @@ identifycpu(struct cpu_info *ci)
}
/* xsave subfeatures */
- if (cpuid_level >= 0xd) {
+ if (ci->ci_cpuid_level >= 0xd) {
CPUID_LEAF(0xd, 1, val, dummy, dummy, dummy);
for (i = 0; i < nitems(cpu_xsave_extfeatures); i++)
if (val & cpu_xsave_extfeatures[i].bit)
@@ -761,7 +761,7 @@ identifycpu(struct cpu_info *ci)
if (CPU_IS_PRIMARY(ci)) {
#ifndef SMALL_KERNEL
- if (!strcmp(cpu_vendor, "AuthenticAMD") &&
+ if (ci->ci_vendor == CPUV_AMD &&
ci->ci_pnfeatset >= 0x80000007) {
CPUID(0x80000007, dummy, dummy, dummy, val);
@@ -813,7 +813,7 @@ identifycpu(struct cpu_info *ci)
}
#endif
- if (CPU_IS_PRIMARY(ci) && !strcmp(cpu_vendor, "CentaurHauls")) {
+ if (CPU_IS_PRIMARY(ci) && ci->ci_vendor == CPUV_VIA) {
ci->cpu_setup = via_nano_setup;
#ifndef SMALL_KERNEL
ci->ci_sensor.type = SENSOR_TEMP;
@@ -887,14 +887,14 @@ cpu_topology(struct cpu_info *ci)
u_int32_t smt_mask = 0, core_mask, pkg_mask = 0;
/* We need at least apicid at CPUID 1 */
- if (cpuid_level < 1)
+ if (ci->ci_cpuid_level < 1)
goto no_topology;
/* Initial apicid */
CPUID(1, eax, ebx, ecx, edx);
apicid = (ebx >> 24) & 0xff;
- if (strcmp(cpu_vendor, "AuthenticAMD") == 0) {
+ if (ci->ci_vendor == CPUV_AMD) {
uint32_t nthreads = 1; /* per core */
uint32_t thread_id; /* within a package */
@@ -920,9 +920,9 @@ cpu_topology(struct cpu_info *ci)
/* Cut logical thread_id into core id, and smt id in a core */
ci->ci_core_id = thread_id / nthreads;
ci->ci_smt_id = thread_id % nthreads;
- } else if (strcmp(cpu_vendor, "GenuineIntel") == 0) {
+ } else if (ci->ci_vendor == CPUV_INTEL) {
/* We only support leaf 1/4 detection */
- if (cpuid_level < 4)
+ if (ci->ci_cpuid_level < 4)
goto no_topology;
/* Get max_apicid */
CPUID(1, eax, ebx, ecx, edx);
@@ -1091,7 +1091,7 @@ cpu_check_vmm_cap(struct cpu_info *ci)
* Full details can be found here:
* https://software.intel.com/security-software-guidance/insights/deep-dive-intel-analysis-l1-terminal-fault
*/
- if (!strcmp(cpu_vendor, "GenuineIntel")) {
+ if (ci->ci_vendor == CPUV_INTEL) {
if (ci->ci_feature_sefflags_edx & SEFF0EDX_L1DF)
ci->ci_vmm_cap.vcc_vmx.vmx_has_l1_flush_msr = 1;
else
diff --git a/sys/arch/amd64/amd64/lapic.c b/sys/arch/amd64/amd64/lapic.c
index 4232647c3f8..82c4eca4689 100644
--- a/sys/arch/amd64/amd64/lapic.c
+++ b/sys/arch/amd64/amd64/lapic.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: lapic.c,v 1.71 2023/09/17 14:50:50 cheloha Exp $ */
+/* $OpenBSD: lapic.c,v 1.72 2024/04/03 02:01:21 guenther Exp $ */
/* $NetBSD: lapic.c,v 1.2 2003/05/08 01:04:35 fvdl Exp $ */
/*-
@@ -284,7 +284,7 @@ lapic_set_lvt(void)
}
#endif
- if (strcmp(cpu_vendor, "AuthenticAMD") == 0) {
+ if (ci->ci_vendor == CPUV_AMD) {
/*
* Detect the presence of C1E capability mostly on latest
* dual-cores (or future) k8 family. This mis-feature renders
diff --git a/sys/arch/amd64/amd64/machdep.c b/sys/arch/amd64/amd64/machdep.c
index f58e6c585c1..7bf57a406cd 100644
--- a/sys/arch/amd64/amd64/machdep.c
+++ b/sys/arch/amd64/amd64/machdep.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: machdep.c,v 1.291 2024/02/25 22:33:09 guenther Exp $ */
+/* $OpenBSD: machdep.c,v 1.292 2024/04/03 02:01:21 guenther Exp $ */
/* $NetBSD: machdep.c,v 1.3 2003/05/07 22:58:18 fvdl Exp $ */
/*-
@@ -1393,6 +1393,23 @@ map_tramps(void)
#endif
}
+void
+cpu_set_vendor(struct cpu_info *ci, int level, const char *vendor)
+{
+ ci->ci_cpuid_level = level;
+ cpuid_level = MIN(cpuid_level, level);
+
+ /* map the vendor string to an integer */
+ if (strcmp(vendor, "AuthenticAMD") == 0)
+ ci->ci_vendor = CPUV_AMD;
+ else if (strcmp(vendor, "GenuineIntel") == 0)
+ ci->ci_vendor = CPUV_INTEL;
+ else if (strcmp(vendor, "CentaurHauls") == 0)
+ ci->ci_vendor = CPUV_VIA;
+ else
+ ci->ci_vendor = CPUV_UNKNOWN;
+}
+
#define IDTVEC(name) __CONCAT(X, name)
typedef void (vector)(void);
extern vector *IDTVEC(exceptions)[];
@@ -1416,6 +1433,7 @@ init_x86_64(paddr_t first_avail)
early_pte_pages = first_avail;
first_avail += 3 * NBPG;
+ cpu_set_vendor(&cpu_info_primary, cpuid_level, cpu_vendor);
cpu_init_msrs(&cpu_info_primary);
proc0.p_addr = proc0paddr;
diff --git a/sys/arch/amd64/amd64/mtrr.c b/sys/arch/amd64/amd64/mtrr.c
index ff725aa1c33..7e16713b2e4 100644
--- a/sys/arch/amd64/amd64/mtrr.c
+++ b/sys/arch/amd64/amd64/mtrr.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: mtrr.c,v 1.4 2013/12/19 21:30:02 deraadt Exp $ */
+/* $OpenBSD: mtrr.c,v 1.5 2024/04/03 02:01:21 guenther Exp $ */
/*-
* Copyright (c) 1999 Michael Smith <msmith@freebsd.org>
* Copyright (c) 1999 Brian Fundakowski Feldman
@@ -38,6 +38,7 @@ extern struct mem_range_ops mrops;
void
mem_range_attach(void)
{
+ struct cpu_info *ci = &cpu_info_primary;
int family, model, step;
family = (cpu_id >> 8) & 0xf;
@@ -45,9 +46,9 @@ mem_range_attach(void)
step = (cpu_id >> 0) & 0xf;
/* Try for i686 MTRRs */
- if (((strcmp(cpu_vendor, "GenuineIntel") == 0) ||
- (strcmp(cpu_vendor, "CentaurHauls") == 0) ||
- (strcmp(cpu_vendor, "AuthenticAMD") == 0)) &&
+ if ((ci->ci_vendor == CPUV_AMD ||
+ ci->ci_vendor == CPUV_INTEL ||
+ ci->ci_vendor == CPUV_VIA) &&
(family == 0x6 || family == 0xf) &&
cpu_feature & CPUID_MTRR) {
mem_range_softc.mr_op = &mrops;
diff --git a/sys/arch/amd64/amd64/pctr.c b/sys/arch/amd64/amd64/pctr.c
index 22b78568b3b..3d22cee2b6c 100644
--- a/sys/arch/amd64/amd64/pctr.c
+++ b/sys/arch/amd64/amd64/pctr.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: pctr.c,v 1.9 2019/03/25 18:48:12 guenther Exp $ */
+/* $OpenBSD: pctr.c,v 1.10 2024/04/03 02:01:21 guenther Exp $ */
/*
* Copyright (c) 2007 Mike Belopuhov
@@ -76,14 +76,15 @@ pctrrd(struct pctrst *st)
void
pctrattach(int num)
{
+ struct cpu_info *ci = &cpu_info_primary;
uint32_t dummy;
if (num > 1)
return;
- pctr_isamd = (strcmp(cpu_vendor, "AuthenticAMD") == 0);
+ pctr_isamd = (ci->ci_vendor == CPUV_AMD);
if (!pctr_isamd) {
- pctr_isintel = (strcmp(cpu_vendor, "GenuineIntel") == 0);
+ pctr_isintel = (ci->ci_vendor == CPUV_INTEL);
CPUID(0xa, pctr_intel_cap, dummy, dummy, dummy);
}
}
diff --git a/sys/arch/amd64/amd64/tsc.c b/sys/arch/amd64/amd64/tsc.c
index c642dc1bca3..a14ad9e4066 100644
--- a/sys/arch/amd64/amd64/tsc.c
+++ b/sys/arch/amd64/amd64/tsc.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: tsc.c,v 1.31 2023/02/04 19:19:36 cheloha Exp $ */
+/* $OpenBSD: tsc.c,v 1.32 2024/04/03 02:01:21 guenther Exp $ */
/*
* Copyright (c) 2008 The NetBSD Foundation, Inc.
* Copyright (c) 2016,2017 Reyk Floeter <reyk@openbsd.org>
@@ -63,8 +63,8 @@ tsc_freq_cpuid(struct cpu_info *ci)
uint64_t count;
uint32_t eax, ebx, khz, dummy;
- if (!strcmp(cpu_vendor, "GenuineIntel") &&
- cpuid_level >= 0x15) {
+ if (ci->ci_vendor == CPUV_INTEL &&
+ ci->ci_cpuid_level >= 0x15) {
eax = ebx = khz = dummy = 0;
CPUID(0x15, eax, ebx, khz, dummy);
khz /= 1000;
@@ -104,7 +104,7 @@ tsc_freq_msr(struct cpu_info *ci)
{
uint64_t base, def, divisor, multiplier;
- if (strcmp(cpu_vendor, "AuthenticAMD") != 0)
+ if (ci->ci_vendor != CPUV_AMD)
return 0;
/*
diff --git a/sys/arch/amd64/amd64/ucode.c b/sys/arch/amd64/amd64/ucode.c
index 7111cc8dda3..e11ee6158c5 100644
--- a/sys/arch/amd64/amd64/ucode.c
+++ b/sys/arch/amd64/amd64/ucode.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: ucode.c,v 1.8 2023/09/10 09:32:31 jsg Exp $ */
+/* $OpenBSD: ucode.c,v 1.9 2024/04/03 02:01:21 guenther Exp $ */
/*
* Copyright (c) 2018 Stefan Fritsch <fritsch@genua.de>
* Copyright (c) 2018 Patrick Wildt <patrick@blueri.se>
@@ -108,9 +108,9 @@ cpu_ucode_setup(void)
void
cpu_ucode_apply(struct cpu_info *ci)
{
- if (strcmp(cpu_vendor, "GenuineIntel") == 0)
+ if (ci->ci_vendor == CPUV_INTEL)
cpu_ucode_intel_apply(ci);
- else if (strcmp(cpu_vendor, "AuthenticAMD") == 0)
+ else if (ci->ci_vendor == CPUV_AMD)
cpu_ucode_amd_apply(ci);
}
diff --git a/sys/arch/amd64/include/cpu.h b/sys/arch/amd64/include/cpu.h
index 3eea95d5614..84a86b229da 100644
--- a/sys/arch/amd64/include/cpu.h
+++ b/sys/arch/amd64/include/cpu.h
@@ -1,4 +1,4 @@
-/* $OpenBSD: cpu.h,v 1.163 2024/02/25 19:15:50 cheloha Exp $ */
+/* $OpenBSD: cpu.h,v 1.164 2024/04/03 02:01:21 guenther Exp $ */
/* $NetBSD: cpu.h,v 1.1 2003/04/26 18:39:39 fvdl Exp $ */
/*-
@@ -91,6 +91,13 @@ union vmm_cpu_cap {
struct svm vcc_svm;
};
+enum cpu_vendor {
+ CPUV_UNKNOWN,
+ CPUV_AMD,
+ CPUV_INTEL,
+ CPUV_VIA,
+};
+
/*
* Locks used to protect struct members in this file:
* I immutable after creation
@@ -154,6 +161,8 @@ struct cpu_info {
volatile u_int ci_flags; /* [a] */
u_int32_t ci_ipis; /* [a] */
+ enum cpu_vendor ci_vendor; /* [I] mapped from cpuid(0) */
+ u_int32_t ci_cpuid_level; /* [I] cpuid(0).eax */
u_int32_t ci_feature_flags; /* [I] */
u_int32_t ci_feature_eflags; /* [I] */
u_int32_t ci_feature_sefflags_ebx;/* [I] */
@@ -403,6 +412,7 @@ extern int cpuspeed;
/* machdep.c */
void dumpconf(void);
+void cpu_set_vendor(struct cpu_info *, int _level, const char *_vendor);
void cpu_reset(void);
void x86_64_proc0_tss_ldt_init(void);
void cpu_proc_fork(struct proc *, struct proc *);