level and a numeric mapping of the cpu vendor, both from CPUID(0).
Convert the general use of strcmp(cpu_vendor) to simple numeric
tests of ci_vendor. Track the minimum of all ci_cpuid_level in the
cpuid_level global and continue to use that for what vmm exposes.
AMD testing help matthieu@ krw@
ok miod@ deraadt@ cheloha@
-/* $OpenBSD: cacheinfo.c,v 1.12 2024/02/03 09:53:15 jsg Exp $ */
+/* $OpenBSD: cacheinfo.c,v 1.13 2024/04/03 02:01:21 guenther Exp $ */
/*
* Copyright (c) 2022 Jonathan Gray <jsg@openbsd.org>
{
uint64_t msr;
- if (strcmp(cpu_vendor, "GenuineIntel") == 0 &&
+ if (ci->ci_vendor == CPUV_INTEL &&
rdmsr_safe(MSR_MISC_ENABLE, &msr) == 0 &&
(msr & MISC_ENABLE_LIMIT_CPUID_MAXVAL) == 0) {
intel_print_cacheinfo(ci, 4);
return;
}
- if (strcmp(cpu_vendor, "AuthenticAMD") == 0 &&
+ if (ci->ci_vendor == CPUV_AMD &&
(ecpu_ecxfeature & CPUIDECX_TOPEXT)) {
intel_print_cacheinfo(ci, 0x8000001d);
return;
-/* $OpenBSD: cpu.c,v 1.184 2024/03/17 05:49:41 guenther Exp $ */
+/* $OpenBSD: cpu.c,v 1.185 2024/04/03 02:01:21 guenther Exp $ */
/* $NetBSD: cpu.c,v 1.1 2003/04/26 18:39:26 fvdl Exp $ */
/*-
extern long _stac;
extern long _clac;
-int cpuid_level = 0; /* cpuid(0).eax */
-char cpu_vendor[16] = { 0 }; /* cpuid(0).e[bdc]x, \0 */
+int cpuid_level = 0; /* MIN cpuid(0).eax */
+char cpu_vendor[16] = { 0 }; /* CPU0's cpuid(0).e[bdc]x, \0 */
int cpu_id = 0; /* cpuid(1).eax */
int cpu_ebxfeature = 0; /* cpuid(1).ebx */
int cpu_ecxfeature = 0; /* cpuid(1).ecx */
struct cpu_info *ci = &cpu_info_primary;
int swapgs_vuln = 0, ibrs = 0, s, ibpb = 0;
- if (strcmp(cpu_vendor, "GenuineIntel") == 0) {
+ if (ci->ci_vendor == CPUV_INTEL) {
int family = ci->ci_family;
int model = ci->ci_model;
}
if (ci->ci_feature_sefflags_edx & SEFF0EDX_IBRS)
ibpb = 1;
- } else if (strcmp(cpu_vendor, "AuthenticAMD") == 0 &&
+ } else if (ci->ci_vendor == CPUV_AMD &&
ci->ci_pnfeatset >= 0x80000008) {
if (ci->ci_feature_amdspec_ebx & CPUIDEBX_IBRS_ALWAYSON) {
ibrs = 2;
return;
replacedone = 1;
- if (strcmp(cpu_vendor, "GenuineIntel") != 0)
+ if (ci->ci_vendor != CPUV_INTEL)
goto notintel; /* VERW only needed on Intel */
if ((ci->ci_feature_sefflags_edx & SEFF0EDX_ARCH_CAP))
{
unsigned int smallest, largest, extensions, c_substates;
- if ((cpu_ecxfeature & CPUIDECX_MWAIT) == 0 || cpuid_level < 0x5)
+ if ((cpu_ecxfeature & CPUIDECX_MWAIT) == 0 || ci->ci_cpuid_level < 0x5)
return;
/* get the monitor granularity */
largest &= 0xffff;
/* mask out states C6/C7 in 31:24 for CHT45 errata */
- if (strcmp(cpu_vendor, "GenuineIntel") == 0 &&
+ if (ci->ci_vendor == CPUV_INTEL &&
ci->ci_family == 0x06 && ci->ci_model == 0x4c)
cpu_mwait_states &= 0x00ffffff;
cr4 |= CR4_SMAP;
if (ci->ci_feature_sefflags_ecx & SEFF0ECX_UMIP)
cr4 |= CR4_UMIP;
- if ((cpu_ecxfeature & CPUIDECX_XSAVE) && cpuid_level >= 0xd)
+ if ((cpu_ecxfeature & CPUIDECX_XSAVE) && ci->ci_cpuid_level >= 0xd)
cr4 |= CR4_OSXSAVE;
if (pg_xo)
cr4 |= CR4_PKE;
cr4 |= CR4_PCIDE;
lcr4(cr4);
- if ((cpu_ecxfeature & CPUIDECX_XSAVE) && cpuid_level >= 0xd) {
+ if ((cpu_ecxfeature & CPUIDECX_XSAVE) && ci->ci_cpuid_level >= 0xd) {
u_int32_t eax, ebx, ecx, edx;
xsave_mask = XFEATURE_X87 | XFEATURE_SSE;
/* check for xsaves, xsaveopt, and supervisor features */
CPUID_LEAF(0xd, 1, eax, ebx, ecx, edx);
/* Disable XSAVES on AMD family 17h due to Erratum 1386 */
- if (!strcmp(cpu_vendor, "AuthenticAMD") &&
+ if (ci->ci_vendor == CPUV_AMD &&
ci->ci_family == 0x17) {
eax &= ~XSAVE_XSAVES;
}
struct cpu_info *ci = (struct cpu_info *)v;
int s;
+ {
+ uint32_t vendor[4];
+ int level;
+
+ CPUID(0, level, vendor[0], vendor[2], vendor[1]);
+ vendor[3] = 0;
+ cpu_set_vendor(ci, level, (const char *)vendor);
+ }
+
cpu_init_msrs(ci);
#ifdef DEBUG
int family = ci->ci_family;
uint64_t msr, nmsr;
- if (!strcmp(cpu_vendor, "GenuineIntel")) {
+ if (ci->ci_vendor == CPUV_INTEL) {
if ((family > 6 || (family == 6 && ci->ci_model >= 0xd)) &&
rdmsr_safe(MSR_MISC_ENABLE, &msr) == 0 &&
(msr & MISC_ENABLE_FAST_STRINGS) == 0) {
}
}
- if (!strcmp(cpu_vendor, "AuthenticAMD")) {
+ if (ci->ci_vendor == CPUV_AMD) {
/* Apply AMD errata */
amd64_errata(ci);
uint32_t dummy, sefflags_edx;
/* this runs before identifycpu() populates ci_feature_sefflags_edx */
- if (cpuid_level < 0x07)
+ if (ci->ci_cpuid_level < 0x07)
return;
CPUID_LEAF(0x7, 0, dummy, dummy, dummy, sefflags_edx);
- if (strcmp(cpu_vendor, "GenuineIntel") == 0 &&
+ if (ci->ci_vendor == CPUV_INTEL &&
(sefflags_edx & SEFF0EDX_ARCH_CAP)) {
msr = rdmsr(MSR_ARCH_CAPABILITIES);
if (msr & ARCH_CAP_TSX_CTRL) {
-/* $OpenBSD: identcpu.c,v 1.139 2024/03/17 05:49:41 guenther Exp $ */
+/* $OpenBSD: identcpu.c,v 1.140 2024/04/03 02:01:21 guenther Exp $ */
/* $NetBSD: identcpu.c,v 1.1 2003/04/26 18:39:28 fvdl Exp $ */
/*
if (ci->ci_feature_flags && ci->ci_feature_flags & CPUID_TSC) {
/* Has TSC, check if it's constant */
- if (!strcmp(cpu_vendor, "GenuineIntel")) {
+ if (ci->ci_vendor == CPUV_INTEL) {
if ((ci->ci_family == 0x0f && ci->ci_model >= 0x03) ||
(ci->ci_family == 0x06 && ci->ci_model >= 0x0e)) {
atomic_setbits_int(&ci->ci_flags, CPUF_CONST_TSC);
}
- } else if (!strcmp(cpu_vendor, "CentaurHauls")) {
+ } else if (ci->ci_vendor == CPUV_VIA) {
/* VIA */
if (ci->ci_model >= 0x0f) {
atomic_setbits_int(&ci->ci_flags, CPUF_CONST_TSC);
}
- } else if (!strcmp(cpu_vendor, "AuthenticAMD")) {
+ } else if (ci->ci_vendor == CPUV_AMD) {
if (cpu_apmi_edx & CPUIDEDX_ITSC) {
/* Invariant TSC indicates constant TSC on AMD */
atomic_setbits_int(&ci->ci_flags, CPUF_CONST_TSC);
uint64_t level = 0;
uint32_t dummy;
- if (strcmp(cpu_vendor, "AuthenticAMD") == 0) {
+ if (ci->ci_vendor == CPUV_AMD) {
level = rdmsr(MSR_PATCH_LEVEL);
- } else if (strcmp(cpu_vendor, "GenuineIntel") == 0) {
+ } else if (ci->ci_vendor == CPUV_INTEL) {
wrmsr(MSR_BIOS_SIGN, 0);
CPUID(1, dummy, dummy, dummy, dummy);
level = rdmsr(MSR_BIOS_SIGN) >> 32;
if (cpu_apmi_edx & cpu_cpuid_apmi_edx[i].bit)
printf(",%s", cpu_cpuid_apmi_edx[i].str);
- if (cpuid_level >= 0x07) {
+ if (ci->ci_cpuid_level >= 0x07) {
/* "Structured Extended Feature Flags" */
CPUID_LEAF(0x7, 0, dummy, ci->ci_feature_sefflags_ebx,
ci->ci_feature_sefflags_ecx, ci->ci_feature_sefflags_edx);
printf(",%s", cpu_seff0_edxfeatures[i].str);
}
- if (!strcmp(cpu_vendor, "GenuineIntel") && cpuid_level >= 0x06) {
+ if (ci->ci_vendor == CPUV_INTEL && ci->ci_cpuid_level >= 0x06) {
CPUID(0x06, ci->ci_feature_tpmflags, dummy, cpu_tpm_ecxflags,
dummy);
for (i = 0; i < nitems(cpu_tpm_eaxfeatures); i++)
if (ci->ci_feature_tpmflags &
cpu_tpm_eaxfeatures[i].bit)
printf(",%s", cpu_tpm_eaxfeatures[i].str);
- } else if (!strcmp(cpu_vendor, "AuthenticAMD")) {
+ } else if (ci->ci_vendor == CPUV_AMD) {
CPUID(0x06, ci->ci_feature_tpmflags, dummy, cpu_tpm_ecxflags,
dummy);
if (ci->ci_family >= 0x12)
}
/* speculation control features */
- if (!strcmp(cpu_vendor, "AuthenticAMD")) {
+ if (ci->ci_vendor == CPUV_AMD) {
if (ci->ci_pnfeatset >= 0x80000008) {
CPUID(0x80000008, dummy, ci->ci_feature_amdspec_ebx,
dummy, dummy);
printf(",%s",
cpu_amdspec_ebxfeatures[i].str);
}
- } else if (!strcmp(cpu_vendor, "GenuineIntel") &&
+ } else if (ci->ci_vendor == CPUV_INTEL &&
(ci->ci_feature_sefflags_edx & SEFF0EDX_ARCH_CAP)) {
uint64_t msr = rdmsr(MSR_ARCH_CAPABILITIES);
}
/* xsave subfeatures */
- if (cpuid_level >= 0xd) {
+ if (ci->ci_cpuid_level >= 0xd) {
CPUID_LEAF(0xd, 1, val, dummy, dummy, dummy);
for (i = 0; i < nitems(cpu_xsave_extfeatures); i++)
if (val & cpu_xsave_extfeatures[i].bit)
if (CPU_IS_PRIMARY(ci)) {
#ifndef SMALL_KERNEL
- if (!strcmp(cpu_vendor, "AuthenticAMD") &&
+ if (ci->ci_vendor == CPUV_AMD &&
ci->ci_pnfeatset >= 0x80000007) {
CPUID(0x80000007, dummy, dummy, dummy, val);
}
#endif
- if (CPU_IS_PRIMARY(ci) && !strcmp(cpu_vendor, "CentaurHauls")) {
+ if (CPU_IS_PRIMARY(ci) && ci->ci_vendor == CPUV_VIA) {
ci->cpu_setup = via_nano_setup;
#ifndef SMALL_KERNEL
ci->ci_sensor.type = SENSOR_TEMP;
u_int32_t smt_mask = 0, core_mask, pkg_mask = 0;
/* We need at least apicid at CPUID 1 */
- if (cpuid_level < 1)
+ if (ci->ci_cpuid_level < 1)
goto no_topology;
/* Initial apicid */
CPUID(1, eax, ebx, ecx, edx);
apicid = (ebx >> 24) & 0xff;
- if (strcmp(cpu_vendor, "AuthenticAMD") == 0) {
+ if (ci->ci_vendor == CPUV_AMD) {
uint32_t nthreads = 1; /* per core */
uint32_t thread_id; /* within a package */
/* Cut logical thread_id into core id, and smt id in a core */
ci->ci_core_id = thread_id / nthreads;
ci->ci_smt_id = thread_id % nthreads;
- } else if (strcmp(cpu_vendor, "GenuineIntel") == 0) {
+ } else if (ci->ci_vendor == CPUV_INTEL) {
/* We only support leaf 1/4 detection */
- if (cpuid_level < 4)
+ if (ci->ci_cpuid_level < 4)
goto no_topology;
/* Get max_apicid */
CPUID(1, eax, ebx, ecx, edx);
* Full details can be found here:
* https://software.intel.com/security-software-guidance/insights/deep-dive-intel-analysis-l1-terminal-fault
*/
- if (!strcmp(cpu_vendor, "GenuineIntel")) {
+ if (ci->ci_vendor == CPUV_INTEL) {
if (ci->ci_feature_sefflags_edx & SEFF0EDX_L1DF)
ci->ci_vmm_cap.vcc_vmx.vmx_has_l1_flush_msr = 1;
else
-/* $OpenBSD: lapic.c,v 1.71 2023/09/17 14:50:50 cheloha Exp $ */
+/* $OpenBSD: lapic.c,v 1.72 2024/04/03 02:01:21 guenther Exp $ */
/* $NetBSD: lapic.c,v 1.2 2003/05/08 01:04:35 fvdl Exp $ */
/*-
}
#endif
- if (strcmp(cpu_vendor, "AuthenticAMD") == 0) {
+ if (ci->ci_vendor == CPUV_AMD) {
/*
* Detect the presence of C1E capability mostly on latest
* dual-cores (or future) k8 family. This mis-feature renders
-/* $OpenBSD: machdep.c,v 1.291 2024/02/25 22:33:09 guenther Exp $ */
+/* $OpenBSD: machdep.c,v 1.292 2024/04/03 02:01:21 guenther Exp $ */
/* $NetBSD: machdep.c,v 1.3 2003/05/07 22:58:18 fvdl Exp $ */
/*-
#endif
}
+/*
+ * Record this CPU's cpuid(0) basic level and translate its vendor
+ * string into a numeric CPUV_* value for cheap comparisons later.
+ * Also fold the level into the global cpuid_level, which tracks the
+ * minimum across all CPUs.
+ */
+void
+cpu_set_vendor(struct cpu_info *ci, int level, const char *vendor)
+{
+	ci->ci_cpuid_level = level;
+	cpuid_level = MIN(cpuid_level, level);
+
+	/* map the vendor string to an integer */
+	if (strcmp(vendor, "AuthenticAMD") == 0)
+		ci->ci_vendor = CPUV_AMD;
+	else if (strcmp(vendor, "GenuineIntel") == 0)
+		ci->ci_vendor = CPUV_INTEL;
+	else if (strcmp(vendor, "CentaurHauls") == 0)
+		ci->ci_vendor = CPUV_VIA;
+	else
+		ci->ci_vendor = CPUV_UNKNOWN;
+}
+
+
#define IDTVEC(name) __CONCAT(X, name)
typedef void (vector)(void);
extern vector *IDTVEC(exceptions)[];
early_pte_pages = first_avail;
first_avail += 3 * NBPG;
+ cpu_set_vendor(&cpu_info_primary, cpuid_level, cpu_vendor);
cpu_init_msrs(&cpu_info_primary);
proc0.p_addr = proc0paddr;
-/* $OpenBSD: mtrr.c,v 1.4 2013/12/19 21:30:02 deraadt Exp $ */
+/* $OpenBSD: mtrr.c,v 1.5 2024/04/03 02:01:21 guenther Exp $ */
/*-
* Copyright (c) 1999 Michael Smith <msmith@freebsd.org>
* Copyright (c) 1999 Brian Fundakowski Feldman
void
mem_range_attach(void)
{
+ struct cpu_info *ci = &cpu_info_primary;
int family, model, step;
family = (cpu_id >> 8) & 0xf;
step = (cpu_id >> 0) & 0xf;
/* Try for i686 MTRRs */
- if (((strcmp(cpu_vendor, "GenuineIntel") == 0) ||
- (strcmp(cpu_vendor, "CentaurHauls") == 0) ||
- (strcmp(cpu_vendor, "AuthenticAMD") == 0)) &&
+ if ((ci->ci_vendor == CPUV_AMD ||
+ ci->ci_vendor == CPUV_INTEL ||
+ ci->ci_vendor == CPUV_VIA) &&
(family == 0x6 || family == 0xf) &&
cpu_feature & CPUID_MTRR) {
mem_range_softc.mr_op = &mrops;
-/* $OpenBSD: pctr.c,v 1.9 2019/03/25 18:48:12 guenther Exp $ */
+/* $OpenBSD: pctr.c,v 1.10 2024/04/03 02:01:21 guenther Exp $ */
/*
* Copyright (c) 2007 Mike Belopuhov
void
pctrattach(int num)
{
+ struct cpu_info *ci = &cpu_info_primary;
uint32_t dummy;
if (num > 1)
return;
- pctr_isamd = (strcmp(cpu_vendor, "AuthenticAMD") == 0);
+ pctr_isamd = (ci->ci_vendor == CPUV_AMD);
if (!pctr_isamd) {
- pctr_isintel = (strcmp(cpu_vendor, "GenuineIntel") == 0);
+ pctr_isintel = (ci->ci_vendor == CPUV_INTEL);
CPUID(0xa, pctr_intel_cap, dummy, dummy, dummy);
}
}
-/* $OpenBSD: tsc.c,v 1.31 2023/02/04 19:19:36 cheloha Exp $ */
+/* $OpenBSD: tsc.c,v 1.32 2024/04/03 02:01:21 guenther Exp $ */
/*
* Copyright (c) 2008 The NetBSD Foundation, Inc.
* Copyright (c) 2016,2017 Reyk Floeter <reyk@openbsd.org>
uint64_t count;
uint32_t eax, ebx, khz, dummy;
- if (!strcmp(cpu_vendor, "GenuineIntel") &&
- cpuid_level >= 0x15) {
+ if (ci->ci_vendor == CPUV_INTEL &&
+ ci->ci_cpuid_level >= 0x15) {
eax = ebx = khz = dummy = 0;
CPUID(0x15, eax, ebx, khz, dummy);
khz /= 1000;
{
uint64_t base, def, divisor, multiplier;
- if (strcmp(cpu_vendor, "AuthenticAMD") != 0)
+ if (ci->ci_vendor != CPUV_AMD)
return 0;
/*
-/* $OpenBSD: ucode.c,v 1.8 2023/09/10 09:32:31 jsg Exp $ */
+/* $OpenBSD: ucode.c,v 1.9 2024/04/03 02:01:21 guenther Exp $ */
/*
* Copyright (c) 2018 Stefan Fritsch <fritsch@genua.de>
* Copyright (c) 2018 Patrick Wildt <patrick@blueri.se>
void
cpu_ucode_apply(struct cpu_info *ci)
{
- if (strcmp(cpu_vendor, "GenuineIntel") == 0)
+ if (ci->ci_vendor == CPUV_INTEL)
cpu_ucode_intel_apply(ci);
- else if (strcmp(cpu_vendor, "AuthenticAMD") == 0)
+ else if (ci->ci_vendor == CPUV_AMD)
cpu_ucode_amd_apply(ci);
}
-/* $OpenBSD: cpu.h,v 1.163 2024/02/25 19:15:50 cheloha Exp $ */
+/* $OpenBSD: cpu.h,v 1.164 2024/04/03 02:01:21 guenther Exp $ */
/* $NetBSD: cpu.h,v 1.1 2003/04/26 18:39:39 fvdl Exp $ */
/*-
struct svm vcc_svm;
};
+/* numeric mapping of the cpuid(0) vendor string; see cpu_set_vendor() */
+enum cpu_vendor {
+	CPUV_UNKNOWN,
+	CPUV_AMD,
+	CPUV_INTEL,
+	CPUV_VIA,
+};
+
/*
* Locks used to protect struct members in this file:
* I immutable after creation
volatile u_int ci_flags; /* [a] */
u_int32_t ci_ipis; /* [a] */
+ enum cpu_vendor ci_vendor; /* [I] mapped from cpuid(0) */
+ u_int32_t ci_cpuid_level; /* [I] cpuid(0).eax */
u_int32_t ci_feature_flags; /* [I] */
u_int32_t ci_feature_eflags; /* [I] */
u_int32_t ci_feature_sefflags_ebx;/* [I] */
/* machdep.c */
void dumpconf(void);
+void cpu_set_vendor(struct cpu_info *, int _level, const char *_vendor);
void cpu_reset(void);
void x86_64_proc0_tss_ldt_init(void);
void cpu_proc_fork(struct proc *, struct proc *);