From: kettenis Date: Mon, 18 Mar 2024 21:57:22 +0000 (+0000) Subject: Implement Spectre-V4 mitigations. The only real effect of this change is X-Git-Url: http://artulab.com/gitweb/?a=commitdiff_plain;h=8291c3f136ba87b37753555bb27f6c477d823b8e;p=openbsd Implement Spectre-V4 mitigations. The only real effect of this change is that we now make a firmware call to enable the mitigations if the firmware tells us mitigations are implemented and needed. But according to the specification these mitigations should be enabled by default. The open source TF-A implementation only implements mitigations for older Cortex-A76 cores. Newer Cortex-A76 revisions are not vulnerable and as far as I can tell we only support SoCs with the newer cores. ok patrick@ --- diff --git a/sys/arch/arm64/arm64/cpu.c b/sys/arch/arm64/arm64/cpu.c index 06c48cd3d1c..e4741ceb25f 100644 --- a/sys/arch/arm64/arm64/cpu.c +++ b/sys/arch/arm64/arm64/cpu.c @@ -1,4 +1,4 @@ -/* $OpenBSD: cpu.c,v 1.112 2024/03/18 18:35:21 kettenis Exp $ */ +/* $OpenBSD: cpu.c,v 1.113 2024/03/18 21:57:22 kettenis Exp $ */ /* * Copyright (c) 2016 Dale Rahn @@ -278,6 +278,43 @@ void cpu_kstat_attach(struct cpu_info *ci); void cpu_opp_kstat_attach(struct cpu_info *ci); #endif +/* + * Enable mitigation for Spectre-V4 speculative store bypass + * vulnerabilities (CVE-2018-3639). + */ +void +cpu_mitigate_spectre_v4(struct cpu_info *ci) +{ + uint64_t id; + + switch (CPU_IMPL(ci->ci_midr)) { + case CPU_IMPL_ARM: + switch (CPU_PART(ci->ci_midr)) { + case CPU_PART_CORTEX_A35: + case CPU_PART_CORTEX_A53: + case CPU_PART_CORTEX_A55: + /* Not vulnerable. */ + return; + } + break; + case CPU_IMPL_QCOM: + switch (CPU_PART(ci->ci_midr)) { + case CPU_PART_KRYO400_SILVER: + /* Not vulnerable. */ + return; + } + break; + } + + /* SSBS tells us Spectre-V4 is mitigated. */ + id = READ_SPECIALREG(id_aa64pfr1_el1); + if (ID_AA64PFR1_SSBS(id) >= ID_AA64PFR1_SSBS_PSTATE) + return; + + /* Enable firmware workaround if required.
*/ + smccc_enable_arch_workaround_2(); +} + void cpu_identify(struct cpu_info *ci) { @@ -508,7 +545,7 @@ cpu_identify(struct cpu_info *ci) */ #if NPSCI > 0 if (ci->ci_trampoline_vectors == (vaddr_t)trampoline_vectors_none && - psci_flush_bp_has_bhb()) { + smccc_needs_arch_workaround_3()) { ci->ci_flush_bp = cpu_flush_bp_noop; if (psci_method() == PSCI_METHOD_HVC) ci->ci_trampoline_vectors = @@ -539,6 +576,8 @@ cpu_identify(struct cpu_info *ci) ci->ci_trampoline_vectors = (vaddr_t)trampoline_vectors_none; } + cpu_mitigate_spectre_v4(ci); + /* * Apple CPUs provide detailed information for SError. */ diff --git a/sys/dev/fdt/psci.c b/sys/dev/fdt/psci.c index 675a97d5cd8..c064823cc69 100644 --- a/sys/dev/fdt/psci.c +++ b/sys/dev/fdt/psci.c @@ -1,4 +1,4 @@ -/* $OpenBSD: psci.c,v 1.14 2023/02/19 17:16:13 kettenis Exp $ */ +/* $OpenBSD: psci.c,v 1.15 2024/03/18 21:57:22 kettenis Exp $ */ /* * Copyright (c) 2016 Jonathan Gray @@ -34,6 +34,7 @@ extern void (*powerdownfn)(void); #define SMCCC_VERSION 0x80000000 #define SMCCC_ARCH_FEATURES 0x80000001 #define SMCCC_ARCH_WORKAROUND_1 0x80008000 +#define SMCCC_ARCH_WORKAROUND_2 0x80007fff #define SMCCC_ARCH_WORKAROUND_3 0x80003fff #define PSCI_VERSION 0x84000000 @@ -240,8 +241,24 @@ psci_flush_bp(void) } } +void +smccc_enable_arch_workaround_2(void) +{ + struct psci_softc *sc = psci_sc; + + /* + * SMCCC 1.1 allows us to detect if the workaround is + * implemented and needed. + */ + if (sc && sc->sc_smccc_version >= 0x10001 && + smccc_arch_features(SMCCC_ARCH_WORKAROUND_2) == 0) { + /* Workaround implemented and needed.
*/ + (*sc->sc_callfn)(SMCCC_ARCH_WORKAROUND_2, 1, 0, 0); + } +} + int -psci_flush_bp_has_bhb(void) +smccc_needs_arch_workaround_3(void) { struct psci_softc *sc = psci_sc; diff --git a/sys/dev/fdt/pscivar.h b/sys/dev/fdt/pscivar.h index 04e418f3190..f13a757d7c5 100644 --- a/sys/dev/fdt/pscivar.h +++ b/sys/dev/fdt/pscivar.h @@ -22,4 +22,7 @@ int psci_method(void); int32_t smccc(uint32_t, register_t, register_t, register_t); +void smccc_enable_arch_workaround_2(void); +int smccc_needs_arch_workaround_3(void); + #endif /* _SYS_DEV_FDT_PSCIVAR_H_ */