From a1fa35384a2451a9a5f75e63d1e99a2902f0cfbd Mon Sep 17 00:00:00 2001
From: guenther
Date: Tue, 24 Jul 2018 02:42:25 +0000
Subject: [PATCH] Also do RSB refilling when context switching, after
 vmexits, and when vmlaunch or vmresume fails.

Follow the lead of clang and the Intel recommendation and do an lfence
after the pause in the speculation-stop path for retpoline, RSB refill,
and meltdown ASM bits.

ok kettenis@ deraadt@
---
 sys/arch/amd64/amd64/locore.S      | 14 +++++++++++++-
 sys/arch/amd64/amd64/vector.S      |  3 ++-
 sys/arch/amd64/amd64/vmm_support.S |  6 +++++-
 sys/arch/amd64/include/asm.h       |  5 ++++-
 sys/arch/amd64/include/cpufunc.h   |  4 +++-
 5 files changed, 27 insertions(+), 5 deletions(-)

diff --git a/sys/arch/amd64/amd64/locore.S b/sys/arch/amd64/amd64/locore.S
index 1318b32f850..62ec81239f4 100644
--- a/sys/arch/amd64/amd64/locore.S
+++ b/sys/arch/amd64/amd64/locore.S
@@ -1,4 +1,4 @@
-/*	$OpenBSD: locore.S,v 1.106 2018/07/23 17:54:04 guenther Exp $	*/
+/*	$OpenBSD: locore.S,v 1.107 2018/07/24 02:42:25 guenther Exp $	*/
 /*	$NetBSD: locore.S,v 1.13 2004/03/25 18:33:17 drochner Exp $	*/
 
 /*
@@ -448,6 +448,15 @@ restore_saved:
 	movq	PCB_CR3(%r13),%rax
 	movq	%rax,%cr3	/* %rax used below too */
 
+	/*
+	 * If we switched from a userland thread with a shallow call stack
+	 * (e.g. interrupt->ast->mi_ast->preempt->mi_switch->cpu_switchto)
+	 * then the RSB may have attacker-controlled entries when we switch
+	 * to a deeper call stack in the new thread.  Refill the RSB with
+	 * entries safe to speculate into/through.
+	 */
+	RET_STACK_REFILL_WITH_RCX
+
 	/* Don't bother with the rest if switching to a system process. */
 	testl	$P_SYSTEM,P_FLAG(%r12)
 	jnz	switch_restored
@@ -571,6 +580,7 @@ XUsyscall_meltdown:
 	movq	CPUVAR(KERN_CR3),%rax
 	movq	%rax,%cr3
 0:	pause
+	lfence
 	jmp	0b
 KUTEXT_PAGE_END
 
@@ -675,6 +685,7 @@ IDTVEC_NOALIGN(syscall)
 	movq	%rax,%cr3
 Xsyscall_trampback:
 0:	pause
+	lfence
 	jmp	0b
 	CODEPATCH_END(CPTAG_MELTDOWN_NOP)
 	swapgs
@@ -850,6 +861,7 @@ intr_user_exit_post_ast:
 	movq	%rax,%cr3
 Xiretq_trampback:
 0:	pause
+	lfence
 	jmp	0b
 	.space	5,0xcc	/* pad to match "movq CPUVAR(SCRATCH),%rax" */
 	CODEPATCH_END(CPTAG_MELTDOWN_NOP)
diff --git a/sys/arch/amd64/amd64/vector.S b/sys/arch/amd64/amd64/vector.S
index 1f4be77736c..f4d4c4ec3b5 100644
--- a/sys/arch/amd64/amd64/vector.S
+++ b/sys/arch/amd64/amd64/vector.S
@@ -1,4 +1,4 @@
-/*	$OpenBSD: vector.S,v 1.74 2018/07/21 02:19:54 guenther Exp $	*/
+/*	$OpenBSD: vector.S,v 1.75 2018/07/24 02:42:25 guenther Exp $	*/
 /*	$NetBSD: vector.S,v 1.5 2004/06/28 09:13:11 fvdl Exp $	*/
 
 /*
@@ -379,6 +379,7 @@ Xalltraps:
 	movq	CPUVAR(KERN_CR3),%rax
 	movq	%rax,%cr3
 0:	pause
+	lfence
 	jmp	0b
 KUTEXT_PAGE_END
 
diff --git a/sys/arch/amd64/amd64/vmm_support.S b/sys/arch/amd64/amd64/vmm_support.S
index f0e9bba9864..8053e841f06 100644
--- a/sys/arch/amd64/amd64/vmm_support.S
+++ b/sys/arch/amd64/amd64/vmm_support.S
@@ -1,4 +1,4 @@
-/*	$OpenBSD: vmm_support.S,v 1.11 2018/07/03 23:21:15 mortimer Exp $	*/
+/*	$OpenBSD: vmm_support.S,v 1.12 2018/07/24 02:42:25 guenther Exp $	*/
 /*
  * Copyright (c) 2014 Mike Larkin <mlarkin@openbsd.org>
  *
@@ -294,6 +294,8 @@ do_resume:
 	movq	0x00(%rsi), %rsi
 	vmresume
 fail_launch_or_resume:
+	RET_STACK_REFILL_WITH_RCX
+
 	/* Failed launch/resume (fell through) */
 	jc	fail_launch_invalid_vmcs	/* Invalid VMCS */
 	jz	fail_launch_valid_vmcs		/* Valid VMCS, failed launch/resume */
@@ -349,6 +351,8 @@ vmx_exit_handler_asm:
 	/* %rdi = 0 means we took an exit */
 	xorq	%rdi, %rdi
 
+	RET_STACK_REFILL_WITH_RCX
+
 restore_host:
 	popq	%rbx
 	popq	%rbp
diff --git a/sys/arch/amd64/include/asm.h b/sys/arch/amd64/include/asm.h
index 583e88829a3..11bb740fabb 100644
--- a/sys/arch/amd64/include/asm.h
+++ b/sys/arch/amd64/include/asm.h
@@ -1,4 +1,4 @@
-/*	$OpenBSD: asm.h,v 1.16 2018/07/23 17:54:04 guenther Exp $	*/
+/*	$OpenBSD: asm.h,v 1.17 2018/07/24 02:42:25 guenther Exp $	*/
 /*	$NetBSD: asm.h,v 1.2 2003/05/02 18:05:47 yamt Exp $	*/
 
 /*-
@@ -96,10 +96,12 @@
 		_ALIGN_TEXT			; \
 	3:	call	5f			; \
 	4:	pause				; \
+		lfence				; \
 		call	4b			; \
 		_ALIGN_TRAPS			; \
 	5:	call	7f			; \
 	6:	pause				; \
+		lfence				; \
 		call	6b			; \
 		_ALIGN_TRAPS			; \
 	7:	loop	3b			; \
@@ -186,6 +188,7 @@
 #define JMP_RETPOLINE(reg)			\
 	call	69f				; \
 68:	pause					; \
+	lfence					; \
 	jmp	68b				; \
 	_ALIGN_TRAPS				; \
 69:	mov	%reg,(%rsp)			; \
diff --git a/sys/arch/amd64/include/cpufunc.h b/sys/arch/amd64/include/cpufunc.h
index 7c7e391f88d..9c6cc12f097 100644
--- a/sys/arch/amd64/include/cpufunc.h
+++ b/sys/arch/amd64/include/cpufunc.h
@@ -1,4 +1,4 @@
-/*	$OpenBSD: cpufunc.h,v 1.27 2018/07/23 17:54:04 guenther Exp $	*/
+/*	$OpenBSD: cpufunc.h,v 1.28 2018/07/24 02:42:25 guenther Exp $	*/
 /*	$NetBSD: cpufunc.h,v 1.3 2003/05/08 10:27:43 fvdl Exp $	*/
 
 /*-
@@ -295,10 +295,12 @@ mwait(u_long extensions, u_int hints)
 	    "	.align	16,0x90		;"
 	    "3:	call	5f		;"
 	    "4:	pause			;"
+	    "	lfence			;"
 	    "	call	4b		;"
 	    "	.align	16,0xcc		;"
 	    "5:	call	7f		;"
 	    "6:	pause			;"
+	    "	lfence			;"
 	    "	call	6b		;"
 	    "	.align	16,0xcc		;"
 	    "7:	loop	3b		;"
-- 
2.20.1
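
The RET_STACK_REFILL_WITH_RCX macro that the locore.S and vmm_support.S
hunks invoke is the one whose body the first asm.h hunk touches: a loop
of paired calls plants benign return addresses in the CPU's return stack
buffer (RSB), and each call's fall-through is a pause+lfence speculation
trap. The following standalone rendering is a sketch of that pattern,
not the kernel macro verbatim: the symbol name is invented, the real
macro's _ALIGN_TEXT/_ALIGN_TRAPS padding is omitted, and the 16-entry
count assumes the commonly cited RSB depth.

	.text
	.globl	rsb_refill_sketch
	.type	rsb_refill_sketch,@function
rsb_refill_sketch:
	mov	$8,%rcx		/* 8 iterations x 2 calls = 16 RSB entries */
1:	call	3f		/* plant return address 2 in the RSB */
2:	pause			/* speculation trap: only a ret mispredicted */
	lfence			/* from a planted entry ever lands here */
	call	2b
3:	call	5f		/* plant return address 4 in the RSB */
4:	pause
	lfence
	call	4b
5:	loop	1b		/* dec %rcx; repeat while nonzero */
	add	$(16*8),%rsp	/* drop the 16 planted return addresses */
	ret
	.size	rsb_refill_sketch,.-rsb_refill_sketch

Labels 2 and 4 are never executed architecturally: each call jumps over
its own return point, so those addresses exist only on the stack and in
the RSB. A later ret that mispredicts through one of them spins in the
pause+lfence pair until the speculation is squashed, which is the
behavior this patch standardizes by adding an lfence after every such
pause.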
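
The JMP_RETPOLINE(reg) macro in the second asm.h hunk applies the same
trap to indirect jumps. A minimal sketch of that retpoline shape for the
%rax case, again with an invented symbol name and without the real
macro's alignment padding:

	.text
	.globl	jmp_via_rax_sketch
	.type	jmp_via_rax_sketch,@function
jmp_via_rax_sketch:
	call	1f		/* push the address of 0f; the RSB now predicts it */
0:	pause			/* the later ret speculates here, not into a */
	lfence			/* branch target the attacker may have trained */
	jmp	0b
1:	mov	%rax,(%rsp)	/* overwrite the pushed address with the real target */
	ret			/* architecturally jumps to *%rax */
	.size	jmp_via_rax_sketch,.-jmp_via_rax_sketch

Entered with jmp this behaves as jmp *%rax; entered with call it behaves
as call *%rax, which is how compiler-generated thunks such as clang's
__x86_indirect_thunk_rax are used. That compiler behavior is the
precedent the commit message cites for the added lfence.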