-/* $OpenBSD: acpi_wakecode.S,v 1.46 2018/10/04 05:00:40 guenther Exp $ */
+/* $OpenBSD: acpi_wakecode.S,v 1.47 2021/09/04 22:15:33 bluhm Exp $ */
/*
* Copyright (c) 2001 Takanori Watanabe <takawata@jp.freebsd.org>
* Copyright (c) 2001 Mitsuru IWASAKI <iwasaki@jp.freebsd.org>
1: RETGUARD_CHECK(hibernate_activate_resume_pt_machdep, r11)
ret
+ lfence
/*
* Switch to the private resume-time hibernate stack
/* On our own stack from here onward */
RETGUARD_CHECK(hibernate_switch_stack_machdep, r11)
ret
+ lfence
NENTRY(hibernate_flush)
RETGUARD_SETUP(hibernate_flush, r11)
invlpg HIBERNATE_INFLATE_PAGE
RETGUARD_CHECK(hibernate_flush, r11)
ret
+ lfence
#endif /* HIBERNATE */
/*
movl $1, %eax
RETGUARD_CHECK(acpi_savecpu, r11)
ret
+ lfence
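The change repeated throughout the .S hunks above and below is the same one: an
lfence is placed directly after every ret/retq so the CPU cannot transiently
execute whatever bytes happen to follow the return.  A minimal sketch of the
resulting epilogue; "example_leaf" is a hypothetical routine, not a function
from this diff, and the RETGUARD macros are left out for brevity:

	.text
	.globl	example_leaf
	.type	example_leaf,@function
example_leaf:
	xorl	%eax,%eax	/* return value 0 */
	ret
	lfence			/* stop straight-line speculation past the ret */
	.size	example_leaf,.-example_leaf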
-/* $OpenBSD: aes_intel.S,v 1.13 2018/07/09 08:39:28 mortimer Exp $ */
+/* $OpenBSD: aes_intel.S,v 1.14 2021/09/04 22:15:33 bluhm Exp $ */
/*
* Implement AES algorithm in Intel AES-NI instructions.
add $0x10,%rcx
RETGUARD_CHECK(_key_expansion_128, rax)
ret
+ lfence
_key_expansion_192a:
RETGUARD_SETUP(_key_expansion_192a, rax)
add $0x20,%rcx
RETGUARD_CHECK(_key_expansion_192a, rax)
ret
+ lfence
_key_expansion_192b:
RETGUARD_SETUP(_key_expansion_192b, rax)
add $0x10,%rcx
RETGUARD_CHECK(_key_expansion_192b, rax)
ret
+ lfence
_key_expansion_256b:
RETGUARD_SETUP(_key_expansion_256b, rax)
add $0x10,%rcx
RETGUARD_CHECK(_key_expansion_256b, rax)
ret
+ lfence
/*
* void aesni_set_key(struct aesni_session *ses, uint8_t *key, size_t len)
jb 4b
RETGUARD_CHECK(aesni_set_key, r11)
ret
+ lfence
/*
* void aesni_enc(struct aesni_session *ses, uint8_t *dst, uint8_t *src)
movups STATE,(OUTP) # output
RETGUARD_CHECK(aesni_enc, r11)
ret
+ lfence
/*
* _aesni_enc1: internal ABI
aesenclast KEY,STATE
RETGUARD_CHECK(_aesni_enc1, rax)
ret
+ lfence
/*
* _aesni_enc4: internal ABI
aesenclast KEY,STATE4
RETGUARD_CHECK(_aesni_enc4, rax)
ret
+ lfence
/*
* void aesni_dec(struct aesni_session *ses, uint8_t *dst, uint8_t *src)
movups STATE,(OUTP) # output
RETGUARD_CHECK(aesni_dec, r11)
ret
+ lfence
/*
* _aesni_dec1: internal ABI
aesdeclast KEY,STATE
RETGUARD_CHECK(_aesni_dec1, rax)
ret
+ lfence
/*
* _aesni_dec4: internal ABI
aesdeclast KEY,STATE4
RETGUARD_CHECK(_aesni_dec4, rax)
ret
+ lfence
#if 0
/*
3:
RETGUARD_CHECK(aesni_ecb_enc, r11)
ret
+ lfence
/*
* void aesni_ecb_dec(struct aesni_session *ses, uint8_t *dst, uint8_t *src,
3:
RETGUARD_CHECK(aesni_ecb_dec, r11)
ret
+ lfence
#endif
/*
2:
RETGUARD_CHECK(aesni_cbc_enc, r11)
ret
+ lfence
/*
* void aesni_cbc_dec(struct aesni_session *ses, uint8_t *dst, uint8_t *src,
4:
RETGUARD_CHECK(aesni_cbc_dec, r11)
ret
+ lfence
/*
* _aesni_inc_init: internal ABI
movd CTR,TCTR_LOW
RETGUARD_CHECK(_aesni_inc_init, rax)
ret
+ lfence
/*
* _aesni_inc: internal ABI
pshufb BSWAP_MASK,IV
RETGUARD_CHECK(_aesni_inc, rax)
ret
+ lfence
/*
* void aesni_ctr_enc(struct aesni_session *ses, uint8_t *dst, uint8_t *src,
RETGUARD_POP(r11)
RETGUARD_CHECK(aesni_ctr_enc, r11)
ret
+ lfence
_aesni_gmac_gfmul:
RETGUARD_SETUP(_aesni_gmac_gfmul, rax)
pxor %xmm3,%xmm6 # the result is in xmm6
RETGUARD_CHECK(_aesni_gmac_gfmul, rax)
ret
+ lfence
/*
* void aesni_gmac_update(GHASH_CTX *ghash, uint8_t *src, size_t len)
2:
RETGUARD_CHECK(aesni_gmac_update, r11)
ret
+ lfence
/*
* void aesni_gmac_final(struct aesni_sess *ses, uint8_t *tag,
movdqu STATE,(OUTP) # output
RETGUARD_CHECK(aesni_gmac_final, r11)
ret
+ lfence
/*
* void aesni_xts_enc(struct aesni_xts_ctx *xts, uint8_t *dst, uint8_t *src,
RETGUARD_POP(r11)
RETGUARD_CHECK(aesni_xts_enc, r11)
ret
+ lfence
/*
* void aesni_xts_dec(struct aesni_xts_ctx *xts, uint8_t *dst, uint8_t *src,
RETGUARD_POP(r11)
RETGUARD_CHECK(aesni_xts_dec, r11)
ret
+ lfence
/*
* Prepare tweak as E_k2(IV). IV is specified as LE representation of a
RETGUARD_POP(rax)
RETGUARD_CHECK(_aesni_xts_tweak, rax)
ret
+ lfence
/*
* Exponentiate AES XTS tweak (in %xmm3).
2:
RETGUARD_CHECK(_aesni_xts_tweak_exp, rax)
ret
+ lfence
-/* $OpenBSD: copy.S,v 1.13 2019/04/02 03:35:08 mortimer Exp $ */
+/* $OpenBSD: copy.S,v 1.14 2021/09/04 22:15:33 bluhm Exp $ */
/* $NetBSD: copy.S,v 1.1 2003/04/26 18:39:26 fvdl Exp $ */
/*
xorq %rax,%rax
RETGUARD_CHECK(kcopy, r10)
ret
+ lfence
1: addq %rcx,%rdi # copy backward
addq %rcx,%rsi
xorq %rax,%rax
RETGUARD_CHECK(kcopy, r10)
ret
+ lfence
ENTRY(copyout)
RETGUARD_SETUP(kcopy, r10)
xorl %eax,%eax
RETGUARD_CHECK(kcopy, r10)
ret
+ lfence
ENTRY(copyin)
RETGUARD_SETUP(kcopy, r10)
xorl %eax,%eax
RETGUARD_CHECK(kcopy, r10)
ret
+ lfence
NENTRY(copy_fault)
SMAP_CLAC
movl $EFAULT,%eax
RETGUARD_CHECK(kcopy, r10)
ret
+ lfence
ENTRY(copyoutstr)
RETGUARD_SETUP(copyoutstr, r10)
8:
RETGUARD_CHECK(copyoutstr, r10)
ret
+ lfence
ENTRY(copystr)
RETGUARD_SETUP(copystr, r10)
7:
RETGUARD_CHECK(copystr, r10)
ret
+ lfence
.section .rodata
.globl _C_LABEL(_stac)
-/* $OpenBSD: locore.S,v 1.125 2021/06/18 06:17:28 guenther Exp $ */
+/* $OpenBSD: locore.S,v 1.126 2021/09/04 22:15:33 bluhm Exp $ */
/* $NetBSD: locore.S,v 1.13 2004/03/25 18:33:17 drochner Exp $ */
/*
xorl %eax,%eax
RETGUARD_CHECK(setjmp, r11)
ret
+ lfence
END(setjmp)
ENTRY(longjmp)
incl %eax
RETGUARD_CHECK(longjmp, r11)
ret
+ lfence
END(longjmp)
#endif /* DDB */
popq %rbx
RETGUARD_CHECK(cpu_switchto, r11)
ret
+ lfence
#ifdef DIAGNOSTIC
.globl _C_LABEL(panic)
ENTRY(cpu_idle_enter)
ret
+ lfence
END(cpu_idle_enter)
ENTRY(cpu_idle_leave)
ret
+ lfence
END(cpu_idle_leave)
/* placed here for correct static branch prediction in cpu_idle_* */
hlt
RETGUARD_CHECK(cpu_idle_cycle, r11)
ret
+ lfence
END(cpu_idle_cycle)
/*
movq %rbp,PCB_RBP(%rdi)
RETGUARD_CHECK(savectx, r11)
ret
+ lfence
END(savectx)
IDTVEC(syscall32)
xorl %eax, %eax
RETGUARD_CHECK(xrstor_user, r11)
ret
+ lfence
NENTRY(xrstor_resume)
movl $1, %eax
RETGUARD_CHECK(xrstor_user, r11)
ret
+ lfence
END(xrstor_user)
ENTRY(fpusave)
CODEPATCH_END(CPTAG_XSAVE)
RETGUARD_CHECK(fpusave, r11)
ret
+ lfence
END(fpusave)
ENTRY(fpusavereset)
CODEPATCH_END(CPTAG_XRSTOR)
RETGUARD_CHECK(fpusavereset, r11)
ret
+ lfence
END(fpusavereset)
ENTRY(xsetbv_user)
xorl %eax, %eax
RETGUARD_CHECK(xsetbv_user, r11)
ret
+ lfence
NENTRY(xsetbv_resume)
movl $1, %eax
RETGUARD_CHECK(xsetbv_user, r11)
ret
+ lfence
END(xsetbv_user)
.section .rodata
sfence
RETGUARD_CHECK(pagezero, r11)
ret
+ lfence
END(pagezero)
/* int rdmsr_safe(u_int msr, uint64_t *data) */
RETGUARD_CHECK(rdmsr_safe, r10)
ret
+ lfence
NENTRY(rdmsr_resume)
movl $0x1, %eax
RETGUARD_CHECK(rdmsr_safe, r10)
ret
+ lfence
END(rdmsr_safe)
#if NXEN > 0
-/* $OpenBSD: mds.S,v 1.2 2019/11/29 17:47:10 mortimer Exp $ */
+/* $OpenBSD: mds.S,v 1.3 2021/09/04 22:15:33 bluhm Exp $ */
/*
* Copyright (c) 2019 Philip Guenther <guenther@openbsd.org>
*
movdqa CPUVAR(MDS_TMP),%xmm0
RETGUARD_CHECK(mds_handler_ivb, r11)
retq
+ lfence
END(mds_handler_ivb)
ENTRY(mds_handler_bdw)
movdqa CPUVAR(MDS_TMP),%xmm0
RETGUARD_CHECK(mds_handler_bdw, r11)
retq
+ lfence
END(mds_handler_bdw)
ENTRY(mds_handler_skl)
movdqa CPUVAR(MDS_TMP),%xmm0
RETGUARD_CHECK(mds_handler_skl_sse, r11)
retq
+ lfence
END(mds_handler_skl_sse)
ENTRY(mds_handler_skl_avx)
vmovdqa CPUVAR(MDS_TMP),%ymm0
RETGUARD_CHECK(mds_handler_skl_avx, r11)
retq
+ lfence
END(mds_handler_skl_avx)
/* we don't support AVX512 yet */
vmovdqa64 CPUVAR(MDS_TMP),%zmm0
RETGUARD_CHECK(mds_handler_skl_avx512, r11)
retq
+ lfence
END(mds_handler_skl_avx512)
#endif
movdqa CPUVAR(MDS_TMP),%xmm0
RETGUARD_CHECK(mds_handler_silvermont, r11)
retq
+ lfence
END(mds_handler_silvermont)
ENTRY(mds_handler_knights)
mfence
RETGUARD_CHECK(mds_handler_knights, r11)
retq
+ lfence
END(mds_handler_knights)
-/* $OpenBSD: spl.S,v 1.17 2018/07/10 16:01:26 deraadt Exp $ */
+/* $OpenBSD: spl.S,v 1.18 2021/09/04 22:15:33 bluhm Exp $ */
/* $NetBSD: spl.S,v 1.3 2004/06/28 09:13:11 fvdl Exp $ */
/*
xchgl %eax,CPUVAR(ILEVEL)
RETGUARD_CHECK(splhigh, r11)
ret
+ lfence
.align 16, 0xcc
_C_LABEL(splx):
jnz _C_LABEL(Xspllower)
RETGUARD_CHECK(splx, r11)
ret
+ lfence
#endif /* PROF || GPROF */
#endif
popq %rbx
RETGUARD_CHECK(Xspllower, r11)
ret
+ lfence
2: bsrq %rax,%rax
btrq %rax,CPUVAR(IPENDING)
movq CPUVAR(ISOURCES)(,%rax,8),%rax
-/* $OpenBSD: vector.S,v 1.85 2021/09/03 16:45:44 jasper Exp $ */
+/* $OpenBSD: vector.S,v 1.86 2021/09/04 22:15:33 bluhm Exp $ */
/* $NetBSD: vector.S,v 1.5 2004/06/28 09:13:11 fvdl Exp $ */
/*
popq %rcx
popq %rax
ret
+ lfence
END(x2apic_eoi)
#if NLAPIC > 0
-/* $OpenBSD: vmm_support.S,v 1.17 2021/02/13 07:47:37 mlarkin Exp $ */
+/* $OpenBSD: vmm_support.S,v 1.18 2021/09/04 22:15:33 bluhm Exp $ */
/*
* Copyright (c) 2014 Mike Larkin <mlarkin@openbsd.org>
*
cli
callq *%rdi
ret
+ lfence
_C_LABEL(vmxon):
RETGUARD_SETUP(vmxon, r11)
xorq %rax, %rax
RETGUARD_CHECK(vmxon, r11)
ret
+ lfence
failed_on:
movq $0x01, %rax
RETGUARD_CHECK(vmxon, r11)
ret
+ lfence
_C_LABEL(vmxoff):
RETGUARD_SETUP(vmxoff, r11)
xorq %rax, %rax
RETGUARD_CHECK(vmxoff, r11)
ret
+ lfence
failed_off:
movq $0x01, %rax
RETGUARD_CHECK(vmxoff, r11)
ret
+ lfence
_C_LABEL(vmclear):
RETGUARD_SETUP(vmclear, r11)
xorq %rax, %rax
RETGUARD_CHECK(vmclear, r11)
ret
+ lfence
failed_clear:
movq $0x01, %rax
RETGUARD_CHECK(vmclear, r11)
ret
+ lfence
_C_LABEL(vmptrld):
RETGUARD_SETUP(vmptrld, r11)
xorq %rax, %rax
RETGUARD_CHECK(vmptrld, r11)
ret
+ lfence
failed_ptrld:
movq $0x01, %rax
RETGUARD_CHECK(vmptrld, r11)
ret
+ lfence
_C_LABEL(vmptrst):
RETGUARD_SETUP(vmptrst, r11)
xorq %rax, %rax
RETGUARD_CHECK(vmptrst, r11)
ret
+ lfence
failed_ptrst:
movq $0x01, %rax
RETGUARD_CHECK(vmptrst, r11)
ret
+ lfence
_C_LABEL(vmwrite):
RETGUARD_SETUP(vmwrite, r11)
xorq %rax, %rax
RETGUARD_CHECK(vmwrite, r11)
ret
+ lfence
failed_write:
movq $0x01, %rax
RETGUARD_CHECK(vmwrite, r11)
ret
+ lfence
_C_LABEL(vmread):
RETGUARD_SETUP(vmread, r11)
xorq %rax, %rax
RETGUARD_CHECK(vmread, r11)
ret
+ lfence
failed_read:
movq $0x01, %rax
RETGUARD_CHECK(vmread, r11)
ret
+ lfence
_C_LABEL(invvpid):
RETGUARD_SETUP(invvpid, r11)
invvpid (%rsi), %rdi
RETGUARD_CHECK(invvpid, r11)
ret
+ lfence
_C_LABEL(invept):
RETGUARD_SETUP(invept, r11)
invept (%rsi), %rdi
RETGUARD_CHECK(invept, r11)
ret
+ lfence
_C_LABEL(vmx_enter_guest):
RETGUARD_SETUP(vmx_enter_guest, r11)
movq %rdi, %rax
RETGUARD_CHECK(vmx_enter_guest, r11)
ret
+ lfence
_C_LABEL(svm_enter_guest):
RETGUARD_SETUP(svm_enter_guest, r11)
RETGUARD_CHECK(svm_enter_guest, r11)
ret
+ lfence
-/* $OpenBSD: asm.h,v 1.19 2021/09/01 09:50:21 bluhm Exp $ */
+/* $OpenBSD: asm.h,v 1.20 2021/09/04 22:15:33 bluhm Exp $ */
/* $NetBSD: asm.h,v 1.2 2003/05/02 18:05:47 yamt Exp $ */
/*-
cmpq (__retguard_ ## x)(%rip), %reg; \
je 66f; \
int3; int3; \
- .zero (0xf - ((. - x) & 0xf)), 0xcc; \
+ .zero (0xf - ((. + 3 - x) & 0xf)), 0xcc; \
66:
# define RETGUARD_PUSH(reg) \
pushq %reg
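The "+ 3" in the RETGUARD_CHECK padding above accounts for the lfence that now
follows the ret: ret is one byte (0xc3) and lfence is three bytes (0x0f 0xae
0xe8).  The old expression padded so that (. - x) mod 16 was 0xf at label 66,
leaving the lone ret as the last byte of that 16-byte block; the new one pads
to 0xc, so ret plus lfence together still fill the block to its end.  A
standalone sketch of the arithmetic, with a hypothetical label "example_check"
standing in for the function symbol x:

	.text
example_check:
	nop				/* any amount of preceding code */
	.zero	(0xf - ((. + 3 - example_check) & 0xf)), 0xcc
					/* pads to offset 0xc (mod 16) */
	ret				/* byte 0xc of the 16-byte block */
	lfence				/* bytes 0xd-0xf finish the block */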
-/* $OpenBSD: profile.h,v 1.4 2012/08/22 17:19:35 pascal Exp $ */
+/* $OpenBSD: profile.h,v 1.5 2021/09/04 22:15:33 bluhm Exp $ */
/* $NetBSD: profile.h,v 1.3 2003/11/28 23:22:45 fvdl Exp $ */
/*
" movq 48(%rsp),%rax \n" \
" leave \n" \
" ret \n" \
+" lfence \n" \
" .size __mcount,.-__mcount");