From: guenther
Date: Wed, 7 Dec 2022 18:25:32 +0000 (+0000)
Subject: Apply changes from commitid FWrfGfO9Ojnsh1mq to libkern.
X-Git-Url: http://artulab.com/gitweb/?a=commitdiff_plain;h=fc541c5dac429051bdc0c092615e861e86da24e9;p=openbsd

Apply changes from commitid FWrfGfO9Ojnsh1mq to libkern.

From original commit message:

To mitigate against Spectre attacks, AMD processors without the IBRS
feature need an lfence instruction after every near ret.  See "Software
Techniques for Managing Speculation on AMD Processors", revision 9.17.20,
mitigation G-5.

ok deraadt@
---

diff --git a/sys/lib/libkern/arch/amd64/bcmp.S b/sys/lib/libkern/arch/amd64/bcmp.S
index 96754ed68a4..2ef8087f8e4 100644
--- a/sys/lib/libkern/arch/amd64/bcmp.S
+++ b/sys/lib/libkern/arch/amd64/bcmp.S
@@ -19,3 +19,4 @@ ENTRY(bcmp)
 L1:	incl	%eax
 L2:	RETGUARD_CHECK(bcmp, r11)
 	ret
+	lfence
diff --git a/sys/lib/libkern/arch/amd64/bzero.S b/sys/lib/libkern/arch/amd64/bzero.S
index a1c9a94d955..db05dbfa87e 100644
--- a/sys/lib/libkern/arch/amd64/bzero.S
+++ b/sys/lib/libkern/arch/amd64/bzero.S
@@ -38,3 +38,4 @@ L1:	movq	%rdx,%rcx	/* zero remainder by bytes */
 	stosb
 	RETGUARD_CHECK(bzero, r11)
 	ret
+	lfence
diff --git a/sys/lib/libkern/arch/amd64/ffs.S b/sys/lib/libkern/arch/amd64/ffs.S
index b6ada6cd286..10154debeb2 100644
--- a/sys/lib/libkern/arch/amd64/ffs.S
+++ b/sys/lib/libkern/arch/amd64/ffs.S
@@ -17,3 +17,4 @@ ENTRY(ffs)
 L1:	xorl	%eax,%eax	/* clear result */
 L2:	RETGUARD_CHECK(ffs, r11)
 	ret
+	lfence
diff --git a/sys/lib/libkern/arch/amd64/htonl.S b/sys/lib/libkern/arch/amd64/htonl.S
index 6073506bfaa..dbe6647e7f3 100644
--- a/sys/lib/libkern/arch/amd64/htonl.S
+++ b/sys/lib/libkern/arch/amd64/htonl.S
@@ -49,3 +49,4 @@ _PROF_PROLOGUE
 	bswap	%eax
 	RETGUARD_CHECK(htonl, r11)
 	ret
+	lfence
diff --git a/sys/lib/libkern/arch/amd64/htons.S b/sys/lib/libkern/arch/amd64/htons.S
index 189bcba3c54..c7943cf7140 100644
--- a/sys/lib/libkern/arch/amd64/htons.S
+++ b/sys/lib/libkern/arch/amd64/htons.S
@@ -49,3 +49,4 @@ _PROF_PROLOGUE
 	xchgb	%ah,%al
 	RETGUARD_CHECK(htons, r11)
 	ret
+	lfence
diff --git a/sys/lib/libkern/arch/amd64/memchr.S b/sys/lib/libkern/arch/amd64/memchr.S
index 1ec17272f1b..6a5943e40b2 100644
--- a/sys/lib/libkern/arch/amd64/memchr.S
+++ b/sys/lib/libkern/arch/amd64/memchr.S
@@ -20,3 +20,4 @@ ENTRY(memchr)
 L1:	xorq	%rax,%rax
 L2:	RETGUARD_CHECK(memchr, r11)
 	ret
+	lfence
diff --git a/sys/lib/libkern/arch/amd64/memcmp.S b/sys/lib/libkern/arch/amd64/memcmp.S
index b53e2d1c21a..c16879cda78 100644
--- a/sys/lib/libkern/arch/amd64/memcmp.S
+++ b/sys/lib/libkern/arch/amd64/memcmp.S
@@ -35,3 +35,4 @@ L6:	xorl	%eax,%eax	/* Perform unsigned comparison */
 	subl	%edx,%eax
 L7:	RETGUARD_CHECK(memcmp, r11)
 	ret
+	lfence
diff --git a/sys/lib/libkern/arch/amd64/memmove.S b/sys/lib/libkern/arch/amd64/memmove.S
index 71d5b007f41..5a5939f4fea 100644
--- a/sys/lib/libkern/arch/amd64/memmove.S
+++ b/sys/lib/libkern/arch/amd64/memmove.S
@@ -87,3 +87,4 @@ ENTRY(memcpy)
 	cld
 3:	RETGUARD_CHECK(memmove, r10)
 	ret
+	lfence
diff --git a/sys/lib/libkern/arch/amd64/memset.S b/sys/lib/libkern/arch/amd64/memset.S
index 1ccdfe79a9d..e57a7c84599 100644
--- a/sys/lib/libkern/arch/amd64/memset.S
+++ b/sys/lib/libkern/arch/amd64/memset.S
@@ -52,3 +52,4 @@ L1:	rep
 	movq	%r11,%rax
 	RETGUARD_CHECK(memset, r10)
 	ret
+	lfence
diff --git a/sys/lib/libkern/arch/amd64/scanc.S b/sys/lib/libkern/arch/amd64/scanc.S
index 2ae94d96d74..582343584d8 100644
--- a/sys/lib/libkern/arch/amd64/scanc.S
+++ b/sys/lib/libkern/arch/amd64/scanc.S
@@ -54,3 +54,4 @@ ENTRY(scanc)
 	movl	%ecx,%eax
 	RETGUARD_CHECK(scanc, r10)
 	ret
+	lfence
diff --git a/sys/lib/libkern/arch/amd64/skpc.S b/sys/lib/libkern/arch/amd64/skpc.S
index c67f9c3b252..a8b3dfbe89a 100644
--- a/sys/lib/libkern/arch/amd64/skpc.S
+++ b/sys/lib/libkern/arch/amd64/skpc.S
@@ -48,3 +48,4 @@ ENTRY(skpc)
 	movl	%ecx,%eax
 	RETGUARD_CHECK(skpc, r10)
 	ret
+	lfence
diff --git a/sys/lib/libkern/arch/amd64/strchr.S b/sys/lib/libkern/arch/amd64/strchr.S
index 54d2178d017..ad8a7ca5023 100644
--- a/sys/lib/libkern/arch/amd64/strchr.S
+++ b/sys/lib/libkern/arch/amd64/strchr.S
@@ -1,4 +1,4 @@
-/*	$OpenBSD: strchr.S,v 1.5 2018/07/13 05:56:57 guenther Exp $	*/
+/*	$OpenBSD: strchr.S,v 1.6 2022/12/07 18:25:32 guenther Exp $	*/
 /*	$NetBSD: strchr.S,v 1.7 2014/03/22 19:16:34 jakllsch Exp $	*/
 
 /*-
@@ -102,6 +102,7 @@ ENTRY(strchr)
 12:	RETGUARD_POP(r9)
 	RETGUARD_CHECK(strchr, r9)
 	ret
+	lfence
 
 /* Source misaligned: read aligned word and make low bytes invalid */
 /* I (dsl) think aligning the text here will slow things down! */
diff --git a/sys/lib/libkern/arch/amd64/strcmp.S b/sys/lib/libkern/arch/amd64/strcmp.S
index 3ba13a49c34..12f10bacab4 100644
--- a/sys/lib/libkern/arch/amd64/strcmp.S
+++ b/sys/lib/libkern/arch/amd64/strcmp.S
@@ -1,4 +1,4 @@
-/*	$OpenBSD: strcmp.S,v 1.4 2018/07/03 23:22:48 mortimer Exp $	*/
+/*	$OpenBSD: strcmp.S,v 1.5 2022/12/07 18:25:33 guenther Exp $	*/
 /*	$NetBSD: strcmp.S,v 1.2 2014/03/22 19:16:34 jakllsch Exp $	*/
 
 /*
@@ -71,3 +71,4 @@ ENTRY(strcmp)
 	subq	%rdx,%rax
 	RETGUARD_CHECK(strcmp, r10)
 	ret
+	lfence
diff --git a/sys/lib/libkern/arch/amd64/strlen.S b/sys/lib/libkern/arch/amd64/strlen.S
index d1a82185fae..03bceca944c 100644
--- a/sys/lib/libkern/arch/amd64/strlen.S
+++ b/sys/lib/libkern/arch/amd64/strlen.S
@@ -1,4 +1,4 @@
-/*	$OpenBSD: strlen.S,v 1.8 2022/01/11 09:21:34 jsg Exp $	*/
+/*	$OpenBSD: strlen.S,v 1.9 2022/12/07 18:25:33 guenther Exp $	*/
 /*	$NetBSD: strlen.S,v 1.6 2014/03/22 19:16:34 jakllsch Exp $	*/
 
 /*-
@@ -142,6 +142,7 @@ ENTRY(strlen)
 	lea	-8(%rax,%rdx),%rax
 	RETGUARD_CHECK(strlen, r10)
 	ret
+	lfence
 
 /* Misaligned, read aligned word and make low bytes non-zero */
 	_ALIGN_TRAPS
diff --git a/sys/lib/libkern/arch/amd64/strrchr.S b/sys/lib/libkern/arch/amd64/strrchr.S
index 945146d0059..a32af2bfb13 100644
--- a/sys/lib/libkern/arch/amd64/strrchr.S
+++ b/sys/lib/libkern/arch/amd64/strrchr.S
@@ -1,4 +1,4 @@
-/*	$OpenBSD: strrchr.S,v 1.4 2018/07/03 23:22:48 mortimer Exp $	*/
+/*	$OpenBSD: strrchr.S,v 1.5 2022/12/07 18:25:33 guenther Exp $	*/
 /*	$NetBSD: strrchr.S,v 1.3 2014/03/22 19:16:34 jakllsch Exp $	*/
 
 /*
@@ -123,3 +123,4 @@ ENTRY(strrchr)
 .Ldone:
 	RETGUARD_CHECK(strrchr, r10)
 	ret
+	lfence
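
For readers unfamiliar with these macros, below is a minimal sketch of the
pattern every hunk above installs.  The function dummy_fn and its body are
invented for illustration; ENTRY and the RETGUARD_SETUP/RETGUARD_CHECK
macros are the ones the diffed files already use (on OpenBSD/amd64 they
come from the machine asm header).

	/* hypothetical sketch, not part of this commit */
	ENTRY(dummy_fn)
		RETGUARD_SETUP(dummy_fn, r11)	/* mix return address into r11 */
		movq	%rdi,%rax		/* trivial body: return 1st arg */
		RETGUARD_CHECK(dummy_fn, r11)	/* trap if retaddr was altered */
		ret
		lfence				/* AMD mitigation G-5: fence
						   speculation past the ret */

Note that the lfence is architecturally unreachable: control flow always
leaves through the ret above it.  It matters only when the CPU mispredicts
the return target and speculatively fetches the bytes after the ret; the
lfence stalls that speculation, which is why one is appended after each
near ret rather than patched in at call sites.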