From: deraadt
Date: Tue, 25 Apr 2023 04:42:25 +0000 (+0000)
Subject: Add endbr64 where needed by inspection. Passes regression tests.
X-Git-Url: http://artulab.com/gitweb/?a=commitdiff_plain;h=1d4dcfa77bf337c400b3c46a2bb237704a570857;p=openbsd

Add endbr64 where needed by inspection. Passes regression tests.

ok jsing, and kind of tb an earlier version
---

diff --git a/lib/libcrypto/aes/asm/aes-x86_64.pl b/lib/libcrypto/aes/asm/aes-x86_64.pl
index b7399b552ad..78ba20ca59f 100755
--- a/lib/libcrypto/aes/asm/aes-x86_64.pl
+++ b/lib/libcrypto/aes/asm/aes-x86_64.pl
@@ -318,6 +318,7 @@ $code.=<<___;
 .type _x86_64_AES_encrypt,\@abi-omnipotent
 .align 16
 _x86_64_AES_encrypt:
+ endbr64
 xor 0($key),$s0 # xor with key
 xor 4($key),$s1
 xor 8($key),$s2
@@ -548,6 +549,7 @@ $code.=<<___;
 .type _x86_64_AES_encrypt_compact,\@abi-omnipotent
 .align 16
 _x86_64_AES_encrypt_compact:
+ endbr64
 lea 128($sbox),$inp # size optimization
 mov 0-128($inp),$acc1 # prefetch Te4
 mov 32-128($inp),$acc2
@@ -593,6 +595,7 @@ $code.=<<___;
 .hidden asm_AES_encrypt
 asm_AES_encrypt:
 AES_encrypt:
+ endbr64
 push %rbx
 push %rbp
 push %r12
@@ -884,6 +887,7 @@ $code.=<<___;
 .type _x86_64_AES_decrypt,\@abi-omnipotent
 .align 16
 _x86_64_AES_decrypt:
+ endbr64
 xor 0($key),$s0 # xor with key
 xor 4($key),$s1
 xor 8($key),$s2
@@ -1138,6 +1142,7 @@ $code.=<<___;
 .type _x86_64_AES_decrypt_compact,\@abi-omnipotent
 .align 16
 _x86_64_AES_decrypt_compact:
+ endbr64
 lea 128($sbox),$inp # size optimization
 mov 0-128($inp),$acc1 # prefetch Td4
 mov 32-128($inp),$acc2
@@ -1192,6 +1197,7 @@ $code.=<<___;
 .hidden asm_AES_decrypt
 asm_AES_decrypt:
 AES_decrypt:
+ endbr64
 push %rbx
 push %rbp
 push %r12
@@ -1291,6 +1297,7 @@ $code.=<<___;
 .type AES_set_encrypt_key,\@function,3
 .align 16
 AES_set_encrypt_key:
+ endbr64
 push %rbx
 push %rbp
 push %r12 # redundant, but allows to share
@@ -1316,6 +1323,7 @@ AES_set_encrypt_key:
 .type _x86_64_AES_set_encrypt_key,\@abi-omnipotent
 .align 16
 _x86_64_AES_set_encrypt_key:
+ endbr64
 mov %esi,%ecx # %ecx=bits
 mov %rdi,%rsi # %rsi=userKey
 mov %rdx,%rdi # %rdi=key
@@ -1561,6 +1569,7 @@ $code.=<<___;
 .type AES_set_decrypt_key,\@function,3
 .align 16
 AES_set_decrypt_key:
+ endbr64
 push %rbx
 push %rbp
 push %r12
@@ -1660,6 +1669,7 @@ $code.=<<___;
 .hidden asm_AES_cbc_encrypt
 asm_AES_cbc_encrypt:
 AES_cbc_encrypt:
+ endbr64
 cmp \$0,%rdx # check length
 je .Lcbc_epilogue
 pushfq
@@ -2551,6 +2561,7 @@ $code.=<<___;
 .type block_se_handler,\@abi-omnipotent
 .align 16
 block_se_handler:
+ endbr64
 push %rsi
 push %rdi
 push %rbx
@@ -2609,6 +2620,7 @@ block_se_handler:
 .type key_se_handler,\@abi-omnipotent
 .align 16
 key_se_handler:
+ endbr64
 push %rsi
 push %rdi
 push %rbx
@@ -2666,6 +2678,7 @@ key_se_handler:
 .type cbc_se_handler,\@abi-omnipotent
 .align 16
 cbc_se_handler:
+ endbr64
 push %rsi
 push %rdi
 push %rbx
diff --git a/lib/libcrypto/aes/asm/aesni-sha1-x86_64.pl b/lib/libcrypto/aes/asm/aesni-sha1-x86_64.pl
index bafa906a050..879d16793f5 100644
--- a/lib/libcrypto/aes/asm/aesni-sha1-x86_64.pl
+++ b/lib/libcrypto/aes/asm/aesni-sha1-x86_64.pl
@@ -89,6 +89,7 @@ $code.=<<___;
 .type aesni_cbc_sha1_enc,\@abi-omnipotent
 .align 16
 aesni_cbc_sha1_enc:
+ endbr64
 # caller should check for SSSE3 and AES-NI bits
 mov OPENSSL_ia32cap_P+0(%rip),%r10d
 mov OPENSSL_ia32cap_P+4(%rip),%r11d
@@ -132,6 +133,7 @@ $code.=<<___;
 .type aesni_cbc_sha1_enc_ssse3,\@function,6
 .align 16
 aesni_cbc_sha1_enc_ssse3:
+ endbr64
 mov `($win64?56:8)`(%rsp),$inp # load 7th argument
 #shr \$6,$len # debugging artefact
 #jz .Lepilogue_ssse3 # debugging artefact
@@ -650,6 +652,7 @@ $code.=<<___;
 .type aesni_cbc_sha1_enc_avx,\@function,6
 .align 16
 aesni_cbc_sha1_enc_avx:
+ endbr64
 mov `($win64?56:8)`(%rsp),$inp # load 7th argument
 #shr \$6,$len # debugging artefact
 #jz .Lepilogue_avx # debugging artefact
@@ -1100,6 +1103,7 @@ $code.=<<___;
 .type ssse3_handler,\@abi-omnipotent
 .align 16
 ssse3_handler:
+ endbr64
 push %rsi
 push %rdi
 push %rbx
diff --git a/lib/libcrypto/aes/asm/aesni-x86_64.pl b/lib/libcrypto/aes/asm/aesni-x86_64.pl
index d394e6f696b..c261a379580 100644
--- a/lib/libcrypto/aes/asm/aesni-x86_64.pl
+++ b/lib/libcrypto/aes/asm/aesni-x86_64.pl
@@ -242,6 +242,7 @@ $code.=<<___;
 .type ${PREFIX}_encrypt,\@abi-omnipotent
 .align 16
 ${PREFIX}_encrypt:
+ endbr64
 movups ($inp),$inout0 # load input
 mov 240($key),$rounds # key->rounds
 ___
@@ -255,6 +256,7 @@ $code.=<<___;
 .type ${PREFIX}_decrypt,\@abi-omnipotent
 .align 16
 ${PREFIX}_decrypt:
+ endbr64
 movups ($inp),$inout0 # load input
 mov 240($key),$rounds # key->rounds
 ___
@@ -284,6 +286,7 @@ $code.=<<___;
 .type _aesni_${dir}rypt3,\@abi-omnipotent
 .align 16
 _aesni_${dir}rypt3:
+ endbr64
 $movkey ($key),$rndkey0
 shr \$1,$rounds
 $movkey 16($key),$rndkey1
@@ -328,6 +331,7 @@ $code.=<<___;
 .type _aesni_${dir}rypt4,\@abi-omnipotent
 .align 16
 _aesni_${dir}rypt4:
+ endbr64
 $movkey ($key),$rndkey0
 shr \$1,$rounds
 $movkey 16($key),$rndkey1
@@ -373,6 +377,7 @@ $code.=<<___;
 .type _aesni_${dir}rypt6,\@abi-omnipotent
 .align 16
 _aesni_${dir}rypt6:
+ endbr64
 $movkey ($key),$rndkey0
 shr \$1,$rounds
 $movkey 16($key),$rndkey1
@@ -437,6 +442,7 @@ $code.=<<___;
 .type _aesni_${dir}rypt8,\@abi-omnipotent
 .align 16
 _aesni_${dir}rypt8:
+ endbr64
 $movkey ($key),$rndkey0
 shr \$1,$rounds
 $movkey 16($key),$rndkey1
@@ -525,6 +531,7 @@ $code.=<<___;
 .type aesni_ecb_encrypt,\@function,5
 .align 16
 aesni_ecb_encrypt:
+ endbr64
 and \$-16,$len
 jz .Lecb_ret
@@ -830,6 +837,7 @@ $code.=<<___;
 .type aesni_ccm64_encrypt_blocks,\@function,6
 .align 16
 aesni_ccm64_encrypt_blocks:
+ endbr64
 ___
 $code.=<<___ if ($win64);
 lea -0x58(%rsp),%rsp
@@ -2478,6 +2486,7 @@ $code.=<<___;
 .type ${PREFIX}_set_decrypt_key,\@abi-omnipotent
 .align 16
 ${PREFIX}_set_decrypt_key:
+ endbr64
 sub \$8,%rsp
 call __aesni_set_encrypt_key
 shl \$4,$bits # rounds-1 after _aesni_set_encrypt_key
@@ -2528,6 +2537,7 @@ $code.=<<___;
 .type ${PREFIX}_set_encrypt_key,\@abi-omnipotent
 .align 16
 ${PREFIX}_set_encrypt_key:
+ endbr64
 __aesni_set_encrypt_key:
 sub \$8,%rsp
 mov \$-1,%rax
@@ -2749,6 +2759,7 @@ $code.=<<___ if ($PREFIX eq "aesni");
 .type ecb_se_handler,\@abi-omnipotent
 .align 16
 ecb_se_handler:
+ endbr64
 push %rsi
 push %rdi
 push %rbx
@@ -2768,6 +2779,7 @@ ecb_se_handler:
 .type ccm64_se_handler,\@abi-omnipotent
 .align 16
 ccm64_se_handler:
+ endbr64
 push %rsi
 push %rdi
 push %rbx
@@ -2809,6 +2821,7 @@ ccm64_se_handler:
 .type ctr32_se_handler,\@abi-omnipotent
 .align 16
 ctr32_se_handler:
+ endbr64
 push %rsi
 push %rdi
 push %rbx
@@ -2844,6 +2857,7 @@ ctr32_se_handler:
 .type xts_se_handler,\@abi-omnipotent
 .align 16
 xts_se_handler:
+ endbr64
 push %rsi
 push %rdi
 push %rbx
@@ -2885,6 +2899,7 @@ $code.=<<___;
 .type cbc_se_handler,\@abi-omnipotent
 .align 16
 cbc_se_handler:
+ endbr64
 push %rsi
 push %rdi
 push %rbx
diff --git a/lib/libcrypto/aes/asm/bsaes-x86_64.pl b/lib/libcrypto/aes/asm/bsaes-x86_64.pl
index 20e9e1f71f3..7098ba27f49 100644
--- a/lib/libcrypto/aes/asm/bsaes-x86_64.pl
+++ b/lib/libcrypto/aes/asm/bsaes-x86_64.pl
@@ -813,6 +813,7 @@ $code.=<<___;
 .type _bsaes_encrypt8,\@abi-omnipotent
 .align 64
 _bsaes_encrypt8:
+ endbr64
 lea .LBS0(%rip), $const # constants table
 movdqa ($key), @XMM[9] # round 0 key
@@ -877,6 +878,7 @@ $code.=<<___;
 .type _bsaes_decrypt8,\@abi-omnipotent
 .align 64
 _bsaes_decrypt8:
+ endbr64
 lea .LBS0(%rip), $const # constants table
 movdqa ($key), @XMM[9] # round 0 key
@@ -968,6 +970,7 @@ $code.=<<___;
 .type _bsaes_key_convert,\@abi-omnipotent
 .align 16
 _bsaes_key_convert:
+ endbr64
 lea .Lmasks(%rip), $const
 movdqu ($inp), %xmm7 # load round 0 key
 lea 0x10($inp), $inp
@@ -1057,6 +1060,7 @@ $code.=<<___;
 .type bsaes_enc_key_convert,\@function,2
 .align 16
 bsaes_enc_key_convert:
+ endbr64
 mov 240($inp),%r10d # pass rounds
 mov $inp,%rcx # pass key
 mov $out,%rax # pass key schedule
@@ -1071,6 +1075,7 @@ bsaes_enc_key_convert:
 .align 16
 bsaes_encrypt_128:
 .Lenc128_loop:
+ endbr64
 movdqu 0x00($inp), @XMM[0] # load input
 movdqu 0x10($inp), @XMM[1]
 movdqu 0x20($inp), @XMM[2]
@@ -1103,6 +1108,7 @@ bsaes_encrypt_128:
 .type bsaes_dec_key_convert,\@function,2
 .align 16
 bsaes_dec_key_convert:
+ endbr64
 mov 240($inp),%r10d # pass rounds
 mov $inp,%rcx # pass key
 mov $out,%rax # pass key schedule
@@ -1117,6 +1123,7 @@ bsaes_dec_key_convert:
 .type bsaes_decrypt_128,\@function,4
 .align 16
 bsaes_decrypt_128:
+ endbr64
 .Ldec128_loop:
 movdqu 0x00($inp), @XMM[0] # load input
 movdqu 0x10($inp), @XMM[1]
@@ -1162,6 +1169,7 @@ $code.=<<___;
 .type bsaes_ecb_encrypt_blocks,\@abi-omnipotent
 .align 16
 bsaes_ecb_encrypt_blocks:
+ endbr64
 mov %rsp, %rax
 .Lecb_enc_prologue:
 push %rbp
@@ -1363,6 +1371,7 @@ $code.=<<___;
 .type bsaes_ecb_decrypt_blocks,\@abi-omnipotent
 .align 16
 bsaes_ecb_decrypt_blocks:
+ endbr64
 mov %rsp, %rax
 .Lecb_dec_prologue:
 push %rbp
@@ -1568,6 +1577,7 @@ $code.=<<___;
 .type bsaes_cbc_encrypt,\@abi-omnipotent
 .align 16
 bsaes_cbc_encrypt:
+ endbr64
 ___
 $code.=<<___ if ($win64);
 mov 48(%rsp),$arg6 # pull direction flag
@@ -1855,6 +1865,7 @@ $code.=<<___;
 .type bsaes_ctr32_encrypt_blocks,\@abi-omnipotent
 .align 16
 bsaes_ctr32_encrypt_blocks:
+ endbr64
 mov %rsp, %rax
 .Lctr_enc_prologue:
 push %rbp
@@ -2096,6 +2107,7 @@ $code.=<<___;
 .type bsaes_xts_encrypt,\@abi-omnipotent
 .align 16
 bsaes_xts_encrypt:
+ endbr64
 mov %rsp, %rax
 .Lxts_enc_prologue:
 push %rbp
@@ -2477,6 +2489,7 @@ $code.=<<___;
 .type bsaes_xts_decrypt,\@abi-omnipotent
 .align 16
 bsaes_xts_decrypt:
+ endbr64
 mov %rsp, %rax
 .Lxts_dec_prologue:
 push %rbp
@@ -2953,6 +2966,7 @@ $code.=<<___;
 .type se_handler,\@abi-omnipotent
 .align 16
 se_handler:
+ endbr64
 push %rsi
 push %rdi
 push %rbx
diff --git a/lib/libcrypto/aes/asm/vpaes-x86_64.pl b/lib/libcrypto/aes/asm/vpaes-x86_64.pl
index 3ffb1a3038f..8ff8d8602bc 100644
--- a/lib/libcrypto/aes/asm/vpaes-x86_64.pl
+++ b/lib/libcrypto/aes/asm/vpaes-x86_64.pl
@@ -82,6 +82,7 @@ $code.=<<___;
 .type _vpaes_encrypt_core,\@abi-omnipotent
 .align 16
 _vpaes_encrypt_core:
+ endbr64
 mov %rdx, %r9
 mov \$16, %r11
 mov 240(%rdx),%eax
@@ -172,6 +173,7 @@ _vpaes_encrypt_core:
 .type _vpaes_decrypt_core,\@abi-omnipotent
 .align 16
 _vpaes_decrypt_core:
+ endbr64
 mov %rdx, %r9 # load key
 mov 240(%rdx),%eax
 movdqa %xmm9, %xmm1
@@ -279,6 +281,7 @@ _vpaes_decrypt_core:
 .type _vpaes_schedule_core,\@abi-omnipotent
 .align 16
 _vpaes_schedule_core:
+ endbr64
 # rdi = key
 # rsi = size in bits
 # rdx = buffer
@@ -464,6 +467,7 @@ _vpaes_schedule_core:
 .type _vpaes_schedule_192_smear,\@abi-omnipotent
 .align 16
 _vpaes_schedule_192_smear:
+ endbr64
 pshufd \$0x80, %xmm6, %xmm0 # d c 0 0 -> c 0 0 0
 pxor %xmm0, %xmm6 # -> c+d c 0 0
 pshufd \$0xFE, %xmm7, %xmm0 # b a _ _ -> b b b a
@@ -495,6 +499,7 @@ _vpaes_schedule_192_smear:
 .type _vpaes_schedule_round,\@abi-omnipotent
 .align 16
 _vpaes_schedule_round:
+ endbr64
 # extract rcon from xmm8
 pxor %xmm1, %xmm1
 palignr \$15, %xmm8, %xmm1
@@ -562,6 +567,7 @@ _vpaes_schedule_low_round:
 .type _vpaes_schedule_transform,\@abi-omnipotent
 .align 16
 _vpaes_schedule_transform:
+ endbr64
 movdqa %xmm9, %xmm1
 pandn %xmm0, %xmm1
 psrld \$4, %xmm1
@@ -600,6 +606,7 @@ _vpaes_schedule_transform:
 .type _vpaes_schedule_mangle,\@abi-omnipotent
 .align 16
 _vpaes_schedule_mangle:
+ endbr64
 movdqa %xmm0, %xmm4 # save xmm0 for later
 movdqa .Lk_mc_forward(%rip),%xmm5
 test %rcx, %rcx
@@ -673,6 +680,7 @@ _vpaes_schedule_mangle:
 .type ${PREFIX}_set_encrypt_key,\@function,3
 .align 16
 ${PREFIX}_set_encrypt_key:
+ endbr64
 ___
 $code.=<<___ if ($win64);
 lea -0xb8(%rsp),%rsp
@@ -721,6 +729,7 @@ $code.=<<___;
 .type ${PREFIX}_set_decrypt_key,\@function,3
 .align 16
 ${PREFIX}_set_decrypt_key:
+ endbr64
 ___
 $code.=<<___ if ($win64);
 lea -0xb8(%rsp),%rsp
@@ -774,6 +783,7 @@ $code.=<<___;
 .type ${PREFIX}_encrypt,\@function,3
 .align 16
 ${PREFIX}_encrypt:
+ endbr64
 ___
 $code.=<<___ if ($win64);
 lea -0xb8(%rsp),%rsp
@@ -817,6 +827,7 @@ $code.=<<___;
 .type ${PREFIX}_decrypt,\@function,3
 .align 16
 ${PREFIX}_decrypt:
+ endbr64
 ___
 $code.=<<___ if ($win64);
 lea -0xb8(%rsp),%rsp
@@ -866,6 +877,7 @@ $code.=<<___;
 .type ${PREFIX}_cbc_encrypt,\@function,6
 .align 16
 ${PREFIX}_cbc_encrypt:
+ endbr64
 xchg $key,$len
 ___
 ($len,$key)=($key,$len);
@@ -949,6 +961,7 @@ $code.=<<___;
 .type _vpaes_preheat,\@abi-omnipotent
 .align 16
 _vpaes_preheat:
+ endbr64
 lea .Lk_s0F(%rip), %r10
 movdqa -0x20(%r10), %xmm10 # .Lk_inv
 movdqa -0x10(%r10), %xmm11 # .Lk_inv+16
@@ -1079,6 +1092,7 @@ $code.=<<___;
 .type se_handler,\@abi-omnipotent
 .align 16
 se_handler:
+ endbr64
 push %rsi
 push %rdi
 push %rbx
diff --git a/lib/libcrypto/bn/arch/amd64/bignum_add.S b/lib/libcrypto/bn/arch/amd64/bignum_add.S
index d56fa5e3a83..06298ca69e1 100644
--- a/lib/libcrypto/bn/arch/amd64/bignum_add.S
+++ b/lib/libcrypto/bn/arch/amd64/bignum_add.S
@@ -49,6 +49,7 @@
 S2N_BN_SYMBOL(bignum_add):
+ endbr64
 #if WINDOWS_ABI
 push rdi
diff --git a/lib/libcrypto/bn/arch/amd64/bignum_cmadd.S b/lib/libcrypto/bn/arch/amd64/bignum_cmadd.S
index 1dc1e58705d..5ad712749f2 100644
--- a/lib/libcrypto/bn/arch/amd64/bignum_cmadd.S
+++ b/lib/libcrypto/bn/arch/amd64/bignum_cmadd.S
@@ -54,6 +54,7 @@
 S2N_BN_SYMBOL(bignum_cmadd):
+ endbr64
 #if WINDOWS_ABI
 push rdi
diff --git a/lib/libcrypto/bn/arch/amd64/bignum_cmul.S b/lib/libcrypto/bn/arch/amd64/bignum_cmul.S
index c1a23ccea4d..9199c8f48b6 100644
--- a/lib/libcrypto/bn/arch/amd64/bignum_cmul.S
+++ b/lib/libcrypto/bn/arch/amd64/bignum_cmul.S
@@ -51,6 +51,7 @@
 S2N_BN_SYMBOL(bignum_cmul):
+ endbr64
 #if WINDOWS_ABI
 push rdi
diff --git a/lib/libcrypto/bn/arch/amd64/bignum_mul.S b/lib/libcrypto/bn/arch/amd64/bignum_mul.S
index 42ac988a197..2d7ed190912 100644
--- a/lib/libcrypto/bn/arch/amd64/bignum_mul.S
+++ b/lib/libcrypto/bn/arch/amd64/bignum_mul.S
@@ -59,6 +59,7 @@
 S2N_BN_SYMBOL(bignum_mul):
+ endbr64
 #if WINDOWS_ABI
 push rdi
diff --git a/lib/libcrypto/bn/arch/amd64/bignum_mul_4_8_alt.S b/lib/libcrypto/bn/arch/amd64/bignum_mul_4_8_alt.S
index 3b7848b285f..f02b09b2887 100644
--- a/lib/libcrypto/bn/arch/amd64/bignum_mul_4_8_alt.S
+++ b/lib/libcrypto/bn/arch/amd64/bignum_mul_4_8_alt.S
@@ -72,6 +72,7 @@
 adc h, rdx
 S2N_BN_SYMBOL(bignum_mul_4_8_alt):
+ endbr64
 #if WINDOWS_ABI
 push rdi
diff --git a/lib/libcrypto/bn/arch/amd64/bignum_mul_8_16_alt.S b/lib/libcrypto/bn/arch/amd64/bignum_mul_8_16_alt.S
index 1be37840df6..97be83e1f72 100644
--- a/lib/libcrypto/bn/arch/amd64/bignum_mul_8_16_alt.S
+++ b/lib/libcrypto/bn/arch/amd64/bignum_mul_8_16_alt.S
@@ -72,6 +72,7 @@
 adc h, rdx
 S2N_BN_SYMBOL(bignum_mul_8_16_alt):
+ endbr64
 #if WINDOWS_ABI
 push rdi
diff --git a/lib/libcrypto/bn/arch/amd64/bignum_sqr.S b/lib/libcrypto/bn/arch/amd64/bignum_sqr.S
index 2e05b9c179a..c4a0cabf352 100644
--- a/lib/libcrypto/bn/arch/amd64/bignum_sqr.S
+++ b/lib/libcrypto/bn/arch/amd64/bignum_sqr.S
@@ -62,6 +62,7 @@
 #define llshort ebp
 S2N_BN_SYMBOL(bignum_sqr):
+ endbr64
 #if WINDOWS_ABI
 push rdi
diff --git a/lib/libcrypto/bn/arch/amd64/bignum_sqr_4_8_alt.S b/lib/libcrypto/bn/arch/amd64/bignum_sqr_4_8_alt.S
index a635177c650..b228414dcea 100644
--- a/lib/libcrypto/bn/arch/amd64/bignum_sqr_4_8_alt.S
+++ b/lib/libcrypto/bn/arch/amd64/bignum_sqr_4_8_alt.S
@@ -71,6 +71,7 @@
 adc c, 0
 S2N_BN_SYMBOL(bignum_sqr_4_8_alt):
+ endbr64
 #if WINDOWS_ABI
 push rdi
diff --git a/lib/libcrypto/bn/arch/amd64/bignum_sqr_8_16_alt.S b/lib/libcrypto/bn/arch/amd64/bignum_sqr_8_16_alt.S
index f698202d29c..04efeec7e28 100644
--- a/lib/libcrypto/bn/arch/amd64/bignum_sqr_8_16_alt.S
+++ b/lib/libcrypto/bn/arch/amd64/bignum_sqr_8_16_alt.S
@@ -103,6 +103,7 @@
 adc c, 0
 S2N_BN_SYMBOL(bignum_sqr_8_16_alt):
+ endbr64
 #if WINDOWS_ABI
 push rdi
diff --git a/lib/libcrypto/bn/arch/amd64/bignum_sub.S b/lib/libcrypto/bn/arch/amd64/bignum_sub.S
index f8e1fe35a8f..11a9bd7edd4 100644
--- a/lib/libcrypto/bn/arch/amd64/bignum_sub.S
+++ b/lib/libcrypto/bn/arch/amd64/bignum_sub.S
@@ -49,6 +49,7 @@
 S2N_BN_SYMBOL(bignum_sub):
+ endbr64
 #if WINDOWS_ABI
 push rdi
diff --git a/lib/libcrypto/bn/arch/amd64/word_clz.S b/lib/libcrypto/bn/arch/amd64/word_clz.S
index 025e98f9cb0..464a9d90fc3 100644
--- a/lib/libcrypto/bn/arch/amd64/word_clz.S
+++ b/lib/libcrypto/bn/arch/amd64/word_clz.S
@@ -30,6 +30,7 @@
 .text
 S2N_BN_SYMBOL(word_clz):
+ endbr64
 #if WINDOWS_ABI
 push rdi
diff --git a/lib/libcrypto/bn/asm/modexp512-x86_64.pl b/lib/libcrypto/bn/asm/modexp512-x86_64.pl
index 2e71a7f03dc..af78fff5412 100644
--- a/lib/libcrypto/bn/asm/modexp512-x86_64.pl
+++ b/lib/libcrypto/bn/asm/modexp512-x86_64.pl
@@ -347,6 +347,7 @@ $code.=<<___;
 .type MULADD_128x512,\@abi-omnipotent
 .align 16
 MULADD_128x512:
+ endbr64
 ___
 &MULSTEP_512([map("%r$_",(8..15))], "(+8*0)(%rcx)", "%rsi", "%rbp", "%rbx");
 $code.=<<___;
@@ -414,6 +415,7 @@ $code.=<<___;
 .type mont_reduce,\@abi-omnipotent
 .align 16
 mont_reduce:
+ endbr64
 ___
 my $STACK_DEPTH = 8;
@@ -676,6 +678,7 @@ $code.=<<___;
 .type mont_mul_a3b,\@abi-omnipotent
 .align 16
 mont_mul_a3b:
+ endbr64
 #
 # multiply tmp = src1 * src2
 # For multiply: dst = rcx, src1 = rdi, src2 = rsi
@@ -1077,6 +1080,7 @@ $code.=<<___;
 .type sqr_reduce,\@abi-omnipotent
 .align 16
 sqr_reduce:
+ endbr64
 mov (+$pResult_offset+8)(%rsp), %rcx
 ___
 &SQR_512("%rsp+$tmp16_offset+8", "%rcx", [map("%r$_",(10..15,8..9))], "%rbx", "%rbp", "%rsi", "%rdi");
@@ -1106,6 +1110,7 @@ $code.=<<___;
 .globl mod_exp_512
 .type mod_exp_512,\@function,4
 mod_exp_512:
+ endbr64
 push %rbp
 push %rbx
 push %r12
diff --git a/lib/libcrypto/bn/asm/x86_64-mont.pl b/lib/libcrypto/bn/asm/x86_64-mont.pl
index cae7309d5ba..6f5ab331e2f 100755
--- a/lib/libcrypto/bn/asm/x86_64-mont.pl
+++ b/lib/libcrypto/bn/asm/x86_64-mont.pl
@@ -63,6 +63,7 @@ $code=<<___;
 .type bn_mul_mont,\@function,6
 .align 16
 bn_mul_mont:
+ endbr64
 test \$3,${num}d
 jnz .Lmul_enter
 cmp \$8,${num}d
@@ -278,6 +279,7 @@ $code.=<<___;
 .align 16
 bn_mul4x_mont:
 .Lmul4x_enter:
+ endbr64
 push %rbx
 push %rbp
 push %r12
@@ -705,6 +707,7 @@ $code.=<<___;
 .align 16
 bn_sqr4x_mont:
 .Lsqr4x_enter:
+ endbr64
 push %rbx
 push %rbp
 push %r12
diff --git a/lib/libcrypto/bn/asm/x86_64-mont5.pl b/lib/libcrypto/bn/asm/x86_64-mont5.pl
index 7b9c6df2739..3b3325a6ccc 100755
--- a/lib/libcrypto/bn/asm/x86_64-mont5.pl
+++ b/lib/libcrypto/bn/asm/x86_64-mont5.pl
@@ -57,6 +57,7 @@ $code=<<___;
 .type bn_mul_mont_gather5,\@function,6
 .align 64
 bn_mul_mont_gather5:
+ endbr64
 test \$3,${num}d
 jnz .Lmul_enter
 cmp \$8,${num}d
@@ -387,6 +388,7 @@ $code.=<<___;
 .type bn_mul4x_mont_gather5,\@function,6
 .align 16
 bn_mul4x_mont_gather5:
+ endbr64
 .Lmul4x_enter:
 mov ${num}d,${num}d
 movd `($win64?56:8)`(%rsp),%xmm5 # load 7th argument
@@ -925,6 +927,7 @@ $code.=<<___;
 .type bn_scatter5,\@abi-omnipotent
 .align 16
 bn_scatter5:
+ endbr64
 cmp \$0, $num
 jz .Lscatter_epilogue
 lea ($tbl,$idx,8),$tbl
@@ -943,6 +946,7 @@ bn_scatter5:
 .type bn_gather5,\@abi-omnipotent
 .align 16
 bn_gather5:
+ endbr64
 .LSEH_begin_bn_gather5: # Win64 thing, but harmless in other cases
 # I can't trust assembler to use specific encoding:-(
 .byte 0x4c,0x8d,0x14,0x24 # lea (%rsp),%r10
@@ -1053,6 +1057,7 @@ $code.=<<___;
 .type mul_handler,\@abi-omnipotent
 .align 16
 mul_handler:
+ endbr64
 push %rsi
 push %rdi
 push %rbx
diff --git a/lib/libcrypto/camellia/asm/cmll-x86_64.pl b/lib/libcrypto/camellia/asm/cmll-x86_64.pl
index 586e5d6e93f..3ceed3e8991 100644
--- a/lib/libcrypto/camellia/asm/cmll-x86_64.pl
+++ b/lib/libcrypto/camellia/asm/cmll-x86_64.pl
@@ -116,6 +116,7 @@ $code=<<___;
 .type Camellia_EncryptBlock,\@abi-omnipotent
 .align 16
 Camellia_EncryptBlock:
+ endbr64
 movl \$128,%eax
 subl $arg0d,%eax
 movl \$3,$arg0d
@@ -128,6 +129,7 @@ Camellia_EncryptBlock:
 .align 16
 .Lenc_rounds:
 Camellia_EncryptBlock_Rounds:
+ endbr64
 push %rbx
 push %rbp
 push %r13
@@ -176,6 +178,7 @@ Camellia_EncryptBlock_Rounds:
 .type _x86_64_Camellia_encrypt,\@abi-omnipotent
 .align 16
 _x86_64_Camellia_encrypt:
+ endbr64
 xor 0($key),@S[1]
 xor 4($key),@S[0] # ^=key[0-3]
 xor 8($key),@S[3]
@@ -226,6 +229,7 @@ $code.=<<___;
 .type Camellia_DecryptBlock,\@abi-omnipotent
 .align 16
 Camellia_DecryptBlock:
+ endbr64
 movl \$128,%eax
 subl $arg0d,%eax
 movl \$3,$arg0d
@@ -238,6 +242,7 @@ Camellia_DecryptBlock:
 .align 16
 .Ldec_rounds:
 Camellia_DecryptBlock_Rounds:
+ endbr64
 push %rbx
 push %rbp
 push %r13
@@ -286,6 +291,7 @@ Camellia_DecryptBlock_Rounds:
 .type _x86_64_Camellia_decrypt,\@abi-omnipotent
 .align 16
 _x86_64_Camellia_decrypt:
+ endbr64
 xor 0($key),@S[1]
 xor 4($key),@S[0] # ^=key[0-3]
 xor 8($key),@S[3]
@@ -400,6 +406,7 @@ $code.=<<___;
 .type Camellia_Ekeygen,\@function,3
 .align 16
 Camellia_Ekeygen:
+ endbr64
 push %rbx
 push %rbp
 push %r13
@@ -630,6 +637,7 @@ $code.=<<___;
 .type Camellia_cbc_encrypt,\@function,6
 .align 16
 Camellia_cbc_encrypt:
+ endbr64
 cmp \$0,%rdx
 je .Lcbc_abort
 push %rbx
diff --git a/lib/libcrypto/md5/asm/md5-x86_64.pl b/lib/libcrypto/md5/asm/md5-x86_64.pl
index c902a1b532f..06d69094f44 100755
--- a/lib/libcrypto/md5/asm/md5-x86_64.pl
+++ b/lib/libcrypto/md5/asm/md5-x86_64.pl
@@ -128,6 +128,7 @@ $code .= <