author	Theo de Raadt <deraadt@cvs.openbsd.org>	2023-04-25 04:42:27 +0000
committer	Theo de Raadt <deraadt@cvs.openbsd.org>	2023-04-25 04:42:27 +0000
commit	58df9d669d3d7ca4e3d96aec565f39f2d421ed63 (patch)
tree	0c7e25baf50f9e7be8f7fac6991861d63dfce089 /lib
parent	e5afc37d23dd33ecc94dd525498c7d86adad6ff4 (diff)
Add endbr64 where needed by inspection. Passes regression tests.
ok jsing, and kind of tb an earlier version
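
For background: endbr64 is the landing-pad instruction of Intel CET indirect
branch tracking (IBT). When IBT is enforced, an indirect call or jump may only
land on an endbr64 instruction; landing anywhere else raises a
control-protection fault. That is why every symbol in these files that can be
reached indirectly, including the Win64 SEH handlers (which are invoked
through a pointer), now opens with one. A minimal sketch of the rule follows;
it is not part of this commit, and the labels demo_fn and dispatch are
hypothetical:

	.text
	.globl	demo_fn
	.type	demo_fn,@function
demo_fn:
	endbr64				# landing pad: demo_fn's address is
					# taken and called indirectly below
	ret

dispatch:
	endbr64
	lea	demo_fn(%rip),%rax	# take the function's address
	call	*%rax			# indirect call: with IBT enforced,
					# the CPU faults (#CP) unless the
					# target begins with endbr64
	ret

On hardware without CET, endbr64 decodes as a multi-byte NOP, so adding it is
backward compatible; the only cost is four bytes per entry point.
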
Diffstat (limited to 'lib')
-rwxr-xr-x  lib/libcrypto/aes/asm/aes-x86_64.pl  13
-rw-r--r--  lib/libcrypto/aes/asm/aesni-sha1-x86_64.pl  4
-rw-r--r--  lib/libcrypto/aes/asm/aesni-x86_64.pl  15
-rw-r--r--  lib/libcrypto/aes/asm/bsaes-x86_64.pl  14
-rw-r--r--  lib/libcrypto/aes/asm/vpaes-x86_64.pl  14
-rw-r--r--  lib/libcrypto/bn/arch/amd64/bignum_add.S  1
-rw-r--r--  lib/libcrypto/bn/arch/amd64/bignum_cmadd.S  1
-rw-r--r--  lib/libcrypto/bn/arch/amd64/bignum_cmul.S  1
-rw-r--r--  lib/libcrypto/bn/arch/amd64/bignum_mul.S  1
-rw-r--r--  lib/libcrypto/bn/arch/amd64/bignum_mul_4_8_alt.S  1
-rw-r--r--  lib/libcrypto/bn/arch/amd64/bignum_mul_8_16_alt.S  1
-rw-r--r--  lib/libcrypto/bn/arch/amd64/bignum_sqr.S  1
-rw-r--r--  lib/libcrypto/bn/arch/amd64/bignum_sqr_4_8_alt.S  1
-rw-r--r--  lib/libcrypto/bn/arch/amd64/bignum_sqr_8_16_alt.S  1
-rw-r--r--  lib/libcrypto/bn/arch/amd64/bignum_sub.S  1
-rw-r--r--  lib/libcrypto/bn/arch/amd64/word_clz.S  1
-rw-r--r--  lib/libcrypto/bn/asm/modexp512-x86_64.pl  5
-rwxr-xr-x  lib/libcrypto/bn/asm/x86_64-mont.pl  3
-rwxr-xr-x  lib/libcrypto/bn/asm/x86_64-mont5.pl  5
-rw-r--r--  lib/libcrypto/camellia/asm/cmll-x86_64.pl  8
-rwxr-xr-x  lib/libcrypto/md5/asm/md5-x86_64.pl  1
-rw-r--r--  lib/libcrypto/modes/asm/ghash-x86_64.pl  4
-rw-r--r--  lib/libcrypto/rc4/asm/rc4-md5-x86_64.pl  3
-rwxr-xr-x  lib/libcrypto/rc4/asm/rc4-x86_64.pl  6
-rwxr-xr-x  lib/libcrypto/sha/asm/sha1-x86_64.pl  4
-rwxr-xr-x  lib/libcrypto/sha/asm/sha512-x86_64.pl  1
-rw-r--r--  lib/libcrypto/whrlpool/asm/wp-x86_64.pl  1
-rw-r--r--  lib/libcrypto/x86_64cpuid.pl  2
28 files changed, 113 insertions, 1 deletion
diff --git a/lib/libcrypto/aes/asm/aes-x86_64.pl b/lib/libcrypto/aes/asm/aes-x86_64.pl
index b7399b552ad..78ba20ca59f 100755
--- a/lib/libcrypto/aes/asm/aes-x86_64.pl
+++ b/lib/libcrypto/aes/asm/aes-x86_64.pl
@@ -318,6 +318,7 @@ $code.=<<___;
.type _x86_64_AES_encrypt,\@abi-omnipotent
.align 16
_x86_64_AES_encrypt:
+ endbr64
xor 0($key),$s0 # xor with key
xor 4($key),$s1
xor 8($key),$s2
@@ -548,6 +549,7 @@ $code.=<<___;
.type _x86_64_AES_encrypt_compact,\@abi-omnipotent
.align 16
_x86_64_AES_encrypt_compact:
+ endbr64
lea 128($sbox),$inp # size optimization
mov 0-128($inp),$acc1 # prefetch Te4
mov 32-128($inp),$acc2
@@ -593,6 +595,7 @@ $code.=<<___;
.hidden asm_AES_encrypt
asm_AES_encrypt:
AES_encrypt:
+ endbr64
push %rbx
push %rbp
push %r12
@@ -884,6 +887,7 @@ $code.=<<___;
.type _x86_64_AES_decrypt,\@abi-omnipotent
.align 16
_x86_64_AES_decrypt:
+ endbr64
xor 0($key),$s0 # xor with key
xor 4($key),$s1
xor 8($key),$s2
@@ -1138,6 +1142,7 @@ $code.=<<___;
.type _x86_64_AES_decrypt_compact,\@abi-omnipotent
.align 16
_x86_64_AES_decrypt_compact:
+ endbr64
lea 128($sbox),$inp # size optimization
mov 0-128($inp),$acc1 # prefetch Td4
mov 32-128($inp),$acc2
@@ -1192,6 +1197,7 @@ $code.=<<___;
.hidden asm_AES_decrypt
asm_AES_decrypt:
AES_decrypt:
+ endbr64
push %rbx
push %rbp
push %r12
@@ -1291,6 +1297,7 @@ $code.=<<___;
.type AES_set_encrypt_key,\@function,3
.align 16
AES_set_encrypt_key:
+ endbr64
push %rbx
push %rbp
push %r12 # redundant, but allows to share
@@ -1316,6 +1323,7 @@ AES_set_encrypt_key:
.type _x86_64_AES_set_encrypt_key,\@abi-omnipotent
.align 16
_x86_64_AES_set_encrypt_key:
+ endbr64
mov %esi,%ecx # %ecx=bits
mov %rdi,%rsi # %rsi=userKey
mov %rdx,%rdi # %rdi=key
@@ -1561,6 +1569,7 @@ $code.=<<___;
.type AES_set_decrypt_key,\@function,3
.align 16
AES_set_decrypt_key:
+ endbr64
push %rbx
push %rbp
push %r12
@@ -1660,6 +1669,7 @@ $code.=<<___;
.hidden asm_AES_cbc_encrypt
asm_AES_cbc_encrypt:
AES_cbc_encrypt:
+ endbr64
cmp \$0,%rdx # check length
je .Lcbc_epilogue
pushfq
@@ -2551,6 +2561,7 @@ $code.=<<___;
.type block_se_handler,\@abi-omnipotent
.align 16
block_se_handler:
+ endbr64
push %rsi
push %rdi
push %rbx
@@ -2609,6 +2620,7 @@ block_se_handler:
.type key_se_handler,\@abi-omnipotent
.align 16
key_se_handler:
+ endbr64
push %rsi
push %rdi
push %rbx
@@ -2666,6 +2678,7 @@ key_se_handler:
.type cbc_se_handler,\@abi-omnipotent
.align 16
cbc_se_handler:
+ endbr64
push %rsi
push %rdi
push %rbx
diff --git a/lib/libcrypto/aes/asm/aesni-sha1-x86_64.pl b/lib/libcrypto/aes/asm/aesni-sha1-x86_64.pl
index bafa906a050..879d16793f5 100644
--- a/lib/libcrypto/aes/asm/aesni-sha1-x86_64.pl
+++ b/lib/libcrypto/aes/asm/aesni-sha1-x86_64.pl
@@ -89,6 +89,7 @@ $code.=<<___;
.type aesni_cbc_sha1_enc,\@abi-omnipotent
.align 16
aesni_cbc_sha1_enc:
+ endbr64
# caller should check for SSSE3 and AES-NI bits
mov OPENSSL_ia32cap_P+0(%rip),%r10d
mov OPENSSL_ia32cap_P+4(%rip),%r11d
@@ -132,6 +133,7 @@ $code.=<<___;
.type aesni_cbc_sha1_enc_ssse3,\@function,6
.align 16
aesni_cbc_sha1_enc_ssse3:
+ endbr64
mov `($win64?56:8)`(%rsp),$inp # load 7th argument
#shr \$6,$len # debugging artefact
#jz .Lepilogue_ssse3 # debugging artefact
@@ -650,6 +652,7 @@ $code.=<<___;
.type aesni_cbc_sha1_enc_avx,\@function,6
.align 16
aesni_cbc_sha1_enc_avx:
+ endbr64
mov `($win64?56:8)`(%rsp),$inp # load 7th argument
#shr \$6,$len # debugging artefact
#jz .Lepilogue_avx # debugging artefact
@@ -1100,6 +1103,7 @@ $code.=<<___;
.type ssse3_handler,\@abi-omnipotent
.align 16
ssse3_handler:
+ endbr64
push %rsi
push %rdi
push %rbx
diff --git a/lib/libcrypto/aes/asm/aesni-x86_64.pl b/lib/libcrypto/aes/asm/aesni-x86_64.pl
index d394e6f696b..c261a379580 100644
--- a/lib/libcrypto/aes/asm/aesni-x86_64.pl
+++ b/lib/libcrypto/aes/asm/aesni-x86_64.pl
@@ -242,6 +242,7 @@ $code.=<<___;
.type ${PREFIX}_encrypt,\@abi-omnipotent
.align 16
${PREFIX}_encrypt:
+ endbr64
movups ($inp),$inout0 # load input
mov 240($key),$rounds # key->rounds
___
@@ -255,6 +256,7 @@ $code.=<<___;
.type ${PREFIX}_decrypt,\@abi-omnipotent
.align 16
${PREFIX}_decrypt:
+ endbr64
movups ($inp),$inout0 # load input
mov 240($key),$rounds # key->rounds
___
@@ -284,6 +286,7 @@ $code.=<<___;
.type _aesni_${dir}rypt3,\@abi-omnipotent
.align 16
_aesni_${dir}rypt3:
+ endbr64
$movkey ($key),$rndkey0
shr \$1,$rounds
$movkey 16($key),$rndkey1
@@ -328,6 +331,7 @@ $code.=<<___;
.type _aesni_${dir}rypt4,\@abi-omnipotent
.align 16
_aesni_${dir}rypt4:
+ endbr64
$movkey ($key),$rndkey0
shr \$1,$rounds
$movkey 16($key),$rndkey1
@@ -373,6 +377,7 @@ $code.=<<___;
.type _aesni_${dir}rypt6,\@abi-omnipotent
.align 16
_aesni_${dir}rypt6:
+ endbr64
$movkey ($key),$rndkey0
shr \$1,$rounds
$movkey 16($key),$rndkey1
@@ -437,6 +442,7 @@ $code.=<<___;
.type _aesni_${dir}rypt8,\@abi-omnipotent
.align 16
_aesni_${dir}rypt8:
+ endbr64
$movkey ($key),$rndkey0
shr \$1,$rounds
$movkey 16($key),$rndkey1
@@ -525,6 +531,7 @@ $code.=<<___;
.type aesni_ecb_encrypt,\@function,5
.align 16
aesni_ecb_encrypt:
+ endbr64
and \$-16,$len
jz .Lecb_ret
@@ -830,6 +837,7 @@ $code.=<<___;
.type aesni_ccm64_encrypt_blocks,\@function,6
.align 16
aesni_ccm64_encrypt_blocks:
+ endbr64
___
$code.=<<___ if ($win64);
lea -0x58(%rsp),%rsp
@@ -2478,6 +2486,7 @@ $code.=<<___;
.type ${PREFIX}_set_decrypt_key,\@abi-omnipotent
.align 16
${PREFIX}_set_decrypt_key:
+ endbr64
sub \$8,%rsp
call __aesni_set_encrypt_key
shl \$4,$bits # rounds-1 after _aesni_set_encrypt_key
@@ -2528,6 +2537,7 @@ $code.=<<___;
.type ${PREFIX}_set_encrypt_key,\@abi-omnipotent
.align 16
${PREFIX}_set_encrypt_key:
+ endbr64
__aesni_set_encrypt_key:
sub \$8,%rsp
mov \$-1,%rax
@@ -2749,6 +2759,7 @@ $code.=<<___ if ($PREFIX eq "aesni");
.type ecb_se_handler,\@abi-omnipotent
.align 16
ecb_se_handler:
+ endbr64
push %rsi
push %rdi
push %rbx
@@ -2768,6 +2779,7 @@ ecb_se_handler:
.type ccm64_se_handler,\@abi-omnipotent
.align 16
ccm64_se_handler:
+ endbr64
push %rsi
push %rdi
push %rbx
@@ -2809,6 +2821,7 @@ ccm64_se_handler:
.type ctr32_se_handler,\@abi-omnipotent
.align 16
ctr32_se_handler:
+ endbr64
push %rsi
push %rdi
push %rbx
@@ -2844,6 +2857,7 @@ ctr32_se_handler:
.type xts_se_handler,\@abi-omnipotent
.align 16
xts_se_handler:
+ endbr64
push %rsi
push %rdi
push %rbx
@@ -2885,6 +2899,7 @@ $code.=<<___;
.type cbc_se_handler,\@abi-omnipotent
.align 16
cbc_se_handler:
+ endbr64
push %rsi
push %rdi
push %rbx
diff --git a/lib/libcrypto/aes/asm/bsaes-x86_64.pl b/lib/libcrypto/aes/asm/bsaes-x86_64.pl
index 20e9e1f71f3..7098ba27f49 100644
--- a/lib/libcrypto/aes/asm/bsaes-x86_64.pl
+++ b/lib/libcrypto/aes/asm/bsaes-x86_64.pl
@@ -813,6 +813,7 @@ $code.=<<___;
.type _bsaes_encrypt8,\@abi-omnipotent
.align 64
_bsaes_encrypt8:
+ endbr64
lea .LBS0(%rip), $const # constants table
movdqa ($key), @XMM[9] # round 0 key
@@ -877,6 +878,7 @@ $code.=<<___;
.type _bsaes_decrypt8,\@abi-omnipotent
.align 64
_bsaes_decrypt8:
+ endbr64
lea .LBS0(%rip), $const # constants table
movdqa ($key), @XMM[9] # round 0 key
@@ -968,6 +970,7 @@ $code.=<<___;
.type _bsaes_key_convert,\@abi-omnipotent
.align 16
_bsaes_key_convert:
+ endbr64
lea .Lmasks(%rip), $const
movdqu ($inp), %xmm7 # load round 0 key
lea 0x10($inp), $inp
@@ -1057,6 +1060,7 @@ $code.=<<___;
.type bsaes_enc_key_convert,\@function,2
.align 16
bsaes_enc_key_convert:
+ endbr64
mov 240($inp),%r10d # pass rounds
mov $inp,%rcx # pass key
mov $out,%rax # pass key schedule
@@ -1071,6 +1075,7 @@ bsaes_enc_key_convert:
.align 16
bsaes_encrypt_128:
.Lenc128_loop:
+ endbr64
movdqu 0x00($inp), @XMM[0] # load input
movdqu 0x10($inp), @XMM[1]
movdqu 0x20($inp), @XMM[2]
@@ -1103,6 +1108,7 @@ bsaes_encrypt_128:
.type bsaes_dec_key_convert,\@function,2
.align 16
bsaes_dec_key_convert:
+ endbr64
mov 240($inp),%r10d # pass rounds
mov $inp,%rcx # pass key
mov $out,%rax # pass key schedule
@@ -1117,6 +1123,7 @@ bsaes_dec_key_convert:
.type bsaes_decrypt_128,\@function,4
.align 16
bsaes_decrypt_128:
+ endbr64
.Ldec128_loop:
movdqu 0x00($inp), @XMM[0] # load input
movdqu 0x10($inp), @XMM[1]
@@ -1162,6 +1169,7 @@ $code.=<<___;
.type bsaes_ecb_encrypt_blocks,\@abi-omnipotent
.align 16
bsaes_ecb_encrypt_blocks:
+ endbr64
mov %rsp, %rax
.Lecb_enc_prologue:
push %rbp
@@ -1363,6 +1371,7 @@ $code.=<<___;
.type bsaes_ecb_decrypt_blocks,\@abi-omnipotent
.align 16
bsaes_ecb_decrypt_blocks:
+ endbr64
mov %rsp, %rax
.Lecb_dec_prologue:
push %rbp
@@ -1568,6 +1577,7 @@ $code.=<<___;
.type bsaes_cbc_encrypt,\@abi-omnipotent
.align 16
bsaes_cbc_encrypt:
+ endbr64
___
$code.=<<___ if ($win64);
mov 48(%rsp),$arg6 # pull direction flag
@@ -1855,6 +1865,7 @@ $code.=<<___;
.type bsaes_ctr32_encrypt_blocks,\@abi-omnipotent
.align 16
bsaes_ctr32_encrypt_blocks:
+ endbr64
mov %rsp, %rax
.Lctr_enc_prologue:
push %rbp
@@ -2096,6 +2107,7 @@ $code.=<<___;
.type bsaes_xts_encrypt,\@abi-omnipotent
.align 16
bsaes_xts_encrypt:
+ endbr64
mov %rsp, %rax
.Lxts_enc_prologue:
push %rbp
@@ -2477,6 +2489,7 @@ $code.=<<___;
.type bsaes_xts_decrypt,\@abi-omnipotent
.align 16
bsaes_xts_decrypt:
+ endbr64
mov %rsp, %rax
.Lxts_dec_prologue:
push %rbp
@@ -2953,6 +2966,7 @@ $code.=<<___;
.type se_handler,\@abi-omnipotent
.align 16
se_handler:
+ endbr64
push %rsi
push %rdi
push %rbx
diff --git a/lib/libcrypto/aes/asm/vpaes-x86_64.pl b/lib/libcrypto/aes/asm/vpaes-x86_64.pl
index 3ffb1a3038f..8ff8d8602bc 100644
--- a/lib/libcrypto/aes/asm/vpaes-x86_64.pl
+++ b/lib/libcrypto/aes/asm/vpaes-x86_64.pl
@@ -82,6 +82,7 @@ $code.=<<___;
.type _vpaes_encrypt_core,\@abi-omnipotent
.align 16
_vpaes_encrypt_core:
+ endbr64
mov %rdx, %r9
mov \$16, %r11
mov 240(%rdx),%eax
@@ -172,6 +173,7 @@ _vpaes_encrypt_core:
.type _vpaes_decrypt_core,\@abi-omnipotent
.align 16
_vpaes_decrypt_core:
+ endbr64
mov %rdx, %r9 # load key
mov 240(%rdx),%eax
movdqa %xmm9, %xmm1
@@ -279,6 +281,7 @@ _vpaes_decrypt_core:
.type _vpaes_schedule_core,\@abi-omnipotent
.align 16
_vpaes_schedule_core:
+ endbr64
# rdi = key
# rsi = size in bits
# rdx = buffer
@@ -464,6 +467,7 @@ _vpaes_schedule_core:
.type _vpaes_schedule_192_smear,\@abi-omnipotent
.align 16
_vpaes_schedule_192_smear:
+ endbr64
pshufd \$0x80, %xmm6, %xmm0 # d c 0 0 -> c 0 0 0
pxor %xmm0, %xmm6 # -> c+d c 0 0
pshufd \$0xFE, %xmm7, %xmm0 # b a _ _ -> b b b a
@@ -495,6 +499,7 @@ _vpaes_schedule_192_smear:
.type _vpaes_schedule_round,\@abi-omnipotent
.align 16
_vpaes_schedule_round:
+ endbr64
# extract rcon from xmm8
pxor %xmm1, %xmm1
palignr \$15, %xmm8, %xmm1
@@ -562,6 +567,7 @@ _vpaes_schedule_low_round:
.type _vpaes_schedule_transform,\@abi-omnipotent
.align 16
_vpaes_schedule_transform:
+ endbr64
movdqa %xmm9, %xmm1
pandn %xmm0, %xmm1
psrld \$4, %xmm1
@@ -600,6 +606,7 @@ _vpaes_schedule_transform:
.type _vpaes_schedule_mangle,\@abi-omnipotent
.align 16
_vpaes_schedule_mangle:
+ endbr64
movdqa %xmm0, %xmm4 # save xmm0 for later
movdqa .Lk_mc_forward(%rip),%xmm5
test %rcx, %rcx
@@ -673,6 +680,7 @@ _vpaes_schedule_mangle:
.type ${PREFIX}_set_encrypt_key,\@function,3
.align 16
${PREFIX}_set_encrypt_key:
+ endbr64
___
$code.=<<___ if ($win64);
lea -0xb8(%rsp),%rsp
@@ -721,6 +729,7 @@ $code.=<<___;
.type ${PREFIX}_set_decrypt_key,\@function,3
.align 16
${PREFIX}_set_decrypt_key:
+ endbr64
___
$code.=<<___ if ($win64);
lea -0xb8(%rsp),%rsp
@@ -774,6 +783,7 @@ $code.=<<___;
.type ${PREFIX}_encrypt,\@function,3
.align 16
${PREFIX}_encrypt:
+ endbr64
___
$code.=<<___ if ($win64);
lea -0xb8(%rsp),%rsp
@@ -817,6 +827,7 @@ $code.=<<___;
.type ${PREFIX}_decrypt,\@function,3
.align 16
${PREFIX}_decrypt:
+ endbr64
___
$code.=<<___ if ($win64);
lea -0xb8(%rsp),%rsp
@@ -866,6 +877,7 @@ $code.=<<___;
.type ${PREFIX}_cbc_encrypt,\@function,6
.align 16
${PREFIX}_cbc_encrypt:
+ endbr64
xchg $key,$len
___
($len,$key)=($key,$len);
@@ -949,6 +961,7 @@ $code.=<<___;
.type _vpaes_preheat,\@abi-omnipotent
.align 16
_vpaes_preheat:
+ endbr64
lea .Lk_s0F(%rip), %r10
movdqa -0x20(%r10), %xmm10 # .Lk_inv
movdqa -0x10(%r10), %xmm11 # .Lk_inv+16
@@ -1079,6 +1092,7 @@ $code.=<<___;
.type se_handler,\@abi-omnipotent
.align 16
se_handler:
+ endbr64
push %rsi
push %rdi
push %rbx
diff --git a/lib/libcrypto/bn/arch/amd64/bignum_add.S b/lib/libcrypto/bn/arch/amd64/bignum_add.S
index d56fa5e3a83..06298ca69e1 100644
--- a/lib/libcrypto/bn/arch/amd64/bignum_add.S
+++ b/lib/libcrypto/bn/arch/amd64/bignum_add.S
@@ -49,6 +49,7 @@
S2N_BN_SYMBOL(bignum_add):
+ endbr64
#if WINDOWS_ABI
push rdi
diff --git a/lib/libcrypto/bn/arch/amd64/bignum_cmadd.S b/lib/libcrypto/bn/arch/amd64/bignum_cmadd.S
index 1dc1e58705d..5ad712749f2 100644
--- a/lib/libcrypto/bn/arch/amd64/bignum_cmadd.S
+++ b/lib/libcrypto/bn/arch/amd64/bignum_cmadd.S
@@ -54,6 +54,7 @@
S2N_BN_SYMBOL(bignum_cmadd):
+ endbr64
#if WINDOWS_ABI
push rdi
diff --git a/lib/libcrypto/bn/arch/amd64/bignum_cmul.S b/lib/libcrypto/bn/arch/amd64/bignum_cmul.S
index c1a23ccea4d..9199c8f48b6 100644
--- a/lib/libcrypto/bn/arch/amd64/bignum_cmul.S
+++ b/lib/libcrypto/bn/arch/amd64/bignum_cmul.S
@@ -51,6 +51,7 @@
S2N_BN_SYMBOL(bignum_cmul):
+ endbr64
#if WINDOWS_ABI
push rdi
diff --git a/lib/libcrypto/bn/arch/amd64/bignum_mul.S b/lib/libcrypto/bn/arch/amd64/bignum_mul.S
index 42ac988a197..2d7ed190912 100644
--- a/lib/libcrypto/bn/arch/amd64/bignum_mul.S
+++ b/lib/libcrypto/bn/arch/amd64/bignum_mul.S
@@ -59,6 +59,7 @@
S2N_BN_SYMBOL(bignum_mul):
+ endbr64
#if WINDOWS_ABI
push rdi
diff --git a/lib/libcrypto/bn/arch/amd64/bignum_mul_4_8_alt.S b/lib/libcrypto/bn/arch/amd64/bignum_mul_4_8_alt.S
index 3b7848b285f..f02b09b2887 100644
--- a/lib/libcrypto/bn/arch/amd64/bignum_mul_4_8_alt.S
+++ b/lib/libcrypto/bn/arch/amd64/bignum_mul_4_8_alt.S
@@ -72,6 +72,7 @@
adc h, rdx
S2N_BN_SYMBOL(bignum_mul_4_8_alt):
+ endbr64
#if WINDOWS_ABI
push rdi
diff --git a/lib/libcrypto/bn/arch/amd64/bignum_mul_8_16_alt.S b/lib/libcrypto/bn/arch/amd64/bignum_mul_8_16_alt.S
index 1be37840df6..97be83e1f72 100644
--- a/lib/libcrypto/bn/arch/amd64/bignum_mul_8_16_alt.S
+++ b/lib/libcrypto/bn/arch/amd64/bignum_mul_8_16_alt.S
@@ -72,6 +72,7 @@
adc h, rdx
S2N_BN_SYMBOL(bignum_mul_8_16_alt):
+ endbr64
#if WINDOWS_ABI
push rdi
diff --git a/lib/libcrypto/bn/arch/amd64/bignum_sqr.S b/lib/libcrypto/bn/arch/amd64/bignum_sqr.S
index 2e05b9c179a..c4a0cabf352 100644
--- a/lib/libcrypto/bn/arch/amd64/bignum_sqr.S
+++ b/lib/libcrypto/bn/arch/amd64/bignum_sqr.S
@@ -62,6 +62,7 @@
#define llshort ebp
S2N_BN_SYMBOL(bignum_sqr):
+ endbr64
#if WINDOWS_ABI
push rdi
diff --git a/lib/libcrypto/bn/arch/amd64/bignum_sqr_4_8_alt.S b/lib/libcrypto/bn/arch/amd64/bignum_sqr_4_8_alt.S
index a635177c650..b228414dcea 100644
--- a/lib/libcrypto/bn/arch/amd64/bignum_sqr_4_8_alt.S
+++ b/lib/libcrypto/bn/arch/amd64/bignum_sqr_4_8_alt.S
@@ -71,6 +71,7 @@
adc c, 0
S2N_BN_SYMBOL(bignum_sqr_4_8_alt):
+ endbr64
#if WINDOWS_ABI
push rdi
diff --git a/lib/libcrypto/bn/arch/amd64/bignum_sqr_8_16_alt.S b/lib/libcrypto/bn/arch/amd64/bignum_sqr_8_16_alt.S
index f698202d29c..04efeec7e28 100644
--- a/lib/libcrypto/bn/arch/amd64/bignum_sqr_8_16_alt.S
+++ b/lib/libcrypto/bn/arch/amd64/bignum_sqr_8_16_alt.S
@@ -103,6 +103,7 @@
adc c, 0
S2N_BN_SYMBOL(bignum_sqr_8_16_alt):
+ endbr64
#if WINDOWS_ABI
push rdi
diff --git a/lib/libcrypto/bn/arch/amd64/bignum_sub.S b/lib/libcrypto/bn/arch/amd64/bignum_sub.S
index f8e1fe35a8f..11a9bd7edd4 100644
--- a/lib/libcrypto/bn/arch/amd64/bignum_sub.S
+++ b/lib/libcrypto/bn/arch/amd64/bignum_sub.S
@@ -49,6 +49,7 @@
S2N_BN_SYMBOL(bignum_sub):
+ endbr64
#if WINDOWS_ABI
push rdi
diff --git a/lib/libcrypto/bn/arch/amd64/word_clz.S b/lib/libcrypto/bn/arch/amd64/word_clz.S
index 025e98f9cb0..464a9d90fc3 100644
--- a/lib/libcrypto/bn/arch/amd64/word_clz.S
+++ b/lib/libcrypto/bn/arch/amd64/word_clz.S
@@ -30,6 +30,7 @@
.text
S2N_BN_SYMBOL(word_clz):
+ endbr64
#if WINDOWS_ABI
push rdi
diff --git a/lib/libcrypto/bn/asm/modexp512-x86_64.pl b/lib/libcrypto/bn/asm/modexp512-x86_64.pl
index 2e71a7f03dc..af78fff5412 100644
--- a/lib/libcrypto/bn/asm/modexp512-x86_64.pl
+++ b/lib/libcrypto/bn/asm/modexp512-x86_64.pl
@@ -347,6 +347,7 @@ $code.=<<___;
.type MULADD_128x512,\@abi-omnipotent
.align 16
MULADD_128x512:
+ endbr64
___
&MULSTEP_512([map("%r$_",(8..15))], "(+8*0)(%rcx)", "%rsi", "%rbp", "%rbx");
$code.=<<___;
@@ -414,6 +415,7 @@ $code.=<<___;
.type mont_reduce,\@abi-omnipotent
.align 16
mont_reduce:
+ endbr64
___
my $STACK_DEPTH = 8;
@@ -676,6 +678,7 @@ $code.=<<___;
.type mont_mul_a3b,\@abi-omnipotent
.align 16
mont_mul_a3b:
+ endbr64
#
# multiply tmp = src1 * src2
# For multiply: dst = rcx, src1 = rdi, src2 = rsi
@@ -1077,6 +1080,7 @@ $code.=<<___;
.type sqr_reduce,\@abi-omnipotent
.align 16
sqr_reduce:
+ endbr64
mov (+$pResult_offset+8)(%rsp), %rcx
___
&SQR_512("%rsp+$tmp16_offset+8", "%rcx", [map("%r$_",(10..15,8..9))], "%rbx", "%rbp", "%rsi", "%rdi");
@@ -1106,6 +1110,7 @@ $code.=<<___;
.globl mod_exp_512
.type mod_exp_512,\@function,4
mod_exp_512:
+ endbr64
push %rbp
push %rbx
push %r12
diff --git a/lib/libcrypto/bn/asm/x86_64-mont.pl b/lib/libcrypto/bn/asm/x86_64-mont.pl
index cae7309d5ba..6f5ab331e2f 100755
--- a/lib/libcrypto/bn/asm/x86_64-mont.pl
+++ b/lib/libcrypto/bn/asm/x86_64-mont.pl
@@ -63,6 +63,7 @@ $code=<<___;
.type bn_mul_mont,\@function,6
.align 16
bn_mul_mont:
+ endbr64
test \$3,${num}d
jnz .Lmul_enter
cmp \$8,${num}d
@@ -278,6 +279,7 @@ $code.=<<___;
.align 16
bn_mul4x_mont:
.Lmul4x_enter:
+ endbr64
push %rbx
push %rbp
push %r12
@@ -705,6 +707,7 @@ $code.=<<___;
.align 16
bn_sqr4x_mont:
.Lsqr4x_enter:
+ endbr64
push %rbx
push %rbp
push %r12
diff --git a/lib/libcrypto/bn/asm/x86_64-mont5.pl b/lib/libcrypto/bn/asm/x86_64-mont5.pl
index 7b9c6df2739..3b3325a6ccc 100755
--- a/lib/libcrypto/bn/asm/x86_64-mont5.pl
+++ b/lib/libcrypto/bn/asm/x86_64-mont5.pl
@@ -57,6 +57,7 @@ $code=<<___;
.type bn_mul_mont_gather5,\@function,6
.align 64
bn_mul_mont_gather5:
+ endbr64
test \$3,${num}d
jnz .Lmul_enter
cmp \$8,${num}d
@@ -387,6 +388,7 @@ $code.=<<___;
.type bn_mul4x_mont_gather5,\@function,6
.align 16
bn_mul4x_mont_gather5:
+ endbr64
.Lmul4x_enter:
mov ${num}d,${num}d
movd `($win64?56:8)`(%rsp),%xmm5 # load 7th argument
@@ -925,6 +927,7 @@ $code.=<<___;
.type bn_scatter5,\@abi-omnipotent
.align 16
bn_scatter5:
+ endbr64
cmp \$0, $num
jz .Lscatter_epilogue
lea ($tbl,$idx,8),$tbl
@@ -943,6 +946,7 @@ bn_scatter5:
.type bn_gather5,\@abi-omnipotent
.align 16
bn_gather5:
+ endbr64
.LSEH_begin_bn_gather5: # Win64 thing, but harmless in other cases
# I can't trust assembler to use specific encoding:-(
.byte 0x4c,0x8d,0x14,0x24 # lea (%rsp),%r10
@@ -1053,6 +1057,7 @@ $code.=<<___;
.type mul_handler,\@abi-omnipotent
.align 16
mul_handler:
+ endbr64
push %rsi
push %rdi
push %rbx
diff --git a/lib/libcrypto/camellia/asm/cmll-x86_64.pl b/lib/libcrypto/camellia/asm/cmll-x86_64.pl
index 586e5d6e93f..3ceed3e8991 100644
--- a/lib/libcrypto/camellia/asm/cmll-x86_64.pl
+++ b/lib/libcrypto/camellia/asm/cmll-x86_64.pl
@@ -116,6 +116,7 @@ $code=<<___;
.type Camellia_EncryptBlock,\@abi-omnipotent
.align 16
Camellia_EncryptBlock:
+ endbr64
movl \$128,%eax
subl $arg0d,%eax
movl \$3,$arg0d
@@ -128,6 +129,7 @@ Camellia_EncryptBlock:
.align 16
.Lenc_rounds:
Camellia_EncryptBlock_Rounds:
+ endbr64
push %rbx
push %rbp
push %r13
@@ -176,6 +178,7 @@ Camellia_EncryptBlock_Rounds:
.type _x86_64_Camellia_encrypt,\@abi-omnipotent
.align 16
_x86_64_Camellia_encrypt:
+ endbr64
xor 0($key),@S[1]
xor 4($key),@S[0] # ^=key[0-3]
xor 8($key),@S[3]
@@ -226,6 +229,7 @@ $code.=<<___;
.type Camellia_DecryptBlock,\@abi-omnipotent
.align 16
Camellia_DecryptBlock:
+ endbr64
movl \$128,%eax
subl $arg0d,%eax
movl \$3,$arg0d
@@ -238,6 +242,7 @@ Camellia_DecryptBlock:
.align 16
.Ldec_rounds:
Camellia_DecryptBlock_Rounds:
+ endbr64
push %rbx
push %rbp
push %r13
@@ -286,6 +291,7 @@ Camellia_DecryptBlock_Rounds:
.type _x86_64_Camellia_decrypt,\@abi-omnipotent
.align 16
_x86_64_Camellia_decrypt:
+ endbr64
xor 0($key),@S[1]
xor 4($key),@S[0] # ^=key[0-3]
xor 8($key),@S[3]
@@ -400,6 +406,7 @@ $code.=<<___;
.type Camellia_Ekeygen,\@function,3
.align 16
Camellia_Ekeygen:
+ endbr64
push %rbx
push %rbp
push %r13
@@ -630,6 +637,7 @@ $code.=<<___;
.type Camellia_cbc_encrypt,\@function,6
.align 16
Camellia_cbc_encrypt:
+ endbr64
cmp \$0,%rdx
je .Lcbc_abort
push %rbx
diff --git a/lib/libcrypto/md5/asm/md5-x86_64.pl b/lib/libcrypto/md5/asm/md5-x86_64.pl
index c902a1b532f..06d69094f44 100755
--- a/lib/libcrypto/md5/asm/md5-x86_64.pl
+++ b/lib/libcrypto/md5/asm/md5-x86_64.pl
@@ -128,6 +128,7 @@ $code .= <<EOF;
.globl md5_block_asm_data_order
.type md5_block_asm_data_order,\@function,3
md5_block_asm_data_order:
+ endbr64
push %rbp
push %rbx
push %r12
diff --git a/lib/libcrypto/modes/asm/ghash-x86_64.pl b/lib/libcrypto/modes/asm/ghash-x86_64.pl
index 71d0822ac91..9ce0c381410 100644
--- a/lib/libcrypto/modes/asm/ghash-x86_64.pl
+++ b/lib/libcrypto/modes/asm/ghash-x86_64.pl
@@ -412,6 +412,7 @@ $code.=<<___;
.type gcm_init_clmul,\@abi-omnipotent
.align 16
gcm_init_clmul:
+ endbr64
movdqu ($Xip),$Hkey
pshufd \$0b01001110,$Hkey,$Hkey # dword swap
@@ -449,6 +450,7 @@ $code.=<<___;
.type gcm_gmult_clmul,\@abi-omnipotent
.align 16
gcm_gmult_clmul:
+ endbr64
movdqu ($Xip),$Xi
movdqa .Lbswap_mask(%rip),$T3
movdqu ($Htbl),$Hkey
@@ -476,6 +478,7 @@ $code.=<<___;
.type gcm_ghash_clmul,\@abi-omnipotent
.align 16
gcm_ghash_clmul:
+ endbr64
___
$code.=<<___ if ($win64);
.LSEH_begin_gcm_ghash_clmul:
@@ -686,6 +689,7 @@ $code.=<<___;
.type se_handler,\@abi-omnipotent
.align 16
se_handler:
+ endbr64
push %rsi
push %rdi
push %rbx
diff --git a/lib/libcrypto/rc4/asm/rc4-md5-x86_64.pl b/lib/libcrypto/rc4/asm/rc4-md5-x86_64.pl
index 501d9e936bb..6d058bd9d4b 100644
--- a/lib/libcrypto/rc4/asm/rc4-md5-x86_64.pl
+++ b/lib/libcrypto/rc4/asm/rc4-md5-x86_64.pl
@@ -109,6 +109,7 @@ $code.=<<___;
.globl $func
.type $func,\@function,$nargs
$func:
+ endbr64
cmp \$0,$len
je .Labort
push %rbx
@@ -453,6 +454,7 @@ $code.=<<___;
.type RC4_set_key,\@function,3
.align 16
RC4_set_key:
+ endbr64
lea 8($dat),$dat
lea ($inp,$len),$inp
neg $len
@@ -494,6 +496,7 @@ RC4_set_key:
.type RC4_options,\@abi-omnipotent
.align 16
RC4_options:
+ endbr64
lea .Lopts(%rip),%rax
ret
.align 64
diff --git a/lib/libcrypto/rc4/asm/rc4-x86_64.pl b/lib/libcrypto/rc4/asm/rc4-x86_64.pl
index 8de869489f6..2bac7d744dd 100755
--- a/lib/libcrypto/rc4/asm/rc4-x86_64.pl
+++ b/lib/libcrypto/rc4/asm/rc4-x86_64.pl
@@ -127,7 +127,9 @@ $code=<<___;
.globl RC4
.type RC4,\@function,4
.align 16
-RC4: or $len,$len
+RC4:
+ endbr64
+ or $len,$len
jne .Lentry
ret
.Lentry:
@@ -433,6 +435,7 @@ $code.=<<___;
.type RC4_set_key,\@function,3
.align 16
RC4_set_key:
+ endbr64
lea 8($dat),$dat
lea ($inp,$len),$inp
neg $len
@@ -505,6 +508,7 @@ RC4_set_key:
.type RC4_options,\@abi-omnipotent
.align 16
RC4_options:
+ endbr64
lea .Lopts(%rip),%rax
mov OPENSSL_ia32cap_P(%rip),%edx
bt \$IA32CAP_BIT0_INTELP4,%edx
diff --git a/lib/libcrypto/sha/asm/sha1-x86_64.pl b/lib/libcrypto/sha/asm/sha1-x86_64.pl
index 43eee73c4ab..e15ff47f88b 100755
--- a/lib/libcrypto/sha/asm/sha1-x86_64.pl
+++ b/lib/libcrypto/sha/asm/sha1-x86_64.pl
@@ -222,6 +222,7 @@ $code.=<<___;
.type sha1_block_data_order,\@function,3
.align 16
sha1_block_data_order:
+ endbr64
mov OPENSSL_ia32cap_P+0(%rip),%r9d
mov OPENSSL_ia32cap_P+4(%rip),%r8d
test \$IA32CAP_MASK1_SSSE3,%r8d # check SSSE3 bit
@@ -309,6 +310,7 @@ $code.=<<___;
.align 16
sha1_block_data_order_ssse3:
_ssse3_shortcut:
+ endbr64
push %rbx
push %rbp
push %r12
@@ -729,6 +731,7 @@ $code.=<<___;
.align 16
sha1_block_data_order_avx:
_avx_shortcut:
+ endbr64
push %rbx
push %rbp
push %r12
@@ -1099,6 +1102,7 @@ $code.=<<___;
.type se_handler,\@abi-omnipotent
.align 16
se_handler:
+ endbr64
push %rsi
push %rdi
push %rbx
diff --git a/lib/libcrypto/sha/asm/sha512-x86_64.pl b/lib/libcrypto/sha/asm/sha512-x86_64.pl
index 0517eab6602..120693feeb7 100755
--- a/lib/libcrypto/sha/asm/sha512-x86_64.pl
+++ b/lib/libcrypto/sha/asm/sha512-x86_64.pl
@@ -175,6 +175,7 @@ $code=<<___;
.type $func,\@function,4
.align 16
$func:
+ endbr64
push %rbx
push %rbp
push %r12
diff --git a/lib/libcrypto/whrlpool/asm/wp-x86_64.pl b/lib/libcrypto/whrlpool/asm/wp-x86_64.pl
index de5d3acfb40..7958f6d2897 100644
--- a/lib/libcrypto/whrlpool/asm/wp-x86_64.pl
+++ b/lib/libcrypto/whrlpool/asm/wp-x86_64.pl
@@ -57,6 +57,7 @@ $code=<<___;
.type $func,\@function,3
.align 16
$func:
+ endbr64
push %rbx
push %rbp
push %r12
diff --git a/lib/libcrypto/x86_64cpuid.pl b/lib/libcrypto/x86_64cpuid.pl
index 1b67d1110f9..dc56732a28b 100644
--- a/lib/libcrypto/x86_64cpuid.pl
+++ b/lib/libcrypto/x86_64cpuid.pl
@@ -18,6 +18,7 @@ print<<___;
.extern OPENSSL_cpuid_setup
.hidden OPENSSL_cpuid_setup
.section .init
+ endbr64
call OPENSSL_cpuid_setup
.extern OPENSSL_ia32cap_P
@@ -29,6 +30,7 @@ print<<___;
.type OPENSSL_ia32_cpuid,\@abi-omnipotent
.align 16
OPENSSL_ia32_cpuid:
+ endbr64
mov %rbx,%r8 # save %rbx
xor %eax,%eax