From: H.J. Lu
Date: Fri, 31 Jan 2020 12:17:26 +0000 (-0800)
Subject: x86_64: Add endbranch at function entries for Intel CET
X-Git-Tag: openssl-3.0.0-alpha1~463
X-Git-Url: https://git.librecmc.org/?a=commitdiff_plain;h=98ad3fe82bd3e7e7f929dd1fa4ef3915426002c0;p=oweals%2Fopenssl.git

x86_64: Add endbranch at function entries for Intel CET

To support Intel CET, all indirect branch targets must start with
endbranch.  This patch adds endbranch to the function entries in the
x86_64 assembly code that are indirect branch targets, as discovered
by running the OpenSSL test suite on an Intel CET machine and by
visual inspection.

Verified with

$ CC="gcc -Wl,-z,cet-report=error" ./Configure shared linux-x86_64 -fcf-protection
$ make
$ make test

and

$ CC="gcc -mx32 -Wl,-z,cet-report=error" ./Configure shared linux-x32 -fcf-protection
$ make
$ make test   # <<< passed with https://github.com/openssl/openssl/pull/10988

Reviewed-by: Tomas Mraz
Reviewed-by: Richard Levitte
(Merged from https://github.com/openssl/openssl/pull/10982)
---

diff --git a/crypto/aes/asm/aes-x86_64.pl b/crypto/aes/asm/aes-x86_64.pl
index 813817ed46..4e417a516b 100755
--- a/crypto/aes/asm/aes-x86_64.pl
+++ b/crypto/aes/asm/aes-x86_64.pl
@@ -606,6 +606,7 @@ $code.=<<___;
 asm_AES_encrypt:
 AES_encrypt:
 .cfi_startproc
+	endbranch
 	mov	%rsp,%rax
 .cfi_def_cfa_register	%rax
 	push	%rbx
@@ -1226,6 +1227,7 @@ $code.=<<___;
 asm_AES_decrypt:
 AES_decrypt:
 .cfi_startproc
+	endbranch
 	mov	%rsp,%rax
 .cfi_def_cfa_register	%rax
 	push	%rbx
@@ -1343,6 +1345,7 @@ $code.=<<___;
 .align	16
 AES_set_encrypt_key:
 .cfi_startproc
+	endbranch
 	push	%rbx
 .cfi_push	%rbx
 	push	%rbp
@@ -1623,6 +1626,7 @@ $code.=<<___;
 .align	16
 AES_set_decrypt_key:
 .cfi_startproc
+	endbranch
 	push	%rbx
 .cfi_push	%rbx
 	push	%rbp
@@ -1737,6 +1741,7 @@ $code.=<<___;
 asm_AES_cbc_encrypt:
 AES_cbc_encrypt:
 .cfi_startproc
+	endbranch
 	cmp	\$0,%rdx	# check length
 	je	.Lcbc_epilogue
 	pushfq
diff --git a/crypto/aes/asm/aesni-x86_64.pl b/crypto/aes/asm/aesni-x86_64.pl
index 79d50da713..7b2ad2866b 100644
--- a/crypto/aes/asm/aesni-x86_64.pl
+++ b/crypto/aes/asm/aesni-x86_64.pl
@@ -277,6 +277,7 @@ $code.=<<___;
 .align	16
 ${PREFIX}_encrypt:
 .cfi_startproc
+	endbranch
 	movups	($inp),$inout0		# load input
 	mov	240($key),$rounds	# key->rounds
 ___
@@ -295,6 +296,7 @@ $code.=<<___;
 .align	16
 ${PREFIX}_decrypt:
 .cfi_startproc
+	endbranch
 	movups	($inp),$inout0		# load input
 	mov	240($key),$rounds	# key->rounds
 ___
@@ -615,6 +617,7 @@ $code.=<<___;
 .align	16
 aesni_ecb_encrypt:
 .cfi_startproc
+	endbranch
 ___
 $code.=<<___ if ($win64);
 	lea	-0x58(%rsp),%rsp
@@ -987,6 +990,7 @@ $code.=<<___;
 .align	16
 aesni_ccm64_encrypt_blocks:
 .cfi_startproc
+	endbranch
 ___
 $code.=<<___ if ($win64);
 	lea	-0x58(%rsp),%rsp
@@ -1079,6 +1083,7 @@ $code.=<<___;
 .align	16
 aesni_ccm64_decrypt_blocks:
 .cfi_startproc
+	endbranch
 ___
 $code.=<<___ if ($win64);
 	lea	-0x58(%rsp),%rsp
@@ -1205,6 +1210,7 @@ $code.=<<___;
 .align	16
 aesni_ctr32_encrypt_blocks:
 .cfi_startproc
+	endbranch
 	cmp	\$1,$len
 	jne	.Lctr32_bulk
@@ -1777,6 +1783,7 @@ $code.=<<___;
 .align	16
 aesni_xts_encrypt:
 .cfi_startproc
+	endbranch
 	lea	(%rsp),%r11		# frame pointer
 .cfi_def_cfa_register	%r11
 	push	%rbp
@@ -2260,6 +2267,7 @@ $code.=<<___;
 .align	16
 aesni_xts_decrypt:
 .cfi_startproc
+	endbranch
 	lea	(%rsp),%r11		# frame pointer
 .cfi_def_cfa_register	%r11
 	push	%rbp
@@ -2785,6 +2793,7 @@ $code.=<<___;
 .align	32
 aesni_ocb_encrypt:
 .cfi_startproc
+	endbranch
 	lea	(%rsp),%rax
 	push	%rbx
 .cfi_push	%rbx
@@ -3251,6 +3260,7 @@ __ocb_encrypt1:
 .align	32
 aesni_ocb_decrypt:
 .cfi_startproc
+	endbranch
 	lea	(%rsp),%rax
 	push	%rbx
 .cfi_push	%rbx
@@ -3739,6 +3749,7 @@ $code.=<<___;
 .align	16
 ${PREFIX}_cbc_encrypt:
 .cfi_startproc
+	endbranch
 	test	$len,$len		# check length
 	jz	.Lcbc_ret
diff --git a/crypto/aes/asm/bsaes-x86_64.pl b/crypto/aes/asm/bsaes-x86_64.pl
index b75d95ffab..2e46802dfe 100644
--- a/crypto/aes/asm/bsaes-x86_64.pl
+++ b/crypto/aes/asm/bsaes-x86_64.pl
@@ -1616,6 +1616,7 @@ $code.=<<___;
 .align	16
 bsaes_cbc_encrypt:
 .cfi_startproc
+	endbranch
 ___
 $code.=<<___ if ($win64);
 	mov	48(%rsp),$arg6		# pull direction flag
@@ -1921,6 +1922,7 @@ $code.=<<___;
 .align	16
 bsaes_ctr32_encrypt_blocks:
 .cfi_startproc
+	endbranch
 	mov	%rsp, %rax
 .Lctr_enc_prologue:
 	push	%rbp
diff --git a/crypto/aes/asm/vpaes-x86_64.pl b/crypto/aes/asm/vpaes-x86_64.pl
index 5c1dc9f9ea..121370658c 100644
--- a/crypto/aes/asm/vpaes-x86_64.pl
+++ b/crypto/aes/asm/vpaes-x86_64.pl
@@ -698,6 +698,7 @@ _vpaes_schedule_mangle:
 .align	16
 ${PREFIX}_set_encrypt_key:
 .cfi_startproc
+	endbranch
 ___
 $code.=<<___ if ($win64);
 	lea	-0xb8(%rsp),%rsp
@@ -748,6 +749,7 @@ $code.=<<___;
 .align	16
 ${PREFIX}_set_decrypt_key:
 .cfi_startproc
+	endbranch
 ___
 $code.=<<___ if ($win64);
 	lea	-0xb8(%rsp),%rsp
@@ -803,6 +805,7 @@ $code.=<<___;
 .align	16
 ${PREFIX}_encrypt:
 .cfi_startproc
+	endbranch
 ___
 $code.=<<___ if ($win64);
 	lea	-0xb8(%rsp),%rsp
@@ -848,6 +851,7 @@ $code.=<<___;
 .align	16
 ${PREFIX}_decrypt:
 .cfi_startproc
+	endbranch
 ___
 $code.=<<___ if ($win64);
 	lea	-0xb8(%rsp),%rsp
@@ -899,6 +903,7 @@ $code.=<<___;
 .align	16
 ${PREFIX}_cbc_encrypt:
 .cfi_startproc
+	endbranch
 	xchg	$key,$len
 ___
 ($len,$key)=($key,$len);
diff --git a/crypto/camellia/asm/cmll-x86_64.pl b/crypto/camellia/asm/cmll-x86_64.pl
index 6865e73a7b..547fdf9176 100644
--- a/crypto/camellia/asm/cmll-x86_64.pl
+++ b/crypto/camellia/asm/cmll-x86_64.pl
@@ -687,6 +687,7 @@ $code.=<<___;
 .align	16
 Camellia_cbc_encrypt:
 .cfi_startproc
+	endbranch
 	cmp	\$0,%rdx
 	je	.Lcbc_abort
 	push	%rbx
diff --git a/crypto/modes/asm/ghash-x86_64.pl b/crypto/modes/asm/ghash-x86_64.pl
index 3af31abc4f..68fd85e99f 100644
--- a/crypto/modes/asm/ghash-x86_64.pl
+++ b/crypto/modes/asm/ghash-x86_64.pl
@@ -241,6 +241,7 @@ $code=<<___;
 .align	16
 gcm_gmult_4bit:
 .cfi_startproc
+	endbranch
 	push	%rbx
 .cfi_push	%rbx
 	push	%rbp		# %rbp and others are pushed exclusively in
@@ -288,6 +289,7 @@ $code.=<<___;
 .align	16
 gcm_ghash_4bit:
 .cfi_startproc
+	endbranch
 	push	%rbx
 .cfi_push	%rbx
 	push	%rbp
@@ -614,6 +616,7 @@ $code.=<<___;
 .align	16
 gcm_gmult_clmul:
 .cfi_startproc
+	endbranch
 .L_gmult_clmul:
 	movdqu	($Xip),$Xi
 	movdqa	.Lbswap_mask(%rip),$T3
@@ -665,6 +668,7 @@ $code.=<<___;
 .align	32
 gcm_ghash_clmul:
 .cfi_startproc
+	endbranch
 .L_ghash_clmul:
 ___
 $code.=<<___ if ($win64);
@@ -1168,6 +1172,7 @@ $code.=<<___;
 .align	32
 gcm_gmult_avx:
 .cfi_startproc
+	endbranch
 	jmp	.L_gmult_clmul
 .cfi_endproc
 .size	gcm_gmult_avx,.-gcm_gmult_avx
@@ -1179,6 +1184,7 @@ $code.=<<___;
 .align	32
 gcm_ghash_avx:
 .cfi_startproc
+	endbranch
 ___
 if ($avx) {
 my ($Xip,$Htbl,$inp,$len)=@_4args;
diff --git a/crypto/poly1305/asm/poly1305-x86_64.pl b/crypto/poly1305/asm/poly1305-x86_64.pl
index c03af53922..e5b841260e 100755
--- a/crypto/poly1305/asm/poly1305-x86_64.pl
+++ b/crypto/poly1305/asm/poly1305-x86_64.pl
@@ -2808,6 +2808,7 @@ $code.=<<___;
 .align	32
 poly1305_blocks_vpmadd52:
 .cfi_startproc
+	endbranch
 	shr	\$4,$len
 	jz	.Lno_data_vpmadd52	# too short
@@ -3741,6 +3742,7 @@ $code.=<<___;
 .align	32
 poly1305_emit_base2_44:
 .cfi_startproc
+	endbranch
 	mov	0($ctx),%r8	# load hash value
 	mov	8($ctx),%r9
 	mov	16($ctx),%r10
diff --git a/crypto/rc4/asm/rc4-x86_64.pl b/crypto/rc4/asm/rc4-x86_64.pl
index b86425e582..57d72a283a 100755
--- a/crypto/rc4/asm/rc4-x86_64.pl
+++ b/crypto/rc4/asm/rc4-x86_64.pl
@@ -142,6 +142,7 @@ $code=<<___;
 .align	16
 RC4:
 .cfi_startproc
+	endbranch
 	or	$len,$len
 	jne	.Lentry
 	ret
@@ -457,6 +458,7 @@ $code.=<<___;
 .align	16
 RC4_set_key:
 .cfi_startproc
+	endbranch
 	lea	8($dat),$dat
 	lea	($inp,$len),$inp
 	neg	$len
@@ -531,6 +533,7 @@ RC4_set_key:
 .align	16
 RC4_options:
 .cfi_startproc
+	endbranch
 	lea	.Lopts(%rip),%rax
 	mov	OPENSSL_ia32cap_P(%rip),%edx
 	bt	\$20,%edx
diff --git a/crypto/x86_64cpuid.pl b/crypto/x86_64cpuid.pl
index 966dddbb7d..be9f33f5f8 100644
--- a/crypto/x86_64cpuid.pl
+++ b/crypto/x86_64cpuid.pl
@@ -42,6 +42,7 @@ print<<___;
 .align	16
 OPENSSL_atomic_add:
 .cfi_startproc
+	endbranch
 	movl	($arg1),%eax
 .Lspin:	leaq	($arg2,%rax),%r8
 	.byte	0xf0		# lock
@@ -58,6 +59,7 @@ OPENSSL_atomic_add:
 .align	16
 OPENSSL_rdtsc:
 .cfi_startproc
+	endbranch
 	rdtsc
 	shl	\$32,%rdx
 	or	%rdx,%rax
@@ -70,6 +72,7 @@ OPENSSL_rdtsc:
 .align	16
 OPENSSL_ia32_cpuid:
 .cfi_startproc
+	endbranch
 	mov	%rbx,%r8		# save %rbx
 .cfi_register	%rbx,%r8
@@ -239,6 +242,7 @@ OPENSSL_ia32_cpuid:
 .align	16
 OPENSSL_cleanse:
 .cfi_startproc
+	endbranch
 	xor	%rax,%rax
 	cmp	\$15,$arg2
 	jae	.Lot
@@ -276,6 +280,7 @@ OPENSSL_cleanse:
 .align	16
 CRYPTO_memcmp:
 .cfi_startproc
+	endbranch
 	xor	%rax,%rax
 	xor	%r10,%r10
 	cmp	\$0,$arg3
@@ -314,6 +319,7 @@ print<<___ if (!$win64);
 .align	16
 OPENSSL_wipe_cpu:
 .cfi_startproc
+	endbranch
 	pxor	%xmm0,%xmm0
 	pxor	%xmm1,%xmm1
 	pxor	%xmm2,%xmm2
@@ -378,6 +384,7 @@ print<<___;
 .align	16
 OPENSSL_instrument_bus:
 .cfi_startproc
+	endbranch
 	mov	$arg1,$out	# tribute to Win64
 	mov	$arg2,$cnt
 	mov	$arg2,$max
@@ -412,6 +419,7 @@ OPENSSL_instrument_bus:
 .align	16
 OPENSSL_instrument_bus2:
 .cfi_startproc
+	endbranch
 	mov	$arg1,$out	# tribute to Win64
 	mov	$arg2,$cnt
 	mov	$arg3,$max
@@ -467,6 +475,7 @@ print<<___;
 .align	16
 OPENSSL_ia32_${rdop}_bytes:
 .cfi_startproc
+	endbranch
 	xor	%rax, %rax	# return value
 	cmp	\$0,$arg2
 	je	.Ldone_${rdop}_bytes
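
Note: "endbranch" in the hunks above is a perlasm keyword, not a raw
mnemonic; the x86_64 perlasm translator is expected to expand it to the
4-byte ENDBR64 instruction (opcode F3 0F 1E FA), which decodes as a
multi-byte NOP on processors without CET, so the annotated functions
keep working on older hardware.  Below is a minimal standalone sketch,
in GNU assembler syntax, of what each annotated entry effectively
becomes; the macro and the function name example_fn are illustrative
assumptions, not part of this patch:

	# Illustrative sketch: expand "endbranch" to the ENDBR64 opcode
	# bytes, mirroring what the perlasm translator is assumed to emit.
	# ENDBR64 marks a legal landing pad for indirect call/jmp under
	# CET indirect branch tracking.
	.macro	endbranch
	.byte	0xf3,0x0f,0x1e,0xfa	# endbr64
	.endm

	.text
	.globl	example_fn
	.type	example_fn,\@function
	.align	16
example_fn:
.cfi_startproc
	endbranch			# first instruction at the entry
	xor	%eax,%eax		# trivial body for illustration
	ret
.cfi_endproc
	.size	example_fn,.-example_fn

Building with -fcf-protection and linking with -Wl,-z,cet-report=error,
as in the verification steps above, makes the linker reject any object
that lacks the CET property note, while on CET-enabled hardware an
indirect branch to an entry missing endbr64 raises a control-protection
fault at run time; together these are what flush out missing endbranch
annotations.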