From 609b0852e4d50251857dbbac3141ba042e35a9ae Mon Sep 17 00:00:00 2001
From: David Benjamin
Date: Mon, 10 Oct 2016 12:01:24 -0400
Subject: [PATCH] Remove trailing whitespace from some files.

The prevailing style seems to not have trailing whitespace, but a few
lines do. This is mostly in the perlasm files, but a few C files got
them after the reformat. This is the result of:

find . -name '*.pl' | xargs sed -E -i '' -e 's/( |'$'\t'')*$//'
find . -name '*.c' | xargs sed -E -i '' -e 's/( |'$'\t'')*$//'
find . -name '*.h' | xargs sed -E -i '' -e 's/( |'$'\t'')*$//'

Then bn_prime.h was excluded since this is a generated file. Note
mkerr.pl has some changes in a heredoc for some help output, but other
lines there lack trailing whitespace too.

Reviewed-by: Kurt Roeckx
Reviewed-by: Matt Caswell
---
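The sed invocation above uses the BSD/macOS form of -i, which expects an
explicit (empty) backup suffix; GNU sed instead takes an optional suffix
attached directly to -i. A rough, untested GNU-sed equivalent of the same
cleanup, plus a quick check for any remaining offenders, would be:

# GNU sed form; bn_prime.h is skipped because it is a generated file
find . \( -name '*.pl' -o -name '*.c' -o -name '*.h' \) \
    -not -name 'bn_prime.h' | xargs sed -E -i -e 's/[[:blank:]]+$//'

# list any lines that still end in spaces or tabs
git grep -nE '[[:space:]]+$' -- '*.pl' '*.c' '*.h'
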
 apps/cms.c | 2 +-
 apps/smime.c | 2 +-
 apps/speed.c | 4 +-
 crypto/aes/asm/aes-586.pl | 12 +-
 crypto/aes/asm/aes-ppc.pl | 8 +-
 crypto/aes/asm/aes-s390x.pl | 16 +-
 crypto/aes/asm/aes-x86_64.pl | 4 +-
 crypto/aes/asm/aesni-mb-x86_64.pl | 28 +--
 crypto/aes/asm/aesni-sha1-x86_64.pl | 2 +-
 crypto/aes/asm/aesni-sha256-x86_64.pl | 2 +-
 crypto/aes/asm/aesni-x86.pl | 2 +-
 crypto/aes/asm/aesni-x86_64.pl | 10 +-
 crypto/aes/asm/aesp8-ppc.pl | 2 +-
 crypto/aes/asm/aesv8-armx.pl | 6 +-
 crypto/aes/asm/bsaes-armv7.pl | 2 +-
 crypto/aes/asm/bsaes-x86_64.pl | 4 +-
 crypto/aes/asm/vpaes-armv8.pl | 8 +-
 crypto/aes/asm/vpaes-ppc.pl | 8 +-
 crypto/aes/asm/vpaes-x86.pl | 8 +-
 crypto/aes/asm/vpaes-x86_64.pl | 20 +-
 crypto/bn/asm/armv4-gf2m.pl | 2 +-
 crypto/bn/asm/armv4-mont.pl | 2 +-
 crypto/bn/asm/bn-586.pl | 24 +--
 crypto/bn/asm/co-586.pl | 12 +-
 crypto/bn/asm/ia64-mont.pl | 4 +-
 crypto/bn/asm/mips.pl | 6 +-
 crypto/bn/asm/parisc-mont.pl | 4 +-
 crypto/bn/asm/ppc-mont.pl | 6 +-
 crypto/bn/asm/ppc.pl | 264 +++++++++++++-------------
 crypto/bn/asm/rsaz-avx2.pl | 8 +-
 crypto/bn/asm/rsaz-x86_64.pl | 36 ++--
 crypto/bn/asm/s390x-gf2m.pl | 2 +-
 crypto/bn/asm/via-mont.pl | 2 +-
 crypto/bn/asm/x86-mont.pl | 2 +-
 crypto/bn/asm/x86_64-mont5.pl | 10 +-
 crypto/camellia/asm/cmll-x86.pl | 6 +-
 crypto/cast/asm/cast-586.pl | 6 +-
 crypto/chacha/asm/chacha-armv4.pl | 4 +-
 crypto/chacha/asm/chacha-armv8.pl | 4 +-
 crypto/chacha/asm/chacha-ppc.pl | 4 +-
 crypto/des/asm/crypt586.pl | 4 +-
 crypto/des/asm/des-586.pl | 6 +-
 crypto/des/asm/desboth.pl | 2 +-
 crypto/ec/asm/ecp_nistz256-armv8.pl | 2 +-
 crypto/ec/asm/ecp_nistz256-sparcv9.pl | 2 +-
 crypto/ec/asm/ecp_nistz256-x86.pl | 2 +-
 crypto/ec/asm/ecp_nistz256-x86_64.pl | 16 +-
 crypto/md5/asm/md5-586.pl | 2 +-
 crypto/md5/asm/md5-sparcv9.pl | 2 +-
 crypto/mips_arch.h | 2 +-
 crypto/modes/asm/ghash-armv4.pl | 4 +-
 crypto/modes/asm/ghash-s390x.pl | 2 +-
 crypto/modes/asm/ghash-x86.pl | 6 +-
 crypto/modes/asm/ghash-x86_64.pl | 8 +-
 crypto/perlasm/cbc.pl | 10 +-
 crypto/perlasm/ppc-xlate.pl | 2 +-
 crypto/perlasm/sparcv9_modes.pl | 16 +-
 crypto/perlasm/x86_64-xlate.pl | 12 +-
 crypto/perlasm/x86nasm.pl | 2 +-
 crypto/rc4/asm/rc4-c64xplus.pl | 2 +-
 crypto/rc4/asm/rc4-md5-x86_64.pl | 4 +-
 crypto/rc4/asm/rc4-parisc.pl | 4 +-
 crypto/rc4/asm/rc4-x86_64.pl | 2 +-
 crypto/ripemd/asm/rmd-586.pl | 12 +-
 crypto/sha/asm/sha1-586.pl | 2 +-
 crypto/sha/asm/sha1-mb-x86_64.pl | 4 +-
 crypto/sha/asm/sha1-sparcv9.pl | 2 +-
 crypto/sha/asm/sha1-sparcv9a.pl | 2 +-
 crypto/sha/asm/sha1-x86_64.pl | 2 +-
 crypto/sha/asm/sha256-586.pl | 6 +-
 crypto/sha/asm/sha256-mb-x86_64.pl | 2 +-
 crypto/sha/asm/sha512-586.pl | 2 +-
 crypto/sha/asm/sha512-armv8.pl | 2 +-
 crypto/sha/asm/sha512-parisc.pl | 4 +-
 crypto/sha/asm/sha512-s390x.pl | 2 +-
 crypto/sha/asm/sha512-sparcv9.pl | 4 +-
 crypto/sha/asm/sha512-x86_64.pl | 2 +-
 crypto/ts/ts_rsp_verify.c | 2 +-
 crypto/whrlpool/asm/wp-mmx.pl | 4 +-
 crypto/x509v3/v3_enum.c | 2 +-
 crypto/x509v3/v3_skey.c | 2 +-
 crypto/x86cpuid.pl | 2 +-
 engines/asm/e_padlock-x86_64.pl | 2 +-
 include/openssl/x509.h | 2 +-
 ssl/packet.c | 2 +-
 ssl/packet_locl.h | 2 +-
 test/pkits-test.pl | 2 +-
 test/recipes/tconversion.pl | 2 +-
 test/wpackettest.c | 2 +-
 util/ck_errf.pl | 2 +-
 util/copy.pl | 4 +-
 util/fipslink.pl | 2 +-
 util/mkdef.pl | 8 +-
 util/mkerr.pl | 16 +-
 util/su-filter.pl | 2 +-
 95 files changed, 390 insertions(+), 390 deletions(-)

diff --git a/apps/cms.c b/apps/cms.c index 133dc021ae..21f096192a 100644 --- a/apps/cms.c +++ b/apps/cms.c @@ -146,7 +146,7 @@ OPTIONS cms_options[] = { "Do not load certificates from the default certificates directory"}, {"content", OPT_CONTENT, '<', "Supply or override content for detached signature"}, - {"print", OPT_PRINT, '-', + {"print", OPT_PRINT, '-', "For the -cmsout operation print out all fields of the CMS structure"}, {"secretkey", OPT_SECRETKEY, 's'}, {"secretkeyid", OPT_SECRETKEYID, 's'},
diff --git a/apps/smime.c b/apps/smime.c index 1f4091fa99..0c8660f537 100644 --- a/apps/smime.c +++ b/apps/smime.c @@ -89,7 +89,7 @@ OPTIONS smime_options[] = { {"no-CApath", OPT_NOCAPATH, '-', "Do not load certificates from the default certificates directory"}, {"resign", OPT_RESIGN, '-', "Resign a signed message"}, - {"nochain", OPT_NOCHAIN, '-', + {"nochain", OPT_NOCHAIN, '-', "set PKCS7_NOCHAIN so certificates contained in the message are not used as untrusted CAs" }, {"nosmimecap", OPT_NOSMIMECAP, '-', "Omit the SMIMECapabilities attribute"}, {"stream", OPT_STREAM, '-', "Enable CMS streaming" },
diff --git a/apps/speed.c b/apps/speed.c index e6bdc5dd2f..e9dc8a9fa7 100644 --- a/apps/speed.c +++ b/apps/speed.c @@ -1187,8 +1187,8 @@ static int run_benchmark(int async_jobs, continue; #endif - ret = ASYNC_start_job(&loopargs[i].inprogress_job, - loopargs[i].wait_ctx, &job_op_count, loop_function, + ret = ASYNC_start_job(&loopargs[i].inprogress_job, + loopargs[i].wait_ctx, &job_op_count, loop_function, (void *)(loopargs + i), sizeof(loopargs_t)); switch (ret) { case ASYNC_PAUSE:
diff --git a/crypto/aes/asm/aes-586.pl b/crypto/aes/asm/aes-586.pl index 1ba356508a..61bdce865c 100755 --- a/crypto/aes/asm/aes-586.pl +++ b/crypto/aes/asm/aes-586.pl @@ -123,7 +123,7 @@ # words every cache-line is *guaranteed* to be accessed within ~50 # cycles window. Why just SSE? Because it's needed on hyper-threading # CPU! Which is also why it's prefetched with 64 byte stride. Best
Best -# part is that it has no negative effect on performance:-) +# part is that it has no negative effect on performance:-) # # Version 4.3 implements switch between compact and non-compact block # functions in AES_cbc_encrypt depending on how much data was asked @@ -585,7 +585,7 @@ sub enctransform() # +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+ # | mm4 | mm0 | # +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+ -# | s3 | s2 | s1 | s0 | +# | s3 | s2 | s1 | s0 | # +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+ # |15|14|13|12|11|10| 9| 8| 7| 6| 5| 4| 3| 2| 1| 0| # +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+ @@ -805,7 +805,7 @@ sub encstep() if ($i==3) { $tmp=$s[3]; &mov ($s[2],$__s1); }##%ecx elsif($i==2){ &movz ($tmp,&HB($s[3])); }#%ebx[2] - else { &mov ($tmp,$s[3]); + else { &mov ($tmp,$s[3]); &shr ($tmp,24) } &xor ($out,&DWP(1,$te,$tmp,8)); if ($i<2) { &mov (&DWP(4+4*$i,"esp"),$out); } @@ -1558,7 +1558,7 @@ sub sse_deccompact() &pxor ("mm1","mm3"); &pxor ("mm5","mm7"); # tp4 &pshufw ("mm3","mm1",0xb1); &pshufw ("mm7","mm5",0xb1); &pxor ("mm0","mm1"); &pxor ("mm4","mm5"); # ^= tp4 - &pxor ("mm0","mm3"); &pxor ("mm4","mm7"); # ^= ROTATE(tp4,16) + &pxor ("mm0","mm3"); &pxor ("mm4","mm7"); # ^= ROTATE(tp4,16) &pxor ("mm3","mm3"); &pxor ("mm7","mm7"); &pcmpgtb("mm3","mm1"); &pcmpgtb("mm7","mm5"); @@ -2028,7 +2028,7 @@ sub declast() { # stack frame layout # -4(%esp) # return address 0(%esp) -# 0(%esp) # s0 backing store 4(%esp) +# 0(%esp) # s0 backing store 4(%esp) # 4(%esp) # s1 backing store 8(%esp) # 8(%esp) # s2 backing store 12(%esp) # 12(%esp) # s3 backing store 16(%esp) @@ -2738,7 +2738,7 @@ sub enckey() &mov (&DWP(80,"edi"),10); # setup number of rounds &xor ("eax","eax"); &jmp (&label("exit")); - + &set_label("12rounds"); &mov ("eax",&DWP(0,"esi")); # copy first 6 dwords &mov ("ebx",&DWP(4,"esi")); diff --git a/crypto/aes/asm/aes-ppc.pl b/crypto/aes/asm/aes-ppc.pl index 1558d8e454..184c28a291 100644 --- a/crypto/aes/asm/aes-ppc.pl +++ b/crypto/aes/asm/aes-ppc.pl @@ -1433,10 +1433,10 @@ $code.=<<___; xor $s1,$s1,$acc05 xor $s2,$s2,$acc06 xor $s3,$s3,$acc07 - xor $s0,$s0,$acc08 # ^= ROTATE(r8,8) - xor $s1,$s1,$acc09 - xor $s2,$s2,$acc10 - xor $s3,$s3,$acc11 + xor $s0,$s0,$acc08 # ^= ROTATE(r8,8) + xor $s1,$s1,$acc09 + xor $s2,$s2,$acc10 + xor $s3,$s3,$acc11 b Ldec_compact_loop .align 4 diff --git a/crypto/aes/asm/aes-s390x.pl b/crypto/aes/asm/aes-s390x.pl index a93d601913..9c17f0e5c2 100644 --- a/crypto/aes/asm/aes-s390x.pl +++ b/crypto/aes/asm/aes-s390x.pl @@ -404,7 +404,7 @@ _s390x_AES_encrypt: or $s1,$t1 or $t2,$i2 or $t3,$i3 - + srlg $i1,$s2,`8-3` # i0 srlg $i2,$s2,`16-3` # i1 nr $i1,$mask @@ -457,7 +457,7 @@ _s390x_AES_encrypt: x $s2,24($key) x $s3,28($key) - br $ra + br $ra .size _s390x_AES_encrypt,.-_s390x_AES_encrypt ___ @@ -779,7 +779,7 @@ _s390x_AES_decrypt: x $s2,24($key) x $s3,28($key) - br $ra + br $ra .size _s390x_AES_decrypt,.-_s390x_AES_decrypt ___ @@ -1297,7 +1297,7 @@ $code.=<<___; .Lcbc_enc_done: l${g} $ivp,6*$SIZE_T($sp) st $s0,0($ivp) - st $s1,4($ivp) + st $s1,4($ivp) st $s2,8($ivp) st $s3,12($ivp) @@ -1635,7 +1635,7 @@ $code.=<<___ if(1); llgc $len,2*$SIZE_T-1($sp) nill $len,0x0f # $len%=16 br $ra - + .align 16 .Lxts_km_vanilla: ___ @@ -1862,7 +1862,7 @@ $code.=<<___; xgr $s1,%r1 lrvgr $s1,$s1 # flip byte order lrvgr $s3,$s3 - srlg $s0,$s1,32 # smash the tweak to 4x32-bits + srlg $s0,$s1,32 # smash the tweak to 4x32-bits stg $s1,$tweak+0($sp) # save the tweak llgfr $s1,$s1 srlg $s2,$s3,32 @@ -1913,7 +1913,7 @@ $code.=<<___; xgr $s1,%r1 lrvgr $s1,$s1 
# flip byte order lrvgr $s3,$s3 - srlg $s0,$s1,32 # smash the tweak to 4x32-bits + srlg $s0,$s1,32 # smash the tweak to 4x32-bits stg $s1,$tweak+0($sp) # save the tweak llgfr $s1,$s1 srlg $s2,$s3,32 @@ -2105,7 +2105,7 @@ $code.=<<___; xgr $s1,%r1 lrvgr $s1,$s1 # flip byte order lrvgr $s3,$s3 - srlg $s0,$s1,32 # smash the tweak to 4x32-bits + srlg $s0,$s1,32 # smash the tweak to 4x32-bits stg $s1,$tweak+0($sp) # save the tweak llgfr $s1,$s1 srlg $s2,$s3,32 diff --git a/crypto/aes/asm/aes-x86_64.pl b/crypto/aes/asm/aes-x86_64.pl index ce4ca30b1a..ae7fde20fe 100755 --- a/crypto/aes/asm/aes-x86_64.pl +++ b/crypto/aes/asm/aes-x86_64.pl @@ -1298,7 +1298,7 @@ $code.=<<___; AES_set_encrypt_key: push %rbx push %rbp - push %r12 # redundant, but allows to share + push %r12 # redundant, but allows to share push %r13 # exception handler... push %r14 push %r15 @@ -1424,7 +1424,7 @@ $code.=<<___; xor %rax,%rax jmp .Lexit -.L14rounds: +.L14rounds: mov 0(%rsi),%rax # copy first 8 dwords mov 8(%rsi),%rbx mov 16(%rsi),%rcx diff --git a/crypto/aes/asm/aesni-mb-x86_64.pl b/crypto/aes/asm/aesni-mb-x86_64.pl index aa2735e06a..fcef7c62fa 100644 --- a/crypto/aes/asm/aesni-mb-x86_64.pl +++ b/crypto/aes/asm/aesni-mb-x86_64.pl @@ -134,7 +134,7 @@ $code.=<<___ if ($win64); movaps %xmm10,0x40(%rsp) movaps %xmm11,0x50(%rsp) movaps %xmm12,0x60(%rsp) - movaps %xmm13,-0x68(%rax) # not used, saved to share se_handler + movaps %xmm13,-0x68(%rax) # not used, saved to share se_handler movaps %xmm14,-0x58(%rax) movaps %xmm15,-0x48(%rax) ___ @@ -308,9 +308,9 @@ $code.=<<___; movups @out[0],-16(@outptr[0],$offset) pxor @inp[0],@out[0] - movups @out[1],-16(@outptr[1],$offset) + movups @out[1],-16(@outptr[1],$offset) pxor @inp[1],@out[1] - movups @out[2],-16(@outptr[2],$offset) + movups @out[2],-16(@outptr[2],$offset) pxor @inp[2],@out[2] movups @out[3],-16(@outptr[3],$offset) pxor @inp[3],@out[3] @@ -393,7 +393,7 @@ $code.=<<___ if ($win64); movaps %xmm10,0x40(%rsp) movaps %xmm11,0x50(%rsp) movaps %xmm12,0x60(%rsp) - movaps %xmm13,-0x68(%rax) # not used, saved to share se_handler + movaps %xmm13,-0x68(%rax) # not used, saved to share se_handler movaps %xmm14,-0x58(%rax) movaps %xmm15,-0x48(%rax) ___ @@ -563,10 +563,10 @@ $code.=<<___; movups @out[0],-16(@outptr[0],$offset) movdqu (@inptr[0],$offset),@out[0] - movups @out[1],-16(@outptr[1],$offset) + movups @out[1],-16(@outptr[1],$offset) movdqu (@inptr[1],$offset),@out[1] pxor $zero,@out[0] - movups @out[2],-16(@outptr[2],$offset) + movups @out[2],-16(@outptr[2],$offset) movdqu (@inptr[2],$offset),@out[2] pxor $zero,@out[1] movups @out[3],-16(@outptr[3],$offset) @@ -835,10 +835,10 @@ $code.=<<___; vmovups @out[0],-16(@ptr[0]) # write output sub $offset,@ptr[0] # switch to input vpxor 0x00($offload),@out[0],@out[0] - vmovups @out[1],-16(@ptr[1]) + vmovups @out[1],-16(@ptr[1]) sub `64+1*8`(%rsp),@ptr[1] vpxor 0x10($offload),@out[1],@out[1] - vmovups @out[2],-16(@ptr[2]) + vmovups @out[2],-16(@ptr[2]) sub `64+2*8`(%rsp),@ptr[2] vpxor 0x20($offload),@out[2],@out[2] vmovups @out[3],-16(@ptr[3]) @@ -847,10 +847,10 @@ $code.=<<___; vmovups @out[4],-16(@ptr[4]) sub `64+4*8`(%rsp),@ptr[4] vpxor @inp[0],@out[4],@out[4] - vmovups @out[5],-16(@ptr[5]) + vmovups @out[5],-16(@ptr[5]) sub `64+5*8`(%rsp),@ptr[5] vpxor @inp[1],@out[5],@out[5] - vmovups @out[6],-16(@ptr[6]) + vmovups @out[6],-16(@ptr[6]) sub `64+6*8`(%rsp),@ptr[6] vpxor @inp[2],@out[6],@out[6] vmovups @out[7],-16(@ptr[7]) @@ -1128,12 +1128,12 @@ $code.=<<___; sub $offset,@ptr[0] # switch to input vmovdqu 128+0(%rsp),@out[0] vpxor 
0x70($offload),@out[7],@out[7] - vmovups @out[1],-16(@ptr[1]) + vmovups @out[1],-16(@ptr[1]) sub `64+1*8`(%rsp),@ptr[1] vmovdqu @out[0],0x00($offload) vpxor $zero,@out[0],@out[0] vmovdqu 128+16(%rsp),@out[1] - vmovups @out[2],-16(@ptr[2]) + vmovups @out[2],-16(@ptr[2]) sub `64+2*8`(%rsp),@ptr[2] vmovdqu @out[1],0x10($offload) vpxor $zero,@out[1],@out[1] @@ -1149,11 +1149,11 @@ $code.=<<___; vpxor $zero,@out[3],@out[3] vmovdqu @inp[0],0x40($offload) vpxor @inp[0],$zero,@out[4] - vmovups @out[5],-16(@ptr[5]) + vmovups @out[5],-16(@ptr[5]) sub `64+5*8`(%rsp),@ptr[5] vmovdqu @inp[1],0x50($offload) vpxor @inp[1],$zero,@out[5] - vmovups @out[6],-16(@ptr[6]) + vmovups @out[6],-16(@ptr[6]) sub `64+6*8`(%rsp),@ptr[6] vmovdqu @inp[2],0x60($offload) vpxor @inp[2],$zero,@out[6] diff --git a/crypto/aes/asm/aesni-sha1-x86_64.pl b/crypto/aes/asm/aesni-sha1-x86_64.pl index 4b979a7346..a50795e263 100644 --- a/crypto/aes/asm/aesni-sha1-x86_64.pl +++ b/crypto/aes/asm/aesni-sha1-x86_64.pl @@ -793,7 +793,7 @@ sub body_00_19_dec () { # ((c^d)&b)^d sub body_20_39_dec () { # b^d^c # on entry @T[0]=b^d return &body_40_59_dec() if ($rx==39); - + my @r=@body_20_39; unshift (@r,@aes256_dec[$rx]) if (@aes256_dec[$rx]); diff --git a/crypto/aes/asm/aesni-sha256-x86_64.pl b/crypto/aes/asm/aesni-sha256-x86_64.pl index a5fde2e4d1..ba4964a850 100644 --- a/crypto/aes/asm/aesni-sha256-x86_64.pl +++ b/crypto/aes/asm/aesni-sha256-x86_64.pl @@ -884,7 +884,7 @@ if ($avx>1) {{ ###################################################################### # AVX2+BMI code path # -my $a5=$SZ==4?"%esi":"%rsi"; # zap $inp +my $a5=$SZ==4?"%esi":"%rsi"; # zap $inp my $PUSH8=8*2*$SZ; use integer; diff --git a/crypto/aes/asm/aesni-x86.pl b/crypto/aes/asm/aesni-x86.pl index ed1a47c30c..c34d9bf4af 100644 --- a/crypto/aes/asm/aesni-x86.pl +++ b/crypto/aes/asm/aesni-x86.pl @@ -1051,7 +1051,7 @@ if ($PREFIX eq "aesni") { &set_label("ctr32_one_shortcut",16); &movups ($inout0,&QWP(0,$rounds_)); # load ivec &mov ($rounds,&DWP(240,$key)); - + &set_label("ctr32_one"); if ($inline) { &aesni_inline_generate1("enc"); } diff --git a/crypto/aes/asm/aesni-x86_64.pl b/crypto/aes/asm/aesni-x86_64.pl index 25dd120dd2..397e82f8c7 100644 --- a/crypto/aes/asm/aesni-x86_64.pl +++ b/crypto/aes/asm/aesni-x86_64.pl @@ -34,7 +34,7 @@ # ECB 4.25/4.25 1.38/1.38 1.28/1.28 1.26/1.26 1.26/1.26 # CTR 5.42/5.42 1.92/1.92 1.44/1.44 1.28/1.28 1.26/1.26 # CBC 4.38/4.43 4.15/1.43 4.07/1.32 4.07/1.29 4.06/1.28 -# CCM 5.66/9.42 4.42/5.41 4.16/4.40 4.09/4.15 4.06/4.07 +# CCM 5.66/9.42 4.42/5.41 4.16/4.40 4.09/4.15 4.06/4.07 # OFB 5.42/5.42 4.64/4.64 4.44/4.44 4.39/4.39 4.38/4.38 # CFB 5.73/5.85 5.56/5.62 5.48/5.56 5.47/5.55 5.47/5.55 # @@ -118,7 +118,7 @@ # performance is achieved by interleaving instructions working on # independent blocks. In which case asymptotic limit for such modes # can be obtained by dividing above mentioned numbers by AES -# instructions' interleave factor. Westmere can execute at most 3 +# instructions' interleave factor. Westmere can execute at most 3 # instructions at a time, meaning that optimal interleave factor is 3, # and that's where the "magic" number of 1.25 come from. "Optimal # interleave factor" means that increase of interleave factor does @@ -312,7 +312,7 @@ ___ # on 2x subroutine on Atom Silvermont account. For processors that # can schedule aes[enc|dec] every cycle optimal interleave factor # equals to corresponding instructions latency. 8x is optimal for -# * Bridge and "super-optimal" for other Intel CPUs... 
+# * Bridge and "super-optimal" for other Intel CPUs... sub aesni_generate2 { my $dir=shift; @@ -1271,7 +1271,7 @@ $code.=<<___; lea 7($ctr),%r9 mov %r10d,0x60+12(%rsp) bswap %r9d - mov OPENSSL_ia32cap_P+4(%rip),%r10d + mov OPENSSL_ia32cap_P+4(%rip),%r10d xor $key0,%r9d and \$`1<<26|1<<22`,%r10d # isolate XSAVE+MOVBE mov %r9d,0x70+12(%rsp) @@ -1551,7 +1551,7 @@ $code.=<<___; .Lctr32_tail: # note that at this point $inout0..5 are populated with - # counter values xor-ed with 0-round key + # counter values xor-ed with 0-round key lea 16($key),$key cmp \$4,$len jb .Lctr32_loop3 diff --git a/crypto/aes/asm/aesp8-ppc.pl b/crypto/aes/asm/aesp8-ppc.pl index 3fdf1ecda0..0497953cf5 100755 --- a/crypto/aes/asm/aesp8-ppc.pl +++ b/crypto/aes/asm/aesp8-ppc.pl @@ -3773,7 +3773,7 @@ foreach(split("\n",$code)) { if ($flavour =~ /le$/o) { SWITCH: for($conv) { /\?inv/ && do { @bytes=map($_^0xf,@bytes); last; }; - /\?rev/ && do { @bytes=reverse(@bytes); last; }; + /\?rev/ && do { @bytes=reverse(@bytes); last; }; } } diff --git a/crypto/aes/asm/aesv8-armx.pl b/crypto/aes/asm/aesv8-armx.pl index 9246dbb437..954c041f1e 100755 --- a/crypto/aes/asm/aesv8-armx.pl +++ b/crypto/aes/asm/aesv8-armx.pl @@ -961,21 +961,21 @@ if ($flavour =~ /64/) { ######## 64-bit code $arg =~ m/q([0-9]+),\s*\{q([0-9]+)\},\s*q([0-9]+)/o && sprintf "vtbl.8 d%d,{q%d},d%d\n\t". - "vtbl.8 d%d,{q%d},d%d", 2*$1,$2,2*$3, 2*$1+1,$2,2*$3+1; + "vtbl.8 d%d,{q%d},d%d", 2*$1,$2,2*$3, 2*$1+1,$2,2*$3+1; } sub unvdup32 { my $arg=shift; $arg =~ m/q([0-9]+),\s*q([0-9]+)\[([0-3])\]/o && - sprintf "vdup.32 q%d,d%d[%d]",$1,2*$2+($3>>1),$3&1; + sprintf "vdup.32 q%d,d%d[%d]",$1,2*$2+($3>>1),$3&1; } sub unvmov32 { my $arg=shift; $arg =~ m/q([0-9]+)\[([0-3])\],(.*)/o && - sprintf "vmov.32 d%d[%d],%s",2*$1+($2>>1),$2&1,$3; + sprintf "vmov.32 d%d[%d],%s",2*$1+($2>>1),$2&1,$3; } foreach(split("\n",$code)) { diff --git a/crypto/aes/asm/bsaes-armv7.pl b/crypto/aes/asm/bsaes-armv7.pl index 12091ef9c4..33295881e1 100644 --- a/crypto/aes/asm/bsaes-armv7.pl +++ b/crypto/aes/asm/bsaes-armv7.pl @@ -91,7 +91,7 @@ my @s=@_[12..15]; sub InBasisChange { # input in lsb > [b0, b1, b2, b3, b4, b5, b6, b7] < msb -# output in lsb > [b6, b5, b0, b3, b7, b1, b4, b2] < msb +# output in lsb > [b6, b5, b0, b3, b7, b1, b4, b2] < msb my @b=@_[0..7]; $code.=<<___; veor @b[2], @b[2], @b[1] diff --git a/crypto/aes/asm/bsaes-x86_64.pl b/crypto/aes/asm/bsaes-x86_64.pl index 6b14a517dc..d257a3efe6 100644 --- a/crypto/aes/asm/bsaes-x86_64.pl +++ b/crypto/aes/asm/bsaes-x86_64.pl @@ -129,7 +129,7 @@ my @s=@_[12..15]; sub InBasisChange { # input in lsb > [b0, b1, b2, b3, b4, b5, b6, b7] < msb -# output in lsb > [b6, b5, b0, b3, b7, b1, b4, b2] < msb +# output in lsb > [b6, b5, b0, b3, b7, b1, b4, b2] < msb my @b=@_[0..7]; $code.=<<___; pxor @b[6], @b[5] @@ -379,7 +379,7 @@ $code.=<<___; pxor @s[0], @t[3] pxor @s[1], @t[2] pxor @s[2], @t[1] - pxor @s[3], @t[0] + pxor @s[3], @t[0] #Inv_GF16 \t0, \t1, \t2, \t3, \s0, \s1, \s2, \s3 diff --git a/crypto/aes/asm/vpaes-armv8.pl b/crypto/aes/asm/vpaes-armv8.pl index d6b5f561c4..2e704a2124 100755 --- a/crypto/aes/asm/vpaes-armv8.pl +++ b/crypto/aes/asm/vpaes-armv8.pl @@ -769,7 +769,7 @@ _vpaes_schedule_core: ld1 {v0.16b}, [$inp] // vmovdqu 16(%rdi),%xmm0 # load key part 2 (unaligned) bl _vpaes_schedule_transform // input transform mov $inp, #7 // mov \$7, %esi - + .Loop_schedule_256: sub $inp, $inp, #1 // dec %esi bl _vpaes_schedule_mangle // output low result @@ -778,7 +778,7 @@ _vpaes_schedule_core: // high round bl _vpaes_schedule_round cbz $inp, 
.Lschedule_mangle_last - bl _vpaes_schedule_mangle + bl _vpaes_schedule_mangle // low round. swap xmm7 and xmm6 dup v0.4s, v0.s[3] // vpshufd \$0xFF, %xmm0, %xmm0 @@ -787,7 +787,7 @@ _vpaes_schedule_core: mov v7.16b, v6.16b // vmovdqa %xmm6, %xmm7 bl _vpaes_schedule_low_round mov v7.16b, v5.16b // vmovdqa %xmm5, %xmm7 - + b .Loop_schedule_256 ## @@ -814,7 +814,7 @@ _vpaes_schedule_core: .Lschedule_mangle_last_dec: ld1 {v20.2d-v21.2d}, [x11] // reload constants - sub $out, $out, #16 // add \$-16, %rdx + sub $out, $out, #16 // add \$-16, %rdx eor v0.16b, v0.16b, v16.16b // vpxor .Lk_s63(%rip), %xmm0, %xmm0 bl _vpaes_schedule_transform // output transform st1 {v0.2d}, [$out] // vmovdqu %xmm0, (%rdx) # save last key diff --git a/crypto/aes/asm/vpaes-ppc.pl b/crypto/aes/asm/vpaes-ppc.pl index 7f81209927..3563f6d60f 100644 --- a/crypto/aes/asm/vpaes-ppc.pl +++ b/crypto/aes/asm/vpaes-ppc.pl @@ -1074,7 +1074,7 @@ Loop_schedule_256: # high round bl _vpaes_schedule_round bdz Lschedule_mangle_last # dec %esi - bl _vpaes_schedule_mangle + bl _vpaes_schedule_mangle # low round. swap xmm7 and xmm6 ?vspltw v0, v0, 3 # vpshufd \$0xFF, %xmm0, %xmm0 @@ -1082,7 +1082,7 @@ Loop_schedule_256: vmr v7, v6 # vmovdqa %xmm6, %xmm7 bl _vpaes_schedule_low_round vmr v7, v5 # vmovdqa %xmm5, %xmm7 - + b Loop_schedule_256 ## ## .aes_schedule_mangle_last @@ -1130,7 +1130,7 @@ Lschedule_mangle_last: Lschedule_mangle_last_dec: lvx $iptlo, r11, r12 # reload $ipt lvx $ipthi, r9, r12 - addi $out, $out, -16 # add \$-16, %rdx + addi $out, $out, -16 # add \$-16, %rdx vxor v0, v0, v26 # vpxor .Lk_s63(%rip), %xmm0, %xmm0 bl _vpaes_schedule_transform # output transform @@ -1565,7 +1565,7 @@ foreach (split("\n",$code)) { if ($flavour =~ /le$/o) { SWITCH: for($conv) { /\?inv/ && do { @bytes=map($_^0xf,@bytes); last; }; - /\?rev/ && do { @bytes=reverse(@bytes); last; }; + /\?rev/ && do { @bytes=reverse(@bytes); last; }; } } diff --git a/crypto/aes/asm/vpaes-x86.pl b/crypto/aes/asm/vpaes-x86.pl index 47615c0795..d9157fa06d 100644 --- a/crypto/aes/asm/vpaes-x86.pl +++ b/crypto/aes/asm/vpaes-x86.pl @@ -445,7 +445,7 @@ $k_dsbo=0x2c0; # decryption sbox final output ## &set_label("schedule_192",16); &movdqu ("xmm0",&QWP(8,$inp)); # load key part 2 (very unaligned) - &call ("_vpaes_schedule_transform"); # input transform + &call ("_vpaes_schedule_transform"); # input transform &movdqa ("xmm6","xmm0"); # save short part &pxor ("xmm4","xmm4"); # clear 4 &movhlps("xmm6","xmm4"); # clobber low side with zeros @@ -476,7 +476,7 @@ $k_dsbo=0x2c0; # decryption sbox final output ## &set_label("schedule_256",16); &movdqu ("xmm0",&QWP(16,$inp)); # load key part 2 (unaligned) - &call ("_vpaes_schedule_transform"); # input transform + &call ("_vpaes_schedule_transform"); # input transform &mov ($round,7); &set_label("loop_schedule_256"); @@ -487,7 +487,7 @@ $k_dsbo=0x2c0; # decryption sbox final output &call ("_vpaes_schedule_round"); &dec ($round); &jz (&label("schedule_mangle_last")); - &call ("_vpaes_schedule_mangle"); + &call ("_vpaes_schedule_mangle"); # low round. 
swap xmm7 and xmm6 &pshufd ("xmm0","xmm0",0xFF); @@ -610,7 +610,7 @@ $k_dsbo=0x2c0; # decryption sbox final output # subbyte &movdqa ("xmm4",&QWP($k_s0F,$const)); &movdqa ("xmm5",&QWP($k_inv,$const)); # 4 : 1/j - &movdqa ("xmm1","xmm4"); + &movdqa ("xmm1","xmm4"); &pandn ("xmm1","xmm0"); &psrld ("xmm1",4); # 1 = i &pand ("xmm0","xmm4"); # 0 = k diff --git a/crypto/aes/asm/vpaes-x86_64.pl b/crypto/aes/asm/vpaes-x86_64.pl index 265b6aa362..dd1f13a271 100644 --- a/crypto/aes/asm/vpaes-x86_64.pl +++ b/crypto/aes/asm/vpaes-x86_64.pl @@ -171,7 +171,7 @@ _vpaes_encrypt_core: pshufb %xmm1, %xmm0 ret .size _vpaes_encrypt_core,.-_vpaes_encrypt_core - + ## ## Decryption core ## @@ -332,7 +332,7 @@ _vpaes_schedule_core: ## .Lschedule_128: mov \$10, %esi - + .Loop_schedule_128: call _vpaes_schedule_round dec %rsi @@ -366,7 +366,7 @@ _vpaes_schedule_core: .Loop_schedule_192: call _vpaes_schedule_round - palignr \$8,%xmm6,%xmm0 + palignr \$8,%xmm6,%xmm0 call _vpaes_schedule_mangle # save key n call _vpaes_schedule_192_smear call _vpaes_schedule_mangle # save key n+1 @@ -392,7 +392,7 @@ _vpaes_schedule_core: movdqu 16(%rdi),%xmm0 # load key part 2 (unaligned) call _vpaes_schedule_transform # input transform mov \$7, %esi - + .Loop_schedule_256: call _vpaes_schedule_mangle # output low result movdqa %xmm0, %xmm6 # save cur_lo in xmm6 @@ -401,7 +401,7 @@ _vpaes_schedule_core: call _vpaes_schedule_round dec %rsi jz .Lschedule_mangle_last - call _vpaes_schedule_mangle + call _vpaes_schedule_mangle # low round. swap xmm7 and xmm6 pshufd \$0xFF, %xmm0, %xmm0 @@ -409,10 +409,10 @@ _vpaes_schedule_core: movdqa %xmm6, %xmm7 call _vpaes_schedule_low_round movdqa %xmm5, %xmm7 - + jmp .Loop_schedule_256 - + ## ## .aes_schedule_mangle_last ## @@ -511,9 +511,9 @@ _vpaes_schedule_round: # rotate pshufd \$0xFF, %xmm0, %xmm0 palignr \$1, %xmm0, %xmm0 - + # fall through... - + # low round: same as high round, but no rotation and no rcon. _vpaes_schedule_low_round: # smear xmm7 @@ -552,7 +552,7 @@ _vpaes_schedule_low_round: pxor %xmm4, %xmm0 # 0 = sbox output # add in smeared stuff - pxor %xmm7, %xmm0 + pxor %xmm7, %xmm0 movdqa %xmm0, %xmm7 ret .size _vpaes_schedule_round,.-_vpaes_schedule_round diff --git a/crypto/bn/asm/armv4-gf2m.pl b/crypto/bn/asm/armv4-gf2m.pl index 0bb5433075..7a0cdb2e8a 100644 --- a/crypto/bn/asm/armv4-gf2m.pl +++ b/crypto/bn/asm/armv4-gf2m.pl @@ -36,7 +36,7 @@ # # Câmara, D.; Gouvêa, C. P. L.; López, J. & Dahab, R.: Fast Software # Polynomial Multiplication on ARM Processors using the NEON Engine. -# +# # http://conradoplg.cryptoland.net/files/2010/12/mocrysen13.pdf $flavour = shift; diff --git a/crypto/bn/asm/armv4-mont.pl b/crypto/bn/asm/armv4-mont.pl index 0dc4fe95e4..75a36f62fa 100644 --- a/crypto/bn/asm/armv4-mont.pl +++ b/crypto/bn/asm/armv4-mont.pl @@ -23,7 +23,7 @@ # [depending on key length, less for longer keys] on ARM920T, and # +115-80% on Intel IXP425. This is compared to pre-bn_mul_mont code # base and compiler generated code with in-lined umull and even umlal -# instructions. The latter means that this code didn't really have an +# instructions. The latter means that this code didn't really have an # "advantage" of utilizing some "secret" instruction. 
# # The code is interoperable with Thumb ISA and is rather compact, less diff --git a/crypto/bn/asm/bn-586.pl b/crypto/bn/asm/bn-586.pl index 1ca1bbf7d4..1350bcd8fa 100644 --- a/crypto/bn/asm/bn-586.pl +++ b/crypto/bn/asm/bn-586.pl @@ -54,7 +54,7 @@ sub bn_mul_add_words &movd("mm0",&wparam(3)); # mm0 = w &pxor("mm1","mm1"); # mm1 = carry_in &jmp(&label("maw_sse2_entry")); - + &set_label("maw_sse2_unrolled",16); &movd("mm3",&DWP(0,$r,"",0)); # mm3 = r[0] &paddq("mm1","mm3"); # mm1 = carry_in + r[0] @@ -675,20 +675,20 @@ sub bn_sub_part_words &adc($c,0); &mov(&DWP($i*4,$r,"",0),$tmp1); # *r } - + &comment(""); &add($b,32); &add($r,32); &sub($num,8); &jnz(&label("pw_neg_loop")); - + &set_label("pw_neg_finish",0); &mov($tmp2,&wparam(4)); # get dl &mov($num,0); &sub($num,$tmp2); &and($num,7); &jz(&label("pw_end")); - + for ($i=0; $i<7; $i++) { &comment("dl<0 Tail Round $i"); @@ -705,9 +705,9 @@ sub bn_sub_part_words } &jmp(&label("pw_end")); - + &set_label("pw_pos",0); - + &and($num,0xfffffff8); # num / 8 &jz(&label("pw_pos_finish")); @@ -722,18 +722,18 @@ sub bn_sub_part_words &mov(&DWP($i*4,$r,"",0),$tmp1); # *r &jnc(&label("pw_nc".$i)); } - + &comment(""); &add($a,32); &add($r,32); &sub($num,8); &jnz(&label("pw_pos_loop")); - + &set_label("pw_pos_finish",0); &mov($num,&wparam(4)); # get dl &and($num,7); &jz(&label("pw_end")); - + for ($i=0; $i<7; $i++) { &comment("dl>0 Tail Round $i"); @@ -754,17 +754,17 @@ sub bn_sub_part_words &mov(&DWP($i*4,$r,"",0),$tmp1); # *r &set_label("pw_nc".$i,0); } - + &comment(""); &add($a,32); &add($r,32); &sub($num,8); &jnz(&label("pw_nc_loop")); - + &mov($num,&wparam(4)); # get dl &and($num,7); &jz(&label("pw_nc_end")); - + for ($i=0; $i<7; $i++) { &mov($tmp1,&DWP($i*4,$a,"",0)); # *a diff --git a/crypto/bn/asm/co-586.pl b/crypto/bn/asm/co-586.pl index 60d0363660..6f34c37cf8 100644 --- a/crypto/bn/asm/co-586.pl +++ b/crypto/bn/asm/co-586.pl @@ -47,7 +47,7 @@ sub mul_add_c &mov("edx",&DWP(($nb)*4,$b,"",0)) if $pos == 1; # laod next b ### &adc($c2,0); - # is pos > 1, it means it is the last loop + # is pos > 1, it means it is the last loop &mov(&DWP($i*4,"eax","",0),$c0) if $pos > 0; # save r[]; &mov("eax",&DWP(($na)*4,$a,"",0)) if $pos == 1; # laod next a } @@ -76,7 +76,7 @@ sub sqr_add_c &mov("edx",&DWP(($nb)*4,$a,"",0)) if ($pos == 1) && ($na != $nb); ### &adc($c2,0); - # is pos > 1, it means it is the last loop + # is pos > 1, it means it is the last loop &mov(&DWP($i*4,$r,"",0),$c0) if $pos > 0; # save r[]; &mov("eax",&DWP(($na)*4,$a,"",0)) if $pos == 1; # load next b } @@ -127,7 +127,7 @@ sub bn_mul_comba $c2="ebp"; $a="esi"; $b="edi"; - + $as=0; $ae=0; $bs=0; @@ -142,9 +142,9 @@ sub bn_mul_comba &push("ebx"); &xor($c0,$c0); - &mov("eax",&DWP(0,$a,"",0)); # load the first word + &mov("eax",&DWP(0,$a,"",0)); # load the first word &xor($c1,$c1); - &mov("edx",&DWP(0,$b,"",0)); # load the first second + &mov("edx",&DWP(0,$b,"",0)); # load the first second for ($i=0; $i<$tot; $i++) { @@ -152,7 +152,7 @@ sub bn_mul_comba $bi=$bs; $end=$be+1; - &comment("################## Calculate word $i"); + &comment("################## Calculate word $i"); for ($j=$bs; $j<$end; $j++) { diff --git a/crypto/bn/asm/ia64-mont.pl b/crypto/bn/asm/ia64-mont.pl index 5cc5c599f9..233fdd47a9 100644 --- a/crypto/bn/asm/ia64-mont.pl +++ b/crypto/bn/asm/ia64-mont.pl @@ -80,7 +80,7 @@ $code=<<___; // int bn_mul_mont (BN_ULONG *rp,const BN_ULONG *ap, // const BN_ULONG *bp,const BN_ULONG *np, -// const BN_ULONG *n0p,int num); +// const BN_ULONG *n0p,int num); .align 64 .global bn_mul_mont# 
.proc bn_mul_mont# @@ -203,7 +203,7 @@ bn_mul_mont_general: { .mmi; .pred.rel "mutex",p39,p41 (p39) add topbit=r0,r0 (p41) add topbit=r0,r0,1 - nop.i 0 } + nop.i 0 } { .mmi; st8 [tp_1]=n[0] add tptr=16,sp add tp_1=8,sp };; diff --git a/crypto/bn/asm/mips.pl b/crypto/bn/asm/mips.pl index 102b656229..5093177552 100644 --- a/crypto/bn/asm/mips.pl +++ b/crypto/bn/asm/mips.pl @@ -603,13 +603,13 @@ $code.=<<___; sltu $v0,$t2,$ta2 $ST $t2,-2*$BNSZ($a0) $ADDU $v0,$t8 - + $ADDU $ta3,$t3 sltu $t9,$ta3,$t3 $ADDU $t3,$ta3,$v0 sltu $v0,$t3,$ta3 $ST $t3,-$BNSZ($a0) - + .set noreorder bgtz $at,.L_bn_add_words_loop $ADDU $v0,$t9 @@ -808,7 +808,7 @@ bn_div_3_words: # so that we can save two arguments # and return address in registers # instead of stack:-) - + $LD $a0,($a3) move $ta2,$a1 bne $a0,$a2,bn_div_3_words_internal diff --git a/crypto/bn/asm/parisc-mont.pl b/crypto/bn/asm/parisc-mont.pl index 8aa94e8511..61c3625a3c 100644 --- a/crypto/bn/asm/parisc-mont.pl +++ b/crypto/bn/asm/parisc-mont.pl @@ -546,7 +546,7 @@ L\$copy ldd $idx($np),$hi0 std,ma %r0,8($tp) addib,<> 8,$idx,.-8 ; L\$copy - std,ma $hi0,8($rp) + std,ma $hi0,8($rp) ___ if ($BN_SZ==4) { # PA-RISC 1.1 code-path @@ -868,7 +868,7 @@ L\$copy_pa11 ldwx $idx($np),$hi0 stws,ma %r0,4($tp) addib,<> 4,$idx,L\$copy_pa11 - stws,ma $hi0,4($rp) + stws,ma $hi0,4($rp) nop ; alignment L\$done diff --git a/crypto/bn/asm/ppc-mont.pl b/crypto/bn/asm/ppc-mont.pl index 5802260ca6..7a25b1ec9b 100644 --- a/crypto/bn/asm/ppc-mont.pl +++ b/crypto/bn/asm/ppc-mont.pl @@ -26,7 +26,7 @@ # So far RSA *sign* performance improvement over pre-bn_mul_mont asm # for 64-bit application running on PPC970/G5 is: # -# 512-bit +65% +# 512-bit +65% # 1024-bit +35% # 2048-bit +18% # 4096-bit +4% @@ -49,7 +49,7 @@ if ($flavour =~ /32/) { $UMULL= "mullw"; # unsigned multiply low $UMULH= "mulhwu"; # unsigned multiply high $UCMP= "cmplw"; # unsigned compare - $SHRI= "srwi"; # unsigned shift right by immediate + $SHRI= "srwi"; # unsigned shift right by immediate $PUSH= $ST; $POP= $LD; } elsif ($flavour =~ /64/) { @@ -69,7 +69,7 @@ if ($flavour =~ /32/) { $UMULL= "mulld"; # unsigned multiply low $UMULH= "mulhdu"; # unsigned multiply high $UCMP= "cmpld"; # unsigned compare - $SHRI= "srdi"; # unsigned shift right by immediate + $SHRI= "srdi"; # unsigned shift right by immediate $PUSH= $ST; $POP= $LD; } else { die "nonsense $flavour"; } diff --git a/crypto/bn/asm/ppc.pl b/crypto/bn/asm/ppc.pl index e9262df0f3..1a03f4561e 100644 --- a/crypto/bn/asm/ppc.pl +++ b/crypto/bn/asm/ppc.pl @@ -38,7 +38,7 @@ #rsa 2048 bits 0.3036s 0.0085s 3.3 117.1 #rsa 4096 bits 2.0040s 0.0299s 0.5 33.4 #dsa 512 bits 0.0087s 0.0106s 114.3 94.5 -#dsa 1024 bits 0.0256s 0.0313s 39.0 32.0 +#dsa 1024 bits 0.0256s 0.0313s 39.0 32.0 # # Same bechmark with this assembler code: # @@ -74,7 +74,7 @@ #rsa 4096 bits 0.3700s 0.0058s 2.7 171.0 #dsa 512 bits 0.0016s 0.0020s 610.7 507.1 #dsa 1024 bits 0.0047s 0.0058s 212.5 173.2 -# +# # Again, performance increases by at about 75% # # Mac OS X, Apple G5 1.8GHz (Note this is 32 bit code) @@ -125,7 +125,7 @@ if ($flavour =~ /32/) { $CNTLZ= "cntlzw"; # count leading zeros $SHL= "slw"; # shift left $SHR= "srw"; # unsigned shift right - $SHRI= "srwi"; # unsigned shift right by immediate + $SHRI= "srwi"; # unsigned shift right by immediate $SHLI= "slwi"; # shift left by immediate $CLRU= "clrlwi"; # clear upper bits $INSR= "insrwi"; # insert right @@ -149,10 +149,10 @@ if ($flavour =~ /32/) { $CNTLZ= "cntlzd"; # count leading zeros $SHL= "sld"; # shift left $SHR= "srd"; # unsigned shift 
right - $SHRI= "srdi"; # unsigned shift right by immediate + $SHRI= "srdi"; # unsigned shift right by immediate $SHLI= "sldi"; # shift left by immediate $CLRU= "clrldi"; # clear upper bits - $INSR= "insrdi"; # insert right + $INSR= "insrdi"; # insert right $ROTL= "rotldi"; # rotate left by immediate $TR= "td"; # conditional trap } else { die "nonsense $flavour"; } @@ -189,7 +189,7 @@ $data=<=d? blt Lppcasm_div3 #goto Lppcasm_div3 if not - subf r3,r5,r3 #h-=d ; + subf r3,r5,r3 #h-=d ; Lppcasm_div3: #r7 = BN_BITS2-i. so r7=i cmpi 0,0,r7,0 # is (i == 0)? beq Lppcasm_div4 @@ -1668,7 +1668,7 @@ Lppcasm_div4: # as it saves registers. li r6,2 #r6=2 mtctr r6 #counter will be in count. -Lppcasm_divouterloop: +Lppcasm_divouterloop: $SHRI r8,r3,`$BITS/2` #r8 = (h>>BN_BITS4) $SHRI r11,r4,`$BITS/2` #r11= (l&BN_MASK2h)>>BN_BITS4 # compute here for innerloop. @@ -1676,7 +1676,7 @@ Lppcasm_divouterloop: bne Lppcasm_div5 # goto Lppcasm_div5 if not li r8,-1 - $CLRU r8,r8,`$BITS/2` #q = BN_MASK2l + $CLRU r8,r8,`$BITS/2` #q = BN_MASK2l b Lppcasm_div6 Lppcasm_div5: $UDIV r8,r3,r9 #q = h/dh @@ -1684,7 +1684,7 @@ Lppcasm_div6: $UMULL r12,r9,r8 #th = q*dh $CLRU r10,r5,`$BITS/2` #r10=dl $UMULL r6,r8,r10 #tl = q*dl - + Lppcasm_divinnerloop: subf r10,r12,r3 #t = h -th $SHRI r7,r10,`$BITS/2` #r7= (t &BN_MASK2H), sort of... @@ -1761,7 +1761,7 @@ Lppcasm_div9: addi r4,r4,-$BNSZ addi r3,r3,-$BNSZ mtctr r5 -Lppcasm_sqr_mainloop: +Lppcasm_sqr_mainloop: #sqr(r[0],r[1],a[0]); $LDU r6,$BNSZ(r4) $UMULL r7,r6,r6 @@ -1769,7 +1769,7 @@ Lppcasm_sqr_mainloop: $STU r7,$BNSZ(r3) $STU r8,$BNSZ(r3) bdnz Lppcasm_sqr_mainloop -Lppcasm_sqr_adios: +Lppcasm_sqr_adios: blr .long 0 .byte 0,12,0x14,0,0,0,3,0 @@ -1783,7 +1783,7 @@ Lppcasm_sqr_adios: # done in the build # -.align 4 +.align 4 .bn_mul_words: # # BN_ULONG bn_mul_words(BN_ULONG *rp, BN_ULONG *ap, int num, BN_ULONG w) @@ -1797,7 +1797,7 @@ Lppcasm_sqr_adios: rlwinm. r7,r5,30,2,31 # num >> 2 beq Lppcasm_mw_REM mtctr r7 -Lppcasm_mw_LOOP: +Lppcasm_mw_LOOP: #mul(rp[0],ap[0],w,c1); $LD r8,`0*$BNSZ`(r4) $UMULL r9,r6,r8 @@ -1809,7 +1809,7 @@ Lppcasm_mw_LOOP: #using adde. $ST r9,`0*$BNSZ`(r3) #mul(rp[1],ap[1],w,c1); - $LD r8,`1*$BNSZ`(r4) + $LD r8,`1*$BNSZ`(r4) $UMULL r11,r6,r8 $UMULH r12,r6,r8 adde r11,r11,r10 @@ -1830,7 +1830,7 @@ Lppcasm_mw_LOOP: addze r12,r12 #this spin we collect carry into #r12 $ST r11,`3*$BNSZ`(r3) - + addi r3,r3,`4*$BNSZ` addi r4,r4,`4*$BNSZ` bdnz Lppcasm_mw_LOOP @@ -1846,25 +1846,25 @@ Lppcasm_mw_REM: addze r10,r10 $ST r9,`0*$BNSZ`(r3) addi r12,r10,0 - + addi r5,r5,-1 cmpli 0,0,r5,0 beq Lppcasm_mw_OVER - + #mul(rp[1],ap[1],w,c1); - $LD r8,`1*$BNSZ`(r4) + $LD r8,`1*$BNSZ`(r4) $UMULL r9,r6,r8 $UMULH r10,r6,r8 addc r9,r9,r12 addze r10,r10 $ST r9,`1*$BNSZ`(r3) addi r12,r10,0 - + addi r5,r5,-1 cmpli 0,0,r5,0 beq Lppcasm_mw_OVER - + #mul_add(rp[2],ap[2],w,c1); $LD r8,`2*$BNSZ`(r4) $UMULL r9,r6,r8 @@ -1873,8 +1873,8 @@ Lppcasm_mw_REM: addze r10,r10 $ST r9,`2*$BNSZ`(r3) addi r12,r10,0 - -Lppcasm_mw_OVER: + +Lppcasm_mw_OVER: addi r3,r12,0 blr .long 0 @@ -1902,11 +1902,11 @@ Lppcasm_mw_OVER: # empirical evidence suggests that unrolled version performs best!! # xor r0,r0,r0 #r0 = 0 - xor r12,r12,r12 #r12 = 0 . used for carry + xor r12,r12,r12 #r12 = 0 . used for carry rlwinm. 
r7,r5,30,2,31 # num >> 2 beq Lppcasm_maw_leftover # if (num < 4) go LPPCASM_maw_leftover mtctr r7 -Lppcasm_maw_mainloop: +Lppcasm_maw_mainloop: #mul_add(rp[0],ap[0],w,c1); $LD r8,`0*$BNSZ`(r4) $LD r11,`0*$BNSZ`(r3) @@ -1922,9 +1922,9 @@ Lppcasm_maw_mainloop: #by multiply and will be collected #in the next spin $ST r9,`0*$BNSZ`(r3) - + #mul_add(rp[1],ap[1],w,c1); - $LD r8,`1*$BNSZ`(r4) + $LD r8,`1*$BNSZ`(r4) $LD r9,`1*$BNSZ`(r3) $UMULL r11,r6,r8 $UMULH r12,r6,r8 @@ -1933,7 +1933,7 @@ Lppcasm_maw_mainloop: addc r11,r11,r9 #addze r12,r12 $ST r11,`1*$BNSZ`(r3) - + #mul_add(rp[2],ap[2],w,c1); $LD r8,`2*$BNSZ`(r4) $UMULL r9,r6,r8 @@ -1944,7 +1944,7 @@ Lppcasm_maw_mainloop: addc r9,r9,r11 #addze r10,r10 $ST r9,`2*$BNSZ`(r3) - + #mul_add(rp[3],ap[3],w,c1); $LD r8,`3*$BNSZ`(r4) $UMULL r11,r6,r8 @@ -1958,7 +1958,7 @@ Lppcasm_maw_mainloop: addi r3,r3,`4*$BNSZ` addi r4,r4,`4*$BNSZ` bdnz Lppcasm_maw_mainloop - + Lppcasm_maw_leftover: andi. r5,r5,0x3 beq Lppcasm_maw_adios @@ -1975,10 +1975,10 @@ Lppcasm_maw_leftover: addc r9,r9,r12 addze r12,r10 $ST r9,0(r3) - + bdz Lppcasm_maw_adios #mul_add(rp[1],ap[1],w,c1); - $LDU r8,$BNSZ(r4) + $LDU r8,$BNSZ(r4) $UMULL r9,r6,r8 $UMULH r10,r6,r8 $LDU r11,$BNSZ(r3) @@ -1987,7 +1987,7 @@ Lppcasm_maw_leftover: addc r9,r9,r12 addze r12,r10 $ST r9,0(r3) - + bdz Lppcasm_maw_adios #mul_add(rp[2],ap[2],w,c1); $LDU r8,$BNSZ(r4) @@ -1999,8 +1999,8 @@ Lppcasm_maw_leftover: addc r9,r9,r12 addze r12,r10 $ST r9,0(r3) - -Lppcasm_maw_adios: + +Lppcasm_maw_adios: addi r3,r12,0 blr .long 0 diff --git a/crypto/bn/asm/rsaz-avx2.pl b/crypto/bn/asm/rsaz-avx2.pl index 0c1b236ef9..f34c84f452 100755 --- a/crypto/bn/asm/rsaz-avx2.pl +++ b/crypto/bn/asm/rsaz-avx2.pl @@ -382,7 +382,7 @@ $code.=<<___; vpaddq $TEMP1, $ACC1, $ACC1 vpmuludq 32*7-128($aap), $B2, $ACC2 vpbroadcastq 32*5-128($tpa), $B2 - vpaddq 32*11-448($tp1), $ACC2, $ACC2 + vpaddq 32*11-448($tp1), $ACC2, $ACC2 vmovdqu $ACC6, 32*6-192($tp0) vmovdqu $ACC7, 32*7-192($tp0) @@ -441,7 +441,7 @@ $code.=<<___; vmovdqu $ACC7, 32*16-448($tp1) lea 8($tp1), $tp1 - dec $i + dec $i jnz .LOOP_SQR_1024 ___ $ZERO = $ACC9; @@ -786,7 +786,7 @@ $code.=<<___; vpblendd \$3, $TEMP4, $TEMP5, $TEMP4 vpaddq $TEMP3, $ACC7, $ACC7 vpaddq $TEMP4, $ACC8, $ACC8 - + vpsrlq \$29, $ACC4, $TEMP1 vpand $AND_MASK, $ACC4, $ACC4 vpsrlq \$29, $ACC5, $TEMP2 @@ -1451,7 +1451,7 @@ $code.=<<___; vpaddq $TEMP4, $ACC8, $ACC8 vmovdqu $ACC4, 128-128($rp) - vmovdqu $ACC5, 160-128($rp) + vmovdqu $ACC5, 160-128($rp) vmovdqu $ACC6, 192-128($rp) vmovdqu $ACC7, 224-128($rp) vmovdqu $ACC8, 256-128($rp) diff --git a/crypto/bn/asm/rsaz-x86_64.pl b/crypto/bn/asm/rsaz-x86_64.pl index 6f3b664f7a..7bcfafe8dd 100755 --- a/crypto/bn/asm/rsaz-x86_64.pl +++ b/crypto/bn/asm/rsaz-x86_64.pl @@ -282,9 +282,9 @@ $code.=<<___; movq %r9, 16(%rsp) movq %r10, 24(%rsp) shrq \$63, %rbx - + #third iteration - movq 16($inp), %r9 + movq 16($inp), %r9 movq 24($inp), %rax mulq %r9 addq %rax, %r12 @@ -532,7 +532,7 @@ $code.=<<___; movl $times,128+8(%rsp) movq $out, %xmm0 # off-load movq %rbp, %xmm1 # off-load -#first iteration +#first iteration mulx %rax, %r8, %r9 mulx 16($inp), %rcx, %r10 @@ -568,7 +568,7 @@ $code.=<<___; mov %rax, (%rsp) mov %r8, 8(%rsp) -#second iteration +#second iteration mulx 16($inp), %rax, %rbx adox %rax, %r10 adcx %rbx, %r11 @@ -607,8 +607,8 @@ $code.=<<___; mov %r9, 16(%rsp) .byte 0x4c,0x89,0x94,0x24,0x18,0x00,0x00,0x00 # mov %r10, 24(%rsp) - -#third iteration + +#third iteration .byte 0xc4,0x62,0xc3,0xf6,0x8e,0x18,0x00,0x00,0x00 # mulx 24($inp), $out, %r9 adox $out, %r12 adcx %r9, %r13 
@@ -643,8 +643,8 @@ $code.=<<___; mov %r11, 32(%rsp) .byte 0x4c,0x89,0xa4,0x24,0x28,0x00,0x00,0x00 # mov %r12, 40(%rsp) - -#fourth iteration + +#fourth iteration .byte 0xc4,0xe2,0xfb,0xf6,0x9e,0x20,0x00,0x00,0x00 # mulx 32($inp), %rax, %rbx adox %rax, %r14 adcx %rbx, %r15 @@ -676,8 +676,8 @@ $code.=<<___; mov %r13, 48(%rsp) mov %r14, 56(%rsp) - -#fifth iteration + +#fifth iteration .byte 0xc4,0x62,0xc3,0xf6,0x9e,0x28,0x00,0x00,0x00 # mulx 40($inp), $out, %r11 adox $out, %r8 adcx %r11, %r9 @@ -704,8 +704,8 @@ $code.=<<___; mov %r15, 64(%rsp) mov %r8, 72(%rsp) - -#sixth iteration + +#sixth iteration .byte 0xc4,0xe2,0xfb,0xf6,0x9e,0x30,0x00,0x00,0x00 # mulx 48($inp), %rax, %rbx adox %rax, %r10 adcx %rbx, %r11 @@ -1048,7 +1048,7 @@ $code.=<<___; movq 56($ap), %rax movq %rdx, %r14 adcq \$0, %r14 - + mulq %rbx addq %rax, %r14 movq ($ap), %rax @@ -1150,7 +1150,7 @@ $code.=<<___; movq ($ap), %rax adcq \$0, %rdx addq %r15, %r14 - movq %rdx, %r15 + movq %rdx, %r15 adcq \$0, %r15 leaq 8(%rdi), %rdi @@ -1212,7 +1212,7 @@ $code.=<<___ if ($addx); mulx 48($ap), %rbx, %r14 adcx %rax, %r12 - + mulx 56($ap), %rax, %r15 adcx %rbx, %r13 adcx %rax, %r14 @@ -1411,7 +1411,7 @@ $code.=<<___; ___ $code.=<<___ if ($addx); jmp .Lmul_scatter_tail - + .align 32 .Lmulx_scatter: movq ($out), %rdx # pass b[0] @@ -1824,7 +1824,7 @@ __rsaz_512_mul: movq 56($ap), %rax movq %rdx, %r14 adcq \$0, %r14 - + mulq %rbx addq %rax, %r14 movq ($ap), %rax @@ -1901,7 +1901,7 @@ __rsaz_512_mul: movq ($ap), %rax adcq \$0, %rdx addq %r15, %r14 - movq %rdx, %r15 + movq %rdx, %r15 adcq \$0, %r15 leaq 8(%rdi), %rdi diff --git a/crypto/bn/asm/s390x-gf2m.pl b/crypto/bn/asm/s390x-gf2m.pl index cbd16f4214..57b0032d67 100644 --- a/crypto/bn/asm/s390x-gf2m.pl +++ b/crypto/bn/asm/s390x-gf2m.pl @@ -198,7 +198,7 @@ $code.=<<___; xgr $hi,@r[1] xgr $lo,@r[0] xgr $hi,@r[2] - xgr $lo,@r[3] + xgr $lo,@r[3] xgr $hi,@r[3] xgr $lo,$hi stg $hi,16($rp) diff --git a/crypto/bn/asm/via-mont.pl b/crypto/bn/asm/via-mont.pl index 9f81bc822e..558501c315 100644 --- a/crypto/bn/asm/via-mont.pl +++ b/crypto/bn/asm/via-mont.pl @@ -76,7 +76,7 @@ # dsa 1024 bits 0.001346s 0.001595s 742.7 627.0 # dsa 2048 bits 0.004745s 0.005582s 210.7 179.1 # -# Conclusions: +# Conclusions: # - VIA SDK leaves a *lot* of room for improvement (which this # implementation successfully fills:-); # - 'rep montmul' gives up to >3x performance improvement depending on diff --git a/crypto/bn/asm/x86-mont.pl b/crypto/bn/asm/x86-mont.pl index 6787503666..a8b402d59b 100755 --- a/crypto/bn/asm/x86-mont.pl +++ b/crypto/bn/asm/x86-mont.pl @@ -39,7 +39,7 @@ require "x86asm.pl"; $output = pop; open STDOUT,">$output"; - + &asm_init($ARGV[0],$0); $sse2=0; diff --git a/crypto/bn/asm/x86_64-mont5.pl b/crypto/bn/asm/x86_64-mont5.pl index 3278dc6056..8f49391727 100755 --- a/crypto/bn/asm/x86_64-mont5.pl +++ b/crypto/bn/asm/x86_64-mont5.pl @@ -1049,7 +1049,7 @@ my $bptr="%rdx"; # const void *table, my $nptr="%rcx"; # const BN_ULONG *nptr, my $n0 ="%r8"; # const BN_ULONG *n0); my $num ="%r9"; # int num, has to be divisible by 8 - # int pwr + # int pwr my ($i,$j,$tptr)=("%rbp","%rcx",$rptr); my @A0=("%r10","%r11"); @@ -1126,7 +1126,7 @@ $code.=<<___; ja .Lpwr_page_walk .Lpwr_page_walk_done: - mov $num,%r10 + mov $num,%r10 neg $num ############################################################## @@ -2036,7 +2036,7 @@ __bn_post4x_internal: jnz .Lsqr4x_sub mov $num,%r10 # prepare for back-to-back call - neg $num # restore $num + neg $num # restore $num ret .size __bn_post4x_internal,.-__bn_post4x_internal ___ @@ 
-2259,7 +2259,7 @@ bn_mulx4x_mont_gather5: mov \$0,%r10 cmovc %r10,%r11 sub %r11,%rbp -.Lmulx4xsp_done: +.Lmulx4xsp_done: and \$-64,%rbp # ensure alignment mov %rsp,%r11 sub %rbp,%r11 @@ -2741,7 +2741,7 @@ bn_powerx5: ja .Lpwrx_page_walk .Lpwrx_page_walk_done: - mov $num,%r10 + mov $num,%r10 neg $num ############################################################## diff --git a/crypto/camellia/asm/cmll-x86.pl b/crypto/camellia/asm/cmll-x86.pl index 59f9ed9141..26afad8c95 100644 --- a/crypto/camellia/asm/cmll-x86.pl +++ b/crypto/camellia/asm/cmll-x86.pl @@ -792,9 +792,9 @@ if ($OPENSSL) { 64, 40,211,123,187,201, 67,193, 21,227,173,244,119,199,128,158); sub S1110 { my $i=shift; $i=@SBOX[$i]; return $i<<24|$i<<16|$i<<8; } -sub S4404 { my $i=shift; $i=($i<<1|$i>>7)&0xff; $i=@SBOX[$i]; return $i<<24|$i<<16|$i; } -sub S0222 { my $i=shift; $i=@SBOX[$i]; $i=($i<<1|$i>>7)&0xff; return $i<<16|$i<<8|$i; } -sub S3033 { my $i=shift; $i=@SBOX[$i]; $i=($i>>1|$i<<7)&0xff; return $i<<24|$i<<8|$i; } +sub S4404 { my $i=shift; $i=($i<<1|$i>>7)&0xff; $i=@SBOX[$i]; return $i<<24|$i<<16|$i; } +sub S0222 { my $i=shift; $i=@SBOX[$i]; $i=($i<<1|$i>>7)&0xff; return $i<<16|$i<<8|$i; } +sub S3033 { my $i=shift; $i=@SBOX[$i]; $i=($i>>1|$i<<7)&0xff; return $i<<24|$i<<8|$i; } &set_label("Camellia_SIGMA",64); &data_word( diff --git a/crypto/cast/asm/cast-586.pl b/crypto/cast/asm/cast-586.pl index 6beb9c5f25..1fc2b1a309 100644 --- a/crypto/cast/asm/cast-586.pl +++ b/crypto/cast/asm/cast-586.pl @@ -7,7 +7,7 @@ # https://www.openssl.org/source/license.html -# This flag makes the inner loop one cycle longer, but generates +# This flag makes the inner loop one cycle longer, but generates # code that runs %30 faster on the pentium pro/II, 44% faster # of PIII, while only %7 slower on the pentium. # By default, this flag is on. @@ -157,7 +157,7 @@ sub E_CAST { if ($ppro) { &xor( $tmp1, $tmp1); &mov( $tmp2, 0xff); - + &movb( &LB($tmp1), &HB($tmp4)); # A &and( $tmp2, $tmp4); @@ -166,7 +166,7 @@ sub E_CAST { } else { &mov( $tmp2, $tmp4); # B &movb( &LB($tmp1), &HB($tmp4)); # A # BAD BAD BAD - + &shr( $tmp4, 16); # &and( $tmp2, 0xff); } diff --git a/crypto/chacha/asm/chacha-armv4.pl b/crypto/chacha/asm/chacha-armv4.pl index b5e21e4938..c90306e45c 100755 --- a/crypto/chacha/asm/chacha-armv4.pl +++ b/crypto/chacha/asm/chacha-armv4.pl @@ -15,7 +15,7 @@ # ==================================================================== # # December 2014 -# +# # ChaCha20 for ARMv4. # # Performance in cycles per byte out of large buffer. @@ -720,7 +720,7 @@ ChaCha20_neon: vadd.i32 $d2,$d1,$t0 @ counter+2 str @t[3], [sp,#4*(16+15)] mov @t[3],#10 - add @x[12],@x[12],#3 @ counter+3 + add @x[12],@x[12],#3 @ counter+3 b .Loop_neon .align 4 diff --git a/crypto/chacha/asm/chacha-armv8.pl b/crypto/chacha/asm/chacha-armv8.pl index f7e1074714..db3776a2fc 100755 --- a/crypto/chacha/asm/chacha-armv8.pl +++ b/crypto/chacha/asm/chacha-armv8.pl @@ -15,7 +15,7 @@ # ==================================================================== # # June 2015 -# +# # ChaCha20 for ARMv8. # # Performance in cycles per byte out of large buffer. 
@@ -201,7 +201,7 @@ ChaCha20_ctr32: mov $ctr,#10 subs $len,$len,#64 .Loop: - sub $ctr,$ctr,#1 + sub $ctr,$ctr,#1 ___ foreach (&ROUND(0, 4, 8,12)) { eval; } foreach (&ROUND(0, 5,10,15)) { eval; } diff --git a/crypto/chacha/asm/chacha-ppc.pl b/crypto/chacha/asm/chacha-ppc.pl index 8a54cbaca7..7da99e0767 100755 --- a/crypto/chacha/asm/chacha-ppc.pl +++ b/crypto/chacha/asm/chacha-ppc.pl @@ -15,7 +15,7 @@ # ==================================================================== # # October 2015 -# +# # ChaCha20 for PowerPC/AltiVec. # # Performance in cycles per byte out of large buffer. @@ -524,7 +524,7 @@ $code.=<<___; lwz @d[3],12($ctr) vadduwm @K[5],@K[4],@K[5] - vspltisw $twenty,-12 # synthesize constants + vspltisw $twenty,-12 # synthesize constants vspltisw $twelve,12 vspltisw $twenty5,-7 #vspltisw $seven,7 # synthesized in the loop diff --git a/crypto/des/asm/crypt586.pl b/crypto/des/asm/crypt586.pl index d5911a1858..ad89eeb085 100644 --- a/crypto/des/asm/crypt586.pl +++ b/crypto/des/asm/crypt586.pl @@ -111,7 +111,7 @@ sub D_ENCRYPT &and( $u, "0xfcfcfcfc" ); # 2 &xor( $tmp1, $tmp1); # 1 &and( $t, "0xcfcfcfcf" ); # 2 - &xor( $tmp2, $tmp2); + &xor( $tmp2, $tmp2); &movb( &LB($tmp1), &LB($u) ); &movb( &LB($tmp2), &HB($u) ); &rotr( $t, 4 ); @@ -175,7 +175,7 @@ sub IP_new &R_PERM_OP($l,$tt,$r,14,"0x33333333",$r); &R_PERM_OP($tt,$r,$l,22,"0x03fc03fc",$r); &R_PERM_OP($l,$r,$tt, 9,"0xaaaaaaaa",$r); - + if ($lr != 3) { if (($lr-3) < 0) diff --git a/crypto/des/asm/des-586.pl b/crypto/des/asm/des-586.pl index 3d7c7f1b91..d45102c1da 100644 --- a/crypto/des/asm/des-586.pl +++ b/crypto/des/asm/des-586.pl @@ -85,7 +85,7 @@ sub DES_encrypt_internal() &function_end_B("_x86_DES_encrypt"); } - + sub DES_decrypt_internal() { &function_begin_B("_x86_DES_decrypt"); @@ -122,7 +122,7 @@ sub DES_decrypt_internal() &function_end_B("_x86_DES_decrypt"); } - + sub DES_encrypt { local($name,$do_ip)=@_; @@ -283,7 +283,7 @@ sub IP_new &R_PERM_OP($l,$tt,$r,14,"0x33333333",$r); &R_PERM_OP($tt,$r,$l,22,"0x03fc03fc",$r); &R_PERM_OP($l,$r,$tt, 9,"0xaaaaaaaa",$r); - + if ($lr != 3) { if (($lr-3) < 0) diff --git a/crypto/des/asm/desboth.pl b/crypto/des/asm/desboth.pl index 76759fb292..ef7054e275 100644 --- a/crypto/des/asm/desboth.pl +++ b/crypto/des/asm/desboth.pl @@ -34,7 +34,7 @@ sub DES_encrypt3 &IP_new($L,$R,"edx",0); # put them back - + if ($enc) { &mov(&DWP(4,"ebx","",0),$R); diff --git a/crypto/ec/asm/ecp_nistz256-armv8.pl b/crypto/ec/asm/ecp_nistz256-armv8.pl index cdc91617ff..d93c4fe957 100644 --- a/crypto/ec/asm/ecp_nistz256-armv8.pl +++ b/crypto/ec/asm/ecp_nistz256-armv8.pl @@ -660,7 +660,7 @@ __ecp_nistz256_div_by_2: adc $ap,xzr,xzr // zap $ap tst $acc0,#1 // is a even? - csel $acc0,$acc0,$t0,eq // ret = even ? a : a+modulus + csel $acc0,$acc0,$t0,eq // ret = even ? a : a+modulus csel $acc1,$acc1,$t1,eq csel $acc2,$acc2,$t2,eq csel $acc3,$acc3,$t3,eq diff --git a/crypto/ec/asm/ecp_nistz256-sparcv9.pl b/crypto/ec/asm/ecp_nistz256-sparcv9.pl index 97201cb271..ee11069459 100755 --- a/crypto/ec/asm/ecp_nistz256-sparcv9.pl +++ b/crypto/ec/asm/ecp_nistz256-sparcv9.pl @@ -1874,7 +1874,7 @@ $code.=<<___ if ($i<3); ldx [$bp+8*($i+1)],$bi ! bp[$i+1] ___ $code.=<<___; - addcc $acc1,$t0,$acc1 ! accumulate high parts of multiplication + addcc $acc1,$t0,$acc1 ! 
accumulate high parts of multiplication sllx $acc0,32,$t0 addxccc $acc2,$t1,$acc2 srlx $acc0,32,$t1 diff --git a/crypto/ec/asm/ecp_nistz256-x86.pl b/crypto/ec/asm/ecp_nistz256-x86.pl index 1d9e00616b..f637c844c4 100755 --- a/crypto/ec/asm/ecp_nistz256-x86.pl +++ b/crypto/ec/asm/ecp_nistz256-x86.pl @@ -443,7 +443,7 @@ for(1..37) { &mov (&DWP(20,"esp"),"eax"); &mov (&DWP(24,"esp"),"eax"); &mov (&DWP(28,"esp"),"eax"); - + &call ("_ecp_nistz256_sub"); &stack_pop(8); diff --git a/crypto/ec/asm/ecp_nistz256-x86_64.pl b/crypto/ec/asm/ecp_nistz256-x86_64.pl index 16b6639b54..adb49f37dd 100755 --- a/crypto/ec/asm/ecp_nistz256-x86_64.pl +++ b/crypto/ec/asm/ecp_nistz256-x86_64.pl @@ -611,7 +611,7 @@ __ecp_nistz256_mul_montq: adc \$0, $acc0 ######################################################################## - # Second reduction step + # Second reduction step mov $acc1, $t1 shl \$32, $acc1 mulq $poly3 @@ -658,7 +658,7 @@ __ecp_nistz256_mul_montq: adc \$0, $acc1 ######################################################################## - # Third reduction step + # Third reduction step mov $acc2, $t1 shl \$32, $acc2 mulq $poly3 @@ -705,7 +705,7 @@ __ecp_nistz256_mul_montq: adc \$0, $acc2 ######################################################################## - # Final reduction step + # Final reduction step mov $acc3, $t1 shl \$32, $acc3 mulq $poly3 @@ -718,7 +718,7 @@ __ecp_nistz256_mul_montq: mov $acc5, $t1 adc \$0, $acc2 - ######################################################################## + ######################################################################## # Branch-less conditional subtraction of P sub \$-1, $acc4 # .Lpoly[0] mov $acc0, $t2 @@ -2118,7 +2118,7 @@ $code.=<<___; movq %xmm1, $r_ptr call __ecp_nistz256_sqr_mont$x # p256_sqr_mont(res_y, S); ___ -{ +{ ######## ecp_nistz256_div_by_2(res_y, res_y); ########################## # operate in 4-5-6-7 "name space" that matches squaring output # @@ -2207,7 +2207,7 @@ $code.=<<___; lea $M(%rsp), $b_ptr mov $acc4, $acc6 # harmonize sub output and mul input xor %ecx, %ecx - mov $acc4, $S+8*0(%rsp) # have to save:-( + mov $acc4, $S+8*0(%rsp) # have to save:-( mov $acc5, $acc2 mov $acc5, $S+8*1(%rsp) cmovz $acc0, $acc3 @@ -3055,8 +3055,8 @@ ___ ######################################################################## # Convert ecp_nistz256_table.c to layout expected by ecp_nistz_gather_w7 # -open TABLE,"{op} eq "movz") { # movz is pain... 
sprintf "%s%s%s",$self->{op},$self->{sz},shift; - } elsif ($self->{op} =~ /^set/) { + } elsif ($self->{op} =~ /^set/) { "$self->{op}"; } elsif ($self->{op} eq "ret") { my $epilogue = ""; @@ -178,7 +178,7 @@ my %globals; $self->{op} .= $self->{sz}; } elsif ($self->{op} eq "call" && $current_segment eq ".CRT\$XCU") { $self->{op} = "\tDQ"; - } + } $self->{op}; } } @@ -639,7 +639,7 @@ my %globals; if ($sz eq "D" && ($current_segment=~/.[px]data/ || $dir eq ".rva")) { $var=~s/([_a-z\$\@][_a-z0-9\$\@]*)/$nasm?"$1 wrt ..imagebase":"imagerel $1"/egi; } $var; - }; + }; $sz =~ tr/bvlrq/BWDDQ/; $self->{value} = "\tD$sz\t"; @@ -649,7 +649,7 @@ my %globals; }; /\.byte/ && do { my @str=split(/,\s*/,$$line); map(s/(0b[0-1]+)/oct($1)/eig,@str); - map(s/0x([0-9a-f]+)/0$1h/ig,@str) if ($masm); + map(s/0x([0-9a-f]+)/0$1h/ig,@str) if ($masm); while ($#str>15) { $self->{value}.="DB\t" .join(",",@str[0..15])."\n"; @@ -896,7 +896,7 @@ while(defined(my $line=<>)) { printf "%s",$directive->out(); } elsif (my $opcode=opcode->re(\$line)) { my $asm = eval("\$".$opcode->mnemonic()); - + if ((ref($asm) eq 'CODE') && scalar(my @bytes=&$asm($line))) { print $gas?".byte\t":"DB\t",join(',',@bytes),"\n"; next; @@ -974,7 +974,7 @@ close STDOUT; # %r13 - - # %r14 - - # %r15 - - -# +# # (*) volatile register # (-) preserved by callee # (#) Nth argument, volatile diff --git a/crypto/perlasm/x86nasm.pl b/crypto/perlasm/x86nasm.pl index 4b664a870b..b4d4e2a781 100644 --- a/crypto/perlasm/x86nasm.pl +++ b/crypto/perlasm/x86nasm.pl @@ -132,7 +132,7 @@ ___ grep {s/(^extern\s+${nmdecor}OPENSSL_ia32cap_P)/\;$1/} @out; push (@out,$comm) } - push (@out,$initseg) if ($initseg); + push (@out,$initseg) if ($initseg); } sub ::comment { foreach (@_) { push(@out,"\t; $_\n"); } } diff --git a/crypto/rc4/asm/rc4-c64xplus.pl b/crypto/rc4/asm/rc4-c64xplus.pl index daed75c750..9f282fe45e 100644 --- a/crypto/rc4/asm/rc4-c64xplus.pl +++ b/crypto/rc4/asm/rc4-c64xplus.pl @@ -89,7 +89,7 @@ _RC4: || NOP 5 STB $XX,*${KEYA}[-2] ; key->x || SUB4 $YY,$TX,$YY -|| BNOP B3 +|| BNOP B3 STB $YY,*${KEYB}[-1] ; key->y || NOP 5 .endasmfunc diff --git a/crypto/rc4/asm/rc4-md5-x86_64.pl b/crypto/rc4/asm/rc4-md5-x86_64.pl index 890161bac5..433ed8571a 100644 --- a/crypto/rc4/asm/rc4-md5-x86_64.pl +++ b/crypto/rc4/asm/rc4-md5-x86_64.pl @@ -51,7 +51,7 @@ my ($rc4,$md5)=(1,1); # what to generate? my $D="#" if (!$md5); # if set to "#", MD5 is stitched into RC4(), # but its result is discarded. Idea here is # to be able to use 'openssl speed rc4' for - # benchmarking the stitched subroutine... + # benchmarking the stitched subroutine... 
my $flavour = shift; my $output = shift; @@ -419,7 +419,7 @@ $code.=<<___ if ($rc4 && (!$md5 || $D)); and \$63,$len # remaining bytes jnz .Loop1 jmp .Ldone - + .align 16 .Loop1: add $TX[0]#b,$YY#b diff --git a/crypto/rc4/asm/rc4-parisc.pl b/crypto/rc4/asm/rc4-parisc.pl index 006b6b01af..81ec098840 100644 --- a/crypto/rc4/asm/rc4-parisc.pl +++ b/crypto/rc4/asm/rc4-parisc.pl @@ -98,7 +98,7 @@ sub unrolledloopbody { for ($i=0;$i<4;$i++) { $code.=<<___; ldo 1($XX[0]),$XX[1] - `sprintf("$LDX %$TY(%$key),%$dat1") if ($i>0)` + `sprintf("$LDX %$TY(%$key),%$dat1") if ($i>0)` and $mask,$XX[1],$XX[1] $LDX $YY($key),$TY $MKX $YY,$key,$ix @@ -166,7 +166,7 @@ RC4 ldo `2*$SZ`($key),$key ldi 0xff,$mask - ldi 3,$dat0 + ldi 3,$dat0 ldo 1($XX[0]),$XX[0] ; warm up loop and $mask,$XX[0],$XX[0] diff --git a/crypto/rc4/asm/rc4-x86_64.pl b/crypto/rc4/asm/rc4-x86_64.pl index aaed2b1e61..6e07c7c1bb 100755 --- a/crypto/rc4/asm/rc4-x86_64.pl +++ b/crypto/rc4/asm/rc4-x86_64.pl @@ -48,7 +48,7 @@ # April 2005 # -# P4 EM64T core appears to be "allergic" to 64-bit inc/dec. Replacing +# P4 EM64T core appears to be "allergic" to 64-bit inc/dec. Replacing # those with add/sub results in 50% performance improvement of folded # loop... diff --git a/crypto/ripemd/asm/rmd-586.pl b/crypto/ripemd/asm/rmd-586.pl index 544c496f07..f19fdc9f9a 100644 --- a/crypto/ripemd/asm/rmd-586.pl +++ b/crypto/ripemd/asm/rmd-586.pl @@ -34,7 +34,7 @@ $KL2=0x6ED9EBA1; $KL3=0x8F1BBCDC; $KL4=0xA953FD4E; $KR0=0x50A28BE6; -$KR1=0x5C4DD124; +$KR1=0x5C4DD124; $KR2=0x6D703EF3; $KR3=0x7A6D76E9; @@ -543,28 +543,28 @@ sub ripemd160_block # &mov($tmp2, &wparam(0)); # Moved into last round &mov($tmp1, &DWP( 4,$tmp2,"",0)); # ctx->B - &add($D, $tmp1); + &add($D, $tmp1); &mov($tmp1, &swtmp(16+2)); # $c &add($D, $tmp1); &mov($tmp1, &DWP( 8,$tmp2,"",0)); # ctx->C - &add($E, $tmp1); + &add($E, $tmp1); &mov($tmp1, &swtmp(16+3)); # $d &add($E, $tmp1); &mov($tmp1, &DWP(12,$tmp2,"",0)); # ctx->D - &add($A, $tmp1); + &add($A, $tmp1); &mov($tmp1, &swtmp(16+4)); # $e &add($A, $tmp1); &mov($tmp1, &DWP(16,$tmp2,"",0)); # ctx->E - &add($B, $tmp1); + &add($B, $tmp1); &mov($tmp1, &swtmp(16+0)); # $a &add($B, $tmp1); &mov($tmp1, &DWP( 0,$tmp2,"",0)); # ctx->A - &add($C, $tmp1); + &add($C, $tmp1); &mov($tmp1, &swtmp(16+1)); # $b &add($C, $tmp1); diff --git a/crypto/sha/asm/sha1-586.pl b/crypto/sha/asm/sha1-586.pl index 0efed70a3e..3bf8200dbb 100644 --- a/crypto/sha/asm/sha1-586.pl +++ b/crypto/sha/asm/sha1-586.pl @@ -133,7 +133,7 @@ $ymm=1 if ($xmm && =~ /GNU assembler version ([2-9]\.[0-9]+)/ && $1>=2.19); # first version supporting AVX -$ymm=1 if ($xmm && !$ymm && $ARGV[0] eq "win32n" && +$ymm=1 if ($xmm && !$ymm && $ARGV[0] eq "win32n" && `nasm -v 2>&1` =~ /NASM version ([2-9]\.[0-9]+)/ && $1>=2.03); # first version supporting AVX diff --git a/crypto/sha/asm/sha1-mb-x86_64.pl b/crypto/sha/asm/sha1-mb-x86_64.pl index 51c73c05ac..2f6b35f355 100644 --- a/crypto/sha/asm/sha1-mb-x86_64.pl +++ b/crypto/sha/asm/sha1-mb-x86_64.pl @@ -95,7 +95,7 @@ $K="%xmm15"; if (1) { # Atom-specific optimization aiming to eliminate pshufb with high - # registers [and thus get rid of 48 cycles accumulated penalty] + # registers [and thus get rid of 48 cycles accumulated penalty] @Xi=map("%xmm$_",(0..4)); ($tx,$t0,$t1,$t2,$t3)=map("%xmm$_",(5..9)); @V=($A,$B,$C,$D,$E)=map("%xmm$_",(10..14)); @@ -126,7 +126,7 @@ my $k=$i+2; # ... # $i==13: 14,15,15,15, # $i==14: 15 -# +# # Then at $i==15 Xupdate is applied one iteration in advance... 
$code.=<<___ if ($i==0); movd (@ptr[0]),@Xi[0] diff --git a/crypto/sha/asm/sha1-sparcv9.pl b/crypto/sha/asm/sha1-sparcv9.pl index 7437ff4f05..cdd5b9afc5 100644 --- a/crypto/sha/asm/sha1-sparcv9.pl +++ b/crypto/sha/asm/sha1-sparcv9.pl @@ -227,7 +227,7 @@ sha1_block_data_order: ldd [%o1 + 0x20], %f16 ldd [%o1 + 0x28], %f18 ldd [%o1 + 0x30], %f20 - subcc %o2, 1, %o2 ! done yet? + subcc %o2, 1, %o2 ! done yet? ldd [%o1 + 0x38], %f22 add %o1, 0x40, %o1 prefetch [%o1 + 63], 20 diff --git a/crypto/sha/asm/sha1-sparcv9a.pl b/crypto/sha/asm/sha1-sparcv9a.pl index f9ed5630e8..8dfde463cb 100644 --- a/crypto/sha/asm/sha1-sparcv9a.pl +++ b/crypto/sha/asm/sha1-sparcv9a.pl @@ -519,7 +519,7 @@ $code.=<<___; mov $Cctx,$C mov $Dctx,$D mov $Ectx,$E - alignaddr %g0,$tmp0,%g0 + alignaddr %g0,$tmp0,%g0 dec 1,$len ba .Loop mov $nXfer,$Xfer diff --git a/crypto/sha/asm/sha1-x86_64.pl b/crypto/sha/asm/sha1-x86_64.pl index 97baae37cd..66054ceeae 100755 --- a/crypto/sha/asm/sha1-x86_64.pl +++ b/crypto/sha/asm/sha1-x86_64.pl @@ -262,7 +262,7 @@ sha1_block_data_order: jz .Lialu ___ $code.=<<___ if ($shaext); - test \$`1<<29`,%r10d # check SHA bit + test \$`1<<29`,%r10d # check SHA bit jnz _shaext_shortcut ___ $code.=<<___ if ($avx>1); diff --git a/crypto/sha/asm/sha256-586.pl b/crypto/sha/asm/sha256-586.pl index 6af1d84beb..8e7f4eecc3 100644 --- a/crypto/sha/asm/sha256-586.pl +++ b/crypto/sha/asm/sha256-586.pl @@ -47,7 +47,7 @@ # # Performance in clock cycles per processed byte (less is better): # -# gcc icc x86 asm(*) SIMD x86_64 asm(**) +# gcc icc x86 asm(*) SIMD x86_64 asm(**) # Pentium 46 57 40/38 - - # PIII 36 33 27/24 - - # P4 41 38 28 - 17.3 @@ -276,7 +276,7 @@ my $suffix=shift; &mov ($Coff,"ecx"); &mov ($Doff,"edi"); &mov (&DWP(0,"esp"),"ebx"); # magic - &mov ($E,&DWP(16,"esi")); + &mov ($E,&DWP(16,"esi")); &mov ("ebx",&DWP(20,"esi")); &mov ("ecx",&DWP(24,"esi")); &mov ("edi",&DWP(28,"esi")); @@ -385,7 +385,7 @@ my @AH=($A,$K256); &xor ($AH[1],"ecx"); # magic &mov (&DWP(8,"esp"),"ecx"); &mov (&DWP(12,"esp"),"ebx"); - &mov ($E,&DWP(16,"esi")); + &mov ($E,&DWP(16,"esi")); &mov ("ebx",&DWP(20,"esi")); &mov ("ecx",&DWP(24,"esi")); &mov ("esi",&DWP(28,"esi")); diff --git a/crypto/sha/asm/sha256-mb-x86_64.pl b/crypto/sha/asm/sha256-mb-x86_64.pl index fbcd29f2e8..b8a77c7fce 100644 --- a/crypto/sha/asm/sha256-mb-x86_64.pl +++ b/crypto/sha/asm/sha256-mb-x86_64.pl @@ -36,7 +36,7 @@ # (iii) "this" is for n=8, when we gather twice as much data, result # for n=4 is 20.3+4.44=24.7; # (iv) presented improvement coefficients are asymptotic limits and -# in real-life application are somewhat lower, e.g. for 2KB +# in real-life application are somewhat lower, e.g. 
for 2KB # fragments they range from 75% to 130% (on Haswell); $flavour = shift; diff --git a/crypto/sha/asm/sha512-586.pl b/crypto/sha/asm/sha512-586.pl index 0887e06148..94cc0114f8 100644 --- a/crypto/sha/asm/sha512-586.pl +++ b/crypto/sha/asm/sha512-586.pl @@ -383,7 +383,7 @@ if ($sse2) { &set_label("16_79_sse2",16); for ($j=0;$j<2;$j++) { # 2x unroll - #&movq ("mm7",&QWP(8*(9+16-1),"esp")); # prefetched in BODY_00_15 + #&movq ("mm7",&QWP(8*(9+16-1),"esp")); # prefetched in BODY_00_15 &movq ("mm5",&QWP(8*(9+16-14),"esp")); &movq ("mm1","mm7"); &psrlq ("mm7",1); diff --git a/crypto/sha/asm/sha512-armv8.pl b/crypto/sha/asm/sha512-armv8.pl index c1aaf778f4..620aa39440 100644 --- a/crypto/sha/asm/sha512-armv8.pl +++ b/crypto/sha/asm/sha512-armv8.pl @@ -26,7 +26,7 @@ # Denver 2.01 10.5 (+26%) 6.70 (+8%) # X-Gene 20.0 (+100%) 12.8 (+300%(***)) # Mongoose 2.36 13.0 (+50%) 8.36 (+33%) -# +# # (*) Software SHA256 results are of lesser relevance, presented # mostly for informational purposes. # (**) The result is a trade-off: it's possible to improve it by diff --git a/crypto/sha/asm/sha512-parisc.pl b/crypto/sha/asm/sha512-parisc.pl index fcb6157902..d28a5af835 100755 --- a/crypto/sha/asm/sha512-parisc.pl +++ b/crypto/sha/asm/sha512-parisc.pl @@ -368,7 +368,7 @@ L\$parisc1 ___ @V=( $Ahi, $Alo, $Bhi, $Blo, $Chi, $Clo, $Dhi, $Dlo, - $Ehi, $Elo, $Fhi, $Flo, $Ghi, $Glo, $Hhi, $Hlo) = + $Ehi, $Elo, $Fhi, $Flo, $Ghi, $Glo, $Hhi, $Hlo) = ( "%r1", "%r2", "%r3", "%r4", "%r5", "%r6", "%r7", "%r8", "%r9","%r10","%r11","%r12","%r13","%r14","%r15","%r16"); $a0 ="%r17"; @@ -419,7 +419,7 @@ $code.=<<___; add $t0,$hlo,$hlo shd $ahi,$alo,$Sigma0[0],$t0 addc $t1,$hhi,$hhi ; h += Sigma1(e) - shd $alo,$ahi,$Sigma0[0],$t1 + shd $alo,$ahi,$Sigma0[0],$t1 add $a0,$hlo,$hlo shd $ahi,$alo,$Sigma0[1],$t2 addc $a1,$hhi,$hhi ; h += Ch(e,f,g) diff --git a/crypto/sha/asm/sha512-s390x.pl b/crypto/sha/asm/sha512-s390x.pl index 582d393cef..92d7a7725a 100644 --- a/crypto/sha/asm/sha512-s390x.pl +++ b/crypto/sha/asm/sha512-s390x.pl @@ -311,7 +311,7 @@ $code.=<<___; cl${g} $inp,`$frame+4*$SIZE_T`($sp) jne .Lloop - lm${g} %r6,%r15,`$frame+6*$SIZE_T`($sp) + lm${g} %r6,%r15,`$frame+6*$SIZE_T`($sp) br %r14 .size $Func,.-$Func .string "SHA${label} block transform for s390x, CRYPTOGAMS by " diff --git a/crypto/sha/asm/sha512-sparcv9.pl b/crypto/sha/asm/sha512-sparcv9.pl index 4a1ce5fe3e..098c2a118a 100644 --- a/crypto/sha/asm/sha512-sparcv9.pl +++ b/crypto/sha/asm/sha512-sparcv9.pl @@ -102,7 +102,7 @@ if ($output =~ /512/) { $locals=0; # X[16] is register resident @X=("%o0","%o1","%o2","%o3","%o4","%o5","%g1","%o7"); - + $A="%l0"; $B="%l1"; $C="%l2"; @@ -254,7 +254,7 @@ $code.=<<___; $SLL $a,`$SZ*8-@Sigma0[1]`,$tmp1 xor $tmp0,$h,$h $SRL $a,@Sigma0[2],$tmp0 - xor $tmp1,$h,$h + xor $tmp1,$h,$h $SLL $a,`$SZ*8-@Sigma0[0]`,$tmp1 xor $tmp0,$h,$h xor $tmp1,$h,$h ! 
Sigma0(a) diff --git a/crypto/sha/asm/sha512-x86_64.pl b/crypto/sha/asm/sha512-x86_64.pl index 63a62656f6..01bbb7775f 100755 --- a/crypto/sha/asm/sha512-x86_64.pl +++ b/crypto/sha/asm/sha512-x86_64.pl @@ -1782,7 +1782,7 @@ if ($avx>1) {{ ###################################################################### # AVX2+BMI code path # -my $a5=$SZ==4?"%esi":"%rsi"; # zap $inp +my $a5=$SZ==4?"%esi":"%rsi"; # zap $inp my $PUSH8=8*2*$SZ; use integer; diff --git a/crypto/ts/ts_rsp_verify.c b/crypto/ts/ts_rsp_verify.c index 2755dd0ef3..66f5be6f69 100644 --- a/crypto/ts/ts_rsp_verify.c +++ b/crypto/ts/ts_rsp_verify.c @@ -480,7 +480,7 @@ static char *ts_get_status_text(STACK_OF(ASN1_UTF8STRING) *text) return result; } -static int ts_check_policy(const ASN1_OBJECT *req_oid, +static int ts_check_policy(const ASN1_OBJECT *req_oid, const TS_TST_INFO *tst_info) { const ASN1_OBJECT *resp_oid = tst_info->policy_id; diff --git a/crypto/whrlpool/asm/wp-mmx.pl b/crypto/whrlpool/asm/wp-mmx.pl index f63945c8b9..d628d56506 100644 --- a/crypto/whrlpool/asm/wp-mmx.pl +++ b/crypto/whrlpool/asm/wp-mmx.pl @@ -31,7 +31,7 @@ # multiplying 64 by CPU clock frequency and dividing by relevant # value from the given table: # -# $SCALE=2/8 icc8 gcc3 +# $SCALE=2/8 icc8 gcc3 # Intel P4 3200/4600 4600(*) 6400 # Intel PIII 2900/3000 4900 5400 # AMD K[78] 2500/1800 9900 8200(**) @@ -502,6 +502,6 @@ for($i=0;$i<8;$i++) { &L(0xca,0x2d,0xbf,0x07,0xad,0x5a,0x83,0x33); &function_end_B("whirlpool_block_mmx"); -&asm_finish(); +&asm_finish(); close STDOUT; diff --git a/crypto/x509v3/v3_enum.c b/crypto/x509v3/v3_enum.c index f39cb5ac2a..3b0f197444 100644 --- a/crypto/x509v3/v3_enum.c +++ b/crypto/x509v3/v3_enum.c @@ -38,7 +38,7 @@ const X509V3_EXT_METHOD v3_crl_reason = { crl_reasons }; -char *i2s_ASN1_ENUMERATED_TABLE(X509V3_EXT_METHOD *method, +char *i2s_ASN1_ENUMERATED_TABLE(X509V3_EXT_METHOD *method, const ASN1_ENUMERATED *e) { ENUMERATED_NAMES *enam; diff --git a/crypto/x509v3/v3_skey.c b/crypto/x509v3/v3_skey.c index 39597dc41d..749f51b2f0 100644 --- a/crypto/x509v3/v3_skey.c +++ b/crypto/x509v3/v3_skey.c @@ -24,7 +24,7 @@ const X509V3_EXT_METHOD v3_skey_id = { NULL }; -char *i2s_ASN1_OCTET_STRING(X509V3_EXT_METHOD *method, +char *i2s_ASN1_OCTET_STRING(X509V3_EXT_METHOD *method, const ASN1_OCTET_STRING *oct) { return OPENSSL_buf2hexstr(oct->data, oct->length); diff --git a/crypto/x86cpuid.pl b/crypto/x86cpuid.pl index c45b18356b..176e8e38c8 100644 --- a/crypto/x86cpuid.pl +++ b/crypto/x86cpuid.pl @@ -89,7 +89,7 @@ for (@ARGV) { $sse2=1 if (/-DOPENSSL_IA32_SSE2/); } &ja (&label("generic")); &and ("edx",0xefffffff); # clear hyper-threading bit &jmp (&label("generic")); - + &set_label("intel"); &cmp ("edi",7); &jb (&label("cacheinfo")); diff --git a/engines/asm/e_padlock-x86_64.pl b/engines/asm/e_padlock-x86_64.pl index da285abc61..834b1ea79c 100644 --- a/engines/asm/e_padlock-x86_64.pl +++ b/engines/asm/e_padlock-x86_64.pl @@ -535,7 +535,7 @@ $code.=<<___ if ($PADLOCK_PREFETCH{$mode}); sub $len,%rsp shr \$3,$len lea (%rsp),$out - .byte 0xf3,0x48,0xa5 # rep movsq + .byte 0xf3,0x48,0xa5 # rep movsq lea (%r8),$out lea (%rsp),$inp mov $chunk,$len diff --git a/include/openssl/x509.h b/include/openssl/x509.h index c8996f3520..038cef9534 100644 --- a/include/openssl/x509.h +++ b/include/openssl/x509.h @@ -805,7 +805,7 @@ X509_NAME_ENTRY *X509_NAME_ENTRY_create_by_txt(X509_NAME_ENTRY **ne, const unsigned char *bytes, int len); X509_NAME_ENTRY *X509_NAME_ENTRY_create_by_NID(X509_NAME_ENTRY **ne, int nid, - int type, + int type, const unsigned 
char *bytes, int len); int X509_NAME_add_entry_by_txt(X509_NAME *name, const char *field, int type, diff --git a/ssl/packet.c b/ssl/packet.c index 2a8fe2541c..27462e947e 100644 --- a/ssl/packet.c +++ b/ssl/packet.c @@ -178,7 +178,7 @@ static int wpacket_intern_close(WPACKET *pkt) } /* Write out the WPACKET length if needed */ - if (sub->lenbytes > 0 + if (sub->lenbytes > 0 && !put_value((unsigned char *)&pkt->buf->data[sub->packet_len], packlen, sub->lenbytes)) return 0; diff --git a/ssl/packet_locl.h b/ssl/packet_locl.h index 55e41bba15..cee14002bf 100644 --- a/ssl/packet_locl.h +++ b/ssl/packet_locl.h @@ -707,7 +707,7 @@ int WPACKET_sub_allocate_bytes__(WPACKET *pkt, size_t len, * maximum size will be. If this function is used, then it should be immediately * followed by a WPACKET_allocate_bytes() call before any other WPACKET * functions are called (unless the write to the allocated bytes is abandoned). - * + * * For example: If we are generating a signature, then the size of that * signature may not be known in advance. We can use WPACKET_reserve_bytes() to * handle this: diff --git a/test/pkits-test.pl b/test/pkits-test.pl index ae7279cf2e..41444f191b 100644 --- a/test/pkits-test.pl +++ b/test/pkits-test.pl @@ -6,7 +6,7 @@ # in the file LICENSE in the source distribution or at # https://www.openssl.org/source/license.html -# Perl utility to run PKITS tests for RFC3280 compliance. +# Perl utility to run PKITS tests for RFC3280 compliance. my $ossl_path; diff --git a/test/recipes/tconversion.pl b/test/recipes/tconversion.pl index e5fa9dec87..1655cd483f 100644 --- a/test/recipes/tconversion.pl +++ b/test/recipes/tconversion.pl @@ -23,7 +23,7 @@ my %conversionforms = ( sub tconversion { my $testtype = shift; my $t = shift; - my @conversionforms = + my @conversionforms = defined($conversionforms{$testtype}) ? @{$conversionforms{$testtype}} : @{$conversionforms{"*"}}; diff --git a/test/wpackettest.c b/test/wpackettest.c index 2cb9e7201f..aabf781952 100644 --- a/test/wpackettest.c +++ b/test/wpackettest.c @@ -115,7 +115,7 @@ static int test_WPACKET_set_max_size(void) || !WPACKET_set_max_size(&pkt, SIZE_MAX) || !WPACKET_finish(&pkt)) { testfail("test_WPACKET_set_max_size():1 failed\n", &pkt); - return 0; + return 0; } if (!WPACKET_init_len(&pkt, buf, 1) diff --git a/util/ck_errf.pl b/util/ck_errf.pl index 7fc536786e..01ed905fe5 100755 --- a/util/ck_errf.pl +++ b/util/ck_errf.pl @@ -8,7 +8,7 @@ # This is just a quick script to scan for cases where the 'error' # function name in a XXXerr() macro is wrong. -# +# # Run in the top level by going # perl util/ck_errf.pl */*.c */*/*.c # diff --git a/util/copy.pl b/util/copy.pl index ef4d8708e2..c4aeea6a18 100644 --- a/util/copy.pl +++ b/util/copy.pl @@ -40,7 +40,7 @@ if ($fnum <= 1) } $dest = pop @filelist; - + if ($fnum > 2 && ! 
-d $dest) { die "Destination must be a directory"; @@ -73,5 +73,5 @@ foreach (@filelist) close(OUT); print "Copying: $_ to $dfile\n"; } - + diff --git a/util/fipslink.pl b/util/fipslink.pl index 18a91532be..bb685bf7bd 100644 --- a/util/fipslink.pl +++ b/util/fipslink.pl @@ -109,7 +109,7 @@ sub check_hash $hashval =~ s/^.*=\s+//; die "Invalid hash syntax in file" if (length($hashfile) != 40); die "Invalid hash received for file" if (length($hashval) != 40); - die "***HASH VALUE MISMATCH FOR FILE $filename ***" if ($hashval ne $hashfile); + die "***HASH VALUE MISMATCH FOR FILE $filename ***" if ($hashval ne $hashfile); } diff --git a/util/mkdef.pl b/util/mkdef.pl index b54c925c4c..764029334f 100755 --- a/util/mkdef.pl +++ b/util/mkdef.pl @@ -24,7 +24,7 @@ # existence:platform:kind:algorithms # # - "existence" can be "EXIST" or "NOEXIST" depending on if the symbol is -# found somewhere in the source, +# found somewhere in the source, # - "platforms" is empty if it exists on all platforms, otherwise it contains # comma-separated list of the platform, just as they are if the symbol exists # for those platforms, or prepended with a "!" if not. This helps resolve @@ -172,7 +172,7 @@ foreach (@ARGV, split(/ /, $config{options})) $do_ssl=1 if $_ eq "libssl"; if ($_ eq "ssl") { - $do_ssl=1; + $do_ssl=1; $libname=$_ } $do_crypto=1 if $_ eq "libcrypto"; @@ -211,7 +211,7 @@ foreach (@ARGV, split(/ /, $config{options})) } -if (!$libname) { +if (!$libname) { if ($do_ssl) { $libname="LIBSSL"; } @@ -339,7 +339,7 @@ if($do_crypto == 1) { } &update_numbers(*OUT,"LIBCRYPTO",*crypto_list,$max_crypto,@crypto_symbols); close OUT; -} +} } elsif ($do_checkexist) { &check_existing(*ssl_list, @ssl_symbols) diff --git a/util/mkerr.pl b/util/mkerr.pl index 79c8cfc31c..4645658e99 100644 --- a/util/mkerr.pl +++ b/util/mkerr.pl @@ -97,7 +97,7 @@ Options: Default: keep previously assigned numbers. (You are warned when collisions are detected.) - -nostatic Generates a different source code, where these additional + -nostatic Generates a different source code, where these additional functions are generated for each library specified in the config file: void ERR_load__strings(void); @@ -105,7 +105,7 @@ Options: void ERR__error(int f, int r, char *fn, int ln); #define err(f,r) ERR__error(f,r,OPENSSL_FILE,OPENSSL_LINE) while the code facilitates the use of these in an environment - where the error support routines are dynamically loaded at + where the error support routines are dynamically loaded at runtime. Default: 'static' code generation. @@ -114,8 +114,8 @@ Options: -unref Print out unreferenced function and reason codes. - -write Actually (over)write the generated code to the header and C - source files as assigned to each library through the config + -write Actually (over)write the generated code to the header and C + source files as assigned to each library through the config file. Default: don't write. @@ -196,7 +196,7 @@ while (($hdr, $lib) = each %libinc) if(/\/\*/) { if (not /\*\//) { # multiline comment... $line = $_; # ... 
just accumulate - next; + next; } else { s/\/\*.*?\*\///gs; # wipe it } @@ -370,7 +370,7 @@ foreach $file (@source) { print STDERR "ERROR: mismatch $file:$linenr $func:$3\n"; $errcount++; } - print STDERR "Function: $1\t= $fcodes{$1} (lib: $2, name: $3)\n" if $debug; + print STDERR "Function: $1\t= $fcodes{$1} (lib: $2, name: $3)\n" if $debug; } if(/(([A-Z0-9]+)_R_[A-Z0-9_]+)/) { next unless exists $csrc{$2}; @@ -379,8 +379,8 @@ foreach $file (@source) { $rcodes{$1} = "X"; $rnew{$2}++; } - print STDERR "Reason: $1\t= $rcodes{$1} (lib: $2)\n" if $debug; - } + print STDERR "Reason: $1\t= $rcodes{$1} (lib: $2)\n" if $debug; + } } close IN; } diff --git a/util/su-filter.pl b/util/su-filter.pl index 5996f58225..389c7c35c5 100644 --- a/util/su-filter.pl +++ b/util/su-filter.pl @@ -108,7 +108,7 @@ sub structureData { if($inbrace) { if($item eq "}") { $inbrace --; - + if(!$inbrace) { $substruc = structureData($dataitem); $dataitem = $substruc; -- 2.25.1