# ====================================================================
# Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
# project. Rights for redistribution and usage in source and binary
# forms are granted according to the OpenSSL license.
# ====================================================================
# sha256/512_block procedure for x86_64.
#
# 40% improvement over compiler-generated code on Opteron. On EM64T
# sha256 was observed to run >80% faster and sha512 - >40%. No magical
# tricks, just straight implementation... I really wonder why gcc
# [being armed with inline assembler] fails to generate as fast code.
# The only thing which is cool about this module is that it's the very
# same instruction sequence used for both SHA-256 and SHA-512. In the
# former case the instructions operate on 32-bit operands, while in the
# latter - on 64-bit ones. All I had to do was get one flavor right,
# the other one passed the test right away:-)
# sha256_block runs in ~1005 cycles on Opteron, which gives you
# asymptotic performance of 64*1000/1005=63.7MBps times CPU clock
# frequency in GHz. sha512_block runs in ~1275 cycles, which results
# in 128*1000/1275=100MBps per GHz. Is there room for improvement?
# Well, if you compare it to the IA-64 implementation, which maintains
# X[16] in register bank[!], tends to 4 instructions per CPU clock
# cycle and runs in 1003 cycles, 1275 is a very good result for the
# 3-way issue Opteron pipeline with X[16] maintained in memory. So
# *if* there is a way to improve it, *then* the only way would be to
# try to offload X[16] updates to the SSE unit, but that would require
# "deeper" loop unroll, which in turn would naturally cause size
# blow-up, not to mention increased complexity! And once again, only
# *if* it's actually possible to noticeably improve overall ILP,
# instruction level parallelism, on a given CPU implementation.
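#
# As a sanity check on the figures above: cycles per block translate
# to asymptotic throughput as block_size*1000/cycles MBps per GHz.
# A minimal illustrative helper (not part of this module):
#
#	sub mbps_per_ghz { my ($block,$cycles)=@_; $block*1000/$cycles }
#	mbps_per_ghz(64,1005);	# SHA-256 on Opteron: ~63.7 MBps per GHz
#	mbps_per_ghz(128,1275);	# SHA-512 on Opteron: ~100 MBps per GHz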
# Special note on Intel EM64T. While Opteron CPU exhibits perfect
# performance ratio of 1.5 between 64- and 32-bit flavors [see above],
# [currently available] EM64T CPUs apparently are far from it. On the
# contrary, 64-bit version, sha512_block, is ~30% *slower* than 32-bit
# sha256_block:-( This is presumably because 64-bit shifts/rotates
# apparently are not atomic instructions, but implemented in microcode.
# Optimization including one of Pavel Semjanov's ideas, alternative
# Maj, resulted in >=5% improvement on most CPUs, +20% SHA256 and
# unfortunately -2% SHA512 on P4 [which nobody should care about].
# Add SIMD code paths, see below for improvement coefficients. SSSE3
# code path was not attempted for SHA512, because improvement is not
# estimated to be high enough, noticeably less than 9%, to justify
# the effort, not on pre-AVX processors. [Obviously with exclusion
# for VIA Nano, but it has SHA512 instruction that is faster and
# should be used instead.] For reference, corresponding estimated
# upper limit for improvement for SSSE3 SHA256 is 28%. The fact that
# higher coefficients are observed on VIA Nano and Bulldozer has more
# to do with specifics of their architecture [which is topic for
# separate discussion].
# Add AVX2 code path. Two consecutive input blocks are loaded to
# 256-bit %ymm registers, with data from first block to least
# significant 128-bit halves and data from second to most significant.
# The data is then processed with same SIMD instruction sequence as
# for AVX, but with %ymm as operands. Side effect is increased stack
# frame, 448 additional bytes in SHA256 and 1152 in SHA512, and 1.2KB
# larger code.
# Add support for Intel SHA Extensions.

######################################################################
# Current performance in cycles per processed byte (less is better):
#
#                SHA256  SSSE3       AVX/XOP(*)      SHA512  AVX/XOP(*)
#
# AMD K8         14.9    -           -               9.57    -
# Core 2         15.6    13.8(+13%)  -               9.97    -
# Westmere       14.8    12.3(+19%)  -               9.58    -
# Sandy Bridge   17.4    14.2(+23%)  11.6(+50%(**))  11.2    8.10(+38%(**))
# Ivy Bridge     12.6    10.5(+20%)  10.3(+22%)      8.17    7.22(+13%)
# Haswell        12.2    9.28(+31%)  7.80(+56%)      7.66    5.40(+42%)
# Skylake        11.4    9.03(+26%)  7.70(+48%)      7.25    5.20(+40%)
# Bulldozer      21.1    13.6(+54%)  13.6(+54%(***)) 13.5    8.58(+57%)
# VIA Nano       23.0    16.5(+39%)  -               14.7    -
# Atom           23.0    18.9(+22%)  -               14.7    -
# Silvermont     27.4    20.6(+33%)  -               17.5    -
#
# (*)   whichever best applicable;
# (**)  switch from ror to shrd stands for fair share of improvement;
# (***) execution time is fully determined by remaining integer-only
#       part, body_00_15; reducing the amount of SIMD instructions
#       below certain limit makes no difference/sense; to conserve
#       space SHA256 XOP code path is therefore omitted;
if ($flavour =~ /\./) { $output = $flavour; undef $flavour; }

$win64=0; $win64=1 if ($flavour =~ /[nm]asm|mingw64/ || $output =~ /\.asm$/);

$0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
( $xlate="${dir}x86_64-xlate.pl" and -f $xlate ) or
( $xlate="${dir}../../perlasm/x86_64-xlate.pl" and -f $xlate) or
die "can't locate x86_64-xlate.pl";
if (`$ENV{CC} -Wa,-v -c -o /dev/null -x assembler /dev/null 2>&1`
		=~ /GNU assembler version ([2-9]\.[0-9]+)/) {
	$avx = ($1>=2.19) + ($1>=2.22);
}

if (!$avx && $win64 && ($flavour =~ /nasm/ || $ENV{ASM} =~ /nasm/) &&
	   `nasm -v 2>&1` =~ /NASM version ([2-9]\.[0-9]+)/) {
	$avx = ($1>=2.09) + ($1>=2.10);
}

if (!$avx && $win64 && ($flavour =~ /masm/ || $ENV{ASM} =~ /ml64/) &&
	   `ml64 2>&1` =~ /Version ([0-9]+)\./) {
	$avx = ($1>=10) + ($1>=11);
}

if (!$avx && `$ENV{CC} -v 2>&1` =~ /((?:^clang|LLVM) version|.*based on LLVM) ([3-9]\.[0-9]+)/) {
	$avx = ($2>=3.0) + ($2>3.0);
}
$shaext=1;	### set to zero if compiling for 1.0.1
$avx=1 if (!$shaext && $avx);
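# $avx denotes the widest vector extension the assembler is known to
# cope with: 0 - scalar/SSSE3 paths only, 1 - AVX/XOP, 2 - AVX2.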
open OUT,"| \"$^X\" $xlate $flavour $output";
if ($output =~ /512/) {
	$func="sha512_block_data_order";
	@ROT=($A,$B,$C,$D,$E,$F,$G,$H)=("%rax","%rbx","%rcx","%rdx",
					"%r8", "%r9", "%r10","%r11");
	($T1,$a0,$a1,$a2,$a3)=("%r12","%r13","%r14","%r15","%rdi");
} else {
	$func="sha256_block_data_order";
	@ROT=($A,$B,$C,$D,$E,$F,$G,$H)=("%eax","%ebx","%ecx","%edx",
					"%r8d","%r9d","%r10d","%r11d");
	($T1,$a0,$a1,$a2,$a3)=("%r12d","%r13d","%r14d","%r15d","%edi");
}
$ctx="%rdi";	# 1st arg, zapped by $a3
$inp="%rsi";	# 2nd arg

$_ctx="16*$SZ+0*8(%rsp)";
$_inp="16*$SZ+1*8(%rsp)";
$_end="16*$SZ+2*8(%rsp)";
$_rsp="16*$SZ+3*8(%rsp)";
$framesz="16*$SZ+4*8";
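# Stack frame layout, low to high: 16*$SZ bytes for the circular X[16]
# message schedule, then the saved ctx, inp, end pointer and original
# %rsp at the 16*$SZ+0*8 through 16*$SZ+3*8 offsets defined just above.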
{ my ($i,$a,$b,$c,$d,$e,$f,$g,$h) = @_;
  $STRIDE += 16 if ($i%(16/$SZ)==(16/$SZ-1));
	ror	\$`$Sigma1[2]-$Sigma1[1]`,$a0
	ror	\$`$Sigma0[2]-$Sigma0[1]`,$a1
	mov	$T1,`$SZ*($i&0xf)`(%rsp)
	ror	\$`$Sigma1[1]-$Sigma1[0]`,$a0
	xor	$g,$a2			# Ch(e,f,g)=((f^g)&e)^g
	ror	\$`$Sigma0[1]-$Sigma0[0]`,$a1
	add	$a2,$T1			# T1+=Ch(e,f,g)
	add	($Tbl),$T1		# T1+=K[round]
	xor	$b,$a2			# a^b, b^c in next round
	ror	\$$Sigma1[0],$a0	# Sigma1(e)
	ror	\$$Sigma0[0],$a1	# Sigma0(a)
	add	$a0,$T1			# T1+=Sigma1(e)
	xor	$a3,$h			# h=Maj(a,b,c)=Ch(a^b,c,b)
	lea	$STRIDE($Tbl),$Tbl	# round++
$code.=<<___ if ($i<15);
	add	$a1,$h			# h+=Sigma0(a)
	($a2,$a3) = ($a3,$a2);
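# For orientation, here is one round of the compression function in
# plain Perl, a minimal sketch of what the interleaved code above
# computes (SHA-256 flavour; ror32, @K and @X are assumed helpers and
# state, not part of this module):
#
#	sub ror32 { my ($x,$n)=@_; (($x>>$n)|($x<<(32-$n))) & 0xffffffff }
#	$T1 = ($h + (ror32($e,6)^ror32($e,11)^ror32($e,25))	# Sigma1(e)
#		  + ((($f^$g)&$e)^$g)				# Ch(e,f,g)
#		  + $K[$i] + $X[$i&0xf]) & 0xffffffff;
#	$T2 = ((ror32($a,2)^ror32($a,13)^ror32($a,22))		# Sigma0(a)
#		  + ((($a^$b)&($b^$c))^$b)) & 0xffffffff;	# Maj(a,b,c)
#	($h,$g,$f,$e,$d,$c,$b,$a) =
#	    ($g,$f,$e,($d+$T1)&0xffffffff,$c,$b,$a,($T1+$T2)&0xffffffff);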
{ my ($i,$a,$b,$c,$d,$e,$f,$g,$h) = @_;
	mov	`$SZ*(($i+1)&0xf)`(%rsp),$a0
	mov	`$SZ*(($i+14)&0xf)`(%rsp),$a2
	ror	\$`$sigma0[1]-$sigma0[0]`,$a0
	add	$a1,$a			# modulo-scheduled h+=Sigma0(a)
	ror	\$`$sigma1[1]-$sigma1[0]`,$a2
	xor	$a0,$T1			# sigma0(X[(i+1)&0xf])
	xor	$a1,$a2			# sigma1(X[(i+14)&0xf])
	add	`$SZ*(($i+9)&0xf)`(%rsp),$T1
	add	`$SZ*($i&0xf)`(%rsp),$T1
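# Likewise the message schedule step above in plain Perl (SHA-256
# flavour, same assumed ror32 helper and @X state):
#
#	$s0 = ror32($X[($i+1)&0xf],7) ^ ror32($X[($i+1)&0xf],18)
#	    ^ ($X[($i+1)&0xf]>>3);				# sigma0
#	$s1 = ror32($X[($i+14)&0xf],17) ^ ror32($X[($i+14)&0xf],19)
#	    ^ ($X[($i+14)&0xf]>>10);				# sigma1
#	$X[$i&0xf] = ($X[$i&0xf] + $s0 + $s1 + $X[($i+9)&0xf]) & 0xffffffff;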
.extern	OPENSSL_ia32cap_P
.type	$func,\@function,3
$code.=<<___ if ($SZ==4 || $avx);
	lea	OPENSSL_ia32cap_P(%rip),%r11
$code.=<<___ if ($SZ==4 && $shaext);
	test	\$`1<<29`,%r11d		# check for SHA
$code.=<<___ if ($avx && $SZ==8);
	test	\$`1<<11`,%r10d		# check for XOP
$code.=<<___ if ($avx>1);
	and	\$`1<<8|1<<5|1<<3`,%r11d	# check for BMI2+AVX2+BMI1
	cmp	\$`1<<8|1<<5|1<<3`,%r11d
$code.=<<___ if ($avx);
	and	\$`1<<30`,%r9d		# mask "Intel CPU" bit
	and	\$`1<<28|1<<9`,%r10d	# mask AVX and SSSE3 bits
	cmp	\$`1<<28|1<<9|1<<30`,%r10d
$code.=<<___ if ($SZ==4);
	mov	%rsp,%r11		# copy %rsp
	shl	\$4,%rdx		# num*16
	lea	($inp,%rdx,$SZ),%rdx	# inp+num*16*$SZ
	and	\$-64,%rsp		# align stack frame
	mov	$ctx,$_ctx		# save ctx, 1st arg
	mov	$inp,$_inp		# save inp, 2nd arg
	mov	%rdx,$_end		# save end pointer, "3rd" arg
	mov	%r11,$_rsp		# save copy of %rsp
	lea	$TABLE(%rip),$Tbl
for($i=0;$i<16;$i++) {
	$code.="	mov	$SZ*$i($inp),$T1\n";
	$code.="	mov	@ROT[4],$a0\n";
	$code.="	mov	@ROT[0],$a1\n";
	$code.="	bswap	$T1\n";
	&ROUND_00_15($i,@ROT);
	unshift(@ROT,pop(@ROT));
	&ROUND_16_XX($i,@ROT);
	unshift(@ROT,pop(@ROT));
	cmpb	\$0,`$SZ-1`($Tbl)
	add	$a1,$A			# modulo-scheduled h+=Sigma0(a)
	lea	16*$SZ($inp),$inp
.type	$TABLE,\@object
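# Note that every row of constants below appears twice: the SIMD code
# paths read this table with full-width vector loads, and duplicating
# each 16-byte group lets a single 32-byte load feed both 128-bit
# halves of a %ymm register in the AVX2 code path.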
	.long	0x428a2f98,0x71374491,0xb5c0fbcf,0xe9b5dba5
	.long	0x428a2f98,0x71374491,0xb5c0fbcf,0xe9b5dba5
	.long	0x3956c25b,0x59f111f1,0x923f82a4,0xab1c5ed5
	.long	0x3956c25b,0x59f111f1,0x923f82a4,0xab1c5ed5
	.long	0xd807aa98,0x12835b01,0x243185be,0x550c7dc3
	.long	0xd807aa98,0x12835b01,0x243185be,0x550c7dc3
	.long	0x72be5d74,0x80deb1fe,0x9bdc06a7,0xc19bf174
	.long	0x72be5d74,0x80deb1fe,0x9bdc06a7,0xc19bf174
	.long	0xe49b69c1,0xefbe4786,0x0fc19dc6,0x240ca1cc
	.long	0xe49b69c1,0xefbe4786,0x0fc19dc6,0x240ca1cc
	.long	0x2de92c6f,0x4a7484aa,0x5cb0a9dc,0x76f988da
	.long	0x2de92c6f,0x4a7484aa,0x5cb0a9dc,0x76f988da
	.long	0x983e5152,0xa831c66d,0xb00327c8,0xbf597fc7
	.long	0x983e5152,0xa831c66d,0xb00327c8,0xbf597fc7
	.long	0xc6e00bf3,0xd5a79147,0x06ca6351,0x14292967
	.long	0xc6e00bf3,0xd5a79147,0x06ca6351,0x14292967
	.long	0x27b70a85,0x2e1b2138,0x4d2c6dfc,0x53380d13
	.long	0x27b70a85,0x2e1b2138,0x4d2c6dfc,0x53380d13
	.long	0x650a7354,0x766a0abb,0x81c2c92e,0x92722c85
	.long	0x650a7354,0x766a0abb,0x81c2c92e,0x92722c85
	.long	0xa2bfe8a1,0xa81a664b,0xc24b8b70,0xc76c51a3
	.long	0xa2bfe8a1,0xa81a664b,0xc24b8b70,0xc76c51a3
	.long	0xd192e819,0xd6990624,0xf40e3585,0x106aa070
	.long	0xd192e819,0xd6990624,0xf40e3585,0x106aa070
	.long	0x19a4c116,0x1e376c08,0x2748774c,0x34b0bcb5
	.long	0x19a4c116,0x1e376c08,0x2748774c,0x34b0bcb5
	.long	0x391c0cb3,0x4ed8aa4a,0x5b9cca4f,0x682e6ff3
	.long	0x391c0cb3,0x4ed8aa4a,0x5b9cca4f,0x682e6ff3
	.long	0x748f82ee,0x78a5636f,0x84c87814,0x8cc70208
	.long	0x748f82ee,0x78a5636f,0x84c87814,0x8cc70208
	.long	0x90befffa,0xa4506ceb,0xbef9a3f7,0xc67178f2
	.long	0x90befffa,0xa4506ceb,0xbef9a3f7,0xc67178f2
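# What follows are not K constants but the pshufb/pshufd masks used by
# the SIMD code paths: a big-endian byte-swap mask followed by the two
# shuffle masks ($t4/$t5 in the SSSE3/AVX code) that compact sigma1
# results.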
	.long	0x00010203,0x04050607,0x08090a0b,0x0c0d0e0f
	.long	0x00010203,0x04050607,0x08090a0b,0x0c0d0e0f
	.long	0x03020100,0x0b0a0908,0xffffffff,0xffffffff
	.long	0x03020100,0x0b0a0908,0xffffffff,0xffffffff
	.long	0xffffffff,0xffffffff,0x03020100,0x0b0a0908
	.long	0xffffffff,0xffffffff,0x03020100,0x0b0a0908
	.asciz	"SHA256 block transform for x86_64, CRYPTOGAMS by <appro\@openssl.org>"
.type	$TABLE,\@object
	.quad	0x428a2f98d728ae22,0x7137449123ef65cd
	.quad	0x428a2f98d728ae22,0x7137449123ef65cd
	.quad	0xb5c0fbcfec4d3b2f,0xe9b5dba58189dbbc
	.quad	0xb5c0fbcfec4d3b2f,0xe9b5dba58189dbbc
	.quad	0x3956c25bf348b538,0x59f111f1b605d019
	.quad	0x3956c25bf348b538,0x59f111f1b605d019
	.quad	0x923f82a4af194f9b,0xab1c5ed5da6d8118
	.quad	0x923f82a4af194f9b,0xab1c5ed5da6d8118
	.quad	0xd807aa98a3030242,0x12835b0145706fbe
	.quad	0xd807aa98a3030242,0x12835b0145706fbe
	.quad	0x243185be4ee4b28c,0x550c7dc3d5ffb4e2
	.quad	0x243185be4ee4b28c,0x550c7dc3d5ffb4e2
	.quad	0x72be5d74f27b896f,0x80deb1fe3b1696b1
	.quad	0x72be5d74f27b896f,0x80deb1fe3b1696b1
	.quad	0x9bdc06a725c71235,0xc19bf174cf692694
	.quad	0x9bdc06a725c71235,0xc19bf174cf692694
	.quad	0xe49b69c19ef14ad2,0xefbe4786384f25e3
	.quad	0xe49b69c19ef14ad2,0xefbe4786384f25e3
	.quad	0x0fc19dc68b8cd5b5,0x240ca1cc77ac9c65
	.quad	0x0fc19dc68b8cd5b5,0x240ca1cc77ac9c65
	.quad	0x2de92c6f592b0275,0x4a7484aa6ea6e483
	.quad	0x2de92c6f592b0275,0x4a7484aa6ea6e483
	.quad	0x5cb0a9dcbd41fbd4,0x76f988da831153b5
	.quad	0x5cb0a9dcbd41fbd4,0x76f988da831153b5
	.quad	0x983e5152ee66dfab,0xa831c66d2db43210
	.quad	0x983e5152ee66dfab,0xa831c66d2db43210
	.quad	0xb00327c898fb213f,0xbf597fc7beef0ee4
	.quad	0xb00327c898fb213f,0xbf597fc7beef0ee4
	.quad	0xc6e00bf33da88fc2,0xd5a79147930aa725
	.quad	0xc6e00bf33da88fc2,0xd5a79147930aa725
	.quad	0x06ca6351e003826f,0x142929670a0e6e70
	.quad	0x06ca6351e003826f,0x142929670a0e6e70
	.quad	0x27b70a8546d22ffc,0x2e1b21385c26c926
	.quad	0x27b70a8546d22ffc,0x2e1b21385c26c926
	.quad	0x4d2c6dfc5ac42aed,0x53380d139d95b3df
	.quad	0x4d2c6dfc5ac42aed,0x53380d139d95b3df
	.quad	0x650a73548baf63de,0x766a0abb3c77b2a8
	.quad	0x650a73548baf63de,0x766a0abb3c77b2a8
	.quad	0x81c2c92e47edaee6,0x92722c851482353b
	.quad	0x81c2c92e47edaee6,0x92722c851482353b
	.quad	0xa2bfe8a14cf10364,0xa81a664bbc423001
	.quad	0xa2bfe8a14cf10364,0xa81a664bbc423001
	.quad	0xc24b8b70d0f89791,0xc76c51a30654be30
	.quad	0xc24b8b70d0f89791,0xc76c51a30654be30
	.quad	0xd192e819d6ef5218,0xd69906245565a910
	.quad	0xd192e819d6ef5218,0xd69906245565a910
	.quad	0xf40e35855771202a,0x106aa07032bbd1b8
	.quad	0xf40e35855771202a,0x106aa07032bbd1b8
	.quad	0x19a4c116b8d2d0c8,0x1e376c085141ab53
	.quad	0x19a4c116b8d2d0c8,0x1e376c085141ab53
	.quad	0x2748774cdf8eeb99,0x34b0bcb5e19b48a8
	.quad	0x2748774cdf8eeb99,0x34b0bcb5e19b48a8
	.quad	0x391c0cb3c5c95a63,0x4ed8aa4ae3418acb
	.quad	0x391c0cb3c5c95a63,0x4ed8aa4ae3418acb
	.quad	0x5b9cca4f7763e373,0x682e6ff3d6b2b8a3
	.quad	0x5b9cca4f7763e373,0x682e6ff3d6b2b8a3
	.quad	0x748f82ee5defb2fc,0x78a5636f43172f60
	.quad	0x748f82ee5defb2fc,0x78a5636f43172f60
	.quad	0x84c87814a1f0ab72,0x8cc702081a6439ec
	.quad	0x84c87814a1f0ab72,0x8cc702081a6439ec
	.quad	0x90befffa23631e28,0xa4506cebde82bde9
	.quad	0x90befffa23631e28,0xa4506cebde82bde9
	.quad	0xbef9a3f7b2c67915,0xc67178f2e372532b
	.quad	0xbef9a3f7b2c67915,0xc67178f2e372532b
	.quad	0xca273eceea26619c,0xd186b8c721c0c207
	.quad	0xca273eceea26619c,0xd186b8c721c0c207
	.quad	0xeada7dd6cde0eb1e,0xf57d4f7fee6ed178
	.quad	0xeada7dd6cde0eb1e,0xf57d4f7fee6ed178
	.quad	0x06f067aa72176fba,0x0a637dc5a2c898a6
	.quad	0x06f067aa72176fba,0x0a637dc5a2c898a6
	.quad	0x113f9804bef90dae,0x1b710b35131c471b
	.quad	0x113f9804bef90dae,0x1b710b35131c471b
	.quad	0x28db77f523047d84,0x32caab7b40c72493
	.quad	0x28db77f523047d84,0x32caab7b40c72493
	.quad	0x3c9ebe0a15c9bebc,0x431d67c49c100d4c
	.quad	0x3c9ebe0a15c9bebc,0x431d67c49c100d4c
	.quad	0x4cc5d4becb3e42b6,0x597f299cfc657e2a
	.quad	0x4cc5d4becb3e42b6,0x597f299cfc657e2a
	.quad	0x5fcb6fab3ad6faec,0x6c44198c4a475817
	.quad	0x5fcb6fab3ad6faec,0x6c44198c4a475817
	.quad	0x0001020304050607,0x08090a0b0c0d0e0f
	.quad	0x0001020304050607,0x08090a0b0c0d0e0f
	.asciz	"SHA512 block transform for x86_64, CRYPTOGAMS by <appro\@openssl.org>"
######################################################################
if ($SZ==4 && $shaext) {{{
######################################################################
# Intel SHA Extensions implementation of SHA256 update function.
my ($ctx,$inp,$num,$Tbl)=("%rdi","%rsi","%rdx","%rcx");

my ($Wi,$ABEF,$CDGH,$TMP,$BSWAP,$ABEF_SAVE,$CDGH_SAVE)=map("%xmm$_",(0..2,7..10));
my @MSG=map("%xmm$_",(3..6));
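# Note that sha256rnds2 takes %xmm0 as an implicit third operand, which
# is why the round constants plus message words are always staged in
# $Wi (%xmm0) before each pair of rounds.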
.type	sha256_block_data_order_shaext,\@function,3
sha256_block_data_order_shaext:
$code.=<<___ if ($win64);
	lea	`-8-5*16`(%rsp),%rsp
	movaps	%xmm6,-8-5*16(%rax)
	movaps	%xmm7,-8-4*16(%rax)
	movaps	%xmm8,-8-3*16(%rax)
	movaps	%xmm9,-8-2*16(%rax)
	movaps	%xmm10,-8-1*16(%rax)
	lea	K256+0x80(%rip),$Tbl
	movdqu	($ctx),$ABEF		# DCBA
	movdqu	16($ctx),$CDGH		# HGFE
	movdqa	0x200-0x80($Tbl),$TMP	# byte swap mask
	pshufd	\$0x1b,$ABEF,$Wi	# ABCD
	pshufd	\$0xb1,$ABEF,$ABEF	# CDAB
	pshufd	\$0x1b,$CDGH,$CDGH	# EFGH
	movdqa	$TMP,$BSWAP		# offload
	palignr	\$8,$CDGH,$ABEF		# ABEF
	punpcklqdq $Wi,$CDGH		# CDGH
	movdqu	($inp),@MSG[0]
	movdqu	0x10($inp),@MSG[1]
	movdqu	0x20($inp),@MSG[2]
	movdqu	0x30($inp),@MSG[3]
	movdqa	0*32-0x80($Tbl),$Wi
	movdqa	$CDGH,$CDGH_SAVE	# offload
	sha256rnds2	$ABEF,$CDGH	# 0-3
	pshufd	\$0x0e,$Wi,$Wi
	movdqa	$ABEF,$ABEF_SAVE	# offload
	sha256rnds2	$CDGH,$ABEF
	movdqa	1*32-0x80($Tbl),$Wi
	sha256rnds2	$ABEF,$CDGH	# 4-7
	pshufd	\$0x0e,$Wi,$Wi
	sha256msg1	@MSG[1],@MSG[0]
	sha256rnds2	$CDGH,$ABEF
	movdqa	2*32-0x80($Tbl),$Wi
	sha256rnds2	$ABEF,$CDGH	# 8-11
	pshufd	\$0x0e,$Wi,$Wi
	palignr	\$4,@MSG[2],$TMP
	sha256msg1	@MSG[2],@MSG[1]
	sha256rnds2	$CDGH,$ABEF
	movdqa	3*32-0x80($Tbl),$Wi
	sha256msg2	@MSG[3],@MSG[0]
	sha256rnds2	$ABEF,$CDGH	# 12-15
	pshufd	\$0x0e,$Wi,$Wi
	palignr	\$4,@MSG[3],$TMP
	sha256msg1	@MSG[3],@MSG[2]
	sha256rnds2	$CDGH,$ABEF
for($i=4;$i<16-3;$i++) {
	movdqa	$i*32-0x80($Tbl),$Wi
	sha256msg2	@MSG[0],@MSG[1]
	sha256rnds2	$ABEF,$CDGH	# 16-19...
	pshufd	\$0x0e,$Wi,$Wi
	palignr	\$4,@MSG[0],$TMP
	sha256msg1	@MSG[0],@MSG[3]
	sha256rnds2	$CDGH,$ABEF
	push(@MSG,shift(@MSG));
	movdqa	13*32-0x80($Tbl),$Wi
	sha256msg2	@MSG[0],@MSG[1]
	sha256rnds2	$ABEF,$CDGH	# 52-55
	pshufd	\$0x0e,$Wi,$Wi
	palignr	\$4,@MSG[0],$TMP
	sha256rnds2	$CDGH,$ABEF
	movdqa	14*32-0x80($Tbl),$Wi
	sha256rnds2	$ABEF,$CDGH	# 56-59
	pshufd	\$0x0e,$Wi,$Wi
	sha256msg2	@MSG[1],@MSG[2]
	sha256rnds2	$CDGH,$ABEF
	movdqa	15*32-0x80($Tbl),$Wi
	sha256rnds2	$ABEF,$CDGH	# 60-63
	pshufd	\$0x0e,$Wi,$Wi
	sha256rnds2	$CDGH,$ABEF
	paddd	$CDGH_SAVE,$CDGH
	paddd	$ABEF_SAVE,$ABEF
	pshufd	\$0xb1,$CDGH,$CDGH	# DCHG
	pshufd	\$0x1b,$ABEF,$TMP	# FEBA
	pshufd	\$0xb1,$ABEF,$ABEF	# BAFE
	punpckhqdq $CDGH,$ABEF		# DCBA
	palignr	\$8,$TMP,$CDGH		# HGFE
	movdqu	$CDGH,16($ctx)
$code.=<<___ if ($win64);
	movaps	-8-5*16(%rax),%xmm6
	movaps	-8-4*16(%rax),%xmm7
	movaps	-8-3*16(%rax),%xmm8
	movaps	-8-2*16(%rax),%xmm9
	movaps	-8-1*16(%rax),%xmm10
.size	sha256_block_data_order_shaext,.-sha256_block_data_order_shaext
my ($a,$b,$c,$d,$e,$f,$g,$h);

sub AUTOLOAD()		# thunk [simplified] 32-bit style perlasm
{ my $opcode = $AUTOLOAD; $opcode =~ s/.*:://;
  my $arg = pop;
    $arg = "\$$arg" if ($arg*1 eq $arg);
    $code .= "\t$opcode\t".join(',',$arg,reverse @_)."\n";
}
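# E.g. &ror($a0,$Sigma1[0]) is caught by this AUTOLOAD and emitted as
# "ror \$$Sigma1[0],$a0": the last Perl argument becomes the first
# operand, prefixed with '$' when it is numeric, per AT&T convention.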
	'($a,$b,$c,$d,$e,$f,$g,$h)=@ROT;'.
	'&ror	($a0,$Sigma1[2]-$Sigma1[1])',
	'&ror	($a1,$Sigma0[2]-$Sigma0[1])',
	'&xor	($a4,$g)',			# f^g
	'&ror	($a0,$Sigma1[1]-$Sigma1[0])',
	'&and	($a4,$e)',			# (f^g)&e
	'&add	($h,$SZ*($i&15)."(%rsp)")',	# h+=X[i]+K[i]
	'&xor	($a4,$g)',			# Ch(e,f,g)=((f^g)&e)^g
	'&ror	($a1,$Sigma0[1]-$Sigma0[0])',
	'&xor	($a2,$b)',			# a^b, b^c in next round
	'&add	($h,$a4)',			# h+=Ch(e,f,g)
	'&ror	($a0,$Sigma1[0])',		# Sigma1(e)
	'&and	($a3,$a2)',			# (b^c)&(a^b)
	'&add	($h,$a0)',			# h+=Sigma1(e)
	'&xor	($a3,$b)',			# Maj(a,b,c)=Ch(a^b,c,b)
	'&ror	($a1,$Sigma0[0])',		# Sigma0(a)
	'&add	($d,$h)',			# d+=h
	'&add	($h,$a3)',			# h+=Maj(a,b,c)
	'&add	($a1,$h);'.			# h+=Sigma0(a)
	'($a2,$a3) = ($a3,$a2); unshift(@ROT,pop(@ROT)); $i++;'
######################################################################
if ($SZ==4) {	# SHA256 only
my @X = map("%xmm$_",(0..3));
my ($t0,$t1,$t2,$t3, $t4,$t5) = map("%xmm$_",(4..9));
.type	${func}_ssse3,\@function,3
	mov	%rsp,%r11		# copy %rsp
	shl	\$4,%rdx		# num*16
	sub	\$`$framesz+$win64*16*4`,%rsp
	lea	($inp,%rdx,$SZ),%rdx	# inp+num*16*$SZ
	and	\$-64,%rsp		# align stack frame
	mov	$ctx,$_ctx		# save ctx, 1st arg
	mov	$inp,$_inp		# save inp, 2nd arg
	mov	%rdx,$_end		# save end pointer, "3rd" arg
	mov	%r11,$_rsp		# save copy of %rsp
$code.=<<___ if ($win64);
	movaps	%xmm6,16*$SZ+32(%rsp)
	movaps	%xmm7,16*$SZ+48(%rsp)
	movaps	%xmm8,16*$SZ+64(%rsp)
	movaps	%xmm9,16*$SZ+80(%rsp)
	#movdqa	$TABLE+`$SZ*2*$rounds`+32(%rip),$t4
	#movdqa	$TABLE+`$SZ*2*$rounds`+64(%rip),$t5
	movdqa	$TABLE+`$SZ*2*$rounds`(%rip),$t3
	movdqu	0x00($inp),@X[0]
	movdqu	0x10($inp),@X[1]
	movdqu	0x20($inp),@X[2]
	movdqu	0x30($inp),@X[3]
	lea	$TABLE(%rip),$Tbl
	movdqa	0x00($Tbl),$t0
	movdqa	0x20($Tbl),$t1
	movdqa	0x40($Tbl),$t2
	movdqa	0x60($Tbl),$t3
	movdqa	$t0,0x00(%rsp)
	movdqa	$t1,0x10(%rsp)
	movdqa	$t2,0x20(%rsp)
	movdqa	$t3,0x30(%rsp)
	sub	\$`-16*2*$SZ`,$Tbl	# size optimization
sub Xupdate_256_SSSE3 () {
	'&movdqa	($t0,@X[1]);',
	'&movdqa	($t3,@X[3])',
	'&palignr	($t0,@X[0],$SZ)',	# X[1..4]
	'&palignr	($t3,@X[2],$SZ);',	# X[9..12]
	'&movdqa	($t2,$t0);',
	'&psrld		($t0,$sigma0[2])',
	'&paddd		(@X[0],$t3);',		# X[0..3] += X[9..12]
	'&psrld		($t2,$sigma0[0])',
	'&pshufd	($t3,@X[3],0b11111010)',# X[14..15]
	'&pslld		($t1,8*$SZ-$sigma0[1]);'.
	'&psrld		($t2,$sigma0[1]-$sigma0[0]);'.
	'&pslld		($t1,$sigma0[1]-$sigma0[0]);'.
	'&pxor		($t0,$t1);',		# sigma0(X[1..4])
	'&psrld		($t3,$sigma1[2])',
	'&paddd		(@X[0],$t0);',		# X[0..3] += sigma0(X[1..4])
	'&psrlq		($t2,$sigma1[0])',
	'&psrlq		($t2,$sigma1[1]-$sigma1[0])',
	'&pshufb	($t3,$t4)',		# sigma1(X[14..15])
	'&paddd		(@X[0],$t3)',		# X[0..1] += sigma1(X[14..15])
	'&pshufd	($t3,@X[0],0b01010000)',# X[16..17]
	'&movdqa	($t2,$t3);',
	'&psrld		($t3,$sigma1[2])',
	'&psrlq		($t2,$sigma1[0])',
	'&psrlq		($t2,$sigma1[1]-$sigma1[0])',
	'&movdqa	($t2,16*2*$j."($Tbl)")',
	'&paddd		(@X[0],$t3)'		# X[2..3] += sigma1(X[16..17])
sub SSSE3_256_00_47 () {
my @insns = (&$body,&$body,&$body,&$body);	# 104 instructions
    foreach (Xupdate_256_SSSE3()) {		# 36 instructions
  } else {		# squeeze extra 4% on Westmere and 19% on Atom
	  eval(shift(@insns));			#@
	  eval(shift(@insns));			#@
	  eval(shift(@insns));			#@
	 &palignr	($t0,@X[0],$SZ);	# X[1..4]
	 &palignr	($t3,@X[2],$SZ);	# X[9..12]
	  eval(shift(@insns));			#@
	  eval(shift(@insns));			#@
	 &psrld		($t0,$sigma0[2]);
	 &paddd		(@X[0],$t3);		# X[0..3] += X[9..12]
	  eval(shift(@insns));			#@
	 &psrld		($t2,$sigma0[0]);
	 &pshufd	($t3,@X[3],0b11111010);	# X[14..15]
	  eval(shift(@insns));			#@
	 &pslld		($t1,8*$SZ-$sigma0[1]);
	  eval(shift(@insns));			#@
	  eval(shift(@insns));			#@
	 &psrld		($t2,$sigma0[1]-$sigma0[0]);
	 &pslld		($t1,$sigma0[1]-$sigma0[0]);
	  eval(shift(@insns));			#@
	 &pxor		($t0,$t1);		# sigma0(X[1..4])
	  eval(shift(@insns));			#@
	 &psrld		($t3,$sigma1[2]);
	 &paddd		(@X[0],$t0);		# X[0..3] += sigma0(X[1..4])
	  eval(shift(@insns));			#@
	 &psrlq		($t2,$sigma1[0]);
	  eval(shift(@insns));			#@
	  eval(shift(@insns));			#@
	 &psrlq		($t2,$sigma1[1]-$sigma1[0]);
	  eval(shift(@insns));			#@
	 #&pshufb	($t3,$t4);		# sigma1(X[14..15])
	 &pshufd	($t3,$t3,0b10000000);
	  eval(shift(@insns));			#@
	  eval(shift(@insns));			#@
	 &paddd		(@X[0],$t3);		# X[0..1] += sigma1(X[14..15])
	 &pshufd	($t3,@X[0],0b01010000);	# X[16..17]
	  eval(shift(@insns));			#@
	 &psrld		($t3,$sigma1[2]);
	  eval(shift(@insns));			#@
	 &psrlq		($t2,$sigma1[0]);
	  eval(shift(@insns));			#@
	  eval(shift(@insns));
	  eval(shift(@insns));			#@
	  eval(shift(@insns));
	 &psrlq		($t2,$sigma1[1]-$sigma1[0]);
	  eval(shift(@insns));
	  eval(shift(@insns));
	  eval(shift(@insns));
	  eval(shift(@insns));
	  eval(shift(@insns));
	  eval(shift(@insns));			#@
	 &pshufd	($t3,$t3,0b00001000);
	  eval(shift(@insns));
	  eval(shift(@insns));
	 &movdqa	($t2,16*2*$j."($Tbl)");
	  eval(shift(@insns));			#@
	  eval(shift(@insns));
	  eval(shift(@insns));
	  eval(shift(@insns));
	  eval(shift(@insns));
	 &paddd		(@X[0],$t3);		# X[2..3] += sigma1(X[16..17])
	  eval(shift(@insns));			#@
	  eval(shift(@insns));
	  eval(shift(@insns));
    foreach (@insns) { eval; }			# remaining instructions
    &movdqa	(16*$j."(%rsp)",$t2);
for ($i=0,$j=0; $j<4; $j++) {
	&SSSE3_256_00_47($j,\&body_00_15,@X);
	push(@X,shift(@X));			# rotate(@X)
	&cmpb	($SZ-1+16*2*$SZ."($Tbl)",0);
	&jne	(".Lssse3_00_47");
for ($i=0; $i<16; ) {
	foreach(body_00_15()) { eval; }
	lea	16*$SZ($inp),$inp
$code.=<<___ if ($win64);
	movaps	16*$SZ+32(%rsp),%xmm6
	movaps	16*$SZ+48(%rsp),%xmm7
	movaps	16*$SZ+64(%rsp),%xmm8
	movaps	16*$SZ+80(%rsp),%xmm9
.size	${func}_ssse3,.-${func}_ssse3
######################################################################
if ($SZ==8) {	# SHA512 only
.type	${func}_xop,\@function,3
	mov	%rsp,%r11		# copy %rsp
	shl	\$4,%rdx		# num*16
	sub	\$`$framesz+$win64*16*($SZ==4?4:6)`,%rsp
	lea	($inp,%rdx,$SZ),%rdx	# inp+num*16*$SZ
	and	\$-64,%rsp		# align stack frame
	mov	$ctx,$_ctx		# save ctx, 1st arg
	mov	$inp,$_inp		# save inp, 2nd arg
	mov	%rdx,$_end		# save end pointer, "3rd" arg
	mov	%r11,$_rsp		# save copy of %rsp
$code.=<<___ if ($win64);
	movaps	%xmm6,16*$SZ+32(%rsp)
	movaps	%xmm7,16*$SZ+48(%rsp)
	movaps	%xmm8,16*$SZ+64(%rsp)
	movaps	%xmm9,16*$SZ+80(%rsp)
$code.=<<___ if ($win64 && $SZ>4);
	movaps	%xmm10,16*$SZ+96(%rsp)
	movaps	%xmm11,16*$SZ+112(%rsp)
if ($SZ==4) {	# SHA256
my @X = map("%xmm$_",(0..3));
my ($t0,$t1,$t2,$t3) = map("%xmm$_",(4..7));
	vmovdqa	$TABLE+`$SZ*2*$rounds`(%rip),$t3
	vmovdqu	0x00($inp),@X[0]
	vmovdqu	0x10($inp),@X[1]
	vmovdqu	0x20($inp),@X[2]
	vmovdqu	0x30($inp),@X[3]
	vpshufb	$t3,@X[0],@X[0]
	lea	$TABLE(%rip),$Tbl
	vpshufb	$t3,@X[1],@X[1]
	vpshufb	$t3,@X[2],@X[2]
	vpaddd	0x00($Tbl),@X[0],$t0
	vpshufb	$t3,@X[3],@X[3]
	vpaddd	0x20($Tbl),@X[1],$t1
	vpaddd	0x40($Tbl),@X[2],$t2
	vpaddd	0x60($Tbl),@X[3],$t3
	vmovdqa	$t0,0x00(%rsp)
	vmovdqa	$t1,0x10(%rsp)
	vmovdqa	$t2,0x20(%rsp)
	vmovdqa	$t3,0x30(%rsp)
	sub	\$`-16*2*$SZ`,$Tbl	# size optimization
sub XOP_256_00_47 () {
my @insns = (&$body,&$body,&$body,&$body);	# 104 instructions
	&vpalignr	($t0,@X[1],@X[0],$SZ);	# X[1..4]
	  eval(shift(@insns));
	  eval(shift(@insns));
	&vpalignr	($t3,@X[3],@X[2],$SZ);	# X[9..12]
	  eval(shift(@insns));
	  eval(shift(@insns));
	&vprotd		($t1,$t0,8*$SZ-$sigma0[1]);
	  eval(shift(@insns));
	  eval(shift(@insns));
	&vpsrld		($t0,$t0,$sigma0[2]);
	  eval(shift(@insns));
	  eval(shift(@insns));
	&vpaddd		(@X[0],@X[0],$t3);	# X[0..3] += X[9..12]
	  eval(shift(@insns));
	  eval(shift(@insns));
	  eval(shift(@insns));
	  eval(shift(@insns));
	&vprotd		($t2,$t1,$sigma0[1]-$sigma0[0]);
	  eval(shift(@insns));
	  eval(shift(@insns));
	&vpxor		($t0,$t0,$t1);
	  eval(shift(@insns));
	  eval(shift(@insns));
	  eval(shift(@insns));
	  eval(shift(@insns));
	&vprotd		($t3,@X[3],8*$SZ-$sigma1[1]);
	  eval(shift(@insns));
	  eval(shift(@insns));
	&vpxor		($t0,$t0,$t2);		# sigma0(X[1..4])
	  eval(shift(@insns));
	  eval(shift(@insns));
	&vpsrld		($t2,@X[3],$sigma1[2]);
	  eval(shift(@insns));
	  eval(shift(@insns));
	&vpaddd		(@X[0],@X[0],$t0);	# X[0..3] += sigma0(X[1..4])
	  eval(shift(@insns));
	  eval(shift(@insns));
	&vprotd		($t1,$t3,$sigma1[1]-$sigma1[0]);
	  eval(shift(@insns));
	  eval(shift(@insns));
	&vpxor		($t3,$t3,$t2);
	  eval(shift(@insns));
	  eval(shift(@insns));
	  eval(shift(@insns));
	  eval(shift(@insns));
	&vpxor		($t3,$t3,$t1);		# sigma1(X[14..15])
	  eval(shift(@insns));
	  eval(shift(@insns));
	  eval(shift(@insns));
	  eval(shift(@insns));
	&vpsrldq	($t3,$t3,8);
	  eval(shift(@insns));
	  eval(shift(@insns));
	  eval(shift(@insns));
	  eval(shift(@insns));
	&vpaddd		(@X[0],@X[0],$t3);	# X[0..1] += sigma1(X[14..15])
	  eval(shift(@insns));
	  eval(shift(@insns));
	  eval(shift(@insns));
	  eval(shift(@insns));
	&vprotd		($t3,@X[0],8*$SZ-$sigma1[1]);
	  eval(shift(@insns));
	  eval(shift(@insns));
	&vpsrld		($t2,@X[0],$sigma1[2]);
	  eval(shift(@insns));
	  eval(shift(@insns));
	&vprotd		($t1,$t3,$sigma1[1]-$sigma1[0]);
	  eval(shift(@insns));
	  eval(shift(@insns));
	&vpxor		($t3,$t3,$t2);
	  eval(shift(@insns));
	  eval(shift(@insns));
	  eval(shift(@insns));
	  eval(shift(@insns));
	&vpxor		($t3,$t3,$t1);		# sigma1(X[16..17])
	  eval(shift(@insns));
	  eval(shift(@insns));
	  eval(shift(@insns));
	  eval(shift(@insns));
	&vpslldq	($t3,$t3,8);		# 22 instructions
	  eval(shift(@insns));
	  eval(shift(@insns));
	  eval(shift(@insns));
	  eval(shift(@insns));
	&vpaddd		(@X[0],@X[0],$t3);	# X[2..3] += sigma1(X[16..17])
	  eval(shift(@insns));
	  eval(shift(@insns));
	  eval(shift(@insns));
	  eval(shift(@insns));
	&vpaddd		($t2,@X[0],16*2*$j."($Tbl)");
	  foreach (@insns) { eval; }		# remaining instructions
	&vmovdqa	(16*$j."(%rsp)",$t2);
for ($i=0,$j=0; $j<4; $j++) {
	&XOP_256_00_47($j,\&body_00_15,@X);
	push(@X,shift(@X));			# rotate(@X)
	&cmpb	($SZ-1+16*2*$SZ."($Tbl)",0);
	&jne	(".Lxop_00_47");
for ($i=0; $i<16; ) {
	foreach(body_00_15()) { eval; }
my @X = map("%xmm$_",(0..7));
my ($t0,$t1,$t2,$t3) = map("%xmm$_",(8..11));
	vmovdqa	$TABLE+`$SZ*2*$rounds`(%rip),$t3
	vmovdqu	0x00($inp),@X[0]
	lea	$TABLE+0x80(%rip),$Tbl	# size optimization
	vmovdqu	0x10($inp),@X[1]
	vmovdqu	0x20($inp),@X[2]
	vpshufb	$t3,@X[0],@X[0]
	vmovdqu	0x30($inp),@X[3]
	vpshufb	$t3,@X[1],@X[1]
	vmovdqu	0x40($inp),@X[4]
	vpshufb	$t3,@X[2],@X[2]
	vmovdqu	0x50($inp),@X[5]
	vpshufb	$t3,@X[3],@X[3]
	vmovdqu	0x60($inp),@X[6]
	vpshufb	$t3,@X[4],@X[4]
	vmovdqu	0x70($inp),@X[7]
	vpshufb	$t3,@X[5],@X[5]
	vpaddq	-0x80($Tbl),@X[0],$t0
	vpshufb	$t3,@X[6],@X[6]
	vpaddq	-0x60($Tbl),@X[1],$t1
	vpshufb	$t3,@X[7],@X[7]
	vpaddq	-0x40($Tbl),@X[2],$t2
	vpaddq	-0x20($Tbl),@X[3],$t3
	vmovdqa	$t0,0x00(%rsp)
	vpaddq	0x00($Tbl),@X[4],$t0
	vmovdqa	$t1,0x10(%rsp)
	vpaddq	0x20($Tbl),@X[5],$t1
	vmovdqa	$t2,0x20(%rsp)
	vpaddq	0x40($Tbl),@X[6],$t2
	vmovdqa	$t3,0x30(%rsp)
	vpaddq	0x60($Tbl),@X[7],$t3
	vmovdqa	$t0,0x40(%rsp)
	vmovdqa	$t1,0x50(%rsp)
	vmovdqa	$t2,0x60(%rsp)
	vmovdqa	$t3,0x70(%rsp)
	add	\$`16*2*$SZ`,$Tbl
sub XOP_512_00_47 () {
my @insns = (&$body,&$body);			# 52 instructions
	&vpalignr	($t0,@X[1],@X[0],$SZ);	# X[1..2]
	  eval(shift(@insns));
	  eval(shift(@insns));
	&vpalignr	($t3,@X[5],@X[4],$SZ);	# X[9..10]
	  eval(shift(@insns));
	  eval(shift(@insns));
	&vprotq		($t1,$t0,8*$SZ-$sigma0[1]);
	  eval(shift(@insns));
	  eval(shift(@insns));
	&vpsrlq		($t0,$t0,$sigma0[2]);
	  eval(shift(@insns));
	  eval(shift(@insns));
	&vpaddq		(@X[0],@X[0],$t3);	# X[0..1] += X[9..10]
	  eval(shift(@insns));
	  eval(shift(@insns));
	  eval(shift(@insns));
	  eval(shift(@insns));
	&vprotq		($t2,$t1,$sigma0[1]-$sigma0[0]);
	  eval(shift(@insns));
	  eval(shift(@insns));
	&vpxor		($t0,$t0,$t1);
	  eval(shift(@insns));
	  eval(shift(@insns));
	  eval(shift(@insns));
	  eval(shift(@insns));
	&vprotq		($t3,@X[7],8*$SZ-$sigma1[1]);
	  eval(shift(@insns));
	  eval(shift(@insns));
	&vpxor		($t0,$t0,$t2);		# sigma0(X[1..2])
	  eval(shift(@insns));
	  eval(shift(@insns));
	&vpsrlq		($t2,@X[7],$sigma1[2]);
	  eval(shift(@insns));
	  eval(shift(@insns));
	&vpaddq		(@X[0],@X[0],$t0);	# X[0..1] += sigma0(X[1..2])
	  eval(shift(@insns));
	  eval(shift(@insns));
	&vprotq		($t1,$t3,$sigma1[1]-$sigma1[0]);
	  eval(shift(@insns));
	  eval(shift(@insns));
	&vpxor		($t3,$t3,$t2);
	  eval(shift(@insns));
	  eval(shift(@insns));
	  eval(shift(@insns));
	  eval(shift(@insns));
	&vpxor		($t3,$t3,$t1);		# sigma1(X[14..15])
	  eval(shift(@insns));
	  eval(shift(@insns));
	  eval(shift(@insns));
	  eval(shift(@insns));
	&vpaddq		(@X[0],@X[0],$t3);	# X[0..1] += sigma1(X[14..15])
	  eval(shift(@insns));
	  eval(shift(@insns));
	  eval(shift(@insns));
	  eval(shift(@insns));
	&vpaddq		($t2,@X[0],16*2*$j-0x80."($Tbl)");
	  foreach (@insns) { eval; }		# remaining instructions
	&vmovdqa	(16*$j."(%rsp)",$t2);
for ($i=0,$j=0; $j<8; $j++) {
	&XOP_512_00_47($j,\&body_00_15,@X);
	push(@X,shift(@X));			# rotate(@X)
	&cmpb	($SZ-1+16*2*$SZ-0x80."($Tbl)",0);
	&jne	(".Lxop_00_47");
for ($i=0; $i<16; ) {
	foreach(body_00_15()) { eval; }
	lea	16*$SZ($inp),$inp
$code.=<<___ if ($win64);
	movaps	16*$SZ+32(%rsp),%xmm6
	movaps	16*$SZ+48(%rsp),%xmm7
	movaps	16*$SZ+64(%rsp),%xmm8
	movaps	16*$SZ+80(%rsp),%xmm9
$code.=<<___ if ($win64 && $SZ>4);
	movaps	16*$SZ+96(%rsp),%xmm10
	movaps	16*$SZ+112(%rsp),%xmm11
.size	${func}_xop,.-${func}_xop
######################################################################
# AVX+shrd code path
local *ror = sub { &shrd(@_[0],@_) };
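# With both register operands identical, shrd performs the same
# rotation as ror; the double-shift form is what buys the improvement
# marked (**) in the performance table above, notably on Sandy Bridge.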
.type	${func}_avx,\@function,3
	mov	%rsp,%r11		# copy %rsp
	shl	\$4,%rdx		# num*16
	sub	\$`$framesz+$win64*16*($SZ==4?4:6)`,%rsp
	lea	($inp,%rdx,$SZ),%rdx	# inp+num*16*$SZ
	and	\$-64,%rsp		# align stack frame
	mov	$ctx,$_ctx		# save ctx, 1st arg
	mov	$inp,$_inp		# save inp, 2nd arg
	mov	%rdx,$_end		# save end pointer, "3rd" arg
	mov	%r11,$_rsp		# save copy of %rsp
$code.=<<___ if ($win64);
	movaps	%xmm6,16*$SZ+32(%rsp)
	movaps	%xmm7,16*$SZ+48(%rsp)
	movaps	%xmm8,16*$SZ+64(%rsp)
	movaps	%xmm9,16*$SZ+80(%rsp)
$code.=<<___ if ($win64 && $SZ>4);
	movaps	%xmm10,16*$SZ+96(%rsp)
	movaps	%xmm11,16*$SZ+112(%rsp)
if ($SZ==4) {	# SHA256
my @X = map("%xmm$_",(0..3));
my ($t0,$t1,$t2,$t3, $t4,$t5) = map("%xmm$_",(4..9));
	vmovdqa	$TABLE+`$SZ*2*$rounds`+32(%rip),$t4
	vmovdqa	$TABLE+`$SZ*2*$rounds`+64(%rip),$t5
	vmovdqa	$TABLE+`$SZ*2*$rounds`(%rip),$t3
	vmovdqu	0x00($inp),@X[0]
	vmovdqu	0x10($inp),@X[1]
	vmovdqu	0x20($inp),@X[2]
	vmovdqu	0x30($inp),@X[3]
	vpshufb	$t3,@X[0],@X[0]
	lea	$TABLE(%rip),$Tbl
	vpshufb	$t3,@X[1],@X[1]
	vpshufb	$t3,@X[2],@X[2]
	vpaddd	0x00($Tbl),@X[0],$t0
	vpshufb	$t3,@X[3],@X[3]
	vpaddd	0x20($Tbl),@X[1],$t1
	vpaddd	0x40($Tbl),@X[2],$t2
	vpaddd	0x60($Tbl),@X[3],$t3
	vmovdqa	$t0,0x00(%rsp)
	vmovdqa	$t1,0x10(%rsp)
	vmovdqa	$t2,0x20(%rsp)
	vmovdqa	$t3,0x30(%rsp)
	sub	\$`-16*2*$SZ`,$Tbl	# size optimization
sub Xupdate_256_AVX () {
	'&vpalignr	($t0,@X[1],@X[0],$SZ)',	# X[1..4]
	'&vpalignr	($t3,@X[3],@X[2],$SZ)',	# X[9..12]
	'&vpsrld	($t2,$t0,$sigma0[0]);',
	'&vpaddd	(@X[0],@X[0],$t3)',	# X[0..3] += X[9..12]
	'&vpsrld	($t3,$t0,$sigma0[2])',
	'&vpslld	($t1,$t0,8*$SZ-$sigma0[1]);',
	'&vpxor		($t0,$t3,$t2)',
	'&vpshufd	($t3,@X[3],0b11111010)',# X[14..15]
	'&vpsrld	($t2,$t2,$sigma0[1]-$sigma0[0]);',
	'&vpxor		($t0,$t0,$t1)',
	'&vpslld	($t1,$t1,$sigma0[1]-$sigma0[0]);',
	'&vpxor		($t0,$t0,$t2)',
	'&vpsrld	($t2,$t3,$sigma1[2]);',
	'&vpxor		($t0,$t0,$t1)',		# sigma0(X[1..4])
	'&vpsrlq	($t3,$t3,$sigma1[0]);',
	'&vpaddd	(@X[0],@X[0],$t0)',	# X[0..3] += sigma0(X[1..4])
	'&vpxor		($t2,$t2,$t3);',
	'&vpsrlq	($t3,$t3,$sigma1[1]-$sigma1[0])',
	'&vpxor		($t2,$t2,$t3)',
	'&vpshufb	($t2,$t2,$t4)',		# sigma1(X[14..15])
	'&vpaddd	(@X[0],@X[0],$t2)',	# X[0..1] += sigma1(X[14..15])
	'&vpshufd	($t3,@X[0],0b01010000)',# X[16..17]
	'&vpsrld	($t2,$t3,$sigma1[2])',
	'&vpsrlq	($t3,$t3,$sigma1[0])',
	'&vpxor		($t2,$t2,$t3);',
	'&vpsrlq	($t3,$t3,$sigma1[1]-$sigma1[0])',
	'&vpxor		($t2,$t2,$t3)',
	'&vpshufb	($t2,$t2,$t5)',
	'&vpaddd	(@X[0],@X[0],$t2)'	# X[2..3] += sigma1(X[16..17])
sub AVX_256_00_47 () {
my @insns = (&$body,&$body,&$body,&$body);	# 104 instructions
    foreach (Xupdate_256_AVX()) {		# 29 instructions
	  eval(shift(@insns));
	  eval(shift(@insns));
	  eval(shift(@insns));
	&vpaddd		($t2,@X[0],16*2*$j."($Tbl)");
	  foreach (@insns) { eval; }		# remaining instructions
	&vmovdqa	(16*$j."(%rsp)",$t2);
for ($i=0,$j=0; $j<4; $j++) {
	&AVX_256_00_47($j,\&body_00_15,@X);
	push(@X,shift(@X));			# rotate(@X)
	&cmpb	($SZ-1+16*2*$SZ."($Tbl)",0);
	&jne	(".Lavx_00_47");
for ($i=0; $i<16; ) {
	foreach(body_00_15()) { eval; }
my @X = map("%xmm$_",(0..7));
my ($t0,$t1,$t2,$t3) = map("%xmm$_",(8..11));
	vmovdqa	$TABLE+`$SZ*2*$rounds`(%rip),$t3
	vmovdqu	0x00($inp),@X[0]
	lea	$TABLE+0x80(%rip),$Tbl	# size optimization
	vmovdqu	0x10($inp),@X[1]
	vmovdqu	0x20($inp),@X[2]
	vpshufb	$t3,@X[0],@X[0]
	vmovdqu	0x30($inp),@X[3]
	vpshufb	$t3,@X[1],@X[1]
	vmovdqu	0x40($inp),@X[4]
	vpshufb	$t3,@X[2],@X[2]
	vmovdqu	0x50($inp),@X[5]
	vpshufb	$t3,@X[3],@X[3]
	vmovdqu	0x60($inp),@X[6]
	vpshufb	$t3,@X[4],@X[4]
	vmovdqu	0x70($inp),@X[7]
	vpshufb	$t3,@X[5],@X[5]
	vpaddq	-0x80($Tbl),@X[0],$t0
	vpshufb	$t3,@X[6],@X[6]
	vpaddq	-0x60($Tbl),@X[1],$t1
	vpshufb	$t3,@X[7],@X[7]
	vpaddq	-0x40($Tbl),@X[2],$t2
	vpaddq	-0x20($Tbl),@X[3],$t3
	vmovdqa	$t0,0x00(%rsp)
	vpaddq	0x00($Tbl),@X[4],$t0
	vmovdqa	$t1,0x10(%rsp)
	vpaddq	0x20($Tbl),@X[5],$t1
	vmovdqa	$t2,0x20(%rsp)
	vpaddq	0x40($Tbl),@X[6],$t2
	vmovdqa	$t3,0x30(%rsp)
	vpaddq	0x60($Tbl),@X[7],$t3
	vmovdqa	$t0,0x40(%rsp)
	vmovdqa	$t1,0x50(%rsp)
	vmovdqa	$t2,0x60(%rsp)
	vmovdqa	$t3,0x70(%rsp)
	add	\$`16*2*$SZ`,$Tbl
sub Xupdate_512_AVX () {
	'&vpalignr	($t0,@X[1],@X[0],$SZ)',	# X[1..2]
	'&vpalignr	($t3,@X[5],@X[4],$SZ)',	# X[9..10]
	'&vpsrlq	($t2,$t0,$sigma0[0])',
	'&vpaddq	(@X[0],@X[0],$t3);',	# X[0..1] += X[9..10]
	'&vpsrlq	($t3,$t0,$sigma0[2])',
	'&vpsllq	($t1,$t0,8*$SZ-$sigma0[1]);',
	'&vpxor		($t0,$t3,$t2)',
	'&vpsrlq	($t2,$t2,$sigma0[1]-$sigma0[0]);',
	'&vpxor		($t0,$t0,$t1)',
	'&vpsllq	($t1,$t1,$sigma0[1]-$sigma0[0]);',
	'&vpxor		($t0,$t0,$t2)',
	'&vpsrlq	($t3,@X[7],$sigma1[2]);',
	'&vpxor		($t0,$t0,$t1)',		# sigma0(X[1..2])
	'&vpsllq	($t2,@X[7],8*$SZ-$sigma1[1]);',
	'&vpaddq	(@X[0],@X[0],$t0)',	# X[0..1] += sigma0(X[1..2])
	'&vpsrlq	($t1,@X[7],$sigma1[0]);',
	'&vpxor		($t3,$t3,$t2)',
	'&vpsllq	($t2,$t2,$sigma1[1]-$sigma1[0]);',
	'&vpxor		($t3,$t3,$t1)',
	'&vpsrlq	($t1,$t1,$sigma1[1]-$sigma1[0]);',
	'&vpxor		($t3,$t3,$t2)',
	'&vpxor		($t3,$t3,$t1)',		# sigma1(X[14..15])
	'&vpaddq	(@X[0],@X[0],$t3)',	# X[0..1] += sigma1(X[14..15])
sub AVX_512_00_47 () {
my @insns = (&$body,&$body);			# 52 instructions
    foreach (Xupdate_512_AVX()) {		# 23 instructions
	  eval(shift(@insns));
	  eval(shift(@insns));
	&vpaddq		($t2,@X[0],16*2*$j-0x80."($Tbl)");
	  foreach (@insns) { eval; }		# remaining instructions
	&vmovdqa	(16*$j."(%rsp)",$t2);
for ($i=0,$j=0; $j<8; $j++) {
	&AVX_512_00_47($j,\&body_00_15,@X);
	push(@X,shift(@X));			# rotate(@X)
	&cmpb	($SZ-1+16*2*$SZ-0x80."($Tbl)",0);
	&jne	(".Lavx_00_47");
for ($i=0; $i<16; ) {
	foreach(body_00_15()) { eval; }
	lea	16*$SZ($inp),$inp
$code.=<<___ if ($win64);
	movaps	16*$SZ+32(%rsp),%xmm6
	movaps	16*$SZ+48(%rsp),%xmm7
	movaps	16*$SZ+64(%rsp),%xmm8
	movaps	16*$SZ+80(%rsp),%xmm9
$code.=<<___ if ($win64 && $SZ>4);
	movaps	16*$SZ+96(%rsp),%xmm10
	movaps	16*$SZ+112(%rsp),%xmm11
.size	${func}_avx,.-${func}_avx
######################################################################
# AVX2+BMI code path
my $a5=$SZ==4?"%esi":"%rsi";	# zap $inp
sub bodyx_00_15 () {
	# at start $a1 should be zero, $a3 - $b^$c and $a4 copy of $f
	'($a,$b,$c,$d,$e,$f,$g,$h)=@ROT;'.
	'&add	($h,(32*($i/(16/$SZ))+$SZ*($i%(16/$SZ)))%$PUSH8.$base)',	# h+=X[i]+K[i]
	'&and	($a4,$e)',		# f&e
	'&rorx	($a0,$e,$Sigma1[2])',
	'&rorx	($a2,$e,$Sigma1[1])',
	'&lea	($a,"($a,$a1)")',	# h+=Sigma0(a) from the past
	'&lea	($h,"($h,$a4)")',
	'&andn	($a4,$e,$g)',		# ~e&g
	'&rorx	($a1,$e,$Sigma1[0])',
	'&lea	($h,"($h,$a4)")',	# h+=Ch(e,f,g)=(e&f)+(~e&g)
	'&xor	($a0,$a1)',		# Sigma1(e)
	'&rorx	($a4,$a,$Sigma0[2])',
	'&lea	($h,"($h,$a0)")',	# h+=Sigma1(e)
	'&xor	($a2,$b)',		# a^b, b^c in next round
	'&rorx	($a1,$a,$Sigma0[1])',
	'&rorx	($a0,$a,$Sigma0[0])',
	'&lea	($d,"($d,$h)")',	# d+=h
	'&and	($a3,$a2)',		# (b^c)&(a^b)
	'&xor	($a3,$b)',		# Maj(a,b,c)=Ch(a^b,c,b)
	'&xor	($a1,$a0)',		# Sigma0(a)
	'&lea	($h,"($h,$a3)");'.	# h+=Maj(a,b,c)
	'&mov	($a4,$e)',		# copy of f in future
	'($a2,$a3) = ($a3,$a2); unshift(@ROT,pop(@ROT)); $i++;'
	# and at the finish one has to $a+=$a1
.type	${func}_avx2,\@function,3
	mov	%rsp,%r11		# copy %rsp
	sub	\$`2*$SZ*$rounds+4*8+$win64*16*($SZ==4?4:6)`,%rsp
	shl	\$4,%rdx		# num*16
	and	\$-256*$SZ,%rsp		# align stack frame
	lea	($inp,%rdx,$SZ),%rdx	# inp+num*16*$SZ
	add	\$`2*$SZ*($rounds-8)`,%rsp
	mov	$ctx,$_ctx		# save ctx, 1st arg
	mov	$inp,$_inp		# save inp, 2nd arg
	mov	%rdx,$_end		# save end pointer, "3rd" arg
	mov	%r11,$_rsp		# save copy of %rsp
$code.=<<___ if ($win64);
	movaps	%xmm6,16*$SZ+32(%rsp)
	movaps	%xmm7,16*$SZ+48(%rsp)
	movaps	%xmm8,16*$SZ+64(%rsp)
	movaps	%xmm9,16*$SZ+80(%rsp)
$code.=<<___ if ($win64 && $SZ>4);
	movaps	%xmm10,16*$SZ+96(%rsp)
	movaps	%xmm11,16*$SZ+112(%rsp)
	sub	\$-16*$SZ,$inp		# inp++, size optimization
	mov	$inp,%r12		# borrow $T1
	cmp	%rdx,$inp		# $_end
	cmove	%rsp,%r12		# next block or random data
if ($SZ==4) {	# SHA256
my @X = map("%ymm$_",(0..3));
my ($t0,$t1,$t2,$t3, $t4,$t5) = map("%ymm$_",(4..9));
	vmovdqa	$TABLE+`$SZ*2*$rounds`+32(%rip),$t4
	vmovdqa	$TABLE+`$SZ*2*$rounds`+64(%rip),$t5
	vmovdqa	$TABLE+`$SZ*2*$rounds`(%rip),$t3
	vmovdqu	-16*$SZ+0($inp),%xmm0
	vmovdqu	-16*$SZ+16($inp),%xmm1
	vmovdqu	-16*$SZ+32($inp),%xmm2
	vmovdqu	-16*$SZ+48($inp),%xmm3
	#mov	$inp,$_inp		# offload $inp
	vinserti128	\$1,(%r12),@X[0],@X[0]
	vinserti128	\$1,16(%r12),@X[1],@X[1]
	vpshufb	$t3,@X[0],@X[0]
	vinserti128	\$1,32(%r12),@X[2],@X[2]
	vpshufb	$t3,@X[1],@X[1]
	vinserti128	\$1,48(%r12),@X[3],@X[3]
	lea	$TABLE(%rip),$Tbl
	vpshufb	$t3,@X[2],@X[2]
	vpaddd	0x00($Tbl),@X[0],$t0
	vpshufb	$t3,@X[3],@X[3]
	vpaddd	0x20($Tbl),@X[1],$t1
	vpaddd	0x40($Tbl),@X[2],$t2
	vpaddd	0x60($Tbl),@X[3],$t3
	vmovdqa	$t0,0x00(%rsp)
	vmovdqa	$t1,0x20(%rsp)
	lea	-$PUSH8(%rsp),%rsp
	vmovdqa	$t2,0x00(%rsp)
	vmovdqa	$t3,0x20(%rsp)
	sub	\$-16*2*$SZ,$Tbl	# size optimization
sub AVX2_256_00_47 () {
my @insns = (&$body,&$body,&$body,&$body);	# 96 instructions
my $base = "+2*$PUSH8(%rsp)";
	&lea	("%rsp","-$PUSH8(%rsp)")	if (($j%2)==0);
    foreach (Xupdate_256_AVX()) {		# 29 instructions
	  eval(shift(@insns));
	  eval(shift(@insns));
	  eval(shift(@insns));
	&vpaddd		($t2,@X[0],16*2*$j."($Tbl)");
	  foreach (@insns) { eval; }		# remaining instructions
	&vmovdqa	((32*$j)%$PUSH8."(%rsp)",$t2);
for ($i=0,$j=0; $j<4; $j++) {
	&AVX2_256_00_47($j,\&bodyx_00_15,@X);
	push(@X,shift(@X));			# rotate(@X)
	&lea	($Tbl,16*2*$SZ."($Tbl)");
	&cmpb	(($SZ-1)."($Tbl)",0);
	&jne	(".Lavx2_00_47");
for ($i=0; $i<16; ) {
	my $base=$i<8?"+$PUSH8(%rsp)":"(%rsp)";
	foreach(bodyx_00_15()) { eval; }
my @X = map("%ymm$_",(0..7));
my ($t0,$t1,$t2,$t3) = map("%ymm$_",(8..11));
	vmovdqu	-16*$SZ($inp),%xmm0
	vmovdqu	-16*$SZ+16($inp),%xmm1
	vmovdqu	-16*$SZ+32($inp),%xmm2
	lea	$TABLE+0x80(%rip),$Tbl	# size optimization
	vmovdqu	-16*$SZ+48($inp),%xmm3
	vmovdqu	-16*$SZ+64($inp),%xmm4
	vmovdqu	-16*$SZ+80($inp),%xmm5
	vmovdqu	-16*$SZ+96($inp),%xmm6
	vmovdqu	-16*$SZ+112($inp),%xmm7
	#mov	$inp,$_inp		# offload $inp
	vmovdqa	`$SZ*2*$rounds-0x80`($Tbl),$t2
	vinserti128	\$1,(%r12),@X[0],@X[0]
	vinserti128	\$1,16(%r12),@X[1],@X[1]
	vpshufb	$t2,@X[0],@X[0]
	vinserti128	\$1,32(%r12),@X[2],@X[2]
	vpshufb	$t2,@X[1],@X[1]
	vinserti128	\$1,48(%r12),@X[3],@X[3]
	vpshufb	$t2,@X[2],@X[2]
	vinserti128	\$1,64(%r12),@X[4],@X[4]
	vpshufb	$t2,@X[3],@X[3]
	vinserti128	\$1,80(%r12),@X[5],@X[5]
	vpshufb	$t2,@X[4],@X[4]
	vinserti128	\$1,96(%r12),@X[6],@X[6]
	vpshufb	$t2,@X[5],@X[5]
	vinserti128	\$1,112(%r12),@X[7],@X[7]
	vpaddq	-0x80($Tbl),@X[0],$t0
	vpshufb	$t2,@X[6],@X[6]
	vpaddq	-0x60($Tbl),@X[1],$t1
	vpshufb	$t2,@X[7],@X[7]
	vpaddq	-0x40($Tbl),@X[2],$t2
	vpaddq	-0x20($Tbl),@X[3],$t3
	vmovdqa	$t0,0x00(%rsp)
	vpaddq	0x00($Tbl),@X[4],$t0
	vmovdqa	$t1,0x20(%rsp)
	vpaddq	0x20($Tbl),@X[5],$t1
	vmovdqa	$t2,0x40(%rsp)
	vpaddq	0x40($Tbl),@X[6],$t2
	vmovdqa	$t3,0x60(%rsp)
	lea	-$PUSH8(%rsp),%rsp
	vpaddq	0x60($Tbl),@X[7],$t3
	vmovdqa	$t0,0x00(%rsp)
	vmovdqa	$t1,0x20(%rsp)
	vmovdqa	$t2,0x40(%rsp)
	vmovdqa	$t3,0x60(%rsp)
sub AVX2_512_00_47 () {
my @insns = (&$body,&$body);			# 48 instructions
my $base = "+2*$PUSH8(%rsp)";
	&lea	("%rsp","-$PUSH8(%rsp)")	if (($j%4)==0);
    foreach (Xupdate_512_AVX()) {		# 23 instructions
	  eval(shift(@insns));
	  eval(shift(@insns));
	  eval(shift(@insns));
	&vpaddq		($t2,@X[0],16*2*$j-0x80."($Tbl)");
	  foreach (@insns) { eval; }		# remaining instructions
	&vmovdqa	((32*$j)%$PUSH8."(%rsp)",$t2);
for ($i=0,$j=0; $j<8; $j++) {
	&AVX2_512_00_47($j,\&bodyx_00_15,@X);
	push(@X,shift(@X));			# rotate(@X)
	&lea	($Tbl,16*2*$SZ."($Tbl)");
	&cmpb	(($SZ-1-0x80)."($Tbl)",0);
	&jne	(".Lavx2_00_47");
for ($i=0; $i<16; ) {
	my $base=$i<8?"+$PUSH8(%rsp)":"(%rsp)";
	foreach(bodyx_00_15()) { eval; }
	mov	`2*$SZ*$rounds`(%rsp),$ctx	# $_ctx
	#mov	`2*$SZ*$rounds+8`(%rsp),$inp	# $_inp
	lea	`2*$SZ*($rounds-8)`(%rsp),$Tbl
	cmp	`$PUSH8+2*8`($Tbl),$inp		# $_end
for ($i=0; $i<8; ) {
	my $base="+16($Tbl)";
	foreach(bodyx_00_15()) { eval; }
	lea	-$PUSH8($Tbl),$Tbl
	mov	`2*$SZ*$rounds`(%rsp),$ctx	# $_ctx
	#mov	`2*$SZ*$rounds+8`(%rsp),$inp	# $_inp
	lea	`2*$SZ*($rounds-8)`(%rsp),%rsp
	lea	`2*16*$SZ`($inp),$inp	# inp+=2
	cmove	%rsp,%r12		# next block or stale data
$code.=<<___ if ($win64);
	movaps	16*$SZ+32(%rsp),%xmm6
	movaps	16*$SZ+48(%rsp),%xmm7
	movaps	16*$SZ+64(%rsp),%xmm8
	movaps	16*$SZ+80(%rsp),%xmm9
$code.=<<___ if ($win64 && $SZ>4);
	movaps	16*$SZ+96(%rsp),%xmm10
	movaps	16*$SZ+112(%rsp),%xmm11
.size	${func}_avx2,.-${func}_avx2
# EXCEPTION_DISPOSITION handler (EXCEPTION_RECORD *rec,ULONG64 frame,
#		CONTEXT *context,DISPATCHER_CONTEXT *disp)
.extern	__imp_RtlVirtualUnwind
.type	se_handler,\@abi-omnipotent
	mov	120($context),%rax	# pull context->Rax
	mov	248($context),%rbx	# pull context->Rip
	mov	8($disp),%rsi		# disp->ImageBase
	mov	56($disp),%r11		# disp->HandlerData
	mov	0(%r11),%r10d		# HandlerData[0]
	lea	(%rsi,%r10),%r10	# prologue label
	cmp	%r10,%rbx		# context->Rip<prologue label
	mov	152($context),%rax	# pull context->Rsp
	mov	4(%r11),%r10d		# HandlerData[1]
	lea	(%rsi,%r10),%r10	# epilogue label
	cmp	%r10,%rbx		# context->Rip>=epilogue label
$code.=<<___ if ($avx>1);
	lea	.Lavx2_shortcut(%rip),%r10
	cmp	%r10,%rbx		# context->Rip<avx2_shortcut
	add	\$`2*$SZ*($rounds-8)`,%rax
	mov	%rax,%rsi		# put aside Rsp
	mov	16*$SZ+3*8(%rax),%rax	# pull $_rsp
	mov	%rbx,144($context)	# restore context->Rbx
	mov	%rbp,160($context)	# restore context->Rbp
	mov	%r12,216($context)	# restore context->R12
	mov	%r13,224($context)	# restore context->R13
	mov	%r14,232($context)	# restore context->R14
	mov	%r15,240($context)	# restore context->R15
	lea	.Lepilogue(%rip),%r10
	jb	.Lin_prologue		# non-AVX code
	lea	16*$SZ+4*8(%rsi),%rsi	# Xmm6- save area
	lea	512($context),%rdi	# &context.Xmm6
	mov	\$`$SZ==4?8:12`,%ecx
	.long	0xa548f3fc		# cld; rep movsq
	mov	%rax,152($context)	# restore context->Rsp
	mov	%rsi,168($context)	# restore context->Rsi
	mov	%rdi,176($context)	# restore context->Rdi
	mov	40($disp),%rdi		# disp->ContextRecord
	mov	$context,%rsi		# context
	mov	\$154,%ecx		# sizeof(CONTEXT)
	.long	0xa548f3fc		# cld; rep movsq
	xor	%rcx,%rcx		# arg1, UNW_FLAG_NHANDLER
	mov	8(%rsi),%rdx		# arg2, disp->ImageBase
	mov	0(%rsi),%r8		# arg3, disp->ControlPc
	mov	16(%rsi),%r9		# arg4, disp->FunctionEntry
	mov	40(%rsi),%r10		# disp->ContextRecord
	lea	56(%rsi),%r11		# &disp->HandlerData
	lea	24(%rsi),%r12		# &disp->EstablisherFrame
	mov	%r10,32(%rsp)		# arg5
	mov	%r11,40(%rsp)		# arg6
	mov	%r12,48(%rsp)		# arg7
	mov	%rcx,56(%rsp)		# arg8, (NULL)
	call	*__imp_RtlVirtualUnwind(%rip)
	mov	\$1,%eax		# ExceptionContinueSearch
.size	se_handler,.-se_handler
$code.=<<___ if ($SZ==4 && $shaext);
.type	shaext_handler,\@abi-omnipotent
	mov	120($context),%rax	# pull context->Rax
	mov	248($context),%rbx	# pull context->Rip
	lea	.Lprologue_shaext(%rip),%r10
	cmp	%r10,%rbx		# context->Rip<.Lprologue_shaext
	lea	.Lepilogue_shaext(%rip),%r10
	cmp	%r10,%rbx		# context->Rip>=.Lepilogue_shaext
	lea	-8-5*16(%rax),%rsi
	lea	512($context),%rdi	# &context.Xmm6
	.long	0xa548f3fc		# cld; rep movsq
.size	shaext_handler,.-shaext_handler
	.rva	.LSEH_begin_$func
	.rva	.LSEH_end_$func
	.rva	.LSEH_info_$func
$code.=<<___ if ($SZ==4 && $shaext);
	.rva	.LSEH_begin_${func}_shaext
	.rva	.LSEH_end_${func}_shaext
	.rva	.LSEH_info_${func}_shaext
$code.=<<___ if ($SZ==4);
	.rva	.LSEH_begin_${func}_ssse3
	.rva	.LSEH_end_${func}_ssse3
	.rva	.LSEH_info_${func}_ssse3
$code.=<<___ if ($avx && $SZ==8);
	.rva	.LSEH_begin_${func}_xop
	.rva	.LSEH_end_${func}_xop
	.rva	.LSEH_info_${func}_xop
$code.=<<___ if ($avx);
	.rva	.LSEH_begin_${func}_avx
	.rva	.LSEH_end_${func}_avx
	.rva	.LSEH_info_${func}_avx
$code.=<<___ if ($avx>1);
	.rva	.LSEH_begin_${func}_avx2
	.rva	.LSEH_end_${func}_avx2
	.rva	.LSEH_info_${func}_avx2
	.rva	.Lprologue,.Lepilogue			# HandlerData[]
$code.=<<___ if ($SZ==4 && $shaext);
.LSEH_info_${func}_shaext:
$code.=<<___ if ($SZ==4);
.LSEH_info_${func}_ssse3:
	.rva	.Lprologue_ssse3,.Lepilogue_ssse3	# HandlerData[]
$code.=<<___ if ($avx && $SZ==8);
.LSEH_info_${func}_xop:
	.rva	.Lprologue_xop,.Lepilogue_xop		# HandlerData[]
$code.=<<___ if ($avx);
.LSEH_info_${func}_avx:
	.rva	.Lprologue_avx,.Lepilogue_avx		# HandlerData[]
$code.=<<___ if ($avx>1);
.LSEH_info_${func}_avx2:
	.rva	.Lprologue_avx2,.Lepilogue_avx2		# HandlerData[]
		"sha256rnds2" => 0xcb,
		"sha256msg1"  => 0xcc,
		"sha256msg2"  => 0xcd	);

    if (defined($opcodelet{$instr}) && @_[0] =~ /%xmm([0-7]),\s*%xmm([0-7])/) {
	my @opcode=(0x0f,0x38);
	push @opcode,$opcodelet{$instr};
	push @opcode,0xc0|($1&7)|(($2&7)<<3);		# ModR/M
	return ".byte\t".join(',',@opcode);
    } else {
	return $instr."\t".@_[0];
    }
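# E.g. "sha256rnds2 %xmm3,%xmm4" (hypothetical operands) would encode
# as ".byte 0x0f,0x38,0xcb,0xe3": opcode 0f 38 cb plus a ModR/M byte
# with the destination register in bits 5:3 and the source in bits 2:0,
# so that the code assembles even without SHA-aware assembler support.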
foreach (split("\n",$code)) {
	s/\`([^\`]*)\`/eval $1/geo;

	s/\b(sha256[^\s]*)\s+(.*)/sha256op38($1,$2)/geo;