# ====================================================================
# Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
# project. Rights for redistribution and usage in source and binary
# forms are granted according to the OpenSSL license.
# ====================================================================
#
# sha256/512_block procedure for x86_64.
#
# 40% improvement over compiler-generated code on Opteron. On EM64T
# sha256 was observed to run >80% faster and sha512 - >40%. No magical
# tricks, just straight implementation... I really wonder why gcc
# [being armed with inline assembler] fails to generate code as fast.
# The only thing which is cool about this module is that it's the
# very same instruction sequence used for both SHA-256 and SHA-512. In
# the former case the instructions operate on 32-bit operands, while
# in the latter - on 64-bit ones. All I had to do was get one flavor
# right, and the other one passed the test right away:-)
#
# sha256_block runs in ~1005 cycles on Opteron, which gives you
# asymptotic performance of 64*1000/1005=63.7MBps times CPU clock
# frequency in GHz. sha512_block runs in ~1275 cycles, which results
# in 128*1000/1275=100MBps per GHz. Is there room for improvement?
# Well, if you compare it to the IA-64 implementation, which maintains
# X[16] in the register bank[!], tends to 4 instructions per CPU clock
# cycle and runs in 1003 cycles, 1275 is a very good result for the
# 3-way issue Opteron pipeline with X[16] maintained in memory. So
# *if* there is a way to improve it, *then* the only way would be to
# try to offload X[16] updates to the SSE unit, but that would require
# "deeper" loop unroll, which in turn would naturally cause size
# blow-up, not to mention increased complexity! And that is only *if*
# it's actually possible to noticeably improve overall ILP, i.e.
# instruction-level parallelism, on the given CPU implementation.
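#
# For reference, the asymptotic throughput figures above are simply
# bytes-per-block over cycles-per-block, scaled by the clock; a quick
# sanity check [not part of the build]:
#
#	perl -e 'printf "%.1f MBps per GHz\n", 64*1000/1005'	# 63.7
#	perl -e 'printf "%.1f MBps per GHz\n", 128*1000/1275'	# 100.4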
#
# Special note on Intel EM64T. While the Opteron CPU exhibits a
# perfect performance ratio of 1.5 between the 64- and 32-bit flavors
# [see above], [currently available] EM64T CPUs apparently are far
# from it. On the contrary, the 64-bit version, sha512_block, is ~30%
# *slower* than the 32-bit sha256_block:-( This is presumably because
# 64-bit shifts/rotates are apparently not atomic instructions, but
# implemented in microcode.
#
# Optimization including one of Pavel Semjanov's ideas, alternative
# Maj, resulted in >=5% improvement on most CPUs, +20% SHA256 and
# unfortunately -10% SHA512 on P4 [which nobody should care about
# anymore].
#
# Add SIMD code paths, see below for improvement coefficients. The
# SSSE3 code path was not attempted for SHA512, because the estimated
# improvement, noticeably less than 9%, is not high enough to justify
# the effort, at least not on pre-AVX processors. [The obvious
# exception is VIA Nano, but it has a SHA512 instruction that is
# faster and should be used instead.] For reference, the corresponding
# estimated upper limit for improvement for SSSE3 SHA256 is 28%. The
# fact that higher coefficients are observed on VIA Nano and Bulldozer
# has more to do with specifics of their architecture [which is a
# topic for a separate discussion].
######################################################################
# Current performance in cycles per processed byte (less is better):
#
#		SHA256	SSSE3       AVX/XOP(*)	    SHA512  AVX/XOP(*)
#
# AMD K8	15.1	-	    -		    9.70    -
# Core 2	15.5	13.9(+11%)  -		    10.3    -
# Westmere	15.1	12.5(+21%)  -		    9.72    -
# Atom		23.0	21.6(+6%)   -		    14.7    -
# VIA Nano	23.0	16.3(+41%)  -		    14.7    -
# Sandy Bridge	17.4	14.0(+24%)  11.6(+50%(**))  11.2    8.10(+38%(**))
# Ivy Bridge	12.6	10.3(+22%)  10.3(+22%)	    8.17    7.22(+13%)
# Bulldozer	21.5	13.7(+57%)  13.7(+57%(***)) 13.5    8.58(+57%)
#
# (*)	whichever applicable;
# (**)	the switch from ror to shrd stands for a fair share of the
#	improvement;
# (***)	execution time is fully determined by the remaining
#	integer-only part, body_00_15; reducing the amount of SIMD
#	instructions below a certain limit makes no difference/sense;
#	to conserve space the SHA256 XOP code path is therefore omitted;
if ($flavour =~ /\./) { $output = $flavour; undef $flavour; }

$win64=0; $win64=1 if ($flavour =~ /[nm]asm|mingw64/ || $output =~ /\.asm$/);

$0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
( $xlate="${dir}x86_64-xlate.pl" and -f $xlate ) or
( $xlate="${dir}../../perlasm/x86_64-xlate.pl" and -f $xlate) or
die "can't locate x86_64-xlate.pl";
$avx=1 if (`$ENV{CC} -Wa,-v -c -o /dev/null -x assembler /dev/null 2>&1`
		=~ /GNU assembler version ([2-9]\.[0-9]+)/ &&
	   $1>=2.19);	# first version supporting AVX

$avx=1 if (!$avx && $win64 && ($flavour =~ /nasm/ || $ENV{ASM} =~ /nasm/) &&
	   `nasm -v 2>&1` =~ /NASM version ([2-9]\.[0-9]+)/ &&
	   $1>=2.03);	# first version supporting AVX

$avx=1 if (!$avx && $win64 && ($flavour =~ /masm/ || $ENV{ASM} =~ /ml64/) &&
	   `ml64 2>&1` =~ /Version ([0-9]+)\./ &&
	   $1>=10);	# first version supporting AVX
open STDOUT,"| \"$^X\" $xlate $flavour $output";
if ($output =~ /512/) {
	$func="sha512_block_data_order";
	@ROT=($A,$B,$C,$D,$E,$F,$G,$H)=("%rax","%rbx","%rcx","%rdx",
					"%r8", "%r9", "%r10","%r11");
	($T1,$a0,$a1,$a2,$a3)=("%r12","%r13","%r14","%r15","%rdi");
	$func="sha256_block_data_order";
	@ROT=($A,$B,$C,$D,$E,$F,$G,$H)=("%eax","%ebx","%ecx","%edx",
					"%r8d","%r9d","%r10d","%r11d");
	($T1,$a0,$a1,$a2,$a3)=("%r12d","%r13d","%r14d","%r15d","%edi");
$ctx="%rdi";	# 1st arg, zapped by $a3
$inp="%rsi";	# 2nd arg

$_ctx="16*$SZ+0*8(%rsp)";
$_inp="16*$SZ+1*8(%rsp)";
$_end="16*$SZ+2*8(%rsp)";
$_rsp="16*$SZ+3*8(%rsp)";
$framesz="16*$SZ+4*8";
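# Stack frame sketch implied by the offsets above, from %rsp up:
# 16*$SZ bytes of X[16] scratch, followed by four 8-byte slots holding
# ctx, inp, the end pointer and the caller's %rsp; the Win64 SIMD
# paths grow the frame further to save %xmm6 and up.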
{ my ($i,$a,$b,$c,$d,$e,$f,$g,$h) = @_;

	ror	\$`$Sigma1[2]-$Sigma1[1]`,$a0
	ror	\$`$Sigma0[2]-$Sigma0[1]`,$a1
	mov	$T1,`$SZ*($i&0xf)`(%rsp)
	ror	\$`$Sigma1[1]-$Sigma1[0]`,$a0
	xor	$g,$a2			# Ch(e,f,g)=((f^g)&e)^g
	ror	\$`$Sigma0[1]-$Sigma0[0]`,$a1
	add	$a2,$T1			# T1+=Ch(e,f,g)
	add	($Tbl),$T1		# T1+=K[round]
	ror	\$$Sigma1[0],$a0	# Sigma1(e)
	xor	$b,$a2			# a^b, b^c in next round
	ror	\$$Sigma0[0],$a1	# Sigma0(a)
	add	$a0,$T1			# T1+=Sigma1(e)
	xor	$a3,$h			# h=Maj(a,b,c)=Ch(a^b,c,b)
$code.=<<___ if ($i>=15);
	mov	`$SZ*(($i+2)&0xf)`(%rsp),$a0
	lea	$SZ($Tbl),$Tbl		# round++
	add	$a1,$h			# h+=Sigma0(a)

	($a2,$a3) = ($a3,$a2);
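# The "alternative Maj" above relies on the identity
#	Maj(a,b,c) = Ch(a^b,c,b) = ((b^c)&(a^b))^b,
# which can be verified over all bit triples, e.g. with
#	perl -e 'for my $x (0..7) { my($a,$b,$c)=map { ($x>>$_)&1 } 2,1,0;
#		die if ((($b^$c)&($a^$b))^$b) !=
#			(($a&$b)|($a&$c)|($b&$c)); } print "ok\n"'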
{ my ($i,$a,$b,$c,$d,$e,$f,$g,$h) = @_;

	#mov	`$SZ*(($i+1)&0xf)`(%rsp),$a0
	mov	`$SZ*(($i+14)&0xf)`(%rsp),$a1
	ror	\$`$sigma0[1]-$sigma0[0]`,$a0
	ror	\$`$sigma1[1]-$sigma1[0]`,$a1
	xor	$a0,$T1			# sigma0(X[(i+1)&0xf])
	add	`$SZ*(($i+9)&0xf)`(%rsp),$T1
	xor	$a2,$a1			# sigma1(X[(i+14)&0xf])
	add	`$SZ*($i&0xf)`(%rsp),$T1
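	# taken together, rounds 16 and up compute the standard message
	# schedule in place:
	#   X[i&15] += sigma0(X[(i+1)&15]) + X[(i+9)&15] + sigma1(X[(i+14)&15])
	# where offsets i+1, i+9 and i+14 are i-15, i-7 and i-2 modulo 16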
.extern	OPENSSL_ia32cap_P
.type	$func,\@function,4
$code.=<<___ if ($SZ==4 || $avx);
	lea	OPENSSL_ia32cap_P(%rip),%r11
$code.=<<___ if ($avx && $SZ==8);
	test	\$`1<<11`,%r11d		# check for XOP
$code.=<<___ if ($avx);
	and	\$`1<<30`,%r10d		# mask "Intel CPU" bit
	and	\$`1<<28|1<<9`,%r11d	# mask AVX and SSSE3 bits
	cmp	\$`1<<28|1<<9|1<<30`,%r11d
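	# i.e. the AVX code path is taken only when the AVX and SSSE3
	# feature bits and the synthetic "Intel CPU" bit are all set;
	# XOP-capable AMD parts were dispatched by the test above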
$code.=<<___ if ($SZ==4);
	mov	%rsp,%r11		# copy %rsp
	shl	\$4,%rdx		# num*16
	lea	($inp,%rdx,$SZ),%rdx	# inp+num*16*$SZ
	and	\$-64,%rsp		# align stack frame
	mov	$ctx,$_ctx		# save ctx, 1st arg
	mov	$inp,$_inp		# save inp, 2nd arg
	mov	%rdx,$_end		# save end pointer, "3rd" arg
	mov	%r11,$_rsp		# save copy of %rsp
	lea	$TABLE(%rip),$Tbl

for($i=0;$i<16;$i++) {
	$code.="	mov	$SZ*$i($inp),$T1\n";
	$code.="	mov	@ROT[4],$a0\n";
	$code.="	mov	@ROT[0],$a1\n";
	$code.="	bswap	$T1\n";
	&ROUND_00_15($i,@ROT);
	unshift(@ROT,pop(@ROT));
	&ROUND_16_XX($i,@ROT);
	unshift(@ROT,pop(@ROT));

	cmpb	\$0,`$SZ-1`($Tbl)
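	# every K[] constant has a non-zero most significant byte, while
	# the byte-order data placed right after the table [see $TABLE
	# below] has 0x00 in that position, so this cmpb doubles as the
	# end-of-table test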
	lea	16*$SZ($inp),$inp

.type	$TABLE,\@object
.long	0x428a2f98,0x71374491,0xb5c0fbcf,0xe9b5dba5
.long	0x3956c25b,0x59f111f1,0x923f82a4,0xab1c5ed5
.long	0xd807aa98,0x12835b01,0x243185be,0x550c7dc3
.long	0x72be5d74,0x80deb1fe,0x9bdc06a7,0xc19bf174
.long	0xe49b69c1,0xefbe4786,0x0fc19dc6,0x240ca1cc
.long	0x2de92c6f,0x4a7484aa,0x5cb0a9dc,0x76f988da
.long	0x983e5152,0xa831c66d,0xb00327c8,0xbf597fc7
.long	0xc6e00bf3,0xd5a79147,0x06ca6351,0x14292967
.long	0x27b70a85,0x2e1b2138,0x4d2c6dfc,0x53380d13
.long	0x650a7354,0x766a0abb,0x81c2c92e,0x92722c85
.long	0xa2bfe8a1,0xa81a664b,0xc24b8b70,0xc76c51a3
.long	0xd192e819,0xd6990624,0xf40e3585,0x106aa070
.long	0x19a4c116,0x1e376c08,0x2748774c,0x34b0bcb5
.long	0x391c0cb3,0x4ed8aa4a,0x5b9cca4f,0x682e6ff3
.long	0x748f82ee,0x78a5636f,0x84c87814,0x8cc70208
.long	0x90befffa,0xa4506ceb,0xbef9a3f7,0xc67178f2

.long	0x00010203,0x04050607,0x08090a0b,0x0c0d0e0f
.long	0x03020100,0x0b0a0908,0xffffffff,0xffffffff
.long	0xffffffff,0xffffffff,0x03020100,0x0b0a0908
.asciz	"SHA256 block transform for x86_64, CRYPTOGAMS by <appro\@openssl.org>"
.type	$TABLE,\@object
.quad	0x428a2f98d728ae22,0x7137449123ef65cd
.quad	0xb5c0fbcfec4d3b2f,0xe9b5dba58189dbbc
.quad	0x3956c25bf348b538,0x59f111f1b605d019
.quad	0x923f82a4af194f9b,0xab1c5ed5da6d8118
.quad	0xd807aa98a3030242,0x12835b0145706fbe
.quad	0x243185be4ee4b28c,0x550c7dc3d5ffb4e2
.quad	0x72be5d74f27b896f,0x80deb1fe3b1696b1
.quad	0x9bdc06a725c71235,0xc19bf174cf692694
.quad	0xe49b69c19ef14ad2,0xefbe4786384f25e3
.quad	0x0fc19dc68b8cd5b5,0x240ca1cc77ac9c65
.quad	0x2de92c6f592b0275,0x4a7484aa6ea6e483
.quad	0x5cb0a9dcbd41fbd4,0x76f988da831153b5
.quad	0x983e5152ee66dfab,0xa831c66d2db43210
.quad	0xb00327c898fb213f,0xbf597fc7beef0ee4
.quad	0xc6e00bf33da88fc2,0xd5a79147930aa725
.quad	0x06ca6351e003826f,0x142929670a0e6e70
.quad	0x27b70a8546d22ffc,0x2e1b21385c26c926
.quad	0x4d2c6dfc5ac42aed,0x53380d139d95b3df
.quad	0x650a73548baf63de,0x766a0abb3c77b2a8
.quad	0x81c2c92e47edaee6,0x92722c851482353b
.quad	0xa2bfe8a14cf10364,0xa81a664bbc423001
.quad	0xc24b8b70d0f89791,0xc76c51a30654be30
.quad	0xd192e819d6ef5218,0xd69906245565a910
.quad	0xf40e35855771202a,0x106aa07032bbd1b8
.quad	0x19a4c116b8d2d0c8,0x1e376c085141ab53
.quad	0x2748774cdf8eeb99,0x34b0bcb5e19b48a8
.quad	0x391c0cb3c5c95a63,0x4ed8aa4ae3418acb
.quad	0x5b9cca4f7763e373,0x682e6ff3d6b2b8a3
.quad	0x748f82ee5defb2fc,0x78a5636f43172f60
.quad	0x84c87814a1f0ab72,0x8cc702081a6439ec
.quad	0x90befffa23631e28,0xa4506cebde82bde9
.quad	0xbef9a3f7b2c67915,0xc67178f2e372532b
.quad	0xca273eceea26619c,0xd186b8c721c0c207
.quad	0xeada7dd6cde0eb1e,0xf57d4f7fee6ed178
.quad	0x06f067aa72176fba,0x0a637dc5a2c898a6
.quad	0x113f9804bef90dae,0x1b710b35131c471b
.quad	0x28db77f523047d84,0x32caab7b40c72493
.quad	0x3c9ebe0a15c9bebc,0x431d67c49c100d4c
.quad	0x4cc5d4becb3e42b6,0x597f299cfc657e2a
.quad	0x5fcb6fab3ad6faec,0x6c44198c4a475817

.quad	0x0001020304050607,0x08090a0b0c0d0e0f
.asciz	"SHA512 block transform for x86_64, CRYPTOGAMS by <appro\@openssl.org>"
######################################################################

my ($a,$b,$c,$d,$e,$f,$g,$h);

sub AUTOLOAD()		# thunk [simplified] 32-bit style perlasm
{ my $opcode = $AUTOLOAD; $opcode =~ s/.*:://;
  my $arg = pop;
    $arg = "\$$arg" if ($arg*1 eq $arg);
    $code .= "\t$opcode\t".join(',',$arg,reverse @_)."\n";
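# e.g. &ror($a0,14) comes out as "ror	\$14,$a0": the last Perl
# argument becomes the first operand in AT&T order, and bare numbers
# are turned into immediates.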
	'($a,$b,$c,$d,$e,$f,$g,$h)=@ROT;'.

	'&ror	($a0,$Sigma1[2]-$Sigma1[1])',
	'&ror	($a1,$Sigma0[2]-$Sigma0[1])',
	'&xor	($a4,$g)',			# f^g
	'&ror	($a0,$Sigma1[1]-$Sigma1[0])',
	'&and	($a4,$e)',			# (f^g)&e
	'&add	($h,$SZ*($i&15)."(%rsp)")',	# h+=X[i]+K[i]
	'&ror	($a1,$Sigma0[1]-$Sigma0[0])',
	'&xor	($a4,$g)',			# Ch(e,f,g)=((f^g)&e)^g
	'&xor	($a2,$b)',			# a^b, b^c in next round
	'&ror	($a0,$Sigma1[0])',		# Sigma1(e)
	'&add	($h,$a4)',			# h+=Ch(e,f,g)
	'&and	($a3,$a2)',			# (b^c)&(a^b)
	'&add	($h,$a0)',			# h+=Sigma1(e)
	'&xor	($a3,$b)',			# Maj(a,b,c)=Ch(a^b,c,b)
	'&add	($d,$h)',			# d+=h
	'&ror	($a1,$Sigma0[0])',		# Sigma0(a)
	'&add	($h,$a3)',			# h+=Maj(a,b,c)
	'&add	($a1,$h);'.			# h+=Sigma0(a)
	'($a2,$a3) = ($a3,$a2); unshift(@ROT,pop(@ROT)); $i++;'
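# note the software pipelining around Maj: the a^b computed in one
# round becomes the next round's b^c once @ROT rotates, so each round
# spends only one xor and one and on the $a2/$a3 pair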
######################################################################

if ($SZ==4) {	# SHA256 only
my @X = map("%xmm$_",(0..3));
my ($t0,$t1,$t2,$t3, $t4,$t5) = map("%xmm$_",(4..9));
.type	${func}_ssse3,\@function,4
	mov	%rsp,%r11		# copy %rsp
	shl	\$4,%rdx		# num*16
	sub	\$`$framesz+$win64*16*4`,%rsp
	lea	($inp,%rdx,$SZ),%rdx	# inp+num*16*$SZ
	and	\$-64,%rsp		# align stack frame
	mov	$ctx,$_ctx		# save ctx, 1st arg
	mov	$inp,$_inp		# save inp, 2nd arg
	mov	%rdx,$_end		# save end pointer, "3rd" arg
	mov	%r11,$_rsp		# save copy of %rsp
$code.=<<___ if ($win64);
	movaps	%xmm6,16*$SZ+32(%rsp)
	movaps	%xmm7,16*$SZ+48(%rsp)
	movaps	%xmm8,16*$SZ+64(%rsp)
	movaps	%xmm9,16*$SZ+80(%rsp)
	movdqa	$TABLE+`$SZ*$rounds`+16(%rip),$t4
	movdqa	$TABLE+`$SZ*$rounds`+32(%rip),$t5
	movdqa	$TABLE+`$SZ*$rounds`(%rip),$t3
	movdqu	0x00($inp),@X[0]
	movdqu	0x10($inp),@X[1]
	movdqu	0x20($inp),@X[2]
	movdqu	0x30($inp),@X[3]
	lea	$TABLE(%rip),$Tbl
	movdqa	0x00($Tbl),$t0
	movdqa	0x10($Tbl),$t1
	movdqa	0x20($Tbl),$t2
	movdqa	0x30($Tbl),$t3
	movdqa	$t0,0x00(%rsp)
	movdqa	$t1,0x10(%rsp)
	movdqa	$t2,0x20(%rsp)
	movdqa	$t3,0x30(%rsp)
sub Xupdate_256_SSSE3 () {
	'&movdqa	($t0,@X[1]);',
	'&movdqa	($t3,@X[3])',
	'&palignr	($t0,@X[0],$SZ)',	# X[1..4]
	'&palignr	($t3,@X[2],$SZ);',	# X[9..12]
	'&movdqa	($t2,$t0);',
	'&psrld	($t0,$sigma0[2])',
	'&paddd	(@X[0],$t3);',		# X[0..3] += X[9..12]
	'&psrld	($t2,$sigma0[0])',
	'&pshufd	($t3,@X[3],0b11111010)',# X[14..15]
	'&pslld	($t1,8*$SZ-$sigma0[1]);'.
	'&psrld	($t2,$sigma0[1]-$sigma0[0]);'.
	'&pslld	($t1,$sigma0[1]-$sigma0[0]);'.
	'&pxor	($t0,$t1);',		# sigma0(X[1..4])
	'&psrld	($t3,$sigma1[2])',
	'&paddd	(@X[0],$t0);',		# X[0..3] += sigma0(X[1..4])
	'&psrlq	($t2,$sigma1[0])',
	'&psrlq	($t2,$sigma1[1]-$sigma1[0])',
	'&pshufb	($t3,$t4)',		# sigma1(X[14..15])
	'&paddd	(@X[0],$t3)',		# X[0..1] += sigma1(X[14..15])
	'&pshufd	($t3,@X[0],0b01010000)',# X[16..17]
	'&movdqa	($t2,$t3);',
	'&psrld	($t3,$sigma1[2])',
	'&psrlq	($t2,$sigma1[0])',
	'&psrlq	($t2,$sigma1[1]-$sigma1[0])',
	'&movdqa	($t2,16*$j."($Tbl)")',
	'&paddd	(@X[0],$t3)'		# X[2..3] += sigma1(X[16..17])
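# A note on the pshufd immediates above: the selector 0b11111010 reads
# dwords 2,2,3,3 and 0b01010000 reads 0,0,1,1 [two bits per destination
# dword, low to high], duplicating X[14..15] resp. X[16..17] into qword
# lanes so that psrlq can stand in for 32-bit rotates; decoding them:
#	perl -e 'my $imm=0b11111010;
#		printf "%d,%d,%d,%d\n", map { ($imm>>(2*$_))&3 } 0..3'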
sub SSSE3_256_00_47 () {
my @insns = (&$body,&$body,&$body,&$body);	# 104 instructions

  foreach (Xupdate_256_SSSE3()) {		# 36 instructions
  } else {			# squeeze extra 3% on Westmere and Atom
	eval(shift(@insns));			#@
	eval(shift(@insns));			#@
	&palignr	($t0,@X[0],$SZ);	# X[1..4]
	eval(shift(@insns));			#@
	&palignr	($t3,@X[2],$SZ);	# X[9..12]
	eval(shift(@insns));			#@
	eval(shift(@insns));			#@
	&psrld	($t0,$sigma0[2]);
	&paddd	(@X[0],$t3);			# X[0..3] += X[9..12]
	eval(shift(@insns));			#@
	&psrld	($t2,$sigma0[0]);
	eval(shift(@insns));			#@
	&pshufd	($t3,@X[3],0b11111010);		# X[14..15]
	&pslld	($t1,8*$SZ-$sigma0[1]);
	eval(shift(@insns));			#@
	&psrld	($t2,$sigma0[1]-$sigma0[0]);
	eval(shift(@insns));			#@
	&pslld	($t1,$sigma0[1]-$sigma0[0]);
	eval(shift(@insns));			#@
	eval(shift(@insns));			#@
	&pxor	($t0,$t1);			# sigma0(X[1..4])
	&psrld	($t3,$sigma1[2]);
	&paddd	(@X[0],$t0);			# X[0..3] += sigma0(X[1..4])
	eval(shift(@insns));			#@
	&psrlq	($t2,$sigma1[0]);
	eval(shift(@insns));			#@
	eval(shift(@insns));			#@
	&psrlq	($t2,$sigma1[1]-$sigma1[0]);
	eval(shift(@insns));			#@
	&pshufb	($t3,$t4);			# sigma1(X[14..15])
	eval(shift(@insns));			#@
	eval(shift(@insns));			#@
	&paddd	(@X[0],$t3);			# X[0..1] += sigma1(X[14..15])
	&pshufd	($t3,@X[0],0b01010000);		# X[16..17]
	eval(shift(@insns));			#@
	&psrld	($t3,$sigma1[2]);
	&psrlq	($t2,$sigma1[0]);
	eval(shift(@insns));			#@
	eval(shift(@insns));			#@
	&psrlq	($t2,$sigma1[1]-$sigma1[0]);
	eval(shift(@insns));			#@
	&movdqa	($t2,16*$j."($Tbl)");
	eval(shift(@insns));			#@
	eval(shift(@insns));			#@
	&paddd	(@X[0],$t3);			# X[2..3] += sigma1(X[16..17])

    foreach (@insns) { eval; }			# remaining instructions
    &movdqa	(16*$j."(%rsp)",$t2);

for ($i=0,$j=0; $j<4; $j++) {
	&SSSE3_256_00_47($j,\&body_00_15,@X);
	push(@X,shift(@X));			# rotate(@X)
	&cmpb	($SZ-1+16*$SZ."($Tbl)",0);
	&jne	(".Lssse3_00_47");
for ($i=0; $i<16; ) {
	foreach(body_00_15()) { eval; }

	lea	16*$SZ($inp),$inp
$code.=<<___ if ($win64);
	movaps	16*$SZ+32(%rsp),%xmm6
	movaps	16*$SZ+48(%rsp),%xmm7
	movaps	16*$SZ+64(%rsp),%xmm8
	movaps	16*$SZ+80(%rsp),%xmm9
.size	${func}_ssse3,.-${func}_ssse3
######################################################################

if ($SZ==8) {	# SHA512 only
.type	${func}_xop,\@function,4
	mov	%rsp,%r11		# copy %rsp
	shl	\$4,%rdx		# num*16
	sub	\$`$framesz+$win64*16*($SZ==4?4:6)`,%rsp
	lea	($inp,%rdx,$SZ),%rdx	# inp+num*16*$SZ
	and	\$-64,%rsp		# align stack frame
	mov	$ctx,$_ctx		# save ctx, 1st arg
	mov	$inp,$_inp		# save inp, 2nd arg
	mov	%rdx,$_end		# save end pointer, "3rd" arg
	mov	%r11,$_rsp		# save copy of %rsp
$code.=<<___ if ($win64);
	movaps	%xmm6,16*$SZ+32(%rsp)
	movaps	%xmm7,16*$SZ+48(%rsp)
	movaps	%xmm8,16*$SZ+64(%rsp)
	movaps	%xmm9,16*$SZ+80(%rsp)
$code.=<<___ if ($win64 && $SZ>4);
	movaps	%xmm10,16*$SZ+96(%rsp)
	movaps	%xmm11,16*$SZ+112(%rsp)
if ($SZ==4) {	# SHA256
my @X = map("%xmm$_",(0..3));
my ($t0,$t1,$t2,$t3) = map("%xmm$_",(4..7));

	vmovdqa	$TABLE+`$SZ*$rounds`(%rip),$t3
	vmovdqu	0x00($inp),@X[0]
	vmovdqu	0x10($inp),@X[1]
	vmovdqu	0x20($inp),@X[2]
	vmovdqu	0x30($inp),@X[3]
	vpshufb	$t3,@X[0],@X[0]
	lea	$TABLE(%rip),$Tbl
	vpshufb	$t3,@X[1],@X[1]
	vpshufb	$t3,@X[2],@X[2]
	vpaddd	0x00($Tbl),@X[0],$t0
	vpshufb	$t3,@X[3],@X[3]
	vpaddd	0x10($Tbl),@X[1],$t1
	vpaddd	0x20($Tbl),@X[2],$t2
	vpaddd	0x30($Tbl),@X[3],$t3
	vmovdqa	$t0,0x00(%rsp)
	vmovdqa	$t1,0x10(%rsp)
	vmovdqa	$t2,0x20(%rsp)
	vmovdqa	$t3,0x30(%rsp)
sub XOP_256_00_47 () {
my @insns = (&$body,&$body,&$body,&$body);	# 104 instructions

	&vpalignr	($t0,@X[1],@X[0],$SZ);	# X[1..4]
	&vpalignr	($t3,@X[3],@X[2],$SZ);	# X[9..12]
	&vprotd	($t1,$t0,8*$SZ-$sigma0[1]);
	&vpsrld	($t0,$t0,$sigma0[2]);
	&vpaddd	(@X[0],@X[0],$t3);		# X[0..3] += X[9..12]
	&vprotd	($t2,$t1,$sigma0[1]-$sigma0[0]);
	&vpxor	($t0,$t0,$t1);
	&vprotd	($t3,@X[3],8*$SZ-$sigma1[1]);
	&vpxor	($t0,$t0,$t2);			# sigma0(X[1..4])
	&vpsrld	($t2,@X[3],$sigma1[2]);
	&vpaddd	(@X[0],@X[0],$t0);		# X[0..3] += sigma0(X[1..4])
	&vprotd	($t1,$t3,$sigma1[1]-$sigma1[0]);
	&vpxor	($t3,$t3,$t2);
	&vpxor	($t3,$t3,$t1);			# sigma1(X[14..15])
	&vpsrldq	($t3,$t3,8);
	&vpaddd	(@X[0],@X[0],$t3);		# X[0..1] += sigma1(X[14..15])
	&vprotd	($t3,@X[0],8*$SZ-$sigma1[1]);
	&vpsrld	($t2,@X[0],$sigma1[2]);
	&vprotd	($t1,$t3,$sigma1[1]-$sigma1[0]);
	&vpxor	($t3,$t3,$t2);
	&vpxor	($t3,$t3,$t1);			# sigma1(X[16..17])
	&vpslldq	($t3,$t3,8);		# 22 instructions
	&vpaddd	(@X[0],@X[0],$t3);		# X[2..3] += sigma1(X[16..17])
	eval(shift(@insns));
	eval(shift(@insns));
	eval(shift(@insns));
	&vpaddd	($t2,@X[0],16*$j."($Tbl)");
	foreach (@insns) { eval; }		# remaining instructions
	&vmovdqa	(16*$j."(%rsp)",$t2);

for ($i=0,$j=0; $j<4; $j++) {
	&XOP_256_00_47($j,\&body_00_15,@X);
	push(@X,shift(@X));			# rotate(@X)
	&cmpb	($SZ-1+16*$SZ."($Tbl)",0);
	&jne	(".Lxop_00_47");

for ($i=0; $i<16; ) {
	foreach(body_00_15()) { eval; }
my @X = map("%xmm$_",(0..7));
my ($t0,$t1,$t2,$t3) = map("%xmm$_",(8..11));

	vmovdqa	$TABLE+`$SZ*$rounds`(%rip),$t3
	vmovdqu	0x00($inp),@X[0]
	lea	$TABLE(%rip),$Tbl
	vmovdqu	0x10($inp),@X[1]
	vmovdqu	0x20($inp),@X[2]
	vpshufb	$t3,@X[0],@X[0]
	vmovdqu	0x30($inp),@X[3]
	vpshufb	$t3,@X[1],@X[1]
	vmovdqu	0x40($inp),@X[4]
	vpshufb	$t3,@X[2],@X[2]
	vmovdqu	0x50($inp),@X[5]
	vpshufb	$t3,@X[3],@X[3]
	vmovdqu	0x60($inp),@X[6]
	vpshufb	$t3,@X[4],@X[4]
	vmovdqu	0x70($inp),@X[7]
	vpshufb	$t3,@X[5],@X[5]
	vpaddq	0x00($Tbl),@X[0],$t0
	vpshufb	$t3,@X[6],@X[6]
	vpaddq	0x10($Tbl),@X[1],$t1
	vpshufb	$t3,@X[7],@X[7]
	vpaddq	0x20($Tbl),@X[2],$t2
	vpaddq	0x30($Tbl),@X[3],$t3
	vmovdqa	$t0,0x00(%rsp)
	vpaddq	0x40($Tbl),@X[4],$t0
	vmovdqa	$t1,0x10(%rsp)
	vpaddq	0x50($Tbl),@X[5],$t1
	vmovdqa	$t2,0x20(%rsp)
	vpaddq	0x60($Tbl),@X[6],$t2
	vmovdqa	$t3,0x30(%rsp)
	vpaddq	0x70($Tbl),@X[7],$t3
	vmovdqa	$t0,0x40(%rsp)
	vmovdqa	$t1,0x50(%rsp)
	vmovdqa	$t2,0x60(%rsp)
	vmovdqa	$t3,0x70(%rsp)
sub XOP_512_00_47 () {
my @insns = (&$body,&$body);			# 52 instructions

	&vpalignr	($t0,@X[1],@X[0],$SZ);	# X[1..2]
	eval(shift(@insns));
	eval(shift(@insns));
	&vpalignr	($t3,@X[5],@X[4],$SZ);	# X[9..10]
	eval(shift(@insns));
	eval(shift(@insns));
	&vprotq	($t1,$t0,8*$SZ-$sigma0[1]);
	eval(shift(@insns));
	eval(shift(@insns));
	&vpsrlq	($t0,$t0,$sigma0[2]);
	eval(shift(@insns));
	eval(shift(@insns));
	&vpaddq	(@X[0],@X[0],$t3);		# X[0..1] += X[9..10]
	eval(shift(@insns));
	eval(shift(@insns));
	eval(shift(@insns));
	eval(shift(@insns));
	&vprotq	($t2,$t1,$sigma0[1]-$sigma0[0]);
	eval(shift(@insns));
	eval(shift(@insns));
	&vpxor	($t0,$t0,$t1);
	eval(shift(@insns));
	eval(shift(@insns));
	eval(shift(@insns));
	eval(shift(@insns));
	&vprotq	($t3,@X[7],8*$SZ-$sigma1[1]);
	eval(shift(@insns));
	eval(shift(@insns));
	&vpxor	($t0,$t0,$t2);			# sigma0(X[1..2])
	eval(shift(@insns));
	eval(shift(@insns));
	&vpsrlq	($t2,@X[7],$sigma1[2]);
	eval(shift(@insns));
	eval(shift(@insns));
	&vpaddq	(@X[0],@X[0],$t0);		# X[0..1] += sigma0(X[1..2])
	eval(shift(@insns));
	eval(shift(@insns));
	&vprotq	($t1,$t3,$sigma1[1]-$sigma1[0]);
	eval(shift(@insns));
	eval(shift(@insns));
	&vpxor	($t3,$t3,$t2);
	eval(shift(@insns));
	eval(shift(@insns));
	eval(shift(@insns));
	eval(shift(@insns));
	&vpxor	($t3,$t3,$t1);			# sigma1(X[14..15])
	eval(shift(@insns));
	eval(shift(@insns));
	eval(shift(@insns));
	eval(shift(@insns));
	&vpaddq	(@X[0],@X[0],$t3);		# X[0..1] += sigma1(X[14..15])
	eval(shift(@insns));
	eval(shift(@insns));
	eval(shift(@insns));
	eval(shift(@insns));
	&vpaddq	($t2,@X[0],16*$j."($Tbl)");
	foreach (@insns) { eval; }		# remaining instructions
	&vmovdqa	(16*$j."(%rsp)",$t2);
for ($i=0,$j=0; $j<8; $j++) {
	&XOP_512_00_47($j,\&body_00_15,@X);
	push(@X,shift(@X));			# rotate(@X)
	&cmpb	($SZ-1+16*$SZ."($Tbl)",0);
	&jne	(".Lxop_00_47");

for ($i=0; $i<16; ) {
	foreach(body_00_15()) { eval; }
	lea	16*$SZ($inp),$inp
$code.=<<___ if ($win64);
	movaps	16*$SZ+32(%rsp),%xmm6
	movaps	16*$SZ+48(%rsp),%xmm7
	movaps	16*$SZ+64(%rsp),%xmm8
	movaps	16*$SZ+80(%rsp),%xmm9
$code.=<<___ if ($win64 && $SZ>4);
	movaps	16*$SZ+96(%rsp),%xmm10
	movaps	16*$SZ+112(%rsp),%xmm11
.size	${func}_xop,.-${func}_xop
######################################################################
# AVX+shrd code path
#
local *ror = sub { &shrd(@_[0],@_) };
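# with identical source and destination registers, "shrd \$n,%reg,%reg"
# is bit-for-bit equivalent to "ror \$n,%reg", so ror can simply be
# aliased to shrd here; this is the switch the (**) footnote above
# credits for a fair share of the Sandy Bridge improvement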
.type	${func}_avx,\@function,4
	mov	%rsp,%r11		# copy %rsp
	shl	\$4,%rdx		# num*16
	sub	\$`$framesz+$win64*16*($SZ==4?4:6)`,%rsp
	lea	($inp,%rdx,$SZ),%rdx	# inp+num*16*$SZ
	and	\$-64,%rsp		# align stack frame
	mov	$ctx,$_ctx		# save ctx, 1st arg
	mov	$inp,$_inp		# save inp, 2nd arg
	mov	%rdx,$_end		# save end pointer, "3rd" arg
	mov	%r11,$_rsp		# save copy of %rsp
$code.=<<___ if ($win64);
	movaps	%xmm6,16*$SZ+32(%rsp)
	movaps	%xmm7,16*$SZ+48(%rsp)
	movaps	%xmm8,16*$SZ+64(%rsp)
	movaps	%xmm9,16*$SZ+80(%rsp)
$code.=<<___ if ($win64 && $SZ>4);
	movaps	%xmm10,16*$SZ+96(%rsp)
	movaps	%xmm11,16*$SZ+112(%rsp)
if ($SZ==4) {	# SHA256
my @X = map("%xmm$_",(0..3));
my ($t0,$t1,$t2,$t3, $t4,$t5) = map("%xmm$_",(4..9));

	vmovdqa	$TABLE+`$SZ*$rounds`+16(%rip),$t4
	vmovdqa	$TABLE+`$SZ*$rounds`+32(%rip),$t5
	vmovdqa	$TABLE+`$SZ*$rounds`(%rip),$t3
	vmovdqu	0x00($inp),@X[0]
	vmovdqu	0x10($inp),@X[1]
	vmovdqu	0x20($inp),@X[2]
	vmovdqu	0x30($inp),@X[3]
	vpshufb	$t3,@X[0],@X[0]
	lea	$TABLE(%rip),$Tbl
	vpshufb	$t3,@X[1],@X[1]
	vpshufb	$t3,@X[2],@X[2]
	vpaddd	0x00($Tbl),@X[0],$t0
	vpshufb	$t3,@X[3],@X[3]
	vpaddd	0x10($Tbl),@X[1],$t1
	vpaddd	0x20($Tbl),@X[2],$t2
	vpaddd	0x30($Tbl),@X[3],$t3
	vmovdqa	$t0,0x00(%rsp)
	vmovdqa	$t1,0x10(%rsp)
	vmovdqa	$t2,0x20(%rsp)
	vmovdqa	$t3,0x30(%rsp)
sub Xupdate_256_AVX () {
	'&vpalignr	($t0,@X[1],@X[0],$SZ)',	# X[1..4]
	'&vpalignr	($t3,@X[3],@X[2],$SZ)',	# X[9..12]
	'&vpsrld	($t2,$t0,$sigma0[0]);',
	'&vpaddd	(@X[0],@X[0],$t3)',	# X[0..3] += X[9..12]
	'&vpsrld	($t3,$t0,$sigma0[2])',
	'&vpslld	($t1,$t0,8*$SZ-$sigma0[1]);',
	'&vpxor	($t0,$t3,$t2)',
	'&vpshufd	($t3,@X[3],0b11111010)',# X[14..15]
	'&vpsrld	($t2,$t2,$sigma0[1]-$sigma0[0]);',
	'&vpxor	($t0,$t0,$t1)',
	'&vpslld	($t1,$t1,$sigma0[1]-$sigma0[0]);',
	'&vpxor	($t0,$t0,$t2)',
	'&vpsrld	($t2,$t3,$sigma1[2]);',
	'&vpxor	($t0,$t0,$t1)',		# sigma0(X[1..4])
	'&vpsrlq	($t3,$t3,$sigma1[0]);',
	'&vpaddd	(@X[0],@X[0],$t0)',	# X[0..3] += sigma0(X[1..4])
	'&vpxor	($t2,$t2,$t3);',
	'&vpsrlq	($t3,$t3,$sigma1[1]-$sigma1[0])',
	'&vpxor	($t2,$t2,$t3)',
	'&vpshufb	($t2,$t2,$t4)',		# sigma1(X[14..15])
	'&vpaddd	(@X[0],@X[0],$t2)',	# X[0..1] += sigma1(X[14..15])
	'&vpshufd	($t3,@X[0],0b01010000)',# X[16..17]
	'&vpsrld	($t2,$t3,$sigma1[2])',
	'&vpsrlq	($t3,$t3,$sigma1[0])',
	'&vpxor	($t2,$t2,$t3);',
	'&vpsrlq	($t3,$t3,$sigma1[1]-$sigma1[0])',
	'&vpxor	($t2,$t2,$t3)',
	'&vpshufb	($t2,$t2,$t5)',
	'&vpaddd	(@X[0],@X[0],$t2)'	# X[2..3] += sigma1(X[16..17])
sub AVX_256_00_47 () {
my @insns = (&$body,&$body,&$body,&$body);	# 104 instructions

  foreach (Xupdate_256_AVX()) {			# 29 instructions
	eval(shift(@insns));
	eval(shift(@insns));
	eval(shift(@insns));
	&vpaddd	($t2,@X[0],16*$j."($Tbl)");
	foreach (@insns) { eval; }		# remaining instructions
	&vmovdqa	(16*$j."(%rsp)",$t2);

for ($i=0,$j=0; $j<4; $j++) {
	&AVX_256_00_47($j,\&body_00_15,@X);
	push(@X,shift(@X));			# rotate(@X)
	&cmpb	($SZ-1+16*$SZ."($Tbl)",0);
	&jne	(".Lavx_00_47");

for ($i=0; $i<16; ) {
	foreach(body_00_15()) { eval; }
my @X = map("%xmm$_",(0..7));
my ($t0,$t1,$t2,$t3) = map("%xmm$_",(8..11));

	vmovdqa	$TABLE+`$SZ*$rounds`(%rip),$t3
	vmovdqu	0x00($inp),@X[0]
	lea	$TABLE(%rip),$Tbl
	vmovdqu	0x10($inp),@X[1]
	vmovdqu	0x20($inp),@X[2]
	vpshufb	$t3,@X[0],@X[0]
	vmovdqu	0x30($inp),@X[3]
	vpshufb	$t3,@X[1],@X[1]
	vmovdqu	0x40($inp),@X[4]
	vpshufb	$t3,@X[2],@X[2]
	vmovdqu	0x50($inp),@X[5]
	vpshufb	$t3,@X[3],@X[3]
	vmovdqu	0x60($inp),@X[6]
	vpshufb	$t3,@X[4],@X[4]
	vmovdqu	0x70($inp),@X[7]
	vpshufb	$t3,@X[5],@X[5]
	vpaddq	0x00($Tbl),@X[0],$t0
	vpshufb	$t3,@X[6],@X[6]
	vpaddq	0x10($Tbl),@X[1],$t1
	vpshufb	$t3,@X[7],@X[7]
	vpaddq	0x20($Tbl),@X[2],$t2
	vpaddq	0x30($Tbl),@X[3],$t3
	vmovdqa	$t0,0x00(%rsp)
	vpaddq	0x40($Tbl),@X[4],$t0
	vmovdqa	$t1,0x10(%rsp)
	vpaddq	0x50($Tbl),@X[5],$t1
	vmovdqa	$t2,0x20(%rsp)
	vpaddq	0x60($Tbl),@X[6],$t2
	vmovdqa	$t3,0x30(%rsp)
	vpaddq	0x70($Tbl),@X[7],$t3
	vmovdqa	$t0,0x40(%rsp)
	vmovdqa	$t1,0x50(%rsp)
	vmovdqa	$t2,0x60(%rsp)
	vmovdqa	$t3,0x70(%rsp)
sub Xupdate_512_AVX () {
	'&vpalignr	($t0,@X[1],@X[0],$SZ)',	# X[1..2]
	'&vpalignr	($t3,@X[5],@X[4],$SZ)',	# X[9..10]
	'&vpsrlq	($t2,$t0,$sigma0[0]);',
	'&vpaddq	(@X[0],@X[0],$t3)',	# X[0..1] += X[9..10]
	'&vpsrlq	($t3,$t0,$sigma0[2])',
	'&vpsllq	($t1,$t0,8*$SZ-$sigma0[1]);',
	'&vpxor	($t0,$t3,$t2)',
	'&vpsrlq	($t2,$t2,$sigma0[1]-$sigma0[0]);',
	'&vpxor	($t0,$t0,$t1)',
	'&vpsllq	($t1,$t1,$sigma0[1]-$sigma0[0]);',
	'&vpxor	($t0,$t0,$t2)',
	'&vpsrlq	($t3,@X[7],$sigma1[2]);',
	'&vpxor	($t0,$t0,$t1)',		# sigma0(X[1..2])
	'&vpsllq	($t2,@X[7],8*$SZ-$sigma1[1])',
	'&vpaddq	(@X[0],@X[0],$t0)',	# X[0..1] += sigma0(X[1..2])
	'&vpsrlq	($t1,@X[7],$sigma1[0]);',
	'&vpxor	($t3,$t3,$t2)',
	'&vpsllq	($t2,$t2,$sigma1[1]-$sigma1[0]);',
	'&vpxor	($t3,$t3,$t1)',
	'&vpsrlq	($t1,$t1,$sigma1[1]-$sigma1[0]);',
	'&vpxor	($t3,$t3,$t2)',
	'&vpxor	($t3,$t3,$t1)',		# sigma1(X[14..15])
	'&vpaddq	(@X[0],@X[0],$t3)',	# X[0..1] += sigma1(X[14..15])
sub AVX_512_00_47 () {
my @insns = (&$body,&$body);			# 52 instructions

  foreach (Xupdate_512_AVX()) {			# 23 instructions
	eval(shift(@insns));
	eval(shift(@insns));
	&vpaddq	($t2,@X[0],16*$j."($Tbl)");
	foreach (@insns) { eval; }		# remaining instructions
	&vmovdqa	(16*$j."(%rsp)",$t2);

for ($i=0,$j=0; $j<8; $j++) {
	&AVX_512_00_47($j,\&body_00_15,@X);
	push(@X,shift(@X));			# rotate(@X)
	&cmpb	($SZ-1+16*$SZ."($Tbl)",0);
	&jne	(".Lavx_00_47");

for ($i=0; $i<16; ) {
	foreach(body_00_15()) { eval; }
	lea	16*$SZ($inp),$inp
$code.=<<___ if ($win64);
	movaps	16*$SZ+32(%rsp),%xmm6
	movaps	16*$SZ+48(%rsp),%xmm7
	movaps	16*$SZ+64(%rsp),%xmm8
	movaps	16*$SZ+80(%rsp),%xmm9
$code.=<<___ if ($win64 && $SZ>4);
	movaps	16*$SZ+96(%rsp),%xmm10
	movaps	16*$SZ+112(%rsp),%xmm11
.size	${func}_avx,.-${func}_avx
# EXCEPTION_DISPOSITION handler (EXCEPTION_RECORD *rec,ULONG64 frame,
#		CONTEXT *context,DISPATCHER_CONTEXT *disp)
.extern	__imp_RtlVirtualUnwind
.type	se_handler,\@abi-omnipotent
	mov	120($context),%rax	# pull context->Rax
	mov	248($context),%rbx	# pull context->Rip

	mov	8($disp),%rsi		# disp->ImageBase
	mov	56($disp),%r11		# disp->HandlerData

	mov	0(%r11),%r10d		# HandlerData[0]
	lea	(%rsi,%r10),%r10	# prologue label
	cmp	%r10,%rbx		# context->Rip<prologue label

	mov	152($context),%rax	# pull context->Rsp

	mov	4(%r11),%r10d		# HandlerData[1]
	lea	(%rsi,%r10),%r10	# epilogue label
	cmp	%r10,%rbx		# context->Rip>=epilogue label

	mov	%rax,%rsi		# put aside Rsp
	mov	16*$SZ+3*8(%rax),%rax	# pull $_rsp

	mov	%rbx,144($context)	# restore context->Rbx
	mov	%rbp,160($context)	# restore context->Rbp
	mov	%r12,216($context)	# restore context->R12
	mov	%r13,224($context)	# restore context->R13
	mov	%r14,232($context)	# restore context->R14
	mov	%r15,240($context)	# restore context->R15

	lea	.Lepilogue(%rip),%r10
	jb	.Lin_prologue		# non-AVX code
	lea	16*$SZ+4*8(%rsi),%rsi	# Xmm6 and up save area
	lea	512($context),%rdi	# &context.Xmm6
	mov	\$`$SZ==4?8:12`,%ecx
	.long	0xa548f3fc		# cld; rep movsq

	mov	%rax,152($context)	# restore context->Rsp
	mov	%rsi,168($context)	# restore context->Rsi
	mov	%rdi,176($context)	# restore context->Rdi

	mov	40($disp),%rdi		# disp->ContextRecord
	mov	$context,%rsi		# context
	mov	\$154,%ecx		# sizeof(CONTEXT) in quadwords
	.long	0xa548f3fc		# cld; rep movsq

	xor	%rcx,%rcx		# arg1, UNW_FLAG_NHANDLER
	mov	8(%rsi),%rdx		# arg2, disp->ImageBase
	mov	0(%rsi),%r8		# arg3, disp->ControlPc
	mov	16(%rsi),%r9		# arg4, disp->FunctionEntry
	mov	40(%rsi),%r10		# disp->ContextRecord
	lea	56(%rsi),%r11		# &disp->HandlerData
	lea	24(%rsi),%r12		# &disp->EstablisherFrame
	mov	%r10,32(%rsp)		# arg5
	mov	%r11,40(%rsp)		# arg6
	mov	%r12,48(%rsp)		# arg7
	mov	%rcx,56(%rsp)		# arg8, (NULL)
	call	*__imp_RtlVirtualUnwind(%rip)

	mov	\$1,%eax		# ExceptionContinueSearch
.size	se_handler,.-se_handler
	.rva	.LSEH_begin_$func
	.rva	.LSEH_end_$func
	.rva	.LSEH_info_$func
$code.=<<___ if ($SZ==4);
	.rva	.LSEH_begin_${func}_ssse3
	.rva	.LSEH_end_${func}_ssse3
	.rva	.LSEH_info_${func}_ssse3
$code.=<<___ if ($avx && $SZ==8);
	.rva	.LSEH_begin_${func}_xop
	.rva	.LSEH_end_${func}_xop
	.rva	.LSEH_info_${func}_xop
$code.=<<___ if ($avx);
	.rva	.LSEH_begin_${func}_avx
	.rva	.LSEH_end_${func}_avx
	.rva	.LSEH_info_${func}_avx
	.rva	.Lprologue,.Lepilogue			# HandlerData[]
$code.=<<___ if ($SZ==4);
.LSEH_info_${func}_ssse3:
	.rva	.Lprologue_ssse3,.Lepilogue_ssse3	# HandlerData[]
$code.=<<___ if ($avx && $SZ==8);
.LSEH_info_${func}_xop:
	.rva	.Lprologue_xop,.Lepilogue_xop		# HandlerData[]
$code.=<<___ if ($avx);
.LSEH_info_${func}_avx:
	.rva	.Lprologue_avx,.Lepilogue_avx		# HandlerData[]
$code =~ s/\`([^\`]*)\`/eval $1/gem;