3 # ====================================================================
4 # Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
5 # project. The module is, however, dual licensed under OpenSSL and
6 # CRYPTOGAMS licenses depending on where you obtain it. For further
7 # details see http://www.openssl.org/~appro/cryptogams/.
8 # ====================================================================
12 # This is an AESNI-CBC+SHA1 "stitch" implementation. The idea, as spelled
13 # out in http://download.intel.com/design/intarch/papers/323686.pdf, is
14 # that since AESNI-CBC encrypt exhibits *very* low instruction-level
15 # parallelism, interleaving it with another algorithm allows better
16 # utilization of processor resources and therefore better performance.
17 # SHA1 instruction sequences(*) are taken from sha1-x86_64.pl and
18 # AESNI code is woven into it. Below are performance numbers in
19 # cycles per processed byte (lower is better), for standalone AESNI-CBC
20 # encrypt, the sum of the latter and standalone SHA1, and the "stitched"
21 # subroutine:
23 # AES-128-CBC +SHA1 stitch gain
24 # Westmere 3.77[+5.3] 9.07 6.55 +38%
25 # Sandy Bridge 5.05[+5.0(6.1)] 10.06(11.15) 5.98(7.05) +68%(+58%)
26 # Ivy Bridge 5.05[+4.6] 9.65 5.54 +74%
27 # Haswell 4.43[+3.6(4.2)] 8.00(8.58) 4.55(5.21) +75%(+65%)
28 # Bulldozer 5.77[+6.0] 11.72 6.37 +84%
30 # AES-192-CBC +SHA1 stitch gain
31 # Westmere 4.51 9.81 6.80 +44%
32 # Sandy Bridge 6.05 11.06(12.15) 6.11(7.19) +81%(+69%)
33 # Ivy Bridge 6.05 10.65 6.07 +75%
34 # Haswell 5.29 8.86(9.44) 5.32(5.32) +67%(+77%)
35 # Bulldozer 6.89 12.84 6.96 +84%
37 # AES-256-CBC +SHA1 stitch gain
38 # Westmere 5.25 10.55 7.21 +46%
39 # Sandy Bridge 7.05 12.06(13.15) 7.12(7.72) +69%(+70%)
40 # Ivy Bridge 7.05 11.65 7.12 +64%
41 # Haswell 6.19 9.76(10.34) 6.21(6.25) +57%(+65%)
42 # Bulldozer 8.00 13.95 8.25 +69%
44 # (*) There are two code paths: SSSE3 and AVX. See sha1-x86_64.pl for
45 # background information. Above numbers in parentheses are SSSE3
46 # results collected on an AVX-capable CPU, i.e. they apply on OSes that
47 # don't support AVX.
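#
# To make the "stitch" concrete: each SHA1 round body generated below
# gets at most one &$aesenc() call spliced into it (see body_00_19 and
# friends), so the AES-NI rounds for the four 16-byte CBC blocks that
# cover a 64-byte SHA1 block are spread over its 80 rounds. A minimal
# sketch of the resulting pattern (illustrative only, not the exact
# emitted sequence):
#
#	mov	%ecx,%edi		# SHA1: T  = c
#	xor	%edx,%edi		#       T ^= d
#	aesenc	%xmm15,%xmm11		# AES:  one round of the CBC block
#	and	%ebx,%edi		#       T &= b
#	add	0(%rsp),%ebp		#       e += X[i]+K
#	...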
49 # Needless to say, it makes no sense to implement a "stitched"
50 # *decrypt* subroutine: *both* AESNI-CBC decrypt and SHA1 already
51 # fully utilize instruction-level parallelism, so stitching would not
52 # give any gain anyway. Well, there might be some, e.g. because of better cache
53 # locality... For reference, here are performance results for
54 # standalone AESNI-CBC decrypt:
56 # AES-128-CBC AES-192-CBC AES-256-CBC
57 # Westmere 1.25 1.50 1.75
58 # Sandy Bridge 0.74 0.91 1.09
59 # Ivy Bridge 0.74 0.90 1.11
60 # Haswell 0.63 0.76 0.88
61 # Bulldozer 0.70 0.85 0.99
65 # AES-256-CBC +SHA1 stitch gain
66 # Westmere 1.75 7.20 6.68 +7.8%
67 # Sandy Bridge 1.09 6.09(7.22) 5.82(6.95) +4.6%(+3.9%)
68 # Ivy Bridge 1.11 5.70 5.45 +4.6%
69 # Haswell 0.88 4.45(5.00) 4.39(4.69) +1.4%(*)(+6.6%)
70 # Bulldozer 0.99 6.95 5.95 +17%(**)
72 # (*) The improvement on Haswell looks tiny because the AVX1 stitch
73 # is compared against a sum that includes AVX2 SHA1.
74 # (**) Execution is fully dominated by the integer code sequence and
75 # SIMD still hardly shows [in a single-process benchmark;-]
79 if ($flavour =~ /\./) { $output = $flavour; undef $flavour; }
81 $win64=0; $win64=1 if ($flavour =~ /[nm]asm|mingw64/ || $output =~ /\.asm$/);
83 $0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
84 ( $xlate="${dir}x86_64-xlate.pl" and -f $xlate ) or
85 ( $xlate="${dir}../../perlasm/x86_64-xlate.pl" and -f $xlate) or
86 die "can't locate x86_64-xlate.pl";
88 $avx=1 if (`$ENV{CC} -Wa,-v -c -o /dev/null -x assembler /dev/null 2>&1`
89 =~ /GNU assembler version ([2-9]\.[0-9]+)/ &&
91 $avx=1 if (!$avx && $win64 && ($flavour =~ /nasm/ || $ENV{ASM} =~ /nasm/) &&
92 `nasm -v 2>&1` =~ /NASM version ([2-9]\.[0-9]+)/ &&
94 $avx=1 if (!$avx && $win64 && ($flavour =~ /masm/ || $ENV{ASM} =~ /ml64/) &&
95 `ml64 2>&1` =~ /Version ([0-9]+)\./ &&
100 open OUT,"| \"$^X\" $xlate $flavour $output";
103 # void aesni_cbc_sha1_enc(const void *inp,
106 # const AES_KEY *key,
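#
# The remaining arguments are elided above. Judging from the register
# assignment further down (($in0,$out,$len,$key,$ivp,$ctx,$inp)) and the
# "load 7th argument" comment, a hypothetical call would look roughly
# like this (argument names are illustrative, not the original ones):
#
#	aesni_cbc_sha1_enc(in0, out, len, key, ivp, ctx, inp);
#
# where in0/out/len/key/ivp drive the CBC encryption, ctx is the SHA1
# state being updated and inp, the 7th argument passed on the stack,
# points at the data being hashed; the loop consumes 64 bytes of both
# streams per iteration.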
113 .extern OPENSSL_ia32cap_P
115 .globl aesni_cbc_sha1_enc
116 .type aesni_cbc_sha1_enc,\@abi-omnipotent
119 # caller should check for SSSE3 and AES-NI bits
120 mov OPENSSL_ia32cap_P+0(%rip),%r10d
121 mov OPENSSL_ia32cap_P+4(%rip),%r11
122 bt \$61,%r11 # check SHA bit
123 jc aesni_cbc_sha1_enc_shaext
125 $code.=<<___ if ($avx);
126 and \$`1<<28`,%r11d # mask AVX bit
127 and \$`1<<30`,%r10d # mask "Intel CPU" bit
129 cmp \$`1<<28|1<<30`,%r10d
130 je aesni_cbc_sha1_enc_avx
133 jmp aesni_cbc_sha1_enc_ssse3
135 .size aesni_cbc_sha1_enc,.-aesni_cbc_sha1_enc
138 my ($in0,$out,$len,$key,$ivp,$ctx,$inp)=("%rdi","%rsi","%rdx","%rcx","%r8","%r9","%r10");
141 my @X=map("%xmm$_",(4..7,0..3));
142 my @Tx=map("%xmm$_",(8..10));
143 my @V=($A,$B,$C,$D,$E)=("%eax","%ebx","%ecx","%edx","%ebp"); # size optimization
144 my @T=("%esi","%edi");
145 my $j=0; my $jj=0; my $r=0; my $sn=0; my $rx=0;
147 my ($rndkey0,$iv,$in)=map("%xmm$_",(11..13)); # for enc
148 my @rndkey=("%xmm14","%xmm15"); # for enc
149 my ($inout0,$inout1,$inout2,$inout3)=map("%xmm$_",(12..15)); # for dec
151 if (1) { # reassign for Atom Silvermont
152 # The goal is to minimize the number of instructions with more than
153 # 3 prefix bytes. Or, in more practical terms, to keep AES-NI *and*
154 # SSSE3 instructions to the upper half of the register bank.
155 @X=map("%xmm$_",(8..11,4..7));
156 @Tx=map("%xmm$_",(12,13,3));
157 ($iv,$in,$rndkey0)=map("%xmm$_",(2,14,15));
158 @rndkey=("%xmm0","%xmm1");
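#
# For illustration (the encodings are standard x86-64 ones, shown here
# for explanation only, not emitted by this file): an AES-NI or SSSE3
# instruction already carries three prefix/escape bytes, and using
# %xmm8-%xmm15 adds a REX byte on top, e.g.
#
#	aesenc	%xmm1,%xmm0		# 66 0f 38 dc c1
#	aesenc	%xmm9,%xmm8		# 66 45 0f 38 dc c1
#
# which is the ">3 prefix bytes" situation the comment above tries to
# keep rare on Silvermont.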
161 sub AUTOLOAD() # thunk [simplified] 32-bit style perlasm
162 { my $opcode = $AUTOLOAD; $opcode =~ s/.*:://;
163   my $arg = pop;
164     $arg = "\$$arg" if ($arg*1 eq $arg);
165 $code .= "\t$opcode\t".join(',',$arg,reverse @_)."\n";
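#
# Example of what this thunk produces (assuming the restored
# "my $arg = pop;" above): a call such as
#
#	&ror	($b,7);
#
# appends "\tror\t\$7,%ebx\n" to $code, i.e. the last Perl argument
# becomes the first (here immediate) assembler operand and the rest
# follow in reverse, giving AT&T operand order.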
168 my $_rol=sub { &rol(@_) };
169 my $_ror=sub { &ror(@_) };
172 .type aesni_cbc_sha1_enc_ssse3,\@function,6
174 aesni_cbc_sha1_enc_ssse3:
175 mov `($win64?56:8)`(%rsp),$inp # load 7th argument
176 #shr \$6,$len # debugging artefact
177 #jz .Lepilogue_ssse3 # debugging artefact
184 lea `-104-($win64?10*16:0)`(%rsp),%rsp
185 #mov $in0,$inp # debugging artefact
186 #lea 64(%rsp),$ctx # debugging artefact
188 $code.=<<___ if ($win64);
189 movaps %xmm6,96+0(%rsp)
190 movaps %xmm7,96+16(%rsp)
191 movaps %xmm8,96+32(%rsp)
192 movaps %xmm9,96+48(%rsp)
193 movaps %xmm10,96+64(%rsp)
194 movaps %xmm11,96+80(%rsp)
195 movaps %xmm12,96+96(%rsp)
196 movaps %xmm13,96+112(%rsp)
197 movaps %xmm14,96+128(%rsp)
198 movaps %xmm15,96+144(%rsp)
202 mov $in0,%r12 # reassign arguments
205 lea 112($key),%r15 # size optimization
206 movdqu ($ivp),$iv # load IV
207 mov $ivp,88(%rsp) # save $ivp
209 ($in0,$out,$len,$key)=map("%r$_",(12..15)); # reassign arguments
210 my $rounds="${ivp}d";
214 mov 240-112($key),$rounds
215 add $inp,$len # end of input
217 lea K_XX_XX(%rip),$K_XX_XX
218 mov 0($ctx),$A # load context
222 mov $B,@T[0] # magic seed
228 movdqa 64($K_XX_XX),@Tx[2] # pbswap mask
229 movdqa 0($K_XX_XX),@Tx[1] # K_00_19
230 movdqu 0($inp),@X[-4&7] # load input to %xmm[0-3]
231 movdqu 16($inp),@X[-3&7]
232 movdqu 32($inp),@X[-2&7]
233 movdqu 48($inp),@X[-1&7]
234 pshufb @Tx[2],@X[-4&7] # byte swap
235 pshufb @Tx[2],@X[-3&7]
236 pshufb @Tx[2],@X[-2&7]
238 paddd @Tx[1],@X[-4&7] # add K_00_19
239 pshufb @Tx[2],@X[-1&7]
240 paddd @Tx[1],@X[-3&7]
241 paddd @Tx[1],@X[-2&7]
242 movdqa @X[-4&7],0(%rsp) # X[]+K xfer to IALU
243 psubd @Tx[1],@X[-4&7] # restore X[]
244 movdqa @X[-3&7],16(%rsp)
245 psubd @Tx[1],@X[-3&7]
246 movdqa @X[-2&7],32(%rsp)
247 psubd @Tx[1],@X[-2&7]
248 movups -112($key),$rndkey0 # $key[0]
249 movups 16-112($key),$rndkey[0] # forward reference
255 my ($n,$k)=($r/10,$r%10);
258 movups `16*$n`($in0),$in # load input
261 $code.=<<___ if ($n);
262 movups $iv,`16*($n-1)`($out,$in0) # write output
266 movups `32+16*$k-112`($key),$rndkey[1]
267 aesenc $rndkey[0],$iv
274 movups `32+16*($k+0)-112`($key),$rndkey[1]
275 aesenc $rndkey[0],$iv
276 movups `32+16*($k+1)-112`($key),$rndkey[0]
277 aesenc $rndkey[1],$iv
279 movups `32+16*($k+2)-112`($key),$rndkey[1]
280 aesenc $rndkey[0],$iv
281 movups `32+16*($k+3)-112`($key),$rndkey[0]
282 aesenc $rndkey[1],$iv
284 aesenclast $rndkey[0],$iv
285 movups 16-112($key),$rndkey[1] # forward reference
289 movups `32+16*$k-112`($key),$rndkey[1]
290 aesenc $rndkey[0],$iv
293 $r++; unshift(@rndkey,pop(@rndkey));
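#
# A sketch of the bookkeeping in the closure above, as far as the
# elided conditionals allow one to tell: $r counts &$aesenc() calls, so
# ($n,$k)=($r/10,$r%10) are the 16-byte CBC block index within the
# current 64-byte chunk and the position within that block's key
# schedule. The plaintext block appears to be loaded and xored at
# $k==0, the extra rounds for 192/256-bit keys come from the
# $rounds-dependent branches, and block $n-1's ciphertext is stored
# when block $n begins (the last block's store is issued after the
# SHA1 loop).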
296 sub Xupdate_ssse3_16_31() # recall that $Xi starts with 4
299 my @insns = (&$body,&$body,&$body,&$body); # 40 instructions
302 eval(shift(@insns)); # ror
303 &pshufd (@X[0],@X[-4&7],0xee); # was &movdqa (@X[0],@X[-3&7]);
305 &movdqa (@Tx[0],@X[-1&7]);
306 &paddd (@Tx[1],@X[-1&7]);
310 &punpcklqdq(@X[0],@X[-3&7]); # compose "X[-14]" in "X[0]", was &palignr(@X[0],@X[-4&7],8);
312 eval(shift(@insns)); # rol
314 &psrldq (@Tx[0],4); # "X[-3]", 3 dwords
318 &pxor (@X[0],@X[-4&7]); # "X[0]"^="X[-16]"
320 eval(shift(@insns)); # ror
321 &pxor (@Tx[0],@X[-2&7]); # "X[-3]"^"X[-8]"
326 &pxor (@X[0],@Tx[0]); # "X[0]"^="X[-3]"^"X[-8]"
328 eval(shift(@insns)); # rol
329 &movdqa (eval(16*(($Xi-1)&3))."(%rsp)",@Tx[1]); # X[]+K xfer to IALU
333 &movdqa (@Tx[2],@X[0]);
336 eval(shift(@insns)); # ror
337 &movdqa (@Tx[0],@X[0]);
340 &pslldq (@Tx[2],12); # "X[0]"<<96, extract one dword
341 &paddd (@X[0],@X[0]);
347 eval(shift(@insns)); # rol
349 &movdqa (@Tx[1],@Tx[2]);
355 eval(shift(@insns)); # ror
356 &por (@X[0],@Tx[0]); # "X[0]"<<<=1
362 &pxor (@X[0],@Tx[2]);
364 &movdqa (@Tx[2],eval(16*(($Xi)/5))."($K_XX_XX)"); # K_XX_XX
365 eval(shift(@insns)); # rol
369 &pxor (@X[0],@Tx[1]); # "X[0]"^=("X[0]">>96)<<<2
370 &pshufd (@Tx[1],@X[-1&7],0xee) if ($Xi==7); # was &movdqa (@Tx[0],@X[-1&7]) in Xupdate_ssse3_32_79
372 foreach (@insns) { eval; } # remaining instructions [if any]
374 $Xi++; push(@X,shift(@X)); # "rotate" X[]
375 push(@Tx,shift(@Tx));
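#
# For reference, the scalar recurrences that Xupdate_ssse3_16_31 above,
# Xupdate_ssse3_32_79 and the AVX versions below vectorize four words
# at a time are the standard SHA1 message schedule and its unrolled
# variant:
#
#	rounds 16-31:	W[t] = (W[t-3] ^ W[t-8] ^ W[t-14] ^ W[t-16]) <<< 1
#	rounds 32-79:	W[t] = (W[t-6] ^ W[t-16] ^ W[t-28] ^ W[t-32]) <<< 2
#
# In the 16-31 case W[t-3] for the last lane is produced within the
# same vector and is not yet available, which is what the
# pslldq/psrld/pxor sequence above patches up; the 32-79 form has no
# such dependency.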
378 sub Xupdate_ssse3_32_79()
381 my @insns = (&$body,&$body,&$body,&$body); # 32 to 44 instructions
384 eval(shift(@insns)) if ($Xi==8);
385 &pxor (@X[0],@X[-4&7]); # "X[0]"="X[-32]"^"X[-16]"
386 eval(shift(@insns)) if ($Xi==8);
387 eval(shift(@insns)); # body_20_39
389 eval(shift(@insns)) if (@insns[1] =~ /_ror/);
390 eval(shift(@insns)) if (@insns[0] =~ /_ror/);
391 &punpcklqdq(@Tx[0],@X[-1&7]); # compose "X[-6]", was &palignr(@Tx[0],@X[-2&7],8);
393 eval(shift(@insns)); # rol
395 &pxor (@X[0],@X[-7&7]); # "X[0]"^="X[-28]"
399 &movdqa (@Tx[2],@Tx[1]);# "perpetuate" K_XX_XX...
400 } else { # ... or load next one
401 &movdqa (@Tx[2],eval(16*($Xi/5))."($K_XX_XX)");
403 eval(shift(@insns)); # ror
404 &paddd (@Tx[1],@X[-1&7]);
407 &pxor (@X[0],@Tx[0]); # "X[0]"^="X[-6]"
408 eval(shift(@insns)); # body_20_39
411 eval(shift(@insns)); # rol
412 eval(shift(@insns)) if (@insns[0] =~ /_ror/);
414 &movdqa (@Tx[0],@X[0]);
417 &movdqa (eval(16*(($Xi-1)&3))."(%rsp)",@Tx[1]); # X[]+K xfer to IALU
418 eval(shift(@insns)); # ror
420 eval(shift(@insns)); # body_20_39
426 eval(shift(@insns)) if (@insns[0] =~ /_rol/);# rol
429 eval(shift(@insns)); # ror
431 &por (@X[0],@Tx[0]); # "X[0]"<<<=2
433 eval(shift(@insns)); # body_20_39
434 eval(shift(@insns)) if (@insns[1] =~ /_rol/);
435 eval(shift(@insns)) if (@insns[0] =~ /_rol/);
436 &pshufd(@Tx[1],@X[-1&7],0xee) if ($Xi<19); # was &movdqa (@Tx[1],@X[0])
438 eval(shift(@insns)); # rol
441 eval(shift(@insns)); # rol
444 foreach (@insns) { eval; } # remaining instructions
446 $Xi++; push(@X,shift(@X)); # "rotate" X[]
447 push(@Tx,shift(@Tx));
450 sub Xuplast_ssse3_80()
453 my @insns = (&$body,&$body,&$body,&$body); # 32 instructions
460 &paddd (@Tx[1],@X[-1&7]);
464 &movdqa (eval(16*(($Xi-1)&3))."(%rsp)",@Tx[1]); # X[]+K xfer IALU
466 foreach (@insns) { eval; } # remaining instructions
471 unshift(@Tx,pop(@Tx));
473 &movdqa (@Tx[2],"64($K_XX_XX)"); # pbswap mask
474 &movdqa (@Tx[1],"0($K_XX_XX)"); # K_00_19
475 &movdqu (@X[-4&7],"0($inp)"); # load input
476 &movdqu (@X[-3&7],"16($inp)");
477 &movdqu (@X[-2&7],"32($inp)");
478 &movdqu (@X[-1&7],"48($inp)");
479 &pshufb (@X[-4&7],@Tx[2]); # byte swap
488 my @insns = (&$body,&$body,&$body,&$body); # 32 instructions
494 &pshufb (@X[($Xi-3)&7],@Tx[2]);
499 &paddd (@X[($Xi-4)&7],@Tx[1]);
504 &movdqa (eval(16*$Xi)."(%rsp)",@X[($Xi-4)&7]); # X[]+K xfer to IALU
509 &psubd (@X[($Xi-4)&7],@Tx[1]);
511 foreach (@insns) { eval; }
518 my @insns = (&$body,&$body,&$body,&$body); # 32 instructions
521 foreach (@insns) { eval; }
525 '($a,$b,$c,$d,$e)=@V;'.
526 '&$_ror ($b,$j?7:2);', # $b>>>2
528 '&mov (@T[1],$a);', # $b for next round
530 '&add ($e,eval(4*($j&15))."(%rsp)");',# X[]+K xfer
531 '&xor ($b,$c);', # $c^$d for next round
535 '&and (@T[1],$b);', # ($b&($c^$d)) for next round
537 '&xor ($b,$c);', # restore $b
538 '&add ($e,$a);' .'$j++; unshift(@V,pop(@V)); unshift(@T,pop(@T));'
541 sub body_00_19 () { # ((c^d)&b)^d
542 # on start @T[0]=(c^d)&b
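# For reference, the xor-rearranged expressions in the comments are the
# usual SHA1 round functions:
#
#	rounds  0-19:	Ch(b,c,d)  = (b&c)|((~b)&d)    = ((c^d)&b)^d
#	rounds 20-39:	Parity     = b^c^d
#	rounds 40-59:	Maj(b,c,d) = (b&c)|(b&d)|(c&d) = ((b^c)&(c^d))^c
#	rounds 60-79:	Parity     = b^c^d
#
# which is why body_00_19 keeps (c^d)&b and body_40_59 keeps (b^c) and
# (c^d) partially evaluated in @T[] from one round to the next.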
543 return &body_20_39() if ($rx==19); $rx++;
550 $k = (($jj+1)*12/20)*20*$n/12; # 12 aesencs per these 20 rounds
551 @r[$k%$n].='&$aesenc();' if ($jj==$k/$n);
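# In other words: over the 20 rounds these bodies cover, the integer
# arithmetic above selects the 12 calls that get an &$aesenc() appended
# (at offset $k%$n within the body), so the AES rounds are spread
# roughly evenly across the SHA1 rounds rather than bunched together.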
558 '($a,$b,$c,$d,$e)=@V;'.
559 '&add ($e,eval(4*($j&15))."(%rsp)");',# X[]+K xfer
560 '&xor (@T[0],$d) if($j==19);'.
561 '&xor (@T[0],$c) if($j> 19);', # ($b^$d^$c)
562 '&mov (@T[1],$a);', # $b for next round
566 '&xor (@T[1],$c) if ($j< 79);', # $b^$d for next round
568 '&$_ror ($b,7);', # $b>>>2
569 '&add ($e,$a);' .'$j++; unshift(@V,pop(@V)); unshift(@T,pop(@T));'
572 sub body_20_39 () { # b^d^c
574 return &body_40_59() if ($rx==39); $rx++;
581 $k = (($jj+1)*8/20)*20*$n/8; # 8 aesencs per these 20 rounds
582 @r[$k%$n].='&$aesenc();' if ($jj==$k/$n && $rx!=20);
589 '($a,$b,$c,$d,$e)=@V;'.
590 '&add ($e,eval(4*($j&15))."(%rsp)");',# X[]+K xfer
591 '&and (@T[0],$c) if ($j>=40);', # (b^c)&(c^d)
592 '&xor ($c,$d) if ($j>=40);', # restore $c
594 '&$_ror ($b,7);', # $b>>>2
595 '&mov (@T[1],$a);', # $b for next round
600 '&xor (@T[1],$c) if ($j==59);'.
601 '&xor (@T[1],$b) if ($j< 59);', # b^c for next round
603 '&xor ($b,$c) if ($j< 59);', # c^d for next round
604 '&add ($e,$a);' .'$j++; unshift(@V,pop(@V)); unshift(@T,pop(@T));'
607 sub body_40_59 () { # ((b^c)&(c^d))^c
608 # on entry @T[0]=(b^c), (c^=d)
616 $k=(($jj+1)*12/20)*20*$n/12; # 12 aesencs per these 20 rounds
617 @r[$k%$n].='&$aesenc();' if ($jj==$k/$n && $rx!=40);
626 &Xupdate_ssse3_16_31(\&body_00_19);
627 &Xupdate_ssse3_16_31(\&body_00_19);
628 &Xupdate_ssse3_16_31(\&body_00_19);
629 &Xupdate_ssse3_16_31(\&body_00_19);
630 &Xupdate_ssse3_32_79(\&body_00_19);
631 &Xupdate_ssse3_32_79(\&body_20_39);
632 &Xupdate_ssse3_32_79(\&body_20_39);
633 &Xupdate_ssse3_32_79(\&body_20_39);
634 &Xupdate_ssse3_32_79(\&body_20_39);
635 &Xupdate_ssse3_32_79(\&body_20_39);
636 &Xupdate_ssse3_32_79(\&body_40_59);
637 &Xupdate_ssse3_32_79(\&body_40_59);
638 &Xupdate_ssse3_32_79(\&body_40_59);
639 &Xupdate_ssse3_32_79(\&body_40_59);
640 &Xupdate_ssse3_32_79(\&body_40_59);
641 &Xupdate_ssse3_32_79(\&body_20_39);
642 &Xuplast_ssse3_80(\&body_20_39,".Ldone_ssse3"); # can jump to "done"
644 $saved_j=$j; @saved_V=@V;
645 $saved_r=$r; @saved_rndkey=@rndkey;
647 &Xloop_ssse3(\&body_20_39);
648 &Xloop_ssse3(\&body_20_39);
649 &Xloop_ssse3(\&body_20_39);
652 movups $iv,48($out,$in0) # write output
655 add 0($ctx),$A # update context
662 mov @T[0],$B # magic seed
673 $jj=$j=$saved_j; @V=@saved_V;
674 $r=$saved_r; @rndkey=@saved_rndkey;
676 &Xtail_ssse3(\&body_20_39);
677 &Xtail_ssse3(\&body_20_39);
678 &Xtail_ssse3(\&body_20_39);
681 movups $iv,48($out,$in0) # write output
682 mov 88(%rsp),$ivp # restore $ivp
684 add 0($ctx),$A # update context
694 movups $iv,($ivp) # write IV
696 $code.=<<___ if ($win64);
697 movaps 96+0(%rsp),%xmm6
698 movaps 96+16(%rsp),%xmm7
699 movaps 96+32(%rsp),%xmm8
700 movaps 96+48(%rsp),%xmm9
701 movaps 96+64(%rsp),%xmm10
702 movaps 96+80(%rsp),%xmm11
703 movaps 96+96(%rsp),%xmm12
704 movaps 96+112(%rsp),%xmm13
705 movaps 96+128(%rsp),%xmm14
706 movaps 96+144(%rsp),%xmm15
709 lea `104+($win64?10*16:0)`(%rsp),%rsi
719 .size aesni_cbc_sha1_enc_ssse3,.-aesni_cbc_sha1_enc_ssse3
722 if ($stitched_decrypt) {{{
724 ($in0,$out,$len,$key,$ivp,$ctx,$inp)=("%rdi","%rsi","%rdx","%rcx","%r8","%r9","%r10");
728 # reassign for Atom Silvermont (see above)
729 ($inout0,$inout1,$inout2,$inout3,$rndkey0)=map("%xmm$_",(0..4));
730 @X=map("%xmm$_",(8..13,6,7));
731 @Tx=map("%xmm$_",(14,15,5));
734 '&movdqu($inout0,"0x00($in0)");',
735 '&movdqu($inout1,"0x10($in0)"); &pxor ($inout0,$rndkey0);',
736 '&movdqu($inout2,"0x20($in0)"); &pxor ($inout1,$rndkey0);',
737 '&movdqu($inout3,"0x30($in0)"); &pxor ($inout2,$rndkey0);',
739 '&pxor ($inout3,$rndkey0); &movups ($rndkey0,"16-112($key)");',
740 '&movaps("64(%rsp)",@X[2]);', # save IV, originally @X[3]
743 for ($i=0;$i<13;$i++) {
745 '&aesdec ($inout0,$rndkey0);',
746 '&aesdec ($inout1,$rndkey0);',
747 '&aesdec ($inout2,$rndkey0);',
748 '&aesdec ($inout3,$rndkey0); &movups($rndkey0,"'.(16*($i+2)-112).'($key)");'
750 push (@aes256_dec,(undef,undef)) if (($i>=3 && $i<=5) || $i>=11);
751 push (@aes256_dec,(undef,undef)) if ($i==5);
754 '&aesdeclast ($inout0,$rndkey0); &movups (@X[0],"0x00($in0)");',
755 '&aesdeclast ($inout1,$rndkey0); &movups (@X[1],"0x10($in0)");',
756 '&aesdeclast ($inout2,$rndkey0); &movups (@X[2],"0x20($in0)");',
757 '&aesdeclast ($inout3,$rndkey0); &movups (@X[3],"0x30($in0)");',
759 '&xorps ($inout0,"64(%rsp)"); &movdqu ($rndkey0,"-112($key)");',
760 '&xorps ($inout1,@X[0]); &movups ("0x00($out,$in0)",$inout0);',
761 '&xorps ($inout2,@X[1]); &movups ("0x10($out,$in0)",$inout1);',
762 '&xorps ($inout3,@X[2]); &movups ("0x20($out,$in0)",$inout2);',
764 '&movups ("0x30($out,$in0)",$inout3);'
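#
# Unlike the encrypt path, the schedule above runs four CBC blocks side
# by side: aesdec of one block never depends on another block's result,
# so @aes256_dec is simply a 4-wide interleaved AES-256 decryption of
# 0x40 bytes, with the CBC xors against the saved IV and the previous
# ciphertext blocks applied after the aesdeclast step. The undef
# entries are pacing slots: round bodies that draw them get no AES work
# in that round.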
767 sub body_00_19_dec () { # ((c^d)&b)^d
768 # on start @T[0]=(c^d)&b
769 return &body_20_39_dec() if ($rx==19);
773 unshift (@r,@aes256_dec[$rx]) if (@aes256_dec[$rx]);
779 sub body_20_39_dec () { # b^d^c
781 return &body_40_59_dec() if ($rx==39);
785 unshift (@r,@aes256_dec[$rx]) if (@aes256_dec[$rx]);
791 sub body_40_59_dec () { # ((b^c)&(c^d))^c
792 # on entry @T[0]=(b^c), (c^=d)
796 unshift (@r,@aes256_dec[$rx]) if (@aes256_dec[$rx]);
803 .globl aesni256_cbc_sha1_dec
804 .type aesni256_cbc_sha1_dec,\@abi-omnipotent
806 aesni256_cbc_sha1_dec:
807 # caller should check for SSSE3 and AES-NI bits
808 mov OPENSSL_ia32cap_P+0(%rip),%r10d
809 mov OPENSSL_ia32cap_P+4(%rip),%r11d
811 $code.=<<___ if ($avx);
812 and \$`1<<28`,%r11d # mask AVX bit
813 and \$`1<<30`,%r10d # mask "Intel CPU" bit
815 cmp \$`1<<28|1<<30`,%r10d
816 je aesni256_cbc_sha1_dec_avx
819 jmp aesni256_cbc_sha1_dec_ssse3
821 .size aesni256_cbc_sha1_dec,.-aesni256_cbc_sha1_dec
823 .type aesni256_cbc_sha1_dec_ssse3,\@function,6
825 aesni256_cbc_sha1_dec_ssse3:
826 mov `($win64?56:8)`(%rsp),$inp # load 7th argument
833 lea `-104-($win64?10*16:0)`(%rsp),%rsp
835 $code.=<<___ if ($win64);
836 movaps %xmm6,96+0(%rsp)
837 movaps %xmm7,96+16(%rsp)
838 movaps %xmm8,96+32(%rsp)
839 movaps %xmm9,96+48(%rsp)
840 movaps %xmm10,96+64(%rsp)
841 movaps %xmm11,96+80(%rsp)
842 movaps %xmm12,96+96(%rsp)
843 movaps %xmm13,96+112(%rsp)
844 movaps %xmm14,96+128(%rsp)
845 movaps %xmm15,96+144(%rsp)
846 .Lprologue_dec_ssse3:
849 mov $in0,%r12 # reassign arguments
852 lea 112($key),%r15 # size optimization
853 movdqu ($ivp),@X[3] # load IV
854 #mov $ivp,88(%rsp) # save $ivp
856 ($in0,$out,$len,$key)=map("%r$_",(12..15)); # reassign arguments
860 add $inp,$len # end of input
862 lea K_XX_XX(%rip),$K_XX_XX
863 mov 0($ctx),$A # load context
867 mov $B,@T[0] # magic seed
873 movdqa 64($K_XX_XX),@Tx[2] # pbswap mask
874 movdqa 0($K_XX_XX),@Tx[1] # K_00_19
875 movdqu 0($inp),@X[-4&7] # load input to %xmm[0-3]
876 movdqu 16($inp),@X[-3&7]
877 movdqu 32($inp),@X[-2&7]
878 movdqu 48($inp),@X[-1&7]
879 pshufb @Tx[2],@X[-4&7] # byte swap
881 pshufb @Tx[2],@X[-3&7]
882 pshufb @Tx[2],@X[-2&7]
883 pshufb @Tx[2],@X[-1&7]
884 paddd @Tx[1],@X[-4&7] # add K_00_19
885 paddd @Tx[1],@X[-3&7]
886 paddd @Tx[1],@X[-2&7]
887 movdqa @X[-4&7],0(%rsp) # X[]+K xfer to IALU
888 psubd @Tx[1],@X[-4&7] # restore X[]
889 movdqa @X[-3&7],16(%rsp)
890 psubd @Tx[1],@X[-3&7]
891 movdqa @X[-2&7],32(%rsp)
892 psubd @Tx[1],@X[-2&7]
893 movdqu -112($key),$rndkey0 # $key[0]
899 &Xupdate_ssse3_16_31(\&body_00_19_dec);
900 &Xupdate_ssse3_16_31(\&body_00_19_dec);
901 &Xupdate_ssse3_16_31(\&body_00_19_dec);
902 &Xupdate_ssse3_16_31(\&body_00_19_dec);
903 &Xupdate_ssse3_32_79(\&body_00_19_dec);
904 &Xupdate_ssse3_32_79(\&body_20_39_dec);
905 &Xupdate_ssse3_32_79(\&body_20_39_dec);
906 &Xupdate_ssse3_32_79(\&body_20_39_dec);
907 &Xupdate_ssse3_32_79(\&body_20_39_dec);
908 &Xupdate_ssse3_32_79(\&body_20_39_dec);
909 &Xupdate_ssse3_32_79(\&body_40_59_dec);
910 &Xupdate_ssse3_32_79(\&body_40_59_dec);
911 &Xupdate_ssse3_32_79(\&body_40_59_dec);
912 &Xupdate_ssse3_32_79(\&body_40_59_dec);
913 &Xupdate_ssse3_32_79(\&body_40_59_dec);
914 &Xupdate_ssse3_32_79(\&body_20_39_dec);
915 &Xuplast_ssse3_80(\&body_20_39_dec,".Ldone_dec_ssse3"); # can jump to "done"
917 $saved_j=$j; @saved_V=@V;
920 &Xloop_ssse3(\&body_20_39_dec);
921 &Xloop_ssse3(\&body_20_39_dec);
922 &Xloop_ssse3(\&body_20_39_dec);
924 eval(@aes256_dec[-1]); # last store
928 add 0($ctx),$A # update context
935 mov @T[0],$B # magic seed
946 $jj=$j=$saved_j; @V=@saved_V;
949 &Xtail_ssse3(\&body_20_39_dec);
950 &Xtail_ssse3(\&body_20_39_dec);
951 &Xtail_ssse3(\&body_20_39_dec);
953 eval(@aes256_dec[-1]); # last store
955 add 0($ctx),$A # update context
965 movups @X[3],($ivp) # write IV
967 $code.=<<___ if ($win64);
968 movaps 96+0(%rsp),%xmm6
969 movaps 96+16(%rsp),%xmm7
970 movaps 96+32(%rsp),%xmm8
971 movaps 96+48(%rsp),%xmm9
972 movaps 96+64(%rsp),%xmm10
973 movaps 96+80(%rsp),%xmm11
974 movaps 96+96(%rsp),%xmm12
975 movaps 96+112(%rsp),%xmm13
976 movaps 96+128(%rsp),%xmm14
977 movaps 96+144(%rsp),%xmm15
980 lea `104+($win64?10*16:0)`(%rsp),%rsi
988 .Lepilogue_dec_ssse3:
990 .size aesni256_cbc_sha1_dec_ssse3,.-aesni256_cbc_sha1_dec_ssse3
996 my ($in0,$out,$len,$key,$ivp,$ctx,$inp)=("%rdi","%rsi","%rdx","%rcx","%r8","%r9","%r10");
999 my @X=map("%xmm$_",(4..7,0..3));
1000 my @Tx=map("%xmm$_",(8..10));
1001 my @V=($A,$B,$C,$D,$E)=("%eax","%ebx","%ecx","%edx","%ebp"); # size optimization
1002 my @T=("%esi","%edi");
1003 my ($rndkey0,$iv,$in)=map("%xmm$_",(11..13));
1004 my @rndkey=("%xmm14","%xmm15");
1005 my ($inout0,$inout1,$inout2,$inout3)=map("%xmm$_",(12..15)); # for dec
1008 my $_rol=sub { &shld(@_[0],@_) };
1009 my $_ror=sub { &shrd(@_[0],@_) };
1012 .type aesni_cbc_sha1_enc_avx,\@function,6
1014 aesni_cbc_sha1_enc_avx:
1015 mov `($win64?56:8)`(%rsp),$inp # load 7th argument
1016 #shr \$6,$len # debugging artefact
1017 #jz .Lepilogue_avx # debugging artefact
1024 lea `-104-($win64?10*16:0)`(%rsp),%rsp
1025 #mov $in0,$inp # debugging artefact
1026 #lea 64(%rsp),$ctx # debugging artefact
1028 $code.=<<___ if ($win64);
1029 movaps %xmm6,96+0(%rsp)
1030 movaps %xmm7,96+16(%rsp)
1031 movaps %xmm8,96+32(%rsp)
1032 movaps %xmm9,96+48(%rsp)
1033 movaps %xmm10,96+64(%rsp)
1034 movaps %xmm11,96+80(%rsp)
1035 movaps %xmm12,96+96(%rsp)
1036 movaps %xmm13,96+112(%rsp)
1037 movaps %xmm14,96+128(%rsp)
1038 movaps %xmm15,96+144(%rsp)
1043 mov $in0,%r12 # reassign arguments
1046 lea 112($key),%r15 # size optimization
1047 vmovdqu ($ivp),$iv # load IV
1048 mov $ivp,88(%rsp) # save $ivp
1050 ($in0,$out,$len,$key)=map("%r$_",(12..15)); # reassign arguments
1051 my $rounds="${ivp}d";
1055 mov 240-112($key),$rounds
1056 add $inp,$len # end of input
1058 lea K_XX_XX(%rip),$K_XX_XX
1059 mov 0($ctx),$A # load context
1063 mov $B,@T[0] # magic seed
1069 vmovdqa 64($K_XX_XX),@X[2] # pbswap mask
1070 vmovdqa 0($K_XX_XX),$Kx # K_00_19
1071 vmovdqu 0($inp),@X[-4&7] # load input to %xmm[0-3]
1072 vmovdqu 16($inp),@X[-3&7]
1073 vmovdqu 32($inp),@X[-2&7]
1074 vmovdqu 48($inp),@X[-1&7]
1075 vpshufb @X[2],@X[-4&7],@X[-4&7] # byte swap
1077 vpshufb @X[2],@X[-3&7],@X[-3&7]
1078 vpshufb @X[2],@X[-2&7],@X[-2&7]
1079 vpshufb @X[2],@X[-1&7],@X[-1&7]
1080 vpaddd $Kx,@X[-4&7],@X[0] # add K_00_19
1081 vpaddd $Kx,@X[-3&7],@X[1]
1082 vpaddd $Kx,@X[-2&7],@X[2]
1083 vmovdqa @X[0],0(%rsp) # X[]+K xfer to IALU
1084 vmovdqa @X[1],16(%rsp)
1085 vmovdqa @X[2],32(%rsp)
1086 vmovups -112($key),$rndkey[1] # $key[0]
1087 vmovups 16-112($key),$rndkey[0] # forward reference
1093 my ($n,$k)=($r/10,$r%10);
1096 vmovdqu `16*$n`($in0),$in # load input
1097 vpxor $rndkey[1],$in,$in
1099 $code.=<<___ if ($n);
1100 vmovups $iv,`16*($n-1)`($out,$in0) # write output
1104 vaesenc $rndkey[0],$iv,$iv
1105 vmovups `32+16*$k-112`($key),$rndkey[1]
1112 vaesenc $rndkey[0],$iv,$iv
1113 vmovups `32+16*($k+0)-112`($key),$rndkey[1]
1114 vaesenc $rndkey[1],$iv,$iv
1115 vmovups `32+16*($k+1)-112`($key),$rndkey[0]
1117 vaesenc $rndkey[0],$iv,$iv
1118 vmovups `32+16*($k+2)-112`($key),$rndkey[1]
1119 vaesenc $rndkey[1],$iv,$iv
1120 vmovups `32+16*($k+3)-112`($key),$rndkey[0]
1122 vaesenclast $rndkey[0],$iv,$iv
1123 vmovups -112($key),$rndkey[0]
1124 vmovups 16-112($key),$rndkey[1] # forward reference
1128 vaesenc $rndkey[0],$iv,$iv
1129 vmovups `32+16*$k-112`($key),$rndkey[1]
1132 $r++; unshift(@rndkey,pop(@rndkey));
1135 sub Xupdate_avx_16_31() # recall that $Xi starts with 4
1138 my @insns = (&$body,&$body,&$body,&$body); # 40 instructions
1139 my ($a,$b,$c,$d,$e);
1141 eval(shift(@insns));
1142 eval(shift(@insns));
1143 &vpalignr(@X[0],@X[-3&7],@X[-4&7],8); # compose "X[-14]" in "X[0]"
1144 eval(shift(@insns));
1145 eval(shift(@insns));
1147 &vpaddd (@Tx[1],$Kx,@X[-1&7]);
1148 eval(shift(@insns));
1149 eval(shift(@insns));
1150 &vpsrldq(@Tx[0],@X[-1&7],4); # "X[-3]", 3 dwords
1151 eval(shift(@insns));
1152 eval(shift(@insns));
1153 &vpxor (@X[0],@X[0],@X[-4&7]); # "X[0]"^="X[-16]"
1154 eval(shift(@insns));
1155 eval(shift(@insns));
1157 &vpxor (@Tx[0],@Tx[0],@X[-2&7]); # "X[-3]"^"X[-8]"
1158 eval(shift(@insns));
1159 eval(shift(@insns));
1160 eval(shift(@insns));
1161 eval(shift(@insns));
1163 &vpxor (@X[0],@X[0],@Tx[0]); # "X[0]"^="X[-3]"^"X[-8]"
1164 eval(shift(@insns));
1165 eval(shift(@insns));
1166 &vmovdqa (eval(16*(($Xi-1)&3))."(%rsp)",@Tx[1]); # X[]+K xfer to IALU
1167 eval(shift(@insns));
1168 eval(shift(@insns));
1170 &vpsrld (@Tx[0],@X[0],31);
1171 eval(shift(@insns));
1172 eval(shift(@insns));
1173 eval(shift(@insns));
1174 eval(shift(@insns));
1176 &vpslldq(@Tx[1],@X[0],12); # "X[0]"<<96, extract one dword
1177 &vpaddd (@X[0],@X[0],@X[0]);
1178 eval(shift(@insns));
1179 eval(shift(@insns));
1180 eval(shift(@insns));
1181 eval(shift(@insns));
1183 &vpor (@X[0],@X[0],@Tx[0]); # "X[0]"<<<=1
1184 &vpsrld (@Tx[0],@Tx[1],30);
1185 eval(shift(@insns));
1186 eval(shift(@insns));
1187 eval(shift(@insns));
1188 eval(shift(@insns));
1190 &vpslld (@Tx[1],@Tx[1],2);
1191 &vpxor (@X[0],@X[0],@Tx[0]);
1192 eval(shift(@insns));
1193 eval(shift(@insns));
1194 eval(shift(@insns));
1195 eval(shift(@insns));
1197 &vpxor (@X[0],@X[0],@Tx[1]); # "X[0]"^=("X[0]">>96)<<<2
1198 eval(shift(@insns));
1199 eval(shift(@insns));
1200 &vmovdqa ($Kx,eval(16*(($Xi)/5))."($K_XX_XX)") if ($Xi%5==0); # K_XX_XX
1201 eval(shift(@insns));
1202 eval(shift(@insns));
1205 foreach (@insns) { eval; } # remaining instructions [if any]
1207 $Xi++; push(@X,shift(@X)); # "rotate" X[]
1210 sub Xupdate_avx_32_79()
1213 my @insns = (&$body,&$body,&$body,&$body); # 32 to 48 instructions
1214 my ($a,$b,$c,$d,$e);
1216 &vpalignr(@Tx[0],@X[-1&7],@X[-2&7],8); # compose "X[-6]"
1217 &vpxor (@X[0],@X[0],@X[-4&7]); # "X[0]"="X[-32]"^"X[-16]"
1218 eval(shift(@insns)); # body_20_39
1219 eval(shift(@insns));
1220 eval(shift(@insns));
1221 eval(shift(@insns)); # rol
1223 &vpxor (@X[0],@X[0],@X[-7&7]); # "X[0]"^="X[-28]"
1224 eval(shift(@insns));
1225 eval(shift(@insns)) if (@insns[0] !~ /&ro[rl]/);
1226 &vpaddd (@Tx[1],$Kx,@X[-1&7]);
1227 &vmovdqa ($Kx,eval(16*($Xi/5))."($K_XX_XX)") if ($Xi%5==0);
1228 eval(shift(@insns)); # ror
1229 eval(shift(@insns));
1231 &vpxor (@X[0],@X[0],@Tx[0]); # "X[0]"^="X[-6]"
1232 eval(shift(@insns)); # body_20_39
1233 eval(shift(@insns));
1234 eval(shift(@insns));
1235 eval(shift(@insns)); # rol
1237 &vpsrld (@Tx[0],@X[0],30);
1238 &vmovdqa (eval(16*(($Xi-1)&3))."(%rsp)",@Tx[1]); # X[]+K xfer to IALU
1239 eval(shift(@insns));
1240 eval(shift(@insns));
1241 eval(shift(@insns)); # ror
1242 eval(shift(@insns));
1244 &vpslld (@X[0],@X[0],2);
1245 eval(shift(@insns)); # body_20_39
1246 eval(shift(@insns));
1247 eval(shift(@insns));
1248 eval(shift(@insns)); # rol
1249 eval(shift(@insns));
1250 eval(shift(@insns));
1251 eval(shift(@insns)); # ror
1252 eval(shift(@insns));
1254 &vpor (@X[0],@X[0],@Tx[0]); # "X[0]"<<<=2
1255 eval(shift(@insns)); # body_20_39
1256 eval(shift(@insns));
1257 eval(shift(@insns));
1258 eval(shift(@insns)); # rol
1259 eval(shift(@insns));
1260 eval(shift(@insns));
1261 eval(shift(@insns)); # rol
1262 eval(shift(@insns));
1264 foreach (@insns) { eval; } # remaining instructions
1266 $Xi++; push(@X,shift(@X)); # "rotate" X[]
1269 sub Xuplast_avx_80()
1272 my @insns = (&$body,&$body,&$body,&$body); # 32 instructions
1273 my ($a,$b,$c,$d,$e);
1275 eval(shift(@insns));
1276 &vpaddd (@Tx[1],$Kx,@X[-1&7]);
1277 eval(shift(@insns));
1278 eval(shift(@insns));
1279 eval(shift(@insns));
1280 eval(shift(@insns));
1282 &vmovdqa (eval(16*(($Xi-1)&3))."(%rsp)",@Tx[1]); # X[]+K xfer IALU
1284 foreach (@insns) { eval; } # remaining instructions
1289 &vmovdqa(@Tx[1],"64($K_XX_XX)"); # pbswap mask
1290 &vmovdqa($Kx,"0($K_XX_XX)"); # K_00_19
1291 &vmovdqu(@X[-4&7],"0($inp)"); # load input
1292 &vmovdqu(@X[-3&7],"16($inp)");
1293 &vmovdqu(@X[-2&7],"32($inp)");
1294 &vmovdqu(@X[-1&7],"48($inp)");
1295 &vpshufb(@X[-4&7],@X[-4&7],@Tx[1]); # byte swap
1304 my @insns = (&$body,&$body,&$body,&$body); # 32 instructions
1305 my ($a,$b,$c,$d,$e);
1307 eval(shift(@insns));
1308 eval(shift(@insns));
1309 &vpshufb(@X[($Xi-3)&7],@X[($Xi-3)&7],@Tx[1]);
1310 eval(shift(@insns));
1311 eval(shift(@insns));
1312 &vpaddd (@Tx[0],@X[($Xi-4)&7],$Kx);
1313 eval(shift(@insns));
1314 eval(shift(@insns));
1315 eval(shift(@insns));
1316 eval(shift(@insns));
1317 &vmovdqa(eval(16*$Xi)."(%rsp)",@Tx[0]); # X[]+K xfer to IALU
1318 eval(shift(@insns));
1319 eval(shift(@insns));
1321 foreach (@insns) { eval; }
1328 my @insns = (&$body,&$body,&$body,&$body); # 32 instructions
1329 my ($a,$b,$c,$d,$e);
1331 foreach (@insns) { eval; }
1338 &Xupdate_avx_16_31(\&body_00_19);
1339 &Xupdate_avx_16_31(\&body_00_19);
1340 &Xupdate_avx_16_31(\&body_00_19);
1341 &Xupdate_avx_16_31(\&body_00_19);
1342 &Xupdate_avx_32_79(\&body_00_19);
1343 &Xupdate_avx_32_79(\&body_20_39);
1344 &Xupdate_avx_32_79(\&body_20_39);
1345 &Xupdate_avx_32_79(\&body_20_39);
1346 &Xupdate_avx_32_79(\&body_20_39);
1347 &Xupdate_avx_32_79(\&body_20_39);
1348 &Xupdate_avx_32_79(\&body_40_59);
1349 &Xupdate_avx_32_79(\&body_40_59);
1350 &Xupdate_avx_32_79(\&body_40_59);
1351 &Xupdate_avx_32_79(\&body_40_59);
1352 &Xupdate_avx_32_79(\&body_40_59);
1353 &Xupdate_avx_32_79(\&body_20_39);
1354 &Xuplast_avx_80(\&body_20_39,".Ldone_avx"); # can jump to "done"
1356 $saved_j=$j; @saved_V=@V;
1357 $saved_r=$r; @saved_rndkey=@rndkey;
1359 &Xloop_avx(\&body_20_39);
1360 &Xloop_avx(\&body_20_39);
1361 &Xloop_avx(\&body_20_39);
1364 vmovups $iv,48($out,$in0) # write output
1367 add 0($ctx),$A # update context
1374 mov @T[0],$B # magic seed
1385 $jj=$j=$saved_j; @V=@saved_V;
1386 $r=$saved_r; @rndkey=@saved_rndkey;
1388 &Xtail_avx(\&body_20_39);
1389 &Xtail_avx(\&body_20_39);
1390 &Xtail_avx(\&body_20_39);
1393 vmovups $iv,48($out,$in0) # write output
1394 mov 88(%rsp),$ivp # restore $ivp
1396 add 0($ctx),$A # update context
1406 vmovups $iv,($ivp) # write IV
1409 $code.=<<___ if ($win64);
1410 movaps 96+0(%rsp),%xmm6
1411 movaps 96+16(%rsp),%xmm7
1412 movaps 96+32(%rsp),%xmm8
1413 movaps 96+48(%rsp),%xmm9
1414 movaps 96+64(%rsp),%xmm10
1415 movaps 96+80(%rsp),%xmm11
1416 movaps 96+96(%rsp),%xmm12
1417 movaps 96+112(%rsp),%xmm13
1418 movaps 96+128(%rsp),%xmm14
1419 movaps 96+144(%rsp),%xmm15
1422 lea `104+($win64?10*16:0)`(%rsp),%rsi
1432 .size aesni_cbc_sha1_enc_avx,.-aesni_cbc_sha1_enc_avx
1435 if ($stitched_decrypt) {{{
1437 ($in0,$out,$len,$key,$ivp,$ctx,$inp)=("%rdi","%rsi","%rdx","%rcx","%r8","%r9","%r10");
1439 $j=$jj=$r=$sn=$rx=0;
1443 '&vpxor ($inout0,$rndkey0,"0x00($in0)");',
1444 '&vpxor ($inout1,$rndkey0,"0x10($in0)");',
1445 '&vpxor ($inout2,$rndkey0,"0x20($in0)");',
1446 '&vpxor ($inout3,$rndkey0,"0x30($in0)");',
1448 '&vmovups($rndkey0,"16-112($key)");',
1449 '&vmovups("64(%rsp)",@X[2]);', # save IV, originally @X[3]
1452 for ($i=0;$i<13;$i++) {
1454 '&vaesdec ($inout0,$inout0,$rndkey0);',
1455 '&vaesdec ($inout1,$inout1,$rndkey0);',
1456 '&vaesdec ($inout2,$inout2,$rndkey0);',
1457 '&vaesdec ($inout3,$inout3,$rndkey0); &vmovups($rndkey0,"'.(16*($i+2)-112).'($key)");'
1459 push (@aes256_dec,(undef,undef)) if (($i>=3 && $i<=5) || $i>=11);
1460 push (@aes256_dec,(undef,undef)) if ($i==5);
1463 '&vaesdeclast ($inout0,$inout0,$rndkey0); &vmovups(@X[0],"0x00($in0)");',
1464 '&vaesdeclast ($inout1,$inout1,$rndkey0); &vmovups(@X[1],"0x10($in0)");',
1465 '&vaesdeclast ($inout2,$inout2,$rndkey0); &vmovups(@X[2],"0x20($in0)");',
1466 '&vaesdeclast ($inout3,$inout3,$rndkey0); &vmovups(@X[3],"0x30($in0)");',
1468 '&vxorps ($inout0,$inout0,"64(%rsp)"); &vmovdqu($rndkey0,"-112($key)");',
1469 '&vxorps ($inout1,$inout1,@X[0]); &vmovups("0x00($out,$in0)",$inout0);',
1470 '&vxorps ($inout2,$inout2,@X[1]); &vmovups("0x10($out,$in0)",$inout1);',
1471 '&vxorps ($inout3,$inout3,@X[2]); &vmovups("0x20($out,$in0)",$inout2);',
1473 '&vmovups ("0x30($out,$in0)",$inout3);'
1477 .type aesni256_cbc_sha1_dec_avx,\@function,6
1479 aesni256_cbc_sha1_dec_avx:
1480 mov `($win64?56:8)`(%rsp),$inp # load 7th argument
1487 lea `-104-($win64?10*16:0)`(%rsp),%rsp
1489 $code.=<<___ if ($win64);
1490 movaps %xmm6,96+0(%rsp)
1491 movaps %xmm7,96+16(%rsp)
1492 movaps %xmm8,96+32(%rsp)
1493 movaps %xmm9,96+48(%rsp)
1494 movaps %xmm10,96+64(%rsp)
1495 movaps %xmm11,96+80(%rsp)
1496 movaps %xmm12,96+96(%rsp)
1497 movaps %xmm13,96+112(%rsp)
1498 movaps %xmm14,96+128(%rsp)
1499 movaps %xmm15,96+144(%rsp)
1504 mov $in0,%r12 # reassign arguments
1507 lea 112($key),%r15 # size optimization
1508 vmovdqu ($ivp),@X[3] # load IV
1510 ($in0,$out,$len,$key)=map("%r$_",(12..15)); # reassign arguments
1514 add $inp,$len # end of input
1516 lea K_XX_XX(%rip),$K_XX_XX
1517 mov 0($ctx),$A # load context
1521 mov $B,@T[0] # magic seed
1527 vmovdqa 64($K_XX_XX),@X[2] # pbswap mask
1528 vmovdqa 0($K_XX_XX),$Kx # K_00_19
1529 vmovdqu 0($inp),@X[-4&7] # load input to %xmm[0-3]
1530 vmovdqu 16($inp),@X[-3&7]
1531 vmovdqu 32($inp),@X[-2&7]
1532 vmovdqu 48($inp),@X[-1&7]
1533 vpshufb @X[2],@X[-4&7],@X[-4&7] # byte swap
1535 vpshufb @X[2],@X[-3&7],@X[-3&7]
1536 vpshufb @X[2],@X[-2&7],@X[-2&7]
1537 vpshufb @X[2],@X[-1&7],@X[-1&7]
1538 vpaddd $Kx,@X[-4&7],@X[0] # add K_00_19
1539 vpaddd $Kx,@X[-3&7],@X[1]
1540 vpaddd $Kx,@X[-2&7],@X[2]
1541 vmovdqa @X[0],0(%rsp) # X[]+K xfer to IALU
1542 vmovdqa @X[1],16(%rsp)
1543 vmovdqa @X[2],32(%rsp)
1544 vmovups -112($key),$rndkey0 # $key[0]
1550 &Xupdate_avx_16_31(\&body_00_19_dec);
1551 &Xupdate_avx_16_31(\&body_00_19_dec);
1552 &Xupdate_avx_16_31(\&body_00_19_dec);
1553 &Xupdate_avx_16_31(\&body_00_19_dec);
1554 &Xupdate_avx_32_79(\&body_00_19_dec);
1555 &Xupdate_avx_32_79(\&body_20_39_dec);
1556 &Xupdate_avx_32_79(\&body_20_39_dec);
1557 &Xupdate_avx_32_79(\&body_20_39_dec);
1558 &Xupdate_avx_32_79(\&body_20_39_dec);
1559 &Xupdate_avx_32_79(\&body_20_39_dec);
1560 &Xupdate_avx_32_79(\&body_40_59_dec);
1561 &Xupdate_avx_32_79(\&body_40_59_dec);
1562 &Xupdate_avx_32_79(\&body_40_59_dec);
1563 &Xupdate_avx_32_79(\&body_40_59_dec);
1564 &Xupdate_avx_32_79(\&body_40_59_dec);
1565 &Xupdate_avx_32_79(\&body_20_39_dec);
1566 &Xuplast_avx_80(\&body_20_39_dec,".Ldone_dec_avx"); # can jump to "done"
1568 $saved_j=$j; @saved_V=@V;
1571 &Xloop_avx(\&body_20_39_dec);
1572 &Xloop_avx(\&body_20_39_dec);
1573 &Xloop_avx(\&body_20_39_dec);
1575 eval(@aes256_dec[-1]); # last store
1579 add 0($ctx),$A # update context
1586 mov @T[0],$B # magic seed
1597 $jj=$j=$saved_j; @V=@saved_V;
1600 &Xtail_avx(\&body_20_39_dec);
1601 &Xtail_avx(\&body_20_39_dec);
1602 &Xtail_avx(\&body_20_39_dec);
1604 eval(@aes256_dec[-1]); # last store
1607 add 0($ctx),$A # update context
1617 vmovups @X[3],($ivp) # write IV
1620 $code.=<<___ if ($win64);
1621 movaps 96+0(%rsp),%xmm6
1622 movaps 96+16(%rsp),%xmm7
1623 movaps 96+32(%rsp),%xmm8
1624 movaps 96+48(%rsp),%xmm9
1625 movaps 96+64(%rsp),%xmm10
1626 movaps 96+80(%rsp),%xmm11
1627 movaps 96+96(%rsp),%xmm12
1628 movaps 96+112(%rsp),%xmm13
1629 movaps 96+128(%rsp),%xmm14
1630 movaps 96+144(%rsp),%xmm15
1633 lea `104+($win64?10*16:0)`(%rsp),%rsi
1643 .size aesni256_cbc_sha1_dec_avx,.-aesni256_cbc_sha1_dec_avx
1650 .long 0x5a827999,0x5a827999,0x5a827999,0x5a827999 # K_00_19
1651 .long 0x6ed9eba1,0x6ed9eba1,0x6ed9eba1,0x6ed9eba1 # K_20_39
1652 .long 0x8f1bbcdc,0x8f1bbcdc,0x8f1bbcdc,0x8f1bbcdc # K_40_59
1653 .long 0xca62c1d6,0xca62c1d6,0xca62c1d6,0xca62c1d6 # K_60_79
1654 .long 0x00010203,0x04050607,0x08090a0b,0x0c0d0e0f # pbswap mask
1655 .byte 0xf,0xe,0xd,0xc,0xb,0xa,0x9,0x8,0x7,0x6,0x5,0x4,0x3,0x2,0x1,0x0
1657 .asciz "AESNI-CBC+SHA1 stitch for x86_64, CRYPTOGAMS by <appro\@openssl.org>"
1661 ($in0,$out,$len,$key,$ivp,$ctx,$inp)=("%rdi","%rsi","%rdx","%rcx","%r8","%r9","%r10");
1665 ($iv,$in,$rndkey0)=map("%xmm$_",(2,14,15));
1666 @rndkey=("%xmm0","%xmm1");
1669 my ($BSWAP,$ABCD,$E,$E_,$ABCD_SAVE,$E_SAVE)=map("%xmm$_",(7..12));
1670 my @MSG=map("%xmm$_",(3..6));
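#
# A quick reminder of the SHA extension instructions used below
# (summarized from the Intel ISA documentation, which remains the
# authoritative reference):
#
#	sha1rnds4 \$i,$E,$ABCD	- four SHA1 rounds on the ABCD state,
#				  \$i selecting the round function and
#				  constant, the source holding E already
#				  added to the next four message words
#	sha1nexte		- derives the next four-round E operand
#				  from the current state and message words
#	sha1msg1/sha1msg2	- the two halves of the four-wide message
#				  schedule update
#
# The state is kept with dwords in reversed order, hence the
# pshufd \$0b00011011 flips on entry and exit.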
1673 .type aesni_cbc_sha1_enc_shaext,\@function,6
1675 aesni_cbc_sha1_enc_shaext:
1676 mov `($win64?56:8)`(%rsp),$inp # load 7th argument
1678 $code.=<<___ if ($win64);
1679 lea `-8-4*16`(%rsp),%rsp
1680 movaps %xmm6,-8-10*16(%rax)
1681 movaps %xmm7,-8-9*16(%rax)
1682 movaps %xmm8,-8-8*16(%rax)
1683 movaps %xmm9,-8-7*16(%rax)
1684 movaps %xmm10,-8-6*16(%rax)
1685 movaps %xmm11,-8-5*16(%rax)
1686 movaps %xmm12,-8-4*16(%rax)
1687 movaps %xmm13,-8-3*16(%rax)
1688 movaps %xmm14,-8-2*16(%rax)
1689 movaps %xmm15,-8-1*16(%rax)
1695 movdqa K_XX_XX+0x50(%rip),$BSWAP # byte-n-word swap
1697 mov 240($key),$rounds
1699 movups ($key),$rndkey0 # $key[0]
1700 movups 16($key),$rndkey[0] # forward reference
1701 lea 112($key),$key # size optimization
1703 pshufd \$0b00011011,$ABCD,$ABCD # flip word order
1704 pshufd \$0b00011011,$E,$E # flip word order
1712 movdqu ($inp),@MSG[0]
1713 movdqa $E,$E_SAVE # offload $E
1714 pshufb $BSWAP,@MSG[0]
1715 movdqu 0x10($inp),@MSG[1]
1716 movdqa $ABCD,$ABCD_SAVE # offload $ABCD
1720 pshufb $BSWAP,@MSG[1]
1723 movdqu 0x20($inp),@MSG[2]
1725 pxor $E_SAVE,@MSG[0] # black magic
1729 pxor $E_SAVE,@MSG[0] # black magic
1731 pshufb $BSWAP,@MSG[2]
1732 sha1rnds4 \$0,$E,$ABCD # 0-3
1733 sha1nexte @MSG[1],$E_
1737 sha1msg1 @MSG[1],@MSG[0]
1738 movdqu -0x10($inp),@MSG[3]
1740 pshufb $BSWAP,@MSG[3]
1744 sha1rnds4 \$0,$E_,$ABCD # 4-7
1745 sha1nexte @MSG[2],$E
1746 pxor @MSG[2],@MSG[0]
1747 sha1msg1 @MSG[2],@MSG[1]
1751 for($i=2;$i<20-4;$i++) {
1754 sha1rnds4 \$`int($i/5)`,$E,$ABCD # 8-11
1755 sha1nexte @MSG[3],$E_
1759 sha1msg2 @MSG[3],@MSG[0]
1760 pxor @MSG[3],@MSG[1]
1761 sha1msg1 @MSG[3],@MSG[2]
1764 push(@MSG,shift(@MSG));
1770 sha1rnds4 \$3,$E,$ABCD # 64-67
1771 sha1nexte @MSG[3],$E_
1772 sha1msg2 @MSG[3],@MSG[0]
1773 pxor @MSG[3],@MSG[1]
1778 sha1rnds4 \$3,$E_,$ABCD # 68-71
1779 sha1nexte @MSG[0],$E
1780 sha1msg2 @MSG[0],@MSG[1]
1784 movdqa $E_SAVE,@MSG[0]
1786 sha1rnds4 \$3,$E,$ABCD # 72-75
1787 sha1nexte @MSG[1],$E_
1792 sha1rnds4 \$3,$E_,$ABCD # 76-79
1793 sha1nexte $MSG[0],$E
1795 while($r<40) { &$aesenc(); } # remaining aesenc's
1799 paddd $ABCD_SAVE,$ABCD
1800 movups $iv,48($out,$in0) # write output
1804 pshufd \$0b00011011,$ABCD,$ABCD
1805 pshufd \$0b00011011,$E,$E
1806 movups $iv,($ivp) # write IV
1810 $code.=<<___ if ($win64);
1811 movaps -8-10*16(%rax),%xmm6
1812 movaps -8-9*16(%rax),%xmm7
1813 movaps -8-8*16(%rax),%xmm8
1814 movaps -8-7*16(%rax),%xmm9
1815 movaps -8-6*16(%rax),%xmm10
1816 movaps -8-5*16(%rax),%xmm11
1817 movaps -8-4*16(%rax),%xmm12
1818 movaps -8-3*16(%rax),%xmm13
1819 movaps -8-2*16(%rax),%xmm14
1820 movaps -8-1*16(%rax),%xmm15
1826 .size aesni_cbc_sha1_enc_shaext,.-aesni_cbc_sha1_enc_shaext
1829 # EXCEPTION_DISPOSITION handler (EXCEPTION_RECORD *rec,ULONG64 frame,
1830 # CONTEXT *context,DISPATCHER_CONTEXT *disp)
1838 .extern __imp_RtlVirtualUnwind
1839 .type ssse3_handler,\@abi-omnipotent
1853 mov 120($context),%rax # pull context->Rax
1854 mov 248($context),%rbx # pull context->Rip
1856 mov 8($disp),%rsi # disp->ImageBase
1857 mov 56($disp),%r11 # disp->HandlerData
1859 mov 0(%r11),%r10d # HandlerData[0]
1860 lea (%rsi,%r10),%r10 # prologue label
1861 cmp %r10,%rbx # context->Rip<prologue label
1862 jb .Lcommon_seh_tail
1864 mov 152($context),%rax # pull context->Rsp
1866 mov 4(%r11),%r10d # HandlerData[1]
1867 lea (%rsi,%r10),%r10 # epilogue label
1868 cmp %r10,%rbx # context->Rip>=epilogue label
1869 jae .Lcommon_seh_tail
1872 lea 512($context),%rdi # &context.Xmm6
1874 .long 0xa548f3fc # cld; rep movsq
1875 lea `104+10*16`(%rax),%rax # adjust stack pointer
1884 mov %rbx,144($context) # restore context->Rbx
1885 mov %rbp,160($context) # restore context->Rbp
1886 mov %r12,216($context) # restore context->R12
1887 mov %r13,224($context) # restore context->R13
1888 mov %r14,232($context) # restore context->R14
1889 mov %r15,240($context) # restore context->R15
1894 mov %rax,152($context) # restore context->Rsp
1895 mov %rsi,168($context) # restore context->Rsi
1896 mov %rdi,176($context) # restore context->Rdi
1898 mov 40($disp),%rdi # disp->ContextRecord
1899 mov $context,%rsi # context
1900 mov \$154,%ecx # sizeof(CONTEXT) in quad-words
1901 .long 0xa548f3fc # cld; rep movsq
1904 xor %rcx,%rcx # arg1, UNW_FLAG_NHANDLER
1905 mov 8(%rsi),%rdx # arg2, disp->ImageBase
1906 mov 0(%rsi),%r8 # arg3, disp->ControlPc
1907 mov 16(%rsi),%r9 # arg4, disp->FunctionEntry
1908 mov 40(%rsi),%r10 # disp->ContextRecord
1909 lea 56(%rsi),%r11 # &disp->HandlerData
1910 lea 24(%rsi),%r12 # &disp->EstablisherFrame
1911 mov %r10,32(%rsp) # arg5
1912 mov %r11,40(%rsp) # arg6
1913 mov %r12,48(%rsp) # arg7
1914 mov %rcx,56(%rsp) # arg8, (NULL)
1915 call *__imp_RtlVirtualUnwind(%rip)
1917 mov \$1,%eax # ExceptionContinueSearch
1929 .size ssse3_handler,.-ssse3_handler
1933 .rva .LSEH_begin_aesni_cbc_sha1_enc_ssse3
1934 .rva .LSEH_end_aesni_cbc_sha1_enc_ssse3
1935 .rva .LSEH_info_aesni_cbc_sha1_enc_ssse3
1937 $code.=<<___ if ($avx);
1938 .rva .LSEH_begin_aesni_cbc_sha1_enc_avx
1939 .rva .LSEH_end_aesni_cbc_sha1_enc_avx
1940 .rva .LSEH_info_aesni_cbc_sha1_enc_avx
1945 .LSEH_info_aesni_cbc_sha1_enc_ssse3:
1948 .rva .Lprologue_ssse3,.Lepilogue_ssse3 # HandlerData[]
1950 $code.=<<___ if ($avx);
1951 .LSEH_info_aesni_cbc_sha1_enc_avx:
1954 .rva .Lprologue_avx,.Lepilogue_avx # HandlerData[]
1958 ####################################################################
1960 local *opcode=shift;
1964 $rex|=0x04 if($dst>=8);
1965 $rex|=0x01 if($src>=8);
1966 unshift @opcode,$rex|0x40 if($rex);
1970 if (@_[0] =~ /\$([x0-9a-f]+),\s*%xmm([0-9]+),\s*%xmm([0-9]+)/) {
1971 my @opcode=(0x0f,0x3a,0xcc);
1972 rex(\@opcode,$3,$2);
1973 push @opcode,0xc0|($2&7)|(($3&7)<<3); # ModR/M
1974 my $c=$1;
1975 push @opcode,$c=~/^0/?oct($c):$c;
1976 return ".byte\t".join(',',@opcode);
1978 return "sha1rnds4\t".@_[0];
1985 "sha1nexte" => 0xc8,
1987 "sha1msg2" => 0xca );
1989 if (defined($opcodelet{$instr}) && @_[0] =~ /%xmm([0-9]+),\s*%xmm([0-9]+)/) {
1990 my @opcode=(0x0f,0x38);
1991 rex(\@opcode,$2,$1);
1992 push @opcode,$opcodelet{$instr};
1993 push @opcode,0xc0|($1&7)|(($2&7)<<3); # ModR/M
1994 return ".byte\t".join(',',@opcode);
1996 return $instr."\t".@_[0];
2002 my @opcode=(0x0f,0x38);
2004 if ($line=~/(aes[a-z]+)\s+%xmm([0-9]+),\s*%xmm([0-9]+)/) {
2006 "aesenc" => 0xdc, "aesenclast" => 0xdd,
2007 "aesdec" => 0xde, "aesdeclast" => 0xdf
2009 return undef if (!defined($opcodelet{$1}));
2010 rex(\@opcode,$3,$2);
2011 push @opcode,$opcodelet{$1},0xc0|($2&7)|(($3&7)<<3); # ModR/M
2012 unshift @opcode,0x66;
2013 return ".byte\t".join(',',@opcode);
2018 foreach (split("\n",$code)) {
2019 s/\`([^\`]*)\`/eval $1/geo;
2021 s/\b(sha1rnds4)\s+(.*)/sha1rnds4($2)/geo or
2022 s/\b(sha1[^\s]*)\s+(.*)/sha1op38($1,$2)/geo or
2023 s/\b(aes.*%xmm[0-9]+).*$/aesni($1)/geo;