# ====================================================================
# [Re]written by Andy Polyakov <appro@openssl.org> for the OpenSSL
# project. The module is, however, dual licensed under OpenSSL and
# CRYPTOGAMS licenses depending on where you obtain it. For further
# details see http://www.openssl.org/~appro/cryptogams/.
# ====================================================================
#
# "[Re]written" was achieved in two major overhauls. In 2004 the BODY_*
# functions were re-implemented to address a P4 performance issue [see
# commentary below], and in 2006 the rest was rewritten in order to
# gain the freedom to liberate the licensing terms.
#
# January, September 2004.
#
# It was noted that the Intel IA-32 C compiler generates code which
# performs ~30% *faster* on P4 CPU than the original *hand-coded*
# SHA1 assembler implementation. To address this problem (and
# prove that humans are still better than machines:-), the
# original code was overhauled, which resulted in the following
# performance changes:
#
#		compared with original	compared with Intel cc
#		assembler impl.		generated code
#
# As you can see Pentium came out the loser:-( Yet I reckoned that
# the improvement on P4 outweighs the loss and incorporated this
# re-tuned code into 0.9.7 and later.
# ----------------------------------------------------------------
# <appro@fy.chalmers.se>
#
# George Spelvin has tipped that F_40_59(b,c,d) can be rewritten as
# '(c&d) + (b&(c^d))', which allows one to accumulate partial results
# and lighten the "pressure" on scratch registers. This resulted in a
# >12% performance improvement on contemporary AMD cores (with no
# degradation on other CPUs:-). Also, the code was revised to maximize
# the "distance" between instructions producing input to the 'lea'
# instruction and the 'lea' instruction itself, which is essential for
# the Intel Atom core and resulted in ~15% improvement.
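#
# A quick sanity check of that identity (a standalone sketch, not part
# of this module): the two terms can never have a bit set in the same
# position, so the addition is carry-free and equals the canonical
# majority function.
#
#	# verify (b&c)|(b&d)|(c&d) == (c&d) + (b&(c^d)) on random words
#	for (1..100000) {
#		my ($b,$c,$d) = map { int(rand(2**32)) } 1..3;
#		die "mismatch" if ((($b&$c)|($b&$d)|($c&$d))
#				!= ((($c&$d) + ($b&($c^$d))) & 0xffffffff));
#	}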
#
# Add SSSE3, Supplemental[!] SSE3, implementation. The idea behind it
# is to offload the message schedule, denoted by Wt in the NIST
# specification, or Xupdate in the OpenSSL source, to the SIMD unit.
# The idea is not novel, and in SSE2 context it was first explored by
# Dean Gaudet in 2004, see http://arctic.org/~dean/crypto/sha1.html.
# Since then several things have changed that made it interesting
# again:
#
# a) XMM units became faster and wider;
# b) the instruction set became more versatile;
# c) an important observation was made by Max Locktyukhin, which made
#    it possible to reduce the number of instructions required to
#    perform the operation in question, for further details see
#    http://software.intel.com/en-us/articles/improving-the-performance-of-the-secure-hash-algorithm-1/.
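#
# For reference, the scalar recurrence being offloaded is (a sketch
# with a hypothetical rotl helper, not code from this module):
#
#	sub rotl { my ($x,$n) = @_; (($x<<$n)|($x>>(32-$n))) & 0xffffffff }
#	# W[t] = ROTATE(W[t-3] ^ W[t-8] ^ W[t-14] ^ W[t-16], 1), 16<=t<80
#	for my $t (16..79) {
#		$W[$t] = rotl($W[$t-3] ^ $W[$t-8] ^ $W[$t-14] ^ $W[$t-16], 1);
#	}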
#
# Add AVX code path, probably the most controversial... The thing is
# that switching to AVX alone improves performance by as little as 4%
# in comparison to the SSSE3 code path. But the result below doesn't
# look like a 4% improvement... The trouble is that Sandy Bridge
# decodes 'ro[rl]' as a pair of µ-ops, and it's the additional µ-ops,
# two per round, that make it run slower than Core2 and Westmere. But
# 'sh[rl]d' is decoded as a single µ-op by Sandy Bridge, and it's
# replacing 'ro[rl]' with the equivalent 'sh[rl]d' that is responsible
# for the impressive 5.1 cycles per processed byte. But 'sh[rl]d' is
# not something that used to be fast, nor does it appear to be fast in
# the upcoming Bulldozer [according to its optimization manual]. Which
# is why the AVX code path is guarded by *both* the AVX bit and the
# synthetic bit denoting Intel CPUs. One can argue that it's unfair to
# AMD, but without 'sh[rl]d' it makes no sense to keep the AVX code
# path. If somebody feels strongly about it, it's probably more
# appropriate to discuss the possibility of using the XOP vector
# rotate on AMD...
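#
# The 'ro[rl]'/'sh[rl]d' swap relies on rol(x,n) == shld(x,x,n): shld
# shifts x left by n while shifting in the top n bits of its second
# operand, which here is x itself. A scalar model (hypothetical
# helpers, not code from this module):
#
#	sub rol  { my ($x,$n) = @_; (($x<<$n)|($x>>(32-$n))) & 0xffffffff }
#	sub shld { my ($x,$y,$n) = @_; (($x<<$n)|($y>>(32-$n))) & 0xffffffff }
#	# rol($x,$n) == shld($x,$x,$n) for any 32-bit $x, 0 < $n < 32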
#
# Add support for Intel SHA Extensions.
#
######################################################################
# Current performance is summarized in the following table. Numbers
# are CPU clock cycles spent to process a single byte (less is
# better).
#
#		x86		SSSE3		AVX
# Core2		7.3		6.0/+22%	-
# Atom		12.5		9.3(*)/+35%	-
# Westmere	7.3		5.5/+33%	-
# Sandy Bridge	8.8		6.2/+40%	5.1(**)/+73%
# Ivy Bridge	7.2		4.8/+51%	4.7(**)/+53%
# Haswell	6.5		4.3/+51%	4.1(**)/+58%
# Bulldozer	11.6		6.0/+92%
# VIA Nano	10.6		7.5/+41%
#
# (*)	Loop is 1056 instructions long and the expected result is
#	~8.25. It remains a mystery [to me] why ILP is limited to 1.7.
#
# (**)	As per the above comment, the result is for AVX *plus* sh[rl]d.
$0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
push(@INC,"${dir}","${dir}../../perlasm");
require "x86asm.pl";

&asm_init($ARGV[0],"sha1-586.pl",$ARGV[$#ARGV] eq "386");
for (@ARGV) { $xmm=1 if (/-DOPENSSL_IA32_SSE2/); }

$ymm=1 if ($xmm &&
	`$ENV{CC} -Wa,-v -c -o /dev/null -x assembler /dev/null 2>&1`
	=~ /GNU assembler version ([2-9]\.[0-9]+)/ &&
	$1>=2.19);	# first version supporting AVX

$ymm=1 if ($xmm && !$ymm && $ARGV[0] eq "win32n" &&
	`nasm -v 2>&1` =~ /NASM version ([2-9]\.[0-9]+)/ &&
	$1>=2.03);	# first version supporting AVX

$ymm=1 if ($xmm && !$ymm && $ARGV[0] eq "win32" &&
	`ml 2>&1` =~ /Version ([0-9]+)\./ &&
	$1>=10);	# first version supporting AVX

&external_label("OPENSSL_ia32cap_P") if ($xmm);
@V=($A,$B,$C,$D,$E,$T);

$alt=0;	# 1 denotes alternative IALU implementation, which performs
	# 8% *worse* on P4, same on Westmere and Atom, 2% better on
	# Sandy Bridge...
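# For reference, the round functions and constants implemented by the
# BODY_* subroutines below are those of FIPS 180 (a plain-Perl model
# for readability, not used by this module):
#
#	sub F_00_19 { my ($b,$c,$d)=@_; ($b&$c)|(~$b&$d)        }	# K=0x5a827999
#	sub F_20_39 { my ($b,$c,$d)=@_; $b^$c^$d                }	# K=0x6ed9eba1
#	sub F_40_59 { my ($b,$c,$d)=@_; ($b&$c)|($b&$d)|($c&$d) }	# K=0x8f1bbcdc
#	sub F_60_79 { my ($b,$c,$d)=@_; $b^$c^$d                }	# K=0xca62c1d6
#
# The code below computes F_00_19 as ((c^d)&b)^d, which is equivalent
# but takes one less scratch register.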
sub BODY_00_15
	{
	local($n,$a,$b,$c,$d,$e,$f)=@_;

	&comment("00_15 $n");

	&mov($f,$c);			# f to hold F_00_19(b,c,d)
	 if ($n==0) { &mov($tmp1,$a); }
	 else       { &mov($a,$tmp1); }
	&rotl($tmp1,5);			# tmp1=ROTATE(a,5)
	 &xor($f,$d);
	&add($tmp1,$e);			# tmp1+=e;
	&mov($e,&swtmp($n%16));		# e becomes volatile and is loaded
	 				# with xi, also note that e becomes
					# f in next round...
	 &and($f,$b);
	&rotr($b,2);			# b=ROTATE(b,30)
	 &xor($f,$d);			# f holds F_00_19(b,c,d)
	&lea($tmp1,&DWP(0x5a827999,$tmp1,$e));	# tmp1+=K_00_19+xi

	if ($n==15) { &mov($e,&swtmp(($n+1)%16));# pre-fetch f for next round
		      &add($f,$tmp1); }	# f+=tmp1
	else        { &add($tmp1,$f); }	# f becomes a in next round
	&mov($tmp1,$a)			if ($alt && $n==15);
	}
sub BODY_16_19
	{
	local($n,$a,$b,$c,$d,$e,$f)=@_;

	&comment("16_19 $n");

if ($alt) {
	&xor($c,$d);
	 &xor($f,&swtmp(($n+2)%16));	# f to hold Xupdate(xi,xa,xb,xc,xd)
	&and($tmp1,$c);			# tmp1 to hold F_00_19(b,c,d), b&=c^d
	 &xor($f,&swtmp(($n+8)%16));
	&xor($tmp1,$d);			# tmp1=F_00_19(b,c,d)
	 &xor($f,&swtmp(($n+13)%16));	# f holds xa^xb^xc^xd
	&rotl($f,1);			# f=ROTATE(f,1)
	 &add($e,$tmp1);		# e+=F_00_19(b,c,d)
	&xor($c,$d);			# restore $c
	 &mov($tmp1,$a);		# b in next round
	&rotr($b,$n==16?2:7);		# b=ROTATE(b,30)
	 &mov(&swtmp($n%16),$f);	# xi=f
	&rotl($a,5);			# ROTATE(a,5)
	 &lea($f,&DWP(0x5a827999,$f,$e));# f+=F_00_19(b,c,d)+e
	&mov($e,&swtmp(($n+1)%16));	# pre-fetch f for next round
	 &add($f,$a);			# f+=ROTATE(a,5)
} else {
	&mov($tmp1,$c);			# tmp1 to hold F_00_19(b,c,d)
	&xor($f,&swtmp(($n+2)%16));	# f to hold Xupdate(xi,xa,xb,xc,xd)
	&xor($tmp1,$d);
	&xor($f,&swtmp(($n+8)%16));
	&and($tmp1,$b);
	&xor($f,&swtmp(($n+13)%16));	# f holds xa^xb^xc^xd
	&rotl($f,1);			# f=ROTATE(f,1)
	&xor($tmp1,$d);			# tmp1=F_00_19(b,c,d)
	&add($e,$tmp1);			# e+=F_00_19(b,c,d)
	&mov($tmp1,$a);
	&rotr($b,2);			# b=ROTATE(b,30)
	&mov(&swtmp($n%16),$f);		# xi=f
	&rotl($tmp1,5);			# ROTATE(a,5)
	&lea($f,&DWP(0x5a827999,$f,$e));# f+=F_00_19(b,c,d)+e
	&mov($e,&swtmp(($n+1)%16));	# pre-fetch f for next round
	&add($f,$tmp1);			# f+=ROTATE(a,5)
}
	}
sub BODY_20_39
	{
	local($n,$a,$b,$c,$d,$e,$f)=@_;
	local $K=($n<40)?0x6ed9eba1:0xca62c1d6;

	&comment("20_39 $n");

if ($alt) {
	&xor($tmp1,$c);			# tmp1 to hold F_20_39(b,c,d), b^=c
	 &xor($f,&swtmp(($n+2)%16));	# f to hold Xupdate(xi,xa,xb,xc,xd)
	&xor($tmp1,$d);			# tmp1 holds F_20_39(b,c,d)
	 &xor($f,&swtmp(($n+8)%16));
	&add($e,$tmp1);			# e+=F_20_39(b,c,d)
	 &xor($f,&swtmp(($n+13)%16));	# f holds xa^xb^xc^xd
	&rotl($f,1);			# f=ROTATE(f,1)
	 &mov($tmp1,$a);		# b in next round
	&rotr($b,7);			# b=ROTATE(b,30)
	 &mov(&swtmp($n%16),$f)		if($n<77);# xi=f
	&rotl($a,5);			# ROTATE(a,5)
	 &xor($b,$c)			if($n==39);# warm up for BODY_40_59
	&and($tmp1,$b)			if($n==39);
	 &lea($f,&DWP($K,$f,$e));	# f+=e+K_XX_YY
	&mov($e,&swtmp(($n+1)%16))	if($n<79);# pre-fetch f for next round
	 &add($f,$a);			# f+=ROTATE(a,5)
	&rotr($a,5)			if ($n==79);
} else {
	&mov($tmp1,$b);			# tmp1 to hold F_20_39(b,c,d)
	&xor($f,&swtmp(($n+2)%16));	# f to hold Xupdate(xi,xa,xb,xc,xd)
	&xor($tmp1,$c);
	&xor($f,&swtmp(($n+8)%16));
	&xor($tmp1,$d);			# tmp1 holds F_20_39(b,c,d)
	&xor($f,&swtmp(($n+13)%16));	# f holds xa^xb^xc^xd
	&rotl($f,1);			# f=ROTATE(f,1)
	&add($e,$tmp1);			# e+=F_20_39(b,c,d)
	&rotr($b,2);			# b=ROTATE(b,30)
	&mov($tmp1,$a);
	&rotl($tmp1,5);			# ROTATE(a,5)
	&mov(&swtmp($n%16),$f)		if($n<77);# xi=f
	&lea($f,&DWP($K,$f,$e));	# f+=e+K_XX_YY
	&mov($e,&swtmp(($n+1)%16))	if($n<79);# pre-fetch f for next round
	&add($f,$tmp1);			# f+=ROTATE(a,5)
}
	}
sub BODY_40_59
	{
	local($n,$a,$b,$c,$d,$e,$f)=@_;

	&comment("40_59 $n");

if ($alt) {
	&add($e,$tmp1);			# e+=b&(c^d)
	 &xor($f,&swtmp(($n+2)%16));	# f to hold Xupdate(xi,xa,xb,xc,xd)
	&mov($tmp1,$d);
	 &xor($f,&swtmp(($n+8)%16));
	&xor($c,$d);			# restore $c
	 &xor($f,&swtmp(($n+13)%16));	# f holds xa^xb^xc^xd
	&rotl($f,1);			# f=ROTATE(f,1)
	 &and($tmp1,$c);
	&rotr($b,7);			# b=ROTATE(b,30)
	 &add($e,$tmp1);		# e+=c&d
	&mov($tmp1,$a);			# b in next round
	 &mov(&swtmp($n%16),$f);	# xi=f
	&rotl($a,5);			# ROTATE(a,5)
	 &xor($b,$c)			if ($n<59);
	&and($tmp1,$b)			if ($n<59);# tmp1 to hold F_40_59(b,c,d)
	 &lea($f,&DWP(0x8f1bbcdc,$f,$e));# f+=K_40_59+e+(b&(c^d))
	&mov($e,&swtmp(($n+1)%16));	# pre-fetch f for next round
	 &add($f,$a);			# f+=ROTATE(a,5)
} else {
	&mov($tmp1,$c);			# tmp1 to hold F_40_59(b,c,d)
	&xor($f,&swtmp(($n+2)%16));	# f to hold Xupdate(xi,xa,xb,xc,xd)
	&xor($tmp1,$d);
	&xor($f,&swtmp(($n+8)%16));
	&and($tmp1,$b);
	&xor($f,&swtmp(($n+13)%16));	# f holds xa^xb^xc^xd
	&rotl($f,1);			# f=ROTATE(f,1)
	&add($tmp1,$e);			# b&(c^d)+=e
	&rotr($b,2);			# b=ROTATE(b,30)
	&mov($e,$a);			# e becomes volatile
	&rotl($e,5);			# ROTATE(a,5)
	&mov(&swtmp($n%16),$f);		# xi=f
	&lea($f,&DWP(0x8f1bbcdc,$f,$tmp1));# f+=K_40_59+e+(b&(c^d))
	&mov($tmp1,$c);
	&add($f,$e);			# f+=ROTATE(a,5)
	&and($tmp1,$d);
	&mov($e,&swtmp(($n+1)%16));	# pre-fetch f for next round
	&add($f,$tmp1);			# f+=c&d
}
	}
&function_begin("sha1_block_data_order");
if ($xmm) {
  &static_label("shaext_shortcut");
  &static_label("ssse3_shortcut");
  &static_label("avx_shortcut")		if ($ymm);
  &static_label("K_XX_XX");

	&call	(&label("pic_point"));	# make it PIC!
	&set_label("pic_point");
	&blindpop($tmp1);
	&picmeup($T,"OPENSSL_ia32cap_P",$tmp1,&label("pic_point"));
	&lea	($tmp1,&DWP(&label("K_XX_XX")."-".&label("pic_point"),$tmp1));

	&mov	($A,&DWP(0,$T));
	&mov	($D,&DWP(4,$T));
	&test	($D,1<<9);		# check SSSE3 bit
	&jz	(&label("x86"));
	&mov	($C,&DWP(8,$T));
	&test	($A,1<<24);		# check FXSR bit
	&jz	(&label("x86"));
	&test	($C,1<<29);		# check SHA bit
	&jnz	(&label("shaext_shortcut"));
if ($ymm) {
	&and	($D,1<<28);		# mask AVX bit
	&and	($A,1<<30);		# mask "Intel CPU" bit
	&or	($A,$D);
	&cmp	($A,1<<28|1<<30);
	&je	(&label("avx_shortcut"));
}
	&jmp	(&label("ssse3_shortcut"));
	&set_label("x86",16);
}
	&mov($tmp1,&wparam(0));	# SHA_CTX *c
	&mov($T,&wparam(1));	# const void *input
	&mov($A,&wparam(2));	# size_t num
	&stack_push(16+3);	# allocate X[16]
	&shl($A,6);		# multiply by 64
	&add($A,$T);
	&mov(&wparam(2),$A);	# pointer beyond the end of input
	&mov($E,&DWP(16,$tmp1));# pre-load E
	&jmp(&label("loop"));
&set_label("loop",16);

	# copy input chunk to X, but reversing byte order!
	for ($i=0; $i<16; $i+=4)
		{
		&mov($A,&DWP(4*($i+0),$T));
		&mov($B,&DWP(4*($i+1),$T));
		&mov($C,&DWP(4*($i+2),$T));
		&mov($D,&DWP(4*($i+3),$T));
		&bswap($A);
		&bswap($B);
		&bswap($C);
		&bswap($D);
		&mov(&swtmp($i+0),$A);
		&mov(&swtmp($i+1),$B);
		&mov(&swtmp($i+2),$C);
		&mov(&swtmp($i+3),$D);
		}
	&mov(&wparam(1),$T);	# redundant in 1st spin

	&mov($A,&DWP(0,$tmp1));	# load SHA_CTX
	&mov($B,&DWP(4,$tmp1));
	&mov($C,&DWP(8,$tmp1));
	&mov($D,&DWP(12,$tmp1));
	# E is pre-loaded

	for($i=0;$i<16;$i++)	{ &BODY_00_15($i,@V); unshift(@V,pop(@V)); }
	for(;$i<20;$i++)	{ &BODY_16_19($i,@V); unshift(@V,pop(@V)); }
	for(;$i<40;$i++)	{ &BODY_20_39($i,@V); unshift(@V,pop(@V)); }
	for(;$i<60;$i++)	{ &BODY_40_59($i,@V); unshift(@V,pop(@V)); }
	for(;$i<80;$i++)	{ &BODY_20_39($i,@V); unshift(@V,pop(@V)); }
	(($V[5] eq $D) and ($V[0] eq $E)) or die;	# double-check

	&mov($tmp1,&wparam(0));	# re-load SHA_CTX*
	&mov($D,&wparam(1));	# D is last "T" and is discarded

	&add($E,&DWP(0,$tmp1));	# E is last "A"...
	&add($T,&DWP(4,$tmp1));
	&add($A,&DWP(8,$tmp1));
	&add($B,&DWP(12,$tmp1));
	&add($C,&DWP(16,$tmp1));

	&mov(&DWP(0,$tmp1),$E);	# update SHA_CTX
	 &add($D,64);		# advance input pointer
	&mov(&DWP(4,$tmp1),$T);
	 &cmp($D,&wparam(2));	# have we reached the end yet?
	&mov(&DWP(8,$tmp1),$A);
	 &mov($E,$C);		# C is last "E" which needs to be "pre-loaded"
	&mov(&DWP(12,$tmp1),$B);
	 &mov($T,$D);		# input pointer
	&mov(&DWP(16,$tmp1),$C);
	&jb(&label("loop"));

	&stack_pop(16+3);
&function_end("sha1_block_data_order");
######################################################################
# Intel SHA Extensions implementation of SHA1 update function.
#
my ($ctx,$inp,$num)=("edi","esi","ecx");
my ($ABCD,$E,$E_,$BSWAP)=map("xmm$_",(0..3));
my @MSG=map("xmm$_",(4..7));

sub sha1rnds4 {
 my ($dst,$src,$imm)=@_;
    if ("$dst:$src" =~ /xmm([0-7]):xmm([0-7])/)
    {	&data_byte(0x0f,0x3a,0xcc,0xc0|($1<<3)|$2,$imm);	}
}
sub sha1op38 {
 my ($opcodelet,$dst,$src)=@_;
    if ("$dst:$src" =~ /xmm([0-7]):xmm([0-7])/)
    {	&data_byte(0x0f,0x38,$opcodelet,0xc0|($1<<3)|$2);	}
}
sub sha1nexte	{ sha1op38(0xc8,@_); }
sub sha1msg1	{ sha1op38(0xc9,@_); }
sub sha1msg2	{ sha1op38(0xca,@_); }
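#
# These helpers hand-assemble the SHA instructions, since the supported
# assemblers may not know them yet. For example (bytes derived from the
# encodings above):
#
#	&sha1rnds4	("xmm0","xmm1",0);	# emits 0f 3a cc c1 00
#	&sha1nexte	("xmm0","xmm1");	# emits 0f 38 c8 c1
#
# i.e. the destination register lands in the reg field and the source
# in the r/m field of the ModR/M byte 0xc0|(dst<<3)|src.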
&function_begin("_sha1_block_data_order_shaext");
	&call	(&label("pic_point"));	# make it PIC!
	&set_label("pic_point");
	&blindpop($tmp1);
	&lea	($tmp1,&DWP(&label("K_XX_XX")."-".&label("pic_point"),$tmp1));
&set_label("shaext_shortcut");
	&mov	($ctx,&wparam(0));
	&mov	("ebx","esp");
	&mov	($inp,&wparam(1));
	&mov	($num,&wparam(2));
	&sub	("esp",32);

	&movdqu	($ABCD,&QWP(0,$ctx));
	&movd	($E,&QWP(16,$ctx));
	&and	("esp",-32);
	&movdqa	($BSWAP,&QWP(0x50,$tmp1));	# byte-n-word swap

	&movdqu	(@MSG[0],&QWP(0,$inp));
	&pshufd	($ABCD,$ABCD,0b00011011);	# flip word order
	&movdqu	(@MSG[1],&QWP(0x10,$inp));
	&pshufd	($E,$E,0b00011011);		# flip word order
	&movdqu	(@MSG[2],&QWP(0x20,$inp));
	&pshufb	(@MSG[0],$BSWAP);
	&movdqu	(@MSG[3],&QWP(0x30,$inp));
	&pshufb	(@MSG[1],$BSWAP);
	&pshufb	(@MSG[2],$BSWAP);
	&pshufb	(@MSG[3],$BSWAP);
	&jmp	(&label("loop_shaext"));
&set_label("loop_shaext",16);
	&dec	($num);
	&lea	("eax",&DWP(0x40,$inp));
	&movdqa	(&QWP(0,"esp"),$E);	# offload $E
	&cmovne	($inp,"eax");
	&movdqa	(&QWP(16,"esp"),$ABCD);	# offload $ABCD

for($i=0;$i<20-4;$i+=2) {
	&sha1msg1	(@MSG[0],@MSG[1]);
	&movdqa		($E_,$ABCD);
	&sha1rnds4	($ABCD,$E,int($i/5));	# 0-3...
	&sha1nexte	($E_,@MSG[1]);
	&pxor		(@MSG[0],@MSG[2]);
	&sha1msg1	(@MSG[1],@MSG[2]);
	&sha1msg2	(@MSG[0],@MSG[3]);

	&movdqa		($E,$ABCD);
	&sha1rnds4	($ABCD,$E_,int(($i+1)/5));
	&sha1nexte	($E,@MSG[2]);
	&pxor		(@MSG[1],@MSG[3]);
	&sha1msg2	(@MSG[1],@MSG[0]);

	push(@MSG,shift(@MSG));	push(@MSG,shift(@MSG));
}
	&movdqu	(@MSG[0],&QWP(0,$inp));
	&movdqa	($E_,$ABCD);
	&sha1rnds4	($ABCD,$E,3);		# 64-67
	&sha1nexte	($E_,@MSG[1]);
	&movdqu	(@MSG[1],&QWP(0x10,$inp));
	&pshufb	(@MSG[0],$BSWAP);

	&movdqa	($E,$ABCD);
	&sha1rnds4	($ABCD,$E_,3);		# 68-71
	&sha1nexte	($E,@MSG[2]);
	&movdqu	(@MSG[2],&QWP(0x20,$inp));
	&pshufb	(@MSG[1],$BSWAP);

	&movdqa	($E_,$ABCD);
	&sha1rnds4	($ABCD,$E,3);		# 72-75
	&sha1nexte	($E_,@MSG[3]);
	&movdqu	(@MSG[3],&QWP(0x30,$inp));
	&pshufb	(@MSG[2],$BSWAP);

	&movdqa	($E,$ABCD);
	&sha1rnds4	($ABCD,$E_,3);		# 76-79
	&movdqa	($E_,&QWP(0,"esp"));
	&pshufb	(@MSG[3],$BSWAP);
	&sha1nexte	($E,$E_);
	&paddd	($ABCD,&QWP(16,"esp"));

	&jnz	(&label("loop_shaext"));

	&pshufd	($ABCD,$ABCD,0b00011011);
	&pshufd	($E,$E,0b00011011);
	&movdqu	(&QWP(0,$ctx),$ABCD);
	&movd	(&DWP(16,$ctx),$E);

	&mov	("esp","ebx");
&function_end("_sha1_block_data_order_shaext");
######################################################################
# The SSSE3 implementation.
#
# %xmm[0-7] are used as a ring @X[] buffer containing quadruples of the
# last 32 elements of the message schedule or Xupdate outputs. The
# first 4 quadruples are simply byte-swapped input, the next 4 are
# calculated according to the method originally suggested by Dean
# Gaudet (modulo being implemented in SSSE3). Once 8 quadruples, or 32
# elements, are collected, it switches to the routine proposed by Max
# Locktyukhin.
#
# Calculations inevitably require temporary registers, and there are
# no %xmm registers left to spare. For this reason part of the ring
# buffer, X[2..4] to be specific, is offloaded to a 3-quadruple ring
# buffer on the stack. Keep in mind that X[2] is an alias of X[-6],
# X[3] of X[-5], and X[4] of X[-4]...
#
# Another notable optimization is aggressive stack frame compression,
# aiming to minimize the number of 9-byte instructions...
#
# Yet another notable optimization is the "jumping" $B variable. It
# means that there is no register permanently allocated for the $B
# value. This made it possible to eliminate one instruction from
# body_20_39...
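#
# The "ring" is maintained by rotating the @X register-name list rather
# than moving data, e.g. (a standalone sketch of the idiom used at the
# end of each Xupdate below):
#
#	my @X = map("xmm$_",(4..7,0..3));
#	push(@X,shift(@X));	# now $X[0] is "xmm5", $X[-1] is "xmm4"
#
# so @X[-4&7], i.e. @X[4], always names the register holding the oldest
# quadruple still needed.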
my $Xi=4;			# 4xSIMD Xupdate round, start pre-seeded
my @X=map("xmm$_",(4..7,0..3));	# pre-seeded for $Xi=4
my @V=($A,$B,$C,$D,$E);
my $j=0;			# hash round
my $rx=0;
my @T=($T,$tmp1);
my $inp;

my $_rol=sub { &rol(@_) };
my $_ror=sub { &ror(@_) };
&function_begin("_sha1_block_data_order_ssse3");
	&call	(&label("pic_point"));	# make it PIC!
	&set_label("pic_point");
	&blindpop($tmp1);
	&lea	($tmp1,&DWP(&label("K_XX_XX")."-".&label("pic_point"),$tmp1));
&set_label("ssse3_shortcut");

	&movdqa	(@X[3],&QWP(0,$tmp1));		# K_00_19
	&movdqa	(@X[4],&QWP(16,$tmp1));		# K_20_39
	&movdqa	(@X[5],&QWP(32,$tmp1));		# K_40_59
	&movdqa	(@X[6],&QWP(48,$tmp1));		# K_60_79
	&movdqa	(@X[2],&QWP(64,$tmp1));		# pbswap mask

	&mov	($E,&wparam(0));		# load argument block
	&mov	($inp=@T[1],&wparam(1));
	&mov	($D,&wparam(2));
	&mov	(@T[0],"esp");

	# stack frame layout
	#
	# +0	X[0]+K	X[1]+K	X[2]+K	X[3]+K	# XMM->IALU xfer area
	#	X[4]+K	X[5]+K	X[6]+K	X[7]+K
	#	X[8]+K	X[9]+K	X[10]+K	X[11]+K
	#	X[12]+K	X[13]+K	X[14]+K	X[15]+K
	#
	# +64	X[0]	X[1]	X[2]	X[3]	# XMM->XMM backtrace area
	#	X[4]	X[5]	X[6]	X[7]
	#	X[8]	X[9]	X[10]	X[11]	# even borrowed for K_00_19
	#
	# +112	K_20_39	K_20_39	K_20_39	K_20_39	# constants
	#	K_40_59	K_40_59	K_40_59	K_40_59
	#	K_60_79	K_60_79	K_60_79	K_60_79
	#	K_00_19	K_00_19	K_00_19	K_00_19
	#
	# +176	pbswap mask
	#
	# +192	ctx				# argument block
	# +196	inp
	# +200	end
	# +204	esp
	&sub	("esp",208);
	&and	("esp",-64);

	&movdqa	(&QWP(112+0,"esp"),@X[4]);	# copy constants
	&movdqa	(&QWP(112+16,"esp"),@X[5]);
	&movdqa	(&QWP(112+32,"esp"),@X[6]);
	&shl	($D,6);				# len*64
	&movdqa	(&QWP(112+48,"esp"),@X[3]);
	&add	($D,$inp);			# end of input
	&movdqa	(&QWP(112+64,"esp"),@X[2]);
	&add	($inp,64);
	&mov	(&DWP(192+0,"esp"),$E);		# save argument block
	&mov	(&DWP(192+4,"esp"),$inp);
	&mov	(&DWP(192+8,"esp"),$D);
	&mov	(&DWP(192+12,"esp"),@T[0]);	# save original %esp

	&mov	($A,&DWP(0,$E));		# load context
	&mov	($B,&DWP(4,$E));
	&mov	($C,&DWP(8,$E));
	&mov	($D,&DWP(12,$E));
	&mov	($E,&DWP(16,$E));
	&mov	(@T[0],$B);			# magic seed

	&movdqu	(@X[-4&7],&QWP(-64,$inp));	# load input to %xmm[0-3]
	&movdqu	(@X[-3&7],&QWP(-48,$inp));
	&movdqu	(@X[-2&7],&QWP(-32,$inp));
	&movdqu	(@X[-1&7],&QWP(-16,$inp));
	&pshufb	(@X[-4&7],@X[2]);		# byte swap
	&pshufb	(@X[-3&7],@X[2]);
	&pshufb	(@X[-2&7],@X[2]);
	&movdqa	(&QWP(112-16,"esp"),@X[3]);	# borrow last backtrace slot
	&pshufb	(@X[-1&7],@X[2]);
	&paddd	(@X[-4&7],@X[3]);		# add K_00_19
	&paddd	(@X[-3&7],@X[3]);
	&paddd	(@X[-2&7],@X[3]);
	&movdqa	(&QWP(0,"esp"),@X[-4&7]);	# X[]+K xfer to IALU
	&psubd	(@X[-4&7],@X[3]);		# restore X[]
	&movdqa	(&QWP(0+16,"esp"),@X[-3&7]);
	&psubd	(@X[-3&7],@X[3]);
	&movdqa	(&QWP(0+32,"esp"),@X[-2&7]);
	&psubd	(@X[-2&7],@X[3]);

	&pshufd	(@X[0],@X[-4&7],0xee);		# was &movdqa	(@X[0],@X[-3&7]);

	&jmp	(&label("loop"));
######################################################################
# The SSE instruction sequence is first broken into groups of
# independent instructions, independent with respect to their inputs
# and shifter (not all architectures have more than one). Then IALU
# instructions are "knitted in" between the SSE groups. The distance
# is maintained for an SSE latency of 2, in hope that it fits the
# upcoming AMD Bulldozer better [which allegedly also implements
# SSSE3]...
#
# Temporary registers usage. X[2] is volatile at the entry and at the
# end is restored from the backtrace ring buffer. X[3] is expected to
# contain the current K_XX_XX constant and is used to calculate X[-1]+K
# from the previous round; it becomes volatile the moment the value is
# saved to the stack for transfer to the IALU. X[4] becomes volatile
# whenever X[-4] is accumulated and offloaded to the backtrace ring
# buffer; at the end it is loaded with the next K_XX_XX [which becomes
# X[3] in the next round]...
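#
# The interleaving itself is driven by the body_* subroutines further
# below: each returns a list of instruction *strings*, and the Xupdate
# code eval's them one at a time between SIMD instructions. A minimal
# model of the idiom (a sketch, not the actual code):
#
#	my @insns = (&$body,&$body,&$body,&$body);	# collect IALU rounds
#	eval(shift(@insns));		# knit one IALU instruction in
#	&pxor	(@X[0],@X[2]);		# ... then one SIMD instruction
#	foreach (@insns) { eval; }	# flush whatever remains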
sub Xupdate_ssse3_16_31()		# recall that $Xi starts with 4
{ use integer;
  my $body = shift;
  my @insns = (&$body,&$body,&$body,&$body);	# 40 instructions
  my ($a,$b,$c,$d,$e);

	 eval(shift(@insns));		# ror
	&punpcklqdq(@X[0],@X[-3&7]);	# compose "X[-14]" in "X[0]", was &palignr(@X[0],@X[-4&7],8);
	&movdqa	(@X[2],@X[-1&7]);
	&paddd	(@X[3],@X[-1&7]);
	&movdqa	(&QWP(64+16*(($Xi-4)%3),"esp"),@X[-4&7]);# save X[] to backtrace buffer
	 eval(shift(@insns));		# rol
	&psrldq	(@X[2],4);		# "X[-3]", 3 dwords
	&pxor	(@X[0],@X[-4&7]);	# "X[0]"^="X[-16]"
	 eval(shift(@insns));		# ror
	&pxor	(@X[2],@X[-2&7]);	# "X[-3]"^"X[-8]"
	&pxor	(@X[0],@X[2]);		# "X[0]"^="X[-3]"^"X[-8]"
	 eval(shift(@insns));		# rol
	&movdqa	(&QWP(0+16*(($Xi-1)&3),"esp"),@X[3]);	# X[]+K xfer to IALU
	&movdqa	(@X[4],@X[0]);
	 eval(shift(@insns));		# ror
	&movdqa	(@X[2],@X[0]);
	&pslldq	(@X[4],12);		# "X[0]"<<96, extract one dword
	&paddd	(@X[0],@X[0]);
	 eval(shift(@insns));		# rol
	&movdqa	(@X[3],@X[4]);
	 eval(shift(@insns));		# ror
	&por	(@X[0],@X[2]);		# "X[0]"<<<=1
	&movdqa	(@X[2],&QWP(64+16*(($Xi-6)%3),"esp")) if ($Xi>5);	# restore X[] from backtrace buffer
	 eval(shift(@insns));		# rol
	&movdqa	(@X[4],&QWP(112-16+16*(($Xi)/5),"esp"));	# K_XX_XX
	&pxor	(@X[0],@X[3]);		# "X[0]"^=("X[0]"<<96)<<<2
	&pshufd	(@X[1],@X[-3&7],0xee)	if ($Xi<7);	# was &movdqa	(@X[1],@X[-2&7])
	&pshufd	(@X[3],@X[-1&7],0xee)	if ($Xi==7);

	foreach (@insns) { eval; }	# remaining instructions [if any]

  $Xi++;	push(@X,shift(@X));	# "rotate" X[]
}
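#
# Note how "X[0]"<<<=1 above is synthesized without a SIMD rotate:
# paddd of a register with itself is a 1-bit left shift in every lane,
# and the bit shifted out is recovered with psrld by 31 and por. A
# scalar model of the per-lane operation (a sketch):
#
#	sub rotl1 { my $x = shift; (($x+$x) | ($x>>31)) & 0xffffffff }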
sub Xupdate_ssse3_32_79()
{ use integer;
  my $body = shift;
  my @insns = (&$body,&$body,&$body,&$body);	# 32 to 44 instructions
  my ($a,$b,$c,$d,$e);

	 eval(shift(@insns));		# body_20_39
	&pxor	(@X[0],@X[-4&7]);	# "X[0]"="X[-32]"^"X[-16]"
	&punpcklqdq(@X[2],@X[-1&7]);	# compose "X[-6]", was &palignr(@X[2],@X[-2&7],8)
	 eval(shift(@insns));		# rol
	&pxor	(@X[0],@X[-7&7]);	# "X[0]"^="X[-28]"
	&movdqa	(&QWP(64+16*(($Xi-4)%3),"esp"),@X[-4&7]);	# save X[] to backtrace buffer
	 eval(shift(@insns))		if (@insns[0] =~ /_rol/);
 if ($Xi%5) {
	&movdqa	(@X[4],@X[3]);	# "perpetuate" K_XX_XX...
 } else {			# ... or load next one
	&movdqa	(@X[4],&QWP(112-16+16*($Xi/5),"esp"));
 }
	 eval(shift(@insns));		# ror
	&paddd	(@X[3],@X[-1&7]);

	&pxor	(@X[0],@X[2]);		# "X[0]"^="X[-6]"
	 eval(shift(@insns));		# body_20_39
	 eval(shift(@insns));		# rol

	&movdqa	(@X[2],@X[0]);
	&movdqa	(&QWP(0+16*(($Xi-1)&3),"esp"),@X[3]);	# X[]+K xfer to IALU
	 eval(shift(@insns));		# ror
	 eval(shift(@insns))		if (@insns[0] =~ /_rol/);
	 eval(shift(@insns));		# body_20_39
	 eval(shift(@insns));		# rol
	 eval(shift(@insns));		# ror
	 eval(shift(@insns))		if (@insns[1] =~ /_rol/);
	 eval(shift(@insns))		if (@insns[0] =~ /_rol/);

	&por	(@X[0],@X[2]);		# "X[0]"<<<=2
	 eval(shift(@insns));		# body_20_39
	&movdqa	(@X[2],&QWP(64+16*(($Xi-6)%3),"esp")) if($Xi<19);	# restore X[] from backtrace buffer
	 eval(shift(@insns));		# rol
	 eval(shift(@insns));		# ror
	&pshufd	(@X[3],@X[-1],0xee)	if ($Xi<19);	# was &movdqa	(@X[3],@X[0])

	foreach (@insns) { eval; }	# remaining instructions

  $Xi++;	push(@X,shift(@X));	# "rotate" X[]
}
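#
# The 32..79 path above exploits Max Locktyukhin's observation that,
# for t>=32, the schedule recurrence can be unrolled onto itself:
#
#	# W[t] = ROTATE(W[t-6] ^ W[t-16] ^ W[t-28] ^ W[t-32], 2)
#
# so the nearest input is 6 elements away instead of 3, which is what
# makes computing four consecutive W[] values in one XMM register
# possible.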
sub Xuplast_ssse3_80()
{ use integer;
  my $body = shift;
  my @insns = (&$body,&$body,&$body,&$body);	# 32 instructions
  my ($a,$b,$c,$d,$e);

	&paddd	(@X[3],@X[-1&7]);
	&movdqa	(&QWP(0+16*(($Xi-1)&3),"esp"),@X[3]);	# X[]+K xfer IALU

	foreach (@insns) { eval; }	# remaining instructions

	&mov	($inp=@T[1],&DWP(192+4,"esp"));
	&cmp	($inp,&DWP(192+8,"esp"));
	&je	(&label("done"));

	&movdqa	(@X[3],&QWP(112+48,"esp"));	# K_00_19
	&movdqa	(@X[2],&QWP(112+64,"esp"));	# pbswap mask
	&movdqu	(@X[-4&7],&QWP(0,$inp));	# load input
	&movdqu	(@X[-3&7],&QWP(16,$inp));
	&movdqu	(@X[-2&7],&QWP(32,$inp));
	&movdqu	(@X[-1&7],&QWP(48,$inp));
	&add	($inp,64);
	&pshufb	(@X[-4&7],@X[2]);		# byte swap
	&mov	(&DWP(192+4,"esp"),$inp);
	&movdqa	(&QWP(112-16,"esp"),@X[3]);	# borrow last backtrace slot
	$Xi=0;
}

sub Xloop_ssse3()
{ use integer;
  my $body = shift;
  my @insns = (&$body,&$body,&$body,&$body);	# 32 instructions
  my ($a,$b,$c,$d,$e);

	 eval(shift(@insns));
	 eval(shift(@insns));
	&pshufb	(@X[($Xi-3)&7],@X[2]);
	 eval(shift(@insns));
	 eval(shift(@insns));
	&paddd	(@X[($Xi-4)&7],@X[3]);
	 eval(shift(@insns));
	 eval(shift(@insns));
	 eval(shift(@insns));
	 eval(shift(@insns));
	&movdqa	(&QWP(0+16*$Xi,"esp"),@X[($Xi-4)&7]);	# X[]+K xfer to IALU
	 eval(shift(@insns));
	 eval(shift(@insns));
	&psubd	(@X[($Xi-4)&7],@X[3]);

	foreach (@insns) { eval; }

  $Xi++;
}

sub Xtail_ssse3()
{ use integer;
  my $body = shift;
  my @insns = (&$body,&$body,&$body,&$body);	# 32 instructions
  my ($a,$b,$c,$d,$e);

	foreach (@insns) { eval; }
}
sub body_00_19 () {	# ((c^d)&b)^d
	# on start @T[0]=(c^d)&b
	return &body_20_39() if ($rx==19); $rx++;
	(
	'($a,$b,$c,$d,$e)=@V;'.
	'&$_ror	($b,$j?7:2);',		# $b>>>2
	'&mov	(@T[1],$a);',		# $b in next round
	'&add	($e,&DWP(4*($j&15),"esp"));',	# X[]+K xfer
	'&xor	($b,$c);',		# $c^$d for next round
	'&and	(@T[1],$b);',		# ($b&($c^$d)) for next round
	'&xor	($b,$c);',		# restore $b
	'&add	($e,$a);'	.'$j++; unshift(@V,pop(@V)); unshift(@T,pop(@T));'
	);
}
sub body_20_39 () {	# b^d^c
	return &body_40_59() if ($rx==39); $rx++;
	(
	'($a,$b,$c,$d,$e)=@V;'.
	'&add	($e,&DWP(4*($j&15),"esp"));',	# X[]+K xfer
	'&xor	(@T[0],$d) if($j==19);'.
	'&xor	(@T[0],$c) if($j> 19);',	# ($b^$d^$c)
	'&mov	(@T[1],$a);',		# $b in next round
	'&xor	(@T[1],$c) if ($j< 79);',	# $b^$d for next round
	'&$_ror	($b,7);',		# $b>>>2
	'&add	($e,$a);'	.'$j++; unshift(@V,pop(@V)); unshift(@T,pop(@T));'
	);
}
sub body_40_59 () {	# ((b^c)&(c^d))^c
	# on entry @T[0]=(b^c), (c^=d)
	return &body_20_39() if ($rx==59); $rx++;
	(
	'($a,$b,$c,$d,$e)=@V;'.
	'&add	($e,&DWP(4*($j&15),"esp"));',	# X[]+K xfer
	'&and	(@T[0],$c) if ($j>=40);',	# (b^c)&(c^d)
	'&xor	($c,$d) if ($j>=40);',		# restore $c
	'&$_ror	($b,7);',		# $b>>>2
	'&mov	(@T[1],$a);',		# $b for next round
	'&xor	(@T[1],$c) if ($j==59);'.
	'&xor	(@T[1],$b) if ($j< 59);',	# b^c for next round
	'&xor	($b,$c) if ($j< 59);',		# c^d for next round
	'&add	($e,$a);'	.'$j++; unshift(@V,pop(@V)); unshift(@T,pop(@T));'
	);
}
sub bodyx_00_19 () {	# ((c^d)&b)^d
	# on start @T[0]=(b&c)^(~b&d), $e+=X[]+K
	return &bodyx_20_39() if ($rx==19); $rx++;
	(
	'($a,$b,$c,$d,$e)=@V;'.

	'&rorx	($b,$b,2)			if ($j==0);'.	# $b>>>2
	'&rorx	($b,@T[1],7)			if ($j!=0);',	# $b>>>2
	'&lea	($e,&DWP(0,$e,@T[0]));',
	'&rorx	(@T[0],$a,5);',
	'&andn	(@T[1],$a,$c);',
	'&add	($d,&DWP(4*(($j+1)&15),"esp"));',	# X[]+K xfer
	'&add	($e,@T[0]);'	.'$j++; unshift(@V,pop(@V)); unshift(@T,pop(@T));'
	);
}
sub bodyx_20_39 () {	# b^d^c
	# on start $b=b^c^d
	return &bodyx_40_59() if ($rx==39); $rx++;
	(
	'($a,$b,$c,$d,$e)=@V;'.

	'&add	($e,($j==19?@T[0]:$b))',
	'&rorx	($b,@T[1],7);',	# $b>>>2
	'&rorx	(@T[0],$a,5);',
	'&xor	($a,$b) if ($j<79);',
	'&add	($d,&DWP(4*(($j+1)&15),"esp")) if ($j<79);',	# X[]+K xfer
	'&xor	($a,$c) if ($j<79);',
	'&add	($e,@T[0]);'	.'$j++; unshift(@V,pop(@V)); unshift(@T,pop(@T));'
	);
}
sub bodyx_40_59 () {	# ((b^c)&(c^d))^c
	# on start $b=((b^c)&(c^d))^c
	return &bodyx_20_39() if ($rx==59); $rx++;
	(
	'($a,$b,$c,$d,$e)=@V;'.

	'&rorx	(@T[0],$a,5)',
	'&lea	($e,&DWP(0,$e,$b))',
	'&rorx	($b,@T[1],7)',	# $b>>>2
	'&add	($d,&DWP(4*(($j+1)&15),"esp"))',	# X[]+K xfer
	'&xor	($a,$b)',	# b^c for next round
	'&xor	(@T[1],$b)',	# c^d for next round
	'&xor	($a,$b)'	.'$j++; unshift(@V,pop(@V)); unshift(@T,pop(@T));'
	);
}
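#
# The bodyx_* variants target BMI1/BMI2 parts: 'rorx' is a
# non-destructive rotate that leaves the flags alone, and 'andn'
# computes ~src1&src2 in one instruction. Scalar models (a sketch):
#
#	sub rorx { my ($x,$n) = @_; (($x>>$n)|($x<<(32-$n))) & 0xffffffff }
#	sub andn { my ($x,$y) = @_; (~$x & $y) & 0xffffffff }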
&set_label("loop",16);
	&Xupdate_ssse3_16_31(\&body_00_19);
	&Xupdate_ssse3_16_31(\&body_00_19);
	&Xupdate_ssse3_16_31(\&body_00_19);
	&Xupdate_ssse3_16_31(\&body_00_19);
	&Xupdate_ssse3_32_79(\&body_00_19);
	&Xupdate_ssse3_32_79(\&body_20_39);
	&Xupdate_ssse3_32_79(\&body_20_39);
	&Xupdate_ssse3_32_79(\&body_20_39);
	&Xupdate_ssse3_32_79(\&body_20_39);
	&Xupdate_ssse3_32_79(\&body_20_39);
	&Xupdate_ssse3_32_79(\&body_40_59);
	&Xupdate_ssse3_32_79(\&body_40_59);
	&Xupdate_ssse3_32_79(\&body_40_59);
	&Xupdate_ssse3_32_79(\&body_40_59);
	&Xupdate_ssse3_32_79(\&body_40_59);
	&Xupdate_ssse3_32_79(\&body_20_39);
	&Xuplast_ssse3_80(\&body_20_39);	# can jump to "done"

	$saved_j=$j; @saved_V=@V;

	&Xloop_ssse3(\&body_20_39);
	&Xloop_ssse3(\&body_20_39);
	&Xloop_ssse3(\&body_20_39);

	&mov	(@T[1],&DWP(192,"esp"));	# update context
	&add	($A,&DWP(0,@T[1]));
	&add	(@T[0],&DWP(4,@T[1]));	# $b
	&add	($C,&DWP(8,@T[1]));
	&mov	(&DWP(0,@T[1]),$A);
	&add	($D,&DWP(12,@T[1]));
	&mov	(&DWP(4,@T[1]),@T[0]);
	&add	($E,&DWP(16,@T[1]));
	&mov	(&DWP(8,@T[1]),$C);
	&mov	($B,@T[0]);
	&mov	(&DWP(12,@T[1]),$D);
	&mov	(&DWP(16,@T[1]),$E);

	&pshufd	(@X[0],@X[-4&7],0xee);	# was &movdqa	(@X[0],@X[-3&7]);

	&jmp	(&label("loop"));
&set_label("done",16);	$j=$saved_j; @V=@saved_V;

	&Xtail_ssse3(\&body_20_39);
	&Xtail_ssse3(\&body_20_39);
	&Xtail_ssse3(\&body_20_39);

	&mov	(@T[1],&DWP(192,"esp"));	# update context
	&add	($A,&DWP(0,@T[1]));
	&mov	("esp",&DWP(192+12,"esp"));	# restore %esp
	&add	(@T[0],&DWP(4,@T[1]));	# $b
	&add	($C,&DWP(8,@T[1]));
	&mov	(&DWP(0,@T[1]),$A);
	&add	($D,&DWP(12,@T[1]));
	&mov	(&DWP(4,@T[1]),@T[0]);
	&add	($E,&DWP(16,@T[1]));
	&mov	(&DWP(8,@T[1]),$C);
	&mov	(&DWP(12,@T[1]),$D);
	&mov	(&DWP(16,@T[1]),$E);

&function_end("_sha1_block_data_order_ssse3");
if ($ymm) {
my $Xi=4;			# 4xSIMD Xupdate round, start pre-seeded
my @X=map("xmm$_",(4..7,0..3));	# pre-seeded for $Xi=4
my @V=($A,$B,$C,$D,$E);
my $j=0;			# hash round
my $rx=0;
my @T=($T,$tmp1);
my $inp;

my $_rol=sub { &shld(@_[0],@_) };
my $_ror=sub { &shrd(@_[0],@_) };
&function_begin("_sha1_block_data_order_avx");
	&call	(&label("pic_point"));	# make it PIC!
	&set_label("pic_point");
	&blindpop($tmp1);
	&lea	($tmp1,&DWP(&label("K_XX_XX")."-".&label("pic_point"),$tmp1));
&set_label("avx_shortcut");
	&vzeroall();

	&vmovdqa(@X[3],&QWP(0,$tmp1));		# K_00_19
	&vmovdqa(@X[4],&QWP(16,$tmp1));		# K_20_39
	&vmovdqa(@X[5],&QWP(32,$tmp1));		# K_40_59
	&vmovdqa(@X[6],&QWP(48,$tmp1));		# K_60_79
	&vmovdqa(@X[2],&QWP(64,$tmp1));		# pbswap mask

	&mov	($E,&wparam(0));		# load argument block
	&mov	($inp=@T[1],&wparam(1));
	&mov	($D,&wparam(2));
	&mov	(@T[0],"esp");
	# stack frame layout
	#
	# +0	X[0]+K	X[1]+K	X[2]+K	X[3]+K	# XMM->IALU xfer area
	#	X[4]+K	X[5]+K	X[6]+K	X[7]+K
	#	X[8]+K	X[9]+K	X[10]+K	X[11]+K
	#	X[12]+K	X[13]+K	X[14]+K	X[15]+K
	#
	# +64	X[0]	X[1]	X[2]	X[3]	# XMM->XMM backtrace area
	#	X[4]	X[5]	X[6]	X[7]
	#	X[8]	X[9]	X[10]	X[11]	# even borrowed for K_00_19
	#
	# +112	K_20_39	K_20_39	K_20_39	K_20_39	# constants
	#	K_40_59	K_40_59	K_40_59	K_40_59
	#	K_60_79	K_60_79	K_60_79	K_60_79
	#	K_00_19	K_00_19	K_00_19	K_00_19
	#
	# +176	pbswap mask
	#
	# +192	ctx				# argument block
	# +196	inp
	# +200	end
	# +204	esp
	&sub	("esp",208);
	&and	("esp",-64);

	&vmovdqa(&QWP(112+0,"esp"),@X[4]);	# copy constants
	&vmovdqa(&QWP(112+16,"esp"),@X[5]);
	&vmovdqa(&QWP(112+32,"esp"),@X[6]);
	&shl	($D,6);				# len*64
	&vmovdqa(&QWP(112+48,"esp"),@X[3]);
	&add	($D,$inp);			# end of input
	&vmovdqa(&QWP(112+64,"esp"),@X[2]);
	&add	($inp,64);
	&mov	(&DWP(192+0,"esp"),$E);		# save argument block
	&mov	(&DWP(192+4,"esp"),$inp);
	&mov	(&DWP(192+8,"esp"),$D);
	&mov	(&DWP(192+12,"esp"),@T[0]);	# save original %esp
	&mov	($A,&DWP(0,$E));		# load context
	&mov	($B,&DWP(4,$E));
	&mov	($C,&DWP(8,$E));
	&mov	($D,&DWP(12,$E));
	&mov	($E,&DWP(16,$E));
	&mov	(@T[0],$B);			# magic seed

	&vmovdqu(@X[-4&7],&QWP(-64,$inp));	# load input to %xmm[0-3]
	&vmovdqu(@X[-3&7],&QWP(-48,$inp));
	&vmovdqu(@X[-2&7],&QWP(-32,$inp));
	&vmovdqu(@X[-1&7],&QWP(-16,$inp));
	&vpshufb(@X[-4&7],@X[-4&7],@X[2]);	# byte swap
	&vpshufb(@X[-3&7],@X[-3&7],@X[2]);
	&vpshufb(@X[-2&7],@X[-2&7],@X[2]);
	&vmovdqa(&QWP(112-16,"esp"),@X[3]);	# borrow last backtrace slot
	&vpshufb(@X[-1&7],@X[-1&7],@X[2]);
	&vpaddd	(@X[0],@X[-4&7],@X[3]);		# add K_00_19
	&vpaddd	(@X[1],@X[-3&7],@X[3]);
	&vpaddd	(@X[2],@X[-2&7],@X[3]);
	&vmovdqa(&QWP(0,"esp"),@X[0]);		# X[]+K xfer to IALU
	&vmovdqa(&QWP(0+16,"esp"),@X[1]);
	&vmovdqa(&QWP(0+32,"esp"),@X[2]);

	&jmp	(&label("loop"));
sub Xupdate_avx_16_31()		# recall that $Xi starts with 4
{ use integer;
  my $body = shift;
  my @insns = (&$body,&$body,&$body,&$body);	# 40 instructions
  my ($a,$b,$c,$d,$e);

	 eval(shift(@insns));
	 eval(shift(@insns));
	&vpalignr(@X[0],@X[-3&7],@X[-4&7],8);	# compose "X[-14]" in "X[0]"
	 eval(shift(@insns));
	 eval(shift(@insns));

	&vpaddd	(@X[3],@X[3],@X[-1&7]);
	&vmovdqa (&QWP(64+16*(($Xi-4)%3),"esp"),@X[-4&7]);# save X[] to backtrace buffer
	 eval(shift(@insns));
	 eval(shift(@insns));
	&vpsrldq(@X[2],@X[-1&7],4);		# "X[-3]", 3 dwords
	 eval(shift(@insns));
	 eval(shift(@insns));
	&vpxor	(@X[0],@X[0],@X[-4&7]);		# "X[0]"^="X[-16]"
	 eval(shift(@insns));
	 eval(shift(@insns));

	&vpxor	(@X[2],@X[2],@X[-2&7]);		# "X[-3]"^"X[-8]"
	 eval(shift(@insns));
	 eval(shift(@insns));
	&vmovdqa (&QWP(0+16*(($Xi-1)&3),"esp"),@X[3]);	# X[]+K xfer to IALU
	 eval(shift(@insns));
	 eval(shift(@insns));

	&vpxor	(@X[0],@X[0],@X[2]);		# "X[0]"^="X[-3]"^"X[-8]"
	 eval(shift(@insns));
	 eval(shift(@insns));
	 eval(shift(@insns));
	 eval(shift(@insns));

	&vpsrld	(@X[2],@X[0],31);
	 eval(shift(@insns));
	 eval(shift(@insns));
	 eval(shift(@insns));
	 eval(shift(@insns));

	&vpslldq(@X[4],@X[0],12);		# "X[0]"<<96, extract one dword
	&vpaddd	(@X[0],@X[0],@X[0]);
	 eval(shift(@insns));
	 eval(shift(@insns));
	 eval(shift(@insns));
	 eval(shift(@insns));

	&vpsrld	(@X[3],@X[4],30);
	&vpor	(@X[0],@X[0],@X[2]);		# "X[0]"<<<=1
	 eval(shift(@insns));
	 eval(shift(@insns));
	 eval(shift(@insns));
	 eval(shift(@insns));

	&vpslld	(@X[4],@X[4],2);
	&vmovdqa (@X[2],&QWP(64+16*(($Xi-6)%3),"esp")) if ($Xi>5);	# restore X[] from backtrace buffer
	 eval(shift(@insns));
	 eval(shift(@insns));
	&vpxor	(@X[0],@X[0],@X[3]);
	 eval(shift(@insns));
	 eval(shift(@insns));
	 eval(shift(@insns));
	 eval(shift(@insns));

	&vpxor	(@X[0],@X[0],@X[4]);		# "X[0]"^=("X[0]"<<96)<<<2
	 eval(shift(@insns));
	 eval(shift(@insns));
	&vmovdqa (@X[4],&QWP(112-16+16*(($Xi)/5),"esp"));	# K_XX_XX
	 eval(shift(@insns));
	 eval(shift(@insns));

	foreach (@insns) { eval; }	# remaining instructions [if any]

  $Xi++;	push(@X,shift(@X));	# "rotate" X[]
}
sub Xupdate_avx_32_79()
{ use integer;
  my $body = shift;
  my @insns = (&$body,&$body,&$body,&$body);	# 32 to 44 instructions
  my ($a,$b,$c,$d,$e);

	&vpalignr(@X[2],@X[-1&7],@X[-2&7],8);	# compose "X[-6]"
	&vpxor	(@X[0],@X[0],@X[-4&7]);		# "X[0]"="X[-32]"^"X[-16]"
	 eval(shift(@insns));		# body_20_39
	 eval(shift(@insns));
	 eval(shift(@insns));
	 eval(shift(@insns));		# rol

	&vpxor	(@X[0],@X[0],@X[-7&7]);		# "X[0]"^="X[-28]"
	&vmovdqa (&QWP(64+16*(($Xi-4)%3),"esp"),@X[-4&7]);	# save X[] to backtrace buffer
	 eval(shift(@insns));
	 eval(shift(@insns));
 if ($Xi%5) {
	&vmovdqa (@X[4],@X[3]);	# "perpetuate" K_XX_XX...
 } else {			# ... or load next one
	&vmovdqa (@X[4],&QWP(112-16+16*($Xi/5),"esp"));
 }
	&vpaddd	(@X[3],@X[3],@X[-1&7]);
	 eval(shift(@insns));		# ror
	 eval(shift(@insns));

	&vpxor	(@X[0],@X[0],@X[2]);		# "X[0]"^="X[-6]"
	 eval(shift(@insns));		# body_20_39
	 eval(shift(@insns));
	 eval(shift(@insns));
	 eval(shift(@insns));		# rol

	&vpsrld	(@X[2],@X[0],30);
	&vmovdqa (&QWP(0+16*(($Xi-1)&3),"esp"),@X[3]);	# X[]+K xfer to IALU
	 eval(shift(@insns));
	 eval(shift(@insns));
	 eval(shift(@insns));		# ror
	 eval(shift(@insns));

	&vpslld	(@X[0],@X[0],2);
	 eval(shift(@insns));		# body_20_39
	 eval(shift(@insns));
	 eval(shift(@insns));
	 eval(shift(@insns));		# rol
	 eval(shift(@insns));
	 eval(shift(@insns));
	 eval(shift(@insns));		# ror
	 eval(shift(@insns));

	&vpor	(@X[0],@X[0],@X[2]);	# "X[0]"<<<=2
	 eval(shift(@insns));		# body_20_39
	 eval(shift(@insns));
	&vmovdqa (@X[2],&QWP(64+16*(($Xi-6)%3),"esp")) if($Xi<19);	# restore X[] from backtrace buffer
	 eval(shift(@insns));
	 eval(shift(@insns));		# rol
	 eval(shift(@insns));
	 eval(shift(@insns));
	 eval(shift(@insns));		# ror
	 eval(shift(@insns));

	foreach (@insns) { eval; }	# remaining instructions

  $Xi++;	push(@X,shift(@X));	# "rotate" X[]
}
sub Xuplast_avx_80()
{ use integer;
  my $body = shift;
  my @insns = (&$body,&$body,&$body,&$body);	# 32 instructions
  my ($a,$b,$c,$d,$e);

	 eval(shift(@insns));
	&vpaddd	(@X[3],@X[3],@X[-1&7]);
	 eval(shift(@insns));
	 eval(shift(@insns));
	 eval(shift(@insns));
	 eval(shift(@insns));

	&vmovdqa (&QWP(0+16*(($Xi-1)&3),"esp"),@X[3]);	# X[]+K xfer IALU

	foreach (@insns) { eval; }	# remaining instructions

	&mov	($inp=@T[1],&DWP(192+4,"esp"));
	&cmp	($inp,&DWP(192+8,"esp"));
	&je	(&label("done"));

	&vmovdqa(@X[3],&QWP(112+48,"esp"));	# K_00_19
	&vmovdqa(@X[2],&QWP(112+64,"esp"));	# pbswap mask
	&vmovdqu(@X[-4&7],&QWP(0,$inp));	# load input
	&vmovdqu(@X[-3&7],&QWP(16,$inp));
	&vmovdqu(@X[-2&7],&QWP(32,$inp));
	&vmovdqu(@X[-1&7],&QWP(48,$inp));
	&add	($inp,64);
	&vpshufb(@X[-4&7],@X[-4&7],@X[2]);	# byte swap
	&mov	(&DWP(192+4,"esp"),$inp);
	&vmovdqa(&QWP(112-16,"esp"),@X[3]);	# borrow last backtrace slot
	$Xi=0;
}

sub Xloop_avx()
{ use integer;
  my $body = shift;
  my @insns = (&$body,&$body,&$body,&$body);	# 32 instructions
  my ($a,$b,$c,$d,$e);

	 eval(shift(@insns));
	 eval(shift(@insns));
	&vpshufb (@X[($Xi-3)&7],@X[($Xi-3)&7],@X[2]);
	 eval(shift(@insns));
	 eval(shift(@insns));
	&vpaddd	(@X[$Xi&7],@X[($Xi-4)&7],@X[3]);
	 eval(shift(@insns));
	 eval(shift(@insns));
	 eval(shift(@insns));
	 eval(shift(@insns));
	&vmovdqa (&QWP(0+16*$Xi,"esp"),@X[$Xi&7]);	# X[]+K xfer to IALU
	 eval(shift(@insns));
	 eval(shift(@insns));

	foreach (@insns) { eval; }

  $Xi++;
}

sub Xtail_avx()
{ use integer;
  my $body = shift;
  my @insns = (&$body,&$body,&$body,&$body);	# 32 instructions
  my ($a,$b,$c,$d,$e);

	foreach (@insns) { eval; }
}
&set_label("loop",16);
	&Xupdate_avx_16_31(\&body_00_19);
	&Xupdate_avx_16_31(\&body_00_19);
	&Xupdate_avx_16_31(\&body_00_19);
	&Xupdate_avx_16_31(\&body_00_19);
	&Xupdate_avx_32_79(\&body_00_19);
	&Xupdate_avx_32_79(\&body_20_39);
	&Xupdate_avx_32_79(\&body_20_39);
	&Xupdate_avx_32_79(\&body_20_39);
	&Xupdate_avx_32_79(\&body_20_39);
	&Xupdate_avx_32_79(\&body_20_39);
	&Xupdate_avx_32_79(\&body_40_59);
	&Xupdate_avx_32_79(\&body_40_59);
	&Xupdate_avx_32_79(\&body_40_59);
	&Xupdate_avx_32_79(\&body_40_59);
	&Xupdate_avx_32_79(\&body_40_59);
	&Xupdate_avx_32_79(\&body_20_39);
	&Xuplast_avx_80(\&body_20_39);	# can jump to "done"

	$saved_j=$j; @saved_V=@V;

	&Xloop_avx(\&body_20_39);
	&Xloop_avx(\&body_20_39);
	&Xloop_avx(\&body_20_39);

	&mov	(@T[1],&DWP(192,"esp"));	# update context
	&add	($A,&DWP(0,@T[1]));
	&add	(@T[0],&DWP(4,@T[1]));	# $b
	&add	($C,&DWP(8,@T[1]));
	&mov	(&DWP(0,@T[1]),$A);
	&add	($D,&DWP(12,@T[1]));
	&mov	(&DWP(4,@T[1]),@T[0]);
	&add	($E,&DWP(16,@T[1]));
	&mov	(&DWP(8,@T[1]),$C);
	&mov	($B,@T[0]);
	&mov	(&DWP(12,@T[1]),$D);
	&mov	(&DWP(16,@T[1]),$E);

	&jmp	(&label("loop"));
&set_label("done",16);	$j=$saved_j; @V=@saved_V;

	&Xtail_avx(\&body_20_39);
	&Xtail_avx(\&body_20_39);
	&Xtail_avx(\&body_20_39);

	&vzeroall();

	&mov	(@T[1],&DWP(192,"esp"));	# update context
	&add	($A,&DWP(0,@T[1]));
	&mov	("esp",&DWP(192+12,"esp"));	# restore %esp
	&add	(@T[0],&DWP(4,@T[1]));	# $b
	&add	($C,&DWP(8,@T[1]));
	&mov	(&DWP(0,@T[1]),$A);
	&add	($D,&DWP(12,@T[1]));
	&mov	(&DWP(4,@T[1]),@T[0]);
	&add	($E,&DWP(16,@T[1]));
	&mov	(&DWP(8,@T[1]),$C);
	&mov	(&DWP(12,@T[1]),$D);
	&mov	(&DWP(16,@T[1]),$E);
&function_end("_sha1_block_data_order_avx");
}
&set_label("K_XX_XX",64);
&data_word(0x5a827999,0x5a827999,0x5a827999,0x5a827999);	# K_00_19
&data_word(0x6ed9eba1,0x6ed9eba1,0x6ed9eba1,0x6ed9eba1);	# K_20_39
&data_word(0x8f1bbcdc,0x8f1bbcdc,0x8f1bbcdc,0x8f1bbcdc);	# K_40_59
&data_word(0xca62c1d6,0xca62c1d6,0xca62c1d6,0xca62c1d6);	# K_60_79
&data_word(0x00010203,0x04050607,0x08090a0b,0x0c0d0e0f);	# pbswap mask
&data_byte(0xf,0xe,0xd,0xc,0xb,0xa,0x9,0x8,0x7,0x6,0x5,0x4,0x3,0x2,0x1,0x0);
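# The data_word row above is the pbswap mask: pshufb with it reverses
# the bytes within each 32-bit lane, e.g. the dword 0x00010203 loaded
# from bytes 03 02 01 00 becomes 0x03020100. The data_byte row
# 0xf..0x0 is the byte-n-word swap used by the SHAEXT path, reversing
# all 16 bytes of the register at once.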
&asciz("SHA1 block transform for x86, CRYPTOGAMS by <appro\@openssl.org>");

&asm_finish();