# Copyright 1998-2018 The OpenSSL Project Authors. All Rights Reserved.
#
# Licensed under the Apache License 2.0 (the "License"). You may not use
# this file except in compliance with the License. You can obtain a copy
# in the file LICENSE in the source distribution or at
# https://www.openssl.org/source/license.html
# ====================================================================
# [Re]written by Andy Polyakov <appro@openssl.org> for the OpenSSL
# project. The module is, however, dual licensed under OpenSSL and
# CRYPTOGAMS licenses depending on where you obtain it. For further
# details see http://www.openssl.org/~appro/cryptogams/.
# ====================================================================
17 # "[Re]written" was achieved in two major overhauls. In 2004 BODY_*
18 # functions were re-implemented to address P4 performance issue [see
19 # commentary below], and in 2006 the rest was rewritten in order to
20 # gain freedom to liberate licensing terms.
22 # January, September 2004.
#
# It was noted that the Intel IA-32 C compiler generates code which
# performs ~30% *faster* on P4 CPUs than the original *hand-coded*
# SHA1 assembler implementation. To address this problem (and
# prove that humans are still better than machines:-), the
# original code was overhauled, which resulted in the following
# performance changes:
#
#		compared with original	compared with Intel cc
#		assembler impl.		generated code
#
# As you can see, Pentium came out as the loser:-( Yet I reckoned that
# the improvement on P4 outweighs the loss and incorporated this
# re-tuned code into 0.9.7 and later.
# ----------------------------------------------------------------
#
# George Spelvin has pointed out that F_40_59(b,c,d) can be rewritten
# as '(c&d) + (b&(c^d))', which makes it possible to accumulate partial
# results and lighten "pressure" on scratch registers. This resulted in
# >12% performance improvement on contemporary AMD cores (with no
# degradation on other CPUs:-). Also, the code was revised to maximize
# "distance" between instructions producing input for the 'lea'
# instruction and the 'lea' instruction itself, which is essential for
# the Intel Atom core and resulted in ~15% improvement.
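#
# A small illustrative aside, not part of the generated code: the
# identity behind Spelvin's tip can be verified exhaustively in plain
# Perl, with MAJ spelled out as the textbook majority function:
#
#	for my $b (0,1) { for my $c (0,1) { for my $d (0,1) {
#	    my $maj = ($b & $c) | ($c & $d) | ($b & $d); # F_40_59 as specified
#	    my $alt = ($c & $d) + ($b & ($c ^ $d));      # Spelvin's split form
#	    die "mismatch" unless $maj == $alt;
#	}}}
#
# The '+' is safe bit-wise because the two terms are never both 1 in
# the same bit position (c&d==1 forces c^d==0), so no carry can cross
# between bit positions and the sum may be accumulated in two steps.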
# ----------------------------------------------------------------
#
# Add an SSSE3, Supplemental[!] SSE3, implementation. The idea behind
# it is to offload the message schedule, denoted by Wt in the NIST
# specification, or Xupdate in the OpenSSL source, to the SIMD unit
# (see the sketch after this list). The idea is not novel, and in SSE2
# context was first explored by Dean Gaudet in 2004, see
# http://arctic.org/~dean/crypto/sha1.html. Since then several things
# have changed that made it interesting again:
#
# a) XMM units became faster and wider;
# b) instruction set became more versatile;
# c) an important observation was made by Max Locktyukhin, which made
#    it possible to reduce the number of instructions required to
#    perform the operation in question, for further details see
#    http://software.intel.com/en-us/articles/improving-the-performance-of-the-secure-hash-algorithm-1/.
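#
# For reference, the scalar recurrence being offloaded is (FIPS 180):
#
#	W[t] = ROTL1( W[t-3] ^ W[t-8] ^ W[t-14] ^ W[t-16] ),	16<=t<80
#
# Vectorizing four W[t] per step is complicated by the W[t-3] term,
# which falls inside the very quadruple being computed. Gaudet's
# method patches that lane up afterwards, while Locktyukhin's
# observation applies the recurrence to itself to obtain
#
#	W[t] = ROTL2( W[t-6] ^ W[t-16] ^ W[t-28] ^ W[t-32] ),	32<=t<80
#
# where all inputs lie outside the quadruple under construction; this
# is exactly the split between the _16_31 and _32_79 Xupdate paths
# below.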
# ----------------------------------------------------------------
#
# Add AVX code path, probably the most controversial... The thing is
# that switching to AVX alone improves performance by as little as 4%
# in comparison to the SSSE3 code path. But the result below doesn't
# look like a 4% improvement... The trouble is that Sandy Bridge
# decodes 'ro[rl]' as a pair of µ-ops, and it's the additional µ-ops,
# two per round, that make it run slower than Core2 and Westmere. But
# 'sh[rl]d' is decoded as a single µ-op by Sandy Bridge, and it's
# replacing 'ro[rl]' with the equivalent 'sh[rl]d' that is responsible
# for the impressive 5.1 cycles per processed byte. But 'sh[rl]d' is
# not something that used to be fast, nor does it appear to be fast in
# the upcoming Bulldozer [according to its optimization manual]. Which
# is why the AVX code path is guarded by *both* the AVX bit and the
# synthetic bit denoting Intel CPUs. One can argue that it's unfair to
# AMD, but without 'sh[rl]d' it makes no sense to keep the AVX code
# path. If somebody feels that strongly, it's probably more appropriate
# to discuss the possibility of using the XOP vector rotate on AMD...
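#
# For illustration, the substitution relied upon above is simply:
#
#	rol	eax,5		# a = (a<<5)|(a>>27), 2 µ-ops on Sandy Bridge
#	shld	eax,eax,5	# same result when both operands are the
#				# same register, but a single µ-op
#
# The actual emission is handled by the $_rol/$_ror closures further
# down, which map to &shld/&shrd on the AVX path and to plain
# &rol/&ror on the SSSE3 path.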
# ----------------------------------------------------------------
#
# Add support for Intel SHA Extensions.
#
######################################################################
# Current performance is summarized in the following table. Numbers
# are CPU clock cycles spent to process a single byte (less is better).
#
#		x86		SSSE3		AVX
# Core2		7.3		6.0/+22%	-
# Westmere	7.3		5.5/+33%	-
# Sandy Bridge	8.8		6.2/+40%	5.1(**)/+73%
# Ivy Bridge	7.2		4.8/+51%	4.7(**)/+53%
# Haswell	6.5		4.3/+51%	4.1(**)/+58%
# Skylake	6.4		4.1/+55%	4.1(**)/+55%
# Bulldozer	11.6		6.0/+92%
# VIA Nano	10.6		7.5/+41%
# Atom		12.5		9.3(*)/+35%
# Silvermont	14.5		9.9(*)/+46%
# Goldmont	8.8		6.7/+30%	1.7(***)/+415%
#
# (*)	Loop is 1056 instructions long and expected result is ~8.25.
#	The discrepancy is because of front-end limitations, so-called
#	MS-ROM penalties, and on Silvermont even the rotate instruction's
#	limited parallelism.
#
# (**)	As per above comment, the result is for AVX *plus* sh[rl]d.
#
# (***)	SHAEXT result
$0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
push(@INC,"${dir}","${dir}../../perlasm");
require "x86asm.pl";

$output=pop and open STDOUT,">$output";

&asm_init($ARGV[0],$ARGV[$#ARGV] eq "386");

$xmm=$ymm=0;
for (@ARGV) { $xmm=1 if (/-DOPENSSL_IA32_SSE2/); }
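
# Build-time feature gating: $xmm enables the SIMD code paths (keyed
# off -DOPENSSL_IA32_SSE2 in the compiler flags), while $ymm is set
# below only if the assembler that will consume our output is new
# enough to understand AVX mnemonics, probed one toolchain at a time.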
$ymm=1 if ($xmm &&
	`$ENV{CC} -Wa,-v -c -o /dev/null -x assembler /dev/null 2>&1`
	=~ /GNU assembler version ([2-9]\.[0-9]+)/ &&
	$1>=2.19);		# first version supporting AVX

$ymm=1 if ($xmm && !$ymm && $ARGV[0] eq "win32n" &&
	`nasm -v 2>&1` =~ /NASM version ([2-9]\.[0-9]+)/ &&
	$1>=2.03);		# first version supporting AVX

$ymm=1 if ($xmm && !$ymm && $ARGV[0] eq "win32" &&
	`ml 2>&1` =~ /Version ([0-9]+)\./ &&
	$1>=10);		# first version supporting AVX

$ymm=1 if ($xmm && !$ymm &&
	`$ENV{CC} -v 2>&1` =~ /((?:^clang|LLVM) version|based on LLVM) ([3-9]\.[0-9]+)/ &&
	$2>=3.0);		# first version supporting AVX

$shaext=$xmm;	### set to zero if compiling for 1.0.1

&external_label("OPENSSL_ia32cap_P") if ($xmm);
$A="eax";
$B="ebx";
$C="ecx";
$D="edx";
$E="edi";
$T="esi";
$tmp1="ebp";

@V=($A,$B,$C,$D,$E,$T);

$alt=0;	# 1 denotes alternative IALU implementation, which performs
	# 8% *worse* on P4, same on Westmere and Atom, 2% better on
	# Sandy Bridge...
sub BODY_00_15
	{
	local($n,$a,$b,$c,$d,$e,$f)=@_;

	&comment("00_15 $n");

	&mov($f,$c);			# f to hold F_00_19(b,c,d)
	if ($n==0) { &mov($tmp1,$a); }
	else       { &mov($a,$tmp1); }
	&rotl($tmp1,5);			# tmp1=ROTATE(a,5)
	&xor($f,$d);
	&add($tmp1,$e);			# tmp1+=e;
	&mov($e,&swtmp($n%16));		# e becomes volatile and is loaded
					# with xi, also note that e becomes
					# f in next round...
	&and($f,$b);
	&rotr($b,2);			# b=ROTATE(b,30)
	&xor($f,$d);			# f holds F_00_19(b,c,d)
	&lea($tmp1,&DWP(0x5a827999,$tmp1,$e));	# tmp1+=K_00_19+xi

	if ($n==15) { &mov($e,&swtmp(($n+1)%16));# pre-fetch f for next round
		      &add($f,$tmp1); }		# f+=tmp1
	else        { &add($tmp1,$f); }		# f becomes a in next round
	&mov($tmp1,$a)			if ($alt && $n==15);
	}
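
# For reference, the mov/xor/and/xor chain above computes the Ch
# function F_00_19(b,c,d) = (b&c)|(~b&d) in the equivalent form
# d^(b&(c^d)), saving an instruction and a scratch register. A quick
# illustrative bit-level check in plain Perl (not generated code):
#
#	for my $b (0,1) { for my $c (0,1) { for my $d (0,1) {
#	    die unless ((($b&$c)|((1^$b)&$d)) == ($d^($b&($c^$d))));
#	}}}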
sub BODY_16_19
	{
	local($n,$a,$b,$c,$d,$e,$f)=@_;

	&comment("16_19 $n");

if ($alt) {
	&xor($c,$d);
	&xor($f,&swtmp(($n+2)%16));	# f to hold Xupdate(xi,xa,xb,xc,xd)
	&and($tmp1,$c);			# tmp1 to hold F_00_19(b,c,d), b&=c^d
	&xor($f,&swtmp(($n+8)%16));
	&xor($tmp1,$d);			# tmp1=F_00_19(b,c,d)
	&xor($f,&swtmp(($n+13)%16));	# f holds xa^xb^xc^xd
	&rotl($f,1);			# f=ROTATE(f,1)
	&add($e,$tmp1);			# e+=F_00_19(b,c,d)
	&xor($c,$d);			# restore $c
	&mov($tmp1,$a);			# b in next round
	&rotr($b,$n==16?2:7);		# b=ROTATE(b,30)
	&mov(&swtmp($n%16),$f);		# xi=f
	&rotl($a,5);			# ROTATE(a,5)
	&lea($f,&DWP(0x5a827999,$f,$e));# f+=F_00_19(b,c,d)+e
	&mov($e,&swtmp(($n+1)%16));	# pre-fetch f for next round
	&add($f,$a);			# f+=ROTATE(a,5)
} else {
	&mov($tmp1,$c);			# tmp1 to hold F_00_19(b,c,d)
	&xor($f,&swtmp(($n+2)%16));	# f to hold Xupdate(xi,xa,xb,xc,xd)
	&xor($tmp1,$d);
	&xor($f,&swtmp(($n+8)%16));
	&and($tmp1,$b);
	&xor($f,&swtmp(($n+13)%16));	# f holds xa^xb^xc^xd
	&rotl($f,1);			# f=ROTATE(f,1)
	&xor($tmp1,$d);			# tmp1=F_00_19(b,c,d)
	&add($e,$tmp1);			# e+=F_00_19(b,c,d)
	&mov($tmp1,$a);
	&rotr($b,2);			# b=ROTATE(b,30)
	&mov(&swtmp($n%16),$f);		# xi=f
	&rotl($tmp1,5);			# ROTATE(a,5)
	&lea($f,&DWP(0x5a827999,$f,$e));# f+=F_00_19(b,c,d)+e
	&mov($e,&swtmp(($n+1)%16));	# pre-fetch f for next round
	&add($f,$tmp1);			# f+=ROTATE(a,5)
}
	}
sub BODY_20_39
	{
	local($n,$a,$b,$c,$d,$e,$f)=@_;
	local $K=($n<40)?0x6ed9eba1:0xca62c1d6;

	&comment("20_39 $n");

if ($alt) {
	&xor($tmp1,$c);			# tmp1 to hold F_20_39(b,c,d), b^=c
	&xor($f,&swtmp(($n+2)%16));	# f to hold Xupdate(xi,xa,xb,xc,xd)
	&xor($tmp1,$d);			# tmp1 holds F_20_39(b,c,d)
	&xor($f,&swtmp(($n+8)%16));
	&add($e,$tmp1);			# e+=F_20_39(b,c,d)
	&xor($f,&swtmp(($n+13)%16));	# f holds xa^xb^xc^xd
	&rotl($f,1);			# f=ROTATE(f,1)
	&mov($tmp1,$a);			# b in next round
	&rotr($b,7);			# b=ROTATE(b,30)
	&mov(&swtmp($n%16),$f)		if($n<77);# xi=f
	&rotl($a,5);			# ROTATE(a,5)
	&xor($b,$c)			if($n==39);# warm up for BODY_40_59
	&and($tmp1,$b)			if($n==39);
	&lea($f,&DWP($K,$f,$e));	# f+=e+K_XX_YY
	&mov($e,&swtmp(($n+1)%16))	if($n<79);# pre-fetch f for next round
	&add($f,$a);			# f+=ROTATE(a,5)
	&rotr($a,5)			if ($n==79);
} else {
	&mov($tmp1,$b);			# tmp1 to hold F_20_39(b,c,d)
	&xor($f,&swtmp(($n+2)%16));	# f to hold Xupdate(xi,xa,xb,xc,xd)
	&xor($tmp1,$c);
	&xor($f,&swtmp(($n+8)%16));
	&xor($tmp1,$d);			# tmp1 holds F_20_39(b,c,d)
	&xor($f,&swtmp(($n+13)%16));	# f holds xa^xb^xc^xd
	&rotl($f,1);			# f=ROTATE(f,1)
	&add($e,$tmp1);			# e+=F_20_39(b,c,d)
	&rotr($b,2);			# b=ROTATE(b,30)
	&mov($tmp1,$a);
	&rotl($tmp1,5);			# ROTATE(a,5)
	&mov(&swtmp($n%16),$f)		if($n<77);# xi=f
	&lea($f,&DWP($K,$f,$e));	# f+=e+K_XX_YY
	&mov($e,&swtmp(($n+1)%16))	if($n<79);# pre-fetch f for next round
	&add($f,$tmp1);			# f+=ROTATE(a,5)
}
	}
sub BODY_40_59
	{
	local($n,$a,$b,$c,$d,$e,$f)=@_;

	&comment("40_59 $n");

if ($alt) {
	&add($e,$tmp1);			# e+=b&(c^d)
	&xor($f,&swtmp(($n+2)%16));	# f to hold Xupdate(xi,xa,xb,xc,xd)
	&mov($tmp1,$d);
	&xor($f,&swtmp(($n+8)%16));
	&xor($c,$d);			# restore $c
	&xor($f,&swtmp(($n+13)%16));	# f holds xa^xb^xc^xd
	&rotl($f,1);			# f=ROTATE(f,1)
	&and($tmp1,$c);
	&rotr($b,7);			# b=ROTATE(b,30)
	&add($e,$tmp1);			# e+=c&d
	&mov($tmp1,$a);			# b in next round
	&mov(&swtmp($n%16),$f);		# xi=f
	&rotl($a,5);			# ROTATE(a,5)
	&xor($b,$c)			if ($n<59);
	&and($tmp1,$b)			if ($n<59);# tmp1 to hold F_40_59(b,c,d)
	&lea($f,&DWP(0x8f1bbcdc,$f,$e));# f+=K_40_59+e+(b&(c^d))
	&mov($e,&swtmp(($n+1)%16));	# pre-fetch f for next round
	&add($f,$a);			# f+=ROTATE(a,5)
} else {
	&mov($tmp1,$c);			# tmp1 to hold F_40_59(b,c,d)
	&xor($f,&swtmp(($n+2)%16));	# f to hold Xupdate(xi,xa,xb,xc,xd)
	&xor($tmp1,$d);
	&xor($f,&swtmp(($n+8)%16));
	&and($tmp1,$b);
	&xor($f,&swtmp(($n+13)%16));	# f holds xa^xb^xc^xd
	&rotl($f,1);			# f=ROTATE(f,1)
	&add($tmp1,$e);			# b&(c^d)+=e
	&rotr($b,2);			# b=ROTATE(b,30)
	&mov($e,$a);			# e becomes volatile
	&rotl($e,5);			# ROTATE(a,5)
	&mov(&swtmp($n%16),$f);		# xi=f
	&lea($f,&DWP(0x8f1bbcdc,$f,$tmp1));# f+=K_40_59+e+(b&(c^d))
	&mov($tmp1,$c);
	&add($f,$e);			# f+=ROTATE(a,5)
	&and($tmp1,$d);
	&mov($e,&swtmp(($n+1)%16));	# pre-fetch f for next round
	&add($f,$tmp1);			# f+=c&d
}
	}
&function_begin("sha1_block_data_order");
if ($xmm) {
  &static_label("shaext_shortcut")	if ($shaext);
  &static_label("ssse3_shortcut");
  &static_label("avx_shortcut")		if ($ymm);
  &static_label("K_XX_XX");

	&call	(&label("pic_point"));	# make it PIC!
  &set_label("pic_point");
	&blindpop($tmp1);
	&picmeup($T,"OPENSSL_ia32cap_P",$tmp1,&label("pic_point"));
	&lea	($tmp1,&DWP(&label("K_XX_XX")."-".&label("pic_point"),$tmp1));

	&mov	($A,&DWP(0,$T));
	&mov	($D,&DWP(4,$T));
	&test	($D,1<<9);		# check SSSE3 bit
	&jz	(&label("x86"));
	&mov	($C,&DWP(8,$T));
	&test	($A,1<<24);		# check FXSR bit
	&jz	(&label("x86"));
  if ($shaext) {
	&test	($C,1<<29);		# check SHA bit
	&jnz	(&label("shaext_shortcut"));
  }
  if ($ymm) {
	&and	($D,1<<28);		# mask AVX bit
	&and	($A,1<<30);		# mask "Intel CPU" bit
	&or	($A,$D);
	&cmp	($A,1<<28|1<<30);
	&je	(&label("avx_shortcut"));
  }
	&jmp	(&label("ssse3_shortcut"));
  &set_label("x86",16);
}
	&mov($tmp1,&wparam(0));	# SHA_CTX *c
	&mov($T,&wparam(1));	# const void *input
	&mov($A,&wparam(2));	# size_t num
	&stack_push(16+3);	# allocate X[16]
	&shl($A,6);		# num*64
	&add($A,$T);
	&mov(&wparam(2),$A);	# pointer beyond the end of input
	&mov($E,&DWP(16,$tmp1));# pre-load E
	&jmp(&label("loop"));

&set_label("loop",16);
	# copy input chunk to X, but reversing byte order!
	for ($i=0; $i<16; $i+=4)
		{
		&mov($A,&DWP(4*($i+0),$T));
		&mov($B,&DWP(4*($i+1),$T));
		&mov($C,&DWP(4*($i+2),$T));
		&mov($D,&DWP(4*($i+3),$T));
		&bswap($A);
		&bswap($B);
		&bswap($C);
		&bswap($D);
		&mov(&swtmp($i+0),$A);
		&mov(&swtmp($i+1),$B);
		&mov(&swtmp($i+2),$C);
		&mov(&swtmp($i+3),$D);
		}
	&mov(&wparam(1),$T);	# redundant in 1st spin
	&mov($A,&DWP(0,$tmp1));	# load SHA_CTX
	&mov($B,&DWP(4,$tmp1));
	&mov($C,&DWP(8,$tmp1));
	&mov($D,&DWP(12,$tmp1));

	for($i=0;$i<16;$i++)	{ &BODY_00_15($i,@V); unshift(@V,pop(@V)); }
	for(;$i<20;$i++)	{ &BODY_16_19($i,@V); unshift(@V,pop(@V)); }
	for(;$i<40;$i++)	{ &BODY_20_39($i,@V); unshift(@V,pop(@V)); }
	for(;$i<60;$i++)	{ &BODY_40_59($i,@V); unshift(@V,pop(@V)); }
	for(;$i<80;$i++)	{ &BODY_20_39($i,@V); unshift(@V,pop(@V)); }

	(($V[5] eq $D) and ($V[0] eq $E)) or die;	# double-check
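
	# A note on the "unshift(@V,pop(@V))" idiom in the loops above:
	# it rotates the 6-tuple of variable names right by one, e.g.
	#
	#	my @V = qw(a b c d e t);
	#	unshift(@V,pop(@V));	# @V is now (t a b c d e)
	#
	# so the register that accumulated "f" in one round is simply
	# *named* "a" in the next, and no mov is spent shuffling the
	# working variables. The die() above merely re-checks that 80
	# such rotations left the names where the epilogue expects them.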
	&mov($tmp1,&wparam(0));	# re-load SHA_CTX*
	&mov($D,&wparam(1));	# D is last "T" and is discarded

	&add($E,&DWP(0,$tmp1));	# E is last "A"...
	&add($T,&DWP(4,$tmp1));
	&add($A,&DWP(8,$tmp1));
	&add($B,&DWP(12,$tmp1));
	&add($C,&DWP(16,$tmp1));

	&mov(&DWP(0,$tmp1),$E);	# update SHA_CTX
	&add($D,64);		# advance input pointer
	&mov(&DWP(4,$tmp1),$T);
	&cmp($D,&wparam(2));	# have we reached the end yet?
	&mov(&DWP(8,$tmp1),$A);
	&mov($E,$C);		# C is last "E" which needs to be "pre-loaded"
	&mov(&DWP(12,$tmp1),$B);
	&mov($T,$D);		# input pointer
	&mov(&DWP(16,$tmp1),$C);
	&jb(&label("loop"));

	&stack_pop(16+3);
&function_end("sha1_block_data_order");
######################################################################
# Intel SHA Extensions implementation of SHA1 update function.
#
if ($shaext) {
my ($ctx,$inp,$num)=("edi","esi","ecx");
my ($ABCD,$E,$E_,$BSWAP)=map("xmm$_",(0..3));
my @MSG=map("xmm$_",(4..7));

sub sha1rnds4 {
 my ($dst,$src,$imm)=@_;
    if ("$dst:$src" =~ /xmm([0-7]):xmm([0-7])/)
    {	&data_byte(0x0f,0x3a,0xcc,0xc0|($1<<3)|$2,$imm);	}
}
sub sha1op38 {
 my ($opcodelet,$dst,$src)=@_;
    if ("$dst:$src" =~ /xmm([0-7]):xmm([0-7])/)
    {	&data_byte(0x0f,0x38,$opcodelet,0xc0|($1<<3)|$2);	}
}
sub sha1nexte	{ sha1op38(0xc8,@_); }
sub sha1msg1	{ sha1op38(0xc9,@_); }
sub sha1msg2	{ sha1op38(0xca,@_); }
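
# These helpers hand-assemble the SHA extension instructions so that
# the module still builds with assemblers that predate the mnemonics.
# For example, per the Intel SDM, SHA1RNDS4 xmm1,xmm2,imm8 is encoded
# as 0F 3A CC /r ib, so sha1rnds4("xmm0","xmm1",0) emits the bytes
# 0f 3a cc c1 00, where ModR/M 0xc1 (11 000 001b) selects xmm0,xmm1;
# SHA1NEXTE/SHA1MSG1/SHA1MSG2 share the 0F 38 escape with opcodelets
# C8/C9/CA.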

&function_begin("_sha1_block_data_order_shaext");
	&call	(&label("pic_point"));	# make it PIC!
&set_label("pic_point");
	&blindpop($tmp1);
	&lea	($tmp1,&DWP(&label("K_XX_XX")."-".&label("pic_point"),$tmp1));
&set_label("shaext_shortcut");
	&mov	($ctx,&wparam(0));
	&mov	($inp,&wparam(1));
	&mov	($num,&wparam(2));

	&movdqu	($ABCD,&QWP(0,$ctx));
	&movd	($E,&DWP(16,$ctx));

	&movdqa	($BSWAP,&QWP(0x50,$tmp1));	# byte-n-word swap

	&movdqu	(@MSG[0],&QWP(0,$inp));
	&pshufd	($ABCD,$ABCD,0b00011011);	# flip word order
	&movdqu	(@MSG[1],&QWP(0x10,$inp));
	&pshufd	($E,$E,0b00011011);		# flip word order
	&movdqu	(@MSG[2],&QWP(0x20,$inp));
	&pshufb	(@MSG[0],$BSWAP);
	&movdqu	(@MSG[3],&QWP(0x30,$inp));
	&pshufb	(@MSG[1],$BSWAP);
	&pshufb	(@MSG[2],$BSWAP);
	&pshufb	(@MSG[3],$BSWAP);
	&jmp	(&label("loop_shaext"));
481 &set_label("loop_shaext",16);
483 &lea ("eax",&DWP(0x40,$inp));
484 &movdqa (&QWP(0,"esp"),$E); # offload $E
486 &cmovne ($inp,"eax");
487 &movdqa (&QWP(16,"esp"),$ABCD); # offload $ABCD
489 for($i=0;$i<20-4;$i+=2) {
490 &sha1msg1 (@MSG[0],@MSG[1]);
492 &sha1rnds4 ($ABCD,$E,int($i/5)); # 0-3...
493 &sha1nexte ($E_,@MSG[1]);
494 &pxor (@MSG[0],@MSG[2]);
495 &sha1msg1 (@MSG[1],@MSG[2]);
496 &sha1msg2 (@MSG[0],@MSG[3]);
499 &sha1rnds4 ($ABCD,$E_,int(($i+1)/5));
500 &sha1nexte ($E,@MSG[2]);
501 &pxor (@MSG[1],@MSG[3]);
502 &sha1msg2 (@MSG[1],@MSG[0]);
504 push(@MSG,shift(@MSG)); push(@MSG,shift(@MSG));
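
	# The double push/shift above rotates @MSG by two positions, so
	# that @MSG[0],@MSG[1] again name the next quadruple pair to be
	# scheduled; illustratively:
	#
	#	my @MSG = qw(m0 m1 m2 m3);
	#	push(@MSG,shift(@MSG)); push(@MSG,shift(@MSG));
	#	# @MSG is now (m2 m3 m0 m1)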
	&movdqu	(@MSG[0],&QWP(0,$inp));
	&movdqa	($E_,$ABCD);
	&sha1rnds4	($ABCD,$E,3);		# 64-67
	&sha1nexte	($E_,@MSG[1]);
	&movdqu	(@MSG[1],&QWP(0x10,$inp));
	&pshufb	(@MSG[0],$BSWAP);

	&movdqa	($E,$ABCD);
	&sha1rnds4	($ABCD,$E_,3);		# 68-71
	&sha1nexte	($E,@MSG[2]);
	&movdqu	(@MSG[2],&QWP(0x20,$inp));
	&pshufb	(@MSG[1],$BSWAP);

	&movdqa	($E_,$ABCD);
	&sha1rnds4	($ABCD,$E,3);		# 72-75
	&sha1nexte	($E_,@MSG[3]);
	&movdqu	(@MSG[3],&QWP(0x30,$inp));
	&pshufb	(@MSG[2],$BSWAP);

	&movdqa	($E,$ABCD);
	&sha1rnds4	($ABCD,$E_,3);		# 76-79
	&movdqa	($E_,&QWP(0,"esp"));
	&pshufb	(@MSG[3],$BSWAP);
	&sha1nexte	($E,$E_);
	&paddd	($ABCD,&QWP(16,"esp"));

	&jnz	(&label("loop_shaext"));

	&pshufd	($ABCD,$ABCD,0b00011011);
	&pshufd	($E,$E,0b00011011);
	&movdqu	(&QWP(0,$ctx),$ABCD);
	&movd	(&DWP(16,$ctx),$E);
&function_end("_sha1_block_data_order_shaext");
}
######################################################################
# The SSSE3 implementation.
#
# %xmm[0-7] are used as ring @X[] buffer containing quadruples of last
# 32 elements of the message schedule or Xupdate outputs. First 4
# quadruples are simply byte-swapped input, next 4 are calculated
# according to the method originally suggested by Dean Gaudet (modulo
# being implemented in SSSE3). Once 8 quadruples or 32 elements are
# collected, it switches to the routine proposed by Max Locktyukhin.
#
# Calculations inevitably require temporary registers, and there are
# no %xmm registers left to spare. For this reason part of the ring
# buffer, X[2..4] to be specific, is offloaded to a 3-quadruple ring
# buffer on the stack. Keep in mind that X[2] is an alias of X[-6],
# X[3] of X[-5], and X[4] of X[-4]...
#
# Another notable optimization is aggressive stack frame compression
# aiming to minimize the number of 9-byte instructions...
#
# Yet another notable optimization is the "jumping" $B variable. It
# means that there is no register permanently allocated for the $B
# value. This made it possible to eliminate one instruction from
# body_20_39...
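#
# A note on the "@X[-4&7]"-style subscripts used throughout: @X holds
# eight xmm names and the ring is walked with masked arithmetic, e.g.
#
#	my @X = map("xmm$_",(4..7,0..3));
#	print $X[-4 & 7];	# -4&7 == 4, i.e. "xmm0"
#
# so "X[-4]" (16 schedule elements back) is just an index into the
# same 8-entry window, and push(@X,shift(@X)) advances the window.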
my $Xi=4;			# 4xSIMD Xupdate round, start pre-seeded
my @X=map("xmm$_",(4..7,0..3));	# pre-seeded for $Xi=4
my @V=($A,$B,$C,$D,$E);
my $j=0;			# hash round
my $rx=0;
my @T=($T,$tmp1);
my $inp;

my $_rol=sub { &rol(@_) };
my $_ror=sub { &ror(@_) };

&function_begin("_sha1_block_data_order_ssse3");
	&call	(&label("pic_point"));	# make it PIC!
&set_label("pic_point");
	&blindpop($tmp1);
	&lea	($tmp1,&DWP(&label("K_XX_XX")."-".&label("pic_point"),$tmp1));
&set_label("ssse3_shortcut");

	&movdqa	(@X[3],&QWP(0,$tmp1));	# K_00_19
	&movdqa	(@X[4],&QWP(16,$tmp1));	# K_20_39
	&movdqa	(@X[5],&QWP(32,$tmp1));	# K_40_59
	&movdqa	(@X[6],&QWP(48,$tmp1));	# K_60_79
	&movdqa	(@X[2],&QWP(64,$tmp1));	# pbswap mask

	&mov	($E,&wparam(0));	# load argument block
	&mov	($inp=@T[1],&wparam(1));
	&mov	($D,&wparam(2));
	&mov	(@T[0],"esp");		# original %esp

	# stack frame layout
	#
	# +0	X[0]+K	X[1]+K	X[2]+K	X[3]+K	# XMM->IALU xfer area
	#	X[4]+K	X[5]+K	X[6]+K	X[7]+K
	#	X[8]+K	X[9]+K	X[10]+K	X[11]+K
	#	X[12]+K	X[13]+K	X[14]+K	X[15]+K
	#
	# +64	X[0]	X[1]	X[2]	X[3]	# XMM->XMM backtrace area
	#	X[4]	X[5]	X[6]	X[7]
	#	X[8]	X[9]	X[10]	X[11]	# even borrowed for K_00_19
	#
	# +112	K_20_39	K_20_39	K_20_39	K_20_39	# constants
	#	K_40_59	K_40_59	K_40_59	K_40_59
	#	K_60_79	K_60_79	K_60_79	K_60_79
	#	K_00_19	K_00_19	K_00_19	K_00_19
	#	pbswap mask
	#
	# +192	ctx				# argument block
	# +196	inp
	# +200	end
	# +204	esp

	&sub	("esp",208);
	&and	("esp",-64);

	&movdqa	(&QWP(112+0,"esp"),@X[4]);	# copy constants
	&movdqa	(&QWP(112+16,"esp"),@X[5]);
	&movdqa	(&QWP(112+32,"esp"),@X[6]);
	&shl	($D,6);				# len*64
	&movdqa	(&QWP(112+48,"esp"),@X[3]);
	&add	($D,$inp);			# end of input
	&movdqa	(&QWP(112+64,"esp"),@X[2]);

	&mov	(&DWP(192+0,"esp"),$E);		# save argument block
	&mov	(&DWP(192+4,"esp"),$inp);
	&mov	(&DWP(192+8,"esp"),$D);
	&mov	(&DWP(192+12,"esp"),@T[0]);	# save original %esp

	&mov	($A,&DWP(0,$E));	# load context
	&mov	($B,&DWP(4,$E));
	&mov	($C,&DWP(8,$E));
	&mov	($D,&DWP(12,$E));
	&mov	($E,&DWP(16,$E));
	&mov	(@T[0],$B);		# magic seed

	&add	($inp,64);
	&movdqu	(@X[-4&7],&QWP(-64,$inp));	# load input to %xmm[0-3]
	&movdqu	(@X[-3&7],&QWP(-48,$inp));
	&movdqu	(@X[-2&7],&QWP(-32,$inp));
	&movdqu	(@X[-1&7],&QWP(-16,$inp));
	&pshufb	(@X[-4&7],@X[2]);	# byte swap
	&pshufb	(@X[-3&7],@X[2]);
	&pshufb	(@X[-2&7],@X[2]);
	&movdqa	(&QWP(112-16,"esp"),@X[3]);	# borrow last backtrace slot
	&pshufb	(@X[-1&7],@X[2]);
	&paddd	(@X[-4&7],@X[3]);	# add K_00_19
	&paddd	(@X[-3&7],@X[3]);
	&paddd	(@X[-2&7],@X[3]);
	&movdqa	(&QWP(0,"esp"),@X[-4&7]);	# X[]+K xfer to IALU
	&psubd	(@X[-4&7],@X[3]);	# restore X[]
	&movdqa	(&QWP(0+16,"esp"),@X[-3&7]);
	&psubd	(@X[-3&7],@X[3]);
	&movdqa	(&QWP(0+32,"esp"),@X[-2&7]);
	&psubd	(@X[-2&7],@X[3]);

	&pshufd	(@X[0],@X[-4&7],0xee);	# was &movdqa (@X[0],@X[-3&7]);

	&jmp	(&label("loop"));

######################################################################
# SSE instruction sequence is first broken into groups of independent
# instructions, independent with respect to their inputs and shifter
# (not all architectures have more than one). Then IALU instructions
# are "knitted in" between the SSE groups. Distance is maintained for
# SSE latency of 2 in the hope that it fits better the upcoming AMD
# Bulldozer [which allegedly also implements SSSE3]...
#
# Temporary register usage. X[2] is volatile at the entry and at the
# end is restored from the backtrace ring buffer. X[3] is expected to
# contain the current K_XX_XX constant and is used to calculate X[-1]+K
# from the previous round; it becomes volatile the moment the value is
# saved to the stack for transfer to the IALU. X[4] becomes volatile
# whenever X[-4] is accumulated and offloaded to the backtrace ring
# buffer, at the end it is loaded with the next K_XX_XX [which becomes
# X[3] in the next round]...
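#
# The "knitting" itself is mechanical: each Xupdate_* helper collects
# four rounds' worth of IALU instructions as strings,
#
#	my @insns = (&$body,&$body,&$body,&$body);
#	...
#	eval(shift(@insns));	# emit one IALU instruction
#	&pxor	(@X[0],@X[2]);	# then an SSE instruction, and so on
#
# so the generated stream alternates between the two execution
# domains, with the trailing "foreach (@insns) { eval; }" flushing
# whatever is left.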

sub Xupdate_ssse3_16_31()		# recall that $Xi starts with 4
{ use integer;
  my $body = shift;
  my @insns = (&$body,&$body,&$body,&$body);	# 40 instructions
  my ($a,$b,$c,$d,$e);

	eval(shift(@insns));		# ror
	&punpcklqdq(@X[0],@X[-3&7]);	# compose "X[-14]" in "X[0]", was &palignr(@X[0],@X[-4&7],8);
	&movdqa	(@X[2],@X[-1&7]);
	&paddd	(@X[3],@X[-1&7]);
	&movdqa	(&QWP(64+16*(($Xi-4)%3),"esp"),@X[-4&7]);	# save X[] to backtrace buffer
	eval(shift(@insns));		# rol
	&psrldq	(@X[2],4);		# "X[-3]", 3 dwords
	&pxor	(@X[0],@X[-4&7]);	# "X[0]"^="X[-16]"
	eval(shift(@insns));		# ror
	&pxor	(@X[2],@X[-2&7]);	# "X[-3]"^"X[-8]"
	&pxor	(@X[0],@X[2]);		# "X[0]"^="X[-3]"^"X[-8]"
	eval(shift(@insns));		# rol
	&movdqa	(&QWP(0+16*(($Xi-1)&3),"esp"),@X[3]);	# X[]+K xfer to IALU
	&movdqa	(@X[4],@X[0]);
	eval(shift(@insns));		# ror
	&movdqa	(@X[2],@X[0]);
	&pslldq	(@X[4],12);		# "X[0]"<<96, extract one dword
	&paddd	(@X[0],@X[0]);
	eval(shift(@insns));		# rol
	&movdqa	(@X[3],@X[4]);
	eval(shift(@insns));		# ror
	&por	(@X[0],@X[2]);		# "X[0]"<<<=1
	&movdqa	(@X[2],&QWP(64+16*(($Xi-6)%3),"esp")) if ($Xi>5);	# restore X[] from backtrace buffer
	eval(shift(@insns));		# rol
	&movdqa	(@X[4],&QWP(112-16+16*(($Xi)/5),"esp"));	# K_XX_XX
	&pxor	(@X[0],@X[3]);		# "X[0]"^=("X[0]"<<96)<<<2
	&pshufd	(@X[1],@X[-3&7],0xee)	if ($Xi<7);	# was &movdqa (@X[1],@X[-2&7])
	&pshufd	(@X[3],@X[-1&7],0xee)	if ($Xi==7);

	foreach (@insns) { eval; }	# remaining instructions [if any]

  $Xi++;	push(@X,shift(@X));	# "rotate" X[]
}

sub Xupdate_ssse3_32_79()
{ use integer;
  my $body = shift;
  my @insns = (&$body,&$body,&$body,&$body);	# 32 to 44 instructions
  my ($a,$b,$c,$d,$e);

	eval(shift(@insns));		# body_20_39
	&pxor	(@X[0],@X[-4&7]);	# "X[0]"="X[-32]"^"X[-16]"
	&punpcklqdq(@X[2],@X[-1&7]);	# compose "X[-6]", was &palignr(@X[2],@X[-2&7],8)
	eval(shift(@insns));		# rol
	&pxor	(@X[0],@X[-7&7]);	# "X[0]"^="X[-28]"
	&movdqa	(&QWP(64+16*(($Xi-4)%3),"esp"),@X[-4&7]);	# save X[] to backtrace buffer
	eval(shift(@insns))		if (@insns[0] =~ /_rol/);
  if ($Xi%5) {
	&movdqa	(@X[4],@X[3]);		# "perpetuate" K_XX_XX...
  } else {				# ... or load next one
	&movdqa	(@X[4],&QWP(112-16+16*($Xi/5),"esp"));
  }
	eval(shift(@insns));		# ror
	&paddd	(@X[3],@X[-1&7]);

	&pxor	(@X[0],@X[2]);		# "X[0]"^="X[-6]"
	eval(shift(@insns));		# body_20_39
	eval(shift(@insns));		# rol
	&movdqa	(@X[2],@X[0]);
	&movdqa	(&QWP(0+16*(($Xi-1)&3),"esp"),@X[3]);	# X[]+K xfer to IALU
	&psrld	(@X[2],30);
	eval(shift(@insns));		# ror
	eval(shift(@insns))		if (@insns[0] =~ /_rol/);

	eval(shift(@insns));		# body_20_39
	&pslld	(@X[0],2);
	eval(shift(@insns));		# rol
	eval(shift(@insns));		# ror
	eval(shift(@insns))		if (@insns[1] =~ /_rol/);
	eval(shift(@insns))		if (@insns[0] =~ /_rol/);

	&por	(@X[0],@X[2]);		# "X[0]"<<<=2
	eval(shift(@insns));		# body_20_39
	&movdqa	(@X[2],&QWP(64+16*(($Xi-6)%3),"esp")) if($Xi<19);	# restore X[] from backtrace buffer
	eval(shift(@insns));		# rol
	eval(shift(@insns));		# ror
	&pshufd	(@X[3],@X[-1],0xee)	if ($Xi<19);	# was &movdqa (@X[3],@X[0])

	foreach (@insns) { eval; }	# remaining instructions

  $Xi++;	push(@X,shift(@X));	# "rotate" X[]
}

sub Xuplast_ssse3_80()
{ use integer;
  my $body = shift;
  my @insns = (&$body,&$body,&$body,&$body);	# 32 instructions
  my ($a,$b,$c,$d,$e);

	eval(shift(@insns));
	&paddd	(@X[3],@X[-1&7]);
	eval(shift(@insns));

	&movdqa	(&QWP(0+16*(($Xi-1)&3),"esp"),@X[3]);	# X[]+K xfer IALU

	foreach (@insns) { eval; }	# remaining instructions

	&mov	($inp=@T[1],&DWP(192+4,"esp"));
	&cmp	($inp,&DWP(192+8,"esp"));
	&je	(&label("done"));

	&movdqa	(@X[3],&QWP(112+48,"esp"));	# K_00_19
	&movdqa	(@X[2],&QWP(112+64,"esp"));	# pbswap mask
	&movdqu	(@X[-4&7],&QWP(0,$inp));	# load input
	&movdqu	(@X[-3&7],&QWP(16,$inp));
	&movdqu	(@X[-2&7],&QWP(32,$inp));
	&movdqu	(@X[-1&7],&QWP(48,$inp));
	&add	($inp,64);
	&pshufb	(@X[-4&7],@X[2]);		# byte swap
	&mov	(&DWP(192+4,"esp"),$inp);
	&movdqa	(&QWP(112-16,"esp"),@X[3]);	# borrow last backtrace slot

  $Xi=0;
}

sub Xloop_ssse3()
{ use integer;
  my $body = shift;
  my @insns = (&$body,&$body,&$body,&$body);	# 32 instructions
  my ($a,$b,$c,$d,$e);

	eval(shift(@insns));
	eval(shift(@insns));
	&pshufb	(@X[($Xi-3)&7],@X[2]);
	eval(shift(@insns));
	eval(shift(@insns));
	&paddd	(@X[($Xi-4)&7],@X[3]);
	eval(shift(@insns));
	eval(shift(@insns));
	&movdqa	(&QWP(0+16*$Xi,"esp"),@X[($Xi-4)&7]);	# X[]+K xfer to IALU
	eval(shift(@insns));
	eval(shift(@insns));
	&psubd	(@X[($Xi-4)&7],@X[3]);

	foreach (@insns) { eval; }

  $Xi++;
}

sub Xtail_ssse3()
{ use integer;
  my $body = shift;
  my @insns = (&$body,&$body,&$body,&$body);	# 32 instructions
  my ($a,$b,$c,$d,$e);

	foreach (@insns) { eval; }
}

sub body_00_19 () {	# ((c^d)&b)^d
	# on start @T[0]=(c^d)&b
	return &body_20_39() if ($rx==19); $rx++;
	(
	'($a,$b,$c,$d,$e)=@V;'.
	'&$_ror	($b,$j?7:2);',		# $b>>>2
	'&xor	(@T[0],$d);',
	'&mov	(@T[1],$a);',		# $b in next round
	'&$_rol	($a,5);',
	'&add	($e,&DWP(4*($j&15),"esp"));',	# X[]+K xfer
	'&xor	($b,$c);',		# $c^$d for next round
	'&add	($e,@T[0]);',
	'&and	(@T[1],$b);',		# ($b&($c^$d)) for next round
	'&xor	($b,$c);',		# restore $b
	'&add	($e,$a);'	.'$j++; unshift(@V,pop(@V)); unshift(@T,pop(@T));'
	);
}

sub body_20_39 () {	# b^d^c
	return &body_40_59() if ($rx==39); $rx++;
	(
	'($a,$b,$c,$d,$e)=@V;'.
	'&add	($e,&DWP(4*($j&15),"esp"));',	# X[]+K xfer
	'&xor	(@T[0],$d) if($j==19);'.
	'&xor	(@T[0],$c) if($j> 19);',	# ($b^$d^$c)
	'&mov	(@T[1],$a);',		# $b in next round
	'&$_rol	($a,5);',
	'&add	($e,@T[0]);',
	'&xor	(@T[1],$c) if ($j< 79);',	# $b^$d for next round
	'&$_ror	($b,7);',		# $b>>>2
	'&add	($e,$a);'	.'$j++; unshift(@V,pop(@V)); unshift(@T,pop(@T));'
	);
}

sub body_40_59 () {	# ((b^c)&(c^d))^c
	# on entry @T[0]=(b^c), (c^=d)
	return &body_20_39() if ($rx==59); $rx++;
	(
	'($a,$b,$c,$d,$e)=@V;'.
	'&add	($e,&DWP(4*($j&15),"esp"));',	# X[]+K xfer
	'&and	(@T[0],$c) if ($j>=40);',	# (b^c)&(c^d)
	'&xor	($c,$d) if ($j>=40);',		# restore $c
	'&$_ror	($b,7);',			# $b>>>2
	'&mov	(@T[1],$a);',			# $b for next round
	'&$_rol	($a,5);',
	'&add	($e,@T[0]);',
	'&xor	(@T[1],$c) if ($j==59);'.
	'&xor	(@T[1],$b) if ($j< 59);',	# b^c for next round
	'&xor	($b,$c) if ($j< 59);',		# c^d for next round
	'&add	($e,$a);'	.'$j++; unshift(@V,pop(@V)); unshift(@T,pop(@T));'
	);
}

sub bodyx_00_19 () {	# ((c^d)&b)^d
	# on start @T[0]=(b&c)^(~b&d), $e+=X[]+K
	return &bodyx_20_39() if ($rx==19); $rx++;
	(
	'($a,$b,$c,$d,$e)=@V;'.

	'&rorx	($b,$b,2)			if ($j==0);'.	# $b>>>2
	'&rorx	($b,@T[1],7)			if ($j!=0);',	# $b>>>2
	'&lea	($e,&DWP(0,$e,@T[0]));',
	'&rorx	(@T[0],$a,5);',

	'&andn	(@T[1],$a,$c);',
	'&add	($d,&DWP(4*(($j+1)&15),"esp"));',	# X[]+K xfer

	'&add	($e,@T[0]);'	.'$j++; unshift(@V,pop(@V)); unshift(@T,pop(@T));'
	);
}

sub bodyx_20_39 () {	# b^d^c
	return &bodyx_40_59() if ($rx==39); $rx++;
	(
	'($a,$b,$c,$d,$e)=@V;'.

	'&add	($e,($j==19?@T[0]:$b));',
	'&rorx	($b,@T[1],7);',	# $b>>>2
	'&rorx	(@T[0],$a,5);',

	'&xor	($a,$b) if ($j<79);',
	'&add	($d,&DWP(4*(($j+1)&15),"esp")) if ($j<79);',	# X[]+K xfer
	'&xor	($a,$c) if ($j<79);',
	'&add	($e,@T[0]);'	.'$j++; unshift(@V,pop(@V)); unshift(@T,pop(@T));'
	);
}

sub bodyx_40_59 () {	# ((b^c)&(c^d))^c
	# on start $b=((b^c)&(c^d))^c
	return &bodyx_20_39() if ($rx==59); $rx++;
	(
	'($a,$b,$c,$d,$e)=@V;'.

	'&rorx	(@T[0],$a,5)',
	'&lea	($e,&DWP(0,$e,$b))',
	'&rorx	($b,@T[1],7)',	# $b>>>2
	'&add	($d,&DWP(4*(($j+1)&15),"esp"))',	# X[]+K xfer

	'&xor	($a,$b)',	# b^c for next round
	'&xor	(@T[1],$b)',	# c^d for next round

	'&xor	($a,$b)'	.'$j++; unshift(@V,pop(@V)); unshift(@T,pop(@T));'
	);
}
1039 &set_label("loop",16);
1040 &Xupdate_ssse3_16_31(\&body_00_19);
1041 &Xupdate_ssse3_16_31(\&body_00_19);
1042 &Xupdate_ssse3_16_31(\&body_00_19);
1043 &Xupdate_ssse3_16_31(\&body_00_19);
1044 &Xupdate_ssse3_32_79(\&body_00_19);
1045 &Xupdate_ssse3_32_79(\&body_20_39);
1046 &Xupdate_ssse3_32_79(\&body_20_39);
1047 &Xupdate_ssse3_32_79(\&body_20_39);
1048 &Xupdate_ssse3_32_79(\&body_20_39);
1049 &Xupdate_ssse3_32_79(\&body_20_39);
1050 &Xupdate_ssse3_32_79(\&body_40_59);
1051 &Xupdate_ssse3_32_79(\&body_40_59);
1052 &Xupdate_ssse3_32_79(\&body_40_59);
1053 &Xupdate_ssse3_32_79(\&body_40_59);
1054 &Xupdate_ssse3_32_79(\&body_40_59);
1055 &Xupdate_ssse3_32_79(\&body_20_39);
1056 &Xuplast_ssse3_80(\&body_20_39); # can jump to "done"
1058 $saved_j=$j; @saved_V=@V;
1060 &Xloop_ssse3(\&body_20_39);
1061 &Xloop_ssse3(\&body_20_39);
1062 &Xloop_ssse3(\&body_20_39);

	&mov	(@T[1],&DWP(192,"esp"));	# update context
	&add	($A,&DWP(0,@T[1]));
	&add	(@T[0],&DWP(4,@T[1]));		# $b
	&add	($C,&DWP(8,@T[1]));
	&mov	(&DWP(0,@T[1]),$A);
	&add	($D,&DWP(12,@T[1]));
	&mov	(&DWP(4,@T[1]),@T[0]);
	&add	($E,&DWP(16,@T[1]));
	&mov	(&DWP(8,@T[1]),$C);
	&mov	($B,@T[0]);
	&mov	(&DWP(12,@T[1]),$D);
	&mov	(&DWP(16,@T[1]),$E);

	&pshufd	(@X[0],@X[-4&7],0xee);	# was &movdqa (@X[0],@X[-3&7]);

	&jmp	(&label("loop"));
1084 &set_label("done",16); $j=$saved_j; @V=@saved_V;
1086 &Xtail_ssse3(\&body_20_39);
1087 &Xtail_ssse3(\&body_20_39);
1088 &Xtail_ssse3(\&body_20_39);
1090 &mov (@T[1],&DWP(192,"esp")); # update context
1091 &add ($A,&DWP(0,@T[1]));
1092 &mov ("esp",&DWP(192+12,"esp")); # restore %esp
1093 &add (@T[0],&DWP(4,@T[1])); # $b
1094 &add ($C,&DWP(8,@T[1]));
1095 &mov (&DWP(0,@T[1]),$A);
1096 &add ($D,&DWP(12,@T[1]));
1097 &mov (&DWP(4,@T[1]),@T[0]);
1098 &add ($E,&DWP(16,@T[1]));
1099 &mov (&DWP(8,@T[1]),$C);
1100 &mov (&DWP(12,@T[1]),$D);
1101 &mov (&DWP(16,@T[1]),$E);
1103 &function_end("_sha1_block_data_order_ssse3");

if ($ymm) {
my $Xi=4;			# 4xSIMD Xupdate round, start pre-seeded
my @X=map("xmm$_",(4..7,0..3));	# pre-seeded for $Xi=4
my @V=($A,$B,$C,$D,$E);
my $j=0;			# hash round
my $rx=0;
my @T=($T,$tmp1);
my $inp;

my $_rol=sub { &shld(@_[0],@_) };
my $_ror=sub { &shrd(@_[0],@_) };

&function_begin("_sha1_block_data_order_avx");
	&call	(&label("pic_point"));	# make it PIC!
&set_label("pic_point");
	&blindpop($tmp1);
	&lea	($tmp1,&DWP(&label("K_XX_XX")."-".&label("pic_point"),$tmp1));
&set_label("avx_shortcut");
	&vzeroall();

	&vmovdqa(@X[3],&QWP(0,$tmp1));	# K_00_19
	&vmovdqa(@X[4],&QWP(16,$tmp1));	# K_20_39
	&vmovdqa(@X[5],&QWP(32,$tmp1));	# K_40_59
	&vmovdqa(@X[6],&QWP(48,$tmp1));	# K_60_79
	&vmovdqa(@X[2],&QWP(64,$tmp1));	# pbswap mask

	&mov	($E,&wparam(0));	# load argument block
	&mov	($inp=@T[1],&wparam(1));
	&mov	($D,&wparam(2));
	&mov	(@T[0],"esp");		# original %esp

	# stack frame layout
	#
	# +0	X[0]+K	X[1]+K	X[2]+K	X[3]+K	# XMM->IALU xfer area
	#	X[4]+K	X[5]+K	X[6]+K	X[7]+K
	#	X[8]+K	X[9]+K	X[10]+K	X[11]+K
	#	X[12]+K	X[13]+K	X[14]+K	X[15]+K
	#
	# +64	X[0]	X[1]	X[2]	X[3]	# XMM->XMM backtrace area
	#	X[4]	X[5]	X[6]	X[7]
	#	X[8]	X[9]	X[10]	X[11]	# even borrowed for K_00_19
	#
	# +112	K_20_39	K_20_39	K_20_39	K_20_39	# constants
	#	K_40_59	K_40_59	K_40_59	K_40_59
	#	K_60_79	K_60_79	K_60_79	K_60_79
	#	K_00_19	K_00_19	K_00_19	K_00_19
	#	pbswap mask
	#
	# +192	ctx				# argument block
	# +196	inp
	# +200	end
	# +204	esp

	&sub	("esp",208);
	&and	("esp",-64);

	&vmovdqa(&QWP(112+0,"esp"),@X[4]);	# copy constants
	&vmovdqa(&QWP(112+16,"esp"),@X[5]);
	&vmovdqa(&QWP(112+32,"esp"),@X[6]);
	&shl	($D,6);				# len*64
	&vmovdqa(&QWP(112+48,"esp"),@X[3]);
	&add	($D,$inp);			# end of input
	&vmovdqa(&QWP(112+64,"esp"),@X[2]);

	&mov	(&DWP(192+0,"esp"),$E);		# save argument block
	&mov	(&DWP(192+4,"esp"),$inp);
	&mov	(&DWP(192+8,"esp"),$D);
	&mov	(&DWP(192+12,"esp"),@T[0]);	# save original %esp

	&mov	($A,&DWP(0,$E));	# load context
	&mov	($B,&DWP(4,$E));
	&mov	($C,&DWP(8,$E));
	&mov	($D,&DWP(12,$E));
	&mov	($E,&DWP(16,$E));
	&mov	(@T[0],$B);		# magic seed

	&add	($inp,64);
	&vmovdqu(@X[-4&7],&QWP(-64,$inp));	# load input to %xmm[0-3]
	&vmovdqu(@X[-3&7],&QWP(-48,$inp));
	&vmovdqu(@X[-2&7],&QWP(-32,$inp));
	&vmovdqu(@X[-1&7],&QWP(-16,$inp));
	&vpshufb(@X[-4&7],@X[-4&7],@X[2]);	# byte swap
	&vpshufb(@X[-3&7],@X[-3&7],@X[2]);
	&vpshufb(@X[-2&7],@X[-2&7],@X[2]);
	&vmovdqa(&QWP(112-16,"esp"),@X[3]);	# borrow last backtrace slot
	&vpshufb(@X[-1&7],@X[-1&7],@X[2]);
	&vpaddd	(@X[0],@X[-4&7],@X[3]);		# add K_00_19
	&vpaddd	(@X[1],@X[-3&7],@X[3]);
	&vpaddd	(@X[2],@X[-2&7],@X[3]);
	&vmovdqa(&QWP(0,"esp"),@X[0]);		# X[]+K xfer to IALU
	&vmovdqa(&QWP(0+16,"esp"),@X[1]);
	&vmovdqa(&QWP(0+32,"esp"),@X[2]);

	&jmp	(&label("loop"));

sub Xupdate_avx_16_31()		# recall that $Xi starts with 4
{ use integer;
  my $body = shift;
  my @insns = (&$body,&$body,&$body,&$body);	# 40 instructions
  my ($a,$b,$c,$d,$e);

	eval(shift(@insns));
	eval(shift(@insns));
	&vpalignr(@X[0],@X[-3&7],@X[-4&7],8);	# compose "X[-14]" in "X[0]"
	eval(shift(@insns));
	eval(shift(@insns));

	&vpaddd	(@X[3],@X[3],@X[-1&7]);
	&vmovdqa(&QWP(64+16*(($Xi-4)%3),"esp"),@X[-4&7]);	# save X[] to backtrace buffer
	eval(shift(@insns));
	eval(shift(@insns));
	&vpsrldq(@X[2],@X[-1&7],4);		# "X[-3]", 3 dwords
	eval(shift(@insns));
	eval(shift(@insns));
	&vpxor	(@X[0],@X[0],@X[-4&7]);		# "X[0]"^="X[-16]"
	eval(shift(@insns));
	eval(shift(@insns));

	&vpxor	(@X[2],@X[2],@X[-2&7]);		# "X[-3]"^"X[-8]"
	eval(shift(@insns));
	eval(shift(@insns));
	&vmovdqa(&QWP(0+16*(($Xi-1)&3),"esp"),@X[3]);	# X[]+K xfer to IALU
	eval(shift(@insns));
	eval(shift(@insns));

	&vpxor	(@X[0],@X[0],@X[2]);		# "X[0]"^="X[-3]"^"X[-8]"
	eval(shift(@insns));
	eval(shift(@insns));
	eval(shift(@insns));
	eval(shift(@insns));

	&vpsrld	(@X[2],@X[0],31);
	eval(shift(@insns));
	eval(shift(@insns));
	eval(shift(@insns));
	eval(shift(@insns));

	&vpslldq(@X[4],@X[0],12);		# "X[0]"<<96, extract one dword
	&vpaddd	(@X[0],@X[0],@X[0]);
	eval(shift(@insns));
	eval(shift(@insns));
	eval(shift(@insns));
	eval(shift(@insns));

	&vpsrld	(@X[3],@X[4],30);
	&vpor	(@X[0],@X[0],@X[2]);		# "X[0]"<<<=1
	eval(shift(@insns));
	eval(shift(@insns));
	eval(shift(@insns));
	eval(shift(@insns));

	&vpslld	(@X[4],@X[4],2);
	&vmovdqa(@X[2],&QWP(64+16*(($Xi-6)%3),"esp")) if ($Xi>5);	# restore X[] from backtrace buffer
	eval(shift(@insns));
	eval(shift(@insns));
	&vpxor	(@X[0],@X[0],@X[3]);
	eval(shift(@insns));
	eval(shift(@insns));
	eval(shift(@insns));
	eval(shift(@insns));

	&vpxor	(@X[0],@X[0],@X[4]);		# "X[0]"^=("X[0]"<<96)<<<2
	eval(shift(@insns));
	eval(shift(@insns));
	&vmovdqa(@X[4],&QWP(112-16+16*(($Xi)/5),"esp"));	# K_XX_XX
	eval(shift(@insns));
	eval(shift(@insns));

	foreach (@insns) { eval; }	# remaining instructions [if any]

  $Xi++;	push(@X,shift(@X));	# "rotate" X[]
}

sub Xupdate_avx_32_79()
{ use integer;
  my $body = shift;
  my @insns = (&$body,&$body,&$body,&$body);	# 32 to 44 instructions
  my ($a,$b,$c,$d,$e);

	&vpalignr(@X[2],@X[-1&7],@X[-2&7],8);	# compose "X[-6]"
	&vpxor	(@X[0],@X[0],@X[-4&7]);		# "X[0]"="X[-32]"^"X[-16]"
	eval(shift(@insns));		# body_20_39
	eval(shift(@insns));
	eval(shift(@insns));
	eval(shift(@insns));		# rol

	&vpxor	(@X[0],@X[0],@X[-7&7]);		# "X[0]"^="X[-28]"
	&vmovdqa(&QWP(64+16*(($Xi-4)%3),"esp"),@X[-4&7]);	# save X[] to backtrace buffer
	eval(shift(@insns));
	eval(shift(@insns));
  if ($Xi%5) {
	&vmovdqa(@X[4],@X[3]);		# "perpetuate" K_XX_XX...
  } else {				# ... or load next one
	&vmovdqa(@X[4],&QWP(112-16+16*($Xi/5),"esp"));
  }
	&vpaddd	(@X[3],@X[3],@X[-1&7]);
	eval(shift(@insns));		# ror
	eval(shift(@insns));

	&vpxor	(@X[0],@X[0],@X[2]);		# "X[0]"^="X[-6]"
	eval(shift(@insns));		# body_20_39
	eval(shift(@insns));
	eval(shift(@insns));
	eval(shift(@insns));		# rol

	&vpsrld	(@X[2],@X[0],30);
	&vmovdqa(&QWP(0+16*(($Xi-1)&3),"esp"),@X[3]);	# X[]+K xfer to IALU
	eval(shift(@insns));
	eval(shift(@insns));
	eval(shift(@insns));		# ror
	eval(shift(@insns));

	&vpslld	(@X[0],@X[0],2);
	eval(shift(@insns));		# body_20_39
	eval(shift(@insns));
	eval(shift(@insns));
	eval(shift(@insns));		# rol
	eval(shift(@insns));
	eval(shift(@insns));
	eval(shift(@insns));		# ror
	eval(shift(@insns));

	&vpor	(@X[0],@X[0],@X[2]);		# "X[0]"<<<=2
	eval(shift(@insns));		# body_20_39
	eval(shift(@insns));
	&vmovdqa(@X[2],&QWP(64+16*(($Xi-6)%3),"esp")) if($Xi<19);	# restore X[] from backtrace buffer
	eval(shift(@insns));
	eval(shift(@insns));		# rol
	eval(shift(@insns));
	eval(shift(@insns));
	eval(shift(@insns));		# ror
	eval(shift(@insns));

	foreach (@insns) { eval; }	# remaining instructions

  $Xi++;	push(@X,shift(@X));	# "rotate" X[]
}

sub Xuplast_avx_80()
{ use integer;
  my $body = shift;
  my @insns = (&$body,&$body,&$body,&$body);	# 32 instructions
  my ($a,$b,$c,$d,$e);

	eval(shift(@insns));
	&vpaddd	(@X[3],@X[3],@X[-1&7]);
	eval(shift(@insns));
	eval(shift(@insns));
	eval(shift(@insns));
	eval(shift(@insns));

	&vmovdqa(&QWP(0+16*(($Xi-1)&3),"esp"),@X[3]);	# X[]+K xfer IALU

	foreach (@insns) { eval; }	# remaining instructions

	&mov	($inp=@T[1],&DWP(192+4,"esp"));
	&cmp	($inp,&DWP(192+8,"esp"));
	&je	(&label("done"));

	&vmovdqa(@X[3],&QWP(112+48,"esp"));	# K_00_19
	&vmovdqa(@X[2],&QWP(112+64,"esp"));	# pbswap mask
	&vmovdqu(@X[-4&7],&QWP(0,$inp));	# load input
	&vmovdqu(@X[-3&7],&QWP(16,$inp));
	&vmovdqu(@X[-2&7],&QWP(32,$inp));
	&vmovdqu(@X[-1&7],&QWP(48,$inp));
	&add	($inp,64);
	&vpshufb(@X[-4&7],@X[-4&7],@X[2]);	# byte swap
	&mov	(&DWP(192+4,"esp"),$inp);
	&vmovdqa(&QWP(112-16,"esp"),@X[3]);	# borrow last backtrace slot

  $Xi=0;
}

sub Xloop_avx()
{ use integer;
  my $body = shift;
  my @insns = (&$body,&$body,&$body,&$body);	# 32 instructions
  my ($a,$b,$c,$d,$e);

	eval(shift(@insns));
	eval(shift(@insns));
	&vpshufb(@X[($Xi-3)&7],@X[($Xi-3)&7],@X[2]);
	eval(shift(@insns));
	eval(shift(@insns));
	&vpaddd	(@X[$Xi&7],@X[($Xi-4)&7],@X[3]);
	eval(shift(@insns));
	eval(shift(@insns));
	eval(shift(@insns));
	eval(shift(@insns));
	&vmovdqa(&QWP(0+16*$Xi,"esp"),@X[$Xi&7]);	# X[]+K xfer to IALU
	eval(shift(@insns));
	eval(shift(@insns));

	foreach (@insns) { eval; }

  $Xi++;
}

sub Xtail_avx()
{ use integer;
  my $body = shift;
  my @insns = (&$body,&$body,&$body,&$body);	# 32 instructions
  my ($a,$b,$c,$d,$e);

	foreach (@insns) { eval; }
}
1412 &set_label("loop",16);
1413 &Xupdate_avx_16_31(\&body_00_19);
1414 &Xupdate_avx_16_31(\&body_00_19);
1415 &Xupdate_avx_16_31(\&body_00_19);
1416 &Xupdate_avx_16_31(\&body_00_19);
1417 &Xupdate_avx_32_79(\&body_00_19);
1418 &Xupdate_avx_32_79(\&body_20_39);
1419 &Xupdate_avx_32_79(\&body_20_39);
1420 &Xupdate_avx_32_79(\&body_20_39);
1421 &Xupdate_avx_32_79(\&body_20_39);
1422 &Xupdate_avx_32_79(\&body_20_39);
1423 &Xupdate_avx_32_79(\&body_40_59);
1424 &Xupdate_avx_32_79(\&body_40_59);
1425 &Xupdate_avx_32_79(\&body_40_59);
1426 &Xupdate_avx_32_79(\&body_40_59);
1427 &Xupdate_avx_32_79(\&body_40_59);
1428 &Xupdate_avx_32_79(\&body_20_39);
1429 &Xuplast_avx_80(\&body_20_39); # can jump to "done"
1431 $saved_j=$j; @saved_V=@V;
1433 &Xloop_avx(\&body_20_39);
1434 &Xloop_avx(\&body_20_39);
1435 &Xloop_avx(\&body_20_39);

	&mov	(@T[1],&DWP(192,"esp"));	# update context
	&add	($A,&DWP(0,@T[1]));
	&add	(@T[0],&DWP(4,@T[1]));		# $b
	&add	($C,&DWP(8,@T[1]));
	&mov	(&DWP(0,@T[1]),$A);
	&add	($D,&DWP(12,@T[1]));
	&mov	(&DWP(4,@T[1]),@T[0]);
	&add	($E,&DWP(16,@T[1]));
	&mov	($B,@T[0]);
	&mov	(&DWP(8,@T[1]),$C);
	&mov	(&DWP(12,@T[1]),$D);
	&mov	(&DWP(16,@T[1]),$E);

	&jmp	(&label("loop"));
1456 &set_label("done",16); $j=$saved_j; @V=@saved_V;
1458 &Xtail_avx(\&body_20_39);
1459 &Xtail_avx(\&body_20_39);
1460 &Xtail_avx(\&body_20_39);
1464 &mov (@T[1],&DWP(192,"esp")); # update context
1465 &add ($A,&DWP(0,@T[1]));
1466 &mov ("esp",&DWP(192+12,"esp")); # restore %esp
1467 &add (@T[0],&DWP(4,@T[1])); # $b
1468 &add ($C,&DWP(8,@T[1]));
1469 &mov (&DWP(0,@T[1]),$A);
1470 &add ($D,&DWP(12,@T[1]));
1471 &mov (&DWP(4,@T[1]),@T[0]);
1472 &add ($E,&DWP(16,@T[1]));
1473 &mov (&DWP(8,@T[1]),$C);
1474 &mov (&DWP(12,@T[1]),$D);
1475 &mov (&DWP(16,@T[1]),$E);
1476 &function_end("_sha1_block_data_order_avx");
1478 &set_label("K_XX_XX",64);
1479 &data_word(0x5a827999,0x5a827999,0x5a827999,0x5a827999); # K_00_19
1480 &data_word(0x6ed9eba1,0x6ed9eba1,0x6ed9eba1,0x6ed9eba1); # K_20_39
1481 &data_word(0x8f1bbcdc,0x8f1bbcdc,0x8f1bbcdc,0x8f1bbcdc); # K_40_59
1482 &data_word(0xca62c1d6,0xca62c1d6,0xca62c1d6,0xca62c1d6); # K_60_79
1483 &data_word(0x00010203,0x04050607,0x08090a0b,0x0c0d0e0f); # pbswap mask
1484 &data_byte(0xf,0xe,0xd,0xc,0xb,0xa,0x9,0x8,0x7,0x6,0x5,0x4,0x3,0x2,0x1,0x0);
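
# A note on the two shuffle masks above: the dword pattern
# 0x00010203,... feeds pshufb/vpshufb in the SSSE3/AVX paths and
# reverses bytes within each 32-bit word (big-endian message load),
# while the 0f..00 byte pattern is the $BSWAP mask of the SHAEXT path
# (loaded from offset 0x50 above), reversing all 16 bytes to get the
# byte- *and* word-swapped layout that sha1rnds4 expects.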

&asciz("SHA1 block transform for x86, CRYPTOGAMS by <appro\@openssl.org>");