3 # ====================================================================
4 # [Re]written by Andy Polyakov <appro@fy.chalmers.se> for the OpenSSL
5 # project. The module is, however, dual licensed under OpenSSL and
6 # CRYPTOGAMS licenses depending on where you obtain it. For further
7 # details see http://www.openssl.org/~appro/cryptogams/.
8 # ====================================================================
10 # "[Re]written" was achieved in two major overhauls. In 2004 BODY_*
11 # functions were re-implemented to address P4 performance issue [see
12 # commentary below], and in 2006 the rest was rewritten in order to
13 # gain freedom to liberate licensing terms.
15 # January, September 2004.
# It was noted that the Intel IA-32 C compiler generates code which
# performs ~30% *faster* on P4 CPU than the original *hand-coded*
# SHA1 assembler implementation. To address this problem (and prove
# that humans are still better than machines:-), the original code
# was overhauled, which resulted in the following performance
# changes:
24 # compared with original compared with Intel cc
25 # assembler impl. generated code
# As you can see, Pentium came out the loser:-( Yet I reckoned that
# the improvement on P4 outweighs the loss, and incorporated this
# re-tuned code into 0.9.7 and later.
33 # ----------------------------------------------------------------
34 # <appro@fy.chalmers.se>
# George Spelvin pointed out that F_40_59(b,c,d) can be rewritten as
# '(c&d) + (b&(c^d))', which makes it possible to accumulate partial
# results and lighten the "pressure" on scratch registers. This
# resulted in a >12% performance improvement on contemporary AMD
# cores (with no degradation on other CPUs:-). Also, the code was
# revised to maximize the "distance" between instructions producing
# input to the 'lea' instruction and the 'lea' instruction itself,
# which is essential for the Intel Atom core and resulted in a ~15%
# improvement.
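#
# For reference, the identity holds because the two terms can never
# both be set for the same bit, so the addition never carries; it can
# be sanity-checked over single bits with a throwaway Perl snippet
# like this (illustrative only, not part of the build):
#
#	for my $i (0..7) {
#		my ($b,$c,$d) = (($i>>2)&1, ($i>>1)&1, $i&1);
#		(($b&$c)|($c&$d)|($b&$d)) == (($c&$d)+($b&($c^$d)))
#			or die "identity fails for b=$b c=$c d=$d";
#	}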
# Add SSSE3, Supplemental[!] SSE3, implementation. The idea behind it
# is to offload the message schedule, denoted by Wt in the NIST
# specification, or Xupdate in the OpenSSL source, to the SIMD unit.
# The idea is not novel; in an SSE2 context it was first explored by
# Dean Gaudet in 2004, see http://arctic.org/~dean/crypto/sha1.html.
# Since then several things have changed that make it interesting
# again:
56 # a) XMM units became faster and wider;
57 # b) instruction set became more versatile;
# c) an important observation was made by Max Locktyukhin, which made
# it possible to reduce the number of instructions required to
# perform the operation in question, for further details see
# http://software.intel.com/en-us/articles/improving-the-performance-of-the-secure-hash-algorithm-1/.
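#
# For orientation, the recurrence being vectorized is the standard
# message schedule,
#
#	W[t] = (W[t-3] ^ W[t-8] ^ W[t-14] ^ W[t-16]) <<< 1,  16<=t<80,
#
# and the snag is that W[t-3] falls inside the very quadruple being
# computed. Below, the 16_31 rounds handle this by computing the
# offending dword as zero and patching it in afterwards, while the
# 32_79 rounds switch to the equivalent form obtained by unrolling
# the recurrence twice,
#
#	W[t] = (W[t-6] ^ W[t-16] ^ W[t-28] ^ W[t-32]) <<< 2,  32<=t<80,
#
# which references nothing closer than 6 positions back.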
# Add AVX code path, probably the most controversial... The thing is
# that switching to AVX alone improves performance by as little as 4%
# in comparison to the SSSE3 code path. But the results below don't
# look like a 4% improvement... The trouble is that Sandy Bridge
# decodes 'ro[rl]' as a pair of µ-ops, and it's the additional µ-ops,
# two per round, that make it run slower than Core2 and Westmere. But
# 'sh[rl]d' is decoded as a single µ-op by Sandy Bridge, and it's
# replacing 'ro[rl]' with the equivalent 'sh[rl]d' that is responsible
# for the impressive 5.1 cycles per processed byte. 'sh[rl]d' has
# never been particularly fast elsewhere, nor does it appear to be
# fast on the upcoming Bulldozer [according to its optimization
# manual], which is why the AVX code path is guarded by *both* the
# AVX bit and the synthetic bit denoting Intel CPUs. One can argue
# that this is unfair to AMD, but without 'sh[rl]d' it makes no sense
# to keep the AVX code path. If somebody feels strongly about it, the
# more appropriate thing to discuss is probably using the XOP vector
# rotate on AMD...
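#
# For the record, the substitution in question is result-equivalent,
# the two instructions differing only in flag effects and, per the
# note above, in how Sandy Bridge decodes them:
#
#	rol	eax,5			# 2 µ-ops on Sandy Bridge
#	shld	eax,eax,5		# 1 µ-op, same result in eax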
82 ######################################################################
# Current performance is summarized in the following table. Numbers
# are CPU clock cycles spent to process a single byte (less is
# better).
#		x86		SSSE3		AVX
#
# Core2		7.3		6.1/+20%	-
# Atom		12.5		9.3(*)/+35%	-
# Westmere	7.3		5.5/+33%	-
# Sandy Bridge	8.8		6.2/+40%	5.2(**)/+70%
# Ivy Bridge	7.2		4.8/+51%	4.7(**)/+53%
# Bulldozer	11.6		6.0/+92%
# VIA Nano	10.6		7.6/+40%
# (*) The loop is 1056 instructions long and the expected result is
# ~8.25. It remains a mystery [to me] why ILP is limited to 1.7.
#
# (**) As per the comment above, the result is for AVX *plus* sh[rl]d.
104 $0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
push(@INC,"${dir}","${dir}../../perlasm");
require "x86asm.pl";
108 &asm_init($ARGV[0],"sha1-586.pl",$ARGV[$#ARGV] eq "386");
111 for (@ARGV) { $xmm=1 if (/-DOPENSSL_IA32_SSE2/); }
$ymm=1 if ($xmm && !($ARGV[0] =~ /win/) &&
	`$ENV{CC} -Wa,-v -c -o /dev/null -x assembler /dev/null 2>&1`
		=~ /GNU assembler version ([2-9]\.[0-9]+)/ &&
	$1>=2.19);	# first version supporting AVX
118 $ymm=1 if ($xmm && !$ymm && $ARGV[0] eq "win32n" &&
119 `nasm -v 2>&1` =~ /NASM version ([2-9]\.[0-9]+)/ &&
120 $1>=2.03); # first version supporting AVX
122 $ymm=1 if ($xmm && !$ymm && $ARGV[0] eq "win32" &&
123 `ml 2>&1` =~ /Version ([0-9]+)\./ &&
124 $1>=10); # first version supporting AVX
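# (Which probe fires can be reproduced by hand with the same
# commands, e.g. `$CC -Wa,-v -c -o /dev/null -x assembler /dev/null'
# for gas, `nasm -v' for win32n, or `ml' for win32, comparing the
# reported version against the minima noted above.)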
126 &external_label("OPENSSL_ia32cap_P") if ($xmm);
137 @V=($A,$B,$C,$D,$E,$T);
$alt=0;	# 1 denotes alternative IALU implementation, which performs
	# 8% *worse* on P4, same on Westmere and Atom, 2% better on
	# Sandy Bridge...
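# The bodies below compute the selection function branch-free as
# F_00_19(b,c,d) = (b&c)|(~b&d) = ((c^d)&b)^d, which needs a single
# temporary and no NOT instruction; over single bits (illustrative
# only):
#
#	((($b&$c)|(~$b&$d))&1) == ((($c^$d)&$b)^$d) or die;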
sub BODY_00_15
	{
	local($n,$a,$b,$c,$d,$e,$f)=@_;
147 &comment("00_15 $n");
149 &mov($f,$c); # f to hold F_00_19(b,c,d)
150 if ($n==0) { &mov($tmp1,$a); }
151 else { &mov($a,$tmp1); }
	&rotl($tmp1,5);			# tmp1=ROTATE(a,5)
	&xor($f,$d);			# f=c^d
	&add($tmp1,$e);			# tmp1+=e;
	&mov($e,&swtmp($n%16));		# e becomes volatile and is loaded
					# with xi, also note that e becomes
					# f in next round...
	&and($f,$b);			# f=b&(c^d)
	&rotr($b,2);			# b=ROTATE(b,30)
	&xor($f,$d);			# f holds F_00_19(b,c,d)
161 &lea($tmp1,&DWP(0x5a827999,$tmp1,$e)); # tmp1+=K_00_19+xi
163 if ($n==15) { &mov($e,&swtmp(($n+1)%16));# pre-fetch f for next round
164 &add($f,$tmp1); } # f+=tmp1
165 else { &add($tmp1,$f); } # f becomes a in next round
	&mov($tmp1,$a) if ($alt && $n==15);
	}

sub BODY_16_19
	{
	local($n,$a,$b,$c,$d,$e,$f)=@_;

	&comment("16_19 $n");

if ($alt) {
	&xor($c,$d);			# c=c^d for the "and" below
177 &xor($f,&swtmp(($n+2)%16)); # f to hold Xupdate(xi,xa,xb,xc,xd)
178 &and($tmp1,$c); # tmp1 to hold F_00_19(b,c,d), b&=c^d
179 &xor($f,&swtmp(($n+8)%16));
180 &xor($tmp1,$d); # tmp1=F_00_19(b,c,d)
181 &xor($f,&swtmp(($n+13)%16)); # f holds xa^xb^xc^xd
182 &rotl($f,1); # f=ROTATE(f,1)
183 &add($e,$tmp1); # e+=F_00_19(b,c,d)
184 &xor($c,$d); # restore $c
185 &mov($tmp1,$a); # b in next round
186 &rotr($b,$n==16?2:7); # b=ROTATE(b,30)
187 &mov(&swtmp($n%16),$f); # xi=f
188 &rotl($a,5); # ROTATE(a,5)
189 &lea($f,&DWP(0x5a827999,$f,$e));# f+=F_00_19(b,c,d)+e
190 &mov($e,&swtmp(($n+1)%16)); # pre-fetch f for next round
191 &add($f,$a); # f+=ROTATE(a,5)
} else {
	&mov($tmp1,$c);			# tmp1 to hold F_00_19(b,c,d)
	&xor($f,&swtmp(($n+2)%16));	# f to hold Xupdate(xi,xa,xb,xc,xd)
	&xor($tmp1,$d);			# tmp1=c^d
	&xor($f,&swtmp(($n+8)%16));
	&and($tmp1,$b);			# tmp1=b&(c^d)
	&xor($f,&swtmp(($n+13)%16));	# f holds xa^xb^xc^xd
	&rotl($f,1);			# f=ROTATE(f,1)
	&xor($tmp1,$d);			# tmp1=F_00_19(b,c,d)
	&add($e,$tmp1);			# e+=F_00_19(b,c,d)
	&mov($tmp1,$a);			# b in next round
	&rotr($b,2);			# b=ROTATE(b,30)
204 &mov(&swtmp($n%16),$f); # xi=f
205 &rotl($tmp1,5); # ROTATE(a,5)
206 &lea($f,&DWP(0x5a827999,$f,$e));# f+=F_00_19(b,c,d)+e
207 &mov($e,&swtmp(($n+1)%16)); # pre-fetch f for next round
	&add($f,$tmp1);			# f+=ROTATE(a,5)
}
	}

sub BODY_20_39
	{
	local($n,$a,$b,$c,$d,$e,$f)=@_;
	local $K=($n<40)?0x6ed9eba1:0xca62c1d6;

	&comment("20_39 $n");

if ($alt) {
220 &xor($tmp1,$c); # tmp1 to hold F_20_39(b,c,d), b^=c
221 &xor($f,&swtmp(($n+2)%16)); # f to hold Xupdate(xi,xa,xb,xc,xd)
222 &xor($tmp1,$d); # tmp1 holds F_20_39(b,c,d)
223 &xor($f,&swtmp(($n+8)%16));
224 &add($e,$tmp1); # e+=F_20_39(b,c,d)
225 &xor($f,&swtmp(($n+13)%16)); # f holds xa^xb^xc^xd
226 &rotl($f,1); # f=ROTATE(f,1)
227 &mov($tmp1,$a); # b in next round
228 &rotr($b,7); # b=ROTATE(b,30)
229 &mov(&swtmp($n%16),$f) if($n<77);# xi=f
230 &rotl($a,5); # ROTATE(a,5)
231 &xor($b,$c) if($n==39);# warm up for BODY_40_59
232 &and($tmp1,$b) if($n==39);
233 &lea($f,&DWP($K,$f,$e)); # f+=e+K_XX_YY
234 &mov($e,&swtmp(($n+1)%16)) if($n<79);# pre-fetch f for next round
235 &add($f,$a); # f+=ROTATE(a,5)
236 &rotr($a,5) if ($n==79);
} else {
	&mov($tmp1,$b);			# tmp1 to hold F_20_39(b,c,d)
	&xor($f,&swtmp(($n+2)%16));	# f to hold Xupdate(xi,xa,xb,xc,xd)
	&xor($tmp1,$c);
241 &xor($f,&swtmp(($n+8)%16));
242 &xor($tmp1,$d); # tmp1 holds F_20_39(b,c,d)
243 &xor($f,&swtmp(($n+13)%16)); # f holds xa^xb^xc^xd
244 &rotl($f,1); # f=ROTATE(f,1)
245 &add($e,$tmp1); # e+=F_20_39(b,c,d)
	&rotr($b,2);			# b=ROTATE(b,30)
	&mov($tmp1,$a);			# b in next round
248 &rotl($tmp1,5); # ROTATE(a,5)
249 &mov(&swtmp($n%16),$f) if($n<77);# xi=f
250 &lea($f,&DWP($K,$f,$e)); # f+=e+K_XX_YY
251 &mov($e,&swtmp(($n+1)%16)) if($n<79);# pre-fetch f for next round
	&add($f,$tmp1);			# f+=ROTATE(a,5)
}
	}

sub BODY_40_59
	{
	local($n,$a,$b,$c,$d,$e,$f)=@_;

	&comment("40_59 $n");

if ($alt) {
	&add($e,$tmp1);			# e+=b&(c^d)
	&xor($f,&swtmp(($n+2)%16));	# f to hold Xupdate(xi,xa,xb,xc,xd)
	&mov($tmp1,$d);			# tmp1 to hold c&d below
266 &xor($f,&swtmp(($n+8)%16));
267 &xor($c,$d); # restore $c
268 &xor($f,&swtmp(($n+13)%16)); # f holds xa^xb^xc^xd
269 &rotl($f,1); # f=ROTATE(f,1)
	&and($tmp1,$c);			# tmp1=c&d
	&rotr($b,7);			# b=ROTATE(b,30)
272 &add($e,$tmp1); # e+=c&d
273 &mov($tmp1,$a); # b in next round
274 &mov(&swtmp($n%16),$f); # xi=f
275 &rotl($a,5); # ROTATE(a,5)
276 &xor($b,$c) if ($n<59);
277 &and($tmp1,$b) if ($n<59);# tmp1 to hold F_40_59(b,c,d)
278 &lea($f,&DWP(0x8f1bbcdc,$f,$e));# f+=K_40_59+e+(b&(c^d))
279 &mov($e,&swtmp(($n+1)%16)); # pre-fetch f for next round
280 &add($f,$a); # f+=ROTATE(a,5)
} else {
	&mov($tmp1,$c);			# tmp1 to hold F_40_59(b,c,d)
	&xor($f,&swtmp(($n+2)%16));	# f to hold Xupdate(xi,xa,xb,xc,xd)
	&xor($tmp1,$d);			# tmp1=c^d
	&xor($f,&swtmp(($n+8)%16));
	&and($tmp1,$b);			# tmp1=b&(c^d)
	&xor($f,&swtmp(($n+13)%16));	# f holds xa^xb^xc^xd
288 &rotl($f,1); # f=ROTATE(f,1)
289 &add($tmp1,$e); # b&(c^d)+=e
290 &rotr($b,2); # b=ROTATE(b,30)
291 &mov($e,$a); # e becomes volatile
292 &rotl($e,5); # ROTATE(a,5)
293 &mov(&swtmp($n%16),$f); # xi=f
	&lea($f,&DWP(0x8f1bbcdc,$f,$tmp1));# f+=K_40_59+e+(b&(c^d))
	&mov($tmp1,$c);			# tmp1 to hold c&d
	&add($f,$e);			# f+=ROTATE(a,5)
	&and($tmp1,$d);			# tmp1=c&d
	&mov($e,&swtmp(($n+1)%16));	# pre-fetch f for next round
	&add($f,$tmp1);			# f+=c&d
}
	}
303 &function_begin("sha1_block_data_order");
305 &static_label("ssse3_shortcut");
306 &static_label("avx_shortcut") if ($ymm);
307 &static_label("K_XX_XX");
309 &call (&label("pic_point")); # make it PIC!
310 &set_label("pic_point");
312 &picmeup($T,"OPENSSL_ia32cap_P",$tmp1,&label("pic_point"));
313 &lea ($tmp1,&DWP(&label("K_XX_XX")."-".&label("pic_point"),$tmp1));
315 &mov ($A,&DWP(0,$T));
316 &mov ($D,&DWP(4,$T));
317 &test ($D,1<<9); # check SSSE3 bit
319 &test ($A,1<<24); # check FXSR bit
322 &and ($D,1<<28); # mask AVX bit
323 &and ($A,1<<30); # mask "Intel CPU" bit
325 &cmp ($A,1<<28|1<<30);
326 &je (&label("avx_shortcut"));
328 &jmp (&label("ssse3_shortcut"));
329 &set_label("x86",16);
331 &mov($tmp1,&wparam(0)); # SHA_CTX *c
332 &mov($T,&wparam(1)); # const void *input
333 &mov($A,&wparam(2)); # size_t num
334 &stack_push(16+3); # allocate X[16]
	&shl($A,6);		# multiply by 64
	&add($A,$T);
	&mov(&wparam(2),$A);	# pointer beyond the end of input
338 &mov($E,&DWP(16,$tmp1));# pre-load E
339 &jmp(&label("loop"));
341 &set_label("loop",16);
343 # copy input chunk to X, but reversing byte order!
	for ($i=0; $i<16; $i+=4)
		{
		&mov($A,&DWP(4*($i+0),$T));
		&mov($B,&DWP(4*($i+1),$T));
		&mov($C,&DWP(4*($i+2),$T));
		&mov($D,&DWP(4*($i+3),$T));
		&bswap($A);
		&bswap($B);
		&bswap($C);
		&bswap($D);
		&mov(&swtmp($i+0),$A);
		&mov(&swtmp($i+1),$B);
		&mov(&swtmp($i+2),$C);
		&mov(&swtmp($i+3),$D);
		}
359 &mov(&wparam(1),$T); # redundant in 1st spin
361 &mov($A,&DWP(0,$tmp1)); # load SHA_CTX
362 &mov($B,&DWP(4,$tmp1));
363 &mov($C,&DWP(8,$tmp1));
364 &mov($D,&DWP(12,$tmp1));
367 for($i=0;$i<16;$i++) { &BODY_00_15($i,@V); unshift(@V,pop(@V)); }
368 for(;$i<20;$i++) { &BODY_16_19($i,@V); unshift(@V,pop(@V)); }
369 for(;$i<40;$i++) { &BODY_20_39($i,@V); unshift(@V,pop(@V)); }
370 for(;$i<60;$i++) { &BODY_40_59($i,@V); unshift(@V,pop(@V)); }
371 for(;$i<80;$i++) { &BODY_20_39($i,@V); unshift(@V,pop(@V)); }
373 (($V[5] eq $D) and ($V[0] eq $E)) or die; # double-check
375 &mov($tmp1,&wparam(0)); # re-load SHA_CTX*
376 &mov($D,&wparam(1)); # D is last "T" and is discarded
378 &add($E,&DWP(0,$tmp1)); # E is last "A"...
379 &add($T,&DWP(4,$tmp1));
380 &add($A,&DWP(8,$tmp1));
381 &add($B,&DWP(12,$tmp1));
382 &add($C,&DWP(16,$tmp1));
384 &mov(&DWP(0,$tmp1),$E); # update SHA_CTX
385 &add($D,64); # advance input pointer
386 &mov(&DWP(4,$tmp1),$T);
387 &cmp($D,&wparam(2)); # have we reached the end yet?
388 &mov(&DWP(8,$tmp1),$A);
389 &mov($E,$C); # C is last "E" which needs to be "pre-loaded"
390 &mov(&DWP(12,$tmp1),$B);
391 &mov($T,$D); # input pointer
	&mov(&DWP(16,$tmp1),$C);
	&jb(&label("loop"));

	&stack_pop(16+3);
&function_end("sha1_block_data_order");
399 ######################################################################
400 # The SSSE3 implementation.
# %xmm[0-7] are used as a ring @X[] buffer containing quadruples of
# the last 32 elements of the message schedule or Xupdate outputs.
# The first 4 quadruples are simply byte-swapped input, the next 4
# are calculated according to the method originally suggested by
# Dean Gaudet (modulo being implemented in SSSE3). Once 8 quadruples
# or 32 elements are collected, it switches to the routine proposed
# by Max Locktyukhin.
# Calculations inevitably require temporary registers, and there are
# no %xmm registers left to spare. For this reason part of the ring
# buffer, X[2..4] to be specific, is offloaded to a ring buffer of 3
# quadruples on the stack. Keep in mind that X[2] is an alias for
# X[-6], X[3] for X[-5], and X[4] for X[-4]...
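#
# The aliasing follows from the 8-entry ring: negative indices wrap
# modulo 8, e.g. in Perl -6&7 == 2, -5&7 == 3 and -4&7 == 4, which is
# why the code below addresses the ring as @X[-6&7], @X[-4&7] and so
# on.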
# Another notable optimization is aggressive stack frame compression
# aiming to minimize the number of 9-byte instructions...
#
# Yet another notable optimization is the "jumping" $B variable. It
# means that no register is permanently allocated to hold the $B
# value. This made it possible to eliminate one instruction from
# body_20_39...
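# Concretely, each body below re-creates $B for the next round with a
# plain '&mov (@T[1],$a);' while the register assignment rotates
# underneath it (see the unshift(@V,pop(@V)) at the end of every
# body), so no fixed register ever holds $B.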
422 my $Xi=4; # 4xSIMD Xupdate round, start pre-seeded
423 my @X=map("xmm$_",(4..7,0..3)); # pre-seeded for $Xi=4
424 my @V=($A,$B,$C,$D,$E);
my $j=0;			# hash round
my @T=($T,$tmp1);
my $inp;
429 my $_rol=sub { &rol(@_) };
430 my $_ror=sub { &ror(@_) };
432 &function_begin("_sha1_block_data_order_ssse3");
433 &call (&label("pic_point")); # make it PIC!
434 &set_label("pic_point");
436 &lea ($tmp1,&DWP(&label("K_XX_XX")."-".&label("pic_point"),$tmp1));
437 &set_label("ssse3_shortcut");
439 &movdqa (@X[3],&QWP(0,$tmp1)); # K_00_19
440 &movdqa (@X[4],&QWP(16,$tmp1)); # K_20_39
441 &movdqa (@X[5],&QWP(32,$tmp1)); # K_40_59
442 &movdqa (@X[6],&QWP(48,$tmp1)); # K_60_79
443 &movdqa (@X[2],&QWP(64,$tmp1)); # pbswap mask
445 &mov ($E,&wparam(0)); # load argument block
446 &mov ($inp=@T[1],&wparam(1));
447 &mov ($D,&wparam(2));
452 # +0 X[0]+K X[1]+K X[2]+K X[3]+K # XMM->IALU xfer area
453 # X[4]+K X[5]+K X[6]+K X[7]+K
454 # X[8]+K X[9]+K X[10]+K X[11]+K
455 # X[12]+K X[13]+K X[14]+K X[15]+K
457 # +64 X[0] X[1] X[2] X[3] # XMM->XMM backtrace area
458 # X[4] X[5] X[6] X[7]
459 # X[8] X[9] X[10] X[11] # even borrowed for K_00_19
461 # +112 K_20_39 K_20_39 K_20_39 K_20_39 # constants
462 # K_40_59 K_40_59 K_40_59 K_40_59
463 # K_60_79 K_60_79 K_60_79 K_60_79
464 # K_00_19 K_00_19 K_00_19 K_00_19
	# +192	ctx			# argument block
	# +196	inp
	# +200	end
	# +204	esp

	&mov	(@T[0],"esp");
	&sub	("esp",208);
	&and	("esp",-64);
474 &movdqa (&QWP(112+0,"esp"),@X[4]); # copy constants
475 &movdqa (&QWP(112+16,"esp"),@X[5]);
476 &movdqa (&QWP(112+32,"esp"),@X[6]);
477 &shl ($D,6); # len*64
478 &movdqa (&QWP(112+48,"esp"),@X[3]);
479 &add ($D,$inp); # end of input
480 &movdqa (&QWP(112+64,"esp"),@X[2]);
482 &mov (&DWP(192+0,"esp"),$E); # save argument block
483 &mov (&DWP(192+4,"esp"),$inp);
484 &mov (&DWP(192+8,"esp"),$D);
485 &mov (&DWP(192+12,"esp"),@T[0]); # save original %esp
487 &mov ($A,&DWP(0,$E)); # load context
488 &mov ($B,&DWP(4,$E));
489 &mov ($C,&DWP(8,$E));
490 &mov ($D,&DWP(12,$E));
491 &mov ($E,&DWP(16,$E));
492 &mov (@T[0],$B); # magic seed
494 &movdqu (@X[-4&7],&QWP(-64,$inp)); # load input to %xmm[0-3]
495 &movdqu (@X[-3&7],&QWP(-48,$inp));
496 &movdqu (@X[-2&7],&QWP(-32,$inp));
497 &movdqu (@X[-1&7],&QWP(-16,$inp));
498 &pshufb (@X[-4&7],@X[2]); # byte swap
499 &pshufb (@X[-3&7],@X[2]);
500 &pshufb (@X[-2&7],@X[2]);
501 &movdqa (&QWP(112-16,"esp"),@X[3]); # borrow last backtrace slot
502 &pshufb (@X[-1&7],@X[2]);
503 &paddd (@X[-4&7],@X[3]); # add K_00_19
504 &paddd (@X[-3&7],@X[3]);
505 &paddd (@X[-2&7],@X[3]);
506 &movdqa (&QWP(0,"esp"),@X[-4&7]); # X[]+K xfer to IALU
507 &psubd (@X[-4&7],@X[3]); # restore X[]
508 &movdqa (&QWP(0+16,"esp"),@X[-3&7]);
509 &psubd (@X[-3&7],@X[3]);
510 &movdqa (&QWP(0+32,"esp"),@X[-2&7]);
511 &psubd (@X[-2&7],@X[3]);
512 &movdqa (@X[0],@X[-3&7]);
513 &jmp (&label("loop"));
515 ######################################################################
# The SSE instruction sequence is first broken into groups of
# independent instructions, independent with respect to their inputs
# and the shifter (not all architectures have more than one). Then
# IALU instructions are "knitted in" between the SSE groups. Distance
# is maintained for an SSE latency of 2, in the hope that it suits
# the upcoming AMD Bulldozer [which allegedly also implements
# SSSE3]...
# Temporary register usage. X[2] is volatile at the entry and at the
# end is restored from the backtrace ring buffer. X[3] is expected to
# contain the current K_XX_XX constant and is used to calculate
# X[-1]+K from the previous round; it becomes volatile the moment the
# value is saved to the stack for transfer to the IALU. X[4] becomes
# volatile whenever X[-4] is accumulated and offloaded to the
# backtrace ring buffer; at the end it is loaded with the next
# K_XX_XX [which becomes X[3] in the next round]...
sub Xupdate_ssse3_16_31()		# recall that $Xi starts with 4
{
  my $body = shift;
  my @insns = (&$body,&$body,&$body,&$body);	# 40 instructions
  my ($a,$b,$c,$d,$e);
540 &palignr(@X[0],@X[-4&7],8); # compose "X[-14]" in "X[0]"
541 &movdqa (@X[2],@X[-1&7]);
545 &paddd (@X[3],@X[-1&7]);
546 &movdqa (&QWP(64+16*(($Xi-4)%3),"esp"),@X[-4&7]);# save X[] to backtrace buffer
549 &psrldq (@X[2],4); # "X[-3]", 3 dwords
552 &pxor (@X[0],@X[-4&7]); # "X[0]"^="X[-16]"
556 &pxor (@X[2],@X[-2&7]); # "X[-3]"^"X[-8]"
562 &pxor (@X[0],@X[2]); # "X[0]"^="X[-3]"^"X[-8]"
565 &movdqa (&QWP(0+16*(($Xi-1)&3),"esp"),@X[3]); # X[]+K xfer to IALU
569 &movdqa (@X[4],@X[0]);
	&movdqa	(@X[2],@X[0]);
	&psrld	(@X[2],31);
	&pslldq	(@X[4],12);		# "X[0]"<<96, extract one dword
	&paddd	(@X[0],@X[0]);
	&movdqa	(@X[3],@X[4]);
	&psrld	(@X[3],30);
591 &por (@X[0],@X[2]); # "X[0]"<<<=1
	&movdqa	(@X[2],&QWP(64+16*(($Xi-6)%3),"esp")) if ($Xi>5);	# restore X[] from backtrace buffer
	&pslld	(@X[4],2);
	&pxor	(@X[0],@X[4]);		# "X[0]"^=("X[0]"<<96)<<2
602 &movdqa (@X[4],&QWP(112-16+16*(($Xi)/5),"esp")); # K_XX_XX
	&pxor	(@X[0],@X[3]);		# "X[0]"^=("X[0]"<<96)>>30, i.e. <<<2 done
607 &movdqa (@X[1],@X[-2&7]) if ($Xi<7);
611 foreach (@insns) { eval; } # remaining instructions [if any]
  $Xi++; push(@X,shift(@X));	# "rotate" X[]
}
sub Xupdate_ssse3_32_79()
{
  my $body = shift;
  my @insns = (&$body,&$body,&$body,&$body);	# 32 to 44 instructions
  my ($a,$b,$c,$d,$e);
622 &movdqa (@X[2],@X[-1&7]) if ($Xi==8);
623 eval(shift(@insns)); # body_20_39
624 &pxor (@X[0],@X[-4&7]); # "X[0]"="X[-32]"^"X[-16]"
625 &palignr(@X[2],@X[-2&7],8); # compose "X[-6]"
628 eval(shift(@insns)); # rol
630 &pxor (@X[0],@X[-7&7]); # "X[0]"^="X[-28]"
631 &movdqa (&QWP(64+16*(($Xi-4)%3),"esp"),@X[-4&7]); # save X[] to backtrace buffer
	if ($Xi%5) {
	  &movdqa	(@X[4],@X[3]);	# "perpetuate" K_XX_XX...
636 } else { # ... or load next one
637 &movdqa (@X[4],&QWP(112-16+16*($Xi/5),"esp"));
	}
	&paddd	(@X[3],@X[-1&7]);
640 eval(shift(@insns)); # ror
643 &pxor (@X[0],@X[2]); # "X[0]"^="X[-6]"
644 eval(shift(@insns)); # body_20_39
647 eval(shift(@insns)); # rol
649 &movdqa (@X[2],@X[0]);
650 &movdqa (&QWP(0+16*(($Xi-1)&3),"esp"),@X[3]); # X[]+K xfer to IALU
653 eval(shift(@insns)); # ror
	&psrld	(@X[2],30);
	eval(shift(@insns));		# body_20_39
	&pslld	(@X[0],2);
661 eval(shift(@insns)); # rol
664 eval(shift(@insns)); # ror
667 &por (@X[0],@X[2]); # "X[0]"<<<=2
668 eval(shift(@insns)); # body_20_39
670 &movdqa (@X[2],&QWP(64+16*(($Xi-6)%3),"esp")) if($Xi<19); # restore X[] from backtrace buffer
672 eval(shift(@insns)); # rol
675 eval(shift(@insns)); # ror
676 &movdqa (@X[3],@X[0]) if ($Xi<19);
679 foreach (@insns) { eval; } # remaining instructions
  $Xi++; push(@X,shift(@X));	# "rotate" X[]
}
sub Xuplast_ssse3_80()
{
  my $body = shift;
  my @insns = (&$body,&$body,&$body,&$body);	# 32 instructions
  my ($a,$b,$c,$d,$e);
691 &paddd (@X[3],@X[-1&7]);
697 &movdqa (&QWP(0+16*(($Xi-1)&3),"esp"),@X[3]); # X[]+K xfer IALU
699 foreach (@insns) { eval; } # remaining instructions
701 &mov ($inp=@T[1],&DWP(192+4,"esp"));
702 &cmp ($inp,&DWP(192+8,"esp"));
703 &je (&label("done"));
705 &movdqa (@X[3],&QWP(112+48,"esp")); # K_00_19
706 &movdqa (@X[2],&QWP(112+64,"esp")); # pbswap mask
707 &movdqu (@X[-4&7],&QWP(0,$inp)); # load input
708 &movdqu (@X[-3&7],&QWP(16,$inp));
709 &movdqu (@X[-2&7],&QWP(32,$inp));
710 &movdqu (@X[-1&7],&QWP(48,$inp));
712 &pshufb (@X[-4&7],@X[2]); # byte swap
713 &mov (&DWP(192+4,"esp"),$inp);
714 &movdqa (&QWP(112-16,"esp"),@X[3]); # borrow last backtrace slot
722 my @insns = (&$body,&$body,&$body,&$body); # 32 instructions
727 &pshufb (@X[($Xi-3)&7],@X[2]);
730 &paddd (@X[($Xi-4)&7],@X[3]);
735 &movdqa (&QWP(0+16*$Xi,"esp"),@X[($Xi-4)&7]); # X[]+K xfer to IALU
738 &psubd (@X[($Xi-4)&7],@X[3]);
	foreach (@insns) { eval; }

  $Xi++;
}

sub Xtail_ssse3()
{
  my $body = shift;
  my @insns = (&$body,&$body,&$body,&$body);	# 32 instructions
  my ($a,$b,$c,$d,$e);
	foreach (@insns) { eval; }
}

sub body_00_19 () {
	(
	'($a,$b,$c,$d,$e)=@V;'.
	'&add	($e,&DWP(4*($j&15),"esp"));',	# X[]+K xfer
	'&xor	($c,$d);',
	'&mov	(@T[1],$a);',	# $b in next round
	'&$_rol	($a,5);',
	'&and	(@T[0],$c);',	# ($b&($c^$d))
	'&xor	($c,$d);',	# restore $c
	'&xor	(@T[0],$d);',
	'&add	($e,$a);',
	'&$_ror	($b,$j?7:2);',	# $b>>>2
	'&add	($e,@T[0]);'	.'$j++; unshift(@V,pop(@V)); unshift(@T,pop(@T));'
	);
}
sub body_20_39 () {
	(
	'($a,$b,$c,$d,$e)=@V;'.
	'&add	($e,&DWP(4*($j++&15),"esp"));',	# X[]+K xfer
	'&xor	(@T[0],$d);',	# ($b^$d)
	'&mov	(@T[1],$a);',	# $b in next round
	'&$_rol	($a,5);',
	'&xor	(@T[0],$c);',	# ($b^$d^$c)
	'&add	($e,$a);',
	'&$_ror	($b,7);',	# $b>>>2
	'&add	($e,@T[0]);'	.'unshift(@V,pop(@V)); unshift(@T,pop(@T));'
	);
}
sub body_40_59 () {	# F_40_59 computed as (($b^$c)&($c^$d))^$c
	(
	'($a,$b,$c,$d,$e)=@V;'.
	'&xor	(@T[0],$c);',	# ($b^$c)
	'&xor	(@T[1],$d);',	# ($c^$d)
	'&add	($e,&DWP(4*($j++&15),"esp"));',	# X[]+K xfer
	'&and	(@T[0],@T[1]);',	# ($b^$c)&($c^$d)
	'&$_ror	($b,7);',	# $b>>>2
	'&xor	(@T[0],$c);',	# F_40_59(b,c,d)
	'&mov	(@T[1],$a);',	# $b in next round
	'&$_rol	($a,5);',
	'&add	($e,@T[0]);',
	'&mov	(@T[0],$b);',	# copy of $c in next round
	'&add	($e,$a);'	.'unshift(@V,pop(@V)); unshift(@T,pop(@T));'
	);
}
800 &set_label("loop",16);
801 &Xupdate_ssse3_16_31(\&body_00_19);
802 &Xupdate_ssse3_16_31(\&body_00_19);
803 &Xupdate_ssse3_16_31(\&body_00_19);
804 &Xupdate_ssse3_16_31(\&body_00_19);
805 &Xupdate_ssse3_32_79(\&body_00_19);
806 &Xupdate_ssse3_32_79(\&body_20_39);
807 &Xupdate_ssse3_32_79(\&body_20_39);
808 &Xupdate_ssse3_32_79(\&body_20_39);
809 &Xupdate_ssse3_32_79(\&body_20_39);
810 &Xupdate_ssse3_32_79(\&body_20_39);
811 &mov (@T[1],@V[2]); # copy of $c in next round
812 &Xupdate_ssse3_32_79(\&body_40_59);
813 &Xupdate_ssse3_32_79(\&body_40_59);
814 &Xupdate_ssse3_32_79(\&body_40_59);
815 &Xupdate_ssse3_32_79(\&body_40_59);
816 &Xupdate_ssse3_32_79(\&body_40_59);
817 &Xupdate_ssse3_32_79(\&body_20_39);
818 &Xuplast_ssse3_80(\&body_20_39); # can jump to "done"
820 $saved_j=$j; @saved_V=@V;
822 &Xloop_ssse3(\&body_20_39);
823 &Xloop_ssse3(\&body_20_39);
824 &Xloop_ssse3(\&body_20_39);
826 &mov (@T[1],&DWP(192,"esp")); # update context
827 &add ($A,&DWP(0,@T[1]));
828 &add (@T[0],&DWP(4,@T[1])); # $b
829 &add ($C,&DWP(8,@T[1]));
830 &mov (&DWP(0,@T[1]),$A);
831 &add ($D,&DWP(12,@T[1]));
832 &mov (&DWP(4,@T[1]),@T[0]);
833 &add ($E,&DWP(16,@T[1]));
	&mov	(&DWP(8,@T[1]),$C);
	&mov	($B,@T[0]);
	&mov	(&DWP(12,@T[1]),$D);
837 &mov (&DWP(16,@T[1]),$E);
838 &movdqa (@X[0],@X[-3&7]);
840 &jmp (&label("loop"));
842 &set_label("done",16); $j=$saved_j; @V=@saved_V;
844 &Xtail_ssse3(\&body_20_39);
845 &Xtail_ssse3(\&body_20_39);
846 &Xtail_ssse3(\&body_20_39);
848 &mov (@T[1],&DWP(192,"esp")); # update context
849 &add ($A,&DWP(0,@T[1]));
850 &mov ("esp",&DWP(192+12,"esp")); # restore %esp
851 &add (@T[0],&DWP(4,@T[1])); # $b
852 &add ($C,&DWP(8,@T[1]));
853 &mov (&DWP(0,@T[1]),$A);
854 &add ($D,&DWP(12,@T[1]));
855 &mov (&DWP(4,@T[1]),@T[0]);
856 &add ($E,&DWP(16,@T[1]));
857 &mov (&DWP(8,@T[1]),$C);
858 &mov (&DWP(12,@T[1]),$D);
859 &mov (&DWP(16,@T[1]),$E);
861 &function_end("_sha1_block_data_order_ssse3");
864 my $Xi=4; # 4xSIMD Xupdate round, start pre-seeded
865 my @X=map("xmm$_",(4..7,0..3)); # pre-seeded for $Xi=4
866 my @V=($A,$B,$C,$D,$E);
my $j=0;			# hash round
my @T=($T,$tmp1);
my $inp;
871 my $_rol=sub { &shld(@_[0],@_) };
872 my $_ror=sub { &shrd(@_[0],@_) };
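# Note the argument duplication above: &$_rol($a,5) thus emits
# "shld $a,$a,5", i.e. the rotate expressed through the double-shift
# instruction, per the discussion at the top of the file.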
874 &function_begin("_sha1_block_data_order_avx");
875 &call (&label("pic_point")); # make it PIC!
876 &set_label("pic_point");
878 &lea ($tmp1,&DWP(&label("K_XX_XX")."-".&label("pic_point"),$tmp1));
879 &set_label("avx_shortcut");
882 &vmovdqa(@X[3],&QWP(0,$tmp1)); # K_00_19
883 &vmovdqa(@X[4],&QWP(16,$tmp1)); # K_20_39
884 &vmovdqa(@X[5],&QWP(32,$tmp1)); # K_40_59
885 &vmovdqa(@X[6],&QWP(48,$tmp1)); # K_60_79
886 &vmovdqa(@X[2],&QWP(64,$tmp1)); # pbswap mask
888 &mov ($E,&wparam(0)); # load argument block
889 &mov ($inp=@T[1],&wparam(1));
890 &mov ($D,&wparam(2));
895 # +0 X[0]+K X[1]+K X[2]+K X[3]+K # XMM->IALU xfer area
896 # X[4]+K X[5]+K X[6]+K X[7]+K
897 # X[8]+K X[9]+K X[10]+K X[11]+K
898 # X[12]+K X[13]+K X[14]+K X[15]+K
900 # +64 X[0] X[1] X[2] X[3] # XMM->XMM backtrace area
901 # X[4] X[5] X[6] X[7]
902 # X[8] X[9] X[10] X[11] # even borrowed for K_00_19
904 # +112 K_20_39 K_20_39 K_20_39 K_20_39 # constants
905 # K_40_59 K_40_59 K_40_59 K_40_59
906 # K_60_79 K_60_79 K_60_79 K_60_79
907 # K_00_19 K_00_19 K_00_19 K_00_19
	# +192	ctx			# argument block
	# +196	inp
	# +200	end
	# +204	esp

	&mov	(@T[0],"esp");
	&sub	("esp",208);
	&and	("esp",-64);
917 &vmovdqa(&QWP(112+0,"esp"),@X[4]); # copy constants
918 &vmovdqa(&QWP(112+16,"esp"),@X[5]);
919 &vmovdqa(&QWP(112+32,"esp"),@X[6]);
920 &shl ($D,6); # len*64
921 &vmovdqa(&QWP(112+48,"esp"),@X[3]);
922 &add ($D,$inp); # end of input
923 &vmovdqa(&QWP(112+64,"esp"),@X[2]);
925 &mov (&DWP(192+0,"esp"),$E); # save argument block
926 &mov (&DWP(192+4,"esp"),$inp);
927 &mov (&DWP(192+8,"esp"),$D);
928 &mov (&DWP(192+12,"esp"),@T[0]); # save original %esp
930 &mov ($A,&DWP(0,$E)); # load context
931 &mov ($B,&DWP(4,$E));
932 &mov ($C,&DWP(8,$E));
933 &mov ($D,&DWP(12,$E));
934 &mov ($E,&DWP(16,$E));
935 &mov (@T[0],$B); # magic seed
937 &vmovdqu(@X[-4&7],&QWP(-64,$inp)); # load input to %xmm[0-3]
938 &vmovdqu(@X[-3&7],&QWP(-48,$inp));
939 &vmovdqu(@X[-2&7],&QWP(-32,$inp));
940 &vmovdqu(@X[-1&7],&QWP(-16,$inp));
941 &vpshufb(@X[-4&7],@X[-4&7],@X[2]); # byte swap
942 &vpshufb(@X[-3&7],@X[-3&7],@X[2]);
943 &vpshufb(@X[-2&7],@X[-2&7],@X[2]);
944 &vmovdqa(&QWP(112-16,"esp"),@X[3]); # borrow last backtrace slot
945 &vpshufb(@X[-1&7],@X[-1&7],@X[2]);
946 &vpaddd (@X[0],@X[-4&7],@X[3]); # add K_00_19
947 &vpaddd (@X[1],@X[-3&7],@X[3]);
948 &vpaddd (@X[2],@X[-2&7],@X[3]);
949 &vmovdqa(&QWP(0,"esp"),@X[0]); # X[]+K xfer to IALU
950 &vmovdqa(&QWP(0+16,"esp"),@X[1]);
951 &vmovdqa(&QWP(0+32,"esp"),@X[2]);
952 &jmp (&label("loop"));
sub Xupdate_avx_16_31()		# recall that $Xi starts with 4
{
  my $body = shift;
  my @insns = (&$body,&$body,&$body,&$body);	# 40 instructions
  my ($a,$b,$c,$d,$e);
962 &vpalignr(@X[0],@X[-3&7],@X[-4&7],8); # compose "X[-14]" in "X[0]"
966 &vpaddd (@X[3],@X[3],@X[-1&7]);
967 &vmovdqa (&QWP(64+16*(($Xi-4)%3),"esp"),@X[-4&7]);# save X[] to backtrace buffer
970 &vpsrldq(@X[2],@X[-1&7],4); # "X[-3]", 3 dwords
973 &vpxor (@X[0],@X[0],@X[-4&7]); # "X[0]"^="X[-16]"
977 &vpxor (@X[2],@X[2],@X[-2&7]); # "X[-3]"^"X[-8]"
980 &vmovdqa (&QWP(0+16*(($Xi-1)&3),"esp"),@X[3]); # X[]+K xfer to IALU
984 &vpxor (@X[0],@X[0],@X[2]); # "X[0]"^="X[-3]"^"X[-8]"
990 &vpsrld (@X[2],@X[0],31);
996 &vpslldq(@X[4],@X[0],12); # "X[0]"<<96, extract one dword
997 &vpaddd (@X[0],@X[0],@X[0]);
1000 eval(shift(@insns));
1001 eval(shift(@insns));
1003 &vpsrld (@X[3],@X[4],30);
1004 &vpor (@X[0],@X[0],@X[2]); # "X[0]"<<<=1
1005 eval(shift(@insns));
1006 eval(shift(@insns));
1007 eval(shift(@insns));
1008 eval(shift(@insns));
1010 &vpslld (@X[4],@X[4],2);
1011 &vmovdqa (@X[2],&QWP(64+16*(($Xi-6)%3),"esp")) if ($Xi>5); # restore X[] from backtrace buffer
1012 eval(shift(@insns));
1013 eval(shift(@insns));
1014 &vpxor (@X[0],@X[0],@X[3]);
1015 eval(shift(@insns));
1016 eval(shift(@insns));
1017 eval(shift(@insns));
1018 eval(shift(@insns));
1020 &vpxor (@X[0],@X[0],@X[4]); # "X[0]"^=("X[0]"<<96)<<<2
1021 eval(shift(@insns));
1022 eval(shift(@insns));
1023 &vmovdqa (@X[4],&QWP(112-16+16*(($Xi)/5),"esp")); # K_XX_XX
1024 eval(shift(@insns));
1025 eval(shift(@insns));
1027 foreach (@insns) { eval; } # remaining instructions [if any]
  $Xi++; push(@X,shift(@X));	# "rotate" X[]
}
sub Xupdate_avx_32_79()
{
  my $body = shift;
  my @insns = (&$body,&$body,&$body,&$body);	# 32 to 44 instructions
1036 my ($a,$b,$c,$d,$e);
1038 &vpalignr(@X[2],@X[-1&7],@X[-2&7],8); # compose "X[-6]"
1039 &vpxor (@X[0],@X[0],@X[-4&7]); # "X[0]"="X[-32]"^"X[-16]"
1040 eval(shift(@insns)); # body_20_39
1041 eval(shift(@insns));
1042 eval(shift(@insns));
1043 eval(shift(@insns)); # rol
1045 &vpxor (@X[0],@X[0],@X[-7&7]); # "X[0]"^="X[-28]"
1046 &vmovdqa (&QWP(64+16*(($Xi-4)%3),"esp"),@X[-4&7]); # save X[] to backtrace buffer
1047 eval(shift(@insns));
1048 eval(shift(@insns));
	if ($Xi%5) {
	  &vmovdqa	(@X[4],@X[3]);	# "perpetuate" K_XX_XX...
1051 } else { # ... or load next one
1052 &vmovdqa (@X[4],&QWP(112-16+16*($Xi/5),"esp"));
	}
	&vpaddd	(@X[3],@X[3],@X[-1&7]);
1055 eval(shift(@insns)); # ror
1056 eval(shift(@insns));
1058 &vpxor (@X[0],@X[0],@X[2]); # "X[0]"^="X[-6]"
1059 eval(shift(@insns)); # body_20_39
1060 eval(shift(@insns));
1061 eval(shift(@insns));
1062 eval(shift(@insns)); # rol
1064 &vpsrld (@X[2],@X[0],30);
1065 &vmovdqa (&QWP(0+16*(($Xi-1)&3),"esp"),@X[3]); # X[]+K xfer to IALU
1066 eval(shift(@insns));
1067 eval(shift(@insns));
1068 eval(shift(@insns)); # ror
1069 eval(shift(@insns));
1071 &vpslld (@X[0],@X[0],2);
1072 eval(shift(@insns)); # body_20_39
1073 eval(shift(@insns));
1074 eval(shift(@insns));
1075 eval(shift(@insns)); # rol
1076 eval(shift(@insns));
1077 eval(shift(@insns));
1078 eval(shift(@insns)); # ror
1079 eval(shift(@insns));
1081 &vpor (@X[0],@X[0],@X[2]); # "X[0]"<<<=2
1082 eval(shift(@insns)); # body_20_39
1083 eval(shift(@insns));
1084 &vmovdqa (@X[2],&QWP(64+16*(($Xi-6)%3),"esp")) if($Xi<19); # restore X[] from backtrace buffer
1085 eval(shift(@insns));
1086 eval(shift(@insns)); # rol
1087 eval(shift(@insns));
1088 eval(shift(@insns));
1089 eval(shift(@insns)); # ror
1090 eval(shift(@insns));
1092 foreach (@insns) { eval; } # remaining instructions
  $Xi++; push(@X,shift(@X));	# "rotate" X[]
}
sub Xuplast_avx_80()
{
  my $body = shift;
  my @insns = (&$body,&$body,&$body,&$body);	# 32 instructions
1101 my ($a,$b,$c,$d,$e);
1103 eval(shift(@insns));
1104 &vpaddd (@X[3],@X[3],@X[-1&7]);
1105 eval(shift(@insns));
1106 eval(shift(@insns));
1107 eval(shift(@insns));
1108 eval(shift(@insns));
1110 &vmovdqa (&QWP(0+16*(($Xi-1)&3),"esp"),@X[3]); # X[]+K xfer IALU
1112 foreach (@insns) { eval; } # remaining instructions
1114 &mov ($inp=@T[1],&DWP(192+4,"esp"));
1115 &cmp ($inp,&DWP(192+8,"esp"));
1116 &je (&label("done"));
1118 &vmovdqa(@X[3],&QWP(112+48,"esp")); # K_00_19
1119 &vmovdqa(@X[2],&QWP(112+64,"esp")); # pbswap mask
1120 &vmovdqu(@X[-4&7],&QWP(0,$inp)); # load input
1121 &vmovdqu(@X[-3&7],&QWP(16,$inp));
1122 &vmovdqu(@X[-2&7],&QWP(32,$inp));
1123 &vmovdqu(@X[-1&7],&QWP(48,$inp));
1125 &vpshufb(@X[-4&7],@X[-4&7],@X[2]); # byte swap
1126 &mov (&DWP(192+4,"esp"),$inp);
1127 &vmovdqa(&QWP(112-16,"esp"),@X[3]); # borrow last backtrace slot
1135 my @insns = (&$body,&$body,&$body,&$body); # 32 instructions
1136 my ($a,$b,$c,$d,$e);
1138 eval(shift(@insns));
1139 eval(shift(@insns));
1140 &vpshufb (@X[($Xi-3)&7],@X[($Xi-3)&7],@X[2]);
1141 eval(shift(@insns));
1142 eval(shift(@insns));
1143 &vpaddd (@X[$Xi&7],@X[($Xi-4)&7],@X[3]);
1144 eval(shift(@insns));
1145 eval(shift(@insns));
1146 eval(shift(@insns));
1147 eval(shift(@insns));
1148 &vmovdqa (&QWP(0+16*$Xi,"esp"),@X[$Xi&7]); # X[]+K xfer to IALU
1149 eval(shift(@insns));
1150 eval(shift(@insns));
	foreach (@insns) { eval; }

  $Xi++;
}

sub Xtail_avx()
{
  my $body = shift;
  my @insns = (&$body,&$body,&$body,&$body);	# 32 instructions
1160 my ($a,$b,$c,$d,$e);
	foreach (@insns) { eval; }
}
1165 &set_label("loop",16);
1166 &Xupdate_avx_16_31(\&body_00_19);
1167 &Xupdate_avx_16_31(\&body_00_19);
1168 &Xupdate_avx_16_31(\&body_00_19);
1169 &Xupdate_avx_16_31(\&body_00_19);
1170 &Xupdate_avx_32_79(\&body_00_19);
1171 &Xupdate_avx_32_79(\&body_20_39);
1172 &Xupdate_avx_32_79(\&body_20_39);
1173 &Xupdate_avx_32_79(\&body_20_39);
1174 &Xupdate_avx_32_79(\&body_20_39);
1175 &Xupdate_avx_32_79(\&body_20_39);
1176 &mov (@T[1],@V[2]); # copy of $c in next round
1177 &Xupdate_avx_32_79(\&body_40_59);
1178 &Xupdate_avx_32_79(\&body_40_59);
1179 &Xupdate_avx_32_79(\&body_40_59);
1180 &Xupdate_avx_32_79(\&body_40_59);
1181 &Xupdate_avx_32_79(\&body_40_59);
1182 &Xupdate_avx_32_79(\&body_20_39);
1183 &Xuplast_avx_80(\&body_20_39); # can jump to "done"
1185 $saved_j=$j; @saved_V=@V;
1187 &Xloop_avx(\&body_20_39);
1188 &Xloop_avx(\&body_20_39);
1189 &Xloop_avx(\&body_20_39);
1191 &mov (@T[1],&DWP(192,"esp")); # update context
1192 &add ($A,&DWP(0,@T[1]));
1193 &add (@T[0],&DWP(4,@T[1])); # $b
1194 &add ($C,&DWP(8,@T[1]));
1195 &mov (&DWP(0,@T[1]),$A);
1196 &add ($D,&DWP(12,@T[1]));
1197 &mov (&DWP(4,@T[1]),@T[0]);
1198 &add ($E,&DWP(16,@T[1]));
	&mov	(&DWP(8,@T[1]),$C);
	&mov	($B,@T[0]);
	&mov	(&DWP(12,@T[1]),$D);
1202 &mov (&DWP(16,@T[1]),$E);
1204 &jmp (&label("loop"));
1206 &set_label("done",16); $j=$saved_j; @V=@saved_V;
1208 &Xtail_avx(\&body_20_39);
1209 &Xtail_avx(\&body_20_39);
1210 &Xtail_avx(\&body_20_39);
1214 &mov (@T[1],&DWP(192,"esp")); # update context
1215 &add ($A,&DWP(0,@T[1]));
1216 &mov ("esp",&DWP(192+12,"esp")); # restore %esp
1217 &add (@T[0],&DWP(4,@T[1])); # $b
1218 &add ($C,&DWP(8,@T[1]));
1219 &mov (&DWP(0,@T[1]),$A);
1220 &add ($D,&DWP(12,@T[1]));
1221 &mov (&DWP(4,@T[1]),@T[0]);
1222 &add ($E,&DWP(16,@T[1]));
1223 &mov (&DWP(8,@T[1]),$C);
1224 &mov (&DWP(12,@T[1]),$D);
1225 &mov (&DWP(16,@T[1]),$E);
1226 &function_end("_sha1_block_data_order_avx");
1228 &set_label("K_XX_XX",64);
1229 &data_word(0x5a827999,0x5a827999,0x5a827999,0x5a827999); # K_00_19
1230 &data_word(0x6ed9eba1,0x6ed9eba1,0x6ed9eba1,0x6ed9eba1); # K_20_39
1231 &data_word(0x8f1bbcdc,0x8f1bbcdc,0x8f1bbcdc,0x8f1bbcdc); # K_40_59
1232 &data_word(0xca62c1d6,0xca62c1d6,0xca62c1d6,0xca62c1d6); # K_60_79
1233 &data_word(0x00010203,0x04050607,0x08090a0b,0x0c0d0e0f); # pbswap mask
&asciz("SHA1 block transform for x86, CRYPTOGAMS by <appro\@openssl.org>");

&asm_finish();