# Copyright 2007-2016 The OpenSSL Project Authors. All Rights Reserved.
#
# Licensed under the OpenSSL license (the "License"). You may not use
# this file except in compliance with the License. You can obtain a copy
# in the file LICENSE in the source distribution or at
# https://www.openssl.org/source/license.html
#
# ====================================================================
# Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
# project. The module is, however, dual licensed under OpenSSL and
# CRYPTOGAMS licenses depending on where you obtain it. For further
# details see http://www.openssl.org/~appro/cryptogams/.
# ====================================================================
# SHA256 block transform for x86. September 2007.
#
# Performance improvement over compiler-generated code varies from
# 10% to 40% [see below]. Not very impressive on some µ-archs, but
# it's 5 times smaller and minimizes the amount of writes.
#
# Optimization including two of Pavel Semjanov's ideas, alternative
# Maj and full unroll, resulted in ~20-25% improvement on most CPUs,
# ~7% on Pentium and ~40% on Atom. As the fully unrolled loop body is
# almost 15x larger, 8KB vs. 560B, it's used only for longer inputs.
# But not on P4, where it kills performance, nor on Sandy Bridge,
# where the folded loop is approximately as fast...
#
# Add AMD XOP-specific code path, >30% improvement on Bulldozer over
# the May version, >60% over the original. Add AVX+shrd code path,
# >25% improvement on Sandy Bridge over the May version, 60% over
# the original.
#
# Replace the AMD XOP code path with SSSE3 to cover more processors.
# (Biggest improvement coefficient is on upcoming Atom Silvermont,
# not shown.) Add AVX+BMI code path.
#
# Add support for Intel SHA Extensions.
# Performance in clock cycles per processed byte (less is better):
#
#                gcc    icc    x86 asm(*)    SIMD    x86_64 asm(**)
# Pentium        46     57     40/38         -       -
# PIII           36     33     27/24         -       -
# AMD K8         27     25     19/15.5       -       14.9
# Core2          26     23     18/15.6       14.3    13.8
# Westmere       27     -      19/15.7       13.4    12.3
# Sandy Bridge   25     -      15.9          12.4    11.6
# Ivy Bridge     24     -      15.0          11.4    10.3
# Haswell        22     -      13.9          9.46    7.80
# Skylake        20     -      14.9          9.50    7.70
# Bulldozer      36     -      27/22         17.0    13.6
# VIA Nano       36     -      25/22         16.8    16.5
# Atom           50     -      30/25         21.9    18.9
# Silvermont     40     -      34/31         22.9    20.6
# Goldmont       29     -      20            16.3(***)
#
# (*)   numbers after slash are for unrolled loop, where applicable;
# (**)  x86_64 assembly performance is presented for reference
#       purposes, results are best-available;
# (***) SHAEXT result is 4.1, strangely enough better than the 64-bit one;
$0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
push(@INC,"${dir}","${dir}../../perlasm");

open STDOUT,">$output";

&asm_init($ARGV[0],$ARGV[$#ARGV] eq "386");

for (@ARGV) { $xmm=1 if (/-DOPENSSL_IA32_SSE2/); }

if ($xmm && `$ENV{CC} -Wa,-v -c -o /dev/null -x assembler /dev/null 2>&1`
	=~ /GNU assembler version ([2-9]\.[0-9]+)/) {
	$avx = ($1>=2.19) + ($1>=2.22);
}

if ($xmm && !$avx && $ARGV[0] eq "win32n" &&
	`nasm -v 2>&1` =~ /NASM version ([2-9]\.[0-9]+)/) {
	$avx = ($1>=2.03) + ($1>=2.10);
}

if ($xmm && !$avx && $ARGV[0] eq "win32" &&
	`ml 2>&1` =~ /Version ([0-9]+)\./) {
	$avx = ($1>=10) + ($1>=11);
}

if ($xmm && !$avx && `$ENV{CC} -v 2>&1` =~ /(^clang version|based on LLVM) ([3-9]\.[0-9]+)/) {
	$avx = ($2>=3.0) + ($2>3.0);
}
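# To summarize the probes above (a reading of the version checks, not
# an additional feature test): $avx stays 0 when the assembler predates
# AVX, becomes 1 once it can encode AVX instructions, and 2 once it can
# also encode the BMI/rorx instructions that the "AVX" and "AVX_BMI"
# code paths further down rely on.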
$shaext=$xmm;	### set to zero if compiling for 1.0.1

$unroll_after = 64*4;	# If pre-evicted from L1P cache, the first spin
			# of the fully unrolled loop was measured to run
			# about 3-4x slower. If the slowdown coefficient
			# is N and the unrolled loop is m times faster,
			# then you break even at (N-1)/(m-1) blocks. This
			# then needs to be adjusted for the probability
			# of the code being evicted, code size/cache
			# size=1/4. Typical m is 1.15...
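# Multiplying the rule above out (same numbers, nothing new): with
# slowdown N~=3.5 and speedup m~=1.15, (N-1)/(m-1) = 2.5/0.15 ~= 17
# blocks; weighted by the ~1/4 eviction probability that is ~4 blocks,
# i.e. 4*64 = 256 bytes, which is the $unroll_after threshold above.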
$Coff=&DWP(12,"esp");
$Doff=&DWP(16,"esp");
$Eoff=&DWP(20,"esp");
$Foff=&DWP(24,"esp");
$Goff=&DWP(28,"esp");
$Hoff=&DWP(32,"esp");
$Xoff=&DWP(36,"esp");

&mov ($T,"ecx"); # "ecx" is preloaded
&mov ("esi",&DWP(4*(9+15+16-14),"esp"));
&xor ($T,"ecx"); # T = sigma0(X[-15])
&add ($T,&DWP(4*(9+15+16),"esp")); # T += X[-16]
&add ($T,&DWP(4*(9+15+16-9),"esp")); # T += X[-7]
#&xor ("edi","esi"); # sigma1(X[-2])
# &add ($T,"edi"); # T += sigma1(X[-2])
# &mov (&DWP(4*(9+15),"esp"),$T); # save X[0]

&xor ("edi","esi") if ($in_16_63); # sigma1(X[-2])
&add ($T,"edi") if ($in_16_63); # T += sigma1(X[-2])
&mov ($T,&DWP(4*(9+15),"esp")) if (!$in_16_63);
&mov (&DWP(4*(9+15),"esp"),$T) if ($in_16_63); # save X[0]

&mov ($Eoff,$E); # modulo-scheduled
&add ($T,$Hoff); # T += h
&xor ("esi","edi"); # Ch(e,f,g)
&ror ($E,6); # Sigma1(e)
&add ($T,"esi"); # T += Ch(e,f,g)
&add ($T,$E); # T += Sigma1(e)
&mov ($Aoff,$A); # modulo-scheduled
&lea ("esp",&DWP(-4,"esp"));
&mov ("esi",&DWP(0,$K256));
&mov ($E,$Eoff); # e in next iteration, d in this one
&xor ($A,"edi"); # a ^= b
&ror ("ecx",2); # Sigma0(a)
&add ($T,"esi"); # T += K[i]
&mov (&DWP(0,"esp"),$A); # (b^c) in next round
&add ($E,$T); # d += T
&and ($A,&DWP(4,"esp")); # a &= (b^c)
&add ($T,"ecx"); # T += Sigma0(a)
&xor ($A,"edi"); # h = Maj(a,b,c) = Ch(a^b,c,b)
&mov ("ecx",&DWP(4*(9+15+16-1),"esp")) if ($in_16_63); # preload T
&add ($A,$T); # h += T
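# The Maj(a,b,c) = Ch(a^b,c,b) identity relied on above: with
# Ch(x,y,z) = (x&y)|(~x&z), substituting x=a^b, y=c, z=b yields b when
# a==b (then b is the majority) and c when a!=b (then c decides the
# majority). An exhaustive one-liner check, for the suspicious:
#
#   perl -e 'for $a (0,1) { for $b (0,1) { for $c (0,1) {
#       die unless (((($a^$b)&$c)|(~($a^$b)&$b))&1)
#                  == (($a&$b)|($a&$c)|($b&$c)); }}} print "ok\n"'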
&external_label("OPENSSL_ia32cap_P") if (!$i386);

&function_begin("sha256_block_data_order");
&mov ("esi",wparam(0)); # ctx
&mov ("edi",wparam(1)); # inp
&mov ("eax",wparam(2)); # num
&mov ("ebx","esp"); # saved sp

&call (&label("pic_point")); # make it PIC!
&set_label("pic_point");
&blindpop($K256);
&lea ($K256,&DWP(&label("K256")."-".&label("pic_point"),$K256));

&mov (&DWP(0,"esp"),"esi"); # ctx
&mov (&DWP(4,"esp"),"edi"); # inp
&mov (&DWP(8,"esp"),"eax"); # inp+num*64
&mov (&DWP(12,"esp"),"ebx"); # saved sp
if (!$i386 && $xmm) {
&picmeup("edx","OPENSSL_ia32cap_P",$K256,&label("K256"));
&mov ("ecx",&DWP(0,"edx"));
&mov ("ebx",&DWP(4,"edx"));
&test ("ecx",1<<20); # check for P4
&jnz (&label("loop"));
&mov ("edx",&DWP(8,"edx")) if ($xmm);
&test ("ecx",1<<24); # check for FXSR
&jz ($unroll_after?&label("no_xmm"):&label("loop"));
&and ("ecx",1<<30); # mask "Intel CPU" bit
&and ("ebx",1<<28|1<<9); # mask AVX and SSSE3 bits
&test ("edx",1<<29) if ($shaext); # check for SHA
&jnz (&label("shaext")) if ($shaext);
&and ("ecx",1<<28|1<<30);
&cmp ("ecx",1<<28|1<<30);
&je (&label("AVX")) if ($avx);
&test ("ebx",1<<9); # check for SSSE3
&jnz (&label("SSSE3"));
&je (&label("loop_shrd"));
&set_label("no_xmm");
&cmp ("eax",$unroll_after);
&jae (&label("unrolled"));
&jmp (&label("loop"));
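# Net effect of the dispatch above: SHA Extensions win outright when
# the SHA bit is set; Intel parts advertising AVX take the "AVX" (or
# "AVX_BMI") path; anything with SSSE3 takes "SSSE3"; remaining
# SSE2-era parts fall into the shrd-based compact loop; and P4 or
# FXSR-less processors end up in the baseline "loop"/"unrolled" code.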
&set_label("loop$suffix",$suffix?32:16);
# copy input block to stack reversing byte and dword order
for($i=0;$i<4;$i++) {
&mov ("eax",&DWP($i*16+0,"edi"));
&mov ("ebx",&DWP($i*16+4,"edi"));
&mov ("ecx",&DWP($i*16+8,"edi"));
&mov ("edx",&DWP($i*16+12,"edi"));

&lea ("esp",&DWP(-4*9,"esp")); # place for A,B,C,D,E,F,G,H
&mov (&DWP(4*(9+16)+4,"esp"),"edi");

# copy ctx->h[0-7] to A,B,C,D,E,F,G,H on stack
&mov ($A,&DWP(0,"esi"));
&mov ("ebx",&DWP(4,"esi"));
&mov ("ecx",&DWP(8,"esi"));
&mov ("edi",&DWP(12,"esi"));
&mov (&DWP(0,"esp"),"ebx"); # magic
&mov ($E,&DWP(16,"esi"));
&mov ("ebx",&DWP(20,"esi"));
&mov ("ecx",&DWP(24,"esi"));
&mov ("edi",&DWP(28,"esi"));

&set_label("00_15$suffix",16);
&cmp ("esi",0xc19bf174);
&jne (&label("00_15$suffix"));
&mov ("ecx",&DWP(4*(9+15+16-1),"esp")); # preloaded in BODY_00_15(1)
&jmp (&label("16_63$suffix"));

&set_label("16_63$suffix",16);
&cmp ("esi",0xc67178f2);
&jne (&label("16_63$suffix"));
&mov ("esi",&DWP(4*(9+16+64)+0,"esp")); #ctx
# &mov ("edi",$Coff);
&add ($A,&DWP(0,"esi"));
&add ("ebx",&DWP(4,"esi"));
&add ("edi",&DWP(8,"esi"));
&add ("ecx",&DWP(12,"esi"));
&mov (&DWP(0,"esi"),$A);
&mov (&DWP(4,"esi"),"ebx");
&mov (&DWP(8,"esi"),"edi");
&mov (&DWP(12,"esi"),"ecx");
&mov ("edi",&DWP(4*(9+16+64)+4,"esp")); #inp
&add ($E,&DWP(16,"esi"));
&add ("eax",&DWP(20,"esi"));
&add ("ebx",&DWP(24,"esi"));
&add ("ecx",&DWP(28,"esi"));
&mov (&DWP(16,"esi"),$E);
&mov (&DWP(20,"esi"),"eax");
&mov (&DWP(24,"esi"),"ebx");
&mov (&DWP(28,"esi"),"ecx");
&lea ("esp",&DWP(4*(9+16+64),"esp")); # destroy frame
&sub ($K256,4*64); # rewind K
&cmp ("edi",&DWP(8,"esp")); # are we done yet?
&jb (&label("loop$suffix"));
&mov ("esp",&DWP(12,"esp")); # restore sp

if (!$i386 && !$xmm) {
# ~20% improvement on Sandy Bridge
local *ror = sub { &shrd(@_[0],@_) };
&COMPACT_LOOP("_shrd");
&mov ("esp",&DWP(12,"esp")); # restore sp
&set_label("K256",64); # Yes! I keep it in the code segment!
@K256=( 0x428a2f98,0x71374491,0xb5c0fbcf,0xe9b5dba5,
	0x3956c25b,0x59f111f1,0x923f82a4,0xab1c5ed5,
	0xd807aa98,0x12835b01,0x243185be,0x550c7dc3,
	0x72be5d74,0x80deb1fe,0x9bdc06a7,0xc19bf174,
	0xe49b69c1,0xefbe4786,0x0fc19dc6,0x240ca1cc,
	0x2de92c6f,0x4a7484aa,0x5cb0a9dc,0x76f988da,
	0x983e5152,0xa831c66d,0xb00327c8,0xbf597fc7,
	0xc6e00bf3,0xd5a79147,0x06ca6351,0x14292967,
	0x27b70a85,0x2e1b2138,0x4d2c6dfc,0x53380d13,
	0x650a7354,0x766a0abb,0x81c2c92e,0x92722c85,
	0xa2bfe8a1,0xa81a664b,0xc24b8b70,0xc76c51a3,
	0xd192e819,0xd6990624,0xf40e3585,0x106aa070,
	0x19a4c116,0x1e376c08,0x2748774c,0x34b0bcb5,
	0x391c0cb3,0x4ed8aa4a,0x5b9cca4f,0x682e6ff3,
	0x748f82ee,0x78a5636f,0x84c87814,0x8cc70208,
	0x90befffa,0xa4506ceb,0xbef9a3f7,0xc67178f2 );
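# These are the standard FIPS 180-4 round constants: the first 32 bits
# of the fractional parts of the cube roots of the first 64 primes.
# E.g. for the first prime, 2:
#
#   perl -e 'printf "%08x\n", int((2**(1/3)-1)*2**32)'	# 428a2f98
#
# which matches K256[0] above.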
&data_word(@K256);
&data_word(0x00010203,0x04050607,0x08090a0b,0x0c0d0e0f); # byte swap mask
&asciz("SHA256 block transform for x86, CRYPTOGAMS by <appro\@openssl.org>");

($a,$b,$c,$d,$e,$f,$g,$h)=(0..7); # offsets
sub off { &DWP(4*(((shift)-$i)&7),"esp"); }

if (!$i386 && $unroll_after) {
my @AH=($A,$K256);

&set_label("unrolled",16);
&lea ("esp",&DWP(-96,"esp"));
# copy ctx->h[0-7] to A,B,C,D,E,F,G,H on stack
&mov ($AH[0],&DWP(0,"esi"));
&mov ($AH[1],&DWP(4,"esi"));
&mov ("ecx",&DWP(8,"esi"));
&mov ("ebx",&DWP(12,"esi"));
#&mov (&DWP(0,"esp"),$AH[0]);
&mov (&DWP(4,"esp"),$AH[1]);
&xor ($AH[1],"ecx"); # magic
&mov (&DWP(8,"esp"),"ecx");
&mov (&DWP(12,"esp"),"ebx");
&mov ($E,&DWP(16,"esi"));
&mov ("ebx",&DWP(20,"esi"));
&mov ("ecx",&DWP(24,"esi"));
&mov ("esi",&DWP(28,"esi"));
#&mov (&DWP(16,"esp"),$E);
&mov (&DWP(20,"esp"),"ebx");
&mov (&DWP(24,"esp"),"ecx");
&mov (&DWP(28,"esp"),"esi");
&jmp (&label("grand_loop"));

&set_label("grand_loop",16);
# copy input block to stack reversing byte order
for($i=0;$i<5;$i++) {
&mov ("ebx",&DWP(12*$i+0,"edi"));
&mov ("ecx",&DWP(12*$i+4,"edi"));
&mov ("esi",&DWP(12*$i+8,"edi"));
&mov (&DWP(32+12*$i+0,"esp"),"ebx");
&mov (&DWP(32+12*$i+4,"esp"),"ecx");
&mov (&DWP(32+12*$i+8,"esp"),"esi");

&mov ("ebx",&DWP($i*12,"edi"));
&mov (&DWP(96+4,"esp"),"edi");
&mov (&DWP(32+12*$i,"esp"),"ebx");

my ($t1,$t2) = ("ecx","esi");

for ($i=0;$i<64;$i++) {
&mov ($T,$t1); # $t1 is preloaded
# &mov ($t2,&DWP(32+4*(($i+14)&15),"esp"));
&xor ($T,$t1); # T = sigma0(X[-15])
&add ($T,&DWP(32+4*($i&15),"esp")); # T += X[-16]
&add ($T,&DWP(32+4*(($i+9)&15),"esp")); # T += X[-7]
#&xor ("edi",$t2); # sigma1(X[-2])
# &add ($T,"edi"); # T += sigma1(X[-2])
# &mov (&DWP(4*(9+15),"esp"),$T); # save X[0]

&xor ("edi",$t2) if ($i>=16); # sigma1(X[-2])
&add ($T,"edi") if ($i>=16); # T += sigma1(X[-2])
&mov ("edi",&off($g));
&mov ($T,&DWP(32+4*($i&15),"esp")) if ($i<16); # X[i]
&mov (&DWP(32+4*($i&15),"esp"),$T) if ($i>=16 && $i<62); # save X[0]
&mov (&off($e),$t1); # save $E, modulo-scheduled
&add ($T,&off($h)); # T += h
&xor ("edi",$t2); # Ch(e,f,g)
&ror ($E,6); # Sigma1(e)
&add ($T,"edi"); # T += Ch(e,f,g)
&mov ("edi",&off($b));
&mov (&off($a),$AH[0]); # save $A, modulo-scheduled
&xor ($AH[0],"edi"); # a ^= b, (b^c) in next round
&and ($AH[1],$AH[0]); # (b^c) &= (a^b)
&lea ($E,&DWP(@K256[$i],$T,$E)); # T += Sigma1(e)+K[i]
&xor ($AH[1],"edi"); # h = Maj(a,b,c) = Ch(a^b,c,b)
&mov ($t2,&DWP(32+4*(($i+2)&15),"esp")) if ($i>=15 && $i<63);
&ror ($t1,2); # Sigma0(a)
&add ($AH[1],$E); # h += T
&add ($E,&off($d)); # d += T
&add ($AH[1],$t1); # h += Sigma0(a)
&mov ($t1,&DWP(32+4*(($i+15)&15),"esp")) if ($i>=15 && $i<63);

@AH = reverse(@AH); # rotate(a,h)
($t1,$t2) = ($t2,$t1); # rotate(t1,t2)
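# Note that rotating a..h costs no instructions here: @AH = reverse(@AH)
# merely swaps which registers the generator calls "a" and "h", and
# off() above indexes the other six words on the stack relative to the
# round counter $i, so the state rotation is folded into addressing.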
&mov ("esi",&DWP(96,"esp")); #ctx
#&mov ($AH[0],&DWP(0,"esp"));
&xor ($AH[1],"edi"); #&mov ($AH[1],&DWP(4,"esp"));
#&mov ("edi",&DWP(8,"esp"));
&mov ("ecx",&DWP(12,"esp"));
&add ($AH[0],&DWP(0,"esi"));
&add ($AH[1],&DWP(4,"esi"));
&add ("edi",&DWP(8,"esi"));
&add ("ecx",&DWP(12,"esi"));
&mov (&DWP(0,"esi"),$AH[0]);
&mov (&DWP(4,"esi"),$AH[1]);
&mov (&DWP(8,"esi"),"edi");
&mov (&DWP(12,"esi"),"ecx");
#&mov (&DWP(0,"esp"),$AH[0]);
&mov (&DWP(4,"esp"),$AH[1]);
&xor ($AH[1],"edi"); # magic
&mov (&DWP(8,"esp"),"edi");
&mov (&DWP(12,"esp"),"ecx");
#&mov ($E,&DWP(16,"esp"));
&mov ("edi",&DWP(20,"esp"));
&mov ("ebx",&DWP(24,"esp"));
&mov ("ecx",&DWP(28,"esp"));
&add ($E,&DWP(16,"esi"));
&add ("edi",&DWP(20,"esi"));
&add ("ebx",&DWP(24,"esi"));
&add ("ecx",&DWP(28,"esi"));
&mov (&DWP(16,"esi"),$E);
&mov (&DWP(20,"esi"),"edi");
&mov (&DWP(24,"esi"),"ebx");
&mov (&DWP(28,"esi"),"ecx");
#&mov (&DWP(16,"esp"),$E);
&mov (&DWP(20,"esp"),"edi");
&mov ("edi",&DWP(96+4,"esp")); # inp
&mov (&DWP(24,"esp"),"ebx");
&mov (&DWP(28,"esp"),"ecx");

&cmp ("edi",&DWP(96+8,"esp")); # are we done yet?
&jb (&label("grand_loop"));

&mov ("esp",&DWP(96+12,"esp")); # restore sp
if (!$i386 && $xmm) {{{
######################################################################
# Intel SHA Extensions implementation of SHA256 update function.
#
my ($ctx,$inp,$end)=("esi","edi","eax");
my ($Wi,$ABEF,$CDGH,$TMP)=map("xmm$_",(0..2,7));
my @MSG=map("xmm$_",(3..6));

sub sha256op38 {
my ($opcodelet,$dst,$src)=@_;
    if ("$dst:$src" =~ /xmm([0-7]):xmm([0-7])/)
    { &data_byte(0x0f,0x38,$opcodelet,0xc0|($1<<3)|$2); }
}
sub sha256rnds2 { sha256op38(0xcb,@_); }
sub sha256msg1 { sha256op38(0xcc,@_); }
sub sha256msg2 { sha256op38(0xcd,@_); }
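# sha256op38() hand-assembles the SHA-NI instructions (0f 38 /r
# opcodes) so the module still builds with assemblers that predate
# them. Worked example: sha256rnds2 xmm1,xmm2 (destination first, as
# in the calls below) emits 0f 38 cb ca, the ModRM byte being
# 0xc0|(1<<3)|2 = 0xca -- register-direct mode, destination in the
# reg field, source in r/m.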
&set_label("shaext",32);

&movdqu ($ABEF,&QWP(0,$ctx)); # DCBA
&lea ($K256,&DWP(0x80,$K256));
&movdqu ($CDGH,&QWP(16,$ctx)); # HGFE
&movdqa ($TMP,&QWP(0x100-0x80,$K256)); # byte swap mask

&pshufd ($Wi,$ABEF,0x1b); # ABCD
&pshufd ($ABEF,$ABEF,0xb1); # CDAB
&pshufd ($CDGH,$CDGH,0x1b); # EFGH
&palignr ($ABEF,$CDGH,8); # ABEF
&punpcklqdq ($CDGH,$Wi); # CDGH
&jmp (&label("loop_shaext"));
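# The shuffles above repack the little-endian h[0..7] words into the
# layout sha256rnds2 works on: the state is split into {A,B,E,F} and
# {C,D,G,H} halves, each instruction advancing both halves by two
# rounds. The epilogue applies the inverse permutation before storing
# the state back.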
&set_label("loop_shaext",16);
&movdqu (@MSG[0],&QWP(0,$inp));
&movdqu (@MSG[1],&QWP(0x10,$inp));
&movdqu (@MSG[2],&QWP(0x20,$inp));
&pshufb (@MSG[0],$TMP);
&movdqu (@MSG[3],&QWP(0x30,$inp));
&movdqa (&QWP(16,"esp"),$CDGH); # offload

&movdqa ($Wi,&QWP(0*16-0x80,$K256));
&paddd ($Wi,@MSG[0]);
&pshufb (@MSG[1],$TMP);
&sha256rnds2 ($CDGH,$ABEF); # 0-3
&pshufd ($Wi,$Wi,0x0e);
&movdqa (&QWP(0,"esp"),$ABEF); # offload
&sha256rnds2 ($ABEF,$CDGH);

&movdqa ($Wi,&QWP(1*16-0x80,$K256));
&paddd ($Wi,@MSG[1]);
&pshufb (@MSG[2],$TMP);
&sha256rnds2 ($CDGH,$ABEF); # 4-7
&pshufd ($Wi,$Wi,0x0e);
&lea ($inp,&DWP(0x40,$inp));
&sha256msg1 (@MSG[0],@MSG[1]);
&sha256rnds2 ($ABEF,$CDGH);

&movdqa ($Wi,&QWP(2*16-0x80,$K256));
&paddd ($Wi,@MSG[2]);
&pshufb (@MSG[3],$TMP);
&sha256rnds2 ($CDGH,$ABEF); # 8-11
&pshufd ($Wi,$Wi,0x0e);
&movdqa ($TMP,@MSG[3]);
&palignr ($TMP,@MSG[2],4);
&paddd (@MSG[0],$TMP);
&sha256msg1 (@MSG[1],@MSG[2]);
&sha256rnds2 ($ABEF,$CDGH);

&movdqa ($Wi,&QWP(3*16-0x80,$K256));
&paddd ($Wi,@MSG[3]);
&sha256msg2 (@MSG[0],@MSG[3]);
&sha256rnds2 ($CDGH,$ABEF); # 12-15
&pshufd ($Wi,$Wi,0x0e);
&movdqa ($TMP,@MSG[0]);
&palignr ($TMP,@MSG[3],4);
&paddd (@MSG[1],$TMP);
&sha256msg1 (@MSG[2],@MSG[3]);
&sha256rnds2 ($ABEF,$CDGH);
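# Each 16 bytes of K[i]+W[i] feed two sha256rnds2 invocations: the
# first consumes the low qword of $Wi (the implicit xmm0 operand),
# then pshufd $Wi,$Wi,0x0e moves the high qword down for the second,
# with the CDGH/ABEF operand order swapped so all four rounds update
# the proper halves of the state.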
for($i=4;$i<16-3;$i++) {
&movdqa ($Wi,&QWP($i*16-0x80,$K256));
&paddd ($Wi,@MSG[0]);
&sha256msg2 (@MSG[1],@MSG[0]);
&sha256rnds2 ($CDGH,$ABEF); # 16-19...
&pshufd ($Wi,$Wi,0x0e);
&movdqa ($TMP,@MSG[1]);
&palignr ($TMP,@MSG[0],4);
&paddd (@MSG[2],$TMP);
&sha256msg1 (@MSG[3],@MSG[0]);
&sha256rnds2 ($ABEF,$CDGH);

push(@MSG,shift(@MSG));
}

&movdqa ($Wi,&QWP(13*16-0x80,$K256));
&paddd ($Wi,@MSG[0]);
&sha256msg2 (@MSG[1],@MSG[0]);
&sha256rnds2 ($CDGH,$ABEF); # 52-55
&pshufd ($Wi,$Wi,0x0e);
&movdqa ($TMP,@MSG[1]);
&palignr ($TMP,@MSG[0],4);
&sha256rnds2 ($ABEF,$CDGH);
&paddd (@MSG[2],$TMP);
&movdqa ($Wi,&QWP(14*16-0x80,$K256));
&paddd ($Wi,@MSG[1]);
&sha256rnds2 ($CDGH,$ABEF); # 56-59
&pshufd ($Wi,$Wi,0x0e);
&sha256msg2 (@MSG[2],@MSG[1]);
&movdqa ($TMP,&QWP(0x100-0x80,$K256)); # byte swap mask
&sha256rnds2 ($ABEF,$CDGH);

&movdqa ($Wi,&QWP(15*16-0x80,$K256));
&paddd ($Wi,@MSG[2]);
&sha256rnds2 ($CDGH,$ABEF); # 60-63
&pshufd ($Wi,$Wi,0x0e);
&sha256rnds2 ($ABEF,$CDGH);

&paddd ($CDGH,&QWP(16,"esp"));
&paddd ($ABEF,&QWP(0,"esp"));
&jnz (&label("loop_shaext"));

&pshufd ($CDGH,$CDGH,0xb1); # DCHG
&pshufd ($TMP,$ABEF,0x1b); # FEBA
&pshufd ($ABEF,$ABEF,0xb1); # BAFE
&punpckhqdq ($ABEF,$CDGH); # DCBA
&palignr ($CDGH,$TMP,8); # HGFE

&mov ("esp",&DWP(32+12,"esp")); # restore sp
&movdqu (&QWP(0,$ctx),$ABEF);
&movdqu (&QWP(16,$ctx),$CDGH);
my @X = map("xmm$_",(0..3));
my ($t0,$t1,$t2,$t3) = map("xmm$_",(4..7));

&set_label("SSSE3",32);
&lea ("esp",&DWP(-96,"esp"));
# copy ctx->h[0-7] to A,B,C,D,E,F,G,H on stack
&mov ($AH[0],&DWP(0,"esi"));
&mov ($AH[1],&DWP(4,"esi"));
&mov ("ecx",&DWP(8,"esi"));
&mov ("edi",&DWP(12,"esi"));
#&mov (&DWP(0,"esp"),$AH[0]);
&mov (&DWP(4,"esp"),$AH[1]);
&xor ($AH[1],"ecx"); # magic
&mov (&DWP(8,"esp"),"ecx");
&mov (&DWP(12,"esp"),"edi");
&mov ($E,&DWP(16,"esi"));
&mov ("edi",&DWP(20,"esi"));
&mov ("ecx",&DWP(24,"esi"));
&mov ("esi",&DWP(28,"esi"));
#&mov (&DWP(16,"esp"),$E);
&mov (&DWP(20,"esp"),"edi");
&mov ("edi",&DWP(96+4,"esp")); # inp
&mov (&DWP(24,"esp"),"ecx");
&mov (&DWP(28,"esp"),"esi");
&movdqa ($t3,&QWP(256,$K256));
&jmp (&label("grand_ssse3"));

&set_label("grand_ssse3",16);
# load input, reverse byte order, add K256[0..15], save to stack
&movdqu (@X[0],&QWP(0,"edi"));
&movdqu (@X[1],&QWP(16,"edi"));
&movdqu (@X[2],&QWP(32,"edi"));
&movdqu (@X[3],&QWP(48,"edi"));
&mov (&DWP(96+4,"esp"),"edi");
&movdqa ($t0,&QWP(0,$K256));
&movdqa ($t1,&QWP(16,$K256));
&movdqa ($t2,&QWP(32,$K256));
&movdqa ($t3,&QWP(48,$K256));
&movdqa (&QWP(32+0,"esp"),$t0);
&movdqa (&QWP(32+16,"esp"),$t1);
&movdqa (&QWP(32+32,"esp"),$t2);
&movdqa (&QWP(32+48,"esp"),$t3);
&jmp (&label("ssse3_00_47"));

&set_label("ssse3_00_47",16);

sub SSSE3_00_47 () {
my $j = shift;
my $body = shift;
my @X = @_;
my @insns = (&$body,&$body,&$body,&$body); # 120 instructions

eval(shift(@insns)); # @
&palignr ($t0,@X[0],4); # X[1..4]
eval(shift(@insns)); # @
&palignr ($t3,@X[2],4); # X[9..12]
eval(shift(@insns)); # @
eval(shift(@insns)); # @
&paddd (@X[0],$t3); # X[0..3] += X[9..12]
eval(shift(@insns)); # @
&pshufd ($t3,@X[3],0b11111010); # X[14..15]
eval(shift(@insns)); # @
eval(shift(@insns)); # @
eval(shift(@insns)); # @
eval(shift(@insns)); # @
&pxor ($t0,$t1); # sigma0(X[1..4])
eval(shift(@insns)); # @
&paddd (@X[0],$t0); # X[0..3] += sigma0(X[1..4])
eval(shift(@insns)); # @
eval(shift(@insns)); # @
&pshufd ($t3,$t3,0b10000000);
eval(shift(@insns)); # @
eval(shift(@insns)); # @
&paddd (@X[0],$t3); # X[0..1] += sigma1(X[14..15])
eval(shift(@insns)); # @
eval(shift(@insns)); # @
&pshufd ($t3,@X[0],0b01010000); # X[16..17]
eval(shift(@insns)); # @
eval(shift(@insns)); # @
eval(shift(@insns)); # @
&pshufd ($t3,$t3,0b00001000);
eval(shift(@insns)); # @
&movdqa ($t2,&QWP(16*$j,$K256));
eval(shift(@insns)); # @
eval(shift(@insns)); # @
&paddd (@X[0],$t3); # X[2..3] += sigma1(X[16..17])
eval(shift(@insns)); # @

foreach (@insns) { eval; } # remaining instructions
&movdqa (&QWP(32+16*$j,"esp"),$t2);
}
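# SSSE3_00_47 thus software-pipelines the message schedule: one
# four-round slice of the sigma0/sigma1 vector arithmetic is dribbled
# out between the 120 scalar instructions produced by four body_00_15
# rounds, and the pre-added K[i]+X[i] words are parked at 32(%esp) for
# the scalar rounds to consume.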
'&mov ("esi",&off($f));',
'&mov ("edi",&off($g));',
'&xor ("esi","edi");',
'&and ("esi","ecx");',
'&mov (&off($e),"ecx");', # save $E, modulo-scheduled
'&xor ("edi","esi");', # Ch(e,f,g)
'&ror ($E,6);', # T = Sigma1(e)
'&mov ("ecx",$AH[0]);',
'&add ($E,"edi");', # T += Ch(e,f,g)
'&mov ("edi",&off($b));',
'&mov ("esi",$AH[0]);',
'&ror ("ecx",22-13);',
'&mov (&off($a),$AH[0]);', # save $A, modulo-scheduled
'&xor ("ecx",$AH[0]);',
'&xor ($AH[0],"edi");', # a ^= b, (b^c) in next round
'&add ($E,&off($h));', # T += h
'&ror ("ecx",13-2);',
'&and ($AH[1],$AH[0]);', # (b^c) &= (a^b)
'&xor ("ecx","esi");',
'&add ($E,&DWP(32+4*($i&15),"esp"));', # T += K[i]+X[i]
'&xor ($AH[1],"edi");', # h = Maj(a,b,c) = Ch(a^b,c,b)
'&ror ("ecx",2);', # Sigma0(a)
'&add ($AH[1],$E);', # h += T
'&add ($E,&off($d));', # d += T
'&add ($AH[1],"ecx");'. # h += Sigma0(a)

'@AH = reverse(@AH); $i++;' # rotate(a,h)
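# Sigma0(a) = ror(a,2)^ror(a,13)^ror(a,22) is folded into a single
# running register above: ror 9, xor a, ror 11, xor a, ror 2 expands
# to exactly those three rotations, so one temporary replaces three
# separately rotated copies (Sigma1(e) is staged the same way, ending
# in the ror 6 above).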
for ($i=0,$j=0; $j<4; $j++) {
&SSSE3_00_47($j,\&body_00_15,@X);
push(@X,shift(@X)); # rotate(@X)
}
&cmp (&DWP(16*$j,$K256),0x00010203);
&jne (&label("ssse3_00_47"));

for ($i=0; $i<16; ) {
foreach(body_00_15()) { eval; }
}

&mov ("esi",&DWP(96,"esp")); #ctx
#&mov ($AH[0],&DWP(0,"esp"));
&xor ($AH[1],"edi"); #&mov ($AH[1],&DWP(4,"esp"));
#&mov ("edi",&DWP(8,"esp"));
&mov ("ecx",&DWP(12,"esp"));
&add ($AH[0],&DWP(0,"esi"));
&add ($AH[1],&DWP(4,"esi"));
&add ("edi",&DWP(8,"esi"));
&add ("ecx",&DWP(12,"esi"));
&mov (&DWP(0,"esi"),$AH[0]);
&mov (&DWP(4,"esi"),$AH[1]);
&mov (&DWP(8,"esi"),"edi");
&mov (&DWP(12,"esi"),"ecx");
#&mov (&DWP(0,"esp"),$AH[0]);
&mov (&DWP(4,"esp"),$AH[1]);
&xor ($AH[1],"edi"); # magic
&mov (&DWP(8,"esp"),"edi");
&mov (&DWP(12,"esp"),"ecx");
#&mov ($E,&DWP(16,"esp"));
&mov ("edi",&DWP(20,"esp"));
&mov ("ecx",&DWP(24,"esp"));
&add ($E,&DWP(16,"esi"));
&add ("edi",&DWP(20,"esi"));
&add ("ecx",&DWP(24,"esi"));
&mov (&DWP(16,"esi"),$E);
&mov (&DWP(20,"esi"),"edi");
&mov (&DWP(20,"esp"),"edi");
&mov ("edi",&DWP(28,"esp"));
&mov (&DWP(24,"esi"),"ecx");
#&mov (&DWP(16,"esp"),$E);
&add ("edi",&DWP(28,"esi"));
&mov (&DWP(24,"esp"),"ecx");
&mov (&DWP(28,"esi"),"edi");
&mov (&DWP(28,"esp"),"edi");
&mov ("edi",&DWP(96+4,"esp")); # inp

&movdqa ($t3,&QWP(64,$K256));
&sub ($K256,3*64); # rewind K
&cmp ("edi",&DWP(96+8,"esp")); # are we done yet?
&jb (&label("grand_ssse3"));

&mov ("esp",&DWP(96+12,"esp")); # restore sp
&set_label("AVX",32);
&and ("edx",1<<8|1<<3); # check for BMI2+BMI1
&cmp ("edx",1<<8|1<<3);
&je (&label("AVX_BMI"));

&lea ("esp",&DWP(-96,"esp"));
# copy ctx->h[0-7] to A,B,C,D,E,F,G,H on stack
&mov ($AH[0],&DWP(0,"esi"));
&mov ($AH[1],&DWP(4,"esi"));
&mov ("ecx",&DWP(8,"esi"));
&mov ("edi",&DWP(12,"esi"));
#&mov (&DWP(0,"esp"),$AH[0]);
&mov (&DWP(4,"esp"),$AH[1]);
&xor ($AH[1],"ecx"); # magic
&mov (&DWP(8,"esp"),"ecx");
&mov (&DWP(12,"esp"),"edi");
&mov ($E,&DWP(16,"esi"));
&mov ("edi",&DWP(20,"esi"));
&mov ("ecx",&DWP(24,"esi"));
&mov ("esi",&DWP(28,"esi"));
#&mov (&DWP(16,"esp"),$E);
&mov (&DWP(20,"esp"),"edi");
&mov ("edi",&DWP(96+4,"esp")); # inp
&mov (&DWP(24,"esp"),"ecx");
&mov (&DWP(28,"esp"),"esi");
&vmovdqa ($t3,&QWP(256,$K256));
&jmp (&label("grand_avx"));

&set_label("grand_avx",32);
# load input, reverse byte order, add K256[0..15], save to stack
&vmovdqu (@X[0],&QWP(0,"edi"));
&vmovdqu (@X[1],&QWP(16,"edi"));
&vmovdqu (@X[2],&QWP(32,"edi"));
&vmovdqu (@X[3],&QWP(48,"edi"));
&vpshufb (@X[0],@X[0],$t3);
&mov (&DWP(96+4,"esp"),"edi");
&vpshufb (@X[1],@X[1],$t3);
&vpshufb (@X[2],@X[2],$t3);
&vpaddd ($t0,@X[0],&QWP(0,$K256));
&vpshufb (@X[3],@X[3],$t3);
&vpaddd ($t1,@X[1],&QWP(16,$K256));
&vpaddd ($t2,@X[2],&QWP(32,$K256));
&vpaddd ($t3,@X[3],&QWP(48,$K256));
&vmovdqa (&QWP(32+0,"esp"),$t0);
&vmovdqa (&QWP(32+16,"esp"),$t1);
&vmovdqa (&QWP(32+32,"esp"),$t2);
&vmovdqa (&QWP(32+48,"esp"),$t3);
&jmp (&label("avx_00_47"));

&set_label("avx_00_47",16);
sub Xupdate_AVX () {
	(
	'&vpalignr ($t0,@X[1],@X[0],4);',	# X[1..4]
	'&vpalignr ($t3,@X[3],@X[2],4);',	# X[9..12]
	'&vpsrld ($t2,$t0,7);',
	'&vpaddd (@X[0],@X[0],$t3);',		# X[0..3] += X[9..12]
	'&vpsrld ($t3,$t0,3);',
	'&vpslld ($t1,$t0,14);',
	'&vpxor ($t0,$t3,$t2);',
	'&vpshufd ($t3,@X[3],0b11111010)',	# X[14..15]
	'&vpsrld ($t2,$t2,18-7);',
	'&vpxor ($t0,$t0,$t1);',
	'&vpslld ($t1,$t1,25-14);',
	'&vpxor ($t0,$t0,$t2);',
	'&vpsrld ($t2,$t3,10);',
	'&vpxor ($t0,$t0,$t1);',		# sigma0(X[1..4])
	'&vpsrlq ($t1,$t3,17);',
	'&vpaddd (@X[0],@X[0],$t0);',		# X[0..3] += sigma0(X[1..4])
	'&vpxor ($t2,$t2,$t1);',
	'&vpsrlq ($t3,$t3,19);',
	'&vpxor ($t2,$t2,$t3);',		# sigma1(X[14..15])
	'&vpshufd ($t3,$t2,0b10000100);',
	'&vpsrldq ($t3,$t3,8);',
	'&vpaddd (@X[0],@X[0],$t3);',		# X[0..1] += sigma1(X[14..15])
	'&vpshufd ($t3,@X[0],0b01010000)',	# X[16..17]
	'&vpsrld ($t2,$t3,10);',
	'&vpsrlq ($t1,$t3,17);',
	'&vpxor ($t2,$t2,$t1);',
	'&vpsrlq ($t3,$t3,19);',
	'&vpxor ($t2,$t2,$t3);',		# sigma1(X[16..17])
	'&vpshufd ($t3,$t2,0b11101000);',
	'&vpslldq ($t3,$t3,8);',
	'&vpaddd (@X[0],@X[0],$t3);'		# X[2..3] += sigma1(X[16..17])
	);
}
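# Scalar reference for the vector schedule above: sigma0(x) =
# ror(x,7)^ror(x,18)^(x>>3) and sigma1(x) = ror(x,17)^ror(x,19)^(x>>10).
# SSE/AVX have no packed rotate, so ror(x,r) is synthesized as
# (x>>r)^(x<<(32-r)): the vpslld by 14 and the follow-up vpslld by
# 25-14 supply the 32-18 and 32-7 left-shift halves of ror 18 and
# ror 7, pairing with the vpsrld by 7 and 18-7 right-shift halves.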
local *ror = sub { &shrd(@_[0],@_) };

sub AVX_00_47 () {
my $j = shift;
my $body = shift;
my @X = @_;
my @insns = (&$body,&$body,&$body,&$body); # 120 instructions
my $insn;

	foreach (Xupdate_AVX()) { # 31 instructions
	    eval;			# vector instruction
	    eval(shift(@insns));
	    eval(shift(@insns));
	    eval($insn = shift(@insns));
	    eval(shift(@insns)) if ($insn =~ /rorx/ && @insns[0] =~ /rorx/);
	}
	&vpaddd ($t2,@X[0],&QWP(16*$j,$K256));
	foreach (@insns) { eval; } # remaining instructions
	&vmovdqa (&QWP(32+16*$j,"esp"),$t2);
}

for ($i=0,$j=0; $j<4; $j++) {
&AVX_00_47($j,\&body_00_15,@X);
push(@X,shift(@X)); # rotate(@X)
}
&cmp (&DWP(16*$j,$K256),0x00010203);
&jne (&label("avx_00_47"));

for ($i=0; $i<16; ) {
foreach(body_00_15()) { eval; }
}
&mov ("esi",&DWP(96,"esp")); #ctx
#&mov ($AH[0],&DWP(0,"esp"));
&xor ($AH[1],"edi"); #&mov ($AH[1],&DWP(4,"esp"));
#&mov ("edi",&DWP(8,"esp"));
&mov ("ecx",&DWP(12,"esp"));
&add ($AH[0],&DWP(0,"esi"));
&add ($AH[1],&DWP(4,"esi"));
&add ("edi",&DWP(8,"esi"));
&add ("ecx",&DWP(12,"esi"));
&mov (&DWP(0,"esi"),$AH[0]);
&mov (&DWP(4,"esi"),$AH[1]);
&mov (&DWP(8,"esi"),"edi");
&mov (&DWP(12,"esi"),"ecx");
#&mov (&DWP(0,"esp"),$AH[0]);
&mov (&DWP(4,"esp"),$AH[1]);
&xor ($AH[1],"edi"); # magic
&mov (&DWP(8,"esp"),"edi");
&mov (&DWP(12,"esp"),"ecx");
#&mov ($E,&DWP(16,"esp"));
&mov ("edi",&DWP(20,"esp"));
&mov ("ecx",&DWP(24,"esp"));
&add ($E,&DWP(16,"esi"));
&add ("edi",&DWP(20,"esi"));
&add ("ecx",&DWP(24,"esi"));
&mov (&DWP(16,"esi"),$E);
&mov (&DWP(20,"esi"),"edi");
&mov (&DWP(20,"esp"),"edi");
&mov ("edi",&DWP(28,"esp"));
&mov (&DWP(24,"esi"),"ecx");
#&mov (&DWP(16,"esp"),$E);
&add ("edi",&DWP(28,"esi"));
&mov (&DWP(24,"esp"),"ecx");
&mov (&DWP(28,"esi"),"edi");
&mov (&DWP(28,"esp"),"edi");
&mov ("edi",&DWP(96+4,"esp")); # inp

&vmovdqa ($t3,&QWP(64,$K256));
&sub ($K256,3*64); # rewind K
&cmp ("edi",&DWP(96+8,"esp")); # are we done yet?
&jb (&label("grand_avx"));

&mov ("esp",&DWP(96+12,"esp")); # restore sp
sub bodyx_00_15 () { # +10%
	(
	'&rorx ("ecx",$E,6)',
	'&rorx ("esi",$E,11)',
	'&mov (&off($e),$E)', # save $E, modulo-scheduled
	'&rorx ("edi",$E,25)',
	'&xor ("ecx","esi")',
	'&andn ("esi",$E,&off($g))',
	'&xor ("ecx","edi")', # Sigma1(e)
	'&and ($E,&off($f))',
	'&mov (&off($a),$AH[0]);', # save $A, modulo-scheduled
	'&or ($E,"esi")', # T = Ch(e,f,g)

	'&rorx ("edi",$AH[0],2)',
	'&rorx ("esi",$AH[0],13)',
	'&lea ($E,&DWP(0,$E,"ecx"))', # T += Sigma1(e)
	'&rorx ("ecx",$AH[0],22)',
	'&xor ("esi","edi")',
	'&mov ("edi",&off($b))',
	'&xor ("ecx","esi")', # Sigma0(a)

	'&xor ($AH[0],"edi")', # a ^= b, (b^c) in next round
	'&add ($E,&off($h))', # T += h
	'&and ($AH[1],$AH[0])', # (b^c) &= (a^b)
	'&add ($E,&DWP(32+4*($i&15),"esp"))', # T += K[i]+X[i]
	'&xor ($AH[1],"edi")', # h = Maj(a,b,c) = Ch(a^b,c,b)

	'&add ("ecx",$E)', # h += T
	'&add ($E,&off($d))', # d += T
	'&lea ($AH[1],&DWP(0,$AH[1],"ecx"));'. # h += Sigma0(a)

	'@AH = reverse(@AH); $i++;' # rotate(a,h)
	);
}
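# The BMI round body gets Ch(e,f,g) = (e&f)|(~e&g) almost for free:
# andn computes ~e&g while the and computes e&f, and since the two
# halves can never overlap, a plain or combines them. rorx likewise
# supplies each Sigma rotation in a single instruction, without the
# staged ror/xor chains of the SSSE3 body and without touching the
# flags.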
&set_label("AVX_BMI",32);
&lea ("esp",&DWP(-96,"esp"));
# copy ctx->h[0-7] to A,B,C,D,E,F,G,H on stack
&mov ($AH[0],&DWP(0,"esi"));
&mov ($AH[1],&DWP(4,"esi"));
&mov ("ecx",&DWP(8,"esi"));
&mov ("edi",&DWP(12,"esi"));
#&mov (&DWP(0,"esp"),$AH[0]);
&mov (&DWP(4,"esp"),$AH[1]);
&xor ($AH[1],"ecx"); # magic
&mov (&DWP(8,"esp"),"ecx");
&mov (&DWP(12,"esp"),"edi");
&mov ($E,&DWP(16,"esi"));
&mov ("edi",&DWP(20,"esi"));
&mov ("ecx",&DWP(24,"esi"));
&mov ("esi",&DWP(28,"esi"));
#&mov (&DWP(16,"esp"),$E);
&mov (&DWP(20,"esp"),"edi");
&mov ("edi",&DWP(96+4,"esp")); # inp
&mov (&DWP(24,"esp"),"ecx");
&mov (&DWP(28,"esp"),"esi");
&vmovdqa ($t3,&QWP(256,$K256));
&jmp (&label("grand_avx_bmi"));

&set_label("grand_avx_bmi",32);
# load input, reverse byte order, add K256[0..15], save to stack
&vmovdqu (@X[0],&QWP(0,"edi"));
&vmovdqu (@X[1],&QWP(16,"edi"));
&vmovdqu (@X[2],&QWP(32,"edi"));
&vmovdqu (@X[3],&QWP(48,"edi"));
&vpshufb (@X[0],@X[0],$t3);
&mov (&DWP(96+4,"esp"),"edi");
&vpshufb (@X[1],@X[1],$t3);
&vpshufb (@X[2],@X[2],$t3);
&vpaddd ($t0,@X[0],&QWP(0,$K256));
&vpshufb (@X[3],@X[3],$t3);
&vpaddd ($t1,@X[1],&QWP(16,$K256));
&vpaddd ($t2,@X[2],&QWP(32,$K256));
&vpaddd ($t3,@X[3],&QWP(48,$K256));
&vmovdqa (&QWP(32+0,"esp"),$t0);
&vmovdqa (&QWP(32+16,"esp"),$t1);
&vmovdqa (&QWP(32+32,"esp"),$t2);
&vmovdqa (&QWP(32+48,"esp"),$t3);
&jmp (&label("avx_bmi_00_47"));

&set_label("avx_bmi_00_47",16);
for ($i=0,$j=0; $j<4; $j++) {
&AVX_00_47($j,\&bodyx_00_15,@X);
push(@X,shift(@X)); # rotate(@X)
}
&cmp (&DWP(16*$j,$K256),0x00010203);
&jne (&label("avx_bmi_00_47"));

for ($i=0; $i<16; ) {
foreach(bodyx_00_15()) { eval; }
}
&mov ("esi",&DWP(96,"esp")); #ctx
#&mov ($AH[0],&DWP(0,"esp"));
&xor ($AH[1],"edi"); #&mov ($AH[1],&DWP(4,"esp"));
#&mov ("edi",&DWP(8,"esp"));
&mov ("ecx",&DWP(12,"esp"));
&add ($AH[0],&DWP(0,"esi"));
&add ($AH[1],&DWP(4,"esi"));
&add ("edi",&DWP(8,"esi"));
&add ("ecx",&DWP(12,"esi"));
&mov (&DWP(0,"esi"),$AH[0]);
&mov (&DWP(4,"esi"),$AH[1]);
&mov (&DWP(8,"esi"),"edi");
&mov (&DWP(12,"esi"),"ecx");
#&mov (&DWP(0,"esp"),$AH[0]);
&mov (&DWP(4,"esp"),$AH[1]);
&xor ($AH[1],"edi"); # magic
&mov (&DWP(8,"esp"),"edi");
&mov (&DWP(12,"esp"),"ecx");
#&mov ($E,&DWP(16,"esp"));
&mov ("edi",&DWP(20,"esp"));
&mov ("ecx",&DWP(24,"esp"));
&add ($E,&DWP(16,"esi"));
&add ("edi",&DWP(20,"esi"));
&add ("ecx",&DWP(24,"esi"));
&mov (&DWP(16,"esi"),$E);
&mov (&DWP(20,"esi"),"edi");
&mov (&DWP(20,"esp"),"edi");
&mov ("edi",&DWP(28,"esp"));
&mov (&DWP(24,"esi"),"ecx");
#&mov (&DWP(16,"esp"),$E);
&add ("edi",&DWP(28,"esi"));
&mov (&DWP(24,"esp"),"ecx");
&mov (&DWP(28,"esi"),"edi");
&mov (&DWP(28,"esp"),"edi");
&mov ("edi",&DWP(96+4,"esp")); # inp

&vmovdqa ($t3,&QWP(64,$K256));
&sub ($K256,3*64); # rewind K
&cmp ("edi",&DWP(96+8,"esp")); # are we done yet?
&jb (&label("grand_avx_bmi"));

&mov ("esp",&DWP(96+12,"esp")); # restore sp

&function_end_B("sha256_block_data_order");