3 # ====================================================================
4 # Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
5 # project. The module is, however, dual licensed under OpenSSL and
6 # CRYPTOGAMS licenses depending on where you obtain it. For further
7 # details see http://www.openssl.org/~appro/cryptogams/.
8 # ====================================================================
# Montgomery multiplication routine for x86_64. While it gives a modest
# 9% improvement of rsa4096 sign on Opteron, rsa512 sign runs more
# than twice as fast. The most common case, rsa1024 sign, is improved
# by a respectable 50%. It remains to be seen whether loop unrolling and
# a dedicated squaring routine can provide further improvement...
# Add a dedicated squaring procedure. Performance improvement varies
# from platform to platform, but on average it's ~5%/15%/25%/33%
22 # for 512-/1024-/2048-/4096-bit RSA *sign* benchmarks respectively.
# Unroll and modulo-schedule inner loops in such a manner that they
27 # are "fallen through" for input lengths of 8, which is critical for
28 # 1024-bit RSA *sign*. Average performance improvement in comparison
29 # to *initial* version of this module from 2005 is ~0%/30%/40%/45%
30 # for 512-/1024-/2048-/4096-bit RSA *sign* benchmarks respectively.
# Optimize reduction in the squaring procedure and improve 1024+-bit RSA
# sign performance by 10-16% on Intel Sandy Bridge and later
# (virtually the same on non-Intel processors).
40 # Add MULX/ADOX/ADCX code path.
44 if ($flavour =~ /\./) { $output = $flavour; undef $flavour; }
46 $win64=0; $win64=1 if ($flavour =~ /[nm]asm|mingw64/ || $output =~ /\.asm$/);
48 $0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
49 ( $xlate="${dir}x86_64-xlate.pl" and -f $xlate ) or
50 ( $xlate="${dir}../../perlasm/x86_64-xlate.pl" and -f $xlate) or
51 die "can't locate x86_64-xlate.pl";
53 open OUT,"| \"$^X\" $xlate $flavour $output";
56 if (`$ENV{CC} -Wa,-v -c -o /dev/null -x assembler /dev/null 2>&1`
57 =~ /GNU assembler version ([2-9]\.[0-9]+)/) {
61 if (!$addx && $win64 && ($flavour =~ /nasm/ || $ENV{ASM} =~ /nasm/) &&
62 `nasm -v 2>&1` =~ /NASM version ([2-9]\.[0-9]+)/) {
66 if (!$addx && $win64 && ($flavour =~ /masm/ || $ENV{ASM} =~ /ml64/) &&
67 `ml64 2>&1` =~ /Version ([0-9]+)\./) {
72 $rp="%rdi"; # BN_ULONG *rp,
73 $ap="%rsi"; # const BN_ULONG *ap,
74 $bp="%rdx"; # const BN_ULONG *bp,
75 $np="%rcx"; # const BN_ULONG *np,
76 $n0="%r8"; # const BN_ULONG *n0,
77 $num="%r9"; # int num);
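# The register assignments above correspond to the familiar prototype
#
#	int bn_mul_mont(BN_ULONG *rp, const BN_ULONG *ap, const BN_ULONG *bp,
#			const BN_ULONG *np, const BN_ULONG *n0, int num);
#
# i.e. rp[] = ap[]*bp[]*R^-1 mod np[] with R=2^(64*num) and
# n0[0] = -np[0]^-1 mod 2^64.  The sub below is a minimal word-by-word
# reference model of that computation, illustrative only and never called
# by this generator; limbs are little-endian 64-bit words passed as
# Math::BigInt objects, $n0 is a single Math::BigInt word.

use Math::BigInt;

sub bn_mul_mont_ref {
    my ($ap,$bp,$np,$n0,$num)=@_;
    my $mask = Math::BigInt->new(2)->bpow(64)->bsub(1);
    my @tp   = map { Math::BigInt->bzero() } (0..$num);	# t[num+1]

    for (my $i=0; $i<$num; $i++) {
	my $m1    = ($tp[0] + $ap->[0]*$bp->[$i]) * $n0 & $mask; # "tp[0]"*n0
	my $carry = Math::BigInt->bzero();
	for (my $j=0; $j<$num; $j++) {		# t[] += ap[]*bp[i] + np[]*m1
	    my $acc = $tp[$j] + $ap->[$j]*$bp->[$i] + $np->[$j]*$m1 + $carry;
	    $tp[$j] = $acc & $mask;
	    $carry  = $acc >> 64;
	}
	$tp[$num] += $carry;
	shift(@tp); push(@tp,Math::BigInt->bzero());	# t[0] is 0, divide by 2^64
    }

    my ($t,$n) = (Math::BigInt->bzero(),Math::BigInt->bzero());
    for (my $j=$num;   $j>=0; $j--) { $t = ($t<<64) + $tp[$j];   }
    for (my $j=$num-1; $j>=0; $j--) { $n = ($n<<64) + $np->[$j]; }
    $t -= $n if ($t >= $n);			# final conditional subtraction
    return $t;					# result as one Math::BigInt
}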
89 .extern OPENSSL_ia32cap_P
92 .type bn_mul_mont,\@function,6
100 $code.=<<___ if ($addx);
101 mov OPENSSL_ia32cap_P+8(%rip),%r11d
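# OPENSSL_ia32cap_P+8 is the third 32-bit word of the capability vector,
# i.e. CPUID.(EAX=7,ECX=0):EBX, which carries the BMI2 (MULX) and ADX
# (ADCX/ADOX) feature bits that are tested before taking the mulx code path.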
123 lea (%rsp,%r10,8),%rsp # tp=alloca(8*(num+2))
124 and \$-1024,%rsp # minimize TLB usage
126 mov %r11,8(%rsp,$num,8) # tp[num+1]=%rsp
128 mov $bp,%r12 # reassign $bp
132 mov ($n0),$n0 # pull n0[0] value
133 mov ($bp),$m0 # m0=bp[0]
140 mulq $m0 # ap[0]*bp[0]
144 imulq $lo0,$m1 # "tp[0]"*n0
148 add %rax,$lo0 # discarded
161 add $hi0,$hi1 # np[j]*m1+ap[j]*bp[0]
164 mov $hi1,-16(%rsp,$j,8) # tp[j-1]
168 mulq $m0 # ap[j]*bp[0]
180 mov ($ap),%rax # ap[0]
182 add $hi0,$hi1 # np[j]*m1+ap[j]*bp[0]
184 mov $hi1,-16(%rsp,$j,8) # tp[j-1]
191 mov $hi1,-8(%rsp,$num,8)
192 mov %rdx,(%rsp,$num,8) # store upmost overflow bit
198 mov ($bp,$i,8),$m0 # m0=bp[i]
202 mulq $m0 # ap[0]*bp[i]
203 add %rax,$lo0 # ap[0]*bp[i]+tp[0]
207 imulq $lo0,$m1 # tp[0]*n0
211 add %rax,$lo0 # discarded
214 mov 8(%rsp),$lo0 # tp[1]
225 add $lo0,$hi1 # np[j]*m1+ap[j]*bp[i]+tp[j]
228 mov $hi1,-16(%rsp,$j,8) # tp[j-1]
232 mulq $m0 # ap[j]*bp[i]
236 add $hi0,$lo0 # ap[j]*bp[i]+tp[j]
246 mov ($ap),%rax # ap[0]
248 add $lo0,$hi1 # np[j]*m1+ap[j]*bp[i]+tp[j]
251 mov $hi1,-16(%rsp,$j,8) # tp[j-1]
257 add $lo0,$hi1 # pull upmost overflow bit
259 mov $hi1,-8(%rsp,$num,8)
260 mov %rdx,(%rsp,$num,8) # store upmost overflow bit
266 xor $i,$i # i=0 and clear CF!
267 mov (%rsp),%rax # tp[0]
268 lea (%rsp),$ap # borrow ap for tp
272 .Lsub: sbb ($np,$i,8),%rax
273 mov %rax,($rp,$i,8) # rp[i]=tp[i]-np[i]
274 mov 8($ap,$i,8),%rax # tp[i+1]
	dec	$j			# doesn't affect CF!
279 sbb \$0,%rax # handle upmost overflow bit
286 or $np,$ap # ap=borrow?tp:rp
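	# (the borrow out of the .Lsub loop, expanded by the sbb above into an
	#  all-ones/all-zeros mask, selects branch-free between the original
	#  tp[] and the subtracted copy already written to rp[]; the loop below
	#  copies from whichever was chosen while zapping the temporary vector)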
288 .Lcopy: # copy or in-place refresh
290 mov $i,(%rsp,$i,8) # zap temporary vector
291 mov %rax,($rp,$i,8) # rp[i]=tp[i]
296 mov 8(%rsp,$num,8),%rsi # restore %rsp
307 .size bn_mul_mont,.-bn_mul_mont
310 my @A=("%r10","%r11");
311 my @N=("%r13","%rdi");
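# bn_mul4x_mont implements the same word-by-word algorithm as bn_mul_mont
# above, unrolled to process four limbs of ap[]/np[] per inner-loop
# iteration; the @A/@N register pairs hold the alternating ap[j]*bp[i] and
# np[j]*m1 partial products so the two carry chains can be modulo-scheduled
# against one another.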
313 .type bn_mul4x_mont,\@function,6
318 $code.=<<___ if ($addx);
335 lea (%rsp,%r10,8),%rsp # tp=alloca(8*(num+4))
336 and \$-1024,%rsp # minimize TLB usage
338 mov %r11,8(%rsp,$num,8) # tp[num+1]=%rsp
340 mov $rp,16(%rsp,$num,8) # tp[num+2]=$rp
341 mov %rdx,%r12 # reassign $bp
345 mov ($n0),$n0 # pull n0[0] value
346 mov ($bp),$m0 # m0=bp[0]
353 mulq $m0 # ap[0]*bp[0]
357 imulq $A[0],$m1 # "tp[0]"*n0
361 add %rax,$A[0] # discarded
384 mulq $m0 # ap[j]*bp[0]
386 mov -16($np,$j,8),%rax
392 mov -8($ap,$j,8),%rax
394 add $A[0],$N[0] # np[j]*m1+ap[j]*bp[0]
396 mov $N[0],-24(%rsp,$j,8) # tp[j-1]
399 mulq $m0 # ap[j]*bp[0]
401 mov -8($np,$j,8),%rax
409 add $A[1],$N[1] # np[j]*m1+ap[j]*bp[0]
411 mov $N[1],-16(%rsp,$j,8) # tp[j-1]
414 mulq $m0 # ap[j]*bp[0]
424 add $A[0],$N[0] # np[j]*m1+ap[j]*bp[0]
426 mov $N[0],-8(%rsp,$j,8) # tp[j-1]
429 mulq $m0 # ap[j]*bp[0]
438 mov -16($ap,$j,8),%rax
440 add $A[1],$N[1] # np[j]*m1+ap[j]*bp[0]
442 mov $N[1],-32(%rsp,$j,8) # tp[j-1]
447 mulq $m0 # ap[j]*bp[0]
449 mov -16($np,$j,8),%rax
455 mov -8($ap,$j,8),%rax
457 add $A[0],$N[0] # np[j]*m1+ap[j]*bp[0]
459 mov $N[0],-24(%rsp,$j,8) # tp[j-1]
462 mulq $m0 # ap[j]*bp[0]
464 mov -8($np,$j,8),%rax
470 mov ($ap),%rax # ap[0]
472 add $A[1],$N[1] # np[j]*m1+ap[j]*bp[0]
474 mov $N[1],-16(%rsp,$j,8) # tp[j-1]
480 mov $N[0],-8(%rsp,$j,8)
481 mov $N[1],(%rsp,$j,8) # store upmost overflow bit
486 mov ($bp,$i,8),$m0 # m0=bp[i]
490 mulq $m0 # ap[0]*bp[i]
491 add %rax,$A[0] # ap[0]*bp[i]+tp[0]
495 imulq $A[0],$m1 # tp[0]*n0
499 add %rax,$A[0] # "$N[0]", discarded
504 mulq $m0 # ap[j]*bp[i]
508 add 8(%rsp),$A[1] # +tp[1]
516 add $A[1],$N[1] # np[j]*m1+ap[j]*bp[i]+tp[j]
519 mov $N[1],(%rsp) # tp[j-1]
524 mulq $m0 # ap[j]*bp[i]
526 mov -16($np,$j,8),%rax
528 add -16(%rsp,$j,8),$A[0] # ap[j]*bp[i]+tp[j]
534 mov -8($ap,$j,8),%rax
538 mov $N[0],-24(%rsp,$j,8) # tp[j-1]
541 mulq $m0 # ap[j]*bp[i]
543 mov -8($np,$j,8),%rax
545 add -8(%rsp,$j,8),$A[1]
555 mov $N[1],-16(%rsp,$j,8) # tp[j-1]
558 mulq $m0 # ap[j]*bp[i]
562 add (%rsp,$j,8),$A[0] # ap[j]*bp[i]+tp[j]
572 mov $N[0],-8(%rsp,$j,8) # tp[j-1]
575 mulq $m0 # ap[j]*bp[i]
579 add 8(%rsp,$j,8),$A[1]
586 mov -16($ap,$j,8),%rax
590 mov $N[1],-32(%rsp,$j,8) # tp[j-1]
595 mulq $m0 # ap[j]*bp[i]
597 mov -16($np,$j,8),%rax
599 add -16(%rsp,$j,8),$A[0] # ap[j]*bp[i]+tp[j]
605 mov -8($ap,$j,8),%rax
609 mov $N[0],-24(%rsp,$j,8) # tp[j-1]
612 mulq $m0 # ap[j]*bp[i]
614 mov -8($np,$j,8),%rax
616 add -8(%rsp,$j,8),$A[1]
623 mov ($ap),%rax # ap[0]
627 mov $N[1],-16(%rsp,$j,8) # tp[j-1]
633 add (%rsp,$num,8),$N[0] # pull upmost overflow bit
635 mov $N[0],-8(%rsp,$j,8)
636 mov $N[1],(%rsp,$j,8) # store upmost overflow bit
642 my @ri=("%rax","%rdx",$m0,$m1);
644 mov 16(%rsp,$num,8),$rp # restore $rp
645 mov 0(%rsp),@ri[0] # tp[0]
647 mov 8(%rsp),@ri[1] # tp[1]
648 shr \$2,$num # num/=4
649 lea (%rsp),$ap # borrow ap for tp
650 xor $i,$i # i=0 and clear CF!
653 mov 16($ap),@ri[2] # tp[2]
654 mov 24($ap),@ri[3] # tp[3]
656 lea -1($num),$j # j=num/4-1
660 mov @ri[0],0($rp,$i,8) # rp[i]=tp[i]-np[i]
661 mov @ri[1],8($rp,$i,8) # rp[i]=tp[i]-np[i]
662 sbb 16($np,$i,8),@ri[2]
663 mov 32($ap,$i,8),@ri[0] # tp[i+1]
664 mov 40($ap,$i,8),@ri[1]
665 sbb 24($np,$i,8),@ri[3]
666 mov @ri[2],16($rp,$i,8) # rp[i]=tp[i]-np[i]
667 mov @ri[3],24($rp,$i,8) # rp[i]=tp[i]-np[i]
668 sbb 32($np,$i,8),@ri[0]
669 mov 48($ap,$i,8),@ri[2]
670 mov 56($ap,$i,8),@ri[3]
671 sbb 40($np,$i,8),@ri[1]
	dec	$j			# doesn't affect CF!
676 mov @ri[0],0($rp,$i,8) # rp[i]=tp[i]-np[i]
677 mov 32($ap,$i,8),@ri[0] # load overflow bit
678 sbb 16($np,$i,8),@ri[2]
679 mov @ri[1],8($rp,$i,8) # rp[i]=tp[i]-np[i]
680 sbb 24($np,$i,8),@ri[3]
681 mov @ri[2],16($rp,$i,8) # rp[i]=tp[i]-np[i]
683 sbb \$0,@ri[0] # handle upmost overflow bit
684 mov @ri[3],24($rp,$i,8) # rp[i]=tp[i]-np[i]
691 or $np,$ap # ap=borrow?tp:rp
698 .Lcopy4x: # copy or in-place refresh
699 movdqu 16($ap,$i),%xmm2
700 movdqu 32($ap,$i),%xmm1
701 movdqa %xmm0,16(%rsp,$i)
702 movdqu %xmm2,16($rp,$i)
703 movdqa %xmm0,32(%rsp,$i)
704 movdqu %xmm1,32($rp,$i)
710 movdqu 16($ap,$i),%xmm2
711 movdqa %xmm0,16(%rsp,$i)
712 movdqu %xmm2,16($rp,$i)
716 mov 8(%rsp,$num,8),%rsi # restore %rsp
727 .size bn_mul4x_mont,.-bn_mul4x_mont
731 ######################################################################
732 # void bn_sqr8x_mont(
733 my $rptr="%rdi"; # const BN_ULONG *rptr,
734 my $aptr="%rsi"; # const BN_ULONG *aptr,
735 my $bptr="%rdx"; # not used
736 my $nptr="%rcx"; # const BN_ULONG *nptr,
my $n0  ="%r8";		# const BN_ULONG *n0,
my $num	="%r9";		# int num); has to be divisible by 8
740 my ($i,$j,$tptr)=("%rbp","%rcx",$rptr);
741 my @A0=("%r10","%r11");
742 my @A1=("%r12","%r13");
743 my ($a0,$a1,$ai)=("%r14","%r15","%rbx");
746 .type bn_sqr8x_mont,\@function,6
751 $code.=<<___ if ($addx);
764 shl \$3,${num}d # convert $num to bytes
766 mov %rsp,%r11 # put aside %rsp
767 sub $num,%r10 # -$num
769 lea -72(%rsp,%r10,2),%rsp # alloca(frame+2*$num)
770 and \$-1024,%rsp # minimize TLB usage
771 ##############################################################
774 # +0 saved $num, used in reduction section
775 # +8 &t[2*$num], used in reduction section
782 mov $rptr,32(%rsp) # save $rptr
785 mov %r11, 56(%rsp) # save original %rsp
787 ##############################################################
790 # a) multiply-n-add everything but a[i]*a[i];
791 # b) shift result of a) by 1 to the left and accumulate
792 # a[i]*a[i] products;
794 ##############################################################
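# For instance, with just two limbs (B=2^64):
#
#	(a[1]*B+a[0])^2 = a[0]*a[0] + 2*a[1]*a[0]*B + a[1]*a[1]*B^2
#
# step a) accumulates only the cross product a[1]*a[0] into t[], and
# step b) doubles it with the left shift and folds in the squares
# a[0]*a[0] and a[1]*a[1].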
860 lea 32(%r10),$i # $i=-($num-32)
861 lea ($aptr,$num),$aptr # end of a[] buffer, ($aptr,$i)=&ap[2]
863 mov $num,$j # $j=$num
865 # comments apply to $num==8 case
866 mov -32($aptr,$i),$a0 # a[0]
867 lea 64(%rsp,$num,2),$tptr # end of tp[] buffer, &tp[2*$num]
868 mov -24($aptr,$i),%rax # a[1]
869 lea -32($tptr,$i),$tptr # end of tp[] window, &tp[2*$num-"$i"]
870 mov -16($aptr,$i),$ai # a[2]
874 mov %rax,$A0[0] # a[1]*a[0]
877 mov $A0[0],-24($tptr,$i) # t[1]
883 mov $A0[1],-16($tptr,$i) # t[2]
886 lea -16($i),$j # j=-16
889 mov 8($aptr,$j),$ai # a[3]
891 mov %rax,$A1[0] # a[2]*a[1]+t[3]
897 add %rax,$A0[0] # a[3]*a[0]+a[2]*a[1]+t[3]
903 mov $A0[0],-8($tptr,$j) # t[3]
908 mov ($aptr,$j),$ai # a[4]
910 add %rax,$A1[1] # a[3]*a[1]+t[4]
916 add %rax,$A0[1] # a[4]*a[0]+a[3]*a[1]+t[4]
918 mov 8($aptr,$j),$ai # a[5]
926 add %rax,$A1[0] # a[4]*a[3]+t[5]
928 mov $A0[1],($tptr,$j) # t[4]
933 add %rax,$A0[0] # a[5]*a[2]+a[4]*a[3]+t[5]
935 mov 16($aptr,$j),$ai # a[6]
942 add %rax,$A1[1] # a[5]*a[3]+t[6]
944 mov $A0[0],8($tptr,$j) # t[5]
949 add %rax,$A0[1] # a[6]*a[2]+a[5]*a[3]+t[6]
951 mov 24($aptr,$j),$ai # a[7]
959 add %rax,$A1[0] # a[6]*a[5]+t[7]
961 mov $A0[1],16($tptr,$j) # t[6]
	add	%rax,$A0[0]		# a[7]*a[4]+a[6]*a[5]+t[7]
973 mov $A0[0],-8($tptr,$j) # t[7]
985 mov $A1[1],($tptr) # t[8]
987 mov %rdx,8($tptr) # t[9]
991 .Lsqr4x_outer: # comments apply to $num==6 case
992 mov -32($aptr,$i),$a0 # a[0]
993 lea 64(%rsp,$num,2),$tptr # end of tp[] buffer, &tp[2*$num]
994 mov -24($aptr,$i),%rax # a[1]
995 lea -32($tptr,$i),$tptr # end of tp[] window, &tp[2*$num-"$i"]
996 mov -16($aptr,$i),$ai # a[2]
999 mov -24($tptr,$i),$A0[0] # t[1]
1001 add %rax,$A0[0] # a[1]*a[0]+t[1]
1004 mov $A0[0],-24($tptr,$i) # t[1]
1011 add -16($tptr,$i),$A0[1] # a[2]*a[0]+t[2]
1014 mov $A0[1],-16($tptr,$i) # t[2]
1016 lea -16($i),$j # j=-16
1020 mov 8($aptr,$j),$ai # a[3]
1022 add %rax,$A1[0] # a[2]*a[1]+t[3]
1025 add 8($tptr,$j),$A1[0]
1030 add %rax,$A0[0] # a[3]*a[0]+a[2]*a[1]+t[3]
1036 mov $A0[0],8($tptr,$j) # t[3]
1043 mov ($aptr,$j),$ai # a[4]
1045 add %rax,$A1[1] # a[3]*a[1]+t[4]
1049 add ($tptr,$j),$A1[1]
1053 add %rax,$A0[1] # a[4]*a[0]+a[3]*a[1]+t[4]
1055 mov 8($aptr,$j),$ai # a[5]
1062 add %rax,$A1[0] # a[4]*a[3]+t[5]
1063 mov $A0[1],($tptr,$j) # t[4]
1067 add 8($tptr,$j),$A1[0]
1072 add %rax,$A0[0] # a[5]*a[2]+a[4]*a[3]+t[5]
1078 mov $A0[0],-8($tptr,$j) # t[5], "preloaded t[1]" below
1089 mov $A1[1],($tptr) # t[6], "preloaded t[2]" below
1091 mov %rdx,8($tptr) # t[7], "preloaded t[3]" below
1096 # comments apply to $num==4 case
1097 mov -32($aptr),$a0 # a[0]
1098 lea 64(%rsp,$num,2),$tptr # end of tp[] buffer, &tp[2*$num]
1099 mov -24($aptr),%rax # a[1]
1100 lea -32($tptr,$i),$tptr # end of tp[] window, &tp[2*$num-"$i"]
1101 mov -16($aptr),$ai # a[2]
1105 add %rax,$A0[0] # a[1]*a[0]+t[1], preloaded t[1]
1113 mov $A0[0],-24($tptr) # t[1]
1116 add $A1[1],$A0[1] # a[2]*a[0]+t[2], preloaded t[2]
1117 mov -8($aptr),$ai # a[3]
1121 add %rax,$A1[0] # a[2]*a[1]+t[3], preloaded t[3]
1123 mov $A0[1],-16($tptr) # t[2]
1128 add %rax,$A0[0] # a[3]*a[0]+a[2]*a[1]+t[3]
1134 mov $A0[0],-8($tptr) # t[3]
1138 mov -16($aptr),%rax # a[2]
1143 mov $A1[1],($tptr) # t[4]
1145 mov %rdx,8($tptr) # t[5]
1150 my ($shift,$carry)=($a0,$a1);
1151 my @S=(@A1,$ai,$n0);
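# Conceptually the shift-n-add pass below computes, for each i,
#
#	(t[2*i+1],t[2*i]) = ((t[2*i+1],t[2*i])<<1 | bit shifted out of t[2*i-1])
#			    + a[i]*a[i] + carry from the previous pair
#
# the doubling is done with lea ($shift,reg,2) plus an or of the bit that
# fell out of the lower neighbour, and the 128-bit square a[i]*a[i] is
# added via mul with the carry flag preserved in $carry between pairs.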
1155 sub $num,$i # $i=16-$num
1158 add $A1[0],%rax # t[5]
1160 mov %rax,8($tptr) # t[5]
1161 mov %rdx,16($tptr) # t[6]
1162 mov $carry,24($tptr) # t[7]
1164 mov -16($aptr,$i),%rax # a[0]
1166 xor $A0[0],$A0[0] # t[0]
1167 mov 8($tptr),$A0[1] # t[1]
1169 lea ($shift,$A0[0],2),$S[0] # t[2*i]<<1 | shift
1171 lea ($j,$A0[1],2),$S[1] # t[2*i+1]<<1 |
1173 or $A0[0],$S[1] # | t[2*i]>>63
1174 mov 16($tptr),$A0[0] # t[2*i+2] # prefetch
1175 mov $A0[1],$shift # shift=t[2*i+1]>>63
1176 mul %rax # a[i]*a[i]
1177 neg $carry # mov $carry,cf
1178 mov 24($tptr),$A0[1] # t[2*i+2+1] # prefetch
1180 mov -8($aptr,$i),%rax # a[i+1] # prefetch
1184 lea ($shift,$A0[0],2),$S[2] # t[2*i]<<1 | shift
1186 sbb $carry,$carry # mov cf,$carry
1188 lea ($j,$A0[1],2),$S[3] # t[2*i+1]<<1 |
1190 or $A0[0],$S[3] # | t[2*i]>>63
1191 mov 32($tptr),$A0[0] # t[2*i+2] # prefetch
1192 mov $A0[1],$shift # shift=t[2*i+1]>>63
1193 mul %rax # a[i]*a[i]
1194 neg $carry # mov $carry,cf
1195 mov 40($tptr),$A0[1] # t[2*i+2+1] # prefetch
1197 mov 0($aptr,$i),%rax # a[i+1] # prefetch
1202 sbb $carry,$carry # mov cf,$carry
1204 jmp .Lsqr4x_shift_n_add
1207 .Lsqr4x_shift_n_add:
1208 lea ($shift,$A0[0],2),$S[0] # t[2*i]<<1 | shift
1210 lea ($j,$A0[1],2),$S[1] # t[2*i+1]<<1 |
1212 or $A0[0],$S[1] # | t[2*i]>>63
1213 mov -16($tptr),$A0[0] # t[2*i+2] # prefetch
1214 mov $A0[1],$shift # shift=t[2*i+1]>>63
1215 mul %rax # a[i]*a[i]
1216 neg $carry # mov $carry,cf
1217 mov -8($tptr),$A0[1] # t[2*i+2+1] # prefetch
1219 mov -8($aptr,$i),%rax # a[i+1] # prefetch
1220 mov $S[0],-32($tptr)
1223 lea ($shift,$A0[0],2),$S[2] # t[2*i]<<1 | shift
1224 mov $S[1],-24($tptr)
1225 sbb $carry,$carry # mov cf,$carry
1227 lea ($j,$A0[1],2),$S[3] # t[2*i+1]<<1 |
1229 or $A0[0],$S[3] # | t[2*i]>>63
1230 mov 0($tptr),$A0[0] # t[2*i+2] # prefetch
1231 mov $A0[1],$shift # shift=t[2*i+1]>>63
1232 mul %rax # a[i]*a[i]
1233 neg $carry # mov $carry,cf
1234 mov 8($tptr),$A0[1] # t[2*i+2+1] # prefetch
1236 mov 0($aptr,$i),%rax # a[i+1] # prefetch
1237 mov $S[2],-16($tptr)
1240 lea ($shift,$A0[0],2),$S[0] # t[2*i]<<1 | shift
1242 sbb $carry,$carry # mov cf,$carry
1244 lea ($j,$A0[1],2),$S[1] # t[2*i+1]<<1 |
1246 or $A0[0],$S[1] # | t[2*i]>>63
1247 mov 16($tptr),$A0[0] # t[2*i+2] # prefetch
1248 mov $A0[1],$shift # shift=t[2*i+1]>>63
1249 mul %rax # a[i]*a[i]
1250 neg $carry # mov $carry,cf
1251 mov 24($tptr),$A0[1] # t[2*i+2+1] # prefetch
1253 mov 8($aptr,$i),%rax # a[i+1] # prefetch
1257 lea ($shift,$A0[0],2),$S[2] # t[2*i]<<1 | shift
1259 sbb $carry,$carry # mov cf,$carry
1261 lea ($j,$A0[1],2),$S[3] # t[2*i+1]<<1 |
1263 or $A0[0],$S[3] # | t[2*i]>>63
1264 mov 32($tptr),$A0[0] # t[2*i+2] # prefetch
1265 mov $A0[1],$shift # shift=t[2*i+1]>>63
1266 mul %rax # a[i]*a[i]
1267 neg $carry # mov $carry,cf
1268 mov 40($tptr),$A0[1] # t[2*i+2+1] # prefetch
1270 mov 16($aptr,$i),%rax # a[i+1] # prefetch
1274 sbb $carry,$carry # mov cf,$carry
1277 jnz .Lsqr4x_shift_n_add
1279 lea ($shift,$A0[0],2),$S[0] # t[2*i]<<1 | shift
1281 lea ($j,$A0[1],2),$S[1] # t[2*i+1]<<1 |
1283 or $A0[0],$S[1] # | t[2*i]>>63
1284 mov -16($tptr),$A0[0] # t[2*i+2] # prefetch
1285 mov $A0[1],$shift # shift=t[2*i+1]>>63
1286 mul %rax # a[i]*a[i]
1287 neg $carry # mov $carry,cf
1288 mov -8($tptr),$A0[1] # t[2*i+2+1] # prefetch
1290 mov -8($aptr),%rax # a[i+1] # prefetch
1291 mov $S[0],-32($tptr)
1294 lea ($shift,$A0[0],2),$S[2] # t[2*i]<<1|shift
1295 mov $S[1],-24($tptr)
1296 sbb $carry,$carry # mov cf,$carry
1298 lea ($j,$A0[1],2),$S[3] # t[2*i+1]<<1 |
1300 or $A0[0],$S[3] # | t[2*i]>>63
1301 mul %rax # a[i]*a[i]
1302 neg $carry # mov $carry,cf
1305 mov $S[2],-16($tptr)
1309 ######################################################################
1310 # Montgomery reduction part, "word-by-word" algorithm.
1312 # This new path is inspired by multiple submissions from Intel, by
1313 # Shay Gueron, Vlad Krasnov, Erdinc Ozturk, James Guilford,
1316 my ($nptr,$tptr,$carry,$m0)=("%rbp","%rdi","%rsi","%rbx");
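# In BIGNUM terms each pass of the loop below computes, eight words at a
# time (shown here one word at a time for clarity):
#
#	m  = t[0]*n0 mod 2^64
#	t += m*n			# forces the low word of t to zero
#	t  = t>>64			# drop it
#
# repeated $num times, so that on exit t = a^2 * 2^(-64*$num) mod n, up to
# one final conditional subtraction.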
1319 mov 40(%rsp),$nptr # pull $nptr
1321 lea ($nptr,$num),%rdx # end of n[]
1322 lea 64(%rsp,$num,2),$tptr # end of t[] buffer
1325 mov %rax,($tptr) # clear top-most carry bit
1326 lea 64(%rsp,$num),$tptr # end of initial t[] window
1328 jmp .L8x_reduction_loop
1331 .L8x_reduction_loop:
1332 lea ($tptr,$num),$tptr # start of current t[] window
1341 lea 8*8($tptr),$tptr
1344 imulq 48(%rsp),$m0 # n0*a[0]
1345 mov 8*0($nptr),%rax # n[0]
1352 mov 8*1($nptr),%rax # n[1]
1362 mov $m0,64-8(%rsp,%rcx,8) # put aside n0*a[i]
1371 mov 48(%rsp),$carry # pull n0, borrow $carry
1379 imulq %r8,$carry # modulo-scheduled
1409 mov $carry,$m0 # n0*a[i]
1411 mov 8*0($nptr),%rax # n[0]
1420 lea 8*8($nptr),$nptr
1422 mov 8(%rsp),%rdx # pull end of t[]
1423 cmp 0(%rsp),$nptr # end of n[]?
1434 sbb $carry,$carry # top carry
1436 mov 64+56(%rsp),$m0 # pull n0*a[0]
1446 mov %r8,($tptr) # save result
1455 lea 8($tptr),$tptr # $tptr++
1500 mov 64-16(%rsp,%rcx,8),$m0 # pull n0*a[i]
1504 mov 8*0($nptr),%rax # pull n[0]
1511 lea 8*8($nptr),$nptr
1512 mov 8(%rsp),%rdx # pull end of t[]
1513 cmp 0(%rsp),$nptr # end of n[]?
1514 jae .L8x_tail_done # break out of loop
1516 mov 64+56(%rsp),$m0 # pull n0*a[0]
1518 mov 8*0($nptr),%rax # pull n[0]
1527 sbb $carry,$carry # top carry
1534 add (%rdx),%r8 # can this overflow?
1547 adc \$0,%rax # top-most carry
1549 mov 40(%rsp),$nptr # restore $nptr
1551 mov %r8,8*0($tptr) # store top 512 bits
1553 mov $nptr,$num # $num is %r9, can't be moved upwards
1555 sub 0(%rsp),$num # -$num
1561 lea 8*8($tptr),$tptr
1562 mov %rax,(%rdx) # store top-most carry
1564 cmp %rdx,$tptr # end of t[]?
1565 jb .L8x_reduction_loop
1567 neg $num # restore $num
1570 ##############################################################
1571 # Post-condition, 4x unrolled copy from bn_mul_mont
1574 my ($tptr,$nptr)=("%rbx",$aptr);
1575 my @ri=("%rax","%rdx","%r10","%r11");
1577 mov 64(%rsp,$num),@ri[0] # tp[0]
1578 lea 64(%rsp,$num),$tptr # upper half of t[2*$num] holds result
1579 mov 40(%rsp),$nptr # restore $nptr
1580 shr \$5,$num # num/4
1581 mov 8($tptr),@ri[1] # t[1]
1582 xor $i,$i # i=0 and clear CF!
1584 mov 32(%rsp),$rptr # restore $rptr
1586 mov 16($tptr),@ri[2] # t[2]
1587 mov 24($tptr),@ri[3] # t[3]
1589 lea -1($num),$j # j=num/4-1
1593 mov @ri[0],0($rptr) # rp[i]=tp[i]-np[i]
1594 mov @ri[1],8($rptr) # rp[i]=tp[i]-np[i]
1595 sbb 16($nptr,$i,8),@ri[2]
1596 mov 32($tptr,$i,8),@ri[0] # tp[i+1]
1597 mov 40($tptr,$i,8),@ri[1]
1598 sbb 24($nptr,$i,8),@ri[3]
1599 mov @ri[2],16($rptr) # rp[i]=tp[i]-np[i]
1600 mov @ri[3],24($rptr) # rp[i]=tp[i]-np[i]
1602 sbb 32($nptr,$i,8),@ri[0]
1603 mov 48($tptr,$i,8),@ri[2]
1604 mov 56($tptr,$i,8),@ri[3]
1605 sbb 40($nptr,$i,8),@ri[1]
1607 dec $j # doesn't affect CF!
1610 mov @ri[0],0($rptr) # rp[i]=tp[i]-np[i]
1611 mov 32($tptr,$i,8),@ri[0] # load overflow bit
1612 sbb 16($nptr,$i,8),@ri[2]
1613 mov @ri[1],8($rptr) # rp[i]=tp[i]-np[i]
1614 sbb 24($nptr,$i,8),@ri[3]
1615 mov @ri[2],16($rptr) # rp[i]=tp[i]-np[i]
1617 sbb \$0,@ri[0] # handle upmost overflow bit
1618 mov @ri[3],24($rptr) # rp[i]=tp[i]-np[i]
1619 mov 32(%rsp),$rptr # restore $rptr
1626 or $nptr,$tptr # tp=borrow?tp:rp
1629 lea 64(%rsp,$num,8),$nptr
1630 movdqu ($tptr),%xmm1
1631 lea ($nptr,$num,8),$nptr
1632 movdqa %xmm0,64(%rsp) # zap lower half of temporary vector
1633 movdqa %xmm0,($nptr) # zap upper half of temporary vector
1634 movdqu %xmm1,($rptr)
1637 .Lsqr4x_copy: # copy or in-place refresh
1638 movdqu 16($tptr,$i),%xmm2
1639 movdqu 32($tptr,$i),%xmm1
1640 movdqa %xmm0,80(%rsp,$i) # zap lower half of temporary vector
1641 movdqa %xmm0,96(%rsp,$i) # zap lower half of temporary vector
1642 movdqa %xmm0,16($nptr,$i) # zap upper half of temporary vector
1643 movdqa %xmm0,32($nptr,$i) # zap upper half of temporary vector
1644 movdqu %xmm2,16($rptr,$i)
1645 movdqu %xmm1,32($rptr,$i)
1650 movdqu 16($tptr,$i),%xmm2
1651 movdqa %xmm0,80(%rsp,$i) # zap lower half of temporary vector
1652 movdqa %xmm0,16($nptr,$i) # zap upper half of temporary vector
1653 movdqu %xmm2,16($rptr,$i)
1657 mov 56(%rsp),%rsi # restore %rsp
1668 .size bn_sqr8x_mont,.-bn_sqr8x_mont
1673 my $bp="%rdx"; # original value
1676 .type bn_mulx4x_mont,\@function,6
1687 shl \$3,${num}d # convert $num to bytes
1689 mov %rsp,%r11 # put aside %rsp
1690 sub $num,%r10 # -$num
1692 lea -72(%rsp,%r10),%rsp # alloca(frame+$num+8)
1695 ##############################################################
1698 # +8 off-loaded &b[i]
1707 mov $num,0(%rsp) # save $num
1709 mov %r10,16(%rsp) # end of b[num]
1711 mov $n0, 24(%rsp) # save *n0
1712 mov $rp, 32(%rsp) # save $rp
1713 mov $num,48(%rsp) # inner counter
1714 mov %r11,56(%rsp) # save original %rsp
1720 my ($aptr, $bptr, $nptr, $tptr, $mi, $bi, $zero, $num)=
1721 ("%rsi","%rdi","%rcx","%rbx","%r8","%r9","%rbp","%rax");
1725 mov ($bp),%rdx # b[0], $bp==%rdx actually
1726 lea 64+32(%rsp),$tptr
1728 xor $zero,$zero # of=0,cf=0
1730 mulx 0*8($aptr),$mi,%rax # a[0]*b[0]
1731 mulx 1*8($aptr),%r11,%r14 # a[1]*b[0]
1733 mov $bptr,8(%rsp) # off-load &b[i]
1734 mulx 2*8($aptr),%r12,%r13 # ...
1738 mov $mi,$bptr # borrow $bptr
1739 imulq 24(%rsp),$mi # "t[0]"*n0
1740 xor $zero,$zero # cf=0, of=0
1742 mulx 3*8($aptr),%rax,%r14
1744 lea 4*8($aptr),$aptr
1746 adcx $zero,%r14 # cf=0
1748 mulx 0*8($nptr),%rax,%r10
1749 adcx %rax,$bptr # discarded
1751 mulx 1*8($nptr),%rax,%r11
1754 mulx 2*8($nptr),%rax,%r12
1755 mov 48(%rsp),$bptr # counter value
1756 mov %r10,-4*8($tptr)
1759 mulx 3*8($nptr),%rax,%r15
1762 mov %r11,-3*8($tptr)
1764 adox $zero,%r15 # of=0
1765 lea 4*8($nptr),$nptr
1766 mov %r12,-2*8($tptr)
1772 adcx $zero,%r15 # cf=0, modulo-scheduled
1773 mulx 0*8($aptr),%r10,%rax # a[4]*b[0]
1775 mulx 1*8($aptr),%r11,%r14 # a[5]*b[0]
1777 mulx 2*8($aptr),%r12,%rax # ...
1779 mulx 3*8($aptr),%r13,%r14
1783 adcx $zero,%r14 # cf=0
1784 lea 4*8($aptr),$aptr
1785 lea 4*8($tptr),$tptr
1788 mulx 0*8($nptr),%rax,%r15
1791 mulx 1*8($nptr),%rax,%r15
1794 mulx 2*8($nptr),%rax,%r15
1795 mov %r10,-5*8($tptr)
1797 mov %r11,-4*8($tptr)
1799 mulx 3*8($nptr),%rax,%r15
1801 mov %r12,-3*8($tptr)
1804 lea 4*8($nptr),$nptr
1805 mov %r13,-2*8($tptr)
1807 dec $bptr # of=0, pass cf
1810 mov 0(%rsp),$num # load num
1811 mov 8(%rsp),$bptr # re-load &b[i]
1812 adc $zero,%r15 # modulo-scheduled
1814 sbb %r15,%r15 # top-most carry
1815 mov %r14,-1*8($tptr)
1820 mov ($bptr),%rdx # b[i]
1822 sub $num,$aptr # rewind $aptr
1823 mov %r15,($tptr) # save top-most carry
1826 sub $num,$nptr # rewind $nptr
1827 xor $zero,$zero # cf=0, of=0
1830 mulx 0*8($aptr),$mi,%rax # a[0]*b[i]
1833 mulx 1*8($aptr),%r11,%r14 # a[1]*b[i]
1835 mov $bptr,8(%rsp) # off-load &b[i]
1836 mulx 2*8($aptr),%r12,%r13 # ...
1844 mov $mi,$bptr # borrow $bptr
1845 imulq 24(%rsp),$mi # "t[0]"*n0
1846 xor $zero,$zero # cf=0, of=0
1848 mulx 3*8($aptr),%rax,%r14
1852 adox 3*8($tptr),%r13
1854 lea 4*8($aptr),$aptr
1855 lea 4*8($tptr),$tptr
1858 mulx 0*8($nptr),%rax,%r10
1859 adcx %rax,$bptr # discarded
1861 mulx 1*8($nptr),%rax,%r11
1864 mulx 2*8($nptr),%rax,%r12
1865 mov %r10,-4*8($tptr)
1869 mulx 3*8($nptr),%rax,%r15
1871 mov %r11,-3*8($tptr)
1873 adox $zero,%r15 # of=0
1874 mov 48(%rsp),$bptr # counter value
1876 mov %r12,-2*8($tptr)
1877 lea 4*8($nptr),$nptr
1883 adcx $zero,%r15 # cf=0, modulo-scheduled
1885 mulx 0*8($aptr),%r10,%rax # a[4]*b[i]
1888 mulx 1*8($aptr),%r11,%r14 # a[5]*b[i]
1890 mulx 2*8($aptr),%r12,%rax # ...
1893 mulx 3*8($aptr),%r13,%r14
1895 adcx 2*8($tptr),%r12
1897 adcx 3*8($tptr),%r13
1898 adox $zero,%r14 # of=0
1899 .byte 0x48,0x8d,0xb6,0x20,0x00,0x00,0x00 # lea 4*8($aptr),$aptr
1900 .byte 0x48,0x8d,0x9b,0x20,0x00,0x00,0x00 # lea 4*8($tptr),$tptr
1901 adcx $zero,%r14 # cf=0
1904 mulx 0*8($nptr),%rax,%r15
1907 mulx 1*8($nptr),%rax,%r15
1910 mulx 2*8($nptr),%rax,%r15
1911 mov %r10,-5*8($tptr)
1915 mulx 3*8($nptr),%rax,%r15
1917 mov %r11,-4*8($tptr)
1918 mov %r12,-3*8($tptr)
1921 lea 4*8($nptr),$nptr
1922 mov %r13,-2*8($tptr)
1924 dec $bptr # of=0, pass cf
1927 mov 0(%rsp),$num # load num
1928 mov 8(%rsp),$bptr # re-load &b[i]
1929 adc $zero,%r15 # modulo-scheduled
1930 sub %r10,$zero # pull top-most carry
1932 sbb %r15,%r15 # top-most carry
1933 mov %r14,-1*8($tptr)
1939 mov 32(%rsp),$rptr # restore rp
1944 mov 0*8($nptr,$num),%r8
1945 mov 1*8($nptr,$num),%r9
1947 jmp .Lmulx4x_sub_entry
1951 mov 0*8($nptr,$num),%r8
1952 mov 1*8($nptr,$num),%r9
1955 mov 2*8($nptr,$num),%r10
1958 mov 3*8($nptr,$num),%r11
1965 neg %rdx # mov %rdx,%cf
1968 movdqa %xmm0,($tptr)
1971 movdqa %xmm0,16($tptr)
1972 lea 4*8($tptr),$tptr
1973 sbb %rdx,%rdx # mov %cf,%rdx
1979 lea 4*8($rptr),$rptr
1984 mov 56(%rsp),%rsi # restore %rsp
1995 .size bn_mulx4x_mont,.-bn_mulx4x_mont
1998 ######################################################################
# void bn_sqrx8x_mont(
2000 my $rptr="%rdi"; # const BN_ULONG *rptr,
2001 my $aptr="%rsi"; # const BN_ULONG *aptr,
2002 my $bptr="%rdx"; # not used
2003 my $nptr="%rcx"; # const BN_ULONG *nptr,
my $n0  ="%r8";		# const BN_ULONG *n0,
my $num	="%r9";		# int num); has to be divisible by 8
2007 my ($i,$j,$tptr)=("%rbp","%rcx",$rptr);
2008 my @A0=("%r10","%r11");
2009 my @A1=("%r12","%r13");
2010 my ($a0,$a1,$ai)=("%r14","%r15","%rbx");
2013 .type bn_sqrx8x_mont,\@function,6
2024 shl \$3,${num}d # convert $num to bytes
2026 mov %rsp,%r11 # put aside %rsp
2027 sub $num,%r10 # -$num
2029 lea -64(%rsp,%r10,2),%rsp # alloca(frame+2*$num)
2030 and \$-1024,%rsp # minimize TLB usage
2031 ##############################################################
2034 # +0 saved $num, used in reduction section
2035 # +8 &t[2*$num], used in reduction section
2036 # +16 intermediate carry bit
2037 # +24 top-most carry bit, used in reduction section
2041 movq $rptr,%xmm1 # save $rptr
2042 movq $nptr,%xmm2 # save $nptr
2043 movq %r10, %xmm3 # -$num
2044 movq %r11, %xmm4 # save original %rsp
2047 ##################################################################
2050 # a) multiply-n-add everything but a[i]*a[i];
2051 # b) shift result of a) by 1 to the left and accumulate
2052 # a[i]*a[i] products;
2054 ##################################################################
2055 # a[7]a[7]a[6]a[6]a[5]a[5]a[4]a[4]a[3]a[3]a[2]a[2]a[1]a[1]a[0]a[0]
2086 # a[7]a[7]a[6]a[6]a[5]a[5]a[4]a[4]a[3]a[3]a[2]a[2]a[1]a[1]a[0]a[0]
2089 my ($zero,$carry)=("%rbp","%rcx");
2094 lea ($aptr,$num),$aaptr
2095 mov $num,(%rsp) # save $num
2096 mov $aaptr,8(%rsp) # save end of $aptr
2097 jmp .Lsqr8x_zero_start
2100 movdqa %xmm0,0*8($tptr)
2101 movdqa %xmm0,2*8($tptr)
2102 movdqa %xmm0,4*8($tptr)
2103 movdqa %xmm0,6*8($tptr)
2105 movdqa %xmm0,8*8($tptr)
2106 movdqa %xmm0,10*8($tptr)
2107 movdqa %xmm0,12*8($tptr)
2108 movdqa %xmm0,14*8($tptr)
2109 lea 16*8($tptr),$tptr
2113 mov 0*8($aptr),%rdx # a[0], modulo-scheduled
	xor	$zero,$zero		# cf=0, of=0
2123 jmp .Lsqrx8x_outer_loop
2126 .Lsqrx8x_outer_loop:
2127 mulx 1*8($aptr),%rax,%rbx # a[1]*a[0]
2128 adcx %rax,%r8 # a[1]*a[0]+=t[1]
2130 mulx 2*8($aptr),%rax,%rbx # a[2]*a[0]
2133 .byte 0xc4,0xe2,0xfb,0xf6,0x9e,0x18,0x00,0x00,0x00 # mulx 3*8($aptr),%rax,%rbx # ...
2136 .byte 0xc4,0xe2,0xfb,0xf6,0x9e,0x20,0x00,0x00,0x00 # mulx 4*8($aptr),%rax,%rbx
2139 mulx 5*8($aptr),%rax,%rbx
2142 mulx 6*8($aptr),%rax,%rbx
2145 mulx 7*8($aptr),%rax,%r15
2146 mov 1*8($aptr),%rdx # a[1]
2150 sbb $carry,$carry # mov %cf,$carry
2151 xor $zero,$zero # cf=0, of=0
2153 mov %r8,1*8($tptr) # t[1]
2154 mov %r9,2*8($tptr) # t[2]
2156 mulx 2*8($aptr),%r8,%rbx # a[2]*a[1]
2157 mulx 3*8($aptr),%r9,%rax # a[3]*a[1]
2160 mulx 4*8($aptr),%r10,%rbx # ...
2163 .byte 0xc4,0xe2,0xa3,0xf6,0x86,0x28,0x00,0x00,0x00 # mulx 5*8($aptr),%r11,%rax
2166 .byte 0xc4,0xe2,0x9b,0xf6,0x9e,0x30,0x00,0x00,0x00 # mulx 6*8($aptr),%r12,%rbx
2169 .byte 0xc4,0x62,0x93,0xf6,0xb6,0x38,0x00,0x00,0x00 # mulx 7*8($aptr),%r13,%r14
2170 mov 2*8($aptr),%rdx # a[2]
2174 adox $zero,%r14 # of=0
2175 adcx $zero,%r14 # cf=0
2177 mov %r8,3*8($tptr) # t[3]
2178 mov %r9,4*8($tptr) # t[4]
2180 mulx 3*8($aptr),%r8,%rbx # a[3]*a[2]
2181 mulx 4*8($aptr),%r9,%rax # a[4]*a[2]
2184 mulx 5*8($aptr),%r10,%rbx # ...
2187 .byte 0xc4,0xe2,0xa3,0xf6,0x86,0x30,0x00,0x00,0x00 # mulx 6*8($aptr),%r11,%rax
2190 .byte 0xc4,0x62,0x9b,0xf6,0xae,0x38,0x00,0x00,0x00 # mulx 7*8($aptr),%r12,%r13
2192 mov 3*8($aptr),%rdx # a[3]
2196 adox $zero,%r13 # of=0
2197 adcx $zero,%r13 # cf=0
2199 mov %r8,5*8($tptr) # t[5]
2200 mov %r9,6*8($tptr) # t[6]
2202 mulx 4*8($aptr),%r8,%rax # a[4]*a[3]
2203 mulx 5*8($aptr),%r9,%rbx # a[5]*a[3]
2206 mulx 6*8($aptr),%r10,%rax # ...
2209 mulx 7*8($aptr),%r11,%r12
2210 mov 4*8($aptr),%rdx # a[4]
2211 mov 5*8($aptr),%r14 # a[5]
2214 mov 6*8($aptr),%r15 # a[6]
2216 adox $zero,%r12 # of=0
2217 adcx $zero,%r12 # cf=0
2219 mov %r8,7*8($tptr) # t[7]
2220 mov %r9,8*8($tptr) # t[8]
2222 mulx %r14,%r9,%rax # a[5]*a[4]
2223 mov 7*8($aptr),%r8 # a[7]
2225 mulx %r15,%r10,%rbx # a[6]*a[4]
2228 mulx %r8,%r11,%rax # a[7]*a[4]
2229 mov %r14,%rdx # a[5]
2232 #adox $zero,%rax # of=0
2233 adcx $zero,%rax # cf=0
2235 mulx %r15,%r14,%rbx # a[6]*a[5]
2236 mulx %r8,%r12,%r13 # a[7]*a[5]
2237 mov %r15,%rdx # a[6]
2238 lea 8*8($aptr),$aptr
2245 mulx %r8,%r8,%r14 # a[7]*a[6]
2250 je .Lsqrx8x_outer_break
2252 neg $carry # mov $carry,%cf
2255 adc 9*8($tptr),%r9 # +=t[9]
2256 adc 10*8($tptr),%r10 # ...
2257 adc 11*8($tptr),%r11
2258 adc 12*8($tptr),%r12
2259 adc 13*8($tptr),%r13
2260 adc 14*8($tptr),%r14
2261 adc 15*8($tptr),%r15
2262 lea 8*8($tptr),$tptr
2263 sbb $carry,$carry # mov %cf,$carry
2265 mov -64($aptr),%rdx # a[0]
2267 mov $carry,16(%rsp) # offload $carry
2270 lea 8*8($tptr),$tptr
2271 xor %eax,%eax # cf=0, of=0
2278 mulx 0*8($aaptr),%rax,%r8 # a[8]*a[i]
2279 adcx %rax,%rbx # +=t[8]
2282 mulx 1*8($aaptr),%rax,%r9 # ...
2286 mulx 2*8($aaptr),%rax,%r10
2290 mulx 3*8($aaptr),%rax,%r11
2294 .byte 0xc4,0x62,0xfb,0xf6,0xa5,0x20,0x00,0x00,0x00 # mulx 4*8($aaptr),%rax,%r12
2298 mulx 5*8($aaptr),%rax,%r13
2302 mulx 6*8($aaptr),%rax,%r14
2303 mov %rbx,($tptr,%rcx,8) # store t[8+i]
2308 .byte 0xc4,0x62,0xfb,0xf6,0xbd,0x38,0x00,0x00,0x00 # mulx 7*8($aaptr),%rax,%r15
2309 mov 8($aptr,%rcx,8),%rdx # a[i]
2311 adox %rbx,%r15 # %rbx is 0, of=0
2312 adcx %rbx,%r15 # cf=0
2317 lea 8*8($aaptr),$aaptr
2318 cmp 8(%rsp),$aaptr # done?
2321 sub 16(%rsp),%rbx # mov 16(%rsp),%cf
2331 lea 8*8($tptr),$tptr
2332 sbb %rbx,%rbx # mov %cf,%rbx
2333 xor %eax,%eax # cf=0, of=0
2334 mov %rbx,16(%rsp) # offload carry
2340 sub 16(%rsp),%r8 # consume last carry
2341 mov 24(%rsp),$aaptr # initial $tptr
2342 mov 0*8($aptr),%rdx # a[8], modulo-scheduled
2344 lea 8*8($aaptr),$aaptr
2346 mov 1*8($aaptr),%r8 # potentially forwarded store
2348 mov 2*8($aaptr),%r9 # ...
2350 mov 3*8($aaptr),%r10
2352 mov 4*8($aaptr),%r11
2354 mov 5*8($aaptr),%r12
2356 mov 6*8($aaptr),%r13
2358 mov 7*8($aaptr),%r14
	xor	$zero,$zero		# cf=0, of=0
2361 jmp .Lsqrx8x_outer_loop
2364 .Lsqrx8x_outer_break:
2365 mov %r9,9*8($tptr) # t[9]
2366 movq %xmm3,%rcx # -$num
2367 mov %r10,10*8($tptr) # ...
2368 mov %r11,11*8($tptr)
2369 mov %r12,12*8($tptr)
2370 mov %r13,13*8($tptr)
2371 mov %r14,14*8($tptr)
2376 mov (%rsp),$num # restore $num
2379 mov ($aptr,$i),%rdx # a[0]
2381 mov 8($tptr),$A0[1] # t[1]
2382 xor $A0[0],$A0[0] # t[0], of=0, cf=0
2384 mov 16($tptr),$A1[0] # t[2] # prefetch
2385 mov 24($tptr),$A1[1] # t[3] # prefetch
2387 #jmp .Lsqrx4x_shift_n_add # happens to be aligned
2390 .Lsqrx4x_shift_n_add:
2394 .byte 0x48,0x8b,0x94,0x0e,0x08,0x00,0x00,0x00 # mov 8($aptr,$i),%rdx # a[i+1] # prefetch
2395 .byte 0x4c,0x8b,0x97,0x20,0x00,0x00,0x00 # mov 32($tptr),$A0[0] # t[2*i+4] # prefetch
2398 mov 40($tptr),$A0[1] # t[2*i+4+1] # prefetch
2405 mov 16($aptr,$i),%rdx # a[i+2] # prefetch
2406 mov 48($tptr),$A1[0] # t[2*i+6] # prefetch
2409 mov 56($tptr),$A1[1] # t[2*i+6+1] # prefetch
2416 mov 24($aptr,$i),%rdx # a[i+3] # prefetch
2418 mov 64($tptr),$A0[0] # t[2*i+8] # prefetch
2421 mov 72($tptr),$A0[1] # t[2*i+8+1] # prefetch
2428 jrcxz .Lsqrx4x_shift_n_add_break
2429 .byte 0x48,0x8b,0x94,0x0e,0x00,0x00,0x00,0x00 # mov 0($aptr,$i),%rdx # a[i+4] # prefetch
2432 mov 80($tptr),$A1[0] # t[2*i+10] # prefetch
2433 mov 88($tptr),$A1[1] # t[2*i+10+1] # prefetch
2438 jmp .Lsqrx4x_shift_n_add
2441 .Lsqrx4x_shift_n_add_break:
2443 .byte 0x48,0x89,0x87,0x30,0x00,0x00,0x00 # mov %rax,48($tptr)
2444 .byte 0x48,0x89,0x9f,0x38,0x00,0x00,0x00 # mov %rbx,56($tptr)
2445 .byte 0x48,0x8d,0xbf,0x40,0x00,0x00,0x00 # lea 64($tptr),$tptr
2448 ######################################################################
2449 # Montgomery reduction part, "word-by-word" algorithm.
2451 # This new path is inspired by multiple submissions from Intel, by
2452 # Shay Gueron, Vlad Krasnov, Erdinc Ozturk, James Guilford,
2455 my ($nptr,$carry,$m0)=("%rbp","%rsi","%rdx");
2459 mov 32(%rsp),%rbx # n0
2460 mov 48(%rsp),%rdx # "%r8", 8*0($tptr)
2461 lea ($nptr,$num),%rax # end of n[]
2462 #lea 48(%rsp,$num,2),$tptr # end of t[] buffer
2463 mov %rax, 0(%rsp) # save end of n[]
2464 mov $tptr,8(%rsp) # save end of t[]
2466 lea 48(%rsp),$tptr # initial t[] window
2469 #jmp .Lsqrx8x_reduction_loop
2472 .Lsqrx8x_reduction_loop:
2478 imulq %rbx,%rdx # n0*a[i]
2482 mov %rax,24(%rsp) # store top-most carry bit
2484 lea 8*8($tptr),$tptr
2485 xor $carry,$carry # cf=0,of=0
2492 mulx 8*0($nptr),%rax,%r8 # n[0]
2493 adcx %rbx,%rax # discarded
2496 mulx 8*1($nptr),%rbx,%r9 # n[1]
2500 mulx 8*2($nptr),%rbx,%r10
2504 mulx 8*3($nptr),%rbx,%r11
2508 .byte 0xc4,0x62,0xe3,0xf6,0xa5,0x20,0x00,0x00,0x00 # mulx 8*4($nptr),%rbx,%r12
2514 mulx 32(%rsp),%rbx,%rdx # %rdx discarded
2516 mov %rax,48+64(%rsp,%rcx,8) # put aside n0*a[i]
2518 mulx 8*5($nptr),%rax,%r13
2522 mulx 8*6($nptr),%rax,%r14
2526 mulx 8*7($nptr),%rax,%r15
2529 adox $carry,%r15 # $carry is 0
2530 adcx $carry,%r15 # cf=0
2535 lea 8*8($nptr),$nptr
2537 cmp 0(%rsp),$nptr # end of n[]?
2538 jae .Lsqrx8x_no_tail
2540 mov 48(%rsp),%rdx # pull n0*a[0]
2543 adcx 8*2($tptr),%r10
2544 adcx 8*3($tptr),%r11
2545 adcx 8*4($tptr),%r12
2546 adcx 8*5($tptr),%r13
2547 adcx 8*6($tptr),%r14
2548 adcx 8*7($tptr),%r15
2549 lea 8*8($tptr),$tptr
2550 sbb $carry,$carry # top carry
2554 xor $carry,$carry # of=0, cf=0
2560 mulx 8*0($nptr),%rax,%r8
2564 mulx 8*1($nptr),%rax,%r9
2568 mulx 8*2($nptr),%rax,%r10
2572 mulx 8*3($nptr),%rax,%r11
2576 .byte 0xc4,0x62,0xfb,0xf6,0xa5,0x20,0x00,0x00,0x00 # mulx 8*4($nptr),%rax,%r12
2580 mulx 8*5($nptr),%rax,%r13
2584 mulx 8*6($nptr),%rax,%r14
2588 mulx 8*7($nptr),%rax,%r15
2589 mov 48+72(%rsp,%rcx,8),%rdx # pull n0*a[i]
2593 mov %rbx,($tptr,%rcx,8) # save result
2595 adcx $carry,%r15 # cf=0
2600 lea 8*8($nptr),$nptr
2601 cmp 0(%rsp),$nptr # end of n[]?
2602 jae .Lsqrx8x_tail_done # break out of loop
2604 sub 16(%rsp),$carry # neg $carry
2605 mov 48(%rsp),%rdx # pull n0*a[0]
2608 adcx 8*2($tptr),%r10
2609 adcx 8*3($tptr),%r11
2610 adcx 8*4($tptr),%r12
2611 adcx 8*5($tptr),%r13
2612 adcx 8*6($tptr),%r14
2613 adcx 8*7($tptr),%r15
2614 lea 8*8($tptr),$tptr
2619 xor $carry,$carry # of=0, cf=0
2624 add 24(%rsp),%r8 # can this overflow?
2627 sub 16(%rsp),$carry # neg $carry
2628 .Lsqrx8x_no_tail: # carry flag is 0
2632 movq %xmm2,$nptr # restore $nptr
2634 lea 8*8($tptr),$carry # borrow $carry
2640 adc %rax,%rax # top-most carry
2642 cmp 8(%rsp),$carry # end of t[]?
2643 mov 32(%rsp),%rbx # n0
2644 mov 8*8($tptr,%rcx),%rdx # modulo-scheduled "%r8"
2646 lea 8*8($tptr,%rcx),$tptr # start of current t[] window
2647 mov %r8,-8*8($carry) # store top 512 bits
2648 mov %r9,-8*7($carry)
2649 mov %r10,-8*6($carry)
2650 mov %r11,-8*5($carry)
2651 mov %r12,-8*4($carry)
2652 mov %r13,-8*3($carry)
2653 mov %r14,-8*2($carry)
2654 mov %r15,-8*1($carry)
2656 jb .Lsqrx8x_reduction_loop
2659 neg $num # restore $num
2662 ##############################################################
2663 # Post-condition, 8x unrolled
2666 my ($rptr,$nptr,$lptr,$i)=($aptr,"%rbp","%rbx","%rcx");
2667 my @ri=map("%r$_",(10..13));
2668 my @ni=map("%r$_",(14..15));
2670 lea ($nptr,$num),$nptr # end of $nptr
2671 lea 48(%rsp,$num),$lptr # end of lower half of t[2*num]
2672 lea 48(%rsp,$num),$tptr
2673 neg %rax # top-most carry as mask
2675 movq %xmm1,$rptr # restore $rptr
2677 mov 0*8($nptr,$i),%r8
2678 mov 1*8($nptr,$i),%r9
2680 jmp .Lsqrx8x_sub_entry
2684 mov 0*8($nptr,$i),%r8
2685 mov 1*8($nptr,$i),%r9
2688 mov 2*8($nptr,$i),%r10
2691 mov 3*8($nptr,$i),%r11
2694 mov 4*8($nptr,$i),%r12
2697 mov 5*8($nptr,$i),%r13
2700 mov 6*8($nptr,$i),%r14
2703 mov 7*8($nptr,$i),%r15
2706 movdqa %xmm0,0*8($lptr,$i) # zap lower half
2709 movdqa %xmm0,2*8($lptr,$i)
2712 neg %rdx # mov %rdx,%cf
2713 movdqa %xmm0,4*8($lptr,$i)
2716 movdqa %xmm0,6*8($lptr,$i)
2719 movdqa %xmm0,0*8($tptr) # zap upper half
2722 movdqa %xmm0,2*8($tptr)
2725 movdqa %xmm0,4*8($tptr)
2726 sbb %rdx,%rdx # mov %cf,%rdx
2727 movdqa %xmm0,6*8($tptr)
2728 lea 8*8($tptr),$tptr
2738 lea 8*8($rptr),$rptr
2745 movq %xmm4,%rsi # restore %rsp
2756 .size bn_sqrx8x_mont,.-bn_sqrx8x_mont
2760 .asciz "Montgomery Multiplication for x86_64, CRYPTOGAMS by <appro\@openssl.org>"
2764 # EXCEPTION_DISPOSITION handler (EXCEPTION_RECORD *rec,ULONG64 frame,
2765 # CONTEXT *context,DISPATCHER_CONTEXT *disp)
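# The handlers below follow the usual perlasm pattern: if RIP lies between
# the function's prologue and epilogue labels (HandlerData[0..1]), they
# recover the caller's stack pointer that the prologue stashed in the
# frame, restore the non-volatile registers into *context and hand the
# rest of the unwind over to RtlVirtualUnwind.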
2773 .extern __imp_RtlVirtualUnwind
2774 .type mul_handler,\@abi-omnipotent
2788 mov 120($context),%rax # pull context->Rax
2789 mov 248($context),%rbx # pull context->Rip
2791 mov 8($disp),%rsi # disp->ImageBase
2792 mov 56($disp),%r11 # disp->HandlerData
2794 mov 0(%r11),%r10d # HandlerData[0]
2795 lea (%rsi,%r10),%r10 # end of prologue label
2796 cmp %r10,%rbx # context->Rip<end of prologue label
2797 jb .Lcommon_seh_tail
2799 mov 152($context),%rax # pull context->Rsp
2801 mov 4(%r11),%r10d # HandlerData[1]
2802 lea (%rsi,%r10),%r10 # epilogue label
2803 cmp %r10,%rbx # context->Rip>=epilogue label
2804 jae .Lcommon_seh_tail
2806 mov 192($context),%r10 # pull $num
2807 mov 8(%rax,%r10,8),%rax # pull saved stack pointer
2816 mov %rbx,144($context) # restore context->Rbx
2817 mov %rbp,160($context) # restore context->Rbp
2818 mov %r12,216($context) # restore context->R12
2819 mov %r13,224($context) # restore context->R13
2820 mov %r14,232($context) # restore context->R14
2821 mov %r15,240($context) # restore context->R15
2823 jmp .Lcommon_seh_tail
2824 .size mul_handler,.-mul_handler
2826 .type sqr_handler,\@abi-omnipotent
2840 mov 120($context),%rax # pull context->Rax
2841 mov 248($context),%rbx # pull context->Rip
2843 mov 8($disp),%rsi # disp->ImageBase
2844 mov 56($disp),%r11 # disp->HandlerData
2846 mov 0(%r11),%r10d # HandlerData[0]
2847 lea (%rsi,%r10),%r10 # end of prologue label
2848 cmp %r10,%rbx # context->Rip<.Lsqr_body
2849 jb .Lcommon_seh_tail
2851 mov 152($context),%rax # pull context->Rsp
2853 mov 4(%r11),%r10d # HandlerData[1]
2854 lea (%rsi,%r10),%r10 # epilogue label
2855 cmp %r10,%rbx # context->Rip>=.Lsqr_epilogue
2856 jae .Lcommon_seh_tail
2858 mov 56(%rax),%rax # pull saved stack pointer
2867 mov %rbx,144($context) # restore context->Rbx
2868 mov %rbp,160($context) # restore context->Rbp
2869 mov %r12,216($context) # restore context->R12
2870 mov %r13,224($context) # restore context->R13
2871 mov %r14,232($context) # restore context->R14
2872 mov %r15,240($context) # restore context->R15
2877 mov %rax,152($context) # restore context->Rsp
2878 mov %rsi,168($context) # restore context->Rsi
2879 mov %rdi,176($context) # restore context->Rdi
2881 mov 40($disp),%rdi # disp->ContextRecord
2882 mov $context,%rsi # context
	mov	\$154,%ecx		# sizeof(CONTEXT) in quadwords
2884 .long 0xa548f3fc # cld; rep movsq
2887 xor %rcx,%rcx # arg1, UNW_FLAG_NHANDLER
2888 mov 8(%rsi),%rdx # arg2, disp->ImageBase
2889 mov 0(%rsi),%r8 # arg3, disp->ControlPc
2890 mov 16(%rsi),%r9 # arg4, disp->FunctionEntry
2891 mov 40(%rsi),%r10 # disp->ContextRecord
2892 lea 56(%rsi),%r11 # &disp->HandlerData
2893 lea 24(%rsi),%r12 # &disp->EstablisherFrame
2894 mov %r10,32(%rsp) # arg5
2895 mov %r11,40(%rsp) # arg6
2896 mov %r12,48(%rsp) # arg7
2897 mov %rcx,56(%rsp) # arg8, (NULL)
2898 call *__imp_RtlVirtualUnwind(%rip)
2900 mov \$1,%eax # ExceptionContinueSearch
2912 .size sqr_handler,.-sqr_handler
2916 .rva .LSEH_begin_bn_mul_mont
2917 .rva .LSEH_end_bn_mul_mont
2918 .rva .LSEH_info_bn_mul_mont
2920 .rva .LSEH_begin_bn_mul4x_mont
2921 .rva .LSEH_end_bn_mul4x_mont
2922 .rva .LSEH_info_bn_mul4x_mont
2924 .rva .LSEH_begin_bn_sqr8x_mont
2925 .rva .LSEH_end_bn_sqr8x_mont
2926 .rva .LSEH_info_bn_sqr8x_mont
2928 $code.=<<___ if ($addx);
2929 .rva .LSEH_begin_bn_mulx4x_mont
2930 .rva .LSEH_end_bn_mulx4x_mont
2931 .rva .LSEH_info_bn_mulx4x_mont
2933 .rva .LSEH_begin_bn_sqrx8x_mont
2934 .rva .LSEH_end_bn_sqrx8x_mont
2935 .rva .LSEH_info_bn_sqrx8x_mont
2940 .LSEH_info_bn_mul_mont:
2943 .rva .Lmul_body,.Lmul_epilogue # HandlerData[]
2944 .LSEH_info_bn_mul4x_mont:
2947 .rva .Lmul4x_body,.Lmul4x_epilogue # HandlerData[]
2948 .LSEH_info_bn_sqr8x_mont:
2951 .rva .Lsqr8x_body,.Lsqr8x_epilogue # HandlerData[]
2953 $code.=<<___ if ($addx);
2954 .LSEH_info_bn_mulx4x_mont:
2957 .rva .Lmulx4x_body,.Lmulx4x_epilogue # HandlerData[]
2958 .LSEH_info_bn_sqrx8x_mont:
2961 .rva .Lsqrx8x_body,.Lsqrx8x_epilogue # HandlerData[]