3 # ====================================================================
4 # Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
5 # project. The module is, however, dual licensed under OpenSSL and
6 # CRYPTOGAMS licenses depending on where you obtain it. For further
7 # details see http://www.openssl.org/~appro/cryptogams/.
8 # ====================================================================
12 # Montgomery multiplication routine for x86_64. While it gives modest
13 # 9% improvement of rsa4096 sign on Opteron, rsa512 sign runs more
14 # than twice as fast (>2x). The most common case, rsa1024 sign, is improved
15 # by a respectable 50%. It remains to be seen whether loop unrolling and a
16 # dedicated squaring routine can provide further improvement...
20 # Add dedicated squaring procedure. Performance improvement varies
21 # from platform to platform, but on average it is ~5%/15%/25%/33%
22 # for 512-/1024-/2048-/4096-bit RSA *sign* benchmarks respectively.
26 # Unroll and modulo-schedule inner loops in such a manner that they
27 # are "fallen through" for input lengths of 8, which is critical for
28 # 1024-bit RSA *sign*. The average performance improvement in comparison
29 # to the *initial* 2005 version of this module is ~0%/30%/40%/45%
30 # for 512-/1024-/2048-/4096-bit RSA *sign* benchmarks respectively.
34 if ($flavour =~ /\./) { $output = $flavour; undef $flavour; }
36 $win64=0; $win64=1 if ($flavour =~ /[nm]asm|mingw64/ || $output =~ /\.asm$/);
38 $0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
39 ( $xlate="${dir}x86_64-xlate.pl" and -f $xlate ) or
40 ( $xlate="${dir}../../perlasm/x86_64-xlate.pl" and -f $xlate) or
41 die "can't locate x86_64-xlate.pl";
43 open STDOUT,"| $^X $xlate $flavour $output";
46 $rp="%rdi"; # BN_ULONG *rp,
47 $ap="%rsi"; # const BN_ULONG *ap,
48 $bp="%rdx"; # const BN_ULONG *bp,
49 $np="%rcx"; # const BN_ULONG *np,
50 $n0="%r8"; # const BN_ULONG *n0,
51 $num="%r9"; # int num);
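#
# The code below implements word-by-word Montgomery multiplication:
# it computes rp = ap*bp*R^-1 mod np, where R = 2^(64*num) and
# n0[0] = -np[0]^-1 mod 2^64. Purely as a reading aid (this is *not*
# part of the module, and it glosses over the unrolling and scheduling
# done in the assembly), a C sketch of the same computation, assuming
# 64-bit limbs and a compiler with unsigned __int128, could look like:
#
#    #include <stdint.h>
#    #include <string.h>
#    typedef unsigned __int128 u128;
#
#    static void mont_mul_ref(uint64_t *rp, const uint64_t *ap,
#                             const uint64_t *bp, const uint64_t *np,
#                             const uint64_t *n0, int num)
#    {
#        uint64_t tp[num + 2];               /* tp[num], tp[num+1]: overflow words */
#        memset(tp, 0, sizeof(tp));
#
#        for (int i = 0; i < num; i++) {
#            u128 acc = 0;                   /* tp += ap*bp[i] */
#            for (int j = 0; j < num; j++) {
#                acc += (u128)ap[j] * bp[i] + tp[j];
#                tp[j] = (uint64_t)acc;
#                acc >>= 64;
#            }
#            acc += tp[num];
#            tp[num]     = (uint64_t)acc;
#            tp[num + 1] = (uint64_t)(acc >> 64);
#
#            uint64_t m1 = tp[0] * n0[0];    /* "tp[0]"*n0 */
#            acc = (u128)m1 * np[0] + tp[0]; /* low word is zero: "discarded" */
#            acc >>= 64;
#            for (int j = 1; j < num; j++) { /* tp = (tp + m1*np) >> 64 */
#                acc += (u128)m1 * np[j] + tp[j];
#                tp[j - 1] = (uint64_t)acc;
#                acc >>= 64;
#            }
#            acc += tp[num];
#            tp[num - 1] = (uint64_t)acc;
#            tp[num] = tp[num + 1] + (uint64_t)(acc >> 64); /* upmost overflow bit */
#        }
#
#        /* conditional final subtraction: rp = tp - np if tp >= np, else rp = tp */
#        u128 b = 0;
#        for (int j = 0; j < num; j++) {
#            u128 d = (u128)tp[j] - np[j] - b;
#            rp[j] = (uint64_t)d;
#            b = (d >> 64) & 1;
#        }
#        b = ((u128)tp[num] - b) >> 64 & 1;  /* borrow out means tp < np */
#        for (int j = 0; j < num; j++)
#            rp[j] = b ? tp[j] : rp[j];
#    }
#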
64 .type bn_mul_mont,\@function,6
88 lea (%rsp,%r10,8),%rsp # tp=alloca(8*(num+2))
89 and \$-1024,%rsp # minimize TLB usage
91 mov %r11,8(%rsp,$num,8) # tp[num+1]=%rsp
93 mov $bp,%r12 # reassign $bp
97 mov ($n0),$n0 # pull n0[0] value
98 mov ($bp),$m0 # m0=bp[0]
105 mulq $m0 # ap[0]*bp[0]
109 imulq $lo0,$m1 # "tp[0]"*n0
113 add %rax,$lo0 # discarded
126 add $hi0,$hi1 # np[j]*m1+ap[j]*bp[0]
129 mov $hi1,-16(%rsp,$j,8) # tp[j-1]
133 mulq $m0 # ap[j]*bp[0]
145 mov ($ap),%rax # ap[0]
147 add $hi0,$hi1 # np[j]*m1+ap[j]*bp[0]
149 mov $hi1,-16(%rsp,$j,8) # tp[j-1]
156 mov $hi1,-8(%rsp,$num,8)
157 mov %rdx,(%rsp,$num,8) # store upmost overflow bit
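#
# Note on the "discarded" word above: m1 = tp[0]*n0 mod 2^64 and
# n0 = -np[0]^-1 mod 2^64, so m1*np[0] = -tp[0] (mod 2^64) and the least
# significant word of tp + m1*np is always zero; the running sum is
# therefore simply shifted down by one word, which is why the stores go
# to tp[j-1] rather than tp[j].
#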
163 mov ($bp,$i,8),$m0 # m0=bp[i]
167 mulq $m0 # ap[0]*bp[i]
168 add %rax,$lo0 # ap[0]*bp[i]+tp[0]
172 imulq $lo0,$m1 # tp[0]*n0
176 add %rax,$lo0 # discarded
179 mov 8(%rsp),$lo0 # tp[1]
190 add $lo0,$hi1 # np[j]*m1+ap[j]*bp[i]+tp[j]
193 mov $hi1,-16(%rsp,$j,8) # tp[j-1]
197 mulq $m0 # ap[j]*bp[i]
201 add $hi0,$lo0 # ap[j]*bp[i]+tp[j]
211 mov ($ap),%rax # ap[0]
213 add $lo0,$hi1 # np[j]*m1+ap[j]*bp[i]+tp[j]
216 mov $hi1,-16(%rsp,$j,8) # tp[j-1]
222 add $lo0,$hi1 # pull upmost overflow bit
224 mov $hi1,-8(%rsp,$num,8)
225 mov %rdx,(%rsp,$num,8) # store upmost overflow bit
231 xor $i,$i # i=0 and clear CF!
232 mov (%rsp),%rax # tp[0]
233 lea (%rsp),$ap # borrow ap for tp
237 .Lsub: sbb ($np,$i,8),%rax
238 mov %rax,($rp,$i,8) # rp[i]=tp[i]-np[i]
239 mov 8($ap,$i,8),%rax # tp[i+1]
241 dec $j # doesn't affect CF!
244 sbb \$0,%rax # handle upmost overflow bit
251 or $np,$ap # ap=borrow?tp:rp
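#
# The branchless select above is, roughly, the following C (a sketch only,
# with the same headers as the reference near the top of the file, not
# generated code): the borrow from the subtraction becomes an all-ones or
# all-zeros mask that picks either the reduced value already written to
# rp[] or the original tp[]:
#
#    uintptr_t mask = 0 - (uintptr_t)borrow;   /* all ones iff tp < np */
#    const uint64_t *src =
#        (const uint64_t *)(((uintptr_t)tp & mask) | ((uintptr_t)rp & ~mask));
#    /* the copy loop below then writes src[] to rp[] and zaps tp[] */
#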
253 .Lcopy: # copy or in-place refresh
255 mov $i,(%rsp,$i,8) # zap temporary vector
256 mov %rax,($rp,$i,8) # rp[i]=tp[i]
261 mov 8(%rsp,$num,8),%rsi # restore %rsp
272 .size bn_mul_mont,.-bn_mul_mont
275 my @A=("%r10","%r11");
276 my @N=("%r13","%rdi");
278 .type bn_mul4x_mont,\@function,6
293 lea (%rsp,%r10,8),%rsp # tp=alloca(8*(num+4))
294 and \$-1024,%rsp # minimize TLB usage
296 mov %r11,8(%rsp,$num,8) # tp[num+1]=%rsp
298 mov $rp,16(%rsp,$num,8) # tp[num+2]=$rp
299 mov %rdx,%r12 # reassign $bp
303 mov ($n0),$n0 # pull n0[0] value
304 mov ($bp),$m0 # m0=bp[0]
311 mulq $m0 # ap[0]*bp[0]
315 imulq $A[0],$m1 # "tp[0]"*n0
319 add %rax,$A[0] # discarded
342 mulq $m0 # ap[j]*bp[0]
344 mov -16($np,$j,8),%rax
350 mov -8($ap,$j,8),%rax
352 add $A[0],$N[0] # np[j]*m1+ap[j]*bp[0]
354 mov $N[0],-24(%rsp,$j,8) # tp[j-1]
357 mulq $m0 # ap[j]*bp[0]
359 mov -8($np,$j,8),%rax
367 add $A[1],$N[1] # np[j]*m1+ap[j]*bp[0]
369 mov $N[1],-16(%rsp,$j,8) # tp[j-1]
372 mulq $m0 # ap[j]*bp[0]
382 add $A[0],$N[0] # np[j]*m1+ap[j]*bp[0]
384 mov $N[0],-8(%rsp,$j,8) # tp[j-1]
387 mulq $m0 # ap[j]*bp[0]
396 mov -16($ap,$j,8),%rax
398 add $A[1],$N[1] # np[j]*m1+ap[j]*bp[0]
400 mov $N[1],-32(%rsp,$j,8) # tp[j-1]
405 mulq $m0 # ap[j]*bp[0]
407 mov -16($np,$j,8),%rax
413 mov -8($ap,$j,8),%rax
415 add $A[0],$N[0] # np[j]*m1+ap[j]*bp[0]
417 mov $N[0],-24(%rsp,$j,8) # tp[j-1]
420 mulq $m0 # ap[j]*bp[0]
422 mov -8($np,$j,8),%rax
428 mov ($ap),%rax # ap[0]
430 add $A[1],$N[1] # np[j]*m1+ap[j]*bp[0]
432 mov $N[1],-16(%rsp,$j,8) # tp[j-1]
438 mov $N[0],-8(%rsp,$j,8)
439 mov $N[1],(%rsp,$j,8) # store upmost overflow bit
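#
# The 4-way unrolled path computes the same per-limb recurrence as the
# bn_mul_mont code above, but walks ap[], np[] and tp[] four limbs per
# inner-loop iteration and alternates between two pairs of accumulator
# registers, so each multiplication result is consumed one step after it
# is produced. That modulo scheduling is what lets the num==8 (1024-bit)
# case "fall through" the inner loops, as noted in the header.
#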
444 mov ($bp,$i,8),$m0 # m0=bp[i]
448 mulq $m0 # ap[0]*bp[i]
449 add %rax,$A[0] # ap[0]*bp[i]+tp[0]
453 imulq $A[0],$m1 # tp[0]*n0
457 add %rax,$A[0] # "$N[0]", discarded
462 mulq $m0 # ap[j]*bp[i]
466 add 8(%rsp),$A[1] # +tp[1]
474 add $A[1],$N[1] # np[j]*m1+ap[j]*bp[i]+tp[j]
477 mov $N[1],(%rsp) # tp[j-1]
482 mulq $m0 # ap[j]*bp[i]
484 mov -16($np,$j,8),%rax
486 add -16(%rsp,$j,8),$A[0] # ap[j]*bp[i]+tp[j]
492 mov -8($ap,$j,8),%rax
496 mov $N[0],-24(%rsp,$j,8) # tp[j-1]
499 mulq $m0 # ap[j]*bp[i]
501 mov -8($np,$j,8),%rax
503 add -8(%rsp,$j,8),$A[1]
513 mov $N[1],-16(%rsp,$j,8) # tp[j-1]
516 mulq $m0 # ap[j]*bp[i]
520 add (%rsp,$j,8),$A[0] # ap[j]*bp[i]+tp[j]
530 mov $N[0],-8(%rsp,$j,8) # tp[j-1]
533 mulq $m0 # ap[j]*bp[i]
537 add 8(%rsp,$j,8),$A[1]
544 mov -16($ap,$j,8),%rax
548 mov $N[1],-32(%rsp,$j,8) # tp[j-1]
553 mulq $m0 # ap[j]*bp[i]
555 mov -16($np,$j,8),%rax
557 add -16(%rsp,$j,8),$A[0] # ap[j]*bp[i]+tp[j]
563 mov -8($ap,$j,8),%rax
567 mov $N[0],-24(%rsp,$j,8) # tp[j-1]
570 mulq $m0 # ap[j]*bp[i]
572 mov -8($np,$j,8),%rax
574 add -8(%rsp,$j,8),$A[1]
581 mov ($ap),%rax # ap[0]
585 mov $N[1],-16(%rsp,$j,8) # tp[j-1]
591 add (%rsp,$num,8),$N[0] # pull upmost overflow bit
593 mov $N[0],-8(%rsp,$j,8)
594 mov $N[1],(%rsp,$j,8) # store upmost overflow bit
600 my @ri=("%rax","%rdx",$m0,$m1);
602 mov 16(%rsp,$num,8),$rp # restore $rp
603 mov 0(%rsp),@ri[0] # tp[0]
605 mov 8(%rsp),@ri[1] # tp[1]
606 shr \$2,$num # num/=4
607 lea (%rsp),$ap # borrow ap for tp
608 xor $i,$i # i=0 and clear CF!
611 mov 16($ap),@ri[2] # tp[2]
612 mov 24($ap),@ri[3] # tp[3]
614 lea -1($num),$j # j=num/4-1
618 mov @ri[0],0($rp,$i,8) # rp[i]=tp[i]-np[i]
619 mov @ri[1],8($rp,$i,8) # rp[i]=tp[i]-np[i]
620 sbb 16($np,$i,8),@ri[2]
621 mov 32($ap,$i,8),@ri[0] # tp[i+1]
622 mov 40($ap,$i,8),@ri[1]
623 sbb 24($np,$i,8),@ri[3]
624 mov @ri[2],16($rp,$i,8) # rp[i]=tp[i]-np[i]
625 mov @ri[3],24($rp,$i,8) # rp[i]=tp[i]-np[i]
626 sbb 32($np,$i,8),@ri[0]
627 mov 48($ap,$i,8),@ri[2]
628 mov 56($ap,$i,8),@ri[3]
629 sbb 40($np,$i,8),@ri[1]
631 dec $j # doesn't affect CF!
634 mov @ri[0],0($rp,$i,8) # rp[i]=tp[i]-np[i]
635 mov 32($ap,$i,8),@ri[0] # load overflow bit
636 sbb 16($np,$i,8),@ri[2]
637 mov @ri[1],8($rp,$i,8) # rp[i]=tp[i]-np[i]
638 sbb 24($np,$i,8),@ri[3]
639 mov @ri[2],16($rp,$i,8) # rp[i]=tp[i]-np[i]
641 sbb \$0,@ri[0] # handle upmost overflow bit
642 mov @ri[3],24($rp,$i,8) # rp[i]=tp[i]-np[i]
649 or $np,$ap # ap=borrow?tp:rp
656 .Lcopy4x: # copy or in-place refresh
657 movdqu 16($ap,$i),%xmm2
658 movdqu 32($ap,$i),%xmm1
659 movdqa %xmm0,16(%rsp,$i)
660 movdqu %xmm2,16($rp,$i)
661 movdqa %xmm0,32(%rsp,$i)
662 movdqu %xmm1,32($rp,$i)
668 movdqu 16($ap,$i),%xmm2
669 movdqa %xmm0,16(%rsp,$i)
670 movdqu %xmm2,16($rp,$i)
674 mov 8(%rsp,$num,8),%rsi # restore %rsp
685 .size bn_mul4x_mont,.-bn_mul4x_mont
689 ######################################################################
690 # void bn_sqr4x_mont(
691 my $rptr="%rdi"; # const BN_ULONG *rptr,
692 my $aptr="%rsi"; # const BN_ULONG *aptr,
693 my $bptr="%rdx"; # not used
694 my $nptr="%rcx"; # const BN_ULONG *nptr,
695 my $n0 ="%r8"; # const BN_ULONG *n0);
696 my $num ="%r9"; # int num, has to be divisible by 4 and
699 my ($i,$j,$tptr)=("%rbp","%rcx",$rptr);
700 my @A0=("%r10","%r11");
701 my @A1=("%r12","%r13");
702 my ($a0,$a1,$ai)=("%r14","%r15","%rbx");
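#
# bn_sqr4x_mont() computes the same Montgomery product as bn_mul_mont()
# for the ap==bp case, i.e. rptr = aptr^2 * R^-1 mod nptr with
# R = 2^(64*num), but exploits the symmetry of squaring: off-diagonal
# products a[i]*a[j] are formed only once and doubled (see the two-phase
# comment below), after which a word-by-word Montgomery reduction and the
# usual conditional subtraction are applied.
#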
705 .type bn_sqr4x_mont,\@function,6
716 shl \$3,${num}d # convert $num to bytes
718 mov %rsp,%r11 # put aside %rsp
719 sub $num,%r10 # -$num
721 lea -72(%rsp,%r10,2),%rsp # alloca(frame+2*$num)
722 and \$-1024,%rsp # minimize TLB usage
723 ##############################################################
726 # +0 saved $num, used in reduction section
727 # +8 &t[2*$num], used in reduction section
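# +32 saved $rptr, reloaded for the final copy
# +40 saved $nptr, reloaded in the reduction section
# +48 saved *n0, reloaded in the reduction section
# +56 saved original %rsp, restored in the epilogue
# +64 t[2*$num], the double-width intermediate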
734 mov $rptr,32(%rsp) # save $rptr
737 mov %r11, 56(%rsp) # save original %rsp
739 ##############################################################
742 # a) multiply-n-add everything but a[i]*a[i];
743 # b) shift result of a) by 1 to the left and accumulate
744 # a[i]*a[i] products;
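#
# As a reading aid only (not generated code), phase a) corresponds to the
# following C, with a[] the num-word input and t[] the 2*num-word scratch
# vector (t[0] stays zero), using the same types as the bn_mul_mont sketch
# near the top of the file:
#
#    memset(t, 0, 2*num*sizeof(uint64_t));
#    for (int i = 0; i < num; i++) {          /* off-diagonal products, once */
#        uint64_t carry = 0;
#        for (int j = i + 1; j < num; j++) {
#            unsigned __int128 p = (unsigned __int128)a[i]*a[j] + t[i+j] + carry;
#            t[i+j] = (uint64_t)p;
#            carry  = (uint64_t)(p >> 64);
#        }
#        t[i+num] = carry;
#    }
#
# Phase b) is sketched after the .Lsqr4x_shift_n_add loop below.
#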
746 lea 32(%r10),$i # $i=-($num-32)
747 lea ($aptr,$num),$aptr # end of a[] buffer, ($aptr,$i)=&ap[2]
749 mov $num,$j # $j=$num
751 # comments apply to $num==8 case
752 mov -32($aptr,$i),$a0 # a[0]
753 lea 64(%rsp,$num,2),$tptr # end of tp[] buffer, &tp[2*$num]
754 mov -24($aptr,$i),%rax # a[1]
755 lea -32($tptr,$i),$tptr # end of tp[] window, &tp[2*$num-"$i"]
756 mov -16($aptr,$i),$ai # a[2]
760 mov %rax,$A0[0] # a[1]*a[0]
763 mov $A0[0],-24($tptr,$i) # t[1]
770 mov $A0[1],-16($tptr,$i) # t[2]
772 lea -16($i),$j # j=-16
775 mov 8($aptr,$j),$ai # a[3]
777 mov %rax,$A1[0] # a[2]*a[1]+t[3]
786 add %rax,$A0[0] # a[3]*a[0]+a[2]*a[1]+t[3]
789 mov $A0[0],-8($tptr,$j) # t[3]
794 mov ($aptr,$j),$ai # a[4]
797 add %rax,$A1[1] # a[3]*a[1]+t[4]
805 add %rax,$A0[1] # a[4]*a[0]+a[3]*a[1]+t[4]
808 mov $A0[1],($tptr,$j) # t[4]
811 mov 8($aptr,$j),$ai # a[5]
814 add %rax,$A1[0] # a[4]*a[1]+t[5]
823 add %rax,$A0[0] # a[5]*a[0]+a[4]*a[1]+t[5]
826 mov $A0[0],-8($tptr,$j) # t[5]
828 mov ($aptr,$j),$ai # a[6]
831 add %rax,$A1[1] # a[5]*a[1]+t[6]
839 add %rax,$A0[1] # a[6]*a[0]+a[5]*a[1]+t[6]
842 mov $A0[1],($tptr,$j) # t[6]
845 mov 8($aptr,$j),$ai # a[7]
848 add %rax,$A1[0] # a[6]*a[1]+t[7]
857 add %rax,$A0[0] # a[7]*a[0]+a[6]*a[1]+t[7]
860 mov $A0[0],-8($tptr,$j) # t[7]
872 mov $A1[1],($tptr) # t[8]
874 mov $A1[0],8($tptr) # t[9]
878 .Lsqr4x_outer: # comments apply to $num==6 case
879 mov -32($aptr,$i),$a0 # a[0]
880 lea 64(%rsp,$num,2),$tptr # end of tp[] buffer, &tp[2*$num]
881 mov -24($aptr,$i),%rax # a[1]
882 lea -32($tptr,$i),$tptr # end of tp[] window, &tp[2*$num-"$i"]
883 mov -16($aptr,$i),$ai # a[2]
886 mov -24($tptr,$i),$A0[0] # t[1]
889 add %rax,$A0[0] # a[1]*a[0]+t[1]
892 mov $A0[0],-24($tptr,$i) # t[1]
895 add -16($tptr,$i),$A0[1] # a[2]*a[0]+t[2]
901 mov $A0[1],-16($tptr,$i) # t[2]
903 lea -16($i),$j # j=-16
907 mov 8($aptr,$j),$ai # a[3]
909 add 8($tptr,$j),$A1[0]
912 add %rax,$A1[0] # a[2]*a[1]+t[3]
920 add %rax,$A0[0] # a[3]*a[0]+a[2]*a[1]+t[3]
923 mov $A0[0],8($tptr,$j) # t[3]
930 mov ($aptr,$j),$ai # a[4]
932 add ($tptr,$j),$A1[1]
935 add %rax,$A1[1] # a[3]*a[1]+t[4]
943 add %rax,$A0[1] # a[4]*a[0]+a[3]*a[1]+t[4]
946 mov $A0[1],($tptr,$j) # t[4]
948 mov 8($aptr,$j),$ai # a[5]
950 add 8($tptr,$j),$A1[0]
953 add %rax,$A1[0] # a[4]*a[1]+t[5]
962 add %rax,$A0[0] # a[5]*a[0]+a[4]*a[1]+t[5]
965 mov $A0[0],-8($tptr,$j) # t[5]
977 mov $A1[1],($tptr) # t[6]
978 mov $A1[0],8($tptr) # t[7]
983 # comments apply to $num==4 case
984 mov -32($aptr),$a0 # a[0]
985 lea 64(%rsp,$num,2),$tptr # end of tp[] buffer, &tp[2*$num]
986 mov -24($aptr),%rax # a[1]
987 lea -32($tptr,$i),$tptr # end of tp[] window, &tp[2*$num-"$i"]
988 mov -16($aptr),$ai # a[2]
991 mov -24($tptr),$A0[0] # t[1]
994 add %rax,$A0[0] # a[1]*a[0]+t[1]
997 mov $A0[0],-24($tptr) # t[1]
1000 add -16($tptr),$A0[1] # a[2]*a[0]+t[2]
1006 mov $A0[1],-16($tptr) # t[2]
1009 mov -8($aptr),$ai # a[3]
1011 add -8($tptr),$A1[0]
1014 add %rax,$A1[0] # a[2]*a[1]+t[3]
1022 add %rax,$A0[0] # a[3]*a[0]+a[2]*a[1]+t[3]
1025 mov $A0[0],-8($tptr) # t[3]
1032 mov -16($aptr),%rax # a[2]
1035 mov $A1[1],($tptr) # t[4]
1036 mov $A1[0],8($tptr) # t[5]
1041 my ($shift,$carry)=($a0,$a1);
1042 my @S=(@A1,$ai,$n0);
1046 sub $num,$i # $i=16-$num
1049 add $A1[0],%rax # t[5]
1051 mov %rax,8($tptr) # t[5]
1052 mov %rdx,16($tptr) # t[6]
1053 mov $carry,24($tptr) # t[7]
1055 mov -16($aptr,$i),%rax # a[0]
1056 lea 64(%rsp,$num,2),$tptr
1057 xor $A0[0],$A0[0] # t[0]
1058 mov -24($tptr,$i,2),$A0[1] # t[1]
1060 lea ($shift,$A0[0],2),$S[0] # t[2*i]<<1 | shift
1062 lea ($j,$A0[1],2),$S[1] # t[2*i+1]<<1 |
1064 or $A0[0],$S[1] # | t[2*i]>>63
1065 mov -16($tptr,$i,2),$A0[0] # t[2*i+2] # prefetch
1066 mov $A0[1],$shift # shift=t[2*i+1]>>63
1067 mul %rax # a[i]*a[i]
1068 neg $carry # mov $carry,cf
1069 mov -8($tptr,$i,2),$A0[1] # t[2*i+2+1] # prefetch
1071 mov -8($aptr,$i),%rax # a[i+1] # prefetch
1072 mov $S[0],-32($tptr,$i,2)
1075 lea ($shift,$A0[0],2),$S[2] # t[2*i]<<1 | shift
1076 mov $S[1],-24($tptr,$i,2)
1077 sbb $carry,$carry # mov cf,$carry
1079 lea ($j,$A0[1],2),$S[3] # t[2*i+1]<<1 |
1081 or $A0[0],$S[3] # | t[2*i]>>63
1082 mov 0($tptr,$i,2),$A0[0] # t[2*i+2] # prefetch
1083 mov $A0[1],$shift # shift=t[2*i+1]>>63
1084 mul %rax # a[i]*a[i]
1085 neg $carry # mov $carry,cf
1086 mov 8($tptr,$i,2),$A0[1] # t[2*i+2+1] # prefetch
1088 mov 0($aptr,$i),%rax # a[i+1] # prefetch
1089 mov $S[2],-16($tptr,$i,2)
1092 mov $S[3],-40($tptr,$i,2)
1093 sbb $carry,$carry # mov cf,$carry
1094 jmp .Lsqr4x_shift_n_add
1097 .Lsqr4x_shift_n_add:
1098 lea ($shift,$A0[0],2),$S[0] # t[2*i]<<1 | shift
1100 lea ($j,$A0[1],2),$S[1] # t[2*i+1]<<1 |
1102 or $A0[0],$S[1] # | t[2*i]>>63
1103 mov -16($tptr,$i,2),$A0[0] # t[2*i+2] # prefetch
1104 mov $A0[1],$shift # shift=t[2*i+1]>>63
1105 mul %rax # a[i]*a[i]
1106 neg $carry # mov $carry,cf
1107 mov -8($tptr,$i,2),$A0[1] # t[2*i+2+1] # prefetch
1109 mov -8($aptr,$i),%rax # a[i+1] # prefetch
1110 mov $S[0],-32($tptr,$i,2)
1113 lea ($shift,$A0[0],2),$S[2] # t[2*i]<<1 | shift
1114 mov $S[1],-24($tptr,$i,2)
1115 sbb $carry,$carry # mov cf,$carry
1117 lea ($j,$A0[1],2),$S[3] # t[2*i+1]<<1 |
1119 or $A0[0],$S[3] # | t[2*i]>>63
1120 mov 0($tptr,$i,2),$A0[0] # t[2*i+2] # prefetch
1121 mov $A0[1],$shift # shift=t[2*i+1]>>63
1122 mul %rax # a[i]*a[i]
1123 neg $carry # mov $carry,cf
1124 mov 8($tptr,$i,2),$A0[1] # t[2*i+2+1] # prefetch
1126 mov 0($aptr,$i),%rax # a[i+1] # prefetch
1127 mov $S[2],-16($tptr,$i,2)
1130 lea ($shift,$A0[0],2),$S[0] # t[2*i]<<1 | shift
1131 mov $S[3],-8($tptr,$i,2)
1132 sbb $carry,$carry # mov cf,$carry
1134 lea ($j,$A0[1],2),$S[1] # t[2*i+1]<<1 |
1136 or $A0[0],$S[1] # | t[2*i]>>63
1137 mov 16($tptr,$i,2),$A0[0] # t[2*i+2] # prefetch
1138 mov $A0[1],$shift # shift=t[2*i+1]>>63
1139 mul %rax # a[i]*a[i]
1140 neg $carry # mov $carry,cf
1141 mov 24($tptr,$i,2),$A0[1] # t[2*i+2+1] # prefetch
1143 mov 8($aptr,$i),%rax # a[i+1] # prefetch
1144 mov $S[0],0($tptr,$i,2)
1147 lea ($shift,$A0[0],2),$S[2] # t[2*i]<<1 | shift
1148 mov $S[1],8($tptr,$i,2)
1149 sbb $carry,$carry # mov cf,$carry
1151 lea ($j,$A0[1],2),$S[3] # t[2*i+1]<<1 |
1153 or $A0[0],$S[3] # | t[2*i]>>63
1154 mov 32($tptr,$i,2),$A0[0] # t[2*i+2] # prefetch
1155 mov $A0[1],$shift # shift=t[2*i+1]>>63
1156 mul %rax # a[i]*a[i]
1157 neg $carry # mov $carry,cf
1158 mov 40($tptr,$i,2),$A0[1] # t[2*i+2+1] # prefetch
1160 mov 16($aptr,$i),%rax # a[i+1] # prefetch
1161 mov $S[2],16($tptr,$i,2)
1163 mov $S[3],24($tptr,$i,2)
1164 sbb $carry,$carry # mov cf,$carry
1166 jnz .Lsqr4x_shift_n_add
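#
# A C sketch of this doubling pass (phase b), again only as a reading aid:
# each word pair of t[] is shifted left by one bit, the bit shifted out of
# the previous pair is brought in via "shift", and a[i]*a[i] is added on
# top with a running carry:
#
#    uint64_t shift = 0, carry = 0;
#    for (int i = 0; i < num; i++) {
#        unsigned __int128 sq = (unsigned __int128)a[i]*a[i];
#        uint64_t lo = t[2*i]<<1 | shift;
#        uint64_t hi = t[2*i+1]<<1 | t[2*i]>>63;
#        shift = t[2*i+1]>>63;
#        unsigned __int128 s = (unsigned __int128)lo + (uint64_t)sq + carry;
#        t[2*i]   = (uint64_t)s;
#        s = (unsigned __int128)hi + (uint64_t)(sq>>64) + (uint64_t)(s>>64);
#        t[2*i+1] = (uint64_t)s;
#        carry = (uint64_t)(s>>64);
#    }
#    /* the final shift/carry are provably zero: a^2 fits in 2*num words */
#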
1168 lea ($shift,$A0[0],2),$S[0] # t[2*i]<<1 | shift
1170 lea ($j,$A0[1],2),$S[1] # t[2*i+1]<<1 |
1172 or $A0[0],$S[1] # | t[2*i]>>63
1173 mov -16($tptr),$A0[0] # t[2*i+2] # prefetch
1174 mov $A0[1],$shift # shift=t[2*i+1]>>63
1175 mul %rax # a[i]*a[i]
1176 neg $carry # mov $carry,cf
1177 mov -8($tptr),$A0[1] # t[2*i+2+1] # prefetch
1179 mov -8($aptr),%rax # a[i+1] # prefetch
1180 mov $S[0],-32($tptr)
1183 lea ($shift,$A0[0],2),$S[2] # t[2*i]<<1|shift
1184 mov $S[1],-24($tptr)
1185 sbb $carry,$carry # mov cf,$carry
1187 lea ($j,$A0[1],2),$S[3] # t[2*i+1]<<1 |
1189 or $A0[0],$S[3] # | t[2*i]>>63
1190 mul %rax # a[i]*a[i]
1191 neg $carry # mov $carry,cf
1194 mov $S[2],-16($tptr)
1198 ##############################################################
1199 # Montgomery reduction part, "word-by-word" algorithm.
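#
# As with the multiplication above, a C sketch of this "word-by-word"
# reduction (a reading aid only; the assembly below modulo-schedules it
# across outer iterations), with t[] the 2*num-word square, n[] the
# modulus and n0 = -n[0]^-1 mod 2^64:
#
#    static uint64_t mont_reduce_ref(uint64_t *t, const uint64_t *n,
#                                    uint64_t n0, int num)
#    {
#        uint64_t topbit = 0;
#        for (int i = 0; i < num; i++) {
#            uint64_t m = t[i] * n0;
#            unsigned __int128 acc = 0;
#            for (int j = 0; j < num; j++) {
#                acc += (unsigned __int128)m * n[j] + t[i+j];
#                t[i+j] = (uint64_t)acc;     /* t[i] itself becomes zero */
#                acc >>= 64;
#            }
#            acc += (unsigned __int128)t[i+num] + topbit;
#            t[i+num] = (uint64_t)acc;
#            topbit = (uint64_t)(acc >> 64); /* at most 1 */
#        }
#        return topbit; /* result is t[num..2*num-1] plus this bit,
#                          followed by a conditional subtraction of n */
#    }
#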
1202 my ($topbit,$nptr)=("%rbp",$aptr);
1203 my ($m0,$m1)=($a0,$a1);
1204 my @Ni=("%rbx","%r9");
1206 mov 40(%rsp),$nptr # restore $nptr
1207 mov 48(%rsp),$n0 # restore *n0
1209 mov $num,0(%rsp) # save $num
1210 sub $num,$j # $j=-$num
1211 mov 64(%rsp),$A0[0] # t[0] # modsched #
1212 mov $n0,$m0 # # modsched #
1213 lea 64(%rsp,$num,2),%rax # end of t[] buffer
1214 lea 64(%rsp,$num),$tptr # end of t[] window
1215 mov %rax,8(%rsp) # save end of t[] buffer
1216 lea ($nptr,$num),$nptr # end of n[] buffer
1217 xor $topbit,$topbit # $topbit=0
1219 mov 0($nptr,$j),%rax # n[0] # modsched #
1220 mov 8($nptr,$j),$Ni[1] # n[1] # modsched #
1221 imulq $A0[0],$m0 # m0=t[0]*n0 # modsched #
1222 mov %rax,$Ni[0] # # modsched #
1223 jmp .Lsqr4x_mont_outer
1229 add %rax,$A0[0] # n[0]*m0+t[0]
1235 add 8($tptr,$j),$A0[1]
1238 add %rax,$A0[1] # n[1]*m0+t[1]
1244 mov 16($nptr,$j),$Ni[0] # n[2]
1249 add %rax,$A1[0] # n[0]*m1+"t[1]"
1252 mov $A1[0],8($tptr,$j) # "t[1]"
1255 add 16($tptr,$j),$A0[0]
1258 add %rax,$A0[0] # n[2]*m0+t[2]
1262 mov 24($nptr,$j),$Ni[1] # n[3]
1267 add %rax,$A1[1] # n[1]*m1+"t[2]"
1270 mov $A1[1],16($tptr,$j) # "t[2]"
1273 add 24($tptr,$j),$A0[1]
1277 add %rax,$A0[1] # n[3]*m0+t[3]
1280 jmp .Lsqr4x_mont_inner
1284 mov ($nptr,$j),$Ni[0] # n[4]
1289 add %rax,$A1[0] # n[2]*m1+"t[3]"
1292 mov $A1[0],-8($tptr,$j) # "t[3]"
1295 add ($tptr,$j),$A0[0]
1298 add %rax,$A0[0] # n[4]*m0+t[4]
1302 mov 8($nptr,$j),$Ni[1] # n[5]
1307 add %rax,$A1[1] # n[3]*m1+"t[4]"
1310 mov $A1[1],($tptr,$j) # "t[4]"
1313 add 8($tptr,$j),$A0[1]
1316 add %rax,$A0[1] # n[5]*m0+t[5]
1321 mov 16($nptr,$j),$Ni[0] # n[6]
1326 add %rax,$A1[0] # n[4]*m1+"t[5]"
1329 mov $A1[0],8($tptr,$j) # "t[5]"
1332 add 16($tptr,$j),$A0[0]
1335 add %rax,$A0[0] # n[6]*m0+t[6]
1339 mov 24($nptr,$j),$Ni[1] # n[7]
1344 add %rax,$A1[1] # n[5]*m1+"t[6]"
1347 mov $A1[1],16($tptr,$j) # "t[6]"
1350 add 24($tptr,$j),$A0[1]
1354 add %rax,$A0[1] # n[7]*m0+t[7]
1358 jne .Lsqr4x_mont_inner
1360 sub 0(%rsp),$j # $j=-$num # modsched #
1361 mov $n0,$m0 # # modsched #
1367 add %rax,$A1[0] # n[6]*m1+"t[7]"
1370 mov $A1[0],-8($tptr) # "t[7]"
1373 add ($tptr),$A0[0] # +t[8]
1375 mov 0($nptr,$j),$Ni[0] # n[0] # modsched #
1379 imulq 16($tptr,$j),$m0 # m0=t[0]*n0 # modsched #
1381 mov 8($nptr,$j),$Ni[1] # n[1] # modsched #
1383 mov 16($tptr,$j),$A0[0] # t[0] # modsched #
1386 add %rax,$A1[1] # n[7]*m1+"t[8]"
1387 mov $Ni[0],%rax # # modsched #
1389 mov $A1[1],($tptr) # "t[8]"
1392 add 8($tptr),$A1[0] # +t[9]
1395 lea 16($tptr),$tptr # "t[$num]>>128"
1397 mov $A1[0],-8($tptr) # "t[9]"
1398 cmp 8(%rsp),$tptr # are we done?
1399 jb .Lsqr4x_mont_outer
1401 mov 0(%rsp),$num # restore $num
1402 mov $topbit,($tptr) # save $topbit
1405 ##############################################################
1406 # Post-condition, 4x unrolled copy from bn_mul_mont
1409 my ($tptr,$nptr)=("%rbx",$aptr);
1410 my @ri=("%rax","%rdx","%r10","%r11");
1412 mov 64(%rsp,$num),@ri[0] # tp[0]
1413 lea 64(%rsp,$num),$tptr # upper half of t[2*$num] holds result
1414 mov 40(%rsp),$nptr # restore $nptr
1415 shr \$5,$num # num/4
1416 mov 8($tptr),@ri[1] # t[1]
1417 xor $i,$i # i=0 and clear CF!
1419 mov 32(%rsp),$rptr # restore $rptr
1421 mov 16($tptr),@ri[2] # t[2]
1422 mov 24($tptr),@ri[3] # t[3]
1424 lea -1($num),$j # j=num/4-1
1428 mov @ri[0],0($rptr,$i,8) # rp[i]=tp[i]-np[i]
1429 mov @ri[1],8($rptr,$i,8) # rp[i]=tp[i]-np[i]
1430 sbb 16($nptr,$i,8),@ri[2]
1431 mov 32($tptr,$i,8),@ri[0] # tp[i+1]
1432 mov 40($tptr,$i,8),@ri[1]
1433 sbb 24($nptr,$i,8),@ri[3]
1434 mov @ri[2],16($rptr,$i,8) # rp[i]=tp[i]-np[i]
1435 mov @ri[3],24($rptr,$i,8) # rp[i]=tp[i]-np[i]
1436 sbb 32($nptr,$i,8),@ri[0]
1437 mov 48($tptr,$i,8),@ri[2]
1438 mov 56($tptr,$i,8),@ri[3]
1439 sbb 40($nptr,$i,8),@ri[1]
1441 dec $j # doesn't affect CF!
1444 mov @ri[0],0($rptr,$i,8) # rp[i]=tp[i]-np[i]
1445 mov 32($tptr,$i,8),@ri[0] # load overflow bit
1446 sbb 16($nptr,$i,8),@ri[2]
1447 mov @ri[1],8($rptr,$i,8) # rp[i]=tp[i]-np[i]
1448 sbb 24($nptr,$i,8),@ri[3]
1449 mov @ri[2],16($rptr,$i,8) # rp[i]=tp[i]-np[i]
1451 sbb \$0,@ri[0] # handle upmost overflow bit
1452 mov @ri[3],24($rptr,$i,8) # rp[i]=tp[i]-np[i]
1459 or $nptr,$tptr # tp=borrow?tp:rp
1462 lea 64(%rsp,$num,8),$nptr
1463 movdqu ($tptr),%xmm1
1464 lea ($nptr,$num,8),$nptr
1465 movdqa %xmm0,64(%rsp) # zap lower half of temporary vector
1466 movdqa %xmm0,($nptr) # zap upper half of temporary vector
1467 movdqu %xmm1,($rptr)
1470 .Lsqr4x_copy: # copy or in-place refresh
1471 movdqu 16($tptr,$i),%xmm2
1472 movdqu 32($tptr,$i),%xmm1
1473 movdqa %xmm0,80(%rsp,$i) # zap lower half of temporary vector
1474 movdqa %xmm0,96(%rsp,$i) # zap lower half of temporary vector
1475 movdqa %xmm0,16($nptr,$i) # zap upper half of temporary vector
1476 movdqa %xmm0,32($nptr,$i) # zap upper half of temporary vector
1477 movdqu %xmm2,16($rptr,$i)
1478 movdqu %xmm1,32($rptr,$i)
1483 movdqu 16($tptr,$i),%xmm2
1484 movdqa %xmm0,80(%rsp,$i) # zap lower half of temporary vector
1485 movdqa %xmm0,16($nptr,$i) # zap upper half of temporary vector
1486 movdqu %xmm2,16($rptr,$i)
1490 mov 56(%rsp),%rsi # restore %rsp
1501 .size bn_sqr4x_mont,.-bn_sqr4x_mont
1505 .asciz "Montgomery Multiplication for x86_64, CRYPTOGAMS by <appro\@openssl.org>"
1509 # EXCEPTION_DISPOSITION handler (EXCEPTION_RECORD *rec,ULONG64 frame,
1510 # CONTEXT *context,DISPATCHER_CONTEXT *disp)
1518 .extern __imp_RtlVirtualUnwind
1519 .type mul_handler,\@abi-omnipotent
1533 mov 120($context),%rax # pull context->Rax
1534 mov 248($context),%rbx # pull context->Rip
1536 mov 8($disp),%rsi # disp->ImageBase
1537 mov 56($disp),%r11 # disp->HandlerData
1539 mov 0(%r11),%r10d # HandlerData[0]
1540 lea (%rsi,%r10),%r10 # end of prologue label
1541 cmp %r10,%rbx # context->Rip<end of prologue label
1542 jb .Lcommon_seh_tail
1544 mov 152($context),%rax # pull context->Rsp
1546 mov 4(%r11),%r10d # HandlerData[1]
1547 lea (%rsi,%r10),%r10 # epilogue label
1548 cmp %r10,%rbx # context->Rip>=epilogue label
1549 jae .Lcommon_seh_tail
1551 mov 192($context),%r10 # pull $num
1552 mov 8(%rax,%r10,8),%rax # pull saved stack pointer
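#
# The saved stack pointer recovered above is the one the prologue stashed
# at tp[num+1], i.e. 8(%rsp,num,8) ("tp[num+1]=%rsp" above); num itself is
# taken from context->R9. HandlerData[0] and HandlerData[1] are the
# prologue-end and epilogue RVAs supplied by the .LSEH_info_* entries at
# the bottom of the file.
#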
1561 mov %rbx,144($context) # restore context->Rbx
1562 mov %rbp,160($context) # restore context->Rbp
1563 mov %r12,216($context) # restore context->R12
1564 mov %r13,224($context) # restore context->R13
1565 mov %r14,232($context) # restore context->R14
1566 mov %r15,240($context) # restore context->R15
1568 jmp .Lcommon_seh_tail
1569 .size mul_handler,.-mul_handler
1571 .type sqr_handler,\@abi-omnipotent
1585 mov 120($context),%rax # pull context->Rax
1586 mov 248($context),%rbx # pull context->Rip
1588 lea .Lsqr4x_body(%rip),%r10
1589 cmp %r10,%rbx # context->Rip<.Lsqr4x_body
1590 jb .Lcommon_seh_tail
1592 mov 152($context),%rax # pull context->Rsp
1594 lea .Lsqr4x_epilogue(%rip),%r10
1595 cmp %r10,%rbx # context->Rip>=.Lsqr4x_epilogue
1596 jae .Lcommon_seh_tail
1598 mov 56(%rax),%rax # pull saved stack pointer
1607 mov %rbx,144($context) # restore context->Rbx
1608 mov %rbp,160($context) # restore context->Rbp
1609 mov %r12,216($context) # restore context->R12
1610 mov %r13,224($context) # restore context->R13
1611 mov %r14,232($context) # restore context->R14
1612 mov %r15,240($context) # restore context->R15
1617 mov %rax,152($context) # restore context->Rsp
1618 mov %rsi,168($context) # restore context->Rsi
1619 mov %rdi,176($context) # restore context->Rdi
1621 mov 40($disp),%rdi # disp->ContextRecord
1622 mov $context,%rsi # context
1623 mov \$154,%ecx # sizeof(CONTEXT) in qwords (rep movsq count)
1624 .long 0xa548f3fc # cld; rep movsq
1627 xor %rcx,%rcx # arg1, UNW_FLAG_NHANDLER
1628 mov 8(%rsi),%rdx # arg2, disp->ImageBase
1629 mov 0(%rsi),%r8 # arg3, disp->ControlPc
1630 mov 16(%rsi),%r9 # arg4, disp->FunctionEntry
1631 mov 40(%rsi),%r10 # disp->ContextRecord
1632 lea 56(%rsi),%r11 # &disp->HandlerData
1633 lea 24(%rsi),%r12 # &disp->EstablisherFrame
1634 mov %r10,32(%rsp) # arg5
1635 mov %r11,40(%rsp) # arg6
1636 mov %r12,48(%rsp) # arg7
1637 mov %rcx,56(%rsp) # arg8, (NULL)
1638 call *__imp_RtlVirtualUnwind(%rip)
1640 mov \$1,%eax # ExceptionContinueSearch
1652 .size sqr_handler,.-sqr_handler
1656 .rva .LSEH_begin_bn_mul_mont
1657 .rva .LSEH_end_bn_mul_mont
1658 .rva .LSEH_info_bn_mul_mont
1660 .rva .LSEH_begin_bn_mul4x_mont
1661 .rva .LSEH_end_bn_mul4x_mont
1662 .rva .LSEH_info_bn_mul4x_mont
1664 .rva .LSEH_begin_bn_sqr4x_mont
1665 .rva .LSEH_end_bn_sqr4x_mont
1666 .rva .LSEH_info_bn_sqr4x_mont
1670 .LSEH_info_bn_mul_mont:
1673 .rva .Lmul_body,.Lmul_epilogue # HandlerData[]
1674 .LSEH_info_bn_mul4x_mont:
1677 .rva .Lmul4x_body,.Lmul4x_epilogue # HandlerData[]
1678 .LSEH_info_bn_sqr4x_mont: