3 # ====================================================================
4 # Written by Andy Polyakov <appro@fy.chalmers.se> for the OpenSSL
5 # project. The module is, however, dual licensed under OpenSSL and
6 # CRYPTOGAMS licenses depending on where you obtain it. For further
7 # details see http://www.openssl.org/~appro/cryptogams/.
8 # ====================================================================
12 # Montgomery multiplication routine for x86_64. While it gives modest
13 # 9% improvement of rsa4096 sign on Opteron, rsa512 sign runs more
14 # than twice as fast. Most common rsa1024 sign is improved by a
15 # respectable 50%. It remains to be seen if loop unrolling and
16 # dedicated squaring routine can provide further improvement...
# Locate the perlasm translator next to this script (or in the shared
# perlasm directory) and pipe everything we print through it.  $output
# (the target filename/flavour) is expected to have been set earlier
# in the file -- TODO confirm against the full source.
$0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
( $xlate="${dir}x86_64-xlate.pl" and -f $xlate ) or
( $xlate="${dir}../../perlasm/x86_64-xlate.pl" and -f $xlate) or
die "can't locate x86_64-xlate.pl";

# Quote the interpreter and script paths so build trees containing
# spaces still work, and fail loudly instead of silently emitting
# nothing when the translator pipe cannot be started.
open STDOUT,"| \"$^X\" \"$xlate\" \"$output\"" or die "can't call $xlate: $!";
# bn_mul_mont(rp, ap, bp, np, n0, num) -- map the six C arguments to
# their x86_64 SysV ABI argument registers in one list assignment.
($rp,$ap,$bp,$np,$n0,$num)=
	("%rdi",	# BN_ULONG *rp,
	 "%rsi",	# const BN_ULONG *ap,
	 "%rdx",	# const BN_ULONG *bp,
	 "%rcx",	# const BN_ULONG *np,
	 "%r8",		# const BN_ULONG *n0,
	 "%r9");	# int num);

# %rdx is clobbered by mulq, so bp is kept in %r12 for the body.
$bp="%r12";
# NOTE(review): this is a fragment of the generated x86_64 assembly for
# bn_mul_mont() (word-by-word Montgomery multiplication).  Many lines of
# the full routine fall outside this chunk, so the comments below
# describe only what is visible; confirm against the complete file.
47 .type bn_mul_mont,\@function,6
# Prologue: carve a (num+2)-qword scratch vector tp[] out of the stack
# and align it to 1024 bytes.
61 lea (%rsp,%rax,8),%rsp # tp=alloca(8*(num+2))
62 and \$-1024,%rsp # minimize TLB usage
# Stash the caller's stack pointer past tp[num] so the epilogue can
# restore it, and move bp out of %rdx (clobbered by every mulq below).
64 mov %rbp,8(%rsp,$num,8) # tp[num+1]=%rsp
65 mov %rdx,$bp # $bp reassigned, remember?
67 mov ($n0),$n0 # pull n0[0] value
# --- first outer iteration (i=0): tp[] = ap[]*bp[0], Montgomery-reduced
# with m1 = tp[0]*n0 so the low word cancels against np[0]*m1.
72 mov ($bp),$m0 # m0=bp[0]
74 mulq $m0 # ap[0]*bp[0]
78 imulq $n0,%rax # "tp[0]"*n0
82 add $lo0,%rax # discarded
# Inner loop over j: accumulate ap[j]*bp[0] + np[j]*m1 into tp[],
# shifted down one word (hence the tp[j-1] store).
89 mulq $m0 # ap[j]*bp[0]
100 add $lo0,%rax # np[j]*m1+ap[j]*bp[0]
102 mov %rax,-16(%rsp,$j,8) # tp[j-1]
110 mov $hi1,-8(%rsp,$num,8)
111 mov %rdx,(%rsp,$num,8) # store upmost overflow bit
# --- outer loop (i=1..num-1): same as above, but each product is also
# folded together with the previous tp[] contents.
118 mov ($bp,$i,8),$m0 # m0=bp[i]
119 mov ($ap),%rax # ap[0]
120 mulq $m0 # ap[0]*bp[i]
121 add (%rsp),%rax # ap[0]*bp[i]+tp[0]
126 imulq $n0,%rax # tp[0]*n0
129 mulq ($np,$j,8) # np[0]*m1
130 add $lo0,%rax # discarded
131 mov 8(%rsp),$lo0 # tp[1]
# Inner loop over j: tp[j-1] = ap[j]*bp[i] + np[j]*m1 + tp[j] + carries.
139 mulq $m0 # ap[j]*bp[i]
142 add %rax,$lo0 # ap[j]*bp[i]+tp[j]
151 add $lo0,%rax # np[j]*m1+ap[j]*bp[i]+tp[j]
155 mov %rax,-16(%rsp,$j,8) # tp[j-1]
162 add $lo0,$hi1 # pull upmost overflow bit
164 mov $hi1,-8(%rsp,$num,8)
165 mov %rdx,(%rsp,$num,8) # store upmost overflow bit
# --- conditional final subtraction: compute rp[] = tp[] - np[] with a
# borrow chain; the sbb chain must not be broken, hence the CF notes.
171 lea (%rsp),$ap # borrow ap for tp
172 lea -1($num),$j # j=num-1
174 mov ($ap),%rax # tp[0]
175 xor $i,$i # i=0 and clear CF!
178 .Lsub: sbb ($np,$i,8),%rax
179 mov %rax,($rp,$i,8) # rp[i]=tp[i]-np[i]
180 dec $j # doesn't affect CF!
181 mov 8($ap,$i,8),%rax # tp[i+1]
185 sbb \$0,%rax # handle upmost overflow bit
# Branch-free source select: point $ap at tp if the subtraction
# borrowed, else at rp (NOTE(review): the masking setup lines are
# outside this fragment -- verify against the full file).
191 or $np,$ap # ap=borrow?tp:rp
# --- copy loop: write the selected result into rp[] and wipe the
# stack scratch vector so no modular-arithmetic residue is left behind.
193 .Lcopy: # copy or in-place refresh
195 mov %rax,($rp,$j,8) # rp[i]=tp[i]
196 mov $i,(%rsp,$j,8) # zap temporary vector
# Epilogue: restore the stack pointer saved in the prologue.
200 mov 8(%rsp,$num,8),%rsp # restore %rsp
209 .size bn_mul_mont,.-bn_mul_mont
210 .asciz "Montgomery Multiplication for x86_64, CRYPTOGAMS by <appro\@openssl.org>"