# Copyright 2011-2016 The OpenSSL Project Authors. All Rights Reserved.
# Licensed under the OpenSSL license (the "License"). You may not use
# this file except in compliance with the License. You can obtain a copy
# in the file LICENSE in the source distribution or at
# https://www.openssl.org/source/license.html
# ====================================================================
# Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
# project. The module is, however, dual licensed under OpenSSL and
# CRYPTOGAMS licenses depending on where you obtain it. For further
# details see http://www.openssl.org/~appro/cryptogams/.
# ====================================================================
# Companion to x86_64-mont.pl that optimizes cache-timing attack
# countermeasures. The subroutines are produced by replacing bp[i]
# references in their x86_64-mont.pl counterparts with cache-neutral
# references to the powers table computed in BN_mod_exp_mont_consttime.
# In addition, a subroutine that scatters elements of the powers table
# is implemented, so that scatter-/gathering can be tuned without
# modifying bn_exp.c.
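#
# A minimal C sketch of the cache-neutral gather idea (illustrative
# only; the real gather below works on 16-byte lanes with
# pcmpeqd/pand/por): every table entry is read and masked, so the
# memory access pattern does not depend on the secret index:
#
#	uint64_t gather(const uint64_t tbl[32], unsigned int idx)
#	{
#		uint64_t acc = 0;
#		for (unsigned int i = 0; i < 32; i++) {
#			uint64_t mask = 0 - (uint64_t)(i == idx);
#			acc |= tbl[i] & mask;	/* all-ones iff i==idx */
#		}
#		return acc;
#	}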
# Add MULX/AD*X code paths and additional interfaces to optimize for
# the branch prediction unit. For input lengths that are multiples of 8,
# the np argument is not just the modulus value, but one interleaved
# with 0. This is to optimize post-condition...
if ($flavour =~ /\./) { $output = $flavour; undef $flavour; }

$win64=0; $win64=1 if ($flavour =~ /[nm]asm|mingw64/ || $output =~ /\.asm$/);

$0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
( $xlate="${dir}x86_64-xlate.pl" and -f $xlate ) or
( $xlate="${dir}../../perlasm/x86_64-xlate.pl" and -f $xlate) or
die "can't locate x86_64-xlate.pl";

open OUT,"| \"$^X\" \"$xlate\" $flavour \"$output\"";
if (`$ENV{CC} -Wa,-v -c -o /dev/null -x assembler /dev/null 2>&1`
		=~ /GNU assembler version ([2-9]\.[0-9]+)/) {

if (!$addx && $win64 && ($flavour =~ /nasm/ || $ENV{ASM} =~ /nasm/) &&
	`nasm -v 2>&1` =~ /NASM version ([2-9]\.[0-9]+)/) {

if (!$addx && $win64 && ($flavour =~ /masm/ || $ENV{ASM} =~ /ml64/) &&
	`ml64 2>&1` =~ /Version ([0-9]+)\./) {

if (!$addx && `$ENV{CC} -v 2>&1` =~ /((?:^clang|LLVM) version|.*based on LLVM) ([3-9])\.([0-9]+)/) {
	my $ver = $2 + $3/100.0;	# 3.1->3.01, 3.10->3.10
# int bn_mul_mont_gather5(
$rp="%rdi";	# BN_ULONG *rp,
$ap="%rsi";	# const BN_ULONG *ap,
$bp="%rdx";	# const BN_ULONG *bp,
$np="%rcx";	# const BN_ULONG *np,
$n0="%r8";	# const BN_ULONG *n0,
$num="%r9";	# int num,
		# int idx);	# 0 to 2^5-1, "index" in $bp holding
		#		# pre-computed powers of a', interlaced
		#		# in such a manner that b[0] is $bp[idx],
		#		# b[1] is $bp[2^5+idx], etc.
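#
# Illustration (restating the interlacing just described): with a
# 2^5-entry window, limb k of power j lives at $bp[2^5*k + j], so
# fetching power idx reads $bp[idx], $bp[2^5+idx], $bp[2*2^5+idx], ...
# i.e. one limb out of every 32-limb "row" of the table.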
.extern	OPENSSL_ia32cap_P

.globl	bn_mul_mont_gather5
.type	bn_mul_mont_gather5,\@function,6
$code.=<<___ if ($addx);
	mov	OPENSSL_ia32cap_P+8(%rip),%r11d
	movd	`($win64?56:8)`(%rsp),%xmm5	# load 7th argument
	lea	-280(%rsp,$num,8),%r10	# future alloca(8*(num+2)+256+8)
	neg	$num			# restore $num
	and	\$-1024,%r10		# minimize TLB usage
	# An OS-agnostic version of __chkstk.
	#
	# Some OSes (Windows) insist on stack being "wired" to
	# physical memory in strictly sequential manner, i.e. if stack
	# allocation spans two pages, then a reference to the farthest
	# one can be punished with SEGV. But page walking can do good
	# even on other OSes, because it guarantees that a villain
	# thread hits the guard page before it can do damage to an
	# innocent one...
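	#
	# In C terms the walk below amounts to (a sketch, assuming
	# 4KiB pages):
	#
	#	while (old_sp - new_sp >= 4096) {
	#		old_sp -= 4096;
	#		*(volatile char *)old_sp;	/* touch the page */
	#	}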
	jmp	.Lmul_page_walk_done

.Lmul_page_walk_done:
	mov	%rax,8(%rsp,$num,8)	# tp[num+1]=%rsp

	lea	128($bp),%r12		# reassign $bp (+size optimization)

$STRIDE=2**5*8;		# 5 is "window size"
$N=$STRIDE/4;		# should match cache line size

	movdqa	0(%r10),%xmm0		# 00000001000000010000000000000000
	movdqa	16(%r10),%xmm1		# 00000002000000020000000200000002
	lea	24-112(%rsp,$num,8),%r10 # place the mask after tp[num+3] (+ICache optimization)
	pshufd	\$0,%xmm5,%xmm5		# broadcast index
########################################################################
# calculate mask by comparing 0..31 to index and save result to stack
	pcmpeqd	%xmm5,%xmm0		# compare to 1,0
for($k=0;$k<$STRIDE/16-4;$k+=4) {
	pcmpeqd	%xmm5,%xmm1		# compare to 3,2
	movdqa	%xmm0,`16*($k+0)+112`(%r10)
	pcmpeqd	%xmm5,%xmm2		# compare to 5,4
	movdqa	%xmm1,`16*($k+1)+112`(%r10)
	pcmpeqd	%xmm5,%xmm3		# compare to 7,6
	movdqa	%xmm2,`16*($k+2)+112`(%r10)
	movdqa	%xmm3,`16*($k+3)+112`(%r10)
$code.=<<___;	# last iteration can be optimized
	movdqa	%xmm0,`16*($k+0)+112`(%r10)
	movdqa	%xmm1,`16*($k+1)+112`(%r10)
	movdqa	%xmm2,`16*($k+2)+112`(%r10)
	pand	`16*($k+0)-128`($bp),%xmm0	# while it's still in register
	pand	`16*($k+1)-128`($bp),%xmm1
	pand	`16*($k+2)-128`($bp),%xmm2
	movdqa	%xmm3,`16*($k+3)+112`(%r10)
	pand	`16*($k+3)-128`($bp),%xmm3
for($k=0;$k<$STRIDE/16-4;$k+=4) {
	movdqa	`16*($k+0)-128`($bp),%xmm4
	movdqa	`16*($k+1)-128`($bp),%xmm5
	movdqa	`16*($k+2)-128`($bp),%xmm2
	pand	`16*($k+0)+112`(%r10),%xmm4
	movdqa	`16*($k+3)-128`($bp),%xmm3
	pand	`16*($k+1)+112`(%r10),%xmm5
	pand	`16*($k+2)+112`(%r10),%xmm2
	pand	`16*($k+3)+112`(%r10),%xmm3
	pshufd	\$0x4e,%xmm0,%xmm1
	movq	%xmm0,$m0		# m0=bp[0]
	mov	($n0),$n0		# pull n0[0] value
	mulq	$m0			# ap[0]*bp[0]
	imulq	$lo0,$m1		# "tp[0]"*n0
	add	%rax,$lo0		# discarded
	add	$hi0,$hi1		# np[j]*m1+ap[j]*bp[0]
	mov	$hi1,-16(%rsp,$j,8)	# tp[j-1]
	mulq	$m0			# ap[j]*bp[0]
	jne	.L1st			# note that upon exit $j==$num, so
					# they can be used interchangeably
	add	$hi0,$hi1		# np[j]*m1+ap[j]*bp[0]
	mov	$hi1,-16(%rsp,$num,8)	# tp[num-1]
	mov	$hi1,-8(%rsp,$num,8)
	mov	%rdx,(%rsp,$num,8)	# store upmost overflow bit
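	# What .L1st above and .Linner below compute, schematically (a
	# sketch with B = 2^64, not the exact register allocation): for
	# each multiplier word bp[i],
	#
	#	m1 = (tp[0] + ap[0]*bp[i]) * n0 mod B;
	#	tp = (tp + ap*bp[i] + np*m1) / B;
	#
	# where the division by B is exact, because the low word cancels
	# by construction (hence the "discarded" comments above).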
	lea	24+128(%rsp,$num,8),%rdx # where 256-byte mask is (+size optimization)

for($k=0;$k<$STRIDE/16;$k+=4) {
	movdqa	`16*($k+0)-128`($bp),%xmm0
	movdqa	`16*($k+1)-128`($bp),%xmm1
	movdqa	`16*($k+2)-128`($bp),%xmm2
	movdqa	`16*($k+3)-128`($bp),%xmm3
	pand	`16*($k+0)-128`(%rdx),%xmm0
	pand	`16*($k+1)-128`(%rdx),%xmm1
	pand	`16*($k+2)-128`(%rdx),%xmm2
	pand	`16*($k+3)-128`(%rdx),%xmm3
	pshufd	\$0x4e,%xmm4,%xmm0
	mov	($ap),%rax		# ap[0]
	movq	%xmm0,$m0		# m0=bp[i]
	mulq	$m0			# ap[0]*bp[i]
	add	%rax,$lo0		# ap[0]*bp[i]+tp[0]
	imulq	$lo0,$m1		# tp[0]*n0
	add	%rax,$lo0		# discarded
	mov	8(%rsp),$lo0		# tp[1]
	add	$lo0,$hi1		# np[j]*m1+ap[j]*bp[i]+tp[j]
	mov	$hi1,-16(%rsp,$j,8)	# tp[j-1]
	mulq	$m0			# ap[j]*bp[i]
	add	$hi0,$lo0		# ap[j]*bp[i]+tp[j]
	jne	.Linner			# note that upon exit $j==$num, so
					# they can be used interchangeably
	add	$lo0,$hi1		# np[j]*m1+ap[j]*bp[i]+tp[j]
	mov	(%rsp,$num,8),$lo0
	mov	$hi1,-16(%rsp,$num,8)	# tp[num-1]
	add	$lo0,$hi1		# pull upmost overflow bit
	mov	$hi1,-8(%rsp,$num,8)
	mov	%rdx,(%rsp,$num,8)	# store upmost overflow bit
	xor	$i,$i			# i=0 and clear CF!
	mov	(%rsp),%rax		# tp[0]
	lea	(%rsp),$ap		# borrow ap for tp

.Lsub:	sbb	($np,$i,8),%rax
	mov	%rax,($rp,$i,8)		# rp[i]=tp[i]-np[i]
	mov	8($ap,$i,8),%rax	# tp[i+1]
	dec	$j			# doesn't affect CF!
	sbb	\$0,%rax		# handle upmost overflow bit
	or	$np,$ap			# ap=borrow?tp:rp

.Lcopy:					# copy or in-place refresh
	mov	$i,(%rsp,$i,8)		# zap temporary vector
	mov	%rax,($rp,$i,8)		# rp[i]=tp[i]

	mov	8(%rsp,$num,8),%rsi	# restore %rsp
.size	bn_mul_mont_gather5,.-bn_mul_mont_gather5
my @A=("%r10","%r11");
my @N=("%r13","%rdi");

.type	bn_mul4x_mont_gather5,\@function,6
bn_mul4x_mont_gather5:
$code.=<<___ if ($addx);
	cmp	\$0x80108,%r11d		# check for AD*X+BMI2+BMI1
	shl	\$3,${num}d		# convert $num to bytes
	lea	($num,$num,2),%r10	# 3*$num in bytes
	##############################################################
	# Ensure that stack frame doesn't alias with $rptr+3*$num
	# modulo 4096, which covers ret[num], am[num] and n[num]
	# (see bn_exp.c). This is done to allow memory disambiguation
	# logic to do its magic. [Extra [num] is allocated in order
	# to align with bn_power5's frame, which is cleansed after
	# completing exponentiation. Extra 256 bytes is for the power
	# mask calculated from the 7th argument, the index.]
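	#
	# The intent, schematically (a sketch of the invariant, not of
	# the exact address arithmetic): choose the frame base sp so
	# that
	#
	#	(sp - ($rp + 3*$num)) % 4096 != 0
	#
	# i.e. the temporary vector never shares a page offset with the
	# ret/am/n triplet it is compared against, so loads and stores
	# to the two regions can be told apart by address bits alone.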
	lea	-320(%rsp,$num,2),%r11
	sub	%r11,%rbp		# align with $rp
	lea	-320(%rbp,$num,2),%rbp	# future alloca(frame+2*num*8+256)
	lea	4096-320(,$num,2),%r10
	lea	-320(%rbp,$num,2),%rbp	# future alloca(frame+2*num*8+256)
	jmp	.Lmul4x_page_walk_done

.Lmul4x_page_walk_done:

	mov	40(%rsp),%rsi		# restore %rsp
.size	bn_mul4x_mont_gather5,.-bn_mul4x_mont_gather5
.type	mul4x_internal,\@abi-omnipotent
	shl	\$5,$num		# $num was in bytes
	movd	`($win64?56:8)`(%rax),%xmm5	# load 7th argument, index
	lea	128(%rdx,$num),%r13	# end of powers table (+size optimization)
	shr	\$5,$num		# restore $num

$STRIDE=2**5*8;		# 5 is "window size"
$N=$STRIDE/4;		# should match cache line size

	movdqa	0(%rax),%xmm0		# 00000001000000010000000000000000
	movdqa	16(%rax),%xmm1		# 00000002000000020000000200000002
	lea	88-112(%rsp,$num),%r10	# place the mask after tp[num+1] (+ICache optimization)
	lea	128(%rdx),$bp		# size optimization
	pshufd	\$0,%xmm5,%xmm5		# broadcast index
########################################################################
# calculate mask by comparing 0..31 to index and save result to stack
	pcmpeqd	%xmm5,%xmm0		# compare to 1,0
for($i=0;$i<$STRIDE/16-4;$i+=4) {
	pcmpeqd	%xmm5,%xmm1		# compare to 3,2
	movdqa	%xmm0,`16*($i+0)+112`(%r10)
	pcmpeqd	%xmm5,%xmm2		# compare to 5,4
	movdqa	%xmm1,`16*($i+1)+112`(%r10)
	pcmpeqd	%xmm5,%xmm3		# compare to 7,6
	movdqa	%xmm2,`16*($i+2)+112`(%r10)
	movdqa	%xmm3,`16*($i+3)+112`(%r10)
$code.=<<___;	# last iteration can be optimized
	movdqa	%xmm0,`16*($i+0)+112`(%r10)
	movdqa	%xmm1,`16*($i+1)+112`(%r10)
	movdqa	%xmm2,`16*($i+2)+112`(%r10)
	pand	`16*($i+0)-128`($bp),%xmm0	# while it's still in register
	pand	`16*($i+1)-128`($bp),%xmm1
	pand	`16*($i+2)-128`($bp),%xmm2
	movdqa	%xmm3,`16*($i+3)+112`(%r10)
	pand	`16*($i+3)-128`($bp),%xmm3
for($i=0;$i<$STRIDE/16-4;$i+=4) {
	movdqa	`16*($i+0)-128`($bp),%xmm4
	movdqa	`16*($i+1)-128`($bp),%xmm5
	movdqa	`16*($i+2)-128`($bp),%xmm2
	pand	`16*($i+0)+112`(%r10),%xmm4
	movdqa	`16*($i+3)-128`($bp),%xmm3
	pand	`16*($i+1)+112`(%r10),%xmm5
	pand	`16*($i+2)+112`(%r10),%xmm2
	pand	`16*($i+3)+112`(%r10),%xmm3
	pshufd	\$0x4e,%xmm0,%xmm1
	movq	%xmm0,$m0		# m0=bp[0]

	mov	%r13,16+8(%rsp)		# save end of b[num]
	mov	$rp, 56+8(%rsp)		# save $rp
	mov	($n0),$n0		# pull n0[0] value
	lea	($ap,$num),$ap		# end of a[num]
	mulq	$m0			# ap[0]*bp[0]
	imulq	$A[0],$m1		# "tp[0]"*n0
	add	%rax,$A[0]		# discarded
	mov	16($ap,$num),%rax
	lea	4*8($num),$j		# j=4
	mulq	$m0			# ap[j]*bp[0]
	add	$A[0],$N[0]		# np[j]*m1+ap[j]*bp[0]
	mov	$N[0],-24($tp)		# tp[j-1]
	mulq	$m0			# ap[j]*bp[0]
	add	$A[1],$N[1]		# np[j]*m1+ap[j]*bp[0]
	mov	$N[1],-16($tp)		# tp[j-1]
	mulq	$m0			# ap[j]*bp[0]
	add	$A[0],$N[0]		# np[j]*m1+ap[j]*bp[0]
	mov	$N[0],-8($tp)		# tp[j-1]
	mulq	$m0			# ap[j]*bp[0]
	add	$A[1],$N[1]		# np[j]*m1+ap[j]*bp[0]
	mov	$N[1],($tp)		# tp[j-1]
	mulq	$m0			# ap[j]*bp[0]
	add	$A[0],$N[0]		# np[j]*m1+ap[j]*bp[0]
	mov	$N[0],-24($tp)		# tp[j-1]
	mulq	$m0			# ap[j]*bp[0]
	mov	($ap,$num),%rax		# ap[0]
	add	$A[1],$N[1]		# np[j]*m1+ap[j]*bp[0]
	mov	$N[1],-16($tp)		# tp[j-1]
	lea	($np,$num),$np		# rewind $np
	lea	16+128($tp),%rdx	# where 256-byte mask is (+size optimization)

for($i=0;$i<$STRIDE/16;$i+=4) {
	movdqa	`16*($i+0)-128`($bp),%xmm0
	movdqa	`16*($i+1)-128`($bp),%xmm1
	movdqa	`16*($i+2)-128`($bp),%xmm2
	movdqa	`16*($i+3)-128`($bp),%xmm3
	pand	`16*($i+0)-128`(%rdx),%xmm0
	pand	`16*($i+1)-128`(%rdx),%xmm1
	pand	`16*($i+2)-128`(%rdx),%xmm2
	pand	`16*($i+3)-128`(%rdx),%xmm3
	pshufd	\$0x4e,%xmm4,%xmm0
	movq	%xmm0,$m0		# m0=bp[i]
	mulq	$m0			# ap[0]*bp[i]
	add	%rax,$A[0]		# ap[0]*bp[i]+tp[0]
	imulq	$A[0],$m1		# tp[0]*n0
	mov	$N[1],($tp)		# store upmost overflow bit
	lea	($tp,$num),$tp		# rewind $tp
	add	%rax,$A[0]		# "$N[0]", discarded
	mulq	$m0			# ap[j]*bp[i]
	add	8($tp),$A[1]		# +tp[1]
	mov	16($ap,$num),%rax
	add	$A[1],$N[1]		# np[j]*m1+ap[j]*bp[i]+tp[j]
	lea	4*8($num),$j		# j=4
	mulq	$m0			# ap[j]*bp[i]
	add	16($tp),$A[0]		# ap[j]*bp[i]+tp[j]
	mov	$N[1],-32($tp)		# tp[j-1]
	mulq	$m0			# ap[j]*bp[i]
	mov	$N[0],-24($tp)		# tp[j-1]
	mulq	$m0			# ap[j]*bp[i]
	add	($tp),$A[0]		# ap[j]*bp[i]+tp[j]
	mov	$N[1],-16($tp)		# tp[j-1]
	mulq	$m0			# ap[j]*bp[i]
	mov	$N[0],-8($tp)		# tp[j-1]
	mulq	$m0			# ap[j]*bp[i]
	add	16($tp),$A[0]		# ap[j]*bp[i]+tp[j]
	mov	$N[1],-32($tp)		# tp[j-1]
	mulq	$m0			# ap[j]*bp[i]
	mov	($ap,$num),%rax		# ap[0]
	mov	$N[0],-24($tp)		# tp[j-1]
	mov	$N[1],-16($tp)		# tp[j-1]
	lea	($np,$num),$np		# rewind $np

	add	($tp),$N[0]		# pull upmost overflow bit
	adc	\$0,$N[1]		# upmost overflow bit
	sub	$N[0],$m1		# compare top-most words
	adc	$j,$j			# $j is zero
	sub	$N[1],%rax		# %rax=-$N[1]
	lea	($tp,$num),%rbx		# tptr in .sqr4x_sub
	lea	($np),%rbp		# nptr in .sqr4x_sub
	mov	56+8(%rsp),%rdi		# rptr in .sqr4x_sub
	dec	%r12			# so that after 'not' we get -n[0]
	jmp	.Lsqr4x_sub_entry
my @ri=("%rax",$bp,$m0,$m1);

	lea	($tp,$num),$tp		# rewind $tp
	lea	($np,$N[1],8),$np
	mov	56+8(%rsp),$rp		# restore $rp
	sbb	16*0($np),@ri[0]
	sbb	16*1($np),@ri[1]
	sbb	16*2($np),@ri[2]
	sbb	16*3($np),@ri[3]
.size	mul4x_internal,.-mul4x_internal
######################################################################
my $rptr="%rdi";	# BN_ULONG *rptr,
my $aptr="%rsi";	# const BN_ULONG *aptr,
my $bptr="%rdx";	# const void *table,
my $nptr="%rcx";	# const BN_ULONG *nptr,
my $n0  ="%r8";		# const BN_ULONG *n0);
my $num ="%r9";		# int num, has to be divisible by 8

my ($i,$j,$tptr)=("%rbp","%rcx",$rptr);
my @A0=("%r10","%r11");
my @A1=("%r12","%r13");
my ($a0,$a1,$ai)=("%r14","%r15","%rbx");

.type	bn_power5,\@function,6
$code.=<<___ if ($addx);
	mov	OPENSSL_ia32cap_P+8(%rip),%r11d
	cmp	\$0x80108,%r11d		# check for AD*X+BMI2+BMI1
	shl	\$3,${num}d		# convert $num to bytes
	lea	($num,$num,2),%r10d	# 3*$num
	##############################################################
	# Ensure that stack frame doesn't alias with $rptr+3*$num
	# modulo 4096, which covers ret[num], am[num] and n[num]
	# (see bn_exp.c). This is done to allow memory disambiguation
	# logic to do its magic. [Extra 256 bytes is for the power mask
	# calculated from the 7th argument, the index.]
	lea	-320(%rsp,$num,2),%r11
	sub	%r11,%rbp		# align with $aptr
	lea	-320(%rbp,$num,2),%rbp	# future alloca(frame+2*num*8+256)
	lea	4096-320(,$num,2),%r10
	lea	-320(%rbp,$num,2),%rbp	# future alloca(frame+2*num*8+256)
	lea	(%rbp,%r11),%rsp
	jmp	.Lpwr_page_walk_done

	lea	-4096(%rsp),%rsp
.Lpwr_page_walk_done:

	##############################################################
	# +0	saved $num, used in reduction section
	# +8	&t[2*$num], used in reduction section
	mov	%rax, 40(%rsp)		# save original %rsp

	movq	$rptr,%xmm1		# save $rptr, used in sqr8x
	movq	$nptr,%xmm2		# save $nptr
	movq	%r10, %xmm3		# -$num, used in sqr8x

	call	__bn_sqr8x_internal
	call	__bn_post4x_internal
	call	__bn_sqr8x_internal
	call	__bn_post4x_internal
	call	__bn_sqr8x_internal
	call	__bn_post4x_internal
	call	__bn_sqr8x_internal
	call	__bn_post4x_internal
	call	__bn_sqr8x_internal
	call	__bn_post4x_internal

	mov	40(%rsp),%rsi		# restore %rsp
.size	bn_power5,.-bn_power5
.globl	bn_sqr8x_internal
.hidden	bn_sqr8x_internal
.type	bn_sqr8x_internal,\@abi-omnipotent
__bn_sqr8x_internal:
	##############################################################
	# a) multiply-n-add everything but a[i]*a[i];
	# b) shift result of a) by 1 to the left and accumulate
	#    a[i]*a[i] products;
	##############################################################
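	#
	# In formula form (a sketch of the idea, with B = 2^64 and
	# a = sum_i a[i]*B^i):
	#
	#	a^2 = sum_i a[i]^2*B^(2i) + 2*sum_{i<j} a[i]*a[j]*B^(i+j)
	#
	# step a) accumulates the cross products a[i]*a[j], i<j, and
	# step b) doubles them via the shift and folds in the a[i]^2
	# diagonal terms.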
	lea	32(%r10),$i		# $i=-($num-32)
	lea	($aptr,$num),$aptr	# end of a[] buffer, ($aptr,$i)=&ap[2]
	mov	$num,$j			# $j=$num

					# comments apply to $num==8 case
	mov	-32($aptr,$i),$a0	# a[0]
	lea	48+8(%rsp,$num,2),$tptr	# end of tp[] buffer, &tp[2*$num]
	mov	-24($aptr,$i),%rax	# a[1]
	lea	-32($tptr,$i),$tptr	# end of tp[] window, &tp[2*$num-"$i"]
	mov	-16($aptr,$i),$ai	# a[2]
	mov	%rax,$A0[0]		# a[1]*a[0]
	mov	$A0[0],-24($tptr,$i)	# t[1]
	mov	$A0[1],-16($tptr,$i)	# t[2]
	mov	-8($aptr,$i),$ai	# a[3]
	mov	%rax,$A1[0]		# a[2]*a[1]+t[3]
	add	%rax,$A0[0]		# a[3]*a[0]+a[2]*a[1]+t[3]
	mov	$A0[0],-8($tptr,$j)	# t[3]
	mov	($aptr,$j),$ai		# a[4]
	add	%rax,$A1[1]		# a[3]*a[1]+t[4]
	add	%rax,$A0[1]		# a[4]*a[0]+a[3]*a[1]+t[4]
	mov	8($aptr,$j),$ai		# a[5]
	add	%rax,$A1[0]		# a[4]*a[3]+t[5]
	mov	$A0[1],($tptr,$j)	# t[4]
	add	%rax,$A0[0]		# a[5]*a[2]+a[4]*a[3]+t[5]
	mov	16($aptr,$j),$ai	# a[6]
	add	%rax,$A1[1]		# a[5]*a[3]+t[6]
	mov	$A0[0],8($tptr,$j)	# t[5]
	add	%rax,$A0[1]		# a[6]*a[2]+a[5]*a[3]+t[6]
	mov	24($aptr,$j),$ai	# a[7]
	add	%rax,$A1[0]		# a[6]*a[5]+t[7]
	mov	$A0[1],16($tptr,$j)	# t[6]
	add	%rax,$A0[0]		# a[7]*a[4]+a[6]*a[5]+t[7]
	mov	$A0[0],-8($tptr,$j)	# t[7]
	mov	$A1[1],($tptr)		# t[8]
	mov	%rdx,8($tptr)		# t[9]
.Lsqr4x_outer:				# comments apply to $num==6 case
	mov	-32($aptr,$i),$a0	# a[0]
	lea	48+8(%rsp,$num,2),$tptr	# end of tp[] buffer, &tp[2*$num]
	mov	-24($aptr,$i),%rax	# a[1]
	lea	-32($tptr,$i),$tptr	# end of tp[] window, &tp[2*$num-"$i"]
	mov	-16($aptr,$i),$ai	# a[2]
	mov	-24($tptr,$i),$A0[0]	# t[1]
	add	%rax,$A0[0]		# a[1]*a[0]+t[1]
	mov	$A0[0],-24($tptr,$i)	# t[1]
	add	-16($tptr,$i),$A0[1]	# a[2]*a[0]+t[2]
	mov	$A0[1],-16($tptr,$i)	# t[2]
	mov	-8($aptr,$i),$ai	# a[3]
	add	%rax,$A1[0]		# a[2]*a[1]+t[3]
	add	-8($tptr,$i),$A1[0]
	add	%rax,$A0[0]		# a[3]*a[0]+a[2]*a[1]+t[3]
	mov	$A0[0],-8($tptr,$i)	# t[3]
	mov	($aptr,$j),$ai		# a[4]
	add	%rax,$A1[1]		# a[3]*a[1]+t[4]
	add	($tptr,$j),$A1[1]
	add	%rax,$A0[1]		# a[4]*a[0]+a[3]*a[1]+t[4]
	mov	8($aptr,$j),$ai		# a[5]
	add	%rax,$A1[0]		# a[4]*a[3]+t[5]
	mov	$A0[1],($tptr,$j)	# t[4]
	add	8($tptr,$j),$A1[0]
	add	%rax,$A0[0]		# a[5]*a[2]+a[4]*a[3]+t[5]
	mov	$A0[0],-8($tptr,$j)	# t[5], "preloaded t[1]" below
	mov	$A1[1],($tptr)		# t[6], "preloaded t[2]" below
	mov	%rdx,8($tptr)		# t[7], "preloaded t[3]" below
					# comments apply to $num==4 case
	mov	-32($aptr),$a0		# a[0]
	lea	48+8(%rsp,$num,2),$tptr	# end of tp[] buffer, &tp[2*$num]
	mov	-24($aptr),%rax		# a[1]
	lea	-32($tptr,$i),$tptr	# end of tp[] window, &tp[2*$num-"$i"]
	mov	-16($aptr),$ai		# a[2]
	add	%rax,$A0[0]		# a[1]*a[0]+t[1], preloaded t[1]
	mov	$A0[0],-24($tptr)	# t[1]
	add	$A1[1],$A0[1]		# a[2]*a[0]+t[2], preloaded t[2]
	mov	-8($aptr),$ai		# a[3]
	add	%rax,$A1[0]		# a[2]*a[1]+t[3], preloaded t[3]
	mov	$A0[1],-16($tptr)	# t[2]
	add	%rax,$A0[0]		# a[3]*a[0]+a[2]*a[1]+t[3]
	mov	$A0[0],-8($tptr)	# t[3]
	mov	-16($aptr),%rax		# a[2]
	mov	$A1[1],($tptr)		# t[4]
	mov	%rdx,8($tptr)		# t[5]
my ($shift,$carry)=($a0,$a1);
my @S=(@A1,$ai,$n0);

	sub	$num,$i			# $i=16-$num
	add	$A1[0],%rax		# t[5]
	mov	%rax,8($tptr)		# t[5]
	mov	%rdx,16($tptr)		# t[6]
	mov	$carry,24($tptr)	# t[7]
	mov	-16($aptr,$i),%rax	# a[0]
	lea	48+8(%rsp),$tptr
	xor	$A0[0],$A0[0]		# t[0]
	mov	8($tptr),$A0[1]		# t[1]
	lea	($shift,$A0[0],2),$S[0]	# t[2*i]<<1 | shift
	lea	($j,$A0[1],2),$S[1]	# t[2*i+1]<<1 |
	or	$A0[0],$S[1]		# | t[2*i]>>63
	mov	16($tptr),$A0[0]	# t[2*i+2]	# prefetch
	mov	$A0[1],$shift		# shift=t[2*i+1]>>63
	mul	%rax			# a[i]*a[i]
	neg	$carry			# mov $carry,cf
	mov	24($tptr),$A0[1]	# t[2*i+2+1]	# prefetch
	mov	-8($aptr,$i),%rax	# a[i+1]	# prefetch
	lea	($shift,$A0[0],2),$S[2]	# t[2*i]<<1 | shift
	sbb	$carry,$carry		# mov cf,$carry
	lea	($j,$A0[1],2),$S[3]	# t[2*i+1]<<1 |
	or	$A0[0],$S[3]		# | t[2*i]>>63
	mov	32($tptr),$A0[0]	# t[2*i+2]	# prefetch
	mov	$A0[1],$shift		# shift=t[2*i+1]>>63
	mul	%rax			# a[i]*a[i]
	neg	$carry			# mov $carry,cf
	mov	40($tptr),$A0[1]	# t[2*i+2+1]	# prefetch
	mov	0($aptr,$i),%rax	# a[i+1]	# prefetch
	sbb	$carry,$carry		# mov cf,$carry
	jmp	.Lsqr4x_shift_n_add
.Lsqr4x_shift_n_add:
	lea	($shift,$A0[0],2),$S[0]	# t[2*i]<<1 | shift
	lea	($j,$A0[1],2),$S[1]	# t[2*i+1]<<1 |
	or	$A0[0],$S[1]		# | t[2*i]>>63
	mov	-16($tptr),$A0[0]	# t[2*i+2]	# prefetch
	mov	$A0[1],$shift		# shift=t[2*i+1]>>63
	mul	%rax			# a[i]*a[i]
	neg	$carry			# mov $carry,cf
	mov	-8($tptr),$A0[1]	# t[2*i+2+1]	# prefetch
	mov	-8($aptr,$i),%rax	# a[i+1]	# prefetch
	mov	$S[0],-32($tptr)
	lea	($shift,$A0[0],2),$S[2]	# t[2*i]<<1 | shift
	mov	$S[1],-24($tptr)
	sbb	$carry,$carry		# mov cf,$carry
	lea	($j,$A0[1],2),$S[3]	# t[2*i+1]<<1 |
	or	$A0[0],$S[3]		# | t[2*i]>>63
	mov	0($tptr),$A0[0]		# t[2*i+2]	# prefetch
	mov	$A0[1],$shift		# shift=t[2*i+1]>>63
	mul	%rax			# a[i]*a[i]
	neg	$carry			# mov $carry,cf
	mov	8($tptr),$A0[1]		# t[2*i+2+1]	# prefetch
	mov	0($aptr,$i),%rax	# a[i+1]	# prefetch
	mov	$S[2],-16($tptr)
	lea	($shift,$A0[0],2),$S[0]	# t[2*i]<<1 | shift
	sbb	$carry,$carry		# mov cf,$carry
	lea	($j,$A0[1],2),$S[1]	# t[2*i+1]<<1 |
	or	$A0[0],$S[1]		# | t[2*i]>>63
	mov	16($tptr),$A0[0]	# t[2*i+2]	# prefetch
	mov	$A0[1],$shift		# shift=t[2*i+1]>>63
	mul	%rax			# a[i]*a[i]
	neg	$carry			# mov $carry,cf
	mov	24($tptr),$A0[1]	# t[2*i+2+1]	# prefetch
	mov	8($aptr,$i),%rax	# a[i+1]	# prefetch
	lea	($shift,$A0[0],2),$S[2]	# t[2*i]<<1 | shift
	sbb	$carry,$carry		# mov cf,$carry
	lea	($j,$A0[1],2),$S[3]	# t[2*i+1]<<1 |
	or	$A0[0],$S[3]		# | t[2*i]>>63
	mov	32($tptr),$A0[0]	# t[2*i+2]	# prefetch
	mov	$A0[1],$shift		# shift=t[2*i+1]>>63
	mul	%rax			# a[i]*a[i]
	neg	$carry			# mov $carry,cf
	mov	40($tptr),$A0[1]	# t[2*i+2+1]	# prefetch
	mov	16($aptr,$i),%rax	# a[i+1]	# prefetch
	sbb	$carry,$carry		# mov cf,$carry
	jnz	.Lsqr4x_shift_n_add

	lea	($shift,$A0[0],2),$S[0]	# t[2*i]<<1 | shift
	lea	($j,$A0[1],2),$S[1]	# t[2*i+1]<<1 |
	or	$A0[0],$S[1]		# | t[2*i]>>63
	mov	-16($tptr),$A0[0]	# t[2*i+2]	# prefetch
	mov	$A0[1],$shift		# shift=t[2*i+1]>>63
	mul	%rax			# a[i]*a[i]
	neg	$carry			# mov $carry,cf
	mov	-8($tptr),$A0[1]	# t[2*i+2+1]	# prefetch
	mov	-8($aptr),%rax		# a[i+1]	# prefetch
	mov	$S[0],-32($tptr)
	lea	($shift,$A0[0],2),$S[2]	# t[2*i]<<1 | shift
	mov	$S[1],-24($tptr)
	sbb	$carry,$carry		# mov cf,$carry
	lea	($j,$A0[1],2),$S[3]	# t[2*i+1]<<1 |
	or	$A0[0],$S[3]		# | t[2*i]>>63
	mul	%rax			# a[i]*a[i]
	neg	$carry			# mov $carry,cf
	mov	$S[2],-16($tptr)
######################################################################
# Montgomery reduction part, "word-by-word" algorithm.
#
# This new path is inspired by multiple submissions from Intel, by
# Shay Gueron, Vlad Krasnov, Erdinc Ozturk, James Guilford,
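#
# Schematically (a sketch; B = 2^64):
#
#	for (i = 0; i < num; i++) {
#		m = t[i] * n0 mod B;
#		t += m * n * B^i;	/* zeroes word t[i] */
#	}
#	t >>= 64*num;			/* keep the upper half */
#
# the 8x code below processes eight modulus words per iteration and
# puts the n0*a[i] values aside for the tail.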
my ($nptr,$tptr,$carry,$m0)=("%rbp","%rdi","%rsi","%rbx");

__bn_sqr8x_reduction:
	lea	($nptr,$num),%rcx	# end of n[]
	lea	48+8(%rsp,$num,2),%rdx	# end of t[] buffer
	lea	48+8(%rsp,$num),$tptr	# end of initial t[] window
	jmp	.L8x_reduction_loop

.L8x_reduction_loop:
	lea	($tptr,$num),$tptr	# start of current t[] window
	mov	%rax,(%rdx)		# store top-most carry bit
	lea	8*8($tptr),$tptr
	imulq	32+8(%rsp),$m0		# n0*a[0]
	mov	8*0($nptr),%rax		# n[0]
	mov	8*1($nptr),%rax		# n[1]
	mov	$m0,48-8+8(%rsp,%rcx,8)	# put aside n0*a[i]
	mov	32+8(%rsp),$carry	# pull n0, borrow $carry
	imulq	%r8,$carry		# modulo-scheduled
	mov	$carry,$m0		# n0*a[i]
	mov	8*0($nptr),%rax		# n[0]
	lea	8*8($nptr),$nptr
	mov	8+8(%rsp),%rdx		# pull end of t[]
	cmp	0+8(%rsp),$nptr		# end of n[]?
	sbb	$carry,$carry		# top carry
	mov	48+56+8(%rsp),$m0	# pull n0*a[0]
	mov	%r8,($tptr)		# save result
	lea	8($tptr),$tptr		# $tptr++
	mov	48-16+8(%rsp,%rcx,8),$m0 # pull n0*a[i]
	mov	8*0($nptr),%rax		# pull n[0]
	lea	8*8($nptr),$nptr
	mov	8+8(%rsp),%rdx		# pull end of t[]
	cmp	0+8(%rsp),$nptr		# end of n[]?
	jae	.L8x_tail_done		# break out of loop
	mov	48+56+8(%rsp),$m0	# pull n0*a[0]
	mov	8*0($nptr),%rax		# pull n[0]
	sbb	$carry,$carry		# top carry
	add	(%rdx),%r8		# can this overflow?
	adc	\$0,%rax		# top-most carry
	mov	-8($nptr),%rcx		# np[num-1]
	movq	%xmm2,$nptr		# restore $nptr
	mov	%r8,8*0($tptr)		# store top 512 bits
	movq	%xmm3,$num		# $num is %r9, can't be moved upwards
	lea	8*8($tptr),$tptr
	cmp	%rdx,$tptr		# end of t[]?
	jb	.L8x_reduction_loop
.size	bn_sqr8x_internal,.-bn_sqr8x_internal
##############################################################
# Post-condition, 4x unrolled
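#
# i.e. the usual conditional subtraction at the end of Montgomery
# reduction, schematically (a sketch):
#
#	borrow = (t - n) < 0;
#	r = borrow ? t : t - n;
#
# computed branch-free with sbb, four words per iteration.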
my ($tptr,$nptr)=("%rbx","%rbp");

.type	__bn_post4x_internal,\@abi-omnipotent
__bn_post4x_internal:
	lea	(%rdi,$num),$tptr	# %rdi was $tptr above
	movq	%xmm1,$rptr		# restore $rptr
	movq	%xmm1,$aptr		# prepare for back-to-back call
	dec	%r12			# so that after 'not' we get -n[0]
	jmp	.Lsqr4x_sub_entry
	lea	8*4($nptr),$nptr
	neg	%r10			# mov %r10,%cf
	lea	8*4($tptr),$tptr
	sbb	%r10,%r10		# mov %cf,%r10
	lea	8*4($rptr),$rptr
	mov	$num,%r10		# prepare for back-to-back call
	neg	$num			# restore $num
.size	__bn_post4x_internal,.-__bn_post4x_internal
.globl	bn_from_montgomery
.type	bn_from_montgomery,\@abi-omnipotent
	testl	\$7,`($win64?"48(%rsp)":"%r9d")`
.size	bn_from_montgomery,.-bn_from_montgomery

.type	bn_from_mont8x,\@function,6
	shl	\$3,${num}d		# convert $num to bytes
	lea	($num,$num,2),%r10	# 3*$num in bytes

	##############################################################
	# Ensure that stack frame doesn't alias with $rptr+3*$num
	# modulo 4096, which covers ret[num], am[num] and n[num]
	# (see bn_exp.c). The stack is allocated so as to align with
	# bn_power5's frame, and as bn_from_montgomery happens to be
	# the last operation, we use the opportunity to cleanse it.
	lea	-320(%rsp,$num,2),%r11
	sub	%r11,%rbp		# align with $aptr
	lea	-320(%rbp,$num,2),%rbp	# future alloca(frame+2*$num*8+256)
	lea	4096-320(,$num,2),%r10
	lea	-320(%rbp,$num,2),%rbp	# future alloca(frame+2*$num*8+256)
	lea	(%rbp,%r11),%rsp
	jmp	.Lfrom_page_walk_done

	lea	-4096(%rsp),%rsp
.Lfrom_page_walk_done:

	##############################################################
	# +0	saved $num, used in reduction section
	# +8	&t[2*$num], used in reduction section
	mov	%rax, 40(%rsp)		# save original %rsp

	movdqu	($aptr),%xmm1
	movdqu	16($aptr),%xmm2
	movdqu	32($aptr),%xmm3
	movdqa	%xmm0,(%rax,$num)
	movdqu	48($aptr),%xmm4
	movdqa	%xmm0,16(%rax,$num)
	.byte	0x48,0x8d,0xb6,0x40,0x00,0x00,0x00	# lea 64($aptr),$aptr
	movdqa	%xmm0,32(%rax,$num)
	movdqa	%xmm2,16(%rax)
	movdqa	%xmm0,48(%rax,$num)
	movdqa	%xmm3,32(%rax)
	movdqa	%xmm4,48(%rax)
	movq	%r10, %xmm3		# -num
$code.=<<___ if ($addx);
	mov	OPENSSL_ia32cap_P+8(%rip),%r11d
	cmp	\$0x80108,%r11d		# check for AD*X+BMI2+BMI1
	lea	(%rax,$num),$rptr
	call	__bn_sqrx8x_reduction
	call	__bn_postx4x_internal

	mov	40(%rsp),%rsi		# restore %rsp
	jmp	.Lfrom_mont_zero

	call	__bn_sqr8x_reduction
	call	__bn_post4x_internal

	mov	40(%rsp),%rsi		# restore %rsp
	jmp	.Lfrom_mont_zero

	movdqa	%xmm0,16*0(%rax)
	movdqa	%xmm0,16*1(%rax)
	movdqa	%xmm0,16*2(%rax)
	movdqa	%xmm0,16*3(%rax)
	jnz	.Lfrom_mont_zero
.size	bn_from_mont8x,.-bn_from_mont8x
my $bp="%rdx";	# restore original value

.type	bn_mulx4x_mont_gather5,\@function,6
bn_mulx4x_mont_gather5:
	shl	\$3,${num}d		# convert $num to bytes
	lea	($num,$num,2),%r10	# 3*$num in bytes

	##############################################################
	# Ensure that stack frame doesn't alias with $rptr+3*$num
	# modulo 4096, which covers ret[num], am[num] and n[num]
	# (see bn_exp.c). This is done to allow memory disambiguation
	# logic to do its magic. [Extra [num] is allocated in order
	# to align with bn_power5's frame, which is cleansed after
	# completing exponentiation. Extra 256 bytes is for the power
	# mask calculated from the 7th argument, the index.]
	lea	-320(%rsp,$num,2),%r11
	sub	%r11,%rbp		# align with $aptr
	lea	-320(%rbp,$num,2),%rbp	# future alloca(frame+2*$num*8+256)
	lea	4096-320(,$num,2),%r10
	lea	-320(%rbp,$num,2),%rbp	# future alloca(frame+2*$num*8+256)
	and	\$-64,%rbp		# ensure alignment
	lea	(%rbp,%r11),%rsp
	ja	.Lmulx4x_page_walk
	jmp	.Lmulx4x_page_walk_done

	lea	-4096(%rsp),%rsp
	ja	.Lmulx4x_page_walk
.Lmulx4x_page_walk_done:
	##############################################################
	# +8	off-loaded &b[i]
	mov	$n0, 32(%rsp)		# save *n0
	mov	%rax,40(%rsp)		# save original %rsp

	call	mulx4x_internal

	mov	40(%rsp),%rsi		# restore %rsp
.size	bn_mulx4x_mont_gather5,.-bn_mulx4x_mont_gather5
.type	mulx4x_internal,\@abi-omnipotent
	mov	$num,8(%rsp)		# save -$num (it was in bytes)
	neg	$num			# restore $num
	neg	%r10			# restore $num
	lea	128($bp,$num),%r13	# end of powers table (+size optimization)
	movd	`($win64?56:8)`(%rax),%xmm5	# load 7th argument
	lea	.Linc(%rip),%rax
	mov	%r13,16+8(%rsp)		# end of b[num]
	mov	$num,24+8(%rsp)		# inner counter
	mov	$rp, 56+8(%rsp)		# save $rp

my ($aptr, $bptr, $nptr, $tptr, $mi,  $bi,  $zero, $num)=
   ("%rsi","%rdi","%rcx","%rbx","%r8","%r9","%rbp","%rax");
my $STRIDE=2**5*8;		# 5 is "window size"
my $N=$STRIDE/4;		# should match cache line size

	movdqa	0(%rax),%xmm0		# 00000001000000010000000000000000
	movdqa	16(%rax),%xmm1		# 00000002000000020000000200000002
	lea	88-112(%rsp,%r10),%r10	# place the mask after tp[num+1] (+ICache optimization)
	lea	128($bp),$bptr		# size optimization
	pshufd	\$0,%xmm5,%xmm5		# broadcast index
########################################################################
# calculate mask by comparing 0..31 to index and save result to stack
	pcmpeqd	%xmm5,%xmm0		# compare to 1,0
for($i=0;$i<$STRIDE/16-4;$i+=4) {
	pcmpeqd	%xmm5,%xmm1		# compare to 3,2
	movdqa	%xmm0,`16*($i+0)+112`(%r10)
	pcmpeqd	%xmm5,%xmm2		# compare to 5,4
	movdqa	%xmm1,`16*($i+1)+112`(%r10)
	pcmpeqd	%xmm5,%xmm3		# compare to 7,6
	movdqa	%xmm2,`16*($i+2)+112`(%r10)
	movdqa	%xmm3,`16*($i+3)+112`(%r10)
$code.=<<___;	# last iteration can be optimized
	movdqa	%xmm0,`16*($i+0)+112`(%r10)
	movdqa	%xmm1,`16*($i+1)+112`(%r10)
	movdqa	%xmm2,`16*($i+2)+112`(%r10)
	pand	`16*($i+0)-128`($bptr),%xmm0	# while it's still in register
	pand	`16*($i+1)-128`($bptr),%xmm1
	pand	`16*($i+2)-128`($bptr),%xmm2
	movdqa	%xmm3,`16*($i+3)+112`(%r10)
	pand	`16*($i+3)-128`($bptr),%xmm3
for($i=0;$i<$STRIDE/16-4;$i+=4) {
	movdqa	`16*($i+0)-128`($bptr),%xmm4
	movdqa	`16*($i+1)-128`($bptr),%xmm5
	movdqa	`16*($i+2)-128`($bptr),%xmm2
	pand	`16*($i+0)+112`(%r10),%xmm4
	movdqa	`16*($i+3)-128`($bptr),%xmm3
	pand	`16*($i+1)+112`(%r10),%xmm5
	pand	`16*($i+2)+112`(%r10),%xmm2
	pand	`16*($i+3)+112`(%r10),%xmm3
	pshufd	\$0x4e,%xmm0,%xmm1
	lea	$STRIDE($bptr),$bptr
	movq	%xmm0,%rdx		# bp[0]
	lea	64+8*4+8(%rsp),$tptr

	mulx	0*8($aptr),$mi,%rax	# a[0]*b[0]
	mulx	1*8($aptr),%r11,%r12	# a[1]*b[0]
	mulx	2*8($aptr),%rax,%r13	# ...
	mulx	3*8($aptr),%rax,%r14
	imulq	32+8(%rsp),$mi		# "t[0]"*n0
	xor	$zero,$zero		# cf=0, of=0
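	# A note on the MULX/ADCX/ADOX idiom used throughout this path
	# (a sketch of the idea, not code emitted here): adcx adds
	# through CF only and adox through OF only, so two independent
	# carry chains can be interleaved without saving flags in
	# between, e.g.
	#
	#	adcx	%rax,%r15	# chain 1: consumes and sets CF
	#	adox	%rbx,%r14	# chain 2: consumes and sets OF
	#
	# and a single "xor $zero,$zero" as above clears both CF and OF.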
	mov	$bptr,8+8(%rsp)		# off-load &b[i]
	lea	4*8($aptr),$aptr
	adcx	$zero,%r14		# cf=0

	mulx	0*8($nptr),%rax,%r10
	adcx	%rax,%r15		# discarded
	mulx	1*8($nptr),%rax,%r11
	mulx	2*8($nptr),%rax,%r12
	mov	24+8(%rsp),$bptr	# counter value
	mov	%r10,-8*4($tptr)
	mulx	3*8($nptr),%rax,%r15
	mov	%r11,-8*3($tptr)
	adox	$zero,%r15		# of=0
	lea	4*8($nptr),$nptr
	mov	%r12,-8*2($tptr)

	adcx	$zero,%r15		# cf=0, modulo-scheduled
	mulx	0*8($aptr),%r10,%rax	# a[4]*b[0]
	mulx	1*8($aptr),%r11,%r14	# a[5]*b[0]
	mulx	2*8($aptr),%r12,%rax	# ...
	mulx	3*8($aptr),%r13,%r14
	adcx	$zero,%r14		# cf=0
	lea	4*8($aptr),$aptr
	lea	4*8($tptr),$tptr

	mulx	0*8($nptr),%rax,%r15
	mulx	1*8($nptr),%rax,%r15
	mulx	2*8($nptr),%rax,%r15
	mov	%r10,-5*8($tptr)
	mov	%r11,-4*8($tptr)
	mulx	3*8($nptr),%rax,%r15
	mov	%r12,-3*8($tptr)
	lea	4*8($nptr),$nptr
	mov	%r13,-2*8($tptr)

	dec	$bptr			# of=0, pass cf
	mov	8(%rsp),$num		# load -num
	adc	$zero,%r15		# modulo-scheduled
	lea	($aptr,$num),$aptr	# rewind $aptr
	mov	8+8(%rsp),$bptr		# re-load &b[i]
	adc	$zero,$zero		# top-most carry
	mov	%r14,-1*8($tptr)
	lea	16-256($tptr),%r10	# where 256-byte mask is (+density control)

for($i=0;$i<$STRIDE/16;$i+=4) {
	movdqa	`16*($i+0)-128`($bptr),%xmm0
	movdqa	`16*($i+1)-128`($bptr),%xmm1
	movdqa	`16*($i+2)-128`($bptr),%xmm2
	pand	`16*($i+0)+256`(%r10),%xmm0
	movdqa	`16*($i+3)-128`($bptr),%xmm3
	pand	`16*($i+1)+256`(%r10),%xmm1
	pand	`16*($i+2)+256`(%r10),%xmm2
	pand	`16*($i+3)+256`(%r10),%xmm3
	pshufd	\$0x4e,%xmm4,%xmm0
	lea	$STRIDE($bptr),$bptr
	movq	%xmm0,%rdx		# m0=bp[i]
	mov	$zero,($tptr)		# save top-most carry
	lea	4*8($tptr,$num),$tptr	# rewind $tptr
	mulx	0*8($aptr),$mi,%r11	# a[0]*b[i]
	xor	$zero,$zero		# cf=0, of=0
	mulx	1*8($aptr),%r14,%r12	# a[1]*b[i]
	adox	-4*8($tptr),$mi		# +t[0]
	mulx	2*8($aptr),%r15,%r13	# ...
	adox	-3*8($tptr),%r11
	mulx	3*8($aptr),%rdx,%r14
	adox	-2*8($tptr),%r12
	lea	($nptr,$num),$nptr	# rewind $nptr
	lea	4*8($aptr),$aptr
	adox	-1*8($tptr),%r13

	imulq	32+8(%rsp),$mi		# "t[0]"*n0
	xor	$zero,$zero		# cf=0, of=0
	mov	$bptr,8+8(%rsp)		# off-load &b[i]

	mulx	0*8($nptr),%rax,%r10
	adcx	%rax,%r15		# discarded
	mulx	1*8($nptr),%rax,%r11
	mulx	2*8($nptr),%rax,%r12
	mulx	3*8($nptr),%rax,%r15
	mov	24+8(%rsp),$bptr	# counter value
	mov	%r10,-8*4($tptr)
	mov	%r11,-8*3($tptr)
	adox	$zero,%r15		# of=0
	mov	%r12,-8*2($tptr)
	lea	4*8($nptr),$nptr

	mulx	0*8($aptr),%r10,%rax	# a[4]*b[i]
	adcx	$zero,%r15		# cf=0, modulo-scheduled
	mulx	1*8($aptr),%r11,%r14	# a[5]*b[i]
	adcx	0*8($tptr),%r10
	mulx	2*8($aptr),%r12,%rax	# ...
	adcx	1*8($tptr),%r11
	mulx	3*8($aptr),%r13,%r14
	adcx	2*8($tptr),%r12
	adcx	3*8($tptr),%r13
	adox	$zero,%r14		# of=0
	lea	4*8($aptr),$aptr
	lea	4*8($tptr),$tptr
	adcx	$zero,%r14		# cf=0

	mulx	0*8($nptr),%rax,%r15
	mulx	1*8($nptr),%rax,%r15
	mulx	2*8($nptr),%rax,%r15
	mov	%r10,-5*8($tptr)
	mov	%r11,-4*8($tptr)
	mulx	3*8($nptr),%rax,%r15
	lea	4*8($nptr),$nptr
	mov	%r12,-3*8($tptr)
	mov	%r13,-2*8($tptr)

	dec	$bptr			# of=0, pass cf
	mov	0+8(%rsp),$num		# load -num
	adc	$zero,%r15		# modulo-scheduled
	sub	0*8($tptr),$bptr	# pull top-most carry to %cf
	mov	8+8(%rsp),$bptr		# re-load &b[i]
	lea	($aptr,$num),$aptr	# rewind $aptr
	adc	$zero,$zero		# top-most carry
	mov	%r14,-1*8($tptr)

	mov	($nptr,$num),%r12
	lea	($nptr,$num),%rbp	# rewind $nptr
	lea	($tptr,$num),%rdi	# rewind $tptr
	sub	%r14,%r10		# compare top-most words
	sub	%r8,%rax		# %rax=-%r8
	mov	56+8(%rsp),%rdx		# restore rp
	dec	%r12			# so that after 'not' we get -n[0]
	jmp	.Lsqrx4x_sub_entry	# common post-condition
.size	mulx4x_internal,.-mulx4x_internal
######################################################################
my $rptr="%rdi";	# BN_ULONG *rptr,
my $aptr="%rsi";	# const BN_ULONG *aptr,
my $bptr="%rdx";	# const void *table,
my $nptr="%rcx";	# const BN_ULONG *nptr,
my $n0  ="%r8";		# const BN_ULONG *n0);
my $num ="%r9";		# int num, has to be divisible by 8

my ($i,$j,$tptr)=("%rbp","%rcx",$rptr);
my @A0=("%r10","%r11");
my @A1=("%r12","%r13");
my ($a0,$a1,$ai)=("%r14","%r15","%rbx");

.type	bn_powerx5,\@function,6
	shl	\$3,${num}d		# convert $num to bytes
	lea	($num,$num,2),%r10	# 3*$num in bytes

	##############################################################
	# Ensure that stack frame doesn't alias with $rptr+3*$num
	# modulo 4096, which covers ret[num], am[num] and n[num]
	# (see bn_exp.c). This is done to allow memory disambiguation
	# logic to do its magic. [Extra 256 bytes is for the power mask
	# calculated from the 7th argument, the index.]
	lea	-320(%rsp,$num,2),%r11
	sub	%r11,%rbp		# align with $aptr
	lea	-320(%rbp,$num,2),%rbp	# future alloca(frame+2*$num*8+256)
	lea	4096-320(,$num,2),%r10
	lea	-320(%rbp,$num,2),%rbp	# alloca(frame+2*$num*8+256)
	lea	(%rbp,%r11),%rsp
	jmp	.Lpwrx_page_walk_done

	lea	-4096(%rsp),%rsp
.Lpwrx_page_walk_done:

	##############################################################
	# +0	saved $num, used in reduction section
	# +8	&t[2*$num], used in reduction section
	# +16	intermediate carry bit
	# +24	top-most carry bit, used in reduction section

	movq	$rptr,%xmm1		# save $rptr
	movq	$nptr,%xmm2		# save $nptr
	movq	%r10, %xmm3		# -$num
	mov	%rax, 40(%rsp)		# save original %rsp

	call	__bn_sqrx8x_internal
	call	__bn_postx4x_internal
	call	__bn_sqrx8x_internal
	call	__bn_postx4x_internal
	call	__bn_sqrx8x_internal
	call	__bn_postx4x_internal
	call	__bn_sqrx8x_internal
	call	__bn_postx4x_internal
	call	__bn_sqrx8x_internal
	call	__bn_postx4x_internal

	mov	%r10,$num		# -num
	call	mulx4x_internal

	mov	40(%rsp),%rsi		# restore %rsp
.size	bn_powerx5,.-bn_powerx5
.globl	bn_sqrx8x_internal
.hidden	bn_sqrx8x_internal
.type	bn_sqrx8x_internal,\@abi-omnipotent
__bn_sqrx8x_internal:
	##################################################################
	# a) multiply-n-add everything but a[i]*a[i];
	# b) shift result of a) by 1 to the left and accumulate
	#    a[i]*a[i] products;
	##################################################################
	# a[7]a[7]a[6]a[6]a[5]a[5]a[4]a[4]a[3]a[3]a[2]a[2]a[1]a[1]a[0]a[0]
	# a[7]a[7]a[6]a[6]a[5]a[5]a[4]a[4]a[3]a[3]a[2]a[2]a[1]a[1]a[0]a[0]
my ($zero,$carry)=("%rbp","%rcx");

	lea	48+8(%rsp),$tptr
	lea	($aptr,$num),$aaptr
	mov	$num,0+8(%rsp)		# save $num
	mov	$aaptr,8+8(%rsp)	# save end of $aptr
	jmp	.Lsqr8x_zero_start

.byte	0x66,0x66,0x66,0x2e,0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00
	movdqa	%xmm0,0*8($tptr)
	movdqa	%xmm0,2*8($tptr)
	movdqa	%xmm0,4*8($tptr)
	movdqa	%xmm0,6*8($tptr)
.Lsqr8x_zero_start:			# aligned at 32
	movdqa	%xmm0,8*8($tptr)
	movdqa	%xmm0,10*8($tptr)
	movdqa	%xmm0,12*8($tptr)
	movdqa	%xmm0,14*8($tptr)
	lea	16*8($tptr),$tptr

	mov	0*8($aptr),%rdx		# a[0], modulo-scheduled
	#xor	%r9,%r9			# t[1], ex-$num, zero already
	lea	48+8(%rsp),$tptr
	xor	$zero,$zero		# cf=0, of=0
	jmp	.Lsqrx8x_outer_loop

.Lsqrx8x_outer_loop:
	mulx	1*8($aptr),%r8,%rax	# a[1]*a[0]
	adcx	%r9,%r8			# a[1]*a[0]+=t[1]
	mulx	2*8($aptr),%r9,%rax	# a[2]*a[0]
	.byte	0xc4,0xe2,0xab,0xf6,0x86,0x18,0x00,0x00,0x00	# mulx 3*8($aptr),%r10,%rax # ...
	.byte	0xc4,0xe2,0xa3,0xf6,0x86,0x20,0x00,0x00,0x00	# mulx 4*8($aptr),%r11,%rax
	mulx	5*8($aptr),%r12,%rax
	mulx	6*8($aptr),%r13,%rax
	mulx	7*8($aptr),%r14,%r15
	mov	1*8($aptr),%rdx		# a[1]
	mov	%r8,1*8($tptr)		# t[1]
	mov	%r9,2*8($tptr)		# t[2]
	sbb	$carry,$carry		# mov %cf,$carry
	xor	$zero,$zero		# cf=0, of=0

	mulx	2*8($aptr),%r8,%rbx	# a[2]*a[1]
	mulx	3*8($aptr),%r9,%rax	# a[3]*a[1]
	mulx	4*8($aptr),%r10,%rbx	# ...
	.byte	0xc4,0xe2,0xa3,0xf6,0x86,0x28,0x00,0x00,0x00	# mulx 5*8($aptr),%r11,%rax
	.byte	0xc4,0xe2,0x9b,0xf6,0x9e,0x30,0x00,0x00,0x00	# mulx 6*8($aptr),%r12,%rbx
	.byte	0xc4,0x62,0x93,0xf6,0xb6,0x38,0x00,0x00,0x00	# mulx 7*8($aptr),%r13,%r14
	mov	2*8($aptr),%rdx		# a[2]
	adox	$zero,%r14		# of=0
	adcx	$zero,%r14		# cf=0
	mov	%r8,3*8($tptr)		# t[3]
	mov	%r9,4*8($tptr)		# t[4]

	mulx	3*8($aptr),%r8,%rbx	# a[3]*a[2]
	mulx	4*8($aptr),%r9,%rax	# a[4]*a[2]
	mulx	5*8($aptr),%r10,%rbx	# ...
	.byte	0xc4,0xe2,0xa3,0xf6,0x86,0x30,0x00,0x00,0x00	# mulx 6*8($aptr),%r11,%rax
	.byte	0xc4,0x62,0x9b,0xf6,0xae,0x38,0x00,0x00,0x00	# mulx 7*8($aptr),%r12,%r13
	mov	3*8($aptr),%rdx		# a[3]
	mov	%r8,5*8($tptr)		# t[5]
	mov	%r9,6*8($tptr)		# t[6]
	mulx	4*8($aptr),%r8,%rax	# a[4]*a[3]
	adox	$zero,%r13		# of=0
	adcx	$zero,%r13		# cf=0

	mulx	5*8($aptr),%r9,%rbx	# a[5]*a[3]
	mulx	6*8($aptr),%r10,%rax	# ...
	mulx	7*8($aptr),%r11,%r12
	mov	4*8($aptr),%rdx		# a[4]
	mov	5*8($aptr),%r14		# a[5]
	mov	6*8($aptr),%r15		# a[6]
	adox	$zero,%r12		# of=0
	adcx	$zero,%r12		# cf=0
	mov	%r8,7*8($tptr)		# t[7]
	mov	%r9,8*8($tptr)		# t[8]

	mulx	%r14,%r9,%rax		# a[5]*a[4]
	mov	7*8($aptr),%r8		# a[7]
	mulx	%r15,%r10,%rbx		# a[6]*a[4]
	mulx	%r8,%r11,%rax		# a[7]*a[4]
	mov	%r14,%rdx		# a[5]
	#adox	$zero,%rax		# of=0
	adcx	$zero,%rax		# cf=0
	mulx	%r15,%r14,%rbx		# a[6]*a[5]
	mulx	%r8,%r12,%r13		# a[7]*a[5]
	mov	%r15,%rdx		# a[6]
	lea	8*8($aptr),$aptr
	mulx	%r8,%r8,%r14		# a[7]*a[6]
	je	.Lsqrx8x_outer_break
	neg	$carry			# mov $carry,%cf
	adcx	9*8($tptr),%r9		# +=t[9]
	adcx	10*8($tptr),%r10	# ...
	adcx	11*8($tptr),%r11
	adc	12*8($tptr),%r12
	adc	13*8($tptr),%r13
	adc	14*8($tptr),%r14
	adc	15*8($tptr),%r15
	lea	2*64($tptr),$tptr
	sbb	%rax,%rax		# mov %cf,$carry

	mov	-64($aptr),%rdx		# a[0]
	mov	%rax,16+8(%rsp)		# offload $carry
	mov	$tptr,24+8(%rsp)

	#lea	8*8($tptr),$tptr	# see 2*8*8($tptr) above
	xor	%eax,%eax		# cf=0, of=0

	mulx	0*8($aaptr),%rax,%r8	# a[8]*a[i]
	adcx	%rax,%rbx		# +=t[8]
	mulx	1*8($aaptr),%rax,%r9	# ...
	mulx	2*8($aaptr),%rax,%r10
	mulx	3*8($aaptr),%rax,%r11
	.byte	0xc4,0x62,0xfb,0xf6,0xa5,0x20,0x00,0x00,0x00	# mulx 4*8($aaptr),%rax,%r12
	mulx	5*8($aaptr),%rax,%r13
	mulx	6*8($aaptr),%rax,%r14
	mov	%rbx,($tptr,%rcx,8)	# store t[8+i]
	.byte	0xc4,0x62,0xfb,0xf6,0xbd,0x38,0x00,0x00,0x00	# mulx 7*8($aaptr),%rax,%r15
	mov	8($aptr,%rcx,8),%rdx	# a[i]
	adox	%rbx,%r15		# %rbx is 0, of=0
	adcx	%rbx,%r15		# cf=0

	lea	8*8($aaptr),$aaptr
	cmp	8+8(%rsp),$aaptr	# done?
	sub	16+8(%rsp),%rbx		# mov 16(%rsp),%cf
	lea	8*8($tptr),$tptr
	sbb	%rax,%rax		# mov %cf,%rax
	xor	%ebx,%ebx		# cf=0, of=0
	mov	%rax,16+8(%rsp)		# offload carry

	sub	16+8(%rsp),%r8		# consume last carry
	mov	24+8(%rsp),$carry	# initial $tptr, borrow $carry
	mov	0*8($aptr),%rdx		# a[8], modulo-scheduled
	xor	%ebp,%ebp		# xor $zero,$zero
	cmp	$carry,$tptr		# cf=0, of=0
	je	.Lsqrx8x_outer_loop

	mov	2*8($carry),%r10
	mov	3*8($carry),%r11
	mov	4*8($carry),%r12
	mov	5*8($carry),%r13
	mov	6*8($carry),%r14
	mov	7*8($carry),%r15
	jmp	.Lsqrx8x_outer_loop
.Lsqrx8x_outer_break:
	mov	%r9,9*8($tptr)		# t[9]
	movq	%xmm3,%rcx		# -$num
	mov	%r10,10*8($tptr)	# ...
	mov	%r11,11*8($tptr)
	mov	%r12,12*8($tptr)
	mov	%r13,13*8($tptr)
	mov	%r14,14*8($tptr)

	lea	48+8(%rsp),$tptr
	mov	($aptr,$i),%rdx		# a[0]
	mov	8($tptr),$A0[1]		# t[1]
	xor	$A0[0],$A0[0]		# t[0], of=0, cf=0
	mov	0+8(%rsp),$num		# restore $num
	mov	16($tptr),$A1[0]	# t[2]	# prefetch
	mov	24($tptr),$A1[1]	# t[3]	# prefetch
	#jmp	.Lsqrx4x_shift_n_add	# happens to be aligned

.Lsqrx4x_shift_n_add:
	.byte	0x48,0x8b,0x94,0x0e,0x08,0x00,0x00,0x00	# mov 8($aptr,$i),%rdx # a[i+1] # prefetch
	.byte	0x4c,0x8b,0x97,0x20,0x00,0x00,0x00	# mov 32($tptr),$A0[0] # t[2*i+4] # prefetch
	mov	40($tptr),$A0[1]	# t[2*i+4+1]	# prefetch
	mov	16($aptr,$i),%rdx	# a[i+2]	# prefetch
	mov	48($tptr),$A1[0]	# t[2*i+6]	# prefetch
	mov	56($tptr),$A1[1]	# t[2*i+6+1]	# prefetch
	mov	24($aptr,$i),%rdx	# a[i+3]	# prefetch
	mov	64($tptr),$A0[0]	# t[2*i+8]	# prefetch
	mov	72($tptr),$A0[1]	# t[2*i+8+1]	# prefetch
	jrcxz	.Lsqrx4x_shift_n_add_break
	.byte	0x48,0x8b,0x94,0x0e,0x00,0x00,0x00,0x00	# mov 0($aptr,$i),%rdx # a[i+4] # prefetch
	mov	80($tptr),$A1[0]	# t[2*i+10]	# prefetch
	mov	88($tptr),$A1[1]	# t[2*i+10+1]	# prefetch
	jmp	.Lsqrx4x_shift_n_add

.Lsqrx4x_shift_n_add_break:
	lea	64($tptr),$tptr		# end of t[] buffer
######################################################################
# Montgomery reduction part, "word-by-word" algorithm.
#
# This new path is inspired by multiple submissions from Intel, by
# Shay Gueron, Vlad Krasnov, Erdinc Ozturk, James Guilford,

my ($nptr,$carry,$m0)=("%rbp","%rsi","%rdx");
__bn_sqrx8x_reduction:
	xor	%eax,%eax		# initial top-most carry bit
	mov	32+8(%rsp),%rbx		# n0
	mov	48+8(%rsp),%rdx		# "%r8", 8*0($tptr)
	lea	-8*8($nptr,$num),%rcx	# end of n[]
	#lea	48+8(%rsp,$num,2),$tptr	# end of t[] buffer
	mov	%rcx, 0+8(%rsp)		# save end of n[]
	mov	$tptr,8+8(%rsp)		# save end of t[]

	lea	48+8(%rsp),$tptr	# initial t[] window
	jmp	.Lsqrx8x_reduction_loop

.Lsqrx8x_reduction_loop:
	imulq	%rbx,%rdx		# n0*a[i]
	mov	%rax,24+8(%rsp)		# store top-most carry bit
	lea	8*8($tptr),$tptr
	xor	$carry,$carry		# cf=0, of=0

	mulx	8*0($nptr),%rax,%r8	# n[0]
	adcx	%rbx,%rax		# discarded
	mulx	8*1($nptr),%rbx,%r9	# n[1]
	mulx	8*2($nptr),%rbx,%r10
	mulx	8*3($nptr),%rbx,%r11
	.byte	0xc4,0x62,0xe3,0xf6,0xa5,0x20,0x00,0x00,0x00	# mulx 8*4($nptr),%rbx,%r12
	mulx	32+8(%rsp),%rbx,%rdx	# %rdx discarded
	mov	%rax,64+48+8(%rsp,%rcx,8)	# put aside n0*a[i]
	mulx	8*5($nptr),%rax,%r13
	mulx	8*6($nptr),%rax,%r14
	mulx	8*7($nptr),%rax,%r15
	adox	$carry,%r15		# $carry is 0
	adcx	$carry,%r15		# cf=0
	.byte	0x67,0x67,0x67
	mov	$carry,%rax		# xor %rax,%rax
	cmp	0+8(%rsp),$nptr		# end of n[]?
	jae	.Lsqrx8x_no_tail

	mov	48+8(%rsp),%rdx		# pull n0*a[0]
	lea	8*8($nptr),$nptr
	adcx	8*2($tptr),%r10
	lea	8*8($tptr),$tptr
	sbb	%rax,%rax		# top carry
	xor	$carry,$carry		# of=0, cf=0

	mulx	8*0($nptr),%rax,%r8
	mulx	8*1($nptr),%rax,%r9
	mulx	8*2($nptr),%rax,%r10
	mulx	8*3($nptr),%rax,%r11
	.byte	0xc4,0x62,0xfb,0xf6,0xa5,0x20,0x00,0x00,0x00	# mulx 8*4($nptr),%rax,%r12
	mulx	8*5($nptr),%rax,%r13
	mulx	8*6($nptr),%rax,%r14
	mulx	8*7($nptr),%rax,%r15
	mov	72+48+8(%rsp,%rcx,8),%rdx	# pull n0*a[i]
	mov	%rbx,($tptr,%rcx,8)	# save result
	adcx	$carry,%r15		# cf=0

	cmp	0+8(%rsp),$nptr		# end of n[]?
	jae	.Lsqrx8x_tail_done	# break out of loop
	sub	16+8(%rsp),$carry	# mov 16(%rsp),%cf
	mov	48+8(%rsp),%rdx		# pull n0*a[0]
	lea	8*8($nptr),$nptr
	lea	8*8($tptr),$tptr
	sub	\$8,%rcx		# mov \$-8,%rcx
	xor	$carry,$carry		# of=0, cf=0

	add	24+8(%rsp),%r8		# can this overflow?
	sub	16+8(%rsp),$carry	# mov 16(%rsp),%cf
.Lsqrx8x_no_tail:			# %cf is 0 if jumped here
	mov	8*7($nptr),$carry
	movq	%xmm2,$nptr		# restore $nptr
	adc	\$0,%rax		# top-most carry
	mov	32+8(%rsp),%rbx		# n0
	mov	8*8($tptr,%rcx),%rdx	# modulo-scheduled "%r8"
	mov	%r8,8*0($tptr)		# store top 512 bits
	lea	8*8($tptr),%r8		# borrow %r8
	lea	8*8($tptr,%rcx),$tptr	# start of current t[] window
	cmp	8+8(%rsp),%r8		# end of t[]?
	jb	.Lsqrx8x_reduction_loop
.size	bn_sqrx8x_internal,.-bn_sqrx8x_internal
##############################################################
# Post-condition, 4x unrolled

my ($rptr,$nptr)=("%rdx","%rbp");

__bn_postx4x_internal:
	mov	%rcx,%r10		# -$num
	mov	%rcx,%r9		# -$num
	#lea	48+8(%rsp,%r9),$tptr
	movq	%xmm1,$rptr		# restore $rptr
	movq	%xmm1,$aptr		# prepare for back-to-back call
	dec	%r12			# so that after 'not' we get -n[0]
	jmp	.Lsqrx4x_sub_entry

	lea	8*4($nptr),$nptr
	neg	%r8			# mov %r8,%cf
	lea	8*4($tptr),$tptr
	sbb	%r8,%r8			# mov %cf,%r8
	lea	8*4($rptr),$rptr
	neg	%r9			# restore $num
.size	__bn_postx4x_internal,.-__bn_postx4x_internal
my ($inp,$num,$tbl,$idx)=$win64?("%rcx","%edx","%r8", "%r9d") :	# Win64 order
			  ("%rdi","%esi","%rdx","%ecx");	# Unix order

.type	bn_get_bits5,\@abi-omnipotent
	movzw	(%r10,$num,2),%eax
.size	bn_get_bits5,.-bn_get_bits5
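# bn_get_bits5 returns the 5-bit window that starts at bit position
# $num of the little-endian exponent, i.e. conceptually (a sketch):
#
#	return (inp >> bitpos) & 0x1f;
#
# the movzw above loads a 16-bit word chosen so that the window never
# straddles the loaded word.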
.type	bn_scatter5,\@abi-omnipotent
	jz	.Lscatter_epilogue
	lea	($tbl,$idx,8),$tbl
.size	bn_scatter5,.-bn_scatter5

.type	bn_gather5,\@abi-omnipotent
.LSEH_begin_bn_gather5:			# Win64 thing, but harmless in other cases
	# I can't trust assembler to use specific encoding:-(
	.byte	0x4c,0x8d,0x14,0x24			#lea    (%rsp),%r10
	.byte	0x48,0x81,0xec,0x08,0x01,0x00,0x00	#sub    $0x108,%rsp
	lea	.Linc(%rip),%rax
	and	\$-16,%rsp		# shouldn't be formally required

	movdqa	0(%rax),%xmm0		# 00000001000000010000000000000000
	movdqa	16(%rax),%xmm1		# 00000002000000020000000200000002
	lea	128($tbl),%r11		# size optimization
	lea	128(%rsp),%rax		# size optimization
	pshufd	\$0,%xmm5,%xmm5		# broadcast $idx
########################################################################
# calculate mask by comparing 0..31 to $idx and save result to stack
for($i=0;$i<$STRIDE/16;$i+=4) {
	pcmpeqd	%xmm5,%xmm0		# compare to 1,0
$code.=<<___ if ($i);
	movdqa	%xmm3,`16*($i-1)-128`(%rax)
	pcmpeqd	%xmm5,%xmm1		# compare to 3,2
	movdqa	%xmm0,`16*($i+0)-128`(%rax)
	pcmpeqd	%xmm5,%xmm2		# compare to 5,4
	movdqa	%xmm1,`16*($i+1)-128`(%rax)
	pcmpeqd	%xmm5,%xmm3		# compare to 7,6
	movdqa	%xmm2,`16*($i+2)-128`(%rax)
	movdqa	%xmm3,`16*($i-1)-128`(%rax)

for($i=0;$i<$STRIDE/16;$i+=4) {
	movdqa	`16*($i+0)-128`(%r11),%xmm0
	movdqa	`16*($i+1)-128`(%r11),%xmm1
	movdqa	`16*($i+2)-128`(%r11),%xmm2
	pand	`16*($i+0)-128`(%rax),%xmm0
	movdqa	`16*($i+3)-128`(%r11),%xmm3
	pand	`16*($i+1)-128`(%rax),%xmm1
	pand	`16*($i+2)-128`(%rax),%xmm2
	pand	`16*($i+3)-128`(%rax),%xmm3
	lea	$STRIDE(%r11),%r11
	pshufd	\$0x4e,%xmm4,%xmm0
	movq	%xmm0,($out)		# m0=bp[0]
.LSEH_end_bn_gather5:
.size	bn_gather5,.-bn_gather5
.asciz	"Montgomery Multiplication with scatter/gather for x86_64, CRYPTOGAMS by <appro\@openssl.org>"

# EXCEPTION_DISPOSITION handler (EXCEPTION_RECORD *rec,ULONG64 frame,
#		CONTEXT *context,DISPATCHER_CONTEXT *disp)
.extern	__imp_RtlVirtualUnwind
.type	mul_handler,\@abi-omnipotent
	mov	120($context),%rax	# pull context->Rax
	mov	248($context),%rbx	# pull context->Rip

	mov	8($disp),%rsi		# disp->ImageBase
	mov	56($disp),%r11		# disp->HandlerData

	mov	0(%r11),%r10d		# HandlerData[0]
	lea	(%rsi,%r10),%r10	# end of prologue label
	cmp	%r10,%rbx		# context->Rip<end of prologue label
	jb	.Lcommon_seh_tail

	mov	4(%r11),%r10d		# HandlerData[1]
	lea	(%rsi,%r10),%r10	# epilogue label
	cmp	%r10,%rbx		# context->Rip>=epilogue label
	jb	.Lcommon_pop_regs

	mov	152($context),%rax	# pull context->Rsp

	mov	8(%r11),%r10d		# HandlerData[2]
	lea	(%rsi,%r10),%r10	# epilogue label
	cmp	%r10,%rbx		# context->Rip>=epilogue label
	jae	.Lcommon_seh_tail

	lea	.Lmul_epilogue(%rip),%r10

	mov	192($context),%r10	# pull $num
	mov	8(%rax,%r10,8),%rax	# pull saved stack pointer
	jmp	.Lcommon_pop_regs

	mov	40(%rax),%rax		# pull saved stack pointer

	mov	%rbx,144($context)	# restore context->Rbx
	mov	%rbp,160($context)	# restore context->Rbp
	mov	%r12,216($context)	# restore context->R12
	mov	%r13,224($context)	# restore context->R13
	mov	%r14,232($context)	# restore context->R14
	mov	%r15,240($context)	# restore context->R15

	mov	%rax,152($context)	# restore context->Rsp
	mov	%rsi,168($context)	# restore context->Rsi
	mov	%rdi,176($context)	# restore context->Rdi

	mov	40($disp),%rdi		# disp->ContextRecord
	mov	$context,%rsi		# context
	mov	\$154,%ecx		# sizeof(CONTEXT) in quadwords
	.long	0xa548f3fc		# cld; rep movsq

	xor	%rcx,%rcx		# arg1, UNW_FLAG_NHANDLER
	mov	8(%rsi),%rdx		# arg2, disp->ImageBase
	mov	0(%rsi),%r8		# arg3, disp->ControlPc
	mov	16(%rsi),%r9		# arg4, disp->FunctionEntry
	mov	40(%rsi),%r10		# disp->ContextRecord
	lea	56(%rsi),%r11		# &disp->HandlerData
	lea	24(%rsi),%r12		# &disp->EstablisherFrame
	mov	%r10,32(%rsp)		# arg5
	mov	%r11,40(%rsp)		# arg6
	mov	%r12,48(%rsp)		# arg7
	mov	%rcx,56(%rsp)		# arg8, (NULL)
	call	*__imp_RtlVirtualUnwind(%rip)

	mov	\$1,%eax		# ExceptionContinueSearch
.size	mul_handler,.-mul_handler
	.rva	.LSEH_begin_bn_mul_mont_gather5
	.rva	.LSEH_end_bn_mul_mont_gather5
	.rva	.LSEH_info_bn_mul_mont_gather5

	.rva	.LSEH_begin_bn_mul4x_mont_gather5
	.rva	.LSEH_end_bn_mul4x_mont_gather5
	.rva	.LSEH_info_bn_mul4x_mont_gather5

	.rva	.LSEH_begin_bn_power5
	.rva	.LSEH_end_bn_power5
	.rva	.LSEH_info_bn_power5

	.rva	.LSEH_begin_bn_from_mont8x
	.rva	.LSEH_end_bn_from_mont8x
	.rva	.LSEH_info_bn_from_mont8x
$code.=<<___ if ($addx);
	.rva	.LSEH_begin_bn_mulx4x_mont_gather5
	.rva	.LSEH_end_bn_mulx4x_mont_gather5
	.rva	.LSEH_info_bn_mulx4x_mont_gather5

	.rva	.LSEH_begin_bn_powerx5
	.rva	.LSEH_end_bn_powerx5
	.rva	.LSEH_info_bn_powerx5

	.rva	.LSEH_begin_bn_gather5
	.rva	.LSEH_end_bn_gather5
	.rva	.LSEH_info_bn_gather5

.LSEH_info_bn_mul_mont_gather5:
	.rva	.Lmul_body,.Lmul_body,.Lmul_epilogue		# HandlerData[]
.LSEH_info_bn_mul4x_mont_gather5:
	.rva	.Lmul4x_prologue,.Lmul4x_body,.Lmul4x_epilogue	# HandlerData[]
.LSEH_info_bn_power5:
	.rva	.Lpower5_prologue,.Lpower5_body,.Lpower5_epilogue	# HandlerData[]
.LSEH_info_bn_from_mont8x:
	.rva	.Lfrom_prologue,.Lfrom_body,.Lfrom_epilogue	# HandlerData[]
$code.=<<___ if ($addx);
.LSEH_info_bn_mulx4x_mont_gather5:
	.rva	.Lmulx4x_prologue,.Lmulx4x_body,.Lmulx4x_epilogue	# HandlerData[]
.LSEH_info_bn_powerx5:
	.rva	.Lpowerx5_prologue,.Lpowerx5_body,.Lpowerx5_epilogue	# HandlerData[]
.LSEH_info_bn_gather5:
	.byte	0x01,0x0b,0x03,0x0a
	.byte	0x0b,0x01,0x21,0x00	# sub	rsp,0x108
	.byte	0x04,0xa3,0x00,0x00	# lea	r10,(rsp)

$code =~ s/\`([^\`]*)\`/eval($1)/gem;