3 # ====================================================================
4 # Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
5 # project. The module is, however, dual licensed under OpenSSL and
6 # CRYPTOGAMS licenses depending on where you obtain it. For further
7 # details see http://www.openssl.org/~appro/cryptogams/.
8 # ====================================================================
12 # Companion to x86_64-mont.pl that optimizes cache-timing attack
13 # countermeasures. The subroutines are produced by replacing bp[i]
14 # references in their x86_64-mont.pl counterparts with cache-neutral
15 # references to the powers table computed in BN_mod_exp_mont_consttime.
16 # In addition, a subroutine that scatters elements of the powers table
17 # is implemented, so that scattering/gathering can be tuned without
18 # modifying bn_exp.c.
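#
# Where x86_64-mont.pl would simply load b[i], the code below selects the
# requested entry from a table of 2^5 pre-computed powers without leaking
# the index through the data cache. A rough C-style model of that selection
# (illustrative only, not part of the generated code):
#
#	acc = 0;
#	for (k = 0; k < 32; k++)
#		acc |= table_entry(k) & (0 - (BN_ULONG)(k == idx));
#	b_i = acc;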
22 # Add MULX/AD*X code paths and additional interfaces to optimize for
23 # the branch prediction unit. For input lengths that are multiples of 8,
24 # the np argument is not just the modulus value, but one interleaved
25 # with 0. This is done to optimize the post-condition...
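#
# [A plausible layout of the interleaved modulus, assuming the caller in
# bn_exp.c prepares it roughly as below; treat this as an illustration,
# not a specification:
#
#	np2[2*i]   = np[i];	# even slots hold the modulus words
#	np2[2*i+1] = 0;		# odd slots are zero
# ]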
29 if ($flavour =~ /\./) { $output = $flavour; undef $flavour; }
31 $win64=0; $win64=1 if ($flavour =~ /[nm]asm|mingw64/ || $output =~ /\.asm$/);
33 $0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
34 ( $xlate="${dir}x86_64-xlate.pl" and -f $xlate ) or
35 ( $xlate="${dir}../../perlasm/x86_64-xlate.pl" and -f $xlate) or
36 die "can't locate x86_64-xlate.pl";
38 open OUT,"| \"$^X\" $xlate $flavour $output";
41 if (`$ENV{CC} -Wa,-v -c -o /dev/null -x assembler /dev/null 2>&1`
42 =~ /GNU assembler version ([2-9]\.[0-9]+)/) {
46 if (!$addx && $win64 && ($flavour =~ /nasm/ || $ENV{ASM} =~ /nasm/) &&
47 `nasm -v 2>&1` =~ /NASM version ([2-9]\.[0-9]+)/) {
51 if (!$addx && $win64 && ($flavour =~ /masm/ || $ENV{ASM} =~ /ml64/) &&
52 `ml64 2>&1` =~ /Version ([0-9]+)\./) {
56 if (!$addx && `$ENV{CC} -v 2>&1` =~ /((?:^clang|LLVM) version|.*based on LLVM) ([3-9])\.([0-9]+)/) {
57 my $ver = $2 + $3/100.0; # 3.1->3.01, 3.10->3.10
61 # int bn_mul_mont_gather5(
62 $rp="%rdi"; # BN_ULONG *rp,
63 $ap="%rsi"; # const BN_ULONG *ap,
64 $bp="%rdx"; # const BN_ULONG *bp,
65 $np="%rcx"; # const BN_ULONG *np,
66 $n0="%r8"; # const BN_ULONG *n0,
67 $num="%r9"; # int num,
68 # int idx); # 0 to 2^5-1, "index" in $bp holding
69 # pre-computed powers of a', interlaced
70 # in such a manner that b[0] is $bp[idx],
71 # b[1] is $bp[2^5+idx], etc.
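# e.g. for idx=3 the selected power's limbs are at
# $bp[3], $bp[2^5+3], $bp[2*2^5+3], ..., i.e. limb j
# of power idx lives at $bp[j*2^5+idx].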
83 .extern OPENSSL_ia32cap_P
85 .globl bn_mul_mont_gather5
86 .type bn_mul_mont_gather5,\@function,6
92 $code.=<<___ if ($addx);
93 mov OPENSSL_ia32cap_P+8(%rip),%r11d
102 movd `($win64?56:8)`(%rsp),%xmm5 # load 7th argument
113 lea -264(%rsp,%r11,8),%rsp # tp=alloca(8*(num+2)+256+8)
114 and \$-1024,%rsp # minimize TLB usage
116 mov %rax,8(%rsp,$num,8) # tp[num+1]=%rsp
118 # An OS-agnostic version of __chkstk.
120 # Some OSes (Windows) insist on the stack being "wired" to
121 # physical memory in a strictly sequential manner, i.e. if a stack
122 # allocation spans two pages, then a reference to the farthest one can
123 # be punished with SEGV. But page walking can do good even on
124 # other OSes, because it guarantees that a villainous thread hits
125 # the guard page before it can do damage to an innocent one...
131 .byte 0x2e # predict non-taken
134 lea 128($bp),%r12 # reassign $bp (+size optimization)
137 $STRIDE=2**5*8; # 5 is "window size"
138 $N=$STRIDE/4; # should match cache line size
140 movdqa 0(%r10),%xmm0 # 00000001000000010000000000000000
141 movdqa 16(%r10),%xmm1 # 00000002000000020000000200000002
142 lea 24-112(%rsp,$num,8),%r10 # place the mask after tp[num+3] (+ICache optimization)
145 pshufd \$0,%xmm5,%xmm5 # broadcast index
149 ########################################################################
150 # calculate mask by comparing 0..31 to index and save result to stack
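#
# In C-style pseudocode (illustrative only), the following sequence stores
# to the stack:
#
#	for (k = 0; k < 32; k++)
#		mask[k] = (k == idx) ? ~0 : 0;	# all-ones only for the wanted entry
#
# The gather further below ANDs each table entry with its mask and ORs the
# results together, so every entry is touched regardless of the index value.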
154 pcmpeqd %xmm5,%xmm0 # compare to 1,0
158 for($k=0;$k<$STRIDE/16-4;$k+=4) {
161 pcmpeqd %xmm5,%xmm1 # compare to 3,2
162 movdqa %xmm0,`16*($k+0)+112`(%r10)
166 pcmpeqd %xmm5,%xmm2 # compare to 5,4
167 movdqa %xmm1,`16*($k+1)+112`(%r10)
171 pcmpeqd %xmm5,%xmm3 # compare to 7,6
172 movdqa %xmm2,`16*($k+2)+112`(%r10)
177 movdqa %xmm3,`16*($k+3)+112`(%r10)
181 $code.=<<___; # last iteration can be optimized
184 movdqa %xmm0,`16*($k+0)+112`(%r10)
189 movdqa %xmm1,`16*($k+1)+112`(%r10)
192 movdqa %xmm2,`16*($k+2)+112`(%r10)
193 pand `16*($k+0)-128`($bp),%xmm0 # while it's still in register
195 pand `16*($k+1)-128`($bp),%xmm1
196 pand `16*($k+2)-128`($bp),%xmm2
197 movdqa %xmm3,`16*($k+3)+112`(%r10)
198 pand `16*($k+3)-128`($bp),%xmm3
202 for($k=0;$k<$STRIDE/16-4;$k+=4) {
204 movdqa `16*($k+0)-128`($bp),%xmm4
205 movdqa `16*($k+1)-128`($bp),%xmm5
206 movdqa `16*($k+2)-128`($bp),%xmm2
207 pand `16*($k+0)+112`(%r10),%xmm4
208 movdqa `16*($k+3)-128`($bp),%xmm3
209 pand `16*($k+1)+112`(%r10),%xmm5
211 pand `16*($k+2)+112`(%r10),%xmm2
213 pand `16*($k+3)+112`(%r10),%xmm3
220 pshufd \$0x4e,%xmm0,%xmm1
223 movq %xmm0,$m0 # m0=bp[0]
225 mov ($n0),$n0 # pull n0[0] value
232 mulq $m0 # ap[0]*bp[0]
236 imulq $lo0,$m1 # "tp[0]"*n0
240 add %rax,$lo0 # discarded
253 add $hi0,$hi1 # np[j]*m1+ap[j]*bp[0]
256 mov $hi1,-16(%rsp,$j,8) # tp[j-1]
260 mulq $m0 # ap[j]*bp[0]
269 jne .L1st # note that upon exit $j==$num, so
270 # they can be used interchangeably
274 add $hi0,$hi1 # np[j]*m1+ap[j]*bp[0]
276 mov $hi1,-16(%rsp,$num,8) # tp[num-1]
283 mov $hi1,-8(%rsp,$num,8)
284 mov %rdx,(%rsp,$num,8) # store upmost overflow bit
290 lea 24+128(%rsp,$num,8),%rdx # where 256-byte mask is (+size optimization)
295 for($k=0;$k<$STRIDE/16;$k+=4) {
297 movdqa `16*($k+0)-128`($bp),%xmm0
298 movdqa `16*($k+1)-128`($bp),%xmm1
299 movdqa `16*($k+2)-128`($bp),%xmm2
300 movdqa `16*($k+3)-128`($bp),%xmm3
301 pand `16*($k+0)-128`(%rdx),%xmm0
302 pand `16*($k+1)-128`(%rdx),%xmm1
304 pand `16*($k+2)-128`(%rdx),%xmm2
306 pand `16*($k+3)-128`(%rdx),%xmm3
313 pshufd \$0x4e,%xmm4,%xmm0
317 mov ($ap),%rax # ap[0]
318 movq %xmm0,$m0 # m0=bp[i]
324 mulq $m0 # ap[0]*bp[i]
325 add %rax,$lo0 # ap[0]*bp[i]+tp[0]
329 imulq $lo0,$m1 # tp[0]*n0
333 add %rax,$lo0 # discarded
336 mov 8(%rsp),$lo0 # tp[1]
347 add $lo0,$hi1 # np[j]*m1+ap[j]*bp[i]+tp[j]
350 mov $hi1,-16(%rsp,$j,8) # tp[j-1]
354 mulq $m0 # ap[j]*bp[i]
358 add $hi0,$lo0 # ap[j]*bp[i]+tp[j]
365 jne .Linner # note that upon exit $j==$num, so
366 # they can be used interchangeably
369 add $lo0,$hi1 # np[j]*m1+ap[j]*bp[i]+tp[j]
370 mov (%rsp,$num,8),$lo0
372 mov $hi1,-16(%rsp,$num,8) # tp[num-1]
378 add $lo0,$hi1 # pull upmost overflow bit
380 mov $hi1,-8(%rsp,$num,8)
381 mov %rdx,(%rsp,$num,8) # store upmost overflow bit
387 xor $i,$i # i=0 and clear CF!
388 mov (%rsp),%rax # tp[0]
389 lea (%rsp),$ap # borrow ap for tp
393 .Lsub: sbb ($np,$i,8),%rax
394 mov %rax,($rp,$i,8) # rp[i]=tp[i]-np[i]
395 mov 8($ap,$i,8),%rax # tp[i+1]
397 dec $j # doesn't affect CF!
400 sbb \$0,%rax # handle upmost overflow bit
407 or $np,$ap # ap=borrow?tp:rp
409 .Lcopy: # copy or in-place refresh
411 mov $i,(%rsp,$i,8) # zap temporary vector
412 mov %rax,($rp,$i,8) # rp[i]=tp[i]
417 mov 8(%rsp,$num,8),%rsi # restore %rsp
429 .size bn_mul_mont_gather5,.-bn_mul_mont_gather5
432 my @A=("%r10","%r11");
433 my @N=("%r13","%rdi");
435 .type bn_mul4x_mont_gather5,\@function,6
437 bn_mul4x_mont_gather5:
440 $code.=<<___ if ($addx);
442 cmp \$0x80108,%r11d # check for AD*X+BMI2+BMI1
456 shl \$3,${num}d # convert $num to bytes
457 lea ($num,$num,2),%r10 # 3*$num in bytes
460 ##############################################################
461 # Ensure that stack frame doesn't alias with $rptr+3*$num
462 # modulo 4096, which covers ret[num], am[num] and n[num]
463 # (see bn_exp.c). This is done to allow the memory disambiguation
464 # logic to do its magic. [An extra [num] is allocated in order
465 # to align with bn_power5's frame, which is cleansed after
466 # completing exponentiation. An extra 256 bytes is for the power mask
467 # calculated from the 7th argument, the index.]
469 lea -320(%rsp,$num,2),%r11
474 sub %r11,%rsp # align with $rp
475 lea -320(%rsp,$num,2),%rsp # alloca(frame+2*num*8+256)
480 lea 4096-320(,$num,2),%r10
481 lea -320(%rsp,$num,2),%rsp # alloca(frame+2*num*8+256)
494 .byte 0x2e # predict non-taken
495 jnc .Lmul4x_page_walk
504 mov 40(%rsp),%rsi # restore %rsp
516 .size bn_mul4x_mont_gather5,.-bn_mul4x_mont_gather5
518 .type mul4x_internal,\@abi-omnipotent
521 shl \$5,$num # $num was in bytes
522 movd `($win64?56:8)`(%rax),%xmm5 # load 7th argument, index
524 lea 128(%rdx,$num),%r13 # end of powers table (+size optimization)
525 shr \$5,$num # restore $num
528 $STRIDE=2**5*8; # 5 is "window size"
529 $N=$STRIDE/4; # should match cache line size
532 movdqa 0(%rax),%xmm0 # 00000001000000010000000000000000
533 movdqa 16(%rax),%xmm1 # 00000002000000020000000200000002
534 lea 88-112(%rsp,$num),%r10 # place the mask after tp[num+1] (+ICache optimization)
535 lea 128(%rdx),$bp # size optimization
537 pshufd \$0,%xmm5,%xmm5 # broadcast index
542 ########################################################################
543 # calculate mask by comparing 0..31 to index and save result to stack
547 pcmpeqd %xmm5,%xmm0 # compare to 1,0
551 for($i=0;$i<$STRIDE/16-4;$i+=4) {
554 pcmpeqd %xmm5,%xmm1 # compare to 3,2
555 movdqa %xmm0,`16*($i+0)+112`(%r10)
559 pcmpeqd %xmm5,%xmm2 # compare to 5,4
560 movdqa %xmm1,`16*($i+1)+112`(%r10)
564 pcmpeqd %xmm5,%xmm3 # compare to 7,6
565 movdqa %xmm2,`16*($i+2)+112`(%r10)
570 movdqa %xmm3,`16*($i+3)+112`(%r10)
574 $code.=<<___; # last iteration can be optimized
577 movdqa %xmm0,`16*($i+0)+112`(%r10)
582 movdqa %xmm1,`16*($i+1)+112`(%r10)
585 movdqa %xmm2,`16*($i+2)+112`(%r10)
586 pand `16*($i+0)-128`($bp),%xmm0 # while it's still in register
588 pand `16*($i+1)-128`($bp),%xmm1
589 pand `16*($i+2)-128`($bp),%xmm2
590 movdqa %xmm3,`16*($i+3)+112`(%r10)
591 pand `16*($i+3)-128`($bp),%xmm3
595 for($i=0;$i<$STRIDE/16-4;$i+=4) {
597 movdqa `16*($i+0)-128`($bp),%xmm4
598 movdqa `16*($i+1)-128`($bp),%xmm5
599 movdqa `16*($i+2)-128`($bp),%xmm2
600 pand `16*($i+0)+112`(%r10),%xmm4
601 movdqa `16*($i+3)-128`($bp),%xmm3
602 pand `16*($i+1)+112`(%r10),%xmm5
604 pand `16*($i+2)+112`(%r10),%xmm2
606 pand `16*($i+3)+112`(%r10),%xmm3
613 pshufd \$0x4e,%xmm0,%xmm1
616 movq %xmm0,$m0 # m0=bp[0]
618 mov %r13,16+8(%rsp) # save end of b[num]
619 mov $rp, 56+8(%rsp) # save $rp
621 mov ($n0),$n0 # pull n0[0] value
623 lea ($ap,$num),$ap # end of a[num]
627 mulq $m0 # ap[0]*bp[0]
631 imulq $A[0],$m1 # "tp[0]"*n0
636 add %rax,$A[0] # discarded
649 mov 16($ap,$num),%rax
652 lea 4*8($num),$j # j=4
661 mulq $m0 # ap[j]*bp[0]
672 add $A[0],$N[0] # np[j]*m1+ap[j]*bp[0]
674 mov $N[0],-24($tp) # tp[j-1]
677 mulq $m0 # ap[j]*bp[0]
687 add $A[1],$N[1] # np[j]*m1+ap[j]*bp[0]
689 mov $N[1],-16($tp) # tp[j-1]
692 mulq $m0 # ap[j]*bp[0]
702 add $A[0],$N[0] # np[j]*m1+ap[j]*bp[0]
704 mov $N[0],-8($tp) # tp[j-1]
707 mulq $m0 # ap[j]*bp[0]
717 add $A[1],$N[1] # np[j]*m1+ap[j]*bp[0]
720 mov $N[1],($tp) # tp[j-1]
726 mulq $m0 # ap[j]*bp[0]
737 add $A[0],$N[0] # np[j]*m1+ap[j]*bp[0]
739 mov $N[0],-24($tp) # tp[j-1]
742 mulq $m0 # ap[j]*bp[0]
750 mov ($ap,$num),%rax # ap[0]
752 add $A[1],$N[1] # np[j]*m1+ap[j]*bp[0]
754 mov $N[1],-16($tp) # tp[j-1]
757 lea ($np,$num),$np # rewind $np
768 lea 16+128($tp),%rdx # where 256-byte mask is (+size optimization)
772 for($i=0;$i<$STRIDE/16;$i+=4) {
774 movdqa `16*($i+0)-128`($bp),%xmm0
775 movdqa `16*($i+1)-128`($bp),%xmm1
776 movdqa `16*($i+2)-128`($bp),%xmm2
777 movdqa `16*($i+3)-128`($bp),%xmm3
778 pand `16*($i+0)-128`(%rdx),%xmm0
779 pand `16*($i+1)-128`(%rdx),%xmm1
781 pand `16*($i+2)-128`(%rdx),%xmm2
783 pand `16*($i+3)-128`(%rdx),%xmm3
790 pshufd \$0x4e,%xmm4,%xmm0
793 movq %xmm0,$m0 # m0=bp[i]
797 mulq $m0 # ap[0]*bp[i]
798 add %rax,$A[0] # ap[0]*bp[i]+tp[0]
802 imulq $A[0],$m1 # tp[0]*n0
804 mov $N[1],($tp) # store upmost overflow bit
806 lea ($tp,$num),$tp # rewind $tp
809 add %rax,$A[0] # "$N[0]", discarded
814 mulq $m0 # ap[j]*bp[i]
818 add 8($tp),$A[1] # +tp[1]
824 mov 16($ap,$num),%rax
826 add $A[1],$N[1] # np[j]*m1+ap[j]*bp[i]+tp[j]
827 lea 4*8($num),$j # j=4
835 mulq $m0 # ap[j]*bp[i]
839 add 16($tp),$A[0] # ap[j]*bp[i]+tp[j]
850 mov $N[1],-32($tp) # tp[j-1]
853 mulq $m0 # ap[j]*bp[i]
867 mov $N[0],-24($tp) # tp[j-1]
870 mulq $m0 # ap[j]*bp[i]
874 add ($tp),$A[0] # ap[j]*bp[i]+tp[j]
884 mov $N[1],-16($tp) # tp[j-1]
887 mulq $m0 # ap[j]*bp[i]
902 mov $N[0],-8($tp) # tp[j-1]
908 mulq $m0 # ap[j]*bp[i]
912 add 16($tp),$A[0] # ap[j]*bp[i]+tp[j]
923 mov $N[1],-32($tp) # tp[j-1]
926 mulq $m0 # ap[j]*bp[i]
937 mov ($ap,$num),%rax # ap[0]
941 mov $N[0],-24($tp) # tp[j-1]
944 mov $N[1],-16($tp) # tp[j-1]
945 lea ($np,$num),$np # rewind $np
950 add ($tp),$N[0] # pull upmost overflow bit
951 adc \$0,$N[1] # upmost overflow bit
960 sub $N[0],$m1 # compare top-most words
961 adc $j,$j # $j is zero
963 sub $N[1],%rax # %rax=-$N[1]
964 lea ($tp,$num),%rbx # tptr in .sqr4x_sub
966 lea ($np),%rbp # nptr in .sqr4x_sub
969 mov 56+8(%rsp),%rdi # rptr in .sqr4x_sub
970 dec %r12 # so that after 'not' we get -n[0]
975 jmp .Lsqr4x_sub_entry
978 my @ri=("%rax",$bp,$m0,$m1);
982 lea ($tp,$num),$tp # rewind $tp
984 lea ($np,$N[1],8),$np
985 mov 56+8(%rsp),$rp # restore $rp
1001 sbb 16*3($np),@ri[3]
1015 .size mul4x_internal,.-mul4x_internal
1019 ######################################################################
1021 my $rptr="%rdi"; # BN_ULONG *rptr,
1022 my $aptr="%rsi"; # const BN_ULONG *aptr,
1023 my $bptr="%rdx"; # const void *table,
1024 my $nptr="%rcx"; # const BN_ULONG *nptr,
1025 my $n0 ="%r8"; # const BN_ULONG *n0);
1026 my $num ="%r9"; # int num, has to be divisible by 8
1029 my ($i,$j,$tptr)=("%rbp","%rcx",$rptr);
1030 my @A0=("%r10","%r11");
1031 my @A1=("%r12","%r13");
1032 my ($a0,$a1,$ai)=("%r14","%r15","%rbx");
1036 .type bn_power5,\@function,6
1040 $code.=<<___ if ($addx);
1041 mov OPENSSL_ia32cap_P+8(%rip),%r11d
1043 cmp \$0x80108,%r11d # check for AD*X+BMI2+BMI1
1055 shl \$3,${num}d # convert $num to bytes
1056 lea ($num,$num,2),%r10d # 3*$num
1060 ##############################################################
1061 # Ensure that stack frame doesn't alias with $rptr+3*$num
1062 # modulo 4096, which covers ret[num], am[num] and n[num]
1063 # (see bn_exp.c). This is done to allow the memory disambiguation
1064 # logic to do its magic. [An extra 256 bytes is for the power mask
1065 # calculated from the 7th argument, the index.]
1067 lea -320(%rsp,$num,2),%r11
1072 sub %r11,%rsp # align with $aptr
1073 lea -320(%rsp,$num,2),%rsp # alloca(frame+2*num*8+256)
1078 lea 4096-320(,$num,2),%r10
1079 lea -320(%rsp,$num,2),%rsp # alloca(frame+2*num*8+256)
1090 mov (%rsp,%r11),%r10
1092 .byte 0x2e # predict non-taken
1098 ##############################################################
1101 # +0 saved $num, used in reduction section
1102 # +8 &t[2*$num], used in reduction section
1108 mov %rax, 40(%rsp) # save original %rsp
1110 movq $rptr,%xmm1 # save $rptr, used in sqr8x
1111 movq $nptr,%xmm2 # save $nptr
1112 movq %r10, %xmm3 # -$num, used in sqr8x
1115 call __bn_sqr8x_internal
1116 call __bn_post4x_internal
1117 call __bn_sqr8x_internal
1118 call __bn_post4x_internal
1119 call __bn_sqr8x_internal
1120 call __bn_post4x_internal
1121 call __bn_sqr8x_internal
1122 call __bn_post4x_internal
1123 call __bn_sqr8x_internal
1124 call __bn_post4x_internal
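# Five back-to-back squarings raise the Montgomery-form input to the
# 32nd power (2^5, matching the window size); the code that follows
# then multiplies by the table entry selected by the 7th argument.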
1134 mov 40(%rsp),%rsi # restore %rsp
1145 .size bn_power5,.-bn_power5
1147 .globl bn_sqr8x_internal
1148 .hidden bn_sqr8x_internal
1149 .type bn_sqr8x_internal,\@abi-omnipotent
1152 __bn_sqr8x_internal:
1153 ##############################################################
1156 # a) multiply-n-add everything but a[i]*a[i];
1157 # b) shift result of a) by 1 to the left and accumulate
1158 # a[i]*a[i] products;
1160 ##############################################################
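# In formula form, with B = 2^64:
#	a^2 = 2*sum_{i<j} a[i]*a[j]*B^(i+j) + sum_i a[i]^2*B^(2*i)
# step a) accumulates the cross products, step b) doubles that partial
# result and folds in the a[i]^2 diagonal terms.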
1226 lea 32(%r10),$i # $i=-($num-32)
1227 lea ($aptr,$num),$aptr # end of a[] buffer, ($aptr,$i)=&ap[2]
1229 mov $num,$j # $j=$num
1231 # comments apply to $num==8 case
1232 mov -32($aptr,$i),$a0 # a[0]
1233 lea 48+8(%rsp,$num,2),$tptr # end of tp[] buffer, &tp[2*$num]
1234 mov -24($aptr,$i),%rax # a[1]
1235 lea -32($tptr,$i),$tptr # end of tp[] window, &tp[2*$num-"$i"]
1236 mov -16($aptr,$i),$ai # a[2]
1240 mov %rax,$A0[0] # a[1]*a[0]
1243 mov $A0[0],-24($tptr,$i) # t[1]
1249 mov $A0[1],-16($tptr,$i) # t[2]
1253 mov -8($aptr,$i),$ai # a[3]
1255 mov %rax,$A1[0] # a[2]*a[1]+t[3]
1261 add %rax,$A0[0] # a[3]*a[0]+a[2]*a[1]+t[3]
1267 mov $A0[0],-8($tptr,$j) # t[3]
1272 mov ($aptr,$j),$ai # a[4]
1274 add %rax,$A1[1] # a[3]*a[1]+t[4]
1280 add %rax,$A0[1] # a[4]*a[0]+a[3]*a[1]+t[4]
1282 mov 8($aptr,$j),$ai # a[5]
1290 add %rax,$A1[0] # a[4]*a[3]+t[5]
1292 mov $A0[1],($tptr,$j) # t[4]
1297 add %rax,$A0[0] # a[5]*a[2]+a[4]*a[3]+t[5]
1299 mov 16($aptr,$j),$ai # a[6]
1306 add %rax,$A1[1] # a[5]*a[3]+t[6]
1308 mov $A0[0],8($tptr,$j) # t[5]
1313 add %rax,$A0[1] # a[6]*a[2]+a[5]*a[3]+t[6]
1315 mov 24($aptr,$j),$ai # a[7]
1323 add %rax,$A1[0] # a[6]*a[5]+t[7]
1325 mov $A0[1],16($tptr,$j) # t[6]
1331 add %rax,$A0[0] # a[7]*a[4]+a[6]*a[5]+t[7]
1337 mov $A0[0],-8($tptr,$j) # t[7]
1349 mov $A1[1],($tptr) # t[8]
1351 mov %rdx,8($tptr) # t[9]
1355 .Lsqr4x_outer: # comments apply to $num==6 case
1356 mov -32($aptr,$i),$a0 # a[0]
1357 lea 48+8(%rsp,$num,2),$tptr # end of tp[] buffer, &tp[2*$num]
1358 mov -24($aptr,$i),%rax # a[1]
1359 lea -32($tptr,$i),$tptr # end of tp[] window, &tp[2*$num-"$i"]
1360 mov -16($aptr,$i),$ai # a[2]
1364 mov -24($tptr,$i),$A0[0] # t[1]
1365 add %rax,$A0[0] # a[1]*a[0]+t[1]
1368 mov $A0[0],-24($tptr,$i) # t[1]
1375 add -16($tptr,$i),$A0[1] # a[2]*a[0]+t[2]
1378 mov $A0[1],-16($tptr,$i) # t[2]
1382 mov -8($aptr,$i),$ai # a[3]
1384 add %rax,$A1[0] # a[2]*a[1]+t[3]
1387 add -8($tptr,$i),$A1[0]
1392 add %rax,$A0[0] # a[3]*a[0]+a[2]*a[1]+t[3]
1398 mov $A0[0],-8($tptr,$i) # t[3]
1405 mov ($aptr,$j),$ai # a[4]
1407 add %rax,$A1[1] # a[3]*a[1]+t[4]
1411 add ($tptr,$j),$A1[1]
1416 add %rax,$A0[1] # a[4]*a[0]+a[3]*a[1]+t[4]
1418 mov 8($aptr,$j),$ai # a[5]
1425 add %rax,$A1[0] # a[4]*a[3]+t[5]
1426 mov $A0[1],($tptr,$j) # t[4]
1430 add 8($tptr,$j),$A1[0]
1435 add %rax,$A0[0] # a[5]*a[2]+a[4]*a[3]+t[5]
1441 mov $A0[0],-8($tptr,$j) # t[5], "preloaded t[1]" below
1453 mov $A1[1],($tptr) # t[6], "preloaded t[2]" below
1455 mov %rdx,8($tptr) # t[7], "preloaded t[3]" below
1460 # comments apply to $num==4 case
1461 mov -32($aptr),$a0 # a[0]
1462 lea 48+8(%rsp,$num,2),$tptr # end of tp[] buffer, &tp[2*$num]
1463 mov -24($aptr),%rax # a[1]
1464 lea -32($tptr,$i),$tptr # end of tp[] window, &tp[2*$num-"$i"]
1465 mov -16($aptr),$ai # a[2]
1469 add %rax,$A0[0] # a[1]*a[0]+t[1], preloaded t[1]
1477 mov $A0[0],-24($tptr) # t[1]
1480 add $A1[1],$A0[1] # a[2]*a[0]+t[2], preloaded t[2]
1481 mov -8($aptr),$ai # a[3]
1485 add %rax,$A1[0] # a[2]*a[1]+t[3], preloaded t[3]
1487 mov $A0[1],-16($tptr) # t[2]
1492 add %rax,$A0[0] # a[3]*a[0]+a[2]*a[1]+t[3]
1498 mov $A0[0],-8($tptr) # t[3]
1502 mov -16($aptr),%rax # a[2]
1507 mov $A1[1],($tptr) # t[4]
1509 mov %rdx,8($tptr) # t[5]
1514 my ($shift,$carry)=($a0,$a1);
1515 my @S=(@A1,$ai,$n0);
1519 sub $num,$i # $i=16-$num
1522 add $A1[0],%rax # t[5]
1524 mov %rax,8($tptr) # t[5]
1525 mov %rdx,16($tptr) # t[6]
1526 mov $carry,24($tptr) # t[7]
1528 mov -16($aptr,$i),%rax # a[0]
1529 lea 48+8(%rsp),$tptr
1530 xor $A0[0],$A0[0] # t[0]
1531 mov 8($tptr),$A0[1] # t[1]
1533 lea ($shift,$A0[0],2),$S[0] # t[2*i]<<1 | shift
1535 lea ($j,$A0[1],2),$S[1] # t[2*i+1]<<1 |
1537 or $A0[0],$S[1] # | t[2*i]>>63
1538 mov 16($tptr),$A0[0] # t[2*i+2] # prefetch
1539 mov $A0[1],$shift # shift=t[2*i+1]>>63
1540 mul %rax # a[i]*a[i]
1541 neg $carry # mov $carry,cf
1542 mov 24($tptr),$A0[1] # t[2*i+2+1] # prefetch
1544 mov -8($aptr,$i),%rax # a[i+1] # prefetch
1548 lea ($shift,$A0[0],2),$S[2] # t[2*i]<<1 | shift
1550 sbb $carry,$carry # mov cf,$carry
1552 lea ($j,$A0[1],2),$S[3] # t[2*i+1]<<1 |
1554 or $A0[0],$S[3] # | t[2*i]>>63
1555 mov 32($tptr),$A0[0] # t[2*i+2] # prefetch
1556 mov $A0[1],$shift # shift=t[2*i+1]>>63
1557 mul %rax # a[i]*a[i]
1558 neg $carry # mov $carry,cf
1559 mov 40($tptr),$A0[1] # t[2*i+2+1] # prefetch
1561 mov 0($aptr,$i),%rax # a[i+1] # prefetch
1566 sbb $carry,$carry # mov cf,$carry
1568 jmp .Lsqr4x_shift_n_add
1571 .Lsqr4x_shift_n_add:
1572 lea ($shift,$A0[0],2),$S[0] # t[2*i]<<1 | shift
1574 lea ($j,$A0[1],2),$S[1] # t[2*i+1]<<1 |
1576 or $A0[0],$S[1] # | t[2*i]>>63
1577 mov -16($tptr),$A0[0] # t[2*i+2] # prefetch
1578 mov $A0[1],$shift # shift=t[2*i+1]>>63
1579 mul %rax # a[i]*a[i]
1580 neg $carry # mov $carry,cf
1581 mov -8($tptr),$A0[1] # t[2*i+2+1] # prefetch
1583 mov -8($aptr,$i),%rax # a[i+1] # prefetch
1584 mov $S[0],-32($tptr)
1587 lea ($shift,$A0[0],2),$S[2] # t[2*i]<<1 | shift
1588 mov $S[1],-24($tptr)
1589 sbb $carry,$carry # mov cf,$carry
1591 lea ($j,$A0[1],2),$S[3] # t[2*i+1]<<1 |
1593 or $A0[0],$S[3] # | t[2*i]>>63
1594 mov 0($tptr),$A0[0] # t[2*i+2] # prefetch
1595 mov $A0[1],$shift # shift=t[2*i+1]>>63
1596 mul %rax # a[i]*a[i]
1597 neg $carry # mov $carry,cf
1598 mov 8($tptr),$A0[1] # t[2*i+2+1] # prefetch
1600 mov 0($aptr,$i),%rax # a[i+1] # prefetch
1601 mov $S[2],-16($tptr)
1604 lea ($shift,$A0[0],2),$S[0] # t[2*i]<<1 | shift
1606 sbb $carry,$carry # mov cf,$carry
1608 lea ($j,$A0[1],2),$S[1] # t[2*i+1]<<1 |
1610 or $A0[0],$S[1] # | t[2*i]>>63
1611 mov 16($tptr),$A0[0] # t[2*i+2] # prefetch
1612 mov $A0[1],$shift # shift=t[2*i+1]>>63
1613 mul %rax # a[i]*a[i]
1614 neg $carry # mov $carry,cf
1615 mov 24($tptr),$A0[1] # t[2*i+2+1] # prefetch
1617 mov 8($aptr,$i),%rax # a[i+1] # prefetch
1621 lea ($shift,$A0[0],2),$S[2] # t[2*i]<<1 | shift
1623 sbb $carry,$carry # mov cf,$carry
1625 lea ($j,$A0[1],2),$S[3] # t[2*i+1]<<1 |
1627 or $A0[0],$S[3] # | t[2*i]>>63
1628 mov 32($tptr),$A0[0] # t[2*i+2] # prefetch
1629 mov $A0[1],$shift # shift=t[2*i+1]>>63
1630 mul %rax # a[i]*a[i]
1631 neg $carry # mov $carry,cf
1632 mov 40($tptr),$A0[1] # t[2*i+2+1] # prefetch
1634 mov 16($aptr,$i),%rax # a[i+1] # prefetch
1638 sbb $carry,$carry # mov cf,$carry
1641 jnz .Lsqr4x_shift_n_add
1643 lea ($shift,$A0[0],2),$S[0] # t[2*i]<<1 | shift
1646 lea ($j,$A0[1],2),$S[1] # t[2*i+1]<<1 |
1648 or $A0[0],$S[1] # | t[2*i]>>63
1649 mov -16($tptr),$A0[0] # t[2*i+2] # prefetch
1650 mov $A0[1],$shift # shift=t[2*i+1]>>63
1651 mul %rax # a[i]*a[i]
1652 neg $carry # mov $carry,cf
1653 mov -8($tptr),$A0[1] # t[2*i+2+1] # prefetch
1655 mov -8($aptr),%rax # a[i+1] # prefetch
1656 mov $S[0],-32($tptr)
1659 lea ($shift,$A0[0],2),$S[2] # t[2*i]<<1|shift
1660 mov $S[1],-24($tptr)
1661 sbb $carry,$carry # mov cf,$carry
1663 lea ($j,$A0[1],2),$S[3] # t[2*i+1]<<1 |
1665 or $A0[0],$S[3] # | t[2*i]>>63
1666 mul %rax # a[i]*a[i]
1667 neg $carry # mov $carry,cf
1670 mov $S[2],-16($tptr)
1674 ######################################################################
1675 # Montgomery reduction part, "word-by-word" algorithm.
1677 # This new path is inspired by multiple submissions from Intel, by
1678 # Shay Gueron, Vlad Krasnov, Erdinc Ozturk, James Guilford,
1681 my ($nptr,$tptr,$carry,$m0)=("%rbp","%rdi","%rsi","%rbx");
1685 __bn_sqr8x_reduction:
1687 lea ($nptr,$num),%rcx # end of n[]
1688 lea 48+8(%rsp,$num,2),%rdx # end of t[] buffer
1690 lea 48+8(%rsp,$num),$tptr # end of initial t[] window
1693 jmp .L8x_reduction_loop
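# Each pass performs the standard word-by-word step (illustrative sketch,
# with B = 2^64 and t[] the double-width product being reduced):
#
#	m   = (t[0] * n0) mod B;	# multiplier that zeroes the low word
#	t  += m * n;			# low word of t becomes zero
#	t >>= 64;			# drop it and slide the window up
#
# unrolled to process eight words of t[] per iteration.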
1696 .L8x_reduction_loop:
1697 lea ($tptr,$num),$tptr # start of current t[] window
1707 mov %rax,(%rdx) # store top-most carry bit
1708 lea 8*8($tptr),$tptr
1712 imulq 32+8(%rsp),$m0 # n0*a[0]
1713 mov 8*0($nptr),%rax # n[0]
1720 mov 8*1($nptr),%rax # n[1]
1730 mov $m0,48-8+8(%rsp,%rcx,8) # put aside n0*a[i]
1739 mov 32+8(%rsp),$carry # pull n0, borrow $carry
1747 imulq %r8,$carry # modulo-scheduled
1777 mov $carry,$m0 # n0*a[i]
1779 mov 8*0($nptr),%rax # n[0]
1788 lea 8*8($nptr),$nptr
1790 mov 8+8(%rsp),%rdx # pull end of t[]
1791 cmp 0+8(%rsp),$nptr # end of n[]?
1803 sbb $carry,$carry # top carry
1805 mov 48+56+8(%rsp),$m0 # pull n0*a[0]
1815 mov %r8,($tptr) # save result
1824 lea 8($tptr),$tptr # $tptr++
1869 mov 48-16+8(%rsp,%rcx,8),$m0# pull n0*a[i]
1873 mov 8*0($nptr),%rax # pull n[0]
1880 lea 8*8($nptr),$nptr
1881 mov 8+8(%rsp),%rdx # pull end of t[]
1882 cmp 0+8(%rsp),$nptr # end of n[]?
1883 jae .L8x_tail_done # break out of loop
1885 mov 48+56+8(%rsp),$m0 # pull n0*a[0]
1887 mov 8*0($nptr),%rax # pull n[0]
1896 sbb $carry,$carry # top carry
1903 add (%rdx),%r8 # can this overflow?
1910 adc \$0,%r15 # can't overflow, because we
1911 # started with "overhung" part
1925 adc \$0,%rax # top-most carry
1926 mov -8($nptr),%rcx # np[num-1]
1929 movq %xmm2,$nptr # restore $nptr
1931 mov %r8,8*0($tptr) # store top 512 bits
1933 movq %xmm3,$num # $num is %r9, can't be moved upwards
1940 lea 8*8($tptr),$tptr
1942 cmp %rdx,$tptr # end of t[]?
1943 jb .L8x_reduction_loop
1945 .size bn_sqr8x_internal,.-bn_sqr8x_internal
1948 ##############################################################
1949 # Post-condition, 4x unrolled
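# i.e. the final branch-free conditional subtraction of the modulus
# (illustrative sketch):
#
#	mask = 0 - (t >= n);		# all-ones iff reduction is needed
#	r    = t - (n & mask);		# subtract n only in that case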
1952 my ($tptr,$nptr)=("%rbx","%rbp");
1954 .type __bn_post4x_internal,\@abi-omnipotent
1956 __bn_post4x_internal:
1958 lea (%rdi,$num),$tptr # %rdi was $tptr above
1960 movq %xmm1,$rptr # restore $rptr
1962 movq %xmm1,$aptr # prepare for back-to-back call
1964 dec %r12 # so that after 'not' we get -n[0]
1969 jmp .Lsqr4x_sub_entry
1978 lea 8*4($nptr),$nptr
1988 neg %r10 # mov %r10,%cf
1994 lea 8*4($tptr),$tptr
1996 sbb %r10,%r10 # mov %cf,%r10
1999 lea 8*4($rptr),$rptr
2004 mov $num,%r10 # prepare for back-to-back call
2005 neg $num # restore $num
2007 .size __bn_post4x_internal,.-__bn_post4x_internal
2012 .globl bn_from_montgomery
2013 .type bn_from_montgomery,\@abi-omnipotent
2016 testl \$7,`($win64?"48(%rsp)":"%r9d")`
2020 .size bn_from_montgomery,.-bn_from_montgomery
2022 .type bn_from_mont8x,\@function,6
2034 shl \$3,${num}d # convert $num to bytes
2035 lea ($num,$num,2),%r10 # 3*$num in bytes
2039 ##############################################################
2040 # Ensure that stack frame doesn't alias with $rptr+3*$num
2041 # modulo 4096, which covers ret[num], am[num] and n[num]
2042 # (see bn_exp.c). The stack is allocated to align with
2043 # bn_power5's frame, and as bn_from_montgomery happens to be
2044 # the last operation, we use the opportunity to cleanse it.
2046 lea -320(%rsp,$num,2),%r11
2051 sub %r11,%rsp # align with $aptr
2052 lea -320(%rsp,$num,2),%rsp # alloca(frame+2*$num*8+256)
2057 lea 4096-320(,$num,2),%r10
2058 lea -320(%rsp,$num,2),%rsp # alloca(frame+2*$num*8+256)
2069 mov (%rsp,%r11),%r10
2071 .byte 0x2e # predict non-taken
2072 jnc .Lfrom_page_walk
2077 ##############################################################
2080 # +0 saved $num, used in reduction section
2081 # +8 &t[2*$num], used in reduction section
2087 mov %rax, 40(%rsp) # save original %rsp
2096 movdqu ($aptr),%xmm1
2097 movdqu 16($aptr),%xmm2
2098 movdqu 32($aptr),%xmm3
2099 movdqa %xmm0,(%rax,$num)
2100 movdqu 48($aptr),%xmm4
2101 movdqa %xmm0,16(%rax,$num)
2102 .byte 0x48,0x8d,0xb6,0x40,0x00,0x00,0x00 # lea 64($aptr),$aptr
2104 movdqa %xmm0,32(%rax,$num)
2105 movdqa %xmm2,16(%rax)
2106 movdqa %xmm0,48(%rax,$num)
2107 movdqa %xmm3,32(%rax)
2108 movdqa %xmm4,48(%rax)
2117 movq %r10, %xmm3 # -num
2119 $code.=<<___ if ($addx);
2120 mov OPENSSL_ia32cap_P+8(%rip),%r11d
2122 cmp \$0x80108,%r11d # check for AD*X+BMI2+BMI1
2125 lea (%rax,$num),$rptr
2126 call __bn_sqrx8x_reduction
2127 call __bn_postx4x_internal
2131 mov 40(%rsp),%rsi # restore %rsp
2132 jmp .Lfrom_mont_zero
2138 call __bn_sqr8x_reduction
2139 call __bn_post4x_internal
2143 mov 40(%rsp),%rsi # restore %rsp
2144 jmp .Lfrom_mont_zero
2148 movdqa %xmm0,16*0(%rax)
2149 movdqa %xmm0,16*1(%rax)
2150 movdqa %xmm0,16*2(%rax)
2151 movdqa %xmm0,16*3(%rax)
2154 jnz .Lfrom_mont_zero
2166 .size bn_from_mont8x,.-bn_from_mont8x
2172 my $bp="%rdx"; # restore original value
2175 .type bn_mulx4x_mont_gather5,\@function,6
2177 bn_mulx4x_mont_gather5:
2187 shl \$3,${num}d # convert $num to bytes
2188 lea ($num,$num,2),%r10 # 3*$num in bytes
2192 ##############################################################
2193 # Ensure that stack frame doesn't alias with $rptr+3*$num
2194 # modulo 4096, which covers ret[num], am[num] and n[num]
2195 # (see bn_exp.c). This is done to allow the memory disambiguation
2196 # logic to do its magic. [An extra [num] is allocated in order
2197 # to align with bn_power5's frame, which is cleansed after
2198 # completing exponentiation. An extra 256 bytes is for the power mask
2199 # calculated from the 7th argument, the index.]
2201 lea -320(%rsp,$num,2),%r11
2206 sub %r11,%rsp # align with $aptr
2207 lea -320(%rsp,$num,2),%rsp # alloca(frame+2*$num*8+256)
2211 lea 4096-320(,$num,2),%r10
2212 lea -320(%rsp,$num,2),%rsp # alloca(frame+2*$num*8+256)
2218 and \$-64,%rsp # ensure alignment
2223 mov (%rsp,%r11),%r10
2225 .byte 0x2e # predict non-taken
2226 jnc .Lmulx4x_page_walk
2228 ##############################################################
2231 # +8 off-loaded &b[i]
2240 mov $n0, 32(%rsp) # save *n0
2241 mov %rax,40(%rsp) # save original %rsp
2243 call mulx4x_internal
2245 mov 40(%rsp),%rsi # restore %rsp
2257 .size bn_mulx4x_mont_gather5,.-bn_mulx4x_mont_gather5
2259 .type mulx4x_internal,\@abi-omnipotent
2262 mov $num,8(%rsp) # save -$num (it was in bytes)
2264 neg $num # restore $num
2266 neg %r10 # restore $num
2267 lea 128($bp,$num),%r13 # end of powers table (+size optimization)
2269 movd `($win64?56:8)`(%rax),%xmm5 # load 7th argument
2271 lea .Linc(%rip),%rax
2272 mov %r13,16+8(%rsp) # end of b[num]
2273 mov $num,24+8(%rsp) # inner counter
2274 mov $rp, 56+8(%rsp) # save $rp
2276 my ($aptr, $bptr, $nptr, $tptr, $mi, $bi, $zero, $num)=
2277 ("%rsi","%rdi","%rcx","%rbx","%r8","%r9","%rbp","%rax");
2279 my $STRIDE=2**5*8; # 5 is "window size"
2280 my $N=$STRIDE/4; # should match cache line size
2282 movdqa 0(%rax),%xmm0 # 00000001000000010000000000000000
2283 movdqa 16(%rax),%xmm1 # 00000002000000020000000200000002
2284 lea 88-112(%rsp,%r10),%r10 # place the mask after tp[num+1] (+ICache optimization)
2285 lea 128($bp),$bptr # size optimization
2287 pshufd \$0,%xmm5,%xmm5 # broadcast index
2292 ########################################################################
2293 # calculate mask by comparing 0..31 to index and save result to stack
2298 pcmpeqd %xmm5,%xmm0 # compare to 1,0
2301 for($i=0;$i<$STRIDE/16-4;$i+=4) {
2304 pcmpeqd %xmm5,%xmm1 # compare to 3,2
2305 movdqa %xmm0,`16*($i+0)+112`(%r10)
2309 pcmpeqd %xmm5,%xmm2 # compare to 5,4
2310 movdqa %xmm1,`16*($i+1)+112`(%r10)
2314 pcmpeqd %xmm5,%xmm3 # compare to 7,6
2315 movdqa %xmm2,`16*($i+2)+112`(%r10)
2320 movdqa %xmm3,`16*($i+3)+112`(%r10)
2324 $code.=<<___; # last iteration can be optimized
2328 movdqa %xmm0,`16*($i+0)+112`(%r10)
2332 movdqa %xmm1,`16*($i+1)+112`(%r10)
2335 movdqa %xmm2,`16*($i+2)+112`(%r10)
2337 pand `16*($i+0)-128`($bptr),%xmm0 # while it's still in register
2338 pand `16*($i+1)-128`($bptr),%xmm1
2339 pand `16*($i+2)-128`($bptr),%xmm2
2340 movdqa %xmm3,`16*($i+3)+112`(%r10)
2341 pand `16*($i+3)-128`($bptr),%xmm3
2345 for($i=0;$i<$STRIDE/16-4;$i+=4) {
2347 movdqa `16*($i+0)-128`($bptr),%xmm4
2348 movdqa `16*($i+1)-128`($bptr),%xmm5
2349 movdqa `16*($i+2)-128`($bptr),%xmm2
2350 pand `16*($i+0)+112`(%r10),%xmm4
2351 movdqa `16*($i+3)-128`($bptr),%xmm3
2352 pand `16*($i+1)+112`(%r10),%xmm5
2354 pand `16*($i+2)+112`(%r10),%xmm2
2356 pand `16*($i+3)+112`(%r10),%xmm3
2363 pshufd \$0x4e,%xmm0,%xmm1
2365 lea $STRIDE($bptr),$bptr
2366 movq %xmm0,%rdx # bp[0]
2367 lea 64+8*4+8(%rsp),$tptr
2370 mulx 0*8($aptr),$mi,%rax # a[0]*b[0]
2371 mulx 1*8($aptr),%r11,%r12 # a[1]*b[0]
2373 mulx 2*8($aptr),%rax,%r13 # ...
2376 mulx 3*8($aptr),%rax,%r14
2379 imulq 32+8(%rsp),$mi # "t[0]"*n0
2380 xor $zero,$zero # cf=0, of=0
2383 mov $bptr,8+8(%rsp) # off-load &b[i]
2385 lea 4*8($aptr),$aptr
2387 adcx $zero,%r14 # cf=0
2389 mulx 0*8($nptr),%rax,%r10
2390 adcx %rax,%r15 # discarded
2392 mulx 1*8($nptr),%rax,%r11
2395 mulx 2*8($nptr),%rax,%r12
2396 mov 24+8(%rsp),$bptr # counter value
2397 mov %r10,-8*4($tptr)
2400 mulx 3*8($nptr),%rax,%r15
2402 mov %r11,-8*3($tptr)
2404 adox $zero,%r15 # of=0
2405 lea 4*8($nptr),$nptr
2406 mov %r12,-8*2($tptr)
2411 adcx $zero,%r15 # cf=0, modulo-scheduled
2412 mulx 0*8($aptr),%r10,%rax # a[4]*b[0]
2414 mulx 1*8($aptr),%r11,%r14 # a[5]*b[0]
2416 mulx 2*8($aptr),%r12,%rax # ...
2418 mulx 3*8($aptr),%r13,%r14
2422 adcx $zero,%r14 # cf=0
2423 lea 4*8($aptr),$aptr
2424 lea 4*8($tptr),$tptr
2427 mulx 0*8($nptr),%rax,%r15
2430 mulx 1*8($nptr),%rax,%r15
2433 mulx 2*8($nptr),%rax,%r15
2434 mov %r10,-5*8($tptr)
2436 mov %r11,-4*8($tptr)
2438 mulx 3*8($nptr),%rax,%r15
2440 mov %r12,-3*8($tptr)
2443 lea 4*8($nptr),$nptr
2444 mov %r13,-2*8($tptr)
2446 dec $bptr # of=0, pass cf
2449 mov 8(%rsp),$num # load -num
2450 adc $zero,%r15 # modulo-scheduled
2451 lea ($aptr,$num),$aptr # rewind $aptr
2453 mov 8+8(%rsp),$bptr # re-load &b[i]
2454 adc $zero,$zero # top-most carry
2455 mov %r14,-1*8($tptr)
2460 lea 16-256($tptr),%r10 # where 256-byte mask is (+density control)
2465 for($i=0;$i<$STRIDE/16;$i+=4) {
2467 movdqa `16*($i+0)-128`($bptr),%xmm0
2468 movdqa `16*($i+1)-128`($bptr),%xmm1
2469 movdqa `16*($i+2)-128`($bptr),%xmm2
2470 pand `16*($i+0)+256`(%r10),%xmm0
2471 movdqa `16*($i+3)-128`($bptr),%xmm3
2472 pand `16*($i+1)+256`(%r10),%xmm1
2474 pand `16*($i+2)+256`(%r10),%xmm2
2476 pand `16*($i+3)+256`(%r10),%xmm3
2483 pshufd \$0x4e,%xmm4,%xmm0
2485 lea $STRIDE($bptr),$bptr
2486 movq %xmm0,%rdx # m0=bp[i]
2488 mov $zero,($tptr) # save top-most carry
2489 lea 4*8($tptr,$num),$tptr # rewind $tptr
2490 mulx 0*8($aptr),$mi,%r11 # a[0]*b[i]
2491 xor $zero,$zero # cf=0, of=0
2493 mulx 1*8($aptr),%r14,%r12 # a[1]*b[i]
2494 adox -4*8($tptr),$mi # +t[0]
2496 mulx 2*8($aptr),%r15,%r13 # ...
2497 adox -3*8($tptr),%r11
2499 mulx 3*8($aptr),%rdx,%r14
2500 adox -2*8($tptr),%r12
2502 lea ($nptr,$num),$nptr # rewind $nptr
2503 lea 4*8($aptr),$aptr
2504 adox -1*8($tptr),%r13
2509 imulq 32+8(%rsp),$mi # "t[0]"*n0
2512 xor $zero,$zero # cf=0, of=0
2513 mov $bptr,8+8(%rsp) # off-load &b[i]
2515 mulx 0*8($nptr),%rax,%r10
2516 adcx %rax,%r15 # discarded
2518 mulx 1*8($nptr),%rax,%r11
2521 mulx 2*8($nptr),%rax,%r12
2524 mulx 3*8($nptr),%rax,%r15
2526 mov 24+8(%rsp),$bptr # counter value
2527 mov %r10,-8*4($tptr)
2529 mov %r11,-8*3($tptr)
2530 adox $zero,%r15 # of=0
2531 mov %r12,-8*2($tptr)
2532 lea 4*8($nptr),$nptr
2537 mulx 0*8($aptr),%r10,%rax # a[4]*b[i]
2538 adcx $zero,%r15 # cf=0, modulo-scheduled
2540 mulx 1*8($aptr),%r11,%r14 # a[5]*b[i]
2541 adcx 0*8($tptr),%r10
2543 mulx 2*8($aptr),%r12,%rax # ...
2544 adcx 1*8($tptr),%r11
2546 mulx 3*8($aptr),%r13,%r14
2548 adcx 2*8($tptr),%r12
2550 adcx 3*8($tptr),%r13
2551 adox $zero,%r14 # of=0
2552 lea 4*8($aptr),$aptr
2553 lea 4*8($tptr),$tptr
2554 adcx $zero,%r14 # cf=0
2557 mulx 0*8($nptr),%rax,%r15
2560 mulx 1*8($nptr),%rax,%r15
2563 mulx 2*8($nptr),%rax,%r15
2564 mov %r10,-5*8($tptr)
2567 mov %r11,-4*8($tptr)
2568 mulx 3*8($nptr),%rax,%r15
2570 lea 4*8($nptr),$nptr
2571 mov %r12,-3*8($tptr)
2574 mov %r13,-2*8($tptr)
2576 dec $bptr # of=0, pass cf
2579 mov 0+8(%rsp),$num # load -num
2580 adc $zero,%r15 # modulo-scheduled
2581 sub 0*8($tptr),$bptr # pull top-most carry to %cf
2582 mov 8+8(%rsp),$bptr # re-load &b[i]
2585 lea ($aptr,$num),$aptr # rewind $aptr
2586 adc $zero,$zero # top-most carry
2587 mov %r14,-1*8($tptr)
2594 mov ($nptr,$num),%r12
2595 lea ($nptr,$num),%rbp # rewind $nptr
2597 lea ($tptr,$num),%rdi # rewind $tptr
2600 sub %r14,%r10 # compare top-most words
2604 sub %r8,%rax # %rax=-%r8
2605 mov 56+8(%rsp),%rdx # restore rp
2606 dec %r12 # so that after 'not' we get -n[0]
2611 jmp .Lsqrx4x_sub_entry # common post-condition
2612 .size mulx4x_internal,.-mulx4x_internal
2615 ######################################################################
2617 my $rptr="%rdi"; # BN_ULONG *rptr,
2618 my $aptr="%rsi"; # const BN_ULONG *aptr,
2619 my $bptr="%rdx"; # const void *table,
2620 my $nptr="%rcx"; # const BN_ULONG *nptr,
2621 my $n0 ="%r8"; # const BN_ULONG *n0);
2622 my $num ="%r9"; # int num, has to be divisible by 8
2625 my ($i,$j,$tptr)=("%rbp","%rcx",$rptr);
2626 my @A0=("%r10","%r11");
2627 my @A1=("%r12","%r13");
2628 my ($a0,$a1,$ai)=("%r14","%r15","%rbx");
2631 .type bn_powerx5,\@function,6
2643 shl \$3,${num}d # convert $num to bytes
2644 lea ($num,$num,2),%r10 # 3*$num in bytes
2648 ##############################################################
2649 # Ensure that stack frame doesn't alias with $rptr+3*$num
2650 # modulo 4096, which covers ret[num], am[num] and n[num]
2651 # (see bn_exp.c). This is done to allow the memory disambiguation
2652 # logic to do its magic. [An extra 256 bytes is for the power mask
2653 # calculated from the 7th argument, the index.]
2655 lea -320(%rsp,$num,2),%r11
2660 sub %r11,%rsp # align with $aptr
2661 lea -320(%rsp,$num,2),%rsp # alloca(frame+2*$num*8+256)
2666 lea 4096-320(,$num,2),%r10
2667 lea -320(%rsp,$num,2),%rsp # alloca(frame+2*$num*8+256)
2678 mov (%rsp,%r11),%r10
2680 .byte 0x2e # predict non-taken
2681 jnc .Lpwrx_page_walk
2686 ##############################################################
2689 # +0 saved $num, used in reduction section
2690 # +8 &t[2*$num], used in reduction section
2691 # +16 intermediate carry bit
2692 # +24 top-most carry bit, used in reduction section
2698 movq $rptr,%xmm1 # save $rptr
2699 movq $nptr,%xmm2 # save $nptr
2700 movq %r10, %xmm3 # -$num
2703 mov %rax, 40(%rsp) # save original %rsp
2706 call __bn_sqrx8x_internal
2707 call __bn_postx4x_internal
2708 call __bn_sqrx8x_internal
2709 call __bn_postx4x_internal
2710 call __bn_sqrx8x_internal
2711 call __bn_postx4x_internal
2712 call __bn_sqrx8x_internal
2713 call __bn_postx4x_internal
2714 call __bn_sqrx8x_internal
2715 call __bn_postx4x_internal
2717 mov %r10,$num # -num
2723 call mulx4x_internal
2725 mov 40(%rsp),%rsi # restore %rsp
2737 .size bn_powerx5,.-bn_powerx5
2739 .globl bn_sqrx8x_internal
2740 .hidden bn_sqrx8x_internal
2741 .type bn_sqrx8x_internal,\@abi-omnipotent
2744 __bn_sqrx8x_internal:
2745 ##################################################################
2748 # a) multiply-n-add everything but a[i]*a[i];
2749 # b) shift result of a) by 1 to the left and accumulate
2750 # a[i]*a[i] products;
2752 ##################################################################
2753 # a[7]a[7]a[6]a[6]a[5]a[5]a[4]a[4]a[3]a[3]a[2]a[2]a[1]a[1]a[0]a[0]
2784 # a[7]a[7]a[6]a[6]a[5]a[5]a[4]a[4]a[3]a[3]a[2]a[2]a[1]a[1]a[0]a[0]
2787 my ($zero,$carry)=("%rbp","%rcx");
2790 lea 48+8(%rsp),$tptr
2791 lea ($aptr,$num),$aaptr
2792 mov $num,0+8(%rsp) # save $num
2793 mov $aaptr,8+8(%rsp) # save end of $aptr
2794 jmp .Lsqr8x_zero_start
2797 .byte 0x66,0x66,0x66,0x2e,0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00
2800 movdqa %xmm0,0*8($tptr)
2801 movdqa %xmm0,2*8($tptr)
2802 movdqa %xmm0,4*8($tptr)
2803 movdqa %xmm0,6*8($tptr)
2804 .Lsqr8x_zero_start: # aligned at 32
2805 movdqa %xmm0,8*8($tptr)
2806 movdqa %xmm0,10*8($tptr)
2807 movdqa %xmm0,12*8($tptr)
2808 movdqa %xmm0,14*8($tptr)
2809 lea 16*8($tptr),$tptr
2813 mov 0*8($aptr),%rdx # a[0], modulo-scheduled
2814 #xor %r9,%r9 # t[1], ex-$num, zero already
2821 lea 48+8(%rsp),$tptr
2822 xor $zero,$zero # cf=0, of=0
2823 jmp .Lsqrx8x_outer_loop
2826 .Lsqrx8x_outer_loop:
2827 mulx 1*8($aptr),%r8,%rax # a[1]*a[0]
2828 adcx %r9,%r8 # a[1]*a[0]+=t[1]
2830 mulx 2*8($aptr),%r9,%rax # a[2]*a[0]
2833 .byte 0xc4,0xe2,0xab,0xf6,0x86,0x18,0x00,0x00,0x00 # mulx 3*8($aptr),%r10,%rax # ...
2836 .byte 0xc4,0xe2,0xa3,0xf6,0x86,0x20,0x00,0x00,0x00 # mulx 4*8($aptr),%r11,%rax
2839 mulx 5*8($aptr),%r12,%rax
2842 mulx 6*8($aptr),%r13,%rax
2845 mulx 7*8($aptr),%r14,%r15
2846 mov 1*8($aptr),%rdx # a[1]
2850 mov %r8,1*8($tptr) # t[1]
2851 mov %r9,2*8($tptr) # t[2]
2852 sbb $carry,$carry # mov %cf,$carry
2853 xor $zero,$zero # cf=0, of=0
2856 mulx 2*8($aptr),%r8,%rbx # a[2]*a[1]
2857 mulx 3*8($aptr),%r9,%rax # a[3]*a[1]
2860 mulx 4*8($aptr),%r10,%rbx # ...
2863 .byte 0xc4,0xe2,0xa3,0xf6,0x86,0x28,0x00,0x00,0x00 # mulx 5*8($aptr),%r11,%rax
2866 .byte 0xc4,0xe2,0x9b,0xf6,0x9e,0x30,0x00,0x00,0x00 # mulx 6*8($aptr),%r12,%rbx
2869 .byte 0xc4,0x62,0x93,0xf6,0xb6,0x38,0x00,0x00,0x00 # mulx 7*8($aptr),%r13,%r14
2870 mov 2*8($aptr),%rdx # a[2]
2874 adox $zero,%r14 # of=0
2875 adcx $zero,%r14 # cf=0
2877 mov %r8,3*8($tptr) # t[3]
2878 mov %r9,4*8($tptr) # t[4]
2880 mulx 3*8($aptr),%r8,%rbx # a[3]*a[2]
2881 mulx 4*8($aptr),%r9,%rax # a[4]*a[2]
2884 mulx 5*8($aptr),%r10,%rbx # ...
2887 .byte 0xc4,0xe2,0xa3,0xf6,0x86,0x30,0x00,0x00,0x00 # mulx 6*8($aptr),%r11,%rax
2890 .byte 0xc4,0x62,0x9b,0xf6,0xae,0x38,0x00,0x00,0x00 # mulx 7*8($aptr),%r12,%r13
2892 mov 3*8($aptr),%rdx # a[3]
2896 mov %r8,5*8($tptr) # t[5]
2897 mov %r9,6*8($tptr) # t[6]
2898 mulx 4*8($aptr),%r8,%rax # a[4]*a[3]
2899 adox $zero,%r13 # of=0
2900 adcx $zero,%r13 # cf=0
2902 mulx 5*8($aptr),%r9,%rbx # a[5]*a[3]
2905 mulx 6*8($aptr),%r10,%rax # ...
2908 mulx 7*8($aptr),%r11,%r12
2909 mov 4*8($aptr),%rdx # a[4]
2910 mov 5*8($aptr),%r14 # a[5]
2913 mov 6*8($aptr),%r15 # a[6]
2915 adox $zero,%r12 # of=0
2916 adcx $zero,%r12 # cf=0
2918 mov %r8,7*8($tptr) # t[7]
2919 mov %r9,8*8($tptr) # t[8]
2921 mulx %r14,%r9,%rax # a[5]*a[4]
2922 mov 7*8($aptr),%r8 # a[7]
2924 mulx %r15,%r10,%rbx # a[6]*a[4]
2927 mulx %r8,%r11,%rax # a[7]*a[4]
2928 mov %r14,%rdx # a[5]
2931 #adox $zero,%rax # of=0
2932 adcx $zero,%rax # cf=0
2934 mulx %r15,%r14,%rbx # a[6]*a[5]
2935 mulx %r8,%r12,%r13 # a[7]*a[5]
2936 mov %r15,%rdx # a[6]
2937 lea 8*8($aptr),$aptr
2944 mulx %r8,%r8,%r14 # a[7]*a[6]
2949 je .Lsqrx8x_outer_break
2951 neg $carry # mov $carry,%cf
2955 adcx 9*8($tptr),%r9 # +=t[9]
2956 adcx 10*8($tptr),%r10 # ...
2957 adcx 11*8($tptr),%r11
2958 adc 12*8($tptr),%r12
2959 adc 13*8($tptr),%r13
2960 adc 14*8($tptr),%r14
2961 adc 15*8($tptr),%r15
2963 lea 2*64($tptr),$tptr
2964 sbb %rax,%rax # mov %cf,$carry
2966 mov -64($aptr),%rdx # a[0]
2967 mov %rax,16+8(%rsp) # offload $carry
2968 mov $tptr,24+8(%rsp)
2970 #lea 8*8($tptr),$tptr # see 2*8*8($tptr) above
2971 xor %eax,%eax # cf=0, of=0
2977 mulx 0*8($aaptr),%rax,%r8 # a[8]*a[i]
2978 adcx %rax,%rbx # +=t[8]
2981 mulx 1*8($aaptr),%rax,%r9 # ...
2985 mulx 2*8($aaptr),%rax,%r10
2989 mulx 3*8($aaptr),%rax,%r11
2993 .byte 0xc4,0x62,0xfb,0xf6,0xa5,0x20,0x00,0x00,0x00 # mulx 4*8($aaptr),%rax,%r12
2997 mulx 5*8($aaptr),%rax,%r13
3001 mulx 6*8($aaptr),%rax,%r14
3002 mov %rbx,($tptr,%rcx,8) # store t[8+i]
3007 .byte 0xc4,0x62,0xfb,0xf6,0xbd,0x38,0x00,0x00,0x00 # mulx 7*8($aaptr),%rax,%r15
3008 mov 8($aptr,%rcx,8),%rdx # a[i]
3010 adox %rbx,%r15 # %rbx is 0, of=0
3011 adcx %rbx,%r15 # cf=0
3017 lea 8*8($aaptr),$aaptr
3019 cmp 8+8(%rsp),$aaptr # done?
3022 sub 16+8(%rsp),%rbx # mov 16(%rsp),%cf
3033 lea 8*8($tptr),$tptr
3035 sbb %rax,%rax # mov %cf,%rax
3036 xor %ebx,%ebx # cf=0, of=0
3037 mov %rax,16+8(%rsp) # offload carry
3042 sub 16+8(%rsp),%r8 # consume last carry
3043 mov 24+8(%rsp),$carry # initial $tptr, borrow $carry
3044 mov 0*8($aptr),%rdx # a[8], modulo-scheduled
3045 xor %ebp,%ebp # xor $zero,$zero
3047 cmp $carry,$tptr # cf=0, of=0
3048 je .Lsqrx8x_outer_loop
3053 mov 2*8($carry),%r10
3055 mov 3*8($carry),%r11
3057 mov 4*8($carry),%r12
3059 mov 5*8($carry),%r13
3061 mov 6*8($carry),%r14
3063 mov 7*8($carry),%r15
3065 jmp .Lsqrx8x_outer_loop
3068 .Lsqrx8x_outer_break:
3069 mov %r9,9*8($tptr) # t[9]
3070 movq %xmm3,%rcx # -$num
3071 mov %r10,10*8($tptr) # ...
3072 mov %r11,11*8($tptr)
3073 mov %r12,12*8($tptr)
3074 mov %r13,13*8($tptr)
3075 mov %r14,14*8($tptr)
3080 lea 48+8(%rsp),$tptr
3081 mov ($aptr,$i),%rdx # a[0]
3083 mov 8($tptr),$A0[1] # t[1]
3084 xor $A0[0],$A0[0] # t[0], of=0, cf=0
3085 mov 0+8(%rsp),$num # restore $num
3087 mov 16($tptr),$A1[0] # t[2] # prefetch
3088 mov 24($tptr),$A1[1] # t[3] # prefetch
3089 #jmp .Lsqrx4x_shift_n_add # happens to be aligned
3092 .Lsqrx4x_shift_n_add:
3096 .byte 0x48,0x8b,0x94,0x0e,0x08,0x00,0x00,0x00 # mov 8($aptr,$i),%rdx # a[i+1] # prefetch
3097 .byte 0x4c,0x8b,0x97,0x20,0x00,0x00,0x00 # mov 32($tptr),$A0[0] # t[2*i+4] # prefetch
3100 mov 40($tptr),$A0[1] # t[2*i+4+1] # prefetch
3107 mov 16($aptr,$i),%rdx # a[i+2] # prefetch
3108 mov 48($tptr),$A1[0] # t[2*i+6] # prefetch
3111 mov 56($tptr),$A1[1] # t[2*i+6+1] # prefetch
3118 mov 24($aptr,$i),%rdx # a[i+3] # prefetch
3120 mov 64($tptr),$A0[0] # t[2*i+8] # prefetch
3123 mov 72($tptr),$A0[1] # t[2*i+8+1] # prefetch
3130 jrcxz .Lsqrx4x_shift_n_add_break
3131 .byte 0x48,0x8b,0x94,0x0e,0x00,0x00,0x00,0x00 # mov 0($aptr,$i),%rdx # a[i+4] # prefetch
3134 mov 80($tptr),$A1[0] # t[2*i+10] # prefetch
3135 mov 88($tptr),$A1[1] # t[2*i+10+1] # prefetch
3140 jmp .Lsqrx4x_shift_n_add
3143 .Lsqrx4x_shift_n_add_break:
3147 lea 64($tptr),$tptr # end of t[] buffer
3150 ######################################################################
3151 # Montgomery reduction part, "word-by-word" algorithm.
3153 # This new path is inspired by multiple submissions from Intel, by
3154 # Shay Gueron, Vlad Krasnov, Erdinc Ozturk, James Guilford,
3157 my ($nptr,$carry,$m0)=("%rbp","%rsi","%rdx");
3161 __bn_sqrx8x_reduction:
3162 xor %eax,%eax # initial top-most carry bit
3163 mov 32+8(%rsp),%rbx # n0
3164 mov 48+8(%rsp),%rdx # "%r8", 8*0($tptr)
3165 lea -8*8($nptr,$num),%rcx # end of n[]
3166 #lea 48+8(%rsp,$num,2),$tptr # end of t[] buffer
3167 mov %rcx, 0+8(%rsp) # save end of n[]
3168 mov $tptr,8+8(%rsp) # save end of t[]
3170 lea 48+8(%rsp),$tptr # initial t[] window
3171 jmp .Lsqrx8x_reduction_loop
3174 .Lsqrx8x_reduction_loop:
3180 imulq %rbx,%rdx # n0*a[i]
3184 mov %rax,24+8(%rsp) # store top-most carry bit
3186 lea 8*8($tptr),$tptr
3187 xor $carry,$carry # cf=0,of=0
3194 mulx 8*0($nptr),%rax,%r8 # n[0]
3195 adcx %rbx,%rax # discarded
3198 mulx 8*1($nptr),%rbx,%r9 # n[1]
3202 mulx 8*2($nptr),%rbx,%r10
3206 mulx 8*3($nptr),%rbx,%r11
3210 .byte 0xc4,0x62,0xe3,0xf6,0xa5,0x20,0x00,0x00,0x00 # mulx 8*4($nptr),%rbx,%r12
3216 mulx 32+8(%rsp),%rbx,%rdx # %rdx discarded
3218 mov %rax,64+48+8(%rsp,%rcx,8) # put aside n0*a[i]
3220 mulx 8*5($nptr),%rax,%r13
3224 mulx 8*6($nptr),%rax,%r14
3228 mulx 8*7($nptr),%rax,%r15
3231 adox $carry,%r15 # $carry is 0
3232 adcx $carry,%r15 # cf=0
3234 .byte 0x67,0x67,0x67
3238 mov $carry,%rax # xor %rax,%rax
3239 cmp 0+8(%rsp),$nptr # end of n[]?
3240 jae .Lsqrx8x_no_tail
3242 mov 48+8(%rsp),%rdx # pull n0*a[0]
3244 lea 8*8($nptr),$nptr
3247 adcx 8*2($tptr),%r10
3253 lea 8*8($tptr),$tptr
3254 sbb %rax,%rax # top carry
3256 xor $carry,$carry # of=0, cf=0
3263 mulx 8*0($nptr),%rax,%r8
3267 mulx 8*1($nptr),%rax,%r9
3271 mulx 8*2($nptr),%rax,%r10
3275 mulx 8*3($nptr),%rax,%r11
3279 .byte 0xc4,0x62,0xfb,0xf6,0xa5,0x20,0x00,0x00,0x00 # mulx 8*4($nptr),%rax,%r12
3283 mulx 8*5($nptr),%rax,%r13
3287 mulx 8*6($nptr),%rax,%r14
3291 mulx 8*7($nptr),%rax,%r15
3292 mov 72+48+8(%rsp,%rcx,8),%rdx # pull n0*a[i]
3295 mov %rbx,($tptr,%rcx,8) # save result
3297 adcx $carry,%r15 # cf=0
3302 cmp 0+8(%rsp),$nptr # end of n[]?
3303 jae .Lsqrx8x_tail_done # break out of loop
3305 sub 16+8(%rsp),$carry # mov 16(%rsp),%cf
3306 mov 48+8(%rsp),%rdx # pull n0*a[0]
3307 lea 8*8($nptr),$nptr
3316 lea 8*8($tptr),$tptr
3318 sub \$8,%rcx # mov \$-8,%rcx
3320 xor $carry,$carry # of=0, cf=0
3326 add 24+8(%rsp),%r8 # can this overflow?
3333 adc \$0,%r15 # can't overflow, because we
3334 # started with "overhung" part
3336 mov $carry,%rax # xor %rax,%rax
3338 sub 16+8(%rsp),$carry # mov 16(%rsp),%cf
3339 .Lsqrx8x_no_tail: # %cf is 0 if jumped here
3343 mov 8*7($nptr),$carry
3344 movq %xmm2,$nptr # restore $nptr
3351 adc %rax,%rax # top-most carry
3353 mov 32+8(%rsp),%rbx # n0
3354 mov 8*8($tptr,%rcx),%rdx # modulo-scheduled "%r8"
3356 mov %r8,8*0($tptr) # store top 512 bits
3357 lea 8*8($tptr),%r8 # borrow %r8
3366 lea 8*8($tptr,%rcx),$tptr # start of current t[] window
3367 cmp 8+8(%rsp),%r8 # end of t[]?
3368 jb .Lsqrx8x_reduction_loop
3370 .size bn_sqrx8x_internal,.-bn_sqrx8x_internal
3373 ##############################################################
3374 # Post-condition, 4x unrolled
3377 my ($rptr,$nptr)=("%rdx","%rbp");
3380 __bn_postx4x_internal:
3382 mov %rcx,%r10 # -$num
3383 mov %rcx,%r9 # -$num
3386 #lea 48+8(%rsp,%r9),$tptr
3387 movq %xmm1,$rptr # restore $rptr
3388 movq %xmm1,$aptr # prepare for back-to-back call
3389 dec %r12 # so that after 'not' we get -n[0]
3394 jmp .Lsqrx4x_sub_entry
3404 lea 8*4($nptr),$nptr
3409 neg %r8 # mov %r8,%cf
3415 lea 8*4($tptr),$tptr
3417 sbb %r8,%r8 # mov %cf,%r8
3420 lea 8*4($rptr),$rptr
3425 neg %r9 # restore $num
3428 .size __bn_postx4x_internal,.-__bn_postx4x_internal
3433 my ($inp,$num,$tbl,$idx)=$win64?("%rcx","%edx","%r8", "%r9d") : # Win64 order
3434 ("%rdi","%esi","%rdx","%ecx"); # Unix order
3441 .type bn_get_bits5,\@abi-omnipotent
3453 movzw (%r10,$num,2),%eax
3457 .size bn_get_bits5,.-bn_get_bits5
3460 .type bn_scatter5,\@abi-omnipotent
3464 jz .Lscatter_epilogue
3465 lea ($tbl,$idx,8),$tbl
3475 .size bn_scatter5,.-bn_scatter5
3478 .type bn_gather5,\@abi-omnipotent
3481 .LSEH_begin_bn_gather5: # Win64 thing, but harmless in other cases
3482 # I can't trust assembler to use specific encoding:-(
3483 .byte 0x4c,0x8d,0x14,0x24 #lea (%rsp),%r10
3484 .byte 0x48,0x81,0xec,0x08,0x01,0x00,0x00 #sub $0x108,%rsp
3485 lea .Linc(%rip),%rax
3486 and \$-16,%rsp # shouldn't be formally required
3489 movdqa 0(%rax),%xmm0 # 00000001000000010000000000000000
3490 movdqa 16(%rax),%xmm1 # 00000002000000020000000200000002
3491 lea 128($tbl),%r11 # size optimization
3492 lea 128(%rsp),%rax # size optimization
3494 pshufd \$0,%xmm5,%xmm5 # broadcast $idx
3498 ########################################################################
3499 # calculate mask by comparing 0..31 to $idx and save result to stack
3501 for($i=0;$i<$STRIDE/16;$i+=4) {
3504 pcmpeqd %xmm5,%xmm0 # compare to 1,0
3506 $code.=<<___ if ($i);
3507 movdqa %xmm3,`16*($i-1)-128`(%rax)
3513 pcmpeqd %xmm5,%xmm1 # compare to 3,2
3514 movdqa %xmm0,`16*($i+0)-128`(%rax)
3518 pcmpeqd %xmm5,%xmm2 # compare to 5,4
3519 movdqa %xmm1,`16*($i+1)-128`(%rax)
3523 pcmpeqd %xmm5,%xmm3 # compare to 7,6
3524 movdqa %xmm2,`16*($i+2)-128`(%rax)
3529 movdqa %xmm3,`16*($i-1)-128`(%rax)
3537 for($i=0;$i<$STRIDE/16;$i+=4) {
3539 movdqa `16*($i+0)-128`(%r11),%xmm0
3540 movdqa `16*($i+1)-128`(%r11),%xmm1
3541 movdqa `16*($i+2)-128`(%r11),%xmm2
3542 pand `16*($i+0)-128`(%rax),%xmm0
3543 movdqa `16*($i+3)-128`(%r11),%xmm3
3544 pand `16*($i+1)-128`(%rax),%xmm1
3546 pand `16*($i+2)-128`(%rax),%xmm2
3548 pand `16*($i+3)-128`(%rax),%xmm3
3555 lea $STRIDE(%r11),%r11
3556 pshufd \$0x4e,%xmm4,%xmm0
3558 movq %xmm0,($out) # m0=bp[0]
3565 .LSEH_end_bn_gather5:
3566 .size bn_gather5,.-bn_gather5
3574 .asciz "Montgomery Multiplication with scatter/gather for x86_64, CRYPTOGAMS by <appro\@openssl.org>"
3577 # EXCEPTION_DISPOSITION handler (EXCEPTION_RECORD *rec,ULONG64 frame,
3578 # CONTEXT *context,DISPATCHER_CONTEXT *disp)
3586 .extern __imp_RtlVirtualUnwind
3587 .type mul_handler,\@abi-omnipotent
3601 mov 120($context),%rax # pull context->Rax
3602 mov 248($context),%rbx # pull context->Rip
3604 mov 8($disp),%rsi # disp->ImageBase
3605 mov 56($disp),%r11 # disp->HandlerData
3607 mov 0(%r11),%r10d # HandlerData[0]
3608 lea (%rsi,%r10),%r10 # end of prologue label
3609 cmp %r10,%rbx # context->Rip<end of prologue label
3610 jb .Lcommon_seh_tail
3612 mov 152($context),%rax # pull context->Rsp
3614 mov 4(%r11),%r10d # HandlerData[1]
3615 lea (%rsi,%r10),%r10 # epilogue label
3616 cmp %r10,%rbx # context->Rip>=epilogue label
3617 jae .Lcommon_seh_tail
3619 lea .Lmul_epilogue(%rip),%r10
3623 mov 192($context),%r10 # pull $num
3624 mov 8(%rax,%r10,8),%rax # pull saved stack pointer
3629 mov 40(%rax),%rax # pull saved stack pointer
3637 mov %rbx,144($context) # restore context->Rbx
3638 mov %rbp,160($context) # restore context->Rbp
3639 mov %r12,216($context) # restore context->R12
3640 mov %r13,224($context) # restore context->R13
3641 mov %r14,232($context) # restore context->R14
3642 mov %r15,240($context) # restore context->R15
3647 mov %rax,152($context) # restore context->Rsp
3648 mov %rsi,168($context) # restore context->Rsi
3649 mov %rdi,176($context) # restore context->Rdi
3651 mov 40($disp),%rdi # disp->ContextRecord
3652 mov $context,%rsi # context
3653 mov \$154,%ecx # sizeof(CONTEXT)
3654 .long 0xa548f3fc # cld; rep movsq
3657 xor %rcx,%rcx # arg1, UNW_FLAG_NHANDLER
3658 mov 8(%rsi),%rdx # arg2, disp->ImageBase
3659 mov 0(%rsi),%r8 # arg3, disp->ControlPc
3660 mov 16(%rsi),%r9 # arg4, disp->FunctionEntry
3661 mov 40(%rsi),%r10 # disp->ContextRecord
3662 lea 56(%rsi),%r11 # &disp->HandlerData
3663 lea 24(%rsi),%r12 # &disp->EstablisherFrame
3664 mov %r10,32(%rsp) # arg5
3665 mov %r11,40(%rsp) # arg6
3666 mov %r12,48(%rsp) # arg7
3667 mov %rcx,56(%rsp) # arg8, (NULL)
3668 call *__imp_RtlVirtualUnwind(%rip)
3670 mov \$1,%eax # ExceptionContinueSearch
3682 .size mul_handler,.-mul_handler
3686 .rva .LSEH_begin_bn_mul_mont_gather5
3687 .rva .LSEH_end_bn_mul_mont_gather5
3688 .rva .LSEH_info_bn_mul_mont_gather5
3690 .rva .LSEH_begin_bn_mul4x_mont_gather5
3691 .rva .LSEH_end_bn_mul4x_mont_gather5
3692 .rva .LSEH_info_bn_mul4x_mont_gather5
3694 .rva .LSEH_begin_bn_power5
3695 .rva .LSEH_end_bn_power5
3696 .rva .LSEH_info_bn_power5
3698 .rva .LSEH_begin_bn_from_mont8x
3699 .rva .LSEH_end_bn_from_mont8x
3700 .rva .LSEH_info_bn_from_mont8x
3702 $code.=<<___ if ($addx);
3703 .rva .LSEH_begin_bn_mulx4x_mont_gather5
3704 .rva .LSEH_end_bn_mulx4x_mont_gather5
3705 .rva .LSEH_info_bn_mulx4x_mont_gather5
3707 .rva .LSEH_begin_bn_powerx5
3708 .rva .LSEH_end_bn_powerx5
3709 .rva .LSEH_info_bn_powerx5
3712 .rva .LSEH_begin_bn_gather5
3713 .rva .LSEH_end_bn_gather5
3714 .rva .LSEH_info_bn_gather5
3718 .LSEH_info_bn_mul_mont_gather5:
3721 .rva .Lmul_body,.Lmul_epilogue # HandlerData[]
3723 .LSEH_info_bn_mul4x_mont_gather5:
3726 .rva .Lmul4x_body,.Lmul4x_epilogue # HandlerData[]
3728 .LSEH_info_bn_power5:
3731 .rva .Lpower5_body,.Lpower5_epilogue # HandlerData[]
3733 .LSEH_info_bn_from_mont8x:
3736 .rva .Lfrom_body,.Lfrom_epilogue # HandlerData[]
3738 $code.=<<___ if ($addx);
3740 .LSEH_info_bn_mulx4x_mont_gather5:
3743 .rva .Lmulx4x_body,.Lmulx4x_epilogue # HandlerData[]
3745 .LSEH_info_bn_powerx5:
3748 .rva .Lpowerx5_body,.Lpowerx5_epilogue # HandlerData[]
3752 .LSEH_info_bn_gather5:
3753 .byte 0x01,0x0b,0x03,0x0a
3754 .byte 0x0b,0x01,0x21,0x00 # sub rsp,0x108
3755 .byte 0x04,0xa3,0x00,0x00 # lea r10,(rsp)
3760 $code =~ s/\`([^\`]*)\`/eval($1)/gem;