2 # Copyright 2011-2016 The OpenSSL Project Authors. All Rights Reserved.
4 # Licensed under the OpenSSL license (the "License"). You may not use
5 # this file except in compliance with the License. You can obtain a copy
6 # in the file LICENSE in the source distribution or at
7 # https://www.openssl.org/source/license.html
10 # ====================================================================
11 # Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
12 # project. The module is, however, dual licensed under OpenSSL and
13 # CRYPTOGAMS licenses depending on where you obtain it. For further
14 # details see http://www.openssl.org/~appro/cryptogams/.
15 # ====================================================================
19 # Companion to x86_64-mont.pl that optimizes cache-timing attack
20 # countermeasures. The subroutines are produced by replacing bp[i]
21 # references in their x86_64-mont.pl counterparts with cache-neutral
22 # references to the powers table computed in BN_mod_exp_mont_consttime.
23 # In addition, a subroutine that scatters elements of the powers table
24 # is implemented, so that scattering/gathering can be tuned without
25 # modifying bn_exp.c.
29 # Add MULX/AD*X code paths and additional interfaces to optimize for
30 # branch prediction unit. For input lengths that are multiples of 8,
31 # the np argument is not just the modulus value, but one interleaved
32 # with 0. This is done to optimize the post-condition...
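#
# The cache-neutral gather described above can be modelled in plain Perl:
# rather than indexing the powers table with the secret index, every entry
# is read and the wanted one is selected with a mask, so the memory access
# pattern does not depend on idx. A minimal reference sketch (illustrative
# only, not used by this module; the helper name and table layout are
# assumptions drawn from the comments in this file):

sub gather_ref {
	my ($table, $idx, $nwords) = @_;	# $table: flat array of 32*$nwords limbs
	my @out;
	for my $k (0 .. $nwords - 1) {
		my $limb = 0;
		for my $j (0 .. 31) {		# touch all 32 candidates every time
			my $mask = ($j == $idx) ? ~0 : 0;	# real code builds this branch-free
			$limb |= $table->[32*$k + $j] & $mask;
		}
		push @out, $limb;
	}
	return \@out;
}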
36 if ($flavour =~ /\./) { $output = $flavour; undef $flavour; }
38 $win64=0; $win64=1 if ($flavour =~ /[nm]asm|mingw64/ || $output =~ /\.asm$/);
40 $0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
41 ( $xlate="${dir}x86_64-xlate.pl" and -f $xlate ) or
42 ( $xlate="${dir}../../perlasm/x86_64-xlate.pl" and -f $xlate) or
43 die "can't locate x86_64-xlate.pl";
45 open OUT,"| \"$^X\" $xlate $flavour $output";
48 if (`$ENV{CC} -Wa,-v -c -o /dev/null -x assembler /dev/null 2>&1`
49 =~ /GNU assembler version ([2-9]\.[0-9]+)/) {
53 if (!$addx && $win64 && ($flavour =~ /nasm/ || $ENV{ASM} =~ /nasm/) &&
54 `nasm -v 2>&1` =~ /NASM version ([2-9]\.[0-9]+)/) {
58 if (!$addx && $win64 && ($flavour =~ /masm/ || $ENV{ASM} =~ /ml64/) &&
59 `ml64 2>&1` =~ /Version ([0-9]+)\./) {
63 if (!$addx && `$ENV{CC} -v 2>&1` =~ /((?:^clang|LLVM) version|.*based on LLVM) ([3-9])\.([0-9]+)/) {
64 my $ver = $2 + $3/100.0; # 3.1->3.01, 3.10->3.10
68 # int bn_mul_mont_gather5(
69 $rp="%rdi"; # BN_ULONG *rp,
70 $ap="%rsi"; # const BN_ULONG *ap,
71 $bp="%rdx"; # const BN_ULONG *bp,
72 $np="%rcx"; # const BN_ULONG *np,
73 $n0="%r8"; # const BN_ULONG *n0,
74 $num="%r9"; # int num,
75 # int idx); # 0 to 2^5-1, "index" in $bp holding
76 # pre-computed powers of a', interlaced
77 # in such a manner that b[0] is $bp[idx],
78 # b[1] is $bp[2^5+idx], etc.
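#
# A hedged Perl model of the interleaved layout sketched above (illustrative
# only; the helper names are made up): limb k of power idx lives at slot
# 32*k+idx, which is exactly what bn_scatter5/bn_gather5 below assume.

sub scatter_ref {				# store power number $idx into the table
	my ($table, $power, $idx) = @_;		# $power: array ref of limbs
	for my $k (0 .. $#{$power}) {
		$table->[32*$k + $idx] = $power->[$k];
	}
}

sub gather_limb_ref {				# limb $k of power $idx (layout only;
	my ($table, $idx, $k) = @_;		# the real gather masks all 32 columns)
	return $table->[32*$k + $idx];
}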
90 .extern OPENSSL_ia32cap_P
92 .globl bn_mul_mont_gather5
93 .type bn_mul_mont_gather5,\@function,6
99 $code.=<<___ if ($addx);
100 mov OPENSSL_ia32cap_P+8(%rip),%r11d
109 movd `($win64?56:8)`(%rsp),%xmm5 # load 7th argument
120 lea -264(%rsp,%r11,8),%rsp # tp=alloca(8*(num+2)+256+8)
121 and \$-1024,%rsp # minimize TLB usage
123 mov %rax,8(%rsp,$num,8) # tp[num+1]=%rsp
125 # An OS-agnostic version of __chkstk.
127 # Some OSes (Windows) insist on the stack being "wired" to
128 # physical memory in a strictly sequential manner, i.e. if a stack
129 # allocation spans two pages, then a reference to the farther one
130 # can be punished with a SEGV. But page walking does good even on
131 # other OSes, because it guarantees that a hostile thread hits
132 # the guard page before it can damage an innocent one...
138 .byte 0x2e # predict non-taken
141 lea 128($bp),%r12 # reassign $bp (+size optimization)
144 $STRIDE=2**5*8; # 5 is "window size"
145 $N=$STRIDE/4; # should match cache line size
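# The pcmpeqd sequence below builds a 256-byte selection mask on the stack:
# one 8-byte slot per table column, all-ones where the column number equals
# the broadcast index and zero elsewhere. A hedged scalar model of the values
# that end up in memory (illustrative only, not used by the generated code):

sub build_mask_ref {
	my ($idx) = @_;				# secret index, 0 .. 31
	return [ map { $_ == $idx ? 0xffffffffffffffff : 0 } 0 .. 31 ];
}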
147 movdqa 0(%r10),%xmm0 # 00000001000000010000000000000000
148 movdqa 16(%r10),%xmm1 # 00000002000000020000000200000002
149 lea 24-112(%rsp,$num,8),%r10# place the mask after tp[num+3] (+ICache optimization)
152 pshufd \$0,%xmm5,%xmm5 # broadcast index
156 ########################################################################
157 # calculate mask by comparing 0..31 to index and save result to stack
161 pcmpeqd %xmm5,%xmm0 # compare to 1,0
165 for($k=0;$k<$STRIDE/16-4;$k+=4) {
168 pcmpeqd %xmm5,%xmm1 # compare to 3,2
169 movdqa %xmm0,`16*($k+0)+112`(%r10)
173 pcmpeqd %xmm5,%xmm2 # compare to 5,4
174 movdqa %xmm1,`16*($k+1)+112`(%r10)
178 pcmpeqd %xmm5,%xmm3 # compare to 7,6
179 movdqa %xmm2,`16*($k+2)+112`(%r10)
184 movdqa %xmm3,`16*($k+3)+112`(%r10)
188 $code.=<<___; # last iteration can be optimized
191 movdqa %xmm0,`16*($k+0)+112`(%r10)
196 movdqa %xmm1,`16*($k+1)+112`(%r10)
199 movdqa %xmm2,`16*($k+2)+112`(%r10)
200 pand `16*($k+0)-128`($bp),%xmm0 # while it's still in register
202 pand `16*($k+1)-128`($bp),%xmm1
203 pand `16*($k+2)-128`($bp),%xmm2
204 movdqa %xmm3,`16*($k+3)+112`(%r10)
205 pand `16*($k+3)-128`($bp),%xmm3
209 for($k=0;$k<$STRIDE/16-4;$k+=4) {
211 movdqa `16*($k+0)-128`($bp),%xmm4
212 movdqa `16*($k+1)-128`($bp),%xmm5
213 movdqa `16*($k+2)-128`($bp),%xmm2
214 pand `16*($k+0)+112`(%r10),%xmm4
215 movdqa `16*($k+3)-128`($bp),%xmm3
216 pand `16*($k+1)+112`(%r10),%xmm5
218 pand `16*($k+2)+112`(%r10),%xmm2
220 pand `16*($k+3)+112`(%r10),%xmm3
227 pshufd \$0x4e,%xmm0,%xmm1
230 movq %xmm0,$m0 # m0=bp[0]
232 mov ($n0),$n0 # pull n0[0] value
239 mulq $m0 # ap[0]*bp[0]
243 imulq $lo0,$m1 # "tp[0]"*n0
247 add %rax,$lo0 # discarded
260 add $hi0,$hi1 # np[j]*m1+ap[j]*bp[0]
263 mov $hi1,-16(%rsp,$j,8) # tp[j-1]
267 mulq $m0 # ap[j]*bp[0]
276 jne .L1st # note that upon exit $j==$num, so
277 # they can be used interchangeably
281 add $hi0,$hi1 # np[j]*m1+ap[j]*bp[0]
283 mov $hi1,-16(%rsp,$num,8) # tp[num-1]
290 mov $hi1,-8(%rsp,$num,8)
291 mov %rdx,(%rsp,$num,8) # store upmost overflow bit
297 lea 24+128(%rsp,$num,8),%rdx # where 256-byte mask is (+size optimization)
302 for($k=0;$k<$STRIDE/16;$k+=4) {
304 movdqa `16*($k+0)-128`($bp),%xmm0
305 movdqa `16*($k+1)-128`($bp),%xmm1
306 movdqa `16*($k+2)-128`($bp),%xmm2
307 movdqa `16*($k+3)-128`($bp),%xmm3
308 pand `16*($k+0)-128`(%rdx),%xmm0
309 pand `16*($k+1)-128`(%rdx),%xmm1
311 pand `16*($k+2)-128`(%rdx),%xmm2
313 pand `16*($k+3)-128`(%rdx),%xmm3
320 pshufd \$0x4e,%xmm4,%xmm0
324 mov ($ap),%rax # ap[0]
325 movq %xmm0,$m0 # m0=bp[i]
331 mulq $m0 # ap[0]*bp[i]
332 add %rax,$lo0 # ap[0]*bp[i]+tp[0]
336 imulq $lo0,$m1 # tp[0]*n0
340 add %rax,$lo0 # discarded
343 mov 8(%rsp),$lo0 # tp[1]
354 add $lo0,$hi1 # np[j]*m1+ap[j]*bp[i]+tp[j]
357 mov $hi1,-16(%rsp,$j,8) # tp[j-1]
361 mulq $m0 # ap[j]*bp[i]
365 add $hi0,$lo0 # ap[j]*bp[i]+tp[j]
372 jne .Linner # note that upon exit $j==$num, so
373 # they can be used interchangeably
376 add $lo0,$hi1 # np[j]*m1+ap[j]*bp[i]+tp[j]
377 mov (%rsp,$num,8),$lo0
379 mov $hi1,-16(%rsp,$num,8) # tp[num-1]
385 add $lo0,$hi1 # pull upmost overflow bit
387 mov $hi1,-8(%rsp,$num,8)
388 mov %rdx,(%rsp,$num,8) # store upmost overflow bit
394 xor $i,$i # i=0 and clear CF!
395 mov (%rsp),%rax # tp[0]
396 lea (%rsp),$ap # borrow ap for tp
400 .Lsub: sbb ($np,$i,8),%rax
401 mov %rax,($rp,$i,8) # rp[i]=tp[i]-np[i]
402 mov 8($ap,$i,8),%rax # tp[i+1]
404 dec $j # doesn't affect CF!
407 sbb \$0,%rax # handle upmost overflow bit
414 or $np,$ap # ap=borrow?tp:rp
416 .Lcopy: # copy or in-place refresh
418 mov $i,(%rsp,$i,8) # zap temporary vector
419 mov %rax,($rp,$i,8) # rp[i]=tp[i]
424 mov 8(%rsp,$num,8),%rsi # restore %rsp
436 .size bn_mul_mont_gather5,.-bn_mul_mont_gather5
439 my @A=("%r10","%r11");
440 my @N=("%r13","%rdi");
442 .type bn_mul4x_mont_gather5,\@function,6
444 bn_mul4x_mont_gather5:
447 $code.=<<___ if ($addx);
449 cmp \$0x80108,%r11d # check for AD*X+BMI2+BMI1
463 shl \$3,${num}d # convert $num to bytes
464 lea ($num,$num,2),%r10 # 3*$num in bytes
467 ##############################################################
468 # Ensure that stack frame doesn't alias with $rptr+3*$num
469 # modulo 4096, which covers ret[num], am[num] and n[num]
470 # (see bn_exp.c). This is done to allow the memory disambiguation
471 # logic to do its magic. [An extra [num] is allocated in order
472 # to align with bn_power5's frame, which is cleansed after
473 # completing the exponentiation. An extra 256 bytes is for the power
474 # mask calculated from the 7th argument, the index.]
476 lea -320(%rsp,$num,2),%r11
481 sub %r11,%rsp # align with $rp
482 lea -320(%rsp,$num,2),%rsp # alloca(frame+2*num*8+256)
487 lea 4096-320(,$num,2),%r10
488 lea -320(%rsp,$num,2),%rsp # alloca(frame+2*num*8+256)
501 .byte 0x2e # predict non-taken
502 jnc .Lmul4x_page_walk
511 mov 40(%rsp),%rsi # restore %rsp
523 .size bn_mul4x_mont_gather5,.-bn_mul4x_mont_gather5
525 .type mul4x_internal,\@abi-omnipotent
528 shl \$5,$num # $num was in bytes
529 movd `($win64?56:8)`(%rax),%xmm5 # load 7th argument, index
531 lea 128(%rdx,$num),%r13 # end of powers table (+size optimization)
532 shr \$5,$num # restore $num
535 $STRIDE=2**5*8; # 5 is "window size"
536 $N=$STRIDE/4; # should match cache line size
539 movdqa 0(%rax),%xmm0 # 00000001000000010000000000000000
540 movdqa 16(%rax),%xmm1 # 00000002000000020000000200000002
541 lea 88-112(%rsp,$num),%r10 # place the mask after tp[num+1] (+ICache optimization)
542 lea 128(%rdx),$bp # size optimization
544 pshufd \$0,%xmm5,%xmm5 # broadcast index
549 ########################################################################
550 # calculate mask by comparing 0..31 to index and save result to stack
554 pcmpeqd %xmm5,%xmm0 # compare to 1,0
558 for($i=0;$i<$STRIDE/16-4;$i+=4) {
561 pcmpeqd %xmm5,%xmm1 # compare to 3,2
562 movdqa %xmm0,`16*($i+0)+112`(%r10)
566 pcmpeqd %xmm5,%xmm2 # compare to 5,4
567 movdqa %xmm1,`16*($i+1)+112`(%r10)
571 pcmpeqd %xmm5,%xmm3 # compare to 7,6
572 movdqa %xmm2,`16*($i+2)+112`(%r10)
577 movdqa %xmm3,`16*($i+3)+112`(%r10)
581 $code.=<<___; # last iteration can be optimized
584 movdqa %xmm0,`16*($i+0)+112`(%r10)
589 movdqa %xmm1,`16*($i+1)+112`(%r10)
592 movdqa %xmm2,`16*($i+2)+112`(%r10)
593 pand `16*($i+0)-128`($bp),%xmm0 # while it's still in register
595 pand `16*($i+1)-128`($bp),%xmm1
596 pand `16*($i+2)-128`($bp),%xmm2
597 movdqa %xmm3,`16*($i+3)+112`(%r10)
598 pand `16*($i+3)-128`($bp),%xmm3
602 for($i=0;$i<$STRIDE/16-4;$i+=4) {
604 movdqa `16*($i+0)-128`($bp),%xmm4
605 movdqa `16*($i+1)-128`($bp),%xmm5
606 movdqa `16*($i+2)-128`($bp),%xmm2
607 pand `16*($i+0)+112`(%r10),%xmm4
608 movdqa `16*($i+3)-128`($bp),%xmm3
609 pand `16*($i+1)+112`(%r10),%xmm5
611 pand `16*($i+2)+112`(%r10),%xmm2
613 pand `16*($i+3)+112`(%r10),%xmm3
620 pshufd \$0x4e,%xmm0,%xmm1
623 movq %xmm0,$m0 # m0=bp[0]
625 mov %r13,16+8(%rsp) # save end of b[num]
626 mov $rp, 56+8(%rsp) # save $rp
628 mov ($n0),$n0 # pull n0[0] value
630 lea ($ap,$num),$ap # end of a[num]
634 mulq $m0 # ap[0]*bp[0]
638 imulq $A[0],$m1 # "tp[0]"*n0
643 add %rax,$A[0] # discarded
656 mov 16($ap,$num),%rax
659 lea 4*8($num),$j # j=4
668 mulq $m0 # ap[j]*bp[0]
679 add $A[0],$N[0] # np[j]*m1+ap[j]*bp[0]
681 mov $N[0],-24($tp) # tp[j-1]
684 mulq $m0 # ap[j]*bp[0]
694 add $A[1],$N[1] # np[j]*m1+ap[j]*bp[0]
696 mov $N[1],-16($tp) # tp[j-1]
699 mulq $m0 # ap[j]*bp[0]
709 add $A[0],$N[0] # np[j]*m1+ap[j]*bp[0]
711 mov $N[0],-8($tp) # tp[j-1]
714 mulq $m0 # ap[j]*bp[0]
724 add $A[1],$N[1] # np[j]*m1+ap[j]*bp[0]
727 mov $N[1],($tp) # tp[j-1]
733 mulq $m0 # ap[j]*bp[0]
744 add $A[0],$N[0] # np[j]*m1+ap[j]*bp[0]
746 mov $N[0],-24($tp) # tp[j-1]
749 mulq $m0 # ap[j]*bp[0]
757 mov ($ap,$num),%rax # ap[0]
759 add $A[1],$N[1] # np[j]*m1+ap[j]*bp[0]
761 mov $N[1],-16($tp) # tp[j-1]
764 lea ($np,$num),$np # rewind $np
775 lea 16+128($tp),%rdx # where 256-byte mask is (+size optimization)
779 for($i=0;$i<$STRIDE/16;$i+=4) {
781 movdqa `16*($i+0)-128`($bp),%xmm0
782 movdqa `16*($i+1)-128`($bp),%xmm1
783 movdqa `16*($i+2)-128`($bp),%xmm2
784 movdqa `16*($i+3)-128`($bp),%xmm3
785 pand `16*($i+0)-128`(%rdx),%xmm0
786 pand `16*($i+1)-128`(%rdx),%xmm1
788 pand `16*($i+2)-128`(%rdx),%xmm2
790 pand `16*($i+3)-128`(%rdx),%xmm3
797 pshufd \$0x4e,%xmm4,%xmm0
800 movq %xmm0,$m0 # m0=bp[i]
804 mulq $m0 # ap[0]*bp[i]
805 add %rax,$A[0] # ap[0]*bp[i]+tp[0]
809 imulq $A[0],$m1 # tp[0]*n0
811 mov $N[1],($tp) # store upmost overflow bit
813 lea ($tp,$num),$tp # rewind $tp
816 add %rax,$A[0] # "$N[0]", discarded
821 mulq $m0 # ap[j]*bp[i]
825 add 8($tp),$A[1] # +tp[1]
831 mov 16($ap,$num),%rax
833 add $A[1],$N[1] # np[j]*m1+ap[j]*bp[i]+tp[j]
834 lea 4*8($num),$j # j=4
842 mulq $m0 # ap[j]*bp[i]
846 add 16($tp),$A[0] # ap[j]*bp[i]+tp[j]
857 mov $N[1],-32($tp) # tp[j-1]
860 mulq $m0 # ap[j]*bp[i]
874 mov $N[0],-24($tp) # tp[j-1]
877 mulq $m0 # ap[j]*bp[i]
881 add ($tp),$A[0] # ap[j]*bp[i]+tp[j]
891 mov $N[1],-16($tp) # tp[j-1]
894 mulq $m0 # ap[j]*bp[i]
909 mov $N[0],-8($tp) # tp[j-1]
915 mulq $m0 # ap[j]*bp[i]
919 add 16($tp),$A[0] # ap[j]*bp[i]+tp[j]
930 mov $N[1],-32($tp) # tp[j-1]
933 mulq $m0 # ap[j]*bp[i]
944 mov ($ap,$num),%rax # ap[0]
948 mov $N[0],-24($tp) # tp[j-1]
951 mov $N[1],-16($tp) # tp[j-1]
952 lea ($np,$num),$np # rewind $np
957 add ($tp),$N[0] # pull upmost overflow bit
958 adc \$0,$N[1] # upmost overflow bit
967 sub $N[0],$m1 # compare top-most words
968 adc $j,$j # $j is zero
970 sub $N[1],%rax # %rax=-$N[1]
971 lea ($tp,$num),%rbx # tptr in .sqr4x_sub
973 lea ($np),%rbp # nptr in .sqr4x_sub
976 mov 56+8(%rsp),%rdi # rptr in .sqr4x_sub
977 dec %r12 # so that after 'not' we get -n[0]
982 jmp .Lsqr4x_sub_entry
985 my @ri=("%rax",$bp,$m0,$m1);
989 lea ($tp,$num),$tp # rewind $tp
991 lea ($np,$N[1],8),$np
992 mov 56+8(%rsp),$rp # restore $rp
1001 sbb 16*0($np),@ri[0]
1003 sbb 16*1($np),@ri[1]
1006 sbb 16*2($np),@ri[2]
1008 sbb 16*3($np),@ri[3]
1022 .size mul4x_internal,.-mul4x_internal
1026 ######################################################################
1028 my $rptr="%rdi"; # BN_ULONG *rptr,
1029 my $aptr="%rsi"; # const BN_ULONG *aptr,
1030 my $bptr="%rdx"; # const void *table,
1031 my $nptr="%rcx"; # const BN_ULONG *nptr,
1032 my $n0 ="%r8"; # const BN_ULONG *n0);
1033 my $num ="%r9"; # int num, has to be divisible by 8
1036 my ($i,$j,$tptr)=("%rbp","%rcx",$rptr);
1037 my @A0=("%r10","%r11");
1038 my @A1=("%r12","%r13");
1039 my ($a0,$a1,$ai)=("%r14","%r15","%rbx");
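#
# bn_power5 below performs one 5-bit window step of the constant-time
# exponentiation: five back-to-back Montgomery squarings followed by one
# Montgomery multiplication with the gathered power (see bn_exp.c). A hedged
# big-number model of that step, with plain modular arithmetic standing in
# for the Montgomery-domain operations (illustrative only):

use Math::BigInt;

sub power5_ref {
	my ($acc, $powers, $idx, $n) = @_;	# Math::BigInt; $powers->[$i] = base^$i mod $n, $i = 0..31
	$acc = ($acc * $acc) % $n for 1 .. 5;	# five squarings ...
	return ($acc * $powers->[$idx]) % $n;	# ... then multiply by the gathered power
}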
1043 .type bn_power5,\@function,6
1047 $code.=<<___ if ($addx);
1048 mov OPENSSL_ia32cap_P+8(%rip),%r11d
1050 cmp \$0x80108,%r11d # check for AD*X+BMI2+BMI1
1062 shl \$3,${num}d # convert $num to bytes
1063 lea ($num,$num,2),%r10d # 3*$num
1067 ##############################################################
1068 # Ensure that stack frame doesn't alias with $rptr+3*$num
1069 # modulo 4096, which covers ret[num], am[num] and n[num]
1070 # (see bn_exp.c). This is done to allow the memory disambiguation
1071 # logic to do its magic. [An extra 256 bytes is for the power mask
1072 # calculated from the 7th argument, the index.]
1074 lea -320(%rsp,$num,2),%r11
1079 sub %r11,%rsp # align with $aptr
1080 lea -320(%rsp,$num,2),%rsp # alloca(frame+2*num*8+256)
1085 lea 4096-320(,$num,2),%r10
1086 lea -320(%rsp,$num,2),%rsp # alloca(frame+2*num*8+256)
1097 mov (%rsp,%r11),%r10
1099 .byte 0x2e # predict non-taken
1105 ##############################################################
1108 # +0 saved $num, used in reduction section
1109 # +8 &t[2*$num], used in reduction section
1115 mov %rax, 40(%rsp) # save original %rsp
1117 movq $rptr,%xmm1 # save $rptr, used in sqr8x
1118 movq $nptr,%xmm2 # save $nptr
1119 movq %r10, %xmm3 # -$num, used in sqr8x
1122 call __bn_sqr8x_internal
1123 call __bn_post4x_internal
1124 call __bn_sqr8x_internal
1125 call __bn_post4x_internal
1126 call __bn_sqr8x_internal
1127 call __bn_post4x_internal
1128 call __bn_sqr8x_internal
1129 call __bn_post4x_internal
1130 call __bn_sqr8x_internal
1131 call __bn_post4x_internal
1141 mov 40(%rsp),%rsi # restore %rsp
1152 .size bn_power5,.-bn_power5
1154 .globl bn_sqr8x_internal
1155 .hidden bn_sqr8x_internal
1156 .type bn_sqr8x_internal,\@abi-omnipotent
1159 __bn_sqr8x_internal:
1160 ##############################################################
1163 # a) multiply-n-add everything but a[i]*a[i];
1164 # b) shift result of a) by 1 to the left and accumulate
1165 # a[i]*a[i] products;
1167 ##############################################################
1233 lea 32(%r10),$i # $i=-($num-32)
1234 lea ($aptr,$num),$aptr # end of a[] buffer, ($aptr,$i)=&ap[2]
1236 mov $num,$j # $j=$num
1238 # comments apply to $num==8 case
1239 mov -32($aptr,$i),$a0 # a[0]
1240 lea 48+8(%rsp,$num,2),$tptr # end of tp[] buffer, &tp[2*$num]
1241 mov -24($aptr,$i),%rax # a[1]
1242 lea -32($tptr,$i),$tptr # end of tp[] window, &tp[2*$num-"$i"]
1243 mov -16($aptr,$i),$ai # a[2]
1247 mov %rax,$A0[0] # a[1]*a[0]
1250 mov $A0[0],-24($tptr,$i) # t[1]
1256 mov $A0[1],-16($tptr,$i) # t[2]
1260 mov -8($aptr,$i),$ai # a[3]
1262 mov %rax,$A1[0] # a[2]*a[1]+t[3]
1268 add %rax,$A0[0] # a[3]*a[0]+a[2]*a[1]+t[3]
1274 mov $A0[0],-8($tptr,$j) # t[3]
1279 mov ($aptr,$j),$ai # a[4]
1281 add %rax,$A1[1] # a[3]*a[1]+t[4]
1287 add %rax,$A0[1] # a[4]*a[0]+a[3]*a[1]+t[4]
1289 mov 8($aptr,$j),$ai # a[5]
1297 add %rax,$A1[0] # a[4]*a[3]+t[5]
1299 mov $A0[1],($tptr,$j) # t[4]
1304 add %rax,$A0[0] # a[5]*a[2]+a[4]*a[3]+t[5]
1306 mov 16($aptr,$j),$ai # a[6]
1313 add %rax,$A1[1] # a[5]*a[3]+t[6]
1315 mov $A0[0],8($tptr,$j) # t[5]
1320 add %rax,$A0[1] # a[6]*a[2]+a[5]*a[3]+t[6]
1322 mov 24($aptr,$j),$ai # a[7]
1330 add %rax,$A1[0] # a[6]*a[5]+t[7]
1332 mov $A0[1],16($tptr,$j) # t[6]
1338 add %rax,$A0[0] # a[7]*a[4]+a[6]*a[5]+t[6]
1344 mov $A0[0],-8($tptr,$j) # t[7]
1356 mov $A1[1],($tptr) # t[8]
1358 mov %rdx,8($tptr) # t[9]
1362 .Lsqr4x_outer: # comments apply to $num==6 case
1363 mov -32($aptr,$i),$a0 # a[0]
1364 lea 48+8(%rsp,$num,2),$tptr # end of tp[] buffer, &tp[2*$num]
1365 mov -24($aptr,$i),%rax # a[1]
1366 lea -32($tptr,$i),$tptr # end of tp[] window, &tp[2*$num-"$i"]
1367 mov -16($aptr,$i),$ai # a[2]
1371 mov -24($tptr,$i),$A0[0] # t[1]
1372 add %rax,$A0[0] # a[1]*a[0]+t[1]
1375 mov $A0[0],-24($tptr,$i) # t[1]
1382 add -16($tptr,$i),$A0[1] # a[2]*a[0]+t[2]
1385 mov $A0[1],-16($tptr,$i) # t[2]
1389 mov -8($aptr,$i),$ai # a[3]
1391 add %rax,$A1[0] # a[2]*a[1]+t[3]
1394 add -8($tptr,$i),$A1[0]
1399 add %rax,$A0[0] # a[3]*a[0]+a[2]*a[1]+t[3]
1405 mov $A0[0],-8($tptr,$i) # t[3]
1412 mov ($aptr,$j),$ai # a[4]
1414 add %rax,$A1[1] # a[3]*a[1]+t[4]
1418 add ($tptr,$j),$A1[1]
1423 add %rax,$A0[1] # a[4]*a[0]+a[3]*a[1]+t[4]
1425 mov 8($aptr,$j),$ai # a[5]
1432 add %rax,$A1[0] # a[4]*a[3]+t[5]
1433 mov $A0[1],($tptr,$j) # t[4]
1437 add 8($tptr,$j),$A1[0]
1442 add %rax,$A0[0] # a[5]*a[2]+a[4]*a[3]+t[5]
1448 mov $A0[0],-8($tptr,$j) # t[5], "preloaded t[1]" below
1460 mov $A1[1],($tptr) # t[6], "preloaded t[2]" below
1462 mov %rdx,8($tptr) # t[7], "preloaded t[3]" below
1467 # comments apply to $num==4 case
1468 mov -32($aptr),$a0 # a[0]
1469 lea 48+8(%rsp,$num,2),$tptr # end of tp[] buffer, &tp[2*$num]
1470 mov -24($aptr),%rax # a[1]
1471 lea -32($tptr,$i),$tptr # end of tp[] window, &tp[2*$num-"$i"]
1472 mov -16($aptr),$ai # a[2]
1476 add %rax,$A0[0] # a[1]*a[0]+t[1], preloaded t[1]
1484 mov $A0[0],-24($tptr) # t[1]
1487 add $A1[1],$A0[1] # a[2]*a[0]+t[2], preloaded t[2]
1488 mov -8($aptr),$ai # a[3]
1492 add %rax,$A1[0] # a[2]*a[1]+t[3], preloaded t[3]
1494 mov $A0[1],-16($tptr) # t[2]
1499 add %rax,$A0[0] # a[3]*a[0]+a[2]*a[1]+t[3]
1505 mov $A0[0],-8($tptr) # t[3]
1509 mov -16($aptr),%rax # a[2]
1514 mov $A1[1],($tptr) # t[4]
1516 mov %rdx,8($tptr) # t[5]
1521 my ($shift,$carry)=($a0,$a1);
1522 my @S=(@A1,$ai,$n0);
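#
# The shift-and-add pass below is step (b) of the strategy described at the
# top of __bn_sqr8x_internal: the accumulated cross products are doubled and
# the a[i]*a[i] diagonal is folded in. A hedged limb-level model of the whole
# decomposition (illustrative only):

use Math::BigInt;

sub sqr_ref {				# a^2 = 2*sum(a[i]*a[j]*B^(i+j), i<j) + sum(a[i]^2*B^(2i))
	my @a = map { Math::BigInt->new($_) } @_;	# little-endian limbs
	my $B = Math::BigInt->new(2)->bpow(64);		# limb base
	my ($cross, $diag) = (Math::BigInt->new(0), Math::BigInt->new(0));
	for my $i (0 .. $#a) {
		$diag += $a[$i] * $a[$i] * $B->copy->bpow(2*$i);
		for my $j ($i + 1 .. $#a) {
			$cross += $a[$i] * $a[$j] * $B->copy->bpow($i + $j);	# step (a)
		}
	}
	return $cross * 2 + $diag;	# step (b): shift left by 1, add the diagonal
}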
1526 sub $num,$i # $i=16-$num
1529 add $A1[0],%rax # t[5]
1531 mov %rax,8($tptr) # t[5]
1532 mov %rdx,16($tptr) # t[6]
1533 mov $carry,24($tptr) # t[7]
1535 mov -16($aptr,$i),%rax # a[0]
1536 lea 48+8(%rsp),$tptr
1537 xor $A0[0],$A0[0] # t[0]
1538 mov 8($tptr),$A0[1] # t[1]
1540 lea ($shift,$A0[0],2),$S[0] # t[2*i]<<1 | shift
1542 lea ($j,$A0[1],2),$S[1] # t[2*i+1]<<1 |
1544 or $A0[0],$S[1] # | t[2*i]>>63
1545 mov 16($tptr),$A0[0] # t[2*i+2] # prefetch
1546 mov $A0[1],$shift # shift=t[2*i+1]>>63
1547 mul %rax # a[i]*a[i]
1548 neg $carry # mov $carry,cf
1549 mov 24($tptr),$A0[1] # t[2*i+2+1] # prefetch
1551 mov -8($aptr,$i),%rax # a[i+1] # prefetch
1555 lea ($shift,$A0[0],2),$S[2] # t[2*i]<<1 | shift
1557 sbb $carry,$carry # mov cf,$carry
1559 lea ($j,$A0[1],2),$S[3] # t[2*i+1]<<1 |
1561 or $A0[0],$S[3] # | t[2*i]>>63
1562 mov 32($tptr),$A0[0] # t[2*i+2] # prefetch
1563 mov $A0[1],$shift # shift=t[2*i+1]>>63
1564 mul %rax # a[i]*a[i]
1565 neg $carry # mov $carry,cf
1566 mov 40($tptr),$A0[1] # t[2*i+2+1] # prefetch
1568 mov 0($aptr,$i),%rax # a[i+1] # prefetch
1573 sbb $carry,$carry # mov cf,$carry
1575 jmp .Lsqr4x_shift_n_add
1578 .Lsqr4x_shift_n_add:
1579 lea ($shift,$A0[0],2),$S[0] # t[2*i]<<1 | shift
1581 lea ($j,$A0[1],2),$S[1] # t[2*i+1]<<1 |
1583 or $A0[0],$S[1] # | t[2*i]>>63
1584 mov -16($tptr),$A0[0] # t[2*i+2] # prefetch
1585 mov $A0[1],$shift # shift=t[2*i+1]>>63
1586 mul %rax # a[i]*a[i]
1587 neg $carry # mov $carry,cf
1588 mov -8($tptr),$A0[1] # t[2*i+2+1] # prefetch
1590 mov -8($aptr,$i),%rax # a[i+1] # prefetch
1591 mov $S[0],-32($tptr)
1594 lea ($shift,$A0[0],2),$S[2] # t[2*i]<<1 | shift
1595 mov $S[1],-24($tptr)
1596 sbb $carry,$carry # mov cf,$carry
1598 lea ($j,$A0[1],2),$S[3] # t[2*i+1]<<1 |
1600 or $A0[0],$S[3] # | t[2*i]>>63
1601 mov 0($tptr),$A0[0] # t[2*i+2] # prefetch
1602 mov $A0[1],$shift # shift=t[2*i+1]>>63
1603 mul %rax # a[i]*a[i]
1604 neg $carry # mov $carry,cf
1605 mov 8($tptr),$A0[1] # t[2*i+2+1] # prefetch
1607 mov 0($aptr,$i),%rax # a[i+1] # prefetch
1608 mov $S[2],-16($tptr)
1611 lea ($shift,$A0[0],2),$S[0] # t[2*i]<<1 | shift
1613 sbb $carry,$carry # mov cf,$carry
1615 lea ($j,$A0[1],2),$S[1] # t[2*i+1]<<1 |
1617 or $A0[0],$S[1] # | t[2*i]>>63
1618 mov 16($tptr),$A0[0] # t[2*i+2] # prefetch
1619 mov $A0[1],$shift # shift=t[2*i+1]>>63
1620 mul %rax # a[i]*a[i]
1621 neg $carry # mov $carry,cf
1622 mov 24($tptr),$A0[1] # t[2*i+2+1] # prefetch
1624 mov 8($aptr,$i),%rax # a[i+1] # prefetch
1628 lea ($shift,$A0[0],2),$S[2] # t[2*i]<<1 | shift
1630 sbb $carry,$carry # mov cf,$carry
1632 lea ($j,$A0[1],2),$S[3] # t[2*i+1]<<1 |
1634 or $A0[0],$S[3] # | t[2*i]>>63
1635 mov 32($tptr),$A0[0] # t[2*i+2] # prefetch
1636 mov $A0[1],$shift # shift=t[2*i+1]>>63
1637 mul %rax # a[i]*a[i]
1638 neg $carry # mov $carry,cf
1639 mov 40($tptr),$A0[1] # t[2*i+2+1] # prefetch
1641 mov 16($aptr,$i),%rax # a[i+1] # prefetch
1645 sbb $carry,$carry # mov cf,$carry
1648 jnz .Lsqr4x_shift_n_add
1650 lea ($shift,$A0[0],2),$S[0] # t[2*i]<<1 | shift
1653 lea ($j,$A0[1],2),$S[1] # t[2*i+1]<<1 |
1655 or $A0[0],$S[1] # | t[2*i]>>63
1656 mov -16($tptr),$A0[0] # t[2*i+2] # prefetch
1657 mov $A0[1],$shift # shift=t[2*i+1]>>63
1658 mul %rax # a[i]*a[i]
1659 neg $carry # mov $carry,cf
1660 mov -8($tptr),$A0[1] # t[2*i+2+1] # prefetch
1662 mov -8($aptr),%rax # a[i+1] # prefetch
1663 mov $S[0],-32($tptr)
1666 lea ($shift,$A0[0],2),$S[2] # t[2*i]<<1|shift
1667 mov $S[1],-24($tptr)
1668 sbb $carry,$carry # mov cf,$carry
1670 lea ($j,$A0[1],2),$S[3] # t[2*i+1]<<1 |
1672 or $A0[0],$S[3] # | t[2*i]>>63
1673 mul %rax # a[i]*a[i]
1674 neg $carry # mov $carry,cf
1677 mov $S[2],-16($tptr)
1681 ######################################################################
1682 # Montgomery reduction part, "word-by-word" algorithm.
1684 # This new path is inspired by multiple submissions from Intel, by
1685 # Shay Gueron, Vlad Krasnov, Erdinc Ozturk, James Guilford,
1688 my ($nptr,$tptr,$carry,$m0)=("%rbp","%rdi","%rsi","%rbx");
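#
# A hedged scalar model of the word-by-word reduction implemented below
# (illustrative only): in every step a multiple of n is added so that the
# lowest limb of t becomes zero, then t is shifted down by one limb.

use Math::BigInt;

sub mont_reduce_ref {
	my ($t, $n, $n0, $num) = @_;		# Math::BigInt $t < $n*2^(64*$num), $n0 = -1/n mod 2^64
	my $B = Math::BigInt->new(2)->bpow(64);
	for (1 .. $num) {
		my $m = ($t % $B) * $n0 % $B;	# m = t[0]*n0 mod 2^64
		$t = ($t + $m * $n) / $B;	# low limb is now zero; drop it
	}
	$t -= $n if $t >= $n;			# final conditional subtraction
	return $t;				# t*2^(-64*$num) mod n
}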
1692 __bn_sqr8x_reduction:
1694 lea ($nptr,$num),%rcx # end of n[]
1695 lea 48+8(%rsp,$num,2),%rdx # end of t[] buffer
1697 lea 48+8(%rsp,$num),$tptr # end of initial t[] window
1700 jmp .L8x_reduction_loop
1703 .L8x_reduction_loop:
1704 lea ($tptr,$num),$tptr # start of current t[] window
1714 mov %rax,(%rdx) # store top-most carry bit
1715 lea 8*8($tptr),$tptr
1719 imulq 32+8(%rsp),$m0 # n0*a[0]
1720 mov 8*0($nptr),%rax # n[0]
1727 mov 8*1($nptr),%rax # n[1]
1737 mov $m0,48-8+8(%rsp,%rcx,8) # put aside n0*a[i]
1746 mov 32+8(%rsp),$carry # pull n0, borrow $carry
1754 imulq %r8,$carry # modulo-scheduled
1784 mov $carry,$m0 # n0*a[i]
1786 mov 8*0($nptr),%rax # n[0]
1795 lea 8*8($nptr),$nptr
1797 mov 8+8(%rsp),%rdx # pull end of t[]
1798 cmp 0+8(%rsp),$nptr # end of n[]?
1810 sbb $carry,$carry # top carry
1812 mov 48+56+8(%rsp),$m0 # pull n0*a[0]
1822 mov %r8,($tptr) # save result
1831 lea 8($tptr),$tptr # $tptr++
1876 mov 48-16+8(%rsp,%rcx,8),$m0# pull n0*a[i]
1880 mov 8*0($nptr),%rax # pull n[0]
1887 lea 8*8($nptr),$nptr
1888 mov 8+8(%rsp),%rdx # pull end of t[]
1889 cmp 0+8(%rsp),$nptr # end of n[]?
1890 jae .L8x_tail_done # break out of loop
1892 mov 48+56+8(%rsp),$m0 # pull n0*a[0]
1894 mov 8*0($nptr),%rax # pull n[0]
1903 sbb $carry,$carry # top carry
1910 add (%rdx),%r8 # can this overflow?
1917 adc \$0,%r15 # can't overflow, because we
1918 # started with "overhung" part
1932 adc \$0,%rax # top-most carry
1933 mov -8($nptr),%rcx # np[num-1]
1936 movq %xmm2,$nptr # restore $nptr
1938 mov %r8,8*0($tptr) # store top 512 bits
1940 movq %xmm3,$num # $num is %r9, can't be moved upwards
1947 lea 8*8($tptr),$tptr
1949 cmp %rdx,$tptr # end of t[]?
1950 jb .L8x_reduction_loop
1952 .size bn_sqr8x_internal,.-bn_sqr8x_internal
1955 ##############################################################
1956 # Post-condition, 4x unrolled
1959 my ($tptr,$nptr)=("%rbx","%rbp");
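#
# The 4x post-condition below subtracts the modulus and then picks either t
# or t-n with a borrow-derived mask, so no branch depends on the comparison.
# A hedged model of that select (illustrative only; whole-number masking
# stands in for the limb-by-limb version in the assembly):

use Math::BigInt;

sub cond_sub_ref {
	my ($t, $n, $num) = @_;			# Math::BigInt values, 0 <= $t < 2*$n
	my $modulus = Math::BigInt->new(2)->bpow(64*$num);
	my $ones    = $modulus - 1;
	my $diff    = $t - $n;
	my $mask = $diff->is_neg ? $ones->copy : Math::BigInt->new(0);	# borrow -> all-ones
	my $d    = $diff->copy->bmod($modulus);	# t-n wrapped to 64*$num bits
	return ($t & $mask) | ($d & ($mask ^ $ones));	# keep t on borrow, else t-n
}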
1961 .type __bn_post4x_internal,\@abi-omnipotent
1963 __bn_post4x_internal:
1965 lea (%rdi,$num),$tptr # %rdi was $tptr above
1967 movq %xmm1,$rptr # restore $rptr
1969 movq %xmm1,$aptr # prepare for back-to-back call
1971 dec %r12 # so that after 'not' we get -n[0]
1976 jmp .Lsqr4x_sub_entry
1985 lea 8*4($nptr),$nptr
1995 neg %r10 # mov %r10,%cf
2001 lea 8*4($tptr),$tptr
2003 sbb %r10,%r10 # mov %cf,%r10
2006 lea 8*4($rptr),$rptr
2011 mov $num,%r10 # prepare for back-to-back call
2012 neg $num # restore $num
2014 .size __bn_post4x_internal,.-__bn_post4x_internal
2019 .globl bn_from_montgomery
2020 .type bn_from_montgomery,\@abi-omnipotent
2023 testl \$7,`($win64?"48(%rsp)":"%r9d")`
2027 .size bn_from_montgomery,.-bn_from_montgomery
2029 .type bn_from_mont8x,\@function,6
2041 shl \$3,${num}d # convert $num to bytes
2042 lea ($num,$num,2),%r10 # 3*$num in bytes
2046 ##############################################################
2047 # Ensure that stack frame doesn't alias with $rptr+3*$num
2048 # modulo 4096, which covers ret[num], am[num] and n[num]
2049 # (see bn_exp.c). The stack frame is allocated so as to align with
2050 # bn_power5's frame, and as bn_from_montgomery happens to be the
2051 # last operation, we use the opportunity to cleanse it.
2053 lea -320(%rsp,$num,2),%r11
2058 sub %r11,%rsp # align with $aptr
2059 lea -320(%rsp,$num,2),%rsp # alloca(frame+2*$num*8+256)
2064 lea 4096-320(,$num,2),%r10
2065 lea -320(%rsp,$num,2),%rsp # alloca(frame+2*$num*8+256)
2076 mov (%rsp,%r11),%r10
2078 .byte 0x2e # predict non-taken
2079 jnc .Lfrom_page_walk
2084 ##############################################################
2087 # +0 saved $num, used in reduction section
2088 # +8 &t[2*$num], used in reduction section
2094 mov %rax, 40(%rsp) # save original %rsp
2103 movdqu ($aptr),%xmm1
2104 movdqu 16($aptr),%xmm2
2105 movdqu 32($aptr),%xmm3
2106 movdqa %xmm0,(%rax,$num)
2107 movdqu 48($aptr),%xmm4
2108 movdqa %xmm0,16(%rax,$num)
2109 .byte 0x48,0x8d,0xb6,0x40,0x00,0x00,0x00 # lea 64($aptr),$aptr
2111 movdqa %xmm0,32(%rax,$num)
2112 movdqa %xmm2,16(%rax)
2113 movdqa %xmm0,48(%rax,$num)
2114 movdqa %xmm3,32(%rax)
2115 movdqa %xmm4,48(%rax)
2124 movq %r10, %xmm3 # -num
2126 $code.=<<___ if ($addx);
2127 mov OPENSSL_ia32cap_P+8(%rip),%r11d
2129 cmp \$0x80108,%r11d # check for AD*X+BMI2+BMI1
2132 lea (%rax,$num),$rptr
2133 call __bn_sqrx8x_reduction
2134 call __bn_postx4x_internal
2138 mov 40(%rsp),%rsi # restore %rsp
2139 jmp .Lfrom_mont_zero
2145 call __bn_sqr8x_reduction
2146 call __bn_post4x_internal
2150 mov 40(%rsp),%rsi # restore %rsp
2151 jmp .Lfrom_mont_zero
2155 movdqa %xmm0,16*0(%rax)
2156 movdqa %xmm0,16*1(%rax)
2157 movdqa %xmm0,16*2(%rax)
2158 movdqa %xmm0,16*3(%rax)
2161 jnz .Lfrom_mont_zero
2173 .size bn_from_mont8x,.-bn_from_mont8x
2179 my $bp="%rdx"; # restore original value
2182 .type bn_mulx4x_mont_gather5,\@function,6
2184 bn_mulx4x_mont_gather5:
2194 shl \$3,${num}d # convert $num to bytes
2195 lea ($num,$num,2),%r10 # 3*$num in bytes
2199 ##############################################################
2200 # Ensure that stack frame doesn't alias with $rptr+3*$num
2201 # modulo 4096, which covers ret[num], am[num] and n[num]
2202 # (see bn_exp.c). This is done to allow the memory disambiguation
2203 # logic to do its magic. [An extra [num] is allocated in order
2204 # to align with bn_power5's frame, which is cleansed after
2205 # completing the exponentiation. An extra 256 bytes is for the power
2206 # mask calculated from the 7th argument, the index.]
2208 lea -320(%rsp,$num,2),%r11
2213 sub %r11,%rsp # align with $aptr
2214 lea -320(%rsp,$num,2),%rsp # alloca(frame+2*$num*8+256)
2218 lea 4096-320(,$num,2),%r10
2219 lea -320(%rsp,$num,2),%rsp # alloca(frame+2*$num*8+256)
2225 and \$-64,%rsp # ensure alignment
2230 mov (%rsp,%r11),%r10
2232 .byte 0x2e # predict non-taken
2233 jnc .Lmulx4x_page_walk
2235 ##############################################################
2238 # +8 off-loaded &b[i]
2247 mov $n0, 32(%rsp) # save *n0
2248 mov %rax,40(%rsp) # save original %rsp
2250 call mulx4x_internal
2252 mov 40(%rsp),%rsi # restore %rsp
2264 .size bn_mulx4x_mont_gather5,.-bn_mulx4x_mont_gather5
2266 .type mulx4x_internal,\@abi-omnipotent
2269 mov $num,8(%rsp) # save -$num (it was in bytes)
2271 neg $num # restore $num
2273 neg %r10 # restore $num
2274 lea 128($bp,$num),%r13 # end of powers table (+size optimization)
2276 movd `($win64?56:8)`(%rax),%xmm5 # load 7th argument
2278 lea .Linc(%rip),%rax
2279 mov %r13,16+8(%rsp) # end of b[num]
2280 mov $num,24+8(%rsp) # inner counter
2281 mov $rp, 56+8(%rsp) # save $rp
2283 my ($aptr, $bptr, $nptr, $tptr, $mi, $bi, $zero, $num)=
2284 ("%rsi","%rdi","%rcx","%rbx","%r8","%r9","%rbp","%rax");
2286 my $STRIDE=2**5*8; # 5 is "window size"
2287 my $N=$STRIDE/4; # should match cache line size
2289 movdqa 0(%rax),%xmm0 # 00000001000000010000000000000000
2290 movdqa 16(%rax),%xmm1 # 00000002000000020000000200000002
2291 lea 88-112(%rsp,%r10),%r10 # place the mask after tp[num+1] (+ICache optimization)
2292 lea 128($bp),$bptr # size optimization
2294 pshufd \$0,%xmm5,%xmm5 # broadcast index
2299 ########################################################################
2300 # calculate mask by comparing 0..31 to index and save result to stack
2305 pcmpeqd %xmm5,%xmm0 # compare to 1,0
2308 for($i=0;$i<$STRIDE/16-4;$i+=4) {
2311 pcmpeqd %xmm5,%xmm1 # compare to 3,2
2312 movdqa %xmm0,`16*($i+0)+112`(%r10)
2316 pcmpeqd %xmm5,%xmm2 # compare to 5,4
2317 movdqa %xmm1,`16*($i+1)+112`(%r10)
2321 pcmpeqd %xmm5,%xmm3 # compare to 7,6
2322 movdqa %xmm2,`16*($i+2)+112`(%r10)
2327 movdqa %xmm3,`16*($i+3)+112`(%r10)
2331 $code.=<<___; # last iteration can be optimized
2335 movdqa %xmm0,`16*($i+0)+112`(%r10)
2339 movdqa %xmm1,`16*($i+1)+112`(%r10)
2342 movdqa %xmm2,`16*($i+2)+112`(%r10)
2344 pand `16*($i+0)-128`($bptr),%xmm0 # while it's still in register
2345 pand `16*($i+1)-128`($bptr),%xmm1
2346 pand `16*($i+2)-128`($bptr),%xmm2
2347 movdqa %xmm3,`16*($i+3)+112`(%r10)
2348 pand `16*($i+3)-128`($bptr),%xmm3
2352 for($i=0;$i<$STRIDE/16-4;$i+=4) {
2354 movdqa `16*($i+0)-128`($bptr),%xmm4
2355 movdqa `16*($i+1)-128`($bptr),%xmm5
2356 movdqa `16*($i+2)-128`($bptr),%xmm2
2357 pand `16*($i+0)+112`(%r10),%xmm4
2358 movdqa `16*($i+3)-128`($bptr),%xmm3
2359 pand `16*($i+1)+112`(%r10),%xmm5
2361 pand `16*($i+2)+112`(%r10),%xmm2
2363 pand `16*($i+3)+112`(%r10),%xmm3
2370 pshufd \$0x4e,%xmm0,%xmm1
2372 lea $STRIDE($bptr),$bptr
2373 movq %xmm0,%rdx # bp[0]
2374 lea 64+8*4+8(%rsp),$tptr
2377 mulx 0*8($aptr),$mi,%rax # a[0]*b[0]
2378 mulx 1*8($aptr),%r11,%r12 # a[1]*b[0]
2380 mulx 2*8($aptr),%rax,%r13 # ...
2383 mulx 3*8($aptr),%rax,%r14
2386 imulq 32+8(%rsp),$mi # "t[0]"*n0
2387 xor $zero,$zero # cf=0, of=0
2390 mov $bptr,8+8(%rsp) # off-load &b[i]
2392 lea 4*8($aptr),$aptr
2394 adcx $zero,%r14 # cf=0
2396 mulx 0*8($nptr),%rax,%r10
2397 adcx %rax,%r15 # discarded
2399 mulx 1*8($nptr),%rax,%r11
2402 mulx 2*8($nptr),%rax,%r12
2403 mov 24+8(%rsp),$bptr # counter value
2404 mov %r10,-8*4($tptr)
2407 mulx 3*8($nptr),%rax,%r15
2409 mov %r11,-8*3($tptr)
2411 adox $zero,%r15 # of=0
2412 lea 4*8($nptr),$nptr
2413 mov %r12,-8*2($tptr)
2418 adcx $zero,%r15 # cf=0, modulo-scheduled
2419 mulx 0*8($aptr),%r10,%rax # a[4]*b[0]
2421 mulx 1*8($aptr),%r11,%r14 # a[5]*b[0]
2423 mulx 2*8($aptr),%r12,%rax # ...
2425 mulx 3*8($aptr),%r13,%r14
2429 adcx $zero,%r14 # cf=0
2430 lea 4*8($aptr),$aptr
2431 lea 4*8($tptr),$tptr
2434 mulx 0*8($nptr),%rax,%r15
2437 mulx 1*8($nptr),%rax,%r15
2440 mulx 2*8($nptr),%rax,%r15
2441 mov %r10,-5*8($tptr)
2443 mov %r11,-4*8($tptr)
2445 mulx 3*8($nptr),%rax,%r15
2447 mov %r12,-3*8($tptr)
2450 lea 4*8($nptr),$nptr
2451 mov %r13,-2*8($tptr)
2453 dec $bptr # of=0, pass cf
2456 mov 8(%rsp),$num # load -num
2457 adc $zero,%r15 # modulo-scheduled
2458 lea ($aptr,$num),$aptr # rewind $aptr
2460 mov 8+8(%rsp),$bptr # re-load &b[i]
2461 adc $zero,$zero # top-most carry
2462 mov %r14,-1*8($tptr)
2467 lea 16-256($tptr),%r10 # where 256-byte mask is (+density control)
2472 for($i=0;$i<$STRIDE/16;$i+=4) {
2474 movdqa `16*($i+0)-128`($bptr),%xmm0
2475 movdqa `16*($i+1)-128`($bptr),%xmm1
2476 movdqa `16*($i+2)-128`($bptr),%xmm2
2477 pand `16*($i+0)+256`(%r10),%xmm0
2478 movdqa `16*($i+3)-128`($bptr),%xmm3
2479 pand `16*($i+1)+256`(%r10),%xmm1
2481 pand `16*($i+2)+256`(%r10),%xmm2
2483 pand `16*($i+3)+256`(%r10),%xmm3
2490 pshufd \$0x4e,%xmm4,%xmm0
2492 lea $STRIDE($bptr),$bptr
2493 movq %xmm0,%rdx # m0=bp[i]
2495 mov $zero,($tptr) # save top-most carry
2496 lea 4*8($tptr,$num),$tptr # rewind $tptr
2497 mulx 0*8($aptr),$mi,%r11 # a[0]*b[i]
2498 xor $zero,$zero # cf=0, of=0
2500 mulx 1*8($aptr),%r14,%r12 # a[1]*b[i]
2501 adox -4*8($tptr),$mi # +t[0]
2503 mulx 2*8($aptr),%r15,%r13 # ...
2504 adox -3*8($tptr),%r11
2506 mulx 3*8($aptr),%rdx,%r14
2507 adox -2*8($tptr),%r12
2509 lea ($nptr,$num),$nptr # rewind $nptr
2510 lea 4*8($aptr),$aptr
2511 adox -1*8($tptr),%r13
2516 imulq 32+8(%rsp),$mi # "t[0]"*n0
2519 xor $zero,$zero # cf=0, of=0
2520 mov $bptr,8+8(%rsp) # off-load &b[i]
2522 mulx 0*8($nptr),%rax,%r10
2523 adcx %rax,%r15 # discarded
2525 mulx 1*8($nptr),%rax,%r11
2528 mulx 2*8($nptr),%rax,%r12
2531 mulx 3*8($nptr),%rax,%r15
2533 mov 24+8(%rsp),$bptr # counter value
2534 mov %r10,-8*4($tptr)
2536 mov %r11,-8*3($tptr)
2537 adox $zero,%r15 # of=0
2538 mov %r12,-8*2($tptr)
2539 lea 4*8($nptr),$nptr
2544 mulx 0*8($aptr),%r10,%rax # a[4]*b[i]
2545 adcx $zero,%r15 # cf=0, modulo-scheduled
2547 mulx 1*8($aptr),%r11,%r14 # a[5]*b[i]
2548 adcx 0*8($tptr),%r10
2550 mulx 2*8($aptr),%r12,%rax # ...
2551 adcx 1*8($tptr),%r11
2553 mulx 3*8($aptr),%r13,%r14
2555 adcx 2*8($tptr),%r12
2557 adcx 3*8($tptr),%r13
2558 adox $zero,%r14 # of=0
2559 lea 4*8($aptr),$aptr
2560 lea 4*8($tptr),$tptr
2561 adcx $zero,%r14 # cf=0
2564 mulx 0*8($nptr),%rax,%r15
2567 mulx 1*8($nptr),%rax,%r15
2570 mulx 2*8($nptr),%rax,%r15
2571 mov %r10,-5*8($tptr)
2574 mov %r11,-4*8($tptr)
2575 mulx 3*8($nptr),%rax,%r15
2577 lea 4*8($nptr),$nptr
2578 mov %r12,-3*8($tptr)
2581 mov %r13,-2*8($tptr)
2583 dec $bptr # of=0, pass cf
2586 mov 0+8(%rsp),$num # load -num
2587 adc $zero,%r15 # modulo-scheduled
2588 sub 0*8($tptr),$bptr # pull top-most carry to %cf
2589 mov 8+8(%rsp),$bptr # re-load &b[i]
2592 lea ($aptr,$num),$aptr # rewind $aptr
2593 adc $zero,$zero # top-most carry
2594 mov %r14,-1*8($tptr)
2601 mov ($nptr,$num),%r12
2602 lea ($nptr,$num),%rbp # rewind $nptr
2604 lea ($tptr,$num),%rdi # rewind $tptr
2607 sub %r14,%r10 # compare top-most words
2611 sub %r8,%rax # %rax=-%r8
2612 mov 56+8(%rsp),%rdx # restore rp
2613 dec %r12 # so that after 'not' we get -n[0]
2618 jmp .Lsqrx4x_sub_entry # common post-condition
2619 .size mulx4x_internal,.-mulx4x_internal
2622 ######################################################################
2624 my $rptr="%rdi"; # BN_ULONG *rptr,
2625 my $aptr="%rsi"; # const BN_ULONG *aptr,
2626 my $bptr="%rdx"; # const void *table,
2627 my $nptr="%rcx"; # const BN_ULONG *nptr,
2628 my $n0 ="%r8"; # const BN_ULONG *n0);
2629 my $num ="%r9"; # int num, has to be divisible by 8
2632 my ($i,$j,$tptr)=("%rbp","%rcx",$rptr);
2633 my @A0=("%r10","%r11");
2634 my @A1=("%r12","%r13");
2635 my ($a0,$a1,$ai)=("%r14","%r15","%rbx");
2638 .type bn_powerx5,\@function,6
2650 shl \$3,${num}d # convert $num to bytes
2651 lea ($num,$num,2),%r10 # 3*$num in bytes
2655 ##############################################################
2656 # Ensure that stack frame doesn't alias with $rptr+3*$num
2657 # modulo 4096, which covers ret[num], am[num] and n[num]
2658 # (see bn_exp.c). This is done to allow the memory disambiguation
2659 # logic to do its magic. [An extra 256 bytes is for the power mask
2660 # calculated from the 7th argument, the index.]
2662 lea -320(%rsp,$num,2),%r11
2667 sub %r11,%rsp # align with $aptr
2668 lea -320(%rsp,$num,2),%rsp # alloca(frame+2*$num*8+256)
2673 lea 4096-320(,$num,2),%r10
2674 lea -320(%rsp,$num,2),%rsp # alloca(frame+2*$num*8+256)
2685 mov (%rsp,%r11),%r10
2687 .byte 0x2e # predict non-taken
2688 jnc .Lpwrx_page_walk
2693 ##############################################################
2696 # +0 saved $num, used in reduction section
2697 # +8 &t[2*$num], used in reduction section
2698 # +16 intermediate carry bit
2699 # +24 top-most carry bit, used in reduction section
2705 movq $rptr,%xmm1 # save $rptr
2706 movq $nptr,%xmm2 # save $nptr
2707 movq %r10, %xmm3 # -$num
2710 mov %rax, 40(%rsp) # save original %rsp
2713 call __bn_sqrx8x_internal
2714 call __bn_postx4x_internal
2715 call __bn_sqrx8x_internal
2716 call __bn_postx4x_internal
2717 call __bn_sqrx8x_internal
2718 call __bn_postx4x_internal
2719 call __bn_sqrx8x_internal
2720 call __bn_postx4x_internal
2721 call __bn_sqrx8x_internal
2722 call __bn_postx4x_internal
2724 mov %r10,$num # -num
2730 call mulx4x_internal
2732 mov 40(%rsp),%rsi # restore %rsp
2744 .size bn_powerx5,.-bn_powerx5
2746 .globl bn_sqrx8x_internal
2747 .hidden bn_sqrx8x_internal
2748 .type bn_sqrx8x_internal,\@abi-omnipotent
2751 __bn_sqrx8x_internal:
2752 ##################################################################
2755 # a) multiply-n-add everything but a[i]*a[i];
2756 # b) shift result of a) by 1 to the left and accumulate
2757 # a[i]*a[i] products;
2759 ##################################################################
2760 # a[7]a[7]a[6]a[6]a[5]a[5]a[4]a[4]a[3]a[3]a[2]a[2]a[1]a[1]a[0]a[0]
2791 # a[7]a[7]a[6]a[6]a[5]a[5]a[4]a[4]a[3]a[3]a[2]a[2]a[1]a[1]a[0]a[0]
2794 my ($zero,$carry)=("%rbp","%rcx");
2797 lea 48+8(%rsp),$tptr
2798 lea ($aptr,$num),$aaptr
2799 mov $num,0+8(%rsp) # save $num
2800 mov $aaptr,8+8(%rsp) # save end of $aptr
2801 jmp .Lsqr8x_zero_start
2804 .byte 0x66,0x66,0x66,0x2e,0x0f,0x1f,0x84,0x00,0x00,0x00,0x00,0x00
2807 movdqa %xmm0,0*8($tptr)
2808 movdqa %xmm0,2*8($tptr)
2809 movdqa %xmm0,4*8($tptr)
2810 movdqa %xmm0,6*8($tptr)
2811 .Lsqr8x_zero_start: # aligned at 32
2812 movdqa %xmm0,8*8($tptr)
2813 movdqa %xmm0,10*8($tptr)
2814 movdqa %xmm0,12*8($tptr)
2815 movdqa %xmm0,14*8($tptr)
2816 lea 16*8($tptr),$tptr
2820 mov 0*8($aptr),%rdx # a[0], modulo-scheduled
2821 #xor %r9,%r9 # t[1], ex-$num, zero already
2828 lea 48+8(%rsp),$tptr
2829 xor $zero,$zero # cf=0, of=0
2830 jmp .Lsqrx8x_outer_loop
2833 .Lsqrx8x_outer_loop:
2834 mulx 1*8($aptr),%r8,%rax # a[1]*a[0]
2835 adcx %r9,%r8 # a[1]*a[0]+=t[1]
2837 mulx 2*8($aptr),%r9,%rax # a[2]*a[0]
2840 .byte 0xc4,0xe2,0xab,0xf6,0x86,0x18,0x00,0x00,0x00 # mulx 3*8($aptr),%r10,%rax # ...
2843 .byte 0xc4,0xe2,0xa3,0xf6,0x86,0x20,0x00,0x00,0x00 # mulx 4*8($aptr),%r11,%rax
2846 mulx 5*8($aptr),%r12,%rax
2849 mulx 6*8($aptr),%r13,%rax
2852 mulx 7*8($aptr),%r14,%r15
2853 mov 1*8($aptr),%rdx # a[1]
2857 mov %r8,1*8($tptr) # t[1]
2858 mov %r9,2*8($tptr) # t[2]
2859 sbb $carry,$carry # mov %cf,$carry
2860 xor $zero,$zero # cf=0, of=0
2863 mulx 2*8($aptr),%r8,%rbx # a[2]*a[1]
2864 mulx 3*8($aptr),%r9,%rax # a[3]*a[1]
2867 mulx 4*8($aptr),%r10,%rbx # ...
2870 .byte 0xc4,0xe2,0xa3,0xf6,0x86,0x28,0x00,0x00,0x00 # mulx 5*8($aptr),%r11,%rax
2873 .byte 0xc4,0xe2,0x9b,0xf6,0x9e,0x30,0x00,0x00,0x00 # mulx 6*8($aptr),%r12,%rbx
2876 .byte 0xc4,0x62,0x93,0xf6,0xb6,0x38,0x00,0x00,0x00 # mulx 7*8($aptr),%r13,%r14
2877 mov 2*8($aptr),%rdx # a[2]
2881 adox $zero,%r14 # of=0
2882 adcx $zero,%r14 # cf=0
2884 mov %r8,3*8($tptr) # t[3]
2885 mov %r9,4*8($tptr) # t[4]
2887 mulx 3*8($aptr),%r8,%rbx # a[3]*a[2]
2888 mulx 4*8($aptr),%r9,%rax # a[4]*a[2]
2891 mulx 5*8($aptr),%r10,%rbx # ...
2894 .byte 0xc4,0xe2,0xa3,0xf6,0x86,0x30,0x00,0x00,0x00 # mulx 6*8($aptr),%r11,%rax
2897 .byte 0xc4,0x62,0x9b,0xf6,0xae,0x38,0x00,0x00,0x00 # mulx 7*8($aptr),%r12,%r13
2899 mov 3*8($aptr),%rdx # a[3]
2903 mov %r8,5*8($tptr) # t[5]
2904 mov %r9,6*8($tptr) # t[6]
2905 mulx 4*8($aptr),%r8,%rax # a[4]*a[3]
2906 adox $zero,%r13 # of=0
2907 adcx $zero,%r13 # cf=0
2909 mulx 5*8($aptr),%r9,%rbx # a[5]*a[3]
2912 mulx 6*8($aptr),%r10,%rax # ...
2915 mulx 7*8($aptr),%r11,%r12
2916 mov 4*8($aptr),%rdx # a[4]
2917 mov 5*8($aptr),%r14 # a[5]
2920 mov 6*8($aptr),%r15 # a[6]
2922 adox $zero,%r12 # of=0
2923 adcx $zero,%r12 # cf=0
2925 mov %r8,7*8($tptr) # t[7]
2926 mov %r9,8*8($tptr) # t[8]
2928 mulx %r14,%r9,%rax # a[5]*a[4]
2929 mov 7*8($aptr),%r8 # a[7]
2931 mulx %r15,%r10,%rbx # a[6]*a[4]
2934 mulx %r8,%r11,%rax # a[7]*a[4]
2935 mov %r14,%rdx # a[5]
2938 #adox $zero,%rax # of=0
2939 adcx $zero,%rax # cf=0
2941 mulx %r15,%r14,%rbx # a[6]*a[5]
2942 mulx %r8,%r12,%r13 # a[7]*a[5]
2943 mov %r15,%rdx # a[6]
2944 lea 8*8($aptr),$aptr
2951 mulx %r8,%r8,%r14 # a[7]*a[6]
2956 je .Lsqrx8x_outer_break
2958 neg $carry # mov $carry,%cf
2962 adcx 9*8($tptr),%r9 # +=t[9]
2963 adcx 10*8($tptr),%r10 # ...
2964 adcx 11*8($tptr),%r11
2965 adc 12*8($tptr),%r12
2966 adc 13*8($tptr),%r13
2967 adc 14*8($tptr),%r14
2968 adc 15*8($tptr),%r15
2970 lea 2*64($tptr),$tptr
2971 sbb %rax,%rax # mov %cf,$carry
2973 mov -64($aptr),%rdx # a[0]
2974 mov %rax,16+8(%rsp) # offload $carry
2975 mov $tptr,24+8(%rsp)
2977 #lea 8*8($tptr),$tptr # see 2*8*8($tptr) above
2978 xor %eax,%eax # cf=0, of=0
2984 mulx 0*8($aaptr),%rax,%r8 # a[8]*a[i]
2985 adcx %rax,%rbx # +=t[8]
2988 mulx 1*8($aaptr),%rax,%r9 # ...
2992 mulx 2*8($aaptr),%rax,%r10
2996 mulx 3*8($aaptr),%rax,%r11
3000 .byte 0xc4,0x62,0xfb,0xf6,0xa5,0x20,0x00,0x00,0x00 # mulx 4*8($aaptr),%rax,%r12
3004 mulx 5*8($aaptr),%rax,%r13
3008 mulx 6*8($aaptr),%rax,%r14
3009 mov %rbx,($tptr,%rcx,8) # store t[8+i]
3014 .byte 0xc4,0x62,0xfb,0xf6,0xbd,0x38,0x00,0x00,0x00 # mulx 7*8($aaptr),%rax,%r15
3015 mov 8($aptr,%rcx,8),%rdx # a[i]
3017 adox %rbx,%r15 # %rbx is 0, of=0
3018 adcx %rbx,%r15 # cf=0
3024 lea 8*8($aaptr),$aaptr
3026 cmp 8+8(%rsp),$aaptr # done?
3029 sub 16+8(%rsp),%rbx # mov 16(%rsp),%cf
3040 lea 8*8($tptr),$tptr
3042 sbb %rax,%rax # mov %cf,%rax
3043 xor %ebx,%ebx # cf=0, of=0
3044 mov %rax,16+8(%rsp) # offload carry
3049 sub 16+8(%rsp),%r8 # consume last carry
3050 mov 24+8(%rsp),$carry # initial $tptr, borrow $carry
3051 mov 0*8($aptr),%rdx # a[8], modulo-scheduled
3052 xor %ebp,%ebp # xor $zero,$zero
3054 cmp $carry,$tptr # cf=0, of=0
3055 je .Lsqrx8x_outer_loop
3060 mov 2*8($carry),%r10
3062 mov 3*8($carry),%r11
3064 mov 4*8($carry),%r12
3066 mov 5*8($carry),%r13
3068 mov 6*8($carry),%r14
3070 mov 7*8($carry),%r15
3072 jmp .Lsqrx8x_outer_loop
3075 .Lsqrx8x_outer_break:
3076 mov %r9,9*8($tptr) # t[9]
3077 movq %xmm3,%rcx # -$num
3078 mov %r10,10*8($tptr) # ...
3079 mov %r11,11*8($tptr)
3080 mov %r12,12*8($tptr)
3081 mov %r13,13*8($tptr)
3082 mov %r14,14*8($tptr)
3087 lea 48+8(%rsp),$tptr
3088 mov ($aptr,$i),%rdx # a[0]
3090 mov 8($tptr),$A0[1] # t[1]
3091 xor $A0[0],$A0[0] # t[0], of=0, cf=0
3092 mov 0+8(%rsp),$num # restore $num
3094 mov 16($tptr),$A1[0] # t[2] # prefetch
3095 mov 24($tptr),$A1[1] # t[3] # prefetch
3096 #jmp .Lsqrx4x_shift_n_add # happens to be aligned
3099 .Lsqrx4x_shift_n_add:
3103 .byte 0x48,0x8b,0x94,0x0e,0x08,0x00,0x00,0x00 # mov 8($aptr,$i),%rdx # a[i+1] # prefetch
3104 .byte 0x4c,0x8b,0x97,0x20,0x00,0x00,0x00 # mov 32($tptr),$A0[0] # t[2*i+4] # prefetch
3107 mov 40($tptr),$A0[1] # t[2*i+4+1] # prefetch
3114 mov 16($aptr,$i),%rdx # a[i+2] # prefetch
3115 mov 48($tptr),$A1[0] # t[2*i+6] # prefetch
3118 mov 56($tptr),$A1[1] # t[2*i+6+1] # prefetch
3125 mov 24($aptr,$i),%rdx # a[i+3] # prefetch
3127 mov 64($tptr),$A0[0] # t[2*i+8] # prefetch
3130 mov 72($tptr),$A0[1] # t[2*i+8+1] # prefetch
3137 jrcxz .Lsqrx4x_shift_n_add_break
3138 .byte 0x48,0x8b,0x94,0x0e,0x00,0x00,0x00,0x00 # mov 0($aptr,$i),%rdx # a[i+4] # prefetch
3141 mov 80($tptr),$A1[0] # t[2*i+10] # prefetch
3142 mov 88($tptr),$A1[1] # t[2*i+10+1] # prefetch
3147 jmp .Lsqrx4x_shift_n_add
3150 .Lsqrx4x_shift_n_add_break:
3154 lea 64($tptr),$tptr # end of t[] buffer
3157 ######################################################################
3158 # Montgomery reduction part, "word-by-word" algorithm.
3160 # This new path is inspired by multiple submissions from Intel, by
3161 # Shay Gueron, Vlad Krasnov, Erdinc Ozturk, James Guilford,
3164 my ($nptr,$carry,$m0)=("%rbp","%rsi","%rdx");
3168 __bn_sqrx8x_reduction:
3169 xor %eax,%eax # initial top-most carry bit
3170 mov 32+8(%rsp),%rbx # n0
3171 mov 48+8(%rsp),%rdx # "%r8", 8*0($tptr)
3172 lea -8*8($nptr,$num),%rcx # end of n[]
3173 #lea 48+8(%rsp,$num,2),$tptr # end of t[] buffer
3174 mov %rcx, 0+8(%rsp) # save end of n[]
3175 mov $tptr,8+8(%rsp) # save end of t[]
3177 lea 48+8(%rsp),$tptr # initial t[] window
3178 jmp .Lsqrx8x_reduction_loop
3181 .Lsqrx8x_reduction_loop:
3187 imulq %rbx,%rdx # n0*a[i]
3191 mov %rax,24+8(%rsp) # store top-most carry bit
3193 lea 8*8($tptr),$tptr
3194 xor $carry,$carry # cf=0,of=0
3201 mulx 8*0($nptr),%rax,%r8 # n[0]
3202 adcx %rbx,%rax # discarded
3205 mulx 8*1($nptr),%rbx,%r9 # n[1]
3209 mulx 8*2($nptr),%rbx,%r10
3213 mulx 8*3($nptr),%rbx,%r11
3217 .byte 0xc4,0x62,0xe3,0xf6,0xa5,0x20,0x00,0x00,0x00 # mulx 8*4($nptr),%rbx,%r12
3223 mulx 32+8(%rsp),%rbx,%rdx # %rdx discarded
3225 mov %rax,64+48+8(%rsp,%rcx,8) # put aside n0*a[i]
3227 mulx 8*5($nptr),%rax,%r13
3231 mulx 8*6($nptr),%rax,%r14
3235 mulx 8*7($nptr),%rax,%r15
3238 adox $carry,%r15 # $carry is 0
3239 adcx $carry,%r15 # cf=0
3241 .byte 0x67,0x67,0x67
3245 mov $carry,%rax # xor %rax,%rax
3246 cmp 0+8(%rsp),$nptr # end of n[]?
3247 jae .Lsqrx8x_no_tail
3249 mov 48+8(%rsp),%rdx # pull n0*a[0]
3251 lea 8*8($nptr),$nptr
3254 adcx 8*2($tptr),%r10
3260 lea 8*8($tptr),$tptr
3261 sbb %rax,%rax # top carry
3263 xor $carry,$carry # of=0, cf=0
3270 mulx 8*0($nptr),%rax,%r8
3274 mulx 8*1($nptr),%rax,%r9
3278 mulx 8*2($nptr),%rax,%r10
3282 mulx 8*3($nptr),%rax,%r11
3286 .byte 0xc4,0x62,0xfb,0xf6,0xa5,0x20,0x00,0x00,0x00 # mulx 8*4($nptr),%rax,%r12
3290 mulx 8*5($nptr),%rax,%r13
3294 mulx 8*6($nptr),%rax,%r14
3298 mulx 8*7($nptr),%rax,%r15
3299 mov 72+48+8(%rsp,%rcx,8),%rdx # pull n0*a[i]
3302 mov %rbx,($tptr,%rcx,8) # save result
3304 adcx $carry,%r15 # cf=0
3309 cmp 0+8(%rsp),$nptr # end of n[]?
3310 jae .Lsqrx8x_tail_done # break out of loop
3312 sub 16+8(%rsp),$carry # mov 16(%rsp),%cf
3313 mov 48+8(%rsp),%rdx # pull n0*a[0]
3314 lea 8*8($nptr),$nptr
3323 lea 8*8($tptr),$tptr
3325 sub \$8,%rcx # mov \$-8,%rcx
3327 xor $carry,$carry # of=0, cf=0
3333 add 24+8(%rsp),%r8 # can this overflow?
3340 adc \$0,%r15 # can't overflow, because we
3341 # started with "overhung" part
3343 mov $carry,%rax # xor %rax,%rax
3345 sub 16+8(%rsp),$carry # mov 16(%rsp),%cf
3346 .Lsqrx8x_no_tail: # %cf is 0 if jumped here
3350 mov 8*7($nptr),$carry
3351 movq %xmm2,$nptr # restore $nptr
3358 adc %rax,%rax # top-most carry
3360 mov 32+8(%rsp),%rbx # n0
3361 mov 8*8($tptr,%rcx),%rdx # modulo-scheduled "%r8"
3363 mov %r8,8*0($tptr) # store top 512 bits
3364 lea 8*8($tptr),%r8 # borrow %r8
3373 lea 8*8($tptr,%rcx),$tptr # start of current t[] window
3374 cmp 8+8(%rsp),%r8 # end of t[]?
3375 jb .Lsqrx8x_reduction_loop
3377 .size bn_sqrx8x_internal,.-bn_sqrx8x_internal
3380 ##############################################################
3381 # Post-condition, 4x unrolled
3384 my ($rptr,$nptr)=("%rdx","%rbp");
3387 __bn_postx4x_internal:
3389 mov %rcx,%r10 # -$num
3390 mov %rcx,%r9 # -$num
3393 #lea 48+8(%rsp,%r9),$tptr
3394 movq %xmm1,$rptr # restore $rptr
3395 movq %xmm1,$aptr # prepare for back-to-back call
3396 dec %r12 # so that after 'not' we get -n[0]
3401 jmp .Lsqrx4x_sub_entry
3411 lea 8*4($nptr),$nptr
3416 neg %r8 # mov %r8,%cf
3422 lea 8*4($tptr),$tptr
3424 sbb %r8,%r8 # mov %cf,%r8
3427 lea 8*4($rptr),$rptr
3432 neg %r9 # restore $num
3435 .size __bn_postx4x_internal,.-__bn_postx4x_internal
3440 my ($inp,$num,$tbl,$idx)=$win64?("%rcx","%edx","%r8", "%r9d") : # Win64 order
3441 ("%rdi","%esi","%rdx","%ecx"); # Unix order
3448 .type bn_get_bits5,\@abi-omnipotent
3460 movzw (%r10,$num,2),%eax
3464 .size bn_get_bits5,.-bn_get_bits5
3467 .type bn_scatter5,\@abi-omnipotent
3471 jz .Lscatter_epilogue
3472 lea ($tbl,$idx,8),$tbl
3482 .size bn_scatter5,.-bn_scatter5
3485 .type bn_gather5,\@abi-omnipotent
3488 .LSEH_begin_bn_gather5: # Win64 thing, but harmless in other cases
3489 # I can't trust the assembler to use a specific encoding :-(
3490 .byte 0x4c,0x8d,0x14,0x24 #lea (%rsp),%r10
3491 .byte 0x48,0x81,0xec,0x08,0x01,0x00,0x00 #sub $0x108,%rsp
3492 lea .Linc(%rip),%rax
3493 and \$-16,%rsp # shouldn't be formally required
3496 movdqa 0(%rax),%xmm0 # 00000001000000010000000000000000
3497 movdqa 16(%rax),%xmm1 # 00000002000000020000000200000002
3498 lea 128($tbl),%r11 # size optimization
3499 lea 128(%rsp),%rax # size optimization
3501 pshufd \$0,%xmm5,%xmm5 # broadcast $idx
3505 ########################################################################
3506 # calculate mask by comparing 0..31 to $idx and save result to stack
3508 for($i=0;$i<$STRIDE/16;$i+=4) {
3511 pcmpeqd %xmm5,%xmm0 # compare to 1,0
3513 $code.=<<___ if ($i);
3514 movdqa %xmm3,`16*($i-1)-128`(%rax)
3520 pcmpeqd %xmm5,%xmm1 # compare to 3,2
3521 movdqa %xmm0,`16*($i+0)-128`(%rax)
3525 pcmpeqd %xmm5,%xmm2 # compare to 5,4
3526 movdqa %xmm1,`16*($i+1)-128`(%rax)
3530 pcmpeqd %xmm5,%xmm3 # compare to 7,6
3531 movdqa %xmm2,`16*($i+2)-128`(%rax)
3536 movdqa %xmm3,`16*($i-1)-128`(%rax)
3544 for($i=0;$i<$STRIDE/16;$i+=4) {
3546 movdqa `16*($i+0)-128`(%r11),%xmm0
3547 movdqa `16*($i+1)-128`(%r11),%xmm1
3548 movdqa `16*($i+2)-128`(%r11),%xmm2
3549 pand `16*($i+0)-128`(%rax),%xmm0
3550 movdqa `16*($i+3)-128`(%r11),%xmm3
3551 pand `16*($i+1)-128`(%rax),%xmm1
3553 pand `16*($i+2)-128`(%rax),%xmm2
3555 pand `16*($i+3)-128`(%rax),%xmm3
3562 lea $STRIDE(%r11),%r11
3563 pshufd \$0x4e,%xmm4,%xmm0
3565 movq %xmm0,($out) # m0=bp[0]
3572 .LSEH_end_bn_gather5:
3573 .size bn_gather5,.-bn_gather5
3581 .asciz "Montgomery Multiplication with scatter/gather for x86_64, CRYPTOGAMS by <appro\@openssl.org>"
3584 # EXCEPTION_DISPOSITION handler (EXCEPTION_RECORD *rec,ULONG64 frame,
3585 # CONTEXT *context,DISPATCHER_CONTEXT *disp)
3593 .extern __imp_RtlVirtualUnwind
3594 .type mul_handler,\@abi-omnipotent
3608 mov 120($context),%rax # pull context->Rax
3609 mov 248($context),%rbx # pull context->Rip
3611 mov 8($disp),%rsi # disp->ImageBase
3612 mov 56($disp),%r11 # disp->HandlerData
3614 mov 0(%r11),%r10d # HandlerData[0]
3615 lea (%rsi,%r10),%r10 # end of prologue label
3616 cmp %r10,%rbx # context->Rip<end of prologue label
3617 jb .Lcommon_seh_tail
3619 mov 152($context),%rax # pull context->Rsp
3621 mov 4(%r11),%r10d # HandlerData[1]
3622 lea (%rsi,%r10),%r10 # epilogue label
3623 cmp %r10,%rbx # context->Rip>=epilogue label
3624 jae .Lcommon_seh_tail
3626 lea .Lmul_epilogue(%rip),%r10
3630 mov 192($context),%r10 # pull $num
3631 mov 8(%rax,%r10,8),%rax # pull saved stack pointer
3636 mov 40(%rax),%rax # pull saved stack pointer
3644 mov %rbx,144($context) # restore context->Rbx
3645 mov %rbp,160($context) # restore context->Rbp
3646 mov %r12,216($context) # restore context->R12
3647 mov %r13,224($context) # restore context->R13
3648 mov %r14,232($context) # restore context->R14
3649 mov %r15,240($context) # restore context->R15
3654 mov %rax,152($context) # restore context->Rsp
3655 mov %rsi,168($context) # restore context->Rsi
3656 mov %rdi,176($context) # restore context->Rdi
3658 mov 40($disp),%rdi # disp->ContextRecord
3659 mov $context,%rsi # context
3660 mov \$154,%ecx # sizeof(CONTEXT)
3661 .long 0xa548f3fc # cld; rep movsq
3664 xor %rcx,%rcx # arg1, UNW_FLAG_NHANDLER
3665 mov 8(%rsi),%rdx # arg2, disp->ImageBase
3666 mov 0(%rsi),%r8 # arg3, disp->ControlPc
3667 mov 16(%rsi),%r9 # arg4, disp->FunctionEntry
3668 mov 40(%rsi),%r10 # disp->ContextRecord
3669 lea 56(%rsi),%r11 # &disp->HandlerData
3670 lea 24(%rsi),%r12 # &disp->EstablisherFrame
3671 mov %r10,32(%rsp) # arg5
3672 mov %r11,40(%rsp) # arg6
3673 mov %r12,48(%rsp) # arg7
3674 mov %rcx,56(%rsp) # arg8, (NULL)
3675 call *__imp_RtlVirtualUnwind(%rip)
3677 mov \$1,%eax # ExceptionContinueSearch
3689 .size mul_handler,.-mul_handler
3693 .rva .LSEH_begin_bn_mul_mont_gather5
3694 .rva .LSEH_end_bn_mul_mont_gather5
3695 .rva .LSEH_info_bn_mul_mont_gather5
3697 .rva .LSEH_begin_bn_mul4x_mont_gather5
3698 .rva .LSEH_end_bn_mul4x_mont_gather5
3699 .rva .LSEH_info_bn_mul4x_mont_gather5
3701 .rva .LSEH_begin_bn_power5
3702 .rva .LSEH_end_bn_power5
3703 .rva .LSEH_info_bn_power5
3705 .rva .LSEH_begin_bn_from_mont8x
3706 .rva .LSEH_end_bn_from_mont8x
3707 .rva .LSEH_info_bn_from_mont8x
3709 $code.=<<___ if ($addx);
3710 .rva .LSEH_begin_bn_mulx4x_mont_gather5
3711 .rva .LSEH_end_bn_mulx4x_mont_gather5
3712 .rva .LSEH_info_bn_mulx4x_mont_gather5
3714 .rva .LSEH_begin_bn_powerx5
3715 .rva .LSEH_end_bn_powerx5
3716 .rva .LSEH_info_bn_powerx5
3719 .rva .LSEH_begin_bn_gather5
3720 .rva .LSEH_end_bn_gather5
3721 .rva .LSEH_info_bn_gather5
3725 .LSEH_info_bn_mul_mont_gather5:
3728 .rva .Lmul_body,.Lmul_epilogue # HandlerData[]
3730 .LSEH_info_bn_mul4x_mont_gather5:
3733 .rva .Lmul4x_body,.Lmul4x_epilogue # HandlerData[]
3735 .LSEH_info_bn_power5:
3738 .rva .Lpower5_body,.Lpower5_epilogue # HandlerData[]
3740 .LSEH_info_bn_from_mont8x:
3743 .rva .Lfrom_body,.Lfrom_epilogue # HandlerData[]
3745 $code.=<<___ if ($addx);
3747 .LSEH_info_bn_mulx4x_mont_gather5:
3750 .rva .Lmulx4x_body,.Lmulx4x_epilogue # HandlerData[]
3752 .LSEH_info_bn_powerx5:
3755 .rva .Lpowerx5_body,.Lpowerx5_epilogue # HandlerData[]
3759 .LSEH_info_bn_gather5:
3760 .byte 0x01,0x0b,0x03,0x0a
3761 .byte 0x0b,0x01,0x21,0x00 # sub rsp,0x108
3762 .byte 0x04,0xa3,0x00,0x00 # lea r10,(rsp)
3767 $code =~ s/\`([^\`]*)\`/eval($1)/gem;