bn_exp.c: move check for AD*X to rsaz-avx2.pl.
crypto/bn/asm/rsaz-avx2.pl  [oweals/openssl.git]
1 #!/usr/bin/env perl
2
3 ##############################################################################
4 #                                                                            #
5 #  Copyright (c) 2012, Intel Corporation                                     #
6 #                                                                            #
7 #  All rights reserved.                                                      #
8 #                                                                            #
9 #  Redistribution and use in source and binary forms, with or without        #
10 #  modification, are permitted provided that the following conditions are    #
11 #  met:                                                                      #
12 #                                                                            #
13 #  *  Redistributions of source code must retain the above copyright         #
14 #     notice, this list of conditions and the following disclaimer.          #
15 #                                                                            #
16 #  *  Redistributions in binary form must reproduce the above copyright      #
17 #     notice, this list of conditions and the following disclaimer in the    #
18 #     documentation and/or other materials provided with the                 #
19 #     distribution.                                                          #
20 #                                                                            #
21 #  *  Neither the name of the Intel Corporation nor the names of its         #
22 #     contributors may be used to endorse or promote products derived from   #
23 #     this software without specific prior written permission.               #
24 #                                                                            #
25 #                                                                            #
26 #  THIS SOFTWARE IS PROVIDED BY INTEL CORPORATION ""AS IS"" AND ANY          #
27 #  EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE         #
28 #  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR        #
29 #  PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL INTEL CORPORATION OR            #
30 #  CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,     #
31 #  EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,       #
32 #  PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR        #
33 #  PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF    #
34 #  LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING      #
35 #  NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS        #
36 #  SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.              #
37 #                                                                            #
38 ##############################################################################
39 # Developers and authors:                                                    #
40 # Shay Gueron (1, 2), and Vlad Krasnov (1)                                   #
41 # (1) Intel Corporation, Israel Development Center, Haifa, Israel            #
42 # (2) University of Haifa, Israel                                            #
43 ##############################################################################
44 # Reference:                                                                 #
45 # [1] S. Gueron, V. Krasnov: "Software Implementation of Modular             #
46 #     Exponentiation,  Using Advanced Vector Instructions Architectures",    #
47 #     F. Ozbudak and F. Rodriguez-Henriquez (Eds.): WAIFI 2012, LNCS 7369,   #
48 #     pp. 119-135, 2012. Springer-Verlag Berlin Heidelberg 2012              #
49 # [2] S. Gueron: "Efficient Software Implementations of Modular              #
50 #     Exponentiation", Journal of Cryptographic Engineering 2:31-43 (2012).  #
51 # [3] S. Gueron, V. Krasnov: "Speeding up Big-numbers Squaring", IEEE        #
52 #     Proceedings of 9th International Conference on Information Technology: #
53 #     New Generations (ITNG 2012), pp.821-823 (2012)                         #
54 # [4] S. Gueron, V. Krasnov: "[PATCH] Efficient and side channel analysis    #
55 #     resistant 1024-bit modular exponentiation, for optimizing RSA2048      #
56 #     on AVX2 capable x86_64 platforms",                                     #
57 #     http://rt.openssl.org/Ticket/Display.html?id=2850&user=guest&pass=guest#
58 ##############################################################################
59 #
60 # +13% improvement over original submission by <appro@openssl.org>
61 #
62 # rsa2048 sign/sec      OpenSSL 1.0.1   scalar(*)       this
63 # 2.3GHz Haswell        621             765/+23%        1113/+79%
64 #
65 # (*)   scalar code is used if the system doesn't support AVX2; shown for reference
66
67 $flavour = shift;
68 $output  = shift;
69 if ($flavour =~ /\./) { $output = $flavour; undef $flavour; }
70
71 $win64=0; $win64=1 if ($flavour =~ /[nm]asm|mingw64/ || $output =~ /\.asm$/);
72
73 $0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
74 ( $xlate="${dir}x86_64-xlate.pl" and -f $xlate ) or
75 ( $xlate="${dir}../../perlasm/x86_64-xlate.pl" and -f $xlate) or
76 die "can't locate x86_64-xlate.pl";
77
78 if (`$ENV{CC} -Wa,-v -c -o /dev/null -x assembler /dev/null 2>&1`
79                 =~ /GNU assembler version ([2-9]\.[0-9]+)/) {
80         $avx = ($1>=2.19) + ($1>=2.22);
81         $addx = ($1>=2.23);
82 }
83
84 if (!$avx && $win64 && ($flavour =~ /nasm/ || $ENV{ASM} =~ /nasm/) &&
85             `nasm -v 2>&1` =~ /NASM version ([2-9]\.[0-9]+)/) {
86         $avx = ($1>=2.09) + ($1>=2.10);
87         $addx = ($1>=2.10);
88 }
89
90 if (!$avx && $win64 && ($flavour =~ /masm/ || $ENV{ASM} =~ /ml64/) &&
91             `ml64 2>&1` =~ /Version ([0-9]+)\./) {
92         $avx = ($1>=10) + ($1>=11);
93         $addx = ($1>=11);
94 }
95
96 if (!$avx && `$ENV{CC} -v 2>&1` =~ /LLVM ([3-9]\.[0-9]+)/) {
97         $avx = ($1>=3.0) + ($1>=3.1);
98         $addx = 0;
99 }
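# $avx ends up 1 when the assembler can encode AVX and 2 when it can also
# encode AVX2; $addx is set when it can encode the ADCX/ADOX (ADX)
# instructions.  Note that these probe the toolchain only, not the CPU the
# code will eventually run on.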
100
101 open OUT,"| $^X $xlate $flavour $output";
102 *STDOUT = *OUT;
103
104 if ($avx>1) {{{
105 { # void AMS_WW(
106 my $rp="%rdi";  # BN_ULONG *rp,
107 my $ap="%rsi";  # const BN_ULONG *ap,
108 my $np="%rdx";  # const BN_ULONG *np,
109 my $n0="%ecx";  # const BN_ULONG n0,
110 my $rep="%r8d"; # int repeat);
111
112 # The registers that hold the accumulated redundant result
113 # The AMM works on 1024-bit operands, and the redundant word size is 29 bits.
114 # Therefore: ceil(1024/29)/4 = 9 ymm registers are needed.
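# For illustration only (nothing below depends on these names): converting
# a 1024-bit value $a to the redundant form amounts to slicing it into
# 29-bit digits,
#
#	my @digits = map { ($a >> 29*$_) & 0x1fffffff } (0..35);
#
# with four consecutive digits per ymm register, one digit per 64-bit lane,
# which leaves several spare bits per lane to absorb carries while partial
# products are accumulated.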
115 my $ACC0="%ymm0";
116 my $ACC1="%ymm1";
117 my $ACC2="%ymm2";
118 my $ACC3="%ymm3";
119 my $ACC4="%ymm4";
120 my $ACC5="%ymm5";
121 my $ACC6="%ymm6";
122 my $ACC7="%ymm7";
123 my $ACC8="%ymm8";
124 my $ACC9="%ymm9";
125 # Registers that hold the broadcasted words of bp, currently used
126 my $B1="%ymm10";
127 my $B2="%ymm11";
128 # Registers that hold the broadcasted words of Y, currently used
129 my $Y1="%ymm12";
130 my $Y2="%ymm13";
131 # Helper registers
132 my $TEMP1="%ymm14";
133 my $AND_MASK="%ymm15";
134 # alu registers that hold the first words of the ACC
135 my $r0="%r9";
136 my $r1="%r10";
137 my $r2="%r11";
138 my $r3="%r12";
139
140 my $i="%r14d";                  # loop counter
141 my $tmp = "%r15";
142
143 my $FrameSize=32*18+32*8;       # place for A^2 and 2*A
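# (32*18 bytes cover the 18 ymm words of the double-width product, the
#  remaining 32*8 bytes hold the doubled operand that is written to $aap)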
144
145 my $aap=$r0;
146 my $tp0="%rbx";
147 my $tp1=$r3;
148 my $tpa=$tmp;
149
150 $np="%r13";                     # reassigned argument
151
152 $code.=<<___;
153 .text
154
155 .globl  rsaz_1024_sqr_avx2
156 .type   rsaz_1024_sqr_avx2,\@function,5
157 .align  64
158 rsaz_1024_sqr_avx2:             # 702 cycles, 14% faster than rsaz_1024_mul_avx2
159         lea     (%rsp), %rax
160         push    %rbx
161         push    %rbp
162         push    %r12
163         push    %r13
164         push    %r14
165         push    %r15
166         vzeroupper
167 ___
168 $code.=<<___ if ($win64);
169         lea     -0xa8(%rsp),%rsp
170         vmovaps %xmm6,-0xd8(%rax)
171         vmovaps %xmm7,-0xc8(%rax)
172         vmovaps %xmm8,-0xb8(%rax)
173         vmovaps %xmm9,-0xa8(%rax)
174         vmovaps %xmm10,-0x98(%rax)
175         vmovaps %xmm11,-0x88(%rax)
176         vmovaps %xmm12,-0x78(%rax)
177         vmovaps %xmm13,-0x68(%rax)
178         vmovaps %xmm14,-0x58(%rax)
179         vmovaps %xmm15,-0x48(%rax)
180 .Lsqr_1024_body:
181 ___
182 $code.=<<___;
183         mov     %rax,%rbp
184         mov     %rdx, $np                       # reassigned argument
185         sub     \$$FrameSize, %rsp
186         mov     $np, $tmp
187         sub     \$-128, $rp                     # size optimization
188         sub     \$-128, $ap
189         sub     \$-128, $np
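        # (the 128-byte bias lets most of the 32*n-128 displacements used
        # below fit in a signed byte, and subtracting -128 is itself
        # shorter than adding 128, since -128 still fits in an 8-bit
        # immediate)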
190
191         and     \$4095, $tmp                    # see if $np crosses page
192         add     \$32*10, $tmp
193         shr     \$12, $tmp
194         vpxor   $ACC9,$ACC9,$ACC9
195         jz      .Lsqr_1024_no_n_copy
196
197         # unaligned 256-bit load that crosses page boundary can
198         # cause >2x performance degradation here, so if $np does
199         # cross page boundary, copy it to stack and make sure stack
200         # frame doesn't...
201         sub             \$32*10,%rsp
202         vmovdqu         32*0-128($np), $ACC0
203         and             \$-2048, %rsp
204         vmovdqu         32*1-128($np), $ACC1
205         vmovdqu         32*2-128($np), $ACC2
206         vmovdqu         32*3-128($np), $ACC3
207         vmovdqu         32*4-128($np), $ACC4
208         vmovdqu         32*5-128($np), $ACC5
209         vmovdqu         32*6-128($np), $ACC6
210         vmovdqu         32*7-128($np), $ACC7
211         vmovdqu         32*8-128($np), $ACC8
212         lea             $FrameSize+128(%rsp),$np
213         vmovdqu         $ACC0, 32*0-128($np)
214         vmovdqu         $ACC1, 32*1-128($np)
215         vmovdqu         $ACC2, 32*2-128($np)
216         vmovdqu         $ACC3, 32*3-128($np)
217         vmovdqu         $ACC4, 32*4-128($np)
218         vmovdqu         $ACC5, 32*5-128($np)
219         vmovdqu         $ACC6, 32*6-128($np)
220         vmovdqu         $ACC7, 32*7-128($np)
221         vmovdqu         $ACC8, 32*8-128($np)
222         vmovdqu         $ACC9, 32*9-128($np)    # $ACC9 is zero
223
224 .Lsqr_1024_no_n_copy:
225         and             \$-1024, %rsp
226
227         vmovdqu         32*1-128($ap), $ACC1
228         vmovdqu         32*2-128($ap), $ACC2
229         vmovdqu         32*3-128($ap), $ACC3
230         vmovdqu         32*4-128($ap), $ACC4
231         vmovdqu         32*5-128($ap), $ACC5
232         vmovdqu         32*6-128($ap), $ACC6
233         vmovdqu         32*7-128($ap), $ACC7
234         vmovdqu         32*8-128($ap), $ACC8
235
236         lea     192(%rsp), $tp0                 # 64+128=192
237         vpbroadcastq    .Land_mask(%rip), $AND_MASK
238         jmp     .LOOP_GRANDE_SQR_1024
239
240 .align  32
241 .LOOP_GRANDE_SQR_1024:
242         lea     32*18+128(%rsp), $aap           # size optimization
243         lea     448(%rsp), $tp1                 # 64+128+256=448
244
245         # the squaring is performed as described in Variant B of
246         # "Speeding up Big-Number Squaring", so start by calculating
247         # the A*2=A+A vector
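        # (a square needs every cross product a[i]*a[j], i!=j, twice; by
        # multiplying against the doubled copy each pair is formed once as
        # a[i]*(2*a[j]), which saves roughly half of the multiplications
        # compared to a general multiplication)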
248         vpaddq          $ACC1, $ACC1, $ACC1
249          vpbroadcastq   32*0-128($ap), $B1
250         vpaddq          $ACC2, $ACC2, $ACC2
251         vmovdqa         $ACC1, 32*0-128($aap)
252         vpaddq          $ACC3, $ACC3, $ACC3
253         vmovdqa         $ACC2, 32*1-128($aap)
254         vpaddq          $ACC4, $ACC4, $ACC4
255         vmovdqa         $ACC3, 32*2-128($aap)
256         vpaddq          $ACC5, $ACC5, $ACC5
257         vmovdqa         $ACC4, 32*3-128($aap)
258         vpaddq          $ACC6, $ACC6, $ACC6
259         vmovdqa         $ACC5, 32*4-128($aap)
260         vpaddq          $ACC7, $ACC7, $ACC7
261         vmovdqa         $ACC6, 32*5-128($aap)
262         vpaddq          $ACC8, $ACC8, $ACC8
263         vmovdqa         $ACC7, 32*6-128($aap)
264         vpxor           $ACC9, $ACC9, $ACC9
265         vmovdqa         $ACC8, 32*7-128($aap)
266
267         vpmuludq        32*0-128($ap), $B1, $ACC0
268          vpbroadcastq   32*1-128($ap), $B2
269          vmovdqu        $ACC9, 32*9-192($tp0)   # zero upper half
270         vpmuludq        $B1, $ACC1, $ACC1
271          vmovdqu        $ACC9, 32*10-448($tp1)
272         vpmuludq        $B1, $ACC2, $ACC2
273          vmovdqu        $ACC9, 32*11-448($tp1)
274         vpmuludq        $B1, $ACC3, $ACC3
275          vmovdqu        $ACC9, 32*12-448($tp1)
276         vpmuludq        $B1, $ACC4, $ACC4
277          vmovdqu        $ACC9, 32*13-448($tp1)
278         vpmuludq        $B1, $ACC5, $ACC5
279          vmovdqu        $ACC9, 32*14-448($tp1)
280         vpmuludq        $B1, $ACC6, $ACC6
281          vmovdqu        $ACC9, 32*15-448($tp1)
282         vpmuludq        $B1, $ACC7, $ACC7
283          vmovdqu        $ACC9, 32*16-448($tp1)
284         vpmuludq        $B1, $ACC8, $ACC8
285          vpbroadcastq   32*2-128($ap), $B1
286          vmovdqu        $ACC9, 32*17-448($tp1)
287
288         mov     $ap, $tpa
289         mov     \$4, $i
290         jmp     .Lsqr_entry_1024
291 ___
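# $Y1 and $Y2 are not needed until the reduction, so reuse them as extra
# temporaries inside the squaring loop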
292 $TEMP0=$Y1;
293 $TEMP2=$Y2;
294 $code.=<<___;
295 .align  32
296 .LOOP_SQR_1024:
297          vpbroadcastq   32*1-128($tpa), $B2
298         vpmuludq        32*0-128($ap), $B1, $ACC0
299         vpaddq          32*0-192($tp0), $ACC0, $ACC0
300         vpmuludq        32*0-128($aap), $B1, $ACC1
301         vpaddq          32*1-192($tp0), $ACC1, $ACC1
302         vpmuludq        32*1-128($aap), $B1, $ACC2
303         vpaddq          32*2-192($tp0), $ACC2, $ACC2
304         vpmuludq        32*2-128($aap), $B1, $ACC3
305         vpaddq          32*3-192($tp0), $ACC3, $ACC3
306         vpmuludq        32*3-128($aap), $B1, $ACC4
307         vpaddq          32*4-192($tp0), $ACC4, $ACC4
308         vpmuludq        32*4-128($aap), $B1, $ACC5
309         vpaddq          32*5-192($tp0), $ACC5, $ACC5
310         vpmuludq        32*5-128($aap), $B1, $ACC6
311         vpaddq          32*6-192($tp0), $ACC6, $ACC6
312         vpmuludq        32*6-128($aap), $B1, $ACC7
313         vpaddq          32*7-192($tp0), $ACC7, $ACC7
314         vpmuludq        32*7-128($aap), $B1, $ACC8
315          vpbroadcastq   32*2-128($tpa), $B1
316         vpaddq          32*8-192($tp0), $ACC8, $ACC8
317 .Lsqr_entry_1024:
318         vmovdqu         $ACC0, 32*0-192($tp0)
319         vmovdqu         $ACC1, 32*1-192($tp0)
320
321         vpmuludq        32*1-128($ap), $B2, $TEMP0
322         vpaddq          $TEMP0, $ACC2, $ACC2
323         vpmuludq        32*1-128($aap), $B2, $TEMP1
324         vpaddq          $TEMP1, $ACC3, $ACC3
325         vpmuludq        32*2-128($aap), $B2, $TEMP2
326         vpaddq          $TEMP2, $ACC4, $ACC4
327         vpmuludq        32*3-128($aap), $B2, $TEMP0
328         vpaddq          $TEMP0, $ACC5, $ACC5
329         vpmuludq        32*4-128($aap), $B2, $TEMP1
330         vpaddq          $TEMP1, $ACC6, $ACC6
331         vpmuludq        32*5-128($aap), $B2, $TEMP2
332         vpaddq          $TEMP2, $ACC7, $ACC7
333         vpmuludq        32*6-128($aap), $B2, $TEMP0
334         vpaddq          $TEMP0, $ACC8, $ACC8
335         vpmuludq        32*7-128($aap), $B2, $ACC0
336          vpbroadcastq   32*3-128($tpa), $B2
337         vpaddq          32*9-192($tp0), $ACC0, $ACC0
338
339         vmovdqu         $ACC2, 32*2-192($tp0)
340         vmovdqu         $ACC3, 32*3-192($tp0)
341
342         vpmuludq        32*2-128($ap), $B1, $TEMP2
343         vpaddq          $TEMP2, $ACC4, $ACC4
344         vpmuludq        32*2-128($aap), $B1, $TEMP0
345         vpaddq          $TEMP0, $ACC5, $ACC5
346         vpmuludq        32*3-128($aap), $B1, $TEMP1
347         vpaddq          $TEMP1, $ACC6, $ACC6
348         vpmuludq        32*4-128($aap), $B1, $TEMP2
349         vpaddq          $TEMP2, $ACC7, $ACC7
350         vpmuludq        32*5-128($aap), $B1, $TEMP0
351         vpaddq          $TEMP0, $ACC8, $ACC8
352         vpmuludq        32*6-128($aap), $B1, $TEMP1
353         vpaddq          $TEMP1, $ACC0, $ACC0
354         vpmuludq        32*7-128($aap), $B1, $ACC1
355          vpbroadcastq   32*4-128($tpa), $B1
356         vpaddq          32*10-448($tp1), $ACC1, $ACC1
357
358         vmovdqu         $ACC4, 32*4-192($tp0)
359         vmovdqu         $ACC5, 32*5-192($tp0)
360
361         vpmuludq        32*3-128($ap), $B2, $TEMP0
362         vpaddq          $TEMP0, $ACC6, $ACC6
363         vpmuludq        32*3-128($aap), $B2, $TEMP1
364         vpaddq          $TEMP1, $ACC7, $ACC7
365         vpmuludq        32*4-128($aap), $B2, $TEMP2
366         vpaddq          $TEMP2, $ACC8, $ACC8
367         vpmuludq        32*5-128($aap), $B2, $TEMP0
368         vpaddq          $TEMP0, $ACC0, $ACC0
369         vpmuludq        32*6-128($aap), $B2, $TEMP1
370         vpaddq          $TEMP1, $ACC1, $ACC1
371         vpmuludq        32*7-128($aap), $B2, $ACC2
372          vpbroadcastq   32*5-128($tpa), $B2
373         vpaddq          32*11-448($tp1), $ACC2, $ACC2   
374
375         vmovdqu         $ACC6, 32*6-192($tp0)
376         vmovdqu         $ACC7, 32*7-192($tp0)
377
378         vpmuludq        32*4-128($ap), $B1, $TEMP0
379         vpaddq          $TEMP0, $ACC8, $ACC8
380         vpmuludq        32*4-128($aap), $B1, $TEMP1
381         vpaddq          $TEMP1, $ACC0, $ACC0
382         vpmuludq        32*5-128($aap), $B1, $TEMP2
383         vpaddq          $TEMP2, $ACC1, $ACC1
384         vpmuludq        32*6-128($aap), $B1, $TEMP0
385         vpaddq          $TEMP0, $ACC2, $ACC2
386         vpmuludq        32*7-128($aap), $B1, $ACC3
387          vpbroadcastq   32*6-128($tpa), $B1
388         vpaddq          32*12-448($tp1), $ACC3, $ACC3
389
390         vmovdqu         $ACC8, 32*8-192($tp0)
391         vmovdqu         $ACC0, 32*9-192($tp0)
392         lea             8($tp0), $tp0
393
394         vpmuludq        32*5-128($ap), $B2, $TEMP2
395         vpaddq          $TEMP2, $ACC1, $ACC1
396         vpmuludq        32*5-128($aap), $B2, $TEMP0
397         vpaddq          $TEMP0, $ACC2, $ACC2
398         vpmuludq        32*6-128($aap), $B2, $TEMP1
399         vpaddq          $TEMP1, $ACC3, $ACC3
400         vpmuludq        32*7-128($aap), $B2, $ACC4
401          vpbroadcastq   32*7-128($tpa), $B2
402         vpaddq          32*13-448($tp1), $ACC4, $ACC4
403
404         vmovdqu         $ACC1, 32*10-448($tp1)
405         vmovdqu         $ACC2, 32*11-448($tp1)
406
407         vpmuludq        32*6-128($ap), $B1, $TEMP0
408         vpaddq          $TEMP0, $ACC3, $ACC3
409         vpmuludq        32*6-128($aap), $B1, $TEMP1
410          vpbroadcastq   32*8-128($tpa), $ACC0           # borrow $ACC0 for $B1
411         vpaddq          $TEMP1, $ACC4, $ACC4
412         vpmuludq        32*7-128($aap), $B1, $ACC5
413          vpbroadcastq   32*0+8-128($tpa), $B1           # for next iteration
414         vpaddq          32*14-448($tp1), $ACC5, $ACC5
415
416         vmovdqu         $ACC3, 32*12-448($tp1)
417         vmovdqu         $ACC4, 32*13-448($tp1)
418         lea             8($tpa), $tpa
419
420         vpmuludq        32*7-128($ap), $B2, $TEMP0
421         vpaddq          $TEMP0, $ACC5, $ACC5
422         vpmuludq        32*7-128($aap), $B2, $ACC6
423         vpaddq          32*15-448($tp1), $ACC6, $ACC6
424
425         vpmuludq        32*8-128($ap), $ACC0, $ACC7
426         vmovdqu         $ACC5, 32*14-448($tp1)
427         vpaddq          32*16-448($tp1), $ACC7, $ACC7
428         vmovdqu         $ACC6, 32*15-448($tp1)
429         vmovdqu         $ACC7, 32*16-448($tp1)
430         lea             8($tp1), $tp1
431
432         dec     $i        
433         jnz     .LOOP_SQR_1024
434 ___
435 $ZERO = $ACC9;
436 $TEMP0 = $B1;
437 $TEMP2 = $B2;
438 $TEMP3 = $Y1;
439 $TEMP4 = $Y2;
440 $code.=<<___;
441         # we need to fix indexes 32-39 to avoid overflow
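        # (carry propagation idiom used here and below: vpsrlq by 29 pulls
        # the carry out of every 64-bit lane, vpand keeps the low 29 bits,
        # vpermq with 0x93 rotates the four carries up by one lane so each
        # lines up with the next digit, and the vpblendd pair feeds the
        # carry that wrapped around the top lane into the following
        # register, with zero entering the very first digit)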
442         vmovdqu         32*8(%rsp), $ACC8               # 32*8-192($tp0),
443         vmovdqu         32*9(%rsp), $ACC1               # 32*9-192($tp0)
444         vmovdqu         32*10(%rsp), $ACC2              # 32*10-192($tp0)
445         lea             192(%rsp), $tp0                 # 64+128=192
446
447         vpsrlq          \$29, $ACC8, $TEMP1
448         vpand           $AND_MASK, $ACC8, $ACC8
449         vpsrlq          \$29, $ACC1, $TEMP2
450         vpand           $AND_MASK, $ACC1, $ACC1
451
452         vpermq          \$0x93, $TEMP1, $TEMP1
453         vpxor           $ZERO, $ZERO, $ZERO
454         vpermq          \$0x93, $TEMP2, $TEMP2
455
456         vpblendd        \$3, $ZERO, $TEMP1, $TEMP0
457         vpblendd        \$3, $TEMP1, $TEMP2, $TEMP1
458         vpaddq          $TEMP0, $ACC8, $ACC8
459         vpblendd        \$3, $TEMP2, $ZERO, $TEMP2
460         vpaddq          $TEMP1, $ACC1, $ACC1
461         vpaddq          $TEMP2, $ACC2, $ACC2
462         vmovdqu         $ACC1, 32*9-192($tp0)
463         vmovdqu         $ACC2, 32*10-192($tp0)
464
465         mov     (%rsp), %rax
466         mov     8(%rsp), $r1
467         mov     16(%rsp), $r2
468         mov     24(%rsp), $r3
469         vmovdqu 32*1(%rsp), $ACC1
470         vmovdqu 32*2-192($tp0), $ACC2
471         vmovdqu 32*3-192($tp0), $ACC3
472         vmovdqu 32*4-192($tp0), $ACC4
473         vmovdqu 32*5-192($tp0), $ACC5
474         vmovdqu 32*6-192($tp0), $ACC6
475         vmovdqu 32*7-192($tp0), $ACC7
476
477         mov     %rax, $r0
478         imull   $n0, %eax
479         and     \$0x1fffffff, %eax
480         vmovd   %eax, $Y1
481
482         mov     %rax, %rdx
483         imulq   -128($np), %rax
484          vpbroadcastq   $Y1, $Y1
485         add     %rax, $r0
486         mov     %rdx, %rax
487         imulq   8-128($np), %rax
488         shr     \$29, $r0
489         add     %rax, $r1
490         mov     %rdx, %rax
491         imulq   16-128($np), %rax
492         add     $r0, $r1
493         add     %rax, $r2
494         imulq   24-128($np), %rdx
495         add     %rdx, $r3
496
497         mov     $r1, %rax
498         imull   $n0, %eax
499         and     \$0x1fffffff, %eax
500
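        # Montgomery reduction proper: nine passes of four digits each.
        # Every digit gets its own 29-bit factor y = (acc*n0) mod 2^29
        # (n0 being -1/N modulo 2^29), so adding y*N clears that digit of
        # the accumulator; the scalar imul chain keeps the four least
        # significant digits up to date in %r9-%r12.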
501         mov \$9, $i
502         jmp .LOOP_REDUCE_1024
503
504 .align  32
505 .LOOP_REDUCE_1024:
506         vmovd   %eax, $Y2
507         vpbroadcastq    $Y2, $Y2
508
509         vpmuludq        32*1-128($np), $Y1, $TEMP0
510          mov    %rax, %rdx
511          imulq  -128($np), %rax
512         vpaddq          $TEMP0, $ACC1, $ACC1
513          add    %rax, $r1
514         vpmuludq        32*2-128($np), $Y1, $TEMP1
515          mov    %rdx, %rax
516          imulq  8-128($np), %rax
517         vpaddq          $TEMP1, $ACC2, $ACC2
518         vpmuludq        32*3-128($np), $Y1, $TEMP2
519          .byte  0x67
520          add    %rax, $r2
521          .byte  0x67
522          mov    %rdx, %rax
523          imulq  16-128($np), %rax
524          shr    \$29, $r1
525         vpaddq          $TEMP2, $ACC3, $ACC3
526         vpmuludq        32*4-128($np), $Y1, $TEMP0
527          add    %rax, $r3
528          add    $r1, $r2
529         vpaddq          $TEMP0, $ACC4, $ACC4
530         vpmuludq        32*5-128($np), $Y1, $TEMP1
531          mov    $r2, %rax
532          imull  $n0, %eax
533         vpaddq          $TEMP1, $ACC5, $ACC5
534         vpmuludq        32*6-128($np), $Y1, $TEMP2
535          and    \$0x1fffffff, %eax
536         vpaddq          $TEMP2, $ACC6, $ACC6
537         vpmuludq        32*7-128($np), $Y1, $TEMP0
538         vpaddq          $TEMP0, $ACC7, $ACC7
539         vpmuludq        32*8-128($np), $Y1, $TEMP1
540          vmovd  %eax, $Y1
541          #vmovdqu       32*1-8-128($np), $TEMP2         # moved below
542         vpaddq          $TEMP1, $ACC8, $ACC8
543          #vmovdqu       32*2-8-128($np), $TEMP0         # moved below
544          vpbroadcastq   $Y1, $Y1
545
546         vpmuludq        32*1-8-128($np), $Y2, $TEMP2    # see above
547         vmovdqu         32*3-8-128($np), $TEMP1
548          mov    %rax, %rdx
549          imulq  -128($np), %rax
550         vpaddq          $TEMP2, $ACC1, $ACC1
551         vpmuludq        32*2-8-128($np), $Y2, $TEMP0    # see above
552         vmovdqu         32*4-8-128($np), $TEMP2
553          add    %rax, $r2
554          mov    %rdx, %rax
555          imulq  8-128($np), %rax
556         vpaddq          $TEMP0, $ACC2, $ACC2
557          add    $r3, %rax
558          shr    \$29, $r2
559         vpmuludq        $Y2, $TEMP1, $TEMP1
560         vmovdqu         32*5-8-128($np), $TEMP0
561          add    $r2, %rax
562         vpaddq          $TEMP1, $ACC3, $ACC3
563         vpmuludq        $Y2, $TEMP2, $TEMP2
564         vmovdqu         32*6-8-128($np), $TEMP1
565          .byte  0x67
566          mov    %rax, $r3
567          imull  $n0, %eax
568         vpaddq          $TEMP2, $ACC4, $ACC4
569         vpmuludq        $Y2, $TEMP0, $TEMP0
570         .byte   0xc4,0x41,0x7e,0x6f,0x9d,0x58,0x00,0x00,0x00    # vmovdqu               32*7-8-128($np), $TEMP2
571          and    \$0x1fffffff, %eax
572         vpaddq          $TEMP0, $ACC5, $ACC5
573         vpmuludq        $Y2, $TEMP1, $TEMP1
574         vmovdqu         32*8-8-128($np), $TEMP0
575         vpaddq          $TEMP1, $ACC6, $ACC6
576         vpmuludq        $Y2, $TEMP2, $TEMP2
577         vmovdqu         32*9-8-128($np), $ACC9
578          vmovd  %eax, $ACC0                     # borrow ACC0 for Y2
579          imulq  -128($np), %rax
580         vpaddq          $TEMP2, $ACC7, $ACC7
581         vpmuludq        $Y2, $TEMP0, $TEMP0
582          vmovdqu        32*1-16-128($np), $TEMP1
583          vpbroadcastq   $ACC0, $ACC0
584         vpaddq          $TEMP0, $ACC8, $ACC8
585         vpmuludq        $Y2, $ACC9, $ACC9
586          vmovdqu        32*2-16-128($np), $TEMP2
587          add    %rax, $r3
588
589 ___
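# The factor just broadcast sits in the register that has been called $ACC0
# so far ("borrow ACC0 for Y2" above); swap the Perl-level names so that the
# code below keeps calling it $Y2.  The swap is undone after the loop.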
590 ($ACC0,$Y2)=($Y2,$ACC0);
591 $code.=<<___;
592          vmovdqu        32*1-24-128($np), $ACC0
593         vpmuludq        $Y1, $TEMP1, $TEMP1
594         vmovdqu         32*3-16-128($np), $TEMP0
595         vpaddq          $TEMP1, $ACC1, $ACC1
596          vpmuludq       $Y2, $ACC0, $ACC0
597         vpmuludq        $Y1, $TEMP2, $TEMP2
598         .byte   0xc4,0x41,0x7e,0x6f,0xb5,0xf0,0xff,0xff,0xff    # vmovdqu               32*4-16-128($np), $TEMP1
599          vpaddq         $ACC1, $ACC0, $ACC0
600         vpaddq          $TEMP2, $ACC2, $ACC2
601         vpmuludq        $Y1, $TEMP0, $TEMP0
602         vmovdqu         32*5-16-128($np), $TEMP2
603          .byte  0x67
604          vmovq          $ACC0, %rax
605          vmovdqu        $ACC0, (%rsp)           # transfer $r0-$r3
606         vpaddq          $TEMP0, $ACC3, $ACC3
607         vpmuludq        $Y1, $TEMP1, $TEMP1
608         vmovdqu         32*6-16-128($np), $TEMP0
609         vpaddq          $TEMP1, $ACC4, $ACC4
610         vpmuludq        $Y1, $TEMP2, $TEMP2
611         vmovdqu         32*7-16-128($np), $TEMP1
612         vpaddq          $TEMP2, $ACC5, $ACC5
613         vpmuludq        $Y1, $TEMP0, $TEMP0
614         vmovdqu         32*8-16-128($np), $TEMP2
615         vpaddq          $TEMP0, $ACC6, $ACC6
616         vpmuludq        $Y1, $TEMP1, $TEMP1
617          shr    \$29, $r3
618         vmovdqu         32*9-16-128($np), $TEMP0
619          add    $r3, %rax
620         vpaddq          $TEMP1, $ACC7, $ACC7
621         vpmuludq        $Y1, $TEMP2, $TEMP2
622          #vmovdqu       32*2-24-128($np), $TEMP1        # moved below
623          mov    %rax, $r0
624          imull  $n0, %eax
625         vpaddq          $TEMP2, $ACC8, $ACC8
626         vpmuludq        $Y1, $TEMP0, $TEMP0
627          and    \$0x1fffffff, %eax
628          vmovd  %eax, $Y1
629          vmovdqu        32*3-24-128($np), $TEMP2
630         .byte   0x67
631         vpaddq          $TEMP0, $ACC9, $ACC9
632          vpbroadcastq   $Y1, $Y1
633
634         vpmuludq        32*2-24-128($np), $Y2, $TEMP1   # see above
635         vmovdqu         32*4-24-128($np), $TEMP0
636          mov    %rax, %rdx
637          imulq  -128($np), %rax
638          mov    8(%rsp), $r1
639         vpaddq          $TEMP1, $ACC2, $ACC1
640         vpmuludq        $Y2, $TEMP2, $TEMP2
641         vmovdqu         32*5-24-128($np), $TEMP1
642          add    %rax, $r0
643          mov    %rdx, %rax
644          imulq  8-128($np), %rax
645          .byte  0x67
646          shr    \$29, $r0
647          mov    16(%rsp), $r2
648         vpaddq          $TEMP2, $ACC3, $ACC2
649         vpmuludq        $Y2, $TEMP0, $TEMP0
650         vmovdqu         32*6-24-128($np), $TEMP2
651          add    %rax, $r1
652          mov    %rdx, %rax
653          imulq  16-128($np), %rax
654         vpaddq          $TEMP0, $ACC4, $ACC3
655         vpmuludq        $Y2, $TEMP1, $TEMP1
656         vmovdqu         32*7-24-128($np), $TEMP0
657          imulq  24-128($np), %rdx               # future $r3
658          add    %rax, $r2
659          lea    ($r0,$r1), %rax
660         vpaddq          $TEMP1, $ACC5, $ACC4
661         vpmuludq        $Y2, $TEMP2, $TEMP2
662         vmovdqu         32*8-24-128($np), $TEMP1
663          mov    %rax, $r1
664          imull  $n0, %eax
665         vpmuludq        $Y2, $TEMP0, $TEMP0
666         vpaddq          $TEMP2, $ACC6, $ACC5
667         vmovdqu         32*9-24-128($np), $TEMP2
668          and    \$0x1fffffff, %eax
669         vpaddq          $TEMP0, $ACC7, $ACC6
670         vpmuludq        $Y2, $TEMP1, $TEMP1
671          add    24(%rsp), %rdx
672         vpaddq          $TEMP1, $ACC8, $ACC7
673         vpmuludq        $Y2, $TEMP2, $TEMP2
674         vpaddq          $TEMP2, $ACC9, $ACC8
675          vmovq  $r3, $ACC9
676          mov    %rdx, $r3
677
678         dec     $i
679         jnz     .LOOP_REDUCE_1024
680 ___
681 ($ACC0,$Y2)=($Y2,$ACC0);
682 $code.=<<___;
683         lea     448(%rsp), $tp1                 # size optimization
684         vpaddq  $ACC9, $Y2, $ACC0
685         vpxor   $ZERO, $ZERO, $ZERO
686
687         vpaddq          32*9-192($tp0), $ACC0, $ACC0
688         vpaddq          32*10-448($tp1), $ACC1, $ACC1
689         vpaddq          32*11-448($tp1), $ACC2, $ACC2
690         vpaddq          32*12-448($tp1), $ACC3, $ACC3
691         vpaddq          32*13-448($tp1), $ACC4, $ACC4
692         vpaddq          32*14-448($tp1), $ACC5, $ACC5
693         vpaddq          32*15-448($tp1), $ACC6, $ACC6
694         vpaddq          32*16-448($tp1), $ACC7, $ACC7
695         vpaddq          32*17-448($tp1), $ACC8, $ACC8
696
697         vpsrlq          \$29, $ACC0, $TEMP1
698         vpand           $AND_MASK, $ACC0, $ACC0
699         vpsrlq          \$29, $ACC1, $TEMP2
700         vpand           $AND_MASK, $ACC1, $ACC1
701         vpsrlq          \$29, $ACC2, $TEMP3
702         vpermq          \$0x93, $TEMP1, $TEMP1
703         vpand           $AND_MASK, $ACC2, $ACC2
704         vpsrlq          \$29, $ACC3, $TEMP4
705         vpermq          \$0x93, $TEMP2, $TEMP2
706         vpand           $AND_MASK, $ACC3, $ACC3
707         vpermq          \$0x93, $TEMP3, $TEMP3
708
709         vpblendd        \$3, $ZERO, $TEMP1, $TEMP0
710         vpermq          \$0x93, $TEMP4, $TEMP4
711         vpblendd        \$3, $TEMP1, $TEMP2, $TEMP1
712         vpaddq          $TEMP0, $ACC0, $ACC0
713         vpblendd        \$3, $TEMP2, $TEMP3, $TEMP2
714         vpaddq          $TEMP1, $ACC1, $ACC1
715         vpblendd        \$3, $TEMP3, $TEMP4, $TEMP3
716         vpaddq          $TEMP2, $ACC2, $ACC2
717         vpblendd        \$3, $TEMP4, $ZERO, $TEMP4
718         vpaddq          $TEMP3, $ACC3, $ACC3
719         vpaddq          $TEMP4, $ACC4, $ACC4
720
721         vpsrlq          \$29, $ACC0, $TEMP1
722         vpand           $AND_MASK, $ACC0, $ACC0
723         vpsrlq          \$29, $ACC1, $TEMP2
724         vpand           $AND_MASK, $ACC1, $ACC1
725         vpsrlq          \$29, $ACC2, $TEMP3
726         vpermq          \$0x93, $TEMP1, $TEMP1
727         vpand           $AND_MASK, $ACC2, $ACC2
728         vpsrlq          \$29, $ACC3, $TEMP4
729         vpermq          \$0x93, $TEMP2, $TEMP2
730         vpand           $AND_MASK, $ACC3, $ACC3
731         vpermq          \$0x93, $TEMP3, $TEMP3
732
733         vpblendd        \$3, $ZERO, $TEMP1, $TEMP0
734         vpermq          \$0x93, $TEMP4, $TEMP4
735         vpblendd        \$3, $TEMP1, $TEMP2, $TEMP1
736         vpaddq          $TEMP0, $ACC0, $ACC0
737         vpblendd        \$3, $TEMP2, $TEMP3, $TEMP2
738         vpaddq          $TEMP1, $ACC1, $ACC1
739         vmovdqu         $ACC0, 32*0-128($rp)
740         vpblendd        \$3, $TEMP3, $TEMP4, $TEMP3
741         vpaddq          $TEMP2, $ACC2, $ACC2
742         vmovdqu         $ACC1, 32*1-128($rp)
743         vpblendd        \$3, $TEMP4, $ZERO, $TEMP4
744         vpaddq          $TEMP3, $ACC3, $ACC3
745         vmovdqu         $ACC2, 32*2-128($rp)
746         vpaddq          $TEMP4, $ACC4, $ACC4
747         vmovdqu         $ACC3, 32*3-128($rp)
748 ___
749 $TEMP5=$ACC0;
750 $code.=<<___;
751         vpsrlq          \$29, $ACC4, $TEMP1
752         vpand           $AND_MASK, $ACC4, $ACC4
753         vpsrlq          \$29, $ACC5, $TEMP2
754         vpand           $AND_MASK, $ACC5, $ACC5
755         vpsrlq          \$29, $ACC6, $TEMP3
756         vpermq          \$0x93, $TEMP1, $TEMP1
757         vpand           $AND_MASK, $ACC6, $ACC6
758         vpsrlq          \$29, $ACC7, $TEMP4
759         vpermq          \$0x93, $TEMP2, $TEMP2
760         vpand           $AND_MASK, $ACC7, $ACC7
761         vpsrlq          \$29, $ACC8, $TEMP5
762         vpermq          \$0x93, $TEMP3, $TEMP3
763         vpand           $AND_MASK, $ACC8, $ACC8
764         vpermq          \$0x93, $TEMP4, $TEMP4
765
766         vpblendd        \$3, $ZERO, $TEMP1, $TEMP0
767         vpermq          \$0x93, $TEMP5, $TEMP5
768         vpblendd        \$3, $TEMP1, $TEMP2, $TEMP1
769         vpaddq          $TEMP0, $ACC4, $ACC4
770         vpblendd        \$3, $TEMP2, $TEMP3, $TEMP2
771         vpaddq          $TEMP1, $ACC5, $ACC5
772         vpblendd        \$3, $TEMP3, $TEMP4, $TEMP3
773         vpaddq          $TEMP2, $ACC6, $ACC6
774         vpblendd        \$3, $TEMP4, $TEMP5, $TEMP4
775         vpaddq          $TEMP3, $ACC7, $ACC7
776         vpaddq          $TEMP4, $ACC8, $ACC8
777      
778         vpsrlq          \$29, $ACC4, $TEMP1
779         vpand           $AND_MASK, $ACC4, $ACC4
780         vpsrlq          \$29, $ACC5, $TEMP2
781         vpand           $AND_MASK, $ACC5, $ACC5
782         vpsrlq          \$29, $ACC6, $TEMP3
783         vpermq          \$0x93, $TEMP1, $TEMP1
784         vpand           $AND_MASK, $ACC6, $ACC6
785         vpsrlq          \$29, $ACC7, $TEMP4
786         vpermq          \$0x93, $TEMP2, $TEMP2
787         vpand           $AND_MASK, $ACC7, $ACC7
788         vpsrlq          \$29, $ACC8, $TEMP5
789         vpermq          \$0x93, $TEMP3, $TEMP3
790         vpand           $AND_MASK, $ACC8, $ACC8
791         vpermq          \$0x93, $TEMP4, $TEMP4
792
793         vpblendd        \$3, $ZERO, $TEMP1, $TEMP0
794         vpermq          \$0x93, $TEMP5, $TEMP5
795         vpblendd        \$3, $TEMP1, $TEMP2, $TEMP1
796         vpaddq          $TEMP0, $ACC4, $ACC4
797         vpblendd        \$3, $TEMP2, $TEMP3, $TEMP2
798         vpaddq          $TEMP1, $ACC5, $ACC5
799         vmovdqu         $ACC4, 32*4-128($rp)
800         vpblendd        \$3, $TEMP3, $TEMP4, $TEMP3
801         vpaddq          $TEMP2, $ACC6, $ACC6
802         vmovdqu         $ACC5, 32*5-128($rp)
803         vpblendd        \$3, $TEMP4, $TEMP5, $TEMP4
804         vpaddq          $TEMP3, $ACC7, $ACC7
805         vmovdqu         $ACC6, 32*6-128($rp)
806         vpaddq          $TEMP4, $ACC8, $ACC8
807         vmovdqu         $ACC7, 32*7-128($rp)
808         vmovdqu         $ACC8, 32*8-128($rp)
809
810         mov     $rp, $ap
811         dec     $rep
812         jne     .LOOP_GRANDE_SQR_1024
813
814         vzeroall
815         mov     %rbp, %rax
816 ___
817 $code.=<<___ if ($win64);
818         movaps  -0xd8(%rax),%xmm6
819         movaps  -0xc8(%rax),%xmm7
820         movaps  -0xb8(%rax),%xmm8
821         movaps  -0xa8(%rax),%xmm9
822         movaps  -0x98(%rax),%xmm10
823         movaps  -0x88(%rax),%xmm11
824         movaps  -0x78(%rax),%xmm12
825         movaps  -0x68(%rax),%xmm13
826         movaps  -0x58(%rax),%xmm14
827         movaps  -0x48(%rax),%xmm15
828 ___
829 $code.=<<___;
830         mov     -48(%rax),%r15
831         mov     -40(%rax),%r14
832         mov     -32(%rax),%r13
833         mov     -24(%rax),%r12
834         mov     -16(%rax),%rbp
835         mov     -8(%rax),%rbx
836         lea     (%rax),%rsp             # restore %rsp
837 .Lsqr_1024_epilogue:
838         ret
839 .size   rsaz_1024_sqr_avx2,.-rsaz_1024_sqr_avx2
840 ___
841 }
842
843 { # void AMM_WW(
844 my $rp="%rdi";  # BN_ULONG *rp,
845 my $ap="%rsi";  # const BN_ULONG *ap,
846 my $bp="%rdx";  # const BN_ULONG *bp,
847 my $np="%rcx";  # const BN_ULONG *np,
848 my $n0="%r8d";  # unsigned int n0);
849
850 # The registers that hold the accumulated redundant result
851 # The AMM works on 1024-bit operands, and the redundant word size is 29 bits.
852 # Therefore: ceil(1024/29)/4 = 9 ymm registers are needed.
853 my $ACC0="%ymm0";
854 my $ACC1="%ymm1";
855 my $ACC2="%ymm2";
856 my $ACC3="%ymm3";
857 my $ACC4="%ymm4";
858 my $ACC5="%ymm5";
859 my $ACC6="%ymm6";
860 my $ACC7="%ymm7";
861 my $ACC8="%ymm8";
862 my $ACC9="%ymm9";
863
864 # Registers that hold the broadcasted words of multiplier, currently used
865 my $Bi="%ymm10";
866 my $Yi="%ymm11";
867
868 # Helper registers
869 my $TEMP0=$ACC0;
870 my $TEMP1="%ymm12";
871 my $TEMP2="%ymm13";
872 my $ZERO="%ymm14";
873 my $AND_MASK="%ymm15";
874
875 # alu registers that hold the first words of the ACC
876 my $r0="%r9";
877 my $r1="%r10";
878 my $r2="%r11";
879 my $r3="%r12";
880
881 my $i="%r14d";
882 my $tmp="%r15";
883
884 $bp="%r13";     # reassigned argument
885
886 $code.=<<___;
887 .globl  rsaz_1024_mul_avx2
888 .type   rsaz_1024_mul_avx2,\@function,5
889 .align  64
890 rsaz_1024_mul_avx2:
891         lea     (%rsp), %rax
892         push    %rbx
893         push    %rbp
894         push    %r12
895         push    %r13
896         push    %r14
897         push    %r15
898 ___
899 $code.=<<___ if ($win64);
900         vzeroupper
901         lea     -0xa8(%rsp),%rsp
902         vmovaps %xmm6,-0xd8(%rax)
903         vmovaps %xmm7,-0xc8(%rax)
904         vmovaps %xmm8,-0xb8(%rax)
905         vmovaps %xmm9,-0xa8(%rax)
906         vmovaps %xmm10,-0x98(%rax)
907         vmovaps %xmm11,-0x88(%rax)
908         vmovaps %xmm12,-0x78(%rax)
909         vmovaps %xmm13,-0x68(%rax)
910         vmovaps %xmm14,-0x58(%rax)
911         vmovaps %xmm15,-0x48(%rax)
912 .Lmul_1024_body:
913 ___
914 $code.=<<___;
915         mov     %rax,%rbp
916         vzeroall
917         mov     %rdx, $bp       # reassigned argument
918         sub     \$64,%rsp
919
920         # unaligned 256-bit load that crosses page boundary can
921         # cause severe performance degradation here, so if $ap does
922         # cross page boundary, swap it with $bp [meaning that caller
923         # is advised to lay down $ap and $bp next to each other, so
924         # that only one can cross page boundary].
925         .byte   0x67,0x67
926         mov     $ap, $tmp
927         and     \$4095, $tmp
928         add     \$32*10, $tmp
929         shr     \$12, $tmp
930         mov     $ap, $tmp
931         cmovnz  $bp, $ap
932         cmovnz  $tmp, $bp
933
934         mov     $np, $tmp
935         sub     \$-128,$ap      # size optimization
936         sub     \$-128,$np
937         sub     \$-128,$rp
938
939         and     \$4095, $tmp    # see if $np crosses page
940         add     \$32*10, $tmp
941         .byte   0x67,0x67
942         shr     \$12, $tmp
943         jz      .Lmul_1024_no_n_copy
944
945         # unaligned 256-bit load that crosses page boundary can
946         # cause severe performance degradation here, so if $np does
947         # cross page boundary, copy it to stack and make sure stack
948         # frame doesn't...
949         sub             \$32*10,%rsp
950         vmovdqu         32*0-128($np), $ACC0
951         and             \$-512, %rsp
952         vmovdqu         32*1-128($np), $ACC1
953         vmovdqu         32*2-128($np), $ACC2
954         vmovdqu         32*3-128($np), $ACC3
955         vmovdqu         32*4-128($np), $ACC4
956         vmovdqu         32*5-128($np), $ACC5
957         vmovdqu         32*6-128($np), $ACC6
958         vmovdqu         32*7-128($np), $ACC7
959         vmovdqu         32*8-128($np), $ACC8
960         lea             64+128(%rsp),$np
961         vmovdqu         $ACC0, 32*0-128($np)
962         vpxor           $ACC0, $ACC0, $ACC0
963         vmovdqu         $ACC1, 32*1-128($np)
964         vpxor           $ACC1, $ACC1, $ACC1
965         vmovdqu         $ACC2, 32*2-128($np)
966         vpxor           $ACC2, $ACC2, $ACC2
967         vmovdqu         $ACC3, 32*3-128($np)
968         vpxor           $ACC3, $ACC3, $ACC3
969         vmovdqu         $ACC4, 32*4-128($np)
970         vpxor           $ACC4, $ACC4, $ACC4
971         vmovdqu         $ACC5, 32*5-128($np)
972         vpxor           $ACC5, $ACC5, $ACC5
973         vmovdqu         $ACC6, 32*6-128($np)
974         vpxor           $ACC6, $ACC6, $ACC6
975         vmovdqu         $ACC7, 32*7-128($np)
976         vpxor           $ACC7, $ACC7, $ACC7
977         vmovdqu         $ACC8, 32*8-128($np)
978         vmovdqa         $ACC0, $ACC8
979         vmovdqu         $ACC9, 32*9-128($np)    # $ACC9 is zero after vzeroall
980 .Lmul_1024_no_n_copy:
981         and     \$-64,%rsp
982
983         mov     ($bp), %rbx
984         vpbroadcastq ($bp), $Bi
985         vmovdqu $ACC0, (%rsp)                   # clear top of stack
986         xor     $r0, $r0
987         .byte   0x67
988         xor     $r1, $r1
989         xor     $r2, $r2
990         xor     $r3, $r3
991
992         vmovdqu .Land_mask(%rip), $AND_MASK
993         mov     \$9, $i
994         jmp     .Loop_mul_1024
995
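        # Each pass of .Loop_mul_1024 consumes four consecutive 29-bit
        # words of the multiplier (broadcast one at a time from bp), and
        # for each of them derives the matching factor from n0 that folds
        # in a multiple of the modulus; nine passes cover all 36 digits.
        # The scalar imulq chain tracks the four least significant digits
        # in %r9-%r12 while the ymm units accumulate the rest.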
996 .align  32
997 .Loop_mul_1024:
998          vpsrlq         \$29, $ACC3, $ACC9              # correct $ACC3(*)
999         mov     %rbx, %rax
1000         imulq   -128($ap), %rax
1001         add     $r0, %rax
1002         mov     %rbx, $r1
1003         imulq   8-128($ap), $r1
1004         add     8(%rsp), $r1
1005
1006         mov     %rax, $r0
1007         imull   $n0, %eax
1008         and     \$0x1fffffff, %eax
1009
1010          mov    %rbx, $r2
1011          imulq  16-128($ap), $r2
1012          add    16(%rsp), $r2
1013
1014          mov    %rbx, $r3
1015          imulq  24-128($ap), $r3
1016          add    24(%rsp), $r3
1017         vpmuludq        32*1-128($ap),$Bi,$TEMP0
1018          vmovd          %eax, $Yi
1019         vpaddq          $TEMP0,$ACC1,$ACC1
1020         vpmuludq        32*2-128($ap),$Bi,$TEMP1
1021          vpbroadcastq   $Yi, $Yi
1022         vpaddq          $TEMP1,$ACC2,$ACC2
1023         vpmuludq        32*3-128($ap),$Bi,$TEMP2
1024          vpand          $AND_MASK, $ACC3, $ACC3         # correct $ACC3
1025         vpaddq          $TEMP2,$ACC3,$ACC3
1026         vpmuludq        32*4-128($ap),$Bi,$TEMP0
1027         vpaddq          $TEMP0,$ACC4,$ACC4
1028         vpmuludq        32*5-128($ap),$Bi,$TEMP1
1029         vpaddq          $TEMP1,$ACC5,$ACC5
1030         vpmuludq        32*6-128($ap),$Bi,$TEMP2
1031         vpaddq          $TEMP2,$ACC6,$ACC6
1032         vpmuludq        32*7-128($ap),$Bi,$TEMP0
1033          vpermq         \$0x93, $ACC9, $ACC9            # correct $ACC3
1034         vpaddq          $TEMP0,$ACC7,$ACC7
1035         vpmuludq        32*8-128($ap),$Bi,$TEMP1
1036          vpbroadcastq   8($bp), $Bi
1037         vpaddq          $TEMP1,$ACC8,$ACC8
1038
1039         mov     %rax,%rdx
1040         imulq   -128($np),%rax
1041         add     %rax,$r0
1042         mov     %rdx,%rax
1043         imulq   8-128($np),%rax
1044         add     %rax,$r1
1045         mov     %rdx,%rax
1046         imulq   16-128($np),%rax
1047         add     %rax,$r2
1048         shr     \$29, $r0
1049         imulq   24-128($np),%rdx
1050         add     %rdx,$r3
1051         add     $r0, $r1
1052
1053         vpmuludq        32*1-128($np),$Yi,$TEMP2
1054          vmovq          $Bi, %rbx
1055         vpaddq          $TEMP2,$ACC1,$ACC1
1056         vpmuludq        32*2-128($np),$Yi,$TEMP0
1057         vpaddq          $TEMP0,$ACC2,$ACC2
1058         vpmuludq        32*3-128($np),$Yi,$TEMP1
1059         vpaddq          $TEMP1,$ACC3,$ACC3
1060         vpmuludq        32*4-128($np),$Yi,$TEMP2
1061         vpaddq          $TEMP2,$ACC4,$ACC4
1062         vpmuludq        32*5-128($np),$Yi,$TEMP0
1063         vpaddq          $TEMP0,$ACC5,$ACC5
1064         vpmuludq        32*6-128($np),$Yi,$TEMP1
1065         vpaddq          $TEMP1,$ACC6,$ACC6
1066         vpmuludq        32*7-128($np),$Yi,$TEMP2
1067          vpblendd       \$3, $ZERO, $ACC9, $ACC9        # correct $ACC3
1068         vpaddq          $TEMP2,$ACC7,$ACC7
1069         vpmuludq        32*8-128($np),$Yi,$TEMP0
1070          vpaddq         $ACC9, $ACC3, $ACC3             # correct $ACC3
1071         vpaddq          $TEMP0,$ACC8,$ACC8
1072
1073         mov     %rbx, %rax
1074         imulq   -128($ap),%rax
1075         add     %rax,$r1
1076          vmovdqu        -8+32*1-128($ap),$TEMP1
1077         mov     %rbx, %rax
1078         imulq   8-128($ap),%rax
1079         add     %rax,$r2
1080          vmovdqu        -8+32*2-128($ap),$TEMP2
1081
1082         mov     $r1, %rax
1083         imull   $n0, %eax
1084         and     \$0x1fffffff, %eax
1085
1086          imulq  16-128($ap),%rbx
1087          add    %rbx,$r3
1088         vpmuludq        $Bi,$TEMP1,$TEMP1
1089          vmovd          %eax, $Yi
1090         vmovdqu         -8+32*3-128($ap),$TEMP0
1091         vpaddq          $TEMP1,$ACC1,$ACC1
1092         vpmuludq        $Bi,$TEMP2,$TEMP2
1093          vpbroadcastq   $Yi, $Yi
1094         vmovdqu         -8+32*4-128($ap),$TEMP1
1095         vpaddq          $TEMP2,$ACC2,$ACC2
1096         vpmuludq        $Bi,$TEMP0,$TEMP0
1097         vmovdqu         -8+32*5-128($ap),$TEMP2
1098         vpaddq          $TEMP0,$ACC3,$ACC3
1099         vpmuludq        $Bi,$TEMP1,$TEMP1
1100         vmovdqu         -8+32*6-128($ap),$TEMP0
1101         vpaddq          $TEMP1,$ACC4,$ACC4
1102         vpmuludq        $Bi,$TEMP2,$TEMP2
1103         vmovdqu         -8+32*7-128($ap),$TEMP1
1104         vpaddq          $TEMP2,$ACC5,$ACC5
1105         vpmuludq        $Bi,$TEMP0,$TEMP0
1106         vmovdqu         -8+32*8-128($ap),$TEMP2
1107         vpaddq          $TEMP0,$ACC6,$ACC6
1108         vpmuludq        $Bi,$TEMP1,$TEMP1
1109         vmovdqu         -8+32*9-128($ap),$ACC9
1110         vpaddq          $TEMP1,$ACC7,$ACC7
1111         vpmuludq        $Bi,$TEMP2,$TEMP2
1112         vpaddq          $TEMP2,$ACC8,$ACC8
1113         vpmuludq        $Bi,$ACC9,$ACC9
1114          vpbroadcastq   16($bp), $Bi
1115
1116         mov     %rax,%rdx
1117         imulq   -128($np),%rax
1118         add     %rax,$r1
1119          vmovdqu        -8+32*1-128($np),$TEMP0
1120         mov     %rdx,%rax
1121         imulq   8-128($np),%rax
1122         add     %rax,$r2
1123          vmovdqu        -8+32*2-128($np),$TEMP1
1124         shr     \$29, $r1
1125         imulq   16-128($np),%rdx
1126         add     %rdx,$r3
1127         add     $r1, $r2
1128
1129         vpmuludq        $Yi,$TEMP0,$TEMP0
1130          vmovq          $Bi, %rbx
1131         vmovdqu         -8+32*3-128($np),$TEMP2
1132         vpaddq          $TEMP0,$ACC1,$ACC1
1133         vpmuludq        $Yi,$TEMP1,$TEMP1
1134         vmovdqu         -8+32*4-128($np),$TEMP0
1135         vpaddq          $TEMP1,$ACC2,$ACC2
1136         vpmuludq        $Yi,$TEMP2,$TEMP2
1137         vmovdqu         -8+32*5-128($np),$TEMP1
1138         vpaddq          $TEMP2,$ACC3,$ACC3
1139         vpmuludq        $Yi,$TEMP0,$TEMP0
1140         vmovdqu         -8+32*6-128($np),$TEMP2
1141         vpaddq          $TEMP0,$ACC4,$ACC4
1142         vpmuludq        $Yi,$TEMP1,$TEMP1
1143         vmovdqu         -8+32*7-128($np),$TEMP0
1144         vpaddq          $TEMP1,$ACC5,$ACC5
1145         vpmuludq        $Yi,$TEMP2,$TEMP2
1146         vmovdqu         -8+32*8-128($np),$TEMP1
1147         vpaddq          $TEMP2,$ACC6,$ACC6
1148         vpmuludq        $Yi,$TEMP0,$TEMP0
1149         vmovdqu         -8+32*9-128($np),$TEMP2
1150         vpaddq          $TEMP0,$ACC7,$ACC7
1151         vpmuludq        $Yi,$TEMP1,$TEMP1
1152         vpaddq          $TEMP1,$ACC8,$ACC8
1153         vpmuludq        $Yi,$TEMP2,$TEMP2
1154         vpaddq          $TEMP2,$ACC9,$ACC9
1155
1156          vmovdqu        -16+32*1-128($ap),$TEMP0
1157         mov     %rbx,%rax
1158         imulq   -128($ap),%rax
1159         add     $r2,%rax
1160
1161          vmovdqu        -16+32*2-128($ap),$TEMP1
1162         mov     %rax,$r2
1163         imull   $n0, %eax
1164         and     \$0x1fffffff, %eax
1165
1166          imulq  8-128($ap),%rbx
1167          add    %rbx,$r3
1168         vpmuludq        $Bi,$TEMP0,$TEMP0
1169          vmovd          %eax, $Yi
1170         vmovdqu         -16+32*3-128($ap),$TEMP2
1171         vpaddq          $TEMP0,$ACC1,$ACC1
1172         vpmuludq        $Bi,$TEMP1,$TEMP1
1173          vpbroadcastq   $Yi, $Yi
1174         vmovdqu         -16+32*4-128($ap),$TEMP0
1175         vpaddq          $TEMP1,$ACC2,$ACC2
1176         vpmuludq        $Bi,$TEMP2,$TEMP2
1177         vmovdqu         -16+32*5-128($ap),$TEMP1
1178         vpaddq          $TEMP2,$ACC3,$ACC3
1179         vpmuludq        $Bi,$TEMP0,$TEMP0
1180         vmovdqu         -16+32*6-128($ap),$TEMP2
1181         vpaddq          $TEMP0,$ACC4,$ACC4
1182         vpmuludq        $Bi,$TEMP1,$TEMP1
1183         vmovdqu         -16+32*7-128($ap),$TEMP0
1184         vpaddq          $TEMP1,$ACC5,$ACC5
1185         vpmuludq        $Bi,$TEMP2,$TEMP2
1186         vmovdqu         -16+32*8-128($ap),$TEMP1
1187         vpaddq          $TEMP2,$ACC6,$ACC6
1188         vpmuludq        $Bi,$TEMP0,$TEMP0
1189         vmovdqu         -16+32*9-128($ap),$TEMP2
1190         vpaddq          $TEMP0,$ACC7,$ACC7
1191         vpmuludq        $Bi,$TEMP1,$TEMP1
1192         vpaddq          $TEMP1,$ACC8,$ACC8
1193         vpmuludq        $Bi,$TEMP2,$TEMP2
1194          vpbroadcastq   24($bp), $Bi
1195         vpaddq          $TEMP2,$ACC9,$ACC9
1196
1197          vmovdqu        -16+32*1-128($np),$TEMP0
1198         mov     %rax,%rdx
1199         imulq   -128($np),%rax
1200         add     %rax,$r2
1201          vmovdqu        -16+32*2-128($np),$TEMP1
1202         imulq   8-128($np),%rdx
1203         add     %rdx,$r3
1204         shr     \$29, $r2
1205
1206         vpmuludq        $Yi,$TEMP0,$TEMP0
1207          vmovq          $Bi, %rbx
1208         vmovdqu         -16+32*3-128($np),$TEMP2
1209         vpaddq          $TEMP0,$ACC1,$ACC1
1210         vpmuludq        $Yi,$TEMP1,$TEMP1
1211         vmovdqu         -16+32*4-128($np),$TEMP0
1212         vpaddq          $TEMP1,$ACC2,$ACC2
1213         vpmuludq        $Yi,$TEMP2,$TEMP2
1214         vmovdqu         -16+32*5-128($np),$TEMP1
1215         vpaddq          $TEMP2,$ACC3,$ACC3
1216         vpmuludq        $Yi,$TEMP0,$TEMP0
1217         vmovdqu         -16+32*6-128($np),$TEMP2
1218         vpaddq          $TEMP0,$ACC4,$ACC4
1219         vpmuludq        $Yi,$TEMP1,$TEMP1
1220         vmovdqu         -16+32*7-128($np),$TEMP0
1221         vpaddq          $TEMP1,$ACC5,$ACC5
1222         vpmuludq        $Yi,$TEMP2,$TEMP2
1223         vmovdqu         -16+32*8-128($np),$TEMP1
1224         vpaddq          $TEMP2,$ACC6,$ACC6
1225         vpmuludq        $Yi,$TEMP0,$TEMP0
1226         vmovdqu         -16+32*9-128($np),$TEMP2
1227         vpaddq          $TEMP0,$ACC7,$ACC7
1228         vpmuludq        $Yi,$TEMP1,$TEMP1
1229          vmovdqu        -24+32*1-128($ap),$TEMP0
1230         vpaddq          $TEMP1,$ACC8,$ACC8
1231         vpmuludq        $Yi,$TEMP2,$TEMP2
1232          vmovdqu        -24+32*2-128($ap),$TEMP1
1233         vpaddq          $TEMP2,$ACC9,$ACC9
1234
1235         add     $r2, $r3
1236         imulq   -128($ap),%rbx
1237         add     %rbx,$r3
1238
1239         mov     $r3, %rax
1240         imull   $n0, %eax
1241         and     \$0x1fffffff, %eax
1242
1243         vpmuludq        $Bi,$TEMP0,$TEMP0
1244          vmovd          %eax, $Yi
1245         vmovdqu         -24+32*3-128($ap),$TEMP2
1246         vpaddq          $TEMP0,$ACC1,$ACC1
1247         vpmuludq        $Bi,$TEMP1,$TEMP1
1248          vpbroadcastq   $Yi, $Yi
1249         vmovdqu         -24+32*4-128($ap),$TEMP0
1250         vpaddq          $TEMP1,$ACC2,$ACC2
1251         vpmuludq        $Bi,$TEMP2,$TEMP2
1252         vmovdqu         -24+32*5-128($ap),$TEMP1
1253         vpaddq          $TEMP2,$ACC3,$ACC3
1254         vpmuludq        $Bi,$TEMP0,$TEMP0
1255         vmovdqu         -24+32*6-128($ap),$TEMP2
1256         vpaddq          $TEMP0,$ACC4,$ACC4
1257         vpmuludq        $Bi,$TEMP1,$TEMP1
1258         vmovdqu         -24+32*7-128($ap),$TEMP0
1259         vpaddq          $TEMP1,$ACC5,$ACC5
1260         vpmuludq        $Bi,$TEMP2,$TEMP2
1261         vmovdqu         -24+32*8-128($ap),$TEMP1
1262         vpaddq          $TEMP2,$ACC6,$ACC6
1263         vpmuludq        $Bi,$TEMP0,$TEMP0
1264         vmovdqu         -24+32*9-128($ap),$TEMP2
1265         vpaddq          $TEMP0,$ACC7,$ACC7
1266         vpmuludq        $Bi,$TEMP1,$TEMP1
1267         vpaddq          $TEMP1,$ACC8,$ACC8
1268         vpmuludq        $Bi,$TEMP2,$TEMP2
1269          vpbroadcastq   32($bp), $Bi
1270         vpaddq          $TEMP2,$ACC9,$ACC9
1271          add            \$32, $bp                       # $bp++
1272
1273         vmovdqu         -24+32*1-128($np),$TEMP0
1274         imulq   -128($np),%rax
1275         add     %rax,$r3
1276         shr     \$29, $r3
1277
1278         vmovdqu         -24+32*2-128($np),$TEMP1
1279         vpmuludq        $Yi,$TEMP0,$TEMP0
1280          vmovq          $Bi, %rbx
1281         vmovdqu         -24+32*3-128($np),$TEMP2
1282         vpaddq          $TEMP0,$ACC1,$ACC0              # $ACC0==$TEMP0
1283         vpmuludq        $Yi,$TEMP1,$TEMP1
1284          vmovdqu        $ACC0, (%rsp)                   # transfer $r0-$r3
1285         vpaddq          $TEMP1,$ACC2,$ACC1
1286         vmovdqu         -24+32*4-128($np),$TEMP0
1287         vpmuludq        $Yi,$TEMP2,$TEMP2
1288         vmovdqu         -24+32*5-128($np),$TEMP1
1289         vpaddq          $TEMP2,$ACC3,$ACC2
1290         vpmuludq        $Yi,$TEMP0,$TEMP0
1291         vmovdqu         -24+32*6-128($np),$TEMP2
1292         vpaddq          $TEMP0,$ACC4,$ACC3
1293         vpmuludq        $Yi,$TEMP1,$TEMP1
1294         vmovdqu         -24+32*7-128($np),$TEMP0
1295         vpaddq          $TEMP1,$ACC5,$ACC4
1296         vpmuludq        $Yi,$TEMP2,$TEMP2
1297         vmovdqu         -24+32*8-128($np),$TEMP1
1298         vpaddq          $TEMP2,$ACC6,$ACC5
1299         vpmuludq        $Yi,$TEMP0,$TEMP0
1300         vmovdqu         -24+32*9-128($np),$TEMP2
1301          mov    $r3, $r0
1302         vpaddq          $TEMP0,$ACC7,$ACC6
1303         vpmuludq        $Yi,$TEMP1,$TEMP1
1304          add    (%rsp), $r0
1305         vpaddq          $TEMP1,$ACC8,$ACC7
1306         vpmuludq        $Yi,$TEMP2,$TEMP2
1307          vmovq  $r3, $TEMP1
1308         vpaddq          $TEMP2,$ACC9,$ACC8
1309
1310         dec     $i
1311         jnz     .Loop_mul_1024
1312 ___
1313
1314 # (*)   Original implementation was correcting ACC1-ACC3 for overflow
1315 #       after 7 loop runs, or after 28 iterations, or 56 additions.
1316 #       But as we underutilize resources, it's possible to correct in
1317 #       each iteration with marginal performance loss. And since the
1318 #       correction is done every iteration, fewer digits need to be
1319 #       corrected, which avoids the performance penalty completely.
1320 #       Also note that we correct only three digits out of four; this
1321 #       works because the most significant digit is subjected to fewer additions.
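# (a product of two 29-bit digits fits in 58 bits, so each 64-bit lane has
#  6 bits of headroom, i.e. roughly 64 such products can be accumulated
#  before a lane can overflow; correcting a little on every iteration keeps
#  the count well below that bound)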
1322
1323 $TEMP0 = $ACC9;
1324 $TEMP3 = $Bi;
1325 $TEMP4 = $Yi;
1326 $code.=<<___;
1327         vpermq          \$0, $AND_MASK, $AND_MASK
1328         vpaddq          (%rsp), $TEMP1, $ACC0
1329
1330         vpsrlq          \$29, $ACC0, $TEMP1
1331         vpand           $AND_MASK, $ACC0, $ACC0
1332         vpsrlq          \$29, $ACC1, $TEMP2
1333         vpand           $AND_MASK, $ACC1, $ACC1
1334         vpsrlq          \$29, $ACC2, $TEMP3
1335         vpermq          \$0x93, $TEMP1, $TEMP1
1336         vpand           $AND_MASK, $ACC2, $ACC2
1337         vpsrlq          \$29, $ACC3, $TEMP4
1338         vpermq          \$0x93, $TEMP2, $TEMP2
1339         vpand           $AND_MASK, $ACC3, $ACC3
1340
1341         vpblendd        \$3, $ZERO, $TEMP1, $TEMP0
1342         vpermq          \$0x93, $TEMP3, $TEMP3
1343         vpblendd        \$3, $TEMP1, $TEMP2, $TEMP1
1344         vpermq          \$0x93, $TEMP4, $TEMP4
1345         vpaddq          $TEMP0, $ACC0, $ACC0
1346         vpblendd        \$3, $TEMP2, $TEMP3, $TEMP2
1347         vpaddq          $TEMP1, $ACC1, $ACC1
1348         vpblendd        \$3, $TEMP3, $TEMP4, $TEMP3
1349         vpaddq          $TEMP2, $ACC2, $ACC2
1350         vpblendd        \$3, $TEMP4, $ZERO, $TEMP4
1351         vpaddq          $TEMP3, $ACC3, $ACC3
1352         vpaddq          $TEMP4, $ACC4, $ACC4
1353
1354         vpsrlq          \$29, $ACC0, $TEMP1
1355         vpand           $AND_MASK, $ACC0, $ACC0
1356         vpsrlq          \$29, $ACC1, $TEMP2
1357         vpand           $AND_MASK, $ACC1, $ACC1
1358         vpsrlq          \$29, $ACC2, $TEMP3
1359         vpermq          \$0x93, $TEMP1, $TEMP1
1360         vpand           $AND_MASK, $ACC2, $ACC2
1361         vpsrlq          \$29, $ACC3, $TEMP4
1362         vpermq          \$0x93, $TEMP2, $TEMP2
1363         vpand           $AND_MASK, $ACC3, $ACC3
1364         vpermq          \$0x93, $TEMP3, $TEMP3
1365
1366         vpblendd        \$3, $ZERO, $TEMP1, $TEMP0
1367         vpermq          \$0x93, $TEMP4, $TEMP4
1368         vpblendd        \$3, $TEMP1, $TEMP2, $TEMP1
1369         vpaddq          $TEMP0, $ACC0, $ACC0
1370         vpblendd        \$3, $TEMP2, $TEMP3, $TEMP2
1371         vpaddq          $TEMP1, $ACC1, $ACC1
1372         vpblendd        \$3, $TEMP3, $TEMP4, $TEMP3
1373         vpaddq          $TEMP2, $ACC2, $ACC2
1374         vpblendd        \$3, $TEMP4, $ZERO, $TEMP4
1375         vpaddq          $TEMP3, $ACC3, $ACC3
1376         vpaddq          $TEMP4, $ACC4, $ACC4
1377
1378         vmovdqu         $ACC0, 0-128($rp)
1379         vmovdqu         $ACC1, 32-128($rp)
1380         vmovdqu         $ACC2, 64-128($rp)
1381         vmovdqu         $ACC3, 96-128($rp)
1382 ___
1383
1384 $TEMP5=$ACC0;
1385 $code.=<<___;
1386         vpsrlq          \$29, $ACC4, $TEMP1
1387         vpand           $AND_MASK, $ACC4, $ACC4
1388         vpsrlq          \$29, $ACC5, $TEMP2
1389         vpand           $AND_MASK, $ACC5, $ACC5
1390         vpsrlq          \$29, $ACC6, $TEMP3
1391         vpermq          \$0x93, $TEMP1, $TEMP1
1392         vpand           $AND_MASK, $ACC6, $ACC6
1393         vpsrlq          \$29, $ACC7, $TEMP4
1394         vpermq          \$0x93, $TEMP2, $TEMP2
1395         vpand           $AND_MASK, $ACC7, $ACC7
1396         vpsrlq          \$29, $ACC8, $TEMP5
1397         vpermq          \$0x93, $TEMP3, $TEMP3
1398         vpand           $AND_MASK, $ACC8, $ACC8
1399         vpermq          \$0x93, $TEMP4, $TEMP4
1400
1401         vpblendd        \$3, $ZERO, $TEMP1, $TEMP0
1402         vpermq          \$0x93, $TEMP5, $TEMP5
1403         vpblendd        \$3, $TEMP1, $TEMP2, $TEMP1
1404         vpaddq          $TEMP0, $ACC4, $ACC4
1405         vpblendd        \$3, $TEMP2, $TEMP3, $TEMP2
1406         vpaddq          $TEMP1, $ACC5, $ACC5
1407         vpblendd        \$3, $TEMP3, $TEMP4, $TEMP3
1408         vpaddq          $TEMP2, $ACC6, $ACC6
1409         vpblendd        \$3, $TEMP4, $TEMP5, $TEMP4
1410         vpaddq          $TEMP3, $ACC7, $ACC7
1411         vpaddq          $TEMP4, $ACC8, $ACC8
1412
1413         vpsrlq          \$29, $ACC4, $TEMP1
1414         vpand           $AND_MASK, $ACC4, $ACC4
1415         vpsrlq          \$29, $ACC5, $TEMP2
1416         vpand           $AND_MASK, $ACC5, $ACC5
1417         vpsrlq          \$29, $ACC6, $TEMP3
1418         vpermq          \$0x93, $TEMP1, $TEMP1
1419         vpand           $AND_MASK, $ACC6, $ACC6
1420         vpsrlq          \$29, $ACC7, $TEMP4
1421         vpermq          \$0x93, $TEMP2, $TEMP2
1422         vpand           $AND_MASK, $ACC7, $ACC7
1423         vpsrlq          \$29, $ACC8, $TEMP5
1424         vpermq          \$0x93, $TEMP3, $TEMP3
1425         vpand           $AND_MASK, $ACC8, $ACC8
1426         vpermq          \$0x93, $TEMP4, $TEMP4
1427
1428         vpblendd        \$3, $ZERO, $TEMP1, $TEMP0
1429         vpermq          \$0x93, $TEMP5, $TEMP5
1430         vpblendd        \$3, $TEMP1, $TEMP2, $TEMP1
1431         vpaddq          $TEMP0, $ACC4, $ACC4
1432         vpblendd        \$3, $TEMP2, $TEMP3, $TEMP2
1433         vpaddq          $TEMP1, $ACC5, $ACC5
1434         vpblendd        \$3, $TEMP3, $TEMP4, $TEMP3
1435         vpaddq          $TEMP2, $ACC6, $ACC6
1436         vpblendd        \$3, $TEMP4, $TEMP5, $TEMP4
1437         vpaddq          $TEMP3, $ACC7, $ACC7
1438         vpaddq          $TEMP4, $ACC8, $ACC8
1439
1440         vmovdqu         $ACC4, 128-128($rp)
1441         vmovdqu         $ACC5, 160-128($rp)
1442         vmovdqu         $ACC6, 192-128($rp)
1443         vmovdqu         $ACC7, 224-128($rp)
1444         vmovdqu         $ACC8, 256-128($rp)
1445         vzeroupper
1446
1447         mov     %rbp, %rax
1448 ___
1449 $code.=<<___ if ($win64);
1450         movaps  -0xd8(%rax),%xmm6
1451         movaps  -0xc8(%rax),%xmm7
1452         movaps  -0xb8(%rax),%xmm8
1453         movaps  -0xa8(%rax),%xmm9
1454         movaps  -0x98(%rax),%xmm10
1455         movaps  -0x88(%rax),%xmm11
1456         movaps  -0x78(%rax),%xmm12
1457         movaps  -0x68(%rax),%xmm13
1458         movaps  -0x58(%rax),%xmm14
1459         movaps  -0x48(%rax),%xmm15
1460 ___
1461 $code.=<<___;
1462         mov     -48(%rax),%r15
1463         mov     -40(%rax),%r14
1464         mov     -32(%rax),%r13
1465         mov     -24(%rax),%r12
1466         mov     -16(%rax),%rbp
1467         mov     -8(%rax),%rbx
1468         lea     (%rax),%rsp             # restore %rsp
1469 .Lmul_1024_epilogue:
1470         ret
1471 .size   rsaz_1024_mul_avx2,.-rsaz_1024_mul_avx2
1472 ___
1473 }
1474 {
1475 my ($out,$inp) = $win64 ? ("%rcx","%rdx") : ("%rdi","%rsi");
1476 my @T = map("%r$_",(8..11));
1477
1478 $code.=<<___;
1479 .globl  rsaz_1024_red2norm_avx2
1480 .type   rsaz_1024_red2norm_avx2,\@abi-omnipotent
1481 .align  32
1482 rsaz_1024_red2norm_avx2:
1483         sub     \$-128,$inp     # size optimization
1484         xor     %rax,%rax
1485 ___
1486
1487 for ($j=0,$i=0; $i<16; $i++) {
1488     my $k=0;
1489     while (29*$j<64*($i+1)) {   # load data till boundary
1490         $code.="        mov     `8*$j-128`($inp), @T[0]\n";
1491         $j++; $k++; push(@T,shift(@T));
1492     }
1493     $l=$k;
1494     while ($k>1) {              # shift all loaded values but the last
1495         $code.="        shl     \$`29*($j-$k)`,@T[-$k]\n";
1496         $k--;
1497     }
1498     $code.=<<___;               # shift last value
1499         mov     @T[-1], @T[0]
1500         shl     \$`29*($j-1)`, @T[-1]
1501         shr     \$`-29*($j-1)`, @T[0]
1502 ___
1503     while ($l) {                # accumulate all values
1504         $code.="        add     @T[-$l], %rax\n";
1505         $l--;
1506     }
1507         $code.=<<___;
1508         adc     \$0, @T[0]      # consume any carry
1509         mov     %rax, 8*$i($out)
1510         mov     @T[0], %rax
1511 ___
1512     push(@T,shift(@T));
1513 }
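
# For reference, the function generated above computes
#       N = sum_j red[j] * 2^(29*j)
# and stores N as 16 little-endian 64-bit limbs; digits that have grown past
# 29 bits (which the redundant representation permits) are absorbed by the
# adc chain.  A whole-value model using Math::BigInt, illustrative only and
# never invoked by the generator:
sub ref_red2norm {
    my @red = @_;                       # redundant digits, least significant first
    require Math::BigInt;
    my $N = Math::BigInt->new(0);
    $N += Math::BigInt->new($red[$_]) << (29*$_)  for (0..$#red);
    my $mask = (Math::BigInt->new(1) << 64) - 1;
    return map { ($N >> (64*$_)) & $mask } (0..15);
}
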
1514 $code.=<<___;
1515         ret
1516 .size   rsaz_1024_red2norm_avx2,.-rsaz_1024_red2norm_avx2
1517
1518 .globl  rsaz_1024_norm2red_avx2
1519 .type   rsaz_1024_norm2red_avx2,\@abi-omnipotent
1520 .align  32
1521 rsaz_1024_norm2red_avx2:
1522         sub     \$-128,$out     # size optimization
1523         mov     ($inp),@T[0]
1524         mov     \$0x1fffffff,%eax
1525 ___
1526 for ($j=0,$i=0; $i<16; $i++) {
1527     $code.="    mov     `8*($i+1)`($inp),@T[1]\n"       if ($i<15);
1528     $code.="    xor     @T[1],@T[1]\n"                  if ($i==15);
1529     my $k=1;
1530     while (29*($j+1)<64*($i+1)) {
1531         $code.=<<___;
1532         mov     @T[0],@T[-$k]
1533         shr     \$`29*$j`,@T[-$k]
1534         and     %rax,@T[-$k]                            # &0x1fffffff
1535         mov     @T[-$k],`8*$j-128`($out)
1536 ___
1537         $j++; $k++;
1538     }
1539     $code.=<<___;
1540         shrd    \$`29*$j`,@T[1],@T[0]
1541         and     %rax,@T[0]
1542         mov     @T[0],`8*$j-128`($out)
1543 ___
1544     $j++;
1545     push(@T,shift(@T));
1546 }
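
# For reference, the inverse conversion generated above simply takes
#       red[j] = (N >> (29*j)) & 0x1fffffff
# for j = 0..35 and zero-pads the remaining slots.  A whole-value model,
# illustrative only (expects a Math::BigInt) and never invoked:
sub ref_norm2red {
    my $N = shift;                      # Math::BigInt, up to 1024 bits
    my @red = map { ($N >> (29*$_)) & 0x1fffffff } (0..35);
    push @red, 0, 0, 0, 0;              # zero padding, as emitted below
    return @red;
}
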
1547 $code.=<<___;
1548         mov     @T[0],`8*$j-128`($out)                  # zero
1549         mov     @T[0],`8*($j+1)-128`($out)
1550         mov     @T[0],`8*($j+2)-128`($out)
1551         mov     @T[0],`8*($j+3)-128`($out)
1552         ret
1553 .size   rsaz_1024_norm2red_avx2,.-rsaz_1024_norm2red_avx2
1554 ___
1555 }
1556 {
1557 my ($out,$inp,$power) = $win64 ? ("%rcx","%rdx","%r8d") : ("%rdi","%rsi","%edx");
1558
1559 $code.=<<___;
1560 .globl  rsaz_1024_scatter5_avx2
1561 .type   rsaz_1024_scatter5_avx2,\@abi-omnipotent
1562 .align  32
1563 rsaz_1024_scatter5_avx2:
1564         vzeroupper
1565         vmovdqu .Lscatter_permd(%rip),%ymm5
1566         shl     \$4,$power
1567         lea     ($out,$power),$out
1568         mov     \$9,%eax
1569         jmp     .Loop_scatter_1024
1570
1571 .align  32
1572 .Loop_scatter_1024:
1573         vmovdqu         ($inp),%ymm0
1574         lea             32($inp),$inp
1575         vpermd          %ymm0,%ymm5,%ymm0
1576         vmovdqu         %xmm0,($out)
1577         lea             16*32($out),$out
1578         dec     %eax
1579         jnz     .Loop_scatter_1024
1580
1581         vzeroupper
1582         ret
1583 .size   rsaz_1024_scatter5_avx2,.-rsaz_1024_scatter5_avx2
1584
1585 .globl  rsaz_1024_gather5_avx2
1586 .type   rsaz_1024_gather5_avx2,\@abi-omnipotent
1587 .align  32
1588 rsaz_1024_gather5_avx2:
1589 ___
1590 $code.=<<___ if ($win64);
1591         lea     -0x88(%rsp),%rax
1592         vzeroupper
1593 .LSEH_begin_rsaz_1024_gather5:
1594         # I can't trust assembler to use specific encoding:-(
1595         .byte   0x48,0x8d,0x60,0xe0             #lea    -0x20(%rax),%rsp
1596         .byte   0xc5,0xf8,0x29,0x70,0xe0        #vmovaps %xmm6,-0x20(%rax)
1597         .byte   0xc5,0xf8,0x29,0x78,0xf0        #vmovaps %xmm7,-0x10(%rax)
1598         .byte   0xc5,0x78,0x29,0x40,0x00        #vmovaps %xmm8,0(%rax)
1599         .byte   0xc5,0x78,0x29,0x48,0x10        #vmovaps %xmm9,0x10(%rax)
1600         .byte   0xc5,0x78,0x29,0x50,0x20        #vmovaps %xmm10,0x20(%rax)
1601         .byte   0xc5,0x78,0x29,0x58,0x30        #vmovaps %xmm11,0x30(%rax)
1602         .byte   0xc5,0x78,0x29,0x60,0x40        #vmovaps %xmm12,0x40(%rax)
1603         .byte   0xc5,0x78,0x29,0x68,0x50        #vmovaps %xmm13,0x50(%rax)
1604         .byte   0xc5,0x78,0x29,0x70,0x60        #vmovaps %xmm14,0x60(%rax)
1605         .byte   0xc5,0x78,0x29,0x78,0x70        #vmovaps %xmm15,0x70(%rax)
1606 ___
1607 $code.=<<___;
1608         lea     .Lgather_table(%rip),%r11
1609         mov     $power,%eax
1610         and     \$3,$power
1611         shr     \$2,%eax                        # cache line number
1612         shl     \$4,$power                      # offset within cache line
1613
1614         vmovdqu         -32(%r11),%ymm7         # .Lgather_permd
1615         vpbroadcastb    8(%r11,%rax), %xmm8
1616         vpbroadcastb    7(%r11,%rax), %xmm9
1617         vpbroadcastb    6(%r11,%rax), %xmm10
1618         vpbroadcastb    5(%r11,%rax), %xmm11
1619         vpbroadcastb    4(%r11,%rax), %xmm12
1620         vpbroadcastb    3(%r11,%rax), %xmm13
1621         vpbroadcastb    2(%r11,%rax), %xmm14
1622         vpbroadcastb    1(%r11,%rax), %xmm15
1623
1624         lea     64($inp,$power),$inp
1625         mov     \$64,%r11                       # size optimization
1626         mov     \$9,%eax
1627         jmp     .Loop_gather_1024
1628
1629 .align  32
1630 .Loop_gather_1024:
1631         vpand           -64($inp),              %xmm8,%xmm0
1632         vpand           ($inp),                 %xmm9,%xmm1
1633         vpand           64($inp),               %xmm10,%xmm2
1634         vpand           ($inp,%r11,2),          %xmm11,%xmm3
1635          vpor                                   %xmm0,%xmm1,%xmm1
1636         vpand           64($inp,%r11,2),        %xmm12,%xmm4
1637          vpor                                   %xmm2,%xmm3,%xmm3
1638         vpand           ($inp,%r11,4),          %xmm13,%xmm5
1639          vpor                                   %xmm1,%xmm3,%xmm3
1640         vpand           64($inp,%r11,4),        %xmm14,%xmm6
1641          vpor                                   %xmm4,%xmm5,%xmm5
1642         vpand           -128($inp,%r11,8),      %xmm15,%xmm2
1643         lea             ($inp,%r11,8),$inp
1644          vpor                                   %xmm3,%xmm5,%xmm5
1645          vpor                                   %xmm2,%xmm6,%xmm6
1646          vpor                                   %xmm5,%xmm6,%xmm6
1647         vpermd          %ymm6,%ymm7,%ymm6
1648         vmovdqu         %ymm6,($out)
1649         lea             32($out),$out
1650         dec     %eax
1651         jnz     .Loop_gather_1024
1652
1653         vpxor   %ymm0,%ymm0,%ymm0
1654         vmovdqu %ymm0,($out)
1655         vzeroupper
1656 ___
1657 $code.=<<___ if ($win64);
1658         movaps  (%rsp),%xmm6
1659         movaps  0x10(%rsp),%xmm7
1660         movaps  0x20(%rsp),%xmm8
1661         movaps  0x30(%rsp),%xmm9
1662         movaps  0x40(%rsp),%xmm10
1663         movaps  0x50(%rsp),%xmm11
1664         movaps  0x60(%rsp),%xmm12
1665         movaps  0x70(%rsp),%xmm13
1666         movaps  0x80(%rsp),%xmm14
1667         movaps  0x90(%rsp),%xmm15
1668         lea     0xa8(%rsp),%rsp
1669 .LSEH_end_rsaz_1024_gather5:
1670 ___
1671 $code.=<<___;
1672         ret
1673 .size   rsaz_1024_gather5_avx2,.-rsaz_1024_gather5_avx2
1674 ___
1675 }
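
# For reference, a scalar model of the mask-and-select idea used in
# .Loop_gather_1024 above: every candidate entry is AND-ed with a mask that
# is all-ones only for the wanted index (the assembly derives those masks
# from .Lgather_table broadcasts) and the results are OR-ed together, so a
# single entry survives.  Illustrative only, never invoked by the generator.
sub ref_masked_select {
    my ($want, @entry) = @_;
    my $r = 0;
    for (my $k=0; $k<@entry; $k++) {
        my $mask = ($k == $want) ? ~0 : 0;      # all-ones or all-zeros
        $r |= $entry[$k] & $mask;
    }
    return $r;
}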
1676
1677 $code.=<<___;
1678 .extern OPENSSL_ia32cap_P
1679 .globl  rsaz_avx2_eligible
1680 .type   rsaz_avx2_eligible,\@abi-omnipotent
1681 .align  32
1682 rsaz_avx2_eligible:
1683         mov     OPENSSL_ia32cap_P+8(%rip),%eax
1684 ___
1685 $code.=<<___    if ($addx);
1686         mov     \$`1<<8|1<<19`,%ecx
1687         mov     \$0,%edx
1688         and     %eax,%ecx
1689         cmp     \$`1<<8|1<<19`,%ecx     # check for BMI2+AD*X
1690         cmove   %edx,%eax
1691 ___
1692 $code.=<<___;
1693         and     \$`1<<5`,%eax
1694         shr     \$5,%eax
1695         ret
1696 .size   rsaz_avx2_eligible,.-rsaz_avx2_eligible
1697
1698 .align  64
1699 .Land_mask:
1700         .quad   0x1fffffff,0x1fffffff,0x1fffffff,-1
1701 .Lscatter_permd:
1702         .long   0,2,4,6,7,7,7,7
1703 .Lgather_permd:
1704         .long   0,7,1,7,2,7,3,7
1705 .Lgather_table:
1706         .byte   0,0,0,0,0,0,0,0, 0xff,0,0,0,0,0,0,0
1707 .align  64
1708 ___
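
# For reference, a scalar model of the test emitted above, assuming $cap is
# the third 32-bit word of OPENSSL_ia32cap_P (CPUID.7.EBX: bit 5 = AVX2,
# bit 8 = BMI2, bit 19 = ADX); illustrative only, never invoked.
sub ref_avx2_eligible {
    my ($cap, $have_adx_code) = @_;
    # when the BMI2+AD*X code path is assembled, defer to it on capable CPUs
    return 0 if ($have_adx_code && ($cap & (1<<8 | 1<<19)) == (1<<8 | 1<<19));
    return ($cap >> 5) & 1;             # otherwise report AVX2 support
}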
1709
1710 if ($win64) {
1711 $rec="%rcx";
1712 $frame="%rdx";
1713 $context="%r8";
1714 $disp="%r9";
1715
1716 $code.=<<___;
1717 .extern __imp_RtlVirtualUnwind
1718 .type   rsaz_se_handler,\@abi-omnipotent
1719 .align  16
1720 rsaz_se_handler:
1721         push    %rsi
1722         push    %rdi
1723         push    %rbx
1724         push    %rbp
1725         push    %r12
1726         push    %r13
1727         push    %r14
1728         push    %r15
1729         pushfq
1730         sub     \$64,%rsp
1731
1732         mov     120($context),%rax      # pull context->Rax
1733         mov     248($context),%rbx      # pull context->Rip
1734
1735         mov     8($disp),%rsi           # disp->ImageBase
1736         mov     56($disp),%r11          # disp->HandlerData
1737
1738         mov     0(%r11),%r10d           # HandlerData[0]
1739         lea     (%rsi,%r10),%r10        # prologue label
1740         cmp     %r10,%rbx               # context->Rip<prologue label
1741         jb      .Lcommon_seh_tail
1742
1743         mov     152($context),%rax      # pull context->Rsp
1744
1745         mov     4(%r11),%r10d           # HandlerData[1]
1746         lea     (%rsi,%r10),%r10        # epilogue label
1747         cmp     %r10,%rbx               # context->Rip>=epilogue label
1748         jae     .Lcommon_seh_tail
1749
1750         mov     160($context),%rax      # pull context->Rbp
1751
1752         mov     -48(%rax),%r15
1753         mov     -40(%rax),%r14
1754         mov     -32(%rax),%r13
1755         mov     -24(%rax),%r12
1756         mov     -16(%rax),%rbp
1757         mov     -8(%rax),%rbx
1758         mov     %r15,240($context)
1759         mov     %r14,232($context)
1760         mov     %r13,224($context)
1761         mov     %r12,216($context)
1762         mov     %rbp,160($context)
1763         mov     %rbx,144($context)
1764
1765         lea     -0xd8(%rax),%rsi        # %xmm save area
1766         lea     512($context),%rdi      # & context.Xmm6
1767         mov     \$20,%ecx               # 10*sizeof(%xmm0)/sizeof(%rax)
1768         .long   0xa548f3fc              # cld; rep movsq
1769
1770 .Lcommon_seh_tail:
1771         mov     8(%rax),%rdi
1772         mov     16(%rax),%rsi
1773         mov     %rax,152($context)      # restore context->Rsp
1774         mov     %rsi,168($context)      # restore context->Rsi
1775         mov     %rdi,176($context)      # restore context->Rdi
1776
1777         mov     40($disp),%rdi          # disp->ContextRecord
1778         mov     $context,%rsi           # context
1779         mov     \$154,%ecx              # sizeof(CONTEXT)
1780         .long   0xa548f3fc              # cld; rep movsq
1781
1782         mov     $disp,%rsi
1783         xor     %rcx,%rcx               # arg1, UNW_FLAG_NHANDLER
1784         mov     8(%rsi),%rdx            # arg2, disp->ImageBase
1785         mov     0(%rsi),%r8             # arg3, disp->ControlPc
1786         mov     16(%rsi),%r9            # arg4, disp->FunctionEntry
1787         mov     40(%rsi),%r10           # disp->ContextRecord
1788         lea     56(%rsi),%r11           # &disp->HandlerData
1789         lea     24(%rsi),%r12           # &disp->EstablisherFrame
1790         mov     %r10,32(%rsp)           # arg5
1791         mov     %r11,40(%rsp)           # arg6
1792         mov     %r12,48(%rsp)           # arg7
1793         mov     %rcx,56(%rsp)           # arg8, (NULL)
1794         call    *__imp_RtlVirtualUnwind(%rip)
1795
1796         mov     \$1,%eax                # ExceptionContinueSearch
1797         add     \$64,%rsp
1798         popfq
1799         pop     %r15
1800         pop     %r14
1801         pop     %r13
1802         pop     %r12
1803         pop     %rbp
1804         pop     %rbx
1805         pop     %rdi
1806         pop     %rsi
1807         ret
1808 .size   rsaz_se_handler,.-rsaz_se_handler
1809
1810 .section        .pdata
1811 .align  4
1812         .rva    .LSEH_begin_rsaz_1024_sqr_avx2
1813         .rva    .LSEH_end_rsaz_1024_sqr_avx2
1814         .rva    .LSEH_info_rsaz_1024_sqr_avx2
1815
1816         .rva    .LSEH_begin_rsaz_1024_mul_avx2
1817         .rva    .LSEH_end_rsaz_1024_mul_avx2
1818         .rva    .LSEH_info_rsaz_1024_mul_avx2
1819
1820         .rva    .LSEH_begin_rsaz_1024_gather5
1821         .rva    .LSEH_end_rsaz_1024_gather5
1822         .rva    .LSEH_info_rsaz_1024_gather5
1823 .section        .xdata
1824 .align  8
1825 .LSEH_info_rsaz_1024_sqr_avx2:
1826         .byte   9,0,0,0
1827         .rva    rsaz_se_handler
1828         .rva    .Lsqr_1024_body,.Lsqr_1024_epilogue
1829 .LSEH_info_rsaz_1024_mul_avx2:
1830         .byte   9,0,0,0
1831         .rva    rsaz_se_handler
1832         .rva    .Lmul_1024_body,.Lmul_1024_epilogue
1833 .LSEH_info_rsaz_1024_gather5:
1834         .byte   0x01,0x33,0x16,0x00
1835         .byte   0x36,0xf8,0x09,0x00     #vmovaps 0x90(rsp),xmm15
1836         .byte   0x31,0xe8,0x08,0x00     #vmovaps 0x80(rsp),xmm14
1837         .byte   0x2c,0xd8,0x07,0x00     #vmovaps 0x70(rsp),xmm13
1838         .byte   0x27,0xc8,0x06,0x00     #vmovaps 0x60(rsp),xmm12
1839         .byte   0x22,0xb8,0x05,0x00     #vmovaps 0x50(rsp),xmm11
1840         .byte   0x1d,0xa8,0x04,0x00     #vmovaps 0x40(rsp),xmm10
1841         .byte   0x18,0x98,0x03,0x00     #vmovaps 0x30(rsp),xmm9
1842         .byte   0x13,0x88,0x02,0x00     #vmovaps 0x20(rsp),xmm8
1843         .byte   0x0e,0x78,0x01,0x00     #vmovaps 0x10(rsp),xmm7
1844         .byte   0x09,0x68,0x00,0x00     #vmovaps 0x00(rsp),xmm6
1845         .byte   0x04,0x01,0x15,0x00     #sub    rsp,0xa8
1846 ___
1847 }
1848
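# The substitutions below post-process the generated code: the `...`
# arithmetic embedded above is evaluated, shift constants are reduced
# modulo 64 (the red2norm generator emits raw 29*j amounts), and operands
# that must be 128-bit registers (the vmov[dq]/vpinsr/vpextr/vpbroadcast
# forms used here, plus vmovdqu written with an %x marker) are rewritten
# from %ymm to %xmm names.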
1849 foreach (split("\n",$code)) {
1850         s/\`([^\`]*)\`/eval($1)/ge;
1851
1852         s/\b(sh[rl]d?\s+\$)(-?[0-9]+)/$1.$2%64/ge               or
1853
1854         s/\b(vmov[dq])\b(.+)%ymm([0-9]+)/$1$2%xmm$3/go          or
1855         s/\b(vmovdqu)\b(.+)%x%ymm([0-9]+)/$1$2%xmm$3/go         or
1856         s/\b(vpinsr[qd])\b(.+)%ymm([0-9]+)/$1$2%xmm$3/go        or
1857         s/\b(vpextr[qd])\b(.+)%ymm([0-9]+)/$1$2%xmm$3/go        or
1858         s/\b(vpbroadcast[qd]\s+)%ymm([0-9]+)/$1%xmm$2/go;
1859         print $_,"\n";
1860 }
1861
1862 }}} else {{{
1863 print <<___;    # assembler is too old
1864 .text
1865
1866 .globl  rsaz_avx2_eligible
1867 .type   rsaz_avx2_eligible,\@abi-omnipotent
1868 rsaz_avx2_eligible:
1869         xor     %eax,%eax
1870         ret
1871 .size   rsaz_avx2_eligible,.-rsaz_avx2_eligible
1872
1873 .globl  rsaz_1024_sqr_avx2
1874 .globl  rsaz_1024_mul_avx2
1875 .globl  rsaz_1024_norm2red_avx2
1876 .globl  rsaz_1024_red2norm_avx2
1877 .globl  rsaz_1024_scatter5_avx2
1878 .globl  rsaz_1024_gather5_avx2
1879 .type   rsaz_1024_sqr_avx2,\@abi-omnipotent
1880 rsaz_1024_sqr_avx2:
1881 rsaz_1024_mul_avx2:
1882 rsaz_1024_norm2red_avx2:
1883 rsaz_1024_red2norm_avx2:
1884 rsaz_1024_scatter5_avx2:
1885 rsaz_1024_gather5_avx2:
1886         .byte   0x0f,0x0b       # ud2
1887         ret
1888 .size   rsaz_1024_sqr_avx2,.-rsaz_1024_sqr_avx2
1889 ___
1890 }}}
1891
1892 close STDOUT;