1 #!/usr/bin/env perl
2
3 ##############################################################################
4 #                                                                            #
5 #  Copyright (c) 2012, Intel Corporation                                     #
6 #                                                                            #
7 #  All rights reserved.                                                      #
8 #                                                                            #
9 #  Redistribution and use in source and binary forms, with or without        #
10 #  modification, are permitted provided that the following conditions are    #
11 #  met:                                                                      #
12 #                                                                            #
13 #  *  Redistributions of source code must retain the above copyright         #
14 #     notice, this list of conditions and the following disclaimer.          #
15 #                                                                            #
16 #  *  Redistributions in binary form must reproduce the above copyright      #
17 #     notice, this list of conditions and the following disclaimer in the    #
18 #     documentation and/or other materials provided with the                 #
19 #     distribution.                                                          #
20 #                                                                            #
21 #  *  Neither the name of the Intel Corporation nor the names of its         #
22 #     contributors may be used to endorse or promote products derived from   #
23 #     this software without specific prior written permission.               #
24 #                                                                            #
25 #                                                                            #
26 #  THIS SOFTWARE IS PROVIDED BY INTEL CORPORATION "AS IS" AND ANY            #
27 #  EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE         #
28 #  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR        #
29 #  PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL INTEL CORPORATION OR            #
30 #  CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,     #
31 #  EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,       #
32 #  PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR        #
33 #  PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF    #
34 #  LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING      #
35 #  NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS        #
36 #  SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.              #
37 #                                                                            #
38 ##############################################################################
39 # Developers and authors:                                                    #
40 # Shay Gueron (1, 2), and Vlad Krasnov (1)                                   #
41 # (1) Intel Corporation, Israel Development Center, Haifa, Israel            #
42 # (2) University of Haifa, Israel                                            #
43 ##############################################################################
44 # Reference:                                                                 #
45 # [1] S. Gueron, V. Krasnov: "Software Implementation of Modular             #
46 #     Exponentiation,  Using Advanced Vector Instructions Architectures",    #
47 #     F. Ozbudak and F. Rodriguez-Henriquez (Eds.): WAIFI 2012, LNCS 7369,   #
48 #     pp. 119-135, 2012. Springer-Verlag Berlin Heidelberg 2012              #
49 # [2] S. Gueron: "Efficient Software Implementations of Modular              #
50 #     Exponentiation", Journal of Cryptographic Engineering 2:31-43 (2012).  #
51 # [3] S. Gueron, V. Krasnov: "Speeding up Big-Numbers Squaring", IEEE        #
52 #     Proceedings of 9th International Conference on Information Technology: #
53 #     New Generations (ITNG 2012), pp. 821-823 (2012)                        #
54 # [4] S. Gueron, V. Krasnov: "[PATCH] Efficient and side channel analysis    #
55 #     resistant 1024-bit modular exponentiation, for optimizing RSA2048      #
56 #     on AVX2 capable x86_64 platforms",                                     #
57 #     http://rt.openssl.org/Ticket/Display.html?id=2850&user=guest&pass=guest#
58 ##############################################################################
59 #
60 # +13% improvement over original submission by <appro@openssl.org>
61 #
62 # rsa2048 sign/sec      OpenSSL 1.0.1   scalar(*)       this
63 # 2.3GHz Haswell        621             765/+23%        1113/+79%
64 #
65 # (*)   result if the system doesn't support AVX2, shown for reference purposes;
66
67 $flavour = shift;
68 $output  = shift;
69 if ($flavour =~ /\./) { $output = $flavour; undef $flavour; }
70
71 $win64=0; $win64=1 if ($flavour =~ /[nm]asm|mingw64/ || $output =~ /\.asm$/);
72
73 $0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
74 ( $xlate="${dir}x86_64-xlate.pl" and -f $xlate ) or
75 ( $xlate="${dir}../../perlasm/x86_64-xlate.pl" and -f $xlate) or
76 die "can't locate x86_64-xlate.pl";
77
78 if (`$ENV{CC} -Wa,-v -c -o /dev/null -x assembler /dev/null 2>&1`
79                 =~ /GNU assembler version ([2-9]\.[0-9]+)/) {
80         $avx = ($1>=2.19) + ($1>=2.22);
81         $addx = ($1>=2.23);
82 }
83
84 if (!$avx && $win64 && ($flavour =~ /nasm/ || $ENV{ASM} =~ /nasm/) &&
85             `nasm -v 2>&1` =~ /NASM version ([2-9]\.[0-9]+)/) {
86         $avx = ($1>=2.09) + ($1>=2.10);
87         $addx = ($1>=2.10);
88 }
89
90 if (!$avx && $win64 && ($flavour =~ /masm/ || $ENV{ASM} =~ /ml64/) &&
91             `ml64 2>&1` =~ /Version ([0-9]+)\./) {
92         $avx = ($1>=10) + ($1>=11);
93         $addx = ($1>=11);
94 }
95
96 if (!$avx && `$ENV{CC} -v 2>&1` =~ /LLVM ([3-9]\.[0-9]+)/) {
97         $avx = ($1>=3.0) + ($1>=3.1);
98         $addx = 0;
99 }
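# $avx records the assembler's AVX capability level: 0 = none, 1 = AVX,
# 2 = AVX2; the AVX2 code paths below are only emitted when $avx>1.
# $addx records ADCX/ADOX (ADX) support, probed here the same way as in
# the other bn assembly modules.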
100
101 open OUT,"| $^X $xlate $flavour $output";
102 *STDOUT = *OUT;
103
104 if ($avx>1) {{{
105 { # void AMS_WW(
106 my $rp="%rdi";  # BN_ULONG *rp,
107 my $ap="%rsi";  # const BN_ULONG *ap,
108 my $np="%rdx";  # const BN_ULONG *np,
109 my $n0="%ecx";  # const BN_ULONG n0,
110 my $rep="%r8d"; # int repeat);
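# AMS stands for Almost Montgomery Squaring (see [1] and [2]): the result is
# reduced only far enough to stay inside the redundant representation, not to
# a canonical value below the modulus.  The routine performs $rep squarings
# back to back, feeding each result in as the next input.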
111
112 # The registers that hold the accumulated redundant result
113 # The AMM works on 1024-bit operands and the redundant word size is 29 bits,
114 # therefore ceil(1024/29) = 36 words, which occupy 36/4 = 9 ymm registers
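# As an illustration only (not used by the generator), the mapping of a
# 1024-bit integer $n (assuming a Math::BigInt or similar) onto that
# representation would be:
#
#   my @limbs = map { ($n >> (29*$_)) & 0x1fffffff } 0..35;   # 29-bit digits
#
# with each limb stored in its own 64-bit lane, four limbs per ymm register.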
115 my $ACC0="%ymm0";
116 my $ACC1="%ymm1";
117 my $ACC2="%ymm2";
118 my $ACC3="%ymm3";
119 my $ACC4="%ymm4";
120 my $ACC5="%ymm5";
121 my $ACC6="%ymm6";
122 my $ACC7="%ymm7";
123 my $ACC8="%ymm8";
124 my $ACC9="%ymm9";
125 # Registers that hold the broadcasted words of bp, currently used
126 my $B1="%ymm10";
127 my $B2="%ymm11";
128 # Registers that hold the broadcasted words of Y, currently used
129 my $Y1="%ymm12";
130 my $Y2="%ymm13";
131 # Helper registers
132 my $TEMP1="%ymm14";
133 my $AND_MASK="%ymm15";
134 # alu registers that hold the first words of the ACC
135 my $r0="%r9";
136 my $r1="%r10";
137 my $r2="%r11";
138 my $r3="%r12";
139
140 my $i="%r14d";                  # loop counter
141 my $tmp = "%r15";
142
143 my $FrameSize=32*18+32*8;       # place for A^2 and 2*A
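# (32*18 bytes hold the double-length square in redundant form, up to
# 2*36 = 72 limbs = 18 ymm-sized slots; 32*8 bytes hold the pre-doubled
# copy of the operand words used for the cross products)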
144
145 my $aap=$r0;
146 my $tp0="%rbx";
147 my $tp1=$r3;
148 my $tpa=$tmp;
149
150 $np="%r13";                     # reassigned argument
151
152 $code.=<<___;
153 .text
154
155 .globl  rsaz_1024_sqr_avx2
156 .type   rsaz_1024_sqr_avx2,\@function,5
157 .align  64
158 rsaz_1024_sqr_avx2:             # 702 cycles, 14% faster than rsaz_1024_mul_avx2
159         lea     (%rsp), %rax
160         push    %rbx
161         push    %rbp
162         push    %r12
163         push    %r13
164         push    %r14
165         push    %r15
166         vzeroupper
167 ___
168 $code.=<<___ if ($win64);
169         lea     -0xa8(%rsp),%rsp
170         vmovaps %xmm6,-0xd8(%rax)
171         vmovaps %xmm7,-0xc8(%rax)
172         vmovaps %xmm8,-0xb8(%rax)
173         vmovaps %xmm9,-0xa8(%rax)
174         vmovaps %xmm10,-0x98(%rax)
175         vmovaps %xmm11,-0x88(%rax)
176         vmovaps %xmm12,-0x78(%rax)
177         vmovaps %xmm13,-0x68(%rax)
178         vmovaps %xmm14,-0x58(%rax)
179         vmovaps %xmm15,-0x48(%rax)
180 .Lsqr_1024_body:
181 ___
182 $code.=<<___;
183         mov     %rax,%rbp
184         mov     %rdx, $np                       # reassigned argument
185         sub     \$$FrameSize, %rsp
186         mov     $np, $tmp
187         sub     \$-128, $rp                     # size optimization
188         sub     \$-128, $ap
189         sub     \$-128, $np
190
191         and     \$4095, $tmp                    # see if $np crosses page
192         add     \$32*10, $tmp
193         shr     \$12, $tmp
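        # $tmp is now non-zero iff the 32*10-byte window at $np crosses a 4K page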
194         vpxor   $ACC9,$ACC9,$ACC9
195         jz      .Lsqr_1024_no_n_copy
196
197         # unaligned 256-bit load that crosses page boundary can
198         # cause >2x performance degradation here, so if $np does
199         # cross page boundary, copy it to stack and make sure stack
200         # frame doesn't...
201         sub             \$32*10,%rsp
202         vmovdqu         32*0-128($np), $ACC0
203         and             \$-2048, %rsp
204         vmovdqu         32*1-128($np), $ACC1
205         vmovdqu         32*2-128($np), $ACC2
206         vmovdqu         32*3-128($np), $ACC3
207         vmovdqu         32*4-128($np), $ACC4
208         vmovdqu         32*5-128($np), $ACC5
209         vmovdqu         32*6-128($np), $ACC6
210         vmovdqu         32*7-128($np), $ACC7
211         vmovdqu         32*8-128($np), $ACC8
212         lea             $FrameSize+128(%rsp),$np
213         vmovdqu         $ACC0, 32*0-128($np)
214         vmovdqu         $ACC1, 32*1-128($np)
215         vmovdqu         $ACC2, 32*2-128($np)
216         vmovdqu         $ACC3, 32*3-128($np)
217         vmovdqu         $ACC4, 32*4-128($np)
218         vmovdqu         $ACC5, 32*5-128($np)
219         vmovdqu         $ACC6, 32*6-128($np)
220         vmovdqu         $ACC7, 32*7-128($np)
221         vmovdqu         $ACC8, 32*8-128($np)
222         vmovdqu         $ACC9, 32*9-128($np)    # $ACC9 is zero
223
224 .Lsqr_1024_no_n_copy:
225         and             \$-1024, %rsp
226
227         vmovdqu         32*1-128($ap), $ACC1
228         vmovdqu         32*2-128($ap), $ACC2
229         vmovdqu         32*3-128($ap), $ACC3
230         vmovdqu         32*4-128($ap), $ACC4
231         vmovdqu         32*5-128($ap), $ACC5
232         vmovdqu         32*6-128($ap), $ACC6
233         vmovdqu         32*7-128($ap), $ACC7
234         vmovdqu         32*8-128($ap), $ACC8
235
236         lea     192(%rsp), $tp0                 # 64+128=192
237         vpbroadcastq    .Land_mask(%rip), $AND_MASK
238         jmp     .LOOP_GRANDE_SQR_1024
239
240 .align  32
241 .LOOP_GRANDE_SQR_1024:
242         lea     32*18+128(%rsp), $aap           # size optimization
243         lea     448(%rsp), $tp1                 # 64+128+256=448
244
245         # the squaring is performed as described in Variant B of
246         # "Speeding up Big-Number Squaring", so start by calculating
247         # the A*2=A+A vector
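        # (with the doubled copy each off-diagonal product a[i]*a[j], i != j,
        # has to be accumulated only once instead of twice)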
248         vpaddq          $ACC1, $ACC1, $ACC1
249          vpbroadcastq   32*0-128($ap), $B1
250         vpaddq          $ACC2, $ACC2, $ACC2
251         vmovdqa         $ACC1, 32*0-128($aap)
252         vpaddq          $ACC3, $ACC3, $ACC3
253         vmovdqa         $ACC2, 32*1-128($aap)
254         vpaddq          $ACC4, $ACC4, $ACC4
255         vmovdqa         $ACC3, 32*2-128($aap)
256         vpaddq          $ACC5, $ACC5, $ACC5
257         vmovdqa         $ACC4, 32*3-128($aap)
258         vpaddq          $ACC6, $ACC6, $ACC6
259         vmovdqa         $ACC5, 32*4-128($aap)
260         vpaddq          $ACC7, $ACC7, $ACC7
261         vmovdqa         $ACC6, 32*5-128($aap)
262         vpaddq          $ACC8, $ACC8, $ACC8
263         vmovdqa         $ACC7, 32*6-128($aap)
264         vpxor           $ACC9, $ACC9, $ACC9
265         vmovdqa         $ACC8, 32*7-128($aap)
266
267         vpmuludq        32*0-128($ap), $B1, $ACC0
268          vpbroadcastq   32*1-128($ap), $B2
269          vmovdqu        $ACC9, 32*9-192($tp0)   # zero upper half
270         vpmuludq        $B1, $ACC1, $ACC1
271          vmovdqu        $ACC9, 32*10-448($tp1)
272         vpmuludq        $B1, $ACC2, $ACC2
273          vmovdqu        $ACC9, 32*11-448($tp1)
274         vpmuludq        $B1, $ACC3, $ACC3
275          vmovdqu        $ACC9, 32*12-448($tp1)
276         vpmuludq        $B1, $ACC4, $ACC4
277          vmovdqu        $ACC9, 32*13-448($tp1)
278         vpmuludq        $B1, $ACC5, $ACC5
279          vmovdqu        $ACC9, 32*14-448($tp1)
280         vpmuludq        $B1, $ACC6, $ACC6
281          vmovdqu        $ACC9, 32*15-448($tp1)
282         vpmuludq        $B1, $ACC7, $ACC7
283          vmovdqu        $ACC9, 32*16-448($tp1)
284         vpmuludq        $B1, $ACC8, $ACC8
285          vpbroadcastq   32*2-128($ap), $B1
286          vmovdqu        $ACC9, 32*17-448($tp1)
287
288         mov     $ap, $tpa
289         mov     \$4, $i
290         jmp     .Lsqr_entry_1024
291 ___
292 $TEMP0=$Y1;
293 $TEMP2=$Y2;
294 $code.=<<___;
295 .align  32
296 .LOOP_SQR_1024:
297          vpbroadcastq   32*1-128($tpa), $B2
298         vpmuludq        32*0-128($ap), $B1, $ACC0
299         vpaddq          32*0-192($tp0), $ACC0, $ACC0
300         vpmuludq        32*0-128($aap), $B1, $ACC1
301         vpaddq          32*1-192($tp0), $ACC1, $ACC1
302         vpmuludq        32*1-128($aap), $B1, $ACC2
303         vpaddq          32*2-192($tp0), $ACC2, $ACC2
304         vpmuludq        32*2-128($aap), $B1, $ACC3
305         vpaddq          32*3-192($tp0), $ACC3, $ACC3
306         vpmuludq        32*3-128($aap), $B1, $ACC4
307         vpaddq          32*4-192($tp0), $ACC4, $ACC4
308         vpmuludq        32*4-128($aap), $B1, $ACC5
309         vpaddq          32*5-192($tp0), $ACC5, $ACC5
310         vpmuludq        32*5-128($aap), $B1, $ACC6
311         vpaddq          32*6-192($tp0), $ACC6, $ACC6
312         vpmuludq        32*6-128($aap), $B1, $ACC7
313         vpaddq          32*7-192($tp0), $ACC7, $ACC7
314         vpmuludq        32*7-128($aap), $B1, $ACC8
315          vpbroadcastq   32*2-128($tpa), $B1
316         vpaddq          32*8-192($tp0), $ACC8, $ACC8
317 .Lsqr_entry_1024:
318         vmovdqu         $ACC0, 32*0-192($tp0)
319         vmovdqu         $ACC1, 32*1-192($tp0)
320
321         vpmuludq        32*1-128($ap), $B2, $TEMP0
322         vpaddq          $TEMP0, $ACC2, $ACC2
323         vpmuludq        32*1-128($aap), $B2, $TEMP1
324         vpaddq          $TEMP1, $ACC3, $ACC3
325         vpmuludq        32*2-128($aap), $B2, $TEMP2
326         vpaddq          $TEMP2, $ACC4, $ACC4
327         vpmuludq        32*3-128($aap), $B2, $TEMP0
328         vpaddq          $TEMP0, $ACC5, $ACC5
329         vpmuludq        32*4-128($aap), $B2, $TEMP1
330         vpaddq          $TEMP1, $ACC6, $ACC6
331         vpmuludq        32*5-128($aap), $B2, $TEMP2
332         vpaddq          $TEMP2, $ACC7, $ACC7
333         vpmuludq        32*6-128($aap), $B2, $TEMP0
334         vpaddq          $TEMP0, $ACC8, $ACC8
335         vpmuludq        32*7-128($aap), $B2, $ACC0
336          vpbroadcastq   32*3-128($tpa), $B2
337         vpaddq          32*9-192($tp0), $ACC0, $ACC0
338
339         vmovdqu         $ACC2, 32*2-192($tp0)
340         vmovdqu         $ACC3, 32*3-192($tp0)
341
342         vpmuludq        32*2-128($ap), $B1, $TEMP2
343         vpaddq          $TEMP2, $ACC4, $ACC4
344         vpmuludq        32*2-128($aap), $B1, $TEMP0
345         vpaddq          $TEMP0, $ACC5, $ACC5
346         vpmuludq        32*3-128($aap), $B1, $TEMP1
347         vpaddq          $TEMP1, $ACC6, $ACC6
348         vpmuludq        32*4-128($aap), $B1, $TEMP2
349         vpaddq          $TEMP2, $ACC7, $ACC7
350         vpmuludq        32*5-128($aap), $B1, $TEMP0
351         vpaddq          $TEMP0, $ACC8, $ACC8
352         vpmuludq        32*6-128($aap), $B1, $TEMP1
353         vpaddq          $TEMP1, $ACC0, $ACC0
354         vpmuludq        32*7-128($aap), $B1, $ACC1
355          vpbroadcastq   32*4-128($tpa), $B1
356         vpaddq          32*10-448($tp1), $ACC1, $ACC1
357
358         vmovdqu         $ACC4, 32*4-192($tp0)
359         vmovdqu         $ACC5, 32*5-192($tp0)
360
361         vpmuludq        32*3-128($ap), $B2, $TEMP0
362         vpaddq          $TEMP0, $ACC6, $ACC6
363         vpmuludq        32*3-128($aap), $B2, $TEMP1
364         vpaddq          $TEMP1, $ACC7, $ACC7
365         vpmuludq        32*4-128($aap), $B2, $TEMP2
366         vpaddq          $TEMP2, $ACC8, $ACC8
367         vpmuludq        32*5-128($aap), $B2, $TEMP0
368         vpaddq          $TEMP0, $ACC0, $ACC0
369         vpmuludq        32*6-128($aap), $B2, $TEMP1
370         vpaddq          $TEMP1, $ACC1, $ACC1
371         vpmuludq        32*7-128($aap), $B2, $ACC2
372          vpbroadcastq   32*5-128($tpa), $B2
373         vpaddq          32*11-448($tp1), $ACC2, $ACC2   
374
375         vmovdqu         $ACC6, 32*6-192($tp0)
376         vmovdqu         $ACC7, 32*7-192($tp0)
377
378         vpmuludq        32*4-128($ap), $B1, $TEMP0
379         vpaddq          $TEMP0, $ACC8, $ACC8
380         vpmuludq        32*4-128($aap), $B1, $TEMP1
381         vpaddq          $TEMP1, $ACC0, $ACC0
382         vpmuludq        32*5-128($aap), $B1, $TEMP2
383         vpaddq          $TEMP2, $ACC1, $ACC1
384         vpmuludq        32*6-128($aap), $B1, $TEMP0
385         vpaddq          $TEMP0, $ACC2, $ACC2
386         vpmuludq        32*7-128($aap), $B1, $ACC3
387          vpbroadcastq   32*6-128($tpa), $B1
388         vpaddq          32*12-448($tp1), $ACC3, $ACC3
389
390         vmovdqu         $ACC8, 32*8-192($tp0)
391         vmovdqu         $ACC0, 32*9-192($tp0)
392         lea             8($tp0), $tp0
393
394         vpmuludq        32*5-128($ap), $B2, $TEMP2
395         vpaddq          $TEMP2, $ACC1, $ACC1
396         vpmuludq        32*5-128($aap), $B2, $TEMP0
397         vpaddq          $TEMP0, $ACC2, $ACC2
398         vpmuludq        32*6-128($aap), $B2, $TEMP1
399         vpaddq          $TEMP1, $ACC3, $ACC3
400         vpmuludq        32*7-128($aap), $B2, $ACC4
401          vpbroadcastq   32*7-128($tpa), $B2
402         vpaddq          32*13-448($tp1), $ACC4, $ACC4
403
404         vmovdqu         $ACC1, 32*10-448($tp1)
405         vmovdqu         $ACC2, 32*11-448($tp1)
406
407         vpmuludq        32*6-128($ap), $B1, $TEMP0
408         vpaddq          $TEMP0, $ACC3, $ACC3
409         vpmuludq        32*6-128($aap), $B1, $TEMP1
410          vpbroadcastq   32*8-128($tpa), $ACC0           # borrow $ACC0 for $B1
411         vpaddq          $TEMP1, $ACC4, $ACC4
412         vpmuludq        32*7-128($aap), $B1, $ACC5
413          vpbroadcastq   32*0+8-128($tpa), $B1           # for next iteration
414         vpaddq          32*14-448($tp1), $ACC5, $ACC5
415
416         vmovdqu         $ACC3, 32*12-448($tp1)
417         vmovdqu         $ACC4, 32*13-448($tp1)
418         lea             8($tpa), $tpa
419
420         vpmuludq        32*7-128($ap), $B2, $TEMP0
421         vpaddq          $TEMP0, $ACC5, $ACC5
422         vpmuludq        32*7-128($aap), $B2, $ACC6
423         vpaddq          32*15-448($tp1), $ACC6, $ACC6
424
425         vpmuludq        32*8-128($ap), $ACC0, $ACC7
426         vmovdqu         $ACC5, 32*14-448($tp1)
427         vpaddq          32*16-448($tp1), $ACC7, $ACC7
428         vmovdqu         $ACC6, 32*15-448($tp1)
429         vmovdqu         $ACC7, 32*16-448($tp1)
430         lea             8($tp1), $tp1
431
432         dec     $i        
433         jnz     .LOOP_SQR_1024
434 ___
435 $ZERO = $ACC9;
436 $TEMP0 = $B1;
437 $TEMP2 = $B2;
438 $TEMP3 = $Y1;
439 $TEMP4 = $Y2;
440 $code.=<<___;
441         # we need to fix indexes 32-39 to avoid overflow
442         vmovdqu         32*8(%rsp), $ACC8               # 32*8-192($tp0),
443         vmovdqu         32*9(%rsp), $ACC1               # 32*9-192($tp0)
444         vmovdqu         32*10(%rsp), $ACC2              # 32*10-192($tp0)
445         lea             192(%rsp), $tp0                 # 64+128=192
446
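        # carry propagation over 29-bit limbs: vpsrlq by 29 extracts the carry
        # out of each limb, vpand keeps the low 29 bits, vpermq 0x93 rotates
        # the four carries up one lane so each lines up with the next limb,
        # and the vpblendd pair hands the carry leaving one register over to
        # the bottom lane of the following one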
447         vpsrlq          \$29, $ACC8, $TEMP1
448         vpand           $AND_MASK, $ACC8, $ACC8
449         vpsrlq          \$29, $ACC1, $TEMP2
450         vpand           $AND_MASK, $ACC1, $ACC1
451
452         vpermq          \$0x93, $TEMP1, $TEMP1
453         vpxor           $ZERO, $ZERO, $ZERO
454         vpermq          \$0x93, $TEMP2, $TEMP2
455
456         vpblendd        \$3, $ZERO, $TEMP1, $TEMP0
457         vpblendd        \$3, $TEMP1, $TEMP2, $TEMP1
458         vpaddq          $TEMP0, $ACC8, $ACC8
459         vpblendd        \$3, $TEMP2, $ZERO, $TEMP2
460         vpaddq          $TEMP1, $ACC1, $ACC1
461         vpaddq          $TEMP2, $ACC2, $ACC2
462         vmovdqu         $ACC1, 32*9-192($tp0)
463         vmovdqu         $ACC2, 32*10-192($tp0)
464
465         mov     (%rsp), %rax
466         mov     8(%rsp), $r1
467         mov     16(%rsp), $r2
468         mov     24(%rsp), $r3
469         vmovdqu 32*1(%rsp), $ACC1
470         vmovdqu 32*2-192($tp0), $ACC2
471         vmovdqu 32*3-192($tp0), $ACC3
472         vmovdqu 32*4-192($tp0), $ACC4
473         vmovdqu 32*5-192($tp0), $ACC5
474         vmovdqu 32*6-192($tp0), $ACC6
475         vmovdqu 32*7-192($tp0), $ACC7
476
477         mov     %rax, $r0
478         imull   $n0, %eax
479         and     \$0x1fffffff, %eax
480         vmovd   %eax, $Y1
481
482         mov     %rax, %rdx
483         imulq   -128($np), %rax
484          vpbroadcastq   $Y1, $Y1
485         add     %rax, $r0
486         mov     %rdx, %rax
487         imulq   8-128($np), %rax
488         shr     \$29, $r0
489         add     %rax, $r1
490         mov     %rdx, %rax
491         imulq   16-128($np), %rax
492         add     $r0, $r1
493         add     %rax, $r2
494         imulq   24-128($np), %rdx
495         add     %rdx, $r3
496
497         mov     $r1, %rax
498         imull   $n0, %eax
499         and     \$0x1fffffff, %eax
500
501         mov \$9, $i
502         jmp .LOOP_REDUCE_1024
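        # nine iterations perform the Montgomery-style reduction of all 36
        # limbs; the scalar imul chain keeps exact, fully carried copies of
        # the lowest result words, from which the successive multiplier words
        # (broadcast as Y1/Y2) are derived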
503
504 .align  32
505 .LOOP_REDUCE_1024:
506         vmovd   %eax, $Y2
507         vpbroadcastq    $Y2, $Y2
508
509         vpmuludq        32*1-128($np), $Y1, $TEMP0
510          mov    %rax, %rdx
511          imulq  -128($np), %rax
512         vpaddq          $TEMP0, $ACC1, $ACC1
513          add    %rax, $r1
514         vpmuludq        32*2-128($np), $Y1, $TEMP1
515          mov    %rdx, %rax
516          imulq  8-128($np), %rax
517         vpaddq          $TEMP1, $ACC2, $ACC2
518         vpmuludq        32*3-128($np), $Y1, $TEMP2
519          .byte  0x67
520          add    %rax, $r2
521          .byte  0x67
522          mov    %rdx, %rax
523          imulq  16-128($np), %rax
524          shr    \$29, $r1
525         vpaddq          $TEMP2, $ACC3, $ACC3
526         vpmuludq        32*4-128($np), $Y1, $TEMP0
527          add    %rax, $r3
528          add    $r1, $r2
529         vpaddq          $TEMP0, $ACC4, $ACC4
530         vpmuludq        32*5-128($np), $Y1, $TEMP1
531          mov    $r2, %rax
532          imull  $n0, %eax
533         vpaddq          $TEMP1, $ACC5, $ACC5
534         vpmuludq        32*6-128($np), $Y1, $TEMP2
535          and    \$0x1fffffff, %eax
536         vpaddq          $TEMP2, $ACC6, $ACC6
537         vpmuludq        32*7-128($np), $Y1, $TEMP0
538         vpaddq          $TEMP0, $ACC7, $ACC7
539         vpmuludq        32*8-128($np), $Y1, $TEMP1
540          vmovd  %eax, $Y1
541          #vmovdqu       32*1-8-128($np), $TEMP2         # moved below
542         vpaddq          $TEMP1, $ACC8, $ACC8
543          #vmovdqu       32*2-8-128($np), $TEMP0         # moved below
544          vpbroadcastq   $Y1, $Y1
545
546         vpmuludq        32*1-8-128($np), $Y2, $TEMP2    # see above
547         vmovdqu         32*3-8-128($np), $TEMP1
548          mov    %rax, %rdx
549          imulq  -128($np), %rax
550         vpaddq          $TEMP2, $ACC1, $ACC1
551         vpmuludq        32*2-8-128($np), $Y2, $TEMP0    # see above
552         vmovdqu         32*4-8-128($np), $TEMP2
553          add    %rax, $r2
554          mov    %rdx, %rax
555          imulq  8-128($np), %rax
556         vpaddq          $TEMP0, $ACC2, $ACC2
557          add    $r3, %rax
558          shr    \$29, $r2
559         vpmuludq        $Y2, $TEMP1, $TEMP1
560         vmovdqu         32*5-8-128($np), $TEMP0
561          add    $r2, %rax
562         vpaddq          $TEMP1, $ACC3, $ACC3
563         vpmuludq        $Y2, $TEMP2, $TEMP2
564         vmovdqu         32*6-8-128($np), $TEMP1
565          .byte  0x67
566          mov    %rax, $r3
567          imull  $n0, %eax
568         vpaddq          $TEMP2, $ACC4, $ACC4
569         vpmuludq        $Y2, $TEMP0, $TEMP0
570         .byte   0xc4,0x41,0x7e,0x6f,0x9d,0x58,0x00,0x00,0x00    # vmovdqu               32*7-8-128($np), $TEMP2
571          and    \$0x1fffffff, %eax
572         vpaddq          $TEMP0, $ACC5, $ACC5
573         vpmuludq        $Y2, $TEMP1, $TEMP1
574         vmovdqu         32*8-8-128($np), $TEMP0
575         vpaddq          $TEMP1, $ACC6, $ACC6
576         vpmuludq        $Y2, $TEMP2, $TEMP2
577         vmovdqu         32*9-8-128($np), $ACC9
578          vmovd  %eax, $ACC0                     # borrow ACC0 for Y2
579          imulq  -128($np), %rax
580         vpaddq          $TEMP2, $ACC7, $ACC7
581         vpmuludq        $Y2, $TEMP0, $TEMP0
582          vmovdqu        32*1-16-128($np), $TEMP1
583          vpbroadcastq   $ACC0, $ACC0
584         vpaddq          $TEMP0, $ACC8, $ACC8
585         vpmuludq        $Y2, $ACC9, $ACC9
586          vmovdqu        32*2-16-128($np), $TEMP2
587          add    %rax, $r3
588
589 ___
590 ($ACC0,$Y2)=($Y2,$ACC0);
591 $code.=<<___;
592          vmovdqu        32*1-24-128($np), $ACC0
593         vpmuludq        $Y1, $TEMP1, $TEMP1
594         vmovdqu         32*3-16-128($np), $TEMP0
595         vpaddq          $TEMP1, $ACC1, $ACC1
596          vpmuludq       $Y2, $ACC0, $ACC0
597         vpmuludq        $Y1, $TEMP2, $TEMP2
598         .byte   0xc4,0x41,0x7e,0x6f,0xb5,0xf0,0xff,0xff,0xff    # vmovdqu               32*4-16-128($np), $TEMP1
599          vpaddq         $ACC1, $ACC0, $ACC0
600         vpaddq          $TEMP2, $ACC2, $ACC2
601         vpmuludq        $Y1, $TEMP0, $TEMP0
602         vmovdqu         32*5-16-128($np), $TEMP2
603          .byte  0x67
604          vmovq          $ACC0, %rax
605          vmovdqu        $ACC0, (%rsp)           # transfer $r0-$r3
606         vpaddq          $TEMP0, $ACC3, $ACC3
607         vpmuludq        $Y1, $TEMP1, $TEMP1
608         vmovdqu         32*6-16-128($np), $TEMP0
609         vpaddq          $TEMP1, $ACC4, $ACC4
610         vpmuludq        $Y1, $TEMP2, $TEMP2
611         vmovdqu         32*7-16-128($np), $TEMP1
612         vpaddq          $TEMP2, $ACC5, $ACC5
613         vpmuludq        $Y1, $TEMP0, $TEMP0
614         vmovdqu         32*8-16-128($np), $TEMP2
615         vpaddq          $TEMP0, $ACC6, $ACC6
616         vpmuludq        $Y1, $TEMP1, $TEMP1
617          shr    \$29, $r3
618         vmovdqu         32*9-16-128($np), $TEMP0
619          add    $r3, %rax
620         vpaddq          $TEMP1, $ACC7, $ACC7
621         vpmuludq        $Y1, $TEMP2, $TEMP2
622          #vmovdqu       32*2-24-128($np), $TEMP1        # moved below
623          mov    %rax, $r0
624          imull  $n0, %eax
625         vpaddq          $TEMP2, $ACC8, $ACC8
626         vpmuludq        $Y1, $TEMP0, $TEMP0
627          and    \$0x1fffffff, %eax
628          vmovd  %eax, $Y1
629          vmovdqu        32*3-24-128($np), $TEMP2
630         .byte   0x67
631         vpaddq          $TEMP0, $ACC9, $ACC9
632          vpbroadcastq   $Y1, $Y1
633
634         vpmuludq        32*2-24-128($np), $Y2, $TEMP1   # see above
635         vmovdqu         32*4-24-128($np), $TEMP0
636          mov    %rax, %rdx
637          imulq  -128($np), %rax
638          mov    8(%rsp), $r1
639         vpaddq          $TEMP1, $ACC2, $ACC1
640         vpmuludq        $Y2, $TEMP2, $TEMP2
641         vmovdqu         32*5-24-128($np), $TEMP1
642          add    %rax, $r0
643          mov    %rdx, %rax
644          imulq  8-128($np), %rax
645          .byte  0x67
646          shr    \$29, $r0
647          mov    16(%rsp), $r2
648         vpaddq          $TEMP2, $ACC3, $ACC2
649         vpmuludq        $Y2, $TEMP0, $TEMP0
650         vmovdqu         32*6-24-128($np), $TEMP2
651          add    %rax, $r1
652          mov    %rdx, %rax
653          imulq  16-128($np), %rax
654         vpaddq          $TEMP0, $ACC4, $ACC3
655         vpmuludq        $Y2, $TEMP1, $TEMP1
656         vmovdqu         32*7-24-128($np), $TEMP0
657          imulq  24-128($np), %rdx               # future $r3
658          add    %rax, $r2
659          lea    ($r0,$r1), %rax
660         vpaddq          $TEMP1, $ACC5, $ACC4
661         vpmuludq        $Y2, $TEMP2, $TEMP2
662         vmovdqu         32*8-24-128($np), $TEMP1
663          mov    %rax, $r1
664          imull  $n0, %eax
665         vpmuludq        $Y2, $TEMP0, $TEMP0
666         vpaddq          $TEMP2, $ACC6, $ACC5
667         vmovdqu         32*9-24-128($np), $TEMP2
668          and    \$0x1fffffff, %eax
669         vpaddq          $TEMP0, $ACC7, $ACC6
670         vpmuludq        $Y2, $TEMP1, $TEMP1
671          add    24(%rsp), %rdx
672         vpaddq          $TEMP1, $ACC8, $ACC7
673         vpmuludq        $Y2, $TEMP2, $TEMP2
674         vpaddq          $TEMP2, $ACC9, $ACC8
675          vmovq  $r3, $ACC9
676          mov    %rdx, $r3
677
678         dec     $i
679         jnz     .LOOP_REDUCE_1024
680 ___
681 ($ACC0,$Y2)=($Y2,$ACC0);
682 $code.=<<___;
683         lea     448(%rsp), $tp1                 # size optimization
684         vpaddq  $ACC9, $Y2, $ACC0
685         vpxor   $ZERO, $ZERO, $ZERO
686
687         vpaddq          32*9-192($tp0), $ACC0, $ACC0
688         vpaddq          32*10-448($tp1), $ACC1, $ACC1
689         vpaddq          32*11-448($tp1), $ACC2, $ACC2
690         vpaddq          32*12-448($tp1), $ACC3, $ACC3
691         vpaddq          32*13-448($tp1), $ACC4, $ACC4
692         vpaddq          32*14-448($tp1), $ACC5, $ACC5
693         vpaddq          32*15-448($tp1), $ACC6, $ACC6
694         vpaddq          32*16-448($tp1), $ACC7, $ACC7
695         vpaddq          32*17-448($tp1), $ACC8, $ACC8
696
697         vpsrlq          \$29, $ACC0, $TEMP1
698         vpand           $AND_MASK, $ACC0, $ACC0
699         vpsrlq          \$29, $ACC1, $TEMP2
700         vpand           $AND_MASK, $ACC1, $ACC1
701         vpsrlq          \$29, $ACC2, $TEMP3
702         vpermq          \$0x93, $TEMP1, $TEMP1
703         vpand           $AND_MASK, $ACC2, $ACC2
704         vpsrlq          \$29, $ACC3, $TEMP4
705         vpermq          \$0x93, $TEMP2, $TEMP2
706         vpand           $AND_MASK, $ACC3, $ACC3
707         vpermq          \$0x93, $TEMP3, $TEMP3
708
709         vpblendd        \$3, $ZERO, $TEMP1, $TEMP0
710         vpermq          \$0x93, $TEMP4, $TEMP4
711         vpblendd        \$3, $TEMP1, $TEMP2, $TEMP1
712         vpaddq          $TEMP0, $ACC0, $ACC0
713         vpblendd        \$3, $TEMP2, $TEMP3, $TEMP2
714         vpaddq          $TEMP1, $ACC1, $ACC1
715         vpblendd        \$3, $TEMP3, $TEMP4, $TEMP3
716         vpaddq          $TEMP2, $ACC2, $ACC2
717         vpblendd        \$3, $TEMP4, $ZERO, $TEMP4
718         vpaddq          $TEMP3, $ACC3, $ACC3
719         vpaddq          $TEMP4, $ACC4, $ACC4
720
721         vpsrlq          \$29, $ACC0, $TEMP1
722         vpand           $AND_MASK, $ACC0, $ACC0
723         vpsrlq          \$29, $ACC1, $TEMP2
724         vpand           $AND_MASK, $ACC1, $ACC1
725         vpsrlq          \$29, $ACC2, $TEMP3
726         vpermq          \$0x93, $TEMP1, $TEMP1
727         vpand           $AND_MASK, $ACC2, $ACC2
728         vpsrlq          \$29, $ACC3, $TEMP4
729         vpermq          \$0x93, $TEMP2, $TEMP2
730         vpand           $AND_MASK, $ACC3, $ACC3
731         vpermq          \$0x93, $TEMP3, $TEMP3
732
733         vpblendd        \$3, $ZERO, $TEMP1, $TEMP0
734         vpermq          \$0x93, $TEMP4, $TEMP4
735         vpblendd        \$3, $TEMP1, $TEMP2, $TEMP1
736         vpaddq          $TEMP0, $ACC0, $ACC0
737         vpblendd        \$3, $TEMP2, $TEMP3, $TEMP2
738         vpaddq          $TEMP1, $ACC1, $ACC1
739         vmovdqu         $ACC0, 32*0-128($rp)
740         vpblendd        \$3, $TEMP3, $TEMP4, $TEMP3
741         vpaddq          $TEMP2, $ACC2, $ACC2
742         vmovdqu         $ACC1, 32*1-128($rp)
743         vpblendd        \$3, $TEMP4, $ZERO, $TEMP4
744         vpaddq          $TEMP3, $ACC3, $ACC3
745         vmovdqu         $ACC2, 32*2-128($rp)
746         vpaddq          $TEMP4, $ACC4, $ACC4
747         vmovdqu         $ACC3, 32*3-128($rp)
748 ___
749 $TEMP5=$ACC0;
750 $code.=<<___;
751         vpsrlq          \$29, $ACC4, $TEMP1
752         vpand           $AND_MASK, $ACC4, $ACC4
753         vpsrlq          \$29, $ACC5, $TEMP2
754         vpand           $AND_MASK, $ACC5, $ACC5
755         vpsrlq          \$29, $ACC6, $TEMP3
756         vpermq          \$0x93, $TEMP1, $TEMP1
757         vpand           $AND_MASK, $ACC6, $ACC6
758         vpsrlq          \$29, $ACC7, $TEMP4
759         vpermq          \$0x93, $TEMP2, $TEMP2
760         vpand           $AND_MASK, $ACC7, $ACC7
761         vpsrlq          \$29, $ACC8, $TEMP5
762         vpermq          \$0x93, $TEMP3, $TEMP3
763         vpand           $AND_MASK, $ACC8, $ACC8
764         vpermq          \$0x93, $TEMP4, $TEMP4
765
766         vpblendd        \$3, $ZERO, $TEMP1, $TEMP0
767         vpermq          \$0x93, $TEMP5, $TEMP5
768         vpblendd        \$3, $TEMP1, $TEMP2, $TEMP1
769         vpaddq          $TEMP0, $ACC4, $ACC4
770         vpblendd        \$3, $TEMP2, $TEMP3, $TEMP2
771         vpaddq          $TEMP1, $ACC5, $ACC5
772         vpblendd        \$3, $TEMP3, $TEMP4, $TEMP3
773         vpaddq          $TEMP2, $ACC6, $ACC6
774         vpblendd        \$3, $TEMP4, $TEMP5, $TEMP4
775         vpaddq          $TEMP3, $ACC7, $ACC7
776         vpaddq          $TEMP4, $ACC8, $ACC8
777      
778         vpsrlq          \$29, $ACC4, $TEMP1
779         vpand           $AND_MASK, $ACC4, $ACC4
780         vpsrlq          \$29, $ACC5, $TEMP2
781         vpand           $AND_MASK, $ACC5, $ACC5
782         vpsrlq          \$29, $ACC6, $TEMP3
783         vpermq          \$0x93, $TEMP1, $TEMP1
784         vpand           $AND_MASK, $ACC6, $ACC6
785         vpsrlq          \$29, $ACC7, $TEMP4
786         vpermq          \$0x93, $TEMP2, $TEMP2
787         vpand           $AND_MASK, $ACC7, $ACC7
788         vpsrlq          \$29, $ACC8, $TEMP5
789         vpermq          \$0x93, $TEMP3, $TEMP3
790         vpand           $AND_MASK, $ACC8, $ACC8
791         vpermq          \$0x93, $TEMP4, $TEMP4
792
793         vpblendd        \$3, $ZERO, $TEMP1, $TEMP0
794         vpermq          \$0x93, $TEMP5, $TEMP5
795         vpblendd        \$3, $TEMP1, $TEMP2, $TEMP1
796         vpaddq          $TEMP0, $ACC4, $ACC4
797         vpblendd        \$3, $TEMP2, $TEMP3, $TEMP2
798         vpaddq          $TEMP1, $ACC5, $ACC5
799         vmovdqu         $ACC4, 32*4-128($rp)
800         vpblendd        \$3, $TEMP3, $TEMP4, $TEMP3
801         vpaddq          $TEMP2, $ACC6, $ACC6
802         vmovdqu         $ACC5, 32*5-128($rp)
803         vpblendd        \$3, $TEMP4, $TEMP5, $TEMP4
804         vpaddq          $TEMP3, $ACC7, $ACC7
805         vmovdqu         $ACC6, 32*6-128($rp)
806         vpaddq          $TEMP4, $ACC8, $ACC8
807         vmovdqu         $ACC7, 32*7-128($rp)
808         vmovdqu         $ACC8, 32*8-128($rp)
809
810         mov     $rp, $ap
811         dec     $rep
812         jne     .LOOP_GRANDE_SQR_1024
813
814         vzeroall
815         mov     %rbp, %rax
816 ___
817 $code.=<<___ if ($win64);
818         movaps  -0xd8(%rax),%xmm6
819         movaps  -0xc8(%rax),%xmm7
820         movaps  -0xb8(%rax),%xmm8
821         movaps  -0xa8(%rax),%xmm9
822         movaps  -0x98(%rax),%xmm10
823         movaps  -0x88(%rax),%xmm11
824         movaps  -0x78(%rax),%xmm12
825         movaps  -0x68(%rax),%xmm13
826         movaps  -0x58(%rax),%xmm14
827         movaps  -0x48(%rax),%xmm15
828 ___
829 $code.=<<___;
830         mov     -48(%rax),%r15
831         mov     -40(%rax),%r14
832         mov     -32(%rax),%r13
833         mov     -24(%rax),%r12
834         mov     -16(%rax),%rbp
835         mov     -8(%rax),%rbx
836         lea     (%rax),%rsp             # restore %rsp
837 .Lsqr_1024_epilogue:
838         ret
839 .size   rsaz_1024_sqr_avx2,.-rsaz_1024_sqr_avx2
840 ___
841 }
842
843 { # void AMM_WW(
844 my $rp="%rdi";  # BN_ULONG *rp,
845 my $ap="%rsi";  # const BN_ULONG *ap,
846 my $bp="%rdx";  # const BN_ULONG *bp,
847 my $np="%rcx";  # const BN_ULONG *np,
848 my $n0="%r8d";  # unsigned int n0);
849
850 # The registers that hold the accumulated redundant result
851 # The AMM works on 1024-bit operands and the redundant word size is 29 bits,
852 # therefore ceil(1024/29) = 36 words, which occupy 36/4 = 9 ymm registers
853 my $ACC0="%ymm0";
854 my $ACC1="%ymm1";
855 my $ACC2="%ymm2";
856 my $ACC3="%ymm3";
857 my $ACC4="%ymm4";
858 my $ACC5="%ymm5";
859 my $ACC6="%ymm6";
860 my $ACC7="%ymm7";
861 my $ACC8="%ymm8";
862 my $ACC9="%ymm9";
863
864 # Registers that hold the broadcasted words of multiplier, currently used
865 my $Bi="%ymm10";
866 my $Yi="%ymm11";
867
868 # Helper registers
869 my $TEMP0=$ACC0;
870 my $TEMP1="%ymm12";
871 my $TEMP2="%ymm13";
872 my $ZERO="%ymm14";
873 my $AND_MASK="%ymm15";
874
875 # alu registers that hold the first words of the ACC
876 my $r0="%r9";
877 my $r1="%r10";
878 my $r2="%r11";
879 my $r3="%r12";
880
881 my $i="%r14d";
882 my $tmp="%r15";
883
884 $bp="%r13";     # reassigned argument
885
886 $code.=<<___;
887 .globl  rsaz_1024_mul_avx2
888 .type   rsaz_1024_mul_avx2,\@function,5
889 .align  64
890 rsaz_1024_mul_avx2:
891         lea     (%rsp), %rax
892         push    %rbx
893         push    %rbp
894         push    %r12
895         push    %r13
896         push    %r14
897         push    %r15
898 ___
899 $code.=<<___ if ($win64);
900         vzeroupper
901         lea     -0xa8(%rsp),%rsp
902         vmovaps %xmm6,-0xd8(%rax)
903         vmovaps %xmm7,-0xc8(%rax)
904         vmovaps %xmm8,-0xb8(%rax)
905         vmovaps %xmm9,-0xa8(%rax)
906         vmovaps %xmm10,-0x98(%rax)
907         vmovaps %xmm11,-0x88(%rax)
908         vmovaps %xmm12,-0x78(%rax)
909         vmovaps %xmm13,-0x68(%rax)
910         vmovaps %xmm14,-0x58(%rax)
911         vmovaps %xmm15,-0x48(%rax)
912 .Lmul_1024_body:
913 ___
914 $code.=<<___;
915         mov     %rax,%rbp
916         vzeroall
917         mov     %rdx, $bp       # reassigned argument
918         sub     \$64,%rsp
919
920         # unaligned 256-bit load that crosses page boundary can
921         # cause severe performance degradation here, so if $ap does
922         # cross page boundary, swap it with $bp [meaning that caller
923         # is advised to lay down $ap and $bp next to each other, so
924         # that only one can cross page boundary].
925         .byte   0x67,0x67
926         mov     $ap, $tmp
927         and     \$4095, $tmp
928         add     \$32*10, $tmp
929         shr     \$12, $tmp
930         mov     $ap, $tmp
931         cmovnz  $bp, $ap
932         cmovnz  $tmp, $bp
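        # branchless swap: shr set ZF and mov leaves flags intact, so if the
        # 32*10-byte window at $ap crosses a 4K page the two pointers are
        # exchanged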
933
934         mov     $np, $tmp
935         sub     \$-128,$ap      # size optimization
936         sub     \$-128,$np
937         sub     \$-128,$rp
938
939         and     \$4095, $tmp    # see if $np crosses page
940         add     \$32*10, $tmp
941         .byte   0x67,0x67
942         shr     \$12, $tmp
943         jz      .Lmul_1024_no_n_copy
944
945         # unaligned 256-bit load that crosses page boundary can
946         # cause severe performance degradation here, so if $np does
947         # cross page boundary, copy it to stack and make sure stack
948         # frame doesn't...
949         sub             \$32*10,%rsp
950         vmovdqu         32*0-128($np), $ACC0
951         and             \$-512, %rsp
952         vmovdqu         32*1-128($np), $ACC1
953         vmovdqu         32*2-128($np), $ACC2
954         vmovdqu         32*3-128($np), $ACC3
955         vmovdqu         32*4-128($np), $ACC4
956         vmovdqu         32*5-128($np), $ACC5
957         vmovdqu         32*6-128($np), $ACC6
958         vmovdqu         32*7-128($np), $ACC7
959         vmovdqu         32*8-128($np), $ACC8
960         lea             64+128(%rsp),$np
961         vmovdqu         $ACC0, 32*0-128($np)
962         vpxor           $ACC0, $ACC0, $ACC0
963         vmovdqu         $ACC1, 32*1-128($np)
964         vpxor           $ACC1, $ACC1, $ACC1
965         vmovdqu         $ACC2, 32*2-128($np)
966         vpxor           $ACC2, $ACC2, $ACC2
967         vmovdqu         $ACC3, 32*3-128($np)
968         vpxor           $ACC3, $ACC3, $ACC3
969         vmovdqu         $ACC4, 32*4-128($np)
970         vpxor           $ACC4, $ACC4, $ACC4
971         vmovdqu         $ACC5, 32*5-128($np)
972         vpxor           $ACC5, $ACC5, $ACC5
973         vmovdqu         $ACC6, 32*6-128($np)
974         vpxor           $ACC6, $ACC6, $ACC6
975         vmovdqu         $ACC7, 32*7-128($np)
976         vpxor           $ACC7, $ACC7, $ACC7
977         vmovdqu         $ACC8, 32*8-128($np)
978         vmovdqa         $ACC0, $ACC8
979         vmovdqu         $ACC9, 32*9-128($np)    # $ACC9 is zero after vzeroall
980 .Lmul_1024_no_n_copy:
981         and     \$-64,%rsp
982
983         mov     ($bp), %rbx
984         vpbroadcastq ($bp), $Bi
985         vmovdqu $ACC0, (%rsp)                   # clear top of stack
986         xor     $r0, $r0
987         .byte   0x67
988         xor     $r1, $r1
989         xor     $r2, $r2
990         xor     $r3, $r3
991
992         vmovdqu .Land_mask(%rip), $AND_MASK
993         mov     \$9, $i
994         vmovdqu $ACC9, 32*9-128($rp)            # $ACC9 is zero after vzeroall
995         jmp     .Loop_mul_1024
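        # each of the nine iterations consumes four words of the multiplier
        # and interleaves the matching reduction steps; the four lowest limbs
        # are tracked exactly in the scalar registers $r0-$r3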
996
997 .align  32
998 .Loop_mul_1024:
999          vpsrlq         \$29, $ACC3, $ACC9              # correct $ACC3(*)
1000         mov     %rbx, %rax
1001         imulq   -128($ap), %rax
1002         add     $r0, %rax
1003         mov     %rbx, $r1
1004         imulq   8-128($ap), $r1
1005         add     8(%rsp), $r1
1006
1007         mov     %rax, $r0
1008         imull   $n0, %eax
1009         and     \$0x1fffffff, %eax
1010
1011          mov    %rbx, $r2
1012          imulq  16-128($ap), $r2
1013          add    16(%rsp), $r2
1014
1015          mov    %rbx, $r3
1016          imulq  24-128($ap), $r3
1017          add    24(%rsp), $r3
1018         vpmuludq        32*1-128($ap),$Bi,$TEMP0
1019          vmovd          %eax, $Yi
1020         vpaddq          $TEMP0,$ACC1,$ACC1
1021         vpmuludq        32*2-128($ap),$Bi,$TEMP1
1022          vpbroadcastq   $Yi, $Yi
1023         vpaddq          $TEMP1,$ACC2,$ACC2
1024         vpmuludq        32*3-128($ap),$Bi,$TEMP2
1025          vpand          $AND_MASK, $ACC3, $ACC3         # correct $ACC3
1026         vpaddq          $TEMP2,$ACC3,$ACC3
1027         vpmuludq        32*4-128($ap),$Bi,$TEMP0
1028         vpaddq          $TEMP0,$ACC4,$ACC4
1029         vpmuludq        32*5-128($ap),$Bi,$TEMP1
1030         vpaddq          $TEMP1,$ACC5,$ACC5
1031         vpmuludq        32*6-128($ap),$Bi,$TEMP2
1032         vpaddq          $TEMP2,$ACC6,$ACC6
1033         vpmuludq        32*7-128($ap),$Bi,$TEMP0
1034          vpermq         \$0x93, $ACC9, $ACC9            # correct $ACC3
1035         vpaddq          $TEMP0,$ACC7,$ACC7
1036         vpmuludq        32*8-128($ap),$Bi,$TEMP1
1037          vpbroadcastq   8($bp), $Bi
1038         vpaddq          $TEMP1,$ACC8,$ACC8
1039
1040         mov     %rax,%rdx
1041         imulq   -128($np),%rax
1042         add     %rax,$r0
1043         mov     %rdx,%rax
1044         imulq   8-128($np),%rax
1045         add     %rax,$r1
1046         mov     %rdx,%rax
1047         imulq   16-128($np),%rax
1048         add     %rax,$r2
1049         shr     \$29, $r0
1050         imulq   24-128($np),%rdx
1051         add     %rdx,$r3
1052         add     $r0, $r1
1053
1054         vpmuludq        32*1-128($np),$Yi,$TEMP2
1055          vmovq          $Bi, %rbx
1056         vpaddq          $TEMP2,$ACC1,$ACC1
1057         vpmuludq        32*2-128($np),$Yi,$TEMP0
1058         vpaddq          $TEMP0,$ACC2,$ACC2
1059         vpmuludq        32*3-128($np),$Yi,$TEMP1
1060         vpaddq          $TEMP1,$ACC3,$ACC3
1061         vpmuludq        32*4-128($np),$Yi,$TEMP2
1062         vpaddq          $TEMP2,$ACC4,$ACC4
1063         vpmuludq        32*5-128($np),$Yi,$TEMP0
1064         vpaddq          $TEMP0,$ACC5,$ACC5
1065         vpmuludq        32*6-128($np),$Yi,$TEMP1
1066         vpaddq          $TEMP1,$ACC6,$ACC6
1067         vpmuludq        32*7-128($np),$Yi,$TEMP2
1068          vpblendd       \$3, $ZERO, $ACC9, $ACC9        # correct $ACC3
1069         vpaddq          $TEMP2,$ACC7,$ACC7
1070         vpmuludq        32*8-128($np),$Yi,$TEMP0
1071          vpaddq         $ACC9, $ACC3, $ACC3             # correct $ACC3
1072         vpaddq          $TEMP0,$ACC8,$ACC8
1073
1074         mov     %rbx, %rax
1075         imulq   -128($ap),%rax
1076         add     %rax,$r1
1077          vmovdqu        -8+32*1-128($ap),$TEMP1
1078         mov     %rbx, %rax
1079         imulq   8-128($ap),%rax
1080         add     %rax,$r2
1081          vmovdqu        -8+32*2-128($ap),$TEMP2
1082
1083         mov     $r1, %rax
1084         imull   $n0, %eax
1085         and     \$0x1fffffff, %eax
1086
1087          imulq  16-128($ap),%rbx
1088          add    %rbx,$r3
1089         vpmuludq        $Bi,$TEMP1,$TEMP1
1090          vmovd          %eax, $Yi
1091         vmovdqu         -8+32*3-128($ap),$TEMP0
1092         vpaddq          $TEMP1,$ACC1,$ACC1
1093         vpmuludq        $Bi,$TEMP2,$TEMP2
1094          vpbroadcastq   $Yi, $Yi
1095         vmovdqu         -8+32*4-128($ap),$TEMP1
1096         vpaddq          $TEMP2,$ACC2,$ACC2
1097         vpmuludq        $Bi,$TEMP0,$TEMP0
1098         vmovdqu         -8+32*5-128($ap),$TEMP2
1099         vpaddq          $TEMP0,$ACC3,$ACC3
1100         vpmuludq        $Bi,$TEMP1,$TEMP1
1101         vmovdqu         -8+32*6-128($ap),$TEMP0
1102         vpaddq          $TEMP1,$ACC4,$ACC4
1103         vpmuludq        $Bi,$TEMP2,$TEMP2
1104         vmovdqu         -8+32*7-128($ap),$TEMP1
1105         vpaddq          $TEMP2,$ACC5,$ACC5
1106         vpmuludq        $Bi,$TEMP0,$TEMP0
1107         vmovdqu         -8+32*8-128($ap),$TEMP2
1108         vpaddq          $TEMP0,$ACC6,$ACC6
1109         vpmuludq        $Bi,$TEMP1,$TEMP1
1110         vmovdqu         -8+32*9-128($ap),$ACC9
1111         vpaddq          $TEMP1,$ACC7,$ACC7
1112         vpmuludq        $Bi,$TEMP2,$TEMP2
1113         vpaddq          $TEMP2,$ACC8,$ACC8
1114         vpmuludq        $Bi,$ACC9,$ACC9
1115          vpbroadcastq   16($bp), $Bi
1116
1117         mov     %rax,%rdx
1118         imulq   -128($np),%rax
1119         add     %rax,$r1
1120          vmovdqu        -8+32*1-128($np),$TEMP0
1121         mov     %rdx,%rax
1122         imulq   8-128($np),%rax
1123         add     %rax,$r2
1124          vmovdqu        -8+32*2-128($np),$TEMP1
1125         shr     \$29, $r1
1126         imulq   16-128($np),%rdx
1127         add     %rdx,$r3
1128         add     $r1, $r2
1129
1130         vpmuludq        $Yi,$TEMP0,$TEMP0
1131          vmovq          $Bi, %rbx
1132         vmovdqu         -8+32*3-128($np),$TEMP2
1133         vpaddq          $TEMP0,$ACC1,$ACC1
1134         vpmuludq        $Yi,$TEMP1,$TEMP1
1135         vmovdqu         -8+32*4-128($np),$TEMP0
1136         vpaddq          $TEMP1,$ACC2,$ACC2
1137         vpmuludq        $Yi,$TEMP2,$TEMP2
1138         vmovdqu         -8+32*5-128($np),$TEMP1
1139         vpaddq          $TEMP2,$ACC3,$ACC3
1140         vpmuludq        $Yi,$TEMP0,$TEMP0
1141         vmovdqu         -8+32*6-128($np),$TEMP2
1142         vpaddq          $TEMP0,$ACC4,$ACC4
1143         vpmuludq        $Yi,$TEMP1,$TEMP1
1144         vmovdqu         -8+32*7-128($np),$TEMP0
1145         vpaddq          $TEMP1,$ACC5,$ACC5
1146         vpmuludq        $Yi,$TEMP2,$TEMP2
1147         vmovdqu         -8+32*8-128($np),$TEMP1
1148         vpaddq          $TEMP2,$ACC6,$ACC6
1149         vpmuludq        $Yi,$TEMP0,$TEMP0
1150         vmovdqu         -8+32*9-128($np),$TEMP2
1151         vpaddq          $TEMP0,$ACC7,$ACC7
1152         vpmuludq        $Yi,$TEMP1,$TEMP1
1153         vpaddq          $TEMP1,$ACC8,$ACC8
1154         vpmuludq        $Yi,$TEMP2,$TEMP2
1155         vpaddq          $TEMP2,$ACC9,$ACC9
1156
1157          vmovdqu        -16+32*1-128($ap),$TEMP0
1158         mov     %rbx,%rax
1159         imulq   -128($ap),%rax
1160         add     $r2,%rax
1161
1162          vmovdqu        -16+32*2-128($ap),$TEMP1
1163         mov     %rax,$r2
1164         imull   $n0, %eax
1165         and     \$0x1fffffff, %eax
1166
1167          imulq  8-128($ap),%rbx
1168          add    %rbx,$r3
1169         vpmuludq        $Bi,$TEMP0,$TEMP0
1170          vmovd          %eax, $Yi
1171         vmovdqu         -16+32*3-128($ap),$TEMP2
1172         vpaddq          $TEMP0,$ACC1,$ACC1
1173         vpmuludq        $Bi,$TEMP1,$TEMP1
1174          vpbroadcastq   $Yi, $Yi
1175         vmovdqu         -16+32*4-128($ap),$TEMP0
1176         vpaddq          $TEMP1,$ACC2,$ACC2
1177         vpmuludq        $Bi,$TEMP2,$TEMP2
1178         vmovdqu         -16+32*5-128($ap),$TEMP1
1179         vpaddq          $TEMP2,$ACC3,$ACC3
1180         vpmuludq        $Bi,$TEMP0,$TEMP0
1181         vmovdqu         -16+32*6-128($ap),$TEMP2
1182         vpaddq          $TEMP0,$ACC4,$ACC4
1183         vpmuludq        $Bi,$TEMP1,$TEMP1
1184         vmovdqu         -16+32*7-128($ap),$TEMP0
1185         vpaddq          $TEMP1,$ACC5,$ACC5
1186         vpmuludq        $Bi,$TEMP2,$TEMP2
1187         vmovdqu         -16+32*8-128($ap),$TEMP1
1188         vpaddq          $TEMP2,$ACC6,$ACC6
1189         vpmuludq        $Bi,$TEMP0,$TEMP0
1190         vmovdqu         -16+32*9-128($ap),$TEMP2
1191         vpaddq          $TEMP0,$ACC7,$ACC7
1192         vpmuludq        $Bi,$TEMP1,$TEMP1
1193         vpaddq          $TEMP1,$ACC8,$ACC8
1194         vpmuludq        $Bi,$TEMP2,$TEMP2
1195          vpbroadcastq   24($bp), $Bi
1196         vpaddq          $TEMP2,$ACC9,$ACC9
1197
1198          vmovdqu        -16+32*1-128($np),$TEMP0
1199         mov     %rax,%rdx
1200         imulq   -128($np),%rax
1201         add     %rax,$r2
1202          vmovdqu        -16+32*2-128($np),$TEMP1
1203         imulq   8-128($np),%rdx
1204         add     %rdx,$r3
1205         shr     \$29, $r2
1206
1207         vpmuludq        $Yi,$TEMP0,$TEMP0
1208          vmovq          $Bi, %rbx
1209         vmovdqu         -16+32*3-128($np),$TEMP2
1210         vpaddq          $TEMP0,$ACC1,$ACC1
1211         vpmuludq        $Yi,$TEMP1,$TEMP1
1212         vmovdqu         -16+32*4-128($np),$TEMP0
1213         vpaddq          $TEMP1,$ACC2,$ACC2
1214         vpmuludq        $Yi,$TEMP2,$TEMP2
1215         vmovdqu         -16+32*5-128($np),$TEMP1
1216         vpaddq          $TEMP2,$ACC3,$ACC3
1217         vpmuludq        $Yi,$TEMP0,$TEMP0
1218         vmovdqu         -16+32*6-128($np),$TEMP2
1219         vpaddq          $TEMP0,$ACC4,$ACC4
1220         vpmuludq        $Yi,$TEMP1,$TEMP1
1221         vmovdqu         -16+32*7-128($np),$TEMP0
1222         vpaddq          $TEMP1,$ACC5,$ACC5
1223         vpmuludq        $Yi,$TEMP2,$TEMP2
1224         vmovdqu         -16+32*8-128($np),$TEMP1
1225         vpaddq          $TEMP2,$ACC6,$ACC6
1226         vpmuludq        $Yi,$TEMP0,$TEMP0
1227         vmovdqu         -16+32*9-128($np),$TEMP2
1228         vpaddq          $TEMP0,$ACC7,$ACC7
1229         vpmuludq        $Yi,$TEMP1,$TEMP1
1230          vmovdqu        -24+32*1-128($ap),$TEMP0
1231         vpaddq          $TEMP1,$ACC8,$ACC8
1232         vpmuludq        $Yi,$TEMP2,$TEMP2
1233          vmovdqu        -24+32*2-128($ap),$TEMP1
1234         vpaddq          $TEMP2,$ACC9,$ACC9
1235
1236         add     $r2, $r3
1237         imulq   -128($ap),%rbx
1238         add     %rbx,$r3
1239
1240         mov     $r3, %rax
1241         imull   $n0, %eax
1242         and     \$0x1fffffff, %eax
1243
1244         vpmuludq        $Bi,$TEMP0,$TEMP0
1245          vmovd          %eax, $Yi
1246         vmovdqu         -24+32*3-128($ap),$TEMP2
1247         vpaddq          $TEMP0,$ACC1,$ACC1
1248         vpmuludq        $Bi,$TEMP1,$TEMP1
1249          vpbroadcastq   $Yi, $Yi
1250         vmovdqu         -24+32*4-128($ap),$TEMP0
1251         vpaddq          $TEMP1,$ACC2,$ACC2
1252         vpmuludq        $Bi,$TEMP2,$TEMP2
1253         vmovdqu         -24+32*5-128($ap),$TEMP1
1254         vpaddq          $TEMP2,$ACC3,$ACC3
1255         vpmuludq        $Bi,$TEMP0,$TEMP0
1256         vmovdqu         -24+32*6-128($ap),$TEMP2
1257         vpaddq          $TEMP0,$ACC4,$ACC4
1258         vpmuludq        $Bi,$TEMP1,$TEMP1
1259         vmovdqu         -24+32*7-128($ap),$TEMP0
1260         vpaddq          $TEMP1,$ACC5,$ACC5
1261         vpmuludq        $Bi,$TEMP2,$TEMP2
1262         vmovdqu         -24+32*8-128($ap),$TEMP1
1263         vpaddq          $TEMP2,$ACC6,$ACC6
1264         vpmuludq        $Bi,$TEMP0,$TEMP0
1265         vmovdqu         -24+32*9-128($ap),$TEMP2
1266         vpaddq          $TEMP0,$ACC7,$ACC7
1267         vpmuludq        $Bi,$TEMP1,$TEMP1
1268         vpaddq          $TEMP1,$ACC8,$ACC8
1269         vpmuludq        $Bi,$TEMP2,$TEMP2
1270          vpbroadcastq   32($bp), $Bi
1271         vpaddq          $TEMP2,$ACC9,$ACC9
1272          add            \$32, $bp                       # $bp++
1273
1274         vmovdqu         -24+32*1-128($np),$TEMP0
1275         imulq   -128($np),%rax
1276         add     %rax,$r3
1277         shr     \$29, $r3
1278
1279         vmovdqu         -24+32*2-128($np),$TEMP1
1280         vpmuludq        $Yi,$TEMP0,$TEMP0
1281          vmovq          $Bi, %rbx
1282         vmovdqu         -24+32*3-128($np),$TEMP2
1283         vpaddq          $TEMP0,$ACC1,$ACC0              # $ACC0==$TEMP0
1284         vpmuludq        $Yi,$TEMP1,$TEMP1
1285          vmovdqu        $ACC0, (%rsp)                   # transfer $r0-$r3
1286         vpaddq          $TEMP1,$ACC2,$ACC1
1287         vmovdqu         -24+32*4-128($np),$TEMP0
1288         vpmuludq        $Yi,$TEMP2,$TEMP2
1289         vmovdqu         -24+32*5-128($np),$TEMP1
1290         vpaddq          $TEMP2,$ACC3,$ACC2
1291         vpmuludq        $Yi,$TEMP0,$TEMP0
1292         vmovdqu         -24+32*6-128($np),$TEMP2
1293         vpaddq          $TEMP0,$ACC4,$ACC3
1294         vpmuludq        $Yi,$TEMP1,$TEMP1
1295         vmovdqu         -24+32*7-128($np),$TEMP0
1296         vpaddq          $TEMP1,$ACC5,$ACC4
1297         vpmuludq        $Yi,$TEMP2,$TEMP2
1298         vmovdqu         -24+32*8-128($np),$TEMP1
1299         vpaddq          $TEMP2,$ACC6,$ACC5
1300         vpmuludq        $Yi,$TEMP0,$TEMP0
1301         vmovdqu         -24+32*9-128($np),$TEMP2
1302          mov    $r3, $r0
1303         vpaddq          $TEMP0,$ACC7,$ACC6
1304         vpmuludq        $Yi,$TEMP1,$TEMP1
1305          add    (%rsp), $r0
1306         vpaddq          $TEMP1,$ACC8,$ACC7
1307         vpmuludq        $Yi,$TEMP2,$TEMP2
1308          vmovq  $r3, $TEMP1
1309         vpaddq          $TEMP2,$ACC9,$ACC8
1310
1311         dec     $i
1312         jnz     .Loop_mul_1024
1313 ___
1314
1315 # (*)   The original implementation corrected ACC1-ACC3 for overflow
1316 #       after 7 loop runs, or after 28 iterations, or 56 additions.
1317 #       But as we underutilize resources, it's possible to correct in
1318 #       each iteration with only marginal performance loss. And since
1319 #       the correction is then done in each iteration, fewer digits need
1320 #       to be corrected, which avoids the performance penalty completely.
1321 #       Also note that only three digits out of four are corrected. This
1322 #       works because the most significant digit sees fewer additions.
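#       A rough head-room argument (not spelled out in the original
#       submission): each partial product of two 29-bit digits is below 2^58,
#       so a 64-bit lane can take on the order of 2^6 = 64 such additions
#       before it can wrap; correcting a little in every iteration keeps the
#       running count far below that bound.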
1323
1324 $TEMP0 = $ACC9;
1325 $TEMP3 = $Bi;
1326 $TEMP4 = $Yi;
1327 $code.=<<___;
1328         vpermq          \$0, $AND_MASK, $AND_MASK
1329         vpaddq          (%rsp), $TEMP1, $ACC0
1330
1331         vpsrlq          \$29, $ACC0, $TEMP1
1332         vpand           $AND_MASK, $ACC0, $ACC0
1333         vpsrlq          \$29, $ACC1, $TEMP2
1334         vpand           $AND_MASK, $ACC1, $ACC1
1335         vpsrlq          \$29, $ACC2, $TEMP3
1336         vpermq          \$0x93, $TEMP1, $TEMP1
1337         vpand           $AND_MASK, $ACC2, $ACC2
1338         vpsrlq          \$29, $ACC3, $TEMP4
1339         vpermq          \$0x93, $TEMP2, $TEMP2
1340         vpand           $AND_MASK, $ACC3, $ACC3
1341
1342         vpblendd        \$3, $ZERO, $TEMP1, $TEMP0
1343         vpermq          \$0x93, $TEMP3, $TEMP3
1344         vpblendd        \$3, $TEMP1, $TEMP2, $TEMP1
1345         vpermq          \$0x93, $TEMP4, $TEMP4
1346         vpaddq          $TEMP0, $ACC0, $ACC0
1347         vpblendd        \$3, $TEMP2, $TEMP3, $TEMP2
1348         vpaddq          $TEMP1, $ACC1, $ACC1
1349         vpblendd        \$3, $TEMP3, $TEMP4, $TEMP3
1350         vpaddq          $TEMP2, $ACC2, $ACC2
1351         vpblendd        \$3, $TEMP4, $ZERO, $TEMP4
1352         vpaddq          $TEMP3, $ACC3, $ACC3
1353         vpaddq          $TEMP4, $ACC4, $ACC4
1354
1355         vpsrlq          \$29, $ACC0, $TEMP1
1356         vpand           $AND_MASK, $ACC0, $ACC0
1357         vpsrlq          \$29, $ACC1, $TEMP2
1358         vpand           $AND_MASK, $ACC1, $ACC1
1359         vpsrlq          \$29, $ACC2, $TEMP3
1360         vpermq          \$0x93, $TEMP1, $TEMP1
1361         vpand           $AND_MASK, $ACC2, $ACC2
1362         vpsrlq          \$29, $ACC3, $TEMP4
1363         vpermq          \$0x93, $TEMP2, $TEMP2
1364         vpand           $AND_MASK, $ACC3, $ACC3
1365         vpermq          \$0x93, $TEMP3, $TEMP3
1366
1367         vpblendd        \$3, $ZERO, $TEMP1, $TEMP0
1368         vpermq          \$0x93, $TEMP4, $TEMP4
1369         vpblendd        \$3, $TEMP1, $TEMP2, $TEMP1
1370         vpaddq          $TEMP0, $ACC0, $ACC0
1371         vpblendd        \$3, $TEMP2, $TEMP3, $TEMP2
1372         vpaddq          $TEMP1, $ACC1, $ACC1
1373         vpblendd        \$3, $TEMP3, $TEMP4, $TEMP3
1374         vpaddq          $TEMP2, $ACC2, $ACC2
1375         vpblendd        \$3, $TEMP4, $ZERO, $TEMP4
1376         vpaddq          $TEMP3, $ACC3, $ACC3
1377         vpaddq          $TEMP4, $ACC4, $ACC4
1378
1379         vmovdqu         $ACC0, 0-128($rp)
1380         vmovdqu         $ACC1, 32-128($rp)
1381         vmovdqu         $ACC2, 64-128($rp)
1382         vmovdqu         $ACC3, 96-128($rp)
1383 ___
1384
1385 $TEMP5=$ACC0;
1386 $code.=<<___;
1387         vpsrlq          \$29, $ACC4, $TEMP1
1388         vpand           $AND_MASK, $ACC4, $ACC4
1389         vpsrlq          \$29, $ACC5, $TEMP2
1390         vpand           $AND_MASK, $ACC5, $ACC5
1391         vpsrlq          \$29, $ACC6, $TEMP3
1392         vpermq          \$0x93, $TEMP1, $TEMP1
1393         vpand           $AND_MASK, $ACC6, $ACC6
1394         vpsrlq          \$29, $ACC7, $TEMP4
1395         vpermq          \$0x93, $TEMP2, $TEMP2
1396         vpand           $AND_MASK, $ACC7, $ACC7
1397         vpsrlq          \$29, $ACC8, $TEMP5
1398         vpermq          \$0x93, $TEMP3, $TEMP3
1399         vpand           $AND_MASK, $ACC8, $ACC8
1400         vpermq          \$0x93, $TEMP4, $TEMP4
1401
1402         vpblendd        \$3, $ZERO, $TEMP1, $TEMP0
1403         vpermq          \$0x93, $TEMP5, $TEMP5
1404         vpblendd        \$3, $TEMP1, $TEMP2, $TEMP1
1405         vpaddq          $TEMP0, $ACC4, $ACC4
1406         vpblendd        \$3, $TEMP2, $TEMP3, $TEMP2
1407         vpaddq          $TEMP1, $ACC5, $ACC5
1408         vpblendd        \$3, $TEMP3, $TEMP4, $TEMP3
1409         vpaddq          $TEMP2, $ACC6, $ACC6
1410         vpblendd        \$3, $TEMP4, $TEMP5, $TEMP4
1411         vpaddq          $TEMP3, $ACC7, $ACC7
1412         vpaddq          $TEMP4, $ACC8, $ACC8
1413
1414         vpsrlq          \$29, $ACC4, $TEMP1
1415         vpand           $AND_MASK, $ACC4, $ACC4
1416         vpsrlq          \$29, $ACC5, $TEMP2
1417         vpand           $AND_MASK, $ACC5, $ACC5
1418         vpsrlq          \$29, $ACC6, $TEMP3
1419         vpermq          \$0x93, $TEMP1, $TEMP1
1420         vpand           $AND_MASK, $ACC6, $ACC6
1421         vpsrlq          \$29, $ACC7, $TEMP4
1422         vpermq          \$0x93, $TEMP2, $TEMP2
1423         vpand           $AND_MASK, $ACC7, $ACC7
1424         vpsrlq          \$29, $ACC8, $TEMP5
1425         vpermq          \$0x93, $TEMP3, $TEMP3
1426         vpand           $AND_MASK, $ACC8, $ACC8
1427         vpermq          \$0x93, $TEMP4, $TEMP4
1428
1429         vpblendd        \$3, $ZERO, $TEMP1, $TEMP0
1430         vpermq          \$0x93, $TEMP5, $TEMP5
1431         vpblendd        \$3, $TEMP1, $TEMP2, $TEMP1
1432         vpaddq          $TEMP0, $ACC4, $ACC4
1433         vpblendd        \$3, $TEMP2, $TEMP3, $TEMP2
1434         vpaddq          $TEMP1, $ACC5, $ACC5
1435         vpblendd        \$3, $TEMP3, $TEMP4, $TEMP3
1436         vpaddq          $TEMP2, $ACC6, $ACC6
1437         vpblendd        \$3, $TEMP4, $TEMP5, $TEMP4
1438         vpaddq          $TEMP3, $ACC7, $ACC7
1439         vpaddq          $TEMP4, $ACC8, $ACC8
1440
1441         vmovdqu         $ACC4, 128-128($rp)
1442         vmovdqu         $ACC5, 160-128($rp)
1443         vmovdqu         $ACC6, 192-128($rp)
1444         vmovdqu         $ACC7, 224-128($rp)
1445         vmovdqu         $ACC8, 256-128($rp)
1446         vzeroupper
1447
1448         mov     %rbp, %rax
1449 ___
1450 $code.=<<___ if ($win64);
1451         movaps  -0xd8(%rax),%xmm6
1452         movaps  -0xc8(%rax),%xmm7
1453         movaps  -0xb8(%rax),%xmm8
1454         movaps  -0xa8(%rax),%xmm9
1455         movaps  -0x98(%rax),%xmm10
1456         movaps  -0x88(%rax),%xmm11
1457         movaps  -0x78(%rax),%xmm12
1458         movaps  -0x68(%rax),%xmm13
1459         movaps  -0x58(%rax),%xmm14
1460         movaps  -0x48(%rax),%xmm15
1461 ___
1462 $code.=<<___;
1463         mov     -48(%rax),%r15
1464         mov     -40(%rax),%r14
1465         mov     -32(%rax),%r13
1466         mov     -24(%rax),%r12
1467         mov     -16(%rax),%rbp
1468         mov     -8(%rax),%rbx
1469         lea     (%rax),%rsp             # restore %rsp
1470 .Lmul_1024_epilogue:
1471         ret
1472 .size   rsaz_1024_mul_avx2,.-rsaz_1024_mul_avx2
1473 ___
1474 }
1475 {
1476 my ($out,$inp) = $win64 ? ("%rcx","%rdx") : ("%rdi","%rsi");
1477 my @T = map("%r$_",(8..11));
1478
1479 $code.=<<___;
1480 .globl  rsaz_1024_red2norm_avx2
1481 .type   rsaz_1024_red2norm_avx2,\@abi-omnipotent
1482 .align  32
1483 rsaz_1024_red2norm_avx2:
1484         sub     \$-128,$inp     # size optimization
1485         xor     %rax,%rax
1486 ___
1487
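# rsaz_1024_red2norm_avx2 converts from the redundant representation used
# above, in which each 64-bit word of $inp holds one 29-bit digit d[j],
# back to the normal 16x64-bit form.  In effect it computes, for i=0..15,
#
#       out[i] = ( (sum_j d[j]*2^(29*j)) >> (64*i) ) mod 2^64
#
# by summing every digit that overlaps bits [64*i,64*(i+1)); the high bits
# of the digit that straddles each 64-bit boundary, plus any carry from
# the additions, are forwarded into the next output word.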
1488 for ($j=0,$i=0; $i<16; $i++) {
1489     my $k=0;
1490     while (29*$j<64*($i+1)) {   # load data till boundary
1491         $code.="        mov     `8*$j-128`($inp), @T[0]\n";
1492         $j++; $k++; push(@T,shift(@T));
1493     }
1494     $l=$k;
1495     while ($k>1) {              # shift all loaded values but the last
1496         $code.="        shl     \$`29*($j-$k)`,@T[-$k]\n";
1497         $k--;
1498     }
1499     $code.=<<___;               # shift last value
1500         mov     @T[-1], @T[0]
1501         shl     \$`29*($j-1)`, @T[-1]
1502         shr     \$`-29*($j-1)`, @T[0]
1503 ___
1504     while ($l) {                # accumulate all values
1505         $code.="        add     @T[-$l], %rax\n";
1506         $l--;
1507     }
1508         $code.=<<___;
1509         adc     \$0, @T[0]      # absorb carry, if any
1510         mov     %rax, 8*$i($out)
1511         mov     @T[0], %rax
1512 ___
1513     push(@T,shift(@T));
1514 }
1515 $code.=<<___;
1516         ret
1517 .size   rsaz_1024_red2norm_avx2,.-rsaz_1024_red2norm_avx2
1518
1519 .globl  rsaz_1024_norm2red_avx2
1520 .type   rsaz_1024_norm2red_avx2,\@abi-omnipotent
1521 .align  32
1522 rsaz_1024_norm2red_avx2:
1523         sub     \$-128,$out     # size optimization
1524         mov     ($inp),@T[0]
1525         mov     \$0x1fffffff,%eax
1526 ___
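# rsaz_1024_norm2red_avx2 performs the inverse conversion: it slices the
# 16x64-bit input into 29-bit digits, roughly
#
#       d[j] = ( N >> (29*j) ) & 0x1fffffff,    j = 0..35
#
# using shrd to pull bits across 64-bit word boundaries, and then stores
# four zero words above the top digit so the vector code can process the
# number as whole 4-digit groups.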
1527 for ($j=0,$i=0; $i<16; $i++) {
1528     $code.="    mov     `8*($i+1)`($inp),@T[1]\n"       if ($i<15);
1529     $code.="    xor     @T[1],@T[1]\n"                  if ($i==15);
1530     my $k=1;
1531     while (29*($j+1)<64*($i+1)) {
1532         $code.=<<___;
1533         mov     @T[0],@T[-$k]
1534         shr     \$`29*$j`,@T[-$k]
1535         and     %rax,@T[-$k]                            # &0x1fffffff
1536         mov     @T[-$k],`8*$j-128`($out)
1537 ___
1538         $j++; $k++;
1539     }
1540     $code.=<<___;
1541         shrd    \$`29*$j`,@T[1],@T[0]
1542         and     %rax,@T[0]
1543         mov     @T[0],`8*$j-128`($out)
1544 ___
1545     $j++;
1546     push(@T,shift(@T));
1547 }
1548 $code.=<<___;
1549         mov     @T[0],`8*$j-128`($out)                  # zero
1550         mov     @T[0],`8*($j+1)-128`($out)
1551         mov     @T[0],`8*($j+2)-128`($out)
1552         mov     @T[0],`8*($j+3)-128`($out)
1553         ret
1554 .size   rsaz_1024_norm2red_avx2,.-rsaz_1024_norm2red_avx2
1555 ___
1556 }
1557 {
1558 my ($out,$inp,$power) = $win64 ? ("%rcx","%rdx","%r8d") : ("%rdi","%rsi","%edx");
1559
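# The scatter/gather pair below maintains the table of 32 precomputed
# powers used by the 5-bit fixed-window exponentiation.
# rsaz_1024_scatter5_avx2 packs each group of four 29-bit digits (held in
# 64-bit lanes) into 16 bytes and stores them into entry $power of the
# table with a 512-byte stride, i.e. digit group k of entry p lives at
# offset 16*p + 512*k.  rsaz_1024_gather5_avx2 reverses this: it reads
# all eight 64-byte cache lines of every 512-byte row and selects the
# required one with the byte masks derived from .Lgather_table, so that
# the set of cache lines touched does not depend on $power, and then
# re-expands the packed digits into 64-bit lanes with vpermd and
# .Lgather_permd.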
1560 $code.=<<___;
1561 .globl  rsaz_1024_scatter5_avx2
1562 .type   rsaz_1024_scatter5_avx2,\@abi-omnipotent
1563 .align  32
1564 rsaz_1024_scatter5_avx2:
1565         vzeroupper
1566         vmovdqu .Lscatter_permd(%rip),%ymm5
1567         shl     \$4,$power
1568         lea     ($out,$power),$out
1569         mov     \$9,%eax
1570         jmp     .Loop_scatter_1024
1571
1572 .align  32
1573 .Loop_scatter_1024:
1574         vmovdqu         ($inp),%ymm0
1575         lea             32($inp),$inp
1576         vpermd          %ymm0,%ymm5,%ymm0
1577         vmovdqu         %xmm0,($out)
1578         lea             16*32($out),$out
1579         dec     %eax
1580         jnz     .Loop_scatter_1024
1581
1582         vzeroupper
1583         ret
1584 .size   rsaz_1024_scatter5_avx2,.-rsaz_1024_scatter5_avx2
1585
1586 .globl  rsaz_1024_gather5_avx2
1587 .type   rsaz_1024_gather5_avx2,\@abi-omnipotent
1588 .align  32
1589 rsaz_1024_gather5_avx2:
1590 ___
1591 $code.=<<___ if ($win64);
1592         lea     -0x88(%rsp),%rax
1593         vzeroupper
1594 .LSEH_begin_rsaz_1024_gather5:
1595         # I can't trust assembler to use specific encoding:-(
1596         .byte   0x48,0x8d,0x60,0xe0             #lea    -0x20(%rax),%rsp
1597         .byte   0xc5,0xf8,0x29,0x70,0xe0        #vmovaps %xmm6,-0x20(%rax)
1598         .byte   0xc5,0xf8,0x29,0x78,0xf0        #vmovaps %xmm7,-0x10(%rax)
1599         .byte   0xc5,0x78,0x29,0x40,0x00        #vmovaps %xmm8,0(%rax)
1600         .byte   0xc5,0x78,0x29,0x48,0x10        #vmovaps %xmm9,0x10(%rax)
1601         .byte   0xc5,0x78,0x29,0x50,0x20        #vmovaps %xmm10,0x20(%rax)
1602         .byte   0xc5,0x78,0x29,0x58,0x30        #vmovaps %xmm11,0x30(%rax)
1603         .byte   0xc5,0x78,0x29,0x60,0x40        #vmovaps %xmm12,0x40(%rax)
1604         .byte   0xc5,0x78,0x29,0x68,0x50        #vmovaps %xmm13,0x50(%rax)
1605         .byte   0xc5,0x78,0x29,0x70,0x60        #vmovaps %xmm14,0x60(%rax)
1606         .byte   0xc5,0x78,0x29,0x78,0x70        #vmovaps %xmm15,0x70(%rax)
1607 ___
1608 $code.=<<___;
1609         lea     .Lgather_table(%rip),%r11
1610         mov     $power,%eax
1611         and     \$3,$power
1612         shr     \$2,%eax                        # cache line number
1613         shl     \$4,$power                      # offset within cache line
1614
1615         vmovdqu         -32(%r11),%ymm7         # .Lgather_permd
1616         vpbroadcastb    8(%r11,%rax), %xmm8
1617         vpbroadcastb    7(%r11,%rax), %xmm9
1618         vpbroadcastb    6(%r11,%rax), %xmm10
1619         vpbroadcastb    5(%r11,%rax), %xmm11
1620         vpbroadcastb    4(%r11,%rax), %xmm12
1621         vpbroadcastb    3(%r11,%rax), %xmm13
1622         vpbroadcastb    2(%r11,%rax), %xmm14
1623         vpbroadcastb    1(%r11,%rax), %xmm15
1624
1625         lea     64($inp,$power),$inp
1626         mov     \$64,%r11                       # size optimization
1627         mov     \$9,%eax
1628         jmp     .Loop_gather_1024
1629
1630 .align  32
1631 .Loop_gather_1024:
1632         vpand           -64($inp),              %xmm8,%xmm0
1633         vpand           ($inp),                 %xmm9,%xmm1
1634         vpand           64($inp),               %xmm10,%xmm2
1635         vpand           ($inp,%r11,2),          %xmm11,%xmm3
1636          vpor                                   %xmm0,%xmm1,%xmm1
1637         vpand           64($inp,%r11,2),        %xmm12,%xmm4
1638          vpor                                   %xmm2,%xmm3,%xmm3
1639         vpand           ($inp,%r11,4),          %xmm13,%xmm5
1640          vpor                                   %xmm1,%xmm3,%xmm3
1641         vpand           64($inp,%r11,4),        %xmm14,%xmm6
1642          vpor                                   %xmm4,%xmm5,%xmm5
1643         vpand           -128($inp,%r11,8),      %xmm15,%xmm2
1644         lea             ($inp,%r11,8),$inp
1645          vpor                                   %xmm3,%xmm5,%xmm5
1646          vpor                                   %xmm2,%xmm6,%xmm6
1647          vpor                                   %xmm5,%xmm6,%xmm6
1648         vpermd          %ymm6,%ymm7,%ymm6
1649         vmovdqu         %ymm6,($out)
1650         lea             32($out),$out
1651         dec     %eax
1652         jnz     .Loop_gather_1024
1653
1654         vpxor   %ymm0,%ymm0,%ymm0
1655         vmovdqu %ymm0,($out)
1656         vzeroupper
1657 ___
1658 $code.=<<___ if ($win64);
1659         movaps  (%rsp),%xmm6
1660         movaps  0x10(%rsp),%xmm7
1661         movaps  0x20(%rsp),%xmm8
1662         movaps  0x30(%rsp),%xmm9
1663         movaps  0x40(%rsp),%xmm10
1664         movaps  0x50(%rsp),%xmm11
1665         movaps  0x60(%rsp),%xmm12
1666         movaps  0x70(%rsp),%xmm13
1667         movaps  0x80(%rsp),%xmm14
1668         movaps  0x90(%rsp),%xmm15
1669         lea     0xa8(%rsp),%rsp
1670 .LSEH_end_rsaz_1024_gather5:
1671 ___
1672 $code.=<<___;
1673         ret
1674 .size   rsaz_1024_gather5_avx2,.-rsaz_1024_gather5_avx2
1675 ___
1676 }
1677
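# rsaz_avx2_eligible decides whether this code path should be used at
# all: it returns 1 only if the AVX2 bit (bit 5 of the third 32-bit word
# of OPENSSL_ia32cap_P, i.e. CPUID.7.EBX) is set, and, in builds where
# the assembler supports it ($addx), only if the CPU does not advertise
# both BMI2 (bit 8) and ADX (bit 19), since on such CPUs the MULX/ADCX
# code path is preferred instead.  Roughly:
#
#       eligible = (cap & AVX2) && !($addx && (cap & (BMI2|ADX)) == (BMI2|ADX))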
1678 $code.=<<___;
1679 .extern OPENSSL_ia32cap_P
1680 .globl  rsaz_avx2_eligible
1681 .type   rsaz_avx2_eligible,\@abi-omnipotent
1682 .align  32
1683 rsaz_avx2_eligible:
1684         mov     OPENSSL_ia32cap_P+8(%rip),%eax
1685 ___
1686 $code.=<<___    if ($addx);
1687         mov     \$`1<<8|1<<19`,%ecx
1688         mov     \$0,%edx
1689         and     %eax,%ecx
1690         cmp     \$`1<<8|1<<19`,%ecx     # check for BMI2+AD*X
1691         cmove   %edx,%eax
1692 ___
1693 $code.=<<___;
1694         and     \$`1<<5`,%eax
1695         shr     \$5,%eax
1696         ret
1697 .size   rsaz_avx2_eligible,.-rsaz_avx2_eligible
1698
1699 .align  64
1700 .Land_mask:
1701         .quad   0x1fffffff,0x1fffffff,0x1fffffff,-1
1702 .Lscatter_permd:
1703         .long   0,2,4,6,7,7,7,7
1704 .Lgather_permd:
1705         .long   0,7,1,7,2,7,3,7
1706 .Lgather_table:
1707         .byte   0,0,0,0,0,0,0,0, 0xff,0,0,0,0,0,0,0
1708 .align  64
1709 ___
1710
1711 if ($win64) {
1712 $rec="%rcx";
1713 $frame="%rdx";
1714 $context="%r8";
1715 $disp="%r9";
1716
1717 $code.=<<___
1718 .extern __imp_RtlVirtualUnwind
1719 .type   rsaz_se_handler,\@abi-omnipotent
1720 .align  16
1721 rsaz_se_handler:
1722         push    %rsi
1723         push    %rdi
1724         push    %rbx
1725         push    %rbp
1726         push    %r12
1727         push    %r13
1728         push    %r14
1729         push    %r15
1730         pushfq
1731         sub     \$64,%rsp
1732
1733         mov     120($context),%rax      # pull context->Rax
1734         mov     248($context),%rbx      # pull context->Rip
1735
1736         mov     8($disp),%rsi           # disp->ImageBase
1737         mov     56($disp),%r11          # disp->HandlerData
1738
1739         mov     0(%r11),%r10d           # HandlerData[0]
1740         lea     (%rsi,%r10),%r10        # prologue label
1741         cmp     %r10,%rbx               # context->Rip<prologue label
1742         jb      .Lcommon_seh_tail
1743
1744         mov     152($context),%rax      # pull context->Rsp
1745
1746         mov     4(%r11),%r10d           # HandlerData[1]
1747         lea     (%rsi,%r10),%r10        # epilogue label
1748         cmp     %r10,%rbx               # context->Rip>=epilogue label
1749         jae     .Lcommon_seh_tail
1750
1751         mov     160($context),%rax      # pull context->Rbp
1752
1753         mov     -48(%rax),%r15
1754         mov     -40(%rax),%r14
1755         mov     -32(%rax),%r13
1756         mov     -24(%rax),%r12
1757         mov     -16(%rax),%rbp
1758         mov     -8(%rax),%rbx
1759         mov     %r15,240($context)
1760         mov     %r14,232($context)
1761         mov     %r13,224($context)
1762         mov     %r12,216($context)
1763         mov     %rbp,160($context)
1764         mov     %rbx,144($context)
1765
1766         lea     -0xd8(%rax),%rsi        # %xmm save area
1767         lea     512($context),%rdi      # & context.Xmm6
1768         mov     \$20,%ecx               # 10*sizeof(%xmm0)/sizeof(%rax)
1769         .long   0xa548f3fc              # cld; rep movsq
1770
1771 .Lcommon_seh_tail:
1772         mov     8(%rax),%rdi
1773         mov     16(%rax),%rsi
1774         mov     %rax,152($context)      # restore context->Rsp
1775         mov     %rsi,168($context)      # restore context->Rsi
1776         mov     %rdi,176($context)      # restore context->Rdi
1777
1778         mov     40($disp),%rdi          # disp->ContextRecord
1779         mov     $context,%rsi           # context
1780         mov     \$154,%ecx              # sizeof(CONTEXT)
1781         .long   0xa548f3fc              # cld; rep movsq
1782
1783         mov     $disp,%rsi
1784         xor     %rcx,%rcx               # arg1, UNW_FLAG_NHANDLER
1785         mov     8(%rsi),%rdx            # arg2, disp->ImageBase
1786         mov     0(%rsi),%r8             # arg3, disp->ControlPc
1787         mov     16(%rsi),%r9            # arg4, disp->FunctionEntry
1788         mov     40(%rsi),%r10           # disp->ContextRecord
1789         lea     56(%rsi),%r11           # &disp->HandlerData
1790         lea     24(%rsi),%r12           # &disp->EstablisherFrame
1791         mov     %r10,32(%rsp)           # arg5
1792         mov     %r11,40(%rsp)           # arg6
1793         mov     %r12,48(%rsp)           # arg7
1794         mov     %rcx,56(%rsp)           # arg8, (NULL)
1795         call    *__imp_RtlVirtualUnwind(%rip)
1796
1797         mov     \$1,%eax                # ExceptionContinueSearch
1798         add     \$64,%rsp
1799         popfq
1800         pop     %r15
1801         pop     %r14
1802         pop     %r13
1803         pop     %r12
1804         pop     %rbp
1805         pop     %rbx
1806         pop     %rdi
1807         pop     %rsi
1808         ret
1809 .size   rsaz_se_handler,.-rsaz_se_handler
1810
1811 .section        .pdata
1812 .align  4
1813         .rva    .LSEH_begin_rsaz_1024_sqr_avx2
1814         .rva    .LSEH_end_rsaz_1024_sqr_avx2
1815         .rva    .LSEH_info_rsaz_1024_sqr_avx2
1816
1817         .rva    .LSEH_begin_rsaz_1024_mul_avx2
1818         .rva    .LSEH_end_rsaz_1024_mul_avx2
1819         .rva    .LSEH_info_rsaz_1024_mul_avx2
1820
1821         .rva    .LSEH_begin_rsaz_1024_gather5
1822         .rva    .LSEH_end_rsaz_1024_gather5
1823         .rva    .LSEH_info_rsaz_1024_gather5
1824 .section        .xdata
1825 .align  8
1826 .LSEH_info_rsaz_1024_sqr_avx2:
1827         .byte   9,0,0,0
1828         .rva    rsaz_se_handler
1829         .rva    .Lsqr_1024_body,.Lsqr_1024_epilogue
1830 .LSEH_info_rsaz_1024_mul_avx2:
1831         .byte   9,0,0,0
1832         .rva    rsaz_se_handler
1833         .rva    .Lmul_1024_body,.Lmul_1024_epilogue
1834 .LSEH_info_rsaz_1024_gather5:
1835         .byte   0x01,0x33,0x16,0x00
1836         .byte   0x36,0xf8,0x09,0x00     #vmovaps 0x90(rsp),xmm15
1837         .byte   0x31,0xe8,0x08,0x00     #vmovaps 0x80(rsp),xmm14
1838         .byte   0x2c,0xd8,0x07,0x00     #vmovaps 0x70(rsp),xmm13
1839         .byte   0x27,0xc8,0x06,0x00     #vmovaps 0x60(rsp),xmm12
1840         .byte   0x22,0xb8,0x05,0x00     #vmovaps 0x50(rsp),xmm11
1841         .byte   0x1d,0xa8,0x04,0x00     #vmovaps 0x40(rsp),xmm10
1842         .byte   0x18,0x98,0x03,0x00     #vmovaps 0x30(rsp),xmm9
1843         .byte   0x13,0x88,0x02,0x00     #vmovaps 0x20(rsp),xmm8
1844         .byte   0x0e,0x78,0x01,0x00     #vmovaps 0x10(rsp),xmm7
1845         .byte   0x09,0x68,0x00,0x00     #vmovaps 0x00(rsp),xmm6
1846         .byte   0x04,0x01,0x15,0x00     #sub    rsp,0xa8
1847 ___
1848 }
1849
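# Post-process the generated code: shift counts are reduced modulo 64
# (the generator above intentionally emits out-of-range, even negative,
# immediates for digits that straddle word boundaries), and ymm spellings
# of vmovd/vmovq, vpinsr/vpextr and vpbroadcast are rewritten to operate
# on the corresponding xmm register, which is the form these instruction
# variants actually take.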
1850 foreach (split("\n",$code)) {
1851         s/\`([^\`]*)\`/eval($1)/ge;
1852
1853         s/\b(sh[rl]d?\s+\$)(-?[0-9]+)/$1.$2%64/ge               or
1854
1855         s/\b(vmov[dq])\b(.+)%ymm([0-9]+)/$1$2%xmm$3/go          or
1856         s/\b(vmovdqu)\b(.+)%x%ymm([0-9]+)/$1$2%xmm$3/go         or
1857         s/\b(vpinsr[qd])\b(.+)%ymm([0-9]+)/$1$2%xmm$3/go        or
1858         s/\b(vpextr[qd])\b(.+)%ymm([0-9]+)/$1$2%xmm$3/go        or
1859         s/\b(vpbroadcast[qd]\s+)%ymm([0-9]+)/$1%xmm$2/go;
1860         print $_,"\n";
1861 }
1862
1863 }}} else {{{
1864 print <<___;    # assembler is too old
1865 .text
1866
1867 .globl  rsaz_avx2_eligible
1868 .type   rsaz_avx2_eligible,\@abi-omnipotent
1869 rsaz_avx2_eligible:
1870         xor     %eax,%eax
1871         ret
1872 .size   rsaz_avx2_eligible,.-rsaz_avx2_eligible
1873
1874 .globl  rsaz_1024_sqr_avx2
1875 .globl  rsaz_1024_mul_avx2
1876 .globl  rsaz_1024_norm2red_avx2
1877 .globl  rsaz_1024_red2norm_avx2
1878 .globl  rsaz_1024_scatter5_avx2
1879 .globl  rsaz_1024_gather5_avx2
1880 .type   rsaz_1024_sqr_avx2,\@abi-omnipotent
1881 rsaz_1024_sqr_avx2:
1882 rsaz_1024_mul_avx2:
1883 rsaz_1024_norm2red_avx2:
1884 rsaz_1024_red2norm_avx2:
1885 rsaz_1024_scatter5_avx2:
1886 rsaz_1024_gather5_avx2:
1887         .byte   0x0f,0x0b       # ud2
1888         ret
1889 .size   rsaz_1024_sqr_avx2,.-rsaz_1024_sqr_avx2
1890 ___
1891 }}}
1892
1893 close STDOUT;