3 ##############################################################################
5 # Copyright (c) 2012, Intel Corporation #
7 # All rights reserved. #
9 # Redistribution and use in source and binary forms, with or without #
10 # modification, are permitted provided that the following conditions are #
13 # * Redistributions of source code must retain the above copyright #
14 # notice, this list of conditions and the following disclaimer. #
16 # * Redistributions in binary form must reproduce the above copyright #
17 # notice, this list of conditions and the following disclaimer in the #
18 # documentation and/or other materials provided with the #
21 # * Neither the name of the Intel Corporation nor the names of its #
22 # contributors may be used to endorse or promote products derived from #
23 # this software without specific prior written permission. #
26 # THIS SOFTWARE IS PROVIDED BY INTEL CORPORATION "AS IS" AND ANY #
27 # EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE #
28 # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR #
29 # PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL INTEL CORPORATION OR #
30 # CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, #
31 # EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, #
32 # PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR #
33 # PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF #
34 # LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING #
35 # NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS #
36 # SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. #
38 ##############################################################################
39 # Developers and authors: #
40 # Shay Gueron (1, 2), and Vlad Krasnov (1) #
41 # (1) Intel Corporation, Israel Development Center, Haifa, Israel #
42 # (2) University of Haifa, Israel #
43 ##############################################################################
45 # [1] S. Gueron, V. Krasnov: "Software Implementation of Modular #
46 # Exponentiation, Using Advanced Vector Instructions Architectures", #
47 # F. Ozbudak and F. Rodriguez-Henriquez (Eds.): WAIFI 2012, LNCS 7369, #
48 # pp. 119-135, 2012. Springer-Verlag Berlin Heidelberg 2012 #
49 # [2] S. Gueron: "Efficient Software Implementations of Modular #
50 # Exponentiation", Journal of Cryptographic Engineering 2:31-43 (2012). #
51 # [3] S. Gueron, V. Krasnov: "Speeding up Big-Numbers Squaring", IEEE #
52 # Proceedings of 9th International Conference on Information Technology: #
53 # New Generations (ITNG 2012), pp.821-823 (2012) #
54 # [4] S. Gueron, V. Krasnov: "[PATCH] Efficient and side channel analysis #
55 # resistant 1024-bit modular exponentiation, for optimizing RSA2048 #
56 # on AVX2 capable x86_64 platforms", #
57 # http://rt.openssl.org/Ticket/Display.html?id=2850&user=guest&pass=guest#
58 ##############################################################################
60 # +13% improvement over original submission by <appro@openssl.org>
62 # rsa2048 sign/sec OpenSSL 1.0.1 scalar(*) this
63 # 2.3GHz Haswell 621 765/+23% 1113/+79%
65 # (*) if system doesn't support AVX2, for reference purposes;
69 if ($flavour =~ /\./) { $output = $flavour; undef $flavour; }
71 $win64=0; $win64=1 if ($flavour =~ /[nm]asm|mingw64/ || $output =~ /\.asm$/);
73 $0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
74 ( $xlate="${dir}x86_64-xlate.pl" and -f $xlate ) or
75 ( $xlate="${dir}../../perlasm/x86_64-xlate.pl" and -f $xlate) or
76 die "can't locate x86_64-xlate.pl";
78 if (`$ENV{CC} -Wa,-v -c -o /dev/null -x assembler /dev/null 2>&1`
79 =~ /GNU assembler version ([2-9]\.[0-9]+)/) {
80 $avx = ($1>=2.19) + ($1>=2.22);
84 if (!$avx && $win64 && ($flavour =~ /nasm/ || $ENV{ASM} =~ /nasm/) &&
85 `nasm -v 2>&1` =~ /NASM version ([2-9]\.[0-9]+)/) {
86 $avx = ($1>=2.09) + ($1>=2.10);
90 if (!$avx && $win64 && ($flavour =~ /masm/ || $ENV{ASM} =~ /ml64/) &&
91 `ml64 2>&1` =~ /Version ([0-9]+)\./) {
92 $avx = ($1>=10) + ($1>=11);
96 if (!$avx && `$ENV{CC} -v 2>&1` =~ /LLVM ([3-9]\.[0-9]+)/) {
97 $avx = ($1>=3.0) + ($1>=3.1);
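# (a hedged note) $avx encodes what the assembler probed above can handle:
# 0 - no AVX support, 1 - AVX, 2 - AVX2. The AVX2 code in this file is only
# emitted when the assembler is deemed AVX2-capable; otherwise the stub at
# the bottom of the file is printed instead.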
101 open OUT,"| $^X $xlate $flavour $output";
106 my $rp="%rdi"; # BN_ULONG *rp,
107 my $ap="%rsi"; # const BN_ULONG *ap,
108 my $np="%rdx"; # const BN_ULONG *np,
109 my $n0="%ecx"; # const BN_ULONG n0,
110 my $rep="%r8d"; # int repeat);
112 # The registers that hold the accumulated redundant result
113 # The AMM works on 1024-bit operands, and the redundant word size is 29 bits.
114 # Therefore: ceil(1024/29)/4 = 9
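# A worked example of that layout: ceil(1024/29) = 36 digits of 29 bits each,
# kept one digit per 64-bit lane, i.e. four digits per 256-bit register, so
# 36/4 = 9 %ymm registers ($ACC0..$ACC8), with $ACC9 as an extra working
# register.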
125 # Registers that hold the broadcasted words of bp, currently used
128 # Registers that hold the broadcasted words of Y, currently used
133 my $AND_MASK="%ymm15";
134 # alu registers that hold the first words of the ACC
140 my $i="%r14d"; # loop counter
143 my $FrameSize=32*18+32*8; # place for A^2 and 2*A
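# A hedged reading of the frame size: the double-length square A^2 needs
# ceil(2048/29) = 71 redundant digits, i.e. 18 32-byte vectors (rounded up),
# while the remaining 32*8 bytes hold the doubled operand 2*A that the
# squaring pass below stores at $aap.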
150 $np="%r13"; # reassigned argument
155 .globl rsaz_1024_sqr_avx2
156 .type rsaz_1024_sqr_avx2,\@function,5
158 rsaz_1024_sqr_avx2: # 702 cycles, 14% faster than rsaz_1024_mul_avx2
168 $code.=<<___ if ($win64);
170 vmovaps %xmm6,-0xd8(%rax)
171 vmovaps %xmm7,-0xc8(%rax)
172 vmovaps %xmm8,-0xb8(%rax)
173 vmovaps %xmm9,-0xa8(%rax)
174 vmovaps %xmm10,-0x98(%rax)
175 vmovaps %xmm11,-0x88(%rax)
176 vmovaps %xmm12,-0x78(%rax)
177 vmovaps %xmm13,-0x68(%rax)
178 vmovaps %xmm14,-0x58(%rax)
179 vmovaps %xmm15,-0x48(%rax)
184 mov %rdx, $np # reassigned argument
185 sub \$$FrameSize, %rsp
187 sub \$-128, $rp # size optimization
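# (a hedged note on the "size optimization" idiom) sub \$-128 adds 128, but
# -128 still fits a sign-extended 8-bit immediate while +128 would not, so the
# encoding is shorter; the -128 bias is then folded back into the 32*n-128(...)
# displacements used below.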
191 and \$4095, $tmp # see if $np crosses page
194 vpxor $ACC9,$ACC9,$ACC9
195 jz .Lsqr_1024_no_n_copy
197 # unaligned 256-bit load that crosses page boundary can
198 # cause >2x performance degradation here, so if $np does
199 # cross page boundary, copy it to stack and make sure stack
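# A hedged reading of the check above: with $tmp = $np & 4095, the ten vectors
# (32*10 = 320 bytes) read from $np stay within one 4KiB page only if, roughly,
# $tmp + 320 does not spill past 4096; if they do cross, the copy below moves
# them to the scratch area at $FrameSize+128(%rsp) so the later loads come from
# local stack storage instead.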
202 vmovdqu 32*0-128($np), $ACC0
204 vmovdqu 32*1-128($np), $ACC1
205 vmovdqu 32*2-128($np), $ACC2
206 vmovdqu 32*3-128($np), $ACC3
207 vmovdqu 32*4-128($np), $ACC4
208 vmovdqu 32*5-128($np), $ACC5
209 vmovdqu 32*6-128($np), $ACC6
210 vmovdqu 32*7-128($np), $ACC7
211 vmovdqu 32*8-128($np), $ACC8
212 lea $FrameSize+128(%rsp),$np
213 vmovdqu $ACC0, 32*0-128($np)
214 vmovdqu $ACC1, 32*1-128($np)
215 vmovdqu $ACC2, 32*2-128($np)
216 vmovdqu $ACC3, 32*3-128($np)
217 vmovdqu $ACC4, 32*4-128($np)
218 vmovdqu $ACC5, 32*5-128($np)
219 vmovdqu $ACC6, 32*6-128($np)
220 vmovdqu $ACC7, 32*7-128($np)
221 vmovdqu $ACC8, 32*8-128($np)
222 vmovdqu $ACC9, 32*9-128($np) # $ACC9 is zero
224 .Lsqr_1024_no_n_copy:
227 vmovdqu 32*1-128($ap), $ACC1
228 vmovdqu 32*2-128($ap), $ACC2
229 vmovdqu 32*3-128($ap), $ACC3
230 vmovdqu 32*4-128($ap), $ACC4
231 vmovdqu 32*5-128($ap), $ACC5
232 vmovdqu 32*6-128($ap), $ACC6
233 vmovdqu 32*7-128($ap), $ACC7
234 vmovdqu 32*8-128($ap), $ACC8
236 lea 192(%rsp), $tp0 # 64+128=192
237 vpbroadcastq .Land_mask(%rip), $AND_MASK
238 jmp .LOOP_GRANDE_SQR_1024
241 .LOOP_GRANDE_SQR_1024:
242 lea 32*18+128(%rsp), $aap # size optimization
243 lea 448(%rsp), $tp1 # 64+128+256=448
245 # the squaring is performed as described in Variant B of
246 # "Speeding up Big-Number Squaring", so start by calculating
248 vpaddq $ACC1, $ACC1, $ACC1
249 vpbroadcastq 32*0-128($ap), $B1
250 vpaddq $ACC2, $ACC2, $ACC2
251 vmovdqa $ACC1, 32*0-128($aap)
252 vpaddq $ACC3, $ACC3, $ACC3
253 vmovdqa $ACC2, 32*1-128($aap)
254 vpaddq $ACC4, $ACC4, $ACC4
255 vmovdqa $ACC3, 32*2-128($aap)
256 vpaddq $ACC5, $ACC5, $ACC5
257 vmovdqa $ACC4, 32*3-128($aap)
258 vpaddq $ACC6, $ACC6, $ACC6
259 vmovdqa $ACC5, 32*4-128($aap)
260 vpaddq $ACC7, $ACC7, $ACC7
261 vmovdqa $ACC6, 32*5-128($aap)
262 vpaddq $ACC8, $ACC8, $ACC8
263 vmovdqa $ACC7, 32*6-128($aap)
264 vpxor $ACC9, $ACC9, $ACC9
265 vmovdqa $ACC8, 32*7-128($aap)
267 vpmuludq 32*0-128($ap), $B1, $ACC0
268 vpbroadcastq 32*1-128($ap), $B2
269 vmovdqu $ACC9, 32*9-192($tp0) # zero upper half
270 vpmuludq $B1, $ACC1, $ACC1
271 vmovdqu $ACC9, 32*10-448($tp1)
272 vpmuludq $B1, $ACC2, $ACC2
273 vmovdqu $ACC9, 32*11-448($tp1)
274 vpmuludq $B1, $ACC3, $ACC3
275 vmovdqu $ACC9, 32*12-448($tp1)
276 vpmuludq $B1, $ACC4, $ACC4
277 vmovdqu $ACC9, 32*13-448($tp1)
278 vpmuludq $B1, $ACC5, $ACC5
279 vmovdqu $ACC9, 32*14-448($tp1)
280 vpmuludq $B1, $ACC6, $ACC6
281 vmovdqu $ACC9, 32*15-448($tp1)
282 vpmuludq $B1, $ACC7, $ACC7
283 vmovdqu $ACC9, 32*16-448($tp1)
284 vpmuludq $B1, $ACC8, $ACC8
285 vpbroadcastq 32*2-128($ap), $B1
286 vmovdqu $ACC9, 32*17-448($tp1)
297 vpbroadcastq 32*1-128($tpa), $B2
298 vpmuludq 32*0-128($ap), $B1, $ACC0
299 vpaddq 32*0-192($tp0), $ACC0, $ACC0
300 vpmuludq 32*0-128($aap), $B1, $ACC1
301 vpaddq 32*1-192($tp0), $ACC1, $ACC1
302 vpmuludq 32*1-128($aap), $B1, $ACC2
303 vpaddq 32*2-192($tp0), $ACC2, $ACC2
304 vpmuludq 32*2-128($aap), $B1, $ACC3
305 vpaddq 32*3-192($tp0), $ACC3, $ACC3
306 vpmuludq 32*3-128($aap), $B1, $ACC4
307 vpaddq 32*4-192($tp0), $ACC4, $ACC4
308 vpmuludq 32*4-128($aap), $B1, $ACC5
309 vpaddq 32*5-192($tp0), $ACC5, $ACC5
310 vpmuludq 32*5-128($aap), $B1, $ACC6
311 vpaddq 32*6-192($tp0), $ACC6, $ACC6
312 vpmuludq 32*6-128($aap), $B1, $ACC7
313 vpaddq 32*7-192($tp0), $ACC7, $ACC7
314 vpmuludq 32*7-128($aap), $B1, $ACC8
315 vpbroadcastq 32*2-128($tpa), $B1
316 vpaddq 32*8-192($tp0), $ACC8, $ACC8
318 vmovdqu $ACC0, 32*0-192($tp0)
319 vmovdqu $ACC1, 32*1-192($tp0)
321 vpmuludq 32*1-128($ap), $B2, $TEMP0
322 vpaddq $TEMP0, $ACC2, $ACC2
323 vpmuludq 32*1-128($aap), $B2, $TEMP1
324 vpaddq $TEMP1, $ACC3, $ACC3
325 vpmuludq 32*2-128($aap), $B2, $TEMP2
326 vpaddq $TEMP2, $ACC4, $ACC4
327 vpmuludq 32*3-128($aap), $B2, $TEMP0
328 vpaddq $TEMP0, $ACC5, $ACC5
329 vpmuludq 32*4-128($aap), $B2, $TEMP1
330 vpaddq $TEMP1, $ACC6, $ACC6
331 vpmuludq 32*5-128($aap), $B2, $TEMP2
332 vpaddq $TEMP2, $ACC7, $ACC7
333 vpmuludq 32*6-128($aap), $B2, $TEMP0
334 vpaddq $TEMP0, $ACC8, $ACC8
335 vpmuludq 32*7-128($aap), $B2, $ACC0
336 vpbroadcastq 32*3-128($tpa), $B2
337 vpaddq 32*9-192($tp0), $ACC0, $ACC0
339 vmovdqu $ACC2, 32*2-192($tp0)
340 vmovdqu $ACC3, 32*3-192($tp0)
342 vpmuludq 32*2-128($ap), $B1, $TEMP2
343 vpaddq $TEMP2, $ACC4, $ACC4
344 vpmuludq 32*2-128($aap), $B1, $TEMP0
345 vpaddq $TEMP0, $ACC5, $ACC5
346 vpmuludq 32*3-128($aap), $B1, $TEMP1
347 vpaddq $TEMP1, $ACC6, $ACC6
348 vpmuludq 32*4-128($aap), $B1, $TEMP2
349 vpaddq $TEMP2, $ACC7, $ACC7
350 vpmuludq 32*5-128($aap), $B1, $TEMP0
351 vpaddq $TEMP0, $ACC8, $ACC8
352 vpmuludq 32*6-128($aap), $B1, $TEMP1
353 vpaddq $TEMP1, $ACC0, $ACC0
354 vpmuludq 32*7-128($aap), $B1, $ACC1
355 vpbroadcastq 32*4-128($tpa), $B1
356 vpaddq 32*10-448($tp1), $ACC1, $ACC1
358 vmovdqu $ACC4, 32*4-192($tp0)
359 vmovdqu $ACC5, 32*5-192($tp0)
361 vpmuludq 32*3-128($ap), $B2, $TEMP0
362 vpaddq $TEMP0, $ACC6, $ACC6
363 vpmuludq 32*3-128($aap), $B2, $TEMP1
364 vpaddq $TEMP1, $ACC7, $ACC7
365 vpmuludq 32*4-128($aap), $B2, $TEMP2
366 vpaddq $TEMP2, $ACC8, $ACC8
367 vpmuludq 32*5-128($aap), $B2, $TEMP0
368 vpaddq $TEMP0, $ACC0, $ACC0
369 vpmuludq 32*6-128($aap), $B2, $TEMP1
370 vpaddq $TEMP1, $ACC1, $ACC1
371 vpmuludq 32*7-128($aap), $B2, $ACC2
372 vpbroadcastq 32*5-128($tpa), $B2
373 vpaddq 32*11-448($tp1), $ACC2, $ACC2
375 vmovdqu $ACC6, 32*6-192($tp0)
376 vmovdqu $ACC7, 32*7-192($tp0)
378 vpmuludq 32*4-128($ap), $B1, $TEMP0
379 vpaddq $TEMP0, $ACC8, $ACC8
380 vpmuludq 32*4-128($aap), $B1, $TEMP1
381 vpaddq $TEMP1, $ACC0, $ACC0
382 vpmuludq 32*5-128($aap), $B1, $TEMP2
383 vpaddq $TEMP2, $ACC1, $ACC1
384 vpmuludq 32*6-128($aap), $B1, $TEMP0
385 vpaddq $TEMP0, $ACC2, $ACC2
386 vpmuludq 32*7-128($aap), $B1, $ACC3
387 vpbroadcastq 32*6-128($tpa), $B1
388 vpaddq 32*12-448($tp1), $ACC3, $ACC3
390 vmovdqu $ACC8, 32*8-192($tp0)
391 vmovdqu $ACC0, 32*9-192($tp0)
394 vpmuludq 32*5-128($ap), $B2, $TEMP2
395 vpaddq $TEMP2, $ACC1, $ACC1
396 vpmuludq 32*5-128($aap), $B2, $TEMP0
397 vpaddq $TEMP0, $ACC2, $ACC2
398 vpmuludq 32*6-128($aap), $B2, $TEMP1
399 vpaddq $TEMP1, $ACC3, $ACC3
400 vpmuludq 32*7-128($aap), $B2, $ACC4
401 vpbroadcastq 32*7-128($tpa), $B2
402 vpaddq 32*13-448($tp1), $ACC4, $ACC4
404 vmovdqu $ACC1, 32*10-448($tp1)
405 vmovdqu $ACC2, 32*11-448($tp1)
407 vpmuludq 32*6-128($ap), $B1, $TEMP0
408 vpaddq $TEMP0, $ACC3, $ACC3
409 vpmuludq 32*6-128($aap), $B1, $TEMP1
410 vpbroadcastq 32*8-128($tpa), $ACC0 # borrow $ACC0 for $B1
411 vpaddq $TEMP1, $ACC4, $ACC4
412 vpmuludq 32*7-128($aap), $B1, $ACC5
413 vpbroadcastq 32*0+8-128($tpa), $B1 # for next iteration
414 vpaddq 32*14-448($tp1), $ACC5, $ACC5
416 vmovdqu $ACC3, 32*12-448($tp1)
417 vmovdqu $ACC4, 32*13-448($tp1)
420 vpmuludq 32*7-128($ap), $B2, $TEMP0
421 vpaddq $TEMP0, $ACC5, $ACC5
422 vpmuludq 32*7-128($aap), $B2, $ACC6
423 vpaddq 32*15-448($tp1), $ACC6, $ACC6
425 vpmuludq 32*8-128($ap), $ACC0, $ACC7
426 vmovdqu $ACC5, 32*14-448($tp1)
427 vpaddq 32*16-448($tp1), $ACC7, $ACC7
428 vmovdqu $ACC6, 32*15-448($tp1)
429 vmovdqu $ACC7, 32*16-448($tp1)
441 # we need to fix indexes 32-39 to avoid overflow
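# A hedged sketch of one such normalization step (the vpsrlq/vpand/vpermq
# sequence below), per digit:
#   carry      = digit >> 29         # vpsrlq
#   digit     &= 0x1fffffff          # vpand with $AND_MASK
#   digit[+1] += carry               # carries rotated one 64-bit lane up via
#                                    # vpermq \$0x93, stitched across register
#                                    # boundaries with vpblendd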
442 vmovdqu 32*8(%rsp), $ACC8 # 32*8-192($tp0),
443 vmovdqu 32*9(%rsp), $ACC1 # 32*9-192($tp0)
444 vmovdqu 32*10(%rsp), $ACC2 # 32*10-192($tp0)
445 lea 192(%rsp), $tp0 # 64+128=192
447 vpsrlq \$29, $ACC8, $TEMP1
448 vpand $AND_MASK, $ACC8, $ACC8
449 vpsrlq \$29, $ACC1, $TEMP2
450 vpand $AND_MASK, $ACC1, $ACC1
452 vpermq \$0x93, $TEMP1, $TEMP1
453 vpxor $ZERO, $ZERO, $ZERO
454 vpermq \$0x93, $TEMP2, $TEMP2
456 vpblendd \$3, $ZERO, $TEMP1, $TEMP0
457 vpblendd \$3, $TEMP1, $TEMP2, $TEMP1
458 vpaddq $TEMP0, $ACC8, $ACC8
459 vpblendd \$3, $TEMP2, $ZERO, $TEMP2
460 vpaddq $TEMP1, $ACC1, $ACC1
461 vpaddq $TEMP2, $ACC2, $ACC2
462 vmovdqu $ACC1, 32*9-192($tp0)
463 vmovdqu $ACC2, 32*10-192($tp0)
469 vmovdqu 32*1(%rsp), $ACC1
470 vmovdqu 32*2-192($tp0), $ACC2
471 vmovdqu 32*3-192($tp0), $ACC3
472 vmovdqu 32*4-192($tp0), $ACC4
473 vmovdqu 32*5-192($tp0), $ACC5
474 vmovdqu 32*6-192($tp0), $ACC6
475 vmovdqu 32*7-192($tp0), $ACC7
479 and \$0x1fffffff, %eax
483 imulq -128($np), %rax
484 vpbroadcastq $Y1, $Y1
487 imulq 8-128($np), %rax
491 imulq 16-128($np), %rax
494 imulq 24-128($np), %rdx
499 and \$0x1fffffff, %eax
502 jmp .LOOP_REDUCE_1024
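# A hedged sketch of one step of the reduction loop entered here: the scalar
# imul/and code computes y = (least significant digit * n0) mod 2^29, the
# vector code then adds y times the modulus digits in $np (broadcast y) into
# the accumulators, which makes the low digit divisible by 2^29 so it can be
# retired, as in word-serial Montgomery reduction.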
507 vpbroadcastq $Y2, $Y2
509 vpmuludq 32*1-128($np), $Y1, $TEMP0
511 imulq -128($np), %rax
512 vpaddq $TEMP0, $ACC1, $ACC1
514 vpmuludq 32*2-128($np), $Y1, $TEMP1
516 imulq 8-128($np), %rax
517 vpaddq $TEMP1, $ACC2, $ACC2
518 vpmuludq 32*3-128($np), $Y1, $TEMP2
523 imulq 16-128($np), %rax
525 vpaddq $TEMP2, $ACC3, $ACC3
526 vpmuludq 32*4-128($np), $Y1, $TEMP0
529 vpaddq $TEMP0, $ACC4, $ACC4
530 vpmuludq 32*5-128($np), $Y1, $TEMP1
533 vpaddq $TEMP1, $ACC5, $ACC5
534 vpmuludq 32*6-128($np), $Y1, $TEMP2
535 and \$0x1fffffff, %eax
536 vpaddq $TEMP2, $ACC6, $ACC6
537 vpmuludq 32*7-128($np), $Y1, $TEMP0
538 vpaddq $TEMP0, $ACC7, $ACC7
539 vpmuludq 32*8-128($np), $Y1, $TEMP1
541 #vmovdqu 32*1-8-128($np), $TEMP2 # moved below
542 vpaddq $TEMP1, $ACC8, $ACC8
543 #vmovdqu 32*2-8-128($np), $TEMP0 # moved below
544 vpbroadcastq $Y1, $Y1
546 vpmuludq 32*1-8-128($np), $Y2, $TEMP2 # see above
547 vmovdqu 32*3-8-128($np), $TEMP1
549 imulq -128($np), %rax
550 vpaddq $TEMP2, $ACC1, $ACC1
551 vpmuludq 32*2-8-128($np), $Y2, $TEMP0 # see above
552 vmovdqu 32*4-8-128($np), $TEMP2
555 imulq 8-128($np), %rax
556 vpaddq $TEMP0, $ACC2, $ACC2
559 vpmuludq $Y2, $TEMP1, $TEMP1
560 vmovdqu 32*5-8-128($np), $TEMP0
562 vpaddq $TEMP1, $ACC3, $ACC3
563 vpmuludq $Y2, $TEMP2, $TEMP2
564 vmovdqu 32*6-8-128($np), $TEMP1
568 vpaddq $TEMP2, $ACC4, $ACC4
569 vpmuludq $Y2, $TEMP0, $TEMP0
570 .byte 0xc4,0x41,0x7e,0x6f,0x9d,0x58,0x00,0x00,0x00 # vmovdqu 32*7-8-128($np), $TEMP2
571 and \$0x1fffffff, %eax
572 vpaddq $TEMP0, $ACC5, $ACC5
573 vpmuludq $Y2, $TEMP1, $TEMP1
574 vmovdqu 32*8-8-128($np), $TEMP0
575 vpaddq $TEMP1, $ACC6, $ACC6
576 vpmuludq $Y2, $TEMP2, $TEMP2
577 vmovdqu 32*9-8-128($np), $ACC9
578 vmovd %eax, $ACC0 # borrow ACC0 for Y2
579 imulq -128($np), %rax
580 vpaddq $TEMP2, $ACC7, $ACC7
581 vpmuludq $Y2, $TEMP0, $TEMP0
582 vmovdqu 32*1-16-128($np), $TEMP1
583 vpbroadcastq $ACC0, $ACC0
584 vpaddq $TEMP0, $ACC8, $ACC8
585 vpmuludq $Y2, $ACC9, $ACC9
586 vmovdqu 32*2-16-128($np), $TEMP2
590 ($ACC0,$Y2)=($Y2,$ACC0);
592 vmovdqu 32*1-24-128($np), $ACC0
593 vpmuludq $Y1, $TEMP1, $TEMP1
594 vmovdqu 32*3-16-128($np), $TEMP0
595 vpaddq $TEMP1, $ACC1, $ACC1
596 vpmuludq $Y2, $ACC0, $ACC0
597 vpmuludq $Y1, $TEMP2, $TEMP2
598 .byte 0xc4,0x41,0x7e,0x6f,0xb5,0xf0,0xff,0xff,0xff # vmovdqu 32*4-16-128($np), $TEMP1
599 vpaddq $ACC1, $ACC0, $ACC0
600 vpaddq $TEMP2, $ACC2, $ACC2
601 vpmuludq $Y1, $TEMP0, $TEMP0
602 vmovdqu 32*5-16-128($np), $TEMP2
605 vmovdqu $ACC0, (%rsp) # transfer $r0-$r3
606 vpaddq $TEMP0, $ACC3, $ACC3
607 vpmuludq $Y1, $TEMP1, $TEMP1
608 vmovdqu 32*6-16-128($np), $TEMP0
609 vpaddq $TEMP1, $ACC4, $ACC4
610 vpmuludq $Y1, $TEMP2, $TEMP2
611 vmovdqu 32*7-16-128($np), $TEMP1
612 vpaddq $TEMP2, $ACC5, $ACC5
613 vpmuludq $Y1, $TEMP0, $TEMP0
614 vmovdqu 32*8-16-128($np), $TEMP2
615 vpaddq $TEMP0, $ACC6, $ACC6
616 vpmuludq $Y1, $TEMP1, $TEMP1
618 vmovdqu 32*9-16-128($np), $TEMP0
620 vpaddq $TEMP1, $ACC7, $ACC7
621 vpmuludq $Y1, $TEMP2, $TEMP2
622 #vmovdqu 32*2-24-128($np), $TEMP1 # moved below
625 vpaddq $TEMP2, $ACC8, $ACC8
626 vpmuludq $Y1, $TEMP0, $TEMP0
627 and \$0x1fffffff, %eax
629 vmovdqu 32*3-24-128($np), $TEMP2
631 vpaddq $TEMP0, $ACC9, $ACC9
632 vpbroadcastq $Y1, $Y1
634 vpmuludq 32*2-24-128($np), $Y2, $TEMP1 # see above
635 vmovdqu 32*4-24-128($np), $TEMP0
637 imulq -128($np), %rax
639 vpaddq $TEMP1, $ACC2, $ACC1
640 vpmuludq $Y2, $TEMP2, $TEMP2
641 vmovdqu 32*5-24-128($np), $TEMP1
644 imulq 8-128($np), %rax
648 vpaddq $TEMP2, $ACC3, $ACC2
649 vpmuludq $Y2, $TEMP0, $TEMP0
650 vmovdqu 32*6-24-128($np), $TEMP2
653 imulq 16-128($np), %rax
654 vpaddq $TEMP0, $ACC4, $ACC3
655 vpmuludq $Y2, $TEMP1, $TEMP1
656 vmovdqu 32*7-24-128($np), $TEMP0
657 imulq 24-128($np), %rdx # future $r3
660 vpaddq $TEMP1, $ACC5, $ACC4
661 vpmuludq $Y2, $TEMP2, $TEMP2
662 vmovdqu 32*8-24-128($np), $TEMP1
665 vpmuludq $Y2, $TEMP0, $TEMP0
666 vpaddq $TEMP2, $ACC6, $ACC5
667 vmovdqu 32*9-24-128($np), $TEMP2
668 and \$0x1fffffff, %eax
669 vpaddq $TEMP0, $ACC7, $ACC6
670 vpmuludq $Y2, $TEMP1, $TEMP1
672 vpaddq $TEMP1, $ACC8, $ACC7
673 vpmuludq $Y2, $TEMP2, $TEMP2
674 vpaddq $TEMP2, $ACC9, $ACC8
679 jnz .LOOP_REDUCE_1024
681 ($ACC0,$Y2)=($Y2,$ACC0);
683 lea 448(%rsp), $tp1 # size optimization
684 vpaddq $ACC9, $Y2, $ACC0
685 vpxor $ZERO, $ZERO, $ZERO
687 vpaddq 32*9-192($tp0), $ACC0, $ACC0
688 vpaddq 32*10-448($tp1), $ACC1, $ACC1
689 vpaddq 32*11-448($tp1), $ACC2, $ACC2
690 vpaddq 32*12-448($tp1), $ACC3, $ACC3
691 vpaddq 32*13-448($tp1), $ACC4, $ACC4
692 vpaddq 32*14-448($tp1), $ACC5, $ACC5
693 vpaddq 32*15-448($tp1), $ACC6, $ACC6
694 vpaddq 32*16-448($tp1), $ACC7, $ACC7
695 vpaddq 32*17-448($tp1), $ACC8, $ACC8
697 vpsrlq \$29, $ACC0, $TEMP1
698 vpand $AND_MASK, $ACC0, $ACC0
699 vpsrlq \$29, $ACC1, $TEMP2
700 vpand $AND_MASK, $ACC1, $ACC1
701 vpsrlq \$29, $ACC2, $TEMP3
702 vpermq \$0x93, $TEMP1, $TEMP1
703 vpand $AND_MASK, $ACC2, $ACC2
704 vpsrlq \$29, $ACC3, $TEMP4
705 vpermq \$0x93, $TEMP2, $TEMP2
706 vpand $AND_MASK, $ACC3, $ACC3
707 vpermq \$0x93, $TEMP3, $TEMP3
709 vpblendd \$3, $ZERO, $TEMP1, $TEMP0
710 vpermq \$0x93, $TEMP4, $TEMP4
711 vpblendd \$3, $TEMP1, $TEMP2, $TEMP1
712 vpaddq $TEMP0, $ACC0, $ACC0
713 vpblendd \$3, $TEMP2, $TEMP3, $TEMP2
714 vpaddq $TEMP1, $ACC1, $ACC1
715 vpblendd \$3, $TEMP3, $TEMP4, $TEMP3
716 vpaddq $TEMP2, $ACC2, $ACC2
717 vpblendd \$3, $TEMP4, $ZERO, $TEMP4
718 vpaddq $TEMP3, $ACC3, $ACC3
719 vpaddq $TEMP4, $ACC4, $ACC4
721 vpsrlq \$29, $ACC0, $TEMP1
722 vpand $AND_MASK, $ACC0, $ACC0
723 vpsrlq \$29, $ACC1, $TEMP2
724 vpand $AND_MASK, $ACC1, $ACC1
725 vpsrlq \$29, $ACC2, $TEMP3
726 vpermq \$0x93, $TEMP1, $TEMP1
727 vpand $AND_MASK, $ACC2, $ACC2
728 vpsrlq \$29, $ACC3, $TEMP4
729 vpermq \$0x93, $TEMP2, $TEMP2
730 vpand $AND_MASK, $ACC3, $ACC3
731 vpermq \$0x93, $TEMP3, $TEMP3
733 vpblendd \$3, $ZERO, $TEMP1, $TEMP0
734 vpermq \$0x93, $TEMP4, $TEMP4
735 vpblendd \$3, $TEMP1, $TEMP2, $TEMP1
736 vpaddq $TEMP0, $ACC0, $ACC0
737 vpblendd \$3, $TEMP2, $TEMP3, $TEMP2
738 vpaddq $TEMP1, $ACC1, $ACC1
739 vmovdqu $ACC0, 32*0-128($rp)
740 vpblendd \$3, $TEMP3, $TEMP4, $TEMP3
741 vpaddq $TEMP2, $ACC2, $ACC2
742 vmovdqu $ACC1, 32*1-128($rp)
743 vpblendd \$3, $TEMP4, $ZERO, $TEMP4
744 vpaddq $TEMP3, $ACC3, $ACC3
745 vmovdqu $ACC2, 32*2-128($rp)
746 vpaddq $TEMP4, $ACC4, $ACC4
747 vmovdqu $ACC3, 32*3-128($rp)
751 vpsrlq \$29, $ACC4, $TEMP1
752 vpand $AND_MASK, $ACC4, $ACC4
753 vpsrlq \$29, $ACC5, $TEMP2
754 vpand $AND_MASK, $ACC5, $ACC5
755 vpsrlq \$29, $ACC6, $TEMP3
756 vpermq \$0x93, $TEMP1, $TEMP1
757 vpand $AND_MASK, $ACC6, $ACC6
758 vpsrlq \$29, $ACC7, $TEMP4
759 vpermq \$0x93, $TEMP2, $TEMP2
760 vpand $AND_MASK, $ACC7, $ACC7
761 vpsrlq \$29, $ACC8, $TEMP5
762 vpermq \$0x93, $TEMP3, $TEMP3
763 vpand $AND_MASK, $ACC8, $ACC8
764 vpermq \$0x93, $TEMP4, $TEMP4
766 vpblendd \$3, $ZERO, $TEMP1, $TEMP0
767 vpermq \$0x93, $TEMP5, $TEMP5
768 vpblendd \$3, $TEMP1, $TEMP2, $TEMP1
769 vpaddq $TEMP0, $ACC4, $ACC4
770 vpblendd \$3, $TEMP2, $TEMP3, $TEMP2
771 vpaddq $TEMP1, $ACC5, $ACC5
772 vpblendd \$3, $TEMP3, $TEMP4, $TEMP3
773 vpaddq $TEMP2, $ACC6, $ACC6
774 vpblendd \$3, $TEMP4, $TEMP5, $TEMP4
775 vpaddq $TEMP3, $ACC7, $ACC7
776 vpaddq $TEMP4, $ACC8, $ACC8
778 vpsrlq \$29, $ACC4, $TEMP1
779 vpand $AND_MASK, $ACC4, $ACC4
780 vpsrlq \$29, $ACC5, $TEMP2
781 vpand $AND_MASK, $ACC5, $ACC5
782 vpsrlq \$29, $ACC6, $TEMP3
783 vpermq \$0x93, $TEMP1, $TEMP1
784 vpand $AND_MASK, $ACC6, $ACC6
785 vpsrlq \$29, $ACC7, $TEMP4
786 vpermq \$0x93, $TEMP2, $TEMP2
787 vpand $AND_MASK, $ACC7, $ACC7
788 vpsrlq \$29, $ACC8, $TEMP5
789 vpermq \$0x93, $TEMP3, $TEMP3
790 vpand $AND_MASK, $ACC8, $ACC8
791 vpermq \$0x93, $TEMP4, $TEMP4
793 vpblendd \$3, $ZERO, $TEMP1, $TEMP0
794 vpermq \$0x93, $TEMP5, $TEMP5
795 vpblendd \$3, $TEMP1, $TEMP2, $TEMP1
796 vpaddq $TEMP0, $ACC4, $ACC4
797 vpblendd \$3, $TEMP2, $TEMP3, $TEMP2
798 vpaddq $TEMP1, $ACC5, $ACC5
799 vmovdqu $ACC4, 32*4-128($rp)
800 vpblendd \$3, $TEMP3, $TEMP4, $TEMP3
801 vpaddq $TEMP2, $ACC6, $ACC6
802 vmovdqu $ACC5, 32*5-128($rp)
803 vpblendd \$3, $TEMP4, $TEMP5, $TEMP4
804 vpaddq $TEMP3, $ACC7, $ACC7
805 vmovdqu $ACC6, 32*6-128($rp)
806 vpaddq $TEMP4, $ACC8, $ACC8
807 vmovdqu $ACC7, 32*7-128($rp)
808 vmovdqu $ACC8, 32*8-128($rp)
812 jne .LOOP_GRANDE_SQR_1024
817 $code.=<<___ if ($win64);
818 movaps -0xd8(%rax),%xmm6
819 movaps -0xc8(%rax),%xmm7
820 movaps -0xb8(%rax),%xmm8
821 movaps -0xa8(%rax),%xmm9
822 movaps -0x98(%rax),%xmm10
823 movaps -0x88(%rax),%xmm11
824 movaps -0x78(%rax),%xmm12
825 movaps -0x68(%rax),%xmm13
826 movaps -0x58(%rax),%xmm14
827 movaps -0x48(%rax),%xmm15
836 lea (%rax),%rsp # restore %rsp
839 .size rsaz_1024_sqr_avx2,.-rsaz_1024_sqr_avx2
844 my $rp="%rdi"; # BN_ULONG *rp,
845 my $ap="%rsi"; # const BN_ULONG *ap,
846 my $bp="%rdx"; # const BN_ULONG *bp,
847 my $np="%rcx"; # const BN_ULONG *np,
848 my $n0="%r8d"; # unsigned int n0);
850 # The registers that hold the accumulated redundant result
851 # The AMM works on 1024-bit operands, and the redundant word size is 29 bits.
852 # Therefore: ceil(1024/29)/4 = 9
864 # Registers that hold the broadcasted words of multiplier, currently used
873 my $AND_MASK="%ymm15";
875 # alu registers that hold the first words of the ACC
884 $bp="%r13"; # reassigned argument
887 .globl rsaz_1024_mul_avx2
888 .type rsaz_1024_mul_avx2,\@function,5
899 $code.=<<___ if ($win64);
902 vmovaps %xmm6,-0xd8(%rax)
903 vmovaps %xmm7,-0xc8(%rax)
904 vmovaps %xmm8,-0xb8(%rax)
905 vmovaps %xmm9,-0xa8(%rax)
906 vmovaps %xmm10,-0x98(%rax)
907 vmovaps %xmm11,-0x88(%rax)
908 vmovaps %xmm12,-0x78(%rax)
909 vmovaps %xmm13,-0x68(%rax)
910 vmovaps %xmm14,-0x58(%rax)
911 vmovaps %xmm15,-0x48(%rax)
917 mov %rdx, $bp # reassigned argument
920 # unaligned 256-bit load that crosses page boundary can
921 # cause severe performance degradation here, so if $ap does
922 # cross page boundary, swap it with $bp [meaning that caller
923 # is advised to lay down $ap and $bp next to each other, so
924 # that only one can cross page boundary].
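# (side note: multiplication is commutative, so swapping $ap and $bp does not
# change the result, only which operand plays which role in the loop below.)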
935 sub \$-128,$ap # size optimization
939 and \$4095, $tmp # see if $np crosses page
943 jz .Lmul_1024_no_n_copy
945 # unaligned 256-bit load that crosses page boundary can
946 # cause severe performance degradation here, so if $np does
947 # cross page boundary, copy it to stack and make sure stack
950 vmovdqu 32*0-128($np), $ACC0
952 vmovdqu 32*1-128($np), $ACC1
953 vmovdqu 32*2-128($np), $ACC2
954 vmovdqu 32*3-128($np), $ACC3
955 vmovdqu 32*4-128($np), $ACC4
956 vmovdqu 32*5-128($np), $ACC5
957 vmovdqu 32*6-128($np), $ACC6
958 vmovdqu 32*7-128($np), $ACC7
959 vmovdqu 32*8-128($np), $ACC8
961 vmovdqu $ACC0, 32*0-128($np)
962 vpxor $ACC0, $ACC0, $ACC0
963 vmovdqu $ACC1, 32*1-128($np)
964 vpxor $ACC1, $ACC1, $ACC1
965 vmovdqu $ACC2, 32*2-128($np)
966 vpxor $ACC2, $ACC2, $ACC2
967 vmovdqu $ACC3, 32*3-128($np)
968 vpxor $ACC3, $ACC3, $ACC3
969 vmovdqu $ACC4, 32*4-128($np)
970 vpxor $ACC4, $ACC4, $ACC4
971 vmovdqu $ACC5, 32*5-128($np)
972 vpxor $ACC5, $ACC5, $ACC5
973 vmovdqu $ACC6, 32*6-128($np)
974 vpxor $ACC6, $ACC6, $ACC6
975 vmovdqu $ACC7, 32*7-128($np)
976 vpxor $ACC7, $ACC7, $ACC7
977 vmovdqu $ACC8, 32*8-128($np)
979 vmovdqu $ACC9, 32*9-128($np) # $ACC9 is zero after vzeroall
980 .Lmul_1024_no_n_copy:
984 vpbroadcastq ($bp), $Bi
985 vmovdqu $ACC0, (%rsp) # clear top of stack
992 vmovdqu .Land_mask(%rip), $AND_MASK
994 vmovdqu $ACC9, 32*9-128($rp) # $ACC9 is zero after vzeroall
999 vpsrlq \$29, $ACC3, $ACC9 # correct $ACC3(*)
1001 imulq -128($ap), %rax
1004 imulq 8-128($ap), $r1
1009 and \$0x1fffffff, %eax
1012 imulq 16-128($ap), $r2
1016 imulq 24-128($ap), $r3
1018 vpmuludq 32*1-128($ap),$Bi,$TEMP0
1020 vpaddq $TEMP0,$ACC1,$ACC1
1021 vpmuludq 32*2-128($ap),$Bi,$TEMP1
1022 vpbroadcastq $Yi, $Yi
1023 vpaddq $TEMP1,$ACC2,$ACC2
1024 vpmuludq 32*3-128($ap),$Bi,$TEMP2
1025 vpand $AND_MASK, $ACC3, $ACC3 # correct $ACC3
1026 vpaddq $TEMP2,$ACC3,$ACC3
1027 vpmuludq 32*4-128($ap),$Bi,$TEMP0
1028 vpaddq $TEMP0,$ACC4,$ACC4
1029 vpmuludq 32*5-128($ap),$Bi,$TEMP1
1030 vpaddq $TEMP1,$ACC5,$ACC5
1031 vpmuludq 32*6-128($ap),$Bi,$TEMP2
1032 vpaddq $TEMP2,$ACC6,$ACC6
1033 vpmuludq 32*7-128($ap),$Bi,$TEMP0
1034 vpermq \$0x93, $ACC9, $ACC9 # correct $ACC3
1035 vpaddq $TEMP0,$ACC7,$ACC7
1036 vpmuludq 32*8-128($ap),$Bi,$TEMP1
1037 vpbroadcastq 8($bp), $Bi
1038 vpaddq $TEMP1,$ACC8,$ACC8
1041 imulq -128($np),%rax
1044 imulq 8-128($np),%rax
1047 imulq 16-128($np),%rax
1050 imulq 24-128($np),%rdx
1054 vpmuludq 32*1-128($np),$Yi,$TEMP2
1056 vpaddq $TEMP2,$ACC1,$ACC1
1057 vpmuludq 32*2-128($np),$Yi,$TEMP0
1058 vpaddq $TEMP0,$ACC2,$ACC2
1059 vpmuludq 32*3-128($np),$Yi,$TEMP1
1060 vpaddq $TEMP1,$ACC3,$ACC3
1061 vpmuludq 32*4-128($np),$Yi,$TEMP2
1062 vpaddq $TEMP2,$ACC4,$ACC4
1063 vpmuludq 32*5-128($np),$Yi,$TEMP0
1064 vpaddq $TEMP0,$ACC5,$ACC5
1065 vpmuludq 32*6-128($np),$Yi,$TEMP1
1066 vpaddq $TEMP1,$ACC6,$ACC6
1067 vpmuludq 32*7-128($np),$Yi,$TEMP2
1068 vpblendd \$3, $ZERO, $ACC9, $ACC9 # correct $ACC3
1069 vpaddq $TEMP2,$ACC7,$ACC7
1070 vpmuludq 32*8-128($np),$Yi,$TEMP0
1071 vpaddq $ACC9, $ACC3, $ACC3 # correct $ACC3
1072 vpaddq $TEMP0,$ACC8,$ACC8
1075 imulq -128($ap),%rax
1077 vmovdqu -8+32*1-128($ap),$TEMP1
1079 imulq 8-128($ap),%rax
1081 vmovdqu -8+32*2-128($ap),$TEMP2
1085 and \$0x1fffffff, %eax
1087 imulq 16-128($ap),%rbx
1089 vpmuludq $Bi,$TEMP1,$TEMP1
1091 vmovdqu -8+32*3-128($ap),$TEMP0
1092 vpaddq $TEMP1,$ACC1,$ACC1
1093 vpmuludq $Bi,$TEMP2,$TEMP2
1094 vpbroadcastq $Yi, $Yi
1095 vmovdqu -8+32*4-128($ap),$TEMP1
1096 vpaddq $TEMP2,$ACC2,$ACC2
1097 vpmuludq $Bi,$TEMP0,$TEMP0
1098 vmovdqu -8+32*5-128($ap),$TEMP2
1099 vpaddq $TEMP0,$ACC3,$ACC3
1100 vpmuludq $Bi,$TEMP1,$TEMP1
1101 vmovdqu -8+32*6-128($ap),$TEMP0
1102 vpaddq $TEMP1,$ACC4,$ACC4
1103 vpmuludq $Bi,$TEMP2,$TEMP2
1104 vmovdqu -8+32*7-128($ap),$TEMP1
1105 vpaddq $TEMP2,$ACC5,$ACC5
1106 vpmuludq $Bi,$TEMP0,$TEMP0
1107 vmovdqu -8+32*8-128($ap),$TEMP2
1108 vpaddq $TEMP0,$ACC6,$ACC6
1109 vpmuludq $Bi,$TEMP1,$TEMP1
1110 vmovdqu -8+32*9-128($ap),$ACC9
1111 vpaddq $TEMP1,$ACC7,$ACC7
1112 vpmuludq $Bi,$TEMP2,$TEMP2
1113 vpaddq $TEMP2,$ACC8,$ACC8
1114 vpmuludq $Bi,$ACC9,$ACC9
1115 vpbroadcastq 16($bp), $Bi
1118 imulq -128($np),%rax
1120 vmovdqu -8+32*1-128($np),$TEMP0
1122 imulq 8-128($np),%rax
1124 vmovdqu -8+32*2-128($np),$TEMP1
1126 imulq 16-128($np),%rdx
1130 vpmuludq $Yi,$TEMP0,$TEMP0
1132 vmovdqu -8+32*3-128($np),$TEMP2
1133 vpaddq $TEMP0,$ACC1,$ACC1
1134 vpmuludq $Yi,$TEMP1,$TEMP1
1135 vmovdqu -8+32*4-128($np),$TEMP0
1136 vpaddq $TEMP1,$ACC2,$ACC2
1137 vpmuludq $Yi,$TEMP2,$TEMP2
1138 vmovdqu -8+32*5-128($np),$TEMP1
1139 vpaddq $TEMP2,$ACC3,$ACC3
1140 vpmuludq $Yi,$TEMP0,$TEMP0
1141 vmovdqu -8+32*6-128($np),$TEMP2
1142 vpaddq $TEMP0,$ACC4,$ACC4
1143 vpmuludq $Yi,$TEMP1,$TEMP1
1144 vmovdqu -8+32*7-128($np),$TEMP0
1145 vpaddq $TEMP1,$ACC5,$ACC5
1146 vpmuludq $Yi,$TEMP2,$TEMP2
1147 vmovdqu -8+32*8-128($np),$TEMP1
1148 vpaddq $TEMP2,$ACC6,$ACC6
1149 vpmuludq $Yi,$TEMP0,$TEMP0
1150 vmovdqu -8+32*9-128($np),$TEMP2
1151 vpaddq $TEMP0,$ACC7,$ACC7
1152 vpmuludq $Yi,$TEMP1,$TEMP1
1153 vpaddq $TEMP1,$ACC8,$ACC8
1154 vpmuludq $Yi,$TEMP2,$TEMP2
1155 vpaddq $TEMP2,$ACC9,$ACC9
1157 vmovdqu -16+32*1-128($ap),$TEMP0
1159 imulq -128($ap),%rax
1162 vmovdqu -16+32*2-128($ap),$TEMP1
1165 and \$0x1fffffff, %eax
1167 imulq 8-128($ap),%rbx
1169 vpmuludq $Bi,$TEMP0,$TEMP0
1171 vmovdqu -16+32*3-128($ap),$TEMP2
1172 vpaddq $TEMP0,$ACC1,$ACC1
1173 vpmuludq $Bi,$TEMP1,$TEMP1
1174 vpbroadcastq $Yi, $Yi
1175 vmovdqu -16+32*4-128($ap),$TEMP0
1176 vpaddq $TEMP1,$ACC2,$ACC2
1177 vpmuludq $Bi,$TEMP2,$TEMP2
1178 vmovdqu -16+32*5-128($ap),$TEMP1
1179 vpaddq $TEMP2,$ACC3,$ACC3
1180 vpmuludq $Bi,$TEMP0,$TEMP0
1181 vmovdqu -16+32*6-128($ap),$TEMP2
1182 vpaddq $TEMP0,$ACC4,$ACC4
1183 vpmuludq $Bi,$TEMP1,$TEMP1
1184 vmovdqu -16+32*7-128($ap),$TEMP0
1185 vpaddq $TEMP1,$ACC5,$ACC5
1186 vpmuludq $Bi,$TEMP2,$TEMP2
1187 vmovdqu -16+32*8-128($ap),$TEMP1
1188 vpaddq $TEMP2,$ACC6,$ACC6
1189 vpmuludq $Bi,$TEMP0,$TEMP0
1190 vmovdqu -16+32*9-128($ap),$TEMP2
1191 vpaddq $TEMP0,$ACC7,$ACC7
1192 vpmuludq $Bi,$TEMP1,$TEMP1
1193 vpaddq $TEMP1,$ACC8,$ACC8
1194 vpmuludq $Bi,$TEMP2,$TEMP2
1195 vpbroadcastq 24($bp), $Bi
1196 vpaddq $TEMP2,$ACC9,$ACC9
1198 vmovdqu -16+32*1-128($np),$TEMP0
1200 imulq -128($np),%rax
1202 vmovdqu -16+32*2-128($np),$TEMP1
1203 imulq 8-128($np),%rdx
1207 vpmuludq $Yi,$TEMP0,$TEMP0
1209 vmovdqu -16+32*3-128($np),$TEMP2
1210 vpaddq $TEMP0,$ACC1,$ACC1
1211 vpmuludq $Yi,$TEMP1,$TEMP1
1212 vmovdqu -16+32*4-128($np),$TEMP0
1213 vpaddq $TEMP1,$ACC2,$ACC2
1214 vpmuludq $Yi,$TEMP2,$TEMP2
1215 vmovdqu -16+32*5-128($np),$TEMP1
1216 vpaddq $TEMP2,$ACC3,$ACC3
1217 vpmuludq $Yi,$TEMP0,$TEMP0
1218 vmovdqu -16+32*6-128($np),$TEMP2
1219 vpaddq $TEMP0,$ACC4,$ACC4
1220 vpmuludq $Yi,$TEMP1,$TEMP1
1221 vmovdqu -16+32*7-128($np),$TEMP0
1222 vpaddq $TEMP1,$ACC5,$ACC5
1223 vpmuludq $Yi,$TEMP2,$TEMP2
1224 vmovdqu -16+32*8-128($np),$TEMP1
1225 vpaddq $TEMP2,$ACC6,$ACC6
1226 vpmuludq $Yi,$TEMP0,$TEMP0
1227 vmovdqu -16+32*9-128($np),$TEMP2
1228 vpaddq $TEMP0,$ACC7,$ACC7
1229 vpmuludq $Yi,$TEMP1,$TEMP1
1230 vmovdqu -24+32*1-128($ap),$TEMP0
1231 vpaddq $TEMP1,$ACC8,$ACC8
1232 vpmuludq $Yi,$TEMP2,$TEMP2
1233 vmovdqu -24+32*2-128($ap),$TEMP1
1234 vpaddq $TEMP2,$ACC9,$ACC9
1237 imulq -128($ap),%rbx
1242 and \$0x1fffffff, %eax
1244 vpmuludq $Bi,$TEMP0,$TEMP0
1246 vmovdqu -24+32*3-128($ap),$TEMP2
1247 vpaddq $TEMP0,$ACC1,$ACC1
1248 vpmuludq $Bi,$TEMP1,$TEMP1
1249 vpbroadcastq $Yi, $Yi
1250 vmovdqu -24+32*4-128($ap),$TEMP0
1251 vpaddq $TEMP1,$ACC2,$ACC2
1252 vpmuludq $Bi,$TEMP2,$TEMP2
1253 vmovdqu -24+32*5-128($ap),$TEMP1
1254 vpaddq $TEMP2,$ACC3,$ACC3
1255 vpmuludq $Bi,$TEMP0,$TEMP0
1256 vmovdqu -24+32*6-128($ap),$TEMP2
1257 vpaddq $TEMP0,$ACC4,$ACC4
1258 vpmuludq $Bi,$TEMP1,$TEMP1
1259 vmovdqu -24+32*7-128($ap),$TEMP0
1260 vpaddq $TEMP1,$ACC5,$ACC5
1261 vpmuludq $Bi,$TEMP2,$TEMP2
1262 vmovdqu -24+32*8-128($ap),$TEMP1
1263 vpaddq $TEMP2,$ACC6,$ACC6
1264 vpmuludq $Bi,$TEMP0,$TEMP0
1265 vmovdqu -24+32*9-128($ap),$TEMP2
1266 vpaddq $TEMP0,$ACC7,$ACC7
1267 vpmuludq $Bi,$TEMP1,$TEMP1
1268 vpaddq $TEMP1,$ACC8,$ACC8
1269 vpmuludq $Bi,$TEMP2,$TEMP2
1270 vpbroadcastq 32($bp), $Bi
1271 vpaddq $TEMP2,$ACC9,$ACC9
1272 add \$32, $bp # $bp++
1274 vmovdqu -24+32*1-128($np),$TEMP0
1275 imulq -128($np),%rax
1279 vmovdqu -24+32*2-128($np),$TEMP1
1280 vpmuludq $Yi,$TEMP0,$TEMP0
1282 vmovdqu -24+32*3-128($np),$TEMP2
1283 vpaddq $TEMP0,$ACC1,$ACC0 # $ACC0==$TEMP0
1284 vpmuludq $Yi,$TEMP1,$TEMP1
1285 vmovdqu $ACC0, (%rsp) # transfer $r0-$r3
1286 vpaddq $TEMP1,$ACC2,$ACC1
1287 vmovdqu -24+32*4-128($np),$TEMP0
1288 vpmuludq $Yi,$TEMP2,$TEMP2
1289 vmovdqu -24+32*5-128($np),$TEMP1
1290 vpaddq $TEMP2,$ACC3,$ACC2
1291 vpmuludq $Yi,$TEMP0,$TEMP0
1292 vmovdqu -24+32*6-128($np),$TEMP2
1293 vpaddq $TEMP0,$ACC4,$ACC3
1294 vpmuludq $Yi,$TEMP1,$TEMP1
1295 vmovdqu -24+32*7-128($np),$TEMP0
1296 vpaddq $TEMP1,$ACC5,$ACC4
1297 vpmuludq $Yi,$TEMP2,$TEMP2
1298 vmovdqu -24+32*8-128($np),$TEMP1
1299 vpaddq $TEMP2,$ACC6,$ACC5
1300 vpmuludq $Yi,$TEMP0,$TEMP0
1301 vmovdqu -24+32*9-128($np),$TEMP2
1303 vpaddq $TEMP0,$ACC7,$ACC6
1304 vpmuludq $Yi,$TEMP1,$TEMP1
1306 vpaddq $TEMP1,$ACC8,$ACC7
1307 vpmuludq $Yi,$TEMP2,$TEMP2
1309 vpaddq $TEMP2,$ACC9,$ACC8
1315 # (*) The original implementation corrected ACC1-ACC3 for overflow
1316 # after 7 loop runs, i.e. after 28 iterations, or 56 additions.
1317 # But as we underutilize resources, it's possible to correct in
1318 # every iteration with only marginal performance loss. And since
1319 # we then do it in every iteration, we can correct fewer digits and
1320 # avoid the performance penalty completely. Also note that we
1321 # correct only three digits out of four. This works because the
1322 # most significant digit is subjected to fewer additions.
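# A hedged sanity check of the "56 additions" figure: each vpmuludq product of
# two 29-bit digits is below 2^58, so a 64-bit accumulator lane has headroom
# for roughly 2^(64-58) = 64 such products; 28 iterations times 2 products per
# digit gives 56, i.e. just inside the bound.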
1328 vpermq \$0, $AND_MASK, $AND_MASK
1329 vpaddq (%rsp), $TEMP1, $ACC0
1331 vpsrlq \$29, $ACC0, $TEMP1
1332 vpand $AND_MASK, $ACC0, $ACC0
1333 vpsrlq \$29, $ACC1, $TEMP2
1334 vpand $AND_MASK, $ACC1, $ACC1
1335 vpsrlq \$29, $ACC2, $TEMP3
1336 vpermq \$0x93, $TEMP1, $TEMP1
1337 vpand $AND_MASK, $ACC2, $ACC2
1338 vpsrlq \$29, $ACC3, $TEMP4
1339 vpermq \$0x93, $TEMP2, $TEMP2
1340 vpand $AND_MASK, $ACC3, $ACC3
1342 vpblendd \$3, $ZERO, $TEMP1, $TEMP0
1343 vpermq \$0x93, $TEMP3, $TEMP3
1344 vpblendd \$3, $TEMP1, $TEMP2, $TEMP1
1345 vpermq \$0x93, $TEMP4, $TEMP4
1346 vpaddq $TEMP0, $ACC0, $ACC0
1347 vpblendd \$3, $TEMP2, $TEMP3, $TEMP2
1348 vpaddq $TEMP1, $ACC1, $ACC1
1349 vpblendd \$3, $TEMP3, $TEMP4, $TEMP3
1350 vpaddq $TEMP2, $ACC2, $ACC2
1351 vpblendd \$3, $TEMP4, $ZERO, $TEMP4
1352 vpaddq $TEMP3, $ACC3, $ACC3
1353 vpaddq $TEMP4, $ACC4, $ACC4
1355 vpsrlq \$29, $ACC0, $TEMP1
1356 vpand $AND_MASK, $ACC0, $ACC0
1357 vpsrlq \$29, $ACC1, $TEMP2
1358 vpand $AND_MASK, $ACC1, $ACC1
1359 vpsrlq \$29, $ACC2, $TEMP3
1360 vpermq \$0x93, $TEMP1, $TEMP1
1361 vpand $AND_MASK, $ACC2, $ACC2
1362 vpsrlq \$29, $ACC3, $TEMP4
1363 vpermq \$0x93, $TEMP2, $TEMP2
1364 vpand $AND_MASK, $ACC3, $ACC3
1365 vpermq \$0x93, $TEMP3, $TEMP3
1367 vpblendd \$3, $ZERO, $TEMP1, $TEMP0
1368 vpermq \$0x93, $TEMP4, $TEMP4
1369 vpblendd \$3, $TEMP1, $TEMP2, $TEMP1
1370 vpaddq $TEMP0, $ACC0, $ACC0
1371 vpblendd \$3, $TEMP2, $TEMP3, $TEMP2
1372 vpaddq $TEMP1, $ACC1, $ACC1
1373 vpblendd \$3, $TEMP3, $TEMP4, $TEMP3
1374 vpaddq $TEMP2, $ACC2, $ACC2
1375 vpblendd \$3, $TEMP4, $ZERO, $TEMP4
1376 vpaddq $TEMP3, $ACC3, $ACC3
1377 vpaddq $TEMP4, $ACC4, $ACC4
1379 vmovdqu $ACC0, 0-128($rp)
1380 vmovdqu $ACC1, 32-128($rp)
1381 vmovdqu $ACC2, 64-128($rp)
1382 vmovdqu $ACC3, 96-128($rp)
1387 vpsrlq \$29, $ACC4, $TEMP1
1388 vpand $AND_MASK, $ACC4, $ACC4
1389 vpsrlq \$29, $ACC5, $TEMP2
1390 vpand $AND_MASK, $ACC5, $ACC5
1391 vpsrlq \$29, $ACC6, $TEMP3
1392 vpermq \$0x93, $TEMP1, $TEMP1
1393 vpand $AND_MASK, $ACC6, $ACC6
1394 vpsrlq \$29, $ACC7, $TEMP4
1395 vpermq \$0x93, $TEMP2, $TEMP2
1396 vpand $AND_MASK, $ACC7, $ACC7
1397 vpsrlq \$29, $ACC8, $TEMP5
1398 vpermq \$0x93, $TEMP3, $TEMP3
1399 vpand $AND_MASK, $ACC8, $ACC8
1400 vpermq \$0x93, $TEMP4, $TEMP4
1402 vpblendd \$3, $ZERO, $TEMP1, $TEMP0
1403 vpermq \$0x93, $TEMP5, $TEMP5
1404 vpblendd \$3, $TEMP1, $TEMP2, $TEMP1
1405 vpaddq $TEMP0, $ACC4, $ACC4
1406 vpblendd \$3, $TEMP2, $TEMP3, $TEMP2
1407 vpaddq $TEMP1, $ACC5, $ACC5
1408 vpblendd \$3, $TEMP3, $TEMP4, $TEMP3
1409 vpaddq $TEMP2, $ACC6, $ACC6
1410 vpblendd \$3, $TEMP4, $TEMP5, $TEMP4
1411 vpaddq $TEMP3, $ACC7, $ACC7
1412 vpaddq $TEMP4, $ACC8, $ACC8
1414 vpsrlq \$29, $ACC4, $TEMP1
1415 vpand $AND_MASK, $ACC4, $ACC4
1416 vpsrlq \$29, $ACC5, $TEMP2
1417 vpand $AND_MASK, $ACC5, $ACC5
1418 vpsrlq \$29, $ACC6, $TEMP3
1419 vpermq \$0x93, $TEMP1, $TEMP1
1420 vpand $AND_MASK, $ACC6, $ACC6
1421 vpsrlq \$29, $ACC7, $TEMP4
1422 vpermq \$0x93, $TEMP2, $TEMP2
1423 vpand $AND_MASK, $ACC7, $ACC7
1424 vpsrlq \$29, $ACC8, $TEMP5
1425 vpermq \$0x93, $TEMP3, $TEMP3
1426 vpand $AND_MASK, $ACC8, $ACC8
1427 vpermq \$0x93, $TEMP4, $TEMP4
1429 vpblendd \$3, $ZERO, $TEMP1, $TEMP0
1430 vpermq \$0x93, $TEMP5, $TEMP5
1431 vpblendd \$3, $TEMP1, $TEMP2, $TEMP1
1432 vpaddq $TEMP0, $ACC4, $ACC4
1433 vpblendd \$3, $TEMP2, $TEMP3, $TEMP2
1434 vpaddq $TEMP1, $ACC5, $ACC5
1435 vpblendd \$3, $TEMP3, $TEMP4, $TEMP3
1436 vpaddq $TEMP2, $ACC6, $ACC6
1437 vpblendd \$3, $TEMP4, $TEMP5, $TEMP4
1438 vpaddq $TEMP3, $ACC7, $ACC7
1439 vpaddq $TEMP4, $ACC8, $ACC8
1441 vmovdqu $ACC4, 128-128($rp)
1442 vmovdqu $ACC5, 160-128($rp)
1443 vmovdqu $ACC6, 192-128($rp)
1444 vmovdqu $ACC7, 224-128($rp)
1445 vmovdqu $ACC8, 256-128($rp)
1450 $code.=<<___ if ($win64);
1451 movaps -0xd8(%rax),%xmm6
1452 movaps -0xc8(%rax),%xmm7
1453 movaps -0xb8(%rax),%xmm8
1454 movaps -0xa8(%rax),%xmm9
1455 movaps -0x98(%rax),%xmm10
1456 movaps -0x88(%rax),%xmm11
1457 movaps -0x78(%rax),%xmm12
1458 movaps -0x68(%rax),%xmm13
1459 movaps -0x58(%rax),%xmm14
1460 movaps -0x48(%rax),%xmm15
1469 lea (%rax),%rsp # restore %rsp
1470 .Lmul_1024_epilogue:
1472 .size rsaz_1024_mul_avx2,.-rsaz_1024_mul_avx2
1476 my ($out,$inp) = $win64 ? ("%rcx","%rdx") : ("%rdi","%rsi");
1477 my @T = map("%r$_",(8..11));
1480 .globl rsaz_1024_red2norm_avx2
1481 .type rsaz_1024_red2norm_avx2,\@abi-omnipotent
1483 rsaz_1024_red2norm_avx2:
1484 sub \$-128,$inp # size optimization
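# A hedged worked example of the conversion generated below: redundant digit j
# carries bits [29*j, 29*j+29) of the 1024-bit value, so output quadword i sums
# every digit overlapping [64*i, 64*i+64), each shifted by 29*j-64*i; the digit
# straddling the upper boundary donates its high bits to quadword i+1 (the
# shl/shr pair), and any carry from the additions is folded in with adc.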
1488 for ($j=0,$i=0; $i<16; $i++) {
1490 while (29*$j<64*($i+1)) { # load data till boundary
1491 $code.=" mov `8*$j-128`($inp), @T[0]\n";
1492 $j++; $k++; push(@T,shift(@T));
1495 while ($k>1) { # shift all loaded values but the last
1496 $code.=" shl \$`29*($j-$k)`,@T[-$k]\n";
1499 $code.=<<___; # shift last value
1501 shl \$`29*($j-1)`, @T[-1]
1502 shr \$`-29*($j-1)`, @T[0]
1504 while ($l) { # accumulate all values
1505 $code.=" add @T[-$l], %rax\n";
1509 adc \$0, @T[0] # consume carry, if any
1510 mov %rax, 8*$i($out)
1517 .size rsaz_1024_red2norm_avx2,.-rsaz_1024_red2norm_avx2
1519 .globl rsaz_1024_norm2red_avx2
1520 .type rsaz_1024_norm2red_avx2,\@abi-omnipotent
1522 rsaz_1024_norm2red_avx2:
1523 sub \$-128,$out # size optimization
1525 mov \$0x1fffffff,%eax
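# A hedged sketch of the forward conversion generated below: redundant digit j
# is (N >> 29*j) & 0x1fffffff, taken from a single 64-bit input word when it
# fits entirely, or stitched from two adjacent words with shrd when the 29-bit
# range straddles a word boundary.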
1527 for ($j=0,$i=0; $i<16; $i++) {
1528 $code.=" mov `8*($i+1)`($inp),@T[1]\n" if ($i<15);
1529 $code.=" xor @T[1],@T[1]\n" if ($i==15);
1531 while (29*($j+1)<64*($i+1)) {
1534 shr \$`29*$j`,@T[-$k]
1535 and %rax,@T[-$k] # &0x1fffffff
1536 mov @T[-$k],`8*$j-128`($out)
1541 shrd \$`29*$j`,@T[1],@T[0]
1543 mov @T[0],`8*$j-128`($out)
1549 mov @T[0],`8*$j-128`($out) # zero
1550 mov @T[0],`8*($j+1)-128`($out)
1551 mov @T[0],`8*($j+2)-128`($out)
1552 mov @T[0],`8*($j+3)-128`($out)
1554 .size rsaz_1024_norm2red_avx2,.-rsaz_1024_norm2red_avx2
1558 my ($out,$inp,$power) = $win64 ? ("%rcx","%rdx","%r8d") : ("%rdi","%rsi","%edx");
1561 .globl rsaz_1024_scatter5_avx2
1562 .type rsaz_1024_scatter5_avx2,\@abi-omnipotent
1564 rsaz_1024_scatter5_avx2:
1566 vmovdqu .Lscatter_permd(%rip),%ymm5
1568 lea ($out,$power),$out
1570 jmp .Loop_scatter_1024
1574 vmovdqu ($inp),%ymm0
1576 vpermd %ymm0,%ymm5,%ymm0
1577 vmovdqu %xmm0,($out)
1578 lea 16*32($out),$out
1580 jnz .Loop_scatter_1024
1584 .size rsaz_1024_scatter5_avx2,.-rsaz_1024_scatter5_avx2
1586 .globl rsaz_1024_gather5_avx2
1587 .type rsaz_1024_gather5_avx2,\@abi-omnipotent
1589 rsaz_1024_gather5_avx2:
1591 $code.=<<___ if ($win64);
1592 lea -0x88(%rsp),%rax
1594 .LSEH_begin_rsaz_1024_gather5:
1595 # I can't trust assembler to use specific encoding:-(
1596 .byte 0x48,0x8d,0x60,0xe0 #lea -0x20(%rax),%rsp
1597 .byte 0xc5,0xf8,0x29,0x70,0xe0 #vmovaps %xmm6,-0x20(%rax)
1598 .byte 0xc5,0xf8,0x29,0x78,0xf0 #vmovaps %xmm7,-0x10(%rax)
1599 .byte 0xc5,0x78,0x29,0x40,0x00 #vmovaps %xmm8,0(%rax)
1600 .byte 0xc5,0x78,0x29,0x48,0x10 #vmovaps %xmm9,0x10(%rax)
1601 .byte 0xc5,0x78,0x29,0x50,0x20 #vmovaps %xmm10,0x20(%rax)
1602 .byte 0xc5,0x78,0x29,0x58,0x30 #vmovaps %xmm11,0x30(%rax)
1603 .byte 0xc5,0x78,0x29,0x60,0x40 #vmovaps %xmm12,0x40(%rax)
1604 .byte 0xc5,0x78,0x29,0x68,0x50 #vmovaps %xmm13,0x50(%rax)
1605 .byte 0xc5,0x78,0x29,0x70,0x60 #vmovaps %xmm14,0x60(%rax)
1606 .byte 0xc5,0x78,0x29,0x78,0x70 #vmovaps %xmm15,0x70(%rax)
1609 lea .Lgather_table(%rip),%r11
1612 shr \$2,%eax # cache line number
1613 shl \$4,$power # offset within cache line
1615 vmovdqu -32(%r11),%ymm7 # .Lgather_permd
1616 vpbroadcastb 8(%r11,%rax), %xmm8
1617 vpbroadcastb 7(%r11,%rax), %xmm9
1618 vpbroadcastb 6(%r11,%rax), %xmm10
1619 vpbroadcastb 5(%r11,%rax), %xmm11
1620 vpbroadcastb 4(%r11,%rax), %xmm12
1621 vpbroadcastb 3(%r11,%rax), %xmm13
1622 vpbroadcastb 2(%r11,%rax), %xmm14
1623 vpbroadcastb 1(%r11,%rax), %xmm15
1625 lea 64($inp,$power),$inp
1626 mov \$64,%r11 # size optimization
1628 jmp .Loop_gather_1024
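# A hedged description of the loop body below: exactly one of the byte masks
# in %xmm8-%xmm15 is all-ones (selected from .Lgather_table by the cache-line
# number computed above), so ANDing the eight 16-byte loads with the masks and
# ORing the results picks out the 16 bytes belonging to the requested entry;
# vpermd with .Lgather_permd then re-expands the four packed digits into
# 64-bit lanes before the 32-byte store.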
1632 vpand -64($inp), %xmm8,%xmm0
1633 vpand ($inp), %xmm9,%xmm1
1634 vpand 64($inp), %xmm10,%xmm2
1635 vpand ($inp,%r11,2), %xmm11,%xmm3
1636 vpor %xmm0,%xmm1,%xmm1
1637 vpand 64($inp,%r11,2), %xmm12,%xmm4
1638 vpor %xmm2,%xmm3,%xmm3
1639 vpand ($inp,%r11,4), %xmm13,%xmm5
1640 vpor %xmm1,%xmm3,%xmm3
1641 vpand 64($inp,%r11,4), %xmm14,%xmm6
1642 vpor %xmm4,%xmm5,%xmm5
1643 vpand -128($inp,%r11,8), %xmm15,%xmm2
1644 lea ($inp,%r11,8),$inp
1645 vpor %xmm3,%xmm5,%xmm5
1646 vpor %xmm2,%xmm6,%xmm6
1647 vpor %xmm5,%xmm6,%xmm6
1648 vpermd %ymm6,%ymm7,%ymm6
1649 vmovdqu %ymm6,($out)
1652 jnz .Loop_gather_1024
1654 vpxor %ymm0,%ymm0,%ymm0
1655 vmovdqu %ymm0,($out)
1658 $code.=<<___ if ($win64);
1660 movaps 0x10(%rsp),%xmm7
1661 movaps 0x20(%rsp),%xmm8
1662 movaps 0x30(%rsp),%xmm9
1663 movaps 0x40(%rsp),%xmm10
1664 movaps 0x50(%rsp),%xmm11
1665 movaps 0x60(%rsp),%xmm12
1666 movaps 0x70(%rsp),%xmm13
1667 movaps 0x80(%rsp),%xmm14
1668 movaps 0x90(%rsp),%xmm15
1670 .LSEH_end_rsaz_1024_gather5:
1674 .size rsaz_1024_gather5_avx2,.-rsaz_1024_gather5_avx2
1679 .extern OPENSSL_ia32cap_P
1680 .globl rsaz_avx2_eligible
1681 .type rsaz_avx2_eligible,\@abi-omnipotent
1684 mov OPENSSL_ia32cap_P+8(%rip),%eax
1686 $code.=<<___ if ($addx);
1687 mov \$`1<<8|1<<19`,%ecx
1690 cmp \$`1<<8|1<<19`,%ecx # check for BMI2+AD*X
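# (a hedged note) bits 8 and 19 of OPENSSL_ia32cap_P+8 (the cached
# CPUID.7.0:EBX word) are the BMI2 and ADCX/ADOX feature flags; when both are
# present, the MULX/ADX Montgomery code is normally preferred over this AVX2
# path.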
1697 .size rsaz_avx2_eligible,.-rsaz_avx2_eligible
1701 .quad 0x1fffffff,0x1fffffff,0x1fffffff,-1
1703 .long 0,2,4,6,7,7,7,7
1705 .long 0,7,1,7,2,7,3,7
1707 .byte 0,0,0,0,0,0,0,0, 0xff,0,0,0,0,0,0,0
1718 .extern __imp_RtlVirtualUnwind
1719 .type rsaz_se_handler,\@abi-omnipotent
1733 mov 120($context),%rax # pull context->Rax
1734 mov 248($context),%rbx # pull context->Rip
1736 mov 8($disp),%rsi # disp->ImageBase
1737 mov 56($disp),%r11 # disp->HandlerData
1739 mov 0(%r11),%r10d # HandlerData[0]
1740 lea (%rsi,%r10),%r10 # prologue label
1741 cmp %r10,%rbx # context->Rip<prologue label
1742 jb .Lcommon_seh_tail
1744 mov 152($context),%rax # pull context->Rsp
1746 mov 4(%r11),%r10d # HandlerData[1]
1747 lea (%rsi,%r10),%r10 # epilogue label
1748 cmp %r10,%rbx # context->Rip>=epilogue label
1749 jae .Lcommon_seh_tail
1751 mov 160($context),%rax # pull context->Rbp
1759 mov %r15,240($context)
1760 mov %r14,232($context)
1761 mov %r13,224($context)
1762 mov %r12,216($context)
1763 mov %rbp,160($context)
1764 mov %rbx,144($context)
1766 lea -0xd8(%rax),%rsi # %xmm save area
1767 lea 512($context),%rdi # & context.Xmm6
1768 mov \$20,%ecx # 10*sizeof(%xmm0)/sizeof(%rax)
1769 .long 0xa548f3fc # cld; rep movsq
1774 mov %rax,152($context) # restore context->Rsp
1775 mov %rsi,168($context) # restore context->Rsi
1776 mov %rdi,176($context) # restore context->Rdi
1778 mov 40($disp),%rdi # disp->ContextRecord
1779 mov $context,%rsi # context
1780 mov \$154,%ecx # sizeof(CONTEXT) in quadwords
1781 .long 0xa548f3fc # cld; rep movsq
1784 xor %rcx,%rcx # arg1, UNW_FLAG_NHANDLER
1785 mov 8(%rsi),%rdx # arg2, disp->ImageBase
1786 mov 0(%rsi),%r8 # arg3, disp->ControlPc
1787 mov 16(%rsi),%r9 # arg4, disp->FunctionEntry
1788 mov 40(%rsi),%r10 # disp->ContextRecord
1789 lea 56(%rsi),%r11 # &disp->HandlerData
1790 lea 24(%rsi),%r12 # &disp->EstablisherFrame
1791 mov %r10,32(%rsp) # arg5
1792 mov %r11,40(%rsp) # arg6
1793 mov %r12,48(%rsp) # arg7
1794 mov %rcx,56(%rsp) # arg8, (NULL)
1795 call *__imp_RtlVirtualUnwind(%rip)
1797 mov \$1,%eax # ExceptionContinueSearch
1809 .size rsaz_se_handler,.-rsaz_se_handler
1813 .rva .LSEH_begin_rsaz_1024_sqr_avx2
1814 .rva .LSEH_end_rsaz_1024_sqr_avx2
1815 .rva .LSEH_info_rsaz_1024_sqr_avx2
1817 .rva .LSEH_begin_rsaz_1024_mul_avx2
1818 .rva .LSEH_end_rsaz_1024_mul_avx2
1819 .rva .LSEH_info_rsaz_1024_mul_avx2
1821 .rva .LSEH_begin_rsaz_1024_gather5
1822 .rva .LSEH_end_rsaz_1024_gather5
1823 .rva .LSEH_info_rsaz_1024_gather5
1826 .LSEH_info_rsaz_1024_sqr_avx2:
1828 .rva rsaz_se_handler
1829 .rva .Lsqr_1024_body,.Lsqr_1024_epilogue
1830 .LSEH_info_rsaz_1024_mul_avx2:
1832 .rva rsaz_se_handler
1833 .rva .Lmul_1024_body,.Lmul_1024_epilogue
1834 .LSEH_info_rsaz_1024_gather5:
1835 .byte 0x01,0x33,0x16,0x00
1836 .byte 0x36,0xf8,0x09,0x00 #vmovaps 0x90(rsp),xmm15
1837 .byte 0x31,0xe8,0x08,0x00 #vmovaps 0x80(rsp),xmm14
1838 .byte 0x2c,0xd8,0x07,0x00 #vmovaps 0x70(rsp),xmm13
1839 .byte 0x27,0xc8,0x06,0x00 #vmovaps 0x60(rsp),xmm12
1840 .byte 0x22,0xb8,0x05,0x00 #vmovaps 0x50(rsp),xmm11
1841 .byte 0x1d,0xa8,0x04,0x00 #vmovaps 0x40(rsp),xmm10
1842 .byte 0x18,0x98,0x03,0x00 #vmovaps 0x30(rsp),xmm9
1843 .byte 0x13,0x88,0x02,0x00 #vmovaps 0x20(rsp),xmm8
1844 .byte 0x0e,0x78,0x01,0x00 #vmovaps 0x10(rsp),xmm7
1845 .byte 0x09,0x68,0x00,0x00 #vmovaps 0x00(rsp),xmm6
1846 .byte 0x04,0x01,0x15,0x00 #sub rsp,0xa8
1850 foreach (split("\n",$code)) {
1851 s/\`([^\`]*)\`/eval($1)/ge;
1853 s/\b(sh[rl]d?\s+\$)(-?[0-9]+)/$1.$2%64/ge or
1855 s/\b(vmov[dq])\b(.+)%ymm([0-9]+)/$1$2%xmm$3/go or
1856 s/\b(vmovdqu)\b(.+)%x%ymm([0-9]+)/$1$2%xmm$3/go or
1857 s/\b(vpinsr[qd])\b(.+)%ymm([0-9]+)/$1$2%xmm$3/go or
1858 s/\b(vpextr[qd])\b(.+)%ymm([0-9]+)/$1$2%xmm$3/go or
1859 s/\b(vpbroadcast[qd]\s+)%ymm([0-9]+)/$1%xmm$2/go;
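# (a hedged note) the substitutions above normalize shift counts modulo 64 and
# rewrite %ymm operands to %xmm for instructions that only touch 128 bits
# (vmovd/q, vpinsr/vpextr, vpbroadcast), which some assemblers otherwise
# refuse to encode.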
1864 print <<___; # assembler is too old
1867 .globl rsaz_avx2_eligible
1868 .type rsaz_avx2_eligible,\@abi-omnipotent
1872 .size rsaz_avx2_eligible,.-rsaz_avx2_eligible
1874 .globl rsaz_1024_sqr_avx2
1875 .globl rsaz_1024_mul_avx2
1876 .globl rsaz_1024_norm2red_avx2
1877 .globl rsaz_1024_red2norm_avx2
1878 .globl rsaz_1024_scatter5_avx2
1879 .globl rsaz_1024_gather5_avx2
1880 .type rsaz_1024_sqr_avx2,\@abi-omnipotent
1883 rsaz_1024_norm2red_avx2:
1884 rsaz_1024_red2norm_avx2:
1885 rsaz_1024_scatter5_avx2:
1886 rsaz_1024_gather5_avx2:
1887 .byte 0x0f,0x0b # ud2
1889 .size rsaz_1024_sqr_avx2,.-rsaz_1024_sqr_avx2