##############################################################################
# Copyright (c) 2012, Intel Corporation #
# All rights reserved. #
# Redistribution and use in source and binary forms, with or without #
# modification, are permitted provided that the following conditions are met: #
# * Redistributions of source code must retain the above copyright #
# notice, this list of conditions and the following disclaimer. #
# * Redistributions in binary form must reproduce the above copyright #
# notice, this list of conditions and the following disclaimer in the #
# documentation and/or other materials provided with the distribution. #
# * Neither the name of the Intel Corporation nor the names of its #
# contributors may be used to endorse or promote products derived from #
# this software without specific prior written permission. #
# THIS SOFTWARE IS PROVIDED BY INTEL CORPORATION "AS IS" AND ANY #
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE #
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR #
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL INTEL CORPORATION OR #
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, #
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, #
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR #
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF #
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING #
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS #
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. #
##############################################################################
# Developers and authors: #
# Shay Gueron (1, 2), and Vlad Krasnov (1) #
# (1) Intel Corporation, Israel Development Center, Haifa, Israel #
# (2) University of Haifa, Israel #
##############################################################################
# [1] S. Gueron, V. Krasnov: "Software Implementation of Modular #
# Exponentiation, Using Advanced Vector Instructions Architectures", #
# F. Ozbudak and F. Rodriguez-Henriquez (Eds.): WAIFI 2012, LNCS 7369, #
# pp. 119-135, 2012. Springer-Verlag Berlin Heidelberg 2012 #
# [2] S. Gueron: "Efficient Software Implementations of Modular #
# Exponentiation", Journal of Cryptographic Engineering 2:31-43 (2012). #
# [3] S. Gueron, V. Krasnov: "Speeding up Big-Numbers Squaring", IEEE #
# Proceedings of 9th International Conference on Information Technology: #
# New Generations (ITNG 2012), pp. 821-823 (2012) #
# [4] S. Gueron, V. Krasnov: "[PATCH] Efficient and side channel analysis #
# resistant 1024-bit modular exponentiation, for optimizing RSA2048 #
# on AVX2 capable x86_64 platforms", #
# http://rt.openssl.org/Ticket/Display.html?id=2850&user=guest&pass=guest #
##############################################################################
# +13% improvement over original submission by <appro@openssl.org>
#
# rsa2048 sign/sec      OpenSSL 1.0.1   scalar(*)       this
# 2.3GHz Haswell        621             765/+23%        1113/+79%
#
# (*) if system doesn't support AVX2, for reference purposes;
if ($flavour =~ /\./) { $output = $flavour; undef $flavour; }

$win64=0; $win64=1 if ($flavour =~ /[nm]asm|mingw64/ || $output =~ /\.asm$/);

$0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
( $xlate="${dir}x86_64-xlate.pl" and -f $xlate ) or
( $xlate="${dir}../../perlasm/x86_64-xlate.pl" and -f $xlate) or
die "can't locate x86_64-xlate.pl";
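# probe the assembler: $avx is left at 0 (no AVX), 1 (AVX) or 2 (AVX2),
# keyed to the first gas/nasm/ml64 versions able to encode each
# instruction set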
if (`$ENV{CC} -Wa,-v -c -o /dev/null -x assembler /dev/null 2>&1`
        =~ /GNU assembler version ([2-9]\.[0-9]+)/) {
    $avx = ($1>=2.19) + ($1>=2.22);
}

if (!$avx && $win64 && ($flavour =~ /nasm/ || $ENV{ASM} =~ /nasm/) &&
    `nasm -v 2>&1` =~ /NASM version ([2-9]\.[0-9]+)/) {
    $avx = ($1>=2.09) + ($1>=2.10);
}

if (!$avx && $win64 && ($flavour =~ /masm/ || $ENV{ASM} =~ /ml64/) &&
    `ml64 2>&1` =~ /Version ([0-9]+)\./) {
    $avx = ($1>=10) + ($1>=11);
}

open OUT,"| $^X $xlate $flavour $output";
my $rp="%rdi";   # BN_ULONG *rp,
my $ap="%rsi";   # const BN_ULONG *ap,
my $np="%rdx";   # const BN_ULONG *np,
my $n0="%ecx";   # const BN_ULONG n0,
my $rep="%r8d";  # int repeat);

# The registers that hold the accumulated redundant result
# The AMM works on 1024 bit operands, and redundant word size is 29
# Therefore: ceil(1024/29)/4 = 9
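# (AMM = Almost Montgomery Multiplication, cf. [2]; a 1024-bit operand
# is split into ceil(1024/29) = 36 digits of 29 bits, one digit per
# 64-bit word, so 36/4 = 9 ymm registers, $ACC0-$ACC8, hold an operand)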
# Registers that hold the broadcasted words of bp, currently used
# Registers that hold the broadcasted words of Y, currently used
my $AND_MASK="%ymm15";
# alu registers that hold the first words of the ACC
my $i="%r14d";   # loop counter
my $FrameSize=32*18+32*8;   # place for A^2 and 2*A
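# (i.e. 32*18 bytes for the 72 digits of the product and 32*8 bytes
# for the eight doubled vectors of the input staged at $aap)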
$np="%r13";   # reassigned argument

.globl rsaz_1024_sqr_avx2
.type rsaz_1024_sqr_avx2,\@function,5
rsaz_1024_sqr_avx2:   # 702 cycles, 14% faster than rsaz_1024_mul_avx2
$code.=<<___ if ($win64);
    vmovaps %xmm6,-0xd8(%rax)
    vmovaps %xmm7,-0xc8(%rax)
    vmovaps %xmm8,-0xb8(%rax)
    vmovaps %xmm9,-0xa8(%rax)
    vmovaps %xmm10,-0x98(%rax)
    vmovaps %xmm11,-0x88(%rax)
    vmovaps %xmm12,-0x78(%rax)
    vmovaps %xmm13,-0x68(%rax)
    vmovaps %xmm14,-0x58(%rax)
    vmovaps %xmm15,-0x48(%rax)
    mov %rdx, $np   # reassigned argument
    sub \$$FrameSize, %rsp
    sub \$-128, $rp   # size optimization
    and \$4095, $tmp   # see if $np crosses page
    vpxor $ACC9,$ACC9,$ACC9
    jz .Lsqr_1024_no_n_copy

    # unaligned 256-bit load that crosses page boundary can
    # cause >2x performance degradation here, so if $np does
    # cross page boundary, copy it to stack and make sure the
    # stack copy does not cross one either
    vmovdqu 32*0-128($np), $ACC0
    vmovdqu 32*1-128($np), $ACC1
    vmovdqu 32*2-128($np), $ACC2
    vmovdqu 32*3-128($np), $ACC3
    vmovdqu 32*4-128($np), $ACC4
    vmovdqu 32*5-128($np), $ACC5
    vmovdqu 32*6-128($np), $ACC6
    vmovdqu 32*7-128($np), $ACC7
    vmovdqu 32*8-128($np), $ACC8
    lea $FrameSize+128(%rsp),$np
    vmovdqu $ACC0, 32*0-128($np)
    vmovdqu $ACC1, 32*1-128($np)
    vmovdqu $ACC2, 32*2-128($np)
    vmovdqu $ACC3, 32*3-128($np)
    vmovdqu $ACC4, 32*4-128($np)
    vmovdqu $ACC5, 32*5-128($np)
    vmovdqu $ACC6, 32*6-128($np)
    vmovdqu $ACC7, 32*7-128($np)
    vmovdqu $ACC8, 32*8-128($np)
    vmovdqu $ACC9, 32*9-128($np)   # $ACC9 is zero

.Lsqr_1024_no_n_copy:
    vmovdqu 32*1-128($ap), $ACC1
    vmovdqu 32*2-128($ap), $ACC2
    vmovdqu 32*3-128($ap), $ACC3
    vmovdqu 32*4-128($ap), $ACC4
    vmovdqu 32*5-128($ap), $ACC5
    vmovdqu 32*6-128($ap), $ACC6
    vmovdqu 32*7-128($ap), $ACC7
    vmovdqu 32*8-128($ap), $ACC8

    lea 192(%rsp), $tp0   # 64+128=192
    vpbroadcastq .Land_mask(%rip), $AND_MASK
    jmp .LOOP_GRANDE_SQR_1024

.LOOP_GRANDE_SQR_1024:
    lea 32*18+128(%rsp), $aap   # size optimization
    lea 448(%rsp), $tp1   # 64+128+256=448
    # the squaring is performed as described in Variant B of
    # "Speeding up Big-Numbers Squaring" [3], so start by calculating
    # the doubled input 2*A = A+A
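    # (in Variant B the off-diagonal partial products a_i*a_j, i<j, are
    # formed against the doubled digits, i.e. as a_i*(2*a_j), which is
    # what makes squaring roughly half the work of a general multiply)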
    vpaddq $ACC1, $ACC1, $ACC1
    vpbroadcastq 32*0-128($ap), $B1
    vpaddq $ACC2, $ACC2, $ACC2
    vmovdqa $ACC1, 32*0-128($aap)
    vpaddq $ACC3, $ACC3, $ACC3
    vmovdqa $ACC2, 32*1-128($aap)
    vpaddq $ACC4, $ACC4, $ACC4
    vmovdqa $ACC3, 32*2-128($aap)
    vpaddq $ACC5, $ACC5, $ACC5
    vmovdqa $ACC4, 32*3-128($aap)
    vpaddq $ACC6, $ACC6, $ACC6
    vmovdqa $ACC5, 32*4-128($aap)
    vpaddq $ACC7, $ACC7, $ACC7
    vmovdqa $ACC6, 32*5-128($aap)
    vpaddq $ACC8, $ACC8, $ACC8
    vmovdqa $ACC7, 32*6-128($aap)
    vpxor $ACC9, $ACC9, $ACC9
    vmovdqa $ACC8, 32*7-128($aap)

    vpmuludq 32*0-128($ap), $B1, $ACC0
    vpbroadcastq 32*1-128($ap), $B2
    vmovdqu $ACC9, 32*9-192($tp0)   # zero upper half
    vpmuludq $B1, $ACC1, $ACC1
    vmovdqu $ACC9, 32*10-448($tp1)
    vpmuludq $B1, $ACC2, $ACC2
    vmovdqu $ACC9, 32*11-448($tp1)
    vpmuludq $B1, $ACC3, $ACC3
    vmovdqu $ACC9, 32*12-448($tp1)
    vpmuludq $B1, $ACC4, $ACC4
    vmovdqu $ACC9, 32*13-448($tp1)
    vpmuludq $B1, $ACC5, $ACC5
    vmovdqu $ACC9, 32*14-448($tp1)
    vpmuludq $B1, $ACC6, $ACC6
    vmovdqu $ACC9, 32*15-448($tp1)
    vpmuludq $B1, $ACC7, $ACC7
    vmovdqu $ACC9, 32*16-448($tp1)
    vpmuludq $B1, $ACC8, $ACC8
    vpbroadcastq 32*2-128($ap), $B1
    vmovdqu $ACC9, 32*17-448($tp1)

    vpbroadcastq 32*1-128($tpa), $B2
    vpmuludq 32*0-128($ap), $B1, $ACC0
    vpaddq 32*0-192($tp0), $ACC0, $ACC0
    vpmuludq 32*0-128($aap), $B1, $ACC1
    vpaddq 32*1-192($tp0), $ACC1, $ACC1
    vpmuludq 32*1-128($aap), $B1, $ACC2
    vpaddq 32*2-192($tp0), $ACC2, $ACC2
    vpmuludq 32*2-128($aap), $B1, $ACC3
    vpaddq 32*3-192($tp0), $ACC3, $ACC3
    vpmuludq 32*3-128($aap), $B1, $ACC4
    vpaddq 32*4-192($tp0), $ACC4, $ACC4
    vpmuludq 32*4-128($aap), $B1, $ACC5
    vpaddq 32*5-192($tp0), $ACC5, $ACC5
    vpmuludq 32*5-128($aap), $B1, $ACC6
    vpaddq 32*6-192($tp0), $ACC6, $ACC6
    vpmuludq 32*6-128($aap), $B1, $ACC7
    vpaddq 32*7-192($tp0), $ACC7, $ACC7
    vpmuludq 32*7-128($aap), $B1, $ACC8
    vpbroadcastq 32*2-128($tpa), $B1
    vpaddq 32*8-192($tp0), $ACC8, $ACC8

    vmovdqu $ACC0, 32*0-192($tp0)
    vmovdqu $ACC1, 32*1-192($tp0)

    vpmuludq 32*1-128($ap), $B2, $TEMP0
    vpaddq $TEMP0, $ACC2, $ACC2
    vpmuludq 32*1-128($aap), $B2, $TEMP1
    vpaddq $TEMP1, $ACC3, $ACC3
    vpmuludq 32*2-128($aap), $B2, $TEMP2
    vpaddq $TEMP2, $ACC4, $ACC4
    vpmuludq 32*3-128($aap), $B2, $TEMP0
    vpaddq $TEMP0, $ACC5, $ACC5
    vpmuludq 32*4-128($aap), $B2, $TEMP1
    vpaddq $TEMP1, $ACC6, $ACC6
    vpmuludq 32*5-128($aap), $B2, $TEMP2
    vpaddq $TEMP2, $ACC7, $ACC7
    vpmuludq 32*6-128($aap), $B2, $TEMP0
    vpaddq $TEMP0, $ACC8, $ACC8
    vpmuludq 32*7-128($aap), $B2, $ACC0
    vpbroadcastq 32*3-128($tpa), $B2
    vpaddq 32*9-192($tp0), $ACC0, $ACC0

    vmovdqu $ACC2, 32*2-192($tp0)
    vmovdqu $ACC3, 32*3-192($tp0)

    vpmuludq 32*2-128($ap), $B1, $TEMP2
    vpaddq $TEMP2, $ACC4, $ACC4
    vpmuludq 32*2-128($aap), $B1, $TEMP0
    vpaddq $TEMP0, $ACC5, $ACC5
    vpmuludq 32*3-128($aap), $B1, $TEMP1
    vpaddq $TEMP1, $ACC6, $ACC6
    vpmuludq 32*4-128($aap), $B1, $TEMP2
    vpaddq $TEMP2, $ACC7, $ACC7
    vpmuludq 32*5-128($aap), $B1, $TEMP0
    vpaddq $TEMP0, $ACC8, $ACC8
    vpmuludq 32*6-128($aap), $B1, $TEMP1
    vpaddq $TEMP1, $ACC0, $ACC0
    vpmuludq 32*7-128($aap), $B1, $ACC1
    vpbroadcastq 32*4-128($tpa), $B1
    vpaddq 32*10-448($tp1), $ACC1, $ACC1

    vmovdqu $ACC4, 32*4-192($tp0)
    vmovdqu $ACC5, 32*5-192($tp0)

    vpmuludq 32*3-128($ap), $B2, $TEMP0
    vpaddq $TEMP0, $ACC6, $ACC6
    vpmuludq 32*3-128($aap), $B2, $TEMP1
    vpaddq $TEMP1, $ACC7, $ACC7
    vpmuludq 32*4-128($aap), $B2, $TEMP2
    vpaddq $TEMP2, $ACC8, $ACC8
    vpmuludq 32*5-128($aap), $B2, $TEMP0
    vpaddq $TEMP0, $ACC0, $ACC0
    vpmuludq 32*6-128($aap), $B2, $TEMP1
    vpaddq $TEMP1, $ACC1, $ACC1
    vpmuludq 32*7-128($aap), $B2, $ACC2
    vpbroadcastq 32*5-128($tpa), $B2
    vpaddq 32*11-448($tp1), $ACC2, $ACC2

    vmovdqu $ACC6, 32*6-192($tp0)
    vmovdqu $ACC7, 32*7-192($tp0)

    vpmuludq 32*4-128($ap), $B1, $TEMP0
    vpaddq $TEMP0, $ACC8, $ACC8
    vpmuludq 32*4-128($aap), $B1, $TEMP1
    vpaddq $TEMP1, $ACC0, $ACC0
    vpmuludq 32*5-128($aap), $B1, $TEMP2
    vpaddq $TEMP2, $ACC1, $ACC1
    vpmuludq 32*6-128($aap), $B1, $TEMP0
    vpaddq $TEMP0, $ACC2, $ACC2
    vpmuludq 32*7-128($aap), $B1, $ACC3
    vpbroadcastq 32*6-128($tpa), $B1
    vpaddq 32*12-448($tp1), $ACC3, $ACC3

    vmovdqu $ACC8, 32*8-192($tp0)
    vmovdqu $ACC0, 32*9-192($tp0)

    vpmuludq 32*5-128($ap), $B2, $TEMP2
    vpaddq $TEMP2, $ACC1, $ACC1
    vpmuludq 32*5-128($aap), $B2, $TEMP0
    vpaddq $TEMP0, $ACC2, $ACC2
    vpmuludq 32*6-128($aap), $B2, $TEMP1
    vpaddq $TEMP1, $ACC3, $ACC3
    vpmuludq 32*7-128($aap), $B2, $ACC4
    vpbroadcastq 32*7-128($tpa), $B2
    vpaddq 32*13-448($tp1), $ACC4, $ACC4

    vmovdqu $ACC1, 32*10-448($tp1)
    vmovdqu $ACC2, 32*11-448($tp1)

    vpmuludq 32*6-128($ap), $B1, $TEMP0
    vpaddq $TEMP0, $ACC3, $ACC3
    vpmuludq 32*6-128($aap), $B1, $TEMP1
    vpbroadcastq 32*8-128($tpa), $ACC0   # borrow $ACC0 for $B1
    vpaddq $TEMP1, $ACC4, $ACC4
    vpmuludq 32*7-128($aap), $B1, $ACC5
    vpbroadcastq 32*0+8-128($tpa), $B1   # for next iteration
    vpaddq 32*14-448($tp1), $ACC5, $ACC5

    vmovdqu $ACC3, 32*12-448($tp1)
    vmovdqu $ACC4, 32*13-448($tp1)

    vpmuludq 32*7-128($ap), $B2, $TEMP0
    vpaddq $TEMP0, $ACC5, $ACC5
    vpmuludq 32*7-128($aap), $B2, $ACC6
    vpaddq 32*15-448($tp1), $ACC6, $ACC6

    vpmuludq 32*8-128($ap), $ACC0, $ACC7
    vmovdqu $ACC5, 32*14-448($tp1)
    vpaddq 32*16-448($tp1), $ACC7, $ACC7
    vmovdqu $ACC6, 32*15-448($tp1)
    vmovdqu $ACC7, 32*16-448($tp1)
    # we need to fix indices 32-39 to avoid overflow
    vmovdqu 32*8(%rsp), $ACC8    # 32*8-192($tp0)
    vmovdqu 32*9(%rsp), $ACC1    # 32*9-192($tp0)
    vmovdqu 32*10(%rsp), $ACC2   # 32*10-192($tp0)
    lea 192(%rsp), $tp0   # 64+128=192
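    # a minimal sketch of the digit normalization done below, per qword:
    #   carry = digit >> 29; digit &= (1<<29)-1; digit[i+1] += carry[i]
    # vpermq 0x93 rotates the four carry qwords up one lane, and the
    # vpblendd 3 pairs feed the carry that wraps out of one ymm register
    # into qword 0 of the next one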
    vpsrlq \$29, $ACC8, $TEMP1
    vpand $AND_MASK, $ACC8, $ACC8
    vpsrlq \$29, $ACC1, $TEMP2
    vpand $AND_MASK, $ACC1, $ACC1

    vpermq \$0x93, $TEMP1, $TEMP1
    vpxor $ZERO, $ZERO, $ZERO
    vpermq \$0x93, $TEMP2, $TEMP2

    vpblendd \$3, $ZERO, $TEMP1, $TEMP0
    vpblendd \$3, $TEMP1, $TEMP2, $TEMP1
    vpaddq $TEMP0, $ACC8, $ACC8
    vpblendd \$3, $TEMP2, $ZERO, $TEMP2
    vpaddq $TEMP1, $ACC1, $ACC1
    vpaddq $TEMP2, $ACC2, $ACC2
    vmovdqu $ACC1, 32*9-192($tp0)
    vmovdqu $ACC2, 32*10-192($tp0)

    vmovdqu 32*1(%rsp), $ACC1
    vmovdqu 32*2-192($tp0), $ACC2
    vmovdqu 32*3-192($tp0), $ACC3
    vmovdqu 32*4-192($tp0), $ACC4
    vmovdqu 32*5-192($tp0), $ACC5
    vmovdqu 32*6-192($tp0), $ACC6
    vmovdqu 32*7-192($tp0), $ACC7

    and \$0x1fffffff, %eax
    imulq -128($np), %rax
    vpbroadcastq $Y1, $Y1
    imulq 8-128($np), %rax
    imulq 16-128($np), %rax
    imulq 24-128($np), %rdx
    and \$0x1fffffff, %eax
    jmp .LOOP_REDUCE_1024

    vpbroadcastq $Y2, $Y2

    vpmuludq 32*1-128($np), $Y1, $TEMP0
    imulq -128($np), %rax
    vpaddq $TEMP0, $ACC1, $ACC1
    vpmuludq 32*2-128($np), $Y1, $TEMP1
    imulq 8-128($np), %rax
    vpaddq $TEMP1, $ACC2, $ACC2
    vpmuludq 32*3-128($np), $Y1, $TEMP2
    imulq 16-128($np), %rax
    vpaddq $TEMP2, $ACC3, $ACC3
    vpmuludq 32*4-128($np), $Y1, $TEMP0
    vpaddq $TEMP0, $ACC4, $ACC4
    vpmuludq 32*5-128($np), $Y1, $TEMP1
    vpaddq $TEMP1, $ACC5, $ACC5
    vpmuludq 32*6-128($np), $Y1, $TEMP2
    and \$0x1fffffff, %eax
    vpaddq $TEMP2, $ACC6, $ACC6
    vpmuludq 32*7-128($np), $Y1, $TEMP0
    vpaddq $TEMP0, $ACC7, $ACC7
    vpmuludq 32*8-128($np), $Y1, $TEMP1
    #vmovdqu 32*1-8-128($np), $TEMP2   # moved below
    vpaddq $TEMP1, $ACC8, $ACC8
    #vmovdqu 32*2-8-128($np), $TEMP0   # moved below
    vpbroadcastq $Y1, $Y1

    vpmuludq 32*1-8-128($np), $Y2, $TEMP2   # see above
    vmovdqu 32*3-8-128($np), $TEMP1
    imulq -128($np), %rax
    vpaddq $TEMP2, $ACC1, $ACC1
    vpmuludq 32*2-8-128($np), $Y2, $TEMP0   # see above
    vmovdqu 32*4-8-128($np), $TEMP2
    imulq 8-128($np), %rax
    vpaddq $TEMP0, $ACC2, $ACC2
    vpmuludq $Y2, $TEMP1, $TEMP1
    vmovdqu 32*5-8-128($np), $TEMP0
    vpaddq $TEMP1, $ACC3, $ACC3
    vpmuludq $Y2, $TEMP2, $TEMP2
    vmovdqu 32*6-8-128($np), $TEMP1
    vpaddq $TEMP2, $ACC4, $ACC4
    vpmuludq $Y2, $TEMP0, $TEMP0
    .byte 0xc4,0x41,0x7e,0x6f,0x9d,0x58,0x00,0x00,0x00   # vmovdqu 32*7-8-128($np), $TEMP2
    and \$0x1fffffff, %eax
    vpaddq $TEMP0, $ACC5, $ACC5
    vpmuludq $Y2, $TEMP1, $TEMP1
    vmovdqu 32*8-8-128($np), $TEMP0
    vpaddq $TEMP1, $ACC6, $ACC6
    vpmuludq $Y2, $TEMP2, $TEMP2
    vmovdqu 32*9-8-128($np), $ACC9
    vmovd %eax, $ACC0   # borrow ACC0 for Y2
    imulq -128($np), %rax
    vpaddq $TEMP2, $ACC7, $ACC7
    vpmuludq $Y2, $TEMP0, $TEMP0
    vmovdqu 32*1-16-128($np), $TEMP1
    vpbroadcastq $ACC0, $ACC0
    vpaddq $TEMP0, $ACC8, $ACC8
    vpmuludq $Y2, $ACC9, $ACC9
    vmovdqu 32*2-16-128($np), $TEMP2

($ACC0,$Y2)=($Y2,$ACC0);

    vmovdqu 32*1-24-128($np), $ACC0
    vpmuludq $Y1, $TEMP1, $TEMP1
    vmovdqu 32*3-16-128($np), $TEMP0
    vpaddq $TEMP1, $ACC1, $ACC1
    vpmuludq $Y2, $ACC0, $ACC0
    vpmuludq $Y1, $TEMP2, $TEMP2
    .byte 0xc4,0x41,0x7e,0x6f,0xb5,0xf0,0xff,0xff,0xff   # vmovdqu 32*4-16-128($np), $TEMP1
    vpaddq $ACC1, $ACC0, $ACC0
    vpaddq $TEMP2, $ACC2, $ACC2
    vpmuludq $Y1, $TEMP0, $TEMP0
    vmovdqu 32*5-16-128($np), $TEMP2
    vmovdqu $ACC0, (%rsp)   # transfer $r0-$r3
    vpaddq $TEMP0, $ACC3, $ACC3
    vpmuludq $Y1, $TEMP1, $TEMP1
    vmovdqu 32*6-16-128($np), $TEMP0
    vpaddq $TEMP1, $ACC4, $ACC4
    vpmuludq $Y1, $TEMP2, $TEMP2
    vmovdqu 32*7-16-128($np), $TEMP1
    vpaddq $TEMP2, $ACC5, $ACC5
    vpmuludq $Y1, $TEMP0, $TEMP0
    vmovdqu 32*8-16-128($np), $TEMP2
    vpaddq $TEMP0, $ACC6, $ACC6
    vpmuludq $Y1, $TEMP1, $TEMP1
    vmovdqu 32*9-16-128($np), $TEMP0
    vpaddq $TEMP1, $ACC7, $ACC7
    vpmuludq $Y1, $TEMP2, $TEMP2
    #vmovdqu 32*2-24-128($np), $TEMP1   # moved below
    vpaddq $TEMP2, $ACC8, $ACC8
    vpmuludq $Y1, $TEMP0, $TEMP0
    and \$0x1fffffff, %eax
    vmovdqu 32*3-24-128($np), $TEMP2
    vpaddq $TEMP0, $ACC9, $ACC9
    vpbroadcastq $Y1, $Y1

    vpmuludq 32*2-24-128($np), $Y2, $TEMP1   # see above
    vmovdqu 32*4-24-128($np), $TEMP0
    imulq -128($np), %rax
    vpaddq $TEMP1, $ACC2, $ACC1
    vpmuludq $Y2, $TEMP2, $TEMP2
    vmovdqu 32*5-24-128($np), $TEMP1
    imulq 8-128($np), %rax
    vpaddq $TEMP2, $ACC3, $ACC2
    vpmuludq $Y2, $TEMP0, $TEMP0
    vmovdqu 32*6-24-128($np), $TEMP2
    imulq 16-128($np), %rax
    vpaddq $TEMP0, $ACC4, $ACC3
    vpmuludq $Y2, $TEMP1, $TEMP1
    vmovdqu 32*7-24-128($np), $TEMP0
    imulq 24-128($np), %rdx   # future $r3
    vpaddq $TEMP1, $ACC5, $ACC4
    vpmuludq $Y2, $TEMP2, $TEMP2
    vmovdqu 32*8-24-128($np), $TEMP1
    vpmuludq $Y2, $TEMP0, $TEMP0
    vpaddq $TEMP2, $ACC6, $ACC5
    vmovdqu 32*9-24-128($np), $TEMP2
    and \$0x1fffffff, %eax
    vpaddq $TEMP0, $ACC7, $ACC6
    vpmuludq $Y2, $TEMP1, $TEMP1
    vpaddq $TEMP1, $ACC8, $ACC7
    vpmuludq $Y2, $TEMP2, $TEMP2
    vpaddq $TEMP2, $ACC9, $ACC8

    jnz .LOOP_REDUCE_1024

($ACC0,$Y2)=($Y2,$ACC0);

    lea 448(%rsp), $tp1   # size optimization
    vpaddq $ACC9, $Y2, $ACC0
    vpxor $ZERO, $ZERO, $ZERO

    vpaddq 32*9-192($tp0), $ACC0, $ACC0
    vpaddq 32*10-448($tp1), $ACC1, $ACC1
    vpaddq 32*11-448($tp1), $ACC2, $ACC2
    vpaddq 32*12-448($tp1), $ACC3, $ACC3
    vpaddq 32*13-448($tp1), $ACC4, $ACC4
    vpaddq 32*14-448($tp1), $ACC5, $ACC5
    vpaddq 32*15-448($tp1), $ACC6, $ACC6
    vpaddq 32*16-448($tp1), $ACC7, $ACC7
    vpaddq 32*17-448($tp1), $ACC8, $ACC8
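    # normalize the digits again before writing them out: each pass
    # below strips bits 29 and up from every digit and adds them to the
    # next digit; after the accumulation above one pass can still leave
    # digits marginally over 29 bits, hence two passes per half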
    vpsrlq \$29, $ACC0, $TEMP1
    vpand $AND_MASK, $ACC0, $ACC0
    vpsrlq \$29, $ACC1, $TEMP2
    vpand $AND_MASK, $ACC1, $ACC1
    vpsrlq \$29, $ACC2, $TEMP3
    vpermq \$0x93, $TEMP1, $TEMP1
    vpand $AND_MASK, $ACC2, $ACC2
    vpsrlq \$29, $ACC3, $TEMP4
    vpermq \$0x93, $TEMP2, $TEMP2
    vpand $AND_MASK, $ACC3, $ACC3
    vpermq \$0x93, $TEMP3, $TEMP3

    vpblendd \$3, $ZERO, $TEMP1, $TEMP0
    vpermq \$0x93, $TEMP4, $TEMP4
    vpblendd \$3, $TEMP1, $TEMP2, $TEMP1
    vpaddq $TEMP0, $ACC0, $ACC0
    vpblendd \$3, $TEMP2, $TEMP3, $TEMP2
    vpaddq $TEMP1, $ACC1, $ACC1
    vpblendd \$3, $TEMP3, $TEMP4, $TEMP3
    vpaddq $TEMP2, $ACC2, $ACC2
    vpblendd \$3, $TEMP4, $ZERO, $TEMP4
    vpaddq $TEMP3, $ACC3, $ACC3
    vpaddq $TEMP4, $ACC4, $ACC4

    vpsrlq \$29, $ACC0, $TEMP1
    vpand $AND_MASK, $ACC0, $ACC0
    vpsrlq \$29, $ACC1, $TEMP2
    vpand $AND_MASK, $ACC1, $ACC1
    vpsrlq \$29, $ACC2, $TEMP3
    vpermq \$0x93, $TEMP1, $TEMP1
    vpand $AND_MASK, $ACC2, $ACC2
    vpsrlq \$29, $ACC3, $TEMP4
    vpermq \$0x93, $TEMP2, $TEMP2
    vpand $AND_MASK, $ACC3, $ACC3
    vpermq \$0x93, $TEMP3, $TEMP3

    vpblendd \$3, $ZERO, $TEMP1, $TEMP0
    vpermq \$0x93, $TEMP4, $TEMP4
    vpblendd \$3, $TEMP1, $TEMP2, $TEMP1
    vpaddq $TEMP0, $ACC0, $ACC0
    vpblendd \$3, $TEMP2, $TEMP3, $TEMP2
    vpaddq $TEMP1, $ACC1, $ACC1
    vmovdqu $ACC0, 32*0-128($rp)
    vpblendd \$3, $TEMP3, $TEMP4, $TEMP3
    vpaddq $TEMP2, $ACC2, $ACC2
    vmovdqu $ACC1, 32*1-128($rp)
    vpblendd \$3, $TEMP4, $ZERO, $TEMP4
    vpaddq $TEMP3, $ACC3, $ACC3
    vmovdqu $ACC2, 32*2-128($rp)
    vpaddq $TEMP4, $ACC4, $ACC4
    vmovdqu $ACC3, 32*3-128($rp)

    vpsrlq \$29, $ACC4, $TEMP1
    vpand $AND_MASK, $ACC4, $ACC4
    vpsrlq \$29, $ACC5, $TEMP2
    vpand $AND_MASK, $ACC5, $ACC5
    vpsrlq \$29, $ACC6, $TEMP3
    vpermq \$0x93, $TEMP1, $TEMP1
    vpand $AND_MASK, $ACC6, $ACC6
    vpsrlq \$29, $ACC7, $TEMP4
    vpermq \$0x93, $TEMP2, $TEMP2
    vpand $AND_MASK, $ACC7, $ACC7
    vpsrlq \$29, $ACC8, $TEMP5
    vpermq \$0x93, $TEMP3, $TEMP3
    vpand $AND_MASK, $ACC8, $ACC8
    vpermq \$0x93, $TEMP4, $TEMP4

    vpblendd \$3, $ZERO, $TEMP1, $TEMP0
    vpermq \$0x93, $TEMP5, $TEMP5
    vpblendd \$3, $TEMP1, $TEMP2, $TEMP1
    vpaddq $TEMP0, $ACC4, $ACC4
    vpblendd \$3, $TEMP2, $TEMP3, $TEMP2
    vpaddq $TEMP1, $ACC5, $ACC5
    vpblendd \$3, $TEMP3, $TEMP4, $TEMP3
    vpaddq $TEMP2, $ACC6, $ACC6
    vpblendd \$3, $TEMP4, $TEMP5, $TEMP4
    vpaddq $TEMP3, $ACC7, $ACC7
    vpaddq $TEMP4, $ACC8, $ACC8

    vpsrlq \$29, $ACC4, $TEMP1
    vpand $AND_MASK, $ACC4, $ACC4
    vpsrlq \$29, $ACC5, $TEMP2
    vpand $AND_MASK, $ACC5, $ACC5
    vpsrlq \$29, $ACC6, $TEMP3
    vpermq \$0x93, $TEMP1, $TEMP1
    vpand $AND_MASK, $ACC6, $ACC6
    vpsrlq \$29, $ACC7, $TEMP4
    vpermq \$0x93, $TEMP2, $TEMP2
    vpand $AND_MASK, $ACC7, $ACC7
    vpsrlq \$29, $ACC8, $TEMP5
    vpermq \$0x93, $TEMP3, $TEMP3
    vpand $AND_MASK, $ACC8, $ACC8
    vpermq \$0x93, $TEMP4, $TEMP4

    vpblendd \$3, $ZERO, $TEMP1, $TEMP0
    vpermq \$0x93, $TEMP5, $TEMP5
    vpblendd \$3, $TEMP1, $TEMP2, $TEMP1
    vpaddq $TEMP0, $ACC4, $ACC4
    vpblendd \$3, $TEMP2, $TEMP3, $TEMP2
    vpaddq $TEMP1, $ACC5, $ACC5
    vmovdqu $ACC4, 32*4-128($rp)
    vpblendd \$3, $TEMP3, $TEMP4, $TEMP3
    vpaddq $TEMP2, $ACC6, $ACC6
    vmovdqu $ACC5, 32*5-128($rp)
    vpblendd \$3, $TEMP4, $TEMP5, $TEMP4
    vpaddq $TEMP3, $ACC7, $ACC7
    vmovdqu $ACC6, 32*6-128($rp)
    vpaddq $TEMP4, $ACC8, $ACC8
    vmovdqu $ACC7, 32*7-128($rp)
    vmovdqu $ACC8, 32*8-128($rp)

    jne .LOOP_GRANDE_SQR_1024
$code.=<<___ if ($win64);
    movaps -0xd8(%rax),%xmm6
    movaps -0xc8(%rax),%xmm7
    movaps -0xb8(%rax),%xmm8
    movaps -0xa8(%rax),%xmm9
    movaps -0x98(%rax),%xmm10
    movaps -0x88(%rax),%xmm11
    movaps -0x78(%rax),%xmm12
    movaps -0x68(%rax),%xmm13
    movaps -0x58(%rax),%xmm14
    movaps -0x48(%rax),%xmm15
    lea (%rax),%rsp   # restore %rsp
.size rsaz_1024_sqr_avx2,.-rsaz_1024_sqr_avx2

my $rp="%rdi";   # BN_ULONG *rp,
my $ap="%rsi";   # const BN_ULONG *ap,
my $bp="%rdx";   # const BN_ULONG *bp,
my $np="%rcx";   # const BN_ULONG *np,
my $n0="%r8d";   # unsigned int n0);

# The registers that hold the accumulated redundant result
# The AMM works on 1024 bit operands, and redundant word size is 29
# Therefore: ceil(1024/29)/4 = 9
# Registers that hold the broadcasted words of multiplier, currently used
my $AND_MASK="%ymm15";
# alu registers that hold the first words of the ACC
$bp="%r13";   # reassigned argument

.globl rsaz_1024_mul_avx2
.type rsaz_1024_mul_avx2,\@function,5
$code.=<<___ if ($win64);
    vmovaps %xmm6,-0xd8(%rax)
    vmovaps %xmm7,-0xc8(%rax)
    vmovaps %xmm8,-0xb8(%rax)
    vmovaps %xmm9,-0xa8(%rax)
    vmovaps %xmm10,-0x98(%rax)
    vmovaps %xmm11,-0x88(%rax)
    vmovaps %xmm12,-0x78(%rax)
    vmovaps %xmm13,-0x68(%rax)
    vmovaps %xmm14,-0x58(%rax)
    vmovaps %xmm15,-0x48(%rax)
    mov %rdx, $bp   # reassigned argument

    # unaligned 256-bit load that crosses page boundary can
    # cause severe performance degradation here, so if $ap does
    # cross page boundary, swap it with $bp [meaning that caller
    # is advised to lay down $ap and $bp next to each other, so
    # that only one can cross page boundary].
    sub \$-128,$ap   # size optimization
    and \$4095, $tmp   # see if $np crosses page
    jz .Lmul_1024_no_n_copy
    # unaligned 256-bit load that crosses page boundary can
    # cause severe performance degradation here, so if $np does
    # cross page boundary, copy it to stack and make sure the
    # stack copy does not cross one either
    vmovdqu 32*0-128($np), $ACC0
    vmovdqu 32*1-128($np), $ACC1
    vmovdqu 32*2-128($np), $ACC2
    vmovdqu 32*3-128($np), $ACC3
    vmovdqu 32*4-128($np), $ACC4
    vmovdqu 32*5-128($np), $ACC5
    vmovdqu 32*6-128($np), $ACC6
    vmovdqu 32*7-128($np), $ACC7
    vmovdqu 32*8-128($np), $ACC8

    vmovdqu $ACC0, 32*0-128($np)
    vpxor $ACC0, $ACC0, $ACC0
    vmovdqu $ACC1, 32*1-128($np)
    vpxor $ACC1, $ACC1, $ACC1
    vmovdqu $ACC2, 32*2-128($np)
    vpxor $ACC2, $ACC2, $ACC2
    vmovdqu $ACC3, 32*3-128($np)
    vpxor $ACC3, $ACC3, $ACC3
    vmovdqu $ACC4, 32*4-128($np)
    vpxor $ACC4, $ACC4, $ACC4
    vmovdqu $ACC5, 32*5-128($np)
    vpxor $ACC5, $ACC5, $ACC5
    vmovdqu $ACC6, 32*6-128($np)
    vpxor $ACC6, $ACC6, $ACC6
    vmovdqu $ACC7, 32*7-128($np)
    vpxor $ACC7, $ACC7, $ACC7
    vmovdqu $ACC8, 32*8-128($np)
    vmovdqu $ACC9, 32*9-128($np)   # $ACC9 is zero after vzeroall
.Lmul_1024_no_n_copy:

    vpbroadcastq ($bp), $Bi
    vmovdqu $ACC0, (%rsp)   # clear top of stack
    vmovdqu .Land_mask(%rip), $AND_MASK

    vpsrlq \$29, $ACC3, $ACC9   # correct $ACC3(*)
    imulq -128($ap), %rax
    imulq 8-128($ap), $r1
    and \$0x1fffffff, %eax
    imulq 16-128($ap), $r2
    imulq 24-128($ap), $r3

    vpmuludq 32*1-128($ap),$Bi,$TEMP0
    vpaddq $TEMP0,$ACC1,$ACC1
    vpmuludq 32*2-128($ap),$Bi,$TEMP1
    vpbroadcastq $Yi, $Yi
    vpaddq $TEMP1,$ACC2,$ACC2
    vpmuludq 32*3-128($ap),$Bi,$TEMP2
    vpand $AND_MASK, $ACC3, $ACC3   # correct $ACC3
    vpaddq $TEMP2,$ACC3,$ACC3
    vpmuludq 32*4-128($ap),$Bi,$TEMP0
    vpaddq $TEMP0,$ACC4,$ACC4
    vpmuludq 32*5-128($ap),$Bi,$TEMP1
    vpaddq $TEMP1,$ACC5,$ACC5
    vpmuludq 32*6-128($ap),$Bi,$TEMP2
    vpaddq $TEMP2,$ACC6,$ACC6
    vpmuludq 32*7-128($ap),$Bi,$TEMP0
    vpermq \$0x93, $ACC9, $ACC9   # correct $ACC3
    vpaddq $TEMP0,$ACC7,$ACC7
    vpmuludq 32*8-128($ap),$Bi,$TEMP1
    vpbroadcastq 8($bp), $Bi
    vpaddq $TEMP1,$ACC8,$ACC8

    imulq -128($np),%rax
    imulq 8-128($np),%rax
    imulq 16-128($np),%rax
    imulq 24-128($np),%rdx

    vpmuludq 32*1-128($np),$Yi,$TEMP2
    vpaddq $TEMP2,$ACC1,$ACC1
    vpmuludq 32*2-128($np),$Yi,$TEMP0
    vpaddq $TEMP0,$ACC2,$ACC2
    vpmuludq 32*3-128($np),$Yi,$TEMP1
    vpaddq $TEMP1,$ACC3,$ACC3
    vpmuludq 32*4-128($np),$Yi,$TEMP2
    vpaddq $TEMP2,$ACC4,$ACC4
    vpmuludq 32*5-128($np),$Yi,$TEMP0
    vpaddq $TEMP0,$ACC5,$ACC5
    vpmuludq 32*6-128($np),$Yi,$TEMP1
    vpaddq $TEMP1,$ACC6,$ACC6
    vpmuludq 32*7-128($np),$Yi,$TEMP2
    vpblendd \$3, $ZERO, $ACC9, $ACC9   # correct $ACC3
    vpaddq $TEMP2,$ACC7,$ACC7
    vpmuludq 32*8-128($np),$Yi,$TEMP0
    vpaddq $ACC9, $ACC3, $ACC3   # correct $ACC3
    vpaddq $TEMP0,$ACC8,$ACC8

    imulq -128($ap),%rax
    vmovdqu -8+32*1-128($ap),$TEMP1
    imulq 8-128($ap),%rax
    vmovdqu -8+32*2-128($ap),$TEMP2
    and \$0x1fffffff, %eax
    imulq 16-128($ap),%rbx

    vpmuludq $Bi,$TEMP1,$TEMP1
    vmovdqu -8+32*3-128($ap),$TEMP0
    vpaddq $TEMP1,$ACC1,$ACC1
    vpmuludq $Bi,$TEMP2,$TEMP2
    vpbroadcastq $Yi, $Yi
    vmovdqu -8+32*4-128($ap),$TEMP1
    vpaddq $TEMP2,$ACC2,$ACC2
    vpmuludq $Bi,$TEMP0,$TEMP0
    vmovdqu -8+32*5-128($ap),$TEMP2
    vpaddq $TEMP0,$ACC3,$ACC3
    vpmuludq $Bi,$TEMP1,$TEMP1
    vmovdqu -8+32*6-128($ap),$TEMP0
    vpaddq $TEMP1,$ACC4,$ACC4
    vpmuludq $Bi,$TEMP2,$TEMP2
    vmovdqu -8+32*7-128($ap),$TEMP1
    vpaddq $TEMP2,$ACC5,$ACC5
    vpmuludq $Bi,$TEMP0,$TEMP0
    vmovdqu -8+32*8-128($ap),$TEMP2
    vpaddq $TEMP0,$ACC6,$ACC6
    vpmuludq $Bi,$TEMP1,$TEMP1
    vmovdqu -8+32*9-128($ap),$ACC9
    vpaddq $TEMP1,$ACC7,$ACC7
    vpmuludq $Bi,$TEMP2,$TEMP2
    vpaddq $TEMP2,$ACC8,$ACC8
    vpmuludq $Bi,$ACC9,$ACC9
    vpbroadcastq 16($bp), $Bi

    imulq -128($np),%rax
    vmovdqu -8+32*1-128($np),$TEMP0
    imulq 8-128($np),%rax
    vmovdqu -8+32*2-128($np),$TEMP1
    imulq 16-128($np),%rdx

    vpmuludq $Yi,$TEMP0,$TEMP0
    vmovdqu -8+32*3-128($np),$TEMP2
    vpaddq $TEMP0,$ACC1,$ACC1
    vpmuludq $Yi,$TEMP1,$TEMP1
    vmovdqu -8+32*4-128($np),$TEMP0
    vpaddq $TEMP1,$ACC2,$ACC2
    vpmuludq $Yi,$TEMP2,$TEMP2
    vmovdqu -8+32*5-128($np),$TEMP1
    vpaddq $TEMP2,$ACC3,$ACC3
    vpmuludq $Yi,$TEMP0,$TEMP0
    vmovdqu -8+32*6-128($np),$TEMP2
    vpaddq $TEMP0,$ACC4,$ACC4
    vpmuludq $Yi,$TEMP1,$TEMP1
    vmovdqu -8+32*7-128($np),$TEMP0
    vpaddq $TEMP1,$ACC5,$ACC5
    vpmuludq $Yi,$TEMP2,$TEMP2
    vmovdqu -8+32*8-128($np),$TEMP1
    vpaddq $TEMP2,$ACC6,$ACC6
    vpmuludq $Yi,$TEMP0,$TEMP0
    vmovdqu -8+32*9-128($np),$TEMP2
    vpaddq $TEMP0,$ACC7,$ACC7
    vpmuludq $Yi,$TEMP1,$TEMP1
    vpaddq $TEMP1,$ACC8,$ACC8
    vpmuludq $Yi,$TEMP2,$TEMP2
    vpaddq $TEMP2,$ACC9,$ACC9

    vmovdqu -16+32*1-128($ap),$TEMP0
    imulq -128($ap),%rax
    vmovdqu -16+32*2-128($ap),$TEMP1
    and \$0x1fffffff, %eax
    imulq 8-128($ap),%rbx

    vpmuludq $Bi,$TEMP0,$TEMP0
    vmovdqu -16+32*3-128($ap),$TEMP2
    vpaddq $TEMP0,$ACC1,$ACC1
    vpmuludq $Bi,$TEMP1,$TEMP1
    vpbroadcastq $Yi, $Yi
    vmovdqu -16+32*4-128($ap),$TEMP0
    vpaddq $TEMP1,$ACC2,$ACC2
    vpmuludq $Bi,$TEMP2,$TEMP2
    vmovdqu -16+32*5-128($ap),$TEMP1
    vpaddq $TEMP2,$ACC3,$ACC3
    vpmuludq $Bi,$TEMP0,$TEMP0
    vmovdqu -16+32*6-128($ap),$TEMP2
    vpaddq $TEMP0,$ACC4,$ACC4
    vpmuludq $Bi,$TEMP1,$TEMP1
    vmovdqu -16+32*7-128($ap),$TEMP0
    vpaddq $TEMP1,$ACC5,$ACC5
    vpmuludq $Bi,$TEMP2,$TEMP2
    vmovdqu -16+32*8-128($ap),$TEMP1
    vpaddq $TEMP2,$ACC6,$ACC6
    vpmuludq $Bi,$TEMP0,$TEMP0
    vmovdqu -16+32*9-128($ap),$TEMP2
    vpaddq $TEMP0,$ACC7,$ACC7
    vpmuludq $Bi,$TEMP1,$TEMP1
    vpaddq $TEMP1,$ACC8,$ACC8
    vpmuludq $Bi,$TEMP2,$TEMP2
    vpbroadcastq 24($bp), $Bi
    vpaddq $TEMP2,$ACC9,$ACC9

    vmovdqu -16+32*1-128($np),$TEMP0
    imulq -128($np),%rax
    vmovdqu -16+32*2-128($np),$TEMP1
    imulq 8-128($np),%rdx

    vpmuludq $Yi,$TEMP0,$TEMP0
    vmovdqu -16+32*3-128($np),$TEMP2
    vpaddq $TEMP0,$ACC1,$ACC1
    vpmuludq $Yi,$TEMP1,$TEMP1
    vmovdqu -16+32*4-128($np),$TEMP0
    vpaddq $TEMP1,$ACC2,$ACC2
    vpmuludq $Yi,$TEMP2,$TEMP2
    vmovdqu -16+32*5-128($np),$TEMP1
    vpaddq $TEMP2,$ACC3,$ACC3
    vpmuludq $Yi,$TEMP0,$TEMP0
    vmovdqu -16+32*6-128($np),$TEMP2
    vpaddq $TEMP0,$ACC4,$ACC4
    vpmuludq $Yi,$TEMP1,$TEMP1
    vmovdqu -16+32*7-128($np),$TEMP0
    vpaddq $TEMP1,$ACC5,$ACC5
    vpmuludq $Yi,$TEMP2,$TEMP2
    vmovdqu -16+32*8-128($np),$TEMP1
    vpaddq $TEMP2,$ACC6,$ACC6
    vpmuludq $Yi,$TEMP0,$TEMP0
    vmovdqu -16+32*9-128($np),$TEMP2
    vpaddq $TEMP0,$ACC7,$ACC7
    vpmuludq $Yi,$TEMP1,$TEMP1
    vmovdqu -24+32*1-128($ap),$TEMP0
    vpaddq $TEMP1,$ACC8,$ACC8
    vpmuludq $Yi,$TEMP2,$TEMP2
    vmovdqu -24+32*2-128($ap),$TEMP1
    vpaddq $TEMP2,$ACC9,$ACC9

    imulq -128($ap),%rbx
    and \$0x1fffffff, %eax

    vpmuludq $Bi,$TEMP0,$TEMP0
    vmovdqu -24+32*3-128($ap),$TEMP2
    vpaddq $TEMP0,$ACC1,$ACC1
    vpmuludq $Bi,$TEMP1,$TEMP1
    vpbroadcastq $Yi, $Yi
    vmovdqu -24+32*4-128($ap),$TEMP0
    vpaddq $TEMP1,$ACC2,$ACC2
    vpmuludq $Bi,$TEMP2,$TEMP2
    vmovdqu -24+32*5-128($ap),$TEMP1
    vpaddq $TEMP2,$ACC3,$ACC3
    vpmuludq $Bi,$TEMP0,$TEMP0
    vmovdqu -24+32*6-128($ap),$TEMP2
    vpaddq $TEMP0,$ACC4,$ACC4
    vpmuludq $Bi,$TEMP1,$TEMP1
    vmovdqu -24+32*7-128($ap),$TEMP0
    vpaddq $TEMP1,$ACC5,$ACC5
    vpmuludq $Bi,$TEMP2,$TEMP2
    vmovdqu -24+32*8-128($ap),$TEMP1
    vpaddq $TEMP2,$ACC6,$ACC6
    vpmuludq $Bi,$TEMP0,$TEMP0
    vmovdqu -24+32*9-128($ap),$TEMP2
    vpaddq $TEMP0,$ACC7,$ACC7
    vpmuludq $Bi,$TEMP1,$TEMP1
    vpaddq $TEMP1,$ACC8,$ACC8
    vpmuludq $Bi,$TEMP2,$TEMP2
    vpbroadcastq 32($bp), $Bi
    vpaddq $TEMP2,$ACC9,$ACC9
    add \$32, $bp   # $bp++

    vmovdqu -24+32*1-128($np),$TEMP0
    imulq -128($np),%rax
    vmovdqu -24+32*2-128($np),$TEMP1
    vpmuludq $Yi,$TEMP0,$TEMP0
    vmovdqu -24+32*3-128($np),$TEMP2
    vpaddq $TEMP0,$ACC1,$ACC0   # $ACC0==$TEMP0
    vpmuludq $Yi,$TEMP1,$TEMP1
    vmovdqu $ACC0, (%rsp)   # transfer $r0-$r3
    vpaddq $TEMP1,$ACC2,$ACC1
    vmovdqu -24+32*4-128($np),$TEMP0
    vpmuludq $Yi,$TEMP2,$TEMP2
    vmovdqu -24+32*5-128($np),$TEMP1
    vpaddq $TEMP2,$ACC3,$ACC2
    vpmuludq $Yi,$TEMP0,$TEMP0
    vmovdqu -24+32*6-128($np),$TEMP2
    vpaddq $TEMP0,$ACC4,$ACC3
    vpmuludq $Yi,$TEMP1,$TEMP1
    vmovdqu -24+32*7-128($np),$TEMP0
    vpaddq $TEMP1,$ACC5,$ACC4
    vpmuludq $Yi,$TEMP2,$TEMP2
    vmovdqu -24+32*8-128($np),$TEMP1
    vpaddq $TEMP2,$ACC6,$ACC5
    vpmuludq $Yi,$TEMP0,$TEMP0
    vmovdqu -24+32*9-128($np),$TEMP2
    vpaddq $TEMP0,$ACC7,$ACC6
    vpmuludq $Yi,$TEMP1,$TEMP1
    vpaddq $TEMP1,$ACC8,$ACC7
    vpmuludq $Yi,$TEMP2,$TEMP2
    vpaddq $TEMP2,$ACC9,$ACC8
    # (*) The original implementation corrected ACC1-ACC3 for overflow
    #     after 7 loop runs, i.e. after 28 iterations, or 56 additions.
    #     But as we underutilize resources, it's possible to correct in
    #     each iteration with only marginal performance loss. And since
    #     we then correct in every iteration, we can correct fewer
    #     digits and avoid the performance penalty completely. Also note
    #     that we correct only three digits out of four; this works
    #     because the most significant digit is subjected to fewer
    #     additions.
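    # a rough capacity estimate behind the above: products of two
    # 29-bit digits stay below 2^58, so a 64-bit lane overflows only
    # after about 2^6 = 64 such additions; the original 56-addition
    # window ran close to that bound, while the per-iteration
    # correction keeps the count far under it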
    vpermq \$0, $AND_MASK, $AND_MASK
    vpaddq (%rsp), $TEMP1, $ACC0

    vpsrlq \$29, $ACC0, $TEMP1
    vpand $AND_MASK, $ACC0, $ACC0
    vpsrlq \$29, $ACC1, $TEMP2
    vpand $AND_MASK, $ACC1, $ACC1
    vpsrlq \$29, $ACC2, $TEMP3
    vpermq \$0x93, $TEMP1, $TEMP1
    vpand $AND_MASK, $ACC2, $ACC2
    vpsrlq \$29, $ACC3, $TEMP4
    vpermq \$0x93, $TEMP2, $TEMP2
    vpand $AND_MASK, $ACC3, $ACC3

    vpblendd \$3, $ZERO, $TEMP1, $TEMP0
    vpermq \$0x93, $TEMP3, $TEMP3
    vpblendd \$3, $TEMP1, $TEMP2, $TEMP1
    vpermq \$0x93, $TEMP4, $TEMP4
    vpaddq $TEMP0, $ACC0, $ACC0
    vpblendd \$3, $TEMP2, $TEMP3, $TEMP2
    vpaddq $TEMP1, $ACC1, $ACC1
    vpblendd \$3, $TEMP3, $TEMP4, $TEMP3
    vpaddq $TEMP2, $ACC2, $ACC2
    vpblendd \$3, $TEMP4, $ZERO, $TEMP4
    vpaddq $TEMP3, $ACC3, $ACC3
    vpaddq $TEMP4, $ACC4, $ACC4

    vpsrlq \$29, $ACC0, $TEMP1
    vpand $AND_MASK, $ACC0, $ACC0
    vpsrlq \$29, $ACC1, $TEMP2
    vpand $AND_MASK, $ACC1, $ACC1
    vpsrlq \$29, $ACC2, $TEMP3
    vpermq \$0x93, $TEMP1, $TEMP1
    vpand $AND_MASK, $ACC2, $ACC2
    vpsrlq \$29, $ACC3, $TEMP4
    vpermq \$0x93, $TEMP2, $TEMP2
    vpand $AND_MASK, $ACC3, $ACC3
    vpermq \$0x93, $TEMP3, $TEMP3

    vpblendd \$3, $ZERO, $TEMP1, $TEMP0
    vpermq \$0x93, $TEMP4, $TEMP4
    vpblendd \$3, $TEMP1, $TEMP2, $TEMP1
    vpaddq $TEMP0, $ACC0, $ACC0
    vpblendd \$3, $TEMP2, $TEMP3, $TEMP2
    vpaddq $TEMP1, $ACC1, $ACC1
    vpblendd \$3, $TEMP3, $TEMP4, $TEMP3
    vpaddq $TEMP2, $ACC2, $ACC2
    vpblendd \$3, $TEMP4, $ZERO, $TEMP4
    vpaddq $TEMP3, $ACC3, $ACC3
    vpaddq $TEMP4, $ACC4, $ACC4

    vmovdqu $ACC0, 0-128($rp)
    vmovdqu $ACC1, 32-128($rp)
    vmovdqu $ACC2, 64-128($rp)
    vmovdqu $ACC3, 96-128($rp)

    vpsrlq \$29, $ACC4, $TEMP1
    vpand $AND_MASK, $ACC4, $ACC4
    vpsrlq \$29, $ACC5, $TEMP2
    vpand $AND_MASK, $ACC5, $ACC5
    vpsrlq \$29, $ACC6, $TEMP3
    vpermq \$0x93, $TEMP1, $TEMP1
    vpand $AND_MASK, $ACC6, $ACC6
    vpsrlq \$29, $ACC7, $TEMP4
    vpermq \$0x93, $TEMP2, $TEMP2
    vpand $AND_MASK, $ACC7, $ACC7
    vpsrlq \$29, $ACC8, $TEMP5
    vpermq \$0x93, $TEMP3, $TEMP3
    vpand $AND_MASK, $ACC8, $ACC8
    vpermq \$0x93, $TEMP4, $TEMP4

    vpblendd \$3, $ZERO, $TEMP1, $TEMP0
    vpermq \$0x93, $TEMP5, $TEMP5
    vpblendd \$3, $TEMP1, $TEMP2, $TEMP1
    vpaddq $TEMP0, $ACC4, $ACC4
    vpblendd \$3, $TEMP2, $TEMP3, $TEMP2
    vpaddq $TEMP1, $ACC5, $ACC5
    vpblendd \$3, $TEMP3, $TEMP4, $TEMP3
    vpaddq $TEMP2, $ACC6, $ACC6
    vpblendd \$3, $TEMP4, $TEMP5, $TEMP4
    vpaddq $TEMP3, $ACC7, $ACC7
    vpaddq $TEMP4, $ACC8, $ACC8

    vpsrlq \$29, $ACC4, $TEMP1
    vpand $AND_MASK, $ACC4, $ACC4
    vpsrlq \$29, $ACC5, $TEMP2
    vpand $AND_MASK, $ACC5, $ACC5
    vpsrlq \$29, $ACC6, $TEMP3
    vpermq \$0x93, $TEMP1, $TEMP1
    vpand $AND_MASK, $ACC6, $ACC6
    vpsrlq \$29, $ACC7, $TEMP4
    vpermq \$0x93, $TEMP2, $TEMP2
    vpand $AND_MASK, $ACC7, $ACC7
    vpsrlq \$29, $ACC8, $TEMP5
    vpermq \$0x93, $TEMP3, $TEMP3
    vpand $AND_MASK, $ACC8, $ACC8
    vpermq \$0x93, $TEMP4, $TEMP4

    vpblendd \$3, $ZERO, $TEMP1, $TEMP0
    vpermq \$0x93, $TEMP5, $TEMP5
    vpblendd \$3, $TEMP1, $TEMP2, $TEMP1
    vpaddq $TEMP0, $ACC4, $ACC4
    vpblendd \$3, $TEMP2, $TEMP3, $TEMP2
    vpaddq $TEMP1, $ACC5, $ACC5
    vpblendd \$3, $TEMP3, $TEMP4, $TEMP3
    vpaddq $TEMP2, $ACC6, $ACC6
    vpblendd \$3, $TEMP4, $TEMP5, $TEMP4
    vpaddq $TEMP3, $ACC7, $ACC7
    vpaddq $TEMP4, $ACC8, $ACC8

    vmovdqu $ACC4, 128-128($rp)
    vmovdqu $ACC5, 160-128($rp)
    vmovdqu $ACC6, 192-128($rp)
    vmovdqu $ACC7, 224-128($rp)
    vmovdqu $ACC8, 256-128($rp)

$code.=<<___ if ($win64);
    movaps -0xd8(%rax),%xmm6
    movaps -0xc8(%rax),%xmm7
    movaps -0xb8(%rax),%xmm8
    movaps -0xa8(%rax),%xmm9
    movaps -0x98(%rax),%xmm10
    movaps -0x88(%rax),%xmm11
    movaps -0x78(%rax),%xmm12
    movaps -0x68(%rax),%xmm13
    movaps -0x58(%rax),%xmm14
    movaps -0x48(%rax),%xmm15
    lea (%rax),%rsp   # restore %rsp
.Lmul_1024_epilogue:
.size rsaz_1024_mul_avx2,.-rsaz_1024_mul_avx2

my ($out,$inp) = $win64 ? ("%rcx","%rdx") : ("%rdi","%rsi");
my @T = map("%r$_",(8..11));

.globl rsaz_1024_red2norm_avx2
.type rsaz_1024_red2norm_avx2,\@abi-omnipotent
rsaz_1024_red2norm_avx2:
    sub \$-128,$inp   # size optimization
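    # convert from the redundant representation (36 digits of 29 bits,
    # one digit per 64-bit word) back to 16 contiguous 64-bit words;
    # digit j carries bits [29*j, 29*j+29) of the 1024-bit value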
for ($j=0,$i=0; $i<16; $i++) {
    while (29*$j<64*($i+1)) {   # load data till boundary
        $code.="    mov `8*$j-128`($inp), @T[0]\n";
        $j++; $k++; push(@T,shift(@T));
    while ($k>1) {   # shift all loaded values but the last one
        $code.="    shl \$`29*($j-$k)`,@T[-$k]\n";
$code.=<<___;   # shift last value
    shl \$`29*($j-1)`, @T[-1]
    shr \$`-29*($j-1)`, @T[0]
    while ($l) {   # accumulate all values
        $code.="    add @T[-$l], %rax\n";
    adc \$0, @T[0]   # consume carry, if any
    mov %rax, 8*$i($out)
.size rsaz_1024_red2norm_avx2,.-rsaz_1024_red2norm_avx2

.globl rsaz_1024_norm2red_avx2
.type rsaz_1024_norm2red_avx2,\@abi-omnipotent
rsaz_1024_norm2red_avx2:
    sub \$-128,$out   # size optimization
    mov \$0x1fffffff,%eax
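    # the inverse of red2norm: split 16 64-bit input words into 36
    # 29-bit digits, with %rax holding the digit mask 2^29-1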
for ($j=0,$i=0; $i<16; $i++) {
    $code.="    mov `8*($i+1)`($inp),@T[1]\n" if ($i<15);
    $code.="    xor @T[1],@T[1]\n" if ($i==15);
    while (29*($j+1)<64*($i+1)) {
    shr \$`29*$j`,@T[-$k]
    and %rax,@T[-$k]   # &0x1fffffff
    mov @T[-$k],`8*$j-128`($out)
    shrd \$`29*$j`,@T[1],@T[0]
    mov @T[0],`8*$j-128`($out)
    mov @T[0],`8*$j-128`($out)   # zero
    mov @T[0],`8*($j+1)-128`($out)
    mov @T[0],`8*($j+2)-128`($out)
    mov @T[0],`8*($j+3)-128`($out)
.size rsaz_1024_norm2red_avx2,.-rsaz_1024_norm2red_avx2

my ($out,$inp,$power) = $win64 ? ("%rcx","%rdx","%r8d") : ("%rdi","%rsi","%edx");

.globl rsaz_1024_scatter5_avx2
.type rsaz_1024_scatter5_avx2,\@abi-omnipotent
rsaz_1024_scatter5_avx2:
    vmovdqu .Lscatter_permd(%rip),%ymm5
    lea ($out,$power),$out
    jmp .Loop_scatter_1024

    vmovdqu ($inp),%ymm0
    vpermd %ymm0,%ymm5,%ymm0
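    # .Lscatter_permd packs the low dword of each qword (digits are
    # below 2^29, so the high dwords are zero) into the low half of
    # %ymm0; only that 16-byte half is stored below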
    vmovdqu %xmm0,($out)
    lea 16*32($out),$out
    jnz .Loop_scatter_1024
.size rsaz_1024_scatter5_avx2,.-rsaz_1024_scatter5_avx2

.globl rsaz_1024_gather5_avx2
.type rsaz_1024_gather5_avx2,\@abi-omnipotent
rsaz_1024_gather5_avx2:
$code.=<<___ if ($win64);
    lea -0x88(%rsp),%rax
.LSEH_begin_rsaz_1024_gather5:
    # I can't trust assembler to use specific encoding :-(
    .byte 0x48,0x8d,0x60,0xe0        # lea -0x20(%rax),%rsp
    .byte 0xc5,0xf8,0x29,0x70,0xe0   # vmovaps %xmm6,-0x20(%rax)
    .byte 0xc5,0xf8,0x29,0x78,0xf0   # vmovaps %xmm7,-0x10(%rax)
    .byte 0xc5,0x78,0x29,0x40,0x00   # vmovaps %xmm8,0(%rax)
    .byte 0xc5,0x78,0x29,0x48,0x10   # vmovaps %xmm9,0x10(%rax)
    .byte 0xc5,0x78,0x29,0x50,0x20   # vmovaps %xmm10,0x20(%rax)
    .byte 0xc5,0x78,0x29,0x58,0x30   # vmovaps %xmm11,0x30(%rax)
    .byte 0xc5,0x78,0x29,0x60,0x40   # vmovaps %xmm12,0x40(%rax)
    .byte 0xc5,0x78,0x29,0x68,0x50   # vmovaps %xmm13,0x50(%rax)
    .byte 0xc5,0x78,0x29,0x70,0x60   # vmovaps %xmm14,0x60(%rax)
    .byte 0xc5,0x78,0x29,0x78,0x70   # vmovaps %xmm15,0x70(%rax)

    lea .Lgather_table(%rip),%r11
    shr \$2,%eax   # cache line number
    shl \$4,$power   # offset within cache line
    vmovdqu -32(%r11),%ymm7   # .Lgather_permd
    vpbroadcastb 8(%r11,%rax), %xmm8
    vpbroadcastb 7(%r11,%rax), %xmm9
    vpbroadcastb 6(%r11,%rax), %xmm10
    vpbroadcastb 5(%r11,%rax), %xmm11
    vpbroadcastb 4(%r11,%rax), %xmm12
    vpbroadcastb 3(%r11,%rax), %xmm13
    vpbroadcastb 2(%r11,%rax), %xmm14
    vpbroadcastb 1(%r11,%rax), %xmm15
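    # constant-time selection: %rax indexes .Lgather_table so that
    # exactly one of the broadcast masks %xmm8-%xmm15 is all-ones and
    # the other seven are all-zeros; the loop below then touches all
    # eight candidate cache lines and keeps only the masked one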
    lea 64($inp,$power),$inp
    mov \$64,%r11   # size optimization
    jmp .Loop_gather_1024

    vpand -64($inp), %xmm8,%xmm0
    vpand ($inp), %xmm9,%xmm1
    vpand 64($inp), %xmm10,%xmm2
    vpand ($inp,%r11,2), %xmm11,%xmm3
    vpor %xmm0,%xmm1,%xmm1
    vpand 64($inp,%r11,2), %xmm12,%xmm4
    vpor %xmm2,%xmm3,%xmm3
    vpand ($inp,%r11,4), %xmm13,%xmm5
    vpor %xmm1,%xmm3,%xmm3
    vpand 64($inp,%r11,4), %xmm14,%xmm6
    vpor %xmm4,%xmm5,%xmm5
    vpand -128($inp,%r11,8), %xmm15,%xmm2
    lea ($inp,%r11,8),$inp
    vpor %xmm3,%xmm5,%xmm5
    vpor %xmm2,%xmm6,%xmm6
    vpor %xmm5,%xmm6,%xmm6
    vpermd %ymm6,%ymm7,%ymm6
    vmovdqu %ymm6,($out)
    jnz .Loop_gather_1024

    vpxor %ymm0,%ymm0,%ymm0
    vmovdqu %ymm0,($out)
$code.=<<___ if ($win64);
    movaps 0x10(%rsp),%xmm7
    movaps 0x20(%rsp),%xmm8
    movaps 0x30(%rsp),%xmm9
    movaps 0x40(%rsp),%xmm10
    movaps 0x50(%rsp),%xmm11
    movaps 0x60(%rsp),%xmm12
    movaps 0x70(%rsp),%xmm13
    movaps 0x80(%rsp),%xmm14
    movaps 0x90(%rsp),%xmm15
.LSEH_end_rsaz_1024_gather5:
.size rsaz_1024_gather5_avx2,.-rsaz_1024_gather5_avx2

.extern OPENSSL_ia32cap_P
.globl rsaz_avx2_eligible
.type rsaz_avx2_eligible,\@abi-omnipotent
    mov OPENSSL_ia32cap_P+8(%rip),%eax
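    # dword 2 of OPENSSL_ia32cap_P caches the CPUID.(EAX=7,ECX=0):EBX
    # feature flags; bit 5 there reports AVX2 support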
.size rsaz_avx2_eligible,.-rsaz_avx2_eligible

.Land_mask:
    .quad 0x1fffffff,0x1fffffff,0x1fffffff,-1
.Lscatter_permd:
    .long 0,2,4,6,7,7,7,7
.Lgather_permd:
    .long 0,7,1,7,2,7,3,7
.Lgather_table:
    .byte 0,0,0,0,0,0,0,0, 0xff,0,0,0,0,0,0,0
.extern __imp_RtlVirtualUnwind
.type rsaz_se_handler,\@abi-omnipotent

    mov 120($context),%rax   # pull context->Rax
    mov 248($context),%rbx   # pull context->Rip

    mov 8($disp),%rsi    # disp->ImageBase
    mov 56($disp),%r11   # disp->HandlerData

    mov 0(%r11),%r10d      # HandlerData[0]
    lea (%rsi,%r10),%r10   # prologue label
    cmp %r10,%rbx          # context->Rip<prologue label
    jb .Lcommon_seh_tail

    mov 152($context),%rax   # pull context->Rsp

    mov 4(%r11),%r10d      # HandlerData[1]
    lea (%rsi,%r10),%r10   # epilogue label
    cmp %r10,%rbx          # context->Rip>=epilogue label
    jae .Lcommon_seh_tail

    mov 160($context),%rax   # pull context->Rbp

    mov %r15,240($context)
    mov %r14,232($context)
    mov %r13,224($context)
    mov %r12,216($context)
    mov %rbp,160($context)
    mov %rbx,144($context)

    lea -0xd8(%rax),%rsi     # %xmm save area
    lea 512($context),%rdi   # & context.Xmm6
    mov \$20,%ecx            # 10*sizeof(%xmm0)/sizeof(%rax)
    .long 0xa548f3fc         # cld; rep movsq

    mov %rax,152($context)   # restore context->Rsp
    mov %rsi,168($context)   # restore context->Rsi
    mov %rdi,176($context)   # restore context->Rdi

    mov 40($disp),%rdi   # disp->ContextRecord
    mov $context,%rsi    # context
    mov \$154,%ecx       # sizeof(CONTEXT)
    .long 0xa548f3fc     # cld; rep movsq

    xor %rcx,%rcx          # arg1, UNW_FLAG_NHANDLER
    mov 8(%rsi),%rdx       # arg2, disp->ImageBase
    mov 0(%rsi),%r8        # arg3, disp->ControlPc
    mov 16(%rsi),%r9       # arg4, disp->FunctionEntry
    mov 40(%rsi),%r10      # disp->ContextRecord
    lea 56(%rsi),%r11      # &disp->HandlerData
    lea 24(%rsi),%r12      # &disp->EstablisherFrame
    mov %r10,32(%rsp)      # arg5
    mov %r11,40(%rsp)      # arg6
    mov %r12,48(%rsp)      # arg7
    mov %rcx,56(%rsp)      # arg8, (NULL)
    call *__imp_RtlVirtualUnwind(%rip)

    mov \$1,%eax   # ExceptionContinueSearch
.size rsaz_se_handler,.-rsaz_se_handler

    .rva .LSEH_begin_rsaz_1024_sqr_avx2
    .rva .LSEH_end_rsaz_1024_sqr_avx2
    .rva .LSEH_info_rsaz_1024_sqr_avx2

    .rva .LSEH_begin_rsaz_1024_mul_avx2
    .rva .LSEH_end_rsaz_1024_mul_avx2
    .rva .LSEH_info_rsaz_1024_mul_avx2

    .rva .LSEH_begin_rsaz_1024_gather5
    .rva .LSEH_end_rsaz_1024_gather5
    .rva .LSEH_info_rsaz_1024_gather5

.LSEH_info_rsaz_1024_sqr_avx2:
    .rva rsaz_se_handler
    .rva .Lsqr_1024_body,.Lsqr_1024_epilogue
.LSEH_info_rsaz_1024_mul_avx2:
    .rva rsaz_se_handler
    .rva .Lmul_1024_body,.Lmul_1024_epilogue
.LSEH_info_rsaz_1024_gather5:
    .byte 0x01,0x33,0x16,0x00
    .byte 0x36,0xf8,0x09,0x00   # vmovaps 0x90(rsp),xmm15
    .byte 0x31,0xe8,0x08,0x00   # vmovaps 0x80(rsp),xmm14
    .byte 0x2c,0xd8,0x07,0x00   # vmovaps 0x70(rsp),xmm13
    .byte 0x27,0xc8,0x06,0x00   # vmovaps 0x60(rsp),xmm12
    .byte 0x22,0xb8,0x05,0x00   # vmovaps 0x50(rsp),xmm11
    .byte 0x1d,0xa8,0x04,0x00   # vmovaps 0x40(rsp),xmm10
    .byte 0x18,0x98,0x03,0x00   # vmovaps 0x30(rsp),xmm9
    .byte 0x13,0x88,0x02,0x00   # vmovaps 0x20(rsp),xmm8
    .byte 0x0e,0x78,0x01,0x00   # vmovaps 0x10(rsp),xmm7
    .byte 0x09,0x68,0x00,0x00   # vmovaps 0x00(rsp),xmm6
    .byte 0x04,0x01,0x15,0x00   # sub rsp,0xa8
foreach (split("\n",$code)) {
    s/\`([^\`]*)\`/eval($1)/ge;

    s/\b(sh[rl]d?\s+\$)(-?[0-9]+)/$1.$2%64/ge or

    s/\b(vmov[dq])\b(.+)%ymm([0-9]+)/$1$2%xmm$3/go or
    s/\b(vmovdqu)\b(.+)%x%ymm([0-9]+)/$1$2%xmm$3/go or
    s/\b(vpinsr[qd])\b(.+)%ymm([0-9]+)/$1$2%xmm$3/go or
    s/\b(vpextr[qd])\b(.+)%ymm([0-9]+)/$1$2%xmm$3/go or
    s/\b(vpbroadcast[qd]\s+)%ymm([0-9]+)/$1%xmm$2/go;

print <<___;   # assembler is too old
.globl rsaz_avx2_eligible
.type rsaz_avx2_eligible,\@abi-omnipotent
.size rsaz_avx2_eligible,.-rsaz_avx2_eligible

.globl rsaz_1024_sqr_avx2
.globl rsaz_1024_mul_avx2
.globl rsaz_1024_norm2red_avx2
.globl rsaz_1024_red2norm_avx2
.globl rsaz_1024_scatter5_avx2
.globl rsaz_1024_gather5_avx2
.type rsaz_1024_sqr_avx2,\@abi-omnipotent
rsaz_1024_norm2red_avx2:
rsaz_1024_red2norm_avx2:
rsaz_1024_scatter5_avx2:
rsaz_1024_gather5_avx2:
    .byte 0x0f,0x0b   # ud2
.size rsaz_1024_sqr_avx2,.-rsaz_1024_sqr_avx2