From: Andy Polyakov <appro@openssl.org>
Date: Fri, 5 Jul 2013 19:30:18 +0000 (+0200)
Subject: Add RSAZ assembly modules.
X-Git-Tag: master-post-reformat~1264
X-Git-Url: https://git.librecmc.org/?a=commitdiff_plain;h=0b4bb91db65697ab6d3a0fc05b140887cbce3080;p=oweals%2Fopenssl.git

Add RSAZ assembly modules.

RT: 2582, 2850
---

diff --git a/crypto/bn/asm/rsaz-avx2.pl b/crypto/bn/asm/rsaz-avx2.pl
new file mode 100755
index 0000000000..33f20e64a1
--- /dev/null
+++ b/crypto/bn/asm/rsaz-avx2.pl
@@ -0,0 +1,1863 @@
+#!/usr/bin/env perl
+
+#******************************************************************************
+#* Copyright(c) 2012, Intel Corp.                                             
+#* Developers and authors:                                                    
+#* Shay Gueron (1, 2), and Vlad Krasnov (1)                                   
+#* (1) Intel Corporation, Israel Development Center, Haifa, Israel
+#* (2) University of Haifa, Israel                                              
+#******************************************************************************
+#* LICENSE:                                                                
+#* This submission to OpenSSL is to be made available under the OpenSSL  
+#* license, and only to the OpenSSL project, in order to allow integration    
+#* into the publicly distributed code. 
+#* The use of this code, or portions of this code, or concepts embedded in
+#* this code, or modification of this code and/or algorithm(s) in it, or the
+#* use of this code for any other purpose than stated above, requires special
+#* licensing.                                                                  
+#******************************************************************************
+#* DISCLAIMER:                                                                
+#* THIS SOFTWARE IS PROVIDED BY THE CONTRIBUTORS AND THE COPYRIGHT OWNERS     
+#* ``AS IS''. ANY EXPRESSED OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED 
+#* TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 
+#* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE CONTRIBUTORS OR THE COPYRIGHT
+#* OWNERS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, 
+#* OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF    
+#* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS   
+#* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN    
+#* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)    
+#* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 
+#* POSSIBILITY OF SUCH DAMAGE.                                                
+#******************************************************************************
+#* Reference:                                                                 
+#* [1]	S. Gueron, V. Krasnov: "Software Implementation of Modular
+#*	Exponentiation,  Using Advanced Vector Instructions Architectures",
+#*	F. Ozbudak and F. Rodriguez-Henriquez (Eds.): WAIFI 2012, LNCS 7369,
+#*	pp. 119-135, 2012. Springer-Verlag Berlin Heidelberg 2012
+#* [2]	S. Gueron: "Efficient Software Implementations of Modular
+#*	Exponentiation", Journal of Cryptographic Engineering 2:31-43 (2012).
+#* [3]	S. Gueron, V. Krasnov: "Speeding up Big-numbers Squaring", IEEE
+#*	Proceedings of 9th International Conference on Information Technology:
+#*	New Generations (ITNG 2012), pp.821-823 (2012)
+#* [4]	S. Gueron, V. Krasnov: "[PATCH] Efficient and side channel analysis
+#*	resistant 1024-bit modular exponentiation, for optimizing RSA2048
+#*	on AVX2 capable x86_64 platforms",
+#*	http://rt.openssl.org/Ticket/Display.html?id=2850&user=guest&pass=guest
+#******************************************************************************
+
+# +10% improvement by <appro@openssl.org>
+#
+# rsa2048 sign/sec	OpenSSL 1.0.1	scalar(*)	this
+# 2GHz Haswell		544		632/+16%	947/+74%
+#
+# (*)	if system doesn't support AVX2, for reference purposes;
+
+$flavour = shift;
+$output  = shift;
+if ($flavour =~ /\./) { $output = $flavour; undef $flavour; }
+
+$win64=0; $win64=1 if ($flavour =~ /[nm]asm|mingw64/ || $output =~ /\.asm$/);
+
+$0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
+( $xlate="${dir}x86_64-xlate.pl" and -f $xlate ) or
+( $xlate="${dir}../../perlasm/x86_64-xlate.pl" and -f $xlate) or
+die "can't locate x86_64-xlate.pl";
+
+if (`$ENV{CC} -Wa,-v -c -o /dev/null -x assembler /dev/null 2>&1`
+		=~ /GNU assembler version ([2-9]\.[0-9]+)/) {
+	$avx = ($1>=2.19) + ($1>=2.22);
+}
+
+if (!$avx && $win64 && ($flavour =~ /nasm/ || $ENV{ASM} =~ /nasm/) &&
+	    `nasm -v 2>&1` =~ /NASM version ([2-9]\.[0-9]+)/) {
+	$avx = ($1>=2.09) + ($1>=2.11);
+}
+
+if (!$avx && $win64 && ($flavour =~ /masm/ || $ENV{ASM} =~ /ml64/) &&
+	    `ml64 2>&1` =~ /Version ([0-9]+)\./) {
+	$avx = ($1>=10) + ($1>=11);
+}
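+
+# $avx ends up 0 (assembler has no usable AVX support), 1 (AVX) or 2 (AVX2
+# and friends); the AVX2 code below is emitted only for $avx>1, otherwise a
+# stub that reports ineligibility is printed at the end of this file.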
+
+open OUT,"| $^X $xlate $flavour $output";
+*STDOUT = *OUT;
+
+if ($avx>1) {{{
+{ # void AMS_WW(
+my $rp="%rdi";	# BN_ULONG *rp,
+my $ap="%rsi";	# const BN_ULONG *ap,
+my $np="%rdx";	# const BN_ULONG *np,
+my $n0="%ecx";	# const BN_ULONG n0,
+my $rep="%r8d";	# int repeat);
+
+# The registers that hold the accumulated redundant result
+# The AMM works on 1024 bit operands, and redundant word size is 29
+# Therefore: ceil(1024/29)/4 = 9
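+#
+# In plainer terms (a rough sketch of the layout, for orientation only):
+# the 1024-bit operand is split into ceil(1024/29) = 36 digits of 29 bits,
+# each zero-extended to a 64-bit lane, so that 29x29-bit products and a fair
+# number of their sums can be accumulated without carry propagation; the 36
+# lanes occupy 36/4 = 9 ymm registers. "AMS"/"AMM" presumably stand for
+# Almost Montgomery Squaring/Multiplication, i.e. the result is reduced
+# below 2^1024 rather than below the modulus, which is sufficient inside an
+# exponentiation chain.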
+my $ACC0="%ymm0";
+my $ACC1="%ymm1";
+my $ACC2="%ymm2";
+my $ACC3="%ymm3";
+my $ACC4="%ymm4";
+my $ACC5="%ymm5";
+my $ACC6="%ymm6";
+my $ACC7="%ymm7";
+my $ACC8="%ymm8";
+my $ACC9="%ymm9";
+# Registers that hold the broadcasted words of bp, currently used
+my $B1="%ymm10";
+my $B2="%ymm11";
+# Registers that hold the broadcasted words of Y, currently used
+my $Y1="%ymm12";
+my $Y2="%ymm13";
+# Helper registers
+my $TEMP1="%ymm14";
+my $AND_MASK="%ymm15";
+# alu registers that hold the first words of the ACC
+my $r0="%r9";
+my $r1="%r10";
+my $r2="%r11";
+my $r3="%r12";
+
+my $i="%r14d";			# loop counter
+my $tmp = "%r15";
+
+my $FrameSize=32*18+32*8;	# place for A^2 and 2*A
+
+my $aap=$r0;
+my $tp0="%rbx";
+my $tp1=$r3;
+
+$np="%r13";			# reassigned argument
+
+$code.=<<___;
+.globl	rsaz_1024_sqr_avx2
+.type	rsaz_1024_sqr_avx2,\@function,5
+.align	64
+rsaz_1024_sqr_avx2:		# 702 cycles, 14% faster than rsaz_1024_mul_avx2
+	lea	(%rsp), %rax
+	push	%rbx
+	push	%rbp
+	push	%r12
+	push	%r13
+	push	%r14
+	push	%r15
+___
+$code.=<<___ if ($win64);
+	lea	-0xa8(%rsp),%rsp
+	movaps  %xmm6,-0xd8(%rax)
+	movaps  %xmm7,-0xc8(%rax)
+	movaps  %xmm8,-0xb8(%rax)
+	movaps  %xmm9,-0xa8(%rax)
+	movaps  %xmm10,-0x98(%rax)
+	movaps  %xmm11,-0x88(%rax)
+	movaps  %xmm12,-0x78(%rax)
+	movaps  %xmm13,-0x68(%rax)
+	movaps  %xmm14,-0x58(%rax)
+	movaps  %xmm15,-0x48(%rax)
+.Lsqr_1024_body:
+___
+$code.=<<___;
+	mov	%rax,%rbp
+	vzeroall
+	mov	%rdx, $np			# reassigned argument
+	sub	\$$FrameSize, %rsp
+	mov	$np, $tmp
+	sub	\$-128, $rp			# size optimization
+	sub	\$-128, $ap
+	sub	\$-128, $np
+
+	and	\$4095, $tmp			# see if $np crosses page
+	add	\$32*10, $tmp
+	shr	\$12, $tmp
+	jz	.Lsqr_1024_no_n_copy
+
+	# unaligned 256-bit load that crosses page boundary can
+	# cause >2x performance degradation here, so if $np does
+	# cross page boundary, copy it to stack and make sure stack
+	# frame doesn't...
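+	# (the page test above takes the 12 low bits of the original $np,
+	#  adds 32*10 and checks whether the sum spills into the next 4KB
+	#  page, i.e. whether the 10x32-byte vector region would straddle
+	#  a page boundary)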
+	sub		\$32*10,%rsp
+	vmovdqu		32*0-128($np), $ACC0
+	and		\$-2048, %rsp
+	vmovdqu		32*1-128($np), $ACC1
+	vmovdqu		32*2-128($np), $ACC2
+	vmovdqu		32*3-128($np), $ACC3
+	vmovdqu		32*4-128($np), $ACC4
+	vmovdqu		32*5-128($np), $ACC5
+	vmovdqu		32*6-128($np), $ACC6
+	vmovdqu		32*7-128($np), $ACC7
+	vmovdqu		32*8-128($np), $ACC8
+	lea		$FrameSize+128(%rsp),$np
+	vmovdqu		$ACC0, 32*0-128($np)
+	vmovdqu		$ACC1, 32*1-128($np)
+	vmovdqu		$ACC2, 32*2-128($np)
+	vmovdqu		$ACC3, 32*3-128($np)
+	vmovdqu		$ACC4, 32*4-128($np)
+	vmovdqu		$ACC5, 32*5-128($np)
+	vmovdqu		$ACC6, 32*6-128($np)
+	vmovdqu		$ACC7, 32*7-128($np)
+	vmovdqu		$ACC8, 32*8-128($np)
+	vmovdqu		$ACC9, 32*9-128($np)	# $ACC9 is zero after vzeroall
+
+.Lsqr_1024_no_n_copy:
+	and		\$-1024, %rsp
+
+	vmovdqu		32*1-128($ap), $ACC1
+	vmovdqu		32*2-128($ap), $ACC2
+	vmovdqu		32*3-128($ap), $ACC3
+	vmovdqu		32*4-128($ap), $ACC4
+	vmovdqu		32*5-128($ap), $ACC5
+	vmovdqu		32*6-128($ap), $ACC6
+	vmovdqu		32*7-128($ap), $ACC7
+	vmovdqu		32*8-128($ap), $ACC8
+
+	lea	192(%rsp), $tp0			# 64+128=192
+	vpbroadcastq	.Land_mask(%rip), $AND_MASK
+	jmp	.LOOP_GRANDE_SQR_1024
+
+.align	32
+.LOOP_GRANDE_SQR_1024:
+	lea	32*18+128(%rsp), $aap		# size optimization
+	lea	448(%rsp), $tp1			# 64+128+256=448
+
+	# the squaring is performed as described in Variant B of
+	# "Speeding up Big-Number Squaring", so start by calculating
+	# the A*2=A+A vector
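+	# (schematically: A^2 = sum_i a_i^2 * 2^(58*i)
+	#		+ sum_{i<j} (2*a_i)*a_j * 2^(29*(i+j)),
+	#  so the doubled vector written out below lets each cross product
+	#  be accumulated once instead of twice)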
+	vpaddq		$ACC1, $ACC1, $ACC1
+	 vpbroadcastq	32*0-128($ap), $B1
+	vpaddq		$ACC2, $ACC2, $ACC2
+	vmovdqa		$ACC1, 32*0-128($aap)
+	vpaddq		$ACC3, $ACC3, $ACC3
+	vmovdqa		$ACC2, 32*1-128($aap)
+	vpaddq		$ACC4, $ACC4, $ACC4
+	vmovdqa		$ACC3, 32*2-128($aap)
+	vpaddq		$ACC5, $ACC5, $ACC5
+	vmovdqa		$ACC4, 32*3-128($aap)
+	vpaddq		$ACC6, $ACC6, $ACC6
+	vmovdqa		$ACC5, 32*4-128($aap)
+	vpaddq		$ACC7, $ACC7, $ACC7
+	vmovdqa		$ACC6, 32*5-128($aap)
+	vpaddq		$ACC8, $ACC8, $ACC8
+	vmovdqa		$ACC7, 32*6-128($aap)
+	vpxor		$ACC9, $ACC9, $ACC9
+	vmovdqa		$ACC8, 32*7-128($aap)
+
+	vpmuludq	32*0-128($ap), $B1, $ACC0
+	 vpbroadcastq	32*1-128($ap), $B2
+	 vmovdqu	$ACC9, 32*9-192($tp0)	# zero upper half
+	vpmuludq	$B1, $ACC1, $ACC1
+	 vmovdqu	$ACC9, 32*10-448($tp1)
+	vpmuludq	$B1, $ACC2, $ACC2
+	 vmovdqu	$ACC9, 32*11-448($tp1)
+	vpmuludq	$B1, $ACC3, $ACC3
+	 vmovdqu	$ACC9, 32*12-448($tp1)
+	vpmuludq	$B1, $ACC4, $ACC4
+	 vmovdqu	$ACC9, 32*13-448($tp1)
+	vpmuludq	$B1, $ACC5, $ACC5
+	 vmovdqu	$ACC9, 32*14-448($tp1)
+	vpmuludq	$B1, $ACC6, $ACC6
+	 vmovdqu	$ACC9, 32*15-448($tp1)
+	vpmuludq	$B1, $ACC7, $ACC7
+	 vmovdqu	$ACC9, 32*16-448($tp1)
+	vpmuludq	$B1, $ACC8, $ACC8
+	 vpbroadcastq	32*2-128($ap), $B1
+	 vmovdqu	$ACC9, 32*17-448($tp1)
+
+	xor	$tmp, $tmp
+	mov 	\$4, $i
+	jmp	.Lentry_1024
+___
+$TEMP0=$Y1;
+$TEMP2=$Y2;
+$code.=<<___;
+.align	32
+.LOOP_SQR_1024:
+	vmovdqu		32*0(%rsp,$tmp), $TEMP0	# 32*0-192($tp0,$tmp)
+	vmovdqu		32*1(%rsp,$tmp), $TEMP1	# 32*1-192($tp0,$tmp)
+	 vpbroadcastq	32*1-128($ap,$tmp), $B2
+	vpmuludq	32*0-128($ap), $B1, $ACC0
+	vmovdqu		32*2-192($tp0,$tmp), $TEMP2
+	vpaddq		$TEMP0, $ACC0, $ACC0
+	vpmuludq	32*0-128($aap), $B1, $ACC1
+	vmovdqu		32*3-192($tp0,$tmp), $TEMP0
+	vpaddq		$TEMP1, $ACC1, $ACC1
+	vpmuludq	32*1-128($aap), $B1, $ACC2
+	vmovdqu		32*4-192($tp0,$tmp), $TEMP1
+	vpaddq		$TEMP2, $ACC2, $ACC2
+	vpmuludq	32*2-128($aap), $B1, $ACC3
+	vmovdqu		32*5-192($tp0,$tmp), $TEMP2
+	vpaddq		$TEMP0, $ACC3, $ACC3
+	vpmuludq	32*3-128($aap), $B1, $ACC4
+	vmovdqu		32*6-192($tp0,$tmp), $TEMP0
+	vpaddq		$TEMP1, $ACC4, $ACC4
+	vpmuludq	32*4-128($aap), $B1, $ACC5
+	vmovdqu		32*7-192($tp0,$tmp), $TEMP1
+	vpaddq		$TEMP2, $ACC5, $ACC5
+	vpmuludq	32*5-128($aap), $B1, $ACC6
+	vmovdqu		32*8-192($tp0,$tmp), $TEMP2
+	vpaddq		$TEMP0, $ACC6, $ACC6
+	vpmuludq	32*6-128($aap), $B1, $ACC7
+	vpaddq		$TEMP1, $ACC7, $ACC7
+	vpmuludq	32*7-128($aap), $B1, $ACC8
+	 vpbroadcastq	32*2-128($ap,$tmp), $B1
+	vpaddq		$TEMP2, $ACC8, $ACC8
+.Lentry_1024:
+	vmovdqu		$ACC0, 32*0(%rsp,$tmp)	# 32*0-192($tp0,$tmp)
+	vmovdqu		$ACC1, 32*1(%rsp,$tmp)	# 32*1-192($tp0,$tmp)
+
+	vpmuludq	32*1-128($ap), $B2, $TEMP0
+	vpaddq		$TEMP0, $ACC2, $ACC2
+	vpmuludq	32*1-128($aap), $B2, $TEMP1
+	vpaddq		$TEMP1, $ACC3, $ACC3
+	vpmuludq	32*2-128($aap), $B2, $TEMP2
+	vpaddq		$TEMP2, $ACC4, $ACC4
+	vpmuludq	32*3-128($aap), $B2, $TEMP0
+	vpaddq		$TEMP0, $ACC5, $ACC5
+	vpmuludq	32*4-128($aap), $B2, $TEMP1
+	vpaddq		$TEMP1, $ACC6, $ACC6
+	vpmuludq	32*5-128($aap), $B2, $TEMP2
+	vmovdqu		32*9-192($tp0,$tmp), $TEMP1
+	vpaddq		$TEMP2, $ACC7, $ACC7
+	vpmuludq	32*6-128($aap), $B2, $TEMP0
+	vpaddq		$TEMP0, $ACC8, $ACC8
+	vpmuludq	32*7-128($aap), $B2, $ACC0
+	 vpbroadcastq	32*3-128($ap,$tmp), $B2
+	vpaddq		$TEMP1, $ACC0, $ACC0
+
+	vmovdqu		$ACC2, 32*2-192($tp0,$tmp)
+	vmovdqu		$ACC3, 32*3-192($tp0,$tmp)
+
+	vpmuludq	32*2-128($ap), $B1, $TEMP2
+	vpaddq		$TEMP2, $ACC4, $ACC4
+	vpmuludq	32*2-128($aap), $B1, $TEMP0
+	vpaddq		$TEMP0, $ACC5, $ACC5
+	vpmuludq	32*3-128($aap), $B1, $TEMP1
+	vpaddq		$TEMP1, $ACC6, $ACC6
+	vpmuludq	32*4-128($aap), $B1, $TEMP2
+	vpaddq		$TEMP2, $ACC7, $ACC7
+	vpmuludq	32*5-128($aap), $B1, $TEMP0
+	vmovdqu		32*10-448($tp1,$tmp), $TEMP2
+	vpaddq		$TEMP0, $ACC8, $ACC8
+	vpmuludq	32*6-128($aap), $B1, $TEMP1
+	vpaddq		$TEMP1, $ACC0, $ACC0
+	vpmuludq	32*7-128($aap), $B1, $ACC1
+	 vpbroadcastq	32*4-128($ap,$tmp), $B1
+	vpaddq		$TEMP2, $ACC1, $ACC1
+
+	vmovdqu		$ACC4, 32*4-192($tp0,$tmp)
+	vmovdqu		$ACC5, 32*5-192($tp0,$tmp)
+
+	vpmuludq	32*3-128($ap), $B2, $TEMP0
+	vpaddq		$TEMP0, $ACC6, $ACC6
+	vpmuludq	32*3-128($aap), $B2, $TEMP1
+	vpaddq		$TEMP1, $ACC7, $ACC7
+	vpmuludq	32*4-128($aap), $B2, $TEMP2
+	vpaddq		$TEMP2, $ACC8, $ACC8
+	vpmuludq	32*5-128($aap), $B2, $TEMP0
+	vmovdqu		32*11-448($tp1,$tmp), $TEMP2
+	vpaddq		$TEMP0, $ACC0, $ACC0
+	vpmuludq	32*6-128($aap), $B2, $TEMP1
+	vpaddq		$TEMP1, $ACC1, $ACC1
+	vpmuludq	32*7-128($aap), $B2, $ACC2
+	 vpbroadcastq	32*5-128($ap,$tmp), $B2
+	vpaddq		$TEMP2, $ACC2, $ACC2	
+
+	vmovdqu		$ACC6, 32*6-192($tp0,$tmp)
+	vmovdqu		$ACC7, 32*7-192($tp0,$tmp)
+
+	vpmuludq	32*4-128($ap), $B1, $TEMP0
+	vpaddq		$TEMP0, $ACC8, $ACC8
+	vpmuludq	32*4-128($aap), $B1, $TEMP1
+	vpaddq		$TEMP1, $ACC0, $ACC0
+	vpmuludq	32*5-128($aap), $B1, $TEMP2
+	vmovdqu		32*12-448($tp1,$tmp), $TEMP1
+	vpaddq		$TEMP2, $ACC1, $ACC1
+	vpmuludq	32*6-128($aap), $B1, $TEMP0
+	vpaddq		$TEMP0, $ACC2, $ACC2
+	vpmuludq	32*7-128($aap), $B1, $ACC3
+	 vpbroadcastq	32*6-128($ap,$tmp), $B1
+	vpaddq		$TEMP1, $ACC3, $ACC3
+
+	vmovdqu		$ACC8, 32*8-192($tp0,$tmp)
+	vmovdqu		$ACC0, 32*9-192($tp0,$tmp)
+
+	vpmuludq	32*5-128($ap), $B2, $TEMP2
+	vpaddq		$TEMP2, $ACC1, $ACC1
+	vpmuludq	32*5-128($aap), $B2, $TEMP0
+	vmovdqu		32*13-448($tp1,$tmp), $TEMP2
+	vpaddq		$TEMP0, $ACC2, $ACC2
+	vpmuludq	32*6-128($aap), $B2, $TEMP1
+	vpaddq		$TEMP1, $ACC3, $ACC3
+	vpmuludq	32*7-128($aap), $B2, $ACC4
+	 vpbroadcastq	32*7-128($ap,$tmp), $B2
+	vpaddq		$TEMP2, $ACC4, $ACC4
+
+	vmovdqu		$ACC1, 32*10-448($tp1,$tmp)
+	vmovdqu		$ACC2, 32*11-448($tp1,$tmp)
+
+	vpmuludq	32*6-128($ap), $B1, $TEMP0
+	vmovdqu		32*14-448($tp1,$tmp), $TEMP2
+	vpaddq		$TEMP0, $ACC3, $ACC3
+	vpmuludq	32*6-128($aap), $B1, $TEMP1
+	 vpbroadcastq	32*8-128($ap,$tmp), $ACC0	# borrow $ACC0 for $B1
+	vpaddq		$TEMP1, $ACC4, $ACC4
+	vpmuludq	32*7-128($aap), $B1, $ACC5
+	 vpbroadcastq	32*0+8-128($ap,$tmp), $B1	# for next iteration
+	vpaddq		$TEMP2, $ACC5, $ACC5
+	vmovdqu		32*15-448($tp1,$tmp), $TEMP1
+
+	vmovdqu		$ACC3, 32*12-448($tp1,$tmp)
+	vmovdqu		$ACC4, 32*13-448($tp1,$tmp)
+
+	vpmuludq	32*7-128($ap), $B2, $TEMP0
+	vmovdqu		32*16-448($tp1,$tmp), $TEMP2
+	vpaddq		$TEMP0, $ACC5, $ACC5
+	vpmuludq	32*7-128($aap), $B2, $ACC6
+	vpaddq		$TEMP1, $ACC6, $ACC6
+
+	vpmuludq	32*8-128($ap), $ACC0, $ACC7
+	vmovdqu		$ACC5, 32*14-448($tp1,$tmp)
+	vpaddq		$TEMP2, $ACC7, $ACC7
+	vmovdqu		$ACC6, 32*15-448($tp1,$tmp)
+	vmovdqu		$ACC7, 32*16-448($tp1,$tmp)
+
+	lea	8($tmp), $tmp
+	dec	$i        
+	jnz	.LOOP_SQR_1024
+___
+$ZERO = $ACC9;
+$TEMP0 = $B1;
+$TEMP2 = $B2;
+$TEMP3 = $Y1;
+$TEMP4 = $Y2;
+$code.=<<___;
+	# we need to fix indexes 32-39 to avoid overflow
+	vmovdqu		32*8-192($tp0), $ACC8
+	vmovdqu		32*9-192($tp0), $ACC1
+	vmovdqu		32*10-448($tp1), $ACC2
+
+	vpsrlq		\$29, $ACC8, $TEMP1
+	vpand		$AND_MASK, $ACC8, $ACC8
+	vpsrlq		\$29, $ACC1, $TEMP2
+	vpand		$AND_MASK, $ACC1, $ACC1
+
+	vpermq		\$0x93, $TEMP1, $TEMP1
+	vpxor		$ZERO, $ZERO, $ZERO
+	vpermq		\$0x93, $TEMP2, $TEMP2
+
+	vpblendd	\$3, $ZERO, $TEMP1, $TEMP0
+	vpblendd	\$3, $TEMP1, $TEMP2, $TEMP1
+	vpaddq		$TEMP0, $ACC8, $ACC8
+	vpblendd	\$3, $TEMP2, $ZERO, $TEMP2
+	vpaddq		$TEMP1, $ACC1, $ACC1
+	vpaddq		$TEMP2, $ACC2, $ACC2
+	vmovdqu		$ACC1, 32*9-192($tp0)
+	vmovdqu		$ACC2, 32*10-448($tp1)
+
+	mov	(%rsp), %rax
+	mov	8(%rsp), $r1
+	mov	16(%rsp), $r2
+	mov	24(%rsp), $r3
+	vmovdqu	32*1(%rsp), $ACC1
+	vmovdqu	32*2-192($tp0), $ACC2
+	vmovdqu	32*3-192($tp0), $ACC3
+	vmovdqu	32*4-192($tp0), $ACC4
+	vmovdqu	32*5-192($tp0), $ACC5
+	vmovdqu	32*6-192($tp0), $ACC6
+	vmovdqu	32*7-192($tp0), $ACC7
+
+	mov	%rax, $r0
+	imull	$n0, %eax
+	and	\$0x1fffffff, %eax
+	vmovd	%eax, $Y1
+
+	mov	%rax, %rdx
+	imulq	-128($np), %rax
+	 vpbroadcastq	$Y1, $Y1
+	add	%rax, $r0
+	mov	%rdx, %rax
+	imulq	8-128($np), %rax
+	shr	\$29, $r0
+	add	%rax, $r1
+	mov	%rdx, %rax
+	imulq	16-128($np), %rax
+	add	$r0, $r1
+	add	%rax, $r2
+	imulq	24-128($np), %rdx
+	add	%rdx, $r3
+
+	mov	$r1, %rax
+	imull	$n0, %eax
+	and	\$0x1fffffff, %eax
+
+	mov \$9, $i
+	jmp .LOOP_REDUCE_1024
+
+.align	32
+.LOOP_REDUCE_1024:
+	vmovd	%eax, $Y2
+	vpbroadcastq	$Y2, $Y2
+
+	vpmuludq	32*1-128($np), $Y1, $TEMP0
+	 mov	%rax, %rdx
+	 imulq	-128($np), %rax
+	vpaddq		$TEMP0, $ACC1, $ACC1
+	vpmuludq	32*2-128($np), $Y1, $TEMP1
+	 add	%rax, $r1
+	 mov	%rdx, %rax
+	 imulq	8-128($np), %rax
+	vpaddq		$TEMP1, $ACC2, $ACC2
+	vpmuludq	32*3-128($np), $Y1, $TEMP2
+	 add	%rax, $r2
+	 mov	%rdx, %rax
+	 imulq	16-128($np), %rax
+	 shr	\$29, $r1
+	vpaddq		$TEMP2, $ACC3, $ACC3
+	vpmuludq	32*4-128($np), $Y1, $TEMP0
+	 add	%rax, $r3
+	 add	$r1, $r2
+	vpaddq		$TEMP0, $ACC4, $ACC4
+	vpmuludq	32*5-128($np), $Y1, $TEMP1
+	 mov	$r2, %rax
+	 imull	$n0, %eax
+	vpaddq		$TEMP1, $ACC5, $ACC5
+	vpmuludq	32*6-128($np), $Y1, $TEMP2
+	 and	\$0x1fffffff, %eax
+	vpaddq		$TEMP2, $ACC6, $ACC6
+	vpmuludq	32*7-128($np), $Y1, $TEMP0
+	vpaddq		$TEMP0, $ACC7, $ACC7
+	vpmuludq	32*8-128($np), $Y1, $TEMP1
+	 vmovd	%eax, $Y1
+	 vmovdqu	32*1-8-128($np), $TEMP2
+	vpaddq		$TEMP1, $ACC8, $ACC8
+	 vmovdqu	32*2-8-128($np), $TEMP0
+	 vpbroadcastq	$Y1, $Y1
+
+	vpmuludq	$Y2, $TEMP2, $TEMP2
+	vmovdqu		32*3-8-128($np), $TEMP1
+	 mov	%rax, %rdx
+	 imulq	-128($np), %rax
+	vpaddq		$TEMP2, $ACC1, $ACC1
+	vpmuludq	$Y2, $TEMP0, $TEMP0
+	vmovdqu		32*4-8-128($np), $TEMP2
+	 add	%rax, $r2
+	 mov	%rdx, %rax
+	 imulq	8-128($np), %rax
+	vpaddq		$TEMP0, $ACC2, $ACC2
+	 add	$r3, %rax
+	 shr	\$29, $r2
+	vpmuludq	$Y2, $TEMP1, $TEMP1
+	vmovdqu		32*5-8-128($np), $TEMP0
+	 add	$r2, %rax
+	vpaddq		$TEMP1, $ACC3, $ACC3
+	vpmuludq	$Y2, $TEMP2, $TEMP2
+	vmovdqu		32*6-8-128($np), $TEMP1
+	 mov	%rax, $r3
+	 imull	$n0, %eax
+	vpaddq		$TEMP2, $ACC4, $ACC4
+	vpmuludq	$Y2, $TEMP0, $TEMP0
+	vmovdqu		32*7-8-128($np), $TEMP2
+	 and	\$0x1fffffff, %eax
+	vpaddq		$TEMP0, $ACC5, $ACC5
+	vpmuludq	$Y2, $TEMP1, $TEMP1
+	vmovdqu		32*8-8-128($np), $TEMP0
+	vpaddq		$TEMP1, $ACC6, $ACC6
+	vpmuludq	$Y2, $TEMP2, $TEMP2
+	vmovdqu		32*9-8-128($np), $ACC9
+	 vmovd	%eax, $ACC0			# borrow ACC0 for Y2
+	 imulq	-128($np), %rax
+	vpaddq		$TEMP2, $ACC7, $ACC7
+	vpmuludq	$Y2, $TEMP0, $TEMP0
+	 vmovdqu	32*1-16-128($np), $TEMP1
+	 vpbroadcastq	$ACC0, $ACC0
+	vpaddq		$TEMP0, $ACC8, $ACC8
+	vpmuludq	$Y2, $ACC9, $ACC9
+	 vmovdqu	32*2-16-128($np), $TEMP2
+	 add	%rax, $r3
+
+___
+($ACC0,$Y2)=($Y2,$ACC0);
+$code.=<<___;
+	 vmovdqu	32*1-24-128($np), $ACC0
+	vpmuludq	$Y1, $TEMP1, $TEMP1
+	vmovdqu		32*3-16-128($np), $TEMP0
+	vpaddq		$TEMP1, $ACC1, $ACC1
+	 vpmuludq	$Y2, $ACC0, $ACC0
+	vpmuludq	$Y1, $TEMP2, $TEMP2
+	vmovdqu		32*4-16-128($np), $TEMP1
+	 vpaddq		$ACC1, $ACC0, $ACC0
+	vpaddq		$TEMP2, $ACC2, $ACC2
+	vpmuludq	$Y1, $TEMP0, $TEMP0
+	vmovdqu		32*5-16-128($np), $TEMP2
+	 vmovq		$ACC0, %rax
+	 vmovdqu	$ACC0, (%rsp)		# transfer $r0-$r3
+	vpaddq		$TEMP0, $ACC3, $ACC3
+	vpmuludq	$Y1, $TEMP1, $TEMP1
+	vmovdqu		32*6-16-128($np), $TEMP0
+	vpaddq		$TEMP1, $ACC4, $ACC4
+	vpmuludq	$Y1, $TEMP2, $TEMP2
+	vmovdqu		32*7-16-128($np), $TEMP1
+	vpaddq		$TEMP2, $ACC5, $ACC5
+	vpmuludq	$Y1, $TEMP0, $TEMP0
+	vmovdqu		32*8-16-128($np), $TEMP2
+	vpaddq		$TEMP0, $ACC6, $ACC6
+	vpmuludq	$Y1, $TEMP1, $TEMP1
+	vmovdqu		32*9-16-128($np), $TEMP0
+	 shr	\$29, $r3
+	vpaddq		$TEMP1, $ACC7, $ACC7
+	vpmuludq	$Y1, $TEMP2, $TEMP2
+	 vmovdqu	32*2-24-128($np), $TEMP1
+	 add	$r3, %rax
+	 mov	%rax, $r0
+	 imull	$n0, %eax
+	vpaddq		$TEMP2, $ACC8, $ACC8
+	vpmuludq	$Y1, $TEMP0, $TEMP0
+	 and	\$0x1fffffff, %eax
+	 vmovd	%eax, $Y1
+	 vmovdqu	32*3-24-128($np), $TEMP2
+	vpaddq		$TEMP0, $ACC9, $ACC9
+	 vpbroadcastq	$Y1, $Y1
+
+	vpmuludq	$Y2, $TEMP1, $TEMP1
+	vmovdqu		32*4-24-128($np), $TEMP0
+	 mov	%rax, %rdx
+	 imulq	-128($np), %rax
+	 mov	8(%rsp), $r1
+	vpaddq		$TEMP1, $ACC2, $ACC1
+	vpmuludq	$Y2, $TEMP2, $TEMP2
+	vmovdqu		32*5-24-128($np), $TEMP1
+	 add	%rax, $r0
+	 mov	%rdx, %rax
+	 imulq	8-128($np), %rax
+	 shr	\$29, $r0
+	 mov	16(%rsp), $r2
+	vpaddq		$TEMP2, $ACC3, $ACC2
+	vpmuludq	$Y2, $TEMP0, $TEMP0
+	vmovdqu		32*6-24-128($np), $TEMP2
+	 add	%rax, $r1
+	 mov	%rdx, %rax
+	 imulq	16-128($np), %rax
+	vpaddq		$TEMP0, $ACC4, $ACC3
+	vpmuludq	$Y2, $TEMP1, $TEMP1
+	vmovdqu		32*7-24-128($np), $TEMP0
+	 imulq	24-128($np), %rdx		# future $r3
+	 add	%rax, $r2
+	 lea	($r0,$r1), %rax
+	vpaddq		$TEMP1, $ACC5, $ACC4
+	vpmuludq	$Y2, $TEMP2, $TEMP2
+	vmovdqu		32*8-24-128($np), $TEMP1
+	 mov	%rax, $r1
+	 imull	$n0, %eax
+	vpaddq		$TEMP2, $ACC6, $ACC5
+	vpmuludq	$Y2, $TEMP0, $TEMP0
+	vmovdqu		32*9-24-128($np), $TEMP2
+	 and	\$0x1fffffff, %eax
+	vpaddq		$TEMP0, $ACC7, $ACC6
+	vpmuludq	$Y2, $TEMP1, $TEMP1
+	 add	24(%rsp), %rdx
+	vpaddq		$TEMP1, $ACC8, $ACC7
+	vpmuludq	$Y2, $TEMP2, $TEMP2
+	vpaddq		$TEMP2, $ACC9, $ACC8
+	 vmovq	$r3, $ACC9
+	 mov	%rdx, $r3
+
+	dec	$i
+	jnz	.LOOP_REDUCE_1024
+___
+($ACC0,$Y2)=($Y2,$ACC0);
+$code.=<<___;
+	lea	448(%rsp), $tp1			# size optimization
+	vpaddq	$ACC9, $Y2, $ACC0
+	vpxor	$ZERO, $ZERO, $ZERO
+
+	vpaddq		32*9-192($tp0), $ACC0, $ACC0
+	vpaddq		32*10-448($tp1), $ACC1, $ACC1
+	vpaddq		32*11-448($tp1), $ACC2, $ACC2
+	vpaddq		32*12-448($tp1), $ACC3, $ACC3
+	vpaddq		32*13-448($tp1), $ACC4, $ACC4
+	vpaddq		32*14-448($tp1), $ACC5, $ACC5
+	vpaddq		32*15-448($tp1), $ACC6, $ACC6
+	vpaddq		32*16-448($tp1), $ACC7, $ACC7
+	vpaddq		32*17-448($tp1), $ACC8, $ACC8
+
+	vpsrlq		\$29, $ACC0, $TEMP1
+	vpand		$AND_MASK, $ACC0, $ACC0
+	vpsrlq		\$29, $ACC1, $TEMP2
+	vpand		$AND_MASK, $ACC1, $ACC1
+	vpsrlq		\$29, $ACC2, $TEMP3
+	vpermq		\$0x93, $TEMP1, $TEMP1
+	vpand		$AND_MASK, $ACC2, $ACC2
+	vpsrlq		\$29, $ACC3, $TEMP4
+	vpermq		\$0x93, $TEMP2, $TEMP2
+	vpand		$AND_MASK, $ACC3, $ACC3
+	vpermq		\$0x93, $TEMP3, $TEMP3
+
+	vpblendd	\$3, $ZERO, $TEMP1, $TEMP0
+	vpermq		\$0x93, $TEMP4, $TEMP4
+	vpblendd	\$3, $TEMP1, $TEMP2, $TEMP1
+	vpaddq		$TEMP0, $ACC0, $ACC0
+	vpblendd	\$3, $TEMP2, $TEMP3, $TEMP2
+	vpaddq		$TEMP1, $ACC1, $ACC1
+	vpblendd	\$3, $TEMP3, $TEMP4, $TEMP3
+	vpaddq		$TEMP2, $ACC2, $ACC2
+	vpblendd	\$3, $TEMP4, $ZERO, $TEMP4
+	vpaddq		$TEMP3, $ACC3, $ACC3
+	vpaddq		$TEMP4, $ACC4, $ACC4
+
+	vpsrlq		\$29, $ACC0, $TEMP1
+	vpand		$AND_MASK, $ACC0, $ACC0
+	vpsrlq		\$29, $ACC1, $TEMP2
+	vpand		$AND_MASK, $ACC1, $ACC1
+	vpsrlq		\$29, $ACC2, $TEMP3
+	vpermq		\$0x93, $TEMP1, $TEMP1
+	vpand		$AND_MASK, $ACC2, $ACC2
+	vpsrlq		\$29, $ACC3, $TEMP4
+	vpermq		\$0x93, $TEMP2, $TEMP2
+	vpand		$AND_MASK, $ACC3, $ACC3
+	vpermq		\$0x93, $TEMP3, $TEMP3
+
+	vpblendd	\$3, $ZERO, $TEMP1, $TEMP0
+	vpermq		\$0x93, $TEMP4, $TEMP4
+	vpblendd	\$3, $TEMP1, $TEMP2, $TEMP1
+	vpaddq		$TEMP0, $ACC0, $ACC0
+	vpblendd	\$3, $TEMP2, $TEMP3, $TEMP2
+	vpaddq		$TEMP1, $ACC1, $ACC1
+	vmovdqu		$ACC0, 32*0-128($rp)
+	vpblendd	\$3, $TEMP3, $TEMP4, $TEMP3
+	vpaddq		$TEMP2, $ACC2, $ACC2
+	vmovdqu		$ACC1, 32*1-128($rp)
+	vpblendd	\$3, $TEMP4, $ZERO, $TEMP4
+	vpaddq		$TEMP3, $ACC3, $ACC3
+	vmovdqu		$ACC2, 32*2-128($rp)
+	vpaddq		$TEMP4, $ACC4, $ACC4
+	vmovdqu		$ACC3, 32*3-128($rp)
+___
+$TEMP5=$ACC0;
+$code.=<<___;
+	vpsrlq		\$29, $ACC4, $TEMP1
+	vpand		$AND_MASK, $ACC4, $ACC4
+	vpsrlq		\$29, $ACC5, $TEMP2
+	vpand		$AND_MASK, $ACC5, $ACC5
+	vpsrlq		\$29, $ACC6, $TEMP3
+	vpermq		\$0x93, $TEMP1, $TEMP1
+	vpand		$AND_MASK, $ACC6, $ACC6
+	vpsrlq		\$29, $ACC7, $TEMP4
+	vpermq		\$0x93, $TEMP2, $TEMP2
+	vpand		$AND_MASK, $ACC7, $ACC7
+	vpsrlq		\$29, $ACC8, $TEMP5
+	vpermq		\$0x93, $TEMP3, $TEMP3
+	vpand		$AND_MASK, $ACC8, $ACC8
+	vpermq		\$0x93, $TEMP4, $TEMP4
+
+	vpblendd	\$3, $ZERO, $TEMP1, $TEMP0
+	vpermq		\$0x93, $TEMP5, $TEMP5
+	vpblendd	\$3, $TEMP1, $TEMP2, $TEMP1
+	vpaddq		$TEMP0, $ACC4, $ACC4
+	vpblendd	\$3, $TEMP2, $TEMP3, $TEMP2
+	vpaddq		$TEMP1, $ACC5, $ACC5
+	vpblendd	\$3, $TEMP3, $TEMP4, $TEMP3
+	vpaddq		$TEMP2, $ACC6, $ACC6
+	vpblendd	\$3, $TEMP4, $TEMP5, $TEMP4
+	vpaddq		$TEMP3, $ACC7, $ACC7
+	vpaddq		$TEMP4, $ACC8, $ACC8
+     
+	vpsrlq		\$29, $ACC4, $TEMP1
+	vpand		$AND_MASK, $ACC4, $ACC4
+	vpsrlq		\$29, $ACC5, $TEMP2
+	vpand		$AND_MASK, $ACC5, $ACC5
+	vpsrlq		\$29, $ACC6, $TEMP3
+	vpermq		\$0x93, $TEMP1, $TEMP1
+	vpand		$AND_MASK, $ACC6, $ACC6
+	vpsrlq		\$29, $ACC7, $TEMP4
+	vpermq		\$0x93, $TEMP2, $TEMP2
+	vpand		$AND_MASK, $ACC7, $ACC7
+	vpsrlq		\$29, $ACC8, $TEMP5
+	vpermq		\$0x93, $TEMP3, $TEMP3
+	vpand		$AND_MASK, $ACC8, $ACC8
+	vpermq		\$0x93, $TEMP4, $TEMP4
+
+	vpblendd	\$3, $ZERO, $TEMP1, $TEMP0
+	vpermq		\$0x93, $TEMP5, $TEMP5
+	vpblendd	\$3, $TEMP1, $TEMP2, $TEMP1
+	vpaddq		$TEMP0, $ACC4, $ACC4
+	vpblendd	\$3, $TEMP2, $TEMP3, $TEMP2
+	vpaddq		$TEMP1, $ACC5, $ACC5
+	vmovdqu		$ACC4, 32*4-128($rp)
+	vpblendd	\$3, $TEMP3, $TEMP4, $TEMP3
+	vpaddq		$TEMP2, $ACC6, $ACC6
+	vmovdqu		$ACC5, 32*5-128($rp)
+	vpblendd	\$3, $TEMP4, $TEMP5, $TEMP4
+	vpaddq		$TEMP3, $ACC7, $ACC7
+	vmovdqu		$ACC6, 32*6-128($rp)
+	vpaddq		$TEMP4, $ACC8, $ACC8
+	vmovdqu		$ACC7, 32*7-128($rp)
+	vmovdqu		$ACC8, 32*8-128($rp)
+
+	mov	$rp, $ap
+	dec	$rep
+	jne	.LOOP_GRANDE_SQR_1024
+
+	vzeroall
+	mov	%rbp, %rax
+___
+$code.=<<___ if ($win64);
+	movaps	-0xd8(%rax),%xmm6
+	movaps	-0xc8(%rax),%xmm7
+	movaps	-0xb8(%rax),%xmm8
+	movaps	-0xa8(%rax),%xmm9
+	movaps	-0x98(%rax),%xmm10
+	movaps	-0x88(%rax),%xmm11
+	movaps	-0x78(%rax),%xmm12
+	movaps	-0x68(%rax),%xmm13
+	movaps	-0x58(%rax),%xmm14
+	movaps	-0x48(%rax),%xmm15
+___
+$code.=<<___;
+	mov	-48(%rax),%r15
+	mov	-40(%rax),%r14
+	mov	-32(%rax),%r13
+	mov	-24(%rax),%r12
+	mov	-16(%rax),%rbp
+	mov	-8(%rax),%rbx
+	lea	(%rax),%rsp		# restore %rsp
+.Lsqr_1024_epilogue:
+	ret
+.size	rsaz_1024_sqr_avx2,.-rsaz_1024_sqr_avx2
+___
+}
+
+{ # void AMM_WW(
+my $rp="%rdi";	# BN_ULONG *rp,
+my $ap="%rsi";	# const BN_ULONG *ap,
+my $bp="%rdx";	# const BN_ULONG *bp,
+my $np="%rcx";	# const BN_ULONG *np,
+my $n0="%r8d";	# unsigned int n0);
+
+# The registers that hold the accumulated redundant result
+# The AMM works on 1024 bit operands, and redundant word size is 29
+# Therefore: ceil(1024/29)/4 = 9
+my $ACC0="%ymm0";
+my $ACC1="%ymm1";
+my $ACC2="%ymm2";
+my $ACC3="%ymm3";
+my $ACC4="%ymm4";
+my $ACC5="%ymm5";
+my $ACC6="%ymm6";
+my $ACC7="%ymm7";
+my $ACC8="%ymm8";
+my $ACC9="%ymm9";
+
+# Registers that hold the broadcasted words of multiplier, currently used
+my $Bi="%ymm10";
+my $Yi="%ymm11";
+
+# Helper registers
+my $TEMP0=$ACC0;
+my $TEMP1="%ymm12";
+my $TEMP2="%ymm13";
+my $ZERO="%ymm14";
+my $AND_MASK="%ymm15";
+
+# alu registers that hold the first words of the ACC
+my $r0="%r9";
+my $r1="%r10";
+my $r2="%r11";
+my $r3="%r12";
+
+my $i="%r14d";
+my $tmp="%r15";
+
+$bp="%r13";	# reassigned argument
+
+$code.=<<___;
+.globl	rsaz_1024_mul_avx2
+.type	rsaz_1024_mul_avx2,\@function,5
+.align	64
+rsaz_1024_mul_avx2:
+	lea	(%rsp), %rax
+	push	%rbx
+	push	%rbp
+	push	%r12
+	push	%r13
+	push	%r14
+	push	%r15
+___
+$code.=<<___ if ($win64);
+	lea	-0xa8(%rsp),%rsp
+	movaps  %xmm6,-0xd8(%rax)
+	movaps  %xmm7,-0xc8(%rax)
+	movaps  %xmm8,-0xb8(%rax)
+	movaps  %xmm9,-0xa8(%rax)
+	movaps  %xmm10,-0x98(%rax)
+	movaps  %xmm11,-0x88(%rax)
+	movaps  %xmm12,-0x78(%rax)
+	movaps  %xmm13,-0x68(%rax)
+	movaps  %xmm14,-0x58(%rax)
+	movaps  %xmm15,-0x48(%rax)
+.Lmul_1024_body:
+___
+$code.=<<___;
+	mov	%rax,%rbp
+	vzeroall
+	mov	%rdx, $bp	# reassigned argument
+	sub	\$64,%rsp
+
+	# unaligned 256-bit load that crosses page boundary can
+	# cause severe performance degradation here, so if $ap does
+	# cross page boundary, swap it with $bp [meaning that caller
+	# is advised to lay down $ap and $bp next to each other, so
+	# that only one can cross page boundary].
+	mov	$ap, $tmp
+	and	\$4095, $tmp
+	add	\$32*10, $tmp
+	shr	\$12, $tmp
+	mov	$ap, $tmp
+	cmovnz	$bp, $ap
+	cmovnz	$tmp, $bp
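+	# (shr leaves ZF set when the 10x32-byte region at $ap does not
+	#  cross a 4KB page; mov does not affect flags, so both cmovnz
+	#  instructions act on that single test and swap $ap with $bp only
+	#  in the page-crossing case)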
+
+	mov	$np, $tmp
+	sub	\$-128,$ap	# size optimization
+	sub	\$-128,$np
+	sub	\$-128,$rp
+
+	and	\$4095, $tmp	# see if $np crosses page
+	add	\$32*10, $tmp
+	shr	\$12, $tmp
+	jz	.Lmul_1024_no_n_copy
+
+	# unaligned 256-bit load that crosses page boundary can
+	# cause severe performance degradation here, so if $np does
+	# cross page boundary, copy it to stack and make sure stack
+	# frame doesn't...
+	sub		\$32*10,%rsp
+	vmovdqu		32*0-128($np), $ACC0
+	and		\$-512, %rsp
+	vmovdqu		32*1-128($np), $ACC1
+	vmovdqu		32*2-128($np), $ACC2
+	vmovdqu		32*3-128($np), $ACC3
+	vmovdqu		32*4-128($np), $ACC4
+	vmovdqu		32*5-128($np), $ACC5
+	vmovdqu		32*6-128($np), $ACC6
+	vmovdqu		32*7-128($np), $ACC7
+	vmovdqu		32*8-128($np), $ACC8
+	lea		64+128(%rsp),$np
+	vmovdqu		$ACC0, 32*0-128($np)
+	vpxor		$ACC0, $ACC0, $ACC0
+	vmovdqu		$ACC1, 32*1-128($np)
+	vpxor		$ACC1, $ACC1, $ACC1
+	vmovdqu		$ACC2, 32*2-128($np)
+	vpxor		$ACC2, $ACC2, $ACC2
+	vmovdqu		$ACC3, 32*3-128($np)
+	vpxor		$ACC3, $ACC3, $ACC3
+	vmovdqu		$ACC4, 32*4-128($np)
+	vpxor		$ACC4, $ACC4, $ACC4
+	vmovdqu		$ACC5, 32*5-128($np)
+	vpxor		$ACC5, $ACC5, $ACC5
+	vmovdqu		$ACC6, 32*6-128($np)
+	vpxor		$ACC6, $ACC6, $ACC6
+	vmovdqu		$ACC7, 32*7-128($np)
+	vpxor		$ACC7, $ACC7, $ACC7
+	vmovdqu		$ACC8, 32*8-128($np)
+	vmovdqa		$ACC0, $ACC8
+	vmovdqu		$ACC9, 32*9-128($np)	# $ACC9 is zero after vzeroall
+.Lmul_1024_no_n_copy:
+	and	\$-64,%rsp
+
+	mov	($bp), %rbx
+	vpbroadcastq ($bp), $Bi
+	vmovdqu	$ACC0, (%rsp)			# clear top of stack
+	xor	$r0, $r0
+	xor	$r1, $r1
+	xor	$r2, $r2
+	xor	$r3, $r3
+
+	vmovdqu	.Land_mask(%rip), $AND_MASK
+	mov	\$9, $i
+	jmp	.Loop_mul_1024
+
+.align	32
+.Loop_mul_1024:
+	 vpsrlq		\$29, $ACC3, $ACC9		# correct $ACC3(*)
+	mov	%rbx, %rax
+	imulq	-128($ap), %rax
+	add	$r0, %rax
+	mov	%rbx, $r1
+	imulq	8-128($ap), $r1
+	add	8(%rsp), $r1
+
+	mov	%rax, $r0
+	imull	$n0, %eax
+	and	\$0x1fffffff, %eax
+
+	 mov	%rbx, $r2
+	 imulq	16-128($ap), $r2
+	 add	16(%rsp), $r2
+
+	 mov	%rbx, $r3
+	 imulq	24-128($ap), $r3
+	 add	24(%rsp), $r3
+	vpmuludq	32*1-128($ap),$Bi,$TEMP0
+	 vmovd		%eax, $Yi
+	vpaddq		$TEMP0,$ACC1,$ACC1
+	vpmuludq	32*2-128($ap),$Bi,$TEMP1
+	 vpbroadcastq	$Yi, $Yi
+	vpaddq		$TEMP1,$ACC2,$ACC2
+	vpmuludq	32*3-128($ap),$Bi,$TEMP2
+	 vpand		$AND_MASK, $ACC3, $ACC3		# correct $ACC3
+	vpaddq		$TEMP2,$ACC3,$ACC3
+	vpmuludq	32*4-128($ap),$Bi,$TEMP0
+	vpaddq		$TEMP0,$ACC4,$ACC4
+	vpmuludq	32*5-128($ap),$Bi,$TEMP1
+	vpaddq		$TEMP1,$ACC5,$ACC5
+	vpmuludq	32*6-128($ap),$Bi,$TEMP2
+	vpaddq		$TEMP2,$ACC6,$ACC6
+	vpmuludq	32*7-128($ap),$Bi,$TEMP0
+	 vpermq		\$0x93, $ACC9, $ACC9		# correct $ACC3
+	vpaddq		$TEMP0,$ACC7,$ACC7
+	vpmuludq	32*8-128($ap),$Bi,$TEMP1
+	 vpbroadcastq	8($bp), $Bi
+	vpaddq		$TEMP1,$ACC8,$ACC8
+
+	mov	%rax,%rdx
+	imulq	-128($np),%rax
+	add	%rax,$r0
+	mov	%rdx,%rax
+	imulq	8-128($np),%rax
+	add	%rax,$r1
+	mov	%rdx,%rax
+	imulq	16-128($np),%rax
+	add	%rax,$r2
+	shr	\$29, $r0
+	imulq	24-128($np),%rdx
+	add	%rdx,$r3
+	add	$r0, $r1
+
+	vpmuludq	32*1-128($np),$Yi,$TEMP2
+	 vmovq		$Bi, %rbx
+	vpaddq		$TEMP2,$ACC1,$ACC1
+	vpmuludq	32*2-128($np),$Yi,$TEMP0
+	vpaddq		$TEMP0,$ACC2,$ACC2
+	vpmuludq	32*3-128($np),$Yi,$TEMP1
+	vpaddq		$TEMP1,$ACC3,$ACC3
+	vpmuludq	32*4-128($np),$Yi,$TEMP2
+	vpaddq		$TEMP2,$ACC4,$ACC4
+	vpmuludq	32*5-128($np),$Yi,$TEMP0
+	vpaddq		$TEMP0,$ACC5,$ACC5
+	vpmuludq	32*6-128($np),$Yi,$TEMP1
+	vpaddq		$TEMP1,$ACC6,$ACC6
+	vpmuludq	32*7-128($np),$Yi,$TEMP2
+	 vpblendd	\$3, $ZERO, $ACC9, $ACC9	# correct $ACC3
+	vpaddq		$TEMP2,$ACC7,$ACC7
+	vpmuludq	32*8-128($np),$Yi,$TEMP0
+	 vpaddq		$ACC9, $ACC3, $ACC3		# correct $ACC3
+	vpaddq		$TEMP0,$ACC8,$ACC8
+
+	mov	%rbx, %rax
+	imulq	-128($ap),%rax
+	add	%rax,$r1
+	 vmovdqu	-8+32*1-128($ap),$TEMP1
+	mov	%rbx, %rax
+	imulq	8-128($ap),%rax
+	add	%rax,$r2
+	 vmovdqu	-8+32*2-128($ap),$TEMP2
+
+	mov	$r1, %rax
+	imull	$n0, %eax
+	and	\$0x1fffffff, %eax
+
+	 imulq	16-128($ap),%rbx
+	 add	%rbx,$r3
+	vpmuludq	$Bi,$TEMP1,$TEMP1
+	 vmovd		%eax, $Yi
+	vmovdqu		-8+32*3-128($ap),$TEMP0
+	vpaddq		$TEMP1,$ACC1,$ACC1
+	vpmuludq	$Bi,$TEMP2,$TEMP2
+	 vpbroadcastq	$Yi, $Yi
+	vmovdqu		-8+32*4-128($ap),$TEMP1
+	vpaddq		$TEMP2,$ACC2,$ACC2
+	vpmuludq	$Bi,$TEMP0,$TEMP0
+	vmovdqu		-8+32*5-128($ap),$TEMP2
+	vpaddq		$TEMP0,$ACC3,$ACC3
+	vpmuludq	$Bi,$TEMP1,$TEMP1
+	vmovdqu		-8+32*6-128($ap),$TEMP0
+	vpaddq		$TEMP1,$ACC4,$ACC4
+	vpmuludq	$Bi,$TEMP2,$TEMP2
+	vmovdqu		-8+32*7-128($ap),$TEMP1
+	vpaddq		$TEMP2,$ACC5,$ACC5
+	vpmuludq	$Bi,$TEMP0,$TEMP0
+	vmovdqu		-8+32*8-128($ap),$TEMP2
+	vpaddq		$TEMP0,$ACC6,$ACC6
+	vpmuludq	$Bi,$TEMP1,$TEMP1
+	vmovdqu		-8+32*9-128($ap),$ACC9
+	vpaddq		$TEMP1,$ACC7,$ACC7
+	vpmuludq	$Bi,$TEMP2,$TEMP2
+	vpaddq		$TEMP2,$ACC8,$ACC8
+	vpmuludq	$Bi,$ACC9,$ACC9
+	 vpbroadcastq	16($bp), $Bi
+
+	mov	%rax,%rdx
+	imulq	-128($np),%rax
+	add	%rax,$r1
+	 vmovdqu	-8+32*1-128($np),$TEMP0
+	mov	%rdx,%rax
+	imulq	8-128($np),%rax
+	add	%rax,$r2
+	 vmovdqu	-8+32*2-128($np),$TEMP1
+	shr	\$29, $r1
+	imulq	16-128($np),%rdx
+	add	%rdx,$r3
+	add	$r1, $r2
+
+	vpmuludq	$Yi,$TEMP0,$TEMP0
+	 vmovq		$Bi, %rbx
+	vmovdqu		-8+32*3-128($np),$TEMP2
+	vpaddq		$TEMP0,$ACC1,$ACC1
+	vpmuludq	$Yi,$TEMP1,$TEMP1
+	vmovdqu		-8+32*4-128($np),$TEMP0
+	vpaddq		$TEMP1,$ACC2,$ACC2
+	vpmuludq	$Yi,$TEMP2,$TEMP2
+	vmovdqu		-8+32*5-128($np),$TEMP1
+	vpaddq		$TEMP2,$ACC3,$ACC3
+	vpmuludq	$Yi,$TEMP0,$TEMP0
+	vmovdqu		-8+32*6-128($np),$TEMP2
+	vpaddq		$TEMP0,$ACC4,$ACC4
+	vpmuludq	$Yi,$TEMP1,$TEMP1
+	vmovdqu		-8+32*7-128($np),$TEMP0
+	vpaddq		$TEMP1,$ACC5,$ACC5
+	vpmuludq	$Yi,$TEMP2,$TEMP2
+	vmovdqu		-8+32*8-128($np),$TEMP1
+	vpaddq		$TEMP2,$ACC6,$ACC6
+	vpmuludq	$Yi,$TEMP0,$TEMP0
+	vmovdqu		-8+32*9-128($np),$TEMP2
+	vpaddq		$TEMP0,$ACC7,$ACC7
+	vpmuludq	$Yi,$TEMP1,$TEMP1
+	vpaddq		$TEMP1,$ACC8,$ACC8
+	vpmuludq	$Yi,$TEMP2,$TEMP2
+	vpaddq		$TEMP2,$ACC9,$ACC9
+
+	 vmovdqu	-16+32*1-128($ap),$TEMP0
+	mov	%rbx,%rax
+	imulq	-128($ap),%rax
+	add	$r2,%rax
+
+	 vmovdqu	-16+32*2-128($ap),$TEMP1
+	mov	%rax,$r2
+	imull	$n0, %eax
+	and	\$0x1fffffff, %eax
+
+	 imulq	8-128($ap),%rbx
+	 add	%rbx,$r3
+	vpmuludq	$Bi,$TEMP0,$TEMP0
+	 vmovd		%eax, $Yi
+	vmovdqu		-16+32*3-128($ap),$TEMP2
+	vpaddq		$TEMP0,$ACC1,$ACC1
+	vpmuludq	$Bi,$TEMP1,$TEMP1
+	 vpbroadcastq	$Yi, $Yi
+	vmovdqu		-16+32*4-128($ap),$TEMP0
+	vpaddq		$TEMP1,$ACC2,$ACC2
+	vpmuludq	$Bi,$TEMP2,$TEMP2
+	vmovdqu		-16+32*5-128($ap),$TEMP1
+	vpaddq		$TEMP2,$ACC3,$ACC3
+	vpmuludq	$Bi,$TEMP0,$TEMP0
+	vmovdqu		-16+32*6-128($ap),$TEMP2
+	vpaddq		$TEMP0,$ACC4,$ACC4
+	vpmuludq	$Bi,$TEMP1,$TEMP1
+	vmovdqu		-16+32*7-128($ap),$TEMP0
+	vpaddq		$TEMP1,$ACC5,$ACC5
+	vpmuludq	$Bi,$TEMP2,$TEMP2
+	vmovdqu		-16+32*8-128($ap),$TEMP1
+	vpaddq		$TEMP2,$ACC6,$ACC6
+	vpmuludq	$Bi,$TEMP0,$TEMP0
+	vmovdqu		-16+32*9-128($ap),$TEMP2
+	vpaddq		$TEMP0,$ACC7,$ACC7
+	vpmuludq	$Bi,$TEMP1,$TEMP1
+	vpaddq		$TEMP1,$ACC8,$ACC8
+	vpmuludq	$Bi,$TEMP2,$TEMP2
+	 vpbroadcastq	24($bp), $Bi
+	vpaddq		$TEMP2,$ACC9,$ACC9
+
+	 vmovdqu	-16+32*1-128($np),$TEMP0
+	mov	%rax,%rdx
+	imulq	-128($np),%rax
+	add	%rax,$r2
+	 vmovdqu	-16+32*2-128($np),$TEMP1
+	imulq	8-128($np),%rdx
+	add	%rdx,$r3
+	shr	\$29, $r2
+
+	vpmuludq	$Yi,$TEMP0,$TEMP0
+	 vmovq		$Bi, %rbx
+	vmovdqu		-16+32*3-128($np),$TEMP2
+	vpaddq		$TEMP0,$ACC1,$ACC1
+	vpmuludq	$Yi,$TEMP1,$TEMP1
+	vmovdqu		-16+32*4-128($np),$TEMP0
+	vpaddq		$TEMP1,$ACC2,$ACC2
+	vpmuludq	$Yi,$TEMP2,$TEMP2
+	vmovdqu		-16+32*5-128($np),$TEMP1
+	vpaddq		$TEMP2,$ACC3,$ACC3
+	vpmuludq	$Yi,$TEMP0,$TEMP0
+	vmovdqu		-16+32*6-128($np),$TEMP2
+	vpaddq		$TEMP0,$ACC4,$ACC4
+	vpmuludq	$Yi,$TEMP1,$TEMP1
+	vmovdqu		-16+32*7-128($np),$TEMP0
+	vpaddq		$TEMP1,$ACC5,$ACC5
+	vpmuludq	$Yi,$TEMP2,$TEMP2
+	vmovdqu		-16+32*8-128($np),$TEMP1
+	vpaddq		$TEMP2,$ACC6,$ACC6
+	vpmuludq	$Yi,$TEMP0,$TEMP0
+	vmovdqu		-16+32*9-128($np),$TEMP2
+	vpaddq		$TEMP0,$ACC7,$ACC7
+	vpmuludq	$Yi,$TEMP1,$TEMP1
+	 vmovdqu	-24+32*1-128($ap),$TEMP0
+	vpaddq		$TEMP1,$ACC8,$ACC8
+	vpmuludq	$Yi,$TEMP2,$TEMP2
+	 vmovdqu	-24+32*2-128($ap),$TEMP1
+	vpaddq		$TEMP2,$ACC9,$ACC9
+
+	add	$r2, $r3
+	imulq	-128($ap),%rbx
+	add	%rbx,$r3
+
+	mov	$r3, %rax
+	imull	$n0, %eax
+	and	\$0x1fffffff, %eax
+
+	vpmuludq	$Bi,$TEMP0,$TEMP0
+	 vmovd		%eax, $Yi
+	vmovdqu		-24+32*3-128($ap),$TEMP2
+	vpaddq		$TEMP0,$ACC1,$ACC1
+	vpmuludq	$Bi,$TEMP1,$TEMP1
+	 vpbroadcastq	$Yi, $Yi
+	vmovdqu		-24+32*4-128($ap),$TEMP0
+	vpaddq		$TEMP1,$ACC2,$ACC2
+	vpmuludq	$Bi,$TEMP2,$TEMP2
+	vmovdqu		-24+32*5-128($ap),$TEMP1
+	vpaddq		$TEMP2,$ACC3,$ACC3
+	vpmuludq	$Bi,$TEMP0,$TEMP0
+	vmovdqu		-24+32*6-128($ap),$TEMP2
+	vpaddq		$TEMP0,$ACC4,$ACC4
+	vpmuludq	$Bi,$TEMP1,$TEMP1
+	vmovdqu		-24+32*7-128($ap),$TEMP0
+	vpaddq		$TEMP1,$ACC5,$ACC5
+	vpmuludq	$Bi,$TEMP2,$TEMP2
+	vmovdqu		-24+32*8-128($ap),$TEMP1
+	vpaddq		$TEMP2,$ACC6,$ACC6
+	vpmuludq	$Bi,$TEMP0,$TEMP0
+	vmovdqu		-24+32*9-128($ap),$TEMP2
+	vpaddq		$TEMP0,$ACC7,$ACC7
+	vpmuludq	$Bi,$TEMP1,$TEMP1
+	vpaddq		$TEMP1,$ACC8,$ACC8
+	vpmuludq	$Bi,$TEMP2,$TEMP2
+	 vpbroadcastq	32($bp), $Bi
+	vpaddq		$TEMP2,$ACC9,$ACC9
+	 add		\$32, $bp			# $bp++
+
+	vmovdqu		-24+32*1-128($np),$TEMP0
+	imulq	-128($np),%rax
+	add	%rax,$r3
+	shr	\$29, $r3
+
+	vmovdqu		-24+32*2-128($np),$TEMP1
+	vpmuludq	$Yi,$TEMP0,$TEMP0
+	 vmovq		$Bi, %rbx
+	vmovdqu		-24+32*3-128($np),$TEMP2
+	vpaddq		$TEMP0,$ACC1,$ACC0		# $ACC0==$TEMP0
+	vpmuludq	$Yi,$TEMP1,$TEMP1
+	 vmovdqu	$ACC0, (%rsp)			# transfer $r0-$r3
+	vpaddq		$TEMP1,$ACC2,$ACC1
+	vmovdqu		-24+32*4-128($np),$TEMP0
+	vpmuludq	$Yi,$TEMP2,$TEMP2
+	vmovdqu		-24+32*5-128($np),$TEMP1
+	vpaddq		$TEMP2,$ACC3,$ACC2
+	vpmuludq	$Yi,$TEMP0,$TEMP0
+	vmovdqu		-24+32*6-128($np),$TEMP2
+	vpaddq		$TEMP0,$ACC4,$ACC3
+	vpmuludq	$Yi,$TEMP1,$TEMP1
+	vmovdqu		-24+32*7-128($np),$TEMP0
+	vpaddq		$TEMP1,$ACC5,$ACC4
+	vpmuludq	$Yi,$TEMP2,$TEMP2
+	vmovdqu		-24+32*8-128($np),$TEMP1
+	vpaddq		$TEMP2,$ACC6,$ACC5
+	vpmuludq	$Yi,$TEMP0,$TEMP0
+	vmovdqu		-24+32*9-128($np),$TEMP2
+	 mov	$r3, $r0
+	vpaddq		$TEMP0,$ACC7,$ACC6
+	vpmuludq	$Yi,$TEMP1,$TEMP1
+	 add	(%rsp), $r0
+	vpaddq		$TEMP1,$ACC8,$ACC7
+	vpmuludq	$Yi,$TEMP2,$TEMP2
+	 vmovq	$r3, $TEMP1
+	vpaddq		$TEMP2,$ACC9,$ACC8
+
+	dec	$i
+	jnz	.Loop_mul_1024
+___
+
+# (*)	The original implementation corrected ACC1-ACC3 for overflow
+#	after 7 loop runs, i.e. after 28 iterations or 56 additions.
+#	But as we underutilize resources, it's possible to correct in
+#	each iteration with only marginal performance loss. And since
+#	we correct in each iteration anyway, we can correct fewer
+#	digits and avoid the performance penalty completely. Also note
+#	that we correct only three digits out of four; this works
+#	because the most significant digit is subjected to fewer
+#	additions.
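+#
+#	A rough bound behind this (illustrative only): every vpmuludq
+#	term is a product of two 29-bit digits, i.e. below 2^58, which
+#	leaves 6 bits of headroom in a 64-bit lane and hence room for
+#	roughly 2^6 accumulated terms before a lane can wrap. The
+#	per-iteration fix-up extracts bits 29 and up with vpsrlq/vpand
+#	and feeds them one digit up via the vpermq/vpblendd lane
+#	rotation.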
+
+$TEMP0 = $ACC9;
+$TEMP3 = $Bi;
+$TEMP4 = $Yi;
+$code.=<<___;
+	vpermq		\$0, $AND_MASK, $AND_MASK
+	vpaddq		(%rsp), $TEMP1, $ACC0
+
+	vpsrlq		\$29, $ACC0, $TEMP1
+	vpand		$AND_MASK, $ACC0, $ACC0
+	vpsrlq		\$29, $ACC1, $TEMP2
+	vpand		$AND_MASK, $ACC1, $ACC1
+	vpsrlq		\$29, $ACC2, $TEMP3
+	vpermq		\$0x93, $TEMP1, $TEMP1
+	vpand		$AND_MASK, $ACC2, $ACC2
+	vpsrlq		\$29, $ACC3, $TEMP4
+	vpermq		\$0x93, $TEMP2, $TEMP2
+	vpand		$AND_MASK, $ACC3, $ACC3
+
+	vpblendd	\$3, $ZERO, $TEMP1, $TEMP0
+	vpermq		\$0x93, $TEMP3, $TEMP3
+	vpblendd	\$3, $TEMP1, $TEMP2, $TEMP1
+	vpermq		\$0x93, $TEMP4, $TEMP4
+	vpaddq		$TEMP0, $ACC0, $ACC0
+	vpblendd	\$3, $TEMP2, $TEMP3, $TEMP2
+	vpaddq		$TEMP1, $ACC1, $ACC1
+	vpblendd	\$3, $TEMP3, $TEMP4, $TEMP3
+	vpaddq		$TEMP2, $ACC2, $ACC2
+	vpblendd	\$3, $TEMP4, $ZERO, $TEMP4
+	vpaddq		$TEMP3, $ACC3, $ACC3
+	vpaddq		$TEMP4, $ACC4, $ACC4
+
+	vpsrlq		\$29, $ACC0, $TEMP1
+	vpand		$AND_MASK, $ACC0, $ACC0
+	vpsrlq		\$29, $ACC1, $TEMP2
+	vpand		$AND_MASK, $ACC1, $ACC1
+	vpsrlq		\$29, $ACC2, $TEMP3
+	vpermq		\$0x93, $TEMP1, $TEMP1
+	vpand		$AND_MASK, $ACC2, $ACC2
+	vpsrlq		\$29, $ACC3, $TEMP4
+	vpermq		\$0x93, $TEMP2, $TEMP2
+	vpand		$AND_MASK, $ACC3, $ACC3
+	vpermq		\$0x93, $TEMP3, $TEMP3
+
+	vpblendd	\$3, $ZERO, $TEMP1, $TEMP0
+	vpermq		\$0x93, $TEMP4, $TEMP4
+	vpblendd	\$3, $TEMP1, $TEMP2, $TEMP1
+	vpaddq		$TEMP0, $ACC0, $ACC0
+	vpblendd	\$3, $TEMP2, $TEMP3, $TEMP2
+	vpaddq		$TEMP1, $ACC1, $ACC1
+	vpblendd	\$3, $TEMP3, $TEMP4, $TEMP3
+	vpaddq		$TEMP2, $ACC2, $ACC2
+	vpblendd	\$3, $TEMP4, $ZERO, $TEMP4
+	vpaddq		$TEMP3, $ACC3, $ACC3
+	vpaddq		$TEMP4, $ACC4, $ACC4
+
+	vmovdqu		$ACC0, 0-128($rp)
+	vmovdqu		$ACC1, 32-128($rp)
+	vmovdqu		$ACC2, 64-128($rp)
+	vmovdqu		$ACC3, 96-128($rp)
+___
+
+$TEMP5=$ACC0;
+$code.=<<___;
+	vpsrlq		\$29, $ACC4, $TEMP1
+	vpand		$AND_MASK, $ACC4, $ACC4
+	vpsrlq		\$29, $ACC5, $TEMP2
+	vpand		$AND_MASK, $ACC5, $ACC5
+	vpsrlq		\$29, $ACC6, $TEMP3
+	vpermq		\$0x93, $TEMP1, $TEMP1
+	vpand		$AND_MASK, $ACC6, $ACC6
+	vpsrlq		\$29, $ACC7, $TEMP4
+	vpermq		\$0x93, $TEMP2, $TEMP2
+	vpand		$AND_MASK, $ACC7, $ACC7
+	vpsrlq		\$29, $ACC8, $TEMP5
+	vpermq		\$0x93, $TEMP3, $TEMP3
+	vpand		$AND_MASK, $ACC8, $ACC8
+	vpermq		\$0x93, $TEMP4, $TEMP4
+
+	vpblendd	\$3, $ZERO, $TEMP1, $TEMP0
+	vpermq		\$0x93, $TEMP5, $TEMP5
+	vpblendd	\$3, $TEMP1, $TEMP2, $TEMP1
+	vpaddq		$TEMP0, $ACC4, $ACC4
+	vpblendd	\$3, $TEMP2, $TEMP3, $TEMP2
+	vpaddq		$TEMP1, $ACC5, $ACC5
+	vpblendd	\$3, $TEMP3, $TEMP4, $TEMP3
+	vpaddq		$TEMP2, $ACC6, $ACC6
+	vpblendd	\$3, $TEMP4, $TEMP5, $TEMP4
+	vpaddq		$TEMP3, $ACC7, $ACC7
+	vpaddq		$TEMP4, $ACC8, $ACC8
+
+	vpsrlq		\$29, $ACC4, $TEMP1
+	vpand		$AND_MASK, $ACC4, $ACC4
+	vpsrlq		\$29, $ACC5, $TEMP2
+	vpand		$AND_MASK, $ACC5, $ACC5
+	vpsrlq		\$29, $ACC6, $TEMP3
+	vpermq		\$0x93, $TEMP1, $TEMP1
+	vpand		$AND_MASK, $ACC6, $ACC6
+	vpsrlq		\$29, $ACC7, $TEMP4
+	vpermq		\$0x93, $TEMP2, $TEMP2
+	vpand		$AND_MASK, $ACC7, $ACC7
+	vpsrlq		\$29, $ACC8, $TEMP5
+	vpermq		\$0x93, $TEMP3, $TEMP3
+	vpand		$AND_MASK, $ACC8, $ACC8
+	vpermq		\$0x93, $TEMP4, $TEMP4
+
+	vpblendd	\$3, $ZERO, $TEMP1, $TEMP0
+	vpermq		\$0x93, $TEMP5, $TEMP5
+	vpblendd	\$3, $TEMP1, $TEMP2, $TEMP1
+	vpaddq		$TEMP0, $ACC4, $ACC4
+	vpblendd	\$3, $TEMP2, $TEMP3, $TEMP2
+	vpaddq		$TEMP1, $ACC5, $ACC5
+	vpblendd	\$3, $TEMP3, $TEMP4, $TEMP3
+	vpaddq		$TEMP2, $ACC6, $ACC6
+	vpblendd	\$3, $TEMP4, $TEMP5, $TEMP4
+	vpaddq		$TEMP3, $ACC7, $ACC7
+	vpaddq		$TEMP4, $ACC8, $ACC8
+
+	vmovdqu		$ACC4, 128-128($rp)
+	vmovdqu		$ACC5, 160-128($rp)    
+	vmovdqu		$ACC6, 192-128($rp)
+	vmovdqu		$ACC7, 224-128($rp)
+	vmovdqu		$ACC8, 256-128($rp)
+	vzeroupper
+
+	mov	%rbp, %rax
+___
+$code.=<<___ if ($win64);
+	movaps	-0xd8(%rax),%xmm6
+	movaps	-0xc8(%rax),%xmm7
+	movaps	-0xb8(%rax),%xmm8
+	movaps	-0xa8(%rax),%xmm9
+	movaps	-0x98(%rax),%xmm10
+	movaps	-0x88(%rax),%xmm11
+	movaps	-0x78(%rax),%xmm12
+	movaps	-0x68(%rax),%xmm13
+	movaps	-0x58(%rax),%xmm14
+	movaps	-0x48(%rax),%xmm15
+___
+$code.=<<___;
+	mov	-48(%rax),%r15
+	mov	-40(%rax),%r14
+	mov	-32(%rax),%r13
+	mov	-24(%rax),%r12
+	mov	-16(%rax),%rbp
+	mov	-8(%rax),%rbx
+	lea	(%rax),%rsp		# restore %rsp
+.Lmul_1024_epilogue:
+	ret
+.size	rsaz_1024_mul_avx2,.-rsaz_1024_mul_avx2
+___
+}
+{
+my ($out,$inp) = $win64 ? ("%rcx","%rdx") : ("%rdi","%rsi");
+my @T = map("%r$_",(8..11));
+
+$code.=<<___;
+.globl	rsaz_1024_red2norm_avx2
+.type	rsaz_1024_red2norm_avx2,\@abi-omnipotent
+.align	32
+rsaz_1024_red2norm_avx2:
+	sub	\$-128,$inp	# size optimization
+	xor	%rax,%rax
+___
+
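+# The loop below emits unrolled code that converts the 36 x 29-bit redundant
+# digits (stored one per 64-bit slot) back into 16 x 64-bit words: for output
+# word i it loads every digit j with 29*j < 64*(i+1), shifts each digit to
+# its bit offset within the word, sums the pieces in %rax and hands the part
+# that spills into the next word (plus carry) over through the rotated @T
+# register file.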
+for ($j=0,$i=0; $i<16; $i++) {
+    my $k=0;
+    while (29*$j<64*($i+1)) {	# load data till boundary
+	$code.="	mov	`8*$j-128`($inp), @T[0]\n";
+	$j++; $k++; push(@T,shift(@T));
+    }
+    $l=$k;
+    while ($k>1) {		# shift loaded data but last value
+	$code.="	shl	\$`29*($j-$k)`,@T[-$k]\n";
+	$k--;
+    }
+    $code.=<<___;		# shift last value
+	mov	@T[-1], @T[0]
+	shl	\$`29*($j-1)`, @T[-1]
+	shr	\$`-29*($j-1)`, @T[0]
+___
+    while ($l) {		# accumulate all values
+	$code.="	add	@T[-$l], %rax\n";
+	$l--;
+    }
+	$code.=<<___;
+	adc	\$0, @T[0]	# consume eventual carry
+	mov	%rax, 8*$i($out)
+	mov	@T[0], %rax
+___
+    push(@T,shift(@T));
+}
+$code.=<<___;
+	ret
+.size	rsaz_1024_red2norm_avx2,.-rsaz_1024_red2norm_avx2
+
+.globl	rsaz_1024_norm2red_avx2
+.type	rsaz_1024_norm2red_avx2,\@abi-omnipotent
+.align	32
+rsaz_1024_norm2red_avx2:
+	sub	\$-128,$out	# size optimization
+	mov	($inp),@T[0]
+	mov	\$0x1fffffff,%eax
+___
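+# The converse conversion: slice the 16 x 64-bit input into 36 digits of
+# 29 bits each (one per 64-bit slot), using shrd to stitch together digits
+# that straddle a 64-bit word boundary and %rax as the 0x1fffffff mask, and
+# pad four zero digits at the top.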
+for ($j=0,$i=0; $i<16; $i++) {
+    $code.="	mov	`8*($i+1)`($inp),@T[1]\n"	if ($i<15);
+    $code.="	xor	@T[1],@T[1]\n"			if ($i==15);
+    my $k=1;
+    while (29*($j+1)<64*($i+1)) {
+    	$code.=<<___;
+	mov	@T[0],@T[-$k]
+	shr	\$`29*$j`,@T[-$k]
+	and	%rax,@T[-$k]				# &0x1fffffff
+	mov	@T[-$k],`8*$j-128`($out)
+___
+	$j++; $k++;
+    }
+    $code.=<<___;
+	shrd	\$`29*$j`,@T[1],@T[0]
+	and	%rax,@T[0]
+	mov	@T[0],`8*$j-128`($out)
+___
+    $j++;
+    push(@T,shift(@T));
+}
+$code.=<<___;
+	mov	@T[0],`8*$j-128`($out)			# zero
+	mov	@T[0],`8*($j+1)-128`($out)
+	mov	@T[0],`8*($j+2)-128`($out)
+	mov	@T[0],`8*($j+3)-128`($out)
+	ret
+.size	rsaz_1024_norm2red_avx2,.-rsaz_1024_norm2red_avx2
+___
+}
+{
+my ($out,$inp,$power) = $win64 ? ("%rcx","%rdx","%r8d") : ("%rdi","%rsi","%edx");
+
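+# Layout assumed by the two routines below: the window table holds 2^5 = 32
+# entries of 9 x 16 bytes each. scatter5 packs every 4-digit ymm chunk into
+# 16 bytes (via .Lscatter_permd) and stores the 9 chunks of entry $power at
+# a 512-byte stride; gather5 reads one 16-byte slot from each of the eight
+# 64-byte groups of a row, keeps only the group selected by the
+# .Lgather_table byte masks and ORs the rest away, so the set of cache lines
+# touched does not depend on the secret index.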
+$code.=<<___;
+.globl	rsaz_1024_scatter5_avx2
+.type	rsaz_1024_scatter5_avx2,\@abi-omnipotent
+.align	32
+rsaz_1024_scatter5_avx2:
+	vzeroupper
+	vmovdqu	.Lscatter_permd(%rip),%ymm5
+	shl	\$4,$power
+	lea	($out,$power),$out
+	mov	\$9,%eax
+	jmp	.Loop_scatter_1024
+
+.align	32
+.Loop_scatter_1024:
+	vmovdqu		($inp),%ymm0
+	lea		32($inp),$inp
+	vpermd		%ymm0,%ymm5,%ymm0
+	vmovdqu		%xmm0,($out)
+	lea		16*32($out),$out
+	dec	%eax
+	jnz	.Loop_scatter_1024
+
+	vzeroupper
+	ret
+.size	rsaz_1024_scatter5_avx2,.-rsaz_1024_scatter5_avx2
+
+.globl	rsaz_1024_gather5_avx2
+.type	rsaz_1024_gather5_avx2,\@abi-omnipotent
+.align	32
+rsaz_1024_gather5_avx2:
+___
+$code.=<<___ if ($win64);
+	lea	-0x88(%rsp),%rax
+.LSEH_begin_rsaz_1024_gather5:
+	# I can't trust assembler to use specific encoding:-(
+	.byte	0x48,0x8d,0x60,0xe0		#lea	-0x20(%rax),%rsp
+	.byte	0x0f,0x29,0x70,0xe0		#movaps	%xmm6,-0x20(%rax)
+	.byte	0x0f,0x29,0x78,0xf0		#movaps	%xmm7,-0x10(%rax)
+	.byte	0x44,0x0f,0x29,0x00		#movaps	%xmm8,0(%rax)
+	.byte	0x44,0x0f,0x29,0x48,0x10	#movaps	%xmm9,0x10(%rax)
+	.byte	0x44,0x0f,0x29,0x50,0x20	#movaps	%xmm10,0x20(%rax)
+	.byte	0x44,0x0f,0x29,0x58,0x30	#movaps	%xmm11,0x30(%rax)
+	.byte	0x44,0x0f,0x29,0x60,0x40	#movaps	%xmm12,0x40(%rax)
+	.byte	0x44,0x0f,0x29,0x68,0x50	#movaps	%xmm13,0x50(%rax)
+	.byte	0x44,0x0f,0x29,0x70,0x60	#movaps	%xmm14,0x60(%rax)
+	.byte	0x44,0x0f,0x29,0x78,0x70	#movaps	%xmm15,0x70(%rax)
+___
+$code.=<<___;
+	vzeroupper
+	lea	.Lgather_table(%rip),%r11
+	mov	$power,%eax
+	and	\$3,$power
+	shr	\$2,%eax			# cache line number
+	shl	\$4,$power			# offset within cache line
+
+	vmovdqu		-32(%r11),%ymm7		# .Lgather_permd
+	vpbroadcastb	8(%r11,%rax), %xmm8
+	vpbroadcastb	7(%r11,%rax), %xmm9
+	vpbroadcastb	6(%r11,%rax), %xmm10
+	vpbroadcastb	5(%r11,%rax), %xmm11
+	vpbroadcastb	4(%r11,%rax), %xmm12
+	vpbroadcastb	3(%r11,%rax), %xmm13
+	vpbroadcastb	2(%r11,%rax), %xmm14
+	vpbroadcastb	1(%r11,%rax), %xmm15
+
+	lea	($inp,$power),$inp
+	mov	\$64,%r11			# size optimization
+	mov	\$9,%eax
+	jmp	.Loop_gather_1024
+
+.align	32
+.Loop_gather_1024:
+	vpand		($inp),			%xmm8,%xmm0
+	vpand		($inp,%r11),		%xmm9,%xmm1
+	vpand		($inp,%r11,2),		%xmm10,%xmm2
+	vpand		64($inp,%r11,2),	%xmm11,%xmm3
+	 vpor					%xmm0,%xmm1,%xmm1
+	vpand		($inp,%r11,4),		%xmm12,%xmm4
+	 vpor					%xmm2,%xmm3,%xmm3
+	vpand		64($inp,%r11,4),	%xmm13,%xmm5
+	 vpor					%xmm1,%xmm3,%xmm3
+	vpand		-128($inp,%r11,8),	%xmm14,%xmm6
+	 vpor					%xmm4,%xmm5,%xmm5
+	vpand		-64($inp,%r11,8),	%xmm15,%xmm2
+	lea		($inp,%r11,8),$inp
+	 vpor					%xmm3,%xmm5,%xmm5
+	 vpor					%xmm2,%xmm6,%xmm6
+	 vpor					%xmm5,%xmm6,%xmm6
+	vpermd		%ymm6,%ymm7,%ymm6
+	vmovdqu		%ymm6,($out)
+	lea		32($out),$out
+	dec	%eax
+	jnz	.Loop_gather_1024
+
+	vpxor	%ymm0,%ymm0,%ymm0
+	vmovdqu	%ymm0,($out)
+	vzeroupper
+___
+$code.=<<___ if ($win64);
+	movaps	(%rsp),%xmm6
+	movaps	0x10(%rsp),%xmm7
+	movaps	0x20(%rsp),%xmm8
+	movaps	0x30(%rsp),%xmm9
+	movaps	0x40(%rsp),%xmm10
+	movaps	0x50(%rsp),%xmm11
+	movaps	0x60(%rsp),%xmm12
+	movaps	0x70(%rsp),%xmm13
+	movaps	0x80(%rsp),%xmm14
+	movaps	0x90(%rsp),%xmm15
+	lea	0xa8(%rsp),%rsp
+.LSEH_end_rsaz_1024_gather5:
+___
+$code.=<<___;
+	ret
+.size	rsaz_1024_gather5_avx2,.-rsaz_1024_gather5_avx2
+___
+}
+
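+# rsaz_avx2_eligible() below just reports bit 5 of the third 32-bit word of
+# OPENSSL_ia32cap_P, which is taken here to mirror the AVX2 feature flag
+# from CPUID leaf 7 (EBX bit 5).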
+$code.=<<___;
+.extern	OPENSSL_ia32cap_P
+.globl	rsaz_avx2_eligible
+.type	rsaz_avx2_eligible,\@abi-omnipotent
+.align	32
+rsaz_avx2_eligible:
+	mov	OPENSSL_ia32cap_P+8(%rip),%eax
+	and	\$`1<<5`,%eax
+	shr	\$5,%eax
+	ret
+.size	rsaz_avx2_eligible,.-rsaz_avx2_eligible
+
+.align	64
+.Land_mask:
+	.quad	0x1fffffff,0x1fffffff,0x1fffffff,-1
+.Lscatter_permd:
+	.long	0,2,4,6,7,7,7,7
+.Lgather_permd:
+	.long	0,7,1,7,2,7,3,7
+.Lgather_table:
+	.byte	0,0,0,0,0,0,0,0, 0xff,0,0,0,0,0,0,0
+.align	64
+___
+
+if ($win64) {
+$rec="%rcx";
+$frame="%rdx";
+$context="%r8";
+$disp="%r9";
+
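+# rsaz_se_handler is the Win64 SEH unwind handler shared by the sqr and mul
+# routines: if the faulting RIP lies between the _body and _epilogue labels
+# it restores the callee-saved GPRs and the ten saved xmm registers from the
+# frame recorded in context->Rbp before handing control back to the unwinder.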
+$code.=<<___
+.extern	__imp_RtlVirtualUnwind
+.type	rsaz_se_handler,\@abi-omnipotent
+.align	16
+rsaz_se_handler:
+	push	%rsi
+	push	%rdi
+	push	%rbx
+	push	%rbp
+	push	%r12
+	push	%r13
+	push	%r14
+	push	%r15
+	pushfq
+	sub	\$64,%rsp
+
+	mov	120($context),%rax	# pull context->Rax
+	mov	248($context),%rbx	# pull context->Rip
+
+	mov	8($disp),%rsi		# disp->ImageBase
+	mov	56($disp),%r11		# disp->HandlerData
+
+	mov	0(%r11),%r10d		# HandlerData[0]
+	lea	(%rsi,%r10),%r10	# prologue label
+	cmp	%r10,%rbx		# context->Rip<prologue label
+	jb	.Lcommon_seh_tail
+
+	mov	152($context),%rax	# pull context->Rsp
+
+	mov	4(%r11),%r10d		# HandlerData[1]
+	lea	(%rsi,%r10),%r10	# epilogue label
+	cmp	%r10,%rbx		# context->Rip>=epilogue label
+	jae	.Lcommon_seh_tail
+
+	mov	160($context),%rax	# pull context->Rbp
+
+	mov	-48(%rax),%r15
+	mov	-40(%rax),%r14
+	mov	-32(%rax),%r13
+	mov	-24(%rax),%r12
+	mov	-16(%rax),%rbp
+	mov	-8(%rax),%rbx
+	mov	%r15,240($context)
+	mov	%r14,232($context)
+	mov	%r13,224($context)
+	mov	%r12,216($context)
+	mov	%rbp,160($context)
+	mov	%rbx,144($context)
+
+	lea	-0xd8(%rax),%rsi	# %xmm save area
+	lea	512($context),%rdi	# & context.Xmm6
+	mov	\$20,%ecx		# 10*sizeof(%xmm0)/sizeof(%rax)
+	.long	0xa548f3fc		# cld; rep movsq
+
+.Lcommon_seh_tail:
+	mov	8(%rax),%rdi
+	mov	16(%rax),%rsi
+	mov	%rax,152($context)	# restore context->Rsp
+	mov	%rsi,168($context)	# restore context->Rsi
+	mov	%rdi,176($context)	# restore context->Rdi
+
+	mov	40($disp),%rdi		# disp->ContextRecord
+	mov	$context,%rsi		# context
+	mov	\$154,%ecx		# sizeof(CONTEXT)
+	.long	0xa548f3fc		# cld; rep movsq
+
+	mov	$disp,%rsi
+	xor	%rcx,%rcx		# arg1, UNW_FLAG_NHANDLER
+	mov	8(%rsi),%rdx		# arg2, disp->ImageBase
+	mov	0(%rsi),%r8		# arg3, disp->ControlPc
+	mov	16(%rsi),%r9		# arg4, disp->FunctionEntry
+	mov	40(%rsi),%r10		# disp->ContextRecord
+	lea	56(%rsi),%r11		# &disp->HandlerData
+	lea	24(%rsi),%r12		# &disp->EstablisherFrame
+	mov	%r10,32(%rsp)		# arg5
+	mov	%r11,40(%rsp)		# arg6
+	mov	%r12,48(%rsp)		# arg7
+	mov	%rcx,56(%rsp)		# arg8, (NULL)
+	call	*__imp_RtlVirtualUnwind(%rip)
+
+	mov	\$1,%eax		# ExceptionContinueSearch
+	add	\$64,%rsp
+	popfq
+	pop	%r15
+	pop	%r14
+	pop	%r13
+	pop	%r12
+	pop	%rbp
+	pop	%rbx
+	pop	%rdi
+	pop	%rsi
+	ret
+.size	rsaz_se_handler,.-rsaz_se_handler
+
+.section	.pdata
+.align	4
+	.rva	.LSEH_begin_rsaz_1024_sqr_avx2
+	.rva	.LSEH_end_rsaz_1024_sqr_avx2
+	.rva	.LSEH_info_rsaz_1024_sqr_avx2
+
+	.rva	.LSEH_begin_rsaz_1024_mul_avx2
+	.rva	.LSEH_end_rsaz_1024_mul_avx2
+	.rva	.LSEH_info_rsaz_1024_mul_avx2
+
+	.rva	.LSEH_begin_rsaz_1024_gather5
+	.rva	.LSEH_end_rsaz_1024_gather5
+	.rva	.LSEH_info_rsaz_1024_gather5
+.section	.xdata
+.align	8
+.LSEH_info_rsaz_1024_sqr_avx2:
+	.byte	9,0,0,0
+	.rva	rsaz_se_handler
+	.rva	.Lsqr_1024_body,.Lsqr_1024_epilogue
+.LSEH_info_rsaz_1024_mul_avx2:
+	.byte	9,0,0,0
+	.rva	rsaz_se_handler
+	.rva	.Lmul_1024_body,.Lmul_1024_epilogue
+.LSEH_info_rsaz_1024_gather5:
+	.byte	0x01,0x33,0x16,0x00
+	.byte	0x33,0xf8,0x09,0x00	#movaps 0x90(rsp),xmm15
+	.byte	0x2e,0xe8,0x08,0x00	#movaps 0x80(rsp),xmm14
+	.byte	0x29,0xd8,0x07,0x00	#movaps 0x70(rsp),xmm13
+	.byte	0x24,0xc8,0x06,0x00	#movaps 0x60(rsp),xmm12
+	.byte	0x1f,0xb8,0x05,0x00	#movaps 0x50(rsp),xmm11
+	.byte	0x1a,0xa8,0x04,0x00	#movaps 0x40(rsp),xmm10
+	.byte	0x15,0x98,0x03,0x00	#movaps 0x30(rsp),xmm9
+	.byte	0x10,0x88,0x02,0x00	#movaps 0x20(rsp),xmm8
+	.byte	0x0c,0x78,0x01,0x00	#movaps 0x10(rsp),xmm7
+	.byte	0x08,0x68,0x00,0x00	#movaps 0x00(rsp),xmm6
+	.byte	0x04,0x01,0x15,0x00	#sub	rsp,0xa8
+___
+}
+
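+# Post-process the generated code: evaluate the arithmetic quoted in
+# backticks (produced by the red2norm/norm2red generators), reduce shift
+# immediates mod 64, and rewrite ymm register names to xmm for instructions
+# that formally take xmm operands, so that older assemblers accept them.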
+foreach (split("\n",$code)) {
+	s/\`([^\`]*)\`/eval($1)/ge;
+
+	s/\b(sh[rl]d?\s+\$)(-?[0-9]+)/$1.$2%64/ge		or
+
+	s/\b(vmov[dq])\b(.+)%ymm([0-9]+)/$1$2%xmm$3/go		or
+	s/\b(vmovdqu)\b(.+)%x%ymm([0-9]+)/$1$2%xmm$3/go		or
+	s/\b(vpinsr[qd])\b(.+)%ymm([0-9]+)/$1$2%xmm$3/go	or
+	s/\b(vpextr[qd])\b(.+)%ymm([0-9]+)/$1$2%xmm$3/go	or
+	s/\b(vpbroadcast[qd]\s+)%ymm([0-9]+)/$1%xmm$2/go;
+	print $_,"\n";
+}
+
+}}} else {{{
+print <<___;	# assembler is too old
+.text
+
+.globl	rsaz_avx2_eligible
+.type	rsaz_avx2_eligible,\@abi-omnipotent
+rsaz_avx2_eligible:
+	xor	%eax,%eax
+	ret
+.size	rsaz_avx2_eligible,.-rsaz_avx2_eligible
+
+.globl	rsaz_1024_sqr_avx2
+.globl	rsaz_1024_mul_avx2
+.globl	rsaz_1024_norm2red_avx2
+.globl	rsaz_1024_red2norm_avx2
+.globl	rsaz_1024_scatter5_avx2
+.globl	rsaz_1024_gather5_avx2
+.type	rsaz_1024_sqr_avx2,\@abi-omnipotent
+rsaz_1024_sqr_avx2:
+rsaz_1024_mul_avx2:
+rsaz_1024_norm2red_avx2:
+rsaz_1024_red2norm_avx2:
+rsaz_1024_scatter5_avx2:
+rsaz_1024_gather5_avx2:
+	.byte	0x0f,0x0b	# ud2
+	ret
+.size	rsaz_1024_sqr_avx2,.-rsaz_1024_sqr_avx2
+___
+}}}
+
+close STDOUT;
diff --git a/crypto/bn/asm/rsaz-x86_64.pl b/crypto/bn/asm/rsaz-x86_64.pl
new file mode 100755
index 0000000000..ed84463697
--- /dev/null
+++ b/crypto/bn/asm/rsaz-x86_64.pl
@@ -0,0 +1,1706 @@
+#!/usr/bin/env perl
+
+#******************************************************************************#
+#* Copyright(c) 2012, Intel Corp.                                             *#
+#* Developers and authors:                                                    *#
+#* Shay Gueron (1, 2), and Vlad Krasnov (1)                                   *#
+#* (1) Intel Architecture Group, Microprocessor and Chipset Development,      *#
+#*     Israel Development Center, Haifa, Israel                               *#
+#* (2) University of Haifa                                                    *#
+#******************************************************************************#
+#* This submission to OpenSSL is to be made available under the OpenSSL       *#
+#* license, and only to the OpenSSL project, in order to allow integration    *#
+#* into the publicly distributed code.                                        *#
+#* The use of this code, or portions of this code, or concepts embedded in    *#
+#* this code, or modification of this code and/or algorithm(s) in it, or the  *#
+#* use of this code for any other purpose than stated above, requires special *#
+#* licensing.                                                                 *#
+#******************************************************************************#
+#******************************************************************************#
+#* DISCLAIMER:                                                                *#
+#* THIS SOFTWARE IS PROVIDED BY THE CONTRIBUTORS AND THE COPYRIGHT OWNERS     *#
+#* ``AS IS''. ANY EXPRESSED OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED *#
+#* TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR *#
+#* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE CONTRIBUTORS OR THE COPYRIGHT*#
+#* OWNERS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, *#
+#* OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF    *#
+#* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS   *#
+#* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN    *#
+#* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)    *#
+#* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE *#
+#* POSSIBILITY OF SUCH DAMAGE.                                                *#
+#******************************************************************************#
+#* Reference:                                                                 *#
+#* [1] S. Gueron, "Efficient Software Implementations of Modular              *#
+#*     Exponentiation", http://eprint.iacr.org/2011/239                       *#
+#* [2] S. Gueron, V. Krasnov. "Speeding up Big-Numbers Squaring".             *#
+#*     IEEE Proceedings of 9th International Conference on Information        *#
+#*     Technology: New Generations (ITNG 2012), 821-823 (2012).               *#
+#* [3] S. Gueron, Efficient Software Implementations of Modular Exponentiation*#
+#*     Journal of Cryptographic Engineering 2:31-43 (2012).                   *#
+#* [4] S. Gueron, V. Krasnov: "[PATCH] Efficient and side channel analysis    *#
+#*     resistant 512-bit and 1024-bit modular exponentiation for optimizing   *#
+#*     RSA1024 and RSA2048 on x86_64 platforms",                              *#
+#*     http://rt.openssl.org/Ticket/Display.html?id=2582&user=guest&pass=guest*#
+################################################################################
+
+# While the original submission covers 512- and 1024-bit exponentiation,
+# this module is limited to the 512-bit version only (and as such
+# accelerates RSA1024 sign). This is because the improvement for longer
+# keys is not high enough to justify the effort; the highest measured
+# improvement was ~5% on Westmere. [This is relative to OpenSSL 1.0.2,
+# upcoming at the time of this writing!] Nor does this module implement
+# a "monolithic" complete-exponentiation jumbo-subroutine; it adheres
+# to a more modular mixture of C and assembly. And it's optimized even
+# for processors other than Intel Core family (see table below for
+# improvement coefficients).
+# 						<appro@openssl.org>
+#
+# RSA1024 sign/sec	this/original	|this/rsax(*)	this/fips(*)
+#			----------------+---------------------------
+# Opteron		+13%		|+5%		+20%
+# Bulldozer		-0%		|-1%		+10%
+# P4			+11%		|+7%		+8%
+# Westmere		+5%		|+14%		+17%
+# Sandy Bridge		+2%		|+12%		+29%
+# Ivy Bridge		+1%		|+11%		+35%
+# Haswell(**)		-0%		|+12%		+39%
+# Atom			+13%		|+11%		+4%
+# VIA Nano		+70%		|+9%		+25%
+#
+# (*)	rsax engine and fips numbers are presented for reference
+#	purposes;
+# (**)	you might notice MULX code below; strangely enough, the gain is
+#	marginal, which is why that code remains disabled;
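+#
+# The reason why 512-bit operations accelerate "RSA1024 sign" is that an
+# RSA private-key operation is customarily performed with the CRT, i.e.
+# as two half-size modular exponentiations. Below is a Math::BigInt
+# sketch of that reduction (editor's illustration only; it is never
+# called and is not part of the generated code):
+
+sub _ref_rsa1024_crt_sign {
+	use Math::BigInt;
+	my ($m, $p, $q, $dp, $dq, $qinv) = @_;	# Math::BigInt CRT components
+	my $sp = $m->copy->bmodpow($dp, $p);	# 512-bit exponentiation mod p
+	my $sq = $m->copy->bmodpow($dq, $q);	# 512-bit exponentiation mod q
+	return $sq + $q * (($sp - $sq) * $qinv % $p);	# Garner recombination
+}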
+
+$flavour = shift;
+$output  = shift;
+if ($flavour =~ /\./) { $output = $flavour; undef $flavour; }
+
+$win64=0; $win64=1 if ($flavour =~ /[nm]asm|mingw64/ || $output =~ /\.asm$/);
+
+$0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
+( $xlate="${dir}x86_64-xlate.pl" and -f $xlate ) or
+( $xlate="${dir}../../perlasm/x86_64-xlate.pl" and -f $xlate) or
+die "can't locate x86_64-xlate.pl";
+
+open OUT,"| $^X $xlate $flavour $output";
+*STDOUT=*OUT;
+
+($out, $inp, $mod) = ("%rdi", "%rsi", "%rbp");	# common internal API
+{
+my ($out,$inp,$mod,$n0,$times) = ("%rdi","%rsi","%rdx","%rcx","%r8d");
+
+$code.=<<___;
+.text
+
+.globl	rsaz_512_sqr
+.type	rsaz_512_sqr,\@function,4
+.align	32
+rsaz_512_sqr:				# 25-29% faster than rsaz_512_mul
+	push	%rbx
+	push	%rbp
+	push	%r12
+	push	%r13
+	push	%r14
+	push	%r15
+
+	subq	\$128+24, %rsp
+.Lsqr_body:
+	movq	$mod, %rbp		# common argument
+	movq	($inp), %rdx
+	movq	8($inp), %rax
+	movq	$n0, 128(%rsp)
+	jmp	.Loop_sqr
+
+.align	32
+.Loop_sqr:
+	movl	$times,128+8(%rsp)
+___
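+
+# rsaz_512_sqr exploits the symmetry of squaring: each cross product
+# a[i]*a[j] with i<j is computed only once and doubled (the add/adc and
+# shld-emulating lea pairs below), and the diagonal squares a[i]^2 are
+# folded in afterwards, which is essentially where the 25-29% advantage
+# over a full 8x8 multiplication comes from. A limb-level reference
+# model of that decomposition (editor's sketch, never called by the
+# generator):
+
+sub _ref_sqr_512 {
+	use Math::BigInt;
+	my @a = @_;			# eight Math::BigInt limbs, least significant first
+	my @t = map { Math::BigInt->bzero() } 0..15;
+	for my $i (0..7) {
+		for my $j ($i+1 .. 7) {		# each cross product once, doubled
+			$t[$i+$j] += 2 * $a[$i] * $a[$j];
+		}
+		$t[2*$i] += $a[$i] * $a[$i];	# diagonal square terms
+	}
+	# 16 unreduced limbs; the assembly below interleaves the doubling
+	# with carry propagation and stores the result at (%rsp)
+	return @t;
+}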
+if (1) {
+$code.=<<___;
+#first iteration
+	movq	%rdx, %rbx
+	mulq	%rdx
+	movq	%rax, %r8
+	movq	16($inp), %rax
+	movq	%rdx, %r9
+
+	mulq	%rbx
+	addq	%rax, %r9
+	movq	24($inp), %rax
+	movq	%rdx, %r10
+	adcq	\$0, %r10
+
+	mulq	%rbx
+	addq	%rax, %r10
+	movq	32($inp), %rax
+	movq	%rdx, %r11
+	adcq	\$0, %r11
+
+	mulq	%rbx
+	addq	%rax, %r11
+	movq	40($inp), %rax
+	movq	%rdx, %r12
+	adcq	\$0, %r12
+
+	mulq	%rbx
+	addq	%rax, %r12
+	movq	48($inp), %rax
+	movq	%rdx, %r13
+	adcq	\$0, %r13
+
+	mulq	%rbx
+	addq	%rax, %r13
+	movq	56($inp), %rax
+	movq	%rdx, %r14
+	adcq	\$0, %r14
+
+	mulq	%rbx
+	addq	%rax, %r14
+	movq	%rbx, %rax
+	movq	%rdx, %r15
+	adcq	\$0, %r15
+
+	addq	%r8, %r8		#shlq	\$1, %r8
+	movq	%r9, %rcx
+	adcq	%r9, %r9		#shld	\$1, %r8, %r9
+
+	mulq	%rax
+	movq	%rax, (%rsp)
+	addq	%rdx, %r8
+	adcq	\$0, %r9
+
+	movq	%r8, 8(%rsp)
+	shrq	\$63, %rcx
+
+#second iteration
+	movq	8($inp), %r8
+	movq	16($inp), %rax
+	mulq	%r8
+	addq	%rax, %r10
+	movq	24($inp), %rax
+	movq	%rdx, %rbx
+	adcq	\$0, %rbx
+
+	mulq	%r8
+	addq	%rax, %r11
+	movq	32($inp), %rax
+	adcq	\$0, %rdx
+	addq	%rbx, %r11
+	movq	%rdx, %rbx
+	adcq	\$0, %rbx
+
+	mulq	%r8
+	addq	%rax, %r12
+	movq	40($inp), %rax
+	adcq	\$0, %rdx
+	addq	%rbx, %r12
+	movq	%rdx, %rbx
+	adcq	\$0, %rbx
+
+	mulq	%r8
+	addq	%rax, %r13
+	movq	48($inp), %rax
+	adcq	\$0, %rdx
+	addq	%rbx, %r13
+	movq	%rdx, %rbx
+	adcq	\$0, %rbx
+
+	mulq	%r8
+	addq	%rax, %r14
+	movq	56($inp), %rax
+	adcq	\$0, %rdx
+	addq	%rbx, %r14
+	movq	%rdx, %rbx
+	adcq	\$0, %rbx
+
+	mulq	%r8
+	addq	%rax, %r15
+	movq	%r8, %rax
+	adcq	\$0, %rdx
+	addq	%rbx, %r15
+	movq	%rdx, %r8
+	movq	%r10, %rdx
+	adcq	\$0, %r8
+
+	add	%rdx, %rdx
+	lea	(%rcx,%r10,2), %r10	#shld	\$1, %rcx, %r10
+	movq	%r11, %rbx
+	adcq	%r11, %r11		#shld	\$1, %r10, %r11
+
+	mulq	%rax
+	addq	%rax, %r9
+	adcq	%rdx, %r10
+	adcq	\$0, %r11
+
+	movq	%r9, 16(%rsp)
+	movq	%r10, 24(%rsp)
+	shrq	\$63, %rbx
+	
+#third iteration
+	movq	16($inp), %r9	
+	movq	24($inp), %rax
+	mulq	%r9
+	addq	%rax, %r12
+	movq	32($inp), %rax
+	movq	%rdx, %rcx
+	adcq	\$0, %rcx
+
+	mulq	%r9
+	addq	%rax, %r13
+	movq	40($inp), %rax
+	adcq	\$0, %rdx
+	addq	%rcx, %r13
+	movq	%rdx, %rcx
+	adcq	\$0, %rcx
+
+	mulq	%r9
+	addq	%rax, %r14
+	movq	48($inp), %rax
+	adcq	\$0, %rdx
+	addq	%rcx, %r14
+	movq	%rdx, %rcx
+	adcq	\$0, %rcx
+
+	mulq	%r9
+	 movq	%r12, %r10
+	 lea	(%rbx,%r12,2), %r12	#shld	\$1, %rbx, %r12
+	addq	%rax, %r15
+	movq	56($inp), %rax
+	adcq	\$0, %rdx
+	addq	%rcx, %r15
+	movq	%rdx, %rcx
+	adcq	\$0, %rcx
+
+	mulq	%r9
+	 shrq	\$63, %r10
+	addq	%rax, %r8
+	movq	%r9, %rax
+	adcq	\$0, %rdx
+	addq	%rcx, %r8
+	movq	%rdx, %r9
+	adcq	\$0, %r9
+
+	movq	%r13, %rcx
+	leaq	(%r10,%r13,2), %r13	#shld	\$1, %r12, %r13
+
+	mulq	%rax
+	addq	%rax, %r11
+	adcq	%rdx, %r12
+	adcq	\$0, %r13
+
+	movq	%r11, 32(%rsp)
+	movq	%r12, 40(%rsp)
+	shrq	\$63, %rcx
+
+#fourth iteration
+	movq	24($inp), %r10
+	movq	32($inp), %rax
+	mulq	%r10
+	addq	%rax, %r14
+	movq	40($inp), %rax
+	movq	%rdx, %rbx
+	adcq	\$0, %rbx
+
+	mulq	%r10
+	addq	%rax, %r15
+	movq	48($inp), %rax
+	adcq	\$0, %rdx
+	addq	%rbx, %r15
+	movq	%rdx, %rbx
+	adcq	\$0, %rbx
+
+	mulq	%r10
+	 movq	%r14, %r12
+	 leaq	(%rcx,%r14,2), %r14	#shld	\$1, %rcx, %r14
+	addq	%rax, %r8
+	movq	56($inp), %rax
+	adcq	\$0, %rdx
+	addq	%rbx, %r8
+	movq	%rdx, %rbx
+	adcq	\$0, %rbx
+
+	mulq	%r10
+	 shrq	\$63, %r12
+	addq	%rax, %r9
+	movq	%r10, %rax
+	adcq	\$0, %rdx
+	addq	%rbx, %r9
+	movq	%rdx, %r10
+	adcq	\$0, %r10
+
+	movq	%r15, %rbx
+	leaq	(%r12,%r15,2),%r15	#shld	\$1, %r14, %r15
+
+	mulq	%rax
+	addq	%rax, %r13
+	adcq	%rdx, %r14
+	adcq	\$0, %r15
+
+	movq	%r13, 48(%rsp)
+	movq	%r14, 56(%rsp)
+	shrq	\$63, %rbx
+
+#fifth iteration
+	movq	32($inp), %r11
+	movq	40($inp), %rax
+	mulq	%r11
+	addq	%rax, %r8
+	movq	48($inp), %rax
+	movq	%rdx, %rcx
+	adcq	\$0, %rcx
+
+	mulq	%r11
+	addq	%rax, %r9
+	movq	56($inp), %rax
+	adcq	\$0, %rdx
+	 movq	%r8, %r12
+	 leaq	(%rbx,%r8,2), %r8	#shld	\$1, %rbx, %r8
+	addq	%rcx, %r9
+	movq	%rdx, %rcx
+	adcq	\$0, %rcx
+
+	mulq	%r11
+	 shrq	\$63, %r12
+	addq	%rax, %r10
+	movq	%r11, %rax
+	adcq	\$0, %rdx
+	addq	%rcx, %r10
+	movq	%rdx, %r11
+	adcq	\$0, %r11
+
+	movq	%r9, %rcx
+	leaq	(%r12,%r9,2), %r9	#shld	\$1, %r8, %r9
+
+	mulq	%rax
+	addq	%rax, %r15
+	adcq	%rdx, %r8
+	adcq	\$0, %r9
+
+	movq	%r15, 64(%rsp)
+	movq	%r8, 72(%rsp)
+	shrq	\$63, %rcx
+
+#sixth iteration
+	movq	40($inp), %r12
+	movq	48($inp), %rax
+	mulq	%r12
+	addq	%rax, %r10
+	movq	56($inp), %rax
+	movq	%rdx, %rbx
+	adcq	\$0, %rbx
+
+	mulq	%r12
+	addq	%rax, %r11
+	movq	%r12, %rax
+	 movq	%r10, %r15
+	 leaq	(%rcx,%r10,2), %r10	#shld	\$1, %rcx, %r10
+	adcq	\$0, %rdx
+	 shrq	\$63, %r15
+	addq	%rbx, %r11
+	movq	%rdx, %r12
+	adcq	\$0, %r12
+
+	movq	%r11, %rbx
+	leaq	(%r15,%r11,2), %r11	#shld	\$1, %r10, %r11
+
+	mulq	%rax
+	addq	%rax, %r9
+	adcq	%rdx, %r10
+	adcq	\$0, %r11
+
+	movq	%r9, 80(%rsp)
+	movq	%r10, 88(%rsp)
+
+#seventh iteration
+	movq	48($inp), %r13
+	movq	56($inp), %rax
+	mulq	%r13
+	addq	%rax, %r12
+	movq	%r13, %rax
+	movq	%rdx, %r13
+	adcq	\$0, %r13
+
+	xorq	%r14, %r14
+	shlq	\$1, %rbx
+	adcq	%r12, %r12		#shld	\$1, %rbx, %r12
+	adcq	%r13, %r13		#shld	\$1, %r12, %r13
+	adcq	%r14, %r14		#shld	\$1, %r13, %r14
+
+	mulq	%rax
+	addq	%rax, %r11
+	adcq	%rdx, %r12
+	adcq	\$0, %r13
+
+	movq	%r11, 96(%rsp)
+	movq	%r12, 104(%rsp)
+
+#eighth iteration
+	movq	56($inp), %rax
+	mulq	%rax
+	addq	%rax, %r13
+	adcq	\$0, %rdx
+
+	addq	%rdx, %r14
+
+	movq	%r13, 112(%rsp)
+	movq	%r14, 120(%rsp)
+___
+} else {
+$code.=<<___;
+	movq	$out, %xmm0		# off-load
+#first iteration	
+	mulx	%rax, %r8, %r9
+
+	mulx	16($inp), %rcx, %r10
+
+	mulx	24($inp), %rax, %r11
+	add	%rcx, %r9
+
+	mulx	32($inp), %rcx, %r12
+	adc	%rax, %r10
+
+	mulx	40($inp), %rax, %r13
+	adc	%rcx, %r11
+
+	mulx	48($inp), %rcx, %r14
+	adc	%rax, %r12
+
+	mulx	56($inp), %rax, %r15
+	adc	%rcx, %r13
+	mov	%r9, %rcx
+	adc	%rax, %r14
+	adc	\$0, %r15
+
+	shld	\$1, %r8, %r9
+	shl	\$1, %r8
+
+	mulx	%rdx, %rax, %rdx
+	add	%rdx, %r8
+	adc	\$0, %r9
+
+	mov	%rax, (%rsp)
+	mov	%r8, 8(%rsp)
+
+#second iteration	
+	mov	8($inp), %rdx
+	mulx	16($inp), %rax, %rbx
+
+	mulx	24($inp), $out, %r8
+	add	%rax, %r10
+	adc	%rbx, %r11
+	adc	\$0, %r8
+
+	mulx	32($inp), %rax, %rbx
+	add	$out, %r11
+	adc	%r8, %r12
+	adc	\$0, %rbx
+
+	mulx	40($inp), $out, %r8
+	add	%rax, %r12
+	adc	%rbx, %r13
+	adc	\$0, %r8
+
+	mulx	48($inp), %rax, %rbx
+	add	$out, %r13
+	adc	%r8, %r14
+	adc	\$0, %rbx
+
+	mulx	56($inp), $out, %r8
+	add	%rax, %r14
+	adc	%rbx, %r15
+	mov	%r11, %rbx
+	adc	\$0, %r8
+	add	$out, %r15
+	adc	\$0, %r8
+
+	shld	\$1, %r10, %r11
+	shld	\$1, %rcx, %r10
+
+	mulx	%rdx, %rax, %rcx
+	add	%rax, %r9
+	adc	%rcx, %r10
+	adc	\$0, %r11
+
+	mov	%r9, 16(%rsp)
+	mov	%r10, 24(%rsp)
+	
+#third iteration	
+	mov	16($inp), %rdx
+	mulx	24($inp), $out, %r9
+
+	mulx	32($inp), %rax, %rcx
+	add	$out, %r12
+	adc	%r9, %r13
+	adc	\$0, %rcx
+
+	mulx	40($inp), $out, %r9
+	add	%rax, %r13
+	adc	%rcx, %r14
+	adc	\$0, %r9
+
+	mulx	48($inp), %rax, %rcx
+	add	$out, %r14
+	adc	%r9, %r15
+	adc	\$0, %rcx
+
+	mulx	56($inp), $out, %r9
+	add	%rax, %r15
+	adc	%rcx, %r8
+	mov	%r13, %rcx
+	adc	\$0, %r9
+	add	$out, %r8
+	adc	\$0, %r9
+
+	shld	\$1, %r12, %r13
+	shld	\$1, %rbx, %r12
+
+	mulx	%rdx, %rax, %rdx
+	add	%rax, %r11
+	adc	%rdx, %r12
+	adc	\$0, %r13
+
+	mov	%r11, 32(%rsp)
+	mov	%r12, 40(%rsp)
+	
+#fourth iteration	
+	mov	24($inp), %rdx
+	mulx	32($inp), %rax, %rbx
+
+	mulx	40($inp), $out, %r10
+	add	%rax, %r14
+	adc	%rbx, %r15
+	adc	\$0, %r10
+
+	mulx	48($inp), %rax, %rbx
+	add	$out, %r15
+	adc	%r10, %r8
+	adc	\$0, %rbx
+
+	mulx	56($inp), $out, %r10
+	add	%rax, %r8
+	adc	\$0, %rbx
+	add	$out, %r9
+	adc	\$0, %r10
+	add	%rbx, %r9
+	mov	%r15, %rbx
+	adc	\$0, %r10
+
+	shld	\$1, %r14, %r15
+	shld	\$1, %rcx, %r14
+
+	mulx	%rdx, %rax, %rdx
+	add	%rax, %r13
+	adc	%rdx, %r14
+	adc	\$0, %r15
+
+	mov	%r13, 48(%rsp)
+	mov	%r14, 56(%rsp)
+	
+#fifth iteration	
+	mov	32($inp), %rdx
+	mulx	40($inp), $out, %r11
+
+	mulx	48($inp), %rax, %rcx
+	add	$out, %r8
+	adc	%r11, %r9
+	adc	\$0, %rcx
+
+	mulx	56($inp), $out, %r11
+	add	%rax, %r9
+	adc	%rcx, %r10
+	adc	\$0, %r11
+	add	$out, %r10
+	adc	\$0, %r11
+
+	mov	%r9, %rcx
+	shld	\$1, %r8, %r9
+	shld	\$1, %rbx, %r8
+
+	mulx	%rdx, %rax, %rdx
+	add	%rax, %r15
+	adc	%rdx, %r8
+	adc	\$0, %r9
+
+	mov	%r15, 64(%rsp)
+	mov	%r8, 72(%rsp)
+	
+#sixth iteration	
+	mov	40($inp), %rdx
+	mulx	48($inp), %rax, %rbx
+
+	mulx	56($inp), $out, %r12
+	add	%rax, %r10
+	adc	%rbx, %r11
+	adc	\$0, %r12
+	add	$out, %r11
+	adc	\$0, %r12
+
+	mov	%r11, %rbx
+	shld	\$1, %r10, %r11
+	shld	\$1, %rcx, %r10
+
+	mulx	%rdx, %rax, %rdx
+	add	%rax, %r9
+	adc	%rdx, %r10
+	adc	\$0, %r11
+
+	mov	%r9, 80(%rsp)
+	mov	%r10, 88(%rsp)
+
+#seventh iteration
+	mov	48($inp), %rdx
+	mulx	56($inp), %rax, %r13
+	add	%rax, %r12
+	adc	\$0, %r13
+
+	xor	%r14, %r14
+	shld	\$1, %r13, %r14
+	shld	\$1, %r12, %r13
+	shld	\$1, %rbx, %r12
+
+	mulx	%rdx, %rax, %rdx
+	add	%rax, %r11
+	adc	%rdx, %r12
+	adc	\$0, %r13
+
+	mov	%r11, 96(%rsp)
+	mov	%r12, 104(%rsp)
+
+#eighth iteration
+	mov	56($inp), %rdx
+	mulx	%rdx, %rax, %rdx
+	add	%rax, %r13
+	adc	\$0, %rdx
+	
+	add	%rdx, %r14
+
+	movq	%r13, 112(%rsp)
+	movq	%r14, 120(%rsp)
+	movq	%xmm0, $out
+___
+}
+$code.=<<___;
+	movq	(%rsp), %r8
+	movq	8(%rsp), %r9
+	movq	16(%rsp), %r10
+	movq	24(%rsp), %r11
+	movq	32(%rsp), %r12
+	movq	40(%rsp), %r13
+	movq	48(%rsp), %r14
+	movq	56(%rsp), %r15
+
+	call	_rsaz_512_reduce
+
+	addq	64(%rsp), %r8
+	adcq	72(%rsp), %r9
+	adcq	80(%rsp), %r10
+	adcq	88(%rsp), %r11
+	adcq	96(%rsp), %r12
+	adcq	104(%rsp), %r13
+	adcq	112(%rsp), %r14
+	adcq	120(%rsp), %r15
+	sbbq	%rcx, %rcx
+
+	call	_rsaz_512_subtract
+
+	movq	%r8, %rdx
+	movq	%r9, %rax
+	movl	128+8(%rsp), $times
+	movq	$out, $inp
+
+	decl	$times
+	jnz	.Loop_sqr
+
+	leaq	128+24+48(%rsp), %rax
+	movq	-48(%rax), %r15
+	movq	-40(%rax), %r14
+	movq	-32(%rax), %r13
+	movq	-24(%rax), %r12
+	movq	-16(%rax), %rbp
+	movq	-8(%rax), %rbx
+	leaq	(%rax), %rsp
+.Lsqr_epilogue:
+	ret
+.size	rsaz_512_sqr,.-rsaz_512_sqr
+___
+}
+{
+my ($out,$ap,$bp,$mod,$n0) = ("%rdi","%rsi","%rdx","%rcx","%r8");
+$code.=<<___;
+.globl	rsaz_512_mul
+.type	rsaz_512_mul,\@function,5
+.align	32
+rsaz_512_mul:
+	push	%rbx
+	push	%rbp
+	push	%r12
+	push	%r13
+	push	%r14
+	push	%r15
+
+	subq	\$128+24, %rsp
+.Lmul_body:
+	movq	$out, %xmm0		# off-load arguments
+	movq	$mod, %xmm1
+	movq	$n0, 128(%rsp)
+
+	movq	$bp, %rbp		# pass argument
+	call	_rsaz_512_mul
+
+	movq	%xmm0, $out
+	movq	%xmm1, %rbp
+
+	movq	(%rsp), %r8
+	movq	8(%rsp), %r9
+	movq	16(%rsp), %r10
+	movq	24(%rsp), %r11
+	movq	32(%rsp), %r12
+	movq	40(%rsp), %r13
+	movq	48(%rsp), %r14
+	movq	56(%rsp), %r15
+
+	call	_rsaz_512_reduce
+
+	addq	64(%rsp), %r8
+	adcq	72(%rsp), %r9
+	adcq	80(%rsp), %r10
+	adcq	88(%rsp), %r11
+	adcq	96(%rsp), %r12
+	adcq	104(%rsp), %r13
+	adcq	112(%rsp), %r14
+	adcq	120(%rsp), %r15
+	sbbq	%rcx, %rcx
+
+	call	_rsaz_512_subtract
+
+	leaq	128+24+48(%rsp), %rax
+	movq	-48(%rax), %r15
+	movq	-40(%rax), %r14
+	movq	-32(%rax), %r13
+	movq	-24(%rax), %r12
+	movq	-16(%rax), %rbp
+	movq	-8(%rax), %rbx
+	leaq	(%rax), %rsp
+.Lmul_epilogue:
+	ret
+.size	rsaz_512_mul,.-rsaz_512_mul
+___
+}
+{
+my ($out,$ap,$bp,$mod,$n0,$pwr) = ("%rdi","%rsi","%rdx","%rcx","%r8","%r9d");
+$code.=<<___;
+.globl	rsaz_512_mul_gather4
+.type	rsaz_512_mul_gather4,\@function,6
+.align	32
+rsaz_512_mul_gather4:
+	push	%rbx
+	push	%rbp
+	push	%r12
+	push	%r13
+	push	%r14
+	push	%r15
+
+	subq	\$128+24, %rsp
+.Lmul_gather4_body:
+	movl	64($bp,$pwr,4), %eax
+	movq	$out, %xmm0		# off-load arguments
+	movl	($bp,$pwr,4), %ebx
+	movq	$mod, %xmm1
+	movq	$n0, 128(%rsp)
+
+	shlq	\$32, %rax
+	or	%rax, %rbx
+	movq	($ap), %rax
+	 movq	8($ap), %rcx
+	 leaq	128($bp,$pwr,4), %rbp
+	mulq	%rbx			# 0 iteration
+	movq	%rax, (%rsp)
+	movq	%rcx, %rax
+	movq	%rdx, %r8
+
+	mulq	%rbx
+	 movd	(%rbp), %xmm4
+	addq	%rax, %r8
+	movq	16($ap), %rax
+	movq	%rdx, %r9
+	adcq	\$0, %r9
+
+	mulq	%rbx
+	 movd	64(%rbp), %xmm5
+	addq	%rax, %r9
+	movq	24($ap), %rax
+	movq	%rdx, %r10
+	adcq	\$0, %r10
+
+	mulq	%rbx
+	 pslldq	\$4, %xmm5
+	addq	%rax, %r10
+	movq	32($ap), %rax
+	movq	%rdx, %r11
+	adcq	\$0, %r11
+
+	mulq	%rbx
+	 por	%xmm5, %xmm4
+	addq	%rax, %r11
+	movq	40($ap), %rax
+	movq	%rdx, %r12
+	adcq	\$0, %r12
+
+	mulq	%rbx
+	addq	%rax, %r12
+	movq	48($ap), %rax
+	movq	%rdx, %r13
+	adcq	\$0, %r13
+
+	mulq	%rbx
+	 leaq	128(%rbp), %rbp
+	addq	%rax, %r13
+	movq	56($ap), %rax
+	movq	%rdx, %r14
+	adcq	\$0, %r14
+	
+	mulq	%rbx
+	 movq	%xmm4, %rbx
+	addq	%rax, %r14
+	 movq	($ap), %rax
+	movq	%rdx, %r15
+	adcq	\$0, %r15
+
+	leaq	8(%rsp), %rdi
+	movl	\$7, %ecx
+	jmp	.Loop_mul_gather
+
+.align	32
+.Loop_mul_gather:
+	mulq	%rbx
+	addq	%rax, %r8
+	movq	8($ap), %rax
+	movq	%r8, (%rdi)
+	movq	%rdx, %r8
+	adcq	\$0, %r8
+
+	mulq	%rbx
+	 movd	(%rbp), %xmm4
+	addq	%rax, %r9
+	movq	16($ap), %rax
+	adcq	\$0, %rdx
+	addq	%r9, %r8
+	movq	%rdx, %r9
+	adcq	\$0, %r9
+
+	mulq	%rbx
+	 movd	64(%rbp), %xmm5
+	addq	%rax, %r10
+	movq	24($ap), %rax
+	adcq	\$0, %rdx
+	addq	%r10, %r9
+	movq	%rdx, %r10
+	adcq	\$0, %r10
+
+	mulq	%rbx
+	 pslldq	\$4, %xmm5
+	addq	%rax, %r11
+	movq	32($ap), %rax
+	adcq	\$0, %rdx
+	addq	%r11, %r10
+	movq	%rdx, %r11
+	adcq	\$0, %r11
+
+	mulq	%rbx
+	 por	%xmm5, %xmm4
+	addq	%rax, %r12
+	movq	40($ap), %rax
+	adcq	\$0, %rdx
+	addq	%r12, %r11
+	movq	%rdx, %r12
+	adcq	\$0, %r12
+
+	mulq	%rbx
+	addq	%rax, %r13
+	movq	48($ap), %rax
+	adcq	\$0, %rdx
+	addq	%r13, %r12
+	movq	%rdx, %r13
+	adcq	\$0, %r13
+
+	mulq	%rbx
+	addq	%rax, %r14
+	movq	56($ap), %rax
+	adcq	\$0, %rdx
+	addq	%r14, %r13
+	movq	%rdx, %r14
+	adcq	\$0, %r14
+
+	mulq	%rbx
+	 movq	%xmm4, %rbx
+	addq	%rax, %r15
+	 movq	($ap), %rax
+	adcq	\$0, %rdx
+	addq	%r15, %r14
+	movq	%rdx, %r15	
+	adcq	\$0, %r15
+
+	leaq	128(%rbp), %rbp
+	leaq	8(%rdi), %rdi
+
+	decl	%ecx
+	jnz	.Loop_mul_gather
+
+	movq	%r8, (%rdi)
+	movq	%r9, 8(%rdi)
+	movq	%r10, 16(%rdi)
+	movq	%r11, 24(%rdi)
+	movq	%r12, 32(%rdi)
+	movq	%r13, 40(%rdi)
+	movq	%r14, 48(%rdi)
+	movq	%r15, 56(%rdi)
+
+	movq	%xmm0, $out
+	movq	%xmm1, %rbp
+
+	movq	(%rsp), %r8
+	movq	8(%rsp), %r9
+	movq	16(%rsp), %r10
+	movq	24(%rsp), %r11
+	movq	32(%rsp), %r12
+	movq	40(%rsp), %r13
+	movq	48(%rsp), %r14
+	movq	56(%rsp), %r15
+
+	call	_rsaz_512_reduce
+
+	addq	64(%rsp), %r8
+	adcq	72(%rsp), %r9
+	adcq	80(%rsp), %r10
+	adcq	88(%rsp), %r11
+	adcq	96(%rsp), %r12
+	adcq	104(%rsp), %r13
+	adcq	112(%rsp), %r14
+	adcq	120(%rsp), %r15
+	sbbq	%rcx, %rcx
+
+	call	_rsaz_512_subtract
+
+	leaq	128+24+48(%rsp), %rax
+	movq	-48(%rax), %r15
+	movq	-40(%rax), %r14
+	movq	-32(%rax), %r13
+	movq	-24(%rax), %r12
+	movq	-16(%rax), %rbp
+	movq	-8(%rax), %rbx
+	leaq	(%rax), %rsp
+.Lmul_gather4_epilogue:
+	ret
+.size	rsaz_512_mul_gather4,.-rsaz_512_mul_gather4
+___
+}
+{
+my ($out,$ap,$mod,$n0,$tbl,$pwr) = ("%rdi","%rsi","%rdx","%rcx","%r8","%r9d");
+$code.=<<___;
+.globl	rsaz_512_mul_scatter4
+.type	rsaz_512_mul_scatter4,\@function,6
+.align	32
+rsaz_512_mul_scatter4:
+	push	%rbx
+	push	%rbp
+	push	%r12
+	push	%r13
+	push	%r14
+	push	%r15
+
+	subq	\$128+24, %rsp
+.Lmul_scatter4_body:
+	leaq	($tbl,$pwr,4), $tbl
+	movq	$out, %xmm0		# off-load arguments
+	movq	$mod, %xmm1
+	movq	$tbl, %xmm2
+	movq	$n0, 128(%rsp)
+
+	movq	$out, %rbp
+	call	_rsaz_512_mul
+
+	movq	%xmm0, $out
+	movq	%xmm1, %rbp
+
+	movq	(%rsp), %r8
+	movq	8(%rsp), %r9
+	movq	16(%rsp), %r10
+	movq	24(%rsp), %r11
+	movq	32(%rsp), %r12
+	movq	40(%rsp), %r13
+	movq	48(%rsp), %r14
+	movq	56(%rsp), %r15
+
+	call	_rsaz_512_reduce
+
+	addq	64(%rsp), %r8
+	adcq	72(%rsp), %r9
+	adcq	80(%rsp), %r10
+	adcq	88(%rsp), %r11
+	adcq	96(%rsp), %r12
+	adcq	104(%rsp), %r13
+	adcq	112(%rsp), %r14
+	adcq	120(%rsp), %r15
+	movq	%xmm2, $inp
+	sbbq	%rcx, %rcx
+
+	call	_rsaz_512_subtract
+
+	movl	%r8d, 64*0($inp)	# scatter
+	shrq	\$32, %r8
+	movl	%r9d, 64*2($inp)
+	shrq	\$32, %r9
+	movl	%r10d, 64*4($inp)
+	shrq	\$32, %r10
+	movl	%r11d, 64*6($inp)
+	shrq	\$32, %r11
+	movl	%r12d, 64*8($inp)
+	shrq	\$32, %r12
+	movl	%r13d, 64*10($inp)
+	shrq	\$32, %r13
+	movl	%r14d, 64*12($inp)
+	shrq	\$32, %r14
+	movl	%r15d, 64*14($inp)
+	shrq	\$32, %r15
+	movl	%r8d, 64*1($inp)
+	movl	%r9d, 64*3($inp)
+	movl	%r10d, 64*5($inp)
+	movl	%r11d, 64*7($inp)
+	movl	%r12d, 64*9($inp)
+	movl	%r13d, 64*11($inp)
+	movl	%r14d, 64*13($inp)
+	movl	%r15d, 64*15($inp)
+
+	leaq	128+24+48(%rsp), %rax
+	movq	-48(%rax), %r15
+	movq	-40(%rax), %r14
+	movq	-32(%rax), %r13
+	movq	-24(%rax), %r12
+	movq	-16(%rax), %rbp
+	movq	-8(%rax), %rbx
+	leaq	(%rax), %rsp
+.Lmul_scatter4_epilogue:
+	ret
+.size	rsaz_512_mul_scatter4,.-rsaz_512_mul_scatter4
+___
+}
+{
+my ($out,$inp,$mod,$n0) = ("%rdi","%rsi","%rdx","%rcx");
+$code.=<<___;
+.globl	rsaz_512_mul_by_one
+.type	rsaz_512_mul_by_one,\@function,4
+.align	32
+rsaz_512_mul_by_one:
+	push	%rbx
+	push	%rbp
+	push	%r12
+	push	%r13
+	push	%r14
+	push	%r15
+
+	subq	\$128+24, %rsp
+.Lmul_by_one_body:
+	movq	$mod, %rbp	# reassign argument
+	movq	$n0, 128(%rsp)
+
+	movq	($inp), %r8
+	pxor	%xmm0, %xmm0
+	movq	8($inp), %r9
+	movq	16($inp), %r10
+	movq	24($inp), %r11
+	movq	32($inp), %r12
+	movq	40($inp), %r13
+	movq	48($inp), %r14
+	movq	56($inp), %r15
+
+	movdqa	%xmm0, (%rsp)
+	movdqa	%xmm0, 16(%rsp)
+	movdqa	%xmm0, 32(%rsp)
+	movdqa	%xmm0, 48(%rsp)
+	movdqa	%xmm0, 64(%rsp)
+	movdqa	%xmm0, 80(%rsp)
+	movdqa	%xmm0, 96(%rsp)
+
+	call	_rsaz_512_reduce
+
+	movq	%r8, ($out)
+	movq	%r9, 8($out)
+	movq	%r10, 16($out)
+	movq	%r11, 24($out)
+	movq	%r12, 32($out)
+	movq	%r13, 40($out)
+	movq	%r14, 48($out)
+	movq	%r15, 56($out)
+
+	leaq	128+24+48(%rsp), %rax
+	movq	-48(%rax), %r15
+	movq	-40(%rax), %r14
+	movq	-32(%rax), %r13
+	movq	-24(%rax), %r12
+	movq	-16(%rax), %rbp
+	movq	-8(%rax), %rbx
+	leaq	(%rax), %rsp
+.Lmul_by_one_epilogue:
+	ret
+.size	rsaz_512_mul_by_one,.-rsaz_512_mul_by_one
+___
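+
+# rsaz_512_mul_by_one zeroes the scratch area on the stack and runs the
+# input straight through _rsaz_512_reduce, i.e. for an input already
+# reduced modulo N it returns in*2^-512 mod N, which is how a result is
+# converted back out of the Montgomery domain. A Math::BigInt model of
+# the same operation (editor's sketch, never called by the generator):
+sub _ref_from_mont {
+	use Math::BigInt;
+	my ($x, $n) = @_;	# Math::BigInt residue in Montgomery form, odd modulus
+	my $rinv = Math::BigInt->new(2)->bpow(512)->bmodinv($n);	# 2^-512 mod N
+	return ($x * $rinv) % $n;
+}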
+}
+{	# _rsaz_512_reduce
+	#
+	# input:	%r8-%r15, %rbp - mod, 128(%rsp) - n0
+	# output:	%r8-%r15
+	# clobbers:	everything except %rbp and %rdi
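+	#
+	# Together with the add-back of the upper product half in the
+	# callers, this implements the standard word-by-word Montgomery
+	# reduction with n0 == -mod^-1 mod 2^64 (the usual BN_MONT_CTX
+	# constant). Whole-number reference model (editor's sketch, never
+	# called by the generator):
+sub _ref_mont_reduce {
+	use Math::BigInt;
+	my ($t, $n, $n0) = @_;	# $t < $n*2^512, $n odd, $n0 == -$n^-1 mod 2^64
+	my $w = Math::BigInt->new(1)->blsft(64);	# word modulus, 2^64
+	$t = Math::BigInt->new($t);			# work on a copy
+	for (1..8) {
+		my $m = $t % $w * $n0 % $w;	# per-word Montgomery quotient
+		$t = ($t + $m * $n) / $w;	# low word cancels, shift is exact
+	}
+	$t -= $n if $t >= $n;			# final conditional subtraction
+	return $t;				# == T * 2^-512 mod N
+}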
+$code.=<<___;
+.type	_rsaz_512_reduce,\@abi-omnipotent
+.align	32
+_rsaz_512_reduce:
+___
+if (1) {
+$code.=<<___;
+	movq	%r8, %rbx
+	imulq	128+8(%rsp), %rbx
+	movq	0(%rbp), %rax
+	movl	\$8, %ecx
+	jmp	.Lreduction_loop
+
+.align	32
+.Lreduction_loop:
+	mulq	%rbx
+	movq	8(%rbp), %rax
+	negq	%r8
+	movq	%rdx, %r8
+	adcq	\$0, %r8
+
+	mulq	%rbx
+	addq	%rax, %r9
+	movq	16(%rbp), %rax
+	adcq	\$0, %rdx
+	addq	%r9, %r8
+	movq	%rdx, %r9
+	adcq	\$0, %r9
+
+	mulq	%rbx
+	addq	%rax, %r10
+	movq	24(%rbp), %rax
+	adcq	\$0, %rdx
+	addq	%r10, %r9
+	movq	%rdx, %r10
+	adcq	\$0, %r10
+
+	mulq	%rbx
+	addq	%rax, %r11
+	movq	32(%rbp), %rax
+	adcq	\$0, %rdx
+	addq	%r11, %r10
+	 movq	128+8(%rsp), %rsi
+	movq	%rdx, %r11
+	adcq	\$0, %r11
+
+	mulq	%rbx
+	addq	%rax, %r12
+	movq	40(%rbp), %rax
+	adcq	\$0, %rdx
+	 imulq	%r8, %rsi
+	addq	%r12, %r11
+	movq	%rdx, %r12
+	adcq	\$0, %r12
+
+	mulq	%rbx
+	addq	%rax, %r13
+	movq	48(%rbp), %rax
+	adcq	\$0, %rdx
+	addq	%r13, %r12
+	movq	%rdx, %r13
+	adcq	\$0, %r13
+
+	mulq	%rbx
+	addq	%rax, %r14
+	movq	56(%rbp), %rax
+	adcq	\$0, %rdx
+	addq	%r14, %r13
+	movq	%rdx, %r14
+	adcq	\$0, %r14
+
+	mulq	%rbx
+	 movq	%rsi, %rbx
+	addq	%rax, %r15
+	 movq	0(%rbp), %rax
+	adcq	\$0, %rdx
+	addq	%r15, %r14
+	movq	%rdx, %r15
+	adcq	\$0, %r15
+
+	decl	%ecx
+	jne	.Lreduction_loop
+___
+} else {
+$code.=<<___;
+	movq	128+8(%rsp), %rdx		# pull $n0
+	imulq	%r8, %rdx
+	movl	\$8, %ecx
+	jmp	.Lreduction_loop
+
+.align	32
+.Lreduction_loop:
+	neg	%r8
+	mulx	0(%rbp), %rax, %r8
+	adc	%r9, %r8
+
+	mulx	8(%rbp), %rax, %r9
+	adc	\$0, %r9
+	add	%rax, %r8
+	adc	%r10, %r9
+
+	mulx	16(%rbp), %rax, %r10
+	adc	\$0, %r10
+	 mov	128+8(%rsp), %rbx		# pull $n0
+	 imul	%r8, %rbx
+	add	%rax, %r9
+	adc	%r11, %r10
+
+	mulx	24(%rbp), %rax, %r11
+	adc	\$0, %r11
+	add	%rax, %r10
+	adc	%r12, %r11
+
+	mulx	32(%rbp), %rax, %r12
+	adc	\$0, %r12
+	add	%rax, %r11
+	adc	%r13, %r12
+
+	mulx	40(%rbp), %rax, %r13
+	adc	\$0, %r13
+	add	%rax, %r12
+	adc	%r14, %r13
+
+	mulx	48(%rbp), %rax, %r14
+	adc	\$0, %r14
+	add	%rax, %r13
+	adc	%r15, %r14
+
+	mulx	56(%rbp), %rax, %r15
+	 mov	%rbx, %rdx
+	adc	\$0, %r15
+	add	%rax, %r14
+	adc	\$0, %r15
+
+	dec	%ecx
+	jne	.Lreduction_loop
+___
+}
+$code.=<<___;
+	ret
+.size	_rsaz_512_reduce,.-_rsaz_512_reduce
+___
+}
+{	# _rsaz_512_subtract
+	# input: %r8-%r15, %rdi - $out, %rbp - $mod, %rcx - mask
+	# output:
+	# clobbers: everything but %rdi, %rsi and %rbp
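+	#
+	# This is a branch-free conditional subtraction: the negated and
+	# complemented modulus words form 2^512-mod, they are AND-ed with
+	# the all-zero or all-one mask, and adding the result modulo 2^512
+	# either subtracts mod or leaves the value untouched, with an
+	# execution trace that does not depend on the mask. Math::BigInt
+	# model (editor's sketch, never called by the generator; a 0-or-1
+	# flag stands in for the register mask):
+sub _ref_cond_sub {
+	use Math::BigInt;
+	my ($r, $mod, $mask) = @_;	# Math::BigInt $r, $mod; $mask is 0 or 1
+	my $two512 = Math::BigInt->new(1)->blsft(512);
+	# adding the two's complement and discarding bit 512 == subtracting
+	return ($r + ($two512 - $mod) * $mask) % $two512;
+}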
+$code.=<<___;
+.type	_rsaz_512_subtract,\@abi-omnipotent
+.align	32
+_rsaz_512_subtract:
+	movq	%r8, ($out)
+	movq	%r9, 8($out)
+	movq	%r10, 16($out)
+	movq	%r11, 24($out)
+	movq	%r12, 32($out)
+	movq	%r13, 40($out)
+	movq	%r14, 48($out)
+	movq	%r15, 56($out)
+
+	movq	0($mod), %r8
+	movq	8($mod), %r9
+	negq	%r8
+	notq	%r9
+	andq	%rcx, %r8
+	movq	16($mod), %r10
+	andq	%rcx, %r9
+	notq	%r10
+	movq	24($mod), %r11
+	andq	%rcx, %r10
+	notq	%r11
+	movq	32($mod), %r12
+	andq	%rcx, %r11
+	notq	%r12
+	movq	40($mod), %r13
+	andq	%rcx, %r12
+	notq	%r13
+	movq	48($mod), %r14
+	andq	%rcx, %r13
+	notq	%r14
+	movq	56($mod), %r15
+	andq	%rcx, %r14
+	notq	%r15
+	andq	%rcx, %r15
+
+	addq	($out), %r8
+	adcq	8($out), %r9
+	adcq	16($out), %r10
+	adcq	24($out), %r11
+	adcq	32($out), %r12
+	adcq	40($out), %r13
+	adcq	48($out), %r14
+	adcq	56($out), %r15
+
+	movq	%r8, ($out)
+	movq	%r9, 8($out)
+	movq	%r10, 16($out)
+	movq	%r11, 24($out)
+	movq	%r12, 32($out)
+	movq	%r13, 40($out)
+	movq	%r14, 48($out)
+	movq	%r15, 56($out)
+
+	ret
+.size	_rsaz_512_subtract,.-_rsaz_512_subtract
+___
+}
+{	# _rsaz_512_mul
+	#
+	# input: %rsi - ap, %rbp - bp
+	# output:
+	# clobbers: everything
+my ($ap,$bp) = ("%rsi","%rbp");
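+
+# _rsaz_512_mul below is a plain 8x8-limb schoolbook multiplication: one
+# pass over $ap per limb of $bp (.Loop_mul), accumulating the 16-limb
+# product in the 128-byte scratch area on the stack. Limb-level reference
+# model (editor's sketch, never called by the generator):
+sub _ref_mul_512 {
+	use Math::BigInt;
+	my ($ap, $bp) = @_;		# refs to arrays of eight Math::BigInt limbs
+	my @t = map { Math::BigInt->bzero() } 0..15;
+	for my $j (0..7) {		# outer loop over $bp, as in .Loop_mul
+		for my $i (0..7) {
+			$t[$i+$j] += $ap->[$i] * $bp->[$j];
+		}
+	}
+	return @t;	# unreduced limbs; the assembly propagates carries on the fly
+}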
+$code.=<<___;
+.type	_rsaz_512_mul,\@abi-omnipotent
+.align	32
+_rsaz_512_mul:
+	leaq	8(%rsp), %rdi
+
+	movq	($bp), %rbx
+	movq	($ap), %rax
+	mulq	%rbx
+	movq	%rax, (%rdi)
+	movq	8($ap), %rax
+	movq	%rdx, %r8
+
+	mulq	%rbx
+	addq	%rax, %r8
+	movq	16($ap), %rax
+	movq	%rdx, %r9
+	adcq	\$0, %r9
+
+	mulq	%rbx
+	addq	%rax, %r9
+	movq	24($ap), %rax
+	movq	%rdx, %r10
+	adcq	\$0, %r10
+
+	mulq	%rbx
+	addq	%rax, %r10
+	movq	32($ap), %rax
+	movq	%rdx, %r11
+	adcq	\$0, %r11
+
+	mulq	%rbx
+	addq	%rax, %r11
+	movq	40($ap), %rax
+	movq	%rdx, %r12
+	adcq	\$0, %r12
+
+	mulq	%rbx
+	addq	%rax, %r12
+	movq	48($ap), %rax
+	movq	%rdx, %r13
+	adcq	\$0, %r13
+
+	mulq	%rbx
+	addq	%rax, %r13
+	movq	56($ap), %rax
+	movq	%rdx, %r14
+	adcq	\$0, %r14
+	
+	mulq	%rbx
+	addq	%rax, %r14
+	 movq	($ap), %rax
+	movq	%rdx, %r15
+	adcq	\$0, %r15
+
+	leaq	8($bp), $bp
+	leaq	8(%rdi), %rdi
+
+	movl	\$7, %ecx
+	jmp	.Loop_mul
+
+.align	32
+.Loop_mul:
+	movq	($bp), %rbx
+	mulq	%rbx
+	addq	%rax, %r8
+	movq	8($ap), %rax
+	movq	%r8, (%rdi)
+	movq	%rdx, %r8
+	adcq	\$0, %r8
+
+	mulq	%rbx
+	addq	%rax, %r9
+	movq	16($ap), %rax
+	adcq	\$0, %rdx
+	addq	%r9, %r8
+	movq	%rdx, %r9
+	adcq	\$0, %r9
+
+	mulq	%rbx
+	addq	%rax, %r10
+	movq	24($ap), %rax
+	adcq	\$0, %rdx
+	addq	%r10, %r9
+	movq	%rdx, %r10
+	adcq	\$0, %r10
+
+	mulq	%rbx
+	addq	%rax, %r11
+	movq	32($ap), %rax
+	adcq	\$0, %rdx
+	addq	%r11, %r10
+	movq	%rdx, %r11
+	adcq	\$0, %r11
+
+	mulq	%rbx
+	addq	%rax, %r12
+	movq	40($ap), %rax
+	adcq	\$0, %rdx
+	addq	%r12, %r11
+	movq	%rdx, %r12
+	adcq	\$0, %r12
+
+	mulq	%rbx
+	addq	%rax, %r13
+	movq	48($ap), %rax
+	adcq	\$0, %rdx
+	addq	%r13, %r12
+	movq	%rdx, %r13
+	adcq	\$0, %r13
+
+	mulq	%rbx
+	addq	%rax, %r14
+	movq	56($ap), %rax
+	adcq	\$0, %rdx
+	addq	%r14, %r13
+	movq	%rdx, %r14
+	 leaq	8($bp), $bp
+	adcq	\$0, %r14
+
+	mulq	%rbx
+	addq	%rax, %r15
+	 movq	($ap), %rax
+	adcq	\$0, %rdx
+	addq	%r15, %r14
+	movq	%rdx, %r15	
+	adcq	\$0, %r15
+
+	leaq	8(%rdi), %rdi
+
+	decl	%ecx
+	jnz	.Loop_mul
+
+	movq	%r8, (%rdi)
+	movq	%r9, 8(%rdi)
+	movq	%r10, 16(%rdi)
+	movq	%r11, 24(%rdi)
+	movq	%r12, 32(%rdi)
+	movq	%r13, 40(%rdi)
+	movq	%r14, 48(%rdi)
+	movq	%r15, 56(%rdi)
+
+	ret
+.size	_rsaz_512_mul,.-_rsaz_512_mul
+___
+}
+{
+my ($out,$inp,$power)= $win64 ? ("%rcx","%rdx","%r8d") : ("%rdi","%rsi","%edx");
+$code.=<<___;
+.globl	rsaz_512_scatter4
+.type	rsaz_512_scatter4,\@abi-omnipotent
+.align	16
+rsaz_512_scatter4:
+	leaq	($out,$power,4), $out
+	movl	\$8, %r9d
+	jmp	.Loop_scatter
+.align	16
+.Loop_scatter:
+	movq	($inp), %rax
+	leaq	8($inp), $inp
+	movl	%eax, ($out)
+	shrq	\$32, %rax
+	movl	%eax, 64($out)
+	leaq	128($out), $out
+	decl	%r9d
+	jnz	.Loop_scatter
+	ret
+.size	rsaz_512_scatter4,.-rsaz_512_scatter4
+
+.globl	rsaz_512_gather4
+.type	rsaz_512_gather4,\@abi-omnipotent
+.align	16
+rsaz_512_gather4:
+	leaq	($inp,$power,4), $inp
+	movl	\$8, %r9d
+	jmp	.Loop_gather
+.align	16
+.Loop_gather:
+	movl	($inp), %eax
+	movl	64($inp), %r8d
+	leaq	128($inp), $inp
+	shlq	\$32, %r8
+	or	%r8, %rax
+	movq	%rax, ($out)
+	leaq	8($out), $out
+	decl	%r9d
+	jnz	.Loop_gather
+	ret
+.size	rsaz_512_gather4,.-rsaz_512_gather4
+___
+}
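+
+# Both routines use the same interleaved table layout: word w of power p
+# is kept as two 32-bit halves, with entries of different powers sitting
+# just four bytes apart (cf. the side-channel considerations in reference
+# [4]). The byte offset they implement is modelled by (editor's sketch,
+# never called by the generator):
+sub _ref_tbl_offset {
+	my ($power, $word, $half) = @_;		# $half: 0 = low, 1 = high 32 bits
+	return $power*4 + $word*128 + $half*64;
+}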
+
+# EXCEPTION_DISPOSITION handler (EXCEPTION_RECORD *rec,ULONG64 frame,
+#		CONTEXT *context,DISPATCHER_CONTEXT *disp)
+if ($win64) {
+$rec="%rcx";
+$frame="%rdx";
+$context="%r8";
+$disp="%r9";
+
+$code.=<<___;
+.extern	__imp_RtlVirtualUnwind
+.type	se_handler,\@abi-omnipotent
+.align	16
+se_handler:
+	push	%rsi
+	push	%rdi
+	push	%rbx
+	push	%rbp
+	push	%r12
+	push	%r13
+	push	%r14
+	push	%r15
+	pushfq
+	sub	\$64,%rsp
+
+	mov	120($context),%rax	# pull context->Rax
+	mov	248($context),%rbx	# pull context->Rip
+
+	mov	8($disp),%rsi		# disp->ImageBase
+	mov	56($disp),%r11		# disp->HandlerData
+
+	mov	0(%r11),%r10d		# HandlerData[0]
+	lea	(%rsi,%r10),%r10	# end of prologue label
+	cmp	%r10,%rbx		# context->Rip<end of prologue label
+	jb	.Lcommon_seh_tail
+
+	mov	152($context),%rax	# pull context->Rsp
+
+	mov	4(%r11),%r10d		# HandlerData[1]
+	lea	(%rsi,%r10),%r10	# epilogue label
+	cmp	%r10,%rbx		# context->Rip>=epilogue label
+	jae	.Lcommon_seh_tail
+
+	lea	128+24+48(%rax),%rax
+
+	mov	-8(%rax),%rbx
+	mov	-16(%rax),%rbp
+	mov	-24(%rax),%r12
+	mov	-32(%rax),%r13
+	mov	-40(%rax),%r14
+	mov	-48(%rax),%r15
+	mov	%rbx,144($context)	# restore context->Rbx
+	mov	%rbp,160($context)	# restore context->Rbp
+	mov	%r12,216($context)	# restore context->R12
+	mov	%r13,224($context)	# restore context->R13
+	mov	%r14,232($context)	# restore context->R14
+	mov	%r15,240($context)	# restore context->R15
+
+.Lcommon_seh_tail:
+	mov	8(%rax),%rdi
+	mov	16(%rax),%rsi
+	mov	%rax,152($context)	# restore context->Rsp
+	mov	%rsi,168($context)	# restore context->Rsi
+	mov	%rdi,176($context)	# restore context->Rdi
+
+	mov	40($disp),%rdi		# disp->ContextRecord
+	mov	$context,%rsi		# context
+	mov	\$154,%ecx		# sizeof(CONTEXT)
+	.long	0xa548f3fc		# cld; rep movsq
+
+	mov	$disp,%rsi
+	xor	%rcx,%rcx		# arg1, UNW_FLAG_NHANDLER
+	mov	8(%rsi),%rdx		# arg2, disp->ImageBase
+	mov	0(%rsi),%r8		# arg3, disp->ControlPc
+	mov	16(%rsi),%r9		# arg4, disp->FunctionEntry
+	mov	40(%rsi),%r10		# disp->ContextRecord
+	lea	56(%rsi),%r11		# &disp->HandlerData
+	lea	24(%rsi),%r12		# &disp->EstablisherFrame
+	mov	%r10,32(%rsp)		# arg5
+	mov	%r11,40(%rsp)		# arg6
+	mov	%r12,48(%rsp)		# arg7
+	mov	%rcx,56(%rsp)		# arg8, (NULL)
+	call	*__imp_RtlVirtualUnwind(%rip)
+
+	mov	\$1,%eax		# ExceptionContinueSearch
+	add	\$64,%rsp
+	popfq
+	pop	%r15
+	pop	%r14
+	pop	%r13
+	pop	%r12
+	pop	%rbp
+	pop	%rbx
+	pop	%rdi
+	pop	%rsi
+	ret
+.size	se_handler,.-se_handler
+
+.section	.pdata
+.align	4
+	.rva	.LSEH_begin_rsaz_512_sqr
+	.rva	.LSEH_end_rsaz_512_sqr
+	.rva	.LSEH_info_rsaz_512_sqr
+
+	.rva	.LSEH_begin_rsaz_512_mul
+	.rva	.LSEH_end_rsaz_512_mul
+	.rva	.LSEH_info_rsaz_512_mul
+
+	.rva	.LSEH_begin_rsaz_512_mul_gather4
+	.rva	.LSEH_end_rsaz_512_mul_gather4
+	.rva	.LSEH_info_rsaz_512_mul_gather4
+
+	.rva	.LSEH_begin_rsaz_512_mul_scatter4
+	.rva	.LSEH_end_rsaz_512_mul_scatter4
+	.rva	.LSEH_info_rsaz_512_mul_scatter4
+
+	.rva	.LSEH_begin_rsaz_512_mul_by_one
+	.rva	.LSEH_end_rsaz_512_mul_by_one
+	.rva	.LSEH_info_rsaz_512_mul_by_one
+
+.section	.xdata
+.align	8
+.LSEH_info_rsaz_512_sqr:
+	.byte	9,0,0,0
+	.rva	se_handler
+	.rva	.Lsqr_body,.Lsqr_epilogue			# HandlerData[]
+.LSEH_info_rsaz_512_mul:
+	.byte	9,0,0,0
+	.rva	se_handler
+	.rva	.Lmul_body,.Lmul_epilogue			# HandlerData[]
+.LSEH_info_rsaz_512_mul_gather4:
+	.byte	9,0,0,0
+	.rva	se_handler
+	.rva	.Lmul_gather4_body,.Lmul_gather4_epilogue	# HandlerData[]
+.LSEH_info_rsaz_512_mul_scatter4:
+	.byte	9,0,0,0
+	.rva	se_handler
+	.rva	.Lmul_scatter4_body,.Lmul_scatter4_epilogue	# HandlerData[]
+.LSEH_info_rsaz_512_mul_by_one:
+	.byte	9,0,0,0
+	.rva	se_handler
+	.rva	.Lmul_by_one_body,.Lmul_by_one_epilogue		# HandlerData[]
+___
+}
+
+$code =~ s/\`([^\`]*)\`/eval $1/gem;
+print $code;
+close STDOUT;