# ====================================================================
# Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
# project. The module is, however, dual licensed under OpenSSL and
# CRYPTOGAMS licenses depending on where you obtain it. For further
# details see http://www.openssl.org/~appro/cryptogams/.
# ====================================================================
# The module implements the "4-bit" GCM GHASH function and the
# underlying single multiplication operation in GF(2^128). "4-bit"
# means that it uses a 256-byte per-key table [+128 bytes of shared
# table]. The GHASH function features a so-called "528B" variant that
# utilizes an additional 256+16 bytes of per-key storage [+512 bytes
# of shared table]. Performance results are for this streamed GHASH
# subroutine and are expressed in cycles per processed byte, less is
# better:
#		gcc 3.4.x(*)	assembler
#
# Opteron	19.3		7.7		+150%
# Core2		17.8		8.1(**)		+120%
# VIA Nano	21.8		10.1		+115%
# (*)	the comparison is not completely fair, because the C results
#	are for the vanilla "256B" implementation, while the assembler
#	results are for the "528B" variant;
# (**)	it's a mystery [to me] why the Core2 result is not the same
#	as for Opteron;
# Add PCLMULQDQ version performing at 2.02 cycles per processed byte.
# See ghash-x86.pl for background information and details about coding
# techniques.
#
# Special thanks to David Woodhouse <dwmw2@infradead.org> for
# providing access to a Westmere-based system on behalf of Intel
# Open Source Technology Centre.
# Overhaul: aggregate Karatsuba post-processing, improve ILP in
# reduction_alg9, increase reduction aggregate factor to 4x. As for
# the latter: ghash-x86.pl discusses why it makes less sense to
# increase the aggregate factor. Then why increase it here? The
# critical path consists of 3 independent pclmulqdq instructions,
# Karatsuba post-processing and reduction. "On top" of this we lay
# down aggregated multiplication operations, triplets of independent
# pclmulqdq's. As the issue rate for pclmulqdq is limited, it makes
# less sense to aggregate more multiplications than it takes to
# perform the remaining non-multiplication operations. 2x is the
# near-optimal coefficient for contemporary Intel CPUs (hence the
# modest improvement coefficient), but not for Bulldozer. The latter
# is because its logical SIMD operations are twice as slow as Intel's,
# so the critical path is longer. A CPU with a higher pclmulqdq issue
# rate would also benefit from a higher aggregate factor...
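#
# To make the aggregation idea concrete: it amortizes one reduction
# over several block multiplications, e.g. the 4x code path below
# computes Xi+4 = [H^4*(Ii+Xi) + H^3*Ii+1 + H^2*Ii+2 + H*Ii+3] mod P,
# paying for a single reduction per four multiplication triplets.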
# Sandy Bridge	1.79(+9%)
# Ivy Bridge	1.79(+8%)
# Haswell	0.55(+93%) (if system doesn't support AVX)
# Bulldozer	1.52(+25%)
# ... the 8x aggregate factor AVX code path uses the reduction
# algorithm suggested by Shay Gueron[1]. Even though contemporary
# AVX-capable CPUs such as Sandy and Ivy Bridge can execute it, the
# code performs sub-optimally there in comparison to the above
# mentioned version. But thanks to Ilya Albrekht and Max Locktyukhin
# of Intel Corp. we know that it performs at 0.41 cycles per byte on
# a Haswell processor.
#
# [1] http://rt.openssl.org/Ticket/Display.html?id=2900&user=guest&pass=guest
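
# For reference, here is a minimal bit-level sketch of the GF(2^128)
# multiplication that the table-driven and pclmulqdq code paths below
# accelerate, following the bit-reflected convention of the GCM spec
# (NIST SP 800-38D). It assumes Math::BigInt and is illustration only;
# nothing in the generated code uses it.

use Math::BigInt;

sub gf128_mul_ref {
    my ($x, $y) = @_;			# 128-bit Math::BigInt operands
    my $R = Math::BigInt->new("0xe1000000000000000000000000000000");
    my $z = Math::BigInt->bzero();
    my $v = $x->copy();
    for my $i (0 .. 127) {		# scan Y most-significant bit first
	$z->bxor($v) if $y->copy()->brsft(127 - $i)->band(1)->is_one();
	my $carry = $v->copy()->band(1)->is_one();
	$v->brsft(1);			# multiply V by x: a right shift in the reflected domain
	$v->bxor($R) if $carry;		# reduce by x^128+x^7+x^2+x+1
    }
    return $z;
}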
if ($flavour =~ /\./) { $output = $flavour; undef $flavour; }

$win64=0; $win64=1 if ($flavour =~ /[nm]asm|mingw64/ || $output =~ /\.asm$/);

$0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
( $xlate="${dir}x86_64-xlate.pl" and -f $xlate ) or
( $xlate="${dir}../../perlasm/x86_64-xlate.pl" and -f $xlate) or
die "can't locate x86_64-xlate.pl";

if (`$ENV{CC} -Wa,-v -c -o /dev/null -x assembler /dev/null 2>&1`
		=~ /GNU assembler version ([2-9]\.[0-9]+)/) {
	$avx = ($1>=2.19) + ($1>=2.22);
}

if (!$avx && $win64 && ($flavour =~ /nasm/ || $ENV{ASM} =~ /nasm/) &&
	   `nasm -v 2>&1` =~ /NASM version ([2-9]\.[0-9]+)/) {
	$avx = ($1>=2.09) + ($1>=2.10);
}

if (!$avx && $win64 && ($flavour =~ /masm/ || $ENV{ASM} =~ /ml64/) &&
	   `ml64 2>&1` =~ /Version ([0-9]+)\./) {
	$avx = ($1>=10) + ($1>=11);
}

open OUT,"| \"$^X\" $xlate $flavour $output";
# common register layout

# per-function register layout
sub LB() { my $r=shift; $r =~ s/%[er]([a-d])x/%\1l/	or
			$r =~ s/%[er]([sd]i)/%\1l/	or
			$r =~ s/%[er](bp)/%\1l/		or
			$r =~ s/%(r[0-9]+)[d]?/%\1b/;	$r; }
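# e.g. &LB("%rax") yields "%al", &LB("%esi") yields "%sil" and
# &LB("%r12") yields "%r12b", i.e. the byte alias of the given
# 64/32-bit register.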
sub AUTOLOAD()		# thunk [simplified] 32-bit style perlasm
{ my $opcode = $AUTOLOAD; $opcode =~ s/.*:://;
  my $arg = pop;
    $arg = "\$$arg" if ($arg*1 eq $arg);
    $code .= "\t$opcode\t".join(',',$arg,reverse @_)."\n";
}
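# e.g. &shr("%rbx",4) emits "\tshr\t$4,%rbx\n": the last argument of the
# 32-bit style (dst,src) call is popped, numeric immediates receive a
# "$" prefix, and the operands come out in AT&T (src,dst) order.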
	mov	`&LB("$Zlo")`,`&LB("$nlo")`
	mov	`&LB("$Zlo")`,`&LB("$nhi")`
	shl	\$4,`&LB("$nlo")`

	mov	8($Htbl,$nlo),$Zlo
	mov	($Htbl,$nlo),$Zhi
	and	\$0xf0,`&LB("$nhi")`

	mov	($inp,$cnt),`&LB("$nlo")`
	xor	8($Htbl,$nhi),$Zlo
	xor	($Htbl,$nhi),$Zhi
	mov	`&LB("$nlo")`,`&LB("$nhi")`
	xor	($rem_4bit,$rem,8),$Zhi
	shl	\$4,`&LB("$nlo")`

	xor	8($Htbl,$nlo),$Zlo
	xor	($Htbl,$nlo),$Zhi
	and	\$0xf0,`&LB("$nhi")`
	xor	($rem_4bit,$rem,8),$Zhi

	xor	8($Htbl,$nlo),$Zlo
	xor	($Htbl,$nlo),$Zhi
	and	\$0xf0,`&LB("$nhi")`
	xor	($rem_4bit,$rem,8),$Zhi

	xor	8($Htbl,$nhi),$Zlo
	xor	($Htbl,$nhi),$Zhi
	xor	($rem_4bit,$rem,8),$Zhi

.extern	OPENSSL_ia32cap_P

.globl	gcm_gmult_4bit
.type	gcm_gmult_4bit,\@function,2
	push	%rbp			# %rbp and %r12 are pushed exclusively in
	push	%r12			# order to reuse Win64 exception handler...
	lea	.Lrem_4bit(%rip),$rem_4bit
.size	gcm_gmult_4bit,.-gcm_gmult_4bit
# per-function register layout

.globl	gcm_ghash_4bit
.type	gcm_ghash_4bit,\@function,4
	mov	$inp,%r14		# reassign a couple of args

my @nhi=("%ebx","%ecx");
my @rem=("%r12","%r13");

&sub	($Htbl,-128);			# size optimization
&lea	($Hshr4,"16+128(%rsp)");
{ my @lo =($nlo,$nhi);

  for ($i=0,$j=-2;$i<18;$i++,$j++) {
    &mov	("$j(%rsp)",&LB($dat))		if ($i>1);
    &or		($lo[0],$tmp)			if ($i>1);
    &mov	(&LB($dat),&LB($lo[1]))		if ($i>0 && $i<17);
    &shr	($lo[1],4)			if ($i>0 && $i<17);
    &mov	($tmp,$hi[1])			if ($i>0 && $i<17);
    &shr	($hi[1],4)			if ($i>0 && $i<17);
    &mov	("8*$j($Hshr4)",$hi[0])		if ($i>1);
    &mov	($hi[0],"16*$i+0-128($Htbl)")	if ($i<16);
    &shl	(&LB($dat),4)			if ($i>0 && $i<17);
    &mov	("8*$j-128($Hshr4)",$lo[0])	if ($i>1);
    &mov	($lo[0],"16*$i+8-128($Htbl)")	if ($i<16);
    &shl	($tmp,60)			if ($i>0 && $i<17);

    push	(@lo,shift(@lo));
    push	(@hi,shift(@hi));
  }
&mov	($Zlo,"8($Xi)");
&mov	($Zhi,"0($Xi)");
&add	($len,$inp);			# pointer to the end of data
&lea	($rem_8bit,".Lrem_8bit(%rip)");
&jmp	(".Louter_loop");

$code.=".align	16\n.Louter_loop:\n";
&xor	($Zhi,"($inp)");
&mov	("%rdx","8($inp)");
&lea	($inp,"16($inp)");

&mov	("8($Xi)","%rdx");

&mov	(&LB($nlo),&LB($dat));
&movz	($nhi[0],&LB($dat));
for ($j=11,$i=0;$i<15;$i++) {

    &xor	($Zlo,"8($Htbl,$nlo)")			if ($i>0);
    &xor	($Zhi,"($Htbl,$nlo)")			if ($i>0);
    &mov	($Zlo,"8($Htbl,$nlo)")			if ($i==0);
    &mov	($Zhi,"($Htbl,$nlo)")			if ($i==0);

    &mov	(&LB($nlo),&LB($dat));
    &xor	($Zlo,$tmp)				if ($i>0);
    &movzw	($rem[1],"($rem_8bit,$rem[1],2)")	if ($i>0);

    &movz	($nhi[1],&LB($dat));

    &movzb	($rem[0],"(%rsp,$nhi[0])");

    &shr	($nhi[1],4)				if ($i<14);
    &and	($nhi[1],0xf0)				if ($i==14);
    &shl	($rem[1],48)				if ($i>0);

    &xor	($Zhi,$rem[1])				if ($i>0);

    &movz	($rem[0],&LB($rem[0]));
    &mov	($dat,"$j($Xi)")			if (--$j%4==0);

    &xor	($Zlo,"-128($Hshr4,$nhi[0],8)");
    &xor	($Zhi,"($Hshr4,$nhi[0],8)");

    unshift	(@nhi,pop(@nhi));		# "rotate" registers
    unshift	(@rem,pop(@rem));
}
&movzw	($rem[1],"($rem_8bit,$rem[1],2)");
&xor	($Zlo,"8($Htbl,$nlo)");
&xor	($Zhi,"($Htbl,$nlo)");

&movz	($rem[0],&LB($Zlo));

&shl	(&LB($rem[0]),4);

&xor	($Zlo,"8($Htbl,$nhi[0])");
&movzw	($rem[0],"($rem_8bit,$rem[0],2)");

&xor	($Zhi,"($Htbl,$nhi[0])");

&jb	(".Louter_loop");

.size	gcm_ghash_4bit,.-gcm_ghash_4bit

######################################################################

@_4args=$win64?	("%rcx","%rdx","%r8", "%r9") :	# Win64 order
		("%rdi","%rsi","%rdx","%rcx");	# Unix order

($Xi,$Xhi)=("%xmm0","%xmm1");	$Hkey="%xmm2";
($T1,$T2,$T3)=("%xmm3","%xmm4","%xmm5");
sub clmul64x64_T2 {	# minimal register pressure
my ($Xhi,$Xi,$Hkey,$HK)=@_;

if (!defined($HK)) {	$HK = $T2;
	pshufd	\$0b01001110,$Xi,$T1
	pshufd	\$0b01001110,$Hkey,$T2

	pshufd	\$0b01001110,$Xi,$T1

	pclmulqdq	\$0x00,$Hkey,$Xi	#######
	pclmulqdq	\$0x11,$Hkey,$Xhi	#######
	pclmulqdq	\$0x00,$HK,$T1		#######
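	# pclmulqdq's immediate selects the 64-bit halves to multiply:
	# \$0x00 is lo*lo, \$0x11 is hi*hi, and the third product over
	# the xor-ed halves supplies the Karatsuba middle term, so one
	# 128x128 carry-less multiplication costs three pclmulqdq's.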
sub reduction_alg9 {	# 17/11 times faster than Intel version
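# Folds the 256-bit product in $Xhi:$Xi back to 128 bits modulo the
# GCM polynomial x^128+x^7+x^2+x+1, in the two shift-and-xor passes
# marked "1st phase"/"2nd phase" below.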
{ my ($Htbl,$Xip)=@_4args;

.globl	gcm_init_clmul
.type	gcm_init_clmul,\@abi-omnipotent
$code.=<<___ if ($win64);
.LSEH_begin_gcm_init_clmul:
	# I can't trust assembler to use specific encoding:-(
	.byte	0x48,0x83,0xec,0x18		#sub	$0x18,%rsp
	.byte	0x0f,0x29,0x34,0x24		#movaps	%xmm6,(%rsp)
	pshufd	\$0b01001110,$Hkey,$Hkey	# dword swap
	pshufd	\$0b11111111,$Hkey,$T2		# broadcast uppermost dword
	pcmpgtd	$T2,$T3				# broadcast carry bit
	por	$T1,$Hkey			# H<<=1
	pand	.L0x1c2_polynomial(%rip),$T3
	pxor	$T3,$Hkey			# if(carry) H^=0x1c2_polynomial
	pshufd	\$0b01001110,$Hkey,$HK

&clmul64x64_T2	($Xhi,$Xi,$Hkey,$HK);
&reduction_alg9	($Xhi,$Xi);
	pshufd	\$0b01001110,$Hkey,$T1
	pshufd	\$0b01001110,$Xi,$T2
	pxor	$Hkey,$T1		# Karatsuba pre-processing
	movdqu	$Hkey,0x00($Htbl)	# save H
	pxor	$Xi,$T2			# Karatsuba pre-processing
	movdqu	$Xi,0x10($Htbl)		# save H^2
	palignr	\$8,$T1,$T2		# low part is H.lo^H.hi...
	movdqu	$T2,0x20($Htbl)		# save Karatsuba "salt"

&clmul64x64_T2	($Xhi,$Xi,$Hkey,$HK);	# H^3
&reduction_alg9	($Xhi,$Xi);
&clmul64x64_T2	($Xhi,$Xi,$Hkey,$HK);	# H^4
&reduction_alg9	($Xhi,$Xi);
	pshufd	\$0b01001110,$T3,$T1
	pshufd	\$0b01001110,$Xi,$T2
	pxor	$T3,$T1			# Karatsuba pre-processing
	movdqu	$T3,0x30($Htbl)		# save H^3
	pxor	$Xi,$T2			# Karatsuba pre-processing
	movdqu	$Xi,0x40($Htbl)		# save H^4
	palignr	\$8,$T1,$T2		# low part is H^3.lo^H^3.hi...
	movdqu	$T2,0x50($Htbl)		# save Karatsuba "salt"
$code.=<<___ if ($win64);
.LSEH_end_gcm_init_clmul:
.size	gcm_init_clmul,.-gcm_init_clmul

{ my ($Xip,$Htbl)=@_4args;

.globl	gcm_gmult_clmul
.type	gcm_gmult_clmul,\@abi-omnipotent
	movdqa	.Lbswap_mask(%rip),$T3
	movdqu	0x20($Htbl),$T2

&clmul64x64_T2	($Xhi,$Xi,$Hkey,$T2);
$code.=<<___ if (0 || (&reduction_alg9($Xhi,$Xi)&&0));
	# experimental alternative. The special thing about it is that
	# there is no dependency between the two multiplications...
	mov	\$0xA040608020C0E000,%r10	# ((7..0)·0xE0)&0xff
	movq	%r11,$T3		# borrow $T3
	pshufb	$T3,$T2			# ($Xi&7)·0xE0
	pclmulqdq	\$0x00,$Xi,$T1	# ·(0xE1<<1)
	paddd	$T2,$T2			# <<(64+56+1)
	pclmulqdq	\$0x01,$T3,$Xi
	movdqa	.Lbswap_mask(%rip),$T3	# reload $T3
.size	gcm_gmult_clmul,.-gcm_gmult_clmul
{ my ($Xip,$Htbl,$inp,$len)=@_4args;
  my ($Xln,$Xmn,$Xhn,$Hkey2,$HK) = map("%xmm$_",(3..7));
  my ($T1,$T2,$T3)=map("%xmm$_",(8..10));

.globl	gcm_ghash_clmul
.type	gcm_ghash_clmul,\@abi-omnipotent
$code.=<<___ if ($win64);
.LSEH_begin_gcm_ghash_clmul:
	# I can't trust assembler to use specific encoding:-(
	.byte	0x48,0x8d,0x60,0xe0		#lea	-0x20(%rax),%rsp
	.byte	0x0f,0x29,0x70,0xe0		#movaps	%xmm6,-0x20(%rax)
	.byte	0x0f,0x29,0x78,0xf0		#movaps	%xmm7,-0x10(%rax)
	.byte	0x44,0x0f,0x29,0x00		#movaps	%xmm8,0(%rax)
	.byte	0x44,0x0f,0x29,0x48,0x10	#movaps	%xmm9,0x10(%rax)
	.byte	0x44,0x0f,0x29,0x50,0x20	#movaps	%xmm10,0x20(%rax)
	.byte	0x44,0x0f,0x29,0x58,0x30	#movaps	%xmm11,0x30(%rax)
	.byte	0x44,0x0f,0x29,0x60,0x40	#movaps	%xmm12,0x40(%rax)
	.byte	0x44,0x0f,0x29,0x68,0x50	#movaps	%xmm13,0x50(%rax)
	.byte	0x44,0x0f,0x29,0x70,0x60	#movaps	%xmm14,0x60(%rax)
	.byte	0x44,0x0f,0x29,0x78,0x70	#movaps	%xmm15,0x70(%rax)

	movdqa	.Lbswap_mask(%rip),$T3
	movdqu	0x20($Htbl),$HK
	movdqu	0x10($Htbl),$Hkey2

my ($Xl,$Xm,$Xh,$Hkey3,$Hkey4)=map("%xmm$_",(11..15));

	mov	OPENSSL_ia32cap_P+4(%rip),%eax
	and	\$`1<<26|1<<22`,%eax		# isolate MOVBE+XSAVE
	cmp	\$`1<<22`,%eax			# check for MOVBE without XSAVE
	mov	\$0xA040608020C0E000,%rax	# ((7..0)·0xE0)&0xff
	movdqu	0x30($Htbl),$Hkey3
	movdqu	0x40($Htbl),$Hkey4

# Xi+4 =[(H*Ii+3) + (H^2*Ii+2) + (H^3*Ii+1) + H^4*(Ii+Xi)] mod P
	movdqu	0x30($inp),$Xln
	movdqu	0x20($inp),$Xl
	pshufd	\$0b01001110,$Xln,$Xmn
	pclmulqdq	\$0x00,$Hkey,$Xln
	pclmulqdq	\$0x11,$Hkey,$Xhn
	pclmulqdq	\$0x00,$HK,$Xmn
	pshufd	\$0b01001110,$Xl,$Xm
	pclmulqdq	\$0x00,$Hkey2,$Xl
	pclmulqdq	\$0x11,$Hkey2,$Xh
	pclmulqdq	\$0x10,$HK,$Xm
	movups	0x50($Htbl),$HK
	movdqu	0x10($inp),$Xl
	pshufd	\$0b01001110,$Xl,$Xm
	pclmulqdq	\$0x00,$Hkey3,$Xl
	pshufd	\$0b01001110,$Xi,$T1
	pclmulqdq	\$0x11,$Hkey3,$Xh
	pclmulqdq	\$0x00,$HK,$Xm

	pclmulqdq	\$0x00,$Hkey4,$Xi
	movdqu	0x30($inp),$Xl
	pclmulqdq	\$0x11,$Hkey4,$Xhi
	movdqu	0x20($inp),$Xln
	pshufd	\$0b01001110,$Xl,$Xm
	pclmulqdq	\$0x10,$HK,$T1
	movups	0x20($Htbl),$HK
	pclmulqdq	\$0x00,$Hkey,$Xl
	pshufd	\$0b01001110,$Xln,$Xmn
	pxor	$Xi,$T1			# aggregated Karatsuba post-processing
	pclmulqdq	\$0x11,$Hkey,$Xh
	movdqa	.L7_mask(%rip),$T1
	pand	$Xi,$T1			# 1st phase
	pclmulqdq	\$0x00,$HK,$Xm
	pclmulqdq	\$0x00,$Hkey2,$Xln
	movdqa	$Xi,$T2			# 2nd phase
	pclmulqdq	\$0x11,$Hkey2,$Xhn
	movdqu	0x10($inp),$Xl
	pclmulqdq	\$0x10,$HK,$Xmn
	movups	0x50($Htbl),$HK
	pshufd	\$0b01001110,$Xl,$Xm
	pclmulqdq	\$0x00,$Hkey3,$Xl
	pclmulqdq	\$0x11,$Hkey3,$Xh
	pclmulqdq	\$0x00,$HK,$Xm
	pshufd	\$0b01001110,$Xi,$T1
	pclmulqdq	\$0x00,$Hkey4,$Xi
	pclmulqdq	\$0x11,$Hkey4,$Xhi
	pclmulqdq	\$0x10,$HK,$T1
	pxor	$Xi,$Xhi		# aggregated Karatsuba post-processing

&reduction_alg9($Xhi,$Xi);
	movdqu	0x20($Htbl),$HK

# Xi+2 =[H*(Ii+1 + Xi+1)] mod P =
#	[(H*Ii+1) + (H*Xi+1)] mod P =
#	[(H*Ii+1) + H^2*(Ii+Xi)] mod P
	movdqu	($inp),$T1		# Ii
	movdqu	16($inp),$Xln		# Ii+1
	pshufd	\$0b01001110,$Xln,$Xmn
	pclmulqdq	\$0x00,$Hkey,$Xln
	pclmulqdq	\$0x11,$Hkey,$Xhn
	pclmulqdq	\$0x00,$HK,$Xmn
	lea	32($inp),$inp		# i+=2
	pshufd	\$0b01001110,$Xi,$Xmn	#
	pclmulqdq	\$0x00,$Hkey2,$Xi
	pclmulqdq	\$0x11,$Hkey2,$Xhi
	pclmulqdq	\$0x10,$HK,$Xmn
	pxor	$Xln,$Xi		# (H*Ii+1) + H^2*(Ii+Xi)
	movdqu	($inp),$Xhn		# Ii
	pxor	$Xi,$T1			# aggregated Karatsuba post-processing
	movdqu	16($inp),$Xln		# Ii+1
	pxor	$Xhn,$Xhi		# "Ii+Xi", consume early
	movdqa	$Xi,$T2			# 1st phase
	pclmulqdq	\$0x00,$Hkey,$Xln	#######
	pshufd	\$0b01001110,$Xhn,$Xmn
	pclmulqdq	\$0x11,$Hkey,$Xhn	#######
	movdqa	$Xi,$T2			# 2nd phase
	pclmulqdq	\$0x00,$HK,$Xmn		#######
	pshufd	\$0b01001110,$Xi,$Xmn	#
	pclmulqdq	\$0x00,$Hkey2,$Xi
	pclmulqdq	\$0x11,$Hkey2,$Xhi
	pclmulqdq	\$0x10,$HK,$Xmn
	pxor	$Xln,$Xi		# (H*Ii+1) + H^2*(Ii+Xi)

&reduction_alg9	($Xhi,$Xi);
	movdqu	($inp),$T1		# Ii
&clmul64x64_T2	($Xhi,$Xi,$Hkey,$HK);	# H*(Ii+Xi)
&reduction_alg9	($Xhi,$Xi);

$code.=<<___ if ($win64);
	movaps	0x10(%rsp),%xmm7
	movaps	0x20(%rsp),%xmm8
	movaps	0x30(%rsp),%xmm9
	movaps	0x40(%rsp),%xmm10
	movaps	0x50(%rsp),%xmm11
	movaps	0x60(%rsp),%xmm12
	movaps	0x70(%rsp),%xmm13
	movaps	0x80(%rsp),%xmm14
	movaps	0x90(%rsp),%xmm15
.LSEH_end_gcm_ghash_clmul:
.size	gcm_ghash_clmul,.-gcm_ghash_clmul
.globl	gcm_init_avx
.type	gcm_init_avx,\@abi-omnipotent
my ($Htbl,$Xip)=@_4args;
$code.=<<___ if ($win64);
.LSEH_begin_gcm_init_avx:
	# I can't trust assembler to use specific encoding:-(
	.byte	0x48,0x83,0xec,0x18		#sub	$0x18,%rsp
	.byte	0x0f,0x29,0x34,0x24		#movaps	%xmm6,(%rsp)
	vpshufd	\$0b01001110,$Hkey,$Hkey	# dword swap
	vpshufd	\$0b11111111,$Hkey,$T2		# broadcast uppermost dword
	vpsrlq	\$63,$Hkey,$T1
	vpsllq	\$1,$Hkey,$Hkey
	vpcmpgtd	$T2,$T3,$T3		# broadcast carry bit
	vpor	$T1,$Hkey,$Hkey			# H<<=1
	vpand	.L0x1c2_polynomial(%rip),$T3,$T3
	vpxor	$T3,$Hkey,$Hkey			# if(carry) H^=0x1c2_polynomial
	vpunpckhqdq	$Hkey,$Hkey,$HK
	mov	\$4,%r10			# up to H^8
	jmp	.Linit_start_avx

sub clmul64x64_avx {
my ($Xhi,$Xi,$Hkey,$HK)=@_;

if (!defined($HK)) {	$HK = $T2;
	vpunpckhqdq	$Xi,$Xi,$T1
	vpunpckhqdq	$Hkey,$Hkey,$T2

	vpunpckhqdq	$Xi,$Xi,$T1

	vpclmulqdq	\$0x11,$Hkey,$Xi,$Xhi	#######
	vpclmulqdq	\$0x00,$Hkey,$Xi,$Xi	#######
	vpclmulqdq	\$0x00,$HK,$T1,$T1	#######
	vpxor	$Xi,$Xhi,$T2		#
	vpslldq	\$8,$T1,$T2		#
	vpsllq	\$57,$Xi,$T1		# 1st phase
	vpslldq	\$8,$T2,$T1		#
	vpsrlq	\$1,$Xi,$T2		# 2nd phase
	vpsrlq	\$1,$Xi,$Xi		#
	vpxor	$Xhi,$Xi,$Xi		#

	vpalignr	\$8,$T1,$T2,$T3	# low part is H.lo^H.hi...
	vmovdqu	$T3,-0x10($Htbl)	# save Karatsuba "salt"

&clmul64x64_avx	($Xhi,$Xi,$Hkey,$HK);	# calculate H^3,5,7
&reduction_avx	($Xhi,$Xi);
&clmul64x64_avx	($Xhi,$Xi,$Hkey,$HK);	# calculate H^2,4,6,8
&reduction_avx	($Xhi,$Xi);
	vpshufd	\$0b01001110,$T3,$T1
	vpshufd	\$0b01001110,$Xi,$T2
	vpxor	$T3,$T1,$T1		# Karatsuba pre-processing
	vmovdqu	$T3,0x00($Htbl)		# save H^1,3,5,7
	vpxor	$Xi,$T2,$T2		# Karatsuba pre-processing
	vmovdqu	$Xi,0x10($Htbl)		# save H^2,4,6,8
	lea	0x30($Htbl),$Htbl
	vpalignr	\$8,$T2,$T1,$T3	# last "salt" is flipped
	vmovdqu	$T3,-0x10($Htbl)

$code.=<<___ if ($win64);
.LSEH_end_gcm_init_avx:
.size	gcm_init_avx,.-gcm_init_avx
.globl	gcm_gmult_avx
.type	gcm_gmult_avx,\@abi-omnipotent
.size	gcm_gmult_avx,.-gcm_gmult_avx

.globl	gcm_ghash_avx
.type	gcm_ghash_avx,\@abi-omnipotent
my ($Xip,$Htbl,$inp,$len)=@_4args;
my ($Xlo,$Xhi,$Xmi,$Zlo,$Zhi,$Zmi,$Hkey,$HK,$T1,$T2,
    $Xi,$Xo,$Tred,$bswap,$Ii,$Ij) = map("%xmm$_",(0..15));
$code.=<<___ if ($win64);
	lea	-0x88(%rsp),%rax
.LSEH_begin_gcm_ghash_avx:
	# I can't trust assembler to use specific encoding:-(
	.byte	0x48,0x8d,0x60,0xe0		#lea	-0x20(%rax),%rsp
	.byte	0x0f,0x29,0x70,0xe0		#movaps	%xmm6,-0x20(%rax)
	.byte	0x0f,0x29,0x78,0xf0		#movaps	%xmm7,-0x10(%rax)
	.byte	0x44,0x0f,0x29,0x00		#movaps	%xmm8,0(%rax)
	.byte	0x44,0x0f,0x29,0x48,0x10	#movaps	%xmm9,0x10(%rax)
	.byte	0x44,0x0f,0x29,0x50,0x20	#movaps	%xmm10,0x20(%rax)
	.byte	0x44,0x0f,0x29,0x58,0x30	#movaps	%xmm11,0x30(%rax)
	.byte	0x44,0x0f,0x29,0x60,0x40	#movaps	%xmm12,0x40(%rax)
	.byte	0x44,0x0f,0x29,0x68,0x50	#movaps	%xmm13,0x50(%rax)
	.byte	0x44,0x0f,0x29,0x70,0x60	#movaps	%xmm14,0x60(%rax)
	.byte	0x44,0x0f,0x29,0x78,0x70	#movaps	%xmm15,0x70(%rax)

	vmovdqu	($Xip),$Xi		# load $Xi
	lea	.L0x1c2_polynomial(%rip),%r10
	lea	0x40($Htbl),$Htbl	# size optimization
	vmovdqu	.Lbswap_mask(%rip),$bswap
	vpshufb	$bswap,$Xi,$Xi

	vmovdqu	0x70($inp),$Ii		# I[7]
	vmovdqu	0x00-0x40($Htbl),$Hkey	# $Hkey^1
	vpshufb	$bswap,$Ii,$Ii
	vmovdqu	0x20-0x40($Htbl),$HK

	vpunpckhqdq	$Ii,$Ii,$T2
	vmovdqu	0x60($inp),$Ij		# I[6]
	vpclmulqdq	\$0x00,$Hkey,$Ii,$Xlo
	vpshufb	$bswap,$Ij,$Ij
	vpclmulqdq	\$0x11,$Hkey,$Ii,$Xhi
	vmovdqu	0x10-0x40($Htbl),$Hkey	# $Hkey^2
	vpunpckhqdq	$Ij,$Ij,$T1
	vmovdqu	0x50($inp),$Ii		# I[5]
	vpclmulqdq	\$0x00,$HK,$T2,$Xmi

	vpshufb	$bswap,$Ii,$Ii
	vpclmulqdq	\$0x00,$Hkey,$Ij,$Zlo
	vpunpckhqdq	$Ii,$Ii,$T2
	vpclmulqdq	\$0x11,$Hkey,$Ij,$Zhi
	vmovdqu	0x30-0x40($Htbl),$Hkey	# $Hkey^3
	vmovdqu	0x40($inp),$Ij		# I[4]
	vpclmulqdq	\$0x10,$HK,$T1,$Zmi
	vmovdqu	0x50-0x40($Htbl),$HK

	vpshufb	$bswap,$Ij,$Ij
	vpxor	$Xlo,$Zlo,$Zlo
	vpclmulqdq	\$0x00,$Hkey,$Ii,$Xlo
	vpxor	$Xhi,$Zhi,$Zhi
	vpunpckhqdq	$Ij,$Ij,$T1
	vpclmulqdq	\$0x11,$Hkey,$Ii,$Xhi
	vmovdqu	0x40-0x40($Htbl),$Hkey	# $Hkey^4
	vpxor	$Xmi,$Zmi,$Zmi
	vpclmulqdq	\$0x00,$HK,$T2,$Xmi

	vmovdqu	0x30($inp),$Ii		# I[3]
	vpxor	$Zlo,$Xlo,$Xlo
	vpclmulqdq	\$0x00,$Hkey,$Ij,$Zlo
	vpxor	$Zhi,$Xhi,$Xhi
	vpshufb	$bswap,$Ii,$Ii
	vpclmulqdq	\$0x11,$Hkey,$Ij,$Zhi
	vmovdqu	0x60-0x40($Htbl),$Hkey	# $Hkey^5
	vpxor	$Zmi,$Xmi,$Xmi
	vpunpckhqdq	$Ii,$Ii,$T2
	vpclmulqdq	\$0x10,$HK,$T1,$Zmi
	vmovdqu	0x80-0x40($Htbl),$HK

	vmovdqu	0x20($inp),$Ij		# I[2]
	vpxor	$Xlo,$Zlo,$Zlo
	vpclmulqdq	\$0x00,$Hkey,$Ii,$Xlo
	vpxor	$Xhi,$Zhi,$Zhi
	vpshufb	$bswap,$Ij,$Ij
	vpclmulqdq	\$0x11,$Hkey,$Ii,$Xhi
	vmovdqu	0x70-0x40($Htbl),$Hkey	# $Hkey^6
	vpxor	$Xmi,$Zmi,$Zmi
	vpunpckhqdq	$Ij,$Ij,$T1
	vpclmulqdq	\$0x00,$HK,$T2,$Xmi

	vmovdqu	0x10($inp),$Ii		# I[1]
	vpxor	$Zlo,$Xlo,$Xlo
	vpclmulqdq	\$0x00,$Hkey,$Ij,$Zlo
	vpxor	$Zhi,$Xhi,$Xhi
	vpshufb	$bswap,$Ii,$Ii
	vpclmulqdq	\$0x11,$Hkey,$Ij,$Zhi
	vmovdqu	0x90-0x40($Htbl),$Hkey	# $Hkey^7
	vpxor	$Zmi,$Xmi,$Xmi
	vpunpckhqdq	$Ii,$Ii,$T2
	vpclmulqdq	\$0x10,$HK,$T1,$Zmi
	vmovdqu	0xb0-0x40($Htbl),$HK

	vmovdqu	($inp),$Ij		# I[0]
	vpxor	$Xlo,$Zlo,$Zlo
	vpclmulqdq	\$0x00,$Hkey,$Ii,$Xlo
	vpxor	$Xhi,$Zhi,$Zhi
	vpshufb	$bswap,$Ij,$Ij
	vpclmulqdq	\$0x11,$Hkey,$Ii,$Xhi
	vmovdqu	0xa0-0x40($Htbl),$Hkey	# $Hkey^8
	vpxor	$Xmi,$Zmi,$Zmi
	vpclmulqdq	\$0x10,$HK,$T2,$Xmi
	vpxor	$Xi,$Ij,$Ij		# accumulate $Xi

	vpunpckhqdq	$Ij,$Ij,$T1
	vmovdqu	0x70($inp),$Ii		# I[7]
	vpxor	$Xlo,$Zlo,$Zlo
	vpclmulqdq	\$0x00,$Hkey,$Ij,$Xi
	vpshufb	$bswap,$Ii,$Ii
	vpxor	$Xhi,$Zhi,$Zhi
	vpclmulqdq	\$0x11,$Hkey,$Ij,$Xo
	vmovdqu	0x00-0x40($Htbl),$Hkey	# $Hkey^1
	vpunpckhqdq	$Ii,$Ii,$T2
	vpxor	$Xmi,$Zmi,$Zmi
	vpclmulqdq	\$0x00,$HK,$T1,$Tred
	vmovdqu	0x20-0x40($Htbl),$HK

	vmovdqu	0x60($inp),$Ij		# I[6]
	vpclmulqdq	\$0x00,$Hkey,$Ii,$Xlo
	vpxor	$Zlo,$Xi,$Xi		# collect result
	vpshufb	$bswap,$Ij,$Ij
	vpclmulqdq	\$0x11,$Hkey,$Ii,$Xhi
	vmovdqu	0x10-0x40($Htbl),$Hkey	# $Hkey^2
	vpunpckhqdq	$Ij,$Ij,$T1
	vpclmulqdq	\$0x00,$HK, $T2,$Xmi
	vpxor	$Zmi,$Tred,$Tred

	vmovdqu	0x50($inp),$Ii		# I[5]
	vpxor	$Xi,$Tred,$Tred		# aggregated Karatsuba post-processing
	vpclmulqdq	\$0x00,$Hkey,$Ij,$Zlo
	vpxor	$Xo,$Tred,$Tred
	vpslldq	\$8,$Tred,$T2
	vpxor	$Xlo,$Zlo,$Zlo
	vpclmulqdq	\$0x11,$Hkey,$Ij,$Zhi
	vpsrldq	\$8,$Tred,$Tred
	vmovdqu	0x30-0x40($Htbl),$Hkey	# $Hkey^3
	vpshufb	$bswap,$Ii,$Ii
	vxorps	$Tred,$Xo, $Xo
	vpxor	$Xhi,$Zhi,$Zhi
	vpunpckhqdq	$Ii,$Ii,$T2
	vpclmulqdq	\$0x10,$HK, $T1,$Zmi
	vmovdqu	0x50-0x40($Htbl),$HK
	vpxor	$Xmi,$Zmi,$Zmi

	vmovdqu	0x40($inp),$Ij		# I[4]
	vpalignr	\$8,$Xi,$Xi,$Tred	# 1st phase
	vpclmulqdq	\$0x00,$Hkey,$Ii,$Xlo
	vpshufb	$bswap,$Ij,$Ij
	vpxor	$Zlo,$Xlo,$Xlo
	vpclmulqdq	\$0x11,$Hkey,$Ii,$Xhi
	vmovdqu	0x40-0x40($Htbl),$Hkey	# $Hkey^4
	vpunpckhqdq	$Ij,$Ij,$T1
	vpxor	$Zhi,$Xhi,$Xhi
	vpclmulqdq	\$0x00,$HK, $T2,$Xmi
	vpxor	$Zmi,$Xmi,$Xmi

	vmovdqu	0x30($inp),$Ii		# I[3]
	vpclmulqdq	\$0x10,(%r10),$Xi,$Xi
	vpclmulqdq	\$0x00,$Hkey,$Ij,$Zlo
	vpshufb	$bswap,$Ii,$Ii
	vpxor	$Xlo,$Zlo,$Zlo
	vpclmulqdq	\$0x11,$Hkey,$Ij,$Zhi
	vmovdqu	0x60-0x40($Htbl),$Hkey	# $Hkey^5
	vpunpckhqdq	$Ii,$Ii,$T2
	vpxor	$Xhi,$Zhi,$Zhi
	vpclmulqdq	\$0x10,$HK, $T1,$Zmi
	vmovdqu	0x80-0x40($Htbl),$HK
	vpxor	$Xmi,$Zmi,$Zmi

	vmovdqu	0x20($inp),$Ij		# I[2]
	vpclmulqdq	\$0x00,$Hkey,$Ii,$Xlo
	vpshufb	$bswap,$Ij,$Ij
	vpxor	$Zlo,$Xlo,$Xlo
	vpclmulqdq	\$0x11,$Hkey,$Ii,$Xhi
	vmovdqu	0x70-0x40($Htbl),$Hkey	# $Hkey^6
	vpunpckhqdq	$Ij,$Ij,$T1
	vpxor	$Zhi,$Xhi,$Xhi
	vpclmulqdq	\$0x00,$HK, $T2,$Xmi
	vpxor	$Zmi,$Xmi,$Xmi
	vxorps	$Tred,$Xi,$Xi

	vmovdqu	0x10($inp),$Ii		# I[1]
	vpalignr	\$8,$Xi,$Xi,$Tred	# 2nd phase
	vpclmulqdq	\$0x00,$Hkey,$Ij,$Zlo
	vpshufb	$bswap,$Ii,$Ii
	vpxor	$Xlo,$Zlo,$Zlo
	vpclmulqdq	\$0x11,$Hkey,$Ij,$Zhi
	vmovdqu	0x90-0x40($Htbl),$Hkey	# $Hkey^7
	vpclmulqdq	\$0x10,(%r10),$Xi,$Xi
	vxorps	$Xo,$Tred,$Tred
	vpunpckhqdq	$Ii,$Ii,$T2
	vpxor	$Xhi,$Zhi,$Zhi
	vpclmulqdq	\$0x10,$HK, $T1,$Zmi
	vmovdqu	0xb0-0x40($Htbl),$HK
	vpxor	$Xmi,$Zmi,$Zmi

	vmovdqu	($inp),$Ij		# I[0]
	vpclmulqdq	\$0x00,$Hkey,$Ii,$Xlo
	vpshufb	$bswap,$Ij,$Ij
	vpclmulqdq	\$0x11,$Hkey,$Ii,$Xhi
	vmovdqu	0xa0-0x40($Htbl),$Hkey	# $Hkey^8
	vpclmulqdq	\$0x10,$HK, $T2,$Xmi
	vpxor	$Xi,$Ij,$Ij		# accumulate $Xi
	jmp	.Ltail_no_xor_avx

	vmovdqu	-0x10($inp,$len),$Ii	# very last word
	lea	($inp,$len),$inp
	vmovdqu	0x00-0x40($Htbl),$Hkey	# $Hkey^1
	vmovdqu	0x20-0x40($Htbl),$HK
	vpshufb	$bswap,$Ii,$Ij

	vmovdqa	$Xlo,$Zlo		# subtle way to zero $Zlo,
	vmovdqa	$Xhi,$Zhi		# $Zhi and
	vmovdqa	$Xmi,$Zmi		# $Zmi

	vpunpckhqdq	$Ij,$Ij,$T1
	vpxor	$Xlo,$Zlo,$Zlo
	vpclmulqdq	\$0x00,$Hkey,$Ij,$Xlo
	vmovdqu	-0x20($inp),$Ii
	vpxor	$Xhi,$Zhi,$Zhi
	vpclmulqdq	\$0x11,$Hkey,$Ij,$Xhi
	vmovdqu	0x10-0x40($Htbl),$Hkey	# $Hkey^2
	vpshufb	$bswap,$Ii,$Ij
	vpxor	$Xmi,$Zmi,$Zmi
	vpclmulqdq	\$0x00,$HK,$T1,$Xmi

	vpunpckhqdq	$Ij,$Ij,$T1
	vpxor	$Xlo,$Zlo,$Zlo
	vpclmulqdq	\$0x00,$Hkey,$Ij,$Xlo
	vmovdqu	-0x30($inp),$Ii
	vpxor	$Xhi,$Zhi,$Zhi
	vpclmulqdq	\$0x11,$Hkey,$Ij,$Xhi
	vmovdqu	0x30-0x40($Htbl),$Hkey	# $Hkey^3
	vpshufb	$bswap,$Ii,$Ij
	vpxor	$Xmi,$Zmi,$Zmi
	vpclmulqdq	\$0x00,$HK,$T1,$Xmi
	vmovdqu	0x50-0x40($Htbl),$HK

	vpunpckhqdq	$Ij,$Ij,$T1
	vpxor	$Xlo,$Zlo,$Zlo
	vpclmulqdq	\$0x00,$Hkey,$Ij,$Xlo
	vmovdqu	-0x40($inp),$Ii
	vpxor	$Xhi,$Zhi,$Zhi
	vpclmulqdq	\$0x11,$Hkey,$Ij,$Xhi
	vmovdqu	0x40-0x40($Htbl),$Hkey	# $Hkey^4
	vpshufb	$bswap,$Ii,$Ij
	vpxor	$Xmi,$Zmi,$Zmi
	vpclmulqdq	\$0x00,$HK,$T1,$Xmi

	vpunpckhqdq	$Ij,$Ij,$T1
	vpxor	$Xlo,$Zlo,$Zlo
	vpclmulqdq	\$0x00,$Hkey,$Ij,$Xlo
	vmovdqu	-0x50($inp),$Ii
	vpxor	$Xhi,$Zhi,$Zhi
	vpclmulqdq	\$0x11,$Hkey,$Ij,$Xhi
	vmovdqu	0x60-0x40($Htbl),$Hkey	# $Hkey^5
	vpshufb	$bswap,$Ii,$Ij
	vpxor	$Xmi,$Zmi,$Zmi
	vpclmulqdq	\$0x00,$HK,$T1,$Xmi
	vmovdqu	0x80-0x40($Htbl),$HK

	vpunpckhqdq	$Ij,$Ij,$T1
	vpxor	$Xlo,$Zlo,$Zlo
	vpclmulqdq	\$0x00,$Hkey,$Ij,$Xlo
	vmovdqu	-0x60($inp),$Ii
	vpxor	$Xhi,$Zhi,$Zhi
	vpclmulqdq	\$0x11,$Hkey,$Ij,$Xhi
	vmovdqu	0x70-0x40($Htbl),$Hkey	# $Hkey^6
	vpshufb	$bswap,$Ii,$Ij
	vpxor	$Xmi,$Zmi,$Zmi
	vpclmulqdq	\$0x00,$HK,$T1,$Xmi

	vpunpckhqdq	$Ij,$Ij,$T1
	vpxor	$Xlo,$Zlo,$Zlo
	vpclmulqdq	\$0x00,$Hkey,$Ij,$Xlo
	vmovdqu	-0x70($inp),$Ii
	vpxor	$Xhi,$Zhi,$Zhi
	vpclmulqdq	\$0x11,$Hkey,$Ij,$Xhi
	vmovdqu	0x90-0x40($Htbl),$Hkey	# $Hkey^7
	vpshufb	$bswap,$Ii,$Ij
	vpxor	$Xmi,$Zmi,$Zmi
	vpclmulqdq	\$0x00,$HK,$T1,$Xmi
	vmovq	0xb8-0x40($Htbl),$HK

	vpxor	$Xi,$Ij,$Ij		# accumulate $Xi

	vpunpckhqdq	$Ij,$Ij,$T1
	vpxor	$Xlo,$Zlo,$Zlo
	vpclmulqdq	\$0x00,$Hkey,$Ij,$Xlo
	vpxor	$Xhi,$Zhi,$Zhi
	vpclmulqdq	\$0x11,$Hkey,$Ij,$Xhi
	vpxor	$Xmi,$Zmi,$Zmi
	vpclmulqdq	\$0x00,$HK,$T1,$Xmi

	vmovdqu	(%r10),$Tred
	vpxor	$Xmi,$Zmi,$Zmi
	vpxor	$Xi, $Zmi,$Zmi		# aggregated Karatsuba post-processing
	vpxor	$Xo, $Zmi,$Zmi
	vpslldq	\$8, $Zmi,$T2
	vpsrldq	\$8, $Zmi,$Zmi

	vpclmulqdq	\$0x10,$Tred,$Xi,$T2	# 1st phase
	vpalignr	\$8,$Xi,$Xi,$Xi
	vpclmulqdq	\$0x10,$Tred,$Xi,$T2	# 2nd phase
	vpalignr	\$8,$Xi,$Xi,$Xi

	vpshufb	$bswap,$Xi,$Xi

$code.=<<___ if ($win64);
	movaps	0x10(%rsp),%xmm7
	movaps	0x20(%rsp),%xmm8
	movaps	0x30(%rsp),%xmm9
	movaps	0x40(%rsp),%xmm10
	movaps	0x50(%rsp),%xmm11
	movaps	0x60(%rsp),%xmm12
	movaps	0x70(%rsp),%xmm13
	movaps	0x80(%rsp),%xmm14
	movaps	0x90(%rsp),%xmm15
.LSEH_end_gcm_ghash_avx:
.size	gcm_ghash_avx,.-gcm_ghash_avx
.Lbswap_mask:
	.byte	15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,0
.L0x1c2_polynomial:
	.byte	1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0xc2
	.long	7,0,`0xE1<<1`,0
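# .Lbswap_mask is the pshufb byte-reversal pattern; the 0x1c2 constant
# is the GCM polynomial x^128+x^7+x^2+x+1 bit-reflected and shifted
# left by one, i.e. 0xE1<<1.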
.type	.Lrem_4bit,\@object
.Lrem_4bit:
	.long	0,`0x0000<<16`,0,`0x1C20<<16`,0,`0x3840<<16`,0,`0x2460<<16`
	.long	0,`0x7080<<16`,0,`0x6CA0<<16`,0,`0x48C0<<16`,0,`0x54E0<<16`
	.long	0,`0xE100<<16`,0,`0xFD20<<16`,0,`0xD940<<16`,0,`0xC560<<16`
	.long	0,`0x9180<<16`,0,`0x8DA0<<16`,0,`0xA9C0<<16`,0,`0xB5E0<<16`
.type	.Lrem_8bit,\@object
.Lrem_8bit:
	.value	0x0000,0x01C2,0x0384,0x0246,0x0708,0x06CA,0x048C,0x054E
	.value	0x0E10,0x0FD2,0x0D94,0x0C56,0x0918,0x08DA,0x0A9C,0x0B5E
	.value	0x1C20,0x1DE2,0x1FA4,0x1E66,0x1B28,0x1AEA,0x18AC,0x196E
	.value	0x1230,0x13F2,0x11B4,0x1076,0x1538,0x14FA,0x16BC,0x177E
	.value	0x3840,0x3982,0x3BC4,0x3A06,0x3F48,0x3E8A,0x3CCC,0x3D0E
	.value	0x3650,0x3792,0x35D4,0x3416,0x3158,0x309A,0x32DC,0x331E
	.value	0x2460,0x25A2,0x27E4,0x2626,0x2368,0x22AA,0x20EC,0x212E
	.value	0x2A70,0x2BB2,0x29F4,0x2836,0x2D78,0x2CBA,0x2EFC,0x2F3E
	.value	0x7080,0x7142,0x7304,0x72C6,0x7788,0x764A,0x740C,0x75CE
	.value	0x7E90,0x7F52,0x7D14,0x7CD6,0x7998,0x785A,0x7A1C,0x7BDE
	.value	0x6CA0,0x6D62,0x6F24,0x6EE6,0x6BA8,0x6A6A,0x682C,0x69EE
	.value	0x62B0,0x6372,0x6134,0x60F6,0x65B8,0x647A,0x663C,0x67FE
	.value	0x48C0,0x4902,0x4B44,0x4A86,0x4FC8,0x4E0A,0x4C4C,0x4D8E
	.value	0x46D0,0x4712,0x4554,0x4496,0x41D8,0x401A,0x425C,0x439E
	.value	0x54E0,0x5522,0x5764,0x56A6,0x53E8,0x522A,0x506C,0x51AE
	.value	0x5AF0,0x5B32,0x5974,0x58B6,0x5DF8,0x5C3A,0x5E7C,0x5FBE
	.value	0xE100,0xE0C2,0xE284,0xE346,0xE608,0xE7CA,0xE58C,0xE44E
	.value	0xEF10,0xEED2,0xEC94,0xED56,0xE818,0xE9DA,0xEB9C,0xEA5E
	.value	0xFD20,0xFCE2,0xFEA4,0xFF66,0xFA28,0xFBEA,0xF9AC,0xF86E
	.value	0xF330,0xF2F2,0xF0B4,0xF176,0xF438,0xF5FA,0xF7BC,0xF67E
	.value	0xD940,0xD882,0xDAC4,0xDB06,0xDE48,0xDF8A,0xDDCC,0xDC0E
	.value	0xD750,0xD692,0xD4D4,0xD516,0xD058,0xD19A,0xD3DC,0xD21E
	.value	0xC560,0xC4A2,0xC6E4,0xC726,0xC268,0xC3AA,0xC1EC,0xC02E
	.value	0xCB70,0xCAB2,0xC8F4,0xC936,0xCC78,0xCDBA,0xCFFC,0xCE3E
	.value	0x9180,0x9042,0x9204,0x93C6,0x9688,0x974A,0x950C,0x94CE
	.value	0x9F90,0x9E52,0x9C14,0x9DD6,0x9898,0x995A,0x9B1C,0x9ADE
	.value	0x8DA0,0x8C62,0x8E24,0x8FE6,0x8AA8,0x8B6A,0x892C,0x88EE
	.value	0x83B0,0x8272,0x8034,0x81F6,0x84B8,0x857A,0x873C,0x86FE
	.value	0xA9C0,0xA802,0xAA44,0xAB86,0xAEC8,0xAF0A,0xAD4C,0xAC8E
	.value	0xA7D0,0xA612,0xA454,0xA596,0xA0D8,0xA11A,0xA35C,0xA29E
	.value	0xB5E0,0xB422,0xB664,0xB7A6,0xB2E8,0xB32A,0xB16C,0xB0AE
	.value	0xBBF0,0xBA32,0xB874,0xB9B6,0xBCF8,0xBD3A,0xBF7C,0xBEBE
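# Both remainder tables appear to hold carry-less products of the index
# with 0x1C2 (0xE1<<1): .Lrem_8bit[i] = clmul(i,0x1C2) for i in 0..255,
# while .Lrem_4bit keeps clmul(i,0x1C2)<<4 for i in 0..15 in the top 16
# bits of each 64-bit entry (hence the 0,`value<<16` pairs above).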
.asciz	"GHASH for x86_64, CRYPTOGAMS by <appro\@openssl.org>"

# EXCEPTION_DISPOSITION handler (EXCEPTION_RECORD *rec,ULONG64 frame,
#		CONTEXT *context,DISPATCHER_CONTEXT *disp)

.extern	__imp_RtlVirtualUnwind
.type	se_handler,\@abi-omnipotent
	mov	120($context),%rax	# pull context->Rax
	mov	248($context),%rbx	# pull context->Rip

	mov	8($disp),%rsi		# disp->ImageBase
	mov	56($disp),%r11		# disp->HandlerData

	mov	0(%r11),%r10d		# HandlerData[0]
	lea	(%rsi,%r10),%r10	# prologue label
	cmp	%r10,%rbx		# context->Rip<prologue label

	mov	152($context),%rax	# pull context->Rsp

	mov	4(%r11),%r10d		# HandlerData[1]
	lea	(%rsi,%r10),%r10	# epilogue label
	cmp	%r10,%rbx		# context->Rip>=epilogue label

	lea	24(%rax),%rax		# adjust "rsp"

	mov	%rbx,144($context)	# restore context->Rbx
	mov	%rbp,160($context)	# restore context->Rbp
	mov	%r12,216($context)	# restore context->R12

	mov	%rax,152($context)	# restore context->Rsp
	mov	%rsi,168($context)	# restore context->Rsi
	mov	%rdi,176($context)	# restore context->Rdi

	mov	40($disp),%rdi		# disp->ContextRecord
	mov	$context,%rsi		# context
	mov	\$`1232/8`,%ecx		# sizeof(CONTEXT)
	.long	0xa548f3fc		# cld; rep movsq

	xor	%rcx,%rcx		# arg1, UNW_FLAG_NHANDLER
	mov	8(%rsi),%rdx		# arg2, disp->ImageBase
	mov	0(%rsi),%r8		# arg3, disp->ControlPc
	mov	16(%rsi),%r9		# arg4, disp->FunctionEntry
	mov	40(%rsi),%r10		# disp->ContextRecord
	lea	56(%rsi),%r11		# &disp->HandlerData
	lea	24(%rsi),%r12		# &disp->EstablisherFrame
	mov	%r10,32(%rsp)		# arg5
	mov	%r11,40(%rsp)		# arg6
	mov	%r12,48(%rsp)		# arg7
	mov	%rcx,56(%rsp)		# arg8, (NULL)
	call	*__imp_RtlVirtualUnwind(%rip)

	mov	\$1,%eax		# ExceptionContinueSearch
.size	se_handler,.-se_handler
	.rva	.LSEH_begin_gcm_gmult_4bit
	.rva	.LSEH_end_gcm_gmult_4bit
	.rva	.LSEH_info_gcm_gmult_4bit

	.rva	.LSEH_begin_gcm_ghash_4bit
	.rva	.LSEH_end_gcm_ghash_4bit
	.rva	.LSEH_info_gcm_ghash_4bit

	.rva	.LSEH_begin_gcm_init_clmul
	.rva	.LSEH_end_gcm_init_clmul
	.rva	.LSEH_info_gcm_init_clmul

	.rva	.LSEH_begin_gcm_ghash_clmul
	.rva	.LSEH_end_gcm_ghash_clmul
	.rva	.LSEH_info_gcm_ghash_clmul

$code.=<<___ if ($avx);
	.rva	.LSEH_begin_gcm_init_avx
	.rva	.LSEH_end_gcm_init_avx
	.rva	.LSEH_info_gcm_init_clmul

	.rva	.LSEH_begin_gcm_ghash_avx
	.rva	.LSEH_end_gcm_ghash_avx
	.rva	.LSEH_info_gcm_ghash_clmul

.LSEH_info_gcm_gmult_4bit:
	.rva	.Lgmult_prologue,.Lgmult_epilogue	# HandlerData
.LSEH_info_gcm_ghash_4bit:
	.rva	.Lghash_prologue,.Lghash_epilogue	# HandlerData
.LSEH_info_gcm_init_clmul:
	.byte	0x01,0x08,0x03,0x00
	.byte	0x08,0x68,0x00,0x00	#movaps	0x00(rsp),xmm6
	.byte	0x04,0x22,0x00,0x00	#sub	rsp,0x18
.LSEH_info_gcm_ghash_clmul:
	.byte	0x01,0x33,0x16,0x00
	.byte	0x33,0xf8,0x09,0x00	#movaps	0x90(rsp),xmm15
	.byte	0x2e,0xe8,0x08,0x00	#movaps	0x80(rsp),xmm14
	.byte	0x29,0xd8,0x07,0x00	#movaps	0x70(rsp),xmm13
	.byte	0x24,0xc8,0x06,0x00	#movaps	0x60(rsp),xmm12
	.byte	0x1f,0xb8,0x05,0x00	#movaps	0x50(rsp),xmm11
	.byte	0x1a,0xa8,0x04,0x00	#movaps	0x40(rsp),xmm10
	.byte	0x15,0x98,0x03,0x00	#movaps	0x30(rsp),xmm9
	.byte	0x10,0x88,0x02,0x00	#movaps	0x20(rsp),xmm8
	.byte	0x0c,0x78,0x01,0x00	#movaps	0x10(rsp),xmm7
	.byte	0x08,0x68,0x00,0x00	#movaps	0x00(rsp),xmm6
	.byte	0x04,0x01,0x15,0x00	#sub	rsp,0xa8

$code =~ s/\`([^\`]*)\`/eval($1)/gem;