2 # Copyright 2016 The OpenSSL Project Authors. All Rights Reserved.
4 # Licensed under the OpenSSL license (the "License"). You may not use
5 # this file except in compliance with the License. You can obtain a copy
6 # in the file LICENSE in the source distribution or at
7 # https://www.openssl.org/source/license.html
10 # ====================================================================
11 # Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
12 # project. The module is, however, dual licensed under OpenSSL and
13 # CRYPTOGAMS licenses depending on where you obtain it. For further
14 # details see http://www.openssl.org/~appro/cryptogams/.
15 # ====================================================================
17 # This module implements Poly1305 hash for x86_64.
25 # Add AVX512F+VL+BW code path.
27 # Numbers are cycles per processed byte with poly1305_blocks alone,
28 # measured with rdtsc at fixed clock frequency.
30 # IALU/gcc-4.8(*) AVX(**) AVX2
33 # Westmere 1.88/+120% -
34 # Sandy Bridge 1.39/+140% 1.10
35 # Haswell 1.14/+175% 1.11 0.65
36 # Skylake 1.13/+120% 0.96 0.51
37 # Silvermont 2.83/+95% -
38 # Goldmont 1.70/+180% -
39 # VIA Nano 1.82/+150% -
40 # Sledgehammer 1.38/+160% -
41 # Bulldozer 2.30/+130% 0.97
43 # (*) improvement coefficients relative to clang are more modest and
44 # are ~50% on most processors; in both cases we are comparing to
45 # __int128 code;
46 # (**) an SSE2 implementation was attempted, but among non-AVX processors
47 # it was faster than the integer-only code only on older Intel P4 and
48 # Core processors, by 50-30% (the gain shrinking on newer parts), while
49 # being slower on contemporary ones, e.g. almost 2x slower on Atom; as
50 # the former are naturally disappearing, SSE2 is deemed unnecessary;
54 if ($flavour =~ /\./) { $output = $flavour; undef $flavour; }
56 $win64=0; $win64=1 if ($flavour =~ /[nm]asm|mingw64/ || $output =~ /\.asm$/);
58 $0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
59 ( $xlate="${dir}x86_64-xlate.pl" and -f $xlate ) or
60 ( $xlate="${dir}../../perlasm/x86_64-xlate.pl" and -f $xlate) or
61 die "can't locate x86_64-xlate.pl";
63 if (`$ENV{CC} -Wa,-v -c -o /dev/null -x assembler /dev/null 2>&1`
64 =~ /GNU assembler version ([2-9]\.[0-9]+)/) {
65 $avx = ($1>=2.19) + ($1>=2.22) + ($1>=2.25) + ($1>=2.26);
68 if (!$avx && $win64 && ($flavour =~ /nasm/ || $ENV{ASM} =~ /nasm/) &&
69 `nasm -v 2>&1` =~ /NASM version ([2-9]\.[0-9]+)(?:\.([0-9]+))?/) {
70 $avx = ($1>=2.09) + ($1>=2.10) + 2 * ($1>=2.12);
71 $avx += 2 if ($1==2.11 && $2>=8);
74 if (!$avx && $win64 && ($flavour =~ /masm/ || $ENV{ASM} =~ /ml64/) &&
75 `ml64 2>&1` =~ /Version ([0-9]+)\./) {
76 $avx = ($1>=10) + ($1>=12);
79 if (!$avx && `$ENV{CC} -v 2>&1` =~ /((?:^clang|LLVM) version|.*based on LLVM) ([3-9]\.[0-9]+)/) {
80 $avx = ($2>=3.0) + ($2>3.0);
83 open OUT,"| \"$^X\" \"$xlate\" $flavour \"$output\"";
86 my ($ctx,$inp,$len,$padbit)=("%rdi","%rsi","%rdx","%rcx");
87 my ($mac,$nonce)=($inp,$len); # *_emit arguments
88 my ($d1,$d2,$d3, $r0,$r1,$s1)=map("%r$_",(8..13));
89 my ($h0,$h1,$h2)=("%r14","%rbx","%rbp");
91 sub poly1305_iteration {
92 # input: copy of $r1 in %rax, $h0-$h2, $r0-$r1
93 # output: $h0-$h2 *= $r0-$r1
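#
# For orientation, a minimal C sketch of what one iteration computes
# (an illustration only, not the exact instruction schedule): h is kept
# in three 64-bit limbs with the third holding the topmost bits, and
# s1 = r1 + (r1>>2) = (5*r1)/4 is exact because r1's bottom two bits
# are clamped to zero:
#
#	typedef unsigned __int128 u128;
#	uint64_t c;
#	u128 d0 = (u128)h0*r0 + (u128)h1*s1;
#	u128 d1 = (u128)h0*r1 + (u128)h1*r0 + h2*s1;
#	h2 *= r0;			/* h2 is small, can't overflow */
#	h0  = (uint64_t)d0;
#	h1  = (uint64_t)(d1 += d0>>64);
#	h2 += (uint64_t)(d1>>64);
#	/* last reduction step: fold bits 130+ back in, times 5 = 4+1,
#	 * which is what the -4 mask below implements: */
#	c   = (h2>>2) + (h2 & ~3ULL);
#	h2 &= 3;
#	h0 += c; c = (h0 < c);
#	h1 += c; c = (h1 < c);
#	h2 += c;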
101 mov %rax,$h0 # future $h0
111 mov $h2,$h1 # borrow $h1
115 imulq $s1,$h1 # h2*s1
120 imulq $r0,$h2 # h2*r0
122 mov \$-4,%rax # mask value
125 and $d3,%rax # last reduction step
136 ########################################################################
137 # Layout of the opaque area is as follows.
139 # unsigned __int64 h[3]; # current hash value base 2^64
140 # unsigned __int64 r[2]; # key value base 2^64
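#
# in C terms (an illustrative view): struct { u64 h[3]; u64 r[2]; },
# i.e. h at offset 0 and r at offset 24, which is where the code below
# loads them (0($ctx) and 24($ctx)).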
145 .extern OPENSSL_ia32cap_P
148 .hidden poly1305_init
149 .globl poly1305_blocks
150 .hidden poly1305_blocks
152 .hidden poly1305_emit
154 .type poly1305_init,\@function,3
158 mov %rax,0($ctx) # initialize hash value
165 lea poly1305_blocks(%rip),%r10
166 lea poly1305_emit(%rip),%r11
168 $code.=<<___ if ($avx);
169 mov OPENSSL_ia32cap_P+4(%rip),%r9
170 lea poly1305_blocks_avx(%rip),%rax
171 lea poly1305_emit_avx(%rip),%rcx
172 bt \$`60-32`,%r9 # AVX?
176 $code.=<<___ if ($avx>1);
177 lea poly1305_blocks_avx2(%rip),%rax
178 bt \$`5+32`,%r9 # AVX2?
181 $code.=<<___ if ($avx>3);
182 mov \$`(1<<31|1<<21|1<<16)`,%rax
189 mov \$0x0ffffffc0fffffff,%rax
190 mov \$0x0ffffffc0ffffffc,%rcx
196 $code.=<<___ if ($flavour !~ /elf32/);
200 $code.=<<___ if ($flavour =~ /elf32/);
208 .size poly1305_init,.-poly1305_init
210 .type poly1305_blocks,\@function,4
216 jz .Lno_data # too short
232 mov $len,%r15 # reassign $len
234 mov 24($ctx),$r0 # load r
237 mov 0($ctx),$h0 # load hash value
244 add $r1,$s1 # s1 = r1 + (r1 >> 2)
249 add 0($inp),$h0 # accumulate input
254 &poly1305_iteration();
260 mov $h0,0($ctx) # store hash value
277 .cfi_adjust_cfa_offset -48
282 .size poly1305_blocks,.-poly1305_blocks
284 .type poly1305_emit,\@function,3
288 mov 0($ctx),%r8 # load hash value
293 add \$5,%r8 # compare to modulus
297 shr \$2,%r10 # did 130-bit value overflow?
301 add 0($nonce),%rax # accumulate nonce
303 mov %rax,0($mac) # write result
307 .size poly1305_emit,.-poly1305_emit
311 ########################################################################
312 # Layout of the opaque area is as follows.
314 # unsigned __int32 h[5]; # current hash value base 2^26
315 # unsigned __int32 is_base2_26;
316 # unsigned __int64 r[2]; # key value base 2^64
317 # unsigned __int64 pad;
318 # struct { unsigned __int32 r^2, r^1, r^4, r^3; } r[9];
320 # where r^n are the base 2^26 digits of the powers of the multiplier key.
321 # There are 5 digits, but the last four are interleaved with their
322 # multiples of 5, totalling 9 elements: r0, r1, 5*r1, r2, 5*r2, r3, 5*r3, r4, 5*r4.
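#
# An equivalent C declaration of the whole area (offsets inferred from
# the loads and stores below; an illustrative sketch, not a public API):
#
#	struct poly1305_opaque {
#		uint32_t h[5];		/*  0: hash value, base 2^26 */
#		uint32_t is_base2_26;	/* 20 */
#		uint64_t r[2];		/* 24: key value, base 2^64 */
#		uint64_t pad;		/* 40 */
#		uint32_t table[9][4];	/* 48: rows of {r^2,r^1,r^4,r^3}
#					 *     digits as listed above */
#	};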
324 my ($H0,$H1,$H2,$H3,$H4, $T0,$T1,$T2,$T3,$T4, $D0,$D1,$D2,$D3,$D4, $MASK) =
325 map("%xmm$_",(0..15));
328 .type __poly1305_block,\@abi-omnipotent
332 &poly1305_iteration();
335 .size __poly1305_block,.-__poly1305_block
337 .type __poly1305_init_avx,\@abi-omnipotent
344 lea 48+64($ctx),$ctx # size optimization
347 call __poly1305_block # r^2
349 mov \$0x3ffffff,%eax # save interleaved r^2 and r base 2^26
355 mov %eax,`16*0+0-64`($ctx)
357 mov %edx,`16*0+4-64`($ctx)
364 mov %eax,`16*1+0-64`($ctx)
365 lea (%rax,%rax,4),%eax # *5
366 mov %edx,`16*1+4-64`($ctx)
367 lea (%rdx,%rdx,4),%edx # *5
368 mov %eax,`16*2+0-64`($ctx)
370 mov %edx,`16*2+4-64`($ctx)
381 mov %eax,`16*3+0-64`($ctx)
382 lea (%rax,%rax,4),%eax # *5
383 mov %edx,`16*3+4-64`($ctx)
384 lea (%rdx,%rdx,4),%edx # *5
385 mov %eax,`16*4+0-64`($ctx)
387 mov %edx,`16*4+4-64`($ctx)
396 mov %eax,`16*5+0-64`($ctx)
397 lea (%rax,%rax,4),%eax # *5
398 mov %edx,`16*5+4-64`($ctx)
399 lea (%rdx,%rdx,4),%edx # *5
400 mov %eax,`16*6+0-64`($ctx)
402 mov %edx,`16*6+4-64`($ctx)
408 mov $d1#d,`16*7+0-64`($ctx)
409 lea ($d1,$d1,4),$d1 # *5
410 mov $d2#d,`16*7+4-64`($ctx)
411 lea ($d2,$d2,4),$d2 # *5
412 mov $d1#d,`16*8+0-64`($ctx)
413 mov $d2#d,`16*8+4-64`($ctx)
416 call __poly1305_block # r^3
418 mov \$0x3ffffff,%eax # save r^3 base 2^26
422 mov %eax,`16*0+12-64`($ctx)
426 mov %edx,`16*1+12-64`($ctx)
427 lea (%rdx,%rdx,4),%edx # *5
429 mov %edx,`16*2+12-64`($ctx)
435 mov %eax,`16*3+12-64`($ctx)
436 lea (%rax,%rax,4),%eax # *5
438 mov %eax,`16*4+12-64`($ctx)
443 mov %edx,`16*5+12-64`($ctx)
444 lea (%rdx,%rdx,4),%edx # *5
446 mov %edx,`16*6+12-64`($ctx)
451 mov $d1#d,`16*7+12-64`($ctx)
452 lea ($d1,$d1,4),$d1 # *5
453 mov $d1#d,`16*8+12-64`($ctx)
456 call __poly1305_block # r^4
458 mov \$0x3ffffff,%eax # save r^4 base 2^26
462 mov %eax,`16*0+8-64`($ctx)
466 mov %edx,`16*1+8-64`($ctx)
467 lea (%rdx,%rdx,4),%edx # *5
469 mov %edx,`16*2+8-64`($ctx)
475 mov %eax,`16*3+8-64`($ctx)
476 lea (%rax,%rax,4),%eax # *5
478 mov %eax,`16*4+8-64`($ctx)
483 mov %edx,`16*5+8-64`($ctx)
484 lea (%rdx,%rdx,4),%edx # *5
486 mov %edx,`16*6+8-64`($ctx)
491 mov $d1#d,`16*7+8-64`($ctx)
492 lea ($d1,$d1,4),$d1 # *5
493 mov $d1#d,`16*8+8-64`($ctx)
495 lea -48-64($ctx),$ctx # size [de-]optimization
497 .size __poly1305_init_avx,.-__poly1305_init_avx
499 .type poly1305_blocks_avx,\@function,4
503 mov 20($ctx),%r8d # is_base2_26
535 mov $len,%r15 # reassign $len
537 mov 0($ctx),$d1 # load hash value
541 mov 24($ctx),$r0 # load r
544 ################################# base 2^26 -> base 2^64
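# in effect (illustration): with 26-bit digits d0..d4 the 130-bit value is
#	h = d0 + d1*2^26 + d2*2^52 + d3*2^78 + d4*2^104
# so the 64-bit limbs are assembled as
#	h0 = d0 | d1<<26 | d2<<52
#	h1 = d2>>12 | d3<<14 | d4<<40
#	h2 = d4>>24
# keeping in mind that the digits may be only partially carried, hence
# the extra reduction below.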
546 and \$`-1*(1<<31)`,$d1
547 mov $d2,$r1 # borrow $r1
549 and \$`-1*(1<<31)`,$d2
563 adc \$0,$h2 # can be partially reduced...
565 mov \$-4,$d2 # ... so reduce
578 add $r1,$s1 # s1 = r1 + (r1 >> 2)
580 add 0($inp),$h0 # accumulate input
585 call __poly1305_block
587 test $padbit,$padbit # if $padbit is zero,
588 jz .Lstore_base2_64_avx # store hash in base 2^64 format
590 ################################# base 2^64 -> base 2^26
597 and \$0x3ffffff,%rax # h[0]
599 and \$0x3ffffff,%rdx # h[1]
603 and \$0x3ffffff,$h0 # h[2]
605 and \$0x3ffffff,$h1 # h[3]
609 jz .Lstore_base2_26_avx
619 .Lstore_base2_64_avx:
622 mov $h2,16($ctx) # note that is_base2_26 is zeroed
626 .Lstore_base2_26_avx:
627 mov %rax#d,0($ctx) # store hash value base 2^26
647 .cfi_adjust_cfa_offset -48
649 .Lblocks_avx_epilogue:
670 mov $len,%r15 # reassign $len
672 mov 24($ctx),$r0 # load r
675 mov 0($ctx),$h0 # load hash value
682 add $r1,$s1 # s1 = r1 + (r1 >> 2)
687 add 0($inp),$h0 # accumulate input
693 call __poly1305_block
696 ################################# base 2^64 -> base 2^26
703 and \$0x3ffffff,%rax # h[0]
705 and \$0x3ffffff,%rdx # h[1]
709 and \$0x3ffffff,$h0 # h[2]
711 and \$0x3ffffff,$h1 # h[3]
719 movl \$1,20($ctx) # set is_base2_26
721 call __poly1305_init_avx
740 .cfi_adjust_cfa_offset -48
741 .Lbase2_64_avx_epilogue:
748 vmovd 4*0($ctx),$H0 # load hash value
756 $code.=<<___ if (!$win64);
758 .cfi_def_cfa %r11,0x60
761 $code.=<<___ if ($win64);
764 vmovdqa %xmm6,0x50(%r11)
765 vmovdqa %xmm7,0x60(%r11)
766 vmovdqa %xmm8,0x70(%r11)
767 vmovdqa %xmm9,0x80(%r11)
768 vmovdqa %xmm10,0x90(%r11)
769 vmovdqa %xmm11,0xa0(%r11)
770 vmovdqa %xmm12,0xb0(%r11)
771 vmovdqa %xmm13,0xc0(%r11)
772 vmovdqa %xmm14,0xd0(%r11)
773 vmovdqa %xmm15,0xe0(%r11)
781 vmovdqu `16*3`($ctx),$D4 # preload r0^2
782 lea `16*3+64`($ctx),$ctx # size optimization
783 lea .Lconst(%rip),%rcx
785 ################################################################
787 vmovdqu 16*2($inp),$T0
788 vmovdqu 16*3($inp),$T1
789 vmovdqa 64(%rcx),$MASK # .Lmask26
791 vpsrldq \$6,$T0,$T2 # splat input
793 vpunpckhqdq $T1,$T0,$T4 # 4
794 vpunpcklqdq $T1,$T0,$T0 # 0:1
795 vpunpcklqdq $T3,$T2,$T3 # 2:3
797 vpsrlq \$40,$T4,$T4 # 4
799 vpand $MASK,$T0,$T0 # 0
801 vpand $MASK,$T1,$T1 # 1
803 vpand $MASK,$T2,$T2 # 2
804 vpand $MASK,$T3,$T3 # 3
805 vpor 32(%rcx),$T4,$T4 # padbit, yes, always
809 # expand and copy pre-calculated table to stack
810 vmovdqu `16*1-64`($ctx),$D1
811 vmovdqu `16*2-64`($ctx),$D2
812 vpshufd \$0xEE,$D4,$D3 # 34xx -> 3434
813 vpshufd \$0x44,$D4,$D0 # xx12 -> 1212
814 vmovdqa $D3,-0x90(%r11)
815 vmovdqa $D0,0x00(%rsp)
816 vpshufd \$0xEE,$D1,$D4
817 vmovdqu `16*3-64`($ctx),$D0
818 vpshufd \$0x44,$D1,$D1
819 vmovdqa $D4,-0x80(%r11)
820 vmovdqa $D1,0x10(%rsp)
821 vpshufd \$0xEE,$D2,$D3
822 vmovdqu `16*4-64`($ctx),$D1
823 vpshufd \$0x44,$D2,$D2
824 vmovdqa $D3,-0x70(%r11)
825 vmovdqa $D2,0x20(%rsp)
826 vpshufd \$0xEE,$D0,$D4
827 vmovdqu `16*5-64`($ctx),$D2
828 vpshufd \$0x44,$D0,$D0
829 vmovdqa $D4,-0x60(%r11)
830 vmovdqa $D0,0x30(%rsp)
831 vpshufd \$0xEE,$D1,$D3
832 vmovdqu `16*6-64`($ctx),$D0
833 vpshufd \$0x44,$D1,$D1
834 vmovdqa $D3,-0x50(%r11)
835 vmovdqa $D1,0x40(%rsp)
836 vpshufd \$0xEE,$D2,$D4
837 vmovdqu `16*7-64`($ctx),$D1
838 vpshufd \$0x44,$D2,$D2
839 vmovdqa $D4,-0x40(%r11)
840 vmovdqa $D2,0x50(%rsp)
841 vpshufd \$0xEE,$D0,$D3
842 vmovdqu `16*8-64`($ctx),$D2
843 vpshufd \$0x44,$D0,$D0
844 vmovdqa $D3,-0x30(%r11)
845 vmovdqa $D0,0x60(%rsp)
846 vpshufd \$0xEE,$D1,$D4
847 vpshufd \$0x44,$D1,$D1
848 vmovdqa $D4,-0x20(%r11)
849 vmovdqa $D1,0x70(%rsp)
850 vpshufd \$0xEE,$D2,$D3
851 vmovdqa 0x00(%rsp),$D4 # preload r0^2
852 vpshufd \$0x44,$D2,$D2
853 vmovdqa $D3,-0x10(%r11)
854 vmovdqa $D2,0x80(%rsp)
860 ################################################################
861 # ((inp[0]*r^4+inp[2]*r^2+inp[4])*r^4+inp[6]*r^2
862 # ((inp[1]*r^4+inp[3]*r^2+inp[5])*r^3+inp[7]*r
863 # \___________________/
864 # ((inp[0]*r^4+inp[2]*r^2+inp[4])*r^4+inp[6]*r^2+inp[8])*r^2
865 # ((inp[1]*r^4+inp[3]*r^2+inp[5])*r^4+inp[7]*r^2+inp[9])*r
866 # \___________________/ \____________________/
868 # Note that we start with inp[2:3]*r^2. This is because it
869 # doesn't depend on the reduction in the previous iteration.
870 ################################################################
871 # d4 = h4*r0 + h3*r1 + h2*r2 + h1*r3 + h0*r4
872 # d3 = h3*r0 + h2*r1 + h1*r2 + h0*r3 + h4*5*r4
873 # d2 = h2*r0 + h1*r1 + h0*r2 + h4*5*r3 + h3*5*r4
874 # d1 = h1*r0 + h0*r1 + h4*5*r2 + h3*5*r3 + h2*5*r4
875 # d0 = h0*r0 + h4*5*r1 + h3*5*r2 + h2*5*r3 + h1*5*r4
877 # though note that $Tx and $Hx are "reversed" in this section,
878 # and $D4 is preloaded with r0^2...
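#
# the five formulas above are just the base 2^26 schoolbook product
# with the 2^130 = 5 wrap folded in; an illustrative per-lane C
# equivalent:
#
#	for (i = 0; i < 5; i++)
#		for (j = 0; j < 5; j++)
#			d[(i+j) % 5] += (uint64_t)h[i] *
#					(i+j < 5 ? r[j] : 5*r[j]);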
880 vpmuludq $T0,$D4,$D0 # d0 = h0*r0
881 vpmuludq $T1,$D4,$D1 # d1 = h1*r0
882 vmovdqa $H2,0x20(%r11) # offload hash
883 vpmuludq $T2,$D4,$D2 # d2 = h2*r0
884 vmovdqa 0x10(%rsp),$H2 # r1^2
885 vpmuludq $T3,$D4,$D3 # d3 = h3*r0
886 vpmuludq $T4,$D4,$D4 # d4 = h4*r0
888 vmovdqa $H0,0x00(%r11) #
889 vpmuludq 0x20(%rsp),$T4,$H0 # h4*s1
890 vmovdqa $H1,0x10(%r11) #
891 vpmuludq $T3,$H2,$H1 # h3*r1
892 vpaddq $H0,$D0,$D0 # d0 += h4*s1
893 vpaddq $H1,$D4,$D4 # d4 += h3*r1
894 vmovdqa $H3,0x30(%r11) #
895 vpmuludq $T2,$H2,$H0 # h2*r1
896 vpmuludq $T1,$H2,$H1 # h1*r1
897 vpaddq $H0,$D3,$D3 # d3 += h2*r1
898 vmovdqa 0x30(%rsp),$H3 # r2^2
899 vpaddq $H1,$D2,$D2 # d2 += h1*r1
900 vmovdqa $H4,0x40(%r11) #
901 vpmuludq $T0,$H2,$H2 # h0*r1
902 vpmuludq $T2,$H3,$H0 # h2*r2
903 vpaddq $H2,$D1,$D1 # d1 += h0*r1
905 vmovdqa 0x40(%rsp),$H4 # s2^2
906 vpaddq $H0,$D4,$D4 # d4 += h2*r2
907 vpmuludq $T1,$H3,$H1 # h1*r2
908 vpmuludq $T0,$H3,$H3 # h0*r2
909 vpaddq $H1,$D3,$D3 # d3 += h1*r2
910 vmovdqa 0x50(%rsp),$H2 # r3^2
911 vpaddq $H3,$D2,$D2 # d2 += h0*r2
912 vpmuludq $T4,$H4,$H0 # h4*s2
913 vpmuludq $T3,$H4,$H4 # h3*s2
914 vpaddq $H0,$D1,$D1 # d1 += h4*s2
915 vmovdqa 0x60(%rsp),$H3 # s3^2
916 vpaddq $H4,$D0,$D0 # d0 += h3*s2
918 vmovdqa 0x80(%rsp),$H4 # s4^2
919 vpmuludq $T1,$H2,$H1 # h1*r3
920 vpmuludq $T0,$H2,$H2 # h0*r3
921 vpaddq $H1,$D4,$D4 # d4 += h1*r3
922 vpaddq $H2,$D3,$D3 # d3 += h0*r3
923 vpmuludq $T4,$H3,$H0 # h4*s3
924 vpmuludq $T3,$H3,$H1 # h3*s3
925 vpaddq $H0,$D2,$D2 # d2 += h4*s3
926 vmovdqu 16*0($inp),$H0 # load input
927 vpaddq $H1,$D1,$D1 # d1 += h3*s3
928 vpmuludq $T2,$H3,$H3 # h2*s3
929 vpmuludq $T2,$H4,$T2 # h2*s4
930 vpaddq $H3,$D0,$D0 # d0 += h2*s3
932 vmovdqu 16*1($inp),$H1 #
933 vpaddq $T2,$D1,$D1 # d1 += h2*s4
934 vpmuludq $T3,$H4,$T3 # h3*s4
935 vpmuludq $T4,$H4,$T4 # h4*s4
936 vpsrldq \$6,$H0,$H2 # splat input
937 vpaddq $T3,$D2,$D2 # d2 += h3*s4
938 vpaddq $T4,$D3,$D3 # d3 += h4*s4
939 vpsrldq \$6,$H1,$H3 #
940 vpmuludq 0x70(%rsp),$T0,$T4 # h0*r4
941 vpmuludq $T1,$H4,$T0 # h1*s4
942 vpunpckhqdq $H1,$H0,$H4 # 4
943 vpaddq $T4,$D4,$D4 # d4 += h0*r4
944 vmovdqa -0x90(%r11),$T4 # r0^4
945 vpaddq $T0,$D0,$D0 # d0 += h1*s4
947 vpunpcklqdq $H1,$H0,$H0 # 0:1
948 vpunpcklqdq $H3,$H2,$H3 # 2:3
950 #vpsrlq \$40,$H4,$H4 # 4
951 vpsrldq \$`40/8`,$H4,$H4 # 4
953 vpand $MASK,$H0,$H0 # 0
955 vpand $MASK,$H1,$H1 # 1
956 vpand 0(%rcx),$H4,$H4 # .Lmask24
958 vpand $MASK,$H2,$H2 # 2
959 vpand $MASK,$H3,$H3 # 3
960 vpor 32(%rcx),$H4,$H4 # padbit, yes, always
962 vpaddq 0x00(%r11),$H0,$H0 # add hash value
963 vpaddq 0x10(%r11),$H1,$H1
964 vpaddq 0x20(%r11),$H2,$H2
965 vpaddq 0x30(%r11),$H3,$H3
966 vpaddq 0x40(%r11),$H4,$H4
973 ################################################################
974 # Now we accumulate (inp[0:1]+hash)*r^4
975 ################################################################
976 # d4 = h4*r0 + h3*r1 + h2*r2 + h1*r3 + h0*r4
977 # d3 = h3*r0 + h2*r1 + h1*r2 + h0*r3 + h4*5*r4
978 # d2 = h2*r0 + h1*r1 + h0*r2 + h4*5*r3 + h3*5*r4
979 # d1 = h1*r0 + h0*r1 + h4*5*r2 + h3*5*r3 + h2*5*r4
980 # d0 = h0*r0 + h4*5*r1 + h3*5*r2 + h2*5*r3 + h1*5*r4
982 vpmuludq $H0,$T4,$T0 # h0*r0
983 vpmuludq $H1,$T4,$T1 # h1*r0
986 vmovdqa -0x80(%r11),$T2 # r1^4
987 vpmuludq $H2,$T4,$T0 # h2*r0
988 vpmuludq $H3,$T4,$T1 # h3*r0
991 vpmuludq $H4,$T4,$T4 # h4*r0
992 vpmuludq -0x70(%r11),$H4,$T0 # h4*s1
995 vpaddq $T0,$D0,$D0 # d0 += h4*s1
996 vpmuludq $H2,$T2,$T1 # h2*r1
997 vpmuludq $H3,$T2,$T0 # h3*r1
998 vpaddq $T1,$D3,$D3 # d3 += h2*r1
999 vmovdqa -0x60(%r11),$T3 # r2^4
1000 vpaddq $T0,$D4,$D4 # d4 += h3*r1
1001 vpmuludq $H1,$T2,$T1 # h1*r1
1002 vpmuludq $H0,$T2,$T2 # h0*r1
1003 vpaddq $T1,$D2,$D2 # d2 += h1*r1
1004 vpaddq $T2,$D1,$D1 # d1 += h0*r1
1006 vmovdqa -0x50(%r11),$T4 # s2^4
1007 vpmuludq $H2,$T3,$T0 # h2*r2
1008 vpmuludq $H1,$T3,$T1 # h1*r2
1009 vpaddq $T0,$D4,$D4 # d4 += h2*r2
1010 vpaddq $T1,$D3,$D3 # d3 += h1*r2
1011 vmovdqa -0x40(%r11),$T2 # r3^4
1012 vpmuludq $H0,$T3,$T3 # h0*r2
1013 vpmuludq $H4,$T4,$T0 # h4*s2
1014 vpaddq $T3,$D2,$D2 # d2 += h0*r2
1015 vpaddq $T0,$D1,$D1 # d1 += h4*s2
1016 vmovdqa -0x30(%r11),$T3 # s3^4
1017 vpmuludq $H3,$T4,$T4 # h3*s2
1018 vpmuludq $H1,$T2,$T1 # h1*r3
1019 vpaddq $T4,$D0,$D0 # d0 += h3*s2
1021 vmovdqa -0x10(%r11),$T4 # s4^4
1022 vpaddq $T1,$D4,$D4 # d4 += h1*r3
1023 vpmuludq $H0,$T2,$T2 # h0*r3
1024 vpmuludq $H4,$T3,$T0 # h4*s3
1025 vpaddq $T2,$D3,$D3 # d3 += h0*r3
1026 vpaddq $T0,$D2,$D2 # d2 += h4*s3
1027 vmovdqu 16*2($inp),$T0 # load input
1028 vpmuludq $H3,$T3,$T2 # h3*s3
1029 vpmuludq $H2,$T3,$T3 # h2*s3
1030 vpaddq $T2,$D1,$D1 # d1 += h3*s3
1031 vmovdqu 16*3($inp),$T1 #
1032 vpaddq $T3,$D0,$D0 # d0 += h2*s3
1034 vpmuludq $H2,$T4,$H2 # h2*s4
1035 vpmuludq $H3,$T4,$H3 # h3*s4
1036 vpsrldq \$6,$T0,$T2 # splat input
1037 vpaddq $H2,$D1,$D1 # d1 += h2*s4
1038 vpmuludq $H4,$T4,$H4 # h4*s4
1039 vpsrldq \$6,$T1,$T3 #
1040 vpaddq $H3,$D2,$H2 # h2 = d2 + h3*s4
1041 vpaddq $H4,$D3,$H3 # h3 = d3 + h4*s4
1042 vpmuludq -0x20(%r11),$H0,$H4 # h0*r4
1043 vpmuludq $H1,$T4,$H0 # h1*s4
1044 vpunpckhqdq $T1,$T0,$T4 # 4
1045 vpaddq $H4,$D4,$H4 # h4 = d4 + h0*r4
1046 vpaddq $H0,$D0,$H0 # h0 = d0 + h1*s4
1048 vpunpcklqdq $T1,$T0,$T0 # 0:1
1049 vpunpcklqdq $T3,$T2,$T3 # 2:3
1051 #vpsrlq \$40,$T4,$T4 # 4
1052 vpsrldq \$`40/8`,$T4,$T4 # 4
1054 vmovdqa 0x00(%rsp),$D4 # preload r0^2
1055 vpand $MASK,$T0,$T0 # 0
1057 vpand $MASK,$T1,$T1 # 1
1058 vpand 0(%rcx),$T4,$T4 # .Lmask24
1060 vpand $MASK,$T2,$T2 # 2
1061 vpand $MASK,$T3,$T3 # 3
1062 vpor 32(%rcx),$T4,$T4 # padbit, yes, always
1064 ################################################################
1065 # lazy reduction as discussed in "NEON crypto" by D.J. Bernstein
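#
# each step below is, in scalar terms (an illustrative sketch; the
# vector code interleaves two such carry chains):
#
#	c = d >> 26;  h = d & 0x3ffffff;  d_next += c;
#
# with the carry out of h4 wrapping into h0 multiplied by 5 (as
# 2^130 = 5 mod p), computed below as c + (c<<2).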
1070 vpaddq $D3,$H4,$H4 # h3 -> h4
1074 vpaddq $D0,$D1,$H1 # h0 -> h1
1081 vpaddq $D1,$H2,$H2 # h1 -> h2
1085 vpaddq $D0,$H0,$H0 # h4 -> h0
1089 vpaddq $D2,$H3,$H3 # h2 -> h3
1093 vpaddq $D0,$H1,$H1 # h0 -> h1
1097 vpaddq $D3,$H4,$H4 # h3 -> h4
1102 ################################################################
1103 # multiply (inp[0:1]+hash) or inp[2:3] by r^2:r^1
1105 vpshufd \$0x10,$D4,$D4 # r0^n, xx12 -> x1x2
1116 vmovdqa $H2,0x20(%r11)
1117 vmovdqa $H0,0x00(%r11)
1118 vmovdqa $H1,0x10(%r11)
1119 vmovdqa $H3,0x30(%r11)
1120 vmovdqa $H4,0x40(%r11)
1122 # d4 = h4*r0 + h3*r1 + h2*r2 + h1*r3 + h0*r4
1123 # d3 = h3*r0 + h2*r1 + h1*r2 + h0*r3 + h4*5*r4
1124 # d2 = h2*r0 + h1*r1 + h0*r2 + h4*5*r3 + h3*5*r4
1125 # d1 = h1*r0 + h0*r1 + h4*5*r2 + h3*5*r3 + h2*5*r4
1126 # d0 = h0*r0 + h4*5*r1 + h3*5*r2 + h2*5*r3 + h1*5*r4
1128 vpmuludq $T2,$D4,$D2 # d2 = h2*r0
1129 vpmuludq $T0,$D4,$D0 # d0 = h0*r0
1130 vpshufd \$0x10,`16*1-64`($ctx),$H2 # r1^n
1131 vpmuludq $T1,$D4,$D1 # d1 = h1*r0
1132 vpmuludq $T3,$D4,$D3 # d3 = h3*r0
1133 vpmuludq $T4,$D4,$D4 # d4 = h4*r0
1135 vpmuludq $T3,$H2,$H0 # h3*r1
1136 vpaddq $H0,$D4,$D4 # d4 += h3*r1
1137 vpshufd \$0x10,`16*2-64`($ctx),$H3 # s1^n
1138 vpmuludq $T2,$H2,$H1 # h2*r1
1139 vpaddq $H1,$D3,$D3 # d3 += h2*r1
1140 vpshufd \$0x10,`16*3-64`($ctx),$H4 # r2^n
1141 vpmuludq $T1,$H2,$H0 # h1*r1
1142 vpaddq $H0,$D2,$D2 # d2 += h1*r1
1143 vpmuludq $T0,$H2,$H2 # h0*r1
1144 vpaddq $H2,$D1,$D1 # d1 += h0*r1
1145 vpmuludq $T4,$H3,$H3 # h4*s1
1146 vpaddq $H3,$D0,$D0 # d0 += h4*s1
1148 vpshufd \$0x10,`16*4-64`($ctx),$H2 # s2^n
1149 vpmuludq $T2,$H4,$H1 # h2*r2
1150 vpaddq $H1,$D4,$D4 # d4 += h2*r2
1151 vpmuludq $T1,$H4,$H0 # h1*r2
1152 vpaddq $H0,$D3,$D3 # d3 += h1*r2
1153 vpshufd \$0x10,`16*5-64`($ctx),$H3 # r3^n
1154 vpmuludq $T0,$H4,$H4 # h0*r2
1155 vpaddq $H4,$D2,$D2 # d2 += h0*r2
1156 vpmuludq $T4,$H2,$H1 # h4*s2
1157 vpaddq $H1,$D1,$D1 # d1 += h4*s2
1158 vpshufd \$0x10,`16*6-64`($ctx),$H4 # s3^n
1159 vpmuludq $T3,$H2,$H2 # h3*s2
1160 vpaddq $H2,$D0,$D0 # d0 += h3*s2
1162 vpmuludq $T1,$H3,$H0 # h1*r3
1163 vpaddq $H0,$D4,$D4 # d4 += h1*r3
1164 vpmuludq $T0,$H3,$H3 # h0*r3
1165 vpaddq $H3,$D3,$D3 # d3 += h0*r3
1166 vpshufd \$0x10,`16*7-64`($ctx),$H2 # r4^n
1167 vpmuludq $T4,$H4,$H1 # h4*s3
1168 vpaddq $H1,$D2,$D2 # d2 += h4*s3
1169 vpshufd \$0x10,`16*8-64`($ctx),$H3 # s4^n
1170 vpmuludq $T3,$H4,$H0 # h3*s3
1171 vpaddq $H0,$D1,$D1 # d1 += h3*s3
1172 vpmuludq $T2,$H4,$H4 # h2*s3
1173 vpaddq $H4,$D0,$D0 # d0 += h2*s3
1175 vpmuludq $T0,$H2,$H2 # h0*r4
1176 vpaddq $H2,$D4,$D4 # h4 = d4 + h0*r4
1177 vpmuludq $T4,$H3,$H1 # h4*s4
1178 vpaddq $H1,$D3,$D3 # h3 = d3 + h4*s4
1179 vpmuludq $T3,$H3,$H0 # h3*s4
1180 vpaddq $H0,$D2,$D2 # h2 = d2 + h3*s4
1181 vpmuludq $T2,$H3,$H1 # h2*s4
1182 vpaddq $H1,$D1,$D1 # h1 = d1 + h2*s4
1183 vpmuludq $T1,$H3,$H3 # h1*s4
1184 vpaddq $H3,$D0,$D0 # h0 = d0 + h1*s4
1188 vmovdqu 16*0($inp),$H0 # load input
1189 vmovdqu 16*1($inp),$H1
1191 vpsrldq \$6,$H0,$H2 # splat input
1193 vpunpckhqdq $H1,$H0,$H4 # 4
1194 vpunpcklqdq $H1,$H0,$H0 # 0:1
1195 vpunpcklqdq $H3,$H2,$H3 # 2:3
1197 vpsrlq \$40,$H4,$H4 # 4
1199 vpand $MASK,$H0,$H0 # 0
1201 vpand $MASK,$H1,$H1 # 1
1203 vpand $MASK,$H2,$H2 # 2
1204 vpand $MASK,$H3,$H3 # 3
1205 vpor 32(%rcx),$H4,$H4 # padbit, yes, always
1207 vpshufd \$0x32,`16*0-64`($ctx),$T4 # r0^n, 34xx -> x3x4
1208 vpaddq 0x00(%r11),$H0,$H0
1209 vpaddq 0x10(%r11),$H1,$H1
1210 vpaddq 0x20(%r11),$H2,$H2
1211 vpaddq 0x30(%r11),$H3,$H3
1212 vpaddq 0x40(%r11),$H4,$H4
1214 ################################################################
1215 # multiply (inp[0:1]+hash) by r^4:r^3 and accumulate
1217 vpmuludq $H0,$T4,$T0 # h0*r0
1218 vpaddq $T0,$D0,$D0 # d0 += h0*r0
1219 vpmuludq $H1,$T4,$T1 # h1*r0
1220 vpaddq $T1,$D1,$D1 # d1 += h1*r0
1221 vpmuludq $H2,$T4,$T0 # h2*r0
1222 vpaddq $T0,$D2,$D2 # d2 += h2*r0
1223 vpshufd \$0x32,`16*1-64`($ctx),$T2 # r1^n
1224 vpmuludq $H3,$T4,$T1 # h3*r0
1225 vpaddq $T1,$D3,$D3 # d3 += h3*r0
1226 vpmuludq $H4,$T4,$T4 # h4*r0
1227 vpaddq $T4,$D4,$D4 # d4 += h4*r0
1229 vpmuludq $H3,$T2,$T0 # h3*r1
1230 vpaddq $T0,$D4,$D4 # d4 += h3*r1
1231 vpshufd \$0x32,`16*2-64`($ctx),$T3 # s1
1232 vpmuludq $H2,$T2,$T1 # h2*r1
1233 vpaddq $T1,$D3,$D3 # d3 += h2*r1
1234 vpshufd \$0x32,`16*3-64`($ctx),$T4 # r2
1235 vpmuludq $H1,$T2,$T0 # h1*r1
1236 vpaddq $T0,$D2,$D2 # d2 += h1*r1
1237 vpmuludq $H0,$T2,$T2 # h0*r1
1238 vpaddq $T2,$D1,$D1 # d1 += h0*r1
1239 vpmuludq $H4,$T3,$T3 # h4*s1
1240 vpaddq $T3,$D0,$D0 # d0 += h4*s1
1242 vpshufd \$0x32,`16*4-64`($ctx),$T2 # s2
1243 vpmuludq $H2,$T4,$T1 # h2*r2
1244 vpaddq $T1,$D4,$D4 # d4 += h2*r2
1245 vpmuludq $H1,$T4,$T0 # h1*r2
1246 vpaddq $T0,$D3,$D3 # d3 += h1*r2
1247 vpshufd \$0x32,`16*5-64`($ctx),$T3 # r3
1248 vpmuludq $H0,$T4,$T4 # h0*r2
1249 vpaddq $T4,$D2,$D2 # d2 += h0*r2
1250 vpmuludq $H4,$T2,$T1 # h4*s2
1251 vpaddq $T1,$D1,$D1 # d1 += h4*s2
1252 vpshufd \$0x32,`16*6-64`($ctx),$T4 # s3
1253 vpmuludq $H3,$T2,$T2 # h3*s2
1254 vpaddq $T2,$D0,$D0 # d0 += h3*s2
1256 vpmuludq $H1,$T3,$T0 # h1*r3
1257 vpaddq $T0,$D4,$D4 # d4 += h1*r3
1258 vpmuludq $H0,$T3,$T3 # h0*r3
1259 vpaddq $T3,$D3,$D3 # d3 += h0*r3
1260 vpshufd \$0x32,`16*7-64`($ctx),$T2 # r4
1261 vpmuludq $H4,$T4,$T1 # h4*s3
1262 vpaddq $T1,$D2,$D2 # d2 += h4*s3
1263 vpshufd \$0x32,`16*8-64`($ctx),$T3 # s4
1264 vpmuludq $H3,$T4,$T0 # h3*s3
1265 vpaddq $T0,$D1,$D1 # d1 += h3*s3
1266 vpmuludq $H2,$T4,$T4 # h2*s3
1267 vpaddq $T4,$D0,$D0 # d0 += h2*s3
1269 vpmuludq $H0,$T2,$T2 # h0*r4
1270 vpaddq $T2,$D4,$D4 # d4 += h0*r4
1271 vpmuludq $H4,$T3,$T1 # h4*s4
1272 vpaddq $T1,$D3,$D3 # d3 += h4*s4
1273 vpmuludq $H3,$T3,$T0 # h3*s4
1274 vpaddq $T0,$D2,$D2 # d2 += h3*s4
1275 vpmuludq $H2,$T3,$T1 # h2*s4
1276 vpaddq $T1,$D1,$D1 # d1 += h2*s4
1277 vpmuludq $H1,$T3,$T3 # h1*s4
1278 vpaddq $T3,$D0,$D0 # d0 += h1*s4
1281 ################################################################
1282 # horizontal addition
1295 ################################################################
1300 vpaddq $H3,$D4,$D4 # h3 -> h4
1304 vpaddq $H0,$D1,$D1 # h0 -> h1
1311 vpaddq $H1,$D2,$D2 # h1 -> h2
1315 vpaddq $H4,$D0,$D0 # h4 -> h0
1319 vpaddq $H2,$D3,$D3 # h2 -> h3
1323 vpaddq $H0,$D1,$D1 # h0 -> h1
1327 vpaddq $H3,$D4,$D4 # h3 -> h4
1329 vmovd $D0,`4*0-48-64`($ctx) # save partially reduced
1330 vmovd $D1,`4*1-48-64`($ctx)
1331 vmovd $D2,`4*2-48-64`($ctx)
1332 vmovd $D3,`4*3-48-64`($ctx)
1333 vmovd $D4,`4*4-48-64`($ctx)
1335 $code.=<<___ if ($win64);
1336 vmovdqa 0x50(%r11),%xmm6
1337 vmovdqa 0x60(%r11),%xmm7
1338 vmovdqa 0x70(%r11),%xmm8
1339 vmovdqa 0x80(%r11),%xmm9
1340 vmovdqa 0x90(%r11),%xmm10
1341 vmovdqa 0xa0(%r11),%xmm11
1342 vmovdqa 0xb0(%r11),%xmm12
1343 vmovdqa 0xc0(%r11),%xmm13
1344 vmovdqa 0xd0(%r11),%xmm14
1345 vmovdqa 0xe0(%r11),%xmm15
1349 $code.=<<___ if (!$win64);
1357 .size poly1305_blocks_avx,.-poly1305_blocks_avx
1359 .type poly1305_emit_avx,\@function,3
1362 cmpl \$0,20($ctx) # is_base2_26?
1365 mov 0($ctx),%eax # load hash value base 2^26
1371 shl \$26,%rcx # base 2^26 -> base 2^64
1387 mov %r10,%rax # could be partially reduced, so reduce
1398 add \$5,%r8 # compare to modulus
1402 shr \$2,%r10 # did 130-bit value overflow?
1406 add 0($nonce),%rax # accumulate nonce
1408 mov %rax,0($mac) # write result
1412 .size poly1305_emit_avx,.-poly1305_emit_avx
1416 my ($H0,$H1,$H2,$H3,$H4, $MASK, $T4,$T0,$T1,$T2,$T3, $D0,$D1,$D2,$D3,$D4) =
1417 map("%ymm$_",(0..15));
1421 .type poly1305_blocks_avx2,\@function,4
1423 poly1305_blocks_avx2:
1425 mov 20($ctx),%r8d # is_base2_26
1457 mov $len,%r15 # reassign $len
1459 mov 0($ctx),$d1 # load hash value
1463 mov 24($ctx),$r0 # load r
1466 ################################# base 2^26 -> base 2^64
1468 and \$`-1*(1<<31)`,$d1
1469 mov $d2,$r1 # borrow $r1
1471 and \$`-1*(1<<31)`,$d2
1485 adc \$0,$h2 # can be partially reduced...
1487 mov \$-4,$d2 # ... so reduce
1500 add $r1,$s1 # s1 = r1 + (r1 >> 2)
1502 .Lbase2_26_pre_avx2:
1503 add 0($inp),$h0 # accumulate input
1509 call __poly1305_block
1513 jnz .Lbase2_26_pre_avx2
1515 test $padbit,$padbit # if $padbit is zero,
1516 jz .Lstore_base2_64_avx2 # store hash in base 2^64 format
1518 ################################# base 2^64 -> base 2^26
1525 and \$0x3ffffff,%rax # h[0]
1527 and \$0x3ffffff,%rdx # h[1]
1531 and \$0x3ffffff,$h0 # h[2]
1533 and \$0x3ffffff,$h1 # h[3]
1537 jz .Lstore_base2_26_avx2
1547 .Lstore_base2_64_avx2:
1550 mov $h2,16($ctx) # note that is_base2_26 is zeroed
1554 .Lstore_base2_26_avx2:
1555 mov %rax#d,0($ctx) # store hash value base 2^26
1575 .cfi_adjust_cfa_offset -48
1577 .Lblocks_avx2_epilogue:
1596 .Lbase2_64_avx2_body:
1598 mov $len,%r15 # reassign $len
1600 mov 24($ctx),$r0 # load r
1603 mov 0($ctx),$h0 # load hash value
1610 add $r1,$s1 # s1 = r1 + (r1 >> 2)
1615 .Lbase2_64_pre_avx2:
1616 add 0($inp),$h0 # accumulate input
1622 call __poly1305_block
1626 jnz .Lbase2_64_pre_avx2
1629 ################################# base 2^64 -> base 2^26
1636 and \$0x3ffffff,%rax # h[0]
1638 and \$0x3ffffff,%rdx # h[1]
1642 and \$0x3ffffff,$h0 # h[2]
1644 and \$0x3ffffff,$h1 # h[3]
1652 movl \$1,20($ctx) # set is_base2_26
1654 call __poly1305_init_avx
1657 mov %r15,$len # restore $len
1658 mov OPENSSL_ia32cap_P+8(%rip),%r10d
1659 mov \$`(1<<31|1<<30|1<<16)`,%r11d
1675 .cfi_adjust_cfa_offset -48
1676 .Lbase2_64_avx2_epilogue:
1683 mov OPENSSL_ia32cap_P+8(%rip),%r10d
1684 mov \$`(1<<31|1<<30|1<<16)`,%r11d
1685 vmovd 4*0($ctx),%x#$H0 # load hash value base 2^26
1686 vmovd 4*1($ctx),%x#$H1
1687 vmovd 4*2($ctx),%x#$H2
1688 vmovd 4*3($ctx),%x#$H3
1689 vmovd 4*4($ctx),%x#$H4
1693 $code.=<<___ if ($avx>2);
1697 cmp %r11d,%r10d # check for AVX512F+BW+VL
1701 $code.=<<___ if (!$win64);
1703 .cfi_def_cfa %r11,16
1706 $code.=<<___ if ($win64);
1707 lea -0xf8(%rsp),%r11
1709 vmovdqa %xmm6,0x50(%r11)
1710 vmovdqa %xmm7,0x60(%r11)
1711 vmovdqa %xmm8,0x70(%r11)
1712 vmovdqa %xmm9,0x80(%r11)
1713 vmovdqa %xmm10,0x90(%r11)
1714 vmovdqa %xmm11,0xa0(%r11)
1715 vmovdqa %xmm12,0xb0(%r11)
1716 vmovdqa %xmm13,0xc0(%r11)
1717 vmovdqa %xmm14,0xd0(%r11)
1718 vmovdqa %xmm15,0xe0(%r11)
1722 lea .Lconst(%rip),%rcx
1723 lea 48+64($ctx),$ctx # size optimization
1724 vmovdqa 96(%rcx),$T0 # .Lpermd_avx2
1726 # expand and copy pre-calculated table to stack
1727 vmovdqu `16*0-64`($ctx),%x#$T2
1729 vmovdqu `16*1-64`($ctx),%x#$T3
1730 vmovdqu `16*2-64`($ctx),%x#$T4
1731 vmovdqu `16*3-64`($ctx),%x#$D0
1732 vmovdqu `16*4-64`($ctx),%x#$D1
1733 vmovdqu `16*5-64`($ctx),%x#$D2
1734 lea 0x90(%rsp),%rax # size optimization
1735 vmovdqu `16*6-64`($ctx),%x#$D3
1736 vpermd $T2,$T0,$T2 # 00003412 -> 14243444
1737 vmovdqu `16*7-64`($ctx),%x#$D4
1739 vmovdqu `16*8-64`($ctx),%x#$MASK
1741 vmovdqa $T2,0x00(%rsp)
1743 vmovdqa $T3,0x20-0x90(%rax)
1745 vmovdqa $T4,0x40-0x90(%rax)
1747 vmovdqa $D0,0x60-0x90(%rax)
1749 vmovdqa $D1,0x80-0x90(%rax)
1751 vmovdqa $D2,0xa0-0x90(%rax)
1752 vpermd $MASK,$T0,$MASK
1753 vmovdqa $D3,0xc0-0x90(%rax)
1754 vmovdqa $D4,0xe0-0x90(%rax)
1755 vmovdqa $MASK,0x100-0x90(%rax)
1756 vmovdqa 64(%rcx),$MASK # .Lmask26
1758 ################################################################
1760 vmovdqu 16*0($inp),%x#$T0
1761 vmovdqu 16*1($inp),%x#$T1
1762 vinserti128 \$1,16*2($inp),$T0,$T0
1763 vinserti128 \$1,16*3($inp),$T1,$T1
1766 vpsrldq \$6,$T0,$T2 # splat input
1768 vpunpckhqdq $T1,$T0,$T4 # 4
1769 vpunpcklqdq $T3,$T2,$T2 # 2:3
1770 vpunpcklqdq $T1,$T0,$T0 # 0:1
1775 vpsrlq \$40,$T4,$T4 # 4
1776 vpand $MASK,$T2,$T2 # 2
1777 vpand $MASK,$T0,$T0 # 0
1778 vpand $MASK,$T1,$T1 # 1
1779 vpand $MASK,$T3,$T3 # 3
1780 vpor 32(%rcx),$T4,$T4 # padbit, yes, always
1782 vpaddq $H2,$T2,$H2 # accumulate input
1789 ################################################################
1790 # ((inp[0]*r^4+inp[4])*r^4+inp[ 8])*r^4
1791 # ((inp[1]*r^4+inp[5])*r^4+inp[ 9])*r^3
1792 # ((inp[2]*r^4+inp[6])*r^4+inp[10])*r^2
1793 # ((inp[3]*r^4+inp[7])*r^4+inp[11])*r^1
1794 # \________/\__________/
1795 ################################################################
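#
# in other words (illustration): lane k evolves as
#	lane[k] = ((inp[k]*r^4 + inp[k+4])*r^4 + inp[k+8])*r^4 + ...
# and the last iteration multiplies lane k by r^(4-k), so the sum of
# the four lanes equals the sequential Horner evaluation of the whole
# message.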
1796 #vpaddq $H2,$T2,$H2 # accumulate input
1798 vmovdqa `32*0`(%rsp),$T0 # r0^4
1800 vmovdqa `32*1`(%rsp),$T1 # r1^4
1802 vmovdqa `32*3`(%rsp),$T2 # r2^4
1804 vmovdqa `32*6-0x90`(%rax),$T3 # s3^4
1805 vmovdqa `32*8-0x90`(%rax),$S4 # s4^4
1807 # d4 = h4*r0 + h3*r1 + h2*r2 + h1*r3 + h0*r4
1808 # d3 = h3*r0 + h2*r1 + h1*r2 + h0*r3 + h4*5*r4
1809 # d2 = h2*r0 + h1*r1 + h0*r2 + h4*5*r3 + h3*5*r4
1810 # d1 = h1*r0 + h0*r1 + h4*5*r2 + h3*5*r3 + h2*5*r4
1811 # d0 = h0*r0 + h4*5*r1 + h3*5*r2 + h2*5*r3 + h1*5*r4
1813 # however, as h2 is "chronologically" the first one available, pull the
1814 # corresponding operations up, so it becomes
1816 # d4 = h2*r2 + h4*r0 + h3*r1 + h1*r3 + h0*r4
1817 # d3 = h2*r1 + h3*r0 + h1*r2 + h0*r3 + h4*5*r4
1818 # d2 = h2*r0 + h1*r1 + h0*r2 + h4*5*r3 + h3*5*r4
1819 # d1 = h2*5*r4 + h1*r0 + h0*r1 + h4*5*r2 + h3*5*r3
1820 # d0 = h2*5*r3 + h0*r0 + h4*5*r1 + h3*5*r2 + h1*5*r4
1822 vpmuludq $H2,$T0,$D2 # d2 = h2*r0
1823 vpmuludq $H2,$T1,$D3 # d3 = h2*r1
1824 vpmuludq $H2,$T2,$D4 # d4 = h2*r2
1825 vpmuludq $H2,$T3,$D0 # d0 = h2*s3
1826 vpmuludq $H2,$S4,$D1 # d1 = h2*s4
1828 vpmuludq $H0,$T1,$T4 # h0*r1
1829 vpmuludq $H1,$T1,$H2 # h1*r1, borrow $H2 as temp
1830 vpaddq $T4,$D1,$D1 # d1 += h0*r1
1831 vpaddq $H2,$D2,$D2 # d2 += h1*r1
1832 vpmuludq $H3,$T1,$T4 # h3*r1
1833 vpmuludq `32*2`(%rsp),$H4,$H2 # h4*s1
1834 vpaddq $T4,$D4,$D4 # d4 += h3*r1
1835 vpaddq $H2,$D0,$D0 # d0 += h4*s1
1836 vmovdqa `32*4-0x90`(%rax),$T1 # s2
1838 vpmuludq $H0,$T0,$T4 # h0*r0
1839 vpmuludq $H1,$T0,$H2 # h1*r0
1840 vpaddq $T4,$D0,$D0 # d0 += h0*r0
1841 vpaddq $H2,$D1,$D1 # d1 += h1*r0
1842 vpmuludq $H3,$T0,$T4 # h3*r0
1843 vpmuludq $H4,$T0,$H2 # h4*r0
1844 vmovdqu 16*0($inp),%x#$T0 # load input
1845 vpaddq $T4,$D3,$D3 # d3 += h3*r0
1846 vpaddq $H2,$D4,$D4 # d4 += h4*r0
1847 vinserti128 \$1,16*2($inp),$T0,$T0
1849 vpmuludq $H3,$T1,$T4 # h3*s2
1850 vpmuludq $H4,$T1,$H2 # h4*s2
1851 vmovdqu 16*1($inp),%x#$T1
1852 vpaddq $T4,$D0,$D0 # d0 += h3*s2
1853 vpaddq $H2,$D1,$D1 # d1 += h4*s2
1854 vmovdqa `32*5-0x90`(%rax),$H2 # r3
1855 vpmuludq $H1,$T2,$T4 # h1*r2
1856 vpmuludq $H0,$T2,$T2 # h0*r2
1857 vpaddq $T4,$D3,$D3 # d3 += h1*r2
1858 vpaddq $T2,$D2,$D2 # d2 += h0*r2
1859 vinserti128 \$1,16*3($inp),$T1,$T1
1862 vpmuludq $H1,$H2,$T4 # h1*r3
1863 vpmuludq $H0,$H2,$H2 # h0*r3
1864 vpsrldq \$6,$T0,$T2 # splat input
1865 vpaddq $T4,$D4,$D4 # d4 += h1*r3
1866 vpaddq $H2,$D3,$D3 # d3 += h0*r3
1867 vpmuludq $H3,$T3,$T4 # h3*s3
1868 vpmuludq $H4,$T3,$H2 # h4*s3
1870 vpaddq $T4,$D1,$D1 # d1 += h3*s3
1871 vpaddq $H2,$D2,$D2 # d2 += h4*s3
1872 vpunpckhqdq $T1,$T0,$T4 # 4
1874 vpmuludq $H3,$S4,$H3 # h3*s4
1875 vpmuludq $H4,$S4,$H4 # h4*s4
1876 vpunpcklqdq $T1,$T0,$T0 # 0:1
1877 vpaddq $H3,$D2,$H2 # h2 = d2 + h3*s4
1878 vpaddq $H4,$D3,$H3 # h3 = d3 + h4*s4
1879 vpunpcklqdq $T3,$T2,$T3 # 2:3
1880 vpmuludq `32*7-0x90`(%rax),$H0,$H4 # h0*r4
1881 vpmuludq $H1,$S4,$H0 # h1*s4
1882 vmovdqa 64(%rcx),$MASK # .Lmask26
1883 vpaddq $H4,$D4,$H4 # h4 = d4 + h0*r4
1884 vpaddq $H0,$D0,$H0 # h0 = d0 + h1*s4
1886 ################################################################
1887 # lazy reduction (interleaved with tail of input splat)
1891 vpaddq $D3,$H4,$H4 # h3 -> h4
1895 vpaddq $D0,$D1,$H1 # h0 -> h1
1904 vpaddq $D1,$H2,$H2 # h1 -> h2
1908 vpaddq $D4,$H0,$H0 # h4 -> h0
1910 vpand $MASK,$T2,$T2 # 2
1915 vpaddq $D2,$H3,$H3 # h2 -> h3
1917 vpaddq $T2,$H2,$H2 # modulo-scheduled
1922 vpaddq $D0,$H1,$H1 # h0 -> h1
1924 vpsrlq \$40,$T4,$T4 # 4
1928 vpaddq $D3,$H4,$H4 # h3 -> h4
1930 vpand $MASK,$T0,$T0 # 0
1931 vpand $MASK,$T1,$T1 # 1
1932 vpand $MASK,$T3,$T3 # 3
1933 vpor 32(%rcx),$T4,$T4 # padbit, yes, always
1940 ################################################################
1941 # while the above multiplications were by r^4 in all lanes, in the last
1942 # iteration we multiply the least significant lane by r^4 and the most
1943 # significant one by r, so this is a copy of the above except that
1944 # references to the precomputed table are displaced by 4...
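#
# concretely (an illustration of the above): instead of r^4 in every
# lane, the last multiplication uses the per-lane powers
#	[ r^4, r^3, r^2, r^1 ]		# least to most significant lane
# and the 4-byte displacement is what selects these from the
# 14243444-ordered table copy on the stack.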
1946 #vpaddq $H2,$T2,$H2 # accumulate input
1948 vmovdqu `32*0+4`(%rsp),$T0 # r0^4
1950 vmovdqu `32*1+4`(%rsp),$T1 # r1^4
1952 vmovdqu `32*3+4`(%rsp),$T2 # r2^4
1954 vmovdqu `32*6+4-0x90`(%rax),$T3 # s3^4
1955 vmovdqu `32*8+4-0x90`(%rax),$S4 # s4^4
1957 vpmuludq $H2,$T0,$D2 # d2 = h2*r0
1958 vpmuludq $H2,$T1,$D3 # d3 = h2*r1
1959 vpmuludq $H2,$T2,$D4 # d4 = h2*r2
1960 vpmuludq $H2,$T3,$D0 # d0 = h2*s3
1961 vpmuludq $H2,$S4,$D1 # d1 = h2*s4
1963 vpmuludq $H0,$T1,$T4 # h0*r1
1964 vpmuludq $H1,$T1,$H2 # h1*r1
1965 vpaddq $T4,$D1,$D1 # d1 += h0*r1
1966 vpaddq $H2,$D2,$D2 # d2 += h1*r1
1967 vpmuludq $H3,$T1,$T4 # h3*r1
1968 vpmuludq `32*2+4`(%rsp),$H4,$H2 # h4*s1
1969 vpaddq $T4,$D4,$D4 # d4 += h3*r1
1970 vpaddq $H2,$D0,$D0 # d0 += h4*s1
1972 vpmuludq $H0,$T0,$T4 # h0*r0
1973 vpmuludq $H1,$T0,$H2 # h1*r0
1974 vpaddq $T4,$D0,$D0 # d0 += h0*r0
1975 vmovdqu `32*4+4-0x90`(%rax),$T1 # s2
1976 vpaddq $H2,$D1,$D1 # d1 += h1*r0
1977 vpmuludq $H3,$T0,$T4 # h3*r0
1978 vpmuludq $H4,$T0,$H2 # h4*r0
1979 vpaddq $T4,$D3,$D3 # d3 += h3*r0
1980 vpaddq $H2,$D4,$D4 # d4 += h4*r0
1982 vpmuludq $H3,$T1,$T4 # h3*s2
1983 vpmuludq $H4,$T1,$H2 # h4*s2
1984 vpaddq $T4,$D0,$D0 # d0 += h3*s2
1985 vpaddq $H2,$D1,$D1 # d1 += h4*s2
1986 vmovdqu `32*5+4-0x90`(%rax),$H2 # r3
1987 vpmuludq $H1,$T2,$T4 # h1*r2
1988 vpmuludq $H0,$T2,$T2 # h0*r2
1989 vpaddq $T4,$D3,$D3 # d3 += h1*r2
1990 vpaddq $T2,$D2,$D2 # d2 += h0*r2
1992 vpmuludq $H1,$H2,$T4 # h1*r3
1993 vpmuludq $H0,$H2,$H2 # h0*r3
1994 vpaddq $T4,$D4,$D4 # d4 += h1*r3
1995 vpaddq $H2,$D3,$D3 # d3 += h0*r3
1996 vpmuludq $H3,$T3,$T4 # h3*s3
1997 vpmuludq $H4,$T3,$H2 # h4*s3
1998 vpaddq $T4,$D1,$D1 # d1 += h3*s3
1999 vpaddq $H2,$D2,$D2 # d2 += h4*s3
2001 vpmuludq $H3,$S4,$H3 # h3*s4
2002 vpmuludq $H4,$S4,$H4 # h4*s4
2003 vpaddq $H3,$D2,$H2 # h2 = d2 + h3*s4
2004 vpaddq $H4,$D3,$H3 # h3 = d3 + h4*s4
2005 vpmuludq `32*7+4-0x90`(%rax),$H0,$H4 # h0*r4
2006 vpmuludq $H1,$S4,$H0 # h1*s4
2007 vmovdqa 64(%rcx),$MASK # .Lmask26
2008 vpaddq $H4,$D4,$H4 # h4 = d4 + h0*r4
2009 vpaddq $H0,$D0,$H0 # h0 = d0 + h1*s4
2011 ################################################################
2012 # horizontal addition
2025 vpermq \$0x2,$H3,$T3
2026 vpermq \$0x2,$H4,$T4
2027 vpermq \$0x2,$H0,$T0
2028 vpermq \$0x2,$D1,$T1
2029 vpermq \$0x2,$H2,$T2
2036 ################################################################
2041 vpaddq $D3,$H4,$H4 # h3 -> h4
2045 vpaddq $D0,$D1,$H1 # h0 -> h1
2052 vpaddq $D1,$H2,$H2 # h1 -> h2
2056 vpaddq $D4,$H0,$H0 # h4 -> h0
2060 vpaddq $D2,$H3,$H3 # h2 -> h3
2064 vpaddq $D0,$H1,$H1 # h0 -> h1
2068 vpaddq $D3,$H4,$H4 # h3 -> h4
2070 vmovd %x#$H0,`4*0-48-64`($ctx)# save partially reduced
2071 vmovd %x#$H1,`4*1-48-64`($ctx)
2072 vmovd %x#$H2,`4*2-48-64`($ctx)
2073 vmovd %x#$H3,`4*3-48-64`($ctx)
2074 vmovd %x#$H4,`4*4-48-64`($ctx)
2076 $code.=<<___ if ($win64);
2077 vmovdqa 0x50(%r11),%xmm6
2078 vmovdqa 0x60(%r11),%xmm7
2079 vmovdqa 0x70(%r11),%xmm8
2080 vmovdqa 0x80(%r11),%xmm9
2081 vmovdqa 0x90(%r11),%xmm10
2082 vmovdqa 0xa0(%r11),%xmm11
2083 vmovdqa 0xb0(%r11),%xmm12
2084 vmovdqa 0xc0(%r11),%xmm13
2085 vmovdqa 0xd0(%r11),%xmm14
2086 vmovdqa 0xe0(%r11),%xmm15
2090 $code.=<<___ if (!$win64);
2098 .size poly1305_blocks_avx2,.-poly1305_blocks_avx2
2100 #######################################################################
2102 # On entry we have an input length divisible by 64. But since the inner
2103 # loop processes 128 bytes per iteration, cases when the length is not
2104 # divisible by 128 are handled by passing the tail 64 bytes to .Ltail_avx2.
2105 # For this reason the stack layout is kept identical to poly1305_blocks_avx2.
2106 # If not for this tail, we wouldn't even have to allocate a stack frame...
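#
# in outline (an illustrative sketch of the flow):
#
#	while (len >= 128)
#		process 8 blocks;		# the inner loop
#	if (len)
#		process 4 blocks via .Ltail_avx2;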
2108 my ($R0,$R1,$R2,$R3,$R4, $S1,$S2,$S3,$S4) = map("%ymm$_",(16..24));
2109 my ($M0,$M1,$M2,$M3,$M4) = map("%ymm$_",(25..29));
2110 my $PADBIT="%zmm30";
2111 my $GATHER="%ymm31";
2114 .type poly1305_blocks_avx512,\@function,4
2116 poly1305_blocks_avx512:
2121 $code.=<<___ if (!$win64);
2123 .cfi_def_cfa %r11,16
2126 $code.=<<___ if ($win64);
2127 lea -0xf8(%rsp),%r11
2129 vmovdqa %xmm6,0x50(%r11)
2130 vmovdqa %xmm7,0x60(%r11)
2131 vmovdqa %xmm8,0x70(%r11)
2132 vmovdqa %xmm9,0x80(%r11)
2133 vmovdqa %xmm10,0x90(%r11)
2134 vmovdqa %xmm11,0xa0(%r11)
2135 vmovdqa %xmm12,0xb0(%r11)
2136 vmovdqa %xmm13,0xc0(%r11)
2137 vmovdqa %xmm14,0xd0(%r11)
2138 vmovdqa %xmm15,0xe0(%r11)
2142 lea .Lconst(%rip),%rcx
2143 lea 48+64($ctx),$ctx # size optimization
2144 vmovdqa 96(%rcx),$T2 # .Lpermd_avx2
2146 # expand pre-calculated table
2147 vmovdqu32 `16*0-64`($ctx),%x#$R0
2149 vmovdqu32 `16*1-64`($ctx),%x#$R1
2150 vmovdqu32 `16*2-64`($ctx),%x#$S1
2151 vmovdqu32 `16*3-64`($ctx),%x#$R2
2152 vmovdqu32 `16*4-64`($ctx),%x#$S2
2153 vmovdqu32 `16*5-64`($ctx),%x#$R3
2154 vmovdqu32 `16*6-64`($ctx),%x#$S3
2155 vmovdqu32 `16*7-64`($ctx),%x#$R4
2156 vmovdqu32 `16*8-64`($ctx),%x#$S4
2157 vpermd $R0,$T2,$R0 # 00003412 -> 14243444
2158 vmovdqa64 64(%rcx),$MASK # .Lmask26
2162 vmovdqa32 $R0,0x00(%rsp) # save in case $len%128 != 0
2163 vpsrlq \$32,$R0,$T0 # 14243444 -> 01020304
2165 vmovdqa32 $R1,0x20(%rsp)
2168 vmovdqa32 $S1,0x40(%rsp)
2171 vmovdqa32 $R2,0x60(%rsp)
2173 vmovdqa32 $S2,0x80(%rsp)
2174 vmovdqa32 $R3,0xa0(%rsp)
2175 vmovdqa32 $S3,0xc0(%rsp)
2176 vmovdqa32 $R4,0xe0(%rsp)
2177 vmovdqa32 $S4,0x100(%rsp)
2179 ################################################################
2180 # calculate 5th through 8th powers of the key
2182 # d0 = r0'*r0 + r1'*5*r4 + r2'*5*r3 + r3'*5*r2 + r4'*5*r1
2183 # d1 = r0'*r1 + r1'*r0 + r2'*5*r4 + r3'*5*r3 + r4'*5*r2
2184 # d2 = r0'*r2 + r1'*r1 + r2'*r0 + r3'*5*r4 + r4'*5*r3
2185 # d3 = r0'*r3 + r1'*r2 + r2'*r1 + r3'*r0 + r4'*5*r4
2186 # d4 = r0'*r4 + r1'*r3 + r2'*r2 + r3'*r1 + r4'*r0
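#
# note that at this point the lanes of T0-T4 hold the digits of
# r^1..r^4 (one power per lane) while R0-R4 hold r^4 in each dword
# consumed by vpmuludq, so one pass of the formulas above yields
# r^5..r^8; in C-like form it is the same wrapped schoolbook product:
#	d[(i+j) % 5] += t[i] * (i+j < 5 ? r[j] : 5*r[j])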
2188 vpmuludq $T0,$R0,$D0 # d0 = r0'*r0
2189 vpmuludq $T0,$R1,$D1 # d1 = r0'*r1
2190 vpmuludq $T0,$R2,$D2 # d2 = r0'*r2
2191 vpmuludq $T0,$R3,$D3 # d3 = r0'*r3
2192 vpmuludq $T0,$R4,$D4 # d4 = r0'*r4
2195 vpmuludq $T1,$S4,$M0
2196 vpmuludq $T1,$R0,$M1
2197 vpmuludq $T1,$R1,$M2
2198 vpmuludq $T1,$R2,$M3
2199 vpmuludq $T1,$R3,$M4
2201 vpaddq $M0,$D0,$D0 # d0 += r1'*5*r4
2202 vpaddq $M1,$D1,$D1 # d1 += r1'*r0
2203 vpaddq $M2,$D2,$D2 # d2 += r1'*r1
2204 vpaddq $M3,$D3,$D3 # d3 += r1'*r2
2205 vpaddq $M4,$D4,$D4 # d4 += r1'*r3
2207 vpmuludq $T2,$S3,$M0
2208 vpmuludq $T2,$S4,$M1
2209 vpmuludq $T2,$R1,$M3
2210 vpmuludq $T2,$R2,$M4
2211 vpmuludq $T2,$R0,$M2
2213 vpaddq $M0,$D0,$D0 # d0 += r2'*5*r3
2214 vpaddq $M1,$D1,$D1 # d1 += r2'*5*r4
2215 vpaddq $M3,$D3,$D3 # d3 += r2'*r1
2216 vpaddq $M4,$D4,$D4 # d4 += r2'*r2
2217 vpaddq $M2,$D2,$D2 # d2 += r2'*r0
2219 vpmuludq $T3,$S2,$M0
2220 vpmuludq $T3,$R0,$M3
2221 vpmuludq $T3,$R1,$M4
2222 vpmuludq $T3,$S3,$M1
2223 vpmuludq $T3,$S4,$M2
2224 vpaddq $M0,$D0,$D0 # d0 += r3'*5*r2
2225 vpaddq $M3,$D3,$D3 # d3 += r3'*r0
2226 vpaddq $M4,$D4,$D4 # d4 += r3'*r1
2227 vpaddq $M1,$D1,$D1 # d1 += r3'*5*r3
2228 vpaddq $M2,$D2,$D2 # d2 += r3'*5*r4
2230 vpmuludq $T4,$S4,$M3
2231 vpmuludq $T4,$R0,$M4
2232 vpmuludq $T4,$S1,$M0
2233 vpmuludq $T4,$S2,$M1
2234 vpmuludq $T4,$S3,$M2
2235 vpaddq $M3,$D3,$D3 # d3 += r4'*5*r4
2236 vpaddq $M4,$D4,$D4 # d4 += r4'*r0
2237 vpaddq $M0,$D0,$D0 # d0 += r4'*5*r1
2238 vpaddq $M1,$D1,$D1 # d1 += r4'*5*r2
2239 vpaddq $M2,$D2,$D2 # d2 += r4'*5*r3
2241 ################################################################
2243 vmovdqu64 16*0($inp),%z#$T3
2244 vmovdqu64 16*4($inp),%z#$T4
2247 ################################################################
2251 vpandq $MASK,$D3,$D3
2252 vpaddq $M3,$D4,$D4 # d3 -> d4
2255 vpandq $MASK,$D0,$D0
2256 vpaddq $M0,$D1,$D1 # d0 -> d1
2259 vpandq $MASK,$D4,$D4
2262 vpandq $MASK,$D1,$D1
2263 vpaddq $M1,$D2,$D2 # d1 -> d2
2267 vpaddq $M4,$D0,$D0 # d4 -> d0
2270 vpandq $MASK,$D2,$D2
2271 vpaddq $M2,$D3,$D3 # d2 -> d3
2274 vpandq $MASK,$D0,$D0
2275 vpaddq $M0,$D1,$D1 # d0 -> d1
2278 vpandq $MASK,$D3,$D3
2279 vpaddq $M3,$D4,$D4 # d3 -> d4
2282 map(s/%y/%z/,($T4,$T0,$T1,$T2,$T3)); # switch to %zmm domain
2283 map(s/%y/%z/,($M4,$M0,$M1,$M2,$M3));
2284 map(s/%y/%z/,($D0,$D1,$D2,$D3,$D4));
2285 map(s/%y/%z/,($R0,$R1,$R2,$R3,$R4, $S1,$S2,$S3,$S4));
2286 map(s/%y/%z/,($H0,$H1,$H2,$H3,$H4));
2287 map(s/%y/%z/,($MASK));
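# (the substitutions above rewrite the register names held in the Perl
# variables themselves, e.g. "%ymm14" becomes "%zmm14", so the code
# generated from here on uses the full 512-bit registers without
# duplicating the templates)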
2289 ################################################################
2290 # at this point we have 14243444 in $R0-$S4 and 05060708 in
2291 # $D0-$D4, ...
2293 vpunpcklqdq $T4,$T3,$T0 # transpose input
2294 vpunpckhqdq $T4,$T3,$T4
2296 # ... since input 64-bit lanes are ordered as 73625140, we could
2297 # "vperm" it to 76543210 (here and in each loop iteration), *or*
2298 # we could just flow along, hence the goal for $R0-$S4 is
2299 # 1858286838784888 ...
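# (read as 16 dword lanes, most significant first: the even lanes,
# which vpmuludq consumes, all hold r^8 for the main loop, while the
# odd lanes stash r^1..r^8 so that the tail's "vpsrlq \$32" can drop
# them into the multiplication-visible position, matching the 73625140
# input order without any extra permute)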
2301 mov \$0b0110011001100110,%eax
2302 mov \$0b1100110011001100,%r8d
2303 mov \$0b0101010101010101,%r9d
2308 vpbroadcastq %x#$D0,$M0 # 0808080808080808
2309 vpbroadcastq %x#$D1,$M1
2310 vpbroadcastq %x#$D2,$M2
2311 vpbroadcastq %x#$D3,$M3
2312 vpbroadcastq %x#$D4,$M4
2314 vpexpandd $D0,${D0}{%k1} # 05060708 -> -05--06--07--08-
2315 vpexpandd $D1,${D1}{%k1}
2316 vpexpandd $D2,${D2}{%k1}
2317 vpexpandd $D3,${D3}{%k1}
2318 vpexpandd $D4,${D4}{%k1}
2320 vpexpandd $R0,${D0}{%k2} # -05--06--07--08- -> 145-246-347-448-
2321 vpexpandd $R1,${D1}{%k2}
2322 vpexpandd $R2,${D2}{%k2}
2323 vpexpandd $R3,${D3}{%k2}
2324 vpexpandd $R4,${D4}{%k2}
2326 vpblendmd $M0,$D0,${R0}{%k3} # 1858286838784888
2327 vpblendmd $M1,$D1,${R1}{%k3}
2328 vpblendmd $M2,$D2,${R2}{%k3}
2329 vpblendmd $M3,$D3,${R3}{%k3}
2330 vpblendmd $M4,$D4,${R4}{%k3}
2332 vpslld \$2,$R1,$S1 # *5
2341 vpbroadcastq %x#$MASK,$MASK
2342 vpbroadcastq 32(%rcx),$PADBIT # .L129
2344 vpsrlq \$52,$T0,$T2 # splat input
2349 vpsrlq \$40,$T4,$T4 # 4
2350 vpandq $MASK,$T2,$T2 # 2
2351 vpandq $MASK,$T0,$T0 # 0
2352 vpandq $MASK,$T1,$T1 # 1
2353 vpandq $MASK,$T3,$T3 # 3
2354 #vporq $PADBIT,$T4,$T4 # padbit, yes, always
2356 vpaddq $H2,$T2,$H2 # accumulate input
2364 ################################################################
2365 # ((inp[0]*r^8+inp[ 8])*r^8+inp[16])*r^8
2366 # ((inp[1]*r^8+inp[ 9])*r^8+inp[17])*r^7
2367 # ((inp[2]*r^8+inp[10])*r^8+inp[18])*r^6
2368 # ((inp[3]*r^8+inp[11])*r^8+inp[19])*r^5
2369 # ((inp[4]*r^8+inp[12])*r^8+inp[20])*r^4
2370 # ((inp[5]*r^8+inp[13])*r^8+inp[21])*r^3
2371 # ((inp[6]*r^8+inp[14])*r^8+inp[22])*r^2
2372 # ((inp[7]*r^8+inp[15])*r^8+inp[23])*r^1
2373 # \________/\___________/
2374 ################################################################
2375 #vpaddq $H2,$T2,$H2 # accumulate input
2377 # d4 = h4*r0 + h3*r1 + h2*r2 + h1*r3 + h0*r4
2378 # d3 = h3*r0 + h2*r1 + h1*r2 + h0*r3 + h4*5*r4
2379 # d2 = h2*r0 + h1*r1 + h0*r2 + h4*5*r3 + h3*5*r4
2380 # d1 = h1*r0 + h0*r1 + h4*5*r2 + h3*5*r3 + h2*5*r4
2381 # d0 = h0*r0 + h4*5*r1 + h3*5*r2 + h2*5*r3 + h1*5*r4
2383 # however, as h2 is "chronologically" the first one available, pull the
2384 # corresponding operations up, so it becomes
2386 # d3 = h2*r1 + h0*r3 + h1*r2 + h3*r0 + h4*5*r4
2387 # d4 = h2*r2 + h0*r4 + h1*r3 + h3*r1 + h4*r0
2388 # d0 = h2*5*r3 + h0*r0 + h1*5*r4 + h3*5*r2 + h4*5*r1
2389 # d1 = h2*5*r4 + h0*r1 + h1*r0 + h3*5*r3 + h4*5*r2
2390 # d2 = h2*r0 + h0*r2 + h1*r1 + h3*5*r4 + h4*5*r3
2392 vpmuludq $H2,$R1,$D3 # d3 = h2*r1
2394 vpmuludq $H2,$R2,$D4 # d4 = h2*r2
2395 vpmuludq $H2,$S3,$D0 # d0 = h2*s3
2396 vpmuludq $H2,$S4,$D1 # d1 = h2*s4
2397 vporq $PADBIT,$T4,$T4 # padbit, yes, always
2398 vpmuludq $H2,$R0,$D2 # d2 = h2*r0
2399 vpaddq $H1,$T1,$H1 # accumulate input
2403 vmovdqu64 16*0($inp),$T3 # load input
2404 vmovdqu64 16*4($inp),$T4
2406 vpmuludq $H0,$R3,$M3
2407 vpmuludq $H0,$R4,$M4
2408 vpmuludq $H0,$R0,$M0
2409 vpmuludq $H0,$R1,$M1
2410 vpaddq $M3,$D3,$D3 # d3 += h0*r3
2411 vpaddq $M4,$D4,$D4 # d4 += h0*r4
2412 vpaddq $M0,$D0,$D0 # d0 += h0*r0
2413 vpaddq $M1,$D1,$D1 # d1 += h0*r1
2415 vpmuludq $H1,$R2,$M3
2416 vpmuludq $H1,$R3,$M4
2417 vpmuludq $H1,$S4,$M0
2418 vpmuludq $H0,$R2,$M2
2419 vpaddq $M3,$D3,$D3 # d3 += h1*r2
2420 vpaddq $M4,$D4,$D4 # d4 += h1*r3
2421 vpaddq $M0,$D0,$D0 # d0 += h1*s4
2422 vpaddq $M2,$D2,$D2 # d2 += h0*r2
2424 vpunpcklqdq $T4,$T3,$T0 # transpose input
2425 vpunpckhqdq $T4,$T3,$T4
2427 vpmuludq $H3,$R0,$M3
2428 vpmuludq $H3,$R1,$M4
2429 vpmuludq $H1,$R0,$M1
2430 vpmuludq $H1,$R1,$M2
2431 vpaddq $M3,$D3,$D3 # d3 += h3*r0
2432 vpaddq $M4,$D4,$D4 # d4 += h3*r1
2433 vpaddq $M1,$D1,$D1 # d1 += h1*r0
2434 vpaddq $M2,$D2,$D2 # d2 += h1*r1
2436 vpmuludq $H4,$S4,$M3
2437 vpmuludq $H4,$R0,$M4
2438 vpmuludq $H3,$S2,$M0
2439 vpmuludq $H3,$S3,$M1
2440 vpaddq $M3,$D3,$D3 # d3 += h4*s4
2441 vpmuludq $H3,$S4,$M2
2442 vpaddq $M4,$D4,$D4 # d4 += h4*r0
2443 vpaddq $M0,$D0,$D0 # d0 += h3*s2
2444 vpaddq $M1,$D1,$D1 # d1 += h3*s3
2445 vpaddq $M2,$D2,$D2 # d2 += h3*s4
2447 vpmuludq $H4,$S1,$M0
2448 vpmuludq $H4,$S2,$M1
2449 vpmuludq $H4,$S3,$M2
2450 vpaddq $M0,$D0,$H0 # h0 = d0 + h4*s1
2451 vpaddq $M1,$D1,$H1 # h1 = d1 + h4*s2
2452 vpaddq $M2,$D2,$H2 # h2 = d2 + h4*s3
2454 ################################################################
2455 # lazy reduction (interleaved with input splat)
2457 vpsrlq \$52,$T0,$T2 # splat input
2461 vpandq $MASK,$D3,$D3
2462 vpaddq $H3,$D4,$H4 # h3 -> h4
2467 vpandq $MASK,$H0,$H0
2468 vpaddq $D0,$H1,$H1 # h0 -> h1
2470 vpandq $MASK,$T2,$T2 # 2
2473 vpandq $MASK,$H4,$H4
2476 vpandq $MASK,$H1,$H1
2477 vpaddq $D1,$H2,$H2 # h1 -> h2
2481 vpaddq $D4,$H0,$H0 # h4 -> h0
2483 vpaddq $T2,$H2,$H2 # modulo-scheduled
2487 vpandq $MASK,$H2,$H2
2488 vpaddq $D2,$D3,$H3 # h2 -> h3
2493 vpandq $MASK,$H0,$H0
2494 vpaddq $D0,$H1,$H1 # h0 -> h1
2496 vpsrlq \$40,$T4,$T4 # 4
2499 vpandq $MASK,$H3,$H3
2500 vpaddq $D3,$H4,$H4 # h3 -> h4
2502 vpandq $MASK,$T0,$T0 # 0
2503 vpandq $MASK,$T1,$T1 # 1
2504 vpandq $MASK,$T3,$T3 # 3
2505 #vporq $PADBIT,$T4,$T4 # padbit, yes, always
2511 ################################################################
2512 # while the above multiplications were by r^8 in all lanes, in the last
2513 # iteration we multiply the least significant lane by r^8 and the most
2514 # significant one by r, which is why the table gets shifted...
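# (each "vpsrlq \$32" below moves the odd-lane power stashed next to
# r^8 into the even, multiplication-visible, dword of its 64-bit lane,
# so lane k ends up holding exactly the power matching its
# 73625140-ordered input qword)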
2516 vpsrlq \$32,$R0,$R0 # 0105020603070408
2526 ################################################################
2527 # load either the next or the last 64 bytes of input
2528 lea ($inp,$len),$inp
2530 #vpaddq $H2,$T2,$H2 # accumulate input
2533 vpmuludq $H2,$R1,$D3 # d3 = h2*r1
2534 vpmuludq $H2,$R2,$D4 # d4 = h2*r2
2535 vpmuludq $H2,$S3,$D0 # d0 = h2*s3
2536 vpmuludq $H2,$S4,$D1 # d1 = h2*s4
2537 vpmuludq $H2,$R0,$D2 # d2 = h2*r0
2538 vporq $PADBIT,$T4,$T4 # padbit, yes, always
2539 vpaddq $H1,$T1,$H1 # accumulate input
2543 vmovdqu64 16*0($inp),%x#$T0
2544 vpmuludq $H0,$R3,$M3
2545 vpmuludq $H0,$R4,$M4
2546 vpmuludq $H0,$R0,$M0
2547 vpmuludq $H0,$R1,$M1
2548 vpaddq $M3,$D3,$D3 # d3 += h0*r3
2549 vpaddq $M4,$D4,$D4 # d4 += h0*r4
2550 vpaddq $M0,$D0,$D0 # d0 += h0*r0
2551 vpaddq $M1,$D1,$D1 # d1 += h0*r1
2553 vmovdqu64 16*1($inp),%x#$T1
2554 vpmuludq $H1,$R2,$M3
2555 vpmuludq $H1,$R3,$M4
2556 vpmuludq $H1,$S4,$M0
2557 vpmuludq $H0,$R2,$M2
2558 vpaddq $M3,$D3,$D3 # d3 += h1*r2
2559 vpaddq $M4,$D4,$D4 # d4 += h1*r3
2560 vpaddq $M0,$D0,$D0 # d0 += h1*s4
2561 vpaddq $M2,$D2,$D2 # d2 += h0*r2
2563 vinserti64x2 \$1,16*2($inp),$T0,$T0
2564 vpmuludq $H3,$R0,$M3
2565 vpmuludq $H3,$R1,$M4
2566 vpmuludq $H1,$R0,$M1
2567 vpmuludq $H1,$R1,$M2
2568 vpaddq $M3,$D3,$D3 # d3 += h3*r0
2569 vpaddq $M4,$D4,$D4 # d4 += h3*r1
2570 vpaddq $M1,$D1,$D1 # d1 += h1*r0
2571 vpaddq $M2,$D2,$D2 # d2 += h1*r1
2573 vinserti64x2 \$1,16*3($inp),$T1,$T1
2574 vpmuludq $H4,$S4,$M3
2575 vpmuludq $H4,$R0,$M4
2576 vpmuludq $H3,$S2,$M0
2577 vpmuludq $H3,$S3,$M1
2578 vpmuludq $H3,$S4,$M2
2579 vpaddq $M3,$D3,$H3 # h3 = d3 + h4*s4
2580 vpaddq $M4,$D4,$D4 # d4 += h4*r0
2581 vpaddq $M0,$D0,$D0 # d0 += h3*s2
2582 vpaddq $M1,$D1,$D1 # d1 += h3*s3
2583 vpaddq $M2,$D2,$D2 # d2 += h3*s4
2585 vpmuludq $H4,$S1,$M0
2586 vpmuludq $H4,$S2,$M1
2587 vpmuludq $H4,$S3,$M2
2588 vpaddq $M0,$D0,$H0 # h0 = d0 + h4*s1
2589 vpaddq $M1,$D1,$H1 # h1 = d1 + h4*s2
2590 vpaddq $M2,$D2,$H2 # h2 = d2 + h4*s3
2592 ################################################################
2593 # horizontal addition
2608 vpermq \$0x2,$H3,$D3
2609 vpermq \$0x2,$H4,$D4
2610 vpermq \$0x2,$H0,$D0
2611 vpermq \$0x2,$H1,$D1
2612 vpermq \$0x2,$H2,$D2
2619 vextracti64x4 \$0x1,$H3,%y#$D3
2620 vextracti64x4 \$0x1,$H4,%y#$D4
2621 vextracti64x4 \$0x1,$H0,%y#$D0
2622 vextracti64x4 \$0x1,$H1,%y#$D1
2623 vextracti64x4 \$0x1,$H2,%y#$D2
2624 vpaddq $D3,$H3,${H3}{%k3}{z} # keep single qword in case
2625 vpaddq $D4,$H4,${H4}{%k3}{z} # it's passed to .Ltail_avx2
2626 vpaddq $D0,$H0,${H0}{%k3}{z}
2627 vpaddq $D1,$H1,${H1}{%k3}{z}
2628 vpaddq $D2,$H2,${H2}{%k3}{z}
2630 map(s/%z/%y/,($T0,$T1,$T2,$T3,$T4, $PADBIT));
2631 map(s/%z/%y/,($H0,$H1,$H2,$H3,$H4, $D0,$D1,$D2,$D3,$D4, $MASK));
2633 ################################################################
2634 # lazy reduction (interleaved with input splat)
2637 vpandq $MASK,$H3,$H3
2638 vpsrldq \$6,$T0,$T2 # splat input
2640 vpunpckhqdq $T1,$T0,$T4 # 4
2641 vpaddq $D3,$H4,$H4 # h3 -> h4
2644 vpandq $MASK,$H0,$H0
2645 vpunpcklqdq $T3,$T2,$T2 # 2:3
2646 vpunpcklqdq $T1,$T0,$T0 # 0:1
2647 vpaddq $D0,$H1,$H1 # h0 -> h1
2650 vpandq $MASK,$H4,$H4
2653 vpandq $MASK,$H1,$H1
2656 vpaddq $D1,$H2,$H2 # h1 -> h2
2661 vpsrlq \$40,$T4,$T4 # 4
2662 vpaddq $D4,$H0,$H0 # h4 -> h0
2665 vpandq $MASK,$H2,$H2
2666 vpandq $MASK,$T2,$T2 # 2
2667 vpandq $MASK,$T0,$T0 # 0
2668 vpaddq $D2,$H3,$H3 # h2 -> h3
2671 vpandq $MASK,$H0,$H0
2672 vpaddq $H2,$T2,$H2 # accumulate input for .Ltail_avx2
2673 vpandq $MASK,$T1,$T1 # 1
2674 vpaddq $D0,$H1,$H1 # h0 -> h1
2677 vpandq $MASK,$H3,$H3
2678 vpandq $MASK,$T3,$T3 # 3
2679 vporq $PADBIT,$T4,$T4 # padbit, yes, always
2680 vpaddq $D3,$H4,$H4 # h3 -> h4
2682 lea 0x90(%rsp),%rax # size optimization for .Ltail_avx2
2686 vpsubq $T2,$H2,$H2 # undo input accumulation
2687 vmovd %x#$H0,`4*0-48-64`($ctx)# save partially reduced
2688 vmovd %x#$H1,`4*1-48-64`($ctx)
2689 vmovd %x#$H2,`4*2-48-64`($ctx)
2690 vmovd %x#$H3,`4*3-48-64`($ctx)
2691 vmovd %x#$H4,`4*4-48-64`($ctx)
2694 $code.=<<___ if ($win64);
2695 movdqa 0x50(%r11),%xmm6
2696 movdqa 0x60(%r11),%xmm7
2697 movdqa 0x70(%r11),%xmm8
2698 movdqa 0x80(%r11),%xmm9
2699 movdqa 0x90(%r11),%xmm10
2700 movdqa 0xa0(%r11),%xmm11
2701 movdqa 0xb0(%r11),%xmm12
2702 movdqa 0xc0(%r11),%xmm13
2703 movdqa 0xd0(%r11),%xmm14
2704 movdqa 0xe0(%r11),%xmm15
2706 .Ldo_avx512_epilogue:
2708 $code.=<<___ if (!$win64);
2715 .size poly1305_blocks_avx512,.-poly1305_blocks_avx512
2718 ########################################################################
2719 # VPMADD52 version using 2^44 radix.
2721 # One can argue that base 2^52 would be more natural. Well, even though
2722 # some operations would be more natural, one has to recognize a couple of
2723 # things. Base 2^52 provides no advantage over base 2^44 if you look at
2724 # the amount of multiply-and-accumulate operations. Secondly, it makes it
2725 # impossible to pre-compute the multiples of 5 [referred to as s[]/sN in
2726 # reference implementations], which means that more such operations
2727 # would have to be performed in the inner loop, which in turn makes the
2728 # critical path longer. In other words, even though base 2^44 reduction
2729 # might look less elegant, the overall critical path is actually shorter...
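#
# For reference, an illustrative scalar C sketch of one block in this
# radix (the vector code below computes the same three columns with
# vpmadd52luq/vpmadd52huq supplying the low/high halves of the 52-bit
# products):
#
#	typedef unsigned __int128 u128;
#	uint64_t h0, h1, h2, r0, r1, r2;	 /* 44+44+42-bit limbs */
#	uint64_t s1 = (r1*5)<<2, s2 = (r2*5)<<2; /* the "magic <<2" */
#
#	u128 d0 = (u128)h0*r0 + (u128)h1*s2 + (u128)h2*s1;
#	u128 d1 = (u128)h0*r1 + (u128)h1*r0 + (u128)h2*s2;
#	u128 d2 = (u128)h0*r2 + (u128)h1*r1 + (u128)h2*r0;
#
#	/* then carry d0 -> d1 -> d2 at 44,44,42 bits, the carry out of
#	 * d2 wrapping into d0 multiplied by 5 */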
2732 .type poly1305_init_base2_44,\@function,3
2734 poly1305_init_base2_44:
2736 mov %rax,0($ctx) # initialize hash value
2741 lea poly1305_blocks_vpmadd52(%rip),%r10
2742 lea poly1305_emit_base2_44(%rip),%r11
2744 mov \$0x0ffffffc0fffffff,%rax
2745 mov \$0x0ffffffc0ffffffc,%rcx
2747 mov \$0x00000fffffffffff,%r8
2749 mov \$0x00000fffffffffff,%r9
2752 mov %r8,40($ctx) # r0
2755 mov %rax,48($ctx) # r1
2756 lea (%rax,%rax,4),%rax # *5
2757 mov %rcx,56($ctx) # r2
2758 shl \$2,%rax # magic <<2
2759 lea (%rcx,%rcx,4),%rcx # *5
2760 shl \$2,%rcx # magic <<2
2761 mov %rax,24($ctx) # s1
2762 mov %rcx,32($ctx) # s2
2764 $code.=<<___ if ($flavour !~ /elf32/);
2768 $code.=<<___ if ($flavour =~ /elf32/);
2775 .size poly1305_init_base2_44,.-poly1305_init_base2_44
2778 my ($H0,$H1,$H2,$r2r1r0,$r1r0s2,$r0s2s1,$Dlo,$Dhi) = map("%ymm$_",(0..5,16,17));
2779 my ($T0,$inp_permd,$inp_shift,$PAD) = map("%ymm$_",(18..21));
2780 my ($reduc_mask,$reduc_rght,$reduc_left) = map("%ymm$_",(22..25));
2783 .type poly1305_blocks_vpmadd52,\@function,4
2785 poly1305_blocks_vpmadd52:
2787 jz .Lno_data_vpmadd52 # too short
2792 lea .L2_44_inp_permd(%rip),%r10
2796 vmovq $padbit,%x#$PAD
2797 vmovdqa64 0(%r10),$inp_permd # .L2_44_inp_permd
2798 vmovdqa64 32(%r10),$inp_shift # .L2_44_inp_shift
2799 vpermq \$0xcf,$PAD,$PAD
2800 vmovdqa64 64(%r10),$reduc_mask # .L2_44_mask
2802 vmovdqu64 0($ctx),${Dlo}{%k7}{z} # load hash value
2803 vmovdqu64 40($ctx),${r2r1r0}{%k7}{z} # load keys
2804 vmovdqu64 32($ctx),${r1r0s2}{%k7}{z}
2805 vmovdqu64 24($ctx),${r0s2s1}{%k7}{z}
2807 vmovdqa64 96(%r10),$reduc_rght # .L2_44_shift_rgt
2808 vmovdqa64 128(%r10),$reduc_left # .L2_44_shift_lft
2814 vmovdqu32 0($inp),%x#$T0 # load input as ----3210
2817 vpermd $T0,$inp_permd,$T0 # ----3210 -> --322110
2818 vpsrlvq $inp_shift,$T0,$T0
2819 vpandq $reduc_mask,$T0,$T0
2822 vpaddq $T0,$Dlo,$Dlo # accumulate input
2824 vpermq \$0,$Dlo,${H0}{%k7}{z} # smash hash value
2825 vpermq \$0b01010101,$Dlo,${H1}{%k7}{z}
2826 vpermq \$0b10101010,$Dlo,${H2}{%k7}{z}
2828 vpxord $Dlo,$Dlo,$Dlo
2829 vpxord $Dhi,$Dhi,$Dhi
2831 vpmadd52luq $r2r1r0,$H0,$Dlo
2832 vpmadd52huq $r2r1r0,$H0,$Dhi
2834 vpmadd52luq $r1r0s2,$H1,$Dlo
2835 vpmadd52huq $r1r0s2,$H1,$Dhi
2837 vpmadd52luq $r0s2s1,$H2,$Dlo
2838 vpmadd52huq $r0s2s1,$H2,$Dhi
2840 vpsrlvq $reduc_rght,$Dlo,$T0 # 0 in topmost qword
2841 vpsllvq $reduc_left,$Dhi,$Dhi # 0 in topmost qword
2842 vpandq $reduc_mask,$Dlo,$Dlo
2844 vpaddq $T0,$Dhi,$Dhi
2846 vpermq \$0b10010011,$Dhi,$Dhi # 0 in lowest qword
2848 vpaddq $Dhi,$Dlo,$Dlo # note topmost qword :-)
2850 vpsrlvq $reduc_rght,$Dlo,$T0 # 0 in topmost qword
2851 vpandq $reduc_mask,$Dlo,$Dlo
2853 vpermq \$0b10010011,$T0,$T0
2855 vpaddq $T0,$Dlo,$Dlo
2857 vpermq \$0b10010011,$Dlo,${T0}{%k1}{z}
2859 vpaddq $T0,$Dlo,$Dlo
2862 vpaddq $T0,$Dlo,$Dlo
2867 vmovdqu64 $Dlo,0($ctx){%k7} # store hash value
2871 .size poly1305_blocks_vpmadd52,.-poly1305_blocks_vpmadd52
2875 .type poly1305_emit_base2_44,\@function,3
2877 poly1305_emit_base2_44:
2878 mov 0($ctx),%r8 # load hash value
2894 add \$5,%r8 # compare to modulus
2898 shr \$2,%r10 # did 130-bit value overflow?
2902 add 0($nonce),%rax # accumulate nonce
2904 mov %rax,0($mac) # write result
2908 .size poly1305_emit_base2_44,.-poly1305_emit_base2_44
2915 .long 0x0ffffff,0,0x0ffffff,0,0x0ffffff,0,0x0ffffff,0
2917 .long `1<<24`,0,`1<<24`,0,`1<<24`,0,`1<<24`,0
2919 .long 0x3ffffff,0,0x3ffffff,0,0x3ffffff,0,0x3ffffff,0
2921 .long 2,2,2,3,2,0,2,1
2924 .long 0,1,1,2,2,3,7,7
2928 .quad 0xfffffffffff,0xfffffffffff,0x3ffffffffff,0xffffffffffffffff
2937 .asciz "Poly1305 for x86_64, CRYPTOGAMS by <appro\@openssl.org>"
2941 # EXCEPTION_DISPOSITION handler (EXCEPTION_RECORD *rec,ULONG64 frame,
2942 # CONTEXT *context,DISPATCHER_CONTEXT *disp)
2950 .extern __imp_RtlVirtualUnwind
2951 .type se_handler,\@abi-omnipotent
2965 mov 120($context),%rax # pull context->Rax
2966 mov 248($context),%rbx # pull context->Rip
2968 mov 8($disp),%rsi # disp->ImageBase
2969 mov 56($disp),%r11 # disp->HandlerData
2971 mov 0(%r11),%r10d # HandlerData[0]
2972 lea (%rsi,%r10),%r10 # prologue label
2973 cmp %r10,%rbx # context->Rip<.Lprologue
2974 jb .Lcommon_seh_tail
2976 mov 152($context),%rax # pull context->Rsp
2978 mov 4(%r11),%r10d # HandlerData[1]
2979 lea (%rsi,%r10),%r10 # epilogue label
2980 cmp %r10,%rbx # context->Rip>=.Lepilogue
2981 jae .Lcommon_seh_tail
2991 mov %rbx,144($context) # restore context->Rbx
2992 mov %rbp,160($context) # restore context->Rbp
2993 mov %r12,216($context) # restore context->R12
2994 mov %r13,224($context) # restore context->R13
2995 mov %r14,232($context) # restore context->R14
2996 mov %r15,240($context) # restore context->R15
2998 jmp .Lcommon_seh_tail
2999 .size se_handler,.-se_handler
3001 .type avx_handler,\@abi-omnipotent
3015 mov 120($context),%rax # pull context->Rax
3016 mov 248($context),%rbx # pull context->Rip
3018 mov 8($disp),%rsi # disp->ImageBase
3019 mov 56($disp),%r11 # disp->HandlerData
3021 mov 0(%r11),%r10d # HandlerData[0]
3022 lea (%rsi,%r10),%r10 # prologue label
3023 cmp %r10,%rbx # context->Rip<prologue label
3024 jb .Lcommon_seh_tail
3026 mov 152($context),%rax # pull context->Rsp
3028 mov 4(%r11),%r10d # HandlerData[1]
3029 lea (%rsi,%r10),%r10 # epilogue label
3030 cmp %r10,%rbx # context->Rip>=epilogue label
3031 jae .Lcommon_seh_tail
3033 mov 208($context),%rax # pull context->R11
3037 lea 512($context),%rdi # &context.Xmm6
3039 .long 0xa548f3fc # cld; rep movsq
3044 mov %rax,152($context) # restore context->Rsp
3045 mov %rsi,168($context) # restore context->Rsi
3046 mov %rdi,176($context) # restore context->Rdi
3048 mov 40($disp),%rdi # disp->ContextRecord
3049 mov $context,%rsi # context
3050 mov \$154,%ecx # sizeof(CONTEXT) in qwords, for movsq
3051 .long 0xa548f3fc # cld; rep movsq
3054 xor %rcx,%rcx # arg1, UNW_FLAG_NHANDLER
3055 mov 8(%rsi),%rdx # arg2, disp->ImageBase
3056 mov 0(%rsi),%r8 # arg3, disp->ControlPc
3057 mov 16(%rsi),%r9 # arg4, disp->FunctionEntry
3058 mov 40(%rsi),%r10 # disp->ContextRecord
3059 lea 56(%rsi),%r11 # &disp->HandlerData
3060 lea 24(%rsi),%r12 # &disp->EstablisherFrame
3061 mov %r10,32(%rsp) # arg5
3062 mov %r11,40(%rsp) # arg6
3063 mov %r12,48(%rsp) # arg7
3064 mov %rcx,56(%rsp) # arg8, (NULL)
3065 call *__imp_RtlVirtualUnwind(%rip)
3067 mov \$1,%eax # ExceptionContinueSearch
3079 .size avx_handler,.-avx_handler
3083 .rva .LSEH_begin_poly1305_init
3084 .rva .LSEH_end_poly1305_init
3085 .rva .LSEH_info_poly1305_init
3087 .rva .LSEH_begin_poly1305_blocks
3088 .rva .LSEH_end_poly1305_blocks
3089 .rva .LSEH_info_poly1305_blocks
3091 .rva .LSEH_begin_poly1305_emit
3092 .rva .LSEH_end_poly1305_emit
3093 .rva .LSEH_info_poly1305_emit
3095 $code.=<<___ if ($avx);
3096 .rva .LSEH_begin_poly1305_blocks_avx
3098 .rva .LSEH_info_poly1305_blocks_avx_1
3102 .rva .LSEH_info_poly1305_blocks_avx_2
3105 .rva .LSEH_end_poly1305_blocks_avx
3106 .rva .LSEH_info_poly1305_blocks_avx_3
3108 .rva .LSEH_begin_poly1305_emit_avx
3109 .rva .LSEH_end_poly1305_emit_avx
3110 .rva .LSEH_info_poly1305_emit_avx
3112 $code.=<<___ if ($avx>1);
3113 .rva .LSEH_begin_poly1305_blocks_avx2
3114 .rva .Lbase2_64_avx2
3115 .rva .LSEH_info_poly1305_blocks_avx2_1
3117 .rva .Lbase2_64_avx2
3119 .rva .LSEH_info_poly1305_blocks_avx2_2
3122 .rva .LSEH_end_poly1305_blocks_avx2
3123 .rva .LSEH_info_poly1305_blocks_avx2_3
3125 $code.=<<___ if ($avx>2);
3126 .rva .LSEH_begin_poly1305_blocks_avx512
3127 .rva .LSEH_end_poly1305_blocks_avx512
3128 .rva .LSEH_info_poly1305_blocks_avx512
3133 .LSEH_info_poly1305_init:
3136 .rva .LSEH_begin_poly1305_init,.LSEH_begin_poly1305_init
3138 .LSEH_info_poly1305_blocks:
3141 .rva .Lblocks_body,.Lblocks_epilogue
3143 .LSEH_info_poly1305_emit:
3146 .rva .LSEH_begin_poly1305_emit,.LSEH_begin_poly1305_emit
3148 $code.=<<___ if ($avx);
3149 .LSEH_info_poly1305_blocks_avx_1:
3152 .rva .Lblocks_avx_body,.Lblocks_avx_epilogue # HandlerData[]
3154 .LSEH_info_poly1305_blocks_avx_2:
3157 .rva .Lbase2_64_avx_body,.Lbase2_64_avx_epilogue # HandlerData[]
3159 .LSEH_info_poly1305_blocks_avx_3:
3162 .rva .Ldo_avx_body,.Ldo_avx_epilogue # HandlerData[]
3164 .LSEH_info_poly1305_emit_avx:
3167 .rva .LSEH_begin_poly1305_emit_avx,.LSEH_begin_poly1305_emit_avx
3169 $code.=<<___ if ($avx>1);
3170 .LSEH_info_poly1305_blocks_avx2_1:
3173 .rva .Lblocks_avx2_body,.Lblocks_avx2_epilogue # HandlerData[]
3175 .LSEH_info_poly1305_blocks_avx2_2:
3178 .rva .Lbase2_64_avx2_body,.Lbase2_64_avx2_epilogue # HandlerData[]
3180 .LSEH_info_poly1305_blocks_avx2_3:
3183 .rva .Ldo_avx2_body,.Ldo_avx2_epilogue # HandlerData[]
3185 $code.=<<___ if ($avx>2);
3186 .LSEH_info_poly1305_blocks_avx512:
3189 .rva .Ldo_avx512_body,.Ldo_avx512_epilogue # HandlerData[]
3193 foreach (split('\n',$code)) {
3194 s/\`([^\`]*)\`/eval($1)/ge;	# constant-fold `...` expressions
3195 s/%r([a-z]+)#d/%e$1/g;	# "%rax#d" -> "%eax", etc.
3196 s/%r([0-9]+)#d/%r$1d/g;	# "%r8#d" -> "%r8d", etc.
3197 s/%x#%[yz]/%x/g or s/%y#%z/%y/g or s/%z#%[yz]/%z/g;	# pick the xmm/ymm/zmm form, e.g. "%x#%ymm9" -> "%xmm9"