# Copyright 2016 The OpenSSL Project Authors. All Rights Reserved.
#
# Licensed under the OpenSSL license (the "License"). You may not use
# this file except in compliance with the License. You can obtain a copy
# in the file LICENSE in the source distribution or at
# https://www.openssl.org/source/license.html
#
# ====================================================================
# Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
# project. The module is, however, dual licensed under OpenSSL and
# CRYPTOGAMS licenses depending on where you obtain it. For further
# details see http://www.openssl.org/~appro/cryptogams/.
# ====================================================================
# This module implements Poly1305 hash for x86_64.
#
# Numbers are cycles per processed byte with poly1305_blocks alone,
# measured with rdtsc at fixed clock frequency.
#		IALU/gcc-4.8(*)	AVX(**)		AVX2
#
# Westmere	1.88/+120%	-
# Sandy Bridge	1.39/+140%	1.10
# Haswell	1.14/+175%	1.11		0.65
# Skylake	1.13/+120%	0.96		0.51
# Silvermont	2.83/+95%	-
# Goldmont	1.70/+180%	-
# VIA Nano	1.82/+150%	-
# Sledgehammer	1.38/+160%	-
# Bulldozer	2.30/+130%	0.97
# (*)	improvement coefficients relative to clang are more modest and
#	are ~50% on most processors, in both cases we are comparing to
#	__int128 code;
# (**)	SSE2 implementation was attempted, but among non-AVX processors
#	it was faster than integer-only code only on older Intel P4 and
#	Core processors, by 30-50% (the newer the processor, the smaller
#	the gain), but slower on contemporary ones, for example almost
#	2x slower on Atom; as the former are naturally disappearing,
#	SSE2 is deemed unnecessary;
if ($flavour =~ /\./) { $output = $flavour; undef $flavour; }

$win64=0; $win64=1 if ($flavour =~ /[nm]asm|mingw64/ || $output =~ /\.asm$/);

$0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
( $xlate="${dir}x86_64-xlate.pl" and -f $xlate ) or
( $xlate="${dir}../../perlasm/x86_64-xlate.pl" and -f $xlate) or
die "can't locate x86_64-xlate.pl";

if (`$ENV{CC} -Wa,-v -c -o /dev/null -x assembler /dev/null 2>&1`
		=~ /GNU assembler version ([2-9]\.[0-9]+)/) {
	$avx = ($1>=2.19) + ($1>=2.22);
}

if (!$avx && $win64 && ($flavour =~ /nasm/ || $ENV{ASM} =~ /nasm/) &&
	   `nasm -v 2>&1` =~ /NASM version ([2-9]\.[0-9]+)/) {
	$avx = ($1>=2.09) + ($1>=2.10);
}

if (!$avx && $win64 && ($flavour =~ /masm/ || $ENV{ASM} =~ /ml64/) &&
	   `ml64 2>&1` =~ /Version ([0-9]+)\./) {
	$avx = ($1>=10) + ($1>=12);
}

if (!$avx && `$ENV{CC} -v 2>&1` =~ /((?:^clang|LLVM) version|.*based on LLVM) ([3-9]\.[0-9]+)/) {
	$avx = ($2>=3.0) + ($2>3.0);
}
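
# ($avx is consumed below as a capability level: 1 enables the AVX
# code paths, 2 additionally enables the AVX2 ones, 0 emits neither.)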
open OUT,"| \"$^X\" \"$xlate\" $flavour \"$output\"";

my ($ctx,$inp,$len,$padbit)=("%rdi","%rsi","%rdx","%rcx");
my ($mac,$nonce)=($inp,$len);	# *_emit arguments
my ($d1,$d2,$d3, $r0,$r1,$s1)=map("%r$_",(8..13));
my ($h0,$h1,$h2)=("%r14","%rbx","%rbp");
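
# For exposition only: a Math::BigInt reference model of what
# poly1305_iteration below computes. The sub name is ours and is never
# called by the generator; the assembly is the authoritative version.
sub poly1305_iteration_ref {
	require Math::BigInt;
	my ($h,$r) = @_;	# Math::BigInt: current hash, clamped key
	my $p = Math::BigInt->new(2)->bpow(130)->bsub(5);
	return $h->copy->bmul($r)->bmod($p);	# h = (h * r) mod 2^130-5
}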
sub poly1305_iteration {
# input:	copy of $r1 in %rax, $h0-$h2, $r0-$r1
# output:	$h0-$h2 *= $r0-$r1
	mov	%rax,$h0		# future $h0
	mov	$h2,$h1			# borrow $h1
	imulq	$s1,$h1			# h2*s1
	imulq	$r0,$h2			# h2*r0
	mov	\$-4,%rax		# mask value
	and	$d3,%rax		# last reduction step
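	# i.e. with c = $d3>>2, h += 5*c, computed as ($d3 & -4) + ($d3>>2),
	# because 2^130 = 5 (mod 2^130-5); $h2 keeps only $d3's low 2 bits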
########################################################################
# The layout of the opaque area is as follows.
#
#	unsigned __int64 h[3];		# current hash value base 2^64
#	unsigned __int64 r[2];		# key value base 2^64
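
# A minimal sketch (our helper, not used by the generator) of how this
# layout reads from Perl, assuming little-endian storage and an unpack
# with 64-bit integer support:
sub ctx_peek_base2_64 {
	my ($blob) = @_;			# raw bytes of the opaque area
	my @h = unpack("Q<3", $blob);		# h[0..2], hash base 2^64
	my @r = unpack('@24 Q<2', $blob);	# r[0..1] at byte offset 24
	return (\@h, \@r);
}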
.extern	OPENSSL_ia32cap_P

.hidden	poly1305_init
.globl	poly1305_blocks
.hidden	poly1305_blocks
.hidden	poly1305_emit

.type	poly1305_init,\@function,3
	mov	%rax,0($ctx)		# initialize hash value
	lea	poly1305_blocks(%rip),%r10
	lea	poly1305_emit(%rip),%r11
$code.=<<___	if ($avx);
	mov	OPENSSL_ia32cap_P+4(%rip),%r9
	lea	poly1305_blocks_avx(%rip),%rax
	lea	poly1305_emit_avx(%rip),%rcx
	bt	\$`60-32`,%r9		# AVX?
$code.=<<___	if ($avx>1);
	lea	poly1305_blocks_avx2(%rip),%rax
	bt	\$`5+32`,%r9		# AVX2?
	mov	\$0x0ffffffc0fffffff,%rax
	mov	\$0x0ffffffc0ffffffc,%rcx
$code.=<<___	if ($flavour !~ /elf32/);
$code.=<<___	if ($flavour =~ /elf32/);
.size	poly1305_init,.-poly1305_init

.type	poly1305_blocks,\@function,4
	jz	.Lno_data		# too short
	mov	$len,%r15		# reassign $len
	mov	24($ctx),$r0		# load r
	mov	0($ctx),$h0		# load hash value
	add	$r1,$s1			# s1 = r1 + (r1 >> 2)
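	# ($r1's low 2 bits are clamped to zero, so $s1 = 5*$r1/4; the
	#  factor 5/4 folds 2^130 = 5 (mod 2^130-5) into the cross terms)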
	add	0($inp),$h0		# accumulate input
	&poly1305_iteration();
	mov	$h0,0($ctx)		# store hash value
.size	poly1305_blocks,.-poly1305_blocks

.type	poly1305_emit,\@function,3
	mov	0($ctx),%r8		# load hash value
	add	\$5,%r8			# compare to modulus
	shr	\$2,%r10		# did 130-bit value overflow?
	add	0($nonce),%rax		# accumulate nonce
	mov	%rax,0($mac)		# write result
.size	poly1305_emit,.-poly1305_emit
########################################################################
# The layout of the opaque area is as follows.
#
#	unsigned __int32 h[5];		# current hash value base 2^26
#	unsigned __int32 is_base2_26;
#	unsigned __int64 r[2];		# key value base 2^64
#	unsigned __int64 pad;
#	struct { unsigned __int32 r^2, r^1, r^4, r^3; } r[9];
#
# where r^n are the base 2^26 digits of the powers of the multiplier
# key. There are 5 digits, but the last four are interleaved with
# multiples of 5, totalling 9 elements: r0, r1, 5*r1, r2, 5*r2, r3,
# 5*r3, r4, 5*r4.
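
# For exposition only (our helper, never called by the generator): the
# nine stored elements for one power of r, assuming a Math::BigInt input:
sub base2_26_digits_ref {
	my ($r) = @_;			# Math::BigInt: one power of r
	my @d = map { ($r->copy->brsft(26*$_) & 0x3ffffff)->numify } (0..4);
	# storage order: r0, r1, 5*r1, r2, 5*r2, r3, 5*r3, r4, 5*r4
	return ($d[0], map { ($d[$_], 5*$d[$_]) } (1..4));
}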
my ($H0,$H1,$H2,$H3,$H4, $T0,$T1,$T2,$T3,$T4, $D0,$D1,$D2,$D3,$D4, $MASK) =
    map("%xmm$_",(0..15));

.type	__poly1305_block,\@abi-omnipotent
	&poly1305_iteration();
.size	__poly1305_block,.-__poly1305_block

.type	__poly1305_init_avx,\@abi-omnipotent
	lea	48+64($ctx),$ctx	# size optimization

	call	__poly1305_block	# r^2

	mov	\$0x3ffffff,%eax	# save interleaved r^2 and r base 2^26
	mov	%eax,`16*0+0-64`($ctx)
	mov	%edx,`16*0+4-64`($ctx)

	mov	%eax,`16*1+0-64`($ctx)
	lea	(%rax,%rax,4),%eax	# *5
	mov	%edx,`16*1+4-64`($ctx)
	lea	(%rdx,%rdx,4),%edx	# *5
	mov	%eax,`16*2+0-64`($ctx)
	mov	%edx,`16*2+4-64`($ctx)

	mov	%eax,`16*3+0-64`($ctx)
	lea	(%rax,%rax,4),%eax	# *5
	mov	%edx,`16*3+4-64`($ctx)
	lea	(%rdx,%rdx,4),%edx	# *5
	mov	%eax,`16*4+0-64`($ctx)
	mov	%edx,`16*4+4-64`($ctx)

	mov	%eax,`16*5+0-64`($ctx)
	lea	(%rax,%rax,4),%eax	# *5
	mov	%edx,`16*5+4-64`($ctx)
	lea	(%rdx,%rdx,4),%edx	# *5
	mov	%eax,`16*6+0-64`($ctx)
	mov	%edx,`16*6+4-64`($ctx)

	mov	$d1#d,`16*7+0-64`($ctx)
	lea	($d1,$d1,4),$d1		# *5
	mov	$d2#d,`16*7+4-64`($ctx)
	lea	($d2,$d2,4),$d2		# *5
	mov	$d1#d,`16*8+0-64`($ctx)
	mov	$d2#d,`16*8+4-64`($ctx)

	call	__poly1305_block	# r^3

	mov	\$0x3ffffff,%eax	# save r^3 base 2^26
	mov	%eax,`16*0+12-64`($ctx)
	mov	%edx,`16*1+12-64`($ctx)
	lea	(%rdx,%rdx,4),%edx	# *5
	mov	%edx,`16*2+12-64`($ctx)
	mov	%eax,`16*3+12-64`($ctx)
	lea	(%rax,%rax,4),%eax	# *5
	mov	%eax,`16*4+12-64`($ctx)
	mov	%edx,`16*5+12-64`($ctx)
	lea	(%rdx,%rdx,4),%edx	# *5
	mov	%edx,`16*6+12-64`($ctx)
	mov	$d1#d,`16*7+12-64`($ctx)
	lea	($d1,$d1,4),$d1		# *5
	mov	$d1#d,`16*8+12-64`($ctx)

	call	__poly1305_block	# r^4

	mov	\$0x3ffffff,%eax	# save r^4 base 2^26
	mov	%eax,`16*0+8-64`($ctx)
	mov	%edx,`16*1+8-64`($ctx)
	lea	(%rdx,%rdx,4),%edx	# *5
	mov	%edx,`16*2+8-64`($ctx)
	mov	%eax,`16*3+8-64`($ctx)
	lea	(%rax,%rax,4),%eax	# *5
	mov	%eax,`16*4+8-64`($ctx)
	mov	%edx,`16*5+8-64`($ctx)
	lea	(%rdx,%rdx,4),%edx	# *5
	mov	%edx,`16*6+8-64`($ctx)
	mov	$d1#d,`16*7+8-64`($ctx)
	lea	($d1,$d1,4),$d1		# *5
	mov	$d1#d,`16*8+8-64`($ctx)

	lea	-48-64($ctx),$ctx	# size [de-]optimization
.size	__poly1305_init_avx,.-__poly1305_init_avx

.type	poly1305_blocks_avx,\@function,4
	mov	20($ctx),%r8d		# is_base2_26
	mov	$len,%r15		# reassign $len
	mov	0($ctx),$d1		# load hash value
	mov	24($ctx),$r0		# load r

	################################# base 2^26 -> base 2^64
	and	\$`-1*(1<<31)`,$d1
	mov	$d2,$r1			# borrow $r1
	and	\$`-1*(1<<31)`,$d2
	adc	\$0,$h2			# can be partially reduced...
	mov	\$-4,$d2		# ... so reduce
	add	$r1,$s1			# s1 = r1 + (r1 >> 2)
	add	0($inp),$h0		# accumulate input
	call	__poly1305_block
	test	$padbit,$padbit		# if $padbit is zero,
	jz	.Lstore_base2_64_avx	# store hash in base 2^64 format

	################################# base 2^64 -> base 2^26
	and	\$0x3ffffff,%rax	# h[0]
	and	\$0x3ffffff,%rdx	# h[1]
	and	\$0x3ffffff,$h0		# h[2]
	and	\$0x3ffffff,$h1		# h[3]
	jz	.Lstore_base2_26_avx

.Lstore_base2_64_avx:
	mov	$h2,16($ctx)		# note that is_base2_26 is zeroed

.Lstore_base2_26_avx:
	mov	%rax#d,0($ctx)		# store hash value base 2^26

.Lblocks_avx_epilogue:
	mov	$len,%r15		# reassign $len
	mov	24($ctx),$r0		# load r
	mov	0($ctx),$h0		# load hash value
	add	$r1,$s1			# s1 = r1 + (r1 >> 2)
	add	0($inp),$h0		# accumulate input
	call	__poly1305_block

	################################# base 2^64 -> base 2^26
	and	\$0x3ffffff,%rax	# h[0]
	and	\$0x3ffffff,%rdx	# h[1]
	and	\$0x3ffffff,$h0		# h[2]
	and	\$0x3ffffff,$h1		# h[3]
	movl	\$1,20($ctx)		# set is_base2_26
	call	__poly1305_init_avx

.Lbase2_64_avx_epilogue:
	vmovd	4*0($ctx),$H0		# load hash value
$code.=<<___	if (!$win64);
$code.=<<___	if ($win64);
	vmovdqa	%xmm6,0x50(%r11)
	vmovdqa	%xmm7,0x60(%r11)
	vmovdqa	%xmm8,0x70(%r11)
	vmovdqa	%xmm9,0x80(%r11)
	vmovdqa	%xmm10,0x90(%r11)
	vmovdqa	%xmm11,0xa0(%r11)
	vmovdqa	%xmm12,0xb0(%r11)
	vmovdqa	%xmm13,0xc0(%r11)
	vmovdqa	%xmm14,0xd0(%r11)
	vmovdqa	%xmm15,0xe0(%r11)
	vmovdqu	`16*3`($ctx),$D4	# preload r0^2
	lea	`16*3+64`($ctx),$ctx	# size optimization
	lea	.Lconst(%rip),%rcx

	################################################################
	vmovdqu	16*2($inp),$T0
	vmovdqu	16*3($inp),$T1
	vmovdqa	64(%rcx),$MASK		# .Lmask26

	vpsrldq	\$6,$T0,$T2		# splat input
	vpunpckhqdq	$T1,$T0,$T4	# 4
	vpunpcklqdq	$T1,$T0,$T0	# 0:1
	vpunpcklqdq	$T3,$T2,$T3	# 2:3
	vpsrlq	\$40,$T4,$T4		# 4
	vpand	$MASK,$T0,$T0		# 0
	vpand	$MASK,$T1,$T1		# 1
	vpand	$MASK,$T2,$T2		# 2
	vpand	$MASK,$T3,$T3		# 3
	vpor	32(%rcx),$T4,$T4	# padbit, yes, always

	# expand and copy pre-calculated table to stack
	vmovdqu	`16*1-64`($ctx),$D1
	vmovdqu	`16*2-64`($ctx),$D2
	vpshufd	\$0xEE,$D4,$D3		# 34xx -> 3434
	vpshufd	\$0x44,$D4,$D0		# xx12 -> 1212
	vmovdqa	$D3,-0x90(%r11)
	vmovdqa	$D0,0x00(%rsp)
	vpshufd	\$0xEE,$D1,$D4
	vmovdqu	`16*3-64`($ctx),$D0
	vpshufd	\$0x44,$D1,$D1
	vmovdqa	$D4,-0x80(%r11)
	vmovdqa	$D1,0x10(%rsp)
	vpshufd	\$0xEE,$D2,$D3
	vmovdqu	`16*4-64`($ctx),$D1
	vpshufd	\$0x44,$D2,$D2
	vmovdqa	$D3,-0x70(%r11)
	vmovdqa	$D2,0x20(%rsp)
	vpshufd	\$0xEE,$D0,$D4
	vmovdqu	`16*5-64`($ctx),$D2
	vpshufd	\$0x44,$D0,$D0
	vmovdqa	$D4,-0x60(%r11)
	vmovdqa	$D0,0x30(%rsp)
	vpshufd	\$0xEE,$D1,$D3
	vmovdqu	`16*6-64`($ctx),$D0
	vpshufd	\$0x44,$D1,$D1
	vmovdqa	$D3,-0x50(%r11)
	vmovdqa	$D1,0x40(%rsp)
	vpshufd	\$0xEE,$D2,$D4
	vmovdqu	`16*7-64`($ctx),$D1
	vpshufd	\$0x44,$D2,$D2
	vmovdqa	$D4,-0x40(%r11)
	vmovdqa	$D2,0x50(%rsp)
	vpshufd	\$0xEE,$D0,$D3
	vmovdqu	`16*8-64`($ctx),$D2
	vpshufd	\$0x44,$D0,$D0
	vmovdqa	$D3,-0x30(%r11)
	vmovdqa	$D0,0x60(%rsp)
	vpshufd	\$0xEE,$D1,$D4
	vpshufd	\$0x44,$D1,$D1
	vmovdqa	$D4,-0x20(%r11)
	vmovdqa	$D1,0x70(%rsp)
	vpshufd	\$0xEE,$D2,$D3
	vmovdqa	0x00(%rsp),$D4		# preload r0^2
	vpshufd	\$0x44,$D2,$D2
	vmovdqa	$D3,-0x10(%r11)
	vmovdqa	$D2,0x80(%rsp)
################################################################
# ((inp[0]*r^4+inp[2]*r^2+inp[4])*r^4+inp[6]*r^2
# ((inp[1]*r^4+inp[3]*r^2+inp[5])*r^3+inp[7]*r
#    \___________________/
# ((inp[0]*r^4+inp[2]*r^2+inp[4])*r^4+inp[6]*r^2+inp[8])*r^2
# ((inp[1]*r^4+inp[3]*r^2+inp[5])*r^4+inp[7]*r^2+inp[9])*r
#    \___________________/ \____________________/
#
# Note that we start with inp[2:3]*r^2. This is because it
# doesn't depend on the reduction of the previous iteration.
################################################################
# d4 = h4*r0 + h3*r1 + h2*r2 + h1*r3 + h0*r4
# d3 = h3*r0 + h2*r1 + h1*r2 + h0*r3 + h4*5*r4
# d2 = h2*r0 + h1*r1 + h0*r2 + h4*5*r3 + h3*5*r4
# d1 = h1*r0 + h0*r1 + h4*5*r2 + h3*5*r3 + h2*5*r4
# d0 = h0*r0 + h4*5*r1 + h3*5*r2 + h2*5*r3 + h1*5*r4
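#
# where the code below abbreviates the precomputed 5*r1..5*r4 as s1..s4.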
#
# though note that $Tx and $Hx are "reversed" in this section,
# and $D4 is preloaded with r0^2...
	vpmuludq	$T0,$D4,$D0	# d0 = h0*r0
	vpmuludq	$T1,$D4,$D1	# d1 = h1*r0
	vmovdqa		$H2,0x20(%r11)	# offload hash
	vpmuludq	$T2,$D4,$D2	# d2 = h2*r0
	vmovdqa		0x10(%rsp),$H2	# r1^2
	vpmuludq	$T3,$D4,$D3	# d3 = h3*r0
	vpmuludq	$T4,$D4,$D4	# d4 = h4*r0

	vmovdqa		$H0,0x00(%r11)	#
	vpmuludq	0x20(%rsp),$T4,$H0	# h4*s1
	vmovdqa		$H1,0x10(%r11)	#
	vpmuludq	$T3,$H2,$H1	# h3*r1
	vpaddq		$H0,$D0,$D0	# d0 += h4*s1
	vpaddq		$H1,$D4,$D4	# d4 += h3*r1
	vmovdqa		$H3,0x30(%r11)	#
	vpmuludq	$T2,$H2,$H0	# h2*r1
	vpmuludq	$T1,$H2,$H1	# h1*r1
	vpaddq		$H0,$D3,$D3	# d3 += h2*r1
	vmovdqa		0x30(%rsp),$H3	# r2^2
	vpaddq		$H1,$D2,$D2	# d2 += h1*r1
	vmovdqa		$H4,0x40(%r11)	#
	vpmuludq	$T0,$H2,$H2	# h0*r1
	vpmuludq	$T2,$H3,$H0	# h2*r2
	vpaddq		$H2,$D1,$D1	# d1 += h0*r1

	vmovdqa		0x40(%rsp),$H4	# s2^2
	vpaddq		$H0,$D4,$D4	# d4 += h2*r2
	vpmuludq	$T1,$H3,$H1	# h1*r2
	vpmuludq	$T0,$H3,$H3	# h0*r2
	vpaddq		$H1,$D3,$D3	# d3 += h1*r2
	vmovdqa		0x50(%rsp),$H2	# r3^2
	vpaddq		$H3,$D2,$D2	# d2 += h0*r2
	vpmuludq	$T4,$H4,$H0	# h4*s2
	vpmuludq	$T3,$H4,$H4	# h3*s2
	vpaddq		$H0,$D1,$D1	# d1 += h4*s2
	vmovdqa		0x60(%rsp),$H3	# s3^2
	vpaddq		$H4,$D0,$D0	# d0 += h3*s2

	vmovdqa		0x80(%rsp),$H4	# s4^2
	vpmuludq	$T1,$H2,$H1	# h1*r3
	vpmuludq	$T0,$H2,$H2	# h0*r3
	vpaddq		$H1,$D4,$D4	# d4 += h1*r3
	vpaddq		$H2,$D3,$D3	# d3 += h0*r3
	vpmuludq	$T4,$H3,$H0	# h4*s3
	vpmuludq	$T3,$H3,$H1	# h3*s3
	vpaddq		$H0,$D2,$D2	# d2 += h4*s3
	vmovdqu		16*0($inp),$H0	# load input
	vpaddq		$H1,$D1,$D1	# d1 += h3*s3
	vpmuludq	$T2,$H3,$H3	# h2*s3
	vpmuludq	$T2,$H4,$T2	# h2*s4
	vpaddq		$H3,$D0,$D0	# d0 += h2*s3

	vmovdqu		16*1($inp),$H1	#
	vpaddq		$T2,$D1,$D1	# d1 += h2*s4
	vpmuludq	$T3,$H4,$T3	# h3*s4
	vpmuludq	$T4,$H4,$T4	# h4*s4
	vpsrldq		\$6,$H0,$H2	# splat input
	vpaddq		$T3,$D2,$D2	# d2 += h3*s4
	vpaddq		$T4,$D3,$D3	# d3 += h4*s4
	vpsrldq		\$6,$H1,$H3	#
	vpmuludq	0x70(%rsp),$T0,$T4	# h0*r4
	vpmuludq	$T1,$H4,$T0	# h1*s4
	vpunpckhqdq	$H1,$H0,$H4	# 4
	vpaddq		$T4,$D4,$D4	# d4 += h0*r4
	vmovdqa		-0x90(%r11),$T4	# r0^4
	vpaddq		$T0,$D0,$D0	# d0 += h1*s4

	vpunpcklqdq	$H1,$H0,$H0	# 0:1
	vpunpcklqdq	$H3,$H2,$H3	# 2:3

	#vpsrlq		\$40,$H4,$H4	# 4
	vpsrldq		\$`40/8`,$H4,$H4	# 4
	vpand		$MASK,$H0,$H0	# 0
	vpand		$MASK,$H1,$H1	# 1
	vpand		0(%rcx),$H4,$H4	# .Lmask24
	vpand		$MASK,$H2,$H2	# 2
	vpand		$MASK,$H3,$H3	# 3
	vpor		32(%rcx),$H4,$H4	# padbit, yes, always

	vpaddq		0x00(%r11),$H0,$H0	# add hash value
	vpaddq		0x10(%r11),$H1,$H1
	vpaddq		0x20(%r11),$H2,$H2
	vpaddq		0x30(%r11),$H3,$H3
	vpaddq		0x40(%r11),$H4,$H4

	################################################################
	# Now we accumulate (inp[0:1]+hash)*r^4
	################################################################
	# d4 = h4*r0 + h3*r1 + h2*r2 + h1*r3 + h0*r4
	# d3 = h3*r0 + h2*r1 + h1*r2 + h0*r3 + h4*5*r4
	# d2 = h2*r0 + h1*r1 + h0*r2 + h4*5*r3 + h3*5*r4
	# d1 = h1*r0 + h0*r1 + h4*5*r2 + h3*5*r3 + h2*5*r4
	# d0 = h0*r0 + h4*5*r1 + h3*5*r2 + h2*5*r3 + h1*5*r4

	vpmuludq	$H0,$T4,$T0	# h0*r0
	vpmuludq	$H1,$T4,$T1	# h1*r0
	vmovdqa		-0x80(%r11),$T2	# r1^4
	vpmuludq	$H2,$T4,$T0	# h2*r0
	vpmuludq	$H3,$T4,$T1	# h3*r0
	vpmuludq	$H4,$T4,$T4	# h4*r0

	vpmuludq	-0x70(%r11),$H4,$T0	# h4*s1
	vpaddq		$T0,$D0,$D0	# d0 += h4*s1
	vpmuludq	$H2,$T2,$T1	# h2*r1
	vpmuludq	$H3,$T2,$T0	# h3*r1
	vpaddq		$T1,$D3,$D3	# d3 += h2*r1
	vmovdqa		-0x60(%r11),$T3	# r2^4
	vpaddq		$T0,$D4,$D4	# d4 += h3*r1
	vpmuludq	$H1,$T2,$T1	# h1*r1
	vpmuludq	$H0,$T2,$T2	# h0*r1
	vpaddq		$T1,$D2,$D2	# d2 += h1*r1
	vpaddq		$T2,$D1,$D1	# d1 += h0*r1

	vmovdqa		-0x50(%r11),$T4	# s2^4
	vpmuludq	$H2,$T3,$T0	# h2*r2
	vpmuludq	$H1,$T3,$T1	# h1*r2
	vpaddq		$T0,$D4,$D4	# d4 += h2*r2
	vpaddq		$T1,$D3,$D3	# d3 += h1*r2
	vmovdqa		-0x40(%r11),$T2	# r3^4
	vpmuludq	$H0,$T3,$T3	# h0*r2
	vpmuludq	$H4,$T4,$T0	# h4*s2
	vpaddq		$T3,$D2,$D2	# d2 += h0*r2
	vpaddq		$T0,$D1,$D1	# d1 += h4*s2
	vmovdqa		-0x30(%r11),$T3	# s3^4
	vpmuludq	$H3,$T4,$T4	# h3*s2
	vpmuludq	$H1,$T2,$T1	# h1*r3
	vpaddq		$T4,$D0,$D0	# d0 += h3*s2

	vmovdqa		-0x10(%r11),$T4	# s4^4
	vpaddq		$T1,$D4,$D4	# d4 += h1*r3
	vpmuludq	$H0,$T2,$T2	# h0*r3
	vpmuludq	$H4,$T3,$T0	# h4*s3
	vpaddq		$T2,$D3,$D3	# d3 += h0*r3
	vpaddq		$T0,$D2,$D2	# d2 += h4*s3
	vmovdqu		16*2($inp),$T0	# load input
	vpmuludq	$H3,$T3,$T2	# h3*s3
	vpmuludq	$H2,$T3,$T3	# h2*s3
	vpaddq		$T2,$D1,$D1	# d1 += h3*s3
	vmovdqu		16*3($inp),$T1	#
	vpaddq		$T3,$D0,$D0	# d0 += h2*s3

	vpmuludq	$H2,$T4,$H2	# h2*s4
	vpmuludq	$H3,$T4,$H3	# h3*s4
	vpsrldq		\$6,$T0,$T2	# splat input
	vpaddq		$H2,$D1,$D1	# d1 += h2*s4
	vpmuludq	$H4,$T4,$H4	# h4*s4
	vpsrldq		\$6,$T1,$T3	#
	vpaddq		$H3,$D2,$H2	# h2 = d2 + h3*s4
	vpaddq		$H4,$D3,$H3	# h3 = d3 + h4*s4
	vpmuludq	-0x20(%r11),$H0,$H4	# h0*r4

	vpunpckhqdq	$T1,$T0,$T4	# 4
	vpaddq		$H4,$D4,$H4	# h4 = d4 + h0*r4
	vpaddq		$H0,$D0,$H0	# h0 = d0 + h1*s4

	vpunpcklqdq	$T1,$T0,$T0	# 0:1
	vpunpcklqdq	$T3,$T2,$T3	# 2:3

	#vpsrlq		\$40,$T4,$T4	# 4
	vpsrldq		\$`40/8`,$T4,$T4	# 4

	vmovdqa		0x00(%rsp),$D4	# preload r0^2
	vpand		$MASK,$T0,$T0	# 0
	vpand		$MASK,$T1,$T1	# 1
	vpand		0(%rcx),$T4,$T4	# .Lmask24
	vpand		$MASK,$T2,$T2	# 2
	vpand		$MASK,$T3,$T3	# 3
	vpor		32(%rcx),$T4,$T4	# padbit, yes, always

	################################################################
	# lazy reduction as discussed in "NEON crypto" by D.J. Bernstein
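	# Carries are propagated along two interleaved chains; the h4 -> h0
	# step multiplies the carry by 5 (2^130 = 5 mod 2^130-5), and limbs
	# may stay slightly wider than 26 bits, which later iterations absorb.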
	vpaddq		$D3,$H4,$H4	# h3 -> h4
	vpaddq		$D0,$D1,$H1	# h0 -> h1
	vpaddq		$D1,$H2,$H2	# h1 -> h2
	vpaddq		$D0,$H0,$H0	# h4 -> h0
	vpaddq		$D2,$H3,$H3	# h2 -> h3
	vpaddq		$D0,$H1,$H1	# h0 -> h1
	vpaddq		$D3,$H4,$H4	# h3 -> h4

	################################################################
	# multiply (inp[0:1]+hash) or inp[2:3] by r^2:r^1

	vpshufd		\$0x10,$D4,$D4	# r0^n, xx12 -> x1x2
	vmovdqa		$H2,0x20(%r11)
	vmovdqa		$H0,0x00(%r11)
	vmovdqa		$H1,0x10(%r11)
	vmovdqa		$H3,0x30(%r11)
	vmovdqa		$H4,0x40(%r11)

	# d4 = h4*r0 + h3*r1 + h2*r2 + h1*r3 + h0*r4
	# d3 = h3*r0 + h2*r1 + h1*r2 + h0*r3 + h4*5*r4
	# d2 = h2*r0 + h1*r1 + h0*r2 + h4*5*r3 + h3*5*r4
	# d1 = h1*r0 + h0*r1 + h4*5*r2 + h3*5*r3 + h2*5*r4
	# d0 = h0*r0 + h4*5*r1 + h3*5*r2 + h2*5*r3 + h1*5*r4

	vpmuludq	$T2,$D4,$D2	# d2 = h2*r0
	vpmuludq	$T0,$D4,$D0	# d0 = h0*r0
	vpshufd		\$0x10,`16*1-64`($ctx),$H2	# r1^n
	vpmuludq	$T1,$D4,$D1	# d1 = h1*r0
	vpmuludq	$T3,$D4,$D3	# d3 = h3*r0
	vpmuludq	$T4,$D4,$D4	# d4 = h4*r0

	vpmuludq	$T3,$H2,$H0	# h3*r1
	vpaddq		$H0,$D4,$D4	# d4 += h3*r1
	vpshufd		\$0x10,`16*2-64`($ctx),$H3	# s1^n
	vpmuludq	$T2,$H2,$H1	# h2*r1
	vpaddq		$H1,$D3,$D3	# d3 += h2*r1
	vpshufd		\$0x10,`16*3-64`($ctx),$H4	# r2^n
	vpmuludq	$T1,$H2,$H0	# h1*r1
	vpaddq		$H0,$D2,$D2	# d2 += h1*r1
	vpmuludq	$T0,$H2,$H2	# h0*r1
	vpaddq		$H2,$D1,$D1	# d1 += h0*r1
	vpmuludq	$T4,$H3,$H3	# h4*s1
	vpaddq		$H3,$D0,$D0	# d0 += h4*s1

	vpshufd		\$0x10,`16*4-64`($ctx),$H2	# s2^n
	vpmuludq	$T2,$H4,$H1	# h2*r2
	vpaddq		$H1,$D4,$D4	# d4 += h2*r2
	vpmuludq	$T1,$H4,$H0	# h1*r2
	vpaddq		$H0,$D3,$D3	# d3 += h1*r2
	vpshufd		\$0x10,`16*5-64`($ctx),$H3	# r3^n
	vpmuludq	$T0,$H4,$H4	# h0*r2
	vpaddq		$H4,$D2,$D2	# d2 += h0*r2
	vpmuludq	$T4,$H2,$H1	# h4*s2
	vpaddq		$H1,$D1,$D1	# d1 += h4*s2
	vpshufd		\$0x10,`16*6-64`($ctx),$H4	# s3^n
	vpmuludq	$T3,$H2,$H2	# h3*s2
	vpaddq		$H2,$D0,$D0	# d0 += h3*s2

	vpmuludq	$T1,$H3,$H0	# h1*r3
	vpaddq		$H0,$D4,$D4	# d4 += h1*r3
	vpmuludq	$T0,$H3,$H3	# h0*r3
	vpaddq		$H3,$D3,$D3	# d3 += h0*r3
	vpshufd		\$0x10,`16*7-64`($ctx),$H2	# r4^n
	vpmuludq	$T4,$H4,$H1	# h4*s3
	vpaddq		$H1,$D2,$D2	# d2 += h4*s3
	vpshufd		\$0x10,`16*8-64`($ctx),$H3	# s4^n
	vpmuludq	$T3,$H4,$H0	# h3*s3
	vpaddq		$H0,$D1,$D1	# d1 += h3*s3
	vpmuludq	$T2,$H4,$H4	# h2*s3
	vpaddq		$H4,$D0,$D0	# d0 += h2*s3

	vpmuludq	$T0,$H2,$H2	# h0*r4
	vpaddq		$H2,$D4,$D4	# h4 = d4 + h0*r4
	vpmuludq	$T4,$H3,$H1	# h4*s4
	vpaddq		$H1,$D3,$D3	# h3 = d3 + h4*s4
	vpmuludq	$T3,$H3,$H0	# h3*s4
	vpaddq		$H0,$D2,$D2	# h2 = d2 + h3*s4
	vpmuludq	$T2,$H3,$H1	# h2*s4
	vpaddq		$H1,$D1,$D1	# h1 = d1 + h2*s4
	vpmuludq	$T1,$H3,$H3	# h1*s4
	vpaddq		$H3,$D0,$D0	# h0 = d0 + h1*s4

	vmovdqu		16*0($inp),$H0	# load input
	vmovdqu		16*1($inp),$H1

	vpsrldq		\$6,$H0,$H2	# splat input
	vpunpckhqdq	$H1,$H0,$H4	# 4
	vpunpcklqdq	$H1,$H0,$H0	# 0:1
	vpunpcklqdq	$H3,$H2,$H3	# 2:3
	vpsrlq		\$40,$H4,$H4	# 4
	vpand		$MASK,$H0,$H0	# 0
	vpand		$MASK,$H1,$H1	# 1
	vpand		$MASK,$H2,$H2	# 2
	vpand		$MASK,$H3,$H3	# 3
	vpor		32(%rcx),$H4,$H4	# padbit, yes, always

	vpshufd		\$0x32,`16*0-64`($ctx),$T4	# r0^n, 34xx -> x3x4
	vpaddq		0x00(%r11),$H0,$H0
	vpaddq		0x10(%r11),$H1,$H1
	vpaddq		0x20(%r11),$H2,$H2
	vpaddq		0x30(%r11),$H3,$H3
	vpaddq		0x40(%r11),$H4,$H4

	################################################################
	# multiply (inp[0:1]+hash) by r^4:r^3 and accumulate

	vpmuludq	$H0,$T4,$T0	# h0*r0
	vpaddq		$T0,$D0,$D0	# d0 += h0*r0
	vpmuludq	$H1,$T4,$T1	# h1*r0
	vpaddq		$T1,$D1,$D1	# d1 += h1*r0
	vpmuludq	$H2,$T4,$T0	# h2*r0
	vpaddq		$T0,$D2,$D2	# d2 += h2*r0
	vpshufd		\$0x32,`16*1-64`($ctx),$T2	# r1^n
	vpmuludq	$H3,$T4,$T1	# h3*r0
	vpaddq		$T1,$D3,$D3	# d3 += h3*r0
	vpmuludq	$H4,$T4,$T4	# h4*r0
	vpaddq		$T4,$D4,$D4	# d4 += h4*r0

	vpmuludq	$H3,$T2,$T0	# h3*r1
	vpaddq		$T0,$D4,$D4	# d4 += h3*r1
	vpshufd		\$0x32,`16*2-64`($ctx),$T3	# s1
	vpmuludq	$H2,$T2,$T1	# h2*r1
	vpaddq		$T1,$D3,$D3	# d3 += h2*r1
	vpshufd		\$0x32,`16*3-64`($ctx),$T4	# r2
	vpmuludq	$H1,$T2,$T0	# h1*r1
	vpaddq		$T0,$D2,$D2	# d2 += h1*r1
	vpmuludq	$H0,$T2,$T2	# h0*r1
	vpaddq		$T2,$D1,$D1	# d1 += h0*r1
	vpmuludq	$H4,$T3,$T3	# h4*s1
	vpaddq		$T3,$D0,$D0	# d0 += h4*s1

	vpshufd		\$0x32,`16*4-64`($ctx),$T2	# s2
	vpmuludq	$H2,$T4,$T1	# h2*r2
	vpaddq		$T1,$D4,$D4	# d4 += h2*r2
	vpmuludq	$H1,$T4,$T0	# h1*r2
	vpaddq		$T0,$D3,$D3	# d3 += h1*r2
	vpshufd		\$0x32,`16*5-64`($ctx),$T3	# r3
	vpmuludq	$H0,$T4,$T4	# h0*r2
	vpaddq		$T4,$D2,$D2	# d2 += h0*r2
	vpmuludq	$H4,$T2,$T1	# h4*s2
	vpaddq		$T1,$D1,$D1	# d1 += h4*s2
	vpshufd		\$0x32,`16*6-64`($ctx),$T4	# s3
	vpmuludq	$H3,$T2,$T2	# h3*s2
	vpaddq		$T2,$D0,$D0	# d0 += h3*s2

	vpmuludq	$H1,$T3,$T0	# h1*r3
	vpaddq		$T0,$D4,$D4	# d4 += h1*r3
	vpmuludq	$H0,$T3,$T3	# h0*r3
	vpaddq		$T3,$D3,$D3	# d3 += h0*r3
	vpshufd		\$0x32,`16*7-64`($ctx),$T2	# r4
	vpmuludq	$H4,$T4,$T1	# h4*s3
	vpaddq		$T1,$D2,$D2	# d2 += h4*s3
	vpshufd		\$0x32,`16*8-64`($ctx),$T3	# s4
	vpmuludq	$H3,$T4,$T0	# h3*s3
	vpaddq		$T0,$D1,$D1	# d1 += h3*s3
	vpmuludq	$H2,$T4,$T4	# h2*s3
	vpaddq		$T4,$D0,$D0	# d0 += h2*s3

	vpmuludq	$H0,$T2,$T2	# h0*r4
	vpaddq		$T2,$D4,$D4	# d4 += h0*r4
	vpmuludq	$H4,$T3,$T1	# h4*s4
	vpaddq		$T1,$D3,$D3	# d3 += h4*s4
	vpmuludq	$H3,$T3,$T0	# h3*s4
	vpaddq		$T0,$D2,$D2	# d2 += h3*s4
	vpmuludq	$H2,$T3,$T1	# h2*s4
	vpaddq		$T1,$D1,$D1	# d1 += h2*s4
	vpmuludq	$H1,$T3,$T3	# h1*s4
	vpaddq		$T3,$D0,$D0	# d0 += h1*s4

	################################################################
	# horizontal addition
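	# i.e. the two 64-bit lanes of each %xmm accumulator are folded
	# into a single scalar sum per limb before the final carry pass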
	################################################################
	vpaddq		$H3,$D4,$D4	# h3 -> h4
	vpaddq		$H0,$D1,$D1	# h0 -> h1
	vpaddq		$H1,$D2,$D2	# h1 -> h2
	vpaddq		$H4,$D0,$D0	# h4 -> h0
	vpaddq		$H2,$D3,$D3	# h2 -> h3
	vpaddq		$H0,$D1,$D1	# h0 -> h1
	vpaddq		$H3,$D4,$D4	# h3 -> h4

	vmovd		$D0,`4*0-48-64`($ctx)	# save partially reduced
	vmovd		$D1,`4*1-48-64`($ctx)
	vmovd		$D2,`4*2-48-64`($ctx)
	vmovd		$D3,`4*3-48-64`($ctx)
	vmovd		$D4,`4*4-48-64`($ctx)
$code.=<<___	if ($win64);
	vmovdqa		0x50(%r11),%xmm6
	vmovdqa		0x60(%r11),%xmm7
	vmovdqa		0x70(%r11),%xmm8
	vmovdqa		0x80(%r11),%xmm9
	vmovdqa		0x90(%r11),%xmm10
	vmovdqa		0xa0(%r11),%xmm11
	vmovdqa		0xb0(%r11),%xmm12
	vmovdqa		0xc0(%r11),%xmm13
	vmovdqa		0xd0(%r11),%xmm14
	vmovdqa		0xe0(%r11),%xmm15
$code.=<<___	if (!$win64);
.size	poly1305_blocks_avx,.-poly1305_blocks_avx

.type	poly1305_emit_avx,\@function,3
	cmpl	\$0,20($ctx)		# is_base2_26?
	mov	0($ctx),%eax		# load hash value base 2^26
	shl	\$26,%rcx		# base 2^26 -> base 2^64
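	# recombining digits:
	# h = h[0] + (h[1]<<26) + (h[2]<<52) + (h[3]<<78) + (h[4]<<104)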
	mov	%r10,%rax		# could be partially reduced, so reduce
	add	\$5,%r8			# compare to modulus
	shr	\$2,%r10		# did 130-bit value overflow?
	add	0($nonce),%rax		# accumulate nonce
	mov	%rax,0($mac)		# write result
.size	poly1305_emit_avx,.-poly1305_emit_avx

my ($H0,$H1,$H2,$H3,$H4, $MASK, $T4,$T0,$T1,$T2,$T3, $D0,$D1,$D2,$D3,$D4) =
    map("%ymm$_",(0..15));

.type	poly1305_blocks_avx2,\@function,4
poly1305_blocks_avx2:
	mov	20($ctx),%r8d		# is_base2_26
	mov	$len,%r15		# reassign $len
	mov	0($ctx),$d1		# load hash value
	mov	24($ctx),$r0		# load r

	################################# base 2^26 -> base 2^64
	and	\$`-1*(1<<31)`,$d1
	mov	$d2,$r1			# borrow $r1
	and	\$`-1*(1<<31)`,$d2
	adc	\$0,$h2			# can be partially reduced...
	mov	\$-4,$d2		# ... so reduce
	add	$r1,$s1			# s1 = r1 + (r1 >> 2)

.Lbase2_26_pre_avx2:
	add	0($inp),$h0		# accumulate input
	call	__poly1305_block
	jnz	.Lbase2_26_pre_avx2

	test	$padbit,$padbit		# if $padbit is zero,
	jz	.Lstore_base2_64_avx2	# store hash in base 2^64 format

	################################# base 2^64 -> base 2^26
	and	\$0x3ffffff,%rax	# h[0]
	and	\$0x3ffffff,%rdx	# h[1]
	and	\$0x3ffffff,$h0		# h[2]
	and	\$0x3ffffff,$h1		# h[3]
	jz	.Lstore_base2_26_avx2

.Lstore_base2_64_avx2:
	mov	$h2,16($ctx)		# note that is_base2_26 is zeroed

.Lstore_base2_26_avx2:
	mov	%rax#d,0($ctx)		# store hash value base 2^26

.Lblocks_avx2_epilogue:

.Lbase2_64_avx2_body:
	mov	$len,%r15		# reassign $len
	mov	24($ctx),$r0		# load r
	mov	0($ctx),$h0		# load hash value
	add	$r1,$s1			# s1 = r1 + (r1 >> 2)

.Lbase2_64_pre_avx2:
	add	0($inp),$h0		# accumulate input
	call	__poly1305_block
	jnz	.Lbase2_64_pre_avx2

	################################# base 2^64 -> base 2^26
	and	\$0x3ffffff,%rax	# h[0]
	and	\$0x3ffffff,%rdx	# h[1]
	and	\$0x3ffffff,$h0		# h[2]
	and	\$0x3ffffff,$h1		# h[3]
	movl	\$1,20($ctx)		# set is_base2_26
	call	__poly1305_init_avx

.Lbase2_64_avx2_epilogue:
	vmovd	4*0($ctx),%x#$H0	# load hash value base 2^26
	vmovd	4*1($ctx),%x#$H1
	vmovd	4*2($ctx),%x#$H2
	vmovd	4*3($ctx),%x#$H3
	vmovd	4*4($ctx),%x#$H4
$code.=<<___	if (!$win64);
$code.=<<___	if ($win64);
	lea	-0xf8(%rsp),%r11
	vmovdqa	%xmm6,0x50(%r11)
	vmovdqa	%xmm7,0x60(%r11)
	vmovdqa	%xmm8,0x70(%r11)
	vmovdqa	%xmm9,0x80(%r11)
	vmovdqa	%xmm10,0x90(%r11)
	vmovdqa	%xmm11,0xa0(%r11)
	vmovdqa	%xmm12,0xb0(%r11)
	vmovdqa	%xmm13,0xc0(%r11)
	vmovdqa	%xmm14,0xd0(%r11)
	vmovdqa	%xmm15,0xe0(%r11)
	lea	48+64($ctx),$ctx	# size optimization
	lea	.Lconst(%rip),%rcx

	# expand and copy pre-calculated table to stack
	vmovdqu	`16*0-64`($ctx),%x#$T2
	vmovdqu	`16*1-64`($ctx),%x#$T3
	vmovdqu	`16*2-64`($ctx),%x#$T4
	vmovdqu	`16*3-64`($ctx),%x#$D0
	vmovdqu	`16*4-64`($ctx),%x#$D1
	vmovdqu	`16*5-64`($ctx),%x#$D2
	vmovdqu	`16*6-64`($ctx),%x#$D3
	vpermq	\$0x15,$T2,$T2		# 00003412 -> 12343434
	vmovdqu	`16*7-64`($ctx),%x#$D4
	vpermq	\$0x15,$T3,$T3
	vpshufd	\$0xc8,$T2,$T2		# 12343434 -> 14243444
	vmovdqu	`16*8-64`($ctx),%x#$MASK
	vpermq	\$0x15,$T4,$T4
	vpshufd	\$0xc8,$T3,$T3
	vmovdqa	$T2,0x00(%rsp)
	vpermq	\$0x15,$D0,$D0
	vpshufd	\$0xc8,$T4,$T4
	vmovdqa	$T3,0x20(%rsp)
	vpermq	\$0x15,$D1,$D1
	vpshufd	\$0xc8,$D0,$D0
	vmovdqa	$T4,0x40(%rsp)
	vpermq	\$0x15,$D2,$D2
	vpshufd	\$0xc8,$D1,$D1
	vmovdqa	$D0,0x60(%rsp)
	vpermq	\$0x15,$D3,$D3
	vpshufd	\$0xc8,$D2,$D2
	vmovdqa	$D1,0x80(%rsp)
	vpermq	\$0x15,$D4,$D4
	vpshufd	\$0xc8,$D3,$D3
	vmovdqa	$D2,0xa0(%rsp)
	vpermq	\$0x15,$MASK,$MASK
	vpshufd	\$0xc8,$D4,$D4
	vmovdqa	$D3,0xc0(%rsp)
	vpshufd	\$0xc8,$MASK,$MASK
	vmovdqa	$D4,0xe0(%rsp)
	vmovdqa	$MASK,0x100(%rsp)
	vmovdqa	64(%rcx),$MASK		# .Lmask26

	################################################################
	vmovdqu	16*0($inp),%x#$T0
	vmovdqu	16*1($inp),%x#$T1
	vinserti128	\$1,16*2($inp),$T0,$T0
	vinserti128	\$1,16*3($inp),$T1,$T1

	vpsrldq	\$6,$T0,$T2		# splat input
	vpunpckhqdq	$T1,$T0,$T4	# 4
	vpunpcklqdq	$T3,$T2,$T2	# 2:3
	vpunpcklqdq	$T1,$T0,$T0	# 0:1
	vpsrlq	\$40,$T4,$T4		# 4
	vpand	$MASK,$T2,$T2		# 2
	vpand	$MASK,$T0,$T0		# 0
	vpand	$MASK,$T1,$T1		# 1
	vpand	$MASK,$T3,$T3		# 3
	vpor	32(%rcx),$T4,$T4	# padbit, yes, always

	lea	0x90(%rsp),%rax		# size optimization
	vpaddq	$H2,$T2,$H2		# accumulate input
	################################################################
	# ((inp[0]*r^4+inp[4])*r^4+inp[ 8])*r^4
	# ((inp[1]*r^4+inp[5])*r^4+inp[ 9])*r^3
	# ((inp[2]*r^4+inp[6])*r^4+inp[10])*r^2
	# ((inp[3]*r^4+inp[7])*r^4+inp[11])*r^1
	#	\________/\__________/
	################################################################
	#vpaddq	$H2,$T2,$H2		# accumulate input

	vmovdqa	`32*0`(%rsp),$T0	# r0^4
	vmovdqa	`32*1`(%rsp),$T1	# r1^4
	vmovdqa	`32*3`(%rsp),$T2	# r2^4
	vmovdqa	`32*6-0x90`(%rax),$T3	# s3^4
	vmovdqa	`32*8-0x90`(%rax),$S4	# s4^4

	# d4 = h4*r0 + h3*r1 + h2*r2 + h1*r3 + h0*r4
	# d3 = h3*r0 + h2*r1 + h1*r2 + h0*r3 + h4*5*r4
	# d2 = h2*r0 + h1*r1 + h0*r2 + h4*5*r3 + h3*5*r4
	# d1 = h1*r0 + h0*r1 + h4*5*r2 + h3*5*r3 + h2*5*r4
	# d0 = h0*r0 + h4*5*r1 + h3*5*r2 + h2*5*r3 + h1*5*r4
	#
	# however, as h2 is "chronologically" the first one available,
	# the corresponding operations are pulled up, so it's:
	#
	# d4 = h2*r2   + h4*r0 + h3*r1 + h1*r3   + h0*r4
	# d3 = h2*r1   + h3*r0 + h1*r2 + h0*r3   + h4*5*r4
	# d2 = h2*r0   + h1*r1 + h0*r2 + h4*5*r3 + h3*5*r4
	# d1 = h2*5*r4 + h1*r0 + h0*r1 + h4*5*r2 + h3*5*r3
	# d0 = h2*5*r3 + h0*r0 + h4*5*r1 + h3*5*r2 + h1*5*r4
	vpmuludq	$H2,$T0,$D2	# d2 = h2*r0
	vpmuludq	$H2,$T1,$D3	# d3 = h2*r1
	vpmuludq	$H2,$T2,$D4	# d4 = h2*r2
	vpmuludq	$H2,$T3,$D0	# d0 = h2*s3
	vpmuludq	$H2,$S4,$D1	# d1 = h2*s4

	vpmuludq	$H0,$T1,$T4	# h0*r1
	vpmuludq	$H1,$T1,$H2	# h1*r1, borrow $H2 as temp
	vpaddq		$T4,$D1,$D1	# d1 += h0*r1
	vpaddq		$H2,$D2,$D2	# d2 += h1*r1
	vpmuludq	$H3,$T1,$T4	# h3*r1
	vpmuludq	`32*2`(%rsp),$H4,$H2	# h4*s1
	vpaddq		$T4,$D4,$D4	# d4 += h3*r1
	vpaddq		$H2,$D0,$D0	# d0 += h4*s1
	vmovdqa		`32*4-0x90`(%rax),$T1	# s2

	vpmuludq	$H0,$T0,$T4	# h0*r0
	vpmuludq	$H1,$T0,$H2	# h1*r0
	vpaddq		$T4,$D0,$D0	# d0 += h0*r0
	vpaddq		$H2,$D1,$D1	# d1 += h1*r0
	vpmuludq	$H3,$T0,$T4	# h3*r0
	vpmuludq	$H4,$T0,$H2	# h4*r0
	vmovdqu		16*0($inp),%x#$T0	# load input
	vpaddq		$T4,$D3,$D3	# d3 += h3*r0
	vpaddq		$H2,$D4,$D4	# d4 += h4*r0
	vinserti128	\$1,16*2($inp),$T0,$T0

	vpmuludq	$H3,$T1,$T4	# h3*s2
	vpmuludq	$H4,$T1,$H2	# h4*s2
	vmovdqu		16*1($inp),%x#$T1
	vpaddq		$T4,$D0,$D0	# d0 += h3*s2
	vpaddq		$H2,$D1,$D1	# d1 += h4*s2
	vmovdqa		`32*5-0x90`(%rax),$H2	# r3
	vpmuludq	$H1,$T2,$T4	# h1*r2
	vpmuludq	$H0,$T2,$T2	# h0*r2
	vpaddq		$T4,$D3,$D3	# d3 += h1*r2
	vpaddq		$T2,$D2,$D2	# d2 += h0*r2
	vinserti128	\$1,16*3($inp),$T1,$T1

	vpmuludq	$H1,$H2,$T4	# h1*r3
	vpmuludq	$H0,$H2,$H2	# h0*r3
	vpsrldq		\$6,$T0,$T2	# splat input
	vpaddq		$T4,$D4,$D4	# d4 += h1*r3
	vpaddq		$H2,$D3,$D3	# d3 += h0*r3
	vpmuludq	$H3,$T3,$T4	# h3*s3
	vpmuludq	$H4,$T3,$H2	# h4*s3
	vpaddq		$T4,$D1,$D1	# d1 += h3*s3
	vpaddq		$H2,$D2,$D2	# d2 += h4*s3
	vpunpckhqdq	$T1,$T0,$T4	# 4

	vpmuludq	$H3,$S4,$H3	# h3*s4
	vpmuludq	$H4,$S4,$H4	# h4*s4
	vpunpcklqdq	$T1,$T0,$T0	# 0:1
	vpaddq		$H3,$D2,$H2	# h2 = d2 + h3*s4
	vpaddq		$H4,$D3,$H3	# h3 = d3 + h4*s4
	vpunpcklqdq	$T3,$T2,$T3	# 2:3
	vpmuludq	`32*7-0x90`(%rax),$H0,$H4	# h0*r4
	vpmuludq	$H1,$S4,$H0	# h1*s4
	vmovdqa		64(%rcx),$MASK	# .Lmask26
	vpaddq		$H4,$D4,$H4	# h4 = d4 + h0*r4
	vpaddq		$H0,$D0,$H0	# h0 = d0 + h1*s4

	################################################################
	# lazy reduction (interleaved with tail of input splat)

	vpaddq		$D3,$H4,$H4	# h3 -> h4
	vpaddq		$D0,$D1,$H1	# h0 -> h1
	vpaddq		$D1,$H2,$H2	# h1 -> h2
	vpaddq		$D4,$H0,$H0	# h4 -> h0
	vpand		$MASK,$T2,$T2	# 2
	vpaddq		$D2,$H3,$H3	# h2 -> h3
	vpaddq		$T2,$H2,$H2	# modulo-scheduled
	vpaddq		$D0,$H1,$H1	# h0 -> h1
	vpsrlq		\$40,$T4,$T4	# 4
	vpaddq		$D3,$H4,$H4	# h3 -> h4
	vpand		$MASK,$T0,$T0	# 0
	vpand		$MASK,$T1,$T1	# 1
	vpand		$MASK,$T3,$T3	# 3
	vpor		32(%rcx),$T4,$T4	# padbit, yes, always

	################################################################
	# While the multiplications above were by r^4 in all lanes, in the
	# last iteration we multiply the least significant lane by r^4 and
	# the most significant one by r, so this is a copy of the above,
	# except that references to the precomputed table are displaced by 4...
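	# (each 32-bit table pair interleaves two powers of r, so the
	#  4-byte displacement switches which power every lane reads)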
	#vpaddq	$H2,$T2,$H2		# accumulate input

	vmovdqu	`32*0+4`(%rsp),$T0	# r0^4
	vmovdqu	`32*1+4`(%rsp),$T1	# r1^4
	vmovdqu	`32*3+4`(%rsp),$T2	# r2^4
	vmovdqu	`32*6+4-0x90`(%rax),$T3	# s3^4
	vmovdqu	`32*8+4-0x90`(%rax),$S4	# s4^4

	vpmuludq	$H2,$T0,$D2	# d2 = h2*r0
	vpmuludq	$H2,$T1,$D3	# d3 = h2*r1
	vpmuludq	$H2,$T2,$D4	# d4 = h2*r2
	vpmuludq	$H2,$T3,$D0	# d0 = h2*s3
	vpmuludq	$H2,$S4,$D1	# d1 = h2*s4

	vpmuludq	$H0,$T1,$T4	# h0*r1
	vpmuludq	$H1,$T1,$H2	# h1*r1
	vpaddq		$T4,$D1,$D1	# d1 += h0*r1
	vpaddq		$H2,$D2,$D2	# d2 += h1*r1
	vpmuludq	$H3,$T1,$T4	# h3*r1
	vpmuludq	`32*2+4`(%rsp),$H4,$H2	# h4*s1
	vpaddq		$T4,$D4,$D4	# d4 += h3*r1
	vpaddq		$H2,$D0,$D0	# d0 += h4*s1

	vpmuludq	$H0,$T0,$T4	# h0*r0
	vpmuludq	$H1,$T0,$H2	# h1*r0
	vpaddq		$T4,$D0,$D0	# d0 += h0*r0
	vmovdqu		`32*4+4-0x90`(%rax),$T1	# s2
	vpaddq		$H2,$D1,$D1	# d1 += h1*r0
	vpmuludq	$H3,$T0,$T4	# h3*r0
	vpmuludq	$H4,$T0,$H2	# h4*r0
	vpaddq		$T4,$D3,$D3	# d3 += h3*r0
	vpaddq		$H2,$D4,$D4	# d4 += h4*r0

	vpmuludq	$H3,$T1,$T4	# h3*s2
	vpmuludq	$H4,$T1,$H2	# h4*s2
	vpaddq		$T4,$D0,$D0	# d0 += h3*s2
	vpaddq		$H2,$D1,$D1	# d1 += h4*s2
	vmovdqu		`32*5+4-0x90`(%rax),$H2	# r3
	vpmuludq	$H1,$T2,$T4	# h1*r2
	vpmuludq	$H0,$T2,$T2	# h0*r2
	vpaddq		$T4,$D3,$D3	# d3 += h1*r2
	vpaddq		$T2,$D2,$D2	# d2 += h0*r2

	vpmuludq	$H1,$H2,$T4	# h1*r3
	vpmuludq	$H0,$H2,$H2	# h0*r3
	vpaddq		$T4,$D4,$D4	# d4 += h1*r3
	vpaddq		$H2,$D3,$D3	# d3 += h0*r3
	vpmuludq	$H3,$T3,$T4	# h3*s3
	vpmuludq	$H4,$T3,$H2	# h4*s3
	vpaddq		$T4,$D1,$D1	# d1 += h3*s3
	vpaddq		$H2,$D2,$D2	# d2 += h4*s3

	vpmuludq	$H3,$S4,$H3	# h3*s4
	vpmuludq	$H4,$S4,$H4	# h4*s4
	vpaddq		$H3,$D2,$H2	# h2 = d2 + h3*s4
	vpaddq		$H4,$D3,$H3	# h3 = d3 + h4*s4
	vpmuludq	`32*7+4-0x90`(%rax),$H0,$H4	# h0*r4
	vpmuludq	$H1,$S4,$H0	# h1*s4
	vmovdqa		64(%rcx),$MASK	# .Lmask26
	vpaddq		$H4,$D4,$H4	# h4 = d4 + h0*r4
	vpaddq		$H0,$D0,$H0	# h0 = d0 + h1*s4

	################################################################
	# horizontal addition

	vpermq		\$0x2,$H3,$T3
	vpermq		\$0x2,$H4,$T4
	vpermq		\$0x2,$H0,$T0
	vpermq		\$0x2,$D1,$T1
	vpermq		\$0x2,$H2,$T2

	################################################################
	vpaddq		$D3,$H4,$H4	# h3 -> h4
	vpaddq		$D0,$D1,$H1	# h0 -> h1
	vpaddq		$D1,$H2,$H2	# h1 -> h2
	vpaddq		$D4,$H0,$H0	# h4 -> h0
	vpaddq		$D2,$H3,$H3	# h2 -> h3
	vpaddq		$D0,$H1,$H1	# h0 -> h1
	vpaddq		$D3,$H4,$H4	# h3 -> h4

	vmovd		%x#$H0,`4*0-48-64`($ctx)	# save partially reduced
	vmovd		%x#$H1,`4*1-48-64`($ctx)
	vmovd		%x#$H2,`4*2-48-64`($ctx)
	vmovd		%x#$H3,`4*3-48-64`($ctx)
	vmovd		%x#$H4,`4*4-48-64`($ctx)
$code.=<<___	if ($win64);
	vmovdqa		0x50(%r11),%xmm6
	vmovdqa		0x60(%r11),%xmm7
	vmovdqa		0x70(%r11),%xmm8
	vmovdqa		0x80(%r11),%xmm9
	vmovdqa		0x90(%r11),%xmm10
	vmovdqa		0xa0(%r11),%xmm11
	vmovdqa		0xb0(%r11),%xmm12
	vmovdqa		0xc0(%r11),%xmm13
	vmovdqa		0xd0(%r11),%xmm14
	vmovdqa		0xe0(%r11),%xmm15
$code.=<<___	if (!$win64);
.size	poly1305_blocks_avx2,.-poly1305_blocks_avx2

.long	0x0ffffff,0,0x0ffffff,0,0x0ffffff,0,0x0ffffff,0
.long	`1<<24`,0,`1<<24`,0,`1<<24`,0,`1<<24`,0
.long	0x3ffffff,0,0x3ffffff,0,0x3ffffff,0,0x3ffffff,0
.long	5,0,5,0,5,0,5,0
.asciz	"Poly1305 for x86_64, CRYPTOGAMS by <appro\@openssl.org>"

# EXCEPTION_DISPOSITION handler (EXCEPTION_RECORD *rec,ULONG64 frame,
#		CONTEXT *context,DISPATCHER_CONTEXT *disp)
.extern	__imp_RtlVirtualUnwind
.type	se_handler,\@abi-omnipotent
	mov	120($context),%rax	# pull context->Rax
	mov	248($context),%rbx	# pull context->Rip

	mov	8($disp),%rsi		# disp->ImageBase
	mov	56($disp),%r11		# disp->HandlerData

	mov	0(%r11),%r10d		# HandlerData[0]
	lea	(%rsi,%r10),%r10	# prologue label
	cmp	%r10,%rbx		# context->Rip<.Lprologue
	jb	.Lcommon_seh_tail

	mov	152($context),%rax	# pull context->Rsp

	mov	4(%r11),%r10d		# HandlerData[1]
	lea	(%rsi,%r10),%r10	# epilogue label
	cmp	%r10,%rbx		# context->Rip>=.Lepilogue
	jae	.Lcommon_seh_tail

	mov	%rbx,144($context)	# restore context->Rbx
	mov	%rbp,160($context)	# restore context->Rbp
	mov	%r12,216($context)	# restore context->R12
	mov	%r13,224($context)	# restore context->R13
	mov	%r14,232($context)	# restore context->R14
	mov	%r15,240($context)	# restore context->R15
	jmp	.Lcommon_seh_tail
.size	se_handler,.-se_handler

.type	avx_handler,\@abi-omnipotent
	mov	120($context),%rax	# pull context->Rax
	mov	248($context),%rbx	# pull context->Rip

	mov	8($disp),%rsi		# disp->ImageBase
	mov	56($disp),%r11		# disp->HandlerData

	mov	0(%r11),%r10d		# HandlerData[0]
	lea	(%rsi,%r10),%r10	# prologue label
	cmp	%r10,%rbx		# context->Rip<prologue label
	jb	.Lcommon_seh_tail

	mov	152($context),%rax	# pull context->Rsp

	mov	4(%r11),%r10d		# HandlerData[1]
	lea	(%rsi,%r10),%r10	# epilogue label
	cmp	%r10,%rbx		# context->Rip>=epilogue label
	jae	.Lcommon_seh_tail

	mov	208($context),%rax	# pull context->R11
	lea	512($context),%rdi	# &context.Xmm6
	.long	0xa548f3fc		# cld; rep movsq
	mov	%rax,152($context)	# restore context->Rsp
	mov	%rsi,168($context)	# restore context->Rsi
	mov	%rdi,176($context)	# restore context->Rdi

	mov	40($disp),%rdi		# disp->ContextRecord
	mov	$context,%rsi		# context
	mov	\$154,%ecx		# sizeof(CONTEXT) in qwords
	.long	0xa548f3fc		# cld; rep movsq

	xor	%rcx,%rcx		# arg1, UNW_FLAG_NHANDLER
	mov	8(%rsi),%rdx		# arg2, disp->ImageBase
	mov	0(%rsi),%r8		# arg3, disp->ControlPc
	mov	16(%rsi),%r9		# arg4, disp->FunctionEntry
	mov	40(%rsi),%r10		# disp->ContextRecord
	lea	56(%rsi),%r11		# &disp->HandlerData
	lea	24(%rsi),%r12		# &disp->EstablisherFrame
	mov	%r10,32(%rsp)		# arg5
	mov	%r11,40(%rsp)		# arg6
	mov	%r12,48(%rsp)		# arg7
	mov	%rcx,56(%rsp)		# arg8, (NULL)
	call	*__imp_RtlVirtualUnwind(%rip)

	mov	\$1,%eax		# ExceptionContinueSearch
.size	avx_handler,.-avx_handler

	.rva	.LSEH_begin_poly1305_init
	.rva	.LSEH_end_poly1305_init
	.rva	.LSEH_info_poly1305_init

	.rva	.LSEH_begin_poly1305_blocks
	.rva	.LSEH_end_poly1305_blocks
	.rva	.LSEH_info_poly1305_blocks

	.rva	.LSEH_begin_poly1305_emit
	.rva	.LSEH_end_poly1305_emit
	.rva	.LSEH_info_poly1305_emit
$code.=<<___ if ($avx);
	.rva	.LSEH_begin_poly1305_blocks_avx
	.rva	.LSEH_info_poly1305_blocks_avx_1
	.rva	.LSEH_info_poly1305_blocks_avx_2
	.rva	.LSEH_end_poly1305_blocks_avx
	.rva	.LSEH_info_poly1305_blocks_avx_3

	.rva	.LSEH_begin_poly1305_emit_avx
	.rva	.LSEH_end_poly1305_emit_avx
	.rva	.LSEH_info_poly1305_emit_avx
$code.=<<___ if ($avx>1);
	.rva	.LSEH_begin_poly1305_blocks_avx2
	.rva	.Lbase2_64_avx2
	.rva	.LSEH_info_poly1305_blocks_avx2_1

	.rva	.Lbase2_64_avx2
	.rva	.LSEH_info_poly1305_blocks_avx2_2

	.rva	.LSEH_end_poly1305_blocks_avx2
	.rva	.LSEH_info_poly1305_blocks_avx2_3

.LSEH_info_poly1305_init:
	.rva	.LSEH_begin_poly1305_init,.LSEH_begin_poly1305_init

.LSEH_info_poly1305_blocks:
	.rva	.Lblocks_body,.Lblocks_epilogue

.LSEH_info_poly1305_emit:
	.rva	.LSEH_begin_poly1305_emit,.LSEH_begin_poly1305_emit
$code.=<<___ if ($avx);
.LSEH_info_poly1305_blocks_avx_1:
	.rva	.Lblocks_avx_body,.Lblocks_avx_epilogue		# HandlerData[]

.LSEH_info_poly1305_blocks_avx_2:
	.rva	.Lbase2_64_avx_body,.Lbase2_64_avx_epilogue	# HandlerData[]

.LSEH_info_poly1305_blocks_avx_3:
	.rva	.Ldo_avx_body,.Ldo_avx_epilogue			# HandlerData[]

.LSEH_info_poly1305_emit_avx:
	.rva	.LSEH_begin_poly1305_emit_avx,.LSEH_begin_poly1305_emit_avx
$code.=<<___ if ($avx>1);
.LSEH_info_poly1305_blocks_avx2_1:
	.rva	.Lblocks_avx2_body,.Lblocks_avx2_epilogue	# HandlerData[]

.LSEH_info_poly1305_blocks_avx2_2:
	.rva	.Lbase2_64_avx2_body,.Lbase2_64_avx2_epilogue	# HandlerData[]

.LSEH_info_poly1305_blocks_avx2_3:
	.rva	.Ldo_avx2_body,.Ldo_avx2_epilogue		# HandlerData[]
foreach (split('\n',$code)) {
	s/\`([^\`]*)\`/eval($1)/ge;	# evaluate `...` expressions
	s/%r([a-z]+)#d/%e$1/g;		# 32-bit forms: %rax#d -> %eax
	s/%r([0-9]+)#d/%r$1d/g;		#               %r10#d -> %r10d