# Copyright 2016 The OpenSSL Project Authors. All Rights Reserved.
# Licensed under the OpenSSL license (the "License"). You may not use
# this file except in compliance with the License. You can obtain a copy
# in the file LICENSE in the source distribution or at
# https://www.openssl.org/source/license.html
# ====================================================================
# Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
# project. The module is, however, dual licensed under OpenSSL and
# CRYPTOGAMS licenses depending on where you obtain it. For further
# details see http://www.openssl.org/~appro/cryptogams/.
# ====================================================================
# ChaCha20 for x86_64.
# Add AVX512F code path.
# Performance in cycles per byte out of a large buffer.
#               IALU/gcc 4.8(i)  1xSSSE3/SSE2  4xSSSE3    8xAVX2
# P4            9.48/+99%        -/22.7(ii)    -
# Core2         7.83/+55%        7.90/8.08     4.35
# Westmere      7.19/+50%        5.60/6.70     3.00
# Sandy Bridge  8.31/+42%        5.45/6.76     2.72
# Ivy Bridge    6.71/+46%        5.40/6.49     2.41
# Haswell       5.92/+43%        5.20/6.45     2.42       1.23
# Skylake       5.87/+39%        4.70/-        2.31       1.19
# Silvermont    12.0/+33%        7.75/7.40     7.03(iii)
# Goldmont      10.6/+17%        5.10/-        3.28
# Sledgehammer  7.28/+52%        -/14.2(ii)    -
# Bulldozer     9.66/+28%        9.85/11.1     3.06(iv)
# Ryzen         5.96             ?             2.40       2.09
# VIA Nano      10.5/+46%        6.72/8.60     6.05
# (i)   compared to older gcc 3.x one can observe >2x improvement on
# (ii)  as can be seen, SSE2 performance is too low on legacy
#       processors; NxSSE2 results are naturally better, but not
#       impressively better than IALU ones, which is why you won't
#       find SSE2 code below;
# (iii) this is not an optimal result for Atom because of MSROM
#       limitations; SSE2 can do better, but the gain is considered too
#       low to justify the [maintenance] effort;
# (iv)  Bulldozer actually executes the 4xXOP code path, which delivers 2.20;
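
# A minimal reference sketch (illustrative only, never called by the
# generator) of the ChaCha quarter-round that every code path below
# implements; the rotate amounts 16/12/8/7 are the ones fixed by the
# cipher definition:
sub _quarter_round_demo {
    my ($a,$b,$c,$d) = @_;
    my $rotl = sub { (($_[0]<<$_[1]) | ($_[0]>>(32-$_[1]))) & 0xffffffff };
    $a = ($a+$b) & 0xffffffff;  $d = $rotl->($d^$a,16);
    $c = ($c+$d) & 0xffffffff;  $b = $rotl->($b^$c,12);
    $a = ($a+$b) & 0xffffffff;  $d = $rotl->($d^$a,8);
    $c = ($c+$d) & 0xffffffff;  $b = $rotl->($b^$c,7);
    ($a,$b,$c,$d);
}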
if ($flavour =~ /\./) { $output = $flavour; undef $flavour; }
$win64=0; $win64=1 if ($flavour =~ /[nm]asm|mingw64/ || $output =~ /\.asm$/);
$0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
( $xlate="${dir}x86_64-xlate.pl" and -f $xlate ) or
( $xlate="${dir}../../perlasm/x86_64-xlate.pl" and -f $xlate) or
die "can't locate x86_64-xlate.pl";
if (`$ENV{CC} -Wa,-v -c -o /dev/null -x assembler /dev/null 2>&1`
=~ /GNU assembler version ([2-9]\.[0-9]+)/) {
$avx = ($1>=2.19) + ($1>=2.22) + ($1>=2.25);
if (!$avx && $win64 && ($flavour =~ /nasm/ || $ENV{ASM} =~ /nasm/) &&
`nasm -v 2>&1` =~ /NASM version ([2-9]\.[0-9]+)(?:\.([0-9]+))?/) {
$avx = ($1>=2.09) + ($1>=2.10) + ($1>=2.12);
$avx += 1 if ($1==2.11 && $2>=8);
if (!$avx && $win64 && ($flavour =~ /masm/ || $ENV{ASM} =~ /ml64/) &&
`ml64 2>&1` =~ /Version ([0-9]+)\./) {
$avx = ($1>=10) + ($1>=11);
if (!$avx && `$ENV{CC} -v 2>&1` =~ /((?:^clang|LLVM) version|.*based on LLVM) ([3-9]\.[0-9]+)/) {
$avx = ($2>=3.0) + ($2>3.0);
open OUT,"| \"$^X\" \"$xlate\" $flavour \"$output\"";
# input parameter block
($out,$inp,$len,$key,$counter)=("%rdi","%rsi","%rdx","%rcx","%r8");
.extern OPENSSL_ia32cap_P
.long 0,2,4,6,1,3,5,7
.long 8,8,8,8,8,8,8,8
.byte 0x2,0x3,0x0,0x1, 0x6,0x7,0x4,0x5, 0xa,0xb,0x8,0x9, 0xe,0xf,0xc,0xd
.byte 0x3,0x0,0x1,0x2, 0x7,0x4,0x5,0x6, 0xb,0x8,0x9,0xa, 0xf,0xc,0xd,0xe
.asciz "expand 32-byte k"
.long 0,0,0,0, 1,0,0,0, 2,0,0,0, 3,0,0,0
.long 4,0,0,0, 4,0,0,0, 4,0,0,0, 4,0,0,0
.long 0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15
.long 16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16
.asciz "ChaCha20 for x86_64, CRYPTOGAMS by <appro\@openssl.org>"
sub AUTOLOAD() # thunk [simplified] 32-bit style perlasm
{ my $opcode = $AUTOLOAD; $opcode =~ s/.*:://;
$arg = "\$$arg" if ($arg*1 eq $arg);
$code .= "\t$opcode\t".join(',',$arg,reverse @_)."\n";
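# e.g. &add(@x[$a0],@x[$b0]) falls through to this thunk and is emitted
# as a single "add" line with the operands flipped into AT&T (src,dst)
# order; a purely numeric argument picks up a '$' immediate prefix.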
@x=("%eax","%ebx","%ecx","%edx",map("%r${_}d",(8..11)),
"%nox","%nox","%nox","%nox",map("%r${_}d",(12..15)));
sub ROUND { # critical path is 24 cycles per round
my ($a0,$b0,$c0,$d0)=@_;
my ($a1,$b1,$c1,$d1)=map(($_&~3)+(($_+1)&3),($a0,$b0,$c0,$d0));
my ($a2,$b2,$c2,$d2)=map(($_&~3)+(($_+1)&3),($a1,$b1,$c1,$d1));
my ($a3,$b3,$c3,$d3)=map(($_&~3)+(($_+1)&3),($a2,$b2,$c2,$d2));
my ($xc,$xc_)=map("\"$_\"",@t);
my @x=map("\"$_\"",@x);
# Consider order in which variables are addressed by their
#   0   4   8  12 < even round
#   0   5  10  15 < odd round
# 'a', 'b' and 'd's are permanently allocated in registers,
# @x[0..7,12..15], while 'c's are maintained in memory. If
# you observe the 'c' column, you'll notice that a pair of 'c's
# is invariant between rounds. This means that we have to reload
# them once per round, in the middle. This is why you'll see a
# bunch of 'c' stores and loads in the middle, but none at
# the beginning or the end.
# Normally instructions would be interleaved to favour in-order
# execution. Generally out-of-order cores manage it gracefully,
# but not this time for some reason. As in-order execution
# cores are a dying breed and old Atom is the only one around,
# the instructions are left uninterleaved. Besides, Atom is better
# off executing the 1xSSSE3 code anyway...
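# Concretely, &ROUND(0,4,8,12) below expands the even round over the
# columns (0,4,8,12),(1,5,9,13),(2,6,10,14),(3,7,11,15), and
# &ROUND(0,5,10,15) the odd round over the diagonals
# (0,5,10,15),(1,6,11,12),(2,7,8,13),(3,4,9,14).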
"&add (@x[$a0],@x[$b0])", # Q1
"&xor (@x[$d0],@x[$a0])",
"&add (@x[$a1],@x[$b1])", # Q2
"&xor (@x[$d1],@x[$a1])",
"&add ($xc,@x[$d0])",
"&xor (@x[$b0],$xc)",
"&add ($xc_,@x[$d1])",
"&xor (@x[$b1],$xc_)",
"&add (@x[$a0],@x[$b0])",
"&xor (@x[$d0],@x[$a0])",
"&add (@x[$a1],@x[$b1])",
"&xor (@x[$d1],@x[$a1])",
"&add ($xc,@x[$d0])",
"&xor (@x[$b0],$xc)",
"&add ($xc_,@x[$d1])",
"&xor (@x[$b1],$xc_)",
"&mov (\"4*$c0(%rsp)\",$xc)", # reload pair of 'c's
"&mov (\"4*$c1(%rsp)\",$xc_)",
"&mov ($xc,\"4*$c2(%rsp)\")",
"&mov ($xc_,\"4*$c3(%rsp)\")",
"&add (@x[$a2],@x[$b2])", # Q3
"&xor (@x[$d2],@x[$a2])",
"&add (@x[$a3],@x[$b3])", # Q4
"&xor (@x[$d3],@x[$a3])",
"&add ($xc,@x[$d2])",
"&xor (@x[$b2],$xc)",
"&add ($xc_,@x[$d3])",
"&xor (@x[$b3],$xc_)",
"&add (@x[$a2],@x[$b2])",
"&xor (@x[$d2],@x[$a2])",
"&add (@x[$a3],@x[$b3])",
"&xor (@x[$d3],@x[$a3])",
"&add ($xc,@x[$d2])",
"&xor (@x[$b2],$xc)",
"&add ($xc_,@x[$d3])",
"&xor (@x[$b3],$xc_)",
########################################################################
# Generic code path that handles all lengths on pre-SSSE3 processors.
.globl ChaCha20_ctr32
.type ChaCha20_ctr32,\@function,5
mov OPENSSL_ia32cap_P+4(%rip),%r10
$code.=<<___ if ($avx>2);
bt \$48,%r10 # check for AVX512F
test \$`1<<(41-32)`,%r10d
.cfi_adjust_cfa_offset 64+24
#movdqa .Lsigma(%rip),%xmm0
movdqu 16($key),%xmm2
movdqu ($counter),%xmm3
movdqa .Lone(%rip),%xmm4
#movdqa %xmm0,4*0(%rsp) # key[0]
movdqa %xmm1,4*4(%rsp) # key[1]
movdqa %xmm2,4*8(%rsp) # key[2]
movdqa %xmm3,4*12(%rsp) # key[3]
mov $len,%rbp # reassign $len
mov \$0x61707865,@x[0] # 'expa'
mov \$0x3320646e,@x[1] # 'nd 3'
mov \$0x79622d32,@x[2] # '2-by'
mov \$0x6b206574,@x[3] # 'te k'
mov 4*13(%rsp),@x[13]
mov 4*14(%rsp),@x[14]
mov 4*15(%rsp),@x[15]
mov %rbp,64+0(%rsp) # save len
mov $inp,64+8(%rsp) # save inp
movq %xmm2,%rsi # "@x[8]"
mov $out,64+16(%rsp) # save out
shr \$32,%rdi # "@x[9]"
foreach (&ROUND (0, 4, 8,12)) { eval; }
foreach (&ROUND (0, 5,10,15)) { eval; }
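# One &ROUND(0,4,8,12) pass over the columns plus one &ROUND(0,5,10,15)
# pass over the diagonals make a ChaCha "double round"; ten such
# iterations produce the cipher's 20 rounds.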
mov @t[1],4*9(%rsp) # modulo-scheduled
mov 64(%rsp),%rbp # load len
mov 64+8(%rsp),$inp # load inp
paddd %xmm4,%xmm3 # increment counter
mov 64+16(%rsp),$out # load out
add \$0x61707865,@x[0] # 'expa'
add \$0x3320646e,@x[1] # 'nd 3'
add \$0x79622d32,@x[2] # '2-by'
add \$0x6b206574,@x[3] # 'te k'
add 4*12(%rsp),@x[12]
add 4*13(%rsp),@x[13]
add 4*14(%rsp),@x[14]
add 4*15(%rsp),@x[15]
paddd 4*8(%rsp),%xmm1
xor 4*0($inp),@x[0] # xor with input
movdqu 4*8($inp),%xmm0
xor 4*12($inp),@x[12]
xor 4*13($inp),@x[13]
xor 4*14($inp),@x[14]
xor 4*15($inp),@x[15]
lea 4*16($inp),$inp # inp+=64
movdqa %xmm2,4*8(%rsp)
movd %xmm3,4*12(%rsp)
mov @x[0],4*0($out) # write output
movdqu %xmm0,4*8($out)
mov @x[12],4*12($out)
mov @x[13],4*13($out)
mov @x[14],4*14($out)
mov @x[15],4*15($out)
lea 4*16($out),$out # out+=64
movdqa %xmm1,4*8(%rsp)
mov @x[12],4*12(%rsp)
mov @x[13],4*13(%rsp)
mov @x[14],4*14(%rsp)
mov @x[15],4*15(%rsp)
movzb ($inp,%rbx),%eax
movzb (%rsp,%rbx),%edx
mov %al,-1($out,%rbx)
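# the loop above xors the keystream block kept on the stack into the
# remaining 1..63 bytes of input one byte at a time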
lea 64+24+48(%rsp),%rsi
.cfi_def_cfa_register %rsp
.size ChaCha20_ctr32,.-ChaCha20_ctr32
########################################################################
# SSSE3 code path that handles shorter lengths
my ($a,$b,$c,$d,$t,$t1,$rot16,$rot24)=map("%xmm$_",(0..7));
sub SSSE3ROUND { # critical path is 20 "SIMD ticks" per round
my $xframe = $win64 ? 32+8 : 8;
.type ChaCha20_ssse3,\@function,5
mov %rsp,%r9 # frame pointer
.cfi_def_cfa_register %r9
$code.=<<___ if ($avx);
test \$`1<<(43-32)`,%r10d
jnz .LChaCha20_4xop # XOP is fastest even if we use 1/4
cmp \$128,$len # we might throw away some data,
ja .LChaCha20_4x # but overall it won't be slower
sub \$64+$xframe,%rsp
$code.=<<___ if ($win64);
movaps %xmm6,-0x28(%r9)
movaps %xmm7,-0x18(%r9)
movdqa .Lsigma(%rip),$a
movdqa .Lrot16(%rip),$rot16
movdqa .Lrot24(%rip),$rot24
mov \$10,$counter # reuse $counter
movdqa .Lone(%rip),$d
&pshufd ($c,$c,0b01001110);
&pshufd ($b,$b,0b00111001);
&pshufd ($d,$d,0b10010011);
&pshufd ($c,$c,0b01001110);
&pshufd ($b,$b,0b10010011);
&pshufd ($d,$d,0b00111001);
&jnz (".Loop_ssse3");
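# The first &pshufd triplet rotates rows b, c and d so that the next
# &SSSE3ROUND works on the block's diagonals; the second triplet rotates
# them back for the following column round, so no lane extraction is
# ever needed inside the loop.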
movdqu 0x10($inp),$t1
pxor $t,$a # xor with input
movdqu 0x30($inp),$t1
lea 0x40($inp),$inp # inp+=64
movdqu $a,0x00($out) # write output
lea 0x40($out),$out # out+=64
jnz .Loop_outer_ssse3
xor $counter,$counter
movzb ($inp,$counter),%eax
movzb (%rsp,$counter),%ecx
lea 1($counter),$counter
mov %al,-1($out,$counter)
$code.=<<___ if ($win64);
movaps -0x28(%r9),%xmm6
movaps -0x18(%r9),%xmm7
.cfi_def_cfa_register %rsp
.size ChaCha20_ssse3,.-ChaCha20_ssse3
########################################################################
# SSSE3 code path that handles longer messages.
# assign variables to favor Atom front-end
my ($xd0,$xd1,$xd2,$xd3, $xt0,$xt1,$xt2,$xt3,
$xa0,$xa1,$xa2,$xa3, $xb0,$xb1,$xb2,$xb3)=map("%xmm$_",(0..15));
my @xx=($xa0,$xa1,$xa2,$xa3, $xb0,$xb1,$xb2,$xb3,
"%nox","%nox","%nox","%nox", $xd0,$xd1,$xd2,$xd3);
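
# The 4x path below runs four independent 64-byte blocks in parallel:
# each %xmm register carries the same state word from all four blocks,
# one block per 32-bit lane. A minimal sketch (illustrative only, the
# generator never calls it) of the index rotation the lane-round
# generators rely on:
sub _lane_index_demo {
    my @quad = @_;                       # e.g. (0,4,8,12), a column
    map(($_&~3)+(($_+1)&3),@quad);       # -> (1,5,9,13), the next column
}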
sub SSSE3_lane_ROUND {
my ($a0,$b0,$c0,$d0)=@_;
my ($a1,$b1,$c1,$d1)=map(($_&~3)+(($_+1)&3),($a0,$b0,$c0,$d0));
my ($a2,$b2,$c2,$d2)=map(($_&~3)+(($_+1)&3),($a1,$b1,$c1,$d1));
my ($a3,$b3,$c3,$d3)=map(($_&~3)+(($_+1)&3),($a2,$b2,$c2,$d2));
my ($xc,$xc_,$t0,$t1)=map("\"$_\"",$xt0,$xt1,$xt2,$xt3);
my @x=map("\"$_\"",@xx);
# Consider order in which variables are addressed by their
#   0   4   8  12 < even round
#   0   5  10  15 < odd round
# 'a', 'b' and 'd's are permanently allocated in registers,
# @x[0..7,12..15], while 'c's are maintained in memory. If
# you observe the 'c' column, you'll notice that a pair of 'c's
# is invariant between rounds. This means that we have to reload
# them once per round, in the middle. This is why you'll see a
# bunch of 'c' stores and loads in the middle, but none at
# the beginning or the end.
"&paddd (@x[$a0],@x[$b0])", # Q1
"&paddd (@x[$a1],@x[$b1])", # Q2
"&pxor (@x[$d0],@x[$a0])",
"&pxor (@x[$d1],@x[$a1])",
"&pshufb (@x[$d0],$t1)",
"&pshufb (@x[$d1],$t1)",
"&paddd ($xc,@x[$d0])",
"&paddd ($xc_,@x[$d1])",
"&pxor (@x[$b0],$xc)",
"&pxor (@x[$b1],$xc_)",
"&movdqa ($t0,@x[$b0])",
"&pslld (@x[$b0],12)",
"&movdqa ($t1,@x[$b1])",
"&pslld (@x[$b1],12)",
"&por (@x[$b0],$t0)",
"&movdqa ($t0,'(%r11)')", # .Lrot24(%rip)
"&por (@x[$b1],$t1)",
"&paddd (@x[$a0],@x[$b0])",
"&paddd (@x[$a1],@x[$b1])",
"&pxor (@x[$d0],@x[$a0])",
"&pxor (@x[$d1],@x[$a1])",
"&pshufb (@x[$d0],$t0)",
"&pshufb (@x[$d1],$t0)",
"&paddd ($xc,@x[$d0])",
"&paddd ($xc_,@x[$d1])",
"&pxor (@x[$b0],$xc)",
"&pxor (@x[$b1],$xc_)",
"&movdqa ($t1,@x[$b0])",
"&pslld (@x[$b0],7)",
"&movdqa ($t0,@x[$b1])",
"&pslld (@x[$b1],7)",
"&por (@x[$b0],$t1)",
"&movdqa ($t1,'(%r10)')", # .Lrot16(%rip)
"&por (@x[$b1],$t0)",
"&movdqa (\"`16*($c0-8)`(%rsp)\",$xc)", # reload pair of 'c's
"&movdqa (\"`16*($c1-8)`(%rsp)\",$xc_)",
"&movdqa ($xc,\"`16*($c2-8)`(%rsp)\")",
"&movdqa ($xc_,\"`16*($c3-8)`(%rsp)\")",
"&paddd (@x[$a2],@x[$b2])", # Q3
"&paddd (@x[$a3],@x[$b3])", # Q4
"&pxor (@x[$d2],@x[$a2])",
"&pxor (@x[$d3],@x[$a3])",
"&pshufb (@x[$d2],$t1)",
"&pshufb (@x[$d3],$t1)",
"&paddd ($xc,@x[$d2])",
"&paddd ($xc_,@x[$d3])",
"&pxor (@x[$b2],$xc)",
"&pxor (@x[$b3],$xc_)",
"&movdqa ($t0,@x[$b2])",
"&pslld (@x[$b2],12)",
"&movdqa ($t1,@x[$b3])",
"&pslld (@x[$b3],12)",
"&por (@x[$b2],$t0)",
"&movdqa ($t0,'(%r11)')", # .Lrot24(%rip)
"&por (@x[$b3],$t1)",
"&paddd (@x[$a2],@x[$b2])",
"&paddd (@x[$a3],@x[$b3])",
"&pxor (@x[$d2],@x[$a2])",
"&pxor (@x[$d3],@x[$a3])",
"&pshufb (@x[$d2],$t0)",
"&pshufb (@x[$d3],$t0)",
"&paddd ($xc,@x[$d2])",
"&paddd ($xc_,@x[$d3])",
"&pxor (@x[$b2],$xc)",
"&pxor (@x[$b3],$xc_)",
"&movdqa ($t1,@x[$b2])",
"&pslld (@x[$b2],7)",
"&movdqa ($t0,@x[$b3])",
"&pslld (@x[$b3],7)",
"&por (@x[$b2],$t1)",
"&movdqa ($t1,'(%r10)')", # .Lrot16(%rip)
my $xframe = $win64 ? 0xa8 : 8;
.type ChaCha20_4x,\@function,5
mov %rsp,%r9 # frame pointer
.cfi_def_cfa_register %r9
$code.=<<___ if ($avx>1);
shr \$32,%r10 # OPENSSL_ia32cap_P+8
test \$`1<<5`,%r10 # test AVX2
and \$`1<<26|1<<22`,%r11 # isolate XSAVE+MOVBE
cmp \$`1<<22`,%r11 # check for MOVBE without XSAVE
je .Ldo_sse3_after_all # to detect Atom
sub \$0x140+$xframe,%rsp
################ stack layout
# +0x00 SIMD equivalent of @x[8-12]
# +0x40 constant copy of key[0-2] smashed by lanes
# +0x100 SIMD counters (with nonce smashed by lanes)
$code.=<<___ if ($win64);
movaps %xmm6,-0xa8(%r9)
movaps %xmm7,-0x98(%r9)
movaps %xmm8,-0x88(%r9)
movaps %xmm9,-0x78(%r9)
movaps %xmm10,-0x68(%r9)
movaps %xmm11,-0x58(%r9)
movaps %xmm12,-0x48(%r9)
movaps %xmm13,-0x38(%r9)
movaps %xmm14,-0x28(%r9)
movaps %xmm15,-0x18(%r9)
movdqa .Lsigma(%rip),$xa3 # key[0]
movdqu ($key),$xb3 # key[1]
movdqu 16($key),$xt3 # key[2]
movdqu ($counter),$xd3 # key[3]
lea 0x100(%rsp),%rcx # size optimization
lea .Lrot16(%rip),%r10
lea .Lrot24(%rip),%r11
pshufd \$0x00,$xa3,$xa0 # smash key by lanes...
pshufd \$0x55,$xa3,$xa1
movdqa $xa0,0x40(%rsp) # ... and offload
pshufd \$0xaa,$xa3,$xa2
movdqa $xa1,0x50(%rsp)
pshufd \$0xff,$xa3,$xa3
movdqa $xa2,0x60(%rsp)
movdqa $xa3,0x70(%rsp)
pshufd \$0x00,$xb3,$xb0
pshufd \$0x55,$xb3,$xb1
movdqa $xb0,0x80-0x100(%rcx)
pshufd \$0xaa,$xb3,$xb2
movdqa $xb1,0x90-0x100(%rcx)
pshufd \$0xff,$xb3,$xb3
movdqa $xb2,0xa0-0x100(%rcx)
movdqa $xb3,0xb0-0x100(%rcx)
pshufd \$0x00,$xt3,$xt0 # "$xc0"
pshufd \$0x55,$xt3,$xt1 # "$xc1"
movdqa $xt0,0xc0-0x100(%rcx)
pshufd \$0xaa,$xt3,$xt2 # "$xc2"
movdqa $xt1,0xd0-0x100(%rcx)
pshufd \$0xff,$xt3,$xt3 # "$xc3"
movdqa $xt2,0xe0-0x100(%rcx)
movdqa $xt3,0xf0-0x100(%rcx)
pshufd \$0x00,$xd3,$xd0
pshufd \$0x55,$xd3,$xd1
paddd .Linc(%rip),$xd0 # don't save counters yet
pshufd \$0xaa,$xd3,$xd2
movdqa $xd1,0x110-0x100(%rcx)
pshufd \$0xff,$xd3,$xd3
movdqa $xd2,0x120-0x100(%rcx)
movdqa $xd3,0x130-0x100(%rcx)
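# at this point every 32-bit word of key and nonce/counter material has
# been broadcast across all four lanes (pshufd with selectors
# 0x00/0x55/0xaa/0xff) and parked on the stack for the rounds to reload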
movdqa 0x40(%rsp),$xa0 # re-load smashed key
movdqa 0x50(%rsp),$xa1
movdqa 0x60(%rsp),$xa2
movdqa 0x70(%rsp),$xa3
movdqa 0x80-0x100(%rcx),$xb0
movdqa 0x90-0x100(%rcx),$xb1
movdqa 0xa0-0x100(%rcx),$xb2
movdqa 0xb0-0x100(%rcx),$xb3
movdqa 0xc0-0x100(%rcx),$xt0 # "$xc0"
movdqa 0xd0-0x100(%rcx),$xt1 # "$xc1"
movdqa 0xe0-0x100(%rcx),$xt2 # "$xc2"
movdqa 0xf0-0x100(%rcx),$xt3 # "$xc3"
movdqa 0x100-0x100(%rcx),$xd0
movdqa 0x110-0x100(%rcx),$xd1
movdqa 0x120-0x100(%rcx),$xd2
movdqa 0x130-0x100(%rcx),$xd3
paddd .Lfour(%rip),$xd0 # next SIMD counters
movdqa $xt2,0x20(%rsp) # SIMD equivalent of "@x[10]"
movdqa $xt3,0x30(%rsp) # SIMD equivalent of "@x[11]"
movdqa (%r10),$xt3 # .Lrot16(%rip)
movdqa $xd0,0x100-0x100(%rcx) # save SIMD counters
foreach (&SSSE3_lane_ROUND(0, 4, 8,12)) { eval; }
foreach (&SSSE3_lane_ROUND(0, 5,10,15)) { eval; }
paddd 0x40(%rsp),$xa0 # accumulate key material
paddd 0x50(%rsp),$xa1
paddd 0x60(%rsp),$xa2
paddd 0x70(%rsp),$xa3
movdqa $xa0,$xt2 # "de-interlace" data
punpcklqdq $xa2,$xa0 # "a0"
punpcklqdq $xt3,$xt2 # "a2"
punpckhqdq $xa2,$xa1 # "a1"
punpckhqdq $xt3,$xa3 # "a3"
($xa2,$xt2)=($xt2,$xa2);
paddd 0x80-0x100(%rcx),$xb0
paddd 0x90-0x100(%rcx),$xb1
paddd 0xa0-0x100(%rcx),$xb2
paddd 0xb0-0x100(%rcx),$xb3
movdqa $xa0,0x00(%rsp) # offload $xaN
movdqa $xa1,0x10(%rsp)
movdqa 0x20(%rsp),$xa0 # "xc2"
movdqa 0x30(%rsp),$xa1 # "xc3"
punpcklqdq $xb2,$xb0 # "b0"
punpcklqdq $xt3,$xt2 # "b2"
punpckhqdq $xb2,$xb1 # "b1"
punpckhqdq $xt3,$xb3 # "b3"
($xb2,$xt2)=($xt2,$xb2);
my ($xc0,$xc1,$xc2,$xc3)=($xt0,$xt1,$xa0,$xa1);
paddd 0xc0-0x100(%rcx),$xc0
paddd 0xd0-0x100(%rcx),$xc1
paddd 0xe0-0x100(%rcx),$xc2
paddd 0xf0-0x100(%rcx),$xc3
movdqa $xa2,0x20(%rsp) # keep offloading $xaN
movdqa $xa3,0x30(%rsp)
punpcklqdq $xc2,$xc0 # "c0"
punpcklqdq $xt3,$xt2 # "c2"
punpckhqdq $xc2,$xc1 # "c1"
punpckhqdq $xt3,$xc3 # "c3"
($xc2,$xt2)=($xt2,$xc2);
($xt0,$xt1)=($xa2,$xa3); # use $xaN as temporary
paddd 0x100-0x100(%rcx),$xd0
paddd 0x110-0x100(%rcx),$xd1
paddd 0x120-0x100(%rcx),$xd2
paddd 0x130-0x100(%rcx),$xd3
punpcklqdq $xd2,$xd0 # "d0"
punpcklqdq $xt3,$xt2 # "d2"
punpckhqdq $xd2,$xd1 # "d1"
punpckhqdq $xt3,$xd3 # "d3"
($xd2,$xt2)=($xt2,$xd2);
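# the punpck{l,h}qdq sequences above transpose the matrix of 32-bit
# words spread over four registers, turning "same state word from four
# blocks per register" back into "64 contiguous bytes per block",
# ready to be xored with the input stream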
movdqu 0x00($inp),$xt0 # xor with input
movdqu 0x10($inp),$xt1
movdqu 0x20($inp),$xt2
movdqu 0x30($inp),$xt3
pxor 0x00(%rsp),$xt0 # $xaN is offloaded, remember?
movdqu $xt0,0x00($out)
movdqu 0x40($inp),$xt0
movdqu $xt1,0x10($out)
movdqu 0x50($inp),$xt1
movdqu $xt2,0x20($out)
movdqu 0x60($inp),$xt2
movdqu $xt3,0x30($out)
movdqu 0x70($inp),$xt3
lea 0x80($inp),$inp # size optimization
movdqu $xt0,0x40($out)
movdqu 0x00($inp),$xt0
movdqu $xt1,0x50($out)
movdqu 0x10($inp),$xt1
movdqu $xt2,0x60($out)
movdqu 0x20($inp),$xt2
movdqu $xt3,0x70($out)
lea 0x80($out),$out # size optimization
movdqu 0x30($inp),$xt3
movdqu $xt0,0x00($out)
movdqu 0x40($inp),$xt0
movdqu $xt1,0x10($out)
movdqu 0x50($inp),$xt1
movdqu $xt2,0x20($out)
movdqu 0x60($inp),$xt2
movdqu $xt3,0x30($out)
movdqu 0x70($inp),$xt3
lea 0x80($inp),$inp # inp+=64*4
movdqu $xt0,0x40($out)
movdqu $xt1,0x50($out)
movdqu $xt2,0x60($out)
movdqu $xt3,0x70($out)
lea 0x80($out),$out # out+=64*4
#movdqa 0x00(%rsp),$xt0 # $xaN is offloaded, remember?
#movdqa $xt0,0x00(%rsp)
movdqa $xb0,0x10(%rsp)
movdqa $xc0,0x20(%rsp)
movdqa $xd0,0x30(%rsp)
movdqu 0x00($inp),$xt0 # xor with input
movdqu 0x10($inp),$xt1
movdqu 0x20($inp),$xt2
movdqu 0x30($inp),$xt3
pxor 0x00(%rsp),$xt0 # $xaN is offloaded, remember?
movdqu $xt0,0x00($out)
movdqu $xt1,0x10($out)
movdqu $xt2,0x20($out)
movdqu $xt3,0x30($out)
movdqa 0x10(%rsp),$xt0 # $xaN is offloaded, remember?
lea 0x40($inp),$inp # inp+=64*1
movdqa $xt0,0x00(%rsp)
movdqa $xb1,0x10(%rsp)
lea 0x40($out),$out # out+=64*1
movdqa $xc1,0x20(%rsp)
sub \$64,$len # len-=64*1
movdqa $xd1,0x30(%rsp)
movdqu 0x00($inp),$xt0 # xor with input
movdqu 0x10($inp),$xt1
movdqu 0x20($inp),$xt2
movdqu 0x30($inp),$xt3
pxor 0x00(%rsp),$xt0 # $xaN is offloaded, remember?
movdqu $xt0,0x00($out)
movdqu 0x40($inp),$xt0
movdqu $xt1,0x10($out)
movdqu 0x50($inp),$xt1
movdqu $xt2,0x20($out)
movdqu 0x60($inp),$xt2
movdqu $xt3,0x30($out)
movdqu 0x70($inp),$xt3
pxor 0x10(%rsp),$xt0
movdqu $xt0,0x40($out)
movdqu $xt1,0x50($out)
movdqu $xt2,0x60($out)
movdqu $xt3,0x70($out)
movdqa 0x20(%rsp),$xt0 # $xaN is offloaded, remember?
lea 0x80($inp),$inp # inp+=64*2
movdqa $xt0,0x00(%rsp)
movdqa $xb2,0x10(%rsp)
lea 0x80($out),$out # out+=64*2
movdqa $xc2,0x20(%rsp)
sub \$128,$len # len-=64*2
movdqa $xd2,0x30(%rsp)
movdqu 0x00($inp),$xt0 # xor with input
movdqu 0x10($inp),$xt1
movdqu 0x20($inp),$xt2
movdqu 0x30($inp),$xt3
pxor 0x00(%rsp),$xt0 # $xaN is offloaded, remember?
movdqu $xt0,0x00($out)
movdqu 0x40($inp),$xt0
movdqu $xt1,0x10($out)
movdqu 0x50($inp),$xt1
movdqu $xt2,0x20($out)
movdqu 0x60($inp),$xt2
movdqu $xt3,0x30($out)
movdqu 0x70($inp),$xt3
lea 0x80($inp),$inp # size optimization
pxor 0x10(%rsp),$xt0
movdqu $xt0,0x40($out)
movdqu 0x00($inp),$xt0
movdqu $xt1,0x50($out)
movdqu 0x10($inp),$xt1
movdqu $xt2,0x60($out)
movdqu 0x20($inp),$xt2
movdqu $xt3,0x70($out)
lea 0x80($out),$out # size optimization
movdqu 0x30($inp),$xt3
pxor 0x20(%rsp),$xt0
movdqu $xt0,0x00($out)
movdqu $xt1,0x10($out)
movdqu $xt2,0x20($out)
movdqu $xt3,0x30($out)
movdqa 0x30(%rsp),$xt0 # $xaN is offloaded, remember?
lea 0x40($inp),$inp # inp+=64*3
movdqa $xt0,0x00(%rsp)
movdqa $xb3,0x10(%rsp)
lea 0x40($out),$out # out+=64*3
movdqa $xc3,0x20(%rsp)
sub \$192,$len # len-=64*3
movdqa $xd3,0x30(%rsp)
movzb ($inp,%r10),%eax
movzb (%rsp,%r10),%ecx
mov %al,-1($out,%r10)
$code.=<<___ if ($win64);
movaps -0xa8(%r9),%xmm6
movaps -0x98(%r9),%xmm7
movaps -0x88(%r9),%xmm8
movaps -0x78(%r9),%xmm9
movaps -0x68(%r9),%xmm10
movaps -0x58(%r9),%xmm11
movaps -0x48(%r9),%xmm12
movaps -0x38(%r9),%xmm13
movaps -0x28(%r9),%xmm14
movaps -0x18(%r9),%xmm15
.cfi_def_cfa_register %rsp
.size ChaCha20_4x,.-ChaCha20_4x
########################################################################
# XOP code path that handles all lengths.
# There is some "anomaly" observed depending on instruction size or
# alignment. If you look closely at the code below, you'll notice that
# the argument order sometimes varies. The order affects instruction
# encoding by making it larger; such fiddling gives a 5% performance
# improvement, measured on FX-4100...
my ($xb0,$xb1,$xb2,$xb3, $xd0,$xd1,$xd2,$xd3,
$xa0,$xa1,$xa2,$xa3, $xt0,$xt1,$xt2,$xt3)=map("%xmm$_",(0..15));
my @xx=($xa0,$xa1,$xa2,$xa3, $xb0,$xb1,$xb2,$xb3,
$xt0,$xt1,$xt2,$xt3, $xd0,$xd1,$xd2,$xd3);
sub XOP_lane_ROUND {
my ($a0,$b0,$c0,$d0)=@_;
my ($a1,$b1,$c1,$d1)=map(($_&~3)+(($_+1)&3),($a0,$b0,$c0,$d0));
my ($a2,$b2,$c2,$d2)=map(($_&~3)+(($_+1)&3),($a1,$b1,$c1,$d1));
my ($a3,$b3,$c3,$d3)=map(($_&~3)+(($_+1)&3),($a2,$b2,$c2,$d2));
my @x=map("\"$_\"",@xx);
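# XOP's vprotd rotates in a single instruction, so unlike the SSSE3 and
# AVX2 lane rounds there are no shift/shift/or triplets and no
# rot16/rot24 constants to juggle; with the 'c' words kept in $xt0..3,
# all four quarter-rounds fit in registers at once.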
"&vpaddd (@x[$a0],@x[$a0],@x[$b0])", # Q1
"&vpaddd (@x[$a1],@x[$a1],@x[$b1])", # Q2
"&vpaddd (@x[$a2],@x[$a2],@x[$b2])", # Q3
"&vpaddd (@x[$a3],@x[$a3],@x[$b3])", # Q4
"&vpxor (@x[$d0],@x[$a0],@x[$d0])",
"&vpxor (@x[$d1],@x[$a1],@x[$d1])",
"&vpxor (@x[$d2],@x[$a2],@x[$d2])",
"&vpxor (@x[$d3],@x[$a3],@x[$d3])",
"&vprotd (@x[$d0],@x[$d0],16)",
"&vprotd (@x[$d1],@x[$d1],16)",
"&vprotd (@x[$d2],@x[$d2],16)",
"&vprotd (@x[$d3],@x[$d3],16)",
"&vpaddd (@x[$c0],@x[$c0],@x[$d0])",
"&vpaddd (@x[$c1],@x[$c1],@x[$d1])",
"&vpaddd (@x[$c2],@x[$c2],@x[$d2])",
"&vpaddd (@x[$c3],@x[$c3],@x[$d3])",
"&vpxor (@x[$b0],@x[$c0],@x[$b0])",
"&vpxor (@x[$b1],@x[$c1],@x[$b1])",
"&vpxor (@x[$b2],@x[$b2],@x[$c2])", # flip
"&vpxor (@x[$b3],@x[$b3],@x[$c3])", # flip
"&vprotd (@x[$b0],@x[$b0],12)",
"&vprotd (@x[$b1],@x[$b1],12)",
"&vprotd (@x[$b2],@x[$b2],12)",
"&vprotd (@x[$b3],@x[$b3],12)",
"&vpaddd (@x[$a0],@x[$b0],@x[$a0])", # flip
"&vpaddd (@x[$a1],@x[$b1],@x[$a1])", # flip
"&vpaddd (@x[$a2],@x[$a2],@x[$b2])",
"&vpaddd (@x[$a3],@x[$a3],@x[$b3])",
"&vpxor (@x[$d0],@x[$a0],@x[$d0])",
"&vpxor (@x[$d1],@x[$a1],@x[$d1])",
"&vpxor (@x[$d2],@x[$a2],@x[$d2])",
"&vpxor (@x[$d3],@x[$a3],@x[$d3])",
"&vprotd (@x[$d0],@x[$d0],8)",
"&vprotd (@x[$d1],@x[$d1],8)",
"&vprotd (@x[$d2],@x[$d2],8)",
"&vprotd (@x[$d3],@x[$d3],8)",
"&vpaddd (@x[$c0],@x[$c0],@x[$d0])",
"&vpaddd (@x[$c1],@x[$c1],@x[$d1])",
"&vpaddd (@x[$c2],@x[$c2],@x[$d2])",
"&vpaddd (@x[$c3],@x[$c3],@x[$d3])",
"&vpxor (@x[$b0],@x[$c0],@x[$b0])",
"&vpxor (@x[$b1],@x[$c1],@x[$b1])",
"&vpxor (@x[$b2],@x[$b2],@x[$c2])", # flip
"&vpxor (@x[$b3],@x[$b3],@x[$c3])", # flip
"&vprotd (@x[$b0],@x[$b0],7)",
"&vprotd (@x[$b1],@x[$b1],7)",
"&vprotd (@x[$b2],@x[$b2],7)",
"&vprotd (@x[$b3],@x[$b3],7)"
my $xframe = $win64 ? 0xa8 : 8;
.type ChaCha20_4xop,\@function,5
mov %rsp,%r9 # frame pointer
.cfi_def_cfa_register %r9
sub \$0x140+$xframe,%rsp
################ stack layout
# +0x00 SIMD equivalent of @x[8-12]
# +0x40 constant copy of key[0-2] smashed by lanes
# +0x100 SIMD counters (with nonce smashed by lanes)
$code.=<<___ if ($win64);
movaps %xmm6,-0xa8(%r9)
movaps %xmm7,-0x98(%r9)
movaps %xmm8,-0x88(%r9)
movaps %xmm9,-0x78(%r9)
movaps %xmm10,-0x68(%r9)
movaps %xmm11,-0x58(%r9)
movaps %xmm12,-0x48(%r9)
movaps %xmm13,-0x38(%r9)
movaps %xmm14,-0x28(%r9)
movaps %xmm15,-0x18(%r9)
vmovdqa .Lsigma(%rip),$xa3 # key[0]
vmovdqu ($key),$xb3 # key[1]
vmovdqu 16($key),$xt3 # key[2]
vmovdqu ($counter),$xd3 # key[3]
lea 0x100(%rsp),%rcx # size optimization
vpshufd \$0x00,$xa3,$xa0 # smash key by lanes...
vpshufd \$0x55,$xa3,$xa1
vmovdqa $xa0,0x40(%rsp) # ... and offload
vpshufd \$0xaa,$xa3,$xa2
vmovdqa $xa1,0x50(%rsp)
vpshufd \$0xff,$xa3,$xa3
vmovdqa $xa2,0x60(%rsp)
vmovdqa $xa3,0x70(%rsp)
vpshufd \$0x00,$xb3,$xb0
vpshufd \$0x55,$xb3,$xb1
vmovdqa $xb0,0x80-0x100(%rcx)
vpshufd \$0xaa,$xb3,$xb2
vmovdqa $xb1,0x90-0x100(%rcx)
vpshufd \$0xff,$xb3,$xb3
vmovdqa $xb2,0xa0-0x100(%rcx)
vmovdqa $xb3,0xb0-0x100(%rcx)
vpshufd \$0x00,$xt3,$xt0 # "$xc0"
vpshufd \$0x55,$xt3,$xt1 # "$xc1"
vmovdqa $xt0,0xc0-0x100(%rcx)
vpshufd \$0xaa,$xt3,$xt2 # "$xc2"
vmovdqa $xt1,0xd0-0x100(%rcx)
vpshufd \$0xff,$xt3,$xt3 # "$xc3"
vmovdqa $xt2,0xe0-0x100(%rcx)
vmovdqa $xt3,0xf0-0x100(%rcx)
vpshufd \$0x00,$xd3,$xd0
vpshufd \$0x55,$xd3,$xd1
vpaddd .Linc(%rip),$xd0,$xd0 # don't save counters yet
vpshufd \$0xaa,$xd3,$xd2
vmovdqa $xd1,0x110-0x100(%rcx)
vpshufd \$0xff,$xd3,$xd3
vmovdqa $xd2,0x120-0x100(%rcx)
vmovdqa $xd3,0x130-0x100(%rcx)
vmovdqa 0x40(%rsp),$xa0 # re-load smashed key
vmovdqa 0x50(%rsp),$xa1
vmovdqa 0x60(%rsp),$xa2
vmovdqa 0x70(%rsp),$xa3
vmovdqa 0x80-0x100(%rcx),$xb0
vmovdqa 0x90-0x100(%rcx),$xb1
vmovdqa 0xa0-0x100(%rcx),$xb2
vmovdqa 0xb0-0x100(%rcx),$xb3
vmovdqa 0xc0-0x100(%rcx),$xt0 # "$xc0"
vmovdqa 0xd0-0x100(%rcx),$xt1 # "$xc1"
vmovdqa 0xe0-0x100(%rcx),$xt2 # "$xc2"
vmovdqa 0xf0-0x100(%rcx),$xt3 # "$xc3"
vmovdqa 0x100-0x100(%rcx),$xd0
vmovdqa 0x110-0x100(%rcx),$xd1
vmovdqa 0x120-0x100(%rcx),$xd2
vmovdqa 0x130-0x100(%rcx),$xd3
vpaddd .Lfour(%rip),$xd0,$xd0 # next SIMD counters
vmovdqa $xd0,0x100-0x100(%rcx) # save SIMD counters
foreach (&XOP_lane_ROUND(0, 4, 8,12)) { eval; }
foreach (&XOP_lane_ROUND(0, 5,10,15)) { eval; }
vpaddd 0x40(%rsp),$xa0,$xa0 # accumulate key material
vpaddd 0x50(%rsp),$xa1,$xa1
vpaddd 0x60(%rsp),$xa2,$xa2
vpaddd 0x70(%rsp),$xa3,$xa3
vmovdqa $xt2,0x20(%rsp) # offload $xc2,3
vmovdqa $xt3,0x30(%rsp)
vpunpckldq $xa1,$xa0,$xt2 # "de-interlace" data
vpunpckldq $xa3,$xa2,$xt3
vpunpckhdq $xa1,$xa0,$xa0
vpunpckhdq $xa3,$xa2,$xa2
vpunpcklqdq $xt3,$xt2,$xa1 # "a0"
vpunpckhqdq $xt3,$xt2,$xt2 # "a1"
vpunpcklqdq $xa2,$xa0,$xa3 # "a2"
vpunpckhqdq $xa2,$xa0,$xa0 # "a3"
($xa0,$xa1,$xa2,$xa3,$xt2)=($xa1,$xt2,$xa3,$xa0,$xa2);
vpaddd 0x80-0x100(%rcx),$xb0,$xb0
vpaddd 0x90-0x100(%rcx),$xb1,$xb1
vpaddd 0xa0-0x100(%rcx),$xb2,$xb2
vpaddd 0xb0-0x100(%rcx),$xb3,$xb3
vmovdqa $xa0,0x00(%rsp) # offload $xa0,1
vmovdqa $xa1,0x10(%rsp)
vmovdqa 0x20(%rsp),$xa0 # "xc2"
vmovdqa 0x30(%rsp),$xa1 # "xc3"
vpunpckldq $xb1,$xb0,$xt2
vpunpckldq $xb3,$xb2,$xt3
vpunpckhdq $xb1,$xb0,$xb0
vpunpckhdq $xb3,$xb2,$xb2
vpunpcklqdq $xt3,$xt2,$xb1 # "b0"
vpunpckhqdq $xt3,$xt2,$xt2 # "b1"
vpunpcklqdq $xb2,$xb0,$xb3 # "b2"
vpunpckhqdq $xb2,$xb0,$xb0 # "b3"
($xb0,$xb1,$xb2,$xb3,$xt2)=($xb1,$xt2,$xb3,$xb0,$xb2);
my ($xc0,$xc1,$xc2,$xc3)=($xt0,$xt1,$xa0,$xa1);
vpaddd 0xc0-0x100(%rcx),$xc0,$xc0
vpaddd 0xd0-0x100(%rcx),$xc1,$xc1
vpaddd 0xe0-0x100(%rcx),$xc2,$xc2
vpaddd 0xf0-0x100(%rcx),$xc3,$xc3
vpunpckldq $xc1,$xc0,$xt2
vpunpckldq $xc3,$xc2,$xt3
vpunpckhdq $xc1,$xc0,$xc0
vpunpckhdq $xc3,$xc2,$xc2
vpunpcklqdq $xt3,$xt2,$xc1 # "c0"
vpunpckhqdq $xt3,$xt2,$xt2 # "c1"
vpunpcklqdq $xc2,$xc0,$xc3 # "c2"
vpunpckhqdq $xc2,$xc0,$xc0 # "c3"
($xc0,$xc1,$xc2,$xc3,$xt2)=($xc1,$xt2,$xc3,$xc0,$xc2);
vpaddd 0x100-0x100(%rcx),$xd0,$xd0
vpaddd 0x110-0x100(%rcx),$xd1,$xd1
vpaddd 0x120-0x100(%rcx),$xd2,$xd2
vpaddd 0x130-0x100(%rcx),$xd3,$xd3
vpunpckldq $xd1,$xd0,$xt2
vpunpckldq $xd3,$xd2,$xt3
vpunpckhdq $xd1,$xd0,$xd0
vpunpckhdq $xd3,$xd2,$xd2
vpunpcklqdq $xt3,$xt2,$xd1 # "d0"
vpunpckhqdq $xt3,$xt2,$xt2 # "d1"
vpunpcklqdq $xd2,$xd0,$xd3 # "d2"
vpunpckhqdq $xd2,$xd0,$xd0 # "d3"
($xd0,$xd1,$xd2,$xd3,$xt2)=($xd1,$xt2,$xd3,$xd0,$xd2);
($xa0,$xa1)=($xt2,$xt3);
vmovdqa 0x00(%rsp),$xa0 # restore $xa0,1
vmovdqa 0x10(%rsp),$xa1
vpxor 0x00($inp),$xa0,$xa0 # xor with input
vpxor 0x10($inp),$xb0,$xb0
vpxor 0x20($inp),$xc0,$xc0
vpxor 0x30($inp),$xd0,$xd0
vpxor 0x40($inp),$xa1,$xa1
vpxor 0x50($inp),$xb1,$xb1
vpxor 0x60($inp),$xc1,$xc1
vpxor 0x70($inp),$xd1,$xd1
lea 0x80($inp),$inp # size optimization
vpxor 0x00($inp),$xa2,$xa2
vpxor 0x10($inp),$xb2,$xb2
vpxor 0x20($inp),$xc2,$xc2
vpxor 0x30($inp),$xd2,$xd2
vpxor 0x40($inp),$xa3,$xa3
vpxor 0x50($inp),$xb3,$xb3
vpxor 0x60($inp),$xc3,$xc3
vpxor 0x70($inp),$xd3,$xd3
lea 0x80($inp),$inp # inp+=64*4
vmovdqu $xa0,0x00($out)
vmovdqu $xb0,0x10($out)
vmovdqu $xc0,0x20($out)
vmovdqu $xd0,0x30($out)
vmovdqu $xa1,0x40($out)
vmovdqu $xb1,0x50($out)
vmovdqu $xc1,0x60($out)
vmovdqu $xd1,0x70($out)
lea 0x80($out),$out # size optimization
vmovdqu $xa2,0x00($out)
vmovdqu $xb2,0x10($out)
vmovdqu $xc2,0x20($out)
vmovdqu $xd2,0x30($out)
vmovdqu $xa3,0x40($out)
vmovdqu $xb3,0x50($out)
vmovdqu $xc3,0x60($out)
vmovdqu $xd3,0x70($out)
lea 0x80($out),$out # out+=64*4
jae .L192_or_more4xop
jae .L128_or_more4xop
jae .L64_or_more4xop
vmovdqa $xa0,0x00(%rsp)
vmovdqa $xb0,0x10(%rsp)
vmovdqa $xc0,0x20(%rsp)
vmovdqa $xd0,0x30(%rsp)
vpxor 0x00($inp),$xa0,$xa0 # xor with input
vpxor 0x10($inp),$xb0,$xb0
vpxor 0x20($inp),$xc0,$xc0
vpxor 0x30($inp),$xd0,$xd0
vmovdqu $xa0,0x00($out)
vmovdqu $xb0,0x10($out)
vmovdqu $xc0,0x20($out)
vmovdqu $xd0,0x30($out)
lea 0x40($inp),$inp # inp+=64*1
vmovdqa $xa1,0x00(%rsp)
vmovdqa $xb1,0x10(%rsp)
lea 0x40($out),$out # out+=64*1
vmovdqa $xc1,0x20(%rsp)
sub \$64,$len # len-=64*1
vmovdqa $xd1,0x30(%rsp)
vpxor 0x00($inp),$xa0,$xa0 # xor with input
vpxor 0x10($inp),$xb0,$xb0
vpxor 0x20($inp),$xc0,$xc0
vpxor 0x30($inp),$xd0,$xd0
vpxor 0x40($inp),$xa1,$xa1
vpxor 0x50($inp),$xb1,$xb1
vpxor 0x60($inp),$xc1,$xc1
vpxor 0x70($inp),$xd1,$xd1
vmovdqu $xa0,0x00($out)
vmovdqu $xb0,0x10($out)
vmovdqu $xc0,0x20($out)
vmovdqu $xd0,0x30($out)
vmovdqu $xa1,0x40($out)
vmovdqu $xb1,0x50($out)
vmovdqu $xc1,0x60($out)
vmovdqu $xd1,0x70($out)
lea 0x80($inp),$inp # inp+=64*2
vmovdqa $xa2,0x00(%rsp)
vmovdqa $xb2,0x10(%rsp)
lea 0x80($out),$out # out+=64*2
vmovdqa $xc2,0x20(%rsp)
sub \$128,$len # len-=64*2
vmovdqa $xd2,0x30(%rsp)
vpxor 0x00($inp),$xa0,$xa0 # xor with input
vpxor 0x10($inp),$xb0,$xb0
vpxor 0x20($inp),$xc0,$xc0
vpxor 0x30($inp),$xd0,$xd0
vpxor 0x40($inp),$xa1,$xa1
vpxor 0x50($inp),$xb1,$xb1
vpxor 0x60($inp),$xc1,$xc1
vpxor 0x70($inp),$xd1,$xd1
lea 0x80($inp),$inp # size optimization
vpxor 0x00($inp),$xa2,$xa2
vpxor 0x10($inp),$xb2,$xb2
vpxor 0x20($inp),$xc2,$xc2
vpxor 0x30($inp),$xd2,$xd2
vmovdqu $xa0,0x00($out)
vmovdqu $xb0,0x10($out)
vmovdqu $xc0,0x20($out)
vmovdqu $xd0,0x30($out)
vmovdqu $xa1,0x40($out)
vmovdqu $xb1,0x50($out)
vmovdqu $xc1,0x60($out)
vmovdqu $xd1,0x70($out)
lea 0x80($out),$out # size optimization
vmovdqu $xa2,0x00($out)
vmovdqu $xb2,0x10($out)
vmovdqu $xc2,0x20($out)
vmovdqu $xd2,0x30($out)
lea 0x40($inp),$inp # inp+=64*3
vmovdqa $xa3,0x00(%rsp)
vmovdqa $xb3,0x10(%rsp)
lea 0x40($out),$out # out+=64*3
vmovdqa $xc3,0x20(%rsp)
sub \$192,$len # len-=64*3
vmovdqa $xd3,0x30(%rsp)
movzb ($inp,%r10),%eax
movzb (%rsp,%r10),%ecx
mov %al,-1($out,%r10)
$code.=<<___ if ($win64);
movaps -0xa8(%r9),%xmm6
movaps -0x98(%r9),%xmm7
movaps -0x88(%r9),%xmm8
movaps -0x78(%r9),%xmm9
movaps -0x68(%r9),%xmm10
movaps -0x58(%r9),%xmm11
movaps -0x48(%r9),%xmm12
movaps -0x38(%r9),%xmm13
movaps -0x28(%r9),%xmm14
movaps -0x18(%r9),%xmm15
.cfi_def_cfa_register %rsp
.size ChaCha20_4xop,.-ChaCha20_4xop
########################################################################
# AVX2 code path.
my ($xb0,$xb1,$xb2,$xb3, $xd0,$xd1,$xd2,$xd3,
$xa0,$xa1,$xa2,$xa3, $xt0,$xt1,$xt2,$xt3)=map("%ymm$_",(0..15));
my @xx=($xa0,$xa1,$xa2,$xa3, $xb0,$xb1,$xb2,$xb3,
"%nox","%nox","%nox","%nox", $xd0,$xd1,$xd2,$xd3);
sub AVX2_lane_ROUND {
my ($a0,$b0,$c0,$d0)=@_;
my ($a1,$b1,$c1,$d1)=map(($_&~3)+(($_+1)&3),($a0,$b0,$c0,$d0));
my ($a2,$b2,$c2,$d2)=map(($_&~3)+(($_+1)&3),($a1,$b1,$c1,$d1));
my ($a3,$b3,$c3,$d3)=map(($_&~3)+(($_+1)&3),($a2,$b2,$c2,$d2));
my ($xc,$xc_,$t0,$t1)=map("\"$_\"",$xt0,$xt1,$xt2,$xt3);
my @x=map("\"$_\"",@xx);
# Consider order in which variables are addressed by their
#   0   4   8  12 < even round
#   0   5  10  15 < odd round
# 'a', 'b' and 'd's are permanently allocated in registers,
# @x[0..7,12..15], while 'c's are maintained in memory. If
# you observe the 'c' column, you'll notice that a pair of 'c's
# is invariant between rounds. This means that we have to reload
# them once per round, in the middle. This is why you'll see a
# bunch of 'c' stores and loads in the middle, but none at
# the beginning or the end.
"&vpaddd (@x[$a0],@x[$a0],@x[$b0])", # Q1
"&vpxor (@x[$d0],@x[$a0],@x[$d0])",
"&vpshufb (@x[$d0],@x[$d0],$t1)",
"&vpaddd (@x[$a1],@x[$a1],@x[$b1])", # Q2
"&vpxor (@x[$d1],@x[$a1],@x[$d1])",
"&vpshufb (@x[$d1],@x[$d1],$t1)",
"&vpaddd ($xc,$xc,@x[$d0])",
"&vpxor (@x[$b0],$xc,@x[$b0])",
"&vpslld ($t0,@x[$b0],12)",
"&vpsrld (@x[$b0],@x[$b0],20)",
"&vpor (@x[$b0],$t0,@x[$b0])",
"&vbroadcasti128($t0,'(%r11)')", # .Lrot24(%rip)
"&vpaddd ($xc_,$xc_,@x[$d1])",
"&vpxor (@x[$b1],$xc_,@x[$b1])",
"&vpslld ($t1,@x[$b1],12)",
"&vpsrld (@x[$b1],@x[$b1],20)",
"&vpor (@x[$b1],$t1,@x[$b1])",
"&vpaddd (@x[$a0],@x[$a0],@x[$b0])",
"&vpxor (@x[$d0],@x[$a0],@x[$d0])",
"&vpshufb (@x[$d0],@x[$d0],$t0)",
"&vpaddd (@x[$a1],@x[$a1],@x[$b1])",
"&vpxor (@x[$d1],@x[$a1],@x[$d1])",
"&vpshufb (@x[$d1],@x[$d1],$t0)",
"&vpaddd ($xc,$xc,@x[$d0])",
"&vpxor (@x[$b0],$xc,@x[$b0])",
"&vpslld ($t1,@x[$b0],7)",
"&vpsrld (@x[$b0],@x[$b0],25)",
"&vpor (@x[$b0],$t1,@x[$b0])",
"&vbroadcasti128($t1,'(%r10)')", # .Lrot16(%rip)
"&vpaddd ($xc_,$xc_,@x[$d1])",
"&vpxor (@x[$b1],$xc_,@x[$b1])",
"&vpslld ($t0,@x[$b1],7)",
"&vpsrld (@x[$b1],@x[$b1],25)",
"&vpor (@x[$b1],$t0,@x[$b1])",
"&vmovdqa (\"`32*($c0-8)`(%rsp)\",$xc)", # reload pair of 'c's
"&vmovdqa (\"`32*($c1-8)`(%rsp)\",$xc_)",
"&vmovdqa ($xc,\"`32*($c2-8)`(%rsp)\")",
"&vmovdqa ($xc_,\"`32*($c3-8)`(%rsp)\")",
"&vpaddd (@x[$a2],@x[$a2],@x[$b2])", # Q3
"&vpxor (@x[$d2],@x[$a2],@x[$d2])",
"&vpshufb (@x[$d2],@x[$d2],$t1)",
"&vpaddd (@x[$a3],@x[$a3],@x[$b3])", # Q4
"&vpxor (@x[$d3],@x[$a3],@x[$d3])",
"&vpshufb (@x[$d3],@x[$d3],$t1)",
"&vpaddd ($xc,$xc,@x[$d2])",
"&vpxor (@x[$b2],$xc,@x[$b2])",
"&vpslld ($t0,@x[$b2],12)",
"&vpsrld (@x[$b2],@x[$b2],20)",
"&vpor (@x[$b2],$t0,@x[$b2])",
"&vbroadcasti128($t0,'(%r11)')", # .Lrot24(%rip)
"&vpaddd ($xc_,$xc_,@x[$d3])",
"&vpxor (@x[$b3],$xc_,@x[$b3])",
"&vpslld ($t1,@x[$b3],12)",
"&vpsrld (@x[$b3],@x[$b3],20)",
"&vpor (@x[$b3],$t1,@x[$b3])",
"&vpaddd (@x[$a2],@x[$a2],@x[$b2])",
"&vpxor (@x[$d2],@x[$a2],@x[$d2])",
"&vpshufb (@x[$d2],@x[$d2],$t0)",
"&vpaddd (@x[$a3],@x[$a3],@x[$b3])",
"&vpxor (@x[$d3],@x[$a3],@x[$d3])",
"&vpshufb (@x[$d3],@x[$d3],$t0)",
"&vpaddd ($xc,$xc,@x[$d2])",
"&vpxor (@x[$b2],$xc,@x[$b2])",
"&vpslld ($t1,@x[$b2],7)",
"&vpsrld (@x[$b2],@x[$b2],25)",
"&vpor (@x[$b2],$t1,@x[$b2])",
"&vbroadcasti128($t1,'(%r10)')", # .Lrot16(%rip)
"&vpaddd ($xc_,$xc_,@x[$d3])",
"&vpxor (@x[$b3],$xc_,@x[$b3])",
"&vpslld ($t0,@x[$b3],7)",
"&vpsrld (@x[$b3],@x[$b3],25)",
"&vpor (@x[$b3],$t0,@x[$b3])"
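# In the lane round above the 16- and 8-bit rotates are done with a
# single vpshufb against the .Lrot16/.Lrot24 byte-shuffle masks, while
# the 12- and 7-bit rotates still need the vpslld/vpsrld/vpor triplet.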
my $xframe = $win64 ? 0xa8 : 8;
.type ChaCha20_8x,\@function,5
mov %rsp,%r9 # frame register
.cfi_def_cfa_register %r9
sub \$0x280+$xframe,%rsp
$code.=<<___ if ($win64);
movaps %xmm6,-0xa8(%r9)
movaps %xmm7,-0x98(%r9)
movaps %xmm8,-0x88(%r9)
movaps %xmm9,-0x78(%r9)
movaps %xmm10,-0x68(%r9)
movaps %xmm11,-0x58(%r9)
movaps %xmm12,-0x48(%r9)
movaps %xmm13,-0x38(%r9)
movaps %xmm14,-0x28(%r9)
movaps %xmm15,-0x18(%r9)
################ stack layout
# +0x00 SIMD equivalent of @x[8-12]
# +0x80 constant copy of key[0-2] smashed by lanes
# +0x200 SIMD counters (with nonce smashed by lanes)
vbroadcasti128 .Lsigma(%rip),$xa3 # key[0]
vbroadcasti128 ($key),$xb3 # key[1]
vbroadcasti128 16($key),$xt3 # key[2]
vbroadcasti128 ($counter),$xd3 # key[3]
lea 0x100(%rsp),%rcx # size optimization
lea 0x200(%rsp),%rax # size optimization
lea .Lrot16(%rip),%r10
lea .Lrot24(%rip),%r11
vpshufd \$0x00,$xa3,$xa0 # smash key by lanes...
vpshufd \$0x55,$xa3,$xa1
vmovdqa $xa0,0x80-0x100(%rcx) # ... and offload
vpshufd \$0xaa,$xa3,$xa2
vmovdqa $xa1,0xa0-0x100(%rcx)
vpshufd \$0xff,$xa3,$xa3
vmovdqa $xa2,0xc0-0x100(%rcx)
vmovdqa $xa3,0xe0-0x100(%rcx)
vpshufd \$0x00,$xb3,$xb0
vpshufd \$0x55,$xb3,$xb1
vmovdqa $xb0,0x100-0x100(%rcx)
vpshufd \$0xaa,$xb3,$xb2
vmovdqa $xb1,0x120-0x100(%rcx)
vpshufd \$0xff,$xb3,$xb3
vmovdqa $xb2,0x140-0x100(%rcx)
vmovdqa $xb3,0x160-0x100(%rcx)
vpshufd \$0x00,$xt3,$xt0 # "xc0"
vpshufd \$0x55,$xt3,$xt1 # "xc1"
vmovdqa $xt0,0x180-0x200(%rax)
vpshufd \$0xaa,$xt3,$xt2 # "xc2"
vmovdqa $xt1,0x1a0-0x200(%rax)
vpshufd \$0xff,$xt3,$xt3 # "xc3"
vmovdqa $xt2,0x1c0-0x200(%rax)
vmovdqa $xt3,0x1e0-0x200(%rax)
vpshufd \$0x00,$xd3,$xd0
vpshufd \$0x55,$xd3,$xd1
vpaddd .Lincy(%rip),$xd0,$xd0 # don't save counters yet
vpshufd \$0xaa,$xd3,$xd2
vmovdqa $xd1,0x220-0x200(%rax)
vpshufd \$0xff,$xd3,$xd3
vmovdqa $xd2,0x240-0x200(%rax)
vmovdqa $xd3,0x260-0x200(%rax)
vmovdqa 0x80-0x100(%rcx),$xa0 # re-load smashed key
vmovdqa 0xa0-0x100(%rcx),$xa1
vmovdqa 0xc0-0x100(%rcx),$xa2
vmovdqa 0xe0-0x100(%rcx),$xa3
vmovdqa 0x100-0x100(%rcx),$xb0
vmovdqa 0x120-0x100(%rcx),$xb1
vmovdqa 0x140-0x100(%rcx),$xb2
vmovdqa 0x160-0x100(%rcx),$xb3
vmovdqa 0x180-0x200(%rax),$xt0 # "xc0"
vmovdqa 0x1a0-0x200(%rax),$xt1 # "xc1"
vmovdqa 0x1c0-0x200(%rax),$xt2 # "xc2"
vmovdqa 0x1e0-0x200(%rax),$xt3 # "xc3"
vmovdqa 0x200-0x200(%rax),$xd0
vmovdqa 0x220-0x200(%rax),$xd1
vmovdqa 0x240-0x200(%rax),$xd2
vmovdqa 0x260-0x200(%rax),$xd3
vpaddd .Leight(%rip),$xd0,$xd0 # next SIMD counters
vmovdqa $xt2,0x40(%rsp) # SIMD equivalent of "@x[10]"
vmovdqa $xt3,0x60(%rsp) # SIMD equivalent of "@x[11]"
vbroadcasti128 (%r10),$xt3
vmovdqa $xd0,0x200-0x200(%rax) # save SIMD counters
foreach (&AVX2_lane_ROUND(0, 4, 8,12)) { eval; }
foreach (&AVX2_lane_ROUND(0, 5,10,15)) { eval; }
lea 0x200(%rsp),%rax # size optimization
vpaddd 0x80-0x100(%rcx),$xa0,$xa0 # accumulate key
vpaddd 0xa0-0x100(%rcx),$xa1,$xa1
vpaddd 0xc0-0x100(%rcx),$xa2,$xa2
vpaddd 0xe0-0x100(%rcx),$xa3,$xa3
vpunpckldq $xa1,$xa0,$xt2 # "de-interlace" data
vpunpckldq $xa3,$xa2,$xt3
vpunpckhdq $xa1,$xa0,$xa0
vpunpckhdq $xa3,$xa2,$xa2
vpunpcklqdq $xt3,$xt2,$xa1 # "a0"
vpunpckhqdq $xt3,$xt2,$xt2 # "a1"
vpunpcklqdq $xa2,$xa0,$xa3 # "a2"
vpunpckhqdq $xa2,$xa0,$xa0 # "a3"
($xa0,$xa1,$xa2,$xa3,$xt2)=($xa1,$xt2,$xa3,$xa0,$xa2);
vpaddd 0x100-0x100(%rcx),$xb0,$xb0
vpaddd 0x120-0x100(%rcx),$xb1,$xb1
vpaddd 0x140-0x100(%rcx),$xb2,$xb2
vpaddd 0x160-0x100(%rcx),$xb3,$xb3
vpunpckldq $xb1,$xb0,$xt2
vpunpckldq $xb3,$xb2,$xt3
vpunpckhdq $xb1,$xb0,$xb0
vpunpckhdq $xb3,$xb2,$xb2
vpunpcklqdq $xt3,$xt2,$xb1 # "b0"
vpunpckhqdq $xt3,$xt2,$xt2 # "b1"
vpunpcklqdq $xb2,$xb0,$xb3 # "b2"
vpunpckhqdq $xb2,$xb0,$xb0 # "b3"
($xb0,$xb1,$xb2,$xb3,$xt2)=($xb1,$xt2,$xb3,$xb0,$xb2);
vperm2i128 \$0x20,$xb0,$xa0,$xt3 # "de-interlace" further
vperm2i128 \$0x31,$xb0,$xa0,$xb0
vperm2i128 \$0x20,$xb1,$xa1,$xa0
vperm2i128 \$0x31,$xb1,$xa1,$xb1
vperm2i128 \$0x20,$xb2,$xa2,$xa1
vperm2i128 \$0x31,$xb2,$xa2,$xb2
vperm2i128 \$0x20,$xb3,$xa3,$xa2
vperm2i128 \$0x31,$xb3,$xa3,$xb3
($xa0,$xa1,$xa2,$xa3,$xt3)=($xt3,$xa0,$xa1,$xa2,$xa3);
my ($xc0,$xc1,$xc2,$xc3)=($xt0,$xt1,$xa0,$xa1);
vmovdqa $xa0,0x00(%rsp) # offload $xaN
vmovdqa $xa1,0x20(%rsp)
vmovdqa 0x40(%rsp),$xc2 # $xa0
vmovdqa 0x60(%rsp),$xc3 # $xa1
vpaddd 0x180-0x200(%rax),$xc0,$xc0
vpaddd 0x1a0-0x200(%rax),$xc1,$xc1
vpaddd 0x1c0-0x200(%rax),$xc2,$xc2
vpaddd 0x1e0-0x200(%rax),$xc3,$xc3
vpunpckldq $xc1,$xc0,$xt2
vpunpckldq $xc3,$xc2,$xt3
vpunpckhdq $xc1,$xc0,$xc0
vpunpckhdq $xc3,$xc2,$xc2
vpunpcklqdq $xt3,$xt2,$xc1 # "c0"
vpunpckhqdq $xt3,$xt2,$xt2 # "c1"
vpunpcklqdq $xc2,$xc0,$xc3 # "c2"
vpunpckhqdq $xc2,$xc0,$xc0 # "c3"
($xc0,$xc1,$xc2,$xc3,$xt2)=($xc1,$xt2,$xc3,$xc0,$xc2);
vpaddd 0x200-0x200(%rax),$xd0,$xd0
vpaddd 0x220-0x200(%rax),$xd1,$xd1
vpaddd 0x240-0x200(%rax),$xd2,$xd2
vpaddd 0x260-0x200(%rax),$xd3,$xd3
vpunpckldq $xd1,$xd0,$xt2
vpunpckldq $xd3,$xd2,$xt3
vpunpckhdq $xd1,$xd0,$xd0
vpunpckhdq $xd3,$xd2,$xd2
vpunpcklqdq $xt3,$xt2,$xd1 # "d0"
vpunpckhqdq $xt3,$xt2,$xt2 # "d1"
vpunpcklqdq $xd2,$xd0,$xd3 # "d2"
vpunpckhqdq $xd2,$xd0,$xd0 # "d3"
($xd0,$xd1,$xd2,$xd3,$xt2)=($xd1,$xt2,$xd3,$xd0,$xd2);
vperm2i128 \$0x20,$xd0,$xc0,$xt3 # "de-interlace" further
vperm2i128 \$0x31,$xd0,$xc0,$xd0
vperm2i128 \$0x20,$xd1,$xc1,$xc0
vperm2i128 \$0x31,$xd1,$xc1,$xd1
vperm2i128 \$0x20,$xd2,$xc2,$xc1
vperm2i128 \$0x31,$xd2,$xc2,$xd2
vperm2i128 \$0x20,$xd3,$xc3,$xc2
vperm2i128 \$0x31,$xd3,$xc3,$xd3
($xc0,$xc1,$xc2,$xc3,$xt3)=($xt3,$xc0,$xc1,$xc2,$xc3);
($xb0,$xb1,$xb2,$xb3,$xc0,$xc1,$xc2,$xc3)=
($xc0,$xc1,$xc2,$xc3,$xb0,$xb1,$xb2,$xb3);
($xa0,$xa1)=($xt2,$xt3);
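# with 256-bit registers the dword/qword unpacks above only transpose
# within each 128-bit half, so the vperm2i128 steps complete the
# de-interlace by exchanging the halves between register pairs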
vmovdqa 0x00(%rsp),$xa0 # $xaN was offloaded, remember?
vmovdqa 0x20(%rsp),$xa1
vpxor 0x00($inp),$xa0,$xa0 # xor with input
vpxor 0x20($inp),$xb0,$xb0
vpxor 0x40($inp),$xc0,$xc0
vpxor 0x60($inp),$xd0,$xd0
lea 0x80($inp),$inp # size optimization
vmovdqu $xa0,0x00($out)
vmovdqu $xb0,0x20($out)
vmovdqu $xc0,0x40($out)
vmovdqu $xd0,0x60($out)
lea 0x80($out),$out # size optimization
vpxor 0x00($inp),$xa1,$xa1
vpxor 0x20($inp),$xb1,$xb1
vpxor 0x40($inp),$xc1,$xc1
vpxor 0x60($inp),$xd1,$xd1
lea 0x80($inp),$inp # size optimization
vmovdqu $xa1,0x00($out)
vmovdqu $xb1,0x20($out)
vmovdqu $xc1,0x40($out)
vmovdqu $xd1,0x60($out)
lea 0x80($out),$out # size optimization
vpxor 0x00($inp),$xa2,$xa2
vpxor 0x20($inp),$xb2,$xb2
vpxor 0x40($inp),$xc2,$xc2
vpxor 0x60($inp),$xd2,$xd2
lea 0x80($inp),$inp # size optimization
vmovdqu $xa2,0x00($out)
vmovdqu $xb2,0x20($out)
vmovdqu $xc2,0x40($out)
vmovdqu $xd2,0x60($out)
lea 0x80($out),$out # size optimization
vpxor 0x00($inp),$xa3,$xa3
vpxor 0x20($inp),$xb3,$xb3
vpxor 0x40($inp),$xc3,$xc3
vpxor 0x60($inp),$xd3,$xd3
lea 0x80($inp),$inp # size optimization
vmovdqu $xa3,0x00($out)
vmovdqu $xb3,0x20($out)
vmovdqu $xc3,0x40($out)
vmovdqu $xd3,0x60($out)
lea 0x80($out),$out # size optimization
vmovdqa $xa0,0x00(%rsp)
vmovdqa $xb0,0x20(%rsp)
vpxor 0x00($inp),$xa0,$xa0 # xor with input
vpxor 0x20($inp),$xb0,$xb0
vmovdqu $xa0,0x00($out)
vmovdqu $xb0,0x20($out)
lea 0x40($inp),$inp # inp+=64*1
vmovdqa $xc0,0x00(%rsp)
lea 0x40($out),$out # out+=64*1
sub \$64,$len # len-=64*1
vmovdqa $xd0,0x20(%rsp)
vpxor 0x00($inp),$xa0,$xa0 # xor with input
vpxor 0x20($inp),$xb0,$xb0
vpxor 0x40($inp),$xc0,$xc0
vpxor 0x60($inp),$xd0,$xd0
vmovdqu $xa0,0x00($out)
vmovdqu $xb0,0x20($out)
vmovdqu $xc0,0x40($out)
vmovdqu $xd0,0x60($out)
lea 0x80($inp),$inp # inp+=64*2
vmovdqa $xa1,0x00(%rsp)
lea 0x80($out),$out # out+=64*2
sub \$128,$len # len-=64*2
vmovdqa $xb1,0x20(%rsp)
vpxor 0x00($inp),$xa0,$xa0 # xor with input
vpxor 0x20($inp),$xb0,$xb0
vpxor 0x40($inp),$xc0,$xc0
vpxor 0x60($inp),$xd0,$xd0
vpxor 0x80($inp),$xa1,$xa1
vpxor 0xa0($inp),$xb1,$xb1
vmovdqu $xa0,0x00($out)
vmovdqu $xb0,0x20($out)
vmovdqu $xc0,0x40($out)
vmovdqu $xd0,0x60($out)
vmovdqu $xa1,0x80($out)
vmovdqu $xb1,0xa0($out)
lea 0xc0($inp),$inp # inp+=64*3
vmovdqa $xc1,0x00(%rsp)
lea 0xc0($out),$out # out+=64*3
sub \$192,$len # len-=64*3
vmovdqa $xd1,0x20(%rsp)
vpxor 0x00($inp),$xa0,$xa0 # xor with input
vpxor 0x20($inp),$xb0,$xb0
vpxor 0x40($inp),$xc0,$xc0
vpxor 0x60($inp),$xd0,$xd0
vpxor 0x80($inp),$xa1,$xa1
vpxor 0xa0($inp),$xb1,$xb1
vpxor 0xc0($inp),$xc1,$xc1
vpxor 0xe0($inp),$xd1,$xd1
vmovdqu $xa0,0x00($out)
vmovdqu $xb0,0x20($out)
vmovdqu $xc0,0x40($out)
vmovdqu $xd0,0x60($out)
vmovdqu $xa1,0x80($out)
vmovdqu $xb1,0xa0($out)
vmovdqu $xc1,0xc0($out)
vmovdqu $xd1,0xe0($out)
lea 0x100($inp),$inp # inp+=64*4
vmovdqa $xa2,0x00(%rsp)
lea 0x100($out),$out # out+=64*4
sub \$256,$len # len-=64*4
vmovdqa $xb2,0x20(%rsp)
vpxor 0x00($inp),$xa0,$xa0 # xor with input
vpxor 0x20($inp),$xb0,$xb0
vpxor 0x40($inp),$xc0,$xc0
vpxor 0x60($inp),$xd0,$xd0
vpxor 0x80($inp),$xa1,$xa1
vpxor 0xa0($inp),$xb1,$xb1
vpxor 0xc0($inp),$xc1,$xc1
vpxor 0xe0($inp),$xd1,$xd1
vpxor 0x100($inp),$xa2,$xa2
vpxor 0x120($inp),$xb2,$xb2
vmovdqu $xa0,0x00($out)
vmovdqu $xb0,0x20($out)
vmovdqu $xc0,0x40($out)
vmovdqu $xd0,0x60($out)
vmovdqu $xa1,0x80($out)
vmovdqu $xb1,0xa0($out)
vmovdqu $xc1,0xc0($out)
vmovdqu $xd1,0xe0($out)
vmovdqu $xa2,0x100($out)
vmovdqu $xb2,0x120($out)
lea 0x140($inp),$inp # inp+=64*5
vmovdqa $xc2,0x00(%rsp)
lea 0x140($out),$out # out+=64*5
sub \$320,$len # len-=64*5
vmovdqa $xd2,0x20(%rsp)
vpxor 0x00($inp),$xa0,$xa0 # xor with input
vpxor 0x20($inp),$xb0,$xb0
vpxor 0x40($inp),$xc0,$xc0
vpxor 0x60($inp),$xd0,$xd0
vpxor 0x80($inp),$xa1,$xa1
vpxor 0xa0($inp),$xb1,$xb1
vpxor 0xc0($inp),$xc1,$xc1
vpxor 0xe0($inp),$xd1,$xd1
vpxor 0x100($inp),$xa2,$xa2
vpxor 0x120($inp),$xb2,$xb2
vpxor 0x140($inp),$xc2,$xc2
vpxor 0x160($inp),$xd2,$xd2
vmovdqu $xa0,0x00($out)
vmovdqu $xb0,0x20($out)
vmovdqu $xc0,0x40($out)
vmovdqu $xd0,0x60($out)
vmovdqu $xa1,0x80($out)
vmovdqu $xb1,0xa0($out)
vmovdqu $xc1,0xc0($out)
vmovdqu $xd1,0xe0($out)
vmovdqu $xa2,0x100($out)
vmovdqu $xb2,0x120($out)
vmovdqu $xc2,0x140($out)
vmovdqu $xd2,0x160($out)
lea 0x180($inp),$inp # inp+=64*6
vmovdqa $xa3,0x00(%rsp)
lea 0x180($out),$out # out+=64*6
sub \$384,$len # len-=64*6
vmovdqa $xb3,0x20(%rsp)
vpxor 0x00($inp),$xa0,$xa0 # xor with input
vpxor 0x20($inp),$xb0,$xb0
vpxor 0x40($inp),$xc0,$xc0
vpxor 0x60($inp),$xd0,$xd0
vpxor 0x80($inp),$xa1,$xa1
vpxor 0xa0($inp),$xb1,$xb1
vpxor 0xc0($inp),$xc1,$xc1
vpxor 0xe0($inp),$xd1,$xd1
vpxor 0x100($inp),$xa2,$xa2
vpxor 0x120($inp),$xb2,$xb2
vpxor 0x140($inp),$xc2,$xc2
vpxor 0x160($inp),$xd2,$xd2
vpxor 0x180($inp),$xa3,$xa3
vpxor 0x1a0($inp),$xb3,$xb3
vmovdqu $xa0,0x00($out)
vmovdqu $xb0,0x20($out)
vmovdqu $xc0,0x40($out)
vmovdqu $xd0,0x60($out)
vmovdqu $xa1,0x80($out)
vmovdqu $xb1,0xa0($out)
vmovdqu $xc1,0xc0($out)
vmovdqu $xd1,0xe0($out)
vmovdqu $xa2,0x100($out)
vmovdqu $xb2,0x120($out)
vmovdqu $xc2,0x140($out)
vmovdqu $xd2,0x160($out)
vmovdqu $xa3,0x180($out)
vmovdqu $xb3,0x1a0($out)
lea 0x1c0($inp),$inp # inp+=64*7
vmovdqa $xc3,0x00(%rsp)
lea 0x1c0($out),$out # out+=64*7
sub \$448,$len # len-=64*7
vmovdqa $xd3,0x20(%rsp)
movzb ($inp,%r10),%eax
movzb (%rsp,%r10),%ecx
mov %al,-1($out,%r10)
$code.=<<___ if ($win64);
movaps -0xa8(%r9),%xmm6
movaps -0x98(%r9),%xmm7
movaps -0x88(%r9),%xmm8
movaps -0x78(%r9),%xmm9
movaps -0x68(%r9),%xmm10
movaps -0x58(%r9),%xmm11
movaps -0x48(%r9),%xmm12
movaps -0x38(%r9),%xmm13
movaps -0x28(%r9),%xmm14
movaps -0x18(%r9),%xmm15
.cfi_def_cfa_register %rsp
.size ChaCha20_8x,.-ChaCha20_8x
########################################################################
# This one handles shorter inputs...
my ($a,$b,$c,$d, $a_,$b_,$c_,$d_,$fourz) = map("%zmm$_",(0..3,16..20));
my ($t0,$t1,$t2,$t3) = map("%xmm$_",(4..7));
sub AVX512ROUND { # critical path is 14 "SIMD ticks" per round
my $xframe = $win64 ? 32+8 : 8;
.type ChaCha20_avx512,\@function,5
mov %rsp,%r9 # frame pointer
.cfi_def_cfa_register %r9
sub \$64+$xframe,%rsp
$code.=<<___ if ($win64);
movaps %xmm6,-0x28(%r9)
movaps %xmm7,-0x18(%r9)
vbroadcasti32x4 .Lsigma(%rip),$a
vbroadcasti32x4 ($key),$b
vbroadcasti32x4 16($key),$c
vbroadcasti32x4 ($counter),$d
vpaddd .Lzeroz(%rip),$d,$d
vmovdqa32 .Lfourz(%rip),$fourz
mov \$10,$counter # reuse $counter
vpaddd $fourz,$d_,$d
&vpshufd ($c,$c,0b01001110);
&vpshufd ($b,$b,0b00111001);
&vpshufd ($d,$d,0b10010011);
&vpshufd ($c,$c,0b01001110);
&vpshufd ($b,$b,0b10010011);
&vpshufd ($d,$d,0b00111001);
&jnz (".Loop_avx512");
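# AVX512 provides a native vector rotate (vprold), so the quarter-rounds
# need neither shuffle constants nor temporaries. This 1x path
# replicates each 128-bit state row across the four lanes of a %zmm
# register and offsets the counters per lane, so four blocks are
# produced per iteration and extracted lane by lane below.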
2381 vpxor 0x00($inp),%x#$a,$t0 # xor with input
2382 vpxor 0x10($inp),%x#$b,$t1
2383 vpxor 0x20($inp),%x#$c,$t2
2384 vpxor 0x30($inp),%x#$d,$t3
2385 lea 0x40($inp),$inp # inp+=64
2387 vmovdqu $t0,0x00($out) # write output
2388 vmovdqu $t1,0x10($out)
2389 vmovdqu $t2,0x20($out)
2390 vmovdqu $t3,0x30($out)
2391 lea 0x40($out),$out # out+=64
2395 vextracti32x4 \$1,$a,$t0
2396 vextracti32x4 \$1,$b,$t1
2397 vextracti32x4 \$1,$c,$t2
2398 vextracti32x4 \$1,$d,$t3
2403 vpxor 0x00($inp),$t0,$t0 # xor with input
2404 vpxor 0x10($inp),$t1,$t1
2405 vpxor 0x20($inp),$t2,$t2
2406 vpxor 0x30($inp),$t3,$t3
2407 lea 0x40($inp),$inp # inp+=64
2409 vmovdqu $t0,0x00($out) # write output
2410 vmovdqu $t1,0x10($out)
2411 vmovdqu $t2,0x20($out)
2412 vmovdqu $t3,0x30($out)
2413 lea 0x40($out),$out # out+=64
2417 vextracti32x4 \$2,$a,$t0
2418 vextracti32x4 \$2,$b,$t1
2419 vextracti32x4 \$2,$c,$t2
2420 vextracti32x4 \$2,$d,$t3
2425 vpxor 0x00($inp),$t0,$t0 # xor with input
2426 vpxor 0x10($inp),$t1,$t1
2427 vpxor 0x20($inp),$t2,$t2
2428 vpxor 0x30($inp),$t3,$t3
2429 lea 0x40($inp),$inp # inp+=64
2431 vmovdqu $t0,0x00($out) # write output
2432 vmovdqu $t1,0x10($out)
2433 vmovdqu $t2,0x20($out)
2434 vmovdqu $t3,0x30($out)
2435 lea 0x40($out),$out # out+=64
2439 vextracti32x4 \$3,$a,$t0
2440 vextracti32x4 \$3,$b,$t1
2441 vextracti32x4 \$3,$c,$t2
2442 vextracti32x4 \$3,$d,$t3
2447 vpxor 0x00($inp),$t0,$t0 # xor with input
2448 vpxor 0x10($inp),$t1,$t1
2449 vpxor 0x20($inp),$t2,$t2
2450 vpxor 0x30($inp),$t3,$t3
2451 lea 0x40($inp),$inp # inp+=64
2453 vmovdqu $t0,0x00($out) # write output
2454 vmovdqu $t1,0x10($out)
2455 vmovdqu $t2,0x20($out)
2456 vmovdqu $t3,0x30($out)
2457 lea 0x40($out),$out # out+=64
2459 jnz .Loop_outer_avx512
2465 vmovdqa %x#$a,0x00(%rsp)
2466 vmovdqa %x#$b,0x10(%rsp)
2467 vmovdqa %x#$c,0x20(%rsp)
2468 vmovdqa %x#$d,0x30(%rsp)
2470 jmp .Loop_tail_avx512
2474 vmovdqa $t0,0x00(%rsp)
2475 vmovdqa $t1,0x10(%rsp)
2476 vmovdqa $t2,0x20(%rsp)
2477 vmovdqa $t3,0x30(%rsp)
2481 movzb ($inp,$counter),%eax
2482 movzb (%rsp,$counter),%ecx
2483 lea 1($counter),$counter
2485 mov %al,-1($out,$counter)
2487 jnz .Loop_tail_avx512
2489 vmovdqa32 $a_,0x00(%rsp)
2494 $code.=<<___ if ($win64);
2495 movaps -0x28(%r9),%xmm6
2496 movaps -0x18(%r9),%xmm7
2500 .cfi_def_cfa_register %rsp
2504 .size ChaCha20_avx512,.-ChaCha20_avx512
2508 # This one handles longer inputs...
2510 my ($xa0,$xa1,$xa2,$xa3, $xb0,$xb1,$xb2,$xb3,
2511 $xc0,$xc1,$xc2,$xc3, $xd0,$xd1,$xd2,$xd3)=map("%zmm$_",(0..15));
2512 my @xx=($xa0,$xa1,$xa2,$xa3, $xb0,$xb1,$xb2,$xb3,
2513 $xc0,$xc1,$xc2,$xc3, $xd0,$xd1,$xd2,$xd3);
2514 my @key=map("%zmm$_",(16..31));
2515 my ($xt0,$xt1,$xt2,$xt3)=@key[0..3];
2517 sub AVX512_lane_ROUND {
2518 my ($a0,$b0,$c0,$d0)=@_;
2519 my ($a1,$b1,$c1,$d1)=map(($_&~3)+(($_+1)&3),($a0,$b0,$c0,$d0));
2520 my ($a2,$b2,$c2,$d2)=map(($_&~3)+(($_+1)&3),($a1,$b1,$c1,$d1));
2521 my ($a3,$b3,$c3,$d3)=map(($_&~3)+(($_+1)&3),($a2,$b2,$c2,$d2));
my @x=map("\"$_\"",@xx);

	"&vpaddd	(@x[$a0],@x[$a0],@x[$b0])",	# Q1
	"&vpaddd	(@x[$a1],@x[$a1],@x[$b1])",	# Q2
	"&vpaddd	(@x[$a2],@x[$a2],@x[$b2])",	# Q3
	"&vpaddd	(@x[$a3],@x[$a3],@x[$b3])",	# Q4
	"&vpxord	(@x[$d0],@x[$d0],@x[$a0])",
	"&vpxord	(@x[$d1],@x[$d1],@x[$a1])",
	"&vpxord	(@x[$d2],@x[$d2],@x[$a2])",
	"&vpxord	(@x[$d3],@x[$d3],@x[$a3])",
	"&vprold	(@x[$d0],@x[$d0],16)",
	"&vprold	(@x[$d1],@x[$d1],16)",
	"&vprold	(@x[$d2],@x[$d2],16)",
	"&vprold	(@x[$d3],@x[$d3],16)",

	"&vpaddd	(@x[$c0],@x[$c0],@x[$d0])",
	"&vpaddd	(@x[$c1],@x[$c1],@x[$d1])",
	"&vpaddd	(@x[$c2],@x[$c2],@x[$d2])",
	"&vpaddd	(@x[$c3],@x[$c3],@x[$d3])",
	"&vpxord	(@x[$b0],@x[$b0],@x[$c0])",
	"&vpxord	(@x[$b1],@x[$b1],@x[$c1])",
	"&vpxord	(@x[$b2],@x[$b2],@x[$c2])",
	"&vpxord	(@x[$b3],@x[$b3],@x[$c3])",
	"&vprold	(@x[$b0],@x[$b0],12)",
	"&vprold	(@x[$b1],@x[$b1],12)",
	"&vprold	(@x[$b2],@x[$b2],12)",
	"&vprold	(@x[$b3],@x[$b3],12)",

	"&vpaddd	(@x[$a0],@x[$a0],@x[$b0])",
	"&vpaddd	(@x[$a1],@x[$a1],@x[$b1])",
	"&vpaddd	(@x[$a2],@x[$a2],@x[$b2])",
	"&vpaddd	(@x[$a3],@x[$a3],@x[$b3])",
	"&vpxord	(@x[$d0],@x[$d0],@x[$a0])",
	"&vpxord	(@x[$d1],@x[$d1],@x[$a1])",
	"&vpxord	(@x[$d2],@x[$d2],@x[$a2])",
	"&vpxord	(@x[$d3],@x[$d3],@x[$a3])",
	"&vprold	(@x[$d0],@x[$d0],8)",
	"&vprold	(@x[$d1],@x[$d1],8)",
	"&vprold	(@x[$d2],@x[$d2],8)",
	"&vprold	(@x[$d3],@x[$d3],8)",

	"&vpaddd	(@x[$c0],@x[$c0],@x[$d0])",
	"&vpaddd	(@x[$c1],@x[$c1],@x[$d1])",
	"&vpaddd	(@x[$c2],@x[$c2],@x[$d2])",
	"&vpaddd	(@x[$c3],@x[$c3],@x[$d3])",
	"&vpxord	(@x[$b0],@x[$b0],@x[$c0])",
	"&vpxord	(@x[$b1],@x[$b1],@x[$c1])",
	"&vpxord	(@x[$b2],@x[$b2],@x[$c2])",
	"&vpxord	(@x[$b3],@x[$b3],@x[$c3])",
	"&vprold	(@x[$b0],@x[$b0],7)",
	"&vprold	(@x[$b1],@x[$b1],7)",
	"&vprold	(@x[$b2],@x[$b2],7)",
	"&vprold	(@x[$b3],@x[$b3],7)"
my $xframe = $win64 ? 0xa8 : 8;

.type	ChaCha20_16x,\@function,5

	mov	%rsp,%r9		# frame register
.cfi_def_cfa_register	%r9
	sub	\$64+$xframe,%rsp

$code.=<<___	if ($win64);
	movaps	%xmm6,-0xa8(%r9)
	movaps	%xmm7,-0x98(%r9)
	movaps	%xmm8,-0x88(%r9)
	movaps	%xmm9,-0x78(%r9)
	movaps	%xmm10,-0x68(%r9)
	movaps	%xmm11,-0x58(%r9)
	movaps	%xmm12,-0x48(%r9)
	movaps	%xmm13,-0x38(%r9)
	movaps	%xmm14,-0x28(%r9)
	movaps	%xmm15,-0x18(%r9)

	lea	.Lsigma(%rip),%r10
	vbroadcasti32x4	(%r10),$xa3		# key[0]
	vbroadcasti32x4	($key),$xb3		# key[1]
	vbroadcasti32x4	16($key),$xc3		# key[2]
	vbroadcasti32x4	($counter),$xd3		# key[3]
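# "Smash" the four 128-bit state rows loaded above: vpshufd with
# selectors 0x00, 0x55, 0xaa and 0xff broadcasts dword 0, 1, 2 and 3 of
# each row across all sixteen lanes, converting the row-per-register
# layout into the word-per-register layout the rounds operate on.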
	vpshufd	\$0x00,$xa3,$xa0	# smash key by lanes...
	vpshufd	\$0x55,$xa3,$xa1
	vpshufd	\$0xaa,$xa3,$xa2
	vpshufd	\$0xff,$xa3,$xa3
	vmovdqa64	$xa0,@key[0]
	vmovdqa64	$xa1,@key[1]
	vmovdqa64	$xa2,@key[2]
	vmovdqa64	$xa3,@key[3]

	vpshufd	\$0x00,$xb3,$xb0
	vpshufd	\$0x55,$xb3,$xb1
	vpshufd	\$0xaa,$xb3,$xb2
	vpshufd	\$0xff,$xb3,$xb3
	vmovdqa64	$xb0,@key[4]
	vmovdqa64	$xb1,@key[5]
	vmovdqa64	$xb2,@key[6]
	vmovdqa64	$xb3,@key[7]

	vpshufd	\$0x00,$xc3,$xc0
	vpshufd	\$0x55,$xc3,$xc1
	vpshufd	\$0xaa,$xc3,$xc2
	vpshufd	\$0xff,$xc3,$xc3
	vmovdqa64	$xc0,@key[8]
	vmovdqa64	$xc1,@key[9]
	vmovdqa64	$xc2,@key[10]
	vmovdqa64	$xc3,@key[11]

	vpshufd	\$0x00,$xd3,$xd0
	vpshufd	\$0x55,$xd3,$xd1
	vpshufd	\$0xaa,$xd3,$xd2
	vpshufd	\$0xff,$xd3,$xd3
	vpaddd	.Lincz(%rip),$xd0,$xd0	# don't save counters yet
	vmovdqa64	$xd0,@key[12]
	vmovdqa64	$xd1,@key[13]
	vmovdqa64	$xd2,@key[14]
	vmovdqa64	$xd3,@key[15]
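# .Lincz is the 0,1,2,...,15 vector, so after the addition lane N of
# the counter word belongs to block N; the advanced counters are only
# committed back to @key[12] once per outer iteration.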
	vpbroadcastd	0(%r10),$xa0		# reload key
	vpbroadcastd	4(%r10),$xa1
	vpbroadcastd	8(%r10),$xa2
	vpbroadcastd	12(%r10),$xa3
	vpaddd	.Lsixteen(%rip),@key[12],@key[12]	# next SIMD counters
	vmovdqa64	@key[4],$xb0
	vmovdqa64	@key[5],$xb1
	vmovdqa64	@key[6],$xb2
	vmovdqa64	@key[7],$xb3
	vmovdqa64	@key[8],$xc0
	vmovdqa64	@key[9],$xc1
	vmovdqa64	@key[10],$xc2
	vmovdqa64	@key[11],$xc3
	vmovdqa64	@key[12],$xd0
	vmovdqa64	@key[13],$xd1
	vmovdqa64	@key[14],$xd2
	vmovdqa64	@key[15],$xd3

	vmovdqa64	$xa0,@key[0]
	vmovdqa64	$xa1,@key[1]
	vmovdqa64	$xa2,@key[2]
	vmovdqa64	$xa3,@key[3]
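# Top of the outer loop: the working state is refreshed from @key and
# @key[12] is advanced by 16 so the next batch of sixteen blocks gets
# fresh counters. The sigma row is re-broadcast from %r10 rather than
# copied from @key[0..3], because those registers double as the $xt0-
# $xt3 scratch during the transpose below.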
foreach (&AVX512_lane_ROUND(0, 4, 8,12)) { eval; }
foreach (&AVX512_lane_ROUND(0, 5,10,15)) { eval; }

	vpaddd		@key[0],$xa0,$xa0	# accumulate key
	vpaddd		@key[1],$xa1,$xa1
	vpaddd		@key[2],$xa2,$xa2
	vpaddd		@key[3],$xa3,$xa3
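# With the per-block key material re-added, each register still holds
# one state word for sixteen blocks. The vpunpck{l,h}dq and
# vpunpck{l,h}qdq passes below transpose dwords within each 128-bit
# unit, and the vshufi32x4 passes then shuffle whole 128-bit lanes,
# until every register holds one contiguous 64-byte output block.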
	vpunpckldq	$xa1,$xa0,$xt2		# "de-interlace" data
	vpunpckldq	$xa3,$xa2,$xt3
	vpunpckhdq	$xa1,$xa0,$xa0
	vpunpckhdq	$xa3,$xa2,$xa2
	vpunpcklqdq	$xt3,$xt2,$xa1		# "a0"
	vpunpckhqdq	$xt3,$xt2,$xt2		# "a1"
	vpunpcklqdq	$xa2,$xa0,$xa3		# "a2"
	vpunpckhqdq	$xa2,$xa0,$xa0		# "a3"

($xa0,$xa1,$xa2,$xa3,$xt2)=($xa1,$xt2,$xa3,$xa0,$xa2);

	vpaddd		@key[4],$xb0,$xb0
	vpaddd		@key[5],$xb1,$xb1
	vpaddd		@key[6],$xb2,$xb2
	vpaddd		@key[7],$xb3,$xb3

	vpunpckldq	$xb1,$xb0,$xt2
	vpunpckldq	$xb3,$xb2,$xt3
	vpunpckhdq	$xb1,$xb0,$xb0
	vpunpckhdq	$xb3,$xb2,$xb2
	vpunpcklqdq	$xt3,$xt2,$xb1		# "b0"
	vpunpckhqdq	$xt3,$xt2,$xt2		# "b1"
	vpunpcklqdq	$xb2,$xb0,$xb3		# "b2"
	vpunpckhqdq	$xb2,$xb0,$xb0		# "b3"

($xb0,$xb1,$xb2,$xb3,$xt2)=($xb1,$xt2,$xb3,$xb0,$xb2);

	vshufi32x4	\$0x44,$xb0,$xa0,$xt3	# "de-interlace" further
	vshufi32x4	\$0xee,$xb0,$xa0,$xb0
	vshufi32x4	\$0x44,$xb1,$xa1,$xa0
	vshufi32x4	\$0xee,$xb1,$xa1,$xb1
	vshufi32x4	\$0x44,$xb2,$xa2,$xa1
	vshufi32x4	\$0xee,$xb2,$xa2,$xb2
	vshufi32x4	\$0x44,$xb3,$xa3,$xa2
	vshufi32x4	\$0xee,$xb3,$xa3,$xb3

($xa0,$xa1,$xa2,$xa3,$xt3)=($xt3,$xa0,$xa1,$xa2,$xa3);

	vpaddd		@key[8],$xc0,$xc0
	vpaddd		@key[9],$xc1,$xc1
	vpaddd		@key[10],$xc2,$xc2
	vpaddd		@key[11],$xc3,$xc3

	vpunpckldq	$xc1,$xc0,$xt2
	vpunpckldq	$xc3,$xc2,$xt3
	vpunpckhdq	$xc1,$xc0,$xc0
	vpunpckhdq	$xc3,$xc2,$xc2
	vpunpcklqdq	$xt3,$xt2,$xc1		# "c0"
	vpunpckhqdq	$xt3,$xt2,$xt2		# "c1"
	vpunpcklqdq	$xc2,$xc0,$xc3		# "c2"
	vpunpckhqdq	$xc2,$xc0,$xc0		# "c3"

($xc0,$xc1,$xc2,$xc3,$xt2)=($xc1,$xt2,$xc3,$xc0,$xc2);

	vpaddd		@key[12],$xd0,$xd0
	vpaddd		@key[13],$xd1,$xd1
	vpaddd		@key[14],$xd2,$xd2
	vpaddd		@key[15],$xd3,$xd3

	vpunpckldq	$xd1,$xd0,$xt2
	vpunpckldq	$xd3,$xd2,$xt3
	vpunpckhdq	$xd1,$xd0,$xd0
	vpunpckhdq	$xd3,$xd2,$xd2
	vpunpcklqdq	$xt3,$xt2,$xd1		# "d0"
	vpunpckhqdq	$xt3,$xt2,$xt2		# "d1"
	vpunpcklqdq	$xd2,$xd0,$xd3		# "d2"
	vpunpckhqdq	$xd2,$xd0,$xd0		# "d3"

($xd0,$xd1,$xd2,$xd3,$xt2)=($xd1,$xt2,$xd3,$xd0,$xd2);

	vshufi32x4	\$0x44,$xd0,$xc0,$xt3	# "de-interlace" further
	vshufi32x4	\$0xee,$xd0,$xc0,$xd0
	vshufi32x4	\$0x44,$xd1,$xc1,$xc0
	vshufi32x4	\$0xee,$xd1,$xc1,$xd1
	vshufi32x4	\$0x44,$xd2,$xc2,$xc1
	vshufi32x4	\$0xee,$xd2,$xc2,$xd2
	vshufi32x4	\$0x44,$xd3,$xc3,$xc2
	vshufi32x4	\$0xee,$xd3,$xc3,$xd3

($xc0,$xc1,$xc2,$xc3,$xt3)=($xt3,$xc0,$xc1,$xc2,$xc3);

	vshufi32x4	\$0x88,$xc0,$xa0,$xt0	# "de-interlace" further
	vshufi32x4	\$0xdd,$xc0,$xa0,$xa0
	vshufi32x4	\$0x88,$xd0,$xb0,$xc0
	vshufi32x4	\$0xdd,$xd0,$xb0,$xd0
	vshufi32x4	\$0x88,$xc1,$xa1,$xt1
	vshufi32x4	\$0xdd,$xc1,$xa1,$xa1
	vshufi32x4	\$0x88,$xd1,$xb1,$xc1
	vshufi32x4	\$0xdd,$xd1,$xb1,$xd1
	vshufi32x4	\$0x88,$xc2,$xa2,$xt2
	vshufi32x4	\$0xdd,$xc2,$xa2,$xa2
	vshufi32x4	\$0x88,$xd2,$xb2,$xc2
	vshufi32x4	\$0xdd,$xd2,$xb2,$xd2
	vshufi32x4	\$0x88,$xc3,$xa3,$xt3
	vshufi32x4	\$0xdd,$xc3,$xa3,$xa3
	vshufi32x4	\$0x88,$xd3,$xb3,$xc3
	vshufi32x4	\$0xdd,$xd3,$xb3,$xd3

($xa0,$xa1,$xa2,$xa3,$xb0,$xb1,$xb2,$xb3)=
($xt0,$xt1,$xt2,$xt3,$xa0,$xa1,$xa2,$xa3);

($xa0,$xb0,$xc0,$xd0, $xa1,$xb1,$xc1,$xd1,
 $xa2,$xb2,$xc2,$xd2, $xa3,$xb3,$xc3,$xd3) =
($xa0,$xa1,$xa2,$xa3, $xb0,$xb1,$xb2,$xb3,
 $xc0,$xc1,$xc2,$xc3, $xd0,$xd1,$xd2,$xd3);
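# Transposition complete: $xa0,$xb0,$xc0,$xd0 now hold blocks 0-3,
# $xa1..$xd1 blocks 4-7, and so on, which is exactly the order the
# stores below emit them in (sixteen blocks, 0x400 bytes per outer
# iteration).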
	vpxord		0x00($inp),$xa0,$xa0	# xor with input
	vpxord		0x40($inp),$xb0,$xb0
	vpxord		0x80($inp),$xc0,$xc0
	vpxord		0xc0($inp),$xd0,$xd0
	vmovdqu32	$xa0,0x00($out)
	vmovdqu32	$xb0,0x40($out)
	vmovdqu32	$xc0,0x80($out)
	vmovdqu32	$xd0,0xc0($out)

	vpxord		0x100($inp),$xa1,$xa1
	vpxord		0x140($inp),$xb1,$xb1
	vpxord		0x180($inp),$xc1,$xc1
	vpxord		0x1c0($inp),$xd1,$xd1
	vmovdqu32	$xa1,0x100($out)
	vmovdqu32	$xb1,0x140($out)
	vmovdqu32	$xc1,0x180($out)
	vmovdqu32	$xd1,0x1c0($out)

	vpxord		0x200($inp),$xa2,$xa2
	vpxord		0x240($inp),$xb2,$xb2
	vpxord		0x280($inp),$xc2,$xc2
	vpxord		0x2c0($inp),$xd2,$xd2
	vmovdqu32	$xa2,0x200($out)
	vmovdqu32	$xb2,0x240($out)
	vmovdqu32	$xc2,0x280($out)
	vmovdqu32	$xd2,0x2c0($out)

	vpxord		0x300($inp),$xa3,$xa3
	vpxord		0x340($inp),$xb3,$xb3
	vpxord		0x380($inp),$xc3,$xc3
	vpxord		0x3c0($inp),$xd3,$xd3
	lea		0x400($inp),$inp
	vmovdqu32	$xa3,0x300($out)
	vmovdqu32	$xb3,0x340($out)
	vmovdqu32	$xc3,0x380($out)
	vmovdqu32	$xd3,0x3c0($out)
	lea		0x400($out),$out
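# Tail for the final, partial iteration. The tail is entered with $out
# biased by -$inp, so ($out,$inp) keeps addressing the correct output
# byte while $inp alone tracks how much keystream has been consumed
# (the lea at the end of the cascade undoes the bias). Each surviving
# register is spent 64 bytes at a time for as long as the jb checks
# find at least another 64 bytes of input.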
	jb	.Less_than_64_16x
	vpxord	($inp),$xa0,$xa0	# xor with input
	vmovdqu32	$xa0,($out,$inp)

	jb	.Less_than_64_16x
	vpxord	($inp),$xb0,$xb0
	vmovdqu32	$xb0,($out,$inp)

	jb	.Less_than_64_16x
	vpxord	($inp),$xc0,$xc0
	vmovdqu32	$xc0,($out,$inp)

	jb	.Less_than_64_16x
	vpxord	($inp),$xd0,$xd0
	vmovdqu32	$xd0,($out,$inp)

	jb	.Less_than_64_16x
	vpxord	($inp),$xa1,$xa1
	vmovdqu32	$xa1,($out,$inp)

	jb	.Less_than_64_16x
	vpxord	($inp),$xb1,$xb1
	vmovdqu32	$xb1,($out,$inp)

	jb	.Less_than_64_16x
	vpxord	($inp),$xc1,$xc1
	vmovdqu32	$xc1,($out,$inp)

	jb	.Less_than_64_16x
	vpxord	($inp),$xd1,$xd1
	vmovdqu32	$xd1,($out,$inp)

	jb	.Less_than_64_16x
	vpxord	($inp),$xa2,$xa2
	vmovdqu32	$xa2,($out,$inp)

	jb	.Less_than_64_16x
	vpxord	($inp),$xb2,$xb2
	vmovdqu32	$xb2,($out,$inp)

	jb	.Less_than_64_16x
	vpxord	($inp),$xc2,$xc2
	vmovdqu32	$xc2,($out,$inp)

	jb	.Less_than_64_16x
	vpxord	($inp),$xd2,$xd2
	vmovdqu32	$xd2,($out,$inp)

	jb	.Less_than_64_16x
	vpxord	($inp),$xa3,$xa3
	vmovdqu32	$xa3,($out,$inp)

	jb	.Less_than_64_16x
	vpxord	($inp),$xb3,$xb3
	vmovdqu32	$xb3,($out,$inp)

	jb	.Less_than_64_16x
	vpxord	($inp),$xc3,$xc3
	vmovdqu32	$xc3,($out,$inp)

	vmovdqa32	$xa0,0x00(%rsp)
	lea	($out,$inp),$out
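# Fewer than 64 bytes remain: the cascade leaves the next unused
# keystream block in $xa0, which is parked on the stack so the
# leftovers can be xored byte by byte; the stack copy is wiped with
# zeroes afterwards so no keystream is left behind.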
.Loop_tail16x:
	movzb	($inp,%r10),%eax
	movzb	(%rsp,%r10),%ecx
	lea	1(%r10),%r10
	xor	%ecx,%eax
	mov	%al,-1($out,%r10)
	dec	$len
	jnz	.Loop_tail16x
	vpxord	$xa0,$xa0,$xa0
	vmovdqa32	$xa0,0(%rsp)
$code.=<<___	if ($win64);
	movaps	-0xa8(%r9),%xmm6
	movaps	-0x98(%r9),%xmm7
	movaps	-0x88(%r9),%xmm8
	movaps	-0x78(%r9),%xmm9
	movaps	-0x68(%r9),%xmm10
	movaps	-0x58(%r9),%xmm11
	movaps	-0x48(%r9),%xmm12
	movaps	-0x38(%r9),%xmm13
	movaps	-0x28(%r9),%xmm14
	movaps	-0x18(%r9),%xmm15

.cfi_def_cfa_register	%rsp

.size	ChaCha20_16x,.-ChaCha20_16x

# EXCEPTION_DISPOSITION handler (EXCEPTION_RECORD *rec,ULONG64 frame,
#		CONTEXT *context,DISPATCHER_CONTEXT *disp)
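# Win64 structured-exception-handling glue. If an exception is raised
# while %rip sits between a function's prologue and epilogue, the
# matching handler below recreates that function's frame: it recovers
# the non-volatile GPRs and/or XMM registers that the prologue saved on
# the stack, writes them back into the CONTEXT record, and then calls
# RtlVirtualUnwind so the unwind can continue as if the function had
# returned normally.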
.extern	__imp_RtlVirtualUnwind
.type	se_handler,\@abi-omnipotent

	mov	120($context),%rax	# pull context->Rax
	mov	248($context),%rbx	# pull context->Rip

	mov	8($disp),%rsi		# disp->ImageBase
	mov	56($disp),%r11		# disp->HandlerData

	lea	.Lctr32_body(%rip),%r10
	cmp	%r10,%rbx		# context->Rip<.Lprologue
	jb	.Lcommon_seh_tail

	mov	152($context),%rax	# pull context->Rsp

	lea	.Lno_data(%rip),%r10	# epilogue label
	cmp	%r10,%rbx		# context->Rip>=.Lepilogue
	jae	.Lcommon_seh_tail

	lea	64+24+48(%rax),%rax

	mov	%rbx,144($context)	# restore context->Rbx
	mov	%rbp,160($context)	# restore context->Rbp
	mov	%r12,216($context)	# restore context->R12
	mov	%r13,224($context)	# restore context->R13
	mov	%r14,232($context)	# restore context->R14
	mov	%r15,240($context)	# restore context->R15

	mov	%rax,152($context)	# restore context->Rsp
	mov	%rsi,168($context)	# restore context->Rsi
	mov	%rdi,176($context)	# restore context->Rdi

	mov	40($disp),%rdi		# disp->ContextRecord
	mov	$context,%rsi		# context
	mov	\$154,%ecx		# sizeof(CONTEXT) in quadwords
	.long	0xa548f3fc		# cld; rep movsq
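# The .long above emits the raw bytes fc,f3,48,a5, i.e. "cld; rep
# movsq": with %ecx=154 it copies the whole 1232-byte CONTEXT from
# %rsi into disp->ContextRecord at %rdi before the virtual unwind.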
	xor	%rcx,%rcx		# arg1, UNW_FLAG_NHANDLER
	mov	8(%rsi),%rdx		# arg2, disp->ImageBase
	mov	0(%rsi),%r8		# arg3, disp->ControlPc
	mov	16(%rsi),%r9		# arg4, disp->FunctionEntry
	mov	40(%rsi),%r10		# disp->ContextRecord
	lea	56(%rsi),%r11		# &disp->HandlerData
	lea	24(%rsi),%r12		# &disp->EstablisherFrame
	mov	%r10,32(%rsp)		# arg5
	mov	%r11,40(%rsp)		# arg6
	mov	%r12,48(%rsp)		# arg7
	mov	%rcx,56(%rsp)		# arg8, (NULL)
	call	*__imp_RtlVirtualUnwind(%rip)

	mov	\$1,%eax		# ExceptionContinueSearch

.size	se_handler,.-se_handler

.type	ssse3_handler,\@abi-omnipotent

	mov	120($context),%rax	# pull context->Rax
	mov	248($context),%rbx	# pull context->Rip

	mov	8($disp),%rsi		# disp->ImageBase
	mov	56($disp),%r11		# disp->HandlerData

	mov	0(%r11),%r10d		# HandlerData[0]
	lea	(%rsi,%r10),%r10	# prologue label
	cmp	%r10,%rbx		# context->Rip<prologue label
	jb	.Lcommon_seh_tail

	mov	192($context),%rax	# pull context->R9

	mov	4(%r11),%r10d		# HandlerData[1]
	lea	(%rsi,%r10),%r10	# epilogue label
	cmp	%r10,%rbx		# context->Rip>=epilogue label
	jae	.Lcommon_seh_tail

	lea	-0x28(%rax),%rsi
	lea	512($context),%rdi	# &context.Xmm6
	mov	\$4,%ecx		# 2 xmm registers, 4 quadwords
	.long	0xa548f3fc		# cld; rep movsq

	jmp	.Lcommon_seh_tail
.size	ssse3_handler,.-ssse3_handler
.type	full_handler,\@abi-omnipotent

	mov	120($context),%rax	# pull context->Rax
	mov	248($context),%rbx	# pull context->Rip

	mov	8($disp),%rsi		# disp->ImageBase
	mov	56($disp),%r11		# disp->HandlerData

	mov	0(%r11),%r10d		# HandlerData[0]
	lea	(%rsi,%r10),%r10	# prologue label
	cmp	%r10,%rbx		# context->Rip<prologue label
	jb	.Lcommon_seh_tail

	mov	192($context),%rax	# pull context->R9

	mov	4(%r11),%r10d		# HandlerData[1]
	lea	(%rsi,%r10),%r10	# epilogue label
	cmp	%r10,%rbx		# context->Rip>=epilogue label
	jae	.Lcommon_seh_tail

	lea	-0xa8(%rax),%rsi
	lea	512($context),%rdi	# &context.Xmm6
	mov	\$20,%ecx		# 10 xmm registers, 20 quadwords
	.long	0xa548f3fc		# cld; rep movsq

	jmp	.Lcommon_seh_tail
.size	full_handler,.-full_handler
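# SEH tables: every .LSEH_begin/.LSEH_end pair brackets one function's
# code, and its .LSEH_info record tells the loader which handler to run
# and (in HandlerData[]) which prologue/epilogue labels that handler
# should test %rip against.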
	.rva	.LSEH_begin_ChaCha20_ctr32
	.rva	.LSEH_end_ChaCha20_ctr32
	.rva	.LSEH_info_ChaCha20_ctr32

	.rva	.LSEH_begin_ChaCha20_ssse3
	.rva	.LSEH_end_ChaCha20_ssse3
	.rva	.LSEH_info_ChaCha20_ssse3

	.rva	.LSEH_begin_ChaCha20_4x
	.rva	.LSEH_end_ChaCha20_4x
	.rva	.LSEH_info_ChaCha20_4x

$code.=<<___ if ($avx);
	.rva	.LSEH_begin_ChaCha20_4xop
	.rva	.LSEH_end_ChaCha20_4xop
	.rva	.LSEH_info_ChaCha20_4xop

$code.=<<___ if ($avx>1);
	.rva	.LSEH_begin_ChaCha20_8x
	.rva	.LSEH_end_ChaCha20_8x
	.rva	.LSEH_info_ChaCha20_8x

$code.=<<___ if ($avx>2);
	.rva	.LSEH_begin_ChaCha20_avx512
	.rva	.LSEH_end_ChaCha20_avx512
	.rva	.LSEH_info_ChaCha20_avx512

	.rva	.LSEH_begin_ChaCha20_16x
	.rva	.LSEH_end_ChaCha20_16x
	.rva	.LSEH_info_ChaCha20_16x

.LSEH_info_ChaCha20_ctr32:

.LSEH_info_ChaCha20_ssse3:
	.rva	.Lssse3_body,.Lssse3_epilogue

.LSEH_info_ChaCha20_4x:
	.rva	.L4x_body,.L4x_epilogue

$code.=<<___ if ($avx);
.LSEH_info_ChaCha20_4xop:
	.rva	.L4xop_body,.L4xop_epilogue	# HandlerData[]

$code.=<<___ if ($avx>1);
.LSEH_info_ChaCha20_8x:
	.rva	.L8x_body,.L8x_epilogue		# HandlerData[]

$code.=<<___ if ($avx>2);
.LSEH_info_ChaCha20_avx512:
	.rva	.Lavx512_body,.Lavx512_epilogue	# HandlerData[]

.LSEH_info_ChaCha20_16x:
	.rva	.L16x_body,.L16x_epilogue	# HandlerData[]
foreach (split("\n",$code)) {
	s/\`([^\`]*)\`/eval $1/ge;

	s/%x#%[yz]/%x/g;	# "down-shift"
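	# The first substitution evaluates any `...` arithmetic embedded
	# in the generated text; the "down-shift" one rewrites %x#%ymmN
	# and %x#%zmmN into %xmmN, which is how the %x#$reg notation used
	# above yields xmm views of ymm/zmm registers.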