# Copyright 2016 The OpenSSL Project Authors. All Rights Reserved.
#
# Licensed under the OpenSSL license (the "License"). You may not use
# this file except in compliance with the License. You can obtain a copy
# in the file LICENSE in the source distribution or at
# https://www.openssl.org/source/license.html
#
# ====================================================================
# Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
# project. The module is, however, dual licensed under OpenSSL and
# CRYPTOGAMS licenses depending on where you obtain it. For further
# details see http://www.openssl.org/~appro/cryptogams/.
# ====================================================================
# ChaCha20 for x86_64.
#
# Add AVX512F code path.
#
# Performance in cycles per byte out of large buffer.
#
#		IALU/gcc 4.8(i)	1xSSSE3/SSE2	4xSSSE3	    NxAVX(v)
#
# P4		9.48/+99%	-/22.7(ii)	-
# Core2		7.83/+55%	7.90/8.08	4.35
# Westmere	7.19/+50%	5.60/6.70	3.00
# Sandy Bridge	8.31/+42%	5.45/6.76	2.72
# Ivy Bridge	6.71/+46%	5.40/6.49	2.41
# Haswell	5.92/+43%	5.20/6.45	2.42	1.23
# Skylake[-X]	5.87/+39%	4.70/-		2.31	1.19[0.57]
# Silvermont	12.0/+33%	7.75/7.40	7.03(iii)
# Knights L	11.7/-		-		9.60(iii)	0.80
# Goldmont	10.6/+17%	5.10/-		3.28
# Sledgehammer	7.28/+52%	-/14.2(ii)	-
# Bulldozer	9.66/+28%	9.85/11.1	3.06(iv)
# Ryzen		5.96/+50%	5.19/-		2.40	2.09
# VIA Nano	10.5/+46%	6.72/8.60	6.05
# (i)	compared to older gcc 3.x one can observe >2x improvement on
#	most platforms;
# (ii)	as can be seen, SSE2 performance is too low on legacy
#	processors; NxSSE2 results are naturally better, but not
#	impressively better than IALU ones, which is why you won't
#	find SSE2 code below;
# (iii)	this is not an optimal result for Atom because of MSROM
#	limitations, SSE2 can do better, but the gain is considered too
#	low to justify the [maintenance] effort;
# (iv)	Bulldozer actually executes the 4xXOP code path, which delivers 2.20;
# (v)	8xAVX2 or 16xAVX-512, whichever is best applicable;
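#
# As a rough sanity check on the table (an estimate, not a measured
# figure): at 1.23 cycles per byte a single 3GHz Haswell core would
# deliver about 3e9/1.23 ~= 2.4GB/s of ChaCha20.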
if ($flavour =~ /\./) { $output = $flavour; undef $flavour; }

$win64=0; $win64=1 if ($flavour =~ /[nm]asm|mingw64/ || $output =~ /\.asm$/);

$0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
( $xlate="${dir}x86_64-xlate.pl" and -f $xlate ) or
( $xlate="${dir}../../perlasm/x86_64-xlate.pl" and -f $xlate) or
die "can't locate x86_64-xlate.pl";

if (`$ENV{CC} -Wa,-v -c -o /dev/null -x assembler /dev/null 2>&1`
		=~ /GNU assembler version ([2-9]\.[0-9]+)/) {
	$avx = ($1>=2.19) + ($1>=2.22) + ($1>=2.25);
}

if (!$avx && $win64 && ($flavour =~ /nasm/ || $ENV{ASM} =~ /nasm/) &&
	   `nasm -v 2>&1` =~ /NASM version ([2-9]\.[0-9]+)(?:\.([0-9]+))?/) {
	$avx = ($1>=2.09) + ($1>=2.10) + ($1>=2.12);
	$avx += 1 if ($1==2.11 && $2>=8);
}

if (!$avx && $win64 && ($flavour =~ /masm/ || $ENV{ASM} =~ /ml64/) &&
	   `ml64 2>&1` =~ /Version ([0-9]+)\./) {
	$avx = ($1>=10) + ($1>=11);
}

if (!$avx && `$ENV{CC} -v 2>&1` =~ /((?:^clang|LLVM) version|.*based on LLVM) ([3-9]\.[0-9]+)/) {
	$avx = ($2>=3.0) + ($2>3.0);
}
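
# To illustrate the version arithmetic above: each comparison contributes
# 1 when true, so $avx ends up as a small capability level. E.g. with GNU
# assembler 2.23, ($1>=2.19)+($1>=2.22)+($1>=2.25) = 1+1+0 = 2, which
# permits the AVX2 but not the AVX512 code paths.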
open OUT,"| \"$^X\" \"$xlate\" $flavour \"$output\"";
*STDOUT=*OUT;

# input parameter block
($out,$inp,$len,$key,$counter)=("%rdi","%rsi","%rdx","%rcx","%r8");

$code.=<<___;
.text

.extern OPENSSL_ia32cap_P
.align	64
.Lzero:
.long	0,0,0,0
.Lone:
.long	1,0,0,0
.Linc:
.long	0,1,2,3
.Lfour:
.long	4,4,4,4
.Lincy:
.long	0,2,4,6,1,3,5,7
.Leight:
.long	8,8,8,8,8,8,8,8
.Lrot16:
.byte	0x2,0x3,0x0,0x1, 0x6,0x7,0x4,0x5, 0xa,0xb,0x8,0x9, 0xe,0xf,0xc,0xd
.Lrot24:
.byte	0x3,0x0,0x1,0x2, 0x7,0x4,0x5,0x6, 0xb,0x8,0x9,0xa, 0xf,0xc,0xd,0xe
.Lsigma:
.asciz	"expand 32-byte k"
.align	64
.Lzeroz:
.long	0,0,0,0, 1,0,0,0, 2,0,0,0, 3,0,0,0
.Lfourz:
.long	4,0,0,0, 4,0,0,0, 4,0,0,0, 4,0,0,0
.Lincz:
.long	0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15
.Lsixteen:
.long	16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16
.asciz	"ChaCha20 for x86_64, CRYPTOGAMS by <appro\@openssl.org>"
___
sub AUTOLOAD()		# thunk [simplified] 32-bit style perlasm
{ my $opcode = $AUTOLOAD; $opcode =~ s/.*:://;
  my $arg = pop;
    $arg = "\$$arg" if ($arg*1 eq $arg);
    $code .= "\t$opcode\t".join(',',$arg,reverse @_)."\n";
}
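
# A sketch of what the thunk above does: any call to an otherwise
# undefined sub is appended to $code as the instruction of the same
# name, with perlasm's destination-first operands reversed into AT&T
# order and bare numbers turned into immediates, e.g.
#
#	&add ("%eax","%ebx");	# appends "\tadd\t%ebx,%eax\n"
#	&rol ("%eax",16);	# appends "\trol\t\$16,%eax\n"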
@x=("%eax","%ebx","%ecx","%edx",map("%r${_}d",(8..11)),
    "%nox","%nox","%nox","%nox",map("%r${_}d",(12..15)));
@t=("%esi","%edi");

sub ROUND {			# critical path is 24 cycles per round
my ($a0,$b0,$c0,$d0)=@_;
my ($a1,$b1,$c1,$d1)=map(($_&~3)+(($_+1)&3),($a0,$b0,$c0,$d0));
my ($a2,$b2,$c2,$d2)=map(($_&~3)+(($_+1)&3),($a1,$b1,$c1,$d1));
my ($a3,$b3,$c3,$d3)=map(($_&~3)+(($_+1)&3),($a2,$b2,$c2,$d2));
my ($xc,$xc_)=map("\"$_\"",@t);
my @x=map("\"$_\"",@x);
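	# The map above steps each quarter-round to the next column or
	# diagonal: ($_&~3) keeps the row base while (($_+1)&3) rotates
	# within the row. E.g. &ROUND(0,4,8,12) covers the columns
	# (0,4,8,12),(1,5,9,13),(2,6,10,14),(3,7,11,15), while
	# &ROUND(0,5,10,15) yields the diagonals (0,5,10,15),
	# (1,6,11,12),(2,7,8,13),(3,4,9,14).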
	# Consider order in which variables are addressed by their
	# index:
	#
	#	a   b   c   d
	#
	#	0   4   8  12 < even round
	#	1   5   9  13
	#	2   6  10  14
	#	3   7  11  15
	#	0   5  10  15 < odd round
	#	1   6  11  12
	#	2   7   8  13
	#	3   4   9  14
	#
	# 'a', 'b' and 'd's are permanently allocated in registers,
	# @x[0..7,12..15], while 'c's are maintained in memory. If
	# you observe the 'c' column, you'll notice that a pair of 'c's
	# is invariant between rounds. This means that we have to
	# reload them only once per round, in the middle. This is why
	# you'll see a bunch of 'c' stores and loads in the middle,
	# but none in the beginning or end.

	# Normally instructions would be interleaved to favour in-order
	# execution. Generally out-of-order cores manage it gracefully,
	# but not this time for some reason. As in-order execution
	# cores are a dying breed, old Atom being the only one around,
	# instructions are left uninterleaved. Besides, Atom is better
	# off executing the 1xSSSE3 code anyway...
178 "&add (@x[$a0],@x[$b0])", # Q1
179 "&xor (@x[$d0],@x[$a0])",
181 "&add (@x[$a1],@x[$b1])", # Q2
182 "&xor (@x[$d1],@x[$a1])",
185 "&add ($xc,@x[$d0])",
186 "&xor (@x[$b0],$xc)",
188 "&add ($xc_,@x[$d1])",
189 "&xor (@x[$b1],$xc_)",
192 "&add (@x[$a0],@x[$b0])",
193 "&xor (@x[$d0],@x[$a0])",
195 "&add (@x[$a1],@x[$b1])",
196 "&xor (@x[$d1],@x[$a1])",
199 "&add ($xc,@x[$d0])",
200 "&xor (@x[$b0],$xc)",
202 "&add ($xc_,@x[$d1])",
203 "&xor (@x[$b1],$xc_)",
206 "&mov (\"4*$c0(%rsp)\",$xc)", # reload pair of 'c's
207 "&mov (\"4*$c1(%rsp)\",$xc_)",
208 "&mov ($xc,\"4*$c2(%rsp)\")",
209 "&mov ($xc_,\"4*$c3(%rsp)\")",
211 "&add (@x[$a2],@x[$b2])", # Q3
212 "&xor (@x[$d2],@x[$a2])",
214 "&add (@x[$a3],@x[$b3])", # Q4
215 "&xor (@x[$d3],@x[$a3])",
218 "&add ($xc,@x[$d2])",
219 "&xor (@x[$b2],$xc)",
221 "&add ($xc_,@x[$d3])",
222 "&xor (@x[$b3],$xc_)",
225 "&add (@x[$a2],@x[$b2])",
226 "&xor (@x[$d2],@x[$a2])",
228 "&add (@x[$a3],@x[$b3])",
229 "&xor (@x[$d3],@x[$a3])",
232 "&add ($xc,@x[$d2])",
233 "&xor (@x[$b2],$xc)",
235 "&add ($xc_,@x[$d3])",
236 "&xor (@x[$b3],$xc_)",
########################################################################
# Generic code path that handles all lengths on pre-SSSE3 processors.
.globl	ChaCha20_ctr32
.type	ChaCha20_ctr32,\@function,5
	mov	OPENSSL_ia32cap_P+4(%rip),%r10
$code.=<<___	if ($avx>2);
	bt	\$48,%r10		# check for AVX512F
	test	\$`1<<(41-32)`,%r10d
.cfi_adjust_cfa_offset	64+24
	#movdqa	.Lsigma(%rip),%xmm0
	movdqu	16($key),%xmm2
	movdqu	($counter),%xmm3
	movdqa	.Lone(%rip),%xmm4
	#movdqa	%xmm0,4*0(%rsp)		# key[0]
	movdqa	%xmm1,4*4(%rsp)		# key[1]
	movdqa	%xmm2,4*8(%rsp)		# key[2]
	movdqa	%xmm3,4*12(%rsp)	# key[3]
	mov	$len,%rbp		# reassign $len
	mov	\$0x61707865,@x[0]	# 'expa'
	mov	\$0x3320646e,@x[1]	# 'nd 3'
	mov	\$0x79622d32,@x[2]	# '2-by'
	mov	\$0x6b206574,@x[3]	# 'te k'
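	# (These magic constants are simply "expand 32-byte k" read as
	# four little-endian 32-bit words, e.g. "expa" is the bytes
	# 0x65,0x78,0x70,0x61, i.e. 0x61707865.)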
	mov	4*13(%rsp),@x[13]
	mov	4*14(%rsp),@x[14]
	mov	4*15(%rsp),@x[15]
	mov	%rbp,64+0(%rsp)		# save len
	mov	$inp,64+8(%rsp)		# save inp
	movq	%xmm2,%rsi		# "@x[8]"
	mov	$out,64+16(%rsp)	# save out
	shr	\$32,%rdi		# "@x[9]"

	foreach (&ROUND (0, 4, 8,12)) { eval; }
	foreach (&ROUND	(0, 5,10,15)) { eval; }
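	# One &ROUND(0,4,8,12) plus one &ROUND(0,5,10,15) emit a column
	# round followed by a diagonal round, i.e. one ChaCha "double
	# round"; the enclosing loop runs ten of these for the full 20
	# rounds.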
	mov	@t[1],4*9(%rsp)		# modulo-scheduled
	mov	64(%rsp),%rbp		# load len
	mov	64+8(%rsp),$inp		# load inp
	paddd	%xmm4,%xmm3		# increment counter
	mov	64+16(%rsp),$out	# load out
	add	\$0x61707865,@x[0]	# 'expa'
	add	\$0x3320646e,@x[1]	# 'nd 3'
	add	\$0x79622d32,@x[2]	# '2-by'
	add	\$0x6b206574,@x[3]	# 'te k'
	add	4*12(%rsp),@x[12]
	add	4*13(%rsp),@x[13]
	add	4*14(%rsp),@x[14]
	add	4*15(%rsp),@x[15]
	paddd	4*8(%rsp),%xmm1
	xor	4*0($inp),@x[0]		# xor with input
	movdqu	4*8($inp),%xmm0
	xor	4*12($inp),@x[12]
	xor	4*13($inp),@x[13]
	xor	4*14($inp),@x[14]
	xor	4*15($inp),@x[15]
	lea	4*16($inp),$inp		# inp+=64
	movdqa	%xmm2,4*8(%rsp)
	movd	%xmm3,4*12(%rsp)
	mov	@x[0],4*0($out)		# write output
	movdqu	%xmm0,4*8($out)
	mov	@x[12],4*12($out)
	mov	@x[13],4*13($out)
	mov	@x[14],4*14($out)
	mov	@x[15],4*15($out)
	lea	4*16($out),$out		# out+=64
	movdqa	%xmm1,4*8(%rsp)
	mov	@x[12],4*12(%rsp)
	mov	@x[13],4*13(%rsp)
	mov	@x[14],4*14(%rsp)
	mov	@x[15],4*15(%rsp)
	movzb	($inp,%rbx),%eax
	movzb	(%rsp,%rbx),%edx
	mov	%al,-1($out,%rbx)
	lea	64+24+48(%rsp),%rsi
.cfi_def_cfa_register	%rsp
.size	ChaCha20_ctr32,.-ChaCha20_ctr32
########################################################################
# SSSE3 code path that handles shorter lengths.
my ($a,$b,$c,$d,$t,$t1,$rot16,$rot24)=map("%xmm$_",(0..7));

sub SSSE3ROUND {	# critical path is 20 "SIMD ticks" per round
	&paddd	($a,$b);
	&pxor	($d,$a);
	&pshufb	($d,$rot16);

	&paddd	($c,$d);
	&pxor	($b,$c);
	&movdqa	($t,$b);
	&psrld	($b,20);
	&pslld	($t,12);
	&por	($b,$t);

	&paddd	($a,$b);
	&pxor	($d,$a);
	&pshufb	($d,$rot24);

	&paddd	($c,$d);
	&pxor	($b,$c);
	&movdqa	($t,$b);
	&psrld	($b,25);
	&pslld	($t,7);
	&por	($b,$t);
}

my $xframe = $win64 ? 32+8 : 8;
.type	ChaCha20_ssse3,\@function,5
	mov	%rsp,%r9		# frame pointer
.cfi_def_cfa_register	%r9
$code.=<<___	if ($avx);
	test	\$`1<<(43-32)`,%r10d
	jnz	.LChaCha20_4xop		# XOP is fastest even if we use 1/4
	cmp	\$128,$len		# we might throw away some data,
	ja	.LChaCha20_4x		# but overall it won't be slower
	sub	\$64+$xframe,%rsp
$code.=<<___	if ($win64);
	movaps	%xmm6,-0x28(%r9)
	movaps	%xmm7,-0x18(%r9)
	movdqa	.Lsigma(%rip),$a
	movdqa	.Lrot16(%rip),$rot16
	movdqa	.Lrot24(%rip),$rot24
	mov	\$10,$counter		# reuse $counter
	movdqa	.Lone(%rip),$d

	&pshufd	($c,$c,0b01001110);
	&pshufd	($b,$b,0b00111001);
	&pshufd	($d,$d,0b10010011);

	&pshufd	($c,$c,0b01001110);
	&pshufd	($b,$b,0b10010011);
	&pshufd	($d,$d,0b00111001);
533 &jnz (".Loop_ssse3");
545 movdqu 0x10($inp),$t1
546 pxor $t,$a # xor with input
549 movdqu 0x30($inp),$t1
550 lea 0x40($inp),$inp # inp+=64
554 movdqu $a,0x00($out) # write output
558 lea 0x40($out),$out # out+=64
561 jnz .Loop_outer_ssse3
571 xor $counter,$counter
574 movzb ($inp,$counter),%eax
575 movzb (%rsp,$counter),%ecx
576 lea 1($counter),$counter
578 mov %al,-1($out,$counter)
584 $code.=<<___ if ($win64);
585 movaps -0x28(%r9),%xmm6
586 movaps -0x18(%r9),%xmm7
590 .cfi_def_cfa_register %rsp
594 .size ChaCha20_ssse3,.-ChaCha20_ssse3
598 ########################################################################
599 # SSSE3 code path that handles longer messages.
601 # assign variables to favor Atom front-end
602 my ($xd0,$xd1,$xd2,$xd3, $xt0,$xt1,$xt2,$xt3,
603 $xa0,$xa1,$xa2,$xa3, $xb0,$xb1,$xb2,$xb3)=map("%xmm$_",(0..15));
604 my @xx=($xa0,$xa1,$xa2,$xa3, $xb0,$xb1,$xb2,$xb3,
605 "%nox","%nox","%nox","%nox", $xd0,$xd1,$xd2,$xd3);
607 sub SSSE3_lane_ROUND {
608 my ($a0,$b0,$c0,$d0)=@_;
609 my ($a1,$b1,$c1,$d1)=map(($_&~3)+(($_+1)&3),($a0,$b0,$c0,$d0));
610 my ($a2,$b2,$c2,$d2)=map(($_&~3)+(($_+1)&3),($a1,$b1,$c1,$d1));
611 my ($a3,$b3,$c3,$d3)=map(($_&~3)+(($_+1)&3),($a2,$b2,$c2,$d2));
612 my ($xc,$xc_,$t0,$t1)=map("\"$_\"",$xt0,$xt1,$xt2,$xt3);
613 my @x=map("\"$_\"",@xx);
	# Consider order in which variables are addressed by their
	# index:
	#
	#	a   b   c   d
	#
	#	0   4   8  12 < even round
	#	1   5   9  13
	#	2   6  10  14
	#	3   7  11  15
	#	0   5  10  15 < odd round
	#	1   6  11  12
	#	2   7   8  13
	#	3   4   9  14
	#
	# 'a', 'b' and 'd's are permanently allocated in registers,
	# @x[0..7,12..15], while 'c's are maintained in memory. If
	# you observe the 'c' column, you'll notice that a pair of 'c's
	# is invariant between rounds. This means that we have to
	# reload them only once per round, in the middle. This is why
	# you'll see a bunch of 'c' stores and loads in the middle,
	# but none in the beginning or end.

	(
	"&paddd	(@x[$a0],@x[$b0])",	# Q1
	 "&paddd	(@x[$a1],@x[$b1])",	# Q2
	"&pxor	(@x[$d0],@x[$a0])",
	 "&pxor	(@x[$d1],@x[$a1])",
	"&pshufb	(@x[$d0],$t1)",
	 "&pshufb	(@x[$d1],$t1)",

	"&paddd	($xc,@x[$d0])",
	 "&paddd	($xc_,@x[$d1])",
	"&pxor	(@x[$b0],$xc)",
	 "&pxor	(@x[$b1],$xc_)",
	"&movdqa	($t0,@x[$b0])",
	"&pslld	(@x[$b0],12)",
	"&psrld	($t0,20)",
	 "&movdqa	($t1,@x[$b1])",
	 "&pslld	(@x[$b1],12)",
	"&por	(@x[$b0],$t0)",
	 "&psrld	($t1,20)",
	"&movdqa	($t0,'(%r11)')",	# .Lrot24(%rip)
	 "&por	(@x[$b1],$t1)",

	"&paddd	(@x[$a0],@x[$b0])",
	 "&paddd	(@x[$a1],@x[$b1])",
	"&pxor	(@x[$d0],@x[$a0])",
	 "&pxor	(@x[$d1],@x[$a1])",
	"&pshufb	(@x[$d0],$t0)",
	 "&pshufb	(@x[$d1],$t0)",

	"&paddd	($xc,@x[$d0])",
	 "&paddd	($xc_,@x[$d1])",
	"&pxor	(@x[$b0],$xc)",
	 "&pxor	(@x[$b1],$xc_)",
	"&movdqa	($t1,@x[$b0])",
	"&pslld	(@x[$b0],7)",
	"&psrld	($t1,25)",
	 "&movdqa	($t0,@x[$b1])",
	 "&pslld	(@x[$b1],7)",
	"&por	(@x[$b0],$t1)",
	 "&psrld	($t0,25)",
	"&movdqa	($t1,'(%r10)')",	# .Lrot16(%rip)
	 "&por	(@x[$b1],$t0)",

	"&movdqa	(\"`16*($c0-8)`(%rsp)\",$xc)",	# reload pair of 'c's
	"&movdqa	(\"`16*($c1-8)`(%rsp)\",$xc_)",
	"&movdqa	($xc,\"`16*($c2-8)`(%rsp)\")",
	"&movdqa	($xc_,\"`16*($c3-8)`(%rsp)\")",

	"&paddd	(@x[$a2],@x[$b2])",	# Q3
	 "&paddd	(@x[$a3],@x[$b3])",	# Q4
	"&pxor	(@x[$d2],@x[$a2])",
	 "&pxor	(@x[$d3],@x[$a3])",
	"&pshufb	(@x[$d2],$t1)",
	 "&pshufb	(@x[$d3],$t1)",

	"&paddd	($xc,@x[$d2])",
	 "&paddd	($xc_,@x[$d3])",
	"&pxor	(@x[$b2],$xc)",
	 "&pxor	(@x[$b3],$xc_)",
	"&movdqa	($t0,@x[$b2])",
	"&pslld	(@x[$b2],12)",
	"&psrld	($t0,20)",
	 "&movdqa	($t1,@x[$b3])",
	 "&pslld	(@x[$b3],12)",
	"&por	(@x[$b2],$t0)",
	 "&psrld	($t1,20)",
	"&movdqa	($t0,'(%r11)')",	# .Lrot24(%rip)
	 "&por	(@x[$b3],$t1)",

	"&paddd	(@x[$a2],@x[$b2])",
	 "&paddd	(@x[$a3],@x[$b3])",
	"&pxor	(@x[$d2],@x[$a2])",
	 "&pxor	(@x[$d3],@x[$a3])",
	"&pshufb	(@x[$d2],$t0)",
	 "&pshufb	(@x[$d3],$t0)",

	"&paddd	($xc,@x[$d2])",
	 "&paddd	($xc_,@x[$d3])",
	"&pxor	(@x[$b2],$xc)",
	 "&pxor	(@x[$b3],$xc_)",
	"&movdqa	($t1,@x[$b2])",
	"&pslld	(@x[$b2],7)",
	"&psrld	($t1,25)",
	 "&movdqa	($t0,@x[$b3])",
	 "&pslld	(@x[$b3],7)",
	"&por	(@x[$b2],$t1)",
	 "&psrld	($t0,25)",
	"&movdqa	($t1,'(%r10)')",	# .Lrot16(%rip)
	 "&por	(@x[$b3],$t0)"
	);
}
my $xframe = $win64 ? 0xa8 : 8;
.type	ChaCha20_4x,\@function,5
	mov	%rsp,%r9		# frame pointer
.cfi_def_cfa_register	%r9
$code.=<<___	if ($avx>1);
	shr	\$32,%r10		# OPENSSL_ia32cap_P+8
	test	\$`1<<5`,%r10		# test AVX2
	and	\$`1<<26|1<<22`,%r11	# isolate XSAVE+MOVBE
	cmp	\$`1<<22`,%r11		# check for MOVBE without XSAVE
	je	.Ldo_sse3_after_all	# to detect Atom
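	# The combination tested above serves as an Atom detector:
	# MOVBE without XSAVE is characteristic of Atom/Silvermont
	# class cores, where the 1xSSSE3 path wins over 4xSSSE3 (see
	# footnote (iii) in the performance table).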
	sub	\$0x140+$xframe,%rsp

	################ stack layout
	# +0x00		SIMD equivalent of @x[8-12]
	# +0x40		constant copy of key[0-2] smashed by lanes
	# +0x100	SIMD counters (with nonce smashed by lanes)

$code.=<<___	if ($win64);
	movaps	%xmm6,-0xa8(%r9)
	movaps	%xmm7,-0x98(%r9)
	movaps	%xmm8,-0x88(%r9)
	movaps	%xmm9,-0x78(%r9)
	movaps	%xmm10,-0x68(%r9)
	movaps	%xmm11,-0x58(%r9)
	movaps	%xmm12,-0x48(%r9)
	movaps	%xmm13,-0x38(%r9)
	movaps	%xmm14,-0x28(%r9)
	movaps	%xmm15,-0x18(%r9)
	movdqa	.Lsigma(%rip),$xa3	# key[0]
	movdqu	($key),$xb3		# key[1]
	movdqu	16($key),$xt3		# key[2]
	movdqu	($counter),$xd3		# key[3]
	lea	0x100(%rsp),%rcx	# size optimization
	lea	.Lrot16(%rip),%r10
	lea	.Lrot24(%rip),%r11

	pshufd	\$0x00,$xa3,$xa0	# smash key by lanes...
	pshufd	\$0x55,$xa3,$xa1
	movdqa	$xa0,0x40(%rsp)		# ... and offload
	pshufd	\$0xaa,$xa3,$xa2
	movdqa	$xa1,0x50(%rsp)
	pshufd	\$0xff,$xa3,$xa3
	movdqa	$xa2,0x60(%rsp)
	movdqa	$xa3,0x70(%rsp)

	pshufd	\$0x00,$xb3,$xb0
	pshufd	\$0x55,$xb3,$xb1
	movdqa	$xb0,0x80-0x100(%rcx)
	pshufd	\$0xaa,$xb3,$xb2
	movdqa	$xb1,0x90-0x100(%rcx)
	pshufd	\$0xff,$xb3,$xb3
	movdqa	$xb2,0xa0-0x100(%rcx)
	movdqa	$xb3,0xb0-0x100(%rcx)

	pshufd	\$0x00,$xt3,$xt0	# "$xc0"
	pshufd	\$0x55,$xt3,$xt1	# "$xc1"
	movdqa	$xt0,0xc0-0x100(%rcx)
	pshufd	\$0xaa,$xt3,$xt2	# "$xc2"
	movdqa	$xt1,0xd0-0x100(%rcx)
	pshufd	\$0xff,$xt3,$xt3	# "$xc3"
	movdqa	$xt2,0xe0-0x100(%rcx)
	movdqa	$xt3,0xf0-0x100(%rcx)

	pshufd	\$0x00,$xd3,$xd0
	pshufd	\$0x55,$xd3,$xd1
	paddd	.Linc(%rip),$xd0	# don't save counters yet
	pshufd	\$0xaa,$xd3,$xd2
	movdqa	$xd1,0x110-0x100(%rcx)
	pshufd	\$0xff,$xd3,$xd3
	movdqa	$xd2,0x120-0x100(%rcx)
	movdqa	$xd3,0x130-0x100(%rcx)
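	# For reference, "smashing by lanes" relies on pshufd's
	# immediate selector: two bits per destination dword, so e.g.
	# the immediate 0x55 (0b01010101) replicates dword 1 of the
	# source into all four lanes, giving each of the four parallel
	# blocks its own copy of that state word.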
	movdqa	0x40(%rsp),$xa0		# re-load smashed key
	movdqa	0x50(%rsp),$xa1
	movdqa	0x60(%rsp),$xa2
	movdqa	0x70(%rsp),$xa3
	movdqa	0x80-0x100(%rcx),$xb0
	movdqa	0x90-0x100(%rcx),$xb1
	movdqa	0xa0-0x100(%rcx),$xb2
	movdqa	0xb0-0x100(%rcx),$xb3
	movdqa	0xc0-0x100(%rcx),$xt0	# "$xc0"
	movdqa	0xd0-0x100(%rcx),$xt1	# "$xc1"
	movdqa	0xe0-0x100(%rcx),$xt2	# "$xc2"
	movdqa	0xf0-0x100(%rcx),$xt3	# "$xc3"
	movdqa	0x100-0x100(%rcx),$xd0
	movdqa	0x110-0x100(%rcx),$xd1
	movdqa	0x120-0x100(%rcx),$xd2
	movdqa	0x130-0x100(%rcx),$xd3
	paddd	.Lfour(%rip),$xd0	# next SIMD counters
	movdqa	$xt2,0x20(%rsp)		# SIMD equivalent of "@x[10]"
	movdqa	$xt3,0x30(%rsp)		# SIMD equivalent of "@x[11]"
	movdqa	(%r10),$xt3		# .Lrot16(%rip)
	movdqa	$xd0,0x100-0x100(%rcx)	# save SIMD counters

	foreach (&SSSE3_lane_ROUND(0, 4, 8,12)) { eval; }
	foreach (&SSSE3_lane_ROUND(0, 5,10,15)) { eval; }

	paddd	0x40(%rsp),$xa0		# accumulate key material
	paddd	0x50(%rsp),$xa1
	paddd	0x60(%rsp),$xa2
	paddd	0x70(%rsp),$xa3

	movdqa	$xa0,$xt2		# "de-interlace" data
	punpcklqdq	$xa2,$xa0	# "a0"
	punpcklqdq	$xt3,$xt2	# "a2"
	punpckhqdq	$xa2,$xa1	# "a1"
	punpckhqdq	$xt3,$xa3	# "a3"

	($xa2,$xt2)=($xt2,$xa2);
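	# The punpck sequence is part of a 4x4 dword transpose: e.g.
	# with rows A=(a0 a1 a2 a3) and B=(b0 b1 b2 b3), punpckldq
	# gives (a0 b0 a1 b1), and a following punpcklqdq with
	# (c0 d0 c1 d1) yields (a0 b0 c0 d0), i.e. one word from each
	# of the four lanes, turning per-lane vectors back into
	# contiguous 64-byte keystream blocks.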
	paddd	0x80-0x100(%rcx),$xb0
	paddd	0x90-0x100(%rcx),$xb1
	paddd	0xa0-0x100(%rcx),$xb2
	paddd	0xb0-0x100(%rcx),$xb3

	movdqa	$xa0,0x00(%rsp)		# offload $xaN
	movdqa	$xa1,0x10(%rsp)
	movdqa	0x20(%rsp),$xa0		# "xc2"
	movdqa	0x30(%rsp),$xa1		# "xc3"

	punpcklqdq	$xb2,$xb0	# "b0"
	punpcklqdq	$xt3,$xt2	# "b2"
	punpckhqdq	$xb2,$xb1	# "b1"
	punpckhqdq	$xt3,$xb3	# "b3"

	($xb2,$xt2)=($xt2,$xb2);
	my ($xc0,$xc1,$xc2,$xc3)=($xt0,$xt1,$xa0,$xa1);

	paddd	0xc0-0x100(%rcx),$xc0
	paddd	0xd0-0x100(%rcx),$xc1
	paddd	0xe0-0x100(%rcx),$xc2
	paddd	0xf0-0x100(%rcx),$xc3

	movdqa	$xa2,0x20(%rsp)		# keep offloading $xaN
	movdqa	$xa3,0x30(%rsp)

	punpcklqdq	$xc2,$xc0	# "c0"
	punpcklqdq	$xt3,$xt2	# "c2"
	punpckhqdq	$xc2,$xc1	# "c1"
	punpckhqdq	$xt3,$xc3	# "c3"

	($xc2,$xt2)=($xt2,$xc2);
	($xt0,$xt1)=($xa2,$xa3);	# use $xaN as temporary

	paddd	0x100-0x100(%rcx),$xd0
	paddd	0x110-0x100(%rcx),$xd1
	paddd	0x120-0x100(%rcx),$xd2
	paddd	0x130-0x100(%rcx),$xd3

	punpcklqdq	$xd2,$xd0	# "d0"
	punpcklqdq	$xt3,$xt2	# "d2"
	punpckhqdq	$xd2,$xd1	# "d1"
	punpckhqdq	$xt3,$xd3	# "d3"

	($xd2,$xt2)=($xt2,$xd2);

	movdqu	0x00($inp),$xt0		# xor with input
	movdqu	0x10($inp),$xt1
	movdqu	0x20($inp),$xt2
	movdqu	0x30($inp),$xt3
	pxor	0x00(%rsp),$xt0		# $xaN is offloaded, remember?
	movdqu	$xt0,0x00($out)
	movdqu	0x40($inp),$xt0
	movdqu	$xt1,0x10($out)
	movdqu	0x50($inp),$xt1
	movdqu	$xt2,0x20($out)
	movdqu	0x60($inp),$xt2
	movdqu	$xt3,0x30($out)
	movdqu	0x70($inp),$xt3
	lea	0x80($inp),$inp		# size optimization
	movdqu	$xt0,0x40($out)
	movdqu	0x00($inp),$xt0
	movdqu	$xt1,0x50($out)
	movdqu	0x10($inp),$xt1
	movdqu	$xt2,0x60($out)
	movdqu	0x20($inp),$xt2
	movdqu	$xt3,0x70($out)
	lea	0x80($out),$out		# size optimization
	movdqu	0x30($inp),$xt3
	movdqu	$xt0,0x00($out)
	movdqu	0x40($inp),$xt0
	movdqu	$xt1,0x10($out)
	movdqu	0x50($inp),$xt1
	movdqu	$xt2,0x20($out)
	movdqu	0x60($inp),$xt2
	movdqu	$xt3,0x30($out)
	movdqu	0x70($inp),$xt3
	lea	0x80($inp),$inp		# inp+=64*4
	pxor	0x30(%rsp),$xt0
	movdqu	$xt0,0x40($out)
	movdqu	$xt1,0x50($out)
	movdqu	$xt2,0x60($out)
	movdqu	$xt3,0x70($out)
	lea	0x80($out),$out		# out+=64*4

	#movdqa	0x00(%rsp),$xt0		# $xaN is offloaded, remember?
	#movdqa	$xt0,0x00(%rsp)
	movdqa	$xb0,0x10(%rsp)
	movdqa	$xc0,0x20(%rsp)
	movdqa	$xd0,0x30(%rsp)

	movdqu	0x00($inp),$xt0		# xor with input
	movdqu	0x10($inp),$xt1
	movdqu	0x20($inp),$xt2
	movdqu	0x30($inp),$xt3
	pxor	0x00(%rsp),$xt0		# $xaN is offloaded, remember?
	movdqu	$xt0,0x00($out)
	movdqu	$xt1,0x10($out)
	movdqu	$xt2,0x20($out)
	movdqu	$xt3,0x30($out)

	movdqa	0x10(%rsp),$xt0		# $xaN is offloaded, remember?
	lea	0x40($inp),$inp		# inp+=64*1
	movdqa	$xt0,0x00(%rsp)
	movdqa	$xb1,0x10(%rsp)
	lea	0x40($out),$out		# out+=64*1
	movdqa	$xc1,0x20(%rsp)
	sub	\$64,$len		# len-=64*1
	movdqa	$xd1,0x30(%rsp)

	movdqu	0x00($inp),$xt0		# xor with input
	movdqu	0x10($inp),$xt1
	movdqu	0x20($inp),$xt2
	movdqu	0x30($inp),$xt3
	pxor	0x00(%rsp),$xt0		# $xaN is offloaded, remember?
	movdqu	$xt0,0x00($out)
	movdqu	0x40($inp),$xt0
	movdqu	$xt1,0x10($out)
	movdqu	0x50($inp),$xt1
	movdqu	$xt2,0x20($out)
	movdqu	0x60($inp),$xt2
	movdqu	$xt3,0x30($out)
	movdqu	0x70($inp),$xt3
	pxor	0x10(%rsp),$xt0
	movdqu	$xt0,0x40($out)
	movdqu	$xt1,0x50($out)
	movdqu	$xt2,0x60($out)
	movdqu	$xt3,0x70($out)

	movdqa	0x20(%rsp),$xt0		# $xaN is offloaded, remember?
	lea	0x80($inp),$inp		# inp+=64*2
	movdqa	$xt0,0x00(%rsp)
	movdqa	$xb2,0x10(%rsp)
	lea	0x80($out),$out		# out+=64*2
	movdqa	$xc2,0x20(%rsp)
	sub	\$128,$len		# len-=64*2
	movdqa	$xd2,0x30(%rsp)

	movdqu	0x00($inp),$xt0		# xor with input
	movdqu	0x10($inp),$xt1
	movdqu	0x20($inp),$xt2
	movdqu	0x30($inp),$xt3
	pxor	0x00(%rsp),$xt0		# $xaN is offloaded, remember?
	movdqu	$xt0,0x00($out)
	movdqu	0x40($inp),$xt0
	movdqu	$xt1,0x10($out)
	movdqu	0x50($inp),$xt1
	movdqu	$xt2,0x20($out)
	movdqu	0x60($inp),$xt2
	movdqu	$xt3,0x30($out)
	movdqu	0x70($inp),$xt3
	lea	0x80($inp),$inp		# size optimization
	pxor	0x10(%rsp),$xt0
	movdqu	$xt0,0x40($out)
	movdqu	0x00($inp),$xt0
	movdqu	$xt1,0x50($out)
	movdqu	0x10($inp),$xt1
	movdqu	$xt2,0x60($out)
	movdqu	0x20($inp),$xt2
	movdqu	$xt3,0x70($out)
	lea	0x80($out),$out		# size optimization
	movdqu	0x30($inp),$xt3
	pxor	0x20(%rsp),$xt0
	movdqu	$xt0,0x00($out)
	movdqu	$xt1,0x10($out)
	movdqu	$xt2,0x20($out)
	movdqu	$xt3,0x30($out)

	movdqa	0x30(%rsp),$xt0		# $xaN is offloaded, remember?
	lea	0x40($inp),$inp		# inp+=64*3
	movdqa	$xt0,0x00(%rsp)
	movdqa	$xb3,0x10(%rsp)
	lea	0x40($out),$out		# out+=64*3
	movdqa	$xc3,0x20(%rsp)
	sub	\$192,$len		# len-=64*3
	movdqa	$xd3,0x30(%rsp)

	movzb	($inp,%r10),%eax
	movzb	(%rsp,%r10),%ecx
	mov	%al,-1($out,%r10)

$code.=<<___	if ($win64);
	movaps	-0xa8(%r9),%xmm6
	movaps	-0x98(%r9),%xmm7
	movaps	-0x88(%r9),%xmm8
	movaps	-0x78(%r9),%xmm9
	movaps	-0x68(%r9),%xmm10
	movaps	-0x58(%r9),%xmm11
	movaps	-0x48(%r9),%xmm12
	movaps	-0x38(%r9),%xmm13
	movaps	-0x28(%r9),%xmm14
	movaps	-0x18(%r9),%xmm15
.cfi_def_cfa_register	%rsp
.size	ChaCha20_4x,.-ChaCha20_4x

########################################################################
# XOP code path that handles all lengths.
# There is some "anomaly" observed depending on instructions' size or
# alignment. If you look closely at the code below, you'll notice that
# the argument order sometimes varies. The order affects instruction
# encoding by making it larger, and such fiddling gives a 5% performance
# improvement. This is on an FX-4100...
my ($xb0,$xb1,$xb2,$xb3, $xd0,$xd1,$xd2,$xd3,
    $xa0,$xa1,$xa2,$xa3, $xt0,$xt1,$xt2,$xt3)=map("%xmm$_",(0..15));
my @xx=($xa0,$xa1,$xa2,$xa3, $xb0,$xb1,$xb2,$xb3,
	$xt0,$xt1,$xt2,$xt3, $xd0,$xd1,$xd2,$xd3);
sub XOP_lane_ROUND {
my ($a0,$b0,$c0,$d0)=@_;
my ($a1,$b1,$c1,$d1)=map(($_&~3)+(($_+1)&3),($a0,$b0,$c0,$d0));
my ($a2,$b2,$c2,$d2)=map(($_&~3)+(($_+1)&3),($a1,$b1,$c1,$d1));
my ($a3,$b3,$c3,$d3)=map(($_&~3)+(($_+1)&3),($a2,$b2,$c2,$d2));
my @x=map("\"$_\"",@xx);

	(
	"&vpaddd	(@x[$a0],@x[$a0],@x[$b0])",	# Q1
	 "&vpaddd	(@x[$a1],@x[$a1],@x[$b1])",	# Q2
	  "&vpaddd	(@x[$a2],@x[$a2],@x[$b2])",	# Q3
	   "&vpaddd	(@x[$a3],@x[$a3],@x[$b3])",	# Q4
	"&vpxor	(@x[$d0],@x[$a0],@x[$d0])",
	 "&vpxor	(@x[$d1],@x[$a1],@x[$d1])",
	  "&vpxor	(@x[$d2],@x[$a2],@x[$d2])",
	   "&vpxor	(@x[$d3],@x[$a3],@x[$d3])",
	"&vprotd	(@x[$d0],@x[$d0],16)",
	 "&vprotd	(@x[$d1],@x[$d1],16)",
	  "&vprotd	(@x[$d2],@x[$d2],16)",
	   "&vprotd	(@x[$d3],@x[$d3],16)",

	"&vpaddd	(@x[$c0],@x[$c0],@x[$d0])",
	 "&vpaddd	(@x[$c1],@x[$c1],@x[$d1])",
	  "&vpaddd	(@x[$c2],@x[$c2],@x[$d2])",
	   "&vpaddd	(@x[$c3],@x[$c3],@x[$d3])",
	"&vpxor	(@x[$b0],@x[$c0],@x[$b0])",
	 "&vpxor	(@x[$b1],@x[$c1],@x[$b1])",
	  "&vpxor	(@x[$b2],@x[$b2],@x[$c2])",	# flip
	   "&vpxor	(@x[$b3],@x[$b3],@x[$c3])",	# flip
	"&vprotd	(@x[$b0],@x[$b0],12)",
	 "&vprotd	(@x[$b1],@x[$b1],12)",
	  "&vprotd	(@x[$b2],@x[$b2],12)",
	   "&vprotd	(@x[$b3],@x[$b3],12)",

	"&vpaddd	(@x[$a0],@x[$b0],@x[$a0])",	# flip
	 "&vpaddd	(@x[$a1],@x[$b1],@x[$a1])",	# flip
	  "&vpaddd	(@x[$a2],@x[$a2],@x[$b2])",
	   "&vpaddd	(@x[$a3],@x[$a3],@x[$b3])",
	"&vpxor	(@x[$d0],@x[$a0],@x[$d0])",
	 "&vpxor	(@x[$d1],@x[$a1],@x[$d1])",
	  "&vpxor	(@x[$d2],@x[$a2],@x[$d2])",
	   "&vpxor	(@x[$d3],@x[$a3],@x[$d3])",
	"&vprotd	(@x[$d0],@x[$d0],8)",
	 "&vprotd	(@x[$d1],@x[$d1],8)",
	  "&vprotd	(@x[$d2],@x[$d2],8)",
	   "&vprotd	(@x[$d3],@x[$d3],8)",

	"&vpaddd	(@x[$c0],@x[$c0],@x[$d0])",
	 "&vpaddd	(@x[$c1],@x[$c1],@x[$d1])",
	  "&vpaddd	(@x[$c2],@x[$c2],@x[$d2])",
	   "&vpaddd	(@x[$c3],@x[$c3],@x[$d3])",
	"&vpxor	(@x[$b0],@x[$c0],@x[$b0])",
	 "&vpxor	(@x[$b1],@x[$c1],@x[$b1])",
	  "&vpxor	(@x[$b2],@x[$b2],@x[$c2])",	# flip
	   "&vpxor	(@x[$b3],@x[$b3],@x[$c3])",	# flip
	"&vprotd	(@x[$b0],@x[$b0],7)",
	 "&vprotd	(@x[$b1],@x[$b1],7)",
	  "&vprotd	(@x[$b2],@x[$b2],7)",
	   "&vprotd	(@x[$b3],@x[$b3],7)"
	);
}
my $xframe = $win64 ? 0xa8 : 8;
.type	ChaCha20_4xop,\@function,5
	mov	%rsp,%r9		# frame pointer
.cfi_def_cfa_register	%r9
	sub	\$0x140+$xframe,%rsp

	################ stack layout
	# +0x00		SIMD equivalent of @x[8-12]
	# +0x40		constant copy of key[0-2] smashed by lanes
	# +0x100	SIMD counters (with nonce smashed by lanes)

$code.=<<___	if ($win64);
	movaps	%xmm6,-0xa8(%r9)
	movaps	%xmm7,-0x98(%r9)
	movaps	%xmm8,-0x88(%r9)
	movaps	%xmm9,-0x78(%r9)
	movaps	%xmm10,-0x68(%r9)
	movaps	%xmm11,-0x58(%r9)
	movaps	%xmm12,-0x48(%r9)
	movaps	%xmm13,-0x38(%r9)
	movaps	%xmm14,-0x28(%r9)
	movaps	%xmm15,-0x18(%r9)
	vmovdqa	.Lsigma(%rip),$xa3	# key[0]
	vmovdqu	($key),$xb3		# key[1]
	vmovdqu	16($key),$xt3		# key[2]
	vmovdqu	($counter),$xd3		# key[3]
	lea	0x100(%rsp),%rcx	# size optimization

	vpshufd	\$0x00,$xa3,$xa0	# smash key by lanes...
	vpshufd	\$0x55,$xa3,$xa1
	vmovdqa	$xa0,0x40(%rsp)		# ... and offload
	vpshufd	\$0xaa,$xa3,$xa2
	vmovdqa	$xa1,0x50(%rsp)
	vpshufd	\$0xff,$xa3,$xa3
	vmovdqa	$xa2,0x60(%rsp)
	vmovdqa	$xa3,0x70(%rsp)

	vpshufd	\$0x00,$xb3,$xb0
	vpshufd	\$0x55,$xb3,$xb1
	vmovdqa	$xb0,0x80-0x100(%rcx)
	vpshufd	\$0xaa,$xb3,$xb2
	vmovdqa	$xb1,0x90-0x100(%rcx)
	vpshufd	\$0xff,$xb3,$xb3
	vmovdqa	$xb2,0xa0-0x100(%rcx)
	vmovdqa	$xb3,0xb0-0x100(%rcx)

	vpshufd	\$0x00,$xt3,$xt0	# "$xc0"
	vpshufd	\$0x55,$xt3,$xt1	# "$xc1"
	vmovdqa	$xt0,0xc0-0x100(%rcx)
	vpshufd	\$0xaa,$xt3,$xt2	# "$xc2"
	vmovdqa	$xt1,0xd0-0x100(%rcx)
	vpshufd	\$0xff,$xt3,$xt3	# "$xc3"
	vmovdqa	$xt2,0xe0-0x100(%rcx)
	vmovdqa	$xt3,0xf0-0x100(%rcx)

	vpshufd	\$0x00,$xd3,$xd0
	vpshufd	\$0x55,$xd3,$xd1
	vpaddd	.Linc(%rip),$xd0,$xd0	# don't save counters yet
	vpshufd	\$0xaa,$xd3,$xd2
	vmovdqa	$xd1,0x110-0x100(%rcx)
	vpshufd	\$0xff,$xd3,$xd3
	vmovdqa	$xd2,0x120-0x100(%rcx)
	vmovdqa	$xd3,0x130-0x100(%rcx)

	vmovdqa	0x40(%rsp),$xa0		# re-load smashed key
	vmovdqa	0x50(%rsp),$xa1
	vmovdqa	0x60(%rsp),$xa2
	vmovdqa	0x70(%rsp),$xa3
	vmovdqa	0x80-0x100(%rcx),$xb0
	vmovdqa	0x90-0x100(%rcx),$xb1
	vmovdqa	0xa0-0x100(%rcx),$xb2
	vmovdqa	0xb0-0x100(%rcx),$xb3
	vmovdqa	0xc0-0x100(%rcx),$xt0	# "$xc0"
	vmovdqa	0xd0-0x100(%rcx),$xt1	# "$xc1"
	vmovdqa	0xe0-0x100(%rcx),$xt2	# "$xc2"
	vmovdqa	0xf0-0x100(%rcx),$xt3	# "$xc3"
	vmovdqa	0x100-0x100(%rcx),$xd0
	vmovdqa	0x110-0x100(%rcx),$xd1
	vmovdqa	0x120-0x100(%rcx),$xd2
	vmovdqa	0x130-0x100(%rcx),$xd3
	vpaddd	.Lfour(%rip),$xd0,$xd0	# next SIMD counters
	vmovdqa	$xd0,0x100-0x100(%rcx)	# save SIMD counters

	foreach (&XOP_lane_ROUND(0, 4, 8,12)) { eval; }
	foreach (&XOP_lane_ROUND(0, 5,10,15)) { eval; }

	vpaddd	0x40(%rsp),$xa0,$xa0	# accumulate key material
	vpaddd	0x50(%rsp),$xa1,$xa1
	vpaddd	0x60(%rsp),$xa2,$xa2
	vpaddd	0x70(%rsp),$xa3,$xa3

	vmovdqa	$xt2,0x20(%rsp)		# offload $xc2,3
	vmovdqa	$xt3,0x30(%rsp)

	vpunpckldq	$xa1,$xa0,$xt2	# "de-interlace" data
	vpunpckldq	$xa3,$xa2,$xt3
	vpunpckhdq	$xa1,$xa0,$xa0
	vpunpckhdq	$xa3,$xa2,$xa2
	vpunpcklqdq	$xt3,$xt2,$xa1	# "a0"
	vpunpckhqdq	$xt3,$xt2,$xt2	# "a1"
	vpunpcklqdq	$xa2,$xa0,$xa3	# "a2"
	vpunpckhqdq	$xa2,$xa0,$xa0	# "a3"

	($xa0,$xa1,$xa2,$xa3,$xt2)=($xa1,$xt2,$xa3,$xa0,$xa2);

	vpaddd	0x80-0x100(%rcx),$xb0,$xb0
	vpaddd	0x90-0x100(%rcx),$xb1,$xb1
	vpaddd	0xa0-0x100(%rcx),$xb2,$xb2
	vpaddd	0xb0-0x100(%rcx),$xb3,$xb3

	vmovdqa	$xa0,0x00(%rsp)		# offload $xa0,1
	vmovdqa	$xa1,0x10(%rsp)
	vmovdqa	0x20(%rsp),$xa0		# "xc2"
	vmovdqa	0x30(%rsp),$xa1		# "xc3"

	vpunpckldq	$xb1,$xb0,$xt2
	vpunpckldq	$xb3,$xb2,$xt3
	vpunpckhdq	$xb1,$xb0,$xb0
	vpunpckhdq	$xb3,$xb2,$xb2
	vpunpcklqdq	$xt3,$xt2,$xb1	# "b0"
	vpunpckhqdq	$xt3,$xt2,$xt2	# "b1"
	vpunpcklqdq	$xb2,$xb0,$xb3	# "b2"
	vpunpckhqdq	$xb2,$xb0,$xb0	# "b3"

	($xb0,$xb1,$xb2,$xb3,$xt2)=($xb1,$xt2,$xb3,$xb0,$xb2);
	my ($xc0,$xc1,$xc2,$xc3)=($xt0,$xt1,$xa0,$xa1);

	vpaddd	0xc0-0x100(%rcx),$xc0,$xc0
	vpaddd	0xd0-0x100(%rcx),$xc1,$xc1
	vpaddd	0xe0-0x100(%rcx),$xc2,$xc2
	vpaddd	0xf0-0x100(%rcx),$xc3,$xc3

	vpunpckldq	$xc1,$xc0,$xt2
	vpunpckldq	$xc3,$xc2,$xt3
	vpunpckhdq	$xc1,$xc0,$xc0
	vpunpckhdq	$xc3,$xc2,$xc2
	vpunpcklqdq	$xt3,$xt2,$xc1	# "c0"
	vpunpckhqdq	$xt3,$xt2,$xt2	# "c1"
	vpunpcklqdq	$xc2,$xc0,$xc3	# "c2"
	vpunpckhqdq	$xc2,$xc0,$xc0	# "c3"

	($xc0,$xc1,$xc2,$xc3,$xt2)=($xc1,$xt2,$xc3,$xc0,$xc2);

	vpaddd	0x100-0x100(%rcx),$xd0,$xd0
	vpaddd	0x110-0x100(%rcx),$xd1,$xd1
	vpaddd	0x120-0x100(%rcx),$xd2,$xd2
	vpaddd	0x130-0x100(%rcx),$xd3,$xd3

	vpunpckldq	$xd1,$xd0,$xt2
	vpunpckldq	$xd3,$xd2,$xt3
	vpunpckhdq	$xd1,$xd0,$xd0
	vpunpckhdq	$xd3,$xd2,$xd2
	vpunpcklqdq	$xt3,$xt2,$xd1	# "d0"
	vpunpckhqdq	$xt3,$xt2,$xt2	# "d1"
	vpunpcklqdq	$xd2,$xd0,$xd3	# "d2"
	vpunpckhqdq	$xd2,$xd0,$xd0	# "d3"

	($xd0,$xd1,$xd2,$xd3,$xt2)=($xd1,$xt2,$xd3,$xd0,$xd2);
	($xa0,$xa1)=($xt2,$xt3);

	vmovdqa	0x00(%rsp),$xa0		# restore $xa0,1
	vmovdqa	0x10(%rsp),$xa1

	vpxor	0x00($inp),$xa0,$xa0	# xor with input
	vpxor	0x10($inp),$xb0,$xb0
	vpxor	0x20($inp),$xc0,$xc0
	vpxor	0x30($inp),$xd0,$xd0
	vpxor	0x40($inp),$xa1,$xa1
	vpxor	0x50($inp),$xb1,$xb1
	vpxor	0x60($inp),$xc1,$xc1
	vpxor	0x70($inp),$xd1,$xd1
	lea	0x80($inp),$inp		# size optimization
	vpxor	0x00($inp),$xa2,$xa2
	vpxor	0x10($inp),$xb2,$xb2
	vpxor	0x20($inp),$xc2,$xc2
	vpxor	0x30($inp),$xd2,$xd2
	vpxor	0x40($inp),$xa3,$xa3
	vpxor	0x50($inp),$xb3,$xb3
	vpxor	0x60($inp),$xc3,$xc3
	vpxor	0x70($inp),$xd3,$xd3
	lea	0x80($inp),$inp		# inp+=64*4

	vmovdqu	$xa0,0x00($out)
	vmovdqu	$xb0,0x10($out)
	vmovdqu	$xc0,0x20($out)
	vmovdqu	$xd0,0x30($out)
	vmovdqu	$xa1,0x40($out)
	vmovdqu	$xb1,0x50($out)
	vmovdqu	$xc1,0x60($out)
	vmovdqu	$xd1,0x70($out)
	lea	0x80($out),$out		# size optimization
	vmovdqu	$xa2,0x00($out)
	vmovdqu	$xb2,0x10($out)
	vmovdqu	$xc2,0x20($out)
	vmovdqu	$xd2,0x30($out)
	vmovdqu	$xa3,0x40($out)
	vmovdqu	$xb3,0x50($out)
	vmovdqu	$xc3,0x60($out)
	vmovdqu	$xd3,0x70($out)
	lea	0x80($out),$out		# out+=64*4

	jae	.L192_or_more4xop
	jae	.L128_or_more4xop
	jae	.L64_or_more4xop

	vmovdqa	$xa0,0x00(%rsp)
	vmovdqa	$xb0,0x10(%rsp)
	vmovdqa	$xc0,0x20(%rsp)
	vmovdqa	$xd0,0x30(%rsp)

	vpxor	0x00($inp),$xa0,$xa0	# xor with input
	vpxor	0x10($inp),$xb0,$xb0
	vpxor	0x20($inp),$xc0,$xc0
	vpxor	0x30($inp),$xd0,$xd0
	vmovdqu	$xa0,0x00($out)
	vmovdqu	$xb0,0x10($out)
	vmovdqu	$xc0,0x20($out)
	vmovdqu	$xd0,0x30($out)

	lea	0x40($inp),$inp		# inp+=64*1
	vmovdqa	$xa1,0x00(%rsp)
	vmovdqa	$xb1,0x10(%rsp)
	lea	0x40($out),$out		# out+=64*1
	vmovdqa	$xc1,0x20(%rsp)
	sub	\$64,$len		# len-=64*1
	vmovdqa	$xd1,0x30(%rsp)

	vpxor	0x00($inp),$xa0,$xa0	# xor with input
	vpxor	0x10($inp),$xb0,$xb0
	vpxor	0x20($inp),$xc0,$xc0
	vpxor	0x30($inp),$xd0,$xd0
	vpxor	0x40($inp),$xa1,$xa1
	vpxor	0x50($inp),$xb1,$xb1
	vpxor	0x60($inp),$xc1,$xc1
	vpxor	0x70($inp),$xd1,$xd1

	vmovdqu	$xa0,0x00($out)
	vmovdqu	$xb0,0x10($out)
	vmovdqu	$xc0,0x20($out)
	vmovdqu	$xd0,0x30($out)
	vmovdqu	$xa1,0x40($out)
	vmovdqu	$xb1,0x50($out)
	vmovdqu	$xc1,0x60($out)
	vmovdqu	$xd1,0x70($out)

	lea	0x80($inp),$inp		# inp+=64*2
	vmovdqa	$xa2,0x00(%rsp)
	vmovdqa	$xb2,0x10(%rsp)
	lea	0x80($out),$out		# out+=64*2
	vmovdqa	$xc2,0x20(%rsp)
	sub	\$128,$len		# len-=64*2
	vmovdqa	$xd2,0x30(%rsp)

	vpxor	0x00($inp),$xa0,$xa0	# xor with input
	vpxor	0x10($inp),$xb0,$xb0
	vpxor	0x20($inp),$xc0,$xc0
	vpxor	0x30($inp),$xd0,$xd0
	vpxor	0x40($inp),$xa1,$xa1
	vpxor	0x50($inp),$xb1,$xb1
	vpxor	0x60($inp),$xc1,$xc1
	vpxor	0x70($inp),$xd1,$xd1
	lea	0x80($inp),$inp		# size optimization
	vpxor	0x00($inp),$xa2,$xa2
	vpxor	0x10($inp),$xb2,$xb2
	vpxor	0x20($inp),$xc2,$xc2
	vpxor	0x30($inp),$xd2,$xd2

	vmovdqu	$xa0,0x00($out)
	vmovdqu	$xb0,0x10($out)
	vmovdqu	$xc0,0x20($out)
	vmovdqu	$xd0,0x30($out)
	vmovdqu	$xa1,0x40($out)
	vmovdqu	$xb1,0x50($out)
	vmovdqu	$xc1,0x60($out)
	vmovdqu	$xd1,0x70($out)
	lea	0x80($out),$out		# size optimization
	vmovdqu	$xa2,0x00($out)
	vmovdqu	$xb2,0x10($out)
	vmovdqu	$xc2,0x20($out)
	vmovdqu	$xd2,0x30($out)

	lea	0x40($inp),$inp		# inp+=64*3
	vmovdqa	$xa3,0x00(%rsp)
	vmovdqa	$xb3,0x10(%rsp)
	lea	0x40($out),$out		# out+=64*3
	vmovdqa	$xc3,0x20(%rsp)
	sub	\$192,$len		# len-=64*3
	vmovdqa	$xd3,0x30(%rsp)

	movzb	($inp,%r10),%eax
	movzb	(%rsp,%r10),%ecx
	mov	%al,-1($out,%r10)

$code.=<<___	if ($win64);
	movaps	-0xa8(%r9),%xmm6
	movaps	-0x98(%r9),%xmm7
	movaps	-0x88(%r9),%xmm8
	movaps	-0x78(%r9),%xmm9
	movaps	-0x68(%r9),%xmm10
	movaps	-0x58(%r9),%xmm11
	movaps	-0x48(%r9),%xmm12
	movaps	-0x38(%r9),%xmm13
	movaps	-0x28(%r9),%xmm14
	movaps	-0x18(%r9),%xmm15
.cfi_def_cfa_register	%rsp
.size	ChaCha20_4xop,.-ChaCha20_4xop

########################################################################

my ($xb0,$xb1,$xb2,$xb3, $xd0,$xd1,$xd2,$xd3,
    $xa0,$xa1,$xa2,$xa3, $xt0,$xt1,$xt2,$xt3)=map("%ymm$_",(0..15));
my @xx=($xa0,$xa1,$xa2,$xa3, $xb0,$xb1,$xb2,$xb3,
	"%nox","%nox","%nox","%nox", $xd0,$xd1,$xd2,$xd3);

sub AVX2_lane_ROUND {
my ($a0,$b0,$c0,$d0)=@_;
my ($a1,$b1,$c1,$d1)=map(($_&~3)+(($_+1)&3),($a0,$b0,$c0,$d0));
my ($a2,$b2,$c2,$d2)=map(($_&~3)+(($_+1)&3),($a1,$b1,$c1,$d1));
my ($a3,$b3,$c3,$d3)=map(($_&~3)+(($_+1)&3),($a2,$b2,$c2,$d2));
my ($xc,$xc_,$t0,$t1)=map("\"$_\"",$xt0,$xt1,$xt2,$xt3);
my @x=map("\"$_\"",@xx);
	# Consider order in which variables are addressed by their
	# index:
	#
	#	a   b   c   d
	#
	#	0   4   8  12 < even round
	#	1   5   9  13
	#	2   6  10  14
	#	3   7  11  15
	#	0   5  10  15 < odd round
	#	1   6  11  12
	#	2   7   8  13
	#	3   4   9  14
	#
	# 'a', 'b' and 'd's are permanently allocated in registers,
	# @x[0..7,12..15], while 'c's are maintained in memory. If
	# you observe the 'c' column, you'll notice that a pair of 'c's
	# is invariant between rounds. This means that we have to
	# reload them only once per round, in the middle. This is why
	# you'll see a bunch of 'c' stores and loads in the middle,
	# but none in the beginning or end.
1681 "&vpaddd (@x[$a0],@x[$a0],@x[$b0])", # Q1
1682 "&vpxor (@x[$d0],@x[$a0],@x[$d0])",
1683 "&vpshufb (@x[$d0],@x[$d0],$t1)",
1684 "&vpaddd (@x[$a1],@x[$a1],@x[$b1])", # Q2
1685 "&vpxor (@x[$d1],@x[$a1],@x[$d1])",
1686 "&vpshufb (@x[$d1],@x[$d1],$t1)",
1688 "&vpaddd ($xc,$xc,@x[$d0])",
1689 "&vpxor (@x[$b0],$xc,@x[$b0])",
1690 "&vpslld ($t0,@x[$b0],12)",
1691 "&vpsrld (@x[$b0],@x[$b0],20)",
1692 "&vpor (@x[$b0],$t0,@x[$b0])",
1693 "&vbroadcasti128($t0,'(%r11)')", # .Lrot24(%rip)
1694 "&vpaddd ($xc_,$xc_,@x[$d1])",
1695 "&vpxor (@x[$b1],$xc_,@x[$b1])",
1696 "&vpslld ($t1,@x[$b1],12)",
1697 "&vpsrld (@x[$b1],@x[$b1],20)",
1698 "&vpor (@x[$b1],$t1,@x[$b1])",
1700 "&vpaddd (@x[$a0],@x[$a0],@x[$b0])",
1701 "&vpxor (@x[$d0],@x[$a0],@x[$d0])",
1702 "&vpshufb (@x[$d0],@x[$d0],$t0)",
1703 "&vpaddd (@x[$a1],@x[$a1],@x[$b1])",
1704 "&vpxor (@x[$d1],@x[$a1],@x[$d1])",
1705 "&vpshufb (@x[$d1],@x[$d1],$t0)",
1707 "&vpaddd ($xc,$xc,@x[$d0])",
1708 "&vpxor (@x[$b0],$xc,@x[$b0])",
1709 "&vpslld ($t1,@x[$b0],7)",
1710 "&vpsrld (@x[$b0],@x[$b0],25)",
1711 "&vpor (@x[$b0],$t1,@x[$b0])",
1712 "&vbroadcasti128($t1,'(%r10)')", # .Lrot16(%rip)
1713 "&vpaddd ($xc_,$xc_,@x[$d1])",
1714 "&vpxor (@x[$b1],$xc_,@x[$b1])",
1715 "&vpslld ($t0,@x[$b1],7)",
1716 "&vpsrld (@x[$b1],@x[$b1],25)",
1717 "&vpor (@x[$b1],$t0,@x[$b1])",
1719 "&vmovdqa (\"`32*($c0-8)`(%rsp)\",$xc)", # reload pair of 'c's
1720 "&vmovdqa (\"`32*($c1-8)`(%rsp)\",$xc_)",
1721 "&vmovdqa ($xc,\"`32*($c2-8)`(%rsp)\")",
1722 "&vmovdqa ($xc_,\"`32*($c3-8)`(%rsp)\")",
1724 "&vpaddd (@x[$a2],@x[$a2],@x[$b2])", # Q3
1725 "&vpxor (@x[$d2],@x[$a2],@x[$d2])",
1726 "&vpshufb (@x[$d2],@x[$d2],$t1)",
1727 "&vpaddd (@x[$a3],@x[$a3],@x[$b3])", # Q4
1728 "&vpxor (@x[$d3],@x[$a3],@x[$d3])",
1729 "&vpshufb (@x[$d3],@x[$d3],$t1)",
1731 "&vpaddd ($xc,$xc,@x[$d2])",
1732 "&vpxor (@x[$b2],$xc,@x[$b2])",
1733 "&vpslld ($t0,@x[$b2],12)",
1734 "&vpsrld (@x[$b2],@x[$b2],20)",
1735 "&vpor (@x[$b2],$t0,@x[$b2])",
1736 "&vbroadcasti128($t0,'(%r11)')", # .Lrot24(%rip)
1737 "&vpaddd ($xc_,$xc_,@x[$d3])",
1738 "&vpxor (@x[$b3],$xc_,@x[$b3])",
1739 "&vpslld ($t1,@x[$b3],12)",
1740 "&vpsrld (@x[$b3],@x[$b3],20)",
1741 "&vpor (@x[$b3],$t1,@x[$b3])",
1743 "&vpaddd (@x[$a2],@x[$a2],@x[$b2])",
1744 "&vpxor (@x[$d2],@x[$a2],@x[$d2])",
1745 "&vpshufb (@x[$d2],@x[$d2],$t0)",
1746 "&vpaddd (@x[$a3],@x[$a3],@x[$b3])",
1747 "&vpxor (@x[$d3],@x[$a3],@x[$d3])",
1748 "&vpshufb (@x[$d3],@x[$d3],$t0)",
1750 "&vpaddd ($xc,$xc,@x[$d2])",
1751 "&vpxor (@x[$b2],$xc,@x[$b2])",
1752 "&vpslld ($t1,@x[$b2],7)",
1753 "&vpsrld (@x[$b2],@x[$b2],25)",
1754 "&vpor (@x[$b2],$t1,@x[$b2])",
1755 "&vbroadcasti128($t1,'(%r10)')", # .Lrot16(%rip)
1756 "&vpaddd ($xc_,$xc_,@x[$d3])",
1757 "&vpxor (@x[$b3],$xc_,@x[$b3])",
1758 "&vpslld ($t0,@x[$b3],7)",
1759 "&vpsrld (@x[$b3],@x[$b3],25)",
1760 "&vpor (@x[$b3],$t0,@x[$b3])"
my $xframe = $win64 ? 0xa8 : 8;
.type	ChaCha20_8x,\@function,5
	mov	%rsp,%r9		# frame register
.cfi_def_cfa_register	%r9
	sub	\$0x280+$xframe,%rsp

$code.=<<___	if ($win64);
	movaps	%xmm6,-0xa8(%r9)
	movaps	%xmm7,-0x98(%r9)
	movaps	%xmm8,-0x88(%r9)
	movaps	%xmm9,-0x78(%r9)
	movaps	%xmm10,-0x68(%r9)
	movaps	%xmm11,-0x58(%r9)
	movaps	%xmm12,-0x48(%r9)
	movaps	%xmm13,-0x38(%r9)
	movaps	%xmm14,-0x28(%r9)
	movaps	%xmm15,-0x18(%r9)

	################ stack layout
	# +0x00		SIMD equivalent of @x[8-12]
	# +0x80		constant copy of key[0-2] smashed by lanes
	# +0x200	SIMD counters (with nonce smashed by lanes)

	vbroadcasti128	.Lsigma(%rip),$xa3	# key[0]
	vbroadcasti128	($key),$xb3		# key[1]
	vbroadcasti128	16($key),$xt3		# key[2]
	vbroadcasti128	($counter),$xd3		# key[3]
	lea	0x100(%rsp),%rcx	# size optimization
	lea	0x200(%rsp),%rax	# size optimization
	lea	.Lrot16(%rip),%r10
	lea	.Lrot24(%rip),%r11

	vpshufd	\$0x00,$xa3,$xa0	# smash key by lanes...
	vpshufd	\$0x55,$xa3,$xa1
	vmovdqa	$xa0,0x80-0x100(%rcx)	# ... and offload
	vpshufd	\$0xaa,$xa3,$xa2
	vmovdqa	$xa1,0xa0-0x100(%rcx)
	vpshufd	\$0xff,$xa3,$xa3
	vmovdqa	$xa2,0xc0-0x100(%rcx)
	vmovdqa	$xa3,0xe0-0x100(%rcx)

	vpshufd	\$0x00,$xb3,$xb0
	vpshufd	\$0x55,$xb3,$xb1
	vmovdqa	$xb0,0x100-0x100(%rcx)
	vpshufd	\$0xaa,$xb3,$xb2
	vmovdqa	$xb1,0x120-0x100(%rcx)
	vpshufd	\$0xff,$xb3,$xb3
	vmovdqa	$xb2,0x140-0x100(%rcx)
	vmovdqa	$xb3,0x160-0x100(%rcx)

	vpshufd	\$0x00,$xt3,$xt0	# "xc0"
	vpshufd	\$0x55,$xt3,$xt1	# "xc1"
	vmovdqa	$xt0,0x180-0x200(%rax)
	vpshufd	\$0xaa,$xt3,$xt2	# "xc2"
	vmovdqa	$xt1,0x1a0-0x200(%rax)
	vpshufd	\$0xff,$xt3,$xt3	# "xc3"
	vmovdqa	$xt2,0x1c0-0x200(%rax)
	vmovdqa	$xt3,0x1e0-0x200(%rax)

	vpshufd	\$0x00,$xd3,$xd0
	vpshufd	\$0x55,$xd3,$xd1
	vpaddd	.Lincy(%rip),$xd0,$xd0	# don't save counters yet
	vpshufd	\$0xaa,$xd3,$xd2
	vmovdqa	$xd1,0x220-0x200(%rax)
	vpshufd	\$0xff,$xd3,$xd3
	vmovdqa	$xd2,0x240-0x200(%rax)
	vmovdqa	$xd3,0x260-0x200(%rax)

	vmovdqa	0x80-0x100(%rcx),$xa0	# re-load smashed key
	vmovdqa	0xa0-0x100(%rcx),$xa1
	vmovdqa	0xc0-0x100(%rcx),$xa2
	vmovdqa	0xe0-0x100(%rcx),$xa3
	vmovdqa	0x100-0x100(%rcx),$xb0
	vmovdqa	0x120-0x100(%rcx),$xb1
	vmovdqa	0x140-0x100(%rcx),$xb2
	vmovdqa	0x160-0x100(%rcx),$xb3
	vmovdqa	0x180-0x200(%rax),$xt0	# "xc0"
	vmovdqa	0x1a0-0x200(%rax),$xt1	# "xc1"
	vmovdqa	0x1c0-0x200(%rax),$xt2	# "xc2"
	vmovdqa	0x1e0-0x200(%rax),$xt3	# "xc3"
	vmovdqa	0x200-0x200(%rax),$xd0
	vmovdqa	0x220-0x200(%rax),$xd1
	vmovdqa	0x240-0x200(%rax),$xd2
	vmovdqa	0x260-0x200(%rax),$xd3
	vpaddd	.Leight(%rip),$xd0,$xd0	# next SIMD counters

	vmovdqa	$xt2,0x40(%rsp)		# SIMD equivalent of "@x[10]"
	vmovdqa	$xt3,0x60(%rsp)		# SIMD equivalent of "@x[11]"
	vbroadcasti128	(%r10),$xt3
	vmovdqa	$xd0,0x200-0x200(%rax)	# save SIMD counters

	foreach (&AVX2_lane_ROUND(0, 4, 8,12)) { eval; }
	foreach (&AVX2_lane_ROUND(0, 5,10,15)) { eval; }

	lea	0x200(%rsp),%rax	# size optimization
	vpaddd	0x80-0x100(%rcx),$xa0,$xa0	# accumulate key
	vpaddd	0xa0-0x100(%rcx),$xa1,$xa1
	vpaddd	0xc0-0x100(%rcx),$xa2,$xa2
	vpaddd	0xe0-0x100(%rcx),$xa3,$xa3

	vpunpckldq	$xa1,$xa0,$xt2	# "de-interlace" data
	vpunpckldq	$xa3,$xa2,$xt3
	vpunpckhdq	$xa1,$xa0,$xa0
	vpunpckhdq	$xa3,$xa2,$xa2
	vpunpcklqdq	$xt3,$xt2,$xa1	# "a0"
	vpunpckhqdq	$xt3,$xt2,$xt2	# "a1"
	vpunpcklqdq	$xa2,$xa0,$xa3	# "a2"
	vpunpckhqdq	$xa2,$xa0,$xa0	# "a3"

	($xa0,$xa1,$xa2,$xa3,$xt2)=($xa1,$xt2,$xa3,$xa0,$xa2);

	vpaddd	0x100-0x100(%rcx),$xb0,$xb0
	vpaddd	0x120-0x100(%rcx),$xb1,$xb1
	vpaddd	0x140-0x100(%rcx),$xb2,$xb2
	vpaddd	0x160-0x100(%rcx),$xb3,$xb3

	vpunpckldq	$xb1,$xb0,$xt2
	vpunpckldq	$xb3,$xb2,$xt3
	vpunpckhdq	$xb1,$xb0,$xb0
	vpunpckhdq	$xb3,$xb2,$xb2
	vpunpcklqdq	$xt3,$xt2,$xb1	# "b0"
	vpunpckhqdq	$xt3,$xt2,$xt2	# "b1"
	vpunpcklqdq	$xb2,$xb0,$xb3	# "b2"
	vpunpckhqdq	$xb2,$xb0,$xb0	# "b3"

	($xb0,$xb1,$xb2,$xb3,$xt2)=($xb1,$xt2,$xb3,$xb0,$xb2);

	vperm2i128	\$0x20,$xb0,$xa0,$xt3	# "de-interlace" further
	vperm2i128	\$0x31,$xb0,$xa0,$xb0
	vperm2i128	\$0x20,$xb1,$xa1,$xa0
	vperm2i128	\$0x31,$xb1,$xa1,$xb1
	vperm2i128	\$0x20,$xb2,$xa2,$xa1
	vperm2i128	\$0x31,$xb2,$xa2,$xb2
	vperm2i128	\$0x20,$xb3,$xa3,$xa2
	vperm2i128	\$0x31,$xb3,$xa3,$xb3

	($xa0,$xa1,$xa2,$xa3,$xt3)=($xt3,$xa0,$xa1,$xa2,$xa3);
	my ($xc0,$xc1,$xc2,$xc3)=($xt0,$xt1,$xa0,$xa1);

	vmovdqa	$xa0,0x00(%rsp)		# offload $xaN
	vmovdqa	$xa1,0x20(%rsp)
	vmovdqa	0x40(%rsp),$xc2		# $xa0
	vmovdqa	0x60(%rsp),$xc3		# $xa1

	vpaddd	0x180-0x200(%rax),$xc0,$xc0
	vpaddd	0x1a0-0x200(%rax),$xc1,$xc1
	vpaddd	0x1c0-0x200(%rax),$xc2,$xc2
	vpaddd	0x1e0-0x200(%rax),$xc3,$xc3

	vpunpckldq	$xc1,$xc0,$xt2
	vpunpckldq	$xc3,$xc2,$xt3
	vpunpckhdq	$xc1,$xc0,$xc0
	vpunpckhdq	$xc3,$xc2,$xc2
	vpunpcklqdq	$xt3,$xt2,$xc1	# "c0"
	vpunpckhqdq	$xt3,$xt2,$xt2	# "c1"
	vpunpcklqdq	$xc2,$xc0,$xc3	# "c2"
	vpunpckhqdq	$xc2,$xc0,$xc0	# "c3"

	($xc0,$xc1,$xc2,$xc3,$xt2)=($xc1,$xt2,$xc3,$xc0,$xc2);

	vpaddd	0x200-0x200(%rax),$xd0,$xd0
	vpaddd	0x220-0x200(%rax),$xd1,$xd1
	vpaddd	0x240-0x200(%rax),$xd2,$xd2
	vpaddd	0x260-0x200(%rax),$xd3,$xd3

	vpunpckldq	$xd1,$xd0,$xt2
	vpunpckldq	$xd3,$xd2,$xt3
	vpunpckhdq	$xd1,$xd0,$xd0
	vpunpckhdq	$xd3,$xd2,$xd2
	vpunpcklqdq	$xt3,$xt2,$xd1	# "d0"
	vpunpckhqdq	$xt3,$xt2,$xt2	# "d1"
	vpunpcklqdq	$xd2,$xd0,$xd3	# "d2"
	vpunpckhqdq	$xd2,$xd0,$xd0	# "d3"

	($xd0,$xd1,$xd2,$xd3,$xt2)=($xd1,$xt2,$xd3,$xd0,$xd2);

	vperm2i128	\$0x20,$xd0,$xc0,$xt3	# "de-interlace" further
	vperm2i128	\$0x31,$xd0,$xc0,$xd0
	vperm2i128	\$0x20,$xd1,$xc1,$xc0
	vperm2i128	\$0x31,$xd1,$xc1,$xd1
	vperm2i128	\$0x20,$xd2,$xc2,$xc1
	vperm2i128	\$0x31,$xd2,$xc2,$xd2
	vperm2i128	\$0x20,$xd3,$xc3,$xc2
	vperm2i128	\$0x31,$xd3,$xc3,$xd3

	($xc0,$xc1,$xc2,$xc3,$xt3)=($xt3,$xc0,$xc1,$xc2,$xc3);
	($xb0,$xb1,$xb2,$xb3,$xc0,$xc1,$xc2,$xc3)=
	($xc0,$xc1,$xc2,$xc3,$xb0,$xb1,$xb2,$xb3);
	($xa0,$xa1)=($xt2,$xt3);
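	# vperm2i128 finishes the transpose at 128-bit granularity:
	# selector 0x20 concatenates the low 128-bit halves of its two
	# sources, 0x31 the high halves, so together with the dword and
	# qword unpacks above the data ends up as contiguous runs of
	# keystream ready to be xored against the input.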
	vmovdqa	0x00(%rsp),$xa0		# $xaN was offloaded, remember?
	vmovdqa	0x20(%rsp),$xa1

	vpxor	0x00($inp),$xa0,$xa0	# xor with input
	vpxor	0x20($inp),$xb0,$xb0
	vpxor	0x40($inp),$xc0,$xc0
	vpxor	0x60($inp),$xd0,$xd0
	lea	0x80($inp),$inp		# size optimization
	vmovdqu	$xa0,0x00($out)
	vmovdqu	$xb0,0x20($out)
	vmovdqu	$xc0,0x40($out)
	vmovdqu	$xd0,0x60($out)
	lea	0x80($out),$out		# size optimization

	vpxor	0x00($inp),$xa1,$xa1
	vpxor	0x20($inp),$xb1,$xb1
	vpxor	0x40($inp),$xc1,$xc1
	vpxor	0x60($inp),$xd1,$xd1
	lea	0x80($inp),$inp		# size optimization
	vmovdqu	$xa1,0x00($out)
	vmovdqu	$xb1,0x20($out)
	vmovdqu	$xc1,0x40($out)
	vmovdqu	$xd1,0x60($out)
	lea	0x80($out),$out		# size optimization

	vpxor	0x00($inp),$xa2,$xa2
	vpxor	0x20($inp),$xb2,$xb2
	vpxor	0x40($inp),$xc2,$xc2
	vpxor	0x60($inp),$xd2,$xd2
	lea	0x80($inp),$inp		# size optimization
	vmovdqu	$xa2,0x00($out)
	vmovdqu	$xb2,0x20($out)
	vmovdqu	$xc2,0x40($out)
	vmovdqu	$xd2,0x60($out)
	lea	0x80($out),$out		# size optimization

	vpxor	0x00($inp),$xa3,$xa3
	vpxor	0x20($inp),$xb3,$xb3
	vpxor	0x40($inp),$xc3,$xc3
	vpxor	0x60($inp),$xd3,$xd3
	lea	0x80($inp),$inp		# size optimization
	vmovdqu	$xa3,0x00($out)
	vmovdqu	$xb3,0x20($out)
	vmovdqu	$xc3,0x40($out)
	vmovdqu	$xd3,0x60($out)
	lea	0x80($out),$out		# size optimization

	vmovdqa	$xa0,0x00(%rsp)
	vmovdqa	$xb0,0x20(%rsp)

	vpxor	0x00($inp),$xa0,$xa0	# xor with input
	vpxor	0x20($inp),$xb0,$xb0
	vmovdqu	$xa0,0x00($out)
	vmovdqu	$xb0,0x20($out)

	lea	0x40($inp),$inp		# inp+=64*1
	vmovdqa	$xc0,0x00(%rsp)
	lea	0x40($out),$out		# out+=64*1
	sub	\$64,$len		# len-=64*1
	vmovdqa	$xd0,0x20(%rsp)

	vpxor	0x00($inp),$xa0,$xa0	# xor with input
	vpxor	0x20($inp),$xb0,$xb0
	vpxor	0x40($inp),$xc0,$xc0
	vpxor	0x60($inp),$xd0,$xd0
	vmovdqu	$xa0,0x00($out)
	vmovdqu	$xb0,0x20($out)
	vmovdqu	$xc0,0x40($out)
	vmovdqu	$xd0,0x60($out)

	lea	0x80($inp),$inp		# inp+=64*2
	vmovdqa	$xa1,0x00(%rsp)
	lea	0x80($out),$out		# out+=64*2
	sub	\$128,$len		# len-=64*2
	vmovdqa	$xb1,0x20(%rsp)

	vpxor	0x00($inp),$xa0,$xa0	# xor with input
	vpxor	0x20($inp),$xb0,$xb0
	vpxor	0x40($inp),$xc0,$xc0
	vpxor	0x60($inp),$xd0,$xd0
	vpxor	0x80($inp),$xa1,$xa1
	vpxor	0xa0($inp),$xb1,$xb1
	vmovdqu	$xa0,0x00($out)
	vmovdqu	$xb0,0x20($out)
	vmovdqu	$xc0,0x40($out)
	vmovdqu	$xd0,0x60($out)
	vmovdqu	$xa1,0x80($out)
	vmovdqu	$xb1,0xa0($out)

	lea	0xc0($inp),$inp		# inp+=64*3
	vmovdqa	$xc1,0x00(%rsp)
	lea	0xc0($out),$out		# out+=64*3
	sub	\$192,$len		# len-=64*3
	vmovdqa	$xd1,0x20(%rsp)

	vpxor	0x00($inp),$xa0,$xa0	# xor with input
	vpxor	0x20($inp),$xb0,$xb0
	vpxor	0x40($inp),$xc0,$xc0
	vpxor	0x60($inp),$xd0,$xd0
	vpxor	0x80($inp),$xa1,$xa1
	vpxor	0xa0($inp),$xb1,$xb1
	vpxor	0xc0($inp),$xc1,$xc1
	vpxor	0xe0($inp),$xd1,$xd1
	vmovdqu	$xa0,0x00($out)
	vmovdqu	$xb0,0x20($out)
	vmovdqu	$xc0,0x40($out)
	vmovdqu	$xd0,0x60($out)
	vmovdqu	$xa1,0x80($out)
	vmovdqu	$xb1,0xa0($out)
	vmovdqu	$xc1,0xc0($out)
	vmovdqu	$xd1,0xe0($out)

	lea	0x100($inp),$inp	# inp+=64*4
	vmovdqa	$xa2,0x00(%rsp)
	lea	0x100($out),$out	# out+=64*4
	sub	\$256,$len		# len-=64*4
	vmovdqa	$xb2,0x20(%rsp)

	vpxor	0x00($inp),$xa0,$xa0	# xor with input
	vpxor	0x20($inp),$xb0,$xb0
	vpxor	0x40($inp),$xc0,$xc0
	vpxor	0x60($inp),$xd0,$xd0
	vpxor	0x80($inp),$xa1,$xa1
	vpxor	0xa0($inp),$xb1,$xb1
	vpxor	0xc0($inp),$xc1,$xc1
	vpxor	0xe0($inp),$xd1,$xd1
	vpxor	0x100($inp),$xa2,$xa2
	vpxor	0x120($inp),$xb2,$xb2
	vmovdqu	$xa0,0x00($out)
	vmovdqu	$xb0,0x20($out)
	vmovdqu	$xc0,0x40($out)
	vmovdqu	$xd0,0x60($out)
	vmovdqu	$xa1,0x80($out)
	vmovdqu	$xb1,0xa0($out)
	vmovdqu	$xc1,0xc0($out)
	vmovdqu	$xd1,0xe0($out)
	vmovdqu	$xa2,0x100($out)
	vmovdqu	$xb2,0x120($out)

	lea	0x140($inp),$inp	# inp+=64*5
	vmovdqa	$xc2,0x00(%rsp)
	lea	0x140($out),$out	# out+=64*5
	sub	\$320,$len		# len-=64*5
	vmovdqa	$xd2,0x20(%rsp)

	vpxor	0x00($inp),$xa0,$xa0	# xor with input
	vpxor	0x20($inp),$xb0,$xb0
	vpxor	0x40($inp),$xc0,$xc0
	vpxor	0x60($inp),$xd0,$xd0
	vpxor	0x80($inp),$xa1,$xa1
	vpxor	0xa0($inp),$xb1,$xb1
	vpxor	0xc0($inp),$xc1,$xc1
	vpxor	0xe0($inp),$xd1,$xd1
	vpxor	0x100($inp),$xa2,$xa2
	vpxor	0x120($inp),$xb2,$xb2
	vpxor	0x140($inp),$xc2,$xc2
	vpxor	0x160($inp),$xd2,$xd2
	vmovdqu	$xa0,0x00($out)
	vmovdqu	$xb0,0x20($out)
	vmovdqu	$xc0,0x40($out)
	vmovdqu	$xd0,0x60($out)
	vmovdqu	$xa1,0x80($out)
	vmovdqu	$xb1,0xa0($out)
	vmovdqu	$xc1,0xc0($out)
	vmovdqu	$xd1,0xe0($out)
	vmovdqu	$xa2,0x100($out)
	vmovdqu	$xb2,0x120($out)
	vmovdqu	$xc2,0x140($out)
	vmovdqu	$xd2,0x160($out)

	lea	0x180($inp),$inp	# inp+=64*6
	vmovdqa	$xa3,0x00(%rsp)
	lea	0x180($out),$out	# out+=64*6
	sub	\$384,$len		# len-=64*6
	vmovdqa	$xb3,0x20(%rsp)

	vpxor	0x00($inp),$xa0,$xa0	# xor with input
	vpxor	0x20($inp),$xb0,$xb0
	vpxor	0x40($inp),$xc0,$xc0
	vpxor	0x60($inp),$xd0,$xd0
	vpxor	0x80($inp),$xa1,$xa1
	vpxor	0xa0($inp),$xb1,$xb1
	vpxor	0xc0($inp),$xc1,$xc1
	vpxor	0xe0($inp),$xd1,$xd1
	vpxor	0x100($inp),$xa2,$xa2
	vpxor	0x120($inp),$xb2,$xb2
	vpxor	0x140($inp),$xc2,$xc2
	vpxor	0x160($inp),$xd2,$xd2
	vpxor	0x180($inp),$xa3,$xa3
	vpxor	0x1a0($inp),$xb3,$xb3
	vmovdqu	$xa0,0x00($out)
	vmovdqu	$xb0,0x20($out)
	vmovdqu	$xc0,0x40($out)
	vmovdqu	$xd0,0x60($out)
	vmovdqu	$xa1,0x80($out)
	vmovdqu	$xb1,0xa0($out)
	vmovdqu	$xc1,0xc0($out)
	vmovdqu	$xd1,0xe0($out)
	vmovdqu	$xa2,0x100($out)
	vmovdqu	$xb2,0x120($out)
	vmovdqu	$xc2,0x140($out)
	vmovdqu	$xd2,0x160($out)
	vmovdqu	$xa3,0x180($out)
	vmovdqu	$xb3,0x1a0($out)

	lea	0x1c0($inp),$inp	# inp+=64*7
	vmovdqa	$xc3,0x00(%rsp)
	lea	0x1c0($out),$out	# out+=64*7
	sub	\$448,$len		# len-=64*7
	vmovdqa	$xd3,0x20(%rsp)

	movzb	($inp,%r10),%eax
	movzb	(%rsp,%r10),%ecx
	mov	%al,-1($out,%r10)

$code.=<<___	if ($win64);
	movaps	-0xa8(%r9),%xmm6
	movaps	-0x98(%r9),%xmm7
	movaps	-0x88(%r9),%xmm8
	movaps	-0x78(%r9),%xmm9
	movaps	-0x68(%r9),%xmm10
	movaps	-0x58(%r9),%xmm11
	movaps	-0x48(%r9),%xmm12
	movaps	-0x38(%r9),%xmm13
	movaps	-0x28(%r9),%xmm14
	movaps	-0x18(%r9),%xmm15
.cfi_def_cfa_register	%rsp
.size	ChaCha20_8x,.-ChaCha20_8x

########################################################################
# This one handles shorter inputs...

my ($a,$b,$c,$d, $a_,$b_,$c_,$d_,$fourz) = map("%zmm$_",(0..3,16..20));
my ($t0,$t1,$t2,$t3) = map("%xmm$_",(4..7));
sub AVX512ROUND {	# critical path is 14 "SIMD ticks" per round
	&vpaddd	($a,$a,$b);
	&vpxord	($d,$d,$a);
	&vprold	($d,$d,16);

	&vpaddd	($c,$c,$d);
	&vpxord	($b,$b,$c);
	&vprold	($b,$b,12);

	&vpaddd	($a,$a,$b);
	&vpxord	($d,$d,$a);
	&vprold	($d,$d,8);

	&vpaddd	($c,$c,$d);
	&vpxord	($b,$b,$c);
	&vprold	($b,$b,7);
}
my $xframe = $win64 ? 32+8 : 8;
.type	ChaCha20_avx512,\@function,5
	mov	%rsp,%r9		# frame pointer
.cfi_def_cfa_register	%r9
	sub	\$64+$xframe,%rsp
$code.=<<___	if ($win64);
	movaps	%xmm6,-0x28(%r9)
	movaps	%xmm7,-0x18(%r9)
	vbroadcasti32x4	.Lsigma(%rip),$a
	vbroadcasti32x4	($key),$b
	vbroadcasti32x4	16($key),$c
	vbroadcasti32x4	($counter),$d

	vpaddd	.Lzeroz(%rip),$d,$d
	vmovdqa32	.Lfourz(%rip),$fourz
	mov	\$10,$counter		# reuse $counter

	vpaddd	$fourz,$d_,$d

	&vpshufd	($c,$c,0b01001110);
	&vpshufd	($b,$b,0b00111001);
	&vpshufd	($d,$d,0b10010011);

	&vpshufd	($c,$c,0b01001110);
	&vpshufd	($b,$b,0b10010011);
	&vpshufd	($d,$d,0b00111001);

	&jnz	(".Loop_avx512");

	vpxor	0x00($inp),%x#$a,$t0	# xor with input
	vpxor	0x10($inp),%x#$b,$t1
	vpxor	0x20($inp),%x#$c,$t2
	vpxor	0x30($inp),%x#$d,$t3
	lea	0x40($inp),$inp		# inp+=64

	vmovdqu	$t0,0x00($out)		# write output
	vmovdqu	$t1,0x10($out)
	vmovdqu	$t2,0x20($out)
	vmovdqu	$t3,0x30($out)
	lea	0x40($out),$out		# out+=64

	vextracti32x4	\$1,$a,$t0
	vextracti32x4	\$1,$b,$t1
	vextracti32x4	\$1,$c,$t2
	vextracti32x4	\$1,$d,$t3

	vpxor	0x00($inp),$t0,$t0	# xor with input
	vpxor	0x10($inp),$t1,$t1
	vpxor	0x20($inp),$t2,$t2
	vpxor	0x30($inp),$t3,$t3
	lea	0x40($inp),$inp		# inp+=64

	vmovdqu	$t0,0x00($out)		# write output
	vmovdqu	$t1,0x10($out)
	vmovdqu	$t2,0x20($out)
	vmovdqu	$t3,0x30($out)
	lea	0x40($out),$out		# out+=64

	vextracti32x4	\$2,$a,$t0
	vextracti32x4	\$2,$b,$t1
	vextracti32x4	\$2,$c,$t2
	vextracti32x4	\$2,$d,$t3

	vpxor	0x00($inp),$t0,$t0	# xor with input
	vpxor	0x10($inp),$t1,$t1
	vpxor	0x20($inp),$t2,$t2
	vpxor	0x30($inp),$t3,$t3
	lea	0x40($inp),$inp		# inp+=64

	vmovdqu	$t0,0x00($out)		# write output
	vmovdqu	$t1,0x10($out)
	vmovdqu	$t2,0x20($out)
	vmovdqu	$t3,0x30($out)
	lea	0x40($out),$out		# out+=64

	vextracti32x4	\$3,$a,$t0
	vextracti32x4	\$3,$b,$t1
	vextracti32x4	\$3,$c,$t2
	vextracti32x4	\$3,$d,$t3

	vpxor	0x00($inp),$t0,$t0	# xor with input
	vpxor	0x10($inp),$t1,$t1
	vpxor	0x20($inp),$t2,$t2
	vpxor	0x30($inp),$t3,$t3
	lea	0x40($inp),$inp		# inp+=64

	vmovdqu	$t0,0x00($out)		# write output
	vmovdqu	$t1,0x10($out)
	vmovdqu	$t2,0x20($out)
	vmovdqu	$t3,0x30($out)
	lea	0x40($out),$out		# out+=64
	jnz	.Loop_outer_avx512

	vmovdqa	%x#$a,0x00(%rsp)
	vmovdqa	%x#$b,0x10(%rsp)
	vmovdqa	%x#$c,0x20(%rsp)
	vmovdqa	%x#$d,0x30(%rsp)
	jmp	.Loop_tail_avx512

	vmovdqa	$t0,0x00(%rsp)
	vmovdqa	$t1,0x10(%rsp)
	vmovdqa	$t2,0x20(%rsp)
	vmovdqa	$t3,0x30(%rsp)

	movzb	($inp,$counter),%eax
	movzb	(%rsp,$counter),%ecx
	lea	1($counter),$counter
	mov	%al,-1($out,$counter)
	jnz	.Loop_tail_avx512

	vmovdqu32	$a_,0x00(%rsp)

$code.=<<___	if ($win64);
	movaps	-0x28(%r9),%xmm6
	movaps	-0x18(%r9),%xmm7
.cfi_def_cfa_register	%rsp
.size	ChaCha20_avx512,.-ChaCha20_avx512

# This one handles longer inputs...

my ($xa0,$xa1,$xa2,$xa3, $xb0,$xb1,$xb2,$xb3,
    $xc0,$xc1,$xc2,$xc3, $xd0,$xd1,$xd2,$xd3)=map("%zmm$_",(0..15));
my @xx=($xa0,$xa1,$xa2,$xa3, $xb0,$xb1,$xb2,$xb3,
	$xc0,$xc1,$xc2,$xc3, $xd0,$xd1,$xd2,$xd3);
my @key=map("%zmm$_",(16..31));
my ($xt0,$xt1,$xt2,$xt3)=@key[0..3];

sub AVX512_lane_ROUND {
my ($a0,$b0,$c0,$d0)=@_;
my ($a1,$b1,$c1,$d1)=map(($_&~3)+(($_+1)&3),($a0,$b0,$c0,$d0));
my ($a2,$b2,$c2,$d2)=map(($_&~3)+(($_+1)&3),($a1,$b1,$c1,$d1));
my ($a3,$b3,$c3,$d3)=map(($_&~3)+(($_+1)&3),($a2,$b2,$c2,$d2));
my @x=map("\"$_\"",@xx);
2527 "&vpaddd (@x[$a0],@x[$a0],@x[$b0])", # Q1
2528 "&vpaddd (@x[$a1],@x[$a1],@x[$b1])", # Q2
2529 "&vpaddd (@x[$a2],@x[$a2],@x[$b2])", # Q3
2530 "&vpaddd (@x[$a3],@x[$a3],@x[$b3])", # Q4
2531 "&vpxord (@x[$d0],@x[$d0],@x[$a0])",
2532 "&vpxord (@x[$d1],@x[$d1],@x[$a1])",
2533 "&vpxord (@x[$d2],@x[$d2],@x[$a2])",
2534 "&vpxord (@x[$d3],@x[$d3],@x[$a3])",
2535 "&vprold (@x[$d0],@x[$d0],16)",
2536 "&vprold (@x[$d1],@x[$d1],16)",
2537 "&vprold (@x[$d2],@x[$d2],16)",
2538 "&vprold (@x[$d3],@x[$d3],16)",
2540 "&vpaddd (@x[$c0],@x[$c0],@x[$d0])",
2541 "&vpaddd (@x[$c1],@x[$c1],@x[$d1])",
2542 "&vpaddd (@x[$c2],@x[$c2],@x[$d2])",
2543 "&vpaddd (@x[$c3],@x[$c3],@x[$d3])",
2544 "&vpxord (@x[$b0],@x[$b0],@x[$c0])",
2545 "&vpxord (@x[$b1],@x[$b1],@x[$c1])",
2546 "&vpxord (@x[$b2],@x[$b2],@x[$c2])",
2547 "&vpxord (@x[$b3],@x[$b3],@x[$c3])",
2548 "&vprold (@x[$b0],@x[$b0],12)",
2549 "&vprold (@x[$b1],@x[$b1],12)",
2550 "&vprold (@x[$b2],@x[$b2],12)",
2551 "&vprold (@x[$b3],@x[$b3],12)",
2553 "&vpaddd (@x[$a0],@x[$a0],@x[$b0])",
2554 "&vpaddd (@x[$a1],@x[$a1],@x[$b1])",
2555 "&vpaddd (@x[$a2],@x[$a2],@x[$b2])",
2556 "&vpaddd (@x[$a3],@x[$a3],@x[$b3])",
2557 "&vpxord (@x[$d0],@x[$d0],@x[$a0])",
2558 "&vpxord (@x[$d1],@x[$d1],@x[$a1])",
2559 "&vpxord (@x[$d2],@x[$d2],@x[$a2])",
2560 "&vpxord (@x[$d3],@x[$d3],@x[$a3])",
2561 "&vprold (@x[$d0],@x[$d0],8)",
2562 "&vprold (@x[$d1],@x[$d1],8)",
2563 "&vprold (@x[$d2],@x[$d2],8)",
2564 "&vprold (@x[$d3],@x[$d3],8)",
2566 "&vpaddd (@x[$c0],@x[$c0],@x[$d0])",
2567 "&vpaddd (@x[$c1],@x[$c1],@x[$d1])",
2568 "&vpaddd (@x[$c2],@x[$c2],@x[$d2])",
2569 "&vpaddd (@x[$c3],@x[$c3],@x[$d3])",
2570 "&vpxord (@x[$b0],@x[$b0],@x[$c0])",
2571 "&vpxord (@x[$b1],@x[$b1],@x[$c1])",
2572 "&vpxord (@x[$b2],@x[$b2],@x[$c2])",
2573 "&vpxord (@x[$b3],@x[$b3],@x[$c3])",
2574 "&vprold (@x[$b0],@x[$b0],7)",
2575 "&vprold (@x[$b1],@x[$b1],7)",
2576 "&vprold (@x[$b2],@x[$b2],7)",
2577 "&vprold (@x[$b3],@x[$b3],7)"
my $xframe = $win64 ? 0xa8 : 8;

.type	ChaCha20_16x,\@function,5

	mov	%rsp,%r9		# frame register
.cfi_def_cfa_register	%r9
	sub	\$64+$xframe,%rsp

$code.=<<___	if ($win64);
	movaps	%xmm6,-0xa8(%r9)
	movaps	%xmm7,-0x98(%r9)
	movaps	%xmm8,-0x88(%r9)
	movaps	%xmm9,-0x78(%r9)
	movaps	%xmm10,-0x68(%r9)
	movaps	%xmm11,-0x58(%r9)
	movaps	%xmm12,-0x48(%r9)
	movaps	%xmm13,-0x38(%r9)
	movaps	%xmm14,-0x28(%r9)
	movaps	%xmm15,-0x18(%r9)
	lea	.Lsigma(%rip),%r10
	vbroadcasti32x4	(%r10),$xa3		# key[0]
	vbroadcasti32x4	($key),$xb3		# key[1]
	vbroadcasti32x4	16($key),$xc3		# key[2]
	vbroadcasti32x4	($counter),$xd3		# key[3]

	vpshufd	\$0x00,$xa3,$xa0	# smash key by lanes...
	vpshufd	\$0x55,$xa3,$xa1
	vpshufd	\$0xaa,$xa3,$xa2
	vpshufd	\$0xff,$xa3,$xa3
	vmovdqa64	$xa0,@key[0]
	vmovdqa64	$xa1,@key[1]
	vmovdqa64	$xa2,@key[2]
	vmovdqa64	$xa3,@key[3]

	vpshufd	\$0x00,$xb3,$xb0
	vpshufd	\$0x55,$xb3,$xb1
	vpshufd	\$0xaa,$xb3,$xb2
	vpshufd	\$0xff,$xb3,$xb3
	vmovdqa64	$xb0,@key[4]
	vmovdqa64	$xb1,@key[5]
	vmovdqa64	$xb2,@key[6]
	vmovdqa64	$xb3,@key[7]

	vpshufd	\$0x00,$xc3,$xc0
	vpshufd	\$0x55,$xc3,$xc1
	vpshufd	\$0xaa,$xc3,$xc2
	vpshufd	\$0xff,$xc3,$xc3
	vmovdqa64	$xc0,@key[8]
	vmovdqa64	$xc1,@key[9]
	vmovdqa64	$xc2,@key[10]
	vmovdqa64	$xc3,@key[11]

	vpshufd	\$0x00,$xd3,$xd0
	vpshufd	\$0x55,$xd3,$xd1
	vpshufd	\$0xaa,$xd3,$xd2
	vpshufd	\$0xff,$xd3,$xd3
	vpaddd	.Lincz(%rip),$xd0,$xd0	# don't save counters yet
	vmovdqa64	$xd0,@key[12]
	vmovdqa64	$xd1,@key[13]
	vmovdqa64	$xd2,@key[14]
	vmovdqa64	$xd3,@key[15]
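# Each @key register now carries one 32-bit state word replicated into
# all sixteen dword lanes, except that .Lincz has offset every lane of
# the counter word @key[12] by its lane index; lane N of the register
# set therefore computes block N of the next sixteen ChaCha20 blocks.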
	vpbroadcastd	0(%r10),$xa0		# reload key
	vpbroadcastd	4(%r10),$xa1
	vpbroadcastd	8(%r10),$xa2
	vpbroadcastd	12(%r10),$xa3
	vpaddd	.Lsixteen(%rip),@key[12],@key[12]	# next SIMD counters
	vmovdqa64	@key[4],$xb0
	vmovdqa64	@key[5],$xb1
	vmovdqa64	@key[6],$xb2
	vmovdqa64	@key[7],$xb3
	vmovdqa64	@key[8],$xc0
	vmovdqa64	@key[9],$xc1
	vmovdqa64	@key[10],$xc2
	vmovdqa64	@key[11],$xc3
	vmovdqa64	@key[12],$xd0
	vmovdqa64	@key[13],$xd1
	vmovdqa64	@key[14],$xd2
	vmovdqa64	@key[15],$xd3

	vmovdqa64	$xa0,@key[0]
	vmovdqa64	$xa1,@key[1]
	vmovdqa64	$xa2,@key[2]
	vmovdqa64	$xa3,@key[3]
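# For the second and later 1KB chunks the outer loop re-enters above:
# the four sigma words are re-broadcast from %r10, every lane counter
# advances by sixteen (.Lsixteen), and the working registers are
# refreshed from their @key copies before the rounds run again.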
	foreach (&AVX512_lane_ROUND(0, 4, 8,12)) { eval; }
	foreach (&AVX512_lane_ROUND(0, 5,10,15)) { eval; }
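# Two invocations per iteration: (0,4,8,12) is the column round and
# (0,5,10,15) the diagonal round, together one of ChaCha20's ten
# double rounds.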
	vpaddd	@key[0],$xa0,$xa0	# accumulate key
	vpaddd	@key[1],$xa1,$xa1
	vpaddd	@key[2],$xa2,$xa2
	vpaddd	@key[3],$xa3,$xa3

	vpunpckldq	$xa1,$xa0,$xt2	# "de-interlace" data
	vpunpckldq	$xa3,$xa2,$xt3
	vpunpckhdq	$xa1,$xa0,$xa0
	vpunpckhdq	$xa3,$xa2,$xa2
	vpunpcklqdq	$xt3,$xt2,$xa1	# "a0"
	vpunpckhqdq	$xt3,$xt2,$xt2	# "a1"
	vpunpcklqdq	$xa2,$xa0,$xa3	# "a2"
	vpunpckhqdq	$xa2,$xa0,$xa0	# "a3"

($xa0,$xa1,$xa2,$xa3,$xt2)=($xa1,$xt2,$xa3,$xa0,$xa2);

	vpaddd	@key[4],$xb0,$xb0
	vpaddd	@key[5],$xb1,$xb1
	vpaddd	@key[6],$xb2,$xb2
	vpaddd	@key[7],$xb3,$xb3

	vpunpckldq	$xb1,$xb0,$xt2
	vpunpckldq	$xb3,$xb2,$xt3
	vpunpckhdq	$xb1,$xb0,$xb0
	vpunpckhdq	$xb3,$xb2,$xb2
	vpunpcklqdq	$xt3,$xt2,$xb1	# "b0"
	vpunpckhqdq	$xt3,$xt2,$xt2	# "b1"
	vpunpcklqdq	$xb2,$xb0,$xb3	# "b2"
	vpunpckhqdq	$xb2,$xb0,$xb0	# "b3"

($xb0,$xb1,$xb2,$xb3,$xt2)=($xb1,$xt2,$xb3,$xb0,$xb2);

	vshufi32x4	\$0x44,$xb0,$xa0,$xt3	# "de-interlace" further
	vshufi32x4	\$0xee,$xb0,$xa0,$xb0
	vshufi32x4	\$0x44,$xb1,$xa1,$xa0
	vshufi32x4	\$0xee,$xb1,$xa1,$xb1
	vshufi32x4	\$0x44,$xb2,$xa2,$xa1
	vshufi32x4	\$0xee,$xb2,$xa2,$xb2
	vshufi32x4	\$0x44,$xb3,$xa3,$xa2
	vshufi32x4	\$0xee,$xb3,$xa3,$xb3

($xa0,$xa1,$xa2,$xa3,$xt3)=($xt3,$xa0,$xa1,$xa2,$xa3);
	vpaddd	@key[8],$xc0,$xc0
	vpaddd	@key[9],$xc1,$xc1
	vpaddd	@key[10],$xc2,$xc2
	vpaddd	@key[11],$xc3,$xc3

	vpunpckldq	$xc1,$xc0,$xt2
	vpunpckldq	$xc3,$xc2,$xt3
	vpunpckhdq	$xc1,$xc0,$xc0
	vpunpckhdq	$xc3,$xc2,$xc2
	vpunpcklqdq	$xt3,$xt2,$xc1	# "c0"
	vpunpckhqdq	$xt3,$xt2,$xt2	# "c1"
	vpunpcklqdq	$xc2,$xc0,$xc3	# "c2"
	vpunpckhqdq	$xc2,$xc0,$xc0	# "c3"

($xc0,$xc1,$xc2,$xc3,$xt2)=($xc1,$xt2,$xc3,$xc0,$xc2);

	vpaddd	@key[12],$xd0,$xd0
	vpaddd	@key[13],$xd1,$xd1
	vpaddd	@key[14],$xd2,$xd2
	vpaddd	@key[15],$xd3,$xd3

	vpunpckldq	$xd1,$xd0,$xt2
	vpunpckldq	$xd3,$xd2,$xt3
	vpunpckhdq	$xd1,$xd0,$xd0
	vpunpckhdq	$xd3,$xd2,$xd2
	vpunpcklqdq	$xt3,$xt2,$xd1	# "d0"
	vpunpckhqdq	$xt3,$xt2,$xt2	# "d1"
	vpunpcklqdq	$xd2,$xd0,$xd3	# "d2"
	vpunpckhqdq	$xd2,$xd0,$xd0	# "d3"

($xd0,$xd1,$xd2,$xd3,$xt2)=($xd1,$xt2,$xd3,$xd0,$xd2);

	vshufi32x4	\$0x44,$xd0,$xc0,$xt3	# "de-interlace" further
	vshufi32x4	\$0xee,$xd0,$xc0,$xd0
	vshufi32x4	\$0x44,$xd1,$xc1,$xc0
	vshufi32x4	\$0xee,$xd1,$xc1,$xd1
	vshufi32x4	\$0x44,$xd2,$xc2,$xc1
	vshufi32x4	\$0xee,$xd2,$xc2,$xd2
	vshufi32x4	\$0x44,$xd3,$xc3,$xc2
	vshufi32x4	\$0xee,$xd3,$xc3,$xd3

($xc0,$xc1,$xc2,$xc3,$xt3)=($xt3,$xc0,$xc1,$xc2,$xc3);

	vshufi32x4	\$0x88,$xc0,$xa0,$xt0	# "de-interlace" further
	vshufi32x4	\$0xdd,$xc0,$xa0,$xa0
	vshufi32x4	\$0x88,$xd0,$xb0,$xc0
	vshufi32x4	\$0xdd,$xd0,$xb0,$xd0
	vshufi32x4	\$0x88,$xc1,$xa1,$xt1
	vshufi32x4	\$0xdd,$xc1,$xa1,$xa1
	vshufi32x4	\$0x88,$xd1,$xb1,$xc1
	vshufi32x4	\$0xdd,$xd1,$xb1,$xd1
	vshufi32x4	\$0x88,$xc2,$xa2,$xt2
	vshufi32x4	\$0xdd,$xc2,$xa2,$xa2
	vshufi32x4	\$0x88,$xd2,$xb2,$xc2
	vshufi32x4	\$0xdd,$xd2,$xb2,$xd2
	vshufi32x4	\$0x88,$xc3,$xa3,$xt3
	vshufi32x4	\$0xdd,$xc3,$xa3,$xa3
	vshufi32x4	\$0x88,$xd3,$xb3,$xc3
	vshufi32x4	\$0xdd,$xd3,$xb3,$xd3

($xa0,$xa1,$xa2,$xa3,$xb0,$xb1,$xb2,$xb3)=
($xt0,$xt1,$xt2,$xt3,$xa0,$xa1,$xa2,$xa3);

($xa0,$xb0,$xc0,$xd0, $xa1,$xb1,$xc1,$xd1,
 $xa2,$xb2,$xc2,$xd2, $xa3,$xb3,$xc3,$xd3) =
($xa0,$xa1,$xa2,$xa3, $xb0,$xb1,$xb2,$xb3,
 $xc0,$xc1,$xc2,$xc3, $xd0,$xd1,$xd2,$xd3);
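# The "de-interlace" above is a 16x16 dword transpose performed at
# three granularities: vpunpck{l,h}dq swaps 32-bit words,
# vpunpck{l,h}qdq 64-bit pairs, and vshufi32x4 whole 128-bit lanes.
# After the renaming, each zmm register holds one complete 64-byte
# ChaCha20 block in output order, ready to be XORed with the input.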
	vpxord	0x00($inp),$xa0,$xa0	# xor with input
	vpxord	0x40($inp),$xb0,$xb0
	vpxord	0x80($inp),$xc0,$xc0
	vpxord	0xc0($inp),$xd0,$xd0
	vmovdqu32	$xa0,0x00($out)
	vmovdqu32	$xb0,0x40($out)
	vmovdqu32	$xc0,0x80($out)
	vmovdqu32	$xd0,0xc0($out)

	vpxord	0x100($inp),$xa1,$xa1
	vpxord	0x140($inp),$xb1,$xb1
	vpxord	0x180($inp),$xc1,$xc1
	vpxord	0x1c0($inp),$xd1,$xd1
	vmovdqu32	$xa1,0x100($out)
	vmovdqu32	$xb1,0x140($out)
	vmovdqu32	$xc1,0x180($out)
	vmovdqu32	$xd1,0x1c0($out)

	vpxord	0x200($inp),$xa2,$xa2
	vpxord	0x240($inp),$xb2,$xb2
	vpxord	0x280($inp),$xc2,$xc2
	vpxord	0x2c0($inp),$xd2,$xd2
	vmovdqu32	$xa2,0x200($out)
	vmovdqu32	$xb2,0x240($out)
	vmovdqu32	$xc2,0x280($out)
	vmovdqu32	$xd2,0x2c0($out)

	vpxord	0x300($inp),$xa3,$xa3
	vpxord	0x340($inp),$xb3,$xb3
	vpxord	0x380($inp),$xc3,$xc3
	vpxord	0x3c0($inp),$xd3,$xd3
	lea	0x400($inp),$inp
	vmovdqu32	$xa3,0x300($out)
	vmovdqu32	$xb3,0x340($out)
	vmovdqu32	$xc3,0x380($out)
	vmovdqu32	$xd3,0x3c0($out)
	lea	0x400($out),$out
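# Each full pass therefore consumes and produces 0x400 = 1024 bytes,
# i.e. sixteen 64-byte ChaCha20 blocks per iteration of the outer
# loop.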
	jb	.Less_than_64_16x
	vpxord	($inp),$xa0,$xa0	# xor with input
	vmovdqu32	$xa0,($out,$inp)

	jb	.Less_than_64_16x
	vpxord	($inp),$xb0,$xb0
	vmovdqu32	$xb0,($out,$inp)

	jb	.Less_than_64_16x
	vpxord	($inp),$xc0,$xc0
	vmovdqu32	$xc0,($out,$inp)

	jb	.Less_than_64_16x
	vpxord	($inp),$xd0,$xd0
	vmovdqu32	$xd0,($out,$inp)

	jb	.Less_than_64_16x
	vpxord	($inp),$xa1,$xa1
	vmovdqu32	$xa1,($out,$inp)

	jb	.Less_than_64_16x
	vpxord	($inp),$xb1,$xb1
	vmovdqu32	$xb1,($out,$inp)

	jb	.Less_than_64_16x
	vpxord	($inp),$xc1,$xc1
	vmovdqu32	$xc1,($out,$inp)

	jb	.Less_than_64_16x
	vpxord	($inp),$xd1,$xd1
	vmovdqu32	$xd1,($out,$inp)

	jb	.Less_than_64_16x
	vpxord	($inp),$xa2,$xa2
	vmovdqu32	$xa2,($out,$inp)

	jb	.Less_than_64_16x
	vpxord	($inp),$xb2,$xb2
	vmovdqu32	$xb2,($out,$inp)

	jb	.Less_than_64_16x
	vpxord	($inp),$xc2,$xc2
	vmovdqu32	$xc2,($out,$inp)

	jb	.Less_than_64_16x
	vpxord	($inp),$xd2,$xd2
	vmovdqu32	$xd2,($out,$inp)

	jb	.Less_than_64_16x
	vpxord	($inp),$xa3,$xa3
	vmovdqu32	$xa3,($out,$inp)

	jb	.Less_than_64_16x
	vpxord	($inp),$xb3,$xb3
	vmovdqu32	$xb3,($out,$inp)

	jb	.Less_than_64_16x
	vpxord	($inp),$xc3,$xc3
	vmovdqu32	$xc3,($out,$inp)
.Less_than_64_16x:
	vmovdqa32	$xa0,0x00(%rsp)
	lea	($out,$inp),$out

.Loop_tail16x:
	movzb	($inp,%r10),%eax
	movzb	(%rsp,%r10),%ecx
	lea	1(%r10),%r10
	xor	%ecx,%eax
	mov	%al,-1($out,%r10)
	dec	$len
	jnz	.Loop_tail16x
	vpxord	$xa0,$xa0,$xa0
	vmovdqa32	$xa0,0(%rsp)
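# $xa0 is zeroed and written back over the stack slot: the spilled
# keystream block is key-derived material and must not be left behind
# on the stack frame.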
$code.=<<___	if ($win64);
	movaps	-0xa8(%r9),%xmm6
	movaps	-0x98(%r9),%xmm7
	movaps	-0x88(%r9),%xmm8
	movaps	-0x78(%r9),%xmm9
	movaps	-0x68(%r9),%xmm10
	movaps	-0x58(%r9),%xmm11
	movaps	-0x48(%r9),%xmm12
	movaps	-0x38(%r9),%xmm13
	movaps	-0x28(%r9),%xmm14
	movaps	-0x18(%r9),%xmm15

.cfi_def_cfa_register	%rsp

.size	ChaCha20_16x,.-ChaCha20_16x
# EXCEPTION_DISPOSITION handler (EXCEPTION_RECORD *rec,ULONG64 frame,
#		CONTEXT *context,DISPATCHER_CONTEXT *disp)

.extern	__imp_RtlVirtualUnwind
.type	se_handler,\@abi-omnipotent
	mov	120($context),%rax	# pull context->Rax
	mov	248($context),%rbx	# pull context->Rip

	mov	8($disp),%rsi		# disp->ImageBase
	mov	56($disp),%r11		# disp->HandlerData

	lea	.Lctr32_body(%rip),%r10
	cmp	%r10,%rbx		# context->Rip<.Lprologue
	jb	.Lcommon_seh_tail

	mov	152($context),%rax	# pull context->Rsp

	lea	.Lno_data(%rip),%r10	# epilogue label
	cmp	%r10,%rbx		# context->Rip>=.Lepilogue
	jae	.Lcommon_seh_tail

	lea	64+24+48(%rax),%rax
	mov	-8(%rax),%rbx
	mov	-16(%rax),%rbp
	mov	-24(%rax),%r12
	mov	-32(%rax),%r13
	mov	-40(%rax),%r14
	mov	-48(%rax),%r15
	mov	%rbx,144($context)	# restore context->Rbx
	mov	%rbp,160($context)	# restore context->Rbp
	mov	%r12,216($context)	# restore context->R12
	mov	%r13,224($context)	# restore context->R13
	mov	%r14,232($context)	# restore context->R14
	mov	%r15,240($context)	# restore context->R15
.Lcommon_seh_tail:
	mov	8(%rax),%rdi
	mov	16(%rax),%rsi
	mov	%rax,152($context)	# restore context->Rsp
	mov	%rsi,168($context)	# restore context->Rsi
	mov	%rdi,176($context)	# restore context->Rdi
	mov	40($disp),%rdi		# disp->ContextRecord
	mov	$context,%rsi		# context
	mov	\$154,%ecx		# sizeof(CONTEXT)
	.long	0xa548f3fc		# cld; rep movsq

	xor	%rcx,%rcx		# arg1, UNW_FLAG_NHANDLER
	mov	8(%rsi),%rdx		# arg2, disp->ImageBase
	mov	0(%rsi),%r8		# arg3, disp->ControlPc
	mov	16(%rsi),%r9		# arg4, disp->FunctionEntry
	mov	40(%rsi),%r10		# disp->ContextRecord
	lea	56(%rsi),%r11		# &disp->HandlerData
	lea	24(%rsi),%r12		# &disp->EstablisherFrame
	mov	%r10,32(%rsp)		# arg5
	mov	%r11,40(%rsp)		# arg6
	mov	%r12,48(%rsp)		# arg7
	mov	%rcx,56(%rsp)		# arg8, (NULL)
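# RtlVirtualUnwind takes eight arguments: the first four travel in
# %rcx/%rdx/%r8/%r9 per the Win64 calling convention, the remaining
# four in the stack parameter slots filled in above.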
	call	*__imp_RtlVirtualUnwind(%rip)

	mov	\$1,%eax		# ExceptionContinueSearch
.size	se_handler,.-se_handler

.type	ssse3_handler,\@abi-omnipotent
	mov	120($context),%rax	# pull context->Rax
	mov	248($context),%rbx	# pull context->Rip

	mov	8($disp),%rsi		# disp->ImageBase
	mov	56($disp),%r11		# disp->HandlerData

	mov	0(%r11),%r10d		# HandlerData[0]
	lea	(%rsi,%r10),%r10	# prologue label
	cmp	%r10,%rbx		# context->Rip<prologue label
	jb	.Lcommon_seh_tail

	mov	192($context),%rax	# pull context->R9

	mov	4(%r11),%r10d		# HandlerData[1]
	lea	(%rsi,%r10),%r10	# epilogue label
	cmp	%r10,%rbx		# context->Rip>=epilogue label
	jae	.Lcommon_seh_tail
	lea	-0x28(%rax),%rsi
	lea	512($context),%rdi	# &context.Xmm6
	mov	\$4,%ecx		# 2 xmm registers = 4 quadwords
	.long	0xa548f3fc		# cld; rep movsq

	jmp	.Lcommon_seh_tail
.size	ssse3_handler,.-ssse3_handler
.type	full_handler,\@abi-omnipotent

	mov	120($context),%rax	# pull context->Rax
	mov	248($context),%rbx	# pull context->Rip

	mov	8($disp),%rsi		# disp->ImageBase
	mov	56($disp),%r11		# disp->HandlerData

	mov	0(%r11),%r10d		# HandlerData[0]
	lea	(%rsi,%r10),%r10	# prologue label
	cmp	%r10,%rbx		# context->Rip<prologue label
	jb	.Lcommon_seh_tail

	mov	192($context),%rax	# pull context->R9

	mov	4(%r11),%r10d		# HandlerData[1]
	lea	(%rsi,%r10),%r10	# epilogue label
	cmp	%r10,%rbx		# context->Rip>=epilogue label
	jae	.Lcommon_seh_tail
	lea	-0xa8(%rax),%rsi
	lea	512($context),%rdi	# &context.Xmm6
	mov	\$20,%ecx		# 10 xmm registers = 20 quadwords
	.long	0xa548f3fc		# cld; rep movsq

	jmp	.Lcommon_seh_tail
.size	full_handler,.-full_handler
	.rva	.LSEH_begin_ChaCha20_ctr32
	.rva	.LSEH_end_ChaCha20_ctr32
	.rva	.LSEH_info_ChaCha20_ctr32

	.rva	.LSEH_begin_ChaCha20_ssse3
	.rva	.LSEH_end_ChaCha20_ssse3
	.rva	.LSEH_info_ChaCha20_ssse3

	.rva	.LSEH_begin_ChaCha20_4x
	.rva	.LSEH_end_ChaCha20_4x
	.rva	.LSEH_info_ChaCha20_4x

$code.=<<___ if ($avx);
	.rva	.LSEH_begin_ChaCha20_4xop
	.rva	.LSEH_end_ChaCha20_4xop
	.rva	.LSEH_info_ChaCha20_4xop

$code.=<<___ if ($avx>1);
	.rva	.LSEH_begin_ChaCha20_8x
	.rva	.LSEH_end_ChaCha20_8x
	.rva	.LSEH_info_ChaCha20_8x

$code.=<<___ if ($avx>2);
	.rva	.LSEH_begin_ChaCha20_avx512
	.rva	.LSEH_end_ChaCha20_avx512
	.rva	.LSEH_info_ChaCha20_avx512

	.rva	.LSEH_begin_ChaCha20_16x
	.rva	.LSEH_end_ChaCha20_16x
	.rva	.LSEH_info_ChaCha20_16x
.LSEH_info_ChaCha20_ctr32:

.LSEH_info_ChaCha20_ssse3:
	.rva	.Lssse3_body,.Lssse3_epilogue

.LSEH_info_ChaCha20_4x:
	.rva	.L4x_body,.L4x_epilogue

$code.=<<___ if ($avx);
.LSEH_info_ChaCha20_4xop:
	.rva	.L4xop_body,.L4xop_epilogue	# HandlerData[]

$code.=<<___ if ($avx>1);
.LSEH_info_ChaCha20_8x:
	.rva	.L8x_body,.L8x_epilogue		# HandlerData[]

$code.=<<___ if ($avx>2);
.LSEH_info_ChaCha20_avx512:
	.rva	.Lavx512_body,.Lavx512_epilogue	# HandlerData[]

.LSEH_info_ChaCha20_16x:
	.rva	.L16x_body,.L16x_epilogue	# HandlerData[]
foreach (split("\n",$code)) {
	s/\`([^\`]*)\`/eval $1/ge;

	s/%x#%[yz]/%x/g;	# "down-shift"
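# The "down-shift" rewrites the %x# prefix notation used in the code
# above: e.g. "%x#%zmm7" collapses to "%xmm7", so a template can name
# the 128-bit form of a wider register.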