# Copyright 2016 The OpenSSL Project Authors. All Rights Reserved.
#
# Licensed under the OpenSSL license (the "License"). You may not use
# this file except in compliance with the License. You can obtain a copy
# in the file LICENSE in the source distribution or at
# https://www.openssl.org/source/license.html
#
# ====================================================================
# Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
# project. The module is, however, dual licensed under OpenSSL and
# CRYPTOGAMS licenses depending on where you obtain it. For further
# details see http://www.openssl.org/~appro/cryptogams/.
# ====================================================================
# ChaCha20 for PowerPC/AltiVec.
#
# Performance in cycles per byte out of large buffer.
#
#			IALU/gcc-4.x	3xAltiVec+1xIALU
#
# Freescale e300	13.6/+115%	-
# PPC74x0/G4e		6.81/+310%	4.66
# PPC970/G5		9.29/+160%	4.60
# POWER7		8.62/+61%	4.27
# POWER8		8.70/+51%	3.96
if ($flavour =~ /64/) {
} elsif ($flavour =~ /32/) {
} else { die "nonsense $flavour"; }

$LITTLE_ENDIAN = ($flavour=~/le$/) ? 1 : 0;

$0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
( $xlate="${dir}ppc-xlate.pl" and -f $xlate ) or
( $xlate="${dir}../../perlasm/ppc-xlate.pl" and -f $xlate) or
die "can't locate ppc-xlate.pl";
open STDOUT,"| $^X $xlate $flavour ".shift or die "can't call $xlate: $!";
$FRAME=$LOCALS+64+18*$SIZE_T;	# 64 is for local variables
sub AUTOLOAD()		# thunk [simplified] x86-style perlasm
{ my $opcode = $AUTOLOAD; $opcode =~ s/.*:://; $opcode =~ s/_/\./;
    $code .= "\t$opcode\t".join(',',@_)."\n";
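    # Illustrative note (added): any otherwise-undefined sub call, e.g.
    # &add("r3","r3","r4") from the eval'ed ROUND() strings below, lands
    # here and is appended to $code as "\tadd\tr3,r3,r4\n"; an underscore
    # in the sub name becomes a dot, so a hypothetical &subfe_(...) would
    # emit the record-form "subfe." mnemonic.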
my ($out,$inp,$len,$key,$ctr) = map("r$_",(3..7));

my @x=map("r$_",(16..31));
my @d=map("r$_",(11,12,14,15));
my @t=map("r$_",(7..10));

my ($a0,$b0,$c0,$d0)=@_;
my ($a1,$b1,$c1,$d1)=map(($_&~3)+(($_+1)&3),($a0,$b0,$c0,$d0));
my ($a2,$b2,$c2,$d2)=map(($_&~3)+(($_+1)&3),($a1,$b1,$c1,$d1));
my ($a3,$b3,$c3,$d3)=map(($_&~3)+(($_+1)&3),($a2,$b2,$c2,$d2));
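# Note (added): starting from the column indices (0,4,8,12) the map above
# yields (1,5,9,13), (2,6,10,14) and (3,7,11,15), i.e. the other three
# columns, so one ROUND() call schedules four quarter-rounds in parallel;
# from (0,5,10,15) it likewise yields the remaining three diagonals.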
	"&add (@x[$a0],@x[$a0],@x[$b0])",
	"&add (@x[$a1],@x[$a1],@x[$b1])",
	"&add (@x[$a2],@x[$a2],@x[$b2])",
	"&add (@x[$a3],@x[$a3],@x[$b3])",
	"&xor (@x[$d0],@x[$d0],@x[$a0])",
	"&xor (@x[$d1],@x[$d1],@x[$a1])",
	"&xor (@x[$d2],@x[$d2],@x[$a2])",
	"&xor (@x[$d3],@x[$d3],@x[$a3])",
	"&rotlwi (@x[$d0],@x[$d0],16)",
	"&rotlwi (@x[$d1],@x[$d1],16)",
	"&rotlwi (@x[$d2],@x[$d2],16)",
	"&rotlwi (@x[$d3],@x[$d3],16)",

	"&add (@x[$c0],@x[$c0],@x[$d0])",
	"&add (@x[$c1],@x[$c1],@x[$d1])",
	"&add (@x[$c2],@x[$c2],@x[$d2])",
	"&add (@x[$c3],@x[$c3],@x[$d3])",
	"&xor (@x[$b0],@x[$b0],@x[$c0])",
	"&xor (@x[$b1],@x[$b1],@x[$c1])",
	"&xor (@x[$b2],@x[$b2],@x[$c2])",
	"&xor (@x[$b3],@x[$b3],@x[$c3])",
	"&rotlwi (@x[$b0],@x[$b0],12)",
	"&rotlwi (@x[$b1],@x[$b1],12)",
	"&rotlwi (@x[$b2],@x[$b2],12)",
	"&rotlwi (@x[$b3],@x[$b3],12)",

	"&add (@x[$a0],@x[$a0],@x[$b0])",
	"&add (@x[$a1],@x[$a1],@x[$b1])",
	"&add (@x[$a2],@x[$a2],@x[$b2])",
	"&add (@x[$a3],@x[$a3],@x[$b3])",
	"&xor (@x[$d0],@x[$d0],@x[$a0])",
	"&xor (@x[$d1],@x[$d1],@x[$a1])",
	"&xor (@x[$d2],@x[$d2],@x[$a2])",
	"&xor (@x[$d3],@x[$d3],@x[$a3])",
	"&rotlwi (@x[$d0],@x[$d0],8)",
	"&rotlwi (@x[$d1],@x[$d1],8)",
	"&rotlwi (@x[$d2],@x[$d2],8)",
	"&rotlwi (@x[$d3],@x[$d3],8)",

	"&add (@x[$c0],@x[$c0],@x[$d0])",
	"&add (@x[$c1],@x[$c1],@x[$d1])",
	"&add (@x[$c2],@x[$c2],@x[$d2])",
	"&add (@x[$c3],@x[$c3],@x[$d3])",
	"&xor (@x[$b0],@x[$b0],@x[$c0])",
	"&xor (@x[$b1],@x[$b1],@x[$c1])",
	"&xor (@x[$b2],@x[$b2],@x[$c2])",
	"&xor (@x[$b3],@x[$b3],@x[$c3])",
	"&rotlwi (@x[$b0],@x[$b0],7)",
	"&rotlwi (@x[$b1],@x[$b1],7)",
	"&rotlwi (@x[$b2],@x[$b2],7)",
	"&rotlwi (@x[$b3],@x[$b3],7)"
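	# Added for reference: each column above is one standard ChaCha
	# quarter-round on (a,b,c,d), i.e.
	#	a += b; d ^= a; d <<<= 16;
	#	c += d; b ^= c; b <<<= 12;
	#	a += b; d ^= a; d <<<=  8;
	#	c += d; b ^= c; b <<<=  7;
	# the four columns are interleaved only to keep the integer pipeline busy.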
.globl	.ChaCha20_ctr32_int
__ChaCha20_ctr32_int:
	$STU $sp,-$FRAME($sp)
	$PUSH r14,`$FRAME-$SIZE_T*18`($sp)
	$PUSH r15,`$FRAME-$SIZE_T*17`($sp)
	$PUSH r16,`$FRAME-$SIZE_T*16`($sp)
	$PUSH r17,`$FRAME-$SIZE_T*15`($sp)
	$PUSH r18,`$FRAME-$SIZE_T*14`($sp)
	$PUSH r19,`$FRAME-$SIZE_T*13`($sp)
	$PUSH r20,`$FRAME-$SIZE_T*12`($sp)
	$PUSH r21,`$FRAME-$SIZE_T*11`($sp)
	$PUSH r22,`$FRAME-$SIZE_T*10`($sp)
	$PUSH r23,`$FRAME-$SIZE_T*9`($sp)
	$PUSH r24,`$FRAME-$SIZE_T*8`($sp)
	$PUSH r25,`$FRAME-$SIZE_T*7`($sp)
	$PUSH r26,`$FRAME-$SIZE_T*6`($sp)
	$PUSH r27,`$FRAME-$SIZE_T*5`($sp)
	$PUSH r28,`$FRAME-$SIZE_T*4`($sp)
	$PUSH r29,`$FRAME-$SIZE_T*3`($sp)
	$PUSH r30,`$FRAME-$SIZE_T*2`($sp)
	$PUSH r31,`$FRAME-$SIZE_T*1`($sp)
	$PUSH r0,`$FRAME+$LRSAVE`($sp)
	lwz @d[0],0($ctr)		# load counter
	$POP r0,`$FRAME+$LRSAVE`($sp)
	$POP r14,`$FRAME-$SIZE_T*18`($sp)
	$POP r15,`$FRAME-$SIZE_T*17`($sp)
	$POP r16,`$FRAME-$SIZE_T*16`($sp)
	$POP r17,`$FRAME-$SIZE_T*15`($sp)
	$POP r18,`$FRAME-$SIZE_T*14`($sp)
	$POP r19,`$FRAME-$SIZE_T*13`($sp)
	$POP r20,`$FRAME-$SIZE_T*12`($sp)
	$POP r21,`$FRAME-$SIZE_T*11`($sp)
	$POP r22,`$FRAME-$SIZE_T*10`($sp)
	$POP r23,`$FRAME-$SIZE_T*9`($sp)
	$POP r24,`$FRAME-$SIZE_T*8`($sp)
	$POP r25,`$FRAME-$SIZE_T*7`($sp)
	$POP r26,`$FRAME-$SIZE_T*6`($sp)
	$POP r27,`$FRAME-$SIZE_T*5`($sp)
	$POP r28,`$FRAME-$SIZE_T*4`($sp)
	$POP r29,`$FRAME-$SIZE_T*3`($sp)
	$POP r30,`$FRAME-$SIZE_T*2`($sp)
	$POP r31,`$FRAME-$SIZE_T*1`($sp)
	.byte 0,12,4,1,0x80,18,5,0
.size	.ChaCha20_ctr32_int,.-.ChaCha20_ctr32_int

	lis @x[0],0x6170		# synthesize sigma
	ori @x[0],@x[0],0x7865
	ori @x[1],@x[1],0x646e
	ori @x[2],@x[2],0x2d32
	ori @x[3],@x[3],0x6574
	li r0,10			# inner loop counter
	lwz @x[4],0($key)		# load key
	mr @x[12],@d[0]			# copy counter

foreach (&ROUND(0, 4, 8,12)) { eval; }
foreach (&ROUND(0, 5,10,15)) { eval; }

	subic $len,$len,64		# $len-=64
	addi @x[0],@x[0],0x7865		# accumulate key block
	addi @x[1],@x[1],0x646e
	addi @x[2],@x[2],0x2d32
	addi @x[3],@x[3],0x6574
	addis @x[0],@x[0],0x6170
	addis @x[1],@x[1],0x3320
	addis @x[2],@x[2],0x7962
	addis @x[3],@x[3],0x6b20
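	# Added note: the addi/addis pairs above re-add the four sigma words
	# ("expa","nd 3","2-by","te k" = 0x61707865,0x3320646e,0x79622d32,
	# 0x6b206574) in low/high halves; all four low halves are below 0x8000,
	# so addi's sign extension never disturbs the high halves.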
	subfe. r0,r0,r0			# borrow?-1:0
	add @x[4],@x[4],@t[0]
	add @x[5],@x[5],@t[1]
	add @x[6],@x[6],@t[2]
	add @x[7],@x[7],@t[3]
	add @x[8],@x[8],@t[0]
	add @x[9],@x[9],@t[1]
	add @x[10],@x[10],@t[2]
	add @x[11],@x[11],@t[3]
	add @x[12],@x[12],@d[0]
	add @x[13],@x[13],@d[1]
	add @x[14],@x[14],@d[2]
	add @x[15],@x[15],@d[3]
	addi @d[0],@d[0],1		# increment counter
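	# Added note: on big-endian builds the rotlwi/rlwimi triplet emitted
	# below is the usual three-instruction PowerPC byte swap (assuming
	# @t[$i&3] holds a copy of the original word), so every state word
	# ends up in the little-endian order the ChaCha specification uses.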
if (!$LITTLE_ENDIAN) { for($i=0;$i<16;$i++) {	# flip byte order
	rotlwi @x[$i],@x[$i],8
	rlwimi @x[$i],@t[$i&3],24,0,7
	rlwimi @x[$i],@t[$i&3],24,16,23

	bne Ltail			# $len-=64 borrowed

	lwz @t[0],0($inp)		# load input, aligned or not
	${UCMP}i $len,0			# done already?
	xor @x[0],@x[0],@t[0]		# xor with input
	xor @x[1],@x[1],@t[1]
	xor @x[2],@x[2],@t[2]
	xor @x[3],@x[3],@t[3]
	xor @x[4],@x[4],@t[0]
	xor @x[5],@x[5],@t[1]
	xor @x[6],@x[6],@t[2]
	xor @x[7],@x[7],@t[3]
	xor @x[8],@x[8],@t[0]
	xor @x[9],@x[9],@t[1]
	xor @x[10],@x[10],@t[2]
	xor @x[11],@x[11],@t[3]
	xor @x[12],@x[12],@t[0]
	stw @x[0],0($out)		# store output, aligned or not
	xor @x[13],@x[13],@t[1]
	xor @x[14],@x[14],@t[2]
	xor @x[15],@x[15],@t[3]

	addi $len,$len,64		# restore tail length
	subi $inp,$inp,1		# prepare for *++ptr
	addi @t[0],$sp,$LOCALS-1

	stw @x[0],`$LOCALS+0`($sp)	# save whole block to stack
	stw @x[1],`$LOCALS+4`($sp)
	stw @x[2],`$LOCALS+8`($sp)
	stw @x[3],`$LOCALS+12`($sp)
	stw @x[4],`$LOCALS+16`($sp)
	stw @x[5],`$LOCALS+20`($sp)
	stw @x[6],`$LOCALS+24`($sp)
	stw @x[7],`$LOCALS+28`($sp)
	stw @x[8],`$LOCALS+32`($sp)
	stw @x[9],`$LOCALS+36`($sp)
	stw @x[10],`$LOCALS+40`($sp)
	stw @x[11],`$LOCALS+44`($sp)
	stw @x[12],`$LOCALS+48`($sp)
	stw @x[13],`$LOCALS+52`($sp)
	stw @x[14],`$LOCALS+56`($sp)
	stw @x[15],`$LOCALS+60`($sp)

Loop_tail:				# byte-by-byte loop
	xor @d[1],@d[0],@x[0]

	stw $sp,`$LOCALS+0`($sp)	# wipe block on stack
	stw $sp,`$LOCALS+4`($sp)
	stw $sp,`$LOCALS+8`($sp)
	stw $sp,`$LOCALS+12`($sp)
	stw $sp,`$LOCALS+16`($sp)
	stw $sp,`$LOCALS+20`($sp)
	stw $sp,`$LOCALS+24`($sp)
	stw $sp,`$LOCALS+28`($sp)
	stw $sp,`$LOCALS+32`($sp)
	stw $sp,`$LOCALS+36`($sp)
	stw $sp,`$LOCALS+40`($sp)
	stw $sp,`$LOCALS+44`($sp)
	stw $sp,`$LOCALS+48`($sp)
	stw $sp,`$LOCALS+52`($sp)
	stw $sp,`$LOCALS+56`($sp)
	stw $sp,`$LOCALS+60`($sp)

	.byte 0,12,0x14,0,0,0,0,0

my ($A0,$B0,$C0,$D0,$A1,$B1,$C1,$D1,$A2,$B2,$C2,$D2,$T0,$T1,$T2) =
my (@K)=map("v$_",(15..20));
my ($FOUR,$sixteen,$twenty4,$twenty,$twelve,$twenty5,$seven) =
my ($inpperm,$outperm,$outmask) = map("v$_",(28..30));
my @D=("v31",$seven,$T0,$T1,$T2);
my $FRAME=$LOCALS+64+13*16+18*$SIZE_T;	# 13*16 is for v20-v31 offload

my ($a,$b,$c,$d,$t)=@_;

	"&vadduwm ('$a','$a','$b')",
	"&vxor ('$d','$d','$a')",
	"&vperm ('$d','$d','$d','$sixteen')",

	"&vadduwm ('$c','$c','$d')",
	"&vxor ('$t','$b','$c')",
	"&vsrw ('$b','$t','$twenty')",
	"&vslw ('$t','$t','$twelve')",
	"&vor ('$b','$b','$t')",

	"&vadduwm ('$a','$a','$b')",
	"&vxor ('$d','$d','$a')",
	"&vperm ('$d','$d','$d','$twenty4')",

	"&vadduwm ('$c','$c','$d')",
	"&vxor ('$t','$b','$c')",
	"&vsrw ('$b','$t','$twenty5')",
	"&vslw ('$t','$t','$seven')",
	"&vor ('$b','$b','$t')",

	"&vsldoi ('$c','$c','$c',8)",
	"&vsldoi ('$b','$b','$b',$odd?4:12)",
	"&vsldoi ('$d','$d','$d',$odd?12:4)"
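	# Added for reference: this is the same quarter-round as ROUND() above,
	# one lane per state row. Rotations by 16 and 8 are done with vperm
	# through the byte-shuffle masks $sixteen and $twenty4; 12 and 7 are
	# not byte multiples, so they use vslw/vsrw pairs ($twelve/$twenty and
	# $seven/$twenty5) combined with vor. The trailing vsldoi lines rotate
	# the b/c/d rows by one, two and three lanes to switch between the
	# column and diagonal ($odd) rounds and back.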
.globl	.ChaCha20_ctr32_vmx
	blt __ChaCha20_ctr32_int

	$STU $sp,-$FRAME($sp)
	li r10,`15+$LOCALS+64`
	li r11,`31+$LOCALS+64`
	stw r12,`$FRAME-$SIZE_T*18-4`($sp)	# save vrsave
	$PUSH r14,`$FRAME-$SIZE_T*18`($sp)
	$PUSH r15,`$FRAME-$SIZE_T*17`($sp)
	$PUSH r16,`$FRAME-$SIZE_T*16`($sp)
	$PUSH r17,`$FRAME-$SIZE_T*15`($sp)
	$PUSH r18,`$FRAME-$SIZE_T*14`($sp)
	$PUSH r19,`$FRAME-$SIZE_T*13`($sp)
	$PUSH r20,`$FRAME-$SIZE_T*12`($sp)
	$PUSH r21,`$FRAME-$SIZE_T*11`($sp)
	$PUSH r22,`$FRAME-$SIZE_T*10`($sp)
	$PUSH r23,`$FRAME-$SIZE_T*9`($sp)
	$PUSH r24,`$FRAME-$SIZE_T*8`($sp)
	$PUSH r25,`$FRAME-$SIZE_T*7`($sp)
	$PUSH r26,`$FRAME-$SIZE_T*6`($sp)
	$PUSH r27,`$FRAME-$SIZE_T*5`($sp)
	$PUSH r28,`$FRAME-$SIZE_T*4`($sp)
	$PUSH r29,`$FRAME-$SIZE_T*3`($sp)
	$PUSH r30,`$FRAME-$SIZE_T*2`($sp)
	$PUSH r31,`$FRAME-$SIZE_T*1`($sp)
	$PUSH r0, `$FRAME+$LRSAVE`($sp)
	mtspr 256,r12			# preserve all AltiVec registers

	bl Lconsts			# returns pointer Lsigma in r12
	li @x[4],31			# 31 is not a typo
	li @x[5],15			# nor is 15

	lvx @K[1],0,$key		# load key
	?lvsr $T0,0,$key		# prepare unaligned load
	lvx @K[3],0,$ctr		# load counter
	?lvsr $T1,0,$ctr		# prepare unaligned load

	lvx @K[0],0,r12			# load constants
	lvx @K[5],@x[0],r12		# one
	lvx $sixteen,@x[2],r12
	lvx $twenty4,@x[3],r12

	?vperm @K[1],@K[2],@K[1],$T0	# align key
	?vperm @K[2],@D[0],@K[2],$T0
	?vperm @K[3],@D[1],@K[3],$T1	# align counter

	lwz @d[0],0($ctr)		# load counter to GPR

	vadduwm @K[3],@K[3],@K[5]	# adjust AltiVec counter
	vadduwm @K[4],@K[3],@K[5]
	vadduwm @K[5],@K[4],@K[5]

	vspltisw $twenty,-12		# synthesize constants
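	# Added note: the negative vspltisw immediates work because vslw/vsrw
	# only look at the low five bits of each shift count, so e.g. -12 is
	# taken modulo 32 and yields the right-shift amount 20.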
	#vspltisw $seven,7		# synthesized in the loop

	vxor $T0,$T0,$T0		# 0x00..00
	vspltisw $outmask,-1		# 0xff..ff
	?lvsr $inpperm,0,$inp		# prepare for unaligned load
	?lvsl $outperm,0,$out		# prepare for unaligned store
	?vperm $outmask,$outmask,$T0,$outperm

	be?lvsl $T0,0,@x[0]		# 0x00..0f
	be?vspltisb $T1,3		# 0x03..03
	be?vxor $T0,$T0,$T1		# swap bytes within words
	be?vxor $outperm,$outperm,$T1
	be?vperm $inpperm,$inpperm,$inpperm,$T0

	lis @x[0],0x6170		# synthesize sigma
	ori @x[0],@x[0],0x7865
	ori @x[1],@x[1],0x646e
	ori @x[2],@x[2],0x2d32
	ori @x[3],@x[3],0x6574

	li r0,10			# inner loop counter
	lwz @x[4],0($key)		# load key to GPR
	mr @x[12],@d[0]			# copy GPR counter

my @thread0=&VMXROUND($A0,$B0,$C0,$D0,$T0,0);
my @thread1=&VMXROUND($A1,$B1,$C1,$D1,$T1,0);
my @thread2=&VMXROUND($A2,$B2,$C2,$D2,$T2,0);
my @thread3=&ROUND(0,4,8,12);

	eval;			eval(shift(@thread3));
	eval(shift(@thread1));	eval(shift(@thread3));
	eval(shift(@thread2));	eval(shift(@thread3));

@thread0=&VMXROUND($A0,$B0,$C0,$D0,$T0,1);
@thread1=&VMXROUND($A1,$B1,$C1,$D1,$T1,1);
@thread2=&VMXROUND($A2,$B2,$C2,$D2,$T2,1);
@thread3=&ROUND(0,5,10,15);

	eval;			eval(shift(@thread3));
	eval(shift(@thread1));	eval(shift(@thread3));
	eval(shift(@thread2));	eval(shift(@thread3));
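# Added note: three 64-byte blocks are processed in the AltiVec "threads"
# 0-2 while thread 3 runs the scalar ROUND() on a fourth block; their
# instructions are interleaved above so the vector and integer pipelines
# work in parallel (the "3xAltiVec+1xIALU" column in the header).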
	subi $len,$len,256		# $len-=256
	addi @x[0],@x[0],0x7865		# accumulate key block
	addi @x[1],@x[1],0x646e
	addi @x[2],@x[2],0x2d32
	addi @x[3],@x[3],0x6574
	addis @x[0],@x[0],0x6170
	addis @x[1],@x[1],0x3320
	addis @x[2],@x[2],0x7962
	addis @x[3],@x[3],0x6b20
	add @x[4],@x[4],@t[0]
	add @x[5],@x[5],@t[1]
	add @x[6],@x[6],@t[2]
	add @x[7],@x[7],@t[3]
	add @x[8],@x[8],@t[0]
	add @x[9],@x[9],@t[1]
	add @x[10],@x[10],@t[2]
	add @x[11],@x[11],@t[3]
	add @x[12],@x[12],@d[0]
	add @x[13],@x[13],@d[1]
	add @x[14],@x[14],@d[2]
	add @x[15],@x[15],@d[3]

	vadduwm $A0,$A0,@K[0]		# accumulate key block
	vadduwm $A1,$A1,@K[0]
	vadduwm $A2,$A2,@K[0]
	vadduwm $B0,$B0,@K[1]
	vadduwm $B1,$B1,@K[1]
	vadduwm $B2,$B2,@K[1]
	vadduwm $C0,$C0,@K[2]
	vadduwm $C1,$C1,@K[2]
	vadduwm $C2,$C2,@K[2]
	vadduwm $D0,$D0,@K[3]
	vadduwm $D1,$D1,@K[4]
	vadduwm $D2,$D2,@K[5]

	addi @d[0],@d[0],4		# increment counter
	vadduwm @K[3],@K[3],$FOUR
	vadduwm @K[4],@K[4],$FOUR
	vadduwm @K[5],@K[5],$FOUR
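	# Added note: the counters advance by 4 because each iteration
	# produces four 64-byte blocks (one scalar + three vector), i.e.
	# the 256 bytes subtracted from $len above.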
if (!$LITTLE_ENDIAN) { for($i=0;$i<16;$i++) {	# flip byte order
	rotlwi @x[$i],@x[$i],8
	rlwimi @x[$i],@t[$i&3],24,0,7
	rlwimi @x[$i],@t[$i&3],24,16,23

	lwz @t[0],0($inp)		# load input, aligned or not
	xor @x[0],@x[0],@t[0]		# xor with input
	xor @x[1],@x[1],@t[1]
	xor @x[2],@x[2],@t[2]
	xor @x[3],@x[3],@t[3]
	xor @x[4],@x[4],@t[0]
	xor @x[5],@x[5],@t[1]
	xor @x[6],@x[6],@t[2]
	xor @x[7],@x[7],@t[3]
	xor @x[8],@x[8],@t[0]
	xor @x[9],@x[9],@t[1]
	xor @x[10],@x[10],@t[2]
	xor @x[11],@x[11],@t[3]
	xor @x[12],@x[12],@t[0]
	stw @x[0],0($out)		# store output, aligned or not
	xor @x[13],@x[13],@t[1]
	xor @x[14],@x[14],@t[2]
	xor @x[15],@x[15],@t[3]

	lvx @D[0],0,$inp		# load input
	?vperm @D[0],@D[1],@D[0],$inpperm	# align input
	?vperm @D[1],@D[2],@D[1],$inpperm
	?vperm @D[2],@D[3],@D[2],$inpperm
	?vperm @D[3],@D[4],@D[3],$inpperm
	vxor $A0,$A0,@D[0]		# xor with input
	lvx @D[1],@t[0],$inp		# keep loading input
	li @t[3],63			# 63 is not a typo
	vperm $A0,$A0,$A0,$outperm	# pre-misalign output
	vperm $B0,$B0,$B0,$outperm
	vperm $C0,$C0,$C0,$outperm
	vperm $D0,$D0,$D0,$outperm

	?vperm @D[4],@D[1],@D[4],$inpperm	# align input
	?vperm @D[1],@D[2],@D[1],$inpperm
	?vperm @D[2],@D[3],@D[2],$inpperm
	?vperm @D[3],@D[0],@D[3],$inpperm
	lvx @D[1],@t[0],$inp		# keep loading input
	lvx @D[4],@t[3],$inp		# redundant in aligned case
	vperm $A1,$A1,$A1,$outperm	# pre-misalign output
	vperm $B1,$B1,$B1,$outperm
	vperm $C1,$C1,$C1,$outperm
	vperm $D1,$D1,$D1,$outperm

	?vperm @D[0],@D[1],@D[0],$inpperm	# align input
	?vperm @D[1],@D[2],@D[1],$inpperm
	?vperm @D[2],@D[3],@D[2],$inpperm
	?vperm @D[3],@D[4],@D[3],$inpperm
	vperm $A2,$A2,$A2,$outperm	# pre-misalign output
	vperm $B2,$B2,$B2,$outperm
	vperm $C2,$C2,$C2,$outperm
	vperm $D2,$D2,$D2,$outperm

	andi. @x[1],$out,15		# is $out aligned?
	vsel @D[0],$A0,$B0,$outmask	# collect pre-misaligned output
	vsel @D[1],$B0,$C0,$outmask
	vsel @D[2],$C0,$D0,$outmask
	vsel @D[3],$D0,$A1,$outmask
	vsel $B0,$A1,$B1,$outmask
	vsel $C0,$B1,$C1,$outmask
	vsel $D0,$C1,$D1,$outmask
	vsel $A1,$D1,$A2,$outmask
	vsel $B1,$A2,$B2,$outmask
	vsel $C1,$B2,$C2,$outmask
	vsel $D1,$C2,$D2,$outmask
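	# Added note: this is the usual AltiVec unaligned-store pattern; each
	# output vector was rotated through $outperm above, and every vsel
	# merges the tail of one rotated vector with the head of the next so
	# the stvx stores below stay 16-byte aligned, while the first and
	# last partial vectors are handled on the edges (byte-by-byte when
	# $out is misaligned).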
	#stvx $A0,0,$out		# take it easy on the edges
	stvx @D[0],@t[0],$out		# store output
	stvx @D[1],@t[1],$out
	stvx @D[2],@t[2],$out

	sub @x[2],$out,@x[1]		# in misaligned case edges
	li @x[3],0			# are written byte-by-byte
	stvebx $D2,@x[3],@x[2]
	bne Lunaligned_tail_vmx

	sub @x[2],@x[0],@x[1]
	stvebx $A0,@x[1],@x[2]
	bne Lunaligned_head_vmx

	${UCMP}i $len,255		# done with 256-byte blocks yet?
	stvx $A0,0,@x[0]		# head hexaword was not stored
	${UCMP}i $len,255		# done with 256-byte blocks yet?
	${UCMP}i $len,0			# done yet?

	lwz r12,`$FRAME-$SIZE_T*18-4`($sp)	# pull vrsave
	li r10,`15+$LOCALS+64`
	li r11,`31+$LOCALS+64`
	mtspr 256,r12			# restore vrsave

	$POP r0, `$FRAME+$LRSAVE`($sp)
	$POP r14,`$FRAME-$SIZE_T*18`($sp)
	$POP r15,`$FRAME-$SIZE_T*17`($sp)
	$POP r16,`$FRAME-$SIZE_T*16`($sp)
	$POP r17,`$FRAME-$SIZE_T*15`($sp)
	$POP r18,`$FRAME-$SIZE_T*14`($sp)
	$POP r19,`$FRAME-$SIZE_T*13`($sp)
	$POP r20,`$FRAME-$SIZE_T*12`($sp)
	$POP r21,`$FRAME-$SIZE_T*11`($sp)
	$POP r22,`$FRAME-$SIZE_T*10`($sp)
	$POP r23,`$FRAME-$SIZE_T*9`($sp)
	$POP r24,`$FRAME-$SIZE_T*8`($sp)
	$POP r25,`$FRAME-$SIZE_T*7`($sp)
	$POP r26,`$FRAME-$SIZE_T*6`($sp)
	$POP r27,`$FRAME-$SIZE_T*5`($sp)
	$POP r28,`$FRAME-$SIZE_T*4`($sp)
	$POP r29,`$FRAME-$SIZE_T*3`($sp)
	$POP r30,`$FRAME-$SIZE_T*2`($sp)
	$POP r31,`$FRAME-$SIZE_T*1`($sp)
	.byte 0,12,0x04,1,0x80,18,5,0
.size	.ChaCha20_ctr32_vmx,.-.ChaCha20_ctr32_vmx
	mflr r12			#vvvvv "distance" between . and Lsigma
	.byte 0,12,0x14,0,0,0,0,0

	.long 0x61707865,0x3320646e,0x79622d32,0x6b206574
$code.=<<___ if ($LITTLE_ENDIAN);
	.long 0x0e0f0c0d,0x0a0b0809,0x06070405,0x02030001
	.long 0x0d0e0f0c,0x090a0b08,0x05060704,0x01020300
$code.=<<___ if (!$LITTLE_ENDIAN);	# flipped words
	.long 0x02030001,0x06070405,0x0a0b0809,0x0e0f0c0d
	.long 0x01020300,0x05060704,0x090a0b08,0x0d0e0f0c
.asciz "ChaCha20 for PowerPC/AltiVec, CRYPTOGAMS by <appro\@openssl.org>"

foreach (split("\n",$code)) {
	s/\`([^\`]*)\`/eval $1/ge;

	# instructions prefixed with '?' are endian-specific and need
	# to be adjusted accordingly...
	if ($flavour !~ /le$/) {	# big-endian
	    s/\?(vperm\s+v[0-9]+,\s*)(v[0-9]+,\s*)(v[0-9]+,\s*)(v[0-9]+)/$1$3$2$4/ or
	    s/(vsldoi\s+v[0-9]+,\s*)(v[0-9]+,)\s*(v[0-9]+,\s*)([0-9]+)/$1$3$2 16-$4/;
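	    # Added example: on big-endian targets the first substitution
	    # swaps the two source registers, so "?vperm vX,vA,vB,vM" is
	    # emitted as "vperm vX,vB,vA,vM", and a "vsldoi vX,vA,vB,N"
	    # matched by the second becomes "vsldoi vX,vB,vA,16-N".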
	} else {			# little-endian