#! /usr/bin/env perl
# Copyright 2016 The OpenSSL Project Authors. All Rights Reserved.
#
# Licensed under the OpenSSL license (the "License").  You may not use
# this file except in compliance with the License.  You can obtain a copy
# in the file LICENSE in the source distribution or at
# https://www.openssl.org/source/license.html

#
# ====================================================================
# Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
# project. The module is, however, dual licensed under OpenSSL and
# CRYPTOGAMS licenses depending on where you obtain it. For further
# details see http://www.openssl.org/~appro/cryptogams/.
# ====================================================================
#
# October 2015
#
# ChaCha20 for PowerPC/AltiVec.
#
# Performance in cycles per byte out of large buffer.
#
#                       IALU/gcc-4.x    3xAltiVec+1xIALU
#
# Freescale e300        13.6/+115%      -
# PPC74x0/G4e           6.81/+310%      4.66
# PPC970/G5             9.29/+160%      4.60
# POWER7                8.62/+61%       4.27
# POWER8                8.70/+51%       3.96

$flavour = shift;

if ($flavour =~ /64/) {
        $SIZE_T =8;
        $LRSAVE =2*$SIZE_T;
        $STU    ="stdu";
        $POP    ="ld";
        $PUSH   ="std";
        $UCMP   ="cmpld";
} elsif ($flavour =~ /32/) {
        $SIZE_T =4;
        $LRSAVE =$SIZE_T;
        $STU    ="stwu";
        $POP    ="lwz";
        $PUSH   ="stw";
        $UCMP   ="cmplw";
} else { die "nonsense $flavour"; }

$LITTLE_ENDIAN = ($flavour=~/le$/) ? 1 : 0;

$0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
( $xlate="${dir}ppc-xlate.pl" and -f $xlate ) or
( $xlate="${dir}../../perlasm/ppc-xlate.pl" and -f $xlate) or
die "can't locate ppc-xlate.pl";

open STDOUT,"| $^X $xlate $flavour ".shift or die "can't call $xlate: $!";

$LOCALS=6*$SIZE_T;
$FRAME=$LOCALS+64+18*$SIZE_T;   # 64 is for local variables

sub AUTOLOAD()          # thunk [simplified] x86-style perlasm
{ my $opcode = $AUTOLOAD; $opcode =~ s/.*:://; $opcode =~ s/_/\./;
    $code .= "\t$opcode\t".join(',',@_)."\n";
}
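# So, for instance, eval'ing the string "&vadduwm ('v0','v0','v15')" is
# caught by AUTOLOAD and appends "\tvadduwm\tv0,v0,v15\n" to $code; an
# underscore in the "opcode" becomes a dot, which is how record-form
# instructions would be spelled.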

my $sp = "r1";

my ($out,$inp,$len,$key,$ctr) = map("r$_",(3..7));

my @x=map("r$_",(16..31));
my @d=map("r$_",(11,12,14,15));
my @t=map("r$_",(7..10));

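# ROUND() returns the instruction stream for four independent ChaCha
# quarter-rounds, interleaved (note the staggered indentation) so that
# superscalar cores can overlap their execution. Each column is the
# textbook quarter-round
#
#       a += b; d ^= a; d <<<= 16;
#       c += d; b ^= c; b <<<= 12;
#       a += b; d ^= a; d <<<= 8;
#       c += d; b ^= c; b <<<= 7;
#
# expressed with add/xor/rotlwi on the 16 state words held in @x.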
sub ROUND {
my ($a0,$b0,$c0,$d0)=@_;
my ($a1,$b1,$c1,$d1)=map(($_&~3)+(($_+1)&3),($a0,$b0,$c0,$d0));
my ($a2,$b2,$c2,$d2)=map(($_&~3)+(($_+1)&3),($a1,$b1,$c1,$d1));
my ($a3,$b3,$c3,$d3)=map(($_&~3)+(($_+1)&3),($a2,$b2,$c2,$d2));

    (
        "&add           (@x[$a0],@x[$a0],@x[$b0])",
         "&add          (@x[$a1],@x[$a1],@x[$b1])",
          "&add         (@x[$a2],@x[$a2],@x[$b2])",
           "&add        (@x[$a3],@x[$a3],@x[$b3])",
        "&xor           (@x[$d0],@x[$d0],@x[$a0])",
         "&xor          (@x[$d1],@x[$d1],@x[$a1])",
          "&xor         (@x[$d2],@x[$d2],@x[$a2])",
           "&xor        (@x[$d3],@x[$d3],@x[$a3])",
        "&rotlwi        (@x[$d0],@x[$d0],16)",
         "&rotlwi       (@x[$d1],@x[$d1],16)",
          "&rotlwi      (@x[$d2],@x[$d2],16)",
           "&rotlwi     (@x[$d3],@x[$d3],16)",

        "&add           (@x[$c0],@x[$c0],@x[$d0])",
         "&add          (@x[$c1],@x[$c1],@x[$d1])",
          "&add         (@x[$c2],@x[$c2],@x[$d2])",
           "&add        (@x[$c3],@x[$c3],@x[$d3])",
        "&xor           (@x[$b0],@x[$b0],@x[$c0])",
         "&xor          (@x[$b1],@x[$b1],@x[$c1])",
          "&xor         (@x[$b2],@x[$b2],@x[$c2])",
           "&xor        (@x[$b3],@x[$b3],@x[$c3])",
        "&rotlwi        (@x[$b0],@x[$b0],12)",
         "&rotlwi       (@x[$b1],@x[$b1],12)",
          "&rotlwi      (@x[$b2],@x[$b2],12)",
           "&rotlwi     (@x[$b3],@x[$b3],12)",

        "&add           (@x[$a0],@x[$a0],@x[$b0])",
         "&add          (@x[$a1],@x[$a1],@x[$b1])",
          "&add         (@x[$a2],@x[$a2],@x[$b2])",
           "&add        (@x[$a3],@x[$a3],@x[$b3])",
        "&xor           (@x[$d0],@x[$d0],@x[$a0])",
         "&xor          (@x[$d1],@x[$d1],@x[$a1])",
          "&xor         (@x[$d2],@x[$d2],@x[$a2])",
           "&xor        (@x[$d3],@x[$d3],@x[$a3])",
        "&rotlwi        (@x[$d0],@x[$d0],8)",
         "&rotlwi       (@x[$d1],@x[$d1],8)",
          "&rotlwi      (@x[$d2],@x[$d2],8)",
           "&rotlwi     (@x[$d3],@x[$d3],8)",

        "&add           (@x[$c0],@x[$c0],@x[$d0])",
         "&add          (@x[$c1],@x[$c1],@x[$d1])",
          "&add         (@x[$c2],@x[$c2],@x[$d2])",
           "&add        (@x[$c3],@x[$c3],@x[$d3])",
        "&xor           (@x[$b0],@x[$b0],@x[$c0])",
         "&xor          (@x[$b1],@x[$b1],@x[$c1])",
          "&xor         (@x[$b2],@x[$b2],@x[$c2])",
           "&xor        (@x[$b3],@x[$b3],@x[$c3])",
        "&rotlwi        (@x[$b0],@x[$b0],7)",
         "&rotlwi       (@x[$b1],@x[$b1],7)",
          "&rotlwi      (@x[$b2],@x[$b2],7)",
           "&rotlwi     (@x[$b3],@x[$b3],7)"
    );
}

$code.=<<___;
.machine        "any"
.text

.globl  .ChaCha20_ctr32_int
.align  5
.ChaCha20_ctr32_int:
__ChaCha20_ctr32_int:
        ${UCMP}i $len,0
        beqlr-

        $STU    $sp,-$FRAME($sp)
        mflr    r0

        $PUSH   r14,`$FRAME-$SIZE_T*18`($sp)
        $PUSH   r15,`$FRAME-$SIZE_T*17`($sp)
        $PUSH   r16,`$FRAME-$SIZE_T*16`($sp)
        $PUSH   r17,`$FRAME-$SIZE_T*15`($sp)
        $PUSH   r18,`$FRAME-$SIZE_T*14`($sp)
        $PUSH   r19,`$FRAME-$SIZE_T*13`($sp)
        $PUSH   r20,`$FRAME-$SIZE_T*12`($sp)
        $PUSH   r21,`$FRAME-$SIZE_T*11`($sp)
        $PUSH   r22,`$FRAME-$SIZE_T*10`($sp)
        $PUSH   r23,`$FRAME-$SIZE_T*9`($sp)
        $PUSH   r24,`$FRAME-$SIZE_T*8`($sp)
        $PUSH   r25,`$FRAME-$SIZE_T*7`($sp)
        $PUSH   r26,`$FRAME-$SIZE_T*6`($sp)
        $PUSH   r27,`$FRAME-$SIZE_T*5`($sp)
        $PUSH   r28,`$FRAME-$SIZE_T*4`($sp)
        $PUSH   r29,`$FRAME-$SIZE_T*3`($sp)
        $PUSH   r30,`$FRAME-$SIZE_T*2`($sp)
        $PUSH   r31,`$FRAME-$SIZE_T*1`($sp)
        $PUSH   r0,`$FRAME+$LRSAVE`($sp)

        lwz     @d[0],0($ctr)                   # load counter
        lwz     @d[1],4($ctr)
        lwz     @d[2],8($ctr)
        lwz     @d[3],12($ctr)

        bl      __ChaCha20_1x

        $POP    r0,`$FRAME+$LRSAVE`($sp)
        $POP    r14,`$FRAME-$SIZE_T*18`($sp)
        $POP    r15,`$FRAME-$SIZE_T*17`($sp)
        $POP    r16,`$FRAME-$SIZE_T*16`($sp)
        $POP    r17,`$FRAME-$SIZE_T*15`($sp)
        $POP    r18,`$FRAME-$SIZE_T*14`($sp)
        $POP    r19,`$FRAME-$SIZE_T*13`($sp)
        $POP    r20,`$FRAME-$SIZE_T*12`($sp)
        $POP    r21,`$FRAME-$SIZE_T*11`($sp)
        $POP    r22,`$FRAME-$SIZE_T*10`($sp)
        $POP    r23,`$FRAME-$SIZE_T*9`($sp)
        $POP    r24,`$FRAME-$SIZE_T*8`($sp)
        $POP    r25,`$FRAME-$SIZE_T*7`($sp)
        $POP    r26,`$FRAME-$SIZE_T*6`($sp)
        $POP    r27,`$FRAME-$SIZE_T*5`($sp)
        $POP    r28,`$FRAME-$SIZE_T*4`($sp)
        $POP    r29,`$FRAME-$SIZE_T*3`($sp)
        $POP    r30,`$FRAME-$SIZE_T*2`($sp)
        $POP    r31,`$FRAME-$SIZE_T*1`($sp)
        mtlr    r0
        addi    $sp,$sp,$FRAME
        blr
        .long   0
        .byte   0,12,4,1,0x80,18,5,0
        .long   0
.size   .ChaCha20_ctr32_int,.-.ChaCha20_ctr32_int

.align  5
__ChaCha20_1x:
Loop_outer:
        lis     @x[0],0x6170                    # synthesize sigma
        lis     @x[1],0x3320
        lis     @x[2],0x7962
        lis     @x[3],0x6b20
        ori     @x[0],@x[0],0x7865
        ori     @x[1],@x[1],0x646e
        ori     @x[2],@x[2],0x2d32
        ori     @x[3],@x[3],0x6574

        li      r0,10                           # inner loop counter
        lwz     @x[4],0($key)                   # load key
        lwz     @x[5],4($key)
        lwz     @x[6],8($key)
        lwz     @x[7],12($key)
        lwz     @x[8],16($key)
        mr      @x[12],@d[0]                    # copy counter
        lwz     @x[9],20($key)
        mr      @x[13],@d[1]
        lwz     @x[10],24($key)
        mr      @x[14],@d[2]
        lwz     @x[11],28($key)
        mr      @x[15],@d[3]

        mr      @t[0],@x[4]
        mr      @t[1],@x[5]
        mr      @t[2],@x[6]
        mr      @t[3],@x[7]

        mtctr   r0
Loop:
___
        foreach (&ROUND(0, 4, 8,12)) { eval; }
        foreach (&ROUND(0, 5,10,15)) { eval; }
$code.=<<___;
        bdnz    Loop

        subic   $len,$len,64                    # $len-=64
        addi    @x[0],@x[0],0x7865              # accumulate key block
        addi    @x[1],@x[1],0x646e
        addi    @x[2],@x[2],0x2d32
        addi    @x[3],@x[3],0x6574
        addis   @x[0],@x[0],0x6170
        addis   @x[1],@x[1],0x3320
        addis   @x[2],@x[2],0x7962
        addis   @x[3],@x[3],0x6b20

        subfe.  r0,r0,r0                        # borrow?-1:0
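        # subic above set CA=0 exactly when $len-64 borrowed, and
        # subfe. computes r0 = ~r0+r0+CA = CA-1, i.e. -1 on borrow and
        # 0 otherwise; its record form sets CR0 for "bne Ltail" below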
        add     @x[4],@x[4],@t[0]
        lwz     @t[0],16($key)
        add     @x[5],@x[5],@t[1]
        lwz     @t[1],20($key)
        add     @x[6],@x[6],@t[2]
        lwz     @t[2],24($key)
        add     @x[7],@x[7],@t[3]
        lwz     @t[3],28($key)
        add     @x[8],@x[8],@t[0]
        add     @x[9],@x[9],@t[1]
        add     @x[10],@x[10],@t[2]
        add     @x[11],@x[11],@t[3]

        add     @x[12],@x[12],@d[0]
        add     @x[13],@x[13],@d[1]
        add     @x[14],@x[14],@d[2]
        add     @x[15],@x[15],@d[3]
        addi    @d[0],@d[0],1                   # increment counter
___
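# On big-endian builds each output word is byte-swapped with a classic
# three-instruction sequence: rotlwi by 8 moves every byte one lane, and
# the two rlwimi instructions patch bytes 0 and 2 back in from a
# rotate-by-24 copy of the original, completing a full 32-bit bswap.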
if (!$LITTLE_ENDIAN) { for($i=0;$i<16;$i++) {   # flip byte order
$code.=<<___;
        mr      @t[$i&3],@x[$i]
        rotlwi  @x[$i],@x[$i],8
        rlwimi  @x[$i],@t[$i&3],24,0,7
        rlwimi  @x[$i],@t[$i&3],24,16,23
___
} }
$code.=<<___;
        bne     Ltail                           # $len-=64 borrowed

        lwz     @t[0],0($inp)                   # load input, aligned or not
        lwz     @t[1],4($inp)
        ${UCMP}i $len,0                         # done already?
        lwz     @t[2],8($inp)
        lwz     @t[3],12($inp)
        xor     @x[0],@x[0],@t[0]               # xor with input
        lwz     @t[0],16($inp)
        xor     @x[1],@x[1],@t[1]
        lwz     @t[1],20($inp)
        xor     @x[2],@x[2],@t[2]
        lwz     @t[2],24($inp)
        xor     @x[3],@x[3],@t[3]
        lwz     @t[3],28($inp)
        xor     @x[4],@x[4],@t[0]
        lwz     @t[0],32($inp)
        xor     @x[5],@x[5],@t[1]
        lwz     @t[1],36($inp)
        xor     @x[6],@x[6],@t[2]
        lwz     @t[2],40($inp)
        xor     @x[7],@x[7],@t[3]
        lwz     @t[3],44($inp)
        xor     @x[8],@x[8],@t[0]
        lwz     @t[0],48($inp)
        xor     @x[9],@x[9],@t[1]
        lwz     @t[1],52($inp)
        xor     @x[10],@x[10],@t[2]
        lwz     @t[2],56($inp)
        xor     @x[11],@x[11],@t[3]
        lwz     @t[3],60($inp)
        xor     @x[12],@x[12],@t[0]
        stw     @x[0],0($out)                   # store output, aligned or not
        xor     @x[13],@x[13],@t[1]
        stw     @x[1],4($out)
        xor     @x[14],@x[14],@t[2]
        stw     @x[2],8($out)
        xor     @x[15],@x[15],@t[3]
        stw     @x[3],12($out)
        stw     @x[4],16($out)
        stw     @x[5],20($out)
        stw     @x[6],24($out)
        stw     @x[7],28($out)
        stw     @x[8],32($out)
        stw     @x[9],36($out)
        stw     @x[10],40($out)
        stw     @x[11],44($out)
        stw     @x[12],48($out)
        stw     @x[13],52($out)
        stw     @x[14],56($out)
        addi    $inp,$inp,64
        stw     @x[15],60($out)
        addi    $out,$out,64

        bne     Loop_outer

        blr

.align  4
Ltail:
        addi    $len,$len,64                    # restore tail length
        subi    $inp,$inp,1                     # prepare for *++ptr
        subi    $out,$out,1
        addi    @t[0],$sp,$LOCALS-1
        mtctr   $len
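        # lbzu/stbu update the address register *before* the access,
        # so all three pointers were pre-biased by -1 above to get
        # C-style *++ptr behaviour in Loop_tail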

        stw     @x[0],`$LOCALS+0`($sp)          # save whole block to stack
        stw     @x[1],`$LOCALS+4`($sp)
        stw     @x[2],`$LOCALS+8`($sp)
        stw     @x[3],`$LOCALS+12`($sp)
        stw     @x[4],`$LOCALS+16`($sp)
        stw     @x[5],`$LOCALS+20`($sp)
        stw     @x[6],`$LOCALS+24`($sp)
        stw     @x[7],`$LOCALS+28`($sp)
        stw     @x[8],`$LOCALS+32`($sp)
        stw     @x[9],`$LOCALS+36`($sp)
        stw     @x[10],`$LOCALS+40`($sp)
        stw     @x[11],`$LOCALS+44`($sp)
        stw     @x[12],`$LOCALS+48`($sp)
        stw     @x[13],`$LOCALS+52`($sp)
        stw     @x[14],`$LOCALS+56`($sp)
        stw     @x[15],`$LOCALS+60`($sp)

Loop_tail:                                      # byte-by-byte loop
        lbzu    @d[0],1($inp)
        lbzu    @x[0],1(@t[0])
        xor     @d[1],@d[0],@x[0]
        stbu    @d[1],1($out)
        bdnz    Loop_tail

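        # the keystream block staged on the stack is scrubbed before
        # returning; the stack pointer is merely a convenient
        # non-secret value to use as filler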
        stw     $sp,`$LOCALS+0`($sp)            # wipe block on stack
        stw     $sp,`$LOCALS+4`($sp)
        stw     $sp,`$LOCALS+8`($sp)
        stw     $sp,`$LOCALS+12`($sp)
        stw     $sp,`$LOCALS+16`($sp)
        stw     $sp,`$LOCALS+20`($sp)
        stw     $sp,`$LOCALS+24`($sp)
        stw     $sp,`$LOCALS+28`($sp)
        stw     $sp,`$LOCALS+32`($sp)
        stw     $sp,`$LOCALS+36`($sp)
        stw     $sp,`$LOCALS+40`($sp)
        stw     $sp,`$LOCALS+44`($sp)
        stw     $sp,`$LOCALS+48`($sp)
        stw     $sp,`$LOCALS+52`($sp)
        stw     $sp,`$LOCALS+56`($sp)
        stw     $sp,`$LOCALS+60`($sp)

        blr
        .long   0
        .byte   0,12,0x14,0,0,0,0,0
___

{{{
my ($A0,$B0,$C0,$D0,$A1,$B1,$C1,$D1,$A2,$B2,$C2,$D2,$T0,$T1,$T2) =
    map("v$_",(0..14));
my (@K)=map("v$_",(15..20));
my ($FOUR,$sixteen,$twenty4,$twenty,$twelve,$twenty5,$seven) =
    map("v$_",(21..27));
my ($inpperm,$outperm,$outmask) = map("v$_",(28..30));
my @D=("v31",$seven,$T0,$T1,$T2);

my $FRAME=$LOCALS+64+13*16+18*$SIZE_T;  # 13*16 is for v20-v31 offload

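# VMXROUND() emits one ChaCha quarter-round applied to all four columns
# (or diagonals) of a 4x32 state at once. The byte-granular rotates by
# 16 and 24 are done as vperm table lookups ($sixteen, $twenty4), while
# 12 and 7 use a vslw/vsrw/vor pair. Rather than transposing the state,
# the trailing vsldoi triplet rotates the $b, $c and $d lanes between
# the column and diagonal arrangements, with $odd selecting direction.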
sub VMXROUND {
my $odd = pop;
my ($a,$b,$c,$d,$t)=@_;

        (
        "&vadduwm       ('$a','$a','$b')",
        "&vxor          ('$d','$d','$a')",
        "&vperm         ('$d','$d','$d','$sixteen')",

        "&vadduwm       ('$c','$c','$d')",
        "&vxor          ('$t','$b','$c')",
        "&vsrw          ('$b','$t','$twenty')",
        "&vslw          ('$t','$t','$twelve')",
        "&vor           ('$b','$b','$t')",

        "&vadduwm       ('$a','$a','$b')",
        "&vxor          ('$d','$d','$a')",
        "&vperm         ('$d','$d','$d','$twenty4')",

        "&vadduwm       ('$c','$c','$d')",
        "&vxor          ('$t','$b','$c')",
        "&vsrw          ('$b','$t','$twenty5')",
        "&vslw          ('$t','$t','$seven')",
        "&vor           ('$b','$b','$t')",

        "&vsldoi        ('$c','$c','$c',8)",
        "&vsldoi        ('$b','$b','$b',$odd?4:12)",
        "&vsldoi        ('$d','$d','$d',$odd?12:4)"
        );
}

$code.=<<___;

.globl  .ChaCha20_ctr32_vmx
.align  5
.ChaCha20_ctr32_vmx:
        ${UCMP}i $len,256
        blt     __ChaCha20_ctr32_int

        $STU    $sp,-$FRAME($sp)
        mflr    r0
        li      r10,`15+$LOCALS+64`
        li      r11,`31+$LOCALS+64`
        mfspr   r12,256
        stvx    v20,r10,$sp
        addi    r10,r10,32
        stvx    v21,r11,$sp
        addi    r11,r11,32
        stvx    v22,r10,$sp
        addi    r10,r10,32
        stvx    v23,r11,$sp
        addi    r11,r11,32
        stvx    v24,r10,$sp
        addi    r10,r10,32
        stvx    v25,r11,$sp
        addi    r11,r11,32
        stvx    v26,r10,$sp
        addi    r10,r10,32
        stvx    v27,r11,$sp
        addi    r11,r11,32
        stvx    v28,r10,$sp
        addi    r10,r10,32
        stvx    v29,r11,$sp
        addi    r11,r11,32
        stvx    v30,r10,$sp
        stvx    v31,r11,$sp
        stw     r12,`$FRAME-$SIZE_T*18-4`($sp)  # save vrsave
        $PUSH   r14,`$FRAME-$SIZE_T*18`($sp)
        $PUSH   r15,`$FRAME-$SIZE_T*17`($sp)
        $PUSH   r16,`$FRAME-$SIZE_T*16`($sp)
        $PUSH   r17,`$FRAME-$SIZE_T*15`($sp)
        $PUSH   r18,`$FRAME-$SIZE_T*14`($sp)
        $PUSH   r19,`$FRAME-$SIZE_T*13`($sp)
        $PUSH   r20,`$FRAME-$SIZE_T*12`($sp)
        $PUSH   r21,`$FRAME-$SIZE_T*11`($sp)
        $PUSH   r22,`$FRAME-$SIZE_T*10`($sp)
        $PUSH   r23,`$FRAME-$SIZE_T*9`($sp)
        $PUSH   r24,`$FRAME-$SIZE_T*8`($sp)
        $PUSH   r25,`$FRAME-$SIZE_T*7`($sp)
        $PUSH   r26,`$FRAME-$SIZE_T*6`($sp)
        $PUSH   r27,`$FRAME-$SIZE_T*5`($sp)
        $PUSH   r28,`$FRAME-$SIZE_T*4`($sp)
        $PUSH   r29,`$FRAME-$SIZE_T*3`($sp)
        $PUSH   r30,`$FRAME-$SIZE_T*2`($sp)
        $PUSH   r31,`$FRAME-$SIZE_T*1`($sp)
        li      r12,-1
        $PUSH   r0, `$FRAME+$LRSAVE`($sp)
        mtspr   256,r12                         # preserve all AltiVec registers

        bl      Lconsts                         # returns pointer Lsigma in r12
        li      @x[0],16
        li      @x[1],32
        li      @x[2],48
        li      @x[3],64
        li      @x[4],31                        # 31 is not a typo
        li      @x[5],15                        # nor is 15
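        # lvx ignores the low four bits of its effective address, so
        # offsets 31 and 15 (rather than 32 and 16) reach the last
        # quadword touched by the 32-byte key and the 16-byte counter
        # without reading past them when they happen to be aligned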

        lvx     @K[1],0,$key                    # load key
        ?lvsr   $T0,0,$key                      # prepare unaligned load
        lvx     @K[2],@x[0],$key
        lvx     @D[0],@x[4],$key

        lvx     @K[3],0,$ctr                    # load counter
        ?lvsr   $T1,0,$ctr                      # prepare unaligned load
        lvx     @D[1],@x[5],$ctr

        lvx     @K[0],0,r12                     # load constants
        lvx     @K[5],@x[0],r12                 # one
        lvx     $FOUR,@x[1],r12
        lvx     $sixteen,@x[2],r12
        lvx     $twenty4,@x[3],r12

        ?vperm  @K[1],@K[2],@K[1],$T0           # align key
        ?vperm  @K[2],@D[0],@K[2],$T0
        ?vperm  @K[3],@D[1],@K[3],$T1           # align counter

        lwz     @d[0],0($ctr)                   # load counter to GPR
        lwz     @d[1],4($ctr)
        vadduwm @K[3],@K[3],@K[5]               # adjust AltiVec counter
        lwz     @d[2],8($ctr)
        vadduwm @K[4],@K[3],@K[5]
        lwz     @d[3],12($ctr)
        vadduwm @K[5],@K[4],@K[5]

        vspltisw $twenty,-12                    # synthesize constants
        vspltisw $twelve,12
        vspltisw $twenty5,-7
        #vspltisw $seven,7                      # synthesized in the loop
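        # vspltisw splats a sign-extended 5-bit immediate (-16..15),
        # and the vector shifts only look at the low five bits of each
        # element, so -12 and -7 act as shift counts of 20 and 25;
        # paired with 12 and 7 they implement the <<<12 and <<<7 rotates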

        vxor    $T0,$T0,$T0                     # 0x00..00
        vspltisw $outmask,-1                    # 0xff..ff
        ?lvsr   $inpperm,0,$inp                 # prepare for unaligned load
        ?lvsl   $outperm,0,$out                 # prepare for unaligned store
        ?vperm  $outmask,$outmask,$T0,$outperm

        be?lvsl $T0,0,@x[0]                     # 0x00..0f
        be?vspltisb $T1,3                       # 0x03..03
        be?vxor $T0,$T0,$T1                     # swap bytes within words
        be?vxor $outperm,$outperm,$T1
        be?vperm $inpperm,$inpperm,$inpperm,$T0
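        # classic AltiVec unaligned-access setup: lvsr/lvsl produce the
        # permutations that realign pairs of (inherently aligned) lvx
        # loads to the true $inp stream and pre-rotate results toward
        # $out, while $outmask drives the vsel merging ahead of the
        # aligned stvx stores; the big-endian-only lines above fold the
        # within-word byte swap into those same permutations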

        b       Loop_outer_vmx

.align  4
Loop_outer_vmx:
        lis     @x[0],0x6170                    # synthesize sigma
        lis     @x[1],0x3320
         vmr    $A0,@K[0]
        lis     @x[2],0x7962
        lis     @x[3],0x6b20
         vmr    $A1,@K[0]
        ori     @x[0],@x[0],0x7865
        ori     @x[1],@x[1],0x646e
         vmr    $A2,@K[0]
        ori     @x[2],@x[2],0x2d32
        ori     @x[3],@x[3],0x6574
         vmr    $B0,@K[1]

        li      r0,10                           # inner loop counter
        lwz     @x[4],0($key)                   # load key to GPR
         vmr    $B1,@K[1]
        lwz     @x[5],4($key)
         vmr    $B2,@K[1]
        lwz     @x[6],8($key)
         vmr    $C0,@K[2]
        lwz     @x[7],12($key)
         vmr    $C1,@K[2]
        lwz     @x[8],16($key)
         vmr    $C2,@K[2]
        mr      @x[12],@d[0]                    # copy GPR counter
        lwz     @x[9],20($key)
         vmr    $D0,@K[3]
        mr      @x[13],@d[1]
        lwz     @x[10],24($key)
         vmr    $D1,@K[4]
        mr      @x[14],@d[2]
        lwz     @x[11],28($key)
         vmr    $D2,@K[5]
        mr      @x[15],@d[3]

        mr      @t[0],@x[4]
        mr      @t[1],@x[5]
        mr      @t[2],@x[6]
        mr      @t[3],@x[7]
        vspltisw $seven,7

        mtctr   r0
        nop
Loop_vmx:
___
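        # Four ChaCha blocks are processed per Loop_vmx trip: three in
        # AltiVec registers (threads 0-2) and a fourth in the integer
        # unit (thread 3, reusing the scalar ROUND()), interleaved below
        # to keep the vector and integer pipelines busy; hence the
        # 3xAltiVec+1xIALU column in the performance table and 256 bytes
        # per Loop_outer_vmx iteration.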
        my @thread0=&VMXROUND($A0,$B0,$C0,$D0,$T0,0);
        my @thread1=&VMXROUND($A1,$B1,$C1,$D1,$T1,0);
        my @thread2=&VMXROUND($A2,$B2,$C2,$D2,$T2,0);
        my @thread3=&ROUND(0,4,8,12);

        foreach (@thread0) {
                eval;                   eval(shift(@thread3));
                eval(shift(@thread1));  eval(shift(@thread3));
                eval(shift(@thread2));  eval(shift(@thread3));
        }

        @thread0=&VMXROUND($A0,$B0,$C0,$D0,$T0,1);
        @thread1=&VMXROUND($A1,$B1,$C1,$D1,$T1,1);
        @thread2=&VMXROUND($A2,$B2,$C2,$D2,$T2,1);
        @thread3=&ROUND(0,5,10,15);

        foreach (@thread0) {
                eval;                   eval(shift(@thread3));
                eval(shift(@thread1));  eval(shift(@thread3));
                eval(shift(@thread2));  eval(shift(@thread3));
        }
$code.=<<___;
        bdnz    Loop_vmx

        subi    $len,$len,256                   # $len-=256
        addi    @x[0],@x[0],0x7865              # accumulate key block
        addi    @x[1],@x[1],0x646e
        addi    @x[2],@x[2],0x2d32
        addi    @x[3],@x[3],0x6574
        addis   @x[0],@x[0],0x6170
        addis   @x[1],@x[1],0x3320
        addis   @x[2],@x[2],0x7962
        addis   @x[3],@x[3],0x6b20
        add     @x[4],@x[4],@t[0]
        lwz     @t[0],16($key)
        add     @x[5],@x[5],@t[1]
        lwz     @t[1],20($key)
        add     @x[6],@x[6],@t[2]
        lwz     @t[2],24($key)
        add     @x[7],@x[7],@t[3]
        lwz     @t[3],28($key)
        add     @x[8],@x[8],@t[0]
        add     @x[9],@x[9],@t[1]
        add     @x[10],@x[10],@t[2]
        add     @x[11],@x[11],@t[3]
        add     @x[12],@x[12],@d[0]
        add     @x[13],@x[13],@d[1]
        add     @x[14],@x[14],@d[2]
        add     @x[15],@x[15],@d[3]

        vadduwm $A0,$A0,@K[0]                   # accumulate key block
        vadduwm $A1,$A1,@K[0]
        vadduwm $A2,$A2,@K[0]
        vadduwm $B0,$B0,@K[1]
        vadduwm $B1,$B1,@K[1]
        vadduwm $B2,$B2,@K[1]
        vadduwm $C0,$C0,@K[2]
        vadduwm $C1,$C1,@K[2]
        vadduwm $C2,$C2,@K[2]
        vadduwm $D0,$D0,@K[3]
        vadduwm $D1,$D1,@K[4]
        vadduwm $D2,$D2,@K[5]

        addi    @d[0],@d[0],4                   # increment counter
        vadduwm @K[3],@K[3],$FOUR
        vadduwm @K[4],@K[4],$FOUR
        vadduwm @K[5],@K[5],$FOUR

___
if (!$LITTLE_ENDIAN) { for($i=0;$i<16;$i++) {   # flip byte order
$code.=<<___;
        mr      @t[$i&3],@x[$i]
        rotlwi  @x[$i],@x[$i],8
        rlwimi  @x[$i],@t[$i&3],24,0,7
        rlwimi  @x[$i],@t[$i&3],24,16,23
___
} }
$code.=<<___;
        lwz     @t[0],0($inp)                   # load input, aligned or not
        lwz     @t[1],4($inp)
        lwz     @t[2],8($inp)
        lwz     @t[3],12($inp)
        xor     @x[0],@x[0],@t[0]               # xor with input
        lwz     @t[0],16($inp)
        xor     @x[1],@x[1],@t[1]
        lwz     @t[1],20($inp)
        xor     @x[2],@x[2],@t[2]
        lwz     @t[2],24($inp)
        xor     @x[3],@x[3],@t[3]
        lwz     @t[3],28($inp)
        xor     @x[4],@x[4],@t[0]
        lwz     @t[0],32($inp)
        xor     @x[5],@x[5],@t[1]
        lwz     @t[1],36($inp)
        xor     @x[6],@x[6],@t[2]
        lwz     @t[2],40($inp)
        xor     @x[7],@x[7],@t[3]
        lwz     @t[3],44($inp)
        xor     @x[8],@x[8],@t[0]
        lwz     @t[0],48($inp)
        xor     @x[9],@x[9],@t[1]
        lwz     @t[1],52($inp)
        xor     @x[10],@x[10],@t[2]
        lwz     @t[2],56($inp)
        xor     @x[11],@x[11],@t[3]
        lwz     @t[3],60($inp)
        xor     @x[12],@x[12],@t[0]
        stw     @x[0],0($out)                   # store output, aligned or not
        xor     @x[13],@x[13],@t[1]
        stw     @x[1],4($out)
        xor     @x[14],@x[14],@t[2]
        stw     @x[2],8($out)
        xor     @x[15],@x[15],@t[3]
        stw     @x[3],12($out)
        addi    $inp,$inp,64
        stw     @x[4],16($out)
        li      @t[0],16
        stw     @x[5],20($out)
        li      @t[1],32
        stw     @x[6],24($out)
        li      @t[2],48
        stw     @x[7],28($out)
        li      @t[3],64
        stw     @x[8],32($out)
        stw     @x[9],36($out)
        stw     @x[10],40($out)
        stw     @x[11],44($out)
        stw     @x[12],48($out)
        stw     @x[13],52($out)
        stw     @x[14],56($out)
        stw     @x[15],60($out)
        addi    $out,$out,64

        lvx     @D[0],0,$inp                    # load input
        lvx     @D[1],@t[0],$inp
        lvx     @D[2],@t[1],$inp
        lvx     @D[3],@t[2],$inp
        lvx     @D[4],@t[3],$inp
        addi    $inp,$inp,64

        ?vperm  @D[0],@D[1],@D[0],$inpperm      # align input
        ?vperm  @D[1],@D[2],@D[1],$inpperm
        ?vperm  @D[2],@D[3],@D[2],$inpperm
        ?vperm  @D[3],@D[4],@D[3],$inpperm
        vxor    $A0,$A0,@D[0]                   # xor with input
        vxor    $B0,$B0,@D[1]
        lvx     @D[1],@t[0],$inp                # keep loading input
        vxor    $C0,$C0,@D[2]
        lvx     @D[2],@t[1],$inp
        vxor    $D0,$D0,@D[3]
        lvx     @D[3],@t[2],$inp
        lvx     @D[0],@t[3],$inp
        addi    $inp,$inp,64
        li      @t[3],63                        # 63 is not a typo
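        # as with 31/15 earlier, offset 63 (not 64) keeps the final lvx
        # of each 64-byte group from reading beyond the data when $inp
        # happens to be aligned, since lvx drops the low address bits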
        vperm   $A0,$A0,$A0,$outperm            # pre-misalign output
        vperm   $B0,$B0,$B0,$outperm
        vperm   $C0,$C0,$C0,$outperm
        vperm   $D0,$D0,$D0,$outperm

        ?vperm  @D[4],@D[1],@D[4],$inpperm      # align input
        ?vperm  @D[1],@D[2],@D[1],$inpperm
        ?vperm  @D[2],@D[3],@D[2],$inpperm
        ?vperm  @D[3],@D[0],@D[3],$inpperm
        vxor    $A1,$A1,@D[4]
        vxor    $B1,$B1,@D[1]
        lvx     @D[1],@t[0],$inp                # keep loading input
        vxor    $C1,$C1,@D[2]
        lvx     @D[2],@t[1],$inp
        vxor    $D1,$D1,@D[3]
        lvx     @D[3],@t[2],$inp
        lvx     @D[4],@t[3],$inp                # redundant in aligned case
        addi    $inp,$inp,64
        vperm   $A1,$A1,$A1,$outperm            # pre-misalign output
        vperm   $B1,$B1,$B1,$outperm
        vperm   $C1,$C1,$C1,$outperm
        vperm   $D1,$D1,$D1,$outperm

        ?vperm  @D[0],@D[1],@D[0],$inpperm      # align input
        ?vperm  @D[1],@D[2],@D[1],$inpperm
        ?vperm  @D[2],@D[3],@D[2],$inpperm
        ?vperm  @D[3],@D[4],@D[3],$inpperm
        vxor    $A2,$A2,@D[0]
        vxor    $B2,$B2,@D[1]
        vxor    $C2,$C2,@D[2]
        vxor    $D2,$D2,@D[3]
        vperm   $A2,$A2,$A2,$outperm            # pre-misalign output
        vperm   $B2,$B2,$B2,$outperm
        vperm   $C2,$C2,$C2,$outperm
        vperm   $D2,$D2,$D2,$outperm

        andi.   @x[1],$out,15                   # is $out aligned?
        mr      @x[0],$out

        vsel    @D[0],$A0,$B0,$outmask          # collect pre-misaligned output
        vsel    @D[1],$B0,$C0,$outmask
        vsel    @D[2],$C0,$D0,$outmask
        vsel    @D[3],$D0,$A1,$outmask
        vsel    $B0,$A1,$B1,$outmask
        vsel    $C0,$B1,$C1,$outmask
        vsel    $D0,$C1,$D1,$outmask
        vsel    $A1,$D1,$A2,$outmask
        vsel    $B1,$A2,$B2,$outmask
        vsel    $C1,$B2,$C2,$outmask
        vsel    $D1,$C2,$D2,$outmask
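        # each vsel above splices the tail of one pre-rotated block onto
        # the head of the next, so the bulk of the 256-byte result can
        # be stored with plain aligned stvx; the first and last partial
        # quadwords are dealt with byte-by-byte below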

        #stvx   $A0,0,$out                      # take it easy on the edges
        stvx    @D[0],@t[0],$out                # store output
        stvx    @D[1],@t[1],$out
        stvx    @D[2],@t[2],$out
        addi    $out,$out,64
        stvx    @D[3],0,$out
        stvx    $B0,@t[0],$out
        stvx    $C0,@t[1],$out
        stvx    $D0,@t[2],$out
        addi    $out,$out,64
        stvx    $A1,0,$out
        stvx    $B1,@t[0],$out
        stvx    $C1,@t[1],$out
        stvx    $D1,@t[2],$out
        addi    $out,$out,64

        beq     Laligned_vmx

        sub     @x[2],$out,@x[1]                # in misaligned case edges
        li      @x[3],0                         # are written byte-by-byte
Lunaligned_tail_vmx:
        stvebx  $D2,@x[3],@x[2]
        addi    @x[3],@x[3],1
        cmpw    @x[3],@x[1]
        bne     Lunaligned_tail_vmx

        sub     @x[2],@x[0],@x[1]
Lunaligned_head_vmx:
        stvebx  $A0,@x[1],@x[2]
        cmpwi   @x[1],15
        addi    @x[1],@x[1],1
        bne     Lunaligned_head_vmx

        ${UCMP}i $len,255                       # done with 256-byte blocks yet?
        bgt     Loop_outer_vmx

        b       Ldone_vmx

.align  4
Laligned_vmx:
        stvx    $A0,0,@x[0]                     # head hexaword was not stored

        ${UCMP}i $len,255                       # done with 256-byte blocks yet?
        bgt     Loop_outer_vmx
        nop

Ldone_vmx:
        ${UCMP}i $len,0                         # done yet?
        bnel    __ChaCha20_1x

        lwz     r12,`$FRAME-$SIZE_T*18-4`($sp)  # pull vrsave
        li      r10,`15+$LOCALS+64`
        li      r11,`31+$LOCALS+64`
        mtspr   256,r12                         # restore vrsave
        lvx     v20,r10,$sp
        addi    r10,r10,32
        lvx     v21,r11,$sp
        addi    r11,r11,32
        lvx     v22,r10,$sp
        addi    r10,r10,32
        lvx     v23,r11,$sp
        addi    r11,r11,32
        lvx     v24,r10,$sp
        addi    r10,r10,32
        lvx     v25,r11,$sp
        addi    r11,r11,32
        lvx     v26,r10,$sp
        addi    r10,r10,32
        lvx     v27,r11,$sp
        addi    r11,r11,32
        lvx     v28,r10,$sp
        addi    r10,r10,32
        lvx     v29,r11,$sp
        addi    r11,r11,32
        lvx     v30,r10,$sp
        lvx     v31,r11,$sp
        $POP    r0, `$FRAME+$LRSAVE`($sp)
        $POP    r14,`$FRAME-$SIZE_T*18`($sp)
        $POP    r15,`$FRAME-$SIZE_T*17`($sp)
        $POP    r16,`$FRAME-$SIZE_T*16`($sp)
        $POP    r17,`$FRAME-$SIZE_T*15`($sp)
        $POP    r18,`$FRAME-$SIZE_T*14`($sp)
        $POP    r19,`$FRAME-$SIZE_T*13`($sp)
        $POP    r20,`$FRAME-$SIZE_T*12`($sp)
        $POP    r21,`$FRAME-$SIZE_T*11`($sp)
        $POP    r22,`$FRAME-$SIZE_T*10`($sp)
        $POP    r23,`$FRAME-$SIZE_T*9`($sp)
        $POP    r24,`$FRAME-$SIZE_T*8`($sp)
        $POP    r25,`$FRAME-$SIZE_T*7`($sp)
        $POP    r26,`$FRAME-$SIZE_T*6`($sp)
        $POP    r27,`$FRAME-$SIZE_T*5`($sp)
        $POP    r28,`$FRAME-$SIZE_T*4`($sp)
        $POP    r29,`$FRAME-$SIZE_T*3`($sp)
        $POP    r30,`$FRAME-$SIZE_T*2`($sp)
        $POP    r31,`$FRAME-$SIZE_T*1`($sp)
        mtlr    r0
        addi    $sp,$sp,$FRAME
        blr
        .long   0
        .byte   0,12,0x04,1,0x80,18,5,0
        .long   0
.size   .ChaCha20_ctr32_vmx,.-.ChaCha20_ctr32_vmx

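# Lconsts computes the address of Lsigma at run time: "bcl 20,31,\$+4"
# is the branch-always form that sets LR without disturbing the branch
# predictor's link stack, so the second mflr yields Lconsts+8 and the
# addi below lands on Lsigma, 64 bytes past Lconsts.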
.align  5
Lconsts:
        mflr    r0
        bcl     20,31,\$+4
        mflr    r12     #vvvvv "distance between . and Lsigma"
        addi    r12,r12,`64-8`
        mtlr    r0
        blr
        .long   0
        .byte   0,12,0x14,0,0,0,0,0
        .space  `64-9*4`
Lsigma:
        .long   0x61707865,0x3320646e,0x79622d32,0x6b206574
        .long   1,0,0,0
        .long   4,0,0,0
___
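# The two quadwords emitted below are the vperm index tables VMXROUND
# uses for the byte-granular rotates, i.e. left-rotate by 16 and by 24
# of each 32-bit lane, in endian-appropriate byte order.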
$code.=<<___    if ($LITTLE_ENDIAN);
        .long   0x0e0f0c0d,0x0a0b0809,0x06070405,0x02030001
        .long   0x0d0e0f0c,0x090a0b08,0x05060704,0x01020300
___
$code.=<<___    if (!$LITTLE_ENDIAN);   # flipped words
        .long   0x02030001,0x06070405,0x0a0b0809,0x0e0f0c0d
        .long   0x01020300,0x05060704,0x090a0b08,0x0d0e0f0c
___
$code.=<<___;
.asciz  "ChaCha20 for PowerPC/AltiVec, CRYPTOGAMS by <appro\@openssl.org>"
.align  2
___
}}}

foreach (split("\n",$code)) {
        s/\`([^\`]*)\`/eval $1/ge;

        # instructions prefixed with '?' are endian-specific and need
        # to be adjusted accordingly...
        if ($flavour !~ /le$/) {        # big-endian
            s/be\?//            or
            s/le\?/#le#/        or
            s/\?lvsr/lvsl/      or
            s/\?lvsl/lvsr/      or
            s/\?(vperm\s+v[0-9]+,\s*)(v[0-9]+,\s*)(v[0-9]+,\s*)(v[0-9]+)/$1$3$2$4/ or
            s/(vsldoi\s+v[0-9]+,\s*)(v[0-9]+,)\s*(v[0-9]+,\s*)([0-9]+)/$1$3$2 16-$4/;
        } else {                        # little-endian
            s/le\?//            or
            s/be\?/#be#/        or
            s/\?([a-z]+)/$1/;
        }

        print $_,"\n";
}

close STDOUT;