#!/usr/bin/env perl
# ARM assembly pack: add ChaCha20 and Poly1305 modules.
# Origin: crypto/chacha/asm/chacha-armv4.pl (oweals/openssl.git)
#
# ====================================================================
# Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
# project. The module is, however, dual licensed under OpenSSL and
# CRYPTOGAMS licenses depending on where you obtain it. For further
# details see http://www.openssl.org/~appro/cryptogams/.
# ====================================================================
#
# December 2014

# ChaCha20 for ARMv4.
#
# Performance in cycles per byte out of large buffer.
#
#                       IALU/gcc-4.4    1xNEON      3xNEON+1xIALU
#
# Cortex-A5             19.3(*)/+95%    21.8        14.1
# Cortex-A8             10.5(*)/+160%   13.9        6.35
# Cortex-A9             12.9(**)/+110%  14.3        6.50
# Cortex-A15            11.0/+40%       16.0        5.00
# Snapdragon S4         11.5/+125%      13.6        4.90
#
# (*)   most "favourable" result for aligned data on little-endian
#       processor, result for misaligned data is 10-15% lower;
# (**)  this result is a trade-off: it can be improved by 20%,
#       but then Snapdragon S4 and Cortex-A8 results get
#       20-25% worse;

# Command-line handling: the first argument may be a perlasm "flavour"
# (e.g. linux32, ios32) consumed by arm-xlate.pl; otherwise arguments are
# scanned for the output file name (anything matching a file-name pattern).
$flavour = shift;
if ($flavour=~/^\w[\w\-]*\.\w+$/) { $output=$flavour; undef $flavour; }
else { while (($output=shift) && ($output!~/^\w[\w\-]*\.\w+$/)) {} }

if ($flavour && $flavour ne "void") {
    $0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
    ( $xlate="${dir}arm-xlate.pl" and -f $xlate ) or
    ( $xlate="${dir}../../perlasm/arm-xlate.pl" and -f $xlate) or
    die "can't locate arm-xlate.pl";

    # Pipe all generated code through the translator.  The open() used to
    # be unchecked, so a missing interpreter silently discarded the whole
    # module; fail loudly instead.
    open STDOUT,"| \"$^X\" $xlate $flavour $output"
        or die "can't call $xlate: $!";
} else {
    open STDOUT,">$output"
        or die "can't open $output: $!";
}
44
# Thunk for otherwise-undefined sub calls (x86-style perlasm, simplified):
# a call such as &vadd_i32($a,$b,$c) is translated into a "vadd.i32"
# instruction line and appended to the global $code buffer.
sub AUTOLOAD()
{
    my $opcode = $AUTOLOAD;
    $opcode =~ s/.*:://;                    # strip package qualifier
    $opcode =~ s/_/\./;                     # vadd_i32 -> vadd.i32
    my $arg = pop;
    # A purely numeric trailing operand becomes an immediate (#n).
    $arg = "#$arg" if ($arg*1 eq $arg);
    $code .= "\t$opcode\t" . join(',', @_, $arg) . "\n";
}
51
# Register map for the 16 32-bit ChaCha state words @x[0..15].  Only the
# slots with a real register name (r0-r7, r12, r14) live in registers;
# the "x" placeholders (which expand to the non-register name "rx") mark
# words kept on the stack — see the allocation commentary inside ROUND.
# @t = r8-r11 are scratch registers.
my @x=map("r$_",(0..7,"x","x","x","x",12,"x",14,"x"));
my @t=map("r$_",(8..11));
54
# Emit one ChaCha double-quarter-round step over four parallel "lanes".
# Arguments are the state-word indices (a,b,c,d) of the first lane; the
# remaining three lanes are derived by rotating the low two bits of each
# index (so (0,4,8,12) covers columns, (0,5,10,15) covers diagonals).
# Returns a list of perlasm strings to be eval'ed; each string invokes
# the AUTOLOAD thunk to append an instruction to $code.
sub ROUND {
my ($a0,$b0,$c0,$d0)=@_;
my ($a1,$b1,$c1,$d1)=map(($_&~3)+(($_+1)&3),($a0,$b0,$c0,$d0));
my ($a2,$b2,$c2,$d2)=map(($_&~3)+(($_+1)&3),($a1,$b1,$c1,$d1));
my ($a3,$b3,$c3,$d3)=map(($_&~3)+(($_+1)&3),($a2,$b2,$c2,$d2));
my $odd = $d0&1;
my ($xc,$xc_) = (@t[0..1]);
my ($xd,$xd_) = $odd ? (@t[2],@x[$d1]) : (@x[$d0],@t[2]);
my @ret;

	# Consider order in which variables are addressed by their
	# index:
	#
	#       a   b   c   d
	#
	#       0   4   8  12 < even round
	#       1   5   9  13
	#       2   6  10  14
	#       3   7  11  15
	#       0   5  10  15 < odd round
	#       1   6  11  12
	#       2   7   8  13
	#       3   4   9  14
	#
	# 'a', 'b' are permanently allocated in registers, @x[0..7],
	# while 'c's and pair of 'd's are maintained in memory. If
	# you observe 'c' column, you'll notice that pair of 'c's is
	# invariant between rounds. This means that we have to reload
	# them once per round, in the middle. This is why you'll see
	# bunch of 'c' stores and loads in the middle, but none in
	# the beginning or end. If you observe 'd' column, you'll
	# notice that 15 and 13 are reused in next pair of rounds.
	# This is why these two are chosen for offloading to memory,
	# to make loads count more.
						push @ret,(
	"&add	(@x[$a0],@x[$a0],@x[$b0])",
	"&mov	($xd,$xd,'ror#16')",
	 "&add	(@x[$a1],@x[$a1],@x[$b1])",
	 "&mov	($xd_,$xd_,'ror#16')",
	"&eor	($xd,$xd,@x[$a0],'ror#16')",
	 "&eor	($xd_,$xd_,@x[$a1],'ror#16')",

	"&add	($xc,$xc,$xd)",
	"&mov	(@x[$b0],@x[$b0],'ror#20')",
	 "&add	($xc_,$xc_,$xd_)",
	 "&mov	(@x[$b1],@x[$b1],'ror#20')",
	"&eor	(@x[$b0],@x[$b0],$xc,'ror#20')",
	 "&eor	(@x[$b1],@x[$b1],$xc_,'ror#20')",

	"&add	(@x[$a0],@x[$a0],@x[$b0])",
	"&mov	($xd,$xd,'ror#24')",
	 "&add	(@x[$a1],@x[$a1],@x[$b1])",
	 "&mov	($xd_,$xd_,'ror#24')",
	"&eor	($xd,$xd,@x[$a0],'ror#24')",
	 "&eor	($xd_,$xd_,@x[$a1],'ror#24')",

	"&add	($xc,$xc,$xd)",
	"&mov	(@x[$b0],@x[$b0],'ror#25')"		);
	# Mid-round: spill the 'd' word that the next round pair reuses
	# and reload the one needed for the second half (odd rounds).
						push @ret,(
	"&str	($xd,'[sp,#4*(16+$d0)]')",
	"&ldr	($xd,'[sp,#4*(16+$d2)]')"		) if ($odd);
						push @ret,(
	 "&add	($xc_,$xc_,$xd_)",
	 "&mov	(@x[$b1],@x[$b1],'ror#25')"		);
						push @ret,(
	 "&str	($xd_,'[sp,#4*(16+$d1)]')",
	 "&ldr	($xd_,'[sp,#4*(16+$d3)]')"		) if (!$odd);
						push @ret,(
	"&eor	(@x[$b0],@x[$b0],$xc,'ror#25')",
	 "&eor	(@x[$b1],@x[$b1],$xc_,'ror#25')"	);

	$xd=@x[$d2]					if (!$odd);
	$xd_=@x[$d3]					if ($odd);
	# Second half: swap 'c' pair through memory (invariant between
	# rounds), then run lanes 2 and 3 through the quarter-round.
						push @ret,(
	"&str	($xc,'[sp,#4*(16+$c0)]')",
	"&ldr	($xc,'[sp,#4*(16+$c2)]')",
	"&add	(@x[$a2],@x[$a2],@x[$b2])",
	"&mov	($xd,$xd,'ror#16')",
	 "&str	($xc_,'[sp,#4*(16+$c1)]')",
	 "&ldr	($xc_,'[sp,#4*(16+$c3)]')",
	 "&add	(@x[$a3],@x[$a3],@x[$b3])",
	 "&mov	($xd_,$xd_,'ror#16')",
	"&eor	($xd,$xd,@x[$a2],'ror#16')",
	 "&eor	($xd_,$xd_,@x[$a3],'ror#16')",

	"&add	($xc,$xc,$xd)",
	"&mov	(@x[$b2],@x[$b2],'ror#20')",
	 "&add	($xc_,$xc_,$xd_)",
	 "&mov	(@x[$b3],@x[$b3],'ror#20')",
	"&eor	(@x[$b2],@x[$b2],$xc,'ror#20')",
	 "&eor	(@x[$b3],@x[$b3],$xc_,'ror#20')",

	"&add	(@x[$a2],@x[$a2],@x[$b2])",
	"&mov	($xd,$xd,'ror#24')",
	 "&add	(@x[$a3],@x[$a3],@x[$b3])",
	 "&mov	($xd_,$xd_,'ror#24')",
	"&eor	($xd,$xd,@x[$a2],'ror#24')",
	 "&eor	($xd_,$xd_,@x[$a3],'ror#24')",

	"&add	($xc,$xc,$xd)",
	"&mov	(@x[$b2],@x[$b2],'ror#25')",
	 "&add	($xc_,$xc_,$xd_)",
	 "&mov	(@x[$b3],@x[$b3],'ror#25')",
	"&eor	(@x[$b2],@x[$b2],$xc,'ror#25')",
	 "&eor	(@x[$b3],@x[$b3],$xc_,'ror#25')"	);

	@ret;
}
163
# Emit the file prologue (constants, armcap probe) and the integer-only
# ChaCha20_ctr32 entry point: if len>192 and the NEON capability bit is
# set, control transfers to ChaCha20_neon; otherwise the state (sigma |
# key | counter | nonce) is copied to a 16-word stack area and the outer
# loop is entered.  NOTE: the heredoc body is emitted verbatim (with
# @x/@t interpolated to register names), so no comments may be added
# inside it.
$code.=<<___;
#include "arm_arch.h"

.text
#if defined(__thumb2__)
.syntax	unified
.thumb
#else
.code	32
#endif

#if defined(__thumb2__) || defined(__clang__)
#define ldrhsb	ldrbhs
#endif

.align	5
.Lsigma:
.long	0x61707865,0x3320646e,0x79622d32,0x6b206574	@ endian-neutral
.Lone:
.long	1,0,0,0
#if __ARM_MAX_ARCH__>=7
.LOPENSSL_armcap:
.word	OPENSSL_armcap_P-.LChaCha20_ctr32
#else
.word	-1
#endif

.globl	ChaCha20_ctr32
.type	ChaCha20_ctr32,%function
.align	5
ChaCha20_ctr32:
.LChaCha20_ctr32:
	ldr	r12,[sp,#0]		@ pull pointer to counter and nonce
	stmdb	sp!,{r0-r2,r4-r11,lr}
#if __ARM_ARCH__<7 && !defined(__thumb2__)
	sub	r14,pc,#16		@ ChaCha20_ctr32
#else
	adr	r14,.LChaCha20_ctr32
#endif
#if __ARM_MAX_ARCH__>=7
	cmp	r2,#192			@ test len
	bls	.Lshort
	ldr	r4,[r14,#-32]
	ldr	r4,[r14,r4]
# ifdef	__APPLE__
	ldr	r4,[r4]
# endif
	tst	r4,#1
	bne	.LChaCha20_neon
.Lshort:
#endif
	ldmia	r12,{r4-r7}		@ load counter and nonce
	sub	sp,sp,#4*(16)		@ off-load area
	sub	r14,r14,#64		@ .Lsigma
	stmdb	sp!,{r4-r7}		@ copy counter and nonce
	ldmia	r3,{r4-r11}		@ load key
	ldmia	r14,{r0-r3}		@ load sigma
	stmdb	sp!,{r4-r11}		@ copy key
	stmdb	sp!,{r0-r3}		@ copy sigma
	str	r10,[sp,#4*(16+10)]	@ off-load "@x[10]"
	str	r11,[sp,#4*(16+11)]	@ off-load "@x[11]"
	b	.Loop_outer_enter

.align	4
.Loop_outer:
	ldmia	sp,{r0-r9}		@ load key material
	str	@t[3],[sp,#4*(32+2)]	@ save len
	str	r12,  [sp,#4*(32+1)]	@ save inp
	str	r14,  [sp,#4*(32+0)]	@ save out
.Loop_outer_enter:
	ldr	@t[3], [sp,#4*(15)]
	ldr	@x[12],[sp,#4*(12)]	@ modulo-scheduled load
	ldr	@t[2], [sp,#4*(13)]
	ldr	@x[14],[sp,#4*(14)]
	str	@t[3], [sp,#4*(16+15)]
	mov	@t[3],#10
	b	.Loop

.align	4
.Loop:
	subs	@t[3],@t[3],#1
___
	# One .Loop iteration is a ChaCha double round: a column round
	# (0,4,8,12) followed by a diagonal round (0,5,10,15); the loop
	# counter starts at 10, giving the standard 20 rounds.
	foreach (&ROUND(0, 4, 8,12)) { eval; }
	foreach (&ROUND(0, 5,10,15)) { eval; }
# Emit the post-round epilogue and the word-at-a-time fast path: the
# keystream (state + key material) is xored with the input when len>=64
# ("hs" condition), or just stored to the stack buffer for .Ltail when
# less than a full block remains ("lo").  On __ARM_ARCH__<7 a misaligned
# inp/out pair branches to the byte-wise .Lunaligned path instead.  The
# heredoc body is emitted verbatim; comments stay outside it.
$code.=<<___;
	bne	.Loop

	ldr	@t[3],[sp,#4*(32+2)]	@ load len

	str	@t[0], [sp,#4*(16+8)]	@ modulo-scheduled store
	str	@t[1], [sp,#4*(16+9)]
	str	@x[12],[sp,#4*(16+12)]
	str	@t[2], [sp,#4*(16+13)]
	str	@x[14],[sp,#4*(16+14)]

	@ at this point we have first half of 512-bit result in
	@ @x[0-7] and second half at sp+4*(16+8)

	cmp	@t[3],#64		@ done yet?
#ifdef	__thumb2__
	itete	lo
#endif
	addlo	r12,sp,#4*(0)		@ shortcut or ...
	ldrhs	r12,[sp,#4*(32+1)]	@ ... load inp
	addlo	r14,sp,#4*(0)		@ shortcut or ...
	ldrhs	r14,[sp,#4*(32+0)]	@ ... load out

	ldr	@t[0],[sp,#4*(0)]	@ load key material
	ldr	@t[1],[sp,#4*(1)]

#if __ARM_ARCH__>=6 || !defined(__ARMEB__)
# if __ARM_ARCH__<7
	orr	@t[2],r12,r14
	tst	@t[2],#3		@ are input and output aligned?
	ldr	@t[2],[sp,#4*(2)]
	bne	.Lunaligned
	cmp	@t[3],#64		@ restore flags
# else
	ldr	@t[2],[sp,#4*(2)]
# endif
	ldr	@t[3],[sp,#4*(3)]

	add	@x[0],@x[0],@t[0]	@ accumulate key material
	add	@x[1],@x[1],@t[1]
# ifdef	__thumb2__
	itt	hs
# endif
	ldrhs	@t[0],[r12],#16		@ load input
	ldrhs	@t[1],[r12,#-12]

	add	@x[2],@x[2],@t[2]
	add	@x[3],@x[3],@t[3]
# ifdef	__thumb2__
	itt	hs
# endif
	ldrhs	@t[2],[r12,#-8]
	ldrhs	@t[3],[r12,#-4]
# if __ARM_ARCH__>=6 && defined(__ARMEB__)
	rev	@x[0],@x[0]
	rev	@x[1],@x[1]
	rev	@x[2],@x[2]
	rev	@x[3],@x[3]
# endif
# ifdef	__thumb2__
	itt	hs
# endif
	eorhs	@x[0],@x[0],@t[0]	@ xor with input
	eorhs	@x[1],@x[1],@t[1]
	 add	@t[0],sp,#4*(4)
	str	@x[0],[r14],#16		@ store output
# ifdef	__thumb2__
	itt	hs
# endif
	eorhs	@x[2],@x[2],@t[2]
	eorhs	@x[3],@x[3],@t[3]
	 ldmia	@t[0],{@t[0]-@t[3]}	@ load key material
	str	@x[1],[r14,#-12]
	str	@x[2],[r14,#-8]
	str	@x[3],[r14,#-4]

	add	@x[4],@x[4],@t[0]	@ accumulate key material
	add	@x[5],@x[5],@t[1]
# ifdef	__thumb2__
	itt	hs
# endif
	ldrhs	@t[0],[r12],#16		@ load input
	ldrhs	@t[1],[r12,#-12]
	add	@x[6],@x[6],@t[2]
	add	@x[7],@x[7],@t[3]
# ifdef	__thumb2__
	itt	hs
# endif
	ldrhs	@t[2],[r12,#-8]
	ldrhs	@t[3],[r12,#-4]
# if __ARM_ARCH__>=6 && defined(__ARMEB__)
	rev	@x[4],@x[4]
	rev	@x[5],@x[5]
	rev	@x[6],@x[6]
	rev	@x[7],@x[7]
# endif
# ifdef	__thumb2__
	itt	hs
# endif
	eorhs	@x[4],@x[4],@t[0]
	eorhs	@x[5],@x[5],@t[1]
	 add	@t[0],sp,#4*(8)
	str	@x[4],[r14],#16		@ store output
# ifdef	__thumb2__
	itt	hs
# endif
	eorhs	@x[6],@x[6],@t[2]
	eorhs	@x[7],@x[7],@t[3]
	str	@x[5],[r14,#-12]
	 ldmia	@t[0],{@t[0]-@t[3]}	@ load key material
	str	@x[6],[r14,#-8]
	 add	@x[0],sp,#4*(16+8)
	str	@x[7],[r14,#-4]

	ldmia	@x[0],{@x[0]-@x[7]}	@ load second half

	add	@x[0],@x[0],@t[0]	@ accumulate key material
	add	@x[1],@x[1],@t[1]
# ifdef	__thumb2__
	itt	hs
# endif
	ldrhs	@t[0],[r12],#16		@ load input
	ldrhs	@t[1],[r12,#-12]
# ifdef	__thumb2__
	itt	hi
# endif
	 strhi	@t[2],[sp,#4*(16+10)]	@ copy "@x[10]" while at it
	 strhi	@t[3],[sp,#4*(16+11)]	@ copy "@x[11]" while at it
	add	@x[2],@x[2],@t[2]
	add	@x[3],@x[3],@t[3]
# ifdef	__thumb2__
	itt	hs
# endif
	ldrhs	@t[2],[r12,#-8]
	ldrhs	@t[3],[r12,#-4]
# if __ARM_ARCH__>=6 && defined(__ARMEB__)
	rev	@x[0],@x[0]
	rev	@x[1],@x[1]
	rev	@x[2],@x[2]
	rev	@x[3],@x[3]
# endif
# ifdef	__thumb2__
	itt	hs
# endif
	eorhs	@x[0],@x[0],@t[0]
	eorhs	@x[1],@x[1],@t[1]
	 add	@t[0],sp,#4*(12)
	str	@x[0],[r14],#16		@ store output
# ifdef	__thumb2__
	itt	hs
# endif
	eorhs	@x[2],@x[2],@t[2]
	eorhs	@x[3],@x[3],@t[3]
	str	@x[1],[r14,#-12]
	 ldmia	@t[0],{@t[0]-@t[3]}	@ load key material
	str	@x[2],[r14,#-8]
	str	@x[3],[r14,#-4]

	add	@x[4],@x[4],@t[0]	@ accumulate key material
	add	@x[5],@x[5],@t[1]
# ifdef	__thumb2__
	itt	hi
# endif
	 addhi	@t[0],@t[0],#1		@ next counter value
	 strhi	@t[0],[sp,#4*(12)]	@ save next counter value
# ifdef	__thumb2__
	itt	hs
# endif
	ldrhs	@t[0],[r12],#16		@ load input
	ldrhs	@t[1],[r12,#-12]
	add	@x[6],@x[6],@t[2]
	add	@x[7],@x[7],@t[3]
# ifdef	__thumb2__
	itt	hs
# endif
	ldrhs	@t[2],[r12,#-8]
	ldrhs	@t[3],[r12,#-4]
# if __ARM_ARCH__>=6 && defined(__ARMEB__)
	rev	@x[4],@x[4]
	rev	@x[5],@x[5]
	rev	@x[6],@x[6]
	rev	@x[7],@x[7]
# endif
# ifdef	__thumb2__
	itt	hs
# endif
	eorhs	@x[4],@x[4],@t[0]
	eorhs	@x[5],@x[5],@t[1]
# ifdef	__thumb2__
	it	hi
# endif
	 ldrhi	@t[0],[sp,#4*(32+2)]	@ re-load len
# ifdef	__thumb2__
	itt	hs
# endif
	eorhs	@x[6],@x[6],@t[2]
	eorhs	@x[7],@x[7],@t[3]
	str	@x[4],[r14],#16		@ store output
	str	@x[5],[r14,#-12]
# ifdef	__thumb2__
	it	hs
# endif
	 subhs	@t[3],@t[0],#64		@ len-=64
	str	@x[6],[r14,#-8]
	str	@x[7],[r14,#-4]
	bhi	.Loop_outer

	beq	.Ldone
# if __ARM_ARCH__<7
	b	.Ltail

.align	4
.Lunaligned:				@ unaligned endian-neutral path
	cmp	@t[3],#64		@ restore flags
# endif
#endif
#if __ARM_ARCH__<7
	ldr	@t[3],[sp,#4*(3)]
___
# Generate the byte-wise, endian-neutral path used on __ARM_ARCH__<7 when
# inp/out are misaligned (or always, on big-endian pre-v6): four unrolled
# iterations, each handling four state words.  Input bytes are loaded with
# conditional ldrb ("hs": full block remains) or the keystream word is
# xored with zero ("lo": partial block, output goes to the stack buffer
# for .Ltail).  $j wraps at 8 because the second 256-bit half is reloaded
# from the stack when $i==8.
for ($i=0;$i<16;$i+=4) {
my $j=$i&0x7;

$code.=<<___	if ($i==4);
	add	@x[0],sp,#4*(16+8)
___
$code.=<<___	if ($i==8);
	ldmia	@x[0],{@x[0]-@x[7]}		@ load second half
# ifdef	__thumb2__
	itt	hi
# endif
	strhi	@t[2],[sp,#4*(16+10)]		@ copy "@x[10]"
	strhi	@t[3],[sp,#4*(16+11)]		@ copy "@x[11]"
___
$code.=<<___;
	add	@x[$j+0],@x[$j+0],@t[0]		@ accumulate key material
___
$code.=<<___	if ($i==12);
# ifdef	__thumb2__
	itt	hi
# endif
	addhi	@t[0],@t[0],#1			@ next counter value
	strhi	@t[0],[sp,#4*(12)]		@ save next counter value
___
$code.=<<___;
	add	@x[$j+1],@x[$j+1],@t[1]
	add	@x[$j+2],@x[$j+2],@t[2]
# ifdef	__thumb2__
	itete	lo
# endif
	eorlo	@t[0],@t[0],@t[0]		@ zero or ...
	ldrhsb	@t[0],[r12],#16			@ ... load input
	eorlo	@t[1],@t[1],@t[1]
	ldrhsb	@t[1],[r12,#-12]

	add	@x[$j+3],@x[$j+3],@t[3]
# ifdef	__thumb2__
	itete	lo
# endif
	eorlo	@t[2],@t[2],@t[2]
	ldrhsb	@t[2],[r12,#-8]
	eorlo	@t[3],@t[3],@t[3]
	ldrhsb	@t[3],[r12,#-4]

	eor	@x[$j+0],@t[0],@x[$j+0]		@ xor with input (or zero)
	eor	@x[$j+1],@t[1],@x[$j+1]
# ifdef	__thumb2__
	itt	hs
# endif
	ldrhsb	@t[0],[r12,#-15]		@ load more input
	ldrhsb	@t[1],[r12,#-11]
	eor	@x[$j+2],@t[2],@x[$j+2]
	 strb	@x[$j+0],[r14],#16		@ store output
	eor	@x[$j+3],@t[3],@x[$j+3]
# ifdef	__thumb2__
	itt	hs
# endif
	ldrhsb	@t[2],[r12,#-7]
	ldrhsb	@t[3],[r12,#-3]
	 strb	@x[$j+1],[r14,#-12]
	eor	@x[$j+0],@t[0],@x[$j+0],lsr#8
	 strb	@x[$j+2],[r14,#-8]
	eor	@x[$j+1],@t[1],@x[$j+1],lsr#8
# ifdef	__thumb2__
	itt	hs
# endif
	ldrhsb	@t[0],[r12,#-14]		@ load more input
	ldrhsb	@t[1],[r12,#-10]
	 strb	@x[$j+3],[r14,#-4]
	eor	@x[$j+2],@t[2],@x[$j+2],lsr#8
	 strb	@x[$j+0],[r14,#-15]
	eor	@x[$j+3],@t[3],@x[$j+3],lsr#8
# ifdef	__thumb2__
	itt	hs
# endif
	ldrhsb	@t[2],[r12,#-6]
	ldrhsb	@t[3],[r12,#-2]
	 strb	@x[$j+1],[r14,#-11]
	eor	@x[$j+0],@t[0],@x[$j+0],lsr#8
	 strb	@x[$j+2],[r14,#-7]
	eor	@x[$j+1],@t[1],@x[$j+1],lsr#8
# ifdef	__thumb2__
	itt	hs
# endif
	ldrhsb	@t[0],[r12,#-13]		@ load more input
	ldrhsb	@t[1],[r12,#-9]
	 strb	@x[$j+3],[r14,#-3]
	eor	@x[$j+2],@t[2],@x[$j+2],lsr#8
	 strb	@x[$j+0],[r14,#-14]
	eor	@x[$j+3],@t[3],@x[$j+3],lsr#8
# ifdef	__thumb2__
	itt	hs
# endif
	ldrhsb	@t[2],[r12,#-5]
	ldrhsb	@t[3],[r12,#-1]
	 strb	@x[$j+1],[r14,#-10]
	 strb	@x[$j+2],[r14,#-6]
	eor	@x[$j+0],@t[0],@x[$j+0],lsr#8
	 strb	@x[$j+3],[r14,#-2]
	eor	@x[$j+1],@t[1],@x[$j+1],lsr#8
	 strb	@x[$j+0],[r14,#-13]
	eor	@x[$j+2],@t[2],@x[$j+2],lsr#8
	 strb	@x[$j+1],[r14,#-9]
	eor	@x[$j+3],@t[3],@x[$j+3],lsr#8
	 strb	@x[$j+2],[r14,#-5]
	 strb	@x[$j+3],[r14,#-1]
___
$code.=<<___	if ($i<12);
	add	@t[0],sp,#4*(4+$i)
	ldmia	@t[0],{@t[0]-@t[3]}		@ load key material
___
}
# Emit the unaligned-path loop back-edge plus the shared tail: .Loop_tail
# xors the partial block byte-by-byte from the stack keystream buffer,
# and .Ldone unwinds the stack frame (16-word buffer + 16-word state +
# 3 saved words) and returns.
$code.=<<___;
# ifdef	__thumb2__
	it	hi
# endif
	ldrhi	@t[0],[sp,#4*(32+2)]		@ re-load len
# ifdef	__thumb2__
	it	hs
# endif
	subhs	@t[3],@t[0],#64			@ len-=64
	bhi	.Loop_outer

	beq	.Ldone
#endif

.Ltail:
	ldr	r12,[sp,#4*(32+1)]	@ load inp
	add	@t[2],sp,#4*(0)
	ldr	r14,[sp,#4*(32+0)]	@ load out

.Loop_tail:
	ldrb	@t[0],[@t[2]],#1	@ read buffer on stack
	ldrb	@t[1],[r12],#1		@ read input
	subs	@t[3],@t[3],#1
	eor	@t[0],@t[0],@t[1]
	strb	@t[0],[r14],#1		@ store output
	bne	.Loop_tail

.Ldone:
	add	sp,sp,#4*(32+3)
	ldmia	sp!,{r4-r11,pc}
.size	ChaCha20_ctr32,.-ChaCha20_ctr32
___
611
612 {{{
# NEON register map: three 4x32-bit state quartets (a,b,c,d) for three
# parallel blocks, processed alongside a fourth block in the integer
# unit; q12-q15 ($t0-$t3) are temporaries.
my ($a0,$b0,$c0,$d0,$a1,$b1,$c1,$d1,$a2,$b2,$c2,$d2,$t0,$t1,$t2,$t3) =
    map("q$_",(0..15));
615
# Emit one NEON quarter-round over a whole 4-word ChaCha row held in
# ($a,$b,$c,$d), using $t as scratch.  The final trailing flag selects
# the lane rotation applied by vext: 0 shuffles into diagonal form
# (after an even/column round), 1 shuffles back (after an odd/diagonal
# round).  Rotations by 12/8/7 are done as vshr+vsli pairs; the rotate
# by 16 uses vrev32.16.  Returns perlasm strings for later eval.
sub NEONROUND {
my $odd = pop;
my ($a,$b,$c,$d,$t)=@_;

	(
	"&vadd_i32	($a,$a,$b)",
	"&veor		($d,$d,$a)",
	"&vrev32_16	($d,$d)",	# vrot ($d,16)

	"&vadd_i32	($c,$c,$d)",
	"&veor		($t,$b,$c)",
	"&vshr_u32	($b,$t,20)",
	"&vsli_32	($b,$t,12)",

	"&vadd_i32	($a,$a,$b)",
	"&veor		($t,$d,$a)",
	"&vshr_u32	($d,$t,24)",
	"&vsli_32	($d,$t,8)",

	"&vadd_i32	($c,$c,$d)",
	"&veor		($t,$b,$c)",
	"&vshr_u32	($b,$t,25)",
	"&vsli_32	($b,$t,7)",

	"&vext_8	($c,$c,$c,8)",
	"&vext_8	($b,$b,$b,$odd?12:4)",
	"&vext_8	($d,$d,$d,$odd?4:12)"
	);
}
645
# Emit the ChaCha20_neon entry point: saves d8-d15 per AAPCS, builds the
# same stack state area as the scalar path plus room for NEON spills,
# replicates the state into three q-register block sets with counters
# +0/+1/+2 (the integer unit runs a fourth block with counter+3), and
# enters .Loop_neon.  Heredoc text is emitted verbatim; $-names
# interpolate to q registers, @x/@t to r registers.
$code.=<<___;
#if __ARM_MAX_ARCH__>=7
.arch	armv7-a
.fpu	neon

.type	ChaCha20_neon,%function
.align	5
ChaCha20_neon:
	ldr		r12,[sp,#0]		@ pull pointer to counter and nonce
	stmdb		sp!,{r0-r2,r4-r11,lr}
.LChaCha20_neon:
	adr		r14,.Lsigma
	vstmdb		sp!,{d8-d15}		@ ABI spec says so
	stmdb		sp!,{r0-r3}

	vld1.32		{$b0-$c0},[r3]		@ load key
	ldmia		r3,{r4-r11}		@ load key

	sub		sp,sp,#4*(16+16)
	vld1.32		{$d0},[r12]		@ load counter and nonce
	add		r12,sp,#4*8
	ldmia		r14,{r0-r3}		@ load sigma
	vld1.32		{$a0},[r14]!		@ load sigma
	vld1.32		{$t0},[r14]		@ one
	vst1.32		{$c0-$d0},[r12]		@ copy 1/2key|counter|nonce
	vst1.32		{$a0-$b0},[sp]		@ copy sigma|1/2key

	str		r10,[sp,#4*(16+10)]	@ off-load "@x[10]"
	str		r11,[sp,#4*(16+11)]	@ off-load "@x[11]"
	vshl.i32	$t1#lo,$t0#lo,#1	@ two
	vstr		$t0#lo,[sp,#4*(16+0)]
	vshl.i32	$t2#lo,$t0#lo,#2	@ four
	vstr		$t1#lo,[sp,#4*(16+2)]
	vmov		$a1,$a0
	vstr		$t2#lo,[sp,#4*(16+4)]
	vmov		$a2,$a0
	vmov		$b1,$b0
	vmov		$b2,$b0
	b		.Loop_neon_enter

.align	4
.Loop_neon_outer:
	ldmia		sp,{r0-r9}		@ load key material
	cmp		@t[3],#64*2		@ if len<=64*2
	bls		.Lbreak_neon		@ switch to integer-only
	vmov		$a1,$a0
	str		@t[3],[sp,#4*(32+2)]	@ save len
	vmov		$a2,$a0
	str		r12,  [sp,#4*(32+1)]	@ save inp
	vmov		$b1,$b0
	str		r14,  [sp,#4*(32+0)]	@ save out
	vmov		$b2,$b0
.Loop_neon_enter:
	ldr		@t[3], [sp,#4*(15)]
	vadd.i32	$d1,$d0,$t0		@ counter+1
	ldr		@x[12],[sp,#4*(12)]	@ modulo-scheduled load
	vmov		$c1,$c0
	ldr		@t[2], [sp,#4*(13)]
	vmov		$c2,$c0
	ldr		@x[14],[sp,#4*(14)]
	vadd.i32	$d2,$d1,$t0		@ counter+2
	str		@t[3], [sp,#4*(16+15)]
	mov		@t[3],#10
	add		@x[12],@x[12],#3	@ counter+3 
	b		.Loop_neon

.align	4
.Loop_neon:
	subs		@t[3],@t[3],#1
___
	# Interleave three NEON quarter-round streams with one integer
	# double-round stream: each NEONROUND string is followed by three
	# ROUND strings (18 NEON ops x 3 = 54 scalar ops), hiding scalar
	# latency behind NEON issue.  First the column rounds (flag 0),
	# then the diagonal rounds (flag 1).
	my @thread0=&NEONROUND($a0,$b0,$c0,$d0,$t0,0);
	my @thread1=&NEONROUND($a1,$b1,$c1,$d1,$t1,0);
	my @thread2=&NEONROUND($a2,$b2,$c2,$d2,$t2,0);
	my @thread3=&ROUND(0,4,8,12);

	foreach (@thread0) {
		eval;			eval(shift(@thread3));
		eval(shift(@thread1));	eval(shift(@thread3));
		eval(shift(@thread2));	eval(shift(@thread3));
	}

	@thread0=&NEONROUND($a0,$b0,$c0,$d0,$t0,1);
	@thread1=&NEONROUND($a1,$b1,$c1,$d1,$t1,1);
	@thread2=&NEONROUND($a2,$b2,$c2,$d2,$t2,1);
	@thread3=&ROUND(0,5,10,15);

	foreach (@thread0) {
		eval;			eval(shift(@thread3));
		eval(shift(@thread1));	eval(shift(@thread3));
		eval(shift(@thread2));	eval(shift(@thread3));
	}
737 $code.=<<___;
738         bne             .Loop_neon
739
740         add             @t[3],sp,#32
741         vld1.32         {$t0-$t1},[sp]          @ load key material
742         vld1.32         {$t2-$t3},[@t[3]]
743
744         ldr             @t[3],[sp,#4*(32+2)]    @ load len
745
746         str             @t[0], [sp,#4*(16+8)]   @ modulo-scheduled store
747         str             @t[1], [sp,#4*(16+9)]
748         str             @x[12],[sp,#4*(16+12)]
749         str             @t[2], [sp,#4*(16+13)]
750         str             @x[14],[sp,#4*(16+14)]
751
752         @ at this point we have first half of 512-bit result in
753         @ @x[0-7] and second half at sp+4*(16+8)
754
755         ldr             r12,[sp,#4*(32+1)]      @ load inp
756         ldr             r14,[sp,#4*(32+0)]      @ load out
757
758         vadd.i32        $a0,$a0,$t0             @ accumulate key material
759         vadd.i32        $a1,$a1,$t0
760         vadd.i32        $a2,$a2,$t0
761         vldr            $t0#lo,[sp,#4*(16+0)]   @ one
762
763         vadd.i32        $b0,$b0,$t1
764         vadd.i32        $b1,$b1,$t1
765         vadd.i32        $b2,$b2,$t1
766         vldr            $t1#lo,[sp,#4*(16+2)]   @ two
767
768         vadd.i32        $c0,$c0,$t2
769         vadd.i32        $c1,$c1,$t2
770         vadd.i32        $c2,$c2,$t2
771         vadd.i32        $d1#lo,$d1#lo,$t0#lo    @ counter+1
772         vadd.i32        $d2#lo,$d2#lo,$t1#lo    @ counter+2
773
774         vadd.i32        $d0,$d0,$t3
775         vadd.i32        $d1,$d1,$t3
776         vadd.i32        $d2,$d2,$t3
777
778         cmp             @t[3],#64*4
779         blo             .Ltail_neon
780
781         vld1.8          {$t0-$t1},[r12]!        @ load input
782          mov            @t[3],sp
783         vld1.8          {$t2-$t3},[r12]!
784         veor            $a0,$a0,$t0             @ xor with input
785         veor            $b0,$b0,$t1
786         vld1.8          {$t0-$t1},[r12]!
787         veor            $c0,$c0,$t2
788         veor            $d0,$d0,$t3
789         vld1.8          {$t2-$t3},[r12]!
790
791         veor            $a1,$a1,$t0
792          vst1.8         {$a0-$b0},[r14]!        @ store output
793         veor            $b1,$b1,$t1
794         vld1.8          {$t0-$t1},[r12]!
795         veor            $c1,$c1,$t2
796          vst1.8         {$c0-$d0},[r14]!
797         veor            $d1,$d1,$t3
798         vld1.8          {$t2-$t3},[r12]!
799
800         veor            $a2,$a2,$t0
801          vld1.32        {$a0-$b0},[@t[3]]!      @ load for next iteration
802          veor           $t0#hi,$t0#hi,$t0#hi
803          vldr           $t0#lo,[sp,#4*(16+4)]   @ four
804         veor            $b2,$b2,$t1
805          vld1.32        {$c0-$d0},[@t[3]]
806         veor            $c2,$c2,$t2
807          vst1.8         {$a1-$b1},[r14]!
808         veor            $d2,$d2,$t3
809          vst1.8         {$c1-$d1},[r14]!
810
811         vadd.i32        $d0#lo,$d0#lo,$t0#lo    @ next counter value
812         vldr            $t0#lo,[sp,#4*(16+0)]   @ one
813
814         ldmia           sp,{@t[0]-@t[3]}        @ load key material
815         add             @x[0],@x[0],@t[0]       @ accumulate key material
816         ldr             @t[0],[r12],#16         @ load input
817          vst1.8         {$a2-$b2},[r14]!
818         add             @x[1],@x[1],@t[1]
819         ldr             @t[1],[r12,#-12]
820          vst1.8         {$c2-$d2},[r14]!
821         add             @x[2],@x[2],@t[2]
822         ldr             @t[2],[r12,#-8]
823         add             @x[3],@x[3],@t[3]
824         ldr             @t[3],[r12,#-4]
825 # ifdef __ARMEB__
826         rev             @x[0],@x[0]
827         rev             @x[1],@x[1]
828         rev             @x[2],@x[2]
829         rev             @x[3],@x[3]
830 # endif
831         eor             @x[0],@x[0],@t[0]       @ xor with input
832          add            @t[0],sp,#4*(4)
833         eor             @x[1],@x[1],@t[1]
834         str             @x[0],[r14],#16         @ store output
835         eor             @x[2],@x[2],@t[2]
836         str             @x[1],[r14,#-12]
837         eor             @x[3],@x[3],@t[3]
838          ldmia          @t[0],{@t[0]-@t[3]}     @ load key material
839         str             @x[2],[r14,#-8]
840         str             @x[3],[r14,#-4]
841
842         add             @x[4],@x[4],@t[0]       @ accumulate key material
843         ldr             @t[0],[r12],#16         @ load input
844         add             @x[5],@x[5],@t[1]
845         ldr             @t[1],[r12,#-12]
846         add             @x[6],@x[6],@t[2]
847         ldr             @t[2],[r12,#-8]
848         add             @x[7],@x[7],@t[3]
849         ldr             @t[3],[r12,#-4]
850 # ifdef __ARMEB__
851         rev             @x[4],@x[4]
852         rev             @x[5],@x[5]
853         rev             @x[6],@x[6]
854         rev             @x[7],@x[7]
855 # endif
856         eor             @x[4],@x[4],@t[0]
857          add            @t[0],sp,#4*(8)
858         eor             @x[5],@x[5],@t[1]
859         str             @x[4],[r14],#16         @ store output
860         eor             @x[6],@x[6],@t[2]
861         str             @x[5],[r14,#-12]
862         eor             @x[7],@x[7],@t[3]
863          ldmia          @t[0],{@t[0]-@t[3]}     @ load key material
864         str             @x[6],[r14,#-8]
865          add            @x[0],sp,#4*(16+8)
866         str             @x[7],[r14,#-4]
867
868         ldmia           @x[0],{@x[0]-@x[7]}     @ load second half
869
870         add             @x[0],@x[0],@t[0]       @ accumulate key material
871         ldr             @t[0],[r12],#16         @ load input
872         add             @x[1],@x[1],@t[1]
873         ldr             @t[1],[r12,#-12]
874 # ifdef __thumb2__
875         it      hi
876 # endif
877          strhi          @t[2],[sp,#4*(16+10)]   @ copy "@x[10]" while at it
878         add             @x[2],@x[2],@t[2]
879         ldr             @t[2],[r12,#-8]
880 # ifdef __thumb2__
881         it      hi
882 # endif
883          strhi          @t[3],[sp,#4*(16+11)]   @ copy "@x[11]" while at it
884         add             @x[3],@x[3],@t[3]
885         ldr             @t[3],[r12,#-4]
886 # ifdef __ARMEB__
887         rev             @x[0],@x[0]
888         rev             @x[1],@x[1]
889         rev             @x[2],@x[2]
890         rev             @x[3],@x[3]
891 # endif
892         eor             @x[0],@x[0],@t[0]
893          add            @t[0],sp,#4*(12)
894         eor             @x[1],@x[1],@t[1]
895         str             @x[0],[r14],#16         @ store output
896         eor             @x[2],@x[2],@t[2]
897         str             @x[1],[r14,#-12]
898         eor             @x[3],@x[3],@t[3]
899          ldmia          @t[0],{@t[0]-@t[3]}     @ load key material
900         str             @x[2],[r14,#-8]
901         str             @x[3],[r14,#-4]
902
903         add             @x[4],@x[4],@t[0]       @ accumulate key material
904          add            @t[0],@t[0],#4          @ next counter value
905         add             @x[5],@x[5],@t[1]
906          str            @t[0],[sp,#4*(12)]      @ save next counter value
907         ldr             @t[0],[r12],#16         @ load input
908         add             @x[6],@x[6],@t[2]
909          add            @x[4],@x[4],#3          @ counter+3
910         ldr             @t[1],[r12,#-12]
911         add             @x[7],@x[7],@t[3]
912         ldr             @t[2],[r12,#-8]
913         ldr             @t[3],[r12,#-4]
914 # ifdef __ARMEB__
915         rev             @x[4],@x[4]
916         rev             @x[5],@x[5]
917         rev             @x[6],@x[6]
918         rev             @x[7],@x[7]
919 # endif
920         eor             @x[4],@x[4],@t[0]
921 # ifdef __thumb2__
922         it      hi
923 # endif
924          ldrhi          @t[0],[sp,#4*(32+2)]    @ re-load len
925         eor             @x[5],@x[5],@t[1]
926         eor             @x[6],@x[6],@t[2]
927         str             @x[4],[r14],#16         @ store output
928         eor             @x[7],@x[7],@t[3]
929         str             @x[5],[r14,#-12]
930          sub            @t[3],@t[0],#64*4       @ len-=64*4
931         str             @x[6],[r14,#-8]
932         str             @x[7],[r14,#-4]
933         bhi             .Loop_neon_outer
934
935         b               .Ldone_neon
936
937 .align  4
938 .Lbreak_neon:
939         @ harmonize NEON and integer-only stack frames: load data
940         @ from NEON frame, but save to integer-only one; distance
941         @ between the two is 4*(32+4+16-32)=4*(20).
942
943         str             @t[3], [sp,#4*(20+32+2)]        @ save len
944          add            @t[3],sp,#4*(32+4)
945         str             r12,   [sp,#4*(20+32+1)]        @ save inp
946         str             r14,   [sp,#4*(20+32+0)]        @ save out
947
948         ldr             @x[12],[sp,#4*(16+10)]
949         ldr             @x[14],[sp,#4*(16+11)]
950          vldmia         @t[3],{d8-d15}                  @ fulfill ABI requirement
951         str             @x[12],[sp,#4*(20+16+10)]       @ copy "@x[10]"
952         str             @x[14],[sp,#4*(20+16+11)]       @ copy "@x[11]"
953
954         ldr             @t[3], [sp,#4*(15)]
955         ldr             @x[12],[sp,#4*(12)]             @ modulo-scheduled load
956         ldr             @t[2], [sp,#4*(13)]
957         ldr             @x[14],[sp,#4*(14)]
958         str             @t[3], [sp,#4*(20+16+15)]
959         add             @t[3],sp,#4*(20)
960         vst1.32         {$a0-$b0},[@t[3]]!              @ copy key
961         add             sp,sp,#4*(20)                   @ switch frame
962         vst1.32         {$c0-$d0},[@t[3]]
963         mov             @t[3],#10
964         b               .Loop                           @ go integer-only
965
966 .align  4
967 .Ltail_neon:
968         cmp             @t[3],#64*3
969         bhs             .L192_or_more_neon
970         cmp             @t[3],#64*2
971         bhs             .L128_or_more_neon
972         cmp             @t[3],#64*1
973         bhs             .L64_or_more_neon
974
975         add             @t[0],sp,#4*(8)
976         vst1.8          {$a0-$b0},[sp]
977         add             @t[2],sp,#4*(0)
978         vst1.8          {$c0-$d0},[@t[0]]
979         b               .Loop_tail_neon
980
981 .align  4
982 .L64_or_more_neon:
983         vld1.8          {$t0-$t1},[r12]!
984         vld1.8          {$t2-$t3},[r12]!
985         veor            $a0,$a0,$t0
986         veor            $b0,$b0,$t1
987         veor            $c0,$c0,$t2
988         veor            $d0,$d0,$t3
989         vst1.8          {$a0-$b0},[r14]!
990         vst1.8          {$c0-$d0},[r14]!
991
992         beq             .Ldone_neon
993
994         add             @t[0],sp,#4*(8)
995         vst1.8          {$a1-$b1},[sp]
996         add             @t[2],sp,#4*(0)
997         vst1.8          {$c1-$d1},[@t[0]]
998         sub             @t[3],@t[3],#64*1       @ len-=64*1
999         b               .Loop_tail_neon
1000
1001 .align  4
1002 .L128_or_more_neon:
1003         vld1.8          {$t0-$t1},[r12]!
1004         vld1.8          {$t2-$t3},[r12]!
1005         veor            $a0,$a0,$t0
1006         veor            $b0,$b0,$t1
1007         vld1.8          {$t0-$t1},[r12]!
1008         veor            $c0,$c0,$t2
1009         veor            $d0,$d0,$t3
1010         vld1.8          {$t2-$t3},[r12]!
1011
1012         veor            $a1,$a1,$t0
1013         veor            $b1,$b1,$t1
1014          vst1.8         {$a0-$b0},[r14]!
1015         veor            $c1,$c1,$t2
1016          vst1.8         {$c0-$d0},[r14]!
1017         veor            $d1,$d1,$t3
1018         vst1.8          {$a1-$b1},[r14]!
1019         vst1.8          {$c1-$d1},[r14]!
1020
1021         beq             .Ldone_neon
1022
1023         add             @t[0],sp,#4*(8)
1024         vst1.8          {$a2-$b2},[sp]
1025         add             @t[2],sp,#4*(0)
1026         vst1.8          {$c2-$d2},[@t[0]]
1027         sub             @t[3],@t[3],#64*2       @ len-=64*2
1028         b               .Loop_tail_neon
1029
1030 .align  4
1031 .L192_or_more_neon:
1032         vld1.8          {$t0-$t1},[r12]!
1033         vld1.8          {$t2-$t3},[r12]!
1034         veor            $a0,$a0,$t0
1035         veor            $b0,$b0,$t1
1036         vld1.8          {$t0-$t1},[r12]!
1037         veor            $c0,$c0,$t2
1038         veor            $d0,$d0,$t3
1039         vld1.8          {$t2-$t3},[r12]!
1040
1041         veor            $a1,$a1,$t0
1042         veor            $b1,$b1,$t1
1043         vld1.8          {$t0-$t1},[r12]!
1044         veor            $c1,$c1,$t2
1045          vst1.8         {$a0-$b0},[r14]!
1046         veor            $d1,$d1,$t3
1047         vld1.8          {$t2-$t3},[r12]!
1048
1049         veor            $a2,$a2,$t0
1050          vst1.8         {$c0-$d0},[r14]!
1051         veor            $b2,$b2,$t1
1052          vst1.8         {$a1-$b1},[r14]!
1053         veor            $c2,$c2,$t2
1054          vst1.8         {$c1-$d1},[r14]!
1055         veor            $d2,$d2,$t3
1056         vst1.8          {$a2-$b2},[r14]!
1057         vst1.8          {$c2-$d2},[r14]!
1058
1059         beq             .Ldone_neon
1060
1061         ldmia           sp,{@t[0]-@t[3]}        @ load key material
1062         add             @x[0],@x[0],@t[0]       @ accumulate key material
1063          add            @t[0],sp,#4*(4)
1064         add             @x[1],@x[1],@t[1]
1065         add             @x[2],@x[2],@t[2]
1066         add             @x[3],@x[3],@t[3]
1067          ldmia          @t[0],{@t[0]-@t[3]}     @ load key material
1068
1069         add             @x[4],@x[4],@t[0]       @ accumulate key material
1070          add            @t[0],sp,#4*(8)
1071         add             @x[5],@x[5],@t[1]
1072         add             @x[6],@x[6],@t[2]
1073         add             @x[7],@x[7],@t[3]
1074          ldmia          @t[0],{@t[0]-@t[3]}     @ load key material
1075 # ifdef __ARMEB__
1076         rev             @x[0],@x[0]
1077         rev             @x[1],@x[1]
1078         rev             @x[2],@x[2]
1079         rev             @x[3],@x[3]
1080         rev             @x[4],@x[4]
1081         rev             @x[5],@x[5]
1082         rev             @x[6],@x[6]
1083         rev             @x[7],@x[7]
1084 # endif
1085         stmia           sp,{@x[0]-@x[7]}
1086          add            @x[0],sp,#4*(16+8)
1087
1088         ldmia           @x[0],{@x[0]-@x[7]}     @ load second half
1089
1090         add             @x[0],@x[0],@t[0]       @ accumulate key material
1091          add            @t[0],sp,#4*(12)
1092         add             @x[1],@x[1],@t[1]
1093         add             @x[2],@x[2],@t[2]
1094         add             @x[3],@x[3],@t[3]
1095          ldmia          @t[0],{@t[0]-@t[3]}     @ load key material
1096
1097         add             @x[4],@x[4],@t[0]       @ accumulate key material
1098          add            @t[0],sp,#4*(8)
1099         add             @x[5],@x[5],@t[1]
1100          add            @x[4],@x[4],#3          @ counter+3
1101         add             @x[6],@x[6],@t[2]
1102         add             @x[7],@x[7],@t[3]
1103          ldr            @t[3],[sp,#4*(32+2)]    @ re-load len
1104 # ifdef __ARMEB__
1105         rev             @x[0],@x[0]
1106         rev             @x[1],@x[1]
1107         rev             @x[2],@x[2]
1108         rev             @x[3],@x[3]
1109         rev             @x[4],@x[4]
1110         rev             @x[5],@x[5]
1111         rev             @x[6],@x[6]
1112         rev             @x[7],@x[7]
1113 # endif
1114         stmia           @t[0],{@x[0]-@x[7]}
1115          add            @t[2],sp,#4*(0)
1116          sub            @t[3],@t[0],#64*3       @ len-=64*3
1117
1118 .Loop_tail_neon:
1119         ldrb            @t[0],[@t[2]],#1        @ read buffer on stack
1120         ldrb            @t[1],[r12],#1          @ read input
1121         subs            @t[3],@t[3],#1
1122         eor             @t[0],@t[0],@t[1]
1123         strb            @t[0],[r14],#1          @ store ouput
1124         bne             .Loop_tail_neon
1125
1126 .Ldone_neon:
1127         add             sp,sp,#4*(32+4)
1128         vldmia          sp,{d8-d15}
1129         add             sp,sp,#4*(16+3)
1130         ldmia           sp!,{r4-r11,pc}
1131 .size   ChaCha20_neon,.-ChaCha20_neon
1132 .comm   OPENSSL_armcap_P,4,4
1133 #endif
1134 ___
1135 }}}
1136
# Post-process the generated assembly before emitting it:
#   1. expand `...` constructs — inline Perl expressions evaluated at
#      generation time (e.g. computed stack offsets);
#   2. translate the qN#lo / qN#hi pseudo-syntax into the architectural
#      d-register aliases: qN#lo -> d(2N), qN#hi -> d(2N+1), i.e. the low
#      and high 64-bit halves of the 128-bit NEON register qN.
foreach (split("\n",$code)) {
	s/\`([^\`]*)\`/eval $1/geo;

	s/\bq([0-9]+)#(lo|hi)/sprintf "d%d",2*$1+($2 eq "hi")/geo;

	print $_,"\n";
}
# STDOUT may be a pipe into arm-xlate.pl; check close() so buffered-write
# failures (full disk, broken pipe) don't silently produce truncated output.
close STDOUT or die "error closing STDOUT: $!";