ARM64 assembly pack: add ThunderX2 results.
1 #! /usr/bin/env perl
2 # Copyright 2014-2018 The OpenSSL Project Authors. All Rights Reserved.
3 #
4 # Licensed under the Apache License 2.0 (the "License").  You may not use
5 # this file except in compliance with the License.  You can obtain a copy
6 # in the file LICENSE in the source distribution or at
7 # https://www.openssl.org/source/license.html
8
9 #
10 # ====================================================================
11 # Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
12 # project. The module is, however, dual licensed under OpenSSL and
13 # CRYPTOGAMS licenses depending on where you obtain it. For further
14 # details see http://www.openssl.org/~appro/cryptogams/.
15 # ====================================================================
16 #
17 # GHASH for ARMv8 Crypto Extension, 64-bit polynomial multiplication.
18 #
19 # June 2014
20 #
21 # Initial version was developed in tight cooperation with Ard
22 # Biesheuvel of Linaro from bits-n-pieces of other assembly modules.
23 # Just like aesv8-armx.pl this module supports both AArch32 and
24 # AArch64 execution modes.
25 #
26 # July 2014
27 #
28 # Implement 2x aggregated reduction [see ghash-x86.pl for background
29 # information].
30 #
31 # November 2017
32 #
33 # Use the AArch64 register bank to "accommodate" 4x aggregated reduction
34 # and improve performance by 20-70% depending on processor.
35 #
36 # Current performance in cycles per processed byte:
37 #
38 #               64-bit PMULL    32-bit PMULL    32-bit NEON(*)
39 # Apple A7      0.58            0.92            5.62
40 # Cortex-A53    0.85            1.01            8.39
41 # Cortex-A57    0.73            1.17            7.61
42 # Denver        0.51            0.65            6.02
43 # Mongoose      0.65            1.10            8.06
44 # Kryo          0.76            1.16            8.00
45 # ThunderX2     1.05
46 #
47 # (*)   presented for reference/comparison purposes;
48
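# For reference only (not used by the code below): a plain-Perl, bit-by-bit
# model of the GF(2^128) multiplication that the PMULL code paths implement,
# following the GHASH bit ordering of NIST SP 800-38D.  Arguments and result
# are 16-byte strings; GHASH itself is Xi = gf128_mul_ref(Xi ^ Ii, H) per
# 16-byte input block Ii.
sub gf128_mul_ref {
    my ($x, $h) = @_;
    my @v = unpack("C16", $h);                  # V = H
    my @z = (0) x 16;                           # Z = 0
    for my $i (0 .. 127) {
        # bit i of X, bit 0 being the most significant bit of byte 0
        my $bit = (unpack("C", substr($x, $i >> 3, 1)) >> (7 - ($i & 7))) & 1;
        if ($bit) { $z[$_] ^= $v[$_] for (0 .. 15); }
        my $carry = $v[15] & 1;                 # bit shifted out on the right
        for (my $j = 15; $j > 0; $j--) {        # V >>= 1 across all 16 bytes
            $v[$j] = ($v[$j] >> 1) | (($v[$j - 1] & 1) << 7);
        }
        $v[0] >>= 1;
        $v[0] ^= 0xe1 if ($carry);              # reduce by x^128+x^7+x^2+x+1
    }
    return pack("C16", @z);
}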
49 $flavour = shift;
50 $output  = shift;
51
52 $0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
53 ( $xlate="${dir}arm-xlate.pl" and -f $xlate ) or
54 ( $xlate="${dir}../../perlasm/arm-xlate.pl" and -f $xlate) or
55 die "can't locate arm-xlate.pl";
56
57 open OUT,"| \"$^X\" $xlate $flavour $output";
58 *STDOUT=*OUT;
59
60 $Xi="x0";       # argument block
61 $Htbl="x1";
62 $inp="x2";
63 $len="x3";
64
65 $inc="x12";
66
67 {
68 my ($Xl,$Xm,$Xh,$IN)=map("q$_",(0..3));
69 my ($t0,$t1,$t2,$xC2,$H,$Hhl,$H2)=map("q$_",(8..14));
70 my $_byte = ($flavour =~ /win/ ? "DCB" : ".byte");
71
72 $code=<<___;
73 #include "arm_arch.h"
74
75 #if __ARM_MAX_ARCH__>=7
76 ___
77 $code.=".arch   armv8-a+crypto\n.text\n"        if ($flavour =~ /64/);
78 $code.=<<___                                    if ($flavour !~ /64/);
79 .fpu    neon
80 #ifdef __thumb2__
81 .syntax        unified
82 .thumb
83 # define INST(a,b,c,d) $_byte  c,0xef,a,b
84 #else
85 .code  32
86 # define INST(a,b,c,d) $_byte  a,b,c,0xf2
87 #endif
88
89 .text
90 ___
91
92 ################################################################################
93 # void gcm_init_v8(u128 Htable[16],const u64 H[2]);
94 #
95 # input:        128-bit H - secret parameter E(K,0^128)
96 # output:       precomputed table filled with powers of twisted H;
97 #               H is twisted to handle the reverse bit order of GHASH;
98 #               only a few of the 16 slots of Htable[16] are used;
99 #               the data is opaque to the outside world (which allows
100 #              the code to be optimized independently);
101 #
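#
# For orientation, the layout produced by the stores below (128-bit slots):
#               Htable[0]: twisted H
#               Htable[1]: packed Karatsuba pre-processed halves of H and H^2
#               Htable[2]: twisted H^2
#               Htable[3]: twisted H^3                      (64-bit flavour only)
#               Htable[4]: packed halves of H^3 and H^4     (64-bit flavour only)
#               Htable[5]: twisted H^4                      (64-bit flavour only)
#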
102 $code.=<<___;
103 .global gcm_init_v8
104 .type   gcm_init_v8,%function
105 .align  4
106 gcm_init_v8:
107         vld1.64         {$t1},[x1]              @ load input H
108         vmov.i8         $xC2,#0xe1
109         vshl.i64        $xC2,$xC2,#57           @ 0xc2.0
110         vext.8          $IN,$t1,$t1,#8
111         vshr.u64        $t2,$xC2,#63
112         vdup.32         $t1,${t1}[1]
113         vext.8          $t0,$t2,$xC2,#8         @ t0=0xc2....01
114         vshr.u64        $t2,$IN,#63
115         vshr.s32        $t1,$t1,#31             @ broadcast carry bit
116         vand            $t2,$t2,$t0
117         vshl.i64        $IN,$IN,#1
118         vext.8          $t2,$t2,$t2,#8
119         vand            $t0,$t0,$t1
120         vorr            $IN,$IN,$t2             @ H<<<=1
121         veor            $H,$IN,$t0              @ twisted H
122         vst1.64         {$H},[x0],#16           @ store Htable[0]
123
124         @ calculate H^2
125         vext.8          $t0,$H,$H,#8            @ Karatsuba pre-processing
126         vpmull.p64      $Xl,$H,$H
127         veor            $t0,$t0,$H
128         vpmull2.p64     $Xh,$H,$H
129         vpmull.p64      $Xm,$t0,$t0
130
131         vext.8          $t1,$Xl,$Xh,#8          @ Karatsuba post-processing
132         veor            $t2,$Xl,$Xh
133         veor            $Xm,$Xm,$t1
134         veor            $Xm,$Xm,$t2
135         vpmull.p64      $t2,$Xl,$xC2            @ 1st phase
136
137         vmov            $Xh#lo,$Xm#hi           @ Xh|Xm - 256-bit result
138         vmov            $Xm#hi,$Xl#lo           @ Xm is rotated Xl
139         veor            $Xl,$Xm,$t2
140
141         vext.8          $t2,$Xl,$Xl,#8          @ 2nd phase
142         vpmull.p64      $Xl,$Xl,$xC2
143         veor            $t2,$t2,$Xh
144         veor            $H2,$Xl,$t2
145
146         vext.8          $t1,$H2,$H2,#8          @ Karatsuba pre-processing
147         veor            $t1,$t1,$H2
148         vext.8          $Hhl,$t0,$t1,#8         @ pack Karatsuba pre-processed
149         vst1.64         {$Hhl-$H2},[x0],#32     @ store Htable[1..2]
150 ___
151 if ($flavour =~ /64/) {
152 my ($t3,$Yl,$Ym,$Yh) = map("q$_",(4..7));
153
154 $code.=<<___;
155         @ calculate H^3 and H^4
156         vpmull.p64      $Xl,$H, $H2
157          vpmull.p64     $Yl,$H2,$H2
158         vpmull2.p64     $Xh,$H, $H2
159          vpmull2.p64    $Yh,$H2,$H2
160         vpmull.p64      $Xm,$t0,$t1
161          vpmull.p64     $Ym,$t1,$t1
162
163         vext.8          $t0,$Xl,$Xh,#8          @ Karatsuba post-processing
164          vext.8         $t1,$Yl,$Yh,#8
165         veor            $t2,$Xl,$Xh
166         veor            $Xm,$Xm,$t0
167          veor           $t3,$Yl,$Yh
168          veor           $Ym,$Ym,$t1
169         veor            $Xm,$Xm,$t2
170         vpmull.p64      $t2,$Xl,$xC2            @ 1st phase
171          veor           $Ym,$Ym,$t3
172          vpmull.p64     $t3,$Yl,$xC2
173
174         vmov            $Xh#lo,$Xm#hi           @ Xh|Xm - 256-bit result
175          vmov           $Yh#lo,$Ym#hi
176         vmov            $Xm#hi,$Xl#lo           @ Xm is rotated Xl
177          vmov           $Ym#hi,$Yl#lo
178         veor            $Xl,$Xm,$t2
179          veor           $Yl,$Ym,$t3
180
181         vext.8          $t2,$Xl,$Xl,#8          @ 2nd phase
182          vext.8         $t3,$Yl,$Yl,#8
183         vpmull.p64      $Xl,$Xl,$xC2
184          vpmull.p64     $Yl,$Yl,$xC2
185         veor            $t2,$t2,$Xh
186          veor           $t3,$t3,$Yh
187         veor            $H, $Xl,$t2             @ H^3
188          veor           $H2,$Yl,$t3             @ H^4
189
190         vext.8          $t0,$H, $H,#8           @ Karatsuba pre-processing
191          vext.8         $t1,$H2,$H2,#8
192         veor            $t0,$t0,$H
193          veor           $t1,$t1,$H2
194         vext.8          $Hhl,$t0,$t1,#8         @ pack Karatsuba pre-processed
195         vst1.64         {$H-$H2},[x0]           @ store Htable[3..5]
196 ___
197 }
198 $code.=<<___;
199         ret
200 .size   gcm_init_v8,.-gcm_init_v8
201 ___
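################################################################################
# Note: throughout this module each 128x128-bit product is computed with
# three PMULLs using the Karatsuba identity over GF(2)[x], where "+" is XOR:
#
#       (Hh*x^64 + Hl)*(Xh*x^64 + Xl) =
#           Hh*Xh*x^128 + [(Hh+Hl)*(Xh+Xl) + Hh*Xh + Hl*Xl]*x^64 + Hl*Xl
#
# hence gcm_init_v8 stores the pre-processed sums Hl+Hh alongside the
# corresponding powers of H.
#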
202 ################################################################################
203 # void gcm_gmult_v8(u64 Xi[2],const u128 Htable[16]);
204 #
205 # input:        Xi - current hash value;
206 #               Htable - table precomputed in gcm_init_v8;
207 # output:       Xi - next hash value;
208 #
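#               (In terms of the reference model near the top of this file,
#               this computes Xi = gf128_mul_ref(Xi, H), i.e. one
#               multiplication by H, with H taken from Htable.)
#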
209 $code.=<<___;
210 .global gcm_gmult_v8
211 .type   gcm_gmult_v8,%function
212 .align  4
213 gcm_gmult_v8:
214         vld1.64         {$t1},[$Xi]             @ load Xi
215         vmov.i8         $xC2,#0xe1
216         vld1.64         {$H-$Hhl},[$Htbl]       @ load twisted H, ...
217         vshl.u64        $xC2,$xC2,#57
218 #ifndef __ARMEB__
219         vrev64.8        $t1,$t1
220 #endif
221         vext.8          $IN,$t1,$t1,#8
222
223         vpmull.p64      $Xl,$H,$IN              @ H.lo·Xi.lo
224         veor            $t1,$t1,$IN             @ Karatsuba pre-processing
225         vpmull2.p64     $Xh,$H,$IN              @ H.hi·Xi.hi
226         vpmull.p64      $Xm,$Hhl,$t1            @ (H.lo+H.hi)·(Xi.lo+Xi.hi)
227
228         vext.8          $t1,$Xl,$Xh,#8          @ Karatsuba post-processing
229         veor            $t2,$Xl,$Xh
230         veor            $Xm,$Xm,$t1
231         veor            $Xm,$Xm,$t2
232         vpmull.p64      $t2,$Xl,$xC2            @ 1st phase of reduction
233
234         vmov            $Xh#lo,$Xm#hi           @ Xh|Xm - 256-bit result
235         vmov            $Xm#hi,$Xl#lo           @ Xm is rotated Xl
236         veor            $Xl,$Xm,$t2
237
238         vext.8          $t2,$Xl,$Xl,#8          @ 2nd phase of reduction
239         vpmull.p64      $Xl,$Xl,$xC2
240         veor            $t2,$t2,$Xh
241         veor            $Xl,$Xl,$t2
242
243 #ifndef __ARMEB__
244         vrev64.8        $Xl,$Xl
245 #endif
246         vext.8          $Xl,$Xl,$Xl,#8
247         vst1.64         {$Xl},[$Xi]             @ write out Xi
248
249         ret
250 .size   gcm_gmult_v8,.-gcm_gmult_v8
251 ___
252 ################################################################################
253 # void gcm_ghash_v8(u64 Xi[2],const u128 Htable[16],const u8 *inp,size_t len);
254 #
255 # input:        table precomputed in gcm_init_v8;
256 #               current hash value Xi;
257 #               pointer to input data;
258 #               length of input data in bytes (must be divisible by the block size);
259 # output:       next hash value Xi;
260 #
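#
# A sketch of this function's arithmetic in terms of gf128_mul_ref above
# (arithmetic only: byte-order handling is internal to the code below, and
# the real function takes the precomputed Htable rather than H itself):
sub gcm_ghash_ref {
    my ($xi, $h, $inp) = @_;                    # 16-byte strings
    for (my $off = 0; $off < length($inp); $off += 16) {
        $xi = gf128_mul_ref($xi ^ substr($inp, $off, 16), $h);
    }
    return $xi;                                 # next hash value
}
#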
261 $code.=<<___;
262 .global gcm_ghash_v8
263 .type   gcm_ghash_v8,%function
264 .align  4
265 gcm_ghash_v8:
266 ___
267 $code.=<<___    if ($flavour =~ /64/);
268         cmp             $len,#64
269         b.hs            .Lgcm_ghash_v8_4x
270 ___
271 $code.=<<___            if ($flavour !~ /64/);
272         vstmdb          sp!,{d8-d15}            @ 32-bit ABI says so
273 ___
274 $code.=<<___;
275         vld1.64         {$Xl},[$Xi]             @ load [rotated] Xi
276                                                 @ "[rotated]" means that
277                                                 @ loaded value would have
278                                                 @ to be rotated in order to
279                                                 @ make it appear as in
280                                                 @ algorithm specification
281         subs            $len,$len,#32           @ see if $len is 32 or larger
282         mov             $inc,#16                @ $inc is used as post-
283                                                 @ increment for input pointer;
284                                                 @ as loop is modulo-scheduled
285                                                 @ $inc is zeroed just in time
286                                                 @ to preclude overstepping
287                                                 @ inp[len], which means that
288                                                 @ last block[s] are actually
289                                                 @ loaded twice, but last
290                                                 @ copy is not processed
291         vld1.64         {$H-$Hhl},[$Htbl],#32   @ load twisted H, ..., H^2
292         vmov.i8         $xC2,#0xe1
293         vld1.64         {$H2},[$Htbl]
294         cclr            $inc,eq                 @ is it time to zero $inc?
295         vext.8          $Xl,$Xl,$Xl,#8          @ rotate Xi
296         vld1.64         {$t0},[$inp],#16        @ load [rotated] I[0]
297         vshl.u64        $xC2,$xC2,#57           @ compose 0xc2.0 constant
298 #ifndef __ARMEB__
299         vrev64.8        $t0,$t0
300         vrev64.8        $Xl,$Xl
301 #endif
302         vext.8          $IN,$t0,$t0,#8          @ rotate I[0]
303         b.lo            .Lodd_tail_v8           @ $len was less than 32
304 ___
305 { my ($Xln,$Xmn,$Xhn,$In) = map("q$_",(4..7));
306         #######
307         # Xi+2 = [H*(Ii+1 + Xi+1)] mod P =
308         #       [(H*Ii+1) + (H*Xi+1)] mod P =
309         #       [(H*Ii+1) + H^2*(Ii+Xi)] mod P
310         #
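        # i.e. .Loop_mod2x_v8 below performs a single reduction per two input
        # blocks: H^2 is applied to the running (Ii+Xi) term while H is
        # applied to the following block Ii+1, and the three partial products
        # are accumulated before the one 2-phase reduction.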
311 $code.=<<___;
312         vld1.64         {$t1},[$inp],$inc       @ load [rotated] I[1]
313 #ifndef __ARMEB__
314         vrev64.8        $t1,$t1
315 #endif
316         vext.8          $In,$t1,$t1,#8
317         veor            $IN,$IN,$Xl             @ I[i]^=Xi
318         vpmull.p64      $Xln,$H,$In             @ H·Ii+1
319         veor            $t1,$t1,$In             @ Karatsuba pre-processing
320         vpmull2.p64     $Xhn,$H,$In
321         b               .Loop_mod2x_v8
322
323 .align  4
324 .Loop_mod2x_v8:
325         vext.8          $t2,$IN,$IN,#8
326         subs            $len,$len,#32           @ is there more data?
327         vpmull.p64      $Xl,$H2,$IN             @ H^2.lo·Xi.lo
328         cclr            $inc,lo                 @ is it time to zero $inc?
329
330          vpmull.p64     $Xmn,$Hhl,$t1
331         veor            $t2,$t2,$IN             @ Karatsuba pre-processing
332         vpmull2.p64     $Xh,$H2,$IN             @ H^2.hi·Xi.hi
333         veor            $Xl,$Xl,$Xln            @ accumulate
334         vpmull2.p64     $Xm,$Hhl,$t2            @ (H^2.lo+H^2.hi)·(Xi.lo+Xi.hi)
335          vld1.64        {$t0},[$inp],$inc       @ load [rotated] I[i+2]
336
337         veor            $Xh,$Xh,$Xhn
338          cclr           $inc,eq                 @ is it time to zero $inc?
339         veor            $Xm,$Xm,$Xmn
340
341         vext.8          $t1,$Xl,$Xh,#8          @ Karatsuba post-processing
342         veor            $t2,$Xl,$Xh
343         veor            $Xm,$Xm,$t1
344          vld1.64        {$t1},[$inp],$inc       @ load [rotated] I[i+3]
345 #ifndef __ARMEB__
346          vrev64.8       $t0,$t0
347 #endif
348         veor            $Xm,$Xm,$t2
349         vpmull.p64      $t2,$Xl,$xC2            @ 1st phase of reduction
350
351 #ifndef __ARMEB__
352          vrev64.8       $t1,$t1
353 #endif
354         vmov            $Xh#lo,$Xm#hi           @ Xh|Xm - 256-bit result
355         vmov            $Xm#hi,$Xl#lo           @ Xm is rotated Xl
356          vext.8         $In,$t1,$t1,#8
357          vext.8         $IN,$t0,$t0,#8
358         veor            $Xl,$Xm,$t2
359          vpmull.p64     $Xln,$H,$In             @ H·Ii+1
360         veor            $IN,$IN,$Xh             @ accumulate $IN early
361
362         vext.8          $t2,$Xl,$Xl,#8          @ 2nd phase of reduction
363         vpmull.p64      $Xl,$Xl,$xC2
364         veor            $IN,$IN,$t2
365          veor           $t1,$t1,$In             @ Karatsuba pre-processing
366         veor            $IN,$IN,$Xl
367          vpmull2.p64    $Xhn,$H,$In
368         b.hs            .Loop_mod2x_v8          @ there were at least 32 more bytes
369
370         veor            $Xh,$Xh,$t2
371         vext.8          $IN,$t0,$t0,#8          @ re-construct $IN
372         adds            $len,$len,#32           @ re-construct $len
373         veor            $Xl,$Xl,$Xh             @ re-construct $Xl
374         b.eq            .Ldone_v8               @ is $len zero?
375 ___
376 }
377 $code.=<<___;
378 .Lodd_tail_v8:
379         vext.8          $t2,$Xl,$Xl,#8
380         veor            $IN,$IN,$Xl             @ inp^=Xi
381         veor            $t1,$t0,$t2             @ $t1 is rotated inp^Xi
382
383         vpmull.p64      $Xl,$H,$IN              @ H.lo·Xi.lo
384         veor            $t1,$t1,$IN             @ Karatsuba pre-processing
385         vpmull2.p64     $Xh,$H,$IN              @ H.hi·Xi.hi
386         vpmull.p64      $Xm,$Hhl,$t1            @ (H.lo+H.hi)·(Xi.lo+Xi.hi)
387
388         vext.8          $t1,$Xl,$Xh,#8          @ Karatsuba post-processing
389         veor            $t2,$Xl,$Xh
390         veor            $Xm,$Xm,$t1
391         veor            $Xm,$Xm,$t2
392         vpmull.p64      $t2,$Xl,$xC2            @ 1st phase of reduction
393
394         vmov            $Xh#lo,$Xm#hi           @ Xh|Xm - 256-bit result
395         vmov            $Xm#hi,$Xl#lo           @ Xm is rotated Xl
396         veor            $Xl,$Xm,$t2
397
398         vext.8          $t2,$Xl,$Xl,#8          @ 2nd phase of reduction
399         vpmull.p64      $Xl,$Xl,$xC2
400         veor            $t2,$t2,$Xh
401         veor            $Xl,$Xl,$t2
402
403 .Ldone_v8:
404 #ifndef __ARMEB__
405         vrev64.8        $Xl,$Xl
406 #endif
407         vext.8          $Xl,$Xl,$Xl,#8
408         vst1.64         {$Xl},[$Xi]             @ write out Xi
409
410 ___
411 $code.=<<___            if ($flavour !~ /64/);
412         vldmia          sp!,{d8-d15}            @ 32-bit ABI says so
413 ___
414 $code.=<<___;
415         ret
416 .size   gcm_ghash_v8,.-gcm_ghash_v8
417 ___
418
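################################################################################
# The 4x code path extends the same aggregation to four blocks per reduction:
#
#       Xi+4 = [H*Ii+3 + H^2*Ii+2 + H^3*Ii+1 + H^4*(Ii+Xi)] mod P
#
# which is why gcm_init_v8 also precomputes (twisted) H^3 and H^4 for the
# 64-bit flavour.
#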
419 if ($flavour =~ /64/) {                         # 4x subroutine
420 my ($I0,$j1,$j2,$j3,
421     $I1,$I2,$I3,$H3,$H34,$H4,$Yl,$Ym,$Yh) = map("q$_",(4..7,15..23));
422
423 $code.=<<___;
424 .type   gcm_ghash_v8_4x,%function
425 .align  4
426 gcm_ghash_v8_4x:
427 .Lgcm_ghash_v8_4x:
428         vld1.64         {$Xl},[$Xi]             @ load [rotated] Xi
429         vld1.64         {$H-$H2},[$Htbl],#48    @ load twisted H, ..., H^2
430         vmov.i8         $xC2,#0xe1
431         vld1.64         {$H3-$H4},[$Htbl]       @ load twisted H^3, ..., H^4
432         vshl.u64        $xC2,$xC2,#57           @ compose 0xc2.0 constant
433
434         vld1.64         {$I0-$j3},[$inp],#64
435 #ifndef __ARMEB__
436         vrev64.8        $Xl,$Xl
437         vrev64.8        $j1,$j1
438         vrev64.8        $j2,$j2
439         vrev64.8        $j3,$j3
440         vrev64.8        $I0,$I0
441 #endif
442         vext.8          $I3,$j3,$j3,#8
443         vext.8          $I2,$j2,$j2,#8
444         vext.8          $I1,$j1,$j1,#8
445
446         vpmull.p64      $Yl,$H,$I3              @ H·Ii+3
447         veor            $j3,$j3,$I3
448         vpmull2.p64     $Yh,$H,$I3
449         vpmull.p64      $Ym,$Hhl,$j3
450
451         vpmull.p64      $t0,$H2,$I2             @ H^2·Ii+2
452         veor            $j2,$j2,$I2
453         vpmull2.p64     $I2,$H2,$I2
454         vpmull2.p64     $j2,$Hhl,$j2
455
456         veor            $Yl,$Yl,$t0
457         veor            $Yh,$Yh,$I2
458         veor            $Ym,$Ym,$j2
459
460         vpmull.p64      $j3,$H3,$I1             @ H^3·Ii+1
461         veor            $j1,$j1,$I1
462         vpmull2.p64     $I1,$H3,$I1
463         vpmull.p64      $j1,$H34,$j1
464
465         veor            $Yl,$Yl,$j3
466         veor            $Yh,$Yh,$I1
467         veor            $Ym,$Ym,$j1
468
469         subs            $len,$len,#128
470         b.lo            .Ltail4x
471
472         b               .Loop4x
473
474 .align  4
475 .Loop4x:
476         veor            $t0,$I0,$Xl
477          vld1.64        {$I0-$j3},[$inp],#64
478         vext.8          $IN,$t0,$t0,#8
479 #ifndef __ARMEB__
480          vrev64.8       $j1,$j1
481          vrev64.8       $j2,$j2
482          vrev64.8       $j3,$j3
483          vrev64.8       $I0,$I0
484 #endif
485
486         vpmull.p64      $Xl,$H4,$IN             @ H^4·(Xi+Ii)
487         veor            $t0,$t0,$IN
488         vpmull2.p64     $Xh,$H4,$IN
489          vext.8         $I3,$j3,$j3,#8
490         vpmull2.p64     $Xm,$H34,$t0
491
492         veor            $Xl,$Xl,$Yl
493         veor            $Xh,$Xh,$Yh
494          vext.8         $I2,$j2,$j2,#8
495         veor            $Xm,$Xm,$Ym
496          vext.8         $I1,$j1,$j1,#8
497
498         vext.8          $t1,$Xl,$Xh,#8          @ Karatsuba post-processing
499         veor            $t2,$Xl,$Xh
500          vpmull.p64     $Yl,$H,$I3              @ H·Ii+3
501          veor           $j3,$j3,$I3
502         veor            $Xm,$Xm,$t1
503          vpmull2.p64    $Yh,$H,$I3
504         veor            $Xm,$Xm,$t2
505          vpmull.p64     $Ym,$Hhl,$j3
506
507         vpmull.p64      $t2,$Xl,$xC2            @ 1st phase of reduction
508         vmov            $Xh#lo,$Xm#hi           @ Xh|Xm - 256-bit result
509         vmov            $Xm#hi,$Xl#lo           @ Xm is rotated Xl
510          vpmull.p64     $t0,$H2,$I2             @ H^2·Ii+2
511          veor           $j2,$j2,$I2
512          vpmull2.p64    $I2,$H2,$I2
513         veor            $Xl,$Xm,$t2
514          vpmull2.p64    $j2,$Hhl,$j2
515
516          veor           $Yl,$Yl,$t0
517          veor           $Yh,$Yh,$I2
518          veor           $Ym,$Ym,$j2
519
520         vext.8          $t2,$Xl,$Xl,#8          @ 2nd phase of reduction
521         vpmull.p64      $Xl,$Xl,$xC2
522          vpmull.p64     $j3,$H3,$I1             @ H^3·Ii+1
523          veor           $j1,$j1,$I1
524         veor            $t2,$t2,$Xh
525          vpmull2.p64    $I1,$H3,$I1
526          vpmull.p64     $j1,$H34,$j1
527
528         veor            $Xl,$Xl,$t2
529          veor           $Yl,$Yl,$j3
530          veor           $Yh,$Yh,$I1
531         vext.8          $Xl,$Xl,$Xl,#8
532          veor           $Ym,$Ym,$j1
533
534         subs            $len,$len,#64
535         b.hs            .Loop4x
536
537 .Ltail4x:
538         veor            $t0,$I0,$Xl
539         vext.8          $IN,$t0,$t0,#8
540
541         vpmull.p64      $Xl,$H4,$IN             @ H^4·(Xi+Ii)
542         veor            $t0,$t0,$IN
543         vpmull2.p64     $Xh,$H4,$IN
544         vpmull2.p64     $Xm,$H34,$t0
545
546         veor            $Xl,$Xl,$Yl
547         veor            $Xh,$Xh,$Yh
548         veor            $Xm,$Xm,$Ym
549
550         adds            $len,$len,#64
551         b.eq            .Ldone4x
552
553         cmp             $len,#32
554         b.lo            .Lone
555         b.eq            .Ltwo
556 .Lthree:
557         vext.8          $t1,$Xl,$Xh,#8          @ Karatsuba post-processing
558         veor            $t2,$Xl,$Xh
559         veor            $Xm,$Xm,$t1
560          vld1.64        {$I0-$j2},[$inp]
561         veor            $Xm,$Xm,$t2
562 #ifndef __ARMEB__
563          vrev64.8       $j1,$j1
564          vrev64.8       $j2,$j2
565          vrev64.8       $I0,$I0
566 #endif
567
568         vpmull.p64      $t2,$Xl,$xC2            @ 1st phase of reduction
569         vmov            $Xh#lo,$Xm#hi           @ Xh|Xm - 256-bit result
570         vmov            $Xm#hi,$Xl#lo           @ Xm is rotated Xl
571          vext.8         $I2,$j2,$j2,#8
572          vext.8         $I1,$j1,$j1,#8
573         veor            $Xl,$Xm,$t2
574
575          vpmull.p64     $Yl,$H,$I2              @ H·Ii+2
576          veor           $j2,$j2,$I2
577
578         vext.8          $t2,$Xl,$Xl,#8          @ 2nd phase of reduction
579         vpmull.p64      $Xl,$Xl,$xC2
580         veor            $t2,$t2,$Xh
581          vpmull2.p64    $Yh,$H,$I2
582          vpmull.p64     $Ym,$Hhl,$j2
583         veor            $Xl,$Xl,$t2
584          vpmull.p64     $j3,$H2,$I1             @ H^2·Ii+1
585          veor           $j1,$j1,$I1
586         vext.8          $Xl,$Xl,$Xl,#8
587
588          vpmull2.p64    $I1,$H2,$I1
589         veor            $t0,$I0,$Xl
590          vpmull2.p64    $j1,$Hhl,$j1
591         vext.8          $IN,$t0,$t0,#8
592
593          veor           $Yl,$Yl,$j3
594          veor           $Yh,$Yh,$I1
595          veor           $Ym,$Ym,$j1
596
597         vpmull.p64      $Xl,$H3,$IN             @ H^3·(Xi+Ii)
598         veor            $t0,$t0,$IN
599         vpmull2.p64     $Xh,$H3,$IN
600         vpmull.p64      $Xm,$H34,$t0
601
602         veor            $Xl,$Xl,$Yl
603         veor            $Xh,$Xh,$Yh
604         veor            $Xm,$Xm,$Ym
605         b               .Ldone4x
606
607 .align  4
608 .Ltwo:
609         vext.8          $t1,$Xl,$Xh,#8          @ Karatsuba post-processing
610         veor            $t2,$Xl,$Xh
611         veor            $Xm,$Xm,$t1
612          vld1.64        {$I0-$j1},[$inp]
613         veor            $Xm,$Xm,$t2
614 #ifndef __ARMEB__
615          vrev64.8       $j1,$j1
616          vrev64.8       $I0,$I0
617 #endif
618
619         vpmull.p64      $t2,$Xl,$xC2            @ 1st phase of reduction
620         vmov            $Xh#lo,$Xm#hi           @ Xh|Xm - 256-bit result
621         vmov            $Xm#hi,$Xl#lo           @ Xm is rotated Xl
622          vext.8         $I1,$j1,$j1,#8
623         veor            $Xl,$Xm,$t2
624
625         vext.8          $t2,$Xl,$Xl,#8          @ 2nd phase of reduction
626         vpmull.p64      $Xl,$Xl,$xC2
627         veor            $t2,$t2,$Xh
628         veor            $Xl,$Xl,$t2
629         vext.8          $Xl,$Xl,$Xl,#8
630
631          vpmull.p64     $Yl,$H,$I1              @ H·Ii+1
632          veor           $j1,$j1,$I1
633
634         veor            $t0,$I0,$Xl
635         vext.8          $IN,$t0,$t0,#8
636
637          vpmull2.p64    $Yh,$H,$I1
638          vpmull.p64     $Ym,$Hhl,$j1
639
640         vpmull.p64      $Xl,$H2,$IN             @ H^2·(Xi+Ii)
641         veor            $t0,$t0,$IN
642         vpmull2.p64     $Xh,$H2,$IN
643         vpmull2.p64     $Xm,$Hhl,$t0
644
645         veor            $Xl,$Xl,$Yl
646         veor            $Xh,$Xh,$Yh
647         veor            $Xm,$Xm,$Ym
648         b               .Ldone4x
649
650 .align  4
651 .Lone:
652         vext.8          $t1,$Xl,$Xh,#8          @ Karatsuba post-processing
653         veor            $t2,$Xl,$Xh
654         veor            $Xm,$Xm,$t1
655          vld1.64        {$I0},[$inp]
656         veor            $Xm,$Xm,$t2
657 #ifndef __ARMEB__
658          vrev64.8       $I0,$I0
659 #endif
660
661         vpmull.p64      $t2,$Xl,$xC2            @ 1st phase of reduction
662         vmov            $Xh#lo,$Xm#hi           @ Xh|Xm - 256-bit result
663         vmov            $Xm#hi,$Xl#lo           @ Xm is rotated Xl
664         veor            $Xl,$Xm,$t2
665
666         vext.8          $t2,$Xl,$Xl,#8          @ 2nd phase of reduction
667         vpmull.p64      $Xl,$Xl,$xC2
668         veor            $t2,$t2,$Xh
669         veor            $Xl,$Xl,$t2
670         vext.8          $Xl,$Xl,$Xl,#8
671
672         veor            $t0,$I0,$Xl
673         vext.8          $IN,$t0,$t0,#8
674
675         vpmull.p64      $Xl,$H,$IN
676         veor            $t0,$t0,$IN
677         vpmull2.p64     $Xh,$H,$IN
678         vpmull.p64      $Xm,$Hhl,$t0
679
680 .Ldone4x:
681         vext.8          $t1,$Xl,$Xh,#8          @ Karatsuba post-processing
682         veor            $t2,$Xl,$Xh
683         veor            $Xm,$Xm,$t1
684         veor            $Xm,$Xm,$t2
685
686         vpmull.p64      $t2,$Xl,$xC2            @ 1st phase of reduction
687         vmov            $Xh#lo,$Xm#hi           @ Xh|Xm - 256-bit result
688         vmov            $Xm#hi,$Xl#lo           @ Xm is rotated Xl
689         veor            $Xl,$Xm,$t2
690
691         vext.8          $t2,$Xl,$Xl,#8          @ 2nd phase of reduction
692         vpmull.p64      $Xl,$Xl,$xC2
693         veor            $t2,$t2,$Xh
694         veor            $Xl,$Xl,$t2
695         vext.8          $Xl,$Xl,$Xl,#8
696
697 #ifndef __ARMEB__
698         vrev64.8        $Xl,$Xl
699 #endif
700         vst1.64         {$Xl},[$Xi]             @ write out Xi
701
702         ret
703 .size   gcm_ghash_v8_4x,.-gcm_ghash_v8_4x
704 ___
705
706 }
707 }
708
709 $code.=<<___;
710 .asciz  "GHASH for ARMv8, CRYPTOGAMS by <appro\@openssl.org>"
711 .align  2
712 #endif
713 ___
714
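# The code above is written in a hybrid, mostly ARMv7-style NEON syntax; the
# loops below transliterate it for the requested flavour.  A few
# representative 64-bit translations, for illustration:
#
#       vpmull.p64      q0,q12,q3       ->      pmull   v0.1q,v20.1d,v3.1d
#       vmov            q2#lo,q1#hi     ->      ins     v2.d[0],v1.d[1]
#       cclr            x12,eq          ->      csel    x12,xzr,x12,eq
#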
715 if ($flavour =~ /64/) {                 ######## 64-bit code
716     sub unvmov {
717         my $arg=shift;
718
719         $arg =~ m/q([0-9]+)#(lo|hi),\s*q([0-9]+)#(lo|hi)/o &&
720         sprintf "ins    v%d.d[%d],v%d.d[%d]",$1<8?$1:$1+8,($2 eq "lo")?0:1,
721                                              $3<8?$3:$3+8,($4 eq "lo")?0:1;
722     }
723     foreach(split("\n",$code)) {
724         s/cclr\s+([wx])([^,]+),\s*([a-z]+)/csel $1$2,$1zr,$1$2,$3/o     or
725         s/vmov\.i8/movi/o               or      # fix up legacy mnemonics
726         s/vmov\s+(.*)/unvmov($1)/geo    or
727         s/vext\.8/ext/o                 or
728         s/vshr\.s/sshr\.s/o             or
729         s/vshr/ushr/o                   or
730         s/^(\s+)v/$1/o                  or      # strip off v prefix
731         s/\bbx\s+lr\b/ret/o;
732
733         s/\bq([0-9]+)\b/"v".($1<8?$1:$1+8).".16b"/geo;  # old->new registers
734         s/@\s/\/\//o;                           # old->new style commentary
735
736         # fix up remaining legacy suffixes
737         s/\.[ui]?8(\s)/$1/o;
738         s/\.[uis]?32//o and s/\.16b/\.4s/go;
739         m/\.p64/o and s/\.16b/\.1q/o;           # 1st pmull argument
740         m/l\.p64/o and s/\.16b/\.1d/go;         # 2nd and 3rd pmull arguments
741         s/\.[uisp]?64//o and s/\.16b/\.2d/go;
742         s/\.[42]([sd])\[([0-3])\]/\.$1\[$2\]/o;
743
744         print $_,"\n";
745     }
746 } else {                                ######## 32-bit code
747     sub unvdup32 {
748         my $arg=shift;
749
750         $arg =~ m/q([0-9]+),\s*q([0-9]+)\[([0-3])\]/o &&
751         sprintf "vdup.32        q%d,d%d[%d]",$1,2*$2+($3>>1),$3&1;
752     }
753     sub unvpmullp64 {
754         my ($mnemonic,$arg)=@_;
755
756         if ($arg =~ m/q([0-9]+),\s*q([0-9]+),\s*q([0-9]+)/o) {
757             my $word = 0xf2a00e00|(($1&7)<<13)|(($1&8)<<19)
758                                  |(($2&7)<<17)|(($2&8)<<4)
759                                  |(($3&7)<<1) |(($3&8)<<2);
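            # 0xf2a00e00 is the A1 (ARM) encoding template for VMULL.P64; the
            # INST() macro reorders the bytes and substitutes the Thumb-2
            # leading byte as needed.  A q-register number n selects
            # d-register 2*n, so n&7 lands one bit to the left of the usual
            # Vd/Vn/Vm fields (bits 15:12, 19:16, 3:0) and n&8 supplies the
            # D/N/M bits (22, 7, 5).  The "2" (high-half) variant uses the
            # odd d-registers, i.e. sets the low bit of the Vn and Vm fields: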
760             $word |= 0x00010001  if ($mnemonic =~ "2");
761             # ARMv7 instructions are always encoded little-endian, hence the
762             # byte-by-byte INST() output.  The correct solution is to use the
763             # .inst directive, but older assemblers don't implement it:-(
764             sprintf "INST(0x%02x,0x%02x,0x%02x,0x%02x)\t@ %s %s",
765                         $word&0xff,($word>>8)&0xff,
766                         ($word>>16)&0xff,($word>>24)&0xff,
767                         $mnemonic,$arg;
768         }
769     }
770
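    # Representative 32-bit translations performed below, for illustration:
    #
    #       x0, x12, ...            ->      r0, r12, ...
    #       cclr    x12,eq          ->      it eq + moveq r12,#0
    #       vpmull.p64 q0,q12,q3    ->      INST(...)       @ raw opcode bytes
    #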
771     foreach(split("\n",$code)) {
772         s/\b[wx]([0-9]+)\b/r$1/go;              # new->old registers
773         s/\bv([0-9])\.[12468]+[bsd]\b/q$1/go;   # new->old registers
774         s/\/\/\s?/@ /o;                         # new->old style commentary
775
776         # fix up remaining new-style suffixes
777         s/\],#[0-9]+/]!/o;
778
779         s/cclr\s+([^,]+),\s*([a-z]+)/mov.$2     $1,#0/o                 or
780         s/vdup\.32\s+(.*)/unvdup32($1)/geo                              or
781         s/v?(pmull2?)\.p64\s+(.*)/unvpmullp64($1,$2)/geo                or
782         s/\bq([0-9]+)#(lo|hi)/sprintf "d%d",2*$1+($2 eq "hi")/geo       or
783         s/^(\s+)b\./$1b/o                                               or
784         s/^(\s+)ret/$1bx\tlr/o;
785
786         if (s/^(\s+)mov\.([a-z]+)/$1mov$2/) {
787             print "     it      $2\n";
788         }
789
790         print $_,"\n";
791     }
792 }
793
794 close STDOUT; # enforce flush