# Copyright 2010-2018 The OpenSSL Project Authors. All Rights Reserved.
#
# Licensed under the Apache License 2.0 (the "License"). You may not use
# this file except in compliance with the License. You can obtain a copy
# in the file LICENSE in the source distribution or at
# https://www.openssl.org/source/license.html

# ====================================================================
# Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
# project. The module is, however, dual licensed under OpenSSL and
# CRYPTOGAMS licenses depending on where you obtain it. For further
# details see http://www.openssl.org/~appro/cryptogams/.
# ====================================================================
# The module implements the "4-bit" GCM GHASH function and the
# underlying single multiplication operation in GF(2^128). "4-bit"
# means that it uses a 256-byte per-key table [+32 bytes of shared
# table]. There is no experimental performance data available yet.
# The only approximation that can be made at this point is based on
# code size. The inner loop is 32 instructions long and on a
# single-issue core should execute in <40 cycles. Having verified that
# gcc 3.4 didn't unroll the corresponding loop, this assembler loop
# body was found to be ~3x smaller than the compiler-generated one...
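#
# For reference, the 32-byte shared table is the rem_4bit table emitted
# below; it holds the reduction constants folded in for the four bits
# shifted out of Z at each step. An editor's sketch (illustrative only,
# not used by the module) of how its entries derive from 0xE1, the
# bit-reflected encoding of the low terms x^7+x^2+x+1 of the GCM
# polynomial:
sub rem_4bit_ref {
    my @tbl;
    for my $n (0 .. 15) {
        my $v = 0;
        for my $b (0 .. 3) {	# each bit shifted out of Z contributes
            $v ^= 0xE100 >> (3 - $b) if ($n >> $b) & 1;
        }
        push @tbl, $v;		# 0x0000,0x1C20,0x3840,0x2460,...
    }
    return @tbl;		# matches the .short data in rem_4bit below
}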
# Rescheduling for the dual-issue pipeline resulted in an 8.5%
# improvement on a Cortex A8 core and ~25 cycles per processed byte
# (which was observed to be ~3 times faster than gcc-generated code:-)
#
# Profiler-assisted and platform-specific optimization resulted in a 7%
# improvement on a Cortex A8 core and ~23.5 cycles per byte.
# Add a NEON implementation featuring polynomial multiplication, i.e. no
# lookup tables involved. On Cortex A8 it was measured to process one
# byte in 15 cycles, or 55% faster than the integer-only code.
# Switch to the multiplication algorithm suggested in the paper referred
# to below and combine it with the reduction algorithm from the x86
# module. The performance improvement over the previous version varies
# from 65% on Snapdragon S4 to 110% on Cortex A9. In absolute terms
# Cortex A8 processes one byte in 8.45 cycles, A9 in 10.2, A15 in 7.63,
# and Snapdragon S4 in 9.33.
#
# Câmara, D.; Gouvêa, C. P. L.; López, J. & Dahab, R.: Fast Software
# Polynomial Multiplication on ARM Processors using the NEON Engine.
#
# http://conradoplg.cryptoland.net/files/2010/12/mocrysen13.pdf
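#
# As a reference model (editor's sketch, not used by the module; assumes
# a perl with 64-bit integers): the primitive the NEON path assembles
# from vmull.p8 partial products is a carry-less 64x64->128-bit
# multiplication, i.e. schoolbook multiplication over GF(2):
sub clmul64_ref {
    my ($x, $y) = @_;
    my ($hi, $lo) = (0, 0);
    for my $i (0 .. 63) {
        next unless ($y >> $i) & 1;
        $lo ^= ($x << $i) & 0xFFFF_FFFF_FFFF_FFFF;	# low 64 bits of x<<i
        $hi ^= $x >> (64 - $i) if $i;			# bits carried past bit 63
    }
    return ($hi, $lo);
}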
# ====================================================================
# Note about the "528B" variant. In the ARM case it makes less sense to
# implement it, for the following reasons:
#
# - the performance improvement won't be anywhere near 50%, because the
#   128-bit shift operation is neatly fused with the 128-bit xor here,
#   and the "528B" variant would eliminate only 4-5 instructions out of
#   32 in the inner loop (meaning that the estimated improvement is
#   ~15%);
# - ARM-based systems are often embedded ones and the extra memory
#   consumption might be unappreciated (for so little improvement);
# Byte order [in]dependence. =========================================
#
# Caller is expected to maintain a specific *dword* order in Htable,
# namely with the *least* significant dword of the 128-bit value at the
# *lower* address. This differs completely from the C code and has
# everything to do with the ldm instruction and the order in which
# dwords are "consumed" by the algorithm. *Byte* order within these
# dwords in turn is whatever the *native* byte order on the current
# platform is. See gcm128.c for a working example.
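#
# To make the convention concrete, an editor's illustration (not used by
# the module, assumes a perl with 64-bit integers) of how one 128-bit
# Htable entry would be laid out given its two 64-bit halves:
sub pack_htable_entry_ref {
    my ($hi, $lo) = @_;		# 128-bit value as hi:lo halves
    return pack "Q2", $lo, $hi;	# least significant dword at the lower
				# address, native byte order within each
}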
# $output is the last argument if it looks like a file (it has an extension)
# $flavour is the first argument if it doesn't look like a file
$output = $#ARGV >= 0 && $ARGV[$#ARGV] =~ m|\.\w+$| ? pop : undef;
$flavour = $#ARGV >= 0 && $ARGV[0] !~ m|\.| ? shift : undef;
if ($flavour && $flavour ne "void") {
    $0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
    ( $xlate="${dir}arm-xlate.pl" and -f $xlate ) or
    ( $xlate="${dir}../../perlasm/arm-xlate.pl" and -f $xlate) or
    die "can't locate arm-xlate.pl";

    open STDOUT,"| \"$^X\" $xlate $flavour \"$output\""
        or die "can't call $xlate: $!";
} else {
    $output and open STDOUT,">$output";
}
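# A typical invocation therefore looks like (illustrative):
#
#	perl ghash-armv4.pl linux32 ghash-armv4.S
#
# where the flavour argument selects the arm-xlate.pl output dialect.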
$Xi="r0";	# argument block
$Zll="r4";	# variables
################# r13 is stack pointer
################# r15 is program counter
$rem_4bit=$inp;	# used in gcm_gmult_4bit
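# For reference, the C prototypes these registers serve, as declared for
# the assembler paths in gcm128.c:
#
#	void gcm_gmult_4bit(u64 Xi[2], const u128 Htable[16]);
#	void gcm_ghash_4bit(u64 Xi[2], const u128 Htable[16],
#			    const u8 *inp, size_t len);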
for ($Zll,$Zlh,$Zhl,$Zhh) {
#if __ARM_ARCH__>=7 && defined(__ARMEL__)
#elif defined(__ARMEB__)
	strb	$Tlh,[$Xi,#$i+2]
	strb	$Thl,[$Xi,#$i+1]
$code.="\t".shift(@args)."\n";
#include "arm_arch.h"

#if defined(__thumb2__) || defined(__clang__)
.syntax	unified
#define ldrplb  ldrbpl
#define ldrneb  ldrbne
#endif
#if defined(__thumb2__)
.thumb
#else
.code	32
#endif
.type	rem_4bit,%object
.align	5
rem_4bit:
.short	0x0000,0x1C20,0x3840,0x2460
.short	0x7080,0x6CA0,0x48C0,0x54E0
.short	0xE100,0xFD20,0xD940,0xC560
.short	0x9180,0x8DA0,0xA9C0,0xB5E0
.size	rem_4bit,.-rem_4bit
.type	rem_4bit_get,%function
rem_4bit_get:
#if defined(__thumb2__)
	adr	$rem_4bit,rem_4bit
#else
	sub	$rem_4bit,pc,#8+32	@ &rem_4bit
#endif
.size	rem_4bit_get,.-rem_4bit_get
.global	gcm_ghash_4bit
.type	gcm_ghash_4bit,%function
.align	4
gcm_ghash_4bit:
#if defined(__thumb2__)
	adr	r12,rem_4bit
#else
	sub	r12,pc,#8+48		@ &rem_4bit
#endif
	add	$len,$inp,$len		@ $len to point at the end
	stmdb	sp!,{r3-r11,lr}		@ save $len/end too

	ldmia	r12,{r4-r11}		@ copy rem_4bit ...
	stmdb	sp!,{r4-r11}		@ ... to stack
	add	$Zhh,$Htbl,$nlo,lsl#4
	ldmia	$Zhh,{$Zll-$Zhh}	@ load Htbl[nlo]
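	@ The 4-bit algorithm: Z starts as Htbl[low nibble of the last
	@ byte]; each step then shifts Z right by 4, folds the spilled
	@ nibble back in via its rem_4bit constant (xored into the top
	@ 16 bits), and xors in Htbl[next nibble].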
	and	$nhi,$Zll,#0xf		@ rem
	ldmia	$Thh,{$Tll-$Thh}	@ load Htbl[nhi]
	eor	$Zll,$Tll,$Zll,lsr#4
	ldrh	$Tll,[sp,$nhi]		@ rem_4bit[rem]
	eor	$Zll,$Zll,$Zlh,lsl#28
	eor	$Zlh,$Tlh,$Zlh,lsr#4
	eor	$Zlh,$Zlh,$Zhl,lsl#28
	eor	$Zhl,$Thl,$Zhl,lsr#4
	eor	$Zhl,$Zhl,$Zhh,lsl#28
	eor	$Zhh,$Thh,$Zhh,lsr#4
	eor	$Zhh,$Zhh,$Tll,lsl#16
	add	$Thh,$Htbl,$nlo,lsl#4
	and	$nlo,$Zll,#0xf		@ rem
	ldmia	$Thh,{$Tll-$Thh}	@ load Htbl[nlo]
	eor	$Zll,$Tll,$Zll,lsr#4
	eor	$Zll,$Zll,$Zlh,lsl#28
	eor	$Zlh,$Tlh,$Zlh,lsr#4
	eor	$Zlh,$Zlh,$Zhl,lsl#28
	ldrh	$Tll,[sp,$nlo]		@ rem_4bit[rem]
	eor	$Zhl,$Thl,$Zhl,lsr#4
	ldrplb	$nlo,[$inp,$cnt]
	eor	$Zhl,$Zhl,$Zhh,lsl#28
	eor	$Zhh,$Thh,$Zhh,lsr#4
	and	$nhi,$Zll,#0xf		@ rem
	eor	$Zhh,$Zhh,$Tll,lsl#16	@ ^= rem_4bit[rem]
	ldmia	$Thh,{$Tll-$Thh}	@ load Htbl[nhi]
	eor	$Zll,$Tll,$Zll,lsr#4
	ldrplb	$Tll,[$Xi,$cnt]
	eor	$Zll,$Zll,$Zlh,lsl#28
	eor	$Zlh,$Tlh,$Zlh,lsr#4
	eor	$Zlh,$Zlh,$Zhl,lsl#28
	eor	$Zhl,$Thl,$Zhl,lsr#4
	eor	$Zhl,$Zhl,$Zhh,lsl#28
	eor	$Zhh,$Thh,$Zhh,lsr#4
	andpl	$nhi,$nlo,#0xf0
	andpl	$nlo,$nlo,#0x0f
	eor	$Zhh,$Zhh,$Tlh,lsl#16	@ ^= rem_4bit[rem]
	ldr	$len,[sp,#32]		@ re-load $len/end
&Zsmash("cmp\t$inp,$len","\n".
	"#ifdef __thumb2__\n".
	"	it	ne\n".
	"#endif\n".
	"	ldrneb	$nlo,[$inp,#15]");
#if __ARM_ARCH__>=5
	ldmia	sp!,{r4-r11,pc}
#else
	ldmia	sp!,{r4-r11,lr}
	tst	lr,#1
	moveq	pc,lr			@ be binary compatible with V4, yet
	bx	lr			@ interoperable with Thumb ISA:-)
#endif
.size	gcm_ghash_4bit,.-gcm_ghash_4bit
.global	gcm_gmult_4bit
.type	gcm_gmult_4bit,%function
gcm_gmult_4bit:
	stmdb	sp!,{r4-r11,lr}

	add	$Zhh,$Htbl,$nlo,lsl#4
	ldmia	$Zhh,{$Zll-$Zhh}	@ load Htbl[nlo]
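	@ same per-nibble inner step as in gcm_ghash_4bit above, only the
	@ input block is Xi itself and rem_4bit is addressed directly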
	and	$nhi,$Zll,#0xf		@ rem
	ldmia	$Thh,{$Tll-$Thh}	@ load Htbl[nhi]
	eor	$Zll,$Tll,$Zll,lsr#4
	ldrh	$Tll,[$rem_4bit,$nhi]	@ rem_4bit[rem]
	eor	$Zll,$Zll,$Zlh,lsl#28
	eor	$Zlh,$Tlh,$Zlh,lsr#4
	eor	$Zlh,$Zlh,$Zhl,lsl#28
	eor	$Zhl,$Thl,$Zhl,lsr#4
	eor	$Zhl,$Zhl,$Zhh,lsl#28
	eor	$Zhh,$Thh,$Zhh,lsr#4
	eor	$Zhh,$Zhh,$Tll,lsl#16
	add	$Thh,$Htbl,$nlo,lsl#4
	and	$nlo,$Zll,#0xf		@ rem
	ldmia	$Thh,{$Tll-$Thh}	@ load Htbl[nlo]
	eor	$Zll,$Tll,$Zll,lsr#4
	eor	$Zll,$Zll,$Zlh,lsl#28
	eor	$Zlh,$Tlh,$Zlh,lsr#4
	eor	$Zlh,$Zlh,$Zhl,lsl#28
	ldrh	$Tll,[$rem_4bit,$nlo]	@ rem_4bit[rem]
	eor	$Zhl,$Thl,$Zhl,lsr#4
	ldrplb	$nlo,[$Xi,$cnt]
	eor	$Zhl,$Zhl,$Zhh,lsl#28
	eor	$Zhh,$Thh,$Zhh,lsr#4
	and	$nhi,$Zll,#0xf		@ rem
	eor	$Zhh,$Zhh,$Tll,lsl#16	@ ^= rem_4bit[rem]
	ldmia	$Thh,{$Tll-$Thh}	@ load Htbl[nhi]
	eor	$Zll,$Tll,$Zll,lsr#4
	eor	$Zll,$Zll,$Zlh,lsl#28
	eor	$Zlh,$Tlh,$Zlh,lsr#4
	ldrh	$Tll,[$rem_4bit,$nhi]	@ rem_4bit[rem]
	eor	$Zlh,$Zlh,$Zhl,lsl#28
	eor	$Zhl,$Thl,$Zhl,lsr#4
	eor	$Zhl,$Zhl,$Zhh,lsl#28
	eor	$Zhh,$Thh,$Zhh,lsr#4
	andpl	$nhi,$nlo,#0xf0
	andpl	$nlo,$nlo,#0x0f
	eor	$Zhh,$Zhh,$Tll,lsl#16	@ ^= rem_4bit[rem]
#if __ARM_ARCH__>=5
	ldmia	sp!,{r4-r11,pc}
#else
	ldmia	sp!,{r4-r11,lr}
	tst	lr,#1
	moveq	pc,lr			@ be binary compatible with V4, yet
	bx	lr			@ interoperable with Thumb ISA:-)
#endif
.size	gcm_gmult_4bit,.-gcm_gmult_4bit
my ($Xl,$Xm,$Xh,$IN)=map("q$_",(0..3));
my ($t0,$t1,$t2,$t3)=map("q$_",(8..12));
my ($Hlo,$Hhi,$Hhl,$k48,$k32,$k16)=map("d$_",(26..31));
sub clmul64x64 {
my ($r,$a,$b)=@_;
$code.=<<___;
	vext.8		$t0#lo, $a, $a, #1	@ A1
	vmull.p8	$t0, $t0#lo, $b		@ F = A1*B
	vext.8		$r#lo, $b, $b, #1	@ B1
	vmull.p8	$r, $a, $r#lo		@ E = A*B1
	vext.8		$t1#lo, $a, $a, #2	@ A2
	vmull.p8	$t1, $t1#lo, $b		@ H = A2*B
	vext.8		$t3#lo, $b, $b, #2	@ B2
	vmull.p8	$t3, $a, $t3#lo		@ G = A*B2
	vext.8		$t2#lo, $a, $a, #3	@ A3
	veor		$t0, $t0, $r		@ L = E + F
	vmull.p8	$t2, $t2#lo, $b		@ J = A3*B
	vext.8		$r#lo, $b, $b, #3	@ B3
	veor		$t1, $t1, $t3		@ M = G + H
	vmull.p8	$r, $a, $r#lo		@ I = A*B3
	veor		$t0#lo, $t0#lo, $t0#hi	@ t0 = (L) (P0 + P1) << 8
	vand		$t0#hi, $t0#hi, $k48
	vext.8		$t3#lo, $b, $b, #4	@ B4
	veor		$t1#lo, $t1#lo, $t1#hi	@ t1 = (M) (P2 + P3) << 16
	vand		$t1#hi, $t1#hi, $k32
	vmull.p8	$t3, $a, $t3#lo		@ K = A*B4
	veor		$t2, $t2, $r		@ N = I + J
	veor		$t0#lo, $t0#lo, $t0#hi
	veor		$t1#lo, $t1#lo, $t1#hi
	veor		$t2#lo, $t2#lo, $t2#hi	@ t2 = (N) (P4 + P5) << 24
	vand		$t2#hi, $t2#hi, $k16
	vext.8		$t0, $t0, $t0, #15
	veor		$t3#lo, $t3#lo, $t3#hi	@ t3 = (K) (P6 + P7) << 32
	vext.8		$t1, $t1, $t1, #14
	veor		$t2#lo, $t2#lo, $t2#hi
	vmull.p8	$r, $a, $b		@ D = A*B
	vext.8		$t3, $t3, $t3, #12
	vext.8		$t2, $t2, $t2, #13
#if __ARM_MAX_ARCH__>=7
.fpu	neon

.global	gcm_init_neon
.type	gcm_init_neon,%function
gcm_init_neon:
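	@ computes the "twisted" H used by the NEON routines below: H is
	@ rotated left by one bit across all 128 bits and, when the top
	@ bit wraps around, the reflected reduction constant 0xc2...01 is
	@ folded in, i.e. H is multiplied by x in the bit-reflected basis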
	vld1.64		$IN#hi,[r1]!		@ load H
	vshr.u64	$t0#lo,#63		@ t0=0xc2....01
	vshr.u64	$Hlo,$IN#lo,#63
	vshr.s8		$t1,#7			@ broadcast carry bit
	vorr		$IN#hi,$Hlo		@ H<<<=1
	veor		$IN,$IN,$t0		@ twisted H
.size	gcm_init_neon,.-gcm_init_neon
.global	gcm_gmult_neon
.type	gcm_gmult_neon,%function
gcm_gmult_neon:
	vld1.64		$IN#hi,[$Xi]!		@ load Xi
	vld1.64		$IN#lo,[$Xi]!
	vmov.i64	$k48,#0x0000ffffffffffff
	vldmia		$Htbl,{$Hlo-$Hhi}	@ load twisted H
	vmov.i64	$k32,#0x00000000ffffffff
	vmov.i64	$k16,#0x000000000000ffff
	veor		$Hhl,$Hlo,$Hhi		@ Karatsuba pre-processing
.size	gcm_gmult_neon,.-gcm_gmult_neon
.global	gcm_ghash_neon
.type	gcm_ghash_neon,%function
gcm_ghash_neon:
	vld1.64		$Xl#hi,[$Xi]!		@ load Xi
	vld1.64		$Xl#lo,[$Xi]!
	vmov.i64	$k48,#0x0000ffffffffffff
	vldmia		$Htbl,{$Hlo-$Hhi}	@ load twisted H
	vmov.i64	$k32,#0x00000000ffffffff
	vmov.i64	$k16,#0x000000000000ffff
	veor		$Hhl,$Hlo,$Hhi		@ Karatsuba pre-processing

	vld1.64		$IN#hi,[$inp]!		@ load inp
	vld1.64		$IN#lo,[$inp]!
	veor		$IN,$Xl			@ inp^=Xi
&clmul64x64	($Xl,$Hlo,"$IN#lo");		# H.lo·Xi.lo
	veor		$IN#lo,$IN#lo,$IN#hi	@ Karatsuba pre-processing
&clmul64x64	($Xm,$Hhl,"$IN#lo");		# (H.lo+H.hi)·(Xi.lo+Xi.hi)
&clmul64x64	($Xh,$Hhi,"$IN#hi");		# H.hi·Xi.hi
	veor		$Xm,$Xm,$Xl		@ Karatsuba post-processing
	veor		$Xm,$Xm,$Xh
	veor		$Xl#hi,$Xl#hi,$Xm#lo
	veor		$Xh#lo,$Xh#lo,$Xm#hi	@ Xh|Xl - 256-bit result
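	@ i.e. Karatsuba: H·Xi = H.hi·Xi.hi·x^128 + H.lo·Xi.lo
	@    + [(H.hi+H.lo)·(Xi.hi+Xi.lo) + H.hi·Xi.hi + H.lo·Xi.lo]·x^64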
	@ equivalent of reduction_avx from ghash-x86_64.pl
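	@ in the bit-reflected basis the GCM modulus x^128+x^7+x^2+x+1 is
	@ applied with left shifts by 57, 62 and 63 (i.e. 64-7, 64-2 and
	@ 64-1), followed by the mirrored right shifts by 1, 2 and 7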
	vshl.i64	$t1,$Xl,#57		@ 1st phase
	veor		$Xl#hi,$Xl#hi,$t2#lo	@
	veor		$Xh#lo,$Xh#lo,$t2#hi

	vshr.u64	$t2,$Xl,#1		@ 2nd phase
	vshr.u64	$Xl,$Xl,#1		@

	vst1.64		$Xl#hi,[$Xi]!		@ write out Xi
.size	gcm_ghash_neon,.-gcm_ghash_neon
#endif
.asciz	"GHASH for ARMv4/NEON, CRYPTOGAMS by <appro\@openssl.org>"
foreach (split("\n",$code)) {
	s/\`([^\`]*)\`/eval $1/geo;

	s/\bq([0-9]+)#(lo|hi)/sprintf "d%d",2*$1+($2 eq "hi")/geo	or
	s/\bret\b/bx	lr/go						or
	s/\bbx\s+lr\b/.word\t0xe12fff1e/go;	# make it possible to compile with -march=armv4
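	# e.g. the first substitution rewrites "q8#lo" as "d16" and
	# "q8#hi" as "d17", resolving the half-register notation used in
	# the NEON code above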
	print $_,"\n";
}

close STDOUT or die "error closing STDOUT"; # enforce flush