# ====================================================================
# Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
# project. The module is, however, dual licensed under OpenSSL and
# CRYPTOGAMS licenses depending on where you obtain it. For further
# details see http://www.openssl.org/~appro/cryptogams/.
# ====================================================================
# The module implements the "4-bit" GCM GHASH function and the underlying
# single multiplication operation in GF(2^128). "4-bit" means that it
# uses a 256-byte per-key table [+32 bytes of shared table]. There is no
# experimental performance data available yet. The only approximation
# that can be made at this point is based on code size. The inner loop is
# 32 instructions long and on a single-issue core should execute in <40
# cycles. Having verified that gcc 3.4 didn't unroll the corresponding
# loop, this assembler loop body was found to be ~3x smaller than the
# compiler-generated one...
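#
# The 32-byte shared table mentioned above is the rem_4bit[] reduction
# table emitted further down. As a minimal reference sketch (not used by
# the generator, and merely one way to view the constants): each entry can
# be computed as the carry-less product i*0x1C20, which reproduces the
# .short values below.
sub rem_4bit_ref {
	my @tab;
	for my $i (0..15) {
		my $r = 0;
		$r ^= 0x1C20<<$_ for grep { ($i>>$_)&1 } (0..3);
		push @tab,$r;
	}
	return @tab;	# 0x0000,0x1C20,0x3840,0x2460,...,0xB5E0
}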
# Rescheduling for the dual-issue pipeline resulted in an 8.5% improvement
# on the Cortex A8 core and ~25 cycles per processed byte (which was
# observed to be ~3 times faster than gcc-generated code:-)
#
# Profiler-assisted and platform-specific optimization resulted in a 7%
# improvement on the Cortex A8 core and ~23.5 cycles per byte.
#
# Add NEON implementation featuring polynomial multiplication, i.e. no
# lookup tables involved. On Cortex A8 it was measured to process one
# byte in 15 cycles, or 55% faster than the integer-only code.
#
# ====================================================================
# Note about the "528B" variant. In the ARM case it makes less sense to
# implement it, for the following reasons:
#
# - the performance improvement won't be anywhere near 50%, because the
#   128-bit shift operation is neatly fused with the 128-bit xor here,
#   and the "528B" variant would eliminate only 4-5 instructions out of
#   32 in the inner loop (meaning that the estimated improvement is ~15%);
# - ARM-based systems are often embedded ones and the extra memory
#   consumption might be unappreciated (for so little improvement);
# Byte order [in]dependence. =========================================
#
# Caller is expected to maintain a specific *dword* order in Htable,
# namely with the *least* significant dword of the 128-bit value at the
# *lower* address. This differs completely from the C code and has
# everything to do with the ldm instruction and the order in which dwords
# are "consumed" by the algorithm. *Byte* order within these dwords is in
# turn whatever the *native* byte order is on the current platform. See
# gcm128.c for a working example.
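#
# As a hedged illustration only (hypothetical helper, not used by the
# generator; assumes a little-endian build and 64-bit Perl integers):
# a 128-bit Htable value Z.hi:Z.lo is expected in memory as four 32-bit
# words, least significant dword first, each dword in native byte order,
# which is the order in which ldmia picks them up below.
sub htable_words_le {
	my ($Zhi,$Zlo) = @_;			# the two 64-bit halves of Z
	return ($Zlo&0xffffffff, $Zlo>>32,	# Z.lo dword at the lower address
		$Zhi&0xffffffff, $Zhi>>32);	# Z.hi dword at the higher address
}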
while (($output=shift) && ($output!~/^\w[\w\-]*\.\w+$/)) {}
open STDOUT,">$output";
$Xi="r0";	# argument block
$Zll="r4";	# variables
################# r13 is stack pointer
################# r15 is program counter
$rem_4bit=$inp;	# used in gcm_gmult_4bit
for ($Zll,$Zlh,$Zhl,$Zhh) {
#if __ARM_ARCH__>=7 && defined(__ARMEL__)
#elif defined(__ARMEB__)
	strb $Thl,[$Xi,#$i+1]
$code.="\t".shift(@args)."\n";
#include "arm_arch.h"
.type rem_4bit,%object
.short 0x0000,0x1C20,0x3840,0x2460
.short 0x7080,0x6CA0,0x48C0,0x54E0
.short 0xE100,0xFD20,0xD940,0xC560
.short 0x9180,0x8DA0,0xA9C0,0xB5E0
.size rem_4bit,.-rem_4bit
.type rem_4bit_get,%function
	sub $rem_4bit,$rem_4bit,#32 @ &rem_4bit
.size rem_4bit_get,.-rem_4bit_get
.global gcm_ghash_4bit
.type gcm_ghash_4bit,%function
	add $len,$inp,$len @ $len to point at the end
	stmdb sp!,{r3-r11,lr} @ save $len/end too
	sub r12,r12,#48 @ &rem_4bit
	ldmia r12,{r4-r11} @ copy rem_4bit ...
	stmdb sp!,{r4-r11} @ ... to stack
	add $Zhh,$Htbl,$nlo,lsl#4
	ldmia $Zhh,{$Zll-$Zhh} @ load Htbl[nlo]
	and $nhi,$Zll,#0xf @ rem
	ldmia $Thh,{$Tll-$Thh} @ load Htbl[nhi]
	eor $Zll,$Tll,$Zll,lsr#4
	ldrh $Tll,[sp,$nhi] @ rem_4bit[rem]
	eor $Zll,$Zll,$Zlh,lsl#28
	eor $Zlh,$Tlh,$Zlh,lsr#4
	eor $Zlh,$Zlh,$Zhl,lsl#28
	eor $Zhl,$Thl,$Zhl,lsr#4
	eor $Zhl,$Zhl,$Zhh,lsl#28
	eor $Zhh,$Thh,$Zhh,lsr#4
	eor $Zhh,$Zhh,$Tll,lsl#16
	add $Thh,$Htbl,$nlo,lsl#4
	and $nlo,$Zll,#0xf @ rem
	ldmia $Thh,{$Tll-$Thh} @ load Htbl[nlo]
	eor $Zll,$Tll,$Zll,lsr#4
	eor $Zll,$Zll,$Zlh,lsl#28
	eor $Zlh,$Tlh,$Zlh,lsr#4
	eor $Zlh,$Zlh,$Zhl,lsl#28
	ldrh $Tll,[sp,$nlo] @ rem_4bit[rem]
	eor $Zhl,$Thl,$Zhl,lsr#4
	ldrplb $nlo,[$inp,$cnt]
	eor $Zhl,$Zhl,$Zhh,lsl#28
	eor $Zhh,$Thh,$Zhh,lsr#4
	and $nhi,$Zll,#0xf @ rem
	eor $Zhh,$Zhh,$Tll,lsl#16 @ ^= rem_4bit[rem]
	ldmia $Thh,{$Tll-$Thh} @ load Htbl[nhi]
	eor $Zll,$Tll,$Zll,lsr#4
	ldrplb $Tll,[$Xi,$cnt]
	eor $Zll,$Zll,$Zlh,lsl#28
	eor $Zlh,$Tlh,$Zlh,lsr#4
	eor $Zlh,$Zlh,$Zhl,lsl#28
	eor $Zhl,$Thl,$Zhl,lsr#4
	eor $Zhl,$Zhl,$Zhh,lsl#28
	eor $Zhh,$Thh,$Zhh,lsr#4
	andpl $nhi,$nlo,#0xf0
	andpl $nlo,$nlo,#0x0f
	eor $Zhh,$Zhh,$Tlh,lsl#16 @ ^= rem_4bit[rem]
	ldr $len,[sp,#32] @ re-load $len/end
&Zsmash("cmp\t$inp,$len","ldrneb\t$nlo,[$inp,#15]");
	ldmia sp!,{r4-r11,pc}
	ldmia sp!,{r4-r11,lr}
	moveq pc,lr @ be binary compatible with V4, yet
	bx lr @ interoperable with Thumb ISA:-)
.size gcm_ghash_4bit,.-gcm_ghash_4bit
.global gcm_gmult_4bit
.type gcm_gmult_4bit,%function
	stmdb sp!,{r4-r11,lr}
	add $Zhh,$Htbl,$nlo,lsl#4
	ldmia $Zhh,{$Zll-$Zhh} @ load Htbl[nlo]
	and $nhi,$Zll,#0xf @ rem
	ldmia $Thh,{$Tll-$Thh} @ load Htbl[nhi]
	eor $Zll,$Tll,$Zll,lsr#4
	ldrh $Tll,[$rem_4bit,$nhi] @ rem_4bit[rem]
	eor $Zll,$Zll,$Zlh,lsl#28
	eor $Zlh,$Tlh,$Zlh,lsr#4
	eor $Zlh,$Zlh,$Zhl,lsl#28
	eor $Zhl,$Thl,$Zhl,lsr#4
	eor $Zhl,$Zhl,$Zhh,lsl#28
	eor $Zhh,$Thh,$Zhh,lsr#4
	eor $Zhh,$Zhh,$Tll,lsl#16
	add $Thh,$Htbl,$nlo,lsl#4
	and $nlo,$Zll,#0xf @ rem
	ldmia $Thh,{$Tll-$Thh} @ load Htbl[nlo]
	eor $Zll,$Tll,$Zll,lsr#4
	eor $Zll,$Zll,$Zlh,lsl#28
	eor $Zlh,$Tlh,$Zlh,lsr#4
	eor $Zlh,$Zlh,$Zhl,lsl#28
	ldrh $Tll,[$rem_4bit,$nlo] @ rem_4bit[rem]
	eor $Zhl,$Thl,$Zhl,lsr#4
	ldrplb $nlo,[$Xi,$cnt]
	eor $Zhl,$Zhl,$Zhh,lsl#28
	eor $Zhh,$Thh,$Zhh,lsr#4
	and $nhi,$Zll,#0xf @ rem
	eor $Zhh,$Zhh,$Tll,lsl#16 @ ^= rem_4bit[rem]
	ldmia $Thh,{$Tll-$Thh} @ load Htbl[nhi]
	eor $Zll,$Tll,$Zll,lsr#4
	eor $Zll,$Zll,$Zlh,lsl#28
	eor $Zlh,$Tlh,$Zlh,lsr#4
	ldrh $Tll,[$rem_4bit,$nhi] @ rem_4bit[rem]
	eor $Zlh,$Zlh,$Zhl,lsl#28
	eor $Zhl,$Thl,$Zhl,lsr#4
	eor $Zhl,$Zhl,$Zhh,lsl#28
	eor $Zhh,$Thh,$Zhh,lsr#4
	andpl $nhi,$nlo,#0xf0
	andpl $nlo,$nlo,#0x0f
	eor $Zhh,$Zhh,$Tll,lsl#16 @ ^= rem_4bit[rem]
	ldmia sp!,{r4-r11,pc}
	ldmia sp!,{r4-r11,lr}
	moveq pc,lr @ be binary compatible with V4, yet
	bx lr @ interoperable with Thumb ISA:-)
.size gcm_gmult_4bit,.-gcm_gmult_4bit
my $cnt=$Htbl;	# $Htbl is used once in the very beginning
my ($Hhi, $Hlo, $Zo, $T, $xi, $mod) = map("d$_",(0..7));
my ($Qhi, $Qlo, $Z, $R, $zero, $Qpost, $IN) = map("q$_",(8..15));
# Z:Zo keeps the 128-bit result shifted by 1 to the right, with the bottom
# bit in Zo. Or should I say "top bit", because GHASH is specified in
# reverse bit order? Otherwise it's a straightforward 128-bit H by one
# input byte multiplication and modulo-reduction, times 16.
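#
# For reference: the GHASH field is GF(2^128) defined by the irreducible
# polynomial x^128 + x^7 + x^2 + x + 1; in GHASH's reflected bit order its
# low terms pack into the single byte 0xe1, which is why the reduction
# constant loaded below with "vmov.i32 $mod,#0xe1" is all that is needed.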
sub Dlo()	{ shift=~m|q([1]?[0-9])|?"d".($1*2):""; }
sub Dhi()	{ shift=~m|q([1]?[0-9])|?"d".($1*2+1):""; }
sub Q()		{ shift=~m|d([1-3]?[02468])|?"q".($1/2):""; }
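# Helpers mapping quad registers to their double halves and back, e.g.
# &Dlo("q8") eq "d16", &Dhi("q8") eq "d17", &Q("d16") eq "q8".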
.global gcm_gmult_neon
.type gcm_gmult_neon,%function
	sub $Htbl,#16 @ point at H in GCM128_CTX
	vld1.64 `&Dhi("$IN")`,[$Xi,:64]! @ load Xi
	vmov.i32 $mod,#0xe1 @ our irreducible polynomial
	vld1.64 `&Dlo("$IN")`,[$Xi,:64]!
	vldmia $Htbl,{$Hhi-$Hlo} @ load H
	vdup.8 $xi,`&Dlo("$IN")`[0] @ broadcast lowest byte
.size gcm_gmult_neon,.-gcm_gmult_neon
.global gcm_ghash_neon
.type gcm_ghash_neon,%function
	vld1.64 `&Dhi("$Z")`,[$Xi,:64]! @ load Xi
	vmov.i32 $mod,#0xe1 @ our irreducible polynomial
	vld1.64 `&Dlo("$Z")`,[$Xi,:64]!
	vldmia $Xi,{$Hhi-$Hlo} @ load H
	vld1.64 `&Dhi("$IN")`,[$inp]! @ load inp
	vld1.64 `&Dlo("$IN")`,[$inp]!
	veor $IN,$Z @ inp^=Xi
	vdup.8 $xi,`&Dlo("$IN")`[0] @ broadcast lowest byte
	vmull.p8 $Qlo,$Hlo,$xi @ H.lo·Xi[i]
	vmull.p8 $Qhi,$Hhi,$xi @ H.hi·Xi[i]
	vext.8 $IN,$zero,#1 @ IN>>=8
	veor $Z,$Qpost @ modulo-scheduled part
	vshl.i64 `&Dlo("$R")`,#48
	vdup.8 $xi,`&Dlo("$IN")`[0] @ broadcast lowest byte
	veor $T,`&Dlo("$Qlo")`,`&Dlo("$Z")`
	veor `&Dhi("$Z")`,`&Dlo("$R")`
	vsli.8 $Zo,$T,#1 @ compose the "carry" byte
	vext.8 $Z,$zero,#1 @ Z>>=8
	vmull.p8 $R,$Zo,$mod @ "carry"·0xe1
	vshr.u8 $Zo,$T,#7 @ save Z's bottom bit
	vext.8 $Qpost,$Qlo,$zero,#1 @ Qlo>>=8
	veor $Z,$Qpost @ modulo-scheduled artefact
	vshl.i64 `&Dlo("$R")`,#48
	veor `&Dhi("$Z")`,`&Dlo("$R")`
	@ finalization, normalize Z:Zo
	vand $Zo,$mod @ suffices to mask the bit
	vshr.u64 `&Dhi(&Q("$Zo"))`,`&Dlo("$Z")`,#63
	vorr $Z,`&Q("$Zo")` @ Z=Z:Zo<<1
	vst1.64 `&Dhi("$Z")`,[$Xi,:64]! @ write out Xi
	vst1.64 `&Dlo("$Z")`,[$Xi,:64]
.size gcm_ghash_neon,.-gcm_ghash_neon
.asciz "GHASH for ARMv4/NEON, CRYPTOGAMS by <appro\@openssl.org>"
$code =~ s/\`([^\`]*)\`/eval $1/gem;
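# Note: 0xe12fff1e is the instruction encoding of "bx lr"; emitting it as a
# raw .word lets the module assemble even with -march=armv4, where the
# assembler would reject the bx mnemonic.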
$code =~ s/\bbx\s+lr\b/.word\t0xe12fff1e/gm; # make it possible to compile with -march=armv4
close STDOUT; # enforce flush