# ====================================================================
# Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
# project. The module is, however, dual licensed under OpenSSL and
# CRYPTOGAMS licenses depending on where you obtain it. For further
# details see http://www.openssl.org/~appro/cryptogams/.
# ====================================================================
# sha1_block procedure for ARMv4.
# Size/performance trade-off
# ====================================================================
# impl		size in bytes	comp cycles[*]	measured performance
# ====================================================================
# armv4-small	392/+29%	1958/+64%	2250/+96%
# armv4-compact	740/+89%	1552/+26%	1840/+22%
# armv4-large	1420/+92%	1307/+19%	1370/+34%[***]
# full unroll	~5100/+260%	~1260/+4%	~1300/+5%
# ====================================================================
# thumb		= same as 'small' but in Thumb instructions[**] and
#		  with recurring code in two private functions;
# small		= detached Xload/update, loops are folded;
# compact	= detached Xload/update, 5x unroll;
# large		= interleaved Xload/update, 5x unroll;
# full unroll	= interleaved Xload/update, full unroll, estimated[!];
# [*]	Manually counted instructions in "grand" loop body. Measured
#	performance is affected by prologue and epilogue overhead,
#	i-cache availability, branch penalties, etc.
# [**]	While each Thumb instruction is half the size of an ARM one,
#	the Thumb set is not as diverse: e.g., there are only two
#	arithmetic instructions with 3 arguments, no [fixed] rotate,
#	and addressing modes are limited. As a result it takes more
#	instructions to do the same job in Thumb, so the code is never
#	half the size and is always slower.
# [***]	which is also ~35% better than compiler-generated code. A dual-
#	issue Cortex A8 core was measured to process an input block in
# Rescheduling for the dual-issue pipeline resulted in a 13% improvement
# on the Cortex A8 core and, in absolute terms, ~870 cycles per input
# block [or 13.6 cycles per byte].
# Profiler-assisted and platform-specific optimization resulted in a 10%
# improvement on the Cortex A8 core and 12.2 cycles per byte.
# Add NEON implementation (see sha1-586.pl for background info). On
# Cortex A8 it was measured to process one byte in 6.7 cycles or >80%
# faster than the integer-only code. Because the [fully unrolled] NEON
# code is ~2.5x larger and some redundant instructions are executed
# when processing the last block, the improvement is not as big for the
# smallest blocks, only ~30%. Snapdragon S4 is a tad faster, 6.4 cycles
# per byte, which is also >80% faster than the integer-only code.
# Add ARMv8 code path performing at 2.35 cpb on Apple A7.
while (($output=shift) && ($output!~/^\w[\w\-]*\.\w+$/)) {}
open STDOUT,">$output";
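
# For orientation while reading the round bodies below: every code path in
# this file computes the standard SHA-1 step E += ROL(A,5) + F(B,C,D) + X[i]
# + K, followed by B = ROL(B,30) (the ror#27/ror#2 forms are the equivalent
# right rotations, applied to lazily-rotated values). A minimal pure-Perl
# sketch of one such step, illustrative only and not used by the generator:
sub sha1_step_ref {
my ($a,$b,$c,$d,$e,$w,$k,$round)=@_;
my $rol = sub { my ($x,$n)=@_; (($x<<$n)|($x>>(32-$n)))&0xffffffff };
my $f = $round<20 ? ($b&$c)|(~$b&0xffffffff&$d)		# Ch,     rounds  0-19
      : $round<40 ? $b^$c^$d				# Parity, rounds 20-39
      : $round<60 ? ($b&$c)|($b&$d)|($c&$d)		# Maj,    rounds 40-59
      :             $b^$c^$d;				# Parity, rounds 60-79
$e = ($e+&$rol($a,5)+$f+$w+$k)&0xffffffff;
return ($e,$a,&$rol($b,30),$c,$d);			# rotated working state
}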
my ($a,$b,$c,$d,$e,$opt1,$opt2)=@_;
	add	$e,$K,$e,ror#2			@ E+=K_xx_xx
	eor	$t2,$t2,$t3			@ 1 cycle stall
	eor	$t1,$c,$d			@ F_xx_xx
	add	$e,$e,$a,ror#27			@ E+=ROR(A,27)
	eor	$t0,$t0,$t2,ror#31
	add	$e,$e,$t0			@ E+=X[i]
my ($a,$b,$c,$d,$e)=@_;
	add	$e,$K,$e,ror#2			@ E+=K_00_19
	orr	$t0,$t0,$t1,lsl#8
	eor	$t1,$c,$d			@ F_xx_xx
	orr	$t0,$t0,$t2,lsl#16
	add	$e,$e,$a,ror#27			@ E+=ROR(A,27)
	orr	$t0,$t0,$t3,lsl#24
	ldr	$t0,[$inp],#4			@ handles unaligned
	add	$e,$K,$e,ror#2			@ E+=K_00_19
	eor	$t1,$c,$d			@ F_xx_xx
	add	$e,$e,$a,ror#27			@ E+=ROR(A,27)
	rev	$t0,$t0				@ byte swap
	add	$e,$e,$t0			@ E+=X[i]
	eor	$t1,$t1,$d,ror#2		@ F_00_19(B,C,D)
	add	$e,$e,$t1			@ E+=F_00_19(B,C,D)
my ($a,$b,$c,$d,$e)=@_;
&Xupdate(@_,"and $t1,$b,$t1,ror#2");
	eor	$t1,$t1,$d,ror#2		@ F_00_19(B,C,D)
	add	$e,$e,$t1			@ E+=F_00_19(B,C,D)
my ($a,$b,$c,$d,$e)=@_;
&Xupdate(@_,"eor $t1,$b,$t1,ror#2");
	add	$e,$e,$t1			@ E+=F_20_39(B,C,D)
my ($a,$b,$c,$d,$e)=@_;
&Xupdate(@_,"and $t1,$b,$t1,ror#2","and $t2,$c,$d");
	add	$e,$e,$t1			@ E+=F_40_59(B,C,D)
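	@ F_40_59 is the majority function: the two adds accumulate
	@ B&(C^D) and C&D (computed in Xupdate above, modulo the lazy
	@ ror#2 bookkeeping), and (B&(C^D))+(C&D) == (B&C)|(B&D)|(C&D)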
#include "arm_arch.h"
.global	sha1_block_data_order
.type	sha1_block_data_order,%function
sha1_block_data_order:
#if __ARM_MAX_ARCH__>=7
	sub	r3,pc,#8		@ sha1_block_data_order
	ldr	r12,.LOPENSSL_armcap
	ldr	r12,[r3,r12]		@ OPENSSL_armcap_P
	stmdb	sp!,{r4-r12,lr}
	add	$len,$inp,$len,lsl#6	@ $len to point at the end of $inp
	ldmia	$ctx,{$a,$b,$c,$d,$e}
	mov	$e,$e,ror#30		@ [6]
for($i=0;$i<5;$i++) {
	&BODY_00_15(@V);	unshift(@V,pop(@V));
	bne	.L_00_15		@ [((11+4)*5+2)*3]
	&BODY_00_15(@V);	unshift(@V,pop(@V));
	&BODY_16_19(@V);	unshift(@V,pop(@V));
	&BODY_16_19(@V);	unshift(@V,pop(@V));
	&BODY_16_19(@V);	unshift(@V,pop(@V));
	&BODY_16_19(@V);	unshift(@V,pop(@V));
	ldr	$K,.LK_20_39		@ [+15+16*4]
	cmn	sp,#0			@ [+3], clear carry to denote 20_39
for($i=0;$i<5;$i++) {
	&BODY_20_39(@V);	unshift(@V,pop(@V));
	teq	$Xi,sp			@ preserve carry
	bne	.L_20_39_or_60_79	@ [+((12+3)*5+2)*4]
	bcs	.L_done			@ [+((12+3)*5+2)*4], spare 300 bytes
	sub	sp,sp,#20*4		@ [+2]
for($i=0;$i<5;$i++) {
	&BODY_40_59(@V);	unshift(@V,pop(@V));
	bne	.L_40_59		@ [+((12+5)*5+2)*4]
	cmp	sp,#0			@ set carry to denote 60_79
	b	.L_20_39_or_60_79	@ [+4], spare 300 bytes
	add	sp,sp,#80*4		@ "deallocate" stack frame
	ldmia	$ctx,{$K,$t0,$t1,$t2,$t3}
	stmia	$ctx,{$a,$b,$c,$d,$e}
	bne	.Lloop			@ [+18], total 1307
	ldmia	sp!,{r4-r12,pc}
	ldmia	sp!,{r4-r12,lr}
	moveq	pc,lr			@ be binary compatible with V4, yet
	bx	lr			@ interoperable with Thumb ISA:-)
.size	sha1_block_data_order,.-sha1_block_data_order
.LK_00_19:	.word	0x5a827999
.LK_20_39:	.word	0x6ed9eba1
.LK_40_59:	.word	0x8f1bbcdc
.LK_60_79:	.word	0xca62c1d6
#if __ARM_MAX_ARCH__>=7
.word	OPENSSL_armcap_P-sha1_block_data_order
.asciz	"SHA1 block transform for ARMv4/NEON/ARMv8, CRYPTOGAMS by <appro\@openssl.org>"
#####################################################################
my @V=($a,$b,$c,$d,$e);
my ($K_XX_XX,$Ki,$t0,$t1,$Xfer,$saved_sp)=map("r$_",(8..12,14));
my @X=map("q$_",(8..11,0..3));
my @Tx=("q12","q13");
my ($K,$zero)=("q14","q15");
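
# The NEON path below keeps the 16 most recent schedule words in the @X
# register ring (rotated with push/shift). The body_* closures return one
# integer-unit instruction per element as Perl strings; the Xupdate_*/Xloop
# subs eval those strings interleaved with the SIMD message-schedule code,
# so ALU and NEON instructions can issue in parallel.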
sub AUTOLOAD()		# thunk [simplified] x86-style perlasm
{ my $opcode = $AUTOLOAD; $opcode =~ s/.*:://; $opcode =~ s/_/\./;
    $arg = "#$arg" if ($arg*1 eq $arg);
    $code .= "\t$opcode\t".join(',',@_,$arg)."\n";
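    # e.g. &vshr_u32(@Tx[0],@Tx[1],30) emits "\tvshr.u32\tq12,q13,#30\n":
    # underscores become dots and a trailing numeric argument gets "#".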
	'($a,$b,$c,$d,$e)=@V;'.		# '$code.="@ $j\n";'.
	'&add	($e,$e,$Ki)',		# e+=X[i]+K
	'&ldr	($Ki,sprintf "[sp,#%d]",4*(($j+1)&15))',
	'&add	($e,$e,$a,"ror#27")',	# e+=ROR(A,27)
	'&eor	($t1,$t1,$t0)',		# F_00_19
	'&mov	($b,$b,"ror#2")',	# b=ROR(b,2)
	'&add	($e,$e,$t1);'.		# e+=F_00_19
	'$j++;	unshift(@V,pop(@V));'
	'($a,$b,$c,$d,$e)=@V;'.		# '$code.="@ $j\n";'.
	'&add	($e,$e,$Ki)',		# e+=X[i]+K
	'&ldr	($Ki,sprintf "[sp,#%d]",4*(($j+1)&15)) if ($j<79)',
	'&eor	($t1,$t0,$c)',		# F_20_39
	'&add	($e,$e,$a,"ror#27")',	# e+=ROR(A,27)
	'&mov	($b,$b,"ror#2")',	# b=ROR(b,2)
	'&add	($e,$e,$t1);'.		# e+=F_20_39
	'$j++;	unshift(@V,pop(@V));'
	'($a,$b,$c,$d,$e)=@V;'.		# '$code.="@ $j\n";'.
	'&add	($e,$e,$Ki)',		# e+=X[i]+K
	'&ldr	($Ki,sprintf "[sp,#%d]",4*(($j+1)&15))',
	'&add	($e,$e,$a,"ror#27")',	# e+=ROR(A,27)
	'&mov	($b,$b,"ror#2")',	# b=ROR(b,2)
	'&add	($e,$e,$t1);'.		# e+=F_40_59
	'$j++;	unshift(@V,pop(@V));'
  my @insns = (&$body,&$body,&$body,&$body);
	&vext_8		(@X[0],@X[-4&7],@X[-3&7],8);	# compose "X[-14]" in "X[0]"
	&vadd_i32	(@Tx[1],@X[-1&7],$K);
	&vld1_32	("{$K\[]}","[$K_XX_XX,:32]!")	if ($Xi%5==0);
	&vext_8		(@Tx[0],@X[-1&7],$zero,4);	# "X[-3]", 3 words
	&veor		(@X[0],@X[0],@X[-4&7]);		# "X[0]"^="X[-16]"
	&veor		(@Tx[0],@Tx[0],@X[-2&7]);	# "X[-3]"^"X[-8]"
	&veor		(@Tx[0],@Tx[0],@X[0]);		# "X[0]"^="X[-3]"^"X[-8]"
	&vst1_32	("{@Tx[1]}","[$Xfer,:128]!");	# X[]+K xfer
	&sub		($Xfer,$Xfer,64)		if ($Xi%4==0);
	&vext_8		(@Tx[1],$zero,@Tx[0],4);	# "X[0]"<<96, extract one dword
	&vadd_i32	(@X[0],@Tx[0],@Tx[0]);
	&vsri_32	(@X[0],@Tx[0],31);		# "X[0]"<<<=1
	&vshr_u32	(@Tx[0],@Tx[1],30);
	&vshl_u32	(@Tx[1],@Tx[1],2);
	&veor		(@X[0],@X[0],@Tx[0]);
	&veor		(@X[0],@X[0],@Tx[1]);		# "X[0]"^=("X[0]">>96)<<<2
	foreach (@insns) { eval; }	# remaining instructions [if any]
  $Xi++;	push(@X,shift(@X));	# "rotate" X[]
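  # Each Xupdate_16_31 call produces four schedule words via
  # W[i] = ROL(W[i-3]^W[i-8]^W[i-14]^W[i-16],1); W[i-3] overlaps the words
  # being produced, hence the $zero padding and the extra <<<2 fix-up of
  # the final lane above.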
  my @insns = (&$body,&$body,&$body,&$body);
	&vext_8		(@Tx[0],@X[-2&7],@X[-1&7],8);	# compose "X[-6]"
	&veor		(@X[0],@X[0],@X[-4&7]);		# "X[0]"="X[-32]"^"X[-16]"
	&veor		(@X[0],@X[0],@X[-7&7]);		# "X[0]"^="X[-28]"
	&vadd_i32	(@Tx[1],@X[-1&7],$K);
	&vld1_32	("{$K\[]}","[$K_XX_XX,:32]!")	if ($Xi%5==0);
	&veor		(@Tx[0],@Tx[0],@X[0]);		# "X[-6]"^="X[0]"
	&vshr_u32	(@X[0],@Tx[0],30);
	&vst1_32	("{@Tx[1]}","[$Xfer,:128]!");	# X[]+K xfer
	&sub		($Xfer,$Xfer,64)		if ($Xi%4==0);
	&vsli_32	(@X[0],@Tx[0],2);		# "X[0]"="X[-6]"<<<2
	foreach (@insns) { eval; }	# remaining instructions [if any]
  $Xi++;	push(@X,shift(@X));	# "rotate" X[]
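  # For rounds 32-79 the recurrence is rewritten as
  # W[i] = ROL(W[i-6]^W[i-16]^W[i-28]^W[i-32],2), which has no dependency
  # inside a group of four, so all four lanes are computed at once with a
  # single rotate by 2 (vshr/vsli).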
  my @insns = (&$body,&$body,&$body,&$body);
	&vadd_i32	(@Tx[1],@X[-1&7],$K);
	&vst1_32	("{@Tx[1]}","[$Xfer,:128]!");
	&sub		($Xfer,$Xfer,64);
	&sub		($K_XX_XX,$K_XX_XX,16);	# rewind $K_XX_XX
	&subeq		($inp,$inp,64);		# reload last block to avoid SEGV
	&vld1_8		("{@X[-4&7]-@X[-3&7]}","[$inp]!");
	&vld1_8		("{@X[-2&7]-@X[-1&7]}","[$inp]!");
	&vld1_32	("{$K\[]}","[$K_XX_XX,:32]!");	# load K_00_19
	&vrev32_8	(@X[-4&7],@X[-4&7]);
	foreach (@insns) { eval; }		# remaining instructions
  my @insns = (&$body,&$body,&$body,&$body);
	&vrev32_8	(@X[($Xi-3)&7],@X[($Xi-3)&7]);
	&vadd_i32	(@X[$Xi&7],@X[($Xi-4)&7],$K);
	&vst1_32	("{@X[$Xi&7]}","[$Xfer,:128]!");	# X[]+K xfer to IALU
	foreach (@insns) { eval; }
#if __ARM_MAX_ARCH__>=7
.type	sha1_block_data_order_neon,%function
sha1_block_data_order_neon:
	stmdb	sp!,{r4-r12,lr}
	add	$len,$inp,$len,lsl#6	@ $len to point at the end of $inp
	@ dmb				@ errata #451034 on early Cortex A8
	@ vstmdb	sp!,{d8-d15}	@ ABI specification says so
	sub	sp,sp,#64		@ alloca
	adr	$K_XX_XX,.LK_00_19
	bic	sp,sp,#15		@ align for 128-bit stores
	ldmia	$ctx,{$a,$b,$c,$d,$e}	@ load context
	vld1.8	{@X[-4&7]-@X[-3&7]},[$inp]!	@ handles unaligned
	veor	$zero,$zero,$zero
	vld1.8	{@X[-2&7]-@X[-1&7]},[$inp]!
	vld1.32	{${K}\[]},[$K_XX_XX,:32]!	@ load K_00_19
	vrev32.8	@X[-4&7],@X[-4&7]	@ yes, even on
	vrev32.8	@X[-3&7],@X[-3&7]	@ big-endian...
	vrev32.8	@X[-2&7],@X[-2&7]
	vadd.i32	@X[0],@X[-4&7],$K
	vrev32.8	@X[-1&7],@X[-1&7]
	vadd.i32	@X[1],@X[-3&7],$K
	vst1.32	{@X[0]},[$Xfer,:128]!
	vadd.i32	@X[2],@X[-2&7],$K
	vst1.32	{@X[1]},[$Xfer,:128]!
	vst1.32	{@X[2]},[$Xfer,:128]!
	ldr	$Ki,[sp]		@ big RAW stall
	&Xupdate_16_31(\&body_00_19);
	&Xupdate_16_31(\&body_00_19);
	&Xupdate_16_31(\&body_00_19);
	&Xupdate_16_31(\&body_00_19);
	&Xupdate_32_79(\&body_00_19);
	&Xupdate_32_79(\&body_20_39);
	&Xupdate_32_79(\&body_20_39);
	&Xupdate_32_79(\&body_20_39);
	&Xupdate_32_79(\&body_20_39);
	&Xupdate_32_79(\&body_20_39);
	&Xupdate_32_79(\&body_40_59);
	&Xupdate_32_79(\&body_40_59);
	&Xupdate_32_79(\&body_40_59);
	&Xupdate_32_79(\&body_40_59);
	&Xupdate_32_79(\&body_40_59);
	&Xupdate_32_79(\&body_20_39);
	&Xuplast_80(\&body_20_39);
	&Xloop(\&body_20_39);
	&Xloop(\&body_20_39);
	&Xloop(\&body_20_39);
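	# 20 calls, four rounds each: the 16 Xupdate_* calls also generate
	# W[16..79] four words at a time, Xuplast_80 stores the final W+K
	# transfer and starts loading/byte-swapping the next input block,
	# and the three Xloop calls finish the last rounds while staging
	# the next block's first W+K values.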
	ldmia	$ctx,{$Ki,$t0,$t1,$Xfer}	@ accumulate context
	stmia	$ctx,{$a,$b,$c,$d,$e}
	@ vldmia	sp!,{d8-d15}
	ldmia	sp!,{r4-r12,pc}
.size	sha1_block_data_order_neon,.-sha1_block_data_order_neon
#####################################################################
my ($ABCD,$E,$E0,$E1)=map("q$_",(0..3));
my @MSG=map("q$_",(4..7));
my @Kxx=map("q$_",(8..11));
my ($W0,$W1,$ABCD_SAVE)=map("q$_",(12..14));
#if __ARM_MAX_ARCH__>=7
.type	sha1_block_data_order_armv8,%function
sha1_block_data_order_armv8:
	vstmdb	sp!,{d8-d15}		@ ABI specification says so
	vld1.32	{$ABCD},[$ctx]!
	vld1.32	{$E\[0]},[$ctx]
	vld1.32	{@Kxx[0]\[]},[r3,:32]!
	vld1.32	{@Kxx[1]\[]},[r3,:32]!
	vld1.32	{@Kxx[2]\[]},[r3,:32]!
	vld1.32	{@Kxx[3]\[]},[r3,:32]
	vld1.8	{@MSG[0]-@MSG[1]},[$inp]!
	vld1.8	{@MSG[2]-@MSG[3]},[$inp]!
	vrev32.8	@MSG[0],@MSG[0]
	vrev32.8	@MSG[1],@MSG[1]
	vadd.i32	$W0,@Kxx[0],@MSG[0]
	vrev32.8	@MSG[2],@MSG[2]
	vmov	$ABCD_SAVE,$ABCD	@ offload
	vadd.i32	$W1,@Kxx[0],@MSG[1]
	vrev32.8	@MSG[3],@MSG[3]
	vadd.i32	$W0,@Kxx[$j],@MSG[2]
	sha1su0	@MSG[0],@MSG[1],@MSG[2]
for ($j=0,$i=1;$i<20-3;$i++) {
my $f=("c","p","m","p")[$i/5];
	vadd.i32	$W1,@Kxx[$j],@MSG[3]
	sha1su1	@MSG[0],@MSG[3]
$code.=<<___ if ($i<20-4);
	sha1su0	@MSG[1],@MSG[2],@MSG[3]
($E0,$E1)=($E1,$E0);	($W0,$W1)=($W1,$W0);
push(@MSG,shift(@MSG));	$j++ if ((($i+3)%5)==0);
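# $f selects the round flavour for each group of four rounds: sha1c (Ch)
# for rounds 0-19, sha1p (parity) for 20-39 and 60-79, sha1m (Maj) for
# 40-59; $j advances to the next round constant every five groups.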
	vadd.i32	$W1,@Kxx[$j],@MSG[3]
	vadd.i32	$ABCD,$ABCD,$ABCD_SAVE
	vst1.32	{$ABCD},[$ctx]!
	vst1.32	{$E\[0]},[$ctx]
.size	sha1_block_data_order_armv8,.-sha1_block_data_order_armv8
#if __ARM_MAX_ARCH__>=7
.comm	OPENSSL_armcap_P,4,4
		"sha1c"		=> 0xf2000c40,	"sha1p"		=> 0xf2100c40,
		"sha1m"		=> 0xf2200c40,	"sha1su0"	=> 0xf2300c40,
		"sha1h"		=> 0xf3b902c0,	"sha1su1"	=> 0xf3ba0380	);
    my ($mnemonic,$arg)=@_;
    if ($arg =~ m/q([0-9]+)(?:,\s*q([0-9]+))?,\s*q([0-9]+)/o) {
	my $word = $opcode{$mnemonic}|(($1&7)<<13)|(($1&8)<<19)
				      |(($2&7)<<17)|(($2&8)<<4)
				      |(($3&7)<<1) |(($3&8)<<2);
	# Emit raw little-endian bytes, since ARMv7 instructions are
	# always encoded little-endian. The correct solution would be
	# the .inst directive, but older assemblers don't implement it:-(
	sprintf ".byte\t0x%02x,0x%02x,0x%02x,0x%02x\t@ %s %s",
			$word&0xff,($word>>8)&0xff,
			($word>>16)&0xff,($word>>24)&0xff,
foreach (split($/,$code)) {
	s/{q([0-9]+)\[\]}/sprintf "{d%d[],d%d[]}",2*$1,2*$1+1/eo	or
	s/{q([0-9]+)\[0\]}/sprintf "{d%d[0]}",2*$1/eo;
	s/\b(sha1\w+)\s+(q.*)/unsha1($1,$2)/geo;
	s/\bbx\s+lr\b/.word\t0xe12fff1e/o;	# make it possible to compile with -march=armv4
close STDOUT;			# enforce flush