# Copyright 2016 The OpenSSL Project Authors. All Rights Reserved.
#
# Licensed under the OpenSSL license (the "License"). You may not use
# this file except in compliance with the License. You can obtain a copy
# in the file LICENSE in the source distribution or at
# https://www.openssl.org/source/license.html

# ====================================================================
# Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
# project. The module is, however, dual licensed under OpenSSL and
# CRYPTOGAMS licenses depending on where you obtain it. For further
# details see http://www.openssl.org/~appro/cryptogams/.
# ====================================================================
#			IALU(*)/gcc-4.4		NEON
#
# ARM11xx(ARMv6)	7.78/+100%		-
# Cortex-A5		6.35/+130%		3.00
# Cortex-A8		6.25/+115%		2.36
# Cortex-A9		5.10/+95%		2.55
# Cortex-A15		3.85/+85%		1.25(**)
# Snapdragon S4		5.70/+100%		1.48(**)
#
# (*)	this is for -march=armv6, i.e. with a bunch of ldrb instructions
#	loading data;
# (**)	these are trade-off results; they can be improved by ~8%, but at
#	the cost of a 15/12% regression on Cortex-A5/A7; it's even possible
#	to improve the Cortex-A9 result, but then A5/A7 lose more than 20%;
if ($flavour=~/\w[\w\-]*\.\w+$/) { $output=$flavour; undef $flavour; }
else { while (($output=shift) && ($output!~/\w[\w\-]*\.\w+$/)) {} }

if ($flavour && $flavour ne "void") {
    $0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
    ( $xlate="${dir}arm-xlate.pl" and -f $xlate ) or
    ( $xlate="${dir}../../perlasm/arm-xlate.pl" and -f $xlate) or
    die "can't locate arm-xlate.pl";

    open STDOUT,"| \"$^X\" $xlate $flavour $output";
} else {
    open STDOUT,">$output";
}
($ctx,$inp,$len,$padbit)=map("r$_",(0..3));

#if defined(__thumb2__)

.globl	poly1305_blocks
.type	poly1305_init,%function

	str	r3,[$ctx,#0]		@ zero hash value
	str	r3,[$ctx,#36]		@ is_base2_26

#if	__ARM_MAX_ARCH__>=7
	adr	r11,.Lpoly1305_init
	ldr	r12,.LOPENSSL_armcap

	and	r3,r10,#-4		@ 0x0ffffffc
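	@ The masks here implement the standard Poly1305 key clamping,
	@ r &= 0x0ffffffc_0ffffffc_0ffffffc_0fffffff. A C sketch of the
	@ same step (word names are illustrative, not from this file):
	@
	@	r0 = key32[0] & 0x0fffffff;	/* low word keeps 28 bits */
	@	r1 = key32[1] & 0x0ffffffc;	/* upper words also drop  */
	@	r2 = key32[2] & 0x0ffffffc;	/* their two low bits     */
	@	r3 = key32[3] & 0x0ffffffc;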
#if	__ARM_MAX_ARCH__>=7
	ldr	r12,[r11,r12]		@ OPENSSL_armcap_P

#if	__ARM_MAX_ARCH__>=7
	tst	r12,#ARMV7_NEON		@ check for NEON

	adr	r9,poly1305_blocks_neon
	adr	r11,poly1305_blocks

	adr	r12,poly1305_emit
	adr	r10,poly1305_emit_neon

	addeq	r12,r11,#(poly1305_emit-.Lpoly1305_init)
	addne	r12,r11,#(poly1305_emit_neon-.Lpoly1305_init)
	addeq	r11,r11,#(poly1305_blocks-.Lpoly1305_init)
	addne	r11,r11,#(poly1305_blocks_neon-.Lpoly1305_init)

	orr	r12,r12,#1		@ thumb-ify address

#if	__ARM_MAX_ARCH__>=7
	stmia	r2,{r11,r12}		@ fill functions table

	moveq	pc,lr			@ be binary compatible with V4, yet
	bx	lr			@ interoperable with Thumb ISA:-)

.size	poly1305_init,.-poly1305_init
my ($h0,$h1,$h2,$h3,$h4,$r0,$r1,$r2,$r3)=map("r$_",(4..12));
my ($s1,$s2,$s3)=($r1,$r2,$r3);
.type	poly1305_blocks,%function

	stmdb	sp!,{r3-r11,lr}

	add	$len,$len,$inp		@ end pointer

	ldmia	$ctx,{$h0-$r3}		@ load context

	str	$ctx,[sp,#12]		@ offload stuff

	ldrb	r0,[lr],#16		@ load input

	addhi	$h4,$h4,#1		@ 1<<128

	adds	$h0,$h0,r3		@ accumulate input

	str	lr,[sp,#8]		@ offload input pointer

	add	$s1,$r1,$r1,lsr#2

	ldr	r0,[lr],#16		@ load input

	addhi	$h4,$h4,#1		@ padbit

	adds	$h0,$h0,r0		@ accumulate input
	str	lr,[sp,#8]		@ offload input pointer

	add	$s1,$r1,$r1,lsr#2

	add	$s2,$r2,$r2,lsr#2

	add	$s3,$r3,$r3,lsr#2
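	@ Since the clamped $r1-$r3 have their two low bits zero, the
	@ shift above is exact and $s_i = r_i + (r_i>>2) = 5*r_i/4.
	@ Scaled this way, s_i stands in for 5*r_i in the column
	@ products that wrap past 2^130, using 2^130 = 5 (mod 2^130-5).
	@ An illustrative C sketch of the same precomputation:
	@
	@	uint32_t s1 = r1 + (r1 >> 2);	/* exact: r1 % 4 == 0 */
	@	uint32_t s2 = r2 + (r2 >> 2);
	@	uint32_t s3 = r3 + (r3 >> 2);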
	ldr	$r1,[sp,#20]		@ reload $r1

	str	r0,[sp,#0]		@ future $h0

	ldr	$r2,[sp,#24]		@ reload $r2
	adds	r2,r2,r1		@ d1+=d0>>32

	adc	lr,r3,#0		@ future $h2
	str	r2,[sp,#4]		@ future $h1

	ldr	$r3,[sp,#28]		@ reload $r3

	adds	$h2,lr,r0		@ d2+=d1>>32
	ldr	lr,[sp,#8]		@ reload input pointer

	adds	$h3,r2,r1		@ d3+=d2>>32
	ldr	r0,[sp,#16]		@ reload end pointer

	add	$h4,$h4,r3		@ h4+=d3>>32

	add	r1,r1,r1,lsr#2		@ *=5

	cmp	r0,lr			@ done yet?

	stmia	$ctx,{$h0-$h4}		@ store the result

	ldmia	sp!,{r3-r11,pc}

	ldmia	sp!,{r3-r11,lr}

	moveq	pc,lr			@ be binary compatible with V4, yet
	bx	lr			@ interoperable with Thumb ISA:-)

.size	poly1305_blocks,.-poly1305_blocks
my ($ctx,$mac,$nonce)=map("r$_",(0..2));
my ($h0,$h1,$h2,$h3,$h4,$g0,$g1,$g2,$g3)=map("r$_",(3..11));

.type	poly1305_emit,%function

.Lpoly1305_emit_enter:

	adds	$g0,$h0,#5		@ compare to modulus

	tst	$g4,#4			@ did it carry/borrow?

	moveq	pc,lr			@ be binary compatible with V4, yet
	bx	lr			@ interoperable with Thumb ISA:-)

.size	poly1305_emit,.-poly1305_emit
my ($R0,$R1,$S1,$R2,$S2,$R3,$S3,$R4,$S4) = map("d$_",(0..9));
my ($D0,$D1,$D2,$D3,$D4, $H0,$H1,$H2,$H3,$H4) = map("q$_",(5..14));
my ($T0,$T1,$MASK) = map("q$_",(15,4,0));

my ($in2,$zeros,$tbl0,$tbl1) = map("r$_",(4..7));

#if	__ARM_MAX_ARCH__>=7

.type	poly1305_init_neon,%function

	ldr	r4,[$ctx,#20]		@ load key base 2^32

	and	r2,r4,#0x03ffffff	@ base 2^32 -> base 2^26

	and	r3,r3,#0x03ffffff
	and	r4,r4,#0x03ffffff
	and	r5,r5,#0x03ffffff
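	@ A C sketch of this radix change (the interleaving shifts are
	@ elided in this excerpt; w0-w3 stand for the four 32-bit key
	@ words). A 128-bit value is resplit into five 26-bit limbs:
	@
	@	r0 = w0 & 0x03ffffff;
	@	r1 = ((w1 << 6)  | (w0 >> 26)) & 0x03ffffff;
	@	r2 = ((w2 << 12) | (w1 >> 20)) & 0x03ffffff;
	@	r3 = ((w3 << 18) | (w2 >> 14)) & 0x03ffffff;
	@	r4 = w3 >> 8;		/* 24 bits, no mask needed */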
	vdup.32	$R0,r2			@ r^1 in both lanes
	add	r2,r3,r3,lsl#2		@ *5

	mov	$zeros,#2		@ counter

	@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
	@ d0 = h0*r0 + h4*5*r1 + h3*5*r2 + h2*5*r3 + h1*5*r4
	@ d1 = h1*r0 + h0*r1   + h4*5*r2 + h3*5*r3 + h2*5*r4
	@ d2 = h2*r0 + h1*r1   + h0*r2   + h4*5*r3 + h3*5*r4
	@ d3 = h3*r0 + h2*r1   + h1*r2   + h0*r3   + h4*5*r4
	@ d4 = h4*r0 + h3*r1   + h2*r2   + h1*r3   + h0*r4
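	@
	@ In C terms, each d_i above is a 64-bit column sum over the
	@ 26-bit limbs, with s_j = 5*r_j folding the wrap past 2^130.
	@ A sketch of the first and last columns (the middle ones
	@ follow the same diagonal pattern):
	@
	@	uint64_t d0 = (uint64_t)h0*r0 + (uint64_t)h4*s1
	@	            + (uint64_t)h3*s2 + (uint64_t)h2*s3
	@	            + (uint64_t)h1*s4;
	@	uint64_t d4 = (uint64_t)h4*r0 + (uint64_t)h3*r1
	@	            + (uint64_t)h2*r2 + (uint64_t)h1*r3
	@	            + (uint64_t)h0*r4;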
	vmull.u32	$D0,$R0,${R0}[1]
	vmull.u32	$D1,$R1,${R0}[1]
	vmull.u32	$D2,$R2,${R0}[1]
	vmull.u32	$D3,$R3,${R0}[1]
	vmull.u32	$D4,$R4,${R0}[1]

	vmlal.u32	$D0,$R4,${S1}[1]
	vmlal.u32	$D1,$R0,${R1}[1]
	vmlal.u32	$D2,$R1,${R1}[1]
	vmlal.u32	$D3,$R2,${R1}[1]
	vmlal.u32	$D4,$R3,${R1}[1]

	vmlal.u32	$D0,$R3,${S2}[1]
	vmlal.u32	$D1,$R4,${S2}[1]
	vmlal.u32	$D3,$R1,${R2}[1]
	vmlal.u32	$D2,$R0,${R2}[1]
	vmlal.u32	$D4,$R2,${R2}[1]

	vmlal.u32	$D0,$R2,${S3}[1]
	vmlal.u32	$D3,$R0,${R3}[1]
	vmlal.u32	$D1,$R3,${S3}[1]
	vmlal.u32	$D2,$R4,${S3}[1]
	vmlal.u32	$D4,$R1,${R3}[1]

	vmlal.u32	$D3,$R4,${S4}[1]
	vmlal.u32	$D0,$R1,${S4}[1]
	vmlal.u32	$D1,$R2,${S4}[1]
	vmlal.u32	$D2,$R3,${S4}[1]
	vmlal.u32	$D4,$R0,${R4}[1]

	@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
	@ lazy reduction as discussed in "NEON crypto" by D.J. Bernstein

	@ H0>>+H1>>+H2>>+H3>>+H4
	@ H3>>+H4>>*5+H0>>+H1
	@ The result of multiplying an n-bit number by an m-bit number
	@ is n+m bits wide. However! Even though 2^n is an (n+1)-bit
	@ number, an m-bit number multiplied by 2^n is still n+m bits
	@ wide.
	@
	@ The sum of two n-bit numbers is n+1 bits wide, the sum of
	@ three - n+2, and so is the sum of four. The sum of 2^m
	@ (n-m)-bit numbers plus one n-bit number is n+1 bits wide.
	@
	@ >>+ denotes Hnext += Hn>>26, Hn &= 0x3ffffff. This means that
	@ H0, H2, H3 are guaranteed to be 26 bits wide, while H1 and H4
	@ can be 27. However! In cases when their width exceeds 26 bits
	@ they are limited by 2^26+2^6. This in turn means that the *sum*
	@ of the products with these values can still be viewed as a sum
	@ of 52-bit numbers as long as the number of addends is not a
	@ power of 2. For example,
	@
	@ H4 = H4*R0 + H3*R1 + H2*R2 + H1*R3 + H0*R4,
	@
	@ which can't be larger than 5 * (2^26 + 2^6) * (2^26 + 2^6), or
	@ 5 * (2^52 + 2*2^32 + 2^12), which in turn is smaller than
	@ 8 * (2^52) or 2^55. However, the value is then multiplied
	@ by 5, so we should be looking at 5 * 5 * (2^52 + 2^33 + 2^12),
	@ which is less than 32 * (2^52) or 2^57. And when processing
	@ data we are looking at three times as many addends...
	@
	@ In the key setup procedure pre-reduced H0 is limited by
	@ 5*4+1 52-bit addends, and 5*H4 - by 5*5, or 57 bits. But when
	@ hashing input, H0 is limited by (5*4+1)*3 addends, or 58 bits,
	@ while 5*H4 by 5*5*3, or 59[!] bits. How is this relevant? The
	@ vmlal.u32 instruction accepts 2x32-bit input and writes a
	@ 2x64-bit result. This means that the result of reduction has
	@ to be compressed upon loop wrap-around. This can be done in
	@ the process of reduction to minimize the number of
	@ instructions [as well as the number of 128-bit instructions,
	@ which benefits low-end processors], but one has to watch for
	@ H2 (which is narrower than H0) and 5*H4 not being wider than
	@ 58 bits, so that the result of the right shift by 26 bits
	@ fits in 32 bits. This is also useful on x86, because it
	@ allows using paddd in place of paddq, which benefits Atom,
	@ where paddq is ridiculously slow.
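	@
	@ A C sketch of one such lazy-reduction pass over the 64-bit
	@ column sums, in the same order as the interleaved carry
	@ chains below (the shifts that compute the carries into $T0
	@ and $T1 are partly elided in this excerpt):
	@
	@	d4 += d3 >> 26;  d3 &= 0x03ffffff;
	@	d1 += d0 >> 26;  d0 &= 0x03ffffff;
	@	d0 += (d4 >> 26) * 5;  d4 &= 0x03ffffff;  /* 2^130 = 5 */
	@	d2 += d1 >> 26;  d1 &= 0x03ffffff;
	@	d3 += d2 >> 26;  d2 &= 0x03ffffff;
	@	d1 += d0 >> 26;  d0 &= 0x03ffffff;
	@	d4 += d3 >> 26;  d3 &= 0x03ffffff;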
	vadd.i64	$D4,$D4,$T0		@ h3 -> h4
	vbic.i32	$D3#lo,#0xfc000000	@ &=0x03ffffff
	vadd.i64	$D1,$D1,$T1		@ h0 -> h1
	vbic.i32	$D0#lo,#0xfc000000

	vshrn.u64	$T0#lo,$D4,#26

	vadd.i64	$D2,$D2,$T1		@ h1 -> h2
	vbic.i32	$D4#lo,#0xfc000000
	vbic.i32	$D1#lo,#0xfc000000

	vadd.i32	$D0#lo,$D0#lo,$T0#lo
	vshl.u32	$T0#lo,$T0#lo,#2
	vshrn.u64	$T1#lo,$D2,#26

	vadd.i32	$D0#lo,$D0#lo,$T0#lo	@ h4 -> h0
	vadd.i32	$D3#lo,$D3#lo,$T1#lo	@ h2 -> h3
	vbic.i32	$D2#lo,#0xfc000000

	vshr.u32	$T0#lo,$D0#lo,#26
	vbic.i32	$D0#lo,#0xfc000000
	vshr.u32	$T1#lo,$D3#lo,#26
	vbic.i32	$D3#lo,#0xfc000000
	vadd.i32	$D1#lo,$D1#lo,$T0#lo	@ h0 -> h1
	vadd.i32	$D4#lo,$D4#lo,$T1#lo	@ h3 -> h4

	subs	$zeros,$zeros,#1
	beq	.Lsquare_break_neon

	add	$tbl0,$ctx,#(48+0*9*4)
	add	$tbl1,$ctx,#(48+1*9*4)

	vtrn.32	$R0,$D0#lo		@ r^2:r^1

	vshl.u32	$S2,$R2,#2		@ *5

	vst4.32	{${R0}[0],${R1}[0],${S1}[0],${R2}[0]},[$tbl0]!
	vst4.32	{${R0}[1],${R1}[1],${S1}[1],${R2}[1]},[$tbl1]!
	vst4.32	{${S2}[0],${R3}[0],${S3}[0],${R4}[0]},[$tbl0]!
	vst4.32	{${S2}[1],${R3}[1],${S3}[1],${R4}[1]},[$tbl1]!
	vst1.32	{${S4}[0]},[$tbl0,:32]
	vst1.32	{${S4}[1]},[$tbl1,:32]

	add	$tbl0,$ctx,#(48+2*4*9)
	add	$tbl1,$ctx,#(48+3*4*9)

	vmov	$R0,$D0#lo		@ r^4:r^3
	vshl.u32	$S1,$D1#lo,#2		@ *5

	vshl.u32	$S2,$D2#lo,#2

	vshl.u32	$S3,$D3#lo,#2

	vshl.u32	$S4,$D4#lo,#2

	vadd.i32	$S1,$S1,$D1#lo
	vadd.i32	$S2,$S2,$D2#lo
	vadd.i32	$S3,$S3,$D3#lo
	vadd.i32	$S4,$S4,$D4#lo
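	@ As in the scalar code, $S1-$S4 become 5*r_j for the current
	@ powers of r: the vshl by 2 produces 4*r_j and the vadd
	@ completes s_j = 4*r_j + r_j = 5*r_j.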
	vst4.32	{${R0}[0],${R1}[0],${S1}[0],${R2}[0]},[$tbl0]!
	vst4.32	{${R0}[1],${R1}[1],${S1}[1],${R2}[1]},[$tbl1]!
	vst4.32	{${S2}[0],${R3}[0],${S3}[0],${R4}[0]},[$tbl0]!
	vst4.32	{${S2}[1],${R3}[1],${S3}[1],${R4}[1]},[$tbl1]!
	vst1.32	{${S4}[0]},[$tbl0]
	vst1.32	{${S4}[1]},[$tbl1]

.size	poly1305_init_neon,.-poly1305_init_neon

.type	poly1305_blocks_neon,%function
poly1305_blocks_neon:
	ldr	ip,[$ctx,#36]		@ is_base2_26

	tst	ip,ip			@ is_base2_26?

	vstmdb	sp!,{d8-d15}		@ ABI specification says so

	tst	ip,ip			@ is_base2_26?

	bl	poly1305_init_neon

	ldr	r4,[$ctx,#0]		@ load hash value base 2^32

	and	r2,r4,#0x03ffffff	@ base 2^32 -> base 2^26

	veor	$D0#lo,$D0#lo,$D0#lo

	veor	$D1#lo,$D1#lo,$D1#lo

	veor	$D2#lo,$D2#lo,$D2#lo

	veor	$D3#lo,$D3#lo,$D3#lo
	and	r3,r3,#0x03ffffff

	veor	$D4#lo,$D4#lo,$D4#lo
	and	r4,r4,#0x03ffffff

	and	r5,r5,#0x03ffffff
	str	r1,[$ctx,#36]		@ is_base2_26

	@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@

	veor	$D0#lo,$D0#lo,$D0#lo
	veor	$D1#lo,$D1#lo,$D1#lo
	veor	$D2#lo,$D2#lo,$D2#lo
	veor	$D3#lo,$D3#lo,$D3#lo
	veor	$D4#lo,$D4#lo,$D4#lo
	vld4.32	{$D0#lo[0],$D1#lo[0],$D2#lo[0],$D3#lo[0]},[$ctx]!

	vld1.32	{$D4#lo[0]},[$ctx]
	sub	$ctx,$ctx,#16		@ rewind

	mov	$padbit,$padbit,lsl#24

	vld4.32	{$H0#lo[0],$H1#lo[0],$H2#lo[0],$H3#lo[0]},[$inp]!
	vmov.32	$H4#lo[0],$padbit

	vsri.u32	$H4#lo,$H3#lo,#8	@ base 2^32 -> base 2^26
	vshl.u32	$H3#lo,$H3#lo,#18

	vsri.u32	$H3#lo,$H2#lo,#14
	vshl.u32	$H2#lo,$H2#lo,#12
	vadd.i32	$H4#hi,$H4#lo,$D4#lo	@ add hash value and move to #hi

	vbic.i32	$H3#lo,#0xfc000000
	vsri.u32	$H2#lo,$H1#lo,#20
	vshl.u32	$H1#lo,$H1#lo,#6

	vbic.i32	$H2#lo,#0xfc000000
	vsri.u32	$H1#lo,$H0#lo,#26
	vadd.i32	$H3#hi,$H3#lo,$D3#lo

	vbic.i32	$H0#lo,#0xfc000000
	vbic.i32	$H1#lo,#0xfc000000
	vadd.i32	$H2#hi,$H2#lo,$D2#lo

	vadd.i32	$H0#hi,$H0#lo,$D0#lo
	vadd.i32	$H1#hi,$H1#lo,$D1#lo

	vmov.i32	$H4,#1<<24		@ padbit, yes, always
	vld4.32	{$H0#lo,$H1#lo,$H2#lo,$H3#lo},[$inp]	@ inp[0:1]

	vld4.32	{$H0#hi,$H1#hi,$H2#hi,$H3#hi},[$in2]	@ inp[2:3] (or 0)

	addhi	$tbl1,$ctx,#(48+1*9*4)
	addhi	$tbl0,$ctx,#(48+3*9*4)

	vsri.u32	$H4,$H3,#8		@ base 2^32 -> base 2^26

	vbic.i32	$H3,#0xfc000000

	vbic.i32	$H2,#0xfc000000

	vbic.i32	$H0,#0xfc000000
	vbic.i32	$H1,#0xfc000000

	vld4.32	{${R0}[1],${R1}[1],${S1}[1],${R2}[1]},[$tbl1]!	@ load r^2
	vld4.32	{${R0}[0],${R1}[0],${S1}[0],${R2}[0]},[$tbl0]!	@ load r^4
	vld4.32	{${S2}[1],${R3}[1],${S3}[1],${R4}[1]},[$tbl1]!
	vld4.32	{${S2}[0],${R3}[0],${S3}[0],${R4}[0]},[$tbl0]!

	@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
	@ ((inp[0]*r^4+inp[2]*r^2+inp[4])*r^4+inp[6]*r^2
	@ ((inp[1]*r^4+inp[3]*r^2+inp[5])*r^3+inp[7]*r
	@     \___________________/
	@ ((inp[0]*r^4+inp[2]*r^2+inp[4])*r^4+inp[6]*r^2+inp[8])*r^2
	@ ((inp[1]*r^4+inp[3]*r^2+inp[5])*r^4+inp[7]*r^2+inp[9])*r
	@     \___________________/ \____________________/
	@
	@ Note that we start with inp[2:3]*r^2. This is because that
	@ product doesn't depend on the reduction of the previous
	@ iteration.
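	@
	@ In scalar terms the regrouping is, for four blocks m0-m3 and
	@ running hash h (all mod 2^130-5):
	@
	@ h = (h+m0)*r^4 + m1*r^3 + m2*r^2 + m3*r
	@   = ((h+m0)*r^2 + m2)*r^2 + (m1*r^2 + m3)*r
	@
	@ so the even and odd block streams can run independently in
	@ the two vector lanes and only meet at the very end.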
	@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
	@ d4 = h4*r0 + h3*r1 + h2*r2 + h1*r3 + h0*r4
	@ d3 = h3*r0 + h2*r1 + h1*r2 + h0*r3 + h4*5*r4
	@ d2 = h2*r0 + h1*r1 + h0*r2 + h4*5*r3 + h3*5*r4
	@ d1 = h1*r0 + h0*r1 + h4*5*r2 + h3*5*r3 + h2*5*r4
	@ d0 = h0*r0 + h4*5*r1 + h3*5*r2 + h2*5*r3 + h1*5*r4

	@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
	vadd.i32	$H2#lo,$H2#lo,$D2#lo	@ accumulate inp[0:1]
	vmull.u32	$D2,$H2#hi,${R0}[1]
	vadd.i32	$H0#lo,$H0#lo,$D0#lo
	vmull.u32	$D0,$H0#hi,${R0}[1]
	vadd.i32	$H3#lo,$H3#lo,$D3#lo
	vmull.u32	$D3,$H3#hi,${R0}[1]
	vmlal.u32	$D2,$H1#hi,${R1}[1]
	vadd.i32	$H1#lo,$H1#lo,$D1#lo
	vmull.u32	$D1,$H1#hi,${R0}[1]

	vadd.i32	$H4#lo,$H4#lo,$D4#lo
	vmull.u32	$D4,$H4#hi,${R0}[1]

	vmlal.u32	$D0,$H4#hi,${S1}[1]

	vmlal.u32	$D3,$H2#hi,${R1}[1]
	vld1.32	${S4}[1],[$tbl1,:32]
	vmlal.u32	$D1,$H0#hi,${R1}[1]
	vmlal.u32	$D4,$H3#hi,${R1}[1]

	vmlal.u32	$D0,$H3#hi,${S2}[1]
	vmlal.u32	$D3,$H1#hi,${R2}[1]
	vmlal.u32	$D4,$H2#hi,${R2}[1]
	vmlal.u32	$D1,$H4#hi,${S2}[1]
	vmlal.u32	$D2,$H0#hi,${R2}[1]

	vmlal.u32	$D3,$H0#hi,${R3}[1]
	vmlal.u32	$D0,$H2#hi,${S3}[1]
	vmlal.u32	$D4,$H1#hi,${R3}[1]
	vmlal.u32	$D1,$H3#hi,${S3}[1]
	vmlal.u32	$D2,$H4#hi,${S3}[1]

	vmlal.u32	$D3,$H4#hi,${S4}[1]
	vmlal.u32	$D0,$H1#hi,${S4}[1]
	vmlal.u32	$D4,$H0#hi,${R4}[1]
	vmlal.u32	$D1,$H2#hi,${S4}[1]
	vmlal.u32	$D2,$H3#hi,${S4}[1]

	vld4.32	{$H0#hi,$H1#hi,$H2#hi,$H3#hi},[$in2]	@ inp[2:3] (or 0)

	@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
	@ (hash+inp[0:1])*r^4 and accumulate

	vmlal.u32	$D3,$H3#lo,${R0}[0]
	vmlal.u32	$D0,$H0#lo,${R0}[0]
	vmlal.u32	$D4,$H4#lo,${R0}[0]
	vmlal.u32	$D1,$H1#lo,${R0}[0]
	vmlal.u32	$D2,$H2#lo,${R0}[0]
	vld1.32	${S4}[0],[$tbl0,:32]

	vmlal.u32	$D3,$H2#lo,${R1}[0]
	vmlal.u32	$D0,$H4#lo,${S1}[0]
	vmlal.u32	$D4,$H3#lo,${R1}[0]
	vmlal.u32	$D1,$H0#lo,${R1}[0]
	vmlal.u32	$D2,$H1#lo,${R1}[0]

	vmlal.u32	$D3,$H1#lo,${R2}[0]
	vmlal.u32	$D0,$H3#lo,${S2}[0]
	vmlal.u32	$D4,$H2#lo,${R2}[0]
	vmlal.u32	$D1,$H4#lo,${S2}[0]
	vmlal.u32	$D2,$H0#lo,${R2}[0]

	vmlal.u32	$D3,$H0#lo,${R3}[0]
	vmlal.u32	$D0,$H2#lo,${S3}[0]
	vmlal.u32	$D4,$H1#lo,${R3}[0]
	vmlal.u32	$D1,$H3#lo,${S3}[0]
	vmlal.u32	$D3,$H4#lo,${S4}[0]

	vmlal.u32	$D2,$H4#lo,${S3}[0]
	vmlal.u32	$D0,$H1#lo,${S4}[0]
	vmlal.u32	$D4,$H0#lo,${R4}[0]
	vmov.i32	$H4,#1<<24		@ padbit, yes, always
	vmlal.u32	$D1,$H2#lo,${S4}[0]
	vmlal.u32	$D2,$H3#lo,${S4}[0]

	vld4.32	{$H0#lo,$H1#lo,$H2#lo,$H3#lo},[$inp]	@ inp[0:1]

	@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
	@ lazy reduction interleaved with base 2^32 -> base 2^26 of
	@ inp[0:3] previously loaded to $H0-$H3 and smashed to $H0-$H4.
	vadd.i64	$D4,$D4,$T0		@ h3 -> h4
	vbic.i32	$D3#lo,#0xfc000000
	vsri.u32	$H4,$H3,#8		@ base 2^32 -> base 2^26
	vadd.i64	$D1,$D1,$T1		@ h0 -> h1

	vbic.i32	$D0#lo,#0xfc000000

	vshrn.u64	$T0#lo,$D4,#26

	vadd.i64	$D2,$D2,$T1		@ h1 -> h2

	vbic.i32	$D4#lo,#0xfc000000

	vbic.i32	$D1#lo,#0xfc000000

	vadd.i32	$D0#lo,$D0#lo,$T0#lo
	vshl.u32	$T0#lo,$T0#lo,#2
	vbic.i32	$H3,#0xfc000000
	vshrn.u64	$T1#lo,$D2,#26

	vaddl.u32	$D0,$D0#lo,$T0#lo	@ h4 -> h0 [widen for a sec]

	vadd.i32	$D3#lo,$D3#lo,$T1#lo	@ h2 -> h3

	vbic.i32	$D2#lo,#0xfc000000
	vbic.i32	$H2,#0xfc000000

	vshrn.u64	$T0#lo,$D0,#26		@ re-narrow

	vbic.i32	$H0,#0xfc000000
	vshr.u32	$T1#lo,$D3#lo,#26
	vbic.i32	$D3#lo,#0xfc000000
	vbic.i32	$D0#lo,#0xfc000000
	vadd.i32	$D1#lo,$D1#lo,$T0#lo	@ h0 -> h1
	vadd.i32	$D4#lo,$D4#lo,$T1#lo	@ h3 -> h4
	vbic.i32	$H1,#0xfc000000

	@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
	@ multiply (inp[0:1]+hash) or inp[2:3] by r^2:r^1

	add	$tbl1,$ctx,#(48+0*9*4)
	add	$tbl0,$ctx,#(48+1*9*4)

	vadd.i32	$H2#hi,$H2#lo,$D2#lo	@ add hash value and move to #hi
	vadd.i32	$H0#hi,$H0#lo,$D0#lo
	vadd.i32	$H3#hi,$H3#lo,$D3#lo
	vadd.i32	$H1#hi,$H1#lo,$D1#lo
	vadd.i32	$H4#hi,$H4#lo,$D4#lo

	vld4.32	{${R0}[1],${R1}[1],${S1}[1],${R2}[1]},[$tbl1]!	@ load r^1
	vld4.32	{${R0}[0],${R1}[0],${S1}[0],${R2}[0]},[$tbl0]!	@ load r^2

	vadd.i32	$H2#lo,$H2#lo,$D2#lo	@ can be redundant
	vmull.u32	$D2,$H2#hi,$R0
	vadd.i32	$H0#lo,$H0#lo,$D0#lo
	vmull.u32	$D0,$H0#hi,$R0
	vadd.i32	$H3#lo,$H3#lo,$D3#lo
	vmull.u32	$D3,$H3#hi,$R0
	vadd.i32	$H1#lo,$H1#lo,$D1#lo
	vmull.u32	$D1,$H1#hi,$R0
	vadd.i32	$H4#lo,$H4#lo,$D4#lo
	vmull.u32	$D4,$H4#hi,$R0

	vmlal.u32	$D0,$H4#hi,$S1
	vld4.32	{${S2}[1],${R3}[1],${S3}[1],${R4}[1]},[$tbl1]!
	vmlal.u32	$D3,$H2#hi,$R1
	vld4.32	{${S2}[0],${R3}[0],${S3}[0],${R4}[0]},[$tbl0]!
	vmlal.u32	$D1,$H0#hi,$R1
	vmlal.u32	$D4,$H3#hi,$R1
	vmlal.u32	$D2,$H1#hi,$R1

	vmlal.u32	$D3,$H1#hi,$R2
	vld1.32	${S4}[1],[$tbl1,:32]
	vmlal.u32	$D0,$H3#hi,$S2
	vld1.32	${S4}[0],[$tbl0,:32]
	vmlal.u32	$D4,$H2#hi,$R2
	vmlal.u32	$D1,$H4#hi,$S2
	vmlal.u32	$D2,$H0#hi,$R2

	vmlal.u32	$D3,$H0#hi,$R3
	addne	$tbl1,$ctx,#(48+2*9*4)
	vmlal.u32	$D0,$H2#hi,$S3
	addne	$tbl0,$ctx,#(48+3*9*4)
	vmlal.u32	$D4,$H1#hi,$R3
	vmlal.u32	$D1,$H3#hi,$S3
	vmlal.u32	$D2,$H4#hi,$S3

	vmlal.u32	$D3,$H4#hi,$S4
	vorn	$MASK,$MASK,$MASK	@ all-ones, can be redundant
	vmlal.u32	$D0,$H1#hi,$S4
	vshr.u64	$MASK,$MASK,#38
	vmlal.u32	$D4,$H0#hi,$R4
	vmlal.u32	$D1,$H2#hi,$S4
	vmlal.u32	$D2,$H3#hi,$S4
	@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
	@ (hash+inp[0:1])*r^4:r^3 and accumulate

	vld4.32	{${R0}[1],${R1}[1],${S1}[1],${R2}[1]},[$tbl1]!	@ load r^3
	vld4.32	{${R0}[0],${R1}[0],${S1}[0],${R2}[0]},[$tbl0]!	@ load r^4

	vmlal.u32	$D2,$H2#lo,$R0
	vmlal.u32	$D0,$H0#lo,$R0
	vmlal.u32	$D3,$H3#lo,$R0
	vmlal.u32	$D1,$H1#lo,$R0
	vmlal.u32	$D4,$H4#lo,$R0

	vmlal.u32	$D0,$H4#lo,$S1
	vld4.32	{${S2}[1],${R3}[1],${S3}[1],${R4}[1]},[$tbl1]!
	vmlal.u32	$D3,$H2#lo,$R1
	vld4.32	{${S2}[0],${R3}[0],${S3}[0],${R4}[0]},[$tbl0]!
	vmlal.u32	$D1,$H0#lo,$R1
	vmlal.u32	$D4,$H3#lo,$R1
	vmlal.u32	$D2,$H1#lo,$R1

	vmlal.u32	$D3,$H1#lo,$R2
	vld1.32	${S4}[1],[$tbl1,:32]
	vmlal.u32	$D0,$H3#lo,$S2
	vld1.32	${S4}[0],[$tbl0,:32]
	vmlal.u32	$D4,$H2#lo,$R2
	vmlal.u32	$D1,$H4#lo,$S2
	vmlal.u32	$D2,$H0#lo,$R2

	vmlal.u32	$D3,$H0#lo,$R3
	vmlal.u32	$D0,$H2#lo,$S3
	vmlal.u32	$D4,$H1#lo,$R3
	vmlal.u32	$D1,$H3#lo,$S3
	vmlal.u32	$D2,$H4#lo,$S3

	vmlal.u32	$D3,$H4#lo,$S4
	vorn	$MASK,$MASK,$MASK	@ all-ones
	vmlal.u32	$D0,$H1#lo,$S4
	vshr.u64	$MASK,$MASK,#38
	vmlal.u32	$D4,$H0#lo,$R4
	vmlal.u32	$D1,$H2#lo,$S4
	vmlal.u32	$D2,$H3#lo,$S4

	@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
	@ horizontal addition

	vadd.i64	$D3#lo,$D3#lo,$D3#hi
	vadd.i64	$D0#lo,$D0#lo,$D0#hi
	vadd.i64	$D4#lo,$D4#lo,$D4#hi
	vadd.i64	$D1#lo,$D1#lo,$D1#hi
	vadd.i64	$D2#lo,$D2#lo,$D2#hi

	@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
	@ lazy reduction, but without narrowing

	vshr.u64	$T0,$D3,#26
	vand.i64	$D3,$D3,$MASK
	vshr.u64	$T1,$D0,#26
	vand.i64	$D0,$D0,$MASK
	vadd.i64	$D4,$D4,$T0		@ h3 -> h4
	vadd.i64	$D1,$D1,$T1		@ h0 -> h1

	vshr.u64	$T0,$D4,#26
	vand.i64	$D4,$D4,$MASK
	vshr.u64	$T1,$D1,#26
	vand.i64	$D1,$D1,$MASK
	vadd.i64	$D2,$D2,$T1		@ h1 -> h2

	vadd.i64	$D0,$D0,$T0

	vshr.u64	$T1,$D2,#26
	vand.i64	$D2,$D2,$MASK
	vadd.i64	$D0,$D0,$T0		@ h4 -> h0
	vadd.i64	$D3,$D3,$T1		@ h2 -> h3

	vshr.u64	$T0,$D0,#26
	vand.i64	$D0,$D0,$MASK
	vshr.u64	$T1,$D3,#26
	vand.i64	$D3,$D3,$MASK
	vadd.i64	$D1,$D1,$T0		@ h0 -> h1
	vadd.i64	$D4,$D4,$T1		@ h3 -> h4

	@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@

	vst4.32	{$D0#lo[0],$D1#lo[0],$D2#lo[0],$D3#lo[0]},[$ctx]!
	vst1.32	{$D4#lo[0]},[$ctx]

	vldmia	sp!,{d8-d15}		@ epilogue

.size	poly1305_blocks_neon,.-poly1305_blocks_neon
.type	poly1305_emit_neon,%function

	ldr	ip,[$ctx,#36]		@ is_base2_26

	beq	.Lpoly1305_emit_enter

	ldmia	$ctx,{$h0-$h4}

	adds	$h0,$h0,$h1,lsl#26	@ base 2^26 -> base 2^32

	adcs	$h1,$h1,$h2,lsl#20

	adcs	$h2,$h2,$h3,lsl#14

	adcs	$h3,$h3,$h4,lsl#8
	adc	$h4,$g0,$h4,lsr#24	@ can be partially reduced ...

	and	$g0,$h4,#-4		@ ... so reduce

	add	$g0,$g0,$g0,lsr#2	@ *= 5
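	@ A C sketch of this step: the five 26-bit limbs are repacked
	@ into four 32-bit words (carries rippling through the adcs
	@ chain), and the bits at 2^130 and above are folded back in
	@ as a multiple of 5:
	@
	@	c   = h4 & ~3;		/* value at 2^130 is c/4 */
	@	h4 &= 3;
	@	h0 += c + (c >> 2);	/* (c/4)*5, carries propagate */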
	adds	$g0,$h0,#5		@ compare to modulus

	tst	$g4,#4			@ did it carry/borrow?
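	@ The final reduction computes g = h + 5: if the sum reaches
	@ 2^130 (bit 2 of the top word, hence the tst with #4), then
	@ h >= 2^130-5 and g mod 2^128 is the answer; otherwise h
	@ already is. A hedged C sketch of the selection:
	@
	@	uint64_t t = (uint64_t)h0 + 5;  g0 = (uint32_t)t;
	@	t = (t >> 32) + h1;  g1 = (uint32_t)t;  /* ... h2, h3 */
	@	t = (t >> 32) + h4;  g4 = (uint32_t)t;
	@	if (g4 & 4) { h0 = g0; h1 = g1; /* etc. */ }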
	ldr	$g3,[$nonce,#12]

	adds	$h0,$h0,$g0		@ accumulate nonce

	str	$h0,[$mac,#0]		@ store the result

.size	poly1305_emit_neon,.-poly1305_emit_neon

.long	0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0

.word	OPENSSL_armcap_P-.Lpoly1305_init

.asciz	"Poly1305 for ARMv4/NEON, CRYPTOGAMS by <appro\@openssl.org>"

#if	__ARM_MAX_ARCH__>=7
.comm	OPENSSL_armcap_P,4,4
foreach (split("\n",$code)) {
	s/\`([^\`]*)\`/eval $1/geo;

	s/\bq([0-9]+)#(lo|hi)/sprintf "d%d",2*$1+($2 eq "hi")/geo or
	s/\bret\b/bx	lr/go or
	s/\bbx\s+lr\b/.word\t0xe12fff1e/go;	# make it possible to compile with -march=armv4

	print $_,"\n";
}
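# The q-register substitution maps each qN#lo/qN#hi token to the D
# register aliasing that half of the NEON Q register (d2N and d2N+1);
# the last substitution emits `bx lr` as its literal opcode 0xe12fff1e,
# since assemblers targeting -march=armv4 reject the mnemonic, while the
# run-time `moveq pc,lr` guard keeps pre-Thumb cores off that path.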
close STDOUT; # enforce flush