3 # ====================================================================
4 # Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
5 # project. The module is, however, dual licensed under OpenSSL and
6 # CRYPTOGAMS licenses depending on where you obtain it. For further
7 # details see http://www.openssl.org/~appro/cryptogams/.
8 # ====================================================================
12 # The module implements bn_GF2m_mul_2x2 polynomial multiplication
13 # used in bn_gf2m.c. It's kind of low-hanging mechanical port from
14 # C for the time being... Except that it has two code paths: pure
15 # integer code suitable for any ARMv4 and later CPU and NEON code
16 # suitable for ARMv7. Pure integer 1x1 multiplication subroutine runs
17 # in ~45 cycles on dual-issue core such as Cortex A8, which is ~50%
18 # faster than compiler-generated code. For ECDH and ECDSA verify (but
19 # not for ECDSA sign) it means 25%-45% improvement depending on key
20 # length, more for longer keys. Even though NEON 1x1 multiplication
# runs in even fewer cycles, ~30, improvement is measurable only on
22 # longer keys. One has to optimize code elsewhere to get NEON glow...
26 # Double bn_GF2m_mul_2x2 performance by using algorithm from paper
27 # referred below, which improves ECDH and ECDSA verify benchmarks
30 # Câmara, D.; Gouvêa, C. P. L.; López, J. & Dahab, R.: Fast Software
31 # Polynomial Multiplication on ARM Processors using the NEON Engine.
33 # http://conradoplg.cryptoland.net/files/2010/12/mocrysen13.pdf
# Command-line handling: arguments are <flavour> and <output file> in either
# order; a token shaped like a filename is taken as the output file.
# NOTE(review): $flavour is presumably shifted off @ARGV just above this
# excerpt - confirm against the full file.
if ($flavour=~/^\w[\w\-]*\.\w+$/) { $output=$flavour; undef $flavour; }
else { while (($output=shift) && ($output!~/^\w[\w\-]*\.\w+$/)) {} }
# For a real flavour (not "void"), locate the arm-xlate.pl translator:
# first next to this script, then in the shared ../../perlasm directory.
if ($flavour && $flavour ne "void") {
# Directory of the running script ($0); $1 is the path up to the last
# separator (handles both / and \).
$0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
( $xlate="${dir}arm-xlate.pl" and -f $xlate ) or
( $xlate="${dir}../../perlasm/arm-xlate.pl" and -f $xlate) or
die "can't locate arm-xlate.pl";
# Pipe all generated code through the flavour-specific translator.
# Check the open: a missing interpreter or translator previously failed
# silently and produced an empty/truncated output file.
open STDOUT,"| \"$^X\" $xlate $flavour $output" or die "can't call $xlate: $!";
# "void" flavour: write raw output directly.  Use 3-arg open (avoids mode
# injection via the filename) and check for failure instead of silently
# discarding all output when the file cannot be created.
open STDOUT,'>',$output or die "can't open $output: $!";
# private interface to mul_1x1_ialu
# Two overlapping alias views of the same scratch registers: table-building
# names (a0,a1,a2,a12,a4,a14) and lookup/accumulation names (hi,lo,t0,t1,
# i0,i1).  The chained assignment makes each pair share one register.
($a0,$a1,$a2,$a12,$a4,$a14)=
($hi,$lo,$t0,$t1, $i0,$i1 )=map("r$_",(4..9),12);
@ mul_1x1_ialu: carry-less (polynomial, GF(2)[x]) 32x32->64-bit multiply.
@ Builds an 8-entry table of XOR-combinations of a1, a1<<1, a1<<2 (where
@ a1 is the low 30 bits of "a") on the stack, then consumes "b" three
@ bits at a time, folding table entries into the 64-bit result hi:lo.
@ Bits 30 and 31 of "a" are handled by the predicated eor pairs below.
@ NOTE(review): the mask setup and the tst instructions that drive the
@ eorne pairs are outside this excerpt; mask presumably holds 7<<2
@ (word-scaled 3-bit index) - confirm against the full file.
.type mul_1x1_ialu,%function
bic $a1,$a,#3<<30 @ a1=a&0x3fffffff
str $a0,[sp,#0] @ tab[0]=0
add $a2,$a1,$a1 @ a2=a1<<1
str $a1,[sp,#4] @ tab[1]=a1
eor $a12,$a1,$a2 @ a1^a2
str $a2,[sp,#8] @ tab[2]=a2
mov $a4,$a1,lsl#2 @ a4=a1<<2
str $a12,[sp,#12] @ tab[3]=a1^a2
eor $a14,$a1,$a4 @ a1^a4
str $a4,[sp,#16] @ tab[4]=a4
eor $a0,$a2,$a4 @ a2^a4
str $a14,[sp,#20] @ tab[5]=a1^a4
eor $a12,$a12,$a4 @ a1^a2^a4
str $a0,[sp,24] @ tab[6]=a2^a4
and $i0,$mask,$b,lsl#2 @ word offset of tab[b & 0x7]
str $a12,[sp,#28] @ tab[7]=a1^a2^a4
and $i1,$mask,$b,lsr#1 @ word offset of tab[b >> 3 & 0x7]
ldr $lo,[sp,$i0] @ tab[b & 0x7]
and $i0,$mask,$b,lsr#4 @ word offset of tab[b >> 6 & 0x7]
ldr $t1,[sp,$i1] @ tab[b >> 3 & 0x7]
and $i1,$mask,$b,lsr#7 @ word offset of tab[b >> 9 & 0x7]
ldr $t0,[sp,$i0] @ tab[b >> 6 & 0x7]
eor $lo,$lo,$t1,lsl#3 @ stall
ldr $t1,[sp,$i1] @ tab[b >> 9 & 0x7]
and $i0,$mask,$b,lsr#10
@ each entry is folded into hi:lo at its bit offset: low part via lsl,
@ spill-over via lsr into the high word
eor $lo,$lo,$t0,lsl#6
eor $hi,$hi,$t0,lsr#26
ldr $t0,[sp,$i0] @ tab[b >> 12 & 0x7]
and $i1,$mask,$b,lsr#13
eor $lo,$lo,$t1,lsl#9
eor $hi,$hi,$t1,lsr#23
ldr $t1,[sp,$i1] @ tab[b >> 15 & 0x7]
and $i0,$mask,$b,lsr#16
eor $lo,$lo,$t0,lsl#12
eor $hi,$hi,$t0,lsr#20
ldr $t0,[sp,$i0] @ tab[b >> 18 & 0x7]
and $i1,$mask,$b,lsr#19
eor $lo,$lo,$t1,lsl#15
eor $hi,$hi,$t1,lsr#17
ldr $t1,[sp,$i1] @ tab[b >> 21 & 0x7]
and $i0,$mask,$b,lsr#22
eor $lo,$lo,$t0,lsl#18
eor $hi,$hi,$t0,lsr#14
ldr $t0,[sp,$i0] @ tab[b >> 24 & 0x7]
and $i1,$mask,$b,lsr#25
eor $lo,$lo,$t1,lsl#21
eor $hi,$hi,$t1,lsr#11
ldr $t1,[sp,$i1] @ tab[b >> 27 & 0x7]
and $i0,$mask,$b,lsr#28 @ word offset of tab[b >> 30] (only 2 bits left)
eor $lo,$lo,$t0,lsl#24
eor $hi,$hi,$t0,lsr#8
ldr $t0,[sp,$i0] @ tab[b >> 30 ]
@ conditionally fold b back in for bit 30 of a (flags set by a tst
@ instruction not shown in this excerpt)
eorne $lo,$lo,$b,lsl#30
eorne $hi,$hi,$b,lsr#2
eor $lo,$lo,$t1,lsl#27
eor $hi,$hi,$t1,lsr#5
@ same for bit 31 of a
eorne $lo,$lo,$b,lsl#31
eorne $hi,$hi,$b,lsr#1
eor $lo,$lo,$t0,lsl#30
eor $hi,$hi,$t0,lsr#2
.size mul_1x1_ialu,.-mul_1x1_ialu
# void bn_GF2m_mul_2x2(BN_ULONG *r,
# BN_ULONG a1,BN_ULONG a0,
# BN_ULONG b1,BN_ULONG b0); # r[3..0]=a1a0·b1b0
# Integer (non-NEON) path: Karatsuba-style 2x2 polynomial multiply built
# from three mul_1x1_ialu calls.  Several lines (armcap test/branch to the
# NEON path, product stores, combine step) are elided from this excerpt.
.global bn_GF2m_mul_2x2
.type bn_GF2m_mul_2x2,%function
#if __ARM_MAX_ARCH__>=7
ldr r12,.LOPENSSL_armcap @ PC-relative offset to OPENSSL_armcap_P
.Lpic: ldr r12,[pc,r12] @ capability word (used to select NEON path)
$ret="r10"; # reassigned 1st argument
stmdb sp!,{r4-r10,lr}
mov $ret,r0 @ reassign 1st argument
ldr r3,[sp,#32] @ load b0
sub sp,sp,#32 @ allocate tab[8]
bl mul_1x1_ialu @ a1·b1
eor $b,$b,r3 @ flip b0 and b1
eor $a,$a,r2 @ flip a0 and a1
bl mul_1x1_ialu @ a0·b0
bl mul_1x1_ialu @ (a1+a0)·(b1+b0)
# scratch registers holding the four result words during the combine
@r=map("r$_",(6..9));
@ reload the two products stored earlier (stores not shown in excerpt)
ldmia $ret,{@r[0]-@r[3]}
add sp,sp,#32 @ destroy tab[8]
ldmia sp!,{r4-r10,pc}
ldmia sp!,{r4-r10,lr}
moveq pc,lr @ be binary compatible with V4, yet
bx lr @ interoperable with Thumb ISA:-)
# NEON path register names: q-registers for 128-bit intermediates, d26-d30
# for the two 64-bit inputs and the three wrap-around masks.  Note both
# map lists supply more values than there are names; extras are discarded.
my ($r,$t0,$t1,$t2,$t3)=map("q$_",(0..3,8..12));
my ($a,$b,$k48,$k32,$k16)=map("d$_",(26..31));
#if __ARM_MAX_ARCH__>=7
@ NEON path: 64x64->128-bit carry-less multiply assembled from vmull.p8
@ (8x8-bit polynomial) partial products, per the Camara-Gouvea-Lopez-Dahab
@ method referenced in the file header.  The reg#lo / reg#hi suffixes are
@ rewritten to d-registers by the Perl post-processing loop at the bottom
@ of the file.  The final reduction/store tail is elided from this excerpt.
ldr r12, [sp] @ 5th argument
vmov.i64 $k48, #0x0000ffffffffffff
vmov.i64 $k32, #0x00000000ffffffff
vmov.i64 $k16, #0x000000000000ffff
vext.8 $t0#lo, $a, $a, #1 @ A1
vmull.p8 $t0, $t0#lo, $b @ F = A1*B
vext.8 $r#lo, $b, $b, #1 @ B1
vmull.p8 $r, $a, $r#lo @ E = A*B1
vext.8 $t1#lo, $a, $a, #2 @ A2
vmull.p8 $t1, $t1#lo, $b @ H = A2*B
vext.8 $t3#lo, $b, $b, #2 @ B2
vmull.p8 $t3, $a, $t3#lo @ G = A*B2
vext.8 $t2#lo, $a, $a, #3 @ A3
veor $t0, $t0, $r @ L = E + F
vmull.p8 $t2, $t2#lo, $b @ J = A3*B
vext.8 $r#lo, $b, $b, #3 @ B3
veor $t1, $t1, $t3 @ M = G + H
vmull.p8 $r, $a, $r#lo @ I = A*B3
veor $t0#lo, $t0#lo, $t0#hi @ t0 = (L) (P0 + P1) << 8
vand $t0#hi, $t0#hi, $k48 @ mask bytes that would wrap around
vext.8 $t3#lo, $b, $b, #4 @ B4
veor $t1#lo, $t1#lo, $t1#hi @ t1 = (M) (P2 + P3) << 16
vand $t1#hi, $t1#hi, $k32 @ mask wrap-around bytes
vmull.p8 $t3, $a, $t3#lo @ K = A*B4
veor $t2, $t2, $r @ N = I + J
veor $t0#lo, $t0#lo, $t0#hi
veor $t1#lo, $t1#lo, $t1#hi
veor $t2#lo, $t2#lo, $t2#hi @ t2 = (N) (P4 + P5) << 24
vand $t2#hi, $t2#hi, $k16 @ mask wrap-around bytes
vext.8 $t0, $t0, $t0, #15 @ rotate corrected partial into position (<<8)
veor $t3#lo, $t3#lo, $t3#hi @ t3 = (K) (P6 + P7) << 32
vext.8 $t1, $t1, $t1, #14 @ position for <<16
veor $t2#lo, $t2#lo, $t2#hi
vmull.p8 $r, $a, $b @ D = A*B
vext.8 $t3, $t3, $t3, #12 @ position for <<32
vext.8 $t2, $t2, $t2, #13 @ position for <<24
.size bn_GF2m_mul_2x2,.-bn_GF2m_mul_2x2
#if __ARM_MAX_ARCH__>=7
@ PC-relative offset consumed at .Lpic; +8 accounts for the ARM-state
@ pipeline (PC reads two instructions ahead).  The .LOPENSSL_armcap
@ label line itself is elided from this excerpt.
.word OPENSSL_armcap_P-(.Lpic+8)
.asciz "GF(2^m) Multiplication for ARMv4/NEON, CRYPTOGAMS by <appro\@openssl.org>"
#if __ARM_MAX_ARCH__>=7
.comm OPENSSL_armcap_P,4,4
# Post-process the accumulated assembly line by line:
#  - resolve `...` expressions by evaluating them as Perl,
#  - expand qN#lo / qN#hi into the matching d-register (d = 2*N, +1 for hi),
#  - lower "ret" / "bx lr" for pre-ARMv5 toolchains.
foreach (split("\n",$code)) {
s/\`([^\`]*)\`/eval $1/geo;
s/\bq([0-9]+)#(lo|hi)/sprintf "d%d",2*$1+($2 eq "hi")/geo or
s/\bret\b/bx lr/go or
s/\bbx\s+lr\b/.word\t0xe12fff1e/go; # make it possible to compile with -march=armv4
# Enforce flush and surface buffered-write failures: errors on a write
# handle (e.g. disk full, broken translator pipe) are only reported at
# close, so an unchecked close could silently truncate the output.
close STDOUT or die "error closing STDOUT: $!";