# ====================================================================
# Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
# project. The module is, however, dual licensed under OpenSSL and
# CRYPTOGAMS licenses depending on where you obtain it. For further
# details see http://www.openssl.org/~appro/cryptogams/.
# ====================================================================
# The module implements the bn_GF2m_mul_2x2 polynomial multiplication
# used in bn_gf2m.c. For the time being it is little more than a
# mechanical port from C, except that it has two code paths: pure
# integer code suitable for any ARMv4 and later CPU, and NEON code
# suitable for ARMv7. The pure integer 1x1 multiplication subroutine
# runs in ~45 cycles on a dual-issue core such as Cortex-A8, which is
# ~50% faster than compiler-generated code. For ECDH and ECDSA verify
# (but not for ECDSA sign) this means a 25%-45% improvement depending
# on key length, more for longer keys. Even though the NEON 1x1
# multiplication runs in even fewer cycles, ~30, the improvement is
# measurable only on longer keys; one has to optimize code elsewhere
# to let the NEON path shine...
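# Illustrative reference model, not used when generating the assembly:
# a straightforward carry-less (GF(2)[x]) 1x1 multiplication of two
# 32-bit words, which is what the mul_1x1_ialu and mul_1x1_neon
# routines below compute (with different register conventions).  The
# helper name ref_mul_1x1 is ours, purely for exposition.
sub ref_mul_1x1 {
    my ($x,$y) = @_;			# 32-bit polynomial operands
    my ($h,$l) = (0,0);
    for my $i (0..31) {
	next unless ($y>>$i)&1;		# for every set bit of y...
	$l ^= ($x<<$i)&0xffffffff;	# ...XOR x, shifted into place,
	$h ^= $x>>(32-$i) if $i;	# into the low and high halves
    }
    return ($h,$l);			# 64-bit product as two 32-bit words
}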
# the two name sets below alias the same registers: they serve both as
# the a-decomposition values and as the hi/lo/index temporaries
($a0,$a1,$a2,$a12,$a4,$a14)=
($hi,$lo,$t0,$t1, $i0,$i1 )=map("r$_",(4..9),12);
sub Dlo() { shift=~m|q([1]?[0-9])|?"d".($1*2):""; }
sub Dhi() { shift=~m|q([1]?[0-9])|?"d".($1*2+1):""; }
sub Q() { shift=~m|d([1-3]?[02468])|?"q".($1/2):""; }
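# For example, Dlo("q1") yields "d2", Dhi("q1") yields "d3" and Q("d4")
# yields "q2": each 128-bit NEON register qN aliases the 64-bit pair
# d(2N) and d(2N+1).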
.type mul_1x1_neon,%function
vshl.u64 `&Dlo("q1")`,d16,#8 @ q1-q3 hold shifted copies of $a
vmull.p8 `&Q("d0")`,d16,d17 @ a·bb
vshl.u64 `&Dlo("q2")`,d16,#16
vmull.p8 q1,`&Dlo("q1")`,d17 @ a<<8·bb
vshl.u64 `&Dlo("q3")`,d16,#24
vmull.p8 q2,`&Dlo("q2")`,d17 @ a<<16·bb
vshr.u64 `&Dlo("q1")`,#8
vmull.p8 q3,`&Dlo("q3")`,d17 @ a<<24·bb
vshl.u64 `&Dhi("q1")`,#24
vshr.u64 `&Dlo("q2")`,#16
vshl.u64 `&Dhi("q2")`,#16
vshr.u64 `&Dlo("q3")`,#24
vshl.u64 `&Dhi("q3")`,#8
.size mul_1x1_neon,.-mul_1x1_neon
.type mul_1x1_ialu,%function
bic $a1,$a,#3<<30 @ a1=a&0x3fffffff
str $a0,[sp,#0] @ tab[0]=0
add $a2,$a1,$a1 @ a2=a1<<1
str $a1,[sp,#4] @ tab[1]=a1
eor $a12,$a1,$a2 @ a1^a2
str $a2,[sp,#8] @ tab[2]=a2
mov $a4,$a1,lsl#2 @ a4=a1<<2
str $a12,[sp,#12] @ tab[3]=a1^a2
eor $a14,$a1,$a4 @ a1^a4
str $a4,[sp,#16] @ tab[4]=a4
eor $a0,$a2,$a4 @ a2^a4
str $a14,[sp,#20] @ tab[5]=a1^a4
eor $a12,$a12,$a4 @ a1^a2^a4
str $a0,[sp,#24] @ tab[6]=a2^a4
and $i0,$mask,$b,lsl#2
str $a12,[sp,#28] @ tab[7]=a1^a2^a4
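@ At this point tab[i] (i=0..7) holds the carry-less product of the
@ 3-bit index i and a1, where a1 is a with its two top bits cleared so
@ that every entry still fits in 32 bits.  The code below walks b three
@ bits at a time (and finally its top two bits), XOR-ing tab lookups
@ shifted into place into the lo/hi pair; the two bits of a dropped by
@ the bic above are folded back in at the end by the conditional eorne
@ instructions.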
and $i1,$mask,$b,lsr#1
ldr $lo,[sp,$i0] @ tab[b & 0x7]
and $i0,$mask,$b,lsr#4
ldr $t1,[sp,$i1] @ tab[b >> 3 & 0x7]
and $i1,$mask,$b,lsr#7
ldr $t0,[sp,$i0] @ tab[b >> 6 & 0x7]
eor $lo,$lo,$t1,lsl#3 @ stall
ldr $t1,[sp,$i1] @ tab[b >> 9 & 0x7]
and $i0,$mask,$b,lsr#10
eor $lo,$lo,$t0,lsl#6
eor $hi,$hi,$t0,lsr#26
ldr $t0,[sp,$i0] @ tab[b >> 12 & 0x7]
and $i1,$mask,$b,lsr#13
eor $lo,$lo,$t1,lsl#9
eor $hi,$hi,$t1,lsr#23
ldr $t1,[sp,$i1] @ tab[b >> 15 & 0x7]
and $i0,$mask,$b,lsr#16
eor $lo,$lo,$t0,lsl#12
eor $hi,$hi,$t0,lsr#20
ldr $t0,[sp,$i0] @ tab[b >> 18 & 0x7]
and $i1,$mask,$b,lsr#19
eor $lo,$lo,$t1,lsl#15
eor $hi,$hi,$t1,lsr#17
ldr $t1,[sp,$i1] @ tab[b >> 21 & 0x7]
and $i0,$mask,$b,lsr#22
eor $lo,$lo,$t0,lsl#18
eor $hi,$hi,$t0,lsr#14
ldr $t0,[sp,$i0] @ tab[b >> 24 & 0x7]
and $i1,$mask,$b,lsr#25
eor $lo,$lo,$t1,lsl#21
eor $hi,$hi,$t1,lsr#11
ldr $t1,[sp,$i1] @ tab[b >> 27 & 0x7]
and $i0,$mask,$b,lsr#28
eor $lo,$lo,$t0,lsl#24
eor $hi,$hi,$t0,lsr#8
ldr $t0,[sp,$i0] @ tab[b >> 30]
eorne $lo,$lo,$b,lsl#30
eorne $hi,$hi,$b,lsr#2
eor $lo,$lo,$t1,lsl#27
eor $hi,$hi,$t1,lsr#5
eorne $lo,$lo,$b,lsl#31
eorne $hi,$hi,$b,lsr#1
eor $lo,$lo,$t0,lsl#30
eor $hi,$hi,$t0,lsr#2
.size mul_1x1_ialu,.-mul_1x1_ialu
.global bn_GF2m_mul_2x2
.type bn_GF2m_mul_2x2,%function
ldr r12,.LOPENSSL_armcap
.Lpic: ldr r12,[pc,r12]
($A1,$B1,$A0,$B0,$A0B0,$A1B1)=map("d$_",(18..23));
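# Another illustrative sketch, again not used for code generation: the
# 2x2 multiplication below is one step of Karatsuba over GF(2)[x], i.e.
# three 1x1 products a1·b1, a0·b0 and (a1^a0)·(b1^b0), with the middle
# 64 bits reconstructed by XOR-ing all three together.  ref_mul_2x2 and
# ref_mul_1x1 (defined near the top) are our own helper names.
sub ref_mul_2x2 {
    my ($ah,$al,$bh,$bl) = @_;				# a1,a0,b1,b0
    my ($hh_h,$hh_l) = ref_mul_1x1($ah,$bh);		# a1·b1
    my ($ll_h,$ll_l) = ref_mul_1x1($al,$bl);		# a0·b0
    my ($mm_h,$mm_l) = ref_mul_1x1($ah^$al,$bh^$bl);	# (a1^a0)·(b1^b0)
    $mm_h ^= $hh_h ^ $ll_h;		# middle term: XOR the outer
    $mm_l ^= $hh_l ^ $ll_l;		# products back out
    return ($hh_h, $hh_l^$mm_h, $ll_h^$mm_l, $ll_l);	# r[3],r[2],r[1],r[0]
}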
vmov.32 $B1,r3,r3 @ two copies of b1
vmov.32 ${A1}[0],r1 @ a1
vld1.32 ${B0}[],[sp,:32] @ two copies of b0
vmov.32 ${A0}[0],r2 @ a0
bl mul_1x1_neon @ a1·b1
bl mul_1x1_neon @ a0·b0
bl mul_1x1_neon @ (a0+a1)·(b0+b1)
veor d0,$A0 @ (a0+a1)·(b0+b1)-a0·b0-a1·b1
vst1.32 {${A0B0}[0]},[r0,:32]!
vst1.32 {${A0B0}[1]},[r0,:32]!
vst1.32 {${A1B1}[0]},[r0,:32]!
vst1.32 {${A1B1}[1]},[r0,:32]
$ret="r10"; # reassigned 1st argument
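# bn_GF2m_mul_2x2 receives r0=r (output pointer), r1=a1, r2=a0, r3=b1
# and b0 on the stack (at [sp,#32] once r4-r10,lr are pushed).  Because
# mul_1x1_ialu consumes its operands in the argument registers, the
# output pointer is parked in r10 for the duration of the three calls.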
stmdb sp!,{r4-r10,lr}
mov $ret,r0 @ reassign 1st argument
ldr r3,[sp,#32] @ load b0
sub sp,#32 @ allocate tab[8]
bl mul_1x1_ialu @ a1·b1
eor $b,r3 @ flip b0 and b1
eor $a,r2 @ flip a0 and a1
bl mul_1x1_ialu @ a0·b0
bl mul_1x1_ialu @ (a1+a0)·(b1+b0)
@r=map("r$_",(6..9));
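# The a1·b1 and a0·b0 products already sit in the output words r[0..3];
# lo/hi now hold (a1+a0)·(b1+b0), and the words loaded below are XOR-ed
# with them to reconstruct the middle 64 bits of the product, mirroring
# ref_mul_2x2 above.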
ldmia $ret,{@r[0]-@r[3]}
add sp,#32 @ destroy tab[8]
ldmia sp!,{r4-r10,pc}
ldmia sp!,{r4-r10,lr}
moveq pc,lr @ be binary compatible with V4, yet
bx lr @ interoperable with Thumb ISA:-)
.size bn_GF2m_mul_2x2,.-bn_GF2m_mul_2x2
.word OPENSSL_armcap_P-(.Lpic+8)
.asciz "GF2m Multiplication for ARMv4/NEON, CRYPTOGAMS by <appro\@openssl.org>"
.comm OPENSSL_armcap_P,4,4
$code =~ s/\`([^\`]*)\`/eval $1/gem;
$code =~ s/\bbx\s+lr\b/.word\t0xe12fff1e/gm; # make it possible to compile with -march=armv4
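# 0xe12fff1e is the machine encoding of "bx lr": emitting the raw word
# lets pre-Thumb (-march=armv4) assemblers process the file, while the
# instruction itself is only reached on CPUs that actually support it.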
close STDOUT; # enforce flush