# Copyright 2007-2018 The OpenSSL Project Authors. All Rights Reserved.
#
# Licensed under the Apache License 2.0 (the "License"). You may not use
# this file except in compliance with the License. You can obtain a copy
# in the file LICENSE in the source distribution or at
# https://www.openssl.org/source/license.html

# ====================================================================
# Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
# project. The module is, however, dual licensed under OpenSSL and
# CRYPTOGAMS licenses depending on where you obtain it. For further
# details see http://www.openssl.org/~appro/cryptogams/.
# ====================================================================
# Montgomery multiplication for ARMv4.
#
# Performance improvement naturally varies among CPU implementations
# and compilers. The code was observed to provide +65-35% improvement
# [depending on key length, less for longer keys] on ARM920T, and
# +115-80% on Intel IXP425. This is compared to the pre-bn_mul_mont
# code base and compiler-generated code with inlined umull and even
# umlal instructions. The latter means that this code didn't really
# have the "advantage" of utilizing some "secret" instruction.
#
# The code is interoperable with the Thumb ISA and is rather compact,
# less than 1/2KB. A Windows CE port would be trivial, as it's
# exclusively about decorations; ABI and instruction syntax are
# identical.
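#
# For reference, bn_mul_mont computes r = a*b*R^-1 mod n, where
# R = 2^(32*num). Below is a bignum sketch of the word-by-word
# reduction the code implements (illustrative only, never executed;
# names are ad hoc; the assembly interleaves the multiplication by
# b[i] with each reduction step and works on 32-bit words with
# hardware carries):
#
#	use Math::BigInt;
#
#	sub mont_mul_ref {
#	    my ($a,$b,$n,$n0,$num) = @_;	# $n0 = -n^-1 mod 2^32
#	    my $w = Math::BigInt->new(2)->bpow(32);
#	    my $t = $a->copy()->bmul($b);
#	    for (my $i=0; $i<$num; $i++) {	# one step per word of b
#		my $m = ($t % $w) * $n0 % $w;
#		$t->badd($n->copy()->bmul($m));	# low word becomes zero...
#		$t->bdiv($w);			# ...and is shifted out
#	    }
#	    $t->bsub($n) if $t->bcmp($n) >= 0;	# final conditional subtraction
#	    return $t;
#	}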
# Add NEON code path, which handles lengths divisible by 8. RSA/DSA
# performance improvement on Cortex-A8 is ~45-100% depending on key
# length, more for longer keys. On Cortex-A15 the span is ~10-105%.
# On Snapdragon S4 the improvement was measured to vary from ~70% to
# an incredible ~380%, yes, 4.8x faster, for RSA4096 sign. But this is
# rather because the original integer-only code seems to perform
# suboptimally on S4. The situation on Cortex-A9 is unfortunately
# different. It's being looked into, but the trouble is that
# performance for vectors longer than 256 bits is actually a couple
# of percent worse than for integer-only code. The code is chosen
# for execution on all NEON-capable processors, because the gain on
# others outweighs the marginal loss on Cortex-A9.
# Align Cortex-A9 performance with the November 2013 improvements, i.e.
# NEON code is now ~20-105% faster than integer-only code on this
# processor. But this optimization further improved performance even
# on other processors: the NEON code path is ~45-180% faster than the
# original integer-only code on Cortex-A8, ~10-210% on Cortex-A15, and
# ~70-450% on Snapdragon S4.
# $output is the last argument if it looks like a file (it has an extension)
# $flavour is the first argument if it doesn't look like a file
my $output = $#ARGV >= 0 && $ARGV[$#ARGV] =~ m|\.\w+$| ? pop : undef;
my $flavour = $#ARGV >= 0 && $ARGV[0] !~ m|\.| ? shift : undef;
if ($flavour && $flavour ne "void") {
    $0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
    ( $xlate="${dir}arm-xlate.pl" and -f $xlate ) or
    ( $xlate="${dir}../../perlasm/arm-xlate.pl" and -f $xlate) or
    die "can't locate arm-xlate.pl";

    open STDOUT,"| \"$^X\" $xlate $flavour \"$output\""
        or die "can't call $xlate: $!";
} else {
    $output and open STDOUT,">$output";
}
$num="r0";	# starts as num argument, but holds &tp[num-1]
$bp="r2"; $bi="r2"; $rp="r2";
###########	# r9 is reserved by ELF as platform specific, e.g. TLS pointer
$alo="r10";	# sl, gcc uses it to keep @GOT
###########	# r13 is stack pointer
###########	# r15 is program counter

#### argument block layout relative to &tp[num-1], a.k.a. $num
# ap permanently resides in r1
# np permanently resides in r3
$_num="$num,#15*4";	$_bpend=$_num;
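# The $_xxx strings above are "rN,#offset" memory operands: once $num is
# re-pointed at &tp[num-1], the {r0,r2} pair pushed at entry is found at
# offsets 12*4 and 13*4 (the saved rp and bp), and the caller's stacked
# arguments n0 and num follow at 14*4 and 15*4.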
#include "arm_arch.h"

#if defined(__thumb2__)

#if __ARM_MAX_ARCH__>=7
# ifdef	_WIN32
.word	OPENSSL_armcap_P
# else
.word	OPENSSL_armcap_P-.Lbn_mul_mont
# endif

.type	bn_mul_mont,%function
	ldr	ip,[sp,#4]		@ load num
	stmdb	sp!,{r0,r2}		@ sp points at argument block
#if __ARM_MAX_ARCH__>=7
	ldr	r0,.LOPENSSL_armcap
# if defined(__APPLE__) || defined(_WIN32)
	tst	r0,#ARMV7_NEON		@ NEON available?
	mov	$num,ip			@ load num
	stmdb	sp!,{r4-r12,lr}		@ save 10 registers

	mov	$num,$num,lsl#2		@ rescale $num for byte count
	sub	sp,sp,$num		@ alloca(4*num)
	sub	sp,sp,#4		@ +extra dword
	sub	$num,$num,#4		@ "num=num-1"
	add	$tp,$bp,$num		@ &bp[num-1]
	add	$num,sp,$num		@ $num to point at &tp[num-1]

	ldr	$bi,[$bp]		@ bp[0]
	ldr	$aj,[$ap],#4		@ ap[0],ap++
	ldr	$nj,[$np],#4		@ np[0],np++
	str	$tp,[$_bpend]		@ save &bp[num]

	umull	$alo,$ahi,$aj,$bi	@ ap[0]*bp[0]
	str	$n0,[$_n0]		@ save n0 value
	mul	$n0,$alo,$n0		@ "tp[0]"*n0
	umlal	$alo,$nlo,$nj,$n0	@ np[0]*n0+"t[0]"
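	@ Note: $n0 now holds the Montgomery multiplier m = "tp[0]"*n0,
	@ chosen so that (tp[0]+np[0]*m) mod 2^32 == 0; the low word of
	@ the accumulator is therefore zero and is simply shifted out.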
	ldr	$aj,[$ap],#4		@ ap[j],ap++
	ldr	$nj,[$np],#4		@ np[j],np++
	umlal	$alo,$ahi,$aj,$bi	@ ap[j]*bp[0]
	umlal	$nlo,$nhi,$nj,$n0	@ np[j]*n0
	str	$nlo,[$tp],#4		@ tp[j-1]=,tp++

	ldr	$tp,[$_bp]		@ restore bp
	ldr	$n0,[$_n0]		@ restore n0
	str	$nlo,[$num]		@ tp[num-1]=
	str	$nhi,[$num,#4]		@ tp[num]=

	sub	$tj,$num,$tj		@ "original" $num-1 value
	sub	$ap,$ap,$tj		@ "rewind" ap to &ap[1]
	ldr	$bi,[$tp,#4]!		@ *(++bp)
	sub	$np,$np,$tj		@ "rewind" np to &np[1]
	ldr	$aj,[$ap,#-4]		@ ap[0]
	ldr	$alo,[sp]		@ tp[0]
	ldr	$nj,[$np,#-4]		@ np[0]
	ldr	$tj,[sp,#4]		@ tp[1]

	umlal	$alo,$ahi,$aj,$bi	@ ap[0]*bp[i]+tp[0]
	str	$tp,[$_bp]		@ save bp
	umlal	$alo,$nlo,$nj,$n0	@ np[0]*n0+"tp[0]"

	ldr	$aj,[$ap],#4		@ ap[j],ap++
	adds	$alo,$ahi,$tj		@ +=tp[j]
	ldr	$nj,[$np],#4		@ np[j],np++
	umlal	$alo,$ahi,$aj,$bi	@ ap[j]*bp[i]
	umlal	$nlo,$nhi,$nj,$n0	@ np[j]*n0
	ldr	$tj,[$tp,#8]		@ tp[j+1]
	str	$nlo,[$tp],#4		@ tp[j-1]=,tp++

	ldr	$tp,[$_bp]		@ restore bp
	ldr	$n0,[$_n0]		@ restore n0
	ldr	$tj,[$_bpend]		@ restore &bp[num]
	str	$nlo,[$num]		@ tp[num-1]=
	str	$nhi,[$num,#4]		@ tp[num]=

	ldr	$rp,[$_rp]		@ pull rp
	add	$num,$num,#4		@ $num to point at &tp[num]
	sub	$aj,$num,$aj		@ "original" num value
	mov	$tp,sp			@ "rewind" $tp
	mov	$ap,$tp			@ "borrow" $ap
	sub	$np,$np,$aj		@ "rewind" $np to &np[0]

	subs	$tj,$tj,$tj		@ "clear" carry flag
.Lsub:	ldr	$tj,[$tp],#4
	sbcs	$tj,$tj,$nj		@ tp[j]-np[j]
	str	$tj,[$rp],#4		@ rp[j]=
	teq	$tp,$num		@ preserve carry
	sbcs	$nhi,$nhi,#0		@ top-most carry
	mov	$tp,sp			@ "rewind" $tp
	sub	$rp,$rp,$aj		@ "rewind" $rp
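	@ If the subtraction above borrowed (tp < np), carry is clear and
	@ the conditional copy below keeps the original tp[] words in
	@ rp[]; otherwise rp[] retains tp-np. Either way tp[] is wiped
	@ (with the public sp value) as it is traversed.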
.Lcopy:	ldr	$tj,[$tp]		@ conditional copy
	str	sp,[$tp],#4		@ zap tp
	teq	$tp,$num		@ preserve carry

	add	sp,sp,#4		@ skip over tp[num+1]
	ldmia	sp!,{r4-r12,lr}		@ restore registers
	add	sp,sp,#2*4		@ skip over {r0,r2}
	moveq	pc,lr			@ be binary compatible with V4, yet
	bx	lr			@ interoperable with Thumb ISA:-)
.size	bn_mul_mont,.-bn_mul_mont
my ($A0,$A1,$A2,$A3)=map("d$_",(0..3));
my ($N0,$N1,$N2,$N3)=map("d$_",(4..7));
my ($Z,$Temp)=("q4","q5");
my @ACC=map("q$_",(6..13));
my ($Bi,$Ni,$M0)=map("d$_",(28..31));

my ($rptr,$aptr,$bptr,$nptr,$n0,$num)=map("r$_",(0..5));
my ($tinptr,$toutptr,$inner,$outer,$bnptr)=map("r$_",(6..11));
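# Informal overview of the NEON path: b[] and the per-word Montgomery
# multipliers are consumed one 32-bit word at a time, while the running
# tp[] values live as 64-bit lanes in q6-q13. 32x32 products can thus
# be accumulated with vmlal.u32 for 8 words of b without propagating
# any carries; carries are folded afterwards, 16 bits at a time, by the
# vshr/vadd/vzip.16 sequences further down.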
#if __ARM_MAX_ARCH__>=7

.type	bn_mul8x_mont_neon,%function
	vstmdb	sp!,{d8-d15}		@ ABI specification says so
	ldmia	ip,{r4-r5}		@ load rest of parameter block

	@ special case for $num==8, everything is in register bank...

	vld1.32	{${Bi}[0]}, [$bptr,:32]!
	veor	$zero,$zero,$zero
	sub	$toutptr,sp,$num,lsl#4
	vld1.32	{$A0-$A3}, [$aptr]!	@ can't specify :32 :-(
	and	$toutptr,$toutptr,#-64
	vld1.32	{${M0}[0]}, [$n0,:32]
	mov	sp,$toutptr		@ alloca
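	@ The frame is aligned to 64 bytes, which satisfies the :256
	@ (32-byte) alignment hints on the vst1/vld1 instructions below.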
	vmull.u32	@ACC[0],$Bi,${A0}[0]
	vmull.u32	@ACC[1],$Bi,${A0}[1]
	vmull.u32	@ACC[2],$Bi,${A1}[0]
	vshl.i64	$Ni,@ACC[0]#hi,#16
	vmull.u32	@ACC[3],$Bi,${A1}[1]

	vadd.u64	$Ni,$Ni,@ACC[0]#lo
	veor	$zero,$zero,$zero

	vmull.u32	@ACC[4],$Bi,${A2}[0]
	vld1.32	{$N0-$N3}, [$nptr]!
	vmull.u32	@ACC[5],$Bi,${A2}[1]
	vmull.u32	@ACC[6],$Bi,${A3}[0]
	vmull.u32	@ACC[7],$Bi,${A3}[1]

	vmlal.u32	@ACC[0],$Ni,${N0}[0]
	vmlal.u32	@ACC[1],$Ni,${N0}[1]
	vmlal.u32	@ACC[2],$Ni,${N1}[0]
	vmlal.u32	@ACC[3],$Ni,${N1}[1]
	vmlal.u32	@ACC[4],$Ni,${N2}[0]
	vmlal.u32	@ACC[5],$Ni,${N2}[1]
	vmlal.u32	@ACC[6],$Ni,${N3}[0]
	vmlal.u32	@ACC[7],$Ni,${N3}[1]

	vshr.u64	$temp,$temp,#16
	vadd.u64	$temp,$temp,$Temp#hi
	vshr.u64	$temp,$temp,#16

	vld1.32	{${Bi}[0]}, [$bptr,:32]!
	veor	$zero,$zero,$zero
	vadd.u64	@ACC[0]#lo,@ACC[0]#lo,$temp

	vmlal.u32	@ACC[0],$Bi,${A0}[0]
	vmlal.u32	@ACC[1],$Bi,${A0}[1]
	vmlal.u32	@ACC[2],$Bi,${A1}[0]
	vshl.i64	$Ni,@ACC[0]#hi,#16
	vmlal.u32	@ACC[3],$Bi,${A1}[1]

	vadd.u64	$Ni,$Ni,@ACC[0]#lo
	veor	$zero,$zero,$zero
	subs	$outer,$outer,#1

	vmlal.u32	@ACC[4],$Bi,${A2}[0]
	vmlal.u32	@ACC[5],$Bi,${A2}[1]
	vmlal.u32	@ACC[6],$Bi,${A3}[0]
	vmlal.u32	@ACC[7],$Bi,${A3}[1]

	vmlal.u32	@ACC[0],$Ni,${N0}[0]
	vmlal.u32	@ACC[1],$Ni,${N0}[1]
	vmlal.u32	@ACC[2],$Ni,${N1}[0]
	vmlal.u32	@ACC[3],$Ni,${N1}[1]
	vmlal.u32	@ACC[4],$Ni,${N2}[0]
	vmlal.u32	@ACC[5],$Ni,${N2}[1]
	vmlal.u32	@ACC[6],$Ni,${N3}[0]
	vmlal.u32	@ACC[7],$Ni,${N3}[1]

	vshr.u64	$temp,$temp,#16
	vadd.u64	$temp,$temp,$Temp#hi
	vshr.u64	$temp,$temp,#16
	vadd.u64	@ACC[0]#lo,@ACC[0]#lo,$temp
	vshr.u64	$temp,@ACC[0]#lo,#16
	vadd.u64	@ACC[0]#hi,@ACC[0]#hi,$temp
	vshr.u64	$temp,@ACC[0]#hi,#16
	vzip.16	@ACC[0]#lo,@ACC[0]#hi
	veor	@ACC[0],@ACC[0],@ACC[0]
	veor	@ACC[1],@ACC[1],@ACC[1]
	sub	$toutptr,$toutptr,$num,lsl#4
	veor	@ACC[2],@ACC[2],@ACC[2]
	and	$toutptr,$toutptr,#-64
	veor	@ACC[3],@ACC[3],@ACC[3]
	mov	sp,$toutptr		@ alloca
	veor	@ACC[4],@ACC[4],@ACC[4]
	add	$toutptr,$toutptr,#256
	veor	@ACC[5],@ACC[5],@ACC[5]
	veor	@ACC[6],@ACC[6],@ACC[6]
	veor	@ACC[7],@ACC[7],@ACC[7]

	vst1.64	{@ACC[0]-@ACC[1]},[$toutptr,:256]!
	subs	$inner,$inner,#8
	vst1.64	{@ACC[2]-@ACC[3]},[$toutptr,:256]!
	vst1.64	{@ACC[4]-@ACC[5]},[$toutptr,:256]!
	vst1.64	{@ACC[6]-@ACC[7]},[$toutptr,:256]!

	vld1.32	{$A0-$A3},[$aptr]!
	vld1.32	{${M0}[0]},[$n0,:32]
	vld1.32	{${Bi}[0]},[$bptr,:32]!	@ *b++
	veor	$zero,$zero,$zero
	vld1.32	{$N0-$N3},[$nptr]!

	vmlal.u32	@ACC[0],$Bi,${A0}[0]
	vmlal.u32	@ACC[1],$Bi,${A0}[1]
	veor	$zero,$zero,$zero
	vmlal.u32	@ACC[2],$Bi,${A1}[0]
	vshl.i64	$Ni,@ACC[0]#hi,#16
	vmlal.u32	@ACC[3],$Bi,${A1}[1]
	vadd.u64	$Ni,$Ni,@ACC[0]#lo
	vmlal.u32	@ACC[4],$Bi,${A2}[0]
	vmlal.u32	@ACC[5],$Bi,${A2}[1]
	vst1.32	{$Bi},[sp,:64]		@ put aside smashed b[8*i+0]
	vmlal.u32	@ACC[6],$Bi,${A3}[0]
	vmlal.u32	@ACC[7],$Bi,${A3}[1]

	vld1.32	{${Bi}[0]},[$bptr,:32]!	@ *b++
	vmlal.u32	@ACC[0],$Ni,${N0}[0]
	veor	$temp,$temp,$temp
	vmlal.u32	@ACC[1],$Ni,${N0}[1]
	vmlal.u32	@ACC[2],$Ni,${N1}[0]
	vshr.u64	@ACC[0]#lo,@ACC[0]#lo,#16
	vmlal.u32	@ACC[3],$Ni,${N1}[1]
	vmlal.u32	@ACC[4],$Ni,${N2}[0]
	vadd.u64	@ACC[0]#lo,@ACC[0]#lo,@ACC[0]#hi
	vmlal.u32	@ACC[5],$Ni,${N2}[1]
	vshr.u64	@ACC[0]#lo,@ACC[0]#lo,#16
	vmlal.u32	@ACC[6],$Ni,${N3}[0]
	vmlal.u32	@ACC[7],$Ni,${N3}[1]
	vadd.u64	@ACC[1]#lo,@ACC[1]#lo,@ACC[0]#lo
	vst1.32	{$Ni},[$bnptr,:64]!	@ put aside smashed m[8*i+$i]

push(@ACC,shift(@ACC));	$i++;
	vmlal.u32	@ACC[0],$Bi,${A0}[0]
	vld1.64	{@ACC[7]},[$tinptr,:128]!
	vmlal.u32	@ACC[1],$Bi,${A0}[1]
	veor	$zero,$zero,$zero
	vmlal.u32	@ACC[2],$Bi,${A1}[0]
	vshl.i64	$Ni,@ACC[0]#hi,#16
	vmlal.u32	@ACC[3],$Bi,${A1}[1]
	vadd.u64	$Ni,$Ni,@ACC[0]#lo
	vmlal.u32	@ACC[4],$Bi,${A2}[0]
	vmlal.u32	@ACC[5],$Bi,${A2}[1]
	vst1.32	{$Bi},[$bnptr,:64]!	@ put aside smashed b[8*i+$i]
	vmlal.u32	@ACC[6],$Bi,${A3}[0]
	vmlal.u32	@ACC[7],$Bi,${A3}[1]

	vld1.32	{$Bi},[sp,:64]		@ pull smashed b[8*i+0]
	vmlal.u32	@ACC[0],$Ni,${N0}[0]
	vld1.32	{$A0-$A3},[$aptr]!
	vmlal.u32	@ACC[1],$Ni,${N0}[1]
	vmlal.u32	@ACC[2],$Ni,${N1}[0]
	vshr.u64	@ACC[0]#lo,@ACC[0]#lo,#16
	vmlal.u32	@ACC[3],$Ni,${N1}[1]
	vmlal.u32	@ACC[4],$Ni,${N2}[0]
	vadd.u64	@ACC[0]#lo,@ACC[0]#lo,@ACC[0]#hi
	vmlal.u32	@ACC[5],$Ni,${N2}[1]
	vshr.u64	@ACC[0]#lo,@ACC[0]#lo,#16
	vmlal.u32	@ACC[6],$Ni,${N3}[0]
	vmlal.u32	@ACC[7],$Ni,${N3}[1]
	vadd.u64	@ACC[1]#lo,@ACC[1]#lo,@ACC[0]#lo
	vst1.32	{$Ni},[$bnptr,:64]	@ put aside smashed m[8*i+$i]
	add	$bnptr,sp,#8		@ rewind

push(@ACC,shift(@ACC));

	subs	$inner,$inner,#8
	vmlal.u32	@ACC[0],$Bi,${A0}[0]
	vld1.64	{@ACC[7]},[$tinptr,:128]
	vmlal.u32	@ACC[1],$Bi,${A0}[1]
	vld1.32	{$Ni},[$bnptr,:64]!	@ pull smashed m[8*i+0]
	vmlal.u32	@ACC[2],$Bi,${A1}[0]
	vld1.32	{$N0-$N3},[$nptr]!
	vmlal.u32	@ACC[3],$Bi,${A1}[1]
	addne	$tinptr,$tinptr,#16	@ don't advance in last iteration
	vmlal.u32	@ACC[4],$Bi,${A2}[0]
	vmlal.u32	@ACC[5],$Bi,${A2}[1]
	vmlal.u32	@ACC[6],$Bi,${A3}[0]
	vmlal.u32	@ACC[7],$Bi,${A3}[1]

for ($i=1; $i<8; $i++) {

	vld1.32	{$Bi},[$bnptr,:64]!	@ pull smashed b[8*i+$i]
	vmlal.u32	@ACC[0],$Ni,${N0}[0]
	vmlal.u32	@ACC[1],$Ni,${N0}[1]
	vmlal.u32	@ACC[2],$Ni,${N1}[0]
	vmlal.u32	@ACC[3],$Ni,${N1}[1]
	vmlal.u32	@ACC[4],$Ni,${N2}[0]
	vmlal.u32	@ACC[5],$Ni,${N2}[1]
	vmlal.u32	@ACC[6],$Ni,${N3}[0]
	vmlal.u32	@ACC[7],$Ni,${N3}[1]
	vst1.64	{@ACC[0]},[$toutptr,:128]!

push(@ACC,shift(@ACC));
	vmlal.u32	@ACC[0],$Bi,${A0}[0]
	vld1.64	{@ACC[7]},[$tinptr,:128]
	vmlal.u32	@ACC[1],$Bi,${A0}[1]
	vld1.32	{$Ni},[$bnptr,:64]!	@ pull smashed m[8*i+$i]
	vmlal.u32	@ACC[2],$Bi,${A1}[0]
	addne	$tinptr,$tinptr,#16	@ don't advance in last iteration
	vmlal.u32	@ACC[3],$Bi,${A1}[1]
	vmlal.u32	@ACC[4],$Bi,${A2}[0]
	vmlal.u32	@ACC[5],$Bi,${A2}[1]
	vmlal.u32	@ACC[6],$Bi,${A3}[0]
	vmlal.u32	@ACC[7],$Bi,${A3}[1]

	subeq	$aptr,$aptr,$num,lsl#2	@ rewind
	vmlal.u32	@ACC[0],$Ni,${N0}[0]
	vld1.32	{$Bi},[sp,:64]		@ pull smashed b[8*i+0]
	vmlal.u32	@ACC[1],$Ni,${N0}[1]
	vld1.32	{$A0-$A3},[$aptr]!
	vmlal.u32	@ACC[2],$Ni,${N1}[0]
	add	$bnptr,sp,#8		@ rewind
	vmlal.u32	@ACC[3],$Ni,${N1}[1]
	vmlal.u32	@ACC[4],$Ni,${N2}[0]
	vmlal.u32	@ACC[5],$Ni,${N2}[1]
	vmlal.u32	@ACC[6],$Ni,${N3}[0]
	vst1.64	{@ACC[0]},[$toutptr,:128]!
	vmlal.u32	@ACC[7],$Ni,${N3}[1]

push(@ACC,shift(@ACC));
	vst1.64	{@ACC[0]-@ACC[1]},[$toutptr,:256]!
	veor	q2,q2,q2		@ $N0-$N1
	vst1.64	{@ACC[2]-@ACC[3]},[$toutptr,:256]!
	veor	q3,q3,q3		@ $N2-$N3
	vst1.64	{@ACC[4]-@ACC[5]},[$toutptr,:256]!
	vst1.64	{@ACC[6]},[$toutptr,:128]

	subs	$outer,$outer,#8
	vld1.64	{@ACC[0]-@ACC[1]},[$tinptr,:256]!
	vld1.64	{@ACC[2]-@ACC[3]},[$tinptr,:256]!
	vld1.64	{@ACC[4]-@ACC[5]},[$tinptr,:256]!
	vld1.64	{@ACC[6]-@ACC[7]},[$tinptr,:256]!

	subne	$nptr,$nptr,$num,lsl#2	@ rewind

	vst1.64	{q2-q3}, [sp,:256]!	@ start wiping stack frame
	vshr.u64	$temp,@ACC[0]#lo,#16
	vst1.64	{q2-q3},[sp,:256]!
	vadd.u64	@ACC[0]#hi,@ACC[0]#hi,$temp
	vst1.64	{q2-q3}, [sp,:256]!
	vshr.u64	$temp,@ACC[0]#hi,#16
	vst1.64	{q2-q3}, [sp,:256]!
	vzip.16	@ACC[0]#lo,@ACC[0]#hi

	vadd.u64	@ACC[0]#lo,@ACC[0]#lo,$temp
	vshr.u64	$temp,@ACC[0]#lo,#16
	vld1.64	{@ACC[2]-@ACC[3]}, [$tinptr, :256]!
	vadd.u64	@ACC[0]#hi,@ACC[0]#hi,$temp
	vld1.64	{@ACC[4]-@ACC[5]}, [$tinptr, :256]!
	vshr.u64	$temp,@ACC[0]#hi,#16
	vld1.64	{@ACC[6]-@ACC[7]}, [$tinptr, :256]!
	vzip.16	@ACC[0]#lo,@ACC[0]#hi
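	@ The fold above reassembles one 32-bit output word from a 64-bit
	@ accumulator pair; informally (Perl-ish pseudo-code, names ad hoc):
	@	$lo    += $carry_in;
	@	$hi    += $lo >> 16;		@ propagate 16 bits of carry
	@	$word   = ($lo & 0xffff) | (($hi & 0xffff) << 16);  @ vzip.16
	@	$carry  = $hi >> 16;		@ carry out to the next word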
for ($i=1; $i<8; $i++) {

	vadd.u64	@ACC[1]#lo,@ACC[1]#lo,$temp
	vst1.32	{@ACC[0]#lo[0]}, [$toutptr, :32]!
	vshr.u64	$temp,@ACC[1]#lo,#16
	vadd.u64	@ACC[1]#hi,@ACC[1]#hi,$temp
	vshr.u64	$temp,@ACC[1]#hi,#16
	vzip.16	@ACC[1]#lo,@ACC[1]#hi

push(@ACC,shift(@ACC));

push(@ACC,shift(@ACC));

	vld1.64	{@ACC[0]-@ACC[1]}, [$tinptr, :256]!
	subs	$inner,$inner,#8
	vst1.32	{@ACC[7]#lo[0]}, [$toutptr, :32]!

	vst1.32	{${temp}[0]}, [$toutptr, :32]	@ top-most bit
	sub	$nptr,$nptr,$num,lsl#2		@ rewind $nptr
	subs	$aptr,sp,#0			@ clear carry flag
	add	$bptr,sp,$num,lsl#2

	ldmia	$aptr!, {r4-r7}
	ldmia	$nptr!, {r8-r11}
	teq	$aptr,$bptr			@ preserves carry
	stmia	$rptr!, {r8-r11}

	ldr	r10, [$aptr]			@ load top-most bit
	sub	r11,$bptr,r11			@ this is num*4
	sub	$rptr,$rptr,r11			@ rewind $rptr
	mov	$nptr,$bptr			@ second 3/4th of frame
	sbcs	r10,r10,#0			@ result is carry flag

	ldmia	$aptr!, {r4-r7}
	ldmia	$rptr, {r8-r11}
	vst1.64	{q0-q1}, [$nptr,:256]!		@ wipe
	vst1.64	{q0-q1}, [$nptr,:256]!		@ wipe
	stmia	$rptr!, {r8-r11}
	ldmia	$rptr, {r8-r11}
	vst1.64	{q0-q1}, [$aptr,:256]!		@ wipe
	vst1.64	{q0-q1}, [$nptr,:256]!		@ wipe
	teq	$aptr,$bptr			@ preserves carry
	stmia	$rptr!, {r8-r11}
	bne	.LNEON_copy_n_zap

.size	bn_mul8x_mont_neon,.-bn_mul8x_mont_neon

.asciz	"Montgomery multiplication for ARMv4/NEON, CRYPTOGAMS by <appro\@openssl.org>"

#if __ARM_MAX_ARCH__>=7
.comm	OPENSSL_armcap_P,4,4
foreach (split("\n",$code)) {
	s/\`([^\`]*)\`/eval $1/ge;

	s/\bq([0-9]+)#(lo|hi)/sprintf "d%d",2*$1+($2 eq "hi")/ge	or
	s/\bbx\s+lr\b/.word\t0xe12fff1e/g;	# make it possible to compile with -march=armv4

	print $_,"\n";
}

close STDOUT or die "error closing STDOUT: $!";