3 # ====================================================================
4 # Written by David S. Miller <davem@davemloft.net> and Andy Polyakov
5 # <appro@openssl.org>. The module is licensed under 2-clause BSD
6 # license. November 2012. All rights reserved.
7 # ====================================================================
9 ######################################################################
10 # Montgomery squaring-n-multiplication module for SPARC T4.
12 # The module consists of three parts:
14 # 1) collection of "single-op" subroutines that perform single
15 # operation, Montgomery squaring or multiplication, on 512-,
16 # 1024-, 1536- and 2048-bit operands;
17 # 2) collection of "multi-op" subroutines that perform 5 squaring and
18 # 1 multiplication operations on operands of above lengths;
19 # 3) fall-back and helper VIS3 subroutines.
21 # RSA sign is dominated by multi-op subroutine, while RSA verify and
22 # DSA - by single-op. Special note about 4096-bit RSA verify result.
23 # Operands are too long for dedicated hardware and it's handled by
24 # VIS3 code, which is why you don't see any improvement. It's surely
25 # possible to improve it [by deploying 'mpmul' instruction], maybe in
28 # Performance improvement.
30 # 64-bit process, VIS3:
31 # sign verify sign/s verify/s
32 # rsa 1024 bits 0.000633s 0.000033s 1578.9 30513.3
33 # rsa 2048 bits 0.003297s 0.000116s 303.3 8585.8
34 # rsa 4096 bits 0.026000s 0.000387s 38.5 2587.0
35 # dsa 1024 bits 0.000301s 0.000332s 3323.7 3013.9
36 # dsa 2048 bits 0.001056s 0.001233s 946.9 810.8
38 # 64-bit process, this module:
39 # sign verify sign/s verify/s
40 # rsa 1024 bits 0.000341s 0.000021s 2931.5 46873.8
41 # rsa 2048 bits 0.001244s 0.000044s 803.9 22569.1
42 # rsa 4096 bits 0.006203s 0.000387s 161.2 2586.3
43 # dsa 1024 bits 0.000179s 0.000195s 5573.9 5115.6
44 # dsa 2048 bits 0.000311s 0.000350s 3212.3 2856.6
46 ######################################################################
47 # 32-bit process, VIS3:
48 # sign verify sign/s verify/s
49 # rsa 1024 bits 0.000675s 0.000033s 1480.9 30159.0
50 # rsa 2048 bits 0.003383s 0.000118s 295.6 8499.9
51 # rsa 4096 bits 0.026178s 0.000394s 38.2 2541.3
52 # dsa 1024 bits 0.000326s 0.000343s 3070.0 2918.8
53 # dsa 2048 bits 0.001121s 0.001291s 891.9 774.4
55 # 32-bit process, this module:
56 # sign verify sign/s verify/s
57 # rsa 1024 bits 0.000386s 0.000022s 2589.6 45704.9
58 # rsa 2048 bits 0.001335s 0.000046s 749.3 21766.8
59 # rsa 4096 bits 0.006390s 0.000393s 156.5 2544.8
60 # dsa 1024 bits 0.000208s 0.000204s 4817.6 4896.6
61 # dsa 2048 bits 0.000345s 0.000364s 2898.8 2747.3
63 # 32-bit code is prone to performance degradation as interrupt rate
64 # dispatched to CPU executing the code grows. This is because in
65 # standard process of handling interrupt in 32-bit process context
66 # upper halves of most integer registers used as input or output are
67 # zeroed. This renders result invalid, and operation has to be re-run.
68 # If CPU is "bothered" with timer interrupts only, the penalty is
69 # hardly measurable. But in order to mitigate this problem for higher
70 # interrupt rates contemporary Linux kernel recognizes biased stack
71 # even in 32-bit process context and preserves full register contents.
72 # See http://git.kernel.org/?p=linux/kernel/git/torvalds/linux.git;h=517ffce4e1a03aea979fe3a18a3dd1761a24fafb
# Derive this script's directory from $0 so the shared perlasm helper
# module can be located regardless of the current working directory.
75 $0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
76 push(@INC,"${dir}","${dir}../../perlasm");
77 require "sparcv9_modes.pl";
80 #include "sparc_arch.h"
83 .register %g2,#scratch
84 .register %g3,#scratch
87 .section ".text",#alloc,#execinstr
94 ########################################################################
95 # Register layout for mont[mul|sqr] instructions.
96 # For details see "Oracle SPARC Architecture 2011" manual at
97 # http://www.oracle.com/technetwork/server-storage/sun-sparc-enterprise/documentation/.
99 my @R=map("%f".2*$_,(0..11,30,31,12..29));
100 my @N=(map("%l$_",(0..7)),map("%o$_",(0..5))); @N=(@N,@N,@N[0..3]);
101 my @B=(map("%o$_",(0..5)),@N[0..13],@N[0..11]);
102 my @A=(@N[0..13],@R[14..31]);
104 ########################################################################
105 # int bn_mul_mont_t4_$NUM(u64 *rp,const u64 *ap,const u64 *bp,
106 # const u64 *np,const BN_ULONG *n0);
108 sub generate_bn_mul_mont_t4() {
110 my ($rp,$ap,$bp,$np,$sentinel)=map("%g$_",(1..5));
113 .globl bn_mul_mont_t4_$NUM
119 #elif defined(SPARCV9_64BIT_STACK)
120 SPARC_LOAD_ADDRESS_LEAF(OPENSSL_sparcv9cap_P,%g1,%g5)
121 ld [%g1+0],%g1 ! OPENSSL_sparcv9_P[0]
123 and %g1,SPARCV9_64BIT_STACK,%g1
131 sllx $sentinel,32,$sentinel
134 save %sp,-128,%sp ! warm it up
149 or %g4,$sentinel,$sentinel
151 ! copy arguments to global registers
156 ld [%i4+0],%f1 ! load *n0
161 # load ap[$NUM] ########################################################
163 save %sp,-128,%sp; or $sentinel,%fp,%fp
165 for($i=0; $i<14 && $i<$NUM; $i++) {
166 my $lo=$i<13?@A[$i+1]:"%o7";
169 ld [$ap+$i*8+4],@A[$i]
170 sllx @A[$i],32,@A[$i]
174 for(; $i<$NUM; $i++) {
175 my ($hi,$lo)=("%f".2*($i%4),"%f".(2*($i%4)+1));
182 # load np[$NUM] ########################################################
184 save %sp,-128,%sp; or $sentinel,%fp,%fp
186 for($i=0; $i<14 && $i<$NUM; $i++) {
187 my $lo=$i<13?@N[$i+1]:"%o7";
190 ld [$np+$i*8+4],@N[$i]
191 sllx @N[$i],32,@N[$i]
196 save %sp,-128,%sp; or $sentinel,%fp,%fp
198 for(; $i<28 && $i<$NUM; $i++) {
199 my $lo=$i<27?@N[$i+1]:"%o7";
202 ld [$np+$i*8+4],@N[$i]
203 sllx @N[$i],32,@N[$i]
208 save %sp,-128,%sp; or $sentinel,%fp,%fp
210 for(; $i<$NUM; $i++) {
211 my $lo=($i<$NUM-1)?@N[$i+1]:"%o7";
214 ld [$np+$i*8+4],@N[$i]
215 sllx @N[$i],32,@N[$i]
221 be SIZE_T_CC,.Lmsquare_$NUM
225 # load bp[$NUM] ########################################################
226 for($i=0; $i<6 && $i<$NUM; $i++) {
227 my $lo=$i<5?@B[$i+1]:"%o7";
230 ld [$bp+$i*8+4],@B[$i]
231 sllx @B[$i],32,@B[$i]
236 save %sp,-128,%sp; or $sentinel,%fp,%fp
238 for(; $i<20 && $i<$NUM; $i++) {
239 my $lo=$i<19?@B[$i+1]:"%o7";
242 ld [$bp+$i*8+4],@B[$i]
243 sllx @B[$i],32,@B[$i]
248 save %sp,-128,%sp; or $sentinel,%fp,%fp
250 for(; $i<$NUM; $i++) {
251 my $lo=($i<$NUM-1)?@B[$i+1]:"%o7";
254 ld [$bp+$i*8+4],@B[$i]
255 sllx @B[$i],32,@B[$i]
259 # magic ################################################################
261 .word 0x81b02920+$NUM-1 ! montmul $NUM-1
263 fbu,pn %fcc3,.Lmabort_$NUM
265 and %fp,$sentinel,$sentinel
266 brz,pn $sentinel,.Lmabort_$NUM
276 restore; and %fp,$sentinel,$sentinel
277 restore; and %fp,$sentinel,$sentinel
278 restore; and %fp,$sentinel,$sentinel
279 restore; and %fp,$sentinel,$sentinel
280 brz,pn $sentinel,.Lmabort1_$NUM
285 # save tp[$NUM] ########################################################
286 for($i=0; $i<14 && $i<$NUM; $i++) {
288 movxtod @A[$i],@R[$i]
295 and %fp,$sentinel,$sentinel
298 and %fp,$sentinel,$sentinel
299 srl %fp,0,%fp ! just in case?
300 or %o7,$sentinel,$sentinel
301 brz,a,pn $sentinel,.Lmdone_$NUM
302 mov 0,%i0 ! return failure
305 for($i=0; $i<12 && $i<$NUM; $i++) {
306 @R[$i] =~ /%f([0-9]+)/;
307 my $lo = "%f".($1+1);
310 st @R[$i],[$rp+$i*8+4]
313 for(; $i<$NUM; $i++) {
314 my ($hi,$lo)=("%f".2*($i%4),"%f".(2*($i%4)+1));
322 mov 1,%i0 ! return success
336 mov 0,%i0 ! return failure
342 save %sp,-128,%sp; or $sentinel,%fp,%fp
343 save %sp,-128,%sp; or $sentinel,%fp,%fp
344 .word 0x81b02940+$NUM-1 ! montsqr $NUM-1
347 .type bn_mul_mont_t4_$NUM, #function
348 .size bn_mul_mont_t4_$NUM, .-bn_mul_mont_t4_$NUM
# Emit the "single-op" Montgomery mul subroutines for 8-, 16-, 24- and
# 32-limb operands (512-, 1024-, 1536- and 2048-bit; see header notes).
352 for ($i=8;$i<=32;$i+=8) {
353 	&generate_bn_mul_mont_t4($i);
356 ########################################################################
359 my ($ptbl,$pwr,$tmp)=@_;
361 sethi %hi(.Lmagic-1f),$tmp
364 inc %lo(.Lmagic-1b),%o7
365 and $pwr, 7<<2, $tmp ! offset within "magic table"
368 sll $tmp, 3, $tmp ! offset within first cache line
369 add $tmp, $ptbl, $ptbl ! of the pwrtbl
371 ! "magic table" is organized in such way that below comparisons
372 ! make %fcc3:%fcc2:%fcc1:%fcc0 form a byte of 1s with one 0,
373 ! e.g. 0b11011111, with 0 denoting relevant cache line.
374 ld [%o7+0], %f0 ! load column
377 fcmps %fcc0, %f0, %f1
379 fcmps %fcc1, %f1, %f2
380 fcmps %fcc2, %f2, %f3
381 fcmps %fcc3, %f3, %f0
387 ldd [$ptbl+0*32],%f0 ! load all cache lines
390 fmovdg %fcc0,%f0,%f16 ! pick one value
392 fmovdl %fcc0,%f2,%f16
394 fmovdg %fcc1,%f4,%f16
395 ldd [$ptbl+5*32],%f10
396 fmovdl %fcc1,%f6,%f16
397 ldd [$ptbl+6*32],%f12
398 fmovdg %fcc2,%f8,%f16
399 ldd [$ptbl+7*32],%f14
400 fmovdl %fcc2,%f10,%f16
401 fmovdg %fcc3,%f12,%f16
402 fmovdl %fcc3,%f14,%f16
407 ########################################################################
408 # int bn_pwr5_mont_t4_$NUM(u64 *tp,const u64 *np,const BN_ULONG *n0,
409 # const u64 *pwrtbl,int pwr);
411 sub generate_bn_pwr5_mont_t4() {
413 my ($tp,$np,$pwrtbl,$pwr,$sentinel)=map("%g$_",(1..5));
416 .globl bn_pwr5_mont_t4_$NUM
418 bn_pwr5_mont_t4_$NUM:
422 #elif defined(SPARCV9_64BIT_STACK)
423 SPARC_LOAD_ADDRESS_LEAF(OPENSSL_sparcv9cap_P,%g1,%g5)
424 ld [%g1+0],%g1 ! OPENSSL_sparcv9_P[0]
426 and %g1,SPARCV9_64BIT_STACK,%g1
434 sllx $sentinel,32,$sentinel
437 save %sp,-128,%sp ! warm it up
452 or %g4,$sentinel,$sentinel
454 ! copy arguments to global registers
457 ld [%i2+0],%f1 ! load *n0
464 # load tp[$NUM] ########################################################
466 save %sp,-128,%sp; or $sentinel,%fp,%fp
468 for($i=0; $i<14 && $i<$NUM; $i++) {
470 ldx [$tp+$i*8],@A[$i]
473 for(; $i<$NUM; $i++) {
475 ldd [$tp+$i*8],@A[$i]
478 # load np[$NUM] ########################################################
480 save %sp,-128,%sp; or $sentinel,%fp,%fp
482 for($i=0; $i<14 && $i<$NUM; $i++) {
484 ldx [$np+$i*8],@N[$i]
488 save %sp,-128,%sp; or $sentinel,%fp,%fp
490 for(; $i<28 && $i<$NUM; $i++) {
492 ldx [$np+$i*8],@N[$i]
496 save %sp,-128,%sp; or $sentinel,%fp,%fp
498 for(; $i<$NUM; $i++) {
500 ldx [$np+$i*8],@N[$i]
503 # load pwrtbl[pwr] ########################################################
504 &load_fcc($pwrtbl,$pwr,@B[0]);
505 for($i=0; $i<6 && $i<$NUM; $i++) {
512 save %sp,-128,%sp; or $sentinel,%fp,%fp
514 for(; $i<20 && $i<$NUM; $i++) {
521 save %sp,-128,%sp; or $sentinel,%fp,%fp
523 for(; $i<$NUM; $i++) {
530 # magic ################################################################
531 for($i=0; $i<5; $i++) {
533 .word 0x81b02940+$NUM-1 ! montsqr $NUM-1
534 fbu,pn %fcc3,.Labort_$NUM
536 and %fp,$sentinel,$sentinel
537 brz,pn $sentinel,.Labort_$NUM
543 .word 0x81b02920+$NUM-1 ! montmul $NUM-1
544 fbu,pn %fcc3,.Labort_$NUM
546 and %fp,$sentinel,$sentinel
547 brz,pn $sentinel,.Labort_$NUM
558 restore; and %fp,$sentinel,$sentinel
559 restore; and %fp,$sentinel,$sentinel
560 restore; and %fp,$sentinel,$sentinel
561 restore; and %fp,$sentinel,$sentinel
562 brz,pn $sentinel,.Labort1_$NUM
567 # save tp[$NUM] ########################################################
568 for($i=0; $i<14 && $i<$NUM; $i++) {
570 movxtod @A[$i],@R[$i]
577 and %fp,$sentinel,$sentinel
580 and %fp,$sentinel,$sentinel
581 srl %fp,0,%fp ! just in case?
582 or %o7,$sentinel,$sentinel
583 brz,a,pn $sentinel,.Ldone_$NUM
584 mov 0,%i0 ! return failure
587 for($i=0; $i<$NUM; $i++) {
589 std @R[$i],[$tp+$i*8]
593 mov 1,%i0 ! return success
607 mov 0,%i0 ! return failure
610 .type bn_pwr5_mont_t4_$NUM, #function
611 .size bn_pwr5_mont_t4_$NUM, .-bn_pwr5_mont_t4_$NUM
# Emit the "multi-op" (5x square + 1x multiply) subroutines for the same
# operand sizes: 8, 16, 24 and 32 limbs (512-2048 bits).
615 for ($i=8;$i<=32;$i+=8) {
616 	&generate_bn_pwr5_mont_t4($i);
620 ########################################################################
621 # Fall-back subroutines
623 # copy of bn_mul_mont_vis3 adjusted for vectors of 64-bit values
# Scratch-register names used by the VIS3 fall-back code below:
# %g1-%g5 plus %o0-%o5,%o7 hold the multiplier and per-limb temporaries.
625 ($n0,$m0,$m1,$lo0,$hi0, $lo1,$hi1,$aj,$alo,$nj,$nlo,$tj)=
626 	(map("%g$_",(1..5)),map("%o$_",(0..5,7)));
# Incoming argument registers for bn_mul_mont_t4 (SPARC %o registers
# as seen on entry, before any register-window save).
629 $rp="%o0";	# u64 *rp,
630 $ap="%o1";	# const u64 *ap,
631 $bp="%o2";	# const u64 *bp,
632 $np="%o3";	# const u64 *np,
633 $n0p="%o4";	# const BN_ULONG *n0,
634 $num="%o5";	# int num);	# caller ensures that num is >=3
636 .globl bn_mul_mont_t4
639 add %sp, STACK_BIAS, %g4 ! real top of stack
640 sll $num, 3, $num ! size in bytes
642 andn %g1, 63, %g1 ! buffer size rounded up to 64 bytes
644 andn %g1, 63, %g1 ! align at 64 byte
645 sub %g1, STACK_FRAME, %g1 ! new top of stack
650 # +-------------------------------+<----- %sp
652 # +-------------------------------+<----- aligned at 64 bytes
654 # +-------------------------------+
657 # +-------------------------------+<----- aligned at 64 bytes
# Re-bind argument names to %i0-%i5 (out registers become in registers
# across a window shift -- NOTE(review): save occurs in omitted prologue);
# %l0-%l7 serve as locals.
659 ($rp,$ap,$bp,$np,$n0p,$num)=map("%i$_",(0..5));
660 ($t0,$t1,$t2,$t3,$cnt,$tp,$bufsz)=map("%l$_",(0..7));
663 ld [$n0p+0], $t0 ! pull n0[0..1] value
665 add %sp, STACK_BIAS+STACK_FRAME, $tp
666 ldx [$bp+0], $m0 ! m0=bp[0]
671 ldx [$ap+0], $aj ! ap[0]
673 mulx $aj, $m0, $lo0 ! ap[0]*bp[0]
674 umulxhi $aj, $m0, $hi0
676 ldx [$ap+8], $aj ! ap[1]
678 ldx [$np+0], $nj ! np[0]
680 mulx $lo0, $n0, $m1 ! "tp[0]"*n0
682 mulx $aj, $m0, $alo ! ap[1]*bp[0]
683 umulxhi $aj, $m0, $aj ! ahi=aj
685 mulx $nj, $m1, $lo1 ! np[0]*m1
686 umulxhi $nj, $m1, $hi1
688 ldx [$np+8], $nj ! np[1]
690 addcc $lo0, $lo1, $lo1
692 addxc %g0, $hi1, $hi1
694 mulx $nj, $m1, $nlo ! np[1]*m1
695 umulxhi $nj, $m1, $nj ! nhi=nj
698 sub $num, 24, $cnt ! cnt=num-3
702 addcc $alo, $hi0, $lo0
705 ldx [$ap+0], $aj ! ap[j]
706 addcc $nlo, $hi1, $lo1
708 addxc $nj, %g0, $hi1 ! nhi=nj
710 ldx [$np+0], $nj ! np[j]
711 mulx $aj, $m0, $alo ! ap[j]*bp[0]
713 umulxhi $aj, $m0, $aj ! ahi=aj
715 mulx $nj, $m1, $nlo ! np[j]*m1
716 addcc $lo0, $lo1, $lo1 ! np[j]*m1+ap[j]*bp[0]
717 umulxhi $nj, $m1, $nj ! nhi=nj
718 addxc %g0, $hi1, $hi1
719 stxa $lo1, [$tp]0xe2 ! tp[j-1]
720 add $tp, 8, $tp ! tp++
723 sub $cnt, 8, $cnt ! j--
725 addcc $alo, $hi0, $lo0
726 addxc $aj, %g0, $hi0 ! ahi=aj
728 addcc $nlo, $hi1, $lo1
730 addcc $lo0, $lo1, $lo1 ! np[j]*m1+ap[j]*bp[0]
731 addxc %g0, $hi1, $hi1
732 stxa $lo1, [$tp]0xe2 ! tp[j-1]
735 addcc $hi0, $hi1, $hi1
736 addxc %g0, %g0, $ovf ! upmost overflow bit
741 sub $num, 16, $i ! i=num-2
745 ldx [$bp+0], $m0 ! m0=bp[i]
748 sub $ap, $num, $ap ! rewind
752 ldx [$ap+0], $aj ! ap[0]
753 ldx [$np+0], $nj ! np[0]
755 mulx $aj, $m0, $lo0 ! ap[0]*bp[i]
756 ldx [$tp], $tj ! tp[0]
757 umulxhi $aj, $m0, $hi0
758 ldx [$ap+8], $aj ! ap[1]
759 addcc $lo0, $tj, $lo0 ! ap[0]*bp[i]+tp[0]
760 mulx $aj, $m0, $alo ! ap[1]*bp[i]
761 addxc %g0, $hi0, $hi0
762 mulx $lo0, $n0, $m1 ! tp[0]*n0
763 umulxhi $aj, $m0, $aj ! ahi=aj
764 mulx $nj, $m1, $lo1 ! np[0]*m1
766 umulxhi $nj, $m1, $hi1
767 ldx [$np+8], $nj ! np[1]
769 addcc $lo1, $lo0, $lo1
770 mulx $nj, $m1, $nlo ! np[1]*m1
771 addxc %g0, $hi1, $hi1
772 umulxhi $nj, $m1, $nj ! nhi=nj
775 sub $num, 24, $cnt ! cnt=num-3
778 addcc $alo, $hi0, $lo0
779 ldx [$tp+8], $tj ! tp[j]
780 addxc $aj, %g0, $hi0 ! ahi=aj
781 ldx [$ap+0], $aj ! ap[j]
783 addcc $nlo, $hi1, $lo1
784 mulx $aj, $m0, $alo ! ap[j]*bp[i]
785 addxc $nj, %g0, $hi1 ! nhi=nj
786 ldx [$np+0], $nj ! np[j]
788 umulxhi $aj, $m0, $aj ! ahi=aj
789 addcc $lo0, $tj, $lo0 ! ap[j]*bp[i]+tp[j]
790 mulx $nj, $m1, $nlo ! np[j]*m1
791 addxc %g0, $hi0, $hi0
792 umulxhi $nj, $m1, $nj ! nhi=nj
793 addcc $lo1, $lo0, $lo1 ! np[j]*m1+ap[j]*bp[i]+tp[j]
794 addxc %g0, $hi1, $hi1
795 stx $lo1, [$tp] ! tp[j-1]
797 brnz,pt $cnt, .Linner
800 ldx [$tp+8], $tj ! tp[j]
801 addcc $alo, $hi0, $lo0
802 addxc $aj, %g0, $hi0 ! ahi=aj
803 addcc $lo0, $tj, $lo0 ! ap[j]*bp[i]+tp[j]
804 addxc %g0, $hi0, $hi0
806 addcc $nlo, $hi1, $lo1
807 addxc $nj, %g0, $hi1 ! nhi=nj
808 addcc $lo1, $lo0, $lo1 ! np[j]*m1+ap[j]*bp[i]+tp[j]
809 addxc %g0, $hi1, $hi1
810 stx $lo1, [$tp] ! tp[j-1]
812 subcc %g0, $ovf, %g0 ! move upmost overflow to CCR.xcc
813 addxccc $hi1, $hi0, $hi1
821 sub $ap, $num, $ap ! rewind
825 subcc $num, 8, $cnt ! cnt=num-1 and clear CCR.xcc
833 subccc $tj, $nj, $t2 ! tp[j]-np[j]
838 st $t2, [$rp-4] ! reverse order
843 sub $np, $num, $np ! rewind
847 subc $ovf, %g0, $ovf ! handle upmost overflow bit
850 or $np, $ap, $ap ! ap=borrow?tp:rp
855 .Lcopy: ! copy or in-place refresh
868 .type bn_mul_mont_t4, #function
869 .size bn_mul_mont_t4, .-bn_mul_mont_t4
872 # int bn_mul_mont_gather5(
# Incoming argument registers for bn_mul_mont_gather5_t4; here $bp is the
# power table base rather than a plain multiplicand vector.
873 $rp="%o0";	# u64 *rp,
874 $ap="%o1";	# const u64 *ap,
875 $bp="%o2";	# const u64 *pwrtbl,
876 $np="%o3";	# const u64 *np,
877 $n0p="%o4";	# const BN_ULONG *n0,
878 $num="%o5";	# int num,	# caller ensures that num is >=3
881 .globl bn_mul_mont_gather5_t4
883 bn_mul_mont_gather5_t4:
884 add %sp, STACK_BIAS, %g4 ! real top of stack
885 sll $num, 3, $num ! size in bytes
887 andn %g1, 63, %g1 ! buffer size rounded up to 64 bytes
889 andn %g1, 63, %g1 ! align at 64 byte
890 sub %g1, STACK_FRAME, %g1 ! new top of stack
892 LDPTR [%sp+STACK_7thARG], %g4 ! load power, 7th argument
896 # +-------------------------------+<----- %sp
898 # +-------------------------------+<----- aligned at 64 bytes
900 # +-------------------------------+
903 # +-------------------------------+<----- aligned at 64 bytes
# Re-bind argument names to %i0-%i5 (out registers become in registers
# across a window shift -- NOTE(review): save occurs in omitted prologue);
# %l0-%l7 serve as locals.
905 ($rp,$ap,$bp,$np,$n0p,$num)=map("%i$_",(0..5));
906 ($t0,$t1,$t2,$t3,$cnt,$tp,$bufsz)=map("%l$_",(0..7));
908 &load_fcc($bp,"%g4","%g1");
911 movdtox %f16, $m0 ! m0=bp[0]
913 ld [$n0p+0], $t0 ! pull n0[0..1] value
915 add %sp, STACK_BIAS+STACK_FRAME, $tp
919 ldx [$ap+0], $aj ! ap[0]
921 mulx $aj, $m0, $lo0 ! ap[0]*bp[0]
922 umulxhi $aj, $m0, $hi0
924 ldx [$ap+8], $aj ! ap[1]
926 ldx [$np+0], $nj ! np[0]
928 mulx $lo0, $n0, $m1 ! "tp[0]"*n0
930 mulx $aj, $m0, $alo ! ap[1]*bp[0]
931 umulxhi $aj, $m0, $aj ! ahi=aj
933 mulx $nj, $m1, $lo1 ! np[0]*m1
934 umulxhi $nj, $m1, $hi1
936 ldx [$np+8], $nj ! np[1]
938 addcc $lo0, $lo1, $lo1
940 addxc %g0, $hi1, $hi1
942 mulx $nj, $m1, $nlo ! np[1]*m1
943 umulxhi $nj, $m1, $nj ! nhi=nj
946 sub $num, 24, $cnt ! cnt=num-3
950 addcc $alo, $hi0, $lo0
953 ldx [$ap+0], $aj ! ap[j]
954 addcc $nlo, $hi1, $lo1
956 addxc $nj, %g0, $hi1 ! nhi=nj
958 ldx [$np+0], $nj ! np[j]
959 mulx $aj, $m0, $alo ! ap[j]*bp[0]
961 umulxhi $aj, $m0, $aj ! ahi=aj
963 mulx $nj, $m1, $nlo ! np[j]*m1
964 addcc $lo0, $lo1, $lo1 ! np[j]*m1+ap[j]*bp[0]
965 umulxhi $nj, $m1, $nj ! nhi=nj
966 addxc %g0, $hi1, $hi1
967 stxa $lo1, [$tp]0xe2 ! tp[j-1]
968 add $tp, 8, $tp ! tp++
970 brnz,pt $cnt, .L1st_g5
971 sub $cnt, 8, $cnt ! j--
973 addcc $alo, $hi0, $lo0
974 addxc $aj, %g0, $hi0 ! ahi=aj
976 addcc $nlo, $hi1, $lo1
978 addcc $lo0, $lo1, $lo1 ! np[j]*m1+ap[j]*bp[0]
979 addxc %g0, $hi1, $hi1
980 stxa $lo1, [$tp]0xe2 ! tp[j-1]
983 addcc $hi0, $hi1, $hi1
984 addxc %g0, %g0, $ovf ! upmost overflow bit
989 sub $num, 16, $i ! i=num-2
996 movdtox %f16, $m0 ! m0=bp[i]
998 sub $ap, $num, $ap ! rewind
1002 ldx [$ap+0], $aj ! ap[0]
1003 ldx [$np+0], $nj ! np[0]
1005 mulx $aj, $m0, $lo0 ! ap[0]*bp[i]
1006 ldx [$tp], $tj ! tp[0]
1007 umulxhi $aj, $m0, $hi0
1008 ldx [$ap+8], $aj ! ap[1]
1009 addcc $lo0, $tj, $lo0 ! ap[0]*bp[i]+tp[0]
1010 mulx $aj, $m0, $alo ! ap[1]*bp[i]
1011 addxc %g0, $hi0, $hi0
1012 mulx $lo0, $n0, $m1 ! tp[0]*n0
1013 umulxhi $aj, $m0, $aj ! ahi=aj
1014 mulx $nj, $m1, $lo1 ! np[0]*m1
1016 umulxhi $nj, $m1, $hi1
1017 ldx [$np+8], $nj ! np[1]
1019 addcc $lo1, $lo0, $lo1
1020 mulx $nj, $m1, $nlo ! np[1]*m1
1021 addxc %g0, $hi1, $hi1
1022 umulxhi $nj, $m1, $nj ! nhi=nj
1025 sub $num, 24, $cnt ! cnt=num-3
1028 addcc $alo, $hi0, $lo0
1029 ldx [$tp+8], $tj ! tp[j]
1030 addxc $aj, %g0, $hi0 ! ahi=aj
1031 ldx [$ap+0], $aj ! ap[j]
1033 addcc $nlo, $hi1, $lo1
1034 mulx $aj, $m0, $alo ! ap[j]*bp[i]
1035 addxc $nj, %g0, $hi1 ! nhi=nj
1036 ldx [$np+0], $nj ! np[j]
1038 umulxhi $aj, $m0, $aj ! ahi=aj
1039 addcc $lo0, $tj, $lo0 ! ap[j]*bp[i]+tp[j]
1040 mulx $nj, $m1, $nlo ! np[j]*m1
1041 addxc %g0, $hi0, $hi0
1042 umulxhi $nj, $m1, $nj ! nhi=nj
1043 addcc $lo1, $lo0, $lo1 ! np[j]*m1+ap[j]*bp[i]+tp[j]
1044 addxc %g0, $hi1, $hi1
1045 stx $lo1, [$tp] ! tp[j-1]
1047 brnz,pt $cnt, .Linner_g5
1050 ldx [$tp+8], $tj ! tp[j]
1051 addcc $alo, $hi0, $lo0
1052 addxc $aj, %g0, $hi0 ! ahi=aj
1053 addcc $lo0, $tj, $lo0 ! ap[j]*bp[i]+tp[j]
1054 addxc %g0, $hi0, $hi0
1056 addcc $nlo, $hi1, $lo1
1057 addxc $nj, %g0, $hi1 ! nhi=nj
1058 addcc $lo1, $lo0, $lo1 ! np[j]*m1+ap[j]*bp[i]+tp[j]
1059 addxc %g0, $hi1, $hi1
1060 stx $lo1, [$tp] ! tp[j-1]
1062 subcc %g0, $ovf, %g0 ! move upmost overflow to CCR.xcc
1063 addxccc $hi1, $hi0, $hi1
1064 addxc %g0, %g0, $ovf
1068 brnz,pt $i, .Louter_g5
1071 sub $ap, $num, $ap ! rewind
1075 subcc $num, 8, $cnt ! cnt=num-1 and clear CCR.xcc
1083 subccc $tj, $nj, $t2 ! tp[j]-np[j]
1086 subccc $tj, $nj, $t3
1088 st $t2, [$rp-4] ! reverse order
1090 brnz,pt $cnt, .Lsub_g5
1093 sub $np, $num, $np ! rewind
1097 subc $ovf, %g0, $ovf ! handle upmost overflow bit
1100 or $np, $ap, $ap ! ap=borrow?tp:rp
1105 .Lcopy_g5: ! copy or in-place refresh
1108 stx %g0, [$tp] ! zap
1112 brnz $cnt, .Lcopy_g5
1118 .type bn_mul_mont_gather5_t4, #function
1119 .size bn_mul_mont_gather5_t4, .-bn_mul_mont_gather5_t4
1134 brnz %o2, .Loop_flip
1138 .type bn_flip_t4, #function
1139 .size bn_flip_t4, .-bn_flip_t4
1141 .globl bn_scatter5_t4
1146 add %o3, %o2, %o2 ! &pwrtbl[pwr]
1149 ldx [%o0], %g1 ! inp[i]
1153 brnz %o1, .Loop_scatter5
1157 .type bn_scatter5_t4, #function
1158 .size bn_scatter5_t4, .-bn_scatter5_t4
1160 .globl bn_gather5_t4
1165 &load_fcc("%o2","%o3","%o4");
1175 brnz %o1, .Loop_gather5
1180 .type bn_gather5_t4, #function
1181 .size bn_gather5_t4, .-bn_gather5_t4
1185 #define ONE 0x3f800000
1186 #define NUL 0x00000000
1187 #define NaN 0xffffffff
1191 .long ONE,NUL,NaN,NaN,NaN,NaN,NUL,ONE
1192 .long NUL,ONE,ONE,NUL,NaN,NaN,NaN,NaN
1193 .long NaN,NaN,NUL,ONE,ONE,NUL,NaN,NaN
1194 .long NaN,NaN,NaN,NaN,NUL,ONE,ONE,NUL
1195 .asciz "Montgomery Multiplication for SPARC T4, David S. Miller, Andy Polyakov"