3 # ====================================================================
4 # Written by Andy Polyakov <appro@fy.chalmers.se> for the OpenSSL
5 # project. The module is, however, dual licensed under OpenSSL and
6 # CRYPTOGAMS licenses depending on where you obtain it. For further
7 # details see http://www.openssl.org/~appro/cryptogams/.
8 # ====================================================================
12 # "Teaser" Montgomery multiplication module for UltraSPARC. Why FPU?
13 # Because unlike integer multiplier, which simply stalls whole CPU,
14 # FPU is fully pipelined and can effectively emit 48 bit partial
15 # product every cycle. Why not blended SPARC v9? One can argue that
16 # making this module dependent on UltraSPARC VIS extension limits its
17 # binary compatibility. Well yes, it does exclude SPARC64 prior-V(!)
18 # implementations from compatibility matrix. But the rest, whole Sun
19 # UltraSPARC family and brand new Fujitsu's SPARC64 V, all support
20 # VIS extension instructions used in this module. This is considered
21 # good enough to not care about HAL SPARC64 users [if any] who have
22 # integer-only pure SPARCv9 module to "fall down" to.
24 # USI&II cores currently exhibit uniform 2x improvement [over pre-
25 # bn_mul_mont codebase] for all key lengths and benchmarks. On USIII
26 # performance improves by a few percent for shorter keys and worsens by
27 # a few percent for longer keys. This is because USIII integer multiplier
28 # is >3x faster than USI&II one, which is harder to match [but see
29 # TODO list below]. It should also be noted that SPARC64 V features
30 # out-of-order execution, which *might* mean that integer multiplier
31 # is pipelined, which in turn *might* be impossible to match... On
32 # additional note, SPARC64 V implements FP Multiply-Add instruction,
33 # which is perfectly usable in this context... In other words, as far
34 # as Fujitsu SPARC64 V goes, talk to the author:-)
36 # The implementation implies the following "non-natural" limitations on
38 # - num may not be less than 4;
39 # - num has to be even;
40 # Failure to meet either condition has no fatal effects, simply
41 # doesn't give any performance gain.
44 # - modulo-schedule inner loop for better performance (on in-order
45 # execution core such as UltraSPARC this shall result in further
46 # noticeable(!) improvement);
47 # - dedicated squaring procedure[?];
49 ######################################################################
52 # Modulo-scheduled inner loops allow to interleave floating point and
53 # integer instructions and minimize Read-After-Write penalties. This
54 # results in *further* 20-50% performance improvement [depending on
55 # key length, more for longer keys] on USI&II cores and 30-80% - on
# Name of the generated assembly entry point.
58 $fname="bn_mul_mont_fpu";
# Scan the flags handed to this generator: either -m64 or -xarch=v9 on
# the command line selects the 64-bit ABI code path ($bits=64).
60 for (@ARGV) { $bits=64 if (/\-m64/ || /\-xarch\=v9/); }
# Fixed stack-frame size for the generated routine.
67 $frame=128; # 96 rounded up to largest known cache-line
71 # In order to provide for 32-/64-bit ABI duality, I keep integers wider
72 # than 32 bit in %g1-%g4 and %o0-%o5. %l0-%l7 and %i0-%i5 are used
73 # exclusively for pointers, indexes and other small values...
# Incoming arguments, in the SPARC ABI input registers %i0-%i5; the
# trailing comments mirror the C prototype of bn_mul_mont.
75 $rp="%i0"; # BN_ULONG *rp,
76 $ap="%i1"; # const BN_ULONG *ap,
77 $bp="%i2"; # const BN_ULONG *bp,
78 $np="%i3"; # const BN_ULONG *np,
79 $n0="%i4"; # const BN_ULONG *n0,
80 $num="%i5"; # int num);
# Local registers holding pointers to the on-stack vectors of smashed
# (32-bit-split, FP double format) copies of ap[] and np[].
83 $ap_l="%l1"; # a[num],n[num] are smashed to 32-bit words and saved
84 $ap_h="%l2"; # to these four vectors as double-precision FP values.
85 $np_l="%l3"; # This way a bunch of fxtods are eliminated in second
86 $np_h="%l4"; # loop and L1-cache aliasing is minimized...
89 $mask="%l7"; # 16-bit mask, 0xffff
# NOTE: the two assignments below deliberately shadow/reuse registers
# named above ($n0 moves to %g4 once the n0 value has been loaded, and
# %i4 is then free to serve as the carry flag).
91 $n0="%g4"; # reassigned(!) to "64-bit" register
92 $carry="%i4"; # %i4 reused(!) for a carry bit
94 # FP register naming chart
# b[i] and the n0-product, each transferred to the FPU as 4x16-bit
# values (one 16-bit digit per double-precision register).
109 $ba="%f0"; $bb="%f2"; $bc="%f4"; $bd="%f6";
110 $na="%f8"; $nb="%f10"; $nc="%f12"; $nd="%f14";
# Low/high 32-bit halves of a[j] and n[j] in double format; the
# underscore-suffixed names are the odd single-precision aliases of the
# same register pairs, used for the 32-bit integer loads.
111 $alo="%f16"; $alo_="%f17"; $ahi="%f18"; $ahi_="%f19";
112 $nlo="%f20"; $nlo_="%f21"; $nhi="%f22"; $nhi_="%f23";
# Carried-over top partial products between loop iterations.
114 $dota="%f24"; $dotb="%f26";
# 48-bit partial products (a*b and n*m accumulators), upper FP bank.
116 $aloa="%f32"; $alob="%f34"; $aloc="%f36"; $alod="%f38";
117 $ahia="%f40"; $ahib="%f42"; $ahic="%f44"; $ahid="%f46";
118 $nloa="%f48"; $nlob="%f50"; $nloc="%f52"; $nlod="%f54";
119 $nhia="%f56"; $nhib="%f58"; $nhic="%f60"; $nhid="%f62";
# ASI_FL16_P: VIS address-space identifier selecting 16-bit
# ("short") floating-point loads via ldda.
121 $ASI_FL16_P=0xD2; # magic ASI value to engage 16-bit FP load
124 .ident "UltraSPARC Montgomery multiply by <appro\@fy.chalmers.se>"
125 .section ".text",#alloc,#execinstr
130 save %sp,-$frame-$locals,%sp
131 sethi %hi(0xffff),$mask
132 or $mask,%lo(0xffff),$mask
137 andcc $num,1,%g0 ! $num has to be even...
139 clr %i0 ! signal "unsupported input value"
144 andcc %l0,7,%g0 ! ...and pointers has to be 8-byte aligned
146 clr %i0 ! signal "unsupported input value"
147 ld [%i4+0],$n0 ! $n0 reassigned, remember?
150 or %o0,$n0,$n0 ! $n0=n0[1].n0[0]
152 sll $num,3,$num ! num*=8
154 add %sp,$bias,%o0 ! real top of stack
156 add %o1,$num,%o1 ! %o1=num*5
158 and %o0,-2048,%o0 ! optimize TLB utilization
159 sub %o0,$bias,%sp ! alloca(5*num*8)
161 rd %asi,%o7 ! save %asi
162 add %sp,$bias+$frame+$locals,$tp
164 add $ap_l,$num,$ap_l ! [an]p_[lh] point at the vectors' ends !
169 wr %g0,$ASI_FL16_P,%asi ! setup %asi for 16-bit FP loads
171 add $rp,$num,$rp ! readjust input pointers to point
172 add $ap,$num,$ap ! at the ends too...
176 stx %o7,[%sp+$bias+$frame+48] ! save %asi
178 sub %g0,$num,$i ! i=-num
179 sub %g0,$num,$j ! j=-num
184 ld [%o3+4],%g1 ! bp[0]
186 ld [%o4+4],%g5 ! ap[0]
195 mulx %o1,%o0,%o0 ! ap[0]*bp[0]
196 mulx $n0,%o0,%o0 ! ap[0]*bp[0]*n0
197 stx %o0,[%sp+$bias+$frame+0]
199 ld [%o3+0],$alo_ ! load a[j] as pair of 32-bit words
203 ld [%o5+0],$nlo_ ! load n[j] as pair of 32-bit words
208 ! transfer b[i] to FPU as 4x16-bit values
218 ! transfer ap[0]*b[0]*n0 to FPU as 4x16-bit values
219 ldda [%sp+$bias+$frame+6]%asi,$na
221 ldda [%sp+$bias+$frame+4]%asi,$nb
223 ldda [%sp+$bias+$frame+2]%asi,$nc
225 ldda [%sp+$bias+$frame+0]%asi,$nd
228 std $alo,[$ap_l+$j] ! save smashed ap[j] in double format
232 std $nlo,[$np_l+$j] ! save smashed np[j] in double format
242 faddd $aloa,$nloa,$nloa
245 faddd $alob,$nlob,$nlob
248 faddd $aloc,$nloc,$nloc
251 faddd $alod,$nlod,$nlod
254 faddd $ahia,$nhia,$nhia
257 faddd $ahib,$nhib,$nhib
260 faddd $ahic,$nhic,$dota ! $nhic
261 faddd $ahid,$nhid,$dotb ! $nhid
263 faddd $nloc,$nhia,$nloc
264 faddd $nlod,$nhib,$nlod
271 std $nloa,[%sp+$bias+$frame+0]
273 std $nlob,[%sp+$bias+$frame+8]
275 std $nloc,[%sp+$bias+$frame+16]
277 std $nlod,[%sp+$bias+$frame+24]
279 ld [%o4+0],$alo_ ! load a[j] as pair of 32-bit words
283 ld [%o5+0],$nlo_ ! load n[j] as pair of 32-bit words
293 ldx [%sp+$bias+$frame+0],%o0
295 ldx [%sp+$bias+$frame+8],%o1
297 ldx [%sp+$bias+$frame+16],%o2
299 ldx [%sp+$bias+$frame+24],%o3
303 std $alo,[$ap_l+$j] ! save smashed ap[j] in double format
307 faddd $aloa,$nloa,$nloa
310 std $nlo,[$np_l+$j] ! save smashed np[j] in double format
314 faddd $alob,$nlob,$nlob
318 add %o7,%o3,%o3 ! %o3.%o2[0..15].%o1[0..15].%o0[0..15]
319 faddd $aloc,$nloc,$nloc
329 !or %o7,%o0,%o0 ! 64-bit result
330 srlx %o3,16,%g1 ! 34-bit carry
333 faddd $alod,$nlod,$nlod
336 faddd $ahia,$nhia,$nhia
339 faddd $ahib,$nhib,$nhib
342 faddd $dota,$nloa,$nloa
343 faddd $dotb,$nlob,$nlob
344 faddd $ahic,$nhic,$dota ! $nhic
345 faddd $ahid,$nhid,$dotb ! $nhid
347 faddd $nloc,$nhia,$nloc
348 faddd $nlod,$nhib,$nlod
355 std $nloa,[%sp+$bias+$frame+0]
356 std $nlob,[%sp+$bias+$frame+8]
358 std $nloc,[%sp+$bias+$frame+16]
360 std $nlod,[%sp+$bias+$frame+24]
366 ld [%o4+0],$alo_ ! load a[j] as pair of 32-bit words
370 ld [%o5+0],$nlo_ ! load n[j] as pair of 32-bit words
380 ldx [%sp+$bias+$frame+0],%o0
382 ldx [%sp+$bias+$frame+8],%o1
384 ldx [%sp+$bias+$frame+16],%o2
386 ldx [%sp+$bias+$frame+24],%o3
390 std $alo,[$ap_l+$j] ! save smashed ap[j] in double format
394 faddd $aloa,$nloa,$nloa
397 std $nlo,[$np_l+$j] ! save smashed np[j] in double format
401 faddd $alob,$nlob,$nlob
405 add %o7,%o3,%o3 ! %o3.%o2[0..15].%o1[0..15].%o0[0..15]
407 faddd $aloc,$nloc,$nloc
413 faddd $alod,$nlod,$nlod
419 faddd $ahia,$nhia,$nhia
423 or %o7,%o0,%o0 ! 64-bit result
424 faddd $ahib,$nhib,$nhib
427 faddd $dota,$nloa,$nloa
428 srlx %o3,16,%g1 ! 34-bit carry
429 faddd $dotb,$nlob,$nlob
433 stx %o0,[$tp] ! tp[j-1]=
435 faddd $ahic,$nhic,$dota ! $nhic
436 faddd $ahid,$nhid,$dotb ! $nhid
438 faddd $nloc,$nhia,$nloc
439 faddd $nlod,$nhib,$nlod
446 std $nloa,[%sp+$bias+$frame+0]
447 std $nlob,[%sp+$bias+$frame+8]
448 std $nloc,[%sp+$bias+$frame+16]
449 std $nlod,[%sp+$bias+$frame+24]
459 ldx [%sp+$bias+$frame+0],%o0
460 ldx [%sp+$bias+$frame+8],%o1
461 ldx [%sp+$bias+$frame+16],%o2
462 ldx [%sp+$bias+$frame+24],%o3
465 std $dota,[%sp+$bias+$frame+32]
467 std $dotb,[%sp+$bias+$frame+40]
471 add %o7,%o3,%o3 ! %o3.%o2[0..15].%o1[0..15].%o0[0..15]
480 or %o7,%o0,%o0 ! 64-bit result
481 ldx [%sp+$bias+$frame+32],%o4
483 ldx [%sp+$bias+$frame+40],%o5
484 srlx %o3,16,%g1 ! 34-bit carry
488 stx %o0,[$tp] ! tp[j-1]=
502 stx %o4,[$tp] ! tp[num-1]=
508 sub %g0,$num,$j ! j=-num
509 add %sp,$bias+$frame+$locals,$tp
514 ld [%o3+4],%g1 ! bp[i]
516 ld [%o4+4],%g5 ! ap[0]
523 ldx [$tp],%o2 ! tp[0]
526 mulx $n0,%o0,%o0 ! (ap[0]*bp[i]+t[0])*n0
527 stx %o0,[%sp+$bias+$frame+0]
529 ! transfer b[i] to FPU as 4x16-bit values
535 ! transfer (ap[0]*b[i]+t[0])*n0 to FPU as 4x16-bit values
536 ldda [%sp+$bias+$frame+6]%asi,$na
538 ldda [%sp+$bias+$frame+4]%asi,$nb
540 ldda [%sp+$bias+$frame+2]%asi,$nc
542 ldda [%sp+$bias+$frame+0]%asi,$nd
544 ldd [$ap_l+$j],$alo ! load a[j] in double format
548 ldd [$np_l+$j],$nlo ! load n[j] in double format
558 faddd $aloa,$nloa,$nloa
561 faddd $alob,$nlob,$nlob
564 faddd $aloc,$nloc,$nloc
567 faddd $alod,$nlod,$nlod
570 faddd $ahia,$nhia,$nhia
573 faddd $ahib,$nhib,$nhib
576 faddd $ahic,$nhic,$dota ! $nhic
577 faddd $ahid,$nhid,$dotb ! $nhid
579 faddd $nloc,$nhia,$nloc
580 faddd $nlod,$nhib,$nlod
587 std $nloa,[%sp+$bias+$frame+0]
588 std $nlob,[%sp+$bias+$frame+8]
589 std $nloc,[%sp+$bias+$frame+16]
591 std $nlod,[%sp+$bias+$frame+24]
593 ldd [$ap_l+$j],$alo ! load a[j] in double format
595 ldd [$np_l+$j],$nlo ! load n[j] in double format
603 ldx [%sp+$bias+$frame+0],%o0
604 faddd $aloa,$nloa,$nloa
606 ldx [%sp+$bias+$frame+8],%o1
608 ldx [%sp+$bias+$frame+16],%o2
609 faddd $alob,$nlob,$nlob
611 ldx [%sp+$bias+$frame+24],%o3
615 faddd $aloc,$nloc,$nloc
620 faddd $alod,$nlod,$nlod
625 faddd $ahia,$nhia,$nhia
627 add %o7,%o3,%o3 ! %o3.%o2[0..15].%o1[0..15].%o0[0..15]
633 faddd $ahib,$nhib,$nhib
636 faddd $dota,$nloa,$nloa
638 faddd $dotb,$nlob,$nlob
641 faddd $ahic,$nhic,$dota ! $nhic
643 faddd $ahid,$nhid,$dotb ! $nhid
644 or %o7,%o0,%o0 ! 64-bit result
646 faddd $nloc,$nhia,$nloc
649 faddd $nlod,$nhib,$nlod
650 srlx %o3,16,%g1 ! 34-bit carry
659 std $nloa,[%sp+$bias+$frame+0]
660 std $nlob,[%sp+$bias+$frame+8]
662 std $nloc,[%sp+$bias+$frame+16]
663 bz,pn %icc,.Linnerskip
664 std $nlod,[%sp+$bias+$frame+24]
670 ldd [$ap_l+$j],$alo ! load a[j] in double format
672 ldd [$np_l+$j],$nlo ! load n[j] in double format
680 ldx [%sp+$bias+$frame+0],%o0
681 faddd $aloa,$nloa,$nloa
683 ldx [%sp+$bias+$frame+8],%o1
685 ldx [%sp+$bias+$frame+16],%o2
686 faddd $alob,$nlob,$nlob
688 ldx [%sp+$bias+$frame+24],%o3
692 faddd $aloc,$nloc,$nloc
697 faddd $alod,$nlod,$nlod
702 faddd $ahia,$nhia,$nhia
704 add %o7,%o3,%o3 ! %o3.%o2[0..15].%o1[0..15].%o0[0..15]
709 faddd $ahib,$nhib,$nhib
712 faddd $dota,$nloa,$nloa
714 faddd $dotb,$nlob,$nlob
717 faddd $ahic,$nhic,$dota ! $nhic
719 faddd $ahid,$nhid,$dotb ! $nhid
720 or %o7,%o0,%o0 ! 64-bit result
721 faddd $nloc,$nhia,$nloc
723 ldx [$tp+8],%o7 ! tp[j]
724 faddd $nlod,$nhib,$nlod
725 srlx %o3,16,%g1 ! 34-bit carry
735 stx %o0,[$tp] ! tp[j-1]
738 std $nloa,[%sp+$bias+$frame+0]
739 std $nlob,[%sp+$bias+$frame+8]
740 std $nloc,[%sp+$bias+$frame+16]
742 std $nlod,[%sp+$bias+$frame+24]
750 ldx [%sp+$bias+$frame+0],%o0
751 ldx [%sp+$bias+$frame+8],%o1
752 ldx [%sp+$bias+$frame+16],%o2
753 ldx [%sp+$bias+$frame+24],%o3
756 std $dota,[%sp+$bias+$frame+32]
758 std $dotb,[%sp+$bias+$frame+40]
762 add %o7,%o3,%o3 ! %o3.%o2[0..15].%o1[0..15].%o0[0..15]
771 ldx [%sp+$bias+$frame+32],%o4
772 or %o7,%o0,%o0 ! 64-bit result
773 ldx [%sp+$bias+$frame+40],%o5
775 ldx [$tp+8],%o7 ! tp[j]
776 srlx %o3,16,%g1 ! 34-bit carry
784 stx %o0,[$tp] ! tp[j-1]
798 stx %o4,[$tp] ! tp[num-1]
807 sub %g0,$num,%o7 ! n=-num
808 cmp $carry,0 ! clears %icc.c
810 add $tp,8,$tp ! adjust tp to point at the end
814 cmp %o0,%o1 ! compare topmost words
815 bcs,pt %icc,.Lcopy ! %icc.c is clean if not taken
832 subccc $carry,0,$carry
834 sub %g0,$num,%o7 ! n=-num
846 sub %g0,$num,%o7 ! n=-num
859 ldx [%sp+$bias+$frame+48],%o7
860 wr %g0,%o7,%asi ! restore %asi
866 .type $fname,#function
867 .size $fname,(.-$fname)
868 .asciz "Montgomery Multipltication for UltraSPARC, CRYPTOGAMS by <appro\@openssl.org>"
# Standard perlasm idiom: evaluate every backtick-quoted Perl expression
# embedded in the assembly template and splice in its result (/e runs
# eval() on the replacement, /g for all occurrences, /m multi-line).
871 $code =~ s/\`([^\`]*)\`/eval($1)/gem;
873 # Below substitution makes it possible to compile without demanding
874 # VIS extensions on command line, e.g. -xarch=v9 vs. -xarch=v9a. I
875 # dare to do this, because VIS capability is detected at run-time now
876 # and this routine is not called on CPU not capable to execute it. Do
877 # note that fzeros is not the only VIS dependency! Another dependency
878 # is implicit and is just _a_ numerical value loaded to %asi register,
879 # which assembler can't recognize as VIS specific...
880 $code =~ s/fzeros\s+%f([0-9]+)/
881 sprintf(".word\t0x%x\t! fzeros %%f%d",0x81b00c20|($1<<25),$1)