3 # ====================================================================
4 # Written by Andy Polyakov <appro@fy.chalmers.se> for the OpenSSL
5 # project. The module is, however, dual licensed under OpenSSL and
6 # CRYPTOGAMS licenses depending on where you obtain it. For further
7 # details see http://www.openssl.org/~appro/cryptogams/.
8 # ====================================================================
12 # "Teaser" Montgomery multiplication module for UltraSPARC. Why FPU?
13 # Because unlike integer multiplier, which simply stalls whole CPU,
14 # FPU is fully pipelined and can effectively emit 48 bit partial
15 # product every cycle. Why not blended SPARC v9? One can argue that
16 # making this module dependent on UltraSPARC VIS extension limits its
17 # binary compatibility. Well yes, it does exclude SPARC64 prior-V(!)
18 # implementations from compatibility matrix. But the rest, whole Sun
19 # UltraSPARC family and brand new Fujitsu's SPARC64 V, all support
20 # VIS extension instructions used in this module. This is considered
21 # good enough to recommend HAL SPARC64 users [if any] to simply fall
22 # back to the no-asm configuration.
24 # USI&II cores currently exhibit uniform 2x improvement [over pre-
25 # bn_mul_mont codebase] for all key lengths and benchmarks. On USIII
26 # performance improves a few percent for shorter keys and worsens a few
27 # percent for longer keys. This is because the USIII integer multiplier
28 # is >3x faster than USI&II one, which is harder to match [but see
29 # TODO list below]. It should also be noted that SPARC64 V features
30 # out-of-order execution, which *might* mean that integer multiplier
31 # is pipelined, which in turn *might* be impossible to match... On
32 # additional note, SPARC64 V implements FP Multiply-Add instruction,
33 # which is perfectly usable in this context... In other words, as far
34 # as HAL/Fujitsu SPARC64 family goes, talk to the author:-)
36 # The implementation implies following "non-natural" limitations on
38 # - num may not be less than 4;
39 # - num has to be even;
40 # - ap, bp, rp, np have to be 64-bit aligned [which is not a problem
41 # as long as BIGNUM.d are malloc-ated];
42 # Failure to meet either condition has no fatal effects, simply
43 # doesn't give any performance gain.
46 # - modulo-schedule inner loop for better performance (on in-order
47 # execution core such as UltraSPARC this shall result in further
48 # noticeable(!) improvement);
49 # - dedicated squaring procedure[?];
51 ######################################################################
54 # Modulo-scheduled inner loops allow to interleave floating point and
55 # integer instructions and minimize Read-After-Write penalties. This
56 # results in *further* 20-50% performance improvement [depending on
57 # key length, more for longer keys] on USI&II cores and 30-80% - on
# Name of the generated Montgomery multiplication routine.
60 $fname="bn_mul_mont_fpu";
# Switch to 64-bit code generation when a 64-bit ABI flag appears on the
# command line (gcc-style -m64 or Sun cc-style -xarch=v9).
62 for (@ARGV) { $bits=64 if (/\-m64/ || /\-xarch\=v9/); }
# Register-save area: the 96-byte SPARC minimum frame rounded up to the
# largest known cache-line size to avoid line splits.
69 $frame=128; # 96 rounded up to largest known cache-line
73 # In order to provide for 32-/64-bit ABI duality, I keep integers wider
74 # than 32 bit in %g1-%g4 and %o0-%o5. %l0-%l7 and %i0-%i5 are used
75 # exclusively for pointers, indexes and other small values...
# Incoming C arguments of bn_mul_mont, mapped onto the SPARC input
# registers they arrive in (after the register-window save):
77 $rp="%i0";	# BN_ULONG *rp,
78 $ap="%i1";	# const BN_ULONG *ap,
79 $bp="%i2";	# const BN_ULONG *bp,
80 $np="%i3";	# const BN_ULONG *np,
81 $n0="%i4";	# const BN_ULONG *n0,
82 $num="%i5";	# int num);
# Pointers into the four scratch vectors on the alloca-ted stack area:
85 $ap_l="%l1";	# a[num],n[num] are smashed to 32-bit words and saved
86 $ap_h="%l2";	# to these four vectors as double-precision FP values.
87 $np_l="%l3";	# This way a bunch of fxtods are eliminated in second
88 $np_h="%l4";	# loop and L1-cache aliasing is minimized...
91 $mask="%l7";	# 16-bit mask, 0xffff
# NOTE: the next two deliberately REUSE registers named above — once the
# arguments have been consumed, %g4 holds the 64-bit n0 value and %i4
# (originally the n0 pointer) holds the running carry bit.
93 $n0="%g4";	# reassigned(!) to "64-bit" register
94 $carry="%i4";	# %i4 reused(!) for a carry bit
96 # FP register naming chart
# b[i] and the Montgomery multiplier, each split into 4x16-bit limbs
# held as double-precision values:
111 $ba="%f0";    $bb="%f2";    $bc="%f4";    $bd="%f6";
112 $na="%f8";    $nb="%f10";   $nc="%f12";   $nd="%f14";
# Current a[j]/n[j] words: even register = double view, odd (_-suffixed)
# register = the 32-bit half used for the integer load:
113 $alo="%f16";  $alo_="%f17"; $ahi="%f18";  $ahi_="%f19";
114 $nlo="%f20";  $nlo_="%f21"; $nhi="%f22";  $nhi_="%f23";
# Top partial products carried across loop iterations:
116 $dota="%f24"; $dotb="%f26";
# 16 accumulators for the 4x4 partial-product grid (upper FP bank):
118 $aloa="%f32"; $alob="%f34"; $aloc="%f36"; $alod="%f38";
119 $ahia="%f40"; $ahib="%f42"; $ahic="%f44"; $ahid="%f46";
120 $nloa="%f48"; $nlob="%f50"; $nloc="%f52"; $nlod="%f54";
121 $nhia="%f56"; $nhib="%f58"; $nhic="%f60"; $nhid="%f62";
# ASI_FL16_P: address-space identifier selecting the VIS 16-bit
# floating-point load, used by the ldda instructions below.
123 $ASI_FL16_P=0xD2;	# magic ASI value to engage 16-bit FP load
126 .ident "UltraSPARC Montgomery multiply by <appro\@fy.chalmers.se>"
127 .section ".text",#alloc,#execinstr
132 save %sp,-$frame-$locals,%sp
133 sethi %hi(0xffff),$mask
134 or $mask,%lo(0xffff),$mask
139 andcc $num,1,%g0 ! $num has to be even...
141 clr %i0 ! signal "unsupported input value"
146 andcc %l0,7,%g0 ! ...and pointers has to be 8-byte aligned
148 clr %i0 ! signal "unsupported input value"
149 ld [%i4+0],$n0 ! $n0 reassigned, remember?
152 or %o0,$n0,$n0 ! $n0=n0[1].n0[0]
154 sll $num,3,$num ! num*=8
156 add %sp,$bias,%o0 ! real top of stack
158 add %o1,$num,%o1 ! %o1=num*5
160 and %o0,-2048,%o0 ! optimize TLB utilization
161 sub %o0,$bias,%sp ! alloca(5*num*8)
163 rd %asi,%o7 ! save %asi
164 add %sp,$bias+$frame+$locals,$tp
166 add $ap_l,$num,$ap_l ! [an]p_[lh] point at the vectors' ends !
171 wr %g0,$ASI_FL16_P,%asi ! setup %asi for 16-bit FP loads
173 add $rp,$num,$rp ! readjust input pointers to point
174 add $ap,$num,$ap ! at the ends too...
178 stx %o7,[%sp+$bias+$frame+48] ! save %asi
180 sub %g0,$num,$i ! i=-num
181 sub %g0,$num,$j ! j=-num
186 ldx [$bp+$i],%o0 ! bp[0]
187 ldx [$ap+$j],%o1 ! ap[0]
197 mulx %o1,%o0,%o0 ! ap[0]*bp[0]
198 mulx $n0,%o0,%o0 ! ap[0]*bp[0]*n0
199 stx %o0,[%sp+$bias+$frame+0]
201 ld [%o3+0],$alo_ ! load a[j] as pair of 32-bit words
205 ld [%o5+0],$nlo_ ! load n[j] as pair of 32-bit words
210 ! transfer b[i] to FPU as 4x16-bit values
220 ! transfer ap[0]*b[0]*n0 to FPU as 4x16-bit values
221 ldda [%sp+$bias+$frame+6]%asi,$na
223 ldda [%sp+$bias+$frame+4]%asi,$nb
225 ldda [%sp+$bias+$frame+2]%asi,$nc
227 ldda [%sp+$bias+$frame+0]%asi,$nd
230 std $alo,[$ap_l+$j] ! save smashed ap[j] in double format
234 std $nlo,[$np_l+$j] ! save smashed np[j] in double format
244 faddd $aloa,$nloa,$nloa
247 faddd $alob,$nlob,$nlob
250 faddd $aloc,$nloc,$nloc
253 faddd $alod,$nlod,$nlod
256 faddd $ahia,$nhia,$nhia
259 faddd $ahib,$nhib,$nhib
262 faddd $ahic,$nhic,$dota ! $nhic
263 faddd $ahid,$nhid,$dotb ! $nhid
265 faddd $nloc,$nhia,$nloc
266 faddd $nlod,$nhib,$nlod
273 std $nloa,[%sp+$bias+$frame+0]
275 std $nlob,[%sp+$bias+$frame+8]
277 std $nloc,[%sp+$bias+$frame+16]
279 std $nlod,[%sp+$bias+$frame+24]
281 ld [%o4+0],$alo_ ! load a[j] as pair of 32-bit words
285 ld [%o5+0],$nlo_ ! load n[j] as pair of 32-bit words
295 ldx [%sp+$bias+$frame+0],%o0
297 ldx [%sp+$bias+$frame+8],%o1
299 ldx [%sp+$bias+$frame+16],%o2
301 ldx [%sp+$bias+$frame+24],%o3
305 std $alo,[$ap_l+$j] ! save smashed ap[j] in double format
309 faddd $aloa,$nloa,$nloa
312 std $nlo,[$np_l+$j] ! save smashed np[j] in double format
316 faddd $alob,$nlob,$nlob
320 add %o7,%o3,%o3 ! %o3.%o2[0..15].%o1[0..15].%o0[0..15]
321 faddd $aloc,$nloc,$nloc
331 !or %o7,%o0,%o0 ! 64-bit result
332 srlx %o3,16,%g1 ! 34-bit carry
335 faddd $alod,$nlod,$nlod
338 faddd $ahia,$nhia,$nhia
341 faddd $ahib,$nhib,$nhib
344 faddd $dota,$nloa,$nloa
345 faddd $dotb,$nlob,$nlob
346 faddd $ahic,$nhic,$dota ! $nhic
347 faddd $ahid,$nhid,$dotb ! $nhid
349 faddd $nloc,$nhia,$nloc
350 faddd $nlod,$nhib,$nlod
357 std $nloa,[%sp+$bias+$frame+0]
358 std $nlob,[%sp+$bias+$frame+8]
360 std $nloc,[%sp+$bias+$frame+16]
362 std $nlod,[%sp+$bias+$frame+24]
368 ld [%o4+0],$alo_ ! load a[j] as pair of 32-bit words
372 ld [%o5+0],$nlo_ ! load n[j] as pair of 32-bit words
382 ldx [%sp+$bias+$frame+0],%o0
384 ldx [%sp+$bias+$frame+8],%o1
386 ldx [%sp+$bias+$frame+16],%o2
388 ldx [%sp+$bias+$frame+24],%o3
392 std $alo,[$ap_l+$j] ! save smashed ap[j] in double format
396 faddd $aloa,$nloa,$nloa
399 std $nlo,[$np_l+$j] ! save smashed np[j] in double format
403 faddd $alob,$nlob,$nlob
407 add %o7,%o3,%o3 ! %o3.%o2[0..15].%o1[0..15].%o0[0..15]
409 faddd $aloc,$nloc,$nloc
415 faddd $alod,$nlod,$nlod
421 faddd $ahia,$nhia,$nhia
425 or %o7,%o0,%o0 ! 64-bit result
426 faddd $ahib,$nhib,$nhib
429 faddd $dota,$nloa,$nloa
430 srlx %o3,16,%g1 ! 34-bit carry
431 faddd $dotb,$nlob,$nlob
435 stx %o0,[$tp] ! tp[j-1]=
437 faddd $ahic,$nhic,$dota ! $nhic
438 faddd $ahid,$nhid,$dotb ! $nhid
440 faddd $nloc,$nhia,$nloc
441 faddd $nlod,$nhib,$nlod
448 std $nloa,[%sp+$bias+$frame+0]
449 std $nlob,[%sp+$bias+$frame+8]
450 std $nloc,[%sp+$bias+$frame+16]
451 std $nlod,[%sp+$bias+$frame+24]
461 ldx [%sp+$bias+$frame+0],%o0
462 ldx [%sp+$bias+$frame+8],%o1
463 ldx [%sp+$bias+$frame+16],%o2
464 ldx [%sp+$bias+$frame+24],%o3
467 std $dota,[%sp+$bias+$frame+32]
469 std $dotb,[%sp+$bias+$frame+40]
473 add %o7,%o3,%o3 ! %o3.%o2[0..15].%o1[0..15].%o0[0..15]
482 or %o7,%o0,%o0 ! 64-bit result
483 ldx [%sp+$bias+$frame+32],%o4
485 ldx [%sp+$bias+$frame+40],%o5
486 srlx %o3,16,%g1 ! 34-bit carry
490 stx %o0,[$tp] ! tp[j-1]=
504 stx %o4,[$tp] ! tp[num-1]=
510 sub %g0,$num,$j ! j=-num
511 add %sp,$bias+$frame+$locals,$tp
515 ldx [$bp+$i],%o0 ! bp[i]
516 ldx [$ap+$j],%o1 ! ap[0]
524 ldx [$tp],%o2 ! tp[0]
527 mulx $n0,%o0,%o0 ! (ap[0]*bp[i]+t[0])*n0
528 stx %o0,[%sp+$bias+$frame+0]
530 ! transfer b[i] to FPU as 4x16-bit values
536 ! transfer (ap[0]*b[i]+t[0])*n0 to FPU as 4x16-bit values
537 ldda [%sp+$bias+$frame+6]%asi,$na
539 ldda [%sp+$bias+$frame+4]%asi,$nb
541 ldda [%sp+$bias+$frame+2]%asi,$nc
543 ldda [%sp+$bias+$frame+0]%asi,$nd
545 ldd [$ap_l+$j],$alo ! load a[j] in double format
549 ldd [$np_l+$j],$nlo ! load n[j] in double format
559 faddd $aloa,$nloa,$nloa
562 faddd $alob,$nlob,$nlob
565 faddd $aloc,$nloc,$nloc
568 faddd $alod,$nlod,$nlod
571 faddd $ahia,$nhia,$nhia
574 faddd $ahib,$nhib,$nhib
577 faddd $ahic,$nhic,$dota ! $nhic
578 faddd $ahid,$nhid,$dotb ! $nhid
580 faddd $nloc,$nhia,$nloc
581 faddd $nlod,$nhib,$nlod
588 std $nloa,[%sp+$bias+$frame+0]
589 std $nlob,[%sp+$bias+$frame+8]
590 std $nloc,[%sp+$bias+$frame+16]
592 std $nlod,[%sp+$bias+$frame+24]
594 ldd [$ap_l+$j],$alo ! load a[j] in double format
596 ldd [$np_l+$j],$nlo ! load n[j] in double format
604 ldx [%sp+$bias+$frame+0],%o0
605 faddd $aloa,$nloa,$nloa
607 ldx [%sp+$bias+$frame+8],%o1
609 ldx [%sp+$bias+$frame+16],%o2
610 faddd $alob,$nlob,$nlob
612 ldx [%sp+$bias+$frame+24],%o3
616 faddd $aloc,$nloc,$nloc
621 faddd $alod,$nlod,$nlod
626 faddd $ahia,$nhia,$nhia
628 add %o7,%o3,%o3 ! %o3.%o2[0..15].%o1[0..15].%o0[0..15]
634 faddd $ahib,$nhib,$nhib
637 faddd $dota,$nloa,$nloa
639 faddd $dotb,$nlob,$nlob
642 faddd $ahic,$nhic,$dota ! $nhic
644 faddd $ahid,$nhid,$dotb ! $nhid
645 or %o7,%o0,%o0 ! 64-bit result
647 faddd $nloc,$nhia,$nloc
650 faddd $nlod,$nhib,$nlod
651 srlx %o3,16,%g1 ! 34-bit carry
660 std $nloa,[%sp+$bias+$frame+0]
661 std $nlob,[%sp+$bias+$frame+8]
663 std $nloc,[%sp+$bias+$frame+16]
664 bz,pn %icc,.Linnerskip
665 std $nlod,[%sp+$bias+$frame+24]
671 ldd [$ap_l+$j],$alo ! load a[j] in double format
673 ldd [$np_l+$j],$nlo ! load n[j] in double format
681 ldx [%sp+$bias+$frame+0],%o0
682 faddd $aloa,$nloa,$nloa
684 ldx [%sp+$bias+$frame+8],%o1
686 ldx [%sp+$bias+$frame+16],%o2
687 faddd $alob,$nlob,$nlob
689 ldx [%sp+$bias+$frame+24],%o3
693 faddd $aloc,$nloc,$nloc
698 faddd $alod,$nlod,$nlod
703 faddd $ahia,$nhia,$nhia
705 add %o7,%o3,%o3 ! %o3.%o2[0..15].%o1[0..15].%o0[0..15]
710 faddd $ahib,$nhib,$nhib
713 faddd $dota,$nloa,$nloa
715 faddd $dotb,$nlob,$nlob
718 faddd $ahic,$nhic,$dota ! $nhic
720 faddd $ahid,$nhid,$dotb ! $nhid
721 or %o7,%o0,%o0 ! 64-bit result
722 faddd $nloc,$nhia,$nloc
724 ldx [$tp+8],%o7 ! tp[j]
725 faddd $nlod,$nhib,$nlod
726 srlx %o3,16,%g1 ! 34-bit carry
736 stx %o0,[$tp] ! tp[j-1]
739 std $nloa,[%sp+$bias+$frame+0]
740 std $nlob,[%sp+$bias+$frame+8]
741 std $nloc,[%sp+$bias+$frame+16]
743 std $nlod,[%sp+$bias+$frame+24]
751 ldx [%sp+$bias+$frame+0],%o0
752 ldx [%sp+$bias+$frame+8],%o1
753 ldx [%sp+$bias+$frame+16],%o2
754 ldx [%sp+$bias+$frame+24],%o3
757 std $dota,[%sp+$bias+$frame+32]
759 std $dotb,[%sp+$bias+$frame+40]
763 add %o7,%o3,%o3 ! %o3.%o2[0..15].%o1[0..15].%o0[0..15]
772 ldx [%sp+$bias+$frame+32],%o4
773 or %o7,%o0,%o0 ! 64-bit result
774 ldx [%sp+$bias+$frame+40],%o5
776 ldx [$tp+8],%o7 ! tp[j]
777 srlx %o3,16,%g1 ! 34-bit carry
785 stx %o0,[$tp] ! tp[j-1]
799 stx %o4,[$tp] ! tp[num-1]
808 sub %g0,$num,%o7 ! n=-num
809 cmp $carry,0 ! clears %icc.c
811 add $tp,8,$tp ! adjust tp to point at the end
815 cmp %o0,%o1 ! compare topmost words
816 bcs,pt %icc,.Lcopy ! %icc.c is clean if not taken
829 subccc $carry,0,$carry
831 sub %g0,$num,%o7 ! n=-num
842 sub %g0,$num,%o7 ! n=-num
855 ldx [%sp+$bias+$frame+48],%o7
856 wr %g0,%o7,%asi ! restore %asi
862 .type $fname,#function
863 .size $fname,(.-$fname)
# Expand every `...` construct embedded in the generated assembly by
# eval-ing its contents (compile-time arithmetic on stack offsets etc.);
# /e evaluates the replacement as Perl, /g applies it throughout.
866 $code =~ s/\`([^\`]*)\`/eval($1)/gem;
868 # Below substitution makes it possible to compile without demanding
869 # VIS extensions on command line, e.g. -xarch=v9 vs. -xarch=v9a. I
870 # dare to do this, because VIS capability is detected at run-time now
871 # and this routine is not called on CPUs not capable of executing it. Do
872 # note that fzeros is not the only VIS dependency! Another dependency
873 # is implicit and is just _a_ numerical value loaded to %asi register,
874 # which assembler can't recognize as VIS specific...
875 $code =~ s/fzeros\s+%f([0-9]+)/
876 sprintf(".word\t0x%x\t! fzeros %%f%d",0x81b00c20|($1<<25),$1)