3 # ====================================================================
4 # Written by Andy Polyakov <appro@fy.chalmers.se> for the OpenSSL
5 # project. The module is, however, dual licensed under OpenSSL and
6 # CRYPTOGAMS licenses depending on where you obtain it. For further
7 # details see http://www.openssl.org/~appro/cryptogams/.
8 # ====================================================================
12 # "Teaser" Montgomery multiplication module for UltraSPARC. Why FPU?
13 # Because unlike integer multiplier, which simply stalls whole CPU,
14 # FPU is fully pipelined and can effectively emit 48 bit partial
15 # product every cycle. Why not blended SPARC v9? One can argue that
16 # making this module dependent on UltraSPARC VIS extension limits its
17 # binary compatibility. Well yes, it does exclude SPARC64 prior-V(!)
18 # implementations from compatibility matrix. But the rest, whole Sun
19 # UltraSPARC family and brand new Fujitsu's SPARC64 V, all support
20 # VIS extension instructions used in this module. This is considered
21 # good enough to not care about HAL SPARC64 users [if any] who have
22 # integer-only pure SPARCv9 module to "fall down" to.
24 # USI&II cores currently exhibit uniform 2x improvement [over pre-
25 # bn_mul_mont codebase] for all key lengths and benchmarks. On USIII
26 # performance improves few percents for shorter keys and worsens few
27 # percents for longer keys. This is because USIII integer multiplier
28 # is >3x faster than USI&II one, which is harder to match [but see
29 # TODO list below]. It should also be noted that SPARC64 V features
30 # out-of-order execution, which *might* mean that integer multiplier
31 # is pipelined, which in turn *might* be impossible to match... On
32 # additional note, SPARC64 V implements FP Multiply-Add instruction,
33 # which is perfectly usable in this context... In other words, as far
34 # as Fujitsu SPARC64 V goes, talk to the author:-)
36 # The implementation implies following "non-natural" limitations on
38 # - num may not be less than 4;
39 # - num has to be even;
40 # Failure to meet either condition has no fatal effects, simply
41 # doesn't give any performance gain.
44 # - modulo-schedule inner loop for better performance (on in-order
45 # execution core such as UltraSPARC this shall result in further
46 # noticeable(!) improvement);
47 # - dedicated squaring procedure[?];
49 ######################################################################
52 # Modulo-scheduled inner loops allow to interleave floating point and
53 # integer instructions and minimize Read-After-Write penalties. This
54 # results in *further* 20-50% performance improvement [depending on
55 # key length, more for longer keys] on USI&II cores and 30-80% - on
# Redirect STDOUT to the requested output file so all subsequent
# assembly emission simply prints. Use the safe 3-arg form of open
# (the original 2-arg ">$output" is open to mode injection via the
# filename) and fail loudly instead of silently writing nowhere.
open STDOUT, '>', $output or die "can't open $output: $!";
# Symbol name under which the generated Montgomery-multiplication
# routine is emitted (referenced later by .type/.size directives).
61 $fname="bn_mul_mont_fpu";
67 # In order to provide for 32-/64-bit ABI duality, I keep integers wider
68 # than 32 bit in %g1-%g4 and %o0-%o5. %l0-%l7 and %i0-%i5 are used
69 # exclusively for pointers, indexes and other small values...
71 $rp="%i0"; # BN_ULONG *rp,
72 $ap="%i1"; # const BN_ULONG *ap,
73 $bp="%i2"; # const BN_ULONG *bp,
74 $np="%i3"; # const BN_ULONG *np,
75 $n0="%i4"; # const BN_ULONG *n0,
76 $num="%i5"; # int num);
79 $ap_l="%l1"; # a[num],n[num] are smashed to 32-bit words and saved
80 $ap_h="%l2"; # to these four vectors as double-precision FP values.
81 $np_l="%l3"; # This way a bunch of fxtods are eliminated in second
82 $np_h="%l4"; # loop and L1-cache aliasing is minimized...
85 $mask="%l7"; # 16-bit mask, 0xffff
87 $n0="%g4"; # reassigned(!) to "64-bit" register
88 $carry="%i4"; # %i4 reused(!) for a carry bit
90 # FP register naming chart
105 $ba="%f0"; $bb="%f2"; $bc="%f4"; $bd="%f6";
106 $na="%f8"; $nb="%f10"; $nc="%f12"; $nd="%f14";
107 $alo="%f16"; $alo_="%f17"; $ahi="%f18"; $ahi_="%f19";
108 $nlo="%f20"; $nlo_="%f21"; $nhi="%f22"; $nhi_="%f23";
110 $dota="%f24"; $dotb="%f26";
112 $aloa="%f32"; $alob="%f34"; $aloc="%f36"; $alod="%f38";
113 $ahia="%f40"; $ahib="%f42"; $ahic="%f44"; $ahid="%f46";
114 $nloa="%f48"; $nlob="%f50"; $nloc="%f52"; $nlod="%f54";
115 $nhia="%f56"; $nhib="%f58"; $nhic="%f60"; $nhid="%f62";
117 $ASI_FL16_P=0xD2; # magic ASI value to engage 16-bit FP load
120 #include "sparc_arch.h"
122 .section ".text",#alloc,#execinstr
127 save %sp,-$frame-$locals,%sp
132 andcc $num,1,%g0 ! $num has to be even...
134 clr %i0 ! signal "unsupported input value"
137 sethi %hi(0xffff),$mask
138 ld [%i4+0],$n0 ! $n0 reassigned, remember?
139 or $mask,%lo(0xffff),$mask
142 or %o0,$n0,$n0 ! $n0=n0[1].n0[0]
144 sll $num,3,$num ! num*=8
146 add %sp,$bias,%o0 ! real top of stack
148 add %o1,$num,%o1 ! %o1=num*5
150 and %o0,-2048,%o0 ! optimize TLB utilization
151 sub %o0,$bias,%sp ! alloca(5*num*8)
153 rd %asi,%o7 ! save %asi
154 add %sp,$bias+$frame+$locals,$tp
156 add $ap_l,$num,$ap_l ! [an]p_[lh] point at the vectors' ends !
161 wr %g0,$ASI_FL16_P,%asi ! setup %asi for 16-bit FP loads
163 add $rp,$num,$rp ! readjust input pointers to point
164 add $ap,$num,$ap ! at the ends too...
168 stx %o7,[%sp+$bias+$frame+48] ! save %asi
170 sub %g0,$num,$i ! i=-num
171 sub %g0,$num,$j ! j=-num
176 ld [%o3+4],%g1 ! bp[0]
178 ld [%o4+4],%g5 ! ap[0]
187 mulx %o1,%o0,%o0 ! ap[0]*bp[0]
188 mulx $n0,%o0,%o0 ! ap[0]*bp[0]*n0
189 stx %o0,[%sp+$bias+$frame+0]
191 ld [%o3+0],$alo_ ! load a[j] as pair of 32-bit words
195 ld [%o5+0],$nlo_ ! load n[j] as pair of 32-bit words
200 ! transfer b[i] to FPU as 4x16-bit values
210 ! transfer ap[0]*b[0]*n0 to FPU as 4x16-bit values
211 ldda [%sp+$bias+$frame+6]%asi,$na
213 ldda [%sp+$bias+$frame+4]%asi,$nb
215 ldda [%sp+$bias+$frame+2]%asi,$nc
217 ldda [%sp+$bias+$frame+0]%asi,$nd
220 std $alo,[$ap_l+$j] ! save smashed ap[j] in double format
224 std $nlo,[$np_l+$j] ! save smashed np[j] in double format
234 faddd $aloa,$nloa,$nloa
237 faddd $alob,$nlob,$nlob
240 faddd $aloc,$nloc,$nloc
243 faddd $alod,$nlod,$nlod
246 faddd $ahia,$nhia,$nhia
249 faddd $ahib,$nhib,$nhib
252 faddd $ahic,$nhic,$dota ! $nhic
253 faddd $ahid,$nhid,$dotb ! $nhid
255 faddd $nloc,$nhia,$nloc
256 faddd $nlod,$nhib,$nlod
263 std $nloa,[%sp+$bias+$frame+0]
265 std $nlob,[%sp+$bias+$frame+8]
267 std $nloc,[%sp+$bias+$frame+16]
269 std $nlod,[%sp+$bias+$frame+24]
271 ld [%o4+0],$alo_ ! load a[j] as pair of 32-bit words
275 ld [%o5+0],$nlo_ ! load n[j] as pair of 32-bit words
285 ldx [%sp+$bias+$frame+0],%o0
287 ldx [%sp+$bias+$frame+8],%o1
289 ldx [%sp+$bias+$frame+16],%o2
291 ldx [%sp+$bias+$frame+24],%o3
295 std $alo,[$ap_l+$j] ! save smashed ap[j] in double format
299 faddd $aloa,$nloa,$nloa
302 std $nlo,[$np_l+$j] ! save smashed np[j] in double format
306 faddd $alob,$nlob,$nlob
310 add %o7,%o3,%o3 ! %o3.%o2[0..15].%o1[0..15].%o0[0..15]
311 faddd $aloc,$nloc,$nloc
321 !or %o7,%o0,%o0 ! 64-bit result
322 srlx %o3,16,%g1 ! 34-bit carry
325 faddd $alod,$nlod,$nlod
328 faddd $ahia,$nhia,$nhia
331 faddd $ahib,$nhib,$nhib
334 faddd $dota,$nloa,$nloa
335 faddd $dotb,$nlob,$nlob
336 faddd $ahic,$nhic,$dota ! $nhic
337 faddd $ahid,$nhid,$dotb ! $nhid
339 faddd $nloc,$nhia,$nloc
340 faddd $nlod,$nhib,$nlod
347 std $nloa,[%sp+$bias+$frame+0]
348 std $nlob,[%sp+$bias+$frame+8]
350 std $nloc,[%sp+$bias+$frame+16]
352 std $nlod,[%sp+$bias+$frame+24]
354 .align 32 ! incidentally already aligned !
358 ld [%o4+0],$alo_ ! load a[j] as pair of 32-bit words
362 ld [%o5+0],$nlo_ ! load n[j] as pair of 32-bit words
372 ldx [%sp+$bias+$frame+0],%o0
374 ldx [%sp+$bias+$frame+8],%o1
376 ldx [%sp+$bias+$frame+16],%o2
378 ldx [%sp+$bias+$frame+24],%o3
382 std $alo,[$ap_l+$j] ! save smashed ap[j] in double format
386 faddd $aloa,$nloa,$nloa
389 std $nlo,[$np_l+$j] ! save smashed np[j] in double format
393 faddd $alob,$nlob,$nlob
397 add %o7,%o3,%o3 ! %o3.%o2[0..15].%o1[0..15].%o0[0..15]
399 faddd $aloc,$nloc,$nloc
405 faddd $alod,$nlod,$nlod
411 faddd $ahia,$nhia,$nhia
415 or %o7,%o0,%o0 ! 64-bit result
416 faddd $ahib,$nhib,$nhib
419 faddd $dota,$nloa,$nloa
420 srlx %o3,16,%g1 ! 34-bit carry
421 faddd $dotb,$nlob,$nlob
425 stx %o0,[$tp] ! tp[j-1]=
427 faddd $ahic,$nhic,$dota ! $nhic
428 faddd $ahid,$nhid,$dotb ! $nhid
430 faddd $nloc,$nhia,$nloc
431 faddd $nlod,$nhib,$nlod
438 std $nloa,[%sp+$bias+$frame+0]
439 std $nlob,[%sp+$bias+$frame+8]
440 std $nloc,[%sp+$bias+$frame+16]
441 std $nlod,[%sp+$bias+$frame+24]
451 ldx [%sp+$bias+$frame+0],%o0
452 ldx [%sp+$bias+$frame+8],%o1
453 ldx [%sp+$bias+$frame+16],%o2
454 ldx [%sp+$bias+$frame+24],%o3
457 std $dota,[%sp+$bias+$frame+32]
459 std $dotb,[%sp+$bias+$frame+40]
463 add %o7,%o3,%o3 ! %o3.%o2[0..15].%o1[0..15].%o0[0..15]
472 or %o7,%o0,%o0 ! 64-bit result
473 ldx [%sp+$bias+$frame+32],%o4
475 ldx [%sp+$bias+$frame+40],%o5
476 srlx %o3,16,%g1 ! 34-bit carry
480 stx %o0,[$tp] ! tp[j-1]=
494 stx %o4,[$tp] ! tp[num-1]=
500 sub %g0,$num,$j ! j=-num
501 add %sp,$bias+$frame+$locals,$tp
506 ld [%o3+4],%g1 ! bp[i]
508 ld [%o4+4],%g5 ! ap[0]
515 ldx [$tp],%o2 ! tp[0]
518 mulx $n0,%o0,%o0 ! (ap[0]*bp[i]+t[0])*n0
519 stx %o0,[%sp+$bias+$frame+0]
521 ! transfer b[i] to FPU as 4x16-bit values
527 ! transfer (ap[0]*b[i]+t[0])*n0 to FPU as 4x16-bit values
528 ldda [%sp+$bias+$frame+6]%asi,$na
530 ldda [%sp+$bias+$frame+4]%asi,$nb
532 ldda [%sp+$bias+$frame+2]%asi,$nc
534 ldda [%sp+$bias+$frame+0]%asi,$nd
536 ldd [$ap_l+$j],$alo ! load a[j] in double format
540 ldd [$np_l+$j],$nlo ! load n[j] in double format
550 faddd $aloa,$nloa,$nloa
553 faddd $alob,$nlob,$nlob
556 faddd $aloc,$nloc,$nloc
559 faddd $alod,$nlod,$nlod
562 faddd $ahia,$nhia,$nhia
565 faddd $ahib,$nhib,$nhib
568 faddd $ahic,$nhic,$dota ! $nhic
569 faddd $ahid,$nhid,$dotb ! $nhid
571 faddd $nloc,$nhia,$nloc
572 faddd $nlod,$nhib,$nlod
579 std $nloa,[%sp+$bias+$frame+0]
580 std $nlob,[%sp+$bias+$frame+8]
581 std $nloc,[%sp+$bias+$frame+16]
583 std $nlod,[%sp+$bias+$frame+24]
585 ldd [$ap_l+$j],$alo ! load a[j] in double format
587 ldd [$np_l+$j],$nlo ! load n[j] in double format
595 ldx [%sp+$bias+$frame+0],%o0
596 faddd $aloa,$nloa,$nloa
598 ldx [%sp+$bias+$frame+8],%o1
600 ldx [%sp+$bias+$frame+16],%o2
601 faddd $alob,$nlob,$nlob
603 ldx [%sp+$bias+$frame+24],%o3
607 faddd $aloc,$nloc,$nloc
612 faddd $alod,$nlod,$nlod
617 faddd $ahia,$nhia,$nhia
619 add %o7,%o3,%o3 ! %o3.%o2[0..15].%o1[0..15].%o0[0..15]
625 faddd $ahib,$nhib,$nhib
628 faddd $dota,$nloa,$nloa
630 faddd $dotb,$nlob,$nlob
633 faddd $ahic,$nhic,$dota ! $nhic
635 faddd $ahid,$nhid,$dotb ! $nhid
636 or %o7,%o0,%o0 ! 64-bit result
638 faddd $nloc,$nhia,$nloc
641 faddd $nlod,$nhib,$nlod
642 srlx %o3,16,%g1 ! 34-bit carry
651 std $nloa,[%sp+$bias+$frame+0]
652 std $nlob,[%sp+$bias+$frame+8]
654 std $nloc,[%sp+$bias+$frame+16]
655 bz,pn %icc,.Linnerskip
656 std $nlod,[%sp+$bias+$frame+24]
662 ldd [$ap_l+$j],$alo ! load a[j] in double format
664 ldd [$np_l+$j],$nlo ! load n[j] in double format
672 ldx [%sp+$bias+$frame+0],%o0
673 faddd $aloa,$nloa,$nloa
675 ldx [%sp+$bias+$frame+8],%o1
677 ldx [%sp+$bias+$frame+16],%o2
678 faddd $alob,$nlob,$nlob
680 ldx [%sp+$bias+$frame+24],%o3
684 faddd $aloc,$nloc,$nloc
689 faddd $alod,$nlod,$nlod
694 faddd $ahia,$nhia,$nhia
696 add %o7,%o3,%o3 ! %o3.%o2[0..15].%o1[0..15].%o0[0..15]
701 faddd $ahib,$nhib,$nhib
704 faddd $dota,$nloa,$nloa
706 faddd $dotb,$nlob,$nlob
709 faddd $ahic,$nhic,$dota ! $nhic
711 faddd $ahid,$nhid,$dotb ! $nhid
712 or %o7,%o0,%o0 ! 64-bit result
713 faddd $nloc,$nhia,$nloc
715 ldx [$tp+8],%o7 ! tp[j]
716 faddd $nlod,$nhib,$nlod
717 srlx %o3,16,%g1 ! 34-bit carry
727 stx %o0,[$tp] ! tp[j-1]
730 std $nloa,[%sp+$bias+$frame+0]
731 std $nlob,[%sp+$bias+$frame+8]
732 std $nloc,[%sp+$bias+$frame+16]
734 std $nlod,[%sp+$bias+$frame+24]
742 ldx [%sp+$bias+$frame+0],%o0
743 ldx [%sp+$bias+$frame+8],%o1
744 ldx [%sp+$bias+$frame+16],%o2
745 ldx [%sp+$bias+$frame+24],%o3
748 std $dota,[%sp+$bias+$frame+32]
750 std $dotb,[%sp+$bias+$frame+40]
754 add %o7,%o3,%o3 ! %o3.%o2[0..15].%o1[0..15].%o0[0..15]
763 ldx [%sp+$bias+$frame+32],%o4
764 or %o7,%o0,%o0 ! 64-bit result
765 ldx [%sp+$bias+$frame+40],%o5
767 ldx [$tp+8],%o7 ! tp[j]
768 srlx %o3,16,%g1 ! 34-bit carry
776 stx %o0,[$tp] ! tp[j-1]
790 stx %o4,[$tp] ! tp[num-1]
799 add $tp,8,$tp ! adjust tp to point at the end
801 sub %g0,$num,%o7 ! n=-num
803 subcc %g0,%g0,%g0 ! clear %icc.c
820 sub %g0,$num,%o7 ! n=-num
841 sub %g0,$num,%o7 ! n=-num
852 ldx [%sp+$bias+$frame+48],%o7
853 wr %g0,%o7,%asi ! restore %asi
859 .type $fname,#function
860 .size $fname,(.-$fname)
861 .asciz "Montgomery Multipltication for UltraSPARC, CRYPTOGAMS by <appro\@openssl.org>"
# Post-process the assembly template: every `...` (backtick-delimited)
# fragment is evaluated as Perl (/e) and replaced by its result, across
# the whole text (/g, /m). This lets the template compute frame offsets
# and other constants at generation time.
865 $code =~ s/\`([^\`]*)\`/eval($1)/gem;
867 # Below substitution makes it possible to compile without demanding
868 # VIS extensions on command line, e.g. -xarch=v9 vs. -xarch=v9a. I
869 # dare to do this, because VIS capability is detected at run-time now
870 # and this routine is not called on CPU not capable to execute it. Do
871 # note that fzeros is not the only VIS dependency! Another dependency
872 # is implicit and is just _a_ numerical value loaded to %asi register,
873 # which assembler can't recognize as VIS specific...
874 $code =~ s/fzeros\s+%f([0-9]+)/
875 sprintf(".word\t0x%x\t! fzeros %%f%d",0x81b00c20|($1<<25),$1)