3 # ====================================================================
4 # Written by Andy Polyakov <appro@fy.chalmers.se> for the OpenSSL
5 # project. Rights for redistribution and usage in source and binary
6 # forms are granted according to the OpenSSL license.
7 # ====================================================================
11 # "Teaser" Montgomery multiplication module for UltraSPARC. Why FPU?
12 # Because unlike integer multiplier, which simply stalls whole CPU,
13 # FPU is fully pipelined and can effectively emit 48 bit partial
14 # product every cycle. Why not blended SPARC v9? One can argue that
15 # making this module dependent on UltraSPARC VIS extension limits its
16 # binary compatibility. Very well may be, but the simple fact is that
17 # there is no known SPARC v9 implementation which does not implement
18 # VIS. Even Fujitsu's brand-new SPARC64 V is equipped with a VIS unit.
20 # USI&II cores currently exhibit uniform 2x improvement [over pre-
21 # bn_mul_mont codebase] for all key lengths and benchmarks. On USIII
22 # performance improves by a few percent for shorter keys and worsens by
23 # a few percent for longer keys. This is because USIII integer multiplier
24 # is >3x faster than USI&II one, which is harder to match [but see
25 # TODO list below]. It should also be noted that SPARC64 V features
26 # out-of-order execution, which *might* mean that integer multiplier
27 # is pipelined, which in turn *might* be impossible to match...
29 # In 32-bit context the implementation implies following additional
30 # limitations on input arguments:
31 # - num may not be less than 4;
32 # - num has to be even;
33 # - ap, bp, rp, np have to be 64-bit aligned [which is not a problem
34 # as long as BIGNUM.d are malloc-ated];
35 # Failure to meet any of these conditions has no fatal effects; it
36 # simply doesn't give any performance gain.
39 # - modulo-schedule inner loop for better performance (on in-order
40 # execution core such as UltraSPARC this shall result in further
41 # noticeable(!) improvement);
42 # - dedicated squaring procedure[?];
# Select code-generation mode from the compiler flags this script was
# invoked with (matched against $_ — presumably set by a driver loop over
# the command-line arguments, which is outside this view; TODO confirm).
47 $bits=64 if (/\-m64/ || /\-xarch\=v9/);
# NOTE(review): [9|8plus] is a character class (any single one of the
# characters 9 | 8 p l u s), NOT the alternation (?:9|8plus). In practice
# it still accepts -xarch=v9a and -xarch=v8plus (class char + \S), and a
# "correct" alternation followed by \S would reject a bare -xarch=v8plus,
# so do not tighten this without re-testing the whole flag matrix.
48 $vis=1 if (/\-mcpu=ultra/ || /\-xarch\=v[9|8plus]\S/);
53 .section ".text",#alloc,#execinstr
57 xor %o0,%o0,%o0 ! just signal "not implemented"
58 .type $fname,#function
59 .size $fname,(.-$fname)
# Bytes of stack frame reserved below %sp by the generated "save";
# 96 is the minimal SPARC frame, padded here to a cache-line multiple.
69 $frame=128; # 96 rounded up to largest known cache-line
73 # In order to provide for 32-/64-bit ABI duality, I keep integers wider
74 # than 32 bit in %g1-%g4 and %o0-%o5. %l0-%l7 and %i0-%i5 are used
75 # exclusively for pointers, indexes and other small values...
# bn_mul_mont argument registers as seen inside the callee: after the
# SPARC "save" (emitted in code outside this view) the caller's %o0-%o5
# arguments appear here as %i0-%i5.
77 $rp="%i0"; # BN_ULONG *rp,
78 $ap="%i1"; # const BN_ULONG *ap,
79 $bp="%i2"; # const BN_ULONG *bp,
80 $np="%i3"; # const BN_ULONG *np,
81 $n0="%i4"; # const BN_ULONG *n0,
82 $num="%i5"; # int num);
# Locals. Note the two deliberate register reuses below: $n0 moves from
# the %i4 argument slot into "64-bit" %g4 (the generated code reloads it
# via "ldx [%i4],$n0" / "ld [%i4+0],$n0"), freeing %i4 itself to hold the
# carry bit for the final subtraction/copy phase.
85 $ap_l="%l1"; # a[num],n[num] are smashed to 32-bit words and saved
86 $ap_h="%l2"; # to these four vectors as double-precision FP values.
87 $np_l="%l3"; # This way a bunch of fxtods are eliminated in second
88 $np_h="%l4"; # loop and L1-cache aliasing is minimized...
91 $mask="%l7"; # 16-bit mask, 0xffff
93 $n0="%g4"; # reassigned(!) to "64-bit" register
94 $carry="%i4"; # %i4 reused(!) for a carry bit
96 # FP register naming chart
# $ba..$bd / $na..$nd receive b[i] and the n0-product split into four
# 16-bit digits each (loaded as doubles through the ASI_FL16_P ldda's
# later in this file); $alo/$ahi and $nlo/$nhi hold a[j] and n[j] as
# 32-bit low/high halves, with the "_" variants naming the odd
# single-precision half of each double register pair.
111 $ba="%f0"; $bb="%f2"; $bc="%f4"; $bd="%f6";
112 $na="%f8"; $nb="%f10"; $nc="%f12"; $nd="%f14";
113 $alo="%f16"; $alo_="%f17"; $ahi="%f18"; $ahi_="%f19";
114 $nlo="%f20"; $nlo_="%f21"; $nhi="%f22"; $nhi_="%f23";
# Running high-half partial sums carried from one j-iteration into the
# next (see the "faddd $dota,..." / "faddd $ahic,$nhic,$dota" pairs).
116 $dota="%f24"; $dotb="%f26";
# Per-digit partial products. These live in %f32-%f62, the upper half of
# the v9 FP file, which is presumably usable only as double precision —
# confirm against the SPARC v9 manual before renaming any into %f0-%f31.
118 $aloa="%f32"; $alob="%f34"; $aloc="%f36"; $alod="%f38";
119 $ahia="%f40"; $ahib="%f42"; $ahic="%f44"; $ahid="%f46";
120 $nloa="%f48"; $nlob="%f50"; $nloc="%f52"; $nlod="%f54";
121 $nhia="%f56"; $nhib="%f58"; $nhic="%f60"; $nhid="%f62";
123 $ASI_FL16_P=0xD2; # magic ASI value to engage 16-bit FP load
126 .ident "UltraSPARC Montgomery multiply by <appro\@fy.chalmers.se>"
127 .section ".text",#alloc,#execinstr
132 save %sp,-$frame-$locals,%sp
133 sethi %hi(0xffff),$mask
134 or $mask,%lo(0xffff),$mask
136 $code.=<<___ if ($bits==64);
137 ldx [%i4],$n0 ! $n0 reassigned, remember?
139 $code.=<<___ if ($bits==32);
143 andcc $num,1,%g0 ! $num has to be even...
145 clr %i0 ! signal "unsupported input value"
150 andcc %l0,7,%g0 ! ...and pointers has to be 8-byte aligned
152 clr %i0 ! signal "unsupported input value"
153 ld [%i4+0],$n0 ! $n0 reassigned, remember?
156 or %o0,$n0,$n0 ! $n0=n0[1].n0[0]
159 sll $num,3,$num ! num*=8
161 add %sp,$bias,%o0 ! real top of stack
163 add %o1,$num,%o1 ! %o1=num*5
165 and %o0,-2048,%o0 ! optimize TLB utilization
166 sub %o0,$bias,%sp ! alloca(5*num*8)
168 rd %asi,%o7 ! save %asi
169 add %sp,$bias+$frame+$locals,$tp
171 add $ap_l,$num,$ap_l ! [an]p_[lh] point at the vectors' ends !
176 wr %g0,$ASI_FL16_P,%asi ! setup %asi for 16-bit FP loads
178 add $rp,$num,$rp ! readjust input pointers to point
179 add $ap,$num,$ap ! at the ends too...
183 stx %o7,[%sp+$bias+$frame+48] ! save %asi
191 $code.=<<___ if ($bits==64);
192 ldx [$bp+$i],%o0 ! bp[0]
193 ldx [$ap+$j],%o1 ! ap[0]
195 $code.=<<___ if ($bits==32);
196 ldd [$bp+$i],%o0 ! bp[0]
197 ldd [$ap+$j],%g2 ! ap[0]
206 mulx %o1,%o0,%o0 ! ap[0]*bp[0]
207 mulx $n0,%o0,%o0 ! ap[0]*bp[0]*n0
208 stx %o0,[%sp+$bias+$frame+0]
210 ld [%o3+`$bits==32 ? 0 : 4`],$alo_ ! load a[j] as pair of 32-bit words
212 ld [%o3+`$bits==32 ? 4 : 0`],$ahi_
214 ld [%o5+`$bits==32 ? 0 : 4`],$nlo_ ! load n[j] as pair of 32-bit words
216 ld [%o5+`$bits==32 ? 4 : 0`],$nhi_
219 ! transfer b[i] to FPU as 4x16-bit values
220 ldda [%o4+`$bits==32 ? 2 : 6`]%asi,$ba
222 ldda [%o4+`$bits==32 ? 0 : 4`]%asi,$bb
224 ldda [%o4+`$bits==32 ? 6 : 2`]%asi,$bc
226 ldda [%o4+`$bits==32 ? 4 : 0`]%asi,$bd
229 ! transfer ap[0]*b[0]*n0 to FPU as 4x16-bit values
230 ldda [%sp+$bias+$frame+6]%asi,$na
232 ldda [%sp+$bias+$frame+4]%asi,$nb
234 ldda [%sp+$bias+$frame+2]%asi,$nc
236 ldda [%sp+$bias+$frame+0]%asi,$nd
239 std $alo,[$ap_l+$j] ! save smashed ap[j] in double format
243 std $nlo,[$np_l+$j] ! save smashed np[j] in double format
254 faddd $aloa,$nloa,$nloa
257 faddd $alob,$nlob,$nlob
260 faddd $aloc,$nloc,$nloc
263 faddd $alod,$nlod,$nlod
266 faddd $ahia,$nhia,$nhia
270 faddd $ahib,$nhib,$nhib
271 faddd $ahic,$nhic,$dota ! $nhic
272 faddd $ahid,$nhid,$dotb ! $nhid
274 faddd $nloc,$nhia,$nloc
275 faddd $nlod,$nhib,$nlod
282 std $nloa,[%sp+$bias+$frame+0]
283 std $nlob,[%sp+$bias+$frame+8]
284 std $nloc,[%sp+$bias+$frame+16]
285 std $nlod,[%sp+$bias+$frame+24]
286 ldx [%sp+$bias+$frame+0],%o0
287 ldx [%sp+$bias+$frame+8],%o1
288 ldx [%sp+$bias+$frame+16],%o2
289 ldx [%sp+$bias+$frame+24],%o3
296 add %o7,%o3,%o3 ! %o3.%o2[0..15].%o1[0..15].%o0[0..15]
305 !or %o7,%o0,%o0 ! 64-bit result
306 srlx %o3,16,%g1 ! 34-bit carry
314 ld [%o3+`$bits==32 ? 0 : 4`],$alo_ ! load a[j] as pair of 32-bit words
316 ld [%o3+`$bits==32 ? 4 : 0`],$ahi_
318 ld [%o4+`$bits==32 ? 0 : 4`],$nlo_ ! load n[j] as pair of 32-bit words
320 ld [%o4+`$bits==32 ? 4 : 0`],$nhi_
328 std $alo,[$ap_l+$j] ! save smashed ap[j] in double format
332 std $nlo,[$np_l+$j] ! save smashed np[j] in double format
338 faddd $aloa,$nloa,$nloa
341 faddd $alob,$nlob,$nlob
344 faddd $aloc,$nloc,$nloc
347 faddd $alod,$nlod,$nlod
350 faddd $ahia,$nhia,$nhia
353 faddd $ahib,$nhib,$nhib
355 faddd $dota,$nloa,$nloa
356 faddd $dotb,$nlob,$nlob
357 faddd $ahic,$nhic,$dota ! $nhic
358 faddd $ahid,$nhid,$dotb ! $nhid
360 faddd $nloc,$nhia,$nloc
361 faddd $nlod,$nhib,$nlod
368 std $nloa,[%sp+$bias+$frame+0]
369 std $nlob,[%sp+$bias+$frame+8]
370 std $nloc,[%sp+$bias+$frame+16]
371 std $nlod,[%sp+$bias+$frame+24]
372 ldx [%sp+$bias+$frame+0],%o0
373 ldx [%sp+$bias+$frame+8],%o1
374 ldx [%sp+$bias+$frame+16],%o2
375 ldx [%sp+$bias+$frame+24],%o3
382 add %o7,%o3,%o3 ! %o3.%o2[0..15].%o1[0..15].%o0[0..15]
391 or %o7,%o0,%o0 ! 64-bit result
393 srlx %o3,16,%g1 ! 34-bit carry
397 stx %o0,[$tp] ! tp[j-1]=
404 std $dota,[%sp+$bias+$frame+32]
405 std $dotb,[%sp+$bias+$frame+40]
406 ldx [%sp+$bias+$frame+32],%o0
407 ldx [%sp+$bias+$frame+40],%o1
420 stx %o0,[$tp] ! tp[num-1]=
427 add %sp,$bias+$frame+$locals,$tp
431 $code.=<<___ if ($bits==64);
432 ldx [$bp+$i],%o0 ! bp[i]
433 ldx [$ap+$j],%o1 ! ap[0]
435 $code.=<<___ if ($bits==32);
436 ldd [$bp+$i],%o0 ! bp[i]
437 ldd [$ap+$j],%g2 ! ap[0]
444 ldx [$tp],%o2 ! tp[0]
447 mulx $n0,%o0,%o0 ! (ap[0]*bp[i]+t[0])*n0
448 stx %o0,[%sp+$bias+$frame+0]
451 ! transfer b[i] to FPU as 4x16-bit values
452 ldda [%o4+`$bits==32 ? 2 : 6`]%asi,$ba
453 ldda [%o4+`$bits==32 ? 0 : 4`]%asi,$bb
454 ldda [%o4+`$bits==32 ? 6 : 2`]%asi,$bc
455 ldda [%o4+`$bits==32 ? 4 : 0`]%asi,$bd
457 ! transfer (ap[0]*b[i]+t[0])*n0 to FPU as 4x16-bit values
458 ldda [%sp+$bias+$frame+6]%asi,$na
460 ldda [%sp+$bias+$frame+4]%asi,$nb
462 ldda [%sp+$bias+$frame+2]%asi,$nc
464 ldda [%sp+$bias+$frame+0]%asi,$nd
466 ldd [$ap_l+$j],$alo ! load a[j] in double format
470 ldd [$np_l+$j],$nlo ! load n[j] in double format
481 faddd $aloa,$nloa,$nloa
484 faddd $alob,$nlob,$nlob
487 faddd $aloc,$nloc,$nloc
490 faddd $alod,$nlod,$nlod
493 faddd $ahia,$nhia,$nhia
497 faddd $ahib,$nhib,$nhib
498 faddd $ahic,$nhic,$dota ! $nhic
499 faddd $ahid,$nhid,$dotb ! $nhid
501 faddd $nloc,$nhia,$nloc
502 faddd $nlod,$nhib,$nlod
509 std $nloa,[%sp+$bias+$frame+0]
510 std $nlob,[%sp+$bias+$frame+8]
511 std $nloc,[%sp+$bias+$frame+16]
512 std $nlod,[%sp+$bias+$frame+24]
513 ldx [%sp+$bias+$frame+0],%o0
514 ldx [%sp+$bias+$frame+8],%o1
515 ldx [%sp+$bias+$frame+16],%o2
516 ldx [%sp+$bias+$frame+24],%o3
523 add %o7,%o3,%o3 ! %o3.%o2[0..15].%o1[0..15].%o0[0..15]
533 or %o7,%o0,%o0 ! 64-bit result
537 srlx %o3,16,%g1 ! 34-bit carry
545 ldd [$ap_l+$j],$alo ! load a[j] in double format
547 ldd [$np_l+$j],$nlo ! load n[j] in double format
556 faddd $aloa,$nloa,$nloa
559 faddd $alob,$nlob,$nlob
562 faddd $aloc,$nloc,$nloc
565 faddd $alod,$nlod,$nlod
568 faddd $ahia,$nhia,$nhia
572 faddd $ahib,$nhib,$nhib
573 faddd $dota,$nloa,$nloa
574 faddd $dotb,$nlob,$nlob
575 faddd $ahic,$nhic,$dota ! $nhic
576 faddd $ahid,$nhid,$dotb ! $nhid
578 faddd $nloc,$nhia,$nloc
579 faddd $nlod,$nhib,$nlod
586 std $nloa,[%sp+$bias+$frame+0]
587 std $nlob,[%sp+$bias+$frame+8]
588 std $nloc,[%sp+$bias+$frame+16]
589 std $nlod,[%sp+$bias+$frame+24]
590 ldx [%sp+$bias+$frame+0],%o0
591 ldx [%sp+$bias+$frame+8],%o1
592 ldx [%sp+$bias+$frame+16],%o2
593 ldx [%sp+$bias+$frame+24],%o3
600 add %o7,%o3,%o3 ! %o3.%o2[0..15].%o1[0..15].%o0[0..15]
609 or %o7,%o0,%o0 ! 64-bit result
611 srlx %o3,16,%g1 ! 34-bit carry
615 ldx [$tp+8],%o7 ! tp[j]
620 stx %o0,[$tp] ! tp[j-1]
627 std $dota,[%sp+$bias+$frame+32]
628 std $dotb,[%sp+$bias+$frame+40]
629 ldx [%sp+$bias+$frame+32],%o0
630 ldx [%sp+$bias+$frame+40],%o1
643 stx %o0,[$tp] ! tp[num-1]
652 sub %g0,$num,%o7 ! n=-num
653 cmp $carry,0 ! clears %icc.c
655 add $tp,8,$tp ! adjust tp to point at the end
658 ld [$np-`$bits==32 ? 4 : 8`],%o1
659 cmp %o0,%o1 ! compare topmost words
660 bcs,pt %icc,.Lcopy ! %icc.c is clean if not taken
668 $code.=<<___ if ($bits==64);
672 $code.=<<___ if ($bits==32);
681 subccc $carry,0,$carry
689 $code.=<<___ if ($bits==64);
692 $code.=<<___ if ($bits==32);
714 ldx [%sp+$bias+$frame+48],%o7
715 wr %g0,%o7,%asi ! restore %asi
721 .type $fname,#function
722 .size $fname,(.-$fname)
# Generation-time constant folding: every `...` fragment embedded in the
# assembly text (e.g. the `$bits==32 ? 0 : 4` offsets above) is eval'ed
# as Perl and replaced by its value before the code is emitted.
725 $code =~ s/\`([^\`]*)\`/eval($1)/gem;