3 # ====================================================================
4 # Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
5 # project. The module is, however, dual licensed under OpenSSL and
6 # CRYPTOGAMS licenses depending on where you obtain it. For further
7 # details see http://www.openssl.org/~appro/cryptogams/.
8 # ====================================================================
10 # ECP_NISTZ256 module for SPARCv9.
14 # Original ECP_NISTZ256 submission targeting x86_64 is detailed in
15 # http://eprint.iacr.org/2013/816. In the process of adaptation the
16 # original .c module was made 32-bit savvy in order to make this
17 # implementation possible.
19 # with/without -DECP_NISTZ256_ASM
20 # UltraSPARC III +12-18%
21 # SPARC T4 +99-550% (+66-150% on 32-bit Solaris)
23 # Ranges denote minimum and maximum improvement coefficients depending
24 # on benchmark. Lower coefficients are for ECDSA sign, server-side
25 # operation. Keep in mind that +200% means 3x improvement.
28 open STDOUT,">$output";
31 #include "sparc_arch.h"
33 #define LOCALS (STACK_BIAS+STACK_FRAME)
35 .register %g2,#scratch
36 .register %g3,#scratch
37 # define STACK64_FRAME STACK_FRAME
38 # define LOCALS64 LOCALS
40 # define STACK64_FRAME (2047+192)
41 # define LOCALS64 STACK64_FRAME
44 .section ".text",#alloc,#execinstr
46 ########################################################################
47 # Convert ecp_nistz256_table.c to layout expected by ecp_nistz_gather_w7
49 $0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
50 open TABLE,"<ecp_nistz256_table.c" or
51 open TABLE,"<${dir}../ecp_nistz256_table.c" or
52 die "failed to open ecp_nistz256_table.c:",$!;
57 s/TOBN\(\s*(0x[0-9a-f]+),\s*(0x[0-9a-f]+)\s*\)/push @arr,hex($2),hex($1)/geo;
61 # See ecp_nistz256_table.c for an explanation of why it's 64*16*37.
62 # 64*16*37-1 is because $#arr returns the last valid index of @arr, not
64 die "insane number of elements" if ($#arr != 64*16*37-1);
67 .globl ecp_nistz256_precomputed
69 ecp_nistz256_precomputed:
71 ########################################################################
72 # this conversion smashes P256_POINT_AFFINE into individual bytes with
73 # a 64-byte interval, similar to
77 @tbl = splice(@arr,0,64*16);	# next block of 64 points, 16 words each
78 for($i=0;$i<64;$i++) {	# $i is the byte index within a point
80 for($j=0;$j<64;$j++) {	# $j is the point index within the block
81 push @line,($tbl[$j*16+$i/4]>>(($i%4)*8))&0xff;	# byte $i of point $j
84 $code.=join(',',map { sprintf "0x%02x",$_} @line);	# one 64-byte row
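#
# To make the target layout concrete: the two loops above transpose each
# block of 64 points, so byte $b of point $j lands at offset $b*64+$j
# within the block. ecp_nistz_gather_w7 below can then assemble one
# point from 64 single-byte loads spaced 64 bytes apart.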
90 my ($rp,$ap,$bp)=map("%i$_",(0..2));
91 my @acc=map("%l$_",(0..7));
92 my ($t0,$t1,$t2,$t3,$t4,$t5,$t6,$t7)=(map("%o$_",(0..5)),"%g4","%g5");
93 my ($bi,$a0,$mask,$carry)=(map("%i$_",(3..5)),"%g1");
94 my ($rp_real,$ap_real)=("%g2","%g3");
97 .size ecp_nistz256_precomputed,.-ecp_nistz256_precomputed
99 .LRR: ! 2^512 mod P precomputed for NIST P256 polynomial
100 .long 0x00000003, 0x00000000, 0xffffffff, 0xfffffffb
101 .long 0xfffffffe, 0xffffffff, 0xfffffffd, 0x00000004
103 .long 1,0,0,0,0,0,0,0
104 .asciz "ECP_NISTZ256 for SPARCv9, CRYPTOGAMS by <appro\@openssl.org>"
106 ! void ecp_nistz256_to_mont(BN_ULONG %i0[8],const BN_ULONG %i1[8]);
107 .globl ecp_nistz256_to_mont
109 ecp_nistz256_to_mont:
110 save %sp,-STACK_FRAME,%sp
114 call __ecp_nistz256_mul_mont
118 .size ecp_nistz256_to_mont,.-ecp_nistz256_to_mont
120 ! void ecp_nistz256_from_mont(BN_ULONG %i0[8],const BN_ULONG %i1[8]);
121 .globl ecp_nistz256_from_mont
123 ecp_nistz256_from_mont:
124 save %sp,-STACK_FRAME,%sp
128 call __ecp_nistz256_mul_mont
132 .size ecp_nistz256_from_mont,.-ecp_nistz256_from_mont
134 ! void ecp_nistz256_mul_mont(BN_ULONG %i0[8],const BN_ULONG %i1[8],
135 ! const BN_ULONG %i2[8]);
136 .globl ecp_nistz256_mul_mont
138 ecp_nistz256_mul_mont:
139 save %sp,-STACK_FRAME,%sp
141 call __ecp_nistz256_mul_mont
145 .size ecp_nistz256_mul_mont,.-ecp_nistz256_mul_mont
147 ! void ecp_nistz256_sqr_mont(BN_ULONG %i0[8],const BN_ULONG %i2[8]);
148 .globl ecp_nistz256_sqr_mont
150 ecp_nistz256_sqr_mont:
151 save %sp,-STACK_FRAME,%sp
153 call __ecp_nistz256_mul_mont
157 .size ecp_nistz256_sqr_mont,.-ecp_nistz256_sqr_mont
160 ########################################################################
161 # A special thing to keep in mind is that $t0-$t7 hold 64-bit values,
162 # while all others are meant to hold 32-bit ones. "Meant to" means that
163 # additions to @acc[0-7] do "contaminate" the upper bits, but they are
164 # cleared before they can affect the outcome (follow 'and' with $mask).
165 # Also keep in mind that addition with carry is addition with 32-bit
166 # carry, even though the CPU is 64-bit. [Addition with 64-bit carry was
167 # introduced in T3, see below for VIS3 code paths.]
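#
# To illustrate the masking rule: with %l0=%l1=0xffffffff, 'addcc
# %l0,%l1,%l0' leaves 0x1fffffffe in the 64-bit register, i.e. bit 32 is
# "contamination"; the icc carry flag is nevertheless computed from the
# 32-bit operation, and a subsequent 'and %l0,$mask,%l0' restores the
# proper 32-bit value 0xfffffffe.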
171 __ecp_nistz256_mul_mont:
172 ld [$bp+0],$bi ! b[0]
175 srl $mask,0,$mask ! 0xffffffff
183 mulx $a0,$bi,$t0 ! a[0-7]*b[0], 64-bit results
191 srlx $t0,32,@acc[1] ! extract high parts
198 srlx $t7,32,@acc[0] ! "@acc[8]"
201 for($i=1;$i<8;$i++) {
203 addcc @acc[1],$t1,@acc[1] ! accumulate high parts
204 ld [$bp+4*$i],$bi ! b[$i]
205 ld [$ap+4],$t1 ! re-load a[1-7]
206 addccc @acc[2],$t2,@acc[2]
207 addccc @acc[3],$t3,@acc[3]
210 addccc @acc[4],$t4,@acc[4]
211 addccc @acc[5],$t5,@acc[5]
214 addccc @acc[6],$t6,@acc[6]
215 addccc @acc[7],$t7,@acc[7]
218 addccc @acc[0],$carry,@acc[0] ! "@acc[8]"
221 # Reduction iteration is normally performed by accumulating
222 # result of multiplication of modulus by "magic" digit [and
223 # omitting least significant word, which is guaranteed to
224 # be 0], but thanks to special form of modulus and "magic"
225 # digit being equal to least significant word, it can be
226 # performed with additions and subtractions alone. Indeed:
228 # ffff.0001.0000.0000.0000.ffff.ffff.ffff
230 # + xxxx.xxxx.xxxx.xxxx.xxxx.xxxx.xxxx.xxxx.abcd
232 # Now observing that ff..ff*x = (2^n-1)*x = 2^n*x-x, we
235 # xxxx.xxxx.xxxx.xxxx.xxxx.xxxx.xxxx.xxxx.abcd
236 # + abcd.0000.abcd.0000.0000.abcd.0000.0000.0000
237 # - abcd.0000.0000.0000.0000.0000.0000.abcd
239 # or marking redundant operations:
241 # xxxx.xxxx.xxxx.xxxx.xxxx.xxxx.xxxx.xxxx.----
242 # + abcd.0000.abcd.0000.0000.abcd.----.----.----
243 # - abcd.----.----.----.----.----.----.----
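#
# The identity behind the trick can be checked independently. Below is a
# documentation-only Perl sketch (never invoked by this generator,
# assumes Math::BigInt is available): adding the "magic" digit m at bit
# positions 96, 192 and 256 and subtracting it at positions 224 and 0
# equals m*p for the NIST P-256 prime p = 2^256-2^224+2^192+2^96-1.
sub __reduction_identity_demo {
	require Math::BigInt;
	my $m   = Math::BigInt->new(defined $_[0] ? $_[0] : 0xabcd);
	my $two = Math::BigInt->new(2);
	my $p   = $two**256 - $two**224 + $two**192 + $two**96 - 1;
	my $acc = $m*$two**96 + $m*$two**192 + $m*$two**256
	        - $m*$two**224 - $m;
	return $acc == $m*$p;	# holds for any digit $m
}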
246 ! multiplication-less reduction
247 addcc @acc[3],$t0,@acc[3] ! r[3]+=r[0]
248 addccc @acc[4],%g0,@acc[4] ! r[4]+=0
249 and @acc[1],$mask,@acc[1]
250 and @acc[2],$mask,@acc[2]
251 addccc @acc[5],%g0,@acc[5] ! r[5]+=0
252 addccc @acc[6],$t0,@acc[6] ! r[6]+=r[0]
253 and @acc[3],$mask,@acc[3]
254 and @acc[4],$mask,@acc[4]
255 addccc @acc[7],%g0,@acc[7] ! r[7]+=0
256 addccc @acc[0],$t0,@acc[0] ! r[8]+=r[0] "@acc[8]"
257 and @acc[5],$mask,@acc[5]
258 and @acc[6],$mask,@acc[6]
259 addc $carry,%g0,$carry ! top-most carry
260 subcc @acc[7],$t0,@acc[7] ! r[7]-=r[0]
261 subccc @acc[0],%g0,@acc[0] ! r[8]-=0 "@acc[8]"
262 subc $carry,%g0,$carry ! top-most carry
263 and @acc[7],$mask,@acc[7]
264 and @acc[0],$mask,@acc[0] ! "@acc[8]"
266 push(@acc,shift(@acc)); # rotate registers to "omit" acc[0]
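# Note the rotation moves no data: after it, the register that used to
# be addressed as acc[1] is addressed as acc[0], and the register that
# held the eliminated acc[0] becomes the spare "acc[8]" slot.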
268 mulx $a0,$bi,$t0 ! a[0-7]*b[$i], 64-bit results
276 add @acc[0],$t0,$t0 ! accumulate low parts, can't overflow
278 srlx $t0,32,@acc[1] ! extract high parts
291 srlx $t7,32,@acc[0] ! "@acc[8]"
295 addcc @acc[1],$t1,@acc[1] ! accumulate high parts
296 addccc @acc[2],$t2,@acc[2]
297 addccc @acc[3],$t3,@acc[3]
298 addccc @acc[4],$t4,@acc[4]
299 addccc @acc[5],$t5,@acc[5]
300 addccc @acc[6],$t6,@acc[6]
301 addccc @acc[7],$t7,@acc[7]
302 addccc @acc[0],$carry,@acc[0] ! "@acc[8]"
305 addcc @acc[3],$t0,@acc[3] ! multiplication-less reduction
306 addccc @acc[4],%g0,@acc[4]
307 addccc @acc[5],%g0,@acc[5]
308 addccc @acc[6],$t0,@acc[6]
309 addccc @acc[7],%g0,@acc[7]
310 addccc @acc[0],$t0,@acc[0] ! "@acc[8]"
311 addc $carry,%g0,$carry
312 subcc @acc[7],$t0,@acc[7]
313 subccc @acc[0],%g0,@acc[0] ! "@acc[8]"
314 subc $carry,%g0,$carry ! top-most carry
316 push(@acc,shift(@acc)); # rotate registers to omit acc[0]
318 ! Final step is "if result > mod, subtract mod", but we do it
319 ! "other way around", namely subtract modulus from result
320 ! and if it borrowed, add modulus back.
322 subcc @acc[0],-1,@acc[0] ! subtract modulus
323 subccc @acc[1],-1,@acc[1]
324 subccc @acc[2],-1,@acc[2]
325 subccc @acc[3],0,@acc[3]
326 subccc @acc[4],0,@acc[4]
327 subccc @acc[5],0,@acc[5]
328 subccc @acc[6],1,@acc[6]
329 subccc @acc[7],-1,@acc[7]
330 subc $carry,0,$carry ! broadcast borrow bit
332 ! Note that because mod has a special form, i.e. consists of
333 ! 0xffffffff, 1 and 0s, we can conditionally synthesize it by
334 ! using the value of the broadcast borrow and the borrow bit itself.
335 ! To minimize the dependency chain we first broadcast and then
336 ! extract the bit by negating (follow $bi).
338 addcc @acc[0],$carry,@acc[0] ! add modulus or zero
339 addccc @acc[1],$carry,@acc[1]
342 addccc @acc[2],$carry,@acc[2]
344 addccc @acc[3],0,@acc[3]
346 addccc @acc[4],0,@acc[4]
348 addccc @acc[5],0,@acc[5]
350 addccc @acc[6],$bi,@acc[6]
352 addc @acc[7],$carry,@acc[7]
356 .size __ecp_nistz256_mul_mont,.-__ecp_nistz256_mul_mont
358 ! void ecp_nistz256_add(BN_ULONG %i0[8],const BN_ULONG %i1[8],
359 ! const BN_ULONG %i2[8]);
360 .globl ecp_nistz256_add
363 save %sp,-STACK_FRAME,%sp
371 call __ecp_nistz256_add
375 .size ecp_nistz256_add,.-ecp_nistz256_add
379 ld [$bp+0],$t0 ! b[0]
383 addcc @acc[0],$t0,@acc[0]
386 addccc @acc[1],$t1,@acc[1]
389 addccc @acc[2],$t2,@acc[2]
390 addccc @acc[3],$t3,@acc[3]
391 addccc @acc[4],$t4,@acc[4]
392 addccc @acc[5],$t5,@acc[5]
393 addccc @acc[6],$t6,@acc[6]
394 addccc @acc[7],$t7,@acc[7]
395 subc %g0,%g0,$carry ! broadcast carry bit
399 ! if a+b carries, subtract modulus.
401 ! Note that because mod has a special form, i.e. consists of
402 ! 0xffffffff, 1 and 0s, we can conditionally synthesize it by
403 ! using the value of the broadcast carry and the carry bit itself.
404 ! To minimize the dependency chain we first broadcast and then
405 ! extract the bit by negating (follow $bi).
407 subcc @acc[0],$carry,@acc[0] ! subtract synthesized modulus
408 subccc @acc[1],$carry,@acc[1]
411 subccc @acc[2],$carry,@acc[2]
413 subccc @acc[3],0,@acc[3]
415 subccc @acc[4],0,@acc[4]
417 subccc @acc[5],0,@acc[5]
419 subccc @acc[6],$bi,@acc[6]
421 subc @acc[7],$carry,@acc[7]
425 .size __ecp_nistz256_add,.-__ecp_nistz256_add
427 ! void ecp_nistz256_mul_by_2(BN_ULONG %i0[8],const BN_ULONG %i1[8]);
428 .globl ecp_nistz256_mul_by_2
430 ecp_nistz256_mul_by_2:
431 save %sp,-STACK_FRAME,%sp
439 call __ecp_nistz256_mul_by_2
443 .size ecp_nistz256_mul_by_2,.-ecp_nistz256_mul_by_2
446 __ecp_nistz256_mul_by_2:
447 addcc @acc[0],@acc[0],@acc[0] ! a+a=2*a
448 addccc @acc[1],@acc[1],@acc[1]
449 addccc @acc[2],@acc[2],@acc[2]
450 addccc @acc[3],@acc[3],@acc[3]
451 addccc @acc[4],@acc[4],@acc[4]
452 addccc @acc[5],@acc[5],@acc[5]
453 addccc @acc[6],@acc[6],@acc[6]
454 addccc @acc[7],@acc[7],@acc[7]
456 subc %g0,%g0,$carry ! broadcast carry bit
457 .size __ecp_nistz256_mul_by_2,.-__ecp_nistz256_mul_by_2
459 ! void ecp_nistz256_mul_by_3(BN_ULONG %i0[8],const BN_ULONG %i1[8]);
460 .globl ecp_nistz256_mul_by_3
462 ecp_nistz256_mul_by_3:
463 save %sp,-STACK_FRAME,%sp
471 call __ecp_nistz256_mul_by_3
475 .size ecp_nistz256_mul_by_3,.-ecp_nistz256_mul_by_3
478 __ecp_nistz256_mul_by_3:
479 addcc @acc[0],@acc[0],$t0 ! a+a=2*a
480 addccc @acc[1],@acc[1],$t1
481 addccc @acc[2],@acc[2],$t2
482 addccc @acc[3],@acc[3],$t3
483 addccc @acc[4],@acc[4],$t4
484 addccc @acc[5],@acc[5],$t5
485 addccc @acc[6],@acc[6],$t6
486 addccc @acc[7],@acc[7],$t7
487 subc %g0,%g0,$carry ! broadcast carry bit
489 subcc $t0,$carry,$t0 ! .Lreduce_by_sub but without stores
491 subccc $t1,$carry,$t1
492 subccc $t2,$carry,$t2
499 addcc $t0,@acc[0],@acc[0] ! 2*a+a=3*a
500 addccc $t1,@acc[1],@acc[1]
501 addccc $t2,@acc[2],@acc[2]
502 addccc $t3,@acc[3],@acc[3]
503 addccc $t4,@acc[4],@acc[4]
504 addccc $t5,@acc[5],@acc[5]
505 addccc $t6,@acc[6],@acc[6]
506 addccc $t7,@acc[7],@acc[7]
508 subc %g0,%g0,$carry ! broadcast carry bit
509 .size __ecp_nistz256_mul_by_3,.-__ecp_nistz256_mul_by_3
511 ! void ecp_nistz256_sub(BN_ULONG %i0[8],const BN_ULONG %i1[8],
512 ! const BN_ULONG %i2[8]);
513 .globl ecp_nistz256_sub
516 save %sp,-STACK_FRAME,%sp
524 call __ecp_nistz256_sub_from
528 .size ecp_nistz256_sub,.-ecp_nistz256_sub
530 ! void ecp_nistz256_neg(BN_ULONG %i0[8],const BN_ULONG %i1[8]);
531 .globl ecp_nistz256_neg
534 save %sp,-STACK_FRAME,%sp
543 call __ecp_nistz256_sub_from
547 .size ecp_nistz256_neg,.-ecp_nistz256_neg
550 __ecp_nistz256_sub_from:
551 ld [$bp+0],$t0 ! b[0]
555 subcc @acc[0],$t0,@acc[0]
558 subccc @acc[1],$t1,@acc[1]
559 subccc @acc[2],$t2,@acc[2]
562 subccc @acc[3],$t3,@acc[3]
563 subccc @acc[4],$t4,@acc[4]
564 subccc @acc[5],$t5,@acc[5]
565 subccc @acc[6],$t6,@acc[6]
566 subccc @acc[7],$t7,@acc[7]
567 subc %g0,%g0,$carry ! broadcast borrow bit
571 ! if a-b borrows, add modulus.
573 ! Note that because mod has a special form, i.e. consists of
574 ! 0xffffffff, 1 and 0s, we can conditionally synthesize it by
575 ! using the value of the broadcast borrow and the borrow bit itself.
576 ! To minimize the dependency chain we first broadcast and then
577 ! extract the bit by negating (follow $bi).
579 addcc @acc[0],$carry,@acc[0] ! add synthesized modulus
580 addccc @acc[1],$carry,@acc[1]
583 addccc @acc[2],$carry,@acc[2]
585 addccc @acc[3],0,@acc[3]
587 addccc @acc[4],0,@acc[4]
589 addccc @acc[5],0,@acc[5]
591 addccc @acc[6],$bi,@acc[6]
593 addc @acc[7],$carry,@acc[7]
597 .size __ecp_nistz256_sub_from,.-__ecp_nistz256_sub_from
600 __ecp_nistz256_sub_morf:
601 ld [$bp+0],$t0 ! b[0]
605 subcc $t0,@acc[0],@acc[0]
608 subccc $t1,@acc[1],@acc[1]
609 subccc $t2,@acc[2],@acc[2]
612 subccc $t3,@acc[3],@acc[3]
613 subccc $t4,@acc[4],@acc[4]
614 subccc $t5,@acc[5],@acc[5]
615 subccc $t6,@acc[6],@acc[6]
616 subccc $t7,@acc[7],@acc[7]
618 subc %g0,%g0,$carry ! broadcast borrow bit
619 .size __ecp_nistz256_sub_morf,.-__ecp_nistz256_sub_morf
621 ! void ecp_nistz256_div_by_2(BN_ULONG %i0[8],const BN_ULONG %i1[8]);
622 .globl ecp_nistz256_div_by_2
624 ecp_nistz256_div_by_2:
625 save %sp,-STACK_FRAME,%sp
633 call __ecp_nistz256_div_by_2
637 .size ecp_nistz256_div_by_2,.-ecp_nistz256_div_by_2
640 __ecp_nistz256_div_by_2:
641 ! ret = (a is odd ? a+mod : a) >> 1
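! adding the (odd) modulus flips parity, so a+mod is even whenever a
! is odd; either way the shift yields a/2 mod P with the shifted-out
! bit guaranteed to be zero.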
645 addcc @acc[0],$carry,@acc[0]
646 addccc @acc[1],$carry,@acc[1]
647 addccc @acc[2],$carry,@acc[2]
648 addccc @acc[3],0,@acc[3]
649 addccc @acc[4],0,@acc[4]
650 addccc @acc[5],0,@acc[5]
651 addccc @acc[6],$bi,@acc[6]
652 addccc @acc[7],$carry,@acc[7]
657 srl @acc[0],1,@acc[0]
659 srl @acc[1],1,@acc[1]
660 or @acc[0],$t0,@acc[0]
662 srl @acc[2],1,@acc[2]
663 or @acc[1],$t1,@acc[1]
666 srl @acc[3],1,@acc[3]
667 or @acc[2],$t2,@acc[2]
670 srl @acc[4],1,@acc[4]
671 or @acc[3],$t3,@acc[3]
674 srl @acc[5],1,@acc[5]
675 or @acc[4],$t4,@acc[4]
678 srl @acc[6],1,@acc[6]
679 or @acc[5],$t5,@acc[5]
682 srl @acc[7],1,@acc[7]
683 or @acc[6],$t6,@acc[6]
686 or @acc[7],$t7,@acc[7]
690 .size __ecp_nistz256_div_by_2,.-__ecp_nistz256_div_by_2
693 ########################################################################
694 # The following subroutines are "literal" implementations of those found in
697 ########################################################################
698 # void ecp_nistz256_point_double(P256_POINT *out,const P256_POINT *inp);
701 my ($S,$M,$Zsqr,$tmp0)=map(32*$_,(0..3));
702 # above map() describes stack layout with 4 temporary
703 # 256-bit vectors on top.
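# That is, $S=LOCALS+0, $M=LOCALS+32, $Zsqr=LOCALS+64, $tmp0=LOCALS+96,
# which is why the prologue below extends the frame by 32*4 bytes.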
710 .globl ecp_nistz256_point_double
712 ecp_nistz256_point_double:
713 SPARC_LOAD_ADDRESS_LEAF(OPENSSL_sparcv9cap_P,%g1,%g5)
714 ld [%g1],%g1 ! OPENSSL_sparcv9cap_P[0]
715 and %g1,(SPARCV9_VIS3|SPARCV9_64BIT_STACK),%g1
716 cmp %g1,(SPARCV9_VIS3|SPARCV9_64BIT_STACK)
717 be ecp_nistz256_point_double_vis3
720 save %sp,-STACK_FRAME-32*4,%sp
726 ld [$ap+32+4],@acc[1]
727 ld [$ap+32+8],@acc[2]
728 ld [$ap+32+12],@acc[3]
729 ld [$ap+32+16],@acc[4]
730 ld [$ap+32+20],@acc[5]
731 ld [$ap+32+24],@acc[6]
732 ld [$ap+32+28],@acc[7]
733 call __ecp_nistz256_mul_by_2 ! p256_mul_by_2(S, in_y);
734 add %sp,LOCALS+$S,$rp
738 call __ecp_nistz256_mul_mont ! p256_sqr_mont(Zsqr, in_z);
739 add %sp,LOCALS+$Zsqr,$rp
742 call __ecp_nistz256_add ! p256_add(M, Zsqr, in_x);
743 add %sp,LOCALS+$M,$rp
745 add %sp,LOCALS+$S,$bp
746 add %sp,LOCALS+$S,$ap
747 call __ecp_nistz256_mul_mont ! p256_sqr_mont(S, S);
748 add %sp,LOCALS+$S,$rp
750 ld [$ap_real],@acc[0]
751 add %sp,LOCALS+$Zsqr,$bp
752 ld [$ap_real+4],@acc[1]
753 ld [$ap_real+8],@acc[2]
754 ld [$ap_real+12],@acc[3]
755 ld [$ap_real+16],@acc[4]
756 ld [$ap_real+20],@acc[5]
757 ld [$ap_real+24],@acc[6]
758 ld [$ap_real+28],@acc[7]
759 call __ecp_nistz256_sub_from ! p256_sub(Zsqr, in_x, Zsqr);
760 add %sp,LOCALS+$Zsqr,$rp
764 call __ecp_nistz256_mul_mont ! p256_mul_mont(tmp0, in_z, in_y);
765 add %sp,LOCALS+$tmp0,$rp
767 call __ecp_nistz256_mul_by_2 ! p256_mul_by_2(res_z, tmp0);
770 add %sp,LOCALS+$Zsqr,$bp
771 add %sp,LOCALS+$M,$ap
772 call __ecp_nistz256_mul_mont ! p256_mul_mont(M, M, Zsqr);
773 add %sp,LOCALS+$M,$rp
775 call __ecp_nistz256_mul_by_3 ! p256_mul_by_3(M, M);
776 add %sp,LOCALS+$M,$rp
778 add %sp,LOCALS+$S,$bp
779 add %sp,LOCALS+$S,$ap
780 call __ecp_nistz256_mul_mont ! p256_sqr_mont(tmp0, S);
781 add %sp,LOCALS+$tmp0,$rp
783 call __ecp_nistz256_div_by_2 ! p256_div_by_2(res_y, tmp0);
787 add %sp,LOCALS+$S,$ap
788 call __ecp_nistz256_mul_mont ! p256_mul_mont(S, S, in_x);
789 add %sp,LOCALS+$S,$rp
791 call __ecp_nistz256_mul_by_2 ! p256_mul_by_2(tmp0, S);
792 add %sp,LOCALS+$tmp0,$rp
794 add %sp,LOCALS+$M,$bp
795 add %sp,LOCALS+$M,$ap
796 call __ecp_nistz256_mul_mont ! p256_sqr_mont(res_x, M);
799 add %sp,LOCALS+$tmp0,$bp
800 call __ecp_nistz256_sub_from ! p256_sub(res_x, res_x, tmp0);
803 add %sp,LOCALS+$S,$bp
804 call __ecp_nistz256_sub_morf ! p256_sub(S, S, res_x);
805 add %sp,LOCALS+$S,$rp
807 add %sp,LOCALS+$M,$bp
808 add %sp,LOCALS+$S,$ap
809 call __ecp_nistz256_mul_mont ! p256_mul_mont(S, S, M);
810 add %sp,LOCALS+$S,$rp
813 call __ecp_nistz256_sub_from ! p256_sub(res_y, S, res_y);
818 .size ecp_nistz256_point_double,.-ecp_nistz256_point_double
822 ########################################################################
823 # void ecp_nistz256_point_add(P256_POINT *out,const P256_POINT *in1,
824 # const P256_POINT *in2);
826 my ($res_x,$res_y,$res_z,
827 $H,$Hsqr,$R,$Rsqr,$Hcub,
828 $U1,$U2,$S1,$S2)=map(32*$_,(0..11));
829 my ($Z1sqr, $Z2sqr) = ($Hsqr, $Rsqr);
831 # above map() describes stack layout with 12 temporary
832 # 256-bit vectors on top. Then we reserve some space for
833 # !in1infty, !in2infty, result of check for zero and return pointer.
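# That extra space is the trailing 32 bytes in the "save" below: $rp is
# off-loaded at [%fp+STACK_BIAS-8], the two infinity flags at -12/-16,
# and the zero-check result at -20.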
835 my $bp_real=$rp_real;
838 .globl ecp_nistz256_point_add
840 ecp_nistz256_point_add:
841 SPARC_LOAD_ADDRESS_LEAF(OPENSSL_sparcv9cap_P,%g1,%g5)
842 ld [%g1],%g1 ! OPENSSL_sparcv9cap_P[0]
843 and %g1,(SPARCV9_VIS3|SPARCV9_64BIT_STACK),%g1
844 cmp %g1,(SPARCV9_VIS3|SPARCV9_64BIT_STACK)
845 be ecp_nistz256_point_add_vis3
848 save %sp,-STACK_FRAME-32*12-32,%sp
850 stx $rp,[%fp+STACK_BIAS-8] ! off-load $rp
854 ld [$bp],@acc[0] ! in2_x
862 ld [$bp+32],$t0 ! in2_y
870 or @acc[1],@acc[0],@acc[0]
871 or @acc[3],@acc[2],@acc[2]
872 or @acc[5],@acc[4],@acc[4]
873 or @acc[7],@acc[6],@acc[6]
874 or @acc[2],@acc[0],@acc[0]
875 or @acc[6],@acc[4],@acc[4]
876 or @acc[4],@acc[0],@acc[0]
884 or @acc[0],$t0,$t0 ! !in2infty
886 st $t0,[%fp+STACK_BIAS-12]
888 ld [$ap],@acc[0] ! in1_x
896 ld [$ap+32],$t0 ! in1_y
904 or @acc[1],@acc[0],@acc[0]
905 or @acc[3],@acc[2],@acc[2]
906 or @acc[5],@acc[4],@acc[4]
907 or @acc[7],@acc[6],@acc[6]
908 or @acc[2],@acc[0],@acc[0]
909 or @acc[6],@acc[4],@acc[4]
910 or @acc[4],@acc[0],@acc[0]
918 or @acc[0],$t0,$t0 ! !in1infty
920 st $t0,[%fp+STACK_BIAS-16]
924 call __ecp_nistz256_mul_mont ! p256_sqr_mont(Z2sqr, in2_z);
925 add %sp,LOCALS+$Z2sqr,$rp
929 call __ecp_nistz256_mul_mont ! p256_sqr_mont(Z1sqr, in1_z);
930 add %sp,LOCALS+$Z1sqr,$rp
933 add %sp,LOCALS+$Z2sqr,$ap
934 call __ecp_nistz256_mul_mont ! p256_mul_mont(S1, Z2sqr, in2_z);
935 add %sp,LOCALS+$S1,$rp
938 add %sp,LOCALS+$Z1sqr,$ap
939 call __ecp_nistz256_mul_mont ! p256_mul_mont(S2, Z1sqr, in1_z);
940 add %sp,LOCALS+$S2,$rp
943 add %sp,LOCALS+$S1,$ap
944 call __ecp_nistz256_mul_mont ! p256_mul_mont(S1, S1, in1_y);
945 add %sp,LOCALS+$S1,$rp
948 add %sp,LOCALS+$S2,$ap
949 call __ecp_nistz256_mul_mont ! p256_mul_mont(S2, S2, in2_y);
950 add %sp,LOCALS+$S2,$rp
952 add %sp,LOCALS+$S1,$bp
953 call __ecp_nistz256_sub_from ! p256_sub(R, S2, S1);
954 add %sp,LOCALS+$R,$rp
956 or @acc[1],@acc[0],@acc[0] ! see if result is zero
957 or @acc[3],@acc[2],@acc[2]
958 or @acc[5],@acc[4],@acc[4]
959 or @acc[7],@acc[6],@acc[6]
960 or @acc[2],@acc[0],@acc[0]
961 or @acc[6],@acc[4],@acc[4]
962 or @acc[4],@acc[0],@acc[0]
963 st @acc[0],[%fp+STACK_BIAS-20]
966 add %sp,LOCALS+$Z2sqr,$ap
967 call __ecp_nistz256_mul_mont ! p256_mul_mont(U1, in1_x, Z2sqr);
968 add %sp,LOCALS+$U1,$rp
971 add %sp,LOCALS+$Z1sqr,$ap
972 call __ecp_nistz256_mul_mont ! p256_mul_mont(U2, in2_x, Z1sqr);
973 add %sp,LOCALS+$U2,$rp
975 add %sp,LOCALS+$U1,$bp
976 call __ecp_nistz256_sub_from ! p256_sub(H, U2, U1);
977 add %sp,LOCALS+$H,$rp
979 or @acc[1],@acc[0],@acc[0] ! see if result is zero
980 or @acc[3],@acc[2],@acc[2]
981 or @acc[5],@acc[4],@acc[4]
982 or @acc[7],@acc[6],@acc[6]
983 or @acc[2],@acc[0],@acc[0]
984 or @acc[6],@acc[4],@acc[4]
985 orcc @acc[4],@acc[0],@acc[0]
987 bne,pt %icc,.Ladd_proceed ! is_equal(U1,U2)?
990 ld [%fp+STACK_BIAS-12],$t0
991 ld [%fp+STACK_BIAS-16],$t1
992 ld [%fp+STACK_BIAS-20],$t2
994 be,pt %icc,.Ladd_proceed ! (in1infty || in2infty)?
997 be,pt %icc,.Ladd_proceed ! is_equal(S1,S2)?
1000 ldx [%fp+STACK_BIAS-8],$rp
1030 add %sp,LOCALS+$R,$bp
1031 add %sp,LOCALS+$R,$ap
1032 call __ecp_nistz256_mul_mont ! p256_sqr_mont(Rsqr, R);
1033 add %sp,LOCALS+$Rsqr,$rp
1036 add %sp,LOCALS+$H,$ap
1037 call __ecp_nistz256_mul_mont ! p256_mul_mont(res_z, H, in1_z);
1038 add %sp,LOCALS+$res_z,$rp
1040 add %sp,LOCALS+$H,$bp
1041 add %sp,LOCALS+$H,$ap
1042 call __ecp_nistz256_mul_mont ! p256_sqr_mont(Hsqr, H);
1043 add %sp,LOCALS+$Hsqr,$rp
1046 add %sp,LOCALS+$res_z,$ap
1047 call __ecp_nistz256_mul_mont ! p256_mul_mont(res_z, res_z, in2_z);
1048 add %sp,LOCALS+$res_z,$rp
1050 add %sp,LOCALS+$H,$bp
1051 add %sp,LOCALS+$Hsqr,$ap
1052 call __ecp_nistz256_mul_mont ! p256_mul_mont(Hcub, Hsqr, H);
1053 add %sp,LOCALS+$Hcub,$rp
1055 add %sp,LOCALS+$U1,$bp
1056 add %sp,LOCALS+$Hsqr,$ap
1057 call __ecp_nistz256_mul_mont ! p256_mul_mont(U2, U1, Hsqr);
1058 add %sp,LOCALS+$U2,$rp
1060 call __ecp_nistz256_mul_by_2 ! p256_mul_by_2(Hsqr, U2);
1061 add %sp,LOCALS+$Hsqr,$rp
1063 add %sp,LOCALS+$Rsqr,$bp
1064 call __ecp_nistz256_sub_morf ! p256_sub(res_x, Rsqr, Hsqr);
1065 add %sp,LOCALS+$res_x,$rp
1067 add %sp,LOCALS+$Hcub,$bp
1068 call __ecp_nistz256_sub_from ! p256_sub(res_x, res_x, Hcub);
1069 add %sp,LOCALS+$res_x,$rp
1071 add %sp,LOCALS+$U2,$bp
1072 call __ecp_nistz256_sub_morf ! p256_sub(res_y, U2, res_x);
1073 add %sp,LOCALS+$res_y,$rp
1075 add %sp,LOCALS+$Hcub,$bp
1076 add %sp,LOCALS+$S1,$ap
1077 call __ecp_nistz256_mul_mont ! p256_mul_mont(S2, S1, Hcub);
1078 add %sp,LOCALS+$S2,$rp
1080 add %sp,LOCALS+$R,$bp
1081 add %sp,LOCALS+$res_y,$ap
1082 call __ecp_nistz256_mul_mont ! p256_mul_mont(res_y, res_y, R);
1083 add %sp,LOCALS+$res_y,$rp
1085 add %sp,LOCALS+$S2,$bp
1086 call __ecp_nistz256_sub_from ! p256_sub(res_y, res_y, S2);
1087 add %sp,LOCALS+$res_y,$rp
1089 ld [%fp+STACK_BIAS-16],$t1 ! !in1infty
1090 ld [%fp+STACK_BIAS-12],$t2 ! !in2infty
1091 ldx [%fp+STACK_BIAS-8],$rp
1093 for($i=0;$i<96;$i+=8) { # conditional moves
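# movrz rs1,rs2,rd writes rs2 to rd when rs1 is zero, so a zero
# !in1infty ($t1) substitutes in2 for the computed result and a zero
# !in2infty ($t2) then substitutes in1, with no branching involved.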
1095 ld [%sp+LOCALS+$i],@acc[0] ! res
1096 ld [%sp+LOCALS+$i+4],@acc[1]
1097 ld [$bp_real+$i],@acc[2] ! in2
1098 ld [$bp_real+$i+4],@acc[3]
1099 ld [$ap_real+$i],@acc[4] ! in1
1100 ld [$ap_real+$i+4],@acc[5]
1101 movrz $t1,@acc[2],@acc[0]
1102 movrz $t1,@acc[3],@acc[1]
1103 movrz $t2,@acc[4],@acc[0]
1104 movrz $t2,@acc[5],@acc[1]
1106 st @acc[1],[$rp+$i+4]
1113 .size ecp_nistz256_point_add,.-ecp_nistz256_point_add
1117 ########################################################################
1118 # void ecp_nistz256_point_add_affine(P256_POINT *out,const P256_POINT *in1,
1119 # const P256_POINT_AFFINE *in2);
1121 my ($res_x,$res_y,$res_z,
1122 $U2,$S2,$H,$R,$Hsqr,$Hcub,$Rsqr)=map(32*$_,(0..9));
1124 # above map() describes stack layout with 10 temporary
1125 # 256-bit vectors on top. Then we reserve some space for
1126 # !in1infty, !in2infty, result of check for zero and return pointer.
1128 my @ONE_mont=(1,0,0,-1,-1,-1,-2,0);
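# @ONE_mont is the value 1 in Montgomery representation, i.e.
# 2^256 mod P, flattened to eight 32-bit words with -1 standing for
# 0xffffffff and -2 for 0xfffffffe.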
1129 my $bp_real=$rp_real;
1132 .globl ecp_nistz256_point_add_affine
1134 ecp_nistz256_point_add_affine:
1135 SPARC_LOAD_ADDRESS_LEAF(OPENSSL_sparcv9cap_P,%g1,%g5)
1136 ld [%g1],%g1 ! OPENSSL_sparcv9cap_P[0]
1137 and %g1,(SPARCV9_VIS3|SPARCV9_64BIT_STACK),%g1
1138 cmp %g1,(SPARCV9_VIS3|SPARCV9_64BIT_STACK)
1139 be ecp_nistz256_point_add_affine_vis3
1142 save %sp,-STACK_FRAME-32*10-32,%sp
1144 stx $rp,[%fp+STACK_BIAS-8] ! off-load $rp
1148 ld [$ap],@acc[0] ! in1_x
1156 ld [$ap+32],$t0 ! in1_y
1164 or @acc[1],@acc[0],@acc[0]
1165 or @acc[3],@acc[2],@acc[2]
1166 or @acc[5],@acc[4],@acc[4]
1167 or @acc[7],@acc[6],@acc[6]
1168 or @acc[2],@acc[0],@acc[0]
1169 or @acc[6],@acc[4],@acc[4]
1170 or @acc[4],@acc[0],@acc[0]
1178 or @acc[0],$t0,$t0 ! !in1infty
1180 st $t0,[%fp+STACK_BIAS-16]
1182 ld [$bp],@acc[0] ! in2_x
1190 ld [$bp+32],$t0 ! in2_y
1198 or @acc[1],@acc[0],@acc[0]
1199 or @acc[3],@acc[2],@acc[2]
1200 or @acc[5],@acc[4],@acc[4]
1201 or @acc[7],@acc[6],@acc[6]
1202 or @acc[2],@acc[0],@acc[0]
1203 or @acc[6],@acc[4],@acc[4]
1204 or @acc[4],@acc[0],@acc[0]
1212 or @acc[0],$t0,$t0 ! !in2infty
1214 st $t0,[%fp+STACK_BIAS-12]
1218 call __ecp_nistz256_mul_mont ! p256_sqr_mont(Z1sqr, in1_z);
1219 add %sp,LOCALS+$Z1sqr,$rp
1222 add %sp,LOCALS+$Z1sqr,$ap
1223 call __ecp_nistz256_mul_mont ! p256_mul_mont(U2, Z1sqr, in2_x);
1224 add %sp,LOCALS+$U2,$rp
1227 call __ecp_nistz256_sub_from ! p256_sub(H, U2, in1_x);
1228 add %sp,LOCALS+$H,$rp
1231 add %sp,LOCALS+$Z1sqr,$ap
1232 call __ecp_nistz256_mul_mont ! p256_mul_mont(S2, Z1sqr, in1_z);
1233 add %sp,LOCALS+$S2,$rp
1236 add %sp,LOCALS+$H,$ap
1237 call __ecp_nistz256_mul_mont ! p256_mul_mont(res_z, H, in1_z);
1238 add %sp,LOCALS+$res_z,$rp
1241 add %sp,LOCALS+$S2,$ap
1242 call __ecp_nistz256_mul_mont ! p256_mul_mont(S2, S2, in2_y);
1243 add %sp,LOCALS+$S2,$rp
1246 call __ecp_nistz256_sub_from ! p256_sub(R, S2, in1_y);
1247 add %sp,LOCALS+$R,$rp
1249 add %sp,LOCALS+$H,$bp
1250 add %sp,LOCALS+$H,$ap
1251 call __ecp_nistz256_mul_mont ! p256_sqr_mont(Hsqr, H);
1252 add %sp,LOCALS+$Hsqr,$rp
1254 add %sp,LOCALS+$R,$bp
1255 add %sp,LOCALS+$R,$ap
1256 call __ecp_nistz256_mul_mont ! p256_sqr_mont(Rsqr, R);
1257 add %sp,LOCALS+$Rsqr,$rp
1259 add %sp,LOCALS+$H,$bp
1260 add %sp,LOCALS+$Hsqr,$ap
1261 call __ecp_nistz256_mul_mont ! p256_mul_mont(Hcub, Hsqr, H);
1262 add %sp,LOCALS+$Hcub,$rp
1265 add %sp,LOCALS+$Hsqr,$ap
1266 call __ecp_nistz256_mul_mont ! p256_mul_mont(U2, in1_x, Hsqr);
1267 add %sp,LOCALS+$U2,$rp
1269 call __ecp_nistz256_mul_by_2 ! p256_mul_by_2(Hsqr, U2);
1270 add %sp,LOCALS+$Hsqr,$rp
1272 add %sp,LOCALS+$Rsqr,$bp
1273 call __ecp_nistz256_sub_morf ! p256_sub(res_x, Rsqr, Hsqr);
1274 add %sp,LOCALS+$res_x,$rp
1276 add %sp,LOCALS+$Hcub,$bp
1277 call __ecp_nistz256_sub_from ! p256_sub(res_x, res_x, Hcub);
1278 add %sp,LOCALS+$res_x,$rp
1280 add %sp,LOCALS+$U2,$bp
1281 call __ecp_nistz256_sub_morf ! p256_sub(res_y, U2, res_x);
1282 add %sp,LOCALS+$res_y,$rp
1285 add %sp,LOCALS+$Hcub,$ap
1286 call __ecp_nistz256_mul_mont ! p256_mul_mont(S2, in1_y, Hcub);
1287 add %sp,LOCALS+$S2,$rp
1289 add %sp,LOCALS+$R,$bp
1290 add %sp,LOCALS+$res_y,$ap
1291 call __ecp_nistz256_mul_mont ! p256_mul_mont(res_y, res_y, R);
1292 add %sp,LOCALS+$res_y,$rp
1294 add %sp,LOCALS+$S2,$bp
1295 call __ecp_nistz256_sub_from ! p256_sub(res_y, res_y, S2);
1296 add %sp,LOCALS+$res_y,$rp
1298 ld [%fp+STACK_BIAS-16],$t1 ! !in1infty
1299 ld [%fp+STACK_BIAS-12],$t2 ! !in2infty
1300 ldx [%fp+STACK_BIAS-8],$rp
1302 for($i=0;$i<64;$i+=8) { # conditional moves
1304 ld [%sp+LOCALS+$i],@acc[0] ! res
1305 ld [%sp+LOCALS+$i+4],@acc[1]
1306 ld [$bp_real+$i],@acc[2] ! in2
1307 ld [$bp_real+$i+4],@acc[3]
1308 ld [$ap_real+$i],@acc[4] ! in1
1309 ld [$ap_real+$i+4],@acc[5]
1310 movrz $t1,@acc[2],@acc[0]
1311 movrz $t1,@acc[3],@acc[1]
1312 movrz $t2,@acc[4],@acc[0]
1313 movrz $t2,@acc[5],@acc[1]
1315 st @acc[1],[$rp+$i+4]
1321 ld [%sp+LOCALS+$i],@acc[0] ! res
1322 ld [%sp+LOCALS+$i+4],@acc[1]
1323 ld [$ap_real+$i],@acc[4] ! in1
1324 ld [$ap_real+$i+4],@acc[5]
1325 movrz $t1,@ONE_mont[$j],@acc[0]
1326 movrz $t1,@ONE_mont[$j+1],@acc[1]
1327 movrz $t2,@acc[4],@acc[0]
1328 movrz $t2,@acc[5],@acc[1]
1330 st @acc[1],[$rp+$i+4]
1336 .size ecp_nistz256_point_add_affine,.-ecp_nistz256_point_add_affine
1340 my ($out,$inp,$index)=map("%i$_",(0..2));
1344 ! void ecp_nistz256_scatter_w5(void *%i0,const P256_POINT *%i1,
1346 .globl ecp_nistz256_scatter_w5
1348 ecp_nistz256_scatter_w5:
1349 save %sp,-STACK_FRAME,%sp
1352 add $out,$index,$out
1363 st %l0,[$out+64*0-4]
1364 st %l1,[$out+64*1-4]
1365 st %l2,[$out+64*2-4]
1366 st %l3,[$out+64*3-4]
1367 st %l4,[$out+64*4-4]
1368 st %l5,[$out+64*5-4]
1369 st %l6,[$out+64*6-4]
1370 st %l7,[$out+64*7-4]
1382 st %l0,[$out+64*0-4]
1383 st %l1,[$out+64*1-4]
1384 st %l2,[$out+64*2-4]
1385 st %l3,[$out+64*3-4]
1386 st %l4,[$out+64*4-4]
1387 st %l5,[$out+64*5-4]
1388 st %l6,[$out+64*6-4]
1389 st %l7,[$out+64*7-4]
1400 st %l0,[$out+64*0-4]
1401 st %l1,[$out+64*1-4]
1402 st %l2,[$out+64*2-4]
1403 st %l3,[$out+64*3-4]
1404 st %l4,[$out+64*4-4]
1405 st %l5,[$out+64*5-4]
1406 st %l6,[$out+64*6-4]
1407 st %l7,[$out+64*7-4]
1411 .size ecp_nistz256_scatter_w5,.-ecp_nistz256_scatter_w5
1413 ! void ecp_nistz256_gather_w5(P256_POINT *%i0,const void *%i1,
1415 .globl ecp_nistz256_gather_w5
1417 ecp_nistz256_gather_w5:
1418 save %sp,-STACK_FRAME,%sp
1423 add $index,$mask,$index
1425 add $inp,$index,$inp
1508 .size ecp_nistz256_gather_w5,.-ecp_nistz256_gather_w5
1510 ! void ecp_nistz256_scatter_w7(void *%i0,const P256_POINT_AFFINE *%i1,
1512 .globl ecp_nistz256_scatter_w7
1514 ecp_nistz256_scatter_w7:
1515 save %sp,-STACK_FRAME,%sp
1517 add $out,$index,$out
1522 subcc $index,1,$index
1523 stb %l0,[$out+64*0-1]
1525 stb %l1,[$out+64*1-1]
1527 stb %l2,[$out+64*2-1]
1529 stb %l3,[$out+64*3-1]
1530 bne .Loop_scatter_w7
1535 .size ecp_nistz256_scatter_w7,.-ecp_nistz256_scatter_w7
1537 ! void ecp_nistz256_gather_w7(P256_POINT_AFFINE *%i0,const void *%i1,
1539 .globl ecp_nistz256_gather_w7
1541 ecp_nistz256_gather_w7:
1542 save %sp,-STACK_FRAME,%sp
1547 add $index,$mask,$index
1548 add $inp,$index,$inp
1552 ldub [$inp+64*0],%l0
1553 prefetch [$inp+3840+64*0],1
1554 subcc $index,1,$index
1555 ldub [$inp+64*1],%l1
1556 prefetch [$inp+3840+64*1],1
1557 ldub [$inp+64*2],%l2
1558 prefetch [$inp+3840+64*2],1
1559 ldub [$inp+64*3],%l3
1560 prefetch [$inp+3840+64*3],1
1575 .size ecp_nistz256_gather_w7,.-ecp_nistz256_gather_w7
1579 ########################################################################
1580 # The following subroutines are VIS3 counterparts of those above that
1581 # implement the ones found in ecp_nistz256.c. The key difference is
1582 # that they use 128-bit multiplication and addition with 64-bit carry,
1583 # and in order to do that they convert from uint32_t[8] to uint64_t[4]
1584 # on entry and back on return.
1586 my ($rp,$ap,$bp)=map("%i$_",(0..2));
1587 my ($t0,$t1,$t2,$t3,$a0,$a1,$a2,$a3)=map("%l$_",(0..7));
1588 my ($acc0,$acc1,$acc2,$acc3,$acc4,$acc5)=map("%o$_",(0..5));
1589 my ($bi,$poly1,$poly3,$minus1)=(map("%i$_",(3..5)),"%g1");
1590 my ($rp_real,$ap_real)=("%g2","%g3");
1591 my ($acc6,$acc7)=($bp,$bi); # used in squaring
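
# As a minimal illustration of the entry conversion mentioned above
# (documentation only, never invoked by this generator, assumes 64-bit
# Perl integers): eight little-endian 32-bit words become four 64-bit
# limbs with the low word in the low half.
sub __pack_u32x8_demo {
	my @w = @_;			# $w[0] is the least significant word
	return map { $w[2*$_] | ($w[2*$_+1] << 32) } (0..3);
}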
1595 __ecp_nistz256_mul_by_2_vis3:
1596 addcc $acc0,$acc0,$acc0
1597 addxccc $acc1,$acc1,$acc1
1598 addxccc $acc2,$acc2,$acc2
1599 addxccc $acc3,$acc3,$acc3
1600 b .Lreduce_by_sub_vis3
1601 addxc %g0,%g0,$acc4 ! did it carry?
1602 .size __ecp_nistz256_mul_by_2_vis3,.-__ecp_nistz256_mul_by_2_vis3
1605 __ecp_nistz256_add_vis3:
1611 __ecp_nistz256_add_noload_vis3:
1613 addcc $t0,$acc0,$acc0
1614 addxccc $t1,$acc1,$acc1
1615 addxccc $t2,$acc2,$acc2
1616 addxccc $t3,$acc3,$acc3
1617 addxc %g0,%g0,$acc4 ! did it carry?
1619 .Lreduce_by_sub_vis3:
1621 addcc $acc0,1,$t0 ! add -modulus, i.e. subtract
1622 addxccc $acc1,$poly1,$t1
1623 addxccc $acc2,$minus1,$t2
1624 addxc $acc3,$poly3,$t3
1626 movrnz $acc4,$t0,$acc0 ! if a+b carried, ret = ret-mod
1627 movrnz $acc4,$t1,$acc1
1629 movrnz $acc4,$t2,$acc2
1631 movrnz $acc4,$t3,$acc3
1635 .size __ecp_nistz256_add_vis3,.-__ecp_nistz256_add_vis3
1637 ! The trouble with subtraction is that there is no subtraction with
1638 ! 64-bit borrow, only with a 32-bit one. For this reason we "decompose"
1639 ! the 64-bit $acc0-$acc3 into 32-bit values and pick up b[4] in 32-bit
1640 ! pieces. But recall that SPARC is big-endian, which is why you'll
1641 ! observe that b[4] is accessed as 4-0-12-8-20-16-28-24. And prior to
1642 ! reduction we "collect" the result back into 64-bit $acc0-$acc3.
1644 __ecp_nistz256_sub_from_vis3:
1653 subcc $acc0,$t0,$acc0
1655 subccc $acc4,$t1,$acc4
1657 subccc $acc1,$t2,$acc1
1659 and $acc0,$poly1,$acc0
1660 subccc $acc5,$t3,$acc5
1663 and $acc1,$poly1,$acc1
1665 or $acc0,$acc4,$acc0
1667 or $acc1,$acc5,$acc1
1669 subccc $acc2,$t0,$acc2
1670 subccc $acc4,$t1,$acc4
1671 subccc $acc3,$t2,$acc3
1672 and $acc2,$poly1,$acc2
1673 subccc $acc5,$t3,$acc5
1675 and $acc3,$poly1,$acc3
1677 or $acc2,$acc4,$acc2
1678 subc %g0,%g0,$acc4 ! did it borrow?
1679 b .Lreduce_by_add_vis3
1680 or $acc3,$acc5,$acc3
1681 .size __ecp_nistz256_sub_from_vis3,.-__ecp_nistz256_sub_from_vis3
1684 __ecp_nistz256_sub_morf_vis3:
1693 subcc $t0,$acc0,$acc0
1695 subccc $t1,$acc4,$acc4
1697 subccc $t2,$acc1,$acc1
1699 and $acc0,$poly1,$acc0
1700 subccc $t3,$acc5,$acc5
1703 and $acc1,$poly1,$acc1
1705 or $acc0,$acc4,$acc0
1707 or $acc1,$acc5,$acc1
1709 subccc $t0,$acc2,$acc2
1710 subccc $t1,$acc4,$acc4
1711 subccc $t2,$acc3,$acc3
1712 and $acc2,$poly1,$acc2
1713 subccc $t3,$acc5,$acc5
1715 and $acc3,$poly1,$acc3
1717 or $acc2,$acc4,$acc2
1718 subc %g0,%g0,$acc4 ! did it borrow?
1719 or $acc3,$acc5,$acc3
1721 .Lreduce_by_add_vis3:
1723 addcc $acc0,-1,$t0 ! add modulus
1725 addxccc $acc1,$poly1,$t1
1726 not $poly1,$poly1 ! restore $poly1
1727 addxccc $acc2,%g0,$t2
1730 movrnz $acc4,$t0,$acc0 ! if a-b borrowed, ret = ret+mod
1731 movrnz $acc4,$t1,$acc1
1733 movrnz $acc4,$t2,$acc2
1735 movrnz $acc4,$t3,$acc3
1739 .size __ecp_nistz256_sub_morf_vis3,.-__ecp_nistz256_sub_morf_vis3
1742 __ecp_nistz256_div_by_2_vis3:
1743 ! ret = (a is odd ? a+mod : a) >> 1
1748 addcc $acc0,-1,$t0 ! add modulus
1749 addxccc $acc1,$t1,$t1
1750 addxccc $acc2,%g0,$t2
1751 addxccc $acc3,$t3,$t3
1752 addxc %g0,%g0,$acc4 ! carry bit
1754 movrnz $acc5,$t0,$acc0
1755 movrnz $acc5,$t1,$acc1
1756 movrnz $acc5,$t2,$acc2
1757 movrnz $acc5,$t3,$acc3
1758 movrz $acc5,%g0,$acc4
1773 sllx $acc4,63,$t3 ! don't forget carry bit
1779 .size __ecp_nistz256_div_by_2_vis3,.-__ecp_nistz256_div_by_2_vis3
1781 ! compared to __ecp_nistz256_mul_mont it's almost 4x smaller and
1782 ! 4x faster [on T4]...
1784 __ecp_nistz256_mul_mont_vis3:
1786 not $poly3,$poly3 ! 0xFFFFFFFF00000001
1794 ldx [$bp+8],$bi ! b[1]
1796 addcc $acc1,$t0,$acc1 ! accumulate high parts of multiplication
1798 addxccc $acc2,$t1,$acc2
1800 addxccc $acc3,$t2,$acc3
1804 for($i=1;$i<4;$i++) {
1805 # Reduction iteration is normally performed by accumulating
1806 # result of multiplication of modulus by "magic" digit [and
1807 # omitting least significant word, which is guaranteed to
1808 # be 0], but thanks to special form of modulus and "magic"
1809 # digit being equal to least significant word, it can be
1810 # performed with additions and subtractions alone. Indeed:
1812 # ffff0001.00000000.0000ffff.ffffffff
1814 # + xxxxxxxx.xxxxxxxx.xxxxxxxx.xxxxxxxx.abcdefgh
1816 # Now observing that ff..ff*x = (2^n-1)*x = 2^n*x-x, we
1819 # xxxxxxxx.xxxxxxxx.xxxxxxxx.xxxxxxxx.abcdefgh
1820 # + abcdefgh.abcdefgh.0000abcd.efgh0000.00000000
1821 # - 0000abcd.efgh0000.00000000.00000000.abcdefgh
1823 # or marking redundant operations:
1825 # xxxxxxxx.xxxxxxxx.xxxxxxxx.xxxxxxxx.--------
1826 # + abcdefgh.abcdefgh.0000abcd.efgh0000.--------
1827 # - 0000abcd.efgh0000.--------.--------.--------
1828 # ^^^^^^^^ but this word is calculated with umulxhi, because
1829 # there is no subtract with 64-bit borrow:-(
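#
# As for umulxhi: it returns the high 64 bits of the unsigned 64x64-bit
# product, i.e. in BigInt terms umulxhi(a,b) == (a*b) >> 64, which is
# what stands in for the missing subtract-with-64-bit-borrow here.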
1832 sub $acc0,$t0,$t2 ! acc0*0xFFFFFFFF00000001, low part
1833 umulxhi $acc0,$poly3,$t3 ! acc0*0xFFFFFFFF00000001, high part
1834 addcc $acc1,$t0,$acc0 ! +=acc[0]<<96 and omit acc[0]
1836 addxccc $acc2,$t1,$acc1
1838 addxccc $acc3,$t2,$acc2 ! +=acc[0]*0xFFFFFFFF00000001
1840 addxccc $acc4,$t3,$acc3
1842 addxc $acc5,%g0,$acc4
1844 addcc $acc0,$t0,$acc0 ! accumulate low parts of multiplication
1846 addxccc $acc1,$t1,$acc1
1848 addxccc $acc2,$t2,$acc2
1850 addxccc $acc3,$t3,$acc3
1852 addxc $acc4,%g0,$acc4
1854 $code.=<<___ if ($i<3);
1855 ldx [$bp+8*($i+1)],$bi ! bp[$i+1]
1858 addcc $acc1,$t0,$acc1 ! accumulate high parts of multiplication
1860 addxccc $acc2,$t1,$acc2
1862 addxccc $acc3,$t2,$acc3
1863 addxccc $acc4,$t3,$acc4
1868 sub $acc0,$t0,$t2 ! acc0*0xFFFFFFFF00000001, low part
1869 umulxhi $acc0,$poly3,$t3 ! acc0*0xFFFFFFFF00000001, high part
1870 addcc $acc1,$t0,$acc0 ! +=acc[0]<<96 and omit acc[0]
1871 addxccc $acc2,$t1,$acc1
1872 addxccc $acc3,$t2,$acc2 ! +=acc[0]*0xFFFFFFFF00000001
1873 addxccc $acc4,$t3,$acc3
1874 b .Lmul_final_vis3 ! see below
1875 addxc $acc5,%g0,$acc4
1876 .size __ecp_nistz256_mul_mont_vis3,.-__ecp_nistz256_mul_mont_vis3
1878 ! compared to the above __ecp_nistz256_mul_mont_vis3 it's 21% fewer
1879 ! instructions, but only 14% faster [on T4]...
1881 __ecp_nistz256_sqr_mont_vis3:
1882 ! | | | | | |a1*a0| |
1883 ! | | | | |a2*a0| | |
1884 ! | |a3*a2|a3*a0| | | |
1885 ! | | | |a2*a1| | | |
1886 ! | | |a3*a1| | | | |
1887 ! *| | | | | | | | 2|
1888 ! +|a3*a3|a2*a2|a1*a1|a0*a0|
1889 ! |--+--+--+--+--+--+--+--|
1890 ! |A7|A6|A5|A4|A3|A2|A1|A0|, where Ax is $accx, i.e. follow $accx
1892 ! "can't overflow" below mark carrying into high part of
1893 ! multiplication result, which can't overflow, because it
1894 ! can never be all ones.
1896 mulx $a1,$a0,$acc1 ! a[1]*a[0]
1898 mulx $a2,$a0,$acc2 ! a[2]*a[0]
1900 mulx $a3,$a0,$acc3 ! a[3]*a[0]
1901 umulxhi $a3,$a0,$acc4
1903 addcc $acc2,$t1,$acc2 ! accumulate high parts of multiplication
1904 mulx $a2,$a1,$t0 ! a[2]*a[1]
1906 addxccc $acc3,$t2,$acc3
1907 mulx $a3,$a1,$t2 ! a[3]*a[1]
1909 addxc $acc4,%g0,$acc4 ! can't overflow
1911 mulx $a3,$a2,$acc5 ! a[3]*a[2]
1912 not $poly3,$poly3 ! 0xFFFFFFFF00000001
1913 umulxhi $a3,$a2,$acc6
1915 addcc $t2,$t1,$t1 ! accumulate high parts of multiplication
1916 mulx $a0,$a0,$acc0 ! a[0]*a[0]
1917 addxc $t3,%g0,$t2 ! can't overflow
1919 addcc $acc3,$t0,$acc3 ! accumulate low parts of multiplication
1921 addxccc $acc4,$t1,$acc4
1922 mulx $a1,$a1,$t1 ! a[1]*a[1]
1923 addxccc $acc5,$t2,$acc5
1925 addxc $acc6,%g0,$acc6 ! can't overflow
1927 addcc $acc1,$acc1,$acc1 ! acc[1-6]*=2
1928 mulx $a2,$a2,$t2 ! a[2]*a[2]
1929 addxccc $acc2,$acc2,$acc2
1931 addxccc $acc3,$acc3,$acc3
1932 mulx $a3,$a3,$t3 ! a[3]*a[3]
1933 addxccc $acc4,$acc4,$acc4
1935 addxccc $acc5,$acc5,$acc5
1936 addxccc $acc6,$acc6,$acc6
1939 addcc $acc1,$a0,$acc1 ! +a[i]*a[i]
1940 addxccc $acc2,$t1,$acc2
1941 addxccc $acc3,$a1,$acc3
1942 addxccc $acc4,$t2,$acc4
1944 addxccc $acc5,$a2,$acc5
1946 addxccc $acc6,$t3,$acc6
1947 sub $acc0,$t0,$t2 ! acc0*0xFFFFFFFF00000001, low part
1948 addxc $acc7,$a3,$acc7
1950 for($i=0;$i<3;$i++) { # reductions, see commentary
1951 # in multiplication for details
1953 umulxhi $acc0,$poly3,$t3 ! acc0*0xFFFFFFFF00000001, high part
1954 addcc $acc1,$t0,$acc0 ! +=acc[0]<<96 and omit acc[0]
1956 addxccc $acc2,$t1,$acc1
1958 addxccc $acc3,$t2,$acc2 ! +=acc[0]*0xFFFFFFFF00000001
1959 sub $acc0,$t0,$t2 ! acc0*0xFFFFFFFF00000001, low part
1960 addxc %g0,$t3,$acc3 ! can't overflow
1964 umulxhi $acc0,$poly3,$t3 ! acc0*0xFFFFFFFF00000001, high part
1965 addcc $acc1,$t0,$acc0 ! +=acc[0]<<96 and omit acc[0]
1966 addxccc $acc2,$t1,$acc1
1967 addxccc $acc3,$t2,$acc2 ! +=acc[0]*0xFFFFFFFF00000001
1968 addxc %g0,$t3,$acc3 ! can't overflow
1970 addcc $acc0,$acc4,$acc0 ! accumulate upper half
1971 addxccc $acc1,$acc5,$acc1
1972 addxccc $acc2,$acc6,$acc2
1973 addxccc $acc3,$acc7,$acc3
1978 ! The final step is "if result > mod, subtract mod", but as comparison
1979 ! means subtraction, we do the subtraction and then copy the outcome
1980 ! if it didn't borrow. But note that as we [have to] replace
1981 ! subtraction with addition of the negated value, the carry/borrow logic is
1984 addcc $acc0,1,$t0 ! add -modulus, i.e. subtract
1985 not $poly3,$poly3 ! restore 0x00000000FFFFFFFE
1986 addxccc $acc1,$poly1,$t1
1987 addxccc $acc2,$minus1,$t2
1988 addxccc $acc3,$poly3,$t3
1989 addxccc $acc4,$minus1,%g0 ! did it carry?
1991 movcs %xcc,$t0,$acc0
1992 movcs %xcc,$t1,$acc1
1994 movcs %xcc,$t2,$acc2
1996 movcs %xcc,$t3,$acc3
2000 .size __ecp_nistz256_sqr_mont_vis3,.-__ecp_nistz256_sqr_mont_vis3
2003 ########################################################################
2004 # void ecp_nistz256_point_double(P256_POINT *out,const P256_POINT *inp);
2007 my ($res_x,$res_y,$res_z,
2009 $S,$M,$Zsqr,$tmp0)=map(32*$_,(0..9));
2010 # above map() describes stack layout with 10 temporary
2011 # 256-bit vectors on top.
2015 ecp_nistz256_point_double_vis3:
2016 save %sp,-STACK64_FRAME-32*10,%sp
2021 sllx $minus1,32,$poly1 ! 0xFFFFFFFF00000000
2022 srl $poly3,0,$poly3 ! 0x00000000FFFFFFFE
2024 ! convert input to uint64_t[4]
2035 ld [$ap+32],$acc0 ! in_y
2043 ld [$ap+32+16],$acc2
2047 ld [$ap+32+24],$acc3
2051 stx $a0,[%sp+LOCALS64+$in_x]
2053 stx $a1,[%sp+LOCALS64+$in_x+8]
2055 stx $a2,[%sp+LOCALS64+$in_x+16]
2057 stx $a3,[%sp+LOCALS64+$in_x+24]
2059 stx $acc0,[%sp+LOCALS64+$in_y]
2061 stx $acc1,[%sp+LOCALS64+$in_y+8]
2063 stx $acc2,[%sp+LOCALS64+$in_y+16]
2064 stx $acc3,[%sp+LOCALS64+$in_y+24]
2066 ld [$ap+64],$a0 ! in_z
2084 stx $a0,[%sp+LOCALS64+$in_z]
2086 stx $a1,[%sp+LOCALS64+$in_z+8]
2088 stx $a2,[%sp+LOCALS64+$in_z+16]
2089 stx $a3,[%sp+LOCALS64+$in_z+24]
2091 ! in_y is still in $acc0-$acc3
2092 call __ecp_nistz256_mul_by_2_vis3 ! p256_mul_by_2(S, in_y);
2093 add %sp,LOCALS64+$S,$rp
2095 ! in_z is still in $a0-$a3
2096 call __ecp_nistz256_sqr_mont_vis3 ! p256_sqr_mont(Zsqr, in_z);
2097 add %sp,LOCALS64+$Zsqr,$rp
2099 mov $acc0,$a0 ! put Zsqr aside
2104 add %sp,LOCALS64+$in_x,$bp
2105 call __ecp_nistz256_add_vis3 ! p256_add(M, Zsqr, in_x);
2106 add %sp,LOCALS64+$M,$rp
2108 mov $a0,$acc0 ! restore Zsqr
2109 ldx [%sp+LOCALS64+$S],$a0 ! forward load
2111 ldx [%sp+LOCALS64+$S+8],$a1
2113 ldx [%sp+LOCALS64+$S+16],$a2
2115 ldx [%sp+LOCALS64+$S+24],$a3
2117 add %sp,LOCALS64+$in_x,$bp
2118 call __ecp_nistz256_sub_morf_vis3 ! p256_sub(Zsqr, in_x, Zsqr);
2119 add %sp,LOCALS64+$Zsqr,$rp
2121 call __ecp_nistz256_sqr_mont_vis3 ! p256_sqr_mont(S, S);
2122 add %sp,LOCALS64+$S,$rp
2124 ldx [%sp+LOCALS64+$in_z],$bi
2125 ldx [%sp+LOCALS64+$in_y],$a0
2126 ldx [%sp+LOCALS64+$in_y+8],$a1
2127 ldx [%sp+LOCALS64+$in_y+16],$a2
2128 ldx [%sp+LOCALS64+$in_y+24],$a3
2129 add %sp,LOCALS64+$in_z,$bp
2130 call __ecp_nistz256_mul_mont_vis3 ! p256_mul_mont(tmp0, in_z, in_y);
2131 add %sp,LOCALS64+$tmp0,$rp
2133 ldx [%sp+LOCALS64+$M],$bi ! forward load
2134 ldx [%sp+LOCALS64+$Zsqr],$a0
2135 ldx [%sp+LOCALS64+$Zsqr+8],$a1
2136 ldx [%sp+LOCALS64+$Zsqr+16],$a2
2137 ldx [%sp+LOCALS64+$Zsqr+24],$a3
2139 call __ecp_nistz256_mul_by_2_vis3 ! p256_mul_by_2(res_z, tmp0);
2140 add %sp,LOCALS64+$res_z,$rp
2142 add %sp,LOCALS64+$M,$bp
2143 call __ecp_nistz256_mul_mont_vis3 ! p256_mul_mont(M, M, Zsqr);
2144 add %sp,LOCALS64+$M,$rp
2146 mov $acc0,$a0 ! put aside M
2150 call __ecp_nistz256_mul_by_2_vis3
2151 add %sp,LOCALS64+$M,$rp
2152 mov $a0,$t0 ! copy M
2153 ldx [%sp+LOCALS64+$S],$a0 ! forward load
2155 ldx [%sp+LOCALS64+$S+8],$a1
2157 ldx [%sp+LOCALS64+$S+16],$a2
2159 ldx [%sp+LOCALS64+$S+24],$a3
2160 call __ecp_nistz256_add_noload_vis3 ! p256_mul_by_3(M, M);
2161 add %sp,LOCALS64+$M,$rp
2163 call __ecp_nistz256_sqr_mont_vis3 ! p256_sqr_mont(tmp0, S);
2164 add %sp,LOCALS64+$tmp0,$rp
2166 ldx [%sp+LOCALS64+$S],$bi ! forward load
2167 ldx [%sp+LOCALS64+$in_x],$a0
2168 ldx [%sp+LOCALS64+$in_x+8],$a1
2169 ldx [%sp+LOCALS64+$in_x+16],$a2
2170 ldx [%sp+LOCALS64+$in_x+24],$a3
2172 call __ecp_nistz256_div_by_2_vis3 ! p256_div_by_2(res_y, tmp0);
2173 add %sp,LOCALS64+$res_y,$rp
2175 add %sp,LOCALS64+$S,$bp
2176 call __ecp_nistz256_mul_mont_vis3 ! p256_mul_mont(S, S, in_x);
2177 add %sp,LOCALS64+$S,$rp
2179 ldx [%sp+LOCALS64+$M],$a0 ! forward load
2180 ldx [%sp+LOCALS64+$M+8],$a1
2181 ldx [%sp+LOCALS64+$M+16],$a2
2182 ldx [%sp+LOCALS64+$M+24],$a3
2184 call __ecp_nistz256_mul_by_2_vis3 ! p256_mul_by_2(tmp0, S);
2185 add %sp,LOCALS64+$tmp0,$rp
2187 call __ecp_nistz256_sqr_mont_vis3 ! p256_sqr_mont(res_x, M);
2188 add %sp,LOCALS64+$res_x,$rp
2190 add %sp,LOCALS64+$tmp0,$bp
2191 call __ecp_nistz256_sub_from_vis3 ! p256_sub(res_x, res_x, tmp0);
2192 add %sp,LOCALS64+$res_x,$rp
2194 ldx [%sp+LOCALS64+$M],$a0 ! forward load
2195 ldx [%sp+LOCALS64+$M+8],$a1
2196 ldx [%sp+LOCALS64+$M+16],$a2
2197 ldx [%sp+LOCALS64+$M+24],$a3
2199 add %sp,LOCALS64+$S,$bp
2200 call __ecp_nistz256_sub_morf_vis3 ! p256_sub(S, S, res_x);
2201 add %sp,LOCALS64+$S,$rp
2204 call __ecp_nistz256_mul_mont_vis3 ! p256_mul_mont(S, S, M);
2205 add %sp,LOCALS64+$S,$rp
2207 ldx [%sp+LOCALS64+$res_x],$a0 ! forward load
2208 ldx [%sp+LOCALS64+$res_x+8],$a1
2209 ldx [%sp+LOCALS64+$res_x+16],$a2
2210 ldx [%sp+LOCALS64+$res_x+24],$a3
2212 add %sp,LOCALS64+$res_y,$bp
2213 call __ecp_nistz256_sub_from_vis3 ! p256_sub(res_y, S, res_y);
2214 add %sp,LOCALS64+$res_y,$rp
2216 ! convert output to uint32_t[8]
2219 st $a0,[$rp_real] ! res_x
2224 st $t1,[$rp_real+12]
2225 st $a2,[$rp_real+16]
2226 st $t2,[$rp_real+20]
2227 st $a3,[$rp_real+24]
2228 st $t3,[$rp_real+28]
2230 ldx [%sp+LOCALS64+$res_z],$a0 ! forward load
2232 ldx [%sp+LOCALS64+$res_z+8],$a1
2234 ldx [%sp+LOCALS64+$res_z+16],$a2
2236 ldx [%sp+LOCALS64+$res_z+24],$a3
2238 st $acc0,[$rp_real+32] ! res_y
2239 st $t0, [$rp_real+32+4]
2240 st $acc1,[$rp_real+32+8]
2241 st $t1, [$rp_real+32+12]
2242 st $acc2,[$rp_real+32+16]
2243 st $t2, [$rp_real+32+20]
2244 st $acc3,[$rp_real+32+24]
2245 st $t3, [$rp_real+32+28]
2249 st $a0,[$rp_real+64] ! res_z
2251 st $t0,[$rp_real+64+4]
2253 st $a1,[$rp_real+64+8]
2254 st $t1,[$rp_real+64+12]
2255 st $a2,[$rp_real+64+16]
2256 st $t2,[$rp_real+64+20]
2257 st $a3,[$rp_real+64+24]
2258 st $t3,[$rp_real+64+28]
2262 .size ecp_nistz256_point_double_vis3,.-ecp_nistz256_point_double_vis3
2265 ########################################################################
2266 # void ecp_nistz256_point_add(P256_POINT *out,const P256_POINT *in1,
2267 # const P256_POINT *in2);
2269 my ($res_x,$res_y,$res_z,
2270 $in1_x,$in1_y,$in1_z,
2271 $in2_x,$in2_y,$in2_z,
2272 $H,$Hsqr,$R,$Rsqr,$Hcub,
2273 $U1,$U2,$S1,$S2)=map(32*$_,(0..17));
2274 my ($Z1sqr, $Z2sqr) = ($Hsqr, $Rsqr);
2276 # above map() describes stack layout with 18 temporary
2277 # 256-bit vectors on top. Then we reserve some space for
2278 # !in1infty, !in2infty and result of check for zero.
2281 .globl ecp_nistz256_point_add_vis3
2283 ecp_nistz256_point_add_vis3:
2284 save %sp,-STACK64_FRAME-32*18-32,%sp
2289 sllx $minus1,32,$poly1 ! 0xFFFFFFFF00000000
2290 srl $poly3,0,$poly3 ! 0x00000000FFFFFFFE
2292 ! convert input to uint64_t[4]
2293 ld [$bp],$a0 ! in2_x
2303 ld [$bp+32],$acc0 ! in2_y
2311 ld [$bp+32+16],$acc2
2315 ld [$bp+32+24],$acc3
2319 stx $a0,[%sp+LOCALS64+$in2_x]
2321 stx $a1,[%sp+LOCALS64+$in2_x+8]
2323 stx $a2,[%sp+LOCALS64+$in2_x+16]
2325 stx $a3,[%sp+LOCALS64+$in2_x+24]
2327 stx $acc0,[%sp+LOCALS64+$in2_y]
2329 stx $acc1,[%sp+LOCALS64+$in2_y+8]
2331 stx $acc2,[%sp+LOCALS64+$in2_y+16]
2332 stx $acc3,[%sp+LOCALS64+$in2_y+24]
2336 or $acc1,$acc0,$acc0
2337 or $acc3,$acc2,$acc2
2339 or $acc2,$acc0,$acc0
2341 movrnz $a0,-1,$a0 ! !in2infty
2342 stx $a0,[%fp+STACK_BIAS-8]
2344 ld [$bp+64],$acc0 ! in2_z
2348 ld [$bp+64+16],$acc2
2350 ld [$bp+64+24],$acc3
2354 ld [$ap],$a0 ! in1_x
2370 stx $acc0,[%sp+LOCALS64+$in2_z]
2372 stx $acc1,[%sp+LOCALS64+$in2_z+8]
2374 stx $acc2,[%sp+LOCALS64+$in2_z+16]
2375 stx $acc3,[%sp+LOCALS64+$in2_z+24]
2378 ld [$ap+32],$acc0 ! in1_y
2385 ld [$ap+32+16],$acc2
2387 ld [$ap+32+24],$acc3
2391 stx $a0,[%sp+LOCALS64+$in1_x]
2393 stx $a1,[%sp+LOCALS64+$in1_x+8]
2395 stx $a2,[%sp+LOCALS64+$in1_x+16]
2397 stx $a3,[%sp+LOCALS64+$in1_x+24]
2399 stx $acc0,[%sp+LOCALS64+$in1_y]
2401 stx $acc1,[%sp+LOCALS64+$in1_y+8]
2403 stx $acc2,[%sp+LOCALS64+$in1_y+16]
2404 stx $acc3,[%sp+LOCALS64+$in1_y+24]
2408 or $acc1,$acc0,$acc0
2409 or $acc3,$acc2,$acc2
2411 or $acc2,$acc0,$acc0
2413 movrnz $a0,-1,$a0 ! !in1infty
2414 stx $a0,[%fp+STACK_BIAS-16]
2416 ldx [%sp+LOCALS64+$in2_z],$a0 ! forward load
2417 ldx [%sp+LOCALS64+$in2_z+8],$a1
2418 ldx [%sp+LOCALS64+$in2_z+16],$a2
2419 ldx [%sp+LOCALS64+$in2_z+24],$a3
2421 ld [$ap+64],$acc0 ! in1_z
2425 ld [$ap+64+16],$acc2
2427 ld [$ap+64+24],$acc3
2435 stx $acc0,[%sp+LOCALS64+$in1_z]
2437 stx $acc1,[%sp+LOCALS64+$in1_z+8]
2439 stx $acc2,[%sp+LOCALS64+$in1_z+16]
2440 stx $acc3,[%sp+LOCALS64+$in1_z+24]
2442 call __ecp_nistz256_sqr_mont_vis3 ! p256_sqr_mont(Z2sqr, in2_z);
2443 add %sp,LOCALS64+$Z2sqr,$rp
2445 ldx [%sp+LOCALS64+$in1_z],$a0
2446 ldx [%sp+LOCALS64+$in1_z+8],$a1
2447 ldx [%sp+LOCALS64+$in1_z+16],$a2
2448 ldx [%sp+LOCALS64+$in1_z+24],$a3
2449 call __ecp_nistz256_sqr_mont_vis3 ! p256_sqr_mont(Z1sqr, in1_z);
2450 add %sp,LOCALS64+$Z1sqr,$rp
2452 ldx [%sp+LOCALS64+$Z2sqr],$bi
2453 ldx [%sp+LOCALS64+$in2_z],$a0
2454 ldx [%sp+LOCALS64+$in2_z+8],$a1
2455 ldx [%sp+LOCALS64+$in2_z+16],$a2
2456 ldx [%sp+LOCALS64+$in2_z+24],$a3
2457 add %sp,LOCALS64+$Z2sqr,$bp
2458 call __ecp_nistz256_mul_mont_vis3 ! p256_mul_mont(S1, Z2sqr, in2_z);
2459 add %sp,LOCALS64+$S1,$rp
2461 ldx [%sp+LOCALS64+$Z1sqr],$bi
2462 ldx [%sp+LOCALS64+$in1_z],$a0
2463 ldx [%sp+LOCALS64+$in1_z+8],$a1
2464 ldx [%sp+LOCALS64+$in1_z+16],$a2
2465 ldx [%sp+LOCALS64+$in1_z+24],$a3
2466 add %sp,LOCALS64+$Z1sqr,$bp
2467 call __ecp_nistz256_mul_mont_vis3 ! p256_mul_mont(S2, Z1sqr, in1_z);
2468 add %sp,LOCALS64+$S2,$rp
2470 ldx [%sp+LOCALS64+$S1],$bi
2471 ldx [%sp+LOCALS64+$in1_y],$a0
2472 ldx [%sp+LOCALS64+$in1_y+8],$a1
2473 ldx [%sp+LOCALS64+$in1_y+16],$a2
2474 ldx [%sp+LOCALS64+$in1_y+24],$a3
2475 add %sp,LOCALS64+$S1,$bp
2476 call __ecp_nistz256_mul_mont_vis3 ! p256_mul_mont(S1, S1, in1_y);
2477 add %sp,LOCALS64+$S1,$rp
2479 ldx [%sp+LOCALS64+$S2],$bi
2480 ldx [%sp+LOCALS64+$in2_y],$a0
2481 ldx [%sp+LOCALS64+$in2_y+8],$a1
2482 ldx [%sp+LOCALS64+$in2_y+16],$a2
2483 ldx [%sp+LOCALS64+$in2_y+24],$a3
2484 add %sp,LOCALS64+$S2,$bp
2485 call __ecp_nistz256_mul_mont_vis3 ! p256_mul_mont(S2, S2, in2_y);
2486 add %sp,LOCALS64+$S2,$rp
2488 ldx [%sp+LOCALS64+$Z2sqr],$bi ! forward load
2489 ldx [%sp+LOCALS64+$in1_x],$a0
2490 ldx [%sp+LOCALS64+$in1_x+8],$a1
2491 ldx [%sp+LOCALS64+$in1_x+16],$a2
2492 ldx [%sp+LOCALS64+$in1_x+24],$a3
2494 add %sp,LOCALS64+$S1,$bp
2495 call __ecp_nistz256_sub_from_vis3 ! p256_sub(R, S2, S1);
2496 add %sp,LOCALS64+$R,$rp
2498 or $acc1,$acc0,$acc0 ! see if result is zero
2499 or $acc3,$acc2,$acc2
2500 or $acc2,$acc0,$acc0
2501 stx $acc0,[%fp+STACK_BIAS-24]
2503 add %sp,LOCALS64+$Z2sqr,$bp
2504 call __ecp_nistz256_mul_mont_vis3 ! p256_mul_mont(U1, in1_x, Z2sqr);
2505 add %sp,LOCALS64+$U1,$rp
2507 ldx [%sp+LOCALS64+$Z1sqr],$bi
2508 ldx [%sp+LOCALS64+$in2_x],$a0
2509 ldx [%sp+LOCALS64+$in2_x+8],$a1
2510 ldx [%sp+LOCALS64+$in2_x+16],$a2
2511 ldx [%sp+LOCALS64+$in2_x+24],$a3
2512 add %sp,LOCALS64+$Z1sqr,$bp
2513 call __ecp_nistz256_mul_mont_vis3 ! p256_mul_mont(U2, in2_x, Z1sqr);
2514 add %sp,LOCALS64+$U2,$rp
2516 ldx [%sp+LOCALS64+$R],$a0 ! forward load
2517 ldx [%sp+LOCALS64+$R+8],$a1
2518 ldx [%sp+LOCALS64+$R+16],$a2
2519 ldx [%sp+LOCALS64+$R+24],$a3
2521 add %sp,LOCALS64+$U1,$bp
2522 call __ecp_nistz256_sub_from_vis3 ! p256_sub(H, U2, U1);
2523 add %sp,LOCALS64+$H,$rp
2525 or $acc1,$acc0,$acc0 ! see if result is zero
2526 or $acc3,$acc2,$acc2
2527 orcc $acc2,$acc0,$acc0
2529 bne,pt %xcc,.Ladd_proceed_vis3 ! is_equal(U1,U2)?
2532 ldx [%fp+STACK_BIAS-8],$t0
2533 ldx [%fp+STACK_BIAS-16],$t1
2534 ldx [%fp+STACK_BIAS-24],$t2
2536 be,pt %xcc,.Ladd_proceed_vis3 ! (in1infty || in2infty)?
2539 be,pt %xcc,.Ladd_proceed_vis3 ! is_equal(S1,S2)?
2545 st %g0,[$rp_real+12]
2546 st %g0,[$rp_real+16]
2547 st %g0,[$rp_real+20]
2548 st %g0,[$rp_real+24]
2549 st %g0,[$rp_real+28]
2550 st %g0,[$rp_real+32]
2551 st %g0,[$rp_real+32+4]
2552 st %g0,[$rp_real+32+8]
2553 st %g0,[$rp_real+32+12]
2554 st %g0,[$rp_real+32+16]
2555 st %g0,[$rp_real+32+20]
2556 st %g0,[$rp_real+32+24]
2557 st %g0,[$rp_real+32+28]
2558 st %g0,[$rp_real+64]
2559 st %g0,[$rp_real+64+4]
2560 st %g0,[$rp_real+64+8]
2561 st %g0,[$rp_real+64+12]
2562 st %g0,[$rp_real+64+16]
2563 st %g0,[$rp_real+64+20]
2564 st %g0,[$rp_real+64+24]
2565 st %g0,[$rp_real+64+28]
2571 call __ecp_nistz256_sqr_mont_vis3 ! p256_sqr_mont(Rsqr, R);
2572 add %sp,LOCALS64+$Rsqr,$rp
2574 ldx [%sp+LOCALS64+$H],$bi
2575 ldx [%sp+LOCALS64+$in1_z],$a0
2576 ldx [%sp+LOCALS64+$in1_z+8],$a1
2577 ldx [%sp+LOCALS64+$in1_z+16],$a2
2578 ldx [%sp+LOCALS64+$in1_z+24],$a3
2579 add %sp,LOCALS64+$H,$bp
2580 call __ecp_nistz256_mul_mont_vis3 ! p256_mul_mont(res_z, H, in1_z);
2581 add %sp,LOCALS64+$res_z,$rp
2583 ldx [%sp+LOCALS64+$H],$a0
2584 ldx [%sp+LOCALS64+$H+8],$a1
2585 ldx [%sp+LOCALS64+$H+16],$a2
2586 ldx [%sp+LOCALS64+$H+24],$a3
2587 call __ecp_nistz256_sqr_mont_vis3 ! p256_sqr_mont(Hsqr, H);
2588 add %sp,LOCALS64+$Hsqr,$rp
2590 ldx [%sp+LOCALS64+$res_z],$bi
2591 ldx [%sp+LOCALS64+$in2_z],$a0
2592 ldx [%sp+LOCALS64+$in2_z+8],$a1
2593 ldx [%sp+LOCALS64+$in2_z+16],$a2
2594 ldx [%sp+LOCALS64+$in2_z+24],$a3
2595 add %sp,LOCALS64+$res_z,$bp
2596 call __ecp_nistz256_mul_mont_vis3 ! p256_mul_mont(res_z, res_z, in2_z);
2597 add %sp,LOCALS64+$res_z,$rp
2599 ldx [%sp+LOCALS64+$H],$bi
2600 ldx [%sp+LOCALS64+$Hsqr],$a0
2601 ldx [%sp+LOCALS64+$Hsqr+8],$a1
2602 ldx [%sp+LOCALS64+$Hsqr+16],$a2
2603 ldx [%sp+LOCALS64+$Hsqr+24],$a3
2604 add %sp,LOCALS64+$H,$bp
2605 call __ecp_nistz256_mul_mont_vis3 ! p256_mul_mont(Hcub, Hsqr, H);
2606 add %sp,LOCALS64+$Hcub,$rp
2608 ldx [%sp+LOCALS64+$U1],$bi
2609 ldx [%sp+LOCALS64+$Hsqr],$a0
2610 ldx [%sp+LOCALS64+$Hsqr+8],$a1
2611 ldx [%sp+LOCALS64+$Hsqr+16],$a2
2612 ldx [%sp+LOCALS64+$Hsqr+24],$a3
2613 add %sp,LOCALS64+$U1,$bp
2614 call __ecp_nistz256_mul_mont_vis3 ! p256_mul_mont(U2, U1, Hsqr);
2615 add %sp,LOCALS64+$U2,$rp
2617 call __ecp_nistz256_mul_by_2_vis3 ! p256_mul_by_2(Hsqr, U2);
2618 add %sp,LOCALS64+$Hsqr,$rp
2620 add %sp,LOCALS64+$Rsqr,$bp
2621 call __ecp_nistz256_sub_morf_vis3 ! p256_sub(res_x, Rsqr, Hsqr);
2622 add %sp,LOCALS64+$res_x,$rp
2624 add %sp,LOCALS64+$Hcub,$bp
2625 call __ecp_nistz256_sub_from_vis3 ! p256_sub(res_x, res_x, Hcub);
2626 add %sp,LOCALS64+$res_x,$rp
2628 ldx [%sp+LOCALS64+$S1],$bi ! forward load
2629 ldx [%sp+LOCALS64+$Hcub],$a0
2630 ldx [%sp+LOCALS64+$Hcub+8],$a1
2631 ldx [%sp+LOCALS64+$Hcub+16],$a2
2632 ldx [%sp+LOCALS64+$Hcub+24],$a3
2634 add %sp,LOCALS64+$U2,$bp
2635 call __ecp_nistz256_sub_morf_vis3 ! p256_sub(res_y, U2, res_x);
2636 add %sp,LOCALS64+$res_y,$rp
2638 add %sp,LOCALS64+$S1,$bp
2639 call __ecp_nistz256_mul_mont_vis3 ! p256_mul_mont(S2, S1, Hcub);
2640 add %sp,LOCALS64+$S2,$rp
2642 ldx [%sp+LOCALS64+$R],$bi
2643 ldx [%sp+LOCALS64+$res_y],$a0
2644 ldx [%sp+LOCALS64+$res_y+8],$a1
2645 ldx [%sp+LOCALS64+$res_y+16],$a2
2646 ldx [%sp+LOCALS64+$res_y+24],$a3
2647 add %sp,LOCALS64+$R,$bp
2648 call __ecp_nistz256_mul_mont_vis3 ! p256_mul_mont(res_y, res_y, R);
2649 add %sp,LOCALS64+$res_y,$rp
2651 add %sp,LOCALS64+$S2,$bp
2652 call __ecp_nistz256_sub_from_vis3 ! p256_sub(res_y, res_y, S2);
2653 add %sp,LOCALS64+$res_y,$rp
2655 ldx [%fp+STACK_BIAS-16],$t1 ! !in1infty
2656 ldx [%fp+STACK_BIAS-8],$t2 ! !in2infty
2658 for($i=0;$i<96;$i+=16) { # conditional moves
2660 ldx [%sp+LOCALS64+$res_x+$i],$acc0 ! res
2661 ldx [%sp+LOCALS64+$res_x+$i+8],$acc1
2662 ldx [%sp+LOCALS64+$in2_x+$i],$acc2 ! in2
2663 ldx [%sp+LOCALS64+$in2_x+$i+8],$acc3
2664 ldx [%sp+LOCALS64+$in1_x+$i],$acc4 ! in1
2665 ldx [%sp+LOCALS64+$in1_x+$i+8],$acc5
2666 movrz $t1,$acc2,$acc0
2667 movrz $t1,$acc3,$acc1
2668 movrz $t2,$acc4,$acc0
2669 movrz $t2,$acc5,$acc1
2672 st $acc0,[$rp_real+$i]
2673 st $acc2,[$rp_real+$i+4]
2674 st $acc1,[$rp_real+$i+8]
2675 st $acc3,[$rp_real+$i+12]
2682 .size ecp_nistz256_point_add_vis3,.-ecp_nistz256_point_add_vis3
2685 ########################################################################
2686 # void ecp_nistz256_point_add_affine(P256_POINT *out,const P256_POINT *in1,
2687 # const P256_POINT_AFFINE *in2);
2689 my ($res_x,$res_y,$res_z,
2690 $in1_x,$in1_y,$in1_z,
2692 $U2,$S2,$H,$R,$Hsqr,$Hcub,$Rsqr)=map(32*$_,(0..14));
2694 # above map() describes stack layout with 15 temporary
2695 # 256-bit vectors on top. Then we reserve some space for
2696 # !in1infty and !in2infty.
2700 ecp_nistz256_point_add_affine_vis3:
2701 save %sp,-STACK64_FRAME-32*15-32,%sp
sllx $minus1,32,$poly1 ! 0xFFFFFFFF00000000
srl $poly3,0,$poly3 ! 0x00000000FFFFFFFE

! convert input to uint64_t[4]
ld [$bp],$a0 ! in2_x
ld [$bp+32],$acc0 ! in2_y
ld [$bp+32+16],$acc2
ld [$bp+32+24],$acc3
stx $a0,[%sp+LOCALS64+$in2_x]
stx $a1,[%sp+LOCALS64+$in2_x+8]
stx $a2,[%sp+LOCALS64+$in2_x+16]
stx $a3,[%sp+LOCALS64+$in2_x+24]
stx $acc0,[%sp+LOCALS64+$in2_y]
stx $acc1,[%sp+LOCALS64+$in2_y+8]
stx $acc2,[%sp+LOCALS64+$in2_y+16]
stx $acc3,[%sp+LOCALS64+$in2_y+24]
or $a1,$a0,$a0 ! fold all x and y words together
or $a3,$a2,$a2
or $acc1,$acc0,$acc0
or $acc3,$acc2,$acc2
or $a2,$a0,$a0
or $acc2,$acc0,$acc0
or $acc0,$a0,$a0
movrnz $a0,-1,$a0 ! !in2infty
stx $a0,[%fp+STACK_BIAS-8]
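# In C terms, the load/shift/or sequence above repacks each coordinate
# from eight 32-bit words into four 64-bit limbs and derives the
# infinity flag; a sketch with illustrative names:
#
#	for (i = 0; i < 4; i++)     /* 32-bit words -> 64-bit limbs */
#	    a[i] = (BN_ULONG)p[2*i] | ((BN_ULONG)p[2*i+1] << 32);
#	/* infinity is encoded as the all-zero affine point (0,0) */
#	flag = (x[0]|x[1]|x[2]|x[3] | y[0]|y[1]|y[2]|y[3]) ? -1 : 0;
#	/* i.e. !in2infty: -1 for a finite point, 0 for infinity */
#
# The same repacking and check is repeated for in1 just below.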
ld [$ap],$a0 ! in1_x
ld [$ap+32],$acc0 ! in1_y
ld [$ap+32+16],$acc2
ld [$ap+32+24],$acc3
stx $a0,[%sp+LOCALS64+$in1_x]
stx $a1,[%sp+LOCALS64+$in1_x+8]
stx $a2,[%sp+LOCALS64+$in1_x+16]
stx $a3,[%sp+LOCALS64+$in1_x+24]
stx $acc0,[%sp+LOCALS64+$in1_y]
stx $acc1,[%sp+LOCALS64+$in1_y+8]
stx $acc2,[%sp+LOCALS64+$in1_y+16]
stx $acc3,[%sp+LOCALS64+$in1_y+24]
or $a1,$a0,$a0 ! fold all x and y words together
or $a3,$a2,$a2
or $acc1,$acc0,$acc0
or $acc3,$acc2,$acc2
or $a2,$a0,$a0
or $acc2,$acc0,$acc0
or $acc0,$a0,$a0
movrnz $a0,-1,$a0 ! !in1infty
stx $a0,[%fp+STACK_BIAS-16]
ld [$ap+64],$a0 ! in1_z
stx $a0,[%sp+LOCALS64+$in1_z]
stx $a1,[%sp+LOCALS64+$in1_z+8]
stx $a2,[%sp+LOCALS64+$in1_z+16]
stx $a3,[%sp+LOCALS64+$in1_z+24]
call __ecp_nistz256_sqr_mont_vis3 ! p256_sqr_mont(Z1sqr, in1_z);
add %sp,LOCALS64+$Z1sqr,$rp
ldx [%sp+LOCALS64+$in2_x],$bi
add %sp,LOCALS64+$in2_x,$bp
call __ecp_nistz256_mul_mont_vis3 ! p256_mul_mont(U2, Z1sqr, in2_x);
add %sp,LOCALS64+$U2,$rp
ldx [%sp+LOCALS64+$Z1sqr],$bi ! forward load
ldx [%sp+LOCALS64+$in1_z],$a0
ldx [%sp+LOCALS64+$in1_z+8],$a1
ldx [%sp+LOCALS64+$in1_z+16],$a2
ldx [%sp+LOCALS64+$in1_z+24],$a3
add %sp,LOCALS64+$in1_x,$bp
call __ecp_nistz256_sub_from_vis3 ! p256_sub(H, U2, in1_x);
add %sp,LOCALS64+$H,$rp
add %sp,LOCALS64+$Z1sqr,$bp
call __ecp_nistz256_mul_mont_vis3 ! p256_mul_mont(S2, Z1sqr, in1_z);
add %sp,LOCALS64+$S2,$rp
ldx [%sp+LOCALS64+$H],$bi
ldx [%sp+LOCALS64+$in1_z],$a0
ldx [%sp+LOCALS64+$in1_z+8],$a1
ldx [%sp+LOCALS64+$in1_z+16],$a2
ldx [%sp+LOCALS64+$in1_z+24],$a3
add %sp,LOCALS64+$H,$bp
call __ecp_nistz256_mul_mont_vis3 ! p256_mul_mont(res_z, H, in1_z);
add %sp,LOCALS64+$res_z,$rp
ldx [%sp+LOCALS64+$S2],$bi
ldx [%sp+LOCALS64+$in2_y],$a0
ldx [%sp+LOCALS64+$in2_y+8],$a1
ldx [%sp+LOCALS64+$in2_y+16],$a2
ldx [%sp+LOCALS64+$in2_y+24],$a3
add %sp,LOCALS64+$S2,$bp
call __ecp_nistz256_mul_mont_vis3 ! p256_mul_mont(S2, S2, in2_y);
add %sp,LOCALS64+$S2,$rp
ldx [%sp+LOCALS64+$H],$a0 ! forward load
ldx [%sp+LOCALS64+$H+8],$a1
ldx [%sp+LOCALS64+$H+16],$a2
ldx [%sp+LOCALS64+$H+24],$a3
add %sp,LOCALS64+$in1_y,$bp
call __ecp_nistz256_sub_from_vis3 ! p256_sub(R, S2, in1_y);
add %sp,LOCALS64+$R,$rp
call __ecp_nistz256_sqr_mont_vis3 ! p256_sqr_mont(Hsqr, H);
add %sp,LOCALS64+$Hsqr,$rp
ldx [%sp+LOCALS64+$R],$a0
ldx [%sp+LOCALS64+$R+8],$a1
ldx [%sp+LOCALS64+$R+16],$a2
ldx [%sp+LOCALS64+$R+24],$a3
call __ecp_nistz256_sqr_mont_vis3 ! p256_sqr_mont(Rsqr, R);
add %sp,LOCALS64+$Rsqr,$rp
ldx [%sp+LOCALS64+$H],$bi
ldx [%sp+LOCALS64+$Hsqr],$a0
ldx [%sp+LOCALS64+$Hsqr+8],$a1
ldx [%sp+LOCALS64+$Hsqr+16],$a2
ldx [%sp+LOCALS64+$Hsqr+24],$a3
add %sp,LOCALS64+$H,$bp
call __ecp_nistz256_mul_mont_vis3 ! p256_mul_mont(Hcub, Hsqr, H);
add %sp,LOCALS64+$Hcub,$rp
ldx [%sp+LOCALS64+$Hsqr],$bi
ldx [%sp+LOCALS64+$in1_x],$a0
ldx [%sp+LOCALS64+$in1_x+8],$a1
ldx [%sp+LOCALS64+$in1_x+16],$a2
ldx [%sp+LOCALS64+$in1_x+24],$a3
add %sp,LOCALS64+$Hsqr,$bp
call __ecp_nistz256_mul_mont_vis3 ! p256_mul_mont(U2, in1_x, Hsqr);
add %sp,LOCALS64+$U2,$rp
call __ecp_nistz256_mul_by_2_vis3 ! p256_mul_by_2(Hsqr, U2);
add %sp,LOCALS64+$Hsqr,$rp
add %sp,LOCALS64+$Rsqr,$bp
call __ecp_nistz256_sub_morf_vis3 ! p256_sub(res_x, Rsqr, Hsqr);
add %sp,LOCALS64+$res_x,$rp
add %sp,LOCALS64+$Hcub,$bp
call __ecp_nistz256_sub_from_vis3 ! p256_sub(res_x, res_x, Hcub);
add %sp,LOCALS64+$res_x,$rp
ldx [%sp+LOCALS64+$Hcub],$bi ! forward load
ldx [%sp+LOCALS64+$in1_y],$a0
ldx [%sp+LOCALS64+$in1_y+8],$a1
ldx [%sp+LOCALS64+$in1_y+16],$a2
ldx [%sp+LOCALS64+$in1_y+24],$a3
add %sp,LOCALS64+$U2,$bp
call __ecp_nistz256_sub_morf_vis3 ! p256_sub(res_y, U2, res_x);
add %sp,LOCALS64+$res_y,$rp
add %sp,LOCALS64+$Hcub,$bp
call __ecp_nistz256_mul_mont_vis3 ! p256_mul_mont(S2, in1_y, Hcub);
add %sp,LOCALS64+$S2,$rp
ldx [%sp+LOCALS64+$R],$bi
ldx [%sp+LOCALS64+$res_y],$a0
ldx [%sp+LOCALS64+$res_y+8],$a1
ldx [%sp+LOCALS64+$res_y+16],$a2
ldx [%sp+LOCALS64+$res_y+24],$a3
add %sp,LOCALS64+$R,$bp
call __ecp_nistz256_mul_mont_vis3 ! p256_mul_mont(res_y, res_y, R);
add %sp,LOCALS64+$res_y,$rp
add %sp,LOCALS64+$S2,$bp
call __ecp_nistz256_sub_from_vis3 ! p256_sub(res_y, res_y, S2);
add %sp,LOCALS64+$res_y,$rp
ldx [%fp+STACK_BIAS-16],$t1 ! !in1infty
ldx [%fp+STACK_BIAS-8],$t2 ! !in2infty

1: call .+8
add %o7,.Lone_mont_vis3-1b,$bp ! %o7=1b, so $bp=address of .Lone_mont_vis3
for($i=0;$i<64;$i+=16) { # conditional moves
ldx [%sp+LOCALS64+$res_x+$i],$acc0 ! res
ldx [%sp+LOCALS64+$res_x+$i+8],$acc1
ldx [%sp+LOCALS64+$in2_x+$i],$acc2 ! in2
ldx [%sp+LOCALS64+$in2_x+$i+8],$acc3
ldx [%sp+LOCALS64+$in1_x+$i],$acc4 ! in1
ldx [%sp+LOCALS64+$in1_x+$i+8],$acc5
movrz $t1,$acc2,$acc0
movrz $t1,$acc3,$acc1
movrz $t2,$acc4,$acc0
movrz $t2,$acc5,$acc1
srlx $acc0,32,$acc2 ! unpack 64-bit limbs into 32-bit halves for the stores
srlx $acc1,32,$acc3
st $acc0,[$rp_real+$i]
st $acc2,[$rp_real+$i+4]
st $acc1,[$rp_real+$i+8]
st $acc3,[$rp_real+$i+12]
}
for(;$i<96;$i+=16) { # z coordinate: affine in2 has implicit z=1, taken from .Lone_mont_vis3
ldx [%sp+LOCALS64+$res_x+$i],$acc0 ! res
ldx [%sp+LOCALS64+$res_x+$i+8],$acc1
ldx [$bp+$i-64],$acc2 ! "in2"
ldx [$bp+$i-64+8],$acc3
ldx [%sp+LOCALS64+$in1_x+$i],$acc4 ! in1
ldx [%sp+LOCALS64+$in1_x+$i+8],$acc5
movrz $t1,$acc2,$acc0
movrz $t1,$acc3,$acc1
movrz $t2,$acc4,$acc0
movrz $t2,$acc5,$acc1
srlx $acc0,32,$acc2 ! unpack 64-bit limbs into 32-bit halves for the stores
srlx $acc1,32,$acc3
st $acc0,[$rp_real+$i]
st $acc2,[$rp_real+$i+4]
st $acc1,[$rp_real+$i+8]
st $acc3,[$rp_real+$i+12]
}
ret
restore
.size ecp_nistz256_point_add_affine_vis3,.-ecp_nistz256_point_add_affine_vis3
.Lone_mont_vis3: ! 2^256 mod P, i.e. 1 in Montgomery form
.long 0x00000000,0x00000001, 0xffffffff,0x00000000
.long 0xffffffff,0xffffffff, 0x00000000,0xfffffffe
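# .Lone_mont_vis3 is the number 1 in Montgomery representation. With
# P = 2^256 - 2^224 + 2^192 + 2^96 - 1, one has
#
#	2^256 mod P = 2^256 - P = 2^224 - 2^192 - 2^96 + 1
#
# which is exactly the value above: four 64-bit limbs, least-significant
# limb first, each limb split into big-endian 32-bit halves to suit the
# 64-bit ldx loads in the loop that references it.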
# The purpose of these subroutines is to explicitly encode VIS
# instructions, so that the module can be compiled without specifying
# VIS extensions on the compiler command line, e.g. -xarch=v9 vs.
# -xarch=v9a. The idea is to preserve the option of producing a
# "universal" binary and to let the program detect at run-time whether
# the current CPU is VIS-capable.
sub unvis3 {
my ($mnemonic,$rs1,$rs2,$rd)=@_;
my %bias = ( "g" => 0, "o" => 8, "l" => 16, "i" => 24 );
my ($ref,$opf);
my %visopf = ( "addxc"   => 0x011,
               "addxccc" => 0x013,
               "umulxhi" => 0x016 );
    $ref = "$mnemonic\t$rs1,$rs2,$rd";

    if ($opf=$visopf{$mnemonic}) {
	foreach ($rs1,$rs2,$rd) {
	    return $ref if (!/%([goli])([0-9])/);
	    $_=$bias{$1}+$2;	# translate register name to its 5-bit number
	}
	return sprintf ".word\t0x%08x !%s",
		0x81b00000|$rd<<25|$rs1<<14|$opf<<5|$rs2,
		$ref;
    } else {
	return $ref;		# pass through anything we don't recognize
    }
}
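# Worked example: `addxc %o1,%o2,%o3' maps to rs1=%o1 -> 8+1=9,
# rs2=%o2 -> 10, rd=%o3 -> 11, opf=0x011, so the line is emitted as
#
#	.word	0x97b2422a !addxc	%o1,%o2,%o3
#
# since 0x81b00000 | 11<<25 | 9<<14 | 0x011<<5 | 10 = 0x97b2422a.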
foreach (split("\n",$code)) {
	s/\`([^\`]*)\`/eval $1/ge;

	s/\b(umulxhi|addxc[c]{0,2})\s+(%[goli][0-7]),\s*(%[goli][0-7]),\s*(%[goli][0-7])/
		&unvis3($1,$2,$3,$4)
	 /ge;

	print $_,"\n";
}
close STDOUT;