# Copyright 2015-2016 The OpenSSL Project Authors. All Rights Reserved.
#
# Licensed under the OpenSSL license (the "License"). You may not use
# this file except in compliance with the License. You can obtain a copy
# in the file LICENSE in the source distribution or at
# https://www.openssl.org/source/license.html

# ====================================================================
# Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
# project. The module is, however, dual licensed under OpenSSL and
# CRYPTOGAMS licenses depending on where you obtain it. For further
# details see http://www.openssl.org/~appro/cryptogams/.
# ====================================================================

# ECP_NISTZ256 module for SPARCv9.
# Original ECP_NISTZ256 submission targeting x86_64 is detailed in
# http://eprint.iacr.org/2013/816. In the process of adaptation the
# original .c module was made 32-bit savvy in order to make this
# implementation possible.
#			with/without -DECP_NISTZ256_ASM
# UltraSPARC III	+12-18%
# SPARC T4		+99-550% (+66-150% on 32-bit Solaris)
#
# Ranges denote minimum and maximum improvement coefficients depending
# on benchmark. Lower coefficients are for ECDSA sign, server-side
# operation. Keep in mind that +200% means 3x improvement.

open STDOUT,">$output";
#include "sparc_arch.h"

#define LOCALS	(STACK_BIAS+STACK_FRAME)
.register	%g2,#scratch
.register	%g3,#scratch
# define STACK64_FRAME	STACK_FRAME
# define LOCALS64	LOCALS
# define STACK64_FRAME	(2047+192)
# define LOCALS64	STACK64_FRAME

.section	".text",#alloc,#execinstr
########################################################################
# Convert ecp_nistz256_table.c to layout expected by ecp_nistz_gather_w7

$0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
open TABLE,"<ecp_nistz256_table.c"		or
open TABLE,"<${dir}../ecp_nistz256_table.c"	or
die "failed to open ecp_nistz256_table.c:",$!;

s/TOBN\(\s*(0x[0-9a-f]+),\s*(0x[0-9a-f]+)\s*\)/push @arr,hex($2),hex($1)/geo;
# See ecp_nistz256_table.c for explanation for why it's 64*16*37.
# 64*16*37-1 is because $#arr returns last valid index of @arr, not
# the number of elements.
die "insane number of elements" if ($#arr != 64*16*37-1);
.globl	ecp_nistz256_precomputed
ecp_nistz256_precomputed:
########################################################################
# this conversion smashes P256_POINT_AFFINE by individual bytes with
# 64-byte interval (see the illustration after the loop below)
@tbl = splice(@arr,0,64*16);
for($i=0;$i<64;$i++) {
	my @line;
	for($j=0;$j<64;$j++) {
		push @line,(@tbl[$j*16+$i/4]>>(($i%4)*8))&0xff;
	}
	$code.=join(',',map { sprintf "0x%02x",$_} @line);
}
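
# A hedged illustration of the resulting layout (not part of the generated
# table): byte $i of entry $j within one 64-entry bucket is
#
#	($tbl[$j*16 + int($i/4)] >> (($i%4)*8)) & 0xff
#
# and is emitted at offset 64*$i + $j, so ecp_nistz_gather_w7 can fetch a
# whole entry by loading every 64th byte.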
my ($rp,$ap,$bp)=map("%i$_",(0..2));
my @acc=map("%l$_",(0..7));
my ($t0,$t1,$t2,$t3,$t4,$t5,$t6,$t7)=(map("%o$_",(0..5)),"%g4","%g5");
my ($bi,$a0,$mask,$carry)=(map("%i$_",(3..5)),"%g1");
my ($rp_real,$ap_real)=("%g2","%g3");

.type	ecp_nistz256_precomputed,#object
.size	ecp_nistz256_precomputed,.-ecp_nistz256_precomputed

.LRR:	! 2^512 mod P precomputed for NIST P256 polynomial
.long	0x00000003, 0x00000000, 0xffffffff, 0xfffffffb
.long	0xfffffffe, 0xffffffff, 0xfffffffd, 0x00000004
.long	1,0,0,0,0,0,0,0
.asciz	"ECP_NISTZ256 for SPARCv9, CRYPTOGAMS by <appro\@openssl.org>"
! void	ecp_nistz256_to_mont(BN_ULONG %i0[8],const BN_ULONG %i1[8]);
.globl	ecp_nistz256_to_mont
ecp_nistz256_to_mont:
	save	%sp,-STACK_FRAME,%sp
	call	__ecp_nistz256_mul_mont
.type	ecp_nistz256_to_mont,#function
.size	ecp_nistz256_to_mont,.-ecp_nistz256_to_mont

! void	ecp_nistz256_from_mont(BN_ULONG %i0[8],const BN_ULONG %i1[8]);
.globl	ecp_nistz256_from_mont
ecp_nistz256_from_mont:
	save	%sp,-STACK_FRAME,%sp
	call	__ecp_nistz256_mul_mont
.type	ecp_nistz256_from_mont,#function
.size	ecp_nistz256_from_mont,.-ecp_nistz256_from_mont

! void	ecp_nistz256_mul_mont(BN_ULONG %i0[8],const BN_ULONG %i1[8],
!					      const BN_ULONG %i2[8]);
.globl	ecp_nistz256_mul_mont
ecp_nistz256_mul_mont:
	save	%sp,-STACK_FRAME,%sp
	call	__ecp_nistz256_mul_mont
.type	ecp_nistz256_mul_mont,#function
.size	ecp_nistz256_mul_mont,.-ecp_nistz256_mul_mont
! void	ecp_nistz256_sqr_mont(BN_ULONG %i0[8],const BN_ULONG %i1[8]);
.globl	ecp_nistz256_sqr_mont
ecp_nistz256_sqr_mont:
	save	%sp,-STACK_FRAME,%sp
	call	__ecp_nistz256_mul_mont
.type	ecp_nistz256_sqr_mont,#function
.size	ecp_nistz256_sqr_mont,.-ecp_nistz256_sqr_mont
########################################################################
# Special thing to keep in mind is that $t0-$t7 hold 64-bit values,
# while all others are meant to hold 32-bit ones. "Meant to" means that
# additions to @acc[0-7] do "contaminate" upper bits, but they are
# cleared before they can affect the outcome (follow 'and' with $mask).
# Also keep in mind that addition with carry is addition with 32-bit
# carry, even though CPU is 64-bit. [Addition with 64-bit carry was
# introduced in T3, see below for VIS3 code paths.]
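#
# For instance (illustration only): after "addcc @acc[1],$t1,@acc[1]" the
# destination may be up to 33 bits wide; a C model of the invariant is
#
#	acc[i] = (acc[i] + t + carry_in) & 0xffffffff;	/* carry out in %icc */
#
# with the stray upper bits cleared by the 'and' with $mask before use.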
__ecp_nistz256_mul_mont:
	ld	[$bp+0],$bi		! b[0]
	srl	$mask,0,$mask		! 0xffffffff
	mulx	$a0,$bi,$t0		! a[0-7]*b[0], 64-bit results
	srlx	$t0,32,@acc[1]		! extract high parts
	srlx	$t7,32,@acc[0]		! "@acc[8]"
for($i=1;$i<8;$i++) {
	addcc	@acc[1],$t1,@acc[1]	! accumulate high parts
	ld	[$bp+4*$i],$bi		! b[$i]
	ld	[$ap+4],$t1		! re-load a[1-7]
	addccc	@acc[2],$t2,@acc[2]
	addccc	@acc[3],$t3,@acc[3]
	addccc	@acc[4],$t4,@acc[4]
	addccc	@acc[5],$t5,@acc[5]
	addccc	@acc[6],$t6,@acc[6]
	addccc	@acc[7],$t7,@acc[7]
	addccc	@acc[0],$carry,@acc[0]	! "@acc[8]"
	# Reduction iteration is normally performed by accumulating
	# result of multiplication of modulus by "magic" digit [and
	# omitting least significant word, which is guaranteed to
	# be 0], but thanks to special form of modulus and "magic"
	# digit being equal to least significant word, it can be
	# performed with additions and subtractions alone. Indeed:
	#
	#            ffff.0001.0000.0000.0000.ffff.ffff.ffff
	# *                                              abcd
	# + xxxx.xxxx.xxxx.xxxx.xxxx.xxxx.xxxx.xxxx.abcd
	#
	# Now observing that ff..ff*x = (2^n-1)*x = 2^n*x-x, we
	# rewrite above as:
	#
	#   xxxx.xxxx.xxxx.xxxx.xxxx.xxxx.xxxx.xxxx.abcd
	# + abcd.0000.abcd.0000.0000.abcd.0000.0000.0000
	# - abcd.0000.0000.0000.0000.0000.0000.abcd
	#
	# or marking redundant operations:
	#
	#   xxxx.xxxx.xxxx.xxxx.xxxx.xxxx.xxxx.xxxx.----
	# + abcd.0000.abcd.0000.0000.abcd.----.----.----
	# - abcd.----.----.----.----.----.----.----
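	#
	# A hedged C-level model of one such iteration (illustration only):
	#
	#	t = acc[0];		/* the "magic" digit, abcd        */
	#	acc[3] += t;		/* the "+ abcd.0000.abcd..." row, */
	#	acc[6] += t;		/*   carries chained through      */
	#	acc[8] += t;		/*   acc[4], acc[5] and acc[7]    */
	#	acc[7] -= t;		/* the "- abcd....abcd" row       */
	#	/* acc[0] is then dropped, shifting the window by a word */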
	! multiplication-less reduction
	addcc	@acc[3],$t0,@acc[3]	! r[3]+=r[0]
	addccc	@acc[4],%g0,@acc[4]	! r[4]+=0
	 and	@acc[1],$mask,@acc[1]
	 and	@acc[2],$mask,@acc[2]
	addccc	@acc[5],%g0,@acc[5]	! r[5]+=0
	addccc	@acc[6],$t0,@acc[6]	! r[6]+=r[0]
	 and	@acc[3],$mask,@acc[3]
	 and	@acc[4],$mask,@acc[4]
	addccc	@acc[7],%g0,@acc[7]	! r[7]+=0
	addccc	@acc[0],$t0,@acc[0]	! r[8]+=r[0]	"@acc[8]"
	 and	@acc[5],$mask,@acc[5]
	 and	@acc[6],$mask,@acc[6]
	addc	$carry,%g0,$carry	! top-most carry
	subcc	@acc[7],$t0,@acc[7]	! r[7]-=r[0]
	subccc	@acc[0],%g0,@acc[0]	! r[8]-=0	"@acc[8]"
	subc	$carry,%g0,$carry	! top-most carry
	and	@acc[7],$mask,@acc[7]
	and	@acc[0],$mask,@acc[0]	! "@acc[8]"
push(@acc,shift(@acc));			# rotate registers to "omit" acc[0]

	mulx	$a0,$bi,$t0		! a[0-7]*b[$i], 64-bit results
	add	@acc[0],$t0,$t0		! accumulate low parts, can't overflow
	srlx	$t0,32,@acc[1]		! extract high parts
	srlx	$t7,32,@acc[0]		! "@acc[8]"
	addcc	@acc[1],$t1,@acc[1]	! accumulate high parts
	addccc	@acc[2],$t2,@acc[2]
	addccc	@acc[3],$t3,@acc[3]
	addccc	@acc[4],$t4,@acc[4]
	addccc	@acc[5],$t5,@acc[5]
	addccc	@acc[6],$t6,@acc[6]
	addccc	@acc[7],$t7,@acc[7]
	addccc	@acc[0],$carry,@acc[0]	! "@acc[8]"

	addcc	@acc[3],$t0,@acc[3]	! multiplication-less reduction
	addccc	@acc[4],%g0,@acc[4]
	addccc	@acc[5],%g0,@acc[5]
	addccc	@acc[6],$t0,@acc[6]
	addccc	@acc[7],%g0,@acc[7]
	addccc	@acc[0],$t0,@acc[0]	! "@acc[8]"
	addc	$carry,%g0,$carry
	subcc	@acc[7],$t0,@acc[7]
	subccc	@acc[0],%g0,@acc[0]	! "@acc[8]"
	subc	$carry,%g0,$carry	! top-most carry

push(@acc,shift(@acc));			# rotate registers to omit acc[0]
	! Final step is "if result > mod, subtract mod", but we do it
	! "other way around", namely subtract modulus from result
	! and if it borrowed, add modulus back.

	subcc	@acc[0],-1,@acc[0]	! subtract modulus
	subccc	@acc[1],-1,@acc[1]
	subccc	@acc[2],-1,@acc[2]
	subccc	@acc[3],0,@acc[3]
	subccc	@acc[4],0,@acc[4]
	subccc	@acc[5],0,@acc[5]
	subccc	@acc[6],1,@acc[6]
	subccc	@acc[7],-1,@acc[7]
	subc	$carry,0,$carry		! broadcast borrow bit
	! Note that because mod has special form, i.e. consists of
	! 0xffffffff, 1 and 0s, we can conditionally synthesize it by
	! using the value of the broadcast borrow and the borrow bit
	! itself. To minimize dependency chain we first broadcast and
	! then extract the bit by negating (follow $bi).
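	!
	! For instance (illustration only): with $carry = 0 or -1 after
	! the broadcast and $bi = -$carry = 0 or 1, the words added back,
	!
	!	{ carry, carry, carry, 0, 0, 0, bi, carry },
	!
	! equal P256 = { -1, -1, -1, 0, 0, 0, 1, -1 } (least significant
	! word first) when the subtraction borrowed, and zero otherwise.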
	addcc	@acc[0],$carry,@acc[0]	! add modulus or zero
	addccc	@acc[1],$carry,@acc[1]
	addccc	@acc[2],$carry,@acc[2]
	addccc	@acc[3],0,@acc[3]
	addccc	@acc[4],0,@acc[4]
	addccc	@acc[5],0,@acc[5]
	addccc	@acc[6],$bi,@acc[6]
	addc	@acc[7],$carry,@acc[7]
.type	__ecp_nistz256_mul_mont,#function
.size	__ecp_nistz256_mul_mont,.-__ecp_nistz256_mul_mont
! void	ecp_nistz256_add(BN_ULONG %i0[8],const BN_ULONG %i1[8],
!					 const BN_ULONG %i2[8]);
.globl	ecp_nistz256_add
	save	%sp,-STACK_FRAME,%sp
	call	__ecp_nistz256_add
.type	ecp_nistz256_add,#function
.size	ecp_nistz256_add,.-ecp_nistz256_add

	ld	[$bp+0],$t0		! b[0]
	addcc	@acc[0],$t0,@acc[0]
	addccc	@acc[1],$t1,@acc[1]
	addccc	@acc[2],$t2,@acc[2]
	addccc	@acc[3],$t3,@acc[3]
	addccc	@acc[4],$t4,@acc[4]
	addccc	@acc[5],$t5,@acc[5]
	addccc	@acc[6],$t6,@acc[6]
	addccc	@acc[7],$t7,@acc[7]
	! if a+b >= modulus, subtract modulus.
	!
	! But since comparison implies subtraction, we subtract
	! modulus and then add it back if subtraction borrowed.
	subcc	@acc[0],-1,@acc[0]
	subccc	@acc[1],-1,@acc[1]
	subccc	@acc[2],-1,@acc[2]
	subccc	@acc[3], 0,@acc[3]
	subccc	@acc[4], 0,@acc[4]
	subccc	@acc[5], 0,@acc[5]
	subccc	@acc[6], 1,@acc[6]
	subccc	@acc[7],-1,@acc[7]

	! Note that because mod has special form, i.e. consists of
	! 0xffffffff, 1 and 0s, we can conditionally synthesize it by
	! using value of borrow and its negative.

	addcc	@acc[0],$carry,@acc[0]	! add synthesized modulus
	addccc	@acc[1],$carry,@acc[1]
	addccc	@acc[2],$carry,@acc[2]
	addccc	@acc[3],0,@acc[3]
	addccc	@acc[4],0,@acc[4]
	addccc	@acc[5],0,@acc[5]
	addccc	@acc[6],$bi,@acc[6]
	addc	@acc[7],$carry,@acc[7]
.type	__ecp_nistz256_add,#function
.size	__ecp_nistz256_add,.-__ecp_nistz256_add
! void	ecp_nistz256_mul_by_2(BN_ULONG %i0[8],const BN_ULONG %i1[8]);
.globl	ecp_nistz256_mul_by_2
ecp_nistz256_mul_by_2:
	save	%sp,-STACK_FRAME,%sp
	call	__ecp_nistz256_mul_by_2
.type	ecp_nistz256_mul_by_2,#function
.size	ecp_nistz256_mul_by_2,.-ecp_nistz256_mul_by_2

__ecp_nistz256_mul_by_2:
	addcc	@acc[0],@acc[0],@acc[0]	! a+a=2*a
	addccc	@acc[1],@acc[1],@acc[1]
	addccc	@acc[2],@acc[2],@acc[2]
	addccc	@acc[3],@acc[3],@acc[3]
	addccc	@acc[4],@acc[4],@acc[4]
	addccc	@acc[5],@acc[5],@acc[5]
	addccc	@acc[6],@acc[6],@acc[6]
	addccc	@acc[7],@acc[7],@acc[7]
.type	__ecp_nistz256_mul_by_2,#function
.size	__ecp_nistz256_mul_by_2,.-__ecp_nistz256_mul_by_2

! void	ecp_nistz256_mul_by_3(BN_ULONG %i0[8],const BN_ULONG %i1[8]);
.globl	ecp_nistz256_mul_by_3
ecp_nistz256_mul_by_3:
	save	%sp,-STACK_FRAME,%sp
	call	__ecp_nistz256_mul_by_3
.type	ecp_nistz256_mul_by_3,#function
.size	ecp_nistz256_mul_by_3,.-ecp_nistz256_mul_by_3

__ecp_nistz256_mul_by_3:
	addcc	@acc[0],@acc[0],$t0	! a+a=2*a
	addccc	@acc[1],@acc[1],$t1
	addccc	@acc[2],@acc[2],$t2
	addccc	@acc[3],@acc[3],$t3
	addccc	@acc[4],@acc[4],$t4
	addccc	@acc[5],@acc[5],$t5
	addccc	@acc[6],@acc[6],$t6
	addccc	@acc[7],@acc[7],$t7

	subcc	$t0,-1,$t0		! .Lreduce_by_sub but without stores

	addcc	$t0,$carry,$t0		! add synthesized modulus
	addccc	$t1,$carry,$t1
	addccc	$t2,$carry,$t2

	addcc	$t0,@acc[0],@acc[0]	! 2*a+a=3*a
	addccc	$t1,@acc[1],@acc[1]
	addccc	$t2,@acc[2],@acc[2]
	addccc	$t3,@acc[3],@acc[3]
	addccc	$t4,@acc[4],@acc[4]
	addccc	$t5,@acc[5],@acc[5]
	addccc	$t6,@acc[6],@acc[6]
	addccc	$t7,@acc[7],@acc[7]
.type	__ecp_nistz256_mul_by_3,#function
.size	__ecp_nistz256_mul_by_3,.-__ecp_nistz256_mul_by_3
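
! Conceptually (illustration only, not generated code) the above computes
!
!	t = reduce(a + a);	/* doubling, conditional subtract of mod */
!	r = reduce(t + a);	/* 2*a+a = 3*a, second conditional pass  */
!
! where reduce() is the subtract-then-conditionally-add-back pattern used
! throughout this file.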
! void	ecp_nistz256_sub(BN_ULONG %i0[8],const BN_ULONG %i1[8],
!					 const BN_ULONG %i2[8]);
.globl	ecp_nistz256_sub
	save	%sp,-STACK_FRAME,%sp
	call	__ecp_nistz256_sub_from
.type	ecp_nistz256_sub,#function
.size	ecp_nistz256_sub,.-ecp_nistz256_sub

! void	ecp_nistz256_neg(BN_ULONG %i0[8],const BN_ULONG %i1[8]);
.globl	ecp_nistz256_neg
	save	%sp,-STACK_FRAME,%sp
	call	__ecp_nistz256_sub_from
.type	ecp_nistz256_neg,#function
.size	ecp_nistz256_neg,.-ecp_nistz256_neg

__ecp_nistz256_sub_from:
	ld	[$bp+0],$t0		! b[0]
	subcc	@acc[0],$t0,@acc[0]
	subccc	@acc[1],$t1,@acc[1]
	subccc	@acc[2],$t2,@acc[2]
	subccc	@acc[3],$t3,@acc[3]
	subccc	@acc[4],$t4,@acc[4]
	subccc	@acc[5],$t5,@acc[5]
	subccc	@acc[6],$t6,@acc[6]
	subccc	@acc[7],$t7,@acc[7]
	subc	%g0,%g0,$carry		! broadcast borrow bit
	! if a-b borrows, add modulus.
	!
	! Note that because mod has special form, i.e. consists of
	! 0xffffffff, 1 and 0s, we can conditionally synthesize it by
	! using the value of the broadcast borrow and the borrow bit
	! itself. To minimize dependency chain we first broadcast and
	! then extract the bit by negating (follow $bi).
	addcc	@acc[0],$carry,@acc[0]	! add synthesized modulus
	addccc	@acc[1],$carry,@acc[1]
	addccc	@acc[2],$carry,@acc[2]
	addccc	@acc[3],0,@acc[3]
	addccc	@acc[4],0,@acc[4]
	addccc	@acc[5],0,@acc[5]
	addccc	@acc[6],$bi,@acc[6]
	addc	@acc[7],$carry,@acc[7]
.type	__ecp_nistz256_sub_from,#function
.size	__ecp_nistz256_sub_from,.-__ecp_nistz256_sub_from

__ecp_nistz256_sub_morf:
	ld	[$bp+0],$t0		! b[0]
	subcc	$t0,@acc[0],@acc[0]
	subccc	$t1,@acc[1],@acc[1]
	subccc	$t2,@acc[2],@acc[2]
	subccc	$t3,@acc[3],@acc[3]
	subccc	$t4,@acc[4],@acc[4]
	subccc	$t5,@acc[5],@acc[5]
	subccc	$t6,@acc[6],@acc[6]
	subccc	$t7,@acc[7],@acc[7]
	subc	%g0,%g0,$carry		! broadcast borrow bit
.type	__ecp_nistz256_sub_morf,#function
.size	__ecp_nistz256_sub_morf,.-__ecp_nistz256_sub_morf
! void	ecp_nistz256_div_by_2(BN_ULONG %i0[8],const BN_ULONG %i1[8]);
.globl	ecp_nistz256_div_by_2
ecp_nistz256_div_by_2:
	save	%sp,-STACK_FRAME,%sp
	call	__ecp_nistz256_div_by_2
.type	ecp_nistz256_div_by_2,#function
.size	ecp_nistz256_div_by_2,.-ecp_nistz256_div_by_2

__ecp_nistz256_div_by_2:
	! ret = (a is odd ? a+mod : a) >> 1
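	!
	! A C-level sketch (illustration only, assuming a < mod on entry):
	!
	!	mask = 0 - (a[0] & 1);		/* all ones if a is odd  */
	!	a   += mod & mask;		/* even sum, may carry   */
	!	ret  = (a >> 1) | (carry << 255);
	!
	! a+mod is even whenever a is odd, because mod itself is odd.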
	addcc	@acc[0],$carry,@acc[0]
	addccc	@acc[1],$carry,@acc[1]
	addccc	@acc[2],$carry,@acc[2]
	addccc	@acc[3],0,@acc[3]
	addccc	@acc[4],0,@acc[4]
	addccc	@acc[5],0,@acc[5]
	addccc	@acc[6],$bi,@acc[6]
	addccc	@acc[7],$carry,@acc[7]

	srl	@acc[0],1,@acc[0]
	srl	@acc[1],1,@acc[1]
	or	@acc[0],$t0,@acc[0]
	srl	@acc[2],1,@acc[2]
	or	@acc[1],$t1,@acc[1]
	srl	@acc[3],1,@acc[3]
	or	@acc[2],$t2,@acc[2]
	srl	@acc[4],1,@acc[4]
	or	@acc[3],$t3,@acc[3]
	srl	@acc[5],1,@acc[5]
	or	@acc[4],$t4,@acc[4]
	srl	@acc[6],1,@acc[6]
	or	@acc[5],$t5,@acc[5]
	srl	@acc[7],1,@acc[7]
	or	@acc[6],$t6,@acc[6]
	or	@acc[7],$t7,@acc[7]
.type	__ecp_nistz256_div_by_2,#function
.size	__ecp_nistz256_div_by_2,.-__ecp_nistz256_div_by_2
########################################################################
# following subroutines are "literal" implementation of those found in
# ecp_nistz256.c
########################################################################
# void ecp_nistz256_point_double(P256_POINT *out,const P256_POINT *inp);

my ($S,$M,$Zsqr,$tmp0)=map(32*$_,(0..3));
# above map() describes stack layout with 4 temporary
# 256-bit vectors on top.
.globl	ecp_nistz256_point_double
ecp_nistz256_point_double:
	SPARC_LOAD_ADDRESS_LEAF(OPENSSL_sparcv9cap_P,%g1,%g5)
	ld	[%g1],%g1		! OPENSSL_sparcv9cap_P[0]
	and	%g1,(SPARCV9_VIS3|SPARCV9_64BIT_STACK),%g1
	cmp	%g1,(SPARCV9_VIS3|SPARCV9_64BIT_STACK)
	be	ecp_nistz256_point_double_vis3
	save	%sp,-STACK_FRAME-32*4,%sp
.Lpoint_double_shortcut:
	ld	[$ap+32+4],@acc[1]
	ld	[$ap+32+8],@acc[2]
	ld	[$ap+32+12],@acc[3]
	ld	[$ap+32+16],@acc[4]
	ld	[$ap+32+20],@acc[5]
	ld	[$ap+32+24],@acc[6]
	ld	[$ap+32+28],@acc[7]
	call	__ecp_nistz256_mul_by_2	! p256_mul_by_2(S, in_y);
	add	%sp,LOCALS+$S,$rp

	call	__ecp_nistz256_mul_mont	! p256_sqr_mont(Zsqr, in_z);
	add	%sp,LOCALS+$Zsqr,$rp

	call	__ecp_nistz256_add	! p256_add(M, Zsqr, in_x);
	add	%sp,LOCALS+$M,$rp

	add	%sp,LOCALS+$S,$bp
	add	%sp,LOCALS+$S,$ap
	call	__ecp_nistz256_mul_mont	! p256_sqr_mont(S, S);
	add	%sp,LOCALS+$S,$rp

	ld	[$ap_real],@acc[0]
	add	%sp,LOCALS+$Zsqr,$bp
	ld	[$ap_real+4],@acc[1]
	ld	[$ap_real+8],@acc[2]
	ld	[$ap_real+12],@acc[3]
	ld	[$ap_real+16],@acc[4]
	ld	[$ap_real+20],@acc[5]
	ld	[$ap_real+24],@acc[6]
	ld	[$ap_real+28],@acc[7]
	call	__ecp_nistz256_sub_from	! p256_sub(Zsqr, in_x, Zsqr);
	add	%sp,LOCALS+$Zsqr,$rp

	call	__ecp_nistz256_mul_mont	! p256_mul_mont(tmp0, in_z, in_y);
	add	%sp,LOCALS+$tmp0,$rp

	call	__ecp_nistz256_mul_by_2	! p256_mul_by_2(res_z, tmp0);

	add	%sp,LOCALS+$Zsqr,$bp
	add	%sp,LOCALS+$M,$ap
	call	__ecp_nistz256_mul_mont	! p256_mul_mont(M, M, Zsqr);
	add	%sp,LOCALS+$M,$rp

	call	__ecp_nistz256_mul_by_3	! p256_mul_by_3(M, M);
	add	%sp,LOCALS+$M,$rp

	add	%sp,LOCALS+$S,$bp
	add	%sp,LOCALS+$S,$ap
	call	__ecp_nistz256_mul_mont	! p256_sqr_mont(tmp0, S);
	add	%sp,LOCALS+$tmp0,$rp

	call	__ecp_nistz256_div_by_2	! p256_div_by_2(res_y, tmp0);

	add	%sp,LOCALS+$S,$ap
	call	__ecp_nistz256_mul_mont	! p256_mul_mont(S, S, in_x);
	add	%sp,LOCALS+$S,$rp

	call	__ecp_nistz256_mul_by_2	! p256_mul_by_2(tmp0, S);
	add	%sp,LOCALS+$tmp0,$rp

	add	%sp,LOCALS+$M,$bp
	add	%sp,LOCALS+$M,$ap
	call	__ecp_nistz256_mul_mont	! p256_sqr_mont(res_x, M);

	add	%sp,LOCALS+$tmp0,$bp
	call	__ecp_nistz256_sub_from	! p256_sub(res_x, res_x, tmp0);

	add	%sp,LOCALS+$S,$bp
	call	__ecp_nistz256_sub_morf	! p256_sub(S, S, res_x);
	add	%sp,LOCALS+$S,$rp

	add	%sp,LOCALS+$M,$bp
	add	%sp,LOCALS+$S,$ap
	call	__ecp_nistz256_mul_mont	! p256_mul_mont(S, S, M);
	add	%sp,LOCALS+$S,$rp

	call	__ecp_nistz256_sub_from	! p256_sub(res_y, S, res_y);
.type	ecp_nistz256_point_double,#function
.size	ecp_nistz256_point_double,.-ecp_nistz256_point_double
########################################################################
# void ecp_nistz256_point_add(P256_POINT *out,const P256_POINT *in1,
#			      const P256_POINT *in2);

my ($res_x,$res_y,$res_z,
    $H,$Hsqr,$R,$Rsqr,$Hcub,
    $U1,$U2,$S1,$S2)=map(32*$_,(0..11));
my ($Z1sqr, $Z2sqr) = ($Hsqr, $Rsqr);

# above map() describes stack layout with 12 temporary
# 256-bit vectors on top. Then we reserve some space for
# !in1infty, !in2infty, result of check for zero and return pointer.
my $bp_real=$rp_real;

.globl	ecp_nistz256_point_add
ecp_nistz256_point_add:
	SPARC_LOAD_ADDRESS_LEAF(OPENSSL_sparcv9cap_P,%g1,%g5)
	ld	[%g1],%g1		! OPENSSL_sparcv9cap_P[0]
	and	%g1,(SPARCV9_VIS3|SPARCV9_64BIT_STACK),%g1
	cmp	%g1,(SPARCV9_VIS3|SPARCV9_64BIT_STACK)
	be	ecp_nistz256_point_add_vis3
	save	%sp,-STACK_FRAME-32*12-32,%sp

	stx	$rp,[%fp+STACK_BIAS-8]	! off-load $rp

	ld	[$bp],@acc[0]		! in2_x
	ld	[$bp+32],$t0		! in2_y
	or	@acc[1],@acc[0],@acc[0]
	or	@acc[3],@acc[2],@acc[2]
	or	@acc[5],@acc[4],@acc[4]
	or	@acc[7],@acc[6],@acc[6]
	or	@acc[2],@acc[0],@acc[0]
	or	@acc[6],@acc[4],@acc[4]
	or	@acc[4],@acc[0],@acc[0]
	or	@acc[0],$t0,$t0		! !in2infty
	st	$t0,[%fp+STACK_BIAS-12]

	ld	[$ap],@acc[0]		! in1_x
	ld	[$ap+32],$t0		! in1_y
	or	@acc[1],@acc[0],@acc[0]
	or	@acc[3],@acc[2],@acc[2]
	or	@acc[5],@acc[4],@acc[4]
	or	@acc[7],@acc[6],@acc[6]
	or	@acc[2],@acc[0],@acc[0]
	or	@acc[6],@acc[4],@acc[4]
	or	@acc[4],@acc[0],@acc[0]
	or	@acc[0],$t0,$t0		! !in1infty
	st	$t0,[%fp+STACK_BIAS-16]
	call	__ecp_nistz256_mul_mont	! p256_sqr_mont(Z2sqr, in2_z);
	add	%sp,LOCALS+$Z2sqr,$rp

	call	__ecp_nistz256_mul_mont	! p256_sqr_mont(Z1sqr, in1_z);
	add	%sp,LOCALS+$Z1sqr,$rp

	add	%sp,LOCALS+$Z2sqr,$ap
	call	__ecp_nistz256_mul_mont	! p256_mul_mont(S1, Z2sqr, in2_z);
	add	%sp,LOCALS+$S1,$rp

	add	%sp,LOCALS+$Z1sqr,$ap
	call	__ecp_nistz256_mul_mont	! p256_mul_mont(S2, Z1sqr, in1_z);
	add	%sp,LOCALS+$S2,$rp

	add	%sp,LOCALS+$S1,$ap
	call	__ecp_nistz256_mul_mont	! p256_mul_mont(S1, S1, in1_y);
	add	%sp,LOCALS+$S1,$rp

	add	%sp,LOCALS+$S2,$ap
	call	__ecp_nistz256_mul_mont	! p256_mul_mont(S2, S2, in2_y);
	add	%sp,LOCALS+$S2,$rp

	add	%sp,LOCALS+$S1,$bp
	call	__ecp_nistz256_sub_from	! p256_sub(R, S2, S1);
	add	%sp,LOCALS+$R,$rp

	or	@acc[1],@acc[0],@acc[0]	! see if result is zero
	or	@acc[3],@acc[2],@acc[2]
	or	@acc[5],@acc[4],@acc[4]
	or	@acc[7],@acc[6],@acc[6]
	or	@acc[2],@acc[0],@acc[0]
	or	@acc[6],@acc[4],@acc[4]
	or	@acc[4],@acc[0],@acc[0]
	st	@acc[0],[%fp+STACK_BIAS-20]

	add	%sp,LOCALS+$Z2sqr,$ap
	call	__ecp_nistz256_mul_mont	! p256_mul_mont(U1, in1_x, Z2sqr);
	add	%sp,LOCALS+$U1,$rp

	add	%sp,LOCALS+$Z1sqr,$ap
	call	__ecp_nistz256_mul_mont	! p256_mul_mont(U2, in2_x, Z1sqr);
	add	%sp,LOCALS+$U2,$rp

	add	%sp,LOCALS+$U1,$bp
	call	__ecp_nistz256_sub_from	! p256_sub(H, U2, U1);
	add	%sp,LOCALS+$H,$rp

	or	@acc[1],@acc[0],@acc[0]	! see if result is zero
	or	@acc[3],@acc[2],@acc[2]
	or	@acc[5],@acc[4],@acc[4]
	or	@acc[7],@acc[6],@acc[6]
	or	@acc[2],@acc[0],@acc[0]
	or	@acc[6],@acc[4],@acc[4]
	orcc	@acc[4],@acc[0],@acc[0]

	bne,pt	%icc,.Ladd_proceed	! is_equal(U1,U2)?

	ld	[%fp+STACK_BIAS-12],$t0
	ld	[%fp+STACK_BIAS-16],$t1
	ld	[%fp+STACK_BIAS-20],$t2
	be,pt	%icc,.Ladd_proceed	! (in1infty || in2infty)?
	be,pt	%icc,.Ladd_double	! is_equal(S1,S2)?

	ldx	[%fp+STACK_BIAS-8],$rp

	ldx	[%fp+STACK_BIAS-8],$rp_real
	b	.Lpoint_double_shortcut
	add	%sp,32*(12-4)+32,%sp	! difference in frame sizes
	add	%sp,LOCALS+$R,$bp
	add	%sp,LOCALS+$R,$ap
	call	__ecp_nistz256_mul_mont	! p256_sqr_mont(Rsqr, R);
	add	%sp,LOCALS+$Rsqr,$rp

	add	%sp,LOCALS+$H,$ap
	call	__ecp_nistz256_mul_mont	! p256_mul_mont(res_z, H, in1_z);
	add	%sp,LOCALS+$res_z,$rp

	add	%sp,LOCALS+$H,$bp
	add	%sp,LOCALS+$H,$ap
	call	__ecp_nistz256_mul_mont	! p256_sqr_mont(Hsqr, H);
	add	%sp,LOCALS+$Hsqr,$rp

	add	%sp,LOCALS+$res_z,$ap
	call	__ecp_nistz256_mul_mont	! p256_mul_mont(res_z, res_z, in2_z);
	add	%sp,LOCALS+$res_z,$rp

	add	%sp,LOCALS+$H,$bp
	add	%sp,LOCALS+$Hsqr,$ap
	call	__ecp_nistz256_mul_mont	! p256_mul_mont(Hcub, Hsqr, H);
	add	%sp,LOCALS+$Hcub,$rp

	add	%sp,LOCALS+$U1,$bp
	add	%sp,LOCALS+$Hsqr,$ap
	call	__ecp_nistz256_mul_mont	! p256_mul_mont(U2, U1, Hsqr);
	add	%sp,LOCALS+$U2,$rp

	call	__ecp_nistz256_mul_by_2	! p256_mul_by_2(Hsqr, U2);
	add	%sp,LOCALS+$Hsqr,$rp

	add	%sp,LOCALS+$Rsqr,$bp
	call	__ecp_nistz256_sub_morf	! p256_sub(res_x, Rsqr, Hsqr);
	add	%sp,LOCALS+$res_x,$rp

	add	%sp,LOCALS+$Hcub,$bp
	call	__ecp_nistz256_sub_from	! p256_sub(res_x, res_x, Hcub);
	add	%sp,LOCALS+$res_x,$rp

	add	%sp,LOCALS+$U2,$bp
	call	__ecp_nistz256_sub_morf	! p256_sub(res_y, U2, res_x);
	add	%sp,LOCALS+$res_y,$rp

	add	%sp,LOCALS+$Hcub,$bp
	add	%sp,LOCALS+$S1,$ap
	call	__ecp_nistz256_mul_mont	! p256_mul_mont(S2, S1, Hcub);
	add	%sp,LOCALS+$S2,$rp

	add	%sp,LOCALS+$R,$bp
	add	%sp,LOCALS+$res_y,$ap
	call	__ecp_nistz256_mul_mont	! p256_mul_mont(res_y, res_y, R);
	add	%sp,LOCALS+$res_y,$rp

	add	%sp,LOCALS+$S2,$bp
	call	__ecp_nistz256_sub_from	! p256_sub(res_y, res_y, S2);
	add	%sp,LOCALS+$res_y,$rp

	ld	[%fp+STACK_BIAS-16],$t1	! !in1infty
	ld	[%fp+STACK_BIAS-12],$t2	! !in2infty
	ldx	[%fp+STACK_BIAS-8],$rp

for($i=0;$i<96;$i+=8) {			# conditional moves
	ld	[%sp+LOCALS+$i],@acc[0]		! res
	ld	[%sp+LOCALS+$i+4],@acc[1]
	ld	[$bp_real+$i],@acc[2]		! in2
	ld	[$bp_real+$i+4],@acc[3]
	ld	[$ap_real+$i],@acc[4]		! in1
	ld	[$ap_real+$i+4],@acc[5]
	movrz	$t1,@acc[2],@acc[0]
	movrz	$t1,@acc[3],@acc[1]
	movrz	$t2,@acc[4],@acc[0]
	movrz	$t2,@acc[5],@acc[1]
	st	@acc[1],[$rp+$i+4]
.type	ecp_nistz256_point_add,#function
.size	ecp_nistz256_point_add,.-ecp_nistz256_point_add
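
! The cmov loop above implements, per 32-bit word (illustration only):
!
!	res = in1infty ? in2 : res;	/* movrz on !in1infty, $t1==0 */
!	res = in2infty ? in1 : res;	/* movrz on !in2infty, $t2==0 */
!
! so that adding the point at infinity returns the other operand intact.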
########################################################################
# void ecp_nistz256_point_add_affine(P256_POINT *out,const P256_POINT *in1,
#				     const P256_POINT_AFFINE *in2);

my ($res_x,$res_y,$res_z,
    $U2,$S2,$H,$R,$Hsqr,$Hcub,$Rsqr)=map(32*$_,(0..9));

# above map() describes stack layout with 10 temporary
# 256-bit vectors on top. Then we reserve some space for
# !in1infty, !in2infty, result of check for zero and return pointer.
my @ONE_mont=(1,0,0,-1,-1,-1,-2,0);
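# @ONE_mont is 2^256 mod P, i.e. the value 1 in Montgomery representation,
# spelled as signed 32-bit words, least significant first.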
my $bp_real=$rp_real;

.globl	ecp_nistz256_point_add_affine
ecp_nistz256_point_add_affine:
	SPARC_LOAD_ADDRESS_LEAF(OPENSSL_sparcv9cap_P,%g1,%g5)
	ld	[%g1],%g1		! OPENSSL_sparcv9cap_P[0]
	and	%g1,(SPARCV9_VIS3|SPARCV9_64BIT_STACK),%g1
	cmp	%g1,(SPARCV9_VIS3|SPARCV9_64BIT_STACK)
	be	ecp_nistz256_point_add_affine_vis3
	save	%sp,-STACK_FRAME-32*10-32,%sp

	stx	$rp,[%fp+STACK_BIAS-8]	! off-load $rp

	ld	[$ap],@acc[0]		! in1_x
	ld	[$ap+32],$t0		! in1_y
	or	@acc[1],@acc[0],@acc[0]
	or	@acc[3],@acc[2],@acc[2]
	or	@acc[5],@acc[4],@acc[4]
	or	@acc[7],@acc[6],@acc[6]
	or	@acc[2],@acc[0],@acc[0]
	or	@acc[6],@acc[4],@acc[4]
	or	@acc[4],@acc[0],@acc[0]
	or	@acc[0],$t0,$t0		! !in1infty
	st	$t0,[%fp+STACK_BIAS-16]

	ld	[$bp],@acc[0]		! in2_x
	ld	[$bp+32],$t0		! in2_y
	or	@acc[1],@acc[0],@acc[0]
	or	@acc[3],@acc[2],@acc[2]
	or	@acc[5],@acc[4],@acc[4]
	or	@acc[7],@acc[6],@acc[6]
	or	@acc[2],@acc[0],@acc[0]
	or	@acc[6],@acc[4],@acc[4]
	or	@acc[4],@acc[0],@acc[0]
	or	@acc[0],$t0,$t0		! !in2infty
	st	$t0,[%fp+STACK_BIAS-12]

	call	__ecp_nistz256_mul_mont	! p256_sqr_mont(Z1sqr, in1_z);
	add	%sp,LOCALS+$Z1sqr,$rp

	add	%sp,LOCALS+$Z1sqr,$ap
	call	__ecp_nistz256_mul_mont	! p256_mul_mont(U2, Z1sqr, in2_x);
	add	%sp,LOCALS+$U2,$rp

	call	__ecp_nistz256_sub_from	! p256_sub(H, U2, in1_x);
	add	%sp,LOCALS+$H,$rp

	add	%sp,LOCALS+$Z1sqr,$ap
	call	__ecp_nistz256_mul_mont	! p256_mul_mont(S2, Z1sqr, in1_z);
	add	%sp,LOCALS+$S2,$rp

	add	%sp,LOCALS+$H,$ap
	call	__ecp_nistz256_mul_mont	! p256_mul_mont(res_z, H, in1_z);
	add	%sp,LOCALS+$res_z,$rp

	add	%sp,LOCALS+$S2,$ap
	call	__ecp_nistz256_mul_mont	! p256_mul_mont(S2, S2, in2_y);
	add	%sp,LOCALS+$S2,$rp

	call	__ecp_nistz256_sub_from	! p256_sub(R, S2, in1_y);
	add	%sp,LOCALS+$R,$rp

	add	%sp,LOCALS+$H,$bp
	add	%sp,LOCALS+$H,$ap
	call	__ecp_nistz256_mul_mont	! p256_sqr_mont(Hsqr, H);
	add	%sp,LOCALS+$Hsqr,$rp

	add	%sp,LOCALS+$R,$bp
	add	%sp,LOCALS+$R,$ap
	call	__ecp_nistz256_mul_mont	! p256_sqr_mont(Rsqr, R);
	add	%sp,LOCALS+$Rsqr,$rp

	add	%sp,LOCALS+$H,$bp
	add	%sp,LOCALS+$Hsqr,$ap
	call	__ecp_nistz256_mul_mont	! p256_mul_mont(Hcub, Hsqr, H);
	add	%sp,LOCALS+$Hcub,$rp

	add	%sp,LOCALS+$Hsqr,$ap
	call	__ecp_nistz256_mul_mont	! p256_mul_mont(U2, in1_x, Hsqr);
	add	%sp,LOCALS+$U2,$rp

	call	__ecp_nistz256_mul_by_2	! p256_mul_by_2(Hsqr, U2);
	add	%sp,LOCALS+$Hsqr,$rp

	add	%sp,LOCALS+$Rsqr,$bp
	call	__ecp_nistz256_sub_morf	! p256_sub(res_x, Rsqr, Hsqr);
	add	%sp,LOCALS+$res_x,$rp

	add	%sp,LOCALS+$Hcub,$bp
	call	__ecp_nistz256_sub_from	! p256_sub(res_x, res_x, Hcub);
	add	%sp,LOCALS+$res_x,$rp

	add	%sp,LOCALS+$U2,$bp
	call	__ecp_nistz256_sub_morf	! p256_sub(res_y, U2, res_x);
	add	%sp,LOCALS+$res_y,$rp

	add	%sp,LOCALS+$Hcub,$ap
	call	__ecp_nistz256_mul_mont	! p256_mul_mont(S2, in1_y, Hcub);
	add	%sp,LOCALS+$S2,$rp

	add	%sp,LOCALS+$R,$bp
	add	%sp,LOCALS+$res_y,$ap
	call	__ecp_nistz256_mul_mont	! p256_mul_mont(res_y, res_y, R);
	add	%sp,LOCALS+$res_y,$rp

	add	%sp,LOCALS+$S2,$bp
	call	__ecp_nistz256_sub_from	! p256_sub(res_y, res_y, S2);
	add	%sp,LOCALS+$res_y,$rp

	ld	[%fp+STACK_BIAS-16],$t1	! !in1infty
	ld	[%fp+STACK_BIAS-12],$t2	! !in2infty
	ldx	[%fp+STACK_BIAS-8],$rp

for($i=0;$i<64;$i+=8) {			# conditional moves
	ld	[%sp+LOCALS+$i],@acc[0]		! res
	ld	[%sp+LOCALS+$i+4],@acc[1]
	ld	[$bp_real+$i],@acc[2]		! in2
	ld	[$bp_real+$i+4],@acc[3]
	ld	[$ap_real+$i],@acc[4]		! in1
	ld	[$ap_real+$i+4],@acc[5]
	movrz	$t1,@acc[2],@acc[0]
	movrz	$t1,@acc[3],@acc[1]
	movrz	$t2,@acc[4],@acc[0]
	movrz	$t2,@acc[5],@acc[1]
	st	@acc[1],[$rp+$i+4]

	ld	[%sp+LOCALS+$i],@acc[0]		! res
	ld	[%sp+LOCALS+$i+4],@acc[1]
	ld	[$ap_real+$i],@acc[4]		! in1
	ld	[$ap_real+$i+4],@acc[5]
	movrz	$t1,@ONE_mont[$j],@acc[0]
	movrz	$t1,@ONE_mont[$j+1],@acc[1]
	movrz	$t2,@acc[4],@acc[0]
	movrz	$t2,@acc[5],@acc[1]
	st	@acc[1],[$rp+$i+4]
.type	ecp_nistz256_point_add_affine,#function
.size	ecp_nistz256_point_add_affine,.-ecp_nistz256_point_add_affine
my ($out,$inp,$index)=map("%i$_",(0..2));

! void	ecp_nistz256_scatter_w5(void *%i0,const P256_POINT *%i1,
.globl	ecp_nistz256_scatter_w5
ecp_nistz256_scatter_w5:
	save	%sp,-STACK_FRAME,%sp

	add	$out,$index,$out

	st	%l0,[$out+64*0-4]
	st	%l1,[$out+64*1-4]
	st	%l2,[$out+64*2-4]
	st	%l3,[$out+64*3-4]
	st	%l4,[$out+64*4-4]
	st	%l5,[$out+64*5-4]
	st	%l6,[$out+64*6-4]
	st	%l7,[$out+64*7-4]

	st	%l0,[$out+64*0-4]
	st	%l1,[$out+64*1-4]
	st	%l2,[$out+64*2-4]
	st	%l3,[$out+64*3-4]
	st	%l4,[$out+64*4-4]
	st	%l5,[$out+64*5-4]
	st	%l6,[$out+64*6-4]
	st	%l7,[$out+64*7-4]

	st	%l0,[$out+64*0-4]
	st	%l1,[$out+64*1-4]
	st	%l2,[$out+64*2-4]
	st	%l3,[$out+64*3-4]
	st	%l4,[$out+64*4-4]
	st	%l5,[$out+64*5-4]
	st	%l6,[$out+64*6-4]
	st	%l7,[$out+64*7-4]
.type	ecp_nistz256_scatter_w5,#function
.size	ecp_nistz256_scatter_w5,.-ecp_nistz256_scatter_w5
! void	ecp_nistz256_gather_w5(P256_POINT *%i0,const void *%i1,
.globl	ecp_nistz256_gather_w5
ecp_nistz256_gather_w5:
	save	%sp,-STACK_FRAME,%sp

	add	$index,$mask,$index
	add	$inp,$index,$inp
.type	ecp_nistz256_gather_w5,#function
.size	ecp_nistz256_gather_w5,.-ecp_nistz256_gather_w5

! void	ecp_nistz256_scatter_w7(void *%i0,const P256_POINT_AFFINE *%i1,
.globl	ecp_nistz256_scatter_w7
ecp_nistz256_scatter_w7:
	save	%sp,-STACK_FRAME,%sp
	add	$out,$index,$out

	subcc	$index,1,$index
	stb	%l0,[$out+64*0-1]
	stb	%l1,[$out+64*1-1]
	stb	%l2,[$out+64*2-1]
	stb	%l3,[$out+64*3-1]
	bne	.Loop_scatter_w7
.type	ecp_nistz256_scatter_w7,#function
.size	ecp_nistz256_scatter_w7,.-ecp_nistz256_scatter_w7
! void	ecp_nistz256_gather_w7(P256_POINT_AFFINE *%i0,const void *%i1,
.globl	ecp_nistz256_gather_w7
ecp_nistz256_gather_w7:
	save	%sp,-STACK_FRAME,%sp

	add	$index,$mask,$index
	add	$inp,$index,$inp

	ldub	[$inp+64*0],%l0
	prefetch [$inp+3840+64*0],1
	subcc	$index,1,$index
	ldub	[$inp+64*1],%l1
	prefetch [$inp+3840+64*1],1
	ldub	[$inp+64*2],%l2
	prefetch [$inp+3840+64*2],1
	ldub	[$inp+64*3],%l3
	prefetch [$inp+3840+64*3],1
.type	ecp_nistz256_gather_w7,#function
.size	ecp_nistz256_gather_w7,.-ecp_nistz256_gather_w7
########################################################################
# Following subroutines are VIS3 counterparts of those above that
# implement ones found in ecp_nistz256.c. Key difference is that they
# use 128-bit multiplication and addition with 64-bit carry, and in
# order to do that they perform conversion from uint32_t[8] to
# uint64_t[4] upon entry and vice versa on return.
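#
# A hedged C model of the entry conversion (illustration only): two
# adjacent 32-bit words, a[2*i] less significant, are fused as
#
#	uint64_t A = ((uint64_t)a[2*i+1] << 32) | a[2*i];
#
# (done with sllx/or on this big-endian CPU) and split back on return.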
my ($rp,$ap,$bp)=map("%i$_",(0..2));
my ($t0,$t1,$t2,$t3,$a0,$a1,$a2,$a3)=map("%l$_",(0..7));
my ($acc0,$acc1,$acc2,$acc3,$acc4,$acc5)=map("%o$_",(0..5));
my ($bi,$poly1,$poly3,$minus1)=(map("%i$_",(3..5)),"%g1");
my ($rp_real,$ap_real)=("%g2","%g3");
my ($acc6,$acc7)=($bp,$bi);	# used in squaring

__ecp_nistz256_mul_by_2_vis3:
	addcc	$acc0,$acc0,$acc0
	addxccc	$acc1,$acc1,$acc1
	addxccc	$acc2,$acc2,$acc2
	addxccc	$acc3,$acc3,$acc3
	b	.Lreduce_by_sub_vis3
	addxc	%g0,%g0,$acc4		! did it carry?
.type	__ecp_nistz256_mul_by_2_vis3,#function
.size	__ecp_nistz256_mul_by_2_vis3,.-__ecp_nistz256_mul_by_2_vis3

__ecp_nistz256_add_vis3:

__ecp_nistz256_add_noload_vis3:

	addcc	$t0,$acc0,$acc0
	addxccc	$t1,$acc1,$acc1
	addxccc	$t2,$acc2,$acc2
	addxccc	$t3,$acc3,$acc3
	addxc	%g0,%g0,$acc4		! did it carry?

.Lreduce_by_sub_vis3:

	addcc	$acc0,1,$t0		! add -modulus, i.e. subtract
	addxccc	$acc1,$poly1,$t1
	addxccc	$acc2,$minus1,$t2
	addxccc	$acc3,$poly3,$t3
	addxc	$acc4,$minus1,$acc4

	movrz	$acc4,$t0,$acc0		! ret = borrow ? ret : ret-modulus
	movrz	$acc4,$t1,$acc1
	movrz	$acc4,$t2,$acc2
	movrz	$acc4,$t3,$acc3
.type	__ecp_nistz256_add_vis3,#function
.size	__ecp_nistz256_add_vis3,.-__ecp_nistz256_add_vis3
! Trouble with subtraction is that there is no subtraction with 64-bit
! borrow, only with 32-bit one. For this reason we "decompose" 64-bit
! $acc0-$acc3 to 32-bit values and pick b[4] in 32-bit pieces. But
! recall that SPARC is big-endian, which is why you'll observe that
! b[4] is accessed as 4-0-12-8-20-16-28-24. And prior to reduction we
! "collect" result back to 64-bit $acc0-$acc3.
__ecp_nistz256_sub_from_vis3:
	subcc	$acc0,$t0,$acc0
	subccc	$acc4,$t1,$acc4
	subccc	$acc1,$t2,$acc1
	 and	$acc0,$poly1,$acc0
	subccc	$acc5,$t3,$acc5
	 and	$acc1,$poly1,$acc1
	 or	$acc0,$acc4,$acc0
	 or	$acc1,$acc5,$acc1
	subccc	$acc2,$t0,$acc2
	subccc	$acc4,$t1,$acc4
	subccc	$acc3,$t2,$acc3
	 and	$acc2,$poly1,$acc2
	subccc	$acc5,$t3,$acc5
	 and	$acc3,$poly1,$acc3
	 or	$acc2,$acc4,$acc2
	subc	%g0,%g0,$acc4		! did it borrow?
	b	.Lreduce_by_add_vis3
	or	$acc3,$acc5,$acc3
.type	__ecp_nistz256_sub_from_vis3,#function
.size	__ecp_nistz256_sub_from_vis3,.-__ecp_nistz256_sub_from_vis3

__ecp_nistz256_sub_morf_vis3:
	subcc	$t0,$acc0,$acc0
	subccc	$t1,$acc4,$acc4
	subccc	$t2,$acc1,$acc1
	 and	$acc0,$poly1,$acc0
	subccc	$t3,$acc5,$acc5
	 and	$acc1,$poly1,$acc1
	 or	$acc0,$acc4,$acc0
	 or	$acc1,$acc5,$acc1
	subccc	$t0,$acc2,$acc2
	subccc	$t1,$acc4,$acc4
	subccc	$t2,$acc3,$acc3
	 and	$acc2,$poly1,$acc2
	subccc	$t3,$acc5,$acc5
	 and	$acc3,$poly1,$acc3
	 or	$acc2,$acc4,$acc2
	subc	%g0,%g0,$acc4		! did it borrow?
	or	$acc3,$acc5,$acc3

.Lreduce_by_add_vis3:

	addcc	$acc0,-1,$t0		! add modulus
	addxccc	$acc1,$poly1,$t1
	not	$poly1,$poly1		! restore $poly1
	addxccc	$acc2,%g0,$t2

	movrnz	$acc4,$t0,$acc0		! if a-b borrowed, ret = ret+mod
	movrnz	$acc4,$t1,$acc1
	movrnz	$acc4,$t2,$acc2
	movrnz	$acc4,$t3,$acc3
.type	__ecp_nistz256_sub_morf_vis3,#function
.size	__ecp_nistz256_sub_morf_vis3,.-__ecp_nistz256_sub_morf_vis3
__ecp_nistz256_div_by_2_vis3:
	! ret = (a is odd ? a+mod : a) >> 1

	addcc	$acc0,-1,$t0		! add modulus
	addxccc	$acc1,$t1,$t1
	addxccc	$acc2,%g0,$t2
	addxccc	$acc3,$t3,$t3
	addxc	%g0,%g0,$acc4		! carry bit

	movrnz	$acc5,$t0,$acc0
	movrnz	$acc5,$t1,$acc1
	movrnz	$acc5,$t2,$acc2
	movrnz	$acc5,$t3,$acc3
	movrz	$acc5,%g0,$acc4

	sllx	$acc4,63,$t3		! don't forget carry bit
.type	__ecp_nistz256_div_by_2_vis3,#function
.size	__ecp_nistz256_div_by_2_vis3,.-__ecp_nistz256_div_by_2_vis3
! compared to __ecp_nistz256_mul_mont it's almost 4x smaller and
! 4x faster [on T4]...

__ecp_nistz256_mul_mont_vis3:
	not	$poly3,$poly3		! 0xFFFFFFFF00000001

	ldx	[$bp+8],$bi		! b[1]
	addcc	$acc1,$t0,$acc1		! accumulate high parts of multiplication
	addxccc	$acc2,$t1,$acc2
	addxccc	$acc3,$t2,$acc3
for($i=1;$i<4;$i++) {
	# Reduction iteration is normally performed by accumulating
	# result of multiplication of modulus by "magic" digit [and
	# omitting least significant word, which is guaranteed to
	# be 0], but thanks to special form of modulus and "magic"
	# digit being equal to least significant word, it can be
	# performed with additions and subtractions alone. Indeed:
	#
	#        ffff0001.00000000.0000ffff.ffffffff
	# *                                abcdefgh
	# + xxxxxxxx.xxxxxxxx.xxxxxxxx.xxxxxxxx.abcdefgh
	#
	# Now observing that ff..ff*x = (2^n-1)*x = 2^n*x-x, we
	# rewrite above as:
	#
	#   xxxxxxxx.xxxxxxxx.xxxxxxxx.xxxxxxxx.abcdefgh
	# + abcdefgh.abcdefgh.0000abcd.efgh0000.00000000
	# - 0000abcd.efgh0000.00000000.00000000.abcdefgh
	#
	# or marking redundant operations:
	#
	#   xxxxxxxx.xxxxxxxx.xxxxxxxx.xxxxxxxx.--------
	# + abcdefgh.abcdefgh.0000abcd.efgh0000.--------
	# - 0000abcd.efgh0000.--------.--------.--------
	#   ^^^^^^^^ but this word is calculated with umulxhi, because
	#            there is no subtract with 64-bit borrow:-(
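	#
	# A hedged C sketch of one such step (illustration only):
	#
	#	t0 = acc0 << 32;  t1 = acc0 >> 32;	/* acc0 * 2^96    */
	#	t2 = acc0 - t0;				/* low 64 bits of */
	#	t3 = umulxhi(acc0, 0xFFFFFFFF00000001);	/* acc0*poly3     */
	#	acc[0..4] = { acc1+t0, acc2+t1, acc3+t2, acc4+t3, acc5 };
	#	/* with the 64-bit carry chained through, acc[0] dropped */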
	sub	$acc0,$t0,$t2		! acc0*0xFFFFFFFF00000001, low part
	umulxhi	$acc0,$poly3,$t3	! acc0*0xFFFFFFFF00000001, high part
	addcc	$acc1,$t0,$acc0		! +=acc[0]<<96 and omit acc[0]
	addxccc	$acc2,$t1,$acc1
	addxccc	$acc3,$t2,$acc2		! +=acc[0]*0xFFFFFFFF00000001
	addxccc	$acc4,$t3,$acc3
	addxc	$acc5,%g0,$acc4

	addcc	$acc0,$t0,$acc0		! accumulate low parts of multiplication
	addxccc	$acc1,$t1,$acc1
	addxccc	$acc2,$t2,$acc2
	addxccc	$acc3,$t3,$acc3
	addxc	$acc4,%g0,$acc4
$code.=<<___ if ($i<3);
	ldx	[$bp+8*($i+1)],$bi	! bp[$i+1]
	addcc	$acc1,$t0,$acc1		! accumulate high parts of multiplication
	addxccc	$acc2,$t1,$acc2
	addxccc	$acc3,$t2,$acc3
	addxccc	$acc4,$t3,$acc4

	sub	$acc0,$t0,$t2		! acc0*0xFFFFFFFF00000001, low part
	umulxhi	$acc0,$poly3,$t3	! acc0*0xFFFFFFFF00000001, high part
	addcc	$acc1,$t0,$acc0		! +=acc[0]<<96 and omit acc[0]
	addxccc	$acc2,$t1,$acc1
	addxccc	$acc3,$t2,$acc2		! +=acc[0]*0xFFFFFFFF00000001
	addxccc	$acc4,$t3,$acc3
	b	.Lmul_final_vis3	! see below
	addxc	$acc5,%g0,$acc4
.type	__ecp_nistz256_mul_mont_vis3,#function
.size	__ecp_nistz256_mul_mont_vis3,.-__ecp_nistz256_mul_mont_vis3
! compared to above __ecp_nistz256_mul_mont_vis3 it's 21% less
! instructions, but only 14% faster [on T4]...

__ecp_nistz256_sqr_mont_vis3:
	! |  |  |  |  |  |a1*a0|  |
	! |  |  |  |  |a2*a0|  |  |
	! |  |a3*a2|a3*a0|  |  |  |
	! |  |  |  |a2*a1|  |  |  |
	! |  |  |a3*a1|  |  |  |  |
	! *|  |  |  |  |  |  |  | 2|
	! +|a3*a3|a2*a2|a1*a1|a0*a0|
	! |--+--+--+--+--+--+--+--|
	! |A7|A6|A5|A4|A3|A2|A1|A0|, where Ax is $accx, i.e. follow $accx
1960 ! "can't overflow" below mark carrying into high part of
1961 ! multiplication result, which can't overflow, because it
1962 ! can never be all ones.
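	!
	! In C terms the strategy is (illustration only):
	!
	!	acc  = sum of a[i]*a[j] for i < j;	/* 6 products */
	!	acc *= 2;				/* the "*2" row */
	!	acc += a[0]^2 + a[1]^2 + a[2]^2 + a[3]^2;
	!
	! i.e. 10 distinct 64x64-bit products instead of the 16 a generic
	! multiplication would need.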
	mulx	$a1,$a0,$acc1		! a[1]*a[0]
	mulx	$a2,$a0,$acc2		! a[2]*a[0]
	mulx	$a3,$a0,$acc3		! a[3]*a[0]
	umulxhi	$a3,$a0,$acc4

	addcc	$acc2,$t1,$acc2		! accumulate high parts of multiplication
	mulx	$a2,$a1,$t0		! a[2]*a[1]
	addxccc	$acc3,$t2,$acc3
	mulx	$a3,$a1,$t2		! a[3]*a[1]
	addxc	$acc4,%g0,$acc4		! can't overflow

	mulx	$a3,$a2,$acc5		! a[3]*a[2]
	not	$poly3,$poly3		! 0xFFFFFFFF00000001
	umulxhi	$a3,$a2,$acc6

	addcc	$t2,$t1,$t1		! accumulate high parts of multiplication
	mulx	$a0,$a0,$acc0		! a[0]*a[0]
	addxc	$t3,%g0,$t2		! can't overflow

	addcc	$acc3,$t0,$acc3		! accumulate low parts of multiplication
	addxccc	$acc4,$t1,$acc4
	mulx	$a1,$a1,$t1		! a[1]*a[1]
	addxccc	$acc5,$t2,$acc5
	addxc	$acc6,%g0,$acc6		! can't overflow

	addcc	$acc1,$acc1,$acc1	! acc[1-6]*=2
	mulx	$a2,$a2,$t2		! a[2]*a[2]
	addxccc	$acc2,$acc2,$acc2
	addxccc	$acc3,$acc3,$acc3
	mulx	$a3,$a3,$t3		! a[3]*a[3]
	addxccc	$acc4,$acc4,$acc4
	addxccc	$acc5,$acc5,$acc5
	addxccc	$acc6,$acc6,$acc6

	addcc	$acc1,$a0,$acc1		! +a[i]*a[i]
	addxccc	$acc2,$t1,$acc2
	addxccc	$acc3,$a1,$acc3
	addxccc	$acc4,$t2,$acc4
	addxccc	$acc5,$a2,$acc5
	addxccc	$acc6,$t3,$acc6
	sub	$acc0,$t0,$t2		! acc0*0xFFFFFFFF00000001, low part
	addxc	$acc7,$a3,$acc7
for($i=0;$i<3;$i++) {			# reductions, see commentary
					# in multiplication for details
	umulxhi	$acc0,$poly3,$t3	! acc0*0xFFFFFFFF00000001, high part
	addcc	$acc1,$t0,$acc0		! +=acc[0]<<96 and omit acc[0]
	addxccc	$acc2,$t1,$acc1
	addxccc	$acc3,$t2,$acc2		! +=acc[0]*0xFFFFFFFF00000001
	sub	$acc0,$t0,$t2		! acc0*0xFFFFFFFF00000001, low part
	addxc	%g0,$t3,$acc3		! can't overflow
	umulxhi	$acc0,$poly3,$t3	! acc0*0xFFFFFFFF00000001, high part
	addcc	$acc1,$t0,$acc0		! +=acc[0]<<96 and omit acc[0]
	addxccc	$acc2,$t1,$acc1
	addxccc	$acc3,$t2,$acc2		! +=acc[0]*0xFFFFFFFF00000001
	addxc	%g0,$t3,$acc3		! can't overflow

	addcc	$acc0,$acc4,$acc0	! accumulate upper half
	addxccc	$acc1,$acc5,$acc1
	addxccc	$acc2,$acc6,$acc2
	addxccc	$acc3,$acc7,$acc3
	! Final step is "if result > mod, subtract mod", but as comparison
	! means subtraction, we do the subtraction and then copy outcome
	! if it didn't borrow. But note that as we [have to] replace
	! subtraction with addition of the negated modulus, the
	! carry/borrow logic is inverted.
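	!
	! Illustration (not generated code): the sequence below computes
	!
	!	t = acc + (2^256 - mod);	/* addcc of -modulus */
	!
	! and the carry out of the top word is set exactly when the full
	! result is >= mod, so movcs copies t over acc on carry.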
	addcc	$acc0,1,$t0		! add -modulus, i.e. subtract
	not	$poly3,$poly3		! restore 0x00000000FFFFFFFE
	addxccc	$acc1,$poly1,$t1
	addxccc	$acc2,$minus1,$t2
	addxccc	$acc3,$poly3,$t3
	addxccc	$acc4,$minus1,%g0	! did it carry?

	movcs	%xcc,$t0,$acc0
	movcs	%xcc,$t1,$acc1
	movcs	%xcc,$t2,$acc2
	movcs	%xcc,$t3,$acc3
.type	__ecp_nistz256_sqr_mont_vis3,#function
.size	__ecp_nistz256_sqr_mont_vis3,.-__ecp_nistz256_sqr_mont_vis3
########################################################################
# void ecp_nistz256_point_double(P256_POINT *out,const P256_POINT *inp);

my ($res_x,$res_y,$res_z,
    $in_x,$in_y,$in_z,
    $S,$M,$Zsqr,$tmp0)=map(32*$_,(0..9));
# above map() describes stack layout with 10 temporary
# 256-bit vectors on top.
ecp_nistz256_point_double_vis3:
	save	%sp,-STACK64_FRAME-32*10,%sp

.Ldouble_shortcut_vis3:
	sllx	$minus1,32,$poly1	! 0xFFFFFFFF00000000
	srl	$poly3,0,$poly3		! 0x00000000FFFFFFFE

	! convert input to uint64_t[4]
	ld	[$ap+32],$acc0		! in_y
	ld	[$ap+32+16],$acc2
	ld	[$ap+32+24],$acc3

	stx	$a0,[%sp+LOCALS64+$in_x]
	stx	$a1,[%sp+LOCALS64+$in_x+8]
	stx	$a2,[%sp+LOCALS64+$in_x+16]
	stx	$a3,[%sp+LOCALS64+$in_x+24]
	stx	$acc0,[%sp+LOCALS64+$in_y]
	stx	$acc1,[%sp+LOCALS64+$in_y+8]
	stx	$acc2,[%sp+LOCALS64+$in_y+16]
	stx	$acc3,[%sp+LOCALS64+$in_y+24]

	ld	[$ap+64],$a0		! in_z
	stx	$a0,[%sp+LOCALS64+$in_z]
	stx	$a1,[%sp+LOCALS64+$in_z+8]
	stx	$a2,[%sp+LOCALS64+$in_z+16]
	stx	$a3,[%sp+LOCALS64+$in_z+24]

	! in_y is still in $acc0-$acc3
	call	__ecp_nistz256_mul_by_2_vis3	! p256_mul_by_2(S, in_y);
	add	%sp,LOCALS64+$S,$rp

	! in_z is still in $a0-$a3
	call	__ecp_nistz256_sqr_mont_vis3	! p256_sqr_mont(Zsqr, in_z);
	add	%sp,LOCALS64+$Zsqr,$rp

	mov	$acc0,$a0		! put Zsqr aside
	add	%sp,LOCALS64+$in_x,$bp
	call	__ecp_nistz256_add_vis3	! p256_add(M, Zsqr, in_x);
	add	%sp,LOCALS64+$M,$rp

	mov	$a0,$acc0		! restore Zsqr
	ldx	[%sp+LOCALS64+$S],$a0	! forward load
	ldx	[%sp+LOCALS64+$S+8],$a1
	ldx	[%sp+LOCALS64+$S+16],$a2
	ldx	[%sp+LOCALS64+$S+24],$a3

	add	%sp,LOCALS64+$in_x,$bp
	call	__ecp_nistz256_sub_morf_vis3	! p256_sub(Zsqr, in_x, Zsqr);
	add	%sp,LOCALS64+$Zsqr,$rp

	call	__ecp_nistz256_sqr_mont_vis3	! p256_sqr_mont(S, S);
	add	%sp,LOCALS64+$S,$rp

	ldx	[%sp+LOCALS64+$in_z],$bi
	ldx	[%sp+LOCALS64+$in_y],$a0
	ldx	[%sp+LOCALS64+$in_y+8],$a1
	ldx	[%sp+LOCALS64+$in_y+16],$a2
	ldx	[%sp+LOCALS64+$in_y+24],$a3
	add	%sp,LOCALS64+$in_z,$bp
	call	__ecp_nistz256_mul_mont_vis3	! p256_mul_mont(tmp0, in_z, in_y);
	add	%sp,LOCALS64+$tmp0,$rp
	ldx	[%sp+LOCALS64+$M],$bi	! forward load
	ldx	[%sp+LOCALS64+$Zsqr],$a0
	ldx	[%sp+LOCALS64+$Zsqr+8],$a1
	ldx	[%sp+LOCALS64+$Zsqr+16],$a2
	ldx	[%sp+LOCALS64+$Zsqr+24],$a3

	call	__ecp_nistz256_mul_by_2_vis3	! p256_mul_by_2(res_z, tmp0);
	add	%sp,LOCALS64+$res_z,$rp

	add	%sp,LOCALS64+$M,$bp
	call	__ecp_nistz256_mul_mont_vis3	! p256_mul_mont(M, M, Zsqr);
	add	%sp,LOCALS64+$M,$rp

	mov	$acc0,$a0		! put aside M
	call	__ecp_nistz256_mul_by_2_vis3
	add	%sp,LOCALS64+$M,$rp
	mov	$a0,$t0			! copy M
	ldx	[%sp+LOCALS64+$S],$a0	! forward load
	ldx	[%sp+LOCALS64+$S+8],$a1
	ldx	[%sp+LOCALS64+$S+16],$a2
	ldx	[%sp+LOCALS64+$S+24],$a3
	call	__ecp_nistz256_add_noload_vis3	! p256_mul_by_3(M, M);
	add	%sp,LOCALS64+$M,$rp

	call	__ecp_nistz256_sqr_mont_vis3	! p256_sqr_mont(tmp0, S);
	add	%sp,LOCALS64+$tmp0,$rp

	ldx	[%sp+LOCALS64+$S],$bi	! forward load
	ldx	[%sp+LOCALS64+$in_x],$a0
	ldx	[%sp+LOCALS64+$in_x+8],$a1
	ldx	[%sp+LOCALS64+$in_x+16],$a2
	ldx	[%sp+LOCALS64+$in_x+24],$a3

	call	__ecp_nistz256_div_by_2_vis3	! p256_div_by_2(res_y, tmp0);
	add	%sp,LOCALS64+$res_y,$rp

	add	%sp,LOCALS64+$S,$bp
	call	__ecp_nistz256_mul_mont_vis3	! p256_mul_mont(S, S, in_x);
	add	%sp,LOCALS64+$S,$rp

	ldx	[%sp+LOCALS64+$M],$a0	! forward load
	ldx	[%sp+LOCALS64+$M+8],$a1
	ldx	[%sp+LOCALS64+$M+16],$a2
	ldx	[%sp+LOCALS64+$M+24],$a3

	call	__ecp_nistz256_mul_by_2_vis3	! p256_mul_by_2(tmp0, S);
	add	%sp,LOCALS64+$tmp0,$rp

	call	__ecp_nistz256_sqr_mont_vis3	! p256_sqr_mont(res_x, M);
	add	%sp,LOCALS64+$res_x,$rp

	add	%sp,LOCALS64+$tmp0,$bp
	call	__ecp_nistz256_sub_from_vis3	! p256_sub(res_x, res_x, tmp0);
	add	%sp,LOCALS64+$res_x,$rp

	ldx	[%sp+LOCALS64+$M],$a0	! forward load
	ldx	[%sp+LOCALS64+$M+8],$a1
	ldx	[%sp+LOCALS64+$M+16],$a2
	ldx	[%sp+LOCALS64+$M+24],$a3

	add	%sp,LOCALS64+$S,$bp
	call	__ecp_nistz256_sub_morf_vis3	! p256_sub(S, S, res_x);
	add	%sp,LOCALS64+$S,$rp

	call	__ecp_nistz256_mul_mont_vis3	! p256_mul_mont(S, S, M);
	add	%sp,LOCALS64+$S,$rp

	ldx	[%sp+LOCALS64+$res_x],$a0	! forward load
	ldx	[%sp+LOCALS64+$res_x+8],$a1
	ldx	[%sp+LOCALS64+$res_x+16],$a2
	ldx	[%sp+LOCALS64+$res_x+24],$a3

	add	%sp,LOCALS64+$res_y,$bp
	call	__ecp_nistz256_sub_from_vis3	! p256_sub(res_y, S, res_y);
	add	%sp,LOCALS64+$res_y,$rp
	! convert output to uint32_t[8]
	st	$a0,[$rp_real]		! res_x
	st	$t1,[$rp_real+12]
	st	$a2,[$rp_real+16]
	st	$t2,[$rp_real+20]
	st	$a3,[$rp_real+24]
	st	$t3,[$rp_real+28]

	ldx	[%sp+LOCALS64+$res_z],$a0	! forward load
	ldx	[%sp+LOCALS64+$res_z+8],$a1
	ldx	[%sp+LOCALS64+$res_z+16],$a2
	ldx	[%sp+LOCALS64+$res_z+24],$a3

	st	$acc0,[$rp_real+32]	! res_y
	st	$t0, [$rp_real+32+4]
	st	$acc1,[$rp_real+32+8]
	st	$t1, [$rp_real+32+12]
	st	$acc2,[$rp_real+32+16]
	st	$t2, [$rp_real+32+20]
	st	$acc3,[$rp_real+32+24]
	st	$t3, [$rp_real+32+28]

	st	$a0,[$rp_real+64]	! res_z
	st	$t0,[$rp_real+64+4]
	st	$a1,[$rp_real+64+8]
	st	$t1,[$rp_real+64+12]
	st	$a2,[$rp_real+64+16]
	st	$t2,[$rp_real+64+20]
	st	$a3,[$rp_real+64+24]
	st	$t3,[$rp_real+64+28]
.type	ecp_nistz256_point_double_vis3,#function
.size	ecp_nistz256_point_double_vis3,.-ecp_nistz256_point_double_vis3
########################################################################
# void ecp_nistz256_point_add(P256_POINT *out,const P256_POINT *in1,
#			      const P256_POINT *in2);

my ($res_x,$res_y,$res_z,
    $in1_x,$in1_y,$in1_z,
    $in2_x,$in2_y,$in2_z,
    $H,$Hsqr,$R,$Rsqr,$Hcub,
    $U1,$U2,$S1,$S2)=map(32*$_,(0..17));
my ($Z1sqr, $Z2sqr) = ($Hsqr, $Rsqr);

# above map() describes stack layout with 18 temporary
# 256-bit vectors on top. Then we reserve some space for
# !in1infty, !in2infty and result of check for zero.
.globl	ecp_nistz256_point_add_vis3
ecp_nistz256_point_add_vis3:
	save	%sp,-STACK64_FRAME-32*18-32,%sp

	sllx	$minus1,32,$poly1	! 0xFFFFFFFF00000000
	srl	$poly3,0,$poly3		! 0x00000000FFFFFFFE

	! convert input to uint64_t[4]
	ld	[$bp],$a0		! in2_x
	ld	[$bp+32],$acc0		! in2_y
	ld	[$bp+32+16],$acc2
	ld	[$bp+32+24],$acc3

	stx	$a0,[%sp+LOCALS64+$in2_x]
	stx	$a1,[%sp+LOCALS64+$in2_x+8]
	stx	$a2,[%sp+LOCALS64+$in2_x+16]
	stx	$a3,[%sp+LOCALS64+$in2_x+24]
	stx	$acc0,[%sp+LOCALS64+$in2_y]
	stx	$acc1,[%sp+LOCALS64+$in2_y+8]
	stx	$acc2,[%sp+LOCALS64+$in2_y+16]
	stx	$acc3,[%sp+LOCALS64+$in2_y+24]

	or	$acc1,$acc0,$acc0
	or	$acc3,$acc2,$acc2
	or	$acc2,$acc0,$acc0
	movrnz	$a0,-1,$a0		! !in2infty
	stx	$a0,[%fp+STACK_BIAS-8]

	ld	[$bp+64],$acc0		! in2_z
	ld	[$bp+64+16],$acc2
	ld	[$bp+64+24],$acc3

	ld	[$ap],$a0		! in1_x
	stx	$acc0,[%sp+LOCALS64+$in2_z]
	stx	$acc1,[%sp+LOCALS64+$in2_z+8]
	stx	$acc2,[%sp+LOCALS64+$in2_z+16]
	stx	$acc3,[%sp+LOCALS64+$in2_z+24]

	ld	[$ap+32],$acc0		! in1_y
	ld	[$ap+32+16],$acc2
	ld	[$ap+32+24],$acc3

	stx	$a0,[%sp+LOCALS64+$in1_x]
	stx	$a1,[%sp+LOCALS64+$in1_x+8]
	stx	$a2,[%sp+LOCALS64+$in1_x+16]
	stx	$a3,[%sp+LOCALS64+$in1_x+24]
	stx	$acc0,[%sp+LOCALS64+$in1_y]
	stx	$acc1,[%sp+LOCALS64+$in1_y+8]
	stx	$acc2,[%sp+LOCALS64+$in1_y+16]
	stx	$acc3,[%sp+LOCALS64+$in1_y+24]

	or	$acc1,$acc0,$acc0
	or	$acc3,$acc2,$acc2
	or	$acc2,$acc0,$acc0
	movrnz	$a0,-1,$a0		! !in1infty
	stx	$a0,[%fp+STACK_BIAS-16]
	ldx	[%sp+LOCALS64+$in2_z],$a0	! forward load
	ldx	[%sp+LOCALS64+$in2_z+8],$a1
	ldx	[%sp+LOCALS64+$in2_z+16],$a2
	ldx	[%sp+LOCALS64+$in2_z+24],$a3

	ld	[$ap+64],$acc0		! in1_z
	ld	[$ap+64+16],$acc2
	ld	[$ap+64+24],$acc3

	stx	$acc0,[%sp+LOCALS64+$in1_z]
	stx	$acc1,[%sp+LOCALS64+$in1_z+8]
	stx	$acc2,[%sp+LOCALS64+$in1_z+16]
	stx	$acc3,[%sp+LOCALS64+$in1_z+24]

	call	__ecp_nistz256_sqr_mont_vis3	! p256_sqr_mont(Z2sqr, in2_z);
	add	%sp,LOCALS64+$Z2sqr,$rp

	ldx	[%sp+LOCALS64+$in1_z],$a0
	ldx	[%sp+LOCALS64+$in1_z+8],$a1
	ldx	[%sp+LOCALS64+$in1_z+16],$a2
	ldx	[%sp+LOCALS64+$in1_z+24],$a3
	call	__ecp_nistz256_sqr_mont_vis3	! p256_sqr_mont(Z1sqr, in1_z);
	add	%sp,LOCALS64+$Z1sqr,$rp

	ldx	[%sp+LOCALS64+$Z2sqr],$bi
	ldx	[%sp+LOCALS64+$in2_z],$a0
	ldx	[%sp+LOCALS64+$in2_z+8],$a1
	ldx	[%sp+LOCALS64+$in2_z+16],$a2
	ldx	[%sp+LOCALS64+$in2_z+24],$a3
	add	%sp,LOCALS64+$Z2sqr,$bp
	call	__ecp_nistz256_mul_mont_vis3	! p256_mul_mont(S1, Z2sqr, in2_z);
	add	%sp,LOCALS64+$S1,$rp

	ldx	[%sp+LOCALS64+$Z1sqr],$bi
	ldx	[%sp+LOCALS64+$in1_z],$a0
	ldx	[%sp+LOCALS64+$in1_z+8],$a1
	ldx	[%sp+LOCALS64+$in1_z+16],$a2
	ldx	[%sp+LOCALS64+$in1_z+24],$a3
	add	%sp,LOCALS64+$Z1sqr,$bp
	call	__ecp_nistz256_mul_mont_vis3	! p256_mul_mont(S2, Z1sqr, in1_z);
	add	%sp,LOCALS64+$S2,$rp

	ldx	[%sp+LOCALS64+$S1],$bi
	ldx	[%sp+LOCALS64+$in1_y],$a0
	ldx	[%sp+LOCALS64+$in1_y+8],$a1
	ldx	[%sp+LOCALS64+$in1_y+16],$a2
	ldx	[%sp+LOCALS64+$in1_y+24],$a3
	add	%sp,LOCALS64+$S1,$bp
	call	__ecp_nistz256_mul_mont_vis3	! p256_mul_mont(S1, S1, in1_y);
	add	%sp,LOCALS64+$S1,$rp

	ldx	[%sp+LOCALS64+$S2],$bi
	ldx	[%sp+LOCALS64+$in2_y],$a0
	ldx	[%sp+LOCALS64+$in2_y+8],$a1
	ldx	[%sp+LOCALS64+$in2_y+16],$a2
	ldx	[%sp+LOCALS64+$in2_y+24],$a3
	add	%sp,LOCALS64+$S2,$bp
	call	__ecp_nistz256_mul_mont_vis3	! p256_mul_mont(S2, S2, in2_y);
	add	%sp,LOCALS64+$S2,$rp

	ldx	[%sp+LOCALS64+$Z2sqr],$bi	! forward load
	ldx	[%sp+LOCALS64+$in1_x],$a0
	ldx	[%sp+LOCALS64+$in1_x+8],$a1
	ldx	[%sp+LOCALS64+$in1_x+16],$a2
	ldx	[%sp+LOCALS64+$in1_x+24],$a3

	add	%sp,LOCALS64+$S1,$bp
	call	__ecp_nistz256_sub_from_vis3	! p256_sub(R, S2, S1);
	add	%sp,LOCALS64+$R,$rp

	or	$acc1,$acc0,$acc0	! see if result is zero
	or	$acc3,$acc2,$acc2
	or	$acc2,$acc0,$acc0
	stx	$acc0,[%fp+STACK_BIAS-24]

	add	%sp,LOCALS64+$Z2sqr,$bp
	call	__ecp_nistz256_mul_mont_vis3	! p256_mul_mont(U1, in1_x, Z2sqr);
	add	%sp,LOCALS64+$U1,$rp

	ldx	[%sp+LOCALS64+$Z1sqr],$bi
	ldx	[%sp+LOCALS64+$in2_x],$a0
	ldx	[%sp+LOCALS64+$in2_x+8],$a1
	ldx	[%sp+LOCALS64+$in2_x+16],$a2
	ldx	[%sp+LOCALS64+$in2_x+24],$a3
	add	%sp,LOCALS64+$Z1sqr,$bp
	call	__ecp_nistz256_mul_mont_vis3	! p256_mul_mont(U2, in2_x, Z1sqr);
	add	%sp,LOCALS64+$U2,$rp

	ldx	[%sp+LOCALS64+$R],$a0	! forward load
	ldx	[%sp+LOCALS64+$R+8],$a1
	ldx	[%sp+LOCALS64+$R+16],$a2
	ldx	[%sp+LOCALS64+$R+24],$a3

	add	%sp,LOCALS64+$U1,$bp
	call	__ecp_nistz256_sub_from_vis3	! p256_sub(H, U2, U1);
	add	%sp,LOCALS64+$H,$rp

	or	$acc1,$acc0,$acc0	! see if result is zero
	or	$acc3,$acc2,$acc2
	orcc	$acc2,$acc0,$acc0

	bne,pt	%xcc,.Ladd_proceed_vis3	! is_equal(U1,U2)?

	ldx	[%fp+STACK_BIAS-8],$t0
	ldx	[%fp+STACK_BIAS-16],$t1
	ldx	[%fp+STACK_BIAS-24],$t2
	be,pt	%xcc,.Ladd_proceed_vis3	! (in1infty || in2infty)?
	be,a,pt	%xcc,.Ldouble_shortcut_vis3	! is_equal(S1,S2)?
	add	%sp,32*(18-10)+32,%sp	! difference in frame sizes
	st	%g0,[$rp_real+12]
	st	%g0,[$rp_real+16]
	st	%g0,[$rp_real+20]
	st	%g0,[$rp_real+24]
	st	%g0,[$rp_real+28]
	st	%g0,[$rp_real+32]
	st	%g0,[$rp_real+32+4]
	st	%g0,[$rp_real+32+8]
	st	%g0,[$rp_real+32+12]
	st	%g0,[$rp_real+32+16]
	st	%g0,[$rp_real+32+20]
	st	%g0,[$rp_real+32+24]
	st	%g0,[$rp_real+32+28]
	st	%g0,[$rp_real+64]
	st	%g0,[$rp_real+64+4]
	st	%g0,[$rp_real+64+8]
	st	%g0,[$rp_real+64+12]
	st	%g0,[$rp_real+64+16]
	st	%g0,[$rp_real+64+20]
	st	%g0,[$rp_real+64+24]
	st	%g0,[$rp_real+64+28]
2642 call __ecp_nistz256_sqr_mont_vis3 ! p256_sqr_mont(Rsqr, R);
2643 add %sp,LOCALS64+$Rsqr,$rp
2645 ldx [%sp+LOCALS64+$H],$bi
2646 ldx [%sp+LOCALS64+$in1_z],$a0
2647 ldx [%sp+LOCALS64+$in1_z+8],$a1
2648 ldx [%sp+LOCALS64+$in1_z+16],$a2
2649 ldx [%sp+LOCALS64+$in1_z+24],$a3
2650 add %sp,LOCALS64+$H,$bp
2651 call __ecp_nistz256_mul_mont_vis3 ! p256_mul_mont(res_z, H, in1_z);
2652 add %sp,LOCALS64+$res_z,$rp
2654 ldx [%sp+LOCALS64+$H],$a0
2655 ldx [%sp+LOCALS64+$H+8],$a1
2656 ldx [%sp+LOCALS64+$H+16],$a2
2657 ldx [%sp+LOCALS64+$H+24],$a3
2658 call __ecp_nistz256_sqr_mont_vis3 ! p256_sqr_mont(Hsqr, H);
2659 add %sp,LOCALS64+$Hsqr,$rp
2661 ldx [%sp+LOCALS64+$res_z],$bi
2662 ldx [%sp+LOCALS64+$in2_z],$a0
2663 ldx [%sp+LOCALS64+$in2_z+8],$a1
2664 ldx [%sp+LOCALS64+$in2_z+16],$a2
2665 ldx [%sp+LOCALS64+$in2_z+24],$a3
2666 add %sp,LOCALS64+$res_z,$bp
2667 call __ecp_nistz256_mul_mont_vis3 ! p256_mul_mont(res_z, res_z, in2_z);
2668 add %sp,LOCALS64+$res_z,$rp
2670 ldx [%sp+LOCALS64+$H],$bi
2671 ldx [%sp+LOCALS64+$Hsqr],$a0
2672 ldx [%sp+LOCALS64+$Hsqr+8],$a1
2673 ldx [%sp+LOCALS64+$Hsqr+16],$a2
2674 ldx [%sp+LOCALS64+$Hsqr+24],$a3
2675 add %sp,LOCALS64+$H,$bp
2676 call __ecp_nistz256_mul_mont_vis3 ! p256_mul_mont(Hcub, Hsqr, H);
2677 add %sp,LOCALS64+$Hcub,$rp
2679 ldx [%sp+LOCALS64+$U1],$bi
2680 ldx [%sp+LOCALS64+$Hsqr],$a0
2681 ldx [%sp+LOCALS64+$Hsqr+8],$a1
2682 ldx [%sp+LOCALS64+$Hsqr+16],$a2
2683 ldx [%sp+LOCALS64+$Hsqr+24],$a3
2684 add %sp,LOCALS64+$U1,$bp
2685 call __ecp_nistz256_mul_mont_vis3 ! p256_mul_mont(U2, U1, Hsqr);
2686 add %sp,LOCALS64+$U2,$rp
2688 call __ecp_nistz256_mul_by_2_vis3 ! p256_mul_by_2(Hsqr, U2);
2689 add %sp,LOCALS64+$Hsqr,$rp
2691 add %sp,LOCALS64+$Rsqr,$bp
2692 call __ecp_nistz256_sub_morf_vis3 ! p256_sub(res_x, Rsqr, Hsqr);
2693 add %sp,LOCALS64+$res_x,$rp
2695 add %sp,LOCALS64+$Hcub,$bp
2696 call __ecp_nistz256_sub_from_vis3 ! p256_sub(res_x, res_x, Hcub);
2697 add %sp,LOCALS64+$res_x,$rp
	ldx	[%sp+LOCALS64+$S1],$bi		! forward load
	ldx	[%sp+LOCALS64+$Hcub],$a0
	ldx	[%sp+LOCALS64+$Hcub+8],$a1
	ldx	[%sp+LOCALS64+$Hcub+16],$a2
	ldx	[%sp+LOCALS64+$Hcub+24],$a3

	add	%sp,LOCALS64+$U2,$bp
	call	__ecp_nistz256_sub_morf_vis3	! p256_sub(res_y, U2, res_x);
	add	%sp,LOCALS64+$res_y,$rp

	add	%sp,LOCALS64+$S1,$bp
	call	__ecp_nistz256_mul_mont_vis3	! p256_mul_mont(S2, S1, Hcub);
	add	%sp,LOCALS64+$S2,$rp

	ldx	[%sp+LOCALS64+$R],$bi
	ldx	[%sp+LOCALS64+$res_y],$a0
	ldx	[%sp+LOCALS64+$res_y+8],$a1
	ldx	[%sp+LOCALS64+$res_y+16],$a2
	ldx	[%sp+LOCALS64+$res_y+24],$a3
	add	%sp,LOCALS64+$R,$bp
	call	__ecp_nistz256_mul_mont_vis3	! p256_mul_mont(res_y, res_y, R);
	add	%sp,LOCALS64+$res_y,$rp

	add	%sp,LOCALS64+$S2,$bp
	call	__ecp_nistz256_sub_from_vis3	! p256_sub(res_y, res_y, S2);
	add	%sp,LOCALS64+$res_y,$rp
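	! res_y = R*(U1*H^2 - res_x) - S1*H^3 is now complete as well.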
	ldx	[%fp+STACK_BIAS-16],$t1		! !in1infty
	ldx	[%fp+STACK_BIAS-8],$t2		! !in2infty
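# The loop below merges the three candidate results in constant time:
# $t1/$t2 hold all-ones masks when in1/in2 are *not* at infinity, so
# movrz (move if register is zero) substitutes the other input. A
# minimal Perl model of the per-limb selection (the subroutine name is
# illustrative only, not part of the module):
#
#	sub cmov_limb {
#	    my ($res, $in1, $in2, $not1, $not2) = @_;
#	    $res = $in2 if (!$not1);	# in1 at infinity: P+Q == Q
#	    $res = $in1 if (!$not2);	# in2 at infinity: P+Q == P
#	    return $res;
#	}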
for($i=0;$i<96;$i+=16) {			# conditional moves
	ldx	[%sp+LOCALS64+$res_x+$i],$acc0		! res
	ldx	[%sp+LOCALS64+$res_x+$i+8],$acc1
	ldx	[%sp+LOCALS64+$in2_x+$i],$acc2		! in2
	ldx	[%sp+LOCALS64+$in2_x+$i+8],$acc3
	ldx	[%sp+LOCALS64+$in1_x+$i],$acc4		! in1
	ldx	[%sp+LOCALS64+$in1_x+$i+8],$acc5
	movrz	$t1,$acc2,$acc0
	movrz	$t1,$acc3,$acc1
	movrz	$t2,$acc4,$acc0
	movrz	$t2,$acc5,$acc1
	srlx	$acc0,32,$acc2		! split 64-bit limbs into 32-bit words
	srlx	$acc1,32,$acc3
	st	$acc0,[$rp_real+$i]
	st	$acc2,[$rp_real+$i+4]
	st	$acc1,[$rp_real+$i+8]
	st	$acc3,[$rp_real+$i+12]
}

	ret
	restore
.type	ecp_nistz256_point_add_vis3,#function
.size	ecp_nistz256_point_add_vis3,.-ecp_nistz256_point_add_vis3
########################################################################
# void ecp_nistz256_point_add_affine(P256_POINT *out,const P256_POINT *in1,
#				     const P256_POINT_AFFINE *in2);
my ($res_x,$res_y,$res_z,
    $in1_x,$in1_y,$in1_z,
    $in2_x,$in2_y,
    $U2,$S2,$H,$R,$Hsqr,$Hcub,$Rsqr)=map(32*$_,(0..14));
my $Z1sqr = $S2;
# above map() describes stack layout with 15 temporary
# 256-bit vectors on top. Then we reserve some space for
# !in1infty and !in2infty.
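# A sketch of the mixed Jacobian+affine addition computed below (not
# authoritative pseudo-code, just the textbook formulas the calls
# implement; Z2 == 1, so U1 = in1_x and S1 = in1_y need no correction,
# cf. the generic point_add above):
#
#	Z1sqr = in1_z^2;	U2 = in2_x*Z1sqr;	H = U2 - in1_x;
#	S2 = Z1sqr*in1_z;	S2 = S2*in2_y;		R = S2 - in1_y;
#	res_z = H*in1_z;
#	Hsqr = H^2;		Rsqr = R^2;		Hcub = Hsqr*H;
#	U2 = in1_x*Hsqr;
#	res_x = Rsqr - 2*U2 - Hcub;
#	res_y = R*(U2 - res_x) - in1_y*Hcub;
#
# with all operations performed in the Montgomery domain modulo P.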
ecp_nistz256_point_add_affine_vis3:
	save	%sp,-STACK64_FRAME-32*15-32,%sp

	sllx	$minus1,32,$poly1	! 0xFFFFFFFF00000000
	srl	$poly3,0,$poly3		! 0x00000000FFFFFFFE

	! convert input to uint64_t[4]
	ld	[$bp],$a0		! in2_x
	ld	[$bp+32],$acc0		! in2_y
	ld	[$bp+32+16],$acc2
	ld	[$bp+32+24],$acc3

	stx	$a0,[%sp+LOCALS64+$in2_x]
	stx	$a1,[%sp+LOCALS64+$in2_x+8]
	stx	$a2,[%sp+LOCALS64+$in2_x+16]
	stx	$a3,[%sp+LOCALS64+$in2_x+24]
	stx	$acc0,[%sp+LOCALS64+$in2_y]
	stx	$acc1,[%sp+LOCALS64+$in2_y+8]
	stx	$acc2,[%sp+LOCALS64+$in2_y+16]
	stx	$acc3,[%sp+LOCALS64+$in2_y+24]

	or	$acc1,$acc0,$acc0
	or	$acc3,$acc2,$acc2
	or	$acc2,$acc0,$acc0
	movrnz	$a0,-1,$a0		! !in2infty
	stx	$a0,[%fp+STACK_BIAS-8]
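	! An affine point at infinity is encoded as all-zero (x,y): the
	! or-chains fold the coordinate words together, and movrnz turns
	! "any bit set" into an all-ones !in2infty mask for the final
	! conditional moves.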
	ld	[$ap],$a0		! in1_x
	ld	[$ap+32],$acc0		! in1_y
	ld	[$ap+32+16],$acc2
	ld	[$ap+32+24],$acc3

	stx	$a0,[%sp+LOCALS64+$in1_x]
	stx	$a1,[%sp+LOCALS64+$in1_x+8]
	stx	$a2,[%sp+LOCALS64+$in1_x+16]
	stx	$a3,[%sp+LOCALS64+$in1_x+24]
	stx	$acc0,[%sp+LOCALS64+$in1_y]
	stx	$acc1,[%sp+LOCALS64+$in1_y+8]
	stx	$acc2,[%sp+LOCALS64+$in1_y+16]
	stx	$acc3,[%sp+LOCALS64+$in1_y+24]

	or	$acc1,$acc0,$acc0
	or	$acc3,$acc2,$acc2
	or	$acc2,$acc0,$acc0
	movrnz	$a0,-1,$a0		! !in1infty
	stx	$a0,[%fp+STACK_BIAS-16]

	ld	[$ap+64],$a0		! in1_z
	stx	$a0,[%sp+LOCALS64+$in1_z]
	stx	$a1,[%sp+LOCALS64+$in1_z+8]
	stx	$a2,[%sp+LOCALS64+$in1_z+16]
	stx	$a3,[%sp+LOCALS64+$in1_z+24]
	call	__ecp_nistz256_sqr_mont_vis3	! p256_sqr_mont(Z1sqr, in1_z);
	add	%sp,LOCALS64+$Z1sqr,$rp

	ldx	[%sp+LOCALS64+$in2_x],$bi
	add	%sp,LOCALS64+$in2_x,$bp
	call	__ecp_nistz256_mul_mont_vis3	! p256_mul_mont(U2, Z1sqr, in2_x);
	add	%sp,LOCALS64+$U2,$rp

	ldx	[%sp+LOCALS64+$Z1sqr],$bi	! forward load
	ldx	[%sp+LOCALS64+$in1_z],$a0
	ldx	[%sp+LOCALS64+$in1_z+8],$a1
	ldx	[%sp+LOCALS64+$in1_z+16],$a2
	ldx	[%sp+LOCALS64+$in1_z+24],$a3

	add	%sp,LOCALS64+$in1_x,$bp
	call	__ecp_nistz256_sub_from_vis3	! p256_sub(H, U2, in1_x);
	add	%sp,LOCALS64+$H,$rp

	add	%sp,LOCALS64+$Z1sqr,$bp
	call	__ecp_nistz256_mul_mont_vis3	! p256_mul_mont(S2, Z1sqr, in1_z);
	add	%sp,LOCALS64+$S2,$rp

	ldx	[%sp+LOCALS64+$H],$bi
	ldx	[%sp+LOCALS64+$in1_z],$a0
	ldx	[%sp+LOCALS64+$in1_z+8],$a1
	ldx	[%sp+LOCALS64+$in1_z+16],$a2
	ldx	[%sp+LOCALS64+$in1_z+24],$a3
	add	%sp,LOCALS64+$H,$bp
	call	__ecp_nistz256_mul_mont_vis3	! p256_mul_mont(res_z, H, in1_z);
	add	%sp,LOCALS64+$res_z,$rp

	ldx	[%sp+LOCALS64+$S2],$bi
	ldx	[%sp+LOCALS64+$in2_y],$a0
	ldx	[%sp+LOCALS64+$in2_y+8],$a1
	ldx	[%sp+LOCALS64+$in2_y+16],$a2
	ldx	[%sp+LOCALS64+$in2_y+24],$a3
	add	%sp,LOCALS64+$S2,$bp
	call	__ecp_nistz256_mul_mont_vis3	! p256_mul_mont(S2, S2, in2_y);
	add	%sp,LOCALS64+$S2,$rp

	ldx	[%sp+LOCALS64+$H],$a0		! forward load
	ldx	[%sp+LOCALS64+$H+8],$a1
	ldx	[%sp+LOCALS64+$H+16],$a2
	ldx	[%sp+LOCALS64+$H+24],$a3

	add	%sp,LOCALS64+$in1_y,$bp
	call	__ecp_nistz256_sub_from_vis3	! p256_sub(R, S2, in1_y);
	add	%sp,LOCALS64+$R,$rp

	call	__ecp_nistz256_sqr_mont_vis3	! p256_sqr_mont(Hsqr, H);
	add	%sp,LOCALS64+$Hsqr,$rp

	ldx	[%sp+LOCALS64+$R],$a0
	ldx	[%sp+LOCALS64+$R+8],$a1
	ldx	[%sp+LOCALS64+$R+16],$a2
	ldx	[%sp+LOCALS64+$R+24],$a3
	call	__ecp_nistz256_sqr_mont_vis3	! p256_sqr_mont(Rsqr, R);
	add	%sp,LOCALS64+$Rsqr,$rp
	ldx	[%sp+LOCALS64+$H],$bi
	ldx	[%sp+LOCALS64+$Hsqr],$a0
	ldx	[%sp+LOCALS64+$Hsqr+8],$a1
	ldx	[%sp+LOCALS64+$Hsqr+16],$a2
	ldx	[%sp+LOCALS64+$Hsqr+24],$a3
	add	%sp,LOCALS64+$H,$bp
	call	__ecp_nistz256_mul_mont_vis3	! p256_mul_mont(Hcub, Hsqr, H);
	add	%sp,LOCALS64+$Hcub,$rp

	ldx	[%sp+LOCALS64+$Hsqr],$bi
	ldx	[%sp+LOCALS64+$in1_x],$a0
	ldx	[%sp+LOCALS64+$in1_x+8],$a1
	ldx	[%sp+LOCALS64+$in1_x+16],$a2
	ldx	[%sp+LOCALS64+$in1_x+24],$a3
	add	%sp,LOCALS64+$Hsqr,$bp
	call	__ecp_nistz256_mul_mont_vis3	! p256_mul_mont(U2, in1_x, Hsqr);
	add	%sp,LOCALS64+$U2,$rp

	call	__ecp_nistz256_mul_by_2_vis3	! p256_mul_by_2(Hsqr, U2);
	add	%sp,LOCALS64+$Hsqr,$rp

	add	%sp,LOCALS64+$Rsqr,$bp
	call	__ecp_nistz256_sub_morf_vis3	! p256_sub(res_x, Rsqr, Hsqr);
	add	%sp,LOCALS64+$res_x,$rp

	add	%sp,LOCALS64+$Hcub,$bp
	call	__ecp_nistz256_sub_from_vis3	! p256_sub(res_x, res_x, Hcub);
	add	%sp,LOCALS64+$res_x,$rp

	ldx	[%sp+LOCALS64+$Hcub],$bi	! forward load
	ldx	[%sp+LOCALS64+$in1_y],$a0
	ldx	[%sp+LOCALS64+$in1_y+8],$a1
	ldx	[%sp+LOCALS64+$in1_y+16],$a2
	ldx	[%sp+LOCALS64+$in1_y+24],$a3

	add	%sp,LOCALS64+$U2,$bp
	call	__ecp_nistz256_sub_morf_vis3	! p256_sub(res_y, U2, res_x);
	add	%sp,LOCALS64+$res_y,$rp

	add	%sp,LOCALS64+$Hcub,$bp
	call	__ecp_nistz256_mul_mont_vis3	! p256_mul_mont(S2, in1_y, Hcub);
	add	%sp,LOCALS64+$S2,$rp

	ldx	[%sp+LOCALS64+$R],$bi
	ldx	[%sp+LOCALS64+$res_y],$a0
	ldx	[%sp+LOCALS64+$res_y+8],$a1
	ldx	[%sp+LOCALS64+$res_y+16],$a2
	ldx	[%sp+LOCALS64+$res_y+24],$a3
	add	%sp,LOCALS64+$R,$bp
	call	__ecp_nistz256_mul_mont_vis3	! p256_mul_mont(res_y, res_y, R);
	add	%sp,LOCALS64+$res_y,$rp

	add	%sp,LOCALS64+$S2,$bp
	call	__ecp_nistz256_sub_from_vis3	! p256_sub(res_y, res_y, S2);
	add	%sp,LOCALS64+$res_y,$rp
	ldx	[%fp+STACK_BIAS-16],$t1		! !in1infty
	ldx	[%fp+STACK_BIAS-8],$t2		! !in2infty

1:	call	.+8
	add	%o7,.Lone_mont_vis3-1b,$bp
for($i=0;$i<64;$i+=16) {			# conditional moves
	ldx	[%sp+LOCALS64+$res_x+$i],$acc0		! res
	ldx	[%sp+LOCALS64+$res_x+$i+8],$acc1
	ldx	[%sp+LOCALS64+$in2_x+$i],$acc2		! in2
	ldx	[%sp+LOCALS64+$in2_x+$i+8],$acc3
	ldx	[%sp+LOCALS64+$in1_x+$i],$acc4		! in1
	ldx	[%sp+LOCALS64+$in1_x+$i+8],$acc5
	movrz	$t1,$acc2,$acc0
	movrz	$t1,$acc3,$acc1
	movrz	$t2,$acc4,$acc0
	movrz	$t2,$acc5,$acc1
	srlx	$acc0,32,$acc2		! split 64-bit limbs into 32-bit words
	srlx	$acc1,32,$acc3
	st	$acc0,[$rp_real+$i]
	st	$acc2,[$rp_real+$i+4]
	st	$acc1,[$rp_real+$i+8]
	st	$acc3,[$rp_real+$i+12]
}
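# The first 64 bytes (x and y) choose between res, in2 and in1 just as
# in the generic point_add above. For z the affine in2 carries an
# implicit Z == 1, so the remaining iterations take their "in2" operand
# from .Lone_mont_vis3, i.e. 1 in Montgomery form, which $bp was
# pointed at before the loop.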
for(;$i<96;$i+=16) {
	ldx	[%sp+LOCALS64+$res_x+$i],$acc0		! res
	ldx	[%sp+LOCALS64+$res_x+$i+8],$acc1
	ldx	[$bp+$i-64],$acc2			! "in2"
	ldx	[$bp+$i-64+8],$acc3
	ldx	[%sp+LOCALS64+$in1_x+$i],$acc4		! in1
	ldx	[%sp+LOCALS64+$in1_x+$i+8],$acc5
	movrz	$t1,$acc2,$acc0
	movrz	$t1,$acc3,$acc1
	movrz	$t2,$acc4,$acc0
	movrz	$t2,$acc5,$acc1
	srlx	$acc0,32,$acc2		! split 64-bit limbs into 32-bit words
	srlx	$acc1,32,$acc3
	st	$acc0,[$rp_real+$i]
	st	$acc2,[$rp_real+$i+4]
	st	$acc1,[$rp_real+$i+8]
	st	$acc3,[$rp_real+$i+12]
}

	ret
	restore
.type	ecp_nistz256_point_add_affine_vis3,#function
.size	ecp_nistz256_point_add_affine_vis3,.-ecp_nistz256_point_add_affine_vis3
! 1 converted to the Montgomery domain, i.e. 2^256 mod P, stored as
! four big-endian 64-bit limbs
.Lone_mont_vis3:
.long	0x00000000,0x00000001, 0xffffffff,0x00000000
.long	0xffffffff,0xffffffff, 0x00000000,0xfffffffe
# Purpose of this subroutine is to explicitly encode VIS instructions,
# so that the module can be assembled without specifying VIS extensions
# on the compiler command line, e.g. -xarch=v9 vs. -xarch=v9a. The idea
# is to keep open the option of producing a "universal" binary and to
# let the program detect at run-time whether the current CPU is
# VIS3-capable.
sub unvis3 {
my ($mnemonic,$rs1,$rs2,$rd)=@_;
my %bias = ( "g" => 0, "o" => 8, "l" => 16, "i" => 24 );
my ($ref,$opf);
my %visopf = (	"addxc"		=> 0x011,
		"addxccc"	=> 0x013,
		"umulxhi"	=> 0x016	);

    $ref = "$mnemonic\t$rs1,$rs2,$rd";

    if ($opf=$visopf{$mnemonic}) {
	foreach ($rs1,$rs2,$rd) {
	    return $ref if (!/%([goli])([0-9])/);
	    $_=$bias{$1}+$2;	# map to absolute register number
	}
	return	sprintf ".word\t0x%08x !%s",
			0x81b00000|$rd<<25|$rs1<<14|$opf<<5|$rs2,
			$ref;
    } else {
	return $ref;
    }
}
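# A worked example, derived from the formula above as a sanity check
# (not an authoritative encoding reference): "addxc %o0,%o1,%o2" has
# rs1=8, rs2=9, rd=10 and opf=0x011, so unvis3 rewrites it as
#
#	.word	0x95b20229 !addxc	%o0,%o1,%o2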
foreach (split("\n",$code)) {
	s/\`([^\`]*)\`/eval $1/ge;

	s/\b(umulxhi|addxc[c]{0,2})\s+(%[goli][0-7]),\s*(%[goli][0-7]),\s*(%[goli][0-7])/
	 &unvis3($1,$2,$3,$4)
	/ge;

	print $_,"\n";
}

close STDOUT;