3 # ====================================================================
4 # Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
5 # project. The module is, however, dual licensed under OpenSSL and
6 # CRYPTOGAMS licenses depending on where you obtain it. For further
7 # details see http://www.openssl.org/~appro/cryptogams/.
8 # ====================================================================
10 # ECP_NISTZ256 module for ARMv8.
14 # Original ECP_NISTZ256 submission targeting x86_64 is detailed in
15 # http://eprint.iacr.org/2013/816.
17 # with/without -DECP_NISTZ256_ASM
19 # Cortex-A53 +120-400%
20 # Cortex-A57 +120-350%
24 # Ranges denote minimum and maximum improvement coefficients depending
25 # on benchmark. Lower coefficients are for ECDSA sign, server-side
26 # operation. Keep in mind that +400% means 5x improvement.
29 while (($output=shift) && ($output!~/^\w[\w\-]*\.\w+$/)) {}
31 $0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
32 ( $xlate="${dir}arm-xlate.pl" and -f $xlate ) or
33 ( $xlate="${dir}../../perlasm/arm-xlate.pl" and -f $xlate) or
34 die "can't locate arm-xlate.pl";
36 open OUT,"| \"$^X\" $xlate $flavour $output";
40 my ($rp,$ap,$bp,$bi,$a0,$a1,$a2,$a3,$t0,$t1,$t2,$t3,$poly1,$poly3,
41 $acc0,$acc1,$acc2,$acc3,$acc4,$acc5) =
42 map("x$_",(0..17,19,20));
44 my ($acc6,$acc7)=($ap,$bp); # used in __ecp_nistz256_sqr_mont
51 ########################################################################
52 # Convert ecp_nistz256_table.c to layout expected by ecp_nistz_gather_w7
54 $0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
55 open TABLE,"<ecp_nistz256_table.c" or
56 open TABLE,"<${dir}../ecp_nistz256_table.c" or
57 die "failed to open ecp_nistz256_table.c:",$!;
62 s/TOBN\(\s*(0x[0-9a-f]+),\s*(0x[0-9a-f]+)\s*\)/push @arr,hex($2),hex($1)/geo;
66 # See ecp_nistz256_table.c for explanation for why it's 64*16*37.
67 # 64*16*37-1 is because $#arr returns last valid index of @arr, not
69 die "insane number of elements" if ($#arr != 64*16*37-1);
72 .globl ecp_nistz256_precomputed
73 .type ecp_nistz256_precomputed,%object
75 ecp_nistz256_precomputed:
77 ########################################################################
78 # this conversion smashes P256_POINT_AFFINE by individual bytes with
79 # 64 byte interval, similar to
83 @tbl = splice(@arr,0,64*16);
84 for($i=0;$i<64;$i++) {
86 for($j=0;$j<64;$j++) {
87 push @line,(@tbl[$j*16+$i/4]>>(($i%4)*8))&0xff;
90 $code.=join(',',map { sprintf "0x%02x",$_} @line);
95 .size ecp_nistz256_precomputed,.-ecp_nistz256_precomputed
98 .quad 0xffffffffffffffff,0x00000000ffffffff,0x0000000000000000,0xffffffff00000001
99 .LRR: // 2^512 mod P precomputed for NIST P256 polynomial
100 .quad 0x0000000000000003,0xfffffffbffffffff,0xfffffffffffffffe,0x00000004fffffffd
102 .quad 0x0000000000000001,0xffffffff00000000,0xffffffffffffffff,0x00000000fffffffe
105 .asciz "ECP_NISTZ256 for ARMv8, CRYPTOGAMS by <appro\@openssl.org>"
107 // void ecp_nistz256_to_mont(BN_ULONG x0[4],const BN_ULONG x1[4]);
108 .globl ecp_nistz256_to_mont
109 .type ecp_nistz256_to_mont,%function
111 ecp_nistz256_to_mont:
112 stp x29,x30,[sp,#-32]!
116 ldr $bi,.LRR // bp[0]
118 ldp $a2,$a3,[$ap,#16]
121 adr $bp,.LRR // &bp[0]
123 bl __ecp_nistz256_mul_mont
128 .size ecp_nistz256_to_mont,.-ecp_nistz256_to_mont
130 // void ecp_nistz256_from_mont(BN_ULONG x0[4],const BN_ULONG x1[4]);
131 .globl ecp_nistz256_from_mont
132 .type ecp_nistz256_from_mont,%function
134 ecp_nistz256_from_mont:
135 stp x29,x30,[sp,#-32]!
141 ldp $a2,$a3,[$ap,#16]
144 adr $bp,.Lone // &bp[0]
146 bl __ecp_nistz256_mul_mont
151 .size ecp_nistz256_from_mont,.-ecp_nistz256_from_mont
153 // void ecp_nistz256_mul_mont(BN_ULONG x0[4],const BN_ULONG x1[4],
154 // const BN_ULONG x2[4]);
155 .globl ecp_nistz256_mul_mont
156 .type ecp_nistz256_mul_mont,%function
158 ecp_nistz256_mul_mont:
159 stp x29,x30,[sp,#-32]!
163 ldr $bi,[$bp] // bp[0]
165 ldp $a2,$a3,[$ap,#16]
169 bl __ecp_nistz256_mul_mont
174 .size ecp_nistz256_mul_mont,.-ecp_nistz256_mul_mont
176 // void ecp_nistz256_sqr_mont(BN_ULONG x0[4],const BN_ULONG x1[4]);
177 .globl ecp_nistz256_sqr_mont
178 .type ecp_nistz256_sqr_mont,%function
180 ecp_nistz256_sqr_mont:
181 stp x29,x30,[sp,#-32]!
186 ldp $a2,$a3,[$ap,#16]
190 bl __ecp_nistz256_sqr_mont
195 .size ecp_nistz256_sqr_mont,.-ecp_nistz256_sqr_mont
197 // void ecp_nistz256_add(BN_ULONG x0[4],const BN_ULONG x1[4],
198 // const BN_ULONG x2[4]);
199 .globl ecp_nistz256_add
200 .type ecp_nistz256_add,%function
203 stp x29,x30,[sp,#-16]!
206 ldp $acc0,$acc1,[$ap]
208 ldp $acc2,$acc3,[$ap,#16]
209 ldp $t2,$t3,[$bp,#16]
213 bl __ecp_nistz256_add
217 .size ecp_nistz256_add,.-ecp_nistz256_add
219 // void ecp_nistz256_div_by_2(BN_ULONG x0[4],const BN_ULONG x1[4]);
220 .globl ecp_nistz256_div_by_2
221 .type ecp_nistz256_div_by_2,%function
223 ecp_nistz256_div_by_2:
224 stp x29,x30,[sp,#-16]!
227 ldp $acc0,$acc1,[$ap]
228 ldp $acc2,$acc3,[$ap,#16]
232 bl __ecp_nistz256_div_by_2
236 .size ecp_nistz256_div_by_2,.-ecp_nistz256_div_by_2
238 // void ecp_nistz256_mul_by_2(BN_ULONG x0[4],const BN_ULONG x1[4]);
239 .globl ecp_nistz256_mul_by_2
240 .type ecp_nistz256_mul_by_2,%function
242 ecp_nistz256_mul_by_2:
243 stp x29,x30,[sp,#-16]!
246 ldp $acc0,$acc1,[$ap]
247 ldp $acc2,$acc3,[$ap,#16]
255 bl __ecp_nistz256_add // ret = a+a // 2*a
259 .size ecp_nistz256_mul_by_2,.-ecp_nistz256_mul_by_2
261 // void ecp_nistz256_mul_by_3(BN_ULONG x0[4],const BN_ULONG x1[4]);
262 .globl ecp_nistz256_mul_by_3
263 .type ecp_nistz256_mul_by_3,%function
265 ecp_nistz256_mul_by_3:
266 stp x29,x30,[sp,#-16]!
269 ldp $acc0,$acc1,[$ap]
270 ldp $acc2,$acc3,[$ap,#16]
282 bl __ecp_nistz256_add // ret = a+a // 2*a
289 bl __ecp_nistz256_add // ret += a // 2*a+a=3*a
293 .size ecp_nistz256_mul_by_3,.-ecp_nistz256_mul_by_3
295 // void ecp_nistz256_sub(BN_ULONG x0[4],const BN_ULONG x1[4],
296 // const BN_ULONG x2[4]);
297 .globl ecp_nistz256_sub
298 .type ecp_nistz256_sub,%function
301 stp x29,x30,[sp,#-16]!
304 ldp $acc0,$acc1,[$ap]
305 ldp $acc2,$acc3,[$ap,#16]
309 bl __ecp_nistz256_sub_from
313 .size ecp_nistz256_sub,.-ecp_nistz256_sub
315 // void ecp_nistz256_neg(BN_ULONG x0[4],const BN_ULONG x1[4]);
316 .globl ecp_nistz256_neg
317 .type ecp_nistz256_neg,%function
320 stp x29,x30,[sp,#-16]!
324 mov $acc0,xzr // a = 0
331 bl __ecp_nistz256_sub_from
335 .size ecp_nistz256_neg,.-ecp_nistz256_neg
337 // note that __ecp_nistz256_mul_mont expects a[0-3] input pre-loaded
338 // to $a0-$a3 and b[0] - to $bi
339 .type __ecp_nistz256_mul_mont,%function
341 __ecp_nistz256_mul_mont:
342 mul $acc0,$a0,$bi // a[0]*b[0]
345 mul $acc1,$a1,$bi // a[1]*b[0]
348 mul $acc2,$a2,$bi // a[2]*b[0]
351 mul $acc3,$a3,$bi // a[3]*b[0]
353 ldr $bi,[$bp,#8] // b[1]
355 adds $acc1,$acc1,$t0 // accumulate high parts of multiplication
363 for($i=1;$i<4;$i++) {
364 # Reduction iteration is normally performed by accumulating
365 # result of multiplication of modulus by "magic" digit [and
366 # omitting least significant word, which is guaranteed to
367 # be 0], but thanks to special form of modulus and "magic"
368 # digit being equal to least significant word, it can be
369 # performed with additions and subtractions alone. Indeed:
371 # ffff0001.00000000.0000ffff.ffffffff
373 # + xxxxxxxx.xxxxxxxx.xxxxxxxx.xxxxxxxx.abcdefgh
375 # Now observing that ff..ff*x = (2^n-1)*x = 2^n*x-x, we
378 # xxxxxxxx.xxxxxxxx.xxxxxxxx.xxxxxxxx.abcdefgh
379 # + abcdefgh.abcdefgh.0000abcd.efgh0000.00000000
380 # - 0000abcd.efgh0000.00000000.00000000.abcdefgh
382 # or marking redundant operations:
384 # xxxxxxxx.xxxxxxxx.xxxxxxxx.xxxxxxxx.--------
385 # + abcdefgh.abcdefgh.0000abcd.efgh0000.--------
386 # - 0000abcd.efgh0000.--------.--------.--------
389 subs $t2,$acc0,$t0 // "*0xffff0001"
391 adds $acc0,$acc1,$t0 // +=acc[0]<<96 and omit acc[0]
392 mul $t0,$a0,$bi // lo(a[0]*b[i])
394 mul $t1,$a1,$bi // lo(a[1]*b[i])
395 adcs $acc2,$acc3,$t2 // +=acc[0]*0xffff0001
396 mul $t2,$a2,$bi // lo(a[2]*b[i])
398 mul $t3,$a3,$bi // lo(a[3]*b[i])
401 adds $acc0,$acc0,$t0 // accumulate low parts of multiplication
402 umulh $t0,$a0,$bi // hi(a[0]*b[i])
404 umulh $t1,$a1,$bi // hi(a[1]*b[i])
406 umulh $t2,$a2,$bi // hi(a[2]*b[i])
408 umulh $t3,$a3,$bi // hi(a[3]*b[i])
411 $code.=<<___ if ($i<3);
412 ldr $bi,[$bp,#8*($i+1)] // b[$i+1]
415 adds $acc1,$acc1,$t0 // accumulate high parts of multiplication
426 subs $t2,$acc0,$t0 // "*0xffff0001"
428 adds $acc0,$acc1,$t0 // +=acc[0]<<96 and omit acc[0]
430 adcs $acc2,$acc3,$t2 // +=acc[0]*0xffff0001
434 adds $t0,$acc0,#1 // subs $t0,$acc0,#-1 // tmp = ret-modulus
435 sbcs $t1,$acc1,$poly1
437 sbcs $t3,$acc3,$poly3
438 sbcs xzr,$acc4,xzr // did it borrow?
440 csel $acc0,$acc0,$t0,lo // ret = borrow ? ret : ret-modulus
441 csel $acc1,$acc1,$t1,lo
442 csel $acc2,$acc2,$t2,lo
443 stp $acc0,$acc1,[$rp]
444 csel $acc3,$acc3,$t3,lo
445 stp $acc2,$acc3,[$rp,#16]
448 .size __ecp_nistz256_mul_mont,.-__ecp_nistz256_mul_mont
450 // note that __ecp_nistz256_sqr_mont expects a[0-3] input pre-loaded
452 .type __ecp_nistz256_sqr_mont,%function
454 __ecp_nistz256_sqr_mont:
455 // | | | | | |a1*a0| |
456 // | | | | |a2*a0| | |
457 // | |a3*a2|a3*a0| | | |
458 // | | | |a2*a1| | | |
459 // | | |a3*a1| | | | |
460 // *| | | | | | | | 2|
461 // +|a3*a3|a2*a2|a1*a1|a0*a0|
462 // |--+--+--+--+--+--+--+--|
463 // |A7|A6|A5|A4|A3|A2|A1|A0|, where Ax is $accx, i.e. follow $accx
465 // "can't overflow" below mark carrying into high part of
466 // multiplication result, which can't overflow, because it
467 // can never be all ones.
469 mul $acc1,$a1,$a0 // a[1]*a[0]
471 mul $acc2,$a2,$a0 // a[2]*a[0]
473 mul $acc3,$a3,$a0 // a[3]*a[0]
476 adds $acc2,$acc2,$t1 // accumulate high parts of multiplication
477 mul $t0,$a2,$a1 // a[2]*a[1]
480 mul $t2,$a3,$a1 // a[3]*a[1]
482 adc $acc4,$acc4,xzr // can't overflow
484 mul $acc5,$a3,$a2 // a[3]*a[2]
487 adds $t1,$t1,$t2 // accumulate high parts of multiplication
488 mul $acc0,$a0,$a0 // a[0]*a[0]
489 adc $t2,$t3,xzr // can't overflow
491 adds $acc3,$acc3,$t0 // accumulate low parts of multiplication
494 mul $t1,$a1,$a1 // a[1]*a[1]
497 adc $acc6,$acc6,xzr // can't overflow
499 adds $acc1,$acc1,$acc1 // acc[1-6]*=2
500 mul $t2,$a2,$a2 // a[2]*a[2]
501 adcs $acc2,$acc2,$acc2
503 adcs $acc3,$acc3,$acc3
504 mul $t3,$a3,$a3 // a[3]*a[3]
505 adcs $acc4,$acc4,$acc4
507 adcs $acc5,$acc5,$acc5
508 adcs $acc6,$acc6,$acc6
511 adds $acc1,$acc1,$a0 // +a[i]*a[i]
521 for($i=0;$i<3;$i++) { # reductions, see commentary in
522 # multiplication for details
524 subs $t2,$acc0,$t0 // "*0xffff0001"
526 adds $acc0,$acc1,$t0 // +=acc[0]<<96 and omit acc[0]
529 adcs $acc2,$acc3,$t2 // +=acc[0]*0xffff0001
531 adc $acc3,$t3,xzr // can't overflow
535 subs $t2,$acc0,$t0 // "*0xffff0001"
537 adds $acc0,$acc1,$t0 // +=acc[0]<<96 and omit acc[0]
539 adcs $acc2,$acc3,$t2 // +=acc[0]*0xffff0001
540 adc $acc3,$t3,xzr // can't overflow
542 adds $acc0,$acc0,$acc4 // accumulate upper half
543 adcs $acc1,$acc1,$acc5
544 adcs $acc2,$acc2,$acc6
545 adcs $acc3,$acc3,$acc7
548 adds $t0,$acc0,#1 // subs $t0,$acc0,#-1 // tmp = ret-modulus
549 sbcs $t1,$acc1,$poly1
551 sbcs $t3,$acc3,$poly3
552 sbcs xzr,$acc4,xzr // did it borrow?
554 csel $acc0,$acc0,$t0,lo // ret = borrow ? ret : ret-modulus
555 csel $acc1,$acc1,$t1,lo
556 csel $acc2,$acc2,$t2,lo
557 stp $acc0,$acc1,[$rp]
558 csel $acc3,$acc3,$t3,lo
559 stp $acc2,$acc3,[$rp,#16]
562 .size __ecp_nistz256_sqr_mont,.-__ecp_nistz256_sqr_mont
564 // Note that __ecp_nistz256_add expects both input vectors pre-loaded to
565 // $a0-$a3 and $t0-$t3. This is done because it's used in multiple
566 // contexts, e.g. in multiplication by 2 and 3...
567 .type __ecp_nistz256_add,%function
570 adds $acc0,$acc0,$t0 // ret = a+b
574 adc $ap,xzr,xzr // zap $ap
576 adds $t0,$acc0,#1 // subs $t0,$a0,#-1 // tmp = ret-modulus
577 sbcs $t1,$acc1,$poly1
580 cmp $ap,xzr // did addition carry?
582 csel $acc0,$acc0,$t0,eq // ret = carry ? ret-modulus : ret
583 csel $acc1,$acc1,$t1,eq
584 csel $acc2,$acc2,$t2,eq
585 stp $acc0,$acc1,[$rp]
586 csel $acc3,$acc3,$t3,eq
587 stp $acc2,$acc3,[$rp,#16]
590 .size __ecp_nistz256_add,.-__ecp_nistz256_add
592 .type __ecp_nistz256_sub_from,%function
594 __ecp_nistz256_sub_from:
596 ldp $t2,$t3,[$bp,#16]
597 subs $acc0,$acc0,$t0 // ret = a-b
601 sbc $ap,xzr,xzr // zap $ap
603 subs $t0,$acc0,#1 // adds $t0,$a0,#-1 // tmp = ret+modulus
604 adcs $t1,$acc1,$poly1
607 cmp $ap,xzr // did subtraction borrow?
609 csel $acc0,$acc0,$t0,eq // ret = borrow ? ret+modulus : ret
610 csel $acc1,$acc1,$t1,eq
611 csel $acc2,$acc2,$t2,eq
612 stp $acc0,$acc1,[$rp]
613 csel $acc3,$acc3,$t3,eq
614 stp $acc2,$acc3,[$rp,#16]
617 .size __ecp_nistz256_sub_from,.-__ecp_nistz256_sub_from
619 .type __ecp_nistz256_sub_morf,%function
621 __ecp_nistz256_sub_morf:
623 ldp $t2,$t3,[$bp,#16]
624 subs $acc0,$t0,$acc0 // ret = b-a
628 sbc $ap,xzr,xzr // zap $ap
630 subs $t0,$acc0,#1 // adds $t0,$a0,#-1 // tmp = ret+modulus
631 adcs $t1,$acc1,$poly1
634 cmp $ap,xzr // did subtraction borrow?
636 csel $acc0,$acc0,$t0,eq // ret = borrow ? ret+modulus : ret
637 csel $acc1,$acc1,$t1,eq
638 csel $acc2,$acc2,$t2,eq
639 stp $acc0,$acc1,[$rp]
640 csel $acc3,$acc3,$t3,eq
641 stp $acc2,$acc3,[$rp,#16]
644 .size __ecp_nistz256_sub_morf,.-__ecp_nistz256_sub_morf
646 .type __ecp_nistz256_div_by_2,%function
648 __ecp_nistz256_div_by_2:
649 subs $t0,$acc0,#1 // adds $t0,$a0,#-1 // tmp = a+modulus
650 adcs $t1,$acc1,$poly1
652 adcs $t3,$acc3,$poly3
653 adc $ap,xzr,xzr // zap $ap
654 tst $acc0,#1 // is a even?
656 csel $acc0,$acc0,$t0,eq // ret = even ? a : a+modulus
657 csel $acc1,$acc1,$t1,eq
658 csel $acc2,$acc2,$t2,eq
659 csel $acc3,$acc3,$t3,eq
662 lsr $acc0,$acc0,#1 // ret >>= 1
663 orr $acc0,$acc0,$acc1,lsl#63
665 orr $acc1,$acc1,$acc2,lsl#63
667 orr $acc2,$acc2,$acc3,lsl#63
669 stp $acc0,$acc1,[$rp]
670 orr $acc3,$acc3,$ap,lsl#63
671 stp $acc2,$acc3,[$rp,#16]
674 .size __ecp_nistz256_div_by_2,.-__ecp_nistz256_div_by_2
676 ########################################################################
677 # following subroutines are "literal" implementation of those found in
680 ########################################################################
681 # void ecp_nistz256_point_double(P256_POINT *out,const P256_POINT *inp);
684 my ($S,$M,$Zsqr,$tmp0)=map(32*$_,(0..3));
685 # above map() describes stack layout with 4 temporary
686 # 256-bit vectors on top.
687 my ($rp_real,$ap_real) = map("x$_",(21,22));
690 .globl ecp_nistz256_point_double
691 .type ecp_nistz256_point_double,%function
693 ecp_nistz256_point_double:
694 stp x29,x30,[sp,#-48]!
700 ldp $acc0,$acc1,[$ap,#32]
702 ldp $acc2,$acc3,[$ap,#48]
708 ldp $a0,$a1,[$ap_real,#64] // forward load for p256_sqr_mont
711 ldp $a2,$a3,[$ap_real,#64+16]
713 bl __ecp_nistz256_add // p256_mul_by_2(S, in_y);
716 bl __ecp_nistz256_sqr_mont // p256_sqr_mont(Zsqr, in_z);
718 ldp $t0,$t1,[$ap_real]
719 ldp $t2,$t3,[$ap_real,#16]
720 mov $a0,$acc0 // put Zsqr aside for p256_sub
725 bl __ecp_nistz256_add // p256_add(M, Zsqr, in_x);
728 mov $acc0,$a0 // restore Zsqr
730 ldp $a0,$a1,[sp,#$S] // forward load for p256_sqr_mont
733 ldp $a2,$a3,[sp,#$S+16]
735 bl __ecp_nistz256_sub_morf // p256_sub(Zsqr, in_x, Zsqr);
738 bl __ecp_nistz256_sqr_mont // p256_sqr_mont(S, S);
740 ldr $bi,[$ap_real,#32]
741 ldp $a0,$a1,[$ap_real,#64]
742 ldp $a2,$a3,[$ap_real,#64+16]
745 bl __ecp_nistz256_mul_mont // p256_mul_mont(tmp0, in_z, in_y);
749 ldp $a0,$a1,[sp,#$S] // forward load for p256_sqr_mont
752 ldp $a2,$a3,[sp,#$S+16]
754 bl __ecp_nistz256_add // p256_mul_by_2(res_z, tmp0);
757 bl __ecp_nistz256_sqr_mont // p256_sqr_mont(tmp0, S);
759 ldr $bi,[sp,#$Zsqr] // forward load for p256_mul_mont
761 ldp $a2,$a3,[sp,#$M+16]
763 bl __ecp_nistz256_div_by_2 // p256_div_by_2(res_y, tmp0);
767 bl __ecp_nistz256_mul_mont // p256_mul_mont(M, M, Zsqr);
769 mov $t0,$acc0 // duplicate M
773 mov $a0,$acc0 // put M aside
778 bl __ecp_nistz256_add
779 mov $t0,$a0 // restore M
781 ldr $bi,[$ap_real] // forward load for p256_mul_mont
785 ldp $a2,$a3,[sp,#$S+16]
786 bl __ecp_nistz256_add // p256_mul_by_3(M, M);
790 bl __ecp_nistz256_mul_mont // p256_mul_mont(S, S, in_x);
794 ldp $a0,$a1,[sp,#$M] // forward load for p256_sqr_mont
797 ldp $a2,$a3,[sp,#$M+16]
799 bl __ecp_nistz256_add // p256_mul_by_2(tmp0, S);
802 bl __ecp_nistz256_sqr_mont // p256_sqr_mont(res_x, M);
805 bl __ecp_nistz256_sub_from // p256_sub(res_x, res_x, tmp0);
809 bl __ecp_nistz256_sub_morf // p256_sub(S, S, res_x);
812 mov $a0,$acc0 // copy S
817 bl __ecp_nistz256_mul_mont // p256_mul_mont(S, S, M);
821 bl __ecp_nistz256_sub_from // p256_sub(res_y, S, res_y);
823 add sp,x29,#0 // destroy frame
824 ldp x19,x20,[x29,#16]
825 ldp x21,x22,[x29,#32]
828 .size ecp_nistz256_point_double,.-ecp_nistz256_point_double
832 ########################################################################
833 # void ecp_nistz256_point_add(P256_POINT *out,const P256_POINT *in1,
834 # const P256_POINT *in2);
836 my ($res_x,$res_y,$res_z,
837 $H,$Hsqr,$R,$Rsqr,$Hcub,
838 $U1,$U2,$S1,$S2)=map(32*$_,(0..11));
839 my ($Z1sqr, $Z2sqr) = ($Hsqr, $Rsqr);
840 # above map() describes stack layout with 12 temporary
841 # 256-bit vectors on top.
842 my ($rp_real,$ap_real,$bp_real,$in1infty,$in2infty,$temp)=map("x$_",(21..26));
845 .globl ecp_nistz256_point_add
846 .type ecp_nistz256_point_add,%function
848 ecp_nistz256_point_add:
849 stp x29,x30,[sp,#-80]!
858 ldp $a2,$a3,[$bp,#16]
859 ldp $t0,$t1,[$bp,#32]
860 ldp $t2,$t3,[$bp,#48]
866 ldp $acc0,$acc1,[$ap]
869 ldp $acc2,$acc3,[$ap,#16]
872 ldp $t0,$t1,[$ap,#32]
873 orr $in2infty,$a0,$t2
875 ldp $t2,$t3,[$ap,#48]
876 csetm $in2infty,ne // !in2infty
878 ldp $a0,$a1,[$bp_real,#64] // forward load for p256_sqr_mont
879 orr $acc0,$acc0,$acc1
880 orr $acc2,$acc2,$acc3
881 ldp $a2,$a3,[$bp_real,#64+16]
884 orr $acc0,$acc0,$acc2
886 orr $in1infty,$acc0,$t0
890 csetm $in1infty,ne // !in1infty
893 bl __ecp_nistz256_sqr_mont // p256_sqr_mont(Z2sqr, in2_z);
895 ldp $a0,$a1,[$ap_real,#64]
896 ldp $a2,$a3,[$ap_real,#64+16]
898 bl __ecp_nistz256_sqr_mont // p256_sqr_mont(Z1sqr, in1_z);
900 ldr $bi,[$bp_real,#64]
901 ldp $a0,$a1,[sp,#$Z2sqr]
902 ldp $a2,$a3,[sp,#$Z2sqr+16]
905 bl __ecp_nistz256_mul_mont // p256_mul_mont(S1, Z2sqr, in2_z);
907 ldr $bi,[$ap_real,#64]
908 ldp $a0,$a1,[sp,#$Z1sqr]
909 ldp $a2,$a3,[sp,#$Z1sqr+16]
912 bl __ecp_nistz256_mul_mont // p256_mul_mont(S2, Z1sqr, in1_z);
914 ldr $bi,[$ap_real,#32]
915 ldp $a0,$a1,[sp,#$S1]
916 ldp $a2,$a3,[sp,#$S1+16]
919 bl __ecp_nistz256_mul_mont // p256_mul_mont(S1, S1, in1_y);
921 ldr $bi,[$bp_real,#32]
922 ldp $a0,$a1,[sp,#$S2]
923 ldp $a2,$a3,[sp,#$S2+16]
926 bl __ecp_nistz256_mul_mont // p256_mul_mont(S2, S2, in2_y);
929 ldr $bi,[sp,#$Z2sqr] // forward load for p256_mul_mont
930 ldp $a0,$a1,[$ap_real]
931 ldp $a2,$a3,[$ap_real,#16]
933 bl __ecp_nistz256_sub_from // p256_sub(R, S2, S1);
935 orr $acc0,$acc0,$acc1 // see if result is zero
936 orr $acc2,$acc2,$acc3
937 orr $temp,$acc0,$acc2
941 bl __ecp_nistz256_mul_mont // p256_mul_mont(U1, in1_x, Z2sqr);
944 ldp $a0,$a1,[$bp_real]
945 ldp $a2,$a3,[$bp_real,#16]
948 bl __ecp_nistz256_mul_mont // p256_mul_mont(U2, in2_x, Z1sqr);
951 ldp $a0,$a1,[sp,#$R] // forward load for p256_sqr_mont
952 ldp $a2,$a3,[sp,#$R+16]
954 bl __ecp_nistz256_sub_from // p256_sub(H, U2, U1);
956 orr $acc0,$acc0,$acc1 // see if result is zero
957 orr $acc2,$acc2,$acc3
958 orr $acc0,$acc0,$acc2
960 b.ne .Ladd_proceed // is_equal(U1,U2)?
962 tst $in1infty,$in2infty
963 b.eq .Ladd_proceed // (in1infty || in2infty)?
966 b.eq .Ladd_proceed // is_equal(S1,S2)?
970 stp $a0,$a1,[$rp_real]
971 stp $a0,$a1,[$rp_real,#16]
972 stp $a0,$a1,[$rp_real,#32]
973 stp $a0,$a1,[$rp_real,#48]
974 stp $a0,$a1,[$rp_real,#64]
975 stp $a0,$a1,[$rp_real,#80]
981 bl __ecp_nistz256_sqr_mont // p256_sqr_mont(Rsqr, R);
983 ldr $bi,[$ap_real,#64]
985 ldp $a2,$a3,[sp,#$H+16]
988 bl __ecp_nistz256_mul_mont // p256_mul_mont(res_z, H, in1_z);
991 ldp $a2,$a3,[sp,#$H+16]
993 bl __ecp_nistz256_sqr_mont // p256_sqr_mont(Hsqr, H);
995 ldr $bi,[$bp_real,#64]
996 ldp $a0,$a1,[sp,#$res_z]
997 ldp $a2,$a3,[sp,#$res_z+16]
1000 bl __ecp_nistz256_mul_mont // p256_mul_mont(res_z, res_z, in2_z);
1003 ldp $a0,$a1,[sp,#$Hsqr]
1004 ldp $a2,$a3,[sp,#$Hsqr+16]
1007 bl __ecp_nistz256_mul_mont // p256_mul_mont(Hcub, Hsqr, H);
1010 ldp $a0,$a1,[sp,#$U1]
1011 ldp $a2,$a3,[sp,#$U1+16]
1014 bl __ecp_nistz256_mul_mont // p256_mul_mont(U2, U1, Hsqr);
1021 bl __ecp_nistz256_add // p256_mul_by_2(Hsqr, U2);
1025 bl __ecp_nistz256_sub_morf // p256_sub(res_x, Rsqr, Hsqr);
1028 bl __ecp_nistz256_sub_from // p256_sub(res_x, res_x, Hcub);
1031 ldr $bi,[sp,#$Hcub] // forward load for p256_mul_mont
1032 ldp $a0,$a1,[sp,#$S1]
1033 ldp $a2,$a3,[sp,#$S1+16]
1035 bl __ecp_nistz256_sub_morf // p256_sub(res_y, U2, res_x);
1039 bl __ecp_nistz256_mul_mont // p256_mul_mont(S2, S1, Hcub);
1042 ldp $a0,$a1,[sp,#$res_y]
1043 ldp $a2,$a3,[sp,#$res_y+16]
1046 bl __ecp_nistz256_mul_mont // p256_mul_mont(res_y, res_y, R);
1049 bl __ecp_nistz256_sub_from // p256_sub(res_y, res_y, S2);
1051 ldp $a0,$a1,[sp,#$res_x] // res
1052 ldp $a2,$a3,[sp,#$res_x+16]
1053 ldp $t0,$t1,[$bp_real] // in2
1054 ldp $t2,$t3,[$bp_real,#16]
1056 for($i=0;$i<64;$i+=32) { # conditional moves
1058 ldp $acc0,$acc1,[$ap_real,#$i] // in1
1059 cmp $in1infty,#0 // !$in1infty, remember?
1060 ldp $acc2,$acc3,[$ap_real,#$i+16]
1063 ldp $a0,$a1,[sp,#$res_x+$i+32] // res
1066 cmp $in2infty,#0 // !$in2infty, remember?
1067 ldp $a2,$a3,[sp,#$res_x+$i+48]
1068 csel $acc0,$t0,$acc0,ne
1069 csel $acc1,$t1,$acc1,ne
1070 ldp $t0,$t1,[$bp_real,#$i+32] // in2
1071 csel $acc2,$t2,$acc2,ne
1072 csel $acc3,$t3,$acc3,ne
1073 ldp $t2,$t3,[$bp_real,#$i+48]
1074 stp $acc0,$acc1,[$rp_real,#$i]
1075 stp $acc2,$acc3,[$rp_real,#$i+16]
1079 ldp $acc0,$acc1,[$ap_real,#$i] // in1
1080 cmp $in1infty,#0 // !$in1infty, remember?
1081 ldp $acc2,$acc3,[$ap_real,#$i+16]
1086 cmp $in2infty,#0 // !$in2infty, remember?
1087 csel $acc0,$t0,$acc0,ne
1088 csel $acc1,$t1,$acc1,ne
1089 csel $acc2,$t2,$acc2,ne
1090 csel $acc3,$t3,$acc3,ne
1091 stp $acc0,$acc1,[$rp_real,#$i]
1092 stp $acc2,$acc3,[$rp_real,#$i+16]
1095 add sp,x29,#0 // destroy frame
1096 ldp x19,x20,[x29,#16]
1097 ldp x21,x22,[x29,#32]
1098 ldp x23,x24,[x29,#48]
1099 ldp x25,x26,[x29,#64]
1100 ldp x29,x30,[sp],#80
1102 .size ecp_nistz256_point_add,.-ecp_nistz256_point_add
1106 ########################################################################
1107 # void ecp_nistz256_point_add_affine(P256_POINT *out,const P256_POINT *in1,
1108 # const P256_POINT_AFFINE *in2);
1110 my ($res_x,$res_y,$res_z,
1111 $U2,$S2,$H,$R,$Hsqr,$Hcub,$Rsqr)=map(32*$_,(0..9));
1113 # above map() describes stack layout with 10 temporary
1114 # 256-bit vectors on top.
1115 my ($rp_real,$ap_real,$bp_real,$in1infty,$in2infty,$temp)=map("x$_",(21..26));
1118 .globl ecp_nistz256_point_add_affine
1119 .type ecp_nistz256_point_add_affine,%function
1121 ecp_nistz256_point_add_affine:
1122 stp x29,x30,[sp,#-80]!
1124 stp x19,x20,[sp,#16]
1125 stp x21,x22,[sp,#32]
1126 stp x23,x24,[sp,#48]
1127 stp x25,x26,[sp,#64]
1134 ldr $poly3,.Lpoly+24
1137 ldp $a2,$a3,[$ap,#16]
1138 ldp $t0,$t1,[$ap,#32]
1139 ldp $t2,$t3,[$ap,#48]
1146 orr $in1infty,$a0,$t0
1148 csetm $in1infty,ne // !in1infty
1151 ldp $a2,$a3,[$bp,#16]
1152 ldp $t0,$t1,[$bp,#32]
1153 ldp $t2,$t3,[$bp,#48]
1160 orr $in2infty,$a0,$t0
1162 csetm $in2infty,ne // !in2infty
1164 ldp $a0,$a1,[$ap_real,#64]
1165 ldp $a2,$a3,[$ap_real,#64+16]
1167 bl __ecp_nistz256_sqr_mont // p256_sqr_mont(Z1sqr, in1_z);
1176 bl __ecp_nistz256_mul_mont // p256_mul_mont(U2, Z1sqr, in2_x);
1179 ldr $bi,[$ap_real,#64] // forward load for p256_mul_mont
1180 ldp $a0,$a1,[sp,#$Z1sqr]
1181 ldp $a2,$a3,[sp,#$Z1sqr+16]
1183 bl __ecp_nistz256_sub_from // p256_sub(H, U2, in1_x);
1185 add $bp,$ap_real,#64
1187 bl __ecp_nistz256_mul_mont // p256_mul_mont(S2, Z1sqr, in1_z);
1189 ldr $bi,[$ap_real,#64]
1190 ldp $a0,$a1,[sp,#$H]
1191 ldp $a2,$a3,[sp,#$H+16]
1192 add $bp,$ap_real,#64
1194 bl __ecp_nistz256_mul_mont // p256_mul_mont(res_z, H, in1_z);
1196 ldr $bi,[$bp_real,#32]
1197 ldp $a0,$a1,[sp,#$S2]
1198 ldp $a2,$a3,[sp,#$S2+16]
1199 add $bp,$bp_real,#32
1201 bl __ecp_nistz256_mul_mont // p256_mul_mont(S2, S2, in2_y);
1203 add $bp,$ap_real,#32
1204 ldp $a0,$a1,[sp,#$H] // forward load for p256_sqr_mont
1205 ldp $a2,$a3,[sp,#$H+16]
1207 bl __ecp_nistz256_sub_from // p256_sub(R, S2, in1_y);
1210 bl __ecp_nistz256_sqr_mont // p256_sqr_mont(Hsqr, H);
1212 ldp $a0,$a1,[sp,#$R]
1213 ldp $a2,$a3,[sp,#$R+16]
1215 bl __ecp_nistz256_sqr_mont // p256_sqr_mont(Rsqr, R);
1218 ldp $a0,$a1,[sp,#$Hsqr]
1219 ldp $a2,$a3,[sp,#$Hsqr+16]
1222 bl __ecp_nistz256_mul_mont // p256_mul_mont(Hcub, Hsqr, H);
1225 ldp $a0,$a1,[sp,#$Hsqr]
1226 ldp $a2,$a3,[sp,#$Hsqr+16]
1229 bl __ecp_nistz256_mul_mont // p256_mul_mont(U2, in1_x, Hsqr);
1236 bl __ecp_nistz256_add // p256_mul_by_2(Hsqr, U2);
1240 bl __ecp_nistz256_sub_morf // p256_sub(res_x, Rsqr, Hsqr);
1243 bl __ecp_nistz256_sub_from // p256_sub(res_x, res_x, Hcub);
1246 ldr $bi,[$ap_real,#32] // forward load for p256_mul_mont
1247 ldp $a0,$a1,[sp,#$Hcub]
1248 ldp $a2,$a3,[sp,#$Hcub+16]
1250 bl __ecp_nistz256_sub_morf // p256_sub(res_y, U2, res_x);
1252 add $bp,$ap_real,#32
1254 bl __ecp_nistz256_mul_mont // p256_mul_mont(S2, in1_y, Hcub);
1257 ldp $a0,$a1,[sp,#$res_y]
1258 ldp $a2,$a3,[sp,#$res_y+16]
1261 bl __ecp_nistz256_mul_mont // p256_mul_mont(res_y, res_y, R);
1264 bl __ecp_nistz256_sub_from // p256_sub(res_y, res_y, S2);
1266 ldp $a0,$a1,[sp,#$res_x] // res
1267 ldp $a2,$a3,[sp,#$res_x+16]
1268 ldp $t0,$t1,[$bp_real] // in2
1269 ldp $t2,$t3,[$bp_real,#16]
1271 for($i=0;$i<64;$i+=32) { # conditional moves
1273 ldp $acc0,$acc1,[$ap_real,#$i] // in1
1274 cmp $in1infty,#0 // !$in1infty, remember?
1275 ldp $acc2,$acc3,[$ap_real,#$i+16]
1278 ldp $a0,$a1,[sp,#$res_x+$i+32] // res
1281 cmp $in2infty,#0 // !$in2infty, remember?
1282 ldp $a2,$a3,[sp,#$res_x+$i+48]
1283 csel $acc0,$t0,$acc0,ne
1284 csel $acc1,$t1,$acc1,ne
1285 ldp $t0,$t1,[$bp_real,#$i+32] // in2
1286 csel $acc2,$t2,$acc2,ne
1287 csel $acc3,$t3,$acc3,ne
1288 ldp $t2,$t3,[$bp_real,#$i+48]
1289 stp $acc0,$acc1,[$rp_real,#$i]
1290 stp $acc2,$acc3,[$rp_real,#$i+16]
1292 $code.=<<___ if ($i == 0);
1293 adr $bp_real,.Lone_mont-64
1297 ldp $acc0,$acc1,[$ap_real,#$i] // in1
1298 cmp $in1infty,#0 // !$in1infty, remember?
1299 ldp $acc2,$acc3,[$ap_real,#$i+16]
1304 cmp $in2infty,#0 // !$in2infty, remember?
1305 csel $acc0,$t0,$acc0,ne
1306 csel $acc1,$t1,$acc1,ne
1307 csel $acc2,$t2,$acc2,ne
1308 csel $acc3,$t3,$acc3,ne
1309 stp $acc0,$acc1,[$rp_real,#$i]
1310 stp $acc2,$acc3,[$rp_real,#$i+16]
1312 add sp,x29,#0 // destroy frame
1313 ldp x19,x20,[x29,#16]
1314 ldp x21,x22,[x29,#32]
1315 ldp x23,x24,[x29,#48]
1316 ldp x25,x26,[x29,#64]
1317 ldp x29,x30,[sp],#80
1319 .size ecp_nistz256_point_add_affine,.-ecp_nistz256_point_add_affine
1323 ########################################################################
1324 # scatter-gather subroutines
1326 my ($out,$inp,$index,$mask)=map("x$_",(0..3));
1328 // void ecp_nistz256_scatter_w5(void *x0,const P256_POINT *x1,
1330 .globl ecp_nistz256_scatter_w5
1331 .type ecp_nistz256_scatter_w5,%function
1333 ecp_nistz256_scatter_w5:
1334 stp x29,x30,[sp,#-16]!
1337 add $out,$out,$index,lsl#2
1339 ldp x4,x5,[$inp] // X
1340 ldp x6,x7,[$inp,#16]
1341 str w4,[$out,#64*0-4]
1343 str w5,[$out,#64*1-4]
1345 str w6,[$out,#64*2-4]
1347 str w7,[$out,#64*3-4]
1349 str w4,[$out,#64*4-4]
1350 str w5,[$out,#64*5-4]
1351 str w6,[$out,#64*6-4]
1352 str w7,[$out,#64*7-4]
1355 ldp x4,x5,[$inp,#32] // Y
1356 ldp x6,x7,[$inp,#48]
1357 str w4,[$out,#64*0-4]
1359 str w5,[$out,#64*1-4]
1361 str w6,[$out,#64*2-4]
1363 str w7,[$out,#64*3-4]
1365 str w4,[$out,#64*4-4]
1366 str w5,[$out,#64*5-4]
1367 str w6,[$out,#64*6-4]
1368 str w7,[$out,#64*7-4]
1371 ldp x4,x5,[$inp,#64] // Z
1372 ldp x6,x7,[$inp,#80]
1373 str w4,[$out,#64*0-4]
1375 str w5,[$out,#64*1-4]
1377 str w6,[$out,#64*2-4]
1379 str w7,[$out,#64*3-4]
1381 str w4,[$out,#64*4-4]
1382 str w5,[$out,#64*5-4]
1383 str w6,[$out,#64*6-4]
1384 str w7,[$out,#64*7-4]
1388 .size ecp_nistz256_scatter_w5,.-ecp_nistz256_scatter_w5
1390 // void ecp_nistz256_gather_w5(P256_POINT *x0,const void *x1,
1392 .globl ecp_nistz256_gather_w5
1393 .type ecp_nistz256_gather_w5,%function
1395 ecp_nistz256_gather_w5:
1396 stp x29,x30,[sp,#-16]!
1401 add $index,$index,x3
1402 add $inp,$inp,$index,lsl#2
1410 ldr w10,[$inp,#64*6]
1411 ldr w11,[$inp,#64*7]
1415 orr x6,x6,x10,lsl#32
1416 orr x7,x7,x11,lsl#32
1421 stp x4,x5,[$out] // X
1422 stp x6,x7,[$out,#16]
1430 ldr w10,[$inp,#64*6]
1431 ldr w11,[$inp,#64*7]
1435 orr x6,x6,x10,lsl#32
1436 orr x7,x7,x11,lsl#32
1441 stp x4,x5,[$out,#32] // Y
1442 stp x6,x7,[$out,#48]
1450 ldr w10,[$inp,#64*6]
1451 ldr w11,[$inp,#64*7]
1454 orr x6,x6,x10,lsl#32
1455 orr x7,x7,x11,lsl#32
1460 stp x4,x5,[$out,#64] // Z
1461 stp x6,x7,[$out,#80]
1465 .size ecp_nistz256_gather_w5,.-ecp_nistz256_gather_w5
1467 // void ecp_nistz256_scatter_w7(void *x0,const P256_POINT_AFFINE *x1,
1469 .globl ecp_nistz256_scatter_w7
1470 .type ecp_nistz256_scatter_w7,%function
1472 ecp_nistz256_scatter_w7:
1473 stp x29,x30,[sp,#-16]!
1476 add $out,$out,$index
1480 subs $index,$index,#1
1481 prfm pstl1strm,[$out,#4096+64*0]
1482 prfm pstl1strm,[$out,#4096+64*1]
1483 prfm pstl1strm,[$out,#4096+64*2]
1484 prfm pstl1strm,[$out,#4096+64*3]
1485 prfm pstl1strm,[$out,#4096+64*4]
1486 prfm pstl1strm,[$out,#4096+64*5]
1487 prfm pstl1strm,[$out,#4096+64*6]
1488 prfm pstl1strm,[$out,#4096+64*7]
1489 strb w3,[$out,#64*0-1]
1491 strb w3,[$out,#64*1-1]
1493 strb w3,[$out,#64*2-1]
1495 strb w3,[$out,#64*3-1]
1497 strb w3,[$out,#64*4-1]
1499 strb w3,[$out,#64*5-1]
1501 strb w3,[$out,#64*6-1]
1503 strb w3,[$out,#64*7-1]
1505 b.ne .Loop_scatter_w7
1509 .size ecp_nistz256_scatter_w7,.-ecp_nistz256_scatter_w7
1511 // void ecp_nistz256_gather_w7(P256_POINT_AFFINE *x0,const void *x1,
1513 .globl ecp_nistz256_gather_w7
1514 .type ecp_nistz256_gather_w7,%function
1516 ecp_nistz256_gather_w7:
1517 stp x29,x30,[sp,#-16]!
1522 add $index,$index,x3
1523 add $inp,$inp,$index
1527 ldrb w4,[$inp,#64*0]
1528 prfm pldl1strm,[$inp,#4096+64*0]
1529 subs $index,$index,#1
1530 ldrb w5,[$inp,#64*1]
1531 prfm pldl1strm,[$inp,#4096+64*1]
1532 ldrb w6,[$inp,#64*2]
1533 prfm pldl1strm,[$inp,#4096+64*2]
1534 ldrb w7,[$inp,#64*3]
1535 prfm pldl1strm,[$inp,#4096+64*3]
1536 ldrb w8,[$inp,#64*4]
1537 prfm pldl1strm,[$inp,#4096+64*4]
1538 ldrb w9,[$inp,#64*5]
1539 prfm pldl1strm,[$inp,#4096+64*5]
1540 ldrb w10,[$inp,#64*6]
1541 prfm pldl1strm,[$inp,#4096+64*6]
1542 ldrb w11,[$inp,#64*7]
1543 prfm pldl1strm,[$inp,#4096+64*7]
1549 orr x10,x10,x11,lsl#8
1551 orr x4,x4,x10,lsl#48
1554 b.ne .Loop_gather_w7
1558 .size ecp_nistz256_gather_w7,.-ecp_nistz256_gather_w7
1562 foreach (split("\n",$code)) {
1563 s/\`([^\`]*)\`/eval $1/ge;
1567 close STDOUT; # enforce flush