3 # ====================================================================
4 # Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
5 # project. The module is, however, dual licensed under OpenSSL and
6 # CRYPTOGAMS licenses depending on where you obtain it. For further
7 # details see http://www.openssl.org/~appro/cryptogams/.
8 # ====================================================================
10 # ECP_NISTZ256 module for ARMv8.
14 # Original ECP_NISTZ256 submission targeting x86_64 is detailed in
15 # http://eprint.iacr.org/2013/816.
17 # with/without -DECP_NISTZ256_ASM
19 # Cortex-A53 +120-400%
20 # Cortex-A57 +120-350%
24 # Ranges denote minimum and maximum improvement coefficients depending
25 # on benchmark. Lower coefficients are for ECDSA sign, server-side
26 # operation. Keep in mind that +400% means 5x improvement.
# ---- perlasm front-end setup -----------------------------------------------
# NOTE(review): this extract elides lines (original line numbers are fused into
# the text and the sequence skips) -- e.g. the $flavour assignment is not
# visible here. Confirm against the full ecp_nistz256-armv8.pl before treating
# any apparent gap as a real one.
29 while (($output=shift) && ($output!~/\w[\w\-]*\.\w+$/)) {}
# Locate the arm-xlate.pl translator next to this script, or under
# ../../perlasm, and abort if neither exists.
31 $0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
32 ( $xlate="${dir}arm-xlate.pl" and -f $xlate ) or
33 ( $xlate="${dir}../../perlasm/arm-xlate.pl" and -f $xlate) or
34 die "can't locate arm-xlate.pl";
# All generated $code is piped through the translator into $output.
36 open OUT,"| \"$^X\" $xlate $flavour $output";
# Register map: pointers/limbs/temporaries in x0-x17, with the two highest
# accumulators in callee-saved x19/x20. x18 is deliberately skipped
# (presumably because it is the AArch64 platform register -- confirm).
40 my ($rp,$ap,$bp,$bi,$a0,$a1,$a2,$a3,$t0,$t1,$t2,$t3,$poly1,$poly3,
41 $acc0,$acc1,$acc2,$acc3,$acc4,$acc5) =
42 map("x$_",(0..17,19,20));
44 my ($acc6,$acc7)=($ap,$bp); # used in __ecp_nistz256_sqr_mont
51 ########################################################################
52 # Convert ecp_nistz256_table.c to layout expected by ecp_nistz_gather_w7
# Read the C table either from the current directory or one level up.
# NOTE(review): the while(<TABLE>) read loop and close(TABLE) are elided in
# this extract; only the per-line TOBN substitution below survives.
54 $0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
55 open TABLE,"<ecp_nistz256_table.c" or
56 open TABLE,"<${dir}../ecp_nistz256_table.c" or
57 die "failed to open ecp_nistz256_table.c:",$!;
# Each TOBN(hi,lo) macro contributes two 64-bit words; note the swapped
# order (lo pushed before hi).
62 s/TOBN\(\s*(0x[0-9a-f]+),\s*(0x[0-9a-f]+)\s*\)/push @arr,hex($2),hex($1)/geo;
66 # See ecp_nistz256_table.c for explanation for why it's 64*16*37.
67 # 64*16*37-1 is because $#arr returns last valid index or @arr, not
69 die "insane number of elements" if ($#arr != 64*16*37-1);
72 .globl ecp_nistz256_precomputed
73 .type ecp_nistz256_precomputed,%object
75 ecp_nistz256_precomputed:
77 ########################################################################
78 # this conversion smashes P256_POINT_AFFINE by individual bytes with
79 # 64 byte interval, similar to
# Re-slice 64 points (16 words each) at a time and emit them byte-scattered:
# byte $i of the output row is built from word $j*16+$i/4 of the input.
# NOTE(review): loop-closing braces and the here-doc markers around the .byte
# emission are elided here.
83 @tbl = splice(@arr,0,64*16);
84 for($i=0;$i<64;$i++) {
86 for($j=0;$j<64;$j++) {
87 push @line,(@tbl[$j*16+$i/4]>>(($i%4)*8))&0xff;
90 $code.=join(',',map { sprintf "0x%02x",$_} @line);
95 .size ecp_nistz256_precomputed,.-ecp_nistz256_precomputed
// P-256 modulus words, least-significant first. NOTE(review): the .Lpoly
// label line itself is elided in this extract.
98 .quad 0xffffffffffffffff,0x00000000ffffffff,0x0000000000000000,0xffffffff00000001
99 .LRR: // 2^512 mod P precomputed for NIST P256 polynomial
100 .quad 0x0000000000000003,0xfffffffbffffffff,0xfffffffffffffffe,0x00000004fffffffd
// 1 in the Montgomery domain, i.e. 2^256 mod P. NOTE(review): its label
// (presumably .Lone_mont / .Lone) is elided in this extract.
102 .quad 0x0000000000000001,0xffffffff00000000,0xffffffffffffffff,0x00000000fffffffe
105 .asciz "ECP_NISTZ256 for ARMv8, CRYPTOGAMS by <appro\@openssl.org>"
// ---- public entry points, part 1 -------------------------------------------
// Each wrapper saves x29/x30 (and, per the #-32 frames, presumably x19/x20 --
// the stp for those is elided in this extract), pre-loads operands into the
// registers the __ecp_nistz256_* helpers expect, and tail-loads via bl.
// NOTE(review): epilogues (ldp/ret) and several operand loads are elided.
107 // void ecp_nistz256_to_mont(BN_ULONG x0[4],const BN_ULONG x1[4]);
108 .globl ecp_nistz256_to_mont
109 .type ecp_nistz256_to_mont,%function
111 ecp_nistz256_to_mont:
112 stp x29,x30,[sp,#-32]!
// Multiplying by RR = 2^512 mod P converts a into the Montgomery domain.
116 ldr $bi,.LRR // bp[0]
118 ldp $a2,$a3,[$ap,#16]
121 adr $bp,.LRR // &bp[0]
123 bl __ecp_nistz256_mul_mont
128 .size ecp_nistz256_to_mont,.-ecp_nistz256_to_mont
130 // void ecp_nistz256_from_mont(BN_ULONG x0[4],const BN_ULONG x1[4]);
131 .globl ecp_nistz256_from_mont
132 .type ecp_nistz256_from_mont,%function
134 ecp_nistz256_from_mont:
135 stp x29,x30,[sp,#-32]!
141 ldp $a2,$a3,[$ap,#16]
// Multiplying by 1 performs a bare Montgomery reduction (out of the domain).
144 adr $bp,.Lone // &bp[0]
146 bl __ecp_nistz256_mul_mont
151 .size ecp_nistz256_from_mont,.-ecp_nistz256_from_mont
153 // void ecp_nistz256_mul_mont(BN_ULONG x0[4],const BN_ULONG x1[4],
154 // const BN_ULONG x2[4]);
155 .globl ecp_nistz256_mul_mont
156 .type ecp_nistz256_mul_mont,%function
158 ecp_nistz256_mul_mont:
159 stp x29,x30,[sp,#-32]!
163 ldr $bi,[$bp] // bp[0]
165 ldp $a2,$a3,[$ap,#16]
169 bl __ecp_nistz256_mul_mont
174 .size ecp_nistz256_mul_mont,.-ecp_nistz256_mul_mont
176 // void ecp_nistz256_sqr_mont(BN_ULONG x0[4],const BN_ULONG x1[4]);
177 .globl ecp_nistz256_sqr_mont
178 .type ecp_nistz256_sqr_mont,%function
180 ecp_nistz256_sqr_mont:
181 stp x29,x30,[sp,#-32]!
186 ldp $a2,$a3,[$ap,#16]
190 bl __ecp_nistz256_sqr_mont
195 .size ecp_nistz256_sqr_mont,.-ecp_nistz256_sqr_mont
197 // void ecp_nistz256_add(BN_ULONG x0[4],const BN_ULONG x1[4],
198 // const BN_ULONG x2[4]);
199 .globl ecp_nistz256_add
200 .type ecp_nistz256_add,%function
// NOTE(review): the ecp_nistz256_add: label line is elided in this extract.
203 stp x29,x30,[sp,#-16]!
206 ldp $acc0,$acc1,[$ap]
208 ldp $acc2,$acc3,[$ap,#16]
209 ldp $t2,$t3,[$bp,#16]
213 bl __ecp_nistz256_add
217 .size ecp_nistz256_add,.-ecp_nistz256_add
// ---- public entry points, part 2 -------------------------------------------
// Same wrapper pattern as above; operand pre-loads and epilogues are partly
// elided in this extract.
219 // void ecp_nistz256_div_by_2(BN_ULONG x0[4],const BN_ULONG x1[4]);
220 .globl ecp_nistz256_div_by_2
221 .type ecp_nistz256_div_by_2,%function
223 ecp_nistz256_div_by_2:
224 stp x29,x30,[sp,#-16]!
227 ldp $acc0,$acc1,[$ap]
228 ldp $acc2,$acc3,[$ap,#16]
232 bl __ecp_nistz256_div_by_2
236 .size ecp_nistz256_div_by_2,.-ecp_nistz256_div_by_2
238 // void ecp_nistz256_mul_by_2(BN_ULONG x0[4],const BN_ULONG x1[4]);
239 .globl ecp_nistz256_mul_by_2
240 .type ecp_nistz256_mul_by_2,%function
242 ecp_nistz256_mul_by_2:
243 stp x29,x30,[sp,#-16]!
246 ldp $acc0,$acc1,[$ap]
247 ldp $acc2,$acc3,[$ap,#16]
// 2*a is computed as a+a via the shared modular-add helper; the mov of the
// second operand copy into $t0-$t3 is elided in this extract.
255 bl __ecp_nistz256_add // ret = a+a // 2*a
259 .size ecp_nistz256_mul_by_2,.-ecp_nistz256_mul_by_2
261 // void ecp_nistz256_mul_by_3(BN_ULONG x0[4],const BN_ULONG x1[4]);
262 .globl ecp_nistz256_mul_by_3
263 .type ecp_nistz256_mul_by_3,%function
265 ecp_nistz256_mul_by_3:
266 stp x29,x30,[sp,#-16]!
269 ldp $acc0,$acc1,[$ap]
270 ldp $acc2,$acc3,[$ap,#16]
// 3*a via two chained modular additions: (a+a)+a.
282 bl __ecp_nistz256_add // ret = a+a // 2*a
289 bl __ecp_nistz256_add // ret += a // 2*a+a=3*a
293 .size ecp_nistz256_mul_by_3,.-ecp_nistz256_mul_by_3
295 // void ecp_nistz256_sub(BN_ULONG x0[4],const BN_ULONG x1[4],
296 // const BN_ULONG x2[4]);
297 .globl ecp_nistz256_sub
298 .type ecp_nistz256_sub,%function
// NOTE(review): the ecp_nistz256_sub: label line is elided in this extract.
301 stp x29,x30,[sp,#-16]!
304 ldp $acc0,$acc1,[$ap]
305 ldp $acc2,$acc3,[$ap,#16]
309 bl __ecp_nistz256_sub_from
313 .size ecp_nistz256_sub,.-ecp_nistz256_sub
315 // void ecp_nistz256_neg(BN_ULONG x0[4],const BN_ULONG x1[4]);
316 .globl ecp_nistz256_neg
317 .type ecp_nistz256_neg,%function
// Negation is computed as 0 - a using the subtraction helper.
// NOTE(review): label line and the remaining zeroing movs are elided.
320 stp x29,x30,[sp,#-16]!
324 mov $acc0,xzr // a = 0
331 bl __ecp_nistz256_sub_from
335 .size ecp_nistz256_neg,.-ecp_nistz256_neg
// ---- __ecp_nistz256_mul_mont ------------------------------------------------
// Montgomery multiplication modulo the P-256 prime, one b-word at a time,
// interleaving multiply-accumulate with the add/sub reduction trick explained
// in the comment block inside the loop. NOTE(review): here-doc boundaries,
// several adcs/umulh lines and the final stores' surroundings are elided in
// this extract -- do not infer instruction ordering from the gaps.
337 // note that __ecp_nistz256_mul_mont expects a[0-3] input pre-loaded
338 // to $a0-$a3 and b[0] - to $bi
339 .type __ecp_nistz256_mul_mont,%function
341 __ecp_nistz256_mul_mont:
// Round 0: acc[0..3] = a[0..3]*b[0] (low halves), high halves in $t0-$t3.
342 mul $acc0,$a0,$bi // a[0]*b[0]
345 mul $acc1,$a1,$bi // a[1]*b[0]
348 mul $acc2,$a2,$bi // a[2]*b[0]
351 mul $acc3,$a3,$bi // a[3]*b[0]
353 ldr $bi,[$bp,#8] // b[1]
355 adds $acc1,$acc1,$t0 // accumulate high parts of multiplication
// Rounds 1..3 are generated by this Perl loop; each round folds one
// reduction step and one a[*]*b[i] accumulation.
363 for($i=1;$i<4;$i++) {
364 # Reduction iteration is normally performed by accumulating
365 # result of multiplication of modulus by "magic" digit [and
366 # omitting least significant word, which is guaranteed to
367 # be 0], but thanks to special form of modulus and "magic"
368 # digit being equal to least significant word, it can be
369 # performed with additions and subtractions alone. Indeed:
371 # ffff0001.00000000.0000ffff.ffffffff
373 # + xxxxxxxx.xxxxxxxx.xxxxxxxx.xxxxxxxx.abcdefgh
375 # Now observing that ff..ff*x = (2^n-1)*x = 2^n*x-x, we
378 # xxxxxxxx.xxxxxxxx.xxxxxxxx.xxxxxxxx.abcdefgh
379 # + abcdefgh.abcdefgh.0000abcd.efgh0000.00000000
380 # - 0000abcd.efgh0000.00000000.00000000.abcdefgh
382 # or marking redundant operations:
384 # xxxxxxxx.xxxxxxxx.xxxxxxxx.xxxxxxxx.--------
385 # + abcdefgh.abcdefgh.0000abcd.efgh0000.--------
386 # - 0000abcd.efgh0000.--------.--------.--------
389 subs $t2,$acc0,$t0 // "*0xffff0001"
391 adds $acc0,$acc1,$t0 // +=acc[0]<<96 and omit acc[0]
392 mul $t0,$a0,$bi // lo(a[0]*b[i])
394 mul $t1,$a1,$bi // lo(a[1]*b[i])
395 adcs $acc2,$acc3,$t2 // +=acc[0]*0xffff0001
396 mul $t2,$a2,$bi // lo(a[2]*b[i])
398 mul $t3,$a3,$bi // lo(a[3]*b[i])
401 adds $acc0,$acc0,$t0 // accumulate low parts of multiplication
402 umulh $t0,$a0,$bi // hi(a[0]*b[i])
404 umulh $t1,$a1,$bi // hi(a[1]*b[i])
406 umulh $t2,$a2,$bi // hi(a[2]*b[i])
408 umulh $t3,$a3,$bi // hi(a[3]*b[i])
411 $code.=<<___ if ($i<3);
412 ldr $bi,[$bp,#8*($i+1)] // b[$i+1]
415 adds $acc1,$acc1,$t0 // accumulate high parts of multiplication
// Final (fourth) reduction step after the loop.
426 subs $t2,$acc0,$t0 // "*0xffff0001"
428 adds $acc0,$acc1,$t0 // +=acc[0]<<96 and omit acc[0]
430 adcs $acc2,$acc3,$t2 // +=acc[0]*0xffff0001
// Conditional final subtraction of the modulus, branch-free via csel so the
// routine stays constant-time.
434 adds $t0,$acc0,#1 // subs $t0,$acc0,#-1 // tmp = ret-modulus
435 sbcs $t1,$acc1,$poly1
437 sbcs $t3,$acc3,$poly3
438 sbcs xzr,$acc4,xzr // did it borrow?
440 csel $acc0,$acc0,$t0,lo // ret = borrow ? ret : ret-modulus
441 csel $acc1,$acc1,$t1,lo
442 csel $acc2,$acc2,$t2,lo
443 stp $acc0,$acc1,[$rp]
444 csel $acc3,$acc3,$t3,lo
445 stp $acc2,$acc3,[$rp,#16]
448 .size __ecp_nistz256_mul_mont,.-__ecp_nistz256_mul_mont
// ---- __ecp_nistz256_sqr_mont ------------------------------------------------
// Montgomery squaring: compute the off-diagonal products once, double them,
// add the squares a[i]*a[i], then run four reduction steps (the Perl loop
// below emits three, one more follows it) and fold in the upper half.
// NOTE(review): here-doc boundaries, umulh lines and some adcs are elided in
// this extract.
450 // note that __ecp_nistz256_sqr_mont expects a[0-3] input pre-loaded
452 .type __ecp_nistz256_sqr_mont,%function
454 __ecp_nistz256_sqr_mont:
455 // | | | | | |a1*a0| |
456 // | | | | |a2*a0| | |
457 // | |a3*a2|a3*a0| | | |
458 // | | | |a2*a1| | | |
459 // | | |a3*a1| | | | |
460 // *| | | | | | | | 2|
461 // +|a3*a3|a2*a2|a1*a1|a0*a0|
462 // |--+--+--+--+--+--+--+--|
463 // |A7|A6|A5|A4|A3|A2|A1|A0|, where Ax is $accx, i.e. follow $accx
465 // "can't overflow" below mark carrying into high part of
466 // multiplication result, which can't overflow, because it
467 // can never be all ones.
469 mul $acc1,$a1,$a0 // a[1]*a[0]
471 mul $acc2,$a2,$a0 // a[2]*a[0]
473 mul $acc3,$a3,$a0 // a[3]*a[0]
476 adds $acc2,$acc2,$t1 // accumulate high parts of multiplication
477 mul $t0,$a2,$a1 // a[2]*a[1]
480 mul $t2,$a3,$a1 // a[3]*a[1]
482 adc $acc4,$acc4,xzr // can't overflow
484 mul $acc5,$a3,$a2 // a[3]*a[2]
487 adds $t1,$t1,$t2 // accumulate high parts of multiplication
488 mul $acc0,$a0,$a0 // a[0]*a[0]
489 adc $t2,$t3,xzr // can't overflow
491 adds $acc3,$acc3,$t0 // accumulate low parts of multiplication
494 mul $t1,$a1,$a1 // a[1]*a[1]
497 adc $acc6,$acc6,xzr // can't overflow
// Double the off-diagonal sum (every cross product appears twice in a^2).
499 adds $acc1,$acc1,$acc1 // acc[1-6]*=2
500 mul $t2,$a2,$a2 // a[2]*a[2]
501 adcs $acc2,$acc2,$acc2
503 adcs $acc3,$acc3,$acc3
504 mul $t3,$a3,$a3 // a[3]*a[3]
505 adcs $acc4,$acc4,$acc4
507 adcs $acc5,$acc5,$acc5
508 adcs $acc6,$acc6,$acc6
511 adds $acc1,$acc1,$a0 // +a[i]*a[i]
521 for($i=0;$i<3;$i++) { # reductions, see commentary in
522 # multiplication for details
524 subs $t2,$acc0,$t0 // "*0xffff0001"
526 adds $acc0,$acc1,$t0 // +=acc[0]<<96 and omit acc[0]
529 adcs $acc2,$acc3,$t2 // +=acc[0]*0xffff0001
531 adc $acc3,$t3,xzr // can't overflow
// Fourth, unrolled reduction step.
535 subs $t2,$acc0,$t0 // "*0xffff0001"
537 adds $acc0,$acc1,$t0 // +=acc[0]<<96 and omit acc[0]
539 adcs $acc2,$acc3,$t2 // +=acc[0]*0xffff0001
540 adc $acc3,$t3,xzr // can't overflow
542 adds $acc0,$acc0,$acc4 // accumulate upper half
543 adcs $acc1,$acc1,$acc5
544 adcs $acc2,$acc2,$acc6
545 adcs $acc3,$acc3,$acc7
// Constant-time conditional final subtraction, same pattern as mul_mont.
548 adds $t0,$acc0,#1 // subs $t0,$acc0,#-1 // tmp = ret-modulus
549 sbcs $t1,$acc1,$poly1
551 sbcs $t3,$acc3,$poly3
552 sbcs xzr,$acc4,xzr // did it borrow?
554 csel $acc0,$acc0,$t0,lo // ret = borrow ? ret : ret-modulus
555 csel $acc1,$acc1,$t1,lo
556 csel $acc2,$acc2,$t2,lo
557 stp $acc0,$acc1,[$rp]
558 csel $acc3,$acc3,$t3,lo
559 stp $acc2,$acc3,[$rp,#16]
562 .size __ecp_nistz256_sqr_mont,.-__ecp_nistz256_sqr_mont
// ---- __ecp_nistz256_add -----------------------------------------------------
// Modular addition: ret = a+b, then a branch-free conditional subtraction of
// the modulus selected on the carry captured in $ap. NOTE(review): the label
// line and some adcs/sbcs in the middle are elided in this extract.
564 // Note that __ecp_nistz256_add expects both input vectors pre-loaded to
565 // $a0-$a3 and $t0-$t3. This is done because it's used in multiple
566 // contexts, e.g. in multiplication by 2 and 3...
567 .type __ecp_nistz256_add,%function
570 adds $acc0,$acc0,$t0 // ret = a+b
574 adc $ap,xzr,xzr // zap $ap
// Trial subtraction of the modulus; #1 stands in for subtracting the
// all-ones low word (see the inline alternative spelling).
576 adds $t0,$acc0,#1 // subs $t0,$a0,#-1 // tmp = ret-modulus
577 sbcs $t1,$acc1,$poly1
580 cmp $ap,xzr // did addition carry?
582 csel $acc0,$acc0,$t0,eq // ret = carry ? ret-modulus : ret
583 csel $acc1,$acc1,$t1,eq
584 csel $acc2,$acc2,$t2,eq
585 stp $acc0,$acc1,[$rp]
586 csel $acc3,$acc3,$t3,eq
587 stp $acc2,$acc3,[$rp,#16]
590 .size __ecp_nistz256_add,.-__ecp_nistz256_add
// ---- __ecp_nistz256_sub_from / __ecp_nistz256_sub_morf ----------------------
// Two modular-subtraction helpers differing only in operand order:
// sub_from computes a-b, sub_morf computes b-a ("morf" = "from" reversed).
// A borrow (captured in $ap) selects a branch-free add-back of the modulus.
// NOTE(review): several sbcs/adcs lines are elided in this extract.
592 .type __ecp_nistz256_sub_from,%function
594 __ecp_nistz256_sub_from:
596 ldp $t2,$t3,[$bp,#16]
597 subs $acc0,$acc0,$t0 // ret = a-b
601 sbc $ap,xzr,xzr // zap $ap
603 subs $t0,$acc0,#1 // adds $t0,$a0,#-1 // tmp = ret+modulus
604 adcs $t1,$acc1,$poly1
607 cmp $ap,xzr // did subtraction borrow?
609 csel $acc0,$acc0,$t0,eq // ret = borrow ? ret+modulus : ret
610 csel $acc1,$acc1,$t1,eq
611 csel $acc2,$acc2,$t2,eq
612 stp $acc0,$acc1,[$rp]
613 csel $acc3,$acc3,$t3,eq
614 stp $acc2,$acc3,[$rp,#16]
617 .size __ecp_nistz256_sub_from,.-__ecp_nistz256_sub_from
619 .type __ecp_nistz256_sub_morf,%function
621 __ecp_nistz256_sub_morf:
623 ldp $t2,$t3,[$bp,#16]
624 subs $acc0,$t0,$acc0 // ret = b-a
628 sbc $ap,xzr,xzr // zap $ap
630 subs $t0,$acc0,#1 // adds $t0,$a0,#-1 // tmp = ret+modulus
631 adcs $t1,$acc1,$poly1
634 cmp $ap,xzr // did subtraction borrow?
636 csel $acc0,$acc0,$t0,eq // ret = borrow ? ret+modulus : ret
637 csel $acc1,$acc1,$t1,eq
638 csel $acc2,$acc2,$t2,eq
639 stp $acc0,$acc1,[$rp]
640 csel $acc3,$acc3,$t3,eq
641 stp $acc2,$acc3,[$rp,#16]
644 .size __ecp_nistz256_sub_morf,.-__ecp_nistz256_sub_morf
// ---- __ecp_nistz256_div_by_2 ------------------------------------------------
// Modular halving: if a is odd, first add the modulus (a+P is even and
// congruent to a), then shift the 257-bit value right by one. The carry of
// the add lands in $ap and is shifted into the top limb. All selections are
// branch-free. NOTE(review): some adcs and csel-adjacent lines are elided.
646 .type __ecp_nistz256_div_by_2,%function
648 __ecp_nistz256_div_by_2:
649 subs $t0,$acc0,#1 // adds $t0,$a0,#-1 // tmp = a+modulus
650 adcs $t1,$acc1,$poly1
652 adcs $t3,$acc3,$poly3
653 adc $ap,xzr,xzr // zap $ap
654 tst $acc0,#1 // is a even?
656 csel $acc0,$acc0,$t0,eq // ret = even ? a : a+modulus
657 csel $acc1,$acc1,$t1,eq
658 csel $acc2,$acc2,$t2,eq
659 csel $acc3,$acc3,$t3,eq
// 257-bit right shift: each limb takes the low bit of the next limb as its
// new top bit; the final limb takes the carry saved in $ap.
662 lsr $acc0,$acc0,#1 // ret >>= 1
663 orr $acc0,$acc0,$acc1,lsl#63
665 orr $acc1,$acc1,$acc2,lsl#63
667 orr $acc2,$acc2,$acc3,lsl#63
669 stp $acc0,$acc1,[$rp]
670 orr $acc3,$acc3,$ap,lsl#63
671 stp $acc2,$acc3,[$rp,#16]
674 .size __ecp_nistz256_div_by_2,.-__ecp_nistz256_div_by_2
676 ########################################################################
677 # following subroutines are "literal" implemetation of those found in
# (presumably ecp_nistz256.c -- the referenced filename is elided here)
680 ########################################################################
681 # void ecp_nistz256_point_double(P256_POINT *out,const P256_POINT *inp);
# Jacobian-coordinate point doubling. Stack holds four 256-bit temporaries
# (S, M, Zsqr, tmp0); "forward load" comments mark operand loads hoisted
# ahead of the preceding call to hide memory latency.
# NOTE(review): here-doc boundaries, many add $rp/$bp address computations,
# and parts of the prologue/epilogue are elided in this extract.
684 my ($S,$M,$Zsqr,$tmp0)=map(32*$_,(0..3));
685 # above map() describes stack layout with 4 temporary
686 # 256-bit vectors on top.
687 my ($rp_real,$ap_real) = map("x$_",(21,22));
690 .globl ecp_nistz256_point_double
691 .type ecp_nistz256_point_double,%function
693 ecp_nistz256_point_double:
694 stp x29,x30,[sp,#-80]!
701 ldp $acc0,$acc1,[$ap,#32]
703 ldp $acc2,$acc3,[$ap,#48]
709 ldp $a0,$a1,[$ap_real,#64] // forward load for p256_sqr_mont
712 ldp $a2,$a3,[$ap_real,#64+16]
714 bl __ecp_nistz256_add // p256_mul_by_2(S, in_y);
717 bl __ecp_nistz256_sqr_mont // p256_sqr_mont(Zsqr, in_z);
719 ldp $t0,$t1,[$ap_real]
720 ldp $t2,$t3,[$ap_real,#16]
721 mov $a0,$acc0 // put Zsqr aside for p256_sub
726 bl __ecp_nistz256_add // p256_add(M, Zsqr, in_x);
729 mov $acc0,$a0 // restore Zsqr
731 ldp $a0,$a1,[sp,#$S] // forward load for p256_sqr_mont
734 ldp $a2,$a3,[sp,#$S+16]
736 bl __ecp_nistz256_sub_morf // p256_sub(Zsqr, in_x, Zsqr);
739 bl __ecp_nistz256_sqr_mont // p256_sqr_mont(S, S);
741 ldr $bi,[$ap_real,#32]
742 ldp $a0,$a1,[$ap_real,#64]
743 ldp $a2,$a3,[$ap_real,#64+16]
746 bl __ecp_nistz256_mul_mont // p256_mul_mont(tmp0, in_z, in_y);
750 ldp $a0,$a1,[sp,#$S] // forward load for p256_sqr_mont
753 ldp $a2,$a3,[sp,#$S+16]
755 bl __ecp_nistz256_add // p256_mul_by_2(res_z, tmp0);
758 bl __ecp_nistz256_sqr_mont // p256_sqr_mont(tmp0, S);
760 ldr $bi,[sp,#$Zsqr] // forward load for p256_mul_mont
762 ldp $a2,$a3,[sp,#$M+16]
764 bl __ecp_nistz256_div_by_2 // p256_div_by_2(res_y, tmp0);
768 bl __ecp_nistz256_mul_mont // p256_mul_mont(M, M, Zsqr);
# 3*M computed by two additions, keeping a copy of M in $a0 between them.
770 mov $t0,$acc0 // duplicate M
774 mov $a0,$acc0 // put M aside
779 bl __ecp_nistz256_add
780 mov $t0,$a0 // restore M
782 ldr $bi,[$ap_real] // forward load for p256_mul_mont
786 ldp $a2,$a3,[sp,#$S+16]
787 bl __ecp_nistz256_add // p256_mul_by_3(M, M);
791 bl __ecp_nistz256_mul_mont // p256_mul_mont(S, S, in_x);
795 ldp $a0,$a1,[sp,#$M] // forward load for p256_sqr_mont
798 ldp $a2,$a3,[sp,#$M+16]
800 bl __ecp_nistz256_add // p256_mul_by_2(tmp0, S);
803 bl __ecp_nistz256_sqr_mont // p256_sqr_mont(res_x, M);
806 bl __ecp_nistz256_sub_from // p256_sub(res_x, res_x, tmp0);
810 bl __ecp_nistz256_sub_morf // p256_sub(S, S, res_x);
813 mov $a0,$acc0 // copy S
818 bl __ecp_nistz256_mul_mont // p256_mul_mont(S, S, M);
822 bl __ecp_nistz256_sub_from // p256_sub(res_y, S, res_y);
824 add sp,x29,#0 // destroy frame
825 ldp x19,x20,[x29,#16]
826 ldp x21,x22,[x29,#32]
829 .size ecp_nistz256_point_double,.-ecp_nistz256_point_double
833 ########################################################################
834 # void ecp_nistz256_point_add(P256_POINT *out,const P256_POINT *in1,
835 # const P256_POINT *in2);
# Full Jacobian point addition with 12 stack temporaries. Infinity flags for
# both inputs are folded into all-ones/all-zeros masks ($in1infty/$in2infty,
# note they hold the *negation*: "!inNinfty") so the result can be selected
# branch-free at the end. The special case P+P falls through to .Ladd_double.
# NOTE(review): here-doc boundaries, .Ladd_proceed/.Ladd_double labels' own
# definitions and numerous operand-load lines are elided in this extract.
837 my ($res_x,$res_y,$res_z,
838 $H,$Hsqr,$R,$Rsqr,$Hcub,
839 $U1,$U2,$S1,$S2)=map(32*$_,(0..11));
840 my ($Z1sqr, $Z2sqr) = ($Hsqr, $Rsqr);
841 # above map() describes stack layout with 12 temporary
842 # 256-bit vectors on top.
843 my ($rp_real,$ap_real,$bp_real,$in1infty,$in2infty,$temp)=map("x$_",(21..26));
846 .globl ecp_nistz256_point_add
847 .type ecp_nistz256_point_add,%function
849 ecp_nistz256_point_add:
850 stp x29,x30,[sp,#-80]!
859 ldp $a2,$a3,[$bp,#16]
860 ldp $t0,$t1,[$bp,#32]
861 ldp $t2,$t3,[$bp,#48]
867 ldp $acc0,$acc1,[$ap]
870 ldp $acc2,$acc3,[$ap,#16]
873 ldp $t0,$t1,[$ap,#32]
874 orr $in2infty,$a0,$t2
876 ldp $t2,$t3,[$ap,#48]
877 csetm $in2infty,ne // !in2infty
879 ldp $a0,$a1,[$bp_real,#64] // forward load for p256_sqr_mont
880 orr $acc0,$acc0,$acc1
881 orr $acc2,$acc2,$acc3
882 ldp $a2,$a3,[$bp_real,#64+16]
885 orr $acc0,$acc0,$acc2
887 orr $in1infty,$acc0,$t0
891 csetm $in1infty,ne // !in1infty
894 bl __ecp_nistz256_sqr_mont // p256_sqr_mont(Z2sqr, in2_z);
896 ldp $a0,$a1,[$ap_real,#64]
897 ldp $a2,$a3,[$ap_real,#64+16]
899 bl __ecp_nistz256_sqr_mont // p256_sqr_mont(Z1sqr, in1_z);
901 ldr $bi,[$bp_real,#64]
902 ldp $a0,$a1,[sp,#$Z2sqr]
903 ldp $a2,$a3,[sp,#$Z2sqr+16]
906 bl __ecp_nistz256_mul_mont // p256_mul_mont(S1, Z2sqr, in2_z);
908 ldr $bi,[$ap_real,#64]
909 ldp $a0,$a1,[sp,#$Z1sqr]
910 ldp $a2,$a3,[sp,#$Z1sqr+16]
913 bl __ecp_nistz256_mul_mont // p256_mul_mont(S2, Z1sqr, in1_z);
915 ldr $bi,[$ap_real,#32]
916 ldp $a0,$a1,[sp,#$S1]
917 ldp $a2,$a3,[sp,#$S1+16]
920 bl __ecp_nistz256_mul_mont // p256_mul_mont(S1, S1, in1_y);
922 ldr $bi,[$bp_real,#32]
923 ldp $a0,$a1,[sp,#$S2]
924 ldp $a2,$a3,[sp,#$S2+16]
927 bl __ecp_nistz256_mul_mont // p256_mul_mont(S2, S2, in2_y);
930 ldr $bi,[sp,#$Z2sqr] // forward load for p256_mul_mont
931 ldp $a0,$a1,[$ap_real]
932 ldp $a2,$a3,[$ap_real,#16]
934 bl __ecp_nistz256_sub_from // p256_sub(R, S2, S1);
// Record whether R == 0 (S1 == S2) in $temp for the doubling check below.
936 orr $acc0,$acc0,$acc1 // see if result is zero
937 orr $acc2,$acc2,$acc3
938 orr $temp,$acc0,$acc2
942 bl __ecp_nistz256_mul_mont // p256_mul_mont(U1, in1_x, Z2sqr);
945 ldp $a0,$a1,[$bp_real]
946 ldp $a2,$a3,[$bp_real,#16]
949 bl __ecp_nistz256_mul_mont // p256_mul_mont(U2, in2_x, Z1sqr);
952 ldp $a0,$a1,[sp,#$R] // forward load for p256_sqr_mont
953 ldp $a2,$a3,[sp,#$R+16]
955 bl __ecp_nistz256_sub_from // p256_sub(H, U2, U1);
957 orr $acc0,$acc0,$acc1 // see if result is zero
958 orr $acc2,$acc2,$acc3
959 orr $acc0,$acc0,$acc2
// H == 0 and R == 0 with both inputs finite means in1 == in2: delegate to
// point_double; H == 0 with R != 0 means in1 == -in2: return infinity.
961 b.ne .Ladd_proceed // is_equal(U1,U2)?
963 tst $in1infty,$in2infty
964 b.eq .Ladd_proceed // (in1infty || in2infty)?
967 b.eq .Ladd_double // is_equal(S1,S2)?
// in1 == -in2: store the point at infinity (all-zero point).
// NOTE(review): the zeroing of $a0/$a1 before these stores is elided here.
971 stp $a0,$a1,[$rp_real]
972 stp $a0,$a1,[$rp_real,#16]
973 stp $a0,$a1,[$rp_real,#32]
974 stp $a0,$a1,[$rp_real,#48]
975 stp $a0,$a1,[$rp_real,#64]
976 stp $a0,$a1,[$rp_real,#80]
// .Ladd_double path: shrink the frame to point_double's layout and retry.
983 ldp x23,x24,[x29,#48]
984 ldp x25,x26,[x29,#64]
985 add sp,sp,#32*(12-4) // difference in stack frames
991 bl __ecp_nistz256_sqr_mont // p256_sqr_mont(Rsqr, R);
993 ldr $bi,[$ap_real,#64]
995 ldp $a2,$a3,[sp,#$H+16]
998 bl __ecp_nistz256_mul_mont // p256_mul_mont(res_z, H, in1_z);
1000 ldp $a0,$a1,[sp,#$H]
1001 ldp $a2,$a3,[sp,#$H+16]
1003 bl __ecp_nistz256_sqr_mont // p256_sqr_mont(Hsqr, H);
1005 ldr $bi,[$bp_real,#64]
1006 ldp $a0,$a1,[sp,#$res_z]
1007 ldp $a2,$a3,[sp,#$res_z+16]
1008 add $bp,$bp_real,#64
1010 bl __ecp_nistz256_mul_mont // p256_mul_mont(res_z, res_z, in2_z);
1013 ldp $a0,$a1,[sp,#$Hsqr]
1014 ldp $a2,$a3,[sp,#$Hsqr+16]
1017 bl __ecp_nistz256_mul_mont // p256_mul_mont(Hcub, Hsqr, H);
1020 ldp $a0,$a1,[sp,#$U1]
1021 ldp $a2,$a3,[sp,#$U1+16]
1024 bl __ecp_nistz256_mul_mont // p256_mul_mont(U2, U1, Hsqr);
1031 bl __ecp_nistz256_add // p256_mul_by_2(Hsqr, U2);
1035 bl __ecp_nistz256_sub_morf // p256_sub(res_x, Rsqr, Hsqr);
1038 bl __ecp_nistz256_sub_from // p256_sub(res_x, res_x, Hcub);
1041 ldr $bi,[sp,#$Hcub] // forward load for p256_mul_mont
1042 ldp $a0,$a1,[sp,#$S1]
1043 ldp $a2,$a3,[sp,#$S1+16]
1045 bl __ecp_nistz256_sub_morf // p256_sub(res_y, U2, res_x);
1049 bl __ecp_nistz256_mul_mont // p256_mul_mont(S2, S1, Hcub);
1052 ldp $a0,$a1,[sp,#$res_y]
1053 ldp $a2,$a3,[sp,#$res_y+16]
1056 bl __ecp_nistz256_mul_mont // p256_mul_mont(res_y, res_y, R);
1059 bl __ecp_nistz256_sub_from // p256_sub(res_y, res_y, S2);
1061 ldp $a0,$a1,[sp,#$res_x] // res
1062 ldp $a2,$a3,[sp,#$res_x+16]
1063 ldp $t0,$t1,[$bp_real] // in2
1064 ldp $t2,$t3,[$bp_real,#16]
# Final branch-free selection: for each 32-byte chunk pick res, or in2 when
# in1 is infinity, or in1 when in2 is infinity, using the saved masks.
1066 for($i=0;$i<64;$i+=32) { # conditional moves
1068 ldp $acc0,$acc1,[$ap_real,#$i] // in1
1069 cmp $in1infty,#0 // !$in1intfy, remember?
1070 ldp $acc2,$acc3,[$ap_real,#$i+16]
1073 ldp $a0,$a1,[sp,#$res_x+$i+32] // res
1076 cmp $in2infty,#0 // !$in2intfy, remember?
1077 ldp $a2,$a3,[sp,#$res_x+$i+48]
1078 csel $acc0,$t0,$acc0,ne
1079 csel $acc1,$t1,$acc1,ne
1080 ldp $t0,$t1,[$bp_real,#$i+32] // in2
1081 csel $acc2,$t2,$acc2,ne
1082 csel $acc3,$t3,$acc3,ne
1083 ldp $t2,$t3,[$bp_real,#$i+48]
1084 stp $acc0,$acc1,[$rp_real,#$i]
1085 stp $acc2,$acc3,[$rp_real,#$i+16]
# Last chunk ($i == 64) handled outside the loop, without further forward
# loads.
1089 ldp $acc0,$acc1,[$ap_real,#$i] // in1
1090 cmp $in1infty,#0 // !$in1intfy, remember?
1091 ldp $acc2,$acc3,[$ap_real,#$i+16]
1096 cmp $in2infty,#0 // !$in2intfy, remember?
1097 csel $acc0,$t0,$acc0,ne
1098 csel $acc1,$t1,$acc1,ne
1099 csel $acc2,$t2,$acc2,ne
1100 csel $acc3,$t3,$acc3,ne
1101 stp $acc0,$acc1,[$rp_real,#$i]
1102 stp $acc2,$acc3,[$rp_real,#$i+16]
1105 add sp,x29,#0 // destroy frame
1106 ldp x19,x20,[x29,#16]
1107 ldp x21,x22,[x29,#32]
1108 ldp x23,x24,[x29,#48]
1109 ldp x25,x26,[x29,#64]
1110 ldp x29,x30,[sp],#80
1112 .size ecp_nistz256_point_add,.-ecp_nistz256_point_add
1116 ########################################################################
1117 # void ecp_nistz256_point_add_affine(P256_POINT *out,const P256_POINT *in1,
1118 # const P256_POINT_AFFINE *in2);
# Mixed addition: in2 is affine (Z == 1 implicitly), so only 10 temporaries
# are needed and Z2sqr/S1 drop out. When in1 is infinity the result is in2
# extended with Z = 1 in Montgomery form, hence the .Lone_mont-64 rebase of
# $bp_real inside the selection loop. NOTE(review): here-doc boundaries and
# many operand-load lines are elided in this extract.
1120 my ($res_x,$res_y,$res_z,
1121 $U2,$S2,$H,$R,$Hsqr,$Hcub,$Rsqr)=map(32*$_,(0..9));
1123 # above map() describes stack layout with 10 temporary
1124 # 256-bit vectors on top.
1125 my ($rp_real,$ap_real,$bp_real,$in1infty,$in2infty,$temp)=map("x$_",(21..26));
1128 .globl ecp_nistz256_point_add_affine
1129 .type ecp_nistz256_point_add_affine,%function
1131 ecp_nistz256_point_add_affine:
1132 stp x29,x30,[sp,#-80]!
1134 stp x19,x20,[sp,#16]
1135 stp x21,x22,[sp,#32]
1136 stp x23,x24,[sp,#48]
1137 stp x25,x26,[sp,#64]
1144 ldr $poly3,.Lpoly+24
# Compute the !in1infty mask from the OR of in1's y and z words.
1147 ldp $a2,$a3,[$ap,#16]
1148 ldp $t0,$t1,[$ap,#32]
1149 ldp $t2,$t3,[$ap,#48]
1156 orr $in1infty,$a0,$t0
1158 csetm $in1infty,ne // !in1infty
# Same for in2 (affine: only x and y words participate).
1161 ldp $a2,$a3,[$bp,#16]
1162 ldp $t0,$t1,[$bp,#32]
1163 ldp $t2,$t3,[$bp,#48]
1170 orr $in2infty,$a0,$t0
1172 csetm $in2infty,ne // !in2infty
1174 ldp $a0,$a1,[$ap_real,#64]
1175 ldp $a2,$a3,[$ap_real,#64+16]
1177 bl __ecp_nistz256_sqr_mont // p256_sqr_mont(Z1sqr, in1_z);
1186 bl __ecp_nistz256_mul_mont // p256_mul_mont(U2, Z1sqr, in2_x);
1189 ldr $bi,[$ap_real,#64] // forward load for p256_mul_mont
1190 ldp $a0,$a1,[sp,#$Z1sqr]
1191 ldp $a2,$a3,[sp,#$Z1sqr+16]
1193 bl __ecp_nistz256_sub_from // p256_sub(H, U2, in1_x);
1195 add $bp,$ap_real,#64
1197 bl __ecp_nistz256_mul_mont // p256_mul_mont(S2, Z1sqr, in1_z);
1199 ldr $bi,[$ap_real,#64]
1200 ldp $a0,$a1,[sp,#$H]
1201 ldp $a2,$a3,[sp,#$H+16]
1202 add $bp,$ap_real,#64
1204 bl __ecp_nistz256_mul_mont // p256_mul_mont(res_z, H, in1_z);
1206 ldr $bi,[$bp_real,#32]
1207 ldp $a0,$a1,[sp,#$S2]
1208 ldp $a2,$a3,[sp,#$S2+16]
1209 add $bp,$bp_real,#32
1211 bl __ecp_nistz256_mul_mont // p256_mul_mont(S2, S2, in2_y);
1213 add $bp,$ap_real,#32
1214 ldp $a0,$a1,[sp,#$H] // forward load for p256_sqr_mont
1215 ldp $a2,$a3,[sp,#$H+16]
1217 bl __ecp_nistz256_sub_from // p256_sub(R, S2, in1_y);
1220 bl __ecp_nistz256_sqr_mont // p256_sqr_mont(Hsqr, H);
1222 ldp $a0,$a1,[sp,#$R]
1223 ldp $a2,$a3,[sp,#$R+16]
1225 bl __ecp_nistz256_sqr_mont // p256_sqr_mont(Rsqr, R);
1228 ldp $a0,$a1,[sp,#$Hsqr]
1229 ldp $a2,$a3,[sp,#$Hsqr+16]
1232 bl __ecp_nistz256_mul_mont // p256_mul_mont(Hcub, Hsqr, H);
1235 ldp $a0,$a1,[sp,#$Hsqr]
1236 ldp $a2,$a3,[sp,#$Hsqr+16]
1239 bl __ecp_nistz256_mul_mont // p256_mul_mont(U2, in1_x, Hsqr);
1246 bl __ecp_nistz256_add // p256_mul_by_2(Hsqr, U2);
1250 bl __ecp_nistz256_sub_morf // p256_sub(res_x, Rsqr, Hsqr);
1253 bl __ecp_nistz256_sub_from // p256_sub(res_x, res_x, Hcub);
1256 ldr $bi,[$ap_real,#32] // forward load for p256_mul_mont
1257 ldp $a0,$a1,[sp,#$Hcub]
1258 ldp $a2,$a3,[sp,#$Hcub+16]
1260 bl __ecp_nistz256_sub_morf // p256_sub(res_y, U2, res_x);
1262 add $bp,$ap_real,#32
1264 bl __ecp_nistz256_mul_mont // p256_mul_mont(S2, in1_y, Hcub);
1267 ldp $a0,$a1,[sp,#$res_y]
1268 ldp $a2,$a3,[sp,#$res_y+16]
1271 bl __ecp_nistz256_mul_mont // p256_mul_mont(res_y, res_y, R);
1274 bl __ecp_nistz256_sub_from // p256_sub(res_y, res_y, S2);
1276 ldp $a0,$a1,[sp,#$res_x] // res
1277 ldp $a2,$a3,[sp,#$res_x+16]
1278 ldp $t0,$t1,[$bp_real] // in2
1279 ldp $t2,$t3,[$bp_real,#16]
# Branch-free output selection, as in point_add; after the first chunk
# $bp_real is retargeted at .Lone_mont-64 so the "in2" source for the Z
# coordinate reads 1 in Montgomery form.
1281 for($i=0;$i<64;$i+=32) { # conditional moves
1283 ldp $acc0,$acc1,[$ap_real,#$i] // in1
1284 cmp $in1infty,#0 // !$in1intfy, remember?
1285 ldp $acc2,$acc3,[$ap_real,#$i+16]
1288 ldp $a0,$a1,[sp,#$res_x+$i+32] // res
1291 cmp $in2infty,#0 // !$in2intfy, remember?
1292 ldp $a2,$a3,[sp,#$res_x+$i+48]
1293 csel $acc0,$t0,$acc0,ne
1294 csel $acc1,$t1,$acc1,ne
1295 ldp $t0,$t1,[$bp_real,#$i+32] // in2
1296 csel $acc2,$t2,$acc2,ne
1297 csel $acc3,$t3,$acc3,ne
1298 ldp $t2,$t3,[$bp_real,#$i+48]
1299 stp $acc0,$acc1,[$rp_real,#$i]
1300 stp $acc2,$acc3,[$rp_real,#$i+16]
1302 $code.=<<___ if ($i == 0);
1303 adr $bp_real,.Lone_mont-64
1307 ldp $acc0,$acc1,[$ap_real,#$i] // in1
1308 cmp $in1infty,#0 // !$in1intfy, remember?
1309 ldp $acc2,$acc3,[$ap_real,#$i+16]
1314 cmp $in2infty,#0 // !$in2intfy, remember?
1315 csel $acc0,$t0,$acc0,ne
1316 csel $acc1,$t1,$acc1,ne
1317 csel $acc2,$t2,$acc2,ne
1318 csel $acc3,$t3,$acc3,ne
1319 stp $acc0,$acc1,[$rp_real,#$i]
1320 stp $acc2,$acc3,[$rp_real,#$i+16]
1322 add sp,x29,#0 // destroy frame
1323 ldp x19,x20,[x29,#16]
1324 ldp x21,x22,[x29,#32]
1325 ldp x23,x24,[x29,#48]
1326 ldp x25,x26,[x29,#64]
1327 ldp x29,x30,[sp],#80
1329 .size ecp_nistz256_point_add_affine,.-ecp_nistz256_point_add_affine
1333 ########################################################################
1334 # scatter-gather subroutines
# Constant-memory-access table routines for window size 5. scatter_w5 stores
# one P256_POINT striped as 32-bit halves at 64-byte intervals; gather_w5
# reassembles it. NOTE(review): the lsr #32 half-extraction lines between the
# str groups, the mask computation before "add $index,$index,x3", and the
# epilogues are elided in this extract.
1336 my ($out,$inp,$index,$mask)=map("x$_",(0..3));
1338 // void ecp_nistz256_scatter_w5(void *x0,const P256_POINT *x1,
1340 .globl ecp_nistz256_scatter_w5
1341 .type ecp_nistz256_scatter_w5,%function
1343 ecp_nistz256_scatter_w5:
1344 stp x29,x30,[sp,#-16]!
1347 add $out,$out,$index,lsl#2
1349 ldp x4,x5,[$inp] // X
1350 ldp x6,x7,[$inp,#16]
1351 str w4,[$out,#64*0-4]
1353 str w5,[$out,#64*1-4]
1355 str w6,[$out,#64*2-4]
1357 str w7,[$out,#64*3-4]
1359 str w4,[$out,#64*4-4]
1360 str w5,[$out,#64*5-4]
1361 str w6,[$out,#64*6-4]
1362 str w7,[$out,#64*7-4]
1365 ldp x4,x5,[$inp,#32] // Y
1366 ldp x6,x7,[$inp,#48]
1367 str w4,[$out,#64*0-4]
1369 str w5,[$out,#64*1-4]
1371 str w6,[$out,#64*2-4]
1373 str w7,[$out,#64*3-4]
1375 str w4,[$out,#64*4-4]
1376 str w5,[$out,#64*5-4]
1377 str w6,[$out,#64*6-4]
1378 str w7,[$out,#64*7-4]
1381 ldp x4,x5,[$inp,#64] // Z
1382 ldp x6,x7,[$inp,#80]
1383 str w4,[$out,#64*0-4]
1385 str w5,[$out,#64*1-4]
1387 str w6,[$out,#64*2-4]
1389 str w7,[$out,#64*3-4]
1391 str w4,[$out,#64*4-4]
1392 str w5,[$out,#64*5-4]
1393 str w6,[$out,#64*6-4]
1394 str w7,[$out,#64*7-4]
1398 .size ecp_nistz256_scatter_w5,.-ecp_nistz256_scatter_w5
1400 // void ecp_nistz256_gather_w5(P256_POINT *x0,const void *x1,
1402 .globl ecp_nistz256_gather_w5
1403 .type ecp_nistz256_gather_w5,%function
1405 ecp_nistz256_gather_w5:
1406 stp x29,x30,[sp,#-16]!
// Rebuild each 64-bit limb from two striped 32-bit halves.
// NOTE(review): the w4..w9 loads preceding each surviving w10/w11 pair are
// elided in this extract.
1411 add $index,$index,x3
1412 add $inp,$inp,$index,lsl#2
1420 ldr w10,[$inp,#64*6]
1421 ldr w11,[$inp,#64*7]
1425 orr x6,x6,x10,lsl#32
1426 orr x7,x7,x11,lsl#32
1431 stp x4,x5,[$out] // X
1432 stp x6,x7,[$out,#16]
1440 ldr w10,[$inp,#64*6]
1441 ldr w11,[$inp,#64*7]
1445 orr x6,x6,x10,lsl#32
1446 orr x7,x7,x11,lsl#32
1451 stp x4,x5,[$out,#32] // Y
1452 stp x6,x7,[$out,#48]
1460 ldr w10,[$inp,#64*6]
1461 ldr w11,[$inp,#64*7]
1464 orr x6,x6,x10,lsl#32
1465 orr x7,x7,x11,lsl#32
1470 stp x4,x5,[$out,#64] // Z
1471 stp x6,x7,[$out,#80]
1475 .size ecp_nistz256_gather_w5,.-ecp_nistz256_gather_w5
// ---- window-7 scatter/gather ------------------------------------------------
// Byte-granular variants for the w7 affine table: each loop iteration moves
// one byte per 64-byte stripe, with prfm prefetches to keep the streaming
// access pattern fed. NOTE(review): loop-counter setup, the byte-shift lines
// between strb/ldrb groups, and epilogues are elided in this extract.
1477 // void ecp_nistz256_scatter_w7(void *x0,const P256_POINT_AFFINE *x1,
1479 .globl ecp_nistz256_scatter_w7
1480 .type ecp_nistz256_scatter_w7,%function
1482 ecp_nistz256_scatter_w7:
1483 stp x29,x30,[sp,#-16]!
1486 add $out,$out,$index
1490 subs $index,$index,#1
1491 prfm pstl1strm,[$out,#4096+64*0]
1492 prfm pstl1strm,[$out,#4096+64*1]
1493 prfm pstl1strm,[$out,#4096+64*2]
1494 prfm pstl1strm,[$out,#4096+64*3]
1495 prfm pstl1strm,[$out,#4096+64*4]
1496 prfm pstl1strm,[$out,#4096+64*5]
1497 prfm pstl1strm,[$out,#4096+64*6]
1498 prfm pstl1strm,[$out,#4096+64*7]
1499 strb w3,[$out,#64*0-1]
1501 strb w3,[$out,#64*1-1]
1503 strb w3,[$out,#64*2-1]
1505 strb w3,[$out,#64*3-1]
1507 strb w3,[$out,#64*4-1]
1509 strb w3,[$out,#64*5-1]
1511 strb w3,[$out,#64*6-1]
1513 strb w3,[$out,#64*7-1]
1515 b.ne .Loop_scatter_w7
1519 .size ecp_nistz256_scatter_w7,.-ecp_nistz256_scatter_w7
1521 // void ecp_nistz256_gather_w7(P256_POINT_AFFINE *x0,const void *x1,
1523 .globl ecp_nistz256_gather_w7
1524 .type ecp_nistz256_gather_w7,%function
1526 ecp_nistz256_gather_w7:
1527 stp x29,x30,[sp,#-16]!
1532 add $index,$index,x3
1533 add $inp,$inp,$index
1537 ldrb w4,[$inp,#64*0]
1538 prfm pldl1strm,[$inp,#4096+64*0]
1539 subs $index,$index,#1
1540 ldrb w5,[$inp,#64*1]
1541 prfm pldl1strm,[$inp,#4096+64*1]
1542 ldrb w6,[$inp,#64*2]
1543 prfm pldl1strm,[$inp,#4096+64*2]
1544 ldrb w7,[$inp,#64*3]
1545 prfm pldl1strm,[$inp,#4096+64*3]
1546 ldrb w8,[$inp,#64*4]
1547 prfm pldl1strm,[$inp,#4096+64*4]
1548 ldrb w9,[$inp,#64*5]
1549 prfm pldl1strm,[$inp,#4096+64*5]
1550 ldrb w10,[$inp,#64*6]
1551 prfm pldl1strm,[$inp,#4096+64*6]
1552 ldrb w11,[$inp,#64*7]
1553 prfm pldl1strm,[$inp,#4096+64*7]
// Pack the eight gathered bytes back into one 64-bit word.
1559 orr x10,x10,x11,lsl#8
1561 orr x4,x4,x10,lsl#48
1564 b.ne .Loop_gather_w7
1568 .size ecp_nistz256_gather_w7,.-ecp_nistz256_gather_w7
# ---- final emission ----------------------------------------------------------
# Expand backtick-quoted Perl expressions embedded in $code, then emit.
# NOTE(review): the print-to-OUT line and the loop's closing brace are elided
# in this extract; only the substitution and the STDOUT flush survive.
1572 foreach (split("\n",$code)) {
1573 s/\`([^\`]*)\`/eval $1/ge;
1577 close STDOUT; # enforce flush