2 # Copyright 2014-2016 The OpenSSL Project Authors. All Rights Reserved.
4 # Licensed under the OpenSSL license (the "License"). You may not use
5 # this file except in compliance with the License. You can obtain a copy
6 # in the file LICENSE in the source distribution or at
7 # https://www.openssl.org/source/license.html
10 ##############################################################################
12 # Copyright 2014 Intel Corporation #
14 # Licensed under the Apache License, Version 2.0 (the "License"); #
15 # you may not use this file except in compliance with the License. #
16 # You may obtain a copy of the License at #
18 # http://www.apache.org/licenses/LICENSE-2.0 #
20 # Unless required by applicable law or agreed to in writing, software #
21 # distributed under the License is distributed on an "AS IS" BASIS, #
22 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
23 # See the License for the specific language governing permissions and #
24 # limitations under the License. #
26 ##############################################################################
28 # Developers and authors: #
29 # Shay Gueron (1, 2), and Vlad Krasnov (1) #
30 # (1) Intel Corporation, Israel Development Center #
31 # (2) University of Haifa #
33 # S.Gueron and V.Krasnov, "Fast Prime Field Elliptic Curve Cryptography with#
34 #                          256 Bit Primes"                                   #
36 ##############################################################################
38 # Further optimization by <appro@openssl.org>:
40 #                     this/original       with/without -DECP_NISTZ256_ASM(*)
41 # Opteron +12-49% +110-150%
42 # Bulldozer +14-45% +175-210%
44 # Westmere +12-34% +80-87%
45 # Sandy Bridge +9-35% +110-120%
46 # Ivy Bridge +9-35% +110-125%
47 # Haswell +8-37% +140-160%
48 # Broadwell +18-58% +145-210%
49 # Atom +15-50% +130-180%
50 # VIA Nano +43-160% +300-480%
52 # (*) "without -DECP_NISTZ256_ASM" refers to build with
53 # "enable-ec_nistp_64_gcc_128";
55 # Ranges denote minimum and maximum improvement coefficients depending
56 # on the benchmark. Lower coefficients are for ECDSA sign, the relatively
57 # fastest server-side operation. Keep in mind that +100% means a 2x improvement.
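#
# For example, a +110% entry means that for that benchmark this code delivers
# roughly 2.1x the throughput of the corresponding reference build.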
61 if ($flavour =~ /\./) { $output = $flavour; undef $flavour; }
63 $win64=0; $win64=1 if ($flavour =~ /[nm]asm|mingw64/ || $output =~ /\.asm$/);
65 $0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
66 ( $xlate="${dir}x86_64-xlate.pl" and -f $xlate ) or
67 ( $xlate="${dir}../../perlasm/x86_64-xlate.pl" and -f $xlate) or
68 die "can't locate x86_64-xlate.pl";
70 open OUT,"| \"$^X\" $xlate $flavour $output";
73 if (`$ENV{CC} -Wa,-v -c -o /dev/null -x assembler /dev/null 2>&1`
74 =~ /GNU assembler version ([2-9]\.[0-9]+)/) {
75 $avx = ($1>=2.19) + ($1>=2.22);
79 if (!$addx && $win64 && ($flavour =~ /nasm/ || $ENV{ASM} =~ /nasm/) &&
80 `nasm -v 2>&1` =~ /NASM version ([2-9]\.[0-9]+)/) {
81 $avx = ($1>=2.09) + ($1>=2.10);
85 if (!$addx && $win64 && ($flavour =~ /masm/ || $ENV{ASM} =~ /ml64/) &&
86 `ml64 2>&1` =~ /Version ([0-9]+)\./) {
87 $avx = ($1>=10) + ($1>=11);
91 if (!$addx && `$ENV{CC} -v 2>&1` =~ /((?:^clang|LLVM) version|.*based on LLVM) ([3-9])\.([0-9]+)/) {
92 my $ver = $2 + $3/100.0; # 3.1->3.01, 3.10->3.10
93 $avx = ($ver>=3.0) + ($ver>=3.01);
99 .extern OPENSSL_ia32cap_P
104 .quad 0xffffffffffffffff, 0x00000000ffffffff, 0x0000000000000000, 0xffffffff00000001
106 # 2^512 mod P precomputed for NIST P256 polynomial
108 .quad 0x0000000000000003, 0xfffffffbffffffff, 0xfffffffffffffffe, 0x00000004fffffffd
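# How .LRR is used (a note, not extra code): ecp_nistz256_mul_mont computes
# a*b*2^-256 mod p, so
#
#	to_mont(a)   = mul_mont(a, RR) = a * 2^512 * 2^-256 = a * 2^256 mod p
#	from_mont(a) = mul_mont(a, 1)  = a * 2^-256               mod p
#
# which is why ecp_nistz256_to_mont below simply loads .LRR as its second
# multiplicand.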
111 .long 1,1,1,1,1,1,1,1
113 .long 2,2,2,2,2,2,2,2
115 .long 3,3,3,3,3,3,3,3
117 .quad 0x0000000000000001, 0xffffffff00000000, 0xffffffffffffffff, 0x00000000fffffffe
121 ################################################################################
122 # void ecp_nistz256_mul_by_2(uint64_t res[4], uint64_t a[4]);
124 my ($a0,$a1,$a2,$a3)=map("%r$_",(8..11));
125 my ($t0,$t1,$t2,$t3,$t4)=("%rax","%rdx","%rcx","%r12","%r13");
126 my ($r_ptr,$a_ptr,$b_ptr)=("%rdi","%rsi","%rdx");
130 .globl ecp_nistz256_mul_by_2
131 .type ecp_nistz256_mul_by_2,\@function,2
133 ecp_nistz256_mul_by_2:
139 add $a0, $a0 # a0:a3+a0:a3
143 lea .Lpoly(%rip), $a_ptr
170 .size ecp_nistz256_mul_by_2,.-ecp_nistz256_mul_by_2
172 ################################################################################
173 # void ecp_nistz256_div_by_2(uint64_t res[4], uint64_t a[4]);
174 .globl ecp_nistz256_div_by_2
175 .type ecp_nistz256_div_by_2,\@function,2
177 ecp_nistz256_div_by_2:
186 lea .Lpoly(%rip), $a_ptr
197 xor $a_ptr, $a_ptr # borrow $a_ptr
206 mov $a1, $t0 # a0:a3>>1
230 .size ecp_nistz256_div_by_2,.-ecp_nistz256_div_by_2
232 ################################################################################
233 # void ecp_nistz256_mul_by_3(uint64_t res[4], uint64_t a[4]);
234 .globl ecp_nistz256_mul_by_3
235 .type ecp_nistz256_mul_by_3,\@function,2
237 ecp_nistz256_mul_by_3:
244 add $a0, $a0 # a0:a3+a0:a3
256 sbb .Lpoly+8*1(%rip), $a1
259 sbb .Lpoly+8*3(%rip), $a3
268 add 8*0($a_ptr), $a0 # a0:a3+=a_ptr[0:3]
278 sbb .Lpoly+8*1(%rip), $a1
281 sbb .Lpoly+8*3(%rip), $a3
296 .size ecp_nistz256_mul_by_3,.-ecp_nistz256_mul_by_3
298 ################################################################################
299 # void ecp_nistz256_add(uint64_t res[4], uint64_t a[4], uint64_t b[4]);
300 .globl ecp_nistz256_add
301 .type ecp_nistz256_add,\@function,3
312 lea .Lpoly(%rip), $a_ptr
342 .size ecp_nistz256_add,.-ecp_nistz256_add
344 ################################################################################
345 # void ecp_nistz256_sub(uint64_t res[4], uint64_t a[4], uint64_t b[4]);
346 .globl ecp_nistz256_sub
347 .type ecp_nistz256_sub,\@function,3
358 lea .Lpoly(%rip), $a_ptr
388 .size ecp_nistz256_sub,.-ecp_nistz256_sub
390 ################################################################################
391 # void ecp_nistz256_neg(uint64_t res[4], uint64_t a[4]);
392 .globl ecp_nistz256_neg
393 .type ecp_nistz256_neg,\@function,2
410 lea .Lpoly(%rip), $a_ptr
434 .size ecp_nistz256_neg,.-ecp_nistz256_neg
438 my ($r_ptr,$a_ptr,$b_org,$b_ptr)=("%rdi","%rsi","%rdx","%rbx");
439 my ($acc0,$acc1,$acc2,$acc3,$acc4,$acc5,$acc6,$acc7)=map("%r$_",(8..15));
440 my ($t0,$t1,$t2,$t3,$t4)=("%rcx","%rbp","%rbx","%rdx","%rax");
441 my ($poly1,$poly3)=($acc6,$acc7);
444 ################################################################################
445 # void ecp_nistz256_to_mont(uint64_t res[4], uint64_t a[4]);
448 .globl ecp_nistz256_to_mont
449 .type ecp_nistz256_to_mont,\@function,2
451 ecp_nistz256_to_mont:
453 $code.=<<___ if ($addx);
455 and OPENSSL_ia32cap_P+8(%rip), %ecx
458 lea .LRR(%rip), $b_org
460 .size ecp_nistz256_to_mont,.-ecp_nistz256_to_mont
462 ################################################################################
463 # void ecp_nistz256_mul_mont(uint64_t res[4], uint64_t a[4], uint64_t b[4]);
468 .globl ecp_nistz256_mul_mont
469 .type ecp_nistz256_mul_mont,\@function,3
471 ecp_nistz256_mul_mont:
473 $code.=<<___ if ($addx);
475 and OPENSSL_ia32cap_P+8(%rip), %ecx
486 $code.=<<___ if ($addx);
492 mov 8*0($b_org), %rax
493 mov 8*0($a_ptr), $acc1
494 mov 8*1($a_ptr), $acc2
495 mov 8*2($a_ptr), $acc3
496 mov 8*3($a_ptr), $acc4
498 call __ecp_nistz256_mul_montq
500 $code.=<<___ if ($addx);
506 mov 8*0($b_org), %rdx
507 mov 8*0($a_ptr), $acc1
508 mov 8*1($a_ptr), $acc2
509 mov 8*2($a_ptr), $acc3
510 mov 8*3($a_ptr), $acc4
511 lea -128($a_ptr), $a_ptr # control u-op density
513 call __ecp_nistz256_mul_montx
524 .size ecp_nistz256_mul_mont,.-ecp_nistz256_mul_mont
526 .type __ecp_nistz256_mul_montq,\@abi-omnipotent
528 __ecp_nistz256_mul_montq:
529 ########################################################################
533 mov .Lpoly+8*1(%rip),$poly1
539 mov .Lpoly+8*3(%rip),$poly3
558 ########################################################################
559 # First reduction step
560 # Basically we now want to multiply acc[0] by p256
561 # and add the result to the accumulator.
562 # Due to the special form of p256 we can take a few shortcuts:
564 # acc[0] x p256[0..1] = acc[0] x 2^96 - acc[0]
565 # then we add acc[0] and get acc[0] x 2^96
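#
# Spelled out (a reference sketch of the algebra, not extra code): one
# reduction step takes m = acc[0], which is valid because p[0] = 2^64-1
# implies -p^-1 mod 2^64 = 1, and computes
#
#	acc += m * p
#	     = acc - m                            (cancels limb 0 exactly)
#	       + m * 2^96                         (added into limbs 1..2)
#	       + m * 0xffffffff00000001 * 2^192   (added into limbs 3..4)
#
# after which acc[0] == 0 and the accumulator window slides down one limb.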
571 add $acc0, $acc1 # +=acc[0]<<96
574 mov 8*1($b_ptr), %rax
579 ########################################################################
612 ########################################################################
613 # Second reduction step
621 mov 8*2($b_ptr), %rax
626 ########################################################################
659 ########################################################################
660 # Third reduction step
668 mov 8*3($b_ptr), %rax
673 ########################################################################
706 ########################################################################
707 # Final reduction step
720 ########################################################################
721 # Branch-less conditional subtraction of P
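# The same idea in C (an illustrative sketch only -- the code below uses
# sbb/cmov rather than masks): the value is known to be in [0, 2p), so
# subtract p unconditionally and keep the pre-subtraction value if that
# subtraction borrowed.
#
#	static const uint64_t P[4] = { 0xffffffffffffffffULL, 0x00000000ffffffffULL,
#	                               0, 0xffffffff00000001ULL };
#	uint64_t t[4], borrow = 0;           /* acc[0..3] plus carry limb "hi" */
#	for (int i = 0; i < 4; i++) {
#	    unsigned __int128 d = (unsigned __int128)acc[i] - P[i] - borrow;
#	    t[i] = (uint64_t)d;
#	    borrow = (uint64_t)(d >> 64) & 1;          /* 1 if this limb borrowed */
#	}
#	uint64_t keep = 0 - (uint64_t)(hi < borrow);   /* all-ones: value was < p */
#	for (int i = 0; i < 4; i++)
#	    res[i] = (acc[i] & keep) | (t[i] & ~keep);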
722 sub \$-1, $acc4 # .Lpoly[0]
724 sbb $poly1, $acc5 # .Lpoly[1]
725 sbb \$0, $acc0 # .Lpoly[2]
727 sbb $poly3, $acc1 # .Lpoly[3]
732 mov $acc4, 8*0($r_ptr)
734 mov $acc5, 8*1($r_ptr)
736 mov $acc0, 8*2($r_ptr)
737 mov $acc1, 8*3($r_ptr)
740 .size __ecp_nistz256_mul_montq,.-__ecp_nistz256_mul_montq
742 ################################################################################
743 # void ecp_nistz256_sqr_mont(uint64_t res[4], uint64_t a[4]);
747 # we optimize the square according to S.Gueron and V.Krasnov,
748 # "Speeding up Big-Number Squaring"
749 .globl ecp_nistz256_sqr_mont
750 .type ecp_nistz256_sqr_mont,\@function,2
752 ecp_nistz256_sqr_mont:
754 $code.=<<___ if ($addx);
756 and OPENSSL_ia32cap_P+8(%rip), %ecx
766 $code.=<<___ if ($addx);
771 mov 8*0($a_ptr), %rax
772 mov 8*1($a_ptr), $acc6
773 mov 8*2($a_ptr), $acc7
774 mov 8*3($a_ptr), $acc0
776 call __ecp_nistz256_sqr_montq
778 $code.=<<___ if ($addx);
783 mov 8*0($a_ptr), %rdx
784 mov 8*1($a_ptr), $acc6
785 mov 8*2($a_ptr), $acc7
786 mov 8*3($a_ptr), $acc0
787 lea -128($a_ptr), $a_ptr # control u-op density
789 call __ecp_nistz256_sqr_montx
800 .size ecp_nistz256_sqr_mont,.-ecp_nistz256_sqr_mont
802 .type __ecp_nistz256_sqr_montq,\@abi-omnipotent
804 __ecp_nistz256_sqr_montq:
806 mulq $acc6 # a[1]*a[0]
811 mulq $acc5 # a[0]*a[2]
817 mulq $acc5 # a[0]*a[3]
823 #################################
824 mulq $acc6 # a[1]*a[2]
830 mulq $acc6 # a[1]*a[3]
838 #################################
839 mulq $acc7 # a[2]*a[3]
842 mov 8*0($a_ptr), %rax
846 add $acc1, $acc1 # acc1:6<<1
856 mov 8*1($a_ptr), %rax
862 mov 8*2($a_ptr), %rax
869 mov 8*3($a_ptr), %rax
879 mov .Lpoly+8*1(%rip), $a_ptr
880 mov .Lpoly+8*3(%rip), $t1
882 ##########################################
889 add $acc0, $acc1 # +=acc[0]<<96
895 ##########################################
908 ##########################################
921 ###########################################
934 ############################################
935 # Add the rest of the acc
944 sub \$-1, $acc4 # .Lpoly[0]
946 sbb $a_ptr, $acc5 # .Lpoly[1]
947 sbb \$0, $acc6 # .Lpoly[2]
949 sbb $t1, $acc7 # .Lpoly[3]
954 mov $acc4, 8*0($r_ptr)
956 mov $acc5, 8*1($r_ptr)
958 mov $acc6, 8*2($r_ptr)
959 mov $acc7, 8*3($r_ptr)
962 .size __ecp_nistz256_sqr_montq,.-__ecp_nistz256_sqr_montq
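# The *montx subroutines below are the ADX/BMI2 code path: mulx provides a
# flag-less 64x64->128 multiply, while adcx/adox maintain two independent
# carry chains (CF and OF), so the column additions of the schoolbook
# multiplication can be interleaved without serializing on a single flag.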
967 .type __ecp_nistz256_mul_montx,\@abi-omnipotent
969 __ecp_nistz256_mul_montx:
970 ########################################################################
972 mulx $acc1, $acc0, $acc1
973 mulx $acc2, $t0, $acc2
975 xor $acc5, $acc5 # cf=0
976 mulx $acc3, $t1, $acc3
977 mov .Lpoly+8*3(%rip), $poly3
979 mulx $acc4, $t0, $acc4
982 shlx $poly1,$acc0,$t1
984 shrx $poly1,$acc0,$t0
987 ########################################################################
988 # First reduction step
992 mulx $poly3, $t0, $t1
993 mov 8*1($b_ptr), %rdx
997 xor $acc0, $acc0 # $acc0=0,cf=0,of=0
999 ########################################################################
1001 mulx 8*0+128($a_ptr), $t0, $t1
1005 mulx 8*1+128($a_ptr), $t0, $t1
1009 mulx 8*2+128($a_ptr), $t0, $t1
1013 mulx 8*3+128($a_ptr), $t0, $t1
1016 shlx $poly1, $acc1, $t0
1018 shrx $poly1, $acc1, $t1
1024 ########################################################################
1025 # Second reduction step
1029 mulx $poly3, $t0, $t1
1030 mov 8*2($b_ptr), %rdx
1034 xor $acc1 ,$acc1 # $acc1=0,cf=0,of=0
1036 ########################################################################
1038 mulx 8*0+128($a_ptr), $t0, $t1
1042 mulx 8*1+128($a_ptr), $t0, $t1
1046 mulx 8*2+128($a_ptr), $t0, $t1
1050 mulx 8*3+128($a_ptr), $t0, $t1
1053 shlx $poly1, $acc2, $t0
1055 shrx $poly1, $acc2, $t1
1061 ########################################################################
1062 # Third reduction step
1066 mulx $poly3, $t0, $t1
1067 mov 8*3($b_ptr), %rdx
1071 xor $acc2, $acc2 # $acc2=0,cf=0,of=0
1073 ########################################################################
1075 mulx 8*0+128($a_ptr), $t0, $t1
1079 mulx 8*1+128($a_ptr), $t0, $t1
1083 mulx 8*2+128($a_ptr), $t0, $t1
1087 mulx 8*3+128($a_ptr), $t0, $t1
1090 shlx $poly1, $acc3, $t0
1092 shrx $poly1, $acc3, $t1
1098 ########################################################################
1099 # Fourth reduction step
1103 mulx $poly3, $t0, $t1
1105 mov .Lpoly+8*1(%rip), $poly1
1111 ########################################################################
1112 # Branch-less conditional subtraction of P
1115 sbb \$-1, $acc4 # .Lpoly[0]
1116 sbb $poly1, $acc5 # .Lpoly[1]
1117 sbb \$0, $acc0 # .Lpoly[2]
1119 sbb $poly3, $acc1 # .Lpoly[3]
1124 mov $acc4, 8*0($r_ptr)
1126 mov $acc5, 8*1($r_ptr)
1128 mov $acc0, 8*2($r_ptr)
1129 mov $acc1, 8*3($r_ptr)
1132 .size __ecp_nistz256_mul_montx,.-__ecp_nistz256_mul_montx
1134 .type __ecp_nistz256_sqr_montx,\@abi-omnipotent
1136 __ecp_nistz256_sqr_montx:
1137 mulx $acc6, $acc1, $acc2 # a[0]*a[1]
1138 mulx $acc7, $t0, $acc3 # a[0]*a[2]
1141 mulx $acc0, $t1, $acc4 # a[0]*a[3]
1145 xor $acc5, $acc5 # $acc5=0,cf=0,of=0
1147 #################################
1148 mulx $acc7, $t0, $t1 # a[1]*a[2]
1152 mulx $acc0, $t0, $t1 # a[1]*a[3]
1158 #################################
1159 mulx $acc0, $t0, $acc6 # a[2]*a[3]
1160 mov 8*0+128($a_ptr), %rdx
1161 xor $acc7, $acc7 # $acc7=0,cf=0,of=0
1162 adcx $acc1, $acc1 # acc1:6<<1
1165 adox $acc7, $acc6 # of=0
1167 mulx %rdx, $acc0, $t1
1168 mov 8*1+128($a_ptr), %rdx
1173 mov 8*2+128($a_ptr), %rdx
1179 mov 8*3+128($a_ptr), %rdx
1189 shlx $a_ptr, $acc0, $t0
1191 shrx $a_ptr, $acc0, $t4
1192 mov .Lpoly+8*3(%rip), $t1
1198 mulx $t1, $t0, $acc0
1201 shlx $a_ptr, $acc1, $t0
1203 shrx $a_ptr, $acc1, $t4
1209 mulx $t1, $t0, $acc1
1212 shlx $a_ptr, $acc2, $t0
1214 shrx $a_ptr, $acc2, $t4
1220 mulx $t1, $t0, $acc2
1223 shlx $a_ptr, $acc3, $t0
1225 shrx $a_ptr, $acc3, $t4
1231 mulx $t1, $t0, $acc3
1236 adc $acc0, $acc4 # accumulate upper half
1237 mov .Lpoly+8*1(%rip), $a_ptr
1245 xor %eax, %eax # cf=0
1246 sbb \$-1, $acc4 # .Lpoly[0]
1248 sbb $a_ptr, $acc5 # .Lpoly[1]
1249 sbb \$0, $acc6 # .Lpoly[2]
1251 sbb $t1, $acc7 # .Lpoly[3]
1256 mov $acc4, 8*0($r_ptr)
1258 mov $acc5, 8*1($r_ptr)
1260 mov $acc6, 8*2($r_ptr)
1261 mov $acc7, 8*3($r_ptr)
1264 .size __ecp_nistz256_sqr_montx,.-__ecp_nistz256_sqr_montx
1269 my ($r_ptr,$in_ptr)=("%rdi","%rsi");
1270 my ($acc0,$acc1,$acc2,$acc3)=map("%r$_",(8..11));
1271 my ($t0,$t1,$t2)=("%rcx","%r12","%r13");
1274 ################################################################################
1275 # void ecp_nistz256_from_mont(uint64_t res[4], uint64_t in[4]);
1278 # This one performs Montgomery multiplication by 1, so we only need the reduction
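# (i.e. it returns a * 2^-256 mod p, undoing ecp_nistz256_to_mont).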
1280 .globl ecp_nistz256_from_mont
1281 .type ecp_nistz256_from_mont,\@function,2
1283 ecp_nistz256_from_mont:
1287 mov 8*0($in_ptr), %rax
1288 mov .Lpoly+8*3(%rip), $t2
1289 mov 8*1($in_ptr), $acc1
1290 mov 8*2($in_ptr), $acc2
1291 mov 8*3($in_ptr), $acc3
1293 mov .Lpoly+8*1(%rip), $t1
1295 #########################################
1307 #########################################
1320 ##########################################
1333 ###########################################
1347 ###########################################
1348 # Branch-less conditional subtraction
1358 cmovnz $in_ptr, $acc1
1359 mov $acc0, 8*0($r_ptr)
1361 mov $acc1, 8*1($r_ptr)
1363 mov $acc2, 8*2($r_ptr)
1364 mov $acc3, 8*3($r_ptr)
1369 .size ecp_nistz256_from_mont,.-ecp_nistz256_from_mont
1373 my ($val,$in_t,$index)=$win64?("%rcx","%rdx","%r8d"):("%rdi","%rsi","%edx");
1374 my ($ONE,$INDEX,$Ra,$Rb,$Rc,$Rd,$Re,$Rf)=map("%xmm$_",(0..7));
1375 my ($M0,$T0a,$T0b,$T0c,$T0d,$T0e,$T0f,$TMP0)=map("%xmm$_",(8..15));
1376 my ($M1,$T2a,$T2b,$TMP2,$M2,$T2a,$T2b,$TMP2)=map("%xmm$_",(8..15));
1379 ################################################################################
1380 # void ecp_nistz256_scatter_w5(uint64_t *val, uint64_t *in_t, int index);
1381 .globl ecp_nistz256_scatter_w5
1382 .type ecp_nistz256_scatter_w5,\@abi-omnipotent
1384 ecp_nistz256_scatter_w5:
1385 lea -3($index,$index,2), $index
1386 movdqa 0x00($in_t), %xmm0
1388 movdqa 0x10($in_t), %xmm1
1389 movdqa 0x20($in_t), %xmm2
1390 movdqa 0x30($in_t), %xmm3
1391 movdqa 0x40($in_t), %xmm4
1392 movdqa 0x50($in_t), %xmm5
1393 movdqa %xmm0, 0x00($val,$index)
1394 movdqa %xmm1, 0x10($val,$index)
1395 movdqa %xmm2, 0x20($val,$index)
1396 movdqa %xmm3, 0x30($val,$index)
1397 movdqa %xmm4, 0x40($val,$index)
1398 movdqa %xmm5, 0x50($val,$index)
1401 .size ecp_nistz256_scatter_w5,.-ecp_nistz256_scatter_w5
1403 ################################################################################
1404 # void ecp_nistz256_gather_w5(uint64_t *val, uint64_t *in_t, int index);
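#
# The gather routines (this one, the w7 variant and the AVX2 variants below)
# are constant-time table lookups: every entry is loaded and the requested one
# is selected with a mask, so the secret index never influences which
# addresses are touched. A hedged C sketch of the idea (names illustrative,
# not from this file):
#
#	static void gather_w5(uint64_t val[12], const uint64_t tab[16][12], int index)
#	{
#	    for (int j = 0; j < 12; j++) val[j] = 0;
#	    for (int i = 1; i <= 16; i++) {
#	        uint64_t d = (uint64_t)(i ^ index);
#	        uint64_t mask = ((d | (0 - d)) >> 63) - 1;  /* all-ones iff i == index */
#	        for (int j = 0; j < 12; j++)
#	            val[j] |= tab[i - 1][j] & mask;
#	    }
#	}
#
# An index of 0 matches no entry and leaves the result zeroed.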
1405 .globl ecp_nistz256_gather_w5
1406 .type ecp_nistz256_gather_w5,\@abi-omnipotent
1408 ecp_nistz256_gather_w5:
1410 $code.=<<___ if ($avx>1);
1411 mov OPENSSL_ia32cap_P+8(%rip), %eax
1413 jnz .Lavx2_gather_w5
1415 $code.=<<___ if ($win64);
1416 lea -0x88(%rsp), %rax
1417 .LSEH_begin_ecp_nistz256_gather_w5:
1418 .byte 0x48,0x8d,0x60,0xe0 #lea -0x20(%rax), %rsp
1419 .byte 0x0f,0x29,0x70,0xe0 #movaps %xmm6, -0x20(%rax)
1420 .byte 0x0f,0x29,0x78,0xf0 #movaps %xmm7, -0x10(%rax)
1421 .byte 0x44,0x0f,0x29,0x00 #movaps %xmm8, 0(%rax)
1422 .byte 0x44,0x0f,0x29,0x48,0x10 #movaps %xmm9, 0x10(%rax)
1423 .byte 0x44,0x0f,0x29,0x50,0x20 #movaps %xmm10, 0x20(%rax)
1424 .byte 0x44,0x0f,0x29,0x58,0x30 #movaps %xmm11, 0x30(%rax)
1425 .byte 0x44,0x0f,0x29,0x60,0x40 #movaps %xmm12, 0x40(%rax)
1426 .byte 0x44,0x0f,0x29,0x68,0x50 #movaps %xmm13, 0x50(%rax)
1427 .byte 0x44,0x0f,0x29,0x70,0x60 #movaps %xmm14, 0x60(%rax)
1428 .byte 0x44,0x0f,0x29,0x78,0x70 #movaps %xmm15, 0x70(%rax)
1431 movdqa .LOne(%rip), $ONE
1442 pshufd \$0, $INDEX, $INDEX
1445 .Lselect_loop_sse_w5:
1449 pcmpeqd $INDEX, $TMP0
1451 movdqa 16*0($in_t), $T0a
1452 movdqa 16*1($in_t), $T0b
1453 movdqa 16*2($in_t), $T0c
1454 movdqa 16*3($in_t), $T0d
1455 movdqa 16*4($in_t), $T0e
1456 movdqa 16*5($in_t), $T0f
1457 lea 16*6($in_t), $in_t
1473 jnz .Lselect_loop_sse_w5
1475 movdqu $Ra, 16*0($val)
1476 movdqu $Rb, 16*1($val)
1477 movdqu $Rc, 16*2($val)
1478 movdqu $Rd, 16*3($val)
1479 movdqu $Re, 16*4($val)
1480 movdqu $Rf, 16*5($val)
1482 $code.=<<___ if ($win64);
1483 movaps (%rsp), %xmm6
1484 movaps 0x10(%rsp), %xmm7
1485 movaps 0x20(%rsp), %xmm8
1486 movaps 0x30(%rsp), %xmm9
1487 movaps 0x40(%rsp), %xmm10
1488 movaps 0x50(%rsp), %xmm11
1489 movaps 0x60(%rsp), %xmm12
1490 movaps 0x70(%rsp), %xmm13
1491 movaps 0x80(%rsp), %xmm14
1492 movaps 0x90(%rsp), %xmm15
1493 lea 0xa8(%rsp), %rsp
1494 .LSEH_end_ecp_nistz256_gather_w5:
1498 .size ecp_nistz256_gather_w5,.-ecp_nistz256_gather_w5
1500 ################################################################################
1501 # void ecp_nistz256_scatter_w7(uint64_t *val, uint64_t *in_t, int index);
1502 .globl ecp_nistz256_scatter_w7
1503 .type ecp_nistz256_scatter_w7,\@abi-omnipotent
1505 ecp_nistz256_scatter_w7:
1506 movdqu 0x00($in_t), %xmm0
1508 movdqu 0x10($in_t), %xmm1
1509 movdqu 0x20($in_t), %xmm2
1510 movdqu 0x30($in_t), %xmm3
1511 movdqa %xmm0, 0x00($val,$index)
1512 movdqa %xmm1, 0x10($val,$index)
1513 movdqa %xmm2, 0x20($val,$index)
1514 movdqa %xmm3, 0x30($val,$index)
1517 .size ecp_nistz256_scatter_w7,.-ecp_nistz256_scatter_w7
1519 ################################################################################
1520 # void ecp_nistz256_gather_w7(uint64_t *val, uint64_t *in_t, int index);
1521 .globl ecp_nistz256_gather_w7
1522 .type ecp_nistz256_gather_w7,\@abi-omnipotent
1524 ecp_nistz256_gather_w7:
1526 $code.=<<___ if ($avx>1);
1527 mov OPENSSL_ia32cap_P+8(%rip), %eax
1529 jnz .Lavx2_gather_w7
1531 $code.=<<___ if ($win64);
1532 lea -0x88(%rsp), %rax
1533 .LSEH_begin_ecp_nistz256_gather_w7:
1534 .byte 0x48,0x8d,0x60,0xe0 #lea -0x20(%rax), %rsp
1535 .byte 0x0f,0x29,0x70,0xe0 #movaps %xmm6, -0x20(%rax)
1536 .byte 0x0f,0x29,0x78,0xf0 #movaps %xmm7, -0x10(%rax)
1537 .byte 0x44,0x0f,0x29,0x00 #movaps %xmm8, 0(%rax)
1538 .byte 0x44,0x0f,0x29,0x48,0x10 #movaps %xmm9, 0x10(%rax)
1539 .byte 0x44,0x0f,0x29,0x50,0x20 #movaps %xmm10, 0x20(%rax)
1540 .byte 0x44,0x0f,0x29,0x58,0x30 #movaps %xmm11, 0x30(%rax)
1541 .byte 0x44,0x0f,0x29,0x60,0x40 #movaps %xmm12, 0x40(%rax)
1542 .byte 0x44,0x0f,0x29,0x68,0x50 #movaps %xmm13, 0x50(%rax)
1543 .byte 0x44,0x0f,0x29,0x70,0x60 #movaps %xmm14, 0x60(%rax)
1544 .byte 0x44,0x0f,0x29,0x78,0x70 #movaps %xmm15, 0x70(%rax)
1547 movdqa .LOne(%rip), $M0
1556 pshufd \$0, $INDEX, $INDEX
1559 .Lselect_loop_sse_w7:
1562 movdqa 16*0($in_t), $T0a
1563 movdqa 16*1($in_t), $T0b
1564 pcmpeqd $INDEX, $TMP0
1565 movdqa 16*2($in_t), $T0c
1566 movdqa 16*3($in_t), $T0d
1567 lea 16*4($in_t), $in_t
1576 prefetcht0 255($in_t)
1580 jnz .Lselect_loop_sse_w7
1582 movdqu $Ra, 16*0($val)
1583 movdqu $Rb, 16*1($val)
1584 movdqu $Rc, 16*2($val)
1585 movdqu $Rd, 16*3($val)
1587 $code.=<<___ if ($win64);
1588 movaps (%rsp), %xmm6
1589 movaps 0x10(%rsp), %xmm7
1590 movaps 0x20(%rsp), %xmm8
1591 movaps 0x30(%rsp), %xmm9
1592 movaps 0x40(%rsp), %xmm10
1593 movaps 0x50(%rsp), %xmm11
1594 movaps 0x60(%rsp), %xmm12
1595 movaps 0x70(%rsp), %xmm13
1596 movaps 0x80(%rsp), %xmm14
1597 movaps 0x90(%rsp), %xmm15
1598 lea 0xa8(%rsp), %rsp
1599 .LSEH_end_ecp_nistz256_gather_w7:
1603 .size ecp_nistz256_gather_w7,.-ecp_nistz256_gather_w7
1607 my ($val,$in_t,$index)=$win64?("%rcx","%rdx","%r8d"):("%rdi","%rsi","%edx");
1608 my ($TWO,$INDEX,$Ra,$Rb,$Rc)=map("%ymm$_",(0..4));
1609 my ($M0,$T0a,$T0b,$T0c,$TMP0)=map("%ymm$_",(5..9));
1610 my ($M1,$T1a,$T1b,$T1c,$TMP1)=map("%ymm$_",(10..14));
1613 ################################################################################
1614 # void ecp_nistz256_avx2_gather_w5(uint64_t *val, uint64_t *in_t, int index);
1615 .type ecp_nistz256_avx2_gather_w5,\@abi-omnipotent
1617 ecp_nistz256_avx2_gather_w5:
1621 $code.=<<___ if ($win64);
1622 lea -0x88(%rsp), %rax
1623 .LSEH_begin_ecp_nistz256_avx2_gather_w5:
1624 .byte 0x48,0x8d,0x60,0xe0 #lea -0x20(%rax), %rsp
1625 .byte 0xc5,0xf8,0x29,0x70,0xe0 #vmovaps %xmm6, -0x20(%rax)
1626 .byte 0xc5,0xf8,0x29,0x78,0xf0 #vmovaps %xmm7, -0x10(%rax)
1627 .byte 0xc5,0x78,0x29,0x40,0x00 #vmovaps %xmm8, 0(%rax)
1628 .byte 0xc5,0x78,0x29,0x48,0x10 #vmovaps %xmm9, 0x10(%rax)
1629 .byte 0xc5,0x78,0x29,0x50,0x20 #vmovaps %xmm10, 0x20(%rax)
1630 .byte 0xc5,0x78,0x29,0x58,0x30 #vmovaps %xmm11, 0x30(%rax)
1631 .byte 0xc5,0x78,0x29,0x60,0x40 #vmovaps %xmm12, 0x40(%rax)
1632 .byte 0xc5,0x78,0x29,0x68,0x50 #vmovaps %xmm13, 0x50(%rax)
1633 .byte 0xc5,0x78,0x29,0x70,0x60 #vmovaps %xmm14, 0x60(%rax)
1634 .byte 0xc5,0x78,0x29,0x78,0x70 #vmovaps %xmm15, 0x70(%rax)
1637 vmovdqa .LTwo(%rip), $TWO
1643 vmovdqa .LOne(%rip), $M0
1644 vmovdqa .LTwo(%rip), $M1
1647 vpermd $INDEX, $Ra, $INDEX
1650 .Lselect_loop_avx2_w5:
1652 vmovdqa 32*0($in_t), $T0a
1653 vmovdqa 32*1($in_t), $T0b
1654 vmovdqa 32*2($in_t), $T0c
1656 vmovdqa 32*3($in_t), $T1a
1657 vmovdqa 32*4($in_t), $T1b
1658 vmovdqa 32*5($in_t), $T1c
1660 vpcmpeqd $INDEX, $M0, $TMP0
1661 vpcmpeqd $INDEX, $M1, $TMP1
1663 vpaddd $TWO, $M0, $M0
1664 vpaddd $TWO, $M1, $M1
1665 lea 32*6($in_t), $in_t
1667 vpand $TMP0, $T0a, $T0a
1668 vpand $TMP0, $T0b, $T0b
1669 vpand $TMP0, $T0c, $T0c
1670 vpand $TMP1, $T1a, $T1a
1671 vpand $TMP1, $T1b, $T1b
1672 vpand $TMP1, $T1c, $T1c
1674 vpxor $T0a, $Ra, $Ra
1675 vpxor $T0b, $Rb, $Rb
1676 vpxor $T0c, $Rc, $Rc
1677 vpxor $T1a, $Ra, $Ra
1678 vpxor $T1b, $Rb, $Rb
1679 vpxor $T1c, $Rc, $Rc
1682 jnz .Lselect_loop_avx2_w5
1684 vmovdqu $Ra, 32*0($val)
1685 vmovdqu $Rb, 32*1($val)
1686 vmovdqu $Rc, 32*2($val)
1689 $code.=<<___ if ($win64);
1690 movaps (%rsp), %xmm6
1691 movaps 0x10(%rsp), %xmm7
1692 movaps 0x20(%rsp), %xmm8
1693 movaps 0x30(%rsp), %xmm9
1694 movaps 0x40(%rsp), %xmm10
1695 movaps 0x50(%rsp), %xmm11
1696 movaps 0x60(%rsp), %xmm12
1697 movaps 0x70(%rsp), %xmm13
1698 movaps 0x80(%rsp), %xmm14
1699 movaps 0x90(%rsp), %xmm15
1700 lea 0xa8(%rsp), %rsp
1701 .LSEH_end_ecp_nistz256_avx2_gather_w5:
1705 .size ecp_nistz256_avx2_gather_w5,.-ecp_nistz256_avx2_gather_w5
1709 my ($val,$in_t,$index)=$win64?("%rcx","%rdx","%r8d"):("%rdi","%rsi","%edx");
1710 my ($THREE,$INDEX,$Ra,$Rb)=map("%ymm$_",(0..3));
1711 my ($M0,$T0a,$T0b,$TMP0)=map("%ymm$_",(4..7));
1712 my ($M1,$T1a,$T1b,$TMP1)=map("%ymm$_",(8..11));
1713 my ($M2,$T2a,$T2b,$TMP2)=map("%ymm$_",(12..15));
1717 ################################################################################
1718 # void ecp_nistz256_avx2_gather_w7(uint64_t *val, uint64_t *in_t, int index);
1719 .globl ecp_nistz256_avx2_gather_w7
1720 .type ecp_nistz256_avx2_gather_w7,\@abi-omnipotent
1722 ecp_nistz256_avx2_gather_w7:
1726 $code.=<<___ if ($win64);
1727 lea -0x88(%rsp), %rax
1728 .LSEH_begin_ecp_nistz256_avx2_gather_w7:
1729 .byte 0x48,0x8d,0x60,0xe0 #lea -0x20(%rax), %rsp
1730 .byte 0xc5,0xf8,0x29,0x70,0xe0 #vmovaps %xmm6, -0x20(%rax)
1731 .byte 0xc5,0xf8,0x29,0x78,0xf0 #vmovaps %xmm7, -0x10(%rax)
1732 .byte 0xc5,0x78,0x29,0x40,0x00 #vmovaps %xmm8, 0(%rax)
1733 .byte 0xc5,0x78,0x29,0x48,0x10 #vmovaps %xmm9, 0x10(%rax)
1734 .byte 0xc5,0x78,0x29,0x50,0x20 #vmovaps %xmm10, 0x20(%rax)
1735 .byte 0xc5,0x78,0x29,0x58,0x30 #vmovaps %xmm11, 0x30(%rax)
1736 .byte 0xc5,0x78,0x29,0x60,0x40 #vmovaps %xmm12, 0x40(%rax)
1737 .byte 0xc5,0x78,0x29,0x68,0x50 #vmovaps %xmm13, 0x50(%rax)
1738 .byte 0xc5,0x78,0x29,0x70,0x60 #vmovaps %xmm14, 0x60(%rax)
1739 .byte 0xc5,0x78,0x29,0x78,0x70 #vmovaps %xmm15, 0x70(%rax)
1742 vmovdqa .LThree(%rip), $THREE
1747 vmovdqa .LOne(%rip), $M0
1748 vmovdqa .LTwo(%rip), $M1
1749 vmovdqa .LThree(%rip), $M2
1752 vpermd $INDEX, $Ra, $INDEX
1753 # Skip index = 0, because it is implicitly the point at infinity
1756 .Lselect_loop_avx2_w7:
1758 vmovdqa 32*0($in_t), $T0a
1759 vmovdqa 32*1($in_t), $T0b
1761 vmovdqa 32*2($in_t), $T1a
1762 vmovdqa 32*3($in_t), $T1b
1764 vmovdqa 32*4($in_t), $T2a
1765 vmovdqa 32*5($in_t), $T2b
1767 vpcmpeqd $INDEX, $M0, $TMP0
1768 vpcmpeqd $INDEX, $M1, $TMP1
1769 vpcmpeqd $INDEX, $M2, $TMP2
1771 vpaddd $THREE, $M0, $M0
1772 vpaddd $THREE, $M1, $M1
1773 vpaddd $THREE, $M2, $M2
1774 lea 32*6($in_t), $in_t
1776 vpand $TMP0, $T0a, $T0a
1777 vpand $TMP0, $T0b, $T0b
1778 vpand $TMP1, $T1a, $T1a
1779 vpand $TMP1, $T1b, $T1b
1780 vpand $TMP2, $T2a, $T2a
1781 vpand $TMP2, $T2b, $T2b
1783 vpxor $T0a, $Ra, $Ra
1784 vpxor $T0b, $Rb, $Rb
1785 vpxor $T1a, $Ra, $Ra
1786 vpxor $T1b, $Rb, $Rb
1787 vpxor $T2a, $Ra, $Ra
1788 vpxor $T2b, $Rb, $Rb
1791 jnz .Lselect_loop_avx2_w7
1794 vmovdqa 32*0($in_t), $T0a
1795 vmovdqa 32*1($in_t), $T0b
1797 vpcmpeqd $INDEX, $M0, $TMP0
1799 vpand $TMP0, $T0a, $T0a
1800 vpand $TMP0, $T0b, $T0b
1802 vpxor $T0a, $Ra, $Ra
1803 vpxor $T0b, $Rb, $Rb
1805 vmovdqu $Ra, 32*0($val)
1806 vmovdqu $Rb, 32*1($val)
1809 $code.=<<___ if ($win64);
1810 movaps (%rsp), %xmm6
1811 movaps 0x10(%rsp), %xmm7
1812 movaps 0x20(%rsp), %xmm8
1813 movaps 0x30(%rsp), %xmm9
1814 movaps 0x40(%rsp), %xmm10
1815 movaps 0x50(%rsp), %xmm11
1816 movaps 0x60(%rsp), %xmm12
1817 movaps 0x70(%rsp), %xmm13
1818 movaps 0x80(%rsp), %xmm14
1819 movaps 0x90(%rsp), %xmm15
1820 lea 0xa8(%rsp), %rsp
1821 .LSEH_end_ecp_nistz256_avx2_gather_w7:
1825 .size ecp_nistz256_avx2_gather_w7,.-ecp_nistz256_avx2_gather_w7
1829 .globl ecp_nistz256_avx2_gather_w7
1830 .type ecp_nistz256_avx2_gather_w7,\@function,3
1832 ecp_nistz256_avx2_gather_w7:
1833 .byte 0x0f,0x0b # ud2
1835 .size ecp_nistz256_avx2_gather_w7,.-ecp_nistz256_avx2_gather_w7
1839 ########################################################################
1840 # This block implements the higher-level point_double, point_add and
1841 # point_add_affine routines. The key to performance here is to let the
1842 # out-of-order execution logic overlap computations from the next step
1843 # with tail processing from the current step. By using a tailored calling
1844 # sequence we minimize inter-step overhead and give the processor a better
1845 # shot at overlapping operations...
1847 # You will notice that the input data is copied to the stack. The trouble is
1848 # that there are no registers to spare for holding the original pointers, and
1849 # reloading them would create undesired dependencies on the effective-address
1850 # calculation paths. In other words, the copying is done deliberately to
1851 # favour the out-of-order execution logic.
1852 # <appro@openssl.org>
1854 my ($r_ptr,$a_ptr,$b_org,$b_ptr)=("%rdi","%rsi","%rdx","%rbx");
1855 my ($acc0,$acc1,$acc2,$acc3,$acc4,$acc5,$acc6,$acc7)=map("%r$_",(8..15));
1856 my ($t0,$t1,$t2,$t3,$t4)=("%rax","%rbp","%rcx",$acc4,$acc4);
1857 my ($poly1,$poly3)=($acc6,$acc7);
1859 sub load_for_mul () {
1860 my ($a,$b,$src0) = @_;
1861 my $bias = $src0 eq "%rax" ? 0 : -128;
1867 lea $bias+$a, $a_ptr
1872 sub load_for_sqr () {
1874 my $bias = $src0 eq "%rax" ? 0 : -128;
1878 lea $bias+$a, $a_ptr
1884 ########################################################################
1885 # operate in 4-5-0-1 "name space" that matches multiplication output
1887 my ($a0,$a1,$a2,$a3,$t3,$t4)=($acc4,$acc5,$acc0,$acc1,$acc2,$acc3);
1890 .type __ecp_nistz256_add_toq,\@abi-omnipotent
1892 __ecp_nistz256_add_toq:
1893 add 8*0($b_ptr), $a0
1894 adc 8*1($b_ptr), $a1
1896 adc 8*2($b_ptr), $a2
1897 adc 8*3($b_ptr), $a3
1911 mov $a0, 8*0($r_ptr)
1913 mov $a1, 8*1($r_ptr)
1915 mov $a2, 8*2($r_ptr)
1916 mov $a3, 8*3($r_ptr)
1919 .size __ecp_nistz256_add_toq,.-__ecp_nistz256_add_toq
1921 .type __ecp_nistz256_sub_fromq,\@abi-omnipotent
1923 __ecp_nistz256_sub_fromq:
1924 sub 8*0($b_ptr), $a0
1925 sbb 8*1($b_ptr), $a1
1927 sbb 8*2($b_ptr), $a2
1928 sbb 8*3($b_ptr), $a3
1942 mov $a0, 8*0($r_ptr)
1944 mov $a1, 8*1($r_ptr)
1946 mov $a2, 8*2($r_ptr)
1947 mov $a3, 8*3($r_ptr)
1950 .size __ecp_nistz256_sub_fromq,.-__ecp_nistz256_sub_fromq
1952 .type __ecp_nistz256_subq,\@abi-omnipotent
1954 __ecp_nistz256_subq:
1977 .size __ecp_nistz256_subq,.-__ecp_nistz256_subq
1979 .type __ecp_nistz256_mul_by_2q,\@abi-omnipotent
1981 __ecp_nistz256_mul_by_2q:
1982 add $a0, $a0 # a0:a3+a0:a3
2000 mov $a0, 8*0($r_ptr)
2002 mov $a1, 8*1($r_ptr)
2004 mov $a2, 8*2($r_ptr)
2005 mov $a3, 8*3($r_ptr)
2008 .size __ecp_nistz256_mul_by_2q,.-__ecp_nistz256_mul_by_2q
2013 my ($src0,$sfx,$bias);
2014 my ($S,$M,$Zsqr,$in_x,$tmp0)=map(32*$_,(0..4));
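# For reference, the call sequence below evaluates the usual Jacobian-
# coordinate doubling formulas (a sketch of the algebra, not extra code):
#
#	S  = 4*X*Y^2		M  = 3*(X - Z^2)*(X + Z^2)
#	X' = M^2 - 2*S		Z' = 2*Y*Z
#	Y' = M*(S - X') - 8*Y^4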
2022 .globl ecp_nistz256_point_double
2023 .type ecp_nistz256_point_double,\@function,2
2025 ecp_nistz256_point_double:
2027 $code.=<<___ if ($addx);
2029 and OPENSSL_ia32cap_P+8(%rip), %ecx
2039 .type ecp_nistz256_point_doublex,\@function,2
2041 ecp_nistz256_point_doublex:
2054 .Lpoint_double_shortcut$x:
2055 movdqu 0x00($a_ptr), %xmm0 # copy *(P256_POINT *)$a_ptr.x
2056 mov $a_ptr, $b_ptr # backup copy
2057 movdqu 0x10($a_ptr), %xmm1
2058 mov 0x20+8*0($a_ptr), $acc4 # load in_y in "5-4-0-1" order
2059 mov 0x20+8*1($a_ptr), $acc5
2060 mov 0x20+8*2($a_ptr), $acc0
2061 mov 0x20+8*3($a_ptr), $acc1
2062 mov .Lpoly+8*1(%rip), $poly1
2063 mov .Lpoly+8*3(%rip), $poly3
2064 movdqa %xmm0, $in_x(%rsp)
2065 movdqa %xmm1, $in_x+0x10(%rsp)
2066 lea 0x20($r_ptr), $acc2
2067 lea 0x40($r_ptr), $acc3
2072 lea $S(%rsp), $r_ptr
2073 call __ecp_nistz256_mul_by_2$x # p256_mul_by_2(S, in_y);
2075 mov 0x40+8*0($a_ptr), $src0
2076 mov 0x40+8*1($a_ptr), $acc6
2077 mov 0x40+8*2($a_ptr), $acc7
2078 mov 0x40+8*3($a_ptr), $acc0
2079 lea 0x40-$bias($a_ptr), $a_ptr
2080 lea $Zsqr(%rsp), $r_ptr
2081 call __ecp_nistz256_sqr_mont$x # p256_sqr_mont(Zsqr, in_z);
2083 `&load_for_sqr("$S(%rsp)", "$src0")`
2084 lea $S(%rsp), $r_ptr
2085 call __ecp_nistz256_sqr_mont$x # p256_sqr_mont(S, S);
2087 mov 0x20($b_ptr), $src0 # $b_ptr is still valid
2088 mov 0x40+8*0($b_ptr), $acc1
2089 mov 0x40+8*1($b_ptr), $acc2
2090 mov 0x40+8*2($b_ptr), $acc3
2091 mov 0x40+8*3($b_ptr), $acc4
2092 lea 0x40-$bias($b_ptr), $a_ptr
2093 lea 0x20($b_ptr), $b_ptr
2095 call __ecp_nistz256_mul_mont$x # p256_mul_mont(res_z, in_z, in_y);
2096 call __ecp_nistz256_mul_by_2$x # p256_mul_by_2(res_z, res_z);
2098 mov $in_x+8*0(%rsp), $acc4 # "5-4-0-1" order
2099 mov $in_x+8*1(%rsp), $acc5
2100 lea $Zsqr(%rsp), $b_ptr
2101 mov $in_x+8*2(%rsp), $acc0
2102 mov $in_x+8*3(%rsp), $acc1
2103 lea $M(%rsp), $r_ptr
2104 call __ecp_nistz256_add_to$x # p256_add(M, in_x, Zsqr);
2106 mov $in_x+8*0(%rsp), $acc4 # "5-4-0-1" order
2107 mov $in_x+8*1(%rsp), $acc5
2108 lea $Zsqr(%rsp), $b_ptr
2109 mov $in_x+8*2(%rsp), $acc0
2110 mov $in_x+8*3(%rsp), $acc1
2111 lea $Zsqr(%rsp), $r_ptr
2112 call __ecp_nistz256_sub_from$x # p256_sub(Zsqr, in_x, Zsqr);
2114 `&load_for_sqr("$S(%rsp)", "$src0")`
2116 call __ecp_nistz256_sqr_mont$x # p256_sqr_mont(res_y, S);
2119 ######## ecp_nistz256_div_by_2(res_y, res_y); ##########################
2120 # operate in 4-5-6-7 "name space" that matches squaring output
2122 my ($poly1,$poly3)=($a_ptr,$t1);
2123 my ($a0,$a1,$a2,$a3,$t3,$t4,$t1)=($acc4,$acc5,$acc6,$acc7,$acc0,$acc1,$acc2);
2136 xor $a_ptr, $a_ptr # borrow $a_ptr
2145 mov $a1, $t0 # a0:a3>>1
2156 mov $a0, 8*0($r_ptr)
2158 mov $a1, 8*1($r_ptr)
2162 mov $a2, 8*2($r_ptr)
2163 mov $a3, 8*3($r_ptr)
2167 `&load_for_mul("$M(%rsp)", "$Zsqr(%rsp)", "$src0")`
2168 lea $M(%rsp), $r_ptr
2169 call __ecp_nistz256_mul_mont$x # p256_mul_mont(M, M, Zsqr);
2171 lea $tmp0(%rsp), $r_ptr
2172 call __ecp_nistz256_mul_by_2$x
2174 lea $M(%rsp), $b_ptr
2175 lea $M(%rsp), $r_ptr
2176 call __ecp_nistz256_add_to$x # p256_mul_by_3(M, M);
2178 `&load_for_mul("$S(%rsp)", "$in_x(%rsp)", "$src0")`
2179 lea $S(%rsp), $r_ptr
2180 call __ecp_nistz256_mul_mont$x # p256_mul_mont(S, S, in_x);
2182 lea $tmp0(%rsp), $r_ptr
2183 call __ecp_nistz256_mul_by_2$x # p256_mul_by_2(tmp0, S);
2185 `&load_for_sqr("$M(%rsp)", "$src0")`
2187 call __ecp_nistz256_sqr_mont$x # p256_sqr_mont(res_x, M);
2189 lea $tmp0(%rsp), $b_ptr
2190 mov $acc6, $acc0 # harmonize sqr output and sub input
2194 call __ecp_nistz256_sub_from$x # p256_sub(res_x, res_x, tmp0);
2196 mov $S+8*0(%rsp), $t0
2197 mov $S+8*1(%rsp), $t1
2198 mov $S+8*2(%rsp), $t2
2199 mov $S+8*3(%rsp), $acc2 # "4-5-0-1" order
2200 lea $S(%rsp), $r_ptr
2201 call __ecp_nistz256_sub$x # p256_sub(S, S, res_x);
2204 lea $M(%rsp), $b_ptr
2205 mov $acc4, $acc6 # harmonize sub output and mul input
2207 mov $acc4, $S+8*0(%rsp) # have to save:-(
2209 mov $acc5, $S+8*1(%rsp)
2211 mov $acc0, $S+8*2(%rsp)
2212 lea $S-$bias(%rsp), $a_ptr
2214 mov $acc1, $S+8*3(%rsp)
2216 lea $S(%rsp), $r_ptr
2217 call __ecp_nistz256_mul_mont$x # p256_mul_mont(S, S, M);
2221 call __ecp_nistz256_sub_from$x # p256_sub(res_y, S, res_y);
2231 .size ecp_nistz256_point_double$sfx,.-ecp_nistz256_point_double$sfx
2238 my ($src0,$sfx,$bias);
2239 my ($H,$Hsqr,$R,$Rsqr,$Hcub,
2241 $res_x,$res_y,$res_z,
2242 $in1_x,$in1_y,$in1_z,
2243 $in2_x,$in2_y,$in2_z)=map(32*$_,(0..17));
2244 my ($Z1sqr, $Z2sqr) = ($Hsqr, $Rsqr);
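# For reference, the call sequence below follows the textbook Jacobian
# addition (a sketch of the algebra, not extra code):
#
#	U1 = X1*Z2^2	S1 = Y1*Z2^3	U2 = X2*Z1^2	S2 = Y2*Z1^3
#	H  = U2 - U1	R  = S2 - S1
#	X3 = R^2 - H^3 - 2*U1*H^2
#	Y3 = R*(U1*H^2 - X3) - S1*H^3
#	Z3 = H*Z1*Z2
#
# with the special cases (an input at infinity, or U1 == U2) diverted by the
# explicit checks further down.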
2252 .globl ecp_nistz256_point_add
2253 .type ecp_nistz256_point_add,\@function,3
2255 ecp_nistz256_point_add:
2257 $code.=<<___ if ($addx);
2259 and OPENSSL_ia32cap_P+8(%rip), %ecx
2269 .type ecp_nistz256_point_addx,\@function,3
2271 ecp_nistz256_point_addx:
2284 movdqu 0x00($a_ptr), %xmm0 # copy *(P256_POINT *)$a_ptr
2285 movdqu 0x10($a_ptr), %xmm1
2286 movdqu 0x20($a_ptr), %xmm2
2287 movdqu 0x30($a_ptr), %xmm3
2288 movdqu 0x40($a_ptr), %xmm4
2289 movdqu 0x50($a_ptr), %xmm5
2290 mov $a_ptr, $b_ptr # reassign
2291 mov $b_org, $a_ptr # reassign
2292 movdqa %xmm0, $in1_x(%rsp)
2293 movdqa %xmm1, $in1_x+0x10(%rsp)
2295 movdqa %xmm2, $in1_y(%rsp)
2296 movdqa %xmm3, $in1_y+0x10(%rsp)
2298 movdqa %xmm4, $in1_z(%rsp)
2299 movdqa %xmm5, $in1_z+0x10(%rsp)
2302 movdqu 0x00($a_ptr), %xmm0 # copy *(P256_POINT *)$b_ptr
2303 pshufd \$0xb1, %xmm3, %xmm5
2304 movdqu 0x10($a_ptr), %xmm1
2305 movdqu 0x20($a_ptr), %xmm2
2307 movdqu 0x30($a_ptr), %xmm3
2308 mov 0x40+8*0($a_ptr), $src0 # load original in2_z
2309 mov 0x40+8*1($a_ptr), $acc6
2310 mov 0x40+8*2($a_ptr), $acc7
2311 mov 0x40+8*3($a_ptr), $acc0
2312 movdqa %xmm0, $in2_x(%rsp)
2313 pshufd \$0x1e, %xmm5, %xmm4
2314 movdqa %xmm1, $in2_x+0x10(%rsp)
2316 movq $r_ptr, %xmm0 # save $r_ptr
2317 movdqa %xmm2, $in2_y(%rsp)
2318 movdqa %xmm3, $in2_y+0x10(%rsp)
2324 lea 0x40-$bias($a_ptr), $a_ptr # $a_ptr is still valid
2325 mov $src0, $in2_z+8*0(%rsp) # make in2_z copy
2326 mov $acc6, $in2_z+8*1(%rsp)
2327 mov $acc7, $in2_z+8*2(%rsp)
2328 mov $acc0, $in2_z+8*3(%rsp)
2329 lea $Z2sqr(%rsp), $r_ptr # Z2^2
2330 call __ecp_nistz256_sqr_mont$x # p256_sqr_mont(Z2sqr, in2_z);
2332 pcmpeqd %xmm4, %xmm5
2333 pshufd \$0xb1, %xmm3, %xmm4
2335 pshufd \$0, %xmm5, %xmm5 # in1infty
2336 pshufd \$0x1e, %xmm4, %xmm3
2339 pcmpeqd %xmm3, %xmm4
2340 pshufd \$0, %xmm4, %xmm4 # in2infty
2341 mov 0x40+8*0($b_ptr), $src0 # load original in1_z
2342 mov 0x40+8*1($b_ptr), $acc6
2343 mov 0x40+8*2($b_ptr), $acc7
2344 mov 0x40+8*3($b_ptr), $acc0
2347 lea 0x40-$bias($b_ptr), $a_ptr
2348 lea $Z1sqr(%rsp), $r_ptr # Z1^2
2349 call __ecp_nistz256_sqr_mont$x # p256_sqr_mont(Z1sqr, in1_z);
2351 `&load_for_mul("$Z2sqr(%rsp)", "$in2_z(%rsp)", "$src0")`
2352 lea $S1(%rsp), $r_ptr # S1 = Z2^3
2353 call __ecp_nistz256_mul_mont$x # p256_mul_mont(S1, Z2sqr, in2_z);
2355 `&load_for_mul("$Z1sqr(%rsp)", "$in1_z(%rsp)", "$src0")`
2356 lea $S2(%rsp), $r_ptr # S2 = Z1^3
2357 call __ecp_nistz256_mul_mont$x # p256_mul_mont(S2, Z1sqr, in1_z);
2359 `&load_for_mul("$S1(%rsp)", "$in1_y(%rsp)", "$src0")`
2360 lea $S1(%rsp), $r_ptr # S1 = Y1*Z2^3
2361 call __ecp_nistz256_mul_mont$x # p256_mul_mont(S1, S1, in1_y);
2363 `&load_for_mul("$S2(%rsp)", "$in2_y(%rsp)", "$src0")`
2364 lea $S2(%rsp), $r_ptr # S2 = Y2*Z1^3
2365 call __ecp_nistz256_mul_mont$x # p256_mul_mont(S2, S2, in2_y);
2367 lea $S1(%rsp), $b_ptr
2368 lea $R(%rsp), $r_ptr # R = S2 - S1
2369 call __ecp_nistz256_sub_from$x # p256_sub(R, S2, S1);
2371 or $acc5, $acc4 # see if result is zero
2375 por %xmm5, %xmm2 # in1infty || in2infty
2378 `&load_for_mul("$Z2sqr(%rsp)", "$in1_x(%rsp)", "$src0")`
2379 lea $U1(%rsp), $r_ptr # U1 = X1*Z2^2
2380 call __ecp_nistz256_mul_mont$x # p256_mul_mont(U1, in1_x, Z2sqr);
2382 `&load_for_mul("$Z1sqr(%rsp)", "$in2_x(%rsp)", "$src0")`
2383 lea $U2(%rsp), $r_ptr # U2 = X2*Z1^2
2384 call __ecp_nistz256_mul_mont$x # p256_mul_mont(U2, in2_x, Z1sqr);
2386 lea $U1(%rsp), $b_ptr
2387 lea $H(%rsp), $r_ptr # H = U2 - U1
2388 call __ecp_nistz256_sub_from$x # p256_sub(H, U2, U1);
2390 or $acc5, $acc4 # see if result is zero
2394 .byte 0x3e # predict taken
2395 jnz .Ladd_proceed$x # is_equal(U1,U2)?
2399 jnz .Ladd_proceed$x # (in1infty || in2infty)?
2401 jz .Ladd_double$x # is_equal(S1,S2)?
2403 movq %xmm0, $r_ptr # restore $r_ptr
2405 movdqu %xmm0, 0x00($r_ptr)
2406 movdqu %xmm0, 0x10($r_ptr)
2407 movdqu %xmm0, 0x20($r_ptr)
2408 movdqu %xmm0, 0x30($r_ptr)
2409 movdqu %xmm0, 0x40($r_ptr)
2410 movdqu %xmm0, 0x50($r_ptr)
2415 movq %xmm1, $a_ptr # restore $a_ptr
2416 movq %xmm0, $r_ptr # restore $r_ptr
2417 add \$`32*(18-5)`, %rsp # difference in frame sizes
2418 jmp .Lpoint_double_shortcut$x
2422 `&load_for_sqr("$R(%rsp)", "$src0")`
2423 lea $Rsqr(%rsp), $r_ptr # R^2
2424 call __ecp_nistz256_sqr_mont$x # p256_sqr_mont(Rsqr, R);
2426 `&load_for_mul("$H(%rsp)", "$in1_z(%rsp)", "$src0")`
2427 lea $res_z(%rsp), $r_ptr # Z3 = H*Z1*Z2
2428 call __ecp_nistz256_mul_mont$x # p256_mul_mont(res_z, H, in1_z);
2430 `&load_for_sqr("$H(%rsp)", "$src0")`
2431 lea $Hsqr(%rsp), $r_ptr # H^2
2432 call __ecp_nistz256_sqr_mont$x # p256_sqr_mont(Hsqr, H);
2434 `&load_for_mul("$res_z(%rsp)", "$in2_z(%rsp)", "$src0")`
2435 lea $res_z(%rsp), $r_ptr # Z3 = H*Z1*Z2
2436 call __ecp_nistz256_mul_mont$x # p256_mul_mont(res_z, res_z, in2_z);
2438 `&load_for_mul("$Hsqr(%rsp)", "$H(%rsp)", "$src0")`
2439 lea $Hcub(%rsp), $r_ptr # H^3
2440 call __ecp_nistz256_mul_mont$x # p256_mul_mont(Hcub, Hsqr, H);
2442 `&load_for_mul("$Hsqr(%rsp)", "$U1(%rsp)", "$src0")`
2443 lea $U2(%rsp), $r_ptr # U1*H^2
2444 call __ecp_nistz256_mul_mont$x # p256_mul_mont(U2, U1, Hsqr);
2447 #######################################################################
2448 # operate in 4-5-0-1 "name space" that matches multiplication output
2450 my ($acc0,$acc1,$acc2,$acc3,$t3,$t4)=($acc4,$acc5,$acc0,$acc1,$acc2,$acc3);
2451 my ($poly1, $poly3)=($acc6,$acc7);
2454 #lea $U2(%rsp), $a_ptr
2455 #lea $Hsqr(%rsp), $r_ptr # 2*U1*H^2
2456 #call __ecp_nistz256_mul_by_2 # ecp_nistz256_mul_by_2(Hsqr, U2);
2458 add $acc0, $acc0 # a0:a3+a0:a3
2459 lea $Rsqr(%rsp), $a_ptr
2476 mov 8*0($a_ptr), $t0
2478 mov 8*1($a_ptr), $t1
2480 mov 8*2($a_ptr), $t2
2482 mov 8*3($a_ptr), $t3
2484 call __ecp_nistz256_sub$x # p256_sub(res_x, Rsqr, Hsqr);
2486 lea $Hcub(%rsp), $b_ptr
2487 lea $res_x(%rsp), $r_ptr
2488 call __ecp_nistz256_sub_from$x # p256_sub(res_x, res_x, Hcub);
2490 mov $U2+8*0(%rsp), $t0
2491 mov $U2+8*1(%rsp), $t1
2492 mov $U2+8*2(%rsp), $t2
2493 mov $U2+8*3(%rsp), $t3
2494 lea $res_y(%rsp), $r_ptr
2496 call __ecp_nistz256_sub$x # p256_sub(res_y, U2, res_x);
2498 mov $acc0, 8*0($r_ptr) # save the result, as
2499 mov $acc1, 8*1($r_ptr) # __ecp_nistz256_sub doesn't
2500 mov $acc2, 8*2($r_ptr)
2501 mov $acc3, 8*3($r_ptr)
2505 `&load_for_mul("$S1(%rsp)", "$Hcub(%rsp)", "$src0")`
2506 lea $S2(%rsp), $r_ptr
2507 call __ecp_nistz256_mul_mont$x # p256_mul_mont(S2, S1, Hcub);
2509 `&load_for_mul("$R(%rsp)", "$res_y(%rsp)", "$src0")`
2510 lea $res_y(%rsp), $r_ptr
2511 call __ecp_nistz256_mul_mont$x # p256_mul_mont(res_y, R, res_y);
2513 lea $S2(%rsp), $b_ptr
2514 lea $res_y(%rsp), $r_ptr
2515 call __ecp_nistz256_sub_from$x # p256_sub(res_y, res_y, S2);
2517 movq %xmm0, $r_ptr # restore $r_ptr
2519 movdqa %xmm5, %xmm0 # copy_conditional(res_z, in2_z, in1infty);
2521 pandn $res_z(%rsp), %xmm0
2523 pandn $res_z+0x10(%rsp), %xmm1
2525 pand $in2_z(%rsp), %xmm2
2526 pand $in2_z+0x10(%rsp), %xmm3
2530 movdqa %xmm4, %xmm0 # copy_conditional(res_z, in1_z, in2infty);
2536 pand $in1_z(%rsp), %xmm2
2537 pand $in1_z+0x10(%rsp), %xmm3
2540 movdqu %xmm2, 0x40($r_ptr)
2541 movdqu %xmm3, 0x50($r_ptr)
2543 movdqa %xmm5, %xmm0 # copy_conditional(res_x, in2_x, in1infty);
2545 pandn $res_x(%rsp), %xmm0
2547 pandn $res_x+0x10(%rsp), %xmm1
2549 pand $in2_x(%rsp), %xmm2
2550 pand $in2_x+0x10(%rsp), %xmm3
2554 movdqa %xmm4, %xmm0 # copy_conditional(res_x, in1_x, in2infty);
2560 pand $in1_x(%rsp), %xmm2
2561 pand $in1_x+0x10(%rsp), %xmm3
2564 movdqu %xmm2, 0x00($r_ptr)
2565 movdqu %xmm3, 0x10($r_ptr)
2567 movdqa %xmm5, %xmm0 # copy_conditional(res_y, in2_y, in1infty);
2569 pandn $res_y(%rsp), %xmm0
2571 pandn $res_y+0x10(%rsp), %xmm1
2573 pand $in2_y(%rsp), %xmm2
2574 pand $in2_y+0x10(%rsp), %xmm3
2578 movdqa %xmm4, %xmm0 # copy_conditional(res_y, in1_y, in2infty);
2584 pand $in1_y(%rsp), %xmm2
2585 pand $in1_y+0x10(%rsp), %xmm3
2588 movdqu %xmm2, 0x20($r_ptr)
2589 movdqu %xmm3, 0x30($r_ptr)
2600 .size ecp_nistz256_point_add$sfx,.-ecp_nistz256_point_add$sfx
2605 sub gen_add_affine () {
2607 my ($src0,$sfx,$bias);
2608 my ($U2,$S2,$H,$R,$Hsqr,$Hcub,$Rsqr,
2609 $res_x,$res_y,$res_z,
2610 $in1_x,$in1_y,$in1_z,
2611 $in2_x,$in2_y)=map(32*$_,(0..14));
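# Same addition as above, specialised to an affine second input (Z2 == 1,
# hence U1 = X1, S1 = Y1 and Z3 = H*Z1); a sketch of the algebra, not extra
# code:
#
#	U2 = X2*Z1^2	S2 = Y2*Z1^3
#	H  = U2 - X1	R  = S2 - Y1
#	X3 = R^2 - H^3 - 2*X1*H^2
#	Y3 = R*(X1*H^2 - X3) - Y1*H^3
#	Z3 = H*Z1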
2620 .globl ecp_nistz256_point_add_affine
2621 .type ecp_nistz256_point_add_affine,\@function,3
2623 ecp_nistz256_point_add_affine:
2625 $code.=<<___ if ($addx);
2627 and OPENSSL_ia32cap_P+8(%rip), %ecx
2629 je .Lpoint_add_affinex
2637 .type ecp_nistz256_point_add_affinex,\@function,3
2639 ecp_nistz256_point_add_affinex:
2640 .Lpoint_add_affinex:
2652 movdqu 0x00($a_ptr), %xmm0 # copy *(P256_POINT *)$a_ptr
2653 mov $b_org, $b_ptr # reassign
2654 movdqu 0x10($a_ptr), %xmm1
2655 movdqu 0x20($a_ptr), %xmm2
2656 movdqu 0x30($a_ptr), %xmm3
2657 movdqu 0x40($a_ptr), %xmm4
2658 movdqu 0x50($a_ptr), %xmm5
2659 mov 0x40+8*0($a_ptr), $src0 # load original in1_z
2660 mov 0x40+8*1($a_ptr), $acc6
2661 mov 0x40+8*2($a_ptr), $acc7
2662 mov 0x40+8*3($a_ptr), $acc0
2663 movdqa %xmm0, $in1_x(%rsp)
2664 movdqa %xmm1, $in1_x+0x10(%rsp)
2666 movdqa %xmm2, $in1_y(%rsp)
2667 movdqa %xmm3, $in1_y+0x10(%rsp)
2669 movdqa %xmm4, $in1_z(%rsp)
2670 movdqa %xmm5, $in1_z+0x10(%rsp)
2673 movdqu 0x00($b_ptr), %xmm0 # copy *(P256_POINT_AFFINE *)$b_ptr
2674 pshufd \$0xb1, %xmm3, %xmm5
2675 movdqu 0x10($b_ptr), %xmm1
2676 movdqu 0x20($b_ptr), %xmm2
2678 movdqu 0x30($b_ptr), %xmm3
2679 movdqa %xmm0, $in2_x(%rsp)
2680 pshufd \$0x1e, %xmm5, %xmm4
2681 movdqa %xmm1, $in2_x+0x10(%rsp)
2683 movq $r_ptr, %xmm0 # save $r_ptr
2684 movdqa %xmm2, $in2_y(%rsp)
2685 movdqa %xmm3, $in2_y+0x10(%rsp)
2691 lea 0x40-$bias($a_ptr), $a_ptr # $a_ptr is still valid
2692 lea $Z1sqr(%rsp), $r_ptr # Z1^2
2693 call __ecp_nistz256_sqr_mont$x # p256_sqr_mont(Z1sqr, in1_z);
2695 pcmpeqd %xmm4, %xmm5
2696 pshufd \$0xb1, %xmm3, %xmm4
2697 mov 0x00($b_ptr), $src0 # $b_ptr is still valid
2698 #lea 0x00($b_ptr), $b_ptr
2699 mov $acc4, $acc1 # harmonize sqr output and mul input
2701 pshufd \$0, %xmm5, %xmm5 # in1infty
2702 pshufd \$0x1e, %xmm4, %xmm3
2707 pcmpeqd %xmm3, %xmm4
2708 pshufd \$0, %xmm4, %xmm4 # in2infty
2710 lea $Z1sqr-$bias(%rsp), $a_ptr
2712 lea $U2(%rsp), $r_ptr # U2 = X2*Z1^2
2713 call __ecp_nistz256_mul_mont$x # p256_mul_mont(U2, Z1sqr, in2_x);
2715 lea $in1_x(%rsp), $b_ptr
2716 lea $H(%rsp), $r_ptr # H = U2 - U1
2717 call __ecp_nistz256_sub_from$x # p256_sub(H, U2, in1_x);
2719 `&load_for_mul("$Z1sqr(%rsp)", "$in1_z(%rsp)", "$src0")`
2720 lea $S2(%rsp), $r_ptr # S2 = Z1^3
2721 call __ecp_nistz256_mul_mont$x # p256_mul_mont(S2, Z1sqr, in1_z);
2723 `&load_for_mul("$H(%rsp)", "$in1_z(%rsp)", "$src0")`
2724 lea $res_z(%rsp), $r_ptr # Z3 = H*Z1*Z2
2725 call __ecp_nistz256_mul_mont$x # p256_mul_mont(res_z, H, in1_z);
2727 `&load_for_mul("$S2(%rsp)", "$in2_y(%rsp)", "$src0")`
2728 lea $S2(%rsp), $r_ptr # S2 = Y2*Z1^3
2729 call __ecp_nistz256_mul_mont$x # p256_mul_mont(S2, S2, in2_y);
2731 lea $in1_y(%rsp), $b_ptr
2732 lea $R(%rsp), $r_ptr # R = S2 - S1
2733 call __ecp_nistz256_sub_from$x # p256_sub(R, S2, in1_y);
2735 `&load_for_sqr("$H(%rsp)", "$src0")`
2736 lea $Hsqr(%rsp), $r_ptr # H^2
2737 call __ecp_nistz256_sqr_mont$x # p256_sqr_mont(Hsqr, H);
2739 `&load_for_sqr("$R(%rsp)", "$src0")`
2740 lea $Rsqr(%rsp), $r_ptr # R^2
2741 call __ecp_nistz256_sqr_mont$x # p256_sqr_mont(Rsqr, R);
2743 `&load_for_mul("$H(%rsp)", "$Hsqr(%rsp)", "$src0")`
2744 lea $Hcub(%rsp), $r_ptr # H^3
2745 call __ecp_nistz256_mul_mont$x # p256_mul_mont(Hcub, Hsqr, H);
2747 `&load_for_mul("$Hsqr(%rsp)", "$in1_x(%rsp)", "$src0")`
2748 lea $U2(%rsp), $r_ptr # U1*H^2
2749 call __ecp_nistz256_mul_mont$x # p256_mul_mont(U2, in1_x, Hsqr);
2752 #######################################################################
2753 # operate in 4-5-0-1 "name space" that matches multiplication output
2755 my ($acc0,$acc1,$acc2,$acc3,$t3,$t4)=($acc4,$acc5,$acc0,$acc1,$acc2,$acc3);
2756 my ($poly1, $poly3)=($acc6,$acc7);
2759 #lea $U2(%rsp), $a_ptr
2760 #lea $Hsqr(%rsp), $r_ptr # 2*U1*H^2
2761 #call __ecp_nistz256_mul_by_2 # ecp_nistz256_mul_by_2(Hsqr, U2);
2763 add $acc0, $acc0 # a0:a3+a0:a3
2764 lea $Rsqr(%rsp), $a_ptr
2781 mov 8*0($a_ptr), $t0
2783 mov 8*1($a_ptr), $t1
2785 mov 8*2($a_ptr), $t2
2787 mov 8*3($a_ptr), $t3
2789 call __ecp_nistz256_sub$x # p256_sub(res_x, Rsqr, Hsqr);
2791 lea $Hcub(%rsp), $b_ptr
2792 lea $res_x(%rsp), $r_ptr
2793 call __ecp_nistz256_sub_from$x # p256_sub(res_x, res_x, Hcub);
2795 mov $U2+8*0(%rsp), $t0
2796 mov $U2+8*1(%rsp), $t1
2797 mov $U2+8*2(%rsp), $t2
2798 mov $U2+8*3(%rsp), $t3
2799 lea $H(%rsp), $r_ptr
2801 call __ecp_nistz256_sub$x # p256_sub(H, U2, res_x);
2803 mov $acc0, 8*0($r_ptr) # save the result, as
2804 mov $acc1, 8*1($r_ptr) # __ecp_nistz256_sub doesn't
2805 mov $acc2, 8*2($r_ptr)
2806 mov $acc3, 8*3($r_ptr)
2810 `&load_for_mul("$Hcub(%rsp)", "$in1_y(%rsp)", "$src0")`
2811 lea $S2(%rsp), $r_ptr
2812 call __ecp_nistz256_mul_mont$x # p256_mul_mont(S2, Hcub, in1_y);
2814 `&load_for_mul("$H(%rsp)", "$R(%rsp)", "$src0")`
2815 lea $H(%rsp), $r_ptr
2816 call __ecp_nistz256_mul_mont$x # p256_mul_mont(H, H, R);
2818 lea $S2(%rsp), $b_ptr
2819 lea $res_y(%rsp), $r_ptr
2820 call __ecp_nistz256_sub_from$x # p256_sub(res_y, H, S2);
2822 movq %xmm0, $r_ptr # restore $r_ptr
2824 movdqa %xmm5, %xmm0 # copy_conditional(res_z, ONE, in1infty);
2826 pandn $res_z(%rsp), %xmm0
2828 pandn $res_z+0x10(%rsp), %xmm1
2830 pand .LONE_mont(%rip), %xmm2
2831 pand .LONE_mont+0x10(%rip), %xmm3
2835 movdqa %xmm4, %xmm0 # copy_conditional(res_z, in1_z, in2infty);
2841 pand $in1_z(%rsp), %xmm2
2842 pand $in1_z+0x10(%rsp), %xmm3
2845 movdqu %xmm2, 0x40($r_ptr)
2846 movdqu %xmm3, 0x50($r_ptr)
2848 movdqa %xmm5, %xmm0 # copy_conditional(res_x, in2_x, in1infty);
2850 pandn $res_x(%rsp), %xmm0
2852 pandn $res_x+0x10(%rsp), %xmm1
2854 pand $in2_x(%rsp), %xmm2
2855 pand $in2_x+0x10(%rsp), %xmm3
2859 movdqa %xmm4, %xmm0 # copy_conditional(res_x, in1_x, in2infty);
2865 pand $in1_x(%rsp), %xmm2
2866 pand $in1_x+0x10(%rsp), %xmm3
2869 movdqu %xmm2, 0x00($r_ptr)
2870 movdqu %xmm3, 0x10($r_ptr)
2872 movdqa %xmm5, %xmm0 # copy_conditional(res_y, in2_y, in1infty);
2874 pandn $res_y(%rsp), %xmm0
2876 pandn $res_y+0x10(%rsp), %xmm1
2878 pand $in2_y(%rsp), %xmm2
2879 pand $in2_y+0x10(%rsp), %xmm3
2883 movdqa %xmm4, %xmm0 # copy_conditional(res_y, in1_y, in2infty);
2889 pand $in1_y(%rsp), %xmm2
2890 pand $in1_y+0x10(%rsp), %xmm3
2893 movdqu %xmm2, 0x20($r_ptr)
2894 movdqu %xmm3, 0x30($r_ptr)
2904 .size ecp_nistz256_point_add_affine$sfx,.-ecp_nistz256_point_add_affine$sfx
2907 &gen_add_affine("q");
2909 ########################################################################
2913 ########################################################################
2914 # operate in 4-5-0-1 "name space" that matches multiplication output
2916 my ($a0,$a1,$a2,$a3,$t3,$t4)=($acc4,$acc5,$acc0,$acc1,$acc2,$acc3);
2919 .type __ecp_nistz256_add_tox,\@abi-omnipotent
2921 __ecp_nistz256_add_tox:
2923 adc 8*0($b_ptr), $a0
2924 adc 8*1($b_ptr), $a1
2926 adc 8*2($b_ptr), $a2
2927 adc 8*3($b_ptr), $a3
2942 mov $a0, 8*0($r_ptr)
2944 mov $a1, 8*1($r_ptr)
2946 mov $a2, 8*2($r_ptr)
2947 mov $a3, 8*3($r_ptr)
2950 .size __ecp_nistz256_add_tox,.-__ecp_nistz256_add_tox
2952 .type __ecp_nistz256_sub_fromx,\@abi-omnipotent
2954 __ecp_nistz256_sub_fromx:
2956 sbb 8*0($b_ptr), $a0
2957 sbb 8*1($b_ptr), $a1
2959 sbb 8*2($b_ptr), $a2
2960 sbb 8*3($b_ptr), $a3
2975 mov $a0, 8*0($r_ptr)
2977 mov $a1, 8*1($r_ptr)
2979 mov $a2, 8*2($r_ptr)
2980 mov $a3, 8*3($r_ptr)
2983 .size __ecp_nistz256_sub_fromx,.-__ecp_nistz256_sub_fromx
2985 .type __ecp_nistz256_subx,\@abi-omnipotent
2987 __ecp_nistz256_subx:
3012 .size __ecp_nistz256_subx,.-__ecp_nistz256_subx
3014 .type __ecp_nistz256_mul_by_2x,\@abi-omnipotent
3016 __ecp_nistz256_mul_by_2x:
3018 adc $a0, $a0 # a0:a3+a0:a3
3037 mov $a0, 8*0($r_ptr)
3039 mov $a1, 8*1($r_ptr)
3041 mov $a2, 8*2($r_ptr)
3042 mov $a3, 8*3($r_ptr)
3045 .size __ecp_nistz256_mul_by_2x,.-__ecp_nistz256_mul_by_2x
3050 &gen_add_affine("x");
3054 ########################################################################
3055 # Convert ecp_nistz256_table.c to the layout expected by ecp_nistz256_gather_w7
3057 open TABLE,"<ecp_nistz256_table.c" or
3058 open TABLE,"<${dir}../ecp_nistz256_table.c" or
3059 die "failed to open ecp_nistz256_table.c:",$!;
3064 s/TOBN\(\s*(0x[0-9a-f]+),\s*(0x[0-9a-f]+)\s*\)/push @arr,hex($2),hex($1)/geo;
3068 die "insane number of elements" if ($#arr != 64*16*37-1);
3072 .globl ecp_nistz256_precomputed
3073 .type ecp_nistz256_precomputed,\@object
3075 ecp_nistz256_precomputed:
3077 while (@line=splice(@arr,0,16)) {
3078 print ".long\t",join(',',map { sprintf "0x%08x",$_} @line),"\n";
3081 .size ecp_nistz256_precomputed,.-ecp_nistz256_precomputed
3084 $code =~ s/\`([^\`]*)\`/eval $1/gem;