2 * Copyright 2011-2018 The OpenSSL Project Authors. All Rights Reserved.
4 * Licensed under the Apache License 2.0 (the "License"). You may not use
5 * this file except in compliance with the License. You can obtain a copy
6 * in the file LICENSE in the source distribution or at
7 * https://www.openssl.org/source/license.html
10 /* Copyright 2011 Google Inc.
12 * Licensed under the Apache License, Version 2.0 (the "License");
14 * you may not use this file except in compliance with the License.
15 * You may obtain a copy of the License at
17 * http://www.apache.org/licenses/LICENSE-2.0
19 * Unless required by applicable law or agreed to in writing, software
20 * distributed under the License is distributed on an "AS IS" BASIS,
21 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
22 * See the License for the specific language governing permissions and
23 * limitations under the License.
27 * A 64-bit implementation of the NIST P-521 elliptic curve point multiplication
29 * OpenSSL integration was taken from Emilia Kasper's work in ecp_nistp224.c.
30 * Otherwise based on Emilia's P224 work, which was inspired by my curve25519
31 * work which got its smarts from Daniel J. Bernstein's work on the same.
34 #include <openssl/e_os2.h>
35 #ifdef OPENSSL_NO_EC_NISTP_64_GCC_128
36 NON_EMPTY_TRANSLATION_UNIT
40 # include <openssl/err.h>
43 # if defined(__SIZEOF_INT128__) && __SIZEOF_INT128__==16
44 /* even with gcc, the typedef won't work for 32-bit platforms */
45 typedef __uint128_t uint128_t; /* nonstandard; implemented by gcc on 64-bit
48 # error "Your compiler doesn't appear to support 128-bit integer types"
/*
 * The underlying field. P521 operates over GF(2^521-1). We can serialise an
 * element of this field into 66 bytes where the most significant byte
 * contains only a single bit. We call this an felem_bytearray.
 */
typedef u8 felem_bytearray[66];        /* big-endian, 521 bits in 66 bytes */
/*
 * These are the parameters of P521, taken from FIPS 186-3, section D.1.2.5.
 * These values are big-endian.
 *
 * Order: p (field prime), a (= -3 mod p), b, and the generator
 * coordinates x, y.
 *
 * NOTE(review): each 66-byte entry needs nine rows of bytes plus a closing
 * brace; the final row of every entry is not visible in this view -- confirm
 * against FIPS 186-3 D.1.2.5 that the trailing bytes are intact.
 */
static const felem_bytearray nistp521_curve_params[5] = {
    {0x01, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* p */
     0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
     0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
     0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
     0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
     0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
     0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
     0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
    {0x01, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* a = -3 */
     0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
     0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
     0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
     0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
     0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
     0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
     0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
    {0x00, 0x51, 0x95, 0x3e, 0xb9, 0x61, 0x8e, 0x1c, /* b */
     0x9a, 0x1f, 0x92, 0x9a, 0x21, 0xa0, 0xb6, 0x85,
     0x40, 0xee, 0xa2, 0xda, 0x72, 0x5b, 0x99, 0xb3,
     0x15, 0xf3, 0xb8, 0xb4, 0x89, 0x91, 0x8e, 0xf1,
     0x09, 0xe1, 0x56, 0x19, 0x39, 0x51, 0xec, 0x7e,
     0x93, 0x7b, 0x16, 0x52, 0xc0, 0xbd, 0x3b, 0xb1,
     0xbf, 0x07, 0x35, 0x73, 0xdf, 0x88, 0x3d, 0x2c,
     0x34, 0xf1, 0xef, 0x45, 0x1f, 0xd4, 0x6b, 0x50,
    {0x00, 0xc6, 0x85, 0x8e, 0x06, 0xb7, 0x04, 0x04, /* x */
     0xe9, 0xcd, 0x9e, 0x3e, 0xcb, 0x66, 0x23, 0x95,
     0xb4, 0x42, 0x9c, 0x64, 0x81, 0x39, 0x05, 0x3f,
     0xb5, 0x21, 0xf8, 0x28, 0xaf, 0x60, 0x6b, 0x4d,
     0x3d, 0xba, 0xa1, 0x4b, 0x5e, 0x77, 0xef, 0xe7,
     0x59, 0x28, 0xfe, 0x1d, 0xc1, 0x27, 0xa2, 0xff,
     0xa8, 0xde, 0x33, 0x48, 0xb3, 0xc1, 0x85, 0x6a,
     0x42, 0x9b, 0xf9, 0x7e, 0x7e, 0x31, 0xc2, 0xe5,
    {0x01, 0x18, 0x39, 0x29, 0x6a, 0x78, 0x9a, 0x3b, /* y */
     0xc0, 0x04, 0x5c, 0x8a, 0x5f, 0xb4, 0x2c, 0x7d,
     0x1b, 0xd9, 0x98, 0xf5, 0x44, 0x49, 0x57, 0x9b,
     0x44, 0x68, 0x17, 0xaf, 0xbd, 0x17, 0x27, 0x3e,
     0x66, 0x2c, 0x97, 0xee, 0x72, 0x99, 0x5e, 0xf4,
     0x26, 0x40, 0xc5, 0x50, 0xb9, 0x01, 0x3f, 0xad,
     0x07, 0x61, 0x35, 0x3c, 0x70, 0x86, 0xa2, 0x72,
     0xc2, 0x40, 0x88, 0xbe, 0x94, 0x76, 0x9f, 0xd1,
/*
 * The representation of field elements.
 * ------------------------------------
 *
 * We represent field elements with nine values. These values are either 64 or
 * 128 bits and the field element represented is:
 *   v[0]*2^0 + v[1]*2^58 + v[2]*2^116 + ... + v[8]*2^464 (mod p)
 * Each of the nine values is called a 'limb'. Since the limbs are spaced only
 * 58 bits apart, but are greater than 58 bits in length, the most significant
 * bits of each limb overlap with the least significant bits of the next.
 *
 * A field element with 64-bit limbs is an 'felem'. One with 128-bit limbs is a
 * 'largefelem'.
 */
typedef uint64_t limb;
typedef limb felem[NLIMBS];
typedef uint128_t largefelem[NLIMBS];

/* Masks selecting the low 57/58 bits of a limb. */
static const limb bottom57bits = 0x1ffffffffffffff;
static const limb bottom58bits = 0x3ffffffffffffff;
/*
 * bin66_to_felem takes a little-endian byte array and converts it into felem
 * form. This assumes that the CPU is little-endian.
 *
 * NOTE(review): the casts below perform limb-sized loads at unaligned byte
 * offsets through type-punned pointers -- undefined behaviour under strict
 * aliasing/alignment rules in portable C; this only works on little-endian
 * targets that tolerate unaligned loads. Confirm the build restricts itself
 * to such targets.
 */
static void bin66_to_felem(felem out, const u8 in[66])
    /* Each limb takes the next 58 bits (57 for the top limb) of the input. */
    out[0] = (*((limb *) & in[0])) & bottom58bits;
    out[1] = (*((limb *) & in[7]) >> 2) & bottom58bits;
    out[2] = (*((limb *) & in[14]) >> 4) & bottom58bits;
    out[3] = (*((limb *) & in[21]) >> 6) & bottom58bits;
    out[4] = (*((limb *) & in[29])) & bottom58bits;
    out[5] = (*((limb *) & in[36]) >> 2) & bottom58bits;
    out[6] = (*((limb *) & in[43]) >> 4) & bottom58bits;
    out[7] = (*((limb *) & in[50]) >> 6) & bottom58bits;
    out[8] = (*((limb *) & in[58])) & bottom57bits;
/*
 * felem_to_bin66 takes an felem and serialises into a little endian, 66 byte
 * array. This assumes that the CPU is little-endian.
 *
 * NOTE(review): the |= stores require |out| to be zeroed before this point;
 * the zeroing statement is not visible in this view -- confirm it is present.
 * The type-punned, unaligned stores carry the same portability caveats as
 * bin66_to_felem.
 */
static void felem_to_bin66(u8 out[66], const felem in)
    (*((limb *) & out[0])) = in[0];
    (*((limb *) & out[7])) |= in[1] << 2;
    (*((limb *) & out[14])) |= in[2] << 4;
    (*((limb *) & out[21])) |= in[3] << 6;
    (*((limb *) & out[29])) = in[4];
    (*((limb *) & out[36])) |= in[5] << 2;
    (*((limb *) & out[43])) |= in[6] << 4;
    (*((limb *) & out[50])) |= in[7] << 6;
    (*((limb *) & out[58])) = in[8];
/*
 * flip_endian reverses the byte order of |in| into |out| (len bytes).
 * Used to preserve endianness when converting with BN_bn2bin and BN_bin2bn,
 * which are big-endian, while the felem serialisation is little-endian.
 * |out| and |in| must not overlap.
 */
static void flip_endian(u8 *out, const u8 *in, unsigned len)
    for (i = 0; i < len; ++i)
        out[i] = in[len - 1 - i];
/*
 * BN_to_felem converts an OpenSSL BIGNUM into an felem.
 * Rejects negative values and values wider than 66 bytes, raising
 * EC_R_BIGNUM_OUT_OF_RANGE. Presumably returns 1 on success and 0 on
 * failure -- the return statements are not visible in this view; confirm.
 */
static int BN_to_felem(felem out, const BIGNUM *bn)
    felem_bytearray b_in;
    felem_bytearray b_out;

    num_bytes = BN_num_bytes(bn);
    if (num_bytes > sizeof(b_out)) {
        ECerr(EC_F_BN_TO_FELEM, EC_R_BIGNUM_OUT_OF_RANGE);
    if (BN_is_negative(bn)) {
        ECerr(EC_F_BN_TO_FELEM, EC_R_BIGNUM_OUT_OF_RANGE);
    /* BN_bn2binpad left-pads with zeros to exactly 66 big-endian bytes. */
    num_bytes = BN_bn2binpad(bn, b_in, sizeof(b_in));
    flip_endian(b_out, b_in, num_bytes);
    bin66_to_felem(out, b_out);
/*
 * felem_to_BN converts an felem into an OpenSSL BIGNUM.
 * Returns the result of BN_bin2bn: |out| on success, NULL on allocation
 * failure.
 */
static BIGNUM *felem_to_BN(BIGNUM *out, const felem in)
    felem_bytearray b_in, b_out;
    felem_to_bin66(b_in, in);
    /* felem serialisation is little-endian; BN_bin2bn wants big-endian. */
    flip_endian(b_out, b_in, sizeof(b_out));
    return BN_bin2bn(b_out, sizeof(b_out), out);
216 static void felem_one(felem out)
229 static void felem_assign(felem out, const felem in)
/*
 * felem_sum64 sets out = out + in (limb-wise, no reduction).
 * NOTE(review): body not visible in this view.
 */
static void felem_sum64(felem out, const felem in)
/*
 * felem_scalar sets out = in * scalar.
 * Limb-wise multiply with no reduction: the caller must ensure |scalar| is
 * small enough that no limb product overflows 64 bits.
 */
static void felem_scalar(felem out, const felem in, limb scalar)
    out[0] = in[0] * scalar;
    out[1] = in[1] * scalar;
    out[2] = in[2] * scalar;
    out[3] = in[3] * scalar;
    out[4] = in[4] * scalar;
    out[5] = in[5] * scalar;
    out[6] = in[6] * scalar;
    out[7] = in[7] * scalar;
    out[8] = in[8] * scalar;
/*
 * felem_scalar64 sets out = out * scalar, in place.
 * NOTE(review): body not visible in this view.
 */
static void felem_scalar64(felem out, limb scalar)
/*
 * felem_scalar128 sets out = out * scalar on 128-bit limbs, in place.
 * NOTE(review): body not visible in this view.
 */
static void felem_scalar128(largefelem out, limb scalar)
/*
 * felem_neg sets |out| to |-in|.
 *
 * Assuming in[i] < 2^59 + 2^14 on entry (per the visible bound comment),
 * subtracting from these multiple-of-p constants cannot underflow.
 */
static void felem_neg(felem out, const felem in)
    /* In order to prevent underflow, we subtract from 0 mod p. */
    static const limb two62m3 = (((limb) 1) << 62) - (((limb) 1) << 5);
    static const limb two62m2 = (((limb) 1) << 62) - (((limb) 1) << 4);

    out[0] = two62m3 - in[0];
    out[1] = two62m2 - in[1];
    out[2] = two62m2 - in[2];
    out[3] = two62m2 - in[3];
    out[4] = two62m2 - in[4];
    out[5] = two62m2 - in[5];
    out[6] = two62m2 - in[6];
    out[7] = two62m2 - in[7];
    out[8] = two62m2 - in[8];
/*
 * felem_diff64 subtracts |in| from |out|.
 *
 * On entry: in[i] < 2^59 + 2^14 (per the visible bound comment).
 * On exit:  out[i] < out[i] + 2^62.
 */
static void felem_diff64(felem out, const felem in)
    /*
     * In order to prevent underflow, we add 0 mod p before subtracting.
     */
    static const limb two62m3 = (((limb) 1) << 62) - (((limb) 1) << 5);
    static const limb two62m2 = (((limb) 1) << 62) - (((limb) 1) << 4);

    out[0] += two62m3 - in[0];
    out[1] += two62m2 - in[1];
    out[2] += two62m2 - in[2];
    out[3] += two62m2 - in[3];
    out[4] += two62m2 - in[4];
    out[5] += two62m2 - in[5];
    out[6] += two62m2 - in[6];
    out[7] += two62m2 - in[7];
    out[8] += two62m2 - in[8];
/*
 * felem_diff_128_64 subtracts the 64-bit-limbed |in| from the 128-bit-limbed
 * |out|.
 *
 * On entry: in[i] < 2^62 + 2^17 (per the visible bound comment).
 * On exit:  out[i] < out[i] + 2^63.
 */
static void felem_diff_128_64(largefelem out, const felem in)
    /*
     * In order to prevent underflow, we add 64p mod p (which is equivalent
     * to 0 mod p) before subtracting. p is 2^521 - 1, i.e. in binary a 521
     * digit number with all bits set to 1. See "The representation of field
     * elements" comment above for a description of how limbs are used to
     * represent a number. 64p is represented with 8 limbs containing a number
     * with 58 bits set and one limb with a number with 57 bits set.
     */
    static const limb two63m6 = (((limb) 1) << 63) - (((limb) 1) << 6);
    static const limb two63m5 = (((limb) 1) << 63) - (((limb) 1) << 5);

    out[0] += two63m6 - in[0];
    out[1] += two63m5 - in[1];
    out[2] += two63m5 - in[2];
    out[3] += two63m5 - in[3];
    out[4] += two63m5 - in[4];
    out[5] += two63m5 - in[5];
    out[6] += two63m5 - in[6];
    out[7] += two63m5 - in[7];
    out[8] += two63m5 - in[8];
/*
 * felem_diff128 subtracts |in| from |out|, both with 128-bit limbs.
 * (The original header comment said "felem_diff_128_64" -- a copy-paste typo;
 * this function operates on two largefelems.)
 *
 * On exit: out[i] < out[i] + 2^127 - 2^69.
 */
static void felem_diff128(largefelem out, const largefelem in)
    /*
     * In order to prevent underflow, we add 0 mod p before subtracting.
     */
    static const uint128_t two127m70 =
        (((uint128_t) 1) << 127) - (((uint128_t) 1) << 70);
    static const uint128_t two127m69 =
        (((uint128_t) 1) << 127) - (((uint128_t) 1) << 69);

    out[0] += (two127m70 - in[0]);
    out[1] += (two127m69 - in[1]);
    out[2] += (two127m69 - in[2]);
    out[3] += (two127m69 - in[3]);
    out[4] += (two127m69 - in[4]);
    out[5] += (two127m69 - in[5]);
    out[6] += (two127m69 - in[6]);
    out[7] += (two127m69 - in[7]);
    out[8] += (two127m69 - in[8]);
/*
 * felem_square sets |out| = |in|^2 (schoolbook, with the above-2^521 terms
 * folded back down by the 2^521 = 2 (mod p) identity).
 *
 * On exit: out[i] < 17 * max(in[i]) * max(in[i]).
 */
static void felem_square(largefelem out, const felem in)
    felem_scalar(inx2, in, 2);
    felem_scalar(inx4, in, 4);

    /*
     * We have many cases where we want to do
     *   in[x] * in[y] + in[y] * in[x],
     * which is obviously just 2 * in[x] * in[y]. However, rather than do
     * the doubling on the 128-bit result, we double one of the inputs to
     * the multiplication by reading from |inx2|.
     */
    out[0] = ((uint128_t) in[0]) * in[0];
    out[1] = ((uint128_t) in[0]) * inx2[1];
    out[2] = ((uint128_t) in[0]) * inx2[2] + ((uint128_t) in[1]) * in[1];
    out[3] = ((uint128_t) in[0]) * inx2[3] + ((uint128_t) in[1]) * inx2[2];
    out[4] = ((uint128_t) in[0]) * inx2[4] +
        ((uint128_t) in[1]) * inx2[3] + ((uint128_t) in[2]) * in[2];
    out[5] = ((uint128_t) in[0]) * inx2[5] +
        ((uint128_t) in[1]) * inx2[4] + ((uint128_t) in[2]) * inx2[3];
    out[6] = ((uint128_t) in[0]) * inx2[6] +
        ((uint128_t) in[1]) * inx2[5] +
        ((uint128_t) in[2]) * inx2[4] + ((uint128_t) in[3]) * in[3];
    out[7] = ((uint128_t) in[0]) * inx2[7] +
        ((uint128_t) in[1]) * inx2[6] +
        ((uint128_t) in[2]) * inx2[5] + ((uint128_t) in[3]) * inx2[4];
    out[8] = ((uint128_t) in[0]) * inx2[8] +
        ((uint128_t) in[1]) * inx2[7] +
        ((uint128_t) in[2]) * inx2[6] +
        ((uint128_t) in[3]) * inx2[5] + ((uint128_t) in[4]) * in[4];

    /*
     * The remaining limbs fall above 2^521, with the first falling at 2^522.
     * They correspond to locations one bit up from the limbs produced above
     * so we would have to multiply by two to align them. Again, rather than
     * operate on the 128-bit result, we double one of the inputs to the
     * multiplication. If we want to double for both this reason, and the
     * reason above, then we end up multiplying by four (|inx4|).
     */
    out[0] += ((uint128_t) in[1]) * inx4[8] +
        ((uint128_t) in[2]) * inx4[7] +
        ((uint128_t) in[3]) * inx4[6] + ((uint128_t) in[4]) * inx4[5];

    out[1] += ((uint128_t) in[2]) * inx4[8] +
        ((uint128_t) in[3]) * inx4[7] +
        ((uint128_t) in[4]) * inx4[6] + ((uint128_t) in[5]) * inx2[5];

    out[2] += ((uint128_t) in[3]) * inx4[8] +
        ((uint128_t) in[4]) * inx4[7] + ((uint128_t) in[5]) * inx4[6];

    out[3] += ((uint128_t) in[4]) * inx4[8] +
        ((uint128_t) in[5]) * inx4[7] + ((uint128_t) in[6]) * inx2[6];

    out[4] += ((uint128_t) in[5]) * inx4[8] + ((uint128_t) in[6]) * inx4[7];

    out[5] += ((uint128_t) in[6]) * inx4[8] + ((uint128_t) in[7]) * inx2[7];

    out[6] += ((uint128_t) in[7]) * inx4[8];

    out[7] += ((uint128_t) in[8]) * inx2[8];
/*
 * felem_mul sets |out| = |in1| * |in2| (schoolbook multiplication, with the
 * above-2^521 cross terms folded down via 2^521 = 2 (mod p)).
 *
 * On exit: out[i] < 17 * max(in1[i]) * max(in2[i]).
 */
static void felem_mul(largefelem out, const felem in1, const felem in2)
    felem_scalar(in2x2, in2, 2);

    out[0] = ((uint128_t) in1[0]) * in2[0];

    out[1] = ((uint128_t) in1[0]) * in2[1] +
        ((uint128_t) in1[1]) * in2[0];

    out[2] = ((uint128_t) in1[0]) * in2[2] +
        ((uint128_t) in1[1]) * in2[1] +
        ((uint128_t) in1[2]) * in2[0];

    out[3] = ((uint128_t) in1[0]) * in2[3] +
        ((uint128_t) in1[1]) * in2[2] +
        ((uint128_t) in1[2]) * in2[1] +
        ((uint128_t) in1[3]) * in2[0];

    out[4] = ((uint128_t) in1[0]) * in2[4] +
        ((uint128_t) in1[1]) * in2[3] +
        ((uint128_t) in1[2]) * in2[2] +
        ((uint128_t) in1[3]) * in2[1] +
        ((uint128_t) in1[4]) * in2[0];

    out[5] = ((uint128_t) in1[0]) * in2[5] +
        ((uint128_t) in1[1]) * in2[4] +
        ((uint128_t) in1[2]) * in2[3] +
        ((uint128_t) in1[3]) * in2[2] +
        ((uint128_t) in1[4]) * in2[1] +
        ((uint128_t) in1[5]) * in2[0];

    out[6] = ((uint128_t) in1[0]) * in2[6] +
        ((uint128_t) in1[1]) * in2[5] +
        ((uint128_t) in1[2]) * in2[4] +
        ((uint128_t) in1[3]) * in2[3] +
        ((uint128_t) in1[4]) * in2[2] +
        ((uint128_t) in1[5]) * in2[1] +
        ((uint128_t) in1[6]) * in2[0];

    out[7] = ((uint128_t) in1[0]) * in2[7] +
        ((uint128_t) in1[1]) * in2[6] +
        ((uint128_t) in1[2]) * in2[5] +
        ((uint128_t) in1[3]) * in2[4] +
        ((uint128_t) in1[4]) * in2[3] +
        ((uint128_t) in1[5]) * in2[2] +
        ((uint128_t) in1[6]) * in2[1] +
        ((uint128_t) in1[7]) * in2[0];

    out[8] = ((uint128_t) in1[0]) * in2[8] +
        ((uint128_t) in1[1]) * in2[7] +
        ((uint128_t) in1[2]) * in2[6] +
        ((uint128_t) in1[3]) * in2[5] +
        ((uint128_t) in1[4]) * in2[4] +
        ((uint128_t) in1[5]) * in2[3] +
        ((uint128_t) in1[6]) * in2[2] +
        ((uint128_t) in1[7]) * in2[1] +
        ((uint128_t) in1[8]) * in2[0];

    /* See comment in felem_square about the use of in2x2 here */

    out[0] += ((uint128_t) in1[1]) * in2x2[8] +
        ((uint128_t) in1[2]) * in2x2[7] +
        ((uint128_t) in1[3]) * in2x2[6] +
        ((uint128_t) in1[4]) * in2x2[5] +
        ((uint128_t) in1[5]) * in2x2[4] +
        ((uint128_t) in1[6]) * in2x2[3] +
        ((uint128_t) in1[7]) * in2x2[2] +
        ((uint128_t) in1[8]) * in2x2[1];

    out[1] += ((uint128_t) in1[2]) * in2x2[8] +
        ((uint128_t) in1[3]) * in2x2[7] +
        ((uint128_t) in1[4]) * in2x2[6] +
        ((uint128_t) in1[5]) * in2x2[5] +
        ((uint128_t) in1[6]) * in2x2[4] +
        ((uint128_t) in1[7]) * in2x2[3] +
        ((uint128_t) in1[8]) * in2x2[2];

    out[2] += ((uint128_t) in1[3]) * in2x2[8] +
        ((uint128_t) in1[4]) * in2x2[7] +
        ((uint128_t) in1[5]) * in2x2[6] +
        ((uint128_t) in1[6]) * in2x2[5] +
        ((uint128_t) in1[7]) * in2x2[4] +
        ((uint128_t) in1[8]) * in2x2[3];

    out[3] += ((uint128_t) in1[4]) * in2x2[8] +
        ((uint128_t) in1[5]) * in2x2[7] +
        ((uint128_t) in1[6]) * in2x2[6] +
        ((uint128_t) in1[7]) * in2x2[5] +
        ((uint128_t) in1[8]) * in2x2[4];

    out[4] += ((uint128_t) in1[5]) * in2x2[8] +
        ((uint128_t) in1[6]) * in2x2[7] +
        ((uint128_t) in1[7]) * in2x2[6] +
        ((uint128_t) in1[8]) * in2x2[5];

    out[5] += ((uint128_t) in1[6]) * in2x2[8] +
        ((uint128_t) in1[7]) * in2x2[7] +
        ((uint128_t) in1[8]) * in2x2[6];

    out[6] += ((uint128_t) in1[7]) * in2x2[8] +
        ((uint128_t) in1[8]) * in2x2[7];

    out[7] += ((uint128_t) in1[8]) * in2x2[8];
/* Mask selecting the low 52 bits of a limb (used when splitting the high
 * 64 bits of a 128-bit limb across two output limbs). */
static const limb bottom52bits = 0xfffffffffffff;

/*
 * felem_reduce converts a largefelem to an felem.
 *
 * On exit: out[i] < 2^59 + 2^14.
 */
static void felem_reduce(felem out, const largefelem in)
    u64 overflow1, overflow2;

    /* Keep the bottom 58 bits of each 128-bit limb. */
    out[0] = ((limb) in[0]) & bottom58bits;
    out[1] = ((limb) in[1]) & bottom58bits;
    out[2] = ((limb) in[2]) & bottom58bits;
    out[3] = ((limb) in[3]) & bottom58bits;
    out[4] = ((limb) in[4]) & bottom58bits;
    out[5] = ((limb) in[5]) & bottom58bits;
    out[6] = ((limb) in[6]) & bottom58bits;
    out[7] = ((limb) in[7]) & bottom58bits;
    out[8] = ((limb) in[8]) & bottom58bits;

    /* Carry bits 58..127 of each input limb into the next two out limbs. */
    out[1] += ((limb) in[0]) >> 58;
    out[1] += (((limb) (in[0] >> 64)) & bottom52bits) << 6;
    /* out[1] < 2^58 + 2^6 + 2^58 */
    out[2] += ((limb) (in[0] >> 64)) >> 52;

    out[2] += ((limb) in[1]) >> 58;
    out[2] += (((limb) (in[1] >> 64)) & bottom52bits) << 6;
    out[3] += ((limb) (in[1] >> 64)) >> 52;

    out[3] += ((limb) in[2]) >> 58;
    out[3] += (((limb) (in[2] >> 64)) & bottom52bits) << 6;
    out[4] += ((limb) (in[2] >> 64)) >> 52;

    out[4] += ((limb) in[3]) >> 58;
    out[4] += (((limb) (in[3] >> 64)) & bottom52bits) << 6;
    out[5] += ((limb) (in[3] >> 64)) >> 52;

    out[5] += ((limb) in[4]) >> 58;
    out[5] += (((limb) (in[4] >> 64)) & bottom52bits) << 6;
    out[6] += ((limb) (in[4] >> 64)) >> 52;

    out[6] += ((limb) in[5]) >> 58;
    out[6] += (((limb) (in[5] >> 64)) & bottom52bits) << 6;
    out[7] += ((limb) (in[5] >> 64)) >> 52;

    out[7] += ((limb) in[6]) >> 58;
    out[7] += (((limb) (in[6] >> 64)) & bottom52bits) << 6;
    out[8] += ((limb) (in[6] >> 64)) >> 52;

    out[8] += ((limb) in[7]) >> 58;
    out[8] += (((limb) (in[7] >> 64)) & bottom52bits) << 6;
    /* out[x > 1] < 2^58 + 2^6 + 2^58 + 2^12 */

    /* Overflow above bit 521 wraps around with weight 2 (2^521 = 2 mod p). */
    overflow1 = ((limb) (in[7] >> 64)) >> 52;

    overflow1 += ((limb) in[8]) >> 58;
    overflow1 += (((limb) (in[8] >> 64)) & bottom52bits) << 6;
    overflow2 = ((limb) (in[8] >> 64)) >> 52;

    overflow1 <<= 1;            /* overflow1 < 2^13 + 2^7 + 2^59 */
    overflow2 <<= 1;            /* overflow2 < 2^13 */

    out[0] += overflow1;        /* out[0] < 2^60 */
    out[1] += overflow2;        /* out[1] < 2^59 + 2^6 + 2^13 */

    out[1] += out[0] >> 58;
    out[0] &= bottom58bits;
    /* out[1] < 2^59 + 2^6 + 2^13 + 2^2 */
/* felem_square_reduce sets |out| = |in|^2, reduced to felem bounds. */
static void felem_square_reduce(felem out, const felem in)
    felem_square(tmp, in);
    felem_reduce(out, tmp);
/* felem_mul_reduce sets |out| = |in1| * |in2|, reduced to felem bounds. */
static void felem_mul_reduce(felem out, const felem in1, const felem in2)
    felem_mul(tmp, in1, in2);
    felem_reduce(out, tmp);
/*
 * felem_inv calculates |out| = |in|^{-1}
 *
 * Based on Fermat's Little Theorem:
 *   a^{p-1} = 1 (mod p)
 *   a^{p-2} = a^{-1} (mod p)
 * i.e. it raises |in| to the power 2^521 - 3 via an addition chain.
 * The trailing comments track the exponent accumulated in each temporary
 * as a sum/difference of powers of two.
 */
static void felem_inv(felem out, const felem in)
    felem ftmp, ftmp2, ftmp3, ftmp4;

    felem_square(tmp, in);
    felem_reduce(ftmp, tmp);    /* 2^1 */
    felem_mul(tmp, in, ftmp);
    felem_reduce(ftmp, tmp);    /* 2^2 - 2^0 */
    felem_assign(ftmp2, ftmp);
    felem_square(tmp, ftmp);
    felem_reduce(ftmp, tmp);    /* 2^3 - 2^1 */
    felem_mul(tmp, in, ftmp);
    felem_reduce(ftmp, tmp);    /* 2^3 - 2^0 */
    felem_square(tmp, ftmp);
    felem_reduce(ftmp, tmp);    /* 2^4 - 2^1 */

    felem_square(tmp, ftmp2);
    felem_reduce(ftmp3, tmp);   /* 2^3 - 2^1 */
    felem_square(tmp, ftmp3);
    felem_reduce(ftmp3, tmp);   /* 2^4 - 2^2 */
    felem_mul(tmp, ftmp3, ftmp2);
    felem_reduce(ftmp3, tmp);   /* 2^4 - 2^0 */

    felem_assign(ftmp2, ftmp3);
    felem_square(tmp, ftmp3);
    felem_reduce(ftmp3, tmp);   /* 2^5 - 2^1 */
    felem_square(tmp, ftmp3);
    felem_reduce(ftmp3, tmp);   /* 2^6 - 2^2 */
    felem_square(tmp, ftmp3);
    felem_reduce(ftmp3, tmp);   /* 2^7 - 2^3 */
    felem_square(tmp, ftmp3);
    felem_reduce(ftmp3, tmp);   /* 2^8 - 2^4 */
    felem_assign(ftmp4, ftmp3);
    felem_mul(tmp, ftmp3, ftmp);
    felem_reduce(ftmp4, tmp);   /* 2^8 - 2^1 */
    felem_square(tmp, ftmp4);
    felem_reduce(ftmp4, tmp);   /* 2^9 - 2^2 */
    felem_mul(tmp, ftmp3, ftmp2);
    felem_reduce(ftmp3, tmp);   /* 2^8 - 2^0 */
    felem_assign(ftmp2, ftmp3);

    /* Repeated square-and-multiply doublings of the all-ones exponent. */
    for (i = 0; i < 8; i++) {
        felem_square(tmp, ftmp3);
        felem_reduce(ftmp3, tmp); /* 2^16 - 2^8 */
    felem_mul(tmp, ftmp3, ftmp2);
    felem_reduce(ftmp3, tmp);   /* 2^16 - 2^0 */
    felem_assign(ftmp2, ftmp3);

    for (i = 0; i < 16; i++) {
        felem_square(tmp, ftmp3);
        felem_reduce(ftmp3, tmp); /* 2^32 - 2^16 */
    felem_mul(tmp, ftmp3, ftmp2);
    felem_reduce(ftmp3, tmp);   /* 2^32 - 2^0 */
    felem_assign(ftmp2, ftmp3);

    for (i = 0; i < 32; i++) {
        felem_square(tmp, ftmp3);
        felem_reduce(ftmp3, tmp); /* 2^64 - 2^32 */
    felem_mul(tmp, ftmp3, ftmp2);
    felem_reduce(ftmp3, tmp);   /* 2^64 - 2^0 */
    felem_assign(ftmp2, ftmp3);

    for (i = 0; i < 64; i++) {
        felem_square(tmp, ftmp3);
        felem_reduce(ftmp3, tmp); /* 2^128 - 2^64 */
    felem_mul(tmp, ftmp3, ftmp2);
    felem_reduce(ftmp3, tmp);   /* 2^128 - 2^0 */
    felem_assign(ftmp2, ftmp3);

    for (i = 0; i < 128; i++) {
        felem_square(tmp, ftmp3);
        felem_reduce(ftmp3, tmp); /* 2^256 - 2^128 */
    felem_mul(tmp, ftmp3, ftmp2);
    felem_reduce(ftmp3, tmp);   /* 2^256 - 2^0 */
    felem_assign(ftmp2, ftmp3);

    for (i = 0; i < 256; i++) {
        felem_square(tmp, ftmp3);
        felem_reduce(ftmp3, tmp); /* 2^512 - 2^256 */
    felem_mul(tmp, ftmp3, ftmp2);
    felem_reduce(ftmp3, tmp);   /* 2^512 - 2^0 */

    for (i = 0; i < 9; i++) {
        felem_square(tmp, ftmp3);
        felem_reduce(ftmp3, tmp); /* 2^521 - 2^9 */
    felem_mul(tmp, ftmp3, ftmp4);
    felem_reduce(ftmp3, tmp);   /* 2^521 - 2^2 (was mislabelled 2^512) */
    felem_mul(tmp, ftmp3, in);
    felem_reduce(out, tmp);     /* 2^521 - 3 = p - 2 (was mislabelled 2^512) */
/* This is the field prime p = 2^521-1, expressed as an felem: eight limbs of
 * 58 set bits and a top limb of 57 set bits. */
static const felem kPrime = {
    0x03ffffffffffffff, 0x03ffffffffffffff, 0x03ffffffffffffff,
    0x03ffffffffffffff, 0x03ffffffffffffff, 0x03ffffffffffffff,
    0x03ffffffffffffff, 0x03ffffffffffffff, 0x01ffffffffffffff
/*
 * felem_is_zero returns a limb with all bits set if |in| == 0 (mod p) and 0
 * otherwise (constant time, no data-dependent branches).
 *
 * On entry: in[i] < 2^59 + 2^14.
 */
static limb felem_is_zero(const felem in)
    felem_assign(ftmp, in);

    /* Fold the top-limb overflow back around: 2^521 = 2 (mod p). */
    ftmp[0] += ftmp[8] >> 57;
    ftmp[8] &= bottom57bits;
    /* Propagate carries so each limb is within its 58-bit window. */
    ftmp[1] += ftmp[0] >> 58;
    ftmp[0] &= bottom58bits;
    ftmp[2] += ftmp[1] >> 58;
    ftmp[1] &= bottom58bits;
    ftmp[3] += ftmp[2] >> 58;
    ftmp[2] &= bottom58bits;
    ftmp[4] += ftmp[3] >> 58;
    ftmp[3] &= bottom58bits;
    ftmp[5] += ftmp[4] >> 58;
    ftmp[4] &= bottom58bits;
    ftmp[6] += ftmp[5] >> 58;
    ftmp[5] &= bottom58bits;
    ftmp[7] += ftmp[6] >> 58;
    ftmp[6] &= bottom58bits;
    ftmp[8] += ftmp[7] >> 58;
    ftmp[7] &= bottom58bits;
    /* ftmp[8] < 2^57 + 4 */

    /*
     * The ninth limb of 2*(2^521-1) is 0x03ffffffffffffff, which is greater
     * than our bound for ftmp[8]. Therefore we only have to check if the
     * value is zero or 2^521-1.
     */

    /*
     * We know that ftmp[i] < 2^63, therefore the only way that the top bit
     * can be set is if is_zero was 0 before the decrement.
     */
    is_zero = 0 - (is_zero >> 63);    /* all-ones iff every limb was zero */

    /* Constant-time comparison of ftmp against kPrime (2^521-1). */
    is_p = ftmp[0] ^ kPrime[0];
    is_p |= ftmp[1] ^ kPrime[1];
    is_p |= ftmp[2] ^ kPrime[2];
    is_p |= ftmp[3] ^ kPrime[3];
    is_p |= ftmp[4] ^ kPrime[4];
    is_p |= ftmp[5] ^ kPrime[5];
    is_p |= ftmp[6] ^ kPrime[6];
    is_p |= ftmp[7] ^ kPrime[7];
    is_p |= ftmp[8] ^ kPrime[8];

    is_p = 0 - (is_p >> 63);          /* all-ones iff ftmp == kPrime */
/* felem_is_zero_int is an int-returning wrapper around felem_is_zero:
 * returns 1 iff the felem pointed to by |in| is 0 (mod p), else 0. */
static int felem_is_zero_int(const void *in)
    return (int)(felem_is_zero(in) & ((limb) 1));
/*
 * felem_contract converts |in| to its unique, minimal representation
 * (fully reduced mod p, every limb within its 58/57-bit window),
 * in constant time.
 *
 * On entry: in[i] < 2^59 + 2^14.
 */
static void felem_contract(felem out, const felem in)
    limb is_p, is_greater, sign;
    static const limb two58 = ((limb) 1) << 58;

    felem_assign(out, in);

    /* Fold overflow of the top limb around: 2^521 = 2 (mod p). */
    out[0] += out[8] >> 57;
    out[8] &= bottom57bits;
    /* Carry-propagate so each limb fits its window. */
    out[1] += out[0] >> 58;
    out[0] &= bottom58bits;
    out[2] += out[1] >> 58;
    out[1] &= bottom58bits;
    out[3] += out[2] >> 58;
    out[2] &= bottom58bits;
    out[4] += out[3] >> 58;
    out[3] &= bottom58bits;
    out[5] += out[4] >> 58;
    out[4] &= bottom58bits;
    out[6] += out[5] >> 58;
    out[5] &= bottom58bits;
    out[7] += out[6] >> 58;
    out[6] &= bottom58bits;
    out[8] += out[7] >> 58;
    out[7] &= bottom58bits;
    /* out[8] < 2^57 + 4 */

    /*
     * If the value is greater than 2^521-1 then we have to subtract 2^521-1
     * out. See the comments in felem_is_zero regarding why we don't test for
     * other multiples of the prime.
     */

    /*
     * First, if |out| is equal to 2^521-1, we subtract it out to get zero.
     */
    is_p = out[0] ^ kPrime[0];
    is_p |= out[1] ^ kPrime[1];
    is_p |= out[2] ^ kPrime[2];
    is_p |= out[3] ^ kPrime[3];
    is_p |= out[4] ^ kPrime[4];
    is_p |= out[5] ^ kPrime[5];
    is_p |= out[6] ^ kPrime[6];
    is_p |= out[7] ^ kPrime[7];
    is_p |= out[8] ^ kPrime[8];

    is_p = 0 - (is_p >> 63);

    /* is_p is 0 iff |out| == 2^521-1 and all ones otherwise */

    /*
     * In order to test that |out| >= 2^521-1 we need only test if out[8] >>
     * 57 is greater than zero as (2^521-1) + x >= 2^522
     */
    is_greater = out[8] >> 57;
    /* Smear any set bit into the top bit, then turn it into a full mask. */
    is_greater |= is_greater << 32;
    is_greater |= is_greater << 16;
    is_greater |= is_greater << 8;
    is_greater |= is_greater << 4;
    is_greater |= is_greater << 2;
    is_greater |= is_greater << 1;
    is_greater = 0 - (is_greater >> 63);

    /* Conditionally subtract p (mask is all-ones or zero). */
    out[0] -= kPrime[0] & is_greater;
    out[1] -= kPrime[1] & is_greater;
    out[2] -= kPrime[2] & is_greater;
    out[3] -= kPrime[3] & is_greater;
    out[4] -= kPrime[4] & is_greater;
    out[5] -= kPrime[5] & is_greater;
    out[6] -= kPrime[6] & is_greater;
    out[7] -= kPrime[7] & is_greater;
    out[8] -= kPrime[8] & is_greater;

    /* Eliminate negative coefficients: if out[i] went negative (top bit
     * set), add 2^58 to it and borrow 1 from out[i+1]. */
    sign = -(out[0] >> 63);
    out[0] += (two58 & sign);
    out[1] -= (1 & sign);
    sign = -(out[1] >> 63);
    out[1] += (two58 & sign);
    out[2] -= (1 & sign);
    sign = -(out[2] >> 63);
    out[2] += (two58 & sign);
    out[3] -= (1 & sign);
    sign = -(out[3] >> 63);
    out[3] += (two58 & sign);
    out[4] -= (1 & sign);
    sign = -(out[4] >> 63);
    out[4] += (two58 & sign);
    out[5] -= (1 & sign);
    /*
     * NOTE(review): reading out[0] on the next line looks like it should be
     * out[5]; at this point out[0] is already non-negative so sign is 0 and
     * a negative out[5] is NOT fixed here. The second borrow pass over limbs
     * 5..8 below re-propagates from out[5], which appears to compensate --
     * confirm against the upstream ecp_nistp521.c history before changing.
     */
    sign = -(out[0] >> 63);
    out[5] += (two58 & sign);
    out[6] -= (1 & sign);
    sign = -(out[6] >> 63);
    out[6] += (two58 & sign);
    out[7] -= (1 & sign);
    sign = -(out[7] >> 63);
    out[7] += (two58 & sign);
    out[8] -= (1 & sign);
    /* Second pass over limbs 5..8 (see note above). */
    sign = -(out[5] >> 63);
    out[5] += (two58 & sign);
    out[6] -= (1 & sign);
    sign = -(out[6] >> 63);
    out[6] += (two58 & sign);
    out[7] -= (1 & sign);
    sign = -(out[7] >> 63);
    out[7] += (two58 & sign);
    out[8] -= (1 & sign);
/*-
 * Group operations
 * ----------------
 *
 * Building on top of the field operations we have the operations on the
 * elliptic curve group itself. Points on the curve are represented in Jacobian
 * coordinates.
 *
 * point_double calculates 2*(x_in, y_in, z_in)
 *
 * The method is taken from:
 *   http://hyperelliptic.org/EFD/g1p/auto-shortw-jacobian-3.html#doubling-dbl-2001-b
 *
 * Outputs can equal corresponding inputs, i.e., x_out == x_in is allowed.
 * while x_out == y_in is not (maybe this works, but it's not tested).
 */
point_double(felem x_out, felem y_out, felem z_out,
             const felem x_in, const felem y_in, const felem z_in)
    largefelem tmp, tmp2;
    felem delta, gamma, beta, alpha, ftmp, ftmp2;

    felem_assign(ftmp, x_in);
    felem_assign(ftmp2, x_in);

    /* delta = z^2 */
    felem_square(tmp, z_in);
    felem_reduce(delta, tmp);   /* delta[i] < 2^59 + 2^14 */

    /* gamma = y^2 */
    felem_square(tmp, y_in);
    felem_reduce(gamma, tmp);   /* gamma[i] < 2^59 + 2^14 */

    /* beta = x*gamma */
    felem_mul(tmp, x_in, gamma);
    felem_reduce(beta, tmp);    /* beta[i] < 2^59 + 2^14 */

    /* alpha = 3*(x-delta)*(x+delta) */
    felem_diff64(ftmp, delta);
    /* ftmp[i] < 2^61 */
    felem_sum64(ftmp2, delta);
    /* ftmp2[i] < 2^60 + 2^15 */
    felem_scalar64(ftmp2, 3);
    /* ftmp2[i] < 3*2^60 + 3*2^15 */
    felem_mul(tmp, ftmp, ftmp2);
    /*-
     * tmp[i] < 17(3*2^121 + 3*2^76)
     *        = 61*2^121 + 61*2^76
     *        < 64*2^121 + 64*2^76
     */
    felem_reduce(alpha, tmp);

    /* x' = alpha^2 - 8*beta */
    felem_square(tmp, alpha);
    /*
     * tmp[i] < 17*2^120 < 2^125
     */
    felem_assign(ftmp, beta);
    felem_scalar64(ftmp, 8);
    /* ftmp[i] < 2^62 + 2^17 */
    felem_diff_128_64(tmp, ftmp);
    /* tmp[i] < 2^125 + 2^63 + 2^62 + 2^17 */
    felem_reduce(x_out, tmp);

    /* z' = (y + z)^2 - gamma - delta */
    felem_sum64(delta, gamma);
    /* delta[i] < 2^60 + 2^15 */
    felem_assign(ftmp, y_in);
    felem_sum64(ftmp, z_in);
    /* ftmp[i] < 2^60 + 2^15 */
    felem_square(tmp, ftmp);
    /*
     * tmp[i] < 17(2^122) < 2^127
     */
    felem_diff_128_64(tmp, delta);
    /* tmp[i] < 2^127 + 2^63 */
    felem_reduce(z_out, tmp);

    /* y' = alpha*(4*beta - x') - 8*gamma^2 */
    felem_scalar64(beta, 4);
    /* beta[i] < 2^61 + 2^16 */
    felem_diff64(beta, x_out);
    /* beta[i] < 2^61 + 2^60 + 2^16 */
    felem_mul(tmp, alpha, beta);
    /*-
     * tmp[i] < 17*((2^59 + 2^14)(2^61 + 2^60 + 2^16))
     *        = 17*(2^120 + 2^75 + 2^119 + 2^74 + 2^75 + 2^30)
     *        = 17*(2^120 + 2^119 + 2^76 + 2^74 + 2^30)
     */
    felem_square(tmp2, gamma);
    /*-
     * tmp2[i] < 17*(2^59 + 2^14)^2
     *         = 17*(2^118 + 2^74 + 2^28)
     */
    felem_scalar128(tmp2, 8);
    /*-
     * tmp2[i] < 8*17*(2^118 + 2^74 + 2^28)
     *         = 2^125 + 2^121 + 2^81 + 2^77 + 2^35 + 2^31
     */
    felem_diff128(tmp, tmp2);
    /*-
     * tmp[i] < 2^127 - 2^69 + 17(2^120 + 2^119 + 2^76 + 2^74 + 2^30)
     *        = 2^127 + 2^124 + 2^122 + 2^120 + 2^118 + 2^80 + 2^78 + 2^76 +
     *          2^74 + 2^69 + 2^34 + 2^30
     */
    felem_reduce(y_out, tmp);
/*
 * copy_conditional copies in to out iff mask is all ones (constant time;
 * mask must be 0 or all-ones).
 * NOTE(review): the XOR-back store (out[i] ^= tmp) is not visible in this
 * view -- confirm it is present in the loop body.
 */
static void copy_conditional(felem out, const felem in, limb mask)
    for (i = 0; i < NLIMBS; ++i) {
        const limb tmp = mask & (in[i] ^ out[i]);
/*-
 * point_add calculates (x1, y1, z1) + (x2, y2, z2)
 *
 * The method is taken from:
 *   http://hyperelliptic.org/EFD/g1p/auto-shortw-jacobian-3.html#addition-add-2007-bl,
 * adapted for mixed addition (z2 = 1, or z2 = 0 for the point at infinity).
 *
 * This function includes a branch for checking whether the two input points
 * are equal (while not equal to the point at infinity). See comment below
 * on why this branch is acceptable despite not being constant-time.
 *
 * The "[i] < ..." annotations track per-limb magnitude bounds, proving that
 * no intermediate value overflows a limb / 128-bit accumulator before the
 * next felem_reduce.
 */
static void point_add(felem x3, felem y3, felem z3,
                      const felem x1, const felem y1, const felem z1,
                      const int mixed, const felem x2, const felem y2,
                      const felem z2)
{
    felem ftmp, ftmp2, ftmp3, ftmp4, ftmp5, ftmp6, x_out, y_out, z_out;
    largefelem tmp, tmp2;
    /*
     * These are limb masks: all-ones or all-zero (they are later used as
     * copy_conditional masks, which requires exactly that form).
     */
    limb x_equal, y_equal, z1_is_zero, z2_is_zero;

    z1_is_zero = felem_is_zero(z1);
    z2_is_zero = felem_is_zero(z2);

    /* ftmp = z1z1 = z1**2 */
    felem_square(tmp, z1);
    felem_reduce(ftmp, tmp);

    if (!mixed) {
        /* ftmp2 = z2z2 = z2**2 */
        felem_square(tmp, z2);
        felem_reduce(ftmp2, tmp);

        /* u1 = ftmp3 = x1*z2z2 */
        felem_mul(tmp, x1, ftmp2);
        felem_reduce(ftmp3, tmp);

        /* ftmp5 = z1 + z2 */
        felem_assign(ftmp5, z1);
        felem_sum64(ftmp5, z2);
        /* ftmp5[i] < 2^61 */

        /* ftmp5 = (z1 + z2)**2 - z1z1 - z2z2 = 2*z1z2 */
        felem_square(tmp, ftmp5);
        /* tmp[i] < 17*2^122 */
        felem_diff_128_64(tmp, ftmp);
        /* tmp[i] < 17*2^122 + 2^63 */
        felem_diff_128_64(tmp, ftmp2);
        /* tmp[i] < 17*2^122 + 2^64 */
        felem_reduce(ftmp5, tmp);

        /* ftmp2 = z2 * z2z2 */
        felem_mul(tmp, ftmp2, z2);
        felem_reduce(ftmp2, tmp);

        /* s1 = ftmp6 = y1 * z2**3 */
        felem_mul(tmp, y1, ftmp2);
        felem_reduce(ftmp6, tmp);
    } else {
        /*
         * We'll assume z2 = 1 (special case z2 = 0 is handled later by the
         * copy_conditional calls at the bottom).
         */

        /* u1 = ftmp3 = x1*z2z2 */
        felem_assign(ftmp3, x1);

        /* ftmp5 = 2*z1z2 */
        felem_scalar(ftmp5, z1, 2);

        /* s1 = ftmp6 = y1 * z2**3 */
        felem_assign(ftmp6, y1);
    }

    /* u2 = x2*z1z1 */
    felem_mul(tmp, x2, ftmp);
    /* tmp[i] < 17*2^120 */

    /* h = ftmp4 = u2 - u1 */
    felem_diff_128_64(tmp, ftmp3);
    /* tmp[i] < 17*2^120 + 2^63 */
    felem_reduce(ftmp4, tmp);

    x_equal = felem_is_zero(ftmp4);

    /* z_out = ftmp5 * h */
    felem_mul(tmp, ftmp5, ftmp4);
    felem_reduce(z_out, tmp);

    /* ftmp = z1 * z1z1 */
    felem_mul(tmp, ftmp, z1);
    felem_reduce(ftmp, tmp);

    /* s2 = tmp = y2 * z1**3 */
    felem_mul(tmp, y2, ftmp);
    /* tmp[i] < 17*2^120 */

    /* r = ftmp5 = (s2 - s1)*2 */
    felem_diff_128_64(tmp, ftmp6);
    /* tmp[i] < 17*2^120 + 2^63 */
    felem_reduce(ftmp5, tmp);
    y_equal = felem_is_zero(ftmp5);
    felem_scalar64(ftmp5, 2);
    /* ftmp5[i] < 2^61 */

    if (x_equal && y_equal && !z1_is_zero && !z2_is_zero) {
        /*
         * The addition formula breaks down when both inputs are the same
         * affine point (h = r = 0 would make the output the point at
         * infinity), so fall back to doubling.
         *
         * This is obviously not constant-time but it will almost-never happen
         * for ECDH / ECDSA. The case where it can happen is during scalar-mult
         * where the intermediate value gets very close to the group order.
         * Since |ec_GFp_nistp_recode_scalar_bits| produces signed digits for
         * the scalar, it's possible for the intermediate value to be a small
         * negative multiple of the base point, and for the final signed digit
         * to be the same value. We believe that this only occurs for the scalar
         * 1fffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
         * ffffffa51868783bf2f966b7fcc0148f709a5d03bb5c9b8899c47aebb6fb
         * 71e913863f7, in that case the penultimate intermediate is -9G and
         * the final digit is also -9G. Since this only happens for a single
         * scalar, the timing leak is irrelevant. (Any attacker who wanted to
         * check whether a secret scalar was that exact value, can already do
         * so.)
         */
        point_double(x3, y3, z3, x1, y1, z1);
        return;
    }

    /* I = ftmp = (2h)**2 */
    felem_assign(ftmp, ftmp4);
    felem_scalar64(ftmp, 2);
    /* ftmp[i] < 2^61 */
    felem_square(tmp, ftmp);
    /* tmp[i] < 17*2^122 */
    felem_reduce(ftmp, tmp);

    /* J = ftmp2 = h * I */
    felem_mul(tmp, ftmp4, ftmp);
    felem_reduce(ftmp2, tmp);

    /* V = ftmp4 = U1 * I */
    felem_mul(tmp, ftmp3, ftmp);
    felem_reduce(ftmp4, tmp);

    /* x_out = r**2 - J - 2V */
    felem_square(tmp, ftmp5);
    /* tmp[i] < 17*2^122 */
    felem_diff_128_64(tmp, ftmp2);
    /* tmp[i] < 17*2^122 + 2^63 */
    felem_assign(ftmp3, ftmp4);
    felem_scalar64(ftmp4, 2);
    /* ftmp4[i] < 2^61 */
    felem_diff_128_64(tmp, ftmp4);
    /* tmp[i] < 17*2^122 + 2^64 */
    felem_reduce(x_out, tmp);

    /* y_out = r(V-x_out) - 2 * s1 * J */
    felem_diff64(ftmp3, x_out);
    /*
     * ftmp3[i] < 2^60 + 2^60 = 2^61
     */
    felem_mul(tmp, ftmp5, ftmp3);
    /* tmp[i] < 17*2^122 */
    felem_mul(tmp2, ftmp6, ftmp2);
    /* tmp2[i] < 17*2^120 */
    felem_scalar128(tmp2, 2);
    /* tmp2[i] < 17*2^121 */
    felem_diff128(tmp, tmp2);
    /*-
     * tmp[i] < 2^127 - 2^69 + 17*2^122
     *        = 2^126 - 2^122 - 2^6 - 2^2 - 1
     *        < 2^127
     */
    felem_reduce(y_out, tmp);

    /*
     * The formula above assumed neither input was infinity; patch up the
     * result, in constant time, using the infinity masks computed at entry.
     */
    copy_conditional(x_out, x2, z1_is_zero);
    copy_conditional(x_out, x1, z2_is_zero);
    copy_conditional(y_out, y2, z1_is_zero);
    copy_conditional(y_out, y1, z2_is_zero);
    copy_conditional(z_out, z2, z1_is_zero);
    copy_conditional(z_out, z1, z2_is_zero);
    felem_assign(x3, x_out);
    felem_assign(y3, y_out);
    felem_assign(z3, z_out);
}
1337 * Base point pre computation
1338 * --------------------------
 * Two different sorts of precomputed tables are used in the following code.
 * Each contains various points on the curve, where each point is three field
 * elements (x, y, z).
1344 * For the base point table, z is usually 1 (0 for the point at infinity).
1345 * This table has 16 elements:
 * index | bits    | point
 * ------+---------+------------------------------
 *     0 | 0 0 0 0 | the point at infinity
 *     1 | 0 0 0 1 | G
 *     2 | 0 0 1 0 | 2^130G
1351 * 3 | 0 0 1 1 | (2^130 + 1)G
1352 * 4 | 0 1 0 0 | 2^260G
1353 * 5 | 0 1 0 1 | (2^260 + 1)G
1354 * 6 | 0 1 1 0 | (2^260 + 2^130)G
1355 * 7 | 0 1 1 1 | (2^260 + 2^130 + 1)G
1356 * 8 | 1 0 0 0 | 2^390G
1357 * 9 | 1 0 0 1 | (2^390 + 1)G
1358 * 10 | 1 0 1 0 | (2^390 + 2^130)G
1359 * 11 | 1 0 1 1 | (2^390 + 2^130 + 1)G
1360 * 12 | 1 1 0 0 | (2^390 + 2^260)G
1361 * 13 | 1 1 0 1 | (2^390 + 2^260 + 1)G
1362 * 14 | 1 1 1 0 | (2^390 + 2^260 + 2^130)G
1363 * 15 | 1 1 1 1 | (2^390 + 2^260 + 2^130 + 1)G
1365 * The reason for this is so that we can clock bits into four different
1366 * locations when doing simple scalar multiplies against the base point.
1368 * Tables for other points have table[i] = iG for i in 0 .. 16. */
/*
 * gmul is the table of precomputed base points: entry 0 is all-zero (the
 * internal representation of the point at infinity), and every other entry
 * is an affine point (X, Y, 1) stored as three 9-limb field elements.
 */
static const felem gmul[16][3] = {
    {{0, 0, 0, 0, 0, 0, 0, 0, 0},
     {0, 0, 0, 0, 0, 0, 0, 0, 0},
     {0, 0, 0, 0, 0, 0, 0, 0, 0}},
    {{0x017e7e31c2e5bd66, 0x022cf0615a90a6fe, 0x00127a2ffa8de334,
      0x01dfbf9d64a3f877, 0x006b4d3dbaa14b5e, 0x014fed487e0a2bd8,
      0x015b4429c6481390, 0x03a73678fb2d988e, 0x00c6858e06b70404},
     {0x00be94769fd16650, 0x031c21a89cb09022, 0x039013fad0761353,
      0x02657bd099031542, 0x03273e662c97ee72, 0x01e6d11a05ebef45,
      0x03d1bd998f544495, 0x03001172297ed0b1, 0x011839296a789a3b},
     {1, 0, 0, 0, 0, 0, 0, 0, 0}},
    {{0x0373faacbc875bae, 0x00f325023721c671, 0x00f666fd3dbde5ad,
      0x01a6932363f88ea7, 0x01fc6d9e13f9c47b, 0x03bcbffc2bbf734e,
      0x013ee3c3647f3a92, 0x029409fefe75d07d, 0x00ef9199963d85e5},
     {0x011173743ad5b178, 0x02499c7c21bf7d46, 0x035beaeabb8b1a58,
      0x00f989c4752ea0a3, 0x0101e1de48a9c1a3, 0x01a20076be28ba6c,
      0x02f8052e5eb2de95, 0x01bfe8f82dea117c, 0x0160074d3c36ddb7},
     {1, 0, 0, 0, 0, 0, 0, 0, 0}},
    {{0x012f3fc373393b3b, 0x03d3d6172f1419fa, 0x02adc943c0b86873,
      0x00d475584177952b, 0x012a4d1673750ee2, 0x00512517a0f13b0c,
      0x02b184671a7b1734, 0x0315b84236f1a50a, 0x00a4afc472edbdb9},
     {0x00152a7077f385c4, 0x03044007d8d1c2ee, 0x0065829d61d52b52,
      0x00494ff6b6631d0d, 0x00a11d94d5f06bcf, 0x02d2f89474d9282e,
      0x0241c5727c06eeb9, 0x0386928710fbdb9d, 0x01f883f727b0dfbe},
     {1, 0, 0, 0, 0, 0, 0, 0, 0}},
    {{0x019b0c3c9185544d, 0x006243a37c9d97db, 0x02ee3cbe030a2ad2,
      0x00cfdd946bb51e0d, 0x0271c00932606b91, 0x03f817d1ec68c561,
      0x03f37009806a369c, 0x03c1f30baf184fd5, 0x01091022d6d2f065},
     {0x0292c583514c45ed, 0x0316fca51f9a286c, 0x00300af507c1489a,
      0x0295f69008298cf1, 0x02c0ed8274943d7b, 0x016509b9b47a431e,
      0x02bc9de9634868ce, 0x005b34929bffcb09, 0x000c1a0121681524},
     {1, 0, 0, 0, 0, 0, 0, 0, 0}},
    {{0x0286abc0292fb9f2, 0x02665eee9805b3f7, 0x01ed7455f17f26d6,
      0x0346355b83175d13, 0x006284944cd0a097, 0x0191895bcdec5e51,
      0x02e288370afda7d9, 0x03b22312bfefa67a, 0x01d104d3fc0613fe},
     {0x0092421a12f7e47f, 0x0077a83fa373c501, 0x03bd25c5f696bd0d,
      0x035c41e4d5459761, 0x01ca0d1742b24f53, 0x00aaab27863a509c,
      0x018b6de47df73917, 0x025c0b771705cd01, 0x01fd51d566d760a7},
     {1, 0, 0, 0, 0, 0, 0, 0, 0}},
    {{0x01dd92ff6b0d1dbd, 0x039c5e2e8f8afa69, 0x0261ed13242c3b27,
      0x0382c6e67026e6a0, 0x01d60b10be2089f9, 0x03c15f3dce86723f,
      0x03c764a32d2a062d, 0x017307eac0fad056, 0x018207c0b96c5256},
     {0x0196a16d60e13154, 0x03e6ce74c0267030, 0x00ddbf2b4e52a5aa,
      0x012738241bbf31c8, 0x00ebe8dc04685a28, 0x024c2ad6d380d4a2,
      0x035ee062a6e62d0e, 0x0029ed74af7d3a0f, 0x00eef32aec142ebd},
     {1, 0, 0, 0, 0, 0, 0, 0, 0}},
    {{0x00c31ec398993b39, 0x03a9f45bcda68253, 0x00ac733c24c70890,
      0x00872b111401ff01, 0x01d178c23195eafb, 0x03bca2c816b87f74,
      0x0261a9af46fbad7a, 0x0324b2a8dd3d28f9, 0x00918121d8f24e23},
     {0x032bc8c1ca983cd7, 0x00d869dfb08fc8c6, 0x01693cb61fce1516,
      0x012a5ea68f4e88a8, 0x010869cab88d7ae3, 0x009081ad277ceee1,
      0x033a77166d064cdc, 0x03955235a1fb3a95, 0x01251a4a9b25b65e},
     {1, 0, 0, 0, 0, 0, 0, 0, 0}},
    {{0x00148a3a1b27f40b, 0x0123186df1b31fdc, 0x00026e7beaad34ce,
      0x01db446ac1d3dbba, 0x0299c1a33437eaec, 0x024540610183cbb7,
      0x0173bb0e9ce92e46, 0x02b937e43921214b, 0x01ab0436a9bf01b5},
     {0x0383381640d46948, 0x008dacbf0e7f330f, 0x03602122bcc3f318,
      0x01ee596b200620d6, 0x03bd0585fda430b3, 0x014aed77fd123a83,
      0x005ace749e52f742, 0x0390fe041da2b842, 0x0189a8ceb3299242},
     {1, 0, 0, 0, 0, 0, 0, 0, 0}},
    {{0x012a19d6b3282473, 0x00c0915918b423ce, 0x023a954eb94405ae,
      0x00529f692be26158, 0x0289fa1b6fa4b2aa, 0x0198ae4ceea346ef,
      0x0047d8cdfbdedd49, 0x00cc8c8953f0f6b8, 0x001424abbff49203},
     {0x0256732a1115a03a, 0x0351bc38665c6733, 0x03f7b950fb4a6447,
      0x000afffa94c22155, 0x025763d0a4dab540, 0x000511e92d4fc283,
      0x030a7e9eda0ee96c, 0x004c3cd93a28bf0a, 0x017edb3a8719217f},
     {1, 0, 0, 0, 0, 0, 0, 0, 0}},
    {{0x011de5675a88e673, 0x031d7d0f5e567fbe, 0x0016b2062c970ae5,
      0x03f4a2be49d90aa7, 0x03cef0bd13822866, 0x03f0923dcf774a6c,
      0x0284bebc4f322f72, 0x016ab2645302bb2c, 0x01793f95dace0e2a},
     {0x010646e13527a28f, 0x01ca1babd59dc5e7, 0x01afedfd9a5595df,
      0x01f15785212ea6b1, 0x0324e5d64f6ae3f4, 0x02d680f526d00645,
      0x0127920fadf627a7, 0x03b383f75df4f684, 0x0089e0057e783b0a},
     {1, 0, 0, 0, 0, 0, 0, 0, 0}},
    {{0x00f334b9eb3c26c6, 0x0298fdaa98568dce, 0x01c2d24843a82292,
      0x020bcb24fa1b0711, 0x02cbdb3d2b1875e6, 0x0014907598f89422,
      0x03abe3aa43b26664, 0x02cbf47f720bc168, 0x0133b5e73014b79b},
     {0x034aab5dab05779d, 0x00cdc5d71fee9abb, 0x0399f16bd4bd9d30,
      0x03582fa592d82647, 0x02be1cdfb775b0e9, 0x0034f7cea32e94cb,
      0x0335a7f08f56f286, 0x03b707e9565d1c8b, 0x0015c946ea5b614f},
     {1, 0, 0, 0, 0, 0, 0, 0, 0}},
    {{0x024676f6cff72255, 0x00d14625cac96378, 0x00532b6008bc3767,
      0x01fc16721b985322, 0x023355ea1b091668, 0x029de7afdc0317c3,
      0x02fc8a7ca2da037c, 0x02de1217d74a6f30, 0x013f7173175b73bf},
     {0x0344913f441490b5, 0x0200f9e272b61eca, 0x0258a246b1dd55d2,
      0x03753db9ea496f36, 0x025e02937a09c5ef, 0x030cbd3d14012692,
      0x01793a67e70dc72a, 0x03ec1d37048a662e, 0x006550f700c32a8d},
     {1, 0, 0, 0, 0, 0, 0, 0, 0}},
    {{0x00d3f48a347eba27, 0x008e636649b61bd8, 0x00d3b93716778fb3,
      0x004d1915757bd209, 0x019d5311a3da44e0, 0x016d1afcbbe6aade,
      0x0241bf5f73265616, 0x0384672e5d50d39b, 0x005009fee522b684},
     {0x029b4fab064435fe, 0x018868ee095bbb07, 0x01ea3d6936cc92b8,
      0x000608b00f78a2f3, 0x02db911073d1c20f, 0x018205938470100a,
      0x01f1e4964cbe6ff2, 0x021a19a29eed4663, 0x01414485f42afa81},
     {1, 0, 0, 0, 0, 0, 0, 0, 0}},
    {{0x01612b3a17f63e34, 0x03813992885428e6, 0x022b3c215b5a9608,
      0x029b4057e19f2fcb, 0x0384059a587af7e6, 0x02d6400ace6fe610,
      0x029354d896e8e331, 0x00c047ee6dfba65e, 0x0037720542e9d49d},
     {0x02ce9eed7c5e9278, 0x0374ed703e79643b, 0x01316c54c4072006,
      0x005aaa09054b2ee8, 0x002824000c840d57, 0x03d4eba24771ed86,
      0x0189c50aabc3bdae, 0x0338c01541e15510, 0x00466d56e38eed42},
     {1, 0, 0, 0, 0, 0, 0, 0, 0}},
    {{0x007efd8330ad8bd6, 0x02465ed48047710b, 0x0034c6606b215e0c,
      0x016ae30c53cbf839, 0x01fa17bd37161216, 0x018ead4e61ce8ab9,
      0x005482ed5f5dee46, 0x037543755bba1d7f, 0x005e5ac7e70a9d0f},
     {0x0117e1bb2fdcb2a2, 0x03deea36249f40c4, 0x028d09b4a6246cb7,
      0x03524b8855bcf756, 0x023d7d109d5ceb58, 0x0178e43e3223ef9c,
      0x0154536a0c6e966a, 0x037964d1286ee9fe, 0x0199bcd90e125055},
     {1, 0, 0, 0, 0, 0, 0, 0, 0}}
};
/*
 * select_point selects the |idx|th point from a precomputation table and
 * copies it to out. The access pattern is independent of |idx|: every table
 * entry is read, and a mask folds exactly one of them into |out|.
 */
/* pre_comp below is of the size provided in |size| */
static void select_point(const limb idx, unsigned int size,
                         const felem pre_comp[][3], felem out[3])
{
    unsigned i, j;
    limb *outlimbs = &out[0][0];

    memset(out, 0, sizeof(*out) * 3);

    for (i = 0; i < size; i++) {
        const limb *inlimbs = &pre_comp[i][0][0];
        limb mask = i ^ idx;
        /*
         * Fold all bits of (i ^ idx) into bit 0, then subtract so that
         * mask becomes all-ones when i == idx and all-zero otherwise --
         * no branch on the secret index.
         */
        mask |= mask >> 4;
        mask |= mask >> 2;
        mask |= mask >> 1;
        mask &= 1;
        mask--;
        for (j = 0; j < NLIMBS * 3; j++)
            outlimbs[j] |= inlimbs[j] & mask;
    }
}
1508 /* get_bit returns the |i|th bit in |in| */
1509 static char get_bit(const felem_bytearray in, int i)
1513 return (in[i >> 3] >> (i & 7)) & 1;
/*
 * Interleaved point multiplication using precomputed point multiples: The
 * small point multiples 0*P, 1*P, ..., 16*P are in pre_comp[], the scalars
 * in scalars[]. If g_scalar is non-NULL, we also add this multiple of the
 * generator, using certain (large) precomputed multiples in g_pre_comp.
 * Output point (X, Y, Z) is stored in x_out, y_out, z_out.
 *
 * Generator bits are clocked in four at a time (positions i, i+130, i+260,
 * i+390) so a single 16-entry table covers the whole 521-bit scalar; other
 * points consume a signed window of 5 bits every 5th doubling.
 */
static void batch_mul(felem x_out, felem y_out, felem z_out,
                      const felem_bytearray scalars[],
                      const unsigned num_points, const u8 *g_scalar,
                      const int mixed, const felem pre_comp[][17][3],
                      const felem g_pre_comp[16][3])
{
    int i, skip;
    unsigned num, gen_mul = (g_scalar != NULL);
    felem nq[3], tmp[4];
    limb bits;
    u8 sign, digit;

    /* set nq to the point at infinity */
    memset(nq, 0, sizeof(nq));

    /*
     * Loop over all scalars msb-to-lsb, interleaving additions of multiples
     * of the generator (last quarter of rounds) and additions of other
     * points multiples (every 5th round).
     */
    skip = 1;                   /* save two point operations in the first
                                 * round */
    for (i = (num_points ? 520 : 130); i >= 0; --i) {
        /* double */
        if (!skip)
            point_double(nq[0], nq[1], nq[2], nq[0], nq[1], nq[2]);

        /* add multiples of the generator */
        if (gen_mul && (i <= 130)) {
            bits = get_bit(g_scalar, i + 390) << 3;
            if (i < 130) {
                bits |= get_bit(g_scalar, i + 260) << 2;
                bits |= get_bit(g_scalar, i + 130) << 1;
                bits |= get_bit(g_scalar, i);
            }
            /* select the point to add, in constant time */
            select_point(bits, 16, g_pre_comp, tmp);
            if (!skip) {
                /* The 1 argument below is for "mixed" */
                point_add(nq[0], nq[1], nq[2],
                          nq[0], nq[1], nq[2], 1, tmp[0], tmp[1], tmp[2]);
            } else {
                /* nq is still infinity: just load the selected point */
                memcpy(nq, tmp, 3 * sizeof(felem));
                skip = 0;
            }
        }

        /* do other additions every 5 doublings */
        if (num_points && (i % 5 == 0)) {
            /* loop over all scalars */
            for (num = 0; num < num_points; ++num) {
                bits = get_bit(scalars[num], i + 4) << 5;
                bits |= get_bit(scalars[num], i + 3) << 4;
                bits |= get_bit(scalars[num], i + 2) << 3;
                bits |= get_bit(scalars[num], i + 1) << 2;
                bits |= get_bit(scalars[num], i) << 1;
                bits |= get_bit(scalars[num], i - 1);
                ec_GFp_nistp_recode_scalar_bits(&sign, &digit, bits);

                /*
                 * select the point to add or subtract, in constant time
                 */
                select_point(digit, 17, pre_comp[num], tmp);
                felem_neg(tmp[3], tmp[1]); /* (X, -Y, Z) is the negative
                                            * point */
                copy_conditional(tmp[1], tmp[3], (-(limb) sign));

                if (!skip) {
                    point_add(nq[0], nq[1], nq[2],
                              nq[0], nq[1], nq[2],
                              mixed, tmp[0], tmp[1], tmp[2]);
                } else {
                    memcpy(nq, tmp, 3 * sizeof(felem));
                    skip = 0;
                }
            }
        }
    }
    felem_assign(x_out, nq[0]);
    felem_assign(y_out, nq[1]);
    felem_assign(z_out, nq[2]);
}
/* Precomputation for the group generator. */
struct nistp521_pre_comp_st {
    felem g_pre_comp[16][3]; /* multiples of G, same layout as gmul above */
    CRYPTO_REF_COUNT references; /* shared via dup/free, see below */
    CRYPTO_RWLOCK *lock;     /* protects the reference count */
};
/*
 * Returns the EC_METHOD for this implementation. NOTE: the initializer is
 * positional -- the entries below must stay in the exact order of the
 * EC_METHOD struct declaration; zero entries fall back to generic handling.
 */
const EC_METHOD *EC_GFp_nistp521_method(void)
{
    static const EC_METHOD ret = {
        EC_FLAGS_DEFAULT_OCT,
        NID_X9_62_prime_field,
        ec_GFp_nistp521_group_init,
        ec_GFp_simple_group_finish,
        ec_GFp_simple_group_clear_finish,
        ec_GFp_nist_group_copy,
        ec_GFp_nistp521_group_set_curve,
        ec_GFp_simple_group_get_curve,
        ec_GFp_simple_group_get_degree,
        ec_group_simple_order_bits,
        ec_GFp_simple_group_check_discriminant,
        ec_GFp_simple_point_init,
        ec_GFp_simple_point_finish,
        ec_GFp_simple_point_clear_finish,
        ec_GFp_simple_point_copy,
        ec_GFp_simple_point_set_to_infinity,
        ec_GFp_simple_set_Jprojective_coordinates_GFp,
        ec_GFp_simple_get_Jprojective_coordinates_GFp,
        ec_GFp_simple_point_set_affine_coordinates,
        ec_GFp_nistp521_point_get_affine_coordinates,
        0 /* point_set_compressed_coordinates */ ,
        0 /* point2oct */ ,
        0 /* oct2point */ ,
        ec_GFp_simple_add,
        ec_GFp_simple_dbl,
        ec_GFp_simple_invert,
        ec_GFp_simple_is_at_infinity,
        ec_GFp_simple_is_on_curve,
        ec_GFp_simple_cmp,
        ec_GFp_simple_make_affine,
        ec_GFp_simple_points_make_affine,
        ec_GFp_nistp521_points_mul,
        ec_GFp_nistp521_precompute_mult,
        ec_GFp_nistp521_have_precompute_mult,
        ec_GFp_nist_field_mul,
        ec_GFp_nist_field_sqr,
        0 /* field_div */ ,
        ec_GFp_simple_field_inv,
        0 /* field_encode */ ,
        0 /* field_decode */ ,
        0,                      /* field_set_to_one */
        ec_key_simple_priv2oct,
        ec_key_simple_oct2priv,
        0, /* set private */
        ec_key_simple_generate_key,
        ec_key_simple_check_key,
        ec_key_simple_generate_public_key,
        0, /* keycopy */
        0, /* keyfinish */
        ecdh_simple_compute_key,
        ecdsa_simple_sign_setup,
        ecdsa_simple_sign_sig,
        ecdsa_simple_verify_sig,
        0, /* field_inverse_mod_ord */
        0, /* blind_coordinates */
        0, /* ladder_pre */
        0, /* ladder_step */
        0  /* ladder_post */
    };

    return &ret;
}
/******************************************************************************/
/*
 * FUNCTIONS TO MANAGE PRECOMPUTATION
 */

/*
 * Allocates a zeroed, reference-counted precomputation record with its lock.
 * Returns NULL (with an error queued) on allocation failure.
 */
static NISTP521_PRE_COMP *nistp521_pre_comp_new(void)
{
    NISTP521_PRE_COMP *ret = OPENSSL_zalloc(sizeof(*ret));

    if (ret == NULL) {
        ECerr(EC_F_NISTP521_PRE_COMP_NEW, ERR_R_MALLOC_FAILURE);
        return ret;
    }

    ret->references = 1;

    ret->lock = CRYPTO_THREAD_lock_new();
    if (ret->lock == NULL) {
        ECerr(EC_F_NISTP521_PRE_COMP_NEW, ERR_R_MALLOC_FAILURE);
        OPENSSL_free(ret);
        return NULL;
    }
    return ret;
}
/* Bumps the reference count of |p| (NULL tolerated) and returns it. */
NISTP521_PRE_COMP *EC_nistp521_pre_comp_dup(NISTP521_PRE_COMP *p)
{
    int i;
    if (p != NULL)
        CRYPTO_UP_REF(&p->references, &i, p->lock);
    return p;
}
1712 void EC_nistp521_pre_comp_free(NISTP521_PRE_COMP *p)
1719 CRYPTO_DOWN_REF(&p->references, &i, p->lock);
1720 REF_PRINT_COUNT("EC_nistp521", x);
1723 REF_ASSERT_ISNT(i < 0);
1725 CRYPTO_THREAD_lock_free(p->lock);
1729 /******************************************************************************/
1731 * OPENSSL EC_METHOD FUNCTIONS
1734 int ec_GFp_nistp521_group_init(EC_GROUP *group)
1737 ret = ec_GFp_simple_group_init(group);
1738 group->a_is_minus3 = 1;
/*
 * Installs the curve parameters, but only if they are exactly the NIST P-521
 * constants -- this method cannot serve any other curve. Returns 1 on
 * success, 0 on mismatch or allocation failure.
 */
int ec_GFp_nistp521_group_set_curve(EC_GROUP *group, const BIGNUM *p,
                                    const BIGNUM *a, const BIGNUM *b,
                                    BN_CTX *ctx)
{
    int ret = 0;
    BIGNUM *curve_p, *curve_a, *curve_b;
    BN_CTX *new_ctx = NULL;

    if (ctx == NULL) {
        ctx = new_ctx = BN_CTX_new();
        if (ctx == NULL)
            return 0;
    }
    BN_CTX_start(ctx);
    curve_p = BN_CTX_get(ctx);
    curve_a = BN_CTX_get(ctx);
    curve_b = BN_CTX_get(ctx);
    if (curve_b == NULL)
        goto err;
    BN_bin2bn(nistp521_curve_params[0], sizeof(felem_bytearray), curve_p);
    BN_bin2bn(nistp521_curve_params[1], sizeof(felem_bytearray), curve_a);
    BN_bin2bn(nistp521_curve_params[2], sizeof(felem_bytearray), curve_b);
    if ((BN_cmp(curve_p, p)) || (BN_cmp(curve_a, a)) || (BN_cmp(curve_b, b))) {
        ECerr(EC_F_EC_GFP_NISTP521_GROUP_SET_CURVE,
              EC_R_WRONG_CURVE_PARAMETERS);
        goto err;
    }
    group->field_mod_func = BN_nist_mod_521;
    ret = ec_GFp_simple_group_set_curve(group, p, a, b, ctx);
 err:
    BN_CTX_end(ctx);
    BN_CTX_free(new_ctx);
    return ret;
}
/*
 * Takes the Jacobian coordinates (X, Y, Z) of a point and returns (X', Y') =
 * (X/Z^2, Y/Z^3). Either of |x| and |y| may be NULL if only one coordinate
 * is wanted. Returns 1 on success, 0 on error (point at infinity, or a
 * conversion failure).
 */
int ec_GFp_nistp521_point_get_affine_coordinates(const EC_GROUP *group,
                                                 const EC_POINT *point,
                                                 BIGNUM *x, BIGNUM *y,
                                                 BN_CTX *ctx)
{
    felem z1, z2, x_in, y_in, x_out, y_out;
    largefelem tmp;

    if (EC_POINT_is_at_infinity(group, point)) {
        ECerr(EC_F_EC_GFP_NISTP521_POINT_GET_AFFINE_COORDINATES,
              EC_R_POINT_AT_INFINITY);
        return 0;
    }
    if ((!BN_to_felem(x_in, point->X)) || (!BN_to_felem(y_in, point->Y)) ||
        (!BN_to_felem(z1, point->Z)))
        return 0;
    /* z2 = 1/Z; a single inversion serves both coordinates */
    felem_inv(z2, z1);
    felem_square(tmp, z2);
    felem_reduce(z1, tmp);
    /* x_out = X / Z^2, fully reduced */
    felem_mul(tmp, x_in, z1);
    felem_reduce(x_in, tmp);
    felem_contract(x_out, x_in);
    if (x != NULL) {
        if (!felem_to_BN(x, x_out)) {
            ECerr(EC_F_EC_GFP_NISTP521_POINT_GET_AFFINE_COORDINATES,
                  ERR_R_BN_LIB);
            return 0;
        }
    }
    /* y_out = Y / Z^3 = Y * (1/Z^2) * (1/Z) */
    felem_mul(tmp, z1, z2);
    felem_reduce(z1, tmp);
    felem_mul(tmp, y_in, z1);
    felem_reduce(y_in, tmp);
    felem_contract(y_out, y_in);
    if (y != NULL) {
        if (!felem_to_BN(y, y_out)) {
            ECerr(EC_F_EC_GFP_NISTP521_POINT_GET_AFFINE_COORDINATES,
                  ERR_R_BN_LIB);
            return 0;
        }
    }
    return 1;
}
/* points below is of size |num|, and tmp_felems is of size |num+1| */
static void make_points_affine(size_t num, felem points[][3],
                               felem tmp_felems[])
{
    /*
     * Runs in constant time, unless an input is the point at infinity (which
     * normally shouldn't happen).
     *
     * This just adapts our felem primitives to the generic batched-inversion
     * helper via void-pointer function casts; the casts only erase the
     * felem/largefelem parameter types.
     */
    ec_GFp_nistp_points_make_affine_internal(num,
                                             points,
                                             sizeof(felem),
                                             tmp_felems,
                                             (void (*)(void *))felem_one,
                                             felem_is_zero_int,
                                             (void (*)(void *, const void *))
                                             felem_assign,
                                             (void (*)(void *, const void *))
                                             felem_square_reduce,
                                             (void (*)(void *, const void *,
                                                       const void *))
                                             felem_mul_reduce,
                                             (void (*)(void *, const void *))
                                             felem_inv,
                                             (void (*)(void *, const void *))
                                             felem_contract);
}
/*
 * Computes scalar*generator + \sum scalars[i]*points[i], ignoring NULL
 * values. Result is stored in r (r can equal one of the inputs).
 *
 * NULL scalars/points contribute nothing; if the group's generator does not
 * match the precomputed one, the generator term is folded in as an ordinary
 * extra point. Returns 1 on success, 0 on error.
 */
int ec_GFp_nistp521_points_mul(const EC_GROUP *group, EC_POINT *r,
                               const BIGNUM *scalar, size_t num,
                               const EC_POINT *points[],
                               const BIGNUM *scalars[], BN_CTX *ctx)
{
    int ret = 0;
    int j;
    int mixed = 0;
    BIGNUM *x, *y, *z, *tmp_scalar;
    felem_bytearray g_secret;
    felem_bytearray *secrets = NULL;
    felem (*pre_comp)[17][3] = NULL;
    felem *tmp_felems = NULL;
    felem_bytearray tmp;
    unsigned i, num_bytes;
    int have_pre_comp = 0;
    size_t num_points = num;
    felem x_in, y_in, z_in, x_out, y_out, z_out;
    NISTP521_PRE_COMP *pre = NULL;
    felem(*g_pre_comp)[3] = NULL;
    EC_POINT *generator = NULL;
    const EC_POINT *p = NULL;
    const BIGNUM *p_scalar = NULL;

    BN_CTX_start(ctx);
    x = BN_CTX_get(ctx);
    y = BN_CTX_get(ctx);
    z = BN_CTX_get(ctx);
    tmp_scalar = BN_CTX_get(ctx);
    if (tmp_scalar == NULL)
        goto err;

    if (scalar != NULL) {
        pre = group->pre_comp.nistp521;
        if (pre)
            /* we have precomputation, try to use it */
            g_pre_comp = &pre->g_pre_comp[0];
        else
            /* try to use the standard precomputation */
            g_pre_comp = (felem(*)[3]) gmul;
        generator = EC_POINT_new(group);
        if (generator == NULL)
            goto err;
        /* get the generator from precomputation */
        if (!felem_to_BN(x, g_pre_comp[1][0]) ||
            !felem_to_BN(y, g_pre_comp[1][1]) ||
            !felem_to_BN(z, g_pre_comp[1][2])) {
            ECerr(EC_F_EC_GFP_NISTP521_POINTS_MUL, ERR_R_BN_LIB);
            goto err;
        }
        if (!EC_POINT_set_Jprojective_coordinates_GFp(group,
                                                      generator, x, y, z,
                                                      ctx))
            goto err;
        if (0 == EC_POINT_cmp(group, generator, group->generator, ctx))
            /* precomputation matches generator */
            have_pre_comp = 1;
        else
            /*
             * we don't have valid precomputation: treat the generator as a
             * random point
             */
            num_points++;
    }

    if (num_points > 0) {
        if (num_points >= 2) {
            /*
             * unless we precompute multiples for just one point, converting
             * those into affine form is time well spent
             */
            mixed = 1;
        }
        secrets = OPENSSL_zalloc(sizeof(*secrets) * num_points);
        pre_comp = OPENSSL_zalloc(sizeof(*pre_comp) * num_points);
        if (mixed)
            tmp_felems =
                OPENSSL_malloc(sizeof(*tmp_felems) * (num_points * 17 + 1));
        if ((secrets == NULL) || (pre_comp == NULL)
            || (mixed && (tmp_felems == NULL))) {
            ECerr(EC_F_EC_GFP_NISTP521_POINTS_MUL, ERR_R_MALLOC_FAILURE);
            goto err;
        }

        /*
         * we treat NULL scalars as 0, and NULL points as points at infinity,
         * i.e., they contribute nothing to the linear combination
         */
        for (i = 0; i < num_points; ++i) {
            if (i == num) {
                /*
                 * we didn't have a valid precomputation, so we pick the
                 * generator
                 */
                p = EC_GROUP_get0_generator(group);
                p_scalar = scalar;
            } else {
                /* the i^th point */
                p = points[i];
                p_scalar = scalars[i];
            }
            if ((p_scalar != NULL) && (p != NULL)) {
                /* reduce scalar to 0 <= scalar < 2^521 */
                if ((BN_num_bits(p_scalar) > 521)
                    || (BN_is_negative(p_scalar))) {
                    /*
                     * this is an unusual input, and we don't guarantee
                     * constant-timeness
                     */
                    if (!BN_nnmod(tmp_scalar, p_scalar, group->order, ctx)) {
                        ECerr(EC_F_EC_GFP_NISTP521_POINTS_MUL, ERR_R_BN_LIB);
                        goto err;
                    }
                    num_bytes = BN_bn2binpad(tmp_scalar, tmp, sizeof(tmp));
                } else {
                    num_bytes = BN_bn2binpad(p_scalar, tmp, sizeof(tmp));
                }
                flip_endian(secrets[i], tmp, num_bytes);
                /* precompute multiples */
                if ((!BN_to_felem(x_out, p->X)) ||
                    (!BN_to_felem(y_out, p->Y)) ||
                    (!BN_to_felem(z_out, p->Z)))
                    goto err;
                memcpy(pre_comp[i][1][0], x_out, sizeof(felem));
                memcpy(pre_comp[i][1][1], y_out, sizeof(felem));
                memcpy(pre_comp[i][1][2], z_out, sizeof(felem));
                /* odd j: (j-1)P + P; even j: double of (j/2)P */
                for (j = 2; j <= 16; ++j) {
                    if (j & 1) {
                        point_add(pre_comp[i][j][0], pre_comp[i][j][1],
                                  pre_comp[i][j][2], pre_comp[i][1][0],
                                  pre_comp[i][1][1], pre_comp[i][1][2], 0,
                                  pre_comp[i][j - 1][0],
                                  pre_comp[i][j - 1][1],
                                  pre_comp[i][j - 1][2]);
                    } else {
                        point_double(pre_comp[i][j][0], pre_comp[i][j][1],
                                     pre_comp[i][j][2], pre_comp[i][j / 2][0],
                                     pre_comp[i][j / 2][1],
                                     pre_comp[i][j / 2][2]);
                    }
                }
            }
        }
        if (mixed)
            make_points_affine(num_points * 17, pre_comp[0], tmp_felems);
    }

    /* the scalar for the generator */
    if ((scalar != NULL) && (have_pre_comp)) {
        memset(g_secret, 0, sizeof(g_secret));
        /* reduce scalar to 0 <= scalar < 2^521 */
        if ((BN_num_bits(scalar) > 521) || (BN_is_negative(scalar))) {
            /*
             * this is an unusual input, and we don't guarantee
             * constant-timeness
             */
            if (!BN_nnmod(tmp_scalar, scalar, group->order, ctx)) {
                ECerr(EC_F_EC_GFP_NISTP521_POINTS_MUL, ERR_R_BN_LIB);
                goto err;
            }
            num_bytes = BN_bn2binpad(tmp_scalar, tmp, sizeof(tmp));
        } else {
            num_bytes = BN_bn2binpad(scalar, tmp, sizeof(tmp));
        }
        flip_endian(g_secret, tmp, num_bytes);
        /* do the multiplication with generator precomputation */
        batch_mul(x_out, y_out, z_out,
                  (const felem_bytearray(*))secrets, num_points,
                  g_secret,
                  mixed, (const felem(*)[17][3])pre_comp,
                  (const felem(*)[3])g_pre_comp);
    } else {
        /* do the multiplication without generator precomputation */
        batch_mul(x_out, y_out, z_out,
                  (const felem_bytearray(*))secrets, num_points,
                  NULL, mixed, (const felem(*)[17][3])pre_comp, NULL);
    }
    /* reduce the output to its unique minimal representation */
    felem_contract(x_in, x_out);
    felem_contract(y_in, y_out);
    felem_contract(z_in, z_out);
    if ((!felem_to_BN(x, x_in)) || (!felem_to_BN(y, y_in)) ||
        (!felem_to_BN(z, z_in))) {
        ECerr(EC_F_EC_GFP_NISTP521_POINTS_MUL, ERR_R_BN_LIB);
        goto err;
    }
    ret = EC_POINT_set_Jprojective_coordinates_GFp(group, r, x, y, z, ctx);

 err:
    BN_CTX_end(ctx);
    EC_POINT_free(generator);
    OPENSSL_free(secrets);
    OPENSSL_free(pre_comp);
    OPENSSL_free(tmp_felems);
    return ret;
}
/*
 * Builds and installs the generator precomputation table for |group|:
 * either the built-in gmul table (standard generator) or a freshly computed
 * 16-entry table for a custom generator. Returns 1 on success, 0 on error.
 */
int ec_GFp_nistp521_precompute_mult(EC_GROUP *group, BN_CTX *ctx)
{
    int ret = 0;
    NISTP521_PRE_COMP *pre = NULL;
    int i, j;
    BIGNUM *x, *y;
    EC_POINT *generator = NULL;
    felem tmp_felems[16];
    BN_CTX *new_ctx = NULL;

    /* throw away old precomputation */
    EC_pre_comp_free(group);
    if (ctx == NULL) {
        ctx = new_ctx = BN_CTX_new();
        if (ctx == NULL)
            return 0;
    }
    BN_CTX_start(ctx);
    x = BN_CTX_get(ctx);
    y = BN_CTX_get(ctx);
    if (y == NULL)
        goto err;
    /* get the generator */
    if (group->generator == NULL)
        goto err;
    generator = EC_POINT_new(group);
    if (generator == NULL)
        goto err;
    BN_bin2bn(nistp521_curve_params[3], sizeof(felem_bytearray), x);
    BN_bin2bn(nistp521_curve_params[4], sizeof(felem_bytearray), y);
    if (!EC_POINT_set_affine_coordinates(group, generator, x, y, ctx))
        goto err;
    if ((pre = nistp521_pre_comp_new()) == NULL)
        goto err;
    /*
     * if the generator is the standard one, use built-in precomputation
     */
    if (0 == EC_POINT_cmp(group, generator, group->generator, ctx)) {
        memcpy(pre->g_pre_comp, gmul, sizeof(pre->g_pre_comp));
        goto done;
    }
    if ((!BN_to_felem(pre->g_pre_comp[1][0], group->generator->X)) ||
        (!BN_to_felem(pre->g_pre_comp[1][1], group->generator->Y)) ||
        (!BN_to_felem(pre->g_pre_comp[1][2], group->generator->Z)))
        goto err;
    /* compute 2^130*G, 2^260*G, 2^390*G */
    for (i = 1; i <= 4; i <<= 1) {
        /* 130 doublings of entry i yield entry 2i */
        point_double(pre->g_pre_comp[2 * i][0], pre->g_pre_comp[2 * i][1],
                     pre->g_pre_comp[2 * i][2], pre->g_pre_comp[i][0],
                     pre->g_pre_comp[i][1], pre->g_pre_comp[i][2]);
        for (j = 0; j < 129; ++j) {
            point_double(pre->g_pre_comp[2 * i][0],
                         pre->g_pre_comp[2 * i][1],
                         pre->g_pre_comp[2 * i][2],
                         pre->g_pre_comp[2 * i][0],
                         pre->g_pre_comp[2 * i][1],
                         pre->g_pre_comp[2 * i][2]);
        }
    }
    /* g_pre_comp[0] is the point at infinity */
    memset(pre->g_pre_comp[0], 0, sizeof(pre->g_pre_comp[0]));
    /* the remaining multiples */
    /* 2^130*G + 2^260*G */
    point_add(pre->g_pre_comp[6][0], pre->g_pre_comp[6][1],
              pre->g_pre_comp[6][2], pre->g_pre_comp[4][0],
              pre->g_pre_comp[4][1], pre->g_pre_comp[4][2],
              0, pre->g_pre_comp[2][0], pre->g_pre_comp[2][1],
              pre->g_pre_comp[2][2]);
    /* 2^130*G + 2^390*G */
    point_add(pre->g_pre_comp[10][0], pre->g_pre_comp[10][1],
              pre->g_pre_comp[10][2], pre->g_pre_comp[8][0],
              pre->g_pre_comp[8][1], pre->g_pre_comp[8][2],
              0, pre->g_pre_comp[2][0], pre->g_pre_comp[2][1],
              pre->g_pre_comp[2][2]);
    /* 2^260*G + 2^390*G */
    point_add(pre->g_pre_comp[12][0], pre->g_pre_comp[12][1],
              pre->g_pre_comp[12][2], pre->g_pre_comp[8][0],
              pre->g_pre_comp[8][1], pre->g_pre_comp[8][2],
              0, pre->g_pre_comp[4][0], pre->g_pre_comp[4][1],
              pre->g_pre_comp[4][2]);
    /* 2^130*G + 2^260*G + 2^390*G */
    point_add(pre->g_pre_comp[14][0], pre->g_pre_comp[14][1],
              pre->g_pre_comp[14][2], pre->g_pre_comp[12][0],
              pre->g_pre_comp[12][1], pre->g_pre_comp[12][2],
              0, pre->g_pre_comp[2][0], pre->g_pre_comp[2][1],
              pre->g_pre_comp[2][2]);
    for (i = 1; i < 8; ++i) {
        /* odd multiples: add G */
        point_add(pre->g_pre_comp[2 * i + 1][0],
                  pre->g_pre_comp[2 * i + 1][1],
                  pre->g_pre_comp[2 * i + 1][2], pre->g_pre_comp[2 * i][0],
                  pre->g_pre_comp[2 * i][1], pre->g_pre_comp[2 * i][2], 0,
                  pre->g_pre_comp[1][0], pre->g_pre_comp[1][1],
                  pre->g_pre_comp[1][2]);
    }
    make_points_affine(15, &(pre->g_pre_comp[1]), tmp_felems);

 done:
    SETPRECOMP(group, nistp521, pre);
    ret = 1;
    pre = NULL;                 /* ownership transferred to the group */
 err:
    BN_CTX_end(ctx);
    EC_POINT_free(generator);
    BN_CTX_free(new_ctx);
    EC_nistp521_pre_comp_free(pre);
    return ret;
}
/* Reports (1/0) whether |group| carries nistp521 generator precomputation. */
int ec_GFp_nistp521_have_precompute_mult(const EC_GROUP *group)
{
    return HAVEPRECOMP(group, nistp521);
}