/*
 * Copyright 2014-2016 The OpenSSL Project Authors. All Rights Reserved.
 *
 * Licensed under the OpenSSL license (the "License"). You may not use
 * this file except in compliance with the License. You can obtain a copy
 * in the file LICENSE in the source distribution or at
 * https://www.openssl.org/source/license.html
 */
/******************************************************************************
 *                                                                            *
 * Copyright 2014 Intel Corporation                                           *
 *                                                                            *
 * Licensed under the Apache License, Version 2.0 (the "License");            *
 * you may not use this file except in compliance with the License.          *
 * You may obtain a copy of the License at                                    *
 *                                                                            *
 *    http://www.apache.org/licenses/LICENSE-2.0                              *
 *                                                                            *
 * Unless required by applicable law or agreed to in writing, software        *
 * distributed under the License is distributed on an "AS IS" BASIS,          *
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.   *
 * See the License for the specific language governing permissions and        *
 * limitations under the License.                                             *
 *                                                                            *
 ******************************************************************************
 *                                                                            *
 * Developers and authors:                                                    *
 * Shay Gueron (1, 2), and Vlad Krasnov (1)                                   *
 * (1) Intel Corporation, Israel Development Center                           *
 * (2) University of Haifa                                                    *
 * Reference:                                                                 *
 * S.Gueron and V.Krasnov, "Fast Prime Field Elliptic Curve Cryptography with *
 * 256 Bit Primes"                                                            *
 *                                                                            *
 ******************************************************************************/
#include <string.h>

#include "internal/cryptlib.h"
#include "internal/bn_int.h"
#include "ec_lcl.h"
#if BN_BITS2 != 64
# define TOBN(hi,lo)    lo,hi
#else
# define TOBN(hi,lo)    ((BN_ULONG)hi<<32|lo)
#endif
#if defined(__GNUC__)
# define ALIGN32        __attribute((aligned(32)))
#elif defined(_MSC_VER)
# define ALIGN32        __declspec(align(32))
#else
# define ALIGN32
#endif
#define ALIGNPTR(p,N)   ((unsigned char *)p+N-(size_t)p%N)
#define P256_LIMBS      (256/BN_BITS2)

typedef unsigned short u16;
typedef struct {
    BN_ULONG X[P256_LIMBS];
    BN_ULONG Y[P256_LIMBS];
    BN_ULONG Z[P256_LIMBS];
} P256_POINT;

typedef struct {
    BN_ULONG X[P256_LIMBS];
    BN_ULONG Y[P256_LIMBS];
} P256_POINT_AFFINE;

typedef P256_POINT_AFFINE PRECOMP256_ROW[64];
/* structure for precomputed multiples of the generator */
struct nistz256_pre_comp_st {
    const EC_GROUP *group;      /* Parent EC_GROUP object */
    size_t w;                   /* Window size */
    /*
     * Constant time access to the X and Y coordinates of the pre-computed
     * generator multiples, in the Montgomery domain. Pre-calculated
     * multiples are stored in affine form.
     */
    PRECOMP256_ROW *precomp;
    void *precomp_storage;
    int references;
    CRYPTO_RWLOCK *lock;
};
/* Functions implemented in assembly */
/*
 * Most of the functions below *preserve* the property of inputs being
 * fully reduced, i.e. being in [0, modulus) range. Simply put, if the
 * inputs are fully reduced, then so is the output. Note that the reverse
 * does not hold: given partially reduced inputs, the output can be
 * either partially or fully reduced. The "most" in the first sentence
 * refers to the fact that, given how the calculations flow, one can
 * tolerate the addition (the 1st function below) producing a partially
 * reduced result *if* the multiplications by 2 and 3, which customarily
 * use addition, fully reduce it. This effectively gives two options:
 * a) addition produces a fully reduced result [as long as the inputs
 * are, just like the remaining functions]; b) addition is allowed to
 * produce a partially reduced result, but the multiplications by 2 and
 * 3 perform an additional reduction step. The choice between the two
 * can be platform-specific, but it was a) in all cases so far...
 */
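/*
 * A minimal portable sketch (not part of the build) of what option a)
 * means, assuming a 64-bit BN_ULONG and P256_LIMBS == 4: add the two
 * fully reduced inputs, subtract the modulus, and keep the unreduced sum
 * only when the subtraction borrowed without the addition carrying, all
 * branch-free. The assembly implements the same idea with carry flags.
 */
#if 0
static void ref_mod_add(BN_ULONG r[4], const BN_ULONG a[4],
                        const BN_ULONG b[4], const BN_ULONG p[4])
{
    BN_ULONG t[4], carry = 0, borrow = 0, mask;
    int i;

    for (i = 0; i < 4; i++) {           /* t = a + b, propagating carry */
        BN_ULONG s = a[i] + carry;
        carry = (s < carry);
        t[i] = s + b[i];
        carry += (t[i] < b[i]);
    }
    for (i = 0; i < 4; i++) {           /* r = t - p, propagating borrow */
        BN_ULONG d = t[i] - borrow;
        borrow = (d > t[i]);
        r[i] = d - p[i];
        borrow += (r[i] > d);
    }
    /* keep t only if t < p, i.e. the subtraction borrowed with no carry */
    mask = 0 - (borrow & (carry ^ (BN_ULONG)1));
    for (i = 0; i < 4; i++)
        r[i] = (t[i] & mask) | (r[i] & ~mask);
}
#endif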
/* Modular add: res = a+b mod P */
void ecp_nistz256_add(BN_ULONG res[P256_LIMBS],
                      const BN_ULONG a[P256_LIMBS],
                      const BN_ULONG b[P256_LIMBS]);
/* Modular mul by 2: res = 2*a mod P */
void ecp_nistz256_mul_by_2(BN_ULONG res[P256_LIMBS],
                           const BN_ULONG a[P256_LIMBS]);
/* Modular mul by 3: res = 3*a mod P */
void ecp_nistz256_mul_by_3(BN_ULONG res[P256_LIMBS],
                           const BN_ULONG a[P256_LIMBS]);

/* Modular div by 2: res = a/2 mod P */
void ecp_nistz256_div_by_2(BN_ULONG res[P256_LIMBS],
                           const BN_ULONG a[P256_LIMBS]);
/* Modular sub: res = a-b mod P */
void ecp_nistz256_sub(BN_ULONG res[P256_LIMBS],
                      const BN_ULONG a[P256_LIMBS],
                      const BN_ULONG b[P256_LIMBS]);
/* Modular neg: res = -a mod P */
void ecp_nistz256_neg(BN_ULONG res[P256_LIMBS], const BN_ULONG a[P256_LIMBS]);
/* Montgomery mul: res = a*b*2^-256 mod P */
void ecp_nistz256_mul_mont(BN_ULONG res[P256_LIMBS],
                           const BN_ULONG a[P256_LIMBS],
                           const BN_ULONG b[P256_LIMBS]);
/* Montgomery sqr: res = a*a*2^-256 mod P */
void ecp_nistz256_sqr_mont(BN_ULONG res[P256_LIMBS],
                           const BN_ULONG a[P256_LIMBS]);
/* Convert a number from Montgomery domain, by multiplying with 1 */
void ecp_nistz256_from_mont(BN_ULONG res[P256_LIMBS],
                            const BN_ULONG in[P256_LIMBS]);
/* Convert a number to Montgomery domain, by multiplying with 2^512 mod P */
void ecp_nistz256_to_mont(BN_ULONG res[P256_LIMBS],
                          const BN_ULONG in[P256_LIMBS]);
/* Functions that perform constant time access to the precomputed tables */
void ecp_nistz256_scatter_w5(P256_POINT *val,
                             const P256_POINT *in_t, int idx);
void ecp_nistz256_gather_w5(P256_POINT *val,
                            const P256_POINT *in_t, int idx);
void ecp_nistz256_scatter_w7(P256_POINT_AFFINE *val,
                             const P256_POINT_AFFINE *in_t, int idx);
void ecp_nistz256_gather_w7(P256_POINT_AFFINE *val,
                            const P256_POINT_AFFINE *in_t, int idx);
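/*
 * A hedged reference sketch (not the assembly implementation) of what
 * constant-time gathering means: every table entry is read and the
 * requested one is accumulated under a mask, so the memory access
 * pattern is independent of the secret index. As in the real tables,
 * idx == 0 denotes the implicit point at infinity and selects nothing.
 */
#if 0
static void ref_gather_w5(P256_POINT *val, const P256_POINT *in_t, int idx)
{
    int i, j;

    memset(val, 0, sizeof(*val));
    for (i = 0; i < 16; i++) {
        BN_ULONG mask = 0 - (BN_ULONG)(i + 1 == idx); /* all-ones on match */

        for (j = 0; j < P256_LIMBS; j++) {
            val->X[j] |= in_t[i].X[j] & mask;
            val->Y[j] |= in_t[i].Y[j] & mask;
            val->Z[j] |= in_t[i].Z[j] & mask;
        }
    }
}
#endif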
/* One converted into the Montgomery domain */
static const BN_ULONG ONE[P256_LIMBS] = {
    TOBN(0x00000000, 0x00000001), TOBN(0xffffffff, 0x00000000),
    TOBN(0xffffffff, 0xffffffff), TOBN(0x00000000, 0xfffffffe)
};
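/*
 * Sanity note: the Montgomery representation of x is x*2^256 mod P, so
 * "one" is 2^256 mod P. With P = 2^256 - 2^224 + 2^192 + 2^96 - 1 that
 * equals 2^224 - 2^192 - 2^96 + 1, which is exactly the constant above.
 */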
static NISTZ256_PRE_COMP *ecp_nistz256_pre_comp_new(const EC_GROUP *group);

/* Precomputed tables for the default generator */
extern const PRECOMP256_ROW ecp_nistz256_precomputed[37];
/* Recode window to a signed digit, see ecp_nistputil.c for details */
static unsigned int _booth_recode_w5(unsigned int in)
{
    unsigned int s, d;

    s = ~((in >> 5) - 1);
    d = (1 << 6) - in - 1;
    d = (d & s) | (in & ~s);
    d = (d >> 1) + (d & 1);

    return (d << 1) + (s & 1);
}

static unsigned int _booth_recode_w7(unsigned int in)
{
    unsigned int s, d;

    s = ~((in >> 7) - 1);
    d = (1 << 8) - in - 1;
    d = (d & s) | (in & ~s);
    d = (d >> 1) + (d & 1);

    return (d << 1) + (s & 1);
}
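/*
 * Worked example for _booth_recode_w5, as a reading aid: in = 38 =
 * 0b100110 carries window value 19 (bits 5..1) and carry-in 0 (bit 0).
 * The top bit is set, so s is all-ones and d = 63 - 38 = 25, giving
 * digit (25 >> 1) + (25 & 1) = 13. The result (13 << 1) + 1 = 27 means
 * "negate table entry 13", consistent with 19 - 32 = -13 once the
 * borrow is pushed into the next window.
 */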
static void copy_conditional(BN_ULONG dst[P256_LIMBS],
                             const BN_ULONG src[P256_LIMBS], BN_ULONG move)
{
    BN_ULONG mask1 = 0 - move;
    BN_ULONG mask2 = ~mask1;

    dst[0] = (src[0] & mask1) ^ (dst[0] & mask2);
    dst[1] = (src[1] & mask1) ^ (dst[1] & mask2);
    dst[2] = (src[2] & mask1) ^ (dst[2] & mask2);
    dst[3] = (src[3] & mask1) ^ (dst[3] & mask2);
    if (P256_LIMBS == 8) {
        dst[4] = (src[4] & mask1) ^ (dst[4] & mask2);
        dst[5] = (src[5] & mask1) ^ (dst[5] & mask2);
        dst[6] = (src[6] & mask1) ^ (dst[6] & mask2);
        dst[7] = (src[7] & mask1) ^ (dst[7] & mask2);
    }
}
static BN_ULONG is_zero(BN_ULONG in)
{
    in |= (0 - in);
    in = ~in;
    return in >> (BN_BITS2 - 1);
}
static BN_ULONG is_equal(const BN_ULONG a[P256_LIMBS],
                         const BN_ULONG b[P256_LIMBS])
{
    BN_ULONG res;

    res = a[0] ^ b[0];
    res |= a[1] ^ b[1];
    res |= a[2] ^ b[2];
    res |= a[3] ^ b[3];
    if (P256_LIMBS == 8) {
        res |= a[4] ^ b[4];
        res |= a[5] ^ b[5];
        res |= a[6] ^ b[6];
        res |= a[7] ^ b[7];
    }

    return is_zero(res);
}
static BN_ULONG is_one(const BIGNUM *z)
{
    BN_ULONG res = 0;
    BN_ULONG *a = bn_get_words(z);

    if (bn_get_top(z) == (P256_LIMBS - P256_LIMBS / 8)) {
        res = a[0] ^ ONE[0];
        res |= a[1] ^ ONE[1];
        res |= a[2] ^ ONE[2];
        res |= a[3] ^ ONE[3];
        if (P256_LIMBS == 8) {
            res |= a[4] ^ ONE[4];
            res |= a[5] ^ ONE[5];
            res |= a[6] ^ ONE[6];
            /*
             * no check for a[7] (being zero) on 32-bit platforms,
             * because the value of "one" takes up only 7 limbs there.
             */
        }
        res = is_zero(res);
    }

    return res;
}
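/*
 * Reading aid: on 64-bit builds the expected top is 4 limbs; on 32-bit
 * builds it is 7, because the eighth (most significant) limb of ONE is
 * zero and BIGNUMs are stored with high zero limbs stripped.
 */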
#ifndef ECP_NISTZ256_REFERENCE_IMPLEMENTATION
void ecp_nistz256_point_double(P256_POINT *r, const P256_POINT *a);
void ecp_nistz256_point_add(P256_POINT *r,
                            const P256_POINT *a, const P256_POINT *b);
void ecp_nistz256_point_add_affine(P256_POINT *r,
                                   const P256_POINT *a,
                                   const P256_POINT_AFFINE *b);
#else
/* Point double: r = 2*a */
static void ecp_nistz256_point_double(P256_POINT *r, const P256_POINT *a)
{
    BN_ULONG S[P256_LIMBS];
    BN_ULONG M[P256_LIMBS];
    BN_ULONG Zsqr[P256_LIMBS];
    BN_ULONG tmp0[P256_LIMBS];

    const BN_ULONG *in_x = a->X;
    const BN_ULONG *in_y = a->Y;
    const BN_ULONG *in_z = a->Z;

    BN_ULONG *res_x = r->X;
    BN_ULONG *res_y = r->Y;
    BN_ULONG *res_z = r->Z;

    ecp_nistz256_mul_by_2(S, in_y);

    ecp_nistz256_sqr_mont(Zsqr, in_z);

    ecp_nistz256_sqr_mont(S, S);

    ecp_nistz256_mul_mont(res_z, in_z, in_y);
    ecp_nistz256_mul_by_2(res_z, res_z);

    ecp_nistz256_add(M, in_x, Zsqr);
    ecp_nistz256_sub(Zsqr, in_x, Zsqr);

    ecp_nistz256_sqr_mont(res_y, S);
    ecp_nistz256_div_by_2(res_y, res_y);

    ecp_nistz256_mul_mont(M, M, Zsqr);
    ecp_nistz256_mul_by_3(M, M);

    ecp_nistz256_mul_mont(S, S, in_x);
    ecp_nistz256_mul_by_2(tmp0, S);

    ecp_nistz256_sqr_mont(res_x, M);

    ecp_nistz256_sub(res_x, res_x, tmp0);
    ecp_nistz256_sub(S, S, res_x);

    ecp_nistz256_mul_mont(S, S, M);
    ecp_nistz256_sub(res_y, S, res_y);
}
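/*
 * For reference, the sequence above evaluates the standard Jacobian
 * doubling formulas, entirely in the Montgomery domain:
 *   S = (2*Y1)^2, M = 3*(X1 + Z1^2)*(X1 - Z1^2), Z3 = 2*Y1*Z1,
 *   X3 = M^2 - 2*X1*S, Y3 = M*(X1*S - X3) - S^2/2.
 */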
/* Point addition: r = a+b */
static void ecp_nistz256_point_add(P256_POINT *r,
                                   const P256_POINT *a, const P256_POINT *b)
{
    BN_ULONG U2[P256_LIMBS], S2[P256_LIMBS];
    BN_ULONG U1[P256_LIMBS], S1[P256_LIMBS];
    BN_ULONG Z1sqr[P256_LIMBS];
    BN_ULONG Z2sqr[P256_LIMBS];
    BN_ULONG H[P256_LIMBS], R[P256_LIMBS];
    BN_ULONG Hsqr[P256_LIMBS];
    BN_ULONG Rsqr[P256_LIMBS];
    BN_ULONG Hcub[P256_LIMBS];

    BN_ULONG res_x[P256_LIMBS];
    BN_ULONG res_y[P256_LIMBS];
    BN_ULONG res_z[P256_LIMBS];

    BN_ULONG in1infty, in2infty;

    const BN_ULONG *in1_x = a->X;
    const BN_ULONG *in1_y = a->Y;
    const BN_ULONG *in1_z = a->Z;

    const BN_ULONG *in2_x = b->X;
    const BN_ULONG *in2_y = b->Y;
    const BN_ULONG *in2_z = b->Z;

    /*
     * Infinity is encoded as (,,0)
     */
    in1infty = (in1_z[0] | in1_z[1] | in1_z[2] | in1_z[3]);
    if (P256_LIMBS == 8)
        in1infty |= (in1_z[4] | in1_z[5] | in1_z[6] | in1_z[7]);

    in2infty = (in2_z[0] | in2_z[1] | in2_z[2] | in2_z[3]);
    if (P256_LIMBS == 8)
        in2infty |= (in2_z[4] | in2_z[5] | in2_z[6] | in2_z[7]);

    in1infty = is_zero(in1infty);
    in2infty = is_zero(in2infty);

    ecp_nistz256_sqr_mont(Z2sqr, in2_z);        /* Z2^2 */
    ecp_nistz256_sqr_mont(Z1sqr, in1_z);        /* Z1^2 */

    ecp_nistz256_mul_mont(S1, Z2sqr, in2_z);    /* S1 = Z2^3 */
    ecp_nistz256_mul_mont(S2, Z1sqr, in1_z);    /* S2 = Z1^3 */

    ecp_nistz256_mul_mont(S1, S1, in1_y);       /* S1 = Y1*Z2^3 */
    ecp_nistz256_mul_mont(S2, S2, in2_y);       /* S2 = Y2*Z1^3 */
    ecp_nistz256_sub(R, S2, S1);                /* R = S2 - S1 */

    ecp_nistz256_mul_mont(U1, in1_x, Z2sqr);    /* U1 = X1*Z2^2 */
    ecp_nistz256_mul_mont(U2, in2_x, Z1sqr);    /* U2 = X2*Z1^2 */
    ecp_nistz256_sub(H, U2, U1);                /* H = U2 - U1 */

    /*
     * This should not happen during sign/ecdh, so no constant-time violation
     */
    if (is_equal(U1, U2) && !in1infty && !in2infty) {
        if (is_equal(S1, S2)) {
            ecp_nistz256_point_double(r, a);
            return;
        } else {
            memset(r, 0, sizeof(*r));
            return;
        }
    }

    ecp_nistz256_sqr_mont(Rsqr, R);             /* R^2 */
    ecp_nistz256_mul_mont(res_z, H, in1_z);     /* Z3 = H*Z1*Z2 */
    ecp_nistz256_sqr_mont(Hsqr, H);             /* H^2 */
    ecp_nistz256_mul_mont(res_z, res_z, in2_z); /* Z3 = H*Z1*Z2 */
    ecp_nistz256_mul_mont(Hcub, Hsqr, H);       /* H^3 */

    ecp_nistz256_mul_mont(U2, U1, Hsqr);        /* U1*H^2 */
    ecp_nistz256_mul_by_2(Hsqr, U2);            /* 2*U1*H^2 */

    ecp_nistz256_sub(res_x, Rsqr, Hsqr);
    ecp_nistz256_sub(res_x, res_x, Hcub);

    ecp_nistz256_sub(res_y, U2, res_x);

    ecp_nistz256_mul_mont(S2, S1, Hcub);
    ecp_nistz256_mul_mont(res_y, R, res_y);
    ecp_nistz256_sub(res_y, res_y, S2);

    copy_conditional(res_x, in2_x, in1infty);
    copy_conditional(res_y, in2_y, in1infty);
    copy_conditional(res_z, in2_z, in1infty);

    copy_conditional(res_x, in1_x, in2infty);
    copy_conditional(res_y, in1_y, in2infty);
    copy_conditional(res_z, in1_z, in2infty);

    memcpy(r->X, res_x, sizeof(res_x));
    memcpy(r->Y, res_y, sizeof(res_y));
    memcpy(r->Z, res_z, sizeof(res_z));
}
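/*
 * A note on the tail above: if a is infinity the result must be b and
 * vice versa. Both fixups are applied with constant-time conditional
 * copies rather than branches; copy_conditional(res_x, in2_x, in1infty)
 * leaves res_x untouched unless in1infty is all-ones.
 */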
/* Point addition when b is known to be affine: r = a+b */
static void ecp_nistz256_point_add_affine(P256_POINT *r,
                                          const P256_POINT *a,
                                          const P256_POINT_AFFINE *b)
{
    BN_ULONG U2[P256_LIMBS], S2[P256_LIMBS];
    BN_ULONG Z1sqr[P256_LIMBS];
    BN_ULONG H[P256_LIMBS], R[P256_LIMBS];
    BN_ULONG Hsqr[P256_LIMBS];
    BN_ULONG Rsqr[P256_LIMBS];
    BN_ULONG Hcub[P256_LIMBS];

    BN_ULONG res_x[P256_LIMBS];
    BN_ULONG res_y[P256_LIMBS];
    BN_ULONG res_z[P256_LIMBS];

    BN_ULONG in1infty, in2infty;

    const BN_ULONG *in1_x = a->X;
    const BN_ULONG *in1_y = a->Y;
    const BN_ULONG *in1_z = a->Z;

    const BN_ULONG *in2_x = b->X;
    const BN_ULONG *in2_y = b->Y;

    /*
     * Infinity is encoded as (,,0)
     */
    in1infty = (in1_z[0] | in1_z[1] | in1_z[2] | in1_z[3]);
    if (P256_LIMBS == 8)
        in1infty |= (in1_z[4] | in1_z[5] | in1_z[6] | in1_z[7]);

    /*
     * In affine representation we encode infinity as (0,0), which is
     * not on the curve, so it is OK
     */
    in2infty = (in2_x[0] | in2_x[1] | in2_x[2] | in2_x[3] |
                in2_y[0] | in2_y[1] | in2_y[2] | in2_y[3]);
    if (P256_LIMBS == 8)
        in2infty |= (in2_x[4] | in2_x[5] | in2_x[6] | in2_x[7] |
                     in2_y[4] | in2_y[5] | in2_y[6] | in2_y[7]);

    in1infty = is_zero(in1infty);
    in2infty = is_zero(in2infty);

    ecp_nistz256_sqr_mont(Z1sqr, in1_z);        /* Z1^2 */

    ecp_nistz256_mul_mont(U2, in2_x, Z1sqr);    /* U2 = X2*Z1^2 */
    ecp_nistz256_sub(H, U2, in1_x);             /* H = U2 - U1 */

    ecp_nistz256_mul_mont(S2, Z1sqr, in1_z);    /* S2 = Z1^3 */

    ecp_nistz256_mul_mont(res_z, H, in1_z);     /* Z3 = H*Z1*Z2 */

    ecp_nistz256_mul_mont(S2, S2, in2_y);       /* S2 = Y2*Z1^3 */
    ecp_nistz256_sub(R, S2, in1_y);             /* R = S2 - S1 */

    ecp_nistz256_sqr_mont(Hsqr, H);             /* H^2 */
    ecp_nistz256_sqr_mont(Rsqr, R);             /* R^2 */
    ecp_nistz256_mul_mont(Hcub, Hsqr, H);       /* H^3 */

    ecp_nistz256_mul_mont(U2, in1_x, Hsqr);     /* U1*H^2 */
    ecp_nistz256_mul_by_2(Hsqr, U2);            /* 2*U1*H^2 */

    ecp_nistz256_sub(res_x, Rsqr, Hsqr);
    ecp_nistz256_sub(res_x, res_x, Hcub);
    ecp_nistz256_sub(H, U2, res_x);

    ecp_nistz256_mul_mont(S2, in1_y, Hcub);
    ecp_nistz256_mul_mont(H, H, R);
    ecp_nistz256_sub(res_y, H, S2);

    copy_conditional(res_x, in2_x, in1infty);
    copy_conditional(res_x, in1_x, in2infty);

    copy_conditional(res_y, in2_y, in1infty);
    copy_conditional(res_y, in1_y, in2infty);

    copy_conditional(res_z, ONE, in1infty);
    copy_conditional(res_z, in1_z, in2infty);

    memcpy(r->X, res_x, sizeof(res_x));
    memcpy(r->Y, res_y, sizeof(res_y));
    memcpy(r->Z, res_z, sizeof(res_z));
}
#endif
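/*
 * With b affine (Z2 = 1) the general addition simplifies to U1 = X1,
 * S1 = Y1 and Z3 = H*Z1, which is why the variant above saves four
 * field multiplications and one squaring compared to the full addition.
 */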
/* r = in^-1 mod p */
static void ecp_nistz256_mod_inverse(BN_ULONG r[P256_LIMBS],
                                     const BN_ULONG in[P256_LIMBS])
{
    /*
     * The modulus is ffffffff00000001 0000000000000000 00000000ffffffff
     * ffffffffffffffff. We use Fermat's Little Theorem (FLT) and raise
     * |in| to the power modulus - 2.
     */
    BN_ULONG p2[P256_LIMBS];
    BN_ULONG p4[P256_LIMBS];
    BN_ULONG p8[P256_LIMBS];
    BN_ULONG p16[P256_LIMBS];
    BN_ULONG p32[P256_LIMBS];
    BN_ULONG res[P256_LIMBS];
    int i;

    ecp_nistz256_sqr_mont(res, in);
    ecp_nistz256_mul_mont(p2, res, in);         /* in^0x3 */

    ecp_nistz256_sqr_mont(res, p2);
    ecp_nistz256_sqr_mont(res, res);
    ecp_nistz256_mul_mont(p4, res, p2);         /* in^0xf */

    ecp_nistz256_sqr_mont(res, p4);
    ecp_nistz256_sqr_mont(res, res);
    ecp_nistz256_sqr_mont(res, res);
    ecp_nistz256_sqr_mont(res, res);
    ecp_nistz256_mul_mont(p8, res, p4);         /* in^0xff */

    ecp_nistz256_sqr_mont(res, p8);
    for (i = 0; i < 7; i++)
        ecp_nistz256_sqr_mont(res, res);
    ecp_nistz256_mul_mont(p16, res, p8);        /* in^0xffff */

    ecp_nistz256_sqr_mont(res, p16);
    for (i = 0; i < 15; i++)
        ecp_nistz256_sqr_mont(res, res);
    ecp_nistz256_mul_mont(p32, res, p16);       /* in^0xffffffff */

    ecp_nistz256_sqr_mont(res, p32);
    for (i = 0; i < 31; i++)
        ecp_nistz256_sqr_mont(res, res);
    ecp_nistz256_mul_mont(res, res, in);        /* in^0xffffffff00000001 */

    for (i = 0; i < 32 * 4; i++)
        ecp_nistz256_sqr_mont(res, res);
    ecp_nistz256_mul_mont(res, res, p32);

    for (i = 0; i < 32; i++)
        ecp_nistz256_sqr_mont(res, res);
    ecp_nistz256_mul_mont(res, res, p32);

    for (i = 0; i < 16; i++)
        ecp_nistz256_sqr_mont(res, res);
    ecp_nistz256_mul_mont(res, res, p16);

    for (i = 0; i < 8; i++)
        ecp_nistz256_sqr_mont(res, res);
    ecp_nistz256_mul_mont(res, res, p8);

    ecp_nistz256_sqr_mont(res, res);
    ecp_nistz256_sqr_mont(res, res);
    ecp_nistz256_sqr_mont(res, res);
    ecp_nistz256_sqr_mont(res, res);
    ecp_nistz256_mul_mont(res, res, p4);

    ecp_nistz256_sqr_mont(res, res);
    ecp_nistz256_sqr_mont(res, res);
    ecp_nistz256_mul_mont(res, res, p2);

    ecp_nistz256_sqr_mont(res, res);
    ecp_nistz256_sqr_mont(res, res);
    ecp_nistz256_mul_mont(res, res, in);

    memcpy(r, res, sizeof(res));
}
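/*
 * Shape of the chain above (a reading aid, not extra code): p2, p4, p8,
 * p16 and p32 hold in^(2^k - 1) for k = 2, 4, 8, 16, 32, i.e. exponents
 * that are runs of k one-bits. The tail then assembles the exponent
 * p - 2 = ffffffff00000001 0000000000000000 00000000ffffffff
 * fffffffffffffffd out of those runs, with squarings shifting the
 * exponent left and multiplications gluing runs into it.
 */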
/*
 * ecp_nistz256_bignum_to_field_elem copies the contents of |in| to |out| and
 * returns one if it fits. Otherwise it returns zero.
 */
__owur static int ecp_nistz256_bignum_to_field_elem(BN_ULONG out[P256_LIMBS],
                                                    const BIGNUM *in)
{
    return bn_copy_words(out, in, P256_LIMBS);
}
/* r = sum(scalar[i]*point[i]) */
__owur static int ecp_nistz256_windowed_mul(const EC_GROUP *group,
                                            P256_POINT *r,
                                            const BIGNUM **scalar,
                                            const EC_POINT **point,
                                            size_t num, BN_CTX *ctx)
{
    size_t i;
    int j, ret = 0;
    unsigned int idx;
    unsigned char (*p_str)[33] = NULL;
    const unsigned int window_size = 5;
    const unsigned int mask = (1 << (window_size + 1)) - 1;
    unsigned int wvalue;
    P256_POINT *temp;           /* place for 5 temporary points */
    const BIGNUM **scalars = NULL;
    P256_POINT (*table)[16] = NULL;
    void *table_storage = NULL;

    if ((num * 16 + 6) > OPENSSL_MALLOC_MAX_NELEMS(P256_POINT)
        || (table_storage =
            OPENSSL_malloc((num * 16 + 5) * sizeof(P256_POINT) + 64)) == NULL
        || (p_str =
            OPENSSL_malloc(num * 33 * sizeof(unsigned char))) == NULL
        || (scalars = OPENSSL_malloc(num * sizeof(BIGNUM *))) == NULL) {
        ECerr(EC_F_ECP_NISTZ256_WINDOWED_MUL, ERR_R_MALLOC_FAILURE);
        goto err;
    }

    table = (void *)ALIGNPTR(table_storage, 64);
    temp = (P256_POINT *)(table + num);

    for (i = 0; i < num; i++) {
        P256_POINT *row = table[i];

        /* This is an unusual input, we don't guarantee constant-timeness. */
        if ((BN_num_bits(scalar[i]) > 256) || BN_is_negative(scalar[i])) {
            BIGNUM *mod;

            if ((mod = BN_CTX_get(ctx)) == NULL)
                goto err;
            if (!BN_nnmod(mod, scalar[i], group->order, ctx)) {
                ECerr(EC_F_ECP_NISTZ256_WINDOWED_MUL, ERR_R_BN_LIB);
                goto err;
            }
            scalars[i] = mod;
        } else
            scalars[i] = scalar[i];

        for (j = 0; j < bn_get_top(scalars[i]) * BN_BYTES; j += BN_BYTES) {
            BN_ULONG d = bn_get_words(scalars[i])[j / BN_BYTES];

            p_str[i][j + 0] = (unsigned char)d;
            p_str[i][j + 1] = (unsigned char)(d >> 8);
            p_str[i][j + 2] = (unsigned char)(d >> 16);
            p_str[i][j + 3] = (unsigned char)(d >>= 24);
            if (BN_BYTES == 8) {
                d >>= 8;
                p_str[i][j + 4] = (unsigned char)d;
                p_str[i][j + 5] = (unsigned char)(d >> 8);
                p_str[i][j + 6] = (unsigned char)(d >> 16);
                p_str[i][j + 7] = (unsigned char)(d >> 24);
            }
        }
        for (; j < 33; j++)
            p_str[i][j] = 0;

        if (!ecp_nistz256_bignum_to_field_elem(temp[0].X, point[i]->X)
            || !ecp_nistz256_bignum_to_field_elem(temp[0].Y, point[i]->Y)
            || !ecp_nistz256_bignum_to_field_elem(temp[0].Z, point[i]->Z)) {
            ECerr(EC_F_ECP_NISTZ256_WINDOWED_MUL,
                  EC_R_COORDINATES_OUT_OF_RANGE);
            goto err;
        }

        /*
         * row[0] is implicitly (0,0,0) (the point at infinity), therefore it
         * is not stored. All other values are actually stored with an offset
         * of -1 in table.
         */

        ecp_nistz256_scatter_w5  (row, &temp[0], 1);
        ecp_nistz256_point_double(&temp[1], &temp[0]);              /*1+1=2  */
        ecp_nistz256_scatter_w5  (row, &temp[1], 2);
        ecp_nistz256_point_add   (&temp[2], &temp[1], &temp[0]);    /*2+1=3  */
        ecp_nistz256_scatter_w5  (row, &temp[2], 3);
        ecp_nistz256_point_double(&temp[1], &temp[1]);              /*2*2=4  */
        ecp_nistz256_scatter_w5  (row, &temp[1], 4);
        ecp_nistz256_point_double(&temp[2], &temp[2]);              /*2*3=6  */
        ecp_nistz256_scatter_w5  (row, &temp[2], 6);
        ecp_nistz256_point_add   (&temp[3], &temp[1], &temp[0]);    /*4+1=5  */
        ecp_nistz256_scatter_w5  (row, &temp[3], 5);
        ecp_nistz256_point_add   (&temp[4], &temp[2], &temp[0]);    /*6+1=7  */
        ecp_nistz256_scatter_w5  (row, &temp[4], 7);
        ecp_nistz256_point_double(&temp[1], &temp[1]);              /*2*4=8  */
        ecp_nistz256_scatter_w5  (row, &temp[1], 8);
        ecp_nistz256_point_double(&temp[2], &temp[2]);              /*2*6=12 */
        ecp_nistz256_scatter_w5  (row, &temp[2], 12);
        ecp_nistz256_point_double(&temp[3], &temp[3]);              /*2*5=10 */
        ecp_nistz256_scatter_w5  (row, &temp[3], 10);
        ecp_nistz256_point_double(&temp[4], &temp[4]);              /*2*7=14 */
        ecp_nistz256_scatter_w5  (row, &temp[4], 14);
        ecp_nistz256_point_add   (&temp[2], &temp[2], &temp[0]);    /*12+1=13*/
        ecp_nistz256_scatter_w5  (row, &temp[2], 13);
        ecp_nistz256_point_add   (&temp[3], &temp[3], &temp[0]);    /*10+1=11*/
        ecp_nistz256_scatter_w5  (row, &temp[3], 11);
        ecp_nistz256_point_add   (&temp[4], &temp[4], &temp[0]);    /*14+1=15*/
        ecp_nistz256_scatter_w5  (row, &temp[4], 15);
        ecp_nistz256_point_add   (&temp[2], &temp[1], &temp[0]);    /*8+1=9  */
        ecp_nistz256_scatter_w5  (row, &temp[2], 9);
        ecp_nistz256_point_double(&temp[1], &temp[1]);              /*2*8=16 */
        ecp_nistz256_scatter_w5  (row, &temp[1], 16);
    }
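    /*
     * Note on the schedule above: all 16 multiples 1*P..16*P are produced
     * with just 8 point doublings and 7 point additions, each value being
     * scattered into the table as soon as it is ready.
     */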
    idx = 255;

    wvalue = p_str[0][(idx - 1) / 8];
    wvalue = (wvalue >> ((idx - 1) % 8)) & mask;

    /*
     * We gather to temp[0], because we know its position relative
     * to table
     */
    ecp_nistz256_gather_w5(&temp[0], table[0], _booth_recode_w5(wvalue) >> 1);
    memcpy(r, &temp[0], sizeof(temp[0]));

    while (idx >= 5) {
        for (i = (idx == 255 ? 1 : 0); i < num; i++) {
            unsigned int off = (idx - 1) / 8;

            wvalue = p_str[i][off] | p_str[i][off + 1] << 8;
            wvalue = (wvalue >> ((idx - 1) % 8)) & mask;

            wvalue = _booth_recode_w5(wvalue);

            ecp_nistz256_gather_w5(&temp[0], table[i], wvalue >> 1);

            ecp_nistz256_neg(temp[1].Y, temp[0].Y);
            copy_conditional(temp[0].Y, temp[1].Y, (wvalue & 1));

            ecp_nistz256_point_add(r, r, &temp[0]);
        }

        idx -= window_size;

        ecp_nistz256_point_double(r, r);
        ecp_nistz256_point_double(r, r);
        ecp_nistz256_point_double(r, r);
        ecp_nistz256_point_double(r, r);
        ecp_nistz256_point_double(r, r);
    }

    /* Final window */
    for (i = 0; i < num; i++) {
        wvalue = p_str[i][0];
        wvalue = (wvalue << 1) & mask;

        wvalue = _booth_recode_w5(wvalue);

        ecp_nistz256_gather_w5(&temp[0], table[i], wvalue >> 1);

        ecp_nistz256_neg(temp[1].Y, temp[0].Y);
        copy_conditional(temp[0].Y, temp[1].Y, wvalue & 1);

        ecp_nistz256_point_add(r, r, &temp[0]);
    }

    ret = 1;
 err:
    OPENSSL_free(table_storage);
    OPENSSL_free(p_str);
    OPENSSL_free(scalars);
    return ret;
}
/* Coordinates of G, for which we have precomputed tables */
static const BN_ULONG def_xG[P256_LIMBS] = {
    TOBN(0x79e730d4, 0x18a9143c), TOBN(0x75ba95fc, 0x5fedb601),
    TOBN(0x79fb732b, 0x77622510), TOBN(0x18905f76, 0xa53755c6)
};

static const BN_ULONG def_yG[P256_LIMBS] = {
    TOBN(0xddf25357, 0xce95560a), TOBN(0x8b4ab8e4, 0xba19e45c),
    TOBN(0xd2e88688, 0xdd21f325), TOBN(0x8571ff18, 0x25885d85)
};
/*
 * ecp_nistz256_is_affine_G returns one if |generator| is the standard, P-256
 * generator.
 */
static int ecp_nistz256_is_affine_G(const EC_POINT *generator)
{
    return (bn_get_top(generator->X) == P256_LIMBS) &&
        (bn_get_top(generator->Y) == P256_LIMBS) &&
        is_equal(bn_get_words(generator->X), def_xG) &&
        is_equal(bn_get_words(generator->Y), def_yG) &&
        is_one(generator->Z);
}
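/*
 * Note: def_xG and def_yG above are the generator's affine coordinates
 * already converted into the Montgomery domain, which is how the Montgomery
 * field methods store EC_POINT coordinates, so the words can be compared
 * directly without any conversion.
 */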
__owur static int ecp_nistz256_mult_precompute(EC_GROUP *group, BN_CTX *ctx)
{
    /*
     * We precompute a table for a Booth encoded exponent (wNAF) based
     * computation. Each table holds 64 values for safe access, with an
     * implicit value of infinity at index zero. We use a window of size 7,
     * and therefore require ceil(256/7) = 37 tables.
     */
    const BIGNUM *order;
    EC_POINT *P = NULL, *T = NULL;
    const EC_POINT *generator;
    NISTZ256_PRE_COMP *pre_comp;
    BN_CTX *new_ctx = NULL;
    int i, j, k, ret = 0;
    size_t w;

    PRECOMP256_ROW *preComputedTable = NULL;
    unsigned char *precomp_storage = NULL;

    /* if there is an old NISTZ256_PRE_COMP object, throw it away */
    EC_pre_comp_free(group);
    generator = EC_GROUP_get0_generator(group);
    if (generator == NULL) {
        ECerr(EC_F_ECP_NISTZ256_MULT_PRECOMPUTE, EC_R_UNDEFINED_GENERATOR);
        return 0;
    }

    if (ecp_nistz256_is_affine_G(generator)) {
        /*
         * No need to calculate tables for the standard generator because we
         * have them statically.
         */
        return 1;
    }

    if ((pre_comp = ecp_nistz256_pre_comp_new(group)) == NULL)
        return 0;

    if (ctx == NULL) {
        ctx = new_ctx = BN_CTX_new();
        if (ctx == NULL)
            goto err;
    }

    BN_CTX_start(ctx);

    order = EC_GROUP_get0_order(group);
    if (order == NULL)
        goto err;

    if (BN_is_zero(order)) {
        ECerr(EC_F_ECP_NISTZ256_MULT_PRECOMPUTE, EC_R_UNKNOWN_ORDER);
        goto err;
    }

    w = 7;

    if ((precomp_storage =
         OPENSSL_malloc(37 * 64 * sizeof(P256_POINT_AFFINE) + 64)) == NULL) {
        ECerr(EC_F_ECP_NISTZ256_MULT_PRECOMPUTE, ERR_R_MALLOC_FAILURE);
        goto err;
    }

    preComputedTable = (void *)ALIGNPTR(precomp_storage, 64);

    P = EC_POINT_new(group);
    T = EC_POINT_new(group);
    if (P == NULL || T == NULL)
        goto err;

    /*
     * The zero entry is implicitly infinity, and we skip it, storing other
     * values with -1 offset.
     */
    if (!EC_POINT_copy(T, generator))
        goto err;

    for (k = 0; k < 64; k++) {
        if (!EC_POINT_copy(P, T))
            goto err;
        for (j = 0; j < 37; j++) {
            P256_POINT_AFFINE temp;
            /*
             * It would be faster to use EC_POINTs_make_affine and
             * make multiple points affine at the same time.
             */
            if (!EC_POINT_make_affine(group, P, ctx))
                goto err;
            if (!ecp_nistz256_bignum_to_field_elem(temp.X, P->X) ||
                !ecp_nistz256_bignum_to_field_elem(temp.Y, P->Y)) {
                ECerr(EC_F_ECP_NISTZ256_MULT_PRECOMPUTE,
                      EC_R_COORDINATES_OUT_OF_RANGE);
                goto err;
            }
            ecp_nistz256_scatter_w7(preComputedTable[j], &temp, k);
            for (i = 0; i < 7; i++) {
                if (!EC_POINT_dbl(group, P, P, ctx))
                    goto err;
            }
        }
        if (!EC_POINT_add(group, T, T, generator, ctx))
            goto err;
    }

    pre_comp->group = group;
    pre_comp->w = w;
    pre_comp->precomp = preComputedTable;
    pre_comp->precomp_storage = precomp_storage;
    precomp_storage = NULL;
    SETPRECOMP(group, nistz256, pre_comp);
    pre_comp = NULL;
    ret = 1;

 err:
    if (ctx != NULL)
        BN_CTX_end(ctx);
    BN_CTX_free(new_ctx);

    EC_nistz256_pre_comp_free(pre_comp);
    OPENSSL_free(precomp_storage);
    EC_POINT_free(P);
    EC_POINT_free(T);
    return ret;
}
/*
 * Note that by default ECP_NISTZ256_AVX2 is undefined. While it's great
 * code processing 4 points in parallel, the corresponding serial operation
 * is several times slower, because it uses 29x29=58-bit multiplication
 * as opposed to 64x64=128-bit in the integer-only scalar case. As a result
 * it doesn't provide a *significant* performance improvement. Note also
 * that just defining ECP_NISTZ256_AVX2 is not sufficient to make it work;
 * you'd need to compile the asm/ecp_nistz256-avx2.pl module as well.
 */
#if defined(ECP_NISTZ256_AVX2)
# if !(defined(__x86_64) || defined(__x86_64__) || \
       defined(_M_AMD64) || defined(_M_X64)) || \
     !(defined(__GNUC__) || defined(_MSC_VER)) /* this is for ALIGN32 */
#  undef ECP_NISTZ256_AVX2
# else
/* Constant time access, loading four values, from four consecutive tables */
void ecp_nistz256_avx2_multi_gather_w7(void *result, const void *in,
                                       int index0, int index1, int index2,
                                       int index3);
void ecp_nistz256_avx2_transpose_convert(void *RESULTx4, const void *in);
void ecp_nistz256_avx2_convert_transpose_back(void *result, const void *Ax4);
void ecp_nistz256_avx2_point_add_affine_x4(void *RESULTx4, const void *Ax4,
                                           const void *Bx4);
void ecp_nistz256_avx2_point_add_affines_x4(void *RESULTx4, const void *Ax4,
                                            const void *Bx4);
void ecp_nistz256_avx2_to_mont(void *RESULTx4, const void *Ax4);
void ecp_nistz256_avx2_from_mont(void *RESULTx4, const void *Ax4);
void ecp_nistz256_avx2_set1(void *RESULTx4);
int ecp_nistz_avx2_eligible(void);
static void booth_recode_w7(unsigned char *sign,
                            unsigned char *digit, unsigned char in)
{
    unsigned char s, d;

    s = ~((in >> 7) - 1);
    d = (1 << 8) - in - 1;
    d = (d & s) | (in & ~s);
    d = (d >> 1) + (d & 1);

    *sign = s & 1;
    *digit = d;
}
/*
 * ecp_nistz256_avx2_mul_g performs multiplication by G, using only the
 * precomputed table. It does 4 affine point additions in parallel,
 * significantly speeding up point multiplication for a fixed value.
 */
static void ecp_nistz256_avx2_mul_g(P256_POINT *r,
                                    unsigned char p_str[33],
                                    const P256_POINT_AFFINE(*preComputedTable)[64])
{
    const unsigned int window_size = 7;
    const unsigned int mask = (1 << (window_size + 1)) - 1;
    unsigned int wvalue;
    /* Using 4 windows at a time */
    unsigned char sign0, digit0;
    unsigned char sign1, digit1;
    unsigned char sign2, digit2;
    unsigned char sign3, digit3;
    unsigned int idx = 0;
    BN_ULONG tmp[P256_LIMBS];
    int i;

    ALIGN32 BN_ULONG aX4[4 * 9 * 3] = { 0 };
    ALIGN32 BN_ULONG bX4[4 * 9 * 2] = { 0 };
    ALIGN32 P256_POINT_AFFINE point_arr[4];
    ALIGN32 P256_POINT res_point_arr[4];

    /* Initial four windows */
    wvalue = *((u16 *)&p_str[0]);
    wvalue = (wvalue << 1) & mask;
    idx += window_size;
    booth_recode_w7(&sign0, &digit0, wvalue);
    wvalue = *((u16 *)&p_str[(idx - 1) / 8]);
    wvalue = (wvalue >> ((idx - 1) % 8)) & mask;
    idx += window_size;
    booth_recode_w7(&sign1, &digit1, wvalue);
    wvalue = *((u16 *)&p_str[(idx - 1) / 8]);
    wvalue = (wvalue >> ((idx - 1) % 8)) & mask;
    idx += window_size;
    booth_recode_w7(&sign2, &digit2, wvalue);
    wvalue = *((u16 *)&p_str[(idx - 1) / 8]);
    wvalue = (wvalue >> ((idx - 1) % 8)) & mask;
    idx += window_size;
    booth_recode_w7(&sign3, &digit3, wvalue);

    ecp_nistz256_avx2_multi_gather_w7(point_arr, preComputedTable[0],
                                      digit0, digit1, digit2, digit3);

    ecp_nistz256_neg(tmp, point_arr[0].Y);
    copy_conditional(point_arr[0].Y, tmp, sign0);
    ecp_nistz256_neg(tmp, point_arr[1].Y);
    copy_conditional(point_arr[1].Y, tmp, sign1);
    ecp_nistz256_neg(tmp, point_arr[2].Y);
    copy_conditional(point_arr[2].Y, tmp, sign2);
    ecp_nistz256_neg(tmp, point_arr[3].Y);
    copy_conditional(point_arr[3].Y, tmp, sign3);

    ecp_nistz256_avx2_transpose_convert(aX4, point_arr);
    ecp_nistz256_avx2_to_mont(aX4, aX4);
    ecp_nistz256_avx2_to_mont(&aX4[4 * 9], &aX4[4 * 9]);
    ecp_nistz256_avx2_set1(&aX4[4 * 9 * 2]);

    wvalue = *((u16 *)&p_str[(idx - 1) / 8]);
    wvalue = (wvalue >> ((idx - 1) % 8)) & mask;
    idx += window_size;
    booth_recode_w7(&sign0, &digit0, wvalue);
    wvalue = *((u16 *)&p_str[(idx - 1) / 8]);
    wvalue = (wvalue >> ((idx - 1) % 8)) & mask;
    idx += window_size;
    booth_recode_w7(&sign1, &digit1, wvalue);
    wvalue = *((u16 *)&p_str[(idx - 1) / 8]);
    wvalue = (wvalue >> ((idx - 1) % 8)) & mask;
    idx += window_size;
    booth_recode_w7(&sign2, &digit2, wvalue);
    wvalue = *((u16 *)&p_str[(idx - 1) / 8]);
    wvalue = (wvalue >> ((idx - 1) % 8)) & mask;
    idx += window_size;
    booth_recode_w7(&sign3, &digit3, wvalue);

    ecp_nistz256_avx2_multi_gather_w7(point_arr, preComputedTable[4 * 1],
                                      digit0, digit1, digit2, digit3);

    ecp_nistz256_neg(tmp, point_arr[0].Y);
    copy_conditional(point_arr[0].Y, tmp, sign0);
    ecp_nistz256_neg(tmp, point_arr[1].Y);
    copy_conditional(point_arr[1].Y, tmp, sign1);
    ecp_nistz256_neg(tmp, point_arr[2].Y);
    copy_conditional(point_arr[2].Y, tmp, sign2);
    ecp_nistz256_neg(tmp, point_arr[3].Y);
    copy_conditional(point_arr[3].Y, tmp, sign3);

    ecp_nistz256_avx2_transpose_convert(bX4, point_arr);
    ecp_nistz256_avx2_to_mont(bX4, bX4);
    ecp_nistz256_avx2_to_mont(&bX4[4 * 9], &bX4[4 * 9]);
    /* Optimized when both inputs are affine */
    ecp_nistz256_avx2_point_add_affines_x4(aX4, aX4, bX4);

    for (i = 2; i < 9; i++) {
        wvalue = *((u16 *)&p_str[(idx - 1) / 8]);
        wvalue = (wvalue >> ((idx - 1) % 8)) & mask;
        idx += window_size;
        booth_recode_w7(&sign0, &digit0, wvalue);
        wvalue = *((u16 *)&p_str[(idx - 1) / 8]);
        wvalue = (wvalue >> ((idx - 1) % 8)) & mask;
        idx += window_size;
        booth_recode_w7(&sign1, &digit1, wvalue);
        wvalue = *((u16 *)&p_str[(idx - 1) / 8]);
        wvalue = (wvalue >> ((idx - 1) % 8)) & mask;
        idx += window_size;
        booth_recode_w7(&sign2, &digit2, wvalue);
        wvalue = *((u16 *)&p_str[(idx - 1) / 8]);
        wvalue = (wvalue >> ((idx - 1) % 8)) & mask;
        idx += window_size;
        booth_recode_w7(&sign3, &digit3, wvalue);

        ecp_nistz256_avx2_multi_gather_w7(point_arr,
                                          preComputedTable[4 * i],
                                          digit0, digit1, digit2, digit3);

        ecp_nistz256_neg(tmp, point_arr[0].Y);
        copy_conditional(point_arr[0].Y, tmp, sign0);
        ecp_nistz256_neg(tmp, point_arr[1].Y);
        copy_conditional(point_arr[1].Y, tmp, sign1);
        ecp_nistz256_neg(tmp, point_arr[2].Y);
        copy_conditional(point_arr[2].Y, tmp, sign2);
        ecp_nistz256_neg(tmp, point_arr[3].Y);
        copy_conditional(point_arr[3].Y, tmp, sign3);

        ecp_nistz256_avx2_transpose_convert(bX4, point_arr);
        ecp_nistz256_avx2_to_mont(bX4, bX4);
        ecp_nistz256_avx2_to_mont(&bX4[4 * 9], &bX4[4 * 9]);

        ecp_nistz256_avx2_point_add_affine_x4(aX4, aX4, bX4);
    }

    ecp_nistz256_avx2_from_mont(&aX4[4 * 9 * 0], &aX4[4 * 9 * 0]);
    ecp_nistz256_avx2_from_mont(&aX4[4 * 9 * 1], &aX4[4 * 9 * 1]);
    ecp_nistz256_avx2_from_mont(&aX4[4 * 9 * 2], &aX4[4 * 9 * 2]);

    ecp_nistz256_avx2_convert_transpose_back(res_point_arr, aX4);
    /* Last window is performed serially */
    wvalue = *((u16 *)&p_str[(idx - 1) / 8]);
    wvalue = (wvalue >> ((idx - 1) % 8)) & mask;
    booth_recode_w7(&sign0, &digit0, wvalue);
    ecp_nistz256_gather_w7((P256_POINT_AFFINE *)r,
                           preComputedTable[36], digit0);
    ecp_nistz256_neg(tmp, r->Y);
    copy_conditional(r->Y, tmp, sign0);
    memcpy(r->Z, ONE, sizeof(ONE));
    /* Sum the four windows */
    ecp_nistz256_point_add(r, r, &res_point_arr[0]);
    ecp_nistz256_point_add(r, r, &res_point_arr[1]);
    ecp_nistz256_point_add(r, r, &res_point_arr[2]);
    ecp_nistz256_point_add(r, r, &res_point_arr[3]);
}
# endif
#endif
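/*
 * Window accounting for ecp_nistz256_avx2_mul_g: 37 w=7 windows cover the
 * 256-bit scalar. Windows 0-35 are handled four at a time (two initial
 * batches plus seven loop iterations), and window 36 is handled serially
 * before the four accumulator points are summed into the result.
 */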
__owur static int ecp_nistz256_set_from_affine(EC_POINT *out, const EC_GROUP *group,
                                               const P256_POINT_AFFINE *in,
                                               BN_CTX *ctx)
{
    BIGNUM *x, *y;
    BN_ULONG d_x[P256_LIMBS], d_y[P256_LIMBS];
    int ret = 0;

    x = BN_new();
    if (x == NULL)
        return 0;
    y = BN_new();
    if (y == NULL) {
        BN_free(x);
        return 0;
    }
    memcpy(d_x, in->X, sizeof(d_x));
    bn_set_static_words(x, d_x, P256_LIMBS);

    memcpy(d_y, in->Y, sizeof(d_y));
    bn_set_static_words(y, d_y, P256_LIMBS);

    ret = EC_POINT_set_affine_coordinates_GFp(group, out, x, y, ctx);

    BN_free(x);
    BN_free(y);

    return ret;
}
/* r = scalar*G + sum(scalars[i]*points[i]) */
__owur static int ecp_nistz256_points_mul(const EC_GROUP *group,
                                          EC_POINT *r,
                                          const BIGNUM *scalar,
                                          size_t num,
                                          const EC_POINT *points[],
                                          const BIGNUM *scalars[], BN_CTX *ctx)
{
    int i = 0, ret = 0, no_precomp_for_generator = 0, p_is_infinity = 0;
    size_t j;
    unsigned char p_str[33] = { 0 };
    const PRECOMP256_ROW *preComputedTable = NULL;
    const NISTZ256_PRE_COMP *pre_comp = NULL;
    const EC_POINT *generator = NULL;
    BN_CTX *new_ctx = NULL;
    const BIGNUM **new_scalars = NULL;
    const EC_POINT **new_points = NULL;
    unsigned int idx = 0;
    const unsigned int window_size = 7;
    const unsigned int mask = (1 << (window_size + 1)) - 1;
    unsigned int wvalue;
    ALIGN32 union {
        P256_POINT p;
        P256_POINT_AFFINE a;
    } t, p;
    BIGNUM *tmp_scalar;

    if ((num + 1) == 0 || (num + 1) > OPENSSL_MALLOC_MAX_NELEMS(void *)) {
        ECerr(EC_F_ECP_NISTZ256_POINTS_MUL, ERR_R_MALLOC_FAILURE);
        return 0;
    }

    if (group->meth != r->meth) {
        ECerr(EC_F_ECP_NISTZ256_POINTS_MUL, EC_R_INCOMPATIBLE_OBJECTS);
        return 0;
    }

    if ((scalar == NULL) && (num == 0))
        return EC_POINT_set_to_infinity(group, r);

    for (j = 0; j < num; j++) {
        if (group->meth != points[j]->meth) {
            ECerr(EC_F_ECP_NISTZ256_POINTS_MUL, EC_R_INCOMPATIBLE_OBJECTS);
            return 0;
        }
    }

    if (ctx == NULL) {
        ctx = new_ctx = BN_CTX_new();
        if (ctx == NULL)
            goto err;
    }

    BN_CTX_start(ctx);

    if (scalar) {
        generator = EC_GROUP_get0_generator(group);
        if (generator == NULL) {
            ECerr(EC_F_ECP_NISTZ256_POINTS_MUL, EC_R_UNDEFINED_GENERATOR);
            goto err;
        }

        /* look if we can use precomputed multiples of generator */
        pre_comp = group->pre_comp.nistz256;

        if (pre_comp) {
            /*
             * If there is a precomputed table for the generator, check that
             * it was generated with the same generator.
             */
            EC_POINT *pre_comp_generator = EC_POINT_new(group);
            if (pre_comp_generator == NULL)
                goto err;

            if (!ecp_nistz256_set_from_affine(pre_comp_generator,
                                              group, pre_comp->precomp[0],
                                              ctx)) {
                EC_POINT_free(pre_comp_generator);
                goto err;
            }

            if (0 == EC_POINT_cmp(group, generator, pre_comp_generator, ctx))
                preComputedTable = (const PRECOMP256_ROW *)pre_comp->precomp;

            EC_POINT_free(pre_comp_generator);
        }

        if (preComputedTable == NULL && ecp_nistz256_is_affine_G(generator)) {
            /*
             * If there is no precomputed data, but the generator is the
             * default, a hardcoded table of precomputed data is used. This
             * is because applications, such as Apache, do not use
             * EC_KEY_precompute_mult.
             */
            preComputedTable = ecp_nistz256_precomputed;
        }

        if (preComputedTable) {
            if ((BN_num_bits(scalar) > 256)
                || BN_is_negative(scalar)) {
                if ((tmp_scalar = BN_CTX_get(ctx)) == NULL)
                    goto err;

                if (!BN_nnmod(tmp_scalar, scalar, group->order, ctx)) {
                    ECerr(EC_F_ECP_NISTZ256_POINTS_MUL, ERR_R_BN_LIB);
                    goto err;
                }
                scalar = tmp_scalar;
            }

            for (i = 0; i < bn_get_top(scalar) * BN_BYTES; i += BN_BYTES) {
                BN_ULONG d = bn_get_words(scalar)[i / BN_BYTES];

                p_str[i + 0] = (unsigned char)d;
                p_str[i + 1] = (unsigned char)(d >> 8);
                p_str[i + 2] = (unsigned char)(d >> 16);
                p_str[i + 3] = (unsigned char)(d >>= 24);
                if (BN_BYTES == 8) {
                    d >>= 8;
                    p_str[i + 4] = (unsigned char)d;
                    p_str[i + 5] = (unsigned char)(d >> 8);
                    p_str[i + 6] = (unsigned char)(d >> 16);
                    p_str[i + 7] = (unsigned char)(d >> 24);
                }
            }

            for (; i < 33; i++)
                p_str[i] = 0;

#if defined(ECP_NISTZ256_AVX2)
            if (ecp_nistz_avx2_eligible()) {
                ecp_nistz256_avx2_mul_g(&p.p, p_str, preComputedTable);
            } else
#endif
            {
                BN_ULONG infty;

                /* First window */
                wvalue = (p_str[0] << 1) & mask;
                idx += window_size;

                wvalue = _booth_recode_w7(wvalue);

                ecp_nistz256_gather_w7(&p.a, preComputedTable[0],
                                       wvalue >> 1);

                ecp_nistz256_neg(p.p.Z, p.p.Y);
                copy_conditional(p.p.Y, p.p.Z, wvalue & 1);

                /*
                 * Since affine infinity is encoded as (0,0) and
                 * Jacobian as (,,0), we need to harmonize them
                 * by assigning "one" or zero to Z.
                 */
                infty = (p.p.X[0] | p.p.X[1] | p.p.X[2] | p.p.X[3] |
                         p.p.Y[0] | p.p.Y[1] | p.p.Y[2] | p.p.Y[3]);
                if (P256_LIMBS == 8)
                    infty |= (p.p.X[4] | p.p.X[5] | p.p.X[6] | p.p.X[7] |
                              p.p.Y[4] | p.p.Y[5] | p.p.Y[6] | p.p.Y[7]);

                infty = 0 - is_zero(infty);
                infty = ~infty;

                p.p.Z[0] = ONE[0] & infty;
                p.p.Z[1] = ONE[1] & infty;
                p.p.Z[2] = ONE[2] & infty;
                p.p.Z[3] = ONE[3] & infty;
                if (P256_LIMBS == 8) {
                    p.p.Z[4] = ONE[4] & infty;
                    p.p.Z[5] = ONE[5] & infty;
                    p.p.Z[6] = ONE[6] & infty;
                    p.p.Z[7] = ONE[7] & infty;
                }

                for (i = 1; i < 37; i++) {
                    unsigned int off = (idx - 1) / 8;
                    wvalue = p_str[off] | p_str[off + 1] << 8;
                    wvalue = (wvalue >> ((idx - 1) % 8)) & mask;
                    idx += window_size;

                    wvalue = _booth_recode_w7(wvalue);

                    ecp_nistz256_gather_w7(&t.a,
                                           preComputedTable[i], wvalue >> 1);

                    ecp_nistz256_neg(t.p.Z, t.a.Y);
                    copy_conditional(t.a.Y, t.p.Z, wvalue & 1);

                    ecp_nistz256_point_add_affine(&p.p, &p.p, &t.a);
                }
            }
        } else {
            p_is_infinity = 1;
            no_precomp_for_generator = 1;
        }
    } else
        p_is_infinity = 1;

    if (no_precomp_for_generator) {
        /*
         * Without a precomputed table for the generator, it has to be
         * handled like a normal point.
         */
        new_scalars = OPENSSL_malloc((num + 1) * sizeof(BIGNUM *));
        if (new_scalars == NULL) {
            ECerr(EC_F_ECP_NISTZ256_POINTS_MUL, ERR_R_MALLOC_FAILURE);
            goto err;
        }

        new_points = OPENSSL_malloc((num + 1) * sizeof(EC_POINT *));
        if (new_points == NULL) {
            ECerr(EC_F_ECP_NISTZ256_POINTS_MUL, ERR_R_MALLOC_FAILURE);
            goto err;
        }

        memcpy(new_scalars, scalars, num * sizeof(BIGNUM *));
        new_scalars[num] = scalar;
        memcpy(new_points, points, num * sizeof(EC_POINT *));
        new_points[num] = generator;

        scalars = new_scalars;
        points = new_points;
        num++;
    }

    if (num) {
        P256_POINT *out = &t.p;
        if (p_is_infinity)
            out = &p.p;

        if (!ecp_nistz256_windowed_mul(group, out, scalars, points, num, ctx))
            goto err;

        if (!p_is_infinity)
            ecp_nistz256_point_add(&p.p, &p.p, out);
    }

    /* Not constant-time, but we're only operating on the public output. */
    if (!bn_set_words(r->X, p.p.X, P256_LIMBS) ||
        !bn_set_words(r->Y, p.p.Y, P256_LIMBS) ||
        !bn_set_words(r->Z, p.p.Z, P256_LIMBS)) {
        goto err;
    }
    r->Z_is_one = is_one(r->Z) & 1;
    ret = 1;

 err:
    if (ctx)
        BN_CTX_end(ctx);
    BN_CTX_free(new_ctx);
    OPENSSL_free(new_points);
    OPENSSL_free(new_scalars);
    return ret;
}
__owur static int ecp_nistz256_get_affine(const EC_GROUP *group,
                                          const EC_POINT *point,
                                          BIGNUM *x, BIGNUM *y, BN_CTX *ctx)
{
    BN_ULONG z_inv2[P256_LIMBS];
    BN_ULONG z_inv3[P256_LIMBS];
    BN_ULONG x_aff[P256_LIMBS];
    BN_ULONG y_aff[P256_LIMBS];
    BN_ULONG point_x[P256_LIMBS], point_y[P256_LIMBS], point_z[P256_LIMBS];
    BN_ULONG x_ret[P256_LIMBS], y_ret[P256_LIMBS];

    if (EC_POINT_is_at_infinity(group, point)) {
        ECerr(EC_F_ECP_NISTZ256_GET_AFFINE, EC_R_POINT_AT_INFINITY);
        return 0;
    }

    if (!ecp_nistz256_bignum_to_field_elem(point_x, point->X) ||
        !ecp_nistz256_bignum_to_field_elem(point_y, point->Y) ||
        !ecp_nistz256_bignum_to_field_elem(point_z, point->Z)) {
        ECerr(EC_F_ECP_NISTZ256_GET_AFFINE, EC_R_COORDINATES_OUT_OF_RANGE);
        return 0;
    }

    ecp_nistz256_mod_inverse(z_inv3, point_z);
    ecp_nistz256_sqr_mont(z_inv2, z_inv3);
    ecp_nistz256_mul_mont(x_aff, z_inv2, point_x);

    if (x != NULL) {
        ecp_nistz256_from_mont(x_ret, x_aff);
        if (!bn_set_words(x, x_ret, P256_LIMBS))
            return 0;
    }

    if (y != NULL) {
        ecp_nistz256_mul_mont(z_inv3, z_inv3, z_inv2);
        ecp_nistz256_mul_mont(y_aff, z_inv3, point_y);
        ecp_nistz256_from_mont(y_ret, y_aff);
        if (!bn_set_words(y, y_ret, P256_LIMBS))
            return 0;
    }

    return 1;
}
static NISTZ256_PRE_COMP *ecp_nistz256_pre_comp_new(const EC_GROUP *group)
{
    NISTZ256_PRE_COMP *ret = NULL;

    if (!group)
        return NULL;

    ret = OPENSSL_zalloc(sizeof(*ret));

    if (ret == NULL) {
        ECerr(EC_F_ECP_NISTZ256_PRE_COMP_NEW, ERR_R_MALLOC_FAILURE);
        return ret;
    }

    ret->group = group;
    ret->w = 6;                 /* default */
    ret->references = 1;

    ret->lock = CRYPTO_THREAD_lock_new();
    if (ret->lock == NULL) {
        ECerr(EC_F_ECP_NISTZ256_PRE_COMP_NEW, ERR_R_MALLOC_FAILURE);
        OPENSSL_free(ret);
        return NULL;
    }
    return ret;
}
NISTZ256_PRE_COMP *EC_nistz256_pre_comp_dup(NISTZ256_PRE_COMP *p)
{
    int i;
    if (p != NULL)
        CRYPTO_atomic_add(&p->references, 1, &i, p->lock);
    return p;
}

void EC_nistz256_pre_comp_free(NISTZ256_PRE_COMP *pre)
{
    int i;

    if (pre == NULL)
        return;

    CRYPTO_atomic_add(&pre->references, -1, &i, pre->lock);
    REF_PRINT_COUNT("EC_nistz256", pre);
    if (i > 0)
        return;
    REF_ASSERT_ISNT(i < 0);

    OPENSSL_free(pre->precomp_storage);
    CRYPTO_THREAD_lock_free(pre->lock);
    OPENSSL_free(pre);
}
static int ecp_nistz256_window_have_precompute_mult(const EC_GROUP *group)
{
    const EC_POINT *generator = EC_GROUP_get0_generator(group);

    if (generator != NULL && ecp_nistz256_is_affine_G(generator)) {
        /* There is a hard-coded table for the default generator. */
        return 1;
    }

    return HAVEPRECOMP(group, nistz256);
}
const EC_METHOD *EC_GFp_nistz256_method(void)
{
    static const EC_METHOD ret = {
        EC_FLAGS_DEFAULT_OCT,
        NID_X9_62_prime_field,
        ec_GFp_mont_group_init,
        ec_GFp_mont_group_finish,
        ec_GFp_mont_group_clear_finish,
        ec_GFp_mont_group_copy,
        ec_GFp_mont_group_set_curve,
        ec_GFp_simple_group_get_curve,
        ec_GFp_simple_group_get_degree,
        ec_group_simple_order_bits,
        ec_GFp_simple_group_check_discriminant,
        ec_GFp_simple_point_init,
        ec_GFp_simple_point_finish,
        ec_GFp_simple_point_clear_finish,
        ec_GFp_simple_point_copy,
        ec_GFp_simple_point_set_to_infinity,
        ec_GFp_simple_set_Jprojective_coordinates_GFp,
        ec_GFp_simple_get_Jprojective_coordinates_GFp,
        ec_GFp_simple_point_set_affine_coordinates,
        ecp_nistz256_get_affine,
        0, 0, 0,
        ec_GFp_simple_add,
        ec_GFp_simple_dbl,
        ec_GFp_simple_invert,
        ec_GFp_simple_is_at_infinity,
        ec_GFp_simple_is_on_curve,
        ec_GFp_simple_cmp,
        ec_GFp_simple_make_affine,
        ec_GFp_simple_points_make_affine,
        ecp_nistz256_points_mul,                    /* mul */
        ecp_nistz256_mult_precompute,               /* precompute_mult */
        ecp_nistz256_window_have_precompute_mult,   /* have_precompute_mult */
        ec_GFp_mont_field_mul,
        ec_GFp_mont_field_sqr,
        0,                                          /* field_div */
        ec_GFp_mont_field_encode,
        ec_GFp_mont_field_decode,
        ec_GFp_mont_field_set_to_one,
        ec_key_simple_priv2oct,
        ec_key_simple_oct2priv,
        0,                                          /* set private */
        ec_key_simple_generate_key,
        ec_key_simple_check_key,
        ec_key_simple_generate_public_key,
        0,                                          /* keycopy */
        0,                                          /* keyfinish */
        ecdh_simple_compute_key
    };

    return &ret;
}
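/*
 * Usage sketch (hypothetical caller code, not part of this file): on
 * builds where this method is enabled it is wired to P-256 by the EC
 * library, so an application picks it up transparently via, e.g.,
 *
 *     EC_GROUP *grp = EC_GROUP_new_by_curve_name(NID_X9_62_prime256v1);
 *     ...
 *     EC_GROUP_free(grp);
 */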