/*
 * Copyright 2010-2018 The OpenSSL Project Authors. All Rights Reserved.
 *
 * Licensed under the Apache License 2.0 (the "License"). You may not use
 * this file except in compliance with the License. You can obtain a copy
 * in the file LICENSE in the source distribution or at
 * https://www.openssl.org/source/license.html
 */
/* Copyright 2011 Google Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
/*
 * ECDSA low level APIs are deprecated for public use, but still ok for
 * internal use.
 */
30 #include "internal/deprecated.h"
/*
 * A 64-bit implementation of the NIST P-224 elliptic curve point multiplication
 *
 * Inspired by Daniel J. Bernstein's public domain nistp224 implementation
 * and Adam Langley's public domain 64-bit C implementation of curve25519
 */
39 #include <openssl/opensslconf.h>
40 #ifdef OPENSSL_NO_EC_NISTP_64_GCC_128
41 NON_EMPTY_TRANSLATION_UNIT
# include <stdint.h>
# include <string.h>
# include <openssl/err.h>
# include "ec_local.h"
49 # if defined(__SIZEOF_INT128__) && __SIZEOF_INT128__==16
50 /* even with gcc, the typedef won't work for 32-bit platforms */
51 typedef __uint128_t uint128_t; /* nonstandard; implemented by gcc on 64-bit
54 # error "Your compiler doesn't appear to support 128-bit integer types"
/******************************************************************************/
/*-
 * INTERNAL REPRESENTATION OF FIELD ELEMENTS
 *
 * Field elements are represented as a_0 + 2^56*a_1 + 2^112*a_2 + 2^168*a_3
 * using 64-bit coefficients called 'limbs',
 * and sometimes (for multiplication results) as
 * b_0 + 2^56*b_1 + 2^112*b_2 + 2^168*b_3 + 2^224*b_4 + 2^280*b_5 + 2^336*b_6
 * using 128-bit coefficients called 'widelimbs'.
 * A 4-limb representation is an 'felem';
 * a 7-widelimb representation is a 'widefelem'.
 * Even within felems, bits of adjacent limbs overlap, and we don't always
 * reduce the representations: we ensure that inputs to each felem
 * multiplication satisfy a_i < 2^60, so outputs satisfy b_i < 4*2^60*2^60,
 * and fit into a 128-bit word without overflow. The coefficients are then
 * again partially reduced to obtain an felem satisfying a_i < 2^57.
 * We only reduce to the unique minimal representation at the end of the
 * computation.
 */
80 typedef uint64_t limb;
81 typedef uint128_t widelimb;
83 typedef limb felem[4];
84 typedef widelimb widefelem[7];
/*
 * Field element represented as a byte array. 28*8 = 224 bits is also the
 * group order size for the elliptic curve, and we also use this type for
 * scalars for point multiplication.
 */
typedef u8 felem_bytearray[28];
93 static const felem_bytearray nistp224_curve_params[5] = {
94 {0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, /* p */
95 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x00,
96 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01},
97 {0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, /* a */
98 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFE, 0xFF, 0xFF, 0xFF, 0xFF,
99 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFE},
100 {0xB4, 0x05, 0x0A, 0x85, 0x0C, 0x04, 0xB3, 0xAB, 0xF5, 0x41, /* b */
101 0x32, 0x56, 0x50, 0x44, 0xB0, 0xB7, 0xD7, 0xBF, 0xD8, 0xBA,
102 0x27, 0x0B, 0x39, 0x43, 0x23, 0x55, 0xFF, 0xB4},
103 {0xB7, 0x0E, 0x0C, 0xBD, 0x6B, 0xB4, 0xBF, 0x7F, 0x32, 0x13, /* x */
104 0x90, 0xB9, 0x4A, 0x03, 0xC1, 0xD3, 0x56, 0xC2, 0x11, 0x22,
105 0x34, 0x32, 0x80, 0xD6, 0x11, 0x5C, 0x1D, 0x21},
106 {0xbd, 0x37, 0x63, 0x88, 0xb5, 0xf7, 0x23, 0xfb, 0x4c, 0x22, /* y */
107 0xdf, 0xe6, 0xcd, 0x43, 0x75, 0xa0, 0x5a, 0x07, 0x47, 0x64,
108 0x44, 0xd5, 0x81, 0x99, 0x85, 0x00, 0x7e, 0x34}
/*-
 * Precomputed multiples of the standard generator
 * Points are given in coordinates (X, Y, Z) where Z normally is 1
 * (0 for the point at infinity).
 * For each field element, slice a_0 is word 0, etc.
 *
 * The table has 2 * 16 elements, starting with the following:
 * index | bits    | point
 * ------+---------+------------------------------
 *     0 | 0 0 0 0 | 0G
 *     1 | 0 0 0 1 | 1G
 *     2 | 0 0 1 0 | 2^56G
 *     3 | 0 0 1 1 | (2^56 + 1)G
 *     4 | 0 1 0 0 | 2^112G
 *     5 | 0 1 0 1 | (2^112 + 1)G
 *     6 | 0 1 1 0 | (2^112 + 2^56)G
 *     7 | 0 1 1 1 | (2^112 + 2^56 + 1)G
 *     8 | 1 0 0 0 | 2^168G
 *     9 | 1 0 0 1 | (2^168 + 1)G
 *    10 | 1 0 1 0 | (2^168 + 2^56)G
 *    11 | 1 0 1 1 | (2^168 + 2^56 + 1)G
 *    12 | 1 1 0 0 | (2^168 + 2^112)G
 *    13 | 1 1 0 1 | (2^168 + 2^112 + 1)G
 *    14 | 1 1 1 0 | (2^168 + 2^112 + 2^56)G
 *    15 | 1 1 1 1 | (2^168 + 2^112 + 2^56 + 1)G
 * followed by a copy of this with each element multiplied by 2^28.
 *
 * The reason for this is so that we can clock bits into four different
 * locations when doing simple scalar multiplies against the base point,
 * and then another four locations using the second 16 elements.
 */
142 static const felem gmul[2][16][3] = {
146 {{0x3280d6115c1d21, 0xc1d356c2112234, 0x7f321390b94a03, 0xb70e0cbd6bb4bf},
147 {0xd5819985007e34, 0x75a05a07476444, 0xfb4c22dfe6cd43, 0xbd376388b5f723},
149 {{0xfd9675666ebbe9, 0xbca7664d40ce5e, 0x2242df8d8a2a43, 0x1f49bbb0f99bc5},
150 {0x29e0b892dc9c43, 0xece8608436e662, 0xdc858f185310d0, 0x9812dd4eb8d321},
152 {{0x6d3e678d5d8eb8, 0x559eed1cb362f1, 0x16e9a3bbce8a3f, 0xeedcccd8c2a748},
153 {0xf19f90ed50266d, 0xabf2b4bf65f9df, 0x313865468fafec, 0x5cb379ba910a17},
155 {{0x0641966cab26e3, 0x91fb2991fab0a0, 0xefec27a4e13a0b, 0x0499aa8a5f8ebe},
156 {0x7510407766af5d, 0x84d929610d5450, 0x81d77aae82f706, 0x6916f6d4338c5b},
158 {{0xea95ac3b1f15c6, 0x086000905e82d4, 0xdd323ae4d1c8b1, 0x932b56be7685a3},
159 {0x9ef93dea25dbbf, 0x41665960f390f0, 0xfdec76dbe2a8a7, 0x523e80f019062a},
161 {{0x822fdd26732c73, 0xa01c83531b5d0f, 0x363f37347c1ba4, 0xc391b45c84725c},
162 {0xbbd5e1b2d6ad24, 0xddfbcde19dfaec, 0xc393da7e222a7f, 0x1efb7890ede244},
164 {{0x4c9e90ca217da1, 0xd11beca79159bb, 0xff8d33c2c98b7c, 0x2610b39409f849},
165 {0x44d1352ac64da0, 0xcdbb7b2c46b4fb, 0x966c079b753c89, 0xfe67e4e820b112},
167 {{0xe28cae2df5312d, 0xc71b61d16f5c6e, 0x79b7619a3e7c4c, 0x05c73240899b47},
168 {0x9f7f6382c73e3a, 0x18615165c56bda, 0x641fab2116fd56, 0x72855882b08394},
170 {{0x0469182f161c09, 0x74a98ca8d00fb5, 0xb89da93489a3e0, 0x41c98768fb0c1d},
171 {0xe5ea05fb32da81, 0x3dce9ffbca6855, 0x1cfe2d3fbf59e6, 0x0e5e03408738a7},
173 {{0xdab22b2333e87f, 0x4430137a5dd2f6, 0xe03ab9f738beb8, 0xcb0c5d0dc34f24},
174 {0x764a7df0c8fda5, 0x185ba5c3fa2044, 0x9281d688bcbe50, 0xc40331df893881},
176 {{0xb89530796f0f60, 0xade92bd26909a3, 0x1a0c83fb4884da, 0x1765bf22a5a984},
177 {0x772a9ee75db09e, 0x23bc6c67cec16f, 0x4c1edba8b14e2f, 0xe2a215d9611369},
179 {{0x571e509fb5efb3, 0xade88696410552, 0xc8ae85fada74fe, 0x6c7e4be83bbde3},
180 {0xff9f51160f4652, 0xb47ce2495a6539, 0xa2946c53b582f4, 0x286d2db3ee9a60},
182 {{0x40bbd5081a44af, 0x0995183b13926c, 0xbcefba6f47f6d0, 0x215619e9cc0057},
183 {0x8bc94d3b0df45e, 0xf11c54a3694f6f, 0x8631b93cdfe8b5, 0xe7e3f4b0982db9},
185 {{0xb17048ab3e1c7b, 0xac38f36ff8a1d8, 0x1c29819435d2c6, 0xc813132f4c07e9},
186 {0x2891425503b11f, 0x08781030579fea, 0xf5426ba5cc9674, 0x1e28ebf18562bc},
188 {{0x9f31997cc864eb, 0x06cd91d28b5e4c, 0xff17036691a973, 0xf1aef351497c58},
189 {0xdd1f2d600564ff, 0xdead073b1402db, 0x74a684435bd693, 0xeea7471f962558},
194 {{0x9665266dddf554, 0x9613d78b60ef2d, 0xce27a34cdba417, 0xd35ab74d6afc31},
195 {0x85ccdd22deb15e, 0x2137e5783a6aab, 0xa141cffd8c93c6, 0x355a1830e90f2d},
197 {{0x1a494eadaade65, 0xd6da4da77fe53c, 0xe7992996abec86, 0x65c3553c6090e3},
198 {0xfa610b1fb09346, 0xf1c6540b8a4aaf, 0xc51a13ccd3cbab, 0x02995b1b18c28a},
200 {{0x7874568e7295ef, 0x86b419fbe38d04, 0xdc0690a7550d9a, 0xd3966a44beac33},
201 {0x2b7280ec29132f, 0xbeaa3b6a032df3, 0xdc7dd88ae41200, 0xd25e2513e3a100},
203 {{0x924857eb2efafd, 0xac2bce41223190, 0x8edaa1445553fc, 0x825800fd3562d5},
204 {0x8d79148ea96621, 0x23a01c3dd9ed8d, 0xaf8b219f9416b5, 0xd8db0cc277daea},
206 {{0x76a9c3b1a700f0, 0xe9acd29bc7e691, 0x69212d1a6b0327, 0x6322e97fe154be},
207 {0x469fc5465d62aa, 0x8d41ed18883b05, 0x1f8eae66c52b88, 0xe4fcbe9325be51},
209 {{0x825fdf583cac16, 0x020b857c7b023a, 0x683c17744b0165, 0x14ffd0a2daf2f1},
210 {0x323b36184218f9, 0x4944ec4e3b47d4, 0xc15b3080841acf, 0x0bced4b01a28bb},
212 {{0x92ac22230df5c4, 0x52f33b4063eda8, 0xcb3f19870c0c93, 0x40064f2ba65233},
213 {0xfe16f0924f8992, 0x012da25af5b517, 0x1a57bb24f723a6, 0x06f8bc76760def},
215 {{0x4a7084f7817cb9, 0xbcab0738ee9a78, 0x3ec11e11d9c326, 0xdc0fe90e0f1aae},
216 {0xcf639ea5f98390, 0x5c350aa22ffb74, 0x9afae98a4047b7, 0x956ec2d617fc45},
218 {{0x4306d648c1be6a, 0x9247cd8bc9a462, 0xf5595e377d2f2e, 0xbd1c3caff1a52e},
219 {0x045e14472409d0, 0x29f3e17078f773, 0x745a602b2d4f7d, 0x191837685cdfbb},
221 {{0x5b6ee254a8cb79, 0x4953433f5e7026, 0xe21faeb1d1def4, 0xc4c225785c09de},
222 {0x307ce7bba1e518, 0x31b125b1036db8, 0x47e91868839e8f, 0xc765866e33b9f3},
224 {{0x3bfece24f96906, 0x4794da641e5093, 0xde5df64f95db26, 0x297ecd89714b05},
225 {0x701bd3ebb2c3aa, 0x7073b4f53cb1d5, 0x13c5665658af16, 0x9895089d66fe58},
227 {{0x0fef05f78c4790, 0x2d773633b05d2e, 0x94229c3a951c94, 0xbbbd70df4911bb},
228 {0xb2c6963d2c1168, 0x105f47a72b0d73, 0x9fdf6111614080, 0x7b7e94b39e67b0},
230 {{0xad1a7d6efbe2b3, 0xf012482c0da69d, 0x6b3bdf12438345, 0x40d7558d7aa4d9},
231 {0x8a09fffb5c6d3d, 0x9a356e5d9ffd38, 0x5973f15f4f9b1c, 0xdcd5f59f63c3ea},
233 {{0xacf39f4c5ca7ab, 0x4c8071cc5fd737, 0xc64e3602cd1184, 0x0acd4644c9abba},
234 {0x6c011a36d8bf6e, 0xfecd87ba24e32a, 0x19f6f56574fad8, 0x050b204ced9405},
236 {{0xed4f1cae7d9a96, 0x5ceef7ad94c40a, 0x778e4a3bf3ef9b, 0x7405783dc3b55e},
237 {0x32477c61b6e8c6, 0xb46a97570f018b, 0x91176d0a7e95d1, 0x3df90fbc4c7d0e},
241 /* Precomputation for the group generator. */
242 struct nistp224_pre_comp_st {
243 felem g_pre_comp[2][16][3];
244 CRYPTO_REF_COUNT references;
248 const EC_METHOD *EC_GFp_nistp224_method(void)
250 static const EC_METHOD ret = {
251 EC_FLAGS_DEFAULT_OCT,
252 NID_X9_62_prime_field,
253 ec_GFp_nistp224_group_init,
254 ec_GFp_simple_group_finish,
255 ec_GFp_simple_group_clear_finish,
256 ec_GFp_nist_group_copy,
257 ec_GFp_nistp224_group_set_curve,
258 ec_GFp_simple_group_get_curve,
259 ec_GFp_simple_group_get_degree,
260 ec_group_simple_order_bits,
261 ec_GFp_simple_group_check_discriminant,
262 ec_GFp_simple_point_init,
263 ec_GFp_simple_point_finish,
264 ec_GFp_simple_point_clear_finish,
265 ec_GFp_simple_point_copy,
266 ec_GFp_simple_point_set_to_infinity,
267 ec_GFp_simple_set_Jprojective_coordinates_GFp,
268 ec_GFp_simple_get_Jprojective_coordinates_GFp,
269 ec_GFp_simple_point_set_affine_coordinates,
270 ec_GFp_nistp224_point_get_affine_coordinates,
271 0 /* point_set_compressed_coordinates */ ,
276 ec_GFp_simple_invert,
277 ec_GFp_simple_is_at_infinity,
278 ec_GFp_simple_is_on_curve,
280 ec_GFp_simple_make_affine,
281 ec_GFp_simple_points_make_affine,
282 ec_GFp_nistp224_points_mul,
283 ec_GFp_nistp224_precompute_mult,
284 ec_GFp_nistp224_have_precompute_mult,
285 ec_GFp_nist_field_mul,
286 ec_GFp_nist_field_sqr,
288 ec_GFp_simple_field_inv,
289 0 /* field_encode */ ,
290 0 /* field_decode */ ,
291 0, /* field_set_to_one */
292 ec_key_simple_priv2oct,
293 ec_key_simple_oct2priv,
295 ec_key_simple_generate_key,
296 ec_key_simple_check_key,
297 ec_key_simple_generate_public_key,
300 ecdh_simple_compute_key,
301 ecdsa_simple_sign_setup,
302 ecdsa_simple_sign_sig,
303 ecdsa_simple_verify_sig,
304 0, /* field_inverse_mod_ord */
305 0, /* blind_coordinates */
/*
 * Helper functions to convert field elements to/from internal representation
 */
317 static void bin28_to_felem(felem out, const u8 in[28])
319 out[0] = *((const uint64_t *)(in)) & 0x00ffffffffffffff;
320 out[1] = (*((const uint64_t *)(in + 7))) & 0x00ffffffffffffff;
321 out[2] = (*((const uint64_t *)(in + 14))) & 0x00ffffffffffffff;
322 out[3] = (*((const uint64_t *)(in+20))) >> 8;
325 static void felem_to_bin28(u8 out[28], const felem in)
328 for (i = 0; i < 7; ++i) {
329 out[i] = in[0] >> (8 * i);
330 out[i + 7] = in[1] >> (8 * i);
331 out[i + 14] = in[2] >> (8 * i);
332 out[i + 21] = in[3] >> (8 * i);
336 /* From OpenSSL BIGNUM to internal representation */
337 static int BN_to_felem(felem out, const BIGNUM *bn)
339 felem_bytearray b_out;
342 if (BN_is_negative(bn)) {
343 ECerr(EC_F_BN_TO_FELEM, EC_R_BIGNUM_OUT_OF_RANGE);
346 num_bytes = BN_bn2lebinpad(bn, b_out, sizeof(b_out));
348 ECerr(EC_F_BN_TO_FELEM, EC_R_BIGNUM_OUT_OF_RANGE);
351 bin28_to_felem(out, b_out);
355 /* From internal representation to OpenSSL BIGNUM */
356 static BIGNUM *felem_to_BN(BIGNUM *out, const felem in)
358 felem_bytearray b_out;
359 felem_to_bin28(b_out, in);
360 return BN_lebin2bn(b_out, sizeof(b_out), out);
363 /******************************************************************************/
367 * Field operations, using the internal representation of field elements.
368 * NB! These operations are specific to our point multiplication and cannot be
369 * expected to be correct in general - e.g., multiplication with a large scalar
370 * will cause an overflow.
374 static void felem_one(felem out)
382 static void felem_assign(felem out, const felem in)
390 /* Sum two field elements: out += in */
391 static void felem_sum(felem out, const felem in)
399 /* Subtract field elements: out -= in */
400 /* Assumes in[i] < 2^57 */
401 static void felem_diff(felem out, const felem in)
403 static const limb two58p2 = (((limb) 1) << 58) + (((limb) 1) << 2);
404 static const limb two58m2 = (((limb) 1) << 58) - (((limb) 1) << 2);
405 static const limb two58m42m2 = (((limb) 1) << 58) -
406 (((limb) 1) << 42) - (((limb) 1) << 2);
408 /* Add 0 mod 2^224-2^96+1 to ensure out > in */
410 out[1] += two58m42m2;
420 /* Subtract in unreduced 128-bit mode: out -= in */
421 /* Assumes in[i] < 2^119 */
422 static void widefelem_diff(widefelem out, const widefelem in)
424 static const widelimb two120 = ((widelimb) 1) << 120;
425 static const widelimb two120m64 = (((widelimb) 1) << 120) -
426 (((widelimb) 1) << 64);
427 static const widelimb two120m104m64 = (((widelimb) 1) << 120) -
428 (((widelimb) 1) << 104) - (((widelimb) 1) << 64);
430 /* Add 0 mod 2^224-2^96+1 to ensure out > in */
435 out[4] += two120m104m64;
448 /* Subtract in mixed mode: out128 -= in64 */
450 static void felem_diff_128_64(widefelem out, const felem in)
452 static const widelimb two64p8 = (((widelimb) 1) << 64) +
453 (((widelimb) 1) << 8);
454 static const widelimb two64m8 = (((widelimb) 1) << 64) -
455 (((widelimb) 1) << 8);
456 static const widelimb two64m48m8 = (((widelimb) 1) << 64) -
457 (((widelimb) 1) << 48) - (((widelimb) 1) << 8);
459 /* Add 0 mod 2^224-2^96+1 to ensure out > in */
461 out[1] += two64m48m8;
472 * Multiply a field element by a scalar: out = out * scalar The scalars we
473 * actually use are small, so results fit without overflow
475 static void felem_scalar(felem out, const limb scalar)
484 * Multiply an unreduced field element by a scalar: out = out * scalar The
485 * scalars we actually use are small, so results fit without overflow
487 static void widefelem_scalar(widefelem out, const widelimb scalar)
498 /* Square a field element: out = in^2 */
499 static void felem_square(widefelem out, const felem in)
501 limb tmp0, tmp1, tmp2;
505 out[0] = ((widelimb) in[0]) * in[0];
506 out[1] = ((widelimb) in[0]) * tmp1;
507 out[2] = ((widelimb) in[0]) * tmp2 + ((widelimb) in[1]) * in[1];
508 out[3] = ((widelimb) in[3]) * tmp0 + ((widelimb) in[1]) * tmp2;
509 out[4] = ((widelimb) in[3]) * tmp1 + ((widelimb) in[2]) * in[2];
510 out[5] = ((widelimb) in[3]) * tmp2;
511 out[6] = ((widelimb) in[3]) * in[3];
514 /* Multiply two field elements: out = in1 * in2 */
515 static void felem_mul(widefelem out, const felem in1, const felem in2)
517 out[0] = ((widelimb) in1[0]) * in2[0];
518 out[1] = ((widelimb) in1[0]) * in2[1] + ((widelimb) in1[1]) * in2[0];
519 out[2] = ((widelimb) in1[0]) * in2[2] + ((widelimb) in1[1]) * in2[1] +
520 ((widelimb) in1[2]) * in2[0];
521 out[3] = ((widelimb) in1[0]) * in2[3] + ((widelimb) in1[1]) * in2[2] +
522 ((widelimb) in1[2]) * in2[1] + ((widelimb) in1[3]) * in2[0];
523 out[4] = ((widelimb) in1[1]) * in2[3] + ((widelimb) in1[2]) * in2[2] +
524 ((widelimb) in1[3]) * in2[1];
525 out[5] = ((widelimb) in1[2]) * in2[3] + ((widelimb) in1[3]) * in2[2];
526 out[6] = ((widelimb) in1[3]) * in2[3];
530 * Reduce seven 128-bit coefficients to four 64-bit coefficients.
531 * Requires in[i] < 2^126,
532 * ensures out[0] < 2^56, out[1] < 2^56, out[2] < 2^56, out[3] <= 2^56 + 2^16 */
533 static void felem_reduce(felem out, const widefelem in)
535 static const widelimb two127p15 = (((widelimb) 1) << 127) +
536 (((widelimb) 1) << 15);
537 static const widelimb two127m71 = (((widelimb) 1) << 127) -
538 (((widelimb) 1) << 71);
539 static const widelimb two127m71m55 = (((widelimb) 1) << 127) -
540 (((widelimb) 1) << 71) - (((widelimb) 1) << 55);
543 /* Add 0 mod 2^224-2^96+1 to ensure all differences are positive */
544 output[0] = in[0] + two127p15;
545 output[1] = in[1] + two127m71m55;
546 output[2] = in[2] + two127m71;
550 /* Eliminate in[4], in[5], in[6] */
551 output[4] += in[6] >> 16;
552 output[3] += (in[6] & 0xffff) << 40;
555 output[3] += in[5] >> 16;
556 output[2] += (in[5] & 0xffff) << 40;
559 output[2] += output[4] >> 16;
560 output[1] += (output[4] & 0xffff) << 40;
561 output[0] -= output[4];
563 /* Carry 2 -> 3 -> 4 */
564 output[3] += output[2] >> 56;
565 output[2] &= 0x00ffffffffffffff;
567 output[4] = output[3] >> 56;
568 output[3] &= 0x00ffffffffffffff;
570 /* Now output[2] < 2^56, output[3] < 2^56, output[4] < 2^72 */
572 /* Eliminate output[4] */
573 output[2] += output[4] >> 16;
574 /* output[2] < 2^56 + 2^56 = 2^57 */
575 output[1] += (output[4] & 0xffff) << 40;
576 output[0] -= output[4];
578 /* Carry 0 -> 1 -> 2 -> 3 */
579 output[1] += output[0] >> 56;
580 out[0] = output[0] & 0x00ffffffffffffff;
582 output[2] += output[1] >> 56;
583 /* output[2] < 2^57 + 2^72 */
584 out[1] = output[1] & 0x00ffffffffffffff;
585 output[3] += output[2] >> 56;
586 /* output[3] <= 2^56 + 2^16 */
587 out[2] = output[2] & 0x00ffffffffffffff;
590 * out[0] < 2^56, out[1] < 2^56, out[2] < 2^56,
591 * out[3] <= 2^56 + 2^16 (due to final carry),
597 static void felem_square_reduce(felem out, const felem in)
600 felem_square(tmp, in);
601 felem_reduce(out, tmp);
604 static void felem_mul_reduce(felem out, const felem in1, const felem in2)
607 felem_mul(tmp, in1, in2);
608 felem_reduce(out, tmp);
612 * Reduce to unique minimal representation. Requires 0 <= in < 2*p (always
613 * call felem_reduce first)
615 static void felem_contract(felem out, const felem in)
617 static const int64_t two56 = ((limb) 1) << 56;
618 /* 0 <= in < 2*p, p = 2^224 - 2^96 + 1 */
619 /* if in > p , reduce in = in - 2^224 + 2^96 - 1 */
625 /* Case 1: a = 1 iff in >= 2^224 */
629 tmp[3] &= 0x00ffffffffffffff;
631 * Case 2: a = 0 iff p <= in < 2^224, i.e., the high 128 bits are all 1
632 * and the lower part is non-zero
634 a = ((in[3] & in[2] & (in[1] | 0x000000ffffffffff)) + 1) |
635 (((int64_t) (in[0] + (in[1] & 0x000000ffffffffff)) - 1) >> 63);
636 a &= 0x00ffffffffffffff;
637 /* turn a into an all-one mask (if a = 0) or an all-zero mask */
639 /* subtract 2^224 - 2^96 + 1 if a is all-one */
640 tmp[3] &= a ^ 0xffffffffffffffff;
641 tmp[2] &= a ^ 0xffffffffffffffff;
642 tmp[1] &= (a ^ 0xffffffffffffffff) | 0x000000ffffffffff;
646 * eliminate negative coefficients: if tmp[0] is negative, tmp[1] must be
647 * non-zero, so we only need one step
653 /* carry 1 -> 2 -> 3 */
654 tmp[2] += tmp[1] >> 56;
655 tmp[1] &= 0x00ffffffffffffff;
657 tmp[3] += tmp[2] >> 56;
658 tmp[2] &= 0x00ffffffffffffff;
660 /* Now 0 <= out < p */
668 * Get negative value: out = -in
669 * Requires in[i] < 2^63,
670 * ensures out[0] < 2^56, out[1] < 2^56, out[2] < 2^56, out[3] <= 2^56 + 2^16
672 static void felem_neg(felem out, const felem in)
676 memset(tmp, 0, sizeof(tmp));
677 felem_diff_128_64(tmp, in);
678 felem_reduce(out, tmp);
682 * Zero-check: returns 1 if input is 0, and 0 otherwise. We know that field
683 * elements are reduced to in < 2^225, so we only need to check three cases:
684 * 0, 2^224 - 2^96 + 1, and 2^225 - 2^97 + 2
686 static limb felem_is_zero(const felem in)
688 limb zero, two224m96p1, two225m97p2;
690 zero = in[0] | in[1] | in[2] | in[3];
691 zero = (((int64_t) (zero) - 1) >> 63) & 1;
692 two224m96p1 = (in[0] ^ 1) | (in[1] ^ 0x00ffff0000000000)
693 | (in[2] ^ 0x00ffffffffffffff) | (in[3] ^ 0x00ffffffffffffff);
694 two224m96p1 = (((int64_t) (two224m96p1) - 1) >> 63) & 1;
695 two225m97p2 = (in[0] ^ 2) | (in[1] ^ 0x00fffe0000000000)
696 | (in[2] ^ 0x00ffffffffffffff) | (in[3] ^ 0x01ffffffffffffff);
697 two225m97p2 = (((int64_t) (two225m97p2) - 1) >> 63) & 1;
698 return (zero | two224m96p1 | two225m97p2);
701 static int felem_is_zero_int(const void *in)
703 return (int)(felem_is_zero(in) & ((limb) 1));
706 /* Invert a field element */
707 /* Computation chain copied from djb's code */
708 static void felem_inv(felem out, const felem in)
710 felem ftmp, ftmp2, ftmp3, ftmp4;
714 felem_square(tmp, in);
715 felem_reduce(ftmp, tmp); /* 2 */
716 felem_mul(tmp, in, ftmp);
717 felem_reduce(ftmp, tmp); /* 2^2 - 1 */
718 felem_square(tmp, ftmp);
719 felem_reduce(ftmp, tmp); /* 2^3 - 2 */
720 felem_mul(tmp, in, ftmp);
721 felem_reduce(ftmp, tmp); /* 2^3 - 1 */
722 felem_square(tmp, ftmp);
723 felem_reduce(ftmp2, tmp); /* 2^4 - 2 */
724 felem_square(tmp, ftmp2);
725 felem_reduce(ftmp2, tmp); /* 2^5 - 4 */
726 felem_square(tmp, ftmp2);
727 felem_reduce(ftmp2, tmp); /* 2^6 - 8 */
728 felem_mul(tmp, ftmp2, ftmp);
729 felem_reduce(ftmp, tmp); /* 2^6 - 1 */
730 felem_square(tmp, ftmp);
731 felem_reduce(ftmp2, tmp); /* 2^7 - 2 */
732 for (i = 0; i < 5; ++i) { /* 2^12 - 2^6 */
733 felem_square(tmp, ftmp2);
734 felem_reduce(ftmp2, tmp);
736 felem_mul(tmp, ftmp2, ftmp);
737 felem_reduce(ftmp2, tmp); /* 2^12 - 1 */
738 felem_square(tmp, ftmp2);
739 felem_reduce(ftmp3, tmp); /* 2^13 - 2 */
740 for (i = 0; i < 11; ++i) { /* 2^24 - 2^12 */
741 felem_square(tmp, ftmp3);
742 felem_reduce(ftmp3, tmp);
744 felem_mul(tmp, ftmp3, ftmp2);
745 felem_reduce(ftmp2, tmp); /* 2^24 - 1 */
746 felem_square(tmp, ftmp2);
747 felem_reduce(ftmp3, tmp); /* 2^25 - 2 */
748 for (i = 0; i < 23; ++i) { /* 2^48 - 2^24 */
749 felem_square(tmp, ftmp3);
750 felem_reduce(ftmp3, tmp);
752 felem_mul(tmp, ftmp3, ftmp2);
753 felem_reduce(ftmp3, tmp); /* 2^48 - 1 */
754 felem_square(tmp, ftmp3);
755 felem_reduce(ftmp4, tmp); /* 2^49 - 2 */
756 for (i = 0; i < 47; ++i) { /* 2^96 - 2^48 */
757 felem_square(tmp, ftmp4);
758 felem_reduce(ftmp4, tmp);
760 felem_mul(tmp, ftmp3, ftmp4);
761 felem_reduce(ftmp3, tmp); /* 2^96 - 1 */
762 felem_square(tmp, ftmp3);
763 felem_reduce(ftmp4, tmp); /* 2^97 - 2 */
764 for (i = 0; i < 23; ++i) { /* 2^120 - 2^24 */
765 felem_square(tmp, ftmp4);
766 felem_reduce(ftmp4, tmp);
768 felem_mul(tmp, ftmp2, ftmp4);
769 felem_reduce(ftmp2, tmp); /* 2^120 - 1 */
770 for (i = 0; i < 6; ++i) { /* 2^126 - 2^6 */
771 felem_square(tmp, ftmp2);
772 felem_reduce(ftmp2, tmp);
774 felem_mul(tmp, ftmp2, ftmp);
775 felem_reduce(ftmp, tmp); /* 2^126 - 1 */
776 felem_square(tmp, ftmp);
777 felem_reduce(ftmp, tmp); /* 2^127 - 2 */
778 felem_mul(tmp, ftmp, in);
779 felem_reduce(ftmp, tmp); /* 2^127 - 1 */
780 for (i = 0; i < 97; ++i) { /* 2^224 - 2^97 */
781 felem_square(tmp, ftmp);
782 felem_reduce(ftmp, tmp);
784 felem_mul(tmp, ftmp, ftmp3);
785 felem_reduce(out, tmp); /* 2^224 - 2^96 - 1 */
789 * Copy in constant time: if icopy == 1, copy in to out, if icopy == 0, copy
792 static void copy_conditional(felem out, const felem in, limb icopy)
796 * icopy is a (64-bit) 0 or 1, so copy is either all-zero or all-one
798 const limb copy = -icopy;
799 for (i = 0; i < 4; ++i) {
800 const limb tmp = copy & (in[i] ^ out[i]);
805 /******************************************************************************/
/*-
 * ELLIPTIC CURVE POINT OPERATIONS
 *
 * Points are represented in Jacobian projective coordinates:
 * (X, Y, Z) corresponds to the affine point (X/Z^2, Y/Z^3),
 * or to the point at infinity if Z == 0.
 */
816 * Double an elliptic curve point:
817 * (X', Y', Z') = 2 * (X, Y, Z), where
818 * X' = (3 * (X - Z^2) * (X + Z^2))^2 - 8 * X * Y^2
819 * Y' = 3 * (X - Z^2) * (X + Z^2) * (4 * X * Y^2 - X') - 8 * Y^4
820 * Z' = (Y + Z)^2 - Y^2 - Z^2 = 2 * Y * Z
821 * Outputs can equal corresponding inputs, i.e., x_out == x_in is allowed,
822 * while x_out == y_in is not (maybe this works, but it's not tested).
825 point_double(felem x_out, felem y_out, felem z_out,
826 const felem x_in, const felem y_in, const felem z_in)
829 felem delta, gamma, beta, alpha, ftmp, ftmp2;
831 felem_assign(ftmp, x_in);
832 felem_assign(ftmp2, x_in);
835 felem_square(tmp, z_in);
836 felem_reduce(delta, tmp);
839 felem_square(tmp, y_in);
840 felem_reduce(gamma, tmp);
843 felem_mul(tmp, x_in, gamma);
844 felem_reduce(beta, tmp);
846 /* alpha = 3*(x-delta)*(x+delta) */
847 felem_diff(ftmp, delta);
848 /* ftmp[i] < 2^57 + 2^58 + 2 < 2^59 */
849 felem_sum(ftmp2, delta);
850 /* ftmp2[i] < 2^57 + 2^57 = 2^58 */
851 felem_scalar(ftmp2, 3);
852 /* ftmp2[i] < 3 * 2^58 < 2^60 */
853 felem_mul(tmp, ftmp, ftmp2);
854 /* tmp[i] < 2^60 * 2^59 * 4 = 2^121 */
855 felem_reduce(alpha, tmp);
857 /* x' = alpha^2 - 8*beta */
858 felem_square(tmp, alpha);
859 /* tmp[i] < 4 * 2^57 * 2^57 = 2^116 */
860 felem_assign(ftmp, beta);
861 felem_scalar(ftmp, 8);
862 /* ftmp[i] < 8 * 2^57 = 2^60 */
863 felem_diff_128_64(tmp, ftmp);
864 /* tmp[i] < 2^116 + 2^64 + 8 < 2^117 */
865 felem_reduce(x_out, tmp);
867 /* z' = (y + z)^2 - gamma - delta */
868 felem_sum(delta, gamma);
869 /* delta[i] < 2^57 + 2^57 = 2^58 */
870 felem_assign(ftmp, y_in);
871 felem_sum(ftmp, z_in);
872 /* ftmp[i] < 2^57 + 2^57 = 2^58 */
873 felem_square(tmp, ftmp);
874 /* tmp[i] < 4 * 2^58 * 2^58 = 2^118 */
875 felem_diff_128_64(tmp, delta);
876 /* tmp[i] < 2^118 + 2^64 + 8 < 2^119 */
877 felem_reduce(z_out, tmp);
879 /* y' = alpha*(4*beta - x') - 8*gamma^2 */
880 felem_scalar(beta, 4);
881 /* beta[i] < 4 * 2^57 = 2^59 */
882 felem_diff(beta, x_out);
883 /* beta[i] < 2^59 + 2^58 + 2 < 2^60 */
884 felem_mul(tmp, alpha, beta);
885 /* tmp[i] < 4 * 2^57 * 2^60 = 2^119 */
886 felem_square(tmp2, gamma);
887 /* tmp2[i] < 4 * 2^57 * 2^57 = 2^116 */
888 widefelem_scalar(tmp2, 8);
889 /* tmp2[i] < 8 * 2^116 = 2^119 */
890 widefelem_diff(tmp, tmp2);
891 /* tmp[i] < 2^119 + 2^120 < 2^121 */
892 felem_reduce(y_out, tmp);
896 * Add two elliptic curve points:
897 * (X_1, Y_1, Z_1) + (X_2, Y_2, Z_2) = (X_3, Y_3, Z_3), where
898 * X_3 = (Z_1^3 * Y_2 - Z_2^3 * Y_1)^2 - (Z_1^2 * X_2 - Z_2^2 * X_1)^3 -
899 * 2 * Z_2^2 * X_1 * (Z_1^2 * X_2 - Z_2^2 * X_1)^2
900 * Y_3 = (Z_1^3 * Y_2 - Z_2^3 * Y_1) * (Z_2^2 * X_1 * (Z_1^2 * X_2 - Z_2^2 * X_1)^2 - X_3) -
901 * Z_2^3 * Y_1 * (Z_1^2 * X_2 - Z_2^2 * X_1)^3
902 * Z_3 = (Z_1^2 * X_2 - Z_2^2 * X_1) * (Z_1 * Z_2)
904 * This runs faster if 'mixed' is set, which requires Z_2 = 1 or Z_2 = 0.
908 * This function is not entirely constant-time: it includes a branch for
909 * checking whether the two input points are equal, (while not equal to the
910 * point at infinity). This case never happens during single point
911 * multiplication, so there is no timing leak for ECDH or ECDSA signing.
913 static void point_add(felem x3, felem y3, felem z3,
914 const felem x1, const felem y1, const felem z1,
915 const int mixed, const felem x2, const felem y2,
918 felem ftmp, ftmp2, ftmp3, ftmp4, ftmp5, x_out, y_out, z_out;
920 limb z1_is_zero, z2_is_zero, x_equal, y_equal;
925 felem_square(tmp, z2);
926 felem_reduce(ftmp2, tmp);
929 felem_mul(tmp, ftmp2, z2);
930 felem_reduce(ftmp4, tmp);
932 /* ftmp4 = z2^3*y1 */
933 felem_mul(tmp2, ftmp4, y1);
934 felem_reduce(ftmp4, tmp2);
936 /* ftmp2 = z2^2*x1 */
937 felem_mul(tmp2, ftmp2, x1);
938 felem_reduce(ftmp2, tmp2);
941 * We'll assume z2 = 1 (special case z2 = 0 is handled later)
944 /* ftmp4 = z2^3*y1 */
945 felem_assign(ftmp4, y1);
947 /* ftmp2 = z2^2*x1 */
948 felem_assign(ftmp2, x1);
952 felem_square(tmp, z1);
953 felem_reduce(ftmp, tmp);
956 felem_mul(tmp, ftmp, z1);
957 felem_reduce(ftmp3, tmp);
960 felem_mul(tmp, ftmp3, y2);
961 /* tmp[i] < 4 * 2^57 * 2^57 = 2^116 */
963 /* ftmp3 = z1^3*y2 - z2^3*y1 */
964 felem_diff_128_64(tmp, ftmp4);
965 /* tmp[i] < 2^116 + 2^64 + 8 < 2^117 */
966 felem_reduce(ftmp3, tmp);
969 felem_mul(tmp, ftmp, x2);
970 /* tmp[i] < 4 * 2^57 * 2^57 = 2^116 */
972 /* ftmp = z1^2*x2 - z2^2*x1 */
973 felem_diff_128_64(tmp, ftmp2);
974 /* tmp[i] < 2^116 + 2^64 + 8 < 2^117 */
975 felem_reduce(ftmp, tmp);
978 * The formulae are incorrect if the points are equal, in affine coordinates
979 * (X_1, Y_1) == (X_2, Y_2), so we check for this and do doubling if this
982 * We use bitwise operations to avoid potential side-channels introduced by
983 * the short-circuiting behaviour of boolean operators.
985 x_equal = felem_is_zero(ftmp);
986 y_equal = felem_is_zero(ftmp3);
988 * The special case of either point being the point at infinity (z1 and/or
989 * z2 are zero), is handled separately later on in this function, so we
990 * avoid jumping to point_double here in those special cases.
992 z1_is_zero = felem_is_zero(z1);
993 z2_is_zero = felem_is_zero(z2);
996 * Compared to `ecp_nistp256.c` and `ecp_nistp521.c`, in this
997 * specific implementation `felem_is_zero()` returns truth as `0x1`
998 * (rather than `0xff..ff`).
1000 * This implies that `~true` in this implementation becomes
1001 * `0xff..fe` (rather than `0x0`): for this reason, to be used in
1002 * the if expression, we mask out only the last bit in the next
1005 points_equal = (x_equal & y_equal & (~z1_is_zero) & (~z2_is_zero)) & 1;
1009 * This is obviously not constant-time but, as mentioned before, this
1010 * case never happens during single point multiplication, so there is no
1011 * timing leak for ECDH or ECDSA signing.
1013 point_double(x3, y3, z3, x1, y1, z1);
1019 felem_mul(tmp, z1, z2);
1020 felem_reduce(ftmp5, tmp);
1022 /* special case z2 = 0 is handled later */
1023 felem_assign(ftmp5, z1);
1026 /* z_out = (z1^2*x2 - z2^2*x1)*(z1*z2) */
1027 felem_mul(tmp, ftmp, ftmp5);
1028 felem_reduce(z_out, tmp);
1030 /* ftmp = (z1^2*x2 - z2^2*x1)^2 */
1031 felem_assign(ftmp5, ftmp);
1032 felem_square(tmp, ftmp);
1033 felem_reduce(ftmp, tmp);
1035 /* ftmp5 = (z1^2*x2 - z2^2*x1)^3 */
1036 felem_mul(tmp, ftmp, ftmp5);
1037 felem_reduce(ftmp5, tmp);
1039 /* ftmp2 = z2^2*x1*(z1^2*x2 - z2^2*x1)^2 */
1040 felem_mul(tmp, ftmp2, ftmp);
1041 felem_reduce(ftmp2, tmp);
1043 /* tmp = z2^3*y1*(z1^2*x2 - z2^2*x1)^3 */
1044 felem_mul(tmp, ftmp4, ftmp5);
1045 /* tmp[i] < 4 * 2^57 * 2^57 = 2^116 */
1047 /* tmp2 = (z1^3*y2 - z2^3*y1)^2 */
1048 felem_square(tmp2, ftmp3);
1049 /* tmp2[i] < 4 * 2^57 * 2^57 < 2^116 */
1051 /* tmp2 = (z1^3*y2 - z2^3*y1)^2 - (z1^2*x2 - z2^2*x1)^3 */
1052 felem_diff_128_64(tmp2, ftmp5);
1053 /* tmp2[i] < 2^116 + 2^64 + 8 < 2^117 */
1055 /* ftmp5 = 2*z2^2*x1*(z1^2*x2 - z2^2*x1)^2 */
1056 felem_assign(ftmp5, ftmp2);
1057 felem_scalar(ftmp5, 2);
1058 /* ftmp5[i] < 2 * 2^57 = 2^58 */
1061 * x_out = (z1^3*y2 - z2^3*y1)^2 - (z1^2*x2 - z2^2*x1)^3 -
1062 * 2*z2^2*x1*(z1^2*x2 - z2^2*x1)^2
1064 felem_diff_128_64(tmp2, ftmp5);
1065 /* tmp2[i] < 2^117 + 2^64 + 8 < 2^118 */
1066 felem_reduce(x_out, tmp2);
1068 /* ftmp2 = z2^2*x1*(z1^2*x2 - z2^2*x1)^2 - x_out */
1069 felem_diff(ftmp2, x_out);
1070 /* ftmp2[i] < 2^57 + 2^58 + 2 < 2^59 */
1073 * tmp2 = (z1^3*y2 - z2^3*y1)*(z2^2*x1*(z1^2*x2 - z2^2*x1)^2 - x_out)
1075 felem_mul(tmp2, ftmp3, ftmp2);
1076 /* tmp2[i] < 4 * 2^57 * 2^59 = 2^118 */
1079 * y_out = (z1^3*y2 - z2^3*y1)*(z2^2*x1*(z1^2*x2 - z2^2*x1)^2 - x_out) -
1080 * z2^3*y1*(z1^2*x2 - z2^2*x1)^3
1082 widefelem_diff(tmp2, tmp);
1083 /* tmp2[i] < 2^118 + 2^120 < 2^121 */
1084 felem_reduce(y_out, tmp2);
1087 * the result (x_out, y_out, z_out) is incorrect if one of the inputs is
1088 * the point at infinity, so we need to check for this separately
1092 * if point 1 is at infinity, copy point 2 to output, and vice versa
1094 copy_conditional(x_out, x2, z1_is_zero);
1095 copy_conditional(x_out, x1, z2_is_zero);
1096 copy_conditional(y_out, y2, z1_is_zero);
1097 copy_conditional(y_out, y1, z2_is_zero);
1098 copy_conditional(z_out, z2, z1_is_zero);
1099 copy_conditional(z_out, z1, z2_is_zero);
1100 felem_assign(x3, x_out);
1101 felem_assign(y3, y_out);
1102 felem_assign(z3, z_out);
1106 * select_point selects the |idx|th point from a precomputation table and
1108 * The pre_comp array argument should be size of |size| argument
1110 static void select_point(const u64 idx, unsigned int size,
1111 const felem pre_comp[][3], felem out[3])
1114 limb *outlimbs = &out[0][0];
1116 memset(out, 0, sizeof(*out) * 3);
1117 for (i = 0; i < size; i++) {
1118 const limb *inlimbs = &pre_comp[i][0][0];
1125 for (j = 0; j < 4 * 3; j++)
1126 outlimbs[j] |= inlimbs[j] & mask;
1130 /* get_bit returns the |i|th bit in |in| */
1131 static char get_bit(const felem_bytearray in, unsigned i)
1135 return (in[i >> 3] >> (i & 7)) & 1;
/*
 * batch_mul: interleaved multi-scalar point multiplication for P-224.
 * NOTE(review): this excerpt has lost several lines (declarations of
 * i/num/bits/sign/digit/skip, braces, else-branches); the comments below
 * annotate only the code that is visible here.
 */
1139 * Interleaved point multiplication using precomputed point multiples: The
1140 * small point multiples 0*P, 1*P, ..., 16*P are in pre_comp[], the scalars
1141 * in scalars[]. If g_scalar is non-NULL, we also add this multiple of the
1142 * generator, using certain (large) precomputed multiples in g_pre_comp.
1143 * Output point (X, Y, Z) is stored in x_out, y_out, z_out
1145 static void batch_mul(felem x_out, felem y_out, felem z_out,
1146 const felem_bytearray scalars[],
1147 const unsigned num_points, const u8 *g_scalar,
1148 const int mixed, const felem pre_comp[][17][3],
1149 const felem g_pre_comp[2][16][3])
/* gen_mul is 1 iff a generator scalar was supplied */
1153 unsigned gen_mul = (g_scalar != NULL);
/* nq is the running accumulator; tmp holds a selected table entry plus -Y */
1154 felem nq[3], tmp[4];
1158 /* set nq to the point at infinity */
1159 memset(nq, 0, sizeof(nq));
1162 * Loop over all scalars msb-to-lsb, interleaving additions of multiples
1163 * of the generator (two in each of the last 28 rounds) and additions of
1164 * other points multiples (every 5th round).
1166 skip = 1; /* save two point operations in the first
/* 221 rounds with arbitrary points present, 28 when only the generator */
1168 for (i = (num_points ? 220 : 27); i >= 0; --i) {
1171 point_double(nq[0], nq[1], nq[2], nq[0], nq[1], nq[2]);
1173 /* add multiples of the generator */
1174 if (gen_mul && (i <= 27)) {
1175 /* first, look 28 bits upwards */
/* gather one bit from each 56-bit stripe (offset by 28) into a 4-bit index */
1176 bits = get_bit(g_scalar, i + 196) << 3;
1177 bits |= get_bit(g_scalar, i + 140) << 2;
1178 bits |= get_bit(g_scalar, i + 84) << 1;
1179 bits |= get_bit(g_scalar, i + 28);
1180 /* select the point to add, in constant time */
1181 select_point(bits, 16, g_pre_comp[1], tmp);
1184 /* value 1 below is argument for "mixed" */
1185 point_add(nq[0], nq[1], nq[2],
1186 nq[0], nq[1], nq[2], 1, tmp[0], tmp[1], tmp[2]);
/* while skip is set, nq is still infinity: just copy the selected point */
1188 memcpy(nq, tmp, 3 * sizeof(felem));
1192 /* second, look at the current position */
1193 bits = get_bit(g_scalar, i + 168) << 3;
1194 bits |= get_bit(g_scalar, i + 112) << 2;
1195 bits |= get_bit(g_scalar, i + 56) << 1;
1196 bits |= get_bit(g_scalar, i);
1197 /* select the point to add, in constant time */
1198 select_point(bits, 16, g_pre_comp[0], tmp);
1199 point_add(nq[0], nq[1], nq[2],
1200 nq[0], nq[1], nq[2],
1201 1 /* mixed */ , tmp[0], tmp[1], tmp[2]);
1204 /* do other additions every 5 doublings */
1205 if (num_points && (i % 5 == 0)) {
1206 /* loop over all scalars */
1207 for (num = 0; num < num_points; ++num) {
/* take a signed 5-bit window (plus the round-down bit i-1) of the scalar */
1208 bits = get_bit(scalars[num], i + 4) << 5;
1209 bits |= get_bit(scalars[num], i + 3) << 4;
1210 bits |= get_bit(scalars[num], i + 2) << 3;
1211 bits |= get_bit(scalars[num], i + 1) << 2;
1212 bits |= get_bit(scalars[num], i) << 1;
1213 bits |= get_bit(scalars[num], i - 1);
1214 ec_GFp_nistp_recode_scalar_bits(&sign, &digit, bits);
1216 /* select the point to add or subtract */
1217 select_point(digit, 17, pre_comp[num], tmp);
1218 felem_neg(tmp[3], tmp[1]); /* (X, -Y, Z) is the negative
/* constant-time: use the negated Y only when the recoded digit is negative */
1220 copy_conditional(tmp[1], tmp[3], sign);
1223 point_add(nq[0], nq[1], nq[2],
1224 nq[0], nq[1], nq[2],
1225 mixed, tmp[0], tmp[1], tmp[2]);
1227 memcpy(nq, tmp, 3 * sizeof(felem));
/* write the accumulated point out */
1233 felem_assign(x_out, nq[0]);
1234 felem_assign(y_out, nq[1]);
1235 felem_assign(z_out, nq[2]);
1238 /******************************************************************************/
1240 * FUNCTIONS TO MANAGE PRECOMPUTATION
1243 static NISTP224_PRE_COMP *nistp224_pre_comp_new(void)
1245 NISTP224_PRE_COMP *ret = OPENSSL_zalloc(sizeof(*ret));
1248 ECerr(EC_F_NISTP224_PRE_COMP_NEW, ERR_R_MALLOC_FAILURE);
1252 ret->references = 1;
1254 ret->lock = CRYPTO_THREAD_lock_new();
1255 if (ret->lock == NULL) {
1256 ECerr(EC_F_NISTP224_PRE_COMP_NEW, ERR_R_MALLOC_FAILURE);
1263 NISTP224_PRE_COMP *EC_nistp224_pre_comp_dup(NISTP224_PRE_COMP *p)
1267 CRYPTO_UP_REF(&p->references, &i, p->lock);
1271 void EC_nistp224_pre_comp_free(NISTP224_PRE_COMP *p)
1278 CRYPTO_DOWN_REF(&p->references, &i, p->lock);
1279 REF_PRINT_COUNT("EC_nistp224", x);
1282 REF_ASSERT_ISNT(i < 0);
1284 CRYPTO_THREAD_lock_free(p->lock);
1288 /******************************************************************************/
1290 * OPENSSL EC_METHOD FUNCTIONS
1293 int ec_GFp_nistp224_group_init(EC_GROUP *group)
1296 ret = ec_GFp_simple_group_init(group);
1297 group->a_is_minus3 = 1;
/*
 * ec_GFp_nistp224_group_set_curve: accept only the exact NIST P-224
 * parameters. The supplied (p, a, b) are compared against the built-in
 * nistp224_curve_params table and rejected with EC_R_WRONG_CURVE_PARAMETERS
 * on any mismatch.
 * NOTE(review): this excerpt is missing lines (BN_CTX_start/end, goto err
 * paths, final return); comments annotate visible code only.
 */
1301 int ec_GFp_nistp224_group_set_curve(EC_GROUP *group, const BIGNUM *p,
1302 const BIGNUM *a, const BIGNUM *b,
1306 BIGNUM *curve_p, *curve_a, *curve_b;
1308 BN_CTX *new_ctx = NULL;
/* create a scratch BN_CTX only when the caller did not supply one */
1311 ctx = new_ctx = BN_CTX_new();
1317 curve_p = BN_CTX_get(ctx);
1318 curve_a = BN_CTX_get(ctx);
1319 curve_b = BN_CTX_get(ctx);
/* per BN_CTX_get convention, only the last call needs a NULL check */
1320 if (curve_b == NULL)
/* load the canonical P-224 p, a, b from the parameter table */
1322 BN_bin2bn(nistp224_curve_params[0], sizeof(felem_bytearray), curve_p);
1323 BN_bin2bn(nistp224_curve_params[1], sizeof(felem_bytearray), curve_a);
1324 BN_bin2bn(nistp224_curve_params[2], sizeof(felem_bytearray), curve_b);
1325 if ((BN_cmp(curve_p, p)) || (BN_cmp(curve_a, a)) || (BN_cmp(curve_b, b))) {
1326 ECerr(EC_F_EC_GFP_NISTP224_GROUP_SET_CURVE,
1327 EC_R_WRONG_CURVE_PARAMETERS);
/* install the fast NIST modular reduction for this prime */
1330 group->field_mod_func = BN_nist_mod_224;
1331 ret = ec_GFp_simple_group_set_curve(group, p, a, b, ctx);
/* frees only a locally created context (new_ctx stays NULL otherwise) */
1335 BN_CTX_free(new_ctx);
/*
 * Convert |point|'s Jacobian (X, Y, Z) into affine coordinates
 * (X', Y') = (X/Z^2, Y/Z^3), written to |x| and |y|.
 * Fails with EC_R_POINT_AT_INFINITY for the point at infinity.
 * NOTE(review): the line computing z2 (the field inverse of z1) is not
 * visible in this excerpt; below, z2 is used as Z^{-1}.
 */
1341 * Takes the Jacobian coordinates (X, Y, Z) of a point and returns (X', Y') =
1344 int ec_GFp_nistp224_point_get_affine_coordinates(const EC_GROUP *group,
1345 const EC_POINT *point,
1346 BIGNUM *x, BIGNUM *y,
1349 felem z1, z2, x_in, y_in, x_out, y_out;
1352 if (EC_POINT_is_at_infinity(group, point)) {
1353 ECerr(EC_F_EC_GFP_NISTP224_POINT_GET_AFFINE_COORDINATES,
1354 EC_R_POINT_AT_INFINITY);
/* pull the Jacobian coordinates into field-element form */
1357 if ((!BN_to_felem(x_in, point->X)) || (!BN_to_felem(y_in, point->Y)) ||
1358 (!BN_to_felem(z1, point->Z)))
/* x' = X * (Z^{-1})^2 */
1361 felem_square(tmp, z2);
1362 felem_reduce(z1, tmp);
1363 felem_mul(tmp, x_in, z1);
1364 felem_reduce(x_in, tmp);
1365 felem_contract(x_out, x_in);
1367 if (!felem_to_BN(x, x_out)) {
1368 ECerr(EC_F_EC_GFP_NISTP224_POINT_GET_AFFINE_COORDINATES,
/* y' = Y * (Z^{-1})^3 — reuses z1 which now holds (Z^{-1})^2 */
1373 felem_mul(tmp, z1, z2);
1374 felem_reduce(z1, tmp);
1375 felem_mul(tmp, y_in, z1);
1376 felem_reduce(y_in, tmp);
1377 felem_contract(y_out, y_in);
1379 if (!felem_to_BN(y, y_out)) {
1380 ECerr(EC_F_EC_GFP_NISTP224_POINT_GET_AFFINE_COORDINATES,
/*
 * Batch-convert |num| Jacobian points to affine form using the shared
 * nistp helper, passing this field's arithmetic primitives as callbacks.
 * |tmp_felems| provides num+1 scratch field elements.
 * NOTE(review): several of the callback arguments (the function names
 * following some casts) are missing from this excerpt.
 */
1388 static void make_points_affine(size_t num, felem points[ /* num */ ][3],
1389 felem tmp_felems[ /* num+1 */ ])
1392 * Runs in constant time, unless an input is the point at infinity (which
1393 * normally shouldn't happen).
1395 ec_GFp_nistp_points_make_affine_internal(num,
1399 (void (*)(void *))felem_one,
1401 (void (*)(void *, const void *))
1403 (void (*)(void *, const void *))
1404 felem_square_reduce, (void (*)
1411 (void (*)(void *, const void *))
1413 (void (*)(void *, const void *))
/*
 * ec_GFp_nistp224_points_mul: compute
 *     scalar*G + sum_i scalars[i]*points[i]
 * and store the result in |r|. NULL scalars/points contribute nothing.
 * Cached or built-in generator precomputation is used when it matches the
 * group generator; otherwise the generator is handled as one more
 * arbitrary point.
 * NOTE(review): this excerpt has lost many lines (declarations, braces,
 * else-branches, goto/err handling, BN_CTX_start/end); comments annotate
 * visible code only.
 */
1418 * Computes scalar*generator + \sum scalars[i]*points[i], ignoring NULL
1419 * values Result is stored in r (r can equal one of the inputs).
1421 int ec_GFp_nistp224_points_mul(const EC_GROUP *group, EC_POINT *r,
1422 const BIGNUM *scalar, size_t num,
1423 const EC_POINT *points[],
1424 const BIGNUM *scalars[], BN_CTX *ctx)
1430 BIGNUM *x, *y, *z, *tmp_scalar;
/* g_secret holds the serialized generator scalar; secrets the others */
1431 felem_bytearray g_secret;
1432 felem_bytearray *secrets = NULL;
/* per point: table of the 17 small multiples 0*P..16*P */
1433 felem (*pre_comp)[17][3] = NULL;
1434 felem *tmp_felems = NULL;
1436 int have_pre_comp = 0;
1437 size_t num_points = num;
1438 felem x_in, y_in, z_in, x_out, y_out, z_out;
1439 NISTP224_PRE_COMP *pre = NULL;
1440 const felem(*g_pre_comp)[16][3] = NULL;
1441 EC_POINT *generator = NULL;
1442 const EC_POINT *p = NULL;
1443 const BIGNUM *p_scalar = NULL;
1446 x = BN_CTX_get(ctx);
1447 y = BN_CTX_get(ctx);
1448 z = BN_CTX_get(ctx);
1449 tmp_scalar = BN_CTX_get(ctx);
/* only the last BN_CTX_get result needs checking */
1450 if (tmp_scalar == NULL)
/* try cached, then built-in, generator precomputation */
1453 if (scalar != NULL) {
1454 pre = group->pre_comp.nistp224;
1456 /* we have precomputation, try to use it */
1457 g_pre_comp = (const felem(*)[16][3])pre->g_pre_comp;
1459 /* try to use the standard precomputation */
1460 g_pre_comp = &gmul[0];
1461 generator = EC_POINT_new(group);
1462 if (generator == NULL)
1464 /* get the generator from precomputation */
1465 if (!felem_to_BN(x, g_pre_comp[0][1][0]) ||
1466 !felem_to_BN(y, g_pre_comp[0][1][1]) ||
1467 !felem_to_BN(z, g_pre_comp[0][1][2])) {
1468 ECerr(EC_F_EC_GFP_NISTP224_POINTS_MUL, ERR_R_BN_LIB);
1471 if (!EC_POINT_set_Jprojective_coordinates_GFp(group,
/* the table is only usable if it was built for this group's generator */
1475 if (0 == EC_POINT_cmp(group, generator, group->generator, ctx))
1476 /* precomputation matches generator */
1480 * we don't have valid precomputation: treat the generator as a
1483 num_points = num_points + 1;
1486 if (num_points > 0) {
1487 if (num_points >= 3) {
1489 * unless we precompute multiples for just one or two points,
1490 * converting those into affine form is time well spent
/* allocate scalar buffers and per-point multiple tables */
1494 secrets = OPENSSL_zalloc(sizeof(*secrets) * num_points);
1495 pre_comp = OPENSSL_zalloc(sizeof(*pre_comp) * num_points);
1498 OPENSSL_malloc(sizeof(felem) * (num_points * 17 + 1));
1499 if ((secrets == NULL) || (pre_comp == NULL)
1500 || (mixed && (tmp_felems == NULL))) {
1501 ECerr(EC_F_EC_GFP_NISTP224_POINTS_MUL, ERR_R_MALLOC_FAILURE);
1506 * we treat NULL scalars as 0, and NULL points as points at infinity,
1507 * i.e., they contribute nothing to the linear combination
1509 for (i = 0; i < num_points; ++i) {
/* the extra slot (when precomputation was unusable) is the generator */
1512 p = EC_GROUP_get0_generator(group);
1515 /* the i^th point */
1517 p_scalar = scalars[i];
1519 if ((p_scalar != NULL) && (p != NULL)) {
1520 /* reduce scalar to 0 <= scalar < 2^224 */
1521 if ((BN_num_bits(p_scalar) > 224)
1522 || (BN_is_negative(p_scalar))) {
1524 * this is an unusual input, and we don't guarantee
1527 if (!BN_nnmod(tmp_scalar, p_scalar, group->order, ctx)) {
1528 ECerr(EC_F_EC_GFP_NISTP224_POINTS_MUL, ERR_R_BN_LIB);
/* serialize the (reduced) scalar little-endian at fixed width */
1531 num_bytes = BN_bn2lebinpad(tmp_scalar,
1532 secrets[i], sizeof(secrets[i]));
1534 num_bytes = BN_bn2lebinpad(p_scalar,
1535 secrets[i], sizeof(secrets[i]));
1537 if (num_bytes < 0) {
1538 ECerr(EC_F_EC_GFP_NISTP224_POINTS_MUL, ERR_R_BN_LIB);
1541 /* precompute multiples */
1542 if ((!BN_to_felem(x_out, p->X)) ||
1543 (!BN_to_felem(y_out, p->Y)) ||
1544 (!BN_to_felem(z_out, p->Z)))
1546 felem_assign(pre_comp[i][1][0], x_out);
1547 felem_assign(pre_comp[i][1][1], y_out);
1548 felem_assign(pre_comp[i][1][2], z_out);
/* build 2*P..16*P: odd entries by adding P, even entries by doubling */
1549 for (j = 2; j <= 16; ++j) {
1551 point_add(pre_comp[i][j][0], pre_comp[i][j][1],
1552 pre_comp[i][j][2], pre_comp[i][1][0],
1553 pre_comp[i][1][1], pre_comp[i][1][2], 0,
1554 pre_comp[i][j - 1][0],
1555 pre_comp[i][j - 1][1],
1556 pre_comp[i][j - 1][2]);
1558 point_double(pre_comp[i][j][0], pre_comp[i][j][1],
1559 pre_comp[i][j][2], pre_comp[i][j / 2][0],
1560 pre_comp[i][j / 2][1],
1561 pre_comp[i][j / 2][2]);
1567 make_points_affine(num_points * 17, pre_comp[0], tmp_felems);
1570 /* the scalar for the generator */
1571 if ((scalar != NULL) && (have_pre_comp)) {
1572 memset(g_secret, 0, sizeof(g_secret));
1573 /* reduce scalar to 0 <= scalar < 2^224 */
1574 if ((BN_num_bits(scalar) > 224) || (BN_is_negative(scalar))) {
1576 * this is an unusual input, and we don't guarantee
1579 if (!BN_nnmod(tmp_scalar, scalar, group->order, ctx)) {
1580 ECerr(EC_F_EC_GFP_NISTP224_POINTS_MUL, ERR_R_BN_LIB);
1583 num_bytes = BN_bn2lebinpad(tmp_scalar, g_secret, sizeof(g_secret));
1585 num_bytes = BN_bn2lebinpad(scalar, g_secret, sizeof(g_secret));
1587 /* do the multiplication with generator precomputation */
1588 batch_mul(x_out, y_out, z_out,
1589 (const felem_bytearray(*))secrets, num_points,
1591 mixed, (const felem(*)[17][3])pre_comp, g_pre_comp);
1593 /* do the multiplication without generator precomputation */
1594 batch_mul(x_out, y_out, z_out,
1595 (const felem_bytearray(*))secrets, num_points,
1596 NULL, mixed, (const felem(*)[17][3])pre_comp, NULL);
1598 /* reduce the output to its unique minimal representation */
1599 felem_contract(x_in, x_out);
1600 felem_contract(y_in, y_out);
1601 felem_contract(z_in, z_out);
1602 if ((!felem_to_BN(x, x_in)) || (!felem_to_BN(y, y_in)) ||
1603 (!felem_to_BN(z, z_in))) {
1604 ECerr(EC_F_EC_GFP_NISTP224_POINTS_MUL, ERR_R_BN_LIB);
1607 ret = EC_POINT_set_Jprojective_coordinates_GFp(group, r, x, y, z, ctx);
/* cleanup: frees are NULL-safe */
1611 EC_POINT_free(generator);
1612 OPENSSL_free(secrets);
1613 OPENSSL_free(pre_comp);
1614 OPENSSL_free(tmp_felems);
/*
 * ec_GFp_nistp224_precompute_mult: build generator multiple tables for
 * |group| and attach them. For the standard generator the built-in gmul
 * table is copied; otherwise two 16-entry tables of combinations of
 * 2^(28k)*G are computed by repeated doubling and addition.
 * NOTE(review): this excerpt has lost lines (declarations of x, y, i, j,
 * ret, goto err targets and returns); comments annotate visible code only.
 */
1618 int ec_GFp_nistp224_precompute_mult(EC_GROUP *group, BN_CTX *ctx)
1621 NISTP224_PRE_COMP *pre = NULL;
1624 EC_POINT *generator = NULL;
/* scratch space for the final batch affine conversion of 31 points */
1625 felem tmp_felems[32];
1627 BN_CTX *new_ctx = NULL;
1630 /* throw away old precomputation */
1631 EC_pre_comp_free(group);
/* create a scratch BN_CTX only when the caller did not supply one */
1635 ctx = new_ctx = BN_CTX_new();
1641 x = BN_CTX_get(ctx);
1642 y = BN_CTX_get(ctx);
1645 /* get the generator */
1646 if (group->generator == NULL)
1648 generator = EC_POINT_new(group);
1649 if (generator == NULL)
/* construct the standard P-224 generator from the parameter table */
1651 BN_bin2bn(nistp224_curve_params[3], sizeof(felem_bytearray), x);
1652 BN_bin2bn(nistp224_curve_params[4], sizeof(felem_bytearray), y);
1653 if (!EC_POINT_set_affine_coordinates(group, generator, x, y, ctx))
1655 if ((pre = nistp224_pre_comp_new()) == NULL)
1658 * if the generator is the standard one, use built-in precomputation
1660 if (0 == EC_POINT_cmp(group, generator, group->generator, ctx)) {
1661 memcpy(pre->g_pre_comp, gmul, sizeof(pre->g_pre_comp));
/* otherwise seed table 0 entry 1 with the group's own generator */
1664 if ((!BN_to_felem(pre->g_pre_comp[0][1][0], group->generator->X)) ||
1665 (!BN_to_felem(pre->g_pre_comp[0][1][1], group->generator->Y)) ||
1666 (!BN_to_felem(pre->g_pre_comp[0][1][2], group->generator->Z)))
1669 * compute 2^56*G, 2^112*G, 2^168*G for the first table, 2^28*G, 2^84*G,
1670 * 2^140*G, 2^196*G for the second one
1672 for (i = 1; i <= 8; i <<= 1) {
/* 28 doublings take table-0 entry i to table-1 entry i (shift by 2^28) */
1673 point_double(pre->g_pre_comp[1][i][0], pre->g_pre_comp[1][i][1],
1674 pre->g_pre_comp[1][i][2], pre->g_pre_comp[0][i][0],
1675 pre->g_pre_comp[0][i][1], pre->g_pre_comp[0][i][2]);
1676 for (j = 0; j < 27; ++j) {
1677 point_double(pre->g_pre_comp[1][i][0], pre->g_pre_comp[1][i][1],
1678 pre->g_pre_comp[1][i][2], pre->g_pre_comp[1][i][0],
1679 pre->g_pre_comp[1][i][1], pre->g_pre_comp[1][i][2]);
/* 28 more doublings give table-0 entry 2i (another shift by 2^28) */
1683 point_double(pre->g_pre_comp[0][2 * i][0],
1684 pre->g_pre_comp[0][2 * i][1],
1685 pre->g_pre_comp[0][2 * i][2], pre->g_pre_comp[1][i][0],
1686 pre->g_pre_comp[1][i][1], pre->g_pre_comp[1][i][2]);
1687 for (j = 0; j < 27; ++j) {
1688 point_double(pre->g_pre_comp[0][2 * i][0],
1689 pre->g_pre_comp[0][2 * i][1],
1690 pre->g_pre_comp[0][2 * i][2],
1691 pre->g_pre_comp[0][2 * i][0],
1692 pre->g_pre_comp[0][2 * i][1],
1693 pre->g_pre_comp[0][2 * i][2]);
/* fill in all remaining table entries as sums of the power-of-two entries */
1696 for (i = 0; i < 2; i++) {
1697 /* g_pre_comp[i][0] is the point at infinity */
1698 memset(pre->g_pre_comp[i][0], 0, sizeof(pre->g_pre_comp[i][0]));
1699 /* the remaining multiples */
1700 /* 2^56*G + 2^112*G resp. 2^84*G + 2^140*G */
1701 point_add(pre->g_pre_comp[i][6][0], pre->g_pre_comp[i][6][1],
1702 pre->g_pre_comp[i][6][2], pre->g_pre_comp[i][4][0],
1703 pre->g_pre_comp[i][4][1], pre->g_pre_comp[i][4][2],
1704 0, pre->g_pre_comp[i][2][0], pre->g_pre_comp[i][2][1],
1705 pre->g_pre_comp[i][2][2]);
1706 /* 2^56*G + 2^168*G resp. 2^84*G + 2^196*G */
1707 point_add(pre->g_pre_comp[i][10][0], pre->g_pre_comp[i][10][1],
1708 pre->g_pre_comp[i][10][2], pre->g_pre_comp[i][8][0],
1709 pre->g_pre_comp[i][8][1], pre->g_pre_comp[i][8][2],
1710 0, pre->g_pre_comp[i][2][0], pre->g_pre_comp[i][2][1],
1711 pre->g_pre_comp[i][2][2]);
1712 /* 2^112*G + 2^168*G resp. 2^140*G + 2^196*G */
1713 point_add(pre->g_pre_comp[i][12][0], pre->g_pre_comp[i][12][1],
1714 pre->g_pre_comp[i][12][2], pre->g_pre_comp[i][8][0],
1715 pre->g_pre_comp[i][8][1], pre->g_pre_comp[i][8][2],
1716 0, pre->g_pre_comp[i][4][0], pre->g_pre_comp[i][4][1],
1717 pre->g_pre_comp[i][4][2]);
1719 * 2^56*G + 2^112*G + 2^168*G resp. 2^84*G + 2^140*G + 2^196*G
1721 point_add(pre->g_pre_comp[i][14][0], pre->g_pre_comp[i][14][1],
1722 pre->g_pre_comp[i][14][2], pre->g_pre_comp[i][12][0],
1723 pre->g_pre_comp[i][12][1], pre->g_pre_comp[i][12][2],
1724 0, pre->g_pre_comp[i][2][0], pre->g_pre_comp[i][2][1],
1725 pre->g_pre_comp[i][2][2]);
1726 for (j = 1; j < 8; ++j) {
1727 /* odd multiples: add G resp. 2^28*G */
1728 point_add(pre->g_pre_comp[i][2 * j + 1][0],
1729 pre->g_pre_comp[i][2 * j + 1][1],
1730 pre->g_pre_comp[i][2 * j + 1][2],
1731 pre->g_pre_comp[i][2 * j][0],
1732 pre->g_pre_comp[i][2 * j][1],
1733 pre->g_pre_comp[i][2 * j][2], 0,
1734 pre->g_pre_comp[i][1][0], pre->g_pre_comp[i][1][1],
1735 pre->g_pre_comp[i][1][2]);
/* skip entry 0 (infinity): 2*16 - 1 = 31 points to convert */
1738 make_points_affine(31, &(pre->g_pre_comp[0][1]), tmp_felems);
/* hand ownership of |pre| to the group */
1741 SETPRECOMP(group, nistp224, pre);
1746 EC_POINT_free(generator);
1748 BN_CTX_free(new_ctx);
/* on the error path this drops the reference we still hold */
1750 EC_nistp224_pre_comp_free(pre);
1754 int ec_GFp_nistp224_have_precompute_mult(const EC_GROUP *group)
1756 return HAVEPRECOMP(group, nistp224);