2 * Written by Emilia Kasper (Google) for the OpenSSL project.
4 /* Copyright 2011 Google Inc.
6 * Licensed under the Apache License, Version 2.0 (the "License");
8 * you may not use this file except in compliance with the License.
9 * You may obtain a copy of the License at
11 * http://www.apache.org/licenses/LICENSE-2.0
13 * Unless required by applicable law or agreed to in writing, software
14 * distributed under the License is distributed on an "AS IS" BASIS,
15 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
16 * See the License for the specific language governing permissions and
17 * limitations under the License.
21 * A 64-bit implementation of the NIST P-224 elliptic curve point multiplication
23 * Inspired by Daniel J. Bernstein's public domain nistp224 implementation
24 * and Adam Langley's public domain 64-bit C implementation of curve25519
27 #include <openssl/opensslconf.h>
28 #ifdef OPENSSL_NO_EC_NISTP_64_GCC_128
29 NON_EMPTY_TRANSLATION_UNIT
# include <string.h>

# include <openssl/err.h>
37 # if defined(__GNUC__) && (__GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ >= 1))
38 /* even with gcc, the typedef won't work for 32-bit platforms */
39 typedef __uint128_t uint128_t; /* nonstandard; implemented by gcc on 64-bit
42 # error "Need GCC 3.1 or later to define type uint128_t"
49 /******************************************************************************/
51 * INTERNAL REPRESENTATION OF FIELD ELEMENTS
53 * Field elements are represented as a_0 + 2^56*a_1 + 2^112*a_2 + 2^168*a_3
54 * using 64-bit coefficients called 'limbs',
55 * and sometimes (for multiplication results) as
56 * b_0 + 2^56*b_1 + 2^112*b_2 + 2^168*b_3 + 2^224*b_4 + 2^280*b_5 + 2^336*b_6
57 * using 128-bit coefficients called 'widelimbs'.
58 * A 4-limb representation is an 'felem';
59 * a 7-widelimb representation is a 'widefelem'.
60 * Even within felems, bits of adjacent limbs overlap, and we don't always
61 * reduce the representations: we ensure that inputs to each felem
62 * multiplication satisfy a_i < 2^60, so outputs satisfy b_i < 4*2^60*2^60,
63 * and fit into a 128-bit word without overflow. The coefficients are then
64 * again partially reduced to obtain an felem satisfying a_i < 2^57.
65 * We only reduce to the unique minimal representation at the end of the
69 typedef uint64_t limb;
70 typedef uint128_t widelimb;
72 typedef limb felem[4];
73 typedef widelimb widefelem[7];
76 * Field element represented as a byte arrary. 28*8 = 224 bits is also the
77 * group order size for the elliptic curve, and we also use this type for
78 * scalars for point multiplication.
80 typedef u8 felem_bytearray[28];
82 static const felem_bytearray nistp224_curve_params[5] = {
83 {0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, /* p */
84 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x00,
85 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01},
86 {0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, /* a */
87 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFE, 0xFF, 0xFF, 0xFF, 0xFF,
88 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFE},
89 {0xB4, 0x05, 0x0A, 0x85, 0x0C, 0x04, 0xB3, 0xAB, 0xF5, 0x41, /* b */
90 0x32, 0x56, 0x50, 0x44, 0xB0, 0xB7, 0xD7, 0xBF, 0xD8, 0xBA,
91 0x27, 0x0B, 0x39, 0x43, 0x23, 0x55, 0xFF, 0xB4},
92 {0xB7, 0x0E, 0x0C, 0xBD, 0x6B, 0xB4, 0xBF, 0x7F, 0x32, 0x13, /* x */
93 0x90, 0xB9, 0x4A, 0x03, 0xC1, 0xD3, 0x56, 0xC2, 0x11, 0x22,
94 0x34, 0x32, 0x80, 0xD6, 0x11, 0x5C, 0x1D, 0x21},
95 {0xbd, 0x37, 0x63, 0x88, 0xb5, 0xf7, 0x23, 0xfb, 0x4c, 0x22, /* y */
96 0xdf, 0xe6, 0xcd, 0x43, 0x75, 0xa0, 0x5a, 0x07, 0x47, 0x64,
97 0x44, 0xd5, 0x81, 0x99, 0x85, 0x00, 0x7e, 0x34}
101 * Precomputed multiples of the standard generator
102 * Points are given in coordinates (X, Y, Z) where Z normally is 1
103 * (0 for the point at infinity).
104 * For each field element, slice a_0 is word 0, etc.
106 * The table has 2 * 16 elements, starting with the following:
107 * index | bits | point
 * ------+---------+------------------------------
 *     0 | 0 0 0 0 | the point at infinity
 *     1 | 0 0 0 1 | G
 *     2 | 0 0 1 0 | 2^56G
112 * 3 | 0 0 1 1 | (2^56 + 1)G
113 * 4 | 0 1 0 0 | 2^112G
114 * 5 | 0 1 0 1 | (2^112 + 1)G
115 * 6 | 0 1 1 0 | (2^112 + 2^56)G
116 * 7 | 0 1 1 1 | (2^112 + 2^56 + 1)G
117 * 8 | 1 0 0 0 | 2^168G
118 * 9 | 1 0 0 1 | (2^168 + 1)G
119 * 10 | 1 0 1 0 | (2^168 + 2^56)G
120 * 11 | 1 0 1 1 | (2^168 + 2^56 + 1)G
121 * 12 | 1 1 0 0 | (2^168 + 2^112)G
122 * 13 | 1 1 0 1 | (2^168 + 2^112 + 1)G
123 * 14 | 1 1 1 0 | (2^168 + 2^112 + 2^56)G
124 * 15 | 1 1 1 1 | (2^168 + 2^112 + 2^56 + 1)G
125 * followed by a copy of this with each element multiplied by 2^28.
127 * The reason for this is so that we can clock bits into four different
128 * locations when doing simple scalar multiplies against the base point,
129 * and then another four locations using the second 16 elements.
131 static const felem gmul[2][16][3] = {
135 {{0x3280d6115c1d21, 0xc1d356c2112234, 0x7f321390b94a03, 0xb70e0cbd6bb4bf},
136 {0xd5819985007e34, 0x75a05a07476444, 0xfb4c22dfe6cd43, 0xbd376388b5f723},
138 {{0xfd9675666ebbe9, 0xbca7664d40ce5e, 0x2242df8d8a2a43, 0x1f49bbb0f99bc5},
139 {0x29e0b892dc9c43, 0xece8608436e662, 0xdc858f185310d0, 0x9812dd4eb8d321},
141 {{0x6d3e678d5d8eb8, 0x559eed1cb362f1, 0x16e9a3bbce8a3f, 0xeedcccd8c2a748},
142 {0xf19f90ed50266d, 0xabf2b4bf65f9df, 0x313865468fafec, 0x5cb379ba910a17},
144 {{0x0641966cab26e3, 0x91fb2991fab0a0, 0xefec27a4e13a0b, 0x0499aa8a5f8ebe},
145 {0x7510407766af5d, 0x84d929610d5450, 0x81d77aae82f706, 0x6916f6d4338c5b},
147 {{0xea95ac3b1f15c6, 0x086000905e82d4, 0xdd323ae4d1c8b1, 0x932b56be7685a3},
148 {0x9ef93dea25dbbf, 0x41665960f390f0, 0xfdec76dbe2a8a7, 0x523e80f019062a},
150 {{0x822fdd26732c73, 0xa01c83531b5d0f, 0x363f37347c1ba4, 0xc391b45c84725c},
151 {0xbbd5e1b2d6ad24, 0xddfbcde19dfaec, 0xc393da7e222a7f, 0x1efb7890ede244},
153 {{0x4c9e90ca217da1, 0xd11beca79159bb, 0xff8d33c2c98b7c, 0x2610b39409f849},
154 {0x44d1352ac64da0, 0xcdbb7b2c46b4fb, 0x966c079b753c89, 0xfe67e4e820b112},
156 {{0xe28cae2df5312d, 0xc71b61d16f5c6e, 0x79b7619a3e7c4c, 0x05c73240899b47},
157 {0x9f7f6382c73e3a, 0x18615165c56bda, 0x641fab2116fd56, 0x72855882b08394},
159 {{0x0469182f161c09, 0x74a98ca8d00fb5, 0xb89da93489a3e0, 0x41c98768fb0c1d},
160 {0xe5ea05fb32da81, 0x3dce9ffbca6855, 0x1cfe2d3fbf59e6, 0x0e5e03408738a7},
162 {{0xdab22b2333e87f, 0x4430137a5dd2f6, 0xe03ab9f738beb8, 0xcb0c5d0dc34f24},
163 {0x764a7df0c8fda5, 0x185ba5c3fa2044, 0x9281d688bcbe50, 0xc40331df893881},
165 {{0xb89530796f0f60, 0xade92bd26909a3, 0x1a0c83fb4884da, 0x1765bf22a5a984},
166 {0x772a9ee75db09e, 0x23bc6c67cec16f, 0x4c1edba8b14e2f, 0xe2a215d9611369},
168 {{0x571e509fb5efb3, 0xade88696410552, 0xc8ae85fada74fe, 0x6c7e4be83bbde3},
169 {0xff9f51160f4652, 0xb47ce2495a6539, 0xa2946c53b582f4, 0x286d2db3ee9a60},
171 {{0x40bbd5081a44af, 0x0995183b13926c, 0xbcefba6f47f6d0, 0x215619e9cc0057},
172 {0x8bc94d3b0df45e, 0xf11c54a3694f6f, 0x8631b93cdfe8b5, 0xe7e3f4b0982db9},
174 {{0xb17048ab3e1c7b, 0xac38f36ff8a1d8, 0x1c29819435d2c6, 0xc813132f4c07e9},
175 {0x2891425503b11f, 0x08781030579fea, 0xf5426ba5cc9674, 0x1e28ebf18562bc},
177 {{0x9f31997cc864eb, 0x06cd91d28b5e4c, 0xff17036691a973, 0xf1aef351497c58},
178 {0xdd1f2d600564ff, 0xdead073b1402db, 0x74a684435bd693, 0xeea7471f962558},
183 {{0x9665266dddf554, 0x9613d78b60ef2d, 0xce27a34cdba417, 0xd35ab74d6afc31},
184 {0x85ccdd22deb15e, 0x2137e5783a6aab, 0xa141cffd8c93c6, 0x355a1830e90f2d},
186 {{0x1a494eadaade65, 0xd6da4da77fe53c, 0xe7992996abec86, 0x65c3553c6090e3},
187 {0xfa610b1fb09346, 0xf1c6540b8a4aaf, 0xc51a13ccd3cbab, 0x02995b1b18c28a},
189 {{0x7874568e7295ef, 0x86b419fbe38d04, 0xdc0690a7550d9a, 0xd3966a44beac33},
190 {0x2b7280ec29132f, 0xbeaa3b6a032df3, 0xdc7dd88ae41200, 0xd25e2513e3a100},
192 {{0x924857eb2efafd, 0xac2bce41223190, 0x8edaa1445553fc, 0x825800fd3562d5},
193 {0x8d79148ea96621, 0x23a01c3dd9ed8d, 0xaf8b219f9416b5, 0xd8db0cc277daea},
195 {{0x76a9c3b1a700f0, 0xe9acd29bc7e691, 0x69212d1a6b0327, 0x6322e97fe154be},
196 {0x469fc5465d62aa, 0x8d41ed18883b05, 0x1f8eae66c52b88, 0xe4fcbe9325be51},
198 {{0x825fdf583cac16, 0x020b857c7b023a, 0x683c17744b0165, 0x14ffd0a2daf2f1},
199 {0x323b36184218f9, 0x4944ec4e3b47d4, 0xc15b3080841acf, 0x0bced4b01a28bb},
201 {{0x92ac22230df5c4, 0x52f33b4063eda8, 0xcb3f19870c0c93, 0x40064f2ba65233},
202 {0xfe16f0924f8992, 0x012da25af5b517, 0x1a57bb24f723a6, 0x06f8bc76760def},
204 {{0x4a7084f7817cb9, 0xbcab0738ee9a78, 0x3ec11e11d9c326, 0xdc0fe90e0f1aae},
205 {0xcf639ea5f98390, 0x5c350aa22ffb74, 0x9afae98a4047b7, 0x956ec2d617fc45},
207 {{0x4306d648c1be6a, 0x9247cd8bc9a462, 0xf5595e377d2f2e, 0xbd1c3caff1a52e},
208 {0x045e14472409d0, 0x29f3e17078f773, 0x745a602b2d4f7d, 0x191837685cdfbb},
210 {{0x5b6ee254a8cb79, 0x4953433f5e7026, 0xe21faeb1d1def4, 0xc4c225785c09de},
211 {0x307ce7bba1e518, 0x31b125b1036db8, 0x47e91868839e8f, 0xc765866e33b9f3},
213 {{0x3bfece24f96906, 0x4794da641e5093, 0xde5df64f95db26, 0x297ecd89714b05},
214 {0x701bd3ebb2c3aa, 0x7073b4f53cb1d5, 0x13c5665658af16, 0x9895089d66fe58},
216 {{0x0fef05f78c4790, 0x2d773633b05d2e, 0x94229c3a951c94, 0xbbbd70df4911bb},
217 {0xb2c6963d2c1168, 0x105f47a72b0d73, 0x9fdf6111614080, 0x7b7e94b39e67b0},
219 {{0xad1a7d6efbe2b3, 0xf012482c0da69d, 0x6b3bdf12438345, 0x40d7558d7aa4d9},
220 {0x8a09fffb5c6d3d, 0x9a356e5d9ffd38, 0x5973f15f4f9b1c, 0xdcd5f59f63c3ea},
222 {{0xacf39f4c5ca7ab, 0x4c8071cc5fd737, 0xc64e3602cd1184, 0x0acd4644c9abba},
223 {0x6c011a36d8bf6e, 0xfecd87ba24e32a, 0x19f6f56574fad8, 0x050b204ced9405},
225 {{0xed4f1cae7d9a96, 0x5ceef7ad94c40a, 0x778e4a3bf3ef9b, 0x7405783dc3b55e},
226 {0x32477c61b6e8c6, 0xb46a97570f018b, 0x91176d0a7e95d1, 0x3df90fbc4c7d0e},
230 /* Precomputation for the group generator. */
231 struct nistp224_pre_comp_st {
232 felem g_pre_comp[2][16][3];
237 const EC_METHOD *EC_GFp_nistp224_method(void)
239 static const EC_METHOD ret = {
240 EC_FLAGS_DEFAULT_OCT,
241 NID_X9_62_prime_field,
242 ec_GFp_nistp224_group_init,
243 ec_GFp_simple_group_finish,
244 ec_GFp_simple_group_clear_finish,
245 ec_GFp_nist_group_copy,
246 ec_GFp_nistp224_group_set_curve,
247 ec_GFp_simple_group_get_curve,
248 ec_GFp_simple_group_get_degree,
249 ec_group_simple_order_bits,
250 ec_GFp_simple_group_check_discriminant,
251 ec_GFp_simple_point_init,
252 ec_GFp_simple_point_finish,
253 ec_GFp_simple_point_clear_finish,
254 ec_GFp_simple_point_copy,
255 ec_GFp_simple_point_set_to_infinity,
256 ec_GFp_simple_set_Jprojective_coordinates_GFp,
257 ec_GFp_simple_get_Jprojective_coordinates_GFp,
258 ec_GFp_simple_point_set_affine_coordinates,
259 ec_GFp_nistp224_point_get_affine_coordinates,
260 0 /* point_set_compressed_coordinates */ ,
265 ec_GFp_simple_invert,
266 ec_GFp_simple_is_at_infinity,
267 ec_GFp_simple_is_on_curve,
269 ec_GFp_simple_make_affine,
270 ec_GFp_simple_points_make_affine,
271 ec_GFp_nistp224_points_mul,
272 ec_GFp_nistp224_precompute_mult,
273 ec_GFp_nistp224_have_precompute_mult,
274 ec_GFp_nist_field_mul,
275 ec_GFp_nist_field_sqr,
277 0 /* field_encode */ ,
278 0 /* field_decode */ ,
279 0, /* field_set_to_one */
280 ec_key_simple_priv2oct,
281 ec_key_simple_oct2priv,
283 ec_key_simple_generate_key,
284 ec_key_simple_check_key,
285 ec_key_simple_generate_public_key,
288 ecdh_simple_compute_key
295 * Helper functions to convert field elements to/from internal representation
297 static void bin28_to_felem(felem out, const u8 in[28])
299 out[0] = *((const uint64_t *)(in)) & 0x00ffffffffffffff;
300 out[1] = (*((const uint64_t *)(in + 7))) & 0x00ffffffffffffff;
301 out[2] = (*((const uint64_t *)(in + 14))) & 0x00ffffffffffffff;
302 out[3] = (*((const uint64_t *)(in+20))) >> 8;
305 static void felem_to_bin28(u8 out[28], const felem in)
308 for (i = 0; i < 7; ++i) {
309 out[i] = in[0] >> (8 * i);
310 out[i + 7] = in[1] >> (8 * i);
311 out[i + 14] = in[2] >> (8 * i);
312 out[i + 21] = in[3] >> (8 * i);
316 /* To preserve endianness when using BN_bn2bin and BN_bin2bn */
317 static void flip_endian(u8 *out, const u8 *in, unsigned len)
320 for (i = 0; i < len; ++i)
321 out[i] = in[len - 1 - i];
324 /* From OpenSSL BIGNUM to internal representation */
325 static int BN_to_felem(felem out, const BIGNUM *bn)
327 felem_bytearray b_in;
328 felem_bytearray b_out;
331 /* BN_bn2bin eats leading zeroes */
332 memset(b_out, 0, sizeof(b_out));
333 num_bytes = BN_num_bytes(bn);
334 if (num_bytes > sizeof b_out) {
335 ECerr(EC_F_BN_TO_FELEM, EC_R_BIGNUM_OUT_OF_RANGE);
338 if (BN_is_negative(bn)) {
339 ECerr(EC_F_BN_TO_FELEM, EC_R_BIGNUM_OUT_OF_RANGE);
342 num_bytes = BN_bn2bin(bn, b_in);
343 flip_endian(b_out, b_in, num_bytes);
344 bin28_to_felem(out, b_out);
348 /* From internal representation to OpenSSL BIGNUM */
349 static BIGNUM *felem_to_BN(BIGNUM *out, const felem in)
351 felem_bytearray b_in, b_out;
352 felem_to_bin28(b_in, in);
353 flip_endian(b_out, b_in, sizeof b_out);
354 return BN_bin2bn(b_out, sizeof b_out, out);
357 /******************************************************************************/
361 * Field operations, using the internal representation of field elements.
362 * NB! These operations are specific to our point multiplication and cannot be
363 * expected to be correct in general - e.g., multiplication with a large scalar
364 * will cause an overflow.
368 static void felem_one(felem out)
376 static void felem_assign(felem out, const felem in)
384 /* Sum two field elements: out += in */
385 static void felem_sum(felem out, const felem in)
393 /* Get negative value: out = -in */
394 /* Assumes in[i] < 2^57 */
395 static void felem_neg(felem out, const felem in)
397 static const limb two58p2 = (((limb) 1) << 58) + (((limb) 1) << 2);
398 static const limb two58m2 = (((limb) 1) << 58) - (((limb) 1) << 2);
399 static const limb two58m42m2 = (((limb) 1) << 58) -
400 (((limb) 1) << 42) - (((limb) 1) << 2);
402 /* Set to 0 mod 2^224-2^96+1 to ensure out > in */
403 out[0] = two58p2 - in[0];
404 out[1] = two58m42m2 - in[1];
405 out[2] = two58m2 - in[2];
406 out[3] = two58m2 - in[3];
409 /* Subtract field elements: out -= in */
410 /* Assumes in[i] < 2^57 */
411 static void felem_diff(felem out, const felem in)
413 static const limb two58p2 = (((limb) 1) << 58) + (((limb) 1) << 2);
414 static const limb two58m2 = (((limb) 1) << 58) - (((limb) 1) << 2);
415 static const limb two58m42m2 = (((limb) 1) << 58) -
416 (((limb) 1) << 42) - (((limb) 1) << 2);
418 /* Add 0 mod 2^224-2^96+1 to ensure out > in */
420 out[1] += two58m42m2;
430 /* Subtract in unreduced 128-bit mode: out -= in */
431 /* Assumes in[i] < 2^119 */
432 static void widefelem_diff(widefelem out, const widefelem in)
434 static const widelimb two120 = ((widelimb) 1) << 120;
435 static const widelimb two120m64 = (((widelimb) 1) << 120) -
436 (((widelimb) 1) << 64);
437 static const widelimb two120m104m64 = (((widelimb) 1) << 120) -
438 (((widelimb) 1) << 104) - (((widelimb) 1) << 64);
440 /* Add 0 mod 2^224-2^96+1 to ensure out > in */
445 out[4] += two120m104m64;
458 /* Subtract in mixed mode: out128 -= in64 */
460 static void felem_diff_128_64(widefelem out, const felem in)
462 static const widelimb two64p8 = (((widelimb) 1) << 64) +
463 (((widelimb) 1) << 8);
464 static const widelimb two64m8 = (((widelimb) 1) << 64) -
465 (((widelimb) 1) << 8);
466 static const widelimb two64m48m8 = (((widelimb) 1) << 64) -
467 (((widelimb) 1) << 48) - (((widelimb) 1) << 8);
469 /* Add 0 mod 2^224-2^96+1 to ensure out > in */
471 out[1] += two64m48m8;
482 * Multiply a field element by a scalar: out = out * scalar The scalars we
483 * actually use are small, so results fit without overflow
485 static void felem_scalar(felem out, const limb scalar)
494 * Multiply an unreduced field element by a scalar: out = out * scalar The
495 * scalars we actually use are small, so results fit without overflow
497 static void widefelem_scalar(widefelem out, const widelimb scalar)
508 /* Square a field element: out = in^2 */
509 static void felem_square(widefelem out, const felem in)
511 limb tmp0, tmp1, tmp2;
515 out[0] = ((widelimb) in[0]) * in[0];
516 out[1] = ((widelimb) in[0]) * tmp1;
517 out[2] = ((widelimb) in[0]) * tmp2 + ((widelimb) in[1]) * in[1];
518 out[3] = ((widelimb) in[3]) * tmp0 + ((widelimb) in[1]) * tmp2;
519 out[4] = ((widelimb) in[3]) * tmp1 + ((widelimb) in[2]) * in[2];
520 out[5] = ((widelimb) in[3]) * tmp2;
521 out[6] = ((widelimb) in[3]) * in[3];
524 /* Multiply two field elements: out = in1 * in2 */
525 static void felem_mul(widefelem out, const felem in1, const felem in2)
527 out[0] = ((widelimb) in1[0]) * in2[0];
528 out[1] = ((widelimb) in1[0]) * in2[1] + ((widelimb) in1[1]) * in2[0];
529 out[2] = ((widelimb) in1[0]) * in2[2] + ((widelimb) in1[1]) * in2[1] +
530 ((widelimb) in1[2]) * in2[0];
531 out[3] = ((widelimb) in1[0]) * in2[3] + ((widelimb) in1[1]) * in2[2] +
532 ((widelimb) in1[2]) * in2[1] + ((widelimb) in1[3]) * in2[0];
533 out[4] = ((widelimb) in1[1]) * in2[3] + ((widelimb) in1[2]) * in2[2] +
534 ((widelimb) in1[3]) * in2[1];
535 out[5] = ((widelimb) in1[2]) * in2[3] + ((widelimb) in1[3]) * in2[2];
536 out[6] = ((widelimb) in1[3]) * in2[3];
540 * Reduce seven 128-bit coefficients to four 64-bit coefficients.
541 * Requires in[i] < 2^126,
542 * ensures out[0] < 2^56, out[1] < 2^56, out[2] < 2^56, out[3] <= 2^56 + 2^16 */
543 static void felem_reduce(felem out, const widefelem in)
545 static const widelimb two127p15 = (((widelimb) 1) << 127) +
546 (((widelimb) 1) << 15);
547 static const widelimb two127m71 = (((widelimb) 1) << 127) -
548 (((widelimb) 1) << 71);
549 static const widelimb two127m71m55 = (((widelimb) 1) << 127) -
550 (((widelimb) 1) << 71) - (((widelimb) 1) << 55);
553 /* Add 0 mod 2^224-2^96+1 to ensure all differences are positive */
554 output[0] = in[0] + two127p15;
555 output[1] = in[1] + two127m71m55;
556 output[2] = in[2] + two127m71;
560 /* Eliminate in[4], in[5], in[6] */
561 output[4] += in[6] >> 16;
562 output[3] += (in[6] & 0xffff) << 40;
565 output[3] += in[5] >> 16;
566 output[2] += (in[5] & 0xffff) << 40;
569 output[2] += output[4] >> 16;
570 output[1] += (output[4] & 0xffff) << 40;
571 output[0] -= output[4];
573 /* Carry 2 -> 3 -> 4 */
574 output[3] += output[2] >> 56;
575 output[2] &= 0x00ffffffffffffff;
577 output[4] = output[3] >> 56;
578 output[3] &= 0x00ffffffffffffff;
580 /* Now output[2] < 2^56, output[3] < 2^56, output[4] < 2^72 */
582 /* Eliminate output[4] */
583 output[2] += output[4] >> 16;
584 /* output[2] < 2^56 + 2^56 = 2^57 */
585 output[1] += (output[4] & 0xffff) << 40;
586 output[0] -= output[4];
588 /* Carry 0 -> 1 -> 2 -> 3 */
589 output[1] += output[0] >> 56;
590 out[0] = output[0] & 0x00ffffffffffffff;
592 output[2] += output[1] >> 56;
593 /* output[2] < 2^57 + 2^72 */
594 out[1] = output[1] & 0x00ffffffffffffff;
595 output[3] += output[2] >> 56;
596 /* output[3] <= 2^56 + 2^16 */
597 out[2] = output[2] & 0x00ffffffffffffff;
600 * out[0] < 2^56, out[1] < 2^56, out[2] < 2^56,
601 * out[3] <= 2^56 + 2^16 (due to final carry),
607 static void felem_square_reduce(felem out, const felem in)
610 felem_square(tmp, in);
611 felem_reduce(out, tmp);
614 static void felem_mul_reduce(felem out, const felem in1, const felem in2)
617 felem_mul(tmp, in1, in2);
618 felem_reduce(out, tmp);
622 * Reduce to unique minimal representation. Requires 0 <= in < 2*p (always
623 * call felem_reduce first)
625 static void felem_contract(felem out, const felem in)
627 static const int64_t two56 = ((limb) 1) << 56;
628 /* 0 <= in < 2*p, p = 2^224 - 2^96 + 1 */
629 /* if in > p , reduce in = in - 2^224 + 2^96 - 1 */
635 /* Case 1: a = 1 iff in >= 2^224 */
639 tmp[3] &= 0x00ffffffffffffff;
641 * Case 2: a = 0 iff p <= in < 2^224, i.e., the high 128 bits are all 1
642 * and the lower part is non-zero
644 a = ((in[3] & in[2] & (in[1] | 0x000000ffffffffff)) + 1) |
645 (((int64_t) (in[0] + (in[1] & 0x000000ffffffffff)) - 1) >> 63);
646 a &= 0x00ffffffffffffff;
647 /* turn a into an all-one mask (if a = 0) or an all-zero mask */
649 /* subtract 2^224 - 2^96 + 1 if a is all-one */
650 tmp[3] &= a ^ 0xffffffffffffffff;
651 tmp[2] &= a ^ 0xffffffffffffffff;
652 tmp[1] &= (a ^ 0xffffffffffffffff) | 0x000000ffffffffff;
656 * eliminate negative coefficients: if tmp[0] is negative, tmp[1] must be
657 * non-zero, so we only need one step
663 /* carry 1 -> 2 -> 3 */
664 tmp[2] += tmp[1] >> 56;
665 tmp[1] &= 0x00ffffffffffffff;
667 tmp[3] += tmp[2] >> 56;
668 tmp[2] &= 0x00ffffffffffffff;
670 /* Now 0 <= out < p */
678 * Zero-check: returns 1 if input is 0, and 0 otherwise. We know that field
679 * elements are reduced to in < 2^225, so we only need to check three cases:
680 * 0, 2^224 - 2^96 + 1, and 2^225 - 2^97 + 2
682 static limb felem_is_zero(const felem in)
684 limb zero, two224m96p1, two225m97p2;
686 zero = in[0] | in[1] | in[2] | in[3];
687 zero = (((int64_t) (zero) - 1) >> 63) & 1;
688 two224m96p1 = (in[0] ^ 1) | (in[1] ^ 0x00ffff0000000000)
689 | (in[2] ^ 0x00ffffffffffffff) | (in[3] ^ 0x00ffffffffffffff);
690 two224m96p1 = (((int64_t) (two224m96p1) - 1) >> 63) & 1;
691 two225m97p2 = (in[0] ^ 2) | (in[1] ^ 0x00fffe0000000000)
692 | (in[2] ^ 0x00ffffffffffffff) | (in[3] ^ 0x01ffffffffffffff);
693 two225m97p2 = (((int64_t) (two225m97p2) - 1) >> 63) & 1;
694 return (zero | two224m96p1 | two225m97p2);
697 static limb felem_is_zero_int(const felem in)
699 return (int)(felem_is_zero(in) & ((limb) 1));
702 /* Invert a field element */
703 /* Computation chain copied from djb's code */
704 static void felem_inv(felem out, const felem in)
706 felem ftmp, ftmp2, ftmp3, ftmp4;
710 felem_square(tmp, in);
711 felem_reduce(ftmp, tmp); /* 2 */
712 felem_mul(tmp, in, ftmp);
713 felem_reduce(ftmp, tmp); /* 2^2 - 1 */
714 felem_square(tmp, ftmp);
715 felem_reduce(ftmp, tmp); /* 2^3 - 2 */
716 felem_mul(tmp, in, ftmp);
717 felem_reduce(ftmp, tmp); /* 2^3 - 1 */
718 felem_square(tmp, ftmp);
719 felem_reduce(ftmp2, tmp); /* 2^4 - 2 */
720 felem_square(tmp, ftmp2);
721 felem_reduce(ftmp2, tmp); /* 2^5 - 4 */
722 felem_square(tmp, ftmp2);
723 felem_reduce(ftmp2, tmp); /* 2^6 - 8 */
724 felem_mul(tmp, ftmp2, ftmp);
725 felem_reduce(ftmp, tmp); /* 2^6 - 1 */
726 felem_square(tmp, ftmp);
727 felem_reduce(ftmp2, tmp); /* 2^7 - 2 */
728 for (i = 0; i < 5; ++i) { /* 2^12 - 2^6 */
729 felem_square(tmp, ftmp2);
730 felem_reduce(ftmp2, tmp);
732 felem_mul(tmp, ftmp2, ftmp);
733 felem_reduce(ftmp2, tmp); /* 2^12 - 1 */
734 felem_square(tmp, ftmp2);
735 felem_reduce(ftmp3, tmp); /* 2^13 - 2 */
736 for (i = 0; i < 11; ++i) { /* 2^24 - 2^12 */
737 felem_square(tmp, ftmp3);
738 felem_reduce(ftmp3, tmp);
740 felem_mul(tmp, ftmp3, ftmp2);
741 felem_reduce(ftmp2, tmp); /* 2^24 - 1 */
742 felem_square(tmp, ftmp2);
743 felem_reduce(ftmp3, tmp); /* 2^25 - 2 */
744 for (i = 0; i < 23; ++i) { /* 2^48 - 2^24 */
745 felem_square(tmp, ftmp3);
746 felem_reduce(ftmp3, tmp);
748 felem_mul(tmp, ftmp3, ftmp2);
749 felem_reduce(ftmp3, tmp); /* 2^48 - 1 */
750 felem_square(tmp, ftmp3);
751 felem_reduce(ftmp4, tmp); /* 2^49 - 2 */
752 for (i = 0; i < 47; ++i) { /* 2^96 - 2^48 */
753 felem_square(tmp, ftmp4);
754 felem_reduce(ftmp4, tmp);
756 felem_mul(tmp, ftmp3, ftmp4);
757 felem_reduce(ftmp3, tmp); /* 2^96 - 1 */
758 felem_square(tmp, ftmp3);
759 felem_reduce(ftmp4, tmp); /* 2^97 - 2 */
760 for (i = 0; i < 23; ++i) { /* 2^120 - 2^24 */
761 felem_square(tmp, ftmp4);
762 felem_reduce(ftmp4, tmp);
764 felem_mul(tmp, ftmp2, ftmp4);
765 felem_reduce(ftmp2, tmp); /* 2^120 - 1 */
766 for (i = 0; i < 6; ++i) { /* 2^126 - 2^6 */
767 felem_square(tmp, ftmp2);
768 felem_reduce(ftmp2, tmp);
770 felem_mul(tmp, ftmp2, ftmp);
771 felem_reduce(ftmp, tmp); /* 2^126 - 1 */
772 felem_square(tmp, ftmp);
773 felem_reduce(ftmp, tmp); /* 2^127 - 2 */
774 felem_mul(tmp, ftmp, in);
775 felem_reduce(ftmp, tmp); /* 2^127 - 1 */
776 for (i = 0; i < 97; ++i) { /* 2^224 - 2^97 */
777 felem_square(tmp, ftmp);
778 felem_reduce(ftmp, tmp);
780 felem_mul(tmp, ftmp, ftmp3);
781 felem_reduce(out, tmp); /* 2^224 - 2^96 - 1 */
785 * Copy in constant time: if icopy == 1, copy in to out, if icopy == 0, copy
788 static void copy_conditional(felem out, const felem in, limb icopy)
792 * icopy is a (64-bit) 0 or 1, so copy is either all-zero or all-one
794 const limb copy = -icopy;
795 for (i = 0; i < 4; ++i) {
796 const limb tmp = copy & (in[i] ^ out[i]);
801 /******************************************************************************/
803 * ELLIPTIC CURVE POINT OPERATIONS
805 * Points are represented in Jacobian projective coordinates:
806 * (X, Y, Z) corresponds to the affine point (X/Z^2, Y/Z^3),
807 * or to the point at infinity if Z == 0.
812 * Double an elliptic curve point:
813 * (X', Y', Z') = 2 * (X, Y, Z), where
814 * X' = (3 * (X - Z^2) * (X + Z^2))^2 - 8 * X * Y^2
815 * Y' = 3 * (X - Z^2) * (X + Z^2) * (4 * X * Y^2 - X') - 8 * Y^2
816 * Z' = (Y + Z)^2 - Y^2 - Z^2 = 2 * Y * Z
817 * Outputs can equal corresponding inputs, i.e., x_out == x_in is allowed,
818 * while x_out == y_in is not (maybe this works, but it's not tested).
821 point_double(felem x_out, felem y_out, felem z_out,
822 const felem x_in, const felem y_in, const felem z_in)
825 felem delta, gamma, beta, alpha, ftmp, ftmp2;
827 felem_assign(ftmp, x_in);
828 felem_assign(ftmp2, x_in);
831 felem_square(tmp, z_in);
832 felem_reduce(delta, tmp);
835 felem_square(tmp, y_in);
836 felem_reduce(gamma, tmp);
839 felem_mul(tmp, x_in, gamma);
840 felem_reduce(beta, tmp);
842 /* alpha = 3*(x-delta)*(x+delta) */
843 felem_diff(ftmp, delta);
844 /* ftmp[i] < 2^57 + 2^58 + 2 < 2^59 */
845 felem_sum(ftmp2, delta);
846 /* ftmp2[i] < 2^57 + 2^57 = 2^58 */
847 felem_scalar(ftmp2, 3);
848 /* ftmp2[i] < 3 * 2^58 < 2^60 */
849 felem_mul(tmp, ftmp, ftmp2);
850 /* tmp[i] < 2^60 * 2^59 * 4 = 2^121 */
851 felem_reduce(alpha, tmp);
853 /* x' = alpha^2 - 8*beta */
854 felem_square(tmp, alpha);
855 /* tmp[i] < 4 * 2^57 * 2^57 = 2^116 */
856 felem_assign(ftmp, beta);
857 felem_scalar(ftmp, 8);
858 /* ftmp[i] < 8 * 2^57 = 2^60 */
859 felem_diff_128_64(tmp, ftmp);
860 /* tmp[i] < 2^116 + 2^64 + 8 < 2^117 */
861 felem_reduce(x_out, tmp);
863 /* z' = (y + z)^2 - gamma - delta */
864 felem_sum(delta, gamma);
865 /* delta[i] < 2^57 + 2^57 = 2^58 */
866 felem_assign(ftmp, y_in);
867 felem_sum(ftmp, z_in);
868 /* ftmp[i] < 2^57 + 2^57 = 2^58 */
869 felem_square(tmp, ftmp);
870 /* tmp[i] < 4 * 2^58 * 2^58 = 2^118 */
871 felem_diff_128_64(tmp, delta);
872 /* tmp[i] < 2^118 + 2^64 + 8 < 2^119 */
873 felem_reduce(z_out, tmp);
875 /* y' = alpha*(4*beta - x') - 8*gamma^2 */
876 felem_scalar(beta, 4);
877 /* beta[i] < 4 * 2^57 = 2^59 */
878 felem_diff(beta, x_out);
879 /* beta[i] < 2^59 + 2^58 + 2 < 2^60 */
880 felem_mul(tmp, alpha, beta);
881 /* tmp[i] < 4 * 2^57 * 2^60 = 2^119 */
882 felem_square(tmp2, gamma);
883 /* tmp2[i] < 4 * 2^57 * 2^57 = 2^116 */
884 widefelem_scalar(tmp2, 8);
885 /* tmp2[i] < 8 * 2^116 = 2^119 */
886 widefelem_diff(tmp, tmp2);
887 /* tmp[i] < 2^119 + 2^120 < 2^121 */
888 felem_reduce(y_out, tmp);
892 * Add two elliptic curve points:
893 * (X_1, Y_1, Z_1) + (X_2, Y_2, Z_2) = (X_3, Y_3, Z_3), where
894 * X_3 = (Z_1^3 * Y_2 - Z_2^3 * Y_1)^2 - (Z_1^2 * X_2 - Z_2^2 * X_1)^3 -
895 * 2 * Z_2^2 * X_1 * (Z_1^2 * X_2 - Z_2^2 * X_1)^2
896 * Y_3 = (Z_1^3 * Y_2 - Z_2^3 * Y_1) * (Z_2^2 * X_1 * (Z_1^2 * X_2 - Z_2^2 * X_1)^2 - X_3) -
897 * Z_2^3 * Y_1 * (Z_1^2 * X_2 - Z_2^2 * X_1)^3
898 * Z_3 = (Z_1^2 * X_2 - Z_2^2 * X_1) * (Z_1 * Z_2)
900 * This runs faster if 'mixed' is set, which requires Z_2 = 1 or Z_2 = 0.
904 * This function is not entirely constant-time: it includes a branch for
905 * checking whether the two input points are equal, (while not equal to the
906 * point at infinity). This case never happens during single point
907 * multiplication, so there is no timing leak for ECDH or ECDSA signing.
909 static void point_add(felem x3, felem y3, felem z3,
910 const felem x1, const felem y1, const felem z1,
911 const int mixed, const felem x2, const felem y2,
914 felem ftmp, ftmp2, ftmp3, ftmp4, ftmp5, x_out, y_out, z_out;
916 limb z1_is_zero, z2_is_zero, x_equal, y_equal;
920 felem_square(tmp, z2);
921 felem_reduce(ftmp2, tmp);
924 felem_mul(tmp, ftmp2, z2);
925 felem_reduce(ftmp4, tmp);
927 /* ftmp4 = z2^3*y1 */
928 felem_mul(tmp2, ftmp4, y1);
929 felem_reduce(ftmp4, tmp2);
931 /* ftmp2 = z2^2*x1 */
932 felem_mul(tmp2, ftmp2, x1);
933 felem_reduce(ftmp2, tmp2);
936 * We'll assume z2 = 1 (special case z2 = 0 is handled later)
939 /* ftmp4 = z2^3*y1 */
940 felem_assign(ftmp4, y1);
942 /* ftmp2 = z2^2*x1 */
943 felem_assign(ftmp2, x1);
947 felem_square(tmp, z1);
948 felem_reduce(ftmp, tmp);
951 felem_mul(tmp, ftmp, z1);
952 felem_reduce(ftmp3, tmp);
955 felem_mul(tmp, ftmp3, y2);
956 /* tmp[i] < 4 * 2^57 * 2^57 = 2^116 */
958 /* ftmp3 = z1^3*y2 - z2^3*y1 */
959 felem_diff_128_64(tmp, ftmp4);
960 /* tmp[i] < 2^116 + 2^64 + 8 < 2^117 */
961 felem_reduce(ftmp3, tmp);
964 felem_mul(tmp, ftmp, x2);
965 /* tmp[i] < 4 * 2^57 * 2^57 = 2^116 */
967 /* ftmp = z1^2*x2 - z2^2*x1 */
968 felem_diff_128_64(tmp, ftmp2);
969 /* tmp[i] < 2^116 + 2^64 + 8 < 2^117 */
970 felem_reduce(ftmp, tmp);
973 * the formulae are incorrect if the points are equal so we check for
974 * this and do doubling if this happens
976 x_equal = felem_is_zero(ftmp);
977 y_equal = felem_is_zero(ftmp3);
978 z1_is_zero = felem_is_zero(z1);
979 z2_is_zero = felem_is_zero(z2);
980 /* In affine coordinates, (X_1, Y_1) == (X_2, Y_2) */
981 if (x_equal && y_equal && !z1_is_zero && !z2_is_zero) {
982 point_double(x3, y3, z3, x1, y1, z1);
988 felem_mul(tmp, z1, z2);
989 felem_reduce(ftmp5, tmp);
991 /* special case z2 = 0 is handled later */
992 felem_assign(ftmp5, z1);
995 /* z_out = (z1^2*x2 - z2^2*x1)*(z1*z2) */
996 felem_mul(tmp, ftmp, ftmp5);
997 felem_reduce(z_out, tmp);
999 /* ftmp = (z1^2*x2 - z2^2*x1)^2 */
1000 felem_assign(ftmp5, ftmp);
1001 felem_square(tmp, ftmp);
1002 felem_reduce(ftmp, tmp);
1004 /* ftmp5 = (z1^2*x2 - z2^2*x1)^3 */
1005 felem_mul(tmp, ftmp, ftmp5);
1006 felem_reduce(ftmp5, tmp);
1008 /* ftmp2 = z2^2*x1*(z1^2*x2 - z2^2*x1)^2 */
1009 felem_mul(tmp, ftmp2, ftmp);
1010 felem_reduce(ftmp2, tmp);
1012 /* tmp = z2^3*y1*(z1^2*x2 - z2^2*x1)^3 */
1013 felem_mul(tmp, ftmp4, ftmp5);
1014 /* tmp[i] < 4 * 2^57 * 2^57 = 2^116 */
1016 /* tmp2 = (z1^3*y2 - z2^3*y1)^2 */
1017 felem_square(tmp2, ftmp3);
1018 /* tmp2[i] < 4 * 2^57 * 2^57 < 2^116 */
1020 /* tmp2 = (z1^3*y2 - z2^3*y1)^2 - (z1^2*x2 - z2^2*x1)^3 */
1021 felem_diff_128_64(tmp2, ftmp5);
1022 /* tmp2[i] < 2^116 + 2^64 + 8 < 2^117 */
1024 /* ftmp5 = 2*z2^2*x1*(z1^2*x2 - z2^2*x1)^2 */
1025 felem_assign(ftmp5, ftmp2);
1026 felem_scalar(ftmp5, 2);
1027 /* ftmp5[i] < 2 * 2^57 = 2^58 */
1030 * x_out = (z1^3*y2 - z2^3*y1)^2 - (z1^2*x2 - z2^2*x1)^3 -
1031 * 2*z2^2*x1*(z1^2*x2 - z2^2*x1)^2
1033 felem_diff_128_64(tmp2, ftmp5);
1034 /* tmp2[i] < 2^117 + 2^64 + 8 < 2^118 */
1035 felem_reduce(x_out, tmp2);
1037 /* ftmp2 = z2^2*x1*(z1^2*x2 - z2^2*x1)^2 - x_out */
1038 felem_diff(ftmp2, x_out);
1039 /* ftmp2[i] < 2^57 + 2^58 + 2 < 2^59 */
1042 * tmp2 = (z1^3*y2 - z2^3*y1)*(z2^2*x1*(z1^2*x2 - z2^2*x1)^2 - x_out)
1044 felem_mul(tmp2, ftmp3, ftmp2);
1045 /* tmp2[i] < 4 * 2^57 * 2^59 = 2^118 */
1048 * y_out = (z1^3*y2 - z2^3*y1)*(z2^2*x1*(z1^2*x2 - z2^2*x1)^2 - x_out) -
1049 * z2^3*y1*(z1^2*x2 - z2^2*x1)^3
1051 widefelem_diff(tmp2, tmp);
1052 /* tmp2[i] < 2^118 + 2^120 < 2^121 */
1053 felem_reduce(y_out, tmp2);
1056 * the result (x_out, y_out, z_out) is incorrect if one of the inputs is
1057 * the point at infinity, so we need to check for this separately
1061 * if point 1 is at infinity, copy point 2 to output, and vice versa
1063 copy_conditional(x_out, x2, z1_is_zero);
1064 copy_conditional(x_out, x1, z2_is_zero);
1065 copy_conditional(y_out, y2, z1_is_zero);
1066 copy_conditional(y_out, y1, z2_is_zero);
1067 copy_conditional(z_out, z2, z1_is_zero);
1068 copy_conditional(z_out, z1, z2_is_zero);
1069 felem_assign(x3, x_out);
1070 felem_assign(y3, y_out);
1071 felem_assign(z3, z_out);
1075 * select_point selects the |idx|th point from a precomputation table and
1077 * The pre_comp array argument should be size of |size| argument
1079 static void select_point(const u64 idx, unsigned int size,
1080 const felem pre_comp[][3], felem out[3])
1083 limb *outlimbs = &out[0][0];
1085 memset(out, 0, sizeof(*out) * 3);
1086 for (i = 0; i < size; i++) {
1087 const limb *inlimbs = &pre_comp[i][0][0];
1094 for (j = 0; j < 4 * 3; j++)
1095 outlimbs[j] |= inlimbs[j] & mask;
1099 /* get_bit returns the |i|th bit in |in| */
1100 static char get_bit(const felem_bytearray in, unsigned i)
1104 return (in[i >> 3] >> (i & 7)) & 1;
1108 * Interleaved point multiplication using precomputed point multiples: The
1109 * small point multiples 0*P, 1*P, ..., 16*P are in pre_comp[], the scalars
1110 * in scalars[]. If g_scalar is non-NULL, we also add this multiple of the
1111 * generator, using certain (large) precomputed multiples in g_pre_comp.
1112 * Output point (X, Y, Z) is stored in x_out, y_out, z_out
1114 static void batch_mul(felem x_out, felem y_out, felem z_out,
1115 const felem_bytearray scalars[],
1116 const unsigned num_points, const u8 *g_scalar,
1117 const int mixed, const felem pre_comp[][17][3],
1118 const felem g_pre_comp[2][16][3])
1122 unsigned gen_mul = (g_scalar != NULL);
1123 felem nq[3], tmp[4];
1127 /* set nq to the point at infinity */
/* all-zero Jacobian coordinates encode the point at infinity here */
1128 memset(nq, 0, sizeof(nq));
1131 * Loop over all scalars msb-to-lsb, interleaving additions of multiples
1132 * of the generator (two in each of the last 28 rounds) and additions of
1133 * other points multiples (every 5th round).
1135 skip = 1; /* save two point operations in the first
1137 for (i = (num_points ? 220 : 27); i >= 0; --i) {
1140 point_double(nq[0], nq[1], nq[2], nq[0], nq[1], nq[2]);
1142 /* add multiples of the generator */
1143 if (gen_mul && (i <= 27)) {
1144 /* first, look 28 bits upwards */
/* assemble a 4-bit window from four 56-bit-spaced scalar positions */
1145 bits = get_bit(g_scalar, i + 196) << 3;
1146 bits |= get_bit(g_scalar, i + 140) << 2;
1147 bits |= get_bit(g_scalar, i + 84) << 1;
1148 bits |= get_bit(g_scalar, i + 28);
1149 /* select the point to add, in constant time */
1150 select_point(bits, 16, g_pre_comp[1], tmp);
1153 /* value 1 below is argument for "mixed" */
1154 point_add(nq[0], nq[1], nq[2],
1155 nq[0], nq[1], nq[2], 1, tmp[0], tmp[1], tmp[2]);
/* while nq is still infinity, a plain copy replaces the first addition */
1157 memcpy(nq, tmp, 3 * sizeof(felem));
1161 /* second, look at the current position */
1162 bits = get_bit(g_scalar, i + 168) << 3;
1163 bits |= get_bit(g_scalar, i + 112) << 2;
1164 bits |= get_bit(g_scalar, i + 56) << 1;
1165 bits |= get_bit(g_scalar, i);
1166 /* select the point to add, in constant time */
1167 select_point(bits, 16, g_pre_comp[0], tmp);
1168 point_add(nq[0], nq[1], nq[2],
1169 nq[0], nq[1], nq[2],
1170 1 /* mixed */ , tmp[0], tmp[1], tmp[2]);
1173 /* do other additions every 5 doublings */
1174 if (num_points && (i % 5 == 0)) {
1175 /* loop over all scalars */
1176 for (num = 0; num < num_points; ++num) {
/* signed 6-bit window; the low bit comes from position i - 1 */
1177 bits = get_bit(scalars[num], i + 4) << 5;
1178 bits |= get_bit(scalars[num], i + 3) << 4;
1179 bits |= get_bit(scalars[num], i + 2) << 3;
1180 bits |= get_bit(scalars[num], i + 1) << 2;
1181 bits |= get_bit(scalars[num], i) << 1;
/*
 * NOTE(review): at i == 0 the unsigned expression i - 1 wraps to
 * UINT_MAX; this relies on get_bit() returning 0 for out-of-range
 * indices -- confirm the bounds check is present in get_bit.
 */
1182 bits |= get_bit(scalars[num], i - 1);
1183 ec_GFp_nistp_recode_scalar_bits(&sign, &digit, bits);
1185 /* select the point to add or subtract */
1186 select_point(digit, 17, pre_comp[num], tmp);
1187 felem_neg(tmp[3], tmp[1]); /* (X, -Y, Z) is the negative
/* conditionally negate Y (constant time) when the recoded digit is negative */
1189 copy_conditional(tmp[1], tmp[3], sign);
1192 point_add(nq[0], nq[1], nq[2],
1193 nq[0], nq[1], nq[2],
1194 mixed, tmp[0], tmp[1], tmp[2]);
1196 memcpy(nq, tmp, 3 * sizeof(felem));
/* export the accumulator as the result point */
1202 felem_assign(x_out, nq[0]);
1203 felem_assign(y_out, nq[1]);
1204 felem_assign(z_out, nq[2]);
1207 /******************************************************************************/
1209 * FUNCTIONS TO MANAGE PRECOMPUTATION
/*
 * Allocate a fresh, zero-initialized precomputation structure with a
 * reference count of 1 and its own lock guarding that count.
 * Returns NULL (after raising ERR_R_MALLOC_FAILURE) on allocation failure.
 */
1212 static NISTP224_PRE_COMP *nistp224_pre_comp_new()
1214 NISTP224_PRE_COMP *ret = OPENSSL_zalloc(sizeof(*ret));
1217 ECerr(EC_F_NISTP224_PRE_COMP_NEW, ERR_R_MALLOC_FAILURE);
/* single owner initially; EC_nistp224_pre_comp_dup adds references */
1221 ret->references = 1;
1223 ret->lock = CRYPTO_THREAD_lock_new();
1224 if (ret->lock == NULL) {
1225 ECerr(EC_F_NISTP224_PRE_COMP_NEW, ERR_R_MALLOC_FAILURE);
/*
 * Share (not deep-copy) a precomputation: atomically increment the
 * reference count under p->lock. Presumably returns p unchanged --
 * the return statement is outside this excerpt; confirm against the
 * full source.
 */
1232 NISTP224_PRE_COMP *EC_nistp224_pre_comp_dup(NISTP224_PRE_COMP *p)
1236 CRYPTO_atomic_add(&p->references, 1, &i, p->lock);
/*
 * Drop one reference to the precomputation; when the atomically
 * decremented count reaches zero, release the lock and (in the elided
 * lines) the structure itself. Safe to call concurrently with dup.
 */
1240 void EC_nistp224_pre_comp_free(NISTP224_PRE_COMP *p)
1247 CRYPTO_atomic_add(&p->references, -1, &i, p->lock);
1248 REF_PRINT_COUNT("EC_nistp224", x);
/* a negative count would indicate a refcount underflow bug */
1251 REF_ASSERT_ISNT(i < 0);
1253 CRYPTO_THREAD_lock_free(p->lock);
1257 /******************************************************************************/
1259 * OPENSSL EC_METHOD FUNCTIONS
/*
 * Group initializer for the nistp224 method: delegates to the generic
 * GFp simple init, then records that the curve coefficient a == -3
 * (mod p), which other code may test via the a_is_minus3 flag.
 */
1262 int ec_GFp_nistp224_group_init(EC_GROUP *group)
1265 ret = ec_GFp_simple_group_init(group);
1266 group->a_is_minus3 = 1;
/*
 * Install curve parameters, but only if they exactly match the NIST
 * P-224 constants baked into nistp224_curve_params -- this EC_METHOD
 * implements that single curve and rejects anything else with
 * EC_R_WRONG_CURVE_PARAMETERS.
 */
1270 int ec_GFp_nistp224_group_set_curve(EC_GROUP *group, const BIGNUM *p,
1271 const BIGNUM *a, const BIGNUM *b,
1275 BN_CTX *new_ctx = NULL;
1276 BIGNUM *curve_p, *curve_a, *curve_b;
/* create a local BN_CTX only when the caller did not supply one */
1279 if ((ctx = new_ctx = BN_CTX_new()) == NULL)
1282 if (((curve_p = BN_CTX_get(ctx)) == NULL) ||
1283 ((curve_a = BN_CTX_get(ctx)) == NULL) ||
1284 ((curve_b = BN_CTX_get(ctx)) == NULL))
/* load the canonical P-224 p, a, b from the static parameter table */
1286 BN_bin2bn(nistp224_curve_params[0], sizeof(felem_bytearray), curve_p);
1287 BN_bin2bn(nistp224_curve_params[1], sizeof(felem_bytearray), curve_a);
1288 BN_bin2bn(nistp224_curve_params[2], sizeof(felem_bytearray), curve_b);
1289 if ((BN_cmp(curve_p, p)) || (BN_cmp(curve_a, a)) || (BN_cmp(curve_b, b))) {
1290 ECerr(EC_F_EC_GFP_NISTP224_GROUP_SET_CURVE,
1291 EC_R_WRONG_CURVE_PARAMETERS);
/* use the specialized NIST reduction for field arithmetic */
1294 group->field_mod_func = BN_nist_mod_224;
1295 ret = ec_GFp_simple_group_set_curve(group, p, a, b, ctx);
/* frees only the locally created context; a caller-supplied ctx survives */
1298 BN_CTX_free(new_ctx);
1303 * Takes the Jacobian coordinates (X, Y, Z) of a point and returns (X', Y') =
/*
 * ... (X/Z^2, Y/Z^3). Fails with EC_R_POINT_AT_INFINITY when Z == 0,
 * since the point at infinity has no affine representation.
 */
1306 int ec_GFp_nistp224_point_get_affine_coordinates(const EC_GROUP *group,
1307 const EC_POINT *point,
1308 BIGNUM *x, BIGNUM *y,
1311 felem z1, z2, x_in, y_in, x_out, y_out;
1314 if (EC_POINT_is_at_infinity(group, point)) {
1315 ECerr(EC_F_EC_GFP_NISTP224_POINT_GET_AFFINE_COORDINATES,
1316 EC_R_POINT_AT_INFINITY);
/* pull the Jacobian coordinates into field-element form */
1319 if ((!BN_to_felem(x_in, point->X)) || (!BN_to_felem(y_in, point->Y)) ||
1320 (!BN_to_felem(z1, point->Z)))
/*
 * NOTE(review): z2 is presumably set to 1/Z via felem_inv in an elided
 * line before this square -- confirm against the full source.
 */
1323 felem_square(tmp, z2);
1324 felem_reduce(z1, tmp);
/* x' = X * (1/Z)^2, then contract to the unique minimal representation */
1325 felem_mul(tmp, x_in, z1);
1326 felem_reduce(x_in, tmp);
1327 felem_contract(x_out, x_in);
1329 if (!felem_to_BN(x, x_out)) {
1330 ECerr(EC_F_EC_GFP_NISTP224_POINT_GET_AFFINE_COORDINATES,
/* y' = Y * (1/Z)^3: multiply the inverse-square by one more inverse factor */
1335 felem_mul(tmp, z1, z2);
1336 felem_reduce(z1, tmp);
1337 felem_mul(tmp, y_in, z1);
1338 felem_reduce(y_in, tmp);
1339 felem_contract(y_out, y_in);
1341 if (!felem_to_BN(y, y_out)) {
1342 ECerr(EC_F_EC_GFP_NISTP224_POINT_GET_AFFINE_COORDINATES,
/*
 * Convert |num| Jacobian points to affine form in place, batching the
 * field inversions via the shared nistp helper. tmp_felems must hold
 * at least num + 1 scratch elements. The function-pointer casts adapt
 * the felem-typed helpers to the generic void* callback interface.
 */
1350 static void make_points_affine(size_t num, felem points[ /* num */ ][3],
1351 felem tmp_felems[ /* num+1 */ ])
1354 * Runs in constant time, unless an input is the point at infinity (which
1355 * normally shouldn't happen).
1357 ec_GFp_nistp_points_make_affine_internal(num,
1361 (void (*)(void *))felem_one,
1362 (int (*)(const void *))
1364 (void (*)(void *, const void *))
1366 (void (*)(void *, const void *))
1367 felem_square_reduce, (void (*)
1374 (void (*)(void *, const void *))
1376 (void (*)(void *, const void *))
1381 * Computes scalar*generator + \sum scalars[i]*points[i], ignoring NULL
1382 * values Result is stored in r (r can equal one of the inputs).
1384 int ec_GFp_nistp224_points_mul(const EC_GROUP *group, EC_POINT *r,
1385 const BIGNUM *scalar, size_t num,
1386 const EC_POINT *points[],
1387 const BIGNUM *scalars[], BN_CTX *ctx)
1393 BN_CTX *new_ctx = NULL;
1394 BIGNUM *x, *y, *z, *tmp_scalar;
1395 felem_bytearray g_secret;
1396 felem_bytearray *secrets = NULL;
/* per-point table of 0*P..16*P multiples, one row per input point */
1397 felem (*pre_comp)[17][3] = NULL;
1398 felem *tmp_felems = NULL;
1399 felem_bytearray tmp;
1401 int have_pre_comp = 0;
1402 size_t num_points = num;
1403 felem x_in, y_in, z_in, x_out, y_out, z_out;
1404 NISTP224_PRE_COMP *pre = NULL;
1405 const felem(*g_pre_comp)[16][3] = NULL;
1406 EC_POINT *generator = NULL;
1407 const EC_POINT *p = NULL;
1408 const BIGNUM *p_scalar = NULL;
/* create a local BN_CTX only when the caller did not supply one */
1411 if ((ctx = new_ctx = BN_CTX_new()) == NULL)
1414 if (((x = BN_CTX_get(ctx)) == NULL) ||
1415 ((y = BN_CTX_get(ctx)) == NULL) ||
1416 ((z = BN_CTX_get(ctx)) == NULL) ||
1417 ((tmp_scalar = BN_CTX_get(ctx)) == NULL))
1420 if (scalar != NULL) {
1421 pre = group->pre_comp.nistp224;
1423 /* we have precomputation, try to use it */
1424 g_pre_comp = (const felem(*)[16][3])pre->g_pre_comp;
1426 /* try to use the standard precomputation */
/* gmul is the compiled-in table for the standard P-224 generator */
1427 g_pre_comp = &gmul[0];
1428 generator = EC_POINT_new(group);
1429 if (generator == NULL)
1431 /* get the generator from precomputation */
1432 if (!felem_to_BN(x, g_pre_comp[0][1][0]) ||
1433 !felem_to_BN(y, g_pre_comp[0][1][1]) ||
1434 !felem_to_BN(z, g_pre_comp[0][1][2])) {
1435 ECerr(EC_F_EC_GFP_NISTP224_POINTS_MUL, ERR_R_BN_LIB);
1438 if (!EC_POINT_set_Jprojective_coordinates_GFp(group,
/* only trust the table if it really corresponds to this group's generator */
1442 if (0 == EC_POINT_cmp(group, generator, group->generator, ctx))
1443 /* precomputation matches generator */
1447 * we don't have valid precomputation: treat the generator as a
/* fall back: fold the generator term into the generic point list */
1450 num_points = num_points + 1;
1453 if (num_points > 0) {
1454 if (num_points >= 3) {
1456 * unless we precompute multiples for just one or two points,
1457 * converting those into affine form is time well spent
1461 secrets = OPENSSL_zalloc(sizeof(*secrets) * num_points);
1462 pre_comp = OPENSSL_zalloc(sizeof(*pre_comp) * num_points);
1465 OPENSSL_malloc(sizeof(felem) * (num_points * 17 + 1));
/* tmp_felems is only needed for the mixed (affine) path */
1466 if ((secrets == NULL) || (pre_comp == NULL)
1467 || (mixed && (tmp_felems == NULL))) {
1468 ECerr(EC_F_EC_GFP_NISTP224_POINTS_MUL, ERR_R_MALLOC_FAILURE);
1473 * we treat NULL scalars as 0, and NULL points as points at infinity,
1474 * i.e., they contribute nothing to the linear combination
1476 for (i = 0; i < num_points; ++i) {
/* the extra slot (i == num) carries the generator when no table matched */
1480 p = EC_GROUP_get0_generator(group);
1483 /* the i^th point */
1486 p_scalar = scalars[i];
1488 if ((p_scalar != NULL) && (p != NULL)) {
1489 /* reduce scalar to 0 <= scalar < 2^224 */
1490 if ((BN_num_bits(p_scalar) > 224)
1491 || (BN_is_negative(p_scalar))) {
1493 * this is an unusual input, and we don't guarantee
1496 if (!BN_nnmod(tmp_scalar, p_scalar, group->order, ctx)) {
1497 ECerr(EC_F_EC_GFP_NISTP224_POINTS_MUL, ERR_R_BN_LIB);
1500 num_bytes = BN_bn2bin(tmp_scalar, tmp);
1502 num_bytes = BN_bn2bin(p_scalar, tmp);
/* batch_mul expects little-endian scalar bytes; BN_bn2bin is big-endian */
1503 flip_endian(secrets[i], tmp, num_bytes);
1504 /* precompute multiples */
1505 if ((!BN_to_felem(x_out, p->X)) ||
1506 (!BN_to_felem(y_out, p->Y)) ||
1507 (!BN_to_felem(z_out, p->Z)))
1509 felem_assign(pre_comp[i][1][0], x_out);
1510 felem_assign(pre_comp[i][1][1], y_out);
1511 felem_assign(pre_comp[i][1][2], z_out);
/* fill 2*P..16*P: odd entries by addition, even entries by doubling */
1512 for (j = 2; j <= 16; ++j) {
1514 point_add(pre_comp[i][j][0], pre_comp[i][j][1],
1515 pre_comp[i][j][2], pre_comp[i][1][0],
1516 pre_comp[i][1][1], pre_comp[i][1][2], 0,
1517 pre_comp[i][j - 1][0],
1518 pre_comp[i][j - 1][1],
1519 pre_comp[i][j - 1][2]);
1521 point_double(pre_comp[i][j][0], pre_comp[i][j][1],
1522 pre_comp[i][j][2], pre_comp[i][j / 2][0],
1523 pre_comp[i][j / 2][1],
1524 pre_comp[i][j / 2][2]);
1530 make_points_affine(num_points * 17, pre_comp[0], tmp_felems);
1533 /* the scalar for the generator */
1534 if ((scalar != NULL) && (have_pre_comp)) {
1535 memset(g_secret, 0, sizeof(g_secret));
1536 /* reduce scalar to 0 <= scalar < 2^224 */
1537 if ((BN_num_bits(scalar) > 224) || (BN_is_negative(scalar))) {
1539 * this is an unusual input, and we don't guarantee
1542 if (!BN_nnmod(tmp_scalar, scalar, group->order, ctx)) {
1543 ECerr(EC_F_EC_GFP_NISTP224_POINTS_MUL, ERR_R_BN_LIB);
1546 num_bytes = BN_bn2bin(tmp_scalar, tmp);
1548 num_bytes = BN_bn2bin(scalar, tmp);
1549 flip_endian(g_secret, tmp, num_bytes);
1550 /* do the multiplication with generator precomputation */
1551 batch_mul(x_out, y_out, z_out,
1552 (const felem_bytearray(*))secrets, num_points,
1554 mixed, (const felem(*)[17][3])pre_comp, g_pre_comp);
1556 /* do the multiplication without generator precomputation */
1557 batch_mul(x_out, y_out, z_out,
1558 (const felem_bytearray(*))secrets, num_points,
1559 NULL, mixed, (const felem(*)[17][3])pre_comp, NULL);
1560 /* reduce the output to its unique minimal representation */
1561 felem_contract(x_in, x_out);
1562 felem_contract(y_in, y_out);
1563 felem_contract(z_in, z_out);
1564 if ((!felem_to_BN(x, x_in)) || (!felem_to_BN(y, y_in)) ||
1565 (!felem_to_BN(z, z_in))) {
1566 ECerr(EC_F_EC_GFP_NISTP224_POINTS_MUL, ERR_R_BN_LIB);
1569 ret = EC_POINT_set_Jprojective_coordinates_GFp(group, r, x, y, z, ctx);
/* common cleanup path: all of these are NULL-safe */
1573 EC_POINT_free(generator);
1574 BN_CTX_free(new_ctx);
1575 OPENSSL_free(secrets);
1576 OPENSSL_free(pre_comp);
1577 OPENSSL_free(tmp_felems);
/*
 * Build (or copy) the generator precomputation tables for this group and
 * attach them via SETPRECOMP. For the standard P-224 generator the
 * compiled-in gmul table is memcpy'd; otherwise the two 16-entry tables
 * (windows at bit offsets 0/56/112/168 and 28/84/140/196) are computed
 * from scratch with point_double/point_add.
 */
1581 int ec_GFp_nistp224_precompute_mult(EC_GROUP *group, BN_CTX *ctx)
1584 NISTP224_PRE_COMP *pre = NULL;
1586 BN_CTX *new_ctx = NULL;
1588 EC_POINT *generator = NULL;
/* scratch for make_points_affine over the 31 non-infinity table entries */
1589 felem tmp_felems[32];
1591 /* throw away old precomputation */
1592 EC_pre_comp_free(group);
1594 if ((ctx = new_ctx = BN_CTX_new()) == NULL)
1597 if (((x = BN_CTX_get(ctx)) == NULL) || ((y = BN_CTX_get(ctx)) == NULL))
1599 /* get the generator */
1600 if (group->generator == NULL)
1602 generator = EC_POINT_new(group);
1603 if (generator == NULL)
/* construct the canonical P-224 base point from the parameter table */
1605 BN_bin2bn(nistp224_curve_params[3], sizeof(felem_bytearray), x);
1606 BN_bin2bn(nistp224_curve_params[4], sizeof(felem_bytearray), y);
1607 if (!EC_POINT_set_affine_coordinates_GFp(group, generator, x, y, ctx))
1609 if ((pre = nistp224_pre_comp_new()) == NULL)
1612 * if the generator is the standard one, use built-in precomputation
1614 if (0 == EC_POINT_cmp(group, generator, group->generator, ctx)) {
1615 memcpy(pre->g_pre_comp, gmul, sizeof(pre->g_pre_comp));
/* non-standard generator: seed table[0][1] with G itself */
1618 if ((!BN_to_felem(pre->g_pre_comp[0][1][0], group->generator->X)) ||
1619 (!BN_to_felem(pre->g_pre_comp[0][1][1], group->generator->Y)) ||
1620 (!BN_to_felem(pre->g_pre_comp[0][1][2], group->generator->Z)))
1623 * compute 2^56*G, 2^112*G, 2^168*G for the first table, 2^28*G, 2^84*G,
1624 * 2^140*G, 2^196*G for the second one
/* each iteration applies 28 doublings to reach the next window base */
1626 for (i = 1; i <= 8; i <<= 1) {
1627 point_double(pre->g_pre_comp[1][i][0], pre->g_pre_comp[1][i][1],
1628 pre->g_pre_comp[1][i][2], pre->g_pre_comp[0][i][0],
1629 pre->g_pre_comp[0][i][1], pre->g_pre_comp[0][i][2]);
1630 for (j = 0; j < 27; ++j) {
1631 point_double(pre->g_pre_comp[1][i][0], pre->g_pre_comp[1][i][1],
1632 pre->g_pre_comp[1][i][2], pre->g_pre_comp[1][i][0],
1633 pre->g_pre_comp[1][i][1], pre->g_pre_comp[1][i][2]);
1637 point_double(pre->g_pre_comp[0][2 * i][0],
1638 pre->g_pre_comp[0][2 * i][1],
1639 pre->g_pre_comp[0][2 * i][2], pre->g_pre_comp[1][i][0],
1640 pre->g_pre_comp[1][i][1], pre->g_pre_comp[1][i][2]);
1641 for (j = 0; j < 27; ++j) {
1642 point_double(pre->g_pre_comp[0][2 * i][0],
1643 pre->g_pre_comp[0][2 * i][1],
1644 pre->g_pre_comp[0][2 * i][2],
1645 pre->g_pre_comp[0][2 * i][0],
1646 pre->g_pre_comp[0][2 * i][1],
1647 pre->g_pre_comp[0][2 * i][2]);
/* combine the power-of-two entries into the remaining 4-bit-index sums */
1650 for (i = 0; i < 2; i++) {
1651 /* g_pre_comp[i][0] is the point at infinity */
1652 memset(pre->g_pre_comp[i][0], 0, sizeof(pre->g_pre_comp[i][0]));
1653 /* the remaining multiples */
1654 /* 2^56*G + 2^112*G resp. 2^84*G + 2^140*G */
1655 point_add(pre->g_pre_comp[i][6][0], pre->g_pre_comp[i][6][1],
1656 pre->g_pre_comp[i][6][2], pre->g_pre_comp[i][4][0],
1657 pre->g_pre_comp[i][4][1], pre->g_pre_comp[i][4][2],
1658 0, pre->g_pre_comp[i][2][0], pre->g_pre_comp[i][2][1],
1659 pre->g_pre_comp[i][2][2]);
1660 /* 2^56*G + 2^168*G resp. 2^84*G + 2^196*G */
1661 point_add(pre->g_pre_comp[i][10][0], pre->g_pre_comp[i][10][1],
1662 pre->g_pre_comp[i][10][2], pre->g_pre_comp[i][8][0],
1663 pre->g_pre_comp[i][8][1], pre->g_pre_comp[i][8][2],
1664 0, pre->g_pre_comp[i][2][0], pre->g_pre_comp[i][2][1],
1665 pre->g_pre_comp[i][2][2]);
1666 /* 2^112*G + 2^168*G resp. 2^140*G + 2^196*G */
1667 point_add(pre->g_pre_comp[i][12][0], pre->g_pre_comp[i][12][1],
1668 pre->g_pre_comp[i][12][2], pre->g_pre_comp[i][8][0],
1669 pre->g_pre_comp[i][8][1], pre->g_pre_comp[i][8][2],
1670 0, pre->g_pre_comp[i][4][0], pre->g_pre_comp[i][4][1],
1671 pre->g_pre_comp[i][4][2]);
1673 * 2^56*G + 2^112*G + 2^168*G resp. 2^84*G + 2^140*G + 2^196*G
1675 point_add(pre->g_pre_comp[i][14][0], pre->g_pre_comp[i][14][1],
1676 pre->g_pre_comp[i][14][2], pre->g_pre_comp[i][12][0],
1677 pre->g_pre_comp[i][12][1], pre->g_pre_comp[i][12][2],
1678 0, pre->g_pre_comp[i][2][0], pre->g_pre_comp[i][2][1],
1679 pre->g_pre_comp[i][2][2]);
1680 for (j = 1; j < 8; ++j) {
1681 /* odd multiples: add G resp. 2^28*G */
1682 point_add(pre->g_pre_comp[i][2 * j + 1][0],
1683 pre->g_pre_comp[i][2 * j + 1][1],
1684 pre->g_pre_comp[i][2 * j + 1][2],
1685 pre->g_pre_comp[i][2 * j][0],
1686 pre->g_pre_comp[i][2 * j][1],
1687 pre->g_pre_comp[i][2 * j][2], 0,
1688 pre->g_pre_comp[i][1][0], pre->g_pre_comp[i][1][1],
1689 pre->g_pre_comp[i][1][2]);
/* skip index 0 (infinity): 2*16 - 1 = 31 entries to convert */
1692 make_points_affine(31, &(pre->g_pre_comp[0][1]), tmp_felems);
/* hand ownership of pre to the group */
1695 SETPRECOMP(group, nistp224, pre);
/* cleanup path: frees are NULL-safe; pre is NULL here on success */
1700 EC_POINT_free(generator);
1701 BN_CTX_free(new_ctx);
1702 EC_nistp224_pre_comp_free(pre);
/* Report whether generator precomputation is attached to this group. */
1706 int ec_GFp_nistp224_have_precompute_mult(const EC_GROUP *group)
1708 return HAVEPRECOMP(group, nistp224);