/*
 * Copyright 2002-2016 The OpenSSL Project Authors. All Rights Reserved.
 *
 * Licensed under the OpenSSL license (the "License"). You may not use
 * this file except in compliance with the License. You can obtain a copy
 * in the file LICENSE in the source distribution or at
 * https://www.openssl.org/source/license.html
 */
#include "../bn_lcl.h"
#if !(defined(__GNUC__) && __GNUC__>=2)
# include "../bn_asm.c"         /* kind of dirty hack for Sun Studio */
#else
/*-
 * x86_64 BIGNUM accelerator version 0.1, December 2002.
 *
 * Implemented by Andy Polyakov <appro@openssl.org> for the OpenSSL
 * project.
 *
 * Rights for redistribution and usage in source and binary forms are
 * granted according to the OpenSSL license. Warranty of any kind is
 * disclaimed.
 *
 * Q. Version 0.1? It doesn't sound like Andy; he used to assign real
 *    version numbers, like 1.0...
 * A. Well, that's because this code is basically a quick-n-dirty
 *    proof-of-concept hack. As you can see, it's implemented with
 *    inline assembler, which means that you're bound to GCC and that
 *    there is likely plenty of room for further improvement.
 *
 * Q. Why inline assembler?
 * A. x86_64 features its own ABI, which I'm not familiar with. This is
 *    why I decided to let the compiler take care of the subroutine
 *    prologue/epilogue as well as register allocation. For reference:
 *    Win64 implements a different ABI for AMD64, different from the
 *    Linux one.
 *
 * Q. How much faster does it get?
 * A. 'apps/openssl speed rsa dsa' output with no-asm:
 *
 *                        sign    verify    sign/s verify/s
 *      rsa  512 bits   0.0006s   0.0001s   1683.8  18456.2
 *      rsa 1024 bits   0.0028s   0.0002s    356.0   6407.0
 *      rsa 2048 bits   0.0172s   0.0005s     58.0   1957.8
 *      rsa 4096 bits   0.1155s   0.0018s      8.7    555.6
 *                        sign    verify    sign/s verify/s
 *      dsa  512 bits   0.0005s   0.0006s   2100.8   1768.3
 *      dsa 1024 bits   0.0014s   0.0018s    692.3    559.2
 *      dsa 2048 bits   0.0049s   0.0061s    204.7    165.0
 *
 *    'apps/openssl speed rsa dsa' output with this module:
 *
 *                        sign    verify    sign/s verify/s
 *      rsa  512 bits   0.0004s   0.0000s   2767.1  33297.9
 *      rsa 1024 bits   0.0012s   0.0001s    867.4  14674.7
 *      rsa 2048 bits   0.0061s   0.0002s    164.0   5270.0
 *      rsa 4096 bits   0.0384s   0.0006s     26.1   1650.8
 *                        sign    verify    sign/s verify/s
 *      dsa  512 bits   0.0002s   0.0003s   4442.2   3786.3
 *      dsa 1024 bits   0.0005s   0.0007s   1835.1   1497.4
 *      dsa 2048 bits   0.0016s   0.0020s    620.4    504.6
 *
 *    For reference: the IA-32 assembler implementation performs
 *    very much like 64-bit code compiled with no-asm on the same
 *    machine.
 */

# if defined(_WIN64) || !defined(__LP64__)
#  define BN_ULONG unsigned long long
# else
#  define BN_ULONG unsigned long
# endif

# undef mul
# undef mul_add
77 * "m"(a), "+m"(r) is the way to favor DirectPath ยต-code;
78 * "g"(0) let the compiler to decide where does it
79 * want to keep the value of zero;
81 # define mul_add(r,a,word,carry) do { \
82 register BN_ULONG high,low; \
84 : "=a"(low),"=d"(high) \
87 asm ("addq %2,%0; adcq %3,%1" \
88 : "+r"(carry),"+d"(high)\
91 asm ("addq %2,%0; adcq %3,%1" \
92 : "+m"(r),"+d"(high) \

# define mul(r,a,word,carry) do {       \
        register BN_ULONG high,low;     \
        asm ("mulq %3"                  \
                : "=a"(low),"=d"(high)  \
                : "a"(word),"g"(a)      \
                : "cc");                \
        asm ("addq %2,%0; adcq %3,%1"   \
                : "+r"(carry),"+d"(high)\
                : "a"(low),"g"(0)       \
                : "cc");                \
        (r)=carry, carry=high;          \
        } while (0)

# undef sqr
# define sqr(r0,r1,a)                   \
        asm ("mulq %2"                  \
                : "=a"(r0),"=d"(r1)     \
                : "a"(a)                \
                : "cc");

BN_ULONG bn_mul_add_words(BN_ULONG *rp, const BN_ULONG *ap, int num,
                          BN_ULONG w)
{
    BN_ULONG c1 = 0;

    if (num <= 0)
        return c1;

    while (num & ~3) {
        mul_add(rp[0], ap[0], w, c1);
        mul_add(rp[1], ap[1], w, c1);
        mul_add(rp[2], ap[2], w, c1);
        mul_add(rp[3], ap[3], w, c1);
        ap += 4;
        rp += 4;
        num -= 4;
    }
    if (num) {
        mul_add(rp[0], ap[0], w, c1);
        if (--num == 0)
            return c1;
        mul_add(rp[1], ap[1], w, c1);
        if (--num == 0)
            return c1;
        mul_add(rp[2], ap[2], w, c1);
        return c1;
    }

    return c1;
}
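
/*
 * Editorial aside: the contract of bn_mul_add_words() in portable
 * pseudo-C (again assuming unsigned __int128; illustrative only):
 *
 *     carry = 0;
 *     for (i = 0; i < num; i++) {
 *         t = (unsigned __int128)ap[i] * w + rp[i] + carry;
 *         rp[i] = (BN_ULONG)t;
 *         carry = (BN_ULONG)(t >> 64);
 *     }
 *     return carry;
 *
 * bn_mul_words() below is the same minus the "+ rp[i]" term, and
 * bn_sqr_words() stores each double-width square a[i]^2 into two
 * result words with no carry chain at all.
 */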

BN_ULONG bn_mul_words(BN_ULONG *rp, const BN_ULONG *ap, int num, BN_ULONG w)
{
    BN_ULONG c1 = 0;

    if (num <= 0)
        return c1;

    while (num & ~3) {
        mul(rp[0], ap[0], w, c1);
        mul(rp[1], ap[1], w, c1);
        mul(rp[2], ap[2], w, c1);
        mul(rp[3], ap[3], w, c1);
        ap += 4;
        rp += 4;
        num -= 4;
    }
    if (num) {
        mul(rp[0], ap[0], w, c1);
        if (--num == 0)
            return c1;
        mul(rp[1], ap[1], w, c1);
        if (--num == 0)
            return c1;
        mul(rp[2], ap[2], w, c1);
    }
    return c1;
}

void bn_sqr_words(BN_ULONG *r, const BN_ULONG *a, int n)
{
    if (n <= 0)
        return;

    while (n & ~3) {
        sqr(r[0], r[1], a[0]);
        sqr(r[2], r[3], a[1]);
        sqr(r[4], r[5], a[2]);
        sqr(r[6], r[7], a[3]);
        a += 4;
        r += 8;
        n -= 4;
    }
    if (n) {
        sqr(r[0], r[1], a[0]);
        if (--n == 0)
            return;
        sqr(r[2], r[3], a[1]);
        if (--n == 0)
            return;
        sqr(r[4], r[5], a[2]);
    }
}

BN_ULONG bn_div_words(BN_ULONG h, BN_ULONG l, BN_ULONG d)
{
    BN_ULONG ret, waste;

    asm("divq      %4":"=a"(ret), "=d"(waste)
        :     "a"(l), "d"(h), "r"(d)
        :     "cc");

    return ret;
}

BN_ULONG bn_add_words(BN_ULONG *rp, const BN_ULONG *ap, const BN_ULONG *bp,
                      int n)
{
    BN_ULONG ret;
    size_t i = 0;

    if (n <= 0)
        return 0;

    asm volatile ("       subq    %0,%0           \n" /* clear carry */
                  "       jmp     1f              \n"
                  ".p2align 4                     \n"
                  "1:     movq    (%4,%2,8),%0    \n"
                  "       adcq    (%5,%2,8),%0    \n"
                  "       movq    %0,(%3,%2,8)    \n"
                  "       lea     1(%2),%2        \n"
                  "       dec     %1              \n"
                  "       jnz     1b              \n"
                  "       sbbq    %0,%0           \n"
                  :"=&r" (ret), "+c"(n), "+r"(i)
                  :"r"(rp), "r"(ap), "r"(bp)
                  :"cc", "memory");

    return ret & 1;
}

# ifndef SIMICS
BN_ULONG bn_sub_words(BN_ULONG *rp, const BN_ULONG *ap, const BN_ULONG *bp,
                      int n)
{
    BN_ULONG ret;
    size_t i = 0;

    if (n <= 0)
        return 0;

    asm volatile ("       subq    %0,%0           \n" /* clear borrow */
                  "       jmp     1f              \n"
                  ".p2align 4                     \n"
                  "1:     movq    (%4,%2,8),%0    \n"
                  "       sbbq    (%5,%2,8),%0    \n"
                  "       movq    %0,(%3,%2,8)    \n"
                  "       lea     1(%2),%2        \n"
                  "       dec     %1              \n"
                  "       jnz     1b              \n"
                  "       sbbq    %0,%0           \n"
                  :"=&r" (ret), "+c"(n), "+r"(i)
                  :"r"(rp), "r"(ap), "r"(bp)
                  :"cc", "memory");

    return ret & 1;
}
# else
/* Simics 1.4<7 has buggy sbbq:-( */
#  define BN_MASK2 0xffffffffffffffffL
BN_ULONG bn_sub_words(BN_ULONG *r, BN_ULONG *a, BN_ULONG *b, int n)
{
    BN_ULONG t1, t2;
    int c = 0;

    if (n <= 0)
        return (BN_ULONG)0;

    for (;;) {
        t1 = a[0];
        t2 = b[0];
        r[0] = (t1 - t2 - c) & BN_MASK2;
        if (t1 != t2)
            c = (t1 < t2);
        if (--n <= 0)
            break;

        t1 = a[1];
        t2 = b[1];
        r[1] = (t1 - t2 - c) & BN_MASK2;
        if (t1 != t2)
            c = (t1 < t2);
        if (--n <= 0)
            break;

        t1 = a[2];
        t2 = b[2];
        r[2] = (t1 - t2 - c) & BN_MASK2;
        if (t1 != t2)
            c = (t1 < t2);
        if (--n <= 0)
            break;

        t1 = a[3];
        t2 = b[3];
        r[3] = (t1 - t2 - c) & BN_MASK2;
        if (t1 != t2)
            c = (t1 < t2);
        if (--n <= 0)
            break;

        a += 4;
        b += 4;
        r += 4;
    }
    return c;
}
# endif

/* mul_add_c(a,b,c0,c1,c2) -- c+=a*b for three word number c=(c2,c1,c0) */
/* mul_add_c2(a,b,c0,c1,c2) -- c+=2*a*b for three word number c=(c2,c1,c0) */
/* sqr_add_c(a,i,c0,c1,c2) -- c+=a[i]^2 for three word number c=(c2,c1,c0) */
/*
 * sqr_add_c2(a,i,j,c0,c1,c2) -- c+=2*a[i]*a[j] for three word number
 * c=(c2,c1,c0)
 */

/*
 * Keep in mind that carrying into the high part of a multiplication
 * result cannot overflow, because the high part cannot be all-ones:
 * for 64-bit words, a*b <= (2^64-1)^2 = 2^128 - 2^65 + 1, so the high
 * word is at most 2^64-2 and always has room for one more carry.
 */
# if 0
/* original macros are kept for reference purposes */
#  define mul_add_c(a,b,c0,c1,c2)       do {    \
        BN_ULONG ta = (a), tb = (b);            \
        BN_ULONG lo, hi;                        \
        BN_UMULT_LOHI(lo,hi,ta,tb);             \
        c0 += lo; hi += (c0<lo)?1:0;            \
        c1 += hi; c2 += (c1<hi)?1:0;            \
        } while(0)

#  define mul_add_c2(a,b,c0,c1,c2)      do {    \
        BN_ULONG ta = (a), tb = (b);            \
        BN_ULONG lo, hi, tt;                    \
        BN_UMULT_LOHI(lo,hi,ta,tb);             \
        c0 += lo; tt = hi+((c0<lo)?1:0);        \
        c1 += tt; c2 += (c1<tt)?1:0;            \
        c0 += lo; hi += (c0<lo)?1:0;            \
        c1 += hi; c2 += (c1<hi)?1:0;            \
        } while(0)

#  define sqr_add_c(a,i,c0,c1,c2)       do {    \
        BN_ULONG ta = (a)[i];                   \
        BN_ULONG lo, hi;                        \
        BN_UMULT_LOHI(lo,hi,ta,ta);             \
        c0 += lo; hi += (c0<lo)?1:0;            \
        c1 += hi; c2 += (c1<hi)?1:0;            \
        } while(0)
# else
#  define mul_add_c(a,b,c0,c1,c2) do {  \
        BN_ULONG t1,t2;                 \
        asm ("mulq %3"                  \
                : "=a"(t1),"=d"(t2)     \
                : "a"(a),"m"(b)         \
                : "cc");                \
        asm ("addq %3,%0; adcq %4,%1; adcq %5,%2"       \
                : "+r"(c0),"+r"(c1),"+r"(c2)            \
                : "r"(t1),"r"(t2),"g"(0)                \
                : "cc");                                \
        } while (0)

#  define sqr_add_c(a,i,c0,c1,c2) do {  \
        BN_ULONG t1,t2;                 \
        asm ("mulq %2"                  \
                : "=a"(t1),"=d"(t2)     \
                : "a"(a[i])             \
                : "cc");                \
        asm ("addq %3,%0; adcq %4,%1; adcq %5,%2"       \
                : "+r"(c0),"+r"(c1),"+r"(c2)            \
                : "r"(t1),"r"(t2),"g"(0)                \
                : "cc");                                \
        } while (0)

#  define mul_add_c2(a,b,c0,c1,c2) do { \
        BN_ULONG t1,t2;                 \
        asm ("mulq %3"                  \
                : "=a"(t1),"=d"(t2)     \
                : "a"(a),"m"(b)         \
                : "cc");                \
        asm ("addq %3,%0; adcq %4,%1; adcq %5,%2"       \
                : "+r"(c0),"+r"(c1),"+r"(c2)            \
                : "r"(t1),"r"(t2),"g"(0)                \
                : "cc");                                \
        asm ("addq %3,%0; adcq %4,%1; adcq %5,%2"       \
                : "+r"(c0),"+r"(c1),"+r"(c2)            \
                : "r"(t1),"r"(t2),"g"(0)                \
                : "cc");                                \
        } while (0)
# endif

# define sqr_add_c2(a,i,j,c0,c1,c2)     \
        mul_add_c2((a)[i],(a)[j],c0,c1,c2)

void bn_mul_comba8(BN_ULONG *r, BN_ULONG *a, BN_ULONG *b)
{
    BN_ULONG c1, c2, c3;

    c1 = 0;
    c2 = 0;
    c3 = 0;
    mul_add_c(a[0], b[0], c1, c2, c3);
    r[0] = c1;
    c1 = 0;
    mul_add_c(a[0], b[1], c2, c3, c1);
    mul_add_c(a[1], b[0], c2, c3, c1);
    r[1] = c2;
    c2 = 0;
    mul_add_c(a[2], b[0], c3, c1, c2);
    mul_add_c(a[1], b[1], c3, c1, c2);
    mul_add_c(a[0], b[2], c3, c1, c2);
    r[2] = c3;
    c3 = 0;
    mul_add_c(a[0], b[3], c1, c2, c3);
    mul_add_c(a[1], b[2], c1, c2, c3);
    mul_add_c(a[2], b[1], c1, c2, c3);
    mul_add_c(a[3], b[0], c1, c2, c3);
    r[3] = c1;
    c1 = 0;
    mul_add_c(a[4], b[0], c2, c3, c1);
    mul_add_c(a[3], b[1], c2, c3, c1);
    mul_add_c(a[2], b[2], c2, c3, c1);
    mul_add_c(a[1], b[3], c2, c3, c1);
    mul_add_c(a[0], b[4], c2, c3, c1);
    r[4] = c2;
    c2 = 0;
    mul_add_c(a[0], b[5], c3, c1, c2);
    mul_add_c(a[1], b[4], c3, c1, c2);
    mul_add_c(a[2], b[3], c3, c1, c2);
    mul_add_c(a[3], b[2], c3, c1, c2);
    mul_add_c(a[4], b[1], c3, c1, c2);
    mul_add_c(a[5], b[0], c3, c1, c2);
    r[5] = c3;
    c3 = 0;
    mul_add_c(a[6], b[0], c1, c2, c3);
    mul_add_c(a[5], b[1], c1, c2, c3);
    mul_add_c(a[4], b[2], c1, c2, c3);
    mul_add_c(a[3], b[3], c1, c2, c3);
    mul_add_c(a[2], b[4], c1, c2, c3);
    mul_add_c(a[1], b[5], c1, c2, c3);
    mul_add_c(a[0], b[6], c1, c2, c3);
    r[6] = c1;
    c1 = 0;
    mul_add_c(a[0], b[7], c2, c3, c1);
    mul_add_c(a[1], b[6], c2, c3, c1);
    mul_add_c(a[2], b[5], c2, c3, c1);
    mul_add_c(a[3], b[4], c2, c3, c1);
    mul_add_c(a[4], b[3], c2, c3, c1);
    mul_add_c(a[5], b[2], c2, c3, c1);
    mul_add_c(a[6], b[1], c2, c3, c1);
    mul_add_c(a[7], b[0], c2, c3, c1);
    r[7] = c2;
    c2 = 0;
    mul_add_c(a[7], b[1], c3, c1, c2);
    mul_add_c(a[6], b[2], c3, c1, c2);
    mul_add_c(a[5], b[3], c3, c1, c2);
    mul_add_c(a[4], b[4], c3, c1, c2);
    mul_add_c(a[3], b[5], c3, c1, c2);
    mul_add_c(a[2], b[6], c3, c1, c2);
    mul_add_c(a[1], b[7], c3, c1, c2);
    r[8] = c3;
    c3 = 0;
    mul_add_c(a[2], b[7], c1, c2, c3);
    mul_add_c(a[3], b[6], c1, c2, c3);
    mul_add_c(a[4], b[5], c1, c2, c3);
    mul_add_c(a[5], b[4], c1, c2, c3);
    mul_add_c(a[6], b[3], c1, c2, c3);
    mul_add_c(a[7], b[2], c1, c2, c3);
    r[9] = c1;
    c1 = 0;
    mul_add_c(a[7], b[3], c2, c3, c1);
    mul_add_c(a[6], b[4], c2, c3, c1);
    mul_add_c(a[5], b[5], c2, c3, c1);
    mul_add_c(a[4], b[6], c2, c3, c1);
    mul_add_c(a[3], b[7], c2, c3, c1);
    r[10] = c2;
    c2 = 0;
    mul_add_c(a[4], b[7], c3, c1, c2);
    mul_add_c(a[5], b[6], c3, c1, c2);
    mul_add_c(a[6], b[5], c3, c1, c2);
    mul_add_c(a[7], b[4], c3, c1, c2);
    r[11] = c3;
    c3 = 0;
    mul_add_c(a[7], b[5], c1, c2, c3);
    mul_add_c(a[6], b[6], c1, c2, c3);
    mul_add_c(a[5], b[7], c1, c2, c3);
    r[12] = c1;
    c1 = 0;
    mul_add_c(a[6], b[7], c2, c3, c1);
    mul_add_c(a[7], b[6], c2, c3, c1);
    r[13] = c2;
    c2 = 0;
    mul_add_c(a[7], b[7], c3, c1, c2);
    r[14] = c3;
    r[15] = c1;
}
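
/*
 * Editorial note: bn_mul_comba8() above computes the full 16-word
 * product r[0..15] = a[0..7]*b[0..7] column by column (comba style);
 * the rotating (c1,c2,c3) names implement the three-word column
 * accumulator c=(c2,c1,c0) described before the macros. bn_mul_comba4()
 * below does the same for 4-word inputs, and the bn_sqr_comba*()
 * functions exploit symmetry via sqr_add_c2() to halve the multiply
 * count.
 */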

void bn_mul_comba4(BN_ULONG *r, BN_ULONG *a, BN_ULONG *b)
{
    BN_ULONG c1, c2, c3;

    c1 = 0;
    c2 = 0;
    c3 = 0;
    mul_add_c(a[0], b[0], c1, c2, c3);
    r[0] = c1;
    c1 = 0;
    mul_add_c(a[0], b[1], c2, c3, c1);
    mul_add_c(a[1], b[0], c2, c3, c1);
    r[1] = c2;
    c2 = 0;
    mul_add_c(a[2], b[0], c3, c1, c2);
    mul_add_c(a[1], b[1], c3, c1, c2);
    mul_add_c(a[0], b[2], c3, c1, c2);
    r[2] = c3;
    c3 = 0;
    mul_add_c(a[0], b[3], c1, c2, c3);
    mul_add_c(a[1], b[2], c1, c2, c3);
    mul_add_c(a[2], b[1], c1, c2, c3);
    mul_add_c(a[3], b[0], c1, c2, c3);
    r[3] = c1;
    c1 = 0;
    mul_add_c(a[3], b[1], c2, c3, c1);
    mul_add_c(a[2], b[2], c2, c3, c1);
    mul_add_c(a[1], b[3], c2, c3, c1);
    r[4] = c2;
    c2 = 0;
    mul_add_c(a[2], b[3], c3, c1, c2);
    mul_add_c(a[3], b[2], c3, c1, c2);
    r[5] = c3;
    c3 = 0;
    mul_add_c(a[3], b[3], c1, c2, c3);
    r[6] = c1;
    r[7] = c2;
}

void bn_sqr_comba8(BN_ULONG *r, const BN_ULONG *a)
{
    BN_ULONG c1, c2, c3;

    c1 = 0;
    c2 = 0;
    c3 = 0;
    sqr_add_c(a, 0, c1, c2, c3);
    r[0] = c1;
    c1 = 0;
    sqr_add_c2(a, 1, 0, c2, c3, c1);
    r[1] = c2;
    c2 = 0;
    sqr_add_c(a, 1, c3, c1, c2);
    sqr_add_c2(a, 2, 0, c3, c1, c2);
    r[2] = c3;
    c3 = 0;
    sqr_add_c2(a, 3, 0, c1, c2, c3);
    sqr_add_c2(a, 2, 1, c1, c2, c3);
    r[3] = c1;
    c1 = 0;
    sqr_add_c(a, 2, c2, c3, c1);
    sqr_add_c2(a, 3, 1, c2, c3, c1);
    sqr_add_c2(a, 4, 0, c2, c3, c1);
    r[4] = c2;
    c2 = 0;
    sqr_add_c2(a, 5, 0, c3, c1, c2);
    sqr_add_c2(a, 4, 1, c3, c1, c2);
    sqr_add_c2(a, 3, 2, c3, c1, c2);
    r[5] = c3;
    c3 = 0;
    sqr_add_c(a, 3, c1, c2, c3);
    sqr_add_c2(a, 4, 2, c1, c2, c3);
    sqr_add_c2(a, 5, 1, c1, c2, c3);
    sqr_add_c2(a, 6, 0, c1, c2, c3);
    r[6] = c1;
    c1 = 0;
    sqr_add_c2(a, 7, 0, c2, c3, c1);
    sqr_add_c2(a, 6, 1, c2, c3, c1);
    sqr_add_c2(a, 5, 2, c2, c3, c1);
    sqr_add_c2(a, 4, 3, c2, c3, c1);
    r[7] = c2;
    c2 = 0;
    sqr_add_c(a, 4, c3, c1, c2);
    sqr_add_c2(a, 5, 3, c3, c1, c2);
    sqr_add_c2(a, 6, 2, c3, c1, c2);
    sqr_add_c2(a, 7, 1, c3, c1, c2);
    r[8] = c3;
    c3 = 0;
    sqr_add_c2(a, 7, 2, c1, c2, c3);
    sqr_add_c2(a, 6, 3, c1, c2, c3);
    sqr_add_c2(a, 5, 4, c1, c2, c3);
    r[9] = c1;
    c1 = 0;
    sqr_add_c(a, 5, c2, c3, c1);
    sqr_add_c2(a, 6, 4, c2, c3, c1);
    sqr_add_c2(a, 7, 3, c2, c3, c1);
    r[10] = c2;
    c2 = 0;
    sqr_add_c2(a, 7, 4, c3, c1, c2);
    sqr_add_c2(a, 6, 5, c3, c1, c2);
    r[11] = c3;
    c3 = 0;
    sqr_add_c(a, 6, c1, c2, c3);
    sqr_add_c2(a, 7, 5, c1, c2, c3);
    r[12] = c1;
    c1 = 0;
    sqr_add_c2(a, 7, 6, c2, c3, c1);
    r[13] = c2;
    c2 = 0;
    sqr_add_c(a, 7, c3, c1, c2);
    r[14] = c3;
    r[15] = c1;
}

void bn_sqr_comba4(BN_ULONG *r, const BN_ULONG *a)
{
    BN_ULONG c1, c2, c3;

    c1 = 0;
    c2 = 0;
    c3 = 0;
    sqr_add_c(a, 0, c1, c2, c3);
    r[0] = c1;
    c1 = 0;
    sqr_add_c2(a, 1, 0, c2, c3, c1);
    r[1] = c2;
    c2 = 0;
    sqr_add_c(a, 1, c3, c1, c2);
    sqr_add_c2(a, 2, 0, c3, c1, c2);
    r[2] = c3;
    c3 = 0;
    sqr_add_c2(a, 3, 0, c1, c2, c3);
    sqr_add_c2(a, 2, 1, c1, c2, c3);
    r[3] = c1;
    c1 = 0;
    sqr_add_c(a, 2, c2, c3, c1);
    sqr_add_c2(a, 3, 1, c2, c3, c1);
    r[4] = c2;
    c2 = 0;
    sqr_add_c2(a, 3, 2, c3, c1, c2);
    r[5] = c3;
    c3 = 0;
    sqr_add_c(a, 3, c1, c2, c3);
    r[6] = c1;
    r[7] = c2;
}
#endif