2 * x86_64 BIGNUM accelerator version 0.1, December 2002.
4 * Implemented by Andy Polyakov <appro@fy.chalmers.se> for the OpenSSL
7 * Rights for redistribution and usage in source and binary forms are
8 * granted according to the OpenSSL license. Warranty of any kind is
11 * Q. Version 0.1? It doesn't sound like Andy, he used to assign real
12 * versions, like 1.0...
13 * A. Well, that's because this code is basically a quick-n-dirty
14 * proof-of-concept hack. As you can see it's implemented with
15 * inline assembler, which means that you're bound to GCC and that
16 * there might be enough room for further improvement.
18 * Q. Why inline assembler?
19 * A. x86_64 features own ABI which I'm not familiar with. This is
20 * why I decided to let the compiler take care of subroutine
21 * prologue/epilogue as well as register allocation. For reference.
22 * Win64 implements different ABI for AMD64, different from Linux.
24 * Q. How much faster does it get?
25 * A. 'apps/openssl speed rsa dsa' output with no-asm:
27 * sign verify sign/s verify/s
28 * rsa 512 bits 0.0006s 0.0001s 1683.8 18456.2
29 * rsa 1024 bits 0.0028s 0.0002s 356.0 6407.0
30 * rsa 2048 bits 0.0172s 0.0005s 58.0 1957.8
31 * rsa 4096 bits 0.1155s 0.0018s 8.7 555.6
32 * sign verify sign/s verify/s
33 * dsa 512 bits 0.0005s 0.0006s 2100.8 1768.3
34 * dsa 1024 bits 0.0014s 0.0018s 692.3 559.2
35 * dsa 2048 bits 0.0049s 0.0061s 204.7 165.0
37 * 'apps/openssl speed rsa dsa' output with this module:
39 * sign verify sign/s verify/s
40 * rsa 512 bits 0.0004s 0.0000s 2767.1 33297.9
41 * rsa 1024 bits 0.0012s 0.0001s 867.4 14674.7
42 * rsa 2048 bits 0.0061s 0.0002s 164.0 5270.0
43 * rsa 4096 bits 0.0384s 0.0006s 26.1 1650.8
44 * sign verify sign/s verify/s
45 * dsa 512 bits 0.0002s 0.0003s 4442.2 3786.3
46 * dsa 1024 bits 0.0005s 0.0007s 1835.1 1497.4
47 * dsa 2048 bits 0.0016s 0.0020s 620.4 504.6
49 * For the reference. IA-32 assembler implementation performs
50 * very much like 64-bit code compiled with no-asm on the same
/* Word type for all BIGNUM limb arithmetic below.
 * NOTE(review): assumes LP64 (unsigned long == 64 bits) — TODO confirm
 * before building on LLP64 targets such as Win64. */
54 #define BN_ULONG unsigned long
57 * "m"(a), "+m"(r) is the way to favor DirectPath µ-code;
58 * "g"(0) lets the compiler decide where it
59 * wants to keep the value of zero;
/*
 * mul_add(r,a,word,carry): multiply-accumulate one limb — judging from the
 * visible addq/adcq carry chain, this computes r += a*word + carry and
 * leaves the new carry in `carry`.
 * NOTE(review): this excerpt is truncated — the initial mulq asm and several
 * operand-list/closing lines of the macro are missing, and each line carries
 * a stray leading number from extraction. Code left byte-identical.
 */
61 #define mul_add(r,a,word,carry) do { \
62 register BN_ULONG high,low; \
64 : "=a"(low),"=d"(high) \
67 asm ("addq %2,%0; adcq %3,%1" \
68 : "+r"(carry),"+d"(high)\
71 asm ("addq %2,%0; adcq %3,%1" \
72 : "+m"(r),"+d"(high) \
/*
 * mul(r,a,word,carry): one limb of rp[] = ap[]*word — per the visible tail
 * `(r)=carry, carry=high`, this stores the running low word into r and
 * propagates the product's high word as the next carry.
 * NOTE(review): truncated excerpt — the mulq asm and some operand lines are
 * missing, and lines carry stray leading numbers. Code left byte-identical.
 */
78 #define mul(r,a,word,carry) do { \
79 register BN_ULONG high,low; \
81 : "=a"(low),"=d"(high) \
84 asm ("addq %2,%0; adcq %3,%1" \
85 : "+r"(carry),"+d"(high)\
88 (r)=carry, carry=high; \
/*
 * sqr(r0,r1,a): presumably computes the double-word square of a,
 * low half in r0 and high half in r1 — TODO confirm; the macro body
 * is missing from this excerpt.
 */
91 #define sqr(r0,r1,a) \
/*
 * bn_mul_add_words: rp[i] += ap[i]*w for num limbs, returning the final
 * carry c1. The visible body unrolls by four limbs, then handles the
 * 1-3 limb tail with early returns.
 * NOTE(review): truncated excerpt — the opening brace, c1 initialisation,
 * loop header and closing lines are missing; lines carry stray leading
 * numbers from extraction. Code left byte-identical.
 */
97 BN_ULONG bn_mul_add_words(BN_ULONG *rp, BN_ULONG *ap, int num, BN_ULONG w)
101 if (num <= 0) return(c1);
105 mul_add(rp[0],ap[0],w,c1);
106 mul_add(rp[1],ap[1],w,c1);
107 mul_add(rp[2],ap[2],w,c1);
108 mul_add(rp[3],ap[3],w,c1);
109 ap+=4; rp+=4; num-=4;
113 mul_add(rp[0],ap[0],w,c1); if (--num==0) return c1;
114 mul_add(rp[1],ap[1],w,c1); if (--num==0) return c1;
115 mul_add(rp[2],ap[2],w,c1); return c1;
/*
 * bn_mul_words: rp[i] = ap[i]*w + carry for num limbs, returning the final
 * carry c1. Same 4-way unroll + tail structure as bn_mul_add_words above.
 * NOTE(review): truncated excerpt — opening brace, c1 initialisation, loop
 * header, the tail's final return and closing lines are missing; lines carry
 * stray leading numbers from extraction. Code left byte-identical.
 */
121 BN_ULONG bn_mul_words(BN_ULONG *rp, BN_ULONG *ap, int num, BN_ULONG w)
125 if (num <= 0) return(c1);
129 mul(rp[0],ap[0],w,c1);
130 mul(rp[1],ap[1],w,c1);
131 mul(rp[2],ap[2],w,c1);
132 mul(rp[3],ap[3],w,c1);
133 ap+=4; rp+=4; num-=4;
137 mul(rp[0],ap[0],w,c1); if (--num == 0) return c1;
138 mul(rp[1],ap[1],w,c1); if (--num == 0) return c1;
139 mul(rp[2],ap[2],w,c1);
/*
 * bn_sqr_words: writes the double-word square of each input limb a[i]
 * into r[2i] (low) and r[2i+1] (high), n limbs total, via the sqr macro.
 * NOTE(review): truncated excerpt — the function body's opening, loop and
 * remaining tail cases are missing; lines carry stray leading numbers from
 * extraction. Code left byte-identical.
 */
144 void bn_sqr_words(BN_ULONG *r, BN_ULONG *a, int n)
158 sqr(r[0],r[1],a[0]); if (--n == 0) return;
159 sqr(r[2],r[3],a[1]); if (--n == 0) return;
/*
 * bn_div_words: divides the double word (h:l) by d — presumably via a divq
 * instruction whose asm template line is missing from this excerpt. Inputs
 * are pinned l->rax, h->rdx; the quotient comes back in rax ("ret"), and
 * the remainder lands in rdx ("waste", discarded).
 * NOTE(review): lines carry stray leading numbers from extraction; the asm
 * template and closing lines are missing. Code left byte-identical.
 */
164 BN_ULONG bn_div_words(BN_ULONG h, BN_ULONG l, BN_ULONG d)
165 { BN_ULONG ret,waste;
168 : "=a"(ret),"=d"(waste)
169 : "a"(l),"d"(h),"g"(d)
/*
 * bn_add_words: rp[] = ap[] + bp[] over n limbs using an adcq carry chain
 * (i indexes limbs, n counts down in rcx); returns the final carry in ret.
 * NOTE(review): truncated excerpt — the asm prologue (carry clear / loop
 * setup), loop-control lines and closing lines are missing; lines carry
 * stray leading numbers from extraction. Code left byte-identical.
 */
175 BN_ULONG bn_add_words (BN_ULONG *rp, BN_ULONG *ap, BN_ULONG *bp,int n)
176 { BN_ULONG ret=0,i=0;
178 if (n <= 0) return 0;
183 "1: movq (%4,%2,8),%0 \n"
184 " adcq (%5,%2,8),%0 \n"
185 " movq %0,(%3,%2,8) \n"
189 : "+a"(ret),"+c"(n),"+r"(i)
190 : "r"(rp),"r"(ap),"r"(bp)
/*
 * bn_sub_words (asm variant): rp[] = ap[] - bp[] over n limbs using an
 * sbbq borrow chain; returns the final borrow in ret. Mirrors
 * bn_add_words above with sbbq in place of adcq.
 * NOTE(review): truncated excerpt — asm prologue, loop control and closing
 * lines are missing; lines carry stray leading numbers from extraction.
 * A second, portable definition follows — presumably the two are separated
 * by an #if not visible here; TODO confirm. Code left byte-identical.
 */
198 BN_ULONG bn_sub_words (BN_ULONG *rp, BN_ULONG *ap, BN_ULONG *bp,int n)
199 { BN_ULONG ret=0,i=0;
201 if (n <= 0) return 0;
206 "1: movq (%4,%2,8),%0 \n"
207 " sbbq (%5,%2,8),%0 \n"
208 " movq %0,(%3,%2,8) \n"
212 : "+a"(ret),"+c"(n),"+r"(i)
213 : "r"(rp),"r"(ap),"r"(bp)
/*
 * Portable C fallback for bn_sub_words, kept because (per the original
 * comment) Simics 1.4<7 emulates sbbq incorrectly. Per visible limb:
 * r[i] = (a[i] - b[i] - borrow) mod 2^64, and the borrow is recomputed
 * as (t1 < t2) whenever the operands differ (when t1 == t2 the old
 * borrow value is deliberately kept).
 * NOTE(review): truncated excerpt — the t1/t2 loads, loop header, the
 * guarding #if and closing lines are missing; lines carry stray leading
 * numbers from extraction. Code left byte-identical.
 */
220 /* Simics 1.4<7 has buggy sbbq:-( */
221 #define BN_MASK2 0xffffffffffffffffL
222 BN_ULONG bn_sub_words(BN_ULONG *r, BN_ULONG *a, BN_ULONG *b, int n)
227 if (n <= 0) return((BN_ULONG)0);
232 r[0]=(t1-t2-c)&BN_MASK2;
233 if (t1 != t2) c=(t1 < t2);
237 r[1]=(t1-t2-c)&BN_MASK2;
238 if (t1 != t2) c=(t1 < t2);
242 r[2]=(t1-t2-c)&BN_MASK2;
243 if (t1 != t2) c=(t1 < t2);
247 r[3]=(t1-t2-c)&BN_MASK2;
248 if (t1 != t2) c=(t1 < t2);
259 /* mul_add_c(a,b,c0,c1,c2) -- c+=a*b for three word number c=(c2,c1,c0) */
260 /* mul_add_c2(a,b,c0,c1,c2) -- c+=2*a*b for three word number c=(c2,c1,c0) */
261 /* sqr_add_c(a,i,c0,c1,c2) -- c+=a[i]^2 for three word number c=(c2,c1,c0) */
262 /* sqr_add_c2(a,i,c0,c1,c2) -- c+=2*a[i]*a[j] for three word number c=(c2,c1,c0) */
265 /* original macros are kept for reference purposes */
/*
 * Reference C version of mul_add_c (superseded by the asm version below,
 * per the "original macros are kept for reference" note): adds the
 * double-word product ta*tb into the three-word accumulator (c2,c1,c0),
 * rippling the carries with (x < y) comparisons.
 * NOTE(review): truncated excerpt — the low-product line (t1 = ta*tb,
 * presumably) and the closing line are missing; lines carry stray leading
 * numbers from extraction. Code left byte-identical.
 */
266 #define mul_add_c(a,b,c0,c1,c2) { \
267 BN_ULONG ta=(a),tb=(b); \
269 t2 = BN_UMULT_HIGH(ta,tb); \
270 c0 += t1; t2 += (c0<t1)?1:0; \
271 c1 += t2; c2 += (c1<t2)?1:0; \
/*
 * Reference C version of mul_add_c2: adds 2*ta*tb into the three-word
 * accumulator (c2,c1,c0). The product (t1:t0) is doubled first — each
 * self-addition's carry is caught with a (x < y) test — then folded in
 * like mul_add_c.
 * NOTE(review): truncated excerpt — the low-product line (t0 = ta*tb,
 * presumably) and the closing line are missing; lines carry stray leading
 * numbers from extraction. Code left byte-identical.
 */
274 #define mul_add_c2(a,b,c0,c1,c2) { \
275 BN_ULONG ta=(a),tb=(b),t0; \
276 t1 = BN_UMULT_HIGH(ta,tb); \
278 t2 = t1+t1; c2 += (t2<t1)?1:0; \
279 t1 = t0+t0; t2 += (t1<t0)?1:0; \
280 c0 += t1; t2 += (c0<t1)?1:0; \
281 c1 += t2; c2 += (c1<t2)?1:0; \
/*
 * Inline-asm mul_add_c: adds a*b into the three-word accumulator
 * (c2,c1,c2 order per call site: c0 low, c1 middle, c2 high), using
 * addq/adcq pairs to ripple carries. This redefinition shadows the
 * reference C macro above.
 * NOTE(review): truncated excerpt — the mulq asm line and several operand/
 * closing lines are missing; lines carry stray leading numbers from
 * extraction. Code left byte-identical.
 */
284 #define mul_add_c(a,b,c0,c1,c2) do { \
286 : "=a"(t1),"=d"(t2) \
289 asm ("addq %2,%0; adcq %3,%1" \
290 : "+r"(c0),"+d"(t2) \
293 asm ("addq %2,%0; adcq %3,%1" \
294 : "+r"(c1),"+r"(c2) \
/*
 * Inline-asm sqr_add_c: adds a[i]^2 into the three-word accumulator
 * (c2,c1,c0), same addq/adcq carry-ripple shape as mul_add_c above.
 * NOTE(review): truncated excerpt — the mulq asm line and operand/closing
 * lines are missing; lines carry stray leading numbers from extraction.
 * Code left byte-identical.
 */
299 #define sqr_add_c(a,i,c0,c1,c2) do { \
301 : "=a"(t1),"=d"(t2) \
304 asm ("addq %2,%0; adcq %3,%1" \
305 : "+r"(c0),"+d"(t2) \
308 asm ("addq %2,%0; adcq %3,%1" \
309 : "+r"(c1),"+r"(c2) \
/*
 * Inline-asm mul_add_c2: adds 2*a*b into the three-word accumulator
 * (c2,c1,c0). The visible `addq %0,%0` self-additions double the high
 * (t2) and low (t1) product halves, catching the carry-out with adcq,
 * before the doubled product is folded in as in mul_add_c. This
 * redefinition shadows the reference C macro above.
 * NOTE(review): truncated excerpt — the mulq asm line and several operand/
 * closing lines are missing; lines carry stray leading numbers from
 * extraction. Code left byte-identical.
 */
314 #define mul_add_c2(a,b,c0,c1,c2) do { \
316 : "=a"(t1),"=d"(t2) \
319 asm ("addq %0,%0; adcq %2,%1" \
320 : "+d"(t2),"+r"(c2) \
323 asm ("addq %0,%0; adcq %2,%1" \
324 : "+a"(t1),"+d"(t2) \
327 asm ("addq %2,%0; adcq %3,%1" \
328 : "+r"(c0),"+d"(t2) \
331 asm ("addq %2,%0; adcq %3,%1" \
332 : "+r"(c1),"+r"(c2) \
/* sqr_add_c2: adds 2*a[i]*a[j] into (c2,c1,c0); the cross terms of a
 * Comba squaring, delegated to mul_add_c2.
 * NOTE(review): lines carry stray leading numbers from extraction;
 * code left byte-identical. */
338 #define sqr_add_c2(a,i,j,c0,c1,c2) \
339 mul_add_c2((a)[i],(a)[j],c0,c1,c2)
/*
 * bn_mul_comba8: 8x8-limb Comba multiplication, r[0..15] = a[0..7]*b[0..7].
 * Each group of mul_add_c calls accumulates one diagonal (all a[i]*b[j]
 * with i+j constant) into a three-word accumulator; the roles of c1/c2/c3
 * rotate from one diagonal to the next so the finished low word can be
 * stored and reused as the new high word.
 * NOTE(review): truncated excerpt — the local declarations, the r[k]=
 * store / accumulator-reset lines between diagonals, and the closing brace
 * are missing; lines carry stray leading numbers from extraction. Code
 * left byte-identical.
 */
341 void bn_mul_comba8(BN_ULONG *r, BN_ULONG *a, BN_ULONG *b)
349 mul_add_c(a[0],b[0],c1,c2,c3);
352 mul_add_c(a[0],b[1],c2,c3,c1);
353 mul_add_c(a[1],b[0],c2,c3,c1);
356 mul_add_c(a[2],b[0],c3,c1,c2);
357 mul_add_c(a[1],b[1],c3,c1,c2);
358 mul_add_c(a[0],b[2],c3,c1,c2);
361 mul_add_c(a[0],b[3],c1,c2,c3);
362 mul_add_c(a[1],b[2],c1,c2,c3);
363 mul_add_c(a[2],b[1],c1,c2,c3);
364 mul_add_c(a[3],b[0],c1,c2,c3);
367 mul_add_c(a[4],b[0],c2,c3,c1);
368 mul_add_c(a[3],b[1],c2,c3,c1);
369 mul_add_c(a[2],b[2],c2,c3,c1);
370 mul_add_c(a[1],b[3],c2,c3,c1);
371 mul_add_c(a[0],b[4],c2,c3,c1);
374 mul_add_c(a[0],b[5],c3,c1,c2);
375 mul_add_c(a[1],b[4],c3,c1,c2);
376 mul_add_c(a[2],b[3],c3,c1,c2);
377 mul_add_c(a[3],b[2],c3,c1,c2);
378 mul_add_c(a[4],b[1],c3,c1,c2);
379 mul_add_c(a[5],b[0],c3,c1,c2);
382 mul_add_c(a[6],b[0],c1,c2,c3);
383 mul_add_c(a[5],b[1],c1,c2,c3);
384 mul_add_c(a[4],b[2],c1,c2,c3);
385 mul_add_c(a[3],b[3],c1,c2,c3);
386 mul_add_c(a[2],b[4],c1,c2,c3);
387 mul_add_c(a[1],b[5],c1,c2,c3);
388 mul_add_c(a[0],b[6],c1,c2,c3);
391 mul_add_c(a[0],b[7],c2,c3,c1);
392 mul_add_c(a[1],b[6],c2,c3,c1);
393 mul_add_c(a[2],b[5],c2,c3,c1);
394 mul_add_c(a[3],b[4],c2,c3,c1);
395 mul_add_c(a[4],b[3],c2,c3,c1);
396 mul_add_c(a[5],b[2],c2,c3,c1);
397 mul_add_c(a[6],b[1],c2,c3,c1);
398 mul_add_c(a[7],b[0],c2,c3,c1);
401 mul_add_c(a[7],b[1],c3,c1,c2);
402 mul_add_c(a[6],b[2],c3,c1,c2);
403 mul_add_c(a[5],b[3],c3,c1,c2);
404 mul_add_c(a[4],b[4],c3,c1,c2);
405 mul_add_c(a[3],b[5],c3,c1,c2);
406 mul_add_c(a[2],b[6],c3,c1,c2);
407 mul_add_c(a[1],b[7],c3,c1,c2);
410 mul_add_c(a[2],b[7],c1,c2,c3);
411 mul_add_c(a[3],b[6],c1,c2,c3);
412 mul_add_c(a[4],b[5],c1,c2,c3);
413 mul_add_c(a[5],b[4],c1,c2,c3);
414 mul_add_c(a[6],b[3],c1,c2,c3);
415 mul_add_c(a[7],b[2],c1,c2,c3);
418 mul_add_c(a[7],b[3],c2,c3,c1);
419 mul_add_c(a[6],b[4],c2,c3,c1);
420 mul_add_c(a[5],b[5],c2,c3,c1);
421 mul_add_c(a[4],b[6],c2,c3,c1);
422 mul_add_c(a[3],b[7],c2,c3,c1);
425 mul_add_c(a[4],b[7],c3,c1,c2);
426 mul_add_c(a[5],b[6],c3,c1,c2);
427 mul_add_c(a[6],b[5],c3,c1,c2);
428 mul_add_c(a[7],b[4],c3,c1,c2);
431 mul_add_c(a[7],b[5],c1,c2,c3);
432 mul_add_c(a[6],b[6],c1,c2,c3);
433 mul_add_c(a[5],b[7],c1,c2,c3);
436 mul_add_c(a[6],b[7],c2,c3,c1);
437 mul_add_c(a[7],b[6],c2,c3,c1);
440 mul_add_c(a[7],b[7],c3,c1,c2);
/*
 * bn_mul_comba4: 4x4-limb Comba multiplication, r[0..7] = a[0..3]*b[0..3].
 * Same diagonal-accumulation scheme as bn_mul_comba8 above, with the
 * c1/c2/c3 accumulator roles rotating per diagonal.
 * NOTE(review): truncated excerpt — local declarations, the r[k]= store
 * lines between diagonals, and the closing brace are missing; lines carry
 * stray leading numbers from extraction. Code left byte-identical.
 */
445 void bn_mul_comba4(BN_ULONG *r, BN_ULONG *a, BN_ULONG *b)
453 mul_add_c(a[0],b[0],c1,c2,c3);
456 mul_add_c(a[0],b[1],c2,c3,c1);
457 mul_add_c(a[1],b[0],c2,c3,c1);
460 mul_add_c(a[2],b[0],c3,c1,c2);
461 mul_add_c(a[1],b[1],c3,c1,c2);
462 mul_add_c(a[0],b[2],c3,c1,c2);
465 mul_add_c(a[0],b[3],c1,c2,c3);
466 mul_add_c(a[1],b[2],c1,c2,c3);
467 mul_add_c(a[2],b[1],c1,c2,c3);
468 mul_add_c(a[3],b[0],c1,c2,c3);
471 mul_add_c(a[3],b[1],c2,c3,c1);
472 mul_add_c(a[2],b[2],c2,c3,c1);
473 mul_add_c(a[1],b[3],c2,c3,c1);
476 mul_add_c(a[2],b[3],c3,c1,c2);
477 mul_add_c(a[3],b[2],c3,c1,c2);
480 mul_add_c(a[3],b[3],c1,c2,c3);
/*
 * bn_sqr_comba8: 8-limb Comba squaring, r[0..15] = a[0..7]^2. Each
 * diagonal sums sqr_add_c for the square term a[k]^2 (when i+j is even)
 * plus sqr_add_c2 for each doubled cross term 2*a[i]*a[j]; the c1/c2/c3
 * accumulator roles rotate per diagonal as in bn_mul_comba8.
 * NOTE(review): truncated excerpt — local declarations, the r[k]= store
 * lines between diagonals, and the closing brace are missing; lines carry
 * stray leading numbers from extraction. Code left byte-identical.
 */
485 void bn_sqr_comba8(BN_ULONG *r, BN_ULONG *a)
493 sqr_add_c(a,0,c1,c2,c3);
496 sqr_add_c2(a,1,0,c2,c3,c1);
499 sqr_add_c(a,1,c3,c1,c2);
500 sqr_add_c2(a,2,0,c3,c1,c2);
503 sqr_add_c2(a,3,0,c1,c2,c3);
504 sqr_add_c2(a,2,1,c1,c2,c3);
507 sqr_add_c(a,2,c2,c3,c1);
508 sqr_add_c2(a,3,1,c2,c3,c1);
509 sqr_add_c2(a,4,0,c2,c3,c1);
512 sqr_add_c2(a,5,0,c3,c1,c2);
513 sqr_add_c2(a,4,1,c3,c1,c2);
514 sqr_add_c2(a,3,2,c3,c1,c2);
517 sqr_add_c(a,3,c1,c2,c3);
518 sqr_add_c2(a,4,2,c1,c2,c3);
519 sqr_add_c2(a,5,1,c1,c2,c3);
520 sqr_add_c2(a,6,0,c1,c2,c3);
523 sqr_add_c2(a,7,0,c2,c3,c1);
524 sqr_add_c2(a,6,1,c2,c3,c1);
525 sqr_add_c2(a,5,2,c2,c3,c1);
526 sqr_add_c2(a,4,3,c2,c3,c1);
529 sqr_add_c(a,4,c3,c1,c2);
530 sqr_add_c2(a,5,3,c3,c1,c2);
531 sqr_add_c2(a,6,2,c3,c1,c2);
532 sqr_add_c2(a,7,1,c3,c1,c2);
535 sqr_add_c2(a,7,2,c1,c2,c3);
536 sqr_add_c2(a,6,3,c1,c2,c3);
537 sqr_add_c2(a,5,4,c1,c2,c3);
540 sqr_add_c(a,5,c2,c3,c1);
541 sqr_add_c2(a,6,4,c2,c3,c1);
542 sqr_add_c2(a,7,3,c2,c3,c1);
545 sqr_add_c2(a,7,4,c3,c1,c2);
546 sqr_add_c2(a,6,5,c3,c1,c2);
549 sqr_add_c(a,6,c1,c2,c3);
550 sqr_add_c2(a,7,5,c1,c2,c3);
553 sqr_add_c2(a,7,6,c2,c3,c1);
556 sqr_add_c(a,7,c3,c1,c2);
/*
 * bn_sqr_comba4: 4-limb Comba squaring, r[0..7] = a[0..3]^2. Same
 * square-plus-doubled-cross-term diagonal scheme as bn_sqr_comba8 above.
 * NOTE(review): truncated excerpt — local declarations, the r[k]= store
 * lines between diagonals, and the closing brace are missing; lines carry
 * stray leading numbers from extraction. Code left byte-identical.
 */
561 void bn_sqr_comba4(BN_ULONG *r, BN_ULONG *a)
569 sqr_add_c(a,0,c1,c2,c3);
572 sqr_add_c2(a,1,0,c2,c3,c1);
575 sqr_add_c(a,1,c3,c1,c2);
576 sqr_add_c2(a,2,0,c3,c1,c2);
579 sqr_add_c2(a,3,0,c1,c2,c3);
580 sqr_add_c2(a,2,1,c1,c2,c3);
583 sqr_add_c(a,2,c2,c3,c1);
584 sqr_add_c2(a,3,1,c2,c3,c1);
587 sqr_add_c2(a,3,2,c3,c1,c2);
590 sqr_add_c(a,3,c1,c2,c3);