# include "../bn_asm.c" /* kind of dirty hack for Sun Studio */
/*
 * x86_64 BIGNUM accelerator version 0.1, December 2002.
 *
 * Implemented by Andy Polyakov <appro@fy.chalmers.se> for the OpenSSL
 * project.
 *
 * Rights for redistribution and usage in source and binary forms are
 * granted according to the OpenSSL license. Warranty of any kind is
 * disclaimed.
 *
 * Q. Version 0.1? It doesn't sound like Andy; he used to assign real
 *    version numbers, like 1.0...
 * A. Well, that's because this code is basically a quick-n-dirty
 *    proof-of-concept hack. As you can see, it's implemented with
 *    inline assembler, which means that you're bound to GCC and that
 *    there might be enough room for further improvement.
 *
 * Q. Why inline assembler?
 * A. x86_64 features its own ABI, which I'm not familiar with. This is
 *    why I decided to let the compiler take care of the subroutine
 *    prologue/epilogue as well as register allocation. For reference:
 *    Win64 implements a different ABI for AMD64 than Linux does.
 *
 * Q. How much faster does it get?
 * A. 'apps/openssl speed rsa dsa' output with no-asm:
 *
 *                         sign    verify    sign/s verify/s
 *     rsa  512 bits   0.0006s   0.0001s   1683.8  18456.2
 *     rsa 1024 bits   0.0028s   0.0002s    356.0   6407.0
 *     rsa 2048 bits   0.0172s   0.0005s     58.0   1957.8
 *     rsa 4096 bits   0.1155s   0.0018s      8.7    555.6
 *                         sign    verify    sign/s verify/s
 *     dsa  512 bits   0.0005s   0.0006s   2100.8   1768.3
 *     dsa 1024 bits   0.0014s   0.0018s    692.3    559.2
 *     dsa 2048 bits   0.0049s   0.0061s    204.7    165.0
 *
 *    'apps/openssl speed rsa dsa' output with this module:
 *
 *                         sign    verify    sign/s verify/s
 *     rsa  512 bits   0.0004s   0.0000s   2767.1  33297.9
 *     rsa 1024 bits   0.0012s   0.0001s    867.4  14674.7
 *     rsa 2048 bits   0.0061s   0.0002s    164.0   5270.0
 *     rsa 4096 bits   0.0384s   0.0006s     26.1   1650.8
 *                         sign    verify    sign/s verify/s
 *     dsa  512 bits   0.0002s   0.0003s   4442.2   3786.3
 *     dsa 1024 bits   0.0005s   0.0007s   1835.1   1497.4
 *     dsa 2048 bits   0.0016s   0.0020s    620.4    504.6
 *
 *    For reference: the IA-32 assembler implementation performs very
 *    much like 64-bit code compiled with no-asm on the same machine.
 */
#define BN_ULONG unsigned long
/*
 * "m"(a), "+m"(r) is the way to favor DirectPath µ-code;
 * "g"(0) lets the compiler decide where it wants to keep
 * the value of zero;
 */
#define mul_add(r,a,word,carry) do {    \
        register BN_ULONG high,low;     \
        asm ("mulq %3"                  \
                : "=a"(low),"=d"(high)  \
                : "a"(word),"m"(a)      \
                : "cc");                \
        asm ("addq %2,%0; adcq %3,%1"   \
                : "+r"(carry),"+d"(high)\
                : "a"(low),"g"(0)       \
                : "cc");                \
        asm ("addq %2,%0; adcq %3,%1"   \
                : "+m"(r),"+d"(high)    \
                : "r"(carry),"g"(0)     \
                : "cc");                \
        carry=high;                     \
        } while (0)
#define mul(r,a,word,carry) do {        \
        register BN_ULONG high,low;     \
        asm ("mulq %3"                  \
                : "=a"(low),"=d"(high)  \
                : "a"(word),"m"(a)      \
                : "cc");                \
        asm ("addq %2,%0; adcq %3,%1"   \
                : "+r"(carry),"+d"(high)\
                : "a"(low),"g"(0)       \
                : "cc");                \
        (r)=carry, carry=high;          \
        } while (0)
#define sqr(r0,r1,a)                    \
        asm ("mulq %2"                  \
                : "=a"(r0),"=d"(r1)     \
                : "a"(a)                \
                : "cc");
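
/*
 * Illustrative sketch, not part of the original module: in portable
 * terms, mul_add() above performs one 64x64->128-bit multiply-and-
 * accumulate step.  Assuming a GCC-style unsigned __int128 (a safe
 * assumption in this GCC-only file), an equivalent (call it
 * mul_add_ref) would be:
 */
#if 0
static void mul_add_ref(BN_ULONG *r, BN_ULONG a, BN_ULONG w, BN_ULONG *carry)
{
        unsigned __int128 t = (unsigned __int128)a*w + *r + *carry;

        *r     = (BN_ULONG)t;           /* low half goes back to memory */
        *carry = (BN_ULONG)(t >> 64);   /* high half is the carry out */
}
#endif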
BN_ULONG bn_mul_add_words(BN_ULONG *rp, BN_ULONG *ap, int num, BN_ULONG w)
        { BN_ULONG c1=0;

        if (num <= 0) return(c1);

        while (num&~3) {
                mul_add(rp[0],ap[0],w,c1);
                mul_add(rp[1],ap[1],w,c1);
                mul_add(rp[2],ap[2],w,c1);
                mul_add(rp[3],ap[3],w,c1);
                ap+=4; rp+=4; num-=4;
                }
        if (num) {
                mul_add(rp[0],ap[0],w,c1); if (--num==0) return c1;
                mul_add(rp[1],ap[1],w,c1); if (--num==0) return c1;
                mul_add(rp[2],ap[2],w,c1); return c1;
                }
        return(c1);
        }
BN_ULONG bn_mul_words(BN_ULONG *rp, BN_ULONG *ap, int num, BN_ULONG w)
        { BN_ULONG c1=0;

        if (num <= 0) return(c1);

        while (num&~3) {
                mul(rp[0],ap[0],w,c1);
                mul(rp[1],ap[1],w,c1);
                mul(rp[2],ap[2],w,c1);
                mul(rp[3],ap[3],w,c1);
                ap+=4; rp+=4; num-=4;
                }
        if (num) {
                mul(rp[0],ap[0],w,c1); if (--num == 0) return c1;
                mul(rp[1],ap[1],w,c1); if (--num == 0) return c1;
                mul(rp[2],ap[2],w,c1);
                }
        return(c1);
        }
void bn_sqr_words(BN_ULONG *r, BN_ULONG *a, int n)
        {
        if (n <= 0) return;

        while (n&~3) {
                sqr(r[0],r[1],a[0]);
                sqr(r[2],r[3],a[1]);
                sqr(r[4],r[5],a[2]);
                sqr(r[6],r[7],a[3]);
                a+=4; r+=8; n-=4;
                }
        if (n) {
                sqr(r[0],r[1],a[0]); if (--n == 0) return;
                sqr(r[2],r[3],a[1]); if (--n == 0) return;
                sqr(r[4],r[5],a[2]);
                }
        }
BN_ULONG bn_div_words(BN_ULONG h, BN_ULONG l, BN_ULONG d)
        { BN_ULONG ret,waste;

        asm ("divq %4"
                : "=a"(ret),"=d"(waste)
                : "a"(l),"d"(h),"g"(d)
                : "cc");

        return ret;
        }
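
/*
 * Note (not in the original): divq divides the 128-bit value %rdx:%rax,
 * i.e. (h<<64)|l, by d, leaving the quotient in %rax and the remainder
 * in %rdx (discarded here via 'waste').  The caller must guarantee
 * h < d, otherwise the quotient overflows 64 bits and the CPU raises
 * #DE.  A portable sketch of the same contract, assuming a GCC-style
 * unsigned __int128:
 */
#if 0
static BN_ULONG div_words_ref(BN_ULONG h, BN_ULONG l, BN_ULONG d)
{
        /* same contract, assuming h < d so the quotient fits a word */
        return (BN_ULONG)((((unsigned __int128)h << 64) | l) / d);
}
#endif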
#ifndef SIMICS
BN_ULONG bn_add_words (BN_ULONG *rp, BN_ULONG *ap, BN_ULONG *bp, int n)
        { BN_ULONG ret=0,i=0;

        if (n <= 0) return 0;

        asm (
        "       subq    %2,%2           \n"     /* clear i and CF */
        ".p2align 4                     \n"
        "1:     movq    (%4,%2,8),%0    \n"
        "       adcq    (%5,%2,8),%0    \n"
        "       movq    %0,(%3,%2,8)    \n"
        "       leaq    1(%2),%2        \n"     /* lea leaves CF intact */
        "       loop    1b              \n"     /* so does loop */
        "       sbbq    %0,%0           \n"     /* ret = 0 - CF */
                : "=&a"(ret),"+c"(n),"=&r"(i)
                : "r"(rp),"r"(ap),"r"(bp)
                : "cc","memory"
        );

        return ret&1;
        }
BN_ULONG bn_sub_words (BN_ULONG *rp, BN_ULONG *ap, BN_ULONG *bp, int n)
        { BN_ULONG ret=0,i=0;

        if (n <= 0) return 0;

        asm (
        "       subq    %2,%2           \n"     /* clear i and CF */
        ".p2align 4                     \n"
        "1:     movq    (%4,%2,8),%0    \n"
        "       sbbq    (%5,%2,8),%0    \n"
        "       movq    %0,(%3,%2,8)    \n"
        "       leaq    1(%2),%2        \n"
        "       loop    1b              \n"
        "       sbbq    %0,%0           \n"     /* ret = 0 - borrow */
                : "=&a"(ret),"+c"(n),"=&r"(i)
                : "r"(rp),"r"(ap),"r"(bp)
                : "cc","memory"
        );

        return ret&1;
        }
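
/*
 * Illustrative sketch, not part of the original: the two loops above
 * are plain word-wise add/subtract with the carry kept in the CPU
 * carry flag between iterations ('loop' decrements %rcx without
 * touching CF, and lea does not affect flags either).  A portable
 * equivalent of bn_add_words, assuming unsigned __int128:
 */
#if 0
static BN_ULONG add_words_ref(BN_ULONG *rp, const BN_ULONG *ap,
                              const BN_ULONG *bp, int n)
{
        unsigned __int128 t;
        BN_ULONG carry = 0;
        int i;

        for (i = 0; i < n; i++) {
                t = (unsigned __int128)ap[i] + bp[i] + carry;
                rp[i] = (BN_ULONG)t;
                carry = (BN_ULONG)(t >> 64);    /* always 0 or 1 */
        }
        return carry;
}
#endif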
#else
/* Simics 1.4<7 has a buggy sbbq :-( */
#define BN_MASK2 0xffffffffffffffffL
BN_ULONG bn_sub_words(BN_ULONG *r, BN_ULONG *a, BN_ULONG *b, int n)
        { BN_ULONG t1,t2;
        int c=0;

        if (n <= 0) return((BN_ULONG)0);

        for (;;) {
                t1=a[0]; t2=b[0];
                r[0]=(t1-t2-c)&BN_MASK2;
                if (t1 != t2) c=(t1 < t2);
                if (--n <= 0) break;

                t1=a[1]; t2=b[1];
                r[1]=(t1-t2-c)&BN_MASK2;
                if (t1 != t2) c=(t1 < t2);
                if (--n <= 0) break;

                t1=a[2]; t2=b[2];
                r[2]=(t1-t2-c)&BN_MASK2;
                if (t1 != t2) c=(t1 < t2);
                if (--n <= 0) break;

                t1=a[3]; t2=b[3];
                r[3]=(t1-t2-c)&BN_MASK2;
                if (t1 != t2) c=(t1 < t2);
                if (--n <= 0) break;

                a+=4; b+=4; r+=4;
                }
        return(c);
        }
#endif
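
/*
 * Note (not in the original): the fallback's borrow logic works because
 * when t1 == t2 the word result is (-c) mod 2^64 and the borrow out
 * equals the borrow in, so c can be left untouched; otherwise the
 * borrow out is exactly (t1 < t2).  A branch-free sketch of one step,
 * assuming a 64-bit BN_ULONG:
 */
#if 0
static BN_ULONG sub_step_ref(BN_ULONG t1, BN_ULONG t2, BN_ULONG *c)
{
        BN_ULONG res = (t1 - t2 - *c) & BN_MASK2;

        /* borrow out: t1 < t2, or t1 == t2 with an incoming borrow */
        *c = (t1 < t2) | ((t1 == t2) & *c);
        return res;
}
#endif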
/* mul_add_c(a,b,c0,c1,c2)    -- c += a*b         for the three-word number c=(c2,c1,c0) */
/* mul_add_c2(a,b,c0,c1,c2)   -- c += 2*a*b       for the three-word number c=(c2,c1,c0) */
/* sqr_add_c(a,i,c0,c1,c2)    -- c += a[i]^2      for the three-word number c=(c2,c1,c0) */
/* sqr_add_c2(a,i,j,c0,c1,c2) -- c += 2*a[i]*a[j] for the three-word number c=(c2,c1,c0) */
#if 0
/* original macros are kept for reference purposes */
#define mul_add_c(a,b,c0,c1,c2) {       \
        BN_ULONG ta=(a),tb=(b);         \
        t1 = ta * tb;                   \
        t2 = BN_UMULT_HIGH(ta,tb);      \
        c0 += t1; t2 += (c0<t1)?1:0;    \
        c1 += t2; c2 += (c1<t2)?1:0;    \
        }

#define mul_add_c2(a,b,c0,c1,c2) {      \
        BN_ULONG ta=(a),tb=(b),t0;      \
        t1 = BN_UMULT_HIGH(ta,tb);      \
        t0 = ta * tb;                   \
        t2 = t1+t1; c2 += (t2<t1)?1:0;  \
        t1 = t0+t0; t2 += (t1<t0)?1:0;  \
        c0 += t1; t2 += (c0<t1)?1:0;    \
        c1 += t2; c2 += (c1<t2)?1:0;    \
        }
#else
#define mul_add_c(a,b,c0,c1,c2) do {    \
        asm ("mulq %3"                  \
                : "=a"(t1),"=d"(t2)     \
                : "a"(a),"m"(b)         \
                : "cc");                \
        asm ("addq %2,%0; adcq %3,%1"   \
                : "+r"(c0),"+d"(t2)     \
                : "a"(t1),"g"(0)        \
                : "cc");                \
        asm ("addq %2,%0; adcq %3,%1"   \
                : "+r"(c1),"+r"(c2)     \
                : "d"(t2),"g"(0)        \
                : "cc");                \
        } while (0)

#define sqr_add_c(a,i,c0,c1,c2) do {    \
        asm ("mulq %2"                  \
                : "=a"(t1),"=d"(t2)     \
                : "a"(a[i])             \
                : "cc");                \
        asm ("addq %2,%0; adcq %3,%1"   \
                : "+r"(c0),"+d"(t2)     \
                : "a"(t1),"g"(0)        \
                : "cc");                \
        asm ("addq %2,%0; adcq %3,%1"   \
                : "+r"(c1),"+r"(c2)     \
                : "d"(t2),"g"(0)        \
                : "cc");                \
        } while (0)

#define mul_add_c2(a,b,c0,c1,c2) do {   \
        asm ("mulq %3"                  \
                : "=a"(t1),"=d"(t2)     \
                : "a"(a),"m"(b)         \
                : "cc");                \
        asm ("addq %0,%0; adcq %2,%1"   \
                : "+d"(t2),"+r"(c2)     \
                : "g"(0)                \
                : "cc");                \
        asm ("addq %0,%0; adcq %2,%1"   \
                : "+a"(t1),"+d"(t2)     \
                : "g"(0)                \
                : "cc");                \
        asm ("addq %2,%0; adcq %3,%1"   \
                : "+r"(c0),"+d"(t2)     \
                : "a"(t1),"g"(0)        \
                : "cc");                \
        asm ("addq %2,%0; adcq %3,%1"   \
                : "+r"(c1),"+r"(c2)     \
                : "d"(t2),"g"(0)        \
                : "cc");                \
        } while (0)
#endif
#define sqr_add_c2(a,i,j,c0,c1,c2)      \
        mul_add_c2((a)[i],(a)[j],c0,c1,c2)
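
/*
 * Illustrative sketch, not part of the original: with an unsigned
 * __int128 the column accumulators are easy to express.  mul_add_c
 * adds a*b into the three-word accumulator (c2,c1,c0); mul_add_c2
 * adds it twice, which is why the asm version above needs the two
 * extra doubling steps that capture the overflow of t1+t1 and t2+t2.
 * A hypothetical reference version:
 */
#if 0
#define mul_add_c_ref(a,b,c0,c1,c2) do {                        \
        unsigned __int128 t = (unsigned __int128)(a)*(b);       \
        BN_ULONG lo = (BN_ULONG)t, hi = (BN_ULONG)(t >> 64);    \
        (c0) += lo; hi += ((c0) < lo);  /* carry into middle word */ \
        (c1) += hi; (c2) += ((c1) < hi);/* carry into top word */    \
        } while (0)
#endif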
void bn_mul_comba8(BN_ULONG *r, BN_ULONG *a, BN_ULONG *b)
        { BN_ULONG t1,t2;
        BN_ULONG c1,c2,c3;

        c1=0; c2=0; c3=0;
        mul_add_c(a[0],b[0],c1,c2,c3);
        r[0]=c1; c1=0;
        mul_add_c(a[0],b[1],c2,c3,c1);
        mul_add_c(a[1],b[0],c2,c3,c1);
        r[1]=c2; c2=0;
        mul_add_c(a[2],b[0],c3,c1,c2);
        mul_add_c(a[1],b[1],c3,c1,c2);
        mul_add_c(a[0],b[2],c3,c1,c2);
        r[2]=c3; c3=0;
        mul_add_c(a[0],b[3],c1,c2,c3);
        mul_add_c(a[1],b[2],c1,c2,c3);
        mul_add_c(a[2],b[1],c1,c2,c3);
        mul_add_c(a[3],b[0],c1,c2,c3);
        r[3]=c1; c1=0;
        mul_add_c(a[4],b[0],c2,c3,c1);
        mul_add_c(a[3],b[1],c2,c3,c1);
        mul_add_c(a[2],b[2],c2,c3,c1);
        mul_add_c(a[1],b[3],c2,c3,c1);
        mul_add_c(a[0],b[4],c2,c3,c1);
        r[4]=c2; c2=0;
        mul_add_c(a[0],b[5],c3,c1,c2);
        mul_add_c(a[1],b[4],c3,c1,c2);
        mul_add_c(a[2],b[3],c3,c1,c2);
        mul_add_c(a[3],b[2],c3,c1,c2);
        mul_add_c(a[4],b[1],c3,c1,c2);
        mul_add_c(a[5],b[0],c3,c1,c2);
        r[5]=c3; c3=0;
        mul_add_c(a[6],b[0],c1,c2,c3);
        mul_add_c(a[5],b[1],c1,c2,c3);
        mul_add_c(a[4],b[2],c1,c2,c3);
        mul_add_c(a[3],b[3],c1,c2,c3);
        mul_add_c(a[2],b[4],c1,c2,c3);
        mul_add_c(a[1],b[5],c1,c2,c3);
        mul_add_c(a[0],b[6],c1,c2,c3);
        r[6]=c1; c1=0;
        mul_add_c(a[0],b[7],c2,c3,c1);
        mul_add_c(a[1],b[6],c2,c3,c1);
        mul_add_c(a[2],b[5],c2,c3,c1);
        mul_add_c(a[3],b[4],c2,c3,c1);
        mul_add_c(a[4],b[3],c2,c3,c1);
        mul_add_c(a[5],b[2],c2,c3,c1);
        mul_add_c(a[6],b[1],c2,c3,c1);
        mul_add_c(a[7],b[0],c2,c3,c1);
        r[7]=c2; c2=0;
        mul_add_c(a[7],b[1],c3,c1,c2);
        mul_add_c(a[6],b[2],c3,c1,c2);
        mul_add_c(a[5],b[3],c3,c1,c2);
        mul_add_c(a[4],b[4],c3,c1,c2);
        mul_add_c(a[3],b[5],c3,c1,c2);
        mul_add_c(a[2],b[6],c3,c1,c2);
        mul_add_c(a[1],b[7],c3,c1,c2);
        r[8]=c3; c3=0;
        mul_add_c(a[2],b[7],c1,c2,c3);
        mul_add_c(a[3],b[6],c1,c2,c3);
        mul_add_c(a[4],b[5],c1,c2,c3);
        mul_add_c(a[5],b[4],c1,c2,c3);
        mul_add_c(a[6],b[3],c1,c2,c3);
        mul_add_c(a[7],b[2],c1,c2,c3);
        r[9]=c1; c1=0;
        mul_add_c(a[7],b[3],c2,c3,c1);
        mul_add_c(a[6],b[4],c2,c3,c1);
        mul_add_c(a[5],b[5],c2,c3,c1);
        mul_add_c(a[4],b[6],c2,c3,c1);
        mul_add_c(a[3],b[7],c2,c3,c1);
        r[10]=c2; c2=0;
        mul_add_c(a[4],b[7],c3,c1,c2);
        mul_add_c(a[5],b[6],c3,c1,c2);
        mul_add_c(a[6],b[5],c3,c1,c2);
        mul_add_c(a[7],b[4],c3,c1,c2);
        r[11]=c3; c3=0;
        mul_add_c(a[7],b[5],c1,c2,c3);
        mul_add_c(a[6],b[6],c1,c2,c3);
        mul_add_c(a[5],b[7],c1,c2,c3);
        r[12]=c1; c1=0;
        mul_add_c(a[6],b[7],c2,c3,c1);
        mul_add_c(a[7],b[6],c2,c3,c1);
        r[13]=c2; c2=0;
        mul_add_c(a[7],b[7],c3,c1,c2);
        r[14]=c3;
        r[15]=c1;
        }
void bn_mul_comba4(BN_ULONG *r, BN_ULONG *a, BN_ULONG *b)
        { BN_ULONG t1,t2;
        BN_ULONG c1,c2,c3;

        c1=0; c2=0; c3=0;
        mul_add_c(a[0],b[0],c1,c2,c3);
        r[0]=c1; c1=0;
        mul_add_c(a[0],b[1],c2,c3,c1);
        mul_add_c(a[1],b[0],c2,c3,c1);
        r[1]=c2; c2=0;
        mul_add_c(a[2],b[0],c3,c1,c2);
        mul_add_c(a[1],b[1],c3,c1,c2);
        mul_add_c(a[0],b[2],c3,c1,c2);
        r[2]=c3; c3=0;
        mul_add_c(a[0],b[3],c1,c2,c3);
        mul_add_c(a[1],b[2],c1,c2,c3);
        mul_add_c(a[2],b[1],c1,c2,c3);
        mul_add_c(a[3],b[0],c1,c2,c3);
        r[3]=c1; c1=0;
        mul_add_c(a[3],b[1],c2,c3,c1);
        mul_add_c(a[2],b[2],c2,c3,c1);
        mul_add_c(a[1],b[3],c2,c3,c1);
        r[4]=c2; c2=0;
        mul_add_c(a[2],b[3],c3,c1,c2);
        mul_add_c(a[3],b[2],c3,c1,c2);
        r[5]=c3; c3=0;
        mul_add_c(a[3],b[3],c1,c2,c3);
        r[6]=c1;
        r[7]=c2;
        }
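
/*
 * Note (not in the original): these are Comba multiplications.
 * Instead of the row-by-row schoolbook order, products are gathered
 * column by column, so each result word r[k] is the sum of all
 * a[i]*b[j] with i+j == k plus the carries out of column k-1; the
 * accumulators c1,c2,c3 rotate through the roles of (low, middle,
 * top) word.  A sketch of the same computation for the 4x4 case,
 * assuming unsigned __int128:
 */
#if 0
static void mul_comba4_ref(BN_ULONG *r, const BN_ULONG *a, const BN_ULONG *b)
{
        unsigned __int128 acc = 0;      /* low+middle words of the column */
        BN_ULONG top = 0;               /* third (top) word */
        int i, k;

        for (k = 0; k < 7; k++) {
                for (i = 0; i < 4; i++) {
                        if (k - i >= 0 && k - i < 4) {
                                unsigned __int128 t =
                                        (unsigned __int128)a[i]*b[k-i];
                                acc += t;
                                top += (acc < t); /* carry out of 128 bits */
                        }
                }
                r[k] = (BN_ULONG)acc;             /* column k is done */
                acc = (acc >> 64) | ((unsigned __int128)top << 64);
                top = 0;
        }
        r[7] = (BN_ULONG)acc;
}
#endif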
void bn_sqr_comba8(BN_ULONG *r, BN_ULONG *a)
        { BN_ULONG t1,t2;
        BN_ULONG c1,c2,c3;

        c1=0; c2=0; c3=0;
        sqr_add_c(a,0,c1,c2,c3);
        r[0]=c1; c1=0;
        sqr_add_c2(a,1,0,c2,c3,c1);
        r[1]=c2; c2=0;
        sqr_add_c(a,1,c3,c1,c2);
        sqr_add_c2(a,2,0,c3,c1,c2);
        r[2]=c3; c3=0;
        sqr_add_c2(a,3,0,c1,c2,c3);
        sqr_add_c2(a,2,1,c1,c2,c3);
        r[3]=c1; c1=0;
        sqr_add_c(a,2,c2,c3,c1);
        sqr_add_c2(a,3,1,c2,c3,c1);
        sqr_add_c2(a,4,0,c2,c3,c1);
        r[4]=c2; c2=0;
        sqr_add_c2(a,5,0,c3,c1,c2);
        sqr_add_c2(a,4,1,c3,c1,c2);
        sqr_add_c2(a,3,2,c3,c1,c2);
        r[5]=c3; c3=0;
        sqr_add_c(a,3,c1,c2,c3);
        sqr_add_c2(a,4,2,c1,c2,c3);
        sqr_add_c2(a,5,1,c1,c2,c3);
        sqr_add_c2(a,6,0,c1,c2,c3);
        r[6]=c1; c1=0;
        sqr_add_c2(a,7,0,c2,c3,c1);
        sqr_add_c2(a,6,1,c2,c3,c1);
        sqr_add_c2(a,5,2,c2,c3,c1);
        sqr_add_c2(a,4,3,c2,c3,c1);
        r[7]=c2; c2=0;
        sqr_add_c(a,4,c3,c1,c2);
        sqr_add_c2(a,5,3,c3,c1,c2);
        sqr_add_c2(a,6,2,c3,c1,c2);
        sqr_add_c2(a,7,1,c3,c1,c2);
        r[8]=c3; c3=0;
        sqr_add_c2(a,7,2,c1,c2,c3);
        sqr_add_c2(a,6,3,c1,c2,c3);
        sqr_add_c2(a,5,4,c1,c2,c3);
        r[9]=c1; c1=0;
        sqr_add_c(a,5,c2,c3,c1);
        sqr_add_c2(a,6,4,c2,c3,c1);
        sqr_add_c2(a,7,3,c2,c3,c1);
        r[10]=c2; c2=0;
        sqr_add_c2(a,7,4,c3,c1,c2);
        sqr_add_c2(a,6,5,c3,c1,c2);
        r[11]=c3; c3=0;
        sqr_add_c(a,6,c1,c2,c3);
        sqr_add_c2(a,7,5,c1,c2,c3);
        r[12]=c1; c1=0;
        sqr_add_c2(a,7,6,c2,c3,c1);
        r[13]=c2; c2=0;
        sqr_add_c(a,7,c3,c1,c2);
        r[14]=c3;
        r[15]=c1;
        }
void bn_sqr_comba4(BN_ULONG *r, BN_ULONG *a)
        { BN_ULONG t1,t2;
        BN_ULONG c1,c2,c3;

        c1=0; c2=0; c3=0;
        sqr_add_c(a,0,c1,c2,c3);
        r[0]=c1; c1=0;
        sqr_add_c2(a,1,0,c2,c3,c1);
        r[1]=c2; c2=0;
        sqr_add_c(a,1,c3,c1,c2);
        sqr_add_c2(a,2,0,c3,c1,c2);
        r[2]=c3; c3=0;
        sqr_add_c2(a,3,0,c1,c2,c3);
        sqr_add_c2(a,2,1,c1,c2,c3);
        r[3]=c1; c1=0;
        sqr_add_c(a,2,c2,c3,c1);
        sqr_add_c2(a,3,1,c2,c3,c1);
        r[4]=c2; c2=0;
        sqr_add_c2(a,3,2,c3,c1,c2);
        r[5]=c3; c3=0;
        sqr_add_c(a,3,c1,c2,c3);
        r[6]=c1;
        r[7]=c2;
        }
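
/*
 * Note (not in the original): squaring reuses the Comba pattern but
 * exploits symmetry; since a[i]*a[j] == a[j]*a[i], each off-diagonal
 * product is computed once and added twice (sqr_add_c2), while the
 * diagonal terms a[i]^2 are added once (sqr_add_c).  A hedged sanity
 * check against the generic multiplier:
 */
#if 0
static int sqr_comba4_selftest(const BN_ULONG *a)
{
        BN_ULONG r1[8], r2[8];
        int i;

        bn_sqr_comba4(r1, (BN_ULONG *)a);
        bn_mul_comba4(r2, (BN_ULONG *)a, (BN_ULONG *)a);
        for (i = 0; i < 8; i++)
                if (r1[i] != r2[i]) return 0;
        return 1;       /* squaring must agree with a*a */
}
#endif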