#include <stdio.h>
#include <assert.h>
+#include <openssl/crypto.h>
#include "cryptlib.h"
#include "bn_lcl.h"
#if defined(BN_MUL_COMBA) && !defined(OPENSSL_SMALL_FOOTPRINT)
+#ifndef OPENSSL_FIPSCANISTER
#undef bn_mul_comba8
#undef bn_mul_comba4
#undef bn_sqr_comba8
#undef bn_sqr_comba4
+#endif /* OPENSSL_FIPSCANISTER */
/* mul_add_c(a,b,c0,c1,c2) -- c+=a*b for three word number c=(c2,c1,c0) */
/* mul_add_c2(a,b,c0,c1,c2) -- c+=2*a*b for three word number c=(c2,c1,c0) */
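/*
 * Illustrative sketch only, not the macros used in this file (their
 * real definitions depend on the BN_ULLONG/mul64 configuration): a
 * three-word accumulate c=(c2,c1,c0) += a*b can be written with a
 * double-width temporary roughly like this.
 */
#if 0
# define mul_add_c_sketch(a,b,c0,c1,c2) do {		\
	BN_ULLONG t = (BN_ULLONG)(a)*(b) + (c0);	\
	(c0) = (BN_ULONG)t&BN_MASK2;			\
	t    = (t>>BN_BITS2) + (c1);			\
	(c1) = (BN_ULONG)t&BN_MASK2;			\
	(c2) += (BN_ULONG)(t>>BN_BITS2);		\
	} while (0)
#endif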
r[7]=c2;
}
+#ifdef OPENSSL_NO_ASM
#ifdef OPENSSL_BN_ASM_MONT
+#include <alloca.h>
/*
* This is essentially reference implementation, which may or may not
- * result in performance improvement. E.g. on IA-32 this does give 40%
- * faster rsa1024 private key operations and 10% faster rsa4096 ones,
- * while on AMD64 it improves rsa1024 sign only by 10% and *worsens*
- * rsa4096 sign by 15%. Once again, it's a reference implementation,
- * one to be used as start-point for platform-specific assembler.
+ * result in performance improvement. E.g. on IA-32 this routine was
+ * observed to give 40% faster rsa1024 private key operations and 10%
+ * faster rsa4096 ones, while on AMD64 it improves rsa1024 sign only
+ * by 10% and *worsens* rsa4096 sign by 15%. Once again, it's a
+ * reference implementation, one to be used as a starting point for
+ * platform-specific assembler. The mentioned numbers apply to
+ * compiler-generated code compiled with and without
+ * -DOPENSSL_BN_ASM_MONT and can vary not only from platform to
+ * platform, but even across compiler versions. Assembler vs.
+ * assembler improvement coefficients can [and are known to] differ
+ * and are to be documented elsewhere.
*/
-void bn_mul_mont(BN_ULONG *rp, const BN_ULONG *ap, const BN_ULONG *bp, const BN_ULONG *np,BN_ULONG n0, int num)
+int bn_mul_mont(BN_ULONG *rp, const BN_ULONG *ap, const BN_ULONG *bp, const BN_ULONG *np,const BN_ULONG *n0p, int num)
{
- BN_ULONG c0,c1,ml,*tp;
+ BN_ULONG c0,c1,ml,*tp,n0;
#ifdef mul64
BN_ULONG mh;
#endif
volatile BN_ULONG *vp;
int i=0,j;
+#if 0 /* template for platform-specific implementation */
+ if (ap==bp) return bn_sqr_mont(rp,ap,np,n0p,num);
+#endif
vp = tp = alloca((num+2)*sizeof(BN_ULONG));
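	/* vp aliases tp through a volatile pointer so that the final
	 * zeroization of the on-stack temporary cannot be optimized away */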
- tp[num] = bn_mul_words(tp,ap,num,bp[0]);
+ n0 = *n0p;
+
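+	/* first pass: tp[0..num-1] = ap[]*bp[0]; plain mul, nothing to
+	 * accumulate yet */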
+ c0 = 0;
+ ml = bp[0];
+#ifdef mul64
+ mh = HBITS(ml);
+ ml = LBITS(ml);
+ for (j=0;j<num;++j)
+ mul(tp[j],ap[j],ml,mh,c0);
+#else
+ for (j=0;j<num;++j)
+ mul(tp[j],ap[j],ml,c0);
+#endif
+
+ tp[num] = c0;
tp[num+1] = 0;
goto enter;
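	/* the multiply pass for i==0 was already done above with plain
	 * mul, so jump past the mul_add pass of the first iteration,
	 * straight to the reduction step at the enter: label */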
for(i=0;i<num;i++)
{
- c0 = bn_mul_add_words(tp,ap,num,bp[i]);
+ c0 = 0;
+ ml = bp[i];
+#ifdef mul64
+ mh = HBITS(ml);
+ ml = LBITS(ml);
+ for (j=0;j<num;++j)
+ mul_add(tp[j],ap[j],ml,mh,c0);
+#else
+ for (j=0;j<num;++j)
+ mul_add(tp[j],ap[j],ml,c0);
+#endif
c1 = (tp[num] + c0)&BN_MASK2;
tp[num] = c1;
	tp[num+1] = (c1<c0?1:0);
enter:
	c1 = tp[0];
	ml = (c1*n0)&BN_MASK2;
	c0 = 0;
#ifdef mul64
	mh = HBITS(ml);
	ml = LBITS(ml);
	mul_add(c1,np[0],ml,mh,c0);
#else
	mul_add(c1,np[0],ml,c0);
#endif
	for(j=1;j<num;j++)
		{
		c1 = tp[j];
#ifdef mul64
		mul_add(c1,np[j],ml,mh,c0);
#else
		mul_add(c1,np[j],ml,c0);
#endif
		tp[j-1] = c1&BN_MASK2;
		}
	c1 = (tp[num] + c0)&BN_MASK2;
	tp[num-1] = c1;
	tp[num] = tp[num+1] + (c1<c0?1:0);
	}
if (tp[num]!=0 || tp[num-1]>=np[num-1])
	{
	c0 = bn_sub_words(rp,tp,np,num);
	if (tp[num]!=0 || c0==0)
{
for(i=0;i<num+2;i++) vp[i] = 0;
- return;
+ return 1;
}
}
for(i=0;i<num;i++) rp[i] = tp[i], vp[i] = 0;
vp[num] = 0;
vp[num+1] = 0;
+ return 1;
}
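/*
 * Hedged usage sketch, not part of this file: bn_mul_mont computes
 * rp[] = ap[]*bp[]*R^-1 mod np[] with R = 2^(num*BN_BITS2), where n0p
 * points at the precomputed -np^-1 mod 2^BN_BITS2 kept in BN_MONT_CTX.
 * Callers normally reach it through the public wrappers, e.g.:
 */
#if 0
# include <openssl/bn.h>
static int mont_mod_mul(BIGNUM *r, const BIGNUM *a, const BIGNUM *b,
			const BIGNUM *n, BN_CTX *ctx)
	{
	int ok = 0;
	BN_MONT_CTX *mont = BN_MONT_CTX_new();
	if (mont == NULL || !BN_MONT_CTX_set(mont,n,ctx)) goto err;
	/* r = a*R mod n; then (a*R)*b*R^-1 mod n == a*b mod n */
	if (!BN_to_montgomery(r,a,mont,ctx)) goto err;
	if (!BN_mod_mul_montgomery(r,r,b,mont,ctx)) goto err;
	ok = 1;
err:
	BN_MONT_CTX_free(mont);
	return ok;
	}
#endif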
-
-void bn_sqr_mont(BN_ULONG *rp, const BN_ULONG *ap, const BN_ULONG *np,BN_ULONG n0, int num)
- {
- bn_mul_mont(rp,ap,ap,np,n0,num);
- }
+#else
+/*
+ * A return value of 0 indicates that the multiplication/convolution
+ * was not performed, signalling the caller to fall back to the
+ * alternative/original code path.
+ */
+int bn_mul_mont(BN_ULONG *rp, const BN_ULONG *ap, const BN_ULONG *bp, const BN_ULONG *np,const BN_ULONG *n0, int num)
+{ return 0; }
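/*
 * Hedged caller-side sketch of that protocol; fallback_mul_then_reduce
 * is a hypothetical helper, not an OpenSSL function:
 */
#if 0
	if (!bn_mul_mont(rp,ap,bp,np,n0p,num))
		return fallback_mul_then_reduce(rp,ap,bp,np,num); /* hypothetical */
#endif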
#endif /* OPENSSL_BN_ASM_MONT */
+#endif /* OPENSSL_NO_ASM */
#else /* !BN_MUL_COMBA */
/* hmm... is it faster just to do a multiply? */
+#ifndef OPENSSL_FIPSCANISTER
#undef bn_sqr_comba4
+#undef bn_sqr_comba8
+#endif /* OPENSSL_FIPSCANISTER */
void bn_sqr_comba4(BN_ULONG *r, const BN_ULONG *a)
{
BN_ULONG t[8];
bn_sqr_normal(r,a,4,t);
}
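/* bn_sqr_normal(r,a,n,t) writes the full 2n-word square of a[0..n-1]
 * into r[], using t[] (2n words) as scratch, so the comba entry points
 * keep their contract without the unrolled code */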
-#undef bn_sqr_comba8
void bn_sqr_comba8(BN_ULONG *r, const BN_ULONG *a)
{
BN_ULONG t[16];
bn_sqr_normal(r,a,8,t);
}
+#ifdef OPENSSL_NO_ASM
#ifdef OPENSSL_BN_ASM_MONT
-void bn_mul_mont(BN_ULONG *rp, const BN_ULONG *ap, const BN_ULONG *bp, const BN_ULONG *np,BN_ULONG n0, int num)
+#include <alloca.h>
+int bn_mul_mont(BN_ULONG *rp, const BN_ULONG *ap, const BN_ULONG *bp, const BN_ULONG *np,const BN_ULONG *n0p, int num)
{
- BN_ULONG c0,c1,*tp;
+ BN_ULONG c0,c1,*tp,n0=*n0p;
volatile BN_ULONG *vp;
int i=0,j;
vp = tp = alloca((num+2)*sizeof(BN_ULONG));
for(i=0;i<=num;i++) tp[i]=0;
for(i=0;i<num;i++)
{
c0 = bn_mul_add_words(tp,ap,num,bp[i]);
- c1 = tp[num] + c0;
+ c1 = (tp[num] + c0)&BN_MASK2;
tp[num] = c1;
tp[num+1] = (c1<c0?1:0);
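	/* Montgomery reduction step: adding np[]*((tp[0]*n0) mod 2^BN_BITS2)
	 * forces tp[0] to zero (n0 == -np^-1 mod 2^BN_BITS2), so tp[] can
	 * be shifted down one word just below */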
c0 = bn_mul_add_words(tp,np,num,tp[0]*n0);
- c1 = tp[num] + c0;
+ c1 = (tp[num] + c0)&BN_MASK2;
tp[num] = c1;
tp[num+1] += (c1<c0?1:0);
for(j=0;j<=num;j++) tp[j]=tp[j+1];
	}
if (tp[num]!=0 || tp[num-1]>=np[num-1])
	{
	c0 = bn_sub_words(rp,tp,np,num);
	if (tp[num]!=0 || c0==0)
{
for(i=0;i<num+2;i++) vp[i] = 0;
- return;
+ return 1;
}
}
for(i=0;i<num;i++) rp[i] = tp[i], vp[i] = 0;
vp[num] = 0;
vp[num+1] = 0;
+ return 1;
}
-
-void bn_sqr_mont(BN_ULONG *rp, const BN_ULONG *ap, const BN_ULONG *np,BN_ULONG n0, int num)
- {
- bn_mul_mont(rp,ap,ap,np,n0,num);
- }
+#else
+int bn_mul_mont(BN_ULONG *rp, const BN_ULONG *ap, const BN_ULONG *bp, const BN_ULONG *np,const BN_ULONG *n0, int num)
+{ return 0; }
#endif /* OPENSSL_BN_ASM_MONT */
+#endif /* OPENSSL_NO_ASM */
#endif /* !BN_MUL_COMBA */