From 1acf071cd08e5cb5c76e1366c5ae2505b8be6d65 Mon Sep 17 00:00:00 2001
From: Andy Polyakov
Date: Mon, 20 Dec 2004 13:13:14 +0000
Subject: [PATCH] Backport of http://cvs.openssl.org/chngview?cn=12323, as
 well as eliminate message size limitations on 64-bit platforms.

---
 fips/sha1/fingerprint.sha1   |  4 +-
 fips/sha1/fips_md32_common.h | 95 +++++++++++++++---------------------
 fips/sha1/fips_sha_locl.h    |  4 +-
 fips/sha1/standalone.sha1    |  4 +-
 4 files changed, 45 insertions(+), 62 deletions(-)

diff --git a/fips/sha1/fingerprint.sha1 b/fips/sha1/fingerprint.sha1
index 92225550a4..5cb919fdc5 100644
--- a/fips/sha1/fingerprint.sha1
+++ b/fips/sha1/fingerprint.sha1
@@ -1,5 +1,5 @@
 HMAC-SHA1(fips_sha1dgst.c)= 10575600a9540eb15188a7d3b0b031e60aedbc18
 HMAC-SHA1(fips_sha1_selftest.c)= 98910a0c85eff1688bd7adb23e738dc75b39546e
 HMAC-SHA1(asm/sx86-elf.s)= ae66fb23ab8e1a2287e87a0a2dd30a4b9039fe63
-HMAC-SHA1(fips_sha_locl.h)= 4a83a6c5181483244e0f44a902225425835f54bc
-HMAC-SHA1(fips_md32_common.h)= 1c7e761db430067391b1b7b86da5d2bf6df92834
+HMAC-SHA1(fips_sha_locl.h)= c1b4c82eec5f0ee119658456690f3ea9d77ed1c5
+HMAC-SHA1(fips_md32_common.h)= 08a057a7b94acf5df4301ea6c894ce14082e1ec4
diff --git a/fips/sha1/fips_md32_common.h b/fips/sha1/fips_md32_common.h
index 55d65f5821..cf1110e897 100644
--- a/fips/sha1/fips_md32_common.h
+++ b/fips/sha1/fips_md32_common.h
@@ -77,7 +77,7 @@
  * ...
  * HASH_LONG Nl,Nh;
  * HASH_LONG data[HASH_LBLOCK];
- * int num;
+ * unsigned int num;
  * ...
  * } HASH_CTX;
  * HASH_UPDATE
@@ -195,7 +195,6 @@
  * Some GNU C inline assembler templates. Note that these are
  * rotates by *constant* number of bits! But that's exactly
  * what we need here...
- *
  *
  */
 # if defined(__i386) || defined(__i386__) || defined(__x86_64) || defined(__x86_64__)
@@ -217,39 +216,6 @@
 })
 # endif
 # endif
-
-/*
- * Engage compiler specific "fetch in reverse byte order"
- * intrinsic function if available.
- */
-# if defined(__GNUC__) && __GNUC__>=2 && !defined(OPENSSL_NO_ASM) && !defined(OPENSSL_NO_INLINE_ASM)
-  /* some GNU C inline assembler templates by */
-# if (defined(__i386) || defined(__i386__) || defined(__x86_64) || defined(__x86_64__)) && !defined(I386_ONLY)
-# define BE_FETCH32(a) ({ register unsigned int l=(a);\
- asm ( \
- "bswapl %0" \
- : "=r"(l) : "0"(l)); \
- l; \
- })
-# elif defined(__powerpc)
-# define LE_FETCH32(a) ({ register unsigned int l; \
- asm ( \
- "lwbrx %0,0,%1" \
- : "=r"(l) \
- : "r"(a)); \
- l; \
- })
-
-# elif defined(__sparc) && defined(OPENSSL_SYS_ULTRASPARC)
-# define LE_FETCH32(a) ({ register unsigned int l; \
- asm ( \
- "lda [%1]#ASI_PRIMARY_LITTLE,%0"\
- : "=r"(l) \
- : "r"(a)); \
- l; \
- })
-# endif
-# endif
 #endif /* PEDANTIC */

 #if HASH_LONG_LOG2==2 /* Engage only if sizeof(HASH_LONG)== 4 */
@@ -301,28 +267,12 @@
 # if !defined(HASH_BLOCK_DATA_ORDER_ALIGNED) && HASH_LONG_LOG2==2
 # define HASH_BLOCK_DATA_ORDER_ALIGNED HASH_BLOCK_HOST_ORDER
 # endif
-# elif defined(DATA_ORDER_IS_LITTLE_ENDIAN)
-# ifndef HOST_FETCH32
-# ifdef LE_FETCH32
-# define HOST_FETCH32(p,l) LE_FETCH32(p)
-# elif defined(REVERSE_FETCH32)
-# define HOST_FETCH32(p,l) REVERSE_FETCH32(p,l)
-# endif
-# endif
 # endif
 #elif defined(L_ENDIAN)
 # if defined(DATA_ORDER_IS_LITTLE_ENDIAN)
 # if !defined(HASH_BLOCK_DATA_ORDER_ALIGNED) && HASH_LONG_LOG2==2
 # define HASH_BLOCK_DATA_ORDER_ALIGNED HASH_BLOCK_HOST_ORDER
 # endif
-# elif defined(DATA_ORDER_IS_BIG_ENDIAN)
-# ifndef HOST_FETCH32
-# ifdef BE_FETCH32
-# define HOST_FETCH32(p,l) BE_FETCH32(p)
-# elif defined(REVERSE_FETCH32)
-# define HOST_FETCH32(p,l) REVERSE_FETCH32(p,l)
-# endif
-# endif
 # endif
 #endif

@@ -334,11 +284,32 @@

 #if defined(DATA_ORDER_IS_BIG_ENDIAN)

+#ifndef PEDANTIC
+# if defined(__GNUC__) && __GNUC__>=2 && !defined(OPENSSL_NO_ASM) && !defined(OPENSSL_NO_INLINE_ASM)
+# if defined(__i386) || defined(__i386__) || defined(__x86_64) || defined(__x86_64__)
+  /*
+   * This gives ~30-40% performance improvement in SHA-256 compiled
+   * with gcc [on P4]. Well, first macro to be frank. We can pull
+   * this trick on x86* platforms only, because these CPUs can fetch
+   * unaligned data without raising an exception.
+   */
+# define HOST_c2l(c,l) ({ unsigned int r=*((const unsigned int *)(c)); \
+ asm ("bswapl %0":"=r"(r):"0"(r)); \
+ (c)+=4; (l)=r; })
+# define HOST_l2c(l,c) ({ unsigned int r=(l); \
+ asm ("bswapl %0":"=r"(r):"0"(r)); \
+ *((unsigned int *)(c))=r; (c)+=4; r; })
+# endif
+# endif
+#endif
+
+#ifndef HOST_c2l
 #define HOST_c2l(c,l) (l =(((unsigned long)(*((c)++)))<<24), \
 l|=(((unsigned long)(*((c)++)))<<16), \
 l|=(((unsigned long)(*((c)++)))<< 8), \
 l|=(((unsigned long)(*((c)++))) ), \
 l)
+#endif
 #define HOST_p_c2l(c,l,n) { \
 switch (n) { \
 case 0: l =((unsigned long)(*((c)++)))<<24; \
@@ -362,19 +333,29 @@
 case 2: l|=((unsigned long)(*(--(c))))<<16; \
 case 1: l|=((unsigned long)(*(--(c))))<<24; \
 } }
+#ifndef HOST_l2c
 #define HOST_l2c(l,c) (*((c)++)=(unsigned char)(((l)>>24)&0xff), \
 *((c)++)=(unsigned char)(((l)>>16)&0xff), \
 *((c)++)=(unsigned char)(((l)>> 8)&0xff), \
 *((c)++)=(unsigned char)(((l) )&0xff), \
 l)
+#endif

 #elif defined(DATA_ORDER_IS_LITTLE_ENDIAN)

+#if defined(__i386) || defined(__i386__) || defined(__x86_64) || defined(__x86_64__)
+  /* See comment in DATA_ORDER_IS_BIG_ENDIAN section. */
+# define HOST_c2l(c,l) ((l)=*((const unsigned int *)(c)), (c)+=4, l)
+# define HOST_l2c(l,c) (*((unsigned int *)(c))=(l), (c)+=4, l)
+#endif
+
+#ifndef HOST_c2l
 #define HOST_c2l(c,l) (l =(((unsigned long)(*((c)++))) ), \
 l|=(((unsigned long)(*((c)++)))<< 8), \
 l|=(((unsigned long)(*((c)++)))<<16), \
 l|=(((unsigned long)(*((c)++)))<<24), \
 l)
+#endif
 #define HOST_p_c2l(c,l,n) { \
 switch (n) { \
 case 0: l =((unsigned long)(*((c)++))); \
@@ -398,11 +379,13 @@
 case 2: l|=((unsigned long)(*(--(c))))<< 8; \
 case 1: l|=((unsigned long)(*(--(c)))); \
 } }
+#ifndef HOST_l2c
 #define HOST_l2c(l,c) (*((c)++)=(unsigned char)(((l) )&0xff), \
 *((c)++)=(unsigned char)(((l)>> 8)&0xff), \
 *((c)++)=(unsigned char)(((l)>>16)&0xff), \
 *((c)++)=(unsigned char)(((l)>>24)&0xff), \
 l)
+#endif

 #endif

@@ -415,7 +398,7 @@ int HASH_UPDATE (HASH_CTX *c, const void *data_, FIPS_SHA_SIZE_T len)
 const unsigned char *data=data_;
 register HASH_LONG * p;
 register HASH_LONG l;
- int sw,sc,ew,ec;
+ size_t sw,sc,ew,ec;

 if(FIPS_selftest_failed())
 return 0;
@@ -427,7 +410,7 @@ int HASH_UPDATE (HASH_CTX *c, const void *data_, FIPS_SHA_SIZE_T len)
  * Wei Dai for pointing it out. */
 if (l < c->Nl) /* overflow */
 c->Nh++;
- c->Nh+=(len>>29);
+ c->Nh+=(len>>29); /* might cause compiler warning on 16-bit */
 c->Nl=l;

 if (c->num != 0)
@@ -450,7 +433,7 @@ int HASH_UPDATE (HASH_CTX *c, const void *data_, FIPS_SHA_SIZE_T len)
 }
 else
 {
- c->num+=len;
+ c->num+=(unsigned int)len;
 if ((sc+len) < 4) /* ugly, add char's to a word */
 {
 l=p[sw]; HOST_p_c2l_p(data,l,sc,len); p[sw]=l;
@@ -484,7 +467,7 @@ int HASH_UPDATE (HASH_CTX *c, const void *data_, FIPS_SHA_SIZE_T len)
  * Note that HASH_BLOCK_DATA_ORDER_ALIGNED gets defined
  * only if sizeof(HASH_LONG)==4.
  */
- if ((((unsigned long)data)%4) == 0)
+ if ((((size_t)data)%4) == 0)
 {
 /* data is properly aligned so that we can cast it: */
 HASH_BLOCK_DATA_ORDER_ALIGNED (c,(const HASH_LONG *)data,sw);
@@ -533,7 +516,7 @@ int HASH_UPDATE (HASH_CTX *c, const void *data_, FIPS_SHA_SIZE_T len)
 void HASH_TRANSFORM (HASH_CTX *c, const unsigned char *data)
 {
 #if defined(HASH_BLOCK_DATA_ORDER_ALIGNED)
- if ((((unsigned long)data)%4) == 0)
+ if ((((size_t)data)%4) == 0)
 /* data is properly aligned so that we can cast it: */
 HASH_BLOCK_DATA_ORDER_ALIGNED (c,(const HASH_LONG *)data,1);
 else
diff --git a/fips/sha1/fips_sha_locl.h b/fips/sha1/fips_sha_locl.h
index 154140d831..6146b07812 100644
--- a/fips/sha1/fips_sha_locl.h
+++ b/fips/sha1/fips_sha_locl.h
@@ -336,7 +336,7 @@ void HASH_BLOCK_HOST_ORDER (SHA_CTX *c, const void *d, FIPS_SHA_SIZE_T num)
 c->h3=(c->h3+B)&0xffffffffL;
 c->h4=(c->h4+C)&0xffffffffL;

- if ((int)(--num) <= 0) break;
+ if (--num == 0) break;

 A=c->h0;
 B=c->h1;
@@ -466,7 +466,7 @@ void HASH_BLOCK_DATA_ORDER (SHA_CTX *c, const void *p, FIPS_SHA_SIZE_T num)
 c->h3=(c->h3+B)&0xffffffffL;
 c->h4=(c->h4+C)&0xffffffffL;

- if ((int)(--num) <= 0) break;
+ if (--num == 0) break;

 A=c->h0;
 B=c->h1;
diff --git a/fips/sha1/standalone.sha1 b/fips/sha1/standalone.sha1
index 819654a886..7279c82541 100644
--- a/fips/sha1/standalone.sha1
+++ b/fips/sha1/standalone.sha1
@@ -2,5 +2,5 @@ HMAC-SHA1(fips_sha1dgst.c)= 10575600a9540eb15188a7d3b0b031e60aedbc18
 HMAC-SHA1(fips_sha1_selftest.c)= 98910a0c85eff1688bd7adb23e738dc75b39546e
 HMAC-SHA1(asm/sx86-elf.s)= ae66fb23ab8e1a2287e87a0a2dd30a4b9039fe63
 HMAC-SHA1(fips_standalone_sha1.c)= 93203c569097189b47a0085bc9fc55193867d4ce
-HMAC-SHA1(fips_sha_locl.h)= 4a83a6c5181483244e0f44a902225425835f54bc
-HMAC-SHA1(fips_md32_common.h)= 1c7e761db430067391b1b7b86da5d2bf6df92834
+HMAC-SHA1(fips_sha_locl.h)= c1b4c82eec5f0ee119658456690f3ea9d77ed1c5 +HMAC-SHA1(fips_md32_common.h)= 08a057a7b94acf5df4301ea6c894ce14082e1ec4 -- 2.25.1
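
The heart of the fips_md32_common.h change above is the new HOST_c2l/HOST_l2c pair: on x86 and x86-64, which tolerate unaligned 32-bit accesses, a big-endian word can be read with one load plus a bswapl instead of being assembled a byte at a time, which is where the comment's ~30-40% SHA-256 speedup on a P4 comes from. The standalone sketch below illustrates the same idea outside the OpenSSL build; the function name be32_fetch and the small test in main are illustrative choices of this note, not code taken from the patch, and the #else branch corresponds to the portable byte-by-byte form the patch keeps under #ifndef HOST_c2l.

#include <stdio.h>
#include <string.h>

/*
 * Illustrative only, not part of the patch: fetch a 32-bit big-endian
 * word from a byte stream.  On GCC for x86/x86-64, do a direct
 * (possibly unaligned) load and byte-swap it with bswapl, mirroring
 * the idea behind the new HOST_c2l macro.
 */
static unsigned int be32_fetch(const unsigned char *c)
{
#if defined(__GNUC__) && (defined(__i386__) || defined(__x86_64__))
    unsigned int r;
    memcpy(&r, c, 4);                        /* unaligned 32-bit load */
    __asm__("bswapl %0" : "=r"(r) : "0"(r)); /* big-endian -> host order */
    return r;
#else
    /* portable fallback, equivalent to the generic byte-by-byte HOST_c2l */
    return ((unsigned int)c[0] << 24) | ((unsigned int)c[1] << 16) |
           ((unsigned int)c[2] <<  8) |  (unsigned int)c[3];
#endif
}

int main(void)
{
    static const unsigned char buf[4] = { 0x01, 0x23, 0x45, 0x67 };

    /* both code paths must print 01234567 */
    printf("%08x\n", be32_fetch(buf));
    return 0;
}

The little-endian DATA_ORDER case needs no swap at all, which is why the HOST_c2l/HOST_l2c definitions the patch adds in that section reduce to a plain 32-bit load and store.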