/* crypto/md32_common.h */
/* ====================================================================
 * Copyright (c) 1999-2002 The OpenSSL Project.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *
 * 3. All advertising materials mentioning features or use of this
 *    software must display the following acknowledgment:
 *    "This product includes software developed by the OpenSSL Project
 *    for use in the OpenSSL Toolkit. (http://www.OpenSSL.org/)"
 *
 * 4. The names "OpenSSL Toolkit" and "OpenSSL Project" must not be used to
 *    endorse or promote products derived from this software without
 *    prior written permission. For written permission, please contact
 *    licensing@OpenSSL.org.
 *
 * 5. Products derived from this software may not be called "OpenSSL"
 *    nor may "OpenSSL" appear in their names without prior written
 *    permission of the OpenSSL Project.
 *
 * 6. Redistributions of any form whatsoever must retain the following
 *    acknowledgment:
 *    "This product includes software developed by the OpenSSL Project
 *    for use in the OpenSSL Toolkit (http://www.OpenSSL.org/)"
 *
 * THIS SOFTWARE IS PROVIDED BY THE OpenSSL PROJECT ``AS IS'' AND ANY
 * EXPRESSED OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE OpenSSL PROJECT OR
 * ITS CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
 * OF THE POSSIBILITY OF SUCH DAMAGE.
 * ====================================================================
 *
 * This product includes cryptographic software written by Eric Young
 * (eay@cryptsoft.com).  This product includes software written by Tim
 * Hudson (tjh@cryptsoft.com).
 *
 */
/*
 * This is a generic 32 bit "collector" for message digest algorithms.
 * Whenever needed it collects the input character stream into chunks of
 * 32 bit values and invokes a block function that performs the actual
 * hash calculations.
 *
 * Porting guide.
 *
 * Obligatory macros:
 *
 * DATA_ORDER_IS_BIG_ENDIAN or DATA_ORDER_IS_LITTLE_ENDIAN
 *	this macro defines the byte order of the input stream.
 * HASH_CBLOCK
 *	size of a unit chunk HASH_BLOCK operates on.
 * HASH_LONG
 *	has to be at least 32 bit wide; if it's wider, then
 *	HASH_LONG_LOG2 *has to* be defined along with it.
 * HASH_CTX
 *	context structure that at least contains the following
 *	members:
 *		typedef struct {
 *			...
 *			HASH_LONG	Nl,Nh;
 *			HASH_LONG	data[HASH_LBLOCK];
 *			int		num;
 *			...
 *			} HASH_CTX;
 * HASH_UPDATE
 *	name of "Update" function, implemented here.
 * HASH_TRANSFORM
 *	name of "Transform" function, implemented here.
 * HASH_FINAL
 *	name of "Final" function, implemented here.
 * HASH_BLOCK_HOST_ORDER
 *	name of "block" function treating *aligned* input message
 *	in host byte order, implemented externally.
 * HASH_BLOCK_DATA_ORDER
 *	name of "block" function treating *unaligned* input message
 *	in original (data) byte order, implemented externally (it
 *	actually is optional if data and host are of the same
 *	"endianness").
 * HASH_MAKE_STRING
 *	macro converting context variables to the output digest string.
 *
 * Optional macros:
 *
 * B_ENDIAN or L_ENDIAN
 *	defines host byte-order.
 * HASH_LONG_LOG2
 *	defaults to 2 if not stated otherwise.
 * HASH_LBLOCK
 *	assumed to be HASH_CBLOCK/4 if not stated otherwise.
 * HASH_BLOCK_DATA_ORDER_ALIGNED
 *	alternative "block" function capable of treating
 *	aligned input message in original (data) order,
 *	implemented externally.
 *
 * MD5 example:
 *
 *	#define DATA_ORDER_IS_LITTLE_ENDIAN
 *
 *	#define HASH_LONG		MD5_LONG
 *	#define HASH_LONG_LOG2		MD5_LONG_LOG2
 *	#define HASH_CTX		MD5_CTX
 *	#define HASH_CBLOCK		MD5_CBLOCK
 *	#define HASH_LBLOCK		MD5_LBLOCK
 *	#define HASH_UPDATE		MD5_Update
 *	#define HASH_TRANSFORM		MD5_Transform
 *	#define HASH_FINAL		MD5_Final
 *	#define HASH_BLOCK_HOST_ORDER	md5_block_host_order
 *	#define HASH_BLOCK_DATA_ORDER	md5_block_data_order
 *
 *					<appro@fy.chalmers.se>
 */
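/*
 * With an instantiation like the MD5 example above, the generated
 * functions are driven in the usual init/update/final fashion. A
 * minimal usage sketch (assuming the standard MD5_Init/MD5_Update/
 * MD5_Final names from the example; illustration only, hence #if 0):
 */
#if 0
#include <openssl/md5.h>

static void digest_sketch(const unsigned char *msg, unsigned long len,
	unsigned char md[MD5_DIGEST_LENGTH])
	{
	MD5_CTX ctx;

	MD5_Init(&ctx);			/* zero Nl, Nh, num; set chaining state */
	MD5_Update(&ctx,msg,len);	/* the HASH_UPDATE implemented below */
	MD5_Final(md,&ctx);		/* the HASH_FINAL implemented below */
	}
#endif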
#if !defined(DATA_ORDER_IS_BIG_ENDIAN) && !defined(DATA_ORDER_IS_LITTLE_ENDIAN)
#error "DATA_ORDER must be defined!"
#endif

#ifndef HASH_CBLOCK
#error "HASH_CBLOCK must be defined!"
#endif
#ifndef HASH_LONG
#error "HASH_LONG must be defined!"
#endif
#ifndef HASH_CTX
#error "HASH_CTX must be defined!"
#endif

#ifndef HASH_UPDATE
#error "HASH_UPDATE must be defined!"
#endif
#ifndef HASH_TRANSFORM
#error "HASH_TRANSFORM must be defined!"
#endif
#ifndef HASH_FINAL
#error "HASH_FINAL must be defined!"
#endif

#ifndef HASH_BLOCK_HOST_ORDER
#error "HASH_BLOCK_HOST_ORDER must be defined!"
#endif

#if 0
/*
 * Moved below as it's required only if HASH_BLOCK_DATA_ORDER_ALIGNED
 * isn't defined.
 */
#ifndef HASH_BLOCK_DATA_ORDER
#error "HASH_BLOCK_DATA_ORDER must be defined!"
#endif
#endif

#ifndef HASH_LBLOCK
#define HASH_LBLOCK	(HASH_CBLOCK/4)
#endif

#ifndef HASH_LONG_LOG2
#define HASH_LONG_LOG2	2
#endif
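/*
 * E.g. sizeof(HASH_LONG)==4 matches the default HASH_LONG_LOG2 of 2;
 * a (hypothetical) 64-bit HASH_LONG would have to define it as 3.
 */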
/*
 * Engage compiler specific rotate intrinsic function if available.
 */
#undef ROTATE
#ifndef PEDANTIC
# if 0 /* defined(_MSC_VER) */
#  define ROTATE(a,n)	_lrotl(a,n)
# elif defined(__MWERKS__)
#  if defined(__POWERPC__)
#   define ROTATE(a,n)	__rlwinm(a,n,0,31)
#  elif defined(OPENSSL_SYSNAME_NETWARE)
#   define ROTATE(a,n)	_lrotl(a,n)
#  elif defined(__MC68K__)
    /* Motorola specific tweak. <appro@fy.chalmers.se> */
#   define ROTATE(a,n)	( n<24 ? __rol(a,n) : __ror(a,32-n) )
#  else
#   define ROTATE(a,n)	__rol(a,n)
#  endif
# elif defined(__GNUC__) && __GNUC__>=2 && !defined(OPENSSL_NO_ASM) && !defined(OPENSSL_NO_INLINE_ASM)
  /*
   * Some GNU C inline assembler templates. Note that these are
   * rotates by *constant* number of bits! But that's exactly
   * what we need here...
   *				<appro@fy.chalmers.se>
   */
#  if defined(__i386) || defined(__i386__) || defined(__x86_64) || defined(__x86_64__)
#   define ROTATE(a,n)	({ register unsigned int ret;	\
				asm (			\
				"roll %1,%0"		\
				: "=r"(ret)		\
				: "I"(n), "0"(a)	\
				: "cc");		\
			   ret;				\
			})
#  elif defined(__powerpc) || defined(__ppc)
#   define ROTATE(a,n)	({ register unsigned int ret;	\
				asm (			\
				"rlwinm %0,%1,%2,0,31"	\
				: "=r"(ret)		\
				: "r"(a), "I"(n));	\
			   ret;				\
			})
#  endif
# endif
/*
 * Engage compiler specific "fetch in reverse byte order"
 * intrinsic function if available.
 */
# if defined(__GNUC__) && __GNUC__>=2 && !defined(OPENSSL_NO_ASM) && !defined(OPENSSL_NO_INLINE_ASM)
  /* some GNU C inline assembler templates by <appro@fy.chalmers.se> */
#  if (defined(__i386) || defined(__i386__) || defined(__x86_64) || defined(__x86_64__)) && !defined(I386_ONLY)
#   define BE_FETCH32(a)	({ register unsigned int l=(a);\
				asm (			\
				"bswapl %0"		\
				: "=r"(l) : "0"(l));	\
			  l;				\
			})
#  elif defined(__powerpc)
#   define LE_FETCH32(a)	({ register unsigned int l;	\
				asm (			\
				"lwbrx %0,0,%1"		\
				: "=r"(l)		\
				: "r"(a));		\
			   l;				\
			})
#  elif defined(__sparc) && defined(OPENSSL_SYS_ULTRASPARC)
#   define LE_FETCH32(a)	({ register unsigned int l;		\
				asm (				\
				"lda [%1]#ASI_PRIMARY_LITTLE,%0"\
				: "=r"(l)			\
				: "r"(a));			\
			   l;					\
			})
#  endif
# endif
#endif /* PEDANTIC */
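/*
 * For illustration, both fetchers yield a 32-bit word read in the byte
 * order opposite to the host's: BE_FETCH32 gives a big-endian value on
 * a little-endian host, LE_FETCH32 a little-endian value on a
 * big-endian host. A portable sketch of the big-endian case (not used
 * by the code itself):
 */
#if 0
static unsigned int be_fetch32_sketch(const unsigned char *p)
	{
	/* bytes {0x11,0x22,0x33,0x44} in memory yield 0x11223344 */
	return ((unsigned int)p[0]<<24)|((unsigned int)p[1]<<16)|
	       ((unsigned int)p[2]<< 8)|((unsigned int)p[3]    );
	}
#endif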
#if HASH_LONG_LOG2==2	/* Engage only if sizeof(HASH_LONG)==4 */
/* A nice byte order reversal from Wei Dai <weidai@eskimo.com> */
#ifdef ROTATE
/* 5 instructions with rotate instruction, else 9 */
#define REVERSE_FETCH32(a,l)	(					\
		l=*(const HASH_LONG *)(a),				\
		((ROTATE(l,8)&0x00FF00FF)|(ROTATE((l&0x00FF00FF),24)))	\
				)
#else
/* 6 instructions with rotate instruction, else 8 */
#define REVERSE_FETCH32(a,l)	(				\
		l=*(const HASH_LONG *)(a),			\
		l=(((l>>8)&0x00FF00FF)|((l&0x00FF00FF)<<8)),	\
		ROTATE(l,16)					\
				)
/*
 * Originally the middle line started with l=(((l&0xFF00FF00)>>8)|...
 * It's rewritten as above for two reasons:
 *	- RISCs aren't good at long constants and have to explicitly
 *	  compose 'em with several (well, usually 2) instructions in a
 *	  register before performing the actual operation and (as you
 *	  already realized:-) having the same constant should inspire
 *	  the compiler to permanently allocate the only register for it;
 *	- most modern CPUs have two ALUs, but usually only one has
 *	  circuitry for shifts:-( this minor tweak inspires the compiler
 *	  to schedule shift instructions in a better way...
 *
 *				<appro@fy.chalmers.se>
 */
#endif
#endif
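/*
 * Worked example for the two-step variant above: with l=0xAABBCCDD,
 * ((l>>8)&0x00FF00FF) is 0x00AA00CC and ((l&0x00FF00FF)<<8) is
 * 0xBB00DD00, so their OR gives 0xBBAADDCC; ROTATE(l,16) then yields
 * 0xDDCCBBAA, the byte-reversed word.
 */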
#ifndef ROTATE
#define ROTATE(a,n)	(((a)<<(n))|(((a)&0xffffffff)>>(32-(n))))
#endif
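/*
 * The &0xffffffff mask in the generic ROTATE matters when HASH_LONG is
 * wider than 32 bits: it keeps the right shift from dragging whatever
 * sits above bit 31 down into the result, so the low 32 bits of
 * ROTATE(a,n) are always the true 32-bit left rotation.
 */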
/*
 * Make some obvious choices. E.g., HASH_BLOCK_DATA_ORDER_ALIGNED
 * and HASH_BLOCK_HOST_ORDER ought to be the same if input data
 * and host are of the same "endianness". It's possible to mask
 * this with blank #define HASH_BLOCK_DATA_ORDER though...
 *
 *				<appro@fy.chalmers.se>
 */
#if defined(B_ENDIAN)
#  if defined(DATA_ORDER_IS_BIG_ENDIAN)
#    if !defined(HASH_BLOCK_DATA_ORDER_ALIGNED) && HASH_LONG_LOG2==2
#      define HASH_BLOCK_DATA_ORDER_ALIGNED	HASH_BLOCK_HOST_ORDER
#    endif
#  elif defined(DATA_ORDER_IS_LITTLE_ENDIAN)
#    ifndef HOST_FETCH32
#      ifdef LE_FETCH32
#        define HOST_FETCH32(p,l)	LE_FETCH32(p)
#      elif defined(REVERSE_FETCH32)
#        define HOST_FETCH32(p,l)	REVERSE_FETCH32(p,l)
#      endif
#    endif
#  endif
#elif defined(L_ENDIAN)
#  if defined(DATA_ORDER_IS_LITTLE_ENDIAN)
#    if !defined(HASH_BLOCK_DATA_ORDER_ALIGNED) && HASH_LONG_LOG2==2
#      define HASH_BLOCK_DATA_ORDER_ALIGNED	HASH_BLOCK_HOST_ORDER
#    endif
#  elif defined(DATA_ORDER_IS_BIG_ENDIAN)
#    ifndef HOST_FETCH32
#      ifdef BE_FETCH32
#        define HOST_FETCH32(p,l)	BE_FETCH32(p)
#      elif defined(REVERSE_FETCH32)
#        define HOST_FETCH32(p,l)	REVERSE_FETCH32(p,l)
#      endif
#    endif
#  endif
#endif
#if !defined(HASH_BLOCK_DATA_ORDER_ALIGNED)
#ifndef HASH_BLOCK_DATA_ORDER
#error "HASH_BLOCK_DATA_ORDER must be defined!"
#endif
#endif
#if defined(DATA_ORDER_IS_BIG_ENDIAN)

#define HOST_c2l(c,l)	(l =(((unsigned long)(*((c)++)))<<24),		\
			 l|=(((unsigned long)(*((c)++)))<<16),		\
			 l|=(((unsigned long)(*((c)++)))<< 8),		\
			 l|=(((unsigned long)(*((c)++)))    ),		\
			 l)
#define HOST_p_c2l(c,l,n)	{					\
			switch (n) {					\
			case 0: l =((unsigned long)(*((c)++)))<<24;	\
			case 1: l|=((unsigned long)(*((c)++)))<<16;	\
			case 2: l|=((unsigned long)(*((c)++)))<< 8;	\
			case 3: l|=((unsigned long)(*((c)++)));		\
				} }
#define HOST_p_c2l_p(c,l,sc,len) {					\
			switch (sc) {					\
			case 0: l =((unsigned long)(*((c)++)))<<24;	\
				if (--len == 0) break;			\
			case 1: l|=((unsigned long)(*((c)++)))<<16;	\
				if (--len == 0) break;			\
			case 2: l|=((unsigned long)(*((c)++)))<< 8;	\
				} }
/* NOTE the pointer is not incremented at the end of this */
#define HOST_c2l_p(c,l,n)	{					\
			l=0; (c)+=n;					\
			switch (n) {					\
			case 3: l =((unsigned long)(*(--(c))))<< 8;	\
			case 2: l|=((unsigned long)(*(--(c))))<<16;	\
			case 1: l|=((unsigned long)(*(--(c))))<<24;	\
				} }
#define HOST_l2c(l,c)	(*((c)++)=(unsigned char)(((l)>>24)&0xff),	\
			 *((c)++)=(unsigned char)(((l)>>16)&0xff),	\
			 *((c)++)=(unsigned char)(((l)>> 8)&0xff),	\
			 *((c)++)=(unsigned char)(((l)    )&0xff),	\
			 l)
#elif defined(DATA_ORDER_IS_LITTLE_ENDIAN)

#define HOST_c2l(c,l)	(l =(((unsigned long)(*((c)++)))    ),		\
			 l|=(((unsigned long)(*((c)++)))<< 8),		\
			 l|=(((unsigned long)(*((c)++)))<<16),		\
			 l|=(((unsigned long)(*((c)++)))<<24),		\
			 l)
#define HOST_p_c2l(c,l,n)	{					\
			switch (n) {					\
			case 0: l =((unsigned long)(*((c)++)));		\
			case 1: l|=((unsigned long)(*((c)++)))<< 8;	\
			case 2: l|=((unsigned long)(*((c)++)))<<16;	\
			case 3: l|=((unsigned long)(*((c)++)))<<24;	\
				} }
#define HOST_p_c2l_p(c,l,sc,len) {					\
			switch (sc) {					\
			case 0: l =((unsigned long)(*((c)++)));		\
				if (--len == 0) break;			\
			case 1: l|=((unsigned long)(*((c)++)))<< 8;	\
				if (--len == 0) break;			\
			case 2: l|=((unsigned long)(*((c)++)))<<16;	\
				} }
/* NOTE the pointer is not incremented at the end of this */
#define HOST_c2l_p(c,l,n)	{					\
			l=0; (c)+=n;					\
			switch (n) {					\
			case 3: l =((unsigned long)(*(--(c))))<<16;	\
			case 2: l|=((unsigned long)(*(--(c))))<< 8;	\
			case 1: l|=((unsigned long)(*(--(c))));		\
				} }
#define HOST_l2c(l,c)	(*((c)++)=(unsigned char)(((l)    )&0xff),	\
			 *((c)++)=(unsigned char)(((l)>> 8)&0xff),	\
			 *((c)++)=(unsigned char)(((l)>>16)&0xff),	\
			 *((c)++)=(unsigned char)(((l)>>24)&0xff),	\
			 l)

#endif
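/*
 * For illustration: on the big-endian branch HOST_c2l consumes the
 * bytes {0x01,0x02,0x03,0x04}, leaves l==0x01020304 and advances c by
 * four; HOST_l2c writes such a word back out as the same four bytes.
 * The little-endian branch pairs up the same way with l==0x04030201.
 */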
/*
 * Time for some action:-)
 */
int HASH_UPDATE (HASH_CTX *c, const void *data_, unsigned long len)
	{
	const unsigned char *data=data_;
	register HASH_LONG * p;
	register unsigned long l;
	int sw,sc,ew,ec;

	if (len==0) return 1;

	l=(c->Nl+(len<<3))&0xffffffffL;
	/* 95-05-24 eay Fixed a bug with the overflow handling, thanks to
	 * Wei Dai <weidai@eskimo.com> for pointing it out. */
	if (l < c->Nl) /* overflow */
		c->Nh++;
	c->Nh+=(len>>29);
	c->Nl=l;
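	/*
	 * The message length is kept as the 64-bit bit count Nh:Nl.
	 * len<<3 converts bytes to bits for the low word and len>>29
	 * supplies the bits that belong in the high word; e.g. a single
	 * 2^30-byte update contributes 2^33 bits, so Nl gains 0 and Nh
	 * gains 2 (plus any carry out of Nl, handled above).
	 */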
	if (c->num != 0)
		{
		p=c->data;
		sw=c->num>>2;
		sc=c->num&0x03;

		if ((c->num+len) >= HASH_CBLOCK)
			{
			l=p[sw]; HOST_p_c2l(data,l,sc); p[sw++]=l;
			for (; sw<HASH_LBLOCK; sw++)
				{
				HOST_c2l(data,l); p[sw]=l;
				}
			HASH_BLOCK_HOST_ORDER (c,p,1);
			len-=(HASH_CBLOCK-c->num);
			c->num=0;
			/* drop through and do the rest */
			}
		else
			{
			c->num+=len;
			if ((sc+len) < 4) /* ugly, add chars to a word */
				{
				l=p[sw]; HOST_p_c2l_p(data,l,sc,len); p[sw]=l;
				}
			else
				{
				ew=(c->num>>2);
				ec=(c->num&0x03);
				l=p[sw]; HOST_p_c2l(data,l,sc);
				p[sw++]=l;
				for (; sw < ew; sw++)
					{
					HOST_c2l(data,l); p[sw]=l;
					}
				if (ec)
					{
					HOST_c2l_p(data,l,ec); p[sw]=l;
					}
				}
			return 1;
			}
		}

	sw=len/HASH_CBLOCK;
	if (sw > 0)
		{
#if defined(HASH_BLOCK_DATA_ORDER_ALIGNED)
		/*
		 * Note that HASH_BLOCK_DATA_ORDER_ALIGNED gets defined
		 * only if sizeof(HASH_LONG)==4.
		 */
		if ((((unsigned long)data)%4) == 0)
			{
			/* data is properly aligned so that we can cast it: */
			HASH_BLOCK_DATA_ORDER_ALIGNED (c,(const HASH_LONG *)data,sw);
			sw*=HASH_CBLOCK;
			data+=sw;
			len-=sw;
			}
		else
#if !defined(HASH_BLOCK_DATA_ORDER)
			while (sw--)
				{
				memcpy (p=c->data,data,HASH_CBLOCK);
				HASH_BLOCK_DATA_ORDER_ALIGNED(c,p,1);
				data+=HASH_CBLOCK;
				len-=HASH_CBLOCK;
				}
#endif
#endif
#if defined(HASH_BLOCK_DATA_ORDER)
			{
			HASH_BLOCK_DATA_ORDER(c,data,sw);
			sw*=HASH_CBLOCK;
			data+=sw;
			len-=sw;
			}
#endif
		}
	if (len!=0)
		{
		p = c->data;
		c->num = len;
		ew=len>>2;	/* words to copy */
		ec=len&0x03;
		for (; ew; ew--,p++)
			{
			HOST_c2l(data,l); *p=l;
			}
		HOST_c2l_p(data,l,ec);
		*p=l;
		}
	return 1;
	}
void HASH_TRANSFORM (HASH_CTX *c, const unsigned char *data)
	{
#if defined(HASH_BLOCK_DATA_ORDER_ALIGNED)
	if ((((unsigned long)data)%4) == 0)
		/* data is properly aligned so that we can cast it: */
		HASH_BLOCK_DATA_ORDER_ALIGNED (c,(const HASH_LONG *)data,1);
	else
#if !defined(HASH_BLOCK_DATA_ORDER)
		{
		memcpy (c->data,data,HASH_CBLOCK);
		HASH_BLOCK_DATA_ORDER_ALIGNED (c,c->data,1);
		}
#endif
#endif
#if defined(HASH_BLOCK_DATA_ORDER)
	HASH_BLOCK_DATA_ORDER (c,data,1);
#endif
	}
int HASH_FINAL (unsigned char *md, HASH_CTX *c)
	{
	register HASH_LONG *p;
	register unsigned long l;
	register int i,j;
	static const unsigned char end[4]={0x80,0x00,0x00,0x00};
	const unsigned char *cp=end;

	/* c->num should definitely have room for at least one more byte. */
	p=c->data;
	i=c->num>>2;
	j=c->num&0x03;
#if 0
	/* purify often complains about the following line as an
	 * Uninitialized Memory Read. While this can be true, the
	 * following p_c2l macro will reset l when that case is true.
	 * This is because j&0x03 contains the number of 'valid' bytes
	 * already in p[i]. If and only if j&0x03 == 0, the UMR will
	 * occur but this is also the only time p_c2l will do
	 * l= *(cp++) instead of l|= *(cp++)
	 * Many thanks to Alex Tang <altitude@cic.net> for picking up
	 * this fix. */
	if (j==0) p[i]=0; /* Yeah, but that's not the way to fix it:-) */
#else
	/* Find out whether we can fix it ourselves. <appro@fy.chalmers.se> */
	l = (j==0) ? 0 : p[i];
#endif
	HOST_p_c2l(cp,l,j); p[i++]=l; /* i is the next 'undefined' word */
	if (i>(HASH_LBLOCK-2)) /* save room for Nl and Nh */
		{
		if (i<HASH_LBLOCK) p[i]=0;
		HASH_BLOCK_HOST_ORDER (c,p,1);
		i=0;
		}
	for (; i<(HASH_LBLOCK-2); i++)
		p[i]=0;
#if   defined(DATA_ORDER_IS_BIG_ENDIAN)
	p[HASH_LBLOCK-2]=c->Nh;
	p[HASH_LBLOCK-1]=c->Nl;
#elif defined(DATA_ORDER_IS_LITTLE_ENDIAN)
	p[HASH_LBLOCK-2]=c->Nl;
	p[HASH_LBLOCK-1]=c->Nh;
#endif
	HASH_BLOCK_HOST_ORDER (c,p,1);
#ifndef HASH_MAKE_STRING
#error "HASH_MAKE_STRING must be defined!"
#else
	HASH_MAKE_STRING(c,md);
#endif

	c->num=0;
	/* clear stuff, HASH_BLOCK may be leaving some stuff on the stack
	 * but I'm not worried :-)
	OPENSSL_cleanse((void *)c,sizeof(HASH_CTX));
	 */
	return 1;
	}
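/*
 * Padding example: after hashing the 3-byte message "abc", c->num is 3,
 * so HASH_FINAL above appends the 0x80 byte, zero-fills up to the last
 * two words, stores the 64-bit bit count (here 24) in them (Nh before
 * Nl for big-endian data order, Nl before Nh for little-endian) and
 * runs the block function one final time.
 */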
#ifndef MD32_REG_T
#define MD32_REG_T long
/*
 * This comment was originally written for MD5, which is why it
 * discusses A-D. But it basically applies to all 32-bit digests,
 * which is why it was moved to a common header file.
 *
 * In case you wonder why A-D are declared as long and not
 * as MD5_LONG. Doing so results in a slight performance
 * boost on LP64 architectures. The catch is we don't
 * really care if 32 MSBs of a 64-bit register get polluted
 * with eventual overflows as we *save* only 32 LSBs in
 * *either* case. Now declaring 'em long excuses the compiler
 * from keeping 32 MSBs zeroed resulting in 13% performance
 * improvement under SPARC Solaris7/64 and 5% under AlphaLinux.
 * Well, to be honest it should say that this *prevents*
 * performance degradation.
 *				<appro@fy.chalmers.se>
 * Apparently there are LP64 compilers that generate better
 * code if A-D are declared int. Most notably GCC-x86_64
 * generates better code.
 *				<appro@fy.chalmers.se>
 */
#endif
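/*
 * For example, a round function built on this header would typically
 * declare its working variables as
 *
 *	register MD32_REG_T A,B,C,D;
 *
 * truncating to 32 bits only when the values are stored back into the
 * context (a sketch of the convention, not something defined here).
 */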