*/
#undef ROTATE
#ifndef PEDANTIC
-# if defined(_MSC_VER)
+# if 0 /* defined(_MSC_VER) */
# define ROTATE(a,n) _lrotl(a,n)
# elif defined(__MWERKS__)
# if defined(__POWERPC__)
# define ROTATE(a,n) __rlwinm(a,n,0,31)
# else
# define ROTATE(a,n) __rol(a,n)
# endif
-# elif defined(__GNUC__) && __GNUC__>=2 && !defined(NO_ASM) && !defined(NO_INLINE_ASM)
+# elif defined(__GNUC__) && __GNUC__>=2 && !defined(OPENSSL_NO_ASM) && !defined(OPENSSL_NO_INLINE_ASM)
/*
 * Some GNU C inline assembler templates. Note that these are
 * rotates by *constant* number of bits! But that's exactly
 * what we need here...
 */
/*
 * Engage compiler specific "fetch in reverse byte order"
 * intrinsic function if available.
 */
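/*
 * For illustration only (not part of this patch): a representative
 * rotate template of the kind described above, patterned after the
 * i386 case. The "I" constraint requires n to be a compile-time
 * constant, which is why these templates only handle constant shifts.
 */
#if 0
# if defined(__i386)
# define ROTATE(a,n) ({ register unsigned int ret; \
asm ("roll %1,%0" \
: "=r"(ret) \
: "I"(n), "0"(a) \
: "cc"); \
ret; \
})
# endif
#endif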
-# if defined(__GNUC__) && __GNUC__>=2 && !defined(NO_ASM) && !defined(NO_INLINE_ASM)
+# if defined(__GNUC__) && __GNUC__>=2 && !defined(OPENSSL_NO_ASM) && !defined(OPENSSL_NO_INLINE_ASM)
/* some GNU C inline assembler templates by <appro@fy.chalmers.se> */
# if defined(__i386) && !defined(I386_ONLY)
# define BE_FETCH32(a) ({ register unsigned int l=(a);\
asm ( \
"bswapl %0" \
: "=r"(l) : "0"(l)); \
l; \
})
-# elif defined(__sparc) && defined(ULTRASPARC)
+# elif defined(__sparc) && defined(OPENSSL_SYS_ULTRASPARC)
# define LE_FETCH32(a) ({ register unsigned int l; \
asm ( \
"lda [%1]#ASI_PRIMARY_LITTLE,%0"\
: "=r"(l) : "r"(a)); \
l; \
})
/*
* Time for some action:-)
*/
-void HASH_UPDATE (HASH_CTX *c, const void *data_, unsigned long len)
+int HASH_UPDATE (HASH_CTX *c, const void *data_, unsigned long len)
{
const unsigned char *data=data_;
register HASH_LONG * p;
register unsigned long l;
int sw,sc,ew,ec;
- if (len==0) return;
+ if (len==0) return 1;
l=(c->Nl+(len<<3))&0xffffffffL;
/* 95-05-24 eay Fixed a bug with the overflow handling, thanks to
* Wei Dai <weidai@eskimo.com> for pointing it out. */
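/*
 * Sketch of the overflow handling the comment above refers to
 * (following the canonical md32_common.h logic): the message bit
 * count lives in two 32-bit words, Nl/Nh. If adding len<<3 wrapped
 * the low word, carry into the high word; len>>29 supplies the bits
 * of len*8 that never fit in Nl.
 */
#if 0
if (l < c->Nl) /* the 32-bit addition above wrapped */
    c->Nh++;
c->Nh += (len >> 29);
c->Nl = l;
#endif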
HOST_c2l_p(data,l,ec); p[sw]=l;
}
}
- return;
+ return 1;
}
}
HOST_c2l_p(data,l,ec);
*p=l;
}
+ return 1;
}
}
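/*
 * For illustration only: with HASH_UPDATE now returning int (1 on
 * success, per the change above), callers of a concrete
 * instantiation such as MD5_Update can check the result instead of
 * assuming success. Assumes an initialized MD5_CTX ctx and a data
 * buffer buf of length buflen.
 */
#if 0
if (!MD5_Update(&ctx, buf, buflen))
    goto err;
#endif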
-void HASH_FINAL (unsigned char *md, HASH_CTX *c)
+int HASH_FINAL (unsigned char *md, HASH_CTX *c)
{
register HASH_LONG *p;
register unsigned long l;
/* clear stuff, HASH_BLOCK may be leaving some stuff on the stack
* but I'm not worried :-)
memset((void *)c,0,sizeof(HASH_CTX));
*/
+ return 1;
}
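/*
 * For illustration only: the memset above is deliberately left
 * commented out, so the context is NOT scrubbed at finalization.
 * A caller that wants the intermediate state erased can do it
 * itself; a minimal sketch with a typical instantiation such as
 * MD5 (error checks omitted for brevity):
 */
#if 0
unsigned char buf[64] = {0};
unsigned char md[MD5_DIGEST_LENGTH];
MD5_CTX ctx;
MD5_Init(&ctx);
MD5_Update(&ctx, buf, sizeof(buf));
MD5_Final(md, &ctx);
memset(&ctx, 0, sizeof(ctx)); /* erase sensitive state explicitly */
#endif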