* ...
* HASH_LONG Nl,Nh;
* HASH_LONG data[HASH_LBLOCK];
- * int num;
+ * unsigned int num;
* ...
* } HASH_CTX;
* HASH_UPDATE
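For reference, SHA-1's context in <openssl/sha.h> is one concrete instance of this template; it is reproduced here only roughly, to illustrate the fields this header's macros operate on, in particular the unsigned `num` byte counter:

/* illustration: OpenSSL's SHA_CTX follows the template described above */
typedef struct SHAstate_st {
    SHA_LONG h0, h1, h2, h3, h4;   /* chaining variables                  */
    SHA_LONG Nl, Nh;               /* low/high words of the message bit count */
    SHA_LONG data[SHA_LBLOCK];     /* buffered, not-yet-hashed input      */
    unsigned int num;              /* number of bytes currently in data[] */
} SHA_CTX;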
: "cc"); \
ret; \
})
-# elif defined(__powerpc) || defined(__ppc)
+# elif defined(__powerpc) || defined(__ppc__) || defined(__powerpc64__)
# define ROTATE(a,n) ({ register unsigned int ret; \
asm ( \
"rlwinm %0,%1,%2,0,31" \
#ifndef PEDANTIC
# if defined(__GNUC__) && __GNUC__>=2 && !defined(OPENSSL_NO_ASM) && !defined(OPENSSL_NO_INLINE_ASM)
-#  if defined(__i386) || defined(__i386__) || defined(__x86_64) || defined(__x86_64__)
+#  if ((defined(__i386) || defined(__i386__)) && !defined(I386_ONLY)) || \
+      (defined(__x86_64) || defined(__x86_64__))
+#   if !defined(B_ENDIAN)
/*
* This gives ~30-40% performance improvement in SHA-256 compiled
* with gcc [on P4]. Well, first macro to be frank. We can pull
* this trick on x86* platforms only, because these CPUs can fetch
* unaligned data without raising an exception.
*/
# define HOST_l2c(l,c) ({ unsigned int r=(l); \
asm ("bswapl %0":"=r"(r):"0"(r)); \
*((unsigned int *)(c))=r; (c)+=4; r; })
+#   endif
#  endif
# endif
#endif
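Without the bswapl shortcut above, HOST_c2l/HOST_l2c fall back to the byte-at-a-time defaults guarded by #ifndef HOST_c2l below; roughly, for the big-endian data order:

/* sketch of the generic big-endian data-order fallbacks; the inline-asm
 * versions above replace the four shifts/ORs with a single 32-bit
 * access plus a bswapl on little-endian x86/x86_64 */
#define HOST_c2l(c,l)   (l =(((unsigned long)(*((c)++)))<<24),   \
                         l|=(((unsigned long)(*((c)++)))<<16),   \
                         l|=(((unsigned long)(*((c)++)))<< 8),   \
                         l|=(((unsigned long)(*((c)++)))    ))
#define HOST_l2c(l,c)   (*((c)++)=(unsigned char)(((l)>>24)&0xff),   \
                         *((c)++)=(unsigned char)(((l)>>16)&0xff),   \
                         *((c)++)=(unsigned char)(((l)>> 8)&0xff),   \
                         *((c)++)=(unsigned char)(((l)    )&0xff),   \
                         l)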
#elif defined(DATA_ORDER_IS_LITTLE_ENDIAN)
#if defined(__i386) || defined(__i386__) || defined(__x86_64) || defined(__x86_64__)
- /* See comment in DATA_ORDER_IS_BIG_ENDIAN section. */
-# define HOST_c2l(c,l) ((l)=*((const unsigned int *)(c)), (c)+=4, l)
-# define HOST_l2c(l,c) (*((unsigned int *)(c))=(l), (c)+=4, l)
+# ifndef B_ENDIAN
+ /* See comment in DATA_ORDER_IS_BIG_ENDIAN section. */
+#  define HOST_c2l(c,l) ((l)=*((const unsigned int *)(c)), (c)+=4, l)
+#  define HOST_l2c(l,c) (*((unsigned int *)(c))=(l), (c)+=4, l)
+# endif
#endif
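The new #ifndef B_ENDIAN guard exists because the one-access macros above assume the host really stores words little-endian; on a big-endian build (which is what B_ENDIAN indicates) they would hand the transform its bytes in the wrong order, so the byte-wise defaults take over instead. A small sketch of the value HOST_c2l has to produce for this data order:

/* sketch: for DATA_ORDER_IS_LITTLE_ENDIAN, HOST_c2l must yield this value
 * regardless of host byte order */
static const unsigned char buf[4] = {0x01, 0x02, 0x03, 0x04};
unsigned int l = (unsigned int)buf[0]
               | ((unsigned int)buf[1] << 8)
               | ((unsigned int)buf[2] << 16)
               | ((unsigned int)buf[3] << 24);     /* l == 0x04030201 */
/* *(const unsigned int *)buf equals 0x04030201 only on a little-endian
 * host; on a big-endian host it reads as 0x01020304, hence the guard */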
#ifndef HOST_c2l
    const unsigned char *data=data_;
    register HASH_LONG * p;
    register HASH_LONG l;
-   unsigned int sw,sc,ew,ec;
+   size_t sw,sc,ew,ec;
    if (len==0) return 1;
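Widening sw/sc/ew/ec to size_t, together with the (unsigned int) cast on c->num a few lines further down, goes with the update function taking a size_t length; the entry point this header implements has roughly this shape (num itself stays an unsigned int because it only counts bytes buffered within the current block):

/* rough prototype of the update routine defined by this header; the
 * HASH_UPDATE macro names the per-algorithm function, e.g. SHA1_Update */
int HASH_UPDATE(HASH_CTX *c, const void *data, size_t len);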
        }
    else
        {
-       c->num+=len;
+       c->num+=(unsigned int)len;
        if ((sc+len) < 4) /* ugly, add char's to a word */
            {
            l=p[sw]; HOST_p_c2l_p(data,l,sc,len); p[sw]=l;
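Here HOST_p_c2l_p merges the remaining len bytes (fewer than a full word, given the (sc+len) < 4 test) into the partially filled word p[sw], continuing at byte offset sc; for the big-endian data order it behaves roughly like:

/* sketch of HOST_p_c2l_p(data,l,sc,len) for DATA_ORDER_IS_BIG_ENDIAN:
 * append len (< 4) bytes to word l, most significant byte first,
 * starting at byte position sc */
while (len--)
    l |= (HASH_LONG)(*data++) << (8 * (3 - sc++));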