# if defined(__GNUC__) && __GNUC__>=2 && !defined(OPENSSL_NO_ASM) && !defined(OPENSSL_NO_INLINE_ASM)
# if ((defined(__i386) || defined(__i386__)) && !defined(I386_ONLY)) || \
(defined(__x86_64) || defined(__x86_64__))
+# if !defined(B_ENDIAN)
/*
 * This gives ~30-40% performance improvement in SHA-256 compiled
 * with gcc [on P4]. Well, first macro to be frank. We can pull
 * this trick on x86* platforms only, because these CPUs can fetch
 * unaligned data without raising an exception.
 */
# define HOST_c2l(c,l) ({ unsigned int r=*((const unsigned int *)(c)); \
asm ("bswapl %0":"=r"(r):"0"(r)); \
(c)+=4; (l)=r; })
# define HOST_l2c(l,c) ({ unsigned int r=(l); \
asm ("bswapl %0":"=r"(r):"0"(r)); \
*((unsigned int *)(c))=r; (c)+=4; r; })
+# endif
# endif
# endif
#endif
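
For reference, here is a minimal, portable sketch of what the HOST_l2c macro above computes: emit a host-order 32-bit word into the byte stream as four big-endian bytes and advance the pointer. The helper name l2c_portable is illustrative only (it appears neither in this patch nor in md32_common.h); on little-endian x86 the inline-asm variant produces the same bytes with a single bswapl plus one word store, which is the saving the comment above refers to.

#include <stdint.h>
#include <stdio.h>

/* Portable equivalent of HOST_l2c: store the 32-bit word l at c in
 * big-endian byte order and return the advanced pointer. */
static unsigned char *l2c_portable(uint32_t l, unsigned char *c)
{
    *c++ = (unsigned char)(l >> 24);
    *c++ = (unsigned char)(l >> 16);
    *c++ = (unsigned char)(l >>  8);
    *c++ = (unsigned char)(l);
    return c;
}

int main(void)
{
    unsigned char buf[4];
    l2c_portable(0x01020304u, buf);
    for (int i = 0; i < 4; i++)
        printf("%02x ", buf[i]);   /* prints: 01 02 03 04 */
    printf("\n");
    return 0;
}
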
void md4_block_host_order (MD4_CTX *c, const void *p,size_t num);
void md4_block_data_order (MD4_CTX *c, const void *p,size_t num);
-#if defined(__i386) || defined(__i386__) || defined(_M_IX86) || defined(__INTEL__)
+#if defined(__i386) || defined(__i386__) || defined(_M_IX86) || defined(__INTEL__) || \
+ defined(__x86_64) || defined(__x86_64__) || defined(_M_AMD64) || defined(_M_X64)
# if !defined(B_ENDIAN)
/*
 * *_block_host_order is expected to handle aligned data while
 * *_block_data_order - unaligned. As algorithm and host (x86) are
 * of the same "endianness" the two are otherwise indistinguishable,
 * and x86 loads unaligned data without raising an exception, so the
 * host-order routine can safely serve both.
 */
# define md4_block_data_order md4_block_host_order
# endif
#endif
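
To see why *_block_data_order can collapse into *_block_host_order whenever B_ENDIAN is not defined, recall that MD4, MD5 and RIPEMD-160 all consume their input as little-endian 32-bit words. A self-contained sketch, with illustrative helper names (c2l_le, host_load) that are not part of the patch:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* The algorithm-defined load: bytes interpreted as a little-endian
 * 32-bit word, regardless of host byte order. */
static uint32_t c2l_le(const unsigned char *c)
{
    return (uint32_t)c[0] | ((uint32_t)c[1] << 8) |
           ((uint32_t)c[2] << 16) | ((uint32_t)c[3] << 24);
}

/* The host-order load: raw bytes copied into a word (memcpy keeps
 * this alignment-safe even for unaligned input). */
static uint32_t host_load(const unsigned char *c)
{
    uint32_t l;
    memcpy(&l, c, 4);
    return l;
}

int main(void)
{
    const unsigned char msg[4] = { 0xde, 0xad, 0xbe, 0xef };
    printf("data order: %08x\n", (unsigned)c2l_le(msg));
    printf("host order: %08x\n", (unsigned)host_load(msg));
    /* The two agree exactly on little-endian hosts, i.e. whenever
     * B_ENDIAN is not defined on x86/x86-64. */
    return 0;
}
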
#ifdef MD5_ASM
-# if defined(__i386) || defined(__i386__) || defined(_M_IX86) || defined(__INTEL__) || defined(__x86_64) || defined(__x86_64__)
-# if !defined(B_ENDIAN)
-# define md5_block_host_order md5_block_asm_host_order
-# endif
+# if defined(__i386) || defined(__i386__) || defined(_M_IX86) || defined(__INTEL__) || \
+ defined(__x86_64) || defined(__x86_64__) || defined(_M_AMD64) || defined(_M_X64)
+# define md5_block_host_order md5_block_asm_host_order
# elif defined(__sparc) && defined(OPENSSL_SYS_ULTRASPARC)
void md5_block_asm_data_order_aligned (MD5_CTX *c, const MD5_LONG *p,size_t num);
# define HASH_BLOCK_DATA_ORDER_ALIGNED md5_block_asm_data_order_aligned
# endif
#endif
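
The UltraSPARC branch maps HASH_BLOCK_DATA_ORDER_ALIGNED to an asm routine that requires 4-byte-aligned input, leaving the shared update loop to dispatch on alignment at run time. Below is a toy model of that dispatch; the names (update, block_aligned, block_unaligned) are illustrative, not the real md32_common.h code.

#include <stdint.h>
#include <stdio.h>

/* Stand-in for an asm routine that only accepts aligned input. */
static void block_aligned(const uint32_t *p, size_t num)
{
    (void)p;
    printf("aligned path: %zu block(s)\n", num);
}

/* Fallback that tolerates arbitrary alignment. */
static void block_unaligned(const void *p, size_t num)
{
    (void)p;
    printf("unaligned path: %zu block(s)\n", num);
}

static void update(const void *data, size_t num)
{
    if (((uintptr_t)data % 4) == 0)      /* 4-byte aligned? */
        block_aligned((const uint32_t *)data, num);
    else
        block_unaligned(data, num);
}

int main(void)
{
    uint32_t words[16] = {0};
    update(words, 1);                              /* aligned */
    update((const unsigned char *)words + 1, 1);   /* misaligned */
    return 0;
}
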
void md5_block_host_order (MD5_CTX *c, const void *p,size_t num);
void md5_block_data_order (MD5_CTX *c, const void *p,size_t num);
-#if defined(__i386) || defined(__i386__) || defined(_M_IX86) || defined(__INTEL__) || defined(__x86_64) || defined(__x86_64__)
+#if defined(__i386) || defined(__i386__) || defined(_M_IX86) || defined(__INTEL__) || \
+ defined(__x86_64) || defined(__x86_64__) || defined(_M_AMD64) || defined(_M_X64)
# if !defined(B_ENDIAN)
/*
 * *_block_host_order is expected to handle aligned data while
 * *_block_data_order - unaligned; as for MD4 above, the two
 * coincide on little-endian x86.
 */
# define md5_block_data_order md5_block_host_order
# endif
#endif
#ifdef RMD160_ASM
# if defined(__i386) || defined(__i386__) || defined(_M_IX86) || defined(__INTEL__)
-# if !defined(B_ENDIAN)
-# define ripemd160_block_host_order ripemd160_block_asm_host_order
-# endif
+# define ripemd160_block_host_order ripemd160_block_asm_host_order
# endif
#endif
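
The guards throughout test compiler-predefined macros: gcc and clang define __i386__ or __x86_64__, while MSVC defines _M_IX86, _M_AMD64 or _M_X64, hence the additions in this patch. Note that the RMD160_ASM and SHA1_ASM guards are left i386-only here. A standalone probe, not part of the patch, reports which family a given compiler targets:

#include <stdio.h>

int main(void)
{
#if defined(__x86_64) || defined(__x86_64__) || defined(_M_AMD64) || defined(_M_X64)
    puts("x86-64 target");
#elif defined(__i386) || defined(__i386__) || defined(_M_IX86)
    puts("32-bit x86 target");
#else
    puts("other architecture");
#endif
    return 0;
}
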
void ripemd160_block_host_order (RIPEMD160_CTX *c, const void *p,size_t num);
void ripemd160_block_data_order (RIPEMD160_CTX *c, const void *p,size_t num);
-#if defined(__i386) || defined(__i386__) || defined(_M_IX86) || defined(__INTEL__)
+#if defined(__i386) || defined(__i386__) || defined(_M_IX86) || defined(__INTEL__) || \
+ defined(__x86_64) || defined(__x86_64__) || defined(_M_AMD64) || defined(_M_X64)
# if !defined(B_ENDIAN)
# define ripemd160_block_data_order ripemd160_block_host_order
# endif
#endif
# ifdef SHA1_ASM
# if defined(__i386) || defined(__i386__) || defined(_M_IX86) || defined(__INTEL__)
-# if !defined(B_ENDIAN)
-# define sha1_block_host_order sha1_block_asm_host_order
-# define DONT_IMPLEMENT_BLOCK_HOST_ORDER
-# define sha1_block_data_order sha1_block_asm_data_order
-# define DONT_IMPLEMENT_BLOCK_DATA_ORDER
-# define HASH_BLOCK_DATA_ORDER_ALIGNED sha1_block_asm_data_order
-# endif
+# define sha1_block_host_order sha1_block_asm_host_order
+# define DONT_IMPLEMENT_BLOCK_HOST_ORDER
+# define sha1_block_data_order sha1_block_asm_data_order
+# define DONT_IMPLEMENT_BLOCK_DATA_ORDER
+# define HASH_BLOCK_DATA_ORDER_ALIGNED sha1_block_asm_data_order
# elif defined(__ia64) || defined(__ia64__) || defined(_M_IA64)
# define sha1_block_host_order sha1_block_asm_host_order
# define DONT_IMPLEMENT_BLOCK_HOST_ORDER
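
The DONT_IMPLEMENT_BLOCK_* macros tell the shared md32_common.h template not to emit its generic C routines, so the asm names mapped in above become the only definitions; without the guard, the template's definition of sha1_block_data_order would expand through the alias and collide with the asm symbol. A toy model of the convention (an assumed shape with illustrative stubs, not the verbatim header):

#include <stddef.h>
#include <stdio.h>

#define sha1_block_data_order sha1_block_asm_data_order
#define DONT_IMPLEMENT_BLOCK_DATA_ORDER

/* Stand-in for the hand-written assembly routine. */
static void sha1_block_asm_data_order(const void *p, size_t num)
{
    (void)p;
    printf("asm-backed routine: %zu block(s)\n", num);
}

/* What the shared template would expand to; compiled only when no
 * asm replacement was mapped in above. */
#ifndef DONT_IMPLEMENT_BLOCK_DATA_ORDER
static void sha1_block_data_order(const void *p, size_t num)
{
    (void)p;
    printf("generic C routine: %zu block(s)\n", num);
}
#endif

int main(void)
{
    unsigned char block[64] = {0};
    sha1_block_data_order(block, 1);   /* resolves to the asm stub */
    return 0;
}
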