#endif
#if !defined(PEDANTIC) && !defined(OPENSSL_NO_ASM) && !defined(OPENSSL_NO_INLINE_ASM)
-#if defined(__GNUC__) && __GNUC__>=2
-# if defined(__x86_64) || defined(__x86_64__)
-#  define BSWAP8(x) ({ u64 ret=(x); \
+# if defined(__GNUC__) && __GNUC__>=2
+#  if defined(__x86_64) || defined(__x86_64__)
+#   define BSWAP8(x) ({ u64 ret=(x); \
                        asm ("bswapq %0" \
                        : "+r"(ret)); ret; })
-#  define BSWAP4(x) ({ u32 ret=(x); \
+#   define BSWAP4(x) ({ u32 ret=(x); \
                        asm ("bswapl %0" \
                        : "+r"(ret)); ret; })
-# elif (defined(__i386) || defined(__i386__)) && !defined(I386_ONLY)
-#  define BSWAP8(x) ({ u32 lo=(u64)(x)>>32,hi=(x); \
+#  elif (defined(__i386) || defined(__i386__)) && !defined(I386_ONLY)
+#   define BSWAP8(x) ({ u32 lo=(u64)(x)>>32,hi=(x); \
                        asm ("bswapl %0; bswapl %1" \
                        : "+r"(hi),"+r"(lo)); \
                        (u64)hi<<32|lo; })
-#  define BSWAP4(x) ({ u32 ret=(x); \
+#   define BSWAP4(x) ({ u32 ret=(x); \
                        asm ("bswapl %0" \
                        : "+r"(ret)); ret; })
-# elif (defined(__arm__) || defined(__arm)) && !defined(STRICT_ALIGNMENT)
-#  define BSWAP8(x) ({ u32 lo=(u64)(x)>>32,hi=(x); \
+#  elif (defined(__arm__) || defined(__arm)) && !defined(STRICT_ALIGNMENT)
+#   define BSWAP8(x) ({ u32 lo=(u64)(x)>>32,hi=(x); \
                        asm ("rev %0,%0; rev %1,%1" \
                        : "+r"(hi),"+r"(lo)); \
                        (u64)hi<<32|lo; })
-#  define BSWAP4(x) ({ u32 ret; \
+#   define BSWAP4(x) ({ u32 ret; \
                        asm ("rev %0,%1" \
                        : "=r"(ret) : "r"((u32)(x))); \
                        ret; })
-# endif
-#elif defined(_MSC_VER)
-# if _MSC_VER>=1300
-#  pragma intrinsic(_byteswap_uint64,_byteswap_ulong)
-#  define BSWAP8(x) _byteswap_uint64((u64)(x))
-#  define BSWAP4(x) _byteswap_ulong((u32)(x))
-# elif defined(_M_IX86)
-   __inline u32 _bswap4(u32 val) {
+#  endif
+# elif defined(_MSC_VER)
+#  if _MSC_VER>=1300
+#   pragma intrinsic(_byteswap_uint64,_byteswap_ulong)
+#   define BSWAP8(x) _byteswap_uint64((u64)(x))
+#   define BSWAP4(x) _byteswap_ulong((u32)(x))
+#  elif defined(_M_IX86)
+    __inline u32 _bswap4(u32 val) {
        _asm mov eax,val
        _asm bswap eax
-   }
-#  define BSWAP4(x) _bswap4(x)
+    }
+#   define BSWAP4(x) _bswap4(x)
+#  endif
# endif
#endif
-#endif
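For reference, every variant above computes plain byte-order reversal; only the
instruction selection differs per platform (bswapq/bswapl on x86, rev on ARM,
the _byteswap_* intrinsics on MSVC). A portable C sketch with the same
semantics, to make the contract of the macros concrete (not part of the patch;
the names bswap4_ref and bswap8_ref are illustrative only):

#include <stdint.h>
typedef uint32_t u32;
typedef uint64_t u64;

/* Reverse the four bytes of a 32-bit word, e.g. 0x01020304 -> 0x04030201. */
static u32 bswap4_ref(u32 x)
{
    return (x >> 24) | ((x >> 8) & 0x0000ff00U) |
           ((x << 8) & 0x00ff0000U) | (x << 24);
}

/* Reverse an eight-byte word: swap the 32-bit halves, then reverse each half. */
static u64 bswap8_ref(u64 x)
{
    return ((u64)bswap4_ref((u32)x) << 32) | bswap4_ref((u32)(x >> 32));
}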
#if defined(BSWAP4) && !defined(STRICT_ALIGNMENT)
#define GETU32(p)       BSWAP4(*(const u32 *)(p))