From: Bernd Edlinger
Date: Tue, 24 Apr 2018 19:10:13 +0000 (+0200)
Subject: Avoid undefined behavior with unaligned accesses
X-Git-Url: https://git.librecmc.org/?a=commitdiff_plain;h=d03ffeaf45da6541875bff05b3f79d8dba355c97;p=oweals%2Fopenssl.git

Avoid undefined behavior with unaligned accesses

Fixes: #4983

[extended tests]
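
Illustrative sketch of the pattern used throughout this change (the
snippet itself is not part of the patch): GCC and Clang allow an
alignment attribute on a typedef to lower the alignment requirement
of the aliased type, so a one-byte-aligned alias of a scalar type
makes a potentially unaligned access well-defined while still
compiling to a single load/store on targets where that is cheap:

    /* alias of size_t that may be arbitrarily (mis)aligned */
    typedef size_t size_t_aX __attribute((__aligned__(1)));

    /* before (cbc128.c): undefined behavior whenever out, in or iv
     * is not sizeof(size_t)-aligned */
    *(size_t *)(out + n) =
        *(size_t *)(in + n) ^ *(size_t *)(iv + n);

    /* after: well-defined through the one-byte-aligned alias */
    *(size_t_aX *)(out + n) =
        *(size_t_aX *)(in + n) ^ *(size_t_aX *)(iv + n);

Where the attribute is not available the alias falls back to the
plain type, so non-GNU compilers and strict-alignment targets see
the same code as before.
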
Reviewed-by: Nicola Tuveri
(Merged from https://github.com/openssl/openssl/pull/11781)
---

diff --git a/.travis.yml b/.travis.yml
index fe1b0f78fa..6cf1ba02c0 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -57,7 +57,7 @@ matrix:
               apt:
                   packages:
                       - clang-6.0
-          env: EXTENDED_TEST="yes" CONFIG_OPTS="no-asm enable-ubsan enable-rc5 enable-md2 enable-ssl3 enable-ssl3-method enable-nextprotoneg no-shared enable-buildtest-c++ -fno-sanitize=alignment -DFUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION -D__NO_STRING_INLINES -Wno-unused-command-line-argument" MATRIX_EVAL="CC=clang-6.0 && CXX=clang++-6.0"
+          env: EXTENDED_TEST="yes" CONFIG_OPTS="no-asm enable-ubsan enable-rc5 enable-md2 enable-ssl3 enable-ssl3-method enable-nextprotoneg no-shared enable-buildtest-c++ -DFUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION -D__NO_STRING_INLINES -Wno-unused-command-line-argument" MATRIX_EVAL="CC=clang-6.0 && CXX=clang++-6.0"
         - os: linux
           arch: s390x
           compiler: gcc
@@ -121,7 +121,7 @@ matrix:
           env: EXTENDED_TEST="yes" CONFIG_OPTS="enable-msan -D__NO_STRING_INLINES -Wno-unused-command-line-argument"
         - os: linux
           compiler: clang
-          env: EXTENDED_TEST="yes" CONFIG_OPTS="no-asm enable-ubsan enable-rc5 enable-md2 enable-ssl3 enable-ssl3-method enable-nextprotoneg no-shared -fno-sanitize=alignment -DFUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION -D__NO_STRING_INLINES -Wno-unused-command-line-argument"
+          env: EXTENDED_TEST="yes" CONFIG_OPTS="no-asm enable-ubsan enable-rc5 enable-md2 enable-ssl3 enable-ssl3-method enable-nextprotoneg no-shared -DFUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION -D__NO_STRING_INLINES -Wno-unused-command-line-argument"
         - os: linux
           compiler: clang
           env: EXTENDED_TEST="yes" CONFIG_OPTS="no-asm enable-asan enable-rc5 enable-md2 no-shared -DFUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION -D__NO_STRING_INLINES -Wno-unused-command-line-argument"
@@ -134,7 +134,7 @@ matrix:
               sources:
                   - ubuntu-toolchain-r-test
           compiler: gcc-5
-          env: UBUNTU_GCC_HACK="yes" EXTENDED_TEST="yes" CONFIG_OPTS="--debug no-asm enable-ubsan enable-rc5 enable-md2 -DPEDANTIC" OPENSSL_TEST_RAND_ORDER=0
+          env: UBUNTU_GCC_HACK="yes" EXTENDED_TEST="yes" CONFIG_OPTS="--debug no-asm enable-asan enable-ubsan enable-rc5 enable-md2 enable-ec_nistp_64_gcc_128" OPENSSL_TEST_RAND_ORDER=0
         - os: linux
           addons:
               apt:
diff --git a/crypto/aes/aes_ige.c b/crypto/aes/aes_ige.c
index dce4ef11be..0df04b3bb2 100644
--- a/crypto/aes/aes_ige.c
+++ b/crypto/aes/aes_ige.c
@@ -12,11 +12,6 @@
 #include <openssl/aes.h>
 #include "aes_local.h"
 
-#define N_WORDS (AES_BLOCK_SIZE / sizeof(unsigned long))
-typedef struct {
-    unsigned long data[N_WORDS];
-} aes_block_t;
-
 /* XXX: probably some better way to do this */
 #if defined(__i386__) || defined(__x86_64__)
 # define UNALIGNED_MEMOPS_ARE_FAST 1
@@ -24,6 +19,15 @@ typedef struct {
 # define UNALIGNED_MEMOPS_ARE_FAST 0
 #endif
 
+#define N_WORDS (AES_BLOCK_SIZE / sizeof(unsigned long))
+typedef struct {
+    unsigned long data[N_WORDS];
+#if defined(__GNUC__) && UNALIGNED_MEMOPS_ARE_FAST
+} aes_block_t __attribute((__aligned__(1)));
+#else
+} aes_block_t;
+#endif
+
 #if UNALIGNED_MEMOPS_ARE_FAST
 # define load_block(d, s)     (d) = *(const aes_block_t *)(s)
 # define store_block(d, s)    *(aes_block_t *)(d) = (s)
diff --git a/crypto/ec/ecp_nistp224.c b/crypto/ec/ecp_nistp224.c
index 9a9ced8f13..6f7d66c8be 100644
--- a/crypto/ec/ecp_nistp224.c
+++ b/crypto/ec/ecp_nistp224.c
@@ -72,6 +72,7 @@ typedef uint64_t u64;
  */
 
 typedef uint64_t limb;
+typedef uint64_t limb_aX __attribute((__aligned__(1)));
 typedef uint128_t widelimb;
 
 typedef limb felem[4];
@@ -307,10 +308,10 @@ const EC_METHOD *EC_GFp_nistp224_method(void)
  */
 static void bin28_to_felem(felem out, const u8 in[28])
 {
-    out[0] = *((const uint64_t *)(in)) & 0x00ffffffffffffff;
-    out[1] = (*((const uint64_t *)(in + 7))) & 0x00ffffffffffffff;
-    out[2] = (*((const uint64_t *)(in + 14))) & 0x00ffffffffffffff;
-    out[3] = (*((const uint64_t *)(in+20))) >> 8;
+    out[0] = *((const limb *)(in)) & 0x00ffffffffffffff;
+    out[1] = (*((const limb_aX *)(in + 7))) & 0x00ffffffffffffff;
+    out[2] = (*((const limb_aX *)(in + 14))) & 0x00ffffffffffffff;
+    out[3] = (*((const limb_aX *)(in + 20))) >> 8;
 }
 
 static void felem_to_bin28(u8 out[28], const felem in)
diff --git a/crypto/ec/ecp_nistp521.c b/crypto/ec/ecp_nistp521.c
index 75eeba8536..08b3278729 100644
--- a/crypto/ec/ecp_nistp521.c
+++ b/crypto/ec/ecp_nistp521.c
@@ -128,6 +128,7 @@ static const felem_bytearray nistp521_curve_params[5] = {
 # define NLIMBS 9
 
 typedef uint64_t limb;
+typedef limb limb_aX __attribute((__aligned__(1)));
 typedef limb felem[NLIMBS];
 typedef uint128_t largefelem[NLIMBS];
 
@@ -141,14 +142,14 @@ static const limb bottom58bits = 0x3ffffffffffffff;
 static void bin66_to_felem(felem out, const u8 in[66])
 {
     out[0] = (*((limb *) & in[0])) & bottom58bits;
-    out[1] = (*((limb *) & in[7]) >> 2) & bottom58bits;
-    out[2] = (*((limb *) & in[14]) >> 4) & bottom58bits;
-    out[3] = (*((limb *) & in[21]) >> 6) & bottom58bits;
-    out[4] = (*((limb *) & in[29])) & bottom58bits;
-    out[5] = (*((limb *) & in[36]) >> 2) & bottom58bits;
-    out[6] = (*((limb *) & in[43]) >> 4) & bottom58bits;
-    out[7] = (*((limb *) & in[50]) >> 6) & bottom58bits;
-    out[8] = (*((limb *) & in[58])) & bottom57bits;
+    out[1] = (*((limb_aX *) & in[7]) >> 2) & bottom58bits;
+    out[2] = (*((limb_aX *) & in[14]) >> 4) & bottom58bits;
+    out[3] = (*((limb_aX *) & in[21]) >> 6) & bottom58bits;
+    out[4] = (*((limb_aX *) & in[29])) & bottom58bits;
+    out[5] = (*((limb_aX *) & in[36]) >> 2) & bottom58bits;
+    out[6] = (*((limb_aX *) & in[43]) >> 4) & bottom58bits;
+    out[7] = (*((limb_aX *) & in[50]) >> 6) & bottom58bits;
+    out[8] = (*((limb_aX *) & in[58])) & bottom57bits;
 }
 
 /*
@@ -159,14 +160,14 @@ static void felem_to_bin66(u8 out[66], const felem in)
 {
     memset(out, 0, 66);
     (*((limb *) & out[0])) = in[0];
-    (*((limb *) & out[7])) |= in[1] << 2;
-    (*((limb *) & out[14])) |= in[2] << 4;
-    (*((limb *) & out[21])) |= in[3] << 6;
-    (*((limb *) & out[29])) = in[4];
-    (*((limb *) & out[36])) |= in[5] << 2;
-    (*((limb *) & out[43])) |= in[6] << 4;
-    (*((limb *) & out[50])) |= in[7] << 6;
-    (*((limb *) & out[58])) = in[8];
+    (*((limb_aX *) & out[7])) |= in[1] << 2;
+    (*((limb_aX *) & out[14])) |= in[2] << 4;
+    (*((limb_aX *) & out[21])) |= in[3] << 6;
+    (*((limb_aX *) & out[29])) = in[4];
+    (*((limb_aX *) & out[36])) |= in[5] << 2;
+    (*((limb_aX *) & out[43])) |= in[6] << 4;
+    (*((limb_aX *) & out[50])) |= in[7] << 6;
+    (*((limb_aX *) & out[58])) = in[8];
 }
 
 /* BN_to_felem converts an OpenSSL BIGNUM into an felem */
diff --git a/crypto/modes/cbc128.c b/crypto/modes/cbc128.c
index fc7e0b6051..f25f14aa5b 100644
--- a/crypto/modes/cbc128.c
+++ b/crypto/modes/cbc128.c
@@ -15,6 +15,12 @@
 # define STRICT_ALIGNMENT 0
 #endif
 
+#if defined(__GNUC__) && !STRICT_ALIGNMENT
+typedef size_t size_t_aX __attribute((__aligned__(1)));
+#else
+typedef size_t size_t_aX;
+#endif
+
 void CRYPTO_cbc128_encrypt(const unsigned char *in, unsigned char *out,
                            size_t len, const void *key,
                            unsigned char ivec[16], block128_f block)
@@ -40,8 +46,8 @@ void CRYPTO_cbc128_encrypt(const unsigned char *in, unsigned char *out,
     } else {
         while (len >= 16) {
             for (n = 0; n < 16; n += sizeof(size_t))
-                *(size_t *)(out + n) =
-                    *(size_t *)(in + n) ^ *(size_t *)(iv + n);
+                *(size_t_aX *)(out + n) =
+                    *(size_t_aX *)(in + n) ^ *(size_t_aX *)(iv + n);
             (*block) (out, out, key);
             iv = out;
             len -= 16;
@@ -96,7 +102,8 @@ void CRYPTO_cbc128_decrypt(const unsigned char *in, unsigned char *out,
             }
         } else if (16 % sizeof(size_t) == 0) { /* always true */
             while (len >= 16) {
-                size_t *out_t = (size_t *)out, *iv_t = (size_t *)iv;
+                size_t_aX *out_t = (size_t_aX *)out;
+                size_t_aX *iv_t = (size_t_aX *)iv;
 
                 (*block) (in, out, key);
                 for (n = 0; n < 16 / sizeof(size_t); n++)
@@ -125,8 +132,10 @@ void CRYPTO_cbc128_decrypt(const unsigned char *in, unsigned char *out,
             }
         } else if (16 % sizeof(size_t) == 0) { /* always true */
             while (len >= 16) {
-                size_t c, *out_t = (size_t *)out, *ivec_t = (size_t *)ivec;
-                const size_t *in_t = (const size_t *)in;
+                size_t c;
+                size_t_aX *out_t = (size_t_aX *)out;
+                size_t_aX *ivec_t = (size_t_aX *)ivec;
+                const size_t_aX *in_t = (const size_t_aX *)in;
 
                 (*block) (in, tmp.c, key);
                 for (n = 0; n < 16 / sizeof(size_t); n++) {
diff --git a/crypto/modes/ccm128.c b/crypto/modes/ccm128.c
index 424722811c..170a7c9edb 100644
--- a/crypto/modes/ccm128.c
+++ b/crypto/modes/ccm128.c
@@ -11,6 +11,14 @@
 #include "modes_local.h"
 #include <string.h>
 
+#ifndef STRICT_ALIGNMENT
+# ifdef __GNUC__
+typedef u64 u64_a1 __attribute((__aligned__(1)));
+# else
+typedef u64 u64_a1;
+# endif
+#endif
+
 /*
  * First you setup M and L parameters and pass the key schedule. This is
  * called once per session setup...
@@ -170,8 +178,8 @@ int CRYPTO_ccm128_encrypt(CCM128_CONTEXT *ctx,
         ctx->cmac.u[0] ^= temp.u[0];
         ctx->cmac.u[1] ^= temp.u[1];
 #else
-        ctx->cmac.u[0] ^= ((u64 *)inp)[0];
-        ctx->cmac.u[1] ^= ((u64 *)inp)[1];
+        ctx->cmac.u[0] ^= ((u64_a1 *)inp)[0];
+        ctx->cmac.u[1] ^= ((u64_a1 *)inp)[1];
 #endif
         (*block) (ctx->cmac.c, ctx->cmac.c, key);
         (*block) (ctx->nonce.c, scratch.c, key);
@@ -181,8 +189,8 @@ int CRYPTO_ccm128_encrypt(CCM128_CONTEXT *ctx,
         temp.u[1] ^= scratch.u[1];
         memcpy(out, temp.c, 16);
 #else
-        ((u64 *)out)[0] = scratch.u[0] ^ ((u64 *)inp)[0];
-        ((u64 *)out)[1] = scratch.u[1] ^ ((u64 *)inp)[1];
+        ((u64_a1 *)out)[0] = scratch.u[0] ^ ((u64_a1 *)inp)[0];
+        ((u64_a1 *)out)[1] = scratch.u[1] ^ ((u64_a1 *)inp)[1];
 #endif
         inp += 16;
         out += 16;
@@ -254,8 +262,10 @@ int CRYPTO_ccm128_decrypt(CCM128_CONTEXT *ctx,
         ctx->cmac.u[1] ^= (scratch.u[1] ^= temp.u[1]);
         memcpy(out, scratch.c, 16);
 #else
-        ctx->cmac.u[0] ^= (((u64 *)out)[0] = scratch.u[0] ^ ((u64 *)inp)[0]);
-        ctx->cmac.u[1] ^= (((u64 *)out)[1] = scratch.u[1] ^ ((u64 *)inp)[1]);
+        ctx->cmac.u[0] ^= (((u64_a1 *)out)[0]
+                           = scratch.u[0] ^ ((u64_a1 *)inp)[0]);
+        ctx->cmac.u[1] ^= (((u64_a1 *)out)[1]
+                           = scratch.u[1] ^ ((u64_a1 *)inp)[1]);
 #endif
         (*block) (ctx->cmac.c, ctx->cmac.c, key);
 
diff --git a/crypto/modes/cfb128.c b/crypto/modes/cfb128.c
index b6bec414a9..806adb4ead 100644
--- a/crypto/modes/cfb128.c
+++ b/crypto/modes/cfb128.c
@@ -11,6 +11,12 @@
 #include "modes_local.h"
 #include <string.h>
 
+#if defined(__GNUC__) && !defined(STRICT_ALIGNMENT)
+typedef size_t size_t_aX __attribute((__aligned__(1)));
+#else
+typedef size_t size_t_aX;
+#endif
+
 /*
  * The input and output encrypted as though 128bit cfb mode is being used.
  * The extra state information to record how much of the 128bit block we have
@@ -43,8 +49,9 @@ void CRYPTO_cfb128_encrypt(const unsigned char *in, unsigned char *out,
             while (len >= 16) {
                 (*block) (ivec, ivec, key);
                 for (; n < 16; n += sizeof(size_t)) {
-                    *(size_t *)(out + n) =
-                        *(size_t *)(ivec + n) ^= *(size_t *)(in + n);
+                    *(size_t_aX *)(out + n) =
+                        *(size_t_aX *)(ivec + n)
+                        ^= *(size_t_aX *)(in + n);
                 }
                 len -= 16;
                 out += 16;
@@ -92,9 +99,10 @@ void CRYPTO_cfb128_encrypt(const unsigned char *in, unsigned char *out,
             while (len >= 16) {
                 (*block) (ivec, ivec, key);
                 for (; n < 16; n += sizeof(size_t)) {
-                    size_t t = *(size_t *)(in + n);
-                    *(size_t *)(out + n) = *(size_t *)(ivec + n) ^ t;
-                    *(size_t *)(ivec + n) = t;
+                    size_t t = *(size_t_aX *)(in + n);
+                    *(size_t_aX *)(out + n)
+                        = *(size_t_aX *)(ivec + n) ^ t;
+                    *(size_t_aX *)(ivec + n) = t;
                 }
                 len -= 16;
                 out += 16;
diff --git a/crypto/modes/ctr128.c b/crypto/modes/ctr128.c
index ae35116e95..9e610f4f49 100644
--- a/crypto/modes/ctr128.c
+++ b/crypto/modes/ctr128.c
@@ -11,6 +11,12 @@
 #include "modes_local.h"
 #include <string.h>
 
+#if defined(__GNUC__) && !defined(STRICT_ALIGNMENT)
+typedef size_t size_t_aX __attribute((__aligned__(1)));
+#else
+typedef size_t size_t_aX;
+#endif
+
 /*
  * NOTE: the IV/counter CTR mode is big-endian. The code itself is
  * endian-neutral.
@@ -97,8 +103,9 @@ void CRYPTO_ctr128_encrypt(const unsigned char *in, unsigned char *out,
         (*block) (ivec, ecount_buf, key);
         ctr128_inc_aligned(ivec);
         for (n = 0; n < 16; n += sizeof(size_t))
-            *(size_t *)(out + n) =
-                *(size_t *)(in + n) ^ *(size_t *)(ecount_buf + n);
+            *(size_t_aX *)(out + n) =
+                *(size_t_aX *)(in + n)
+                ^ *(size_t_aX *)(ecount_buf + n);
         len -= 16;
         out += 16;
         in += 16;
diff --git a/crypto/modes/gcm128.c b/crypto/modes/gcm128.c
index 48775e6d05..cdab35339d 100644
--- a/crypto/modes/gcm128.c
+++ b/crypto/modes/gcm128.c
@@ -11,6 +11,12 @@
 #include "modes_local.h"
 #include <string.h>
 
+#if defined(__GNUC__) && !defined(STRICT_ALIGNMENT)
+typedef size_t size_t_aX __attribute((__aligned__(1)));
+#else
+typedef size_t size_t_aX;
+#endif
+
 #if defined(BSWAP4) && defined(STRICT_ALIGNMENT)
 /* redefine, because alignment is ensured */
 # undef GETU32
@@ -1080,8 +1086,8 @@ int CRYPTO_gcm128_encrypt(GCM128_CONTEXT *ctx,
             size_t j = GHASH_CHUNK;
 
             while (j) {
-                size_t *out_t = (size_t *)out;
-                const size_t *in_t = (const size_t *)in;
+                size_t_aX *out_t = (size_t_aX *)out;
+                const size_t_aX *in_t = (const size_t_aX *)in;
 
                 (*block) (ctx->Yi.c, ctx->EKi.c, key);
                 ++ctr;
@@ -1107,8 +1113,8 @@ int CRYPTO_gcm128_encrypt(GCM128_CONTEXT *ctx,
             size_t j = i;
 
             while (len >= 16) {
-                size_t *out_t = (size_t *)out;
-                const size_t *in_t = (const size_t *)in;
+                size_t_aX *out_t = (size_t_aX *)out;
+                const size_t_aX *in_t = (const size_t_aX *)in;
 
                 (*block) (ctx->Yi.c, ctx->EKi.c, key);
                 ++ctr;
@@ -1318,8 +1324,8 @@ int CRYPTO_gcm128_decrypt(GCM128_CONTEXT *ctx,
 
             GHASH(ctx, in, GHASH_CHUNK);
             while (j) {
-                size_t *out_t = (size_t *)out;
-                const size_t *in_t = (const size_t *)in;
+                size_t_aX *out_t = (size_t_aX *)out;
+                const size_t_aX *in_t = (const size_t_aX *)in;
 
                 (*block) (ctx->Yi.c, ctx->EKi.c, key);
                 ++ctr;
@@ -1343,8 +1349,8 @@ int CRYPTO_gcm128_decrypt(GCM128_CONTEXT *ctx,
         if ((i = (len & (size_t)-16))) {
             GHASH(ctx, in, i);
             while (len >= 16) {
-                size_t *out_t = (size_t *)out;
-                const size_t *in_t = (const size_t *)in;
+                size_t_aX *out_t = (size_t_aX *)out;
+                const size_t_aX *in_t = (const size_t_aX *)in;
 
                 (*block) (ctx->Yi.c, ctx->EKi.c, key);
                 ++ctr;
diff --git a/crypto/modes/modes_local.h b/crypto/modes/modes_local.h
index f2ae01d11a..abcca797d7 100644
--- a/crypto/modes/modes_local.h
+++ b/crypto/modes/modes_local.h
@@ -37,6 +37,14 @@ typedef unsigned char u8;
 # endif
 #endif
 
+#ifndef STRICT_ALIGNMENT
+# ifdef __GNUC__
+typedef u32 u32_a1 __attribute((__aligned__(1)));
+# else
+typedef u32 u32_a1;
+# endif
+#endif
+
 #if !defined(PEDANTIC) && !defined(OPENSSL_NO_ASM) && !defined(OPENSSL_NO_INLINE_ASM)
 # if defined(__GNUC__) && __GNUC__>=2
 #  if defined(__x86_64) || defined(__x86_64__)
@@ -86,8 +94,8 @@ _asm mov eax, val _asm bswap eax}
 # endif
 #endif
 #if defined(BSWAP4) && !defined(STRICT_ALIGNMENT)
-# define GETU32(p)       BSWAP4(*(const u32 *)(p))
-# define PUTU32(p,v)     *(u32 *)(p) = BSWAP4(v)
+# define GETU32(p)       BSWAP4(*(const u32_a1 *)(p))
+# define PUTU32(p,v)     *(u32_a1 *)(p) = BSWAP4(v)
 #else
 # define GETU32(p)       ((u32)(p)[0]<<24|(u32)(p)[1]<<16|(u32)(p)[2]<<8|(u32)(p)[3])
 # define PUTU32(p,v)     ((p)[0]=(u8)((v)>>24),(p)[1]=(u8)((v)>>16),(p)[2]=(u8)((v)>>8),(p)[3]=(u8)(v))
diff --git a/crypto/modes/ofb128.c b/crypto/modes/ofb128.c
index 44bdf888db..df0fb559cb 100644
--- a/crypto/modes/ofb128.c
+++ b/crypto/modes/ofb128.c
@@ -11,6 +11,12 @@
 #include "modes_local.h"
 #include <string.h>
 
+#if defined(__GNUC__) && !defined(STRICT_ALIGNMENT)
+typedef size_t size_t_aX __attribute((__aligned__(1)));
+#else
+typedef size_t size_t_aX;
+#endif
+
 /*
 * The input and output encrypted as though 128bit ofb mode is being used.
 * The extra state information to record how much of the 128bit block we have
@@ -41,8 +47,9 @@ void CRYPTO_ofb128_encrypt(const unsigned char *in, unsigned char *out,
         while (len >= 16) {
             (*block) (ivec, ivec, key);
             for (; n < 16; n += sizeof(size_t))
-                *(size_t *)(out + n) =
-                    *(size_t *)(in + n) ^ *(size_t *)(ivec + n);
+                *(size_t_aX *)(out + n) =
+                    *(size_t_aX *)(in + n)
+                    ^ *(size_t_aX *)(ivec + n);
             len -= 16;
             out += 16;
             in += 16;
diff --git a/crypto/modes/xts128.c b/crypto/modes/xts128.c
index b5bda5e640..b2d3fff74c 100644
--- a/crypto/modes/xts128.c
+++ b/crypto/modes/xts128.c
@@ -11,6 +11,14 @@
 #include "modes_local.h"
 #include <string.h>
 
+#ifndef STRICT_ALIGNMENT
+# ifdef __GNUC__
+typedef u64 u64_a1 __attribute((__aligned__(1)));
+# else
+typedef u64 u64_a1;
+# endif
+#endif
+
 int CRYPTO_xts128_encrypt(const XTS128_CONTEXT *ctx,
                           const unsigned char iv[16],
                           const unsigned char *inp, unsigned char *out,
@@ -45,8 +53,8 @@ int CRYPTO_xts128_encrypt(const XTS128_CONTEXT *ctx,
         scratch.u[0] ^= tweak.u[0];
         scratch.u[1] ^= tweak.u[1];
 #else
-        scratch.u[0] = ((u64 *)inp)[0] ^ tweak.u[0];
-        scratch.u[1] = ((u64 *)inp)[1] ^ tweak.u[1];
+        scratch.u[0] = ((u64_a1 *)inp)[0] ^ tweak.u[0];
+        scratch.u[1] = ((u64_a1 *)inp)[1] ^ tweak.u[1];
 #endif
         (*ctx->block1) (scratch.c, scratch.c, ctx->key1);
 #if defined(STRICT_ALIGNMENT)
@@ -54,8 +62,8 @@ int CRYPTO_xts128_encrypt(const XTS128_CONTEXT *ctx,
         scratch.u[1] ^= tweak.u[1];
         memcpy(out, scratch.c, 16);
 #else
-        ((u64 *)out)[0] = scratch.u[0] ^= tweak.u[0];
-        ((u64 *)out)[1] = scratch.u[1] ^= tweak.u[1];
+        ((u64_a1 *)out)[0] = scratch.u[0] ^= tweak.u[0];
+        ((u64_a1 *)out)[1] = scratch.u[1] ^= tweak.u[1];
 #endif
         inp += 16;
         out += 16;
@@ -128,8 +136,8 @@ int CRYPTO_xts128_encrypt(const XTS128_CONTEXT *ctx,
         scratch.u[0] ^= tweak1.u[0];
         scratch.u[1] ^= tweak1.u[1];
 #else
-        scratch.u[0] = ((u64 *)inp)[0] ^ tweak1.u[0];
-        scratch.u[1] = ((u64 *)inp)[1] ^ tweak1.u[1];
+        scratch.u[0] = ((u64_a1 *)inp)[0] ^ tweak1.u[0];
+        scratch.u[1] = ((u64_a1 *)inp)[1] ^ tweak1.u[1];
 #endif
         (*ctx->block1) (scratch.c, scratch.c, ctx->key1);
         scratch.u[0] ^= tweak1.u[0];
@@ -148,8 +156,8 @@ int CRYPTO_xts128_encrypt(const XTS128_CONTEXT *ctx,
         scratch.u[1] ^= tweak.u[1];
         memcpy(out, scratch.c, 16);
 #else
-        ((u64 *)out)[0] = scratch.u[0] ^ tweak.u[0];
-        ((u64 *)out)[1] = scratch.u[1] ^ tweak.u[1];
+        ((u64_a1 *)out)[0] = scratch.u[0] ^ tweak.u[0];
+        ((u64_a1 *)out)[1] = scratch.u[1] ^ tweak.u[1];
 #endif
     }
 
diff --git a/crypto/whrlpool/wp_block.c b/crypto/whrlpool/wp_block.c
index c21c04dbc1..0e31253e1d 100644
--- a/crypto/whrlpool/wp_block.c
+++ b/crypto/whrlpool/wp_block.c
@@ -63,6 +63,20 @@ typedef unsigned long long u64;
 # undef STRICT_ALIGNMENT
 #endif
 
+#ifndef STRICT_ALIGNMENT
+# ifdef __GNUC__
+typedef u64 u64_a1 __attribute((__aligned__(1)));
+# else
+typedef u64 u64_a1;
+# endif
+#endif
+
+#if defined(__GNUC__) && !defined(STRICT_ALIGNMENT)
+typedef u64 u64_aX __attribute((__aligned__(1)));
+#else
+typedef u64 u64_aX;
+#endif
+
 #undef SMALL_REGISTER_BANK
 #if defined(__i386) || defined(__i386__) || defined(_M_IX86)
 # define SMALL_REGISTER_BANK
@@ -191,13 +205,13 @@ typedef unsigned long long u64;
 # define LL(c0,c1,c2,c3,c4,c5,c6,c7)   c0,c1,c2,c3,c4,c5,c6,c7, \
                                        c0,c1,c2,c3,c4,c5,c6,c7
 # define C0(K,i)       (((u64*)(Cx.c+0))[2*K.c[(i)*8+0]])
-# define C1(K,i)       (((u64*)(Cx.c+7))[2*K.c[(i)*8+1]])
-# define C2(K,i)       (((u64*)(Cx.c+6))[2*K.c[(i)*8+2]])
-# define C3(K,i)       (((u64*)(Cx.c+5))[2*K.c[(i)*8+3]])
-# define C4(K,i)       (((u64*)(Cx.c+4))[2*K.c[(i)*8+4]])
-# define C5(K,i)       (((u64*)(Cx.c+3))[2*K.c[(i)*8+5]])
-# define C6(K,i)       (((u64*)(Cx.c+2))[2*K.c[(i)*8+6]])
-# define C7(K,i)       (((u64*)(Cx.c+1))[2*K.c[(i)*8+7]])
+# define C1(K,i)       (((u64_a1*)(Cx.c+7))[2*K.c[(i)*8+1]])
+# define C2(K,i)       (((u64_a1*)(Cx.c+6))[2*K.c[(i)*8+2]])
+# define C3(K,i)       (((u64_a1*)(Cx.c+5))[2*K.c[(i)*8+3]])
+# define C4(K,i)       (((u64_a1*)(Cx.c+4))[2*K.c[(i)*8+4]])
+# define C5(K,i)       (((u64_a1*)(Cx.c+3))[2*K.c[(i)*8+5]])
+# define C6(K,i)       (((u64_a1*)(Cx.c+2))[2*K.c[(i)*8+6]])
+# define C7(K,i)       (((u64_a1*)(Cx.c+1))[2*K.c[(i)*8+7]])
 #endif
 
 static const
@@ -531,7 +545,7 @@ void whirlpool_block(WHIRLPOOL_CTX *ctx, const void *inp, size_t n)
         } else
 # endif
         {
-            const u64 *pa = (const u64 *)p;
+            const u64_aX *pa = (const u64_aX *)p;
             S.q[0] = (K.q[0] = H->q[0]) ^ pa[0];
             S.q[1] = (K.q[1] = H->q[1]) ^ pa[1];
             S.q[2] = (K.q[2] = H->q[2]) ^ pa[2];
@@ -769,7 +783,7 @@ void whirlpool_block(WHIRLPOOL_CTX *ctx, const void *inp, size_t n)
         } else
 # endif
         {
-            const u64 *pa = (const u64 *)p;
+            const u64_aX *pa = (const u64_aX *)p;
             H->q[0] ^= S.q[0] ^ pa[0];
             H->q[1] ^= S.q[1] ^ pa[1];
             H->q[2] ^= S.q[2] ^ pa[2];