From: Andy Polyakov <appro@openssl.org>
Date: Wed, 2 Oct 2013 22:24:03 +0000 (+0200)
Subject: evp/e_aes_cbc_hmac_sha*.c: multi-block glue code.
X-Git-Tag: master-post-reformat~1158
X-Git-Url: https://git.librecmc.org/?a=commitdiff_plain;h=7f893258f6e9298a24fbfced5331e4867df4fd6c;p=oweals%2Fopenssl.git

evp/e_aes_cbc_hmac_sha*.c: multi-block glue code.
---

diff --git a/crypto/evp/e_aes_cbc_hmac_sha1.c b/crypto/evp/e_aes_cbc_hmac_sha1.c
index b8fb706eec..583fe8cc48 100644
--- a/crypto/evp/e_aes_cbc_hmac_sha1.c
+++ b/crypto/evp/e_aes_cbc_hmac_sha1.c
@@ -58,6 +58,8 @@
 #include <openssl/objects.h>
 #include <openssl/aes.h>
 #include <openssl/sha.h>
+#include <openssl/rand.h>
+#include "modes_lcl.h"
 
 #ifndef EVP_CIPH_FLAG_AEAD_CIPHER
 #define EVP_CIPH_FLAG_AEAD_CIPHER 0x200000
@@ -69,6 +71,10 @@
 #define EVP_CIPH_FLAG_DEFAULT_ASN1 0
 #endif
 
+#if !defined(EVP_CIPH_FLAG_TLS11_MULTI_BLOCK)
+#define EVP_CIPH_FLAG_TLS11_MULTI_BLOCK 0
+#endif
+
 #define TLS1_1_VERSION 0x0302
 
 typedef struct
@@ -89,10 +95,6 @@ typedef struct
     defined(_M_AMD64) || defined(_M_X64) || \
     defined(__INTEL__) )
 
-#if defined(__GNUC__) && __GNUC__>=2 && !defined(PEDANTIC)
-# define BSWAP(x) ({ unsigned int r=(x); asm ("bswapl %0":"=r"(r):"0"(r)); r; })
-#endif
-
 extern unsigned int OPENSSL_ia32cap_P[2];
 
 #define AESNI_CAPABLE (1<<(57-32))
@@ -175,6 +177,177 @@ static void sha1_update(SHA_CTX *c,const void *data,size_t len)
 #endif
 #define SHA1_Update sha1_update
 
+#if EVP_CIPH_FLAG_TLS11_MULTI_BLOCK
+
+typedef struct { unsigned int A[8],B[8],C[8],D[8],E[8]; } SHA1_MB_CTX;
+typedef struct { const unsigned char *ptr; int blocks; } HASH_DESC;
+
+void sha1_multi_block(SHA1_MB_CTX *,const HASH_DESC *,int);
+
+typedef struct { const unsigned char *inp; unsigned char *out;
+                 int blocks; double iv[2]; } CIPH_DESC;
+
+void aesni_multi_cbc_encrypt(CIPH_DESC *,void *,int);
+
+static size_t tls11_multi_block_encrypt(EVP_AES_HMAC_SHA1 *key,
+    unsigned char *out, const unsigned char *inp, size_t inp_len,
+    int n4x)    /* n4x is 1 or 2 */
+{
+    HASH_DESC hash_d[8], edges[8];
+    CIPH_DESC ciph_d[8];
+    unsigned char storage[sizeof(SHA1_MB_CTX)+32];
+    union { u64 q[16];
+            u32 d[32];
+            u8  c[128]; } blocks[8];
+    SHA1_MB_CTX *ctx;
+    unsigned int frag, last, packlen, i, x4=4*n4x;
+    size_t ret = 0;
+
+    ctx = (SHA1_MB_CTX *)(storage+32-((size_t)storage%32)); /* align */
+
+    frag = (unsigned int)inp_len>>(1+n4x);
+    last = (unsigned int)inp_len+frag-(frag<<(1+n4x));
+    if (last>frag && ((last+13+9)%64)<(x4-1)) {
+        frag++;
+        last -= x4-1;
+    }
+
+    hash_d[0].ptr = inp;
+    for (i=1;i<x4;i++) hash_d[i].ptr = hash_d[i-1].ptr+frag;
+
+    for (i=0;i<x4;i++) {
+        unsigned int len = (i==(x4-1)?last:frag);
+
+        ctx->A[i] = key->md.h0;
+        ctx->B[i] = key->md.h1;
+        ctx->C[i] = key->md.h2;
+        ctx->D[i] = key->md.h3;
+        ctx->E[i] = key->md.h4;
+
+        /* fix seqnum */
+#if defined(BSWAP8)
+        blocks[i].q[0] = BSWAP8(BSWAP8(*(u64*)key->md.data)+i);
+#else
+        blocks[i].c[7] += key->md.data[7]+i;
+        if (blocks[i].c[7] < i) {
+            int j;
+
+            for (j=6;j>=0;j--) {
+                if (blocks[i].c[j]=key->md.data[j]+1) break;
+            }
+        }
+#endif
+        blocks[i].c[8] = key->md.data[8];
+        blocks[i].c[9] = key->md.data[9];
+        blocks[i].c[10] = key->md.data[10];
+        /* fix length */
+        blocks[i].c[11] = (unsigned char)(len>>8);
+        blocks[i].c[12] = (unsigned char)(len);
+
+        memcpy(blocks[i].c+13,hash_d[i].ptr,64-13);
+        hash_d[i].ptr += 64-13;
+        hash_d[i].blocks = (len-(64-13))/64;
+
+        edges[i].ptr = blocks[i].c;
+        edges[i].blocks = 1;
+    }
+
+    sha1_multi_block(ctx,edges,n4x);
+    sha1_multi_block(ctx,hash_d,n4x);
+
+    memset(blocks,0,sizeof(blocks));
+    for (i=0;i<x4;i++) {
+        blocks[i].d[0] = BSWAP4(ctx->A[i]); ctx->A[i] = key->tail.h0;
+        blocks[i].d[1] = BSWAP4(ctx->B[i]); ctx->B[i] = key->tail.h1;
+        blocks[i].d[2] = BSWAP4(ctx->C[i]); ctx->C[i] = key->tail.h2;
+        blocks[i].d[3] = BSWAP4(ctx->D[i]); ctx->D[i] = key->tail.h3;
+        blocks[i].d[4] = BSWAP4(ctx->E[i]); ctx->E[i] = key->tail.h4;
+        blocks[i].c[20] = 0x80;
+        blocks[i].d[15] = BSWAP4((64+20)*8);
+        edges[i].ptr = blocks[i].c;
+        edges[i].blocks = 1;
+    }
+
+    sha1_multi_block(ctx,edges,n4x);
+
+    packlen = 5+16+((frag+20+16)&-16);
+
+    out += (packlen<<(1+n4x))-packlen;
+    inp += (frag<<(1+n4x))-frag;
+
+    for (i=x4-1;;i--) {
+        unsigned int len = (i==(x4-1)?last:frag), pad, j;
+        unsigned char *out0 = out;
+
+        out += 5+16;    /* place for header and explicit IV */
+        ciph_d[i].inp = out;
+        ciph_d[i].out = out;
+
+        memmove(out,inp,len);
+        out += len;
+        inp -= frag;
+
+        ((u32 *)out)[0] = BSWAP4(ctx->A[i]);
+        ((u32 *)out)[1] = BSWAP4(ctx->B[i]);
+        ((u32 *)out)[2] = BSWAP4(ctx->C[i]);
+        ((u32 *)out)[3] = BSWAP4(ctx->D[i]);
+        ((u32 *)out)[4] = BSWAP4(ctx->E[i]);
+        out += 20;
+        len += 20+16;
+
+        pad = 15-len%16;
+        for (j=0;j<=pad;j++) *(out++) = pad;
+        len += pad+1;
+
+        ciph_d[i].blocks = len/16;
+
+        /* arrange header */
+        out0[0] = key->md.data[8];
+        out0[1] = key->md.data[9];
+        out0[2] = key->md.data[10];
+        out0[3] = (unsigned char)(len>>8);
+        out0[4] = (unsigned char)(len);
+
+        /* explicit iv */
+        RAND_bytes((u8 *)ciph_d[i].iv, 16);
+        memcpy(&out[5], ciph_d[i].iv, 16);
+
+        ret += len+5;
+
+        if (i==0) break;
+
+        out = out0-packlen;
+    }
+
+    aesni_multi_cbc_encrypt(ciph_d,&key->ks,n4x);
+
+    return ret;
+}
+#endif
+
 static int aesni_cbc_hmac_sha1_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out,
     const unsigned char *in, size_t len)
 {
@@ -302,8 +475,8 @@ static int aesni_cbc_hmac_sha1_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out,
         /* but pretend as if we hashed padded payload */
         bitlen = key->md.Nl+(inp_len<<3);   /* at most 18 bits */
-#ifdef BSWAP
-        bitlen = BSWAP(bitlen);
+#ifdef BSWAP4
+        bitlen = BSWAP4(bitlen);
 #else
         mac.c[0] = 0;
         mac.c[1] = (unsigned char)(bitlen>>16);
@@ -365,12 +538,12 @@ static int aesni_cbc_hmac_sha1_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out,
         pmac->u[3] |= key->md.h3 & mask;
         pmac->u[4] |= key->md.h4 & mask;
 
-#ifdef BSWAP
-        pmac->u[0] = BSWAP(pmac->u[0]);
-        pmac->u[1] = BSWAP(pmac->u[1]);
-        pmac->u[2] = BSWAP(pmac->u[2]);
-        pmac->u[3] = BSWAP(pmac->u[3]);
-        pmac->u[4] = BSWAP(pmac->u[4]);
+#ifdef BSWAP4
+        pmac->u[0] = BSWAP4(pmac->u[0]);
+        pmac->u[1] = BSWAP4(pmac->u[1]);
+        pmac->u[2] = BSWAP4(pmac->u[2]);
+        pmac->u[3] = BSWAP4(pmac->u[3]);
+        pmac->u[4] = BSWAP4(pmac->u[4]);
 #else
         for (i=0;i<5;i++) {
             res = pmac->u[i];
@@ -513,6 +686,60 @@ static int aesni_cbc_hmac_sha1_ctrl(EVP_CIPHER_CTX *ctx, int type, int arg, void
             return SHA_DIGEST_LENGTH;
         }
     }
+#if EVP_CIPH_FLAG_TLS11_MULTI_BLOCK
+    case EVP_CTRL_TLS11_MULTI_BLOCK_AAD:
+        {
+        EVP_CTRL_TLS11_MULTI_BLOCK_PARAM *param =
+            (EVP_CTRL_TLS11_MULTI_BLOCK_PARAM *)ptr;
+        unsigned int n4x=1, x4;
+        unsigned int frag, last, packlen, inp_len;
+
+        if (arg<sizeof(EVP_CTRL_TLS11_MULTI_BLOCK_PARAM)) return -1;
+
+        inp_len = param->inp[11]<<8|param->inp[12];
+
+        if (ctx->encrypt)
+            {
+            if ((param->inp[9]<<8|param->inp[10]) < TLS1_1_VERSION)
+                return -1;
+
+            if (inp_len<2048) return -1;    /* too short */
+
+            if (inp_len>=6144) n4x=2;
+
+            key->md = key->head;
+            SHA1_Update(&key->md,param->inp,13);
+
+            x4 = 4*n4x; n4x += 1;
+
+            frag = inp_len>>n4x;
+            last = inp_len+frag-(frag<<n4x);
+            if (last>frag && ((last+13+9)%64<(x4-1))) {
+                frag++;
+                last -= x4-1;
+            }
+
+            packlen = 5+16+((frag+20+16)&-16);
+            packlen = (packlen<<(1+n4x))-packlen;
+            packlen += 5+16+((last+20+16)&-16);
+
+            param->interleave = x4;
+
+            return (int)packlen;
+            }
+        else
+            return -1;  /* not yet */
+        }
+    case EVP_CTRL_TLS11_MULTI_BLOCK_ENCRYPT:
+        {
+        EVP_CTRL_TLS11_MULTI_BLOCK_PARAM *param =
+            (EVP_CTRL_TLS11_MULTI_BLOCK_PARAM *)ptr;
+
+        return tls11_multi_block_encrypt(key,param->out,param->inp,
+                    param->len,param->interleave/4);
+        }
+    case EVP_CTRL_TLS11_MULTI_BLOCK_DECRYPT:
+#endif
     default:
         return -1;
     }
@@ -526,7 +753,8 @@ static EVP_CIPHER aesni_128_cbc_hmac_sha1_cipher =
     NID_undef,
 #endif
     16,16,16,
-    EVP_CIPH_CBC_MODE|EVP_CIPH_FLAG_DEFAULT_ASN1|EVP_CIPH_FLAG_AEAD_CIPHER,
+    EVP_CIPH_CBC_MODE|EVP_CIPH_FLAG_DEFAULT_ASN1|
+    EVP_CIPH_FLAG_AEAD_CIPHER|EVP_CIPH_FLAG_TLS11_MULTI_BLOCK,
     aesni_cbc_hmac_sha1_init_key,
     aesni_cbc_hmac_sha1_cipher,
     NULL,
@@ -545,7 +773,8 @@ static EVP_CIPHER aesni_256_cbc_hmac_sha1_cipher =
     NID_undef,
 #endif
     16,32,16,
-    EVP_CIPH_CBC_MODE|EVP_CIPH_FLAG_DEFAULT_ASN1|EVP_CIPH_FLAG_AEAD_CIPHER,
+    EVP_CIPH_CBC_MODE|EVP_CIPH_FLAG_DEFAULT_ASN1|
+    EVP_CIPH_FLAG_AEAD_CIPHER|EVP_CIPH_FLAG_TLS11_MULTI_BLOCK,
     aesni_cbc_hmac_sha1_init_key,
     aesni_cbc_hmac_sha1_cipher,
     NULL,
diff --git a/crypto/evp/e_aes_cbc_hmac_sha256.c b/crypto/evp/e_aes_cbc_hmac_sha256.c
index ff4eca2a57..7d03abc1a6 100644
--- a/crypto/evp/e_aes_cbc_hmac_sha256.c
+++ b/crypto/evp/e_aes_cbc_hmac_sha256.c
@@ -58,6 +58,8 @@
 #include <openssl/objects.h>
 #include <openssl/aes.h>
 #include <openssl/sha.h>
+#include <openssl/rand.h>
+#include "modes_lcl.h"
 
 #ifndef EVP_CIPH_FLAG_AEAD_CIPHER
 #define EVP_CIPH_FLAG_AEAD_CIPHER 0x200000
@@ -69,6 +71,10 @@
 #define EVP_CIPH_FLAG_DEFAULT_ASN1 0
 #endif
 
+#if !defined(EVP_CIPH_FLAG_TLS11_MULTI_BLOCK)
+#define EVP_CIPH_FLAG_TLS11_MULTI_BLOCK 0
+#endif
+
 #define TLS1_1_VERSION 0x0302
 
 typedef struct
@@ -89,10 +95,6 @@ typedef struct
     defined(_M_AMD64) || defined(_M_X64) || \
     defined(__INTEL__) )
 
-#if defined(__GNUC__) && __GNUC__>=2 && !defined(PEDANTIC)
-# define BSWAP(x) ({ unsigned int r=(x); asm ("bswapl %0":"=r"(r):"0"(r)); r; })
-#endif
-
 extern unsigned int OPENSSL_ia32cap_P[3];
 
 #define AESNI_CAPABLE (1<<(57-32))
@@ -176,6 +178,186 @@ static void sha256_update(SHA256_CTX *c,const void *data,size_t len)
 #endif
 #define SHA256_Update sha256_update
 
+#if EVP_CIPH_FLAG_TLS11_MULTI_BLOCK
+
+typedef struct { unsigned int A[8],B[8],C[8],D[8],E[8],F[8],G[8],H[8]; } SHA256_MB_CTX;
+typedef struct { const unsigned char *ptr; int blocks; } HASH_DESC;
+
+void sha256_multi_block(SHA256_MB_CTX *,const HASH_DESC *,int);
+
+typedef struct { const unsigned char *inp; unsigned char *out;
+                 int blocks; double iv[2]; } CIPH_DESC;
+
+void aesni_multi_cbc_encrypt(CIPH_DESC *,void *,int);
+
+static size_t tls11_multi_block_encrypt(EVP_AES_HMAC_SHA256 *key,
+    unsigned char *out, const unsigned char *inp, size_t inp_len,
+    int n4x)    /* n4x is 1 or 2 */
+{
+    HASH_DESC hash_d[8], edges[8];
+    CIPH_DESC ciph_d[8];
+    unsigned char storage[sizeof(SHA256_MB_CTX)+32];
+    union { u64 q[16];
+            u32 d[32];
+            u8  c[128]; } blocks[8];
+    SHA256_MB_CTX *ctx;
+    unsigned int frag, last, packlen, i, x4=4*n4x;
+    size_t ret = 0;
+
+    ctx = (SHA256_MB_CTX *)(storage+32-((size_t)storage%32)); /* align */
+
+    frag = (unsigned int)inp_len>>(1+n4x);
+    last = (unsigned int)inp_len+frag-(frag<<(1+n4x));
+    if (last>frag && ((last+13+9)%64)<(x4-1)) {
+        frag++;
+        last -= x4-1;
+    }
+
+    hash_d[0].ptr = inp;
+    for (i=1;i<x4;i++) hash_d[i].ptr = hash_d[i-1].ptr+frag;
+
+    for (i=0;i<x4;i++) {
+        unsigned int len = (i==(x4-1)?last:frag);
+
+        ctx->A[i] = key->md.h[0];
+        ctx->B[i] = key->md.h[1];
+        ctx->C[i] = key->md.h[2];
+        ctx->D[i] = key->md.h[3];
+        ctx->E[i] = key->md.h[4];
+        ctx->F[i] = key->md.h[5];
+        ctx->G[i] = key->md.h[6];
+        ctx->H[i] = key->md.h[7];
+
+        /* fix seqnum */
+#if defined(BSWAP8)
+        blocks[i].q[0] = BSWAP8(BSWAP8(*(u64*)key->md.data)+i);
+#else
+        blocks[i].c[7] += key->md.data[7]+i;
+        if (blocks[i].c[7] < i) {
+            int j;
+
+            for (j=6;j>=0;j--) {
+                if (blocks[i].c[j]=key->md.data[j]+1) break;
+            }
+        }
+#endif
+        blocks[i].c[8] = key->md.data[8];
+        blocks[i].c[9] = key->md.data[9];
+        blocks[i].c[10] = key->md.data[10];
+        /* fix length */
+        blocks[i].c[11] = (unsigned char)(len>>8);
+        blocks[i].c[12] = (unsigned char)(len);
+
+        memcpy(blocks[i].c+13,hash_d[i].ptr,64-13);
+        hash_d[i].ptr += 64-13;
+        hash_d[i].blocks = (len-(64-13))/64;
+
+        edges[i].ptr = blocks[i].c;
+        edges[i].blocks = 1;
+    }
+
+    sha256_multi_block(ctx,edges,n4x);
+    sha256_multi_block(ctx,hash_d,n4x);
+
+    memset(blocks,0,sizeof(blocks));
+    for (i=0;i<x4;i++) {
+        blocks[i].d[0] = BSWAP4(ctx->A[i]); ctx->A[i] = key->tail.h[0];
+        blocks[i].d[1] = BSWAP4(ctx->B[i]); ctx->B[i] = key->tail.h[1];
+        blocks[i].d[2] = BSWAP4(ctx->C[i]); ctx->C[i] = key->tail.h[2];
+        blocks[i].d[3] = BSWAP4(ctx->D[i]); ctx->D[i] = key->tail.h[3];
+        blocks[i].d[4] = BSWAP4(ctx->E[i]); ctx->E[i] = key->tail.h[4];
+        blocks[i].d[5] = BSWAP4(ctx->F[i]); ctx->F[i] = key->tail.h[5];
+        blocks[i].d[6] = BSWAP4(ctx->G[i]); ctx->G[i] = key->tail.h[6];
+        blocks[i].d[7] = BSWAP4(ctx->H[i]); ctx->H[i] = key->tail.h[7];
+        blocks[i].c[32] = 0x80;
+        blocks[i].d[15] = BSWAP4((64+32)*8);
+        edges[i].ptr = blocks[i].c;
+        edges[i].blocks = 1;
+    }
+
+    sha256_multi_block(ctx,edges,n4x);
+
+    packlen = 5+16+((frag+32+16)&-16);
+
+    out += (packlen<<(1+n4x))-packlen;
+    inp += (frag<<(1+n4x))-frag;
+
+    for (i=x4-1;;i--) {
+        unsigned int len = (i==(x4-1)?last:frag), pad, j;
+        unsigned char *out0 = out;
+
+        out += 5+16;    /* place for header and explicit IV */
+        ciph_d[i].inp = out;
+        ciph_d[i].out = out;
+
+        memmove(out,inp,len);
+        out += len;
+        inp -= frag;
+
+        ((u32 *)out)[0] = BSWAP4(ctx->A[i]);
+        ((u32 *)out)[1] = BSWAP4(ctx->B[i]);
+        ((u32 *)out)[2] = BSWAP4(ctx->C[i]);
+        ((u32 *)out)[3] = BSWAP4(ctx->D[i]);
+        ((u32 *)out)[4] = BSWAP4(ctx->E[i]);
+        ((u32 *)out)[5] = BSWAP4(ctx->F[i]);
+        ((u32 *)out)[6] = BSWAP4(ctx->G[i]);
+        ((u32 *)out)[7] = BSWAP4(ctx->H[i]);
+        out += 32;
+        len += 32+16;
+
+        pad = 15-len%16;
+        for (j=0;j<=pad;j++) *(out++) = pad;
+        len += pad+1;
+
+        ciph_d[i].blocks = len/16;
+
+        /* arrange header */
+        out0[0] = key->md.data[8];
+        out0[1] = key->md.data[9];
+        out0[2] = key->md.data[10];
+        out0[3] = (unsigned char)(len>>8);
+        out0[4] = (unsigned char)(len);
+
+        /* explicit iv */
+        RAND_bytes((u8 *)ciph_d[i].iv, 16);
+        memcpy(&out[5], ciph_d[i].iv, 16);
+
+        ret += len+5;
+
+        if (i==0) break;
+
+        out = out0-packlen;
+    }
+
+    aesni_multi_cbc_encrypt(ciph_d,&key->ks,n4x);
+
+    return ret;
+}
+#endif
+
 static int aesni_cbc_hmac_sha256_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out,
     const unsigned char *in, size_t len)
 {
@@ -305,8 +487,8 @@ static int aesni_cbc_hmac_sha256_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out,
         /* but pretend as if we hashed padded payload */
         bitlen = key->md.Nl+(inp_len<<3);   /* at most 18 bits */
-#ifdef BSWAP
-        bitlen = BSWAP(bitlen);
+#ifdef BSWAP4
+        bitlen = BSWAP4(bitlen);
 #else
         mac.c[0] = 0;
         mac.c[1] = (unsigned char)(bitlen>>16);
@@ -380,15 +562,15 @@ static int aesni_cbc_hmac_sha256_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out,
         pmac->u[6] |= key->md.h[6] & mask;
         pmac->u[7] |= key->md.h[7] & mask;
 
-#ifdef BSWAP
-        pmac->u[0] = BSWAP(pmac->u[0]);
-        pmac->u[1] = BSWAP(pmac->u[1]);
-        pmac->u[2] = BSWAP(pmac->u[2]);
-        pmac->u[3] = BSWAP(pmac->u[3]);
-        pmac->u[4] = BSWAP(pmac->u[4]);
-        pmac->u[5] = BSWAP(pmac->u[5]);
-        pmac->u[6] = BSWAP(pmac->u[6]);
-        pmac->u[7] = BSWAP(pmac->u[7]);
+#ifdef BSWAP4
+        pmac->u[0] = BSWAP4(pmac->u[0]);
+        pmac->u[1] = BSWAP4(pmac->u[1]);
+        pmac->u[2] = BSWAP4(pmac->u[2]);
+        pmac->u[3] = BSWAP4(pmac->u[3]);
+        pmac->u[4] = BSWAP4(pmac->u[4]);
+        pmac->u[5] = BSWAP4(pmac->u[5]);
+        pmac->u[6] = BSWAP4(pmac->u[6]);
+        pmac->u[7] = BSWAP4(pmac->u[7]);
 #else
         for (i=0;i<8;i++) {
             res = pmac->u[i];
@@ -531,6 +713,60 @@ static int aesni_cbc_hmac_sha256_ctrl(EVP_CIPHER_CTX *ctx, int type, int arg, vo
             return SHA256_DIGEST_LENGTH;
         }
     }
+#if EVP_CIPH_FLAG_TLS11_MULTI_BLOCK
+    case EVP_CTRL_TLS11_MULTI_BLOCK_AAD:
+        {
+        EVP_CTRL_TLS11_MULTI_BLOCK_PARAM *param =
+            (EVP_CTRL_TLS11_MULTI_BLOCK_PARAM *)ptr;
+        unsigned int n4x=1, x4;
+        unsigned int frag, last, packlen, inp_len;
+
+        if (arg<sizeof(EVP_CTRL_TLS11_MULTI_BLOCK_PARAM)) return -1;
+
+        inp_len = param->inp[11]<<8|param->inp[12];
+
+        if (ctx->encrypt)
+            {
+            if ((param->inp[9]<<8|param->inp[10]) < TLS1_1_VERSION)
+                return -1;
+
+            if (inp_len<2048) return -1;    /* too short */
+
+            if (inp_len>=6144) n4x=2;
+
+            key->md = key->head;
+            SHA256_Update(&key->md,param->inp,13);
+
+            x4 = 4*n4x; n4x += 1;
+
+            frag = inp_len>>n4x;
+            last = inp_len+frag-(frag<<n4x);
+            if (last>frag && ((last+13+9)%64<(x4-1))) {
+                frag++;
+                last -= x4-1;
+            }
+
+            packlen = 5+16+((frag+32+16)&-16);
+            packlen = (packlen<<(1+n4x))-packlen;
+            packlen += 5+16+((last+32+16)&-16);
+
+            param->interleave = x4;
+
+            return (int)packlen;
+            }
+        else
+            return -1;  /* not yet */
+        }
+    case EVP_CTRL_TLS11_MULTI_BLOCK_ENCRYPT:
+        {
+        EVP_CTRL_TLS11_MULTI_BLOCK_PARAM *param =
+            (EVP_CTRL_TLS11_MULTI_BLOCK_PARAM *)ptr;
+
+        return tls11_multi_block_encrypt(key,param->out,param->inp,
+                    param->len,param->interleave/4);
+        }
+    case EVP_CTRL_TLS11_MULTI_BLOCK_DECRYPT:
+#endif
     default:
         return -1;
     }
@@ -544,7 +780,8 @@ static EVP_CIPHER aesni_128_cbc_hmac_sha256_cipher =
     NID_undef,
 #endif
     16,16,16,
-    EVP_CIPH_CBC_MODE|EVP_CIPH_FLAG_DEFAULT_ASN1|EVP_CIPH_FLAG_AEAD_CIPHER,
+    EVP_CIPH_CBC_MODE|EVP_CIPH_FLAG_DEFAULT_ASN1|
+    EVP_CIPH_FLAG_AEAD_CIPHER|EVP_CIPH_FLAG_TLS11_MULTI_BLOCK,
     aesni_cbc_hmac_sha256_init_key,
     aesni_cbc_hmac_sha256_cipher,
     NULL,
@@ -563,7 +800,8 @@ static EVP_CIPHER aesni_256_cbc_hmac_sha256_cipher =
     NID_undef,
 #endif
     16,32,16,
-    EVP_CIPH_CBC_MODE|EVP_CIPH_FLAG_DEFAULT_ASN1|EVP_CIPH_FLAG_AEAD_CIPHER,
+    EVP_CIPH_CBC_MODE|EVP_CIPH_FLAG_DEFAULT_ASN1|
+    EVP_CIPH_FLAG_AEAD_CIPHER|EVP_CIPH_FLAG_TLS11_MULTI_BLOCK,
     aesni_cbc_hmac_sha256_init_key,
     aesni_cbc_hmac_sha256_cipher,
     NULL,
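
For reference, a minimal caller-side sketch of how the two new ctrls are meant to be driven. This is not part of the commit: the EVP_CTRL_TLS11_MULTI_BLOCK_AAD / _ENCRYPT constants and the EVP_CTRL_TLS11_MULTI_BLOCK_PARAM fields (inp, out, len, interleave) are assumed from their use in the glue code above; their actual declarations are added to evp.h separately and are not shown in this diff. The helper name and buffer handling below are illustrative only.

#include <string.h>
#include <openssl/evp.h>

/*
 * Sketch, assuming the patched evp.h.  "hdr" is the 13-byte
 * sequence||type||version||length pseudo-header of the jumbo record,
 * "payload" its plaintext.  ctx must already hold the AES key/IV and
 * the HMAC key of one of the stitched aesni-*-cbc-hmac-sha* ciphers.
 */
static int seal_multi_block(EVP_CIPHER_CTX *ctx,
                            const unsigned char hdr[13],
                            const unsigned char *payload, size_t payload_len,
                            unsigned char *out, size_t out_capacity,
                            size_t *out_len)
{
    EVP_CTRL_TLS11_MULTI_BLOCK_PARAM param;
    int packlen, written;

    memset(&param, 0, sizeof(param));

    /* 1. Feed the AAD: the cipher reports the total size of the packet it
     *    will emit and records the interleave factor in param.interleave. */
    param.inp = hdr;
    packlen = EVP_CIPHER_CTX_ctrl(ctx, EVP_CTRL_TLS11_MULTI_BLOCK_AAD,
                                  (int)sizeof(param), &param);
    if (packlen <= 0 || (size_t)packlen > out_capacity)
        return 0;               /* too short, pre-TLS1.1, or no room */

    /* 2. Encrypt: the payload is fragmented, MACed, padded and
     *    CBC-encrypted into param.interleave back-to-back TLS records. */
    param.inp = payload;
    param.out = out;
    param.len = payload_len;
    written = EVP_CIPHER_CTX_ctrl(ctx, EVP_CTRL_TLS11_MULTI_BLOCK_ENCRYPT,
                                  (int)sizeof(param), &param);
    if (written <= 0)
        return 0;

    *out_len = (size_t)written;
    return 1;
}

The AAD call has to precede the encrypt call because it both sizes the output (packlen) and picks the interleave factor: payloads below 2048 bytes are rejected, payloads up to 6143 bytes are split four ways, and payloads of 6144 bytes or more are split eight ways, matching the 4- and 8-lane SHA multi-block and AES-NI multi-CBC routines declared above.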