/* ====================================================================
 * Copyright (c) 2011-2013 The OpenSSL Project.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *
 * 3. All advertising materials mentioning features or use of this
 *    software must display the following acknowledgment:
 *    "This product includes software developed by the OpenSSL Project
 *    for use in the OpenSSL Toolkit. (http://www.OpenSSL.org/)"
 *
 * 4. The names "OpenSSL Toolkit" and "OpenSSL Project" must not be used to
 *    endorse or promote products derived from this software without
 *    prior written permission. For written permission, please contact
 *    licensing@OpenSSL.org.
 *
 * 5. Products derived from this software may not be called "OpenSSL"
 *    nor may "OpenSSL" appear in their names without prior written
 *    permission of the OpenSSL Project.
 *
 * 6. Redistributions of any form whatsoever must retain the following
 *    acknowledgment:
 *    "This product includes software developed by the OpenSSL Project
 *    for use in the OpenSSL Toolkit (http://www.OpenSSL.org/)"
 *
 * THIS SOFTWARE IS PROVIDED BY THE OpenSSL PROJECT ``AS IS'' AND ANY
 * EXPRESSED OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE OpenSSL PROJECT OR
 * ITS CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
 * OF THE POSSIBILITY OF SUCH DAMAGE.
 * ====================================================================
 */

#include <openssl/opensslconf.h>

#include <stdio.h>
#include <string.h>

#if !defined(OPENSSL_NO_AES) && !defined(OPENSSL_NO_SHA1)

# include <openssl/evp.h>
# include <openssl/objects.h>
# include <openssl/aes.h>
# include <openssl/sha.h>
# include <openssl/rand.h>
# include "modes_lcl.h"

# ifndef EVP_CIPH_FLAG_AEAD_CIPHER
#  define EVP_CIPH_FLAG_AEAD_CIPHER 0x200000
#  define EVP_CTRL_AEAD_TLS1_AAD 0x16
#  define EVP_CTRL_AEAD_SET_MAC_KEY 0x17
# endif

# if !defined(EVP_CIPH_FLAG_DEFAULT_ASN1)
#  define EVP_CIPH_FLAG_DEFAULT_ASN1 0
# endif

# if !defined(EVP_CIPH_FLAG_TLS1_1_MULTIBLOCK)
#  define EVP_CIPH_FLAG_TLS1_1_MULTIBLOCK 0
# endif

# define TLS1_1_VERSION 0x0302
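
/*
 * Per-context cipher+MAC state: ks is the expanded AES key; head and
 * tail are SHA-1 states pre-seeded with the HMAC ipad and opad blocks
 * (see EVP_CTRL_AEAD_SET_MAC_KEY below); md is the running hash of the
 * record being processed.
 */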
typedef struct {
    AES_KEY ks;
    SHA_CTX head, tail, md;
    size_t payload_length;      /* AAD length in decrypt case */
    union {
        unsigned int tls_ver;
        unsigned char tls_aad[16]; /* 13 used */
    } aux;
} EVP_AES_HMAC_SHA1;

# define NO_PAYLOAD_LENGTH ((size_t)-1)
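
/*
 * The remainder of this file is compiled only for x86[_64] builds with
 * the AES-NI assembler modules. OPENSSL_ia32cap_P[1] caches CPUID.1:ECX,
 * so bit 57-32 = 25 below is the AES-NI feature flag.
 */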
# if defined(AES_ASM) && ( \
        defined(__x86_64) || defined(__x86_64__) || \
        defined(_M_AMD64) || defined(_M_X64) || \
        defined(__INTEL__) )

extern unsigned int OPENSSL_ia32cap_P[3];
#  define AESNI_CAPABLE (1<<(57-32))

int aesni_set_encrypt_key(const unsigned char *userKey, int bits,
                          AES_KEY *key);
int aesni_set_decrypt_key(const unsigned char *userKey, int bits,
                          AES_KEY *key);

void aesni_cbc_encrypt(const unsigned char *in,
                       unsigned char *out,
                       size_t length,
                       const AES_KEY *key, unsigned char *ivec, int enc);

void aesni_cbc_sha1_enc(const void *inp, void *out, size_t blocks,
                        const AES_KEY *key, unsigned char iv[16],
                        SHA_CTX *ctx, const void *in0);

void aesni256_cbc_sha1_dec(const void *inp, void *out, size_t blocks,
                           const AES_KEY *key, unsigned char iv[16],
                           SHA_CTX *ctx, const void *in0);
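
/*
 * The two routines above are the "stitched" assembler kernels: they
 * interleave AES-CBC and SHA-1 instructions within one loop so that
 * both computations proceed in parallel on a single core.
 */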

#  define data(ctx) ((EVP_AES_HMAC_SHA1 *)(ctx)->cipher_data)

static int aesni_cbc_hmac_sha1_init_key(EVP_CIPHER_CTX *ctx,
                                        const unsigned char *inkey,
                                        const unsigned char *iv, int enc)
{
    EVP_AES_HMAC_SHA1 *key = data(ctx);
    int ret;

    if (enc)
        ret = aesni_set_encrypt_key(inkey, ctx->key_len * 8, &key->ks);
    else
        ret = aesni_set_decrypt_key(inkey, ctx->key_len * 8, &key->ks);

    SHA1_Init(&key->head);      /* handy when benchmarking */
    key->tail = key->head;
    key->md = key->head;

    key->payload_length = NO_PAYLOAD_LENGTH;

    return ret < 0 ? 0 : 1;
}
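
/*
 * STITCHED_CALL enables the fused AES/SHA-1 path for encryption; the
 * stitched decrypt path stays disabled here, so decryption falls back
 * to separate CBC-decrypt and hash passes.
 */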
#  define STITCHED_CALL
#  undef  STITCHED_DECRYPT_CALL

#  if !defined(STITCHED_CALL)
#   define aes_off 0
#  endif

void sha1_block_data_order(void *c, const void *p, size_t len);

static void sha1_update(SHA_CTX *c, const void *data, size_t len)
{
    const unsigned char *ptr = data;
    size_t res;

    if ((res = c->num)) {
        res = SHA_CBLOCK - res;
        if (len < res)
            res = len;
        SHA1_Update(c, ptr, res);
        ptr += res;
        len -= res;
    }

    res = len % SHA_CBLOCK;
    len -= res;

    if (len) {
        sha1_block_data_order(c, ptr, len / SHA_CBLOCK);

        ptr += len;
        c->Nh += len >> 29;
        c->Nl += len <<= 3;
        if (c->Nl < (unsigned int)len)
            c->Nh++;
    }

    if (res)
        SHA1_Update(c, ptr, res);
}

#  ifdef SHA1_Update
#   undef SHA1_Update
#  endif
#  define SHA1_Update sha1_update
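
/*
 * TLS 1.1+ multi-block support: sha1_multi_block and
 * aesni_multi_cbc_encrypt operate on 4 (SSE/AVX) or 8 (AVX2)
 * independent records at once, one record per SIMD lane.
 */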
#  if !defined(OPENSSL_NO_MULTIBLOCK) && EVP_CIPH_FLAG_TLS1_1_MULTIBLOCK

typedef struct {
    unsigned int A[8], B[8], C[8], D[8], E[8];
} SHA1_MB_CTX;
typedef struct {
    const unsigned char *ptr;
    int blocks;
} HASH_DESC;

void sha1_multi_block(SHA1_MB_CTX *, const HASH_DESC *, int);

typedef struct {
    const unsigned char *inp;
    unsigned char *out;
    int blocks;
    u64 iv[2];
} CIPH_DESC;

void aesni_multi_cbc_encrypt(CIPH_DESC *, void *, int);

static size_t tls1_1_multi_block_encrypt(EVP_AES_HMAC_SHA1 *key,
                                         unsigned char *out,
                                         const unsigned char *inp,
                                         size_t inp_len, int n4x)
{                               /* n4x is 1 or 2 */
    HASH_DESC hash_d[8], edges[8];
    CIPH_DESC ciph_d[8];
    unsigned char storage[sizeof(SHA1_MB_CTX) + 32];
    union {
        u64 q[16];
        u32 d[32];
        u8 c[128];
    } blocks[8];
    SHA1_MB_CTX *ctx;
    unsigned int frag, last, packlen, i, x4 = 4 * n4x, minblocks, processed =
        0;
    size_t ret = 0;
    u8 *IVs;
#   if defined(BSWAP8)
    u64 seqnum;
#   endif

    /* ask for IVs in bulk */
    if (RAND_bytes((IVs = blocks[0].c), 16 * x4) <= 0)
        return 0;

    ctx = (SHA1_MB_CTX *) (storage + 32 - ((size_t)storage % 32)); /* align */

    frag = (unsigned int)inp_len >> (1 + n4x);
    last = (unsigned int)inp_len + frag - (frag << (1 + n4x));
    if (last > frag && ((last + 13 + 9) % 64) < (x4 - 1)) {
        frag++;
        last -= x4 - 1;
    }

    packlen = 5 + 16 + ((frag + 20 + 16) & -16);
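
    /*
     * Output record layout: 5-byte TLS header | 16-byte explicit IV |
     * fragment | 20-byte SHA-1 MAC | CBC padding, rounded up to the AES
     * block size, which is exactly the packlen computed above.
     */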

    /* populate descriptors with pointers and IVs */
    hash_d[0].ptr = inp;
    ciph_d[0].inp = inp;
    /* 5+16 is place for header and explicit IV */
    ciph_d[0].out = out + 5 + 16;
    memcpy(ciph_d[0].out - 16, IVs, 16);
    memcpy(ciph_d[0].iv, IVs, 16);
    IVs += 16;

    for (i = 1; i < x4; i++) {
        ciph_d[i].inp = hash_d[i].ptr = hash_d[i - 1].ptr + frag;
        ciph_d[i].out = ciph_d[i - 1].out + packlen;
        memcpy(ciph_d[i].out - 16, IVs, 16);
        memcpy(ciph_d[i].iv, IVs, 16);
        IVs += 16;
    }

#   if defined(BSWAP8)
    memcpy(blocks[0].c, key->md.data, 8);
    seqnum = BSWAP8(blocks[0].q[0]);
#   endif
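
    /*
     * Each lane hashes its own 13-byte pseudo-header (8-byte sequence
     * number, type, version, length) followed by the first 64-13 bytes
     * of its fragment; sequence numbers are incremented per record, with
     * a byte-wise carry loop when no 64-bit byte swap is available.
     */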
    for (i = 0; i < x4; i++) {
        unsigned int len = (i == (x4 - 1) ? last : frag);
#   if !defined(BSWAP8)
        unsigned int carry, j;
#   endif

        ctx->A[i] = key->md.h0;
        ctx->B[i] = key->md.h1;
        ctx->C[i] = key->md.h2;
        ctx->D[i] = key->md.h3;
        ctx->E[i] = key->md.h4;

        /* fix seqnum */
#   if defined(BSWAP8)
        blocks[i].q[0] = BSWAP8(seqnum + i);
#   else
        for (carry = i, j = 8; j--;) {
            blocks[i].c[j] = ((u8 *)key->md.data)[j] + carry;
            carry = (blocks[i].c[j] - carry) >> (sizeof(carry) * 8 - 1);
        }
#   endif
        blocks[i].c[8] = ((u8 *)key->md.data)[8];
        blocks[i].c[9] = ((u8 *)key->md.data)[9];
        blocks[i].c[10] = ((u8 *)key->md.data)[10];
        /* fix length */
        blocks[i].c[11] = (u8)(len >> 8);
        blocks[i].c[12] = (u8)(len);

        memcpy(blocks[i].c + 13, hash_d[i].ptr, 64 - 13);
        hash_d[i].ptr += 64 - 13;
        hash_d[i].blocks = (len - (64 - 13)) / 64;

        edges[i].ptr = blocks[i].c;
        edges[i].blocks = 1;
    }

    /* hash 13-byte headers and first 64-13 bytes of inputs */
    sha1_multi_block(ctx, edges, n4x);
    /* hash bulk inputs */
#   define MAXCHUNKSIZE 2048
#   if MAXCHUNKSIZE % 64
#    error "MAXCHUNKSIZE is not divisible by 64"
#   elif MAXCHUNKSIZE
    /*
     * goal is to minimize pressure on L1 cache by moving in shorter steps,
     * so that hashed data is still in the cache by the time we encrypt it
     */
    minblocks = ((frag <= last ? frag : last) - (64 - 13)) / 64;
    if (minblocks > MAXCHUNKSIZE / 64) {
        for (i = 0; i < x4; i++) {
            edges[i].ptr = hash_d[i].ptr;
            edges[i].blocks = MAXCHUNKSIZE / 64;
            ciph_d[i].blocks = MAXCHUNKSIZE / 16;
        }
        do {
            sha1_multi_block(ctx, edges, n4x);
            aesni_multi_cbc_encrypt(ciph_d, &key->ks, n4x);

            for (i = 0; i < x4; i++) {
                edges[i].ptr = hash_d[i].ptr += MAXCHUNKSIZE;
                hash_d[i].blocks -= MAXCHUNKSIZE / 64;
                edges[i].blocks = MAXCHUNKSIZE / 64;
                ciph_d[i].inp += MAXCHUNKSIZE;
                ciph_d[i].out += MAXCHUNKSIZE;
                ciph_d[i].blocks = MAXCHUNKSIZE / 16;
                memcpy(ciph_d[i].iv, ciph_d[i].out - 16, 16);
            }
            processed += MAXCHUNKSIZE;
            minblocks -= MAXCHUNKSIZE / 64;
        } while (minblocks > MAXCHUNKSIZE / 64);
    }
#   endif
#   undef MAXCHUNKSIZE
    sha1_multi_block(ctx, hash_d, n4x);
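
    /*
     * What remains of each fragment is shorter than a SHA-1 block and is
     * padded by hand below: a 0x80 terminator plus the 64-bit big-endian
     * bit count, spilling into a second block when the count doesn't fit.
     */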
    memset(blocks, 0, sizeof(blocks));
    for (i = 0; i < x4; i++) {
        unsigned int len = (i == (x4 - 1) ? last : frag),
            off = hash_d[i].blocks * 64;
        const unsigned char *ptr = hash_d[i].ptr + off;

        off = (len - processed) - (64 - 13) - off; /* remainder actually */
        memcpy(blocks[i].c, ptr, off);
        blocks[i].c[off] = 0x80;
        len += 64 + 13;         /* 64 is HMAC header */
        len *= 8;               /* convert to bits */
        if (off < (64 - 8)) {
#   ifdef BSWAP4
            blocks[i].d[15] = BSWAP4(len);
#   else
            PUTU32(blocks[i].c + 60, len);
#   endif
            edges[i].blocks = 1;
        } else {
#   ifdef BSWAP4
            blocks[i].d[31] = BSWAP4(len);
#   else
            PUTU32(blocks[i].c + 124, len);
#   endif
            edges[i].blocks = 2;
        }
        edges[i].ptr = blocks[i].c;
    }

    /* hash input tails and finalize */
    sha1_multi_block(ctx, edges, n4x);
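
    /*
     * The inner digests are complete; now run each through the
     * opad-seeded state (key->tail) to finish
     * HMAC = SHA1(opad || SHA1(ipad || data)).
     */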
    memset(blocks, 0, sizeof(blocks));
    for (i = 0; i < x4; i++) {
#   ifdef BSWAP4
        blocks[i].d[0] = BSWAP4(ctx->A[i]);
        ctx->A[i] = key->tail.h0;
        blocks[i].d[1] = BSWAP4(ctx->B[i]);
        ctx->B[i] = key->tail.h1;
        blocks[i].d[2] = BSWAP4(ctx->C[i]);
        ctx->C[i] = key->tail.h2;
        blocks[i].d[3] = BSWAP4(ctx->D[i]);
        ctx->D[i] = key->tail.h3;
        blocks[i].d[4] = BSWAP4(ctx->E[i]);
        ctx->E[i] = key->tail.h4;
        blocks[i].c[20] = 0x80;
        blocks[i].d[15] = BSWAP4((64 + 20) * 8);
#   else
        PUTU32(blocks[i].c + 0, ctx->A[i]);
        ctx->A[i] = key->tail.h0;
        PUTU32(blocks[i].c + 4, ctx->B[i]);
        ctx->B[i] = key->tail.h1;
        PUTU32(blocks[i].c + 8, ctx->C[i]);
        ctx->C[i] = key->tail.h2;
        PUTU32(blocks[i].c + 12, ctx->D[i]);
        ctx->D[i] = key->tail.h3;
        PUTU32(blocks[i].c + 16, ctx->E[i]);
        ctx->E[i] = key->tail.h4;
        blocks[i].c[20] = 0x80;
        PUTU32(blocks[i].c + 60, (64 + 20) * 8);
#   endif
        edges[i].ptr = blocks[i].c;
        edges[i].blocks = 1;
    }

    /* finalize MACs */
    sha1_multi_block(ctx, edges, n4x);
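
    /*
     * Assemble the final records: move payload into place, append the
     * MAC, write RFC-style CBC padding (pad+1 bytes, each equal to pad),
     * and patch the real length into each 5-byte header.
     */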
    for (i = 0; i < x4; i++) {
        unsigned int len = (i == (x4 - 1) ? last : frag), pad, j;
        unsigned char *out0 = out;

        memcpy(ciph_d[i].out, ciph_d[i].inp, len - processed);
        ciph_d[i].inp = ciph_d[i].out;

        out += 5 + 16 + len;

        /* write MAC */
        PUTU32(out + 0, ctx->A[i]);
        PUTU32(out + 4, ctx->B[i]);
        PUTU32(out + 8, ctx->C[i]);
        PUTU32(out + 12, ctx->D[i]);
        PUTU32(out + 16, ctx->E[i]);
        out += 20;
        len += 20;

        /* pad */
        pad = 15 - len % 16;
        for (j = 0; j <= pad; j++)
            *(out++) = pad;
        len += pad + 1;

        ciph_d[i].blocks = (len - processed) / 16;
        len += 16;              /* account for explicit iv */

        /* arrange header */
        out0[0] = ((u8 *)key->md.data)[8];
        out0[1] = ((u8 *)key->md.data)[9];
        out0[2] = ((u8 *)key->md.data)[10];
        out0[3] = (u8)(len >> 8);
        out0[4] = (u8)(len);

        ret += len + 5;
        inp += frag;
    }

    aesni_multi_cbc_encrypt(ciph_d, &key->ks, n4x);

    OPENSSL_cleanse(blocks, sizeof(blocks));
    OPENSSL_cleanse(ctx, sizeof(*ctx));

    return ret;
}
#  endif
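
/*
 * Combined encrypt/decrypt entry point for the EVP layer: on encryption
 * it hashes, pads and encrypts in (at most) one stitched pass; on
 * decryption it must additionally verify MAC and padding in constant
 * time.
 */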
static int aesni_cbc_hmac_sha1_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out,
                                      const unsigned char *in, size_t len)
{
    EVP_AES_HMAC_SHA1 *key = data(ctx);
    unsigned int l;
    size_t plen = key->payload_length, iv = 0, /* explicit IV in TLS 1.1 and
                                                * later */
        sha_off = 0;
#  if defined(STITCHED_CALL)
    size_t aes_off = 0, blocks;

    sha_off = SHA_CBLOCK - key->md.num;
#  endif

    key->payload_length = NO_PAYLOAD_LENGTH;

    if (len % AES_BLOCK_SIZE)
        return 0;

    if (ctx->encrypt) {
        if (plen == NO_PAYLOAD_LENGTH)
            plen = len;
        else if (len !=
                 ((plen + SHA_DIGEST_LENGTH +
                   AES_BLOCK_SIZE) & -AES_BLOCK_SIZE))
            return 0;
        else if (key->aux.tls_ver >= TLS1_1_VERSION)
            iv = AES_BLOCK_SIZE;

#  if defined(STITCHED_CALL)
        if (plen > (sha_off + iv)
            && (blocks = (plen - (sha_off + iv)) / SHA_CBLOCK)) {
            SHA1_Update(&key->md, in + iv, sha_off);

            aesni_cbc_sha1_enc(in, out, blocks, &key->ks,
                               ctx->iv, &key->md, in + iv + sha_off);
            blocks *= SHA_CBLOCK;
            aes_off += blocks;
            sha_off += blocks;
            key->md.Nh += blocks >> 29;
            key->md.Nl += blocks <<= 3;
            if (key->md.Nl < (unsigned int)blocks)
                key->md.Nh++;
        } else {
            sha_off = 0;
        }
#  endif
        sha_off += iv;
        SHA1_Update(&key->md, in + sha_off, plen - sha_off);
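
        /*
         * "TLS" mode (plen != len): finalize the payload digest, fold it
         * through the opad state to complete the HMAC, append MAC and
         * CBC padding, then encrypt everything past the stitched prefix
         * in one call.
         */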
        if (plen != len) {      /* "TLS" mode of operation */
            if (in != out)
                memcpy(out + aes_off, in + aes_off, plen - aes_off);

            /* calculate HMAC and append it to payload */
            SHA1_Final(out + plen, &key->md);
            key->md = key->tail;
            SHA1_Update(&key->md, out + plen, SHA_DIGEST_LENGTH);
            SHA1_Final(out + plen, &key->md);

            /* pad the payload|hmac */
            plen += SHA_DIGEST_LENGTH;
            for (l = len - plen - 1; plen < len; plen++)
                out[plen] = l;
            /* encrypt HMAC|padding at once */
            aesni_cbc_encrypt(out + aes_off, out + aes_off, len - aes_off,
                              &key->ks, ctx->iv, 1);
        } else {
            aesni_cbc_encrypt(in + aes_off, out + aes_off, len - aes_off,
                              &key->ks, ctx->iv, 1);
        }
    } else {
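        /*
         * Decrypt path: the MAC and padding checks below are written
         * branch-free so that execution time does not depend on the
         * padding value (Lucky Thirteen countermeasure).
         */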
        union {
            unsigned int u[SHA_DIGEST_LENGTH / sizeof(unsigned int)];
            unsigned char c[32 + SHA_DIGEST_LENGTH];
        } mac, *pmac;

        /* arrange cache line alignment */
        pmac = (void *)(((size_t)mac.c + 31) & ((size_t)0 - 32));

        if (plen != NO_PAYLOAD_LENGTH) { /* "TLS" mode of operation */
            size_t inp_len, mask, j, i;
            unsigned int res, maxpad, pad, bitlen;
            int ret = 1;
            union {
                unsigned int u[SHA_LBLOCK];
                unsigned char c[SHA_CBLOCK];
            } *data = (void *)key->md.data;
#  if defined(STITCHED_DECRYPT_CALL)
            unsigned char tail_iv[AES_BLOCK_SIZE];
            int stitch = 0;
#  endif

            if ((key->aux.tls_aad[plen - 4] << 8 | key->aux.tls_aad[plen - 3])
                >= TLS1_1_VERSION) {
                if (len < (AES_BLOCK_SIZE + SHA_DIGEST_LENGTH + 1))
                    return 0;

                /* omit explicit iv */
                memcpy(ctx->iv, in, AES_BLOCK_SIZE);
                in += AES_BLOCK_SIZE;
                out += AES_BLOCK_SIZE;
                len -= AES_BLOCK_SIZE;
            } else if (len < (SHA_DIGEST_LENGTH + 1))
                return 0;

#  if defined(STITCHED_DECRYPT_CALL)
            if (len >= 1024 && ctx->key_len == 32) {
                /* decrypt last block */
                memcpy(tail_iv, in + len - 2 * AES_BLOCK_SIZE,
                       AES_BLOCK_SIZE);
                aesni_cbc_encrypt(in + len - AES_BLOCK_SIZE,
                                  out + len - AES_BLOCK_SIZE, AES_BLOCK_SIZE,
                                  &key->ks, tail_iv, 0);
                stitch = 1;
            } else
#  endif
                /* decrypt HMAC|padding at once */
                aesni_cbc_encrypt(in, out, len, &key->ks, ctx->iv, 0);

            /* figure out payload length */
            pad = out[len - 1];
            maxpad = len - (SHA_DIGEST_LENGTH + 1);
            maxpad |= (255 - maxpad) >> (sizeof(maxpad) * 8 - 8);
            maxpad &= 255;

            inp_len = len - (SHA_DIGEST_LENGTH + pad + 1);
            mask = (0 - ((inp_len - len) >> (sizeof(inp_len) * 8 - 1)));
            inp_len &= mask;
            ret &= (int)mask;

            key->aux.tls_aad[plen - 2] = inp_len >> 8;
            key->aux.tls_aad[plen - 1] = inp_len;

            /* calculate HMAC */
            key->md = key->head;
            SHA1_Update(&key->md, key->aux.tls_aad, plen);

#  if defined(STITCHED_DECRYPT_CALL)
            if (stitch) {
                blocks = (len - (256 + 32 + SHA_CBLOCK)) / SHA_CBLOCK;
                aes_off = len - AES_BLOCK_SIZE - blocks * SHA_CBLOCK;
                sha_off = SHA_CBLOCK - plen;

                aesni_cbc_encrypt(in, out, aes_off, &key->ks, ctx->iv, 0);

                SHA1_Update(&key->md, out, sha_off);
                aesni256_cbc_sha1_dec(in + aes_off,
                                      out + aes_off, blocks, &key->ks,
                                      ctx->iv, &key->md, out + sha_off);

                sha_off += blocks *= SHA_CBLOCK;
                out += sha_off;
                len -= sha_off;
                inp_len -= sha_off;

                key->md.Nl += (blocks << 3); /* at most 18 bits */
                memcpy(ctx->iv, tail_iv, AES_BLOCK_SIZE);
            }
#  endif

            len -= SHA_DIGEST_LENGTH; /* amend mac */
            if (len >= (256 + SHA_CBLOCK)) {
                j = (len - (256 + SHA_CBLOCK)) & (0 - SHA_CBLOCK);
                j += SHA_CBLOCK - key->md.num;
                SHA1_Update(&key->md, out, j);
                out += j;
                len -= j;
            }

#  if 1
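            /*
             * From here on the record is hashed as if it were exactly
             * inp_len bytes long: every candidate final block is
             * processed and the digest of the block that really ends the
             * payload is selected by masking, keeping the amount of work
             * independent of the secret padding length.
             */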
            /* but pretend as if we hashed padded payload */
            bitlen = key->md.Nl + (inp_len << 3); /* at most 18 bits */
#   ifdef BSWAP4
            bitlen = BSWAP4(bitlen);
#   else
            mac.c[0] = 0;
            mac.c[1] = (unsigned char)(bitlen >> 16);
            mac.c[2] = (unsigned char)(bitlen >> 8);
            mac.c[3] = (unsigned char)bitlen;
            bitlen = mac.u[0];
#   endif

            pmac->u[0] = 0;
            pmac->u[1] = 0;
            pmac->u[2] = 0;
            pmac->u[3] = 0;
            pmac->u[4] = 0;

            for (res = key->md.num, j = 0; j < len; j++) {
                size_t c = out[j];
                mask = (j - inp_len) >> (sizeof(j) * 8 - 8);
                c &= mask;
                c |= 0x80 & ~mask & ~((inp_len - j) >> (sizeof(j) * 8 - 8));
                data->c[res++] = (unsigned char)c;

                if (res != SHA_CBLOCK)
                    continue;

                /* j is not incremented yet */
                mask = 0 - ((inp_len + 7 - j) >> (sizeof(j) * 8 - 1));
                data->u[SHA_LBLOCK - 1] |= bitlen & mask;
                sha1_block_data_order(&key->md, data, 1);
                mask &= 0 - ((j - inp_len - 72) >> (sizeof(j) * 8 - 1));
                pmac->u[0] |= key->md.h0 & mask;
                pmac->u[1] |= key->md.h1 & mask;
                pmac->u[2] |= key->md.h2 & mask;
                pmac->u[3] |= key->md.h3 & mask;
                pmac->u[4] |= key->md.h4 & mask;
                res = 0;
            }

            for (i = res; i < SHA_CBLOCK; i++, j++)
                data->c[i] = 0;

            if (res > SHA_CBLOCK - 8) {
                mask = 0 - ((inp_len + 8 - j) >> (sizeof(j) * 8 - 1));
                data->u[SHA_LBLOCK - 1] |= bitlen & mask;
                sha1_block_data_order(&key->md, data, 1);
                mask &= 0 - ((j - inp_len - 73) >> (sizeof(j) * 8 - 1));
                pmac->u[0] |= key->md.h0 & mask;
                pmac->u[1] |= key->md.h1 & mask;
                pmac->u[2] |= key->md.h2 & mask;
                pmac->u[3] |= key->md.h3 & mask;
                pmac->u[4] |= key->md.h4 & mask;

                memset(data, 0, SHA_CBLOCK);
                j += 64;
            }
            data->u[SHA_LBLOCK - 1] = bitlen;
            sha1_block_data_order(&key->md, data, 1);
            mask = 0 - ((j - inp_len - 73) >> (sizeof(j) * 8 - 1));
            pmac->u[0] |= key->md.h0 & mask;
            pmac->u[1] |= key->md.h1 & mask;
            pmac->u[2] |= key->md.h2 & mask;
            pmac->u[3] |= key->md.h3 & mask;
            pmac->u[4] |= key->md.h4 & mask;

#   ifdef BSWAP4
            pmac->u[0] = BSWAP4(pmac->u[0]);
            pmac->u[1] = BSWAP4(pmac->u[1]);
            pmac->u[2] = BSWAP4(pmac->u[2]);
            pmac->u[3] = BSWAP4(pmac->u[3]);
            pmac->u[4] = BSWAP4(pmac->u[4]);
#   else
            for (i = 0; i < 5; i++) {
                res = pmac->u[i];
                pmac->c[4 * i + 0] = (unsigned char)(res >> 24);
                pmac->c[4 * i + 1] = (unsigned char)(res >> 16);
                pmac->c[4 * i + 2] = (unsigned char)(res >> 8);
                pmac->c[4 * i + 3] = (unsigned char)res;
            }
#   endif
            len += SHA_DIGEST_LENGTH;
#  else
            SHA1_Update(&key->md, out, inp_len);
            res = key->md.num;
            SHA1_Final(pmac->c, &key->md);

            {
                unsigned int inp_blocks, pad_blocks;

                /* but pretend as if we hashed padded payload */
                inp_blocks =
                    1 + ((SHA_CBLOCK - 9 - res) >> (sizeof(res) * 8 - 1));
                res += (unsigned int)(len - inp_len);
                pad_blocks = res / SHA_CBLOCK;
                res %= SHA_CBLOCK;
                pad_blocks +=
                    1 + ((SHA_CBLOCK - 9 - res) >> (sizeof(res) * 8 - 1));
                for (; inp_blocks < pad_blocks; inp_blocks++)
                    sha1_block_data_order(&key->md, data, 1);
            }
#  endif
            key->md = key->tail;
            SHA1_Update(&key->md, pmac->c, SHA_DIGEST_LENGTH);
            SHA1_Final(pmac->c, &key->md);
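
            /*
             * Constant-time verification: sweep the whole MAC+padding
             * region once, comparing MAC bytes and padding bytes under
             * masks, and fold any mismatch into a single result bit.
             */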
            /* verify HMAC */
            out += inp_len;
            len -= inp_len;
#  if 1
            {
                unsigned char *p = out + len - 1 - maxpad - SHA_DIGEST_LENGTH;
                size_t off = out - p;
                unsigned int c, cmask;

                maxpad += SHA_DIGEST_LENGTH;
                for (res = 0, i = 0, j = 0; j < maxpad; j++) {
                    c = p[j];
                    cmask =
                        ((int)(j - off - SHA_DIGEST_LENGTH)) >> (sizeof(int) *
                                                                 8 - 1);
                    res |= (c ^ pad) & ~cmask; /* ... and padding */
                    cmask &= ((int)(off - 1 - j)) >> (sizeof(int) * 8 - 1);
                    res |= (c ^ pmac->c[i]) & cmask;
                    i += 1 & cmask;
                }
                maxpad -= SHA_DIGEST_LENGTH;

                res = 0 - ((0 - res) >> (sizeof(res) * 8 - 1));
                ret &= (int)~res;
            }
#  else
            for (res = 0, i = 0; i < SHA_DIGEST_LENGTH; i++)
                res |= out[i] ^ pmac->c[i];
            res = 0 - ((0 - res) >> (sizeof(res) * 8 - 1));
            ret &= (int)~res;

            /* verify padding */
            pad = (pad & ~res) | (maxpad & res);
            out = out + len - 1 - pad;
            for (res = 0, i = 0; i < pad; i++)
                res |= out[i] ^ pad;

            res = (0 - res) >> (sizeof(res) * 8 - 1);
            ret &= (int)~res;
#  endif
            return ret;
        } else {
#  if defined(STITCHED_DECRYPT_CALL)
            if (len >= 1024 && ctx->key_len == 32) {
                if (sha_off %= SHA_CBLOCK)
                    blocks = (len - 3 * SHA_CBLOCK) / SHA_CBLOCK;
                else
                    blocks = (len - 2 * SHA_CBLOCK) / SHA_CBLOCK;
                aes_off = len - blocks * SHA_CBLOCK;

                aesni_cbc_encrypt(in, out, aes_off, &key->ks, ctx->iv, 0);
                SHA1_Update(&key->md, out, sha_off);
                aesni256_cbc_sha1_dec(in + aes_off,
                                      out + aes_off, blocks, &key->ks,
                                      ctx->iv, &key->md, out + sha_off);

                sha_off += blocks *= SHA_CBLOCK;
                out += sha_off;
                len -= sha_off;

                key->md.Nh += blocks >> 29;
                key->md.Nl += blocks <<= 3;
                if (key->md.Nl < (unsigned int)blocks)
                    key->md.Nh++;
            } else
#  endif
                /* decrypt HMAC|padding at once */
                aesni_cbc_encrypt(in, out, len, &key->ks, ctx->iv, 0);

            SHA1_Update(&key->md, out, len);
        }
    }

    return 1;
}

static int aesni_cbc_hmac_sha1_ctrl(EVP_CIPHER_CTX *ctx, int type, int arg,
                                    void *ptr)
{
    EVP_AES_HMAC_SHA1 *key = data(ctx);

    switch (type) {
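    /*
     * EVP_CTRL_AEAD_SET_MAC_KEY precomputes the HMAC key schedule: a key
     * longer than one SHA-1 block is first hashed down, then head/tail
     * are seeded with the ipad- and opad-XORed key block.
     */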
    case EVP_CTRL_AEAD_SET_MAC_KEY:
        {
            unsigned int i;
            unsigned char hmac_key[64];

            memset(hmac_key, 0, sizeof(hmac_key));

            if (arg > (int)sizeof(hmac_key)) {
                SHA1_Init(&key->head);
                SHA1_Update(&key->head, ptr, arg);
                SHA1_Final(hmac_key, &key->head);
            } else {
                memcpy(hmac_key, ptr, arg);
            }

            for (i = 0; i < sizeof(hmac_key); i++)
                hmac_key[i] ^= 0x36; /* ipad */
            SHA1_Init(&key->head);
            SHA1_Update(&key->head, hmac_key, sizeof(hmac_key));

            for (i = 0; i < sizeof(hmac_key); i++)
                hmac_key[i] ^= 0x36 ^ 0x5c; /* opad */
            SHA1_Init(&key->tail);
            SHA1_Update(&key->tail, hmac_key, sizeof(hmac_key));

            OPENSSL_cleanse(hmac_key, sizeof(hmac_key));

            return 1;
        }
    case EVP_CTRL_AEAD_TLS1_AAD:
        {
            unsigned char *p = ptr;
            unsigned int len;

            if (arg != EVP_AEAD_TLS1_AAD_LEN)
                return -1;

            len = p[arg - 2] << 8 | p[arg - 1];

            if (ctx->encrypt) {
                key->payload_length = len;
                if ((key->aux.tls_ver =
                     p[arg - 4] << 8 | p[arg - 3]) >= TLS1_1_VERSION) {
                    len -= AES_BLOCK_SIZE;
                    p[arg - 2] = len >> 8;
                    p[arg - 1] = len;
                }
                key->md = key->head;
                SHA1_Update(&key->md, p, arg);

                return (int)(((len + SHA_DIGEST_LENGTH +
                               AES_BLOCK_SIZE) & -AES_BLOCK_SIZE)
                             - len);
            } else {
                memcpy(key->aux.tls_aad, ptr, arg);
                key->payload_length = arg;

                return SHA_DIGEST_LENGTH;
            }
        }
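    /*
     * Return-value convention above: on encrypt the ctrl reports how
     * many bytes of MAC and padding the record will grow by; on decrypt
     * it returns SHA_DIGEST_LENGTH so the caller reserves room for the
     * MAC.
     */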
#  if !defined(OPENSSL_NO_MULTIBLOCK) && EVP_CIPH_FLAG_TLS1_1_MULTIBLOCK
    case EVP_CTRL_TLS1_1_MULTIBLOCK_MAX_BUFSIZE:
        return (int)(5 + 16 + ((arg + 20 + 16) & -16));
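        /*
         * Worst-case growth per record: 5-byte header, 16-byte explicit
         * IV, and payload+MAC rounded up to the AES block size.
         */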
    case EVP_CTRL_TLS1_1_MULTIBLOCK_AAD:
        {
            EVP_CTRL_TLS1_1_MULTIBLOCK_PARAM *param =
                (EVP_CTRL_TLS1_1_MULTIBLOCK_PARAM *) ptr;
            unsigned int n4x = 1, x4;
            unsigned int frag, last, packlen, inp_len;

            if (arg < (int)sizeof(EVP_CTRL_TLS1_1_MULTIBLOCK_PARAM))
                return -1;

            inp_len = param->inp[11] << 8 | param->inp[12];

            if (ctx->encrypt) {
                if ((param->inp[9] << 8 | param->inp[10]) < TLS1_1_VERSION)
                    return -1;

                if (inp_len) {
                    if (inp_len < 4096)
                        return 0; /* too short */

                    if (inp_len >= 8192 && OPENSSL_ia32cap_P[2] & (1 << 5))
                        n4x = 2; /* AVX2 */
                } else if ((n4x = param->interleave / 4) && n4x <= 2)
                    inp_len = param->len;
                else
                    return -1;

                key->md = key->head;
                SHA1_Update(&key->md, param->inp, 13);

                x4 = 4 * n4x;
                n4x += 1;

                frag = inp_len >> n4x;
                last = inp_len + frag - (frag << n4x);
                if (last > frag && ((last + 13 + 9) % 64 < (x4 - 1))) {
                    frag++;
                    last -= x4 - 1;
                }

                packlen = 5 + 16 + ((frag + 20 + 16) & -16);
                packlen = (packlen << n4x) - packlen;
                packlen += 5 + 16 + ((last + 20 + 16) & -16);

                param->interleave = x4;

                return (int)packlen;
            } else
                return -1;      /* not yet */
        }
    case EVP_CTRL_TLS1_1_MULTIBLOCK_ENCRYPT:
        {
            EVP_CTRL_TLS1_1_MULTIBLOCK_PARAM *param =
                (EVP_CTRL_TLS1_1_MULTIBLOCK_PARAM *) ptr;

            return (int)tls1_1_multi_block_encrypt(key, param->out,
                                                   param->inp, param->len,
                                                   param->interleave / 4);
        }
    case EVP_CTRL_TLS1_1_MULTIBLOCK_DECRYPT:
#  endif
    default:
        return -1;
    }
}
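
/*
 * Static EVP_CIPHER tables for the composite ciphers: CBC mode with the
 * AEAD flag set so the TLS layer drives MAC processing through the ctrl
 * hooks above; 16-byte IV and block size, 16- or 32-byte keys.
 */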
static EVP_CIPHER aesni_128_cbc_hmac_sha1_cipher = {
#  ifdef NID_aes_128_cbc_hmac_sha1
    NID_aes_128_cbc_hmac_sha1,
#  else
    NID_undef,
#  endif
    16, 16, 16,
    EVP_CIPH_CBC_MODE | EVP_CIPH_FLAG_DEFAULT_ASN1 |
        EVP_CIPH_FLAG_AEAD_CIPHER | EVP_CIPH_FLAG_TLS1_1_MULTIBLOCK,
    aesni_cbc_hmac_sha1_init_key,
    aesni_cbc_hmac_sha1_cipher,
    NULL,
    sizeof(EVP_AES_HMAC_SHA1),
    EVP_CIPH_FLAG_DEFAULT_ASN1 ? NULL : EVP_CIPHER_set_asn1_iv,
    EVP_CIPH_FLAG_DEFAULT_ASN1 ? NULL : EVP_CIPHER_get_asn1_iv,
    aesni_cbc_hmac_sha1_ctrl,
    NULL
};

static EVP_CIPHER aesni_256_cbc_hmac_sha1_cipher = {
#  ifdef NID_aes_256_cbc_hmac_sha1
    NID_aes_256_cbc_hmac_sha1,
#  else
    NID_undef,
#  endif
    32, 16, 16,
    EVP_CIPH_CBC_MODE | EVP_CIPH_FLAG_DEFAULT_ASN1 |
        EVP_CIPH_FLAG_AEAD_CIPHER | EVP_CIPH_FLAG_TLS1_1_MULTIBLOCK,
    aesni_cbc_hmac_sha1_init_key,
    aesni_cbc_hmac_sha1_cipher,
    NULL,
    sizeof(EVP_AES_HMAC_SHA1),
    EVP_CIPH_FLAG_DEFAULT_ASN1 ? NULL : EVP_CIPHER_set_asn1_iv,
    EVP_CIPH_FLAG_DEFAULT_ASN1 ? NULL : EVP_CIPHER_get_asn1_iv,
    aesni_cbc_hmac_sha1_ctrl,
    NULL
};

const EVP_CIPHER *EVP_aes_128_cbc_hmac_sha1(void)
{
    return (OPENSSL_ia32cap_P[1] & AESNI_CAPABLE ?
            &aesni_128_cbc_hmac_sha1_cipher : NULL);
}

const EVP_CIPHER *EVP_aes_256_cbc_hmac_sha1(void)
{
    return (OPENSSL_ia32cap_P[1] & AESNI_CAPABLE ?
            &aesni_256_cbc_hmac_sha1_cipher : NULL);
}
# else
const EVP_CIPHER *EVP_aes_128_cbc_hmac_sha1(void)
{
    return NULL;
}

const EVP_CIPHER *EVP_aes_256_cbc_hmac_sha1(void)
{
    return NULL;
}
# endif
#endif