/* ====================================================================
 * Copyright (c) 2011-2013 The OpenSSL Project.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *
 * 3. All advertising materials mentioning features or use of this
 *    software must display the following acknowledgment:
 *    "This product includes software developed by the OpenSSL Project
 *    for use in the OpenSSL Toolkit. (http://www.OpenSSL.org/)"
 *
 * 4. The names "OpenSSL Toolkit" and "OpenSSL Project" must not be used to
 *    endorse or promote products derived from this software without
 *    prior written permission. For written permission, please contact
 *    licensing@OpenSSL.org.
 *
 * 5. Products derived from this software may not be called "OpenSSL"
 *    nor may "OpenSSL" appear in their names without prior written
 *    permission of the OpenSSL Project.
 *
 * 6. Redistributions of any form whatsoever must retain the following
 *    acknowledgment:
 *    "This product includes software developed by the OpenSSL Project
 *    for use in the OpenSSL Toolkit (http://www.OpenSSL.org/)"
 *
 * THIS SOFTWARE IS PROVIDED BY THE OpenSSL PROJECT ``AS IS'' AND ANY
 * EXPRESSED OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE OpenSSL PROJECT OR
 * ITS CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
 * OF THE POSSIBILITY OF SUCH DAMAGE.
 * ====================================================================
 */

#include <openssl/opensslconf.h>

#include <stdio.h>
#include <string.h>

#if !defined(OPENSSL_NO_AES)

# include <openssl/evp.h>
# include <openssl/objects.h>
# include <openssl/aes.h>
# include <openssl/sha.h>
# include <openssl/rand.h>
# include "modes_lcl.h"

# ifndef EVP_CIPH_FLAG_AEAD_CIPHER
#  define EVP_CIPH_FLAG_AEAD_CIPHER 0x200000
#  define EVP_CTRL_AEAD_TLS1_AAD 0x16
#  define EVP_CTRL_AEAD_SET_MAC_KEY 0x17
# endif

# if !defined(EVP_CIPH_FLAG_DEFAULT_ASN1)
#  define EVP_CIPH_FLAG_DEFAULT_ASN1 0
# endif

# if !defined(EVP_CIPH_FLAG_TLS1_1_MULTIBLOCK)
#  define EVP_CIPH_FLAG_TLS1_1_MULTIBLOCK 0
# endif

# define TLS1_1_VERSION 0x0302

typedef struct {
    AES_KEY ks;
    SHA256_CTX head, tail, md;
    size_t payload_length;      /* AAD length in decrypt case */
    union {
        unsigned int tls_ver;
        unsigned char tls_aad[16]; /* 13 used */
    } aux;
} EVP_AES_HMAC_SHA256;
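
/*
 * Note on the hash state fields: "head" is the SHA-256 state after
 * absorbing the 64-byte ipad-XORed HMAC key, "tail" the state after the
 * opad-XORed key (both precomputed in the EVP_CTRL_AEAD_SET_MAC_KEY
 * handler below), and "md" is the working copy that each record's MAC
 * computation starts from.
 */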

# define NO_PAYLOAD_LENGTH ((size_t)-1)

# if     defined(AES_ASM) && ( \
        defined(__x86_64) || defined(__x86_64__) || \
        defined(_M_AMD64) || defined(_M_X64) || \
        defined(__INTEL__) )

extern unsigned int OPENSSL_ia32cap_P[];
#  define AESNI_CAPABLE (1<<(57-32))

int aesni_set_encrypt_key(const unsigned char *userKey, int bits,
                          AES_KEY *key);
int aesni_set_decrypt_key(const unsigned char *userKey, int bits,
                          AES_KEY *key);

void aesni_cbc_encrypt(const unsigned char *in,
                       unsigned char *out,
                       size_t length,
                       const AES_KEY *key, unsigned char *ivec, int enc);

int aesni_cbc_sha256_enc(const void *inp, void *out, size_t blocks,
                         const AES_KEY *key, unsigned char iv[16],
                         SHA256_CTX *ctx, const void *in0);
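
/*
 * aesni_cbc_sha256_enc is the "stitched" assembly routine: it interleaves
 * AES-CBC encryption of "blocks" 64-byte blocks with SHA-256 compression
 * of the data at "in0", updating "ctx" in place. Called with all-NULL
 * arguments it acts as a capability probe, returning non-zero only when a
 * stitched code path exists on the current CPU (see the
 * EVP_aes_*_cbc_hmac_sha256 getters at the bottom of this file).
 */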

#  define data(ctx) ((EVP_AES_HMAC_SHA256 *)(ctx)->cipher_data)

static int aesni_cbc_hmac_sha256_init_key(EVP_CIPHER_CTX *ctx,
                                          const unsigned char *inkey,
                                          const unsigned char *iv, int enc)
{
    EVP_AES_HMAC_SHA256 *key = data(ctx);
    int ret;

    if (enc)
        memset(&key->ks, 0, sizeof(key->ks.rd_key)),
            ret = aesni_set_encrypt_key(inkey, ctx->key_len * 8, &key->ks);
    else
        ret = aesni_set_decrypt_key(inkey, ctx->key_len * 8, &key->ks);

    SHA256_Init(&key->head);    /* handy when benchmarking */
    key->tail = key->head;
    key->md = key->head;

    key->payload_length = NO_PAYLOAD_LENGTH;

    return ret < 0 ? 0 : 1;
}

#  define STITCHED_CALL

#  if !defined(STITCHED_CALL)
#   define aes_off 0
#  endif

void sha256_block_data_order(void *c, const void *p, size_t len);
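
/*
 * Leaner SHA256_Update replacement: whole input blocks bypass the context
 * buffer and go straight to the compression function, so only the ragged
 * head and tail pay for the generic (buffering) update. The 29-bit shift
 * and the carry into Nh reproduce the 64-bit message bit counter that the
 * skipped generic code would have maintained.
 */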
static void sha256_update(SHA256_CTX *c, const void *data, size_t len)
{
    const unsigned char *ptr = data;
    size_t res;

    if ((res = c->num)) {
        res = SHA256_CBLOCK - res;
        if (len < res)
            res = len;
        SHA256_Update(c, ptr, res);
        ptr += res;
        len -= res;
    }

    res = len % SHA256_CBLOCK;
    len -= res;

    if (len) {
        sha256_block_data_order(c, ptr, len / SHA256_CBLOCK);

        ptr += len;
        c->Nh += len >> 29;
        c->Nl += len <<= 3;
        if (c->Nl < (unsigned int)len)
            c->Nh++;
    }

    if (res)
        SHA256_Update(c, ptr, res);
}

#  ifdef SHA256_Update
#   undef SHA256_Update
#  endif
#  define SHA256_Update sha256_update

#  if !defined(OPENSSL_NO_MULTIBLOCK) && EVP_CIPH_FLAG_TLS1_1_MULTIBLOCK

typedef struct {
    unsigned int A[8], B[8], C[8], D[8], E[8], F[8], G[8], H[8];
} SHA256_MB_CTX;

typedef struct {
    const unsigned char *ptr;
    int blocks;
} HASH_DESC;

void sha256_multi_block(SHA256_MB_CTX *, const HASH_DESC *, int);

typedef struct {
    const unsigned char *inp;
    unsigned char *out;
    int blocks;
    u64 iv[2];
} CIPH_DESC;

void aesni_multi_cbc_encrypt(CIPH_DESC *, void *, int);
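
/*
 * Multi-block infrastructure: sha256_multi_block hashes 4 (n4x == 1) or 8
 * (n4x == 2, AVX2) independent streams in SIMD lanes, one HASH_DESC per
 * lane, while aesni_multi_cbc_encrypt drives the matching number of
 * independent AES-CBC streams described by CIPH_DESC. A large application
 * write is split into that many smaller TLS records so that all lanes
 * stay busy.
 */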

static size_t tls1_1_multi_block_encrypt(EVP_AES_HMAC_SHA256 *key,
                                         unsigned char *out,
                                         const unsigned char *inp,
                                         size_t inp_len, int n4x)
{                               /* n4x is 1 or 2 */
    HASH_DESC hash_d[8], edges[8];
    CIPH_DESC ciph_d[8];
    unsigned char storage[sizeof(SHA256_MB_CTX) + 32];
    union {
        u64 q[16];
        u32 d[32];
        u8 c[128];
    } blocks[8];
    SHA256_MB_CTX *ctx;
    unsigned int frag, last, packlen, i, x4 = 4 * n4x, minblocks, processed =
        0;
    size_t ret = 0;
    u8 *IVs;
#   if defined(BSWAP8)
    u64 seqnum;
#   endif

    /* ask for IVs in bulk */
    if (RAND_bytes((IVs = blocks[0].c), 16 * x4) <= 0)
        return 0;

    /* align */
    ctx = (SHA256_MB_CTX *) (storage + 32 - ((size_t)storage % 32));

    frag = (unsigned int)inp_len >> (1 + n4x);
    last = (unsigned int)inp_len + frag - (frag << (1 + n4x));
    if (last > frag && ((last + 13 + 9) % 64) < (x4 - 1)) {
        frag++;
        last -= x4 - 1;
    }

    packlen = 5 + 16 + ((frag + 32 + 16) & -16);
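
    /*
     * The payload is cut into x4 fragments of "frag" bytes, the final one
     * carrying the remainder "last". Each lane's output record occupies
     * packlen = 5 (TLS header) + 16 (explicit IV) + frag + 32 (SHA-256
     * MAC) + 1..16 bytes of CBC padding, the last three rounded up to the
     * AES block size by ((frag + 32 + 16) & -16). The frag++ adjustment
     * above keeps the longer last fragment from costing the lanes an
     * extra SHA-256 block.
     */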

    /* populate descriptors with pointers and IVs */
    hash_d[0].ptr = inp;
    ciph_d[0].inp = inp;
    /* 5+16 is place for header and explicit IV */
    ciph_d[0].out = out + 5 + 16;
    memcpy(ciph_d[0].out - 16, IVs, 16);
    memcpy(ciph_d[0].iv, IVs, 16);
    IVs += 16;

    for (i = 1; i < x4; i++) {
        ciph_d[i].inp = hash_d[i].ptr = hash_d[i - 1].ptr + frag;
        ciph_d[i].out = ciph_d[i - 1].out + packlen;
        memcpy(ciph_d[i].out - 16, IVs, 16);
        memcpy(ciph_d[i].iv, IVs, 16);
        IVs += 16;
    }

#   if defined(BSWAP8)
    memcpy(blocks[0].c, key->md.data, 8);
    seqnum = BSWAP8(blocks[0].q[0]);
#   endif

    for (i = 0; i < x4; i++) {
        unsigned int len = (i == (x4 - 1) ? last : frag);
#   if !defined(BSWAP8)
        unsigned int carry, j;
#   endif

        ctx->A[i] = key->md.h[0];
        ctx->B[i] = key->md.h[1];
        ctx->C[i] = key->md.h[2];
        ctx->D[i] = key->md.h[3];
        ctx->E[i] = key->md.h[4];
        ctx->F[i] = key->md.h[5];
        ctx->G[i] = key->md.h[6];
        ctx->H[i] = key->md.h[7];

        /* fix seqnum */
#   if defined(BSWAP8)
        blocks[i].q[0] = BSWAP8(seqnum + i);
#   else
        for (carry = i, j = 8; j--;) {
            blocks[i].c[j] = ((u8 *)key->md.data)[j] + carry;
            carry = (blocks[i].c[j] - carry) >> (sizeof(carry) * 8 - 1);
        }
#   endif
        blocks[i].c[8] = ((u8 *)key->md.data)[8];
        blocks[i].c[9] = ((u8 *)key->md.data)[9];
        blocks[i].c[10] = ((u8 *)key->md.data)[10];
        /* fix length */
        blocks[i].c[11] = (u8)(len >> 8);
        blocks[i].c[12] = (u8)(len);
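
        /*
         * blocks[i].c[0..12] is now lane i's 13-byte TLS MAC header: the
         * 64-bit sequence number (bumped by i for the i-th smaller
         * record), the type/version bytes copied from the AAD template
         * buffered in key->md.data, and the fragment length. In the
         * no-BSWAP8 branch the loop adds i to the big-endian sequence
         * number one byte at a time; the shift by (bits - 1) extracts the
         * carry, which is 1 only when the byte addition wrapped.
         */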

        memcpy(blocks[i].c + 13, hash_d[i].ptr, 64 - 13);
        hash_d[i].ptr += 64 - 13;
        hash_d[i].blocks = (len - (64 - 13)) / 64;

        edges[i].ptr = blocks[i].c;
        edges[i].blocks = 1;
    }

    /* hash 13-byte headers and first 64-13 bytes of inputs */
    sha256_multi_block(ctx, edges, n4x);
    /* hash bulk inputs */
#   define MAXCHUNKSIZE 2048
#   if     MAXCHUNKSIZE % 64
#    error  "MAXCHUNKSIZE is not divisible by 64"
#   elif   MAXCHUNKSIZE
    /*
     * goal is to minimize pressure on L1 cache by moving in shorter steps,
     * so that hashed data is still in the cache by the time we encrypt it
     */
    minblocks = ((frag <= last ? frag : last) - (64 - 13)) / 64;
    if (minblocks > MAXCHUNKSIZE / 64) {
        for (i = 0; i < x4; i++) {
            edges[i].ptr = hash_d[i].ptr;
            edges[i].blocks = MAXCHUNKSIZE / 64;
            ciph_d[i].blocks = MAXCHUNKSIZE / 16;
        }
        do {
            sha256_multi_block(ctx, edges, n4x);
            aesni_multi_cbc_encrypt(ciph_d, &key->ks, n4x);

            for (i = 0; i < x4; i++) {
                edges[i].ptr = hash_d[i].ptr += MAXCHUNKSIZE;
                hash_d[i].blocks -= MAXCHUNKSIZE / 64;
                edges[i].blocks = MAXCHUNKSIZE / 64;
                ciph_d[i].inp += MAXCHUNKSIZE;
                ciph_d[i].out += MAXCHUNKSIZE;
                ciph_d[i].blocks = MAXCHUNKSIZE / 16;
                memcpy(ciph_d[i].iv, ciph_d[i].out - 16, 16);
            }
            processed += MAXCHUNKSIZE;
            minblocks -= MAXCHUNKSIZE / 64;
        } while (minblocks > MAXCHUNKSIZE / 64);
    }
#   endif
#   undef  MAXCHUNKSIZE
    sha256_multi_block(ctx, hash_d, n4x);

    memset(blocks, 0, sizeof(blocks));
    for (i = 0; i < x4; i++) {
        unsigned int len = (i == (x4 - 1) ? last : frag),
            off = hash_d[i].blocks * 64;
        const unsigned char *ptr = hash_d[i].ptr + off;

        off = (len - processed) - (64 - 13) - off; /* remainder actually */
        memcpy(blocks[i].c, ptr, off);
        blocks[i].c[off] = 0x80;
        len += 64 + 13;         /* 64 is HMAC header */
        len *= 8;               /* convert to bits */
        if (off < (64 - 8)) {
#   ifdef BSWAP4
            blocks[i].d[15] = BSWAP4(len);
#   else
            PUTU32(blocks[i].c + 60, len);
#   endif
            edges[i].blocks = 1;
        } else {
#   ifdef BSWAP4
            blocks[i].d[31] = BSWAP4(len);
#   else
            PUTU32(blocks[i].c + 124, len);
#   endif
            edges[i].blocks = 2;
        }
        edges[i].ptr = blocks[i].c;
    }
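
    /*
     * Each lane's tail is given standard SHA-256 padding: the 0x80 stop
     * byte, zeros, and the 32-bit message bit count in the last word of
     * the block (offset 60), or of a second block (offset 124) when the
     * remainder leaves no room for the 8-byte length field. The bit count
     * covers the 64-byte ipad block, the 13-byte header and the fragment.
     */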

    /* hash input tails and finalize */
    sha256_multi_block(ctx, edges, n4x);

    memset(blocks, 0, sizeof(blocks));
    for (i = 0; i < x4; i++) {
#   ifdef BSWAP4
        blocks[i].d[0] = BSWAP4(ctx->A[i]);
        ctx->A[i] = key->tail.h[0];
        blocks[i].d[1] = BSWAP4(ctx->B[i]);
        ctx->B[i] = key->tail.h[1];
        blocks[i].d[2] = BSWAP4(ctx->C[i]);
        ctx->C[i] = key->tail.h[2];
        blocks[i].d[3] = BSWAP4(ctx->D[i]);
        ctx->D[i] = key->tail.h[3];
        blocks[i].d[4] = BSWAP4(ctx->E[i]);
        ctx->E[i] = key->tail.h[4];
        blocks[i].d[5] = BSWAP4(ctx->F[i]);
        ctx->F[i] = key->tail.h[5];
        blocks[i].d[6] = BSWAP4(ctx->G[i]);
        ctx->G[i] = key->tail.h[6];
        blocks[i].d[7] = BSWAP4(ctx->H[i]);
        ctx->H[i] = key->tail.h[7];
        blocks[i].c[32] = 0x80;
        blocks[i].d[15] = BSWAP4((64 + 32) * 8);
#   else
        PUTU32(blocks[i].c + 0, ctx->A[i]);
        ctx->A[i] = key->tail.h[0];
        PUTU32(blocks[i].c + 4, ctx->B[i]);
        ctx->B[i] = key->tail.h[1];
        PUTU32(blocks[i].c + 8, ctx->C[i]);
        ctx->C[i] = key->tail.h[2];
        PUTU32(blocks[i].c + 12, ctx->D[i]);
        ctx->D[i] = key->tail.h[3];
        PUTU32(blocks[i].c + 16, ctx->E[i]);
        ctx->E[i] = key->tail.h[4];
        PUTU32(blocks[i].c + 20, ctx->F[i]);
        ctx->F[i] = key->tail.h[5];
        PUTU32(blocks[i].c + 24, ctx->G[i]);
        ctx->G[i] = key->tail.h[6];
        PUTU32(blocks[i].c + 28, ctx->H[i]);
        ctx->H[i] = key->tail.h[7];
        blocks[i].c[32] = 0x80;
        PUTU32(blocks[i].c + 60, (64 + 32) * 8);
#   endif
        edges[i].ptr = blocks[i].c;
        edges[i].blocks = 1;
    }
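
    /*
     * The loop above serializes each lane's inner digest (big-endian)
     * into a fresh 64-byte block padded for a 96-byte message (64-byte
     * opad block + 32-byte digest), while simultaneously resetting the
     * lane state to key->tail, the precomputed opad state. One more
     * multi-block pass thus yields
     * HMAC = H(key^opad || H(key^ipad || header || data)) for all lanes
     * at once.
     */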

    sha256_multi_block(ctx, edges, n4x);

    for (i = 0; i < x4; i++) {
        unsigned int len = (i == (x4 - 1) ? last : frag), pad, j;
        unsigned char *out0 = out;

        memcpy(ciph_d[i].out, ciph_d[i].inp, len - processed);
        ciph_d[i].inp = ciph_d[i].out;

        out += 5 + 16 + len;

        /* write MAC */
        PUTU32(out + 0, ctx->A[i]);
        PUTU32(out + 4, ctx->B[i]);
        PUTU32(out + 8, ctx->C[i]);
        PUTU32(out + 12, ctx->D[i]);
        PUTU32(out + 16, ctx->E[i]);
        PUTU32(out + 20, ctx->F[i]);
        PUTU32(out + 24, ctx->G[i]);
        PUTU32(out + 28, ctx->H[i]);
        out += 32;
        len += 32;

        /* pad */
        pad = 15 - len % 16;
        for (j = 0; j <= pad; j++)
            *(out++) = pad;
        len += pad + 1;

        ciph_d[i].blocks = (len - processed) / 16;
        len += 16;              /* account for explicit iv */

        /* arrange header */
        out0[0] = ((u8 *)key->md.data)[8];
        out0[1] = ((u8 *)key->md.data)[9];
        out0[2] = ((u8 *)key->md.data)[10];
        out0[3] = (u8)(len >> 8);
        out0[4] = (u8)(len);

        ret += len + 5;
        inp += frag;
    }
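
    /*
     * At this point each output slot holds header | IV | plaintext | MAC |
     * padding, with the 5-byte header written last: type and version come
     * from the buffered AAD template and the length field counts IV,
     * payload, MAC and padding. The in-place CBC pass below turns
     * everything after the header and explicit IV into ciphertext.
     */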

    aesni_multi_cbc_encrypt(ciph_d, &key->ks, n4x);

    OPENSSL_cleanse(blocks, sizeof(blocks));
    OPENSSL_cleanse(ctx, sizeof(*ctx));

    return ret;
}
#  endif

static int aesni_cbc_hmac_sha256_cipher(EVP_CIPHER_CTX *ctx,
                                        unsigned char *out,
                                        const unsigned char *in, size_t len)
{
    EVP_AES_HMAC_SHA256 *key = data(ctx);
    unsigned int l;
    size_t plen = key->payload_length, iv = 0, /* explicit IV in TLS 1.1 and
                                                * later */
        sha_off = 0;
#  if defined(STITCHED_CALL)
    size_t aes_off = 0, blocks;

    sha_off = SHA256_CBLOCK - key->md.num;
#  endif

    key->payload_length = NO_PAYLOAD_LENGTH;

    if (len % AES_BLOCK_SIZE)
        return 0;

    if (ctx->encrypt) {
        if (plen == NO_PAYLOAD_LENGTH)
            plen = len;
        else if (len !=
                 ((plen + SHA256_DIGEST_LENGTH +
                   AES_BLOCK_SIZE) & -AES_BLOCK_SIZE))
            return 0;
        else if (key->aux.tls_ver >= TLS1_1_VERSION)
            iv = AES_BLOCK_SIZE;
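
        /*
         * In TLS mode len must equal plen + 32-byte MAC + 1..16 bytes of
         * padding, rounded up to the AES block size, which is exactly the
         * record growth the EVP_CTRL_AEAD_TLS1_AAD handler quoted back to
         * the caller. From TLS 1.1 on, the first block of the input is
         * the explicit IV and is excluded from the MAC.
         */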

#  if defined(STITCHED_CALL)
        /*
         * The assembly stitch handles AVX-capable processors, but its
         * performance is ~40% worse on AMD Jaguar, for unknown reasons.
         * That processor supports AVX but not the AMD-specific XOP
         * extension, which can be used to identify it and avoid the
         * stitch. So once we establish that the CPU supports AVX, we also
         * require that it is either an XOP-capable Bulldozer-based or a
         * GenuineIntel one.
         */
        if (OPENSSL_ia32cap_P[1] & (1 << (60 - 32)) && /* AVX? */
            ((OPENSSL_ia32cap_P[1] & (1 << (43 - 32))) /* XOP? */
             | (OPENSSL_ia32cap_P[0] & (1 << 30))) &&  /* "Intel CPU"? */
            plen > (sha_off + iv) &&
            (blocks = (plen - (sha_off + iv)) / SHA256_CBLOCK)) {
            SHA256_Update(&key->md, in + iv, sha_off);

            (void)aesni_cbc_sha256_enc(in, out, blocks, &key->ks,
                                       ctx->iv, &key->md, in + iv + sha_off);
            blocks *= SHA256_CBLOCK;
            aes_off += blocks;
            sha_off += blocks;
            key->md.Nh += blocks >> 29;
            key->md.Nl += blocks <<= 3;
            if (key->md.Nl < (unsigned int)blocks)
                key->md.Nh++;
        } else {
            sha_off = 0;
        }
#  endif
        sha_off += iv;
        SHA256_Update(&key->md, in + sha_off, plen - sha_off);
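
        /*
         * The stitched call consumed "blocks" whole SHA-256 blocks of
         * payload while producing the same amount of CBC output, so both
         * offsets advance together and the message bit counter (Nl/Nh) is
         * patched up by hand, exactly as in sha256_update above. Whatever
         * ragged tail remains is hashed here and encrypted below.
         */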

        if (plen != len) {      /* "TLS" mode of operation */
            if (in != out)
                memcpy(out + aes_off, in + aes_off, plen - aes_off);

            /* calculate HMAC and append it to payload */
            SHA256_Final(out + plen, &key->md);
            key->md = key->tail;
            SHA256_Update(&key->md, out + plen, SHA256_DIGEST_LENGTH);
            SHA256_Final(out + plen, &key->md);

            /* pad the payload|hmac */
            plen += SHA256_DIGEST_LENGTH;
            for (l = len - plen - 1; plen < len; plen++)
                out[plen] = l;
            /* encrypt HMAC|padding at once */
            aesni_cbc_encrypt(out + aes_off, out + aes_off, len - aes_off,
                              &key->ks, ctx->iv, 1);
        } else {
            aesni_cbc_encrypt(in + aes_off, out + aes_off, len - aes_off,
                              &key->ks, ctx->iv, 1);
        }
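
        /*
         * Note the MAC-then-encrypt layout: the inner hash is finalized
         * into the output buffer, the outer hash is run over it starting
         * from the precomputed opad state (key->tail), and the TLS CBC
         * padding (every byte equal to the pad length) is appended before
         * the remaining payload|MAC|padding is encrypted in one CBC
         * sweep.
         */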
    } else {
        union {
            unsigned int u[SHA256_DIGEST_LENGTH / sizeof(unsigned int)];
            unsigned char c[64 + SHA256_DIGEST_LENGTH];
        } mac, *pmac;

        /* arrange cache line alignment */
        pmac = (void *)(((size_t)mac.c + 63) & ((size_t)0 - 64));
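
        /*
         * mac.c is 64 + 32 bytes so a 64-byte-aligned pmac always fits
         * inside it; computing the MAC into a cache-line-aligned buffer
         * keeps the memory access pattern of the constant-time code below
         * independent of the secret padding length.
         */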

        /* decrypt HMAC|padding at once */
        aesni_cbc_encrypt(in, out, len, &key->ks, ctx->iv, 0);

        if (plen != NO_PAYLOAD_LENGTH) { /* "TLS" mode of operation */
            size_t inp_len, mask, j, i;
            unsigned int res, maxpad, pad, bitlen;
            int ret = 1;
            union {
                unsigned int u[SHA_LBLOCK];
                unsigned char c[SHA256_CBLOCK];
            } *data = (void *)key->md.data;

            if ((key->aux.tls_aad[plen - 4] << 8 | key->aux.tls_aad[plen - 3])
                >= TLS1_1_VERSION)
                iv = AES_BLOCK_SIZE;

            if (len < (iv + SHA256_DIGEST_LENGTH + 1))
                return 0;

            /* omit explicit iv */
            out += iv;
            len -= iv;

            /* figure out payload length */
            pad = out[len - 1];
            maxpad = len - (SHA256_DIGEST_LENGTH + 1);
            maxpad |= (255 - maxpad) >> (sizeof(maxpad) * 8 - 8);
            maxpad &= 255;

            inp_len = len - (SHA256_DIGEST_LENGTH + pad + 1);
            mask = (0 - ((inp_len - len) >> (sizeof(inp_len) * 8 - 1)));
            inp_len &= mask;
            ret &= (int)mask;
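
            /*
             * Branchless sanitizing: maxpad becomes min(len - 33, 255)
             * via a sign-bit trick instead of a comparison, and mask is
             * all-ones only if the claimed pad byte is consistent with
             * the record length. A bogus pad zeroes inp_len and clears
             * ret, but processing continues so that timing does not
             * reveal which check failed (the Lucky Thirteen
             * countermeasure).
             */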

            key->aux.tls_aad[plen - 2] = inp_len >> 8;
            key->aux.tls_aad[plen - 1] = inp_len;

            /* calculate HMAC */
            key->md = key->head;
            SHA256_Update(&key->md, key->aux.tls_aad, plen);

#  if 1
            len -= SHA256_DIGEST_LENGTH; /* amend mac */
            if (len >= (256 + SHA256_CBLOCK)) {
                j = (len - (256 + SHA256_CBLOCK)) & (0 - SHA256_CBLOCK);
                j += SHA256_CBLOCK - key->md.num;
                SHA256_Update(&key->md, out, j);
                out += j;
                len -= j;
            }
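
            /*
             * Everything hashed here is public: at most 256 trailing
             * bytes (the maximum CBC padding) depend on the secret
             * padding length, so all but the last 256 + SHA256_CBLOCK
             * bytes can be hashed straight away; the extra SHA256_CBLOCK
             * keeps the remainder block-aligned for the one-shot
             * compression calls below.
             */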

            /* but pretend as if we hashed padded payload */
            bitlen = key->md.Nl + (inp_len << 3); /* at most 18 bits */
#  ifdef BSWAP4
            bitlen = BSWAP4(bitlen);
#  else
            mac.c[0] = 0;
            mac.c[1] = (unsigned char)(bitlen >> 16);
            mac.c[2] = (unsigned char)(bitlen >> 8);
            mac.c[3] = (unsigned char)bitlen;
            bitlen = mac.u[0];
#  endif

            pmac->u[0] = 0;
            pmac->u[1] = 0;
            pmac->u[2] = 0;
            pmac->u[3] = 0;
            pmac->u[4] = 0;
            pmac->u[5] = 0;
            pmac->u[6] = 0;
            pmac->u[7] = 0;

            for (res = key->md.num, j = 0; j < len; j++) {
                size_t c = out[j];
                mask = (j - inp_len) >> (sizeof(j) * 8 - 8);
                c &= mask;
                c |= 0x80 & ~mask & ~((inp_len - j) >> (sizeof(j) * 8 - 8));
                data->c[res++] = (unsigned char)c;

                if (res != SHA256_CBLOCK)
                    continue;

                /* j is not incremented yet */
                mask = 0 - ((inp_len + 7 - j) >> (sizeof(j) * 8 - 1));
                data->u[SHA_LBLOCK - 1] |= bitlen & mask;
                sha256_block_data_order(&key->md, data, 1);
                mask &= 0 - ((j - inp_len - 72) >> (sizeof(j) * 8 - 1));
                pmac->u[0] |= key->md.h[0] & mask;
                pmac->u[1] |= key->md.h[1] & mask;
                pmac->u[2] |= key->md.h[2] & mask;
                pmac->u[3] |= key->md.h[3] & mask;
                pmac->u[4] |= key->md.h[4] & mask;
                pmac->u[5] |= key->md.h[5] & mask;
                pmac->u[6] |= key->md.h[6] & mask;
                pmac->u[7] |= key->md.h[7] & mask;

                memset(data, 0, SHA256_CBLOCK);
                res = 0;
            }
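
            /*
             * Constant-time tail hashing: every remaining byte is pushed
             * through the compression function, but bytes at or beyond
             * the (secret) payload end are replaced by valid SHA-256
             * padding (0x80 at position inp_len, zeros after, bitlen in
             * the final word of the right block). Candidate digests are
             * OR-ed into pmac under a mask that is all-ones only for the
             * block that genuinely ends the payload, so the correct MAC
             * is collected without a data-dependent branch or memory
             * access.
             */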

            for (i = res; i < SHA256_CBLOCK; i++, j++)
                data->c[i] = 0;

            if (res > SHA256_CBLOCK - 8) {
                mask = 0 - ((inp_len + 8 - j) >> (sizeof(j) * 8 - 1));
                data->u[SHA_LBLOCK - 1] |= bitlen & mask;
                sha256_block_data_order(&key->md, data, 1);
                mask &= 0 - ((j - inp_len - 73) >> (sizeof(j) * 8 - 1));
                pmac->u[0] |= key->md.h[0] & mask;
                pmac->u[1] |= key->md.h[1] & mask;
                pmac->u[2] |= key->md.h[2] & mask;
                pmac->u[3] |= key->md.h[3] & mask;
                pmac->u[4] |= key->md.h[4] & mask;
                pmac->u[5] |= key->md.h[5] & mask;
                pmac->u[6] |= key->md.h[6] & mask;
                pmac->u[7] |= key->md.h[7] & mask;

                memset(data, 0, SHA256_CBLOCK);
                j += 64;
            }
            data->u[SHA_LBLOCK - 1] = bitlen;
            sha256_block_data_order(&key->md, data, 1);
            mask = 0 - ((j - inp_len - 73) >> (sizeof(j) * 8 - 1));
            pmac->u[0] |= key->md.h[0] & mask;
            pmac->u[1] |= key->md.h[1] & mask;
            pmac->u[2] |= key->md.h[2] & mask;
            pmac->u[3] |= key->md.h[3] & mask;
            pmac->u[4] |= key->md.h[4] & mask;
            pmac->u[5] |= key->md.h[5] & mask;
            pmac->u[6] |= key->md.h[6] & mask;
            pmac->u[7] |= key->md.h[7] & mask;

#  ifdef BSWAP4
            pmac->u[0] = BSWAP4(pmac->u[0]);
            pmac->u[1] = BSWAP4(pmac->u[1]);
            pmac->u[2] = BSWAP4(pmac->u[2]);
            pmac->u[3] = BSWAP4(pmac->u[3]);
            pmac->u[4] = BSWAP4(pmac->u[4]);
            pmac->u[5] = BSWAP4(pmac->u[5]);
            pmac->u[6] = BSWAP4(pmac->u[6]);
            pmac->u[7] = BSWAP4(pmac->u[7]);
#  else
            for (i = 0; i < 8; i++) {
                res = pmac->u[i];
                pmac->c[4 * i + 0] = (unsigned char)(res >> 24);
                pmac->c[4 * i + 1] = (unsigned char)(res >> 16);
                pmac->c[4 * i + 2] = (unsigned char)(res >> 8);
                pmac->c[4 * i + 3] = (unsigned char)res;
            }
#  endif
            len += SHA256_DIGEST_LENGTH;
#  else
            SHA256_Update(&key->md, out, inp_len);
            res = key->md.num;
            SHA256_Final(pmac->c, &key->md);

            {
                unsigned int inp_blocks, pad_blocks;

                /* but pretend as if we hashed padded payload */
                inp_blocks =
                    1 + ((SHA256_CBLOCK - 9 - res) >> (sizeof(res) * 8 - 1));
                res += (unsigned int)(len - inp_len);
                pad_blocks = res / SHA256_CBLOCK;
                res %= SHA256_CBLOCK;
                pad_blocks +=
                    1 + ((SHA256_CBLOCK - 9 - res) >> (sizeof(res) * 8 - 1));
                for (; inp_blocks < pad_blocks; inp_blocks++)
                    sha256_block_data_order(&key->md, data, 1);
            }
#  endif
            key->md = key->tail;
            SHA256_Update(&key->md, pmac->c, SHA256_DIGEST_LENGTH);
            SHA256_Final(pmac->c, &key->md);

            /* verify HMAC */
            out += inp_len;
            len -= inp_len;
#  if 1
            {
                unsigned char *p =
                    out + len - 1 - maxpad - SHA256_DIGEST_LENGTH;
                size_t off = out - p;
                unsigned int c, cmask;

                maxpad += SHA256_DIGEST_LENGTH;
                for (res = 0, i = 0, j = 0; j < maxpad; j++) {
                    c = p[j];
                    cmask =
                        ((int)(j - off - SHA256_DIGEST_LENGTH)) >>
                        (sizeof(int) * 8 - 1);
                    res |= (c ^ pad) & ~cmask; /* ... and padding */
                    cmask &= ((int)(off - 1 - j)) >> (sizeof(int) * 8 - 1);
                    res |= (c ^ pmac->c[i]) & cmask;
                    i += 1 & cmask;
                }
                maxpad -= SHA256_DIGEST_LENGTH;

                res = 0 - ((0 - res) >> (sizeof(res) * 8 - 1));
                ret &= (int)~res;
            }
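
            /*
             * The loop above scans the whole maxpad + 32 byte window that
             * could contain MAC and padding, comparing every byte either
             * against the expected padding value or, within the 32-byte
             * stretch starting at the true payload end, against pmac.
             * Both masks derive from arithmetic only, so the scan's
             * memory trace is identical whatever the padding length.
             */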
#  else
            for (res = 0, i = 0; i < SHA256_DIGEST_LENGTH; i++)
                res |= out[i] ^ pmac->c[i];
            res = 0 - ((0 - res) >> (sizeof(res) * 8 - 1));
            ret &= (int)~res;

            /* verify padding */
            pad = (pad & ~res) | (maxpad & res);
            out = out + len - 1 - pad;
            for (res = 0, i = 0; i < pad; i++)
                res |= out[i] ^ pad;

            res = (0 - res) >> (sizeof(res) * 8 - 1);
            ret &= (int)~res;
#  endif
            return ret;
        } else {
            SHA256_Update(&key->md, out, len);
        }
    }

    return 1;
}

static int aesni_cbc_hmac_sha256_ctrl(EVP_CIPHER_CTX *ctx, int type, int arg,
                                      void *ptr)
{
    EVP_AES_HMAC_SHA256 *key = data(ctx);
    unsigned int u_arg = (unsigned int)arg;

    switch (type) {
    case EVP_CTRL_AEAD_SET_MAC_KEY:
        {
            unsigned int i;
            unsigned char hmac_key[64];

            memset(hmac_key, 0, sizeof(hmac_key));

            if (u_arg > sizeof(hmac_key)) {
                SHA256_Init(&key->head);
                SHA256_Update(&key->head, ptr, arg);
                SHA256_Final(hmac_key, &key->head);
            } else {
                memcpy(hmac_key, ptr, arg);
            }

            for (i = 0; i < sizeof(hmac_key); i++)
                hmac_key[i] ^= 0x36; /* ipad */
            SHA256_Init(&key->head);
            SHA256_Update(&key->head, hmac_key, sizeof(hmac_key));

            for (i = 0; i < sizeof(hmac_key); i++)
                hmac_key[i] ^= 0x36 ^ 0x5c; /* opad */
            SHA256_Init(&key->tail);
            SHA256_Update(&key->tail, hmac_key, sizeof(hmac_key));
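
            /*
             * Standard HMAC key schedule: keys longer than one SHA-256
             * block are first hashed down, shorter ones zero-padded. The
             * ipad/opad states captured in key->head and key->tail let
             * every subsequent record skip re-hashing the 64-byte padded
             * key blocks.
             */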

            OPENSSL_cleanse(hmac_key, sizeof(hmac_key));

            return 1;
        }
    case EVP_CTRL_AEAD_TLS1_AAD:
        {
            unsigned char *p = ptr;
            unsigned int len = p[arg - 2] << 8 | p[arg - 1];

            if (arg != EVP_AEAD_TLS1_AAD_LEN)
                return -1;

            if (ctx->encrypt) {
                key->payload_length = len;
                if ((key->aux.tls_ver =
                     p[arg - 4] << 8 | p[arg - 3]) >= TLS1_1_VERSION) {
                    len -= AES_BLOCK_SIZE;
                    p[arg - 2] = len >> 8;
                    p[arg - 1] = len;
                }
                key->md = key->head;
                SHA256_Update(&key->md, p, arg);

                return (int)(((len + SHA256_DIGEST_LENGTH +
                               AES_BLOCK_SIZE) & -AES_BLOCK_SIZE)
                             - len);
            } else {
                memcpy(key->aux.tls_aad, ptr, arg);
                key->payload_length = arg;

                return SHA256_DIGEST_LENGTH;
            }
        }
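
    /*
     * On the encrypt side the AAD handler pre-hashes the 13-byte header
     * (with the payload length rewritten to exclude the explicit IV from
     * TLS 1.1 on) and answers with the number of bytes the record will
     * grow by: MAC plus padding, rounded up to the AES block size. On the
     * decrypt side the header is merely stashed, since the true payload
     * length is only known after decryption.
     */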
#  if !defined(OPENSSL_NO_MULTIBLOCK) && EVP_CIPH_FLAG_TLS1_1_MULTIBLOCK
    case EVP_CTRL_TLS1_1_MULTIBLOCK_MAX_BUFSIZE:
        return (int)(5 + 16 + ((arg + 32 + 16) & -16));
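
        /*
         * Worst-case output for a single multi-block fragment of arg
         * bytes: 5-byte header, 16-byte explicit IV, then payload,
         * 32-byte MAC and padding rounded up to the AES block size.
         * Callers are expected to multiply this figure by the number of
         * records they intend to interleave when sizing their buffers.
         */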
    case EVP_CTRL_TLS1_1_MULTIBLOCK_AAD:
        {
            EVP_CTRL_TLS1_1_MULTIBLOCK_PARAM *param =
                (EVP_CTRL_TLS1_1_MULTIBLOCK_PARAM *) ptr;
            unsigned int n4x = 1, x4;
            unsigned int frag, last, packlen, inp_len;

            if (u_arg < sizeof(EVP_CTRL_TLS1_1_MULTIBLOCK_PARAM))
                return -1;

            inp_len = param->inp[11] << 8 | param->inp[12];

            if (ctx->encrypt) {
                if ((param->inp[9] << 8 | param->inp[10]) < TLS1_1_VERSION)
                    return -1;

                if (inp_len) {
                    if (inp_len < 4096)
                        return 0; /* too short */

                    if (inp_len >= 8192 && OPENSSL_ia32cap_P[2] & (1 << 5))
                        n4x = 2; /* AVX2 */
                } else if ((n4x = param->interleave / 4) && n4x <= 2)
                    inp_len = param->len;
                else
                    return -1;

                key->md = key->head;
                SHA256_Update(&key->md, param->inp, 13);

                x4 = 4 * n4x;
                frag = inp_len >> (1 + n4x);
                last = inp_len + frag - (frag << (1 + n4x));
                if (last > frag && ((last + 13 + 9) % 64 < (x4 - 1))) {
                    frag++;
                    last -= x4 - 1;
                }

                packlen = 5 + 16 + ((frag + 32 + 16) & -16);
                packlen = (packlen << (1 + n4x)) - packlen;
                packlen += 5 + 16 + ((last + 32 + 16) & -16);

                param->interleave = x4;

                return (int)packlen;
            } else
                return -1;      /* not yet */
        }
    case EVP_CTRL_TLS1_1_MULTIBLOCK_ENCRYPT:
        {
            EVP_CTRL_TLS1_1_MULTIBLOCK_PARAM *param =
                (EVP_CTRL_TLS1_1_MULTIBLOCK_PARAM *) ptr;

            return (int)tls1_1_multi_block_encrypt(key, param->out,
                                                   param->inp, param->len,
                                                   param->interleave / 4);
        }
    case EVP_CTRL_TLS1_1_MULTIBLOCK_DECRYPT:
        return -1;              /* not yet */
#  endif
    default:
        return -1;
    }
}

static EVP_CIPHER aesni_128_cbc_hmac_sha256_cipher = {
#  ifdef NID_aes_128_cbc_hmac_sha256
    NID_aes_128_cbc_hmac_sha256,
#  else
    NID_undef,
#  endif
    16, 16, 16,
    EVP_CIPH_CBC_MODE | EVP_CIPH_FLAG_DEFAULT_ASN1 |
        EVP_CIPH_FLAG_AEAD_CIPHER | EVP_CIPH_FLAG_TLS1_1_MULTIBLOCK,
    aesni_cbc_hmac_sha256_init_key,
    aesni_cbc_hmac_sha256_cipher,
    NULL,
    sizeof(EVP_AES_HMAC_SHA256),
    EVP_CIPH_FLAG_DEFAULT_ASN1 ? NULL : EVP_CIPHER_set_asn1_iv,
    EVP_CIPH_FLAG_DEFAULT_ASN1 ? NULL : EVP_CIPHER_get_asn1_iv,
    aesni_cbc_hmac_sha256_ctrl,
    NULL
};

static EVP_CIPHER aesni_256_cbc_hmac_sha256_cipher = {
#  ifdef NID_aes_256_cbc_hmac_sha256
    NID_aes_256_cbc_hmac_sha256,
#  else
    NID_undef,
#  endif
    16, 32, 16,
    EVP_CIPH_CBC_MODE | EVP_CIPH_FLAG_DEFAULT_ASN1 |
        EVP_CIPH_FLAG_AEAD_CIPHER | EVP_CIPH_FLAG_TLS1_1_MULTIBLOCK,
    aesni_cbc_hmac_sha256_init_key,
    aesni_cbc_hmac_sha256_cipher,
    NULL,
    sizeof(EVP_AES_HMAC_SHA256),
    EVP_CIPH_FLAG_DEFAULT_ASN1 ? NULL : EVP_CIPHER_set_asn1_iv,
    EVP_CIPH_FLAG_DEFAULT_ASN1 ? NULL : EVP_CIPHER_get_asn1_iv,
    aesni_cbc_hmac_sha256_ctrl,
    NULL
};
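
/*
 * The getters below publish the ciphers only when runtime support is
 * present: the AESNI_CAPABLE bit covers the AES-NI instructions, and the
 * all-NULL call into aesni_cbc_sha256_enc asks the assembly module
 * whether a stitched AES-CBC/SHA-256 path exists for this CPU; otherwise
 * returning NULL tells EVP that the composite cipher is unavailable.
 */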

const EVP_CIPHER *EVP_aes_128_cbc_hmac_sha256(void)
{
    return ((OPENSSL_ia32cap_P[1] & AESNI_CAPABLE) &&
            aesni_cbc_sha256_enc(NULL, NULL, 0, NULL, NULL, NULL, NULL) ?
            &aesni_128_cbc_hmac_sha256_cipher : NULL);
}

const EVP_CIPHER *EVP_aes_256_cbc_hmac_sha256(void)
{
    return ((OPENSSL_ia32cap_P[1] & AESNI_CAPABLE) &&
            aesni_cbc_sha256_enc(NULL, NULL, 0, NULL, NULL, NULL, NULL) ?
            &aesni_256_cbc_hmac_sha256_cipher : NULL);
}
# else                          /* !AESNI */
const EVP_CIPHER *EVP_aes_128_cbc_hmac_sha256(void)
{
    return NULL;
}

const EVP_CIPHER *EVP_aes_256_cbc_hmac_sha256(void)
{
    return NULL;
}
# endif
#endif