enum {
NEED_EC_KEY = 1 << 0,
GOT_CERT_RSA_KEY_ALG = 1 << 1,
- GOT_CERT_ECDSA_KEY_ALG = 1 << 2,
+ GOT_CERT_ECDSA_KEY_ALG = 1 << 2,
GOT_EC_KEY = 1 << 3,
ENCRYPTION_AESGCM = 1 << 4,
};
} while ((size & (AES_BLOCK_SIZE - 1)) != 0);
/* Encrypt content+MAC+padding in place */
-//optimize key setup
aes_cbc_encrypt(
&tls->aes_decrypt, /* selects 128/256 */
buf - AES_BLOCK_SIZE, /* IV */
*/
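+/* note: AES-GCM record layout per RFC 5288: 8-byte explicit nonce,
+ * then ciphertext, then 16-byte auth tag. The 12-byte GCM nonce is
+ * client_write_IV[4] (implicit salt) || explicit_nonce[8], and the AAD
+ * is seq_num[8] || type || version[2] || length[2] = 13 bytes. */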
static void xwrite_encrypted_aesgcm(tls_state_t *tls, unsigned size, unsigned type)
{
-//go for [16]
- uint8_t aad[13];
+#define COUNTER(v) (*(uint32_t*)(v + 12))
+
+ uint8_t aad[13 + 3]; /* +3 creates [16] buffer, simplifying GHASH() */
uint8_t nonce[12 + 4]; /* +4 creates space for AES block counter */
uint8_t scratch[AES_BLOCK_SIZE]; //[16]
uint8_t authtag[AES_BLOCK_SIZE]; //[16]
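+	/* nonce[16] layout: salt[4] | explicit nonce[8] | block counter[4];
+	 * the whole 16 bytes can then feed aes_encrypt_one_block() directly
+	 * as the CTR-mode input block */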
aad[9] = TLS_MAJ;
aad[10] = TLS_MIN;
aad[11] = size >> 8;
- aad[12] = size & 0xff;
+ /* set aad[12], and clear aad[13..15] */
+ COUNTER(aad) = SWAP_LE32(size & 0xff);
memcpy(nonce, tls->client_write_IV, 4);
memcpy(nonce + 4, &tls->write_seq64_be, 8);
/* seq64 is not used later in this func, can increment here */
tls->write_seq64_be = SWAP_BE64(1 + SWAP_BE64(tls->write_seq64_be));
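+	/* (SWAP_BE64 to host order, increment, SWAP_BE64 back: seq64 is kept
+	 * big-endian so it can be memcpy'd straight into nonce and aad) */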
-#define COUNTER(v) (*(uint32_t*)(v + 12))
-
cnt = 1;
remaining = size;
while (remaining != 0) {
remaining -= n;
}
-//optimize fixed sizes
- aesgcm_GHASH(tls->H, aad, sizeof(aad), tls->outbuf + OUTBUF_PFX, size, authtag, sizeof(authtag));
+ aesgcm_GHASH(tls->H, aad, /*sizeof(aad),*/ tls->outbuf + OUTBUF_PFX, size, authtag /*, sizeof(authtag)*/);
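+	/* aSz/sSz arguments dropped: in aesgcm_GHASH() below, both sizes are
+	 * now fixed at AES_BLOCK_SIZE */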
COUNTER(nonce) = htonl(1);
aes_encrypt_one_block(&tls->aes_encrypt, nonce, scratch);
xorbuf(authtag, scratch, sizeof(authtag));
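+	/* per the GCM spec: tag = GHASH(H, aad, ciphertext) ^ E(key, nonce|cnt=1);
+	 * counter value 1 is reserved for the tag, data blocks start at 2 */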
static void tls_aesgcm_decrypt(tls_state_t *tls, uint8_t *buf, int size)
{
-//go for [16]
- //uint8_t aad[13];
+#define COUNTER(v) (*(uint32_t*)(v + 12))
+
+ //uint8_t aad[13 + 3]; /* +3 creates [16] buffer, simplifying GHASH() */
uint8_t nonce[12 + 4]; /* +4 creates space for AES block counter */
uint8_t scratch[AES_BLOCK_SIZE]; //[16]
//uint8_t authtag[AES_BLOCK_SIZE]; //[16]
//aad[9] = TLS_MAJ;
//aad[10] = TLS_MIN;
//aad[11] = size >> 8;
- //aad[12] = size & 0xff;
+ ///* set aad[12], and clear aad[13..15] */
+ //COUNTER(aad) = SWAP_LE32(size & 0xff);
+ //memcpy(aad, &tls->write_seq64_be, 8);
memcpy(nonce, tls->server_write_IV, 4);
memcpy(nonce + 4, buf, 8);
buf += 8;
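+	/* the record starts with the 8-byte explicit nonce; step over it so
+	 * buf points at the ciphertext proper */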
-#define COUNTER(v) (*(uint32_t*)(v + 12))
-
cnt = 1;
remaining = size;
while (remaining != 0) {
remaining -= n;
}
-////optimize fixed sizes
- //aesgcm_GHASH(tls->H, aad, sizeof(aad), tls->outbuf + OUTBUF_PFX, size, authtag, sizeof(authtag));
+ //aesgcm_GHASH(tls->H, aad, tls->outbuf + OUTBUF_PFX, size, authtag);
//COUNTER(nonce) = htonl(1);
//aes_encrypt_one_block(&tls->aes_encrypt, nonce, scratch);
//xorbuf(authtag, scratch, sizeof(authtag));
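+	/* note: the tag computation above is commented out on purpose, it seems:
+	 * bbox decrypts without verifying the GCM auth tag (presumably to save size) */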
b[i] ^= m[i];
}
-/* wolfssl-3.15.3/wolfcrypt/src/aes.c */
+/* from wolfssl-3.15.3/wolfcrypt/src/aes.c */
-static void FlattenSzInBits(byte* buf, word32 sz)
+static ALWAYS_INLINE void FlattenSzInBits(byte* buf, word32 sz)
{
/* Multiply the sz by 8 */
- word32 szHi = (sz >> (8*sizeof(sz) - 3));
+//bbox: these sizes are never even close to 2^32/8
+// word32 szHi = (sz >> (8*sizeof(sz) - 3));
sz <<= 3;
/* copy over the words of the sz into the destination buffer */
- buf[0] = (szHi >> 24) & 0xff;
- buf[1] = (szHi >> 16) & 0xff;
- buf[2] = (szHi >> 8) & 0xff;
- buf[3] = szHi & 0xff;
- buf[4] = (sz >> 24) & 0xff;
- buf[5] = (sz >> 16) & 0xff;
- buf[6] = (sz >> 8) & 0xff;
- buf[7] = sz & 0xff;
+// buf[0] = (szHi >> 24) & 0xff;
+// buf[1] = (szHi >> 16) & 0xff;
+// buf[2] = (szHi >> 8) & 0xff;
+// buf[3] = szHi & 0xff;
+ move_to_unaligned32(buf, 0);
+// buf[4] = (sz >> 24) & 0xff;
+// buf[5] = (sz >> 16) & 0xff;
+// buf[6] = (sz >> 8) & 0xff;
+// buf[7] = sz & 0xff;
+ move_to_unaligned32(buf + 4, SWAP_BE32(sz));
}
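+/* GHASH's final block packs aSz*8 and cSz*8 as two 64-bit big-endian
+ * values; FlattenSzInBits() above writes one such value per call */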
static void RIGHTSHIFTX(byte* x)
XMEMCPY(X, Z, AES_BLOCK_SIZE);
}
-void FAST_FUNC aesgcm_GHASH(byte* h, const byte* a, unsigned aSz, const byte* c,
- unsigned cSz, byte* s, unsigned sSz)
+//bbox:
+// for TLS AES-GCM, a (which is the AAD) is always 13 bytes long, and bbox code provides
+// extra 3 zeroed bytes, making it a[16], or a[AES_BLOCK_SIZE].
+// Resulting auth tag in s is also always AES_BLOCK_SIZE bytes.
+//
+// This allows some simplifications.
+#define aSz AES_BLOCK_SIZE
+#define sSz AES_BLOCK_SIZE
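+// (keeping the dropped parameters as constants lets the rest of the body
+// compile unchanged, with all size arithmetic constant-folded)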
+void FAST_FUNC aesgcm_GHASH(byte* h,
+ const byte* a, //unsigned aSz,
+ const byte* c, unsigned cSz,
+ byte* s //, unsigned sSz
+)
{
byte x[AES_BLOCK_SIZE];
byte scratch[AES_BLOCK_SIZE];
word32 blocks, partial;
//was: byte* h = aes->H;
- XMEMSET(x, 0, AES_BLOCK_SIZE);
+ //XMEMSET(x, 0, AES_BLOCK_SIZE);
/* Hash in A, the Additional Authentication Data */
- if (aSz != 0 && a != NULL) {
- blocks = aSz / AES_BLOCK_SIZE;
- partial = aSz % AES_BLOCK_SIZE;
- while (blocks--) {
- xorbuf(x, a, AES_BLOCK_SIZE);
- GMULT(x, h);
- a += AES_BLOCK_SIZE;
- }
- if (partial != 0) {
- XMEMSET(scratch, 0, AES_BLOCK_SIZE);
- XMEMCPY(scratch, a, partial);
- xorbuf(x, scratch, AES_BLOCK_SIZE);
+// if (aSz != 0 && a != NULL) {
+// blocks = aSz / AES_BLOCK_SIZE;
+// partial = aSz % AES_BLOCK_SIZE;
+// while (blocks--) {
+	//xorbuf(x, a, AES_BLOCK_SIZE);
+	XMEMCPY(x, a, AES_BLOCK_SIZE); // memcpy(x,a) = memset(x,0)+xorbuf(x,a)
GMULT(x, h);
- }
- }
+// a += AES_BLOCK_SIZE;
+// }
+// if (partial != 0) {
+// XMEMSET(scratch, 0, AES_BLOCK_SIZE);
+// XMEMCPY(scratch, a, partial);
+// xorbuf(x, scratch, AES_BLOCK_SIZE);
+// GMULT(x, h);
+// }
+// }
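+// with aSz == AES_BLOCK_SIZE, blocks = 16/16 = 1 and partial = 16%16 = 0,
+// so the old AAD loop collapses to the single XMEMCPY + GMULT above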
/* Hash in C, the Ciphertext */
- if (cSz != 0 && c != NULL) {
+ if (cSz != 0 /*&& c != NULL*/) {
blocks = cSz / AES_BLOCK_SIZE;
partial = cSz % AES_BLOCK_SIZE;
while (blocks--) {