The GCM code has been modularized so that it can be shared by multiple
algorithms.
A fixed-size IV buffer is now used instead of one that is allocated.
The IV is no longer copied into the low-level struct until the update call
(an iv_state field tracks this).
Hardware-specific methods have been added to a PROV_GCM_HW object.
The S390 code has been reworked so that it only contains methods that can
be accessed in a modular way; equivalent generic methods exist for the
other platforms.
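
For illustration, a sketch of the intended usage pattern (not code from
this change): a platform-specific method table is selected once and is
then driven through the common PROV_GCM_HW interface:

    const PROV_GCM_HW *hw = PROV_AES_HW_gcm(keybits);
    hw->setkey(ctx, key, keylen);
    hw->setiv(ctx, iv, ivlen);
    hw->cipherupdate(ctx, in, len, out);
    hw->cipherfinal(ctx, tag);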
Reviewed-by: Matt Caswell <matt@openssl.org>
Reviewed-by: Patrick Steuer <patrick.steuer@de.ibm.com>
(Merged from https://github.com/openssl/openssl/pull/9231)
PROV_R_CIPHER_OPERATION_FAILED:102:cipher operation failed
PROV_R_FAILED_TO_GET_PARAMETER:103:failed to get parameter
PROV_R_FAILED_TO_SET_PARAMETER:104:failed to set parameter
+PROV_R_INVALID_AAD:108:invalid aad
+PROV_R_INVALID_IVLEN:109:invalid ivlen
PROV_R_INVALID_KEYLEN:105:invalid keylen
+PROV_R_INVALID_TAG:110:invalid tag
PROV_R_OUTPUT_BUFFER_TOO_SMALL:106:output buffer too small
PROV_R_WRONG_FINAL_BLOCK_LENGTH:107:wrong final block length
RAND_R_ADDITIONAL_INPUT_TOO_LONG:102:additional input too long
/*
- * Copyright 1995-2018 The OpenSSL Project Authors. All Rights Reserved.
+ * Copyright 1995-2019 The OpenSSL Project Authors. All Rights Reserved.
*
* Licensed under the Apache License 2.0 (the "License"). You may not use
* this file except in compliance with the License. You can obtain a copy
case NID_aes_256_ctr:
case NID_aes_192_ctr:
case NID_aes_128_ctr:
+ case NID_aes_256_gcm:
+ case NID_aes_192_gcm:
+ case NID_aes_128_gcm:
+ case NID_aria_256_gcm:
+ case NID_aria_192_gcm:
+ case NID_aria_128_gcm:
break;
default:
goto legacy;
int EVP_CIPHER_CTX_iv_length(const EVP_CIPHER_CTX *ctx)
{
- return EVP_CIPHER_iv_length(ctx->cipher);
+ int ok, v = EVP_CIPHER_iv_length(ctx->cipher);
+ OSSL_PARAM params[2] = { OSSL_PARAM_END, OSSL_PARAM_END };
+
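+    /* Ask the provider for the IV length in use, starting from the default */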
+ params[0] = OSSL_PARAM_construct_int(OSSL_CIPHER_PARAM_IVLEN, &v);
+ ok = evp_do_ciph_ctx_getparams(ctx->cipher, ctx->provctx, params);
+
+ return ok != 0 ? v : -1;
}
const unsigned char *EVP_CIPHER_CTX_original_iv(const EVP_CIPHER_CTX *ctx)
ENDIF
ENDIF
-$COMMON=cbc128.c ctr128.c cfb128.c ofb128.c $MODESASM
+$COMMON=cbc128.c ctr128.c cfb128.c ofb128.c gcm128.c $MODESASM
SOURCE[../../libcrypto]=$COMMON \
- cts128.c gcm128.c ccm128.c xts128.c wrap128.c ocb128.c siv128.c
+ cts128.c ccm128.c xts128.c wrap128.c ocb128.c siv128.c
DEFINE[../../libcrypto]=$MODESDEF
SOURCE[../../providers/fips]=$COMMON
DEFINE[../../providers/fips]=$MODESDEF
the remaining parameters in subsequent calls, all of which have B<type>
set to NULL. This is done when the default cipher parameters are not
appropriate.
+For EVP_CIPH_GCM_MODE the IV will be generated internally if it is not
+specified.
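+The generated IV can subsequently be retrieved from the context, for
+example via the B<OSSL_CIPHER_PARAM_IV> parameter.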
EVP_EncryptUpdate() encrypts B<inl> bytes from the buffer B<in> and
writes the encrypted version to B<out>. This function can be called
#include <openssl/core_names.h>
#include <openssl/evp.h>
#include <openssl/params.h>
+#include <openssl/rand.h>
#include "internal/cryptlib.h"
#include "internal/provider_algs.h"
#include "ciphers_locl.h"
return 1;
}
-#define IMPLEMENT_cipher(lcmode, UCMODE, flags, kbits, blkbits, ivbits) \
- static OSSL_OP_cipher_get_params_fn aes_##kbits##_##lcmode##_get_params; \
- static int aes_##kbits##_##lcmode##_get_params(OSSL_PARAM params[]) \
- { \
- OSSL_PARAM *p; \
- \
- p = OSSL_PARAM_locate(params, OSSL_CIPHER_PARAM_MODE); \
- if (p != NULL) { \
- if (!OSSL_PARAM_set_int(p, EVP_CIPH_##UCMODE##_MODE)) \
- return 0; \
- } \
- p = OSSL_PARAM_locate(params, OSSL_CIPHER_PARAM_FLAGS); \
- if (p != NULL) { \
- if (!OSSL_PARAM_set_ulong(p, (flags))) \
- return 0; \
- } \
- p = OSSL_PARAM_locate(params, OSSL_CIPHER_PARAM_KEYLEN); \
- if (p != NULL) { \
- if (!OSSL_PARAM_set_int(p, (kbits) / 8)) \
- return 0; \
- } \
- p = OSSL_PARAM_locate(params, OSSL_CIPHER_PARAM_BLOCK_SIZE); \
- if (p != NULL) { \
- if (!OSSL_PARAM_set_int(p, (blkbits) / 8)) \
- return 0; \
- } \
- p = OSSL_PARAM_locate(params, OSSL_CIPHER_PARAM_IVLEN); \
- if (p != NULL) { \
- if (!OSSL_PARAM_set_int(p, (ivbits) / 8)) \
- return 0; \
- } \
- \
- return 1; \
- } \
- static OSSL_OP_cipher_newctx_fn aes_##kbits##_##lcmode##_newctx; \
- static void *aes_##kbits##_##lcmode##_newctx(void *provctx) \
- { \
- PROV_AES_KEY *ctx = OPENSSL_zalloc(sizeof(*ctx)); \
- \
- ctx->pad = 1; \
- ctx->keylen = ((kbits) / 8); \
- ctx->ciph = PROV_AES_CIPHER_##lcmode(ctx->keylen); \
- ctx->mode = EVP_CIPH_##UCMODE##_MODE; \
- return ctx; \
+static void *aes_new_ctx(void *provctx, size_t mode, size_t kbits,
+ const PROV_AES_CIPHER *ciph)
+{
+    PROV_AES_KEY *ctx = OPENSSL_zalloc(sizeof(*ctx));
+
+    if (ctx != NULL) {
+        ctx->pad = 1;
+        ctx->keylen = kbits / 8;
+        ctx->ciph = ciph;
+        ctx->mode = mode;
+    }
+    return ctx;
+}
+
+int aes_get_params(OSSL_PARAM params[], int mode, unsigned long flags,
+                   int kbits, int blkbits, int ivbits)
+{
+ OSSL_PARAM *p;
+
+ p = OSSL_PARAM_locate(params, OSSL_CIPHER_PARAM_MODE);
+ if (p != NULL) {
+        if (!OSSL_PARAM_set_int(p, mode))
+ return 0;
+ }
+ p = OSSL_PARAM_locate(params, OSSL_CIPHER_PARAM_FLAGS);
+ if (p != NULL) {
+ if (!OSSL_PARAM_set_ulong(p, flags))
+ return 0;
+ }
+ p = OSSL_PARAM_locate(params, OSSL_CIPHER_PARAM_KEYLEN);
+ if (p != NULL) {
+ if (!OSSL_PARAM_set_int(p, kbits / 8))
+ return 0;
+ }
+ p = OSSL_PARAM_locate(params, OSSL_CIPHER_PARAM_BLOCK_SIZE);
+ if (p != NULL) {
+ if (!OSSL_PARAM_set_int(p, blkbits / 8))
+ return 0;
+ }
+ p = OSSL_PARAM_locate(params, OSSL_CIPHER_PARAM_IVLEN);
+ if (p != NULL) {
+ if (!OSSL_PARAM_set_int(p, ivbits / 8))
+ return 0;
+ }
+ return 1;
+}
+
+#define IMPLEMENT_cipher(lcmode, UCMODE, flags, kbits, blkbits, ivbits) \
+ static OSSL_OP_cipher_get_params_fn aes_##kbits##_##lcmode##_get_params; \
+ static int aes_##kbits##_##lcmode##_get_params(OSSL_PARAM params[]) \
+ { \
+ return aes_get_params(params, EVP_CIPH_##UCMODE##_MODE, flags, kbits, \
+ blkbits, ivbits); \
+ } \
+ static OSSL_OP_cipher_newctx_fn aes_##kbits##_##lcmode##_newctx; \
+ static void *aes_##kbits##_##lcmode##_newctx(void *provctx) \
+ { \
+ return aes_new_ctx(provctx, EVP_CIPH_##UCMODE##_MODE, kbits, \
+ PROV_AES_CIPHER_##lcmode(kbits / 8)); \
}
/* ECB */
PROV_AES_KEY *ctx = (PROV_AES_KEY *)vctx;
OSSL_PARAM *p;
+ p = OSSL_PARAM_locate(params, OSSL_CIPHER_PARAM_IVLEN);
+ if (p != NULL) {
+ if (!OSSL_PARAM_set_int(p, AES_BLOCK_SIZE))
+ return 0;
+ }
p = OSSL_PARAM_locate(params, OSSL_CIPHER_PARAM_PADDING);
if (p != NULL && !OSSL_PARAM_set_int(p, ctx->pad)) {
PROVerr(PROV_F_AES_CTX_GET_PARAMS, PROV_R_FAILED_TO_SET_PARAMETER);
* in the file LICENSE in the source distribution or at
* https://www.openssl.org/source/license.html
*/
-
+#include <string.h>
+#include <assert.h>
#include <openssl/opensslconf.h>
#include <openssl/crypto.h>
#include <openssl/err.h>
-#include <string.h>
-#include <assert.h>
#include <openssl/aes.h>
-#include "internal/modes_int.h"
-#include "internal/evp_int.h"
#include <openssl/rand.h>
#include <openssl/cmac.h>
#include "ciphers_locl.h"
+#include "internal/evp_int.h"
#include "internal/providercommonerr.h"
#include "internal/aes_platform.h"
LIBS=../../../libcrypto
-SOURCE[../../../libcrypto]=\
- block.c aes.c aes_basic.c
+$COMMON=block.c aes.c aes_basic.c gcm.c gcm_hw.c
+
+SOURCE[../../../libcrypto]=$COMMON
INCLUDE[../../../libcrypto]=. ../../../crypto
-SOURCE[../../fips]=\
- block.c aes.c aes_basic.c
+SOURCE[../../fips]=$COMMON
INCLUDE[../../fips]=. ../../../crypto
--- /dev/null
+
+/*
+ * Copyright 2019 The OpenSSL Project Authors. All Rights Reserved.
+ *
+ * Licensed under the Apache License 2.0 (the "License"). You may not use
+ * this file except in compliance with the License. You can obtain a copy
+ * in the file LICENSE in the source distribution or at
+ * https://www.openssl.org/source/license.html
+ */
+
+#include <openssl/aes.h>
+
+typedef struct prov_gcm_hw_st PROV_GCM_HW;
+
+#define GCM_IV_DEFAULT_SIZE 12 /* IVs for GCM should normally be 12 bytes */
+#define GCM_IV_MAX_SIZE 64
+#define GCM_TAG_MAX_SIZE 16
+
+typedef struct prov_gcm_ctx_st {
+ int enc; /* Set to 1 if we are encrypting or 0 otherwise */
+ int mode; /* The mode that we are using */
+ size_t keylen;
+ int ivlen;
+ size_t ivlen_min;
+ int taglen;
+ int key_set; /* Set if key initialised */
+ int iv_state; /* set to one of IV_STATE_XXX */
+ int iv_gen_rand; /* No IV was specified, so generate a rand IV */
+ int iv_gen; /* It is OK to generate IVs */
+ int tls_aad_pad_sz;
+ int tls_aad_len; /* TLS AAD length */
+ uint64_t tls_enc_records; /* Number of TLS records encrypted */
+
+ /*
+ * num contains the number of bytes of |iv| which are valid for modes that
+ * manage partial blocks themselves.
+ */
+ size_t num;
+ size_t bufsz; /* Number of bytes in buf */
+ uint64_t flags;
+
+ unsigned int pad : 1; /* Whether padding should be used or not */
+
+    unsigned char iv[GCM_IV_MAX_SIZE]; /* Buffer to use for IVs */
+    /* Buffer of partial blocks processed via update calls */
+    unsigned char buf[AES_BLOCK_SIZE];
+
+ OPENSSL_CTX *libctx; /* needed for rand calls */
+ const PROV_GCM_HW *hw; /* hardware specific methods */
+ GCM128_CONTEXT gcm;
+ ctr128_f ctr;
+ const void *ks;
+} PROV_GCM_CTX;
+
+typedef struct prov_aes_gcm_ctx_st {
+ PROV_GCM_CTX base; /* must be first entry in struct */
+ union {
+ OSSL_UNION_ALIGN;
+ AES_KEY ks;
+ } ks; /* AES key schedule to use */
+
+ /* Platform specific data */
+ union {
+ int dummy;
+#if defined(OPENSSL_CPUID_OBJ) && defined(__s390__)
+ struct {
+ union {
+ OSSL_UNION_ALIGN;
+ S390X_KMA_PARAMS kma;
+ } param;
+ unsigned int fc;
+ unsigned char ares[16];
+ unsigned char mres[16];
+ unsigned char kres[16];
+ int areslen;
+ int mreslen;
+ int kreslen;
+ int res;
+ } s390x;
+#endif /* defined(OPENSSL_CPUID_OBJ) && defined(__s390__) */
+ } plat;
+} PROV_AES_GCM_CTX;
+
+OSSL_CIPHER_FUNC(int, GCM_setkey, (PROV_GCM_CTX *ctx, const unsigned char *key,
+ size_t keylen));
+OSSL_CIPHER_FUNC(int, GCM_setiv, (PROV_GCM_CTX *dat, const unsigned char *iv,
+ size_t ivlen));
+OSSL_CIPHER_FUNC(int, GCM_aadupdate, (PROV_GCM_CTX *ctx,
+ const unsigned char *aad, size_t aadlen));
+OSSL_CIPHER_FUNC(int, GCM_cipherupdate, (PROV_GCM_CTX *ctx,
+ const unsigned char *in, size_t len,
+ unsigned char *out));
+OSSL_CIPHER_FUNC(int, GCM_cipherfinal, (PROV_GCM_CTX *ctx, unsigned char *tag));
+OSSL_CIPHER_FUNC(int, GCM_oneshot, (PROV_GCM_CTX *ctx, unsigned char *aad,
+ size_t aad_len, const unsigned char *in,
+ size_t in_len, unsigned char *out,
+ unsigned char *tag, size_t taglen));
+struct prov_gcm_hw_st {
+ OSSL_GCM_setkey_fn setkey;
+ OSSL_GCM_setiv_fn setiv;
+ OSSL_GCM_aadupdate_fn aadupdate;
+ OSSL_GCM_cipherupdate_fn cipherupdate;
+ OSSL_GCM_cipherfinal_fn cipherfinal;
+ OSSL_GCM_oneshot_fn oneshot;
+};
+const PROV_GCM_HW *PROV_AES_HW_gcm(size_t keybits);
+
+#if !defined(OPENSSL_NO_ARIA) && !defined(FIPS_MODE)
+
+#include "internal/aria.h"
+
+typedef struct prov_aria_gcm_ctx_st {
+ PROV_GCM_CTX base; /* must be first entry in struct */
+ union {
+ OSSL_UNION_ALIGN;
+ ARIA_KEY ks;
+ } ks;
+} PROV_ARIA_GCM_CTX;
+const PROV_GCM_HW *PROV_ARIA_HW_gcm(size_t keybits);
+
+#endif /* !defined(OPENSSL_NO_ARIA) && !defined(FIPS_MODE) */
-
/*
* Copyright 2019 The OpenSSL Project Authors. All Rights Reserved.
*
* https://www.openssl.org/source/license.html
*/
+#include <openssl/opensslconf.h>
#include <openssl/aes.h>
-#include <openssl/modes.h>
+#include <openssl/params.h>
#include "internal/cryptlib.h"
+#include "internal/modes_int.h"
+
+#if defined(OPENSSL_CPUID_OBJ) && defined(__s390__)
+/*-
+ * KMA-GCM-AES parameter block - begin
+ * (see z/Architecture Principles of Operation >= SA22-7832-11)
+ */
+typedef struct S390X_kma_params_st {
+ unsigned char reserved[12];
+ union {
+ unsigned int w;
+ unsigned char b[4];
+ } cv; /* 32 bit counter value */
+ union {
+ unsigned long long g[2];
+ unsigned char b[16];
+ } t; /* tag */
+ unsigned char h[16]; /* hash subkey */
+ unsigned long long taadl; /* total AAD length */
+ unsigned long long tpcl; /* total plaintxt/ciphertxt len */
+ union {
+ unsigned long long g[2];
+ unsigned int w[4];
+ } j0; /* initial counter value */
+ unsigned char k[32]; /* key */
+} S390X_KMA_PARAMS;
+
+#endif
typedef struct prov_aes_cipher_st PROV_AES_CIPHER;
+#define IV_STATE_UNINITIALISED 0 /* initial state: the IV has not been set */
+#define IV_STATE_BUFFERED 1 /* iv has been copied to the iv buffer */
+#define IV_STATE_COPIED 2 /* iv has been copied from the iv buffer */
+#define IV_STATE_FINISHED 3 /* the iv has been used - so don't reuse it */
+
typedef struct prov_aes_key_st {
union {
OSSL_UNION_ALIGN;
size_t inl);
};
+#define OSSL_CIPHER_FUNC(type, name, args) typedef type (* OSSL_##name##_fn)args
+
+#include "ciphers_gcm.h"
+
const PROV_AES_CIPHER *PROV_AES_CIPHER_ecb(size_t keylen);
const PROV_AES_CIPHER *PROV_AES_CIPHER_cbc(size_t keylen);
const PROV_AES_CIPHER *PROV_AES_CIPHER_ofb(size_t keylen);
const unsigned char **in, size_t *inlen);
void padblock(unsigned char *buf, size_t *buflen, size_t blocksize);
int unpadblock(unsigned char *buf, size_t *buflen, size_t blocksize);
+int aes_get_params(OSSL_PARAM params[], int mode, unsigned long flags,
+                   int kbits, int blkbits, int ivbits);
--- /dev/null
+/*
+ * Copyright 2019 The OpenSSL Project Authors. All Rights Reserved.
+ *
+ * Licensed under the Apache License 2.0 (the "License"). You may not use
+ * this file except in compliance with the License. You can obtain a copy
+ * in the file LICENSE in the source distribution or at
+ * https://www.openssl.org/source/license.html
+ */
+
+#include <openssl/evp.h>
+#include <openssl/params.h>
+#include <openssl/core_numbers.h>
+#include <openssl/core_names.h>
+#include "internal/rand_int.h"
+#include "internal/provider_algs.h"
+#include "internal/provider_ctx.h"
+#include "internal/providercommonerr.h"
+#include "ciphers_locl.h"
+
+/* TODO(3.0) Figure out what flags are really needed */
+#define AEAD_GCM_FLAGS (EVP_CIPH_FLAG_AEAD_CIPHER | EVP_CIPH_FLAG_DEFAULT_ASN1 \
+ | EVP_CIPH_CUSTOM_IV | EVP_CIPH_FLAG_CUSTOM_CIPHER \
+ | EVP_CIPH_ALWAYS_CALL_INIT | EVP_CIPH_CTRL_INIT \
+ | EVP_CIPH_CUSTOM_COPY)
+
+static OSSL_OP_cipher_encrypt_init_fn gcm_einit;
+static OSSL_OP_cipher_decrypt_init_fn gcm_dinit;
+static OSSL_OP_cipher_ctx_get_params_fn gcm_ctx_get_params;
+static OSSL_OP_cipher_ctx_set_params_fn gcm_ctx_set_params;
+static OSSL_OP_cipher_cipher_fn gcm_cipher;
+static OSSL_OP_cipher_update_fn gcm_stream_update;
+static OSSL_OP_cipher_final_fn gcm_stream_final;
+
+static int gcm_tls_init(PROV_GCM_CTX *dat, unsigned char *aad, size_t aad_len);
+static int gcm_tls_iv_set_fixed(PROV_GCM_CTX *ctx, unsigned char *iv,
+ size_t len);
+static int gcm_tls_cipher(PROV_GCM_CTX *ctx, unsigned char *out, size_t *padlen,
+ const unsigned char *in, size_t len);
+static int gcm_cipher_internal(PROV_GCM_CTX *ctx, unsigned char *out,
+ size_t *padlen, const unsigned char *in,
+ size_t len);
+
+static void gcm_initctx(void *provctx, PROV_GCM_CTX *ctx, size_t keybits,
+ const PROV_GCM_HW *hw, size_t ivlen_min)
+{
+ ctx->pad = 1;
+ ctx->mode = EVP_CIPH_GCM_MODE;
+ ctx->taglen = -1;
+ ctx->tls_aad_len = -1;
+ ctx->ivlen_min = ivlen_min;
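+    /* Default IV length is 12 bytes: 4 byte fixed part + 8 byte explicit part */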
+ ctx->ivlen = (EVP_GCM_TLS_FIXED_IV_LEN + EVP_GCM_TLS_EXPLICIT_IV_LEN);
+ ctx->keylen = keybits / 8;
+ ctx->hw = hw;
+ ctx->libctx = PROV_LIBRARY_CONTEXT_OF(provctx);
+}
+
+static void gcm_deinitctx(PROV_GCM_CTX *ctx)
+{
+ OPENSSL_cleanse(ctx->iv, sizeof(ctx->iv));
+}
+
+static int gcm_init(void *vctx, const unsigned char *key, size_t keylen,
+ const unsigned char *iv, size_t ivlen, int enc)
+{
+ PROV_GCM_CTX *ctx = (PROV_GCM_CTX *)vctx;
+
+ ctx->enc = enc;
+
+ if (iv != NULL) {
+ if (ivlen < ctx->ivlen_min || ivlen > sizeof(ctx->iv)) {
+ PROVerr(0, PROV_R_INVALID_IVLEN);
+ return 0;
+ }
+ ctx->ivlen = ivlen;
+ memcpy(ctx->iv, iv, ctx->ivlen);
+ ctx->iv_state = IV_STATE_BUFFERED;
+ }
+
+ if (key != NULL) {
+ if (keylen != ctx->keylen) {
+ PROVerr(0, PROV_R_INVALID_KEYLEN);
+ return 0;
+ }
+ return ctx->hw->setkey(ctx, key, ctx->keylen);
+ }
+ return 1;
+}
+
+static int gcm_einit(void *vctx, const unsigned char *key, size_t keylen,
+ const unsigned char *iv, size_t ivlen)
+{
+ return gcm_init(vctx, key, keylen, iv, ivlen, 1);
+}
+
+static int gcm_dinit(void *vctx, const unsigned char *key, size_t keylen,
+ const unsigned char *iv, size_t ivlen)
+{
+ return gcm_init(vctx, key, keylen, iv, ivlen, 0);
+}
+
+static int gcm_ctx_get_params(void *vctx, OSSL_PARAM params[])
+{
+ PROV_GCM_CTX *ctx = (PROV_GCM_CTX *)vctx;
+ OSSL_PARAM *p;
+ size_t sz;
+
+ p = OSSL_PARAM_locate(params, OSSL_CIPHER_PARAM_IVLEN);
+ if (p != NULL) {
+ if (!OSSL_PARAM_set_int(p, ctx->ivlen))
+ return 0;
+ }
+ p = OSSL_PARAM_locate(params, OSSL_CIPHER_PARAM_KEYLEN);
+ if (p != NULL && !OSSL_PARAM_set_int(p, ctx->keylen)) {
+ PROVerr(0, PROV_R_FAILED_TO_SET_PARAMETER);
+ return 0;
+ }
+
+ p = OSSL_PARAM_locate(params, OSSL_CIPHER_PARAM_IV);
+ if (p != NULL) {
+ if (ctx->iv_gen != 1 && ctx->iv_gen_rand != 1)
+ return 0;
+ if (ctx->ivlen != (int)p->data_size) {
+ PROVerr(0, PROV_R_INVALID_IVLEN);
+ return 0;
+ }
+ if (!OSSL_PARAM_set_octet_string(p, ctx->iv, ctx->ivlen)) {
+ PROVerr(0, PROV_R_FAILED_TO_SET_PARAMETER);
+ return 0;
+ }
+ }
+
+ p = OSSL_PARAM_locate(params, OSSL_CIPHER_PARAM_AEAD_TLS1_AAD_PAD);
+ if (p != NULL && !OSSL_PARAM_set_size_t(p, ctx->tls_aad_pad_sz)) {
+ PROVerr(0, PROV_R_FAILED_TO_SET_PARAMETER);
+ return 0;
+ }
+ p = OSSL_PARAM_locate(params, OSSL_CIPHER_PARAM_AEAD_TAG);
+ if (p != NULL) {
+ sz = p->data_size;
+ if (sz == 0 || sz > EVP_GCM_TLS_TAG_LEN || !ctx->enc || ctx->taglen < 0) {
+ PROVerr(0, PROV_R_INVALID_TAG);
+ return 0;
+ }
+ if (!OSSL_PARAM_set_octet_string(p, ctx->buf, sz)) {
+ PROVerr(0, PROV_R_FAILED_TO_SET_PARAMETER);
+ return 0;
+ }
+ }
+ return 1;
+}
+
+static int gcm_ctx_set_params(void *vctx, const OSSL_PARAM params[])
+{
+ PROV_GCM_CTX *ctx = (PROV_GCM_CTX *)vctx;
+ const OSSL_PARAM *p;
+ size_t sz;
+ void *vp;
+
+ p = OSSL_PARAM_locate_const(params, OSSL_CIPHER_PARAM_AEAD_TAG);
+ if (p != NULL) {
+ vp = ctx->buf;
+ if (!OSSL_PARAM_get_octet_string(p, &vp, EVP_GCM_TLS_TAG_LEN, &sz)) {
+ PROVerr(0, PROV_R_FAILED_TO_GET_PARAMETER);
+ return 0;
+ }
+ if (sz == 0 || ctx->enc) {
+ PROVerr(0, PROV_R_INVALID_TAG);
+ return 0;
+ }
+ ctx->taglen = sz;
+ }
+
+ p = OSSL_PARAM_locate_const(params, OSSL_CIPHER_PARAM_AEAD_IVLEN);
+ if (p != NULL) {
+ if (!OSSL_PARAM_get_size_t(p, &sz)) {
+ PROVerr(0, PROV_R_FAILED_TO_GET_PARAMETER);
+ return 0;
+ }
+ if (sz == 0 || sz > sizeof(ctx->iv)) {
+ PROVerr(0, PROV_R_INVALID_IVLEN);
+ return 0;
+ }
+ ctx->ivlen = sz;
+ }
+
+ p = OSSL_PARAM_locate_const(params, OSSL_CIPHER_PARAM_AEAD_TLS1_AAD);
+ if (p != NULL) {
+ if (p->data_type != OSSL_PARAM_OCTET_STRING) {
+ PROVerr(0, PROV_R_FAILED_TO_GET_PARAMETER);
+ return 0;
+ }
+ sz = gcm_tls_init(ctx, p->data, p->data_size);
+ if (sz == 0) {
+ PROVerr(0, PROV_R_INVALID_AAD);
+ return 0;
+ }
+ ctx->tls_aad_pad_sz = sz;
+ }
+
+ p = OSSL_PARAM_locate_const(params, OSSL_CIPHER_PARAM_AEAD_TLS1_IV_FIXED);
+ if (p != NULL) {
+ if (p->data_type != OSSL_PARAM_OCTET_STRING) {
+ PROVerr(0, PROV_R_FAILED_TO_GET_PARAMETER);
+ return 0;
+ }
+ if (gcm_tls_iv_set_fixed(ctx, p->data, p->data_size) == 0) {
+ PROVerr(0, PROV_R_FAILED_TO_GET_PARAMETER);
+ return 0;
+ }
+ }
+
+ return 1;
+}
+
+static int gcm_stream_update(void *vctx, unsigned char *out, size_t *outl,
+ size_t outsize, const unsigned char *in,
+ size_t inl)
+{
+ PROV_GCM_CTX *ctx = (PROV_GCM_CTX *)vctx;
+
+ if (outsize < inl) {
+ PROVerr(0, PROV_R_OUTPUT_BUFFER_TOO_SMALL);
+ return -1;
+ }
+
+ if (gcm_cipher_internal(ctx, out, outl, in, inl) <= 0) {
+ PROVerr(0, PROV_R_CIPHER_OPERATION_FAILED);
+ return -1;
+ }
+ return 1;
+}
+
+static int gcm_stream_final(void *vctx, unsigned char *out, size_t *outl,
+ size_t outsize)
+{
+ PROV_GCM_CTX *ctx = (PROV_GCM_CTX *)vctx;
+ int i;
+
+ i = gcm_cipher_internal(ctx, out, outl, NULL, 0);
+ if (i <= 0)
+ return 0;
+
+ *outl = 0;
+ return 1;
+}
+
+static int gcm_cipher(void *vctx,
+ unsigned char *out, size_t *outl, size_t outsize,
+ const unsigned char *in, size_t inl)
+{
+ PROV_GCM_CTX *ctx = (PROV_GCM_CTX *)vctx;
+
+ if (outsize < inl) {
+ PROVerr(0, PROV_R_OUTPUT_BUFFER_TOO_SMALL);
+ return -1;
+ }
+
+ if (gcm_cipher_internal(ctx, out, outl, in, inl) <= 0)
+ return -1;
+
+ *outl = inl;
+ return 1;
+}
+
+/*
+ * See SP800-38D (GCM) Section 8 "Uniqueness requirement on IVS and keys"
+ *
+ * See also 8.2.2 RBG-based construction.
+ * Random construction consists of a free field (which can be NULL) and a
+ * random field which will use a DRBG that can return at least 96 bits of
+ * entropy strength. (The DRBG must be seeded by the FIPS module).
+ */
+static int gcm_iv_generate(PROV_GCM_CTX *ctx, int offset)
+{
+ int sz = ctx->ivlen - offset;
+
+ /* Must be at least 96 bits */
+ if (sz <= 0 || ctx->ivlen < GCM_IV_DEFAULT_SIZE)
+ return 0;
+
+ /* Use DRBG to generate random iv */
+ if (rand_bytes_ex(ctx->libctx, ctx->iv + offset, sz) <= 0)
+ return 0;
+ ctx->iv_state = IV_STATE_BUFFERED;
+ ctx->iv_gen_rand = 1;
+ return 1;
+}
+
+static int gcm_cipher_internal(PROV_GCM_CTX *ctx, unsigned char *out,
+ size_t *padlen, const unsigned char *in,
+ size_t len)
+{
+ size_t olen = 0;
+ int rv = 0;
+ const PROV_GCM_HW *hw = ctx->hw;
+
+ if (ctx->tls_aad_len >= 0)
+ return gcm_tls_cipher(ctx, out, padlen, in, len);
+
+ if (!ctx->key_set || ctx->iv_state == IV_STATE_FINISHED)
+ goto err;
+
+ /*
+ * FIPS requires generation of AES-GCM IV's inside the FIPS module.
+ * The IV can still be set externally (the security policy will state that
+ * this is not FIPS compliant). There are some applications
+ * where setting the IV externally is the only option available.
+ */
+ if (ctx->iv_state == IV_STATE_UNINITIALISED) {
+ if (!ctx->enc || !gcm_iv_generate(ctx, 0))
+ goto err;
+ }
+
+ if (ctx->iv_state == IV_STATE_BUFFERED) {
+ if (!hw->setiv(ctx, ctx->iv, ctx->ivlen))
+ goto err;
+ ctx->iv_state = IV_STATE_COPIED;
+ }
+
+ if (in != NULL) {
+ /* The input is AAD if out is NULL */
+ if (out == NULL) {
+ if (!hw->aadupdate(ctx, in, len))
+ goto err;
+ } else {
+ /* The input is ciphertext OR plaintext */
+ if (!hw->cipherupdate(ctx, in, len, out))
+ goto err;
+ }
+ } else {
+ /* Finished when in == NULL */
+ if (!hw->cipherfinal(ctx, ctx->buf))
+ goto err;
+ ctx->iv_state = IV_STATE_FINISHED; /* Don't reuse the IV */
+ goto finish;
+ }
+ olen = len;
+finish:
+ rv = 1;
+err:
+ *padlen = olen;
+ return rv;
+}
+
+static int gcm_tls_init(PROV_GCM_CTX *dat, unsigned char *aad, size_t aad_len)
+{
+ unsigned char *buf;
+ size_t len;
+
+ if (aad_len != EVP_AEAD_TLS1_AAD_LEN)
+ return 0;
+
+ /* Save the aad for later use. */
+ buf = dat->buf;
+ memcpy(buf, aad, aad_len);
+ dat->tls_aad_len = aad_len;
+ dat->tls_enc_records = 0;
+
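+    /*
+     * The 13 byte TLS AAD is: 8 byte sequence number, 1 byte content type,
+     * 2 byte version and a 2 byte plaintext length, so the length that needs
+     * correcting sits in the last two bytes.
+     */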
+ len = buf[aad_len - 2] << 8 | buf[aad_len - 1];
+ /* Correct length for explicit iv. */
+ if (len < EVP_GCM_TLS_EXPLICIT_IV_LEN)
+ return 0;
+ len -= EVP_GCM_TLS_EXPLICIT_IV_LEN;
+
+ /* If decrypting correct for tag too. */
+ if (!dat->enc) {
+ if (len < EVP_GCM_TLS_TAG_LEN)
+ return 0;
+ len -= EVP_GCM_TLS_TAG_LEN;
+ }
+ buf[aad_len - 2] = (unsigned char)(len >> 8);
+ buf[aad_len - 1] = (unsigned char)(len & 0xff);
+ /* Extra padding: tag appended to record. */
+ return EVP_GCM_TLS_TAG_LEN;
+}
+
+static int gcm_tls_iv_set_fixed(PROV_GCM_CTX *ctx, unsigned char *iv,
+ size_t len)
+{
+ /* Special case: -1 length restores whole IV */
+ if (len == (size_t)-1) {
+ memcpy(ctx->iv, iv, ctx->ivlen);
+ ctx->iv_gen = 1;
+ ctx->iv_state = IV_STATE_BUFFERED;
+ return 1;
+ }
+ /* Fixed field must be at least 4 bytes and invocation field at least 8 */
+ if ((len < EVP_GCM_TLS_FIXED_IV_LEN)
+ || (ctx->ivlen - (int)len) < EVP_GCM_TLS_EXPLICIT_IV_LEN)
+ return 0;
+ if (len > 0)
+ memcpy(ctx->iv, iv, len);
+ if (ctx->enc
+ && rand_bytes_ex(ctx->libctx, ctx->iv + len, ctx->ivlen - len) <= 0)
+ return 0;
+ ctx->iv_gen = 1;
+ ctx->iv_state = IV_STATE_BUFFERED;
+ return 1;
+}
+
+/* increment counter (64-bit int) by 1 */
+static void ctr64_inc(unsigned char *counter)
+{
+ int n = 8;
+ unsigned char c;
+
+ do {
+ --n;
+ c = counter[n];
+ ++c;
+ counter[n] = c;
+ if (c > 0)
+ return;
+ } while (n > 0);
+}
+
+/*
+ * Handle TLS GCM packet format. This consists of the last portion of the IV
+ * followed by the payload and finally the tag. On encrypt generate IV,
+ * encrypt payload and write the tag. On verify retrieve IV, decrypt payload
+ * and verify tag.
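+ * The record layout is: explicit IV (8 bytes) || payload || tag (16 bytes).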
+ */
+static int gcm_tls_cipher(PROV_GCM_CTX *ctx, unsigned char *out, size_t *padlen,
+ const unsigned char *in, size_t len)
+{
+ int rv = 0, arg = EVP_GCM_TLS_EXPLICIT_IV_LEN;
+ size_t plen = 0;
+ unsigned char *tag = NULL;
+
+ if (!ctx->key_set)
+ goto err;
+
+ /* Encrypt/decrypt must be performed in place */
+ if (out != in || len < (EVP_GCM_TLS_EXPLICIT_IV_LEN + EVP_GCM_TLS_TAG_LEN))
+ goto err;
+
+ /*
+ * Check for too many keys as per FIPS 140-2 IG A.5 "Key/IV Pair Uniqueness
+     * Requirements from SP 800-38D". The requirement is for one party to the
+     * communication to fail after 2^64 - 1 records. We do this on the
+     * encrypting side only.
+ */
+ if (ctx->enc && ++ctx->tls_enc_records == 0) {
+ PROVerr(0, EVP_R_TOO_MANY_RECORDS);
+ goto err;
+ }
+
+ if (ctx->iv_gen == 0)
+ goto err;
+ /*
+ * Set IV from start of buffer or generate IV and write to start of
+ * buffer.
+ */
+ if (ctx->enc) {
+ if (!ctx->hw->setiv(ctx, ctx->iv, ctx->ivlen))
+ goto err;
+ if (arg > ctx->ivlen)
+ arg = ctx->ivlen;
+ memcpy(out, ctx->iv + ctx->ivlen - arg, arg);
+ /*
+ * Invocation field will be at least 8 bytes in size and so no need
+ * to check wrap around or increment more than last 8 bytes.
+ */
+ ctr64_inc(ctx->iv + ctx->ivlen - 8);
+ } else {
+ memcpy(ctx->iv + ctx->ivlen - arg, out, arg);
+ if (!ctx->hw->setiv(ctx, ctx->iv, ctx->ivlen))
+ goto err;
+ }
+ ctx->iv_state = IV_STATE_COPIED;
+
+ /* Fix buffer and length to point to payload */
+ in += EVP_GCM_TLS_EXPLICIT_IV_LEN;
+ out += EVP_GCM_TLS_EXPLICIT_IV_LEN;
+ len -= EVP_GCM_TLS_EXPLICIT_IV_LEN + EVP_GCM_TLS_TAG_LEN;
+
+ tag = ctx->enc ? out + len : (unsigned char *)in + len;
+ if (!ctx->hw->oneshot(ctx, ctx->buf, ctx->tls_aad_len, in, len, out, tag,
+ EVP_GCM_TLS_TAG_LEN)) {
+ if (!ctx->enc)
+ OPENSSL_cleanse(out, len);
+ goto err;
+ }
+ if (ctx->enc)
+ plen = len + EVP_GCM_TLS_EXPLICIT_IV_LEN + EVP_GCM_TLS_TAG_LEN;
+ else
+ plen = len;
+
+ rv = 1;
+err:
+ ctx->iv_state = IV_STATE_FINISHED;
+ ctx->tls_aad_len = -1;
+ *padlen = plen;
+ return rv;
+}
+
+#define IMPLEMENT_cipher(alg, lcmode, UCMODE, flags, kbits, blkbits, ivbits) \
+ static OSSL_OP_cipher_get_params_fn alg##_##kbits##_##lcmode##_get_params; \
+ static int alg##_##kbits##_##lcmode##_get_params(OSSL_PARAM params[]) \
+ { \
+ return aes_get_params(params, EVP_CIPH_##UCMODE##_MODE, flags, \
+ kbits, blkbits, ivbits); \
+ } \
+ static OSSL_OP_cipher_newctx_fn alg##kbits##gcm_newctx; \
+ static void *alg##kbits##gcm_newctx(void *provctx) \
+ { \
+ return alg##_gcm_newctx(provctx, kbits); \
+ } \
+ const OSSL_DISPATCH alg##kbits##gcm_functions[] = { \
+ { OSSL_FUNC_CIPHER_ENCRYPT_INIT, (void (*)(void))gcm_einit }, \
+ { OSSL_FUNC_CIPHER_DECRYPT_INIT, (void (*)(void))gcm_dinit }, \
+ { OSSL_FUNC_CIPHER_UPDATE, (void (*)(void))gcm_stream_update }, \
+ { OSSL_FUNC_CIPHER_FINAL, (void (*)(void))gcm_stream_final }, \
+ { OSSL_FUNC_CIPHER_CIPHER, (void (*)(void))gcm_cipher }, \
+ { OSSL_FUNC_CIPHER_NEWCTX, (void (*)(void)) alg##kbits##gcm_newctx }, \
+ { OSSL_FUNC_CIPHER_FREECTX, (void (*)(void)) alg##_gcm_freectx }, \
+ { OSSL_FUNC_CIPHER_GET_PARAMS, \
+ (void (*)(void)) alg##_##kbits##_##lcmode##_get_params }, \
+ { OSSL_FUNC_CIPHER_CTX_GET_PARAMS, \
+ (void (*)(void))gcm_ctx_get_params }, \
+ { OSSL_FUNC_CIPHER_CTX_SET_PARAMS, \
+ (void (*)(void))gcm_ctx_set_params }, \
+ { 0, NULL } \
+ }
+
+static void *aes_gcm_newctx(void *provctx, size_t keybits)
+{
+ PROV_AES_GCM_CTX *ctx = OPENSSL_zalloc(sizeof(*ctx));
+
+ if (ctx != NULL)
+ gcm_initctx(provctx, (PROV_GCM_CTX *)ctx, keybits,
+ PROV_AES_HW_gcm(keybits), 8);
+ return ctx;
+}
+
+static OSSL_OP_cipher_freectx_fn aes_gcm_freectx;
+static void aes_gcm_freectx(void *vctx)
+{
+ PROV_AES_GCM_CTX *ctx = (PROV_AES_GCM_CTX *)vctx;
+
+ gcm_deinitctx((PROV_GCM_CTX *)ctx);
+ OPENSSL_clear_free(ctx, sizeof(*ctx));
+}
+
+/* aes128gcm_functions */
+IMPLEMENT_cipher(aes, gcm, GCM, AEAD_GCM_FLAGS, 128, 8, 96);
+/* aes192gcm_functions */
+IMPLEMENT_cipher(aes, gcm, GCM, AEAD_GCM_FLAGS, 192, 8, 96);
+/* aes256gcm_functions */
+IMPLEMENT_cipher(aes, gcm, GCM, AEAD_GCM_FLAGS, 256, 8, 96);
+
+#if !defined(OPENSSL_NO_ARIA) && !defined(FIPS_MODE)
+
+static void *aria_gcm_newctx(void *provctx, size_t keybits)
+{
+ PROV_ARIA_GCM_CTX *ctx = OPENSSL_zalloc(sizeof(*ctx));
+
+ if (ctx != NULL)
+ gcm_initctx(provctx, (PROV_GCM_CTX *)ctx, keybits,
+ PROV_ARIA_HW_gcm(keybits), 4);
+ return ctx;
+}
+
+static OSSL_OP_cipher_freectx_fn aria_gcm_freectx;
+static void aria_gcm_freectx(void *vctx)
+{
+ PROV_ARIA_GCM_CTX *ctx = (PROV_ARIA_GCM_CTX *)vctx;
+
+ gcm_deinitctx((PROV_GCM_CTX *)ctx);
+ OPENSSL_clear_free(ctx, sizeof(*ctx));
+}
+
+/* aria128gcm_functions */
+IMPLEMENT_cipher(aria, gcm, GCM, AEAD_GCM_FLAGS, 128, 8, 96);
+/* aria192gcm_functions */
+IMPLEMENT_cipher(aria, gcm, GCM, AEAD_GCM_FLAGS, 192, 8, 96);
+/* aria256gcm_functions */
+IMPLEMENT_cipher(aria, gcm, GCM, AEAD_GCM_FLAGS, 256, 8, 96);
+
+#endif /* !defined(OPENSSL_NO_ARIA) && !defined(FIPS_MODE) */
--- /dev/null
+/*
+ * Copyright 2001-2019 The OpenSSL Project Authors. All Rights Reserved.
+ *
+ * Licensed under the Apache License 2.0 (the "License"). You may not use
+ * this file except in compliance with the License. You can obtain a copy
+ * in the file LICENSE in the source distribution or at
+ * https://www.openssl.org/source/license.html
+ */
+
+#include "ciphers_locl.h"
+#include "internal/aes_platform.h"
+
+static const PROV_GCM_HW aes_gcm;
+
+static int gcm_setiv(PROV_GCM_CTX *ctx, const unsigned char *iv, size_t ivlen);
+static int gcm_aad_update(PROV_GCM_CTX *ctx, const unsigned char *aad,
+ size_t aad_len);
+static int gcm_cipher_final(PROV_GCM_CTX *ctx, unsigned char *tag);
+static int gcm_one_shot(PROV_GCM_CTX *ctx, unsigned char *aad, size_t aad_len,
+ const unsigned char *in, size_t in_len,
+ unsigned char *out, unsigned char *tag, size_t tag_len);
+static int gcm_cipher_update(PROV_GCM_CTX *ctx, const unsigned char *in,
+ size_t len, unsigned char *out);
+
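+/*
+ * Helper to fill in the key related fields of a PROV_GCM_CTX: it expands the
+ * key schedule, initialises the GCM128 context with the raw block function,
+ * records an optional counter-mode bulk function and marks the key as set.
+ */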
+#define SET_KEY_CTR_FN(ks, fn_set_enc_key, fn_block, fn_ctr) \
+ ctx->ks = ks; \
+ fn_set_enc_key(key, keylen * 8, ks); \
+ CRYPTO_gcm128_init(&ctx->gcm, ks, (block128_f)fn_block); \
+ ctx->ctr = (ctr128_f)fn_ctr; \
+ ctx->key_set = 1;
+
+#if defined(AESNI_CAPABLE)
+
+/* AES-NI section */
+static int aesni_gcm_init_key(PROV_GCM_CTX *ctx, const unsigned char *key,
+ size_t keylen)
+{
+ PROV_AES_GCM_CTX *actx = (PROV_AES_GCM_CTX *)ctx;
+ AES_KEY *ks = &actx->ks.ks;
+
+ SET_KEY_CTR_FN(ks, aesni_set_encrypt_key, aesni_encrypt,
+ aesni_ctr32_encrypt_blocks);
+ return 1;
+}
+
+static const PROV_GCM_HW aesni_gcm = {
+ aesni_gcm_init_key,
+ gcm_setiv,
+ gcm_aad_update,
+ gcm_cipher_update,
+ gcm_cipher_final,
+ gcm_one_shot
+};
+
+const PROV_GCM_HW *PROV_AES_HW_gcm(size_t keybits)
+{
+ return AESNI_CAPABLE ? &aesni_gcm : &aes_gcm;
+}
+
+#elif defined(AES_ASM) && (defined(__sparc) || defined(__sparc__))
+
+/* Fujitsu SPARC64 X support */
+
+static int t4_aes_gcm_init_key(PROV_GCM_CTX *ctx, const unsigned char *key,
+ size_t keylen)
+{
+ ctr128_f ctr;
+ PROV_AES_GCM_CTX *actx = (PROV_AES_GCM_CTX *)ctx;
+ AES_KEY *ks = &actx->ks.ks;
+
+ switch (keylen) {
+ case 16:
+ ctr = (ctr128_f)aes128_t4_ctr32_encrypt;
+ break;
+ case 24:
+ ctr = (ctr128_f)aes192_t4_ctr32_encrypt;
+ break;
+ case 32:
+ ctr = (ctr128_f)aes256_t4_ctr32_encrypt;
+ break;
+ default:
+ return 0;
+ }
+
+ SET_KEY_CTR_FN(ks, aes_t4_set_encrypt_key, aes_t4_encrypt, ctr);
+ return 1;
+}
+
+static const PROV_GCM_HW t4_aes_gcm = {
+ t4_aes_gcm_init_key,
+ gcm_setiv,
+ gcm_aad_update,
+ gcm_cipher_update,
+ gcm_cipher_final,
+ gcm_one_shot
+};
+const PROV_GCM_HW *PROV_AES_HW_gcm(size_t keybits)
+{
+ return SPARC_AES_CAPABLE ? &t4_aes_gcm : &aes_gcm;
+}
+
+#elif defined(OPENSSL_CPUID_OBJ) && defined(__s390__)
+# include "gcm_s390x.c"
+#else
+const PROV_GCM_HW *PROV_AES_HW_gcm(size_t keybits)
+{
+ return &aes_gcm;
+}
+#endif
+
+static int generic_aes_gcm_init_key(PROV_GCM_CTX *ctx, const unsigned char *key,
+ size_t keylen)
+{
+ PROV_AES_GCM_CTX *actx = (PROV_AES_GCM_CTX *)ctx;
+ AES_KEY *ks = &actx->ks.ks;
+
+# ifdef HWAES_CAPABLE
+ if (HWAES_CAPABLE) {
+# ifdef HWAES_ctr32_encrypt_blocks
+ SET_KEY_CTR_FN(ks, HWAES_set_encrypt_key, HWAES_encrypt,
+ HWAES_ctr32_encrypt_blocks);
+# else
+ SET_KEY_CTR_FN(ks, HWAES_set_encrypt_key, HWAES_encrypt, NULL);
+# endif /* HWAES_ctr32_encrypt_blocks */
+ } else
+# endif /* HWAES_CAPABLE */
+
+# ifdef BSAES_CAPABLE
+ if (BSAES_CAPABLE) {
+ SET_KEY_CTR_FN(ks, AES_set_encrypt_key, AES_encrypt,
+ bsaes_ctr32_encrypt_blocks);
+ } else
+# endif /* BSAES_CAPABLE */
+
+# ifdef VPAES_CAPABLE
+ if (VPAES_CAPABLE) {
+ SET_KEY_CTR_FN(ks, vpaes_set_encrypt_key, vpaes_encrypt, NULL);
+ } else
+# endif /* VPAES_CAPABLE */
+
+ {
+# ifdef AES_CTR_ASM
+ SET_KEY_CTR_FN(ks, AES_set_encrypt_key, AES_encrypt, AES_ctr32_encrypt);
+# else
+ SET_KEY_CTR_FN(ks, AES_set_encrypt_key, AES_encrypt, NULL);
+# endif /* AES_CTR_ASM */
+ }
+ return 1;
+}
+
+static int gcm_setiv(PROV_GCM_CTX *ctx, const unsigned char *iv, size_t ivlen)
+{
+ CRYPTO_gcm128_setiv(&ctx->gcm, iv, ivlen);
+ return 1;
+}
+
+static int gcm_aad_update(PROV_GCM_CTX *ctx,
+ const unsigned char *aad, size_t aad_len)
+{
+ return CRYPTO_gcm128_aad(&ctx->gcm, aad, aad_len) == 0;
+}
+
+static int gcm_cipher_update(PROV_GCM_CTX *ctx, const unsigned char *in,
+ size_t len, unsigned char *out)
+{
+ if (ctx->enc) {
+ if (ctx->ctr != NULL) {
+#if defined(AES_GCM_ASM)
+ size_t bulk = 0;
+
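+            /*
+             * The stitched AES-GCM assembler works on whole blocks: first
+             * bring the GCM state to a 16 byte boundary, then hand the bulk
+             * of the data to aesni_gcm_encrypt().
+             */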
+ if (len >= 32 && AES_GCM_ASM(ctx)) {
+ size_t res = (16 - ctx->gcm.mres) % 16;
+
+ if (CRYPTO_gcm128_encrypt(&ctx->gcm, in, out, res))
+ return 0;
+ bulk = aesni_gcm_encrypt(in + res, out + res, len - res,
+ ctx->gcm.key,
+ ctx->gcm.Yi.c, ctx->gcm.Xi.u);
+ ctx->gcm.len.u[1] += bulk;
+ bulk += res;
+ }
+ if (CRYPTO_gcm128_encrypt_ctr32(&ctx->gcm, in + bulk, out + bulk,
+ len - bulk, ctx->ctr))
+ return 0;
+#else
+ if (CRYPTO_gcm128_encrypt_ctr32(&ctx->gcm, in, out, len, ctx->ctr))
+ return 0;
+#endif /* AES_GCM_ASM */
+ } else {
+ if (CRYPTO_gcm128_encrypt(&ctx->gcm, in, out, len))
+ return 0;
+ }
+ } else {
+ if (ctx->ctr != NULL) {
+#if defined(AES_GCM_ASM)
+ size_t bulk = 0;
+
+ if (len >= 16 && AES_GCM_ASM(ctx)) {
+ size_t res = (16 - ctx->gcm.mres) % 16;
+
+ if (CRYPTO_gcm128_decrypt(&ctx->gcm, in, out, res))
+                    return 0;
+
+ bulk = aesni_gcm_decrypt(in + res, out + res, len - res,
+ ctx->gcm.key,
+ ctx->gcm.Yi.c, ctx->gcm.Xi.u);
+ ctx->gcm.len.u[1] += bulk;
+ bulk += res;
+ }
+ if (CRYPTO_gcm128_decrypt_ctr32(&ctx->gcm, in + bulk, out + bulk,
+ len - bulk, ctx->ctr))
+ return 0;
+#else
+ if (CRYPTO_gcm128_decrypt_ctr32(&ctx->gcm, in, out, len, ctx->ctr))
+ return 0;
+#endif /* AES_GCM_ASM */
+ } else {
+ if (CRYPTO_gcm128_decrypt(&ctx->gcm, in, out, len))
+ return 0;
+ }
+ }
+ return 1;
+}
+
+static int gcm_cipher_final(PROV_GCM_CTX *ctx, unsigned char *tag)
+{
+ if (ctx->enc) {
+ CRYPTO_gcm128_tag(&ctx->gcm, tag, GCM_TAG_MAX_SIZE);
+ ctx->taglen = GCM_TAG_MAX_SIZE;
+ } else {
+ if (ctx->taglen < 0
+ || CRYPTO_gcm128_finish(&ctx->gcm, tag, ctx->taglen) != 0)
+ return 0;
+ }
+ return 1;
+}
+
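+/*
+ * Generic one-shot composed from the incremental methods above. Platforms
+ * such as s390x install a single-call replacement instead.
+ */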
+static int gcm_one_shot(PROV_GCM_CTX *ctx, unsigned char *aad, size_t aad_len,
+ const unsigned char *in, size_t in_len,
+ unsigned char *out, unsigned char *tag, size_t tag_len)
+{
+ int ret = 0;
+
+ /* Use saved AAD */
+ if (!ctx->hw->aadupdate(ctx, aad, aad_len))
+ goto err;
+ if (!ctx->hw->cipherupdate(ctx, in, in_len, out))
+ goto err;
+ ctx->taglen = GCM_TAG_MAX_SIZE;
+ if (!ctx->hw->cipherfinal(ctx, tag))
+ goto err;
+ ret = 1;
+
+err:
+ return ret;
+}
+
+static const PROV_GCM_HW aes_gcm = {
+ generic_aes_gcm_init_key,
+ gcm_setiv,
+ gcm_aad_update,
+ gcm_cipher_update,
+ gcm_cipher_final,
+ gcm_one_shot
+};
+
+#if !defined(OPENSSL_NO_ARIA) && !defined(FIPS_MODE)
+
+static int aria_gcm_init_key(PROV_GCM_CTX *ctx, const unsigned char *key,
+ size_t keylen)
+{
+ PROV_ARIA_GCM_CTX *actx = (PROV_ARIA_GCM_CTX *)ctx;
+ ARIA_KEY *ks = &actx->ks.ks;
+
+ SET_KEY_CTR_FN(ks, aria_set_encrypt_key, aria_encrypt, NULL);
+ return 1;
+}
+
+static int aria_cipher_update(PROV_GCM_CTX *ctx, const unsigned char *in,
+ size_t len, unsigned char *out)
+{
+ if (ctx->enc) {
+ if (CRYPTO_gcm128_encrypt(&ctx->gcm, in, out, len))
+ return 0;
+ } else {
+ if (CRYPTO_gcm128_decrypt(&ctx->gcm, in, out, len))
+ return 0;
+ }
+ return 1;
+}
+
+static const PROV_GCM_HW aria_gcm = {
+ aria_gcm_init_key,
+ gcm_setiv,
+ gcm_aad_update,
+ aria_cipher_update,
+ gcm_cipher_final,
+ gcm_one_shot
+};
+const PROV_GCM_HW *PROV_ARIA_HW_gcm(size_t keybits)
+{
+ return &aria_gcm;
+}
+
+#endif /* !defined(OPENSSL_NO_ARIA) && !defined(FIPS_MODE) */
--- /dev/null
+/*
+ * Copyright 2001-2019 The OpenSSL Project Authors. All Rights Reserved.
+ *
+ * Licensed under the Apache License 2.0 (the "License"). You may not use
+ * this file except in compliance with the License. You can obtain a copy
+ * in the file LICENSE in the source distribution or at
+ * https://www.openssl.org/source/license.html
+ */
+
+/*
+ * IBM S390X AES GCM support
+ * Note this file is included by aes_gcm_hw.c
+ */
+
+/* iv + padding length for iv lengths != 12 */
+#define S390X_gcm_ivpadlen(i) ((((i) + 15) >> 4 << 4) + 16)
+
+static int s390x_aes_gcm_init_key(PROV_GCM_CTX *ctx,
+ const unsigned char *key, size_t keylen)
+{
+ PROV_AES_GCM_CTX *actx = (PROV_AES_GCM_CTX *)ctx;
+
+ ctx->key_set = 1;
+ memcpy(&actx->plat.s390x.param.kma.k, key, keylen);
+ actx->plat.s390x.fc = S390X_AES_FC(keylen);
+ if (!ctx->enc)
+ actx->plat.s390x.fc |= S390X_DECRYPT;
+ return 1;
+}
+
+static int s390x_aes_gcm_setiv(PROV_GCM_CTX *ctx, const unsigned char *iv,
+ size_t ivlen)
+{
+ PROV_AES_GCM_CTX *actx = (PROV_AES_GCM_CTX *)ctx;
+ S390X_KMA_PARAMS *kma = &actx->plat.s390x.param.kma;
+
+ kma->t.g[0] = 0;
+ kma->t.g[1] = 0;
+ kma->tpcl = 0;
+ kma->taadl = 0;
+ actx->plat.s390x.mreslen = 0;
+ actx->plat.s390x.areslen = 0;
+ actx->plat.s390x.kreslen = 0;
+
+    if (ivlen == GCM_IV_DEFAULT_SIZE) {
+ memcpy(&kma->j0, iv, ivlen);
+ kma->j0.w[3] = 1;
+ kma->cv.w = 1;
+ } else {
+ unsigned long long ivbits = ivlen << 3;
+ size_t len = S390X_gcm_ivpadlen(ivlen);
+        unsigned char iv_zero_pad[S390X_gcm_ivpadlen(GCM_IV_MAX_SIZE)];
+ /*
+ * The IV length needs to be zero padded to be a multiple of 16 bytes
+ * followed by 8 bytes of zeros and 8 bytes for the IV length.
+ * The GHASH of this value can then be calculated.
+ */
+ memcpy(iv_zero_pad, iv, ivlen);
+ memset(iv_zero_pad + ivlen, 0, len - ivlen);
+ memcpy(iv_zero_pad + len - sizeof(ivbits), &ivbits, sizeof(ivbits));
+ /*
+ * Calculate the ghash of the iv - the result is stored into the tag
+ * param.
+ */
+ s390x_kma(iv_zero_pad, len, NULL, 0, NULL, actx->plat.s390x.fc, kma);
+ actx->plat.s390x.fc |= S390X_KMA_HS; /* The hash subkey is set */
+
+ /* Copy the 128 bit GHASH result into J0 and clear the tag */
+ kma->j0.g[0] = kma->t.g[0];
+ kma->j0.g[1] = kma->t.g[1];
+ kma->t.g[0] = 0;
+ kma->t.g[1] = 0;
+ /* Set the 32 bit counter */
+ kma->cv.w = kma->j0.w[3];
+ }
+ return 1;
+}
+
+static int s390x_aes_gcm_cipher_final(PROV_GCM_CTX *ctx, unsigned char *tag)
+{
+ PROV_AES_GCM_CTX *actx = (PROV_AES_GCM_CTX *)ctx;
+ S390X_KMA_PARAMS *kma = &actx->plat.s390x.param.kma;
+ unsigned char out[AES_BLOCK_SIZE];
+ int rc;
+
+ kma->taadl <<= 3;
+ kma->tpcl <<= 3;
+ s390x_kma(actx->plat.s390x.ares, actx->plat.s390x.areslen,
+ actx->plat.s390x.mres, actx->plat.s390x.mreslen, out,
+ actx->plat.s390x.fc | S390X_KMA_LAAD | S390X_KMA_LPC, kma);
+
+    /* actx->plat.s390x.mres was already returned to the caller */
+ OPENSSL_cleanse(out, actx->plat.s390x.mreslen);
+
+ if (ctx->enc) {
+        ctx->taglen = GCM_TAG_MAX_SIZE;
+ memcpy(tag, kma->t.b, ctx->taglen);
+ rc = 1;
+ } else {
+ if (ctx->taglen < 0)
+ rc = 0;
+ else
+ rc = (CRYPTO_memcmp(tag, kma->t.b, ctx->taglen) == 0);
+ }
+ return rc;
+}
+
+static int s390x_aes_gcm_one_shot(PROV_GCM_CTX *ctx,
+ unsigned char *aad, size_t aad_len,
+ const unsigned char *in, size_t in_len,
+ unsigned char *out,
+ unsigned char *tag, size_t taglen)
+{
+ PROV_AES_GCM_CTX *actx = (PROV_AES_GCM_CTX *)ctx;
+ S390X_KMA_PARAMS *kma = &actx->plat.s390x.param.kma;
+ int rc;
+
+ kma->taadl = aad_len << 3;
+ kma->tpcl = in_len << 3;
+ s390x_kma(aad, aad_len, in, in_len, out,
+ actx->plat.s390x.fc | S390X_KMA_LAAD | S390X_KMA_LPC, kma);
+
+ if (ctx->enc) {
+ memcpy(tag, kma->t.b, taglen);
+ rc = 1;
+ } else {
+ rc = (CRYPTO_memcmp(tag, kma->t.b, taglen) == 0);
+ }
+ return rc;
+}
+
+/*
+ * Process additional authenticated data. Returns 1 on success. Code is
+ * big-endian.
+ */
+static int s390x_aes_gcm_aad_update(PROV_GCM_CTX *ctx,
+ const unsigned char *aad, size_t len)
+{
+ PROV_AES_GCM_CTX *actx = (PROV_AES_GCM_CTX *)ctx;
+ S390X_KMA_PARAMS *kma = &actx->plat.s390x.param.kma;
+ unsigned long long alen;
+ int n, rem;
+
+ /* If already processed pt/ct then error */
+ if (kma->tpcl != 0)
+ return 0;
+
+ /* update the total aad length */
+ alen = kma->taadl + len;
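+    /* SP800-38D caps the AAD at 2^64 bits, i.e. 2^61 bytes */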
+ if (alen > (U64(1) << 61) || (sizeof(len) == 8 && alen < len))
+ return 0;
+ kma->taadl = alen;
+
+ /* check if there is any existing aad data from a previous add */
+ n = actx->plat.s390x.areslen;
+ if (n) {
+ /* add additional data to a buffer until it has 16 bytes */
+ while (n && len) {
+ actx->plat.s390x.ares[n] = *aad;
+ ++aad;
+ --len;
+ n = (n + 1) & 0xf;
+ }
+ /* ctx->ares contains a complete block if offset has wrapped around */
+ if (!n) {
+ s390x_kma(actx->plat.s390x.ares, 16, NULL, 0, NULL,
+ actx->plat.s390x.fc, kma);
+ actx->plat.s390x.fc |= S390X_KMA_HS;
+ }
+ actx->plat.s390x.areslen = n;
+ }
+
+ /* If there are leftover bytes (< 128 bits) save them for next time */
+ rem = len & 0xf;
+ /* Add any remaining 16 byte blocks (128 bit each) */
+ len &= ~(size_t)0xf;
+ if (len) {
+ s390x_kma(aad, len, NULL, 0, NULL, actx->plat.s390x.fc, kma);
+ actx->plat.s390x.fc |= S390X_KMA_HS;
+ aad += len;
+ }
+
+ if (rem) {
+ actx->plat.s390x.areslen = rem;
+
+ do {
+ --rem;
+ actx->plat.s390x.ares[rem] = aad[rem];
+ } while (rem);
+ }
+ return 1;
+}
+
+/*-
+ * En/de-crypt plain/cipher-text and authenticate ciphertext. Returns 1 for
+ * success. Code is big-endian.
+ */
+static int s390x_aes_gcm_cipher_update(PROV_GCM_CTX *ctx,
+ const unsigned char *in, size_t len,
+ unsigned char *out)
+{
+ PROV_AES_GCM_CTX *actx = (PROV_AES_GCM_CTX *)ctx;
+ S390X_KMA_PARAMS *kma = &actx->plat.s390x.param.kma;
+ const unsigned char *inptr;
+ unsigned long long mlen;
+ union {
+ unsigned int w[4];
+ unsigned char b[16];
+ } buf;
+ size_t inlen;
+ int n, rem, i;
+
+ mlen = kma->tpcl + len;
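+    /* SP800-38D caps the plaintext at 2^39 - 256 bits (2^36 - 32 bytes) */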
+ if (mlen > ((U64(1) << 36) - 32) || (sizeof(len) == 8 && mlen < len))
+ return 0;
+ kma->tpcl = mlen;
+
+ n = actx->plat.s390x.mreslen;
+ if (n) {
+ inptr = in;
+ inlen = len;
+ while (n && inlen) {
+ actx->plat.s390x.mres[n] = *inptr;
+ n = (n + 1) & 0xf;
+ ++inptr;
+ --inlen;
+ }
+ /* ctx->mres contains a complete block if offset has wrapped around */
+ if (!n) {
+ s390x_kma(actx->plat.s390x.ares, actx->plat.s390x.areslen,
+ actx->plat.s390x.mres, 16, buf.b,
+ actx->plat.s390x.fc | S390X_KMA_LAAD, kma);
+ actx->plat.s390x.fc |= S390X_KMA_HS;
+ actx->plat.s390x.areslen = 0;
+
+            /*
+             * Previous call already encrypted/decrypted its remainder,
+             * see comment below.
+             */
+ n = actx->plat.s390x.mreslen;
+ while (n) {
+ *out = buf.b[n];
+ n = (n + 1) & 0xf;
+ ++out;
+ ++in;
+ --len;
+ }
+ actx->plat.s390x.mreslen = 0;
+ }
+ }
+
+ rem = len & 0xf;
+
+ len &= ~(size_t)0xf;
+ if (len) {
+ s390x_kma(actx->plat.s390x.ares, actx->plat.s390x.areslen, in, len, out,
+ actx->plat.s390x.fc | S390X_KMA_LAAD, kma);
+ in += len;
+ out += len;
+ actx->plat.s390x.fc |= S390X_KMA_HS;
+ actx->plat.s390x.areslen = 0;
+ }
+
+ /*-
+ * If there is a remainder, it has to be saved such that it can be
+ * processed by kma later. However, we also have to do the for-now
+ * unauthenticated encryption/decryption part here and now...
+ */
+ if (rem) {
+ if (!actx->plat.s390x.mreslen) {
+ buf.w[0] = kma->j0.w[0];
+ buf.w[1] = kma->j0.w[1];
+ buf.w[2] = kma->j0.w[2];
+ buf.w[3] = kma->cv.w + 1;
+ s390x_km(buf.b, 16, actx->plat.s390x.kres,
+ actx->plat.s390x.fc & 0x1f, &kma->k);
+ }
+
+ n = actx->plat.s390x.mreslen;
+ for (i = 0; i < rem; i++) {
+ actx->plat.s390x.mres[n + i] = in[i];
+ out[i] = in[i] ^ actx->plat.s390x.kres[n + i];
+ }
+ actx->plat.s390x.mreslen += rem;
+ }
+ return 1;
+}
+
+static const PROV_GCM_HW s390x_aes_gcm = {
+ s390x_aes_gcm_init_key,
+ s390x_aes_gcm_setiv,
+ s390x_aes_gcm_aad_update,
+ s390x_aes_gcm_cipher_update,
+ s390x_aes_gcm_cipher_final,
+ s390x_aes_gcm_one_shot
+};
+
+const PROV_GCM_HW *PROV_AES_HW_gcm(size_t keybits)
+{
+ if ((keybits == 128 && S390X_aes_128_gcm_CAPABLE)
+ || (keybits == 192 && S390X_aes_192_gcm_CAPABLE)
+ || (keybits == 256 && S390X_aes_256_gcm_CAPABLE))
+ return &s390x_aes_gcm;
+ return &aes_gcm;
+}
extern const OSSL_DISPATCH aes256ctr_functions[];
extern const OSSL_DISPATCH aes192ctr_functions[];
extern const OSSL_DISPATCH aes128ctr_functions[];
+extern const OSSL_DISPATCH aes256gcm_functions[];
+extern const OSSL_DISPATCH aes192gcm_functions[];
+extern const OSSL_DISPATCH aes128gcm_functions[];
+#ifndef OPENSSL_NO_ARIA
+extern const OSSL_DISPATCH aria256gcm_functions[];
+extern const OSSL_DISPATCH aria192gcm_functions[];
+extern const OSSL_DISPATCH aria128gcm_functions[];
+#endif /* OPENSSL_NO_ARIA */
/* Key management */
extern const OSSL_DISPATCH dh_keymgmt_functions[];
# define PROV_R_CIPHER_OPERATION_FAILED 102
# define PROV_R_FAILED_TO_GET_PARAMETER 103
# define PROV_R_FAILED_TO_SET_PARAMETER 104
+# define PROV_R_INVALID_AAD 108
+# define PROV_R_INVALID_IVLEN 109
# define PROV_R_INVALID_KEYLEN 105
+# define PROV_R_INVALID_TAG 110
# define PROV_R_OUTPUT_BUFFER_TOO_SMALL 106
# define PROV_R_WRONG_FINAL_BLOCK_LENGTH 107
"failed to get parameter"},
{ERR_PACK(ERR_LIB_PROV, 0, PROV_R_FAILED_TO_SET_PARAMETER),
"failed to set parameter"},
+ {ERR_PACK(ERR_LIB_PROV, 0, PROV_R_INVALID_AAD), "invalid aad"},
+ {ERR_PACK(ERR_LIB_PROV, 0, PROV_R_INVALID_IVLEN), "invalid ivlen"},
{ERR_PACK(ERR_LIB_PROV, 0, PROV_R_INVALID_KEYLEN), "invalid keylen"},
+ {ERR_PACK(ERR_LIB_PROV, 0, PROV_R_INVALID_TAG), "invalid tag"},
{ERR_PACK(ERR_LIB_PROV, 0, PROV_R_OUTPUT_BUFFER_TOO_SMALL),
"output buffer too small"},
{ERR_PACK(ERR_LIB_PROV, 0, PROV_R_WRONG_FINAL_BLOCK_LENGTH),
{ "AES-256-CTR", "default=yes", aes256ctr_functions },
{ "AES-192-CTR", "default=yes", aes192ctr_functions },
{ "AES-128-CTR", "default=yes", aes128ctr_functions },
+ { "id-aes256-GCM", "default=yes", aes256gcm_functions },
+ { "id-aes192-GCM", "default=yes", aes192gcm_functions },
+ { "id-aes128-GCM", "default=yes", aes128gcm_functions },
+#ifndef OPENSSL_NO_ARIA
+ { "ARIA-256-GCM", "default=yes", aria256gcm_functions },
+ { "ARIA-192-GCM", "default=yes", aria192gcm_functions },
+ { "ARIA-128-GCM", "default=yes", aria128gcm_functions },
+#endif /* OPENSSL_NO_ARIA */
{ NULL, NULL, NULL }
};
{ "AES-256-CTR", "fips=yes", aes256ctr_functions },
{ "AES-192-CTR", "fips=yes", aes192ctr_functions },
{ "AES-128-CTR", "fips=yes", aes128ctr_functions },
+ { "id-aes256-GCM", "fips=yes", aes256gcm_functions },
+ { "id-aes192-GCM", "fips=yes", aes192gcm_functions },
+ { "id-aes128-GCM", "fips=yes", aes128gcm_functions },
{ NULL, NULL, NULL }
};