This commit contains optimizations to the PKCS5_PBKDF2_HMAC() and
HMAC_CTX_copy() functions which together make PBKDF2 computations
faster by 15-40%, according to my measurements on x64 Linux with both
the asm-optimized and no-asm versions of SHA1, SHA256 and SHA512.

Reviewed-by: Richard Levitte <levitte@openssl.org>
Reviewed-by: Rich Salz <rsalz@openssl.org>
(Merged from https://github.com/openssl/openssl/pull/1708)
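
For anyone reproducing the measurements, a minimal timing sketch along
these lines is enough to see the effect. This is not the exact harness
behind the 15-40% figures; the password, salt and iteration count are
arbitrary:

/* pbkdf2_bench.c - minimal PBKDF2 timing sketch, not the harness used
 * for the figures above; build with: cc pbkdf2_bench.c -lcrypto */
#include <stdio.h>
#include <time.h>
#include <openssl/evp.h>

int main(void)
{
    static const unsigned char salt[16] = { 0 };
    unsigned char key[32];
    const int iter = 100000;          /* arbitrary iteration count */
    clock_t start = clock();

    if (!PKCS5_PBKDF2_HMAC("password", 8, salt, sizeof(salt), iter,
                           EVP_sha256(), sizeof(key), key)) {
        fprintf(stderr, "PKCS5_PBKDF2_HMAC failed\n");
        return 1;
    }
    printf("%d iterations: %.3f s\n", iter,
           (double)(clock() - start) / CLOCKS_PER_SEC);
    return 0;
}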
--- a/crypto/evp/p5_crpt2.c
+++ b/crypto/evp/p5_crpt2.c
             HMAC_CTX_free(hctx_tpl);
             return 0;
         }
-        HMAC_CTX_reset(hctx);
         memcpy(p, digtmp, cplen);
         for (j = 1; j < iter; j++) {
             if (!HMAC_CTX_copy(hctx, hctx_tpl)) {
                 HMAC_CTX_free(hctx_tpl);
                 return 0;
             }
-            HMAC_CTX_reset(hctx);
             for (k = 0; k < cplen; k++)
                 p[k] ^= digtmp[k];
         }
     }
 }
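
The HMAC_CTX_reset() calls removed above were redundant: each loop
iteration immediately overwrites hctx via HMAC_CTX_copy() anyway. For
orientation, the template-copy pattern this loop implements looks
roughly like the sketch below (one PBKDF2 block, the F function of
RFC 2898, with error handling elided; this is not the exact
p5_crpt2.c code). It shows why HMAC_CTX_copy() speed dominates the
iteration loop:

/*
 * Simplified sketch of one PBKDF2 block; error handling elided.
 * hctx_tpl has been keyed once with HMAC_Init_ex(); every HMAC
 * invocation then clones it with HMAC_CTX_copy() instead of
 * re-keying. itmp holds the big-endian block index INT(i).
 */
#include <string.h>
#include <openssl/hmac.h>

void pbkdf2_block(HMAC_CTX *hctx_tpl, HMAC_CTX *hctx,
                  const unsigned char *salt, int saltlen,
                  const unsigned char itmp[4], int iter,
                  unsigned char *p, int cplen)
{
    unsigned char digtmp[EVP_MAX_MD_SIZE];
    size_t mdlen = HMAC_size(hctx_tpl);
    int j, k;

    HMAC_CTX_copy(hctx, hctx_tpl);       /* U_1 = PRF(P, S || INT(i)) */
    HMAC_Update(hctx, salt, saltlen);
    HMAC_Update(hctx, itmp, 4);
    HMAC_Final(hctx, digtmp, NULL);
    memcpy(p, digtmp, cplen);

    for (j = 1; j < iter; j++) {
        HMAC_CTX_copy(hctx, hctx_tpl);   /* U_j = PRF(P, U_{j-1}) */
        HMAC_Update(hctx, digtmp, mdlen);
        HMAC_Final(hctx, digtmp, NULL);
        for (k = 0; k < cplen; k++)      /* T_i ^= U_j */
            p[k] ^= digtmp[k];
    }
}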
--- a/crypto/hmac/hmac.c
+++ b/crypto/hmac/hmac.c
-int HMAC_CTX_reset(HMAC_CTX *ctx)
+static int hmac_ctx_alloc_mds(HMAC_CTX *ctx)
 {
-    hmac_ctx_cleanup(ctx);
     if (ctx->i_ctx == NULL)
         ctx->i_ctx = EVP_MD_CTX_new();
     if (ctx->i_ctx == NULL)
-        goto err;
+        return 0;
     if (ctx->o_ctx == NULL)
         ctx->o_ctx = EVP_MD_CTX_new();
     if (ctx->o_ctx == NULL)
-        goto err;
+        return 0;
     if (ctx->md_ctx == NULL)
         ctx->md_ctx = EVP_MD_CTX_new();
     if (ctx->md_ctx == NULL)
-        goto err;
-    ctx->md = NULL;
+        return 0;
     return 1;
- err:
+}
+
+int HMAC_CTX_reset(HMAC_CTX *ctx)
+{
     hmac_ctx_cleanup(ctx);
-    return 0;
+    if (!hmac_ctx_alloc_mds(ctx)) {
+        hmac_ctx_cleanup(ctx);
+        return 0;
+    }
+    return 1;
 }
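
Note the cleanup now runs twice on the failure path: once up front to
scrub old state, and once more so a half-allocated context is not left
behind. hmac_ctx_cleanup() itself is outside this hunk; it is assumed
to look roughly like the following (scrubbing secrets and resetting,
not freeing, the digest contexts), which is what makes the repeated
call harmless:

/*
 * Assumed shape of hmac_ctx_cleanup(), shown for orientation only;
 * the helper is defined elsewhere in hmac.c and is not part of this
 * diff. EVP_MD_CTX_reset() tolerates NULL, so this is safe even when
 * the EVP_MD_CTXs have not been allocated yet, and calling it twice
 * in a row is a no-op the second time.
 */
static void hmac_ctx_cleanup(HMAC_CTX *ctx)
{
    EVP_MD_CTX_reset(ctx->i_ctx);
    EVP_MD_CTX_reset(ctx->o_ctx);
    EVP_MD_CTX_reset(ctx->md_ctx);
    OPENSSL_cleanse(ctx->key, sizeof(ctx->key));
    ctx->key_length = 0;
    ctx->md = NULL;
}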
 int HMAC_CTX_copy(HMAC_CTX *dctx, HMAC_CTX *sctx)
 {
-    if (!HMAC_CTX_reset(dctx))
+    if (!hmac_ctx_alloc_mds(dctx))
         goto err;
     if (!EVP_MD_CTX_copy_ex(dctx->i_ctx, sctx->i_ctx))
         goto err;
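
This is the hot path for PBKDF2: the destination context is copied
into once per HMAC invocation. Since EVP_MD_CTX_copy_ex() overwrites
the destination digest contexts wholesale, ensuring they merely exist
via hmac_ctx_alloc_mds() is sufficient, and the old upfront scrub done
by HMAC_CTX_reset() was pure overhead here. A small usage sketch of
that path (clone_keyed_ctx is a hypothetical helper, not part of the
diff):

/*
 * Hypothetical helper, not part of the diff: clones an already-keyed
 * HMAC context, the operation PBKDF2 performs once per iteration.
 */
#include <openssl/hmac.h>

HMAC_CTX *clone_keyed_ctx(HMAC_CTX *src)
{
    HMAC_CTX *dst = HMAC_CTX_new();

    if (dst != NULL && !HMAC_CTX_copy(dst, src)) {
        HMAC_CTX_free(dst);
        dst = NULL;
    }
    return dst;
}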