3 * Written by Dr Stephen N Henson (steve@openssl.org) for the OpenSSL project
6 /* ====================================================================
7 * Copyright (c) 1999 The OpenSSL Project. All rights reserved.
9 * Redistribution and use in source and binary forms, with or without
10 * modification, are permitted provided that the following conditions
13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer.
16 * 2. Redistributions in binary form must reproduce the above copyright
17 * notice, this list of conditions and the following disclaimer in
18 * the documentation and/or other materials provided with the
21 * 3. All advertising materials mentioning features or use of this
22 * software must display the following acknowledgment:
23 * "This product includes software developed by the OpenSSL Project
24 * for use in the OpenSSL Toolkit. (http://www.OpenSSL.org/)"
26 * 4. The names "OpenSSL Toolkit" and "OpenSSL Project" must not be used to
27 * endorse or promote products derived from this software without
28 * prior written permission. For written permission, please contact
29 * licensing@OpenSSL.org.
31 * 5. Products derived from this software may not be called "OpenSSL"
32 * nor may "OpenSSL" appear in their names without prior written
33 * permission of the OpenSSL Project.
35 * 6. Redistributions of any form whatsoever must retain the following
37 * "This product includes software developed by the OpenSSL Project
38 * for use in the OpenSSL Toolkit (http://www.OpenSSL.org/)"
40 * THIS SOFTWARE IS PROVIDED BY THE OpenSSL PROJECT ``AS IS'' AND ANY
41 * EXPRESSED OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
42 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
43 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE OpenSSL PROJECT OR
44 * ITS CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
45 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
46 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
47 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
48 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
49 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
50 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
51 * OF THE POSSIBILITY OF SUCH DAMAGE.
52 * ====================================================================
54 * This product includes cryptographic software written by Eric Young
55 * (eay@cryptsoft.com). This product includes software written by Tim
56 * Hudson (tjh@cryptsoft.com).
60 /* EVP_MD_CTX related stuff */
/*
 * Opaque digest-context structure behind the public EVP_MD_CTX handle.
 * NOTE(review): this extract is missing interior lines of the struct
 * (other fields and the closing brace) -- consult the full header before
 * modifying anything here.
 */
62 struct evp_md_ctx_st {
64 ENGINE *engine; /* functional reference if 'digest' is ENGINE-supplied -- comment truncated in this extract, TODO confirm */
68 /* Public key context for sign/verify */
70 /* Update function: usually copied from EVP_MD */
71 int (*update) (EVP_MD_CTX *ctx, const void *data, size_t count);
/*
 * Legacy "M_" accessor macros: expand to direct struct-field access so the
 * fields can be read/updated without a function call.  'e' is an EVP_MD *
 * (or an EVP_MD_CTX * for the _CTX_ variants); each argument is evaluated
 * exactly once.  M_EVP_MD_CTX_type() composes the two macros below it --
 * legal, since macro expansion happens at the use site, not here.
 */
74 # define M_EVP_MD_size(e) ((e)->md_size)
75 # define M_EVP_MD_block_size(e) ((e)->block_size)
76 # define M_EVP_MD_CTX_set_flags(ctx,flgs) ((ctx)->flags|=(flgs))
77 # define M_EVP_MD_CTX_clear_flags(ctx,flgs) ((ctx)->flags&=~(flgs))
78 # define M_EVP_MD_CTX_test_flags(ctx,flgs) ((ctx)->flags&(flgs))
79 # define M_EVP_MD_type(e) ((e)->type)
80 # define M_EVP_MD_CTX_type(e) M_EVP_MD_type(M_EVP_MD_CTX_md(e))
81 # define M_EVP_MD_CTX_md(e) ((e)->digest)
83 /* Macros used to generate the block-cipher wrapper functions below */
85 /* Wrapper functions for each cipher mode */
/*
 * Shared loop header for the generated ECB wrappers: steps through the
 * input in cipher-block-size increments.  Expects 'i', 'bl' and 'inl' to
 * be in scope at the expansion site.
 * NOTE(review): this extract is missing the declarations of i/bl and the
 * "inl -= bl" adjustment that makes the "i <= inl" bound safe -- confirm
 * against the full header before editing.
 */
87 #define BLOCK_CIPHER_ecb_loop() \
89 bl = ctx->cipher->block_size;\
90 if(inl < bl) return 1; /* less than one full block: nothing to process */\
92 for(i=0; i <= inl; i+=bl)
/*
 * Generates the static ECB wrapper <cname>_ecb_cipher(): iterates
 * BLOCK_CIPHER_ecb_loop() and hands one block at a time to the low-level
 * <cprefix>_ecb_encrypt() routine, using the scheduled key stored in
 * ctx->cipher_data (a kstruct whose key-schedule member is 'ksched').
 * NOTE(review): the function's braces and final "return 1;" are missing
 * from this extract.
 */
94 #define BLOCK_CIPHER_func_ecb(cname, cprefix, kstruct, ksched) \
95 static int cname##_ecb_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out, const unsigned char *in, size_t inl) \
97 BLOCK_CIPHER_ecb_loop() \
98 cprefix##_ecb_encrypt(in + i, out + i, &((kstruct *)ctx->cipher_data)->ksched, ctx->encrypt);\
/*
 * Largest number of bytes fed to a low-level encrypt routine in one call:
 * 2^(bits-in-long - 2), i.e. a quarter of the unsigned long range, so the
 * (long) casts in the wrappers below can never overflow.  Assumes 8-bit
 * bytes (presumably fine on all supported platforms -- TODO confirm).
 */
102 #define EVP_MAXCHUNK ((size_t)1<<(sizeof(long)*8-2))
/*
 * Generates the static OFB wrapper <cname>_ofb_cipher(): processes the
 * input in EVP_MAXCHUNK-sized pieces (so the size_t length can be safely
 * narrowed to the long expected by <cprefix>_ofb<cbits>_encrypt()), then a
 * final partial chunk.  Feedback state lives in ctx->iv / ctx->num.
 * NOTE(review): the braces, the in/out pointer advances and the
 * "inl -= EVP_MAXCHUNK" bookkeeping are missing from this extract.
 */
104 #define BLOCK_CIPHER_func_ofb(cname, cprefix, cbits, kstruct, ksched) \
105 static int cname##_ofb_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out, const unsigned char *in, size_t inl) \
107 while(inl>=EVP_MAXCHUNK)\
109 cprefix##_ofb##cbits##_encrypt(in, out, (long)EVP_MAXCHUNK, &((kstruct *)ctx->cipher_data)->ksched, ctx->iv, &ctx->num);\
115 cprefix##_ofb##cbits##_encrypt(in, out, (long)inl, &((kstruct *)ctx->cipher_data)->ksched, ctx->iv, &ctx->num);\
/*
 * Generates the static CBC wrapper <cname>_cbc_cipher(): same chunking
 * scheme as the OFB wrapper above -- EVP_MAXCHUNK pieces first, then the
 * remainder -- so the size_t->long narrowing is always safe.  Chaining
 * state is ctx->iv; ctx->encrypt selects encrypt vs decrypt.
 * NOTE(review): braces, pointer advances and the length decrement are
 * missing from this extract.
 */
119 #define BLOCK_CIPHER_func_cbc(cname, cprefix, kstruct, ksched) \
120 static int cname##_cbc_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out, const unsigned char *in, size_t inl) \
122 while(inl>=EVP_MAXCHUNK) \
124 cprefix##_cbc_encrypt(in, out, (long)EVP_MAXCHUNK, &((kstruct *)ctx->cipher_data)->ksched, ctx->iv, ctx->encrypt);\
130 cprefix##_cbc_encrypt(in, out, (long)inl, &((kstruct *)ctx->cipher_data)->ksched, ctx->iv, ctx->encrypt);\
/*
 * Generates the static CFB wrapper <cname>_cfb<cbits>_cipher().  For
 * CFB1 the low-level routine counts BITS, so the per-iteration byte
 * budget is divided by 8 (chunk >>= 3) and the length is multiplied by 8
 * -- unless EVP_CIPH_FLAG_LENGTH_BITS says the caller already passed a
 * bit count.
 * NOTE(review): the loop body lines that advance in/out and decrement inl
 * are missing from this extract.  Also, passing "inl" (total remaining)
 * rather than "chunk" in the length expression looks suspicious whenever
 * inl > chunk -- upstream OpenSSL later changed this to use chunk; confirm
 * before relying on the CFB1 path for large inputs.
 */
134 #define BLOCK_CIPHER_func_cfb(cname, cprefix, cbits, kstruct, ksched) \
135 static int cname##_cfb##cbits##_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out, const unsigned char *in, size_t inl) \
137 size_t chunk=EVP_MAXCHUNK;\
138 if (cbits==1) chunk>>=3; /* CFB1: convert the byte budget to fit a bit count */\
139 if (inl<chunk) chunk=inl;\
140 while(inl && inl>=chunk)\
142 cprefix##_cfb##cbits##_encrypt(in, out, (long)((cbits==1) && !(ctx->flags & EVP_CIPH_FLAG_LENGTH_BITS) ?inl*8:inl), &((kstruct *)ctx->cipher_data)->ksched, ctx->iv, &ctx->num, ctx->encrypt);\
146 if(inl<chunk) chunk=inl; /* last pass: shrink to the remaining tail */\
/*
 * Convenience macro: emits all four mode wrappers (CBC, CFB, ECB, OFB)
 * for one cipher in a single expansion.  Each sub-macro defines a static
 * <cname>_<mode>_cipher() function; see the individual macros above.
 */
151 #define BLOCK_CIPHER_all_funcs(cname, cprefix, cbits, kstruct, ksched) \
152 BLOCK_CIPHER_func_cbc(cname, cprefix, kstruct, ksched) \
153 BLOCK_CIPHER_func_cfb(cname, cprefix, cbits, kstruct, ksched) \
154 BLOCK_CIPHER_func_ecb(cname, cprefix, kstruct, ksched) \
155 BLOCK_CIPHER_func_ofb(cname, cprefix, cbits, kstruct, ksched)
/*
 * Defines one static EVP_CIPHER table entry <cname>_<mode> plus its public
 * accessor EVP_<cname>_<mode>().  The NID is pasted as nid##_<nmode> and
 * the mode flag as EVP_CIPH_<MODE>_MODE; <cname>_<mode>_cipher must have
 * been generated by one of the BLOCK_CIPHER_func_* macros above.
 * NOTE(review): several initializer lines of the EVP_CIPHER struct
 * (init_key, cleanup, ctx_size, ASN1 handlers, ctrl) are missing from
 * this extract.
 */
157 #define BLOCK_CIPHER_def1(cname, nmode, mode, MODE, kstruct, nid, block_size, \
158 key_len, iv_len, flags, init_key, cleanup, \
159 set_asn1, get_asn1, ctrl) \
160 static const EVP_CIPHER cname##_##mode = { \
161 nid##_##nmode, block_size, key_len, iv_len, \
162 flags | EVP_CIPH_##MODE##_MODE, \
164 cname##_##mode##_cipher, \
171 const EVP_CIPHER *EVP_##cname##_##mode(void) { return &cname##_##mode; }
/*
 * CBC specialization of BLOCK_CIPHER_def1 (nmode == mode == cbc).
 * NOTE(review): the parameter line naming get_asn1/ctrl is missing from
 * this extract, though both are used in the expansion below.
 */
173 #define BLOCK_CIPHER_def_cbc(cname, kstruct, nid, block_size, key_len, \
174 iv_len, flags, init_key, cleanup, set_asn1, \
176 BLOCK_CIPHER_def1(cname, cbc, cbc, CBC, kstruct, nid, block_size, key_len, \
177 iv_len, flags, init_key, cleanup, set_asn1, get_asn1, ctrl)
/*
 * CFB specialization: the feedback width (cbits) is pasted into both the
 * NID and the function names (cfb##cbits), and block_size is fixed at 1
 * because CFB is a stream mode at the EVP level.
 * NOTE(review): the final expansion line (get_asn1, ctrl) is missing from
 * this extract.
 */
179 #define BLOCK_CIPHER_def_cfb(cname, kstruct, nid, key_len, \
180 iv_len, cbits, flags, init_key, cleanup, \
181 set_asn1, get_asn1, ctrl) \
182 BLOCK_CIPHER_def1(cname, cfb##cbits, cfb##cbits, CFB, kstruct, nid, 1, \
183 key_len, iv_len, flags, init_key, cleanup, set_asn1, \
/*
 * OFB specialization: NID uses ofb##cbits but the function name is plain
 * "ofb" (there is only one OFB wrapper per cipher); block_size is 1 as
 * OFB is a stream mode at the EVP level.
 * NOTE(review): the final expansion line (get_asn1, ctrl) is missing from
 * this extract.
 */
186 #define BLOCK_CIPHER_def_ofb(cname, kstruct, nid, key_len, \
187 iv_len, cbits, flags, init_key, cleanup, \
188 set_asn1, get_asn1, ctrl) \
189 BLOCK_CIPHER_def1(cname, ofb##cbits, ofb, OFB, kstruct, nid, 1, \
190 key_len, iv_len, flags, init_key, cleanup, set_asn1, \
/*
 * ECB specialization: no IV, so iv_len is hard-coded to 0 in the
 * expansion and the macro takes no iv_len parameter.
 * NOTE(review): the parameter line naming get_asn1/ctrl is missing from
 * this extract, though both are used in the expansion below.
 */
193 #define BLOCK_CIPHER_def_ecb(cname, kstruct, nid, block_size, key_len, \
194 flags, init_key, cleanup, set_asn1, \
196 BLOCK_CIPHER_def1(cname, ecb, ecb, ECB, kstruct, nid, block_size, key_len, \
197 0, flags, init_key, cleanup, set_asn1, get_asn1, ctrl)
/*
 * Emits EVP_CIPHER definitions and accessors for all four modes of one
 * cipher, by delegating to the BLOCK_CIPHER_def_* macros above.  This
 * variant takes an explicit CFB/OFB feedback width (cbits), unlike the
 * expanded variant further below.
 */
199 #define BLOCK_CIPHER_defs(cname, kstruct, \
200 nid, block_size, key_len, iv_len, cbits, flags, \
201 init_key, cleanup, set_asn1, get_asn1, ctrl) \
202 BLOCK_CIPHER_def_cbc(cname, kstruct, nid, block_size, key_len, iv_len, flags, \
203 init_key, cleanup, set_asn1, get_asn1, ctrl) \
204 BLOCK_CIPHER_def_cfb(cname, kstruct, nid, key_len, iv_len, cbits, \
205 flags, init_key, cleanup, set_asn1, get_asn1, ctrl) \
206 BLOCK_CIPHER_def_ofb(cname, kstruct, nid, key_len, iv_len, cbits, \
207 flags, init_key, cleanup, set_asn1, get_asn1, ctrl) \
208 BLOCK_CIPHER_def_ecb(cname, kstruct, nid, block_size, key_len, flags, \
209 init_key, cleanup, set_asn1, get_asn1, ctrl)
/*
 * Alternative, fully-expanded form of BLOCK_CIPHER_defs: writes the four
 * static EVP_CIPHER tables (cbc, cfb64, ofb64, ecb) and their accessors
 * inline instead of going through BLOCK_CIPHER_def1.  The ctx_size
 * expression sizes EVP_CIPHER_CTX with its 'c' union replaced by just the
 * scheduled-key struct for this cipher.
 * NOTE(review): this extract shows BLOCK_CIPHER_defs defined twice with
 * no visible preprocessor guard -- upstream keeps the two versions behind
 * an #if/#else that is missing here; confirm against the full header.
 * Several struct-initializer lines are also missing from each table.
 */
212 #define BLOCK_CIPHER_defs(cname, kstruct, \
213 nid, block_size, key_len, iv_len, flags,\
214 init_key, cleanup, set_asn1, get_asn1, ctrl)\
215 static const EVP_CIPHER cname##_cbc = {\
216 nid##_cbc, block_size, key_len, iv_len, \
217 flags | EVP_CIPH_CBC_MODE,\
221 sizeof(EVP_CIPHER_CTX)-sizeof((((EVP_CIPHER_CTX *)NULL)->c))+\
222 sizeof((((EVP_CIPHER_CTX *)NULL)->c.kstruct)),\
227 const EVP_CIPHER *EVP_##cname##_cbc(void) { return &cname##_cbc; }\
228 static const EVP_CIPHER cname##_cfb = {\
229 nid##_cfb64, 1, key_len, iv_len, \
230 flags | EVP_CIPH_CFB_MODE,\
234 sizeof(EVP_CIPHER_CTX)-sizeof((((EVP_CIPHER_CTX *)NULL)->c))+\
235 sizeof((((EVP_CIPHER_CTX *)NULL)->c.kstruct)),\
240 const EVP_CIPHER *EVP_##cname##_cfb(void) { return &cname##_cfb; }\
241 static const EVP_CIPHER cname##_ofb = {\
242 nid##_ofb64, 1, key_len, iv_len, \
243 flags | EVP_CIPH_OFB_MODE,\
247 sizeof(EVP_CIPHER_CTX)-sizeof((((EVP_CIPHER_CTX *)NULL)->c))+\
248 sizeof((((EVP_CIPHER_CTX *)NULL)->c.kstruct)),\
253 const EVP_CIPHER *EVP_##cname##_ofb(void) { return &cname##_ofb; }\
254 static const EVP_CIPHER cname##_ecb = {\
255 nid##_ecb, block_size, key_len, iv_len, \
256 flags | EVP_CIPH_ECB_MODE,\
260 sizeof(EVP_CIPHER_CTX)-sizeof((((EVP_CIPHER_CTX *)NULL)->c))+\
261 sizeof((((EVP_CIPHER_CTX *)NULL)->c.kstruct)),\
266 const EVP_CIPHER *EVP_##cname##_ecb(void) { return &cname##_ecb; }
/*
 * Top-level convenience macro: generates the four mode wrapper functions
 * (BLOCK_CIPHER_all_funcs) and then the four EVP_CIPHER tables/accessors
 * (BLOCK_CIPHER_defs) for a single cipher in one expansion.
 * NOTE(review): the parameter line declaring 'flags' and 'init_key' and
 * the final expansion line are missing from this extract, although both
 * names are used below.
 */
269 #define IMPLEMENT_BLOCK_CIPHER(cname, ksched, cprefix, kstruct, nid, \
270 block_size, key_len, iv_len, cbits, \
272 cleanup, set_asn1, get_asn1, ctrl) \
273 BLOCK_CIPHER_all_funcs(cname, cprefix, cbits, kstruct, ksched) \
274 BLOCK_CIPHER_defs(cname, kstruct, nid, block_size, key_len, iv_len, \
275 cbits, flags, init_key, cleanup, set_asn1, \
/* Typed view of the per-cipher private state hung off an EVP_CIPHER_CTX. */
278 #define EVP_C_DATA(kstruct, ctx) ((kstruct *)(ctx)->cipher_data)
/*
 * Generates a CFB-only cipher variant for one key size: the wrapper
 * function (BLOCK_CIPHER_func_cfb) plus its EVP_CIPHER definition
 * (BLOCK_CIPHER_def_cfb) under the name <cipher>_<keysize>, with
 * EVP_CIPH_FLAG_DEFAULT_ASN1 OR-ed into the flags and no custom
 * cleanup/ASN1/ctrl handlers.  keysize is in bits (key_len = keysize/8).
 */
280 #define IMPLEMENT_CFBR(cipher,cprefix,kstruct,ksched,keysize,cbits,iv_len,fl) \
281 BLOCK_CIPHER_func_cfb(cipher##_##keysize,cprefix,cbits,kstruct,ksched) \
282 BLOCK_CIPHER_def_cfb(cipher##_##keysize,kstruct, \
283 NID_##cipher##_##keysize, keysize/8, iv_len, cbits, \
284 (fl)|EVP_CIPH_FLAG_DEFAULT_ASN1, \
285 cipher##_init_key, NULL, NULL, NULL, NULL)
/*
 * Prototype: derives the key and IV for cipher 'c' from a passphrase via
 * PBKDF2 (PKCS#5 v2.0), with parameters taken from the ASN.1 'param'.
 * NOTE(review): the declaration is truncated at the end of this extract
 * (the trailing parameter(s) and closing ");" are not visible).
 */
287 int PKCS5_v2_PBKDF2_keyivgen(EVP_CIPHER_CTX *ctx, const char *pass,
288 int passlen, ASN1_TYPE *param,
289 const EVP_CIPHER *c, const EVP_MD *md,