3 * Written by Dr Stephen N Henson (steve@openssl.org) for the OpenSSL project
6 /* ====================================================================
7 * Copyright (c) 1999 The OpenSSL Project. All rights reserved.
9 * Redistribution and use in source and binary forms, with or without
10 * modification, are permitted provided that the following conditions
13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer.
16 * 2. Redistributions in binary form must reproduce the above copyright
17 * notice, this list of conditions and the following disclaimer in
18 * the documentation and/or other materials provided with the
21 * 3. All advertising materials mentioning features or use of this
22 * software must display the following acknowledgment:
23 * "This product includes software developed by the OpenSSL Project
24 * for use in the OpenSSL Toolkit. (http://www.OpenSSL.org/)"
26 * 4. The names "OpenSSL Toolkit" and "OpenSSL Project" must not be used to
27 * endorse or promote products derived from this software without
28 * prior written permission. For written permission, please contact
29 * licensing@OpenSSL.org.
31 * 5. Products derived from this software may not be called "OpenSSL"
32 * nor may "OpenSSL" appear in their names without prior written
33 * permission of the OpenSSL Project.
35 * 6. Redistributions of any form whatsoever must retain the following
37 * "This product includes software developed by the OpenSSL Project
38 * for use in the OpenSSL Toolkit (http://www.OpenSSL.org/)"
40 * THIS SOFTWARE IS PROVIDED BY THE OpenSSL PROJECT ``AS IS'' AND ANY
41 * EXPRESSED OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
42 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
43 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE OpenSSL PROJECT OR
44 * ITS CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
45 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
46 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
47 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
48 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
49 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
50 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
51 * OF THE POSSIBILITY OF SUCH DAMAGE.
52 * ====================================================================
54 * This product includes cryptographic software written by Eric Young
55 * (eay@cryptsoft.com). This product includes software written by Tim
56 * Hudson (tjh@cryptsoft.com).
60 /* EVP_MD_CTX related stuff */
/*
 * Digest context (EVP_MD_CTX).
 * NOTE(review): this copy appears truncated -- other members (e.g. the
 * digest method pointer, flags, md_data) and the closing brace are not
 * visible here; confirm against the full header before relying on layout.
 */
struct evp_md_ctx_st {
    /* Functional ENGINE reference held while 'digest' is ENGINE-provided.
     * (Review: original trailing comment was unterminated; closed here --
     * confirm exact wording against upstream.) */
    ENGINE *engine;
    /* Public key context for sign/verify */
    /* Update function: usually copied from EVP_MD */
    int (*update) (EVP_MD_CTX *ctx, const void *data, size_t count);
74 /* Macros to code block cipher wrappers */
76 /* Wrapper functions for each cipher mode */
/*
 * Shared ECB loop header: declares the index/block-size locals, bails out
 * (successfully) when fewer than one full block remains, then iterates
 * over each complete block.  The 'inl -= bl' adjustment is essential:
 * without it, 'i <= inl' would walk one block past the end of the input.
 * (Restored the missing declaration and adjustment lines.)
 */
#define BLOCK_CIPHER_ecb_loop() \
        size_t i, bl; \
        bl = ctx->cipher->block_size;\
        if(inl < bl) return 1;\
        inl -= bl;\
        for(i=0; i <= inl; i+=bl)
/*
 * Emit the static ECB wrapper <cname>_ecb_cipher() adapting the low-level
 * <cprefix>_ecb_encrypt() routine to the EVP do_cipher signature.
 * Returns 1 (success) unconditionally, including the short-input case
 * handled inside BLOCK_CIPHER_ecb_loop().
 * (Restored the missing function-body braces and return statement.)
 */
#define BLOCK_CIPHER_func_ecb(cname, cprefix, kstruct, ksched) \
static int cname##_ecb_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out, const unsigned char *in, size_t inl) \
{\
        BLOCK_CIPHER_ecb_loop() \
            cprefix##_ecb_encrypt(in + i, out + i, &((kstruct *)ctx->cipher_data)->ksched, ctx->encrypt);\
        return 1;\
}
/*
 * Largest number of bytes handed to a low-level cipher routine in one
 * call; the wrapper macros below cast the count to 'long', and this bound
 * keeps that cast positive and in range on all platforms.
 */
#define EVP_MAXCHUNK ((size_t)1<<(sizeof(long)*8-2))
/*
 * Emit the static OFB wrapper <cname>_ofb_cipher(): feeds the input to the
 * low-level <cprefix>_ofb<cbits>_encrypt() in EVP_MAXCHUNK-sized pieces so
 * the length always fits in a 'long', then handles the tail.  Always
 * returns 1.
 * (Restored the missing braces, pointer/length advancement -- without
 * which the while loop never terminates -- tail guard and return.)
 */
#define BLOCK_CIPHER_func_ofb(cname, cprefix, cbits, kstruct, ksched) \
static int cname##_ofb_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out, const unsigned char *in, size_t inl) \
{\
        while(inl>=EVP_MAXCHUNK)\
            {\
            cprefix##_ofb##cbits##_encrypt(in, out, (long)EVP_MAXCHUNK, &((kstruct *)ctx->cipher_data)->ksched, ctx->iv, &ctx->num);\
            inl-=EVP_MAXCHUNK;\
            in +=EVP_MAXCHUNK;\
            out+=EVP_MAXCHUNK;\
            }\
        if (inl)\
            cprefix##_ofb##cbits##_encrypt(in, out, (long)inl, &((kstruct *)ctx->cipher_data)->ksched, ctx->iv, &ctx->num);\
        return 1;\
}
/*
 * Emit the static CBC wrapper <cname>_cbc_cipher(): same chunking scheme
 * as the OFB wrapper (EVP_MAXCHUNK pieces, then the tail) but passing
 * ctx->encrypt instead of a feedback counter.  Always returns 1.
 * (Restored the missing braces, pointer/length advancement, tail guard
 * and return.)
 */
#define BLOCK_CIPHER_func_cbc(cname, cprefix, kstruct, ksched) \
static int cname##_cbc_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out, const unsigned char *in, size_t inl) \
{\
        while(inl>=EVP_MAXCHUNK) \
            {\
            cprefix##_cbc_encrypt(in, out, (long)EVP_MAXCHUNK, &((kstruct *)ctx->cipher_data)->ksched, ctx->iv, ctx->encrypt);\
            inl-=EVP_MAXCHUNK;\
            in +=EVP_MAXCHUNK;\
            out+=EVP_MAXCHUNK;\
            }\
        if (inl)\
            cprefix##_cbc_encrypt(in, out, (long)inl, &((kstruct *)ctx->cipher_data)->ksched, ctx->iv, ctx->encrypt);\
        return 1;\
}
/*
 * Emit the static CFB wrapper <cname>_cfb<cbits>_cipher().  'chunk' starts
 * at EVP_MAXCHUNK (divided by 8 in 1-bit feedback mode, where the length
 * argument counts bits unless EVP_CIPH_FLAG_LENGTH_BITS is set) and is
 * clamped to the remaining input.  Always returns 1.
 * (Restored the missing braces, pointer/length advancement and return;
 * the length expression is preserved verbatim from the reviewed copy --
 * NOTE(review): it uses 'inl' rather than 'chunk', which matches the
 * historical upstream macro; confirm before "simplifying".)
 */
#define BLOCK_CIPHER_func_cfb(cname, cprefix, cbits, kstruct, ksched) \
static int cname##_cfb##cbits##_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out, const unsigned char *in, size_t inl) \
{\
        size_t chunk=EVP_MAXCHUNK;\
        if (cbits==1) chunk>>=3;\
        if (inl<chunk) chunk=inl;\
        while(inl && inl>=chunk)\
            {\
            cprefix##_cfb##cbits##_encrypt(in, out, (long)((cbits==1) && !(ctx->flags & EVP_CIPH_FLAG_LENGTH_BITS) ?inl*8:inl), &((kstruct *)ctx->cipher_data)->ksched, ctx->iv, &ctx->num, ctx->encrypt);\
            inl-=chunk;\
            in +=chunk;\
            out+=chunk;\
            if(inl<chunk) chunk=inl;\
            }\
        return 1;\
}
/*
 * Emit all four static mode-wrapper functions (ECB, CBC, CFB, OFB) for a
 * single block-cipher implementation.  The four definitions are mutually
 * independent, so their order is immaterial.
 */
#define BLOCK_CIPHER_all_funcs(cname, cprefix, cbits, kstruct, ksched) \
        BLOCK_CIPHER_func_ecb(cname, cprefix, kstruct, ksched) \
        BLOCK_CIPHER_func_cbc(cname, cprefix, kstruct, ksched) \
        BLOCK_CIPHER_func_cfb(cname, cprefix, cbits, kstruct, ksched) \
        BLOCK_CIPHER_func_ofb(cname, cprefix, cbits, kstruct, ksched)
/*
 * Define one static EVP_CIPHER table plus the public accessor
 * EVP_<cname>_<mode>() returning it.  Initializer order follows the
 * classic EVP_CIPHER layout: nid, block_size, key_len, iv_len, flags,
 * init, do_cipher, cleanup, ctx_size, set_asn1_parameters,
 * get_asn1_parameters, ctrl, app_data.
 * NOTE(review): the middle of this initializer and its closing '};' were
 * missing/unclosed in the reviewed copy; restored per the upstream macro
 * -- verify the field order against this tree's EVP_CIPHER definition.
 */
#define BLOCK_CIPHER_def1(cname, nmode, mode, MODE, kstruct, nid, block_size, \
                          key_len, iv_len, flags, init_key, cleanup, \
                          set_asn1, get_asn1, ctrl) \
static const EVP_CIPHER cname##_##mode = { \
        nid##_##nmode, block_size, key_len, iv_len, \
        flags | EVP_CIPH_##MODE##_MODE, \
        init_key, \
        cname##_##mode##_cipher, \
        cleanup, \
        sizeof(kstruct), \
        set_asn1, get_asn1, \
        ctrl, \
        NULL \
}; \
const EVP_CIPHER *EVP_##cname##_##mode(void) { return &cname##_##mode; }
/*
 * CBC convenience front-end for BLOCK_CIPHER_def1 (mode tag and NID
 * suffix are both 'cbc').
 * (Restored the final parameter-list line 'get_asn1, ctrl) \' -- without
 * it the macro's parameter list is never closed and the following
 * definition is swallowed by the trailing backslash.)
 */
#define BLOCK_CIPHER_def_cbc(cname, kstruct, nid, block_size, key_len, \
                             iv_len, flags, init_key, cleanup, set_asn1, \
                             get_asn1, ctrl) \
BLOCK_CIPHER_def1(cname, cbc, cbc, CBC, kstruct, nid, block_size, key_len, \
                  iv_len, flags, init_key, cleanup, set_asn1, get_asn1, ctrl)
/*
 * CFB convenience front-end for BLOCK_CIPHER_def1: block size is fixed at
 * 1 (stream-like operation) and the feedback width 'cbits' is pasted into
 * both the mode tag and the NID suffix (cfb<cbits>).
 * (Restored the final 'get_asn1, ctrl)' line; the dangling backslash on
 * the previous line otherwise merges the next #define into this one.)
 */
#define BLOCK_CIPHER_def_cfb(cname, kstruct, nid, key_len, \
                             iv_len, cbits, flags, init_key, cleanup, \
                             set_asn1, get_asn1, ctrl) \
BLOCK_CIPHER_def1(cname, cfb##cbits, cfb##cbits, CFB, kstruct, nid, 1, \
                  key_len, iv_len, flags, init_key, cleanup, set_asn1, \
                  get_asn1, ctrl)
/*
 * OFB convenience front-end for BLOCK_CIPHER_def1: block size fixed at 1;
 * 'cbits' selects the NID suffix (ofb<cbits>) while the mode tag stays
 * plain 'ofb'.
 * (Restored the final 'get_asn1, ctrl)' line that closes the
 * BLOCK_CIPHER_def1 invocation.)
 */
#define BLOCK_CIPHER_def_ofb(cname, kstruct, nid, key_len, \
                             iv_len, cbits, flags, init_key, cleanup, \
                             set_asn1, get_asn1, ctrl) \
BLOCK_CIPHER_def1(cname, ofb##cbits, ofb, OFB, kstruct, nid, 1, \
                  key_len, iv_len, flags, init_key, cleanup, set_asn1, \
                  get_asn1, ctrl)
/*
 * ECB convenience front-end for BLOCK_CIPHER_def1: ECB uses no IV, hence
 * the hard-coded iv_len of 0.
 * (Restored the missing 'get_asn1, ctrl) \' parameter-list line.)
 */
#define BLOCK_CIPHER_def_ecb(cname, kstruct, nid, block_size, key_len, \
                             flags, init_key, cleanup, set_asn1, \
                             get_asn1, ctrl) \
BLOCK_CIPHER_def1(cname, ecb, ecb, ECB, kstruct, nid, block_size, key_len, \
                  0, flags, init_key, cleanup, set_asn1, get_asn1, ctrl)
/*
 * Instantiate the EVP_CIPHER tables and public accessor functions for
 * every classic mode (ECB, CBC, CFB, OFB) of one block cipher in a single
 * macro call.  Each BLOCK_CIPHER_def_* expansion is self-contained, so
 * the order of the four invocations is immaterial.
 */
#define BLOCK_CIPHER_defs(cname, kstruct, \
                          nid, block_size, key_len, iv_len, cbits, flags, \
                          init_key, cleanup, set_asn1, get_asn1, ctrl) \
BLOCK_CIPHER_def_ecb(cname, kstruct, nid, block_size, key_len, flags, \
                     init_key, cleanup, set_asn1, get_asn1, ctrl) \
BLOCK_CIPHER_def_cbc(cname, kstruct, nid, block_size, key_len, iv_len, \
                     flags, init_key, cleanup, set_asn1, get_asn1, ctrl) \
BLOCK_CIPHER_def_cfb(cname, kstruct, nid, key_len, iv_len, cbits, flags, \
                     init_key, cleanup, set_asn1, get_asn1, ctrl) \
BLOCK_CIPHER_def_ofb(cname, kstruct, nid, key_len, iv_len, cbits, flags, \
                     init_key, cleanup, set_asn1, get_asn1, ctrl)
/*
 * Legacy variant of BLOCK_CIPHER_defs that built the EVP_CIPHER tables
 * inline (note the different context-size computation via the
 * EVP_CIPHER_CTX 'c' union member) instead of delegating to
 * BLOCK_CIPHER_def1.
 * NOTE(review): this is a second definition of BLOCK_CIPHER_defs -- in
 * the upstream header the two versions are selected by an '#if 1 / #else
 * / #endif' pair that is not visible in this chunk; confirm the guards
 * survive in the full file, otherwise this is a macro redefinition.
 * NOTE(review): several initializer fields (init/do_cipher/cleanup and
 * the asn1/ctrl entries) and the closing '};' lines appear to be missing
 * from each of the four table initializers below -- TODO restore from
 * upstream before this branch is ever compiled.
 */
#define BLOCK_CIPHER_defs(cname, kstruct, \
                          nid, block_size, key_len, iv_len, flags,\
                          init_key, cleanup, set_asn1, get_asn1, ctrl)\
static const EVP_CIPHER cname##_cbc = {\
        nid##_cbc, block_size, key_len, iv_len, \
        flags | EVP_CIPH_CBC_MODE,\
        sizeof(EVP_CIPHER_CTX)-sizeof((((EVP_CIPHER_CTX *)NULL)->c))+\
        sizeof((((EVP_CIPHER_CTX *)NULL)->c.kstruct)),\
const EVP_CIPHER *EVP_##cname##_cbc(void) { return &cname##_cbc; }\
static const EVP_CIPHER cname##_cfb = {\
        nid##_cfb64, 1, key_len, iv_len, \
        flags | EVP_CIPH_CFB_MODE,\
        sizeof(EVP_CIPHER_CTX)-sizeof((((EVP_CIPHER_CTX *)NULL)->c))+\
        sizeof((((EVP_CIPHER_CTX *)NULL)->c.kstruct)),\
const EVP_CIPHER *EVP_##cname##_cfb(void) { return &cname##_cfb; }\
static const EVP_CIPHER cname##_ofb = {\
        nid##_ofb64, 1, key_len, iv_len, \
        flags | EVP_CIPH_OFB_MODE,\
        sizeof(EVP_CIPHER_CTX)-sizeof((((EVP_CIPHER_CTX *)NULL)->c))+\
        sizeof((((EVP_CIPHER_CTX *)NULL)->c.kstruct)),\
const EVP_CIPHER *EVP_##cname##_ofb(void) { return &cname##_ofb; }\
static const EVP_CIPHER cname##_ecb = {\
        nid##_ecb, block_size, key_len, iv_len, \
        flags | EVP_CIPH_ECB_MODE,\
        sizeof(EVP_CIPHER_CTX)-sizeof((((EVP_CIPHER_CTX *)NULL)->c))+\
        sizeof((((EVP_CIPHER_CTX *)NULL)->c.kstruct)),\
const EVP_CIPHER *EVP_##cname##_ecb(void) { return &cname##_ecb; }
/*
 * One-stop macro: emit the four mode wrapper functions
 * (BLOCK_CIPHER_all_funcs) and the four EVP_CIPHER tables/accessors
 * (BLOCK_CIPHER_defs) for a block cipher implementation.
 * (Restored the missing 'flags, init_key' parameter-list line and the
 * final 'get_asn1, ctrl)' line closing the BLOCK_CIPHER_defs invocation.)
 */
#define IMPLEMENT_BLOCK_CIPHER(cname, ksched, cprefix, kstruct, nid, \
                               block_size, key_len, iv_len, cbits, \
                               flags, init_key, \
                               cleanup, set_asn1, get_asn1, ctrl) \
        BLOCK_CIPHER_all_funcs(cname, cprefix, cbits, kstruct, ksched) \
        BLOCK_CIPHER_defs(cname, kstruct, nid, block_size, key_len, iv_len, \
                          cbits, flags, init_key, cleanup, set_asn1, \
                          get_asn1, ctrl)
/* Accessor: a cipher context's cipher_data pointer, cast to the
 * implementation's key-schedule struct type 'kstruct'. */
#define EVP_C_DATA(kstruct, ctx) ((kstruct *)(ctx)->cipher_data)
/*
 * Emit a reduced-feedback CFB variant (e.g. CFB1/CFB8) for one key size:
 * the wrapper function plus its EVP_CIPHER table/accessor, with default
 * ASN.1 handling (EVP_CIPH_FLAG_DEFAULT_ASN1) and no cleanup/asn1/ctrl
 * callbacks.  'keysize' is in bits (hence keysize/8 for key_len).
 */
#define IMPLEMENT_CFBR(cipher,cprefix,kstruct,ksched,keysize,cbits,iv_len,fl) \
        BLOCK_CIPHER_func_cfb(cipher##_##keysize,cprefix,cbits,kstruct,ksched) \
        BLOCK_CIPHER_def_cfb(cipher##_##keysize,kstruct, \
                             NID_##cipher##_##keysize, keysize/8, iv_len, cbits, \
                             (fl)|EVP_CIPH_FLAG_DEFAULT_ASN1, \
                             cipher##_init_key, NULL, NULL, NULL, NULL)
278 int PKCS5_v2_PBKDF2_keyivgen(EVP_CIPHER_CTX *ctx, const char *pass,
279 int passlen, ASN1_TYPE *param,
280 const EVP_CIPHER *c, const EVP_MD *md,
283 struct evp_Encode_Ctx_st {
284 /* number saved in a partial encode/decode */
287 * The length is either the output line length (in input bytes) or the
288 * shortest input line length that is ok. Once decoding begins, the
289 * length is adjusted up each time a longer line is decoded
293 unsigned char enc_data[80];
294 /* number read on current line */