2 * Support for VIA PadLock Advanced Cryptography Engine (ACE)
3 * Written by Michal Ludvig <michal@logix.cz>
4 * http://www.logix.cz/michal
6 * Big thanks to Andy Polyakov for his help with optimization,
7 * assembler fixes, port to MS Windows and a lot of other
8 * valuable work on this engine!
11 /* ====================================================================
12 * Copyright (c) 1999-2001 The OpenSSL Project. All rights reserved.
14 * Redistribution and use in source and binary forms, with or without
15 * modification, are permitted provided that the following conditions
18 * 1. Redistributions of source code must retain the above copyright
19 * notice, this list of conditions and the following disclaimer.
21 * 2. Redistributions in binary form must reproduce the above copyright
22 * notice, this list of conditions and the following disclaimer in
23 * the documentation and/or other materials provided with the
26 * 3. All advertising materials mentioning features or use of this
27 * software must display the following acknowledgment:
28 * "This product includes software developed by the OpenSSL Project
29 * for use in the OpenSSL Toolkit. (http://www.OpenSSL.org/)"
31 * 4. The names "OpenSSL Toolkit" and "OpenSSL Project" must not be used to
32 * endorse or promote products derived from this software without
33 * prior written permission. For written permission, please contact
34 * licensing@OpenSSL.org.
36 * 5. Products derived from this software may not be called "OpenSSL"
37 * nor may "OpenSSL" appear in their names without prior written
38 * permission of the OpenSSL Project.
40 * 6. Redistributions of any form whatsoever must retain the following
42 * "This product includes software developed by the OpenSSL Project
43 * for use in the OpenSSL Toolkit (http://www.OpenSSL.org/)"
45 * THIS SOFTWARE IS PROVIDED BY THE OpenSSL PROJECT ``AS IS'' AND ANY
46 * EXPRESSED OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
47 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
48 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE OpenSSL PROJECT OR
49 * ITS CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
50 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
51 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
52 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
53 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
54 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
55 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
56 * OF THE POSSIBILITY OF SUCH DAMAGE.
57 * ====================================================================
59 * This product includes cryptographic software written by Eric Young
60 * (eay@cryptsoft.com). This product includes software written by Tim
61 * Hudson (tjh@cryptsoft.com).
69 #include <openssl/crypto.h>
70 #include <openssl/dso.h>
71 #include <openssl/engine.h>
72 #include <openssl/evp.h>
73 #include <openssl/aes.h>
74 #include <openssl/rand.h>
77 #ifndef OPENSSL_NO_HW_PADLOCK
79 /* Attempt to have a single source for both 0.9.7 and 0.9.8 :-) */
80 #if (OPENSSL_VERSION_NUMBER >= 0x00908000L)
81 # ifndef OPENSSL_NO_DYNAMIC_ENGINE
82 # define DYNAMIC_ENGINE
84 #elif (OPENSSL_VERSION_NUMBER >= 0x00907000L)
85 # ifdef ENGINE_DYNAMIC_SUPPORT
86 # define DYNAMIC_ENGINE
89 # error "Only OpenSSL >= 0.9.7 is supported"
92 /* VIA PadLock AES is available *ONLY* on some x86 CPUs.
93 Not only is it absent elsewhere, it cannot even
94 be compiled on other platforms!
96 In addition, because of the heavy use of inline assembler,
97 compiler choice is limited to GCC and Microsoft C. */
98 #undef COMPILE_HW_PADLOCK
99 #if !defined(I386_ONLY) && !defined(OPENSSL_NO_INLINE_ASM)
100 # if (defined(__GNUC__) && (defined(__i386__) || defined(__i386))) || \
101 (defined(_MSC_VER) && defined(_M_IX86))
102 # define COMPILE_HW_PADLOCK
103 static ENGINE *ENGINE_padlock (void);
107 void ENGINE_load_padlock (void)
109 /* On non-x86 CPUs it just returns. */
110 #ifdef COMPILE_HW_PADLOCK
111 ENGINE *toadd = ENGINE_padlock ();
119 #ifdef COMPILE_HW_PADLOCK
120 /* We do these includes here to avoid header problems on platforms that
121 do not have the VIA padlock anyway... */
124 # define alloca _alloca
129 /* Function for ENGINE detection and control */
130 static int padlock_available(void);
131 static int padlock_init(ENGINE *e);
134 static RAND_METHOD padlock_rand;
137 static int padlock_ciphers(ENGINE *e, const EVP_CIPHER **cipher, const int **nids, int nid);
140 static const char *padlock_id = "padlock";
141 static char padlock_name[100];
143 /* Available features */
144 static int padlock_use_ace = 0; /* Advanced Cryptography Engine */
145 static int padlock_use_rng = 0; /* Random Number Generator */
146 static int padlock_aes_align_required = 1;
148 /* ===== Engine "management" functions ===== */
150 /* Prepare the ENGINE structure for registration */
152 padlock_bind_helper(ENGINE *e)
154 /* Check available features */
157 #if 1 /* disable RNG for now, see commentary in vicinity of RNG code */
161 /* Generate a nice engine name with available features */
162 BIO_snprintf(padlock_name, sizeof(padlock_name),
163 "VIA PadLock (%s, %s)",
164 padlock_use_rng ? "RNG" : "no-RNG",
165 padlock_use_ace ? "ACE" : "no-ACE");
167 /* Register everything or return with an error */
168 if (!ENGINE_set_id(e, padlock_id) ||
169 !ENGINE_set_name(e, padlock_name) ||
171 !ENGINE_set_init_function(e, padlock_init) ||
173 (padlock_use_ace && !ENGINE_set_ciphers (e, padlock_ciphers)) ||
174 (padlock_use_rng && !ENGINE_set_RAND (e, &padlock_rand))) {
178 /* Everything looks good */
186 ENGINE *eng = ENGINE_new();
192 if (!padlock_bind_helper(eng)) {
200 /* Check availability of the engine */
202 padlock_init(ENGINE *e)
204 return (padlock_use_rng || padlock_use_ace);
207 /* This stuff is needed if this ENGINE is being compiled into a self-contained
210 #ifdef DYNAMIC_ENGINE
212 padlock_bind_fn(ENGINE *e, const char *id)
214 if (id && (strcmp(id, padlock_id) != 0)) {
218 if (!padlock_bind_helper(e)) {
225 IMPLEMENT_DYNAMIC_CHECK_FN ();
226 IMPLEMENT_DYNAMIC_BIND_FN (padlock_bind_fn);
227 #endif /* DYNAMIC_ENGINE */
229 /* ===== Here comes the "real" engine ===== */
231 /* Some AES-related constants */
232 #define AES_BLOCK_SIZE 16
233 #define AES_KEY_SIZE_128 16
234 #define AES_KEY_SIZE_192 24
235 #define AES_KEY_SIZE_256 32
237 /* Here we store the status information relevant to the
240 * Inline assembler in PADLOCK_XCRYPT_ASM()
241 * depends on the order of items in this structure.
242 * Don't blindly modify, reorder, etc!
244 struct padlock_cipher_data
246 unsigned char iv[AES_BLOCK_SIZE]; /* Initialization vector */
247 union { unsigned int pad[4];
256 } cword; /* Control word */
257 AES_KEY ks; /* Encryption key */
261 * Essentially this variable belongs in thread local storage.
262 * Having this variable global on the other hand can only cause
263 * a few bogus key reloads [if any at all on a single-CPU system],
264 * so we accept the penalty...
266 static volatile struct padlock_cipher_data *padlock_saved_context;
269 * =======================================================
270 * Inline assembler section(s).
271 * =======================================================
272 * The order of arguments is chosen to facilitate the Windows port
273 * using the __fastcall calling convention. If you wish to add
274 * more routines, keep in mind that the first __fastcall
275 * argument is passed in %ecx and the second in %edx.
276 * =======================================================
278 #if defined(__GNUC__) && __GNUC__>=2
280 * As for the excessive "push %ebx"/"pop %ebx" found all over:
281 * when generating position-independent code GCC won't let
282 * us use "b" in assembler templates, nor even respect "ebx"
283 * in the "clobber description." Hence the trouble...
286 /* Helper function - check whether the CPUID instruction
287 is available on this CPU */
289 padlock_insn_cpuid_available(void)
293 /* We're checking whether bit #21 of EFLAGS
294 can be toggled. If it can, CPUID is available. */
298 "xorl $0x200000, %%eax\n"
299 "movl %%eax, %%ecx\n"
300 "andl $0x200000, %%ecx\n"
305 "andl $0x200000, %%eax\n"
306 "xorl %%eax, %%ecx\n"
308 : "=r" (result) : : "eax", "ecx");
310 return (result == 0);
313 /* Load the supported features of the CPU to see whether
314 PadLock is available. */
316 padlock_available(void)
318 char vendor_string[16];
319 unsigned int eax, edx;
321 /* First check if the CPUID instruction is available at all... */
322 if (! padlock_insn_cpuid_available())
325 /* Are we running on a Centaur (VIA) CPU? */
327 vendor_string[12] = 0;
331 "movl %%ebx,(%%edi)\n"
332 "movl %%edx,4(%%edi)\n"
333 "movl %%ecx,8(%%edi)\n"
335 : "+a"(eax) : "D"(vendor_string) : "ecx", "edx");
336 if (strcmp(vendor_string, "CentaurHauls") != 0)
339 /* Check for Centaur Extended Feature Flags presence */
341 asm volatile ("pushl %%ebx; cpuid; popl %%ebx"
342 : "+a"(eax) : : "ecx", "edx");
343 if (eax < 0xC0000001)
346 /* Read the Centaur Extended Feature Flags */
348 asm volatile ("pushl %%ebx; cpuid; popl %%ebx"
349 : "+a"(eax), "=d"(edx) : : "ecx");
351 /* Fill up some flags */
352 padlock_use_ace = ((edx & (0x3<<6)) == (0x3<<6));
353 padlock_use_rng = ((edx & (0x3<<2)) == (0x3<<2));
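/* In the Centaur extended feature flags returned by CPUID leaf
   0xC0000001, EDX bits 6/7 are commonly documented as "ACE present"
   and "ACE enabled", and bits 2/3 as "RNG present" and "RNG enabled";
   the masks above therefore require each unit to be both present
   *and* enabled before we use it. */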
355 return padlock_use_ace + padlock_use_rng;
358 /* Our own htonl()/ntohl() */
360 padlock_bswapl(AES_KEY *ks)
362 size_t i = sizeof(ks->rd_key)/sizeof(ks->rd_key[0]);
363 unsigned int *key = ks->rd_key;
366 asm volatile ("bswapl %0" : "+r"(*key));
371 /* Force key reload from memory to the CPU microcode.
372 Loading EFLAGS from the stack clears EFLAGS[30]
373 which does the trick. */
375 padlock_reload_key(void)
377 asm volatile ("pushfl; popfl");
381 * This is heuristic key context tracing. At first one
382 * might believe that atomic swap instructions are needed,
383 * but they are not actually necessary. The point is that if
384 * padlock_saved_context was changed by another thread
385 * after we've read it and before we compare it with cdata,
386 * our key *shall* be reloaded upon the thread context switch
387 * and we are therefore set in either case...
390 padlock_verify_context(struct padlock_cipher_data *cdata)
402 :"+m"(padlock_saved_context)
403 : "r"(padlock_saved_context), "r"(cdata) : "cc");
406 /* Template for padlock_xcrypt_* modes */
408 * The offsets used with 'leal' instructions
409 * describe items of the 'padlock_cipher_data'
412 #define PADLOCK_XCRYPT_ASM(name,rep_xcrypt) \
413 static inline void *name(size_t cnt, \
414 struct padlock_cipher_data *cdata, \
415 void *out, const void *inp) \
417 asm volatile ( "pushl %%ebx\n" \
418 " leal 16(%0),%%edx\n" \
419 " leal 32(%0),%%ebx\n" \
422 : "=a"(iv), "=c"(cnt), "=D"(out), "=S"(inp) \
423 : "0"(cdata), "1"(cnt), "2"(out), "3"(inp) \
428 /* Generate all functions with appropriate opcodes */
429 PADLOCK_XCRYPT_ASM(padlock_xcrypt_ecb, ".byte 0xf3,0x0f,0xa7,0xc8") /* rep xcryptecb */
430 PADLOCK_XCRYPT_ASM(padlock_xcrypt_cbc, ".byte 0xf3,0x0f,0xa7,0xd0") /* rep xcryptcbc */
431 PADLOCK_XCRYPT_ASM(padlock_xcrypt_cfb, ".byte 0xf3,0x0f,0xa7,0xe0") /* rep xcryptcfb */
432 PADLOCK_XCRYPT_ASM(padlock_xcrypt_ofb, ".byte 0xf3,0x0f,0xa7,0xe8") /* rep xcryptofb */
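/* For reference, the functions generated above take (cnt, cdata, out, inp)
 * and, as set up by the template, execute their xcrypt instruction with
 * ECX = number of 16-byte blocks, ESI = input, EDI = output,
 * EDX = &cdata->cword (control word) and EBX = &cdata->ks (key schedule),
 * while EAX points at cdata->iv.  The returned pointer is whatever EAX
 * holds afterwards, which the chaining modes below use as the updated IV. */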
434 /* The RNG call itself */
435 static inline unsigned int
436 padlock_xstore(void *addr, unsigned int edx_in)
438 unsigned int eax_out;
440 asm volatile (".byte 0x0f,0xa7,0xc0" /* xstore */
441 : "=a"(eax_out),"=m"(*(unsigned *)addr)
442 : "D"(addr), "d" (edx_in)
448 /* Why not inline 'rep movsd'? I failed to find information on what
449 * value of the Direction Flag one can expect and consequently have to
450 * take the "better-safe-than-sorry" approach and assume it "undefined."
451 * I could explicitly clear it and restore the original value upon
452 * return from padlock_aes_cipher, but it's presumably too much
453 * trouble for too little gain...
455 * In case you wonder, the 'rep xcrypt*' instructions above are *not*
456 * affected by the Direction Flag and their pointers advance toward
457 * larger addresses unconditionally.
459 static inline unsigned char *
460 padlock_memcpy(void *dst,const void *src,size_t n)
466 do { *d++ = *s++; } while (--n);
471 #elif defined(_MSC_VER)
473 * Unlike with GCC, these are real functions. To minimize the impact
474 * on performance we adhere to the __fastcall calling convention in
475 * order to get the first two arguments passed in %ecx and %edx.
476 * This suits very well, as the instructions in question use
477 * both %ecx and %edx as input:-)
479 #define REP_XCRYPT(code) \
481 _asm _emit 0x0f _asm _emit 0xa7 \
485 * The offsets used with 'lea' instructions
486 * describe items of the 'padlock_cipher_data'
489 #define PADLOCK_XCRYPT_ASM(name,code) \
490 static void * __fastcall \
491 name (size_t cnt, void *cdata, \
492 void *outp, const void *inp) \
494 _asm lea edx,[eax+16] \
495 _asm lea ebx,[eax+32] \
501 PADLOCK_XCRYPT_ASM(padlock_xcrypt_ecb,0xc8)
502 PADLOCK_XCRYPT_ASM(padlock_xcrypt_cbc,0xd0)
503 PADLOCK_XCRYPT_ASM(padlock_xcrypt_cfb,0xe0)
504 PADLOCK_XCRYPT_ASM(padlock_xcrypt_ofb,0xe8)
506 static int __fastcall
507 padlock_xstore(void *outp,unsigned int code)
509 _asm _emit 0x0f _asm _emit 0xa7 _asm _emit 0xc0
512 static void __fastcall
513 padlock_reload_key(void)
514 { _asm pushfd _asm popfd }
516 static void __fastcall
517 padlock_verify_context(void *cdata)
522 cmp ecx,padlock_saved_context
527 mov padlock_saved_context,ecx
532 padlock_available(void)
567 mov padlock_use_ace,1
573 mov padlock_use_rng,1
580 static void __fastcall
581 padlock_bswapl(void *key)
596 /* MS actually specifies the status of the Direction Flag, and the compiler
597 * even manages to compile the following as 'rep movsd' all by itself...
599 #define padlock_memcpy(o,i,n) ((unsigned char *)memcpy((o),(i),(n)&~3U))
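/* The (n)&~3U above keeps the length a multiple of 4 (matching the
   dword-sized 'rep movsd'); it is harmless here because every caller
   passes a multiple of AES_BLOCK_SIZE (16) bytes. */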
602 /* ===== AES encryption/decryption ===== */
604 #if defined(NID_aes_128_cfb128) && ! defined (NID_aes_128_cfb)
605 #define NID_aes_128_cfb NID_aes_128_cfb128
608 #if defined(NID_aes_128_ofb128) && ! defined (NID_aes_128_ofb)
609 #define NID_aes_128_ofb NID_aes_128_ofb128
612 #if defined(NID_aes_192_cfb128) && ! defined (NID_aes_192_cfb)
613 #define NID_aes_192_cfb NID_aes_192_cfb128
616 #if defined(NID_aes_192_ofb128) && ! defined (NID_aes_192_ofb)
617 #define NID_aes_192_ofb NID_aes_192_ofb128
620 #if defined(NID_aes_256_cfb128) && ! defined (NID_aes_256_cfb)
621 #define NID_aes_256_cfb NID_aes_256_cfb128
624 #if defined(NID_aes_256_ofb128) && ! defined (NID_aes_256_ofb)
625 #define NID_aes_256_ofb NID_aes_256_ofb128
628 /* List of supported ciphers. */
629 static int padlock_cipher_nids[] = {
638 NID_aes_192_cfb, /* FIXME: AES192/256 CFB/OFB don't work. */
649 static int padlock_cipher_nids_num = (sizeof(padlock_cipher_nids)/
650 sizeof(padlock_cipher_nids[0]));
652 /* Function prototypes ... */
653 static int padlock_aes_init_key(EVP_CIPHER_CTX *ctx, const unsigned char *key,
654 const unsigned char *iv, int enc);
655 static int padlock_aes_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out,
656 const unsigned char *in, size_t nbytes);
658 #define NEAREST_ALIGNED(ptr) ( (unsigned char *)(ptr) + \
659 ( (0x10 - ((size_t)(ptr) & 0x0F)) & 0x0F ) )
660 #define ALIGNED_CIPHER_DATA(ctx) ((struct padlock_cipher_data *)\
661 NEAREST_ALIGNED(ctx->cipher_data))
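/* An example of the NEAREST_ALIGNED() arithmetic: a pointer ending in
   ...0x07 gets 0x09 added and lands on ...0x10, while an already aligned
   pointer gets 0 added.  The extra 16 bytes requested for cipher_data in
   DECLARE_AES_EVP() below guarantee the rounded-up pointer still lies
   within the allocation. */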
663 /* Declaring so many ciphers by hand would be a pain.
664 Instead introduce a bit of preprocessor magic :-) */
665 #define DECLARE_AES_EVP(ksize,lmode,umode) \
666 static const EVP_CIPHER padlock_aes_##ksize##_##lmode = { \
667 NID_aes_##ksize##_##lmode, \
669 AES_KEY_SIZE_##ksize, \
671 0 | EVP_CIPH_##umode##_MODE, \
672 padlock_aes_init_key, \
673 padlock_aes_cipher, \
675 sizeof(struct padlock_cipher_data) + 16, \
676 EVP_CIPHER_set_asn1_iv, \
677 EVP_CIPHER_get_asn1_iv, \
682 DECLARE_AES_EVP(128,ecb,ECB);
683 DECLARE_AES_EVP(128,cbc,CBC);
684 DECLARE_AES_EVP(128,cfb,CFB);
685 DECLARE_AES_EVP(128,ofb,OFB);
687 DECLARE_AES_EVP(192,ecb,ECB);
688 DECLARE_AES_EVP(192,cbc,CBC);
689 DECLARE_AES_EVP(192,cfb,CFB);
690 DECLARE_AES_EVP(192,ofb,OFB);
692 DECLARE_AES_EVP(256,ecb,ECB);
693 DECLARE_AES_EVP(256,cbc,CBC);
694 DECLARE_AES_EVP(256,cfb,CFB);
695 DECLARE_AES_EVP(256,ofb,OFB);
698 padlock_ciphers (ENGINE *e, const EVP_CIPHER **cipher, const int **nids, int nid)
700 /* No specific cipher => return a list of supported nids ... */
702 *nids = padlock_cipher_nids;
703 return padlock_cipher_nids_num;
706 /* ... or the requested "cipher" otherwise */
708 case NID_aes_128_ecb:
709 *cipher = &padlock_aes_128_ecb;
711 case NID_aes_128_cbc:
712 *cipher = &padlock_aes_128_cbc;
714 case NID_aes_128_cfb:
715 *cipher = &padlock_aes_128_cfb;
717 case NID_aes_128_ofb:
718 *cipher = &padlock_aes_128_ofb;
721 case NID_aes_192_ecb:
722 *cipher = &padlock_aes_192_ecb;
724 case NID_aes_192_cbc:
725 *cipher = &padlock_aes_192_cbc;
727 case NID_aes_192_cfb:
728 *cipher = &padlock_aes_192_cfb;
730 case NID_aes_192_ofb:
731 *cipher = &padlock_aes_192_ofb;
734 case NID_aes_256_ecb:
735 *cipher = &padlock_aes_256_ecb;
737 case NID_aes_256_cbc:
738 *cipher = &padlock_aes_256_cbc;
740 case NID_aes_256_cfb:
741 *cipher = &padlock_aes_256_cfb;
743 case NID_aes_256_ofb:
744 *cipher = &padlock_aes_256_ofb;
748 /* Sorry, we don't support this NID */
756 /* Prepare the encryption key for PadLock usage */
758 padlock_aes_init_key (EVP_CIPHER_CTX *ctx, const unsigned char *key,
759 const unsigned char *iv, int enc)
761 struct padlock_cipher_data *cdata;
762 int key_len = EVP_CIPHER_CTX_key_length(ctx) * 8;
764 if (key==NULL) return 0; /* ERROR */
766 cdata = ALIGNED_CIPHER_DATA(ctx);
767 memset(cdata, 0, sizeof(struct padlock_cipher_data));
769 /* Prepare Control word. */
770 cdata->cword.b.encdec = (ctx->encrypt == 0);
771 cdata->cword.b.rounds = 10 + (key_len - 128) / 32;
772 cdata->cword.b.ksize = (key_len - 128) / 64;
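/* For the supported key sizes the two formulas above work out to:
     128-bit key -> rounds = 10, ksize = 0
     192-bit key -> rounds = 12, ksize = 1
     256-bit key -> rounds = 14, ksize = 2 */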
776 /* PadLock can generate an extended key for
777 AES128 in hardware */
778 memcpy(cdata->ks.rd_key, key, AES_KEY_SIZE_128);
779 cdata->cword.b.keygen = 0;
784 /* Generate an extended AES key in software.
785 Needed for AES192/AES256 */
786 /* Well, the above applies to Stepping 8 CPUs
787 and is listed as a hardware erratum. It will most
788 likely be fixed at some point, and then
789 a check for the stepping would be due here. */
791 AES_set_encrypt_key(key, key_len, &cdata->ks);
793 AES_set_decrypt_key(key, key_len, &cdata->ks);
795 /* OpenSSL C functions use byte-swapped extended key. */
796 padlock_bswapl(&cdata->ks);
798 cdata->cword.b.keygen = 1;
807 * This is done to cover cases when the user reuses the
808 * context for a new key. The catch is that if we don't do
809 * this, padlock_aes_cipher might proceed with the old key...
811 padlock_reload_key ();
817 * Simplified version of padlock_aes_cipher() used when
818 * 1) both input and output buffers are at aligned addresses.
820 * 2) running on a newer CPU that doesn't require aligned buffers.
823 padlock_aes_cipher_omnivorous(EVP_CIPHER_CTX *ctx, unsigned char *out_arg,
824 const unsigned char *in_arg, size_t nbytes)
826 struct padlock_cipher_data *cdata;
829 cdata = ALIGNED_CIPHER_DATA(ctx);
830 padlock_verify_context(cdata);
832 switch (EVP_CIPHER_CTX_mode(ctx)) {
833 case EVP_CIPH_ECB_MODE:
834 padlock_xcrypt_ecb(nbytes/AES_BLOCK_SIZE, cdata, out_arg, in_arg);
837 case EVP_CIPH_CBC_MODE:
838 memcpy(cdata->iv, ctx->iv, AES_BLOCK_SIZE);
839 iv = padlock_xcrypt_cbc(nbytes/AES_BLOCK_SIZE, cdata, out_arg, in_arg);
840 memcpy(ctx->iv, iv, AES_BLOCK_SIZE);
843 case EVP_CIPH_CFB_MODE:
844 memcpy(cdata->iv, ctx->iv, AES_BLOCK_SIZE);
845 iv = padlock_xcrypt_cfb(nbytes/AES_BLOCK_SIZE, cdata, out_arg, in_arg);
846 memcpy(ctx->iv, iv, AES_BLOCK_SIZE);
849 case EVP_CIPH_OFB_MODE:
850 memcpy(cdata->iv, ctx->iv, AES_BLOCK_SIZE);
851 padlock_xcrypt_ofb(nbytes/AES_BLOCK_SIZE, cdata, out_arg, in_arg);
852 memcpy(ctx->iv, cdata->iv, AES_BLOCK_SIZE);
859 memset(cdata->iv, 0, AES_BLOCK_SIZE);
864 #ifndef PADLOCK_CHUNK
865 # define PADLOCK_CHUNK 4096 /* Must be a power of 2 not smaller than 16 */
867 #if PADLOCK_CHUNK<16 || PADLOCK_CHUNK&(PADLOCK_CHUNK-1)
868 # error "insane PADLOCK_CHUNK..."
871 /* Re-align the arguments to 16-byte boundaries and run the
872 encryption function itself. This function is not AES-specific. */
874 padlock_aes_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out_arg,
875 const unsigned char *in_arg, size_t nbytes)
877 struct padlock_cipher_data *cdata;
881 int inp_misaligned, out_misaligned, realign_in_loop;
882 size_t chunk, allocated=0;
886 if (nbytes % AES_BLOCK_SIZE)
887 return 0; /* are we expected to do tail processing? */
889 /* VIA promises CPUs that won't require alignment in the future.
890 For now padlock_aes_align_required is initialized to 1 and
891 the condition is never met... */
892 if (!padlock_aes_align_required)
893 return padlock_aes_cipher_omnivorous(ctx, out_arg, in_arg, nbytes);
895 inp_misaligned = (((size_t)in_arg) & 0x0F);
896 out_misaligned = (((size_t)out_arg) & 0x0F);
898 /* Note that even if the output is aligned and the input is not,
899 * I still prefer to loop instead of copying the whole
900 * input and then encrypting in one stroke. This is done
901 * in order to improve L1 cache utilization... */
902 realign_in_loop = out_misaligned|inp_misaligned;
904 if (!realign_in_loop)
905 return padlock_aes_cipher_omnivorous(ctx, out_arg, in_arg, nbytes);
907 /* this takes one "if" out of the loops */
909 chunk %= PADLOCK_CHUNK;
910 if (chunk==0) chunk = PADLOCK_CHUNK;
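/* The first pass therefore handles the "odd" tail (nbytes mod PADLOCK_CHUNK,
   or a full chunk when nbytes divides evenly); every later pass can then use
   a full PADLOCK_CHUNK without re-checking the remaining length inside the
   loops. */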
912 if (out_misaligned) {
913 /* optimize for small input */
914 allocated = (chunk<nbytes?PADLOCK_CHUNK:nbytes);
915 out = alloca(0x10 + allocated);
916 out = NEAREST_ALIGNED(out);
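/* When the output is misaligned, each chunk is encrypted into this
   16-byte-aligned bounce buffer on the stack and then copied back to
   out_arg by padlock_memcpy() inside the mode-specific loops below. */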
921 cdata = ALIGNED_CIPHER_DATA(ctx);
922 padlock_verify_context(cdata);
924 switch (EVP_CIPHER_CTX_mode(ctx)) {
925 case EVP_CIPH_ECB_MODE:
928 inp = padlock_memcpy(out, in_arg, chunk);
933 padlock_xcrypt_ecb(chunk/AES_BLOCK_SIZE, cdata, out, inp);
936 out_arg = padlock_memcpy(out_arg, out, chunk) + chunk;
938 out = out_arg+=chunk;
941 chunk = PADLOCK_CHUNK;
945 case EVP_CIPH_CBC_MODE:
946 memcpy(cdata->iv, ctx->iv, AES_BLOCK_SIZE);
950 memcpy(cdata->iv, iv, AES_BLOCK_SIZE);
951 chunk = PADLOCK_CHUNK;
952 cbc_shortcut: /* optimize for small input */
954 inp = padlock_memcpy(out, in_arg, chunk);
959 iv = padlock_xcrypt_cbc(chunk/AES_BLOCK_SIZE, cdata, out, inp);
962 out_arg = padlock_memcpy(out_arg, out, chunk) + chunk;
964 out = out_arg+=chunk;
966 } while (nbytes -= chunk);
967 memcpy(ctx->iv, iv, AES_BLOCK_SIZE);
970 case EVP_CIPH_CFB_MODE:
971 memcpy (cdata->iv, ctx->iv, AES_BLOCK_SIZE);
975 memcpy(cdata->iv, iv, AES_BLOCK_SIZE);
976 chunk = PADLOCK_CHUNK;
977 cfb_shortcut: /* optimize for small input */
979 inp = padlock_memcpy(out, in_arg, chunk);
984 iv = padlock_xcrypt_cfb(chunk/AES_BLOCK_SIZE, cdata, out, inp);
987 out_arg = padlock_memcpy(out_arg, out, chunk) + chunk;
989 out = out_arg+=chunk;
991 } while (nbytes -= chunk);
992 memcpy(ctx->iv, iv, AES_BLOCK_SIZE);
995 case EVP_CIPH_OFB_MODE:
996 memcpy(cdata->iv, ctx->iv, AES_BLOCK_SIZE);
999 inp = padlock_memcpy(out, in_arg, chunk);
1004 padlock_xcrypt_ofb(chunk/AES_BLOCK_SIZE, cdata, out, inp);
1007 out_arg = padlock_memcpy(out_arg, out, chunk) + chunk;
1009 out = out_arg+=chunk;
1012 chunk = PADLOCK_CHUNK;
1014 memcpy(ctx->iv, cdata->iv, AES_BLOCK_SIZE);
1021 /* Clean the realign buffer if it was used */
1022 if (out_misaligned) {
1023 volatile unsigned long *p=(void *)out;
1024 size_t n = allocated/sizeof(*p);
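/* The volatile qualifier keeps the compiler from optimizing away the
   scrubbing of data left behind in the alloca()ed bounce buffer. */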
1028 memset(cdata->iv, 0, AES_BLOCK_SIZE);
1033 /* ===== Random Number Generator ===== */
1035 * This code is not engaged. The reason is that it does not comply
1036 * with recommendations for VIA RNG usage for secure applications
1037 * (posted at http://www.via.com.tw/en/viac3/c3.jsp) nor does it
1038 * provide meaningful error control...
1040 /* Wrapper that provides an interface between the API and
1041 the raw PadLock RNG */
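/* The status checks below decode the EAX value returned by XSTORE:
   bit 6 must be set (RNG enabled), bits 10-14 must be clear (DC bias,
   Raw Bits and String Filter failure flags), and the low five bits give
   the number of bytes actually stored -- 8 per call in the bulk loop,
   1 per call in the byte-by-byte tail. */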
1043 padlock_rand_bytes(unsigned char *output, int count)
1045 unsigned int eax, buf;
1047 while (count >= 8) {
1048 eax = padlock_xstore(output, 0);
1049 if (!(eax&(1<<6))) return 0; /* RNG disabled */
1050 /* this ---vv--- covers DC bias, Raw Bits and String Filter */
1051 if (eax&(0x1F<<10)) return 0;
1052 if ((eax&0x1F)==0) continue; /* no data, retry... */
1053 if ((eax&0x1F)!=8) return 0; /* fatal failure... */
1058 eax = padlock_xstore(&buf, 3);
1059 if (!(eax&(1<<6))) return 0; /* RNG disabled */
1060 /* this ---vv--- covers DC bias, Raw Bits and String Filter */
1061 if (eax&(0x1F<<10)) return 0;
1062 if ((eax&0x1F)==0) continue; /* no data, retry... */
1063 if ((eax&0x1F)!=1) return 0; /* fatal failure... */
1064 *output++ = (unsigned char)buf;
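/* Scrub the on-stack copy of the last random byte; the volatile cast
   below keeps the compiler from optimizing the dead store away. */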
1067 *(volatile unsigned int *)&buf=0;
1072 /* Dummy but necessary function */
1074 padlock_rand_status(void)
1079 /* Prepare structure for registration */
1080 static RAND_METHOD padlock_rand = {
1082 padlock_rand_bytes, /* bytes */
1085 padlock_rand_bytes, /* pseudorand */
1086 padlock_rand_status, /* rand status */
1089 #endif /* COMPILE_HW_PADLOCK */
1091 #endif /* !OPENSSL_NO_HW_PADLOCK */
1092 #endif /* !OPENSSL_NO_HW */