2 * Support for VIA PadLock Advanced Cryptography Engine (ACE)
3 * Written by Michal Ludvig <michal@logix.cz>
4 * http://www.logix.cz/michal
6 * Big thanks to Andy Polyakov for a help with optimization,
7 * assembler fixes, port to MS Windows and a lot of other
8 * valuable work on this engine!
11 /* ====================================================================
12 * Copyright (c) 1999-2001 The OpenSSL Project. All rights reserved.
14 * Redistribution and use in source and binary forms, with or without
15 * modification, are permitted provided that the following conditions
18 * 1. Redistributions of source code must retain the above copyright
19 * notice, this list of conditions and the following disclaimer.
21 * 2. Redistributions in binary form must reproduce the above copyright
22 * notice, this list of conditions and the following disclaimer in
23 * the documentation and/or other materials provided with the
26 * 3. All advertising materials mentioning features or use of this
27 * software must display the following acknowledgment:
28 * "This product includes software developed by the OpenSSL Project
29 * for use in the OpenSSL Toolkit. (http://www.OpenSSL.org/)"
31 * 4. The names "OpenSSL Toolkit" and "OpenSSL Project" must not be used to
32 * endorse or promote products derived from this software without
33 * prior written permission. For written permission, please contact
34 * licensing@OpenSSL.org.
36 * 5. Products derived from this software may not be called "OpenSSL"
37 * nor may "OpenSSL" appear in their names without prior written
38 * permission of the OpenSSL Project.
40 * 6. Redistributions of any form whatsoever must retain the following
42 * "This product includes software developed by the OpenSSL Project
43 * for use in the OpenSSL Toolkit (http://www.OpenSSL.org/)"
45 * THIS SOFTWARE IS PROVIDED BY THE OpenSSL PROJECT ``AS IS'' AND ANY
46 * EXPRESSED OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
47 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
48 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE OpenSSL PROJECT OR
49 * ITS CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
50 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
51 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
52 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
53 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
54 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
55 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
56 * OF THE POSSIBILITY OF SUCH DAMAGE.
57 * ====================================================================
59 * This product includes cryptographic software written by Eric Young
60 * (eay@cryptsoft.com). This product includes software written by Tim
61 * Hudson (tjh@cryptsoft.com).
68 #include <openssl/opensslconf.h>
69 #include <openssl/crypto.h>
70 #include <openssl/dso.h>
71 #include <openssl/engine.h>
72 #include <openssl/evp.h>
73 #ifndef OPENSSL_NO_AES
74 # include <openssl/aes.h>
76 #include <openssl/rand.h>
77 #include <openssl/err.h>
78 #include <openssl/modes.h>
/*
 * NOTE(review): this chunk is an extraction artifact — each line carries the
 * original file's line number fused to its front and several lines (braces,
 * #endif's) are missing.  Structure below is indicative only.
 */
81 # ifndef OPENSSL_NO_HW_PADLOCK
83 /* Attempt to have a single source for both 0.9.7 and 0.9.8 :-) */
84 # if (OPENSSL_VERSION_NUMBER >= 0x00908000L)
85 # ifndef OPENSSL_NO_DYNAMIC_ENGINE
86 # define DYNAMIC_ENGINE
88 # elif (OPENSSL_VERSION_NUMBER >= 0x00907000L)
89 # ifdef ENGINE_DYNAMIC_SUPPORT
90 # define DYNAMIC_ENGINE
93 # error "Only OpenSSL >= 0.9.7 is supported"
97 * VIA PadLock AES is available *ONLY* on some x86 CPUs. Not only that it
98 * doesn't exist elsewhere, but it even can't be compiled on other platforms!
/* COMPILE_HW_PADLOCK is defined only for x86/x86_64 targets with assembler support */
101 # undef COMPILE_HW_PADLOCK
102 # if !defined(I386_ONLY) && !defined(OPENSSL_NO_ASM)
103 # if defined(__i386__) || defined(__i386) || \
104 defined(__x86_64__) || defined(__x86_64) || \
105 defined(_M_IX86) || defined(_M_AMD64) || defined(_M_X64) || \
107 # define COMPILE_HW_PADLOCK
108 # ifdef OPENSSL_NO_DYNAMIC_ENGINE
109 static ENGINE *ENGINE_padlock(void);
/* Statically-linked build: register the engine with the global ENGINE list. */
114 # ifdef OPENSSL_NO_DYNAMIC_ENGINE
116 void ENGINE_load_padlock(void)
118 /* On non-x86 CPUs it just returns. */
119 # ifdef COMPILE_HW_PADLOCK
120 ENGINE *toadd = ENGINE_padlock();
131 # ifdef COMPILE_HW_PADLOCK
133 /* Function for ENGINE detection and control */
134 static int padlock_available(void);
135 static int padlock_init(ENGINE *e);
/* RNG method table, defined near the bottom of the file. */
138 static RAND_METHOD padlock_rand;
141 # ifndef OPENSSL_NO_AES
/* EVP cipher dispatch callback registered via ENGINE_set_ciphers(). */
142 static int padlock_ciphers(ENGINE *e, const EVP_CIPHER **cipher,
143 const int **nids, int nid);
/* Engine identity; padlock_name is filled in by padlock_bind_helper(). */
147 static const char *padlock_id = "padlock";
148 static char padlock_name[100];
150 /* Available features */
/* Set by padlock_available() from the CPU capability bits. */
151 static int padlock_use_ace = 0; /* Advanced Cryptography Engine */
152 static int padlock_use_rng = 0; /* Random Number Generator */
154 /* ===== Engine "management" functions ===== */
156 /* Prepare the ENGINE structure for registration */
/*
 * Probes CPU features, builds the human-readable engine name, and installs
 * the id/name/init/cipher/RAND callbacks on `e`.  Returns non-zero on
 * success.  NOTE(review): several lines (feature probe call, error return)
 * are missing from this extraction.
 */
157 static int padlock_bind_helper(ENGINE *e)
159 /* Check available features */
163 * RNG is currently disabled for reasons discussed in commentary just
164 * before padlock_rand_bytes function.
168 /* Generate a nice engine name with available features */
169 BIO_snprintf(padlock_name, sizeof(padlock_name),
170 "VIA PadLock (%s, %s)",
171 padlock_use_rng ? "RNG" : "no-RNG",
172 padlock_use_ace ? "ACE" : "no-ACE");
174 /* Register everything or return with an error */
175 if (!ENGINE_set_id(e, padlock_id) ||
176 !ENGINE_set_name(e, padlock_name) ||
177 !ENGINE_set_init_function(e, padlock_init) ||
178 # ifndef OPENSSL_NO_AES
/* Ciphers/RAND are only offered when the hardware actually has them. */
179 (padlock_use_ace && !ENGINE_set_ciphers(e, padlock_ciphers)) ||
181 (padlock_use_rng && !ENGINE_set_RAND(e, &padlock_rand))) {
185 /* Everything looks good */
189 # ifdef OPENSSL_NO_DYNAMIC_ENGINE
/* Constructor used by the static build: allocate + bind a fresh ENGINE. */
191 static ENGINE *ENGINE_padlock(void)
193 ENGINE *eng = ENGINE_new();
199 if (!padlock_bind_helper(eng)) {
208 /* Check availability of the engine */
209 static int padlock_init(ENGINE *e)
211 return (padlock_use_rng || padlock_use_ace);
215 * This stuff is needed if this ENGINE is being compiled into a
216 * self-contained shared-library.
218 # ifdef DYNAMIC_ENGINE
/*
 * Dynamic-loading entry point: reject a mismatched engine id, then reuse
 * padlock_bind_helper() to populate the caller-supplied ENGINE.
 */
219 static int padlock_bind_fn(ENGINE *e, const char *id)
221 if (id && (strcmp(id, padlock_id) != 0)) {
225 if (!padlock_bind_helper(e)) {
232 IMPLEMENT_DYNAMIC_CHECK_FN()
233 IMPLEMENT_DYNAMIC_BIND_FN(padlock_bind_fn)
234 # endif /* DYNAMIC_ENGINE */
235 /* ===== Here comes the "real" engine ===== */
236 # ifndef OPENSSL_NO_AES
237 /* Some AES-related constants */
238 # define AES_BLOCK_SIZE 16
239 # define AES_KEY_SIZE_128 16
240 # define AES_KEY_SIZE_192 24
241 # define AES_KEY_SIZE_256 32
243 * Here we store the status information relevant to the current context.
246 * BIG FAT WARNING: Inline assembler in PADLOCK_XCRYPT_ASM() depends on
247 * the order of items in this structure. Don't blindly modify, reorder,
250 struct padlock_cipher_data {
251 unsigned char iv[AES_BLOCK_SIZE]; /* Initialization vector */
/*
 * Control-word bit-fields follow (the enclosing union/struct lines are
 * missing from this extraction).  NOTE(review): `dgst`/`align`/`ciphr`
 * are plain `int` bit-fields while `keygen`/`encdec` are `unsigned int`;
 * signedness of a 1-bit `int` field is implementation-defined — worth
 * normalizing to `unsigned int` upstream, but left byte-identical here.
 */
256 int dgst:1; /* n/a in C3 */
257 int align:1; /* n/a in C3 */
258 int ciphr:1; /* n/a in C3 */
259 unsigned int keygen:1; /* 1 = hardware expands the key, 0 = key schedule in ks */
261 unsigned int encdec:1; /* direction bit consumed by the xcrypt instructions */
264 } cword; /* Control word */
265 AES_KEY ks; /* Encryption key */
269 /* Interface to assembler module */
270 unsigned int padlock_capability();
271 void padlock_key_bswap(AES_KEY *key);
272 void padlock_verify_context(struct padlock_cipher_data *ctx);
273 void padlock_reload_key();
274 void padlock_aes_block(void *out, const void *inp,
275 struct padlock_cipher_data *ctx);
276 int padlock_ecb_encrypt(void *out, const void *inp,
277 struct padlock_cipher_data *ctx, size_t len);
278 int padlock_cbc_encrypt(void *out, const void *inp,
279 struct padlock_cipher_data *ctx, size_t len);
280 int padlock_cfb_encrypt(void *out, const void *inp,
281 struct padlock_cipher_data *ctx, size_t len);
282 int padlock_ofb_encrypt(void *out, const void *inp,
283 struct padlock_cipher_data *ctx, size_t len);
284 int padlock_ctr32_encrypt(void *out, const void *inp,
285 struct padlock_cipher_data *ctx, size_t len);
286 int padlock_xstore(void *out, int edx);
287 void padlock_sha1_oneshot(void *ctx, const void *inp, size_t len);
288 void padlock_sha1(void *ctx, const void *inp, size_t len);
289 void padlock_sha256_oneshot(void *ctx, const void *inp, size_t len);
290 void padlock_sha256(void *ctx, const void *inp, size_t len);
293 * Load supported features of the CPU to see if the PadLock is available.
295 static int padlock_available(void)
297 unsigned int edx = padlock_capability();
299 /* Fill up some flags */
300 padlock_use_ace = ((edx & (0x3 << 6)) == (0x3 << 6));
301 padlock_use_rng = ((edx & (0x3 << 2)) == (0x3 << 2));
303 return padlock_use_ace + padlock_use_rng;
306 /* ===== AES encryption/decryption ===== */
307 # ifndef OPENSSL_NO_AES
/* Map the *_cfb128/*_ofb128 NID spellings to the short names used below. */
309 # if defined(NID_aes_128_cfb128) && ! defined (NID_aes_128_cfb)
310 # define NID_aes_128_cfb NID_aes_128_cfb128
313 # if defined(NID_aes_128_ofb128) && ! defined (NID_aes_128_ofb)
314 # define NID_aes_128_ofb NID_aes_128_ofb128
317 # if defined(NID_aes_192_cfb128) && ! defined (NID_aes_192_cfb)
318 # define NID_aes_192_cfb NID_aes_192_cfb128
321 # if defined(NID_aes_192_ofb128) && ! defined (NID_aes_192_ofb)
322 # define NID_aes_192_ofb NID_aes_192_ofb128
325 # if defined(NID_aes_256_cfb128) && ! defined (NID_aes_256_cfb)
326 # define NID_aes_256_cfb NID_aes_256_cfb128
329 # if defined(NID_aes_256_ofb128) && ! defined (NID_aes_256_ofb)
330 # define NID_aes_256_ofb NID_aes_256_ofb128
333 /* List of supported ciphers. */
/* NOTE(review): the array's element lines are missing from this extraction. */
334 static const int padlock_cipher_nids[] = {
354 static int padlock_cipher_nids_num = (sizeof(padlock_cipher_nids) /
355 sizeof(padlock_cipher_nids[0]));
357 /* Function prototypes ... */
358 static int padlock_aes_init_key(EVP_CIPHER_CTX *ctx, const unsigned char *key,
359 const unsigned char *iv, int enc);
/*
 * Round a pointer up to the next 16-byte boundary; the PadLock xcrypt
 * instructions require a 16-byte-aligned control/key block, so the
 * EVP cipher-data area is over-allocated by 16 and aligned on access.
 */
361 # define NEAREST_ALIGNED(ptr) ( (unsigned char *)(ptr) + \
362 ( (0x10 - ((size_t)(ptr) & 0x0F)) & 0x0F ) )
363 # define ALIGNED_CIPHER_DATA(ctx) ((struct padlock_cipher_data *)\
364 NEAREST_ALIGNED(EVP_CIPHER_CTX_cipher_data(ctx)))
367 padlock_ecb_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out_arg,
368 const unsigned char *in_arg, size_t nbytes)
370 return padlock_ecb_encrypt(out_arg, in_arg,
371 ALIGNED_CIPHER_DATA(ctx), nbytes);
375 padlock_cbc_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out_arg,
376 const unsigned char *in_arg, size_t nbytes)
378 struct padlock_cipher_data *cdata = ALIGNED_CIPHER_DATA(ctx);
381 memcpy(cdata->iv, EVP_CIPHER_CTX_iv(ctx), AES_BLOCK_SIZE);
382 if ((ret = padlock_cbc_encrypt(out_arg, in_arg, cdata, nbytes)))
383 memcpy(EVP_CIPHER_CTX_iv_noconst(ctx), cdata->iv, AES_BLOCK_SIZE);
/*
 * CFB128 EVP callback.  Three phases: (1) consume any partial block left in
 * ctx->num byte-by-byte against the EVP IV buffer; (2) bulk-process whole
 * blocks via the assembler xcrypt-CFB routine; (3) handle a trailing partial
 * block in software, generating one keystream block with padlock_aes_block.
 * NOTE(review): extraction is missing several structural lines (braces,
 * loop headers); code kept byte-identical.
 */
388 padlock_cfb_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out_arg,
389 const unsigned char *in_arg, size_t nbytes)
391 struct padlock_cipher_data *cdata = ALIGNED_CIPHER_DATA(ctx);
/* Phase 1: drain leftover keystream bytes from a previous partial block. */
394 if ((chunk = EVP_CIPHER_CTX_num(ctx))) { /* borrow chunk variable */
395 unsigned char *ivp = EVP_CIPHER_CTX_iv_noconst(ctx);
397 if (chunk >= AES_BLOCK_SIZE)
398 return 0; /* bogus value */
400 if (EVP_CIPHER_CTX_encrypting(ctx))
401 while (chunk < AES_BLOCK_SIZE && nbytes != 0) {
402 ivp[chunk] = *(out_arg++) = *(in_arg++) ^ ivp[chunk];
/* Decrypt direction: feed the ciphertext byte back into the shift register. */
405 while (chunk < AES_BLOCK_SIZE && nbytes != 0) {
406 unsigned char c = *(in_arg++);
407 *(out_arg++) = c ^ ivp[chunk];
408 ivp[chunk++] = c, nbytes--;
411 EVP_CIPHER_CTX_set_num(ctx, chunk % AES_BLOCK_SIZE);
/* Phase 2: whole blocks through the hardware. */
417 memcpy(cdata->iv, EVP_CIPHER_CTX_iv(ctx), AES_BLOCK_SIZE);
419 if ((chunk = nbytes & ~(AES_BLOCK_SIZE - 1))) {
420 if (!padlock_cfb_encrypt(out_arg, in_arg, cdata, chunk))
/* Phase 3: trailing partial block, keystream generated in software. */
426 unsigned char *ivp = cdata->iv;
430 EVP_CIPHER_CTX_set_num(ctx, nbytes);
/* CFB always *encrypts* the IV; temporarily flip encdec for decrypt contexts. */
431 if (cdata->cword.b.encdec) {
432 cdata->cword.b.encdec = 0;
433 padlock_reload_key();
434 padlock_aes_block(ivp, ivp, cdata);
435 cdata->cword.b.encdec = 1;
436 padlock_reload_key();
438 unsigned char c = *(in_arg++);
439 *(out_arg++) = c ^ *ivp;
440 *(ivp++) = c, nbytes--;
443 padlock_reload_key();
444 padlock_aes_block(ivp, ivp, cdata);
445 padlock_reload_key();
447 *ivp = *(out_arg++) = *(in_arg++) ^ *ivp;
/* Propagate the updated IV back to the EVP context. */
453 memcpy(EVP_CIPHER_CTX_iv_noconst(ctx), cdata->iv, AES_BLOCK_SIZE);
/*
 * OFB128 EVP callback.  Same three-phase shape as CFB above: drain leftover
 * keystream bytes, bulk-process whole blocks in hardware, then produce one
 * software keystream block for a trailing partial block.  OFB keystream
 * never depends on the data, so no encrypt/decrypt distinction is needed.
 */
459 padlock_ofb_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out_arg,
460 const unsigned char *in_arg, size_t nbytes)
462 struct padlock_cipher_data *cdata = ALIGNED_CIPHER_DATA(ctx);
466 * ctx->num is maintained in byte-oriented modes, such as CFB and OFB...
468 if ((chunk = EVP_CIPHER_CTX_num(ctx))) { /* borrow chunk variable */
469 unsigned char *ivp = EVP_CIPHER_CTX_iv_noconst(ctx);
471 if (chunk >= AES_BLOCK_SIZE)
472 return 0; /* bogus value */
474 while (chunk < AES_BLOCK_SIZE && nbytes != 0) {
475 *(out_arg++) = *(in_arg++) ^ ivp[chunk];
479 EVP_CIPHER_CTX_set_num(ctx, chunk % AES_BLOCK_SIZE);
/* Whole blocks through the hardware. */
485 memcpy(cdata->iv, EVP_CIPHER_CTX_iv(ctx), AES_BLOCK_SIZE);
487 if ((chunk = nbytes & ~(AES_BLOCK_SIZE - 1))) {
488 if (!padlock_ofb_encrypt(out_arg, in_arg, cdata, chunk))
/* Trailing partial block: one keystream block computed in software. */
494 unsigned char *ivp = cdata->iv;
498 EVP_CIPHER_CTX_set_num(ctx, nbytes);
499 padlock_reload_key(); /* empirically found */
500 padlock_aes_block(ivp, ivp, cdata);
501 padlock_reload_key(); /* empirically found */
503 *(out_arg++) = *(in_arg++) ^ *ivp;
/* Propagate the updated IV back to the EVP context. */
508 memcpy(EVP_CIPHER_CTX_iv_noconst(ctx), cdata->iv, AES_BLOCK_SIZE);
513 static void padlock_ctr32_encrypt_glue(const unsigned char *in,
514 unsigned char *out, size_t blocks,
515 struct padlock_cipher_data *ctx,
516 const unsigned char *ivec)
518 memcpy(ctx->iv, ivec, AES_BLOCK_SIZE);
519 padlock_ctr32_encrypt(out, in, ctx, AES_BLOCK_SIZE * blocks);
523 padlock_ctr_cipher(EVP_CIPHER_CTX *ctx, unsigned char *out_arg,
524 const unsigned char *in_arg, size_t nbytes)
526 struct padlock_cipher_data *cdata = ALIGNED_CIPHER_DATA(ctx);
527 unsigned int num = EVP_CIPHER_CTX_num(ctx);
529 CRYPTO_ctr128_encrypt_ctr32(in_arg, out_arg, nbytes,
530 cdata, EVP_CIPHER_CTX_iv_noconst(ctx),
531 EVP_CIPHER_CTX_buf_noconst(ctx), &num,
532 (ctr128_f) padlock_ctr32_encrypt_glue);
534 EVP_CIPHER_CTX_set_num(ctx, (size_t)num);
/* EVP block sizes per mode: ECB/CBC are true 16-byte block modes; the
 * stream-like modes (OFB/CFB/CTR) report 1 so EVP allows arbitrary lengths. */
538 # define EVP_CIPHER_block_size_ECB AES_BLOCK_SIZE
539 # define EVP_CIPHER_block_size_CBC AES_BLOCK_SIZE
540 # define EVP_CIPHER_block_size_OFB 1
541 # define EVP_CIPHER_block_size_CFB 1
542 # define EVP_CIPHER_block_size_CTR 1
545 * Declaring so many ciphers by hand would be a pain. Instead introduce a bit
546 * of preprocessor magic :-)
/* NOTE(review): some initializer lines of this macro are missing from the
 * extraction; kept byte-identical.  ctx_size is the aligned-data trick:
 * sizeof(padlock_cipher_data) + 16 spare bytes for 16-byte alignment. */
548 # define DECLARE_AES_EVP(ksize,lmode,umode) \
549 static const EVP_CIPHER padlock_aes_##ksize##_##lmode = { \
550 NID_aes_##ksize##_##lmode, \
551 EVP_CIPHER_block_size_##umode, \
552 AES_KEY_SIZE_##ksize, \
554 0 | EVP_CIPH_##umode##_MODE, \
555 padlock_aes_init_key, \
556 padlock_##lmode##_cipher, \
558 sizeof(struct padlock_cipher_data) + 16, \
559 EVP_CIPHER_set_asn1_iv, \
560 EVP_CIPHER_get_asn1_iv, \
/* Instantiate all 15 key-size/mode combinations. */
565 DECLARE_AES_EVP(128, ecb, ECB);
566 DECLARE_AES_EVP(128, cbc, CBC);
567 DECLARE_AES_EVP(128, cfb, CFB);
568 DECLARE_AES_EVP(128, ofb, OFB);
569 DECLARE_AES_EVP(128, ctr, CTR);
571 DECLARE_AES_EVP(192, ecb, ECB);
572 DECLARE_AES_EVP(192, cbc, CBC);
573 DECLARE_AES_EVP(192, cfb, CFB);
574 DECLARE_AES_EVP(192, ofb, OFB);
575 DECLARE_AES_EVP(192, ctr, CTR);
577 DECLARE_AES_EVP(256, ecb, ECB);
578 DECLARE_AES_EVP(256, cbc, CBC);
579 DECLARE_AES_EVP(256, cfb, CFB);
580 DECLARE_AES_EVP(256, ofb, OFB);
581 DECLARE_AES_EVP(256, ctr, CTR);
/*
 * ENGINE cipher dispatch: with nid==0 (cipher NULL) return the list of
 * supported NIDs; otherwise return the EVP_CIPHER table for the requested
 * NID.  NOTE(review): the `break` statements and default case are missing
 * from this extraction; kept byte-identical.
 */
584 padlock_ciphers(ENGINE *e, const EVP_CIPHER **cipher, const int **nids,
587 /* No specific cipher => return a list of supported nids ... */
589 *nids = padlock_cipher_nids;
590 return padlock_cipher_nids_num;
593 /* ... or the requested "cipher" otherwise */
595 case NID_aes_128_ecb:
596 *cipher = &padlock_aes_128_ecb;
598 case NID_aes_128_cbc:
599 *cipher = &padlock_aes_128_cbc;
601 case NID_aes_128_cfb:
602 *cipher = &padlock_aes_128_cfb;
604 case NID_aes_128_ofb:
605 *cipher = &padlock_aes_128_ofb;
607 case NID_aes_128_ctr:
608 *cipher = &padlock_aes_128_ctr;
611 case NID_aes_192_ecb:
612 *cipher = &padlock_aes_192_ecb;
614 case NID_aes_192_cbc:
615 *cipher = &padlock_aes_192_cbc;
617 case NID_aes_192_cfb:
618 *cipher = &padlock_aes_192_cfb;
620 case NID_aes_192_ofb:
621 *cipher = &padlock_aes_192_ofb;
623 case NID_aes_192_ctr:
624 *cipher = &padlock_aes_192_ctr;
627 case NID_aes_256_ecb:
628 *cipher = &padlock_aes_256_ecb;
630 case NID_aes_256_cbc:
631 *cipher = &padlock_aes_256_cbc;
633 case NID_aes_256_cfb:
634 *cipher = &padlock_aes_256_cfb;
636 case NID_aes_256_ofb:
637 *cipher = &padlock_aes_256_ofb;
639 case NID_aes_256_ctr:
640 *cipher = &padlock_aes_256_ctr;
644 /* Sorry, we don't support this NID */
652 /* Prepare the encryption key for PadLock usage */
/*
 * EVP init callback: builds the PadLock control word and key schedule in
 * the 16-byte-aligned context.  AES-128 keys are expanded by hardware
 * (keygen=0); AES-192/256 are expanded in software and byte-swapped to
 * the format the hardware expects.  Returns 0 on error, non-zero on
 * success.  NOTE(review): several lines (key-length switch, returns) are
 * missing from this extraction.
 */
654 padlock_aes_init_key(EVP_CIPHER_CTX *ctx, const unsigned char *key,
655 const unsigned char *iv, int enc)
657 struct padlock_cipher_data *cdata;
658 int key_len = EVP_CIPHER_CTX_key_length(ctx) * 8;
659 unsigned long mode = EVP_CIPHER_CTX_mode(ctx);
662 return 0; /* ERROR */
664 cdata = ALIGNED_CIPHER_DATA(ctx);
665 memset(cdata, 0, sizeof(*cdata));
667 /* Prepare Control word. */
/* OFB/CTR only ever encrypt the keystream block, regardless of direction. */
668 if (mode == EVP_CIPH_OFB_MODE || mode == EVP_CIPH_CTR_MODE)
669 cdata->cword.b.encdec = 0;
671 cdata->cword.b.encdec = (EVP_CIPHER_CTX_encrypting(ctx) == 0);
672 cdata->cword.b.rounds = 10 + (key_len - 128) / 32;
673 cdata->cword.b.ksize = (key_len - 128) / 64;
678 * PadLock can generate an extended key for AES128 in hardware
680 memcpy(cdata->ks.rd_key, key, AES_KEY_SIZE_128);
681 cdata->cword.b.keygen = 0;
687 * Generate an extended AES key in software. Needed for AES192/AES256
690 * Well, the above applies to Stepping 8 CPUs and is listed as
691 * hardware errata. They most likely will fix it at some point and
692 * then a check for stepping would be due here.
/* ECB/CBC decryption needs the decryption schedule; other modes encrypt only. */
694 if ((mode == EVP_CIPH_ECB_MODE || mode == EVP_CIPH_CBC_MODE)
696 AES_set_decrypt_key(key, key_len, &cdata->ks);
698 AES_set_encrypt_key(key, key_len, &cdata->ks);
701 * OpenSSL C functions use byte-swapped extended key.
703 padlock_key_bswap(&cdata->ks);
705 cdata->cword.b.keygen = 1;
714 * This is done to cover for cases when user reuses the
715 * context for new key. The catch is that if we don't do
716 * this, padlock_aes_cipher might proceed with old key...
718 padlock_reload_key();
723 # endif /* OPENSSL_NO_AES */
725 /* ===== Random Number Generator ===== */
727 * This code is not engaged. The reason is that it does not comply
728 * with recommendations for VIA RNG usage for secure applications
729 * (posted at http://www.via.com.tw/en/viac3/c3.jsp) nor does it
730 * provide meaningful error control...
733 * Wrapper that provides an interface between the API and the raw PadLock
/*
 * Pulls bytes from the hardware RNG via xstore.  Bulk phase writes 8 bytes
 * per call directly to `output`; tail phase pulls into a local word and
 * copies single bytes.  eax status bits: bit 6 = RNG enabled, bits 10-14 =
 * quality filters tripped, low 5 bits = bytes returned.  Returns 0 on any
 * hardware failure.  NOTE(review): loop headers/returns are missing from
 * this extraction.
 */
736 static int padlock_rand_bytes(unsigned char *output, int count)
738 unsigned int eax, buf;
741 eax = padlock_xstore(output, 0);
742 if (!(eax & (1 << 6)))
743 return 0; /* RNG disabled */
744 /* this ---vv--- covers DC bias, Raw Bits and String Filter */
745 if (eax & (0x1F << 10))
747 if ((eax & 0x1F) == 0)
748 continue; /* no data, retry... */
749 if ((eax & 0x1F) != 8)
750 return 0; /* fatal failure... */
/* Tail: request a single byte at a time (edx=3 => 1-byte granularity). */
755 eax = padlock_xstore(&buf, 3);
756 if (!(eax & (1 << 6)))
757 return 0; /* RNG disabled */
758 /* this ---vv--- covers DC bias, Raw Bits and String Filter */
759 if (eax & (0x1F << 10))
761 if ((eax & 0x1F) == 0)
762 continue; /* no data, retry... */
763 if ((eax & 0x1F) != 1)
764 return 0; /* fatal failure... */
765 *output++ = (unsigned char)buf;
/* Scrub the last random word from the stack; volatile defeats dead-store elimination. */
768 *(volatile unsigned int *)&buf = 0;
/*
 * Dummy but necessary function: RAND_METHOD requires a status callback.
 * The hardware RNG needs no seeding, so this unconditionally reports
 * "seeded" (non-zero).
 */
static int padlock_rand_status(void)
{
    return 1;
}
779 /* Prepare structure for registration */
/* RAND_METHOD table handed to ENGINE_set_RAND(); seed/add/cleanup slots
 * are absent in this extraction (NULL in the full initializer). */
780 static RAND_METHOD padlock_rand = {
782 padlock_rand_bytes, /* bytes */
785 padlock_rand_bytes, /* pseudorand */
786 padlock_rand_status, /* rand status */
789 # else /* !COMPILE_HW_PADLOCK */
790 # ifndef OPENSSL_NO_DYNAMIC_ENGINE
/* Non-x86 fallback: a dynamic-engine stub must still export bind_engine
 * so the shared object loads; it simply declines to bind. */
792 int bind_engine(ENGINE *e, const char *id, const dynamic_fns *fns);
794 int bind_engine(ENGINE *e, const char *id, const dynamic_fns *fns)
799 IMPLEMENT_DYNAMIC_CHECK_FN()
801 # endif /* COMPILE_HW_PADLOCK */
802 # endif /* !OPENSSL_NO_HW_PADLOCK */
803 #endif /* !OPENSSL_NO_HW */