/*
2 * Copyright 2015-2017 The OpenSSL Project Authors. All Rights Reserved.
 *
4 * Licensed under the OpenSSL license (the "License"). You may not use
5 * this file except in compliance with the License. You can obtain a copy
6 * in the file LICENSE in the source distribution or at
7 * https://www.openssl.org/source/license.html
 */

/*
11 * Copyright 2004-2014, Akamai Technologies. All Rights Reserved.
12 * This file is distributed under the terms of the OpenSSL license.
 */

/*
16 * This file is in two halves. The first half implements the public API
17 * to be used by external consumers, and to be used by OpenSSL to store
18 * data in a "secure arena." The second half implements the secure arena.
19 * For details on that implementation, see below (look for uppercase
20 * "SECURE HEAP IMPLEMENTATION").
 */
22 #include <openssl/crypto.h>
27 #if defined(OPENSSL_SYS_LINUX) || defined(OPENSSL_SYS_UNIX)
32 # include <sys/types.h>
33 # include <sys/mman.h>
34 # if defined(OPENSSL_SYS_LINUX)
35 # include <sys/syscall.h>
36 # include <linux/mman.h>
39 # include <sys/param.h>
40 # include <sys/stat.h>
44 #define CLEAR(p, s) OPENSSL_cleanse(p, s)
46 # define PAGE_SIZE 4096
50 static size_t secure_mem_used;
52 static int secure_mem_initialized;
54 static CRYPTO_RWLOCK *sec_malloc_lock = NULL;
57 * These are the functions that must be implemented by a secure heap (sh).
59 static int sh_init(size_t size, int minsize);
60 static void *sh_malloc(size_t size);
61 static void sh_free(void *ptr);
62 static void sh_done(void);
63 static size_t sh_actual_size(char *ptr);
64 static int sh_allocated(const char *ptr);
67 int CRYPTO_secure_malloc_init(size_t size, int minsize)
72 if (!secure_mem_initialized) {
73 sec_malloc_lock = CRYPTO_THREAD_lock_new();
74 if (sec_malloc_lock == NULL)
76 ret = sh_init(size, minsize);
77 secure_mem_initialized = 1;
83 #endif /* IMPLEMENTED */
86 int CRYPTO_secure_malloc_done()
89 if (secure_mem_used == 0) {
91 secure_mem_initialized = 0;
92 CRYPTO_THREAD_lock_free(sec_malloc_lock);
95 #endif /* IMPLEMENTED */
99 int CRYPTO_secure_malloc_initialized()
102 return secure_mem_initialized;
105 #endif /* IMPLEMENTED */
108 void *CRYPTO_secure_malloc(size_t num, const char *file, int line)
114 if (!secure_mem_initialized) {
115 return CRYPTO_malloc(num, file, line);
117 CRYPTO_THREAD_write_lock(sec_malloc_lock);
118 ret = sh_malloc(num);
119 actual_size = ret ? sh_actual_size(ret) : 0;
120 secure_mem_used += actual_size;
121 CRYPTO_THREAD_unlock(sec_malloc_lock);
124 return CRYPTO_malloc(num, file, line);
125 #endif /* IMPLEMENTED */
128 void *CRYPTO_secure_zalloc(size_t num, const char *file, int line)
130 void *ret = CRYPTO_secure_malloc(num, file, line);
137 void CRYPTO_secure_free(void *ptr, const char *file, int line)
144 if (!CRYPTO_secure_allocated(ptr)) {
145 CRYPTO_free(ptr, file, line);
148 CRYPTO_THREAD_write_lock(sec_malloc_lock);
149 actual_size = sh_actual_size(ptr);
150 CLEAR(ptr, actual_size);
151 secure_mem_used -= actual_size;
153 CRYPTO_THREAD_unlock(sec_malloc_lock);
155 CRYPTO_free(ptr, file, line);
156 #endif /* IMPLEMENTED */
159 int CRYPTO_secure_allocated(const void *ptr)
164 if (!secure_mem_initialized)
166 CRYPTO_THREAD_write_lock(sec_malloc_lock);
167 ret = sh_allocated(ptr);
168 CRYPTO_THREAD_unlock(sec_malloc_lock);
172 #endif /* IMPLEMENTED */
175 size_t CRYPTO_secure_used()
178 return secure_mem_used;
181 #endif /* IMPLEMENTED */
184 size_t CRYPTO_secure_actual_size(void *ptr)
189 CRYPTO_THREAD_write_lock(sec_malloc_lock);
190 actual_size = sh_actual_size(ptr);
191 CRYPTO_THREAD_unlock(sec_malloc_lock);
/*
202 * SECURE HEAP IMPLEMENTATION
 */

/*
208 * The implementation provided here uses a fixed-sized mmap() heap,
209 * which is locked into memory, not written to core files, and protected
210 * on either side by an unmapped page, which will catch pointer overruns
211 * (or underruns) and an attempt to read data out of the secure heap.
212 * Free'd memory is zero'd or otherwise cleansed.
 *
214 * This is a pretty standard buddy allocator. We keep areas in a multiple
215 * of "sh.minsize" units. The freelist and bitmaps are kept separately,
216 * so all (and only) data is kept in the mmap'd heap.
 *
218 * This code assumes eight-bit bytes. The numbers 3 and 7 are all over the
 * place.
 */
222 #define ONE ((size_t)1)
224 # define TESTBIT(t, b) (t[(b) >> 3] & (ONE << ((b) & 7)))
225 # define SETBIT(t, b) (t[(b) >> 3] |= (ONE << ((b) & 7)))
226 # define CLEARBIT(t, b) (t[(b) >> 3] &= (0xFF & ~(ONE << ((b) & 7))))
228 #define WITHIN_ARENA(p) \
229 ((char*)(p) >= sh.arena && (char*)(p) < &sh.arena[sh.arena_size])
230 #define WITHIN_FREELIST(p) \
231 ((char*)(p) >= (char*)sh.freelist && (char*)(p) < (char*)&sh.freelist[sh.freelist_size])
234 typedef struct sh_list_st
236 struct sh_list_st *next;
237 struct sh_list_st **p_next;
247 ossl_ssize_t freelist_size;
249 unsigned char *bittable;
250 unsigned char *bitmalloc;
251 size_t bittable_size; /* size in bits */
256 static size_t sh_getlist(char *ptr)
258 ossl_ssize_t list = sh.freelist_size - 1;
259 size_t bit = (sh.arena_size + ptr - sh.arena) / sh.minsize;
261 for (; bit; bit >>= 1, list--) {
262 if (TESTBIT(sh.bittable, bit))
264 OPENSSL_assert((bit & 1) == 0);
271 static int sh_testbit(char *ptr, int list, unsigned char *table)
275 OPENSSL_assert(list >= 0 && list < sh.freelist_size);
276 OPENSSL_assert(((ptr - sh.arena) & ((sh.arena_size >> list) - 1)) == 0);
277 bit = (ONE << list) + ((ptr - sh.arena) / (sh.arena_size >> list));
278 OPENSSL_assert(bit > 0 && bit < sh.bittable_size);
279 return TESTBIT(table, bit);
282 static void sh_clearbit(char *ptr, int list, unsigned char *table)
286 OPENSSL_assert(list >= 0 && list < sh.freelist_size);
287 OPENSSL_assert(((ptr - sh.arena) & ((sh.arena_size >> list) - 1)) == 0);
288 bit = (ONE << list) + ((ptr - sh.arena) / (sh.arena_size >> list));
289 OPENSSL_assert(bit > 0 && bit < sh.bittable_size);
290 OPENSSL_assert(TESTBIT(table, bit));
291 CLEARBIT(table, bit);
294 static void sh_setbit(char *ptr, int list, unsigned char *table)
298 OPENSSL_assert(list >= 0 && list < sh.freelist_size);
299 OPENSSL_assert(((ptr - sh.arena) & ((sh.arena_size >> list) - 1)) == 0);
300 bit = (ONE << list) + ((ptr - sh.arena) / (sh.arena_size >> list));
301 OPENSSL_assert(bit > 0 && bit < sh.bittable_size);
302 OPENSSL_assert(!TESTBIT(table, bit));
306 static void sh_add_to_list(char **list, char *ptr)
310 OPENSSL_assert(WITHIN_FREELIST(list));
311 OPENSSL_assert(WITHIN_ARENA(ptr));
313 temp = (SH_LIST *)ptr;
314 temp->next = *(SH_LIST **)list;
315 OPENSSL_assert(temp->next == NULL || WITHIN_ARENA(temp->next));
316 temp->p_next = (SH_LIST **)list;
318 if (temp->next != NULL) {
319 OPENSSL_assert((char **)temp->next->p_next == list);
320 temp->next->p_next = &(temp->next);
326 static void sh_remove_from_list(char *ptr)
328 SH_LIST *temp, *temp2;
330 temp = (SH_LIST *)ptr;
331 if (temp->next != NULL)
332 temp->next->p_next = temp->p_next;
333 *temp->p_next = temp->next;
334 if (temp->next == NULL)
338 OPENSSL_assert(WITHIN_FREELIST(temp2->p_next) || WITHIN_ARENA(temp2->p_next));
342 static int sh_init(size_t size, int minsize)
348 memset(&sh, 0, sizeof sh);
350 /* make sure size and minsize are powers of 2 */
351 OPENSSL_assert(size > 0);
352 OPENSSL_assert((size & (size - 1)) == 0);
353 OPENSSL_assert(minsize > 0);
354 OPENSSL_assert((minsize & (minsize - 1)) == 0);
355 if (size <= 0 || (size & (size - 1)) != 0)
357 if (minsize <= 0 || (minsize & (minsize - 1)) != 0)
360 while (minsize < (int)sizeof(SH_LIST))
363 sh.arena_size = size;
364 sh.minsize = minsize;
365 sh.bittable_size = (sh.arena_size / sh.minsize) * 2;
367 /* Prevent allocations of size 0 later on */
368 if (sh.bittable_size >> 3 == 0)
371 sh.freelist_size = -1;
372 for (i = sh.bittable_size; i; i >>= 1)
375 sh.freelist = OPENSSL_zalloc(sh.freelist_size * sizeof (char *));
376 OPENSSL_assert(sh.freelist != NULL);
377 if (sh.freelist == NULL)
380 sh.bittable = OPENSSL_zalloc(sh.bittable_size >> 3);
381 OPENSSL_assert(sh.bittable != NULL);
382 if (sh.bittable == NULL)
385 sh.bitmalloc = OPENSSL_zalloc(sh.bittable_size >> 3);
386 OPENSSL_assert(sh.bitmalloc != NULL);
387 if (sh.bitmalloc == NULL)
390 /* Allocate space for heap, and two extra pages as guards */
391 #if defined(_SC_PAGE_SIZE) || defined (_SC_PAGESIZE)
393 # if defined(_SC_PAGE_SIZE)
394 long tmppgsize = sysconf(_SC_PAGE_SIZE);
396 long tmppgsize = sysconf(_SC_PAGESIZE);
401 pgsize = (size_t)tmppgsize;
406 sh.map_size = pgsize + sh.arena_size + pgsize;
409 sh.map_result = mmap(NULL, sh.map_size,
410 PROT_READ|PROT_WRITE, MAP_ANON|MAP_PRIVATE, -1, 0);
415 sh.map_result = MAP_FAILED;
416 if ((fd = open("/dev/zero", O_RDWR)) >= 0) {
417 sh.map_result = mmap(NULL, sh.map_size,
418 PROT_READ|PROT_WRITE, MAP_PRIVATE, fd, 0);
422 OPENSSL_assert(sh.map_result != MAP_FAILED);
423 if (sh.map_result == MAP_FAILED)
425 sh.arena = (char *)(sh.map_result + pgsize);
426 sh_setbit(sh.arena, 0, sh.bittable);
427 sh_add_to_list(&sh.freelist[0], sh.arena);
429 /* Now try to add guard pages and lock into memory. */
432 /* Starting guard is already aligned from mmap. */
433 if (mprotect(sh.map_result, pgsize, PROT_NONE) < 0)
436 /* Ending guard page - need to round up to page boundary */
437 aligned = (pgsize + sh.arena_size + (pgsize - 1)) & ~(pgsize - 1);
438 if (mprotect(sh.map_result + aligned, pgsize, PROT_NONE) < 0)
441 #if defined(OPENSSL_SYS_LINUX) && defined(MLOCK_ONFAULT) && defined(SYS_mlock2)
442 if (syscall(SYS_mlock2, sh.arena, sh.arena_size, MLOCK_ONFAULT) < 0) {
443 if (errno == ENOSYS) {
444 if (mlock(sh.arena, sh.arena_size) < 0)
451 if (mlock(sh.arena, sh.arena_size) < 0)
455 if (madvise(sh.arena, sh.arena_size, MADV_DONTDUMP) < 0)
466 static void sh_done()
468 OPENSSL_free(sh.freelist);
469 OPENSSL_free(sh.bittable);
470 OPENSSL_free(sh.bitmalloc);
471 if (sh.map_result != NULL && sh.map_size)
472 munmap(sh.map_result, sh.map_size);
473 memset(&sh, 0, sizeof sh);
476 static int sh_allocated(const char *ptr)
478 return WITHIN_ARENA(ptr) ? 1 : 0;
481 static char *sh_find_my_buddy(char *ptr, int list)
486 bit = (ONE << list) + (ptr - sh.arena) / (sh.arena_size >> list);
489 if (TESTBIT(sh.bittable, bit) && !TESTBIT(sh.bitmalloc, bit))
490 chunk = sh.arena + ((bit & ((ONE << list) - 1)) * (sh.arena_size >> list));
495 static void *sh_malloc(size_t size)
497 ossl_ssize_t list, slist;
501 list = sh.freelist_size - 1;
502 for (i = sh.minsize; i < size; i <<= 1)
507 /* try to find a larger entry to split */
508 for (slist = list; slist >= 0; slist--)
509 if (sh.freelist[slist] != NULL)
514 /* split larger entry */
515 while (slist != list) {
516 char *temp = sh.freelist[slist];
518 /* remove from bigger list */
519 OPENSSL_assert(!sh_testbit(temp, slist, sh.bitmalloc));
520 sh_clearbit(temp, slist, sh.bittable);
521 sh_remove_from_list(temp);
522 OPENSSL_assert(temp != sh.freelist[slist]);
524 /* done with bigger list */
527 /* add to smaller list */
528 OPENSSL_assert(!sh_testbit(temp, slist, sh.bitmalloc));
529 sh_setbit(temp, slist, sh.bittable);
530 sh_add_to_list(&sh.freelist[slist], temp);
531 OPENSSL_assert(sh.freelist[slist] == temp);
534 temp += sh.arena_size >> slist;
535 OPENSSL_assert(!sh_testbit(temp, slist, sh.bitmalloc));
536 sh_setbit(temp, slist, sh.bittable);
537 sh_add_to_list(&sh.freelist[slist], temp);
538 OPENSSL_assert(sh.freelist[slist] == temp);
540 OPENSSL_assert(temp-(sh.arena_size >> slist) == sh_find_my_buddy(temp, slist));
543 /* peel off memory to hand back */
544 chunk = sh.freelist[list];
545 OPENSSL_assert(sh_testbit(chunk, list, sh.bittable));
546 sh_setbit(chunk, list, sh.bitmalloc);
547 sh_remove_from_list(chunk);
549 OPENSSL_assert(WITHIN_ARENA(chunk));
554 static void sh_free(void *ptr)
561 OPENSSL_assert(WITHIN_ARENA(ptr));
562 if (!WITHIN_ARENA(ptr))
565 list = sh_getlist(ptr);
566 OPENSSL_assert(sh_testbit(ptr, list, sh.bittable));
567 sh_clearbit(ptr, list, sh.bitmalloc);
568 sh_add_to_list(&sh.freelist[list], ptr);
570 /* Try to coalesce two adjacent free areas. */
571 while ((buddy = sh_find_my_buddy(ptr, list)) != NULL) {
572 OPENSSL_assert(ptr == sh_find_my_buddy(buddy, list));
573 OPENSSL_assert(ptr != NULL);
574 OPENSSL_assert(!sh_testbit(ptr, list, sh.bitmalloc));
575 sh_clearbit(ptr, list, sh.bittable);
576 sh_remove_from_list(ptr);
577 OPENSSL_assert(!sh_testbit(ptr, list, sh.bitmalloc));
578 sh_clearbit(buddy, list, sh.bittable);
579 sh_remove_from_list(buddy);
586 OPENSSL_assert(!sh_testbit(ptr, list, sh.bitmalloc));
587 sh_setbit(ptr, list, sh.bittable);
588 sh_add_to_list(&sh.freelist[list], ptr);
589 OPENSSL_assert(sh.freelist[list] == ptr);
593 static size_t sh_actual_size(char *ptr)
597 OPENSSL_assert(WITHIN_ARENA(ptr));
598 if (!WITHIN_ARENA(ptr))
600 list = sh_getlist(ptr);
601 OPENSSL_assert(sh_testbit(ptr, list, sh.bittable));
602 return sh.arena_size / (ONE << list);
604 #endif /* IMPLEMENTED */