From: Rich Felker
Date: Tue, 2 Jun 2020 21:37:14 +0000 (-0400)
Subject: fix unbounded heap expansion race in malloc
X-Git-Url: https://git.librecmc.org/?a=commitdiff_plain;h=3e16313f8fe2ed143ae0267fd79d63014c24779f;p=oweals%2Fmusl.git

fix unbounded heap expansion race in malloc

this has been a longstanding issue reported many times over the years,
with it becoming increasingly clear that it could be hit in practice.
under concurrent malloc and free from multiple threads, it's possible
to hit usage patterns where unbounded amounts of new memory are
obtained via brk/mmap despite the total nominal usage being small and
bounded.

the underlying cause is that, as a fundamental consequence of keeping
locking as fine-grained as possible, the state where free has unbinned
an already-free chunk to merge it with a newly-freed one, but has not
yet re-binned the combined chunk, is exposed to other threads. this is
bad even with small chunks, and leads to suboptimal use of memory, but
where it really blows up is where the already-freed chunk in question
is the large free region "at the top of the heap". in this situation,
other threads momentarily see a state of having almost no free memory,
and conclude that they need to obtain more.

as far as I can tell there is no fix for this that does not harm
performance. the fix made here forces all split/merge of free chunks
to take place under a single lock, which also takes the place of the
old free_lock, being held at least momentarily at the time of free to
determine whether there are neighboring free chunks that need merging.

as a consequence, the pretrim, alloc_fwd, and alloc_rev operations no
longer make sense and are deleted. simplified merging now takes place
inline in free (__bin_chunk) and realloc.

as commented in the source, holding the split_merge_lock precludes any
chunk transition from in-use to free state. for the most part, it also
precludes change to chunk header sizes. however, __memalign may still
modify the sizes of an in-use chunk to split it into two in-use
chunks. arguably this should require holding the split_merge_lock, but
that would necessitate refactoring to expose it externally, which is a
mess. and it turns out not to be necessary, at least assuming the
existing sloppy memory model malloc has been using, because if free
(__bin_chunk) or realloc sees any unsynchronized change to the size,
it will also see the in-use bit being set, and thereby can't do
anything with the neighboring chunk that changed size.
---
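As a rough illustration of the discipline described above, the following
self-contained sketch frees a chunk while holding a single split/merge lock
for the whole inspect-unlink-merge-relink sequence, so no other thread that
takes the same lock can ever observe a chunk that has been taken off its
free list but not yet re-linked. It is only a model, not the patch below:
the chunk layout and all names here (struct tchunk, tc_free, link_chunk,
unlink_chunk, the pthread mutex standing in for musl's internal locks) are
invented for the example, and musl's real code additionally keeps per-bin
locks for the list manipulation, which this sketch folds into the one mutex.

/* toy model of "all split/merge of free chunks under a single lock";
 * names and layout are invented for illustration and are not musl's */
#include <pthread.h>
#include <stddef.h>

struct tchunk {
	size_t size;                          /* total chunk size */
	int inuse;                            /* boundary-tag style in-use flag */
	struct tchunk *prev_mem, *next_mem;   /* address-ordered neighbors */
	struct tchunk *prev_free, *next_free; /* free-list links */
};

static struct tchunk free_head = {
	.prev_free = &free_head, .next_free = &free_head
};
static pthread_mutex_t split_merge_lock = PTHREAD_MUTEX_INITIALIZER;

/* take a chunk off the free list; callers hold split_merge_lock */
static void unlink_chunk(struct tchunk *c)
{
	c->prev_free->next_free = c->next_free;
	c->next_free->prev_free = c->prev_free;
}

/* put a chunk (back) on the free list; callers hold split_merge_lock */
static void link_chunk(struct tchunk *c)
{
	c->next_free = free_head.next_free;
	c->prev_free = &free_head;
	c->next_free->prev_free = c;
	free_head.next_free = c;
}

/* free a chunk, absorbing any free neighbors; an allocation path in the
 * same model would take the same lock around any split or heap growth */
static void tc_free(struct tchunk *c)
{
	pthread_mutex_lock(&split_merge_lock);

	if (c->prev_mem && !c->prev_mem->inuse) {
		struct tchunk *p = c->prev_mem;
		unlink_chunk(p);        /* never observed unlinked: lock is held */
		p->size += c->size;
		p->next_mem = c->next_mem;
		if (c->next_mem) c->next_mem->prev_mem = p;
		c = p;
	}
	if (c->next_mem && !c->next_mem->inuse) {
		struct tchunk *n = c->next_mem;
		unlink_chunk(n);
		c->size += n->size;
		c->next_mem = n->next_mem;
		if (n->next_mem) n->next_mem->prev_mem = c;
	}

	c->inuse = 0;
	link_chunk(c);              /* merged chunk published atomically w.r.t. lock */

	pthread_mutex_unlock(&split_merge_lock);
}

The property this models is the one the commit relies on: between
unlink_chunk() and link_chunk() no other thread holding the lock can inspect
the heap, so a large merged-but-not-yet-relinked region is never mistaken
for exhausted free memory, which is what previously drove the unbounded
brk/mmap growth.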
diff --git a/src/malloc/malloc.c b/src/malloc/malloc.c
index a803d4c9..20598ec3 100644
--- a/src/malloc/malloc.c
+++ b/src/malloc/malloc.c
@@ -17,7 +17,7 @@ static struct {
 	volatile uint64_t binmap;
 	struct bin bins[64];
-	volatile int free_lock[2];
+	volatile int split_merge_lock[2];
 } mal;
 
 int __malloc_replaced;
 
@@ -128,7 +128,6 @@ void __dump_heap(int x)
 
 static struct chunk *expand_heap(size_t n)
 {
-	static int heap_lock[2];
 	static void *end;
 	void *p;
 	struct chunk *w;
@@ -138,13 +137,8 @@ static struct chunk *expand_heap(size_t n)
 	 * we need room for an extra zero-sized sentinel chunk. */
 	n += SIZE_ALIGN;
 
-	lock(heap_lock);
-
 	p = __expand_heap(&n);
-	if (!p) {
-		unlock(heap_lock);
-		return 0;
-	}
+	if (!p) return 0;
 
 	/* If not just expanding existing space, we need to make a
 	 * new sentinel chunk below the allocated space. */
@@ -167,8 +161,6 @@ static struct chunk *expand_heap(size_t n)
 
 	w = MEM_TO_CHUNK(p);
 	w->csize = n | C_INUSE;
 
-	unlock(heap_lock);
-
 	return w;
 }
 
@@ -198,96 +190,44 @@ static void unbin(struct chunk *c, int i)
 	NEXT_CHUNK(c)->psize |= C_INUSE;
 }
 
-static int alloc_fwd(struct chunk *c)
-{
-	int i;
-	size_t k;
-	while (!((k=c->csize) & C_INUSE)) {
-		i = bin_index(k);
-		lock_bin(i);
-		if (c->csize == k) {
-			unbin(c, i);
-			unlock_bin(i);
-			return 1;
-		}
-		unlock_bin(i);
-	}
-	return 0;
-}
-
-static int alloc_rev(struct chunk *c)
+static void bin_chunk(struct chunk *self, int i)
 {
-	int i;
-	size_t k;
-	while (!((k=c->psize) & C_INUSE)) {
-		i = bin_index(k);
-		lock_bin(i);
-		if (c->psize == k) {
-			unbin(PREV_CHUNK(c), i);
-			unlock_bin(i);
-			return 1;
-		}
-		unlock_bin(i);
-	}
-	return 0;
+	self->next = BIN_TO_CHUNK(i);
+	self->prev = mal.bins[i].tail;
+	self->next->prev = self;
+	self->prev->next = self;
+	if (self->prev == BIN_TO_CHUNK(i))
+		a_or_64(&mal.binmap, 1ULL<= n1 - DONTCARE) return;
 	next = NEXT_CHUNK(self);
 	split = (void *)((char *)self + n);
-	split->prev = self->prev;
-	split->next = self->next;
-	split->prev->next = split;
-	split->next->prev = split;
 	split->psize = n | C_INUSE;
 	split->csize = n1-n;
 	next->psize = n1-n;
 	self->csize = n | C_INUSE;
-	return 1;
-}
-static void trim(struct chunk *self, size_t n)
-{
-	size_t n1 = CHUNK_SIZE(self);
-	struct chunk *next, *split;
-
-	if (n >= n1 - DONTCARE) return;
+	int i = bin_index(n1-n);
+	lock_bin(i);
-	next = NEXT_CHUNK(self);
-	split = (void *)((char *)self + n);
-
-	split->psize = n | C_INUSE;
-	split->csize = n1-n | C_INUSE;
-	next->psize = n1-n | C_INUSE;
-	self->csize = n | C_INUSE;
+	bin_chunk(split, i);
-	__bin_chunk(split);
+	unlock_bin(i);
 }
 
 void *malloc(size_t n)
 {
 	struct chunk *c;
 	int i, j;
+	uint64_t mask;
 
 	if (adjust_size(&n) < 0) return 0;
 
@@ -303,33 +243,37 @@ void *malloc(size_t n)
 	}
 
 	i = bin_index_up(n);
-	for (;;) {
-		uint64_t mask = mal.binmap & -(1ULL<psize = c->csize =
-				x->csize + CHUNK_SIZE(c);
-			}
-			break;
+	if (i<63 && (mal.binmap & (1ULL<psize;
 	char *base = (char *)self - extra;
@@ -408,27 +354,24 @@ void *realloc(void *p, size_t n)
 	/* Crash on corrupted footer (likely from buffer overflow) */
 	if (next->psize != self->csize) a_crash();
 
-	/* Merge adjacent chunks if we need more space. This is not
-	 * a waste of time even if we fail to get enough space, because our
-	 * subsequent call to free would otherwise have to do the merge. */
-	if (n > n1 && alloc_fwd(next)) {
-		n1 += CHUNK_SIZE(next);
-		next = NEXT_CHUNK(next);
-	}
-	/* FIXME: find what's wrong here and reenable it..? */
-	if (0 && n > n1 && alloc_rev(self)) {
-		self = PREV_CHUNK(self);
-		n1 += CHUNK_SIZE(self);
-	}
-	self->csize = n1 | C_INUSE;
-	next->psize = n1 | C_INUSE;
+	lock(mal.split_merge_lock);
 
-	/* If we got enough space, split off the excess and return */
-	if (n <= n1) {
-		//memmove(CHUNK_TO_MEM(self), p, n0-OVERHEAD);
-		trim(self, n);
-		return CHUNK_TO_MEM(self);
+	size_t nsize = next->csize & C_INUSE ? 0 : CHUNK_SIZE(next);
+	if (n0+nsize >= n) {
+		int i = bin_index(nsize);
+		lock_bin(i);
+		if (!(next->csize & C_INUSE)) {
+			unbin(next, i);
+			unlock_bin(i);
+			next = NEXT_CHUNK(next);
+			self->csize = next->psize = n0+nsize | C_INUSE;
+			trim(self, n);
+			unlock(mal.split_merge_lock);
+			return CHUNK_TO_MEM(self);
+		}
+		unlock_bin(i);
 	}
+	unlock(mal.split_merge_lock);
 
 copy_realloc:
 	/* As a last resort, allocate a new chunk and copy to it. */
@@ -443,59 +386,51 @@ copy_free_ret:
 void __bin_chunk(struct chunk *self)
 {
 	struct chunk *next = NEXT_CHUNK(self);
-	size_t final_size, new_size, size;
-	int reclaim=0;
-	int i;
-
-	final_size = new_size = CHUNK_SIZE(self);
 
 	/* Crash on corrupted footer (likely from buffer overflow) */
 	if (next->psize != self->csize) a_crash();
 
-	for (;;) {
-		if (self->psize & next->csize & C_INUSE) {
-			self->csize = final_size | C_INUSE;
-			next->psize = final_size | C_INUSE;
-			i = bin_index(final_size);
-			lock_bin(i);
-			lock(mal.free_lock);
-			if (self->psize & next->csize & C_INUSE)
-				break;
-			unlock(mal.free_lock);
-			unlock_bin(i);
-		}
+	lock(mal.split_merge_lock);
 
-		if (alloc_rev(self)) {
-			self = PREV_CHUNK(self);
-			size = CHUNK_SIZE(self);
-			final_size += size;
-			if (new_size+size > RECLAIM && (new_size+size^size) > size)
-				reclaim = 1;
-		}
+	size_t osize = CHUNK_SIZE(self), size = osize;
+
+	/* Since we hold split_merge_lock, only transition from free to
+	 * in-use can race; in-use to free is impossible */
+	size_t psize = self->psize & C_INUSE ? 0 : CHUNK_PSIZE(self);
+	size_t nsize = next->csize & C_INUSE ? 0 : CHUNK_SIZE(next);
 
-		if (alloc_fwd(next)) {
-			size = CHUNK_SIZE(next);
-			final_size += size;
-			if (new_size+size > RECLAIM && (new_size+size^size) > size)
-				reclaim = 1;
+	if (psize) {
+		int i = bin_index(psize);
+		lock_bin(i);
+		if (!(self->psize & C_INUSE)) {
+			struct chunk *prev = PREV_CHUNK(self);
+			unbin(prev, i);
+			self = prev;
+			size += psize;
+		}
+		unlock_bin(i);
+	}
+	if (nsize) {
+		int i = bin_index(nsize);
+		lock_bin(i);
+		if (!(next->csize & C_INUSE)) {
+			unbin(next, i);
 			next = NEXT_CHUNK(next);
+			size += nsize;
 		}
+		unlock_bin(i);
 	}
 
-	if (!(mal.binmap & 1ULL<csize = final_size; next->psize = final_size;
-	unlock(mal.free_lock);
+	int i = bin_index(size);
+	lock_bin(i);
 
-	self->next = BIN_TO_CHUNK(i);
-	self->prev = mal.bins[i].tail;
-	self->next->prev = self;
-	self->prev->next = self;
+	self->csize = size;
+	next->psize = size;
+	bin_chunk(self, i);
+	unlock(mal.split_merge_lock);
 
 	/* Replace middle of large chunks with fresh zero pages */
-	if (reclaim) {
+	if (size > RECLAIM && (size^(size-osize)) > size-osize) {
 		uintptr_t a = (uintptr_t)self + SIZE_ALIGN+PAGE_SIZE-1 & -PAGE_SIZE;
 		uintptr_t b = (uintptr_t)next - SIZE_ALIGN & -PAGE_SIZE;
 #if 1