--- a/ldso/dynlink.c
+++ b/ldso/dynlink.c
/* A huge hack: to make up for the wastefulness of shared libraries
 * needing at least a page of dirty memory even if they have no global
 * data, we reclaim the gaps at the beginning and end of writable maps
- * and "donate" them to the heap by setting up minimal malloc
- * structures and then freeing them. */
+ * and "donate" them to the heap. */
static void reclaim(struct dso *dso, size_t start, size_t end)
{
-	size_t *a, *z;
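+	/* Defined in src/malloc/malloc.c; declared locally rather than in a header. */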
+	void __malloc_donate(char *, char *);
	if (start >= dso->relro_start && start < dso->relro_end) start = dso->relro_end;
	if (end >= dso->relro_start && end < dso->relro_end) end = dso->relro_start;
-	start = start + 6*sizeof(size_t)-1 & -4*sizeof(size_t);
-	end = (end & -4*sizeof(size_t)) - 2*sizeof(size_t);
-	if (start>end || end-start < 4*sizeof(size_t)) return;
-	a = laddr(dso, start);
-	z = laddr(dso, end);
-	a[-2] = 1;
-	a[-1] = z[0] = end-start + 2*sizeof(size_t) | 1;
-	z[1] = 1;
-	free(a);
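+	/* Size and alignment bookkeeping now happens in __malloc_donate. */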
+	if (start >= end) return;
+	__malloc_donate(laddr(dso, start), laddr(dso, end));
}
static void reclaim_gaps(struct dso *dso)

--- a/src/malloc/malloc.c
+++ b/src/malloc/malloc.c
@@ ... @@ static int pretrim(struct chunk *self, size_t n, int i, int j)
	return 1;
}
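+/* Forward declaration: trim() below frees its split-off remainder via bin_chunk(). */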
+static void bin_chunk(struct chunk *);
+
static void trim(struct chunk *self, size_t n)
{
	size_t n1 = CHUNK_SIZE(self);
@@ ... @@ static void trim(struct chunk *self, size_t n)
	next->psize = n1-n | C_INUSE;
	self->csize = n | C_INUSE;
-	free(CHUNK_TO_MEM(split));
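+	/* split is already a fully formed in-use chunk; hand it straight to
+	 * bin_chunk() rather than round-tripping through free(). */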
+	bin_chunk(split);
}
void *malloc(size_t n)

@@ ... @@ void *realloc(void *p, size_t n)
	return new;
}
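+/* The binning half of the old free(): coalesce the chunk with any free
+ * neighbors and link the result into the appropriate bin. */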
-void free(void *p)
+static void bin_chunk(struct chunk *self)
{
-	struct chunk *self, *next;
+	struct chunk *next = NEXT_CHUNK(self);
	size_t final_size, new_size, size;
	int reclaim=0;
	int i;
-	if (!p) return;
-
-	self = MEM_TO_CHUNK(p);
-
-	if (IS_MMAPPED(self)) {
-		size_t extra = self->psize;
-		char *base = (char *)self - extra;
-		size_t len = CHUNK_SIZE(self) + extra;
-		/* Crash on double free */
-		if (extra & 1) a_crash();
-		__munmap(base, len);
-		return;
-	}
-
	final_size = new_size = CHUNK_SIZE(self);
-	next = NEXT_CHUNK(self);
	/* Crash on corrupted footer (likely from buffer overflow) */
	if (next->psize != self->csize) a_crash();
@@ ... @@ void free(void *p)
	unlock_bin(i);
}
+
+static void unmap_chunk(struct chunk *self)
+{
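+	/* For an mmapped chunk, psize holds the offset from the mmap base to
+	 * the chunk header; its in-use bit is never set for a live mapping. */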
+	size_t extra = self->psize;
+	char *base = (char *)self - extra;
+	size_t len = CHUNK_SIZE(self) + extra;
+	/* Crash on double free */
+	if (extra & 1) a_crash();
+	__munmap(base, len);
+}
+
+void free(void *p)
+{
+	if (!p) return;
+
+	struct chunk *self = MEM_TO_CHUNK(p);
+
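+	/* Mmapped allocations go straight back to the kernel; everything else
+	 * is coalesced and binned. */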
+	if (IS_MMAPPED(self))
+		unmap_chunk(self);
+	else
+		bin_chunk(self);
+}
+
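+/* Donate the byte range [start,end) to the heap; called by the dynamic
+ * linker's reclaim(). */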
+void __malloc_donate(char *start, char *end)
+{
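+	/* Round start up so the chunk's user memory is SIZE_ALIGN-aligned with
+	 * OVERHEAD bytes in front for the header; round end down to SIZE_ALIGN. */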
+	size_t align_start_up = (SIZE_ALIGN-1) & (-(uintptr_t)start - OVERHEAD);
+	size_t align_end_down = (SIZE_ALIGN-1) & (uintptr_t)end;
+
+	if (end - start <= OVERHEAD + align_start_up + align_end_down)
+		return;
+	start += align_start_up + OVERHEAD;
+	end -= align_end_down;
+
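+	/* Sentinel headers marked in-use on both sides keep bin_chunk() from
+	 * coalescing past the donated range. */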
+	struct chunk *c = MEM_TO_CHUNK(start), *n = MEM_TO_CHUNK(end);
+	c->psize = n->csize = C_INUSE;
+	c->csize = n->psize = C_INUSE | (end-start);
+	bin_chunk(c);
+}