From: Rich Felker
Date: Wed, 3 Jun 2020 23:13:11 +0000 (-0400)
Subject: rename aligned_alloc source file back to its proper name
X-Git-Url: https://git.librecmc.org/?a=commitdiff_plain;h=fc18facf6e5a1ce344ba1f5c797cf1594f49bbd8;p=oweals%2Fmusl.git

rename aligned_alloc source file back to its proper name
---

diff --git a/src/malloc/aligned_alloc.c b/src/malloc/aligned_alloc.c
new file mode 100644
index 00000000..e06c76ed
--- /dev/null
+++ b/src/malloc/aligned_alloc.c
@@ -0,0 +1,52 @@
+#include <stdlib.h>
+#include <stdint.h>
+#include <errno.h>
+#include "malloc_impl.h"
+
+void *aligned_alloc(size_t align, size_t len)
+{
+	unsigned char *mem, *new;
+
+	if ((align & -align) != align) {
+		errno = EINVAL;
+		return 0;
+	}
+
+	if (len > SIZE_MAX - align || __malloc_replaced) {
+		errno = ENOMEM;
+		return 0;
+	}
+
+	if (align <= SIZE_ALIGN)
+		return malloc(len);
+
+	if (!(mem = malloc(len + align-1)))
+		return 0;
+
+	new = (void *)((uintptr_t)mem + align-1 & -align);
+	if (new == mem) return mem;
+
+	struct chunk *c = MEM_TO_CHUNK(mem);
+	struct chunk *n = MEM_TO_CHUNK(new);
+
+	if (IS_MMAPPED(c)) {
+		/* Apply difference between aligned and original
+		 * address to the "extra" field of mmapped chunk. */
+		n->psize = c->psize + (new-mem);
+		n->csize = c->csize - (new-mem);
+		return new;
+	}
+
+	struct chunk *t = NEXT_CHUNK(c);
+
+	/* Split the allocated chunk into two chunks. The aligned part
+	 * that will be used has the size in its footer reduced by the
+	 * difference between the aligned and original addresses, and
+	 * the resulting size copied to its header. A new header and
+	 * footer are written for the split-off part to be freed. */
+	n->psize = c->csize = C_INUSE | (new-mem);
+	n->csize = t->psize -= new-mem;
+
+	__bin_chunk(c);
+	return new;
+}
diff --git a/src/malloc/memalign.c b/src/malloc/memalign.c
deleted file mode 100644
index e06c76ed..00000000
--- a/src/malloc/memalign.c
+++ /dev/null
@@ -1,52 +0,0 @@
-#include <stdlib.h>
-#include <stdint.h>
-#include <errno.h>
-#include "malloc_impl.h"
-
-void *aligned_alloc(size_t align, size_t len)
-{
-	unsigned char *mem, *new;
-
-	if ((align & -align) != align) {
-		errno = EINVAL;
-		return 0;
-	}
-
-	if (len > SIZE_MAX - align || __malloc_replaced) {
-		errno = ENOMEM;
-		return 0;
-	}
-
-	if (align <= SIZE_ALIGN)
-		return malloc(len);
-
-	if (!(mem = malloc(len + align-1)))
-		return 0;
-
-	new = (void *)((uintptr_t)mem + align-1 & -align);
-	if (new == mem) return mem;
-
-	struct chunk *c = MEM_TO_CHUNK(mem);
-	struct chunk *n = MEM_TO_CHUNK(new);
-
-	if (IS_MMAPPED(c)) {
-		/* Apply difference between aligned and original
-		 * address to the "extra" field of mmapped chunk. */
-		n->psize = c->psize + (new-mem);
-		n->csize = c->csize - (new-mem);
-		return new;
-	}
-
-	struct chunk *t = NEXT_CHUNK(c);
-
-	/* Split the allocated chunk into two chunks. The aligned part
-	 * that will be used has the size in its footer reduced by the
-	 * difference between the aligned and original addresses, and
-	 * the resulting size copied to its header. A new header and
-	 * footer are written for the split-off part to be freed. */
-	n->psize = c->csize = C_INUSE | (new-mem);
-	n->csize = t->psize -= new-mem;
-
-	__bin_chunk(c);
-	return new;
-}
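
For reference, a minimal caller-side sketch of the contract the renamed file implements: the alignment must be a power of two (the (align & -align) != align test, else EINVAL), and an oversized request, or one made after the allocator has been replaced, fails with ENOMEM. The alignment of 64 and the size of 1024 below are arbitrary illustrative values, not anything taken from this patch.

	#include <stdint.h>
	#include <stdio.h>
	#include <stdlib.h>

	int main(void)
	{
		/* 64 is a power of two, so the (align & -align) != align
		 * check passes; 1024 is also a multiple of 64, which keeps
		 * the call valid under the stricter C11 wording. */
		void *p = aligned_alloc(64, 1024);
		if (!p) {
			/* errno is EINVAL for a bad alignment,
			 * ENOMEM for an oversized request. */
			perror("aligned_alloc");
			return 1;
		}
		printf("%p is 64-byte aligned: %d\n", p,
			(uintptr_t)p % 64 == 0);
		free(p);
		return 0;
	}

With the implementation shown in the diff, a request whose alignment is at most SIZE_ALIGN is satisfied by plain malloc; a larger alignment like the one in this sketch takes the over-allocate-by-align-1 path and, for non-mmapped memory, the chunk split described in the code comments.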