refactor malloc's expand_heap to share with __simple_malloc
this extends the brk/stack collision protection added to full malloc in commit 276904c2f6 to also protect the __simple_malloc function used in static-linked programs that don't reference the free function. it also extends support for using mmap when brk fails, which full malloc got in commit 5446303328, to __simple_malloc.

since __simple_malloc may expand the heap by arbitrarily large increments, the stack collision detection is enhanced to detect interval overlap rather than just proximity of a single address to the stack.

code size is increased a bit, but this is partly offset by the sharing of code between the two malloc implementations, which, due to linking semantics, both get linked in a program that needs the full malloc with realloc/free support.
parent 4ef9b828c1
commit e3bc22f1ef
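In outline, the move from point proximity to interval overlap comes down to the following comparison. This is an illustrative sketch only; the helper names and the GUARD constant are hypothetical, but the two tests mirror the old is_near_stack and the new traverses_stack_p in the diff below:

	#include <stdint.h>

	#define GUARD ((uintptr_t)8<<20) /* assumed 8 MiB stack region, as in the commit */

	/* old style: tests whether the single address b lies within GUARD
	 * bytes below the stack; one sufficiently large brk step can jump
	 * clear over the stack without ever being "near" it */
	static int point_near_stack(uintptr_t b, uintptr_t stack)
	{
		return stack - b <= GUARD;
	}

	/* new style: tests whether the whole interval [old,new] overlaps
	 * the GUARD-sized region below the stack, whatever the step size */
	static int interval_hits_stack(uintptr_t old, uintptr_t new, uintptr_t stack)
	{
		uintptr_t lo = stack > GUARD ? stack - GUARD : 0;
		return new > lo && old < stack;
	}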
src/malloc/expand_heap.c (new file, 72 lines)
@@ -0,0 +1,72 @@
+#include <limits.h>
+#include <stdint.h>
+#include <errno.h>
+#include <sys/mman.h>
+#include "libc.h"
+#include "syscall.h"
+
+/* This function returns true if the interval [old,new]
+ * intersects the 'len'-sized interval below &libc.auxv
+ * (interpreted as the main-thread stack) or below &b
+ * (the current stack). It is used to defend against
+ * buggy brk implementations that can cross the stack. */
+
+static int traverses_stack_p(uintptr_t old, uintptr_t new)
+{
+	const uintptr_t len = 8<<20;
+	uintptr_t a, b;
+
+	b = (uintptr_t)libc.auxv;
+	a = b > len ? b-len : 0;
+	if (new>a && old<b) return 1;
+
+	b = (uintptr_t)&b;
+	a = b > len ? b-len : 0;
+	if (new>a && old<b) return 1;
+
+	return 0;
+}
+
+void *__mmap(void *, size_t, int, int, int, off_t);
+
+/* Expand the heap in-place if brk can be used, or otherwise via mmap,
+ * using an exponential lower bound on growth by mmap to make
+ * fragmentation asymptotically irrelevant. The size argument is both
+ * an input and an output, since the caller needs to know the size
+ * allocated, which will be larger than requested due to page alignment
+ * and mmap minimum size rules. The caller is responsible for locking
+ * to prevent concurrent calls. */
+
+void *__expand_heap(size_t *pn)
+{
+	static uintptr_t brk;
+	static unsigned mmap_step;
+	size_t n = *pn;
+
+	if (n > SIZE_MAX/2 - PAGE_SIZE) {
+		errno = ENOMEM;
+		return 0;
+	}
+	n += -n & PAGE_SIZE-1;
+
+	if (!brk) {
+		brk = __syscall(SYS_brk, 0);
+		brk += -brk & PAGE_SIZE-1;
+	}
+
+	if (n < SIZE_MAX-brk && !traverses_stack_p(brk, brk+n)
+	    && __syscall(SYS_brk, brk+n)==brk+n) {
+		*pn = n;
+		brk += n;
+		return (void *)(brk-n);
+	}
+
+	size_t min = (size_t)PAGE_SIZE << mmap_step/2;
+	if (n < min) n = min;
+	void *area = __mmap(0, n, PROT_READ|PROT_WRITE,
+		MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
+	if (area == MAP_FAILED) return 0;
+	*pn = n;
+	mmap_step++;
+	return area;
+}
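For context, the in/out size contract is easiest to see from the caller's side. A minimal hypothetical caller, not part of the commit:

	#include <stddef.h>

	void *__expand_heap(size_t *);

	/* ask for at least `want` bytes; learn how many were obtained */
	static void *grab(size_t want, size_t *got)
	{
		size_t n = want;
		void *p = __expand_heap(&n); /* caller must hold its own lock */
		if (!p) return 0;            /* errno is left set by the failing path */
		*got = n;                    /* page-aligned, possibly raised to the
		                                exponential mmap minimum; the caller
		                                must track and carve up all n bytes */
		return p;
	}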
src/malloc/lite_malloc.c
@@ -4,43 +4,46 @@
 #include <errno.h>
 #include "libc.h"
 
-uintptr_t __brk(uintptr_t);
-
 #define ALIGN 16
 
+void *__expand_heap(size_t *);
+
 void *__simple_malloc(size_t n)
 {
-	static uintptr_t cur, brk;
-	uintptr_t base, new;
+	static char *cur, *end;
 	static volatile int lock[2];
-	size_t align=1;
+	size_t align=1, pad;
+	void *p;
 
 	if (!n) n++;
-	if (n > SIZE_MAX/2) goto toobig;
-
 	while (align<n && align<ALIGN)
 		align += align;
-	n = n + align - 1 & -align;
 
 	LOCK(lock);
-	if (!cur) cur = brk = __brk(0)+16;
-	base = cur + align-1 & -align;
-	if (n > SIZE_MAX - PAGE_SIZE - base) goto fail;
-	if (base+n > brk) {
-		new = base+n + PAGE_SIZE-1 & -PAGE_SIZE;
-		if (__brk(new) != new) goto fail;
-		brk = new;
+
+	pad = -(uintptr_t)cur & align-1;
+
+	if (n <= SIZE_MAX/2 + ALIGN) n += pad;
+
+	if (n > end-cur) {
+		size_t m = n;
+		char *new = __expand_heap(&m);
+		if (!new) {
+			UNLOCK(lock);
+			return 0;
+		}
+		if (new != end) {
+			cur = new;
+			n -= pad;
+			pad = 0;
+		}
+		end = new + m;
 	}
-	cur = base+n;
-	UNLOCK(lock);
 
-	return (void *)base;
-
-fail:
+	p = cur + pad;
+	cur += n;
 	UNLOCK(lock);
-toobig:
-	errno = ENOMEM;
-	return 0;
+	return p;
 }
 
 weak_alias(__simple_malloc, malloc);
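The padding expression above, pad = -(uintptr_t)cur & align-1, relies on align being a power of two, which the ALIGN cap guarantees. A standalone demonstration with made-up values:

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uintptr_t cur = 0x1003;      /* bump pointer, 3 bytes past alignment */
		size_t align = 8;            /* power of two, as in __simple_malloc */
		size_t pad = -cur & align-1; /* bytes to skip so cur+pad is aligned */
		printf("pad=%zu aligned=%#lx\n", pad, (unsigned long)(cur+pad));
		/* prints: pad=5 aligned=0x1008 */
		return 0;
	}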
src/malloc/malloc.c
@@ -13,7 +13,6 @@
 #define inline inline __attribute__((always_inline))
 #endif
 
-uintptr_t __brk(uintptr_t);
 void *__mmap(void *, size_t, int, int, int, off_t);
 int __munmap(void *, size_t);
 void *__mremap(void *, size_t, size_t, int, ...);
@@ -31,13 +30,9 @@ struct bin {
 };
 
 static struct {
-	uintptr_t brk;
-	size_t *heap;
 	volatile uint64_t binmap;
 	struct bin bins[64];
-	volatile int brk_lock[2];
 	volatile int free_lock[2];
-	unsigned mmap_step;
 } mal;
 
 
@@ -152,77 +147,52 @@ void __dump_heap(int x)
 }
 #endif
 
-static int is_near_stack(uintptr_t b)
-{
-	const uintptr_t c = 8<<20;
-	uintptr_t a = (uintptr_t)libc.auxv;
-	uintptr_t d = (uintptr_t)&b;
-	return a-b<=c || d-b<=c;
-}
+void *__expand_heap(size_t *);
 
 static struct chunk *expand_heap(size_t n)
 {
-	static int init;
+	static int heap_lock[2];
+	static void *end;
+	void *p;
 	struct chunk *w;
-	uintptr_t new;
 
-	lock(mal.brk_lock);
+	/* The argument n already accounts for the caller's chunk
+	 * overhead needs, but if the heap can't be extended in-place,
+	 * we need room for an extra zero-sized sentinel chunk. */
+	n += SIZE_ALIGN;
 
-	if (!init) {
-		mal.brk = __brk(0);
-#ifdef SHARED
-		mal.brk = mal.brk + PAGE_SIZE-1 & -PAGE_SIZE;
-#endif
-		mal.brk = mal.brk + 2*SIZE_ALIGN-1 & -SIZE_ALIGN;
-		mal.heap = (void *)mal.brk;
-		init = 1;
+	lock(heap_lock);
+
+	p = __expand_heap(&n);
+	if (!p) {
+		unlock(heap_lock);
+		return 0;
 	}
 
-	if (n > SIZE_MAX - mal.brk - 2*PAGE_SIZE) goto fail;
-	new = mal.brk + n + SIZE_ALIGN + PAGE_SIZE - 1 & -PAGE_SIZE;
-	n = new - mal.brk;
-
-	if (is_near_stack(mal.brk) || __brk(new) != new) {
-		size_t min = (size_t)PAGE_SIZE << mal.mmap_step/2;
-		n += -n & PAGE_SIZE-1;
-		if (n < min) n = min;
-		void *area = __mmap(0, n, PROT_READ|PROT_WRITE,
-			MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
-		if (area == MAP_FAILED) goto fail;
-
-		mal.mmap_step++;
-		area = (char *)area + SIZE_ALIGN - OVERHEAD;
-		w = area;
+	/* If not just expanding existing space, we need to make a
+	 * new sentinel chunk below the allocated space. */
+	if (p != end) {
+		/* Valid/safe because of the prologue increment. */
 		n -= SIZE_ALIGN;
+		p = (char *)p + SIZE_ALIGN;
+		w = MEM_TO_CHUNK(p);
 		w->psize = 0 | C_INUSE;
-		w->csize = n | C_INUSE;
-		w = NEXT_CHUNK(w);
-		w->psize = n | C_INUSE;
-		w->csize = 0 | C_INUSE;
-
-		unlock(mal.brk_lock);
-
-		return area;
 	}
 
-	w = MEM_TO_CHUNK(mal.heap);
-	w->psize = 0 | C_INUSE;
-
-	w = MEM_TO_CHUNK(new);
+	/* Record new heap end and fill in footer. */
+	end = (char *)p + n;
+	w = MEM_TO_CHUNK(end);
 	w->psize = n | C_INUSE;
 	w->csize = 0 | C_INUSE;
 
-	w = MEM_TO_CHUNK(mal.brk);
+	/* Fill in header, which may be new or may be replacing a
+	 * zero-size sentinel header at the old end-of-heap. */
+	w = MEM_TO_CHUNK(p);
 	w->csize = n | C_INUSE;
-	mal.brk = new;
 
-	unlock(mal.brk_lock);
+	unlock(heap_lock);
 
 	return w;
-fail:
-	unlock(mal.brk_lock);
-	errno = ENOMEM;
-	return 0;
 }
 
 static int adjust_size(size_t *n)
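The sentinel arrangement described in the comments above can be pictured roughly as follows. This is an illustrative diagram assuming the usual psize/csize chunk-header reading of the macros, not code from the commit:

	/*
	 *  p0 = __expand_heap result    p = p0 + SIZE_ALIGN       end = p + n
	 *  |-- skipped prologue space --|-- usable chunk, size n --|
	 *
	 *  header at MEM_TO_CHUNK(p):    psize = 0 | C_INUSE  (zero-size
	 *                                sentinel below), csize = n | C_INUSE
	 *  footer at MEM_TO_CHUNK(end):  psize = n | C_INUSE, csize = 0 | C_INUSE
	 *
	 * the zero-sized, permanently in-use boundary chunks stop free()
	 * from ever trying to coalesce past either end of the region
	 */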