REORG: pool: move all the OS specific code to pool-os.h
Till now, pool-os.h used to contain a mapping from pool_{alloc,free}_area() to either pool_{alloc,free}_area_uaf() when DEBUG_UAF is set, or to the regular malloc-based functions, while the *_uaf() functions themselves lived in pool.c. But since the first cleanup of the pools in 2.4, pool_{alloc,free}_area() have not been called from anywhere but pool.c, and from exactly one place each. As such, there is no longer any reason to keep the *_uaf() functions apart in pool.c: they can be inlined into pool-os.h so that all the OS-specific code lives there, with pool.c calling one or the other depending on DEBUG_UAF. This is cleaner, with fewer round trips between the two files, and makes the code easier to find.
parent 76a97a98ca
commit a95636682d
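
For orientation before reading the diff, a condensed, self-contained sketch of the new arrangement follows. DEBUG_UAF, pool_alloc_area() and pool_alloc_area_uaf() are the real names from the patch; the simplified bodies and the get_area() wrapper are illustrative stand-ins only, not haproxy code:

#include <stdlib.h>

/* normal allocator: same shape as the one kept in pool-os.h */
static inline void *pool_alloc_area(size_t size)
{
	return malloc(size);
}

/* stand-in for the mmap()-based allocator now inlined in pool-os.h
 * (the full body is shown in the diff below)
 */
static inline void *pool_alloc_area_uaf(size_t size)
{
	return malloc(size);
}

/* the single caller in pool.c now picks one at compile time, instead of
 * the header remapping pool_alloc_area() onto the *_uaf() prototype
 */
static void *get_area(size_t size)
{
	void *ptr;
#ifdef DEBUG_UAF
	ptr = pool_alloc_area_uaf(size);
#else
	ptr = pool_alloc_area(size);
#endif
	return ptr;
}

int main(void)
{
	void *p = get_area(64);
	free(p);
	return 0;
}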
include/haproxy/pool-os.h
@@ -22,12 +22,11 @@
 #ifndef _HAPROXY_POOL_OS_H
 #define _HAPROXY_POOL_OS_H
 
+#include <sys/mman.h>
 #include <stdlib.h>
 #include <haproxy/api.h>
 
 
-#ifndef DEBUG_UAF
-
 /************* normal allocator *************/
 
 /* allocates an area of size <size> and returns it. The semantics are similar
@@ -48,32 +47,57 @@ static forceinline void pool_free_area(void *area, size_t __maybe_unused size)
 	free(area);
 }
 
-#else
-
 /************* use-after-free allocator *************/
 
-void *pool_alloc_area_uaf(size_t size);
-void pool_free_area_uaf(void *area, size_t size);
-
-
 /* allocates an area of size <size> and returns it. The semantics are similar
- * to those of malloc().
+ * to those of malloc(). However the allocation is rounded up to 4kB so that a
+ * full page is allocated. This ensures the object can be freed alone so that
+ * future dereferences are easily detected. The returned object is always
+ * 16-bytes aligned to avoid issues with unaligned structure objects. In case
+ * some padding is added, the area's start address is copied at the end of the
+ * padding to help detect underflows.
  */
-static forceinline void *pool_alloc_area(size_t size)
+static inline void *pool_alloc_area_uaf(size_t size)
 {
-	return pool_alloc_area_uaf(size);
+	size_t pad = (4096 - size) & 0xFF0;
+	void *ret;
+
+	ret = mmap(NULL, (size + 4095) & -4096, PROT_READ|PROT_WRITE, MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
+	if (ret != MAP_FAILED) {
+		/* let's dereference the page before returning so that the real
+		 * allocation in the system is performed without holding the lock.
+		 */
+		*(int *)ret = 0;
+		if (pad >= sizeof(void *))
+			*(void **)(ret + pad - sizeof(void *)) = ret + pad;
+		ret += pad;
+	} else {
+		ret = NULL;
+	}
+	return ret;
 }
 
-/* frees an area <area> of size <size> allocated by pool_alloc_area(). The
- * semantics are identical to free() except that the size is specified and
- * may be ignored.
+/* frees an area <area> of size <size> allocated by pool_alloc_area_uaf(). The
+ * semantics are identical to free() except that the size must absolutely match
+ * the one passed to pool_alloc_area_uaf(). In case some padding is added, the
+ * area's start address is compared to the one at the end of the padding, and
+ * a segfault is triggered if they don't match, indicating an underflow.
  */
-static forceinline void pool_free_area(void *area, size_t size)
+static inline void pool_free_area_uaf(void *area, size_t size)
 {
-	pool_free_area_uaf(area, size);
-}
+	size_t pad = (4096 - size) & 0xFF0;
 
-#endif /* DEBUG_UAF */
+	/* This object will be released for real in order to detect a use after
+	 * free. We also force a write to the area to ensure we crash on double
+	 * free or free of a const area.
+	 */
+	*(uint32_t *)area = 0xDEADADD4;
+
+	if (pad >= sizeof(void *) && *(void **)(area - sizeof(void *)) != area)
+		ABORT_NOW();
+
+	munmap(area - pad, (size + 4095) & -4096);
+}
 
 #endif /* _HAPROXY_POOL_OS_H */
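The padding arithmetic in pool_alloc_area_uaf() is terse, so here is a small standalone program (an illustration, not haproxy code) that prints, for a few sizes, the mmap() length and where the object lands inside it. Since pad = (4096 - size) & 0xFF0 is always a multiple of 16, the page-aligned start plus pad stays 16-byte aligned while the object is pushed toward the end of its last page, where an overflow runs into unmapped memory quickly:

#include <stdio.h>

int main(void)
{
	size_t sizes[] = { 16, 100, 1000, 4096, 5000 };
	size_t i;

	for (i = 0; i < sizeof(sizes) / sizeof(*sizes); i++) {
		size_t size = sizes[i];
		/* same expressions as in pool_alloc_area_uaf() */
		size_t pad = (4096 - size) & 0xFF0;
		size_t map = (size + 4095) & -4096;

		printf("size=%5zu map=%5zu pad=%4zu object=[%zu..%zu)\n",
		       size, map, pad, pad, pad + size);
	}
	return 0;
}

For example, size=100 yields pad=3984 and a 4096-byte mapping, so the object occupies bytes 3984..4083 and only 12 bytes of slack remain before the unmapped page that follows.
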
src/pool.c (69 lines changed)
@@ -10,7 +10,6 @@
  *
  */
 
-#include <sys/mman.h>
 #include <errno.h>
 
 #include <haproxy/activity.h>
@@ -336,7 +335,12 @@ struct pool_head *create_pool(char *name, unsigned int size, unsigned int flags)
 void *pool_get_from_os(struct pool_head *pool)
 {
 	if (!pool->limit || pool->allocated < pool->limit) {
-		void *ptr = pool_alloc_area(pool->alloc_sz);
+		void *ptr;
+#ifdef DEBUG_UAF
+		ptr = pool_alloc_area_uaf(pool->alloc_sz);
+#else
+		ptr = pool_alloc_area(pool->alloc_sz);
+#endif
 		if (ptr) {
 			_HA_ATOMIC_INC(&pool->allocated);
 			return ptr;
@@ -353,7 +357,11 @@ void *pool_get_from_os(struct pool_head *pool)
  */
 void pool_put_to_os(struct pool_head *pool, void *ptr)
 {
+#ifdef DEBUG_UAF
+	pool_free_area_uaf(ptr, pool->alloc_sz);
+#else
 	pool_free_area(ptr, pool->alloc_sz);
+#endif
 	_HA_ATOMIC_DEC(&pool->allocated);
 }
 
@@ -787,63 +795,6 @@ void __pool_free(struct pool_head *pool, void *ptr)
 	pool_put_to_cache(pool, ptr, caller);
 }
 
-
-#ifdef DEBUG_UAF
-
-/************* use-after-free allocator *************/
-
-/* allocates an area of size <size> and returns it. The semantics are similar
- * to those of malloc(). However the allocation is rounded up to 4kB so that a
- * full page is allocated. This ensures the object can be freed alone so that
- * future dereferences are easily detected. The returned object is always
- * 16-bytes aligned to avoid issues with unaligned structure objects. In case
- * some padding is added, the area's start address is copied at the end of the
- * padding to help detect underflows.
- */
-void *pool_alloc_area_uaf(size_t size)
-{
-	size_t pad = (4096 - size) & 0xFF0;
-	void *ret;
-
-	ret = mmap(NULL, (size + 4095) & -4096, PROT_READ|PROT_WRITE, MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
-	if (ret != MAP_FAILED) {
-		/* let's dereference the page before returning so that the real
-		 * allocation in the system is performed without holding the lock.
-		 */
-		*(int *)ret = 0;
-		if (pad >= sizeof(void *))
-			*(void **)(ret + pad - sizeof(void *)) = ret + pad;
-		ret += pad;
-	} else {
-		ret = NULL;
-	}
-	return ret;
-}
-
-/* frees an area <area> of size <size> allocated by pool_alloc_area(). The
- * semantics are identical to free() except that the size must absolutely match
- * the one passed to pool_alloc_area(). In case some padding is added, the
- * area's start address is compared to the one at the end of the padding, and
- * a segfault is triggered if they don't match, indicating an underflow.
- */
-void pool_free_area_uaf(void *area, size_t size)
-{
-	size_t pad = (4096 - size) & 0xFF0;
-
-	/* This object will be released for real in order to detect a use after
-	 * free. We also force a write to the area to ensure we crash on double
-	 * free or free of a const area.
-	 */
-	*(uint32_t *)area = 0xDEADADD4;
-
-	if (pad >= sizeof(void *) && *(void **)(area - sizeof(void *)) != area)
-		ABORT_NOW();
-
-	munmap(area - pad, (size + 4095) & -4096);
-}
-
-#endif /* DEBUG_UAF */
-
 /*
  * This function destroys a pool by freeing it completely, unless it's still
  * in use. This should be called only under extreme circumstances. It always
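
To see what the scheme buys, here is a deliberately crashing standalone sketch of the same mmap()/munmap() technique (again an illustration, not haproxy code; the 0xDEADADD4 poisoning and the underflow canary are omitted for brevity). Because the free path returns the whole mapping to the OS, a later dereference faults immediately instead of silently reading recycled heap memory:

#define _DEFAULT_SOURCE
#include <stddef.h>
#include <sys/mman.h>

/* minimal re-implementation of the allocation scheme for demonstration */
static void *uaf_alloc(size_t size)
{
	size_t pad = (4096 - size) & 0xFF0;
	void *ret = mmap(NULL, (size + 4095) & -4096, PROT_READ | PROT_WRITE,
	                 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

	return ret == MAP_FAILED ? NULL : (char *)ret + pad;
}

static void uaf_free(void *area, size_t size)
{
	size_t pad = (4096 - size) & 0xFF0;

	/* unmap the whole page(s): any further access will segfault */
	munmap((char *)area - pad, (size + 4095) & -4096);
}

int main(void)
{
	char *p = uaf_alloc(100);

	if (!p)
		return 1;
	p[0] = 'x';
	uaf_free(p, 100);
	return p[0];	/* use after free: faults here, by design */
}

Compiling with -DDEBUG_UAF is what selects the real version of this behaviour in the patched pool.c above.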