MINOR: pools: implement DEBUG_UAF to detect use after free

This code has been used successfully a few times in the past to detect
that a pool was used after being freed. Its main goal is to allocate a
full page for each object so that they are always released individually
and unmapped from memory. This way, if any part of the code references the
object after it was freed and before it is reallocated, a segv occurs at
the exact offending location. It does a few extra things such as writing
to the memory area before freeing to detect double-frees and free of
read-only areas, and placing the data at the end of the page instead of
the beginning so that out of bounds accesses are easier to spot. The
amount of memory used with this is huge (about 10 times the regular
usage) but it can be useful sometimes.
Willy Tarreau 2017-11-22 15:47:29 +01:00
parent f13322ede1
commit 158fa75811
2 changed files with 42 additions and 2 deletions
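
As an illustration of the mechanism described in the commit message, here is a
minimal standalone sketch of the same idea: each allocation gets its own
anonymous mapping, the object is placed 16-byte aligned at the end of it, and
freeing unmaps the whole region so that any later access faults on the spot.
This is not HAProxy's code; uaf_alloc()/uaf_free() and their details are
hypothetical.

#include <stddef.h>
#include <string.h>
#include <sys/mman.h>

#define PAGE_SZ 4096UL

/* hypothetical helper: map a dedicated region for one object of <size> bytes
 * and place the object at its end, 16-byte aligned, so that out-of-bounds
 * accesses tend to fall outside the mapping and accesses after release
 * always do.
 */
static void *uaf_alloc(size_t size)
{
        size_t map_sz = (size + PAGE_SZ - 1) & ~(PAGE_SZ - 1); /* round up to a full page */
        size_t pad    = (map_sz - size) & ~(size_t)0xF;        /* keep 16-byte alignment */
        void *area    = mmap(NULL, map_sz, PROT_READ | PROT_WRITE,
                             MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

        if (area == MAP_FAILED)
                return NULL;
        return (char *)area + pad;
}

/* hypothetical helper: return the whole region to the kernel; any later access
 * through <ptr> then faults at the exact offending instruction.
 */
static void uaf_free(void *ptr, size_t size)
{
        size_t map_sz = (size + PAGE_SZ - 1) & ~(PAGE_SZ - 1);
        size_t pad    = (map_sz - size) & ~(size_t)0xF;

        /* writing before unmapping catches double-frees and frees of read-only areas */
        memset(ptr, 0xD4, size < 4 ? size : 4);
        munmap((char *)ptr - pad, map_sz);
}

int main(void)
{
        char *p = uaf_alloc(56);

        if (!p)
                return 1;
        strcpy(p, "hello");
        uaf_free(p, 56);
        return p[0];    /* use after free: this read now segfaults instead of going unnoticed */
}

This is essentially what the DEBUG_UAF variants of pool_alloc_area() and
pool_free_area() in the diff below do, with the pool code supplying the object
size.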

Makefile

@@ -167,8 +167,8 @@ SMALL_OPTS =
#### Debug settings
# You can enable debugging on specific code parts by setting DEBUG=-DDEBUG_xxx.
# Currently defined DEBUG macros include DEBUG_FULL, DEBUG_MEMORY, DEBUG_FSM,
-# DEBUG_HASH, DEBUG_AUTH, DEBUG_SPOE and DEBUG_THREAD. Please check sources for
-# exact meaning or do not use at all.
+# DEBUG_HASH, DEBUG_AUTH, DEBUG_SPOE, DEBUG_UAF and DEBUG_THREAD. Please check
+# sources for exact meaning or do not use at all.
DEBUG =

#### Trace options
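
As the updated comment notes, the detector would be enabled like any other
debug macro through the DEBUG variable, e.g. a build along the lines of
"make TARGET=linux2628 DEBUG=-DDEBUG_UAF" (the TARGET value is only an
illustration; use whatever target and options the build normally uses), at the
cost of the roughly tenfold memory usage mentioned in the commit message.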

include/common/memory.h

@@ -22,6 +22,8 @@
#ifndef _COMMON_MEMORY_H
#define _COMMON_MEMORY_H
#include <sys/mman.h>
#include <stdlib.h>
#include <string.h>
@@ -155,6 +157,8 @@ static inline void *pool_alloc_dirty(struct pool_head *pool)
        return p;
}

#ifndef DEBUG_UAF /* normal allocator */

/* allocates an area of size <size> and returns it. The semantics are similar
* to those of malloc().
*/
@@ -172,6 +176,34 @@ static inline void pool_free_area(void *area, size_t __maybe_unused size)
        free(area);
}

#else /* use-after-free detector */

/* allocates an area of size <size> and returns it. The semantics are similar
* to those of malloc(). However the allocation is rounded up to 4kB so that a
* full page is allocated. This ensures the object can be freed alone so that
* future dereferences are easily detected. The returned object is always
* 16-bytes aligned to avoid issues with unaligned structure objects.
*/
static inline void *pool_alloc_area(size_t size)
{
        size_t pad = (4096 - size) & 0xFF0;

        return mmap(NULL, (size + 4095) & -4096, PROT_READ|PROT_WRITE, MAP_PRIVATE|MAP_ANONYMOUS, 0, 0) + pad;
}

/* frees an area <area> of size <size> allocated by pool_alloc_area(). The
* semantics are identical to free() except that the size must absolutely match
* the one passed to pool_alloc_area().
*/
static inline void pool_free_area(void *area, size_t size)
{
        size_t pad = (4096 - size) & 0xFF0;

        munmap(area - pad, (size + 4095) & -4096);
}

#endif /* DEBUG_UAF */

/*
* Returns a pointer to type <type> taken from the pool <pool_type> or
* dynamically allocated. In the first case, <pool_type> is updated to point to
@@ -215,8 +247,16 @@ static inline void pool_free2(struct pool_head *pool, void *ptr)
        if (*POOL_LINK(pool, ptr) != (void *)pool)
                *(int *)0 = 0;
#endif

#ifndef DEBUG_UAF /* normal pool behaviour */
        *POOL_LINK(pool, ptr) = (void *)pool->free_list;
        pool->free_list = (void *)ptr;
#else /* release the entry for real to detect use after free */
        /* ensure we crash on double free or free of a const area*/
        *(uint32_t *)ptr = 0xDEADADD4;
        pool_free_area(ptr, pool->size + POOL_EXTRA);
        pool->allocated--;
#endif /* DEBUG_UAF */
        pool->used--;
        HA_SPIN_UNLOCK(POOL_LOCK, &pool->lock);
}
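
A note on the padding arithmetic in pool_alloc_area()/pool_free_area() above:
pad is the largest 16-byte-aligned offset at which the object still fits in the
mmap()ed area, so the returned pointer stays 16-byte aligned and the object
ends within 16 bytes of the end of the mapping, which is what makes out of
bounds accesses easier to spot. A small standalone check of that property
(hypothetical sizes, hardcoded 4096-byte pages as in the patch):

#include <assert.h>
#include <stddef.h>
#include <stdio.h>

int main(void)
{
        /* hypothetical object sizes; in the patch this is pool->size + POOL_EXTRA */
        size_t sizes[] = { 56, 1024, 4000, 4096, 5000 };

        for (size_t i = 0; i < sizeof(sizes) / sizeof(sizes[0]); i++) {
                size_t size = sizes[i];
                size_t pad  = (4096 - size) & 0xFF0;  /* same formula as pool_alloc_area() */
                size_t map  = (size + 4095) & -4096;  /* length passed to mmap()/munmap() */

                printf("size=%5zu pad=%5zu map=%5zu gap_at_end=%2zu\n",
                       size, pad, map, map - (pad + size));
                assert(pad % 16 == 0);                /* returned pointer stays 16-byte aligned */
                assert(pad + size <= map);            /* object fits inside the mapping */
                assert(map - (pad + size) < 16);      /* and ends within 16 bytes of its end */
        }
        return 0;
}

The 0xDEADADD4 store in pool_free2() plays a complementary role: on a double
free the page is already unmapped, and on a free of a read-only area the write
is not permitted, so either mistake crashes on that store before the area is
handed back.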