DEBUG: pools: add new build option DEBUG_POOL_TRACING

This new option, when set, will cause the callers of pool_alloc() and
pool_free() to be recorded into an extra area in the pool that is expected
to be helpful for later inspection (e.g. in core dumps). For example it
may help figure that an object was released to a pool with some sub-fields
not yet released or that a use-after-free happened after releasing it,
with an immediate indication about the exact line of code that released
it (possibly an error path).

This only works with the per-thread cache, and even objects refilled from
the shared pool directly into the thread-local cache will have a NULL
there. That's not an issue since these objects have not yet been freed.
It's worth noting that pool_alloc_nocache() continues not to set any
caller pointer (e.g. when the cache is empty) because that would require
a possibly undesirable API change.

The extra cost is minimal (one pointer per object) and this complements
DEBUG_POOL_INTEGRITY well.
This commit is contained in:
Willy Tarreau 2022-01-24 15:52:51 +01:00
parent 0e2a5b4b61
commit add43fa43e
4 changed files with 43 additions and 3 deletions

View File

@ -234,7 +234,7 @@ SMALL_OPTS =
# DEBUG_MEM_STATS, DEBUG_DONT_SHARE_POOLS, DEBUG_FD, DEBUG_POOL_INTEGRITY,
# DEBUG_NO_POOLS, DEBUG_FAIL_ALLOC, DEBUG_STRICT_NOCRASH, DEBUG_HPACK,
# DEBUG_AUTH, DEBUG_SPOE, DEBUG_UAF, DEBUG_THREAD, DEBUG_STRICT, DEBUG_DEV,
# DEBUG_TASK, DEBUG_MEMORY_POOLS.
# DEBUG_TASK, DEBUG_MEMORY_POOLS, DEBUG_POOL_TRACING.
DEBUG =
#### Trace options

View File

@ -513,6 +513,15 @@ DEBUG_POOL_INTEGRITY
preference for cold cache instead of hot cache, though not as much as
with DEBUG_UAF. This option is meant to be usable in production.
DEBUG_POOL_TRACING
When enabled, the callers of pool_alloc() and pool_free() will be
recorded into an extra memory area placed after the end of the object.
This may only be required by developers who want to get a few more
hints about code paths involved in some crashes, but will serve no
purpose outside of this. It remains compatible with (and complements)
DEBUG_POOL_INTEGRITY above. Such information becomes meaningless once
the objects leave the thread-local cache.
DEBUG_MEM_STATS
When enabled, all malloc/calloc/realloc/strdup/free calls are accounted
for per call place (file+line number), and may be displayed or reset on

View File

@ -78,7 +78,27 @@
#endif // DEBUG_MEMORY_POOLS
# define POOL_EXTRA (POOL_EXTRA_MARK)
/* It's possible to trace callers of pool_free() by placing their pointer
* after the end of the area and the optional mark above.
*/
#if defined(DEBUG_POOL_TRACING)
# define POOL_EXTRA_CALLER (sizeof(void *))
# define POOL_DEBUG_TRACE_CALLER(pool, item, caller) \
do { \
typeof(pool) __p = (pool); \
typeof(item) __i = (item); \
typeof(caller) __c = (caller); \
*(typeof(caller)*)(((char *)__i) + __p->size + POOL_EXTRA_MARK) = __c; \
} while (0)
#else // DEBUG_POOL_TRACING
# define POOL_EXTRA_CALLER (0)
# define POOL_DEBUG_TRACE_CALLER(pool, item, caller) do { } while (0)
#endif
# define POOL_EXTRA (POOL_EXTRA_MARK + POOL_EXTRA_CALLER)
/* poison each newly allocated area with this byte if >= 0 */
extern int mem_poison_byte;
@ -274,6 +294,7 @@ static inline void *pool_get_from_cache(struct pool_head *pool, const void *call
/* keep track of where the element was allocated from */
POOL_DEBUG_SET_MARK(pool, item);
POOL_DEBUG_TRACE_CALLER(pool, item, caller);
ph->count--;
pool_cache_bytes -= pool->size;

View File

@ -284,6 +284,7 @@ void *pool_alloc_nocache(struct pool_head *pool)
/* keep track of where the element was allocated from */
POOL_DEBUG_SET_MARK(pool, ptr);
POOL_DEBUG_TRACE_CALLER(pool, item, NULL);
return ptr;
}
@ -390,7 +391,8 @@ void pool_evict_from_local_caches()
/* Frees an object to the local cache, possibly pushing oldest objects to the
* shared cache, which itself may decide to release some of them to the OS.
* While it is unspecified what the object becomes past this point, it is
* guaranteed to be released from the users' perspective. A caller address may
* be passed and stored into the area when DEBUG_POOL_TRACING is set.
*/
void pool_put_to_cache(struct pool_head *pool, void *ptr, const void *caller)
{
@ -399,6 +401,7 @@ void pool_put_to_cache(struct pool_head *pool, void *ptr, const void *caller)
LIST_INSERT(&ph->list, &item->by_pool);
LIST_INSERT(&th_ctx->pool_lru_head, &item->by_lru);
POOL_DEBUG_TRACE_CALLER(pool, item, caller);
ph->count++;
pool_fill_pattern(ph, item, pool->size);
pool_cache_count++;
@ -467,6 +470,7 @@ void pool_refill_local_from_shared(struct pool_head *pool, struct pool_cache_hea
down = ret->down;
/* keep track of where the element was allocated from */
POOL_DEBUG_SET_MARK(pool, ret);
POOL_DEBUG_TRACE_CALLER(pool, item, NULL);
item = (struct pool_cache_item *)ret;
LIST_INSERT(&pch->list, &item->by_pool);
@ -604,6 +608,9 @@ void *__pool_alloc(struct pool_head *pool, unsigned int flags)
return NULL;
#endif
#if defined(DEBUG_POOL_TRACING)
caller = __builtin_return_address(0);
#endif
if (!p)
p = pool_get_from_cache(pool, caller);
if (unlikely(!p))
@ -626,6 +633,9 @@ void __pool_free(struct pool_head *pool, void *ptr)
{
const void *caller = NULL;
#if defined(DEBUG_POOL_TRACING)
caller = __builtin_return_address(0);
#endif
/* we'll get late corruption if we refill to the wrong pool or double-free */
POOL_DEBUG_CHECK_MARK(pool, ptr);