diff --git a/Makefile b/Makefile
index af0f5cc87..657ceb726 100644
--- a/Makefile
+++ b/Makefile
@@ -234,7 +234,7 @@ SMALL_OPTS =
 # DEBUG_MEM_STATS, DEBUG_DONT_SHARE_POOLS, DEBUG_FD, DEBUG_POOL_INTEGRITY,
 # DEBUG_NO_POOLS, DEBUG_FAIL_ALLOC, DEBUG_STRICT_NOCRASH, DEBUG_HPACK,
 # DEBUG_AUTH, DEBUG_SPOE, DEBUG_UAF, DEBUG_THREAD, DEBUG_STRICT, DEBUG_DEV,
-# DEBUG_TASK, DEBUG_MEMORY_POOLS.
+# DEBUG_TASK, DEBUG_MEMORY_POOLS, DEBUG_POOL_TRACING.
 DEBUG =
 
 #### Trace options
diff --git a/doc/internals/api/pools.txt b/doc/internals/api/pools.txt
index f3a014108..567571968 100644
--- a/doc/internals/api/pools.txt
+++ b/doc/internals/api/pools.txt
@@ -513,6 +513,15 @@ DEBUG_POOL_INTEGRITY
   preference for cold cache instead of hot cache, though not as much as
   with DEBUG_UAF. This option is meant to be usable in production.
 
+DEBUG_POOL_TRACING
+  When enabled, the callers of pool_alloc() and pool_free() will be
+  recorded into an extra memory area placed after the end of the object.
+  This may only be required by developers who want to get a few more
+  hints about the code paths involved in some crashes, but will serve no
+  purpose outside of this. It remains compatible with (and complements
+  well) DEBUG_POOL_INTEGRITY above. Such information becomes meaningless
+  once the objects leave the thread-local cache.
+
 DEBUG_MEM_STATS
   When enabled, all malloc/calloc/realloc/strdup/free calls are accounted
   for per call place (file+line number), and may be displayed or reset on
diff --git a/include/haproxy/pool.h b/include/haproxy/pool.h
index a79eb284b..4392a2e97 100644
--- a/include/haproxy/pool.h
+++ b/include/haproxy/pool.h
@@ -78,7 +78,27 @@
 #endif // DEBUG_MEMORY_POOLS
 
-# define POOL_EXTRA (POOL_EXTRA_MARK)
+/* It's possible to trace the callers of pool_alloc()/pool_free() by placing
+ * their pointer after the end of the area and the optional mark above.
+ */
+#if defined(DEBUG_POOL_TRACING)
+# define POOL_EXTRA_CALLER (sizeof(void *))
+# define POOL_DEBUG_TRACE_CALLER(pool, item, caller)                   \
+	do {                                                            \
+		typeof(pool) __p = (pool);                              \
+		typeof(item) __i = (item);                              \
+		typeof(caller) __c = (caller);                          \
+		*(typeof(caller)*)(((char *)__i) + __p->size + POOL_EXTRA_MARK) = __c; \
+	} while (0)
+
+#else // DEBUG_POOL_TRACING
+
+# define POOL_EXTRA_CALLER (0)
+# define POOL_DEBUG_TRACE_CALLER(pool, item, caller)   do { } while (0)
+
+#endif
+
+# define POOL_EXTRA (POOL_EXTRA_MARK + POOL_EXTRA_CALLER)
 
 /* poison each newly allocated area with this byte if >= 0 */
 extern int mem_poison_byte;
@@ -274,6 +294,7 @@ static inline void *pool_get_from_cache(struct pool_head *pool, const void *call
 
 	/* keep track of where the element was allocated from */
 	POOL_DEBUG_SET_MARK(pool, item);
+	POOL_DEBUG_TRACE_CALLER(pool, item, caller);
 
 	ph->count--;
 	pool_cache_bytes -= pool->size;
diff --git a/src/pool.c b/src/pool.c
index 4b12d7c91..a20880b94 100644
--- a/src/pool.c
+++ b/src/pool.c
@@ -284,6 +284,7 @@ void *pool_alloc_nocache(struct pool_head *pool)
 
 	/* keep track of where the element was allocated from */
 	POOL_DEBUG_SET_MARK(pool, ptr);
+	POOL_DEBUG_TRACE_CALLER(pool, ptr, NULL);
 	return ptr;
 }
 
@@ -390,7 +391,8 @@ void pool_evict_from_local_caches()
 /* Frees an object to the local cache, possibly pushing oldest objects to the
  * shared cache, which itself may decide to release some of them to the OS.
  * While it is unspecified what the object becomes past this point, it is
- * guaranteed to be released from the users' perpective.
+ * guaranteed to be released from the users' perspective. A caller address may
+ * be passed and stored into the area when DEBUG_POOL_TRACING is set.
  */
 void pool_put_to_cache(struct pool_head *pool, void *ptr, const void *caller)
 {
@@ -399,6 +401,7 @@ void pool_put_to_cache(struct pool_head *pool, void *ptr, const void *caller)
 
 	LIST_INSERT(&ph->list, &item->by_pool);
 	LIST_INSERT(&th_ctx->pool_lru_head, &item->by_lru);
+	POOL_DEBUG_TRACE_CALLER(pool, item, caller);
 	ph->count++;
 	pool_fill_pattern(ph, item, pool->size);
 	pool_cache_count++;
@@ -467,6 +470,7 @@ void pool_refill_local_from_shared(struct pool_head *pool, struct pool_cache_hea
 		down = ret->down;
 		/* keep track of where the element was allocated from */
 		POOL_DEBUG_SET_MARK(pool, ret);
+		POOL_DEBUG_TRACE_CALLER(pool, ret, NULL);
 
 		item = (struct pool_cache_item *)ret;
 		LIST_INSERT(&pch->list, &item->by_pool);
@@ -604,6 +608,9 @@ void *__pool_alloc(struct pool_head *pool, unsigned int flags)
 		return NULL;
 #endif
 
+#if defined(DEBUG_POOL_TRACING)
+	caller = __builtin_return_address(0);
+#endif
 	if (!p)
 		p = pool_get_from_cache(pool, caller);
 	if (unlikely(!p))
@@ -626,6 +633,9 @@ void __pool_free(struct pool_head *pool, void *ptr)
 {
 	const void *caller = NULL;
 
+#if defined(DEBUG_POOL_TRACING)
+	caller = __builtin_return_address(0);
+#endif
 	/* we'll get late corruption if we refill to the wrong pool or double-free */
 	POOL_DEBUG_CHECK_MARK(pool, ptr);
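
Note (illustration only, not part of the patch): with DEBUG_POOL_TRACING
enabled, the recorded caller sits right after the object and the optional
mark, so it can be read back while the object is still in the thread-local
cache. A minimal sketch under that assumption follows; the helper name
pool_debug_get_caller() is hypothetical and simply mirrors the offset used
by POOL_DEBUG_TRACE_CALLER() above:

#include <haproxy/pool.h>

/* Hypothetical helper (not in the patch): return the last caller recorded
 * for <item> by POOL_DEBUG_TRACE_CALLER(). The offset is the object size
 * plus the optional mark, exactly as written by the macro. Only meaningful
 * when DEBUG_POOL_TRACING is set and the object sits in the local cache.
 */
static inline const void *pool_debug_get_caller(const struct pool_head *pool,
                                                const void *item)
{
	return *(const void **)((const char *)item + pool->size + POOL_EXTRA_MARK);
}

The returned address can then be mapped back to a code location with gdb's
"info symbol <addr>" or with "addr2line -e haproxy <addr>".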