diff --git a/Makefile b/Makefile
index f80ba4b0c..2d9cc0ca5 100644
--- a/Makefile
+++ b/Makefile
@@ -229,7 +229,7 @@ SMALL_OPTS =
 # not use them at all. Some even more obscure ones might also be available
 # without appearing here. Currently defined DEBUG macros include DEBUG_FULL,
 # DEBUG_MEM_STATS, DEBUG_DONT_SHARE_POOLS, DEBUG_NO_LOCKLESS_POOLS, DEBUG_FD,
-# DEBUG_NO_LOCAL_POOLS, DEBUG_FAIL_ALLOC, DEBUG_STRICT_NOCRASH, DEBUG_HPACK,
+# DEBUG_NO_POOLS, DEBUG_FAIL_ALLOC, DEBUG_STRICT_NOCRASH, DEBUG_HPACK,
 # DEBUG_AUTH, DEBUG_SPOE, DEBUG_UAF, DEBUG_THREAD, DEBUG_STRICT, DEBUG_DEV,
 # DEBUG_TASK.
 DEBUG =
diff --git a/include/haproxy/pool-t.h b/include/haproxy/pool-t.h
index 7dc99c182..4b185ef9a 100644
--- a/include/haproxy/pool-t.h
+++ b/include/haproxy/pool-t.h
@@ -26,6 +26,13 @@
 #include 
 #include 

+/* Pools are always enabled unless explicitly disabled. When disabled, the
+ * calls are directly passed to the underlying OS functions.
+ */
+#if !defined(DEBUG_NO_POOLS) && !defined(DEBUG_UAF) && !defined(DEBUG_FAIL_ALLOC)
+#define CONFIG_HAP_POOLS
+#endif
+
 /* On architectures supporting threads and double-word CAS, we can implement
  * lock-less memory pools. This isn't supported for debugging modes however.
  */
@@ -33,20 +40,13 @@
 #define CONFIG_HAP_LOCKLESS_POOLS
 #endif

-/* On architectures supporting threads we can amortize the locking cost using
- * local pools.
- */
-#if defined(USE_THREAD) && !defined(DEBUG_NO_LOCAL_POOLS) && !defined(DEBUG_UAF) && !defined(DEBUG_FAIL_ALLOC)
-#define CONFIG_HAP_LOCAL_POOLS
-#endif
-
 /* On modern architectures with many threads, a fast memory allocator, and
  * local pools, the global pools with their single list can be way slower than
  * the standard allocator which already has its own per-thread arenas. In this
  * case we disable global pools. The global pools may still be enforced
  * using CONFIG_HAP_GLOBAL_POOLS though.
  */
-#if defined(USE_THREAD) && defined(HA_HAVE_FAST_MALLOC) && defined(CONFIG_HAP_LOCAL_POOLS) && !defined(CONFIG_HAP_GLOBAL_POOLS)
+#if defined(USE_THREAD) && defined(HA_HAVE_FAST_MALLOC) && defined(CONFIG_HAP_POOLS) && !defined(CONFIG_HAP_GLOBAL_POOLS)
 #define CONFIG_HAP_NO_GLOBAL_POOLS
 #endif

@@ -117,7 +117,7 @@ struct pool_head {
 	unsigned int failed;	/* failed allocations */
 	struct list list;	/* list of all known pools */
 	char name[12];		/* name of the pool */
-#ifdef CONFIG_HAP_LOCAL_POOLS
+#ifdef CONFIG_HAP_POOLS
 	struct pool_cache_head cache[MAX_THREADS]; /* pool caches */
 #endif
 } __attribute__((aligned(64)));
diff --git a/include/haproxy/pool.h b/include/haproxy/pool.h
index 526c57c2d..92fb6dfbf 100644
--- a/include/haproxy/pool.h
+++ b/include/haproxy/pool.h
@@ -71,7 +71,7 @@ static inline int pool_is_crowded(const struct pool_head *pool)
 }


-#ifdef CONFIG_HAP_LOCAL_POOLS
+#ifdef CONFIG_HAP_POOLS

 /****************** Thread-local cache management ******************/

@@ -123,7 +123,7 @@ static inline void pool_put_to_cache(struct pool_head *pool, void *ptr)
 		pool_evict_from_cache();
 }

-#endif // CONFIG_HAP_LOCAL_POOLS
+#endif // CONFIG_HAP_POOLS

 #if defined(CONFIG_HAP_NO_GLOBAL_POOLS)

@@ -269,7 +269,7 @@ static inline void *__pool_alloc(struct pool_head *pool, unsigned int flags)
 {
 	void *p;

-#ifdef CONFIG_HAP_LOCAL_POOLS
+#ifdef CONFIG_HAP_POOLS
 	if (likely(p = __pool_get_from_cache(pool)))
 		goto ret;
 #endif
@@ -330,7 +330,7 @@ static inline void pool_free(struct pool_head *pool, void *ptr)
 		if (unlikely(mem_poison_byte >= 0))
 			memset(ptr, mem_poison_byte, pool->size);

-#ifdef CONFIG_HAP_LOCAL_POOLS
+#ifdef CONFIG_HAP_POOLS
 		/* put the object back into the cache only if there are not too
 		 * many objects yet in this pool (no more than half of the cached
 		 * is used or this pool uses no more than 1/8 of the cache size).
diff --git a/include/haproxy/tinfo-t.h b/include/haproxy/tinfo-t.h
index d1a4fc4e8..1285c731a 100644
--- a/include/haproxy/tinfo-t.h
+++ b/include/haproxy/tinfo-t.h
@@ -42,7 +42,7 @@ struct thread_info {
 	unsigned int idle_pct;      /* idle to total ratio over last sample (percent) */
 	unsigned int flags;         /* thread info flags, TI_FL_* */

-#ifdef CONFIG_HAP_LOCAL_POOLS
+#ifdef CONFIG_HAP_POOLS
 	struct list pool_lru_head;  /* oldest objects */
 #endif
 	struct list buffer_wq;      /* buffer waiters */
diff --git a/src/pool.c b/src/pool.c
index 6f19ec783..b493a933e 100644
--- a/src/pool.c
+++ b/src/pool.c
@@ -27,7 +27,7 @@

 #include 

-#ifdef CONFIG_HAP_LOCAL_POOLS
+#ifdef CONFIG_HAP_POOLS
 /* These ones are initialized per-thread on startup by init_pools() */
 THREAD_LOCAL size_t pool_cache_bytes = 0;   /* total cache size */
 THREAD_LOCAL size_t pool_cache_count = 0;   /* #cache objects   */
@@ -107,7 +107,7 @@ struct pool_head *create_pool(char *name, unsigned int size, unsigned int flags)
 	pool->flags = flags;
 	LIST_ADDQ(start, &pool->list);

-#ifdef CONFIG_HAP_LOCAL_POOLS
+#ifdef CONFIG_HAP_POOLS
 	/* update per-thread pool cache if necessary */
 	for (thr = 0; thr < MAX_THREADS; thr++) {
 		LIST_INIT(&pool->cache[thr].list);
@@ -119,7 +119,7 @@ struct pool_head *create_pool(char *name, unsigned int size, unsigned int flags)
 	return pool;
 }

-#ifdef CONFIG_HAP_LOCAL_POOLS
+#ifdef CONFIG_HAP_POOLS
 /* Evicts some of the oldest objects from the local cache, pushing them to the
  * global pool.
  */
@@ -602,7 +602,7 @@ void create_pool_callback(struct pool_head **ptr, char *name, unsigned int size)
 /* Initializes all per-thread arrays on startup */
 static void init_pools()
 {
-#ifdef CONFIG_HAP_LOCAL_POOLS
+#ifdef CONFIG_HAP_POOLS
 	int thr;

 	for (thr = 0; thr < MAX_THREADS; thr++) {
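For readers who don't have the pool build switches in mind, below is a standalone sketch of the compile-time selection this patch consolidates under CONFIG_HAP_POOLS: the macro is defined unless DEBUG_NO_POOLS, DEBUG_UAF or DEBUG_FAIL_ALLOC is set, code guarded by it tries a per-thread cache first, and a build without it passes allocations straight to the OS allocator. This is not HAProxy code: my_pool, my_pool_alloc()/my_pool_free() and the one-slot "cache" are hypothetical simplifications standing in for pool_head and __pool_alloc()/pool_free().

/* Hypothetical stand-ins only: my_pool, my_pool_alloc(), my_pool_free(). */
#include <stdio.h>
#include <stdlib.h>

/* Same guard as the one added to include/haproxy/pool-t.h above. */
#if !defined(DEBUG_NO_POOLS) && !defined(DEBUG_UAF) && !defined(DEBUG_FAIL_ALLOC)
#define CONFIG_HAP_POOLS
#endif

struct my_pool {
	size_t size;         /* object size served by this pool */
#ifdef CONFIG_HAP_POOLS
	void *cached;        /* one-slot stand-in for the per-thread cache */
#endif
};

static void *my_pool_alloc(struct my_pool *p)
{
#ifdef CONFIG_HAP_POOLS
	/* pools enabled: serve from the local cache when possible */
	if (p->cached) {
		void *ptr = p->cached;
		p->cached = NULL;
		return ptr;
	}
#endif
	/* pools disabled, or cache empty: fall through to the OS allocator */
	return malloc(p->size);
}

static void my_pool_free(struct my_pool *p, void *ptr)
{
#ifdef CONFIG_HAP_POOLS
	/* keep the object around for reuse instead of releasing it */
	if (!p->cached) {
		p->cached = ptr;
		return;
	}
#endif
	free(ptr);
}

int main(void)
{
	struct my_pool p = { .size = 64 };
	void *obj = my_pool_alloc(&p);

#ifdef CONFIG_HAP_POOLS
	puts("built with pools: objects are cached before reaching malloc()");
#else
	puts("built without pools: every call maps to malloc()/free()");
#endif
	my_pool_free(&p, obj);   /* cached for reuse when CONFIG_HAP_POOLS is set */
	obj = my_pool_alloc(&p); /* served from the cache in that case */
	my_pool_free(&p, obj);
	return 0;
}

Compiling the same file with -DDEBUG_NO_POOLS (or -DDEBUG_UAF / -DDEBUG_FAIL_ALLOC) compiles out the cached slot and both guarded branches, leaving only the direct malloc()/free() path, which mirrors the effect the new DEBUG_NO_POOLS option has on the real pools.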