MINOR: pools: make the global pools a runtime option.

There were very few functions left that were specific to global pools,
and even the checks they used to participate in are not directly on the
most critical path, so they can tolerate an extra "if".

What's done now is that pool_releasable() always returns 0 when global
pools are disabled (just as the previous stub version did) so that
pool_evict_last_items() never tries to place evicted objects there. As
such there will never be
any object in the free list. However pool_refill_local_from_shared() is
bypassed when global pools are disabled so that we even avoid the atomic
loads from this function.

The default global setting is still adjusted based on the original
CONFIG_NO_GLOBAL_POOLS that is set depending on threads and the allocator.
The resulting executable only grew by 1.1kB by keeping this code enabled,
and the code is simplified and will later support runtime options.
This commit is contained in:
Willy Tarreau 2022-02-22 09:21:13 +01:00
parent 6f3c7f6e6a
commit dff3b0627d
3 changed files with 16 additions and 44 deletions

View File

@ -45,6 +45,7 @@
#define POOL_DBG_DONT_MERGE 0x00000002 // do not merge same-size pools
#define POOL_DBG_COLD_FIRST 0x00000004 // pick cold objects first
#define POOL_DBG_INTEGRITY 0x00000008 // perform integrity checks on cache
#define POOL_DBG_NO_GLOBAL 0x00000010 // disable global pools
/* This is the head of a thread-local cache */

View File

@ -145,27 +145,6 @@ void pool_evict_from_local_caches(void);
void pool_put_to_cache(struct pool_head *pool, void *ptr, const void *caller);
void pool_fill_pattern(struct pool_cache_head *pch, struct pool_cache_item *item, uint size);
void pool_check_pattern(struct pool_cache_head *pch, struct pool_cache_item *item, uint size);
#if defined(CONFIG_HAP_NO_GLOBAL_POOLS)
/* Stub used when shared (global) pools are compiled out
 * (CONFIG_HAP_NO_GLOBAL_POOLS): reports that zero objects can be
 * released to the shared pool, so callers such as
 * pool_evict_last_items() never try to place evicted objects there.
 */
static inline uint pool_releasable(const struct pool_head *pool)
{
/* no room left */
return 0;
}
/* Stub for builds without shared pools: there is no shared pool to
 * refill the thread-local cache <pch> from, so this is a no-op.
 */
static inline void pool_refill_local_from_shared(struct pool_head *pool, struct pool_cache_head *pch)
{
/* ignored without shared pools */
}
/* Stub for builds without shared pools: objects are never stored into a
 * shared cache, so <item> and <count> are simply ignored.
 */
static inline void pool_put_to_shared_cache(struct pool_head *pool, struct pool_item *item, uint count)
{
/* ignored without shared pools */
}
#else /* CONFIG_HAP_NO_GLOBAL_POOLS */
void pool_refill_local_from_shared(struct pool_head *pool, struct pool_cache_head *pch);
void pool_put_to_shared_cache(struct pool_head *pool, struct pool_item *item, uint count);
@ -175,12 +154,17 @@ void pool_put_to_shared_cache(struct pool_head *pool, struct pool_item *item, ui
* be released in the worst case, and that this value is always lower than or
* equal to ->allocated. It's important to understand that under thread
* contention these values may not always be accurate but the principle is that
* any deviation remains contained.
* any deviation remains contained. When global pools are disabled, this
* function always returns zero so that the caller knows it must free the
* object via other ways.
*/
static inline uint pool_releasable(const struct pool_head *pool)
{
uint alloc, used;
if (unlikely(pool_debugging & POOL_DBG_NO_GLOBAL))
return 0;
alloc = HA_ATOMIC_LOAD(&pool->allocated);
used = HA_ATOMIC_LOAD(&pool->used);
if (used < alloc)
@ -196,16 +180,13 @@ static inline uint pool_releasable(const struct pool_head *pool)
return 0;
}
#endif /* CONFIG_HAP_NO_GLOBAL_POOLS */
/* These are generic cache-aware wrappers that allocate/free from/to the local
* cache first, then from the second level if it exists.
*/
/* Tries to retrieve an object from the local pool cache corresponding to pool
* <pool>. If none is available, tries to allocate from the shared cache, and
* returns NULL if nothing is available.
* <pool>. If none is available, tries to allocate from the shared cache if any
* and returns NULL if nothing is available.
*/
static inline void *pool_get_from_cache(struct pool_head *pool, const void *caller)
{
@ -214,7 +195,8 @@ static inline void *pool_get_from_cache(struct pool_head *pool, const void *call
ph = &pool->cache[tid];
if (unlikely(LIST_ISEMPTY(&ph->list))) {
pool_refill_local_from_shared(pool, ph);
if (!(pool_debugging & POOL_DBG_NO_GLOBAL))
pool_refill_local_from_shared(pool, ph);
if (LIST_ISEMPTY(&ph->list))
return NULL;
}

View File

@ -49,6 +49,9 @@ uint pool_debugging __read_mostly = /* set of POOL_DBG_* flags */
#endif
#ifdef DEBUG_POOL_INTEGRITY
POOL_DBG_INTEGRITY |
#endif
#ifdef CONFIG_HAP_NO_GLOBAL_POOLS
POOL_DBG_NO_GLOBAL |
#endif
0;
@ -393,6 +396,7 @@ static void pool_evict_last_items(struct pool_head *pool, struct pool_cache_head
uint cluster = 0;
uint to_free_max;
/* Note: this will be zero when global pools are disabled */
to_free_max = pool_releasable(pool);
while (released < count && !LIST_ISEMPTY(&ph->list)) {
@ -404,6 +408,7 @@ static void pool_evict_last_items(struct pool_head *pool, struct pool_cache_head
LIST_DELETE(&item->by_lru);
if (to_free_max > released || cluster) {
/* will never match when global pools are disabled */
pi = (struct pool_item *)item;
pi->next = NULL;
pi->down = head;
@ -501,21 +506,6 @@ void pool_put_to_cache(struct pool_head *pool, void *ptr, const void *caller)
}
}
#if defined(CONFIG_HAP_NO_GLOBAL_POOLS)
/* legacy stuff */
/* No-op in builds without global pools: per the commit description there
 * is never any object in the shared free list, hence nothing to flush.
 */
void pool_flush(struct pool_head *pool)
{
}
/* This function might ask the malloc library to trim its buffers.
 * Without global pools there are no shared objects to reclaim, so the
 * only useful action left is asking the allocator to release memory.
 * NOTE(review): <pool_ctx> is unused here — presumably kept for API
 * compatibility with the shared-pools variant; confirm against callers.
 */
void pool_gc(struct pool_head *pool_ctx)
{
/* delegate to the allocator: trim unused buffers across all pools */
trim_all_pools();
}
#else /* CONFIG_HAP_NO_GLOBAL_POOLS */
/* Tries to refill the local cache <pch> from the shared one for pool <pool>.
* This is only used when pools are in use and shared pools are enabled. No
* malloc() is attempted, and poisonning is never performed. The purpose is to
@ -659,7 +649,6 @@ void pool_gc(struct pool_head *pool_ctx)
if (!isolated)
thread_release();
}
#endif /* CONFIG_HAP_NO_GLOBAL_POOLS */
#else /* CONFIG_HAP_POOLS */