MEDIUM: pools: directly free objects when pools are too crowded

During pool_free(), when the ->allocated value is 125% of needed_avg or
more, instead of putting the object back into the pool, it's immediately
freed using free(). By doing this we manage to significantly reduce the
amount of memory pinned in pools after transient traffic spikes.

During a test involving a constant load of 100 concurrent connections
each delivering 100 requests per second, the memory usage was a steady
21 MB RSS. Adding a 1 minute parallel load of 40k connections all looping
on 100kB objects made the memory usage climb to 938 MB before this patch.
With the patch it was only 660 MB. But when this parasitic load stopped,
before the patch the RSS would remain at 938 MB while with the patch,
it went down to 480 then 180 MB after a few seconds, to stabilize around
69 MB after about 20 seconds.

This can be particularly important to improve reloads where the memory
has to be shared between the old and new process.

Another improvement would be welcome, we ought to have a periodic task
to check pools usage and continue to free up unused objects regardless
of any call to pool_free(), because the needed_avg value depends on the
past and will not cover recently refilled objects.
This commit is contained in:
Willy Tarreau 2020-05-08 08:38:24 +02:00
parent a1e4f8c27c
commit 63a8738724
1 changed file with 25 additions and 7 deletions

View File

@ -220,6 +220,13 @@ static inline unsigned int pool_avg(unsigned int sum)
return (sum + n - 1) / n;
}
/* Reports whether the pool currently retains more free objects than recent
 * demand justifies: true when the allocated count has reached 125% of the
 * averaged needed amount AND at least ->minavail objects are sitting idle
 * (allocated but unused). Callers use this to decide whether a released
 * object should be freed for real instead of going back to the free list.
 */
static inline int pool_is_crowded(const struct pool_head *pool)
{
	unsigned int limit = pool_avg(pool->needed_avg + pool->needed_avg / 4);
	int idle = (int)(pool->allocated - pool->used);

	return pool->allocated >= limit && idle >= pool->minavail;
}
#ifdef CONFIG_HAP_LOCKLESS_POOLS
/* Tries to retrieve an object from the local pool cache corresponding to pool
@ -333,12 +340,18 @@ static inline void __pool_free(struct pool_head *pool, void *ptr)
{
void **free_list = pool->free_list;
do {
*POOL_LINK(pool, ptr) = (void *)free_list;
__ha_barrier_store();
} while (!_HA_ATOMIC_CAS(&pool->free_list, &free_list, ptr));
__ha_barrier_atomic_store();
_HA_ATOMIC_SUB(&pool->used, 1);
if (unlikely(pool_is_crowded(pool))) {
free(ptr);
_HA_ATOMIC_SUB(&pool->allocated, 1);
} else {
do {
*POOL_LINK(pool, ptr) = (void *)free_list;
__ha_barrier_store();
} while (!_HA_ATOMIC_CAS(&pool->free_list, &free_list, ptr));
__ha_barrier_atomic_store();
}
pool_avg_add(&pool->needed_avg, pool->used);
}
@ -546,9 +559,14 @@ static inline void pool_free(struct pool_head *pool, void *ptr)
#ifndef DEBUG_UAF /* normal pool behaviour */
HA_SPIN_LOCK(POOL_LOCK, &pool->lock);
*POOL_LINK(pool, ptr) = (void *)pool->free_list;
pool->free_list = (void *)ptr;
pool->used--;
if (pool_is_crowded(pool)) {
free(ptr);
pool->allocated--;
} else {
*POOL_LINK(pool, ptr) = (void *)pool->free_list;
pool->free_list = (void *)ptr;
}
pool_avg_add(&pool->needed_avg, pool->used);
HA_SPIN_UNLOCK(POOL_LOCK, &pool->lock);
#else /* release the entry for real to detect use after free */