MINOR: pool: only use opportunistic versions of the swrate_add() functions

We don't need to know very accurately how much RAM is needed in a pool;
however, we must not spend time competing with other threads trying to
be the one with the most accurate value. Let's use the "_opportunistic"
variants of swrate_add(), which simply cause some updates to be dropped
in case of thread contention. This should significantly improve the
situation when dealing with many threads and small per-thread caches.

Performance gains of up to 1-2% were observed on 48-thread systems thanks
to this alone.
Willy Tarreau 2022-12-19 17:26:25 +01:00
parent e327b4a73e
commit 2aa14ce5a1
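
For context, here is a minimal sketch of what distinguishes the strict and
opportunistic variants. It is not the actual code from
include/haproxy/freq_ctr.h: the sketch_* names are made up for illustration,
it uses C11 atomics instead of HAProxy's atomic macros, and the
sliding-window formula is only approximate. It shows the idea, though: the
strict variant retries its CAS until the sample is recorded, while the
opportunistic one tries exactly once and drops the sample when another
thread wins the race.

#include <stdatomic.h>

/* Illustrative sketch only: a sliding average over <n> samples kept as a
 * running sum, where each new sample <v> replaces roughly 1/n of the sum.
 */
static inline unsigned int sketch_swrate_add(_Atomic unsigned int *sum,
                                             unsigned int n, unsigned int v)
{
	unsigned int old_sum = atomic_load_explicit(sum, memory_order_relaxed);
	unsigned int new_sum;

	/* strict variant: retry until the CAS succeeds so that every sample
	 * is accounted for, even under heavy thread contention.
	 */
	do {
		new_sum = old_sum - (old_sum + n - 1) / n + v;
	} while (!atomic_compare_exchange_weak(sum, &old_sum, new_sum));
	return new_sum;
}

static inline unsigned int sketch_swrate_add_opportunistic(_Atomic unsigned int *sum,
                                                           unsigned int n, unsigned int v)
{
	unsigned int old_sum = atomic_load_explicit(sum, memory_order_relaxed);
	unsigned int new_sum = old_sum - (old_sum + n - 1) / n + v;

	/* opportunistic variant: try the CAS exactly once; if another thread
	 * updated the sum in the meantime, this sample is simply dropped and
	 * the return value is only a hint.
	 */
	atomic_compare_exchange_weak(sum, &old_sum, new_sum);
	return new_sum;
}

Since the average only needs to be roughly right, as the commit message
notes, dropping an occasional sample is harmless, while the retry loop
would keep every thread bouncing the same cache line under contention.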

@@ -380,7 +380,7 @@ void *pool_alloc_nocache(struct pool_head *pool)
 	if (!ptr)
 		return NULL;
-	swrate_add_scaled(&pool->needed_avg, POOL_AVG_SAMPLES, pool->used, POOL_AVG_SAMPLES/4);
+	swrate_add_scaled_opportunistic(&pool->needed_avg, POOL_AVG_SAMPLES, pool->used, POOL_AVG_SAMPLES/4);
 	_HA_ATOMIC_INC(&pool->used);
 	/* keep track of where the element was allocated from */
@@ -396,7 +396,7 @@ void *pool_alloc_nocache(struct pool_head *pool)
 void pool_free_nocache(struct pool_head *pool, void *ptr)
 {
 	_HA_ATOMIC_DEC(&pool->used);
-	swrate_add(&pool->needed_avg, POOL_AVG_SAMPLES, pool->used);
+	swrate_add_opportunistic(&pool->needed_avg, POOL_AVG_SAMPLES, pool->used);
 	pool_put_to_os(pool, ptr);
 }
@@ -655,7 +655,7 @@ void pool_put_to_shared_cache(struct pool_head *pool, struct pool_item *item, ui
 		__ha_barrier_atomic_store();
 	} while (!_HA_ATOMIC_CAS(&pool->free_list, &free_list, item));
 	__ha_barrier_atomic_store();
-	swrate_add(&pool->needed_avg, POOL_AVG_SAMPLES, pool->used);
+	swrate_add_opportunistic(&pool->needed_avg, POOL_AVG_SAMPLES, pool->used);
 }
 /*