Mirror of http://git.haproxy.org/git/haproxy.git/ (synced 2025-03-09 21:07:43 +00:00)
MINOR: pool: allocate from the shared cache through the local caches
One of the thread scaling challenges nowadays for the pools is the contention on the shared caches. There is never any situation anymore where we have a shared cache and no local cache, so we can technically afford to transfer objects from the shared cache to the local cache before returning them to the user via the regular path. This adds a little more work per object per miss, but will permit batch processing later.

This patch simply moves pool_get_from_shared_cache() to pool.c under the new name pool_refill_local_from_shared(). The function no longer returns anything; instead it places the allocated object at the head of the local cache.
This commit is contained in:
parent 8c4927098e
commit afe2c4a1fc
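The commit message above describes an allocation path where the thread-local cache is always consulted first and, on a miss, is refilled from the shared cache before the object is handed out. The following standalone C sketch only models that control flow; local_cache, shared_cache, refill_local_from_shared() and get_from_cache() are hypothetical names for illustration, not HAProxy identifiers (the real code uses per-thread pool_cache_head lists and atomic operations, as the diff below shows).

/* Hypothetical, simplified model of the "local cache first, refill on miss"
 * allocation path; single-threaded, no atomics, names are not HAProxy APIs.
 */
#include <stddef.h>
#include <stdio.h>

struct item { struct item *next; };

static struct item *local_cache;   /* stands in for the per-thread cache */
static struct item *shared_cache;  /* stands in for the shared (global) cache */

/* Transfer one object from the shared cache to the head of the local cache,
 * mirroring the role of pool_refill_local_from_shared() in the patch.
 */
static void refill_local_from_shared(void)
{
	struct item *it = shared_cache;

	if (!it)
		return;
	shared_cache = it->next;
	it->next = local_cache;
	local_cache = it;
}

/* The regular path: serve from the local cache, refilling it on a miss. */
static struct item *get_from_cache(void)
{
	struct item *it;

	if (!local_cache) {
		refill_local_from_shared();
		if (!local_cache)
			return NULL;   /* caller would fall back to malloc() */
	}
	it = local_cache;
	local_cache = it->next;
	return it;
}

int main(void)
{
	struct item obj = { NULL };

	shared_cache = &obj;   /* one free object sitting in the shared cache */
	printf("first get:  %p\n", (void *)get_from_cache());  /* served via the local cache */
	printf("second get: %p\n", (void *)get_from_cache());  /* both caches empty -> NULL */
	return 0;
}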
@@ -120,13 +120,9 @@ static inline int pool_is_crowded(const struct pool_head *pool)
 
 #if defined(CONFIG_HAP_NO_GLOBAL_POOLS)
 
-/* this is essentially used with local caches and a fast malloc library,
- * which may sometimes be faster than the local shared pools because it
- * will maintain its own per-thread arenas.
- */
-static inline void *pool_get_from_shared_cache(struct pool_head *pool)
+static inline void pool_refill_local_from_shared(struct pool_head *pool, struct pool_cache_head *pch)
 {
-	return NULL;
+	/* ignored without shared pools */
 }
 
 static inline void pool_put_to_shared_cache(struct pool_head *pool, void *ptr)
@@ -136,44 +132,7 @@ static inline void pool_put_to_shared_cache(struct pool_head *pool, void *ptr)
 
 #else /* CONFIG_HAP_NO_GLOBAL_POOLS */
 
-/*
- * Returns a pointer to type <type> taken from the pool <pool_type> if
- * available, otherwise returns NULL. No malloc() is attempted, and poisonning
- * is never performed. The purpose is to get the fastest possible allocation.
- */
-static inline void *pool_get_from_shared_cache(struct pool_head *pool)
-{
-	void *ret;
-
-	/* we'll need to reference the first element to figure the next one. We
-	 * must temporarily lock it so that nobody allocates then releases it,
-	 * or the dereference could fail.
-	 */
-	ret = pool->free_list;
-	do {
-		while (unlikely(ret == POOL_BUSY)) {
-			__ha_cpu_relax();
-			ret = _HA_ATOMIC_LOAD(&pool->free_list);
-		}
-		if (ret == NULL)
-			return ret;
-	} while (unlikely((ret = _HA_ATOMIC_XCHG(&pool->free_list, POOL_BUSY)) == POOL_BUSY));
-
-	if (unlikely(ret == NULL)) {
-		_HA_ATOMIC_STORE(&pool->free_list, NULL);
-		goto out;
-	}
-
-	/* this releases the lock */
-	_HA_ATOMIC_STORE(&pool->free_list, *(void **)ret);
-	_HA_ATOMIC_INC(&pool->used);
-
-	/* keep track of where the element was allocated from */
-	POOL_DEBUG_SET_MARK(pool, ret);
- out:
-	__ha_barrier_atomic_store();
-	return ret;
-}
+void pool_refill_local_from_shared(struct pool_head *pool, struct pool_cache_head *pch);
 
 /* Locklessly add item <ptr> to pool <pool>, then update the pool used count.
  * Both the pool and the pointer must be valid. Use pool_free() for normal
@@ -218,8 +177,11 @@ static inline void *pool_get_from_cache(struct pool_head *pool)
 	struct pool_cache_head *ph;
 
 	ph = &pool->cache[tid];
-	if (LIST_ISEMPTY(&ph->list))
-		return pool_get_from_shared_cache(pool);
+	if (unlikely(LIST_ISEMPTY(&ph->list))) {
+		pool_refill_local_from_shared(pool, ph);
+		if (LIST_ISEMPTY(&ph->list))
+			return NULL;
+	}
 
 	item = LIST_NEXT(&ph->list, typeof(item), by_pool);
 	ph->count--;
src/pool.c (46 changed lines)
@@ -388,6 +388,52 @@ void pool_gc(struct pool_head *pool_ctx)
 
 #else /* CONFIG_HAP_NO_GLOBAL_POOLS */
 
+/* Tries to refill the local cache <pch> from the shared one for pool <pool>.
+ * This is only used when pools are in use and shared pools are enabled. No
+ * malloc() is attempted, and poisonning is never performed. The purpose is to
+ * get the fastest possible refilling so that the caller can easily check if
+ * the cache has enough objects for its use.
+ */
+void pool_refill_local_from_shared(struct pool_head *pool, struct pool_cache_head *pch)
+{
+	struct pool_cache_item *item;
+	void *ret;
+
+	/* we'll need to reference the first element to figure the next one. We
+	 * must temporarily lock it so that nobody allocates then releases it,
+	 * or the dereference could fail.
+	 */
+	ret = _HA_ATOMIC_LOAD(&pool->free_list);
+	do {
+		while (unlikely(ret == POOL_BUSY)) {
+			__ha_cpu_relax();
+			ret = _HA_ATOMIC_LOAD(&pool->free_list);
+		}
+		if (ret == NULL)
+			return;
+	} while (unlikely((ret = _HA_ATOMIC_XCHG(&pool->free_list, POOL_BUSY)) == POOL_BUSY));
+
+	if (unlikely(ret == NULL)) {
+		HA_ATOMIC_STORE(&pool->free_list, NULL);
+		return;
+	}
+
+	/* this releases the lock */
+	HA_ATOMIC_STORE(&pool->free_list, *(void **)ret);
+	HA_ATOMIC_INC(&pool->used);
+
+	/* keep track of where the element was allocated from */
+	POOL_DEBUG_SET_MARK(pool, ret);
+
+	/* now store the retrieved object into the local cache */
+	item = ret;
+	LIST_INSERT(&pch->list, &item->by_pool);
+	LIST_INSERT(&th_ctx->pool_lru_head, &item->by_lru);
+	pch->count++;
+	pool_cache_count++;
+	pool_cache_bytes += pool->size;
+}
+
 /*
  * This function frees whatever can be freed in pool <pool>.
  */
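The refill function added to src/pool.c relies on a small lock-free protocol around pool->free_list: the head pointer is temporarily swapped to the POOL_BUSY sentinel so that no other thread can allocate and then release the element while its next pointer is being read. The sketch below re-expresses that pop protocol with standard C11 atomics so it compiles on its own; node, BUSY, free_list, pop() and push() are illustrative names, not HAProxy APIs, and push() omits the BUSY handling for brevity.

#include <stdatomic.h>
#include <stdio.h>

struct node { struct node *next; };

#define BUSY ((struct node *)1)           /* sentinel: "the head is locked" */

static _Atomic(struct node *) free_list;  /* shared singly-linked free list */

/* Pop one element, or return NULL if the list is empty. Mirrors the
 * load / spin-on-BUSY / exchange-to-BUSY / publish-next sequence used by
 * pool_refill_local_from_shared() in the diff above.
 */
static struct node *pop(void)
{
	struct node *ret = atomic_load(&free_list);

	do {
		while (ret == BUSY)               /* another thread holds the head */
			ret = atomic_load(&free_list);
		if (ret == NULL)
			return NULL;              /* nothing to grab */
	} while ((ret = atomic_exchange(&free_list, BUSY)) == BUSY);

	if (ret == NULL) {                        /* list emptied while we raced */
		atomic_store(&free_list, NULL);   /* restore the head, releasing the lock */
		return NULL;
	}

	/* publishing the next element releases the "lock" */
	atomic_store(&free_list, ret->next);
	return ret;
}

/* Push one element back (BUSY handling omitted; not safe against concurrent pop()). */
static void push(struct node *n)
{
	n->next = atomic_load(&free_list);
	atomic_store(&free_list, n);
}

int main(void)
{
	struct node a = { NULL }, b = { NULL };

	push(&a);
	push(&b);
	printf("%p %p %p\n", (void *)pop(), (void *)pop(), (void *)pop());
	return 0;
}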