
MINOR: memory: Change the flush_lock to a spinlock, and don't get it in alloc.

The flush_lock was introduced mostly to make sure that pool_gc() will never
dereference a pointer that has been free'd. __pool_get_first() was acquiring
the lock too, out of fear that the pointer could otherwise get free'd later
and pool_gc() would then attempt to dereference it. However, that cannot
happen, because the only functions that can free a pointer, when using
lockless pools, are pool_gc() and pool_flush(), and as long as those two
are mutually exclusive, nobody can free the pointer while pool_gc()
attempts to access it.
So change the flush_lock to a spinlock, and don't bother acquiring/releasing
it in __pool_get_first(), so that callers of __pool_get_first() won't have
to wait while the pool is flushed. The worst that can happen is that we call
__pool_refill_alloc() while the pool is being flushed, and memory gets
allocated just to be free'd right away.
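
To illustrate the reasoning, here is a minimal, standalone C11 sketch of the
scheme the message describes, not HAProxy's actual code: the names pool_push,
pool_pop and pool_flush_all are illustrative, and C11 atomics stand in for
HAProxy's HA_ATOMIC_DWCAS and HA_SPINLOCK_T wrappers. Allocation pops with a
double-word CAS on a {pointer, sequence} pair and takes no lock at all; only
the freeing paths contend for the spinlock, so they exclude each other.

    /* Minimal sketch -- not HAProxy code. The 16-byte atomic struct may
     * fall back to a libatomic lock on some targets; HAProxy uses a
     * native double-word CAS instead. */
    #include <stdatomic.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct free_list {
        void *head;     /* first free item; items chain via *(void **)item */
        uintptr_t seq;  /* bumped on every update, defeats ABA */
    };

    struct pool {
        _Atomic struct free_list fl;
        atomic_flag flush_lock;    /* stands in for HA_SPINLOCK_T */
    };

    /* lockless push: return an item to the pool's free list */
    static void pool_push(struct pool *p, void *item)
    {
        struct free_list cmp = atomic_load(&p->fl), next;

        do {
            *(void **)item = cmp.head;
            next.head = item;
            next.seq = cmp.seq + 1;
        } while (!atomic_compare_exchange_weak(&p->fl, &cmp, next));
    }

    /* lockless pop, analogous to __pool_get_first(): no flush_lock taken */
    static void *pool_pop(struct pool *p)
    {
        struct free_list cmp = atomic_load(&p->fl), next;

        do {
            if (cmp.head == NULL)
                return NULL;
            next.head = *(void **)cmp.head;
            next.seq = cmp.seq + 1;
        } while (!atomic_compare_exchange_weak(&p->fl, &cmp, next));
        return cmp.head;
    }

    /* the only path that frees memory: detach the whole list under the
     * spinlock, then free it at leisure; a concurrent caller of this
     * function (pool_flush()/pool_gc() in HAProxy) spins until we're done */
    static void pool_flush_all(struct pool *p)
    {
        struct free_list cmp = atomic_load(&p->fl), next = { NULL, 0 };
        void *item, *n;

        while (atomic_flag_test_and_set(&p->flush_lock))
            ;   /* spin: excludes the other freeing path */
        do {
            next.seq = cmp.seq + 1;
        } while (!atomic_compare_exchange_weak(&p->fl, &cmp, next));
        atomic_flag_clear(&p->flush_lock);

        for (item = cmp.head; item; item = n) {
            n = *(void **)item;
            free(item);
        }
    }

    int main(void)
    {
        struct pool p = { .fl = { NULL, 0 }, .flush_lock = ATOMIC_FLAG_INIT };
        void *item;

        pool_push(&p, malloc(64));
        pool_push(&p, malloc(64));
        item = pool_pop(&p);       /* lock-free fast path */
        printf("popped %p\n", item);
        free(item);
        pool_flush_all(&p);        /* frees the remaining item */
        return 0;
    }

Since the freeing paths serialize on the spinlock, whichever one detached the
list owns it exclusively, so pool_gc() can never dereference memory freed out
from under it; the pop path stays wait-free for everyone else.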

This may help with github issue 

This may be backported to 2.1, 2.0 and 1.9.
Olivier Houchard 2020-03-18 15:48:29 +01:00
parent b0198cc413
commit 899fb8abdc
2 changed files with 7 additions and 11 deletions
include/common
@@ -78,7 +78,7 @@ struct pool_head {
     void **free_list;
 #ifdef CONFIG_HAP_LOCKLESS_POOLS
     uintptr_t seq;
-    HA_RWLOCK_T flush_lock;
+    HA_SPINLOCK_T flush_lock;
 #else
     __decl_hathreads(HA_SPINLOCK_T lock); /* the spin lock */
 #endif
@@ -222,19 +222,15 @@ static inline void *__pool_get_first(struct pool_head *pool)
     cmp.seq = pool->seq;
     __ha_barrier_load();
 
-    HA_RWLOCK_RDLOCK(POOL_LOCK, &pool->flush_lock);
     cmp.free_list = pool->free_list;
     do {
-        if (cmp.free_list == NULL) {
-            HA_RWLOCK_RDUNLOCK(POOL_LOCK, &pool->flush_lock);
+        if (cmp.free_list == NULL)
             return NULL;
-        }
         new.seq = cmp.seq + 1;
         __ha_barrier_load();
         new.free_list = *POOL_LINK(pool, cmp.free_list);
     } while (HA_ATOMIC_DWCAS((void *)&pool->free_list, (void *)&cmp, (void *)&new) == 0);
     __ha_barrier_atomic_store();
-    HA_RWLOCK_RDUNLOCK(POOL_LOCK, &pool->flush_lock);
 
     _HA_ATOMIC_ADD(&pool->used, 1);
 #ifdef DEBUG_MEMORY_POOLS

src

@@ -142,7 +142,7 @@ struct pool_head *create_pool(char *name, unsigned int size, unsigned int flags)
 #ifndef CONFIG_HAP_LOCKLESS_POOLS
         HA_SPIN_INIT(&pool->lock);
 #else
-        HA_RWLOCK_INIT(&pool->flush_lock);
+        HA_SPIN_INIT(&pool->flush_lock);
 #endif
     }
     pool->users++;
pool->users++;
@@ -225,7 +225,7 @@ void pool_flush(struct pool_head *pool)
 
     if (!pool)
         return;
-    HA_RWLOCK_WRLOCK(POOL_LOCK, &pool->flush_lock);
+    HA_SPIN_LOCK(POOL_LOCK, &pool->flush_lock);
     do {
         cmp.free_list = pool->free_list;
         cmp.seq = pool->seq;
@@ -233,7 +233,7 @@ void pool_flush(struct pool_head *pool)
         new.seq = cmp.seq + 1;
     } while (!_HA_ATOMIC_DWCAS(&pool->free_list, &cmp, &new));
     __ha_barrier_atomic_store();
-    HA_RWLOCK_WRUNLOCK(POOL_LOCK, &pool->flush_lock);
+    HA_SPIN_UNLOCK(POOL_LOCK, &pool->flush_lock);
     next = cmp.free_list;
     while (next) {
         temp = next;
@@ -263,7 +263,7 @@ void pool_gc(struct pool_head *pool_ctx)
         return;
 
     list_for_each_entry(entry, &pools, list) {
-        HA_RWLOCK_WRLOCK(POOL_LOCK, &entry->flush_lock);
+        HA_SPIN_LOCK(POOL_LOCK, &entry->flush_lock);
         while ((int)((volatile int)entry->allocated - (volatile int)entry->used) > (int)entry->minavail) {
             struct pool_free_list cmp, new;
@@ -280,7 +280,7 @@ void pool_gc(struct pool_head *pool_ctx)
             free(cmp.free_list);
             _HA_ATOMIC_SUB(&entry->allocated, 1);
         }
-        HA_RWLOCK_WRUNLOCK(POOL_LOCK, &entry->flush_lock);
+        HA_SPIN_UNLOCK(POOL_LOCK, &entry->flush_lock);
     }
 
     _HA_ATOMIC_STORE(&recurse, 0);