MINOR: memory: Change the flush_lock to a spinlock, and don't get it in alloc.
The flush_lock was introduced mostly to make sure that pool_gc() will never dereference a pointer that has been free'd. __pool_get_first() used to acquire the lock too; the fear was that otherwise the pointer could get free'd later, and then pool_gc() would attempt to dereference it. However, that cannot happen, because the only functions that can free a pointer when using lockless pools are pool_gc() and pool_flush(), and as long as those two are mutually exclusive, nobody will be able to free the pointer while pool_gc() attempts to access it.

So change the flush_lock to a spinlock, and don't bother acquiring/releasing it in __pool_get_first(); that way callers of __pool_get_first() won't have to wait while the pool is being flushed. The worst that can happen is that we call __pool_refill_alloc() while the pool is being flushed, and memory gets allocated just to be free'd.

This may help with github issue #552.

This may be backported to 2.1, 2.0 and 1.9.
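For illustration, here is a minimal sketch of the locking rule described above, in plain C with pthread spinlocks. All names (mini_pool, mini_flush, mini_gc) are hypothetical; this is not HAProxy's code. Since free() is only reachable from the two reclaiming paths, one spinlock making them mutually exclusive is enough, and the allocation fast path (the lock-free DWCAS pop in the diff below) never takes it. The sketch only models the lock placement: the real code manipulates the free list with atomic operations so the unlocked allocator can run concurrently.

#include <pthread.h>
#include <stdlib.h>

struct mini_pool {
	void *free_list;               /* spare objects; each object stores
	                                  its "next" pointer in-place      */
	pthread_spinlock_t flush_lock; /* serializes the two reclaimers    */
};

/* pool_flush() analogue: the first of the only two paths that free() */
static void mini_flush(struct mini_pool *p)
{
	pthread_spin_lock(&p->flush_lock);   /* excludes mini_gc() */
	while (p->free_list) {
		void *item = p->free_list;
		p->free_list = *(void **)item;   /* follow stored next pointer */
		free(item);
	}
	pthread_spin_unlock(&p->flush_lock);
}

/* pool_gc() analogue: releases at most n spare objects */
static void mini_gc(struct mini_pool *p, int n)
{
	pthread_spin_lock(&p->flush_lock);   /* excludes mini_flush() */
	while (n-- > 0 && p->free_list) {
		void *item = p->free_list;
		p->free_list = *(void **)item;
		free(item);
	}
	pthread_spin_unlock(&p->flush_lock);
}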
parent b0198cc413
commit 899fb8abdc
@@ -78,7 +78,7 @@ struct pool_head {
 	void **free_list;
 #ifdef CONFIG_HAP_LOCKLESS_POOLS
 	uintptr_t seq;
-	HA_RWLOCK_T flush_lock;
+	HA_SPINLOCK_T flush_lock;
 #else
 	__decl_hathreads(HA_SPINLOCK_T lock); /* the spin lock */
 #endif
@@ -222,19 +222,15 @@ static inline void *__pool_get_first(struct pool_head *pool)
 	cmp.seq = pool->seq;
 	__ha_barrier_load();
 
-	HA_RWLOCK_RDLOCK(POOL_LOCK, &pool->flush_lock);
 	cmp.free_list = pool->free_list;
 	do {
-		if (cmp.free_list == NULL) {
-			HA_RWLOCK_RDUNLOCK(POOL_LOCK, &pool->flush_lock);
+		if (cmp.free_list == NULL)
 			return NULL;
-		}
 		new.seq = cmp.seq + 1;
 		__ha_barrier_load();
 		new.free_list = *POOL_LINK(pool, cmp.free_list);
 	} while (HA_ATOMIC_DWCAS((void *)&pool->free_list, (void *)&cmp, (void *)&new) == 0);
 	__ha_barrier_atomic_store();
-	HA_RWLOCK_RDUNLOCK(POOL_LOCK, &pool->flush_lock);
 
 	_HA_ATOMIC_ADD(&pool->used, 1);
 #ifdef DEBUG_MEMORY_POOLS
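A note on why dropping the read lock is safe in this loop: the pop is protected against ABA because the {free_list, seq} pair is compared and swapped as a single unit. Below is a hypothetical stand-alone rendering of the same loop using C11 atomics in place of HAProxy's HA_ATOMIC_* macros (the names slot, versioned_head and pop_first are invented for the sketch). If the list changes between the load and the CAS, seq differs, the CAS fails, and anything read from a possibly stale node is discarded and re-read; on typical 64-bit targets the 16-byte compare-exchange maps to a double-width CAS, which is what HA_ATOMIC_DWCAS does by hand.

#include <stdatomic.h>
#include <stdint.h>
#include <stddef.h>

struct slot { struct slot *next; };

struct versioned_head {
	struct slot *head;  /* first spare object, or NULL       */
	uintptr_t    seq;   /* bumped by every successful update */
};

static _Atomic struct versioned_head pool_head;

static struct slot *pop_first(void)
{
	struct versioned_head cur, new;

	cur = atomic_load(&pool_head);
	do {
		if (cur.head == NULL)
			return NULL;           /* empty: caller falls back to malloc() */
		new.head = cur.head->next; /* may read a stale node; the value is  */
		new.seq  = cur.seq + 1;    /* discarded if the CAS below fails     */
	} while (!atomic_compare_exchange_weak(&pool_head, &cur, new));
	return cur.head;               /* now exclusively ours */
}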
src/memory.c (10 changed lines)
@@ -142,7 +142,7 @@ struct pool_head *create_pool(char *name, unsigned int size, unsigned int flags)
 #ifndef CONFIG_HAP_LOCKLESS_POOLS
 		HA_SPIN_INIT(&pool->lock);
 #else
-		HA_RWLOCK_INIT(&pool->flush_lock);
+		HA_SPIN_INIT(&pool->flush_lock);
 #endif
 	}
 	pool->users++;
@@ -225,7 +225,7 @@ void pool_flush(struct pool_head *pool)
 
 	if (!pool)
 		return;
-	HA_RWLOCK_WRLOCK(POOL_LOCK, &pool->flush_lock);
+	HA_SPIN_LOCK(POOL_LOCK, &pool->flush_lock);
 	do {
 		cmp.free_list = pool->free_list;
 		cmp.seq = pool->seq;
@@ -233,7 +233,7 @@ void pool_flush(struct pool_head *pool)
 		new.seq = cmp.seq + 1;
 	} while (!_HA_ATOMIC_DWCAS(&pool->free_list, &cmp, &new));
 	__ha_barrier_atomic_store();
-	HA_RWLOCK_WRUNLOCK(POOL_LOCK, &pool->flush_lock);
+	HA_SPIN_UNLOCK(POOL_LOCK, &pool->flush_lock);
 	next = cmp.free_list;
 	while (next) {
 		temp = next;
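The two pool_flush() hunks keep a shape worth noting: the spinlock is held only around the atomic detach of the whole list, and the free() walk runs after unlock, so allocators are stalled as briefly as possible. A minimal sketch of that shape follows, with hypothetical names and a plain atomic_exchange standing in for the DWCAS-plus-seq loop the real code uses.

#include <pthread.h>
#include <stdatomic.h>
#include <stdlib.h>

struct fslot { struct fslot *next; };

static _Atomic(struct fslot *) flist;   /* head of the spare-object list */
static pthread_spinlock_t flock;        /* pthread_spin_init()'d at startup */

static void flush_sketch(void)
{
	struct fslot *next, *temp;

	pthread_spin_lock(&flock);          /* exclude the gc path */
	/* one atomic swap detaches every spare object at once */
	next = atomic_exchange(&flist, NULL);
	pthread_spin_unlock(&flock);

	while (next) {                      /* free with no lock held */
		temp = next;
		next = next->next;
		free(temp);
	}
}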
@@ -263,7 +263,7 @@ void pool_gc(struct pool_head *pool_ctx)
 		return;
 
 	list_for_each_entry(entry, &pools, list) {
-		HA_RWLOCK_WRLOCK(POOL_LOCK, &entry->flush_lock);
+		HA_SPIN_LOCK(POOL_LOCK, &entry->flush_lock);
 		while ((int)((volatile int)entry->allocated - (volatile int)entry->used) > (int)entry->minavail) {
 			struct pool_free_list cmp, new;
 
@@ -280,7 +280,7 @@ void pool_gc(struct pool_head *pool_ctx)
 			free(cmp.free_list);
 			_HA_ATOMIC_SUB(&entry->allocated, 1);
 		}
-		HA_RWLOCK_WRUNLOCK(POOL_LOCK, &entry->flush_lock);
+		HA_SPIN_UNLOCK(POOL_LOCK, &entry->flush_lock);
 	}
 
 	_HA_ATOMIC_STORE(&recurse, 0);