BUG/MEDIUM: memory: Add a rwlock before freeing memory.
When using lockless pools, add a new rwlock, flush_lock. Read-lock it when getting memory from the pool, so that concurrent accesses are still allowed, but write-lock it when we're about to free memory, in pool_flush() and pool_gc().

The problem is that, when removing an item from the pool, we dereference it to get the next one; however, that pointer may have been freed in the meantime, which could provoke a crash if the memory has been unmapped.

It should be OK to use a rwlock: normal operations will still be able to access the pool concurrently, and calls to pool_flush() and pool_gc() should be pretty rare.

This should be backported to 2.1, 2.0 and 1.9.
commit 04f5fe87d3
parent 8af97eb4a1
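The scheme described above can be modeled in a few lines of portable C. The sketch below is only an illustration of the locking pattern, not HAProxy's implementation: it uses pthread rwlocks and C11 atomics in place of the HA_RWLOCK_*/HA_ATOMIC_DWCAS macros, the names (struct pool, pool_get, pool_put, pool_flush) are made up for the example, and it omits the sequence counter (pool->seq) that the real code pairs with the double-word CAS to defeat ABA.

#include <pthread.h>
#include <stdatomic.h>
#include <stdlib.h>

struct item {
	struct item *next;
};

struct pool {
	_Atomic(struct item *) free_list;
	pthread_rwlock_t flush_lock;       /* plays the role of pool->flush_lock */
};

/* Allocation path: take the read lock so any number of threads may pop
 * concurrently, while excluding a flusher that is about to free() nodes.
 */
static struct item *pool_get(struct pool *p)
{
	struct item *cur, *next;

	pthread_rwlock_rdlock(&p->flush_lock);
	cur = atomic_load(&p->free_list);
	do {
		if (!cur) {
			pthread_rwlock_unlock(&p->flush_lock);
			return malloc(sizeof(*cur));      /* pool empty: grow it */
		}
		/* Safe only because a flusher must hold the write lock while
		 * detaching the list: <cur> cannot be freed under our feet.
		 */
		next = cur->next;
	} while (!atomic_compare_exchange_weak(&p->free_list, &cur, next));
	pthread_rwlock_unlock(&p->flush_lock);
	return cur;
}

/* Release path: pushing never dereferences another node, so it needs
 * no lock in this scheme.
 */
static void pool_put(struct pool *p, struct item *it)
{
	struct item *cur = atomic_load(&p->free_list);

	do {
		it->next = cur;
	} while (!atomic_compare_exchange_weak(&p->free_list, &cur, it));
}

/* Flush path: the write lock excludes all readers while the whole list
 * is detached; once detached, the chain is private and can be freed
 * outside the lock, since new readers only see the empty head.
 */
static void pool_flush(struct pool *p)
{
	struct item *head, *tmp;

	pthread_rwlock_wrlock(&p->flush_lock);
	head = atomic_exchange(&p->free_list, NULL);
	pthread_rwlock_unlock(&p->flush_lock);

	while (head) {
		tmp = head;
		head = head->next;
		free(tmp);
	}
}

The key point is that nothing is freed while any reader holds the read lock, so a reader can safely follow the next pointer of the node it is about to pop.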
include/common/memory.h

@@ -78,6 +78,7 @@ struct pool_head {
 	void **free_list;
 #ifdef CONFIG_HAP_LOCKLESS_POOLS
 	uintptr_t seq;
+	HA_RWLOCK_T flush_lock;
 #else
 	__decl_hathreads(HA_SPINLOCK_T lock); /* the spin lock */
 #endif
@@ -221,6 +222,7 @@ static inline void *__pool_get_first(struct pool_head *pool)
 	cmp.seq = pool->seq;
 	__ha_barrier_load();
 
+	HA_RWLOCK_RDLOCK(POOL_LOCK, &pool->flush_lock);
 	cmp.free_list = pool->free_list;
 	do {
 		if (cmp.free_list == NULL)
@@ -230,6 +232,7 @@ static inline void *__pool_get_first(struct pool_head *pool)
 		new.free_list = *POOL_LINK(pool, cmp.free_list);
 	} while (HA_ATOMIC_DWCAS((void *)&pool->free_list, (void *)&cmp, (void *)&new) == 0);
 	__ha_barrier_atomic_store();
+	HA_RWLOCK_RDUNLOCK(POOL_LOCK, &pool->flush_lock);
 
 	_HA_ATOMIC_ADD(&pool->used, 1);
 #ifdef DEBUG_MEMORY_POOLS
src/memory.c (10 changes)
@@ -141,6 +141,8 @@ struct pool_head *create_pool(char *name, unsigned int size, unsigned int flags)
 		}
 #ifndef CONFIG_HAP_LOCKLESS_POOLS
 		HA_SPIN_INIT(&pool->lock);
+#else
+		HA_RWLOCK_INIT(&pool->flush_lock);
 #endif
 	}
 	pool->users++;
@@ -223,6 +225,7 @@ void pool_flush(struct pool_head *pool)
 
 	if (!pool)
 		return;
+	HA_RWLOCK_WRLOCK(POOL_LOCK, &pool->flush_lock);
 	do {
 		cmp.free_list = pool->free_list;
 		cmp.seq = pool->seq;
@@ -230,6 +233,7 @@ void pool_flush(struct pool_head *pool)
 		new.seq = cmp.seq + 1;
 	} while (!_HA_ATOMIC_DWCAS(&pool->free_list, &cmp, &new));
 	__ha_barrier_atomic_store();
+	HA_RWLOCK_WRUNLOCK(POOL_LOCK, &pool->flush_lock);
 	next = cmp.free_list;
 	while (next) {
 		temp = next;
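Note a subtlety in the two pool_flush() hunks above: the write lock is released before the detached chain is walked and freed. This works because the DWCAS has already replaced the list head, so any reader that takes the read lock afterwards can no longer reach the detached items; the write lock only needs to cover the window in which a concurrent __pool_get_first() may still be dereferencing a pointer into the old list.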
@@ -259,6 +263,7 @@ void pool_gc(struct pool_head *pool_ctx)
 		return;
 
 	list_for_each_entry(entry, &pools, list) {
+		HA_RWLOCK_WRLOCK(POOL_LOCK, &entry->flush_lock);
 		while ((int)((volatile int)entry->allocated - (volatile int)entry->used) > (int)entry->minavail) {
 			struct pool_free_list cmp, new;
 
@@ -275,6 +280,7 @@ void pool_gc(struct pool_head *pool_ctx)
 			free(cmp.free_list);
 			_HA_ATOMIC_SUB(&entry->allocated, 1);
 		}
+		HA_RWLOCK_WRUNLOCK(POOL_LOCK, &entry->flush_lock);
 	}
 
 	_HA_ATOMIC_STORE(&recurse, 0);
@@ -629,7 +635,7 @@ int mem_should_fail(const struct pool_head *pool)
 		else
 			ret = 0;
 	}
-	HA_SPIN_LOCK(OTHER_LOCK, &mem_fail_lock);
+	HA_SPIN_LOCK(POOL_LOCK, &mem_fail_lock);
 	n = snprintf(&mem_fail_str[mem_fail_cur_idx * MEM_FAIL_MAX_CHAR],
 	             MEM_FAIL_MAX_CHAR - 2,
 	             "%d %.18s %d %d", mem_fail_cur_idx, pool->name, ret, tid);
@@ -642,7 +648,7 @@ int mem_should_fail(const struct pool_head *pool)
 	mem_fail_cur_idx++;
 	if (mem_fail_cur_idx == MEM_FAIL_MAX_STR)
 		mem_fail_cur_idx = 0;
-	HA_SPIN_UNLOCK(OTHER_LOCK, &mem_fail_lock);
+	HA_SPIN_UNLOCK(POOL_LOCK, &mem_fail_lock);
 	return ret;
 
 }