MEDIUM: threads/pool: Make pool thread-safe by locking all access to a pool

A lock has been added for each memory pool. It is used to protect the pool
during allocations and releases. It is also used when pool information is dumped.
This commit is contained in:
Christopher Faulet 2017-08-29 09:52:38 +02:00 committed by Willy Tarreau
parent f8188c69fa
commit b349e48ede
6 changed files with 73 additions and 27 deletions

View File

@ -138,6 +138,7 @@ int thread_need_sync(void);
enum lock_label {
THREAD_SYNC_LOCK = 0,
POOL_LOCK,
LOCK_LABELS
};
struct lock_stat {
@ -220,7 +221,7 @@ struct ha_rwlock {
static inline void show_lock_stats()
{
const char *labels[LOCK_LABELS] = {"THREAD_SYNC" };
const char *labels[LOCK_LABELS] = {"THREAD_SYNC", "POOL"};
int lbl;
for (lbl = 0; lbl < LOCK_LABELS; lbl++) {

View File

@ -27,6 +27,7 @@
#include <common/config.h>
#include <common/mini-clist.h>
#include <common/hathreads.h>
#ifndef DEBUG_DONT_SHARE_POOLS
#define MEM_F_SHARED 0x1
@ -46,6 +47,9 @@
struct pool_head {
void **free_list;
#ifdef USE_THREAD
HA_SPINLOCK_T lock; /* the spin lock */
#endif
struct list list; /* list of all known pools */
unsigned int used; /* how many chunks are currently in use */
unsigned int allocated; /* how many chunks have been allocated */
@ -69,6 +73,7 @@ extern int mem_poison_byte;
* A call to the garbage collector is performed at most once in case malloc()
* returns an error, before returning NULL.
*/
void *__pool_refill_alloc(struct pool_head *pool, unsigned int avail);
void *pool_refill_alloc(struct pool_head *pool, unsigned int avail);
/* Try to find an existing shared pool with the same characteristics and
@ -93,8 +98,12 @@ void pool_flush2(struct pool_head *pool);
/*
* This function frees whatever can be freed in all pools, but respecting
* the minimum thresholds imposed by owners.
*
* <pool_ctx> is used when pool_gc2 is called to release resources to allocate
* an element in __pool_refill_alloc. It is important because <pool_ctx> is
* already locked, so we need to skip the lock here.
*/
void pool_gc2();
void pool_gc2(struct pool_head *pool_ctx);
/*
* This function destroys a pool by freeing it completely.
@ -107,7 +116,7 @@ void *pool_destroy2(struct pool_head *pool);
* available, otherwise returns NULL. No malloc() is attempted, and poisonning
* is never performed. The purpose is to get the fastest possible allocation.
*/
static inline void *pool_get_first(struct pool_head *pool)
static inline void *__pool_get_first(struct pool_head *pool)
{
void *p;
@ -122,6 +131,15 @@ static inline void *pool_get_first(struct pool_head *pool)
return p;
}
/* Thread-safe wrapper around __pool_get_first(): grabs the first free
 * object from pool <pool> while holding the pool's spinlock, or returns
 * NULL when the free list is empty. No malloc() is attempted here.
 */
static inline void *pool_get_first(struct pool_head *pool)
{
	void *ptr;

	SPIN_LOCK(POOL_LOCK, &pool->lock);
	ptr = __pool_get_first(pool);
	SPIN_UNLOCK(POOL_LOCK, &pool->lock);
	return ptr;
}
/*
* Returns a pointer to type <type> taken from the pool <pool_type> or
* dynamically allocated. In the first case, <pool_type> is updated to point to
@ -132,9 +150,10 @@ static inline void *pool_alloc_dirty(struct pool_head *pool)
{
void *p;
if ((p = pool_get_first(pool)) == NULL)
p = pool_refill_alloc(pool, 0);
SPIN_LOCK(POOL_LOCK, &pool->lock);
if ((p = __pool_get_first(pool)) == NULL)
p = __pool_refill_alloc(pool, 0);
SPIN_UNLOCK(POOL_LOCK, &pool->lock);
return p;
}
@ -150,8 +169,10 @@ static inline void *pool_alloc2(struct pool_head *pool)
p = pool_alloc_dirty(pool);
#ifdef DEBUG_MEMORY_POOLS
if (p) {
SPIN_LOCK(POOL_LOCK, &pool->lock);
/* keep track of where the element was allocated from */
*POOL_LINK(pool, p) = (void *)pool;
SPIN_UNLOCK(POOL_LOCK, &pool->lock);
}
#endif
if (p && mem_poison_byte >= 0) {
@ -173,6 +194,7 @@ static inline void *pool_alloc2(struct pool_head *pool)
static inline void pool_free2(struct pool_head *pool, void *ptr)
{
if (likely(ptr != NULL)) {
SPIN_LOCK(POOL_LOCK, &pool->lock);
#ifdef DEBUG_MEMORY_POOLS
/* we'll get late corruption if we refill to the wrong pool or double-free */
if (*POOL_LINK(pool, ptr) != (void *)pool)
@ -181,10 +203,9 @@ static inline void pool_free2(struct pool_head *pool, void *ptr)
*POOL_LINK(pool, ptr) = (void *)pool->free_list;
pool->free_list = (void *)ptr;
pool->used--;
SPIN_UNLOCK(POOL_LOCK, &pool->lock);
}
}
#endif /* _COMMON_MEMORY_H */
/*

View File

@ -745,7 +745,7 @@ static void sig_soft_stop(struct sig_handler *sh)
{
soft_stop();
signal_unregister_handler(sh);
pool_gc2();
pool_gc2(NULL);
}
/*
@ -754,7 +754,7 @@ static void sig_soft_stop(struct sig_handler *sh)
static void sig_pause(struct sig_handler *sh)
{
pause_proxies();
pool_gc2();
pool_gc2(NULL);
}
/*
@ -818,7 +818,7 @@ static void dump(struct sig_handler *sh)
{
/* dump memory usage then free everything possible */
dump_pools();
pool_gc2();
pool_gc2(NULL);
}
/* This function checks if cfg_cfgfiles contains directories.

View File

@ -57,6 +57,8 @@ struct pool_head *create_pool(char *name, unsigned int size, unsigned int flags)
size = ((size + POOL_EXTRA + align - 1) & -align) - POOL_EXTRA;
}
/* TODO: thread: we do not lock pool list for now because all pools are
* created during HAProxy startup (so before threads creation) */
start = &pools;
pool = NULL;
@ -91,6 +93,7 @@ struct pool_head *create_pool(char *name, unsigned int size, unsigned int flags)
LIST_ADDQ(start, &pool->list);
}
pool->users++;
SPIN_INIT(&pool->lock);
return pool;
}
@ -102,7 +105,7 @@ struct pool_head *create_pool(char *name, unsigned int size, unsigned int flags)
* A call to the garbage collector is performed at most once in case malloc()
* returns an error, before returning NULL.
*/
void *pool_refill_alloc(struct pool_head *pool, unsigned int avail)
void *__pool_refill_alloc(struct pool_head *pool, unsigned int avail)
{
void *ptr = NULL;
int failed = 0;
@ -120,7 +123,7 @@ void *pool_refill_alloc(struct pool_head *pool, unsigned int avail)
if (failed)
return NULL;
failed++;
pool_gc2();
pool_gc2(pool);
continue;
}
if (++pool->allocated > avail)
@ -136,7 +139,15 @@ void *pool_refill_alloc(struct pool_head *pool, unsigned int avail)
#endif
return ptr;
}
/* Thread-safe wrapper around __pool_refill_alloc(): allocates a new entry
 * for pool <pool> while holding the pool's spinlock, passing <avail>
 * through unchanged. Returns the new entry, or NULL on allocation failure.
 */
void *pool_refill_alloc(struct pool_head *pool, unsigned int avail)
{
	void *ret;

	SPIN_LOCK(POOL_LOCK, &pool->lock);
	ret = __pool_refill_alloc(pool, avail);
	SPIN_UNLOCK(POOL_LOCK, &pool->lock);
	return ret;
}
/*
* This function frees whatever can be freed in pool <pool>.
*/
@ -146,6 +157,7 @@ void pool_flush2(struct pool_head *pool)
if (!pool)
return;
SPIN_LOCK(POOL_LOCK, &pool->lock);
next = pool->free_list;
while (next) {
temp = next;
@ -154,7 +166,7 @@ void pool_flush2(struct pool_head *pool)
free(temp);
}
pool->free_list = next;
SPIN_UNLOCK(POOL_LOCK, &pool->lock);
/* here, we should have pool->allocated == pool->used */
}
@ -162,18 +174,25 @@ void pool_flush2(struct pool_head *pool)
* This function frees whatever can be freed in all pools, but respecting
* the minimum thresholds imposed by owners. It takes care of avoiding
* recursion because it may be called from a signal handler.
*
* <pool_ctx> is used when pool_gc2 is called to release resources to allocate
* an element in __pool_refill_alloc. It is important because <pool_ctx> is
* already locked, so we need to skip the lock here.
*/
void pool_gc2()
void pool_gc2(struct pool_head *pool_ctx)
{
static int recurse;
int cur_recurse = 0;
struct pool_head *entry;
if (recurse++)
goto out;
if (recurse || !HA_ATOMIC_CAS(&recurse, &cur_recurse, 1))
return;
list_for_each_entry(entry, &pools, list) {
void *temp, *next;
//qfprintf(stderr, "Flushing pool %s\n", entry->name);
if (entry != pool_ctx)
SPIN_LOCK(POOL_LOCK, &entry->lock);
next = entry->free_list;
while (next &&
(int)(entry->allocated - entry->used) > (int)entry->minavail) {
@ -183,9 +202,11 @@ void pool_gc2()
free(temp);
}
entry->free_list = next;
if (entry != pool_ctx)
SPIN_UNLOCK(POOL_LOCK, &entry->lock);
}
out:
recurse--;
HA_ATOMIC_STORE(&recurse, 0);
}
/*
@ -204,6 +225,7 @@ void *pool_destroy2(struct pool_head *pool)
pool->users--;
if (!pool->users) {
LIST_DEL(&pool->list);
SPIN_DESTROY(&pool->lock);
free(pool);
}
}
@ -220,6 +242,7 @@ void dump_pools_to_trash()
allocated = used = nbpools = 0;
chunk_printf(&trash, "Dumping pools usage. Use SIGQUIT to flush them.\n");
list_for_each_entry(entry, &pools, list) {
SPIN_LOCK(POOL_LOCK, &entry->lock);
chunk_appendf(&trash, " - Pool %s (%d bytes) : %d allocated (%u bytes), %d used, %d failures, %d users%s\n",
entry->name, entry->size, entry->allocated,
entry->size * entry->allocated, entry->used, entry->failed,
@ -228,6 +251,7 @@ void dump_pools_to_trash()
allocated += entry->allocated * entry->size;
used += entry->used * entry->size;
nbpools++;
SPIN_UNLOCK(POOL_LOCK, &entry->lock);
}
chunk_appendf(&trash, "Total: %d pools, %lu bytes allocated, %lu used.\n",
nbpools, allocated, used);

View File

@ -853,7 +853,7 @@ struct task *manage_proxy(struct task *t)
p->id, p->fe_counters.cum_conn, p->be_counters.cum_conn);
stop_proxy(p);
/* try to free more memory */
pool_gc2();
pool_gc2(NULL);
}
else {
next = tick_first(next, p->stop_time);
@ -870,7 +870,7 @@ struct task *manage_proxy(struct task *t)
if (unlikely(stopping && p->state == PR_STSTOPPED && p->table.current)) {
if (!p->table.syncing) {
stktable_trash_oldest(&p->table, p->table.current);
pool_gc2();
pool_gc2(NULL);
}
if (p->table.current) {
/* some entries still remain, let's recheck in one second */

View File

@ -4793,7 +4793,7 @@ static int ssl_sock_init(struct connection *conn)
conn->xprt_ctx = SSL_new(objt_server(conn->target)->ssl_ctx.ctx);
if (!conn->xprt_ctx) {
if (may_retry--) {
pool_gc2();
pool_gc2(NULL);
goto retry_connect;
}
conn->err_code = CO_ER_SSL_NO_MEM;
@ -4805,7 +4805,7 @@ static int ssl_sock_init(struct connection *conn)
SSL_free(conn->xprt_ctx);
conn->xprt_ctx = NULL;
if (may_retry--) {
pool_gc2();
pool_gc2(NULL);
goto retry_connect;
}
conn->err_code = CO_ER_SSL_NO_MEM;
@ -4817,7 +4817,7 @@ static int ssl_sock_init(struct connection *conn)
SSL_free(conn->xprt_ctx);
conn->xprt_ctx = NULL;
if (may_retry--) {
pool_gc2();
pool_gc2(NULL);
goto retry_connect;
}
conn->err_code = CO_ER_SSL_NO_MEM;
@ -4847,7 +4847,7 @@ static int ssl_sock_init(struct connection *conn)
conn->xprt_ctx = SSL_new(objt_listener(conn->target)->bind_conf->initial_ctx);
if (!conn->xprt_ctx) {
if (may_retry--) {
pool_gc2();
pool_gc2(NULL);
goto retry_accept;
}
conn->err_code = CO_ER_SSL_NO_MEM;
@ -4859,7 +4859,7 @@ static int ssl_sock_init(struct connection *conn)
SSL_free(conn->xprt_ctx);
conn->xprt_ctx = NULL;
if (may_retry--) {
pool_gc2();
pool_gc2(NULL);
goto retry_accept;
}
conn->err_code = CO_ER_SSL_NO_MEM;
@ -4871,7 +4871,7 @@ static int ssl_sock_init(struct connection *conn)
SSL_free(conn->xprt_ctx);
conn->xprt_ctx = NULL;
if (may_retry--) {
pool_gc2();
pool_gc2(NULL);
goto retry_accept;
}
conn->err_code = CO_ER_SSL_NO_MEM;