MINOR: pools/threads: Implement lockless memory pools.

On CPUs that support a double-width compare-and-swap, implement lockless
pools.
Olivier Houchard 2018-01-24 18:38:31 +01:00 committed by Willy Tarreau
parent 5266b3e12d
commit cf975d46bc
3 changed files with 258 additions and 3 deletions
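Background note (not part of the commit): a single compare-and-swap on the free-list head is not enough for the pop side because of the ABA problem; the new seq counter is updated together with the head pointer in one double-width CAS, so a pop that raced with other pops and pushes fails and retries instead of re-linking a stale next pointer. Below is a minimal standalone sketch of that pattern using GCC's generic __atomic builtins rather than HAProxy's __ha_cas_dw()/__ha_barrier_load(); the lifo/item/gen names, the file name and the build flags are illustrative only.

/* Sketch of an ABA-safe lockless pop via a 16-byte (double-width) CAS.
 * Build e.g. with: gcc -O2 -mcx16 dwcas_pop.c -latomic
 */
#include <stdint.h>
#include <stddef.h>

struct item { struct item *next; };

/* head pointer + generation counter, always updated as one 16-byte unit */
struct lifo {
	struct item *top;
	uintptr_t    gen;
} __attribute__((aligned(16)));

static struct item *lifo_pop(struct lifo *l)
{
	struct lifo old, new;

	__atomic_load(l, &old, __ATOMIC_ACQUIRE);
	do {
		if (old.top == NULL)
			return NULL;
		/* safe for pools: entries are recycled but never unmapped */
		new.top = old.top->next;
		/* every successful pop bumps gen, so a CAS based on a stale
		 * snapshot fails even if the same top pointer reappeared
		 */
		new.gen = old.gen + 1;
	} while (!__atomic_compare_exchange(l, &old, &new, 0,
	                                    __ATOMIC_ACQ_REL, __ATOMIC_ACQUIRE));
	return old.top;
}

int main(void)
{
	static struct item a = { NULL }, b = { &a };
	static struct lifo l = { &b, 0 };

	return (lifo_pop(&l) == &b && lifo_pop(&l) == &a && lifo_pop(&l) == NULL) ? 0 : 1;
}

Pushing never dereferences an entry taken from the list, so it cannot be fooled by ABA; that is why pool_free() and __pool_refill_alloc() below get by with a single-width HA_ATOMIC_CAS on free_list, while only __pool_get_first() and pool_gc() need the double-width __ha_cas_dw() gated by HA_HAVE_CAS_DW.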


@@ -735,13 +735,17 @@ static inline struct buffer *b_alloc_margin(struct buffer **buf, int margin)
return *buf;
*buf = &buf_wanted;
#ifndef HA_HAVE_CAS_DW
HA_SPIN_LOCK(POOL_LOCK, &pool_head_buffer->lock);
#endif
/* fast path */
if ((pool_head_buffer->allocated - pool_head_buffer->used) > margin) {
b = __pool_get_first(pool_head_buffer);
if (likely(b)) {
#ifndef HA_HAVE_CAS_DW
HA_SPIN_UNLOCK(POOL_LOCK, &pool_head_buffer->lock);
#endif
b->size = pool_head_buffer->size - sizeof(struct buffer);
b_reset(b);
*buf = b;
@@ -752,7 +756,9 @@ static inline struct buffer *b_alloc_margin(struct buffer **buf, int margin)
/* slow path, uses malloc() */
b = __pool_refill_alloc(pool_head_buffer, margin);
#ifndef HA_HAVE_CAS_DW
HA_SPIN_UNLOCK(POOL_LOCK, &pool_head_buffer->lock);
#endif
if (b) {
b->size = pool_head_buffer->size - sizeof(struct buffer);
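Note on the hunk above (editorial, not from the commit): the only change to buffer allocation is that the pool spinlock is now compiled in solely when no double-width CAS is available; the margin logic is untouched: the fast path hands out a buffer only while more than <margin> free entries would remain, otherwise it falls through to __pool_refill_alloc() so the reserve is refilled with malloc(). A hypothetical call site, just to show the margin semantics (res_buf, the buf_empty sentinel and the reserve of 2 are assumptions, not taken from this diff):

/* try to pick up a buffer while keeping at least 2 spare ones for more
 * critical users; buf_empty is assumed to be the usual "no buffer yet"
 * sentinel (only buf_wanted appears in the hunk above)
 */
struct buffer *res_buf = &buf_empty;

if (b_alloc_margin(&res_buf, 2) == NULL) {
	/* pool too low and malloc() refused to grow it: res_buf now points
	 * to the buf_wanted sentinel set inside b_alloc_margin(), so just
	 * retry later
	 */
}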


@@ -47,10 +47,20 @@
#define POOL_LINK(pool, item) ((void **)(item))
#endif
struct pool_head {
__decl_hathreads(HA_SPINLOCK_T lock); /* the spin lock */
#ifdef HA_HAVE_CAS_DW
struct pool_free_list {
void **free_list;
struct list list; /* list of all known pools */
uintptr_t seq;
};
#endif
struct pool_head {
void **free_list;
#ifdef HA_HAVE_CAS_DW
uintptr_t seq;
#else
__decl_hathreads(HA_SPINLOCK_T lock); /* the spin lock */
#endif
unsigned int used; /* how many chunks are currently in use */
unsigned int allocated; /* how many chunks have been allocated */
unsigned int limit; /* hard limit on the number of chunks */
@@ -59,6 +69,7 @@ struct pool_head {
unsigned int flags; /* MEM_F_* */
unsigned int users; /* number of pools sharing this zone */
unsigned int failed; /* failed allocations */
struct list list; /* list of all known pools */
char name[12]; /* name of the pool */
} __attribute__((aligned(64)));
@@ -111,6 +122,110 @@ void pool_gc(struct pool_head *pool_ctx);
*/
void *pool_destroy(struct pool_head *pool);
#ifdef HA_HAVE_CAS_DW
/*
* Returns a pointer to type <type> taken from the pool <pool_type> if
available, otherwise returns NULL. No malloc() is attempted, and poisoning
* is never performed. The purpose is to get the fastest possible allocation.
*/
static inline void *__pool_get_first(struct pool_head *pool)
{
struct pool_free_list cmp, new;
cmp.seq = pool->seq;
__ha_barrier_load();
cmp.free_list = pool->free_list;
do {
if (cmp.free_list == NULL)
return NULL;
new.seq = cmp.seq + 1;
__ha_barrier_load();
new.free_list = *POOL_LINK(pool, cmp.free_list);
} while (__ha_cas_dw((void *)&pool->free_list, (void *)&cmp, (void *)&new) == 0);
end:
HA_ATOMIC_ADD(&pool->used, 1);
#ifdef DEBUG_MEMORY_POOLS
/* keep track of where the element was allocated from */
*POOL_LINK(pool, cmp.free_list) = (void *)pool;
#endif
return cmp.free_list;
}
static inline void *pool_get_first(struct pool_head *pool)
{
void *ret;
ret = __pool_get_first(pool);
return ret;
}
/*
* Returns a pointer to type <type> taken from the pool <pool_type> or
* dynamically allocated. In the first case, <pool_type> is updated to point to
the next element in the list. No memory poisoning is ever performed on the
* returned area.
*/
static inline void *pool_alloc_dirty(struct pool_head *pool)
{
void *p;
if ((p = __pool_get_first(pool)) == NULL)
p = __pool_refill_alloc(pool, 0);
return p;
}
/*
* Returns a pointer to type <type> taken from the pool <pool_type> or
* dynamically allocated. In the first case, <pool_type> is updated to point to
the next element in the list. Memory poisoning is performed if enabled.
*/
static inline void *pool_alloc(struct pool_head *pool)
{
void *p;
p = pool_alloc_dirty(pool);
#ifdef DEBUG_MEMORY_POOLS
if (p) {
/* keep track of where the element was allocated from */
*POOL_LINK(pool, p) = (void *)pool;
}
#endif
if (p && mem_poison_byte >= 0) {
memset(p, mem_poison_byte, pool->size);
}
return p;
}
/*
* Puts a memory area back to the corresponding pool.
* Items are chained directly through a pointer that
* is written in the beginning of the memory area, so
* there's no need for any carrier cell. This implies
* that each memory area is at least as big as one
* pointer. Just like with the libc's free(), nothing
* is done if <ptr> is NULL.
*/
static inline void pool_free(struct pool_head *pool, void *ptr)
{
if (likely(ptr != NULL)) {
void *free_list;
#ifdef DEBUG_MEMORY_POOLS
/* we'll get late corruption if we refill to the wrong pool or double-free */
if (*POOL_LINK(pool, ptr) != (void *)pool)
*(volatile int *)0 = 0;
#endif
free_list = pool->free_list;
do {
*POOL_LINK(pool, ptr) = (void *)free_list;
__ha_barrier_store();
} while (!HA_ATOMIC_CAS(&pool->free_list, (void *)&free_list, ptr));
end:
HA_ATOMIC_SUB(&pool->used, 1);
}
}
#else
/*
* Returns a pointer to type <type> taken from the pool <pool_type> if
available, otherwise returns NULL. No malloc() is attempted, and poisoning
@@ -261,6 +376,7 @@ static inline void pool_free(struct pool_head *pool, void *ptr)
HA_SPIN_UNLOCK(POOL_LOCK, &pool->lock);
}
}
#endif /* HA_HAVE_CAS_DW */
#endif /* _COMMON_MEMORY_H */
/*
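Editorial note: the header above leaves the public pool API untouched; create_pool(), pool_get_first(), pool_alloc()/pool_alloc_dirty(), pool_free() and pool_destroy() keep their prototypes, and only the implementation behind them switches between the lockless and the spinlock variants depending on HA_HAVE_CAS_DW. A minimal usage sketch against that API (the foo_task structure, the pool name and the helper names are invented for illustration; MEM_F_SHARED is the usual "share with same-size pools" flag from this header):

/* sketch only: would be built inside the HAProxy tree */
#include <common/memory.h>

/* hypothetical fixed-size object served from a pool */
struct foo_task {
	int state;
	char payload[64];
};

static struct pool_head *pool_head_foo_task;

static int foo_init(void)
{
	/* one shared pool for all foo_task objects; entries must be at least
	 * one pointer wide because the free list is threaded through them
	 */
	pool_head_foo_task = create_pool("foo_task", sizeof(struct foo_task), MEM_F_SHARED);
	return pool_head_foo_task != NULL;
}

static struct foo_task *foo_task_new(void)
{
	/* lockless fast path via __pool_get_first(), malloc() refill otherwise */
	return pool_alloc(pool_head_foo_task);
}

static void foo_task_release(struct foo_task *t)
{
	/* pushes the area back onto pool->free_list (single CAS, or spinlock
	 * when HA_HAVE_CAS_DW is not defined)
	 */
	pool_free(pool_head_foo_task, t);
}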


@@ -93,10 +93,136 @@ struct pool_head *create_pool(char *name, unsigned int size, unsigned int flags)
LIST_ADDQ(start, &pool->list);
}
pool->users++;
#ifndef HA_HAVE_CAS_DW
HA_SPIN_INIT(&pool->lock);
#endif
return pool;
}
#ifdef HA_HAVE_CAS_DW
/* Allocates new entries for pool <pool> until there are at least <avail> + 1
* available, then returns the last one for immediate use, so that at least
* <avail> are left available in the pool upon return. NULL is returned if the
* last entry could not be allocated. It's important to note that at least one
* allocation is always performed even if there are enough entries in the pool.
* A call to the garbage collector is performed at most once in case malloc()
* returns an error, before returning NULL.
*/
void *__pool_refill_alloc(struct pool_head *pool, unsigned int avail)
{
void *ptr = NULL, *free_list;
int failed = 0;
int size = pool->size;
int limit = pool->limit;
int allocated = pool->allocated, allocated_orig = allocated;
/* stop point */
avail += pool->used;
while (1) {
if (limit && allocated >= limit) {
HA_ATOMIC_ADD(&pool->allocated, allocated - allocated_orig);
return NULL;
}
ptr = malloc(size + POOL_EXTRA);
if (!ptr) {
HA_ATOMIC_ADD(&pool->failed, 1);
if (failed)
return NULL;
failed++;
pool_gc(pool);
continue;
}
if (++allocated > avail)
break;
free_list = pool->free_list;
do {
*POOL_LINK(pool, ptr) = free_list;
__ha_barrier_store();
} while (HA_ATOMIC_CAS(&pool->free_list, (void *)&free_list, ptr) == 0);
}
HA_ATOMIC_ADD(&pool->allocated, allocated - allocated_orig);
HA_ATOMIC_ADD(&pool->used, 1);
#ifdef DEBUG_MEMORY_POOLS
/* keep track of where the element was allocated from */
*POOL_LINK(pool, ptr) = (void *)pool;
#endif
return ptr;
}
void *pool_refill_alloc(struct pool_head *pool, unsigned int avail)
{
void *ptr;
ptr = __pool_refill_alloc(pool, avail);
return ptr;
}
/*
* This function frees whatever can be freed in pool <pool>.
*/
void pool_flush(struct pool_head *pool)
{
void *next, *temp;
int removed = 0;
if (!pool)
return;
do {
next = pool->free_list;
} while (!HA_ATOMIC_CAS(&pool->free_list, (void *)&next, NULL));
while (next) {
temp = next;
next = *POOL_LINK(pool, temp);
removed++;
free(temp);
}
pool->free_list = next;
HA_ATOMIC_SUB(&pool->allocated, removed);
/* here, we should have pool->allocated == pool->used */
}
/*
* This function frees whatever can be freed in all pools, but respecting
* the minimum thresholds imposed by owners. It takes care of avoiding
* recursion because it may be called from a signal handler.
*
* <pool_ctx> is unused
*/
void pool_gc(struct pool_head *pool_ctx)
{
static int recurse;
int cur_recurse = 0;
struct pool_head *entry;
if (recurse || !HA_ATOMIC_CAS(&recurse, &cur_recurse, 1))
return;
list_for_each_entry(entry, &pools, list) {
while ((int)((volatile int)entry->allocated - (volatile int)entry->used) > (int)entry->minavail) {
struct pool_free_list cmp, new;
cmp.seq = entry->seq;
__ha_barrier_load();
cmp.free_list = entry->free_list;
__ha_barrier_load();
if (cmp.free_list == NULL)
break;
new.free_list = *POOL_LINK(entry, cmp.free_list);
new.seq = cmp.seq + 1;
if (__ha_cas_dw(&entry->free_list, &cmp, &new) == 0)
continue;
free(cmp.free_list);
HA_ATOMIC_SUB(&entry->allocated, 1);
}
}
HA_ATOMIC_STORE(&recurse, 0);
}
#else
/* Allocates new entries for pool <pool> until there are at least <avail> + 1
* available, then returns the last one for immediate use, so that at least
* <avail> are left available in the pool upon return. NULL is returned if the
@@ -208,6 +334,7 @@ void pool_gc(struct pool_head *pool_ctx)
HA_ATOMIC_STORE(&recurse, 0);
}
#endif
/*
* This function destroys a pool by freeing it completely, unless it's still
@@ -225,7 +352,9 @@ void *pool_destroy(struct pool_head *pool)
pool->users--;
if (!pool->users) {
LIST_DEL(&pool->list);
#ifndef HA_HAVE_CAS_DW
HA_SPIN_DESTROY(&pool->lock);
#endif
free(pool);
}
}
@@ -242,7 +371,9 @@ void dump_pools_to_trash()
allocated = used = nbpools = 0;
chunk_printf(&trash, "Dumping pools usage. Use SIGQUIT to flush them.\n");
list_for_each_entry(entry, &pools, list) {
#ifndef HA_HAVE_CAS_DW
HA_SPIN_LOCK(POOL_LOCK, &entry->lock);
#endif
chunk_appendf(&trash, " - Pool %s (%d bytes) : %d allocated (%u bytes), %d used, %d failures, %d users%s\n",
entry->name, entry->size, entry->allocated,
entry->size * entry->allocated, entry->used, entry->failed,
@@ -251,7 +382,9 @@ void dump_pools_to_trash()
allocated += entry->allocated * entry->size;
used += entry->used * entry->size;
nbpools++;
#ifndef HA_HAVE_CAS_DW
HA_SPIN_UNLOCK(POOL_LOCK, &entry->lock);
#endif
}
chunk_appendf(&trash, "Total: %d pools, %lu bytes allocated, %lu used.\n",
nbpools, allocated, used);
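Closing note (editorial): the net effect of the memory.c changes is that, with HA_HAVE_CAS_DW defined, any number of threads can allocate from and release to the same pool without ever touching a pool lock, while the original spinlock-based code is kept unchanged under the #else branches. A rough stress sketch of that property (to be built inside the HAProxy tree; thread count, pool name, entry size and iteration counts are arbitrary, error handling omitted):

#include <pthread.h>
#include <stdio.h>
#include <common/memory.h>

static struct pool_head *pool_head_blob;

static void *worker(void *arg)
{
	void *objs[16];
	int i, round;

	/* hammer the shared free list: lockless pop/push when HA_HAVE_CAS_DW
	 * is set, spinlock-protected otherwise, with identical calling code
	 */
	for (round = 0; round < 100000; round++) {
		for (i = 0; i < 16; i++)
			objs[i] = pool_alloc(pool_head_blob);
		for (i = 0; i < 16; i++)
			pool_free(pool_head_blob, objs[i]);
	}
	return arg;
}

int main(void)
{
	pthread_t th[4];
	int i;

	pool_head_blob = create_pool("blob", 128, MEM_F_SHARED);
	for (i = 0; i < 4; i++)
		pthread_create(&th[i], NULL, worker, NULL);
	for (i = 0; i < 4; i++)
		pthread_join(th[i], NULL);

	printf("allocated=%u used=%u\n", pool_head_blob->allocated, pool_head_blob->used);
	return 0;
}

pool_alloc() may still return NULL under memory pressure, and pool_free() is documented above to ignore NULL just like free(), so the sketch tolerates allocation failures.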