BUG/MINOR: pools/threads: don't ignore DEBUG_UAF on double-word CAS capable archs

Since commit cf975d4 ("MINOR: pools/threads: Implement lockless memory
pools."), we support lockless pools. However, the parts dedicated to
detecting use-after-free were not brought into the lockless
implementation, making DEBUG_UAF useless in this situation.

The present patch adds a new define, CONFIG_HAP_LOCKLESS_POOLS, which is
set when such a compatible architecture is detected and pool debugging is
not requested, and uses it throughout the pool and buffer functions in
place of HA_HAVE_CAS_DW. This way, enabling DEBUG_UAF automatically
disables the lockless version.

No backport is needed as this is purely 1.9-dev.
Willy Tarreau 2018-02-22 14:05:55 +01:00
parent 5e64286bab
commit f161d0f51e
4 changed files with 21 additions and 14 deletions
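For reference, use-after-free detection is turned on at build time by defining DEBUG_UAF in the compiler flags; after this patch such a build automatically falls back to the spinlock-protected pools, even on machines offering a double-word CAS. A hypothetical build line (the exact target and debug variables depend on the local Makefile setup):

    make TARGET=linux2628 USE_THREAD=1 DEBUG="-DDEBUG_UAF"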

View File

@@ -735,7 +735,7 @@ static inline struct buffer *b_alloc_margin(struct buffer **buf, int margin)
return *buf;
*buf = &buf_wanted;
-#ifndef HA_HAVE_CAS_DW
+#ifndef CONFIG_HAP_LOCKLESS_POOLS
HA_SPIN_LOCK(POOL_LOCK, &pool_head_buffer->lock);
#endif
@@ -743,7 +743,7 @@ static inline struct buffer *b_alloc_margin(struct buffer **buf, int margin)
if ((pool_head_buffer->allocated - pool_head_buffer->used) > margin) {
b = __pool_get_first(pool_head_buffer);
if (likely(b)) {
-#ifndef HA_HAVE_CAS_DW
+#ifndef CONFIG_HAP_LOCKLESS_POOLS
HA_SPIN_UNLOCK(POOL_LOCK, &pool_head_buffer->lock);
#endif
b->size = pool_head_buffer->size - sizeof(struct buffer);
@@ -756,7 +756,7 @@ static inline struct buffer *b_alloc_margin(struct buffer **buf, int margin)
/* slow path, uses malloc() */
b = __pool_refill_alloc(pool_head_buffer, margin);
-#ifndef HA_HAVE_CAS_DW
+#ifndef CONFIG_HAP_LOCKLESS_POOLS
HA_SPIN_UNLOCK(POOL_LOCK, &pool_head_buffer->lock);
#endif

View File

@@ -47,6 +47,13 @@
#define THREAD_LOCAL
#endif
+/* On architectures supporting threads and double-word CAS, we can implement
+ * lock-less memory pools. This isn't supported for debugging modes however.
+ */
+#if !defined(DEBUG_NO_LOCKLESS_POOLS) && defined(USE_THREAD) && defined(HA_HAVE_CAS_DW) && !defined(DEBUG_UAF)
+#define CONFIG_HAP_LOCKLESS_POOLS
+#endif
/* CONFIG_HAP_INLINE_FD_SET
* This makes use of inline FD_* macros instead of calling equivalent
* functions. Benchmarks on a Pentium-M show that using functions is

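As background, DEBUG_UAF-style detection gives every pool object its own memory mapping and unmaps it on release, so a later access faults immediately instead of silently corrupting the free list. The lockless pool code does not include these hooks, which is why the condition above excludes DEBUG_UAF builds. A simplified sketch of the idea, not HAProxy's actual implementation:

    #include <stddef.h>
    #include <sys/mman.h>

    /* Illustration only: allocate each object in its own anonymous mapping... */
    static void *uaf_alloc_area(size_t size)
    {
    	void *area = mmap(NULL, size, PROT_READ | PROT_WRITE,
    	                  MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    	return (area == MAP_FAILED) ? NULL : area;
    }

    /* ... and unmap it on free, so any use-after-free raises SIGSEGV. */
    static void uaf_free_area(void *area, size_t size)
    {
    	munmap(area, size);
    }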
View File

@@ -48,7 +48,7 @@
#define POOL_LINK(pool, item) ((void **)(item))
#endif
-#ifdef HA_HAVE_CAS_DW
+#ifdef CONFIG_HAP_LOCKLESS_POOLS
struct pool_free_list {
void **free_list;
uintptr_t seq;
@@ -57,7 +57,7 @@ struct pool_free_list {
struct pool_head {
void **free_list;
-#ifdef HA_HAVE_CAS_DW
+#ifdef CONFIG_HAP_LOCKLESS_POOLS
uintptr_t seq;
#else
__decl_hathreads(HA_SPINLOCK_T lock); /* the spin lock */
@@ -123,7 +123,7 @@ void pool_gc(struct pool_head *pool_ctx);
*/
void *pool_destroy(struct pool_head *pool);
-#ifdef HA_HAVE_CAS_DW
+#ifdef CONFIG_HAP_LOCKLESS_POOLS
/*
* Returns a pointer to type <type> taken from the pool <pool_type> if
* available, otherwise returns NULL. No malloc() is attempted, and poisonning
@@ -226,7 +226,7 @@ static inline void pool_free(struct pool_head *pool, void *ptr)
}
}
-#else
+#else /* CONFIG_HAP_LOCKLESS_POOLS */
/*
* Returns a pointer to type <type> taken from the pool <pool_type> if
* available, otherwise returns NULL. No malloc() is attempted, and poisonning
@@ -377,7 +377,7 @@ static inline void pool_free(struct pool_head *pool, void *ptr)
HA_SPIN_UNLOCK(POOL_LOCK, &pool->lock);
}
}
-#endif /* HA_HAVE_CAS_DW */
+#endif /* CONFIG_HAP_LOCKLESS_POOLS */
#endif /* _COMMON_MEMORY_H */
/*

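The pool_free_list structure above pairs the free-list head with a sequence counter because a single-word CAS is not enough: an item could be popped, reused, and pushed back between the initial read and the CAS (the ABA problem), which the counter detects. Below is a minimal, generic sketch of such a double-word-CAS pop, written with GCC's __atomic_compare_exchange built-in rather than HAProxy's own primitives; the 16-byte CAS it relies on is exactly what HA_HAVE_CAS_DW advertises (e.g. cmpxchg16b on x86_64).

    #include <stddef.h>
    #include <stdint.h>

    /* Illustration only, not HAProxy's code. */
    struct lockless_pool {
    	void **free_list;    /* head of the singly-linked free list  */
    	uintptr_t seq;       /* bumped on every update to defeat ABA */
    } __attribute__((aligned(16)));

    static void *lockless_pool_pop(struct lockless_pool *pool)
    {
    	struct lockless_pool cur, next;

    	cur = *pool;                         /* snapshot head + counter */
    	while (cur.free_list) {
    		next.free_list = *cur.free_list; /* first word of an item links to the next one */
    		next.seq = cur.seq + 1;
    		if (__atomic_compare_exchange(pool, &cur, &next, 0,
    		                              __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST))
    			return cur.free_list;        /* popped item */
    		/* CAS failed: cur was refreshed with the current value, retry */
    	}
    	return NULL;
    }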
View File

@@ -93,13 +93,13 @@ struct pool_head *create_pool(char *name, unsigned int size, unsigned int flags)
LIST_ADDQ(start, &pool->list);
}
pool->users++;
-#ifndef HA_HAVE_CAS_DW
+#ifndef CONFIG_HAP_LOCKLESS_POOLS
HA_SPIN_INIT(&pool->lock);
#endif
return pool;
}
-#ifdef HA_HAVE_CAS_DW
+#ifdef CONFIG_HAP_LOCKLESS_POOLS
/* Allocates new entries for pool <pool> until there are at least <avail> + 1
* available, then returns the last one for immediate use, so that at least
* <avail> are left available in the pool upon return. NULL is returned if the
@@ -221,7 +221,7 @@ void pool_gc(struct pool_head *pool_ctx)
HA_ATOMIC_STORE(&recurse, 0);
}
-#else
+#else /* CONFIG_HAP_LOCKLESS_POOLS */
/* Allocates new entries for pool <pool> until there are at least <avail> + 1
* available, then returns the last one for immediate use, so that at least
@@ -352,7 +352,7 @@ void *pool_destroy(struct pool_head *pool)
pool->users--;
if (!pool->users) {
LIST_DEL(&pool->list);
-#ifndef HA_HAVE_CAS_DW
+#ifndef CONFIG_HAP_LOCKLESS_POOLS
HA_SPIN_DESTROY(&pool->lock);
#endif
free(pool);
@@ -371,7 +371,7 @@ void dump_pools_to_trash()
allocated = used = nbpools = 0;
chunk_printf(&trash, "Dumping pools usage. Use SIGQUIT to flush them.\n");
list_for_each_entry(entry, &pools, list) {
-#ifndef HA_HAVE_CAS_DW
+#ifndef CONFIG_HAP_LOCKLESS_POOLS
HA_SPIN_LOCK(POOL_LOCK, &entry->lock);
#endif
chunk_appendf(&trash, " - Pool %s (%d bytes) : %d allocated (%u bytes), %d used, %d failures, %d users%s\n",
@@ -382,7 +382,7 @@ void dump_pools_to_trash()
allocated += entry->allocated * entry->size;
used += entry->used * entry->size;
nbpools++;
-#ifndef HA_HAVE_CAS_DW
+#ifndef CONFIG_HAP_LOCKLESS_POOLS
HA_SPIN_UNLOCK(POOL_LOCK, &entry->lock);
#endif
}