From 899fb8abdcda90e3b3c69f6945f83c4c1107c459 Mon Sep 17 00:00:00 2001
From: Olivier Houchard
Date: Wed, 18 Mar 2020 15:48:29 +0100
Subject: [PATCH] MINOR: memory: Change the flush_lock to a spinlock, and
 don't get it in alloc.

The flush_lock was introduced mostly to make sure that pool_gc() can
never dereference a pointer that has already been free'd.
__pool_get_first() was acquiring the lock too; the fear was that
otherwise the pointer could get free'd and pool_gc() would then attempt
to dereference it. However, that cannot happen: when using lockless
pools, the only functions that can free a pointer are pool_gc() and
pool_flush(), and as long as those two are mutually exclusive, nobody
can free the pointer while pool_gc() is accessing it.

So change the flush_lock to a spinlock, and don't bother acquiring or
releasing it in __pool_get_first(); that way callers of
__pool_get_first() won't have to wait while the pool is being flushed.
The worst that can happen is that __pool_refill_alloc() is called while
the pool is being flushed, and memory gets allocated just to be free'd
again.

This may help with GitHub issue #552.

This may be backported to 2.1, 2.0 and 1.9.
---
 include/common/memory.h |  8 ++------
 src/memory.c            | 10 +++++-----
 2 files changed, 7 insertions(+), 11 deletions(-)

diff --git a/include/common/memory.h b/include/common/memory.h
index 5433a7373..44a9dcb57 100644
--- a/include/common/memory.h
+++ b/include/common/memory.h
@@ -78,7 +78,7 @@ struct pool_head {
 	void **free_list;
 #ifdef CONFIG_HAP_LOCKLESS_POOLS
 	uintptr_t seq;
-	HA_RWLOCK_T flush_lock;
+	HA_SPINLOCK_T flush_lock;
 #else
 	__decl_hathreads(HA_SPINLOCK_T lock); /* the spin lock */
 #endif
@@ -222,19 +222,15 @@ static inline void *__pool_get_first(struct pool_head *pool)
 	cmp.seq = pool->seq;
 	__ha_barrier_load();
 
-	HA_RWLOCK_RDLOCK(POOL_LOCK, &pool->flush_lock);
 	cmp.free_list = pool->free_list;
 	do {
-		if (cmp.free_list == NULL) {
-			HA_RWLOCK_RDUNLOCK(POOL_LOCK, &pool->flush_lock);
+		if (cmp.free_list == NULL)
 			return NULL;
-		}
 		new.seq = cmp.seq + 1;
 		__ha_barrier_load();
 		new.free_list = *POOL_LINK(pool, cmp.free_list);
 	} while (HA_ATOMIC_DWCAS((void *)&pool->free_list, (void *)&cmp, (void *)&new) == 0);
 	__ha_barrier_atomic_store();
-	HA_RWLOCK_RDUNLOCK(POOL_LOCK, &pool->flush_lock);
 
 	_HA_ATOMIC_ADD(&pool->used, 1);
 #ifdef DEBUG_MEMORY_POOLS
diff --git a/src/memory.c b/src/memory.c
index b0512aaec..4a37381e0 100644
--- a/src/memory.c
+++ b/src/memory.c
@@ -142,7 +142,7 @@ struct pool_head *create_pool(char *name, unsigned int size, unsigned int flags)
 #ifndef CONFIG_HAP_LOCKLESS_POOLS
 		HA_SPIN_INIT(&pool->lock);
 #else
-		HA_RWLOCK_INIT(&pool->flush_lock);
+		HA_SPIN_INIT(&pool->flush_lock);
 #endif
 	}
 	pool->users++;
@@ -225,7 +225,7 @@ void pool_flush(struct pool_head *pool)
 	if (!pool)
 		return;
 
-	HA_RWLOCK_WRLOCK(POOL_LOCK, &pool->flush_lock);
+	HA_SPIN_LOCK(POOL_LOCK, &pool->flush_lock);
 	do {
 		cmp.free_list = pool->free_list;
 		cmp.seq = pool->seq;
@@ -233,7 +233,7 @@ void pool_flush(struct pool_head *pool)
 		new.seq = cmp.seq + 1;
 	} while (!_HA_ATOMIC_DWCAS(&pool->free_list, &cmp, &new));
 	__ha_barrier_atomic_store();
-	HA_RWLOCK_WRUNLOCK(POOL_LOCK, &pool->flush_lock);
+	HA_SPIN_UNLOCK(POOL_LOCK, &pool->flush_lock);
 	next = cmp.free_list;
 	while (next) {
 		temp = next;
@@ -263,7 +263,7 @@ void pool_gc(struct pool_head *pool_ctx)
 		return;
 
 	list_for_each_entry(entry, &pools, list) {
-		HA_RWLOCK_WRLOCK(POOL_LOCK, &entry->flush_lock);
+		HA_SPIN_LOCK(POOL_LOCK, &entry->flush_lock);
 		while ((int)((volatile int)entry->allocated - (volatile
 		    int)entry->used) > (int)entry->minavail) {
 			struct pool_free_list cmp, new;
@@ -280,7 +280,7 @@ void pool_gc(struct pool_head *pool_ctx)
 			free(cmp.free_list);
 			_HA_ATOMIC_SUB(&entry->allocated, 1);
 		}
-		HA_RWLOCK_WRUNLOCK(POOL_LOCK, &entry->flush_lock);
+		HA_SPIN_UNLOCK(POOL_LOCK, &entry->flush_lock);
 	}
 
 	_HA_ATOMIC_STORE(&recurse, 0);
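
For readers who want to see the protocol in isolation, below is a minimal,
standalone C sketch of the scheme the commit message describes. It is not
HAProxy's code: the names (struct chunk, pool_pop(), pool_push(),
pool_flush()) are invented for the sketch, C11 atomics and
pthread_spinlock_t stand in for the HA_* primitives, and the plain
single-word CAS omits the sequence counter that the real HA_ATOMIC_DWCAS
pairs with the head pointer to defeat ABA. What it illustrates is the
invariant the patch relies on: only the freeing paths take the spinlock,
so they exclude each other, while the allocation path pops lock-free and
at worst races into a redundant refill.

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

/* A chunk on the free list. The real pool stores this link inside the
 * free'd object itself (POOL_LINK). */
struct chunk {
	struct chunk *next;
};

struct pool {
	_Atomic(struct chunk *) free_list;
	pthread_spinlock_t flush_lock; /* serializes the freeing paths only */
};

/* Allocation path: lock-free pop, takes no lock at all. NOTE: this plain
 * CAS is ABA-prone if a chunk is free'd and reused concurrently; the real
 * code pairs the head with a sequence counter and retries via a
 * double-width CAS so a racing flush simply makes the CAS fail. */
static struct chunk *pool_pop(struct pool *p)
{
	struct chunk *head = atomic_load(&p->free_list);

	do {
		if (!head)
			return NULL; /* empty: caller would refill instead */
	} while (!atomic_compare_exchange_weak(&p->free_list, &head,
	                                       head->next));
	return head;
}

/* Deallocation path: lock-free push back onto the free list. */
static void pool_push(struct pool *p, struct chunk *c)
{
	c->next = atomic_load(&p->free_list);
	while (!atomic_compare_exchange_weak(&p->free_list, &c->next, c))
		; /* on failure, c->next was reloaded with the current head */
}

/* Freeing path: detach the whole list under the spinlock, then free it.
 * A gc() releasing surplus chunks one by one would take the same lock,
 * so the two freeing paths are mutually exclusive and neither can ever
 * dereference a chunk the other has already returned to the system. */
static void pool_flush(struct pool *p)
{
	struct chunk *head, *next;

	pthread_spin_lock(&p->flush_lock);
	head = atomic_exchange(&p->free_list, NULL); /* atomic detach */
	pthread_spin_unlock(&p->flush_lock);

	while (head) {
		next = head->next;
		free(head);
		head = next;
	}
}

int main(void)
{
	struct pool p = { .free_list = NULL };

	pthread_spin_init(&p.flush_lock, PTHREAD_PROCESS_PRIVATE);

	pool_push(&p, malloc(sizeof(struct chunk)));
	pool_push(&p, malloc(sizeof(struct chunk)));

	struct chunk *c = pool_pop(&p); /* lock-free, even during a flush */
	printf("popped %p\n", (void *)c);
	free(c);

	pool_flush(&p); /* frees the remaining chunk */
	pthread_spin_destroy(&p.flush_lock);
	return 0;
}

Seen through this model, the rwlock-to-spinlock switch is natural: once
__pool_get_first() stops taking the read side, the lock has exactly two
takers and both behave as writers, so reader/writer semantics buy nothing
and the cheaper spinlock wins.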