MINOR: pools: always pre-initialize allocated memory outside of the lock
When calling mmap(), the system generally gives us a page but does not really allocate it until we first dereference it. It turns out that this first access takes much longer than the mmap() syscall itself. Unfortunately, when running with memory debugging enabled, we mmap()/munmap() each object, resulting in lots of such calls and high contention on the allocator. And since the first accesses to the page are performed under the pool lock, they are extremely damaging to other threads.

The simple fact of writing a 0 at the beginning of the page after allocating it, and of placing the POOL_LINK pointer outside of the lock, is enough to boost performance by 8x in debug mode and to save the watchdog from triggering on lock contention. This is what this patch does.
commit 828675421e
parent 3e853ea74d
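To see why the first dereference matters, here is a minimal standalone sketch (not HAProxy code; assumes Linux and 4 kB pages, and the comparison rather than exact numbers is the point) that times the mmap() syscall against the first write into the returned page:

/* Standalone demo: the first write into a fresh anonymous mapping
 * triggers the page fault that actually allocates the page, which is
 * typically far more expensive than the mmap() call itself.
 */
#include <stdio.h>
#include <sys/mman.h>
#include <time.h>

static long long now_ns(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return (long long)ts.tv_sec * 1000000000LL + ts.tv_nsec;
}

int main(void)
{
	long long t0, t1, t2;
	char *p;

	t0 = now_ns();
	p = mmap(NULL, 4096, PROT_READ|PROT_WRITE,
	         MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
	t1 = now_ns();
	if (p == MAP_FAILED)
		return 1;

	*p = 0;	/* first touch: the kernel allocates the page here */
	t2 = now_ns();

	printf("mmap(): %lld ns, first touch: %lld ns\n", t1 - t0, t2 - t1);
	munmap(p, 4096);
	return 0;
}

The cost of that first touch is exactly what the patch below moves outside the pool lock.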
include/common/memory.h | 4 ++++

@@ -421,6 +421,10 @@ static inline void *pool_alloc_area(size_t size)
 	ret = mmap(NULL, (size + 4095) & -4096, PROT_READ|PROT_WRITE, MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
 	if (ret == MAP_FAILED)
 		return NULL;
+	/* let's dereference the page before returning so that the real
+	 * allocation in the system is performed without holding the lock.
+	 */
+	*(int *)ret = 0;
 	if (pad >= sizeof(void *))
 		*(void **)(ret + pad - sizeof(void *)) = ret + pad;
 	return ret + pad;
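As an aside, the (size + 4095) & -4096 idiom in the context above rounds the requested size up to the next multiple of the 4096-byte page size (-4096 in two's complement clears the low 12 bits). A small self-checking sketch:

/* Demo of the page-rounding idiom used in pool_alloc_area(). */
#include <assert.h>

int main(void)
{
	assert(((0    + 4095) & -4096) == 0);
	assert(((1    + 4095) & -4096) == 4096);
	assert(((4096 + 4095) & -4096) == 4096);
	assert(((4097 + 4095) & -4096) == 8192);
	return 0;
}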
src/memory.c | 12 ++++++++----
@@ -337,6 +337,14 @@ void *__pool_refill_alloc(struct pool_head *pool, unsigned int avail)
 
 	HA_SPIN_UNLOCK(POOL_LOCK, &pool->lock);
 	ptr = pool_alloc_area(pool->size + POOL_EXTRA);
+#ifdef DEBUG_MEMORY_POOLS
+	/* keep track of where the element was allocated from. This
+	 * is done out of the lock so that the system really allocates
+	 * the data without harming other threads waiting on the lock.
+	 */
+	if (ptr)
+		*POOL_LINK(pool, ptr) = (void *)pool;
+#endif
 	HA_SPIN_LOCK(POOL_LOCK, &pool->lock);
 	if (!ptr) {
 		pool->failed++;
@@ -355,10 +363,6 @@ void *__pool_refill_alloc(struct pool_head *pool, unsigned int avail)
 		pool->free_list = ptr;
 	}
 	pool->used++;
-#ifdef DEBUG_MEMORY_POOLS
-	/* keep track of where the element was allocated from */
-	*POOL_LINK(pool, ptr) = (void *)pool;
-#endif
 	return ptr;
 }
 
 void *pool_refill_alloc(struct pool_head *pool, unsigned int avail)
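For readers less familiar with the pattern, here is a generic sketch of what the src/memory.c change does around the lock, using hypothetical names (mini_pool, mini_refill, not the HAProxy API) and malloc() standing in for the mmap()-based pool_alloc_area():

/* Lock-narrowing pattern: enter with the spinlock held, drop it around
 * the slow allocation and the first-touch write, then re-take it only
 * to update the shared counters.
 */
#include <pthread.h>
#include <stdlib.h>

struct mini_pool {
	pthread_spinlock_t lock;
	unsigned used;
	unsigned failed;
};

/* called with pool->lock held; returns with it held again */
static void *mini_refill(struct mini_pool *pool, size_t size)
{
	void *ptr;

	pthread_spin_unlock(&pool->lock);
	ptr = malloc(size);
	if (ptr)
		*(char *)ptr = 0;	/* first touch outside the lock */
	pthread_spin_lock(&pool->lock);

	if (!ptr) {
		pool->failed++;
		return NULL;
	}
	pool->used++;
	return ptr;
}

int main(void)
{
	struct mini_pool pool = { .used = 0, .failed = 0 };
	void *obj;

	pthread_spin_init(&pool.lock, PTHREAD_PROCESS_PRIVATE);
	pthread_spin_lock(&pool.lock);
	obj = mini_refill(&pool, 64);
	pthread_spin_unlock(&pool.lock);
	free(obj);
	return 0;
}

Other threads spinning on the lock only wait for the counter updates, not for the kernel to service the page faults.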