MINOR: activity: report the number of failed pool/buffer allocations
Haproxy is designed to be able to continue to run even under very low memory conditions. However this can sometimes have a serious impact on performance that is hard to diagnose. Let's report counters of failed pool and buffer allocations per thread in show activity.
This commit is contained in:
parent
2ae84e445d
commit
a8b2ce02b8
|
@ -33,6 +33,7 @@
|
|||
#include <common/istbuf.h>
|
||||
#include <common/memory.h>
|
||||
|
||||
#include <proto/activity.h>
|
||||
|
||||
/* an element of the <buffer_wq> list. It represents an object that need to
|
||||
* acquire a buffer to continue its process. */
|
||||
|
@ -77,8 +78,10 @@ static inline struct buffer *b_alloc(struct buffer *buf)
|
|||
|
||||
*buf = BUF_WANTED;
|
||||
area = pool_alloc_dirty(pool_head_buffer);
|
||||
if (unlikely(!area))
|
||||
if (unlikely(!area)) {
|
||||
activity[tid].buf_wait++;
|
||||
return NULL;
|
||||
}
|
||||
|
||||
buf->area = area;
|
||||
buf->size = pool_head_buffer->size;
|
||||
|
@ -175,8 +178,10 @@ static inline struct buffer *b_alloc_margin(struct buffer *buf, int margin)
|
|||
HA_SPIN_UNLOCK(POOL_LOCK, &pool_head_buffer->lock);
|
||||
#endif
|
||||
|
||||
if (unlikely(!area))
|
||||
if (unlikely(!area)) {
|
||||
activity[tid].buf_wait++;
|
||||
return NULL;
|
||||
}
|
||||
|
||||
done:
|
||||
buf->area = area;
|
||||
|
|
|
@ -54,6 +54,8 @@ struct activity {
|
|||
unsigned int accepted; // accepted incoming connections
|
||||
unsigned int accq_pushed; // accept queue connections pushed
|
||||
unsigned int accq_full; // accept queue connection not pushed because full
|
||||
unsigned int pool_fail; // failed a pool allocation
|
||||
unsigned int buf_wait; // waited on a buffer allocation
|
||||
#if defined(DEBUG_DEV)
|
||||
/* keep these ones at the end */
|
||||
unsigned int ctr0; // general purpose debug counter
|
||||
|
|
|
@ -1117,6 +1117,8 @@ static int cli_io_handler_show_activity(struct appctx *appctx)
|
|||
chunk_appendf(&trash, "fd_lock:"); SHOW_TOT(thr, activity[thr].fd_lock);
|
||||
chunk_appendf(&trash, "conn_dead:"); SHOW_TOT(thr, activity[thr].conn_dead);
|
||||
chunk_appendf(&trash, "stream:"); SHOW_TOT(thr, activity[thr].stream);
|
||||
chunk_appendf(&trash, "pool_fail:"); SHOW_TOT(thr, activity[thr].pool_fail);
|
||||
chunk_appendf(&trash, "buf_wait:"); SHOW_TOT(thr, activity[thr].buf_wait);
|
||||
chunk_appendf(&trash, "empty_rq:"); SHOW_TOT(thr, activity[thr].empty_rq);
|
||||
chunk_appendf(&trash, "long_rq:"); SHOW_TOT(thr, activity[thr].long_rq);
|
||||
chunk_appendf(&trash, "ctxsw:"); SHOW_TOT(thr, activity[thr].ctxsw);
|
||||
|
|
15
src/memory.c
15
src/memory.c
|
@ -25,6 +25,8 @@
|
|||
#include <common/mini-clist.h>
|
||||
#include <common/standard.h>
|
||||
|
||||
#include <types/activity.h>
|
||||
|
||||
#include <proto/applet.h>
|
||||
#include <proto/cli.h>
|
||||
#include <proto/channel.h>
|
||||
|
@ -160,14 +162,17 @@ void *__pool_refill_alloc(struct pool_head *pool, unsigned int avail)
|
|||
while (1) {
|
||||
if (limit && allocated >= limit) {
|
||||
_HA_ATOMIC_ADD(&pool->allocated, allocated - allocated_orig);
|
||||
activity[tid].pool_fail++;
|
||||
return NULL;
|
||||
}
|
||||
|
||||
ptr = malloc(size + POOL_EXTRA);
|
||||
if (!ptr) {
|
||||
_HA_ATOMIC_ADD(&pool->failed, 1);
|
||||
if (failed)
|
||||
if (failed) {
|
||||
activity[tid].pool_fail++;
|
||||
return NULL;
|
||||
}
|
||||
failed++;
|
||||
pool_gc(pool);
|
||||
continue;
|
||||
|
@ -317,14 +322,18 @@ void *__pool_refill_alloc(struct pool_head *pool, unsigned int avail)
|
|||
avail += pool->used;
|
||||
|
||||
while (1) {
|
||||
if (pool->limit && pool->allocated >= pool->limit)
|
||||
if (pool->limit && pool->allocated >= pool->limit) {
|
||||
activity[tid].pool_fail++;
|
||||
return NULL;
|
||||
}
|
||||
|
||||
ptr = pool_alloc_area(pool->size + POOL_EXTRA);
|
||||
if (!ptr) {
|
||||
pool->failed++;
|
||||
if (failed)
|
||||
if (failed) {
|
||||
activity[tid].pool_fail++;
|
||||
return NULL;
|
||||
}
|
||||
failed++;
|
||||
pool_gc(pool);
|
||||
continue;
|
||||
|
|
Loading…
Reference in New Issue