MINOR: buffer: use MT_LIST_ADDQ() for buffer_wait lists additions
The TRY_ADDQ there was not needed since the wait list is exclusively owned by the caller. There is a preliminary test on MT_LIST_ADDED() that could have been eliminated by keeping MT_LIST_TRY_ADDQ(), but that would have required two more expensive writes before the test, so it is better to keep the test the way it is.
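To illustrate the trade-off described above, here is a minimal, single-threaded sketch of the pattern this commit keeps: a cheap read-only "already queued?" test followed by a plain unconditional append. The struct wait_node type and the node_init()/node_in_list()/list_addq() helpers are made-up stand-ins, not HAProxy's lock-free MT_LIST macros; they only mirror the self-linked-when-detached convention that an MT_LIST_ADDED()-style test relies on.

#include <stdio.h>

/* Hypothetical stand-ins for an MT_LIST element and queue; the real
 * macros in HAProxy are lock-free and considerably more involved.
 * A detached element is self-linked, which is what the ADDED-style
 * test exploits: it is a single read, with no write to the element.
 */
struct wait_node {
	struct wait_node *next;
	struct wait_node *prev;
};

static void node_init(struct wait_node *n)
{
	n->next = n;    /* self-linked == not queued */
	n->prev = n;
}

static int node_in_list(const struct wait_node *n)
{
	/* read-only check, analogous in spirit to MT_LIST_ADDED() */
	return n->next != n;
}

static void list_addq(struct wait_node *head, struct wait_node *n)
{
	/* unconditional tail append, analogous in spirit to MT_LIST_ADDQ();
	 * only legal when the caller knows <n> is not queued, which holds
	 * here because the wait entry is exclusively owned by the caller.
	 */
	n->prev = head->prev;
	n->next = head;
	head->prev->next = n;
	head->prev = n;
}

int main(void)
{
	struct wait_node buffer_wq, waiter;

	node_init(&buffer_wq);
	node_init(&waiter);

	/* the pattern kept by the commit: test first, then plain append;
	 * a "try" variant would have to write to the element before it
	 * could notice that the element is already queued.
	 */
	if (!node_in_list(&waiter))
		list_addq(&buffer_wq, &waiter);

	/* a second attempt is filtered out by the read-only test alone */
	if (!node_in_list(&waiter))
		list_addq(&buffer_wq, &waiter);

	printf("waiter queued: %d\n", node_in_list(&waiter));
	return 0;
}

The point made by the commit message is the same: when the caller owns the wait entry, the plain append suffices, and the read-only test stays cheaper than the writes a try-variant would perform before it could bail out.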
parent de4db17dee
commit 8689127816
@@ -850,7 +850,7 @@ static inline int channel_alloc_buffer(struct channel *chn, struct buffer_wait *wait)
 		return 1;
 
 	if (!MT_LIST_ADDED(&wait->list))
-		MT_LIST_TRY_ADDQ(&buffer_wq, &wait->list);
+		MT_LIST_ADDQ(&buffer_wq, &wait->list);
 
 	return 0;
 }
@@ -2807,7 +2807,7 @@ spoe_acquire_buffer(struct buffer *buf, struct buffer_wait *buffer_wait)
 	if (b_alloc_margin(buf, global.tune.reserved_bufs))
 		return 1;
 
-	MT_LIST_TRY_ADDQ(&buffer_wq, &buffer_wait->list);
+	MT_LIST_ADDQ(&buffer_wq, &buffer_wait->list);
 	return 0;
 }
 
@@ -605,7 +605,7 @@ static inline struct buffer *fcgi_get_buf(struct fcgi_conn *fconn, struct buffer *bptr)
 	    unlikely((buf = b_alloc_margin(bptr, 0)) == NULL)) {
 		fconn->buf_wait.target = fconn;
 		fconn->buf_wait.wakeup_cb = fcgi_buf_available;
-		MT_LIST_TRY_ADDQ(&buffer_wq, &fconn->buf_wait.list);
+		MT_LIST_ADDQ(&buffer_wq, &fconn->buf_wait.list);
 	}
 	return buf;
 }
@@ -416,7 +416,7 @@ static inline struct buffer *h1_get_buf(struct h1c *h1c, struct buffer *bptr)
 	    unlikely((buf = b_alloc_margin(bptr, 0)) == NULL)) {
 		h1c->buf_wait.target = h1c;
 		h1c->buf_wait.wakeup_cb = h1_buf_available;
-		MT_LIST_TRY_ADDQ(&buffer_wq, &h1c->buf_wait.list);
+		MT_LIST_ADDQ(&buffer_wq, &h1c->buf_wait.list);
 	}
 	return buf;
 }
@@ -683,7 +683,7 @@ static inline struct buffer *h2_get_buf(struct h2c *h2c, struct buffer *bptr)
 	    unlikely((buf = b_alloc_margin(bptr, 0)) == NULL)) {
 		h2c->buf_wait.target = h2c;
 		h2c->buf_wait.wakeup_cb = h2_buf_available;
-		MT_LIST_TRY_ADDQ(&buffer_wq, &h2c->buf_wait.list);
+		MT_LIST_ADDQ(&buffer_wq, &h2c->buf_wait.list);
 	}
 	return buf;
 }
@@ -728,7 +728,7 @@ static int stream_alloc_work_buffer(struct stream *s)
 	if (b_alloc_margin(&s->res.buf, 0))
 		return 1;
 
-	MT_LIST_TRY_ADDQ(&buffer_wq, &s->buffer_wait.list);
+	MT_LIST_ADDQ(&buffer_wq, &s->buffer_wait.list);
 	return 0;
 }
 