mirror of http://git.haproxy.org/git/haproxy.git/
MINOR: server/event_hdl: add support for SERVER_UP and SERVER_DOWN events
We're using srv_update_status() as the only event source for UP/DOWN server
events, in an attempt to simplify the support for these 2 events. It seems
srv_update_status() is the common path for server state changes anyway.

Tested with the server state updated from various sources:
  - the cli
  - server-state file (maybe we could disable this, or at least not publish
    in the global event queue in the future, if it results in slower startup
    for setups relying on huge server state files)
  - dns records (ie: srv template) (again, this could be fine-tuned to only
    publish in the server-specific subscriber list, and no longer in the
    global subscription list, if mass dns updates tend to slow down
    srv_update_status())
  - normal checks and observe checks (HCHK_STATUS_HANA) (same as above, if
    check-related state-update storms are expected)
  - lua scripts
  - html stats page (admin mode)
commit 22f82f81e5
parent 129ecf441f
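For orientation, here is a condensed, non-literal view of where the new
publication calls land in srv_update_status() (src/server.c). The state
names and the srv_event_hdl_publish() calls are taken from the diff below;
everything else is paraphrased:

	/* condensed sketch, not a literal excerpt of srv_update_status() */
	if (s->cur_state != SRV_ST_STOPPED && s->next_state == SRV_ST_STOPPED) {
		/* no maintenance + server DOWN */
		srv_event_hdl_publish(EVENT_HDL_SUB_SERVER_DOWN, s, 0);
	}
	else if ((s->cur_state != SRV_ST_RUNNING && s->next_state == SRV_ST_RUNNING) ||
	         (s->cur_state != SRV_ST_STARTING && s->next_state == SRV_ST_STARTING)) {
		/* no maintenance + server going UP */
		srv_event_hdl_publish(EVENT_HDL_SUB_SERVER_UP, s, 0);
	}
	/* plus: SERVER DOWN when maintenance hits a previously running server,
	 * and SERVER UP when leaving maintenance with next_state != SRV_ST_STOPPED
	 */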
@@ -239,6 +239,8 @@ struct event_hdl_sub {
 #define EVENT_HDL_SUB_SERVER       EVENT_HDL_SUB_FAMILY(1)
 #define EVENT_HDL_SUB_SERVER_ADD   EVENT_HDL_SUB_TYPE(1,1)
 #define EVENT_HDL_SUB_SERVER_DEL   EVENT_HDL_SUB_TYPE(1,2)
+#define EVENT_HDL_SUB_SERVER_UP    EVENT_HDL_SUB_TYPE(1,3)
+#define EVENT_HDL_SUB_SERVER_DOWN  EVENT_HDL_SUB_TYPE(1,4)
 
 /* --------------------------------------- */
 
@@ -441,6 +441,7 @@ struct event_hdl_cb_data_server {
 		struct server *ptr;       /* server live ptr */
 		/* lock hints */
 		uint8_t thread_isolate;   /* 1 = thread_isolate is on, no locking required */
+		uint8_t srv_lock;         /* 1 = srv lock is held */
 	} unsafe;
 };
 
@@ -26,6 +26,8 @@ static struct event_hdl_sub_type_map event_hdl_sub_type_map[] = {
 	{"SERVER",      EVENT_HDL_SUB_SERVER},
 	{"SERVER_ADD",  EVENT_HDL_SUB_SERVER_ADD},
 	{"SERVER_DEL",  EVENT_HDL_SUB_SERVER_DEL},
+	{"SERVER_UP",   EVENT_HDL_SUB_SERVER_UP},
+	{"SERVER_DOWN", EVENT_HDL_SUB_SERVER_DOWN},
 };
 
 /* internal types (only used in this file) */
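As a consumer-side illustration, the sketch below shows how a hypothetical
handler for these events could honor the lock hints carried in
struct event_hdl_cb_data_server (see the hunk above). Only the struct fields
and the event semantics come from this commit; the handler name, its
signature, the is_up flag and the includes are assumptions made for the
example and are not part of the event_hdl subscription API:

#include <haproxy/errors.h>
#include <haproxy/proxy-t.h>
#include <haproxy/server-t.h>

/* hypothetical handler body, illustration only */
static void my_srv_state_handler(const struct event_hdl_cb_data_server *data, int is_up)
{
	const struct server *srv = data->unsafe.ptr;

	/* "unsafe" members may only be dereferenced while the publisher's
	 * guarantees still hold: either the publisher runs under
	 * thread_isolate, or it holds the server lock (srv_update_status()
	 * publishes with thread_isolate == 0 and srv_lock == 1), which is
	 * only true for a synchronous handler.
	 */
	if (data->unsafe.thread_isolate || data->unsafe.srv_lock)
		ha_notice("server %s/%s is now %s\n",
		          srv->proxy->id, srv->id, is_up ? "UP" : "DOWN");
}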
src/server.c: 17 changed lines
@@ -139,6 +139,7 @@ int srv_getinter(const struct check *check)
  * Event will be published in both global subscription list and
  * server dedicated subscription list
  * server ptr must be valid
+ * must be called with srv lock or under thread_isolate
  */
 static inline void srv_event_hdl_publish(struct event_hdl_sub_type event, struct server *srv, uint8_t thread_isolate)
 {
@@ -154,6 +155,7 @@ static inline void srv_event_hdl_publish(struct event_hdl_sub_type event, struct server *srv, uint8_t thread_isolate)
 	/* unsafe data assignments */
 	cb_data.unsafe.ptr = srv;
 	cb_data.unsafe.thread_isolate = thread_isolate;
+	cb_data.unsafe.srv_lock = !thread_isolate;
 	/* publish in server dedicated sub list */
 	event_hdl_publish(&srv->e_subs, event, EVENT_HDL_CB_DATA(&cb_data));
 	/* publish in global subscription list */
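A note on the thread_isolate argument: since srv_lock is derived as
!thread_isolate, srv_event_hdl_publish() assumes exactly one of the two
protections is in place, which is what the new comment above requires.
A minimal sketch of the two valid calling patterns, assuming the
HA_SPIN_LOCK(SERVER_LOCK, &srv->lock) convention used elsewhere in
src/server.c (the locking calls below are illustrative, not taken from
this commit):

	/* pattern 1: caller already runs under thread_isolate */
	srv_event_hdl_publish(EVENT_HDL_SUB_SERVER_DOWN, srv, 1);

	/* pattern 2: caller holds the server lock, as srv_update_status() does;
	 * the handler data then carries srv_lock = 1, thread_isolate = 0 */
	HA_SPIN_LOCK(SERVER_LOCK, &srv->lock);
	srv_event_hdl_publish(EVENT_HDL_SUB_SERVER_UP, srv, 0);
	HA_SPIN_UNLOCK(SERVER_LOCK, &srv->lock);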
@@ -5259,6 +5261,9 @@ static void srv_update_status(struct server *s)
 	s->next_admin = s->cur_admin;
 
 	if ((s->cur_state != SRV_ST_STOPPED) && (s->next_state == SRV_ST_STOPPED)) {
+		/* no maintenance + server DOWN: publish event SERVER DOWN */
+		srv_event_hdl_publish(EVENT_HDL_SUB_SERVER_DOWN, s, 0);
+
 		s->last_change = now.tv_sec;
 		if (s->proxy->lbprm.set_server_status_down)
 			s->proxy->lbprm.set_server_status_down(s);
@@ -5326,6 +5331,9 @@ static void srv_update_status(struct server *s)
 	}
 	else if (((s->cur_state != SRV_ST_RUNNING) && (s->next_state == SRV_ST_RUNNING))
 		 || ((s->cur_state != SRV_ST_STARTING) && (s->next_state == SRV_ST_STARTING))) {
+		/* no maintenance + server going UP: publish event SERVER UP */
+		srv_event_hdl_publish(EVENT_HDL_SUB_SERVER_UP, s, 0);
+
 		if (s->proxy->srv_bck == 0 && s->proxy->srv_act == 0) {
 			if (s->proxy->last_change < now.tv_sec)   // ignore negative times
 				s->proxy->down_time += now.tv_sec - s->proxy->last_change;
@@ -5452,6 +5460,9 @@ static void srv_update_status(struct server *s)
 			if (s->onmarkeddown & HANA_ONMARKEDDOWN_SHUTDOWNSESSIONS)
 				srv_shutdown_streams(s, SF_ERR_DOWN);
 
+			/* maintenance on previously running server: publish event SERVER DOWN */
+			srv_event_hdl_publish(EVENT_HDL_SUB_SERVER_DOWN, s, 0);
+
 			/* force connection cleanup on the given server */
 			srv_cleanup_connections(s);
 			/* we might have streams queued on this server and waiting for
@@ -5525,6 +5536,12 @@ static void srv_update_status(struct server *s)
 
 			}
 
+			/* ignore if server stays down when leaving maintenance mode */
+			if (s->next_state != SRV_ST_STOPPED) {
+				/* leaving maintenance + server UP: publish event SERVER UP */
+				srv_event_hdl_publish(EVENT_HDL_SUB_SERVER_UP, s, 0);
+			}
+
 			tmptrash = alloc_trash_chunk();
 			if (tmptrash) {
 				if (!(s->next_admin & SRV_ADMF_FMAINT) && (s->cur_admin & SRV_ADMF_FMAINT)) {