's effective
+ * weight. It may be called after a state change too.
+ */
+static void fas_update_server_weight(struct server *srv)
+{
+        int old_state, new_state;
+        struct proxy *p = srv->proxy;
+
+        if (srv->state == srv->prev_state &&
+            srv->eweight == srv->prev_eweight)
+                return;
+
+        /* If changing the server's weight changes its state, we simply apply
+         * the procedures we already have for status change. If the state
+         * remains down, the server is not in any tree, so it's as easy as
+         * updating its values. If the state remains up with different weights,
+         * there are some computations to perform to find a new place and
+         * possibly a new tree for this server.
+         */
+
+        old_state = srv_is_usable(srv->prev_state, srv->prev_eweight);
+        new_state = srv_is_usable(srv->state, srv->eweight);
+
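+        /* unusable->unusable: only resync the tracked state and weight;
+         * transitions into or out of the usable state are delegated to the
+         * regular status change handlers.
+         */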
+        if (!old_state && !new_state) {
+                srv->prev_state = srv->state;
+                srv->prev_eweight = srv->eweight;
+                return;
+        }
+        else if (!old_state && new_state) {
+                fas_set_server_status_up(srv);
+                return;
+        }
+        else if (old_state && !new_state) {
+                fas_set_server_status_down(srv);
+                return;
+        }
+
+        if (srv->lb_tree)
+                fas_dequeue_srv(srv);
+
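+        /* The server stays usable with a new weight: account the weight delta
+         * in its group's total and make it point to the right tree before
+         * requeueing it.
+         */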
+        if (srv->state & SRV_BACKUP) {
+                p->lbprm.tot_wbck += srv->eweight - srv->prev_eweight;
+                srv->lb_tree = &p->lbprm.fas.bck;
+        } else {
+                p->lbprm.tot_wact += srv->eweight - srv->prev_eweight;
+                srv->lb_tree = &p->lbprm.fas.act;
+        }
+
+        fas_queue_srv(srv);
+
+        update_backend_weight(p);
+        srv->prev_state = srv->state;
+        srv->prev_eweight = srv->eweight;
+}
+
+/* This function is responsible for building the FAS trees. It also sets
+ * p->lbprm.wdiv to the eweight to uweight ratio. Both active and backup
+ * groups are initialized.
+ */
+void fas_init_server_tree(struct proxy *p)
+{
+        struct server *srv;
+        struct eb_root init_head = EB_ROOT;
+
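+        /* These callbacks are invoked by the generic load-balancing code on
+         * server status changes, on run-time weight changes, and each time a
+         * connection is taken from or released by a server.
+         */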
+        p->lbprm.set_server_status_up = fas_set_server_status_up;
+        p->lbprm.set_server_status_down = fas_set_server_status_down;
+        p->lbprm.update_server_eweight = fas_update_server_weight;
+        p->lbprm.server_take_conn = fas_srv_reposition;
+        p->lbprm.server_drop_conn = fas_srv_reposition;
+
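+        /* wdiv is the ratio between effective and user weights; each server's
+         * eweight is scaled accordingly and its prev_* fields are synced so
+         * that later weight updates start from a consistent state.
+         */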
+        p->lbprm.wdiv = BE_WEIGHT_SCALE;
+        for (srv = p->srv; srv; srv = srv->next) {
+                srv->prev_eweight = srv->eweight = srv->uweight * BE_WEIGHT_SCALE;
+                srv->prev_state = srv->state;
+        }
+
+        recount_servers(p);
+        update_backend_weight(p);
+
+        p->lbprm.fas.act = init_head;
+        p->lbprm.fas.bck = init_head;
+
+        /* queue active and backup servers in two distinct groups */
+        for (srv = p->srv; srv; srv = srv->next) {
+                if (!srv_is_usable(srv->state, srv->eweight))
+                        continue;
+                srv->lb_tree = (srv->state & SRV_BACKUP) ? &p->lbprm.fas.bck : &p->lbprm.fas.act;
+                fas_queue_srv(srv);
+        }
+}
+
+/* Return next server from the FAS tree in backend <p>. If the tree is empty,
+ * return NULL. Saturated servers are skipped.
+ */
+struct server *fas_get_next_server(struct proxy *p, struct server *srvtoavoid)
+{
+        struct server *srv, *avoided;
+        struct eb32_node *node;
+
+        srv = avoided = NULL;
+
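+        /* Pick the tree to walk: the active tree as long as at least one
+         * active server is usable, otherwise the backup server designated by
+         * fbck if any, otherwise the backup tree.
+         */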
+        if (p->srv_act)
+                node = eb32_first(&p->lbprm.fas.act);
+        else if (p->lbprm.fbck)
+                return p->lbprm.fbck;
+        else if (p->srv_bck)
+                node = eb32_first(&p->lbprm.fas.bck);
+        else
+                return NULL;
+
+        while (node) {
+                /* OK, we have a server. However, it may be saturated, in which
+                 * case we don't want to reconsider it for now, so we'll simply
+                 * skip it. Same if it's the server we try to avoid, in which
+                 * case we simply remember it for later use if needed.
+                 */
+                struct server *s;
+
+                s = eb32_entry(node, struct server, lb_node);
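+                /* A server is saturated when it has a maxconn set and either
+                 * requests are already queued on it or it is serving at least
+                 * its dynamic maxconn.
+                 */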
+                if (!s->maxconn || (!s->nbpend && s->served < srv_dynamic_maxconn(s))) {
+                        if (s != srvtoavoid) {
+                                srv = s;
+                                break;
+                        }
+                        avoided = s;
+                }
+                node = eb32_next(node);
+        }
+
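+        /* If the only usable server found was the one to avoid, better use it
+         * than nothing.
+         */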
+        if (!srv)
+                srv = avoided;
+
+        return srv;
+}
+
+
+/*
+ * Local variables:
+ * c-indent-level: 8
+ * c-basic-offset: 8
+ * End:
+ */