/*
 * include/proto/session.h
 * This file defines everything related to sessions.
 *
 * Copyright (C) 2000-2010 Willy Tarreau - w@1wt.eu
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation, version 2.1
 * exclusively.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#ifndef _PROTO_SESSION_H
#define _PROTO_SESSION_H

#include <common/config.h>
#include <common/memory.h>
#include <types/session.h>
#include <proto/freq_ctr.h>
#include <proto/stick_table.h>

extern struct pool_head *pool2_session;
extern struct list sessions;

extern struct data_cb sess_conn_cb;

int session_accept(struct listener *l, int cfd, struct sockaddr_storage *addr);

/* perform minimal initializations, report 0 in case of error, 1 if OK. */
int init_session();

/* kill a session and set the termination flags to <why> (one of SN_ERR_*) */
void session_shutdown(struct session *session, int why);

void session_process_counters(struct session *s);
void sess_change_server(struct session *sess, struct server *newsrv);
struct task *process_session(struct task *t);
void default_srv_error(struct session *s, struct stream_interface *si);
int parse_track_counters(char **args, int *arg,
                         int section_type, struct proxy *curpx,
                         struct track_ctr_prm *prm,
                         struct proxy *defpx, char **err);

/* returns the session associated with the task's context */
static inline struct session *session_from_task(struct task *t)
{
	return (struct session *)t->context;
}
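
/* Usage sketch (illustrative, not part of the original header): a task whose
 * context points to its session can recover it from within its handler. The
 * handler name below is hypothetical.
 *
 *	struct task *my_task_handler(struct task *t)
 *	{
 *		struct session *s = session_from_task(t);
 *
 *		// ... work on <s> ...
 *		return t;
 *	}
 */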

/* Remove the refcount from the session to the tracked counters, and clear the
 * pointer to ensure this is only performed once. The caller is responsible for
 * ensuring that the pointer is valid first.
 */
static inline void session_store_counters(struct session *s)
{
	void *ptr;
	int i;

	for (i = 0; i < MAX_SESS_STKCTR; i++) {
		if (!s->stkctr[i].entry)
			continue;
		ptr = stktable_data_ptr(s->stkctr[i].table, s->stkctr[i].entry, STKTABLE_DT_CONN_CUR);
		if (ptr)
			stktable_data_cast(ptr, conn_cur)--;
		s->stkctr[i].entry->ref_cnt--;
		stksess_kill_if_expired(s->stkctr[i].table, s->stkctr[i].entry);
		s->stkctr[i].entry = NULL;
	}
}
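
/* Illustrative note (not from the original header): this helper is normally
 * called once, from the session release path, so that each tracked entry's
 * ref_cnt drops and stksess_kill_if_expired() can reclaim expired entries.
 * A minimal sketch of such a call site (function name assumed):
 *
 *	static void release_session(struct session *s)
 *	{
 *		session_store_counters(s);
 *		// ... free the remaining session resources ...
 *	}
 */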

/* Remove the refcount from the session counters tracked only by the backend if
 * any, and clear the pointer to ensure this is only performed once. The caller
 * is responsible for ensuring that the pointer is valid first.
 */
static inline void session_stop_backend_counters(struct session *s)
{
	void *ptr;
	int i;

	if (likely(!(s->flags & SN_BE_TRACK_ANY)))
		return;

	for (i = 0; i < MAX_SESS_STKCTR; i++) {
		if (!s->stkctr[i].entry)
			continue;

		if (!(s->flags & (SN_BE_TRACK_SC0 << i)))
			continue;

		ptr = stktable_data_ptr(s->stkctr[i].table, s->stkctr[i].entry, STKTABLE_DT_CONN_CUR);
		if (ptr)
			stktable_data_cast(ptr, conn_cur)--;
		s->stkctr[i].entry->ref_cnt--;
		stksess_kill_if_expired(s->stkctr[i].table, s->stkctr[i].entry);
		s->stkctr[i].entry = NULL;
	}
	s->flags &= ~SN_BE_TRACK_ANY;
}

/* Increase total and concurrent connection count for stick entry <ts> of table
 * <t>. The caller is responsible for ensuring that <t> and <ts> are valid
 * pointers, and for calling this only once per connection.
 */
static inline void session_start_counters(struct stktable *t, struct stksess *ts)
{
	void *ptr;

	ptr = stktable_data_ptr(t, ts, STKTABLE_DT_CONN_CUR);
	if (ptr)
		stktable_data_cast(ptr, conn_cur)++;

	ptr = stktable_data_ptr(t, ts, STKTABLE_DT_CONN_CNT);
	if (ptr)
		stktable_data_cast(ptr, conn_cnt)++;

	ptr = stktable_data_ptr(t, ts, STKTABLE_DT_CONN_RATE);
	if (ptr)
		update_freq_ctr_period(&stktable_data_cast(ptr, conn_rate),
				       t->data_arg[STKTABLE_DT_CONN_RATE].u, 1);
	if (tick_isset(t->expire))
		ts->expire = tick_add(now_ms, MS_TO_TICKS(t->expire));
}
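
/* Illustrative sketch (not part of the original header): once updated above,
 * the connection rate can be read back over the table's configured period,
 * assuming the conn_rate data type is stored in table <t>:
 *
 *	void *ptr = stktable_data_ptr(t, ts, STKTABLE_DT_CONN_RATE);
 *	unsigned int rate = 0;
 *
 *	if (ptr)
 *		rate = read_freq_ctr_period(&stktable_data_cast(ptr, conn_rate),
 *					    t->data_arg[STKTABLE_DT_CONN_RATE].u);
 */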

/* Enable tracking of session counters on stksess <ts> by stick counter <ctr>.
 * The caller is responsible for ensuring that <t> and <ts> are valid pointers.
 * Some controls are performed to ensure the state can still change.
 */
static inline void session_track_stkctr(struct stkctr *ctr, struct stktable *t, struct stksess *ts)
{
	if (ctr->entry)
		return;

	ts->ref_cnt++;
	ctr->table = t;
	ctr->entry = ts;
	session_start_counters(t, ts);
}
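
/* Usage sketch (illustrative only): a "track" style rule would typically look
 * up or allocate the entry in the target table, then attach it to one of the
 * session's stick counters. The stktable_get_entry() lookup below is an
 * assumption about the stick-table API, not taken from this header.
 *
 *	struct stksess *ts = stktable_get_entry(t, key);
 *
 *	if (ts)
 *		session_track_stkctr(&s->stkctr[0], t, ts);
 */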

/* Increase the number of cumulated HTTP requests in the tracked counters */
static void inline session_inc_http_req_ctr(struct session *s)
{
	void *ptr;
	int i;

	for (i = 0; i < MAX_SESS_STKCTR; i++) {
		if (!s->stkctr[i].entry)
			continue;

		ptr = stktable_data_ptr(s->stkctr[i].table, s->stkctr[i].entry, STKTABLE_DT_HTTP_REQ_CNT);
		if (ptr)
			stktable_data_cast(ptr, http_req_cnt)++;

		ptr = stktable_data_ptr(s->stkctr[i].table, s->stkctr[i].entry, STKTABLE_DT_HTTP_REQ_RATE);
		if (ptr)
			update_freq_ctr_period(&stktable_data_cast(ptr, http_req_rate),
					       s->stkctr[i].table->data_arg[STKTABLE_DT_HTTP_REQ_RATE].u, 1);
	}
}

/* Increase the number of cumulated HTTP requests in the backend's tracked counters */
static void inline session_inc_be_http_req_ctr(struct session *s)
{
	void *ptr;
	int i;

	if (likely(!(s->flags & SN_BE_TRACK_ANY)))
		return;

	for (i = 0; i < MAX_SESS_STKCTR; i++) {
		if (!s->stkctr[i].entry)
			continue;

		if (!(s->flags & (SN_BE_TRACK_SC0 << i)))
			continue;

		ptr = stktable_data_ptr(s->stkctr[i].table, s->stkctr[i].entry, STKTABLE_DT_HTTP_REQ_CNT);
		if (ptr)
			stktable_data_cast(ptr, http_req_cnt)++;

		ptr = stktable_data_ptr(s->stkctr[i].table, s->stkctr[i].entry, STKTABLE_DT_HTTP_REQ_RATE);
		if (ptr)
			update_freq_ctr_period(&stktable_data_cast(ptr, http_req_rate),
					       s->stkctr[i].table->data_arg[STKTABLE_DT_HTTP_REQ_RATE].u, 1);
	}
}

/* Increase the number of cumulated failed HTTP requests in the tracked
 * counters. Only 4xx requests should be counted here so that we can
 * distinguish between errors caused by client behaviour and other ones.
 * Note that even 404 are interesting because they're generally caused by
 * vulnerability scans.
 */
static void inline session_inc_http_err_ctr(struct session *s)
{
	void *ptr;
	int i;

	for (i = 0; i < MAX_SESS_STKCTR; i++) {
		if (!s->stkctr[i].entry)
			continue;

		ptr = stktable_data_ptr(s->stkctr[i].table, s->stkctr[i].entry, STKTABLE_DT_HTTP_ERR_CNT);
		if (ptr)
			stktable_data_cast(ptr, http_err_cnt)++;

		ptr = stktable_data_ptr(s->stkctr[i].table, s->stkctr[i].entry, STKTABLE_DT_HTTP_ERR_RATE);
		if (ptr)
			update_freq_ctr_period(&stktable_data_cast(ptr, http_err_rate),
					       s->stkctr[i].table->data_arg[STKTABLE_DT_HTTP_ERR_RATE].u, 1);
	}
}
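
/* Illustrative sketch (not from the original header): per the comment above,
 * the HTTP response analyser would typically bump this counter only for 4xx
 * statuses, e.g. with <txn> assumed to be the session's HTTP transaction:
 *
 *	if (txn->status >= 400 && txn->status < 500)
 *		session_inc_http_err_ctr(s);
 */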

/* Attach session <sess> to server <srv>'s list of active sessions */
static void inline session_add_srv_conn(struct session *sess, struct server *srv)
{
	sess->srv_conn = srv;
	LIST_ADD(&srv->actconns, &sess->by_srv);
}

/* Detach the session from the server it is attached to, if any */
static void inline session_del_srv_conn(struct session *sess)
{
	if (!sess->srv_conn)
		return;

	sess->srv_conn = NULL;
	LIST_DEL(&sess->by_srv);
}

/* Initialize the session's server-connection tracking so that it is attached
 * to no server.
 */
static void inline session_init_srv_conn(struct session *sess)
{
	sess->srv_conn = NULL;
	LIST_INIT(&sess->by_srv);
}
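
/* Illustrative note (not from the original header): the three helpers above
 * are meant to be used together over a session's lifetime; the call sites
 * below are assumptions, not actual function names:
 *
 *	session_init_srv_conn(sess);      // when the session is created
 *	session_add_srv_conn(sess, srv);  // when a server connection is assigned
 *	session_del_srv_conn(sess);       // when the server is released
 */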

#endif /* _PROTO_SESSION_H */

/*
 * Local variables:
 *  c-indent-level: 8
 *  c-basic-offset: 8
 * End:
 */