/*
 * stream connector management functions
 *
 * Copyright 2021 Christopher Faulet <cfaulet@haproxy.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 */

#include <haproxy/api.h>
#include <haproxy/applet.h>
#include <haproxy/connection.h>
#include <haproxy/conn_stream.h>
#include <haproxy/cs_utils.h>
#include <haproxy/check.h>
#include <haproxy/http_ana.h>
#include <haproxy/pipe.h>
#include <haproxy/pool.h>

DECLARE_POOL(pool_head_connstream, "stconn", sizeof(struct stconn));
DECLARE_POOL(pool_head_sedesc, "sedesc", sizeof(struct sedesc));

/* functions used by default on a detached stream connector */
static void sc_app_shutr(struct stconn *cs);
static void sc_app_shutw(struct stconn *cs);
static void sc_app_chk_rcv(struct stconn *cs);
static void sc_app_chk_snd(struct stconn *cs);

/* functions used on a mux-based stream connector */
static void sc_app_shutr_conn(struct stconn *cs);
static void sc_app_shutw_conn(struct stconn *cs);
static void sc_app_chk_rcv_conn(struct stconn *cs);
static void sc_app_chk_snd_conn(struct stconn *cs);

/* functions used on an applet-based stream connector */
static void sc_app_shutr_applet(struct stconn *cs);
static void sc_app_shutw_applet(struct stconn *cs);
static void sc_app_chk_rcv_applet(struct stconn *cs);
static void sc_app_chk_snd_applet(struct stconn *cs);

static int sc_conn_process(struct stconn *cs);
static int sc_conn_recv(struct stconn *cs);
static int sc_conn_send(struct stconn *cs);
static int cs_applet_process(struct stconn *cs);

/* stream connector operations for connections */
struct sc_app_ops sc_app_conn_ops = {
	.chk_rcv = sc_app_chk_rcv_conn,
	.chk_snd = sc_app_chk_snd_conn,
	.shutr = sc_app_shutr_conn,
	.shutw = sc_app_shutw_conn,
	.wake = sc_conn_process,
	.name = "STRM",
};

/* stream connector operations for embedded tasks */
struct sc_app_ops sc_app_embedded_ops = {
	.chk_rcv = sc_app_chk_rcv,
	.chk_snd = sc_app_chk_snd,
	.shutr = sc_app_shutr,
	.shutw = sc_app_shutw,
	.wake = NULL,   /* may never be used */
	.name = "NONE", /* may never be used */
};

/* stream connector operations for applets */
struct sc_app_ops sc_app_applet_ops = {
	.chk_rcv = sc_app_chk_rcv_applet,
	.chk_snd = sc_app_chk_snd_applet,
	.shutr = sc_app_shutr_applet,
	.shutw = sc_app_shutw_applet,
	.wake = cs_applet_process,
	.name = "STRM",
};

/* stream connector operations for health checks on connections */
struct sc_app_ops sc_app_check_ops = {
	.chk_rcv = NULL,
	.chk_snd = NULL,
	.shutr = NULL,
	.shutw = NULL,
	.wake = wake_srv_chk,
	.name = "CHCK",
};

2022-04-04 06:58:34 +00:00
|
|
|
|
2022-04-12 06:51:15 +00:00
|
|
|
/* Initializes an endpoint */
|
2022-05-17 15:53:22 +00:00
|
|
|
void sedesc_init(struct sedesc *sedesc)
|
2022-03-22 15:06:25 +00:00
|
|
|
{
|
2022-05-17 15:53:22 +00:00
|
|
|
sedesc->se = NULL;
|
|
|
|
sedesc->conn = NULL;
|
2022-05-18 05:43:52 +00:00
|
|
|
sedesc->sc = NULL;
|
2022-05-17 15:53:22 +00:00
|
|
|
se_fl_setall(sedesc, SE_FL_NONE);
|
2022-03-22 15:06:25 +00:00
|
|
|
}
|
|
|
|
|
2022-04-12 06:51:15 +00:00
|
|
|
/* Tries to alloc an endpoint and initialize it. Returns NULL on failure. */
|
2022-05-17 15:53:22 +00:00
|
|
|
struct sedesc *sedesc_new()
|
2022-03-22 15:06:25 +00:00
|
|
|
{
|
2022-05-17 15:53:22 +00:00
|
|
|
struct sedesc *sedesc;
|
2022-03-22 15:06:25 +00:00
|
|
|
|
2022-05-17 15:53:22 +00:00
|
|
|
sedesc = pool_alloc(pool_head_sedesc);
|
|
|
|
if (unlikely(!sedesc))
|
2022-03-22 15:06:25 +00:00
|
|
|
return NULL;
|
|
|
|
|
2022-05-17 15:53:22 +00:00
|
|
|
sedesc_init(sedesc);
|
|
|
|
return sedesc;
|
2022-03-22 15:06:25 +00:00
|
|
|
}
|
|
|
|
|
2022-04-12 06:51:15 +00:00
|
|
|
/* Releases an endpoint. It is the caller responsibility to be sure it is safe
|
|
|
|
* and it is not shared with another entity
|
|
|
|
*/
|
2022-05-17 15:53:22 +00:00
|
|
|
void sedesc_free(struct sedesc *sedesc)
|
2022-03-22 15:06:25 +00:00
|
|
|
{
|
2022-05-17 15:53:22 +00:00
|
|
|
pool_free(pool_head_sedesc, sedesc);
|
2022-03-22 15:06:25 +00:00
|
|
|
}
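
/* For illustration, a minimal sketch of the endpoint descriptor lifecycle as
 * implemented above: allocate from the pool, use, then release. The error
 * handling shown is a hypothetical caller's, not part of this file.
 *
 *	struct sedesc *sd = sedesc_new();
 *	if (!sd)
 *		return -1;              // allocation failed, nothing to clean up
 *	// ... attach <sd> to a mux or applet, adjust its SE_FL_* flags ...
 *	sedesc_free(sd);                // only once it is no longer shared
 */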

/* Tries to allocate a new stconn and initialize its main fields. On
 * failure, nothing is allocated and NULL is returned. It is an internal
 * function. The caller must, at least, set the SE_FL_ORPHAN or SE_FL_DETACHED
 * flag.
 */
static struct stconn *cs_new(struct sedesc *sedesc)
{
	struct stconn *cs;

	cs = pool_alloc(pool_head_connstream);

	if (unlikely(!cs))
		goto alloc_error;

	cs->obj_type = OBJ_TYPE_CS;
	cs->flags = SC_FL_NONE;
	cs->state = SC_ST_INI;
	cs->hcto = TICK_ETERNITY;
	cs->app = NULL;
	cs->app_ops = NULL;
	cs->src = NULL;
	cs->dst = NULL;
	cs->wait_event.tasklet = NULL;
	cs->wait_event.events = 0;

	/* If there is no endpoint, allocate a new one now */
	if (!sedesc) {
		sedesc = sedesc_new();
		if (unlikely(!sedesc))
			goto alloc_error;
	}
	cs->sedesc = sedesc;
	sedesc->sc = cs;

	return cs;

  alloc_error:
	pool_free(pool_head_connstream, cs);
	return NULL;
}
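
/* A minimal sketch of how the internal constructor above is meant to be used
 * by the public constructors that follow: the caller must mark the endpoint
 * as orphan or detached right after allocation. Purely illustrative.
 *
 *	struct stconn *cs = cs_new(NULL);    // allocates a fresh sedesc too
 *	if (!cs)
 *		return NULL;
 *	sc_ep_set(cs, SE_FL_DETACHED);       // mandatory per cs_new()'s contract
 */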

/* Creates a new stream connector and its associated stream from a mux. <sedesc> must
 * be defined. It returns NULL on error. On success, the new stream connector is
 * returned. In this case, the SE_FL_ORPHAN flag is removed.
 */
struct stconn *cs_new_from_endp(struct sedesc *sedesc, struct session *sess, struct buffer *input)
{
	struct stconn *cs;

	cs = cs_new(sedesc);
	if (unlikely(!cs))
		return NULL;
	if (unlikely(!stream_new(sess, cs, input))) {
		pool_free(pool_head_connstream, cs);
		cs = NULL;
	}
	se_fl_clr(sedesc, SE_FL_ORPHAN);
	return cs;
}

/* Creates a new stream connector from a stream. There is no endpoint here, thus it
 * will be created by cs_new(). So the SE_FL_DETACHED flag is set. It returns
 * NULL on error. On success, the new stream connector is returned.
 */
struct stconn *cs_new_from_strm(struct stream *strm, unsigned int flags)
{
	struct stconn *cs;

	cs = cs_new(NULL);
	if (unlikely(!cs))
		return NULL;
	cs->flags |= flags;
	sc_ep_set(cs, SE_FL_DETACHED);
	cs->app = &strm->obj_type;
	cs->app_ops = &sc_app_embedded_ops;
	return cs;
}

/* Creates a new stream connector from a health-check. There is no endpoint here,
 * thus it will be created by cs_new(). So the SE_FL_DETACHED flag is set. It
 * returns NULL on error. On success, the new stream connector is returned.
 */
struct stconn *cs_new_from_check(struct check *check, unsigned int flags)
{
	struct stconn *cs;

	cs = cs_new(NULL);
	if (unlikely(!cs))
		return NULL;
	cs->flags |= flags;
	sc_ep_set(cs, SE_FL_DETACHED);
	cs->app = &check->obj_type;
	cs->app_ops = &sc_app_check_ops;
	return cs;
}
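
/* Illustrative sketch (hypothetical caller): a health-check would typically
 * obtain its connector this way before a connection is attached to it. The
 * <fail> label and flag choice are assumptions, not code from this file.
 *
 *	check->cs = cs_new_from_check(check, SC_FL_ISBACK);
 *	if (!check->cs)
 *		goto fail;                   // nothing was allocated
 */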

/* Releases a stconn previously allocated by cs_new(), as well as its
 * endpoint, if it exists. This function is called internally or on the error path.
 */
void cs_free(struct stconn *cs)
{
	sockaddr_free(&cs->src);
	sockaddr_free(&cs->dst);
	if (cs->sedesc) {
		BUG_ON(!sc_ep_test(cs, SE_FL_DETACHED));
		sedesc_free(cs->sedesc);
	}
	if (cs->wait_event.tasklet)
		tasklet_free(cs->wait_event.tasklet);
	pool_free(pool_head_connstream, cs);
}

/* Conditionally removes a stream connector if it is detached and if there is no app
 * layer defined. Except on the error path, this one must be used. If released, the
 * pointer to the CS is set to NULL.
 */
static void cs_free_cond(struct stconn **csp)
{
	struct stconn *cs = *csp;

	if (!cs->app && (!cs->sedesc || sc_ep_test(cs, SE_FL_DETACHED))) {
		cs_free(cs);
		*csp = NULL;
	}
}


/* Attaches a stconn to a mux endpoint and sets the endpoint ctx. Returns
 * -1 on error and 0 on success. The SE_FL_DETACHED flag is removed. This function is
 * called from a mux when it is attached to a stream or a health-check.
 */
int cs_attach_mux(struct stconn *cs, void *endp, void *ctx)
{
	struct connection *conn = ctx;
	struct sedesc *sedesc = cs->sedesc;

	sedesc->se = endp;
	sedesc->conn = ctx;
	se_fl_set(sedesc, SE_FL_T_MUX);
	se_fl_clr(sedesc, SE_FL_DETACHED);
	if (!conn->ctx)
		conn->ctx = cs;
	if (sc_strm(cs)) {
		if (!cs->wait_event.tasklet) {
			cs->wait_event.tasklet = tasklet_new();
			if (!cs->wait_event.tasklet)
				return -1;
			cs->wait_event.tasklet->process = sc_conn_io_cb;
			cs->wait_event.tasklet->context = cs;
			cs->wait_event.events = 0;
		}

		cs->app_ops = &sc_app_conn_ops;
	}
	else if (sc_check(cs)) {
		if (!cs->wait_event.tasklet) {
			cs->wait_event.tasklet = tasklet_new();
			if (!cs->wait_event.tasklet)
				return -1;
			cs->wait_event.tasklet->process = srv_chk_io_cb;
			cs->wait_event.tasklet->context = cs;
			cs->wait_event.events = 0;
		}

		cs->app_ops = &sc_app_check_ops;
	}
	return 0;
}
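
/* Illustrative sketch, assuming a hypothetical mux whose ->attach() just
 * created its stream-level context <sd>: the mux hands both the endpoint and
 * the connection to the connector in one call.
 *
 *	if (cs_attach_mux(cs, sd, conn) < 0)
 *		return -1;                   // tasklet allocation failed
 *	// from here the endpoint is no longer SE_FL_DETACHED and I/Os are
 *	// driven by sc_conn_io_cb (or srv_chk_io_cb for health checks)
 */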

/* Attaches a stconn to an applet endpoint and sets the endpoint
 * ctx. The SE_FL_DETACHED flag is removed. This function is called by a
 * stream when a backend applet is registered.
 */
static void cs_attach_applet(struct stconn *cs, void *endp)
{
	cs->sedesc->se = endp;
	sc_ep_set(cs, SE_FL_T_APPLET);
	sc_ep_clr(cs, SE_FL_DETACHED);
	if (sc_strm(cs))
		cs->app_ops = &sc_app_applet_ops;
}

/* Attaches a stconn to an app layer and sets the relevant
 * callbacks. Returns -1 on error and 0 on success. The SE_FL_ORPHAN flag is
 * removed. This function is called by a stream when it is created, to attach it
 * to the stream connector on the client side.
 */
int cs_attach_strm(struct stconn *cs, struct stream *strm)
{
	cs->app = &strm->obj_type;
	sc_ep_clr(cs, SE_FL_ORPHAN);
	if (sc_ep_test(cs, SE_FL_T_MUX)) {
		cs->wait_event.tasklet = tasklet_new();
		if (!cs->wait_event.tasklet)
			return -1;
		cs->wait_event.tasklet->process = sc_conn_io_cb;
		cs->wait_event.tasklet->context = cs;
		cs->wait_event.events = 0;

		cs->app_ops = &sc_app_conn_ops;
	}
	else if (sc_ep_test(cs, SE_FL_T_APPLET)) {
		cs->app_ops = &sc_app_applet_ops;
	}
	else {
		cs->app_ops = &sc_app_embedded_ops;
	}
	return 0;
}
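
/* Illustrative sketch (simplified from how stream creation is expected to use
 * it): once the stream exists, it binds itself to the connector and the
 * proper app_ops are selected from the endpoint type. The <fail> label is a
 * placeholder belonging to the hypothetical caller.
 *
 *	if (cs_attach_strm(cs, strm) < 0)
 *		goto fail;                   // tasklet allocation failed (mux case)
 */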

/* Detaches the stconn from the endpoint, if any. For a connection, if a
 * mux owns the connection, the ->detach() callback is called. Otherwise, it means
 * the stream connector owns the connection. In this case the connection is closed
 * and released. For an applet, the appctx is released. If still allocated, the
 * endpoint is reset and flagged as detached. If the app layer is also detached,
 * the stream connector is released.
 */
static void cs_detach_endp(struct stconn **csp)
{
	struct stconn *cs = *csp;

	if (!cs)
		return;

	if (!cs->sedesc)
		goto reset_cs;

	if (sc_ep_test(cs, SE_FL_T_MUX)) {
		struct connection *conn = __sc_conn(cs);
		struct sedesc *sedesc = cs->sedesc;

		if (conn->mux) {
			if (cs->wait_event.events != 0)
				conn->mux->unsubscribe(cs, cs->wait_event.events, &cs->wait_event);
			se_fl_set(sedesc, SE_FL_ORPHAN);
			sedesc->sc = NULL;
			cs->sedesc = NULL;
			conn->mux->detach(sedesc);
		}
		else {
			/* It's too early to have a mux, let's just destroy
			 * the connection
			 */
			conn_stop_tracking(conn);
			conn_full_close(conn);
			if (conn->destroy_cb)
				conn->destroy_cb(conn);
			conn_free(conn);
		}
	}
	else if (sc_ep_test(cs, SE_FL_T_APPLET)) {
		struct appctx *appctx = __sc_appctx(cs);

		sc_ep_set(cs, SE_FL_ORPHAN);
		cs->sedesc->sc = NULL;
		cs->sedesc = NULL;
		appctx_shut(appctx);
		appctx_free(appctx);
	}

	if (cs->sedesc) {
		/* the cs is the only one on the endpoint */
		cs->sedesc->se = NULL;
		cs->sedesc->conn = NULL;
		sc_ep_clr(cs, ~SE_FL_APP_MASK);
		sc_ep_set(cs, SE_FL_DETACHED);
	}

  reset_cs:
	/* FIXME: Reset the CS for now but this must be reviewed. CS flags are only
	 * connection related for now but this will evolve.
	 */
	cs->flags &= SC_FL_ISBACK;
	if (sc_strm(cs))
		cs->app_ops = &sc_app_embedded_ops;
	else
		cs->app_ops = NULL;
	cs_free_cond(csp);
}

/* Detaches the stconn from the app layer. If there is no endpoint attached
 * to the stconn, it is released.
 */
static void cs_detach_app(struct stconn **csp)
{
	struct stconn *cs = *csp;

	if (!cs)
		return;

	cs->app = NULL;
	cs->app_ops = NULL;
	sockaddr_free(&cs->src);
	sockaddr_free(&cs->dst);

	if (cs->wait_event.tasklet)
		tasklet_free(cs->wait_event.tasklet);
	cs->wait_event.tasklet = NULL;
	cs->wait_event.events = 0;
	cs_free_cond(csp);
}

/* Destroys the stconn. It is detached from its endpoint and its
 * application. After this call, the stconn must be considered as released.
 */
void cs_destroy(struct stconn *cs)
{
	cs_detach_endp(&cs);
	cs_detach_app(&cs);
	BUG_ON_HOT(cs);
}
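
/* Illustrative usage note: the detach helpers above NULL their local copy of
 * the pointer via cs_free_cond(), so after cs_destroy() the caller's pointer
 * is dangling and must not be reused. A typical release path looks like:
 *
 *	cs_destroy(cs);
 *	cs = NULL;                           // <cs> must not be used anymore
 */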

/* Resets the stream connector endpoint. It happens when the app layer wants
 * to renew its endpoint, for instance on a connection retry. If a mux or an applet is
 * attached, a new endpoint is created. Returns -1 on error and 0 on success.
 *
 * Only the SE_FL_ERROR flag is removed on the endpoint. Other flags are preserved.
 * It is the caller's responsibility to remove other flags if needed.
 */
int cs_reset_endp(struct stconn *cs)
{
	struct sedesc *new_endp;

	BUG_ON(!cs->app);

	sc_ep_clr(cs, SE_FL_ERROR);
	if (!__sc_endp(cs)) {
		/* endpoint not attached or attached to a mux with no
		 * target. Thus the endpoint will not be released but just
		 * reset. The app is still attached, the cs will not be
		 * released.
		 */
		cs_detach_endp(&cs);
		return 0;
	}

	/* allocate the new endpoint first to be able to set an error if it
	 * fails */
	new_endp = sedesc_new();
	if (unlikely(!new_endp)) {
		sc_ep_set(cs, SE_FL_ERROR);
		return -1;
	}
	se_fl_setall(new_endp, sc_ep_get(cs) & SE_FL_APP_MASK);

	/* The app is still attached, the cs will not be released */
	cs_detach_endp(&cs);
	BUG_ON(cs->sedesc);
	cs->sedesc = new_endp;
	cs->sedesc->sc = cs;
	sc_ep_set(cs, SE_FL_DETACHED);
	return 0;
}
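
/* Illustrative sketch of a connection-retry path (hypothetical caller): the
 * app layer keeps its connector but trades the failed endpoint for a fresh,
 * detached one before reconnecting.
 *
 *	if (cs_reset_endp(cs) < 0)
 *		return -1;                   // SE_FL_ERROR was set on the endpoint
 *	// cs->sedesc is now a brand new, SE_FL_DETACHED endpoint carrying
 *	// only the SE_FL_APP_MASK bits of the old one
 */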


/* Creates an applet to handle a stream connector as a new appctx. The CS will
 * wake it up every time it is solicited. The appctx must be deleted by the task
 * handler using cs_detach_endp(), possibly from within the function itself.
 * It also pre-initializes the applet's context and returns it (or NULL in case
 * it could not be allocated).
 */
struct appctx *cs_applet_create(struct stconn *cs, struct applet *app)
{
	struct appctx *appctx;

	DPRINTF(stderr, "registering handler %p for cs %p (was %p)\n", app, cs, sc_strm_task(cs));

	appctx = appctx_new_here(app, cs->sedesc);
	if (!appctx)
		return NULL;
	cs_attach_applet(cs, appctx);
	appctx->t->nice = __sc_strm(cs)->task->nice;
	cs_cant_get(cs);
	appctx_wakeup(appctx);

	cs->state = SC_ST_RDY;
	return appctx;
}
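
/* Illustrative sketch (hypothetical applet <my_applet>): spawning an applet
 * on a stream connector. On success the connector jumps straight to SC_ST_RDY
 * and the appctx has already been scheduled for a first wake-up.
 *
 *	struct appctx *appctx = cs_applet_create(cs, &my_applet);
 *	if (!appctx)
 *		return -1;                   // allocation failed, <cs> unchanged
 */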

/*
 * This function performs a shutdown-read on a detached stream connector in a
 * connected or init state (it does nothing for other states). It either shuts
 * the read side or marks itself as closed. The buffer flags are updated to
 * reflect the new state. If the stream connector has SC_FL_NOHALF, we also
 * forward the close to the write side. The owner task is woken up if it exists.
 */
static void sc_app_shutr(struct stconn *cs)
{
	struct channel *ic = sc_ic(cs);

	if (ic->flags & CF_SHUTR)
		return;
	ic->flags |= CF_SHUTR;
	ic->rex = TICK_ETERNITY;

	if (!cs_state_in(cs->state, SC_SB_CON|SC_SB_RDY|SC_SB_EST))
		return;

	if (sc_oc(cs)->flags & CF_SHUTW) {
		cs->state = SC_ST_DIS;
		__sc_strm(cs)->conn_exp = TICK_ETERNITY;
	}
	else if (cs->flags & SC_FL_NOHALF) {
		/* we want to immediately forward this close to the write side */
		return sc_app_shutw(cs);
	}

	/* note that if the task exists, it must unregister itself once it runs */
	if (!(cs->flags & SC_FL_DONT_WAKE))
		task_wakeup(sc_strm_task(cs), TASK_WOKEN_IO);
}

/*
 * This function performs a shutdown-write on a detached stream connector in a
 * connected or init state (it does nothing for other states). It either shuts
 * the write side or marks itself as closed. The buffer flags are updated to
 * reflect the new state. It also closes everything if the CS was marked as
 * being in error state. The owner task is woken up if it exists.
 */
static void sc_app_shutw(struct stconn *cs)
{
	struct channel *ic = sc_ic(cs);
	struct channel *oc = sc_oc(cs);

	oc->flags &= ~CF_SHUTW_NOW;
	if (oc->flags & CF_SHUTW)
		return;
	oc->flags |= CF_SHUTW;
	oc->wex = TICK_ETERNITY;

	if (tick_isset(cs->hcto)) {
		ic->rto = cs->hcto;
		ic->rex = tick_add(now_ms, ic->rto);
	}

	switch (cs->state) {
	case SC_ST_RDY:
	case SC_ST_EST:
		/* we have to shut before closing, otherwise some short messages
		 * may never leave the system, especially when there are remaining
		 * unread data in the socket input buffer, or when nolinger is set.
		 * However, if SC_FL_NOLINGER is explicitly set, we know there is
		 * no risk so we close both sides immediately.
		 */
		if (!sc_ep_test(cs, SE_FL_ERROR) && !(cs->flags & SC_FL_NOLINGER) &&
		    !(ic->flags & (CF_SHUTR|CF_DONT_READ)))
			return;

		/* fall through */
	case SC_ST_CON:
	case SC_ST_CER:
	case SC_ST_QUE:
	case SC_ST_TAR:
		/* Note that none of these states may happen with applets */
		cs->state = SC_ST_DIS;
		/* fall through */
	default:
		cs->flags &= ~SC_FL_NOLINGER;
		ic->flags |= CF_SHUTR;
		ic->rex = TICK_ETERNITY;
		__sc_strm(cs)->conn_exp = TICK_ETERNITY;
	}

	/* note that if the task exists, it must unregister itself once it runs */
	if (!(cs->flags & SC_FL_DONT_WAKE))
		task_wakeup(sc_strm_task(cs), TASK_WOKEN_IO);
}

/* default chk_rcv function for scheduled tasks */
static void sc_app_chk_rcv(struct stconn *cs)
{
	struct channel *ic = sc_ic(cs);

	DPRINTF(stderr, "%s: cs=%p, cs->state=%d ic->flags=%08x oc->flags=%08x\n",
		__FUNCTION__,
		cs, cs->state, ic->flags, sc_oc(cs)->flags);

	if (ic->pipe) {
		/* stop reading */
		sc_need_room(cs);
	}
	else {
		/* (re)start reading */
		if (!(cs->flags & SC_FL_DONT_WAKE))
			task_wakeup(sc_strm_task(cs), TASK_WOKEN_IO);
	}
}

/* default chk_snd function for scheduled tasks */
static void sc_app_chk_snd(struct stconn *cs)
{
	struct channel *oc = sc_oc(cs);

	DPRINTF(stderr, "%s: cs=%p, cs->state=%d ic->flags=%08x oc->flags=%08x\n",
		__FUNCTION__,
		cs, cs->state, sc_ic(cs)->flags, oc->flags);

	if (unlikely(cs->state != SC_ST_EST || (oc->flags & CF_SHUTW)))
		return;

	if (!sc_ep_test(cs, SE_FL_WAIT_DATA) ||  /* not waiting for data */
	    channel_is_empty(oc))                /* called with nothing to send! */
		return;

	/* Otherwise there are remaining data to be sent in the buffer,
	 * so we tell the handler.
	 */
	sc_ep_clr(cs, SE_FL_WAIT_DATA);
	if (!tick_isset(oc->wex))
		oc->wex = tick_add_ifset(now_ms, oc->wto);

	if (!(cs->flags & SC_FL_DONT_WAKE))
		task_wakeup(sc_strm_task(cs), TASK_WOKEN_IO);
}

/*
 * This function performs a shutdown-read on a stream connector attached to
 * a connection in a connected or init state (it does nothing for other
 * states). It either shuts the read side or marks itself as closed. The buffer
 * flags are updated to reflect the new state. If the stream connector has
 * SC_FL_NOHALF, we also forward the close to the write side. If a control
 * layer is defined, then it is supposed to be a socket layer and file
 * descriptors are then shutdown or closed accordingly. The function
 * automatically disables polling if needed.
 */
static void sc_app_shutr_conn(struct stconn *cs)
{
	struct channel *ic = sc_ic(cs);

	BUG_ON(!sc_conn(cs));

	if (ic->flags & CF_SHUTR)
		return;
	ic->flags |= CF_SHUTR;
	ic->rex = TICK_ETERNITY;

	if (!cs_state_in(cs->state, SC_SB_CON|SC_SB_RDY|SC_SB_EST))
		return;

	if (sc_oc(cs)->flags & CF_SHUTW) {
		sc_conn_shut(cs);
		cs->state = SC_ST_DIS;
		__sc_strm(cs)->conn_exp = TICK_ETERNITY;
	}
	else if (cs->flags & SC_FL_NOHALF) {
		/* we want to immediately forward this close to the write side */
		return sc_app_shutw_conn(cs);
	}
}

/*
 * This function performs a shutdown-write on a stream connector attached to
 * a connection in a connected or init state (it does nothing for other
 * states). It either shuts the write side or marks itself as closed. The
 * buffer flags are updated to reflect the new state. It also closes
 * everything if the CS was marked as being in error state. If there is a
 * data-layer shutdown, it is called.
 */
static void sc_app_shutw_conn(struct stconn *cs)
{
	struct channel *ic = sc_ic(cs);
	struct channel *oc = sc_oc(cs);

	BUG_ON(!sc_conn(cs));

	oc->flags &= ~CF_SHUTW_NOW;
	if (oc->flags & CF_SHUTW)
		return;
	oc->flags |= CF_SHUTW;
	oc->wex = TICK_ETERNITY;

	if (tick_isset(cs->hcto)) {
		ic->rto = cs->hcto;
		ic->rex = tick_add(now_ms, ic->rto);
	}

	switch (cs->state) {
	case SC_ST_RDY:
	case SC_ST_EST:
		/* we have to shut before closing, otherwise some short messages
		 * may never leave the system, especially when there are remaining
		 * unread data in the socket input buffer, or when nolinger is set.
		 * However, if SC_FL_NOLINGER is explicitly set, we know there is
		 * no risk so we close both sides immediately.
		 */

		if (sc_ep_test(cs, SE_FL_ERROR)) {
			/* quick close, the socket is already shut anyway */
		}
		else if (cs->flags & SC_FL_NOLINGER) {
			/* unclean data-layer shutdown, typically an aborted request
			 * or a forwarded shutdown from a client to a server due to
			 * option abortonclose. No need for the TLS layer to try to
			 * emit a shutdown message.
			 */
			sc_conn_shutw(cs, CO_SHW_SILENT);
		}
		else {
			/* clean data-layer shutdown. This only happens on the
			 * frontend side, or on the backend side when forwarding
			 * a client close in TCP mode or in HTTP TUNNEL mode
			 * while option abortonclose is set. We want the TLS
			 * layer to try to signal it to the peer before we close.
			 */
			sc_conn_shutw(cs, CO_SHW_NORMAL);

			if (!(ic->flags & (CF_SHUTR|CF_DONT_READ)))
				return;
		}

		/* fall through */
	case SC_ST_CON:
		/* we may have to close a pending connection, and mark the
		 * response buffer as shutr
		 */
		sc_conn_shut(cs);
		/* fall through */
	case SC_ST_CER:
	case SC_ST_QUE:
	case SC_ST_TAR:
		cs->state = SC_ST_DIS;
		/* fall through */
	default:
		cs->flags &= ~SC_FL_NOLINGER;
		ic->flags |= CF_SHUTR;
		ic->rex = TICK_ETERNITY;
		__sc_strm(cs)->conn_exp = TICK_ETERNITY;
	}
}

/* This function is used for inter-stream connector calls. It is called by the
 * consumer to inform the producer side that it may be interested in checking
 * for free space in the buffer. Note that it intentionally does not update
 * timeouts, so that we can still check them later at wake-up. This function is
 * dedicated to connection-based stream connectors.
 */
static void sc_app_chk_rcv_conn(struct stconn *cs)
{
	BUG_ON(!sc_conn(cs));

	/* (re)start reading */
	if (cs_state_in(cs->state, SC_SB_CON|SC_SB_RDY|SC_SB_EST))
		tasklet_wakeup(cs->wait_event.tasklet);
}


/* This function is used for inter-stream connector calls. It is called by the
 * producer to inform the consumer side that it may be interested in checking
 * for data in the buffer. Note that it intentionally does not update timeouts,
 * so that we can still check them later at wake-up.
 */
static void sc_app_chk_snd_conn(struct stconn *cs)
{
	struct channel *oc = sc_oc(cs);

	BUG_ON(!sc_conn(cs));

	if (unlikely(!cs_state_in(cs->state, SC_SB_RDY|SC_SB_EST) ||
	    (oc->flags & CF_SHUTW)))
		return;

	if (unlikely(channel_is_empty(oc)))  /* called with nothing to send! */
		return;

	if (!oc->pipe &&                          /* spliced data wants to be forwarded ASAP */
	    !sc_ep_test(cs, SE_FL_WAIT_DATA))     /* not waiting for data */
		return;

	if (!(cs->wait_event.events & SUB_RETRY_SEND) && !channel_is_empty(sc_oc(cs)))
		sc_conn_send(cs);

	if (sc_ep_test(cs, SE_FL_ERROR | SE_FL_ERR_PENDING) || cs_is_conn_error(cs)) {
		/* Write error on the file descriptor */
		if (cs->state >= SC_ST_CON)
			sc_ep_set(cs, SE_FL_ERROR);
		goto out_wakeup;
	}

	/* OK, so now we know that some data might have been sent, and that we may
	 * have to poll first. We have to do that too if the buffer is not empty.
	 */
	if (channel_is_empty(oc)) {
		/* the connection is established but we can't write. Either the
		 * buffer is empty, or we just refrain from sending because the
		 * ->o limit was reached. Maybe we just wrote the last
		 * chunk and need to close.
		 */
		if (((oc->flags & (CF_SHUTW|CF_AUTO_CLOSE|CF_SHUTW_NOW)) ==
		     (CF_AUTO_CLOSE|CF_SHUTW_NOW)) &&
		    cs_state_in(cs->state, SC_SB_RDY|SC_SB_EST)) {
			cs_shutw(cs);
			goto out_wakeup;
		}

		if ((oc->flags & (CF_SHUTW|CF_SHUTW_NOW)) == 0)
			sc_ep_set(cs, SE_FL_WAIT_DATA);
		oc->wex = TICK_ETERNITY;
	}
	else {
		/* Otherwise there are remaining data to be sent in the buffer,
		 * which means we have to poll before doing so.
		 */
		sc_ep_clr(cs, SE_FL_WAIT_DATA);
		if (!tick_isset(oc->wex))
			oc->wex = tick_add_ifset(now_ms, oc->wto);
	}

	if (likely(oc->flags & CF_WRITE_ACTIVITY)) {
		struct channel *ic = sc_ic(cs);

		/* update timeout if we have written something */
		if ((oc->flags & (CF_SHUTW|CF_WRITE_PARTIAL)) == CF_WRITE_PARTIAL &&
		    !channel_is_empty(oc))
			oc->wex = tick_add_ifset(now_ms, oc->wto);

		if (tick_isset(ic->rex) && !(cs->flags & SC_FL_INDEP_STR)) {
			/* Note: to prevent the client from expiring read timeouts
			 * during writes, we refresh it. We only do this if the
			 * interface is not configured for "independent streams",
			 * because for some applications it's better not to do this,
			 * for instance when continuously exchanging small amounts
			 * of data which can fill the socket buffers long before a
			 * write timeout is detected.
			 */
			ic->rex = tick_add_ifset(now_ms, ic->rto);
		}
	}

	/* in case of special condition (error, shutdown, end of write...), we
	 * have to notify the task.
	 */
	if (likely((oc->flags & (CF_WRITE_NULL|CF_WRITE_ERROR|CF_SHUTW)) ||
	           ((oc->flags & CF_WAKE_WRITE) &&
	            ((channel_is_empty(oc) && !oc->to_forward) ||
	             !cs_state_in(cs->state, SC_SB_EST))))) {
	out_wakeup:
		if (!(cs->flags & SC_FL_DONT_WAKE))
			task_wakeup(sc_strm_task(cs), TASK_WOKEN_IO);
	}
}

/*
 * This function performs a shutdown-read on a stream connector attached to an
 * applet in a connected or init state (it does nothing for other states). It
 * either shuts the read side or marks itself as closed. The buffer flags are
 * updated to reflect the new state. If the stream connector has SC_FL_NOHALF,
 * we also forward the close to the write side. The owner task is woken up if
 * it exists.
 */
static void sc_app_shutr_applet(struct stconn *cs)
{
	struct channel *ic = sc_ic(cs);

	BUG_ON(!sc_appctx(cs));

	if (ic->flags & CF_SHUTR)
		return;
	ic->flags |= CF_SHUTR;
	ic->rex = TICK_ETERNITY;

	/* Note: on shutr, we don't call the applet */

	if (!cs_state_in(cs->state, SC_SB_CON|SC_SB_RDY|SC_SB_EST))
		return;

	if (sc_oc(cs)->flags & CF_SHUTW) {
		appctx_shut(__sc_appctx(cs));
		cs->state = SC_ST_DIS;
		__sc_strm(cs)->conn_exp = TICK_ETERNITY;
	}
	else if (cs->flags & SC_FL_NOHALF) {
		/* we want to immediately forward this close to the write side */
		return sc_app_shutw_applet(cs);
	}
}

/*
 * This function performs a shutdown-write on a stream connector attached to an
 * applet in a connected or init state (it does nothing for other states). It
 * either shuts the write side or marks itself as closed. The buffer flags are
 * updated to reflect the new state. It also closes everything if the CS
 * was marked as being in error state. The owner task is woken up if it exists.
 */
static void sc_app_shutw_applet(struct stconn *cs)
{
	struct channel *ic = sc_ic(cs);
	struct channel *oc = sc_oc(cs);

	BUG_ON(!sc_appctx(cs));

	oc->flags &= ~CF_SHUTW_NOW;
	if (oc->flags & CF_SHUTW)
		return;
	oc->flags |= CF_SHUTW;
	oc->wex = TICK_ETERNITY;

	if (tick_isset(cs->hcto)) {
		ic->rto = cs->hcto;
		ic->rex = tick_add(now_ms, ic->rto);
	}

	/* on shutw we always wake the applet up */
	appctx_wakeup(__sc_appctx(cs));

	switch (cs->state) {
	case SC_ST_RDY:
	case SC_ST_EST:
		/* we have to shut before closing, otherwise some short messages
		 * may never leave the system, especially when there are remaining
		 * unread data in the socket input buffer, or when nolinger is set.
		 * However, if SC_FL_NOLINGER is explicitly set, we know there is
		 * no risk so we close both sides immediately.
		 */
		if (!sc_ep_test(cs, SE_FL_ERROR) && !(cs->flags & SC_FL_NOLINGER) &&
		    !(ic->flags & (CF_SHUTR|CF_DONT_READ)))
			return;

		/* fall through */
	case SC_ST_CON:
	case SC_ST_CER:
	case SC_ST_QUE:
	case SC_ST_TAR:
		/* Note that none of these states may happen with applets */
		appctx_shut(__sc_appctx(cs));
		cs->state = SC_ST_DIS;
		/* fall through */
	default:
		cs->flags &= ~SC_FL_NOLINGER;
		ic->flags |= CF_SHUTR;
		ic->rex = TICK_ETERNITY;
		__sc_strm(cs)->conn_exp = TICK_ETERNITY;
	}
}

/* chk_rcv function for applets */
static void sc_app_chk_rcv_applet(struct stconn *cs)
{
	struct channel *ic = sc_ic(cs);

	BUG_ON(!sc_appctx(cs));

	DPRINTF(stderr, "%s: cs=%p, cs->state=%d ic->flags=%08x oc->flags=%08x\n",
		__FUNCTION__,
		cs, cs->state, ic->flags, sc_oc(cs)->flags);

	if (!ic->pipe) {
		/* (re)start reading */
		appctx_wakeup(__sc_appctx(cs));
	}
}

/* chk_snd function for applets */
static void sc_app_chk_snd_applet(struct stconn *cs)
{
	struct channel *oc = sc_oc(cs);

	BUG_ON(!sc_appctx(cs));

	DPRINTF(stderr, "%s: cs=%p, cs->state=%d ic->flags=%08x oc->flags=%08x\n",
		__FUNCTION__,
		cs, cs->state, sc_ic(cs)->flags, oc->flags);

	if (unlikely(cs->state != SC_ST_EST || (oc->flags & CF_SHUTW)))
		return;

	/* we only wake the applet up if it was waiting for some data */

	if (!sc_ep_test(cs, SE_FL_WAIT_DATA))
		return;

	if (!tick_isset(oc->wex))
		oc->wex = tick_add_ifset(now_ms, oc->wto);

	if (!channel_is_empty(oc)) {
		/* (re)start sending */
		appctx_wakeup(__sc_appctx(cs));
	}
}


/* This function is designed to be called from within the stream handler to
 * update the input channel's expiration timer and the stream connector's
 * Rx flags based on the channel's flags. It needs to be called only once
 * after the channel's flags have settled down, and before they are cleared,
 * though it doesn't harm to call it as often as desired (it just slightly
 * hurts performance). It must not be called from outside of the stream
 * handler, as what it does will be used to compute the stream task's
 * expiration.
 */
void cs_update_rx(struct stconn *cs)
{
	struct channel *ic = sc_ic(cs);

	if (ic->flags & CF_SHUTR)
		return;

	/* Read not closed, update FD status and timeout for reads */
	if (ic->flags & CF_DONT_READ)
		sc_wont_read(cs);
	else
		sc_will_read(cs);

	if (!channel_is_empty(ic) || !channel_may_recv(ic)) {
		/* stop reading, imposed by channel's policy or contents */
		sc_need_room(cs);
	}
	else {
		/* (re)start reading and update timeout. Note: we don't recompute the timeout
		 * every time we get here, otherwise it would risk never to expire. We only
		 * update it if it was not yet set. The stream socket handler will already
		 * have updated it if there has been a completed I/O.
		 */
		sc_have_room(cs);
	}
	if (sc_ep_test(cs, SE_FL_RXBLK_ANY))
		ic->rex = TICK_ETERNITY;
	else if (!(ic->flags & CF_READ_NOEXP) && !tick_isset(ic->rex))
		ic->rex = tick_add_ifset(now_ms, ic->rto);

	cs_chk_rcv(cs);
}

/* This function is designed to be called from within the stream handler to
 * update the output channel's expiration timer and the stream connector's
 * Tx flags based on the channel's flags. It needs to be called only once
 * after the channel's flags have settled down, and before they are cleared,
 * though it doesn't harm to call it as often as desired (it just slightly
 * hurts performance). It must not be called from outside of the stream
 * handler, as what it does will be used to compute the stream task's
 * expiration.
 */
void cs_update_tx(struct stconn *cs)
{
	struct channel *oc = sc_oc(cs);
	struct channel *ic = sc_ic(cs);

	if (oc->flags & CF_SHUTW)
		return;

	/* Write not closed, update FD status and timeout for writes */
	if (channel_is_empty(oc)) {
		/* stop writing */
		if (!sc_ep_test(cs, SE_FL_WAIT_DATA)) {
			if ((oc->flags & CF_SHUTW_NOW) == 0)
				sc_ep_set(cs, SE_FL_WAIT_DATA);
			oc->wex = TICK_ETERNITY;
		}
		return;
	}

	/* (re)start writing and update timeout. Note: we don't recompute the timeout
	 * every time we get here, otherwise it would risk never to expire. We only
	 * update it if it was not yet set. The stream socket handler will already
	 * have updated it if there has been a completed I/O.
	 */
	sc_ep_clr(cs, SE_FL_WAIT_DATA);
	if (!tick_isset(oc->wex)) {
		oc->wex = tick_add_ifset(now_ms, oc->wto);
		if (tick_isset(ic->rex) && !(cs->flags & SC_FL_INDEP_STR)) {
			/* Note: depending on the protocol, we don't know if we're waiting
			 * for incoming data or not. So in order to prevent the socket from
			 * expiring read timeouts during writes, we refresh the read timeout,
			 * except if it was already infinite or if we have explicitly setup
			 * independent streams.
			 */
			ic->rex = tick_add_ifset(now_ms, ic->rto);
		}
	}
}
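
/* Illustrative sketch of the intended call sequence from a stream handler,
 * once per wake-up after the channel flags have settled:
 *
 *	cs_update_rx(cs);                    // refresh ic->rex and the Rx blocking flags
 *	cs_update_tx(cs);                    // refresh oc->wex and SE_FL_WAIT_DATA
 *	// lower layers (muxes, applets) must use cs_notify() instead, below
 */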

/* This function is the equivalent of cs_update() except that it's
 * designed to be called from outside the stream handlers, typically the lower
 * layers (applets, connections) after I/O completion. After updating the stream
 * connector and timeouts, it will try to forward what can be forwarded, then
 * wake the associated task up if an important event requires special handling.
 * It may update SE_FL_WAIT_DATA and/or SE_FL_RXBLK_ROOM, which the callers are
 * encouraged to watch in order to take appropriate action.
 * It should not be called from within the stream itself, cs_update()
 * is designed for this.
 */
static void cs_notify(struct stconn *cs)
{
	struct channel *ic = sc_ic(cs);
	struct channel *oc = sc_oc(cs);
	struct stconn *cso = cs_opposite(cs);
	struct task *task = sc_strm_task(cs);

	/* process consumer side */
	if (channel_is_empty(oc)) {
		struct connection *conn = sc_conn(cs);

		if (((oc->flags & (CF_SHUTW|CF_SHUTW_NOW)) == CF_SHUTW_NOW) &&
		    (cs->state == SC_ST_EST) && (!conn || !(conn->flags & (CO_FL_WAIT_XPRT | CO_FL_EARLY_SSL_HS))))
			cs_shutw(cs);
		oc->wex = TICK_ETERNITY;
	}

	/* indicate that we may be waiting for data from the output channel or
	 * that we're about to close and can't expect more data if SHUTW_NOW is there.
	 */
	if (!(oc->flags & (CF_SHUTW|CF_SHUTW_NOW)))
		sc_ep_set(cs, SE_FL_WAIT_DATA);
	else if ((oc->flags & (CF_SHUTW|CF_SHUTW_NOW)) == CF_SHUTW_NOW)
		sc_ep_clr(cs, SE_FL_WAIT_DATA);

	/* update OC timeouts and wake the other side up if it's waiting for room */
	if (oc->flags & CF_WRITE_ACTIVITY) {
		if ((oc->flags & (CF_SHUTW|CF_WRITE_PARTIAL)) == CF_WRITE_PARTIAL &&
		    !channel_is_empty(oc))
			if (tick_isset(oc->wex))
				oc->wex = tick_add_ifset(now_ms, oc->wto);

		if (!(cs->flags & SC_FL_INDEP_STR))
			if (tick_isset(ic->rex))
				ic->rex = tick_add_ifset(now_ms, ic->rto);
	}

	if (oc->flags & CF_DONT_READ)
		sc_wont_read(cso);
	else
		sc_will_read(cso);

	/* Notify the other side when we've injected data into the IC that
	 * needs to be forwarded. We can do fast-forwarding as soon as there
	 * are output data, but we avoid doing this if some of the data are
	 * not yet scheduled for being forwarded, because it is very likely
	 * that it will be done again immediately afterwards once the following
	 * data are parsed (eg: HTTP chunking). We only clear the room shortage
	 * (SE_FL_RXBLK_ROOM) once we've emptied *some* of the output buffer, and
	 * not just when there is available room, because applets are often forced
	 * to stop before the buffer is full. We must not stop based on input data
	 * alone because an HTTP parser might need more data to complete the parsing.
	 */
	if (!channel_is_empty(ic) &&
	    sc_ep_test(cso, SE_FL_WAIT_DATA) &&
	    (!(ic->flags & CF_EXPECT_MORE) || c_full(ic) || ci_data(ic) == 0 || ic->pipe)) {
		int new_len, last_len;

		last_len = co_data(ic);
		if (ic->pipe)
			last_len += ic->pipe->data;

		cs_chk_snd(cso);

		new_len = co_data(ic);
		if (ic->pipe)
			new_len += ic->pipe->data;

		/* check if the consumer has freed some space either in the
		 * buffer or in the pipe.
		 */
		if (new_len < last_len)
			sc_have_room(cs);
	}

	if (!(ic->flags & CF_DONT_READ))
		sc_will_read(cs);

	cs_chk_rcv(cs);
	cs_chk_rcv(cso);

	if (ic->flags & CF_SHUTR || sc_ep_test(cs, SE_FL_APPLET_NEED_CONN) || cs_rx_blocked(cs)) {
		ic->rex = TICK_ETERNITY;
	}
	else if ((ic->flags & (CF_SHUTR|CF_READ_PARTIAL)) == CF_READ_PARTIAL) {
		/* we must re-enable reading if cs_chk_snd() has freed some space */
		if (!(ic->flags & CF_READ_NOEXP) && tick_isset(ic->rex))
			ic->rex = tick_add_ifset(now_ms, ic->rto);
	}

	/* wake the task up only when needed */
	if (/* changes on the production side */
	    (ic->flags & (CF_READ_NULL|CF_READ_ERROR)) ||
	    !cs_state_in(cs->state, SC_SB_CON|SC_SB_RDY|SC_SB_EST) ||
	    sc_ep_test(cs, SE_FL_ERROR) ||
	    ((ic->flags & CF_READ_PARTIAL) &&
	     ((ic->flags & CF_EOI) || !ic->to_forward || cso->state != SC_ST_EST)) ||

	    /* changes on the consumption side */
	    (oc->flags & (CF_WRITE_NULL|CF_WRITE_ERROR)) ||
	    ((oc->flags & CF_WRITE_ACTIVITY) &&
	     ((oc->flags & CF_SHUTW) ||
	      (((oc->flags & CF_WAKE_WRITE) ||
		!(oc->flags & (CF_AUTO_CLOSE|CF_SHUTW_NOW|CF_SHUTW))) &&
	       (cso->state != SC_ST_EST ||
		(channel_is_empty(oc) && !oc->to_forward)))))) {
		task_wakeup(task, TASK_WOKEN_IO);
	}
	else {
		/* Update expiration date for the task and requeue it */
		task->expire = tick_first((tick_is_expired(task->expire, now_ms) ? 0 : task->expire),
					  tick_first(tick_first(ic->rex, ic->wex),
						     tick_first(oc->rex, oc->wex)));

		task->expire = tick_first(task->expire, ic->analyse_exp);
		task->expire = tick_first(task->expire, oc->analyse_exp);
		task->expire = tick_first(task->expire, __sc_strm(cs)->conn_exp);

		task_queue(task);
	}
	if (ic->flags & CF_READ_ACTIVITY)
		ic->flags &= ~CF_READ_DONTWAIT;
}
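
/* A hedged sketch (excluded from the build) of the intended calling
 * pattern: a lower layer finishing an I/O calls cs_notify() to let the
 * stream layer forward data and decide whether the stream task must run.
 * The handler name below is hypothetical.
 */
#if 0
static int example_endpoint_io_done(struct stconn *cs)
{
	/* ... endpoint-specific recv/send work happened here ... */
	cs_notify(cs);	/* updates channels/timeouts, may task_wakeup() the stream */
	return 0;
}
#endif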

/*
 * This function propagates a null read received on a socket-based connection.
 * It updates the stream connector. If the stream connector has SC_FL_NOHALF,
 * the close is also forwarded to the write side as an abort.
 */
static void sc_conn_read0(struct stconn *cs)
{
	struct channel *ic = sc_ic(cs);
	struct channel *oc = sc_oc(cs);

	BUG_ON(!sc_conn(cs));

	if (ic->flags & CF_SHUTR)
		return;
	ic->flags |= CF_SHUTR;
	ic->rex = TICK_ETERNITY;

	if (!cs_state_in(cs->state, SC_SB_CON|SC_SB_RDY|SC_SB_EST))
		return;

	if (oc->flags & CF_SHUTW)
		goto do_close;

	if (cs->flags & SC_FL_NOHALF) {
		/* we want to immediately forward this close to the write side */
		/* force flag on ssl to keep stream in cache */
		sc_conn_shutw(cs, CO_SHW_SILENT);
		goto do_close;
	}

	/* otherwise that's just a normal read shutdown */
	return;

 do_close:
	/* OK we completely close the socket here just as if we went through cs_shut[rw]() */
	sc_conn_shut(cs);

	oc->flags &= ~CF_SHUTW_NOW;
	oc->flags |= CF_SHUTW;
	oc->wex = TICK_ETERNITY;

	cs->state = SC_ST_DIS;
	__sc_strm(cs)->conn_exp = TICK_ETERNITY;
	return;
}
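
/* Hedged illustration (excluded from the build): SC_FL_NOHALF is how an
 * owner opts out of half-closed connections, so a read shutdown is
 * immediately turned into a full close by the function above:
 */
#if 0
	cs->flags |= SC_FL_NOHALF;	/* from now on, read0 also shuts the write side */
#endif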

/*
 * This is the callback which is called by the connection layer to receive data
 * into the buffer from the connection. It iterates over the mux layer's
 * rcv_buf function.
 */
static int sc_conn_recv(struct stconn *cs)
{
	struct connection *conn = __sc_conn(cs);
	struct channel *ic = sc_ic(cs);
	int ret, max, cur_read = 0;
	int read_poll = MAX_READ_POLL_LOOPS;
	int flags = 0;

	/* If not established yet, do nothing. */
	if (cs->state != SC_ST_EST)
		return 0;

	/* If another call to sc_conn_recv() failed, and we subscribed to
	 * recv events already, give up now.
	 */
	if (cs->wait_event.events & SUB_RETRY_RECV)
		return 0;

	/* maybe we were called immediately after an asynchronous shutr */
	if (ic->flags & CF_SHUTR)
		return 1;

	/* we must wait because the mux is not installed yet */
	if (!conn->mux)
		return 0;

	/* stop here if we reached the end of data */
	if (sc_ep_test(cs, SE_FL_EOS))
		goto end_recv;

	/* stop immediately on errors. Note that we DON'T want to stop on
	 * POLL_ERR, as the poller might report a write error while there
	 * are still data available in the recv buffer. This typically
	 * happens when we send too large a request to a backend server
	 * which rejects it before reading it all.
	 */
	if (!sc_ep_test(cs, SE_FL_RCV_MORE)) {
		if (!conn_xprt_ready(conn))
			return 0;
		if (sc_ep_test(cs, SE_FL_ERROR))
			goto end_recv;
	}

	/* prepare to detect if the mux needs more room */
	sc_ep_clr(cs, SE_FL_WANT_ROOM);

	if ((ic->flags & (CF_STREAMER | CF_STREAMER_FAST)) && !co_data(ic) &&
	    global.tune.idle_timer &&
	    (unsigned short)(now_ms - ic->last_read) >= global.tune.idle_timer) {
		/* The buffer was empty and nothing was transferred for more
		 * than one second. This was caused by a pause and not by
		 * congestion. Reset any streaming mode to reduce latency.
		 */
		ic->xfer_small = 0;
		ic->xfer_large = 0;
		ic->flags &= ~(CF_STREAMER | CF_STREAMER_FAST);
	}

	/* First, let's see if we may splice data across the channel without
	 * using a buffer.
	 */
	if (sc_ep_test(cs, SE_FL_MAY_SPLICE) &&
	    (ic->pipe || ic->to_forward >= MIN_SPLICE_FORWARD) &&
	    ic->flags & CF_KERN_SPLICING) {
		if (c_data(ic)) {
			/* We're embarrassed, there are already data pending in
			 * the buffer and we don't want to have them at two
			 * locations at a time. Let's indicate we need some
			 * place and ask the consumer to hurry.
			 */
			flags |= CO_RFL_BUF_FLUSH;
			goto abort_splice;
		}

		if (unlikely(ic->pipe == NULL)) {
			if (pipes_used >= global.maxpipes || !(ic->pipe = get_pipe())) {
				ic->flags &= ~CF_KERN_SPLICING;
				goto abort_splice;
			}
		}

		ret = conn->mux->rcv_pipe(cs, ic->pipe, ic->to_forward);
		if (ret < 0) {
			/* splice not supported on this end, let's disable it */
			ic->flags &= ~CF_KERN_SPLICING;
			goto abort_splice;
		}

		if (ret > 0) {
			if (ic->to_forward != CHN_INFINITE_FORWARD)
				ic->to_forward -= ret;
			ic->total += ret;
			cur_read += ret;
			ic->flags |= CF_READ_PARTIAL;
		}

		if (sc_ep_test(cs, SE_FL_EOS | SE_FL_ERROR))
			goto end_recv;

		if (conn->flags & CO_FL_WAIT_ROOM) {
			/* the pipe is full or we have read enough data that it
			 * could soon be full. Let's stop before needing to poll.
			 */
			sc_need_room(cs);
			goto done_recv;
		}

		/* splice not possible (anymore), let's fall back to the standard copy */
	}

 abort_splice:
	if (ic->pipe && unlikely(!ic->pipe->data)) {
		put_pipe(ic->pipe);
		ic->pipe = NULL;
	}

	if (ic->pipe && ic->to_forward && !(flags & CO_RFL_BUF_FLUSH) && sc_ep_test(cs, SE_FL_MAY_SPLICE)) {
		/* don't break splicing by reading, but still call rcv_buf()
		 * to pass the flag.
		 */
		goto done_recv;
	}

	/* now we'll need an input buffer for the stream */
	if (!cs_alloc_ibuf(cs, &(__sc_strm(cs)->buffer_wait)))
		goto end_recv;

	/* For an HTX stream, if the buffer is stuck (no output data with some
	 * input data) and if the HTX message is fragmented or if its free space
	 * wraps, we force an HTX defragmentation. It is a way to have a
	 * contiguous free space and to let the mux copy as much data as
	 * possible.
	 *
	 * NOTE: A possible optimization may be to let the mux decide if a defrag
	 * is required or not, depending on the amount of data to be xferred.
	 */
	if (IS_HTX_STRM(__sc_strm(cs)) && !co_data(ic)) {
		struct htx *htx = htxbuf(&ic->buf);

		if (htx_is_not_empty(htx) && ((htx->flags & HTX_FL_FRAGMENTED) || htx_space_wraps(htx)))
			htx_defrag(htx, NULL, 0);
	}

	/* Instruct the mux that it must subscribe for read events */
	flags |= ((!conn_is_back(conn) && (__sc_strm(cs)->be->options & PR_O_ABRT_CLOSE)) ? CO_RFL_KEEP_RECV : 0);

	/* Important note : if we're called with POLL_IN|POLL_HUP, it means the read polling
	 * was enabled, which implies that the recv buffer was not full. So we have a guarantee
	 * that if such an event is not handled above in splice, it will be handled here by
	 * recv().
	 */
	while (sc_ep_test(cs, SE_FL_RCV_MORE) ||
	       (!(conn->flags & CO_FL_HANDSHAKE) &&
		(!sc_ep_test(cs, SE_FL_ERROR | SE_FL_EOS)) && !(ic->flags & CF_SHUTR))) {
		int cur_flags = flags;

		/* Compute transient CO_RFL_* flags */
		if (co_data(ic)) {
			cur_flags |= (CO_RFL_BUF_WET | CO_RFL_BUF_NOT_STUCK);
		}

		/* <max> may be null. It is the mux's responsibility to set
		 * SE_FL_RCV_MORE on the CS if more space is needed.
		 */
		max = channel_recv_max(ic);
		ret = conn->mux->rcv_buf(cs, &ic->buf, max, cur_flags);

		if (sc_ep_test(cs, SE_FL_WANT_ROOM)) {
			/* SE_FL_WANT_ROOM must not be reported if the channel's
			 * buffer is empty.
			 */
			BUG_ON(c_empty(ic));

			sc_need_room(cs);
			/* Add READ_PARTIAL because some data are pending but
			 * cannot be xferred to the channel
			 */
			ic->flags |= CF_READ_PARTIAL;
		}

		if (ret <= 0) {
			/* if we refrained from reading because we asked for a
			 * flush to satisfy rcv_pipe(), we must not subscribe
			 * and instead report that there's not enough room
			 * here to proceed.
			 */
			if (flags & CO_RFL_BUF_FLUSH)
				sc_need_room(cs);
			break;
		}

		cur_read += ret;

		/* if we're allowed to directly forward data, we must update ->o */
		if (ic->to_forward && !(ic->flags & (CF_SHUTW|CF_SHUTW_NOW))) {
			unsigned long fwd = ret;
			if (ic->to_forward != CHN_INFINITE_FORWARD) {
				if (fwd > ic->to_forward)
					fwd = ic->to_forward;
				ic->to_forward -= fwd;
			}
			c_adv(ic, fwd);
		}

		ic->flags |= CF_READ_PARTIAL;
		ic->total += ret;

		/* End-of-input reached, we can leave. In this case, it is
		 * important to break the loop to not block the CS because of
		 * the channel's policies. This way, we are still able to receive
		 * shutdowns.
		 */
		if (sc_ep_test(cs, SE_FL_EOI))
			break;

		if ((ic->flags & CF_READ_DONTWAIT) || --read_poll <= 0) {
			/* we're stopped by the channel's policy */
			sc_wont_read(cs);
			break;
		}

		/* if too many bytes were missing from last read, it means that
		 * it's pointless trying to read again because the system does
		 * not have them in buffers.
		 */
		if (ret < max) {
			/* if a streamer has read few data, it may be because we
			 * have exhausted system buffers. It's not worth trying
			 * again.
			 */
			if (ic->flags & CF_STREAMER) {
				/* we're stopped by the channel's policy */
				sc_wont_read(cs);
				break;
			}

			/* if we read a large block smaller than what we requested,
			 * it's almost certain we'll never get anything more.
			 */
			if (ret >= global.tune.recv_enough) {
				/* we're stopped by the channel's policy */
				sc_wont_read(cs);
				break;
			}
		}

		/* if we are waiting for more space, don't try to read more data
		 * right now.
		 */
		if (cs_rx_blocked(cs))
			break;
	} /* while !flags */

 done_recv:
	if (cur_read) {
		if ((ic->flags & (CF_STREAMER | CF_STREAMER_FAST)) &&
		    (cur_read <= ic->buf.size / 2)) {
			ic->xfer_large = 0;
			ic->xfer_small++;
			if (ic->xfer_small >= 3) {
				/* we have read less than half of the buffer in
				 * one pass, and this happened at least 3 times.
				 * This is definitely not a streamer.
				 */
				ic->flags &= ~(CF_STREAMER | CF_STREAMER_FAST);
			}
			else if (ic->xfer_small >= 2) {
				/* if the buffer has been at least half full twice,
				 * we receive faster than we send, so at least it
				 * is not a "fast streamer".
				 */
				ic->flags &= ~CF_STREAMER_FAST;
			}
		}
		else if (!(ic->flags & CF_STREAMER_FAST) &&
			 (cur_read >= ic->buf.size - global.tune.maxrewrite)) {
			/* we read a full buffer at once */
			ic->xfer_small = 0;
			ic->xfer_large++;
			if (ic->xfer_large >= 3) {
				/* we call this buffer a fast streamer if it manages
				 * to be filled in one call 3 consecutive times.
				 */
				ic->flags |= (CF_STREAMER | CF_STREAMER_FAST);
			}
		}
		else {
			ic->xfer_small = 0;
			ic->xfer_large = 0;
		}
		ic->last_read = now_ms;
	}

 end_recv:
	ret = (cur_read != 0);

	/* Report EOI on the channel if it was reached from the mux point of
	 * view. */
	if (sc_ep_test(cs, SE_FL_EOI) && !(ic->flags & CF_EOI)) {
		ic->flags |= (CF_EOI|CF_READ_PARTIAL);
		ret = 1;
	}

	if (sc_ep_test(cs, SE_FL_ERROR))
		ret = 1;
	else if (sc_ep_test(cs, SE_FL_EOS)) {
		/* we received a shutdown */
		ic->flags |= CF_READ_NULL;
		if (ic->flags & CF_AUTO_CLOSE)
			channel_shutw_now(ic);
		sc_conn_read0(cs);
		ret = 1;
	}
	else if (!cs_rx_blocked(cs) && !(ic->flags & CF_SHUTR)) {
		/* Subscribe to receive events if we're blocking on I/O */
		conn->mux->subscribe(cs, SUB_RETRY_RECV, &cs->wait_event);
		se_have_no_more_data(cs->sedesc);
	} else {
		se_have_more_data(cs->sedesc);
		ret = 1;
	}
	return ret;
}
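
/* A hedged sketch (excluded from the build) of the rcv_buf() contract the
 * loop above relies on: a mux that still holds payload but cannot copy it
 * for lack of channel buffer space reports both SE_FL_RCV_MORE ("call me
 * again") and SE_FL_WANT_ROOM ("the buffer is the limiting factor").
 * <payload_left>, <space_left> and <copied> are hypothetical mux-side values.
 */
#if 0
	if (payload_left && !space_left) {
		sc_ep_set(cs, SE_FL_RCV_MORE | SE_FL_WANT_ROOM);
		return copied;	/* partial copy; sc_conn_recv() will stop and retry later */
	}
#endif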

/* This tries to perform a synchronous receive on the stream connector to
 * try to collect last arrived data. In practice it's only implemented on
 * stconns. Returns 0 if nothing was done, non-zero if new data or a
 * shutdown were collected. This may result in some delayed receive calls
 * being programmed and performed later, though it doesn't provide any
 * such guarantee.
 */
int sc_conn_sync_recv(struct stconn *cs)
{
	if (!cs_state_in(cs->state, SC_SB_RDY|SC_SB_EST))
		return 0;

	if (!sc_mux_ops(cs))
		return 0; // only stconns are supported

	if (cs->wait_event.events & SUB_RETRY_RECV)
		return 0; // already subscribed

	if (!sc_is_recv_allowed(cs))
		return 0; // already failed

	return sc_conn_recv(cs);
}
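
/* Hedged usage sketch (excluded from the build): a caller that just woke
 * up and wants to drain freshly arrived data before running its own
 * processing might do the following; the call site is illustrative, not
 * a copy of actual stream code.
 */
#if 0
	if (sc_conn_sync_recv(cs)) {
		/* new data or a shutdown was collected: re-evaluate the stream */
	}
#endif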

/*
 * This function is called to send buffer data to a stream socket.
 * It calls the mux layer's snd_buf function. It relies on the
 * caller to commit polling changes. The caller should check conn->flags
 * for errors.
 */
static int sc_conn_send(struct stconn *cs)
{
	struct connection *conn = __sc_conn(cs);
	struct stream *s = __sc_strm(cs);
	struct channel *oc = sc_oc(cs);
	int ret;
	int did_send = 0;

	if (sc_ep_test(cs, SE_FL_ERROR | SE_FL_ERR_PENDING) || cs_is_conn_error(cs)) {
		/* We're probably there because the tasklet was woken up,
		 * but process_stream() ran before, detected there was an
		 * error and put the CS back to SC_ST_TAR. There's still
		 * CO_FL_ERROR on the connection but we don't want to add
		 * SE_FL_ERROR back, so give up
		 */
		if (cs->state < SC_ST_CON)
			return 0;
		sc_ep_set(cs, SE_FL_ERROR);
		return 1;
	}

	/* We're already waiting to be able to send, give up */
	if (cs->wait_event.events & SUB_RETRY_SEND)
		return 0;

	/* we might have been called just after an asynchronous shutw */
	if (oc->flags & CF_SHUTW)
		return 1;

	/* we must wait because the mux is not installed yet */
	if (!conn->mux)
		return 0;

	if (oc->pipe && conn->xprt->snd_pipe && conn->mux->snd_pipe) {
		ret = conn->mux->snd_pipe(cs, oc->pipe);
		if (ret > 0)
			did_send = 1;

		if (!oc->pipe->data) {
			put_pipe(oc->pipe);
			oc->pipe = NULL;
		}

		if (oc->pipe)
			goto end;
	}

	/* At this point, the pipe is empty, but we may still have data pending
	 * in the normal buffer.
	 */
	if (co_data(oc)) {
		/* when we're here, we already know that there is no spliced
		 * data left, and that there are sendable buffered data.
		 */

		/* check if we want to inform the kernel that we're interested in
		 * sending more data after this call. We want this if :
		 *  - we're about to close after this last send and want to merge
		 *    the ongoing FIN with the last segment.
		 *  - we know we can't send everything at once and must get back
		 *    here because of unaligned data
		 *  - there is still a finite amount of data to forward
		 * The test is arranged so that the most common case does only 2
		 * tests.
		 */
		unsigned int send_flag = 0;

		if ((!(oc->flags & (CF_NEVER_WAIT|CF_SEND_DONTWAIT)) &&
		     ((oc->to_forward && oc->to_forward != CHN_INFINITE_FORWARD) ||
		      (oc->flags & CF_EXPECT_MORE) ||
		      (IS_HTX_STRM(s) &&
		       (!(oc->flags & (CF_EOI|CF_SHUTR)) && htx_expect_more(htxbuf(&oc->buf)))))) ||
		    ((oc->flags & CF_ISRESP) &&
		     ((oc->flags & (CF_AUTO_CLOSE|CF_SHUTW_NOW)) == (CF_AUTO_CLOSE|CF_SHUTW_NOW))))
			send_flag |= CO_SFL_MSG_MORE;

		if (oc->flags & CF_STREAMER)
			send_flag |= CO_SFL_STREAMER;

		if (s->txn && s->txn->flags & TX_L7_RETRY && !b_data(&s->txn->l7_buffer)) {
			/* If we want to be able to do L7 retries, copy
			 * the data we're about to send, so that we are able
			 * to resend them if needed
			 */
			/* Try to allocate a buffer if we had none.
			 * If it fails, the next test will just
			 * disable the l7 retries by setting
			 * l7_conn_retries to 0.
			 */
			if (s->txn->req.msg_state != HTTP_MSG_DONE)
				s->txn->flags &= ~TX_L7_RETRY;
			else {
				if (b_alloc(&s->txn->l7_buffer) == NULL)
					s->txn->flags &= ~TX_L7_RETRY;
				else {
					memcpy(b_orig(&s->txn->l7_buffer),
					       b_orig(&oc->buf),
					       b_size(&oc->buf));
					s->txn->l7_buffer.head = co_data(oc);
					b_add(&s->txn->l7_buffer, co_data(oc));
				}
			}
		}

		ret = conn->mux->snd_buf(cs, &oc->buf, co_data(oc), send_flag);
		if (ret > 0) {
			did_send = 1;
			c_rew(oc, ret);
			c_realign_if_empty(oc);

			if (!co_data(oc)) {
				/* Always clear both flags once everything has been sent, they're one-shot */
				oc->flags &= ~(CF_EXPECT_MORE | CF_SEND_DONTWAIT);
			}
			/* if some data remain in the buffer, it's only because the
			 * system buffers are full, we will try next time.
			 */
		}
	}

 end:
	if (did_send) {
		oc->flags |= CF_WRITE_PARTIAL | CF_WROTE_DATA;
		if (cs->state == SC_ST_CON)
			cs->state = SC_ST_RDY;

		sc_have_room(cs_opposite(cs));
	}

	if (sc_ep_test(cs, SE_FL_ERROR | SE_FL_ERR_PENDING)) {
		sc_ep_set(cs, SE_FL_ERROR);
		return 1;
	}

	/* We couldn't send all of our data, let the mux know we'd like to send more */
	if (!channel_is_empty(oc))
		conn->mux->subscribe(cs, SUB_RETRY_SEND, &cs->wait_event);
	return did_send;
}
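
/* Hedged illustration (excluded from the build): at the transport level,
 * CO_SFL_MSG_MORE typically maps to the MSG_MORE send() flag so that the
 * kernel delays partial segments. This is a sketch of that mapping, not
 * the actual raw_sock code; <fd>, <buf> and <count> are assumed.
 */
#if 0
	int sflags = MSG_DONTWAIT | MSG_NOSIGNAL;

	if (send_flag & CO_SFL_MSG_MORE)
		sflags |= MSG_MORE;	/* more data will follow, let the kernel coalesce */
	ret = send(fd, buf, count, sflags);
#endif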

/* perform a synchronous send() for the stream connector. The CF_WRITE_NULL and
 * CF_WRITE_PARTIAL flags are cleared prior to the attempt, and will possibly
 * be updated in case of success.
 */
void sc_conn_sync_send(struct stconn *cs)
{
	struct channel *oc = sc_oc(cs);

	oc->flags &= ~(CF_WRITE_NULL|CF_WRITE_PARTIAL);

	if (oc->flags & CF_SHUTW)
		return;

	if (channel_is_empty(oc))
		return;

	if (!cs_state_in(cs->state, SC_SB_CON|SC_SB_RDY|SC_SB_EST))
		return;

	if (!sc_mux_ops(cs))
		return;

	sc_conn_send(cs);
}
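
/* Hedged usage sketch (excluded from the build): because the function
 * clears CF_WRITE_NULL/CF_WRITE_PARTIAL before trying, a caller can test
 * them afterwards to know whether this synchronous attempt made progress:
 */
#if 0
	sc_conn_sync_send(cs);
	if (sc_oc(cs)->flags & CF_WRITE_PARTIAL) {
		/* some output bytes really left the channel during this call */
	}
#endif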

/* Called by I/O handlers after completion. It propagates
 * connection flags to the stream connector, updates the stream (which may or
 * may not take this opportunity to try to forward data), then updates the
 * connection's polling based on the channels and stream connector's final
 * states. The function always returns 0.
 */
static int sc_conn_process(struct stconn *cs)
{
	struct connection *conn = __sc_conn(cs);
	struct channel *ic = sc_ic(cs);
	struct channel *oc = sc_oc(cs);

	BUG_ON(!conn);

	/* If we have data to send, try it now */
	if (!channel_is_empty(oc) && !(cs->wait_event.events & SUB_RETRY_SEND))
		sc_conn_send(cs);

	/* First step, report to the stream connector what was detected at the
	 * connection layer : errors and connection establishment.
	 * Only add SE_FL_ERROR if we're connected, or we're attempting to
	 * connect. We may get there because we got woken up, but only run
	 * after process_stream() noticed there was an error and decided
	 * to retry to connect; the connection may still have CO_FL_ERROR,
	 * and we don't want to add SE_FL_ERROR back.
	 *
	 * Note: This test is only required because sc_conn_process is also the SI
	 * wake callback. Otherwise sc_conn_recv()/sc_conn_send() already take
	 * care of it.
	 */

	if (cs->state >= SC_ST_CON) {
		if (cs_is_conn_error(cs))
			sc_ep_set(cs, SE_FL_ERROR);
	}

	/* If we had early data, and the handshake ended, then
	 * we can remove the flag, and attempt to wake the task up,
	 * in the event there's an analyser waiting for the end of
	 * the handshake.
	 */
	if (!(conn->flags & (CO_FL_WAIT_XPRT | CO_FL_EARLY_SSL_HS)) &&
	    sc_ep_test(cs, SE_FL_WAIT_FOR_HS)) {
		sc_ep_clr(cs, SE_FL_WAIT_FOR_HS);
		task_wakeup(sc_strm_task(cs), TASK_WOKEN_MSG);
	}

	if (!cs_state_in(cs->state, SC_SB_EST|SC_SB_DIS|SC_SB_CLO) &&
	    (conn->flags & CO_FL_WAIT_XPRT) == 0) {
		__sc_strm(cs)->conn_exp = TICK_ETERNITY;
		oc->flags |= CF_WRITE_NULL;
		if (cs->state == SC_ST_CON)
			cs->state = SC_ST_RDY;
	}

	/* Report EOS on the channel if it was reached from the mux point of
	 * view.
	 *
	 * Note: This test is only required because sc_conn_process is also the SI
	 * wake callback. Otherwise sc_conn_recv()/sc_conn_send() already take
	 * care of it.
	 */
	if (sc_ep_test(cs, SE_FL_EOS) && !(ic->flags & CF_SHUTR)) {
		/* we received a shutdown */
		ic->flags |= CF_READ_NULL;
		if (ic->flags & CF_AUTO_CLOSE)
			channel_shutw_now(ic);
		sc_conn_read0(cs);
	}

	/* Report EOI on the channel if it was reached from the mux point of
	 * view.
	 *
	 * Note: This test is only required because sc_conn_process is also the SI
	 * wake callback. Otherwise sc_conn_recv()/sc_conn_send() already take
	 * care of it.
	 */
	if (sc_ep_test(cs, SE_FL_EOI) && !(ic->flags & CF_EOI))
		ic->flags |= (CF_EOI|CF_READ_PARTIAL);

	/* Second step : update the stream connector and channels, try to forward any
	 * pending data, then possibly wake the stream up based on the new
	 * stream connector status.
	 */
	cs_notify(cs);
	stream_release_buffers(__sc_strm(cs));
	return 0;
}
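
/* Hedged illustration (excluded from the build): when a mux reports an
 * event outside the I/O tasklet, it ends up in this function through the
 * connector's wake callback, hence the duplicated EOS/EOI reporting above.
 * The field resolution below is an assumption for illustration only:
 */
#if 0
	cs->app_ops->wake(cs);	/* assumed to resolve to sc_conn_process() here */
#endif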

/* This is the ->process() function for any stream connector's wait_event task.
 * It's assigned during the stream connector's initialization, for any type of
 * stream connector. Thus it is always safe to perform a tasklet_wakeup() on a
 * stream connector, as the presence of the CS is checked there.
 */
struct task *sc_conn_io_cb(struct task *t, void *ctx, unsigned int state)
{
	struct stconn *cs = ctx;
	int ret = 0;

	if (!sc_conn(cs))
		return t;

	if (!(cs->wait_event.events & SUB_RETRY_SEND) && !channel_is_empty(sc_oc(cs)))
		ret = sc_conn_send(cs);
	if (!(cs->wait_event.events & SUB_RETRY_RECV))
		ret |= sc_conn_recv(cs);
	if (ret != 0)
		sc_conn_process(cs);

	stream_release_buffers(__sc_strm(cs));
	return t;
}
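
/* Hedged usage sketch (excluded from the build): any code holding a
 * stream connector may kick this callback by waking the wait_event
 * tasklet, typically after a mux subscription fired:
 */
#if 0
	tasklet_wakeup(cs->wait_event.tasklet);	/* will run sc_conn_io_cb() */
#endif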

/* Callback to be used by applet handlers upon completion. It updates the stream
 * (which may or may not take this opportunity to try to forward data), then
 * may re-enable the applet based on the channels and stream connector's final
 * states.
 */
static int cs_applet_process(struct stconn *cs)
{
	struct channel *ic = sc_ic(cs);

	BUG_ON(!sc_appctx(cs));

	/* If the applet wants to write and the channel is closed, it's a
	 * broken pipe and it must be reported.
	 */
	if (!sc_ep_test(cs, SE_FL_HAVE_NO_DATA) && (ic->flags & CF_SHUTR))
		sc_ep_set(cs, SE_FL_ERROR);

	/* automatically mark the applet having data available if it reported
	 * being blocked by the channel.
	 */
	if (cs_rx_blocked(cs) || sc_ep_test(cs, SE_FL_APPLET_NEED_CONN))
		applet_have_more_data(__sc_appctx(cs));

	/* update the stream connector, channels, and possibly wake the stream up */
	cs_notify(cs);
	stream_release_buffers(__sc_strm(cs));

	/* cs_notify may have passed through chk_snd and released some
	 * RXBLK flags. process_stream will consider those flags to wake up the
	 * appctx but in case the task is not in the runqueue we may have to
	 * wake up the appctx immediately.
	 */
	if (sc_is_recv_allowed(cs) || sc_is_send_allowed(cs))
		appctx_wakeup(__sc_appctx(cs));
	return 0;
}
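
/* Hedged illustration (excluded from the build): the broken-pipe rule
 * above means an applet should stop producing once the peer is gone
 * rather than letting the error be raised on the next pass. A
 * hypothetical applet I/O handler could do:
 */
#if 0
	if (sc_ic(cs)->flags & CF_SHUTR) {
		se_have_no_more_data(cs->sedesc);	/* nothing more to push */
		return;
	}
#endif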