/*
 * stream connector management functions
 *
 * Copyright 2021 Christopher Faulet <cfaulet@haproxy.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 */

#include <haproxy/api.h>
#include <haproxy/applet.h>
#include <haproxy/connection.h>
#include <haproxy/check.h>
#include <haproxy/http_ana.h>
#include <haproxy/pipe.h>
#include <haproxy/pool.h>
#include <haproxy/sc_strm.h>
#include <haproxy/stconn.h>

DECLARE_POOL(pool_head_connstream, "stconn", sizeof(struct stconn));
DECLARE_POOL(pool_head_sedesc, "sedesc", sizeof(struct sedesc));

/* functions used by default on a detached stream connector */
static void sc_app_shutr(struct stconn *sc);
static void sc_app_shutw(struct stconn *sc);
static void sc_app_chk_rcv(struct stconn *sc);
static void sc_app_chk_snd(struct stconn *sc);

/* functions used on a mux-based stream connector */
static void sc_app_shutr_conn(struct stconn *sc);
static void sc_app_shutw_conn(struct stconn *sc);
static void sc_app_chk_rcv_conn(struct stconn *sc);
static void sc_app_chk_snd_conn(struct stconn *sc);

/* functions used on an applet-based stream connector */
static void sc_app_shutr_applet(struct stconn *sc);
static void sc_app_shutw_applet(struct stconn *sc);
static void sc_app_chk_rcv_applet(struct stconn *sc);
static void sc_app_chk_snd_applet(struct stconn *sc);

static int sc_conn_process(struct stconn *sc);
static int sc_conn_recv(struct stconn *sc);
static int sc_conn_send(struct stconn *sc);
static int sc_applet_process(struct stconn *sc);

/* stream connector operations for connections */
struct sc_app_ops sc_app_conn_ops = {
	.chk_rcv = sc_app_chk_rcv_conn,
	.chk_snd = sc_app_chk_snd_conn,
	.shutr   = sc_app_shutr_conn,
	.shutw   = sc_app_shutw_conn,
	.wake    = sc_conn_process,
	.name    = "STRM",
};

/* stream connector operations for embedded tasks */
struct sc_app_ops sc_app_embedded_ops = {
	.chk_rcv = sc_app_chk_rcv,
	.chk_snd = sc_app_chk_snd,
	.shutr   = sc_app_shutr,
	.shutw   = sc_app_shutw,
	.wake    = NULL,   /* may never be used */
	.name    = "NONE", /* may never be used */
};

/* stream connector operations for applets */
struct sc_app_ops sc_app_applet_ops = {
	.chk_rcv = sc_app_chk_rcv_applet,
	.chk_snd = sc_app_chk_snd_applet,
	.shutr   = sc_app_shutr_applet,
	.shutw   = sc_app_shutw_applet,
	.wake    = sc_applet_process,
	.name    = "STRM",
};

/* stream connector operations for health checks on connections */
struct sc_app_ops sc_app_check_ops = {
	.chk_rcv = NULL,
	.chk_snd = NULL,
	.shutr   = NULL,
	.shutw   = NULL,
	.wake    = wake_srv_chk,
	.name    = "CHCK",
};

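/* Illustrative dispatch sketch (not taken verbatim from any single call
 * site): these vtables are never invoked by name from the outside. Callers
 * dispatch through the ops pointer installed on the connector, roughly:
 *
 *	if (sc->app_ops && sc->app_ops->shutw)
 *		sc->app_ops->shutw(sc);
 *
 * which resolves to sc_app_shutw_conn() for a mux-based connector,
 * sc_app_shutw_applet() for an applet-based one, and sc_app_shutw() for an
 * embedded task.
 */
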
/* Initializes an endpoint */
void sedesc_init(struct sedesc *sedesc)
{
	sedesc->se = NULL;
	sedesc->conn = NULL;
	sedesc->sc = NULL;
	sedesc->lra = TICK_ETERNITY;
	sedesc->fsb = TICK_ETERNITY;
	se_fl_setall(sedesc, SE_FL_NONE);
}

/* Tries to allocate an endpoint and initialize it. Returns NULL on failure. */
struct sedesc *sedesc_new()
{
	struct sedesc *sedesc;

	sedesc = pool_alloc(pool_head_sedesc);
	if (unlikely(!sedesc))
		return NULL;

	sedesc_init(sedesc);
	return sedesc;
}

/* Releases an endpoint. It is the caller's responsibility to make sure it is
 * safe to do so and that the endpoint is not shared with another entity.
 */
void sedesc_free(struct sedesc *sedesc)
{
	pool_free(pool_head_sedesc, sedesc);
}

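/* Pairing sketch (illustrative): endpoint descriptors always go through this
 * allocate/release pair, either directly as below or indirectly via
 * sc_new()/sc_free():
 *
 *	struct sedesc *sd = sedesc_new();
 *	if (!sd)
 *		return NULL;
 *	...
 *	sedesc_free(sd);
 */
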
/* Tries to allocate a new stconn and initialize its main fields. On
 * failure, nothing is allocated and NULL is returned. It is an internal
 * function. The caller must, at least, set the SE_FL_ORPHAN or SE_FL_DETACHED
 * flag.
 */
static struct stconn *sc_new(struct sedesc *sedesc)
{
	struct stconn *sc;

	sc = pool_alloc(pool_head_connstream);

	if (unlikely(!sc))
		goto alloc_error;

	sc->obj_type = OBJ_TYPE_SC;
	sc->flags = SC_FL_NONE;
	sc->state = SC_ST_INI;
	sc->ioto = TICK_ETERNITY;
	sc->app = NULL;
	sc->app_ops = NULL;
	sc->src = NULL;
	sc->dst = NULL;
	sc->wait_event.tasklet = NULL;
	sc->wait_event.events = 0;

	/* If there is no endpoint, allocate a new one now */
	if (!sedesc) {
		sedesc = sedesc_new();
		if (unlikely(!sedesc))
			goto alloc_error;
	}
	sc->sedesc = sedesc;
	sedesc->sc = sc;

	return sc;

  alloc_error:
	pool_free(pool_head_connstream, sc);
	return NULL;
}

/* Creates a new stream connector and its associated stream from a mux. <sd> must
 * be defined. It returns NULL on error. On success, the new stream connector is
 * returned. In this case, the SE_FL_ORPHAN flag is removed.
 */
struct stconn *sc_new_from_endp(struct sedesc *sd, struct session *sess, struct buffer *input)
{
	struct stconn *sc;

	sc = sc_new(sd);
	if (unlikely(!sc))
		return NULL;
	if (unlikely(!stream_new(sess, sc, input))) {
		sd->sc = NULL;
		if (sc->sedesc != sd) {
			/* none was provided so sc_new() allocated one */
			sedesc_free(sc->sedesc);
		}
		pool_free(pool_head_connstream, sc);
		se_fl_set(sd, SE_FL_ORPHAN);
		return NULL;
	}
	se_fl_clr(sd, SE_FL_ORPHAN);
	return sc;
}

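/* Usage sketch (hypothetical, simplified from a mux's stream-creation path):
 * the mux owns an orphaned sedesc <sd> and hands it to the stream layer:
 *
 *	struct stconn *sc = sc_new_from_endp(sd, sess, input);
 *	if (!sc)
 *		return -1;   => <sd> is orphaned again, still owned by the mux
 *
 * On success the mux can later reach its stream connector through sd->sc.
 */
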
/* Creates a new stream connector from a stream. There is no endpoint here, thus it
 * will be created by sc_new(). So the SE_FL_DETACHED flag is set. It returns
 * NULL on error. On success, the new stream connector is returned.
 */
struct stconn *sc_new_from_strm(struct stream *strm, unsigned int flags)
{
	struct stconn *sc;

	sc = sc_new(NULL);
	if (unlikely(!sc))
		return NULL;
	sc->flags |= flags;
	sc_ep_set(sc, SE_FL_DETACHED);
	sc->app = &strm->obj_type;
	sc->app_ops = &sc_app_embedded_ops;
	return sc;
}

/* Creates a new stream connector from a health check. There is no endpoint here,
 * thus it will be created by sc_new(). So the SE_FL_DETACHED flag is set. It
 * returns NULL on error. On success, the new stream connector is returned.
 */
struct stconn *sc_new_from_check(struct check *check, unsigned int flags)
{
	struct stconn *sc;

	sc = sc_new(NULL);
	if (unlikely(!sc))
		return NULL;
	sc->flags |= flags;
	sc_ep_set(sc, SE_FL_DETACHED);
	sc->app = &check->obj_type;
	sc->app_ops = &sc_app_check_ops;
	return sc;
}

/* Releases a stconn previously allocated by sc_new(), as well as its
 * endpoint, if it exists. This function is called internally or on the error
 * path.
 */
void sc_free(struct stconn *sc)
{
	sockaddr_free(&sc->src);
	sockaddr_free(&sc->dst);
	if (sc->sedesc) {
		BUG_ON(!sc_ep_test(sc, SE_FL_DETACHED));
		sedesc_free(sc->sedesc);
	}
	if (sc->wait_event.tasklet)
		tasklet_free(sc->wait_event.tasklet);
	pool_free(pool_head_connstream, sc);
}

/* Conditionally removes a stream connector if it is detached and if there is no app
 * layer defined. Except on the error path, this one must be used. If released,
 * the pointer to the SC is set to NULL.
 */
static void sc_free_cond(struct stconn **scp)
{
	struct stconn *sc = *scp;

	if (!sc->app && (!sc->sedesc || sc_ep_test(sc, SE_FL_DETACHED))) {
		sc_free(sc);
		*scp = NULL;
	}
}

|
|
|
/* Attaches a stconn to a mux endpoint and sets the endpoint ctx. Returns
|
2022-07-29 17:26:53 +00:00
|
|
|
* -1 on error and 0 on success. SE_FL_DETACHED flag is removed. This function is
|
2022-04-12 06:51:15 +00:00
|
|
|
* called from a mux when it is attached to a stream or a health-check.
|
|
|
|
*/
|
2022-05-27 14:21:33 +00:00
|
|
|
int sc_attach_mux(struct stconn *sc, void *sd, void *ctx)
|
2021-12-23 16:28:17 +00:00
|
|
|
{
|
2022-01-19 13:56:50 +00:00
|
|
|
struct connection *conn = ctx;
|
2022-05-27 08:02:48 +00:00
|
|
|
struct sedesc *sedesc = sc->sedesc;
|
2021-12-23 16:28:17 +00:00
|
|
|
|
2022-05-27 08:02:48 +00:00
|
|
|
if (sc_strm(sc)) {
|
|
|
|
if (!sc->wait_event.tasklet) {
|
|
|
|
sc->wait_event.tasklet = tasklet_new();
|
|
|
|
if (!sc->wait_event.tasklet)
|
2022-03-31 09:09:28 +00:00
|
|
|
return -1;
|
2022-05-27 08:02:48 +00:00
|
|
|
sc->wait_event.tasklet->process = sc_conn_io_cb;
|
|
|
|
sc->wait_event.tasklet->context = sc;
|
|
|
|
sc->wait_event.events = 0;
|
2022-03-31 09:09:28 +00:00
|
|
|
}
|
|
|
|
|
2022-05-27 08:02:48 +00:00
|
|
|
sc->app_ops = &sc_app_conn_ops;
|
2021-12-23 16:28:17 +00:00
|
|
|
}
|
2022-05-27 08:02:48 +00:00
|
|
|
else if (sc_check(sc)) {
|
|
|
|
if (!sc->wait_event.tasklet) {
|
|
|
|
sc->wait_event.tasklet = tasklet_new();
|
|
|
|
if (!sc->wait_event.tasklet)
|
2022-05-18 13:57:15 +00:00
|
|
|
return -1;
|
2022-05-27 08:02:48 +00:00
|
|
|
sc->wait_event.tasklet->process = srv_chk_io_cb;
|
|
|
|
sc->wait_event.tasklet->context = sc;
|
|
|
|
sc->wait_event.events = 0;
|
2022-05-18 13:57:15 +00:00
|
|
|
}
|
|
|
|
|
2022-05-27 08:02:48 +00:00
|
|
|
sc->app_ops = &sc_app_check_ops;
|
2022-05-18 13:57:15 +00:00
|
|
|
}
|
2023-03-20 18:45:41 +00:00
|
|
|
|
|
|
|
sedesc->se = sd;
|
|
|
|
sedesc->conn = ctx;
|
|
|
|
se_fl_set(sedesc, SE_FL_T_MUX);
|
|
|
|
se_fl_clr(sedesc, SE_FL_DETACHED);
|
|
|
|
if (!conn->ctx)
|
|
|
|
conn->ctx = sc;
|
2022-03-31 17:27:18 +00:00
|
|
|
return 0;
|
2022-01-19 13:56:50 +00:00
|
|
|
}
|
|
|
|
|
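/* Usage sketch (hypothetical names): a mux that has just created its own
 * stream descriptor <h_sd> binds it to an existing connector during attach:
 *
 *	if (sc_attach_mux(sc, h_sd, conn) < 0)
 *		=> tasklet allocation failed, abort the attach
 *
 * After this call the endpoint is of type SE_FL_T_MUX and no longer detached.
 */
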
/* Attaches a stconn to an applet endpoint and sets the endpoint ctx. The
 * SE_FL_DETACHED flag is removed. This function is called by a stream when a
 * backend applet is registered.
 */
static void sc_attach_applet(struct stconn *sc, void *sd)
{
	sc->sedesc->se = sd;
	sc_ep_set(sc, SE_FL_T_APPLET);
	sc_ep_clr(sc, SE_FL_DETACHED);
	if (sc_strm(sc))
		sc->app_ops = &sc_app_applet_ops;
}

/* Attaches a stconn to an app layer and sets the relevant
 * callbacks. Returns -1 on error and 0 on success. The SE_FL_ORPHAN flag is
 * removed. This function is called by a stream when it is created, to attach it
 * on the stream connector on the client side.
 */
int sc_attach_strm(struct stconn *sc, struct stream *strm)
{
	sc->app = &strm->obj_type;
	sc_ep_clr(sc, SE_FL_ORPHAN);
	if (sc_ep_test(sc, SE_FL_T_MUX)) {
		sc->wait_event.tasklet = tasklet_new();
		if (!sc->wait_event.tasklet)
			return -1;
		sc->wait_event.tasklet->process = sc_conn_io_cb;
		sc->wait_event.tasklet->context = sc;
		sc->wait_event.events = 0;

		sc->app_ops = &sc_app_conn_ops;
	}
	else if (sc_ep_test(sc, SE_FL_T_APPLET)) {
		sc->app_ops = &sc_app_applet_ops;
	}
	else {
		sc->app_ops = &sc_app_embedded_ops;
	}
	return 0;
}

/* Detaches the stconn from the endpoint, if any. For a connection, if a
 * mux owns the connection, the ->detach() callback is called. Otherwise, it means
 * the stream connector owns the connection. In this case the connection is closed
 * and released. For an applet, the appctx is released. If still allocated, the
 * endpoint is reset and flagged as detached. If the app layer is also detached,
 * the stream connector is released.
 */
static void sc_detach_endp(struct stconn **scp)
{
	struct stconn *sc = *scp;

	if (!sc)
		return;

	if (sc_ep_test(sc, SE_FL_T_MUX)) {
		struct connection *conn = __sc_conn(sc);
		struct sedesc *sedesc = sc->sedesc;

		if (conn->mux) {
			if (sc->wait_event.events != 0)
				conn->mux->unsubscribe(sc, sc->wait_event.events, &sc->wait_event);
			se_fl_set(sedesc, SE_FL_ORPHAN);
			sedesc->sc = NULL;
			sc->sedesc = NULL;
			conn->mux->detach(sedesc);
		}
		else {
			/* It's too early to have a mux, let's just destroy
			 * the connection
			 */
			conn_stop_tracking(conn);
			conn_full_close(conn);
			if (conn->destroy_cb)
				conn->destroy_cb(conn);
			conn_free(conn);
		}
	}
	else if (sc_ep_test(sc, SE_FL_T_APPLET)) {
		struct appctx *appctx = __sc_appctx(sc);

		sc_ep_set(sc, SE_FL_ORPHAN);
		sc->sedesc->sc = NULL;
		sc->sedesc = NULL;
		appctx_shut(appctx);
		appctx_free(appctx);
	}

	if (sc->sedesc) {
		/* the SD wasn't used and can be recycled */
		sc->sedesc->se = NULL;
		sc->sedesc->conn = NULL;
		sc->sedesc->flags = 0;
		sc_ep_set(sc, SE_FL_DETACHED);
	}

	/* FIXME: Reset the SC for now, but this must be reviewed. SC flags are
	 * only connection related for now but this will evolve.
	 */
	sc->flags &= SC_FL_ISBACK;
	if (sc_strm(sc))
		sc->app_ops = &sc_app_embedded_ops;
	else
		sc->app_ops = NULL;
	sc_free_cond(scp);
}

/* Detaches the stconn from the app layer. If there is no endpoint attached
 * to the stconn, it is released.
 */
static void sc_detach_app(struct stconn **scp)
{
	struct stconn *sc = *scp;

	if (!sc)
		return;

	sc->app = NULL;
	sc->app_ops = NULL;
	sockaddr_free(&sc->src);
	sockaddr_free(&sc->dst);

	if (sc->wait_event.tasklet)
		tasklet_free(sc->wait_event.tasklet);
	sc->wait_event.tasklet = NULL;
	sc->wait_event.events = 0;
	sc_free_cond(scp);
}

/* Destroys the stconn. It is detached from its endpoint and its
 * application. After this call, the stconn must be considered as released.
 */
void sc_destroy(struct stconn *sc)
{
	sc_detach_endp(&sc);
	sc_detach_app(&sc);
	BUG_ON_HOT(sc);
}

/* Resets the stream connector endpoint. It happens when the app layer wants to
 * renew its endpoint, for a connection retry for instance. If a mux or an applet
 * is attached, a new endpoint is created. Returns -1 on error and 0 on success.
 *
 * Only the SE_FL_ERROR flag is removed on the endpoint. Other flags are preserved.
 * It is the caller's responsibility to remove other flags if needed.
 */
int sc_reset_endp(struct stconn *sc)
{
	struct sedesc *new_sd;

	BUG_ON(!sc->app);

	sc_ep_clr(sc, SE_FL_ERROR);
	if (!__sc_endp(sc)) {
		/* endpoint not attached or attached to a mux with no
		 * target. Thus the endpoint will not be released but just
		 * reset. The app is still attached, the sc will not be
		 * released.
		 */
		sc_detach_endp(&sc);
		return 0;
	}

	/* allocate the new endpoint first to be able to set error if it
	 * fails */
	new_sd = sedesc_new();
	if (unlikely(!new_sd)) {
		sc_ep_set(sc, SE_FL_ERROR);
		return -1;
	}

	/* The app is still attached, the sc will not be released */
	sc_detach_endp(&sc);
	BUG_ON(!sc);
	BUG_ON(sc->sedesc);
	sc->sedesc = new_sd;
	sc->sedesc->sc = sc;
	sc_ep_set(sc, SE_FL_DETACHED);
	return 0;
}

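/* Usage sketch (hypothetical): the stream layer typically calls this on the
 * backend connector before retrying a failed connection attempt:
 *
 *	if (sc_reset_endp(s->scb) < 0)
 *		=> allocation failure, the stream must abort the retry
 *
 * On success the connector carries a fresh, detached endpoint ready to be
 * attached to a new mux.
 */
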
/* Creates an applet to handle a stream connector as a new appctx. The SC will
 * wake it up every time it is solicited. The appctx must be deleted by the task
 * handler using sc_detach_endp(), possibly from within the function itself.
 * It also pre-initializes the applet's context and returns it (or NULL in case
 * it could not be allocated).
 */
struct appctx *sc_applet_create(struct stconn *sc, struct applet *app)
{
	struct appctx *appctx;

	appctx = appctx_new_here(app, sc->sedesc);
	if (!appctx)
		return NULL;
	sc_attach_applet(sc, appctx);
	appctx->t->nice = __sc_strm(sc)->task->nice;
	applet_need_more_data(appctx);
	appctx_wakeup(appctx);

	sc->state = SC_ST_RDY;
	return appctx;
}

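/* Usage sketch (hypothetical, close to what the stream layer does when the
 * target of a stream is an applet-based service):
 *
 *	struct appctx *appctx = sc_applet_create(s->scb, objt_applet(s->target));
 *	if (!appctx)
 *		=> allocation failure, raise a resource error on the stream
 */
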
/* Conditionally forwards the close to the write side. It returns 1 if it can
 * be forwarded. In this case, it is the caller's responsibility to forward the
 * close to the write side. Otherwise, 0 is returned, either because the close
 * must not be performed at all, or because it must only be delayed until the
 * input channel is flushed. In this last case, the SC_FL_SHUTW_NOW flag is set
 * on the consumer SC so that the shutdown for write is scheduled instead of
 * being lost when the last data block is received together with the read0.
 */
static inline int sc_cond_forward_shutw(struct stconn *sc)
{
	/* The close must not be forwarded */
	if (!(sc->flags & SC_FL_SHUTR) || !(sc->flags & SC_FL_NOHALF))
		return 0;

	if (!channel_is_empty(sc_ic(sc))) {
		/* the close to the write side cannot be forwarded now because
		 * we should flush outgoing data first. But instruct the output
		 * channel it should be done ASAP.
		 */
		channel_shutw_now(sc_oc(sc));
		return 0;
	}

	/* the close can be immediately forwarded to the write side */
	return 1;
}

/*
 * This function performs a shutdown-read on a detached stream connector in a
 * connected or init state (it does nothing for other states). It either shuts
 * the read side or marks itself as closed. The buffer flags are updated to
 * reflect the new state. If the stream connector has SC_FL_NOHALF, we also
 * forward the close to the write side. The owner task is woken up if it exists.
 */
static void sc_app_shutr(struct stconn *sc)
{
	struct channel *ic = sc_ic(sc);

	if (sc->flags & SC_FL_SHUTR)
		return;

	sc->flags |= SC_FL_SHUTR;
	ic->flags |= CF_READ_EVENT;
	sc_ep_report_read_activity(sc);

	if (!sc_state_in(sc->state, SC_SB_CON|SC_SB_RDY|SC_SB_EST))
		return;

	if (sc->flags & SC_FL_SHUTW) {
		sc->state = SC_ST_DIS;
		if (sc->flags & SC_FL_ISBACK)
			__sc_strm(sc)->conn_exp = TICK_ETERNITY;
	}
	else if (sc_cond_forward_shutw(sc))
		return sc_app_shutw(sc);

	/* note that if the task exists, it must unregister itself once it runs */
	if (!(sc->flags & SC_FL_DONT_WAKE))
		task_wakeup(sc_strm_task(sc), TASK_WOKEN_IO);
}

/*
 * This function performs a shutdown-write on a detached stream connector in a
 * connected or init state (it does nothing for other states). It either shuts
 * the write side or marks itself as closed. The buffer flags are updated to
 * reflect the new state. It does also close everything if the SC was marked as
 * being in error state. The owner task is woken up if it exists.
 */
static void sc_app_shutw(struct stconn *sc)
{
	struct channel *ic = sc_ic(sc);
	struct channel *oc = sc_oc(sc);

	sc->flags &= ~SC_FL_SHUTW_NOW;
	if (sc->flags & SC_FL_SHUTW)
		return;
	sc->flags |= SC_FL_SHUTW;
	oc->flags |= CF_WRITE_EVENT;
	sc_set_hcto(sc);

	switch (sc->state) {
	case SC_ST_RDY:
	case SC_ST_EST:
		/* we have to shut before closing, otherwise some short messages
		 * may never leave the system, especially when there are remaining
		 * unread data in the socket input buffer, or when nolinger is set.
		 * However, if SC_FL_NOLINGER is explicitly set, we know there is
		 * no risk so we close both sides immediately.
		 */
		if (!sc_ep_test(sc, SE_FL_ERROR) && !(sc->flags & SC_FL_NOLINGER) &&
		    !(sc->flags & SC_FL_SHUTR) && !(ic->flags & CF_DONT_READ))
			return;

		__fallthrough;
	case SC_ST_CON:
	case SC_ST_CER:
	case SC_ST_QUE:
	case SC_ST_TAR:
		/* Note that none of these states may happen with applets */
		sc->state = SC_ST_DIS;
		__fallthrough;
	default:
		sc->flags &= ~SC_FL_NOLINGER;
		sc->flags |= SC_FL_SHUTR;
		if (sc->flags & SC_FL_ISBACK)
			__sc_strm(sc)->conn_exp = TICK_ETERNITY;
	}

	/* note that if the task exists, it must unregister itself once it runs */
	if (!(sc->flags & SC_FL_DONT_WAKE))
		task_wakeup(sc_strm_task(sc), TASK_WOKEN_IO);
}

/* default chk_rcv function for scheduled tasks */
static void sc_app_chk_rcv(struct stconn *sc)
{
	struct channel *ic = sc_ic(sc);

	if (ic->pipe) {
		/* stop reading */
		sc_need_room(sc);
	}
	else {
		/* (re)start reading */
		if (!(sc->flags & SC_FL_DONT_WAKE))
			task_wakeup(sc_strm_task(sc), TASK_WOKEN_IO);
	}
}

/* default chk_snd function for scheduled tasks */
static void sc_app_chk_snd(struct stconn *sc)
{
	struct channel *oc = sc_oc(sc);

	if (unlikely(sc->state != SC_ST_EST || (sc->flags & SC_FL_SHUTW)))
		return;

	if (!sc_ep_test(sc, SE_FL_WAIT_DATA) ||  /* not waiting for data */
	    channel_is_empty(oc))                /* called with nothing to send ! */
		return;

	/* Otherwise there are remaining data to be sent in the buffer,
	 * so we tell the handler.
	 */
	sc_ep_clr(sc, SE_FL_WAIT_DATA);
	if (!(sc->flags & SC_FL_DONT_WAKE))
		task_wakeup(sc_strm_task(sc), TASK_WOKEN_IO);
}

/*
 * This function performs a shutdown-read on a stream connector attached to
 * a connection in a connected or init state (it does nothing for other
 * states). It either shuts the read side or marks itself as closed. The buffer
 * flags are updated to reflect the new state. If the stream connector has
 * SC_FL_NOHALF, we also forward the close to the write side. If a control
 * layer is defined, then it is supposed to be a socket layer and file
 * descriptors are then shutdown or closed accordingly. The function
 * automatically disables polling if needed.
 */
static void sc_app_shutr_conn(struct stconn *sc)
{
	struct channel *ic = sc_ic(sc);

	BUG_ON(!sc_conn(sc));

	if (sc->flags & SC_FL_SHUTR)
		return;
	sc->flags |= SC_FL_SHUTR;
	ic->flags |= CF_READ_EVENT;

	if (!sc_state_in(sc->state, SC_SB_CON|SC_SB_RDY|SC_SB_EST))
		return;

	if (sc->flags & SC_FL_SHUTW) {
		sc_conn_shut(sc);
		sc->state = SC_ST_DIS;
		if (sc->flags & SC_FL_ISBACK)
			__sc_strm(sc)->conn_exp = TICK_ETERNITY;
	}
	else if (sc_cond_forward_shutw(sc))
		return sc_app_shutw_conn(sc);
}

/*
 * This function performs a shutdown-write on a stream connector attached to
 * a connection in a connected or init state (it does nothing for other
 * states). It either shuts the write side or marks itself as closed. The
 * buffer flags are updated to reflect the new state. It does also close
 * everything if the SC was marked as being in error state. If there is a
 * data-layer shutdown, it is called.
 */
static void sc_app_shutw_conn(struct stconn *sc)
{
	struct channel *ic = sc_ic(sc);
	struct channel *oc = sc_oc(sc);

	BUG_ON(!sc_conn(sc));

	sc->flags &= ~SC_FL_SHUTW_NOW;
	if (sc->flags & SC_FL_SHUTW)
		return;
	sc->flags |= SC_FL_SHUTW;
	oc->flags |= CF_WRITE_EVENT;
	sc_set_hcto(sc);

	switch (sc->state) {
	case SC_ST_RDY:
	case SC_ST_EST:
		/* we have to shut before closing, otherwise some short messages
		 * may never leave the system, especially when there are remaining
		 * unread data in the socket input buffer, or when nolinger is set.
		 * However, if SC_FL_NOLINGER is explicitly set, we know there is
		 * no risk so we close both sides immediately.
		 */

		if (sc_ep_test(sc, SE_FL_ERROR)) {
			/* quick close, the socket is already shut anyway */
		}
		else if (sc->flags & SC_FL_NOLINGER) {
			/* unclean data-layer shutdown, typically an aborted request
			 * or a forwarded shutdown from a client to a server due to
			 * option abortonclose. No need for the TLS layer to try to
			 * emit a shutdown message.
			 */
			sc_conn_shutw(sc, CO_SHW_SILENT);
		}
		else {
			/* clean data-layer shutdown. This only happens on the
			 * frontend side, or on the backend side when forwarding
			 * a client close in TCP mode or in HTTP TUNNEL mode
			 * while option abortonclose is set. We want the TLS
			 * layer to try to signal it to the peer before we close.
			 */
			sc_conn_shutw(sc, CO_SHW_NORMAL);

			if (!(sc->flags & SC_FL_SHUTR) && !(ic->flags & CF_DONT_READ))
				return;
		}

		__fallthrough;
	case SC_ST_CON:
		/* we may have to close a pending connection, and mark the
		 * response buffer as shutr
		 */
		sc_conn_shut(sc);
		__fallthrough;
	case SC_ST_CER:
	case SC_ST_QUE:
	case SC_ST_TAR:
		sc->state = SC_ST_DIS;
		__fallthrough;
	default:
		sc->flags &= ~SC_FL_NOLINGER;
		sc->flags |= SC_FL_SHUTR;
		if (sc->flags & SC_FL_ISBACK)
			__sc_strm(sc)->conn_exp = TICK_ETERNITY;
	}
}

/* This function is used for inter-stream connector calls. It is called by the
 * consumer to inform the producer side that it may be interested in checking
 * for free space in the buffer. Note that it intentionally does not update
 * timeouts, so that we can still check them later at wake-up. This function is
 * dedicated to connection-based stream connectors.
 */
static void sc_app_chk_rcv_conn(struct stconn *sc)
{
	BUG_ON(!sc_conn(sc));

	/* (re)start reading */
	if (sc_state_in(sc->state, SC_SB_CON|SC_SB_RDY|SC_SB_EST))
		tasklet_wakeup(sc->wait_event.tasklet);
}

/* This function is used for inter-stream connector calls. It is called by the
 * producer to inform the consumer side that it may be interested in checking
 * for data in the buffer. Note that it intentionally does not update timeouts,
 * so that we can still check them later at wake-up.
 */
static void sc_app_chk_snd_conn(struct stconn *sc)
{
	struct channel *oc = sc_oc(sc);

	BUG_ON(!sc_conn(sc));

	if (unlikely(!sc_state_in(sc->state, SC_SB_RDY|SC_SB_EST) ||
	    (sc->flags & SC_FL_SHUTW)))
		return;

	if (unlikely(channel_is_empty(oc)))  /* called with nothing to send ! */
		return;

	if (!oc->pipe &&                      /* spliced data wants to be forwarded ASAP */
	    !sc_ep_test(sc, SE_FL_WAIT_DATA)) /* not waiting for data */
		return;

	if (!(sc->wait_event.events & SUB_RETRY_SEND) && !channel_is_empty(sc_oc(sc)))
		sc_conn_send(sc);

	if (sc_ep_test(sc, SE_FL_ERROR | SE_FL_ERR_PENDING) || sc_is_conn_error(sc)) {
		/* Write error on the file descriptor */
		if (sc->state >= SC_ST_CON && sc_ep_test(sc, SE_FL_EOS))
			sc_ep_set(sc, SE_FL_ERROR);
		goto out_wakeup;
	}

	/* OK, so now we know that some data might have been sent, and that we may
	 * have to poll first. We have to do that too if the buffer is not empty.
	 */
	if (channel_is_empty(oc)) {
		/* the connection is established but we can't write. Either the
		 * buffer is empty, or we just refrain from sending because the
		 * ->o limit was reached. Maybe we just wrote the last
		 * chunk and need to close.
		 */
		if ((oc->flags & CF_AUTO_CLOSE) &&
		    ((sc->flags & (SC_FL_SHUTW|SC_FL_SHUTW_NOW)) == SC_FL_SHUTW_NOW) &&
		    sc_state_in(sc->state, SC_SB_RDY|SC_SB_EST)) {
			sc_shutw(sc);
			goto out_wakeup;
		}

		if ((sc->flags & (SC_FL_SHUTW|SC_FL_SHUTW_NOW)) == 0)
			sc_ep_set(sc, SE_FL_WAIT_DATA);
	}
	else {
		/* Otherwise there are remaining data to be sent in the buffer,
		 * which means we have to poll before doing so.
		 */
		sc_ep_clr(sc, SE_FL_WAIT_DATA);
	}

	/* in case of special condition (error, shutdown, end of write...), we
	 * have to notify the task.
	 */
	if (likely((sc->flags & SC_FL_SHUTW) ||
		   ((oc->flags & CF_WRITE_EVENT) && sc->state < SC_ST_EST) ||
		   ((oc->flags & CF_WAKE_WRITE) &&
		    ((channel_is_empty(oc) && !oc->to_forward) ||
		     !sc_state_in(sc->state, SC_SB_EST))))) {
	out_wakeup:
		if (!(sc->flags & SC_FL_DONT_WAKE))
			task_wakeup(sc_strm_task(sc), TASK_WOKEN_IO);
	}
}

/*
 * This function performs a shutdown-read on a stream connector attached to an
 * applet in a connected or init state (it does nothing for other states). It
 * either shuts the read side or marks itself as closed. The buffer flags are
 * updated to reflect the new state. If the stream connector has SC_FL_NOHALF,
 * we also forward the close to the write side. The owner task is woken up if
 * it exists.
 */
static void sc_app_shutr_applet(struct stconn *sc)
{
	struct channel *ic = sc_ic(sc);

	BUG_ON(!sc_appctx(sc));

	if (sc->flags & SC_FL_SHUTR)
		return;
	sc->flags |= SC_FL_SHUTR;
	ic->flags |= CF_READ_EVENT;

	/* Note: on shutr, we don't call the applet */

	if (!sc_state_in(sc->state, SC_SB_CON|SC_SB_RDY|SC_SB_EST))
		return;

	if (sc->flags & SC_FL_SHUTW) {
		appctx_shut(__sc_appctx(sc));
		sc->state = SC_ST_DIS;
		if (sc->flags & SC_FL_ISBACK)
			__sc_strm(sc)->conn_exp = TICK_ETERNITY;
	}
	else if (sc_cond_forward_shutw(sc))
		return sc_app_shutw_applet(sc);
}

/*
 * This function performs a shutdown-write on a stream connector attached to an
 * applet in a connected or init state (it does nothing for other states). It
 * either shuts the write side or marks itself as closed. The buffer flags are
 * updated to reflect the new state. It does also close everything if the SC
 * was marked as being in error state. The owner task is woken up if it exists.
 */
static void sc_app_shutw_applet(struct stconn *sc)
{
	struct channel *ic = sc_ic(sc);
	struct channel *oc = sc_oc(sc);

	BUG_ON(!sc_appctx(sc));

	sc->flags &= ~SC_FL_SHUTW_NOW;
	if (sc->flags & SC_FL_SHUTW)
		return;
	sc->flags |= SC_FL_SHUTW;
	oc->flags |= CF_WRITE_EVENT;
	sc_set_hcto(sc);

	/* on shutw we always wake the applet up */
	appctx_wakeup(__sc_appctx(sc));

	switch (sc->state) {
	case SC_ST_RDY:
	case SC_ST_EST:
		/* we have to shut before closing, otherwise some short messages
		 * may never leave the system, especially when there are remaining
		 * unread data in the socket input buffer, or when nolinger is set.
		 * However, if SC_FL_NOLINGER is explicitly set, we know there is
		 * no risk so we close both sides immediately.
		 */
		if (!sc_ep_test(sc, SE_FL_ERROR) && !(sc->flags & SC_FL_NOLINGER) &&
		    !(sc->flags & SC_FL_SHUTR) &&
		    !(ic->flags & CF_DONT_READ))
			return;

		__fallthrough;
	case SC_ST_CON:
	case SC_ST_CER:
	case SC_ST_QUE:
	case SC_ST_TAR:
		/* Note that none of these states may happen with applets */
		appctx_shut(__sc_appctx(sc));
		sc->state = SC_ST_DIS;
		__fallthrough;
	default:
		sc->flags &= ~SC_FL_NOLINGER;
		sc->flags |= SC_FL_SHUTR;
		if (sc->flags & SC_FL_ISBACK)
			__sc_strm(sc)->conn_exp = TICK_ETERNITY;
	}
}

/* chk_rcv function for applets */
|
2022-05-27 08:02:48 +00:00
|
|
|
static void sc_app_chk_rcv_applet(struct stconn *sc)
|
2022-04-01 12:04:29 +00:00
|
|
|
{
|
2022-05-27 08:02:48 +00:00
|
|
|
struct channel *ic = sc_ic(sc);
|
2022-04-01 12:04:29 +00:00
|
|
|
|
2022-05-27 08:02:48 +00:00
|
|
|
BUG_ON(!sc_appctx(sc));
|
2022-04-01 12:04:29 +00:00
|
|
|
|
|
|
|
if (!ic->pipe) {
|
|
|
|
/* (re)start reading */
|
2022-05-27 08:02:48 +00:00
|
|
|
appctx_wakeup(__sc_appctx(sc));
|
2022-04-01 12:04:29 +00:00
|
|
|
}
|
|
|
|
}
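
/* Usage sketch (hedged): sc_chk_rcv(), called later in this file from
 * sc_notify() and sc_update_rx(), is assumed to dispatch to this handler
 * for applet-based connectors:
 *
 *     sc_chk_rcv(sc);   // wakes the applet to (re)start reading, unless
 *                       // spliced data is still pending in ic->pipe
 */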
|
|
|
|
|
|
|
|
/* chk_snd function for applets */
|
2022-05-27 08:02:48 +00:00
|
|
|
static void sc_app_chk_snd_applet(struct stconn *sc)
|
2022-04-01 12:04:29 +00:00
|
|
|
{
|
2022-05-27 08:02:48 +00:00
|
|
|
struct channel *oc = sc_oc(sc);
|
2022-04-01 12:04:29 +00:00
|
|
|
|
2022-05-27 08:02:48 +00:00
|
|
|
BUG_ON(!sc_appctx(sc));
|
2022-04-01 12:04:29 +00:00
|
|
|
|
2023-04-04 08:05:27 +00:00
|
|
|
if (unlikely(sc->state != SC_ST_EST || (sc->flags & SC_FL_SHUTW)))
|
2022-04-01 12:04:29 +00:00
|
|
|
return;
|
|
|
|
|
2022-06-01 15:35:34 +00:00
|
|
|
/* we only wake the applet up if it was waiting for some data and is ready to consume it */
|
|
|
|
if (!sc_ep_test(sc, SE_FL_WAIT_DATA) || sc_ep_test(sc, SE_FL_WONT_CONSUME))
|
2022-04-01 12:04:29 +00:00
|
|
|
return;
|
|
|
|
|
|
|
|
if (!channel_is_empty(oc)) {
|
|
|
|
/* (re)start sending */
|
2022-05-27 08:02:48 +00:00
|
|
|
appctx_wakeup(__sc_appctx(sc));
|
2022-04-01 12:04:29 +00:00
|
|
|
}
|
|
|
|
}
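
/* The gating above can be restated as a single predicate; this is a
 * hedged rewrite for clarity only, using the same flags and helpers
 * (applet_snd_may_wake() is a hypothetical name):
 *
 *     static inline int applet_snd_may_wake(struct stconn *sc)
 *     {
 *         return sc->state == SC_ST_EST &&
 *                !(sc->flags & SC_FL_SHUTW) &&
 *                sc_ep_test(sc, SE_FL_WAIT_DATA) &&
 *                !sc_ep_test(sc, SE_FL_WONT_CONSUME) &&
 *                !channel_is_empty(sc_oc(sc));
 *     }
 */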
|
2022-04-01 12:23:38 +00:00
|
|
|
|
|
|
|
|
|
|
|
/* This function is designed to be called from within the stream handler to
|
2022-05-17 17:07:51 +00:00
|
|
|
* update the input channel's expiration timer and the stream connector's
|
2022-04-01 12:23:38 +00:00
|
|
|
* Rx flags based on the channel's flags. It needs to be called only once
|
|
|
|
* after the channel's flags have settled down, and before they are cleared,
|
|
|
|
* though it doesn't harm to call it as often as desired (it just slightly
|
|
|
|
* hurts performance). It must not be called from outside of the stream
|
|
|
|
* handler, as what it does will be used to compute the stream task's
|
|
|
|
* expiration.
|
|
|
|
*/
|
2022-05-27 08:02:48 +00:00
|
|
|
void sc_update_rx(struct stconn *sc)
|
2022-04-01 12:23:38 +00:00
|
|
|
{
|
2022-05-27 08:02:48 +00:00
|
|
|
struct channel *ic = sc_ic(sc);
|
2022-04-01 12:23:38 +00:00
|
|
|
|
2023-04-04 08:05:27 +00:00
|
|
|
if (sc->flags & SC_FL_SHUTR)
|
2022-04-01 12:23:38 +00:00
|
|
|
return;
|
|
|
|
|
|
|
|
/* Read not closed, update FD status and timeout for reads */
|
|
|
|
if (ic->flags & CF_DONT_READ)
|
2022-05-27 08:02:48 +00:00
|
|
|
sc_wont_read(sc);
|
2022-04-01 12:23:38 +00:00
|
|
|
else
|
2022-05-27 08:02:48 +00:00
|
|
|
sc_will_read(sc);
|
2022-04-01 12:23:38 +00:00
|
|
|
|
2022-05-27 08:02:48 +00:00
|
|
|
sc_chk_rcv(sc);
|
2022-04-01 12:23:38 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
/* This function is designed to be called from within the stream handler to
|
2022-05-17 17:07:51 +00:00
|
|
|
* update the output channel's expiration timer and the stream connector's
|
2022-04-01 12:23:38 +00:00
|
|
|
* Tx flags based on the channel's flags. It needs to be called only once
|
|
|
|
* after the channel's flags have settled down, and before they are cleared,
|
|
|
|
* though it doesn't harm to call it as often as desired (it just slightly
|
|
|
|
* hurts performance). It must not be called from outside of the stream
|
|
|
|
* handler, as what it does will be used to compute the stream task's
|
|
|
|
* expiration.
|
|
|
|
*/
|
2022-05-27 08:02:48 +00:00
|
|
|
void sc_update_tx(struct stconn *sc)
|
2022-04-01 12:23:38 +00:00
|
|
|
{
|
2022-05-27 08:02:48 +00:00
|
|
|
struct channel *oc = sc_oc(sc);
|
2022-04-01 12:23:38 +00:00
|
|
|
|
2023-04-04 08:05:27 +00:00
|
|
|
if (sc->flags & SC_FL_SHUTW)
|
2022-04-01 12:23:38 +00:00
|
|
|
return;
|
|
|
|
|
|
|
|
/* Write not closed, update FD status and timeout for writes */
|
|
|
|
if (channel_is_empty(oc)) {
|
|
|
|
/* stop writing */
|
2022-05-27 08:02:48 +00:00
|
|
|
if (!sc_ep_test(sc, SE_FL_WAIT_DATA)) {
|
2023-04-04 08:05:27 +00:00
|
|
|
if ((sc->flags & SC_FL_SHUTW_NOW) == 0)
|
2022-05-27 08:02:48 +00:00
|
|
|
sc_ep_set(sc, SE_FL_WAIT_DATA);
|
2022-04-01 12:23:38 +00:00
|
|
|
}
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
2023-02-20 07:23:51 +00:00
|
|
|
/* (re)start writing */
|
2022-05-27 08:02:48 +00:00
|
|
|
sc_ep_clr(sc, SE_FL_WAIT_DATA);
|
2022-04-01 12:23:38 +00:00
|
|
|
}
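
/* Usage sketch (assumption: both helpers run once per stream-handler
 * wakeup, after the channel flags have settled; "scf" and "scb" are the
 * usual front/back connector names in the stream code, assumed here):
 *
 *     sc_update_rx(scf); sc_update_tx(scf);
 *     sc_update_rx(scb); sc_update_tx(scb);
 */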
|
2022-04-04 06:58:34 +00:00
|
|
|
|
2022-05-27 06:49:24 +00:00
|
|
|
/* This function is the equivalent of sc_update() except that it's
|
2022-04-04 06:58:34 +00:00
|
|
|
* designed to be called from outside the stream handlers, typically the lower
|
|
|
|
* layers (applets, connections) after I/O completion. After updating the stream
|
|
|
|
 * connector and timeouts, it will try to forward what can be forwarded, then to
|
|
|
|
* wake the associated task up if an important event requires special handling.
|
2022-05-25 14:36:21 +00:00
|
|
|
 * It may update SE_FL_WAIT_DATA and/or SC_FL_NEED_ROOM, which the callers are
|
2022-04-04 06:58:34 +00:00
|
|
|
* encouraged to watch to take appropriate action.
|
2022-05-27 06:49:24 +00:00
|
|
|
* It should not be called from within the stream itself, sc_update()
|
2022-04-04 06:58:34 +00:00
|
|
|
* is designed for this.
|
|
|
|
*/
|
2022-05-27 08:02:48 +00:00
|
|
|
static void sc_notify(struct stconn *sc)
|
2022-04-04 06:58:34 +00:00
|
|
|
{
|
2022-05-27 08:02:48 +00:00
|
|
|
struct channel *ic = sc_ic(sc);
|
|
|
|
struct channel *oc = sc_oc(sc);
|
2022-05-27 09:23:05 +00:00
|
|
|
struct stconn *sco = sc_opposite(sc);
|
2022-05-27 08:02:48 +00:00
|
|
|
struct task *task = sc_strm_task(sc);
|
2022-04-04 06:58:34 +00:00
|
|
|
|
|
|
|
/* process consumer side */
|
|
|
|
if (channel_is_empty(oc)) {
|
2022-05-27 08:02:48 +00:00
|
|
|
struct connection *conn = sc_conn(sc);
|
2022-04-04 06:58:34 +00:00
|
|
|
|
2023-04-04 08:05:27 +00:00
|
|
|
if (((sc->flags & (SC_FL_SHUTW|SC_FL_SHUTW_NOW)) == SC_FL_SHUTW_NOW) &&
|
2022-05-27 08:02:48 +00:00
|
|
|
(sc->state == SC_ST_EST) && (!conn || !(conn->flags & (CO_FL_WAIT_XPRT | CO_FL_EARLY_SSL_HS))))
|
|
|
|
sc_shutw(sc);
|
2022-04-04 06:58:34 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
/* indicate that we may be waiting for data from the output channel or
|
2023-04-03 16:32:50 +00:00
|
|
|
* we're about to close and can't expect more data if SC_FL_SHUTW_NOW is there.
|
2022-04-04 06:58:34 +00:00
|
|
|
*/
|
2023-04-04 08:05:27 +00:00
|
|
|
if (!(sc->flags & (SC_FL_SHUTW|SC_FL_SHUTW_NOW)))
|
2022-05-27 08:02:48 +00:00
|
|
|
sc_ep_set(sc, SE_FL_WAIT_DATA);
|
2023-04-04 08:05:27 +00:00
|
|
|
else if ((sc->flags & (SC_FL_SHUTW|SC_FL_SHUTW_NOW)) == SC_FL_SHUTW_NOW)
|
2022-05-27 08:02:48 +00:00
|
|
|
sc_ep_clr(sc, SE_FL_WAIT_DATA);
|
2022-04-04 06:58:34 +00:00
|
|
|
|
|
|
|
if (oc->flags & CF_DONT_READ)
|
2022-05-27 09:23:05 +00:00
|
|
|
sc_wont_read(sco);
|
2022-04-04 06:58:34 +00:00
|
|
|
else
|
2022-05-27 09:23:05 +00:00
|
|
|
sc_will_read(sco);
|
2022-04-04 06:58:34 +00:00
|
|
|
|
|
|
|
/* Notify the other side when we've injected data into the IC that
|
|
|
|
* needs to be forwarded. We can do fast-forwarding as soon as there
|
|
|
|
* are output data, but we avoid doing this if some of the data are
|
|
|
|
* not yet scheduled for being forwarded, because it is very likely
|
|
|
|
* that it will be done again immediately afterwards once the following
|
2022-05-25 14:36:21 +00:00
|
|
|
* data are parsed (eg: HTTP chunking). We only clear SC_FL_NEED_ROOM
|
|
|
|
* once we've emptied *some* of the output buffer, and not just when
|
|
|
|
* there is available room, because applets are often forced to stop
|
|
|
|
* before the buffer is full. We must not stop based on input data
|
|
|
|
* alone because an HTTP parser might need more data to complete the
|
|
|
|
* parsing.
|
2022-04-04 06:58:34 +00:00
|
|
|
*/
|
|
|
|
if (!channel_is_empty(ic) &&
|
2022-05-27 09:23:05 +00:00
|
|
|
sc_ep_test(sco, SE_FL_WAIT_DATA) &&
|
2023-03-17 14:45:58 +00:00
|
|
|
(!(sc->flags & SC_FL_SND_EXP_MORE) || c_full(ic) || ci_data(ic) == 0 || ic->pipe)) {
|
2022-04-04 06:58:34 +00:00
|
|
|
int new_len, last_len;
|
|
|
|
|
|
|
|
last_len = co_data(ic);
|
|
|
|
if (ic->pipe)
|
|
|
|
last_len += ic->pipe->data;
|
|
|
|
|
2022-05-27 09:23:05 +00:00
|
|
|
sc_chk_snd(sco);
|
2022-04-04 06:58:34 +00:00
|
|
|
|
|
|
|
new_len = co_data(ic);
|
|
|
|
if (ic->pipe)
|
|
|
|
new_len += ic->pipe->data;
|
|
|
|
|
|
|
|
/* check if the consumer has freed some space either in the
|
|
|
|
* buffer or in the pipe.
|
|
|
|
*/
|
|
|
|
if (new_len < last_len)
|
2022-05-27 08:02:48 +00:00
|
|
|
sc_have_room(sc);
|
2022-04-04 06:58:34 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
if (!(ic->flags & CF_DONT_READ))
|
2022-05-27 08:02:48 +00:00
|
|
|
sc_will_read(sc);
|
2022-04-04 06:58:34 +00:00
|
|
|
|
2022-05-27 08:02:48 +00:00
|
|
|
sc_chk_rcv(sc);
|
2022-05-27 09:23:05 +00:00
|
|
|
sc_chk_rcv(sco);
|
2022-04-04 06:58:34 +00:00
|
|
|
|
|
|
|
/* wake the task up only when needed */
|
2022-12-12 07:28:55 +00:00
|
|
|
if (/* changes on the production side that must be handled:
|
2023-01-26 15:18:09 +00:00
|
|
|
* - An error on receipt: SE_FL_ERROR
|
2022-12-12 07:28:55 +00:00
|
|
|
 * - A read event: shutdown for reads (CF_READ_EVENT + SC_FL_SHUTR)
|
2023-03-22 13:53:11 +00:00
|
|
|
* end of input (CF_READ_EVENT + SC_FL_EOI)
|
2022-12-12 07:28:55 +00:00
|
|
|
* data received and no fast-forwarding (CF_READ_EVENT + !to_forward)
|
|
|
|
* read event while consumer side is not established (CF_READ_EVENT + sco->state != SC_ST_EST)
|
|
|
|
*/
|
2023-04-04 08:05:27 +00:00
|
|
|
((ic->flags & CF_READ_EVENT) && ((sc->flags & SC_FL_EOI) || (sc->flags & SC_FL_SHUTR) || !ic->to_forward || sco->state != SC_ST_EST)) ||
|
2023-01-26 15:18:09 +00:00
|
|
|
sc_ep_test(sc, SE_FL_ERROR) ||
|
2022-04-04 06:58:34 +00:00
|
|
|
|
|
|
|
/* changes on the consumption side */
|
2023-01-26 15:18:09 +00:00
|
|
|
sc_ep_test(sc, SE_FL_ERR_PENDING) ||
|
2022-12-20 17:10:04 +00:00
|
|
|
((oc->flags & CF_WRITE_EVENT) &&
|
|
|
|
((sc->state < SC_ST_EST) ||
|
2023-04-04 08:05:27 +00:00
|
|
|
(sc->flags & SC_FL_SHUTW) ||
|
2022-04-04 06:58:34 +00:00
|
|
|
(((oc->flags & CF_WAKE_WRITE) ||
|
2023-04-03 16:32:50 +00:00
|
|
|
(!(oc->flags & CF_AUTO_CLOSE) &&
|
2023-04-04 08:05:27 +00:00
|
|
|
!(sc->flags & (SC_FL_SHUTW_NOW|SC_FL_SHUTW)))) &&
|
2023-04-03 16:32:50 +00:00
|
|
|
(sco->state != SC_ST_EST ||
|
|
|
|
(channel_is_empty(oc) && !oc->to_forward)))))) {
|
2022-04-04 06:58:34 +00:00
|
|
|
task_wakeup(task, TASK_WOKEN_IO);
|
|
|
|
}
|
|
|
|
|
2023-01-26 15:18:09 +00:00
|
|
|
if (ic->flags & CF_READ_EVENT)
|
2023-03-16 13:40:03 +00:00
|
|
|
sc->flags &= ~SC_FL_RCV_ONCE;
|
2022-04-04 06:58:34 +00:00
|
|
|
}
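
/* Condensed view of the fast-forward accounting performed above, using
 * the same names (restatement for clarity, not part of the source):
 *
 *     last_len = co_data(ic) + (ic->pipe ? ic->pipe->data : 0);
 *     sc_chk_snd(sco);                              // let the consumer drain
 *     new_len  = co_data(ic) + (ic->pipe ? ic->pipe->data : 0);
 *     if (new_len < last_len)
 *         sc_have_room(sc);                         // producer may write again
 */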
|
|
|
|
|
|
|
|
/*
|
|
|
|
* This function propagates a null read received on a socket-based connection.
|
2022-05-17 17:44:42 +00:00
|
|
|
* It updates the stream connector. If the stream connector has SC_FL_NOHALF,
|
2022-04-04 06:58:34 +00:00
|
|
|
* the close is also forwarded to the write side as an abort.
|
|
|
|
*/
|
2022-05-27 08:02:48 +00:00
|
|
|
static void sc_conn_read0(struct stconn *sc)
|
2022-04-04 06:58:34 +00:00
|
|
|
{
|
2022-05-27 08:02:48 +00:00
|
|
|
struct channel *ic = sc_ic(sc);
|
2022-04-04 06:58:34 +00:00
|
|
|
|
2022-05-27 08:02:48 +00:00
|
|
|
BUG_ON(!sc_conn(sc));
|
2022-04-04 06:58:34 +00:00
|
|
|
|
2023-04-04 08:05:27 +00:00
|
|
|
if (sc->flags & SC_FL_SHUTR)
|
2022-04-04 06:58:34 +00:00
|
|
|
return;
|
2023-04-04 08:05:27 +00:00
|
|
|
sc->flags |= SC_FL_SHUTR;
|
2023-04-03 16:32:50 +00:00
|
|
|
ic->flags |= CF_READ_EVENT;
|
2023-02-16 10:09:31 +00:00
|
|
|
sc_ep_report_read_activity(sc);
|
2022-04-04 06:58:34 +00:00
|
|
|
|
2022-05-27 08:02:48 +00:00
|
|
|
if (!sc_state_in(sc->state, SC_SB_CON|SC_SB_RDY|SC_SB_EST))
|
2022-04-04 06:58:34 +00:00
|
|
|
return;
|
|
|
|
|
2023-04-04 08:05:27 +00:00
|
|
|
if (sc->flags & SC_FL_SHUTW)
|
2022-04-04 06:58:34 +00:00
|
|
|
goto do_close;
|
|
|
|
|
BUG/MEDIUM: stconn: Schedule a shutw on shutr if data must be sent first
The commit 7f59d68fe ("BUG/MEDIIM: stconn: Flush output data before
forwarding close to write side") introduced a regression. When the read side
is closed, the close is not forwarded to the write side if there are some
pending outgoing data. The idea is to forward data first and then close the
write side. However, when fast-forwarding is enabled and the last data block
is received with the read0, the close is never forwarded.
We cannot revert the commit above because it really fixes an issue. However,
we can schedule the shutdown for write by setting the CF_SHUTW_NOW flag on
the write side. Indeed, it is the purpose of this flag.
To avoid replicating an ugly and hardly maintainable code block at different
places in stconn.c, a helper function is used. Thus, sc_cond_forward_shutw()
must be called to know whether the close can be forwarded or not. It returns
1 if it is possible. In this case, the caller is responsible for forwarding
the close to the write side. Otherwise, if the close cannot be forwarded, 0
is returned. This happens when it should not be performed at all, or when it
should only be delayed, waiting for the input channel to be flushed. In this
last case, the CF_SHUTW_NOW flag is set in the output channel.
This patch should fix issue #2033. It must be backported with the commit
above, thus at least as far as 2.2.
2023-02-08 15:18:48 +00:00
|
|
|
if (sc_cond_forward_shutw(sc)) {
|
2022-04-04 06:58:34 +00:00
|
|
|
/* we want to immediately forward this close to the write side */
|
|
|
|
/* force flag on ssl to keep stream in cache */
|
2022-05-27 08:02:48 +00:00
|
|
|
sc_conn_shutw(sc, CO_SHW_SILENT);
|
2022-04-04 06:58:34 +00:00
|
|
|
goto do_close;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* otherwise that's just a normal read shutdown */
|
|
|
|
return;
|
|
|
|
|
|
|
|
do_close:
|
2022-05-27 07:00:19 +00:00
|
|
|
/* OK we completely close the socket here just as if we went through sc_shut[rw]() */
|
2022-05-27 08:02:48 +00:00
|
|
|
sc_conn_shut(sc);
|
2022-04-04 06:58:34 +00:00
|
|
|
|
2023-04-04 08:05:27 +00:00
|
|
|
sc->flags &= ~SC_FL_SHUTW_NOW;
|
|
|
|
sc->flags |= SC_FL_SHUTW;
|
2022-04-04 06:58:34 +00:00
|
|
|
|
2022-05-27 08:02:48 +00:00
|
|
|
sc->state = SC_ST_DIS;
|
2022-07-20 11:24:04 +00:00
|
|
|
if (sc->flags & SC_FL_ISBACK)
|
|
|
|
__sc_strm(sc)->conn_exp = TICK_ETERNITY;
|
2022-04-04 06:58:34 +00:00
|
|
|
return;
|
|
|
|
}
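
/* For reference, the receive path later in this file (sc_conn_recv())
 * triggers this function on SE_FL_EOS like this:
 *
 *     if (sc_ep_test(sc, SE_FL_EOS)) {
 *         if (ic->flags & CF_AUTO_CLOSE)
 *             channel_shutw_now(ic);   // schedule the write-side shutdown
 *         sc_conn_read0(sc);
 *     }
 */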
|
|
|
|
|
|
|
|
/*
|
|
|
|
* This is the callback which is called by the connection layer to receive data
|
|
|
|
* into the buffer from the connection. It iterates over the mux layer's
|
|
|
|
* rcv_buf function.
|
|
|
|
*/
|
2022-05-27 08:02:48 +00:00
|
|
|
static int sc_conn_recv(struct stconn *sc)
|
2022-04-04 06:58:34 +00:00
|
|
|
{
|
2022-05-27 08:02:48 +00:00
|
|
|
struct connection *conn = __sc_conn(sc);
|
|
|
|
struct channel *ic = sc_ic(sc);
|
2022-04-04 06:58:34 +00:00
|
|
|
int ret, max, cur_read = 0;
|
|
|
|
int read_poll = MAX_READ_POLL_LOOPS;
|
|
|
|
int flags = 0;
|
|
|
|
|
|
|
|
/* If not established yet, do nothing. */
|
2022-05-27 08:02:48 +00:00
|
|
|
if (sc->state != SC_ST_EST)
|
2022-04-04 06:58:34 +00:00
|
|
|
return 0;
|
|
|
|
|
2022-05-18 16:06:53 +00:00
|
|
|
/* If another call to sc_conn_recv() failed, and we subscribed to
|
2022-04-04 06:58:34 +00:00
|
|
|
* recv events already, give up now.
|
|
|
|
*/
|
2022-05-27 08:02:48 +00:00
|
|
|
if (sc->wait_event.events & SUB_RETRY_RECV)
|
2022-04-04 06:58:34 +00:00
|
|
|
return 0;
|
|
|
|
|
|
|
|
/* maybe we were called immediately after an asynchronous shutr */
|
2023-04-04 08:05:27 +00:00
|
|
|
if (sc->flags & SC_FL_SHUTR)
|
2022-04-04 06:58:34 +00:00
|
|
|
return 1;
|
|
|
|
|
|
|
|
/* we must wait because the mux is not installed yet */
|
|
|
|
if (!conn->mux)
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
/* stop immediately on errors. Note that we DON'T want to stop on
|
|
|
|
* POLL_ERR, as the poller might report a write error while there
|
|
|
|
* are still data available in the recv buffer. This typically
|
|
|
|
* happens when we send too large a request to a backend server
|
|
|
|
* which rejects it before reading it all.
|
|
|
|
*/
|
2022-05-27 08:02:48 +00:00
|
|
|
if (!sc_ep_test(sc, SE_FL_RCV_MORE)) {
|
2022-04-04 06:58:34 +00:00
|
|
|
if (!conn_xprt_ready(conn))
|
|
|
|
return 0;
|
2022-05-27 08:02:48 +00:00
|
|
|
if (sc_ep_test(sc, SE_FL_ERROR))
|
2022-04-04 06:58:34 +00:00
|
|
|
goto end_recv;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* prepare to detect if the mux needs more room */
|
2022-05-27 08:02:48 +00:00
|
|
|
sc_ep_clr(sc, SE_FL_WANT_ROOM);
|
2023-02-10 16:37:11 +00:00
|
|
|
BUG_ON(sc_waiting_room(sc));
|
2022-04-04 06:58:34 +00:00
|
|
|
|
|
|
|
if ((ic->flags & (CF_STREAMER | CF_STREAMER_FAST)) && !co_data(ic) &&
|
|
|
|
global.tune.idle_timer &&
|
|
|
|
(unsigned short)(now_ms - ic->last_read) >= global.tune.idle_timer) {
|
|
|
|
/* The buffer was empty and nothing was transferred for more
|
|
|
|
* than one second. This was caused by a pause and not by
|
|
|
|
* congestion. Reset any streaming mode to reduce latency.
|
|
|
|
*/
|
|
|
|
ic->xfer_small = 0;
|
|
|
|
ic->xfer_large = 0;
|
|
|
|
ic->flags &= ~(CF_STREAMER | CF_STREAMER_FAST);
|
|
|
|
}
|
|
|
|
|
|
|
|
/* First, let's see if we may splice data across the channel without
|
|
|
|
* using a buffer.
|
|
|
|
*/
|
2022-05-27 08:02:48 +00:00
|
|
|
if (sc_ep_test(sc, SE_FL_MAY_SPLICE) &&
|
2022-04-04 06:58:34 +00:00
|
|
|
(ic->pipe || ic->to_forward >= MIN_SPLICE_FORWARD) &&
|
|
|
|
ic->flags & CF_KERN_SPLICING) {
|
|
|
|
if (c_data(ic)) {
|
|
|
|
/* We're embarrassed, there are already data pending in
|
|
|
|
* the buffer and we don't want to have them at two
|
|
|
|
* locations at a time. Let's indicate we need some
|
|
|
|
* place and ask the consumer to hurry.
|
|
|
|
*/
|
|
|
|
flags |= CO_RFL_BUF_FLUSH;
|
|
|
|
goto abort_splice;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (unlikely(ic->pipe == NULL)) {
|
|
|
|
if (pipes_used >= global.maxpipes || !(ic->pipe = get_pipe())) {
|
|
|
|
ic->flags &= ~CF_KERN_SPLICING;
|
|
|
|
goto abort_splice;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2022-05-27 08:02:48 +00:00
|
|
|
ret = conn->mux->rcv_pipe(sc, ic->pipe, ic->to_forward);
|
2022-04-04 06:58:34 +00:00
|
|
|
if (ret < 0) {
|
|
|
|
/* splice not supported on this end, let's disable it */
|
|
|
|
ic->flags &= ~CF_KERN_SPLICING;
|
|
|
|
goto abort_splice;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (ret > 0) {
|
|
|
|
if (ic->to_forward != CHN_INFINITE_FORWARD)
|
|
|
|
ic->to_forward -= ret;
|
|
|
|
ic->total += ret;
|
|
|
|
cur_read += ret;
|
2022-12-12 07:28:55 +00:00
|
|
|
ic->flags |= CF_READ_EVENT;
|
2022-04-04 06:58:34 +00:00
|
|
|
}
|
|
|
|
|
2022-05-27 08:02:48 +00:00
|
|
|
if (sc_ep_test(sc, SE_FL_EOS | SE_FL_ERROR))
|
2022-04-04 06:58:34 +00:00
|
|
|
goto end_recv;
|
|
|
|
|
|
|
|
if (conn->flags & CO_FL_WAIT_ROOM) {
|
|
|
|
/* the pipe is full or we have read enough data that it
|
|
|
|
* could soon be full. Let's stop before needing to poll.
|
|
|
|
*/
|
2022-05-27 08:02:48 +00:00
|
|
|
sc_need_room(sc);
|
2022-04-04 06:58:34 +00:00
|
|
|
goto done_recv;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* splice not possible (anymore), let's go on with standard copy */
|
|
|
|
}
|
|
|
|
|
|
|
|
abort_splice:
|
|
|
|
if (ic->pipe && unlikely(!ic->pipe->data)) {
|
|
|
|
put_pipe(ic->pipe);
|
|
|
|
ic->pipe = NULL;
|
|
|
|
}
|
|
|
|
|
2022-05-27 08:02:48 +00:00
|
|
|
if (ic->pipe && ic->to_forward && !(flags & CO_RFL_BUF_FLUSH) && sc_ep_test(sc, SE_FL_MAY_SPLICE)) {
|
2022-04-04 06:58:34 +00:00
|
|
|
/* don't break splicing by reading, but still call rcv_buf()
|
|
|
|
* to pass the flag.
|
|
|
|
*/
|
|
|
|
goto done_recv;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* now we'll need an input buffer for the stream */
|
2022-05-27 08:02:48 +00:00
|
|
|
if (!sc_alloc_ibuf(sc, &(__sc_strm(sc)->buffer_wait)))
|
2022-04-04 06:58:34 +00:00
|
|
|
goto end_recv;
|
|
|
|
|
|
|
|
/* For an HTX stream, if the buffer is stuck (no output data with some
|
|
|
|
* input data) and if the HTX message is fragmented or if its free space
|
|
|
|
 * wraps, we force an HTX defragmentation. It is a way to have a
|
|
|
|
 * contiguous free space and to let the mux copy as much data as
|
|
|
|
* possible.
|
|
|
|
*
|
|
|
|
* NOTE: A possible optim may be to let the mux decides if defrag is
|
|
|
|
* required or not, depending on amount of data to be xferred.
|
|
|
|
*/
|
2022-05-27 08:02:48 +00:00
|
|
|
if (IS_HTX_STRM(__sc_strm(sc)) && !co_data(ic)) {
|
2022-04-04 06:58:34 +00:00
|
|
|
struct htx *htx = htxbuf(&ic->buf);
|
|
|
|
|
|
|
|
if (htx_is_not_empty(htx) && ((htx->flags & HTX_FL_FRAGMENTED) || htx_space_wraps(htx)))
|
|
|
|
htx_defrag(htx, NULL, 0);
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Instruct the mux that it must subscribe for read events */
|
2022-05-27 08:02:48 +00:00
|
|
|
flags |= ((!conn_is_back(conn) && (__sc_strm(sc)->be->options & PR_O_ABRT_CLOSE)) ? CO_RFL_KEEP_RECV : 0);
|
2022-04-04 06:58:34 +00:00
|
|
|
|
|
|
|
/* Important note : if we're called with POLL_IN|POLL_HUP, it means the read polling
|
|
|
|
* was enabled, which implies that the recv buffer was not full. So we have a guarantee
|
|
|
|
* that if such an event is not handled above in splice, it will be handled here by
|
|
|
|
* recv().
|
|
|
|
*/
|
2022-05-27 08:02:48 +00:00
|
|
|
while (sc_ep_test(sc, SE_FL_RCV_MORE) ||
|
2022-04-04 06:58:34 +00:00
|
|
|
(!(conn->flags & CO_FL_HANDSHAKE) &&
|
2023-04-04 08:05:27 +00:00
|
|
|
(!sc_ep_test(sc, SE_FL_ERROR | SE_FL_EOS)) && !(sc->flags & SC_FL_SHUTR))) {
|
2022-04-04 06:58:34 +00:00
|
|
|
int cur_flags = flags;
|
|
|
|
|
|
|
|
/* Compute transient CO_RFL_* flags */
|
|
|
|
if (co_data(ic)) {
|
|
|
|
cur_flags |= (CO_RFL_BUF_WET | CO_RFL_BUF_NOT_STUCK);
|
|
|
|
}
|
|
|
|
|
|
|
|
/* <max> may be null. It is the mux's responsibility to set
|
2022-05-27 09:23:05 +00:00
|
|
|
* SE_FL_RCV_MORE on the SC if more space is needed.
|
2022-04-04 06:58:34 +00:00
|
|
|
*/
|
|
|
|
max = channel_recv_max(ic);
|
2022-05-27 08:02:48 +00:00
|
|
|
ret = conn->mux->rcv_buf(sc, &ic->buf, max, cur_flags);
|
2022-04-04 06:58:34 +00:00
|
|
|
|
2022-05-27 08:02:48 +00:00
|
|
|
if (sc_ep_test(sc, SE_FL_WANT_ROOM)) {
|
2022-05-17 15:04:55 +00:00
|
|
|
/* SE_FL_WANT_ROOM must not be reported if the channel's
|
2022-04-04 06:58:34 +00:00
|
|
|
* buffer is empty.
|
|
|
|
*/
|
|
|
|
BUG_ON(c_empty(ic));
|
|
|
|
|
2022-05-27 08:02:48 +00:00
|
|
|
sc_need_room(sc);
|
2022-04-04 06:58:34 +00:00
|
|
|
/* Add CF_READ_EVENT because some data are pending but
|
|
|
|
* cannot be xferred to the channel
|
|
|
|
*/
|
2022-12-12 07:28:55 +00:00
|
|
|
ic->flags |= CF_READ_EVENT;
|
2022-04-04 06:58:34 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
if (ret <= 0) {
|
|
|
|
/* if we refrained from reading because we asked for a
|
|
|
|
* flush to satisfy rcv_pipe(), we must not subscribe
|
|
|
|
* and instead report that there's not enough room
|
|
|
|
* here to proceed.
|
|
|
|
*/
|
|
|
|
if (flags & CO_RFL_BUF_FLUSH)
|
2022-05-27 08:02:48 +00:00
|
|
|
sc_need_room(sc);
|
2022-04-04 06:58:34 +00:00
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
|
|
|
cur_read += ret;
|
|
|
|
|
|
|
|
/* if we're allowed to directly forward data, we must advance the output count */
|
2023-04-03 16:32:50 +00:00
|
|
|
if (ic->to_forward && !(chn_cons(ic)->flags & (SC_FL_SHUTW|SC_FL_SHUTW_NOW))) {
|
2022-04-04 06:58:34 +00:00
|
|
|
unsigned long fwd = ret;
|
|
|
|
if (ic->to_forward != CHN_INFINITE_FORWARD) {
|
|
|
|
if (fwd > ic->to_forward)
|
|
|
|
fwd = ic->to_forward;
|
|
|
|
ic->to_forward -= fwd;
|
|
|
|
}
|
|
|
|
c_adv(ic, fwd);
|
|
|
|
}
|
|
|
|
|
2022-12-12 07:28:55 +00:00
|
|
|
ic->flags |= CF_READ_EVENT;
|
2022-04-04 06:58:34 +00:00
|
|
|
ic->total += ret;
|
|
|
|
|
|
|
|
/* End-of-input reached, we can leave. In this case, it is
|
2022-05-27 09:23:05 +00:00
|
|
|
* important to break the loop to not block the SC because of
|
2022-04-04 06:58:34 +00:00
|
|
|
 * the channel's policies. This way, we are still able to receive
|
|
|
|
* shutdowns.
|
|
|
|
*/
|
2022-05-27 08:02:48 +00:00
|
|
|
if (sc_ep_test(sc, SE_FL_EOI))
|
2022-04-04 06:58:34 +00:00
|
|
|
break;
|
|
|
|
|
2023-03-16 13:40:03 +00:00
|
|
|
if ((sc->flags & SC_FL_RCV_ONCE) || --read_poll <= 0) {
|
|
|
|
/* we don't expect to read more data */
|
2022-05-27 08:02:48 +00:00
|
|
|
sc_wont_read(sc);
|
2022-04-04 06:58:34 +00:00
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* if too many bytes were missing from last read, it means that
|
|
|
|
* it's pointless trying to read again because the system does
|
|
|
|
* not have them in buffers.
|
|
|
|
*/
|
|
|
|
if (ret < max) {
|
|
|
|
/* if a streamer has read few data, it may be because we
|
|
|
|
* have exhausted system buffers. It's not worth trying
|
|
|
|
* again.
|
|
|
|
*/
|
|
|
|
if (ic->flags & CF_STREAMER) {
|
|
|
|
/* we're stopped by the channel's policy */
|
2022-05-27 08:02:48 +00:00
|
|
|
sc_wont_read(sc);
|
2022-04-04 06:58:34 +00:00
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* if we read a large block smaller than what we requested,
|
|
|
|
* it's almost certain we'll never get anything more.
|
|
|
|
*/
|
|
|
|
if (ret >= global.tune.recv_enough) {
|
|
|
|
/* we're stopped by the channel's policy */
|
2022-05-27 08:02:48 +00:00
|
|
|
sc_wont_read(sc);
|
2022-04-04 06:58:34 +00:00
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
/* if we are waiting for more space, don't try to read more data
|
|
|
|
* right now.
|
|
|
|
*/
|
2022-05-27 08:02:48 +00:00
|
|
|
if (sc->flags & (SC_FL_WONT_READ|SC_FL_NEED_BUFF|SC_FL_NEED_ROOM))
|
2022-04-04 06:58:34 +00:00
|
|
|
break;
|
|
|
|
} /* while !flags */
|
|
|
|
|
|
|
|
done_recv:
|
|
|
|
if (cur_read) {
|
|
|
|
if ((ic->flags & (CF_STREAMER | CF_STREAMER_FAST)) &&
|
|
|
|
(cur_read <= ic->buf.size / 2)) {
|
|
|
|
ic->xfer_large = 0;
|
|
|
|
ic->xfer_small++;
|
|
|
|
if (ic->xfer_small >= 3) {
|
|
|
|
/* we have read less than half of the buffer in
|
|
|
|
* one pass, and this happened at least 3 times.
|
|
|
|
* This is definitely not a streamer.
|
|
|
|
*/
|
|
|
|
ic->flags &= ~(CF_STREAMER | CF_STREAMER_FAST);
|
|
|
|
}
|
|
|
|
else if (ic->xfer_small >= 2) {
|
|
|
|
/* if the buffer has been at least half full twice,
|
|
|
|
* we receive faster than we send, so at least it
|
|
|
|
* is not a "fast streamer".
|
|
|
|
*/
|
|
|
|
ic->flags &= ~CF_STREAMER_FAST;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
else if (!(ic->flags & CF_STREAMER_FAST) &&
|
|
|
|
(cur_read >= ic->buf.size - global.tune.maxrewrite)) {
|
|
|
|
/* we read a full buffer at once */
|
|
|
|
ic->xfer_small = 0;
|
|
|
|
ic->xfer_large++;
|
|
|
|
if (ic->xfer_large >= 3) {
|
|
|
|
/* we call this buffer a fast streamer if it manages
|
|
|
|
* to be filled in one call 3 consecutive times.
|
|
|
|
*/
|
|
|
|
ic->flags |= (CF_STREAMER | CF_STREAMER_FAST);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
else {
|
|
|
|
ic->xfer_small = 0;
|
|
|
|
ic->xfer_large = 0;
|
|
|
|
}
|
|
|
|
ic->last_read = now_ms;
|
2023-02-16 10:09:31 +00:00
|
|
|
sc_ep_report_read_activity(sc);
|
2022-04-04 06:58:34 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
end_recv:
|
|
|
|
ret = (cur_read != 0);
|
|
|
|
|
|
|
|
/* Report EOI on the channel if it was reached from the mux point of
|
|
|
|
* view. */
|
2023-03-22 13:53:11 +00:00
|
|
|
if (sc_ep_test(sc, SE_FL_EOI) && !(sc->flags & SC_FL_EOI)) {
|
2023-02-16 10:09:31 +00:00
|
|
|
sc_ep_report_read_activity(sc);
|
2023-03-22 13:53:11 +00:00
|
|
|
sc->flags |= SC_FL_EOI;
|
|
|
|
ic->flags |= CF_READ_EVENT;
|
2022-04-04 06:58:34 +00:00
|
|
|
ret = 1;
|
|
|
|
}
|
|
|
|
|
2023-03-21 10:25:21 +00:00
|
|
|
if (sc_ep_test(sc, SE_FL_EOS)) {
|
2022-04-04 06:58:34 +00:00
|
|
|
/* we received a shutdown */
|
|
|
|
if (ic->flags & CF_AUTO_CLOSE)
|
|
|
|
channel_shutw_now(ic);
|
2022-05-27 08:02:48 +00:00
|
|
|
sc_conn_read0(sc);
|
2022-04-04 06:58:34 +00:00
|
|
|
ret = 1;
|
|
|
|
}
|
2023-03-21 10:25:21 +00:00
|
|
|
|
|
|
|
if (sc_ep_test(sc, SE_FL_ERROR))
|
|
|
|
ret = 1;
|
2022-05-27 08:02:48 +00:00
|
|
|
else if (!(sc->flags & (SC_FL_WONT_READ|SC_FL_NEED_BUFF|SC_FL_NEED_ROOM)) &&
|
2023-04-04 08:05:27 +00:00
|
|
|
!(sc->flags & SC_FL_SHUTR)) {
|
2022-04-04 06:58:34 +00:00
|
|
|
/* Subscribe to receive events if we're blocking on I/O */
|
2022-05-27 08:02:48 +00:00
|
|
|
conn->mux->subscribe(sc, SUB_RETRY_RECV, &sc->wait_event);
|
|
|
|
se_have_no_more_data(sc->sedesc);
|
2023-03-21 10:25:21 +00:00
|
|
|
}
|
|
|
|
else {
|
2022-05-27 08:02:48 +00:00
|
|
|
se_have_more_data(sc->sedesc);
|
2022-04-04 06:58:34 +00:00
|
|
|
ret = 1;
|
|
|
|
}
|
2023-03-23 16:30:29 +00:00
|
|
|
|
|
|
|
BUG_ON_HOT((sc_ep_get(sc) & (SE_FL_EOI|SE_FL_EOS|SE_FL_ERROR)) == SE_FL_EOS);
|
2022-04-04 06:58:34 +00:00
|
|
|
return ret;
|
|
|
|
}
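
/* The streamer heuristic in done_recv above can be summarized as follows
 * (hedged restatement; thresholds come straight from the code):
 *
 *     read <= buf.size/2 while CF_STREAMER* set:          xfer_small++
 *         3 times in a row -> clear CF_STREAMER | CF_STREAMER_FAST
 *         2 times in a row -> clear CF_STREAMER_FAST only
 *     read >= buf.size - tune.maxrewrite, not yet FAST:   xfer_large++
 *         3 times in a row -> set CF_STREAMER | CF_STREAMER_FAST
 *     anything else        -> reset both counters
 */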
|
|
|
|
|
2022-05-17 17:07:51 +00:00
|
|
|
/* This tries to perform a synchronous receive on the stream connector to
|
2022-04-04 06:58:34 +00:00
|
|
|
* try to collect last arrived data. In practice it's only implemented on
|
2022-05-17 17:07:51 +00:00
|
|
|
* stconns. Returns 0 if nothing was done, non-zero if new data or a
|
2022-04-04 06:58:34 +00:00
|
|
|
 * shutdown were collected. This may result in some delayed receive calls
|
|
|
|
 * being programmed and performed later, though it doesn't provide any
|
|
|
|
* such guarantee.
|
|
|
|
*/
|
2022-05-27 08:02:48 +00:00
|
|
|
int sc_conn_sync_recv(struct stconn *sc)
|
2022-04-04 06:58:34 +00:00
|
|
|
{
|
2022-05-27 08:02:48 +00:00
|
|
|
if (!sc_state_in(sc->state, SC_SB_RDY|SC_SB_EST))
|
2022-04-04 06:58:34 +00:00
|
|
|
return 0;
|
|
|
|
|
2022-05-27 08:02:48 +00:00
|
|
|
if (!sc_mux_ops(sc))
|
2022-05-17 17:07:51 +00:00
|
|
|
return 0; // only stconns are supported
|
2022-04-04 06:58:34 +00:00
|
|
|
|
2022-05-27 08:02:48 +00:00
|
|
|
if (sc->wait_event.events & SUB_RETRY_RECV)
|
2022-04-04 06:58:34 +00:00
|
|
|
return 0; // already subscribed
|
|
|
|
|
2022-05-27 08:02:48 +00:00
|
|
|
if (!sc_is_recv_allowed(sc))
|
2022-04-04 06:58:34 +00:00
|
|
|
return 0; // already failed
|
|
|
|
|
2022-05-27 08:02:48 +00:00
|
|
|
return sc_conn_recv(sc);
|
2022-04-04 06:58:34 +00:00
|
|
|
}
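
/* Hypothetical call-site sketch (the real callers live in the stream
 * code, outside this excerpt):
 *
 *     if (sc_conn_sync_recv(scf))
 *         ;   // fresh data or a shutdown was collected before analysis
 */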
|
|
|
|
|
|
|
|
/*
|
|
|
|
* This function is called to send buffer data to a stream socket.
|
|
|
|
* It calls the mux layer's snd_buf function. It relies on the
|
|
|
|
* caller to commit polling changes. The caller should check conn->flags
|
|
|
|
* for errors.
|
|
|
|
*/
|
2022-05-27 08:02:48 +00:00
|
|
|
static int sc_conn_send(struct stconn *sc)
|
2022-04-04 06:58:34 +00:00
|
|
|
{
|
2022-05-27 08:02:48 +00:00
|
|
|
struct connection *conn = __sc_conn(sc);
|
2023-03-22 13:53:11 +00:00
|
|
|
struct stconn *sco = sc_opposite(sc);
|
2022-05-27 08:02:48 +00:00
|
|
|
struct stream *s = __sc_strm(sc);
|
|
|
|
struct channel *oc = sc_oc(sc);
|
2022-04-04 06:58:34 +00:00
|
|
|
int ret;
|
|
|
|
int did_send = 0;
|
|
|
|
|
2022-05-27 08:02:48 +00:00
|
|
|
if (sc_ep_test(sc, SE_FL_ERROR | SE_FL_ERR_PENDING) || sc_is_conn_error(sc)) {
|
2022-04-04 06:58:34 +00:00
|
|
|
/* We're probably there because the tasklet was woken up,
|
|
|
|
 * but process_stream() ran before, detected there was an
|
2022-05-27 09:23:05 +00:00
|
|
|
* error and put the SC back to SC_ST_TAR. There's still
|
2022-04-04 06:58:34 +00:00
|
|
|
* CO_FL_ERROR on the connection but we don't want to add
|
2022-05-17 15:04:55 +00:00
|
|
|
* SE_FL_ERROR back, so give up
|
2022-04-04 06:58:34 +00:00
|
|
|
*/
|
2022-05-27 08:02:48 +00:00
|
|
|
if (sc->state < SC_ST_CON)
|
2022-04-04 06:58:34 +00:00
|
|
|
return 0;
|
2022-10-17 08:21:19 +00:00
|
|
|
if (sc_ep_test(sc, SE_FL_EOS))
|
|
|
|
sc_ep_set(sc, SE_FL_ERROR);
|
2022-04-04 06:58:34 +00:00
|
|
|
return 1;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* We're already waiting to be able to send, give up */
|
2022-05-27 08:02:48 +00:00
|
|
|
if (sc->wait_event.events & SUB_RETRY_SEND)
|
2022-04-04 06:58:34 +00:00
|
|
|
return 0;
|
|
|
|
|
|
|
|
/* we might have been called just after an asynchronous shutw */
|
2023-04-04 08:05:27 +00:00
|
|
|
if (sc->flags & SC_FL_SHUTW)
|
2022-04-04 06:58:34 +00:00
|
|
|
return 1;
|
|
|
|
|
|
|
|
/* we must wait because the mux is not installed yet */
|
|
|
|
if (!conn->mux)
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
if (oc->pipe && conn->xprt->snd_pipe && conn->mux->snd_pipe) {
|
2022-05-27 08:02:48 +00:00
|
|
|
ret = conn->mux->snd_pipe(sc, oc->pipe);
|
2022-04-04 06:58:34 +00:00
|
|
|
if (ret > 0)
|
|
|
|
did_send = 1;
|
|
|
|
|
|
|
|
if (!oc->pipe->data) {
|
|
|
|
put_pipe(oc->pipe);
|
|
|
|
oc->pipe = NULL;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (oc->pipe)
|
|
|
|
goto end;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* At this point, the pipe is empty, but we may still have data pending
|
|
|
|
* in the normal buffer.
|
|
|
|
*/
|
|
|
|
if (co_data(oc)) {
|
|
|
|
/* when we're here, we already know that there is no spliced
|
|
|
|
* data left, and that there are sendable buffered data.
|
|
|
|
*/
|
|
|
|
|
|
|
|
/* check if we want to inform the kernel that we're interested in
|
|
|
|
* sending more data after this call. We want this if :
|
|
|
|
* - we're about to close after this last send and want to merge
|
|
|
|
* the ongoing FIN with the last segment.
|
|
|
|
* - we know we can't send everything at once and must get back
|
|
|
|
* here because of unaligned data
|
|
|
|
* - there is still a finite amount of data to forward
|
|
|
|
* The test is arranged so that the most common case does only 2
|
|
|
|
* tests.
|
|
|
|
*/
|
|
|
|
unsigned int send_flag = 0;
|
|
|
|
|
2023-03-17 14:38:18 +00:00
|
|
|
if ((!(sc->flags & (SC_FL_SND_ASAP|SC_FL_SND_NEVERWAIT)) &&
|
2022-04-04 06:58:34 +00:00
|
|
|
((oc->to_forward && oc->to_forward != CHN_INFINITE_FORWARD) ||
|
2023-03-17 14:45:58 +00:00
|
|
|
(sc->flags & SC_FL_SND_EXP_MORE) ||
|
2022-04-04 06:58:34 +00:00
|
|
|
(IS_HTX_STRM(s) &&
|
2023-04-04 08:05:27 +00:00
|
|
|
(!(sco->flags & (SC_FL_EOI|SC_FL_SHUTR)) && htx_expect_more(htxbuf(&oc->buf)))))) ||
|
2022-04-04 06:58:34 +00:00
|
|
|
((oc->flags & CF_ISRESP) &&
|
2023-04-03 16:32:50 +00:00
|
|
|
(oc->flags & CF_AUTO_CLOSE) &&
|
2023-04-04 08:05:27 +00:00
|
|
|
(sc->flags & SC_FL_SHUTW_NOW)))
|
2022-04-04 06:58:34 +00:00
|
|
|
send_flag |= CO_SFL_MSG_MORE;
|
|
|
|
|
|
|
|
if (oc->flags & CF_STREAMER)
|
|
|
|
send_flag |= CO_SFL_STREAMER;
|
|
|
|
|
|
|
|
if (s->txn && s->txn->flags & TX_L7_RETRY && !b_data(&s->txn->l7_buffer)) {
|
|
|
|
/* If we want to be able to do L7 retries, copy
|
|
|
|
* the data we're about to send, so that we are able
|
|
|
|
* to resend them if needed
|
|
|
|
*/
|
|
|
|
/* Try to allocate a buffer if we had none.
|
|
|
|
* If it fails, the next test will just
|
|
|
|
* disable the l7 retries by setting
|
|
|
|
* l7_conn_retries to 0.
|
|
|
|
*/
|
|
|
|
if (s->txn->req.msg_state != HTTP_MSG_DONE)
|
|
|
|
s->txn->flags &= ~TX_L7_RETRY;
|
|
|
|
else {
|
|
|
|
if (b_alloc(&s->txn->l7_buffer) == NULL)
|
|
|
|
s->txn->flags &= ~TX_L7_RETRY;
|
|
|
|
else {
|
|
|
|
memcpy(b_orig(&s->txn->l7_buffer),
|
|
|
|
b_orig(&oc->buf),
|
|
|
|
b_size(&oc->buf));
|
|
|
|
s->txn->l7_buffer.head = co_data(oc);
|
|
|
|
b_add(&s->txn->l7_buffer, co_data(oc));
|
|
|
|
}
|
|
|
|
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2022-05-27 08:02:48 +00:00
|
|
|
ret = conn->mux->snd_buf(sc, &oc->buf, co_data(oc), send_flag);
|
2022-04-04 06:58:34 +00:00
|
|
|
if (ret > 0) {
|
|
|
|
did_send = 1;
|
|
|
|
c_rew(oc, ret);
|
|
|
|
c_realign_if_empty(oc);
|
|
|
|
|
|
|
|
if (!co_data(oc)) {
|
|
|
|
/* Always clear both flags once everything has been sent, they're one-shot */
|
2023-03-17 14:45:58 +00:00
|
|
|
sc->flags &= ~(SC_FL_SND_ASAP|SC_FL_SND_EXP_MORE);
|
2022-04-04 06:58:34 +00:00
|
|
|
}
|
|
|
|
/* if some data remain in the buffer, it's only because the
|
|
|
|
* system buffers are full, we will try next time.
|
|
|
|
*/
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
end:
|
|
|
|
if (did_send) {
|
2022-12-20 17:10:04 +00:00
|
|
|
oc->flags |= CF_WRITE_EVENT | CF_WROTE_DATA;
|
2022-05-27 08:02:48 +00:00
|
|
|
if (sc->state == SC_ST_CON)
|
|
|
|
sc->state = SC_ST_RDY;
|
|
|
|
sc_have_room(sc_opposite(sc));
|
2022-04-04 06:58:34 +00:00
|
|
|
}
|
|
|
|
|
2022-05-27 08:02:48 +00:00
|
|
|
if (sc_ep_test(sc, SE_FL_ERROR | SE_FL_ERR_PENDING)) {
|
2023-01-26 15:18:09 +00:00
|
|
|
oc->flags |= CF_WRITE_EVENT;
|
2022-10-17 08:21:19 +00:00
|
|
|
if (sc_ep_test(sc, SE_FL_EOS))
|
2023-01-26 15:18:09 +00:00
|
|
|
sc_ep_set(sc, SE_FL_ERROR);
|
2022-04-04 06:58:34 +00:00
|
|
|
return 1;
|
|
|
|
}
|
|
|
|
|
2023-02-27 15:38:12 +00:00
|
|
|
if (channel_is_empty(oc))
|
|
|
|
sc_ep_report_send_activity(sc);
|
|
|
|
else {
|
|
|
|
/* We couldn't send all of our data, let the mux know we'd like to send more */
|
2022-05-27 08:02:48 +00:00
|
|
|
conn->mux->subscribe(sc, SUB_RETRY_SEND, &sc->wait_event);
|
2023-02-27 15:38:12 +00:00
|
|
|
sc_ep_report_blocked_send(sc);
|
|
|
|
}
|
|
|
|
|
2022-04-04 06:58:34 +00:00
|
|
|
return did_send;
|
|
|
|
}
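
/* The CO_SFL_MSG_MORE decision above, restated as hedged pseudo-logic
 * with the same flags and grouping (illustration only):
 *
 *     MSG_MORE <=> (no SC_FL_SND_ASAP/SC_FL_SND_NEVERWAIT &&
 *                   (finite to_forward left ||
 *                    SC_FL_SND_EXP_MORE ||
 *                    (HTX stream && peer not EOI/SHUTR && htx_expect_more())))
 *               || (response channel && CF_AUTO_CLOSE && SC_FL_SHUTW_NOW)
 */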
|
|
|
|
|
2022-12-20 17:10:04 +00:00
|
|
|
/* perform a synchronous send() for the stream connector. The CF_WRITE_EVENT
|
|
|
|
 * flag is cleared prior to the attempt, and will possibly be updated in case
|
|
|
|
* of success.
|
2022-04-04 06:58:34 +00:00
|
|
|
*/
|
2022-05-27 08:02:48 +00:00
|
|
|
void sc_conn_sync_send(struct stconn *sc)
|
2022-04-04 06:58:34 +00:00
|
|
|
{
|
2022-05-27 08:02:48 +00:00
|
|
|
struct channel *oc = sc_oc(sc);
|
2022-04-04 06:58:34 +00:00
|
|
|
|
2022-12-20 17:10:04 +00:00
|
|
|
oc->flags &= ~CF_WRITE_EVENT;
|
2022-04-04 06:58:34 +00:00
|
|
|
|
2023-04-04 08:05:27 +00:00
|
|
|
if (sc->flags & SC_FL_SHUTW)
|
2022-04-04 06:58:34 +00:00
|
|
|
return;
|
|
|
|
|
|
|
|
if (channel_is_empty(oc))
|
|
|
|
return;
|
|
|
|
|
2022-05-27 08:02:48 +00:00
|
|
|
if (!sc_state_in(sc->state, SC_SB_CON|SC_SB_RDY|SC_SB_EST))
|
2022-04-04 06:58:34 +00:00
|
|
|
return;
|
|
|
|
|
2022-05-27 08:02:48 +00:00
|
|
|
if (!sc_mux_ops(sc))
|
2022-04-04 06:58:34 +00:00
|
|
|
return;
|
|
|
|
|
2022-05-27 08:02:48 +00:00
|
|
|
sc_conn_send(sc);
|
2022-04-04 06:58:34 +00:00
|
|
|
}
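
/* Hypothetical call-site sketch: flush pending output synchronously, then
 * rely on the mux subscription taken in sc_conn_send() if data remain:
 *
 *     sc_conn_sync_send(scb);
 *     if (!channel_is_empty(sc_oc(scb)))
 *         ;   // sc_conn_send() already subscribed for SUB_RETRY_SEND
 */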
|
|
|
|
|
|
|
|
/* Called by I/O handlers after completion. It propagates
|
2022-05-17 17:07:51 +00:00
|
|
|
* connection flags to the stream connector, updates the stream (which may or
|
2022-04-04 06:58:34 +00:00
|
|
|
 * may not take this opportunity to try to forward data), then updates the
|
2022-05-17 17:07:51 +00:00
|
|
|
* connection's polling based on the channels and stream connector's final
|
2022-04-04 06:58:34 +00:00
|
|
|
* states. The function always returns 0.
|
|
|
|
*/
|
2022-05-27 08:02:48 +00:00
|
|
|
static int sc_conn_process(struct stconn *sc)
|
2022-04-04 06:58:34 +00:00
|
|
|
{
|
2022-05-27 08:02:48 +00:00
|
|
|
struct connection *conn = __sc_conn(sc);
|
|
|
|
struct channel *ic = sc_ic(sc);
|
|
|
|
struct channel *oc = sc_oc(sc);
|
2022-04-04 06:58:34 +00:00
|
|
|
|
|
|
|
BUG_ON(!conn);
|
|
|
|
|
|
|
|
/* If we have data to send, try it now */
|
2022-05-27 08:02:48 +00:00
|
|
|
if (!channel_is_empty(oc) && !(sc->wait_event.events & SUB_RETRY_SEND))
|
|
|
|
sc_conn_send(sc);
|
2022-04-04 06:58:34 +00:00
|
|
|
|
2022-05-17 17:07:51 +00:00
|
|
|
/* First step, report to the stream connector what was detected at the
|
2022-04-04 06:58:34 +00:00
|
|
|
* connection layer : errors and connection establishment.
|
2022-05-17 15:04:55 +00:00
|
|
|
* Only add SE_FL_ERROR if we're connected, or we're attempting to
|
2022-04-04 06:58:34 +00:00
|
|
|
* connect, we may get there because we got woken up, but only run
|
|
|
|
 * after process_stream() noticed there was an error, and decided
|
|
|
|
* to retry to connect, the connection may still have CO_FL_ERROR,
|
2022-05-17 15:04:55 +00:00
|
|
|
* and we don't want to add SE_FL_ERROR back
|
2022-04-04 06:58:34 +00:00
|
|
|
*
|
2022-05-18 16:06:53 +00:00
|
|
|
 * Note: This test is only required because sc_conn_process is also the SC
|
|
|
|
* wake callback. Otherwise sc_conn_recv()/sc_conn_send() already take
|
2022-04-04 06:58:34 +00:00
|
|
|
* care of it.
|
|
|
|
*/
|
|
|
|
|
2022-05-27 08:02:48 +00:00
|
|
|
if (sc->state >= SC_ST_CON) {
|
|
|
|
if (sc_is_conn_error(sc))
|
|
|
|
sc_ep_set(sc, SE_FL_ERROR);
|
2022-04-04 06:58:34 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
/* If we had early data, and the handshake ended, then
|
|
|
|
* we can remove the flag, and attempt to wake the task up,
|
|
|
|
* in the event there's an analyser waiting for the end of
|
|
|
|
* the handshake.
|
|
|
|
*/
|
|
|
|
if (!(conn->flags & (CO_FL_WAIT_XPRT | CO_FL_EARLY_SSL_HS)) &&
|
2022-05-27 08:02:48 +00:00
|
|
|
sc_ep_test(sc, SE_FL_WAIT_FOR_HS)) {
|
|
|
|
sc_ep_clr(sc, SE_FL_WAIT_FOR_HS);
|
|
|
|
task_wakeup(sc_strm_task(sc), TASK_WOKEN_MSG);
|
2022-04-04 06:58:34 +00:00
|
|
|
}
|
|
|
|
|
2022-05-27 08:02:48 +00:00
|
|
|
if (!sc_state_in(sc->state, SC_SB_EST|SC_SB_DIS|SC_SB_CLO) &&
|
2022-04-04 06:58:34 +00:00
|
|
|
(conn->flags & CO_FL_WAIT_XPRT) == 0) {
|
2022-07-20 11:24:04 +00:00
|
|
|
if (sc->flags & SC_FL_ISBACK)
|
|
|
|
__sc_strm(sc)->conn_exp = TICK_ETERNITY;
|
2022-12-12 07:11:36 +00:00
|
|
|
oc->flags |= CF_WRITE_EVENT;
|
2022-05-27 08:02:48 +00:00
|
|
|
if (sc->state == SC_ST_CON)
|
|
|
|
sc->state = SC_ST_RDY;
|
2022-04-04 06:58:34 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
/* Report EOS on the channel if it was reached from the mux point of
|
|
|
|
* view.
|
|
|
|
*
|
2022-05-18 16:06:53 +00:00
|
|
|
 * Note: This test is only required because sc_conn_process is also the SC
|
|
|
|
* wake callback. Otherwise sc_conn_recv()/sc_conn_send() already take
|
2022-04-04 06:58:34 +00:00
|
|
|
* care of it.
|
|
|
|
*/
|
2023-04-04 08:05:27 +00:00
|
|
|
if (sc_ep_test(sc, SE_FL_EOS) && !(sc->flags & SC_FL_SHUTR)) {
|
2022-04-04 06:58:34 +00:00
|
|
|
/* we received a shutdown */
|
|
|
|
if (ic->flags & CF_AUTO_CLOSE)
|
|
|
|
channel_shutw_now(ic);
|
2022-05-27 08:02:48 +00:00
|
|
|
sc_conn_read0(sc);
|
2022-04-04 06:58:34 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
/* Report EOI on the channel if it was reached from the mux point of
|
|
|
|
* view.
|
|
|
|
*
|
2022-05-18 16:06:53 +00:00
|
|
|
 * Note: This test is only required because sc_conn_process is also the SC
|
|
|
|
* wake callback. Otherwise sc_conn_recv()/sc_conn_send() already take
|
2022-04-04 06:58:34 +00:00
|
|
|
* care of it.
|
|
|
|
*/
|
2023-03-22 13:53:11 +00:00
|
|
|
if (sc_ep_test(sc, SE_FL_EOI) && !(sc->flags & SC_FL_EOI)) {
|
|
|
|
sc->flags |= SC_FL_EOI;
|
|
|
|
ic->flags |= CF_READ_EVENT;
|
|
|
|
}
|
2022-04-04 06:58:34 +00:00
|
|
|
|
2022-05-17 17:07:51 +00:00
|
|
|
/* Second step : update the stream connector and channels, try to forward any
|
2022-04-04 06:58:34 +00:00
|
|
|
* pending data, then possibly wake the stream up based on the new
|
2022-05-17 17:07:51 +00:00
|
|
|
* stream connector status.
|
2022-04-04 06:58:34 +00:00
|
|
|
*/
|
2022-05-27 08:02:48 +00:00
|
|
|
sc_notify(sc);
|
|
|
|
stream_release_buffers(__sc_strm(sc));
|
2022-04-04 06:58:34 +00:00
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2022-05-17 17:07:51 +00:00
|
|
|
/* This is the ->process() function for any stream connector's wait_event task.
|
|
|
|
* It's assigned during the stream connector's initialization, for any type of
|
|
|
|
* stream connector. Thus it is always safe to perform a tasklet_wakeup() on a
|
2022-05-27 09:23:05 +00:00
|
|
|
* stream connector, as the presence of the SC is checked there.
|
2022-04-04 06:58:34 +00:00
|
|
|
*/
|
2022-05-18 16:06:53 +00:00
|
|
|
struct task *sc_conn_io_cb(struct task *t, void *ctx, unsigned int state)
|
2022-04-04 06:58:34 +00:00
|
|
|
{
|
2022-05-27 08:02:48 +00:00
|
|
|
struct stconn *sc = ctx;
|
2022-04-04 06:58:34 +00:00
|
|
|
int ret = 0;
|
|
|
|
|
2022-05-27 08:02:48 +00:00
|
|
|
if (!sc_conn(sc))
|
2022-04-04 06:58:34 +00:00
|
|
|
return t;
|
|
|
|
|
2022-05-27 08:02:48 +00:00
|
|
|
if (!(sc->wait_event.events & SUB_RETRY_SEND) && !channel_is_empty(sc_oc(sc)))
|
|
|
|
ret = sc_conn_send(sc);
|
|
|
|
if (!(sc->wait_event.events & SUB_RETRY_RECV))
|
|
|
|
ret |= sc_conn_recv(sc);
|
2022-04-04 06:58:34 +00:00
|
|
|
if (ret != 0)
|
2022-05-27 08:02:48 +00:00
|
|
|
sc_conn_process(sc);
|
2022-04-04 06:58:34 +00:00
|
|
|
|
2022-05-27 08:02:48 +00:00
|
|
|
stream_release_buffers(__sc_strm(sc));
|
2022-04-04 06:58:34 +00:00
|
|
|
return t;
|
|
|
|
}
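
/* Wiring sketch (assumption: done at SC initialization, outside this
 * excerpt): the wait_event tasklet points to this callback so that any
 * I/O completion can safely wake the connector:
 *
 *     sc->wait_event.tasklet->process = sc_conn_io_cb;
 *     sc->wait_event.tasklet->context = sc;
 *     tasklet_wakeup(sc->wait_event.tasklet);
 */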
|
|
|
|
|
|
|
|
/* Callback to be used by applet handlers upon completion. It updates the stream
|
|
|
|
* (which may or may not take this opportunity to try to forward data), then
|
2022-05-17 17:07:51 +00:00
|
|
|
 * may re-enable the applet based on the channels and stream connector's final
|
2022-04-04 06:58:34 +00:00
|
|
|
* states.
|
|
|
|
*/
|
2022-05-27 08:02:48 +00:00
|
|
|
static int sc_applet_process(struct stconn *sc)
|
2022-04-04 06:58:34 +00:00
|
|
|
{
|
2022-05-27 08:02:48 +00:00
|
|
|
struct channel *ic = sc_ic(sc);
|
2022-04-04 06:58:34 +00:00
|
|
|
|
2022-05-27 08:02:48 +00:00
|
|
|
BUG_ON(!sc_appctx(sc));
|
2022-04-04 06:58:34 +00:00
|
|
|
|
2023-03-21 10:49:21 +00:00
|
|
|
/* Report EOI on the channel if it was reached from the applet point of
|
|
|
|
* view. */
|
2023-03-22 13:53:11 +00:00
|
|
|
if (sc_ep_test(sc, SE_FL_EOI) && !(sc->flags & SC_FL_EOI)) {
|
2023-03-21 10:49:21 +00:00
|
|
|
sc_ep_report_read_activity(sc);
|
2023-03-22 13:53:11 +00:00
|
|
|
sc->flags |= SC_FL_EOI;
|
|
|
|
ic->flags |= CF_READ_EVENT;
|
2023-03-21 10:49:21 +00:00
|
|
|
}
|
|
|
|
|
2023-03-21 13:19:08 +00:00
|
|
|
if (sc_ep_test(sc, SE_FL_EOS)) {
|
|
|
|
/* we received a shutdown */
|
|
|
|
sc_shutr(sc);
|
|
|
|
}
|
|
|
|
|
2022-04-04 06:58:34 +00:00
|
|
|
/* If the applet wants to write and the channel is closed, it's a
|
|
|
|
* broken pipe and it must be reported.
|
|
|
|
*/
|
2023-04-04 08:05:27 +00:00
|
|
|
if (!sc_ep_test(sc, SE_FL_HAVE_NO_DATA) && (sc->flags & SC_FL_SHUTR))
|
2022-05-27 08:02:48 +00:00
|
|
|
sc_ep_set(sc, SE_FL_ERROR);
|
2022-04-04 06:58:34 +00:00
|
|
|
|
|
|
|
/* automatically mark the applet as having data available if it reported
|
|
|
|
 * being blocked by the channel.
|
|
|
|
*/
|
2022-05-27 08:02:48 +00:00
|
|
|
if ((sc->flags & (SC_FL_WONT_READ|SC_FL_NEED_BUFF|SC_FL_NEED_ROOM)) ||
|
|
|
|
sc_ep_test(sc, SE_FL_APPLET_NEED_CONN))
|
|
|
|
applet_have_more_data(__sc_appctx(sc));
|
2022-04-04 06:58:34 +00:00
|
|
|
|
2022-05-17 17:07:51 +00:00
|
|
|
/* update the stream connector, channels, and possibly wake the stream up */
|
2022-05-27 08:02:48 +00:00
|
|
|
sc_notify(sc);
|
|
|
|
stream_release_buffers(__sc_strm(sc));
|
2022-04-04 06:58:34 +00:00
|
|
|
|
2022-05-27 06:49:24 +00:00
|
|
|
/* sc_notify may have passed through chk_snd and released some blocking
|
2022-05-25 14:36:21 +00:00
|
|
|
 * flags. process_stream() will consider those flags to wake up the
|
2022-04-04 06:58:34 +00:00
|
|
|
 * appctx but, in case the task is not in the runqueue, we may have to
|
|
|
|
* wakeup the appctx immediately.
|
|
|
|
*/
|
2022-05-27 08:02:48 +00:00
|
|
|
if (sc_is_recv_allowed(sc) || sc_is_send_allowed(sc))
|
|
|
|
appctx_wakeup(__sc_appctx(sc));
|
2022-04-04 06:58:34 +00:00
|
|
|
return 0;
|
|
|
|
}
|
2022-06-16 14:24:16 +00:00
|
|
|
|
|
|
|
|
|
|
|
/* Prepares an endpoint upgrade. We don't know at this stage if the upgrade will
|
|
|
|
* succeed or not and if the stconn will be reused by the new endpoint. Thus,
|
|
|
|
* for now, only pretend the stconn is detached.
|
|
|
|
*/
|
|
|
|
void sc_conn_prepare_endp_upgrade(struct stconn *sc)
|
|
|
|
{
|
|
|
|
BUG_ON(!sc_conn(sc) || !sc->app);
|
|
|
|
sc_ep_clr(sc, SE_FL_T_MUX);
|
|
|
|
sc_ep_set(sc, SE_FL_DETACHED);
|
|
|
|
}
|
|
|
|
|
2022-07-29 17:26:53 +00:00
|
|
|
/* Endpoint upgrade failed. Restore the stconn state. */
|
2022-06-16 14:24:16 +00:00
|
|
|
void sc_conn_abort_endp_upgrade(struct stconn *sc)
|
|
|
|
{
|
|
|
|
sc_ep_set(sc, SE_FL_T_MUX);
|
|
|
|
sc_ep_clr(sc, SE_FL_DETACHED);
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Commit the endpoint upgrade. If stconn is attached, it means the new endpoint
|
|
|
|
 * uses it. So we do nothing. Otherwise, the stconn will be destroyed with the
|
|
|
|
* overlying stream. So, it means we must commit the detach.
|
|
|
|
*/
|
|
|
|
void sc_conn_commit_endp_upgrade(struct stconn *sc)
|
|
|
|
{
|
|
|
|
if (!sc_ep_test(sc, SE_FL_DETACHED))
|
|
|
|
return;
|
|
|
|
sc_detach_endp(&sc);
|
|
|
|
/* Because it was already set as detached, the sedesc must be preserved */
|
2022-08-11 11:56:42 +00:00
|
|
|
BUG_ON(!sc);
|
2022-06-16 14:24:16 +00:00
|
|
|
BUG_ON(!sc->sedesc);
|
|
|
|
}
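
/* Hedged sequence sketch tying the three upgrade helpers together
 * (the "upgrade_failed" condition is hypothetical):
 *
 *     sc_conn_prepare_endp_upgrade(sc);   // pretend the SC is detached
 *     if (upgrade_failed)
 *         sc_conn_abort_endp_upgrade(sc); // restore SE_FL_T_MUX
 *     else
 *         sc_conn_commit_endp_upgrade(sc);// detach for real if unused
 */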
|