/*
 * include/proto/connection.h
 * This file contains connection function prototypes
 *
 * Copyright (C) 2000-2012 Willy Tarreau - w@1wt.eu
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation, version 2.1
 * exclusively.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#ifndef _PROTO_CONNECTION_H
#define _PROTO_CONNECTION_H

#include <common/config.h>
#include <common/ist.h>
#include <common/memory.h>
#include <types/connection.h>
#include <types/listener.h>
#include <proto/fd.h>
#include <proto/obj_type.h>
#include <proto/session.h>
#include <proto/task.h>

extern struct pool_head *pool_head_connection;
extern struct pool_head *pool_head_connstream;
extern struct pool_head *pool_head_sockaddr;
extern struct pool_head *pool_head_authority;
extern struct xprt_ops *registered_xprt[XPRT_ENTRIES];
extern struct mux_proto_list mux_proto_list;

/* I/O callback for fd-based connections. It calls the read/write handlers
 * provided by the connection's sock_ops.
 */
void conn_fd_handler(int fd);
int conn_fd_check(struct connection *conn);

/* receive a PROXY protocol header over a connection */
int conn_recv_proxy(struct connection *conn, int flag);
int make_proxy_line(char *buf, int buf_len, struct server *srv, struct connection *remote);
int make_proxy_line_v1(char *buf, int buf_len, struct sockaddr_storage *src, struct sockaddr_storage *dst);
int make_proxy_line_v2(char *buf, int buf_len, struct server *srv, struct connection *remote);
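
/* Usage sketch (hypothetical caller: <srv_conn> and the address variables are
 * invented here, and a positive return is assumed to be the line length):
 * build a PROXY protocol v1 line and push it with a raw send():
 *
 *	struct sockaddr_storage src, dst;  // peer addresses, filled by the caller
 *	char pxline[107];                  // longest possible v1 line, CRLF included
 *	int len;
 *
 *	len = make_proxy_line_v1(pxline, sizeof(pxline), &src, &dst);
 *	if (len > 0)
 *		conn_sock_send(srv_conn, pxline, len, MSG_DONTWAIT);
 */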

int conn_subscribe(struct connection *conn, void *xprt_ctx, int event_type, void *param);
int conn_unsubscribe(struct connection *conn, void *xprt_ctx, int event_type, void *param);
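
/* Usage sketch (hypothetical: SUB_RETRY_RECV and struct wait_event are
 * assumed to come from types/connection.h, and <my_ctx> is an invented mux
 * context). A reader that got EAGAIN would subscribe to be woken up when the
 * fd becomes readable again, and unsubscribe once it stops caring:
 *
 *	conn_subscribe(conn, conn->xprt_ctx, SUB_RETRY_RECV, &my_ctx->wait_event);
 *	...
 *	conn_unsubscribe(conn, conn->xprt_ctx, SUB_RETRY_RECV, &my_ctx->wait_event);
 */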

/* receive a NetScaler Client IP insertion header over a connection */
int conn_recv_netscaler_cip(struct connection *conn, int flag);

/* raw send() directly on the socket */
int conn_sock_send(struct connection *conn, const void *buf, int len, int flags);

/* drains any pending bytes from the socket */
int conn_sock_drain(struct connection *conn);

/* SOCKS4 proxy handshake */
int conn_send_socks4_proxy_request(struct connection *conn);
int conn_recv_socks4_proxy_response(struct connection *conn);

/* Per-thread lock that must be held before removing a connection from a
 * thread's idle list (to add it to the list of connections to free), and in
 * conn_free() when the connection is about to be removed from every list, so
 * that the connection is known to remain valid while it is being moved.
 */
__decl_hathreads(extern HA_SPINLOCK_T toremove_lock[MAX_THREADS]);
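
/* Usage sketch (hypothetical: OTHER_LOCK stands in for whatever lock label
 * the caller uses, and the list primitive is assumed): protecting the unlink
 * of a connection from the current thread's idle list:
 *
 *	HA_SPIN_LOCK(OTHER_LOCK, &toremove_lock[tid]);
 *	LIST_DEL(&conn->list);
 *	HA_SPIN_UNLOCK(OTHER_LOCK, &toremove_lock[tid]);
 */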

/* returns true if the transport layer is ready */
static inline int conn_xprt_ready(const struct connection *conn)
{
	return (conn->flags & CO_FL_XPRT_READY);
}

/* returns true if the control layer is ready */
static inline int conn_ctrl_ready(const struct connection *conn)
{
	return (conn->flags & CO_FL_CTRL_READY);
}

/* Calls the init() function of the transport layer if any and if not done yet,
 * and sets the CO_FL_XPRT_READY flag to indicate it was properly initialized.
 * Returns <0 in case of error.
 */
static inline int conn_xprt_init(struct connection *conn)
{
	int ret = 0;

	if (!conn_xprt_ready(conn) && conn->xprt && conn->xprt->init)
		ret = conn->xprt->init(conn, &conn->xprt_ctx);

	if (ret >= 0)
		conn->flags |= CO_FL_XPRT_READY;

	return ret;
}
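
/* Usage sketch (hypothetical caller; attaching the transport beforehand, e.g.
 * through conn_prepare(), is assumed): initializing the transport and bailing
 * out cleanly on failure:
 *
 *	if (conn_xprt_init(conn) < 0) {
 *		conn_full_close(conn);   // declared below: closes xprt then ctrl
 *		return -1;
 *	}
 *	// from here conn_xprt_ready(conn) is true until conn_xprt_close()
 */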

/* Calls the close() function of the transport layer if any and if not done
 * yet, and clears the CO_FL_XPRT_READY flag. However this is not done if the
 * CO_FL_XPRT_TRACKED flag is set, which allows logs to take data from the
 * transport layer very late if needed.
 */
static inline void conn_xprt_close(struct connection *conn)
{
	if ((conn->flags & (CO_FL_XPRT_READY|CO_FL_XPRT_TRACKED)) == CO_FL_XPRT_READY) {
		if (conn->xprt->close)
			conn->xprt->close(conn, conn->xprt_ctx);
		conn->xprt_ctx = NULL;
		conn->flags &= ~CO_FL_XPRT_READY;
	}
}

/* Initializes the connection's control layer which essentially consists in
 * registering the file descriptor for polling and setting the CO_FL_CTRL_READY
 * flag. The caller is responsible for ensuring that the control layer is
 * already assigned to the connection prior to the call.
 */
static inline void conn_ctrl_init(struct connection *conn)
{
	if (!conn_ctrl_ready(conn)) {
		int fd = conn->handle.fd;

		fd_insert(fd, conn, conn_fd_handler, tid_bit);
		conn->flags |= CO_FL_CTRL_READY;
	}
}

/* Deletes the FD if the transport layer is already gone. Once done,
 * it then removes the CO_FL_CTRL_READY flag.
 */
static inline void conn_ctrl_close(struct connection *conn)
{
	if ((conn->flags & (CO_FL_XPRT_READY|CO_FL_CTRL_READY)) == CO_FL_CTRL_READY) {
		fd_delete(conn->handle.fd);
		conn->handle.fd = DEAD_FD_MAGIC;
		conn->flags &= ~CO_FL_CTRL_READY;
	}
}

/* If the connection still has a transport layer, then call its close() function
 * if any, and delete the file descriptor if a control layer is set. This is
 * used to close everything at once and atomically. However this is not done if
 * the CO_FL_XPRT_TRACKED flag is set, which allows logs to take data from the
 * transport layer very late if needed.
 */
static inline void conn_full_close(struct connection *conn)
{
	conn_xprt_close(conn);
	conn_ctrl_close(conn);
}
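
/* Usage sketch (error path): if something still tracks the transport through
 * CO_FL_XPRT_TRACKED, conn_full_close() will leave it alone; dropping the
 * tracking first guarantees a complete teardown:
 *
 *	conn_stop_tracking(conn);   // declared just below
 *	conn_full_close(conn);      // now closes xprt then ctrl unconditionally
 */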

/* stop tracking a connection, allowing conn_full_close() to always
 * succeed.
 */
static inline void conn_stop_tracking(struct connection *conn)
{
	conn->flags &= ~CO_FL_XPRT_TRACKED;
}

/* Update polling on connection <c>'s file descriptor depending on its current
 * state as reported in the connection's CO_FL_CURR_* flags, reports of EAGAIN
 * in CO_FL_WAIT_*, and the upper layer expectations indicated by CO_FL_XPRT_*.
 * The connection flags are updated with the new flags at the end of the
 * operation. Polling is totally disabled if an error was reported.
 */
void conn_update_xprt_polling(struct connection *c);

/* Refresh the connection's polling flags from its file descriptor status.
 * This should be called at the beginning of a connection handler. It does
 * nothing if CO_FL_WILL_UPDATE is present, indicating that an upper caller
 * has already done it.
 */
static inline void conn_refresh_polling_flags(struct connection *conn)
{
	if (conn_ctrl_ready(conn) && !(conn->flags & CO_FL_WILL_UPDATE)) {
		unsigned int flags = conn->flags;

		flags &= ~(CO_FL_CURR_RD_ENA | CO_FL_CURR_WR_ENA);
		if (fd_recv_active(conn->handle.fd))
			flags |= CO_FL_CURR_RD_ENA;
		if (fd_send_active(conn->handle.fd))
			flags |= CO_FL_CURR_WR_ENA;
		conn->flags = flags;
	}
}

/* inspects c->flags and returns non-zero if the XPRT ENA flags differ from
 * the CURR ENA flags, or if the WAIT flags are set with their respective ENA
 * flags. Additionally, non-zero is also returned if an error was reported on
 * the connection. This function is used quite often and is inlined. In order
 * to proceed optimally with very little code and CPU cycles, the bits are
 * arranged so that a change can be detected by a few left shifts, a xor, and
 * a mask. These operations detect when W&D are both enabled for either
 * direction, when C&D differ for either direction and when Error is set. The
 * trick consists in first keeping only the bits we're interested in, since
 * they don't collide when shifted, and to perform the AND at the end. In
 * practice, the compiler is able to replace the last AND with a TEST in
 * boolean conditions. This results in checks that are done in 4-6 cycles and
 * less than 30 bytes.
 */
static inline unsigned int conn_xprt_polling_changes(const struct connection *c)
{
	unsigned int f = c->flags;

	f &= CO_FL_XPRT_WR_ENA | CO_FL_XPRT_RD_ENA | CO_FL_CURR_WR_ENA |
	     CO_FL_CURR_RD_ENA | CO_FL_ERROR;

	f = (f ^ (f << 1)) & (CO_FL_CURR_WR_ENA|CO_FL_CURR_RD_ENA); /* test C ^ D */
	return f & (CO_FL_CURR_WR_ENA | CO_FL_CURR_RD_ENA | CO_FL_ERROR);
}
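
/* Worked example of the shift/xor trick above, with a hypothetical bit layout
 * (the real values live in types/connection.h): assume each XPRT_*_ENA bit
 * sits one position below its CURR_*_ENA counterpart, e.g. XPRT_RD_ENA = 0x1
 * and CURR_RD_ENA = 0x2. Then (f << 1) moves XPRT_RD_ENA onto the CURR_RD_ENA
 * position, so (f ^ (f << 1)) & CO_FL_CURR_RD_ENA is non-zero exactly when
 * the wanted state (XPRT) differs from the current one (CURR):
 *
 *	f           = XPRT_RD_ENA = 0b01   // recv wanted, not currently enabled
 *	f << 1      =               0b10
 *	f ^ (f<<1)  =               0b11
 *	masked      = 0b11 & 0b10 = 0b10   // non-zero -> change detected
 */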

/* Automatically updates polling on connection <c> depending on the XPRT flags.
 * It does nothing if CO_FL_WILL_UPDATE is present, indicating that an upper
 * caller is going to do it again later.
 */
static inline void conn_cond_update_xprt_polling(struct connection *c)
{
	if (!(c->flags & CO_FL_WILL_UPDATE))
		if (conn_xprt_polling_changes(c))
			conn_update_xprt_polling(c);
}

/* Stop all polling on the fd. This might be used when an error is encountered
 * for example. It does not propagate the change to the fd layer if
 * CO_FL_WILL_UPDATE is present, indicating that an upper caller is going to do
 * it later.
 */
static inline void conn_stop_polling(struct connection *c)
{
	c->flags &= ~(CO_FL_CURR_RD_ENA | CO_FL_CURR_WR_ENA |
		      CO_FL_XPRT_RD_ENA | CO_FL_XPRT_WR_ENA);
	if (!(c->flags & CO_FL_WILL_UPDATE) && conn_ctrl_ready(c))
		fd_stop_both(c->handle.fd);
}

/* Automatically update polling on connection <c> depending on the XPRT and
 * SOCK flags, and on whether a handshake is in progress or not. This may be
 * called at any moment when there is a doubt about the effectiveness of the
 * polling state, for instance when entering or leaving the handshake state.
 * It does nothing if CO_FL_WILL_UPDATE is present, indicating that an upper
 * caller is going to do it again later.
 */
static inline void conn_cond_update_polling(struct connection *c)
{
	if (unlikely(c->flags & CO_FL_ERROR))
		conn_stop_polling(c);
	else if (!(c->flags & CO_FL_WILL_UPDATE)) {
		if (conn_xprt_polling_changes(c))
			conn_update_xprt_polling(c);
	}
}
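
/* Usage sketch (hypothetical, simplified handler): the polling helpers above
 * are meant to bracket the I/O work, syncing the CURR_* flags from the fd
 * first and pushing any resulting change back to the fd layer at the end:
 *
 *	conn_refresh_polling_flags(conn);   // sync CURR_* from the fd state
 *	// ... perform recv/send, set or clear the XPRT_*_ENA flags ...
 *	conn_cond_update_polling(conn);     // propagate changes, if any
 */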
|
|
|
|
|
|
|
|
/***** Event manipulation primitives for use by DATA I/O callbacks *****/
/* The __conn_* versions do not propagate to lower layers and are only meant
 * to be used by handlers called by the connection handler. The other ones
 * may be used anywhere.
 */
static inline void __conn_xprt_want_recv(struct connection *c)
{
	c->flags |= CO_FL_XPRT_RD_ENA;
}

static inline void __conn_xprt_stop_recv(struct connection *c)
{
	c->flags &= ~CO_FL_XPRT_RD_ENA;
}

static inline void __conn_xprt_want_send(struct connection *c)
{
	c->flags |= CO_FL_XPRT_WR_ENA;
}

static inline void __conn_xprt_stop_send(struct connection *c)
{
	c->flags &= ~CO_FL_XPRT_WR_ENA;
}

static inline void __conn_xprt_stop_both(struct connection *c)
{
	c->flags &= ~(CO_FL_XPRT_WR_ENA | CO_FL_XPRT_RD_ENA);
}

static inline void conn_xprt_want_recv(struct connection *c)
{
	__conn_xprt_want_recv(c);
	conn_cond_update_xprt_polling(c);
}

static inline void conn_xprt_stop_recv(struct connection *c)
{
	__conn_xprt_stop_recv(c);
	conn_cond_update_xprt_polling(c);
}

static inline void conn_xprt_want_send(struct connection *c)
{
	__conn_xprt_want_send(c);
	conn_cond_update_xprt_polling(c);
}

static inline void conn_xprt_stop_send(struct connection *c)
{
	__conn_xprt_stop_send(c);
	conn_cond_update_xprt_polling(c);
}

static inline void conn_xprt_stop_both(struct connection *c)
{
	__conn_xprt_stop_both(c);
	conn_cond_update_xprt_polling(c);
}

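/* Editor's note: a sketch (not part of the original header) of how a DATA I/O
 * callback, already running under the connection handler, would use the
 * non-propagating primitives and leave the final polling update to a single
 * conn_cond_update_xprt_polling() call.
 */
static inline void example_data_cb_recv_done(struct connection *c, int room_left)
{
	if (room_left)
		__conn_xprt_want_recv(c);  /* flag change only, no fd update */
	else
		__conn_xprt_stop_recv(c);  /* flag change only, no fd update */
	conn_cond_update_xprt_polling(c);  /* propagate once at the end */
}
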
/* read shutdown, called from the rcv_buf/rcv_pipe handlers when
 * detecting an end of connection.
 */
static inline void conn_sock_read0(struct connection *c)
{
	c->flags |= CO_FL_SOCK_RD_SH;
	__conn_xprt_stop_recv(c);
	/* we don't risk keeping ports unusable if we found the
	 * zero from the other side.
	 */
	if (conn_ctrl_ready(c))
		fdtab[c->handle.fd].linger_risk = 0;
}

/* write shutdown, indication that the upper layer is not willing to send
 * anything anymore and wants to close after pending data are sent. The
 * <clean> argument will allow not to perform the socket layer shutdown if
 * equal to 0.
 */
static inline void conn_sock_shutw(struct connection *c, int clean)
{
	c->flags |= CO_FL_SOCK_WR_SH;
	conn_refresh_polling_flags(c);
	__conn_xprt_stop_send(c);
	conn_cond_update_xprt_polling(c);

	/* don't perform a clean shutdown if we're going to reset or
	 * if the shutr was already received.
	 */
	if (conn_ctrl_ready(c) && !(c->flags & CO_FL_SOCK_RD_SH) && clean)
		shutdown(c->handle.fd, SHUT_WR);
}

static inline void conn_xprt_shutw(struct connection *c)
{
	__conn_xprt_stop_send(c);

	/* clean data-layer shutdown */
	if (c->xprt && c->xprt->shutw)
		c->xprt->shutw(c, c->xprt_ctx, 1);
}

static inline void conn_xprt_shutw_hard(struct connection *c)
{
	__conn_xprt_stop_send(c);

	/* unclean data-layer shutdown */
	if (c->xprt && c->xprt->shutw)
		c->xprt->shutw(c, c->xprt_ctx, 0);
}

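/* Editor's note: a sketch (not part of the original header) combining the
 * transport and socket level write shutdowns, mirroring what an upper layer
 * typically does on close: clean when pending data must be flushed, hard
 * when the connection is being reset.
 */
static inline void example_shut_output(struct connection *c, int dirty)
{
	if (dirty) {
		conn_xprt_shutw_hard(c);  /* unclean transport shutdown */
		conn_sock_shutw(c, 0);    /* skip the socket-layer SHUT_WR */
	} else {
		conn_xprt_shutw(c);       /* clean transport shutdown */
		conn_sock_shutw(c, 1);    /* flush, then SHUT_WR if no shutr */
	}
}
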
/* shut read */
static inline void cs_shutr(struct conn_stream *cs, enum cs_shr_mode mode)
{
	/* clean data-layer shutdown */
	if (cs->conn->mux && cs->conn->mux->shutr)
		cs->conn->mux->shutr(cs, mode);
	cs->flags |= (mode == CS_SHR_DRAIN) ? CS_FL_SHRD : CS_FL_SHRR;
}

/* shut write */
static inline void cs_shutw(struct conn_stream *cs, enum cs_shw_mode mode)
{
	/* clean data-layer shutdown */
	if (cs->conn->mux && cs->conn->mux->shutw)
		cs->conn->mux->shutw(cs, mode);
	cs->flags |= (mode == CS_SHW_NORMAL) ? CS_FL_SHWN : CS_FL_SHWS;
}

/* completely close a conn_stream (but do not detach it) */
static inline void cs_close(struct conn_stream *cs)
{
	cs_shutw(cs, CS_SHW_SILENT);
	cs_shutr(cs, CS_SHR_RESET);
	cs->flags = CS_FL_NONE;
}

/* sets CS_FL_ERROR or CS_FL_ERR_PENDING on the cs */
static inline void cs_set_error(struct conn_stream *cs)
{
	if (cs->flags & CS_FL_EOS)
		cs->flags |= CS_FL_ERROR;
	else
		cs->flags |= CS_FL_ERR_PENDING;
}

/* detect sock->data read0 transition */
static inline int conn_xprt_read0_pending(struct connection *c)
{
	return (c->flags & CO_FL_SOCK_RD_SH) != 0;
}

/* prepares a connection to work with protocol <proto> and transport <xprt>.
 * The transport layer is initialized as well, and the mux and its context are
 * cleared. The target is not reinitialized and it is recommended that it is
 * set prior to calling this function so that the function may make use of it
 * in the future to refine the mux choice if needed.
 */
static inline void conn_prepare(struct connection *conn, const struct protocol *proto, const struct xprt_ops *xprt)
{
	conn->ctrl = proto;
	conn->xprt = xprt;
	conn->mux = NULL;
	conn->xprt_ctx = NULL;
	conn->ctx = NULL;
}

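/* Editor's note: a sketch (not part of the original header); <proto> and
 * <xprt> stand for whatever protocol and transport the caller selected. It
 * illustrates the recommended ordering: assign the target first so that
 * conn_prepare() could use it for mux selection in the future.
 */
static inline void example_prepare(struct connection *conn,
                                   const struct protocol *proto,
                                   const struct xprt_ops *xprt,
                                   struct server *srv)
{
	conn->target = &srv->obj_type;   /* set the target first */
	conn_prepare(conn, proto, xprt); /* then prepare ctrl/xprt/mux */
}
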
/*
 * Initializes all required fields for a new conn_stream.
 */
static inline void cs_init(struct conn_stream *cs, struct connection *conn)
{
	cs->obj_type = OBJ_TYPE_CS;
	cs->flags = CS_FL_NONE;
	cs->conn = conn;
}

/* Initializes all required fields for a new connection. Note that it does the
 * minimum acceptable initialization for a connection that already exists and
 * is about to be reused. It also leaves the addresses untouched, which makes
 * it usable across connection retries to reset a connection to a known state.
 */
static inline void conn_init(struct connection *conn)
{
	conn->obj_type = OBJ_TYPE_CONN;
	conn->flags = CO_FL_NONE;
	conn->mux = NULL;
	conn->ctx = NULL;
	conn->owner = NULL;
	conn->send_proxy_ofs = 0;
	conn->handle.fd = DEAD_FD_MAGIC;
	conn->err_code = CO_ER_NONE;
	conn->target = NULL;
	conn->xprt_done_cb = NULL;
	conn->destroy_cb = NULL;
	conn->proxy_netns = NULL;
	LIST_INIT(&conn->list);
	LIST_INIT(&conn->session_list);
	conn->send_wait = NULL;
	conn->recv_wait = NULL;
	conn->idle_time = 0;
	conn->src = NULL;
	conn->dst = NULL;
	conn->proxy_authority = NULL;
}

/* sets <owner> as the connection's owner */
static inline void conn_set_owner(struct connection *conn, void *owner, void (*cb)(struct connection *))
{
	conn->owner = owner;
	conn->destroy_cb = cb;
}

/* registers <cb> as a callback to notify for transport's readiness or failure */
static inline void conn_set_xprt_done_cb(struct connection *conn, int (*cb)(struct connection *))
{
	conn->xprt_done_cb = cb;
}

/* unregisters the callback to notify for transport's readiness or failure */
static inline void conn_clear_xprt_done_cb(struct connection *conn)
{
	conn->xprt_done_cb = NULL;
}

/* Allocates a struct sockaddr from the pool if needed, assigns it to *sap and
 * returns it. If <sap> is NULL, the address is always allocated and returned.
 * If <sap> is non-NULL, an address will only be allocated if it points to a
 * NULL pointer. In this case the allocated address will be assigned there.
 * In both situations the new pointer is returned.
 */
static inline struct sockaddr_storage *sockaddr_alloc(struct sockaddr_storage **sap)
{
	struct sockaddr_storage *sa;

	if (sap && *sap)
		return *sap;

	sa = pool_alloc(pool_head_sockaddr);
	if (sap)
		*sap = sa;
	return sa;
}

/* Releases the struct sockaddr potentially pointed to by <sap> to the pool. It
 * may be NULL or may point to NULL. If <sap> is not NULL, a NULL is placed
 * there.
 */
static inline void sockaddr_free(struct sockaddr_storage **sap)
{
	if (!sap)
		return;
	pool_free(pool_head_sockaddr, *sap);
	*sap = NULL;
}

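/* Editor's note: a sketch (not part of the original header) of the usual
 * allocate-once / free-and-reset cycle for connection addresses. pool_alloc()
 * may fail, so the return value of sockaddr_alloc() must be checked.
 */
static inline int example_assign_dst(struct connection *conn,
                                     const struct sockaddr_storage *addr)
{
	if (!sockaddr_alloc(&conn->dst))
		return 0;            /* pool exhausted */
	*conn->dst = *addr;          /* copy the peer address */
	return 1;
	/* sockaddr_free(&conn->dst) later releases it and resets the pointer */
}
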
/* Tries to allocate a new connection and initializes its main fields. The
 * connection is returned on success, NULL on failure. The connection must
 * be released using pool_free() or conn_free().
 */
static inline struct connection *conn_new()
{
	struct connection *conn;

	conn = pool_alloc(pool_head_connection);
	if (likely(conn != NULL))
		conn_init(conn);
	return conn;
}

/* Releases a conn_stream previously allocated by cs_new(), as well as any
 * buffer it would still hold.
 */
static inline void cs_free(struct conn_stream *cs)
{
	pool_free(pool_head_connstream, cs);
}

/* Tries to allocate a new conn_stream and initialize its main fields. If
 * <conn> is NULL, then a new connection is allocated on the fly, initialized,
 * and assigned to cs->conn; this connection will then have to be released
 * using pool_free() or conn_free(). The conn_stream is initialized and added
 * to the mux's stream list on success, then returned. On failure, nothing is
 * allocated and NULL is returned.
 */
static inline struct conn_stream *cs_new(struct connection *conn)
{
	struct conn_stream *cs;

	cs = pool_alloc(pool_head_connstream);
	if (!likely(cs))
		return NULL;

	if (!conn) {
		conn = conn_new();
		if (!likely(conn)) {
			cs_free(cs);
			return NULL;
		}
		conn_init(conn);
	}

	cs_init(cs, conn);
	return cs;
}

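/* Editor's note: a sketch (not part of the original header): allocating a
 * conn_stream with an implicit connection. Passing NULL makes cs_new()
 * allocate and initialize the connection itself; the pair is later released
 * through cs_destroy().
 */
static inline struct conn_stream *example_new_cs_pair(void)
{
	struct conn_stream *cs = cs_new(NULL);  /* allocates the conn too */

	if (!cs)
		return NULL;
	/* ... attach a data layer, install a mux, then use it ... */
	return cs;
}
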
/* Retrieves any valid conn_stream from this connection, preferably the first
 * valid one. The purpose is to be able to figure out the other end of a private
 * connection for purposes like source binding or proxy protocol header
 * emission. In such cases, any conn_stream is expected to be valid so the
 * mux is encouraged to return the first one it finds. If the connection has
 * no mux or the mux has no get_first_cs() method or the mux has no valid
 * conn_stream, NULL is returned. The output pointer is purposely marked
 * const to discourage the caller from modifying anything there.
 */
static inline const struct conn_stream *cs_get_first(const struct connection *conn)
{
	if (!conn || !conn->mux || !conn->mux->get_first_cs)
		return NULL;
	return conn->mux->get_first_cs(conn);
}

static inline void conn_force_unsubscribe(struct connection *conn)
{
	if (conn->recv_wait) {
		conn->recv_wait->events &= ~SUB_RETRY_RECV;
		conn->recv_wait = NULL;
	}
	if (conn->send_wait) {
		conn->send_wait->events &= ~SUB_RETRY_SEND;
		conn->send_wait = NULL;
	}
}

/* Releases a connection previously allocated by conn_new() */
static inline void conn_free(struct connection *conn)
{
	/* Remove ourselves from the session's connections list, if any. */
	if (!LIST_ISEMPTY(&conn->session_list)) {
		struct session *sess = conn->owner;
		if (conn->flags & CO_FL_SESS_IDLE)
			sess->idle_conns--;
		session_unown_conn(sess, conn);
	}

	sockaddr_free(&conn->src);
	sockaddr_free(&conn->dst);

	if (conn->proxy_authority != NULL) {
		pool_free(pool_head_authority, conn->proxy_authority);
		conn->proxy_authority = NULL;
	}

	/* By convention we always place a NULL where the ctx points to if the
	 * mux is null. It may have been used to store the connection as a
	 * stream_interface's end point for example.
	 */
	if (conn->ctx != NULL && conn->mux == NULL)
		*(void **)conn->ctx = NULL;

	/* The connection is currently in the server's idle list, so tell it
	 * there's one less connection available in that list.
	 */
	if (conn->idle_time > 0) {
		struct server *srv = __objt_server(conn->target);
		_HA_ATOMIC_SUB(&srv->curr_idle_conns, 1);
		srv->curr_idle_thr[tid]--;
	}

	conn_force_unsubscribe(conn);
	/* The per-thread lock below guards against a race with the idle
	 * connection purge: the connection must not be freed while another
	 * thread is moving it from the idle list to its to-free list.
	 */
	HA_SPIN_LOCK(OTHER_LOCK, &toremove_lock[tid]);
	MT_LIST_DEL((struct mt_list *)&conn->list);
	HA_SPIN_UNLOCK(OTHER_LOCK, &toremove_lock[tid]);
	pool_free(pool_head_connection, conn);
}

/* Release a conn_stream */
static inline void cs_destroy(struct conn_stream *cs)
{
	if (cs->conn->mux)
		cs->conn->mux->detach(cs);
	else {
		/* It's too early to have a mux, let's just destroy
		 * the connection
		 */
		struct connection *conn = cs->conn;

		conn_stop_tracking(conn);
		conn_full_close(conn);
		if (conn->destroy_cb)
			conn->destroy_cb(conn);
		conn_free(conn);
	}
	cs_free(cs);
}

/* Returns the conn from a cs. If cs is NULL, returns NULL */
static inline struct connection *cs_conn(const struct conn_stream *cs)
{
	return cs ? cs->conn : NULL;
}

/* Retrieves the connection's original source address. Returns non-zero on
 * success or zero on failure. The operation is only performed once and the
 * address is stored in the connection for future use.
 */
static inline int conn_get_src(struct connection *conn)
{
	if (conn->flags & CO_FL_ADDR_FROM_SET)
		return 1;

	if (!conn_ctrl_ready(conn) || !conn->ctrl->get_src)
		return 0;

	if (!sockaddr_alloc(&conn->src))
		return 0;

	if (conn->ctrl->get_src(conn->handle.fd, (struct sockaddr *)conn->src,
	                        sizeof(*conn->src),
	                        obj_type(conn->target) != OBJ_TYPE_LISTENER) == -1)
		return 0;
	conn->flags |= CO_FL_ADDR_FROM_SET;
	return 1;
}

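/* Editor's note: a sketch (not part of the original header); addr_to_str()
 * is assumed to be the address formatter from the tools module. The address
 * is fetched lazily and cached in conn->src by conn_get_src().
 */
static inline void example_print_src(struct connection *conn)
{
	char buf[64];

	if (conn_get_src(conn) && addr_to_str(conn->src, buf, sizeof(buf)) > 0)
		fprintf(stderr, "client address: %s\n", buf);
}
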
/* Retrieves the connection's original destination address. Returns non-zero on
 * success or zero on failure. The operation is only performed once and the
 * address is stored in the connection for future use.
 */
static inline int conn_get_dst(struct connection *conn)
{
	if (conn->flags & CO_FL_ADDR_TO_SET)
		return 1;

	if (!conn_ctrl_ready(conn) || !conn->ctrl->get_dst)
		return 0;

	if (!sockaddr_alloc(&conn->dst))
		return 0;

	if (conn->ctrl->get_dst(conn->handle.fd, (struct sockaddr *)conn->dst,
	                        sizeof(*conn->dst),
	                        obj_type(conn->target) != OBJ_TYPE_LISTENER) == -1)
		return 0;
	conn->flags |= CO_FL_ADDR_TO_SET;
	return 1;
}

/* Sets the TOS header in IPv4 and the traffic class header in IPv6 packets
 * (as per RFC3260 #4 and BCP37 #4.2 and #5.2). The connection is tested and if
 * it is null, nothing is done.
 */
static inline void conn_set_tos(const struct connection *conn, int tos)
{
	if (!conn || !conn_ctrl_ready(conn))
		return;

#ifdef IP_TOS
	if (conn->src->ss_family == AF_INET)
		setsockopt(conn->handle.fd, IPPROTO_IP, IP_TOS, &tos, sizeof(tos));
#endif
#ifdef IPV6_TCLASS
	if (conn->src->ss_family == AF_INET6) {
		if (IN6_IS_ADDR_V4MAPPED(&((struct sockaddr_in6 *)conn->src)->sin6_addr))
			/* v4-mapped addresses need IP_TOS */
			setsockopt(conn->handle.fd, IPPROTO_IP, IP_TOS, &tos, sizeof(tos));
		else
			setsockopt(conn->handle.fd, IPPROTO_IPV6, IPV6_TCLASS, &tos, sizeof(tos));
	}
#endif
}

/* Sets the netfilter mark on the connection's socket. The connection is tested
 * and if it is null, nothing is done.
 */
static inline void conn_set_mark(const struct connection *conn, int mark)
{
	if (!conn || !conn_ctrl_ready(conn))
		return;

#ifdef SO_MARK
	setsockopt(conn->handle.fd, SOL_SOCKET, SO_MARK, &mark, sizeof(mark));
#endif
}

/* Adjusts the TCP quick-ack feature on the connection's socket. The
 * connection is tested and if it is null, nothing is done.
 */
static inline void conn_set_quickack(const struct connection *conn, int value)
{
	if (!conn || !conn_ctrl_ready(conn))
		return;

#ifdef TCP_QUICKACK
	setsockopt(conn->handle.fd, IPPROTO_TCP, TCP_QUICKACK, &value, sizeof(value));
#endif
}

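/* Editor's note: a sketch (not part of the original header) applying several
 * per-connection socket options together; the values are arbitrary examples.
 * Each helper silently does nothing when the connection or its control layer
 * is not ready, or when the option is not supported at build time.
 */
static inline void example_tune_socket(const struct connection *conn)
{
	conn_set_tos(conn, 0x10);    /* IPTOS_LOWDELAY */
	conn_set_mark(conn, 42);     /* netfilter mark, e.g. for policy routing */
	conn_set_quickack(conn, 1);  /* re-enable TCP quick ACKs */
}
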
/* Attaches a conn_stream to a data layer and sets the relevant callbacks */
static inline void cs_attach(struct conn_stream *cs, void *data, const struct data_cb *data_cb)
{
	cs->data_cb = data_cb;
	cs->data = data;
}

static inline struct wait_event *wl_set_waitcb(struct wait_event *wl, struct task *(*cb)(struct task *, void *, unsigned short), void *ctx)
{
	if (!wl->tasklet->process) {
		wl->tasklet->process = cb;
		wl->tasklet->context = ctx;
	}
	return wl;
}

/* Installs the connection's mux layer for upper context <ctx>.
 * Returns < 0 on error.
 */
static inline int conn_install_mux(struct connection *conn, const struct mux_ops *mux,
                                   void *ctx, struct proxy *prx, struct session *sess)
{
	int ret;

	conn->mux = mux;
	conn->ctx = ctx;
	ret = mux->init ? mux->init(conn, prx, sess, &BUF_NULL) : 0;
	if (ret < 0) {
		conn->mux = NULL;
		conn->ctx = NULL;
	}
	return ret;
}

/* returns a human-readable error code for conn->err_code, or NULL if the code
 * is unknown.
 */
static inline const char *conn_err_code_str(struct connection *c)
{
	switch (c->err_code) {
	case CO_ER_NONE:             return "Success";

	case CO_ER_CONF_FDLIM:       return "Reached configured maxconn value";
	case CO_ER_PROC_FDLIM:       return "Too many sockets on the process";
	case CO_ER_SYS_FDLIM:        return "Too many sockets on the system";
	case CO_ER_SYS_MEMLIM:       return "Out of system buffers";
	case CO_ER_NOPROTO:          return "Protocol or address family not supported";
	case CO_ER_SOCK_ERR:         return "General socket error";
	case CO_ER_PORT_RANGE:       return "Source port range exhausted";
	case CO_ER_CANT_BIND:        return "Can't bind to source address";
	case CO_ER_FREE_PORTS:       return "Out of local source ports on the system";
	case CO_ER_ADDR_INUSE:       return "Local source address already in use";

	case CO_ER_PRX_EMPTY:        return "Connection closed while waiting for PROXY protocol header";
	case CO_ER_PRX_ABORT:        return "Connection error while waiting for PROXY protocol header";
	case CO_ER_PRX_TIMEOUT:      return "Timeout while waiting for PROXY protocol header";
	case CO_ER_PRX_TRUNCATED:    return "Truncated PROXY protocol header received";
	case CO_ER_PRX_NOT_HDR:      return "Received something which does not look like a PROXY protocol header";
	case CO_ER_PRX_BAD_HDR:      return "Received an invalid PROXY protocol header";
	case CO_ER_PRX_BAD_PROTO:    return "Received an unhandled protocol in the PROXY protocol header";

	case CO_ER_CIP_EMPTY:        return "Connection closed while waiting for NetScaler Client IP header";
	case CO_ER_CIP_ABORT:        return "Connection error while waiting for NetScaler Client IP header";
	case CO_ER_CIP_TRUNCATED:    return "Truncated NetScaler Client IP header received";
	case CO_ER_CIP_BAD_MAGIC:    return "Received an invalid NetScaler Client IP magic number";
	case CO_ER_CIP_BAD_PROTO:    return "Received an unhandled protocol in the NetScaler Client IP header";

	case CO_ER_SSL_EMPTY:        return "Connection closed during SSL handshake";
	case CO_ER_SSL_ABORT:        return "Connection error during SSL handshake";
	case CO_ER_SSL_TIMEOUT:      return "Timeout during SSL handshake";
	case CO_ER_SSL_TOO_MANY:     return "Too many SSL connections";
	case CO_ER_SSL_NO_MEM:       return "Out of memory when initializing an SSL connection";
	case CO_ER_SSL_RENEG:        return "Rejected a client-initiated SSL renegotiation attempt";
	case CO_ER_SSL_CA_FAIL:      return "SSL client CA chain cannot be verified";
	case CO_ER_SSL_CRT_FAIL:     return "SSL client certificate not trusted";
	case CO_ER_SSL_MISMATCH:     return "Server presented an SSL certificate different from the configured one";
	case CO_ER_SSL_MISMATCH_SNI: return "Server presented an SSL certificate different from the expected one";
	case CO_ER_SSL_HANDSHAKE:    return "SSL handshake failure";
	case CO_ER_SSL_HANDSHAKE_HB: return "SSL handshake failure after heartbeat";
	case CO_ER_SSL_KILLED_HB:    return "Stopped a TLSv1 heartbeat attack (CVE-2014-0160)";
	case CO_ER_SSL_NO_TARGET:    return "Attempt to use SSL on an unknown target (internal error)";

	case CO_ER_SOCKS4_SEND:      return "SOCKS4 Proxy write error during handshake";
	case CO_ER_SOCKS4_RECV:      return "SOCKS4 Proxy read error during handshake";
	case CO_ER_SOCKS4_DENY:      return "SOCKS4 Proxy denied the request";
	case CO_ER_SOCKS4_ABORT:     return "SOCKS4 Proxy handshake aborted by server";
	}
	return NULL;
}

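/* Editor's note: a sketch (not part of the original header) showing the
 * intended fallback when the table above does not know the current code.
 */
static inline void example_report_conn_error(struct connection *c)
{
	const char *msg = conn_err_code_str(c);

	fprintf(stderr, "connection failed: %s\n", msg ? msg : "Unknown error");
}
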
static inline const char *conn_get_ctrl_name(const struct connection *conn)
{
	if (!conn || !conn_ctrl_ready(conn))
		return "NONE";
	return conn->ctrl->name;
}

static inline const char *conn_get_xprt_name(const struct connection *conn)
{
	if (!conn || !conn_xprt_ready(conn))
		return "NONE";
	return conn->xprt->name;
}

static inline const char *conn_get_mux_name(const struct connection *conn)
{
	if (!conn || !conn->mux)
		return "NONE";
	return conn->mux->name;
}

static inline const char *cs_get_data_name(const struct conn_stream *cs)
{
	if (!cs || !cs->data_cb)
		return "NONE";
	return cs->data_cb->name;
}

/* registers pointer to transport layer <id> (XPRT_*) */
static inline void xprt_register(int id, struct xprt_ops *xprt)
{
	if (id >= XPRT_ENTRIES)
		return;
	registered_xprt[id] = xprt;
}

/* returns pointer to transport layer <id> (XPRT_*) or NULL if not registered */
static inline struct xprt_ops *xprt_get(int id)
{
	if (id >= XPRT_ENTRIES)
		return NULL;
	return registered_xprt[id];
}

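/* Editor's note: a sketch (not part of the original header); <my_xprt_ops> is
 * a hypothetical transport. A transport registers itself once at startup and
 * is then retrieved anywhere through its XPRT_* identifier.
 */
static inline void example_register_xprt(struct xprt_ops *my_xprt_ops)
{
	xprt_register(XPRT_HANDSHAKE, my_xprt_ops);
	/* elsewhere: const struct xprt_ops *ops = xprt_get(XPRT_HANDSHAKE); */
}
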
/* Try to add a handshake pseudo-XPRT. If the connection's first XPRT is
 * raw_sock, then just use the new XPRT as the connection XPRT, otherwise
 * call the xprt's add_xprt() method.
 * Returns 0 on success, or non-zero on failure.
 */
static inline int xprt_add_hs(struct connection *conn)
{
	void *xprt_ctx = NULL;
	const struct xprt_ops *ops = xprt_get(XPRT_HANDSHAKE);
	void *nextxprt_ctx = NULL;
	const struct xprt_ops *nextxprt_ops = NULL;

	if (conn->flags & CO_FL_ERROR)
		return -1;
	if (ops->init(conn, &xprt_ctx) < 0)
		return -1;
	if (conn->xprt == xprt_get(XPRT_RAW)) {
		nextxprt_ctx = conn->xprt_ctx;
		nextxprt_ops = conn->xprt;
		conn->xprt_ctx = xprt_ctx;
		conn->xprt = ops;
	} else {
		if (conn->xprt->add_xprt(conn, conn->xprt_ctx, xprt_ctx, ops,
		                         &nextxprt_ctx, &nextxprt_ops) != 0) {
			ops->close(conn, xprt_ctx);
			return -1;
		}
	}
	if (ops->add_xprt(conn, xprt_ctx, nextxprt_ctx, nextxprt_ops, NULL, NULL) != 0) {
		ops->close(conn, xprt_ctx);
		return -1;
	}
	return 0;
}

static inline int conn_get_alpn(const struct connection *conn, const char **str, int *len)
{
	if (!conn_xprt_ready(conn) || !conn->xprt->get_alpn)
		return 0;
	return conn->xprt->get_alpn(conn, conn->xprt_ctx, str, len);
}

/* registers proto mux list <list>. Modifies the list element! */
static inline void register_mux_proto(struct mux_proto_list *list)
{
	LIST_ADDQ(&mux_proto_list.list, &list->list);
}

/* unregisters proto mux list <list> */
static inline void unregister_mux_proto(struct mux_proto_list *list)
{
	LIST_DEL(&list->list);
	LIST_INIT(&list->list);
}

static inline struct mux_proto_list *get_mux_proto(const struct ist proto)
{
	struct mux_proto_list *item;

	list_for_each_entry(item, &mux_proto_list.list, list) {
		if (isteq(proto, item->token))
			return item;
	}
	return NULL;
}

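/* Editor's note: a sketch (not part of the original header) resolving a mux
 * by the token used with the configuration's "proto" keyword, here "h2".
 */
static inline const struct mux_ops *example_lookup_h2(void)
{
	struct mux_proto_list *item = get_mux_proto(ist("h2"));

	return item ? item->mux : NULL;  /* NULL when no mux registered "h2" */
}
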
/* Lists the known proto mux on <out> */
static inline void list_mux_proto(FILE *out)
{
	struct mux_proto_list *item;
	struct ist proto;
	char *mode, *side;

	fprintf(out, "Available multiplexer protocols :\n"
		"(protocols marked as <default> cannot be specified using 'proto' keyword)\n");
	list_for_each_entry(item, &mux_proto_list.list, list) {
		proto = item->token;

		if (item->mode == PROTO_MODE_ANY)
			mode = "TCP|HTTP";
		else if (item->mode == PROTO_MODE_TCP)
			mode = "TCP";
		else if (item->mode == PROTO_MODE_HTTP)
			mode = "HTTP";
		else
			mode = "NONE";

		if (item->side == PROTO_SIDE_BOTH)
			side = "FE|BE";
		else if (item->side == PROTO_SIDE_FE)
			side = "FE";
		else if (item->side == PROTO_SIDE_BE)
			side = "BE";
		else
			side = "NONE";

		fprintf(out, " %15s : mode=%-10s side=%-8s mux=%s\n",
			(proto.len ? proto.ptr : "<default>"), mode, side, item->mux->name);
	}
}

/* returns the first mux entry in the list matching the exact same <mux_proto>
 * and compatible with the <proto_side> (FE or BE) and the <proto_mode> (TCP or
 * HTTP). <mux_proto> can be empty. Will fall back to the first compatible mux
 * with exactly the same <proto_mode> or with an empty name. May return
 * null if the code improperly registered the default mux to use as a fallback.
 */
static inline const struct mux_proto_list *conn_get_best_mux_entry(
        const struct ist mux_proto,
        int proto_side, int proto_mode)
{
	struct mux_proto_list *item;
	struct mux_proto_list *fallback = NULL;

	list_for_each_entry(item, &mux_proto_list.list, list) {
		if (!(item->side & proto_side) || !(item->mode & proto_mode))
			continue;
		if (istlen(mux_proto) && isteq(mux_proto, item->token))
			return item;
		else if (!istlen(item->token)) {
			if (!fallback || (item->mode == proto_mode && fallback->mode != proto_mode))
				fallback = item;
		}
	}
	return fallback;
}

/* returns the first mux in the list matching the exact same <mux_proto> and
 * compatible with the <proto_side> (FE or BE) and the <proto_mode> (TCP or
 * HTTP). <mux_proto> can be empty. Will fall back to the first compatible mux
 * with exactly the same <proto_mode> or with an empty name. May return
 * null if the code improperly registered the default mux to use as a fallback.
 */
static inline const struct mux_ops *conn_get_best_mux(struct connection *conn,
                                                      const struct ist mux_proto,
                                                      int proto_side, int proto_mode)
{
	const struct mux_proto_list *item;

	item = conn_get_best_mux_entry(mux_proto, proto_side, proto_mode);

	return item ? item->mux : NULL;
}

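/* Editor's note: a sketch (not part of the original header): the mux an
 * incoming HTTP connection would get after the client negotiated "h2" via
 * ALPN, with fallback to the registered default on no exact match.
 */
static inline const struct mux_ops *example_best_fe_mux(struct connection *conn)
{
	return conn_get_best_mux(conn, ist("h2"), PROTO_SIDE_FE, PROTO_MODE_HTTP);
}
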
/* returns 0 if the connection is valid and is a frontend connection, otherwise
 * returns 1 indicating it's a backend connection. An uninitialized connection
 * also returns 1 to better handle the usage in the middle of initialization.
 */
static inline int conn_is_back(const struct connection *conn)
{
	return !objt_listener(conn->target);
}

/* returns a pointer to the proxy associated with this connection. For a front
 * connection it returns a pointer to the frontend; for a back connection, it
 * returns a pointer to the backend.
 */
static inline struct proxy *conn_get_proxy(const struct connection *conn)
{
	struct listener *l;
	struct server *s;

	/* check if it's a frontend connection */
	l = objt_listener(conn->target);
	if (l)
		return l->bind_conf->frontend;

	/* check if it's a backend connection */
	s = objt_server(conn->target);
	if (s)
		return s->proxy;

	return objt_proxy(conn->target);
}

2018-04-10 13:01:45 +00:00
|
|
|
/* installs the best mux for incoming connection <conn> using the upper context
|
|
|
|
* <ctx>. If the mux protocol is forced, we use it to find the best
|
|
|
|
* mux. Otherwise we use the ALPN name, if any. Returns < 0 on error.
|
2017-09-21 17:40:52 +00:00
|
|
|
*/
|
2018-04-10 13:01:45 +00:00
|
|
|
static inline int conn_install_mux_fe(struct connection *conn, void *ctx)
|
2017-09-21 17:40:52 +00:00
|
|
|
{
|
2018-09-20 09:26:52 +00:00
|
|
|
struct bind_conf *bind_conf = __objt_listener(conn->target)->bind_conf;
|
2018-04-10 13:01:45 +00:00
|
|
|
const struct mux_ops *mux_ops;
|
2017-09-21 17:40:52 +00:00
|
|
|
|
2018-04-10 13:01:45 +00:00
|
|
|
if (bind_conf->mux_proto)
|
|
|
|
mux_ops = bind_conf->mux_proto->mux;
|
|
|
|
else {
|
|
|
|
struct ist mux_proto;
|
|
|
|
const char *alpn_str = NULL;
|
|
|
|
int alpn_len = 0;
|
2018-10-22 09:49:15 +00:00
|
|
|
int mode;
|
|
|
|
|
BUG/MAJOR: muxes: Use the HTX mode to find the best mux for HTTP proxies only
Since the commit 1d2b586cd ("MAJOR: htx: Enable the HTX mode by default for all
proxies"), the HTX is enabled by default for all proxies, HTTP and TCP, but also
CLI and HEALTH proxies. But when the best mux is retrieved, only HTTP and TCP
modes are checked. If the TCP mode is not explicitly set, it is considered as an
HTTP proxy. It is an hidden bug introduced when the option "http-use-htx" was
added. It has no effect until the commit 1d2b586cd. But now, when a stats socket
is created for the master process, the mux h1 is installed on all incoming
connections to the CLI proxy, leading to segfaults because HTX operations are
performed on raw buffers.
So to fix the buf, when a mux is installed, all proxies are considered as TCP
proxies, except HTTP ones. This way, CLI and HEALTH proxies will be handled as
TCP proxies.
This patch must be backported to 1.9 although it has no effect. It is safer to
not keep hidden bugs.
2019-04-24 13:01:22 +00:00
|
|
|
if (bind_conf->frontend->mode == PR_MODE_HTTP)
|
2019-07-15 09:42:52 +00:00
|
|
|
mode = PROTO_MODE_HTTP;
|
2018-10-22 09:49:15 +00:00
|
|
|
else
|
BUG/MAJOR: muxes: Use the HTX mode to find the best mux for HTTP proxies only
Since the commit 1d2b586cd ("MAJOR: htx: Enable the HTX mode by default for all
proxies"), the HTX is enabled by default for all proxies, HTTP and TCP, but also
CLI and HEALTH proxies. But when the best mux is retrieved, only HTTP and TCP
modes are checked. If the TCP mode is not explicitly set, it is considered as an
HTTP proxy. It is an hidden bug introduced when the option "http-use-htx" was
added. It has no effect until the commit 1d2b586cd. But now, when a stats socket
is created for the master process, the mux h1 is installed on all incoming
connections to the CLI proxy, leading to segfaults because HTX operations are
performed on raw buffers.
So to fix the buf, when a mux is installed, all proxies are considered as TCP
proxies, except HTTP ones. This way, CLI and HEALTH proxies will be handled as
TCP proxies.
This patch must be backported to 1.9 although it has no effect. It is safer to
not keep hidden bugs.
2019-04-24 13:01:22 +00:00
|
|
|
mode = PROTO_MODE_TCP;
|
2018-04-10 13:01:45 +00:00
|
|
|
|
|
|
|
conn_get_alpn(conn, &alpn_str, &alpn_len);
|
|
|
|
mux_proto = ist2(alpn_str, alpn_len);
|
|
|
|
mux_ops = conn_get_best_mux(conn, mux_proto, PROTO_SIDE_FE, mode);
|
|
|
|
if (!mux_ops)
|
|
|
|
return -1;
|
|
|
|
}
|
2018-12-14 18:42:40 +00:00
|
|
|
return conn_install_mux(conn, mux_ops, ctx, bind_conf->frontend, conn->owner);
|
2017-09-21 17:40:52 +00:00
|
|
|
}
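
/* Usage sketch (illustrative only, not part of the original header): on the
 * accept path, once the transport layer is ready, the mux is installed without
 * any pre-existing upper context, hence the NULL <ctx>. The wrapper and its
 * error handling are hypothetical.
 */
static inline int example_accept_install_mux(struct connection *conn)
{
	if (conn_install_mux_fe(conn, NULL) < 0)
		return -1; /* no suitable mux, e.g. unknown forced proto or ALPN token */
	return 0;
}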

/* installs the best mux for outgoing connection <conn> using the upper context
 * <ctx>. If the mux protocol is forced, we use it to find the best mux.
 * Returns < 0 on error.
 */
static inline int conn_install_mux_be(struct connection *conn, void *ctx, struct session *sess)
{
	struct server *srv = objt_server(conn->target);
	struct proxy  *prx = objt_proxy(conn->target);
	const struct mux_ops *mux_ops;

	if (srv)
		prx = srv->proxy;

	if (!prx) // target must be either proxy or server
		return -1;

	if (srv && srv->mux_proto)
		mux_ops = srv->mux_proto->mux;
	else {
		struct ist mux_proto;
		const char *alpn_str = NULL;
		int alpn_len = 0;
		int mode;

		/* only HTTP proxies may use the HTTP mode; all other proxies
		 * (including CLI and health-check ones) must be handled as TCP
		 * so that an HTX-aware mux is never installed on a raw buffer.
		 */
		if (prx->mode == PR_MODE_HTTP)
			mode = PROTO_MODE_HTTP;
		else
			mode = PROTO_MODE_TCP;

		conn_get_alpn(conn, &alpn_str, &alpn_len);
		mux_proto = ist2(alpn_str, alpn_len);

		mux_ops = conn_get_best_mux(conn, mux_proto, PROTO_SIDE_BE, mode);
		if (!mux_ops)
			return -1;
	}
	return conn_install_mux(conn, mux_ops, ctx, prx, sess);
}
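
/* Usage sketch (illustrative only, not part of the original header): on the
 * connect path, the upper context is typically the conn_stream talking to this
 * connection, and <sess> the session which triggered the connect. The wrapper
 * name is hypothetical.
 */
static inline int example_connect_install_mux(struct connection *conn,
                                              struct conn_stream *cs,
                                              struct session *sess)
{
	return conn_install_mux_be(conn, cs, sess);
}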

/* Changes the mux for the connection. The caller must make sure it is not
 * subscribed to the underlying XPRT.
 */
static inline int conn_upgrade_mux_fe(struct connection *conn, void *ctx, struct buffer *buf,
                                      struct ist mux_proto, int mode)
{
	struct bind_conf *bind_conf = __objt_listener(conn->target)->bind_conf;
	const struct mux_ops *old_mux, *new_mux;
	void *old_mux_ctx;
	const char *alpn_str = NULL;
	int alpn_len = 0;

	if (!mux_proto.len) {
		conn_get_alpn(conn, &alpn_str, &alpn_len);
		mux_proto = ist2(alpn_str, alpn_len);
	}
	new_mux = conn_get_best_mux(conn, mux_proto, PROTO_SIDE_FE, mode);
	old_mux = conn->mux;

	/* No mux found */
	if (!new_mux)
		return -1;

	/* Same mux, nothing to do */
	if (old_mux == new_mux)
		return 0;

	old_mux_ctx = conn->ctx;
	conn->mux = new_mux;
	conn->ctx = ctx;
	if (new_mux->init(conn, bind_conf->frontend, conn->owner, buf) == -1) {
		/* The mux upgrade failed, so restore the old mux */
		conn->ctx = old_mux_ctx;
		conn->mux = old_mux;
		return -1;
	}

	/* The mux was upgraded, destroy the old one */
	*buf = BUF_NULL;
	old_mux->destroy(old_mux_ctx);
	return 0;
}
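
/* Usage sketch (illustrative only, not part of the original header): upgrading
 * an established frontend connection to an HTTP mux after content inspection.
 * An empty <mux_proto> lets the function fall back to the ALPN token; on
 * success, ownership of <buf> is transferred to the new mux (it is reset to
 * BUF_NULL). The wrapper below is hypothetical.
 */
static inline int example_upgrade_to_http(struct connection *conn, void *ctx,
                                          struct buffer *buf)
{
	return conn_upgrade_mux_fe(conn, ctx, buf, ist2(NULL, 0), PROTO_MODE_HTTP);
}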

#endif /* _PROTO_CONNECTION_H */

/*
 * Local variables:
 *  c-indent-level: 8
 *  c-basic-offset: 8
 * End:
 */