MINOR: quic: Add new "QUIC over SSL" C module.

Move the code which directly calls the OpenSSL QUIC API functions into a new C
file, quic_ssl.c.
Some code has been extracted from qc_conn_finalize() into quic_tls.c to
implement only the QUIC TLS part (see quic_tls_finalize()).
qc_conn_finalize() has also been exported so that it can be called from this
new quic_ssl.c C module.
Frédéric Lécaille 2023-07-24 16:28:45 +02:00
parent 57237f68ad
commit f454b78fa9
8 changed files with 820 additions and 709 deletions
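
For orientation, here is a condensed sketch of the resulting call flow, reconstructed only from the hunks below; the function bodies are reduced to the calls relevant to the split, all tracing, error handling and CRYPTO-buffer bookkeeping are omitted, and HAProxy's internal headers are assumed.

/* Sketch only, condensed from this commit; not the full implementation. */

/* src/quic_ssl.c: the OpenSSL QUIC API calls now live here. CRYPTO data
 * received from the peer is handed to the TLS stack, then the connection is
 * finalized through the now-exported qc_conn_finalize() from quic_conn.c.
 */
int qc_ssl_provide_quic_data(struct ncbuf *ncbuf, enum ssl_encryption_level_t level,
                             struct ssl_sock_ctx *ctx, const unsigned char *data, size_t len)
{
	if (SSL_provide_quic_data(ctx->ssl, level, data, len) != 1)
		return 0;
	/* ... SSL_do_handshake() and post-handshake processing ... */
	if ((ctx->qc->flags & QUIC_FL_CONN_TX_TP_RECEIVED) && !qc_conn_finalize(ctx->qc, 1))
		return 0;
	return 1;
}

/* src/quic_conn.c: qc_conn_finalize() is no longer static and delegates the
 * pure QUIC TLS part to the new quic_tls_finalize() helper in quic_tls.c.
 */
int qc_conn_finalize(struct quic_conn *qc, int server)
{
	if (qc->flags & QUIC_FL_CONN_FINALIZED)
		return 1;
	if (!quic_tls_finalize(qc, server))
		return 0;
	qc->flags |= QUIC_FL_CONN_FINALIZED;
	return 1;
}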

View File

@@ -608,7 +608,7 @@ OPTIONS_OBJS += src/quic_conn.o src/mux_quic.o src/h3.o src/xprt_quic.o \
src/qpack-dec.o src/hq_interop.o src/quic_stream.o \
src/h3_stats.o src/qmux_http.o src/cfgparse-quic.o \
src/cbuf.o src/quic_cc.o src/quic_cc_nocc.o \
src/quic_trace.o src/quic_cli.o
src/quic_trace.o src/quic_cli.o src/quic_ssl.o
endif
ifneq ($(USE_QUIC_OPENSSL_COMPAT),)

View File

@@ -50,11 +50,13 @@
extern struct pool_head *pool_head_quic_connection_id;
int qc_conn_finalize(struct quic_conn *qc, int server);
int ssl_quic_initial_ctx(struct bind_conf *bind_conf);
struct quic_cstream *quic_cstream_new(struct quic_conn *qc);
void quic_cstream_free(struct quic_cstream *cs);
void quic_free_arngs(struct quic_conn *qc, struct quic_arngs *arngs);
struct quic_cstream *quic_cstream_new(struct quic_conn *qc);
struct task *quic_conn_app_io_cb(struct task *t, void *context, unsigned int state);
/* Return the long packet type matching with <qv> version and <type> */
static inline int quic_pkt_type(int type, uint32_t version)

View File

@@ -0,0 +1,21 @@
/*
* include/haproxy/quic_ssl-t.h
* Definitions for QUIC over TLS/SSL API types, constants and flags.
*
* Copyright (C) 2023
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*
*/
#ifndef _HAPROXY_QUIC_SSL_T_H
#define _HAPROXY_QUIC_SSL_T_H
#include <haproxy/pool-t.h>
extern struct pool_head *pool_head_quic_ssl_sock_ctx;
#endif /* _HAPROXY_QUIC_SSL_T_H */

View File

@@ -0,0 +1,48 @@
/*
* include/haproxy/quic_ssl.h
* This file contains QUIC over TLS/SSL API definitions.
*
* Copyright (C) 2023
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation, version 2.1
* exclusively.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this library; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#ifndef _HAPROXY_QUIC_SSL_H
#define _HAPROXY_QUIC_SSL_H
#include <haproxy/listener-t.h>
#include <haproxy/ncbuf-t.h>
#include <haproxy/openssl-compat.h>
#include <haproxy/pool.h>
#include <haproxy/quic_ssl-t.h>
#include <haproxy/ssl_sock-t.h>
int ssl_quic_initial_ctx(struct bind_conf *bind_conf);
int qc_alloc_ssl_sock_ctx(struct quic_conn *qc);
int qc_ssl_provide_quic_data(struct ncbuf *ncbuf,
enum ssl_encryption_level_t level,
struct ssl_sock_ctx *ctx,
const unsigned char *data, size_t len);
static inline void qc_free_ssl_sock_ctx(struct ssl_sock_ctx **ctx)
{
if (!*ctx)
return;
SSL_free((*ctx)->ssl);
pool_free(pool_head_quic_ssl_sock_ctx, *ctx);
*ctx = NULL;
}
#endif /* _HAPROXY_QUIC_SSL_H */
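
As a usage note, the following is a minimal, hedged sketch of how this header is meant to be consumed, condensed from the quic_conn.c hunks further down in this commit; the wrapper function name is hypothetical, the includes are indicative, and error handling is reduced to early returns.

#include <haproxy/quic_conn.h>   /* struct quic_conn, struct quic_enc_level (assumed location) */
#include <haproxy/quic_ssl.h>

/* Hypothetical helper gathering the three call sites shown in the diff below. */
static void quic_ssl_ctx_lifecycle_sketch(struct quic_conn *qc,
                                          struct quic_enc_level *qel,
                                          const unsigned char *crypto_data, size_t len)
{
	/* connection setup, as in qc_new_conn(): returns 0 on success */
	if (qc_alloc_ssl_sock_ctx(qc))
		return;

	/* in-order CRYPTO data received, as in qc_handle_crypto_frm() */
	if (!qc_ssl_provide_quic_data(&qel->cstream->rx.ncbuf, qel->level,
	                              qc->xprt_ctx, crypto_data, len))
		return;

	/* teardown, as in quic_conn_release(): frees the SSL object and the context */
	qc_free_ssl_sock_ctx(&qc->xprt_ctx);
}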

View File

@@ -30,6 +30,8 @@
#include <haproxy/quic_tls-t.h>
#include <haproxy/trace.h>
int quic_tls_finalize(struct quic_conn *qc, int server);
void quic_tls_ctx_free(struct quic_tls_ctx **ctx);
void quic_pktns_release(struct quic_conn *qc, struct quic_pktns **pktns);
int qc_enc_level_alloc(struct quic_conn *qc, struct quic_pktns **pktns,
struct quic_enc_level **qel, enum ssl_encryption_level_t level);

View File

@@ -50,6 +50,7 @@
#include <haproxy/quic_frame.h>
#include <haproxy/quic_enc.h>
#include <haproxy/quic_loss.h>
#include <haproxy/quic_ssl.h>
#include <haproxy/quic_sock.h>
#include <haproxy/quic_stats.h>
#include <haproxy/quic_stream.h>
@@ -125,8 +126,6 @@ const struct quic_version quic_version_VN_reserved = { .num = 0, };
static BIO_METHOD *ha_quic_meth;
DECLARE_POOL(pool_head_quic_conn_rxbuf, "quic_conn_rxbuf", QUIC_CONN_RX_BUFSZ);
DECLARE_STATIC_POOL(pool_head_quic_conn_ctx,
"quic_conn_ctx", sizeof(struct ssl_sock_ctx));
DECLARE_STATIC_POOL(pool_head_quic_conn, "quic_conn", sizeof(struct quic_conn));
DECLARE_POOL(pool_head_quic_connection_id,
"quic_connection_id", sizeof(struct quic_connection_id));
@@ -150,7 +149,6 @@ static struct quic_tx_packet *qc_build_pkt(unsigned char **pos, const unsigned c
struct task *quic_conn_app_io_cb(struct task *t, void *context, unsigned int state);
static void qc_idle_timer_do_rearm(struct quic_conn *qc, int arm_ack);
static void qc_idle_timer_rearm(struct quic_conn *qc, int read, int arm_ack);
static int qc_conn_alloc_ssl_ctx(struct quic_conn *qc);
static int quic_conn_init_timer(struct quic_conn *qc);
static int quic_conn_init_idle_timer_task(struct quic_conn *qc);
@@ -243,295 +241,6 @@ static inline void qc_set_timer(struct quic_conn *qc)
TRACE_LEAVE(QUIC_EV_CONN_STIMER, qc);
}
/* Set the encoded version of the transport parameter into the TLS
* stack depending on <ver> QUIC version and <server> boolean which must
* be set to 1 for a QUIC server, 0 for a client.
* Return 1 if succeeded, 0 if not.
*/
static int qc_set_quic_transport_params(struct quic_conn *qc,
const struct quic_version *ver, int server)
{
int ret = 0;
#ifdef USE_QUIC_OPENSSL_COMPAT
unsigned char *in = qc->enc_params;
size_t insz = sizeof qc->enc_params;
size_t *enclen = &qc->enc_params_len;
#else
unsigned char tps[QUIC_TP_MAX_ENCLEN];
size_t tpslen;
unsigned char *in = tps;
size_t insz = sizeof tps;
size_t *enclen = &tpslen;
#endif
TRACE_ENTER(QUIC_EV_CONN_RWSEC, qc);
*enclen = quic_transport_params_encode(in, in + insz, &qc->rx.params, ver, server);
if (!*enclen) {
TRACE_ERROR("quic_transport_params_encode() failed", QUIC_EV_CONN_RWSEC);
goto leave;
}
if (!SSL_set_quic_transport_params(qc->xprt_ctx->ssl, in, *enclen)) {
TRACE_ERROR("SSL_set_quic_transport_params() failed", QUIC_EV_CONN_RWSEC);
goto leave;
}
ret = 1;
leave:
TRACE_LEAVE(QUIC_EV_CONN_RWSEC, qc);
return ret;
}
/* returns 0 on error, 1 on success */
int ha_quic_set_encryption_secrets(SSL *ssl, enum ssl_encryption_level_t level,
const uint8_t *read_secret,
const uint8_t *write_secret, size_t secret_len)
{
int ret = 0;
struct quic_conn *qc = SSL_get_ex_data(ssl, ssl_qc_app_data_index);
struct quic_enc_level **qel = ssl_to_qel_addr(qc, level);
struct quic_pktns **pktns = ssl_to_quic_pktns(qc, level);
struct quic_tls_ctx *tls_ctx;
const SSL_CIPHER *cipher = SSL_get_current_cipher(ssl);
struct quic_tls_secrets *rx = NULL, *tx = NULL;
const struct quic_version *ver =
qc->negotiated_version ? qc->negotiated_version : qc->original_version;
TRACE_ENTER(QUIC_EV_CONN_RWSEC, qc);
BUG_ON(secret_len > QUIC_TLS_SECRET_LEN);
if (!*qel && !qc_enc_level_alloc(qc, pktns, qel, level)) {
TRACE_PROTO("Could not allocated an encryption level", QUIC_EV_CONN_ADDDATA, qc);
goto out;
}
tls_ctx = &(*qel)->tls_ctx;
if (qc->flags & QUIC_FL_CONN_TO_KILL) {
TRACE_PROTO("connection to be killed", QUIC_EV_CONN_ADDDATA, qc);
goto out;
}
if (qc->flags & QUIC_FL_CONN_IMMEDIATE_CLOSE) {
TRACE_PROTO("CC required", QUIC_EV_CONN_RWSEC, qc);
goto out;
}
if (!read_secret)
goto write;
rx = &tls_ctx->rx;
if (!quic_tls_secrets_keys_alloc(rx)) {
TRACE_ERROR("RX keys allocation failed", QUIC_EV_CONN_RWSEC, qc);
goto leave;
}
rx->aead = tls_aead(cipher);
rx->md = tls_md(cipher);
rx->hp = tls_hp(cipher);
if (!quic_tls_derive_keys(rx->aead, rx->hp, rx->md, ver, rx->key, rx->keylen,
rx->iv, rx->ivlen, rx->hp_key, sizeof rx->hp_key,
read_secret, secret_len)) {
TRACE_ERROR("TX key derivation failed", QUIC_EV_CONN_RWSEC, qc);
goto leave;
}
if (!quic_tls_rx_ctx_init(&rx->ctx, rx->aead, rx->key)) {
TRACE_ERROR("could not initial RX TLS cipher context", QUIC_EV_CONN_RWSEC, qc);
goto leave;
}
if (!quic_tls_dec_aes_ctx_init(&rx->hp_ctx, rx->hp, rx->hp_key)) {
TRACE_ERROR("could not initial RX TLS cipher context for HP", QUIC_EV_CONN_RWSEC, qc);
goto leave;
}
/* Enqueue this connection asap if we could derive 0-RTT secrets as
* listener. Note that a listener derives only RX secrets for this
* level.
*/
if (qc_is_listener(qc) && level == ssl_encryption_early_data) {
TRACE_DEVEL("pushing connection into accept queue", QUIC_EV_CONN_RWSEC, qc);
quic_accept_push_qc(qc);
}
write:
if (!write_secret)
goto keyupdate_init;
tx = &tls_ctx->tx;
if (!quic_tls_secrets_keys_alloc(tx)) {
TRACE_ERROR("TX keys allocation failed", QUIC_EV_CONN_RWSEC, qc);
goto leave;
}
tx->aead = tls_aead(cipher);
tx->md = tls_md(cipher);
tx->hp = tls_hp(cipher);
if (!quic_tls_derive_keys(tx->aead, tx->hp, tx->md, ver, tx->key, tx->keylen,
tx->iv, tx->ivlen, tx->hp_key, sizeof tx->hp_key,
write_secret, secret_len)) {
TRACE_ERROR("TX key derivation failed", QUIC_EV_CONN_RWSEC, qc);
goto leave;
}
if (!quic_tls_tx_ctx_init(&tx->ctx, tx->aead, tx->key)) {
TRACE_ERROR("could not initial RX TLS cipher context", QUIC_EV_CONN_RWSEC, qc);
goto leave;
}
if (!quic_tls_enc_aes_ctx_init(&tx->hp_ctx, tx->hp, tx->hp_key)) {
TRACE_ERROR("could not initial TX TLS cipher context for HP", QUIC_EV_CONN_RWSEC, qc);
goto leave;
}
/* Set the transport parameters in the TLS stack. */
if (level == ssl_encryption_handshake && qc_is_listener(qc) &&
!qc_set_quic_transport_params(qc, ver, 1))
goto leave;
keyupdate_init:
/* Store the secret provided by the TLS stack, required for keyupdate. */
if (level == ssl_encryption_application) {
struct quic_tls_kp *prv_rx = &qc->ku.prv_rx;
struct quic_tls_kp *nxt_rx = &qc->ku.nxt_rx;
struct quic_tls_kp *nxt_tx = &qc->ku.nxt_tx;
if (rx) {
if (!(rx->secret = pool_alloc(pool_head_quic_tls_secret))) {
TRACE_ERROR("Could not allocate RX Application secrete keys", QUIC_EV_CONN_RWSEC, qc);
goto leave;
}
memcpy(rx->secret, read_secret, secret_len);
rx->secretlen = secret_len;
}
if (tx) {
if (!(tx->secret = pool_alloc(pool_head_quic_tls_secret))) {
TRACE_ERROR("Could not allocate TX Application secrete keys", QUIC_EV_CONN_RWSEC, qc);
goto leave;
}
memcpy(tx->secret, write_secret, secret_len);
tx->secretlen = secret_len;
}
/* Initialize all the secret keys lengths */
prv_rx->secretlen = nxt_rx->secretlen = nxt_tx->secretlen = secret_len;
}
out:
ret = 1;
leave:
TRACE_LEAVE(QUIC_EV_CONN_RWSEC, qc, &level);
return ret;
}
/* This function copies the CRYPTO data provided by the TLS stack and found at <data>,
* with <len> as size, into CRYPTO buffers dedicated to storing the information about
* outgoing CRYPTO frames, so as to be able to replay the CRYPTO data streams.
* It fails (returns 0) only if it could not manage to allocate enough CRYPTO
* buffers to store all the data.
* Note that CRYPTO data may exist at any encryption level except 0-RTT.
*/
static int quic_crypto_data_cpy(struct quic_conn *qc, struct quic_enc_level *qel,
const unsigned char *data, size_t len)
{
struct quic_crypto_buf **qcb;
/* The remaining byte to store in CRYPTO buffers. */
size_t cf_offset, cf_len, *nb_buf;
unsigned char *pos;
int ret = 0;
nb_buf = &qel->tx.crypto.nb_buf;
qcb = &qel->tx.crypto.bufs[*nb_buf - 1];
cf_offset = (*nb_buf - 1) * QUIC_CRYPTO_BUF_SZ + (*qcb)->sz;
cf_len = len;
TRACE_ENTER(QUIC_EV_CONN_ADDDATA, qc);
while (len) {
size_t to_copy, room;
pos = (*qcb)->data + (*qcb)->sz;
room = QUIC_CRYPTO_BUF_SZ - (*qcb)->sz;
to_copy = len > room ? room : len;
if (to_copy) {
memcpy(pos, data, to_copy);
/* Increment the total size of this CRYPTO buffers by <to_copy>. */
qel->tx.crypto.sz += to_copy;
(*qcb)->sz += to_copy;
len -= to_copy;
data += to_copy;
}
else {
struct quic_crypto_buf **tmp;
// FIXME: realloc!
tmp = realloc(qel->tx.crypto.bufs,
(*nb_buf + 1) * sizeof *qel->tx.crypto.bufs);
if (tmp) {
qel->tx.crypto.bufs = tmp;
qcb = &qel->tx.crypto.bufs[*nb_buf];
*qcb = pool_alloc(pool_head_quic_crypto_buf);
if (!*qcb) {
TRACE_ERROR("Could not allocate crypto buf", QUIC_EV_CONN_ADDDATA, qc);
goto leave;
}
(*qcb)->sz = 0;
++*nb_buf;
}
else {
break;
}
}
}
/* Allocate a TX CRYPTO frame only if all the CRYPTO data
* have been buffered.
*/
if (!len) {
struct quic_frame *frm;
struct quic_frame *found = NULL;
/* There is at most one CRYPTO frame in this packet number
* space. Let's look for it.
*/
list_for_each_entry(frm, &qel->pktns->tx.frms, list) {
if (frm->type != QUIC_FT_CRYPTO)
continue;
/* Found */
found = frm;
break;
}
if (found) {
found->crypto.len += cf_len;
}
else {
frm = qc_frm_alloc(QUIC_FT_CRYPTO);
if (!frm) {
TRACE_ERROR("Could not allocate quic frame", QUIC_EV_CONN_ADDDATA, qc);
goto leave;
}
frm->crypto.offset = cf_offset;
frm->crypto.len = cf_len;
frm->crypto.qel = qel;
LIST_APPEND(&qel->pktns->tx.frms, &frm->list);
}
}
ret = len == 0;
leave:
TRACE_LEAVE(QUIC_EV_CONN_ADDDATA, qc);
return ret;
}
/* Prepare the emission of CONNECTION_CLOSE with error <err>. All send/receive
* activity for <qc> will be interrupted.
*/
@@ -582,125 +291,6 @@ int quic_set_app_ops(struct quic_conn *qc, const unsigned char *alpn, size_t alp
return 1;
}
/* ->add_handshake_data QUIC TLS callback used by the QUIC TLS stack when it
* wants to provide the QUIC layer with CRYPTO data.
* Returns 1 if succeeded, 0 if not.
*/
int ha_quic_add_handshake_data(SSL *ssl, enum ssl_encryption_level_t level,
const uint8_t *data, size_t len)
{
int ret = 0;
struct quic_conn *qc = SSL_get_ex_data(ssl, ssl_qc_app_data_index);
struct quic_enc_level **qel = ssl_to_qel_addr(qc, level);
struct quic_pktns **pktns = ssl_to_quic_pktns(qc, level);
TRACE_ENTER(QUIC_EV_CONN_ADDDATA, qc);
if (qc->flags & QUIC_FL_CONN_TO_KILL) {
TRACE_PROTO("connection to be killed", QUIC_EV_CONN_ADDDATA, qc);
goto out;
}
if (qc->flags & QUIC_FL_CONN_IMMEDIATE_CLOSE) {
TRACE_PROTO("CC required", QUIC_EV_CONN_ADDDATA, qc);
goto out;
}
if (!*qel && !qc_enc_level_alloc(qc, pktns, qel, level))
goto leave;
if (!quic_crypto_data_cpy(qc, *qel, data, len)) {
TRACE_ERROR("Could not bufferize", QUIC_EV_CONN_ADDDATA, qc);
goto leave;
}
TRACE_DEVEL("CRYPTO data buffered", QUIC_EV_CONN_ADDDATA,
qc, &level, &len);
out:
ret = 1;
leave:
TRACE_LEAVE(QUIC_EV_CONN_ADDDATA, qc);
return ret;
}
int ha_quic_flush_flight(SSL *ssl)
{
struct quic_conn *qc = SSL_get_ex_data(ssl, ssl_qc_app_data_index);
TRACE_ENTER(QUIC_EV_CONN_FFLIGHT, qc);
TRACE_LEAVE(QUIC_EV_CONN_FFLIGHT, qc);
return 1;
}
int ha_quic_send_alert(SSL *ssl, enum ssl_encryption_level_t level, uint8_t alert)
{
struct quic_conn *qc = SSL_get_ex_data(ssl, ssl_qc_app_data_index);
TRACE_ENTER(QUIC_EV_CONN_SSLALERT, qc);
TRACE_PROTO("Received TLS alert", QUIC_EV_CONN_SSLALERT, qc, &alert, &level);
quic_set_tls_alert(qc, alert);
TRACE_LEAVE(QUIC_EV_CONN_SSLALERT, qc);
return 1;
}
/* QUIC TLS methods */
static SSL_QUIC_METHOD ha_quic_method = {
.set_encryption_secrets = ha_quic_set_encryption_secrets,
.add_handshake_data = ha_quic_add_handshake_data,
.flush_flight = ha_quic_flush_flight,
.send_alert = ha_quic_send_alert,
};
/* Initialize the TLS context of a listener with <bind_conf> as configuration.
* Returns an error count.
*/
int ssl_quic_initial_ctx(struct bind_conf *bind_conf)
{
struct ssl_bind_conf __maybe_unused *ssl_conf_cur;
int cfgerr = 0;
long options =
(SSL_OP_ALL & ~SSL_OP_DONT_INSERT_EMPTY_FRAGMENTS) |
SSL_OP_SINGLE_ECDH_USE |
SSL_OP_CIPHER_SERVER_PREFERENCE;
SSL_CTX *ctx;
ctx = SSL_CTX_new(TLS_server_method());
bind_conf->initial_ctx = ctx;
SSL_CTX_set_options(ctx, options);
SSL_CTX_set_mode(ctx, SSL_MODE_RELEASE_BUFFERS);
SSL_CTX_set_min_proto_version(ctx, TLS1_3_VERSION);
SSL_CTX_set_max_proto_version(ctx, TLS1_3_VERSION);
#ifdef SSL_CTRL_SET_TLSEXT_HOSTNAME
# if defined(HAVE_SSL_CLIENT_HELLO_CB)
# if defined(SSL_OP_NO_ANTI_REPLAY)
if (bind_conf->ssl_conf.early_data) {
SSL_CTX_set_options(ctx, SSL_OP_NO_ANTI_REPLAY);
#ifndef USE_QUIC_OPENSSL_COMPAT
SSL_CTX_set_max_early_data(ctx, 0xffffffff);
#endif
}
# endif /* !SSL_OP_NO_ANTI_REPLAY */
SSL_CTX_set_client_hello_cb(ctx, ssl_sock_switchctx_cbk, NULL);
SSL_CTX_set_tlsext_servername_callback(ctx, ssl_sock_switchctx_err_cbk);
# else /* ! HAVE_SSL_CLIENT_HELLO_CB */
SSL_CTX_set_tlsext_servername_callback(ctx, ssl_sock_switchctx_cbk);
# endif
SSL_CTX_set_tlsext_servername_arg(ctx, bind_conf);
#endif
#ifdef USE_QUIC_OPENSSL_COMPAT
if (!quic_tls_compat_init(bind_conf, ctx))
cfgerr |= ERR_ALERT | ERR_FATAL;
#endif
return cfgerr;
}
/* Decode an expected packet number from <truncated_on> its truncated value,
* depending on <largest_pn> the largest received packet number, and <pn_nbits>
* the number of bits used to encode this packet number (its length in bytes * 8).
@@ -1579,43 +1169,18 @@ static inline int qc_parse_ack_frm(struct quic_conn *qc,
goto leave;
}
/* This function gives the details of the SSL errors. It is used only
* if the debug mode and the verbose mode are activated. It dumps all
* the SSL errors until the stack is empty.
*/
static forceinline void qc_ssl_dump_errors(struct connection *conn)
{
if (unlikely(global.mode & MODE_DEBUG)) {
while (1) {
const char *func = NULL;
unsigned long ret;
ERR_peek_error_func(&func);
ret = ERR_get_error();
if (!ret)
return;
fprintf(stderr, "conn. @%p OpenSSL error[0x%lx] %s: %s\n", conn, ret,
func, ERR_reason_error_string(ret));
}
}
}
int ssl_sock_get_alpn(const struct connection *conn, void *xprt_ctx,
const char **str, int *len);
/* Finalize the <qc> QUIC connection:
* - initialize the Initial QUIC TLS context for the negotiated version,
* - derive the secrets for this context,
* - set them into the TLS stack.
*
* MUST be called after having received the remote transport parameters, which
* are parsed when the TLS callback for the ClientHello message is called upon
* SSL_do_handshake() calls, not necessarily the first time, as this TLS
* message may be split across several packets.
* Return 1 if succeeded, 0 if not.
*/
static int qc_conn_finalize(struct quic_conn *qc, int server)
int qc_conn_finalize(struct quic_conn *qc, int server)
{
int ret = 0;
@@ -1624,16 +1189,8 @@ static int qc_conn_finalize(struct quic_conn *qc, int server)
if (qc->flags & QUIC_FL_CONN_FINALIZED)
goto finalized;
if (qc->negotiated_version) {
qc->nictx = pool_alloc(pool_head_quic_tls_ctx);
if (!qc->nictx)
goto out;
quic_tls_ctx_reset(qc->nictx);
if (!qc_new_isecs(qc, qc->nictx, qc->negotiated_version,
qc->odcid.data, qc->odcid.len, server))
goto out;
}
if (!quic_tls_finalize(qc, server))
goto out;
/* This connection is functional (ready to send/receive) */
qc->flags |= QUIC_FL_CONN_FINALIZED;
@@ -1645,150 +1202,6 @@ static int qc_conn_finalize(struct quic_conn *qc, int server)
return ret;
}
/* Provide CRYPTO data to the TLS stack found at <data> with <len> as length
* from <qel> encryption level with <ctx> as QUIC connection context.
* Remaining parameters are there for debugging purposes.
* Return 1 if succeeded, 0 if not.
*/
static inline int qc_provide_cdata(struct quic_enc_level *el,
struct ssl_sock_ctx *ctx,
const unsigned char *data, size_t len,
struct quic_rx_packet *pkt,
struct quic_rx_crypto_frm *cf)
{
#ifdef DEBUG_STRICT
enum ncb_ret ncb_ret;
#endif
int ssl_err, state;
struct quic_conn *qc;
int ret = 0;
struct ncbuf *ncbuf = &el->cstream->rx.ncbuf;
ssl_err = SSL_ERROR_NONE;
qc = ctx->qc;
TRACE_ENTER(QUIC_EV_CONN_SSLDATA, qc);
if (SSL_provide_quic_data(ctx->ssl, el->level, data, len) != 1) {
TRACE_ERROR("SSL_provide_quic_data() error",
QUIC_EV_CONN_SSLDATA, qc, pkt, cf, ctx->ssl);
goto leave;
}
TRACE_PROTO("in order CRYPTO data",
QUIC_EV_CONN_SSLDATA, qc, NULL, cf, ctx->ssl);
state = qc->state;
if (state < QUIC_HS_ST_COMPLETE) {
ssl_err = SSL_do_handshake(ctx->ssl);
if (qc->flags & QUIC_FL_CONN_TO_KILL) {
TRACE_DEVEL("connection to be killed", QUIC_EV_CONN_IO_CB, qc);
goto leave;
}
/* Finalize the connection as soon as possible if the peer transport parameters
* have been received. This may be useful to send packets even if this
* handshake fails.
*/
if ((qc->flags & QUIC_FL_CONN_TX_TP_RECEIVED) && !qc_conn_finalize(qc, 1)) {
TRACE_ERROR("connection finalization failed", QUIC_EV_CONN_IO_CB, qc, &state);
goto leave;
}
if (ssl_err != 1) {
ssl_err = SSL_get_error(ctx->ssl, ssl_err);
if (ssl_err == SSL_ERROR_WANT_READ || ssl_err == SSL_ERROR_WANT_WRITE) {
TRACE_PROTO("SSL handshake in progress",
QUIC_EV_CONN_IO_CB, qc, &state, &ssl_err);
goto out;
}
/* TODO: Should close the connection asap */
if (!(qc->flags & QUIC_FL_CONN_HALF_OPEN_CNT_DECREMENTED)) {
qc->flags |= QUIC_FL_CONN_HALF_OPEN_CNT_DECREMENTED;
HA_ATOMIC_DEC(&qc->prx_counters->half_open_conn);
HA_ATOMIC_INC(&qc->prx_counters->hdshk_fail);
}
TRACE_ERROR("SSL handshake error", QUIC_EV_CONN_IO_CB, qc, &state, &ssl_err);
qc_ssl_dump_errors(ctx->conn);
ERR_clear_error();
goto leave;
}
TRACE_PROTO("SSL handshake OK", QUIC_EV_CONN_IO_CB, qc, &state);
/* Check the alpn could be negotiated */
if (!qc->app_ops) {
TRACE_ERROR("No negotiated ALPN", QUIC_EV_CONN_IO_CB, qc, &state);
quic_set_tls_alert(qc, SSL_AD_NO_APPLICATION_PROTOCOL);
goto leave;
}
if (!(qc->flags & QUIC_FL_CONN_HALF_OPEN_CNT_DECREMENTED)) {
TRACE_DEVEL("dec half open counter", QUIC_EV_CONN_IO_CB, qc, &state);
qc->flags |= QUIC_FL_CONN_HALF_OPEN_CNT_DECREMENTED;
HA_ATOMIC_DEC(&qc->prx_counters->half_open_conn);
}
/* I/O callback switch */
qc->wait_event.tasklet->process = quic_conn_app_io_cb;
if (qc_is_listener(ctx->qc)) {
qc->flags |= QUIC_FL_CONN_NEED_POST_HANDSHAKE_FRMS;
qc->state = QUIC_HS_ST_CONFIRMED;
/* The connection is ready to be accepted. */
quic_accept_push_qc(qc);
}
else {
qc->state = QUIC_HS_ST_COMPLETE;
}
/* Prepare the next key update */
if (!quic_tls_key_update(qc)) {
TRACE_ERROR("quic_tls_key_update() failed", QUIC_EV_CONN_IO_CB, qc);
goto leave;
}
} else {
ssl_err = SSL_process_quic_post_handshake(ctx->ssl);
if (ssl_err != 1) {
ssl_err = SSL_get_error(ctx->ssl, ssl_err);
if (ssl_err == SSL_ERROR_WANT_READ || ssl_err == SSL_ERROR_WANT_WRITE) {
TRACE_PROTO("SSL post handshake in progress",
QUIC_EV_CONN_IO_CB, qc, &state, &ssl_err);
goto out;
}
TRACE_ERROR("SSL post handshake error",
QUIC_EV_CONN_IO_CB, qc, &state, &ssl_err);
goto leave;
}
TRACE_STATE("SSL post handshake succeeded", QUIC_EV_CONN_IO_CB, qc, &state);
}
out:
ret = 1;
leave:
/* The CRYPTO data are consumed even in case of an error to release
* the memory asap.
*/
if (!ncb_is_null(ncbuf)) {
#ifdef DEBUG_STRICT
ncb_ret = ncb_advance(ncbuf, len);
/* ncb_advance() must always succeed. This is guaranteed as
* this is only done inside a data block. If false, this will
* lead to handshake failure with quic_enc_level offset shifted
* from buffer data.
*/
BUG_ON(ncb_ret != NCB_RET_OK);
#else
ncb_advance(ncbuf, len);
#endif
}
TRACE_LEAVE(QUIC_EV_CONN_SSLDATA, qc);
return ret;
}
/* Parse a STREAM frame <strm_frm> received in <pkt> packet for <qc>
* connection. <fin> is true if FIN bit is set on frame type.
*
@@ -2208,8 +1621,8 @@ static int qc_handle_crypto_frm(struct quic_conn *qc,
}
if (crypto_frm->offset == cstream->rx.offset && ncb_is_empty(ncbuf)) {
if (!qc_provide_cdata(qel, qc->xprt_ctx, crypto_frm->data, crypto_frm->len,
pkt, &cfdebug)) {
if (!qc_ssl_provide_quic_data(&qel->cstream->rx.ncbuf, qel->level,
qc->xprt_ctx, crypto_frm->data, crypto_frm->len)) {
// trace already emitted by function above
goto leave;
}
@@ -3565,7 +2978,8 @@ static inline int qc_treat_rx_crypto_frms(struct quic_conn *qc,
while ((data = ncb_data(ncbuf, 0))) {
const unsigned char *cdata = (const unsigned char *)ncb_head(ncbuf);
if (!qc_provide_cdata(el, ctx, cdata, data, NULL, NULL))
if (!qc_ssl_provide_quic_data(&el->cstream->rx.ncbuf, el->level,
ctx, cdata, data))
goto leave;
cstream->rx.offset += data;
@@ -4849,7 +4263,7 @@ static struct quic_conn *qc_new_conn(const struct quic_version *qv, int ipv4,
qc->wait_event.events = 0;
qc->subs = NULL;
if (qc_conn_alloc_ssl_ctx(qc) ||
if (qc_alloc_ssl_sock_ctx(qc) ||
!quic_conn_init_timer(qc) ||
!quic_conn_init_idle_timer_task(qc))
goto err;
@@ -4907,7 +4321,6 @@ static inline void quic_conn_prx_cntrs_update(struct quic_conn *qc)
*/
void quic_conn_release(struct quic_conn *qc)
{
struct ssl_sock_ctx *conn_ctx;
struct eb64_node *node;
struct quic_rx_packet *pkt, *pktback;
@@ -4962,11 +4375,8 @@ void quic_conn_release(struct quic_conn *qc)
/* remove the connection from receiver cids trees */
free_quic_conn_cids(qc);
conn_ctx = qc->xprt_ctx;
if (conn_ctx) {
SSL_free(conn_ctx->ssl);
pool_free(pool_head_quic_conn_ctx, conn_ctx);
}
/* free the SSL sock context */
qc_free_ssl_sock_ctx(&qc->xprt_ctx);
quic_tls_ku_free(qc);
if (qc->ael) {
@@ -4982,9 +4392,7 @@ void quic_conn_release(struct quic_conn *qc)
qc_enc_level_free(qc, &qc->hel);
qc_enc_level_free(qc, &qc->ael);
quic_tls_ctx_secs_free(qc->nictx);
pool_free(pool_head_quic_tls_ctx, qc->nictx);
qc->nictx = NULL;
quic_tls_ctx_free(&qc->nictx);
quic_pktns_release(qc, &qc->ipktns);
quic_pktns_release(qc, &qc->hpktns);
@@ -5881,109 +5289,6 @@ static struct quic_conn *retrieve_qc_conn_from_cid(struct quic_rx_packet *pkt,
return qc;
}
/* Try to allocate the <*ssl> SSL session object for the <qc> QUIC connection,
* with <ssl_ctx> as inherited SSL context settings. Also set the transport
* parameters of this session.
* It is the responsibility of the caller to check the validity of all the
* pointers passed as parameters to this function.
* Return 0 if succeeded, -1 if not. If it fails, the ->err_code member of
* <qc->conn> is set to CO_ER_SSL_NO_MEM.
*/
static int qc_ssl_sess_init(struct quic_conn *qc, SSL_CTX *ssl_ctx, SSL **ssl)
{
int retry, ret = -1;
TRACE_ENTER(QUIC_EV_CONN_NEW, qc);
retry = 1;
retry:
*ssl = SSL_new(ssl_ctx);
if (!*ssl) {
if (!retry--)
goto leave;
pool_gc(NULL);
goto retry;
}
if (!SSL_set_ex_data(*ssl, ssl_qc_app_data_index, qc) ||
!SSL_set_quic_method(*ssl, &ha_quic_method)) {
SSL_free(*ssl);
*ssl = NULL;
if (!retry--)
goto leave;
pool_gc(NULL);
goto retry;
}
ret = 0;
leave:
TRACE_LEAVE(QUIC_EV_CONN_NEW, qc);
return ret;
}
/* Allocate the ssl_sock_ctx from connection <qc>. This creates the tasklet
* used to process <qc> received packets. The allocated context is stored in
* <qc.xprt_ctx>.
*
* Returns 0 on success else non-zero.
*/
static int qc_conn_alloc_ssl_ctx(struct quic_conn *qc)
{
int ret = 0;
struct bind_conf *bc = qc->li->bind_conf;
struct ssl_sock_ctx *ctx = NULL;
TRACE_ENTER(QUIC_EV_CONN_NEW, qc);
ctx = pool_alloc(pool_head_quic_conn_ctx);
if (!ctx) {
TRACE_ERROR("SSL context allocation failed", QUIC_EV_CONN_TXPKT);
goto err;
}
ctx->conn = NULL;
ctx->bio = NULL;
ctx->xprt = NULL;
ctx->xprt_ctx = NULL;
memset(&ctx->wait_event, 0, sizeof(ctx->wait_event));
ctx->subs = NULL;
ctx->xprt_st = 0;
ctx->error_code = 0;
ctx->early_buf = BUF_NULL;
ctx->sent_early_data = 0;
ctx->qc = qc;
if (qc_is_listener(qc)) {
if (qc_ssl_sess_init(qc, bc->initial_ctx, &ctx->ssl) == -1)
goto err;
#if (HA_OPENSSL_VERSION_NUMBER >= 0x10101000L)
#ifndef USE_QUIC_OPENSSL_COMPAT
/* Enabling 0-RTT */
if (bc->ssl_conf.early_data)
SSL_set_quic_early_data_enabled(ctx->ssl, 1);
#endif
#endif
SSL_set_accept_state(ctx->ssl);
}
ctx->xprt = xprt_get(XPRT_QUIC);
/* Store the allocated context in <qc>. */
qc->xprt_ctx = ctx;
ret = 1;
leave:
TRACE_LEAVE(QUIC_EV_CONN_NEW, qc);
return !ret;
err:
pool_free(pool_head_quic_conn_ctx, ctx);
goto leave;
}
/* Check that all the bytes between <pos> included and <end> address
* excluded are null. It is the responsibility of the caller to
* check that there is at least one byte between <pos> and <end>.

src/quic_ssl.c (new file, 685 lines)
View File

@@ -0,0 +1,685 @@
#include <haproxy/errors.h>
#include <haproxy/ncbuf.h>
#include <haproxy/quic_conn.h>
#include <haproxy/quic_sock.h>
#include <haproxy/quic_ssl.h>
#include <haproxy/quic_tls.h>
#include <haproxy/quic_tp.h>
#include <haproxy/ssl_sock.h>
#define TRACE_SOURCE &trace_quic
DECLARE_POOL(pool_head_quic_ssl_sock_ctx, "quic_ssl_sock_ctx", sizeof(struct ssl_sock_ctx));
/* Set the encoded version of the transport parameter into the TLS
* stack depending on <ver> QUIC version and <server> boolean which must
* be set to 1 for a QUIC server, 0 for a client.
* Return 1 if succeeded, 0 if not.
*/
static int qc_ssl_set_quic_transport_params(struct quic_conn *qc,
const struct quic_version *ver, int server)
{
int ret = 0;
#ifdef USE_QUIC_OPENSSL_COMPAT
unsigned char *in = qc->enc_params;
size_t insz = sizeof qc->enc_params;
size_t *enclen = &qc->enc_params_len;
#else
unsigned char tps[QUIC_TP_MAX_ENCLEN];
size_t tpslen;
unsigned char *in = tps;
size_t insz = sizeof tps;
size_t *enclen = &tpslen;
#endif
TRACE_ENTER(QUIC_EV_CONN_RWSEC, qc);
*enclen = quic_transport_params_encode(in, in + insz, &qc->rx.params, ver, server);
if (!*enclen) {
TRACE_ERROR("quic_transport_params_encode() failed", QUIC_EV_CONN_RWSEC);
goto leave;
}
if (!SSL_set_quic_transport_params(qc->xprt_ctx->ssl, in, *enclen)) {
TRACE_ERROR("SSL_set_quic_transport_params() failed", QUIC_EV_CONN_RWSEC);
goto leave;
}
ret = 1;
leave:
TRACE_LEAVE(QUIC_EV_CONN_RWSEC, qc);
return ret;
}
/* This function copies the CRYPTO data provided by the TLS stack and found at <data>,
* with <len> as size, into CRYPTO buffers dedicated to storing the information about
* outgoing CRYPTO frames, so as to be able to replay the CRYPTO data streams.
* It fails (returns 0) only if it could not manage to allocate enough CRYPTO
* buffers to store all the data.
* Note that CRYPTO data may exist at any encryption level except 0-RTT.
*/
static int qc_ssl_crypto_data_cpy(struct quic_conn *qc, struct quic_enc_level *qel,
const unsigned char *data, size_t len)
{
struct quic_crypto_buf **qcb;
/* The remaining byte to store in CRYPTO buffers. */
size_t cf_offset, cf_len, *nb_buf;
unsigned char *pos;
int ret = 0;
nb_buf = &qel->tx.crypto.nb_buf;
qcb = &qel->tx.crypto.bufs[*nb_buf - 1];
cf_offset = (*nb_buf - 1) * QUIC_CRYPTO_BUF_SZ + (*qcb)->sz;
cf_len = len;
TRACE_ENTER(QUIC_EV_CONN_ADDDATA, qc);
while (len) {
size_t to_copy, room;
pos = (*qcb)->data + (*qcb)->sz;
room = QUIC_CRYPTO_BUF_SZ - (*qcb)->sz;
to_copy = len > room ? room : len;
if (to_copy) {
memcpy(pos, data, to_copy);
/* Increment the total size of this CRYPTO buffers by <to_copy>. */
qel->tx.crypto.sz += to_copy;
(*qcb)->sz += to_copy;
len -= to_copy;
data += to_copy;
}
else {
struct quic_crypto_buf **tmp;
// FIXME: realloc!
tmp = realloc(qel->tx.crypto.bufs,
(*nb_buf + 1) * sizeof *qel->tx.crypto.bufs);
if (tmp) {
qel->tx.crypto.bufs = tmp;
qcb = &qel->tx.crypto.bufs[*nb_buf];
*qcb = pool_alloc(pool_head_quic_crypto_buf);
if (!*qcb) {
TRACE_ERROR("Could not allocate crypto buf", QUIC_EV_CONN_ADDDATA, qc);
goto leave;
}
(*qcb)->sz = 0;
++*nb_buf;
}
else {
break;
}
}
}
/* Allocate a TX CRYPTO frame only if all the CRYPTO data
* have been buffered.
*/
if (!len) {
struct quic_frame *frm;
struct quic_frame *found = NULL;
/* There is at most one CRYPTO frame in this packet number
* space. Let's look for it.
*/
list_for_each_entry(frm, &qel->pktns->tx.frms, list) {
if (frm->type != QUIC_FT_CRYPTO)
continue;
/* Found */
found = frm;
break;
}
if (found) {
found->crypto.len += cf_len;
}
else {
frm = qc_frm_alloc(QUIC_FT_CRYPTO);
if (!frm) {
TRACE_ERROR("Could not allocate quic frame", QUIC_EV_CONN_ADDDATA, qc);
goto leave;
}
frm->crypto.offset = cf_offset;
frm->crypto.len = cf_len;
frm->crypto.qel = qel;
LIST_APPEND(&qel->pktns->tx.frms, &frm->list);
}
}
ret = len == 0;
leave:
TRACE_LEAVE(QUIC_EV_CONN_ADDDATA, qc);
return ret;
}
/* returns 0 on error, 1 on success */
static int ha_quic_set_encryption_secrets(SSL *ssl, enum ssl_encryption_level_t level,
const uint8_t *read_secret,
const uint8_t *write_secret, size_t secret_len)
{
int ret = 0;
struct quic_conn *qc = SSL_get_ex_data(ssl, ssl_qc_app_data_index);
struct quic_enc_level **qel = ssl_to_qel_addr(qc, level);
struct quic_pktns **pktns = ssl_to_quic_pktns(qc, level);
struct quic_tls_ctx *tls_ctx;
const SSL_CIPHER *cipher = SSL_get_current_cipher(ssl);
struct quic_tls_secrets *rx = NULL, *tx = NULL;
const struct quic_version *ver =
qc->negotiated_version ? qc->negotiated_version : qc->original_version;
TRACE_ENTER(QUIC_EV_CONN_RWSEC, qc);
BUG_ON(secret_len > QUIC_TLS_SECRET_LEN);
if (!*qel && !qc_enc_level_alloc(qc, pktns, qel, level)) {
TRACE_PROTO("Could not allocated an encryption level", QUIC_EV_CONN_ADDDATA, qc);
goto out;
}
tls_ctx = &(*qel)->tls_ctx;
if (qc->flags & QUIC_FL_CONN_TO_KILL) {
TRACE_PROTO("connection to be killed", QUIC_EV_CONN_ADDDATA, qc);
goto out;
}
if (qc->flags & QUIC_FL_CONN_IMMEDIATE_CLOSE) {
TRACE_PROTO("CC required", QUIC_EV_CONN_RWSEC, qc);
goto out;
}
if (!read_secret)
goto write;
rx = &tls_ctx->rx;
if (!quic_tls_secrets_keys_alloc(rx)) {
TRACE_ERROR("RX keys allocation failed", QUIC_EV_CONN_RWSEC, qc);
goto leave;
}
rx->aead = tls_aead(cipher);
rx->md = tls_md(cipher);
rx->hp = tls_hp(cipher);
if (!quic_tls_derive_keys(rx->aead, rx->hp, rx->md, ver, rx->key, rx->keylen,
rx->iv, rx->ivlen, rx->hp_key, sizeof rx->hp_key,
read_secret, secret_len)) {
TRACE_ERROR("TX key derivation failed", QUIC_EV_CONN_RWSEC, qc);
goto leave;
}
if (!quic_tls_rx_ctx_init(&rx->ctx, rx->aead, rx->key)) {
TRACE_ERROR("could not initial RX TLS cipher context", QUIC_EV_CONN_RWSEC, qc);
goto leave;
}
if (!quic_tls_dec_aes_ctx_init(&rx->hp_ctx, rx->hp, rx->hp_key)) {
TRACE_ERROR("could not initial RX TLS cipher context for HP", QUIC_EV_CONN_RWSEC, qc);
goto leave;
}
/* Enqueue this connection asap if we could derive 0-RTT secrets as
* listener. Note that a listener derives only RX secrets for this
* level.
*/
if (qc_is_listener(qc) && level == ssl_encryption_early_data) {
TRACE_DEVEL("pushing connection into accept queue", QUIC_EV_CONN_RWSEC, qc);
quic_accept_push_qc(qc);
}
write:
if (!write_secret)
goto keyupdate_init;
tx = &tls_ctx->tx;
if (!quic_tls_secrets_keys_alloc(tx)) {
TRACE_ERROR("TX keys allocation failed", QUIC_EV_CONN_RWSEC, qc);
goto leave;
}
tx->aead = tls_aead(cipher);
tx->md = tls_md(cipher);
tx->hp = tls_hp(cipher);
if (!quic_tls_derive_keys(tx->aead, tx->hp, tx->md, ver, tx->key, tx->keylen,
tx->iv, tx->ivlen, tx->hp_key, sizeof tx->hp_key,
write_secret, secret_len)) {
TRACE_ERROR("TX key derivation failed", QUIC_EV_CONN_RWSEC, qc);
goto leave;
}
if (!quic_tls_tx_ctx_init(&tx->ctx, tx->aead, tx->key)) {
TRACE_ERROR("could not initial RX TLS cipher context", QUIC_EV_CONN_RWSEC, qc);
goto leave;
}
if (!quic_tls_enc_aes_ctx_init(&tx->hp_ctx, tx->hp, tx->hp_key)) {
TRACE_ERROR("could not initial TX TLS cipher context for HP", QUIC_EV_CONN_RWSEC, qc);
goto leave;
}
/* Set the transport parameters in the TLS stack. */
if (level == ssl_encryption_handshake && qc_is_listener(qc) &&
!qc_ssl_set_quic_transport_params(qc, ver, 1))
goto leave;
keyupdate_init:
/* Store the secret provided by the TLS stack, required for keyupdate. */
if (level == ssl_encryption_application) {
struct quic_tls_kp *prv_rx = &qc->ku.prv_rx;
struct quic_tls_kp *nxt_rx = &qc->ku.nxt_rx;
struct quic_tls_kp *nxt_tx = &qc->ku.nxt_tx;
if (rx) {
if (!(rx->secret = pool_alloc(pool_head_quic_tls_secret))) {
TRACE_ERROR("Could not allocate RX Application secrete keys", QUIC_EV_CONN_RWSEC, qc);
goto leave;
}
memcpy(rx->secret, read_secret, secret_len);
rx->secretlen = secret_len;
}
if (tx) {
if (!(tx->secret = pool_alloc(pool_head_quic_tls_secret))) {
TRACE_ERROR("Could not allocate TX Application secrete keys", QUIC_EV_CONN_RWSEC, qc);
goto leave;
}
memcpy(tx->secret, write_secret, secret_len);
tx->secretlen = secret_len;
}
/* Initialize all the secret keys lengths */
prv_rx->secretlen = nxt_rx->secretlen = nxt_tx->secretlen = secret_len;
}
out:
ret = 1;
leave:
TRACE_LEAVE(QUIC_EV_CONN_RWSEC, qc, &level);
return ret;
}
/* ->add_handshake_data QUIC TLS callback used by the QUIC TLS stack when it
* wants to provide the QUIC layer with CRYPTO data.
* Returns 1 if succeeded, 0 if not.
*/
static int ha_quic_add_handshake_data(SSL *ssl, enum ssl_encryption_level_t level,
const uint8_t *data, size_t len)
{
int ret = 0;
struct quic_conn *qc = SSL_get_ex_data(ssl, ssl_qc_app_data_index);
struct quic_enc_level **qel = ssl_to_qel_addr(qc, level);
struct quic_pktns **pktns = ssl_to_quic_pktns(qc, level);
TRACE_ENTER(QUIC_EV_CONN_ADDDATA, qc);
if (qc->flags & QUIC_FL_CONN_TO_KILL) {
TRACE_PROTO("connection to be killed", QUIC_EV_CONN_ADDDATA, qc);
goto out;
}
if (qc->flags & QUIC_FL_CONN_IMMEDIATE_CLOSE) {
TRACE_PROTO("CC required", QUIC_EV_CONN_ADDDATA, qc);
goto out;
}
if (!*qel && !qc_enc_level_alloc(qc, pktns, qel, level))
goto leave;
if (!qc_ssl_crypto_data_cpy(qc, *qel, data, len)) {
TRACE_ERROR("Could not bufferize", QUIC_EV_CONN_ADDDATA, qc);
goto leave;
}
TRACE_DEVEL("CRYPTO data buffered", QUIC_EV_CONN_ADDDATA,
qc, &level, &len);
out:
ret = 1;
leave:
TRACE_LEAVE(QUIC_EV_CONN_ADDDATA, qc);
return ret;
}
static int ha_quic_flush_flight(SSL *ssl)
{
struct quic_conn *qc = SSL_get_ex_data(ssl, ssl_qc_app_data_index);
TRACE_ENTER(QUIC_EV_CONN_FFLIGHT, qc);
TRACE_LEAVE(QUIC_EV_CONN_FFLIGHT, qc);
return 1;
}
static int ha_quic_send_alert(SSL *ssl, enum ssl_encryption_level_t level, uint8_t alert)
{
struct quic_conn *qc = SSL_get_ex_data(ssl, ssl_qc_app_data_index);
TRACE_ENTER(QUIC_EV_CONN_SSLALERT, qc);
TRACE_PROTO("Received TLS alert", QUIC_EV_CONN_SSLALERT, qc, &alert, &level);
quic_set_tls_alert(qc, alert);
TRACE_LEAVE(QUIC_EV_CONN_SSLALERT, qc);
return 1;
}
/* QUIC TLS methods */
static SSL_QUIC_METHOD ha_quic_method = {
.set_encryption_secrets = ha_quic_set_encryption_secrets,
.add_handshake_data = ha_quic_add_handshake_data,
.flush_flight = ha_quic_flush_flight,
.send_alert = ha_quic_send_alert,
};
/* Initialize the TLS context of a listener with <bind_conf> as configuration.
* Returns an error count.
*/
int ssl_quic_initial_ctx(struct bind_conf *bind_conf)
{
struct ssl_bind_conf __maybe_unused *ssl_conf_cur;
int cfgerr = 0;
long options =
(SSL_OP_ALL & ~SSL_OP_DONT_INSERT_EMPTY_FRAGMENTS) |
SSL_OP_SINGLE_ECDH_USE |
SSL_OP_CIPHER_SERVER_PREFERENCE;
SSL_CTX *ctx;
ctx = SSL_CTX_new(TLS_server_method());
bind_conf->initial_ctx = ctx;
SSL_CTX_set_options(ctx, options);
SSL_CTX_set_mode(ctx, SSL_MODE_RELEASE_BUFFERS);
SSL_CTX_set_min_proto_version(ctx, TLS1_3_VERSION);
SSL_CTX_set_max_proto_version(ctx, TLS1_3_VERSION);
#ifdef SSL_CTRL_SET_TLSEXT_HOSTNAME
# if defined(HAVE_SSL_CLIENT_HELLO_CB)
# if defined(SSL_OP_NO_ANTI_REPLAY)
if (bind_conf->ssl_conf.early_data) {
SSL_CTX_set_options(ctx, SSL_OP_NO_ANTI_REPLAY);
#ifndef USE_QUIC_OPENSSL_COMPAT
SSL_CTX_set_max_early_data(ctx, 0xffffffff);
#endif
}
# endif /* !SSL_OP_NO_ANTI_REPLAY */
SSL_CTX_set_client_hello_cb(ctx, ssl_sock_switchctx_cbk, NULL);
SSL_CTX_set_tlsext_servername_callback(ctx, ssl_sock_switchctx_err_cbk);
# else /* ! HAVE_SSL_CLIENT_HELLO_CB */
SSL_CTX_set_tlsext_servername_callback(ctx, ssl_sock_switchctx_cbk);
# endif
SSL_CTX_set_tlsext_servername_arg(ctx, bind_conf);
#endif
#ifdef USE_QUIC_OPENSSL_COMPAT
if (!quic_tls_compat_init(bind_conf, ctx))
cfgerr |= ERR_ALERT | ERR_FATAL;
#endif
return cfgerr;
}
/* This function gives the details of the SSL errors. It is used only
* if the debug mode and the verbose mode are activated. It dumps all
* the SSL errors until the stack is empty.
*/
static forceinline void qc_ssl_dump_errors(struct connection *conn)
{
if (unlikely(global.mode & MODE_DEBUG)) {
while (1) {
const char *func = NULL;
unsigned long ret;
ERR_peek_error_func(&func);
ret = ERR_get_error();
if (!ret)
return;
fprintf(stderr, "conn. @%p OpenSSL error[0x%lx] %s: %s\n", conn, ret,
func, ERR_reason_error_string(ret));
}
}
}
/* Provide CRYPTO data to the TLS stack found at <data> with <len> as length,
* at <level> encryption level, with <ctx> as QUIC connection context.
* <ncbuf> is the non-contiguous buffer for this level's CRYPTO stream; it is
* advanced by <len> even on error, so that the memory is released as soon as
* possible.
* Return 1 if succeeded, 0 if not.
*/
int qc_ssl_provide_quic_data(struct ncbuf *ncbuf,
enum ssl_encryption_level_t level,
struct ssl_sock_ctx *ctx,
const unsigned char *data, size_t len)
{
#ifdef DEBUG_STRICT
enum ncb_ret ncb_ret;
#endif
int ssl_err, state;
struct quic_conn *qc;
int ret = 0;
ssl_err = SSL_ERROR_NONE;
qc = ctx->qc;
TRACE_ENTER(QUIC_EV_CONN_SSLDATA, qc);
if (SSL_provide_quic_data(ctx->ssl, level, data, len) != 1) {
TRACE_ERROR("SSL_provide_quic_data() error",
QUIC_EV_CONN_SSLDATA, qc, NULL, NULL, ctx->ssl);
goto leave;
}
state = qc->state;
if (state < QUIC_HS_ST_COMPLETE) {
ssl_err = SSL_do_handshake(ctx->ssl);
if (qc->flags & QUIC_FL_CONN_TO_KILL) {
TRACE_DEVEL("connection to be killed", QUIC_EV_CONN_IO_CB, qc);
goto leave;
}
/* Finalize the connection as soon as possible if the peer transport parameters
* have been received. This may be useful to send packets even if this
* handshake fails.
*/
if ((qc->flags & QUIC_FL_CONN_TX_TP_RECEIVED) && !qc_conn_finalize(qc, 1)) {
TRACE_ERROR("connection finalization failed", QUIC_EV_CONN_IO_CB, qc, &state);
goto leave;
}
if (ssl_err != 1) {
ssl_err = SSL_get_error(ctx->ssl, ssl_err);
if (ssl_err == SSL_ERROR_WANT_READ || ssl_err == SSL_ERROR_WANT_WRITE) {
TRACE_PROTO("SSL handshake in progress",
QUIC_EV_CONN_IO_CB, qc, &state, &ssl_err);
goto out;
}
/* TODO: Should close the connection asap */
if (!(qc->flags & QUIC_FL_CONN_HALF_OPEN_CNT_DECREMENTED)) {
qc->flags |= QUIC_FL_CONN_HALF_OPEN_CNT_DECREMENTED;
HA_ATOMIC_DEC(&qc->prx_counters->half_open_conn);
HA_ATOMIC_INC(&qc->prx_counters->hdshk_fail);
}
TRACE_ERROR("SSL handshake error", QUIC_EV_CONN_IO_CB, qc, &state, &ssl_err);
qc_ssl_dump_errors(ctx->conn);
ERR_clear_error();
goto leave;
}
TRACE_PROTO("SSL handshake OK", QUIC_EV_CONN_IO_CB, qc, &state);
/* Check the alpn could be negotiated */
if (!qc->app_ops) {
TRACE_ERROR("No negotiated ALPN", QUIC_EV_CONN_IO_CB, qc, &state);
quic_set_tls_alert(qc, SSL_AD_NO_APPLICATION_PROTOCOL);
goto leave;
}
if (!(qc->flags & QUIC_FL_CONN_HALF_OPEN_CNT_DECREMENTED)) {
TRACE_DEVEL("dec half open counter", QUIC_EV_CONN_IO_CB, qc, &state);
qc->flags |= QUIC_FL_CONN_HALF_OPEN_CNT_DECREMENTED;
HA_ATOMIC_DEC(&qc->prx_counters->half_open_conn);
}
/* I/O callback switch */
qc->wait_event.tasklet->process = quic_conn_app_io_cb;
if (qc_is_listener(ctx->qc)) {
qc->flags |= QUIC_FL_CONN_NEED_POST_HANDSHAKE_FRMS;
qc->state = QUIC_HS_ST_CONFIRMED;
/* The connection is ready to be accepted. */
quic_accept_push_qc(qc);
}
else {
qc->state = QUIC_HS_ST_COMPLETE;
}
/* Prepare the next key update */
if (!quic_tls_key_update(qc)) {
TRACE_ERROR("quic_tls_key_update() failed", QUIC_EV_CONN_IO_CB, qc);
goto leave;
}
} else {
ssl_err = SSL_process_quic_post_handshake(ctx->ssl);
if (ssl_err != 1) {
ssl_err = SSL_get_error(ctx->ssl, ssl_err);
if (ssl_err == SSL_ERROR_WANT_READ || ssl_err == SSL_ERROR_WANT_WRITE) {
TRACE_PROTO("SSL post handshake in progress",
QUIC_EV_CONN_IO_CB, qc, &state, &ssl_err);
goto out;
}
TRACE_ERROR("SSL post handshake error",
QUIC_EV_CONN_IO_CB, qc, &state, &ssl_err);
goto leave;
}
TRACE_STATE("SSL post handshake succeeded", QUIC_EV_CONN_IO_CB, qc, &state);
}
out:
ret = 1;
leave:
/* The CRYPTO data are consumed even in case of an error to release
* the memory asap.
*/
if (!ncb_is_null(ncbuf)) {
#ifdef DEBUG_STRICT
ncb_ret = ncb_advance(ncbuf, len);
/* ncb_advance() must always succeed. This is guaranteed as
* this is only done inside a data block. If false, this will
* lead to handshake failure with quic_enc_level offset shifted
* from buffer data.
*/
BUG_ON(ncb_ret != NCB_RET_OK);
#else
ncb_advance(ncbuf, len);
#endif
}
TRACE_LEAVE(QUIC_EV_CONN_SSLDATA, qc);
return ret;
}
/* Try to allocate the <*ssl> SSL session object for the <qc> QUIC connection,
* with <ssl_ctx> as inherited SSL context settings. Also set the transport
* parameters of this session.
* It is the responsibility of the caller to check the validity of all the
* pointers passed as parameters to this function.
* Return 0 if succeeded, -1 if not. If it fails, the ->err_code member of
* <qc->conn> is set to CO_ER_SSL_NO_MEM.
*/
static int qc_ssl_sess_init(struct quic_conn *qc, SSL_CTX *ssl_ctx, SSL **ssl)
{
int retry, ret = -1;
TRACE_ENTER(QUIC_EV_CONN_NEW, qc);
retry = 1;
retry:
*ssl = SSL_new(ssl_ctx);
if (!*ssl) {
if (!retry--)
goto leave;
pool_gc(NULL);
goto retry;
}
if (!SSL_set_ex_data(*ssl, ssl_qc_app_data_index, qc) ||
!SSL_set_quic_method(*ssl, &ha_quic_method)) {
SSL_free(*ssl);
*ssl = NULL;
if (!retry--)
goto leave;
pool_gc(NULL);
goto retry;
}
ret = 0;
leave:
TRACE_LEAVE(QUIC_EV_CONN_NEW, qc);
return ret;
}
/* Allocate the ssl_sock_ctx from connection <qc>. This creates the tasklet
* used to process <qc> received packets. The allocated context is stored in
* <qc.xprt_ctx>.
*
* Returns 0 on success else non-zero.
*/
int qc_alloc_ssl_sock_ctx(struct quic_conn *qc)
{
int ret = 0;
struct bind_conf *bc = qc->li->bind_conf;
struct ssl_sock_ctx *ctx = NULL;
TRACE_ENTER(QUIC_EV_CONN_NEW, qc);
ctx = pool_alloc(pool_head_quic_ssl_sock_ctx);
if (!ctx) {
TRACE_ERROR("SSL context allocation failed", QUIC_EV_CONN_TXPKT);
goto err;
}
ctx->conn = NULL;
ctx->bio = NULL;
ctx->xprt = NULL;
ctx->xprt_ctx = NULL;
memset(&ctx->wait_event, 0, sizeof(ctx->wait_event));
ctx->subs = NULL;
ctx->xprt_st = 0;
ctx->error_code = 0;
ctx->early_buf = BUF_NULL;
ctx->sent_early_data = 0;
ctx->qc = qc;
if (qc_is_listener(qc)) {
if (qc_ssl_sess_init(qc, bc->initial_ctx, &ctx->ssl) == -1)
goto err;
#if (HA_OPENSSL_VERSION_NUMBER >= 0x10101000L)
#ifndef USE_QUIC_OPENSSL_COMPAT
/* Enabling 0-RTT */
if (bc->ssl_conf.early_data)
SSL_set_quic_early_data_enabled(ctx->ssl, 1);
#endif
#endif
SSL_set_accept_state(ctx->ssl);
}
ctx->xprt = xprt_get(XPRT_QUIC);
/* Store the allocated context in <qc>. */
qc->xprt_ctx = ctx;
ret = 1;
leave:
TRACE_LEAVE(QUIC_EV_CONN_NEW, qc);
return !ret;
err:
pool_free(pool_head_quic_ssl_sock_ctx, ctx);
goto leave;
}

View File

@@ -973,3 +973,51 @@ void quic_tls_rotate_keys(struct quic_conn *qc)
TRACE_LEAVE(QUIC_EV_CONN_RXPKT, qc);
}
/* Release the memory allocated for the QUIC TLS context with <ctx> as address. */
void quic_tls_ctx_free(struct quic_tls_ctx **ctx)
{
if (!*ctx)
return;
quic_tls_ctx_secs_free(*ctx);
pool_free(pool_head_quic_tls_ctx, *ctx);
*ctx = NULL;
}
/* Finalize <qc> QUIC connection:
* - allocate and initialize the Initial QUIC TLS context for the negotiated
* version if needed,
* - derive the secrets for this context,
* - set them into the TLS stack,
*
* Return 1 if succeeded, 0 if not.
*/
int quic_tls_finalize(struct quic_conn *qc, int server)
{
int ret = 0;
TRACE_ENTER(QUIC_EV_CONN_NEW, qc);
if (!qc->negotiated_version)
goto done;
qc->nictx = pool_alloc(pool_head_quic_tls_ctx);
if (!qc->nictx)
goto err;
quic_tls_ctx_reset(qc->nictx);
if (!qc_new_isecs(qc, qc->nictx, qc->negotiated_version,
qc->odcid.data, qc->odcid.len, server))
goto err;
done:
ret = 1;
out:
TRACE_LEAVE(QUIC_EV_CONN_NEW, qc);
return ret;
err:
quic_tls_ctx_free(&qc->nictx);
goto out;
}