Diffstat (limited to 'crypto/openssl/ssl/quic')
-rw-r--r--  crypto/openssl/ssl/quic/build.info | 29
-rw-r--r--  crypto/openssl/ssl/quic/cc_newreno.c | 485
-rw-r--r--  crypto/openssl/ssl/quic/json_enc.c | 738
-rw-r--r--  crypto/openssl/ssl/quic/qlog.c | 728
-rw-r--r--  crypto/openssl/ssl/quic/qlog_event_helpers.c | 636
-rw-r--r--  crypto/openssl/ssl/quic/quic_ackm.c | 1744
-rw-r--r--  crypto/openssl/ssl/quic/quic_cfq.c | 363
-rw-r--r--  crypto/openssl/ssl/quic/quic_channel.c | 4128
-rw-r--r--  crypto/openssl/ssl/quic/quic_channel_local.h | 474
-rw-r--r--  crypto/openssl/ssl/quic/quic_demux.c | 474
-rw-r--r--  crypto/openssl/ssl/quic/quic_engine.c | 194
-rw-r--r--  crypto/openssl/ssl/quic/quic_engine_local.h | 59
-rw-r--r--  crypto/openssl/ssl/quic/quic_fc.c | 411
-rw-r--r--  crypto/openssl/ssl/quic/quic_fifd.c | 312
-rw-r--r--  crypto/openssl/ssl/quic/quic_impl.c | 5386
-rw-r--r--  crypto/openssl/ssl/quic/quic_lcidm.c | 617
-rw-r--r--  crypto/openssl/ssl/quic/quic_local.h | 342
-rw-r--r--  crypto/openssl/ssl/quic/quic_method.c | 27
-rw-r--r--  crypto/openssl/ssl/quic/quic_obj.c | 137
-rw-r--r--  crypto/openssl/ssl/quic/quic_obj_local.h | 341
-rw-r--r--  crypto/openssl/ssl/quic/quic_port.c | 1747
-rw-r--r--  crypto/openssl/ssl/quic/quic_port_local.h | 123
-rw-r--r--  crypto/openssl/ssl/quic/quic_rcidm.c | 688
-rw-r--r--  crypto/openssl/ssl/quic/quic_reactor.c | 590
-rw-r--r--  crypto/openssl/ssl/quic/quic_reactor_wait_ctx.c | 85
-rw-r--r--  crypto/openssl/ssl/quic/quic_record_rx.c | 1603
-rw-r--r--  crypto/openssl/ssl/quic/quic_record_shared.c | 489
-rw-r--r--  crypto/openssl/ssl/quic/quic_record_shared.h | 150
-rw-r--r--  crypto/openssl/ssl/quic/quic_record_tx.c | 1105
-rw-r--r--  crypto/openssl/ssl/quic/quic_record_util.c | 287
-rw-r--r--  crypto/openssl/ssl/quic/quic_rstream.c | 295
-rw-r--r--  crypto/openssl/ssl/quic/quic_rx_depack.c | 1479
-rw-r--r--  crypto/openssl/ssl/quic/quic_sf_list.c | 334
-rw-r--r--  crypto/openssl/ssl/quic/quic_srt_gen.c | 84
-rw-r--r--  crypto/openssl/ssl/quic/quic_srtm.c | 565
-rw-r--r--  crypto/openssl/ssl/quic/quic_sstream.c | 424
-rw-r--r--  crypto/openssl/ssl/quic/quic_statm.c | 76
-rw-r--r--  crypto/openssl/ssl/quic/quic_stream_map.c | 861
-rw-r--r--  crypto/openssl/ssl/quic/quic_thread_assist.c | 148
-rw-r--r--  crypto/openssl/ssl/quic/quic_tls.c | 949
-rw-r--r--  crypto/openssl/ssl/quic/quic_tls_api.c | 208
-rw-r--r--  crypto/openssl/ssl/quic/quic_trace.c | 650
-rw-r--r--  crypto/openssl/ssl/quic/quic_tserver.c | 584
-rw-r--r--  crypto/openssl/ssl/quic/quic_txp.c | 3256
-rw-r--r--  crypto/openssl/ssl/quic/quic_txpim.c | 229
-rw-r--r--  crypto/openssl/ssl/quic/quic_types.c | 29
-rw-r--r--  crypto/openssl/ssl/quic/quic_wire.c | 1078
-rw-r--r--  crypto/openssl/ssl/quic/quic_wire_pkt.c | 962
-rw-r--r--  crypto/openssl/ssl/quic/uint_set.c | 332
49 files changed, 37035 insertions(+), 0 deletions(-)
diff --git a/crypto/openssl/ssl/quic/build.info b/crypto/openssl/ssl/quic/build.info
new file mode 100644
index 000000000000..230341db7625
--- /dev/null
+++ b/crypto/openssl/ssl/quic/build.info
@@ -0,0 +1,29 @@
+$LIBSSL=../../libssl
+
+# The QUIC TLS API is available even when QUIC is disabled (no-quic)
+SOURCE[$LIBSSL]=quic_tls.c quic_tls_api.c
+IF[{- !$disabled{quic} -}]
+ SOURCE[$LIBSSL]=quic_method.c quic_impl.c quic_wire.c quic_ackm.c quic_statm.c
+ SOURCE[$LIBSSL]=cc_newreno.c quic_demux.c quic_record_rx.c
+ SOURCE[$LIBSSL]=quic_record_tx.c quic_record_util.c quic_record_shared.c quic_wire_pkt.c
+ SOURCE[$LIBSSL]=quic_rx_depack.c
+ SOURCE[$LIBSSL]=quic_fc.c uint_set.c
+ SOURCE[$LIBSSL]=quic_cfq.c quic_txpim.c quic_fifd.c quic_txp.c
+ SOURCE[$LIBSSL]=quic_stream_map.c
+ SOURCE[$LIBSSL]=quic_sf_list.c quic_rstream.c quic_sstream.c
+ SOURCE[$LIBSSL]=quic_reactor.c
+ SOURCE[$LIBSSL]=quic_reactor_wait_ctx.c
+ SOURCE[$LIBSSL]=quic_channel.c quic_port.c quic_engine.c
+ SOURCE[$LIBSSL]=quic_tserver.c
+ SOURCE[$LIBSSL]=quic_thread_assist.c
+ SOURCE[$LIBSSL]=quic_trace.c
+ SOURCE[$LIBSSL]=quic_srtm.c quic_srt_gen.c
+ SOURCE[$LIBSSL]=quic_lcidm.c quic_rcidm.c
+ SOURCE[$LIBSSL]=quic_types.c
+ SOURCE[$LIBSSL]=qlog_event_helpers.c
+ IF[{- !$disabled{qlog} -}]
+ SOURCE[$LIBSSL]=json_enc.c qlog.c
+ SHARED_SOURCE[$LIBSSL]=../../crypto/getenv.c ../../crypto/ctype.c
+ ENDIF
+ SOURCE[$LIBSSL]=quic_obj.c
+ENDIF
diff --git a/crypto/openssl/ssl/quic/cc_newreno.c b/crypto/openssl/ssl/quic/cc_newreno.c
new file mode 100644
index 000000000000..1fe37c276e58
--- /dev/null
+++ b/crypto/openssl/ssl/quic/cc_newreno.c
@@ -0,0 +1,485 @@
+#include "internal/quic_cc.h"
+#include "internal/quic_types.h"
+#include "internal/safe_math.h"
+
+OSSL_SAFE_MATH_UNSIGNED(u64, uint64_t)
+
+typedef struct ossl_cc_newreno_st {
+ /* Dependencies. */
+ OSSL_TIME (*now_cb)(void *arg);
+ void *now_cb_arg;
+
+ /* 'Constants' (which we allow to be configurable). */
+ uint64_t k_init_wnd, k_min_wnd;
+ uint32_t k_loss_reduction_factor_num, k_loss_reduction_factor_den;
+ uint32_t persistent_cong_thresh;
+
+ /* State. */
+ size_t max_dgram_size;
+ uint64_t bytes_in_flight, cong_wnd, slow_start_thresh, bytes_acked;
+ OSSL_TIME cong_recovery_start_time;
+
+ /* Unflushed state during multiple on-loss calls. */
+ int processing_loss; /* 1 if not flushed */
+ OSSL_TIME tx_time_of_last_loss;
+
+ /* Diagnostic state. */
+ int in_congestion_recovery;
+
+ /* Diagnostic output locations. */
+ size_t *p_diag_max_dgram_payload_len;
+ uint64_t *p_diag_cur_cwnd_size;
+ uint64_t *p_diag_min_cwnd_size;
+ uint64_t *p_diag_cur_bytes_in_flight;
+ uint32_t *p_diag_cur_state;
+} OSSL_CC_NEWRENO;
+
+#define MIN_MAX_INIT_WND_SIZE 14720 /* RFC 9002 s. 7.2 */
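+/*
+ * Per RFC 9002 s. 7.2 the initial window is
+ * min(10 * max_datagram_size, max(2 * max_datagram_size, 14720)), as computed
+ * in newreno_set_max_dgram_size() below. E.g. for a 1200-byte maximum
+ * datagram size: min(12000, max(2400, 14720)) = 12000 bytes.
+ */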
+
+/* TODO(QUIC FUTURE): Pacing support. */
+
+static void newreno_set_max_dgram_size(OSSL_CC_NEWRENO *nr,
+ size_t max_dgram_size);
+static void newreno_update_diag(OSSL_CC_NEWRENO *nr);
+
+static void newreno_reset(OSSL_CC_DATA *cc);
+
+static OSSL_CC_DATA *newreno_new(OSSL_TIME (*now_cb)(void *arg),
+ void *now_cb_arg)
+{
+ OSSL_CC_NEWRENO *nr;
+
+ if ((nr = OPENSSL_zalloc(sizeof(*nr))) == NULL)
+ return NULL;
+
+ nr->now_cb = now_cb;
+ nr->now_cb_arg = now_cb_arg;
+
+ newreno_set_max_dgram_size(nr, QUIC_MIN_INITIAL_DGRAM_LEN);
+ newreno_reset((OSSL_CC_DATA *)nr);
+
+ return (OSSL_CC_DATA *)nr;
+}
+
+static void newreno_free(OSSL_CC_DATA *cc)
+{
+ OPENSSL_free(cc);
+}
+
+static void newreno_set_max_dgram_size(OSSL_CC_NEWRENO *nr,
+ size_t max_dgram_size)
+{
+ size_t max_init_wnd;
+ int is_reduced = (max_dgram_size < nr->max_dgram_size);
+
+ nr->max_dgram_size = max_dgram_size;
+
+ max_init_wnd = 2 * max_dgram_size;
+ if (max_init_wnd < MIN_MAX_INIT_WND_SIZE)
+ max_init_wnd = MIN_MAX_INIT_WND_SIZE;
+
+ nr->k_init_wnd = 10 * max_dgram_size;
+ if (nr->k_init_wnd > max_init_wnd)
+ nr->k_init_wnd = max_init_wnd;
+
+ nr->k_min_wnd = 2 * max_dgram_size;
+
+ if (is_reduced)
+ nr->cong_wnd = nr->k_init_wnd;
+
+ newreno_update_diag(nr);
+}
+
+static void newreno_reset(OSSL_CC_DATA *cc)
+{
+ OSSL_CC_NEWRENO *nr = (OSSL_CC_NEWRENO *)cc;
+
+ nr->k_loss_reduction_factor_num = 1;
+ nr->k_loss_reduction_factor_den = 2;
+ nr->persistent_cong_thresh = 3;
+
+ nr->cong_wnd = nr->k_init_wnd;
+ nr->bytes_in_flight = 0;
+ nr->bytes_acked = 0;
+ nr->slow_start_thresh = UINT64_MAX;
+ nr->cong_recovery_start_time = ossl_time_zero();
+
+ nr->processing_loss = 0;
+ nr->tx_time_of_last_loss = ossl_time_zero();
+ nr->in_congestion_recovery = 0;
+}
+
+static int newreno_set_input_params(OSSL_CC_DATA *cc, const OSSL_PARAM *params)
+{
+ OSSL_CC_NEWRENO *nr = (OSSL_CC_NEWRENO *)cc;
+ const OSSL_PARAM *p;
+ size_t value;
+
+ p = OSSL_PARAM_locate_const(params, OSSL_CC_OPTION_MAX_DGRAM_PAYLOAD_LEN);
+ if (p != NULL) {
+ if (!OSSL_PARAM_get_size_t(p, &value))
+ return 0;
+ if (value < QUIC_MIN_INITIAL_DGRAM_LEN)
+ return 0;
+
+ newreno_set_max_dgram_size(nr, value);
+ }
+
+ return 1;
+}
+
+static int bind_diag(OSSL_PARAM *params, const char *param_name, size_t len,
+ void **pp)
+{
+ const OSSL_PARAM *p = OSSL_PARAM_locate_const(params, param_name);
+
+ *pp = NULL;
+
+ if (p == NULL)
+ return 1;
+
+ if (p->data_type != OSSL_PARAM_UNSIGNED_INTEGER
+ || p->data_size != len)
+ return 0;
+
+ *pp = p->data;
+ return 1;
+}
+
+static int newreno_bind_diagnostic(OSSL_CC_DATA *cc, OSSL_PARAM *params)
+{
+ OSSL_CC_NEWRENO *nr = (OSSL_CC_NEWRENO *)cc;
+ size_t *new_p_max_dgram_payload_len;
+ uint64_t *new_p_cur_cwnd_size;
+ uint64_t *new_p_min_cwnd_size;
+ uint64_t *new_p_cur_bytes_in_flight;
+ uint32_t *new_p_cur_state;
+
+ if (!bind_diag(params, OSSL_CC_OPTION_MAX_DGRAM_PAYLOAD_LEN,
+ sizeof(size_t), (void **)&new_p_max_dgram_payload_len)
+ || !bind_diag(params, OSSL_CC_OPTION_CUR_CWND_SIZE,
+ sizeof(uint64_t), (void **)&new_p_cur_cwnd_size)
+ || !bind_diag(params, OSSL_CC_OPTION_MIN_CWND_SIZE,
+ sizeof(uint64_t), (void **)&new_p_min_cwnd_size)
+ || !bind_diag(params, OSSL_CC_OPTION_CUR_BYTES_IN_FLIGHT,
+ sizeof(uint64_t), (void **)&new_p_cur_bytes_in_flight)
+ || !bind_diag(params, OSSL_CC_OPTION_CUR_STATE,
+ sizeof(uint32_t), (void **)&new_p_cur_state))
+ return 0;
+
+ if (new_p_max_dgram_payload_len != NULL)
+ nr->p_diag_max_dgram_payload_len = new_p_max_dgram_payload_len;
+
+ if (new_p_cur_cwnd_size != NULL)
+ nr->p_diag_cur_cwnd_size = new_p_cur_cwnd_size;
+
+ if (new_p_min_cwnd_size != NULL)
+ nr->p_diag_min_cwnd_size = new_p_min_cwnd_size;
+
+ if (new_p_cur_bytes_in_flight != NULL)
+ nr->p_diag_cur_bytes_in_flight = new_p_cur_bytes_in_flight;
+
+ if (new_p_cur_state != NULL)
+ nr->p_diag_cur_state = new_p_cur_state;
+
+ newreno_update_diag(nr);
+ return 1;
+}
+
+static void unbind_diag(OSSL_PARAM *params, const char *param_name,
+ void **pp)
+{
+ const OSSL_PARAM *p = OSSL_PARAM_locate_const(params, param_name);
+
+ if (p != NULL)
+ *pp = NULL;
+}
+
+static int newreno_unbind_diagnostic(OSSL_CC_DATA *cc, OSSL_PARAM *params)
+{
+ OSSL_CC_NEWRENO *nr = (OSSL_CC_NEWRENO *)cc;
+
+ unbind_diag(params, OSSL_CC_OPTION_MAX_DGRAM_PAYLOAD_LEN,
+ (void **)&nr->p_diag_max_dgram_payload_len);
+ unbind_diag(params, OSSL_CC_OPTION_CUR_CWND_SIZE,
+ (void **)&nr->p_diag_cur_cwnd_size);
+ unbind_diag(params, OSSL_CC_OPTION_MIN_CWND_SIZE,
+ (void **)&nr->p_diag_min_cwnd_size);
+ unbind_diag(params, OSSL_CC_OPTION_CUR_BYTES_IN_FLIGHT,
+ (void **)&nr->p_diag_cur_bytes_in_flight);
+ unbind_diag(params, OSSL_CC_OPTION_CUR_STATE,
+ (void **)&nr->p_diag_cur_state);
+ return 1;
+}
+
+static void newreno_update_diag(OSSL_CC_NEWRENO *nr)
+{
+ if (nr->p_diag_max_dgram_payload_len != NULL)
+ *nr->p_diag_max_dgram_payload_len = nr->max_dgram_size;
+
+ if (nr->p_diag_cur_cwnd_size != NULL)
+ *nr->p_diag_cur_cwnd_size = nr->cong_wnd;
+
+ if (nr->p_diag_min_cwnd_size != NULL)
+ *nr->p_diag_min_cwnd_size = nr->k_min_wnd;
+
+ if (nr->p_diag_cur_bytes_in_flight != NULL)
+ *nr->p_diag_cur_bytes_in_flight = nr->bytes_in_flight;
+
+ if (nr->p_diag_cur_state != NULL) {
+ if (nr->in_congestion_recovery)
+ *nr->p_diag_cur_state = 'R';
+ else if (nr->cong_wnd < nr->slow_start_thresh)
+ *nr->p_diag_cur_state = 'S';
+ else
+ *nr->p_diag_cur_state = 'A';
+ }
+}
+
+static int newreno_in_cong_recovery(OSSL_CC_NEWRENO *nr, OSSL_TIME tx_time)
+{
+ return ossl_time_compare(tx_time, nr->cong_recovery_start_time) <= 0;
+}
+
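+/*
+ * Handle a congestion signal (loss or ECN-CE). Illustrative numbers: with the
+ * default loss reduction factor of 1/2, a congestion window of 40000 bytes
+ * collapses to a slow start threshold (and new window) of 20000 bytes,
+ * clamped below by k_min_wnd.
+ */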
+static void newreno_cong(OSSL_CC_NEWRENO *nr, OSSL_TIME tx_time)
+{
+ int err = 0;
+
+ /* No reaction if already in a recovery period. */
+ if (newreno_in_cong_recovery(nr, tx_time))
+ return;
+
+ /* Start a new recovery period. */
+ nr->in_congestion_recovery = 1;
+ nr->cong_recovery_start_time = nr->now_cb(nr->now_cb_arg);
+
+ /* slow_start_thresh = cong_wnd * loss_reduction_factor */
+ nr->slow_start_thresh
+ = safe_muldiv_u64(nr->cong_wnd,
+ nr->k_loss_reduction_factor_num,
+ nr->k_loss_reduction_factor_den,
+ &err);
+
+ if (err)
+ nr->slow_start_thresh = UINT64_MAX;
+
+ nr->cong_wnd = nr->slow_start_thresh;
+ if (nr->cong_wnd < nr->k_min_wnd)
+ nr->cong_wnd = nr->k_min_wnd;
+}
+
+static void newreno_flush(OSSL_CC_NEWRENO *nr, uint32_t flags)
+{
+ if (!nr->processing_loss)
+ return;
+
+ newreno_cong(nr, nr->tx_time_of_last_loss);
+
+ if ((flags & OSSL_CC_LOST_FLAG_PERSISTENT_CONGESTION) != 0) {
+ nr->cong_wnd = nr->k_min_wnd;
+ nr->cong_recovery_start_time = ossl_time_zero();
+ }
+
+ nr->processing_loss = 0;
+ newreno_update_diag(nr);
+}
+
+static uint64_t newreno_get_tx_allowance(OSSL_CC_DATA *cc)
+{
+ OSSL_CC_NEWRENO *nr = (OSSL_CC_NEWRENO *)cc;
+
+ if (nr->bytes_in_flight >= nr->cong_wnd)
+ return 0;
+
+ return nr->cong_wnd - nr->bytes_in_flight;
+}
+
+static OSSL_TIME newreno_get_wakeup_deadline(OSSL_CC_DATA *cc)
+{
+ if (newreno_get_tx_allowance(cc) > 0) {
+        /* We have TX allowance now, so wake up immediately. */
+ return ossl_time_zero();
+ } else {
+ /*
+ * The NewReno congestion controller does not vary its state in time,
+ * only in response to stimulus.
+ */
+ return ossl_time_infinite();
+ }
+}
+
+static int newreno_on_data_sent(OSSL_CC_DATA *cc, uint64_t num_bytes)
+{
+ OSSL_CC_NEWRENO *nr = (OSSL_CC_NEWRENO *)cc;
+
+ nr->bytes_in_flight += num_bytes;
+ newreno_update_diag(nr);
+ return 1;
+}
+
+static int newreno_is_cong_limited(OSSL_CC_NEWRENO *nr)
+{
+ uint64_t wnd_rem;
+
+ /* We are congestion-limited if we are already at the congestion window. */
+ if (nr->bytes_in_flight >= nr->cong_wnd)
+ return 1;
+
+ wnd_rem = nr->cong_wnd - nr->bytes_in_flight;
+
+ /*
+ * Consider ourselves congestion-limited if less than three datagrams' worth
+ * of congestion window remains to be spent, or if we are in slow start and
+ * have consumed half of our window.
+ */
+ return (nr->cong_wnd < nr->slow_start_thresh && wnd_rem <= nr->cong_wnd / 2)
+ || wnd_rem <= 3 * nr->max_dgram_size;
+}
+
+static int newreno_on_data_acked(OSSL_CC_DATA *cc,
+ const OSSL_CC_ACK_INFO *info)
+{
+ OSSL_CC_NEWRENO *nr = (OSSL_CC_NEWRENO *)cc;
+
+ /*
+ * Packet has been acked. Firstly, remove it from the aggregate count of
+ * bytes in flight.
+ */
+ nr->bytes_in_flight -= info->tx_size;
+
+ /*
+ * We use acknowledgement of data as a signal that we are not at channel
+ * capacity and that it may be reasonable to increase the congestion window.
+ * However, acknowledgement is not a useful signal that there is further
+ * capacity if we are not actually saturating the congestion window that we
+ * already have (for example, if the application is not generating much data
+ * or we are limited by flow control). Therefore, we only expand the
+ * congestion window if we are consuming a significant fraction of the
+ * congestion window.
+ */
+ if (!newreno_is_cong_limited(nr))
+ goto out;
+
+ /*
+ * We can handle acknowledgement of a packet in one of three ways
+ * depending on our current state:
+ *
+ * - Congestion Recovery: Do nothing. We don't start increasing
+ * the congestion window in response to acknowledgements until
+ * we are no longer in the Congestion Recovery state.
+ *
+ * - Slow Start: Increase the congestion window using the slow
+ * start scale.
+ *
+ * - Congestion Avoidance: Increase the congestion window using
+ * the congestion avoidance scale.
+ */
+ if (newreno_in_cong_recovery(nr, info->tx_time)) {
+ /* Congestion recovery, do nothing. */
+ } else if (nr->cong_wnd < nr->slow_start_thresh) {
+ /* When this condition is true we are in the Slow Start state. */
+ nr->cong_wnd += info->tx_size;
+ nr->in_congestion_recovery = 0;
+ } else {
+ /* Otherwise, we are in the Congestion Avoidance state. */
+ nr->bytes_acked += info->tx_size;
+
+ /*
+ * Avoid integer division as per RFC 9002 s. B.5. / RFC3465 s. 2.1.
+ */
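+            /*
+             * E.g. with cong_wnd = 12000 and max_dgram_size = 1200, the
+             * window grows by one datagram (1200 bytes) for every full
+             * window's worth (12000 bytes) of data acknowledged, i.e.
+             * approximately one datagram per RTT when the window is fully
+             * utilised.
+             */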
+ if (nr->bytes_acked >= nr->cong_wnd) {
+ nr->bytes_acked -= nr->cong_wnd;
+ nr->cong_wnd += nr->max_dgram_size;
+ }
+
+ nr->in_congestion_recovery = 0;
+ }
+
+out:
+ newreno_update_diag(nr);
+ return 1;
+}
+
+static int newreno_on_data_lost(OSSL_CC_DATA *cc,
+ const OSSL_CC_LOSS_INFO *info)
+{
+ OSSL_CC_NEWRENO *nr = (OSSL_CC_NEWRENO *)cc;
+
+ if (info->tx_size > nr->bytes_in_flight)
+ return 0;
+
+ nr->bytes_in_flight -= info->tx_size;
+
+ if (!nr->processing_loss) {
+
+ if (ossl_time_compare(info->tx_time, nr->tx_time_of_last_loss) <= 0)
+ /*
+ * After triggering congestion due to a lost packet at time t, don't
+ * trigger congestion again due to any subsequently detected lost
+ * packet at a time s < t, as we've effectively already signalled
+ * congestion on loss of that and subsequent packets.
+ */
+ goto out;
+
+ nr->processing_loss = 1;
+
+ /*
+ * Cancel any pending window increase in the Congestion Avoidance state.
+ */
+ nr->bytes_acked = 0;
+ }
+
+ nr->tx_time_of_last_loss
+ = ossl_time_max(nr->tx_time_of_last_loss, info->tx_time);
+
+out:
+ newreno_update_diag(nr);
+ return 1;
+}
+
+static int newreno_on_data_lost_finished(OSSL_CC_DATA *cc, uint32_t flags)
+{
+ OSSL_CC_NEWRENO *nr = (OSSL_CC_NEWRENO *)cc;
+
+ newreno_flush(nr, flags);
+ return 1;
+}
+
+static int newreno_on_data_invalidated(OSSL_CC_DATA *cc,
+ uint64_t num_bytes)
+{
+ OSSL_CC_NEWRENO *nr = (OSSL_CC_NEWRENO *)cc;
+
+ nr->bytes_in_flight -= num_bytes;
+ newreno_update_diag(nr);
+ return 1;
+}
+
+static int newreno_on_ecn(OSSL_CC_DATA *cc,
+ const OSSL_CC_ECN_INFO *info)
+{
+ OSSL_CC_NEWRENO *nr = (OSSL_CC_NEWRENO *)cc;
+
+ nr->processing_loss = 1;
+ nr->bytes_acked = 0;
+ nr->tx_time_of_last_loss = info->largest_acked_time;
+ newreno_flush(nr, 0);
+ return 1;
+}
+
+const OSSL_CC_METHOD ossl_cc_newreno_method = {
+ newreno_new,
+ newreno_free,
+ newreno_reset,
+ newreno_set_input_params,
+ newreno_bind_diagnostic,
+ newreno_unbind_diagnostic,
+ newreno_get_tx_allowance,
+ newreno_get_wakeup_deadline,
+ newreno_on_data_sent,
+ newreno_on_data_acked,
+ newreno_on_data_lost,
+ newreno_on_data_lost_finished,
+ newreno_on_data_invalidated,
+ newreno_on_ecn,
+};
diff --git a/crypto/openssl/ssl/quic/json_enc.c b/crypto/openssl/ssl/quic/json_enc.c
new file mode 100644
index 000000000000..527230137ec2
--- /dev/null
+++ b/crypto/openssl/ssl/quic/json_enc.c
@@ -0,0 +1,738 @@
+/*
+ * Copyright 2023-2025 The OpenSSL Project Authors. All Rights Reserved.
+ *
+ * Licensed under the Apache License 2.0 (the "License"). You may not use
+ * this file except in compliance with the License. You can obtain a copy
+ * in the file LICENSE in the source distribution or at
+ * https://www.openssl.org/source/license.html
+ */
+
+#include "internal/json_enc.h"
+#include "internal/nelem.h"
+#include "internal/numbers.h"
+#include <string.h>
+
+/*
+ * wbuf
+ * ====
+ */
+static int wbuf_flush(struct json_write_buf *wbuf, int full);
+
+static int wbuf_init(struct json_write_buf *wbuf, BIO *bio, size_t alloc)
+{
+ wbuf->buf = OPENSSL_malloc(alloc);
+ if (wbuf->buf == NULL)
+ return 0;
+
+ wbuf->cur = 0;
+ wbuf->alloc = alloc;
+ wbuf->bio = bio;
+ return 1;
+}
+
+static void wbuf_cleanup(struct json_write_buf *wbuf)
+{
+ OPENSSL_free(wbuf->buf);
+ wbuf->buf = NULL;
+ wbuf->alloc = 0;
+}
+
+static void wbuf_set0_bio(struct json_write_buf *wbuf, BIO *bio)
+{
+ wbuf->bio = bio;
+}
+
+/* Empty the write buffer. */
+static ossl_inline void wbuf_clean(struct json_write_buf *wbuf)
+{
+ wbuf->cur = 0;
+}
+
+/* Space remaining in the write buffer. */
+static ossl_inline size_t wbuf_avail(struct json_write_buf *wbuf)
+{
+ return wbuf->alloc - wbuf->cur;
+}
+
+/* Add character to write buffer, returning 0 on flush failure. */
+static ossl_inline int wbuf_write_char(struct json_write_buf *wbuf, char c)
+{
+ if (wbuf_avail(wbuf) == 0) {
+ if (!wbuf_flush(wbuf, /*full=*/0))
+ return 0;
+ }
+
+ wbuf->buf[wbuf->cur++] = c;
+ return 1;
+}
+
+/*
+ * Write zero-terminated string to write buffer, returning 0 on flush failure.
+ */
+static int wbuf_write_str(struct json_write_buf *wbuf, const char *s)
+{
+ char c;
+
+ while ((c = *s++) != 0)
+ if (!wbuf_write_char(wbuf, c))
+ return 0;
+
+ return 1;
+}
+
+/* Flush write buffer, returning 0 on I/O failure. */
+static int wbuf_flush(struct json_write_buf *wbuf, int full)
+{
+ size_t written = 0, total_written = 0;
+
+ while (total_written < wbuf->cur) {
+ if (!BIO_write_ex(wbuf->bio,
+ wbuf->buf + total_written,
+ wbuf->cur - total_written,
+ &written)) {
+ memmove(wbuf->buf,
+ wbuf->buf + total_written,
+ wbuf->cur - total_written);
+ wbuf->cur = 0;
+ return 0;
+ }
+
+ total_written += written;
+ }
+
+ wbuf->cur = 0;
+
+ if (full)
+ (void)BIO_flush(wbuf->bio); /* best effort */
+
+ return 1;
+}
+
+/*
+ * OSSL_JSON_ENC: Stack Management
+ * ===============================
+ */
+
+static int json_ensure_stack_size(OSSL_JSON_ENC *json, size_t num_bytes)
+{
+ unsigned char *stack;
+
+ if (json->stack_bytes >= num_bytes)
+ return 1;
+
+ if (num_bytes <= OSSL_NELEM(json->stack_small)) {
+ stack = json->stack_small;
+ } else {
+ if (json->stack == json->stack_small)
+ json->stack = NULL;
+
+ stack = OPENSSL_realloc(json->stack, num_bytes);
+ if (stack == NULL)
+ return 0;
+ }
+
+ json->stack = stack;
+ json->stack_bytes = num_bytes;
+ return 1;
+}
+
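+/*
+ * The stack records the kind of each open composite as one bit per nesting
+ * level: 0 for an object, 1 for an array. For example, while encoding the
+ * innermost object of {"a": [{}]} the stack holds the bits 0, 1, 0
+ * (outer object, array, inner object).
+ */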
+/* Push one bit onto the stack. Returns 0 on allocation failure. */
+static int json_push(OSSL_JSON_ENC *json, unsigned int v)
+{
+ if (v > 1)
+ return 0;
+
+ if (json->stack_end_byte >= json->stack_bytes) {
+ size_t new_size
+ = (json->stack_bytes == 0)
+ ? OSSL_NELEM(json->stack_small)
+ : (json->stack_bytes * 2);
+
+ if (!json_ensure_stack_size(json, new_size))
+ return 0;
+
+ json->stack_bytes = new_size;
+ }
+
+ if (v > 0)
+ json->stack[json->stack_end_byte] |= (v << json->stack_end_bit);
+ else
+ json->stack[json->stack_end_byte] &= ~(1U << json->stack_end_bit);
+
+ json->stack_end_bit = (json->stack_end_bit + 1) % CHAR_BIT;
+ if (json->stack_end_bit == 0)
+ ++json->stack_end_byte;
+
+ return 1;
+}
+
+/*
+ * Pop a bit from the stack. Returns 0 if stack is empty. Use json_peek() to get
+ * the value before calling this.
+ */
+static int json_pop(OSSL_JSON_ENC *json)
+{
+ if (json->stack_end_byte == 0 && json->stack_end_bit == 0)
+ return 0;
+
+ if (json->stack_end_bit == 0) {
+ --json->stack_end_byte;
+ json->stack_end_bit = CHAR_BIT - 1;
+ } else {
+ --json->stack_end_bit;
+ }
+
+ return 1;
+}
+
+/*
+ * Returns the bit on the top of the stack, or -1 if the stack is empty.
+ */
+static int json_peek(OSSL_JSON_ENC *json)
+{
+ size_t obyte, obit;
+
+ obyte = json->stack_end_byte;
+ obit = json->stack_end_bit;
+ if (obit == 0) {
+ if (obyte == 0)
+ return -1;
+
+ --obyte;
+ obit = CHAR_BIT - 1;
+ } else {
+ --obit;
+ }
+
+ return (json->stack[obyte] & (1U << obit)) != 0;
+}
+
+/*
+ * OSSL_JSON_ENC: Initialisation
+ * =============================
+ */
+
+enum {
+ STATE_PRE_KEY,
+ STATE_PRE_ITEM,
+ STATE_PRE_COMMA
+};
+
+static ossl_inline int in_ijson(const OSSL_JSON_ENC *json)
+{
+ return (json->flags & OSSL_JSON_FLAG_IJSON) != 0;
+}
+
+static ossl_inline int in_seq(const OSSL_JSON_ENC *json)
+{
+ return (json->flags & OSSL_JSON_FLAG_SEQ) != 0;
+}
+
+static ossl_inline int in_pretty(const OSSL_JSON_ENC *json)
+{
+ return (json->flags & OSSL_JSON_FLAG_PRETTY) != 0;
+}
+
+int ossl_json_init(OSSL_JSON_ENC *json, BIO *bio, uint32_t flags)
+{
+ memset(json, 0, sizeof(*json));
+ json->flags = flags;
+ json->error = 0;
+ if (!wbuf_init(&json->wbuf, bio, 4096))
+ return 0;
+
+ json->state = STATE_PRE_COMMA;
+ return 1;
+}
+
+void ossl_json_cleanup(OSSL_JSON_ENC *json)
+{
+ wbuf_cleanup(&json->wbuf);
+
+ if (json->stack != json->stack_small)
+ OPENSSL_free(json->stack);
+
+ json->stack = NULL;
+}
+
+int ossl_json_flush_cleanup(OSSL_JSON_ENC *json)
+{
+ int ok = ossl_json_flush(json);
+
+ ossl_json_cleanup(json);
+ return ok;
+}
+
+int ossl_json_reset(OSSL_JSON_ENC *json)
+{
+ wbuf_clean(&json->wbuf);
+ json->stack_end_byte = 0;
+ json->stack_end_bit = 0;
+ json->error = 0;
+ return 1;
+}
+
+int ossl_json_flush(OSSL_JSON_ENC *json)
+{
+ return wbuf_flush(&json->wbuf, /*full=*/1);
+}
+
+int ossl_json_set0_sink(OSSL_JSON_ENC *json, BIO *bio)
+{
+ wbuf_set0_bio(&json->wbuf, bio);
+ return 1;
+}
+
+int ossl_json_in_error(OSSL_JSON_ENC *json)
+{
+ return json->error;
+}
+
+/*
+ * JSON Builder Calls
+ * ==================
+ */
+
+static void json_write_qstring(OSSL_JSON_ENC *json, const char *str);
+static void json_indent(OSSL_JSON_ENC *json);
+
+static void json_raise_error(OSSL_JSON_ENC *json)
+{
+ json->error = 1;
+}
+
+static void json_undefer(OSSL_JSON_ENC *json)
+{
+ if (!json->defer_indent)
+ return;
+
+ json_indent(json);
+}
+
+static void json_write_char(OSSL_JSON_ENC *json, char ch)
+{
+ if (ossl_json_in_error(json))
+ return;
+
+ json_undefer(json);
+ if (!wbuf_write_char(&json->wbuf, ch))
+ json_raise_error(json);
+}
+
+static void json_write_str(OSSL_JSON_ENC *json, const char *s)
+{
+ if (ossl_json_in_error(json))
+ return;
+
+ json_undefer(json);
+ if (!wbuf_write_str(&json->wbuf, s))
+ json_raise_error(json);
+}
+
+static void json_indent(OSSL_JSON_ENC *json)
+{
+ size_t i, depth;
+
+ json->defer_indent = 0;
+
+ if (!in_pretty(json))
+ return;
+
+ json_write_char(json, '\n');
+
+ depth = json->stack_end_byte * 8 + json->stack_end_bit;
+ for (i = 0; i < depth * 4; ++i)
+ json_write_str(json, " ");
+}
+
+static int json_pre_item(OSSL_JSON_ENC *json)
+{
+ int s;
+
+ if (ossl_json_in_error(json))
+ return 0;
+
+ switch (json->state) {
+ case STATE_PRE_COMMA:
+ s = json_peek(json);
+
+ if (s == 0) {
+ json_raise_error(json);
+ return 0;
+ }
+
+ if (s == 1) {
+ json_write_char(json, ',');
+ if (ossl_json_in_error(json))
+ return 0;
+
+ json_indent(json);
+ }
+
+ if (s < 0 && in_seq(json))
+ json_write_char(json, '\x1E');
+
+ json->state = STATE_PRE_ITEM;
+ break;
+
+ case STATE_PRE_ITEM:
+ break;
+
+ case STATE_PRE_KEY:
+ default:
+ json_raise_error(json);
+ return 0;
+ }
+
+ return 1;
+}
+
+static void json_post_item(OSSL_JSON_ENC *json)
+{
+ int s = json_peek(json);
+
+ json->state = STATE_PRE_COMMA;
+
+ if (s < 0 && in_seq(json))
+ json_write_char(json, '\n');
+}
+
+/*
+ * Begin a composite structure (object or array).
+ *
+ * type: 0=object, 1=array.
+ */
+static void composite_begin(OSSL_JSON_ENC *json, int type, char ch)
+{
+ if (!json_pre_item(json)
+ || !json_push(json, type))
+ json_raise_error(json);
+
+ json_write_char(json, ch);
+ json->defer_indent = 1;
+}
+
+/*
+ * End a composite structure (object or array).
+ *
+ * type: 0=object, 1=array. Errors on mismatch.
+ */
+static void composite_end(OSSL_JSON_ENC *json, int type, char ch)
+{
+ int was_defer = json->defer_indent;
+
+ if (ossl_json_in_error(json))
+ return;
+
+ json->defer_indent = 0;
+
+ if (json_peek(json) != type) {
+ json_raise_error(json);
+ return;
+ }
+
+ if (type == 0 && json->state == STATE_PRE_ITEM) {
+ json_raise_error(json);
+ return;
+ }
+
+ if (!json_pop(json)) {
+ json_raise_error(json);
+ return;
+ }
+
+ if (!was_defer)
+ json_indent(json);
+
+ json_write_char(json, ch);
+ json_post_item(json);
+}
+
+/* Begin a new JSON object. */
+void ossl_json_object_begin(OSSL_JSON_ENC *json)
+{
+ composite_begin(json, 0, '{');
+ json->state = STATE_PRE_KEY;
+}
+
+/* End a JSON object. Must be matched with a call to ossl_json_object_begin(). */
+void ossl_json_object_end(OSSL_JSON_ENC *json)
+{
+ composite_end(json, 0, '}');
+}
+
+/* Begin a new JSON array. */
+void ossl_json_array_begin(OSSL_JSON_ENC *json)
+{
+ composite_begin(json, 1, '[');
+ json->state = STATE_PRE_ITEM;
+}
+
+/* End a JSON array. Must be matched with a call to ossl_json_array_begin(). */
+void ossl_json_array_end(OSSL_JSON_ENC *json)
+{
+ composite_end(json, 1, ']');
+}
+
+/*
+ * Encode a JSON key within an object. Pass a zero-terminated string, which can
+ * be freed immediately following the call to this function.
+ */
+void ossl_json_key(OSSL_JSON_ENC *json, const char *key)
+{
+ if (ossl_json_in_error(json))
+ return;
+
+ if (json_peek(json) != 0) {
+ /* Not in object */
+ json_raise_error(json);
+ return;
+ }
+
+ if (json->state == STATE_PRE_COMMA) {
+ json_write_char(json, ',');
+ json->state = STATE_PRE_KEY;
+ }
+
+ json_indent(json);
+ if (json->state != STATE_PRE_KEY) {
+ json_raise_error(json);
+ return;
+ }
+
+ json_write_qstring(json, key);
+ if (ossl_json_in_error(json))
+ return;
+
+ json_write_char(json, ':');
+ if (in_pretty(json))
+ json_write_char(json, ' ');
+
+ json->state = STATE_PRE_ITEM;
+}
+
+/* Encode a JSON 'null' value. */
+void ossl_json_null(OSSL_JSON_ENC *json)
+{
+ if (!json_pre_item(json))
+ return;
+
+ json_write_str(json, "null");
+ json_post_item(json);
+}
+
+void ossl_json_bool(OSSL_JSON_ENC *json, int v)
+{
+ if (!json_pre_item(json))
+ return;
+
+ json_write_str(json, v > 0 ? "true" : "false");
+ json_post_item(json);
+}
+
+#define POW_53 (((int64_t)1) << 53)
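+
+/*
+ * Under I-JSON (RFC 7493), integers outside the range exactly representable
+ * as an IEEE 754 double, [-(2**53 - 1), 2**53 - 1], are emitted as strings
+ * here. E.g. with OSSL_JSON_FLAG_IJSON set, 2**53 = 9007199254740992 is
+ * written as "9007199254740992", while 2**53 - 1 is written bare.
+ */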
+
+/* Encode a JSON integer from a uint64_t. */
+static void json_u64(OSSL_JSON_ENC *json, uint64_t v, int noquote)
+{
+ char buf[22], *p = buf + sizeof(buf) - 1;
+ int quote = !noquote && in_ijson(json) && v > (uint64_t)(POW_53 - 1);
+
+ if (!json_pre_item(json))
+ return;
+
+ if (quote)
+ json_write_char(json, '"');
+
+ if (v == 0)
+ p = "0";
+ else
+ for (*p = '\0'; v > 0; v /= 10)
+ *--p = '0' + v % 10;
+
+ json_write_str(json, p);
+
+ if (quote)
+ json_write_char(json, '"');
+
+ json_post_item(json);
+}
+
+void ossl_json_u64(OSSL_JSON_ENC *json, uint64_t v)
+{
+ json_u64(json, v, 0);
+}
+
+/* Encode a JSON integer from an int64_t. */
+void ossl_json_i64(OSSL_JSON_ENC *json, int64_t value)
+{
+ uint64_t uv;
+ int quote;
+
+ if (value >= 0) {
+ ossl_json_u64(json, (uint64_t)value);
+ return;
+ }
+
+ if (!json_pre_item(json))
+ return;
+
+ quote = in_ijson(json)
+ && (value > POW_53 - 1 || value < -POW_53 + 1);
+
+ if (quote)
+ json_write_char(json, '"');
+
+ json_write_char(json, '-');
+
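+    /*
+     * Negating INT64_MIN directly is undefined behaviour, so its magnitude
+     * is formed as -(INT64_MIN + 1) + 1 in unsigned arithmetic.
+     */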
+ uv = (value == INT64_MIN)
+ ? ((uint64_t)-(INT64_MIN + 1)) + 1
+ : (uint64_t)-value;
+ json_u64(json, uv, /*noquote=*/1);
+
+ if (quote && !ossl_json_in_error(json))
+ json_write_char(json, '"');
+}
+
+/*
+ * Encode a JSON UTF-8 string from a zero-terminated string. The string passed
+ * can be freed immediately following the call to this function.
+ */
+static ossl_inline int hex_digit(int v)
+{
+ return v >= 10 ? 'a' + (v - 10) : '0' + v;
+}
+
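+/*
+ * Escaping, as implemented below: the two-character escapes (\n, \r, \t, \b,
+ * \f, \", \\) are used where defined, valid multi-byte UTF-8 sequences (per
+ * RFC 3629) are passed through verbatim, and any other byte -- a control
+ * character, DEL, or a byte that does not begin a valid UTF-8 sequence -- is
+ * written byte-wise as a \u00XX escape.
+ */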
+static ossl_inline void
+json_write_qstring_inner(OSSL_JSON_ENC *json, const char *str, size_t str_len,
+ int nul_term)
+{
+ char c, *o, obuf[7];
+ unsigned char *u_str;
+ int i;
+ size_t j;
+
+ if (ossl_json_in_error(json))
+ return;
+
+ json_write_char(json, '"');
+
+ for (j = nul_term ? strlen(str) : str_len; j > 0; str++, j--) {
+ c = *str;
+ u_str = (unsigned char*)str;
+ switch (c) {
+ case '\n': o = "\\n"; break;
+ case '\r': o = "\\r"; break;
+ case '\t': o = "\\t"; break;
+ case '\b': o = "\\b"; break;
+ case '\f': o = "\\f"; break;
+ case '"': o = "\\\""; break;
+ case '\\': o = "\\\\"; break;
+ default:
+ /* valid UTF-8 sequences according to RFC-3629 */
+ if (u_str[0] >= 0xc2 && u_str[0] <= 0xdf && j >= 2
+ && u_str[1] >= 0x80 && u_str[1] <= 0xbf) {
+ memcpy(obuf, str, 2);
+ obuf[2] = '\0';
+ str++, j--;
+ o = obuf;
+ break;
+ }
+ if (u_str[0] >= 0xe0 && u_str[0] <= 0xef && j >= 3
+ && u_str[1] >= 0x80 && u_str[1] <= 0xbf
+ && u_str[2] >= 0x80 && u_str[2] <= 0xbf
+ && !(u_str[0] == 0xe0 && u_str[1] <= 0x9f)
+ && !(u_str[0] == 0xed && u_str[1] >= 0xa0)) {
+ memcpy(obuf, str, 3);
+ obuf[3] = '\0';
+ str += 2;
+ j -= 2;
+ o = obuf;
+ break;
+ }
+ if (u_str[0] >= 0xf0 && u_str[0] <= 0xf4 && j >= 4
+ && u_str[1] >= 0x80 && u_str[1] <= 0xbf
+ && u_str[2] >= 0x80 && u_str[2] <= 0xbf
+ && u_str[3] >= 0x80 && u_str[3] <= 0xbf
+ && !(u_str[0] == 0xf0 && u_str[1] <= 0x8f)
+ && !(u_str[0] == 0xf4 && u_str[1] >= 0x90)) {
+ memcpy(obuf, str, 4);
+ obuf[4] = '\0';
+ str += 3;
+ j -= 3;
+ o = obuf;
+ break;
+ }
+ if (u_str[0] < 0x20 || u_str[0] >= 0x7f) {
+ obuf[0] = '\\';
+ obuf[1] = 'u';
+ for (i = 0; i < 4; ++i)
+ obuf[2 + i] = hex_digit((u_str[0] >> ((3 - i) * 4)) & 0x0F);
+ obuf[6] = '\0';
+ o = obuf;
+ } else {
+ json_write_char(json, c);
+ continue;
+ }
+ break;
+ }
+
+ json_write_str(json, o);
+ }
+
+ json_write_char(json, '"');
+}
+
+static void
+json_write_qstring(OSSL_JSON_ENC *json, const char *str)
+{
+ json_write_qstring_inner(json, str, 0, 1);
+}
+
+static void
+json_write_qstring_len(OSSL_JSON_ENC *json, const char *str, size_t str_len)
+{
+ json_write_qstring_inner(json, str, str_len, 0);
+}
+
+void ossl_json_str(OSSL_JSON_ENC *json, const char *str)
+{
+ if (!json_pre_item(json))
+ return;
+
+ json_write_qstring(json, str);
+ json_post_item(json);
+}
+
+void ossl_json_str_len(OSSL_JSON_ENC *json, const char *str, size_t str_len)
+{
+ if (!json_pre_item(json))
+ return;
+
+ json_write_qstring_len(json, str, str_len);
+ json_post_item(json);
+}
+
+/*
+ * Encode binary data as a lowercase hex string. data_len is the data length in
+ * bytes.
+ */
+void ossl_json_str_hex(OSSL_JSON_ENC *json, const void *data, size_t data_len)
+{
+ const unsigned char *b = data, *end = b + data_len;
+ unsigned char c;
+
+ if (!json_pre_item(json))
+ return;
+
+ json_write_char(json, '"');
+
+ for (; b < end; ++b) {
+ c = *b;
+ json_write_char(json, hex_digit(c >> 4));
+ json_write_char(json, hex_digit(c & 0x0F));
+ }
+
+ json_write_char(json, '"');
+ json_post_item(json);
+}
diff --git a/crypto/openssl/ssl/quic/qlog.c b/crypto/openssl/ssl/quic/qlog.c
new file mode 100644
index 000000000000..3aadda046f74
--- /dev/null
+++ b/crypto/openssl/ssl/quic/qlog.c
@@ -0,0 +1,728 @@
+/*
+ * Copyright 2023-2024 The OpenSSL Project Authors. All Rights Reserved.
+ *
+ * Licensed under the Apache License 2.0 (the "License"). You may not use
+ * this file except in compliance with the License. You can obtain a copy
+ * in the file LICENSE in the source distribution or at
+ * https://www.openssl.org/source/license.html
+ */
+
+#include "internal/qlog.h"
+#include "internal/json_enc.h"
+#include "internal/common.h"
+#include "internal/cryptlib.h"
+#include "crypto/ctype.h"
+
+#define BITS_PER_WORD (sizeof(size_t) * 8)
+#define NUM_ENABLED_W ((QLOG_EVENT_TYPE_NUM + BITS_PER_WORD - 1) / BITS_PER_WORD)
+
+static ossl_unused ossl_inline int bit_get(const size_t *p, uint32_t bit_no)
+{
+ return p[bit_no / BITS_PER_WORD] & (((size_t)1) << (bit_no % BITS_PER_WORD));
+}
+
+static ossl_unused ossl_inline void bit_set(size_t *p, uint32_t bit_no, int enable)
+{
+ size_t mask = (((size_t)1) << (bit_no % BITS_PER_WORD));
+
+ if (enable)
+ p[bit_no / BITS_PER_WORD] |= mask;
+ else
+ p[bit_no / BITS_PER_WORD] &= ~mask;
+}
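+
+/*
+ * Example: on a platform with a 64-bit size_t (BITS_PER_WORD == 64), event
+ * type 70 maps to word 1, bit 6, i.e. p[70 / 64] & (((size_t)1) << (70 % 64)).
+ */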
+
+struct qlog_st {
+ QLOG_TRACE_INFO info;
+
+ BIO *bio;
+ size_t enabled[NUM_ENABLED_W];
+ uint32_t event_type;
+ const char *event_cat, *event_name, *event_combined_name;
+ OSSL_TIME event_time, prev_event_time;
+ OSSL_JSON_ENC json;
+ int header_done, first_event_done;
+};
+
+static OSSL_TIME default_now(void *arg)
+{
+ return ossl_time_now();
+}
+
+/*
+ * Construction
+ * ============
+ */
+QLOG *ossl_qlog_new(const QLOG_TRACE_INFO *info)
+{
+ QLOG *qlog = OPENSSL_zalloc(sizeof(QLOG));
+
+ if (qlog == NULL)
+ return NULL;
+
+ qlog->info.odcid = info->odcid;
+ qlog->info.is_server = info->is_server;
+ qlog->info.now_cb = info->now_cb;
+ qlog->info.now_cb_arg = info->now_cb_arg;
+ qlog->info.override_process_id = info->override_process_id;
+
+ if (info->title != NULL
+ && (qlog->info.title = OPENSSL_strdup(info->title)) == NULL)
+ goto err;
+
+ if (info->description != NULL
+ && (qlog->info.description = OPENSSL_strdup(info->description)) == NULL)
+ goto err;
+
+ if (info->group_id != NULL
+ && (qlog->info.group_id = OPENSSL_strdup(info->group_id)) == NULL)
+ goto err;
+
+ if (info->override_impl_name != NULL
+ && (qlog->info.override_impl_name
+ = OPENSSL_strdup(info->override_impl_name)) == NULL)
+ goto err;
+
+ if (!ossl_json_init(&qlog->json, NULL,
+ OSSL_JSON_FLAG_IJSON | OSSL_JSON_FLAG_SEQ))
+ goto err;
+
+ if (qlog->info.now_cb == NULL)
+ qlog->info.now_cb = default_now;
+
+ return qlog;
+
+err:
+ if (qlog != NULL) {
+ OPENSSL_free((char *)qlog->info.title);
+ OPENSSL_free((char *)qlog->info.description);
+ OPENSSL_free((char *)qlog->info.group_id);
+ OPENSSL_free((char *)qlog->info.override_impl_name);
+ OPENSSL_free(qlog);
+ }
+ return NULL;
+}
+
+QLOG *ossl_qlog_new_from_env(const QLOG_TRACE_INFO *info)
+{
+ QLOG *qlog = NULL;
+ const char *qlogdir = ossl_safe_getenv("QLOGDIR");
+ const char *qfilter = ossl_safe_getenv("OSSL_QFILTER");
+ char qlogdir_sep, *filename = NULL;
+ size_t i, l, strl;
+
+ if (info == NULL || qlogdir == NULL)
+ return NULL;
+
+ l = strlen(qlogdir);
+ if (l == 0)
+ return NULL;
+
+ qlogdir_sep = ossl_determine_dirsep(qlogdir);
+
+ /* dir; [sep]; ODCID; _; strlen("client" / "server"); strlen(".sqlog"); NUL */
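+    /*
+     * E.g. QLOGDIR=/var/log/qlog and a 4-byte ODCID of ab:cd:12:34 on the
+     * client side yields "/var/log/qlog/abcd1234_client.sqlog".
+     */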
+ strl = l + 1 + info->odcid.id_len * 2 + 1 + 6 + 6 + 1;
+ filename = OPENSSL_malloc(strl);
+ if (filename == NULL)
+ return NULL;
+
+ memcpy(filename, qlogdir, l);
+ if (qlogdir_sep != '\0')
+ filename[l++] = qlogdir_sep;
+
+ for (i = 0; i < info->odcid.id_len; ++i)
+ l += BIO_snprintf(filename + l, strl - l, "%02x", info->odcid.id[i]);
+
+ l += BIO_snprintf(filename + l, strl - l, "_%s.sqlog",
+ info->is_server ? "server" : "client");
+
+ qlog = ossl_qlog_new(info);
+ if (qlog == NULL)
+ goto err;
+
+ if (!ossl_qlog_set_sink_filename(qlog, filename))
+ goto err;
+
+ if (qfilter == NULL || qfilter[0] == '\0')
+ qfilter = "*";
+
+ if (!ossl_qlog_set_filter(qlog, qfilter))
+ goto err;
+
+ OPENSSL_free(filename);
+ return qlog;
+
+err:
+ OPENSSL_free(filename);
+ ossl_qlog_free(qlog);
+ return NULL;
+}
+
+void ossl_qlog_free(QLOG *qlog)
+{
+ if (qlog == NULL)
+ return;
+
+ ossl_json_flush_cleanup(&qlog->json);
+ BIO_free_all(qlog->bio);
+ OPENSSL_free((char *)qlog->info.title);
+ OPENSSL_free((char *)qlog->info.description);
+ OPENSSL_free((char *)qlog->info.group_id);
+ OPENSSL_free((char *)qlog->info.override_impl_name);
+ OPENSSL_free(qlog);
+}
+
+/*
+ * Configuration
+ * =============
+ */
+int ossl_qlog_set_sink_bio(QLOG *qlog, BIO *bio)
+{
+ if (qlog == NULL)
+ return 0;
+
+ ossl_qlog_flush(qlog); /* best effort */
+ BIO_free_all(qlog->bio);
+ qlog->bio = bio;
+ ossl_json_set0_sink(&qlog->json, bio);
+ return 1;
+}
+
+#ifndef OPENSSL_NO_STDIO
+
+int ossl_qlog_set_sink_file(QLOG *qlog, FILE *f, int close_flag)
+{
+ BIO *bio;
+
+ if (qlog == NULL)
+ return 0;
+
+    bio = BIO_new_fp(f, close_flag ? BIO_CLOSE : BIO_NOCLOSE);
+ if (bio == NULL)
+ return 0;
+
+ if (!ossl_qlog_set_sink_bio(qlog, bio)) {
+ BIO_free_all(bio);
+ return 0;
+ }
+
+ return 1;
+}
+
+#endif
+
+int ossl_qlog_set_sink_filename(QLOG *qlog, const char *filename)
+{
+ BIO *bio;
+
+ if (qlog == NULL)
+ return 0;
+
+ /*
+ * We supply our own text encoding as JSON requires UTF-8, so disable any
+ * OS-specific processing here.
+ */
+ bio = BIO_new_file(filename, "wb");
+ if (bio == NULL)
+ return 0;
+
+ if (!ossl_qlog_set_sink_bio(qlog, bio)) {
+ BIO_free_all(bio);
+ return 0;
+ }
+
+ return 1;
+}
+
+int ossl_qlog_flush(QLOG *qlog)
+{
+ if (qlog == NULL)
+ return 1;
+
+ return ossl_json_flush(&qlog->json);
+}
+
+int ossl_qlog_set_event_type_enabled(QLOG *qlog, uint32_t event_type,
+ int enabled)
+{
+ if (qlog == NULL || event_type >= QLOG_EVENT_TYPE_NUM)
+ return 0;
+
+ bit_set(qlog->enabled, event_type, enabled);
+ return 1;
+}
+
+int ossl_qlog_enabled(QLOG *qlog, uint32_t event_type)
+{
+ if (qlog == NULL)
+ return 0;
+
+ return bit_get(qlog->enabled, event_type) != 0;
+}
+
+/*
+ * Event Lifecycle
+ * ===============
+ */
+static void write_str_once(QLOG *qlog, const char *key, char **p)
+{
+ if (*p == NULL)
+ return;
+
+ ossl_json_key(&qlog->json, key);
+ ossl_json_str(&qlog->json, *p);
+
+ OPENSSL_free(*p);
+ *p = NULL;
+}
+
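+/*
+ * Emits the JSON-SEQ trace header record once per sink, e.g. (abridged):
+ *
+ *   {"qlog_version": "0.3", "qlog_format": "JSON-SEQ",
+ *    "trace": {"common_fields": {"time_format": "delta", ...},
+ *              "vantage_point": {"type": "client", "name": "OpenSSL/..."}}}
+ */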
+static void qlog_event_seq_header(QLOG *qlog)
+{
+ if (qlog->header_done)
+ return;
+
+ ossl_json_object_begin(&qlog->json);
+ {
+ ossl_json_key(&qlog->json, "qlog_version");
+ ossl_json_str(&qlog->json, "0.3");
+
+ ossl_json_key(&qlog->json, "qlog_format");
+ ossl_json_str(&qlog->json, "JSON-SEQ");
+
+ write_str_once(qlog, "title", (char **)&qlog->info.title);
+ write_str_once(qlog, "description", (char **)&qlog->info.description);
+
+ ossl_json_key(&qlog->json, "trace");
+ ossl_json_object_begin(&qlog->json);
+ {
+ ossl_json_key(&qlog->json, "common_fields");
+ ossl_json_object_begin(&qlog->json);
+ {
+ ossl_json_key(&qlog->json, "time_format");
+ ossl_json_str(&qlog->json, "delta");
+
+ ossl_json_key(&qlog->json, "protocol_type");
+ ossl_json_array_begin(&qlog->json);
+ {
+ ossl_json_str(&qlog->json, "QUIC");
+ } /* protocol_type */
+ ossl_json_array_end(&qlog->json);
+
+ write_str_once(qlog, "group_id", (char **)&qlog->info.group_id);
+
+ ossl_json_key(&qlog->json, "system_info");
+ ossl_json_object_begin(&qlog->json);
+ {
+ if (qlog->info.override_process_id != 0) {
+ ossl_json_key(&qlog->json, "process_id");
+ ossl_json_u64(&qlog->json, qlog->info.override_process_id);
+ } else {
+#if defined(OPENSSL_SYS_UNIX)
+ ossl_json_key(&qlog->json, "process_id");
+ ossl_json_u64(&qlog->json, (uint64_t)getpid());
+#elif defined(OPENSSL_SYS_WINDOWS)
+ ossl_json_key(&qlog->json, "process_id");
+ ossl_json_u64(&qlog->json, (uint64_t)GetCurrentProcessId());
+#endif
+ }
+ } /* system_info */
+ ossl_json_object_end(&qlog->json);
+ } /* common_fields */
+ ossl_json_object_end(&qlog->json);
+
+ ossl_json_key(&qlog->json, "vantage_point");
+ ossl_json_object_begin(&qlog->json);
+ {
+ char buf[128];
+ const char *p = buf;
+
+ if (qlog->info.override_impl_name != NULL) {
+ p = qlog->info.override_impl_name;
+ } else {
+ BIO_snprintf(buf, sizeof(buf), "OpenSSL/%s (%s)",
+ OpenSSL_version(OPENSSL_FULL_VERSION_STRING),
+ OpenSSL_version(OPENSSL_PLATFORM) + 10);
+ }
+
+ ossl_json_key(&qlog->json, "type");
+ ossl_json_str(&qlog->json,
+ qlog->info.is_server ? "server" : "client");
+
+ ossl_json_key(&qlog->json, "name");
+ ossl_json_str(&qlog->json, p);
+ } /* vantage_point */
+ ossl_json_object_end(&qlog->json);
+ } /* trace */
+ ossl_json_object_end(&qlog->json);
+ }
+ ossl_json_object_end(&qlog->json);
+
+ qlog->header_done = 1;
+}
+
+static void qlog_event_prologue(QLOG *qlog)
+{
+ qlog_event_seq_header(qlog);
+
+ ossl_json_object_begin(&qlog->json);
+
+ ossl_json_key(&qlog->json, "name");
+ ossl_json_str(&qlog->json, qlog->event_combined_name);
+
+ ossl_json_key(&qlog->json, "data");
+ ossl_json_object_begin(&qlog->json);
+}
+
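+/*
+ * With the "delta" time_format declared in the header, the first event
+ * carries an absolute timestamp in milliseconds and each subsequent event
+ * carries the offset from the previous event, as implemented below.
+ */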
+static void qlog_event_epilogue(QLOG *qlog)
+{
+ ossl_json_object_end(&qlog->json);
+
+ ossl_json_key(&qlog->json, "time");
+ if (!qlog->first_event_done) {
+ ossl_json_u64(&qlog->json, ossl_time2ms(qlog->event_time));
+ qlog->prev_event_time = qlog->event_time;
+ qlog->first_event_done = 1;
+ } else {
+ OSSL_TIME delta = ossl_time_subtract(qlog->event_time,
+ qlog->prev_event_time);
+
+ ossl_json_u64(&qlog->json, ossl_time2ms(delta));
+ qlog->prev_event_time = qlog->event_time;
+ }
+
+ ossl_json_object_end(&qlog->json);
+}
+
+int ossl_qlog_event_try_begin(QLOG *qlog,
+ uint32_t event_type,
+ const char *event_cat,
+ const char *event_name,
+ const char *event_combined_name)
+{
+ if (qlog == NULL)
+ return 0;
+
+ if (!ossl_assert(qlog->event_type == QLOG_EVENT_TYPE_NONE)
+ || !ossl_qlog_enabled(qlog, event_type))
+ return 0;
+
+ qlog->event_type = event_type;
+ qlog->event_cat = event_cat;
+ qlog->event_name = event_name;
+ qlog->event_combined_name = event_combined_name;
+ qlog->event_time = qlog->info.now_cb(qlog->info.now_cb_arg);
+
+ qlog_event_prologue(qlog);
+ return 1;
+}
+
+void ossl_qlog_event_end(QLOG *qlog)
+{
+ if (!ossl_assert(qlog != NULL && qlog->event_type != QLOG_EVENT_TYPE_NONE))
+ return;
+
+ qlog_event_epilogue(qlog);
+ qlog->event_type = QLOG_EVENT_TYPE_NONE;
+}
+
+/*
+ * Field Generators
+ * ================
+ */
+void ossl_qlog_group_begin(QLOG *qlog, const char *name)
+{
+ if (name != NULL)
+ ossl_json_key(&qlog->json, name);
+
+ ossl_json_object_begin(&qlog->json);
+}
+
+void ossl_qlog_group_end(QLOG *qlog)
+{
+ ossl_json_object_end(&qlog->json);
+}
+
+void ossl_qlog_array_begin(QLOG *qlog, const char *name)
+{
+ if (name != NULL)
+ ossl_json_key(&qlog->json, name);
+
+ ossl_json_array_begin(&qlog->json);
+}
+
+void ossl_qlog_array_end(QLOG *qlog)
+{
+ ossl_json_array_end(&qlog->json);
+}
+
+void ossl_qlog_override_time(QLOG *qlog, OSSL_TIME event_time)
+{
+ qlog->event_time = event_time;
+}
+
+void ossl_qlog_str(QLOG *qlog, const char *name, const char *value)
+{
+ if (name != NULL)
+ ossl_json_key(&qlog->json, name);
+
+ ossl_json_str(&qlog->json, value);
+}
+
+void ossl_qlog_str_len(QLOG *qlog, const char *name,
+ const char *value, size_t value_len)
+{
+ if (name != NULL)
+ ossl_json_key(&qlog->json, name);
+
+ ossl_json_str_len(&qlog->json, value, value_len);
+}
+
+void ossl_qlog_u64(QLOG *qlog, const char *name, uint64_t value)
+{
+ if (name != NULL)
+ ossl_json_key(&qlog->json, name);
+
+ ossl_json_u64(&qlog->json, value);
+}
+
+void ossl_qlog_i64(QLOG *qlog, const char *name, int64_t value)
+{
+ if (name != NULL)
+ ossl_json_key(&qlog->json, name);
+
+ ossl_json_i64(&qlog->json, value);
+}
+
+void ossl_qlog_bool(QLOG *qlog, const char *name, int value)
+{
+ if (name != NULL)
+ ossl_json_key(&qlog->json, name);
+
+ ossl_json_bool(&qlog->json, value);
+}
+
+void ossl_qlog_bin(QLOG *qlog, const char *name,
+ const void *value, size_t value_len)
+{
+ if (name != NULL)
+ ossl_json_key(&qlog->json, name);
+
+ ossl_json_str_hex(&qlog->json, value, value_len);
+}
+
+/*
+ * Filter Parsing
+ * ==============
+ */
+struct lexer {
+ const char *p, *term_end, *end;
+};
+
+static ossl_inline int is_term_sep_ws(char c)
+{
+ return c == ' ' || c == '\r' || c == '\n' || c == '\t';
+}
+
+static ossl_inline int is_name_char(char c)
+{
+ return ossl_isalpha(c) || ossl_isdigit(c) || c == '_' || c == '-';
+}
+
+static int lex_init(struct lexer *lex, const char *in, size_t in_len)
+{
+ if (in == NULL)
+ return 0;
+
+ lex->p = in;
+ lex->term_end = in;
+ lex->end = in + in_len;
+ return 1;
+}
+
+static int lex_do(struct lexer *lex)
+{
+ const char *p = lex->term_end, *end = lex->end, *term_end;
+
+    for (; p < end && is_term_sep_ws(*p); ++p);
+
+ if (p == end) {
+ lex->p = end;
+ lex->term_end = end;
+ return 0;
+ }
+
+    for (term_end = p; term_end < end && !is_term_sep_ws(*term_end); ++term_end);
+
+ lex->p = p;
+ lex->term_end = term_end;
+ return 1;
+}
+
+static int lex_eot(struct lexer *lex)
+{
+ return lex->p == lex->term_end;
+}
+
+static int lex_peek_char(struct lexer *lex)
+{
+ return lex_eot(lex) ? -1 : *lex->p;
+}
+
+static int lex_skip_char(struct lexer *lex)
+{
+ if (lex_eot(lex))
+ return 0;
+
+ ++lex->p;
+ return 1;
+}
+
+static int lex_match(struct lexer *lex, const char *s, size_t s_len)
+{
+ if ((size_t)(lex->term_end - lex->p) != s_len)
+ return 0;
+
+ if (memcmp(lex->p, s, s_len))
+ return 0;
+
+ return 1;
+}
+
+static void lex_get_rest(struct lexer *lex, const char **str, size_t *str_l)
+{
+ *str = lex->p;
+ *str_l = lex->term_end - lex->p;
+}
+
+static int lex_extract_to(struct lexer *lex, char c,
+ const char **str, size_t *str_l)
+{
+ const char *p = lex->p, *term_end = lex->term_end, *s;
+
+ for (s = p; s < term_end && *s != c; ++s);
+ if (s == term_end)
+ return 0;
+
+ *str = p;
+ *str_l = s - p;
+ lex->p = ++s;
+ return 1;
+}
+
+static int ossl_unused filter_match_event(const char *cat, size_t cat_l,
+ const char *event, size_t event_l,
+ const char *expect_cat,
+ const char *expect_event)
+{
+ size_t expect_cat_l = strlen(expect_cat);
+ size_t expect_event_l = strlen(expect_event);
+
+ if ((cat != NULL && cat_l != expect_cat_l)
+ || (event != NULL && event_l != expect_event_l)
+ || (cat != NULL && memcmp(cat, expect_cat, expect_cat_l))
+ || (event != NULL && memcmp(event, expect_event, expect_event_l)))
+ return 0;
+
+ return 1;
+}
+
+/*
+ * enabled: Event enablement bitmask. Array of size NUM_ENABLED_W.
+ * add: 1 to enable an event, 0 to disable.
+ * cat: Category name/length. Not necessarily zero terminated.
+ * NULL to match any.
+ * event: Event name/length. Not necessarily zero terminated.
+ * NULL to match any.
+ */
+static void filter_apply(size_t *enabled, int add,
+ const char *cat, size_t cat_l,
+ const char *event, size_t event_l)
+{
+ /* Find events which match the given filters. */
+# define QLOG_EVENT(e_cat, e_name) \
+ if (filter_match_event(cat, cat_l, event, event_l, \
+ #e_cat, #e_name)) \
+ bit_set(enabled, QLOG_EVENT_TYPE_##e_cat##_##e_name, add);
+# include "internal/qlog_events.h"
+# undef QLOG_EVENT
+}
+
+static int lex_fail(struct lexer *lex, const char *msg)
+{
+ /*
+ * TODO(QLOG FUTURE): Determine how to print log messages about bad filter
+ * strings
+ */
+ lex->p = lex->term_end = lex->end;
+ return 0;
+}
+
+static int validate_name(const char **p, size_t *l)
+{
+ const char *p_ = *p;
+ size_t i, l_ = *l;
+
+ if (l_ == 1 && *p_ == '*') {
+ *p = NULL;
+ *l = 0;
+ return 1;
+ }
+
+ if (l_ == 0)
+ return 0;
+
+ for (i = 0; i < l_; ++i)
+ if (!is_name_char(p_[i]))
+ return 0;
+
+ return 1;
+}
+
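+/*
+ * Filter strings are a whitespace-separated sequence of terms, applied left
+ * to right. Each term is an optional '+' (enable, the default) or '-'
+ * (disable) followed by either '*' (all events) or a category:event pair in
+ * which either name may be '*'. E.g. "-* +connectivity:*" disables all
+ * events and then re-enables every connectivity event.
+ */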
+int ossl_qlog_set_filter(QLOG *qlog, const char *filter)
+{
+ struct lexer lex = {0};
+ char c;
+ const char *cat, *event;
+ size_t cat_l, event_l, enabled[NUM_ENABLED_W];
+ int add;
+
+ memcpy(enabled, qlog->enabled, sizeof(enabled));
+
+ if (!lex_init(&lex, filter, strlen(filter)))
+ return 0;
+
+ while (lex_do(&lex)) {
+ c = lex_peek_char(&lex);
+ if (c == '+' || c == '-') {
+ add = (c == '+');
+ lex_skip_char(&lex);
+
+ c = lex_peek_char(&lex);
+ if (!is_name_char(c) && c != '*')
+ return lex_fail(&lex, "expected alphanumeric name or '*'"
+ " after +/-");
+ } else if (!is_name_char(c) && c != '*') {
+ return lex_fail(&lex, "expected +/- or alphanumeric name or '*'");
+ } else {
+ add = 1;
+ }
+
+ if (lex_match(&lex, "*", 1)) {
+ filter_apply(enabled, add, NULL, 0, NULL, 0);
+ continue;
+ }
+
+ if (!lex_extract_to(&lex, ':', &cat, &cat_l))
+ return lex_fail(&lex, "expected ':' after category name");
+
+ lex_get_rest(&lex, &event, &event_l);
+ if (!validate_name(&cat, &cat_l))
+ return lex_fail(&lex, "expected alphanumeric category name or '*'");
+ if (!validate_name(&event, &event_l))
+ return lex_fail(&lex, "expected alphanumeric event name or '*'");
+
+ filter_apply(enabled, add, cat, cat_l, event, event_l);
+ }
+
+ memcpy(qlog->enabled, enabled, sizeof(enabled));
+ return 1;
+}
diff --git a/crypto/openssl/ssl/quic/qlog_event_helpers.c b/crypto/openssl/ssl/quic/qlog_event_helpers.c
new file mode 100644
index 000000000000..a4f74e25c800
--- /dev/null
+++ b/crypto/openssl/ssl/quic/qlog_event_helpers.c
@@ -0,0 +1,636 @@
+/*
+ * Copyright 2023-2025 The OpenSSL Project Authors. All Rights Reserved.
+ *
+ * Licensed under the Apache License 2.0 (the "License"). You may not use
+ * this file except in compliance with the License. You can obtain a copy
+ * in the file LICENSE in the source distribution or at
+ * https://www.openssl.org/source/license.html
+ */
+
+#include "internal/qlog_event_helpers.h"
+#include "internal/common.h"
+#include "internal/packet.h"
+#include "internal/quic_channel.h"
+#include "internal/quic_error.h"
+
+void ossl_qlog_event_connectivity_connection_started(QLOG *qlog,
+ const QUIC_CONN_ID *init_dcid)
+{
+#ifndef OPENSSL_NO_QLOG
+ QLOG_EVENT_BEGIN(qlog, connectivity, connection_started)
+ QLOG_STR("protocol", "quic");
+ QLOG_CID("dst_cid", init_dcid);
+ QLOG_EVENT_END()
+#endif
+}
+
+#ifndef OPENSSL_NO_QLOG
+static const char *map_state_to_qlog(uint32_t state,
+ int handshake_complete,
+ int handshake_confirmed)
+{
+ switch (state) {
+ default:
+ case QUIC_CHANNEL_STATE_IDLE:
+ return NULL;
+
+ case QUIC_CHANNEL_STATE_ACTIVE:
+ if (handshake_confirmed)
+ return "handshake_confirmed";
+ else if (handshake_complete)
+ return "handshake_complete";
+ else
+ return "attempted";
+
+ case QUIC_CHANNEL_STATE_TERMINATING_CLOSING:
+ return "closing";
+
+ case QUIC_CHANNEL_STATE_TERMINATING_DRAINING:
+ return "draining";
+
+ case QUIC_CHANNEL_STATE_TERMINATED:
+ return "closed";
+ }
+}
+#endif
+
+void ossl_qlog_event_connectivity_connection_state_updated(QLOG *qlog,
+ uint32_t old_state,
+ uint32_t new_state,
+ int handshake_complete,
+ int handshake_confirmed)
+{
+#ifndef OPENSSL_NO_QLOG
+ const char *state_s;
+
+ QLOG_EVENT_BEGIN(qlog, connectivity, connection_state_updated)
+ state_s = map_state_to_qlog(new_state,
+ handshake_complete,
+ handshake_confirmed);
+
+ if (state_s != NULL)
+ QLOG_STR("state", state_s);
+ QLOG_EVENT_END()
+#endif
+}
+
+#ifndef OPENSSL_NO_QLOG
+static const char *quic_err_to_qlog(uint64_t error_code)
+{
+ switch (error_code) {
+ case OSSL_QUIC_ERR_INTERNAL_ERROR:
+ return "internal_error";
+ case OSSL_QUIC_ERR_CONNECTION_REFUSED:
+ return "connection_refused";
+ case OSSL_QUIC_ERR_FLOW_CONTROL_ERROR:
+ return "flow_control_error";
+ case OSSL_QUIC_ERR_STREAM_LIMIT_ERROR:
+ return "stream_limit_error";
+ case OSSL_QUIC_ERR_STREAM_STATE_ERROR:
+ return "stream_state_error";
+ case OSSL_QUIC_ERR_FINAL_SIZE_ERROR:
+ return "final_size_error";
+ case OSSL_QUIC_ERR_FRAME_ENCODING_ERROR:
+ return "frame_encoding_error";
+ case OSSL_QUIC_ERR_TRANSPORT_PARAMETER_ERROR:
+ return "transport_parameter_error";
+ case OSSL_QUIC_ERR_CONNECTION_ID_LIMIT_ERROR:
+ return "connection_id_limit_error";
+ case OSSL_QUIC_ERR_PROTOCOL_VIOLATION:
+ return "protocol_violation";
+ case OSSL_QUIC_ERR_INVALID_TOKEN:
+ return "invalid_token";
+ case OSSL_QUIC_ERR_APPLICATION_ERROR:
+ return "application_error";
+ case OSSL_QUIC_ERR_CRYPTO_BUFFER_EXCEEDED:
+ return "crypto_buffer_exceeded";
+ case OSSL_QUIC_ERR_KEY_UPDATE_ERROR:
+ return "key_update_error";
+ case OSSL_QUIC_ERR_AEAD_LIMIT_REACHED:
+ return "aead_limit_reached";
+ case OSSL_QUIC_ERR_NO_VIABLE_PATH:
+ return "no_viable_path";
+ default:
+ return NULL;
+ }
+}
+#endif
+
+void ossl_qlog_event_connectivity_connection_closed(QLOG *qlog,
+ const QUIC_TERMINATE_CAUSE *tcause)
+{
+#ifndef OPENSSL_NO_QLOG
+ QLOG_EVENT_BEGIN(qlog, connectivity, connection_closed)
+ QLOG_STR("owner", tcause->remote ? "remote" : "local");
+ if (tcause->app) {
+ QLOG_U64("application_code", tcause->error_code);
+ } else {
+ const char *m = quic_err_to_qlog(tcause->error_code);
+ char ce[32];
+
+ if (tcause->error_code >= OSSL_QUIC_ERR_CRYPTO_ERR_BEGIN
+ && tcause->error_code <= OSSL_QUIC_ERR_CRYPTO_ERR_END) {
+ BIO_snprintf(ce, sizeof(ce), "crypto_error_0x%03llx",
+ (unsigned long long)tcause->error_code);
+ m = ce;
+ }
+ /* TODO(QLOG FUTURE): Consider adding ERR information in the output. */
+
+ if (m != NULL)
+ QLOG_STR("connection_code", m);
+ else
+ QLOG_U64("connection_code", tcause->error_code);
+ }
+
+ QLOG_STR_LEN("reason", tcause->reason, tcause->reason_len);
+ QLOG_EVENT_END()
+#endif
+}
+
+#ifndef OPENSSL_NO_QLOG
+static const char *quic_pkt_type_to_qlog(uint32_t pkt_type)
+{
+ switch (pkt_type) {
+ case QUIC_PKT_TYPE_INITIAL:
+ return "initial";
+ case QUIC_PKT_TYPE_HANDSHAKE:
+ return "handshake";
+ case QUIC_PKT_TYPE_0RTT:
+ return "0RTT";
+ case QUIC_PKT_TYPE_1RTT:
+ return "1RTT";
+ case QUIC_PKT_TYPE_VERSION_NEG:
+ return "version_negotiation";
+ case QUIC_PKT_TYPE_RETRY:
+ return "retry";
+ default:
+ return "unknown";
+ }
+}
+#endif
+
+void ossl_qlog_event_recovery_packet_lost(QLOG *qlog,
+ const QUIC_TXPIM_PKT *tpkt)
+{
+#ifndef OPENSSL_NO_QLOG
+ QLOG_EVENT_BEGIN(qlog, recovery, packet_lost)
+ QLOG_BEGIN("header")
+ QLOG_STR("packet_type", quic_pkt_type_to_qlog(tpkt->pkt_type));
+ if (ossl_quic_pkt_type_has_pn(tpkt->pkt_type))
+ QLOG_U64("packet_number", tpkt->ackm_pkt.pkt_num);
+ QLOG_END()
+ QLOG_EVENT_END()
+#endif
+}
+
+#ifndef OPENSSL_NO_QLOG
+# define MAX_ACK_RANGES 32
+
+static void ignore_res(int x) {}
+
+/*
+ * For logging received packets, we need to parse all the frames in the packet
+ * to log them. We should do this separately to the RXDP code because we want to
+ * log the packet and its contents before we start to actually process it in
+ * case it causes an error. We also in general don't want to do other
+ * non-logging related work in the middle of an event logging transaction.
+ * Reparsing packet data allows us to meet these needs while avoiding the need
+ * to keep around bookkeeping data on what frames were in a packet, etc.
+ *
+ * For logging transmitted packets, we actually reuse the same code and reparse
+ * the outgoing packet's payload. This again has the advantage that we only log
+ * a packet when it is actually queued for transmission (and not if something
+ * goes wrong before then) while avoiding the need to keep around bookkeeping
+ * data on what frames it contained.
+ */
+static int log_frame_actual(QLOG *qlog_instance, PACKET *pkt,
+ size_t *need_skip)
+{
+ uint64_t frame_type;
+ OSSL_QUIC_FRAME_ACK ack;
+ OSSL_QUIC_ACK_RANGE ack_ranges[MAX_ACK_RANGES];
+ uint64_t num_ranges, total_ranges;
+ size_t i;
+ PACKET orig_pkt = *pkt;
+
+ if (!ossl_quic_wire_peek_frame_header(pkt, &frame_type, NULL)) {
+ *need_skip = SIZE_MAX;
+ return 0;
+ }
+
+ /*
+     * If something goes wrong decoding a frame, we cannot log it as that
+     * frame type, since we would need to decode it fully in order to do so.
+     * In that case we log it as an unknown frame to assist with diagnosis.
+ */
+ switch (frame_type) {
+ case OSSL_QUIC_FRAME_TYPE_PADDING:
+ QLOG_STR("frame_type", "padding");
+ QLOG_U64("payload_length",
+ ossl_quic_wire_decode_padding(pkt));
+ break;
+ case OSSL_QUIC_FRAME_TYPE_PING:
+ if (!ossl_quic_wire_decode_frame_ping(pkt))
+ goto unknown;
+
+ QLOG_STR("frame_type", "ping");
+ break;
+ case OSSL_QUIC_FRAME_TYPE_ACK_WITHOUT_ECN:
+ case OSSL_QUIC_FRAME_TYPE_ACK_WITH_ECN:
+ if (!ossl_quic_wire_peek_frame_ack_num_ranges(pkt, &num_ranges))
+ goto unknown;
+
+ ack.ack_ranges = ack_ranges;
+ ack.num_ack_ranges = OSSL_NELEM(ack_ranges);
+ if (!ossl_quic_wire_decode_frame_ack(pkt, 3, &ack, &total_ranges))
+ goto unknown;
+
+ QLOG_STR("frame_type", "ack");
+ QLOG_U64("ack_delay", ossl_time2ms(ack.delay_time));
+ if (ack.ecn_present) {
+ QLOG_U64("ect1", ack.ect0);
+ QLOG_U64("ect0", ack.ect1);
+ QLOG_U64("ce", ack.ecnce);
+ }
+ QLOG_BEGIN_ARRAY("acked_ranges");
+ for (i = 0; i < ack.num_ack_ranges; ++i) {
+ QLOG_BEGIN_ARRAY(NULL)
+ QLOG_U64(NULL, ack.ack_ranges[i].start);
+ if (ack.ack_ranges[i].end != ack.ack_ranges[i].start)
+ QLOG_U64(NULL, ack.ack_ranges[i].end);
+ QLOG_END_ARRAY()
+ }
+ QLOG_END_ARRAY()
+ break;
+ case OSSL_QUIC_FRAME_TYPE_RESET_STREAM:
+ {
+ OSSL_QUIC_FRAME_RESET_STREAM f;
+
+ if (!ossl_quic_wire_decode_frame_reset_stream(pkt, &f))
+ goto unknown;
+
+ QLOG_STR("frame_type", "reset_stream");
+ QLOG_U64("stream_id", f.stream_id);
+ QLOG_U64("error_code", f.app_error_code);
+ QLOG_U64("final_size", f.final_size);
+ }
+ break;
+ case OSSL_QUIC_FRAME_TYPE_STOP_SENDING:
+ {
+ OSSL_QUIC_FRAME_STOP_SENDING f;
+
+ if (!ossl_quic_wire_decode_frame_stop_sending(pkt, &f))
+ goto unknown;
+
+ QLOG_STR("frame_type", "stop_sending");
+ QLOG_U64("stream_id", f.stream_id);
+ QLOG_U64("error_code", f.app_error_code);
+ }
+ break;
+ case OSSL_QUIC_FRAME_TYPE_CRYPTO:
+ {
+ OSSL_QUIC_FRAME_CRYPTO f;
+
+ if (!ossl_quic_wire_decode_frame_crypto(pkt, 1, &f))
+ goto unknown;
+
+ QLOG_STR("frame_type", "crypto");
+ QLOG_U64("offset", f.offset);
+ QLOG_U64("payload_length", f.len);
+ *need_skip += (size_t)f.len;
+ }
+ break;
+ case OSSL_QUIC_FRAME_TYPE_STREAM:
+ case OSSL_QUIC_FRAME_TYPE_STREAM_FIN:
+ case OSSL_QUIC_FRAME_TYPE_STREAM_LEN:
+ case OSSL_QUIC_FRAME_TYPE_STREAM_LEN_FIN:
+ case OSSL_QUIC_FRAME_TYPE_STREAM_OFF:
+ case OSSL_QUIC_FRAME_TYPE_STREAM_OFF_FIN:
+ case OSSL_QUIC_FRAME_TYPE_STREAM_OFF_LEN:
+ case OSSL_QUIC_FRAME_TYPE_STREAM_OFF_LEN_FIN:
+ {
+ OSSL_QUIC_FRAME_STREAM f;
+
+ if (!ossl_quic_wire_decode_frame_stream(pkt, 1, &f))
+ goto unknown;
+
+ QLOG_STR("frame_type", "stream");
+ QLOG_U64("stream_id", f.stream_id);
+ QLOG_U64("offset", f.offset);
+ QLOG_U64("payload_length", f.len);
+ QLOG_BOOL("explicit_length", f.has_explicit_len);
+ if (f.is_fin)
+ QLOG_BOOL("fin", 1);
+ *need_skip = f.has_explicit_len
+ ? *need_skip + (size_t)f.len : SIZE_MAX;
+ }
+ break;
+ case OSSL_QUIC_FRAME_TYPE_MAX_DATA:
+ {
+ uint64_t x;
+
+ if (!ossl_quic_wire_decode_frame_max_data(pkt, &x))
+ goto unknown;
+
+ QLOG_STR("frame_type", "max_data");
+ QLOG_U64("maximum", x);
+ }
+ break;
+ case OSSL_QUIC_FRAME_TYPE_MAX_STREAMS_BIDI:
+ case OSSL_QUIC_FRAME_TYPE_MAX_STREAMS_UNI:
+ {
+ uint64_t x;
+
+ if (!ossl_quic_wire_decode_frame_max_streams(pkt, &x))
+ goto unknown;
+
+ QLOG_STR("frame_type", "max_streams");
+ QLOG_STR("stream_type",
+ frame_type == OSSL_QUIC_FRAME_TYPE_MAX_STREAMS_BIDI
+ ? "bidirectional" : "unidirectional");
+ QLOG_U64("maximum", x);
+ }
+ break;
+ case OSSL_QUIC_FRAME_TYPE_MAX_STREAM_DATA:
+ {
+ uint64_t stream_id, max_data;
+
+ if (!ossl_quic_wire_decode_frame_max_stream_data(pkt, &stream_id,
+ &max_data))
+ goto unknown;
+
+ QLOG_STR("frame_type", "max_stream_data");
+ QLOG_U64("stream_id", stream_id);
+ QLOG_U64("maximum", max_data);
+ }
+ break;
+ case OSSL_QUIC_FRAME_TYPE_PATH_CHALLENGE:
+ {
+ uint64_t challenge;
+
+ if (!ossl_quic_wire_decode_frame_path_challenge(pkt, &challenge))
+ goto unknown;
+
+ QLOG_STR("frame_type", "path_challenge");
+ }
+ break;
+ case OSSL_QUIC_FRAME_TYPE_PATH_RESPONSE:
+ {
+ uint64_t challenge;
+
+ if (!ossl_quic_wire_decode_frame_path_response(pkt, &challenge))
+ goto unknown;
+
+ QLOG_STR("frame_type", "path_response");
+ }
+ break;
+ case OSSL_QUIC_FRAME_TYPE_CONN_CLOSE_APP:
+ case OSSL_QUIC_FRAME_TYPE_CONN_CLOSE_TRANSPORT:
+ {
+ OSSL_QUIC_FRAME_CONN_CLOSE f;
+
+ if (!ossl_quic_wire_decode_frame_conn_close(pkt, &f))
+ goto unknown;
+
+ QLOG_STR("frame_type", "connection_close");
+ QLOG_STR("error_space", f.is_app ? "application" : "transport");
+ QLOG_U64("error_code_value", f.error_code);
+ if (f.is_app)
+ QLOG_U64("error_code", f.error_code);
+ if (!f.is_app && f.frame_type != 0)
+ QLOG_U64("trigger_frame_type", f.frame_type);
+ QLOG_STR_LEN("reason", f.reason, f.reason_len);
+ }
+ break;
+ case OSSL_QUIC_FRAME_TYPE_HANDSHAKE_DONE:
+ {
+ if (!ossl_quic_wire_decode_frame_handshake_done(pkt))
+ goto unknown;
+
+ QLOG_STR("frame_type", "handshake_done");
+ }
+ break;
+ case OSSL_QUIC_FRAME_TYPE_NEW_CONN_ID:
+ {
+ OSSL_QUIC_FRAME_NEW_CONN_ID f;
+
+ if (!ossl_quic_wire_decode_frame_new_conn_id(pkt, &f))
+ goto unknown;
+
+ QLOG_STR("frame_type", "new_connection_id");
+ QLOG_U64("sequence_number", f.seq_num);
+ QLOG_U64("retire_prior_to", f.retire_prior_to);
+ QLOG_CID("connection_id", &f.conn_id);
+ QLOG_BIN("stateless_reset_token",
+ f.stateless_reset.token,
+ sizeof(f.stateless_reset.token));
+ }
+ break;
+ case OSSL_QUIC_FRAME_TYPE_RETIRE_CONN_ID:
+ {
+ uint64_t seq_num;
+
+ if (!ossl_quic_wire_decode_frame_retire_conn_id(pkt, &seq_num))
+ goto unknown;
+
+ QLOG_STR("frame_type", "retire_connection_id");
+ QLOG_U64("sequence_number", seq_num);
+ }
+ break;
+ case OSSL_QUIC_FRAME_TYPE_DATA_BLOCKED:
+ {
+ uint64_t x;
+
+ if (!ossl_quic_wire_decode_frame_data_blocked(pkt, &x))
+ goto unknown;
+
+ QLOG_STR("frame_type", "data_blocked");
+ QLOG_U64("limit", x);
+ }
+ break;
+ case OSSL_QUIC_FRAME_TYPE_STREAM_DATA_BLOCKED:
+ {
+ uint64_t stream_id, x;
+
+ if (!ossl_quic_wire_decode_frame_stream_data_blocked(pkt,
+ &stream_id,
+ &x))
+ goto unknown;
+
+ QLOG_STR("frame_type", "stream_data_blocked");
+ QLOG_U64("stream_id", stream_id);
+ QLOG_U64("limit", x);
+ }
+ break;
+ case OSSL_QUIC_FRAME_TYPE_STREAMS_BLOCKED_BIDI:
+ case OSSL_QUIC_FRAME_TYPE_STREAMS_BLOCKED_UNI:
+ {
+ uint64_t x;
+
+ if (!ossl_quic_wire_decode_frame_streams_blocked(pkt, &x))
+ goto unknown;
+
+ QLOG_STR("frame_type", "streams_blocked");
+ QLOG_STR("stream_type",
+ frame_type == OSSL_QUIC_FRAME_TYPE_STREAMS_BLOCKED_BIDI
+ ? "bidirectional" : "unidirectional");
+ QLOG_U64("limit", x);
+ }
+ break;
+ case OSSL_QUIC_FRAME_TYPE_NEW_TOKEN:
+ {
+ const unsigned char *token;
+ size_t token_len;
+
+ if (!ossl_quic_wire_decode_frame_new_token(pkt, &token, &token_len))
+ goto unknown;
+
+ QLOG_STR("frame_type", "new_token");
+ QLOG_BEGIN("token");
+ QLOG_BEGIN("raw");
+ QLOG_BIN("data", token, token_len);
+ QLOG_END();
+ QLOG_END();
+ }
+ break;
+ default:
+unknown:
+ QLOG_STR("frame_type", "unknown");
+ QLOG_U64("frame_type_value", frame_type);
+
+        /*
+         * We can't continue scanning for frames in this case as the frame
+         * length is unknown, so we log the remainder of the packet payload
+         * as the raw data of the frame.
+         */
+ QLOG_BEGIN("raw");
+ QLOG_BIN("data", PACKET_data(&orig_pkt),
+ PACKET_remaining(&orig_pkt));
+ QLOG_END();
+ ignore_res(PACKET_forward(pkt, PACKET_remaining(pkt)));
+ break;
+ }
+
+ return 1;
+}
+
+static void log_frame(QLOG *qlog_instance, PACKET *pkt,
+ size_t *need_skip)
+{
+ size_t rem_before, rem_after;
+
+ rem_before = PACKET_remaining(pkt);
+
+ if (!log_frame_actual(qlog_instance, pkt, need_skip))
+ return;
+
+ rem_after = PACKET_remaining(pkt);
+ QLOG_U64("length", rem_before - rem_after);
+}
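+
+/*
+ * For illustration only: an ACK frame acknowledging PNs 0-2 and 5 with a 25ms
+ * ACK delay would be logged roughly as follows (the exact serialization is
+ * determined by the JSON encoder, and the values here are hypothetical):
+ *
+ *   {"frame_type": "ack", "ack_delay": 25,
+ *    "acked_ranges": [[5], [0, 2]], "length": 7}
+ */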
+
+static int log_frames(QLOG *qlog_instance,
+ const OSSL_QTX_IOVEC *iovec,
+ size_t num_iovec)
+{
+ size_t i;
+ PACKET pkt;
+ size_t need_skip = 0;
+
+ for (i = 0; i < num_iovec; ++i) {
+ if (!PACKET_buf_init(&pkt, iovec[i].buf, iovec[i].buf_len))
+ return 0;
+
+ while (PACKET_remaining(&pkt) > 0) {
+ if (need_skip > 0) {
+ size_t adv = need_skip;
+
+ if (adv > PACKET_remaining(&pkt))
+ adv = PACKET_remaining(&pkt);
+
+ if (!PACKET_forward(&pkt, adv))
+ return 0;
+
+ need_skip -= adv;
+ continue;
+ }
+
+ QLOG_BEGIN(NULL)
+ {
+ log_frame(qlog_instance, &pkt, &need_skip);
+ }
+ QLOG_END()
+ }
+ }
+
+ return 1;
+}
+
+static void log_packet(QLOG *qlog_instance,
+ const QUIC_PKT_HDR *hdr,
+ QUIC_PN pn,
+ const OSSL_QTX_IOVEC *iovec,
+ size_t num_iovec,
+ uint64_t datagram_id)
+{
+ const char *type_s;
+
+ QLOG_BEGIN("header")
+ type_s = quic_pkt_type_to_qlog(hdr->type);
+ if (type_s == NULL)
+ type_s = "unknown";
+
+ QLOG_STR("packet_type", type_s);
+ if (ossl_quic_pkt_type_has_pn(hdr->type))
+ QLOG_U64("packet_number", pn);
+
+ QLOG_CID("dcid", &hdr->dst_conn_id);
+ if (ossl_quic_pkt_type_has_scid(hdr->type))
+ QLOG_CID("scid", &hdr->src_conn_id);
+
+ if (hdr->token_len > 0) {
+ QLOG_BEGIN("token")
+ QLOG_BEGIN("raw")
+ QLOG_BIN("data", hdr->token, hdr->token_len);
+ QLOG_END()
+ QLOG_END()
+ }
+ /* TODO(QLOG FUTURE): flags, length */
+ QLOG_END()
+ QLOG_U64("datagram_id", datagram_id);
+
+ if (ossl_quic_pkt_type_is_encrypted(hdr->type)) {
+ QLOG_BEGIN_ARRAY("frames")
+ log_frames(qlog_instance, iovec, num_iovec);
+ QLOG_END_ARRAY()
+ }
+}
+
+#endif
+
+void ossl_qlog_event_transport_packet_sent(QLOG *qlog,
+ const QUIC_PKT_HDR *hdr,
+ QUIC_PN pn,
+ const OSSL_QTX_IOVEC *iovec,
+ size_t num_iovec,
+ uint64_t datagram_id)
+{
+#ifndef OPENSSL_NO_QLOG
+ QLOG_EVENT_BEGIN(qlog, transport, packet_sent)
+ log_packet(qlog, hdr, pn, iovec, num_iovec, datagram_id);
+ QLOG_EVENT_END()
+#endif
+}
+
+void ossl_qlog_event_transport_packet_received(QLOG *qlog,
+ const QUIC_PKT_HDR *hdr,
+ QUIC_PN pn,
+ const OSSL_QTX_IOVEC *iovec,
+ size_t num_iovec,
+ uint64_t datagram_id)
+{
+#ifndef OPENSSL_NO_QLOG
+ QLOG_EVENT_BEGIN(qlog, transport, packet_received)
+ log_packet(qlog, hdr, pn, iovec, num_iovec, datagram_id);
+ QLOG_EVENT_END()
+#endif
+}
diff --git a/crypto/openssl/ssl/quic/quic_ackm.c b/crypto/openssl/ssl/quic/quic_ackm.c
new file mode 100644
index 000000000000..93c83a36d8fe
--- /dev/null
+++ b/crypto/openssl/ssl/quic/quic_ackm.c
@@ -0,0 +1,1744 @@
+/*
+ * Copyright 2022-2025 The OpenSSL Project Authors. All Rights Reserved.
+ *
+ * Licensed under the Apache License 2.0 (the "License"). You may not use
+ * this file except in compliance with the License. You can obtain a copy
+ * in the file LICENSE in the source distribution or at
+ * https://www.openssl.org/source/license.html
+ */
+
+#include "internal/quic_ackm.h"
+#include "internal/uint_set.h"
+#include "internal/common.h"
+#include <assert.h>
+
+DEFINE_LIST_OF(tx_history, OSSL_ACKM_TX_PKT);
+
+/*
+ * TX Packet History
+ * *****************
+ *
+ * The TX Packet History object tracks information about packets which have been
+ * sent for which we later expect to receive an ACK. It is essentially a simple
+ * database keeping a list of packet information structures in packet number
+ * order which can also be looked up directly by packet number.
+ *
+ * We currently only allow packets to be appended to the list (i.e. the packet
+ * numbers of the packets appended to the list must monotonically increase), as
+ * we do not currently need more general functionality such as a sorted list
+ * insert.
+ */
+struct tx_pkt_history_st {
+ /* A linked list of all our packets. */
+ OSSL_LIST(tx_history) packets;
+
+ /*
+ * Mapping from packet numbers (uint64_t) to (OSSL_ACKM_TX_PKT *)
+ *
+ * Invariant: A packet is in this map if and only if it is in the linked
+ * list.
+ */
+ LHASH_OF(OSSL_ACKM_TX_PKT) *map;
+
+ /*
+ * The lowest packet number which may currently be added to the history list
+ * (inclusive). We do not allow packet numbers to be added to the history
+ * list non-monotonically, so packet numbers must be greater than or equal
+ * to this value.
+ */
+ uint64_t watermark;
+
+ /*
+ * Packet number of the highest packet info structure we have yet appended
+ * to the list. This is usually one less than watermark, except when we have
+ * not added any packet yet.
+ */
+ uint64_t highest_sent;
+};
+
+DEFINE_LHASH_OF_EX(OSSL_ACKM_TX_PKT);
+
+static unsigned long tx_pkt_info_hash(const OSSL_ACKM_TX_PKT *pkt)
+{
+ /* Using low bits of the packet number as the hash should be enough */
+ return (unsigned long)pkt->pkt_num;
+}
+
+static int tx_pkt_info_compare(const OSSL_ACKM_TX_PKT *a,
+ const OSSL_ACKM_TX_PKT *b)
+{
+ if (a->pkt_num < b->pkt_num)
+ return -1;
+ if (a->pkt_num > b->pkt_num)
+ return 1;
+ return 0;
+}
+
+static int
+tx_pkt_history_init(struct tx_pkt_history_st *h)
+{
+ ossl_list_tx_history_init(&h->packets);
+ h->watermark = 0;
+ h->highest_sent = 0;
+
+ h->map = lh_OSSL_ACKM_TX_PKT_new(tx_pkt_info_hash, tx_pkt_info_compare);
+ if (h->map == NULL)
+ return 0;
+
+ return 1;
+}
+
+static void
+tx_pkt_history_destroy(struct tx_pkt_history_st *h)
+{
+ lh_OSSL_ACKM_TX_PKT_free(h->map);
+ h->map = NULL;
+ ossl_list_tx_history_init(&h->packets);
+}
+
+static int
+tx_pkt_history_add_actual(struct tx_pkt_history_st *h,
+ OSSL_ACKM_TX_PKT *pkt)
+{
+ OSSL_ACKM_TX_PKT *existing;
+
+ /*
+ * There should not be any existing packet with this number
+ * in our mapping.
+ */
+ existing = lh_OSSL_ACKM_TX_PKT_retrieve(h->map, pkt);
+ if (!ossl_assert(existing == NULL))
+ return 0;
+
+ /* Should not already be in a list. */
+ if (!ossl_assert(ossl_list_tx_history_next(pkt) == NULL
+ && ossl_list_tx_history_prev(pkt) == NULL))
+ return 0;
+
+ lh_OSSL_ACKM_TX_PKT_insert(h->map, pkt);
+
+ ossl_list_tx_history_insert_tail(&h->packets, pkt);
+ return 1;
+}
+
+/* Adds a packet information structure to the history list. */
+static int
+tx_pkt_history_add(struct tx_pkt_history_st *h,
+ OSSL_ACKM_TX_PKT *pkt)
+{
+ if (!ossl_assert(pkt->pkt_num >= h->watermark))
+ return 0;
+
+ if (tx_pkt_history_add_actual(h, pkt) < 1)
+ return 0;
+
+ h->watermark = pkt->pkt_num + 1;
+ h->highest_sent = pkt->pkt_num;
+ return 1;
+}
+
+/* Retrieve a packet information structure by packet number. */
+static OSSL_ACKM_TX_PKT *
+tx_pkt_history_by_pkt_num(struct tx_pkt_history_st *h, uint64_t pkt_num)
+{
+ OSSL_ACKM_TX_PKT key;
+
+ key.pkt_num = pkt_num;
+
+ return lh_OSSL_ACKM_TX_PKT_retrieve(h->map, &key);
+}
+
+/* Remove a packet information structure from the history log. */
+static int
+tx_pkt_history_remove(struct tx_pkt_history_st *h, uint64_t pkt_num)
+{
+ OSSL_ACKM_TX_PKT key, *pkt;
+ key.pkt_num = pkt_num;
+
+ pkt = tx_pkt_history_by_pkt_num(h, pkt_num);
+ if (pkt == NULL)
+ return 0;
+
+ ossl_list_tx_history_remove(&h->packets, pkt);
+ lh_OSSL_ACKM_TX_PKT_delete(h->map, &key);
+ return 1;
+}
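+
+/*
+ * Illustrative sketch of how the helpers above fit together (the ACKM drives
+ * these internally; the PN values are hypothetical):
+ *
+ *   struct tx_pkt_history_st h;
+ *   OSSL_ACKM_TX_PKT a = {0}, b = {0};
+ *
+ *   a.pkt_num = 10;
+ *   b.pkt_num = 9;
+ *   if (tx_pkt_history_init(&h)) {
+ *       tx_pkt_history_add(&h, &a);          (succeeds; watermark becomes 11)
+ *       tx_pkt_history_add(&h, &b);          (fails; PNs must increase)
+ *       tx_pkt_history_by_pkt_num(&h, 10);   (returns &a)
+ *       tx_pkt_history_remove(&h, 10);       (e.g. once the packet is ACK'd)
+ *       tx_pkt_history_destroy(&h);
+ *   }
+ */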
+
+/*
+ * RX Packet Number Tracking
+ * *************************
+ *
+ * **Background.** The RX side of the ACK manager must track packets we have
+ * received for which we have to generate ACK frames. Broadly, this means we
+ * store a set of packet numbers which we have received but which we do not know
+ * for a fact that the transmitter knows we have received.
+ *
+ * This must handle various situations:
+ *
+ * 1. We receive a packet but have not sent an ACK yet, so the transmitter
+ * does not know whether we have received it or not yet.
+ *
+ * 2. We receive a packet and send an ACK which is lost. We do not
+ * immediately know that the ACK was lost and the transmitter does not know
+ * that we have received the packet.
+ *
+ * 3. We receive a packet and send an ACK which is received by the
+ * transmitter. The transmitter does not immediately respond with an ACK,
+ * or responds with an ACK which is lost. The transmitter knows that we
+ * have received the packet, but we do not know for sure that it knows,
+ * because the ACK we sent could have been lost.
+ *
+ * 4. We receive a packet and send an ACK which is received by the
+ * transmitter. The transmitter subsequently sends us an ACK which confirms
+ * its receipt of the ACK we sent, and we successfully receive that ACK, so
+ *     we know that the transmitter knows that we received the original
+ *     packet.
+ *
+ * Only when we reach case (4) are we relieved of any need to track a given
+ * packet number we have received, because only in this case do we know for sure
+ * that the peer knows we have received the packet. Having reached case (4) we
+ * will never again need to generate an ACK containing the PN in question, but
+ * until we reach that point, we must keep track of the PN as not having been
+ * provably ACKed, as we may have to keep generating ACKs for the given PN not
+ * just until the transmitter receives one, but until we know that it has
+ * received one. This will be referred to herein as "provably ACKed".
+ *
+ * **Duplicate handling.** The above discusses the case where we have received a
+ * packet with a given PN but are at best unsure whether the sender knows we
+ * have received it or not. However, we must also handle the case where we have
+ * yet to receive a packet with a given PN in the first place. The reason for
+ * this is because of the requirement expressed by RFC 9000 s. 12.3:
+ *
+ * "A receiver MUST discard a newly unprotected packet unless it is certain
+ * that it has not processed another packet with the same packet number from
+ * the same packet number space."
+ *
+ * We must ensure we never process a duplicate PN. As such, each possible PN we
+ * can receive must exist in one of the following logical states:
+ *
+ * - We have never processed this PN before
+ * (so if we receive such a PN, it can be processed)
+ *
+ * - We have processed this PN but it has not yet been provably ACKed
+ * (and should therefore be in any future ACK frame generated;
+ * if we receive such a PN again, it must be ignored)
+ *
+ * - We have processed this PN and it has been provably ACKed
+ * (if we receive such a PN again, it must be ignored)
+ *
+ * However, if we were to track this state for every PN ever used in the history
+ * of a connection, the amount of state required would increase unboundedly as
+ * the connection goes on (for example, we would have to store a set of every PN
+ * ever received.)
+ *
+ * RFC 9000 s. 12.3 continues:
+ *
+ * "Endpoints that track all individual packets for the purposes of detecting
+ * duplicates are at risk of accumulating excessive state. The data required
+ * for detecting duplicates can be limited by maintaining a minimum packet
+ * number below which all packets are immediately dropped."
+ *
+ * Moreover, RFC 9000 s. 13.2.3 states that:
+ *
+ * "A receiver MUST retain an ACK Range unless it can ensure that it will not
+ * subsequently accept packets with numbers in that range. Maintaining a
+ * minimum packet number that increases as ranges are discarded is one way to
+ * achieve this with minimal state."
+ *
+ * This touches on a subtlety of the original requirement quoted above: the
+ * receiver MUST discard a packet unless it is certain that it has not processed
+ * another packet with the same PN. However, this does not forbid the receiver
+ * from also discarding some PNs even though it has not yet processed them. In
+ * other words, implementations must be conservative and err in the direction of
+ * assuming a packet is a duplicate, but it is acceptable for this to come at
+ * the cost of falsely identifying some packets as duplicates.
+ *
+ * This allows us to bound the amount of state we must keep, and we adopt the
+ * suggested strategy quoted above to do so. We define a watermark PN below
+ * which all PNs are in the same state. This watermark is only ever increased.
+ * Thus only a small number of recent PNs need their state explicitly tracked,
+ * and all older PNs have an assumed state.
+ *
+ * Any given PN thus falls into one of the following states:
+ *
+ * - (A) The PN is above the watermark but we have not yet received it.
+ *
+ * If we receive such a PN, we should process it and record the PN as
+ * received.
+ *
+ * - (B) The PN is above the watermark and we have received it.
+ *
+ * The PN should be included in any future ACK frame we generate.
+ * If we receive such a PN again, we should ignore it.
+ *
+ * - (C) The PN is below the watermark.
+ *
+ * We do not know whether a packet with the given PN was received or
+ * not. To be safe, if we receive such a packet, it is not processed.
+ *
+ * Note that state (C) corresponds to both "we have processed this PN and it has
+ * been provably ACKed" logical state and a subset of the PNs in the "we have
+ * never processed this PN before" logical state (namely all PNs which were lost
+ * and never received, but which are not recent enough to be above the
+ * watermark). The reason we can merge these states and avoid tracking states
+ * for the PNs in this state is because the provably ACKed and never-received
+ * states are functionally identical in terms of how we need to handle them: we
+ * don't need to do anything for PNs in either of these states, so we don't have
+ * to care about PNs in this state nor do we have to care about distinguishing
+ * the two states for a given PN.
+ *
+ * Note that under this scheme provably ACKed PNs are by definition always below
+ * the watermark; therefore, it follows that when a PN becomes provably ACKed,
+ * the watermark must be immediately increased to exceed it (otherwise we would
+ * keep reporting it in future ACK frames).
+ *
+ * This is in line with RFC 9000 s. 13.2.4's suggested strategy on when
+ * to advance the watermark:
+ *
+ * "When a packet containing an ACK frame is sent, the Largest Acknowledged
+ * field in that frame can be saved. When a packet containing an ACK frame is
+ * acknowledged, the receiver can stop acknowledging packets less than or
+ * equal to the Largest Acknowledged field in the sent ACK frame."
+ *
+ * This is where our scheme's false positives arise. When a packet containing an
+ * ACK frame is itself ACK'd, PNs referenced in that ACK frame become provably
+ * acked, and the watermark is bumped accordingly. However, the Largest
+ * Acknowledged field does not imply that all lower PNs have been received,
+ * because there may be gaps expressed in the ranges of PNs expressed by that
+ * and previous ACK frames. Thus, some unreceived PNs may be moved below the
+ * watermark, and we may subsequently reject those PNs as possibly being
+ * duplicates even though we have not actually received those PNs. Since we bump
+ * the watermark when a PN becomes provably ACKed, it follows that an unreceived
+ * PN falls below the watermark (and thus becomes a false positive for the
+ * purposes of duplicate detection) when a higher-numbered PN becomes provably
+ * ACKed.
+ *
+ * Thus, when PN n becomes provably acked, any unreceived PNs in the range [0,
+ * n) will no longer be processed. Although datagrams may be reordered in the
+ * network, a PN we receive can only become provably ACKed after our own
+ * subsequently generated ACK frame is sent in a future TX packet, and then we
+ * receive another RX PN acknowledging that TX packet. This means that a given RX
+ * PN can only become provably ACKed at least 1 RTT after it is received; it is
+ * unlikely that any reordered datagrams will still be "in the network" (and not
+ * lost) by this time. If this does occur for whatever reason and a late PN is
+ * received, the packet will be discarded unprocessed and the PN is simply
+ * handled as though lost (a "written off" PN).
+ *
+ * **Data structure.** Our state for the RX handling side of the ACK manager, as
+ * discussed above, mainly comprises:
+ *
+ * a) a logical set of PNs, and
+ * b) a monotonically increasing PN counter (the watermark).
+ *
+ * For (a), we define a data structure which stores a logical set of PNs, which
+ * we use to keep track of which PNs we have received but which have not yet
+ * been provably ACKed, and thus will later need to generate an ACK frame for.
+ *
+ * The correspondence with the logical states discussed above is as follows. A
+ * PN is in state (C) if it is below the watermark; otherwise it is in state (B)
+ * if it is in the logical set of PNs, and in state (A) otherwise.
+ *
+ * Note that PNs are only removed from the PN set (when they become provably
+ * ACKed or written off) by virtue of advancement of the watermark. Removing PNs
+ * from the PN set any other way would be ambiguous as it would be
+ * indistinguishable from a PN we have not yet received and risk us processing a
+ * duplicate packet. In other words, for a given PN:
+ *
+ * - State (A) can transition to state (B) or (C)
+ * - State (B) can transition to state (C) only
+ * - State (C) is the terminal state
+ *
+ * We can query the logical set data structure for PNs which have been received
+ * but which have not been provably ACKed when we want to generate ACK frames.
+ * Since ACK frames can be lost and/or we might not know that the peer has
+ * successfully received them, we might generate multiple ACK frames covering a
+ * given PN until that PN becomes provably ACKed and we finally remove it from
+ * our set (by bumping the watermark) as no longer being our concern.
+ *
+ * The data structure used is the UINT_SET structure defined in uint_set.h,
+ * which is used as a PN set. We use the following operations of the structure:
+ *
+ * Insert Range: Used when we receive a new PN.
+ *
+ * Remove Range: Used when bumping the watermark.
+ *
+ * Query: Used to determine if a PN is in the set.
+ *
+ * **Possible duplicates.** A PN is considered a possible duplicate when either:
+ *
+ * a) its PN is already in the PN set (i.e. has already been received), or
+ * b) its PN is below the watermark (i.e. was provably ACKed or written off).
+ *
+ * A packet with a given PN is considered 'processable' when that PN is not
+ * considered a possible duplicate (see ossl_ackm_is_rx_pn_processable).
+ *
+ * **TX/RX interaction.** The watermark is bumped whenever an RX packet becomes
+ * provably ACKed. This occurs when an ACK frame is received by the TX side of
+ * the ACK manager; thus, there is necessary interaction between the TX and RX
+ * sides of the ACK manager.
+ *
+ * This is implemented as follows. When a packet is queued as sent in the TX
+ * side of the ACK manager, it may optionally have a Largest Acked value set on
+ * it. The user of the ACK manager should do this if the packet being
+ * transmitted contains an ACK frame, by setting the field to the Largest Acked
+ * field of that frame. Otherwise, this field should be set to QUIC_PN_INVALID.
+ * When a TX packet is eventually acknowledged which has this field set, it is
+ * used to update the state of the RX side of the ACK manager by bumping the
+ * watermark accordingly.
+ */
+struct rx_pkt_history_st {
+ UINT_SET set;
+
+ /*
+ * Invariant: PNs below this are not in the set.
+ * Invariant: This is monotonic and only ever increases.
+ */
+ QUIC_PN watermark;
+};
+
+static int rx_pkt_history_bump_watermark(struct rx_pkt_history_st *h,
+ QUIC_PN watermark);
+
+static void rx_pkt_history_init(struct rx_pkt_history_st *h)
+{
+ ossl_uint_set_init(&h->set);
+ h->watermark = 0;
+}
+
+static void rx_pkt_history_destroy(struct rx_pkt_history_st *h)
+{
+ ossl_uint_set_destroy(&h->set);
+}
+
+/*
+ * Limit the number of ACK ranges we store to prevent resource consumption DoS
+ * attacks.
+ */
+#define MAX_RX_ACK_RANGES 32
+
+static void rx_pkt_history_trim_range_count(struct rx_pkt_history_st *h)
+{
+ QUIC_PN highest = QUIC_PN_INVALID;
+
+ while (ossl_list_uint_set_num(&h->set) > MAX_RX_ACK_RANGES) {
+ UINT_RANGE r = ossl_list_uint_set_head(&h->set)->range;
+
+ highest = (highest == QUIC_PN_INVALID)
+ ? r.end : ossl_quic_pn_max(highest, r.end);
+
+ ossl_uint_set_remove(&h->set, &r);
+ }
+
+ /*
+ * Bump watermark to cover all PNs we removed to avoid accidental
+ * reprocessing of packets.
+ */
+ if (highest != QUIC_PN_INVALID)
+ rx_pkt_history_bump_watermark(h, highest + 1);
+}
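+
+/*
+ * Example (illustrative): a peer which deliberately leaves a gap after every
+ * PN it sends would otherwise grow the set by one range per packet. Once the
+ * range count exceeds MAX_RX_ACK_RANGES, the lowest ranges are dropped and
+ * the watermark is bumped past them, so the affected PNs are written off
+ * rather than left ambiguous.
+ */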
+
+static int rx_pkt_history_add_pn(struct rx_pkt_history_st *h,
+ QUIC_PN pn)
+{
+ UINT_RANGE r;
+
+ r.start = pn;
+ r.end = pn;
+
+ if (pn < h->watermark)
+ return 1; /* consider this a success case */
+
+ if (ossl_uint_set_insert(&h->set, &r) != 1)
+ return 0;
+
+ rx_pkt_history_trim_range_count(h);
+ return 1;
+}
+
+static int rx_pkt_history_bump_watermark(struct rx_pkt_history_st *h,
+ QUIC_PN watermark)
+{
+ UINT_RANGE r;
+
+ if (watermark <= h->watermark)
+ return 1;
+
+ /* Remove existing PNs below the watermark. */
+ r.start = 0;
+ r.end = watermark - 1;
+ if (ossl_uint_set_remove(&h->set, &r) != 1)
+ return 0;
+
+ h->watermark = watermark;
+ return 1;
+}
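+
+/*
+ * Worked example (illustrative): suppose the watermark is 0 and we have
+ * received PNs 5 and 7, so the set is {5, 7}; PNs 0-4 and 6 are in state (A)
+ * and PNs 5 and 7 in state (B). If an ACK frame we sent with Largest Acked =
+ * 7 is itself acknowledged, the watermark is bumped to 8: the set becomes
+ * empty and every PN <= 7 moves to state (C), including the never-received
+ * PN 6, which is thereafter treated as a possible duplicate and written off.
+ */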
+
+/*
+ * ACK Manager Implementation
+ * **************************
+ * Implementation of the ACK manager proper.
+ */
+
+/* Constants used by the ACK manager; see RFC 9002. */
+#define K_GRANULARITY (1 * OSSL_TIME_MS)
+#define K_PKT_THRESHOLD 3
+#define K_TIME_THRESHOLD_NUM 9
+#define K_TIME_THRESHOLD_DEN 8
+
+/* The maximum number of times we allow PTO to be doubled. */
+#define MAX_PTO_COUNT 16
+
+/* Default maximum amount of time to leave an ACK-eliciting packet un-ACK'd. */
+#define DEFAULT_TX_MAX_ACK_DELAY ossl_ms2time(QUIC_DEFAULT_MAX_ACK_DELAY)
+
+struct ossl_ackm_st {
+ /* Our list of transmitted packets. Corresponds to RFC 9002 sent_packets. */
+ struct tx_pkt_history_st tx_history[QUIC_PN_SPACE_NUM];
+
+ /* Our list of received PNs which are not yet provably acked. */
+ struct rx_pkt_history_st rx_history[QUIC_PN_SPACE_NUM];
+
+ /* Polymorphic dependencies that we consume. */
+ OSSL_TIME (*now)(void *arg);
+ void *now_arg;
+ OSSL_STATM *statm;
+ const OSSL_CC_METHOD *cc_method;
+ OSSL_CC_DATA *cc_data;
+
+ /* RFC 9002 variables. */
+ uint32_t pto_count;
+ QUIC_PN largest_acked_pkt[QUIC_PN_SPACE_NUM];
+ OSSL_TIME time_of_last_ack_eliciting_pkt[QUIC_PN_SPACE_NUM];
+ OSSL_TIME loss_time[QUIC_PN_SPACE_NUM];
+ OSSL_TIME loss_detection_deadline;
+
+ /* Lowest PN which is still not known to be ACKed. */
+ QUIC_PN lowest_unacked_pkt[QUIC_PN_SPACE_NUM];
+
+ /* Time at which we got our first RTT sample, or 0. */
+ OSSL_TIME first_rtt_sample;
+
+ /*
+ * A packet's num_bytes are added to this if it is inflight,
+ * and removed again once ack'd/lost/discarded.
+ */
+ uint64_t bytes_in_flight;
+
+ /*
+ * A packet's num_bytes are added to this if it is both inflight and
+ * ack-eliciting, and removed again once ack'd/lost/discarded.
+ */
+ uint64_t ack_eliciting_bytes_in_flight[QUIC_PN_SPACE_NUM];
+
+ /* Count of ECN-CE events. */
+ uint64_t peer_ecnce[QUIC_PN_SPACE_NUM];
+
+ /* Set to 1 when the handshake is confirmed. */
+ char handshake_confirmed;
+
+ /* Set to 1 when attached to server channel */
+ char is_server;
+
+ /* Set to 1 when the peer has completed address validation. */
+ char peer_completed_addr_validation;
+
+ /* Set to 1 when a PN space has been discarded. */
+ char discarded[QUIC_PN_SPACE_NUM];
+
+ /* Set to 1 when we think an ACK frame should be generated. */
+ char rx_ack_desired[QUIC_PN_SPACE_NUM];
+
+ /* Set to 1 if an ACK frame has ever been generated. */
+ char rx_ack_generated[QUIC_PN_SPACE_NUM];
+
+ /* Probe request counts for reporting to the user. */
+ OSSL_ACKM_PROBE_INFO pending_probe;
+
+ /* Generated ACK frames for each PN space. */
+ OSSL_QUIC_FRAME_ACK ack[QUIC_PN_SPACE_NUM];
+ OSSL_QUIC_ACK_RANGE ack_ranges[QUIC_PN_SPACE_NUM][MAX_RX_ACK_RANGES];
+
+ /* Other RX state. */
+ /* Largest PN we have RX'd. */
+ QUIC_PN rx_largest_pn[QUIC_PN_SPACE_NUM];
+
+ /* Time at which the PN in rx_largest_pn was RX'd. */
+ OSSL_TIME rx_largest_time[QUIC_PN_SPACE_NUM];
+
+ /*
+ * ECN event counters. Each time we receive a packet with a given ECN label,
+ * the corresponding ECN counter here is incremented.
+ */
+ uint64_t rx_ect0[QUIC_PN_SPACE_NUM];
+ uint64_t rx_ect1[QUIC_PN_SPACE_NUM];
+ uint64_t rx_ecnce[QUIC_PN_SPACE_NUM];
+
+ /*
+ * Number of ACK-eliciting packets since last ACK. We use this to defer
+ * emitting ACK frames until a threshold number of ACK-eliciting packets
+ * have been received.
+ */
+ uint32_t rx_ack_eliciting_pkts_since_last_ack[QUIC_PN_SPACE_NUM];
+
+ /*
+ * The ACK frame coalescing deadline at which we should flush any unsent ACK
+ * frames.
+ */
+ OSSL_TIME rx_ack_flush_deadline[QUIC_PN_SPACE_NUM];
+
+ /*
+ * The RX maximum ACK delay (the maximum amount of time our peer might
+ * wait to send us an ACK after receiving an ACK-eliciting packet).
+ */
+ OSSL_TIME rx_max_ack_delay;
+
+ /*
+ * The TX maximum ACK delay (the maximum amount of time we allow ourselves
+ * to wait before generating an ACK after receiving an ACK-eliciting
+ * packet).
+ */
+ OSSL_TIME tx_max_ack_delay;
+
+ /* Callbacks for deadline updates. */
+ void (*loss_detection_deadline_cb)(OSSL_TIME deadline, void *arg);
+ void *loss_detection_deadline_cb_arg;
+
+ void (*ack_deadline_cb)(OSSL_TIME deadline, int pkt_space, void *arg);
+ void *ack_deadline_cb_arg;
+};
+
+static ossl_inline uint32_t min_u32(uint32_t x, uint32_t y)
+{
+ return x < y ? x : y;
+}
+
+/*
+ * Get TX history for a given packet number space. Must not have been
+ * discarded.
+ */
+static struct tx_pkt_history_st *get_tx_history(OSSL_ACKM *ackm, int pkt_space)
+{
+ assert(!ackm->discarded[pkt_space]);
+
+ return &ackm->tx_history[pkt_space];
+}
+
+/*
+ * Get RX history for a given packet number space. Must not have been
+ * discarded.
+ */
+static struct rx_pkt_history_st *get_rx_history(OSSL_ACKM *ackm, int pkt_space)
+{
+ assert(!ackm->discarded[pkt_space]);
+
+ return &ackm->rx_history[pkt_space];
+}
+
+/* Does the newly-acknowledged list contain any ack-eliciting packet? */
+static int ack_includes_ack_eliciting(OSSL_ACKM_TX_PKT *pkt)
+{
+ for (; pkt != NULL; pkt = pkt->anext)
+ if (pkt->is_ack_eliciting)
+ return 1;
+
+ return 0;
+}
+
+/* Return number of ACK-eliciting bytes in flight across all PN spaces. */
+static uint64_t ackm_ack_eliciting_bytes_in_flight(OSSL_ACKM *ackm)
+{
+ int i;
+ uint64_t total = 0;
+
+ for (i = 0; i < QUIC_PN_SPACE_NUM; ++i)
+ total += ackm->ack_eliciting_bytes_in_flight[i];
+
+ return total;
+}
+
+/* Return 1 if the range contains the given PN. */
+static int range_contains(const OSSL_QUIC_ACK_RANGE *range, QUIC_PN pn)
+{
+ return pn >= range->start && pn <= range->end;
+}
+
+/*
+ * Given a logical representation of an ACK frame 'ack', create a singly-linked
+ * list of the newly ACK'd frames; that is, of frames which are matched by the
+ * list of PN ranges contained in the ACK frame. The packet structures in the
+ * list returned are removed from the TX history list. Returns a pointer to the
+ * list head (or NULL) if empty.
+ */
+static OSSL_ACKM_TX_PKT *ackm_detect_and_remove_newly_acked_pkts(OSSL_ACKM *ackm,
+ const OSSL_QUIC_FRAME_ACK *ack,
+ int pkt_space)
+{
+ OSSL_ACKM_TX_PKT *acked_pkts = NULL, **fixup = &acked_pkts, *pkt, *pprev;
+ struct tx_pkt_history_st *h;
+ size_t ridx = 0;
+
+ assert(ack->num_ack_ranges > 0);
+
+ /*
+ * Our history list is a list of packets sorted in ascending order
+ * by packet number.
+ *
+ * ack->ack_ranges is a list of packet number ranges in descending order.
+ *
+ * Walk through our history list from the end in order to efficiently detect
+ * membership in the specified ack ranges. As an optimization, we use our
+ * hashtable to try and skip to the first matching packet. This may fail if
+ * the ACK ranges given include nonexistent packets.
+ */
+ h = get_tx_history(ackm, pkt_space);
+
+ pkt = tx_pkt_history_by_pkt_num(h, ack->ack_ranges[0].end);
+ if (pkt == NULL)
+ pkt = ossl_list_tx_history_tail(&h->packets);
+
+ for (; pkt != NULL; pkt = pprev) {
+ /*
+ * Save prev value as it will be zeroed if we remove the packet from the
+ * history list below.
+ */
+ pprev = ossl_list_tx_history_prev(pkt);
+
+ for (;; ++ridx) {
+ if (ridx >= ack->num_ack_ranges) {
+ /*
+ * We have exhausted all ranges so stop here, even if there are
+ * more packets to look at.
+ */
+ goto stop;
+ }
+
+ if (range_contains(&ack->ack_ranges[ridx], pkt->pkt_num)) {
+ /* We have matched this range. */
+ tx_pkt_history_remove(h, pkt->pkt_num);
+
+ *fixup = pkt;
+ fixup = &pkt->anext;
+ *fixup = NULL;
+ break;
+ } else if (pkt->pkt_num > ack->ack_ranges[ridx].end) {
+ /*
+ * We have not reached this range yet in our list, so do not
+ * advance ridx.
+ */
+ break;
+ } else {
+ /*
+ * We have moved beyond this range, so advance to the next range
+ * and try matching again.
+ */
+ assert(pkt->pkt_num < ack->ack_ranges[ridx].start);
+ continue;
+ }
+ }
+ }
+stop:
+
+ return acked_pkts;
+}
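+
+/*
+ * Worked example (illustrative): with history PNs 1..10 and an ACK frame
+ * carrying the descending ranges [9..10] and [5..6], the walk matches and
+ * removes 10 and 9 against the first range; at 8 it advances to the second
+ * range and then holds there (8 and 7 lie above that range's end); 6 and 5
+ * match and are removed; at 4 the ranges are exhausted and the scan stops.
+ * The returned list is 10, 9, 6, 5.
+ */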
+
+/*
+ * Create a singly-linked list of newly detected-lost packets in the given
+ * packet number space. Returns the head of the list or NULL if no packets were
+ * detected lost. The packets in the list are removed from the TX history list.
+ */
+static OSSL_ACKM_TX_PKT *ackm_detect_and_remove_lost_pkts(OSSL_ACKM *ackm,
+ int pkt_space)
+{
+ OSSL_ACKM_TX_PKT *lost_pkts = NULL, **fixup = &lost_pkts, *pkt, *pnext;
+ OSSL_TIME loss_delay, lost_send_time, now;
+ OSSL_RTT_INFO rtt;
+ struct tx_pkt_history_st *h;
+
+ assert(ackm->largest_acked_pkt[pkt_space] != QUIC_PN_INVALID);
+
+ ossl_statm_get_rtt_info(ackm->statm, &rtt);
+
+ ackm->loss_time[pkt_space] = ossl_time_zero();
+
+ loss_delay = ossl_time_multiply(ossl_time_max(rtt.latest_rtt,
+ rtt.smoothed_rtt),
+ K_TIME_THRESHOLD_NUM);
+ loss_delay = ossl_time_divide(loss_delay, K_TIME_THRESHOLD_DEN);
+
+ /* Minimum time of K_GRANULARITY before packets are deemed lost. */
+ loss_delay = ossl_time_max(loss_delay, ossl_ticks2time(K_GRANULARITY));
+
+ /* Packets sent before this time are deemed lost. */
+ now = ackm->now(ackm->now_arg);
+ lost_send_time = ossl_time_subtract(now, loss_delay);
+
+ h = get_tx_history(ackm, pkt_space);
+ pkt = ossl_list_tx_history_head(&h->packets);
+
+ for (; pkt != NULL; pkt = pnext) {
+ assert(pkt_space == pkt->pkt_space);
+
+ /*
+ * Save prev value as it will be zeroed if we remove the packet from the
+ * history list below.
+ */
+ pnext = ossl_list_tx_history_next(pkt);
+
+ if (pkt->pkt_num > ackm->largest_acked_pkt[pkt_space])
+ continue;
+
+ /*
+ * Mark packet as lost, or set time when it should be marked.
+ */
+ if (ossl_time_compare(pkt->time, lost_send_time) <= 0
+ || ackm->largest_acked_pkt[pkt_space]
+ >= pkt->pkt_num + K_PKT_THRESHOLD) {
+ tx_pkt_history_remove(h, pkt->pkt_num);
+
+ *fixup = pkt;
+ fixup = &pkt->lnext;
+ *fixup = NULL;
+ } else {
+ if (ossl_time_is_zero(ackm->loss_time[pkt_space]))
+ ackm->loss_time[pkt_space] =
+ ossl_time_add(pkt->time, loss_delay);
+ else
+ ackm->loss_time[pkt_space] =
+ ossl_time_min(ackm->loss_time[pkt_space],
+ ossl_time_add(pkt->time, loss_delay));
+ }
+ }
+
+ return lost_pkts;
+}
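+
+/*
+ * Illustrative numbers: with latest_rtt = 40ms and smoothed_rtt = 48ms, the
+ * loss delay is 48ms * 9/8 = 54ms, so an unacknowledged packet is declared
+ * lost once it was sent more than 54ms ago, or once a packet at least
+ * K_PKT_THRESHOLD (3) PNs above it has been acknowledged (RFC 9002 s. 6.1).
+ */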
+
+static OSSL_TIME ackm_get_loss_time_and_space(OSSL_ACKM *ackm, int *pspace)
+{
+ OSSL_TIME time = ackm->loss_time[QUIC_PN_SPACE_INITIAL];
+ int i, space = QUIC_PN_SPACE_INITIAL;
+
+ for (i = space + 1; i < QUIC_PN_SPACE_NUM; ++i)
+ if (ossl_time_is_zero(time)
+ || ossl_time_compare(ackm->loss_time[i], time) == -1) {
+ time = ackm->loss_time[i];
+ space = i;
+ }
+
+ *pspace = space;
+ return time;
+}
+
+static OSSL_TIME ackm_get_pto_time_and_space(OSSL_ACKM *ackm, int *space)
+{
+ OSSL_RTT_INFO rtt;
+ OSSL_TIME duration;
+ OSSL_TIME pto_timeout = ossl_time_infinite(), t;
+ int pto_space = QUIC_PN_SPACE_INITIAL, i;
+
+ ossl_statm_get_rtt_info(ackm->statm, &rtt);
+
+ duration
+ = ossl_time_add(rtt.smoothed_rtt,
+ ossl_time_max(ossl_time_multiply(rtt.rtt_variance, 4),
+ ossl_ticks2time(K_GRANULARITY)));
+
+ duration
+ = ossl_time_multiply(duration,
+ (uint64_t)1 << min_u32(ackm->pto_count,
+ MAX_PTO_COUNT));
+
+ /* Anti-deadlock PTO starts from the current time. */
+ if (ackm_ack_eliciting_bytes_in_flight(ackm) == 0) {
+ assert(!ackm->peer_completed_addr_validation);
+
+ *space = ackm->discarded[QUIC_PN_SPACE_INITIAL]
+ ? QUIC_PN_SPACE_HANDSHAKE
+ : QUIC_PN_SPACE_INITIAL;
+ return ossl_time_add(ackm->now(ackm->now_arg), duration);
+ }
+
+ for (i = QUIC_PN_SPACE_INITIAL; i < QUIC_PN_SPACE_NUM; ++i) {
+        /*
+         * Per RFC 9002 s. 6.2.2.1, the probe timeout is kept armed until the
+         * handshake is confirmed (i.e., until the client sees the
+         * HANDSHAKE_DONE message from the server).
+         */
+ if (ackm->ack_eliciting_bytes_in_flight[i] == 0 &&
+ (ackm->handshake_confirmed == 1 || ackm->is_server == 1))
+ continue;
+
+ if (i == QUIC_PN_SPACE_APP) {
+ /* Skip application data until handshake confirmed. */
+ if (!ackm->handshake_confirmed)
+ break;
+
+ /* Include max_ack_delay and backoff for app data. */
+ if (!ossl_time_is_infinite(ackm->rx_max_ack_delay)) {
+ uint64_t factor
+ = (uint64_t)1 << min_u32(ackm->pto_count, MAX_PTO_COUNT);
+
+ duration
+ = ossl_time_add(duration,
+ ossl_time_multiply(ackm->rx_max_ack_delay,
+ factor));
+ }
+ }
+
+        /*
+         * Only re-arm the timer if the stack has sent at least one
+         * ACK-eliciting packet in this PN space. If it has not, the
+         * corresponding timestamp is zero (i.e. the epoch, Jan 1 1970) and we
+         * must not arm a timer lying in the past.
+         */
+ if (!ossl_time_is_zero(ackm->time_of_last_ack_eliciting_pkt[i])) {
+ t = ossl_time_add(ackm->time_of_last_ack_eliciting_pkt[i], duration);
+ if (ossl_time_compare(t, pto_timeout) < 0) {
+ pto_timeout = t;
+ pto_space = i;
+ }
+ }
+ }
+
+ *space = pto_space;
+ return pto_timeout;
+}
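+
+/*
+ * Illustrative numbers: with smoothed_rtt = 50ms, rtt_variance = 10ms and
+ * pto_count = 1, the base duration is (50 + max(4 * 10, 1))ms = 90ms, doubled
+ * once to 180ms; for the application PN space, rx_max_ack_delay (scaled by
+ * the same backoff factor) is added on top, per RFC 9002 s. 6.2.1.
+ */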
+
+static void ackm_set_loss_detection_timer_actual(OSSL_ACKM *ackm,
+ OSSL_TIME deadline)
+{
+ ackm->loss_detection_deadline = deadline;
+
+ if (ackm->loss_detection_deadline_cb != NULL)
+ ackm->loss_detection_deadline_cb(deadline,
+ ackm->loss_detection_deadline_cb_arg);
+}
+
+static int ackm_set_loss_detection_timer(OSSL_ACKM *ackm)
+{
+ int space;
+ OSSL_TIME earliest_loss_time, timeout;
+
+ earliest_loss_time = ackm_get_loss_time_and_space(ackm, &space);
+ if (!ossl_time_is_zero(earliest_loss_time)) {
+ /* Time threshold loss detection. */
+ ackm_set_loss_detection_timer_actual(ackm, earliest_loss_time);
+ return 1;
+ }
+
+ if (ackm_ack_eliciting_bytes_in_flight(ackm) == 0
+ && ackm->peer_completed_addr_validation) {
+ /*
+ * Nothing to detect lost, so no timer is set. However, the client
+ * needs to arm the timer if the server might be blocked by the
+ * anti-amplification limit.
+ */
+ ackm_set_loss_detection_timer_actual(ackm, ossl_time_zero());
+ return 1;
+ }
+
+ timeout = ackm_get_pto_time_and_space(ackm, &space);
+ ackm_set_loss_detection_timer_actual(ackm, timeout);
+ return 1;
+}
+
+static int ackm_in_persistent_congestion(OSSL_ACKM *ackm,
+ const OSSL_ACKM_TX_PKT *lpkt)
+{
+ /* TODO(QUIC FUTURE): Persistent congestion not currently implemented. */
+ return 0;
+}
+
+static void ackm_on_pkts_lost(OSSL_ACKM *ackm, int pkt_space,
+ const OSSL_ACKM_TX_PKT *lpkt, int pseudo)
+{
+ const OSSL_ACKM_TX_PKT *p, *pnext;
+ OSSL_RTT_INFO rtt;
+ QUIC_PN largest_pn_lost = 0;
+ OSSL_CC_LOSS_INFO loss_info = {0};
+ uint32_t flags = 0;
+
+ for (p = lpkt; p != NULL; p = pnext) {
+ pnext = p->lnext;
+
+ if (p->is_inflight) {
+ ackm->bytes_in_flight -= p->num_bytes;
+ if (p->is_ack_eliciting)
+ ackm->ack_eliciting_bytes_in_flight[p->pkt_space]
+ -= p->num_bytes;
+
+ if (p->pkt_num > largest_pn_lost)
+ largest_pn_lost = p->pkt_num;
+
+ if (!pseudo) {
+ /*
+ * If this is pseudo-loss (e.g. during connection retry) we do not
+ * inform the CC as it is not a real loss and not reflective of
+ * network conditions.
+ */
+ loss_info.tx_time = p->time;
+ loss_info.tx_size = p->num_bytes;
+
+ ackm->cc_method->on_data_lost(ackm->cc_data, &loss_info);
+ }
+ }
+
+ p->on_lost(p->cb_arg);
+ }
+
+ /*
+ * Persistent congestion can only be considered if we have gotten at least
+ * one RTT sample.
+ */
+ ossl_statm_get_rtt_info(ackm->statm, &rtt);
+ if (!ossl_time_is_zero(ackm->first_rtt_sample)
+ && ackm_in_persistent_congestion(ackm, lpkt))
+ flags |= OSSL_CC_LOST_FLAG_PERSISTENT_CONGESTION;
+
+ ackm->cc_method->on_data_lost_finished(ackm->cc_data, flags);
+}
+
+static void ackm_on_pkts_acked(OSSL_ACKM *ackm, const OSSL_ACKM_TX_PKT *apkt)
+{
+ const OSSL_ACKM_TX_PKT *anext;
+ QUIC_PN last_pn_acked = 0;
+    OSSL_CC_ACK_INFO ainfo = {0};
+    int is_inflight;
+
+ for (; apkt != NULL; apkt = anext) {
+ if (apkt->is_inflight) {
+ ackm->bytes_in_flight -= apkt->num_bytes;
+ if (apkt->is_ack_eliciting)
+ ackm->ack_eliciting_bytes_in_flight[apkt->pkt_space]
+ -= apkt->num_bytes;
+
+ if (apkt->pkt_num > last_pn_acked)
+ last_pn_acked = apkt->pkt_num;
+
+ if (apkt->largest_acked != QUIC_PN_INVALID)
+ /*
+ * This can fail, but it is monotonic; worst case we try again
+ * next time.
+ */
+ rx_pkt_history_bump_watermark(get_rx_history(ackm,
+ apkt->pkt_space),
+ apkt->largest_acked + 1);
+ }
+
+ ainfo.tx_time = apkt->time;
+ ainfo.tx_size = apkt->num_bytes;
+
+ anext = apkt->anext;
+ apkt->on_acked(apkt->cb_arg); /* may free apkt */
+
+ if (apkt->is_inflight)
+ ackm->cc_method->on_data_acked(ackm->cc_data, &ainfo);
+ }
+}
+
+OSSL_ACKM *ossl_ackm_new(OSSL_TIME (*now)(void *arg),
+ void *now_arg,
+ OSSL_STATM *statm,
+ const OSSL_CC_METHOD *cc_method,
+ OSSL_CC_DATA *cc_data,
+ int is_server)
+{
+ OSSL_ACKM *ackm;
+ int i;
+
+ ackm = OPENSSL_zalloc(sizeof(OSSL_ACKM));
+ if (ackm == NULL)
+ return NULL;
+
+ for (i = 0; i < (int)OSSL_NELEM(ackm->tx_history); ++i) {
+ ackm->largest_acked_pkt[i] = QUIC_PN_INVALID;
+ ackm->rx_ack_flush_deadline[i] = ossl_time_infinite();
+ if (tx_pkt_history_init(&ackm->tx_history[i]) < 1)
+ goto err;
+ }
+
+ for (i = 0; i < (int)OSSL_NELEM(ackm->rx_history); ++i)
+ rx_pkt_history_init(&ackm->rx_history[i]);
+
+ ackm->now = now;
+ ackm->now_arg = now_arg;
+ ackm->statm = statm;
+ ackm->cc_method = cc_method;
+ ackm->cc_data = cc_data;
+ ackm->is_server = (char)is_server;
+
+ ackm->rx_max_ack_delay = ossl_ms2time(QUIC_DEFAULT_MAX_ACK_DELAY);
+ ackm->tx_max_ack_delay = DEFAULT_TX_MAX_ACK_DELAY;
+
+ return ackm;
+
+err:
+ while (--i >= 0)
+ tx_pkt_history_destroy(&ackm->tx_history[i]);
+
+ OPENSSL_free(ackm);
+ return NULL;
+}
+
+void ossl_ackm_free(OSSL_ACKM *ackm)
+{
+ size_t i;
+
+ if (ackm == NULL)
+ return;
+
+ for (i = 0; i < OSSL_NELEM(ackm->tx_history); ++i)
+ if (!ackm->discarded[i]) {
+ tx_pkt_history_destroy(&ackm->tx_history[i]);
+ rx_pkt_history_destroy(&ackm->rx_history[i]);
+ }
+
+ OPENSSL_free(ackm);
+}
+
+int ossl_ackm_on_tx_packet(OSSL_ACKM *ackm, OSSL_ACKM_TX_PKT *pkt)
+{
+ struct tx_pkt_history_st *h = get_tx_history(ackm, pkt->pkt_space);
+
+ /* Time must be set and not move backwards. */
+ if (ossl_time_is_zero(pkt->time)
+ || ossl_time_compare(ackm->time_of_last_ack_eliciting_pkt[pkt->pkt_space],
+ pkt->time) > 0)
+ return 0;
+
+ /* Must have non-zero number of bytes. */
+ if (pkt->num_bytes == 0)
+ return 0;
+
+ /* Does not make any sense for a non-in-flight packet to be ACK-eliciting. */
+ if (!pkt->is_inflight && pkt->is_ack_eliciting)
+ return 0;
+
+ if (tx_pkt_history_add(h, pkt) == 0)
+ return 0;
+
+ if (pkt->is_inflight) {
+ if (pkt->is_ack_eliciting) {
+ ackm->time_of_last_ack_eliciting_pkt[pkt->pkt_space] = pkt->time;
+ ackm->ack_eliciting_bytes_in_flight[pkt->pkt_space]
+ += pkt->num_bytes;
+ }
+
+ ackm->bytes_in_flight += pkt->num_bytes;
+ ackm_set_loss_detection_timer(ackm);
+
+ ackm->cc_method->on_data_sent(ackm->cc_data, pkt->num_bytes);
+ }
+
+ return 1;
+}
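+
+/*
+ * Caller-side sketch (illustrative; the real callers live elsewhere in the
+ * QUIC stack, and my_on_acked, my_on_lost, my_on_discarded and my_state are
+ * hypothetical names):
+ *
+ *   OSSL_ACKM_TX_PKT pkt = {0};
+ *
+ *   pkt.pkt_num          = pn;
+ *   pkt.pkt_space        = QUIC_PN_SPACE_APP;
+ *   pkt.is_inflight      = 1;
+ *   pkt.is_ack_eliciting = 1;
+ *   pkt.num_bytes        = encoded_len;
+ *   pkt.time             = now;
+ *   pkt.largest_acked    = QUIC_PN_INVALID;  (set only if the packet
+ *                                             carries an ACK frame)
+ *   pkt.on_acked         = my_on_acked;
+ *   pkt.on_lost          = my_on_lost;
+ *   pkt.on_discarded     = my_on_discarded;
+ *   pkt.cb_arg           = my_state;
+ *
+ *   ossl_ackm_on_tx_packet(ackm, &pkt);
+ */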
+
+int ossl_ackm_on_rx_datagram(OSSL_ACKM *ackm, size_t num_bytes)
+{
+ /* No-op on the client. */
+ return 1;
+}
+
+static void ackm_process_ecn(OSSL_ACKM *ackm, const OSSL_QUIC_FRAME_ACK *ack,
+ int pkt_space)
+{
+ struct tx_pkt_history_st *h;
+ OSSL_ACKM_TX_PKT *pkt;
+ OSSL_CC_ECN_INFO ecn_info = {0};
+
+ /*
+ * If the ECN-CE counter reported by the peer has increased, this could
+ * be a new congestion event.
+ */
+ if (ack->ecnce > ackm->peer_ecnce[pkt_space]) {
+ ackm->peer_ecnce[pkt_space] = ack->ecnce;
+
+ h = get_tx_history(ackm, pkt_space);
+ pkt = tx_pkt_history_by_pkt_num(h, ack->ack_ranges[0].end);
+ if (pkt == NULL)
+ return;
+
+ ecn_info.largest_acked_time = pkt->time;
+ ackm->cc_method->on_ecn(ackm->cc_data, &ecn_info);
+ }
+}
+
+int ossl_ackm_on_rx_ack_frame(OSSL_ACKM *ackm, const OSSL_QUIC_FRAME_ACK *ack,
+ int pkt_space, OSSL_TIME rx_time)
+{
+ OSSL_ACKM_TX_PKT *na_pkts, *lost_pkts;
+ int must_set_timer = 0;
+
+ if (ackm->largest_acked_pkt[pkt_space] == QUIC_PN_INVALID)
+ ackm->largest_acked_pkt[pkt_space] = ack->ack_ranges[0].end;
+ else
+ ackm->largest_acked_pkt[pkt_space]
+ = ossl_quic_pn_max(ackm->largest_acked_pkt[pkt_space],
+ ack->ack_ranges[0].end);
+
+ /*
+ * If we get an ACK in the handshake space, address validation is completed.
+ * Make sure we update the timer, even if no packets were ACK'd.
+ */
+ if (!ackm->peer_completed_addr_validation
+ && pkt_space == QUIC_PN_SPACE_HANDSHAKE) {
+ ackm->peer_completed_addr_validation = 1;
+ must_set_timer = 1;
+ }
+
+ /*
+ * Find packets that are newly acknowledged and remove them from the list.
+ */
+ na_pkts = ackm_detect_and_remove_newly_acked_pkts(ackm, ack, pkt_space);
+ if (na_pkts == NULL) {
+ if (must_set_timer)
+ ackm_set_loss_detection_timer(ackm);
+
+ return 1;
+ }
+
+ /*
+ * Update the RTT if the largest acknowledged is newly acked and at least
+ * one ACK-eliciting packet was newly acked.
+ *
+ * First packet in the list is always the one with the largest PN.
+ */
+ if (na_pkts->pkt_num == ack->ack_ranges[0].end &&
+ ack_includes_ack_eliciting(na_pkts)) {
+ OSSL_TIME now = ackm->now(ackm->now_arg), ack_delay;
+ if (ossl_time_is_zero(ackm->first_rtt_sample))
+ ackm->first_rtt_sample = now;
+
+ /* Enforce maximum ACK delay. */
+ ack_delay = ack->delay_time;
+ if (ackm->handshake_confirmed)
+ ack_delay = ossl_time_min(ack_delay, ackm->rx_max_ack_delay);
+
+ ossl_statm_update_rtt(ackm->statm, ack_delay,
+ ossl_time_subtract(now, na_pkts->time));
+ }
+
+ /*
+ * Process ECN information if present.
+ *
+ * We deliberately do most ECN processing in the ACKM rather than the
+ * congestion controller to avoid having to give the congestion controller
+ * access to ACKM internal state.
+ */
+ if (ack->ecn_present)
+ ackm_process_ecn(ackm, ack, pkt_space);
+
+ /* Handle inferred loss. */
+ lost_pkts = ackm_detect_and_remove_lost_pkts(ackm, pkt_space);
+ if (lost_pkts != NULL)
+ ackm_on_pkts_lost(ackm, pkt_space, lost_pkts, /*pseudo=*/0);
+
+ ackm_on_pkts_acked(ackm, na_pkts);
+
+ /*
+ * Reset pto_count unless the client is unsure if the server validated the
+ * client's address.
+ */
+ if (ackm->peer_completed_addr_validation)
+ ackm->pto_count = 0;
+
+ ackm_set_loss_detection_timer(ackm);
+ return 1;
+}
+
+int ossl_ackm_on_pkt_space_discarded(OSSL_ACKM *ackm, int pkt_space)
+{
+ OSSL_ACKM_TX_PKT *pkt, *pnext;
+ uint64_t num_bytes_invalidated = 0;
+
+ if (ackm->discarded[pkt_space])
+ return 0;
+
+ if (pkt_space == QUIC_PN_SPACE_HANDSHAKE)
+ ackm->peer_completed_addr_validation = 1;
+
+ for (pkt = ossl_list_tx_history_head(&get_tx_history(ackm, pkt_space)->packets);
+ pkt != NULL; pkt = pnext) {
+ pnext = ossl_list_tx_history_next(pkt);
+ if (pkt->is_inflight) {
+ ackm->bytes_in_flight -= pkt->num_bytes;
+ num_bytes_invalidated += pkt->num_bytes;
+ }
+
+ pkt->on_discarded(pkt->cb_arg); /* may free pkt */
+ }
+
+ tx_pkt_history_destroy(&ackm->tx_history[pkt_space]);
+ rx_pkt_history_destroy(&ackm->rx_history[pkt_space]);
+
+ if (num_bytes_invalidated > 0)
+ ackm->cc_method->on_data_invalidated(ackm->cc_data,
+ num_bytes_invalidated);
+
+ ackm->time_of_last_ack_eliciting_pkt[pkt_space] = ossl_time_zero();
+ ackm->loss_time[pkt_space] = ossl_time_zero();
+ ackm->pto_count = 0;
+ ackm->discarded[pkt_space] = 1;
+ ackm->ack_eliciting_bytes_in_flight[pkt_space] = 0;
+ ackm_set_loss_detection_timer(ackm);
+ return 1;
+}
+
+int ossl_ackm_on_handshake_confirmed(OSSL_ACKM *ackm)
+{
+ ackm->handshake_confirmed = 1;
+ ackm->peer_completed_addr_validation = 1;
+ ackm_set_loss_detection_timer(ackm);
+ return 1;
+}
+
+static void ackm_queue_probe_anti_deadlock_handshake(OSSL_ACKM *ackm)
+{
+ ++ackm->pending_probe.anti_deadlock_handshake;
+}
+
+static void ackm_queue_probe_anti_deadlock_initial(OSSL_ACKM *ackm)
+{
+ ++ackm->pending_probe.anti_deadlock_initial;
+}
+
+static void ackm_queue_probe(OSSL_ACKM *ackm, int pkt_space)
+{
+    /*
+     * TODO(QUIC FUTURE): We are allowed to send either one or two probe
+     * packets here; determine a strategy for when we should send two.
+     */
+ ++ackm->pending_probe.pto[pkt_space];
+}
+
+int ossl_ackm_on_timeout(OSSL_ACKM *ackm)
+{
+ int pkt_space;
+ OSSL_TIME earliest_loss_time;
+ OSSL_ACKM_TX_PKT *lost_pkts;
+
+ earliest_loss_time = ackm_get_loss_time_and_space(ackm, &pkt_space);
+ if (!ossl_time_is_zero(earliest_loss_time)) {
+ /* Time threshold loss detection. */
+ lost_pkts = ackm_detect_and_remove_lost_pkts(ackm, pkt_space);
+ if (lost_pkts != NULL)
+ ackm_on_pkts_lost(ackm, pkt_space, lost_pkts, /*pseudo=*/0);
+ ackm_set_loss_detection_timer(ackm);
+ return 1;
+ }
+
+ if (ackm_ack_eliciting_bytes_in_flight(ackm) == 0) {
+ assert(!ackm->peer_completed_addr_validation);
+ /*
+ * Client sends an anti-deadlock packet: Initial is padded to earn more
+ * anti-amplification credit. A handshake packet proves address
+ * ownership.
+ */
+ if (ackm->discarded[QUIC_PN_SPACE_INITIAL])
+ ackm_queue_probe_anti_deadlock_handshake(ackm);
+ else
+ ackm_queue_probe_anti_deadlock_initial(ackm);
+ } else {
+ /*
+ * PTO. The user of the ACKM should send new data if available, else
+ * retransmit old data, or if neither is available, send a single PING
+ * frame.
+ */
+ ackm_get_pto_time_and_space(ackm, &pkt_space);
+ ackm_queue_probe(ackm, pkt_space);
+ }
+
+ ++ackm->pto_count;
+ ackm_set_loss_detection_timer(ackm);
+ return 1;
+}
+
+OSSL_TIME ossl_ackm_get_loss_detection_deadline(OSSL_ACKM *ackm)
+{
+ return ackm->loss_detection_deadline;
+}
+
+OSSL_ACKM_PROBE_INFO *ossl_ackm_get0_probe_request(OSSL_ACKM *ackm)
+{
+ return &ackm->pending_probe;
+}
+
+int ossl_ackm_get_largest_unacked(OSSL_ACKM *ackm, int pkt_space, QUIC_PN *pn)
+{
+ struct tx_pkt_history_st *h;
+ OSSL_ACKM_TX_PKT *p;
+
+ h = get_tx_history(ackm, pkt_space);
+ p = ossl_list_tx_history_tail(&h->packets);
+ if (p != NULL) {
+ *pn = p->pkt_num;
+ return 1;
+ }
+
+ return 0;
+}
+
+/* Number of ACK-eliciting packets RX'd before we always emit an ACK. */
+#define PKTS_BEFORE_ACK 2
+
+/*
+ * Return 1 if emission of an ACK frame is currently desired.
+ *
+ * This occurs when one or more of the following conditions occurs:
+ *
+ * - We have flagged that we want to send an ACK frame
+ * (for example, due to the packet threshold count being exceeded), or
+ *
+ *   - We have exceeded the ACK flush deadline. This means we have received
+ *     at least one ACK-eliciting packet and held off on sending an ACK frame
+ *     immediately in the hope that more ACK-eliciting packets might arrive,
+ *     but not enough did, so we now request transmission of an ACK frame
+ *     anyway.
+ *
+ */
+int ossl_ackm_is_ack_desired(OSSL_ACKM *ackm, int pkt_space)
+{
+ return ackm->rx_ack_desired[pkt_space]
+ || (!ossl_time_is_infinite(ackm->rx_ack_flush_deadline[pkt_space])
+ && ossl_time_compare(ackm->now(ackm->now_arg),
+ ackm->rx_ack_flush_deadline[pkt_space]) >= 0);
+}
+
+/*
+ * Returns 1 if an ACK frame matches a given packet number.
+ */
+static int ack_contains(const OSSL_QUIC_FRAME_ACK *ack, QUIC_PN pkt_num)
+{
+ size_t i;
+
+ for (i = 0; i < ack->num_ack_ranges; ++i)
+ if (range_contains(&ack->ack_ranges[i], pkt_num))
+ return 1;
+
+ return 0;
+}
+
+/*
+ * Returns 1 iff a PN (which we have just received) was previously reported as
+ * implied missing (by us, in an ACK frame we previously generated).
+ */
+static int ackm_is_missing(OSSL_ACKM *ackm, int pkt_space, QUIC_PN pkt_num)
+{
+ /*
+ * A PN is implied missing if it is not greater than the highest PN in our
+ * generated ACK frame, but is not matched by the frame.
+ */
+ return ackm->ack[pkt_space].num_ack_ranges > 0
+ && pkt_num <= ackm->ack[pkt_space].ack_ranges[0].end
+ && !ack_contains(&ackm->ack[pkt_space], pkt_num);
+}
+
+/*
+ * Returns 1 iff our RX of a PN newly establishes the implication of missing
+ * packets.
+ */
+static int ackm_has_newly_missing(OSSL_ACKM *ackm, int pkt_space)
+{
+ struct rx_pkt_history_st *h;
+
+ h = get_rx_history(ackm, pkt_space);
+
+ if (ossl_list_uint_set_is_empty(&h->set))
+ return 0;
+
+ /*
+ * The second condition here establishes that the highest PN range in our RX
+ * history comprises only a single PN. If there is more than one, then this
+ * function will have returned 1 during a previous call to
+ * ossl_ackm_on_rx_packet assuming the third condition below was met. Thus
+ * we only return 1 when the missing PN condition is newly established.
+ *
+ * The third condition here establishes that the highest PN range in our RX
+ * history is beyond (and does not border) the highest PN we have yet
+ * reported in any ACK frame. Thus there is a gap of at least one PN between
+ * the PNs we have ACK'd previously and the PN we have just received.
+ */
+ return ackm->ack[pkt_space].num_ack_ranges > 0
+ && ossl_list_uint_set_tail(&h->set)->range.start
+ == ossl_list_uint_set_tail(&h->set)->range.end
+ && ossl_list_uint_set_tail(&h->set)->range.start
+ > ackm->ack[pkt_space].ack_ranges[0].end + 1;
+}
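+
+/*
+ * Example (illustrative): if the last ACK frame we generated reported the
+ * single range [5, 7], then a newly received PN 3 is "missing" (it is <= 7
+ * but not covered, so the frame implied its absence), while a newly received
+ * PN 9 is not; however, PN 9 newly implies that PN 8 is missing, since the
+ * new highest range {9} sits more than one PN above the previously reported
+ * high of 7, so ackm_has_newly_missing returns 1.
+ */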
+
+static void ackm_set_flush_deadline(OSSL_ACKM *ackm, int pkt_space,
+ OSSL_TIME deadline)
+{
+ ackm->rx_ack_flush_deadline[pkt_space] = deadline;
+
+ if (ackm->ack_deadline_cb != NULL)
+ ackm->ack_deadline_cb(ossl_ackm_get_ack_deadline(ackm, pkt_space),
+ pkt_space, ackm->ack_deadline_cb_arg);
+}
+
+/* Explicitly flags that we want to generate an ACK frame. */
+static void ackm_queue_ack(OSSL_ACKM *ackm, int pkt_space)
+{
+ ackm->rx_ack_desired[pkt_space] = 1;
+
+ /* Cancel deadline. */
+ ackm_set_flush_deadline(ackm, pkt_space, ossl_time_infinite());
+}
+
+static void ackm_on_rx_ack_eliciting(OSSL_ACKM *ackm,
+ OSSL_TIME rx_time, int pkt_space,
+ int was_missing)
+{
+ OSSL_TIME tx_max_ack_delay;
+
+ if (ackm->rx_ack_desired[pkt_space])
+ /* ACK generation already requested so nothing to do. */
+ return;
+
+ ++ackm->rx_ack_eliciting_pkts_since_last_ack[pkt_space];
+
+ if (!ackm->rx_ack_generated[pkt_space]
+ || was_missing
+ || ackm->rx_ack_eliciting_pkts_since_last_ack[pkt_space]
+ >= PKTS_BEFORE_ACK
+ || ackm_has_newly_missing(ackm, pkt_space)) {
+ /*
+ * Either:
+ *
+ * - We have never yet generated an ACK frame, meaning that this
+ * is the first ever packet received, which we should always
+ * acknowledge immediately, or
+ *
+ * - We previously reported the PN that we have just received as
+ * missing in a previous ACK frame (meaning that we should report
+ * the fact that we now have it to the peer immediately), or
+ *
+ * - We have exceeded the ACK-eliciting packet threshold count
+ * for the purposes of ACK coalescing, so request transmission
+ * of an ACK frame, or
+ *
+ * - The PN we just received and added to our PN RX history
+ * newly implies one or more missing PNs, in which case we should
+ * inform the peer by sending an ACK frame immediately.
+ *
+ * We do not test the ACK flush deadline here because it is tested
+ * separately in ossl_ackm_is_ack_desired.
+ */
+ ackm_queue_ack(ackm, pkt_space);
+ return;
+ }
+
+ /*
+ * Not emitting an ACK yet.
+ *
+ * Update the ACK flush deadline.
+ *
+ * RFC 9000 s. 13.2.1: "An endpoint MUST acknowledge all ack-eliciting
+ * Initial and Handshake packets immediately"; don't delay ACK generation if
+ * we are using the Initial or Handshake PN spaces.
+ */
+ tx_max_ack_delay = ackm->tx_max_ack_delay;
+ if (pkt_space == QUIC_PN_SPACE_INITIAL
+ || pkt_space == QUIC_PN_SPACE_HANDSHAKE)
+ tx_max_ack_delay = ossl_time_zero();
+
+ if (ossl_time_is_infinite(ackm->rx_ack_flush_deadline[pkt_space]))
+ ackm_set_flush_deadline(ackm, pkt_space,
+ ossl_time_add(rx_time, tx_max_ack_delay));
+ else
+ ackm_set_flush_deadline(ackm, pkt_space,
+ ossl_time_min(ackm->rx_ack_flush_deadline[pkt_space],
+ ossl_time_add(rx_time,
+ tx_max_ack_delay)));
+}
+
+int ossl_ackm_on_rx_packet(OSSL_ACKM *ackm, const OSSL_ACKM_RX_PKT *pkt)
+{
+ struct rx_pkt_history_st *h = get_rx_history(ackm, pkt->pkt_space);
+ int was_missing;
+
+ if (ossl_ackm_is_rx_pn_processable(ackm, pkt->pkt_num, pkt->pkt_space) != 1)
+ /* PN has already been processed or written off, no-op. */
+ return 1;
+
+ /*
+ * Record the largest PN we have RX'd and the time we received it.
+ * We use this to calculate the ACK delay field of ACK frames.
+ */
+ if (pkt->pkt_num > ackm->rx_largest_pn[pkt->pkt_space]) {
+ ackm->rx_largest_pn[pkt->pkt_space] = pkt->pkt_num;
+ ackm->rx_largest_time[pkt->pkt_space] = pkt->time;
+ }
+
+    /*
+     * If the PN we just received was previously implied missing by virtue of
+     * being omitted from a previous ACK frame we generated, we skip any packet
+     * count thresholds or coalescing delays and emit a new ACK frame
+     * immediately.
+     */
+ was_missing = ackm_is_missing(ackm, pkt->pkt_space, pkt->pkt_num);
+
+ /*
+ * Add the packet number to our history list of PNs we have not yet provably
+ * acked.
+ */
+ if (rx_pkt_history_add_pn(h, pkt->pkt_num) != 1)
+ return 0;
+
+ /*
+ * Receiving this packet may or may not cause us to emit an ACK frame.
+ * We may not emit an ACK frame yet if we have not yet received a threshold
+ * number of packets.
+ */
+ if (pkt->is_ack_eliciting)
+ ackm_on_rx_ack_eliciting(ackm, pkt->time, pkt->pkt_space, was_missing);
+
+ /* Update the ECN counters according to which ECN signal we got, if any. */
+ switch (pkt->ecn) {
+ case OSSL_ACKM_ECN_ECT0:
+ ++ackm->rx_ect0[pkt->pkt_space];
+ break;
+ case OSSL_ACKM_ECN_ECT1:
+ ++ackm->rx_ect1[pkt->pkt_space];
+ break;
+ case OSSL_ACKM_ECN_ECNCE:
+ ++ackm->rx_ecnce[pkt->pkt_space];
+ break;
+ default:
+ break;
+ }
+
+ return 1;
+}
+
+static void ackm_fill_rx_ack_ranges(OSSL_ACKM *ackm, int pkt_space,
+ OSSL_QUIC_FRAME_ACK *ack)
+{
+ struct rx_pkt_history_st *h = get_rx_history(ackm, pkt_space);
+ UINT_SET_ITEM *x;
+ size_t i = 0;
+
+ /*
+ * Copy out ranges from the PN set, starting at the end, until we reach our
+ * maximum number of ranges.
+ */
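+    /*
+     * Note that the set is ordered with the highest PNs at its tail, so
+     * ack_ranges[0] ends up holding the range with the largest PN, as ACK
+     * frame encoding expects.
+     */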
+ for (x = ossl_list_uint_set_tail(&h->set);
+ x != NULL && i < OSSL_NELEM(ackm->ack_ranges);
+ x = ossl_list_uint_set_prev(x), ++i) {
+ ackm->ack_ranges[pkt_space][i].start = x->range.start;
+ ackm->ack_ranges[pkt_space][i].end = x->range.end;
+ }
+
+ ack->ack_ranges = ackm->ack_ranges[pkt_space];
+ ack->num_ack_ranges = i;
+}
+
+const OSSL_QUIC_FRAME_ACK *ossl_ackm_get_ack_frame(OSSL_ACKM *ackm,
+ int pkt_space)
+{
+ OSSL_QUIC_FRAME_ACK *ack = &ackm->ack[pkt_space];
+ OSSL_TIME now = ackm->now(ackm->now_arg);
+
+ ackm_fill_rx_ack_ranges(ackm, pkt_space, ack);
+
+ if (!ossl_time_is_zero(ackm->rx_largest_time[pkt_space])
+ && ossl_time_compare(now, ackm->rx_largest_time[pkt_space]) > 0
+ && pkt_space == QUIC_PN_SPACE_APP)
+ ack->delay_time =
+ ossl_time_subtract(now, ackm->rx_largest_time[pkt_space]);
+ else
+ ack->delay_time = ossl_time_zero();
+
+ ack->ect0 = ackm->rx_ect0[pkt_space];
+ ack->ect1 = ackm->rx_ect1[pkt_space];
+ ack->ecnce = ackm->rx_ecnce[pkt_space];
+ ack->ecn_present = 1;
+
+ ackm->rx_ack_eliciting_pkts_since_last_ack[pkt_space] = 0;
+
+ ackm->rx_ack_generated[pkt_space] = 1;
+ ackm->rx_ack_desired[pkt_space] = 0;
+ ackm_set_flush_deadline(ackm, pkt_space, ossl_time_infinite());
+ return ack;
+}
+
+
+OSSL_TIME ossl_ackm_get_ack_deadline(OSSL_ACKM *ackm, int pkt_space)
+{
+ if (ackm->rx_ack_desired[pkt_space])
+ /* Already desired, deadline is now. */
+ return ossl_time_zero();
+
+ return ackm->rx_ack_flush_deadline[pkt_space];
+}
+
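+/*
+ * Returns 1 iff an RX PN is processable, meaning that it is neither below the
+ * RX history watermark (i.e., already written off) nor already present in the
+ * RX history set (i.e., a duplicate).
+ */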
+int ossl_ackm_is_rx_pn_processable(OSSL_ACKM *ackm, QUIC_PN pn, int pkt_space)
+{
+ struct rx_pkt_history_st *h = get_rx_history(ackm, pkt_space);
+
+ return pn >= h->watermark && ossl_uint_set_query(&h->set, pn) == 0;
+}
+
+void ossl_ackm_set_loss_detection_deadline_callback(OSSL_ACKM *ackm,
+ void (*fn)(OSSL_TIME deadline,
+ void *arg),
+ void *arg)
+{
+ ackm->loss_detection_deadline_cb = fn;
+ ackm->loss_detection_deadline_cb_arg = arg;
+}
+
+void ossl_ackm_set_ack_deadline_callback(OSSL_ACKM *ackm,
+ void (*fn)(OSSL_TIME deadline,
+ int pkt_space,
+ void *arg),
+ void *arg)
+{
+ ackm->ack_deadline_cb = fn;
+ ackm->ack_deadline_cb_arg = arg;
+}
+
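+/*
+ * Forcibly treats the given packet as lost: removes it from the TX history
+ * and passes it to the loss handling path, flagged as a pseudo-loss.
+ */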
+int ossl_ackm_mark_packet_pseudo_lost(OSSL_ACKM *ackm,
+ int pkt_space, QUIC_PN pn)
+{
+ struct tx_pkt_history_st *h = get_tx_history(ackm, pkt_space);
+ OSSL_ACKM_TX_PKT *pkt;
+
+ pkt = tx_pkt_history_by_pkt_num(h, pn);
+ if (pkt == NULL)
+ return 0;
+
+ tx_pkt_history_remove(h, pkt->pkt_num);
+ pkt->lnext = NULL;
+ ackm_on_pkts_lost(ackm, pkt_space, pkt, /*pseudo=*/1);
+ return 1;
+}
+
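+/*
+ * Returns the PTO duration, computed as described in RFC 9002 s. 6.2.1:
+ *
+ *   PTO = smoothed_rtt + max(4 * rtt_variance, K_GRANULARITY) + max_ack_delay
+ *
+ * where max_ack_delay is the peer's advertised maximum ACK delay, if known.
+ */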
+OSSL_TIME ossl_ackm_get_pto_duration(OSSL_ACKM *ackm)
+{
+ OSSL_TIME duration;
+ OSSL_RTT_INFO rtt;
+
+ ossl_statm_get_rtt_info(ackm->statm, &rtt);
+
+ duration = ossl_time_add(rtt.smoothed_rtt,
+ ossl_time_max(ossl_time_multiply(rtt.rtt_variance, 4),
+ ossl_ticks2time(K_GRANULARITY)));
+ if (!ossl_time_is_infinite(ackm->rx_max_ack_delay))
+ duration = ossl_time_add(duration, ackm->rx_max_ack_delay);
+
+ return duration;
+}
+
+QUIC_PN ossl_ackm_get_largest_acked(OSSL_ACKM *ackm, int pkt_space)
+{
+ return ackm->largest_acked_pkt[pkt_space];
+}
+
+void ossl_ackm_set_rx_max_ack_delay(OSSL_ACKM *ackm, OSSL_TIME rx_max_ack_delay)
+{
+ ackm->rx_max_ack_delay = rx_max_ack_delay;
+}
+
+void ossl_ackm_set_tx_max_ack_delay(OSSL_ACKM *ackm, OSSL_TIME tx_max_ack_delay)
+{
+ ackm->tx_max_ack_delay = tx_max_ack_delay;
+}
diff --git a/crypto/openssl/ssl/quic/quic_cfq.c b/crypto/openssl/ssl/quic/quic_cfq.c
new file mode 100644
index 000000000000..fb96e0c68cc1
--- /dev/null
+++ b/crypto/openssl/ssl/quic/quic_cfq.c
@@ -0,0 +1,363 @@
+/*
+ * Copyright 2022-2024 The OpenSSL Project Authors. All Rights Reserved.
+ *
+ * Licensed under the Apache License 2.0 (the "License"). You may not use
+ * this file except in compliance with the License. You can obtain a copy
+ * in the file LICENSE in the source distribution or at
+ * https://www.openssl.org/source/license.html
+ */
+
+#include "internal/quic_cfq.h"
+#include "internal/numbers.h"
+
+typedef struct quic_cfq_item_ex_st QUIC_CFQ_ITEM_EX;
+
+struct quic_cfq_item_ex_st {
+ QUIC_CFQ_ITEM public;
+ QUIC_CFQ_ITEM_EX *prev, *next;
+ unsigned char *encoded;
+ cfq_free_cb *free_cb;
+ void *free_cb_arg;
+ uint64_t frame_type;
+ size_t encoded_len;
+ uint32_t priority, pn_space, flags;
+ int state;
+};
+
+uint64_t ossl_quic_cfq_item_get_frame_type(const QUIC_CFQ_ITEM *item)
+{
+ QUIC_CFQ_ITEM_EX *ex = (QUIC_CFQ_ITEM_EX *)item;
+
+ return ex->frame_type;
+}
+
+const unsigned char *ossl_quic_cfq_item_get_encoded(const QUIC_CFQ_ITEM *item)
+{
+ QUIC_CFQ_ITEM_EX *ex = (QUIC_CFQ_ITEM_EX *)item;
+
+ return ex->encoded;
+}
+
+size_t ossl_quic_cfq_item_get_encoded_len(const QUIC_CFQ_ITEM *item)
+{
+ QUIC_CFQ_ITEM_EX *ex = (QUIC_CFQ_ITEM_EX *)item;
+
+ return ex->encoded_len;
+}
+
+int ossl_quic_cfq_item_get_state(const QUIC_CFQ_ITEM *item)
+{
+ QUIC_CFQ_ITEM_EX *ex = (QUIC_CFQ_ITEM_EX *)item;
+
+ return ex->state;
+}
+
+uint32_t ossl_quic_cfq_item_get_pn_space(const QUIC_CFQ_ITEM *item)
+{
+ QUIC_CFQ_ITEM_EX *ex = (QUIC_CFQ_ITEM_EX *)item;
+
+ return ex->pn_space;
+}
+
+int ossl_quic_cfq_item_is_unreliable(const QUIC_CFQ_ITEM *item)
+{
+ QUIC_CFQ_ITEM_EX *ex = (QUIC_CFQ_ITEM_EX *)item;
+
+ return (ex->flags & QUIC_CFQ_ITEM_FLAG_UNRELIABLE) != 0;
+}
+
+typedef struct quic_cfq_item_list_st {
+ QUIC_CFQ_ITEM_EX *head, *tail;
+} QUIC_CFQ_ITEM_LIST;
+
+struct quic_cfq_st {
+ /*
+ * Invariant: A CFQ item is always in exactly one of these lists, never more
+ * or less than one.
+ *
+     * Invariant: The list a CFQ item is in is determined exactly by the state
+     * field of the item.
+ */
+ QUIC_CFQ_ITEM_LIST new_list, tx_list, free_list;
+};
+
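+/*
+ * Total order used to sort the NEW list: ascending by PN space, then
+ * descending by priority, so that higher priority frames within a given PN
+ * space are transmitted first.
+ */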
+static int compare(const QUIC_CFQ_ITEM_EX *a, const QUIC_CFQ_ITEM_EX *b)
+{
+ if (a->pn_space < b->pn_space)
+ return -1;
+ else if (a->pn_space > b->pn_space)
+ return 1;
+
+ if (a->priority > b->priority)
+ return -1;
+ else if (a->priority < b->priority)
+ return 1;
+
+ return 0;
+}
+
+static void list_remove(QUIC_CFQ_ITEM_LIST *l, QUIC_CFQ_ITEM_EX *n)
+{
+ if (l->head == n)
+ l->head = n->next;
+ if (l->tail == n)
+ l->tail = n->prev;
+ if (n->prev != NULL)
+ n->prev->next = n->next;
+ if (n->next != NULL)
+ n->next->prev = n->prev;
+ n->prev = n->next = NULL;
+}
+
+static void list_insert_head(QUIC_CFQ_ITEM_LIST *l, QUIC_CFQ_ITEM_EX *n)
+{
+ n->next = l->head;
+ n->prev = NULL;
+ l->head = n;
+ if (n->next != NULL)
+ n->next->prev = n;
+ if (l->tail == NULL)
+ l->tail = n;
+}
+
+static void list_insert_tail(QUIC_CFQ_ITEM_LIST *l, QUIC_CFQ_ITEM_EX *n)
+{
+ n->prev = l->tail;
+ n->next = NULL;
+ l->tail = n;
+ if (n->prev != NULL)
+ n->prev->next = n;
+ if (l->head == NULL)
+ l->head = n;
+}
+
+static void list_insert_after(QUIC_CFQ_ITEM_LIST *l,
+ QUIC_CFQ_ITEM_EX *ref,
+ QUIC_CFQ_ITEM_EX *n)
+{
+ n->prev = ref;
+ n->next = ref->next;
+ if (ref->next != NULL)
+ ref->next->prev = n;
+ ref->next = n;
+ if (l->tail == ref)
+ l->tail = n;
+}
+
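+/*
+ * Inserts n into l using a linear scan, placing it immediately before the
+ * first existing item which does not compare less than n, thereby keeping the
+ * list sorted under cmp.
+ */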
+static void list_insert_sorted(QUIC_CFQ_ITEM_LIST *l, QUIC_CFQ_ITEM_EX *n,
+ int (*cmp)(const QUIC_CFQ_ITEM_EX *a,
+ const QUIC_CFQ_ITEM_EX *b))
+{
+ QUIC_CFQ_ITEM_EX *p = l->head, *pprev = NULL;
+
+ if (p == NULL) {
+ l->head = l->tail = n;
+ n->prev = n->next = NULL;
+ return;
+ }
+
+ for (; p != NULL && cmp(p, n) < 0; pprev = p, p = p->next);
+
+ if (p == NULL)
+ list_insert_tail(l, n);
+ else if (pprev == NULL)
+ list_insert_head(l, n);
+ else
+ list_insert_after(l, pprev, n);
+}
+
+QUIC_CFQ *ossl_quic_cfq_new(void)
+{
+ QUIC_CFQ *cfq = OPENSSL_zalloc(sizeof(*cfq));
+
+ if (cfq == NULL)
+ return NULL;
+
+ return cfq;
+}
+
+static void clear_item(QUIC_CFQ_ITEM_EX *item)
+{
+ if (item->free_cb != NULL) {
+ item->free_cb(item->encoded, item->encoded_len, item->free_cb_arg);
+
+ item->free_cb = NULL;
+ item->encoded = NULL;
+ item->encoded_len = 0;
+ }
+
+ item->state = -1;
+}
+
+static void free_list_items(QUIC_CFQ_ITEM_LIST *l)
+{
+ QUIC_CFQ_ITEM_EX *p, *pnext;
+
+ for (p = l->head; p != NULL; p = pnext) {
+ pnext = p->next;
+ clear_item(p);
+ OPENSSL_free(p);
+ }
+}
+
+void ossl_quic_cfq_free(QUIC_CFQ *cfq)
+{
+ if (cfq == NULL)
+ return;
+
+ free_list_items(&cfq->new_list);
+ free_list_items(&cfq->tx_list);
+ free_list_items(&cfq->free_list);
+ OPENSSL_free(cfq);
+}
+
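+/*
+ * Returns a spare item, allocating a new one if the free list is empty. The
+ * returned item remains on the free list; the caller removes it once it has
+ * been successfully initialised.
+ */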
+static QUIC_CFQ_ITEM_EX *cfq_get_free(QUIC_CFQ *cfq)
+{
+ QUIC_CFQ_ITEM_EX *item = cfq->free_list.head;
+
+ if (item != NULL)
+ return item;
+
+ item = OPENSSL_zalloc(sizeof(*item));
+ if (item == NULL)
+ return NULL;
+
+ item->state = -1;
+ list_insert_tail(&cfq->free_list, item);
+ return item;
+}
+
+QUIC_CFQ_ITEM *ossl_quic_cfq_add_frame(QUIC_CFQ *cfq,
+ uint32_t priority,
+ uint32_t pn_space,
+ uint64_t frame_type,
+ uint32_t flags,
+ const unsigned char *encoded,
+ size_t encoded_len,
+ cfq_free_cb *free_cb,
+ void *free_cb_arg)
+{
+ QUIC_CFQ_ITEM_EX *item = cfq_get_free(cfq);
+
+ if (item == NULL)
+ return NULL;
+
+ item->priority = priority;
+ item->frame_type = frame_type;
+ item->pn_space = pn_space;
+ item->encoded = (unsigned char *)encoded;
+ item->encoded_len = encoded_len;
+ item->free_cb = free_cb;
+ item->free_cb_arg = free_cb_arg;
+
+ item->state = QUIC_CFQ_STATE_NEW;
+ item->flags = flags;
+ list_remove(&cfq->free_list, item);
+ list_insert_sorted(&cfq->new_list, item, compare);
+ return &item->public;
+}
+
+void ossl_quic_cfq_mark_tx(QUIC_CFQ *cfq, QUIC_CFQ_ITEM *item)
+{
+ QUIC_CFQ_ITEM_EX *ex = (QUIC_CFQ_ITEM_EX *)item;
+
+ switch (ex->state) {
+ case QUIC_CFQ_STATE_NEW:
+ list_remove(&cfq->new_list, ex);
+ list_insert_tail(&cfq->tx_list, ex);
+ ex->state = QUIC_CFQ_STATE_TX;
+ break;
+ case QUIC_CFQ_STATE_TX:
+ break; /* nothing to do */
+ default:
+ assert(0); /* invalid state (e.g. in free state) */
+ break;
+ }
+}
+
+void ossl_quic_cfq_mark_lost(QUIC_CFQ *cfq, QUIC_CFQ_ITEM *item,
+ uint32_t priority)
+{
+ QUIC_CFQ_ITEM_EX *ex = (QUIC_CFQ_ITEM_EX *)item;
+
+ if (ossl_quic_cfq_item_is_unreliable(item)) {
+ ossl_quic_cfq_release(cfq, item);
+ return;
+ }
+
+ switch (ex->state) {
+ case QUIC_CFQ_STATE_NEW:
+ if (priority != UINT32_MAX && priority != ex->priority) {
+ list_remove(&cfq->new_list, ex);
+ ex->priority = priority;
+ list_insert_sorted(&cfq->new_list, ex, compare);
+ }
+        break; /* remains in the NEW list */
+ case QUIC_CFQ_STATE_TX:
+ if (priority != UINT32_MAX)
+ ex->priority = priority;
+ list_remove(&cfq->tx_list, ex);
+ list_insert_sorted(&cfq->new_list, ex, compare);
+ ex->state = QUIC_CFQ_STATE_NEW;
+ break;
+ default:
+ assert(0); /* invalid state (e.g. in free state) */
+ break;
+ }
+}
+
+/*
+ * Releases a CFQ item. The item may be in either state (NEW or TX) prior to the
+ * call. The QUIC_CFQ_ITEM pointer must not be used following this call.
+ */
+void ossl_quic_cfq_release(QUIC_CFQ *cfq, QUIC_CFQ_ITEM *item)
+{
+ QUIC_CFQ_ITEM_EX *ex = (QUIC_CFQ_ITEM_EX *)item;
+
+ switch (ex->state) {
+ case QUIC_CFQ_STATE_NEW:
+ list_remove(&cfq->new_list, ex);
+ list_insert_tail(&cfq->free_list, ex);
+ clear_item(ex);
+ break;
+ case QUIC_CFQ_STATE_TX:
+ list_remove(&cfq->tx_list, ex);
+ list_insert_tail(&cfq->free_list, ex);
+ clear_item(ex);
+ break;
+ default:
+ assert(0); /* invalid state (e.g. in free state) */
+ break;
+ }
+}
+
+QUIC_CFQ_ITEM *ossl_quic_cfq_get_priority_head(const QUIC_CFQ *cfq,
+ uint32_t pn_space)
+{
+ QUIC_CFQ_ITEM_EX *item = cfq->new_list.head;
+
+ for (; item != NULL && item->pn_space != pn_space; item = item->next);
+
+ if (item == NULL)
+ return NULL;
+
+ return &item->public;
+}
+
+QUIC_CFQ_ITEM *ossl_quic_cfq_item_get_priority_next(const QUIC_CFQ_ITEM *item,
+ uint32_t pn_space)
+{
+ QUIC_CFQ_ITEM_EX *ex = (QUIC_CFQ_ITEM_EX *)item;
+
+ if (ex == NULL)
+ return NULL;
+
+ ex = ex->next;
+
+ for (; ex != NULL && ex->pn_space != pn_space; ex = ex->next);
+
+    if (ex == NULL)
+        return NULL; /* avoid computing &ex->public when ex is NULL (ubsan) */
+
+ return &ex->public;
+}
diff --git a/crypto/openssl/ssl/quic/quic_channel.c b/crypto/openssl/ssl/quic/quic_channel.c
new file mode 100644
index 000000000000..652c653b9120
--- /dev/null
+++ b/crypto/openssl/ssl/quic/quic_channel.c
@@ -0,0 +1,4128 @@
+/*
+ * Copyright 2022-2025 The OpenSSL Project Authors. All Rights Reserved.
+ *
+ * Licensed under the Apache License 2.0 (the "License"). You may not use
+ * this file except in compliance with the License. You can obtain a copy
+ * in the file LICENSE in the source distribution or at
+ * https://www.openssl.org/source/license.html
+ */
+
+#include <openssl/rand.h>
+#include <openssl/err.h>
+#include "internal/ssl_unwrap.h"
+#include "internal/quic_channel.h"
+#include "internal/quic_error.h"
+#include "internal/quic_rx_depack.h"
+#include "internal/quic_lcidm.h"
+#include "internal/quic_srtm.h"
+#include "internal/qlog_event_helpers.h"
+#include "internal/quic_txp.h"
+#include "internal/quic_tls.h"
+#include "internal/quic_ssl.h"
+#include "../ssl_local.h"
+#include "quic_channel_local.h"
+#include "quic_port_local.h"
+#include "quic_engine_local.h"
+
+#define INIT_CRYPTO_RECV_BUF_LEN 16384
+#define INIT_CRYPTO_SEND_BUF_LEN 16384
+#define INIT_APP_BUF_LEN 8192
+
+/*
+ * Interval before we force a PING to ensure NATs don't time out. This is based
+ * on the lowest commonly seen value of 30 seconds as cited in RFC 9000 s.
+ * 10.1.2; we use 25 seconds to leave a safety margin below that.
+ */
+#define MAX_NAT_INTERVAL (ossl_ms2time(25000))
+
+/*
+ * Our maximum ACK delay on the TX side. This is up to us to choose. Note that
+ * this could differ from QUIC_DEFAULT_MAX_ACK_DELAY in future as that is a
+ * protocol value which determines the value of the maximum ACK delay if the
+ * max_ack_delay transport parameter is not set.
+ */
+#define DEFAULT_MAX_ACK_DELAY QUIC_DEFAULT_MAX_ACK_DELAY
+
+DEFINE_LIST_OF_IMPL(ch, QUIC_CHANNEL);
+
+static void ch_save_err_state(QUIC_CHANNEL *ch);
+static int ch_rx(QUIC_CHANNEL *ch, int channel_only, int *notify_other_threads);
+static int ch_tx(QUIC_CHANNEL *ch, int *notify_other_threads);
+static int ch_tick_tls(QUIC_CHANNEL *ch, int channel_only, int *notify_other_threads);
+static void ch_rx_handle_packet(QUIC_CHANNEL *ch, int channel_only);
+static OSSL_TIME ch_determine_next_tick_deadline(QUIC_CHANNEL *ch);
+static int ch_retry(QUIC_CHANNEL *ch,
+ const unsigned char *retry_token,
+ size_t retry_token_len,
+ const QUIC_CONN_ID *retry_scid,
+ int drop_later_pn);
+static int ch_restart(QUIC_CHANNEL *ch);
+
+static void ch_cleanup(QUIC_CHANNEL *ch);
+static int ch_generate_transport_params(QUIC_CHANNEL *ch);
+static int ch_on_transport_params(const unsigned char *params,
+ size_t params_len,
+ void *arg);
+static int ch_on_handshake_alert(void *arg, unsigned char alert_code);
+static int ch_on_handshake_complete(void *arg);
+static int ch_on_handshake_yield_secret(uint32_t prot_level, int direction,
+ uint32_t suite_id, EVP_MD *md,
+ const unsigned char *secret,
+ size_t secret_len,
+ void *arg);
+static int ch_on_crypto_recv_record(const unsigned char **buf,
+ size_t *bytes_read, void *arg);
+static int ch_on_crypto_release_record(size_t bytes_read, void *arg);
+static int crypto_ensure_empty(QUIC_RSTREAM *rstream);
+static int ch_on_crypto_send(const unsigned char *buf, size_t buf_len,
+ size_t *consumed, void *arg);
+static OSSL_TIME get_time(void *arg);
+static uint64_t get_stream_limit(int uni, void *arg);
+static int rx_late_validate(QUIC_PN pn, int pn_space, void *arg);
+static void rxku_detected(QUIC_PN pn, void *arg);
+static void ch_update_idle(QUIC_CHANNEL *ch);
+static int ch_discard_el(QUIC_CHANNEL *ch,
+ uint32_t enc_level);
+static void ch_on_idle_timeout(QUIC_CHANNEL *ch);
+static void ch_update_ping_deadline(QUIC_CHANNEL *ch);
+static void ch_on_terminating_timeout(QUIC_CHANNEL *ch);
+static void ch_start_terminating(QUIC_CHANNEL *ch,
+ const QUIC_TERMINATE_CAUSE *tcause,
+ int force_immediate);
+static void ch_on_txp_ack_tx(const OSSL_QUIC_FRAME_ACK *ack, uint32_t pn_space,
+ void *arg);
+static void ch_rx_handle_version_neg(QUIC_CHANNEL *ch, OSSL_QRX_PKT *pkt);
+static void ch_raise_version_neg_failure(QUIC_CHANNEL *ch);
+static void ch_record_state_transition(QUIC_CHANNEL *ch, uint32_t new_state);
+
+DEFINE_LHASH_OF_EX(QUIC_SRT_ELEM);
+
+QUIC_NEEDS_LOCK
+static QLOG *ch_get_qlog(QUIC_CHANNEL *ch)
+{
+#ifndef OPENSSL_NO_QLOG
+ QLOG_TRACE_INFO qti = {0};
+
+ if (ch->qlog != NULL)
+ return ch->qlog;
+
+ if (!ch->use_qlog)
+ return NULL;
+
+ if (ch->is_server && ch->init_dcid.id_len == 0)
+ return NULL;
+
+ qti.odcid = ch->init_dcid;
+ qti.title = ch->qlog_title;
+ qti.description = NULL;
+ qti.group_id = NULL;
+ qti.is_server = ch->is_server;
+ qti.now_cb = get_time;
+ qti.now_cb_arg = ch;
+ if ((ch->qlog = ossl_qlog_new_from_env(&qti)) == NULL) {
+ ch->use_qlog = 0; /* don't try again */
+ return NULL;
+ }
+
+ return ch->qlog;
+#else
+ return NULL;
+#endif
+}
+
+QUIC_NEEDS_LOCK
+static QLOG *ch_get_qlog_cb(void *arg)
+{
+ QUIC_CHANNEL *ch = arg;
+
+ return ch_get_qlog(ch);
+}
+
+/*
+ * QUIC Channel Initialization and Teardown
+ * ========================================
+ */
+#define DEFAULT_INIT_CONN_RXFC_WND (768 * 1024)
+#define DEFAULT_CONN_RXFC_MAX_WND_MUL 20
+
+#define DEFAULT_INIT_STREAM_RXFC_WND (512 * 1024)
+#define DEFAULT_STREAM_RXFC_MAX_WND_MUL 12
+
+#define DEFAULT_INIT_CONN_MAX_STREAMS 100
+
+static int ch_init(QUIC_CHANNEL *ch)
+{
+ OSSL_QUIC_TX_PACKETISER_ARGS txp_args = {0};
+ OSSL_QTX_ARGS qtx_args = {0};
+ OSSL_QRX_ARGS qrx_args = {0};
+ QUIC_TLS_ARGS tls_args = {0};
+ uint32_t pn_space;
+ size_t rx_short_dcid_len;
+ size_t tx_init_dcid_len;
+
+ if (ch->port == NULL || ch->lcidm == NULL || ch->srtm == NULL)
+ goto err;
+
+ rx_short_dcid_len = ossl_quic_port_get_rx_short_dcid_len(ch->port);
+ tx_init_dcid_len = ossl_quic_port_get_tx_init_dcid_len(ch->port);
+
+ /* For clients, generate our initial DCID. */
+ if (!ch->is_server
+ && !ossl_quic_gen_rand_conn_id(ch->port->engine->libctx, tx_init_dcid_len,
+ &ch->init_dcid))
+ goto err;
+
+ /* We plug in a network write BIO to the QTX later when we get one. */
+ qtx_args.libctx = ch->port->engine->libctx;
+ qtx_args.get_qlog_cb = ch_get_qlog_cb;
+ qtx_args.get_qlog_cb_arg = ch;
+ qtx_args.mdpl = QUIC_MIN_INITIAL_DGRAM_LEN;
+ ch->rx_max_udp_payload_size = qtx_args.mdpl;
+
+ ch->ping_deadline = ossl_time_infinite();
+
+ ch->qtx = ossl_qtx_new(&qtx_args);
+ if (ch->qtx == NULL)
+ goto err;
+
+ ch->txpim = ossl_quic_txpim_new();
+ if (ch->txpim == NULL)
+ goto err;
+
+ ch->cfq = ossl_quic_cfq_new();
+ if (ch->cfq == NULL)
+ goto err;
+
+ if (!ossl_quic_txfc_init(&ch->conn_txfc, NULL))
+ goto err;
+
+ /*
+ * Note: The TP we transmit governs what the peer can transmit and thus
+ * applies to the RXFC.
+ */
+ ch->tx_init_max_stream_data_bidi_local = DEFAULT_INIT_STREAM_RXFC_WND;
+ ch->tx_init_max_stream_data_bidi_remote = DEFAULT_INIT_STREAM_RXFC_WND;
+ ch->tx_init_max_stream_data_uni = DEFAULT_INIT_STREAM_RXFC_WND;
+
+ if (!ossl_quic_rxfc_init(&ch->conn_rxfc, NULL,
+ DEFAULT_INIT_CONN_RXFC_WND,
+ DEFAULT_CONN_RXFC_MAX_WND_MUL *
+ DEFAULT_INIT_CONN_RXFC_WND,
+ get_time, ch))
+ goto err;
+
+ for (pn_space = QUIC_PN_SPACE_INITIAL; pn_space < QUIC_PN_SPACE_NUM; ++pn_space)
+ if (!ossl_quic_rxfc_init_standalone(&ch->crypto_rxfc[pn_space],
+ INIT_CRYPTO_RECV_BUF_LEN,
+ get_time, ch))
+ goto err;
+
+ if (!ossl_quic_rxfc_init_standalone(&ch->max_streams_bidi_rxfc,
+ DEFAULT_INIT_CONN_MAX_STREAMS,
+ get_time, ch))
+ goto err;
+
+ if (!ossl_quic_rxfc_init_standalone(&ch->max_streams_uni_rxfc,
+ DEFAULT_INIT_CONN_MAX_STREAMS,
+ get_time, ch))
+ goto err;
+
+ if (!ossl_statm_init(&ch->statm))
+ goto err;
+
+ ch->have_statm = 1;
+ ch->cc_method = &ossl_cc_newreno_method;
+ if ((ch->cc_data = ch->cc_method->new(get_time, ch)) == NULL)
+ goto err;
+
+ if ((ch->ackm = ossl_ackm_new(get_time, ch, &ch->statm,
+ ch->cc_method, ch->cc_data,
+ ch->is_server)) == NULL)
+ goto err;
+
+ if (!ossl_quic_stream_map_init(&ch->qsm, get_stream_limit, ch,
+ &ch->max_streams_bidi_rxfc,
+ &ch->max_streams_uni_rxfc,
+ ch->is_server))
+ goto err;
+
+ ch->have_qsm = 1;
+
+ if (!ch->is_server
+ && !ossl_quic_lcidm_generate_initial(ch->lcidm, ch, &ch->init_scid))
+ goto err;
+
+ txp_args.cur_scid = ch->init_scid;
+ txp_args.cur_dcid = ch->init_dcid;
+ txp_args.ack_delay_exponent = 3;
+ txp_args.qtx = ch->qtx;
+ txp_args.txpim = ch->txpim;
+ txp_args.cfq = ch->cfq;
+ txp_args.ackm = ch->ackm;
+ txp_args.qsm = &ch->qsm;
+ txp_args.conn_txfc = &ch->conn_txfc;
+ txp_args.conn_rxfc = &ch->conn_rxfc;
+ txp_args.max_streams_bidi_rxfc = &ch->max_streams_bidi_rxfc;
+ txp_args.max_streams_uni_rxfc = &ch->max_streams_uni_rxfc;
+ txp_args.cc_method = ch->cc_method;
+ txp_args.cc_data = ch->cc_data;
+ txp_args.now = get_time;
+ txp_args.now_arg = ch;
+ txp_args.get_qlog_cb = ch_get_qlog_cb;
+ txp_args.get_qlog_cb_arg = ch;
+ txp_args.protocol_version = QUIC_VERSION_1;
+
+ for (pn_space = QUIC_PN_SPACE_INITIAL; pn_space < QUIC_PN_SPACE_NUM; ++pn_space) {
+ ch->crypto_send[pn_space] = ossl_quic_sstream_new(INIT_CRYPTO_SEND_BUF_LEN);
+ if (ch->crypto_send[pn_space] == NULL)
+ goto err;
+
+ txp_args.crypto[pn_space] = ch->crypto_send[pn_space];
+ }
+
+ ch->txp = ossl_quic_tx_packetiser_new(&txp_args);
+ if (ch->txp == NULL)
+ goto err;
+
+    /*
+     * Clients are not subject to the 3x amplification limit, so they are
+     * considered validated from the outset.
+     */
+ if (!ch->is_server)
+ ossl_quic_tx_packetiser_set_validated(ch->txp);
+
+ ossl_quic_tx_packetiser_set_ack_tx_cb(ch->txp, ch_on_txp_ack_tx, ch);
+
+    /*
+     * If the qrx does not exist yet, we must be dealing with a client channel
+     * (QUIC connection initiator). If the qrx already exists, we are dealing
+     * with a server channel, whose qrx is created by
+     * port_default_packet_handler() before it accepts the connection and
+     * creates a channel for it. The exception is the tserver, which always
+     * creates a channel before the first packet is ever seen.
+     */
+    if (ch->qrx == NULL && ch->is_tserver_ch == 0) {
+        /* We are a regular client; create the QRX. */
+ qrx_args.libctx = ch->port->engine->libctx;
+ qrx_args.demux = ch->port->demux;
+ qrx_args.short_conn_id_len = rx_short_dcid_len;
+ qrx_args.max_deferred = 32;
+
+ if ((ch->qrx = ossl_qrx_new(&qrx_args)) == NULL)
+ goto err;
+ }
+
+ if (ch->qrx != NULL) {
+        /*
+         * Callbacks for channels associated with a tserver's port are set up
+         * later, when we call ossl_quic_channel_bind_qrx() in
+         * port_default_packet_handler().
+         */
+ if (!ossl_qrx_set_late_validation_cb(ch->qrx,
+ rx_late_validate,
+ ch))
+ goto err;
+
+ if (!ossl_qrx_set_key_update_cb(ch->qrx,
+ rxku_detected,
+ ch))
+ goto err;
+ }
+
+ for (pn_space = QUIC_PN_SPACE_INITIAL; pn_space < QUIC_PN_SPACE_NUM; ++pn_space) {
+ ch->crypto_recv[pn_space] = ossl_quic_rstream_new(NULL, NULL, 0);
+ if (ch->crypto_recv[pn_space] == NULL)
+ goto err;
+ }
+
+ /* Plug in the TLS handshake layer. */
+ tls_args.s = ch->tls;
+ tls_args.crypto_send_cb = ch_on_crypto_send;
+ tls_args.crypto_send_cb_arg = ch;
+ tls_args.crypto_recv_rcd_cb = ch_on_crypto_recv_record;
+ tls_args.crypto_recv_rcd_cb_arg = ch;
+ tls_args.crypto_release_rcd_cb = ch_on_crypto_release_record;
+ tls_args.crypto_release_rcd_cb_arg = ch;
+ tls_args.yield_secret_cb = ch_on_handshake_yield_secret;
+ tls_args.yield_secret_cb_arg = ch;
+ tls_args.got_transport_params_cb = ch_on_transport_params;
+    tls_args.got_transport_params_cb_arg = ch;
+ tls_args.handshake_complete_cb = ch_on_handshake_complete;
+ tls_args.handshake_complete_cb_arg = ch;
+ tls_args.alert_cb = ch_on_handshake_alert;
+ tls_args.alert_cb_arg = ch;
+ tls_args.is_server = ch->is_server;
+ tls_args.ossl_quic = 1;
+
+ if ((ch->qtls = ossl_quic_tls_new(&tls_args)) == NULL)
+ goto err;
+
+ ch->tx_max_ack_delay = DEFAULT_MAX_ACK_DELAY;
+ ch->rx_max_ack_delay = QUIC_DEFAULT_MAX_ACK_DELAY;
+ ch->rx_ack_delay_exp = QUIC_DEFAULT_ACK_DELAY_EXP;
+ ch->rx_active_conn_id_limit = QUIC_MIN_ACTIVE_CONN_ID_LIMIT;
+ ch->tx_enc_level = QUIC_ENC_LEVEL_INITIAL;
+ ch->rx_enc_level = QUIC_ENC_LEVEL_INITIAL;
+ ch->txku_threshold_override = UINT64_MAX;
+
+ ch->max_idle_timeout_local_req = QUIC_DEFAULT_IDLE_TIMEOUT;
+ ch->max_idle_timeout_remote_req = 0;
+ ch->max_idle_timeout = ch->max_idle_timeout_local_req;
+
+ ossl_ackm_set_tx_max_ack_delay(ch->ackm, ossl_ms2time(ch->tx_max_ack_delay));
+ ossl_ackm_set_rx_max_ack_delay(ch->ackm, ossl_ms2time(ch->rx_max_ack_delay));
+
+ ch_update_idle(ch);
+ ossl_list_ch_insert_tail(&ch->port->channel_list, ch);
+ ch->on_port_list = 1;
+ return 1;
+
+err:
+ ch_cleanup(ch);
+ return 0;
+}
+
+static void ch_cleanup(QUIC_CHANNEL *ch)
+{
+ uint32_t pn_space;
+
+ if (ch->ackm != NULL)
+ for (pn_space = QUIC_PN_SPACE_INITIAL;
+ pn_space < QUIC_PN_SPACE_NUM;
+ ++pn_space)
+ ossl_ackm_on_pkt_space_discarded(ch->ackm, pn_space);
+
+ ossl_quic_lcidm_cull(ch->lcidm, ch);
+ ossl_quic_srtm_cull(ch->srtm, ch);
+ ossl_quic_tx_packetiser_free(ch->txp);
+ ossl_quic_txpim_free(ch->txpim);
+ ossl_quic_cfq_free(ch->cfq);
+ ossl_qtx_free(ch->qtx);
+ if (ch->cc_data != NULL)
+ ch->cc_method->free(ch->cc_data);
+ if (ch->have_statm)
+ ossl_statm_destroy(&ch->statm);
+ ossl_ackm_free(ch->ackm);
+
+ if (ch->have_qsm)
+ ossl_quic_stream_map_cleanup(&ch->qsm);
+
+ for (pn_space = QUIC_PN_SPACE_INITIAL; pn_space < QUIC_PN_SPACE_NUM; ++pn_space) {
+ ossl_quic_sstream_free(ch->crypto_send[pn_space]);
+ ossl_quic_rstream_free(ch->crypto_recv[pn_space]);
+ }
+
+ ossl_qrx_pkt_release(ch->qrx_pkt);
+ ch->qrx_pkt = NULL;
+
+ ossl_quic_tls_free(ch->qtls);
+ ossl_qrx_free(ch->qrx);
+ OPENSSL_free(ch->local_transport_params);
+ OPENSSL_free((char *)ch->terminate_cause.reason);
+ OSSL_ERR_STATE_free(ch->err_state);
+ OPENSSL_free(ch->ack_range_scratch);
+ OPENSSL_free(ch->pending_new_token);
+
+ if (ch->on_port_list) {
+ ossl_list_ch_remove(&ch->port->channel_list, ch);
+ ch->on_port_list = 0;
+ }
+
+#ifndef OPENSSL_NO_QLOG
+ if (ch->qlog != NULL)
+ ossl_qlog_flush(ch->qlog); /* best effort */
+
+ OPENSSL_free(ch->qlog_title);
+ ossl_qlog_free(ch->qlog);
+#endif
+}
+
+int ossl_quic_channel_init(QUIC_CHANNEL *ch)
+{
+ return ch_init(ch);
+}
+
+void ossl_quic_channel_bind_qrx(QUIC_CHANNEL *tserver_ch, OSSL_QRX *qrx)
+{
+ if (tserver_ch->qrx == NULL && tserver_ch->is_tserver_ch == 1) {
+ tserver_ch->qrx = qrx;
+ ossl_qrx_set_late_validation_cb(tserver_ch->qrx, rx_late_validate,
+ tserver_ch);
+ ossl_qrx_set_key_update_cb(tserver_ch->qrx, rxku_detected,
+ tserver_ch);
+ }
+}
+
+QUIC_CHANNEL *ossl_quic_channel_alloc(const QUIC_CHANNEL_ARGS *args)
+{
+ QUIC_CHANNEL *ch = NULL;
+
+ if ((ch = OPENSSL_zalloc(sizeof(*ch))) == NULL)
+ return NULL;
+
+ ch->port = args->port;
+ ch->is_server = args->is_server;
+ ch->tls = args->tls;
+ ch->lcidm = args->lcidm;
+ ch->srtm = args->srtm;
+ ch->qrx = args->qrx;
+ ch->is_tserver_ch = args->is_tserver_ch;
+#ifndef OPENSSL_NO_QLOG
+ ch->use_qlog = args->use_qlog;
+
+ if (ch->use_qlog && args->qlog_title != NULL) {
+ if ((ch->qlog_title = OPENSSL_strdup(args->qlog_title)) == NULL) {
+ OPENSSL_free(ch);
+ return NULL;
+ }
+ }
+#endif
+
+ return ch;
+}
+
+void ossl_quic_channel_free(QUIC_CHANNEL *ch)
+{
+ if (ch == NULL)
+ return;
+
+ ch_cleanup(ch);
+ OPENSSL_free(ch);
+}
+
+/* Set mutator callbacks for test framework support */
+int ossl_quic_channel_set_mutator(QUIC_CHANNEL *ch,
+ ossl_mutate_packet_cb mutatecb,
+ ossl_finish_mutate_cb finishmutatecb,
+ void *mutatearg)
+{
+ if (ch->qtx == NULL)
+ return 0;
+
+ ossl_qtx_set_mutator(ch->qtx, mutatecb, finishmutatecb, mutatearg);
+ return 1;
+}
+
+int ossl_quic_channel_get_peer_addr(QUIC_CHANNEL *ch, BIO_ADDR *peer_addr)
+{
+ if (!ch->addressed_mode)
+ return 0;
+
+ return BIO_ADDR_copy(peer_addr, &ch->cur_peer_addr);
+}
+
+int ossl_quic_channel_set_peer_addr(QUIC_CHANNEL *ch, const BIO_ADDR *peer_addr)
+{
+ if (ch->state != QUIC_CHANNEL_STATE_IDLE)
+ return 0;
+
+ if (peer_addr == NULL || BIO_ADDR_family(peer_addr) == AF_UNSPEC) {
+ BIO_ADDR_clear(&ch->cur_peer_addr);
+ ch->addressed_mode = 0;
+ return 1;
+ }
+
+ if (!BIO_ADDR_copy(&ch->cur_peer_addr, peer_addr)) {
+ ch->addressed_mode = 0;
+ return 0;
+ }
+ ch->addressed_mode = 1;
+
+ return 1;
+}
+
+QUIC_REACTOR *ossl_quic_channel_get_reactor(QUIC_CHANNEL *ch)
+{
+ return ossl_quic_port_get0_reactor(ch->port);
+}
+
+QUIC_STREAM_MAP *ossl_quic_channel_get_qsm(QUIC_CHANNEL *ch)
+{
+ return &ch->qsm;
+}
+
+OSSL_STATM *ossl_quic_channel_get_statm(QUIC_CHANNEL *ch)
+{
+ return &ch->statm;
+}
+
+SSL *ossl_quic_channel_get0_tls(QUIC_CHANNEL *ch)
+{
+ return ch->tls;
+}
+
+static void free_buf_mem(unsigned char *buf, size_t buf_len, void *arg)
+{
+ BUF_MEM_free((BUF_MEM *)arg);
+}
+
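+/*
+ * Encodes a NEW_TOKEN frame carrying the given token and queues it on the CFQ
+ * for transmission in the application PN space.
+ */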
+int ossl_quic_channel_schedule_new_token(QUIC_CHANNEL *ch,
+ const unsigned char *token,
+ size_t token_len)
+{
+ int rc = 0;
+ QUIC_CFQ_ITEM *cfq_item;
+ WPACKET wpkt;
+ BUF_MEM *buf_mem = NULL;
+ size_t l = 0;
+
+ buf_mem = BUF_MEM_new();
+ if (buf_mem == NULL)
+ goto err;
+
+ if (!WPACKET_init(&wpkt, buf_mem))
+ goto err;
+
+ if (!ossl_quic_wire_encode_frame_new_token(&wpkt, token,
+ token_len)) {
+ WPACKET_cleanup(&wpkt);
+ goto err;
+ }
+
+ WPACKET_finish(&wpkt);
+
+ if (!WPACKET_get_total_written(&wpkt, &l))
+ goto err;
+
+ cfq_item = ossl_quic_cfq_add_frame(ch->cfq, 1,
+ QUIC_PN_SPACE_APP,
+ OSSL_QUIC_FRAME_TYPE_NEW_TOKEN, 0,
+ (unsigned char *)buf_mem->data, l,
+ free_buf_mem,
+ buf_mem);
+ if (cfq_item == NULL)
+ goto err;
+
+ rc = 1;
+err:
+ if (!rc)
+ BUF_MEM_free(buf_mem);
+ return rc;
+}
+
+size_t ossl_quic_channel_get_short_header_conn_id_len(QUIC_CHANNEL *ch)
+{
+ return ossl_quic_port_get_rx_short_dcid_len(ch->port);
+}
+
+QUIC_STREAM *ossl_quic_channel_get_stream_by_id(QUIC_CHANNEL *ch,
+ uint64_t stream_id)
+{
+ return ossl_quic_stream_map_get_by_id(&ch->qsm, stream_id);
+}
+
+int ossl_quic_channel_is_active(const QUIC_CHANNEL *ch)
+{
+ return ch != NULL && ch->state == QUIC_CHANNEL_STATE_ACTIVE;
+}
+
+int ossl_quic_channel_is_closing(const QUIC_CHANNEL *ch)
+{
+ return ch->state == QUIC_CHANNEL_STATE_TERMINATING_CLOSING;
+}
+
+static int ossl_quic_channel_is_draining(const QUIC_CHANNEL *ch)
+{
+ return ch->state == QUIC_CHANNEL_STATE_TERMINATING_DRAINING;
+}
+
+static int ossl_quic_channel_is_terminating(const QUIC_CHANNEL *ch)
+{
+ return ossl_quic_channel_is_closing(ch)
+ || ossl_quic_channel_is_draining(ch);
+}
+
+int ossl_quic_channel_is_terminated(const QUIC_CHANNEL *ch)
+{
+ return ch->state == QUIC_CHANNEL_STATE_TERMINATED;
+}
+
+int ossl_quic_channel_is_term_any(const QUIC_CHANNEL *ch)
+{
+ return ossl_quic_channel_is_terminating(ch)
+ || ossl_quic_channel_is_terminated(ch);
+}
+
+const QUIC_TERMINATE_CAUSE *
+ossl_quic_channel_get_terminate_cause(const QUIC_CHANNEL *ch)
+{
+ return ossl_quic_channel_is_term_any(ch) ? &ch->terminate_cause : NULL;
+}
+
+int ossl_quic_channel_is_handshake_complete(const QUIC_CHANNEL *ch)
+{
+ return ch->handshake_complete;
+}
+
+int ossl_quic_channel_is_handshake_confirmed(const QUIC_CHANNEL *ch)
+{
+ return ch->handshake_confirmed;
+}
+
+QUIC_DEMUX *ossl_quic_channel_get0_demux(QUIC_CHANNEL *ch)
+{
+ return ch->port->demux;
+}
+
+QUIC_PORT *ossl_quic_channel_get0_port(QUIC_CHANNEL *ch)
+{
+ return ch->port;
+}
+
+QUIC_ENGINE *ossl_quic_channel_get0_engine(QUIC_CHANNEL *ch)
+{
+ return ossl_quic_port_get0_engine(ch->port);
+}
+
+CRYPTO_MUTEX *ossl_quic_channel_get_mutex(QUIC_CHANNEL *ch)
+{
+ return ossl_quic_port_get0_mutex(ch->port);
+}
+
+int ossl_quic_channel_has_pending(const QUIC_CHANNEL *ch)
+{
+ return ossl_quic_demux_has_pending(ch->port->demux)
+ || ossl_qrx_processed_read_pending(ch->qrx);
+}
+
+/*
+ * QUIC Channel: Callbacks from Miscellaneous Subsidiary Components
+ * ================================================================
+ */
+
+/* Used by various components. */
+static OSSL_TIME get_time(void *arg)
+{
+ QUIC_CHANNEL *ch = arg;
+
+ return ossl_quic_port_get_time(ch->port);
+}
+
+/* Used by QSM. */
+static uint64_t get_stream_limit(int uni, void *arg)
+{
+ QUIC_CHANNEL *ch = arg;
+
+ return uni ? ch->max_local_streams_uni : ch->max_local_streams_bidi;
+}
+
+/*
+ * Called by QRX to determine if a packet is potentially invalid before trying
+ * to decrypt it.
+ */
+static int rx_late_validate(QUIC_PN pn, int pn_space, void *arg)
+{
+ QUIC_CHANNEL *ch = arg;
+
+ /* Potential duplicates should not be processed. */
+ if (!ossl_ackm_is_rx_pn_processable(ch->ackm, pn, pn_space))
+ return 0;
+
+ return 1;
+}
+
+/*
+ * Triggers a TXKU (whether spontaneous or solicited). Does not check whether
+ * spontaneous TXKU is currently allowed.
+ */
+QUIC_NEEDS_LOCK
+static void ch_trigger_txku(QUIC_CHANNEL *ch)
+{
+ uint64_t next_pn
+ = ossl_quic_tx_packetiser_get_next_pn(ch->txp, QUIC_PN_SPACE_APP);
+
+ if (!ossl_quic_pn_valid(next_pn)
+ || !ossl_qtx_trigger_key_update(ch->qtx)) {
+ ossl_quic_channel_raise_protocol_error(ch, OSSL_QUIC_ERR_INTERNAL_ERROR, 0,
+ "key update");
+ return;
+ }
+
+ ch->txku_in_progress = 1;
+ ch->txku_pn = next_pn;
+ ch->rxku_expected = ch->ku_locally_initiated;
+}
+
+QUIC_NEEDS_LOCK
+static int txku_in_progress(QUIC_CHANNEL *ch)
+{
+ if (ch->txku_in_progress
+ && ossl_ackm_get_largest_acked(ch->ackm, QUIC_PN_SPACE_APP) >= ch->txku_pn) {
+ OSSL_TIME pto = ossl_ackm_get_pto_duration(ch->ackm);
+
+ /*
+ * RFC 9001 s. 6.5: Endpoints SHOULD wait three times the PTO before
+ * initiating a key update after receiving an acknowledgment that
+ * confirms that the previous key update was received.
+ *
+ * Note that by the above wording, this period starts from when we get
+ * the ack for a TXKU-triggering packet, not when the TXKU is initiated.
+ * So we defer TXKU cooldown deadline calculation to this point.
+ */
+ ch->txku_in_progress = 0;
+ ch->txku_cooldown_deadline = ossl_time_add(get_time(ch),
+ ossl_time_multiply(pto, 3));
+ }
+
+ return ch->txku_in_progress;
+}
+
+QUIC_NEEDS_LOCK
+static int txku_allowed(QUIC_CHANNEL *ch)
+{
+ return ch->tx_enc_level == QUIC_ENC_LEVEL_1RTT /* Sanity check. */
+ /* Strict RFC 9001 criterion for TXKU. */
+ && ch->handshake_confirmed
+ && !txku_in_progress(ch);
+}
+
+QUIC_NEEDS_LOCK
+static int txku_recommendable(QUIC_CHANNEL *ch)
+{
+ if (!txku_allowed(ch))
+ return 0;
+
+ return
+ /* Recommended RFC 9001 criterion for TXKU. */
+ ossl_time_compare(get_time(ch), ch->txku_cooldown_deadline) >= 0
+ /* Some additional sensible criteria. */
+ && !ch->rxku_in_progress
+ && !ch->rxku_pending_confirm;
+}
+
+QUIC_NEEDS_LOCK
+static int txku_desirable(QUIC_CHANNEL *ch)
+{
+ uint64_t cur_pkt_count, max_pkt_count, thresh_pkt_count;
+ const uint32_t enc_level = QUIC_ENC_LEVEL_1RTT;
+
+ /* Check AEAD limit to determine if we should perform a spontaneous TXKU. */
+ cur_pkt_count = ossl_qtx_get_cur_epoch_pkt_count(ch->qtx, enc_level);
+ max_pkt_count = ossl_qtx_get_max_epoch_pkt_count(ch->qtx, enc_level);
+
+ thresh_pkt_count = max_pkt_count / 2;
+ if (ch->txku_threshold_override != UINT64_MAX)
+ thresh_pkt_count = ch->txku_threshold_override;
+
+ return cur_pkt_count >= thresh_pkt_count;
+}
+
+QUIC_NEEDS_LOCK
+static void ch_maybe_trigger_spontaneous_txku(QUIC_CHANNEL *ch)
+{
+ if (!txku_recommendable(ch) || !txku_desirable(ch))
+ return;
+
+ ch->ku_locally_initiated = 1;
+ ch_trigger_txku(ch);
+}
+
+QUIC_NEEDS_LOCK
+static int rxku_allowed(QUIC_CHANNEL *ch)
+{
+ /*
+ * RFC 9001 s. 6.1: An endpoint MUST NOT initiate a key update prior to
+ * having confirmed the handshake (Section 4.1.2).
+ *
+ * RFC 9001 s. 6.1: An endpoint MUST NOT initiate a subsequent key update
+ * unless it has received an acknowledgment for a packet that was sent
+ * protected with keys from the current key phase.
+ *
+ * RFC 9001 s. 6.2: If an endpoint detects a second update before it has
+ * sent any packets with updated keys containing an acknowledgment for the
+ * packet that initiated the key update, it indicates that its peer has
+ * updated keys twice without awaiting confirmation. An endpoint MAY treat
+ * such consecutive key updates as a connection error of type
+ * KEY_UPDATE_ERROR.
+ */
+ return ch->handshake_confirmed && !ch->rxku_pending_confirm;
+}
+
+/* Possible responses to a detected RX key update event. */
+enum rxku_decision {
+ DECISION_RXKU_ONLY,
+ DECISION_PROTOCOL_VIOLATION,
+ DECISION_SOLICITED_TXKU
+};
+
+/* Called when the QRX detects a key update has occurred. */
+QUIC_NEEDS_LOCK
+static void rxku_detected(QUIC_PN pn, void *arg)
+{
+ QUIC_CHANNEL *ch = arg;
+ enum rxku_decision decision;
+ OSSL_TIME pto;
+
+ /*
+ * Note: rxku_in_progress is always 0 here as an RXKU cannot be detected
+ * when we are still in UPDATING or COOLDOWN (see quic_record_rx.h).
+ */
+ assert(!ch->rxku_in_progress);
+
+ if (!rxku_allowed(ch))
+ /* Is RXKU even allowed at this time? */
+ decision = DECISION_PROTOCOL_VIOLATION;
+
+ else if (ch->ku_locally_initiated)
+ /*
+ * If this key update was locally initiated (meaning that this detected
+ * RXKU event is a result of our own spontaneous TXKU), we do not
+ * trigger another TXKU; after all, to do so would result in an infinite
+ * ping-pong of key updates. We still process it as an RXKU.
+ */
+ decision = DECISION_RXKU_ONLY;
+
+ else
+ /*
+ * Otherwise, a peer triggering a KU means we have to trigger a KU also.
+ */
+ decision = DECISION_SOLICITED_TXKU;
+
+ if (decision == DECISION_PROTOCOL_VIOLATION) {
+ ossl_quic_channel_raise_protocol_error(ch, OSSL_QUIC_ERR_KEY_UPDATE_ERROR,
+ 0, "RX key update again too soon");
+ return;
+ }
+
+ pto = ossl_ackm_get_pto_duration(ch->ackm);
+
+ ch->ku_locally_initiated = 0;
+ ch->rxku_in_progress = 1;
+ ch->rxku_pending_confirm = 1;
+ ch->rxku_trigger_pn = pn;
+ ch->rxku_update_end_deadline = ossl_time_add(get_time(ch), pto);
+ ch->rxku_expected = 0;
+
+ if (decision == DECISION_SOLICITED_TXKU)
+ /* NOT gated by usual txku_allowed() */
+ ch_trigger_txku(ch);
+
+    /*
+     * Ordinarily, we only generate an ACK when some ACK-eliciting frame has
+     * been received. In some cases, this may not occur for a long time, for
+     * example if transmission of application data is going in only one
+     * direction and nothing else is happening with the connection. However,
+     * the peer cannot initiate a subsequent (spontaneous) TXKU until its
+     * prior (spontaneous or solicited) TXKU has completed, which requires
+     * that the prior TXKU's trigger packet (or a subsequent packet) has been
+     * acknowledged; this can lead to very long times before a TXKU is
+     * considered 'completed'. Optimise this by forcing ACK generation after
+     * triggering TXKU. (Basically, we consider an RXKU event something that
+     * is 'ACK-eliciting', which it more or less should be; it is necessarily
+     * separate from ordinary processing of ACK-eliciting frames as a key
+     * update is not indicated via a frame.)
+     */
+ ossl_quic_tx_packetiser_schedule_ack(ch->txp, QUIC_PN_SPACE_APP);
+}
+
+/* Called per tick to handle RXKU timer events. */
+QUIC_NEEDS_LOCK
+static void ch_rxku_tick(QUIC_CHANNEL *ch)
+{
+ if (!ch->rxku_in_progress
+ || ossl_time_compare(get_time(ch), ch->rxku_update_end_deadline) < 0)
+ return;
+
+ ch->rxku_update_end_deadline = ossl_time_infinite();
+ ch->rxku_in_progress = 0;
+
+ if (!ossl_qrx_key_update_timeout(ch->qrx, /*normal=*/1))
+ ossl_quic_channel_raise_protocol_error(ch, OSSL_QUIC_ERR_INTERNAL_ERROR, 0,
+ "RXKU cooldown internal error");
+}
+
+QUIC_NEEDS_LOCK
+static void ch_on_txp_ack_tx(const OSSL_QUIC_FRAME_ACK *ack, uint32_t pn_space,
+ void *arg)
+{
+ QUIC_CHANNEL *ch = arg;
+
+ if (pn_space != QUIC_PN_SPACE_APP || !ch->rxku_pending_confirm
+ || !ossl_quic_frame_ack_contains_pn(ack, ch->rxku_trigger_pn))
+ return;
+
+ /*
+ * Defer clearing rxku_pending_confirm until TXP generate call returns
+ * successfully.
+ */
+ ch->rxku_pending_confirm_done = 1;
+}
+
+/*
+ * QUIC Channel: Handshake Layer Event Handling
+ * ============================================
+ */
+static int ch_on_crypto_send(const unsigned char *buf, size_t buf_len,
+ size_t *consumed, void *arg)
+{
+ int ret;
+ QUIC_CHANNEL *ch = arg;
+ uint32_t enc_level = ch->tx_enc_level;
+ uint32_t pn_space = ossl_quic_enc_level_to_pn_space(enc_level);
+ QUIC_SSTREAM *sstream = ch->crypto_send[pn_space];
+
+ if (!ossl_assert(sstream != NULL))
+ return 0;
+
+ ret = ossl_quic_sstream_append(sstream, buf, buf_len, consumed);
+ return ret;
+}
+
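+/* Returns 1 iff the given crypto receive stream has no data available. */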
+static int crypto_ensure_empty(QUIC_RSTREAM *rstream)
+{
+ size_t avail = 0;
+ int is_fin = 0;
+
+ if (rstream == NULL)
+ return 1;
+
+ if (!ossl_quic_rstream_available(rstream, &avail, &is_fin))
+ return 0;
+
+ return avail == 0;
+}
+
+static int ch_on_crypto_recv_record(const unsigned char **buf,
+ size_t *bytes_read, void *arg)
+{
+ QUIC_CHANNEL *ch = arg;
+ QUIC_RSTREAM *rstream;
+ int is_fin = 0; /* crypto stream is never finished, so we don't use this */
+ uint32_t i;
+
+ /*
+ * After we move to a later EL we must not allow our peer to send any new
+ * bytes in the crypto stream on a previous EL. Retransmissions of old bytes
+ * are allowed.
+ *
+ * In practice we will only move to a new EL when we have consumed all bytes
+ * which should be sent on the crypto stream at a previous EL. For example,
+ * the Handshake EL should not be provisioned until we have completely
+ * consumed a TLS 1.3 ServerHello. Thus when we provision an EL the output
+ * of ossl_quic_rstream_available() should be 0 for all lower ELs. Thus if a
+ * given EL is available we simply ensure we have not received any further
+ * bytes at a lower EL.
+ */
+ for (i = QUIC_ENC_LEVEL_INITIAL; i < ch->rx_enc_level; ++i)
+ if (i != QUIC_ENC_LEVEL_0RTT &&
+ !crypto_ensure_empty(ch->crypto_recv[ossl_quic_enc_level_to_pn_space(i)])) {
+ /* Protocol violation (RFC 9001 s. 4.1.3) */
+ ossl_quic_channel_raise_protocol_error(ch, OSSL_QUIC_ERR_PROTOCOL_VIOLATION,
+ OSSL_QUIC_FRAME_TYPE_CRYPTO,
+ "crypto stream data in wrong EL");
+ return 0;
+ }
+
+ rstream = ch->crypto_recv[ossl_quic_enc_level_to_pn_space(ch->rx_enc_level)];
+ if (rstream == NULL)
+ return 0;
+
+ return ossl_quic_rstream_get_record(rstream, buf, bytes_read,
+ &is_fin);
+}
+
+static int ch_on_crypto_release_record(size_t bytes_read, void *arg)
+{
+ QUIC_CHANNEL *ch = arg;
+ QUIC_RSTREAM *rstream;
+ OSSL_RTT_INFO rtt_info;
+ uint32_t rx_pn_space = ossl_quic_enc_level_to_pn_space(ch->rx_enc_level);
+
+ rstream = ch->crypto_recv[rx_pn_space];
+ if (rstream == NULL)
+ return 0;
+
+ ossl_statm_get_rtt_info(ossl_quic_channel_get_statm(ch), &rtt_info);
+ if (!ossl_quic_rxfc_on_retire(&ch->crypto_rxfc[rx_pn_space], bytes_read,
+ rtt_info.smoothed_rtt))
+ return 0;
+
+ return ossl_quic_rstream_release_record(rstream, bytes_read);
+}
+
+static int ch_on_handshake_yield_secret(uint32_t prot_level, int direction,
+ uint32_t suite_id, EVP_MD *md,
+ const unsigned char *secret,
+ size_t secret_len,
+ void *arg)
+{
+ QUIC_CHANNEL *ch = arg;
+ uint32_t i;
+ uint32_t enc_level;
+
+ /* Convert TLS protection level to QUIC encryption level */
+ switch (prot_level) {
+ case OSSL_RECORD_PROTECTION_LEVEL_EARLY:
+ enc_level = QUIC_ENC_LEVEL_0RTT;
+ break;
+
+ case OSSL_RECORD_PROTECTION_LEVEL_HANDSHAKE:
+ enc_level = QUIC_ENC_LEVEL_HANDSHAKE;
+ break;
+
+ case OSSL_RECORD_PROTECTION_LEVEL_APPLICATION:
+ enc_level = QUIC_ENC_LEVEL_1RTT;
+ break;
+
+ default:
+ return 0;
+ }
+
+ if (enc_level < QUIC_ENC_LEVEL_HANDSHAKE || enc_level >= QUIC_ENC_LEVEL_NUM)
+ /* Invalid EL. */
+ return 0;
+
+ if (direction) {
+ /* TX */
+ if (enc_level <= ch->tx_enc_level)
+ /*
+ * Does not make sense for us to try and provision an EL we have already
+ * attained.
+ */
+ return 0;
+
+ if (!ossl_qtx_provide_secret(ch->qtx, enc_level,
+ suite_id, md,
+ secret, secret_len))
+ return 0;
+
+ ch->tx_enc_level = enc_level;
+ } else {
+ /* RX */
+ if (enc_level <= ch->rx_enc_level)
+ /*
+ * Does not make sense for us to try and provision an EL we have already
+ * attained.
+ */
+ return 0;
+
+ /*
+ * Ensure all crypto streams for previous ELs are now empty of available
+ * data.
+ */
+ for (i = QUIC_ENC_LEVEL_INITIAL; i < enc_level; ++i)
+ if (!crypto_ensure_empty(ch->crypto_recv[ossl_quic_enc_level_to_pn_space(i)])) {
+ /* Protocol violation (RFC 9001 s. 4.1.3) */
+ ossl_quic_channel_raise_protocol_error(ch, OSSL_QUIC_ERR_PROTOCOL_VIOLATION,
+ OSSL_QUIC_FRAME_TYPE_CRYPTO,
+ "crypto stream data in wrong EL");
+ return 0;
+ }
+
+ if (!ossl_qrx_provide_secret(ch->qrx, enc_level,
+ suite_id, md,
+ secret, secret_len))
+ return 0;
+
+ ch->have_new_rx_secret = 1;
+ ch->rx_enc_level = enc_level;
+ }
+
+ return 1;
+}
+
+static int ch_on_handshake_complete(void *arg)
+{
+ QUIC_CHANNEL *ch = arg;
+
+ if (!ossl_assert(!ch->handshake_complete))
+ return 0; /* this should not happen twice */
+
+ if (!ossl_assert(ch->tx_enc_level == QUIC_ENC_LEVEL_1RTT))
+ return 0;
+
+    /*
+     * When the handshake is complete, we no longer need to abide by the 3x
+     * amplification limit; in any case, we should already have been marked
+     * validated as soon as we saw a packet encrypted under the handshake keys
+     * (see ossl_quic_handle_packet).
+     */
+ ossl_quic_tx_packetiser_set_validated(ch->txp);
+
+ if (!ch->got_remote_transport_params) {
+ /*
+ * Was not a valid QUIC handshake if we did not get valid transport
+ * params.
+ */
+ ossl_quic_channel_raise_protocol_error(ch, OSSL_QUIC_ERR_CRYPTO_MISSING_EXT,
+ OSSL_QUIC_FRAME_TYPE_CRYPTO,
+ "no transport parameters received");
+ return 0;
+ }
+
+ /* Don't need transport parameters anymore. */
+ OPENSSL_free(ch->local_transport_params);
+ ch->local_transport_params = NULL;
+
+ /* Tell the QRX it can now process 1-RTT packets. */
+ ossl_qrx_allow_1rtt_processing(ch->qrx);
+
+ /* Tell TXP the handshake is complete. */
+ ossl_quic_tx_packetiser_notify_handshake_complete(ch->txp);
+
+ ch->handshake_complete = 1;
+
+ if (ch->pending_new_token != NULL) {
+        /*
+         * Note that this is a best effort operation. If scheduling a new
+         * token fails, the worst outcome is that a client, not having
+         * received it, will just have to go through an extra round trip on a
+         * subsequent connection via the retry path, at which point we get
+         * another opportunity to schedule another new token. As a result, we
+         * don't need to handle any errors here.
+         */
+ ossl_quic_channel_schedule_new_token(ch,
+ ch->pending_new_token,
+ ch->pending_new_token_len);
+ OPENSSL_free(ch->pending_new_token);
+ ch->pending_new_token = NULL;
+ ch->pending_new_token_len = 0;
+ }
+
+ if (ch->is_server) {
+ /*
+ * On the server, the handshake is confirmed as soon as it is complete.
+ */
+ ossl_quic_channel_on_handshake_confirmed(ch);
+
+ ossl_quic_tx_packetiser_schedule_handshake_done(ch->txp);
+ }
+
+ ch_record_state_transition(ch, ch->state);
+ return 1;
+}
+
+static int ch_on_handshake_alert(void *arg, unsigned char alert_code)
+{
+ QUIC_CHANNEL *ch = arg;
+
+ /*
+ * RFC 9001 s. 4.4: More specifically, servers MUST NOT send post-handshake
+ * TLS CertificateRequest messages, and clients MUST treat receipt of such
+ * messages as a connection error of type PROTOCOL_VIOLATION.
+ */
+ if (alert_code == SSL_AD_UNEXPECTED_MESSAGE
+ && ch->handshake_complete
+ && ossl_quic_tls_is_cert_request(ch->qtls))
+ ossl_quic_channel_raise_protocol_error(ch,
+ OSSL_QUIC_ERR_PROTOCOL_VIOLATION,
+ 0,
+ "Post-handshake TLS "
+ "CertificateRequest received");
+ /*
+ * RFC 9001 s. 4.6.1: Servers MUST NOT send the early_data extension with a
+ * max_early_data_size field set to any value other than 0xffffffff. A
+ * client MUST treat receipt of a NewSessionTicket that contains an
+ * early_data extension with any other value as a connection error of type
+ * PROTOCOL_VIOLATION.
+ */
+ else if (alert_code == SSL_AD_ILLEGAL_PARAMETER
+ && ch->handshake_complete
+ && ossl_quic_tls_has_bad_max_early_data(ch->qtls))
+ ossl_quic_channel_raise_protocol_error(ch,
+ OSSL_QUIC_ERR_PROTOCOL_VIOLATION,
+ 0,
+ "Bad max_early_data received");
+ else
+ ossl_quic_channel_raise_protocol_error(ch,
+ OSSL_QUIC_ERR_CRYPTO_ERR_BEGIN
+ + alert_code,
+ 0, "handshake alert");
+
+ return 1;
+}
+
+/*
+ * QUIC Channel: Transport Parameter Handling
+ * ==========================================
+ */
+
+/*
+ * Called by the handshake layer when we receive QUIC Transport Parameters from
+ * the peer. Note that these are not authenticated until the handshake is
+ * marked as complete.
+ */
+#define TP_REASON_SERVER_ONLY(x) \
+ x " may not be sent by a client"
+#define TP_REASON_DUP(x) \
+ x " appears multiple times"
+#define TP_REASON_MALFORMED(x) \
+ x " is malformed"
+#define TP_REASON_EXPECTED_VALUE(x) \
+ x " does not match expected value"
+#define TP_REASON_NOT_RETRY(x) \
+ x " sent when not performing a retry"
+#define TP_REASON_REQUIRED(x) \
+ x " was not sent but is required"
+#define TP_REASON_INTERNAL_ERROR(x) \
+ x " encountered internal error"
+
+static void txfc_bump_cwm_bidi(QUIC_STREAM *s, void *arg)
+{
+ if (!ossl_quic_stream_is_bidi(s)
+ || ossl_quic_stream_is_server_init(s))
+ return;
+
+ ossl_quic_txfc_bump_cwm(&s->txfc, *(uint64_t *)arg);
+}
+
+static void txfc_bump_cwm_uni(QUIC_STREAM *s, void *arg)
+{
+ if (ossl_quic_stream_is_bidi(s)
+ || ossl_quic_stream_is_server_init(s))
+ return;
+
+ ossl_quic_txfc_bump_cwm(&s->txfc, *(uint64_t *)arg);
+}
+
+static void do_update(QUIC_STREAM *s, void *arg)
+{
+ QUIC_CHANNEL *ch = arg;
+
+ ossl_quic_stream_map_update_state(&ch->qsm, s);
+}
+
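+/* Returns the lesser of a and b, treating zero as meaning 'no value set'. */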
+static uint64_t min_u64_ignore_0(uint64_t a, uint64_t b)
+{
+ if (a == 0)
+ return b;
+ if (b == 0)
+ return a;
+
+ return a < b ? a : b;
+}
+
+static int ch_on_transport_params(const unsigned char *params,
+ size_t params_len,
+ void *arg)
+{
+ QUIC_CHANNEL *ch = arg;
+ PACKET pkt;
+ uint64_t id, v;
+ size_t len;
+ const unsigned char *body;
+ int got_orig_dcid = 0;
+ int got_initial_scid = 0;
+ int got_retry_scid = 0;
+ int got_initial_max_data = 0;
+ int got_initial_max_stream_data_bidi_local = 0;
+ int got_initial_max_stream_data_bidi_remote = 0;
+ int got_initial_max_stream_data_uni = 0;
+ int got_initial_max_streams_bidi = 0;
+ int got_initial_max_streams_uni = 0;
+ int got_stateless_reset_token = 0;
+ int got_preferred_addr = 0;
+ int got_ack_delay_exp = 0;
+ int got_max_ack_delay = 0;
+ int got_max_udp_payload_size = 0;
+ int got_max_idle_timeout = 0;
+ int got_active_conn_id_limit = 0;
+ int got_disable_active_migration = 0;
+ QUIC_CONN_ID cid;
+ const char *reason = "bad transport parameter";
+ ossl_unused uint64_t rx_max_idle_timeout = 0;
+ ossl_unused const void *stateless_reset_token_p = NULL;
+ QUIC_PREFERRED_ADDR pfa;
+ SSL_CONNECTION *sc = SSL_CONNECTION_FROM_SSL(ch->tls);
+
+    /*
+     * When an HRR occurs, the client sends its transport params again in the
+     * new ClientHello. Reset the transport params here and load them again.
+     */
+ if (ch->is_server && sc->hello_retry_request != SSL_HRR_NONE
+ && ch->got_remote_transport_params) {
+ ch->max_local_streams_bidi = 0;
+ ch->max_local_streams_uni = 0;
+ ch->got_local_transport_params = 0;
+ OPENSSL_free(ch->local_transport_params);
+ ch->local_transport_params = NULL;
+ } else if (ch->got_remote_transport_params) {
+ reason = "multiple transport parameter extensions";
+ goto malformed;
+ }
+
+ if (!PACKET_buf_init(&pkt, params, params_len)) {
+ ossl_quic_channel_raise_protocol_error(ch, OSSL_QUIC_ERR_INTERNAL_ERROR, 0,
+ "internal error (packet buf init)");
+ return 0;
+ }
+
+ while (PACKET_remaining(&pkt) > 0) {
+ if (!ossl_quic_wire_peek_transport_param(&pkt, &id))
+ goto malformed;
+
+ switch (id) {
+ case QUIC_TPARAM_ORIG_DCID:
+ if (got_orig_dcid) {
+ reason = TP_REASON_DUP("ORIG_DCID");
+ goto malformed;
+ }
+
+ if (ch->is_server) {
+ reason = TP_REASON_SERVER_ONLY("ORIG_DCID");
+ goto malformed;
+ }
+
+ if (!ossl_quic_wire_decode_transport_param_cid(&pkt, NULL, &cid)) {
+ reason = TP_REASON_MALFORMED("ORIG_DCID");
+ goto malformed;
+ }
+
+#ifndef FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION
+ /* Must match our initial DCID. */
+ if (!ossl_quic_conn_id_eq(&ch->init_dcid, &cid)) {
+ reason = TP_REASON_EXPECTED_VALUE("ORIG_DCID");
+ goto malformed;
+ }
+#endif
+
+ got_orig_dcid = 1;
+ break;
+
+ case QUIC_TPARAM_RETRY_SCID:
+ if (ch->is_server) {
+ reason = TP_REASON_SERVER_ONLY("RETRY_SCID");
+ goto malformed;
+ }
+
+ if (got_retry_scid) {
+ reason = TP_REASON_DUP("RETRY_SCID");
+ goto malformed;
+ }
+
+ if (!ch->doing_retry) {
+ reason = TP_REASON_NOT_RETRY("RETRY_SCID");
+ goto malformed;
+ }
+
+ if (!ossl_quic_wire_decode_transport_param_cid(&pkt, NULL, &cid)) {
+ reason = TP_REASON_MALFORMED("RETRY_SCID");
+ goto malformed;
+ }
+
+ /* Must match Retry packet SCID. */
+ if (!ossl_quic_conn_id_eq(&ch->retry_scid, &cid)) {
+ reason = TP_REASON_EXPECTED_VALUE("RETRY_SCID");
+ goto malformed;
+ }
+
+ got_retry_scid = 1;
+ break;
+
+ case QUIC_TPARAM_INITIAL_SCID:
+ if (got_initial_scid) {
+ /* must not appear more than once */
+ reason = TP_REASON_DUP("INITIAL_SCID");
+ goto malformed;
+ }
+
+ if (!ossl_quic_wire_decode_transport_param_cid(&pkt, NULL, &cid)) {
+ reason = TP_REASON_MALFORMED("INITIAL_SCID");
+ goto malformed;
+ }
+
+ if (!ossl_quic_conn_id_eq(&ch->init_scid, &cid)) {
+ reason = TP_REASON_EXPECTED_VALUE("INITIAL_SCID");
+ goto malformed;
+ }
+
+ got_initial_scid = 1;
+ break;
+
+ case QUIC_TPARAM_INITIAL_MAX_DATA:
+ if (got_initial_max_data) {
+ /* must not appear more than once */
+ reason = TP_REASON_DUP("INITIAL_MAX_DATA");
+ goto malformed;
+ }
+
+ if (!ossl_quic_wire_decode_transport_param_int(&pkt, &id, &v)) {
+ reason = TP_REASON_MALFORMED("INITIAL_MAX_DATA");
+ goto malformed;
+ }
+
+ ossl_quic_txfc_bump_cwm(&ch->conn_txfc, v);
+ got_initial_max_data = 1;
+ break;
+
+ case QUIC_TPARAM_INITIAL_MAX_STREAM_DATA_BIDI_LOCAL:
+ if (got_initial_max_stream_data_bidi_local) {
+ /* must not appear more than once */
+ reason = TP_REASON_DUP("INITIAL_MAX_STREAM_DATA_BIDI_LOCAL");
+ goto malformed;
+ }
+
+ if (!ossl_quic_wire_decode_transport_param_int(&pkt, &id, &v)) {
+ reason = TP_REASON_MALFORMED("INITIAL_MAX_STREAM_DATA_BIDI_LOCAL");
+ goto malformed;
+ }
+
+ /*
+ * This is correct; the BIDI_LOCAL TP governs streams created by
+ * the endpoint which sends the TP, i.e., our peer.
+ */
+ ch->rx_init_max_stream_data_bidi_remote = v;
+ got_initial_max_stream_data_bidi_local = 1;
+ break;
+
+ case QUIC_TPARAM_INITIAL_MAX_STREAM_DATA_BIDI_REMOTE:
+ if (got_initial_max_stream_data_bidi_remote) {
+ /* must not appear more than once */
+ reason = TP_REASON_DUP("INITIAL_MAX_STREAM_DATA_BIDI_REMOTE");
+ goto malformed;
+ }
+
+ if (!ossl_quic_wire_decode_transport_param_int(&pkt, &id, &v)) {
+ reason = TP_REASON_MALFORMED("INITIAL_MAX_STREAM_DATA_BIDI_REMOTE");
+ goto malformed;
+ }
+
+ /*
+ * This is correct; the BIDI_REMOTE TP governs streams created
+ * by the endpoint which receives the TP, i.e., us.
+ */
+ ch->rx_init_max_stream_data_bidi_local = v;
+
+ /* Apply to all existing streams. */
+ ossl_quic_stream_map_visit(&ch->qsm, txfc_bump_cwm_bidi, &v);
+ got_initial_max_stream_data_bidi_remote = 1;
+ break;
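+ /*
+ * Illustrative summary (not part of the original source): the
+ * BIDI_LOCAL/BIDI_REMOTE naming is relative to the sender of the
+ * transport parameter, so on receipt the two are swapped:
+ *
+ *     peer's ..._BIDI_LOCAL  -> ch->rx_init_max_stream_data_bidi_remote
+ *     peer's ..._BIDI_REMOTE -> ch->rx_init_max_stream_data_bidi_local
+ */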
+
+ case QUIC_TPARAM_INITIAL_MAX_STREAM_DATA_UNI:
+ if (got_initial_max_stream_data_uni) {
+ /* must not appear more than once */
+ reason = TP_REASON_DUP("INITIAL_MAX_STREAM_DATA_UNI");
+ goto malformed;
+ }
+
+ if (!ossl_quic_wire_decode_transport_param_int(&pkt, &id, &v)) {
+ reason = TP_REASON_MALFORMED("INITIAL_MAX_STREAM_DATA_UNI");
+ goto malformed;
+ }
+
+ ch->rx_init_max_stream_data_uni = v;
+
+ /* Apply to all existing streams. */
+ ossl_quic_stream_map_visit(&ch->qsm, txfc_bump_cwm_uni, &v);
+ got_initial_max_stream_data_uni = 1;
+ break;
+
+ case QUIC_TPARAM_ACK_DELAY_EXP:
+ if (got_ack_delay_exp) {
+ /* must not appear more than once */
+ reason = TP_REASON_DUP("ACK_DELAY_EXP");
+ goto malformed;
+ }
+
+ if (!ossl_quic_wire_decode_transport_param_int(&pkt, &id, &v)
+ || v > QUIC_MAX_ACK_DELAY_EXP) {
+ reason = TP_REASON_MALFORMED("ACK_DELAY_EXP");
+ goto malformed;
+ }
+
+ ch->rx_ack_delay_exp = (unsigned char)v;
+ got_ack_delay_exp = 1;
+ break;
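+ /*
+ * Illustrative note (not part of the original source): the exponent
+ * scales the ACK Delay field of received ACK frames, which encodes
+ * delay in units of 2^ack_delay_exponent microseconds (RFC 9000
+ * s. 19.3); e.g. with the default exponent of 3, an encoded ACK Delay
+ * of 1000 means 1000 * 2^3 = 8000 us.
+ */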
+
+ case QUIC_TPARAM_MAX_ACK_DELAY:
+ if (got_max_ack_delay) {
+ /* must not appear more than once */
+ reason = TP_REASON_DUP("MAX_ACK_DELAY");
+ goto malformed;
+ }
+
+ if (!ossl_quic_wire_decode_transport_param_int(&pkt, &id, &v)
+ || v >= (((uint64_t)1) << 14)) {
+ reason = TP_REASON_MALFORMED("MAX_ACK_DELAY");
+ goto malformed;
+ }
+
+ ch->rx_max_ack_delay = v;
+ ossl_ackm_set_rx_max_ack_delay(ch->ackm,
+ ossl_ms2time(ch->rx_max_ack_delay));
+
+ got_max_ack_delay = 1;
+ break;
+
+ case QUIC_TPARAM_INITIAL_MAX_STREAMS_BIDI:
+ if (got_initial_max_streams_bidi) {
+ /* must not appear more than once */
+ reason = TP_REASON_DUP("INITIAL_MAX_STREAMS_BIDI");
+ goto malformed;
+ }
+
+ if (!ossl_quic_wire_decode_transport_param_int(&pkt, &id, &v)
+ || v > (((uint64_t)1) << 60)) {
+ reason = TP_REASON_MALFORMED("INITIAL_MAX_STREAMS_BIDI");
+ goto malformed;
+ }
+
+ assert(ch->max_local_streams_bidi == 0);
+ ch->max_local_streams_bidi = v;
+ got_initial_max_streams_bidi = 1;
+ break;
+
+ case QUIC_TPARAM_INITIAL_MAX_STREAMS_UNI:
+ if (got_initial_max_streams_uni) {
+ /* must not appear more than once */
+ reason = TP_REASON_DUP("INITIAL_MAX_STREAMS_UNI");
+ goto malformed;
+ }
+
+ if (!ossl_quic_wire_decode_transport_param_int(&pkt, &id, &v)
+ || v > (((uint64_t)1) << 60)) {
+ reason = TP_REASON_MALFORMED("INITIAL_MAX_STREAMS_UNI");
+ goto malformed;
+ }
+
+ assert(ch->max_local_streams_uni == 0);
+ ch->max_local_streams_uni = v;
+ got_initial_max_streams_uni = 1;
+ break;
+
+ case QUIC_TPARAM_MAX_IDLE_TIMEOUT:
+ if (got_max_idle_timeout) {
+ /* must not appear more than once */
+ reason = TP_REASON_DUP("MAX_IDLE_TIMEOUT");
+ goto malformed;
+ }
+
+ if (!ossl_quic_wire_decode_transport_param_int(&pkt, &id, &v)) {
+ reason = TP_REASON_MALFORMED("MAX_IDLE_TIMEOUT");
+ goto malformed;
+ }
+
+ ch->max_idle_timeout_remote_req = v;
+
+ ch->max_idle_timeout = min_u64_ignore_0(ch->max_idle_timeout_local_req,
+ ch->max_idle_timeout_remote_req);
+
+ ch_update_idle(ch);
+ got_max_idle_timeout = 1;
+ rx_max_idle_timeout = v;
+ break;
+
+ case QUIC_TPARAM_MAX_UDP_PAYLOAD_SIZE:
+ if (got_max_udp_payload_size) {
+ /* must not appear more than once */
+ reason = TP_REASON_DUP("MAX_UDP_PAYLOAD_SIZE");
+ goto malformed;
+ }
+
+ if (!ossl_quic_wire_decode_transport_param_int(&pkt, &id, &v)
+ || v < QUIC_MIN_INITIAL_DGRAM_LEN) {
+ reason = TP_REASON_MALFORMED("MAX_UDP_PAYLOAD_SIZE");
+ goto malformed;
+ }
+
+ ch->rx_max_udp_payload_size = v;
+ got_max_udp_payload_size = 1;
+ break;
+
+ case QUIC_TPARAM_ACTIVE_CONN_ID_LIMIT:
+ if (got_active_conn_id_limit) {
+ /* must not appear more than once */
+ reason = TP_REASON_DUP("ACTIVE_CONN_ID_LIMIT");
+ goto malformed;
+ }
+
+ if (!ossl_quic_wire_decode_transport_param_int(&pkt, &id, &v)
+ || v < QUIC_MIN_ACTIVE_CONN_ID_LIMIT) {
+ reason = TP_REASON_MALFORMED("ACTIVE_CONN_ID_LIMIT");
+ goto malformed;
+ }
+
+ ch->rx_active_conn_id_limit = v;
+ got_active_conn_id_limit = 1;
+ break;
+
+ case QUIC_TPARAM_STATELESS_RESET_TOKEN:
+ if (got_stateless_reset_token) {
+ reason = TP_REASON_DUP("STATELESS_RESET_TOKEN");
+ goto malformed;
+ }
+
+ /*
+ * RFC 9000 s. 18.2: This transport parameter MUST NOT be sent
+ * by a client but MAY be sent by a server.
+ */
+ if (ch->is_server) {
+ reason = TP_REASON_SERVER_ONLY("STATELESS_RESET_TOKEN");
+ goto malformed;
+ }
+
+ body = ossl_quic_wire_decode_transport_param_bytes(&pkt, &id, &len);
+ if (body == NULL || len != QUIC_STATELESS_RESET_TOKEN_LEN) {
+ reason = TP_REASON_MALFORMED("STATELESS_RESET_TOKEN");
+ goto malformed;
+ }
+ if (!ossl_quic_srtm_add(ch->srtm, ch, ch->cur_remote_seq_num,
+ (const QUIC_STATELESS_RESET_TOKEN *)body)) {
+ reason = TP_REASON_INTERNAL_ERROR("STATELESS_RESET_TOKEN");
+ goto malformed;
+ }
+
+ stateless_reset_token_p = body;
+ got_stateless_reset_token = 1;
+ break;
+
+ case QUIC_TPARAM_PREFERRED_ADDR:
+ /* TODO(QUIC FUTURE): Handle preferred address. */
+ if (got_preferred_addr) {
+ reason = TP_REASON_DUP("PREFERRED_ADDR");
+ goto malformed;
+ }
+
+ /*
+ * RFC 9000 s. 18.2: "A server that chooses a zero-length
+ * connection ID MUST NOT provide a preferred address.
+ * Similarly, a server MUST NOT include a zero-length connection
+ * ID in this transport parameter. A client MUST treat a
+ * violation of these requirements as a connection error of type
+ * TRANSPORT_PARAMETER_ERROR."
+ */
+ if (ch->is_server) {
+ reason = TP_REASON_SERVER_ONLY("PREFERRED_ADDR");
+ goto malformed;
+ }
+
+ if (ch->cur_remote_dcid.id_len == 0) {
+ reason = "PREFERRED_ADDR provided for zero-length CID";
+ goto malformed;
+ }
+
+ if (!ossl_quic_wire_decode_transport_param_preferred_addr(&pkt, &pfa)) {
+ reason = TP_REASON_MALFORMED("PREFERRED_ADDR");
+ goto malformed;
+ }
+
+ if (pfa.cid.id_len == 0) {
+ reason = "zero-length CID in PREFERRED_ADDR";
+ goto malformed;
+ }
+
+ got_preferred_addr = 1;
+ break;
+
+ case QUIC_TPARAM_DISABLE_ACTIVE_MIGRATION:
+ /* We do not currently handle migration, so nothing to do. */
+ if (got_disable_active_migration) {
+ /* must not appear more than once */
+ reason = TP_REASON_DUP("DISABLE_ACTIVE_MIGRATION");
+ goto malformed;
+ }
+
+ body = ossl_quic_wire_decode_transport_param_bytes(&pkt, &id, &len);
+ if (body == NULL || len > 0) {
+ reason = TP_REASON_MALFORMED("DISABLE_ACTIVE_MIGRATION");
+ goto malformed;
+ }
+
+ got_disable_active_migration = 1;
+ break;
+
+ default:
+ /*
+ * Skip over and ignore.
+ *
+ * RFC 9000 s. 7.4: We SHOULD treat duplicated transport parameters
+ * as a connection error, but we are not required to. Currently we
+ * handle this by checking for duplicates in the parameters that we
+ * recognise, as above, but we don't bother tracking duplicates of
+ * anything we don't recognise.
+ */
+ body = ossl_quic_wire_decode_transport_param_bytes(&pkt, &id,
+ &len);
+ if (body == NULL)
+ goto malformed;
+
+ break;
+ }
+ }
+
+ if (!got_initial_scid) {
+ reason = TP_REASON_REQUIRED("INITIAL_SCID");
+ goto malformed;
+ }
+
+ if (!ch->is_server) {
+ if (!got_orig_dcid) {
+ reason = TP_REASON_REQUIRED("ORIG_DCID");
+ goto malformed;
+ }
+
+ if (ch->doing_retry && !got_retry_scid) {
+ reason = TP_REASON_REQUIRED("RETRY_SCID");
+ goto malformed;
+ }
+ }
+
+ ch->got_remote_transport_params = 1;
+
+#ifndef OPENSSL_NO_QLOG
+ QLOG_EVENT_BEGIN(ch_get_qlog(ch), transport, parameters_set)
+ QLOG_STR("owner", "remote");
+
+ if (got_orig_dcid)
+ QLOG_CID("original_destination_connection_id",
+ &ch->init_dcid);
+ if (got_initial_scid)
+ QLOG_CID("original_source_connection_id",
+ &ch->init_dcid);
+ if (got_retry_scid)
+ QLOG_CID("retry_source_connection_id",
+ &ch->retry_scid);
+ if (got_initial_max_data)
+ QLOG_U64("initial_max_data",
+ ossl_quic_txfc_get_cwm(&ch->conn_txfc));
+ if (got_initial_max_stream_data_bidi_local)
+ QLOG_U64("initial_max_stream_data_bidi_local",
+ ch->rx_init_max_stream_data_bidi_local);
+ if (got_initial_max_stream_data_bidi_remote)
+ QLOG_U64("initial_max_stream_data_bidi_remote",
+ ch->rx_init_max_stream_data_bidi_remote);
+ if (got_initial_max_stream_data_uni)
+ QLOG_U64("initial_max_stream_data_uni",
+ ch->rx_init_max_stream_data_uni);
+ if (got_initial_max_streams_bidi)
+ QLOG_U64("initial_max_streams_bidi",
+ ch->max_local_streams_bidi);
+ if (got_initial_max_streams_uni)
+ QLOG_U64("initial_max_streams_uni",
+ ch->max_local_streams_uni);
+ if (got_ack_delay_exp)
+ QLOG_U64("ack_delay_exponent", ch->rx_ack_delay_exp);
+ if (got_max_ack_delay)
+ QLOG_U64("max_ack_delay", ch->rx_max_ack_delay);
+ if (got_max_udp_payload_size)
+ QLOG_U64("max_udp_payload_size", ch->rx_max_udp_payload_size);
+ if (got_max_idle_timeout)
+ QLOG_U64("max_idle_timeout", rx_max_idle_timeout);
+ if (got_active_conn_id_limit)
+ QLOG_U64("active_connection_id_limit", ch->rx_active_conn_id_limit);
+ if (got_stateless_reset_token)
+ QLOG_BIN("stateless_reset_token", stateless_reset_token_p,
+ QUIC_STATELESS_RESET_TOKEN_LEN);
+ if (got_preferred_addr) {
+ QLOG_BEGIN("preferred_addr")
+ QLOG_U64("port_v4", pfa.ipv4_port);
+ QLOG_U64("port_v6", pfa.ipv6_port);
+ QLOG_BIN("ip_v4", pfa.ipv4, sizeof(pfa.ipv4));
+ QLOG_BIN("ip_v6", pfa.ipv6, sizeof(pfa.ipv6));
+ QLOG_BIN("stateless_reset_token", pfa.stateless_reset.token,
+ sizeof(pfa.stateless_reset.token));
+ QLOG_CID("connection_id", &pfa.cid);
+ QLOG_END()
+ }
+ QLOG_BOOL("disable_active_migration", got_disable_active_migration);
+ QLOG_EVENT_END()
+#endif
+
+ if (got_initial_max_data || got_initial_max_stream_data_bidi_remote
+ || got_initial_max_streams_bidi || got_initial_max_streams_uni)
+ /*
+ * If FC credit was bumped, we may now be able to send. Update all
+ * streams.
+ */
+ ossl_quic_stream_map_visit(&ch->qsm, do_update, ch);
+
+ /* If we are a server, we now generate our own transport parameters. */
+ if (ch->is_server && !ch_generate_transport_params(ch)) {
+ ossl_quic_channel_raise_protocol_error(ch, OSSL_QUIC_ERR_INTERNAL_ERROR, 0,
+ "internal error");
+ return 0;
+ }
+
+ return 1;
+
+malformed:
+ ossl_quic_channel_raise_protocol_error(ch, OSSL_QUIC_ERR_TRANSPORT_PARAMETER_ERROR,
+ 0, reason);
+ return 0;
+}
+
+/*
+ * Called when we want to generate transport parameters. This is called
+ * immediately at instantiation time for a client and after we receive the
+ * client's transport parameters for a server.
+ */
+static int ch_generate_transport_params(QUIC_CHANNEL *ch)
+{
+ int ok = 0;
+ BUF_MEM *buf_mem = NULL;
+ WPACKET wpkt;
+ int wpkt_valid = 0;
+ size_t buf_len = 0;
+ QUIC_CONN_ID *id_to_use = NULL;
+
+ /*
+ * We need to select which connection ID to encode in the
+ * QUIC_TPARAM_ORIG_DCID transport parameter.
+ * If we have an odcid, then this connection was established in
+ * response to a retry request, and we need to use the connection ID
+ * sent in the first initial packet.
+ * If we don't have an odcid, then this connection was established
+ * without a retry, and init_dcid is the connection ID we should use.
+ */
+ if (ch->odcid.id_len == 0)
+ id_to_use = &ch->init_dcid;
+ else
+ id_to_use = &ch->odcid;
+
+ if (ch->local_transport_params != NULL || ch->got_local_transport_params)
+ goto err;
+
+ if ((buf_mem = BUF_MEM_new()) == NULL)
+ goto err;
+
+ if (!WPACKET_init(&wpkt, buf_mem))
+ goto err;
+
+ wpkt_valid = 1;
+
+ if (ossl_quic_wire_encode_transport_param_bytes(&wpkt, QUIC_TPARAM_DISABLE_ACTIVE_MIGRATION,
+ NULL, 0) == NULL)
+ goto err;
+
+ if (ch->is_server) {
+ if (!ossl_quic_wire_encode_transport_param_cid(&wpkt, QUIC_TPARAM_ORIG_DCID,
+ id_to_use))
+ goto err;
+
+ if (!ossl_quic_wire_encode_transport_param_cid(&wpkt, QUIC_TPARAM_INITIAL_SCID,
+ &ch->cur_local_cid))
+ goto err;
+ if (ch->odcid.id_len != 0)
+ if (!ossl_quic_wire_encode_transport_param_cid(&wpkt,
+ QUIC_TPARAM_RETRY_SCID,
+ &ch->init_dcid))
+ goto err;
+ } else {
+ if (!ossl_quic_wire_encode_transport_param_cid(&wpkt, QUIC_TPARAM_INITIAL_SCID,
+ &ch->init_scid))
+ goto err;
+ }
+
+ if (!ossl_quic_wire_encode_transport_param_int(&wpkt, QUIC_TPARAM_MAX_IDLE_TIMEOUT,
+ ch->max_idle_timeout_local_req))
+ goto err;
+
+ if (!ossl_quic_wire_encode_transport_param_int(&wpkt, QUIC_TPARAM_MAX_UDP_PAYLOAD_SIZE,
+ QUIC_MIN_INITIAL_DGRAM_LEN))
+ goto err;
+
+ if (!ossl_quic_wire_encode_transport_param_int(&wpkt, QUIC_TPARAM_ACTIVE_CONN_ID_LIMIT,
+ QUIC_MIN_ACTIVE_CONN_ID_LIMIT))
+ goto err;
+
+ if (ch->tx_max_ack_delay != QUIC_DEFAULT_MAX_ACK_DELAY
+ && !ossl_quic_wire_encode_transport_param_int(&wpkt, QUIC_TPARAM_MAX_ACK_DELAY,
+ ch->tx_max_ack_delay))
+ goto err;
+
+ if (!ossl_quic_wire_encode_transport_param_int(&wpkt, QUIC_TPARAM_INITIAL_MAX_DATA,
+ ossl_quic_rxfc_get_cwm(&ch->conn_rxfc)))
+ goto err;
+
+ /* Send the default CWM for a new RXFC. */
+ if (!ossl_quic_wire_encode_transport_param_int(&wpkt, QUIC_TPARAM_INITIAL_MAX_STREAM_DATA_BIDI_LOCAL,
+ ch->tx_init_max_stream_data_bidi_local))
+ goto err;
+
+ if (!ossl_quic_wire_encode_transport_param_int(&wpkt, QUIC_TPARAM_INITIAL_MAX_STREAM_DATA_BIDI_REMOTE,
+ ch->tx_init_max_stream_data_bidi_remote))
+ goto err;
+
+ if (!ossl_quic_wire_encode_transport_param_int(&wpkt, QUIC_TPARAM_INITIAL_MAX_STREAM_DATA_UNI,
+ ch->tx_init_max_stream_data_uni))
+ goto err;
+
+ if (!ossl_quic_wire_encode_transport_param_int(&wpkt, QUIC_TPARAM_INITIAL_MAX_STREAMS_BIDI,
+ ossl_quic_rxfc_get_cwm(&ch->max_streams_bidi_rxfc)))
+ goto err;
+
+ if (!ossl_quic_wire_encode_transport_param_int(&wpkt, QUIC_TPARAM_INITIAL_MAX_STREAMS_UNI,
+ ossl_quic_rxfc_get_cwm(&ch->max_streams_uni_rxfc)))
+ goto err;
+
+ if (!WPACKET_finish(&wpkt))
+ goto err;
+
+ wpkt_valid = 0;
+
+ if (!WPACKET_get_total_written(&wpkt, &buf_len))
+ goto err;
+
+ ch->local_transport_params = (unsigned char *)buf_mem->data;
+ buf_mem->data = NULL;
+
+ if (!ossl_quic_tls_set_transport_params(ch->qtls, ch->local_transport_params,
+ buf_len))
+ goto err;
+
+#ifndef OPENSSL_NO_QLOG
+ QLOG_EVENT_BEGIN(ch_get_qlog(ch), transport, parameters_set)
+ QLOG_STR("owner", "local");
+ QLOG_BOOL("disable_active_migration", 1);
+ if (ch->is_server) {
+ QLOG_CID("original_destination_connection_id", &ch->init_dcid);
+ QLOG_CID("initial_source_connection_id", &ch->cur_local_cid);
+ } else {
+ QLOG_STR("initial_source_connection_id", "");
+ }
+ QLOG_U64("max_idle_timeout", ch->max_idle_timeout);
+ QLOG_U64("max_udp_payload_size", QUIC_MIN_INITIAL_DGRAM_LEN);
+ QLOG_U64("active_connection_id_limit", QUIC_MIN_ACTIVE_CONN_ID_LIMIT);
+ QLOG_U64("max_ack_delay", ch->tx_max_ack_delay);
+ QLOG_U64("initial_max_data", ossl_quic_rxfc_get_cwm(&ch->conn_rxfc));
+ QLOG_U64("initial_max_stream_data_bidi_local",
+ ch->tx_init_max_stream_data_bidi_local);
+ QLOG_U64("initial_max_stream_data_bidi_remote",
+ ch->tx_init_max_stream_data_bidi_remote);
+ QLOG_U64("initial_max_stream_data_uni",
+ ch->tx_init_max_stream_data_uni);
+ QLOG_U64("initial_max_streams_bidi",
+ ossl_quic_rxfc_get_cwm(&ch->max_streams_bidi_rxfc));
+ QLOG_U64("initial_max_streams_uni",
+ ossl_quic_rxfc_get_cwm(&ch->max_streams_uni_rxfc));
+ QLOG_EVENT_END()
+#endif
+
+ ch->got_local_transport_params = 1;
+
+ ok = 1;
+err:
+ if (wpkt_valid)
+ WPACKET_cleanup(&wpkt);
+ BUF_MEM_free(buf_mem);
+ return ok;
+}
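+
+/*
+ * Illustrative sketch (not part of the original source): each transport
+ * parameter serialized above is encoded, per RFC 9000 s. 18, as two
+ * variable-length integers (id, length) followed by the value bytes. For
+ * example, a MAX_IDLE_TIMEOUT (id 0x01) of 30000 ms encodes as:
+ *
+ *     01 04 80 00 75 30
+ *
+ * where 0x01 is the parameter id (max_idle_timeout), 0x04 the value length,
+ * and 80 00 75 30 is 30000 encoded as a four-byte varint (0x80007530).
+ */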
+
+/*
+ * QUIC Channel: Ticker-Mutator
+ * ============================
+ */
+
+/*
+ * The central ticker function called by the reactor. This does everything, or
+ * at least everything network I/O related. Best effort - not allowed to fail
+ * "loudly".
+ */
+void ossl_quic_channel_subtick(QUIC_CHANNEL *ch, QUIC_TICK_RESULT *res,
+ uint32_t flags)
+{
+ OSSL_TIME now, deadline;
+ int channel_only = (flags & QUIC_REACTOR_TICK_FLAG_CHANNEL_ONLY) != 0;
+ int notify_other_threads = 0;
+
+ /*
+ * When we tick the QUIC connection, we do everything we need to do
+ * periodically. Network I/O handling will already have been performed
+ * as necessary by the QUIC port. Thus, in order, we:
+ *
+ * - handle any packets the DEMUX has queued up for us;
+ * - handle any timer events which are due to fire (ACKM, etc.);
+ * - generate any packets which need to be sent;
+ * - determine the time at which we should next be ticked.
+ */
+
+ /*
+ * If the connection has not yet started, or we are in the TERMINATED state,
+ * there is nothing to do.
+ */
+ if (ch->state == QUIC_CHANNEL_STATE_IDLE
+ || ossl_quic_channel_is_terminated(ch)) {
+ res->net_read_desired = 0;
+ res->net_write_desired = 0;
+ res->notify_other_threads = 0;
+ res->tick_deadline = ossl_time_infinite();
+ return;
+ }
+
+ /*
+ * If we are in the TERMINATING state, check if the terminating timer has
+ * expired.
+ */
+ if (ossl_quic_channel_is_terminating(ch)) {
+ now = get_time(ch);
+
+ if (ossl_time_compare(now, ch->terminate_deadline) >= 0) {
+ ch_on_terminating_timeout(ch);
+ res->net_read_desired = 0;
+ res->net_write_desired = 0;
+ res->notify_other_threads = 1;
+ res->tick_deadline = ossl_time_infinite();
+ return; /* abort normal processing, nothing to do */
+ }
+ }
+
+ if (!ch->port->engine->inhibit_tick) {
+ /* Handle RXKU timeouts. */
+ ch_rxku_tick(ch);
+
+ do {
+ /* Process queued incoming packets. */
+ ch->did_tls_tick = 0;
+ ch->have_new_rx_secret = 0;
+ ch_rx(ch, channel_only, &notify_other_threads);
+
+ /*
+ * Allow the handshake layer to check for any new incoming data and
+ * generate new outgoing data.
+ */
+ if (!ch->did_tls_tick)
+ ch_tick_tls(ch, channel_only, &notify_other_threads);
+
+ /*
+ * If the handshake layer gave us a new secret, we need to do RX
+ * again because packets that were not previously processable and
+ * were deferred might now be processable.
+ *
+ * TODO(QUIC FUTURE): Consider handling this in the yield_secret callback.
+ */
+ } while (ch->have_new_rx_secret);
+ }
+
+ /*
+ * Handle any timer events which are due to fire; namely, the loss
+ * detection deadline and the idle timeout.
+ *
+ * ACKM ACK generation deadline is polled by TXP, so we don't need to
+ * handle it here.
+ */
+ now = get_time(ch);
+ if (ossl_time_compare(now, ch->idle_deadline) >= 0) {
+ /*
+ * Idle timeout differs from normal protocol violation because we do
+ * not send a CONN_CLOSE frame; go straight to TERMINATED.
+ */
+ if (!ch->port->engine->inhibit_tick)
+ ch_on_idle_timeout(ch);
+
+ res->net_read_desired = 0;
+ res->net_write_desired = 0;
+ res->notify_other_threads = 1;
+ res->tick_deadline = ossl_time_infinite();
+ return;
+ }
+
+ if (!ch->port->engine->inhibit_tick) {
+ deadline = ossl_ackm_get_loss_detection_deadline(ch->ackm);
+ if (!ossl_time_is_zero(deadline)
+ && ossl_time_compare(now, deadline) >= 0)
+ ossl_ackm_on_timeout(ch->ackm);
+
+ /* If a ping is due, inform TXP. */
+ if (ossl_time_compare(now, ch->ping_deadline) >= 0) {
+ int pn_space = ossl_quic_enc_level_to_pn_space(ch->tx_enc_level);
+
+ ossl_quic_tx_packetiser_schedule_ack_eliciting(ch->txp, pn_space);
+
+ /*
+ * If we have no CC budget at this time we cannot process the above
+ * PING request immediately. In any case we have scheduled the
+ * request so bump the ping deadline. If we don't do this we will
+ * busy-loop endlessly as the above deadline comparison condition
+ * will still be met.
+ */
+ ch_update_ping_deadline(ch);
+ }
+
+ /* Queue any data to be sent for transmission. */
+ ch_tx(ch, &notify_other_threads);
+
+ /* Do stream GC. */
+ ossl_quic_stream_map_gc(&ch->qsm);
+ }
+
+ /* Determine the time at which we should next be ticked. */
+ res->tick_deadline = ch_determine_next_tick_deadline(ch);
+
+ /*
+ * Always process network input unless we are now terminated. Although we
+ * had not terminated at the beginning of this tick, network errors in
+ * ch_tx() may have caused us to transition to the Terminated state.
+ */
+ res->net_read_desired = !ossl_quic_channel_is_terminated(ch);
+
+ /* We want to write to the network if we have any data in our TX queue. */
+ res->net_write_desired
+ = (!ossl_quic_channel_is_terminated(ch)
+ && ossl_qtx_get_queue_len_datagrams(ch->qtx) > 0);
+
+ res->notify_other_threads = notify_other_threads;
+}
+
+static int ch_tick_tls(QUIC_CHANNEL *ch, int channel_only, int *notify_other_threads)
+{
+ uint64_t error_code;
+ const char *error_msg;
+ ERR_STATE *error_state = NULL;
+
+ if (channel_only)
+ return 1;
+
+ ch->did_tls_tick = 1;
+ ossl_quic_tls_tick(ch->qtls);
+
+ if (ossl_quic_tls_get_error(ch->qtls, &error_code, &error_msg,
+ &error_state)) {
+ ossl_quic_channel_raise_protocol_error_state(ch, error_code, 0,
+ error_msg, error_state);
+ if (notify_other_threads != NULL)
+ *notify_other_threads = 1;
+
+ return 0;
+ }
+
+ return 1;
+}
+
+/* Check incoming forged packet limit and terminate connection if needed. */
+static void ch_rx_check_forged_pkt_limit(QUIC_CHANNEL *ch)
+{
+ uint32_t enc_level;
+ uint64_t limit = UINT64_MAX, l;
+
+ for (enc_level = QUIC_ENC_LEVEL_INITIAL;
+ enc_level < QUIC_ENC_LEVEL_NUM;
+ ++enc_level)
+ {
+ /*
+ * Different ELs can have different AEADs which can in turn impose
+ * different limits, so use the lowest value of any currently valid EL.
+ */
+ if ((ch->el_discarded & (1U << enc_level)) != 0)
+ continue;
+
+ if (enc_level > ch->rx_enc_level)
+ break;
+
+ l = ossl_qrx_get_max_forged_pkt_count(ch->qrx, enc_level);
+ if (l < limit)
+ limit = l;
+ }
+
+ if (ossl_qrx_get_cur_forged_pkt_count(ch->qrx) < limit)
+ return;
+
+ ossl_quic_channel_raise_protocol_error(ch, OSSL_QUIC_ERR_AEAD_LIMIT_REACHED, 0,
+ "forgery limit");
+}
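+
+/*
+ * Illustrative note (not part of the original source): the per-AEAD
+ * integrity limits consulted above come from RFC 9001 s. 6.6, e.g. 2^52
+ * forged packets for AEAD_AES_128_GCM and AEAD_AES_256_GCM but only 2^36
+ * for AEAD_CHACHA20_POLY1305, hence the use of the lowest limit across all
+ * still-valid ELs.
+ */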
+
+/* Process queued incoming packets and handle frames, if any. */
+static int ch_rx(QUIC_CHANNEL *ch, int channel_only, int *notify_other_threads)
+{
+ int handled_any = 0;
+ const int closing = ossl_quic_channel_is_closing(ch);
+
+ if (!ch->is_server && !ch->have_sent_any_pkt)
+ /*
+ * We have not sent anything yet, therefore there is no need to check
+ * for incoming data.
+ */
+ return 1;
+
+ for (;;) {
+ assert(ch->qrx_pkt == NULL);
+
+ if (!ossl_qrx_read_pkt(ch->qrx, &ch->qrx_pkt))
+ break;
+
+ /* Track the amount of data received while in the closing state */
+ if (closing)
+ ossl_quic_tx_packetiser_record_received_closing_bytes(
+ ch->txp, ch->qrx_pkt->hdr->len);
+
+ if (!handled_any) {
+ ch_update_idle(ch);
+ ch_update_ping_deadline(ch);
+ }
+
+ ch_rx_handle_packet(ch, channel_only); /* best effort */
+
+ /*
+ * Regardless of the outcome of frame handling, unref the packet.
+ * This will free the packet unless something added another
+ * reference to it during frame processing.
+ */
+ ossl_qrx_pkt_release(ch->qrx_pkt);
+ ch->qrx_pkt = NULL;
+
+ ch->have_sent_ack_eliciting_since_rx = 0;
+ handled_any = 1;
+ }
+
+ ch_rx_check_forged_pkt_limit(ch);
+
+ if (handled_any && notify_other_threads != NULL)
+ *notify_other_threads = 1;
+
+ /*
+ * When in TERMINATING - CLOSING, generate a CONN_CLOSE frame whenever we
+ * process one or more incoming packets.
+ */
+ if (handled_any && closing)
+ ch->conn_close_queued = 1;
+
+ return 1;
+}
+
+static int bio_addr_eq(const BIO_ADDR *a, const BIO_ADDR *b)
+{
+ if (BIO_ADDR_family(a) != BIO_ADDR_family(b))
+ return 0;
+
+ switch (BIO_ADDR_family(a)) {
+ case AF_INET:
+ return !memcmp(&a->s_in.sin_addr,
+ &b->s_in.sin_addr,
+ sizeof(a->s_in.sin_addr))
+ && a->s_in.sin_port == b->s_in.sin_port;
+#if OPENSSL_USE_IPV6
+ case AF_INET6:
+ return !memcmp(&a->s_in6.sin6_addr,
+ &b->s_in6.sin6_addr,
+ sizeof(a->s_in6.sin6_addr))
+ && a->s_in6.sin6_port == b->s_in6.sin6_port;
+#endif
+ default:
+ return 0; /* not supported */
+ }
+}
+
+/* Handles the packet currently in ch->qrx_pkt->hdr. */
+static void ch_rx_handle_packet(QUIC_CHANNEL *ch, int channel_only)
+{
+ uint32_t enc_level;
+ int old_have_processed_any_pkt = ch->have_processed_any_pkt;
+ OSSL_QTX_IOVEC iovec;
+ PACKET vpkt;
+ unsigned long supported_ver;
+
+ assert(ch->qrx_pkt != NULL);
+
+ /*
+ * RFC 9000 s. 10.2.1 Closing Connection State:
+ * An endpoint that is closing is not required to process any
+ * received frame.
+ */
+ if (!ossl_quic_channel_is_active(ch))
+ return;
+
+ if (ossl_quic_pkt_type_is_encrypted(ch->qrx_pkt->hdr->type)) {
+ if (!ch->have_received_enc_pkt) {
+ ch->cur_remote_dcid = ch->init_scid = ch->qrx_pkt->hdr->src_conn_id;
+ ch->have_received_enc_pkt = 1;
+
+ /*
+ * We change to using the SCID in the first Initial packet as the
+ * DCID.
+ */
+ ossl_quic_tx_packetiser_set_cur_dcid(ch->txp, &ch->init_scid);
+ }
+
+ enc_level = ossl_quic_pkt_type_to_enc_level(ch->qrx_pkt->hdr->type);
+ if ((ch->el_discarded & (1U << enc_level)) != 0)
+ /* Do not process packets from ELs we have already discarded. */
+ return;
+ }
+
+ /*
+ * RFC 9000 s. 9.6: "If a client receives packets from a new server address
+ * when the client has not initiated a migration to that address, the client
+ * SHOULD discard these packets."
+ *
+ * We need to be a bit careful here as due to the BIO abstraction layer an
+ * application is liable to be weird and lie to us about peer addresses.
+ * Only apply this check if we actually are using a real AF_INET or AF_INET6
+ * address.
+ */
+ if (!ch->is_server
+ && ch->qrx_pkt->peer != NULL
+ && (
+ BIO_ADDR_family(&ch->cur_peer_addr) == AF_INET
+#if OPENSSL_USE_IPV6
+ || BIO_ADDR_family(&ch->cur_peer_addr) == AF_INET6
+#endif
+ )
+ && !bio_addr_eq(ch->qrx_pkt->peer, &ch->cur_peer_addr))
+ return;
+
+ if (!ch->is_server
+ && ch->have_received_enc_pkt
+ && ossl_quic_pkt_type_has_scid(ch->qrx_pkt->hdr->type)) {
+ /*
+ * RFC 9000 s. 7.2: "Once a client has received a valid Initial packet
+ * from the server, it MUST discard any subsequent packet it receives on
+ * that connection with a different SCID."
+ */
+ if (!ossl_quic_conn_id_eq(&ch->qrx_pkt->hdr->src_conn_id,
+ &ch->init_scid))
+ return;
+ }
+
+ if (ossl_quic_pkt_type_has_version(ch->qrx_pkt->hdr->type)
+ && ch->qrx_pkt->hdr->version != QUIC_VERSION_1)
+ /*
+ * RFC 9000 s. 5.2.1: If a client receives a packet that uses a
+ * different version than it initially selected, it MUST discard the
+ * packet. We only ever use v1, so require it.
+ */
+ return;
+
+ if (ch->qrx_pkt->hdr->type == QUIC_PKT_TYPE_VERSION_NEG) {
+ /*
+ * Sanity check: a version negotiation packet MUST have a version
+ * value of 0 (RFC 9000 s. 17.2.1). We must discard any packet that
+ * does not.
+ */
+ if (ch->qrx_pkt->hdr->version != 0)
+ return;
+
+ /*
+ * RFC 9000 s. 6.2: If a client receives a version negotiation
+ * packet, we need to do the following:
+ * a) if the negotiation packet lists the version we initially sent,
+ *    we must abandon this connection attempt;
+ * b) otherwise, we have to select a version from the list provided
+ *    in the version negotiation packet and retry the connection
+ *    attempt, in much the same way that ch_retry does, but we can
+ *    reuse the connection ID values.
+ */
+
+ if (old_have_processed_any_pkt == 1) {
+ /*
+ * We have already processed other packets, so we must
+ * discard this one.
+ */
+ return;
+ }
+
+ /*
+ * Indicate that we have processed a packet, so that any subsequently
+ * received version negotiation packet is discarded by the check
+ * above.
+ */
+ ch->have_processed_any_pkt = 1;
+
+ /*
+ * Following the header, version negotiation packets contain an
+ * array of 32-bit integers representing the supported versions
+ * that the server honors. This array, bounded by the hdr->len
+ * field, needs to be traversed so that we can find a matching
+ * version.
+ */
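+ /*
+ * Illustrative wire view (not a dump from this implementation): after
+ * the long header the payload is simply N 4-byte version values, e.g.:
+ *
+ *     00 00 00 01   <- QUIC v1 (the only version we accept)
+ *     xx xx xx xx   <- any other versions offered by the server
+ */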
+ if (!PACKET_buf_init(&vpkt, ch->qrx_pkt->hdr->data,
+ ch->qrx_pkt->hdr->len))
+ return;
+
+ while (PACKET_remaining(&vpkt) > 0) {
+ /*
+ * We only support QUIC version 1 at the moment, so look to
+ * see if that's offered.
+ */
+ if (!PACKET_get_net_4(&vpkt, &supported_ver))
+ return;
+
+ if (supported_ver == QUIC_VERSION_1) {
+ /*
+ * If the server supports version 1, set it as the
+ * packetiser's version.
+ */
+ ossl_quic_tx_packetiser_set_protocol_version(ch->txp, QUIC_VERSION_1);
+
+ /*
+ * Then request a restart of the QUIC connection.
+ */
+ if (!ch_restart(ch))
+ ossl_quic_channel_raise_protocol_error(ch,
+ OSSL_QUIC_ERR_INTERNAL_ERROR,
+ 0, "handling ver negotiation packet");
+ return;
+ }
+ }
+
+ /*
+ * If we get here, the server doesn't support any version of the
+ * protocol that we can handle, so abandon the connection.
+ */
+ ossl_quic_channel_raise_protocol_error(ch, OSSL_QUIC_ERR_CONNECTION_REFUSED,
+ 0, "unsupported protocol version");
+ return;
+ }
+
+ ch->have_processed_any_pkt = 1;
+
+ /*
+ * RFC 9000 s. 17.2: "An endpoint MUST treat receipt of a packet that has a
+ * non-zero value for [the reserved bits] after removing both packet and
+ * header protection as a connection error of type PROTOCOL_VIOLATION."
+ */
+ if (ossl_quic_pkt_type_is_encrypted(ch->qrx_pkt->hdr->type)
+ && ch->qrx_pkt->hdr->reserved != 0) {
+ ossl_quic_channel_raise_protocol_error(ch, OSSL_QUIC_ERR_PROTOCOL_VIOLATION,
+ 0, "packet header reserved bits");
+ return;
+ }
+
+ iovec.buf = ch->qrx_pkt->hdr->data;
+ iovec.buf_len = ch->qrx_pkt->hdr->len;
+ ossl_qlog_event_transport_packet_received(ch_get_qlog(ch), ch->qrx_pkt->hdr,
+ ch->qrx_pkt->pn, &iovec, 1,
+ ch->qrx_pkt->datagram_id);
+
+ /* Handle incoming packet. */
+ switch (ch->qrx_pkt->hdr->type) {
+ case QUIC_PKT_TYPE_RETRY:
+ if (ch->doing_retry || ch->is_server)
+ /*
+ * It is not allowed to ask a client to do a retry more than
+ * once. Clients may not send retries.
+ */
+ return;
+
+ /*
+ * RFC 9000 s 17.2.5.2: After the client has received and processed an
+ * Initial or Retry packet from the server, it MUST discard any
+ * subsequent Retry packets that it receives.
+ */
+ if (ch->have_received_enc_pkt)
+ return;
+
+ if (ch->qrx_pkt->hdr->len <= QUIC_RETRY_INTEGRITY_TAG_LEN)
+ /* Packets with zero-length Retry Tokens are invalid. */
+ return;
+
+ /*
+ * TODO(QUIC FUTURE): Theoretically this should probably be in the QRX.
+ * However because validation is dependent on context (namely the
+ * client's initial DCID) we can't do this cleanly. In the future we
+ * should probably add a callback to the QRX to let it call us (via
+ * the DEMUX) and ask us about the correct original DCID, rather
+ * than allow the QRX to emit a potentially malformed packet to the
+ * upper layers. However, special casing this will do for now.
+ */
+ if (!ossl_quic_validate_retry_integrity_tag(ch->port->engine->libctx,
+ ch->port->engine->propq,
+ ch->qrx_pkt->hdr,
+ &ch->init_dcid))
+ /* Malformed retry packet, ignore. */
+ return;
+
+ if (!ch_retry(ch, ch->qrx_pkt->hdr->data,
+ ch->qrx_pkt->hdr->len - QUIC_RETRY_INTEGRITY_TAG_LEN,
+ &ch->qrx_pkt->hdr->src_conn_id, old_have_processed_any_pkt))
+ ossl_quic_channel_raise_protocol_error(ch, OSSL_QUIC_ERR_INTERNAL_ERROR,
+ 0, "handling retry packet");
+ break;
+
+ case QUIC_PKT_TYPE_0RTT:
+ if (!ch->is_server)
+ /* Clients should never receive 0-RTT packets. */
+ return;
+
+ /*
+ * TODO(QUIC 0RTT): Implement 0-RTT on the server side. We currently
+ * do not need to implement this as a client can only do 0-RTT if we
+ * have given it permission to in a previous session.
+ */
+ break;
+
+ case QUIC_PKT_TYPE_INITIAL:
+ case QUIC_PKT_TYPE_HANDSHAKE:
+ case QUIC_PKT_TYPE_1RTT:
+ if (ch->is_server && ch->qrx_pkt->hdr->type == QUIC_PKT_TYPE_HANDSHAKE)
+ /*
+ * We automatically drop INITIAL EL keys when first successfully
+ * decrypting a HANDSHAKE packet, as per the RFC.
+ */
+ ch_discard_el(ch, QUIC_ENC_LEVEL_INITIAL);
+
+ if (ch->rxku_in_progress
+ && ch->qrx_pkt->hdr->type == QUIC_PKT_TYPE_1RTT
+ && ch->qrx_pkt->pn >= ch->rxku_trigger_pn
+ && ch->qrx_pkt->key_epoch < ossl_qrx_get_key_epoch(ch->qrx)) {
+ /*
+ * RFC 9001 s. 6.4: Packets with higher packet numbers MUST be
+ * protected with either the same or newer packet protection keys
+ * than packets with lower packet numbers. An endpoint that
+ * successfully removes protection with old keys when newer keys
+ * were used for packets with lower packet numbers MUST treat this
+ * as a connection error of type KEY_UPDATE_ERROR.
+ */
+ ossl_quic_channel_raise_protocol_error(ch, OSSL_QUIC_ERR_KEY_UPDATE_ERROR,
+ 0, "new packet with old keys");
+ break;
+ }
+
+ if (!ch->is_server
+ && ch->qrx_pkt->hdr->type == QUIC_PKT_TYPE_INITIAL
+ && ch->qrx_pkt->hdr->token_len > 0) {
+ /*
+ * RFC 9000 s. 17.2.2: Clients that receive an Initial packet with a
+ * non-zero Token Length field MUST either discard the packet or
+ * generate a connection error of type PROTOCOL_VIOLATION.
+ *
+ * TODO(QUIC FUTURE): consider the implications of RFC 9000 s. 10.2.3
+ * Immediate Close during the Handshake:
+ * However, at the cost of reducing feedback about
+ * errors for legitimate peers, some forms of denial of
+ * service can be made more difficult for an attacker
+ * if endpoints discard illegal packets rather than
+ * terminating a connection with CONNECTION_CLOSE. For
+ * this reason, endpoints MAY discard packets rather
+ * than immediately close if errors are detected in
+ * packets that lack authentication.
+ * I.e. should we drop this packet instead of closing the connection?
+ */
+ ossl_quic_channel_raise_protocol_error(ch, OSSL_QUIC_ERR_PROTOCOL_VIOLATION,
+ 0, "client received initial token");
+ break;
+ }
+
+ /* This packet contains frames, pass to the RXDP. */
+ ossl_quic_handle_frames(ch, ch->qrx_pkt); /* best effort */
+
+ if (ch->did_crypto_frame)
+ ch_tick_tls(ch, channel_only, NULL);
+
+ break;
+
+ case QUIC_PKT_TYPE_VERSION_NEG:
+ /*
+ * "A client MUST discard any Version Negotiation packet if it has
+ * received and successfully processed any other packet."
+ */
+ if (!old_have_processed_any_pkt)
+ ch_rx_handle_version_neg(ch, ch->qrx_pkt);
+
+ break;
+
+ default:
+ assert(0);
+ break;
+ }
+}
+
+static void ch_rx_handle_version_neg(QUIC_CHANNEL *ch, OSSL_QRX_PKT *pkt)
+{
+ /*
+ * We do not support version negotiation at this time. As per RFC 9000 s.
+ * 6.2., we MUST abandon the connection attempt if we receive a Version
+ * Negotiation packet, unless we have already successfully processed another
+ * incoming packet, or the packet lists the QUIC version we want to use.
+ */
+ PACKET vpkt;
+ unsigned long v;
+
+ if (!PACKET_buf_init(&vpkt, pkt->hdr->data, pkt->hdr->len))
+ return;
+
+ while (PACKET_remaining(&vpkt) > 0) {
+ if (!PACKET_get_net_4(&vpkt, &v))
+ break;
+
+ if ((uint32_t)v == QUIC_VERSION_1)
+ return;
+ }
+
+ /* No match, this is a failure case. */
+ ch_raise_version_neg_failure(ch);
+}
+
+static void ch_raise_version_neg_failure(QUIC_CHANNEL *ch)
+{
+ QUIC_TERMINATE_CAUSE tcause = {0};
+
+ tcause.error_code = OSSL_QUIC_ERR_CONNECTION_REFUSED;
+ tcause.reason = "version negotiation failure";
+ tcause.reason_len = strlen(tcause.reason);
+
+ /*
+ * Skip TERMINATING state; this is not considered a protocol error and we do
+ * not send CONNECTION_CLOSE.
+ */
+ ch_start_terminating(ch, &tcause, 1);
+}
+
+/* Try to generate packets and if possible, flush them to the network. */
+static int ch_tx(QUIC_CHANNEL *ch, int *notify_other_threads)
+{
+ QUIC_TXP_STATUS status;
+ int res;
+
+ /*
+ * RFC 9000 s. 10.2.2: Draining Connection State:
+ * While otherwise identical to the closing state, an endpoint
+ * in the draining state MUST NOT send any packets.
+ * and:
+ * An endpoint MUST NOT send further packets.
+ */
+ if (ossl_quic_channel_is_draining(ch))
+ return 0;
+
+ if (ossl_quic_channel_is_closing(ch)) {
+ /*
+ * While closing, only send CONN_CLOSE if we've received more traffic
+ * from the peer. Once we tell the TXP to generate CONN_CLOSE, all
+ * future calls to it generate CONN_CLOSE frames, so otherwise we would
+ * just constantly generate CONN_CLOSE frames.
+ *
+ * Conforming to RFC 9000 s. 10.2.1 Closing Connection State:
+ * An endpoint SHOULD limit the rate at which it generates
+ * packets in the closing state.
+ */
+ if (!ch->conn_close_queued)
+ return 0;
+
+ ch->conn_close_queued = 0;
+ }
+
+ /* Do TXKU if we need to. */
+ ch_maybe_trigger_spontaneous_txku(ch);
+
+ ch->rxku_pending_confirm_done = 0;
+
+ /* Loop until we stop generating packets to send */
+ do {
+ /*
+ * Send a packet, if we need to. Best effort: the TXP consults the CC
+ * and applies any limitations imposed by it, so we don't need to do
+ * so here. In particular, even if the TXP fails for some reason, we
+ * should still flush any queued packets we have already generated.
+ */
+ res = ossl_quic_tx_packetiser_generate(ch->txp, &status);
+ if (status.sent_pkt > 0) {
+ ch->have_sent_any_pkt = 1; /* Packet(s) were sent */
+ ch->port->have_sent_any_pkt = 1;
+
+ /*
+ * RFC 9000 s. 10.1. 'An endpoint also restarts its idle timer when
+ * sending an ack-eliciting packet if no other ack-eliciting packets
+ * have been sent since last receiving and processing a packet.'
+ */
+ if (status.sent_ack_eliciting
+ && !ch->have_sent_ack_eliciting_since_rx) {
+ ch_update_idle(ch);
+ ch->have_sent_ack_eliciting_since_rx = 1;
+ }
+
+ if (!ch->is_server && status.sent_handshake)
+ /*
+ * RFC 9001 s. 4.9.1: A client MUST discard Initial keys when it
+ * first sends a Handshake packet.
+ */
+ ch_discard_el(ch, QUIC_ENC_LEVEL_INITIAL);
+
+ if (ch->rxku_pending_confirm_done)
+ ch->rxku_pending_confirm = 0;
+
+ ch_update_ping_deadline(ch);
+ }
+
+ if (!res) {
+ /*
+ * One case where TXP can fail is if we reach a TX PN of 2**62 - 1.
+ * As per RFC 9000 s. 12.3, if this happens we MUST close the
+ * connection without sending a CONNECTION_CLOSE frame. This is
+ * actually handled as an emergent consequence of our design, as the
+ * TX packetiser will never transmit another packet when the TX PN
+ * reaches the limit.
+ *
+ * Calling the below function terminates the connection; its attempt
+ * to schedule a CONNECTION_CLOSE frame will not actually cause a
+ * packet to be transmitted for this reason.
+ */
+ ossl_quic_channel_raise_protocol_error(ch, OSSL_QUIC_ERR_INTERNAL_ERROR,
+ 0,
+ "internal error (txp generate)");
+ break;
+ }
+ } while (status.sent_pkt > 0);
+
+ /* Flush packets to network. */
+ switch (ossl_qtx_flush_net(ch->qtx)) {
+ case QTX_FLUSH_NET_RES_OK:
+ case QTX_FLUSH_NET_RES_TRANSIENT_FAIL:
+ /* Best effort, done for now. */
+ break;
+
+ case QTX_FLUSH_NET_RES_PERMANENT_FAIL:
+ default:
+ /* Permanent underlying network BIO error; start terminating. */
+ ossl_quic_port_raise_net_error(ch->port, ch);
+ break;
+ }
+
+ /*
+ * If we have datagrams we have yet to successfully transmit, we need to
+ * notify other threads so that they can switch to polling on POLLOUT as
+ * well as POLLIN.
+ */
+ if (ossl_qtx_get_queue_len_datagrams(ch->qtx) > 0)
+ *notify_other_threads = 1;
+
+ return 1;
+}
+
+/* Determine next tick deadline. */
+static OSSL_TIME ch_determine_next_tick_deadline(QUIC_CHANNEL *ch)
+{
+ OSSL_TIME deadline;
+ int i;
+
+ if (ossl_quic_channel_is_terminated(ch))
+ return ossl_time_infinite();
+
+ deadline = ossl_ackm_get_loss_detection_deadline(ch->ackm);
+ if (ossl_time_is_zero(deadline))
+ deadline = ossl_time_infinite();
+
+ /*
+ * Check the ack deadline for all enc_levels that are actually provisioned.
+ * ACKs aren't restricted by CC.
+ */
+ for (i = 0; i < QUIC_ENC_LEVEL_NUM; i++) {
+ if (ossl_qtx_is_enc_level_provisioned(ch->qtx, i)) {
+ deadline = ossl_time_min(deadline,
+ ossl_ackm_get_ack_deadline(ch->ackm,
+ ossl_quic_enc_level_to_pn_space(i)));
+ }
+ }
+
+ /*
+ * When do we need to send an ACK-eliciting packet to reset the idle
+ * deadline timer for the peer?
+ */
+ if (!ossl_time_is_infinite(ch->ping_deadline))
+ deadline = ossl_time_min(deadline, ch->ping_deadline);
+
+ /* Apply TXP wakeup deadline. */
+ deadline = ossl_time_min(deadline,
+ ossl_quic_tx_packetiser_get_deadline(ch->txp));
+
+ /* Is the terminating timer armed? */
+ if (ossl_quic_channel_is_terminating(ch))
+ deadline = ossl_time_min(deadline,
+ ch->terminate_deadline);
+ else if (!ossl_time_is_infinite(ch->idle_deadline))
+ deadline = ossl_time_min(deadline,
+ ch->idle_deadline);
+
+ /* When does the RXKU process complete? */
+ if (ch->rxku_in_progress)
+ deadline = ossl_time_min(deadline, ch->rxku_update_end_deadline);
+
+ return deadline;
+}
+
+/*
+ * QUIC Channel: Lifecycle Events
+ * ==============================
+ */
+
+/*
+ * Record a state transition. This is not necessarily a change to ch->state but
+ * also includes the handshake becoming complete or confirmed, etc.
+ */
+static void ch_record_state_transition(QUIC_CHANNEL *ch, uint32_t new_state)
+{
+ uint32_t old_state = ch->state;
+
+ ch->state = new_state;
+
+ ossl_qlog_event_connectivity_connection_state_updated(ch_get_qlog(ch),
+ old_state,
+ new_state,
+ ch->handshake_complete,
+ ch->handshake_confirmed);
+}
+
+static void free_peer_token(const unsigned char *token,
+ size_t token_len, void *arg)
+{
+ ossl_quic_free_peer_token((QUIC_TOKEN *)arg);
+}
+
+int ossl_quic_channel_start(QUIC_CHANNEL *ch)
+{
+ QUIC_TOKEN *token;
+
+ if (ch->is_server)
+ /*
+ * This is not used by the server. The server moves to active
+ * automatically on receiving an incoming connection.
+ */
+ return 0;
+
+ if (ch->state != QUIC_CHANNEL_STATE_IDLE)
+ /* Calls to connect are idempotent */
+ return 1;
+
+ /* Inform QTX of peer address. */
+ if (!ossl_quic_tx_packetiser_set_peer(ch->txp, &ch->cur_peer_addr))
+ return 0;
+
+ /*
+ * Look to see if we have a token, and if so, set it on the packetiser
+ */
+ if (!ch->is_server
+ && ossl_quic_get_peer_token(ch->port->channel_ctx,
+ &ch->cur_peer_addr,
+ &token)
+ && !ossl_quic_tx_packetiser_set_initial_token(ch->txp, token->token,
+ token->token_len,
+ free_peer_token,
+ token))
+ free_peer_token(NULL, 0, token);
+
+ /* Plug in secrets for the Initial EL. */
+ if (!ossl_quic_provide_initial_secret(ch->port->engine->libctx,
+ ch->port->engine->propq,
+ &ch->init_dcid,
+ ch->is_server,
+ ch->qrx, ch->qtx))
+ return 0;
+
+ /*
+ * Determine the QUIC Transport Parameters and serialize the transport
+ * parameters block. (For servers, we do this later as we must defer
+ * generation until we have received the client's transport parameters.)
+ */
+ if (!ch->is_server && !ch->got_local_transport_params
+ && !ch_generate_transport_params(ch))
+ return 0;
+
+ /* Change state. */
+ ch_record_state_transition(ch, QUIC_CHANNEL_STATE_ACTIVE);
+ ch->doing_proactive_ver_neg = 0; /* not currently supported */
+
+ ossl_qlog_event_connectivity_connection_started(ch_get_qlog(ch),
+ &ch->init_dcid);
+
+ /* Handshake layer: start (e.g. send CH). */
+ if (!ch_tick_tls(ch, /*channel_only=*/0, NULL))
+ return 0;
+
+ ossl_quic_reactor_tick(ossl_quic_port_get0_reactor(ch->port), 0); /* best effort */
+ return 1;
+}
+
+static void free_token(const unsigned char *token, size_t token_len, void *arg)
+{
+ OPENSSL_free((char *)token);
+}
+
+/* Start a locally initiated connection shutdown. */
+void ossl_quic_channel_local_close(QUIC_CHANNEL *ch, uint64_t app_error_code,
+ const char *app_reason)
+{
+ QUIC_TERMINATE_CAUSE tcause = {0};
+
+ if (ossl_quic_channel_is_term_any(ch))
+ return;
+
+ tcause.app = 1;
+ tcause.error_code = app_error_code;
+ tcause.reason = app_reason;
+ tcause.reason_len = app_reason != NULL ? strlen(app_reason) : 0;
+ ch_start_terminating(ch, &tcause, 0);
+}
+
+/**
+ * ch_restart - Restarts the QUIC channel by simulating loss of the initial
+ * packet. This forces the packet to be regenerated with the updated protocol
+ * version number.
+ *
+ * @ch: Pointer to the QUIC_CHANNEL structure.
+ *
+ * Returns 1 on success, 0 on failure.
+ */
+static int ch_restart(QUIC_CHANNEL *ch)
+{
+ /*
+ * Just pretend we lost our initial packet, so it gets
+ * regenerated, with our updated protocol version number
+ */
+ return ossl_ackm_mark_packet_pseudo_lost(ch->ackm, QUIC_PN_SPACE_INITIAL,
+ /* PN= */ 0);
+}
+
+/* Called when a server asks us to do a retry. */
+static int ch_retry(QUIC_CHANNEL *ch,
+ const unsigned char *retry_token,
+ size_t retry_token_len,
+ const QUIC_CONN_ID *retry_scid,
+ int drop_later_pn)
+{
+ void *buf;
+ QUIC_PN pn = 0;
+
+ /*
+ * RFC 9000 s. 17.2.5.1: "A client MUST discard a Retry packet that contains
+ * a SCID field that is identical to the DCID field of its initial packet."
+ */
+ if (ossl_quic_conn_id_eq(&ch->init_dcid, retry_scid))
+ return 1;
+
+ /* We change to using the SCID in the Retry packet as the DCID. */
+ if (!ossl_quic_tx_packetiser_set_cur_dcid(ch->txp, retry_scid))
+ return 0;
+
+ /*
+ * Now we retry. We will release the Retry packet immediately, so copy
+ * the token.
+ */
+ if ((buf = OPENSSL_memdup(retry_token, retry_token_len)) == NULL)
+ return 0;
+
+ if (!ossl_quic_tx_packetiser_set_initial_token(ch->txp, buf,
+ retry_token_len,
+ free_token, NULL)) {
+ /*
+ * This may fail if the token we receive is too big for us to ever be
+ * able to transmit in an outgoing Initial packet.
+ */
+ ossl_quic_channel_raise_protocol_error(ch, OSSL_QUIC_ERR_INVALID_TOKEN, 0,
+ "received oversize token");
+ OPENSSL_free(buf);
+ return 0;
+ }
+
+ ch->retry_scid = *retry_scid;
+ ch->doing_retry = 1;
+
+ /*
+ * If the Retry isn't the first response we received, we need to mark
+ * packet number one as lost instead (i.e., the case where we did
+ * version negotiation first).
+ */
+ if (drop_later_pn == 1)
+ pn = 1;
+
+ /*
+ * We need to stimulate the Initial EL to generate the first CRYPTO frame
+ * again. We can do this most cleanly by simply forcing the ACKM to consider
+ * the first Initial packet as lost, which it effectively was as the server
+ * hasn't processed it. This also maintains the desired behaviour with e.g.
+ * PNs not resetting and so on.
+ *
+ * The PN we used initially is always zero, because QUIC does not allow
+ * repeated retries.
+ */
+ if (!ossl_ackm_mark_packet_pseudo_lost(ch->ackm, QUIC_PN_SPACE_INITIAL,
+ pn))
+ return 0;
+
+ /*
+ * Plug in new secrets for the Initial EL. This is the only time we change
+ * the secrets for an EL after we already provisioned it.
+ */
+ if (!ossl_quic_provide_initial_secret(ch->port->engine->libctx,
+ ch->port->engine->propq,
+ &ch->retry_scid,
+ /*is_server=*/0,
+ ch->qrx, ch->qtx))
+ return 0;
+
+ return 1;
+}
+
+/* Called when an EL is to be discarded. */
+static int ch_discard_el(QUIC_CHANNEL *ch,
+ uint32_t enc_level)
+{
+ if (!ossl_assert(enc_level < QUIC_ENC_LEVEL_1RTT))
+ return 0;
+
+ if ((ch->el_discarded & (1U << enc_level)) != 0)
+ /* Already done. */
+ return 1;
+
+ /* Best effort for all of these. */
+ ossl_quic_tx_packetiser_discard_enc_level(ch->txp, enc_level);
+ ossl_qrx_discard_enc_level(ch->qrx, enc_level);
+ ossl_qtx_discard_enc_level(ch->qtx, enc_level);
+
+ if (enc_level != QUIC_ENC_LEVEL_0RTT) {
+ uint32_t pn_space = ossl_quic_enc_level_to_pn_space(enc_level);
+
+ ossl_ackm_on_pkt_space_discarded(ch->ackm, pn_space);
+
+ /* We should still have crypto streams at this point. */
+ if (!ossl_assert(ch->crypto_send[pn_space] != NULL)
+ || !ossl_assert(ch->crypto_recv[pn_space] != NULL))
+ return 0;
+
+ /* Get rid of the crypto stream state for the EL. */
+ ossl_quic_sstream_free(ch->crypto_send[pn_space]);
+ ch->crypto_send[pn_space] = NULL;
+
+ ossl_quic_rstream_free(ch->crypto_recv[pn_space]);
+ ch->crypto_recv[pn_space] = NULL;
+ }
+
+ ch->el_discarded |= (1U << enc_level);
+ return 1;
+}
+
+/* Intended to be called by the RXDP. */
+int ossl_quic_channel_on_handshake_confirmed(QUIC_CHANNEL *ch)
+{
+ if (ch->handshake_confirmed)
+ return 1;
+
+ if (!ch->handshake_complete) {
+ /*
+ * Does not make sense for handshake to be confirmed before it is
+ * completed.
+ */
+ ossl_quic_channel_raise_protocol_error(ch, OSSL_QUIC_ERR_PROTOCOL_VIOLATION,
+ OSSL_QUIC_FRAME_TYPE_HANDSHAKE_DONE,
+ "handshake cannot be confirmed "
+ "before it is completed");
+ return 0;
+ }
+
+ ch_discard_el(ch, QUIC_ENC_LEVEL_HANDSHAKE);
+ ch->handshake_confirmed = 1;
+ ch_record_state_transition(ch, ch->state);
+ ossl_ackm_on_handshake_confirmed(ch->ackm);
+ return 1;
+}
+
+/*
+ * Master function used when we want to start tearing down a connection:
+ *
+ * - If the connection is still IDLE we can go straight to TERMINATED;
+ *
+ * - If we are already TERMINATED this is a no-op.
+ *
+ * - If we are TERMINATING - CLOSING and we have now got a CONNECTION_CLOSE
+ * from the peer (tcause->remote == 1), we move to TERMINATING - DRAINING.
+ *
+ * - If we are TERMINATING - DRAINING, we remain here until the terminating
+ * timer expires.
+ *
+ * - Otherwise, we are in ACTIVE and move to TERMINATING - CLOSING.
+ * if we caused the termination (e.g. we have sent a CONNECTION_CLOSE). Note
+ * that we are considered to have caused a termination if we sent the first
+ * CONNECTION_CLOSE frame, even if it is caused by a peer protocol
+ * violation. If the peer sent the first CONNECTION_CLOSE frame, we move to
+ * TERMINATING - DRAINING.
+ *
+ * We record the termination cause structure passed on the first call only.
+ * Any successive calls have their termination cause data discarded;
+ * once we start sending a CONNECTION_CLOSE frame, we don't change the details
+ * in it.
+ *
+ * This conforms to RFC 9000 s. 10.2.1: Closing Connection State:
+ * To minimize the state that an endpoint maintains for a closing
+ * connection, endpoints MAY send the exact same packet in response
+ * to any received packet.
+ *
+ * We don't drop any connection state (specifically packet protection keys)
+ * even though we are permitted to. This conforms to RFC 9000 s. 10.2.1:
+ * Closing Connection State:
+ * An endpoint MAY retain packet protection keys for incoming
+ * packets to allow it to read and process a CONNECTION_CLOSE frame.
+ *
+ * Note that we do not conform to these two from the same section:
+ * An endpoint's selected connection ID and the QUIC version
+ * are sufficient information to identify packets for a closing
+ * connection; the endpoint MAY discard all other connection state.
+ * and:
+ * An endpoint MAY drop packet protection keys when entering the
+ * closing state and send a packet containing a CONNECTION_CLOSE
+ * frame in response to any UDP datagram that is received.
+ */
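+
+/*
+ * Illustrative summary (not part of the original source) of the transitions
+ * implemented in ch_start_terminating below:
+ *
+ *     IDLE ---------------------------------------------> TERMINATED
+ *     ACTIVE --(we close)---> TERMINATING-CLOSING  --(timeout)--> TERMINATED
+ *     ACTIVE --(peer close)-> TERMINATING-DRAINING --(timeout)--> TERMINATED
+ *     TERMINATING-CLOSING --(peer CONNECTION_CLOSE)--> TERMINATING-DRAINING
+ */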
+static void copy_tcause(QUIC_TERMINATE_CAUSE *dst,
+ const QUIC_TERMINATE_CAUSE *src)
+{
+ dst->error_code = src->error_code;
+ dst->frame_type = src->frame_type;
+ dst->app = src->app;
+ dst->remote = src->remote;
+
+ dst->reason = NULL;
+ dst->reason_len = 0;
+
+ if (src->reason != NULL && src->reason_len > 0) {
+ size_t l = src->reason_len;
+ char *r;
+
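+ /* Clamp l so that the l + 1 in the memdup below cannot overflow. */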
+ if (l >= SIZE_MAX)
+ --l;
+
+ /*
+ * If this fails, dst->reason becomes NULL and we simply do not use a
+ * reason. This ensures termination is infallible.
+ */
+ dst->reason = r = OPENSSL_memdup(src->reason, l + 1);
+ if (r == NULL)
+ return;
+
+ r[l] = '\0';
+ dst->reason_len = l;
+ }
+}
+
+static void ch_start_terminating(QUIC_CHANNEL *ch,
+ const QUIC_TERMINATE_CAUSE *tcause,
+ int force_immediate)
+{
+ /* No point sending anything if we haven't sent anything yet. */
+ if (!ch->have_sent_any_pkt)
+ force_immediate = 1;
+
+ switch (ch->state) {
+ default:
+ case QUIC_CHANNEL_STATE_IDLE:
+ copy_tcause(&ch->terminate_cause, tcause);
+ ch_on_terminating_timeout(ch);
+ break;
+
+ case QUIC_CHANNEL_STATE_ACTIVE:
+ copy_tcause(&ch->terminate_cause, tcause);
+
+ ossl_qlog_event_connectivity_connection_closed(ch_get_qlog(ch), tcause);
+
+ if (!force_immediate) {
+ ch_record_state_transition(ch, tcause->remote
+ ? QUIC_CHANNEL_STATE_TERMINATING_DRAINING
+ : QUIC_CHANNEL_STATE_TERMINATING_CLOSING);
+ /*
+ * RFC 9000 s. 10.2 Immediate Close
+ * These states SHOULD persist for at least three times
+ * the current PTO interval as defined in [QUIC-RECOVERY].
+ */
+ ch->terminate_deadline
+ = ossl_time_add(get_time(ch),
+ ossl_time_multiply(ossl_ackm_get_pto_duration(ch->ackm),
+ 3));
+
+ if (!tcause->remote) {
+ OSSL_QUIC_FRAME_CONN_CLOSE f = {0};
+
+ /* best effort */
+ f.error_code = ch->terminate_cause.error_code;
+ f.frame_type = ch->terminate_cause.frame_type;
+ f.is_app = ch->terminate_cause.app;
+ f.reason = (char *)ch->terminate_cause.reason;
+ f.reason_len = ch->terminate_cause.reason_len;
+ ossl_quic_tx_packetiser_schedule_conn_close(ch->txp, &f);
+ /*
+ * RFC 9000 s. 10.2.2 Draining Connection State:
+ * An endpoint that receives a CONNECTION_CLOSE frame MAY
+ * send a single packet containing a CONNECTION_CLOSE
+ * frame before entering the draining state, using a
+ * NO_ERROR code if appropriate
+ */
+ ch->conn_close_queued = 1;
+ }
+ } else {
+ ch_on_terminating_timeout(ch);
+ }
+ break;
+
+ case QUIC_CHANNEL_STATE_TERMINATING_CLOSING:
+ if (force_immediate)
+ ch_on_terminating_timeout(ch);
+ else if (tcause->remote)
+ /*
+ * RFC 9000 s. 10.2.2 Draining Connection State:
+ * An endpoint MAY enter the draining state from the
+ * closing state if it receives a CONNECTION_CLOSE frame,
+ * which indicates that the peer is also closing or draining.
+ */
+ ch_record_state_transition(ch, QUIC_CHANNEL_STATE_TERMINATING_DRAINING);
+
+ break;
+
+ case QUIC_CHANNEL_STATE_TERMINATING_DRAINING:
+ /*
+ * Other than in the force-immediate case, we remain here until the
+ * timeout expires.
+ */
+ if (force_immediate)
+ ch_on_terminating_timeout(ch);
+
+ break;
+
+ case QUIC_CHANNEL_STATE_TERMINATED:
+ /* No-op. */
+ break;
+ }
+}
+
+/* For RXDP use. */
+void ossl_quic_channel_on_remote_conn_close(QUIC_CHANNEL *ch,
+ OSSL_QUIC_FRAME_CONN_CLOSE *f)
+{
+ QUIC_TERMINATE_CAUSE tcause = {0};
+
+ if (!ossl_quic_channel_is_active(ch))
+ return;
+
+ tcause.remote = 1;
+ tcause.app = f->is_app;
+ tcause.error_code = f->error_code;
+ tcause.frame_type = f->frame_type;
+ tcause.reason = f->reason;
+ tcause.reason_len = f->reason_len;
+ ch_start_terminating(ch, &tcause, 0);
+}
+
+static void free_frame_data(unsigned char *buf, size_t buf_len, void *arg)
+{
+ OPENSSL_free(buf);
+}
+
+static int ch_enqueue_retire_conn_id(QUIC_CHANNEL *ch, uint64_t seq_num)
+{
+ BUF_MEM *buf_mem = NULL;
+ WPACKET wpkt;
+ size_t l;
+
+ ossl_quic_srtm_remove(ch->srtm, ch, seq_num);
+
+ if ((buf_mem = BUF_MEM_new()) == NULL)
+ goto err;
+
+ if (!WPACKET_init(&wpkt, buf_mem))
+ goto err;
+
+ if (!ossl_quic_wire_encode_frame_retire_conn_id(&wpkt, seq_num)) {
+ WPACKET_cleanup(&wpkt);
+ goto err;
+ }
+
+ WPACKET_finish(&wpkt);
+ if (!WPACKET_get_total_written(&wpkt, &l))
+ goto err;
+
+ if (ossl_quic_cfq_add_frame(ch->cfq, 1, QUIC_PN_SPACE_APP,
+ OSSL_QUIC_FRAME_TYPE_RETIRE_CONN_ID, 0,
+ (unsigned char *)buf_mem->data, l,
+ free_frame_data, NULL) == NULL)
+ goto err;
+
+ buf_mem->data = NULL;
+ BUF_MEM_free(buf_mem);
+ return 1;
+
+err:
+ ossl_quic_channel_raise_protocol_error(ch,
+ OSSL_QUIC_ERR_INTERNAL_ERROR,
+ OSSL_QUIC_FRAME_TYPE_NEW_CONN_ID,
+ "internal error enqueueing retire conn id");
+ BUF_MEM_free(buf_mem);
+ return 0;
+}
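+
+/*
+ * Illustrative note (not part of the original source): the frame encoded
+ * above is tiny; per RFC 9000 s. 19.16 a RETIRE_CONNECTION_ID frame is just
+ * the frame type (0x19) followed by the sequence number as a varint, so
+ * retiring sequence number 3 encodes as the two bytes 19 03.
+ */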
+
+void ossl_quic_channel_on_new_conn_id(QUIC_CHANNEL *ch,
+ OSSL_QUIC_FRAME_NEW_CONN_ID *f)
+{
+ uint64_t new_remote_seq_num = ch->cur_remote_seq_num;
+ uint64_t new_retire_prior_to = ch->cur_retire_prior_to;
+
+ if (!ossl_quic_channel_is_active(ch))
+ return;
+
+ /* We allow only two active connection IDs; first check some constraints. */
+ if (ch->cur_remote_dcid.id_len == 0) {
+ /* Changing from a zero-length connection ID is disallowed. */
+ ossl_quic_channel_raise_protocol_error(ch,
+ OSSL_QUIC_ERR_PROTOCOL_VIOLATION,
+ OSSL_QUIC_FRAME_TYPE_NEW_CONN_ID,
+ "zero length connection id in use");
+
+ return;
+ }
+
+ if (f->seq_num > new_remote_seq_num)
+ new_remote_seq_num = f->seq_num;
+ if (f->retire_prior_to > new_retire_prior_to)
+ new_retire_prior_to = f->retire_prior_to;
+
+ /*
+ * RFC 9000 s. 5.1.1: An endpoint MUST NOT provide more connection IDs
+ * than the peer's limit.
+ *
+ * After processing a NEW_CONNECTION_ID frame and adding and retiring
+ * active connection IDs, if the number of active connection IDs exceeds
+ * the value advertised in its active_connection_id_limit transport
+ * parameter, an endpoint MUST close the connection with an error of
+ * type CONNECTION_ID_LIMIT_ERROR.
+ */
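+    /*
+     * Example (illustrative): if the peer has issued CIDs up to sequence
+     * number 7 with a Retire Prior To of 6, then 7 - 6 = 1 and we track at
+     * most two active CIDs (sequence numbers 6 and 7), which is permitted.
+     */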
+ if (new_remote_seq_num - new_retire_prior_to > 1) {
+ ossl_quic_channel_raise_protocol_error(ch,
+ OSSL_QUIC_ERR_CONNECTION_ID_LIMIT_ERROR,
+ OSSL_QUIC_FRAME_TYPE_NEW_CONN_ID,
+ "active_connection_id limit violated");
+ return;
+ }
+
+ /*
+     * RFC 9000 s. 5.1.1: An endpoint MAY send connection IDs that temporarily
+ * exceed a peer's limit if the NEW_CONNECTION_ID frame also requires
+ * the retirement of any excess, by including a sufficiently large
+ * value in the Retire Prior To field.
+ *
+     * RFC 9000 s. 5.1.2: An endpoint SHOULD allow for sending and tracking
+ * a number of RETIRE_CONNECTION_ID frames of at least twice the value
+ * of the active_connection_id_limit transport parameter. An endpoint
+ * MUST NOT forget a connection ID without retiring it, though it MAY
+ * choose to treat having connection IDs in need of retirement that
+ * exceed this limit as a connection error of type CONNECTION_ID_LIMIT_ERROR.
+ *
+ * We are a little bit more liberal than the minimum mandated.
+ */
+ if (new_retire_prior_to - ch->cur_retire_prior_to > 10) {
+ ossl_quic_channel_raise_protocol_error(ch,
+ OSSL_QUIC_ERR_CONNECTION_ID_LIMIT_ERROR,
+ OSSL_QUIC_FRAME_TYPE_NEW_CONN_ID,
+ "retiring connection id limit violated");
+
+ return;
+ }
+
+ if (new_remote_seq_num > ch->cur_remote_seq_num) {
+ /* Add new stateless reset token */
+ if (!ossl_quic_srtm_add(ch->srtm, ch, new_remote_seq_num,
+ &f->stateless_reset)) {
+ ossl_quic_channel_raise_protocol_error(
+ ch, OSSL_QUIC_ERR_CONNECTION_ID_LIMIT_ERROR,
+ OSSL_QUIC_FRAME_TYPE_NEW_CONN_ID,
+ "unable to store stateless reset token");
+
+ return;
+ }
+ ch->cur_remote_seq_num = new_remote_seq_num;
+ ch->cur_remote_dcid = f->conn_id;
+ ossl_quic_tx_packetiser_set_cur_dcid(ch->txp, &ch->cur_remote_dcid);
+ }
+
+ /*
+     * RFC 9000 s. 5.1.2: Upon receipt of an increased Retire Prior To
+ * field, the peer MUST stop using the corresponding connection IDs
+ * and retire them with RETIRE_CONNECTION_ID frames before adding the
+ * newly provided connection ID to the set of active connection IDs.
+ */
+
+ /*
+ * Note: RFC 9000 s. 19.15 says:
+ * "An endpoint that receives a NEW_CONNECTION_ID frame with a sequence
+ * number smaller than the Retire Prior To field of a previously received
+ * NEW_CONNECTION_ID frame MUST send a corresponding
+ * RETIRE_CONNECTION_ID frame that retires the newly received connection
+ * ID, unless it has already done so for that sequence number."
+ *
+ * Since we currently always queue RETIRE_CONN_ID frames based on the Retire
+ * Prior To field of a NEW_CONNECTION_ID frame immediately upon receiving
+ * that NEW_CONNECTION_ID frame, by definition this will always be met.
+ * This may change in future when we change our CID handling.
+ */
+ while (new_retire_prior_to > ch->cur_retire_prior_to) {
+ if (!ch_enqueue_retire_conn_id(ch, ch->cur_retire_prior_to))
+ break;
+ ++ch->cur_retire_prior_to;
+ }
+}
+
+static void ch_save_err_state(QUIC_CHANNEL *ch)
+{
+ if (ch->err_state == NULL)
+ ch->err_state = OSSL_ERR_STATE_new();
+
+ if (ch->err_state == NULL)
+ return;
+
+ OSSL_ERR_STATE_save(ch->err_state);
+}
+
+void ossl_quic_channel_inject(QUIC_CHANNEL *ch, QUIC_URXE *e)
+{
+ ossl_qrx_inject_urxe(ch->qrx, e);
+}
+
+void ossl_quic_channel_inject_pkt(QUIC_CHANNEL *ch, OSSL_QRX_PKT *qpkt)
+{
+ ossl_qrx_inject_pkt(ch->qrx, qpkt);
+}
+
+void ossl_quic_channel_on_stateless_reset(QUIC_CHANNEL *ch)
+{
+ QUIC_TERMINATE_CAUSE tcause = {0};
+
+ tcause.error_code = OSSL_QUIC_ERR_NO_ERROR;
+ tcause.remote = 1;
+ ch_start_terminating(ch, &tcause, 0);
+}
+
+void ossl_quic_channel_raise_net_error(QUIC_CHANNEL *ch)
+{
+ QUIC_TERMINATE_CAUSE tcause = {0};
+
+ if (ch->net_error)
+ return;
+
+ ch->net_error = 1;
+
+ tcause.error_code = OSSL_QUIC_ERR_INTERNAL_ERROR;
+ tcause.reason = "network BIO I/O error";
+ tcause.reason_len = strlen(tcause.reason);
+
+ /*
+ * Skip Terminating state and go directly to Terminated, no point trying to
+ * send CONNECTION_CLOSE if we cannot communicate.
+ */
+ ch_start_terminating(ch, &tcause, 1);
+}
+
+int ossl_quic_channel_net_error(QUIC_CHANNEL *ch)
+{
+ return ch->net_error;
+}
+
+void ossl_quic_channel_restore_err_state(QUIC_CHANNEL *ch)
+{
+ if (ch == NULL)
+ return;
+
+ if (!ossl_quic_port_is_running(ch->port))
+ ossl_quic_port_restore_err_state(ch->port);
+ else
+ OSSL_ERR_STATE_restore(ch->err_state);
+}
+
+void ossl_quic_channel_raise_protocol_error_loc(QUIC_CHANNEL *ch,
+ uint64_t error_code,
+ uint64_t frame_type,
+ const char *reason,
+ ERR_STATE *err_state,
+ const char *src_file,
+ int src_line,
+ const char *src_func)
+{
+ QUIC_TERMINATE_CAUSE tcause = {0};
+ int err_reason = error_code == OSSL_QUIC_ERR_INTERNAL_ERROR
+ ? ERR_R_INTERNAL_ERROR : SSL_R_QUIC_PROTOCOL_ERROR;
+ const char *err_str = ossl_quic_err_to_string(error_code);
+ const char *err_str_pfx = " (", *err_str_sfx = ")";
+ const char *ft_str = NULL;
+ const char *ft_str_pfx = " (", *ft_str_sfx = ")";
+
+ if (ch->protocol_error)
+ /* Only the first call to this function matters. */
+ return;
+
+ if (err_str == NULL) {
+ err_str = "";
+ err_str_pfx = "";
+ err_str_sfx = "";
+ }
+
+ /*
+ * If we were provided an underlying error state, restore it and then append
+ * our ERR on top as a "cover letter" error.
+ */
+ if (err_state != NULL)
+ OSSL_ERR_STATE_restore(err_state);
+
+ if (frame_type != 0) {
+ ft_str = ossl_quic_frame_type_to_string(frame_type);
+ if (ft_str == NULL) {
+ ft_str = "";
+ ft_str_pfx = "";
+ ft_str_sfx = "";
+ }
+
+ ERR_raise_data(ERR_LIB_SSL, err_reason,
+ "QUIC error code: 0x%llx%s%s%s "
+ "(triggered by frame type: 0x%llx%s%s%s), reason: \"%s\"",
+ (unsigned long long) error_code,
+ err_str_pfx, err_str, err_str_sfx,
+ (unsigned long long) frame_type,
+ ft_str_pfx, ft_str, ft_str_sfx,
+ reason);
+ } else {
+ ERR_raise_data(ERR_LIB_SSL, err_reason,
+ "QUIC error code: 0x%llx%s%s%s, reason: \"%s\"",
+ (unsigned long long) error_code,
+ err_str_pfx, err_str, err_str_sfx,
+ reason);
+ }
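+
+    /*
+     * Illustrative example of the resulting error text (hypothetical
+     * values):
+     *
+     *   QUIC error code: 0x7 (FRAME_ENCODING_ERROR) (triggered by frame
+     *   type: 0x6 (CRYPTO)), reason: "decode error"
+     */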
+
+ if (src_file != NULL)
+ ERR_set_debug(src_file, src_line, src_func);
+
+ ch_save_err_state(ch);
+
+ tcause.error_code = error_code;
+ tcause.frame_type = frame_type;
+ tcause.reason = reason;
+ tcause.reason_len = strlen(reason);
+
+ ch->protocol_error = 1;
+ ch_start_terminating(ch, &tcause, 0);
+}
+
+/*
+ * Called once the terminating timer expires, meaning we move from TERMINATING
+ * to TERMINATED.
+ */
+static void ch_on_terminating_timeout(QUIC_CHANNEL *ch)
+{
+ ch_record_state_transition(ch, QUIC_CHANNEL_STATE_TERMINATED);
+}
+
+/*
+ * Determines the effective idle timeout duration. This is based on the idle
+ * timeout values that we and our peer signalled in transport parameters,
+ * with some limits applied.
+ */
+static OSSL_TIME ch_get_effective_idle_timeout_duration(QUIC_CHANNEL *ch)
+{
+ OSSL_TIME pto;
+
+ if (ch->max_idle_timeout == 0)
+ return ossl_time_infinite();
+
+ /*
+ * RFC 9000 s. 10.1: Idle Timeout
+ * To avoid excessively small idle timeout periods, endpoints
+ * MUST increase the idle timeout period to be at least three
+ * times the current Probe Timeout (PTO). This allows for
+ * multiple PTOs to expire, and therefore multiple probes to
+ * be sent and lost, prior to idle timeout.
+ */
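+    /*
+     * Worked example (illustrative values): with a negotiated max idle
+     * timeout of 30s and a PTO of 200ms, the effective timeout is
+     * max(30s, 3 * 200ms) = 30s; with a 1s timeout and a 500ms PTO it would
+     * be extended to 1.5s.
+     */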
+ pto = ossl_ackm_get_pto_duration(ch->ackm);
+ return ossl_time_max(ossl_ms2time(ch->max_idle_timeout),
+ ossl_time_multiply(pto, 3));
+}
+
+/*
+ * Updates our idle deadline. Called when an event happens which should bump the
+ * idle timeout.
+ */
+static void ch_update_idle(QUIC_CHANNEL *ch)
+{
+ ch->idle_deadline = ossl_time_add(get_time(ch),
+ ch_get_effective_idle_timeout_duration(ch));
+}
+
+/*
+ * Updates our ping deadline, which determines when we next generate a ping if
+ * we don't have any other ACK-eliciting frames to send.
+ */
+static void ch_update_ping_deadline(QUIC_CHANNEL *ch)
+{
+ OSSL_TIME max_span, idle_duration;
+
+ idle_duration = ch_get_effective_idle_timeout_duration(ch);
+ if (ossl_time_is_infinite(idle_duration)) {
+ ch->ping_deadline = ossl_time_infinite();
+ return;
+ }
+
+ /*
+ * Maximum amount of time without traffic before we send a PING to keep
+     * the connection open. Usually we use max_idle_timeout/2, but cap the
+     * period at the assumed NAT interval so that NAT devices do not have
+     * their state time out (RFC 9000 s. 10.1.2).
+ */
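+    /*
+     * Example (illustrative): with an effective idle timeout of 30s and an
+     * assumed NAT interval of 25s, we would send a PING after
+     * min(15s, 25s) = 15s of silence.
+     */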
+ max_span = ossl_time_divide(idle_duration, 2);
+ max_span = ossl_time_min(max_span, MAX_NAT_INTERVAL);
+ ch->ping_deadline = ossl_time_add(get_time(ch), max_span);
+}
+
+/* Called when the idle timeout expires. */
+static void ch_on_idle_timeout(QUIC_CHANNEL *ch)
+{
+ /*
+ * Idle timeout does not have an error code associated with it because a
+ * CONN_CLOSE is never sent for it. We shouldn't use this data once we reach
+ * TERMINATED anyway.
+ */
+ ch->terminate_cause.app = 0;
+ ch->terminate_cause.error_code = OSSL_QUIC_LOCAL_ERR_IDLE_TIMEOUT;
+ ch->terminate_cause.frame_type = 0;
+
+ ch_record_state_transition(ch, QUIC_CHANNEL_STATE_TERMINATED);
+}
+
+/**
+ * @brief Common handler for initializing a new QUIC connection.
+ *
+ * This function configures a QUIC channel (`QUIC_CHANNEL *ch`) for a new
+ * connection by setting the peer address, connection IDs, and necessary
+ * callbacks. It establishes initial secrets, sets up logging, and performs
+ * required transitions for the channel state.
+ *
+ * @param ch Pointer to the QUIC channel being initialized.
+ * @param peer Address of the peer to which the channel connects.
+ * @param peer_scid Peer-specified source connection ID.
+ * @param peer_dcid Peer-specified destination connection ID.
+ * @param peer_odcid Peer-specified original destination connection ID;
+ *                   may be NULL if no Retry packet was sent to the client.
+ * @return 1 on success, 0 on failure to set required elements.
+ */
+static int ch_on_new_conn_common(QUIC_CHANNEL *ch, const BIO_ADDR *peer,
+ const QUIC_CONN_ID *peer_scid,
+ const QUIC_CONN_ID *peer_dcid,
+ const QUIC_CONN_ID *peer_odcid)
+{
+ /* Note our newly learnt peer address and CIDs. */
+ if (!BIO_ADDR_copy(&ch->cur_peer_addr, peer))
+ return 0;
+
+ ch->init_dcid = *peer_dcid;
+ ch->cur_remote_dcid = *peer_scid;
+ ch->odcid.id_len = 0;
+
+ if (peer_odcid != NULL)
+ ch->odcid = *peer_odcid;
+
+ /* Inform QTX of peer address. */
+ if (!ossl_quic_tx_packetiser_set_peer(ch->txp, &ch->cur_peer_addr))
+ return 0;
+
+ /* Inform TXP of desired CIDs. */
+ if (!ossl_quic_tx_packetiser_set_cur_dcid(ch->txp, &ch->cur_remote_dcid))
+ return 0;
+
+ if (!ossl_quic_tx_packetiser_set_cur_scid(ch->txp, &ch->cur_local_cid))
+ return 0;
+
+    /* Set up QLOG, which could not happen earlier as the Initial ODCID was unknown. */
+ ossl_qtx_set_qlog_cb(ch->qtx, ch_get_qlog_cb, ch);
+ ossl_quic_tx_packetiser_set_qlog_cb(ch->txp, ch_get_qlog_cb, ch);
+
+ /*
+     * Plug in secrets for the Initial EL. Secrets for the QRX were already
+     * created in port_default_packet_handler().
+ */
+ if (!ossl_quic_provide_initial_secret(ch->port->engine->libctx,
+ ch->port->engine->propq,
+ &ch->init_dcid,
+ /*is_server=*/1,
+ NULL, ch->qtx))
+ return 0;
+
+ /* Register the peer ODCID in the LCIDM. */
+ if (!ossl_quic_lcidm_enrol_odcid(ch->lcidm, ch, peer_odcid == NULL ?
+ &ch->init_dcid :
+ peer_odcid))
+ return 0;
+
+ /* Change state. */
+ ch_record_state_transition(ch, QUIC_CHANNEL_STATE_ACTIVE);
+ ch->doing_proactive_ver_neg = 0; /* not currently supported */
+ return 1;
+}
+
+/* Called when we, as a server, get a new incoming connection. */
+int ossl_quic_channel_on_new_conn(QUIC_CHANNEL *ch, const BIO_ADDR *peer,
+ const QUIC_CONN_ID *peer_scid,
+ const QUIC_CONN_ID *peer_dcid)
+{
+ if (!ossl_assert(ch->state == QUIC_CHANNEL_STATE_IDLE && ch->is_server))
+ return 0;
+
+ /* Generate an Initial LCID we will use for the connection. */
+ if (!ossl_quic_lcidm_generate_initial(ch->lcidm, ch, &ch->cur_local_cid))
+ return 0;
+
+ return ch_on_new_conn_common(ch, peer, peer_scid, peer_dcid, NULL);
+}
+
+/**
+ * Binds a QUIC channel to a specific peer's address and connection IDs.
+ *
+ * This function is used to establish a binding between a QUIC channel and a
+ * peer's address and connection IDs. The binding is performed only if the
+ * channel is idle and is on the server side. The peer's destination connection
+ * ID (`peer_dcid`) is mandatory, and the channel's current local connection ID
+ * is set to this value.
+ *
+ * @param ch Pointer to the QUIC_CHANNEL structure representing the
+ * channel to be bound.
+ * @param peer Pointer to a BIO_ADDR structure representing the peer's
+ * address.
+ * @param peer_scid Pointer to the peer's source connection ID (QUIC_CONN_ID).
+ * @param peer_dcid Pointer to the peer's destination connection ID
+ * (QUIC_CONN_ID). This must not be NULL.
+ * @param peer_odcid Pointer to the original destination connection ID
+ * (QUIC_CONN_ID) chosen by the peer in its first initial
+ * packet received without a token.
+ *
+ * @return 1 on success, or 0 on failure if the conditions for binding are not
+ * met (e.g., channel is not idle or not a server, or binding fails).
+ */
+int ossl_quic_bind_channel(QUIC_CHANNEL *ch, const BIO_ADDR *peer,
+ const QUIC_CONN_ID *peer_scid,
+ const QUIC_CONN_ID *peer_dcid,
+ const QUIC_CONN_ID *peer_odcid)
+{
+ if (peer_dcid == NULL)
+ return 0;
+
+ if (!ossl_assert(ch->state == QUIC_CHANNEL_STATE_IDLE && ch->is_server))
+ return 0;
+
+ ch->cur_local_cid = *peer_dcid;
+ if (!ossl_quic_lcidm_bind_channel(ch->lcidm, ch, peer_dcid))
+ return 0;
+
+ /*
+     * peer_odcid is the initial destination connection ID chosen by the peer
+     * in the first Initial packet we received from it without a token.
+ */
+ return ch_on_new_conn_common(ch, peer, peer_scid, peer_dcid, peer_odcid);
+}
+
+SSL *ossl_quic_channel_get0_ssl(QUIC_CHANNEL *ch)
+{
+ return ch->tls;
+}
+
+static int ch_init_new_stream(QUIC_CHANNEL *ch, QUIC_STREAM *qs,
+ int can_send, int can_recv)
+{
+ uint64_t rxfc_wnd;
+ int server_init = ossl_quic_stream_is_server_init(qs);
+ int local_init = (ch->is_server == server_init);
+ int is_uni = !ossl_quic_stream_is_bidi(qs);
+
+ if (can_send)
+ if ((qs->sstream = ossl_quic_sstream_new(INIT_APP_BUF_LEN)) == NULL)
+ goto err;
+
+ if (can_recv)
+ if ((qs->rstream = ossl_quic_rstream_new(NULL, NULL, 0)) == NULL)
+ goto err;
+
+ /* TXFC */
+ if (!ossl_quic_txfc_init(&qs->txfc, &ch->conn_txfc))
+ goto err;
+
+ if (ch->got_remote_transport_params) {
+ /*
+ * If we already got peer TPs we need to apply the initial CWM credit
+ * now. If we didn't already get peer TPs this will be done
+ * automatically for all extant streams when we do.
+ */
+ if (can_send) {
+ uint64_t cwm;
+
+ if (is_uni)
+ cwm = ch->rx_init_max_stream_data_uni;
+ else if (local_init)
+ cwm = ch->rx_init_max_stream_data_bidi_local;
+ else
+ cwm = ch->rx_init_max_stream_data_bidi_remote;
+
+ ossl_quic_txfc_bump_cwm(&qs->txfc, cwm);
+ }
+ }
+
+ /* RXFC */
+ if (!can_recv)
+ rxfc_wnd = 0;
+ else if (is_uni)
+ rxfc_wnd = ch->tx_init_max_stream_data_uni;
+ else if (local_init)
+ rxfc_wnd = ch->tx_init_max_stream_data_bidi_local;
+ else
+ rxfc_wnd = ch->tx_init_max_stream_data_bidi_remote;
+
+ if (!ossl_quic_rxfc_init(&qs->rxfc, &ch->conn_rxfc,
+ rxfc_wnd,
+ DEFAULT_STREAM_RXFC_MAX_WND_MUL * rxfc_wnd,
+ get_time, ch))
+ goto err;
+
+ return 1;
+
+err:
+ ossl_quic_sstream_free(qs->sstream);
+ qs->sstream = NULL;
+ ossl_quic_rstream_free(qs->rstream);
+ qs->rstream = NULL;
+ return 0;
+}
+
+static uint64_t *ch_get_local_stream_next_ordinal_ptr(QUIC_CHANNEL *ch,
+ int is_uni)
+{
+ return is_uni ? &ch->next_local_stream_ordinal_uni
+ : &ch->next_local_stream_ordinal_bidi;
+}
+
+static const uint64_t *ch_get_local_stream_max_ptr(const QUIC_CHANNEL *ch,
+ int is_uni)
+{
+ return is_uni ? &ch->max_local_streams_uni
+ : &ch->max_local_streams_bidi;
+}
+
+static const QUIC_RXFC *ch_get_remote_stream_count_rxfc(const QUIC_CHANNEL *ch,
+ int is_uni)
+{
+ return is_uni ? &ch->max_streams_uni_rxfc
+ : &ch->max_streams_bidi_rxfc;
+}
+
+int ossl_quic_channel_is_new_local_stream_admissible(QUIC_CHANNEL *ch,
+ int is_uni)
+{
+ const uint64_t *p_next_ordinal = ch_get_local_stream_next_ordinal_ptr(ch, is_uni);
+
+ return ossl_quic_stream_map_is_local_allowed_by_stream_limit(&ch->qsm,
+ *p_next_ordinal,
+ is_uni);
+}
+
+uint64_t ossl_quic_channel_get_local_stream_count_avail(const QUIC_CHANNEL *ch,
+ int is_uni)
+{
+ const uint64_t *p_next_ordinal, *p_max;
+
+ p_next_ordinal = ch_get_local_stream_next_ordinal_ptr((QUIC_CHANNEL *)ch,
+ is_uni);
+ p_max = ch_get_local_stream_max_ptr(ch, is_uni);
+
+ return *p_max - *p_next_ordinal;
+}
+
+uint64_t ossl_quic_channel_get_remote_stream_count_avail(const QUIC_CHANNEL *ch,
+ int is_uni)
+{
+ return ossl_quic_rxfc_get_credit(ch_get_remote_stream_count_rxfc(ch, is_uni));
+}
+
+QUIC_STREAM *ossl_quic_channel_new_stream_local(QUIC_CHANNEL *ch, int is_uni)
+{
+ QUIC_STREAM *qs;
+ int type;
+ uint64_t stream_id;
+ uint64_t *p_next_ordinal;
+
+ type = ch->is_server ? QUIC_STREAM_INITIATOR_SERVER
+ : QUIC_STREAM_INITIATOR_CLIENT;
+
+ p_next_ordinal = ch_get_local_stream_next_ordinal_ptr(ch, is_uni);
+
+ if (is_uni)
+ type |= QUIC_STREAM_DIR_UNI;
+ else
+ type |= QUIC_STREAM_DIR_BIDI;
+
+ if (*p_next_ordinal >= ((uint64_t)1) << 62)
+ return NULL;
+
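+    /*
+     * Example (illustrative): ordinal 3 for a client-initiated bidirectional
+     * stream (type bits 0x00) yields stream ID (3 << 2) | 0x00 = 12 (0x0C).
+     */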
+ stream_id = ((*p_next_ordinal) << 2) | type;
+
+ if ((qs = ossl_quic_stream_map_alloc(&ch->qsm, stream_id, type)) == NULL)
+ return NULL;
+
+ /* Locally-initiated stream, so we always want a send buffer. */
+ if (!ch_init_new_stream(ch, qs, /*can_send=*/1, /*can_recv=*/!is_uni))
+ goto err;
+
+ ++*p_next_ordinal;
+ return qs;
+
+err:
+ ossl_quic_stream_map_release(&ch->qsm, qs);
+ return NULL;
+}
+
+QUIC_STREAM *ossl_quic_channel_new_stream_remote(QUIC_CHANNEL *ch,
+ uint64_t stream_id)
+{
+ uint64_t peer_role;
+ int is_uni;
+ QUIC_STREAM *qs;
+
+ peer_role = ch->is_server
+ ? QUIC_STREAM_INITIATOR_CLIENT
+ : QUIC_STREAM_INITIATOR_SERVER;
+
+ if ((stream_id & QUIC_STREAM_INITIATOR_MASK) != peer_role)
+ return NULL;
+
+ is_uni = ((stream_id & QUIC_STREAM_DIR_MASK) == QUIC_STREAM_DIR_UNI);
+
+ qs = ossl_quic_stream_map_alloc(&ch->qsm, stream_id,
+ stream_id & (QUIC_STREAM_INITIATOR_MASK
+ | QUIC_STREAM_DIR_MASK));
+ if (qs == NULL)
+ return NULL;
+
+ if (!ch_init_new_stream(ch, qs, /*can_send=*/!is_uni, /*can_recv=*/1))
+ goto err;
+
+ if (ch->incoming_stream_auto_reject)
+ ossl_quic_channel_reject_stream(ch, qs);
+ else
+ ossl_quic_stream_map_push_accept_queue(&ch->qsm, qs);
+
+ return qs;
+
+err:
+ ossl_quic_stream_map_release(&ch->qsm, qs);
+ return NULL;
+}
+
+void ossl_quic_channel_set_incoming_stream_auto_reject(QUIC_CHANNEL *ch,
+ int enable,
+ uint64_t aec)
+{
+ ch->incoming_stream_auto_reject = (enable != 0);
+ ch->incoming_stream_auto_reject_aec = aec;
+}
+
+void ossl_quic_channel_reject_stream(QUIC_CHANNEL *ch, QUIC_STREAM *qs)
+{
+ ossl_quic_stream_map_stop_sending_recv_part(&ch->qsm, qs,
+ ch->incoming_stream_auto_reject_aec);
+
+ ossl_quic_stream_map_reset_stream_send_part(&ch->qsm, qs,
+ ch->incoming_stream_auto_reject_aec);
+ qs->deleted = 1;
+
+ ossl_quic_stream_map_update_state(&ch->qsm, qs);
+}
+
+/* Replace the local connection ID in the TXP and LCIDM for testing purposes. */
+int ossl_quic_channel_replace_local_cid(QUIC_CHANNEL *ch,
+ const QUIC_CONN_ID *conn_id)
+{
+ /* Remove the current LCID from the LCIDM. */
+ if (!ossl_quic_lcidm_debug_remove(ch->lcidm, &ch->cur_local_cid))
+ return 0;
+ ch->cur_local_cid = *conn_id;
+ /* Set in the TXP, used only for long header packets. */
+ if (!ossl_quic_tx_packetiser_set_cur_scid(ch->txp, &ch->cur_local_cid))
+ return 0;
+ /* Add the new LCID to the LCIDM. */
+ if (!ossl_quic_lcidm_debug_add(ch->lcidm, ch, &ch->cur_local_cid,
+ 100))
+ return 0;
+ return 1;
+}
+
+void ossl_quic_channel_set_msg_callback(QUIC_CHANNEL *ch,
+ ossl_msg_cb msg_callback,
+ SSL *msg_callback_ssl)
+{
+ ch->msg_callback = msg_callback;
+ ch->msg_callback_ssl = msg_callback_ssl;
+ ossl_qtx_set_msg_callback(ch->qtx, msg_callback, msg_callback_ssl);
+ ossl_quic_tx_packetiser_set_msg_callback(ch->txp, msg_callback,
+ msg_callback_ssl);
+ /*
+     * Postpone msg callback setting for the tserver until the port calls
+ * port_bind_channel().
+ */
+ if (ch->is_tserver_ch == 0)
+ ossl_qrx_set_msg_callback(ch->qrx, msg_callback, msg_callback_ssl);
+}
+
+void ossl_quic_channel_set_msg_callback_arg(QUIC_CHANNEL *ch,
+ void *msg_callback_arg)
+{
+ ch->msg_callback_arg = msg_callback_arg;
+ ossl_qtx_set_msg_callback_arg(ch->qtx, msg_callback_arg);
+ ossl_quic_tx_packetiser_set_msg_callback_arg(ch->txp, msg_callback_arg);
+
+ /*
+     * Postpone msg callback setting for the tserver until the port calls
+ * port_bind_channel().
+ */
+ if (ch->is_tserver_ch == 0)
+ ossl_qrx_set_msg_callback_arg(ch->qrx, msg_callback_arg);
+}
+
+void ossl_quic_channel_set_txku_threshold_override(QUIC_CHANNEL *ch,
+ uint64_t tx_pkt_threshold)
+{
+ ch->txku_threshold_override = tx_pkt_threshold;
+}
+
+uint64_t ossl_quic_channel_get_tx_key_epoch(QUIC_CHANNEL *ch)
+{
+ return ossl_qtx_get_key_epoch(ch->qtx);
+}
+
+uint64_t ossl_quic_channel_get_rx_key_epoch(QUIC_CHANNEL *ch)
+{
+ return ossl_qrx_get_key_epoch(ch->qrx);
+}
+
+int ossl_quic_channel_trigger_txku(QUIC_CHANNEL *ch)
+{
+ if (!txku_allowed(ch))
+ return 0;
+
+ ch->ku_locally_initiated = 1;
+ ch_trigger_txku(ch);
+ return 1;
+}
+
+int ossl_quic_channel_ping(QUIC_CHANNEL *ch)
+{
+ int pn_space = ossl_quic_enc_level_to_pn_space(ch->tx_enc_level);
+
+ ossl_quic_tx_packetiser_schedule_ack_eliciting(ch->txp, pn_space);
+
+ return 1;
+}
+
+uint16_t ossl_quic_channel_get_diag_num_rx_ack(QUIC_CHANNEL *ch)
+{
+ return ch->diag_num_rx_ack;
+}
+
+void ossl_quic_channel_get_diag_local_cid(QUIC_CHANNEL *ch, QUIC_CONN_ID *cid)
+{
+ *cid = ch->cur_local_cid;
+}
+
+int ossl_quic_channel_have_generated_transport_params(const QUIC_CHANNEL *ch)
+{
+ return ch->got_local_transport_params;
+}
+
+void ossl_quic_channel_set_max_idle_timeout_request(QUIC_CHANNEL *ch, uint64_t ms)
+{
+ ch->max_idle_timeout_local_req = ms;
+}
+
+uint64_t ossl_quic_channel_get_max_idle_timeout_request(const QUIC_CHANNEL *ch)
+{
+ return ch->max_idle_timeout_local_req;
+}
+
+uint64_t ossl_quic_channel_get_max_idle_timeout_peer_request(const QUIC_CHANNEL *ch)
+{
+ return ch->max_idle_timeout_remote_req;
+}
+
+uint64_t ossl_quic_channel_get_max_idle_timeout_actual(const QUIC_CHANNEL *ch)
+{
+ return ch->max_idle_timeout;
+}
diff --git a/crypto/openssl/ssl/quic/quic_channel_local.h b/crypto/openssl/ssl/quic/quic_channel_local.h
new file mode 100644
index 000000000000..816ccf694b8f
--- /dev/null
+++ b/crypto/openssl/ssl/quic/quic_channel_local.h
@@ -0,0 +1,474 @@
+#ifndef OSSL_QUIC_CHANNEL_LOCAL_H
+# define OSSL_QUIC_CHANNEL_LOCAL_H
+
+# include "internal/quic_channel.h"
+
+# ifndef OPENSSL_NO_QUIC
+
+# include <openssl/lhash.h>
+# include "internal/list.h"
+# include "internal/quic_predef.h"
+# include "internal/quic_fc.h"
+# include "internal/quic_stream_map.h"
+# include "internal/quic_tls.h"
+
+/*
+ * QUIC Channel Structure
+ * ======================
+ *
+ * QUIC channel internals. It is intended that only the QUIC_CHANNEL
+ * implementation and the RX depacketiser be allowed to access this structure
+ * directly. As the RX depacketiser has no state of its own and computes over a
+ * QUIC_CHANNEL structure, it can be viewed as an extension of the QUIC_CHANNEL
+ * implementation. While the RX depacketiser could be provided with adequate
+ * accessors to do what it needs, this would weaken the abstraction provided by
+ * the QUIC_CHANNEL to other components; moreover the coupling of the RX
+ * depacketiser to QUIC_CHANNEL internals is too deep and bespoke to make this
+ * desirable.
+ *
+ * Other components should not include this header.
+ */
+struct quic_channel_st {
+ QUIC_PORT *port;
+
+ /*
+ * QUIC_PORT keeps the channels which belong to it on a list for bookkeeping
+ * purposes.
+ */
+ OSSL_LIST_MEMBER(ch, QUIC_CHANNEL);
+ OSSL_LIST_MEMBER(incoming_ch, QUIC_CHANNEL);
+
+ /*
+ * The associated TLS 1.3 connection data. Used to provide the handshake
+ * layer; its 'network' side is plugged into the crypto stream for each EL
+ * (other than the 0-RTT EL). Note that the `tls` SSL object is not "owned"
+ * by this channel. It is created and managed elsewhere and is guaranteed
+ * to be valid for the lifetime of the channel. Therefore we do not free it
+ * when we free the channel.
+ */
+ QUIC_TLS *qtls;
+ SSL *tls;
+
+ /* Port LCIDM we use to register LCIDs. */
+ QUIC_LCIDM *lcidm;
+ /* SRTM we register SRTs with. */
+ QUIC_SRTM *srtm;
+
+ /* Optional QLOG instance (or NULL). */
+ QLOG *qlog;
+
+ /*
+ * The transport parameter block we will send or have sent.
+ * Freed after sending or when connection is freed.
+ */
+ unsigned char *local_transport_params;
+
+ /*
+     * Pending new token to send once the handshake is complete.
+ */
+ uint8_t *pending_new_token;
+ size_t pending_new_token_len;
+
+ /* Our current L4 peer address, if any. */
+ BIO_ADDR cur_peer_addr;
+
+ /*
+ * Subcomponents of the connection. All of these components are instantiated
+ * and owned by us.
+ */
+ OSSL_QUIC_TX_PACKETISER *txp;
+ QUIC_TXPIM *txpim;
+ QUIC_CFQ *cfq;
+ /*
+     * Connection-level FC. The stream-count RXFCs are used to manage
+     * MAX_STREAMS signalling.
+ */
+ QUIC_TXFC conn_txfc;
+ QUIC_RXFC conn_rxfc, crypto_rxfc[QUIC_PN_SPACE_NUM];
+ QUIC_RXFC max_streams_bidi_rxfc, max_streams_uni_rxfc;
+ QUIC_STREAM_MAP qsm;
+ OSSL_STATM statm;
+ OSSL_CC_DATA *cc_data;
+ const OSSL_CC_METHOD *cc_method;
+ OSSL_ACKM *ackm;
+
+ /* Record layers in the TX and RX directions. */
+ OSSL_QTX *qtx;
+ OSSL_QRX *qrx;
+
+ /* Message callback related arguments */
+ ossl_msg_cb msg_callback;
+ void *msg_callback_arg;
+ SSL *msg_callback_ssl;
+
+ /*
+ * Send and receive parts of the crypto streams.
+ * crypto_send[QUIC_PN_SPACE_APP] is the 1-RTT crypto stream. There is no
+ * 0-RTT crypto stream.
+ */
+ QUIC_SSTREAM *crypto_send[QUIC_PN_SPACE_NUM];
+ QUIC_RSTREAM *crypto_recv[QUIC_PN_SPACE_NUM];
+
+ /* Internal state. */
+ /*
+ * Client: The DCID used in the first Initial packet we transmit as a client.
+ * Server: The DCID used in the first Initial packet the client transmitted.
+ * Randomly generated and required by RFC to be at least 8 bytes.
+ */
+ QUIC_CONN_ID init_dcid;
+
+ /*
+     * Server: If this channel was created in response to an Initial packet
+     * sent after the server issued a Retry packet for address validation,
+     * this field stores the original connection ID from the first Initial
+     * packet received.
+ */
+ QUIC_CONN_ID odcid;
+
+ /*
+ * Client: The SCID found in the first Initial packet from the server.
+ * Not valid for servers.
+ * Valid if have_received_enc_pkt is set.
+ */
+ QUIC_CONN_ID init_scid;
+
+ /*
+ * Client only: The SCID found in an incoming Retry packet we handled.
+ * Not valid for servers.
+ */
+ QUIC_CONN_ID retry_scid;
+
+ /* Server only: The DCID we currently expect the peer to use to talk to us. */
+ QUIC_CONN_ID cur_local_cid;
+
+ /*
+ * The DCID we currently use to talk to the peer and its sequence num.
+ */
+ QUIC_CONN_ID cur_remote_dcid;
+ uint64_t cur_remote_seq_num;
+ uint64_t cur_retire_prior_to;
+
+ /* Transport parameter values we send to our peer. */
+ uint64_t tx_init_max_stream_data_bidi_local;
+ uint64_t tx_init_max_stream_data_bidi_remote;
+ uint64_t tx_init_max_stream_data_uni;
+ uint64_t tx_max_ack_delay; /* ms */
+
+    /* Transport parameter values received from the peer. */
+ uint64_t rx_init_max_stream_data_bidi_local;
+ uint64_t rx_init_max_stream_data_bidi_remote;
+ uint64_t rx_init_max_stream_data_uni;
+ uint64_t rx_max_ack_delay; /* ms */
+ unsigned char rx_ack_delay_exp;
+
+ /* Diagnostic counters for testing purposes only. May roll over. */
+ uint16_t diag_num_rx_ack; /* Number of ACK frames received */
+
+ /*
+ * Temporary staging area to store information about the incoming packet we
+ * are currently processing.
+ */
+ OSSL_QRX_PKT *qrx_pkt;
+
+ /*
+ * Current limit on number of streams we may create. Set by transport
+ * parameters initially and then by MAX_STREAMS frames.
+ */
+ uint64_t max_local_streams_bidi;
+ uint64_t max_local_streams_uni;
+
+ /* The idle timeout values we and our peer requested. */
+ uint64_t max_idle_timeout_local_req;
+ uint64_t max_idle_timeout_remote_req;
+
+ /* The negotiated maximum idle timeout in milliseconds. */
+ uint64_t max_idle_timeout;
+
+ /*
+ * Maximum payload size in bytes for datagrams sent to our peer, as
+ * negotiated by transport parameters.
+ */
+ uint64_t rx_max_udp_payload_size;
+ /* Maximum active CID limit, as negotiated by transport parameters. */
+ uint64_t rx_active_conn_id_limit;
+
+ /*
+ * Used to allocate stream IDs. This is a stream ordinal, i.e., a stream ID
+     * without the low two bits designating type and initiator. Shift left by
+     * two and OR in the type bits to convert to a stream ID.
+ */
+ uint64_t next_local_stream_ordinal_bidi;
+ uint64_t next_local_stream_ordinal_uni;
+
+ /*
+ * Used to track which stream ordinals within a given stream type have been
+ * used by the remote peer. This is an optimisation used to determine
+ * which streams should be implicitly created due to usage of a higher
+ * stream ordinal.
+ */
+ uint64_t next_remote_stream_ordinal_bidi;
+ uint64_t next_remote_stream_ordinal_uni;
+
+ /*
+ * Application error code to be used for STOP_SENDING/RESET_STREAM frames
+ * used to autoreject incoming streams.
+ */
+ uint64_t incoming_stream_auto_reject_aec;
+
+ /*
+     * Override packet count threshold at which we do a spontaneous TXKU.
+     * Usually UINT64_MAX, in which case a suitable value is chosen based on
+     * AEAD limit advice from the QRL utility functions. This is intended for
+     * testing use only.
+ */
+ uint64_t txku_threshold_override;
+
+ /* Valid if we are in the TERMINATING or TERMINATED states. */
+ QUIC_TERMINATE_CAUSE terminate_cause;
+
+ /*
+     * Deadline at which we move from the TERMINATING state to the TERMINATED
+     * state. Valid if in the TERMINATING state.
+ */
+ OSSL_TIME terminate_deadline;
+
+ /*
+ * Deadline at which connection dies due to idle timeout if no further
+ * events occur.
+ */
+ OSSL_TIME idle_deadline;
+
+ /*
+ * Deadline at which we should send an ACK-eliciting packet to ensure
+ * idle timeout does not occur.
+ */
+ OSSL_TIME ping_deadline;
+
+ /*
+ * The deadline at which the period in which it is RECOMMENDED that we not
+ * initiate any spontaneous TXKU ends. This is zero if no such deadline
+ * applies.
+ */
+ OSSL_TIME txku_cooldown_deadline;
+
+ /*
+ * The deadline at which we take the QRX out of UPDATING and back to NORMAL.
+     * Valid if rxku_in_progress is 1.
+ */
+ OSSL_TIME rxku_update_end_deadline;
+
+ /*
+ * The first (application space) PN sent with a new key phase. Valid if the
+ * QTX key epoch is greater than 0. Once a packet we sent with a PN p (p >=
+ * txku_pn) is ACKed, the TXKU is considered completed and txku_in_progress
+ * becomes 0. For sanity's sake, such a PN p should also be <= the highest
+ * PN we have ever sent, of course.
+ */
+ QUIC_PN txku_pn;
+
+ /*
+ * The (application space) PN which triggered RXKU detection. Valid if
+ * rxku_pending_confirm.
+ */
+ QUIC_PN rxku_trigger_pn;
+
+ /*
+ * State tracking. QUIC connection-level state is best represented based on
+ * whether various things have happened yet or not, rather than as an
+ * explicit FSM. We do have a coarse state variable which tracks the basic
+ * state of the connection's lifecycle, but more fine-grained conditions of
+ * the Active state are tracked via flags below. For more details, see
+ * doc/designs/quic-design/connection-state-machine.md. We are in the Open
+ * state if the state is QUIC_CHANNEL_STATE_ACTIVE and handshake_confirmed is
+ * set.
+ */
+ unsigned int state : 3;
+
+ /*
+ * Have we received at least one encrypted packet from the peer?
+ * (If so, Retry and Version Negotiation messages should no longer
+ * be received and should be ignored if they do occur.)
+ */
+ unsigned int have_received_enc_pkt : 1;
+
+ /*
+ * Have we successfully processed any packet, including a Version
+ * Negotiation packet? If so, further Version Negotiation packets should be
+ * ignored.
+ */
+ unsigned int have_processed_any_pkt : 1;
+
+ /*
+ * Have we sent literally any packet yet? If not, there is no point polling
+ * RX.
+ */
+ unsigned int have_sent_any_pkt : 1;
+
+ /*
+ * Are we currently doing proactive version negotiation?
+ */
+ unsigned int doing_proactive_ver_neg : 1;
+
+ /* We have received transport parameters from the peer. */
+ unsigned int got_remote_transport_params : 1;
+ /* We have generated our local transport parameters. */
+ unsigned int got_local_transport_params : 1;
+
+ /*
+ * This monotonically transitions to 1 once the TLS state machine is
+ * 'complete', meaning that it has both sent a Finished and successfully
+ * verified the peer's Finished (see RFC 9001 s. 4.1.1). Note that it
+ * does not transition to 1 at both peers simultaneously.
+ *
+ * Handshake completion is not the same as handshake confirmation (see
+ * below).
+ */
+ unsigned int handshake_complete : 1;
+
+ /*
+ * This monotonically transitions to 1 once the handshake is confirmed.
+ * This happens on the client when we receive a HANDSHAKE_DONE frame.
+ * At our option, we may also take acknowledgement of any 1-RTT packet
+ * we sent as a handshake confirmation.
+ */
+ unsigned int handshake_confirmed : 1;
+
+ /*
+ * We are sending Initial packets based on a Retry. This means we definitely
+ * should not receive another Retry, and if we do it is an error.
+ */
+ unsigned int doing_retry : 1;
+
+ /*
+ * We don't store the current EL here; the TXP asks the QTX which ELs
+ * are provisioned to determine which ELs to use.
+ */
+
+ /* Have statm, qsm been initialised? Used to track cleanup. */
+ unsigned int have_statm : 1;
+ unsigned int have_qsm : 1;
+
+ /*
+ * Preferred ELs for transmission and reception. This is not strictly needed
+ * as it can be inferred from what keys we have provisioned, but makes
+ * determining the current EL simpler and faster. A separate EL for
+ * transmission and reception is not strictly necessary but makes things
+ * easier for interoperation with the handshake layer, which likes to invoke
+ * the yield secret callback at different times for TX and RX.
+ */
+ unsigned int tx_enc_level : 3;
+ unsigned int rx_enc_level : 3;
+
+ /* If bit n is set, EL n has been discarded. */
+ unsigned int el_discarded : 4;
+
+ /*
+ * While in TERMINATING - CLOSING, set when we should generate a connection
+ * close frame.
+ */
+ unsigned int conn_close_queued : 1;
+
+ /* Are we in server mode? Never changes after instantiation. */
+ unsigned int is_server : 1;
+
+ /*
+ * Set temporarily when the handshake layer has given us a new RX secret.
+ * Used to determine if we need to check our RX queues again.
+ */
+ unsigned int have_new_rx_secret : 1;
+
+ /* Have we ever called QUIC_TLS yet during RX processing? */
+ unsigned int did_tls_tick : 1;
+ /* Has any CRYPTO frame been processed during this tick? */
+ unsigned int did_crypto_frame : 1;
+
+ /*
+ * Have we sent an ack-eliciting packet since the last successful packet
+ * reception? Used to determine when to bump idle timer (see RFC 9000 s.
+ * 10.1).
+ */
+ unsigned int have_sent_ack_eliciting_since_rx : 1;
+
+ /* Should incoming streams automatically be rejected? */
+ unsigned int incoming_stream_auto_reject : 1;
+
+ /*
+ * 1 if a key update sequence was locally initiated, meaning we sent the
+ * TXKU first and the resultant RXKU shouldn't result in our triggering
+ * another TXKU. 0 if a key update sequence was initiated by the peer,
+ * meaning we detect a RXKU first and have to generate a TXKU in response.
+ */
+ unsigned int ku_locally_initiated : 1;
+
+ /*
+ * 1 if we have triggered TXKU (whether spontaneous or solicited) but are
+ * waiting for any PN using that new KP to be ACKed. While this is set, we
+ * are not allowed to trigger spontaneous TXKU (but solicited TXKU is
+ * potentially still possible).
+ */
+ unsigned int txku_in_progress : 1;
+
+ /*
+ * We have received an RXKU event and currently are going through
+ * UPDATING/COOLDOWN on the QRX. COOLDOWN is currently not used. Since RXKU
+ * cannot be detected in this state, this doesn't cause a protocol error or
+ * anything similar if a peer tries TXKU in this state. That traffic would
+ * simply be dropped. It's only used to track that our UPDATING timer is
+ * active so we know when to take the QRX out of UPDATING and back to
+ * NORMAL.
+ */
+ unsigned int rxku_in_progress : 1;
+
+ /*
+ * We have received an RXKU but have yet to send an ACK for it, which means
+ * no further RXKUs are allowed yet. Note that we cannot detect further
+ * RXKUs anyway while the QRX remains in the UPDATING/COOLDOWN states, so
+ * this restriction comes into play if we take more than PTO time to send
+ * an ACK for it (not likely).
+ */
+ unsigned int rxku_pending_confirm : 1;
+
+ /* Temporary variable indicating rxku_pending_confirm is to become 0. */
+ unsigned int rxku_pending_confirm_done : 1;
+
+ /*
+ * If set, RXKU is expected (because we initiated a spontaneous TXKU).
+ */
+ unsigned int rxku_expected : 1;
+
+ /* Permanent net error encountered */
+ unsigned int net_error : 1;
+
+ /*
+ * Protocol error encountered. Note that you should refer to the state field
+ * rather than this. This is only used so we can ignore protocol errors
+ * after the first protocol error, but still record the first protocol error
+ * if it happens during the TERMINATING state.
+ */
+ unsigned int protocol_error : 1;
+
+ /* Are we using addressed mode? */
+ unsigned int addressed_mode : 1;
+
+ /* Are we on the QUIC_PORT linked list of channels? */
+ unsigned int on_port_list : 1;
+
+ /* Has qlog been requested? */
+ unsigned int use_qlog : 1;
+
+    /* Is this channel owned by a QUIC test server (QUIC_TSERVER)? */
+ unsigned int is_tserver_ch : 1;
+
+ /* Saved error stack in case permanent error was encountered */
+ ERR_STATE *err_state;
+
+ /* Scratch area for use by RXDP to store decoded ACK ranges. */
+ OSSL_QUIC_ACK_RANGE *ack_range_scratch;
+ size_t num_ack_range_scratch;
+
+ /* Title for qlog purposes. We own this copy. */
+ char *qlog_title;
+};
+
+# endif
+
+#endif
diff --git a/crypto/openssl/ssl/quic/quic_demux.c b/crypto/openssl/ssl/quic/quic_demux.c
new file mode 100644
index 000000000000..a84a44c6e9fb
--- /dev/null
+++ b/crypto/openssl/ssl/quic/quic_demux.c
@@ -0,0 +1,474 @@
+/*
+ * Copyright 2022-2025 The OpenSSL Project Authors. All Rights Reserved.
+ *
+ * Licensed under the Apache License 2.0 (the "License"). You may not use
+ * this file except in compliance with the License. You can obtain a copy
+ * in the file LICENSE in the source distribution or at
+ * https://www.openssl.org/source/license.html
+ */
+
+#include "internal/quic_demux.h"
+#include "internal/quic_wire_pkt.h"
+#include "internal/common.h"
+#include <openssl/lhash.h>
+#include <openssl/err.h>
+
+#define URXE_DEMUX_STATE_FREE 0 /* on urx_free list */
+#define URXE_DEMUX_STATE_PENDING 1 /* on urx_pending list */
+#define URXE_DEMUX_STATE_ISSUED 2 /* on neither list */
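+
+/*
+ * URXE lifecycle summary: FREE -> PENDING when a URXE is filled by a network
+ * read, PENDING -> ISSUED when it is handed to the default callback, and
+ * ISSUED -> FREE when the user returns it via ossl_quic_demux_release_urxe()
+ * (or ISSUED -> PENDING if it is reinjected).
+ */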
+
+#define DEMUX_MAX_MSGS_PER_CALL 32
+
+#define DEMUX_DEFAULT_MTU 1500
+
+struct quic_demux_st {
+ /* The underlying transport BIO with datagram semantics. */
+ BIO *net_bio;
+
+ /*
+ * QUIC short packets do not contain the length of the connection ID field,
+ * therefore it must be known contextually. The demuxer requires connection
+ * IDs of the same length to be used for all incoming packets.
+ */
+ size_t short_conn_id_len;
+
+ /*
+ * Our current understanding of the upper bound on an incoming datagram size
+ * in bytes.
+ */
+ size_t mtu;
+
+ /* The datagram_id to use for the next datagram we receive. */
+ uint64_t next_datagram_id;
+
+ /* Time retrieval callback. */
+ OSSL_TIME (*now)(void *arg);
+ void *now_arg;
+
+ /* The default packet handler, if any. */
+ ossl_quic_demux_cb_fn *default_cb;
+ void *default_cb_arg;
+
+ /*
+ * List of URXEs which are not currently in use (i.e., not filled with
+ * unconsumed data). These are moved to the pending list as they are filled.
+ */
+ QUIC_URXE_LIST urx_free;
+
+ /*
+ * List of URXEs which are filled with received encrypted data. These are
+ * removed from this list as we invoke the callbacks for each of them. They
+ * are then not on any list managed by us; we forget about them until our
+ * user calls ossl_quic_demux_release_urxe to return the URXE to us, at
+ * which point we add it to the free list.
+ */
+ QUIC_URXE_LIST urx_pending;
+
+ /* Whether to use local address support. */
+ char use_local_addr;
+};
+
+QUIC_DEMUX *ossl_quic_demux_new(BIO *net_bio,
+ size_t short_conn_id_len,
+ OSSL_TIME (*now)(void *arg),
+ void *now_arg)
+{
+ QUIC_DEMUX *demux;
+
+ demux = OPENSSL_zalloc(sizeof(QUIC_DEMUX));
+ if (demux == NULL)
+ return NULL;
+
+ demux->net_bio = net_bio;
+ demux->short_conn_id_len = short_conn_id_len;
+ /* We update this if possible when we get a BIO. */
+ demux->mtu = DEMUX_DEFAULT_MTU;
+ demux->now = now;
+ demux->now_arg = now_arg;
+
+ if (net_bio != NULL
+ && BIO_dgram_get_local_addr_cap(net_bio)
+ && BIO_dgram_set_local_addr_enable(net_bio, 1))
+ demux->use_local_addr = 1;
+
+ return demux;
+}
+
+static void demux_free_urxl(QUIC_URXE_LIST *l)
+{
+ QUIC_URXE *e, *enext;
+
+ for (e = ossl_list_urxe_head(l); e != NULL; e = enext) {
+ enext = ossl_list_urxe_next(e);
+ ossl_list_urxe_remove(l, e);
+ OPENSSL_free(e);
+ }
+}
+
+void ossl_quic_demux_free(QUIC_DEMUX *demux)
+{
+ if (demux == NULL)
+ return;
+
+ /* Free all URXEs we are holding. */
+ demux_free_urxl(&demux->urx_free);
+ demux_free_urxl(&demux->urx_pending);
+
+ OPENSSL_free(demux);
+}
+
+void ossl_quic_demux_set_bio(QUIC_DEMUX *demux, BIO *net_bio)
+{
+ unsigned int mtu;
+
+ demux->net_bio = net_bio;
+
+ if (net_bio != NULL) {
+ /*
+ * Try to determine our MTU if possible. The BIO is not required to
+ * support this, in which case we remain at the last known MTU, or our
+ * initial default.
+ */
+ mtu = BIO_dgram_get_mtu(net_bio);
+ if (mtu >= QUIC_MIN_INITIAL_DGRAM_LEN)
+ ossl_quic_demux_set_mtu(demux, mtu); /* best effort */
+ }
+}
+
+int ossl_quic_demux_set_mtu(QUIC_DEMUX *demux, unsigned int mtu)
+{
+ if (mtu < QUIC_MIN_INITIAL_DGRAM_LEN)
+ return 0;
+
+ demux->mtu = mtu;
+ return 1;
+}
+
+void ossl_quic_demux_set_default_handler(QUIC_DEMUX *demux,
+ ossl_quic_demux_cb_fn *cb,
+ void *cb_arg)
+{
+ demux->default_cb = cb;
+ demux->default_cb_arg = cb_arg;
+}
+
+static QUIC_URXE *demux_alloc_urxe(size_t alloc_len)
+{
+ QUIC_URXE *e;
+
+ if (alloc_len >= SIZE_MAX - sizeof(QUIC_URXE))
+ return NULL;
+
+ e = OPENSSL_malloc(sizeof(QUIC_URXE) + alloc_len);
+ if (e == NULL)
+ return NULL;
+
+ ossl_list_urxe_init_elem(e);
+ e->alloc_len = alloc_len;
+ e->data_len = 0;
+ return e;
+}
+
+static QUIC_URXE *demux_resize_urxe(QUIC_DEMUX *demux, QUIC_URXE *e,
+ size_t new_alloc_len)
+{
+ QUIC_URXE *e2, *prev;
+
+ if (!ossl_assert(e->demux_state == URXE_DEMUX_STATE_FREE))
+ /* Never attempt to resize a URXE which is not on the free list. */
+ return NULL;
+
+ prev = ossl_list_urxe_prev(e);
+ ossl_list_urxe_remove(&demux->urx_free, e);
+
+ e2 = OPENSSL_realloc(e, sizeof(QUIC_URXE) + new_alloc_len);
+ if (e2 == NULL) {
+ /* Failed to resize, abort. */
+ if (prev == NULL)
+ ossl_list_urxe_insert_head(&demux->urx_free, e);
+ else
+ ossl_list_urxe_insert_after(&demux->urx_free, prev, e);
+
+ return NULL;
+ }
+
+ if (prev == NULL)
+ ossl_list_urxe_insert_head(&demux->urx_free, e2);
+ else
+ ossl_list_urxe_insert_after(&demux->urx_free, prev, e2);
+
+ e2->alloc_len = new_alloc_len;
+ return e2;
+}
+
+static QUIC_URXE *demux_reserve_urxe(QUIC_DEMUX *demux, QUIC_URXE *e,
+ size_t alloc_len)
+{
+ return e->alloc_len < alloc_len ? demux_resize_urxe(demux, e, alloc_len) : e;
+}
+
+static int demux_ensure_free_urxe(QUIC_DEMUX *demux, size_t min_num_free)
+{
+ QUIC_URXE *e;
+
+ while (ossl_list_urxe_num(&demux->urx_free) < min_num_free) {
+ e = demux_alloc_urxe(demux->mtu);
+ if (e == NULL)
+ return 0;
+
+ ossl_list_urxe_insert_tail(&demux->urx_free, e);
+ e->demux_state = URXE_DEMUX_STATE_FREE;
+ }
+
+ return 1;
+}
+
+/*
+ * Receive datagrams from network, placing them into URXEs.
+ *
+ * Returns a QUIC_DEMUX_PUMP_RES_* value.
+ *
+ * Precondition: at least one URXE is free
+ * Precondition: there are no pending URXEs
+ */
+static int demux_recv(QUIC_DEMUX *demux)
+{
+ BIO_MSG msg[DEMUX_MAX_MSGS_PER_CALL];
+ size_t rd, i;
+ QUIC_URXE *urxe = ossl_list_urxe_head(&demux->urx_free), *unext;
+ OSSL_TIME now;
+
+ /* This should never be called when we have any pending URXE. */
+ assert(ossl_list_urxe_head(&demux->urx_pending) == NULL);
+ assert(urxe->demux_state == URXE_DEMUX_STATE_FREE);
+
+ if (demux->net_bio == NULL)
+ /*
+ * If no BIO is plugged in, treat this as no datagram being available.
+ */
+ return QUIC_DEMUX_PUMP_RES_TRANSIENT_FAIL;
+
+ /*
+ * Opportunistically receive as many messages as possible in a single
+ * syscall, determined by how many free URXEs are available.
+ */
+ for (i = 0; i < (ossl_ssize_t)OSSL_NELEM(msg);
+ ++i, urxe = ossl_list_urxe_next(urxe)) {
+ if (urxe == NULL) {
+ /* We need at least one URXE to receive into. */
+ if (!ossl_assert(i > 0))
+ return QUIC_DEMUX_PUMP_RES_PERMANENT_FAIL;
+
+ break;
+ }
+
+ /* Ensure the URXE is big enough. */
+ urxe = demux_reserve_urxe(demux, urxe, demux->mtu);
+ if (urxe == NULL)
+ /* Allocation error, fail. */
+ return QUIC_DEMUX_PUMP_RES_PERMANENT_FAIL;
+
+ /* Ensure we zero any fields added to BIO_MSG at a later date. */
+ memset(&msg[i], 0, sizeof(BIO_MSG));
+ msg[i].data = ossl_quic_urxe_data(urxe);
+ msg[i].data_len = urxe->alloc_len;
+ msg[i].peer = &urxe->peer;
+ BIO_ADDR_clear(&urxe->peer);
+ if (demux->use_local_addr)
+ msg[i].local = &urxe->local;
+ else
+ BIO_ADDR_clear(&urxe->local);
+ }
+
+ ERR_set_mark();
+ if (!BIO_recvmmsg(demux->net_bio, msg, sizeof(BIO_MSG), i, 0, &rd)) {
+ if (BIO_err_is_non_fatal(ERR_peek_last_error())) {
+ /* Transient error, clear the error and stop. */
+ ERR_pop_to_mark();
+ return QUIC_DEMUX_PUMP_RES_TRANSIENT_FAIL;
+ } else {
+ /* Non-transient error, do not clear the error. */
+ ERR_clear_last_mark();
+ return QUIC_DEMUX_PUMP_RES_PERMANENT_FAIL;
+ }
+ }
+
+ ERR_clear_last_mark();
+ now = demux->now != NULL ? demux->now(demux->now_arg) : ossl_time_zero();
+
+ urxe = ossl_list_urxe_head(&demux->urx_free);
+ for (i = 0; i < rd; ++i, urxe = unext) {
+ unext = ossl_list_urxe_next(urxe);
+ /* Set URXE with actual length of received datagram. */
+ urxe->data_len = msg[i].data_len;
+ /* Time we received datagram. */
+ urxe->time = now;
+ urxe->datagram_id = demux->next_datagram_id++;
+ /* Move from free list to pending list. */
+ ossl_list_urxe_remove(&demux->urx_free, urxe);
+ ossl_list_urxe_insert_tail(&demux->urx_pending, urxe);
+ urxe->demux_state = URXE_DEMUX_STATE_PENDING;
+ }
+
+ return QUIC_DEMUX_PUMP_RES_OK;
+}
+
+/* Extract destination connection ID from the first packet in a datagram. */
+static int demux_identify_conn_id(QUIC_DEMUX *demux,
+ QUIC_URXE *e,
+ QUIC_CONN_ID *dst_conn_id)
+{
+ return ossl_quic_wire_get_pkt_hdr_dst_conn_id(ossl_quic_urxe_data(e),
+ e->data_len,
+ demux->short_conn_id_len,
+ dst_conn_id);
+}
+
+/*
+ * Process a single pending URXE.
+ * Returns 1 on success, 0 on failure.
+ */
+static int demux_process_pending_urxe(QUIC_DEMUX *demux, QUIC_URXE *e)
+{
+ QUIC_CONN_ID dst_conn_id;
+ int dst_conn_id_ok = 0;
+
+ /* The next URXE we process should be at the head of the pending list. */
+ if (!ossl_assert(e == ossl_list_urxe_head(&demux->urx_pending)))
+ return 0;
+
+ assert(e->demux_state == URXE_DEMUX_STATE_PENDING);
+
+ /* Determine the DCID of the first packet in the datagram. */
+ dst_conn_id_ok = demux_identify_conn_id(demux, e, &dst_conn_id);
+
+ ossl_list_urxe_remove(&demux->urx_pending, e);
+ if (demux->default_cb != NULL) {
+ /*
+ * Pass to default handler for routing. The URXE now belongs to the
+ * callback.
+ */
+ e->demux_state = URXE_DEMUX_STATE_ISSUED;
+ demux->default_cb(e, demux->default_cb_arg,
+ dst_conn_id_ok ? &dst_conn_id : NULL);
+ } else {
+ /* Discard. */
+ ossl_list_urxe_insert_tail(&demux->urx_free, e);
+ e->demux_state = URXE_DEMUX_STATE_FREE;
+ }
+
+ return 1; /* keep processing pending URXEs */
+}
+
+/* Process pending URXEs to generate callbacks. */
+static int demux_process_pending_urxl(QUIC_DEMUX *demux)
+{
+ QUIC_URXE *e;
+ int ret;
+
+ while ((e = ossl_list_urxe_head(&demux->urx_pending)) != NULL)
+ if ((ret = demux_process_pending_urxe(demux, e)) <= 0)
+ return ret;
+
+ return 1;
+}
+
+/*
+ * Drain the pending URXE list, processing any pending URXEs by making their
+ * callbacks. If no URXEs are pending, a network read is attempted first.
+ */
+int ossl_quic_demux_pump(QUIC_DEMUX *demux)
+{
+ int ret;
+
+ if (ossl_list_urxe_head(&demux->urx_pending) == NULL) {
+ ret = demux_ensure_free_urxe(demux, DEMUX_MAX_MSGS_PER_CALL);
+ if (ret != 1)
+ return QUIC_DEMUX_PUMP_RES_PERMANENT_FAIL;
+
+ ret = demux_recv(demux);
+ if (ret != QUIC_DEMUX_PUMP_RES_OK)
+ return ret;
+
+ /*
+ * If demux_recv returned successfully, we should always have something.
+ */
+ assert(ossl_list_urxe_head(&demux->urx_pending) != NULL);
+ }
+
+ if ((ret = demux_process_pending_urxl(demux)) <= 0)
+ return QUIC_DEMUX_PUMP_RES_PERMANENT_FAIL;
+
+ return QUIC_DEMUX_PUMP_RES_OK;
+}
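+
+/*
+ * Illustrative caller loop (not part of this API): a user of the demuxer
+ * would typically pump until no further datagrams are available, e.g.
+ *
+ *     int res;
+ *
+ *     do
+ *         res = ossl_quic_demux_pump(demux);
+ *     while (res == QUIC_DEMUX_PUMP_RES_OK);
+ *
+ * treating TRANSIENT_FAIL as "try again later" and PERMANENT_FAIL as fatal.
+ */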
+
+/* Artificially inject a packet into the demuxer for testing purposes. */
+int ossl_quic_demux_inject(QUIC_DEMUX *demux,
+ const unsigned char *buf,
+ size_t buf_len,
+ const BIO_ADDR *peer,
+ const BIO_ADDR *local)
+{
+ int ret;
+ QUIC_URXE *urxe;
+
+ ret = demux_ensure_free_urxe(demux, 1);
+ if (ret != 1)
+ return 0;
+
+ urxe = ossl_list_urxe_head(&demux->urx_free);
+
+ assert(urxe->demux_state == URXE_DEMUX_STATE_FREE);
+
+ urxe = demux_reserve_urxe(demux, urxe, buf_len);
+ if (urxe == NULL)
+ return 0;
+
+ memcpy(ossl_quic_urxe_data(urxe), buf, buf_len);
+ urxe->data_len = buf_len;
+
+ if (peer != NULL)
+ urxe->peer = *peer;
+ else
+ BIO_ADDR_clear(&urxe->peer);
+
+ if (local != NULL)
+ urxe->local = *local;
+ else
+ BIO_ADDR_clear(&urxe->local);
+
+ urxe->time
+ = demux->now != NULL ? demux->now(demux->now_arg) : ossl_time_zero();
+
+ /* Move from free list to pending list. */
+ ossl_list_urxe_remove(&demux->urx_free, urxe);
+ urxe->datagram_id = demux->next_datagram_id++;
+ ossl_list_urxe_insert_tail(&demux->urx_pending, urxe);
+ urxe->demux_state = URXE_DEMUX_STATE_PENDING;
+
+ return demux_process_pending_urxl(demux) > 0;
+}
+
+/* Called by our user to return a URXE to the free list. */
+void ossl_quic_demux_release_urxe(QUIC_DEMUX *demux,
+ QUIC_URXE *e)
+{
+ assert(ossl_list_urxe_prev(e) == NULL && ossl_list_urxe_next(e) == NULL);
+ assert(e->demux_state == URXE_DEMUX_STATE_ISSUED);
+ ossl_list_urxe_insert_tail(&demux->urx_free, e);
+ e->demux_state = URXE_DEMUX_STATE_FREE;
+}
+
+void ossl_quic_demux_reinject_urxe(QUIC_DEMUX *demux,
+ QUIC_URXE *e)
+{
+ assert(ossl_list_urxe_prev(e) == NULL && ossl_list_urxe_next(e) == NULL);
+ assert(e->demux_state == URXE_DEMUX_STATE_ISSUED);
+ ossl_list_urxe_insert_head(&demux->urx_pending, e);
+ e->demux_state = URXE_DEMUX_STATE_PENDING;
+}
+
+int ossl_quic_demux_has_pending(const QUIC_DEMUX *demux)
+{
+ return ossl_list_urxe_head(&demux->urx_pending) != NULL;
+}
diff --git a/crypto/openssl/ssl/quic/quic_engine.c b/crypto/openssl/ssl/quic/quic_engine.c
new file mode 100644
index 000000000000..e933d8548f47
--- /dev/null
+++ b/crypto/openssl/ssl/quic/quic_engine.c
@@ -0,0 +1,194 @@
+/*
+ * Copyright 2023-2025 The OpenSSL Project Authors. All Rights Reserved.
+ *
+ * Licensed under the Apache License 2.0 (the "License"). You may not use
+ * this file except in compliance with the License. You can obtain a copy
+ * in the file LICENSE in the source distribution or at
+ * https://www.openssl.org/source/license.html
+ */
+
+#include "internal/quic_engine.h"
+#include "internal/quic_port.h"
+#include "quic_engine_local.h"
+#include "quic_port_local.h"
+#include "../ssl_local.h"
+
+/*
+ * QUIC Engine
+ * ===========
+ */
+static int qeng_init(QUIC_ENGINE *qeng, uint64_t reactor_flags);
+static void qeng_cleanup(QUIC_ENGINE *qeng);
+static void qeng_tick(QUIC_TICK_RESULT *res, void *arg, uint32_t flags);
+
+DEFINE_LIST_OF_IMPL(port, QUIC_PORT);
+
+QUIC_ENGINE *ossl_quic_engine_new(const QUIC_ENGINE_ARGS *args)
+{
+ QUIC_ENGINE *qeng;
+
+ if ((qeng = OPENSSL_zalloc(sizeof(QUIC_ENGINE))) == NULL)
+ return NULL;
+
+ qeng->libctx = args->libctx;
+ qeng->propq = args->propq;
+ qeng->mutex = args->mutex;
+
+ if (!qeng_init(qeng, args->reactor_flags)) {
+ OPENSSL_free(qeng);
+ return NULL;
+ }
+
+ return qeng;
+}
+
+void ossl_quic_engine_free(QUIC_ENGINE *qeng)
+{
+ if (qeng == NULL)
+ return;
+
+ qeng_cleanup(qeng);
+ OPENSSL_free(qeng);
+}
+
+static int qeng_init(QUIC_ENGINE *qeng, uint64_t reactor_flags)
+{
+ return ossl_quic_reactor_init(&qeng->rtor, qeng_tick, qeng,
+ qeng->mutex,
+ ossl_time_zero(), reactor_flags);
+}
+
+static void qeng_cleanup(QUIC_ENGINE *qeng)
+{
+ assert(ossl_list_port_num(&qeng->port_list) == 0);
+ ossl_quic_reactor_cleanup(&qeng->rtor);
+}
+
+QUIC_REACTOR *ossl_quic_engine_get0_reactor(QUIC_ENGINE *qeng)
+{
+ return &qeng->rtor;
+}
+
+CRYPTO_MUTEX *ossl_quic_engine_get0_mutex(QUIC_ENGINE *qeng)
+{
+ return qeng->mutex;
+}
+
+OSSL_TIME ossl_quic_engine_get_time(QUIC_ENGINE *qeng)
+{
+ if (qeng->now_cb == NULL)
+ return ossl_time_now();
+
+ return qeng->now_cb(qeng->now_cb_arg);
+}
+
+OSSL_TIME ossl_quic_engine_make_real_time(QUIC_ENGINE *qeng, OSSL_TIME tm)
+{
+ OSSL_TIME offset;
+
+ if (qeng->now_cb != NULL
+ && !ossl_time_is_zero(tm)
+ && !ossl_time_is_infinite(tm)) {
+
+ offset = qeng->now_cb(qeng->now_cb_arg);
+
+ /* If tm is earlier than offset then tm will end up as "now" */
+ tm = ossl_time_add(ossl_time_subtract(tm, offset), ossl_time_now());
+ }
+
+ return tm;
+}
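+
+/*
+ * Illustrative usage (hypothetical test harness code): a fake clock can be
+ * installed with ossl_quic_engine_set_time_cb() and deadlines mapped back to
+ * real time, e.g.
+ *
+ *     ossl_quic_engine_set_time_cb(qeng, fake_now, &fake_clock);
+ *     real_deadline = ossl_quic_engine_make_real_time(qeng, fake_deadline);
+ *
+ * where fake_now() and fake_clock are helpers owned by the harness.
+ */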
+
+void ossl_quic_engine_set_time_cb(QUIC_ENGINE *qeng,
+ OSSL_TIME (*now_cb)(void *arg),
+ void *now_cb_arg)
+{
+ qeng->now_cb = now_cb;
+ qeng->now_cb_arg = now_cb_arg;
+}
+
+void ossl_quic_engine_set_inhibit_tick(QUIC_ENGINE *qeng, int inhibit)
+{
+ qeng->inhibit_tick = (inhibit != 0);
+}
+
+OSSL_LIB_CTX *ossl_quic_engine_get0_libctx(QUIC_ENGINE *qeng)
+{
+ return qeng->libctx;
+}
+
+const char *ossl_quic_engine_get0_propq(QUIC_ENGINE *qeng)
+{
+ return qeng->propq;
+}
+
+void ossl_quic_engine_update_poll_descriptors(QUIC_ENGINE *qeng, int force)
+{
+ QUIC_PORT *port;
+
+ /*
+ * TODO(QUIC MULTIPORT): The implementation of
+ * ossl_quic_port_update_poll_descriptors assumes an engine only ever has a
+ * single port for now due to reactor limitations. This limitation will be
+ * removed in future.
+ *
+ * TODO(QUIC MULTIPORT): Consider only iterating the port list when dirty at
+ * the engine level in future when we can have multiple ports. This is not
+ * important currently as the port list has a single entry.
+ */
+ OSSL_LIST_FOREACH(port, port, &qeng->port_list)
+ ossl_quic_port_update_poll_descriptors(port, force);
+}
+
+/*
+ * QUIC Engine: Child Object Lifecycle Management
+ * ==============================================
+ */
+
+QUIC_PORT *ossl_quic_engine_create_port(QUIC_ENGINE *qeng,
+ const QUIC_PORT_ARGS *args)
+{
+ QUIC_PORT_ARGS largs = *args;
+
+ if (ossl_list_port_num(&qeng->port_list) > 0)
+ /* TODO(QUIC MULTIPORT): We currently support only one port. */
+ return NULL;
+
+ if (largs.engine != NULL)
+ return NULL;
+
+ largs.engine = qeng;
+ return ossl_quic_port_new(&largs);
+}
+
+/*
+ * QUIC Engine: Ticker-Mutator
+ * ===========================
+ */
+
+/*
+ * The central ticker function called by the reactor. This does everything, or
+ * at least everything network I/O related. Best effort - not allowed to fail
+ * "loudly".
+ */
+static void qeng_tick(QUIC_TICK_RESULT *res, void *arg, uint32_t flags)
+{
+ QUIC_ENGINE *qeng = arg;
+ QUIC_PORT *port;
+
+ res->net_read_desired = 0;
+ res->net_write_desired = 0;
+ res->notify_other_threads = 0;
+ res->tick_deadline = ossl_time_infinite();
+
+ if (qeng->inhibit_tick)
+ return;
+
+ /* Iterate through all ports and service them. */
+ OSSL_LIST_FOREACH(port, port, &qeng->port_list) {
+ QUIC_TICK_RESULT subr = {0};
+
+ ossl_quic_port_subtick(port, &subr, flags);
+ ossl_quic_tick_result_merge_into(res, &subr);
+ }
+}
diff --git a/crypto/openssl/ssl/quic/quic_engine_local.h b/crypto/openssl/ssl/quic/quic_engine_local.h
new file mode 100644
index 000000000000..280fd31dd7c1
--- /dev/null
+++ b/crypto/openssl/ssl/quic/quic_engine_local.h
@@ -0,0 +1,59 @@
+/*
+ * Copyright 2023 The OpenSSL Project Authors. All Rights Reserved.
+ *
+ * Licensed under the Apache License 2.0 (the "License"). You may not use
+ * this file except in compliance with the License. You can obtain a copy
+ * in the file LICENSE in the source distribution or at
+ * https://www.openssl.org/source/license.html
+ */
+
+#ifndef OSSL_QUIC_ENGINE_LOCAL_H
+# define OSSL_QUIC_ENGINE_LOCAL_H
+
+# include "internal/quic_engine.h"
+# include "internal/quic_reactor.h"
+
+# ifndef OPENSSL_NO_QUIC
+
+/*
+ * QUIC Engine Structure
+ * =====================
+ *
+ * QUIC engine internals. It is intended that only the QUIC_ENGINE, QUIC_PORT
+ * and QUIC_CHANNEL implementations be allowed to access this structure
+ * directly.
+ *
+ * Other components should not include this header.
+ */
+DECLARE_LIST_OF(port, QUIC_PORT);
+
+struct quic_engine_st {
+ /* All objects in a QUIC event domain share the same (libctx, propq). */
+ OSSL_LIB_CTX *libctx;
+ const char *propq;
+
+ /*
+ * Master synchronisation mutex for the entire QUIC event domain. Used for
+ * thread assisted mode synchronisation. We don't own this; the instantiator
+ * of the engine passes it to us and is responsible for freeing it after
+ * engine destruction.
+ */
+ CRYPTO_MUTEX *mutex;
+
+ /* Callback used to get the current time. */
+ OSSL_TIME (*now_cb)(void *arg);
+ void *now_cb_arg;
+
+ /* Asynchronous I/O reactor. */
+ QUIC_REACTOR rtor;
+
+ /* List of all child ports. */
+ OSSL_LIST(port) port_list;
+
+ /* Inhibit tick for testing purposes? */
+ unsigned int inhibit_tick : 1;
+};
+
+# endif
+
+#endif
diff --git a/crypto/openssl/ssl/quic/quic_fc.c b/crypto/openssl/ssl/quic/quic_fc.c
new file mode 100644
index 000000000000..64ef31780c22
--- /dev/null
+++ b/crypto/openssl/ssl/quic/quic_fc.c
@@ -0,0 +1,411 @@
+/*
+ * Copyright 2022-2024 The OpenSSL Project Authors. All Rights Reserved.
+ *
+ * Licensed under the Apache License 2.0 (the "License"). You may not use
+ * this file except in compliance with the License. You can obtain a copy
+ * in the file LICENSE in the source distribution or at
+ * https://www.openssl.org/source/license.html
+ */
+
+#include "internal/quic_fc.h"
+#include "internal/quic_error.h"
+#include "internal/common.h"
+#include "internal/safe_math.h"
+#include <assert.h>
+
+OSSL_SAFE_MATH_UNSIGNED(uint64_t, uint64_t)
+
+/*
+ * TX Flow Controller (TXFC)
+ * =========================
+ */
+
+int ossl_quic_txfc_init(QUIC_TXFC *txfc, QUIC_TXFC *conn_txfc)
+{
+ if (conn_txfc != NULL && conn_txfc->parent != NULL)
+ return 0;
+
+ txfc->swm = 0;
+ txfc->cwm = 0;
+ txfc->parent = conn_txfc;
+ txfc->has_become_blocked = 0;
+ return 1;
+}
+
+QUIC_TXFC *ossl_quic_txfc_get_parent(QUIC_TXFC *txfc)
+{
+ return txfc->parent;
+}
+
+int ossl_quic_txfc_bump_cwm(QUIC_TXFC *txfc, uint64_t cwm)
+{
+ if (cwm <= txfc->cwm)
+ return 0;
+
+ txfc->cwm = cwm;
+ return 1;
+}
+
+uint64_t ossl_quic_txfc_get_credit_local(QUIC_TXFC *txfc, uint64_t consumed)
+{
+ assert((txfc->swm + consumed) <= txfc->cwm);
+ return txfc->cwm - (consumed + txfc->swm);
+}
+
+uint64_t ossl_quic_txfc_get_credit(QUIC_TXFC *txfc, uint64_t consumed)
+{
+ uint64_t r, conn_r;
+
+ r = ossl_quic_txfc_get_credit_local(txfc, 0);
+
+ if (txfc->parent != NULL) {
+ assert(txfc->parent->parent == NULL);
+ conn_r = ossl_quic_txfc_get_credit_local(txfc->parent, consumed);
+ if (conn_r < r)
+ r = conn_r;
+ }
+
+ return r;
+}
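+
+/*
+ * (Editorial illustration, not upstream code.) Worked example of the credit
+ * calculation above: with a stream-level cwm = 100, swm = 70 and a
+ * connection-level cwm = 500, swm = 460, the stream credit is 30 and the
+ * connection credit is 40, so ossl_quic_txfc_get_credit() returns
+ * min(30, 40) = 30. The consumed argument lets the caller account for bytes
+ * already queued against the connection-level controller by other streams
+ * in the same packet before they are committed via consume_credit().
+ */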
+
+int ossl_quic_txfc_consume_credit_local(QUIC_TXFC *txfc, uint64_t num_bytes)
+{
+ int ok = 1;
+ uint64_t credit = ossl_quic_txfc_get_credit_local(txfc, 0);
+
+ if (num_bytes > credit) {
+ ok = 0;
+ num_bytes = credit;
+ }
+
+ if (num_bytes > 0 && num_bytes == credit)
+ txfc->has_become_blocked = 1;
+
+ txfc->swm += num_bytes;
+ return ok;
+}
+
+int ossl_quic_txfc_consume_credit(QUIC_TXFC *txfc, uint64_t num_bytes)
+{
+ int ok = ossl_quic_txfc_consume_credit_local(txfc, num_bytes);
+
+ if (txfc->parent != NULL) {
+ assert(txfc->parent->parent == NULL);
+ if (!ossl_quic_txfc_consume_credit_local(txfc->parent, num_bytes))
+ return 0;
+ }
+
+ return ok;
+}
+
+int ossl_quic_txfc_has_become_blocked(QUIC_TXFC *txfc, int clear)
+{
+ int r = txfc->has_become_blocked;
+
+ if (clear)
+ txfc->has_become_blocked = 0;
+
+ return r;
+}
+
+uint64_t ossl_quic_txfc_get_cwm(QUIC_TXFC *txfc)
+{
+ return txfc->cwm;
+}
+
+uint64_t ossl_quic_txfc_get_swm(QUIC_TXFC *txfc)
+{
+ return txfc->swm;
+}
+
+/*
+ * RX Flow Controller (RXFC)
+ * =========================
+ */
+
+int ossl_quic_rxfc_init(QUIC_RXFC *rxfc, QUIC_RXFC *conn_rxfc,
+ uint64_t initial_window_size,
+ uint64_t max_window_size,
+ OSSL_TIME (*now)(void *now_arg),
+ void *now_arg)
+{
+ if (conn_rxfc != NULL && conn_rxfc->parent != NULL)
+ return 0;
+
+ rxfc->swm = 0;
+ rxfc->cwm = initial_window_size;
+ rxfc->rwm = 0;
+ rxfc->esrwm = 0;
+ rxfc->hwm = 0;
+ rxfc->cur_window_size = initial_window_size;
+ rxfc->max_window_size = max_window_size;
+ rxfc->parent = conn_rxfc;
+ rxfc->error_code = 0;
+ rxfc->has_cwm_changed = 0;
+ rxfc->epoch_start = ossl_time_zero();
+ rxfc->now = now;
+ rxfc->now_arg = now_arg;
+ rxfc->is_fin = 0;
+ rxfc->standalone = 0;
+ return 1;
+}
+
+int ossl_quic_rxfc_init_standalone(QUIC_RXFC *rxfc,
+ uint64_t initial_window_size,
+ OSSL_TIME (*now)(void *arg),
+ void *now_arg)
+{
+ if (!ossl_quic_rxfc_init(rxfc, NULL,
+ initial_window_size, initial_window_size,
+ now, now_arg))
+ return 0;
+
+ rxfc->standalone = 1;
+ return 1;
+}
+
+QUIC_RXFC *ossl_quic_rxfc_get_parent(QUIC_RXFC *rxfc)
+{
+ return rxfc->parent;
+}
+
+void ossl_quic_rxfc_set_max_window_size(QUIC_RXFC *rxfc,
+ size_t max_window_size)
+{
+ rxfc->max_window_size = max_window_size;
+}
+
+static void rxfc_start_epoch(QUIC_RXFC *rxfc)
+{
+ rxfc->epoch_start = rxfc->now(rxfc->now_arg);
+ rxfc->esrwm = rxfc->rwm;
+}
+
+static int on_rx_controlled_bytes(QUIC_RXFC *rxfc, uint64_t num_bytes)
+{
+ int ok = 1;
+ uint64_t credit = rxfc->cwm - rxfc->swm;
+
+ if (num_bytes > credit) {
+ ok = 0;
+ num_bytes = credit;
+ rxfc->error_code = OSSL_QUIC_ERR_FLOW_CONTROL_ERROR;
+ }
+
+ rxfc->swm += num_bytes;
+ return ok;
+}
+
+int ossl_quic_rxfc_on_rx_stream_frame(QUIC_RXFC *rxfc, uint64_t end, int is_fin)
+{
+ uint64_t delta;
+
+ if (!rxfc->standalone && rxfc->parent == NULL)
+ return 0;
+
+ if (rxfc->is_fin && ((is_fin && rxfc->hwm != end) || end > rxfc->hwm)) {
+ /* Stream size cannot change after the stream is finished */
+ rxfc->error_code = OSSL_QUIC_ERR_FINAL_SIZE_ERROR;
+ return 1; /* not a caller error */
+ }
+
+ if (is_fin)
+ rxfc->is_fin = 1;
+
+ if (end > rxfc->hwm) {
+ delta = end - rxfc->hwm;
+ rxfc->hwm = end;
+
+ on_rx_controlled_bytes(rxfc, delta); /* result ignored */
+ if (rxfc->parent != NULL)
+ on_rx_controlled_bytes(rxfc->parent, delta); /* result ignored */
+ } else if (end < rxfc->hwm && is_fin) {
+ rxfc->error_code = OSSL_QUIC_ERR_FINAL_SIZE_ERROR;
+ return 1; /* not a caller error */
+ }
+
+ return 1;
+}
+
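+/*
+ * (Editorial illustration, not upstream code.) Example of the final size
+ * rules above: after a frame ending at offset 1000 carrying FIN, hwm = 1000
+ * and is_fin = 1. A later frame ending at 1200 (growing the stream), or one
+ * ending at 900 carrying FIN (shrinking the final size), both set
+ * OSSL_QUIC_ERR_FINAL_SIZE_ERROR; only retransmissions consistent with a
+ * final size of 1000 are accepted.
+ */
+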
+/* threshold = 3/4 */
+#define WINDOW_THRESHOLD_NUM 3
+#define WINDOW_THRESHOLD_DEN 4
+
+static int rxfc_cwm_bump_desired(QUIC_RXFC *rxfc)
+{
+ int err = 0;
+ uint64_t window_rem = rxfc->cwm - rxfc->rwm;
+ uint64_t threshold
+ = safe_muldiv_uint64_t(rxfc->cur_window_size,
+ WINDOW_THRESHOLD_NUM, WINDOW_THRESHOLD_DEN, &err);
+
+ if (err)
+ /*
+         * An extremely large window should never occur, but if it does, just
+         * use half the window as the threshold.
+ */
+ threshold = rxfc->cur_window_size / 2;
+
+ /*
+ * No point emitting a new MAX_STREAM_DATA frame if the stream has a final
+ * size.
+ */
+ return !rxfc->is_fin && window_rem <= threshold;
+}
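+
+/*
+ * (Editorial illustration, not upstream code.) Example: with
+ * cur_window_size = 64KiB the threshold is 48KiB. If cwm = 128KiB and
+ * rwm = 96KiB then window_rem = 32KiB <= 48KiB, so a window update is
+ * desired and a new MAX_STREAM_DATA (or MAX_DATA) frame should be emitted.
+ */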
+
+static int rxfc_should_bump_window_size(QUIC_RXFC *rxfc, OSSL_TIME rtt)
+{
+ /*
+ * dt: time since start of epoch
+ * b: bytes of window consumed since start of epoch
+ * dw: proportion of window consumed since start of epoch
+ * T_window: time it will take to use up the entire window, based on dt, dw
+ * RTT: The current estimated RTT.
+ *
+ * b = rwm - esrwm
+ * dw = b / window_size
+ * T_window = dt / dw
+ * T_window = dt / (b / window_size)
+ * T_window = (dt * window_size) / b
+ *
+ * We bump the window size if T_window < 4 * RTT.
+ *
+ * We leave the division by b on the LHS to reduce the risk of overflowing
+ * our 64-bit nanosecond representation, which will afford plenty of
+ * precision left over after the division anyway.
+ */
+ uint64_t b = rxfc->rwm - rxfc->esrwm;
+ OSSL_TIME now, dt, t_window;
+
+ if (b == 0)
+ return 0;
+
+ now = rxfc->now(rxfc->now_arg);
+ dt = ossl_time_subtract(now, rxfc->epoch_start);
+ t_window = ossl_time_muldiv(dt, rxfc->cur_window_size, b);
+
+ return ossl_time_compare(t_window, ossl_time_multiply(rtt, 4)) < 0;
+}
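+
+/*
+ * (Editorial illustration, not upstream code.) Plugging numbers into the
+ * formula above: window_size = 64KiB, with b = 48KiB consumed since the
+ * epoch started dt = 100ms ago, gives T_window = (100ms * 64KiB) / 48KiB,
+ * i.e. about 133ms. With RTT = 40ms, 4 * RTT = 160ms > 133ms: the window
+ * would be exhausted in under four round trips, so it should be doubled.
+ */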
+
+static void rxfc_adjust_window_size(QUIC_RXFC *rxfc, uint64_t min_window_size,
+ OSSL_TIME rtt)
+{
+    /* Are we sending window updates too often? If so, the window is too small; grow it. */
+ uint64_t new_window_size;
+
+ new_window_size = rxfc->cur_window_size;
+
+ if (rxfc_should_bump_window_size(rxfc, rtt))
+ new_window_size *= 2;
+
+ if (new_window_size < min_window_size)
+ new_window_size = min_window_size;
+ if (new_window_size > rxfc->max_window_size) /* takes precedence over min size */
+ new_window_size = rxfc->max_window_size;
+
+ rxfc->cur_window_size = new_window_size;
+ rxfc_start_epoch(rxfc);
+}
+
+static void rxfc_update_cwm(QUIC_RXFC *rxfc, uint64_t min_window_size,
+ OSSL_TIME rtt)
+{
+ uint64_t new_cwm;
+
+ if (!rxfc_cwm_bump_desired(rxfc))
+ return;
+
+ rxfc_adjust_window_size(rxfc, min_window_size, rtt);
+
+ new_cwm = rxfc->rwm + rxfc->cur_window_size;
+ if (new_cwm > rxfc->cwm) {
+ rxfc->cwm = new_cwm;
+ rxfc->has_cwm_changed = 1;
+ }
+}
+
+static int rxfc_on_retire(QUIC_RXFC *rxfc, uint64_t num_bytes,
+ uint64_t min_window_size,
+ OSSL_TIME rtt)
+{
+ if (ossl_time_is_zero(rxfc->epoch_start))
+ /* This happens when we retire our first ever bytes. */
+ rxfc_start_epoch(rxfc);
+
+ rxfc->rwm += num_bytes;
+ rxfc_update_cwm(rxfc, min_window_size, rtt);
+ return 1;
+}
+
+int ossl_quic_rxfc_on_retire(QUIC_RXFC *rxfc,
+ uint64_t num_bytes,
+ OSSL_TIME rtt)
+{
+ if (rxfc->parent == NULL && !rxfc->standalone)
+ return 0;
+
+ if (num_bytes == 0)
+ return 1;
+
+ if (rxfc->rwm + num_bytes > rxfc->swm)
+ /* Impossible for us to retire more bytes than we have received. */
+ return 0;
+
+ rxfc_on_retire(rxfc, num_bytes, 0, rtt);
+
+ if (!rxfc->standalone)
+ rxfc_on_retire(rxfc->parent, num_bytes, rxfc->cur_window_size, rtt);
+
+ return 1;
+}
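+
+/*
+ * (Editorial sketch, not upstream code; the surrounding glue is
+ * hypothetical.) Typical use of an RXFC by the rest of the stack:
+ */
+#if 0
+static void rxfc_usage_sketch(QUIC_RXFC *stream_rxfc, uint64_t frame_end,
+                              int is_fin, uint64_t bytes_read, OSSL_TIME rtt)
+{
+    /* On STREAM frame receipt (the connection-level RXFC is the parent): */
+    ossl_quic_rxfc_on_rx_stream_frame(stream_rxfc, frame_end, is_fin);
+    if (ossl_quic_rxfc_get_error(stream_rxfc, 0) != 0) {
+        /* raise FLOW_CONTROL_ERROR/FINAL_SIZE_ERROR as a connection error */
+    }
+
+    /* After the application consumes bytes from the receive buffer: */
+    ossl_quic_rxfc_on_retire(stream_rxfc, bytes_read, rtt);
+    if (ossl_quic_rxfc_has_cwm_changed(stream_rxfc, 1)) {
+        /* queue a MAX_STREAM_DATA frame advertising the new cwm */
+    }
+}
+#endif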
+
+uint64_t ossl_quic_rxfc_get_cwm(const QUIC_RXFC *rxfc)
+{
+ return rxfc->cwm;
+}
+
+uint64_t ossl_quic_rxfc_get_swm(const QUIC_RXFC *rxfc)
+{
+ return rxfc->swm;
+}
+
+uint64_t ossl_quic_rxfc_get_rwm(const QUIC_RXFC *rxfc)
+{
+ return rxfc->rwm;
+}
+
+uint64_t ossl_quic_rxfc_get_credit(const QUIC_RXFC *rxfc)
+{
+ return ossl_quic_rxfc_get_cwm(rxfc) - ossl_quic_rxfc_get_swm(rxfc);
+}
+
+int ossl_quic_rxfc_has_cwm_changed(QUIC_RXFC *rxfc, int clear)
+{
+ int r = rxfc->has_cwm_changed;
+
+ if (clear)
+ rxfc->has_cwm_changed = 0;
+
+ return r;
+}
+
+int ossl_quic_rxfc_get_error(QUIC_RXFC *rxfc, int clear)
+{
+ int r = rxfc->error_code;
+
+ if (clear)
+ rxfc->error_code = 0;
+
+ return r;
+}
+
+int ossl_quic_rxfc_get_final_size(const QUIC_RXFC *rxfc, uint64_t *final_size)
+{
+ if (!rxfc->is_fin)
+ return 0;
+
+ if (final_size != NULL)
+ *final_size = rxfc->hwm;
+
+ return 1;
+}
diff --git a/crypto/openssl/ssl/quic/quic_fifd.c b/crypto/openssl/ssl/quic/quic_fifd.c
new file mode 100644
index 000000000000..1d1bcc11ce63
--- /dev/null
+++ b/crypto/openssl/ssl/quic/quic_fifd.c
@@ -0,0 +1,312 @@
+/*
+ * Copyright 2022-2024 The OpenSSL Project Authors. All Rights Reserved.
+ *
+ * Licensed under the Apache License 2.0 (the "License"). You may not use
+ * this file except in compliance with the License. You can obtain a copy
+ * in the file LICENSE in the source distribution or at
+ * https://www.openssl.org/source/license.html
+ */
+
+#include "internal/quic_fifd.h"
+#include "internal/quic_wire.h"
+#include "internal/qlog_event_helpers.h"
+
+DEFINE_LIST_OF(tx_history, OSSL_ACKM_TX_PKT);
+
+int ossl_quic_fifd_init(QUIC_FIFD *fifd,
+ QUIC_CFQ *cfq,
+ OSSL_ACKM *ackm,
+ QUIC_TXPIM *txpim,
+ /* stream_id is UINT64_MAX for the crypto stream */
+ QUIC_SSTREAM *(*get_sstream_by_id)(uint64_t stream_id,
+ uint32_t pn_space,
+ void *arg),
+ void *get_sstream_by_id_arg,
+ /* stream_id is UINT64_MAX if not applicable */
+ void (*regen_frame)(uint64_t frame_type,
+ uint64_t stream_id,
+ QUIC_TXPIM_PKT *pkt,
+ void *arg),
+ void *regen_frame_arg,
+ void (*confirm_frame)(uint64_t frame_type,
+ uint64_t stream_id,
+ QUIC_TXPIM_PKT *pkt,
+ void *arg),
+ void *confirm_frame_arg,
+ void (*sstream_updated)(uint64_t stream_id,
+ void *arg),
+ void *sstream_updated_arg,
+ QLOG *(*get_qlog_cb)(void *arg),
+ void *get_qlog_cb_arg)
+{
+ if (cfq == NULL || ackm == NULL || txpim == NULL
+ || get_sstream_by_id == NULL || regen_frame == NULL)
+ return 0;
+
+ fifd->cfq = cfq;
+ fifd->ackm = ackm;
+ fifd->txpim = txpim;
+ fifd->get_sstream_by_id = get_sstream_by_id;
+ fifd->get_sstream_by_id_arg = get_sstream_by_id_arg;
+ fifd->regen_frame = regen_frame;
+ fifd->regen_frame_arg = regen_frame_arg;
+ fifd->confirm_frame = confirm_frame;
+ fifd->confirm_frame_arg = confirm_frame_arg;
+ fifd->sstream_updated = sstream_updated;
+ fifd->sstream_updated_arg = sstream_updated_arg;
+ fifd->get_qlog_cb = get_qlog_cb;
+ fifd->get_qlog_cb_arg = get_qlog_cb_arg;
+ return 1;
+}
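+
+/*
+ * (Editorial sketch, not upstream code; the txp_* callback names are
+ * hypothetical.) The TX packetiser is the expected user of this API,
+ * wiring its helpers in roughly this shape:
+ */
+#if 0
+ossl_quic_fifd_init(&fifd, cfq, ackm, txpim,
+                    txp_get_sstream_by_id, txp, /* stream ID -> QUIC_SSTREAM */
+                    txp_regen_frame, txp,       /* requeue a lost frame */
+                    txp_confirm_frame, txp,     /* frame definitely delivered */
+                    txp_sstream_updated, txp,   /* stream has new work */
+                    txp_get_qlog, txp);         /* qlog instance for loss events */
+#endif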
+
+void ossl_quic_fifd_cleanup(QUIC_FIFD *fifd)
+{
+ /* No-op. */
+}
+
+static void on_acked(void *arg)
+{
+ QUIC_TXPIM_PKT *pkt = arg;
+ QUIC_FIFD *fifd = pkt->fifd;
+ const QUIC_TXPIM_CHUNK *chunks = ossl_quic_txpim_pkt_get_chunks(pkt);
+ size_t i, num_chunks = ossl_quic_txpim_pkt_get_num_chunks(pkt);
+ QUIC_SSTREAM *sstream;
+ QUIC_CFQ_ITEM *cfq_item, *cfq_item_next;
+
+ /* STREAM and CRYPTO stream chunks, FINs and stream FC frames */
+ for (i = 0; i < num_chunks; ++i) {
+ sstream = fifd->get_sstream_by_id(chunks[i].stream_id,
+ pkt->ackm_pkt.pkt_space,
+ fifd->get_sstream_by_id_arg);
+ if (sstream == NULL)
+ continue;
+
+ if (chunks[i].end >= chunks[i].start)
+ /* coverity[check_return]: Best effort - we cannot fail here. */
+ ossl_quic_sstream_mark_acked(sstream,
+ chunks[i].start, chunks[i].end);
+
+ if (chunks[i].has_fin && chunks[i].stream_id != UINT64_MAX)
+ ossl_quic_sstream_mark_acked_fin(sstream);
+
+ if (chunks[i].has_stop_sending && chunks[i].stream_id != UINT64_MAX)
+ fifd->confirm_frame(OSSL_QUIC_FRAME_TYPE_STOP_SENDING,
+ chunks[i].stream_id, pkt,
+ fifd->confirm_frame_arg);
+
+ if (chunks[i].has_reset_stream && chunks[i].stream_id != UINT64_MAX)
+ fifd->confirm_frame(OSSL_QUIC_FRAME_TYPE_RESET_STREAM,
+ chunks[i].stream_id, pkt,
+ fifd->confirm_frame_arg);
+
+ if (ossl_quic_sstream_is_totally_acked(sstream))
+ fifd->sstream_updated(chunks[i].stream_id, fifd->sstream_updated_arg);
+ }
+
+ /* GCR */
+ for (cfq_item = pkt->retx_head; cfq_item != NULL; cfq_item = cfq_item_next) {
+ cfq_item_next = cfq_item->pkt_next;
+ ossl_quic_cfq_release(fifd->cfq, cfq_item);
+ }
+
+ ossl_quic_txpim_pkt_release(fifd->txpim, pkt);
+}
+
+static QLOG *fifd_get_qlog(QUIC_FIFD *fifd)
+{
+ if (fifd->get_qlog_cb == NULL)
+ return NULL;
+
+ return fifd->get_qlog_cb(fifd->get_qlog_cb_arg);
+}
+
+static void on_lost(void *arg)
+{
+ QUIC_TXPIM_PKT *pkt = arg;
+ QUIC_FIFD *fifd = pkt->fifd;
+ const QUIC_TXPIM_CHUNK *chunks = ossl_quic_txpim_pkt_get_chunks(pkt);
+ size_t i, num_chunks = ossl_quic_txpim_pkt_get_num_chunks(pkt);
+ QUIC_SSTREAM *sstream;
+ QUIC_CFQ_ITEM *cfq_item, *cfq_item_next;
+ int sstream_updated;
+
+ ossl_qlog_event_recovery_packet_lost(fifd_get_qlog(fifd), pkt);
+
+ /* STREAM and CRYPTO stream chunks, FIN and stream FC frames */
+ for (i = 0; i < num_chunks; ++i) {
+ sstream = fifd->get_sstream_by_id(chunks[i].stream_id,
+ pkt->ackm_pkt.pkt_space,
+ fifd->get_sstream_by_id_arg);
+ if (sstream == NULL)
+ continue;
+
+ sstream_updated = 0;
+
+ if (chunks[i].end >= chunks[i].start) {
+ /*
+ * Note: If the stream is being reset, we do not need to retransmit
+ * old data as this is pointless. In this case this will be handled
+ * by (sstream == NULL) above as the QSM will free the QUIC_SSTREAM
+ * and our call to get_sstream_by_id above will return NULL.
+ */
+ ossl_quic_sstream_mark_lost(sstream,
+ chunks[i].start, chunks[i].end);
+ sstream_updated = 1;
+ }
+
+ if (chunks[i].has_fin && chunks[i].stream_id != UINT64_MAX) {
+ ossl_quic_sstream_mark_lost_fin(sstream);
+ sstream_updated = 1;
+ }
+
+ if (chunks[i].has_stop_sending && chunks[i].stream_id != UINT64_MAX)
+ fifd->regen_frame(OSSL_QUIC_FRAME_TYPE_STOP_SENDING,
+ chunks[i].stream_id, pkt,
+ fifd->regen_frame_arg);
+
+ if (chunks[i].has_reset_stream && chunks[i].stream_id != UINT64_MAX)
+ fifd->regen_frame(OSSL_QUIC_FRAME_TYPE_RESET_STREAM,
+ chunks[i].stream_id, pkt,
+ fifd->regen_frame_arg);
+
+ /*
+ * Inform caller that stream needs an FC frame.
+ *
+         * Note: We could track whether an FC frame was originally sent for the
+         * stream to determine whether it really needs to be regenerated.
+         * However, if loss has occurred, it is probably better to ensure the
+         * peer has up-to-date flow control data for the stream. Given that
+         * these frames are extremely small, we may as well always send one
+         * when handling loss.
+ */
+ fifd->regen_frame(OSSL_QUIC_FRAME_TYPE_MAX_STREAM_DATA,
+ chunks[i].stream_id,
+ pkt,
+ fifd->regen_frame_arg);
+
+ if (sstream_updated && chunks[i].stream_id != UINT64_MAX)
+ fifd->sstream_updated(chunks[i].stream_id,
+ fifd->sstream_updated_arg);
+ }
+
+ /* GCR */
+ for (cfq_item = pkt->retx_head; cfq_item != NULL; cfq_item = cfq_item_next) {
+ cfq_item_next = cfq_item->pkt_next;
+ ossl_quic_cfq_mark_lost(fifd->cfq, cfq_item, UINT32_MAX);
+ }
+
+ /* Regenerate flag frames */
+ if (pkt->had_handshake_done_frame)
+ fifd->regen_frame(OSSL_QUIC_FRAME_TYPE_HANDSHAKE_DONE,
+ UINT64_MAX, pkt,
+ fifd->regen_frame_arg);
+
+ if (pkt->had_max_data_frame)
+ fifd->regen_frame(OSSL_QUIC_FRAME_TYPE_MAX_DATA,
+ UINT64_MAX, pkt,
+ fifd->regen_frame_arg);
+
+ if (pkt->had_max_streams_bidi_frame)
+ fifd->regen_frame(OSSL_QUIC_FRAME_TYPE_MAX_STREAMS_BIDI,
+ UINT64_MAX, pkt,
+ fifd->regen_frame_arg);
+
+ if (pkt->had_max_streams_uni_frame)
+ fifd->regen_frame(OSSL_QUIC_FRAME_TYPE_MAX_STREAMS_UNI,
+ UINT64_MAX, pkt,
+ fifd->regen_frame_arg);
+
+ if (pkt->had_ack_frame)
+ /*
+ * We always use the ACK_WITH_ECN frame type to represent the ACK frame
+ * type in our callback; we assume it is the caller's job to decide
+ * whether it wants to send ECN data or not.
+ */
+ fifd->regen_frame(OSSL_QUIC_FRAME_TYPE_ACK_WITH_ECN,
+ UINT64_MAX, pkt,
+ fifd->regen_frame_arg);
+
+ ossl_quic_txpim_pkt_release(fifd->txpim, pkt);
+}
+
+static void on_discarded(void *arg)
+{
+ QUIC_TXPIM_PKT *pkt = arg;
+ QUIC_FIFD *fifd = pkt->fifd;
+ QUIC_CFQ_ITEM *cfq_item, *cfq_item_next;
+
+ /*
+     * We don't need to do anything to the SSTREAMs for STREAM and CRYPTO
+     * streams, as we assume the caller will clean them up.
+ */
+
+ /* GCR */
+ for (cfq_item = pkt->retx_head; cfq_item != NULL; cfq_item = cfq_item_next) {
+ cfq_item_next = cfq_item->pkt_next;
+ ossl_quic_cfq_release(fifd->cfq, cfq_item);
+ }
+
+ ossl_quic_txpim_pkt_release(fifd->txpim, pkt);
+}
+
+int ossl_quic_fifd_pkt_commit(QUIC_FIFD *fifd, QUIC_TXPIM_PKT *pkt)
+{
+ QUIC_CFQ_ITEM *cfq_item;
+ const QUIC_TXPIM_CHUNK *chunks;
+ size_t i, num_chunks;
+ QUIC_SSTREAM *sstream;
+
+ pkt->fifd = fifd;
+
+ pkt->ackm_pkt.on_lost = on_lost;
+ pkt->ackm_pkt.on_acked = on_acked;
+ pkt->ackm_pkt.on_discarded = on_discarded;
+ pkt->ackm_pkt.cb_arg = pkt;
+
+ ossl_list_tx_history_init_elem(&pkt->ackm_pkt);
+ pkt->ackm_pkt.anext = pkt->ackm_pkt.lnext = NULL;
+
+ /*
+ * Mark the CFQ items which have been added to this packet as having been
+ * transmitted.
+ */
+ for (cfq_item = pkt->retx_head;
+ cfq_item != NULL;
+ cfq_item = cfq_item->pkt_next)
+ ossl_quic_cfq_mark_tx(fifd->cfq, cfq_item);
+
+ /*
+ * Mark the send stream chunks which have been added to the packet as having
+ * been transmitted.
+ */
+ chunks = ossl_quic_txpim_pkt_get_chunks(pkt);
+ num_chunks = ossl_quic_txpim_pkt_get_num_chunks(pkt);
+ for (i = 0; i < num_chunks; ++i) {
+ sstream = fifd->get_sstream_by_id(chunks[i].stream_id,
+ pkt->ackm_pkt.pkt_space,
+ fifd->get_sstream_by_id_arg);
+ if (sstream == NULL)
+ continue;
+
+ if (chunks[i].end >= chunks[i].start
+ && !ossl_quic_sstream_mark_transmitted(sstream,
+ chunks[i].start,
+ chunks[i].end))
+ return 0;
+
+ if (chunks[i].has_fin
+ && !ossl_quic_sstream_mark_transmitted_fin(sstream,
+ chunks[i].end + 1))
+ return 0;
+ }
+
+ /* Inform the ACKM. */
+ return ossl_ackm_on_tx_packet(fifd->ackm, &pkt->ackm_pkt);
+}
+
+void ossl_quic_fifd_set_qlog_cb(QUIC_FIFD *fifd, QLOG *(*get_qlog_cb)(void *arg),
+ void *get_qlog_cb_arg)
+{
+ fifd->get_qlog_cb = get_qlog_cb;
+ fifd->get_qlog_cb_arg = get_qlog_cb_arg;
+}
diff --git a/crypto/openssl/ssl/quic/quic_impl.c b/crypto/openssl/ssl/quic/quic_impl.c
new file mode 100644
index 000000000000..cec05d5bd37b
--- /dev/null
+++ b/crypto/openssl/ssl/quic/quic_impl.c
@@ -0,0 +1,5386 @@
+/*
+ * Copyright 2022-2025 The OpenSSL Project Authors. All Rights Reserved.
+ *
+ * Licensed under the Apache License 2.0 (the "License"). You may not use
+ * this file except in compliance with the License. You can obtain a copy
+ * in the file LICENSE in the source distribution or at
+ * https://www.openssl.org/source/license.html
+ */
+
+#include <openssl/macros.h>
+#include <openssl/objects.h>
+#include <openssl/sslerr.h>
+#include <crypto/rand.h>
+#include "quic_local.h"
+#include "internal/hashfunc.h"
+#include "internal/ssl_unwrap.h"
+#include "internal/quic_tls.h"
+#include "internal/quic_rx_depack.h"
+#include "internal/quic_error.h"
+#include "internal/quic_engine.h"
+#include "internal/quic_port.h"
+#include "internal/quic_reactor_wait_ctx.h"
+#include "internal/time.h"
+
+typedef struct qctx_st QCTX;
+
+static void qc_cleanup(QUIC_CONNECTION *qc, int have_lock);
+static void aon_write_finish(QUIC_XSO *xso);
+static int create_channel(QUIC_CONNECTION *qc, SSL_CTX *ctx);
+static QUIC_XSO *create_xso_from_stream(QUIC_CONNECTION *qc, QUIC_STREAM *qs);
+static QUIC_CONNECTION *create_qc_from_incoming_conn(QUIC_LISTENER *ql, QUIC_CHANNEL *ch);
+static int qc_try_create_default_xso_for_write(QCTX *ctx);
+static int qc_wait_for_default_xso_for_read(QCTX *ctx, int peek);
+static void qctx_lock(QCTX *qctx);
+static void qctx_unlock(QCTX *qctx);
+static void qctx_lock_for_io(QCTX *ctx);
+static int quic_do_handshake(QCTX *ctx);
+static void qc_update_reject_policy(QUIC_CONNECTION *qc);
+static void qc_touch_default_xso(QUIC_CONNECTION *qc);
+static void qc_set_default_xso(QUIC_CONNECTION *qc, QUIC_XSO *xso, int touch);
+static void qc_set_default_xso_keep_ref(QUIC_CONNECTION *qc, QUIC_XSO *xso,
+ int touch, QUIC_XSO **old_xso);
+static SSL *quic_conn_stream_new(QCTX *ctx, uint64_t flags, int need_lock);
+static int quic_validate_for_write(QUIC_XSO *xso, int *err);
+static int quic_mutation_allowed(QUIC_CONNECTION *qc, int req_active);
+static void qctx_maybe_autotick(QCTX *ctx);
+static int qctx_should_autotick(QCTX *ctx);
+
+/*
+ * QCTX is a utility structure which provides information we commonly wish to
+ * unwrap upon an API call being dispatched to us, namely:
+ *
+ * - a pointer to the QUIC_CONNECTION (regardless of whether a QCSO or QSSO
+ * was passed);
+ * - a pointer to any applicable QUIC_XSO (e.g. if a QSSO was passed, or if
+ * a QCSO with a default stream was passed);
+ * - whether a QSSO was passed (xso == NULL must not be used to determine this
+ * because it may be non-NULL when a QCSO is passed if that QCSO has a
+ * default stream);
+ * - a pointer to a QUIC_LISTENER object, if one is relevant;
+ * - whether we are in "I/O context", meaning that non-normal errors can
+ * be reported via SSL_get_error() as well as via ERR. Functions such as
+ * SSL_read(), SSL_write() and SSL_do_handshake() are "I/O context"
+ * functions which are allowed to change the value returned by
+ * SSL_get_error. However, other functions (including functions which call
+ * SSL_do_handshake() implicitly) are not allowed to change the return value
+ * of SSL_get_error.
+ */
+struct qctx_st {
+ QUIC_OBJ *obj;
+ QUIC_DOMAIN *qd;
+ QUIC_LISTENER *ql;
+ QUIC_CONNECTION *qc;
+ QUIC_XSO *xso;
+ int is_stream, is_listener, is_domain, in_io;
+};
+
+QUIC_NEEDS_LOCK
+static void quic_set_last_error(QCTX *ctx, int last_error)
+{
+ if (!ctx->in_io)
+ return;
+
+ if (ctx->is_stream && ctx->xso != NULL)
+ ctx->xso->last_error = last_error;
+ else if (!ctx->is_stream && ctx->qc != NULL)
+ ctx->qc->last_error = last_error;
+}
+
+/*
+ * Raise a 'normal' error, meaning one that can be reported via SSL_get_error()
+ * rather than via ERR. Note that normal errors must always be raised while
+ * holding a lock.
+ */
+QUIC_NEEDS_LOCK
+static int quic_raise_normal_error(QCTX *ctx,
+ int err)
+{
+ assert(ctx->in_io);
+ quic_set_last_error(ctx, err);
+
+ return 0;
+}
+
+/*
+ * Raise a 'non-normal' error, meaning any error that is not reported via
+ * SSL_get_error() and must be reported via ERR.
+ *
+ * qc should be provided if available. In exceptional circumstances, when qc is
+ * not known, NULL may be passed. This should generally only happen when an
+ * expect_...() function defined below fails, which generally indicates a
+ * dispatch or caller error.
+ *
+ * ctx should be NULL if the connection lock is not held.
+ */
+static int quic_raise_non_normal_error(QCTX *ctx,
+ const char *file,
+ int line,
+ const char *func,
+ int reason,
+ const char *fmt,
+ ...)
+{
+ va_list args;
+
+ if (ctx != NULL) {
+ quic_set_last_error(ctx, SSL_ERROR_SSL);
+
+ if (reason == SSL_R_PROTOCOL_IS_SHUTDOWN && ctx->qc != NULL)
+ ossl_quic_channel_restore_err_state(ctx->qc->ch);
+ }
+
+ ERR_new();
+ ERR_set_debug(file, line, func);
+
+ va_start(args, fmt);
+ ERR_vset_error(ERR_LIB_SSL, reason, fmt, args);
+ va_end(args);
+
+ return 0;
+}
+
+#define QUIC_RAISE_NORMAL_ERROR(ctx, err) \
+ quic_raise_normal_error((ctx), (err))
+
+#define QUIC_RAISE_NON_NORMAL_ERROR(ctx, reason, msg) \
+ quic_raise_non_normal_error((ctx), \
+ OPENSSL_FILE, OPENSSL_LINE, \
+ OPENSSL_FUNC, \
+ (reason), \
+ (msg))
+/*
+ * Flags for expect_quic_as:
+ *
+ * QCTX_C
+ * The input SSL object may be a QCSO.
+ *
+ * QCTX_S
+ * The input SSL object may be a QSSO or a QCSO with a default stream
+ * attached.
+ *
+ * (Note this means there is no current way to require an SSL object with a
+ * QUIC stream which is not a QCSO; a QCSO with a default stream attached
+ * is always considered to satisfy QCTX_S.)
+ *
+ * QCTX_AUTO_S
+ * The input SSL object may be a QSSO or a QCSO with a default stream
+ * attached. If no default stream is currently attached to a QCSO,
+ * one may be auto-created if possible.
+ *
+ * If QCTX_REMOTE_INIT is set, an auto-created default XSO is
+ * initiated by the remote party (i.e., local party reads first).
+ *
+ * If it is not set, an auto-created default XSO is
+ * initiated by the local party (i.e., local party writes first).
+ *
+ * QCTX_L
+ * The input SSL object may be a QLSO.
+ *
+ * QCTX_LOCK
+ * If and only if the function returns successfully, the ctx
+ * is guaranteed to be locked.
+ *
+ * QCTX_IO
+ * Begin an I/O context. If not set, begins a non-I/O context.
+ * This determines whether SSL_get_error() is updated; the value it returns
+ * is modified only by an I/O call.
+ *
+ * QCTX_NO_ERROR
+ * Don't raise an error if the object type is wrong. Should not be used in
+ * conjunction with any flags that may raise errors not related to a wrong
+ * object type.
+ */
+#define QCTX_C (1U << 0)
+#define QCTX_S (1U << 1)
+#define QCTX_L (1U << 2)
+#define QCTX_AUTO_S (1U << 3)
+#define QCTX_REMOTE_INIT (1U << 4)
+#define QCTX_LOCK (1U << 5)
+#define QCTX_IO (1U << 6)
+#define QCTX_D (1U << 7)
+#define QCTX_NO_ERROR (1U << 8)
+
+/*
+ * Called when expect_quic_as() fails. Used to diagnose why the call failed and
+ * to raise a reasonable error code based on the configured preconditions in
+ * flags.
+ */
+static int wrong_type(const SSL *s, uint32_t flags)
+{
+ const uint32_t mask = QCTX_C | QCTX_S | QCTX_L | QCTX_D;
+ int code = ERR_R_UNSUPPORTED;
+
+ if ((flags & QCTX_NO_ERROR) != 0)
+ return 1;
+ else if ((flags & mask) == QCTX_D)
+ code = SSL_R_DOMAIN_USE_ONLY;
+ else if ((flags & mask) == QCTX_L)
+ code = SSL_R_LISTENER_USE_ONLY;
+ else if ((flags & mask) == QCTX_C)
+ code = SSL_R_CONN_USE_ONLY;
+ else if ((flags & mask) == QCTX_S
+ || (flags & mask) == (QCTX_C | QCTX_S))
+ code = SSL_R_NO_STREAM;
+
+ return QUIC_RAISE_NON_NORMAL_ERROR(NULL, code, NULL);
+}
+
+/*
+ * Given a QDSO, QCSO, QSSO or QLSO, initialises a QCTX, determining the
+ * contextually applicable QUIC_LISTENER, QUIC_CONNECTION and QUIC_XSO
+ * pointers.
+ *
+ * After this returns 1, all fields of the passed QCTX are initialised.
+ * Returns 0 on failure. This function is intended to be used to provide API
+ * semantics and as such, it invokes QUIC_RAISE_NON_NORMAL_ERROR() on failure
+ * unless the QCTX_NO_ERROR flag is set.
+ *
+ * The flags argument controls the preconditions and postconditions of this
+ * function. See above for the different flags.
+ *
+ * The fields of a QCTX are initialised as follows depending on the identity of
+ * the SSL object, and assuming the preconditions demanded by the flags field as
+ * described above are met:
+ *
+ * QDSO QLSO QCSO QSSO
+ * qd non-NULL maybe maybe maybe
+ * ql NULL non-NULL maybe maybe
+ * qc NULL NULL non-NULL non-NULL
+ * xso NULL NULL maybe non-NULL
+ * is_stream 0 0 0 1
+ * is_listener 0 1 0 0
+ * is_domain 1 0 0 0
+ */
+static int expect_quic_as(const SSL *s, QCTX *ctx, uint32_t flags)
+{
+ int ok = 0, locked = 0, lock_requested = ((flags & QCTX_LOCK) != 0);
+ QUIC_DOMAIN *qd;
+ QUIC_LISTENER *ql;
+ QUIC_CONNECTION *qc;
+ QUIC_XSO *xso;
+
+ if ((flags & QCTX_AUTO_S) != 0)
+ flags |= QCTX_S;
+
+ ctx->obj = NULL;
+ ctx->qd = NULL;
+ ctx->ql = NULL;
+ ctx->qc = NULL;
+ ctx->xso = NULL;
+ ctx->is_stream = 0;
+ ctx->is_listener = 0;
+ ctx->is_domain = 0;
+ ctx->in_io = ((flags & QCTX_IO) != 0);
+
+ if (s == NULL) {
+ QUIC_RAISE_NON_NORMAL_ERROR(NULL, ERR_R_PASSED_NULL_PARAMETER, NULL);
+ goto err;
+ }
+
+ switch (s->type) {
+ case SSL_TYPE_QUIC_DOMAIN:
+ if ((flags & QCTX_D) == 0) {
+ wrong_type(s, flags);
+ goto err;
+ }
+
+ qd = (QUIC_DOMAIN *)s;
+ ctx->obj = &qd->obj;
+ ctx->qd = qd;
+ ctx->is_domain = 1;
+ break;
+
+ case SSL_TYPE_QUIC_LISTENER:
+ if ((flags & QCTX_L) == 0) {
+ wrong_type(s, flags);
+ goto err;
+ }
+
+ ql = (QUIC_LISTENER *)s;
+ ctx->obj = &ql->obj;
+ ctx->qd = ql->domain;
+ ctx->ql = ql;
+ ctx->is_listener = 1;
+ break;
+
+ case SSL_TYPE_QUIC_CONNECTION:
+ qc = (QUIC_CONNECTION *)s;
+ ctx->obj = &qc->obj;
+ ctx->qd = qc->domain;
+ ctx->ql = qc->listener; /* never changes, so can be read without lock */
+ ctx->qc = qc;
+
+ if ((flags & QCTX_AUTO_S) != 0) {
+ if ((flags & QCTX_IO) != 0)
+ qctx_lock_for_io(ctx);
+ else
+ qctx_lock(ctx);
+
+ locked = 1;
+ }
+
+ if ((flags & QCTX_AUTO_S) != 0 && qc->default_xso == NULL) {
+ if (!quic_mutation_allowed(qc, /*req_active=*/0)) {
+ QUIC_RAISE_NON_NORMAL_ERROR(ctx, SSL_R_PROTOCOL_IS_SHUTDOWN, NULL);
+ goto err;
+ }
+
+ /* If we haven't finished the handshake, try to advance it. */
+ if (quic_do_handshake(ctx) < 1)
+ /* ossl_quic_do_handshake raised error here */
+ goto err;
+
+ if ((flags & QCTX_REMOTE_INIT) != 0) {
+ if (!qc_wait_for_default_xso_for_read(ctx, /*peek=*/0))
+ goto err;
+ } else {
+ if (!qc_try_create_default_xso_for_write(ctx))
+ goto err;
+ }
+ }
+
+ if ((flags & QCTX_C) == 0
+ && (qc->default_xso == NULL || (flags & QCTX_S) == 0)) {
+ wrong_type(s, flags);
+ goto err;
+ }
+
+ ctx->xso = qc->default_xso;
+ break;
+
+ case SSL_TYPE_QUIC_XSO:
+ if ((flags & QCTX_S) == 0) {
+ wrong_type(s, flags);
+ goto err;
+ }
+
+ xso = (QUIC_XSO *)s;
+ ctx->obj = &xso->obj;
+ ctx->qd = xso->conn->domain;
+ ctx->ql = xso->conn->listener;
+ ctx->qc = xso->conn;
+ ctx->xso = xso;
+ ctx->is_stream = 1;
+ break;
+
+ default:
+ QUIC_RAISE_NON_NORMAL_ERROR(NULL, ERR_R_INTERNAL_ERROR, NULL);
+ goto err;
+ }
+
+ if (lock_requested && !locked) {
+ if ((flags & QCTX_IO) != 0)
+ qctx_lock_for_io(ctx);
+ else
+ qctx_lock(ctx);
+
+ locked = 1;
+ }
+
+ ok = 1;
+err:
+ if (locked && (!ok || !lock_requested))
+ qctx_unlock(ctx);
+
+ return ok;
+}
+
+static int is_quic_c(const SSL *s, QCTX *ctx, int raiseerrs)
+{
+ uint32_t flags = QCTX_C;
+
+ if (!raiseerrs)
+ flags |= QCTX_NO_ERROR;
+ return expect_quic_as(s, ctx, flags);
+}
+
+/* Same as expect_quic_cs except that errors are not raised if raiseerrs == 0 */
+static int is_quic_cs(const SSL *s, QCTX *ctx, int raiseerrs)
+{
+ uint32_t flags = QCTX_C | QCTX_S;
+
+ if (!raiseerrs)
+ flags |= QCTX_NO_ERROR;
+ return expect_quic_as(s, ctx, flags);
+}
+
+static int expect_quic_cs(const SSL *s, QCTX *ctx)
+{
+ return expect_quic_as(s, ctx, QCTX_C | QCTX_S);
+}
+
+static int expect_quic_csl(const SSL *s, QCTX *ctx)
+{
+ return expect_quic_as(s, ctx, QCTX_C | QCTX_S | QCTX_L);
+}
+
+static int expect_quic_csld(const SSL *s, QCTX *ctx)
+{
+ return expect_quic_as(s, ctx, QCTX_C | QCTX_S | QCTX_L | QCTX_D);
+}
+
+#define expect_quic_any expect_quic_csld
+
+static int expect_quic_listener(const SSL *s, QCTX *ctx)
+{
+ return expect_quic_as(s, ctx, QCTX_L);
+}
+
+static int expect_quic_domain(const SSL *s, QCTX *ctx)
+{
+ return expect_quic_as(s, ctx, QCTX_D);
+}
+
+/*
+ * Like expect_quic_cs(), but requires that a QUIC_XSO be contextually
+ * available. In other words, requires that the passed QSO be a QSSO or a QCSO
+ * with a default stream.
+ *
+ * remote_init determines if we expect the default XSO to be remotely created or
+ * not. If it is -1, do not instantiate a default XSO if one does not yet exist.
+ *
+ * Channel mutex is acquired and retained on success.
+ */
+QUIC_ACQUIRES_LOCK
+static int ossl_unused expect_quic_with_stream_lock(const SSL *s, int remote_init,
+ int in_io, QCTX *ctx)
+{
+ uint32_t flags = QCTX_S | QCTX_LOCK;
+
+ if (remote_init >= 0)
+ flags |= QCTX_AUTO_S;
+
+ if (remote_init > 0)
+ flags |= QCTX_REMOTE_INIT;
+
+ if (in_io)
+ flags |= QCTX_IO;
+
+ return expect_quic_as(s, ctx, flags);
+}
+
+/*
+ * Like expect_quic_cs(), but fails if called on a QUIC_XSO. ctx->xso may still
+ * be non-NULL if the QCSO has a default stream.
+ */
+static int ossl_unused expect_quic_conn_only(const SSL *s, QCTX *ctx)
+{
+ return expect_quic_as(s, ctx, QCTX_C);
+}
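+
+/*
+ * (Editorial sketch, not upstream code; the function name is hypothetical.)
+ * The typical shape of an API entry point built on the helpers above:
+ */
+#if 0
+int ossl_quic_example_api(SSL *s)
+{
+    QCTX ctx;
+    int ret = 0;
+
+    if (!expect_quic_cs(s, &ctx)) /* accepts a QCSO or QSSO */
+        return 0;
+
+    qctx_lock(&ctx);
+    /* ... operate on ctx.qc / ctx.xso, setting ret ... */
+    qctx_unlock(&ctx);
+    return ret;
+}
+#endif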
+
+/*
+ * Ensures that the domain mutex is held for a method which touches channel
+ * state.
+ *
+ * Precondition: Domain mutex is not held (unchecked)
+ */
+static void qctx_lock(QCTX *ctx)
+{
+#if defined(OPENSSL_THREADS)
+ assert(ctx->obj != NULL);
+ ossl_crypto_mutex_lock(ossl_quic_obj_get0_mutex(ctx->obj));
+#endif
+}
+
+/* Precondition: Channel mutex is held (unchecked) */
+QUIC_NEEDS_LOCK
+static void qctx_unlock(QCTX *ctx)
+{
+#if defined(OPENSSL_THREADS)
+ assert(ctx->obj != NULL);
+ ossl_crypto_mutex_unlock(ossl_quic_obj_get0_mutex(ctx->obj));
+#endif
+}
+
+static void qctx_lock_for_io(QCTX *ctx)
+{
+ qctx_lock(ctx);
+ ctx->in_io = 1;
+
+ /*
+ * We are entering an I/O function so we must update the values returned by
+ * SSL_get_error and SSL_want. Set no error. This will be overridden later
+ * if a call to QUIC_RAISE_NORMAL_ERROR or QUIC_RAISE_NON_NORMAL_ERROR
+ * occurs during the API call.
+ */
+ quic_set_last_error(ctx, SSL_ERROR_NONE);
+}
+
+/*
+ * This predicate is the criterion which should determine API call rejection for
+ * *most* mutating API calls, particularly stream-related operations for send
+ * parts.
+ *
+ * A call is rejected (this function returns 0) if shutdown is in progress
+ * (stream flushing), or we are in a TERMINATING or TERMINATED state. If
+ * req_active=1, the connection must be active (i.e., the IDLE state is also
+ * rejected).
+ */
+static int quic_mutation_allowed(QUIC_CONNECTION *qc, int req_active)
+{
+ if (qc->shutting_down || ossl_quic_channel_is_term_any(qc->ch))
+ return 0;
+
+ if (req_active && !ossl_quic_channel_is_active(qc->ch))
+ return 0;
+
+ return 1;
+}
+
+static int qctx_is_top_level(QCTX *ctx)
+{
+ return ctx->obj->parent_obj == NULL;
+}
+
+static int qctx_blocking(QCTX *ctx)
+{
+ return ossl_quic_obj_blocking(ctx->obj);
+}
+
+/*
+ * Block until a predicate is met.
+ *
+ * Precondition: Must have a channel.
+ * Precondition: Must hold channel lock (unchecked).
+ */
+QUIC_NEEDS_LOCK
+static int block_until_pred(QCTX *ctx,
+ int (*pred)(void *arg), void *pred_arg,
+ uint32_t flags)
+{
+ QUIC_ENGINE *qeng;
+ QUIC_REACTOR *rtor;
+
+ qeng = ossl_quic_obj_get0_engine(ctx->obj);
+ assert(qeng != NULL);
+
+ /*
+ * Any attempt to block auto-disables tick inhibition as otherwise we will
+ * hang around forever.
+ */
+ ossl_quic_engine_set_inhibit_tick(qeng, 0);
+
+ rtor = ossl_quic_engine_get0_reactor(qeng);
+ return ossl_quic_reactor_block_until_pred(rtor, pred, pred_arg, flags);
+}
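+
+/*
+ * (Editorial sketch, not upstream code.) Blocking API paths pair this with
+ * a small predicate over channel state, e.g.:
+ */
+#if 0
+static int handshake_complete_pred(void *arg)
+{
+    QUIC_CONNECTION *qc = arg;
+
+    /* Stop waiting once the handshake completes or the channel dies. */
+    return ossl_quic_channel_is_handshake_complete(qc->ch)
+        || ossl_quic_channel_is_term_any(qc->ch);
+}
+
+/* ... block_until_pred(&ctx, handshake_complete_pred, ctx.qc, 0); ... */
+#endif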
+
+/*
+ * QUIC Front-End I/O API: Initialization
+ * ======================================
+ *
+ * SSL_new => ossl_quic_new
+ * ossl_quic_init
+ * SSL_reset => ossl_quic_reset
+ * SSL_clear => ossl_quic_clear
+ * ossl_quic_deinit
+ * SSL_free => ossl_quic_free
+ *
+ * SSL_set_options => ossl_quic_set_options
+ * SSL_get_options => ossl_quic_get_options
+ * SSL_clear_options => ossl_quic_clear_options
+ *
+ */
+
+/* SSL_new */
+SSL *ossl_quic_new(SSL_CTX *ctx)
+{
+ QUIC_CONNECTION *qc = NULL;
+ SSL_CONNECTION *sc = NULL;
+
+ /*
+ * QUIC_server_method should not be used with SSL_new.
+ * It should only be used with SSL_new_listener.
+ */
+ if (ctx->method == OSSL_QUIC_server_method()) {
+ QUIC_RAISE_NON_NORMAL_ERROR(NULL, ERR_R_SHOULD_NOT_HAVE_BEEN_CALLED, NULL);
+ return NULL;
+ }
+
+ qc = OPENSSL_zalloc(sizeof(*qc));
+ if (qc == NULL) {
+ QUIC_RAISE_NON_NORMAL_ERROR(NULL, ERR_R_CRYPTO_LIB, NULL);
+ return NULL;
+ }
+
+ /* Create the QUIC domain mutex. */
+#if defined(OPENSSL_THREADS)
+ if ((qc->mutex = ossl_crypto_mutex_new()) == NULL) {
+ QUIC_RAISE_NON_NORMAL_ERROR(NULL, ERR_R_CRYPTO_LIB, NULL);
+ goto err;
+ }
+#endif
+
+ /* Create the handshake layer. */
+ qc->tls = ossl_ssl_connection_new_int(ctx, &qc->obj.ssl, TLS_method());
+ if (qc->tls == NULL || (sc = SSL_CONNECTION_FROM_SSL(qc->tls)) == NULL) {
+ QUIC_RAISE_NON_NORMAL_ERROR(NULL, ERR_R_INTERNAL_ERROR, NULL);
+ goto err;
+ }
+
+    /* Mark the inner handshake-layer connection as QUIC-internal. */
+ sc->s3.flags |= TLS1_FLAGS_QUIC | TLS1_FLAGS_QUIC_INTERNAL;
+
+ /* Restrict options derived from the SSL_CTX. */
+ sc->options &= OSSL_QUIC_PERMITTED_OPTIONS_CONN;
+ sc->pha_enabled = 0;
+
+ /* Determine mode of operation. */
+#if !defined(OPENSSL_NO_QUIC_THREAD_ASSIST)
+ qc->is_thread_assisted
+ = ((ctx->domain_flags & SSL_DOMAIN_FLAG_THREAD_ASSISTED) != 0);
+#endif
+
+ qc->as_server = 0;
+ qc->as_server_state = qc->as_server;
+
+ if (!create_channel(qc, ctx))
+ goto err;
+
+ ossl_quic_channel_set_msg_callback(qc->ch, ctx->msg_callback, &qc->obj.ssl);
+ ossl_quic_channel_set_msg_callback_arg(qc->ch, ctx->msg_callback_arg);
+
+ /* Initialise the QUIC_CONNECTION's QUIC_OBJ base. */
+ if (!ossl_quic_obj_init(&qc->obj, ctx, SSL_TYPE_QUIC_CONNECTION, NULL,
+ qc->engine, qc->port)) {
+ QUIC_RAISE_NON_NORMAL_ERROR(NULL, ERR_R_INTERNAL_ERROR, NULL);
+ goto err;
+ }
+
+ /* Initialise libssl APL-related state. */
+ qc->default_stream_mode = SSL_DEFAULT_STREAM_MODE_AUTO_BIDI;
+ qc->default_ssl_mode = qc->obj.ssl.ctx->mode;
+ qc->default_ssl_options = qc->obj.ssl.ctx->options & OSSL_QUIC_PERMITTED_OPTIONS;
+ qc->incoming_stream_policy = SSL_INCOMING_STREAM_POLICY_AUTO;
+ qc->last_error = SSL_ERROR_NONE;
+
+ qc_update_reject_policy(qc);
+
+ /*
+ * We do not create the default XSO yet. The reason for this is that the
+ * stream ID of the default XSO will depend on whether the stream is client
+ * or server-initiated, which depends on who transmits first. Since we do
+ * not know whether the application will be using a client-transmits-first
+ * or server-transmits-first protocol, we defer default XSO creation until
+ * the client calls SSL_read() or SSL_write(). If it calls SSL_read() first,
+ * we take that as a cue that the client is expecting a server-initiated
+ * stream, and vice versa if SSL_write() is called first.
+ */
+ return &qc->obj.ssl;
+
+err:
+ if (qc != NULL) {
+ qc_cleanup(qc, /*have_lock=*/0);
+ OPENSSL_free(qc);
+ }
+ return NULL;
+}
+
+QUIC_NEEDS_LOCK
+static void quic_unref_port_bios(QUIC_PORT *port)
+{
+ BIO *b;
+
+ b = ossl_quic_port_get_net_rbio(port);
+ BIO_free_all(b);
+
+ b = ossl_quic_port_get_net_wbio(port);
+ BIO_free_all(b);
+}
+
+QUIC_NEEDS_LOCK
+static void qc_cleanup(QUIC_CONNECTION *qc, int have_lock)
+{
+ SSL_free(qc->tls);
+ qc->tls = NULL;
+
+ ossl_quic_channel_free(qc->ch);
+ qc->ch = NULL;
+
+ if (qc->port != NULL && qc->listener == NULL && qc->pending == 0) { /* TODO */
+ quic_unref_port_bios(qc->port);
+ ossl_quic_port_free(qc->port);
+ qc->port = NULL;
+
+ ossl_quic_engine_free(qc->engine);
+ qc->engine = NULL;
+ }
+
+#if defined(OPENSSL_THREADS)
+ if (have_lock)
+ /* tsan doesn't like freeing locked mutexes */
+ ossl_crypto_mutex_unlock(qc->mutex);
+
+ if (qc->listener == NULL && qc->pending == 0)
+ ossl_crypto_mutex_free(&qc->mutex);
+#endif
+}
+
+/* SSL_free */
+QUIC_TAKES_LOCK
+static void quic_free_listener(QCTX *ctx)
+{
+ quic_unref_port_bios(ctx->ql->port);
+ ossl_quic_port_drop_incoming(ctx->ql->port);
+ ossl_quic_port_free(ctx->ql->port);
+
+ if (ctx->ql->domain == NULL) {
+ ossl_quic_engine_free(ctx->ql->engine);
+#if defined(OPENSSL_THREADS)
+ ossl_crypto_mutex_free(&ctx->ql->mutex);
+#endif
+ } else {
+ SSL_free(&ctx->ql->domain->obj.ssl);
+ }
+}
+
+/* SSL_free */
+QUIC_TAKES_LOCK
+static void quic_free_domain(QCTX *ctx)
+{
+ ossl_quic_engine_free(ctx->qd->engine);
+#if defined(OPENSSL_THREADS)
+ ossl_crypto_mutex_free(&ctx->qd->mutex);
+#endif
+}
+
+QUIC_TAKES_LOCK
+void ossl_quic_free(SSL *s)
+{
+ QCTX ctx;
+ int is_default;
+
+ /* We should never be called on anything but a QSO. */
+ if (!expect_quic_any(s, &ctx))
+ return;
+
+ if (ctx.is_domain) {
+ quic_free_domain(&ctx);
+ return;
+ }
+
+ if (ctx.is_listener) {
+ quic_free_listener(&ctx);
+ return;
+ }
+
+ qctx_lock(&ctx);
+
+ if (ctx.is_stream) {
+ /*
+ * When a QSSO is freed, the XSO is freed immediately, because the XSO
+ * itself only contains API personality layer data. However the
+ * underlying QUIC_STREAM is not freed immediately but is instead marked
+ * as deleted for later collection.
+ */
+
+ assert(ctx.qc->num_xso > 0);
+ --ctx.qc->num_xso;
+
+ /* If a stream's send part has not been finished, auto-reset it. */
+ if (( ctx.xso->stream->send_state == QUIC_SSTREAM_STATE_READY
+ || ctx.xso->stream->send_state == QUIC_SSTREAM_STATE_SEND)
+ && !ossl_quic_sstream_get_final_size(ctx.xso->stream->sstream, NULL))
+ ossl_quic_stream_map_reset_stream_send_part(ossl_quic_channel_get_qsm(ctx.qc->ch),
+ ctx.xso->stream, 0);
+
+ /* Do STOP_SENDING for the receive part, if applicable. */
+ if ( ctx.xso->stream->recv_state == QUIC_RSTREAM_STATE_RECV
+ || ctx.xso->stream->recv_state == QUIC_RSTREAM_STATE_SIZE_KNOWN)
+ ossl_quic_stream_map_stop_sending_recv_part(ossl_quic_channel_get_qsm(ctx.qc->ch),
+ ctx.xso->stream, 0);
+
+ /* Update stream state. */
+ ctx.xso->stream->deleted = 1;
+ ossl_quic_stream_map_update_state(ossl_quic_channel_get_qsm(ctx.qc->ch),
+ ctx.xso->stream);
+
+ is_default = (ctx.xso == ctx.qc->default_xso);
+ qctx_unlock(&ctx);
+
+ /*
+ * Unref the connection in most cases; the XSO has a ref to the QC and
+ * not vice versa. But for a default XSO, to avoid circular references,
+ * the QC refs the XSO but the XSO does not ref the QC. If we are the
+ * default XSO, we only get here when the QC is being torn down anyway,
+ * so don't call SSL_free(qc) as we are already in it.
+ */
+ if (!is_default)
+ SSL_free(&ctx.qc->obj.ssl);
+
+ /* Note: SSL_free calls OPENSSL_free(xso) for us */
+ return;
+ }
+
+ /*
+ * Free the default XSO, if any. The QUIC_STREAM is not deleted at this
+ * stage, but is freed during the channel free when the whole QSM is freed.
+ */
+ if (ctx.qc->default_xso != NULL) {
+ QUIC_XSO *xso = ctx.qc->default_xso;
+
+ qctx_unlock(&ctx);
+ SSL_free(&xso->obj.ssl);
+ qctx_lock(&ctx);
+ ctx.qc->default_xso = NULL;
+ }
+
+ /* Ensure we have no remaining XSOs. */
+ assert(ctx.qc->num_xso == 0);
+
+#if !defined(OPENSSL_NO_QUIC_THREAD_ASSIST)
+ if (ctx.qc->is_thread_assisted && ctx.qc->started) {
+ ossl_quic_thread_assist_wait_stopped(&ctx.qc->thread_assist);
+ ossl_quic_thread_assist_cleanup(&ctx.qc->thread_assist);
+ }
+#endif
+
+    qc_cleanup(ctx.qc, /*have_lock=*/1);
+    /* Note: SSL_free (which called this function) calls OPENSSL_free(qc) for us */
+
+ if (ctx.qc->listener != NULL)
+ SSL_free(&ctx.qc->listener->obj.ssl);
+ if (ctx.qc->domain != NULL)
+ SSL_free(&ctx.qc->domain->obj.ssl);
+}
+
+/* SSL method init */
+int ossl_quic_init(SSL *s)
+{
+ /* Same op as SSL_clear, forward the call. */
+ return ossl_quic_clear(s);
+}
+
+/* SSL method deinit */
+void ossl_quic_deinit(SSL *s)
+{
+ /* No-op. */
+}
+
+/* SSL_clear (ssl_reset method) */
+int ossl_quic_reset(SSL *s)
+{
+ QCTX ctx;
+
+ if (!expect_quic_any(s, &ctx))
+ return 0;
+
+ ERR_raise(ERR_LIB_SSL, ERR_R_UNSUPPORTED);
+ return 0;
+}
+
+/* ssl_clear method (unused) */
+int ossl_quic_clear(SSL *s)
+{
+ QCTX ctx;
+
+ if (!expect_quic_any(s, &ctx))
+ return 0;
+
+ ERR_raise(ERR_LIB_SSL, ERR_R_UNSUPPORTED);
+ return 0;
+}
+
+int ossl_quic_set_override_now_cb(SSL *s,
+ OSSL_TIME (*now_cb)(void *arg),
+ void *now_cb_arg)
+{
+ QCTX ctx;
+
+ if (!expect_quic_any(s, &ctx))
+ return 0;
+
+ qctx_lock(&ctx);
+
+ ossl_quic_engine_set_time_cb(ctx.obj->engine, now_cb, now_cb_arg);
+
+ qctx_unlock(&ctx);
+ return 1;
+}
+
+void ossl_quic_conn_force_assist_thread_wake(SSL *s)
+{
+ QCTX ctx;
+
+ if (!expect_quic_conn_only(s, &ctx))
+ return;
+
+#if !defined(OPENSSL_NO_QUIC_THREAD_ASSIST)
+ if (ctx.qc->is_thread_assisted && ctx.qc->started)
+ ossl_quic_thread_assist_notify_deadline_changed(&ctx.qc->thread_assist);
+#endif
+}
+
+QUIC_NEEDS_LOCK
+static void qc_touch_default_xso(QUIC_CONNECTION *qc)
+{
+ qc->default_xso_created = 1;
+ qc_update_reject_policy(qc);
+}
+
+/*
+ * Changes default XSO. Allows caller to keep reference to the old default XSO
+ * (if any). Reference to new XSO is transferred from caller.
+ */
+QUIC_NEEDS_LOCK
+static void qc_set_default_xso_keep_ref(QUIC_CONNECTION *qc, QUIC_XSO *xso,
+ int touch,
+ QUIC_XSO **old_xso)
+{
+ int refs;
+
+ *old_xso = NULL;
+
+ if (qc->default_xso != xso) {
+ *old_xso = qc->default_xso; /* transfer old XSO ref to caller */
+
+ qc->default_xso = xso;
+
+ if (xso == NULL) {
+ /*
+ * Changing to not having a default XSO. XSO becomes standalone and
+ * now has a ref to the QC.
+ */
+ if (!ossl_assert(SSL_up_ref(&qc->obj.ssl)))
+ return;
+ } else {
+ /*
+ * Changing from not having a default XSO to having one. The new XSO
+ * will have had a reference to the QC we need to drop to avoid a
+ * circular reference.
+ *
+ * Currently we never change directly from one default XSO to
+ * another, though this function would also still be correct if this
+ * weren't the case.
+ */
+ assert(*old_xso == NULL);
+
+ CRYPTO_DOWN_REF(&qc->obj.ssl.references, &refs);
+ assert(refs > 0);
+ }
+ }
+
+ if (touch)
+ qc_touch_default_xso(qc);
+}
+
+/*
+ * Changes default XSO, releasing the reference to any previous default XSO.
+ * Reference to new XSO is transferred from caller.
+ */
+QUIC_NEEDS_LOCK
+static void qc_set_default_xso(QUIC_CONNECTION *qc, QUIC_XSO *xso, int touch)
+{
+ QUIC_XSO *old_xso = NULL;
+
+ qc_set_default_xso_keep_ref(qc, xso, touch, &old_xso);
+
+ if (old_xso != NULL)
+ SSL_free(&old_xso->obj.ssl);
+}
+
+QUIC_NEEDS_LOCK
+static void xso_update_options(QUIC_XSO *xso)
+{
+ int cleanse = ((xso->ssl_options & SSL_OP_CLEANSE_PLAINTEXT) != 0);
+
+ if (xso->stream->rstream != NULL)
+ ossl_quic_rstream_set_cleanse(xso->stream->rstream, cleanse);
+
+ if (xso->stream->sstream != NULL)
+ ossl_quic_sstream_set_cleanse(xso->stream->sstream, cleanse);
+}
+
+/*
+ * SSL_set_options
+ * ---------------
+ *
+ * Setting options on a QCSO
+ * - configures the handshake-layer options;
+ * - configures the default data-plane options for new streams;
+ * - configures the data-plane options on the default XSO, if there is one.
+ *
+ * Setting options on a QSSO
+ * - configures data-plane options for that stream only.
+ */
+QUIC_TAKES_LOCK
+static uint64_t quic_mask_or_options(SSL *ssl, uint64_t mask_value, uint64_t or_value)
+{
+ QCTX ctx;
+ uint64_t hs_mask_value, hs_or_value, ret;
+
+ if (!expect_quic_cs(ssl, &ctx))
+ return 0;
+
+ qctx_lock(&ctx);
+
+ if (!ctx.is_stream) {
+ /*
+ * If we were called on the connection, we apply any handshake option
+ * changes.
+ */
+ hs_mask_value = (mask_value & OSSL_QUIC_PERMITTED_OPTIONS_CONN);
+ hs_or_value = (or_value & OSSL_QUIC_PERMITTED_OPTIONS_CONN);
+
+ SSL_clear_options(ctx.qc->tls, hs_mask_value);
+ SSL_set_options(ctx.qc->tls, hs_or_value);
+
+ /* Update defaults for new streams. */
+ ctx.qc->default_ssl_options
+ = ((ctx.qc->default_ssl_options & ~mask_value) | or_value)
+ & OSSL_QUIC_PERMITTED_OPTIONS;
+ }
+
+ ret = ctx.qc->default_ssl_options;
+ if (ctx.xso != NULL) {
+ ctx.xso->ssl_options
+ = ((ctx.xso->ssl_options & ~mask_value) | or_value)
+ & OSSL_QUIC_PERMITTED_OPTIONS_STREAM;
+
+ xso_update_options(ctx.xso);
+
+ if (ctx.is_stream)
+ ret = ctx.xso->ssl_options;
+ }
+
+ qctx_unlock(&ctx);
+ return ret;
+}
+
+uint64_t ossl_quic_set_options(SSL *ssl, uint64_t options)
+{
+ return quic_mask_or_options(ssl, 0, options);
+}
+
+/* SSL_clear_options */
+uint64_t ossl_quic_clear_options(SSL *ssl, uint64_t options)
+{
+ return quic_mask_or_options(ssl, options, 0);
+}
+
+/* SSL_get_options */
+uint64_t ossl_quic_get_options(const SSL *ssl)
+{
+ return quic_mask_or_options((SSL *)ssl, 0, 0);
+}
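+
+/*
+ * (Editorial illustration, not upstream code.) Consequence of the split
+ * described above: setting an option on a QCSO updates the handshake layer
+ * and the defaults inherited by future streams, while setting it on a QSSO
+ * affects only that stream's data plane:
+ */
+#if 0
+SSL_set_options(qcso, SSL_OP_CLEANSE_PLAINTEXT); /* conn, defaults, default XSO */
+SSL_set_options(qsso, SSL_OP_CLEANSE_PLAINTEXT); /* this stream only */
+#endif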
+
+/*
+ * QUIC Front-End I/O API: Network BIO Configuration
+ * =================================================
+ *
+ * Handling the different BIOs is difficult:
+ *
+ * - It is more or less a requirement that we use non-blocking network I/O;
+ *   we need to be able to have timeouts on recv() calls, and to make best
+ *   effort (non-blocking) send() and recv() calls.
+ *
+ * The only sensible way to do this is to configure the socket into
+ * non-blocking mode. We could try to do select() before calling send() or
+ * recv() to get a guarantee that the call will not block, but this will
+ * probably run into issues with buggy OSes which generate spurious socket
+ * readiness events. In any case, relying on this to work reliably does not
+ * seem sane.
+ *
+ * Timeouts could be handled via setsockopt() socket timeout options, but
+ * this depends on OS support and adds another syscall to every network I/O
+ * operation. It also has obvious thread safety concerns if we want to move
+ * to concurrent use of a single socket at some later date.
+ *
+ *    Some OSes support a MSG_DONTWAIT flag which allows a single I/O operation
+ *    to be made non-blocking. However, some OSes (e.g. Windows) do not support
+ *    this, so we cannot rely on it.
+ *
+ * As such, we need to configure any FD in non-blocking mode. This may
+ * confound users who pass a blocking socket to libssl. However, in practice
+ *   it would be extremely strange for a user of QUIC to pass an FD to us and
+ *   then also try to send and receive traffic on the same socket(!). Thus the
+ *   impact of this should be limited, and it can be documented.
+ *
+ * - We support both blocking and non-blocking operation in terms of the API
+ * presented to the user. One prospect is to set the blocking mode based on
+ * whether the socket passed to us was already in blocking mode. However,
+ * Windows has no API for determining if a socket is in blocking mode (!),
+ * therefore this cannot be done portably. Currently therefore we expose an
+ * explicit API call to set this, and default to blocking mode.
+ *
+ * - We need to determine our initial destination UDP address. The "natural"
+ * way for a user to do this is to set the peer variable on a BIO_dgram.
+ * However, this has problems because BIO_dgram's peer variable is used for
+ *   both transmission and reception. This means it can constantly be changed
+ *   to a malicious value (e.g. if some random unrelated entity on the
+ * network starts sending traffic to us) on every read call. This is not a
+ * direct issue because we use the 'stateless' BIO_sendmmsg and BIO_recvmmsg
+ * calls only, which do not use this variable. However, we do need to let
+ * the user specify the peer in a 'normal' manner. The compromise here is
+ * that we grab the current peer value set at the time the write BIO is set
+ * and do not read the value again.
+ *
+ * - We also need to support memory BIOs (e.g. BIO_dgram_pair) or custom BIOs.
+ * Currently we do this by only supporting non-blocking mode.
+ *
+ */
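+
+/*
+ * (Editorial sketch, not upstream code.) A minimal client-side network BIO
+ * setup consistent with the constraints above, assuming a pre-created UDP
+ * socket fd and a resolved peer BIO_ADDR:
+ */
+#if 0
+BIO *dbio = BIO_new_dgram(fd, BIO_NOCLOSE);
+
+BIO_dgram_set_peer(dbio, peer_addr); /* peer captured once when wbio is set */
+/* SSL_set_bio() consumes a single reference when rbio == wbio. */
+SSL_set_bio(qcso, dbio, dbio);       /* QUIC puts the FD in non-blocking mode */
+SSL_set_blocking_mode(qcso, 0);      /* explicit app-level blocking choice */
+#endif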
+
+/*
+ * Determines what initial destination UDP address we should use, if possible.
+ * If this fails the client must set the destination address manually, or use a
+ * BIO which does not need a destination address.
+ */
+static int csm_analyse_init_peer_addr(BIO *net_wbio, BIO_ADDR *peer)
+{
+ if (BIO_dgram_detect_peer_addr(net_wbio, peer) <= 0)
+ return 0;
+
+ return 1;
+}
+
+static int
+quic_set0_net_rbio(QUIC_OBJ *obj, BIO *net_rbio)
+{
+ QUIC_PORT *port;
+ BIO *old_rbio = NULL;
+
+ port = ossl_quic_obj_get0_port(obj);
+ old_rbio = ossl_quic_port_get_net_rbio(port);
+ if (old_rbio == net_rbio)
+ return 0;
+
+ if (!ossl_quic_port_set_net_rbio(port, net_rbio))
+ return 0;
+
+ BIO_free_all(old_rbio);
+ if (net_rbio != NULL)
+ BIO_set_nbio(net_rbio, 1); /* best effort autoconfig */
+
+ return 1;
+}
+
+static int
+quic_set0_net_wbio(QUIC_OBJ *obj, BIO *net_wbio)
+{
+ QUIC_PORT *port;
+ BIO *old_wbio = NULL;
+
+ port = ossl_quic_obj_get0_port(obj);
+ old_wbio = ossl_quic_port_get_net_wbio(port);
+ if (old_wbio == net_wbio)
+ return 0;
+
+ if (!ossl_quic_port_set_net_wbio(port, net_wbio))
+ return 0;
+
+ BIO_free_all(old_wbio);
+ if (net_wbio != NULL)
+ BIO_set_nbio(net_wbio, 1); /* best effort autoconfig */
+
+ return 1;
+}
+
+void ossl_quic_conn_set0_net_rbio(SSL *s, BIO *net_rbio)
+{
+ QCTX ctx;
+
+ if (!expect_quic_csl(s, &ctx))
+ return;
+
+ /* Returns 0 if no change. */
+ if (!quic_set0_net_rbio(ctx.obj, net_rbio))
+ return;
+}
+
+void ossl_quic_conn_set0_net_wbio(SSL *s, BIO *net_wbio)
+{
+ QCTX ctx;
+
+ if (!expect_quic_csl(s, &ctx))
+ return;
+
+ /* Returns 0 if no change. */
+ if (!quic_set0_net_wbio(ctx.obj, net_wbio))
+ return;
+}
+
+BIO *ossl_quic_conn_get_net_rbio(const SSL *s)
+{
+ QCTX ctx;
+ QUIC_PORT *port;
+
+ if (!expect_quic_csl(s, &ctx))
+ return NULL;
+
+ port = ossl_quic_obj_get0_port(ctx.obj);
+ assert(port != NULL);
+ return ossl_quic_port_get_net_rbio(port);
+}
+
+BIO *ossl_quic_conn_get_net_wbio(const SSL *s)
+{
+ QCTX ctx;
+ QUIC_PORT *port;
+
+ if (!expect_quic_csl(s, &ctx))
+ return NULL;
+
+ port = ossl_quic_obj_get0_port(ctx.obj);
+ assert(port != NULL);
+ return ossl_quic_port_get_net_wbio(port);
+}
+
+int ossl_quic_conn_get_blocking_mode(const SSL *s)
+{
+ QCTX ctx;
+
+ if (!expect_quic_csl(s, &ctx))
+ return 0;
+
+ return qctx_blocking(&ctx);
+}
+
+QUIC_TAKES_LOCK
+int ossl_quic_conn_set_blocking_mode(SSL *s, int blocking)
+{
+ int ret = 0;
+ unsigned int mode;
+ QCTX ctx;
+
+ if (!expect_quic_csl(s, &ctx))
+ return 0;
+
+ qctx_lock(&ctx);
+
+ /* Sanity check - can we support the request given the current network BIO? */
+ if (blocking) {
+ /*
+ * If called directly on a top-level object (QCSO or QLSO), update our
+ * information on network BIO capabilities.
+ */
+ if (qctx_is_top_level(&ctx))
+ ossl_quic_engine_update_poll_descriptors(ctx.obj->engine, /*force=*/1);
+
+ /* Cannot enable blocking mode if we do not have pollable FDs. */
+ if (!ossl_quic_obj_can_support_blocking(ctx.obj)) {
+ ret = QUIC_RAISE_NON_NORMAL_ERROR(&ctx, ERR_R_UNSUPPORTED, NULL);
+ goto out;
+ }
+ }
+
+ mode = (blocking != 0)
+ ? QUIC_BLOCKING_MODE_BLOCKING
+ : QUIC_BLOCKING_MODE_NONBLOCKING;
+
+ ossl_quic_obj_set_blocking_mode(ctx.obj, mode);
+
+ ret = 1;
+out:
+ qctx_unlock(&ctx);
+ return ret;
+}
+
+int ossl_quic_conn_set_initial_peer_addr(SSL *s,
+ const BIO_ADDR *peer_addr)
+{
+ QCTX ctx;
+
+ if (!expect_quic_cs(s, &ctx))
+ return 0;
+
+ if (ctx.qc->started)
+ return QUIC_RAISE_NON_NORMAL_ERROR(&ctx, ERR_R_SHOULD_NOT_HAVE_BEEN_CALLED,
+ NULL);
+
+ if (peer_addr == NULL) {
+ BIO_ADDR_clear(&ctx.qc->init_peer_addr);
+ return 1;
+ }
+
+ return BIO_ADDR_copy(&ctx.qc->init_peer_addr, peer_addr);
+}
+
+/*
+ * QUIC Front-End I/O API: Asynchronous I/O Management
+ * ===================================================
+ *
+ * (BIO/)SSL_handle_events => ossl_quic_handle_events
+ * (BIO/)SSL_get_event_timeout => ossl_quic_get_event_timeout
+ * (BIO/)SSL_get_poll_fd => ossl_quic_get_poll_fd
+ *
+ */
+
+/* SSL_handle_events; performs QUIC I/O and timeout processing. */
+QUIC_TAKES_LOCK
+int ossl_quic_handle_events(SSL *s)
+{
+ QCTX ctx;
+
+ if (!expect_quic_any(s, &ctx))
+ return 0;
+
+ qctx_lock(&ctx);
+ ossl_quic_reactor_tick(ossl_quic_obj_get0_reactor(ctx.obj), 0);
+ qctx_unlock(&ctx);
+ return 1;
+}
+
+/*
+ * SSL_get_event_timeout. Get the time in milliseconds until the SSL object
+ * should next have events handled by the application by calling
+ * SSL_handle_events(). tv is set to 0 if the object should have events handled
+ * immediately. If no timeout is currently active, *is_infinite is set to 1 and
+ * the value of *tv is undefined.
+ */
+QUIC_TAKES_LOCK
+int ossl_quic_get_event_timeout(SSL *s, struct timeval *tv, int *is_infinite)
+{
+ QCTX ctx;
+ QUIC_REACTOR *reactor;
+ OSSL_TIME deadline;
+ OSSL_TIME basetime;
+
+ if (!expect_quic_any(s, &ctx))
+ return 0;
+
+ qctx_lock(&ctx);
+
+ reactor = ossl_quic_obj_get0_reactor(ctx.obj);
+ deadline = ossl_quic_reactor_get_tick_deadline(reactor);
+
+ if (ossl_time_is_infinite(deadline)) {
+ qctx_unlock(&ctx);
+ *is_infinite = 1;
+
+ /*
+ * Robustness against faulty applications that don't check *is_infinite;
+ * harmless long timeout.
+ */
+ tv->tv_sec = 1000000;
+ tv->tv_usec = 0;
+ return 1;
+ }
+
+ basetime = ossl_quic_engine_get_time(ctx.obj->engine);
+
+ qctx_unlock(&ctx);
+
+ *tv = ossl_time_to_timeval(ossl_time_subtract(deadline, basetime));
+ *is_infinite = 0;
+
+ return 1;
+}
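+
+/*
+ * Usage sketch (illustrative only, not part of this file): a minimal
+ * application event loop driving the two functions above via their public
+ * names, SSL_handle_events() and SSL_get_event_timeout(). The QUIC SSL
+ * object `ssl` and its socket `fd` are assumed to be set up elsewhere.
+ *
+ *     struct timeval tv;
+ *     int isinf = 0;
+ *     fd_set rfds;
+ *
+ *     for (;;) {
+ *         SSL_handle_events(ssl);        // process pending I/O and timers
+ *
+ *         if (!SSL_get_event_timeout(ssl, &tv, &isinf))
+ *             break;
+ *
+ *         FD_ZERO(&rfds);
+ *         FD_SET(fd, &rfds);
+ *         // Sleep until readiness or the next QUIC deadline.
+ *         select(fd + 1, &rfds, NULL, NULL, isinf ? NULL : &tv);
+ *     }
+ */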
+
+/* SSL_get_rpoll_descriptor */
+int ossl_quic_get_rpoll_descriptor(SSL *s, BIO_POLL_DESCRIPTOR *desc)
+{
+ QCTX ctx;
+ QUIC_PORT *port = NULL;
+ BIO *net_rbio;
+
+ if (!expect_quic_csl(s, &ctx))
+ return 0;
+
+ port = ossl_quic_obj_get0_port(ctx.obj);
+ net_rbio = ossl_quic_port_get_net_rbio(port);
+ if (desc == NULL || net_rbio == NULL)
+ return QUIC_RAISE_NON_NORMAL_ERROR(&ctx, ERR_R_PASSED_INVALID_ARGUMENT,
+ NULL);
+
+ return BIO_get_rpoll_descriptor(net_rbio, desc);
+}
+
+/* SSL_get_wpoll_descriptor */
+int ossl_quic_get_wpoll_descriptor(SSL *s, BIO_POLL_DESCRIPTOR *desc)
+{
+ QCTX ctx;
+ QUIC_PORT *port = NULL;
+ BIO *net_wbio;
+
+ if (!expect_quic_csl(s, &ctx))
+ return 0;
+
+ port = ossl_quic_obj_get0_port(ctx.obj);
+ net_wbio = ossl_quic_port_get_net_wbio(port);
+ if (desc == NULL || net_wbio == NULL)
+ return QUIC_RAISE_NON_NORMAL_ERROR(&ctx, ERR_R_PASSED_INVALID_ARGUMENT,
+ NULL);
+
+ return BIO_get_wpoll_descriptor(net_wbio, desc);
+}
+
+/* SSL_net_read_desired */
+QUIC_TAKES_LOCK
+int ossl_quic_get_net_read_desired(SSL *s)
+{
+ QCTX ctx;
+ int ret;
+
+ if (!expect_quic_csl(s, &ctx))
+ return 0;
+
+ qctx_lock(&ctx);
+ ret = ossl_quic_reactor_net_read_desired(ossl_quic_obj_get0_reactor(ctx.obj));
+ qctx_unlock(&ctx);
+ return ret;
+}
+
+/* SSL_net_write_desired */
+QUIC_TAKES_LOCK
+int ossl_quic_get_net_write_desired(SSL *s)
+{
+ int ret;
+ QCTX ctx;
+
+ if (!expect_quic_csl(s, &ctx))
+ return 0;
+
+ qctx_lock(&ctx);
+ ret = ossl_quic_reactor_net_write_desired(ossl_quic_obj_get0_reactor(ctx.obj));
+ qctx_unlock(&ctx);
+ return ret;
+}
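+
+/*
+ * Usage sketch (illustrative only): translating the poll descriptor and
+ * read/write-desired functions above (public names SSL_get_rpoll_descriptor()
+ * et al.) into a poll(2) call, assuming a socket-backed network BIO and an
+ * application-chosen timeout_ms.
+ *
+ *     BIO_POLL_DESCRIPTOR rd, wd;
+ *     struct pollfd pfd = {0};
+ *
+ *     if (SSL_get_rpoll_descriptor(ssl, &rd)
+ *         && rd.type == BIO_POLL_DESCRIPTOR_TYPE_SOCK_FD) {
+ *         pfd.fd = rd.value.fd;
+ *         if (SSL_net_read_desired(ssl))
+ *             pfd.events |= POLLIN;
+ *     }
+ *
+ *     if (SSL_get_wpoll_descriptor(ssl, &wd)
+ *         && wd.type == BIO_POLL_DESCRIPTOR_TYPE_SOCK_FD
+ *         && SSL_net_write_desired(ssl))
+ *         pfd.events |= POLLOUT;
+ *
+ *     poll(&pfd, 1, timeout_ms);
+ */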
+
+/*
+ * QUIC Front-End I/O API: Connection Lifecycle Operations
+ * =======================================================
+ *
+ * SSL_do_handshake => ossl_quic_do_handshake
+ * SSL_set_connect_state => ossl_quic_set_connect_state
+ * SSL_set_accept_state => ossl_quic_set_accept_state
+ * SSL_shutdown => ossl_quic_shutdown
+ * SSL_ctrl => ossl_quic_ctrl
+ * (BIO/)SSL_connect => ossl_quic_connect
+ * (BIO/)SSL_accept => ossl_quic_accept
+ *
+ */
+
+QUIC_NEEDS_LOCK
+static void qc_shutdown_flush_init(QUIC_CONNECTION *qc)
+{
+ QUIC_STREAM_MAP *qsm;
+
+ if (qc->shutting_down)
+ return;
+
+ qsm = ossl_quic_channel_get_qsm(qc->ch);
+
+ ossl_quic_stream_map_begin_shutdown_flush(qsm);
+ qc->shutting_down = 1;
+}
+
+/* Returns 1 if we are done with all shutdown-flush streams. */
+QUIC_NEEDS_LOCK
+static int qc_shutdown_flush_finished(QUIC_CONNECTION *qc)
+{
+ QUIC_STREAM_MAP *qsm = ossl_quic_channel_get_qsm(qc->ch);
+
+ return qc->shutting_down
+ && ossl_quic_stream_map_is_shutdown_flush_finished(qsm);
+}
+
+/* SSL_shutdown */
+static int quic_shutdown_wait(void *arg)
+{
+ QUIC_CONNECTION *qc = arg;
+
+ return ossl_quic_channel_is_terminated(qc->ch);
+}
+
+/* Returns 1 if shutdown flush process has finished or is inapplicable. */
+static int quic_shutdown_flush_wait(void *arg)
+{
+ QUIC_CONNECTION *qc = arg;
+
+ return ossl_quic_channel_is_term_any(qc->ch)
+ || qc_shutdown_flush_finished(qc);
+}
+
+static int quic_shutdown_peer_wait(void *arg)
+{
+ QUIC_CONNECTION *qc = arg;
+ return ossl_quic_channel_is_term_any(qc->ch);
+}
+
+QUIC_TAKES_LOCK
+int ossl_quic_conn_shutdown(SSL *s, uint64_t flags,
+ const SSL_SHUTDOWN_EX_ARGS *args,
+ size_t args_len)
+{
+ int ret;
+ QCTX ctx;
+ int stream_flush = ((flags & SSL_SHUTDOWN_FLAG_NO_STREAM_FLUSH) == 0);
+ int no_block = ((flags & SSL_SHUTDOWN_FLAG_NO_BLOCK) != 0);
+ int wait_peer = ((flags & SSL_SHUTDOWN_FLAG_WAIT_PEER) != 0);
+
+ if (!expect_quic_cs(s, &ctx))
+ return -1;
+
+ if (ctx.is_stream) {
+ QUIC_RAISE_NON_NORMAL_ERROR(&ctx, SSL_R_CONN_USE_ONLY, NULL);
+ return -1;
+ }
+
+ qctx_lock(&ctx);
+
+ if (ossl_quic_channel_is_terminated(ctx.qc->ch)) {
+ qctx_unlock(&ctx);
+ return 1;
+ }
+
+ /* Phase 1: Stream Flushing */
+ if (!wait_peer && stream_flush) {
+ qc_shutdown_flush_init(ctx.qc);
+
+ if (!qc_shutdown_flush_finished(ctx.qc)) {
+ if (!no_block && qctx_blocking(&ctx)) {
+ ret = block_until_pred(&ctx, quic_shutdown_flush_wait, ctx.qc, 0);
+ if (ret < 1) {
+ ret = 0;
+ goto err;
+ }
+ } else {
+ qctx_maybe_autotick(&ctx);
+ }
+ }
+
+ if (!qc_shutdown_flush_finished(ctx.qc)) {
+ qctx_unlock(&ctx);
+ return 0; /* ongoing */
+ }
+ }
+
+ /* Phase 2: Connection Closure */
+ if (wait_peer && !ossl_quic_channel_is_term_any(ctx.qc->ch)) {
+ if (!no_block && qctx_blocking(&ctx)) {
+ ret = block_until_pred(&ctx, quic_shutdown_peer_wait, ctx.qc, 0);
+ if (ret < 1) {
+ ret = 0;
+ goto err;
+ }
+ } else {
+ qctx_maybe_autotick(&ctx);
+ }
+
+ if (!ossl_quic_channel_is_term_any(ctx.qc->ch)) {
+ ret = 0; /* peer hasn't closed yet - still not done */
+ goto err;
+ }
+
+ /*
+ * We are at least terminating - go through the normal process of
+ * waiting until we are in the TERMINATED state.
+ */
+ }
+
+    /* Block mutation ops regardless of whether we did a stream flush. */
+ ctx.qc->shutting_down = 1;
+
+ /*
+ * This call is a no-op if we are already terminating, so it doesn't
+ * affect the wait_peer case.
+ */
+ ossl_quic_channel_local_close(ctx.qc->ch,
+ args != NULL ? args->quic_error_code : 0,
+ args != NULL ? args->quic_reason : NULL);
+
+ SSL_set_shutdown(ctx.qc->tls, SSL_SENT_SHUTDOWN);
+
+ if (ossl_quic_channel_is_terminated(ctx.qc->ch)) {
+ qctx_unlock(&ctx);
+ return 1;
+ }
+
+ /* Phase 3: Terminating Wait Time */
+ if (!no_block && qctx_blocking(&ctx)
+ && (flags & SSL_SHUTDOWN_FLAG_RAPID) == 0) {
+ ret = block_until_pred(&ctx, quic_shutdown_wait, ctx.qc, 0);
+ if (ret < 1) {
+ ret = 0;
+ goto err;
+ }
+ } else {
+ qctx_maybe_autotick(&ctx);
+ }
+
+ ret = ossl_quic_channel_is_terminated(ctx.qc->ch);
+err:
+ qctx_unlock(&ctx);
+ return ret;
+}
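+
+/*
+ * Usage sketch (illustrative only): graceful shutdown of a non-blocking QCSO
+ * via the public SSL_shutdown_ex() wrapper for the function above; a return
+ * of 0 means shutdown is still in progress and the call should be repeated.
+ *
+ *     SSL_SHUTDOWN_EX_ARGS args = {0};
+ *     int rc;
+ *
+ *     args.quic_error_code = 0;       // application error code (no error)
+ *     args.quic_reason = "done";
+ *
+ *     do {
+ *         rc = SSL_shutdown_ex(ssl, 0, &args, sizeof(args));
+ *         if (rc == 0)
+ *             SSL_handle_events(ssl); // or wait for readiness in a real loop
+ *     } while (rc == 0);
+ */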
+
+/* SSL_ctrl */
+long ossl_quic_ctrl(SSL *s, int cmd, long larg, void *parg)
+{
+ QCTX ctx;
+
+ if (!expect_quic_csl(s, &ctx))
+ return 0;
+
+ switch (cmd) {
+ case SSL_CTRL_MODE:
+ if (ctx.is_listener)
+ return QUIC_RAISE_NON_NORMAL_ERROR(&ctx, ERR_R_UNSUPPORTED, NULL);
+
+ /* If called on a QCSO, update the default mode. */
+ if (!ctx.is_stream)
+ ctx.qc->default_ssl_mode |= (uint32_t)larg;
+
+ /*
+ * If we were called on a QSSO or have a default stream, we also update
+ * that.
+ */
+ if (ctx.xso != NULL) {
+ /* Cannot enable EPW while AON write in progress. */
+ if (ctx.xso->aon_write_in_progress)
+ larg &= ~SSL_MODE_ENABLE_PARTIAL_WRITE;
+
+ ctx.xso->ssl_mode |= (uint32_t)larg;
+ return ctx.xso->ssl_mode;
+ }
+
+ return ctx.qc->default_ssl_mode;
+ case SSL_CTRL_CLEAR_MODE:
+ if (ctx.is_listener)
+ return QUIC_RAISE_NON_NORMAL_ERROR(&ctx, ERR_R_UNSUPPORTED, NULL);
+
+ if (!ctx.is_stream)
+ ctx.qc->default_ssl_mode &= ~(uint32_t)larg;
+
+ if (ctx.xso != NULL) {
+ ctx.xso->ssl_mode &= ~(uint32_t)larg;
+ return ctx.xso->ssl_mode;
+ }
+
+ return ctx.qc->default_ssl_mode;
+
+ case SSL_CTRL_SET_MSG_CALLBACK_ARG:
+ if (ctx.is_listener)
+ return QUIC_RAISE_NON_NORMAL_ERROR(&ctx, ERR_R_UNSUPPORTED, NULL);
+
+ ossl_quic_channel_set_msg_callback_arg(ctx.qc->ch, parg);
+ /* This ctrl also needs to be passed to the internal SSL object */
+ return SSL_ctrl(ctx.qc->tls, cmd, larg, parg);
+
+ case DTLS_CTRL_GET_TIMEOUT: /* DTLSv1_get_timeout */
+ {
+ int is_infinite;
+
+ if (!ossl_quic_get_event_timeout(s, parg, &is_infinite))
+ return 0;
+
+ return !is_infinite;
+ }
+ case DTLS_CTRL_HANDLE_TIMEOUT: /* DTLSv1_handle_timeout */
+ /* For legacy compatibility with DTLS calls. */
+ return ossl_quic_handle_events(s) == 1 ? 1 : -1;
+
+ /* Mask ctrls we shouldn't support for QUIC. */
+ case SSL_CTRL_GET_READ_AHEAD:
+ case SSL_CTRL_SET_READ_AHEAD:
+ case SSL_CTRL_SET_MAX_SEND_FRAGMENT:
+ case SSL_CTRL_SET_SPLIT_SEND_FRAGMENT:
+ case SSL_CTRL_SET_MAX_PIPELINES:
+ return 0;
+
+ default:
+ /*
+ * Probably a TLS related ctrl. Send back to the frontend SSL_ctrl
+ * implementation. Either SSL_ctrl will handle it itself by direct
+ * access into handshake layer state, or failing that, it will be passed
+ * to the handshake layer via the SSL_METHOD vtable. If the ctrl is not
+ * supported by anything, the handshake layer's ctrl method will finally
+ * return 0.
+ */
+ if (ctx.is_listener)
+ return QUIC_RAISE_NON_NORMAL_ERROR(&ctx, ERR_R_UNSUPPORTED, NULL);
+
+ return ossl_ctrl_internal(&ctx.qc->obj.ssl, cmd, larg, parg, /*no_quic=*/1);
+ }
+}
+
+/* SSL_set_connect_state */
+int ossl_quic_set_connect_state(SSL *s, int raiseerrs)
+{
+ QCTX ctx;
+
+ if (!is_quic_c(s, &ctx, raiseerrs))
+ return 0;
+
+ if (ctx.qc->as_server_state == 0)
+ return 1;
+
+ /* Cannot be changed after handshake started */
+ if (ctx.qc->started) {
+ if (raiseerrs)
+ QUIC_RAISE_NON_NORMAL_ERROR(NULL, SSL_R_INVALID_COMMAND, NULL);
+ return 0;
+ }
+
+ ctx.qc->as_server_state = 0;
+ return 1;
+}
+
+/* SSL_set_accept_state */
+int ossl_quic_set_accept_state(SSL *s, int raiseerrs)
+{
+ QCTX ctx;
+
+ if (!is_quic_c(s, &ctx, raiseerrs))
+ return 0;
+
+ if (ctx.qc->as_server_state == 1)
+ return 1;
+
+ /* Cannot be changed after handshake started */
+ if (ctx.qc->started) {
+ if (raiseerrs)
+ QUIC_RAISE_NON_NORMAL_ERROR(NULL, SSL_R_INVALID_COMMAND, NULL);
+ return 0;
+ }
+
+ ctx.qc->as_server_state = 1;
+ return 1;
+}
+
+/* SSL_do_handshake */
+struct quic_handshake_wait_args {
+ QUIC_CONNECTION *qc;
+};
+
+static int tls_wants_non_io_retry(QUIC_CONNECTION *qc)
+{
+ int want = SSL_want(qc->tls);
+
+ if (want == SSL_X509_LOOKUP
+ || want == SSL_CLIENT_HELLO_CB
+ || want == SSL_RETRY_VERIFY)
+ return 1;
+
+ return 0;
+}
+
+static int quic_handshake_wait(void *arg)
+{
+ struct quic_handshake_wait_args *args = arg;
+
+ if (!quic_mutation_allowed(args->qc, /*req_active=*/1))
+ return -1;
+
+ if (ossl_quic_channel_is_handshake_complete(args->qc->ch))
+ return 1;
+
+ if (tls_wants_non_io_retry(args->qc))
+ return 1;
+
+ return 0;
+}
+
+static int configure_channel(QUIC_CONNECTION *qc)
+{
+ assert(qc->ch != NULL);
+
+ if (!ossl_quic_channel_set_peer_addr(qc->ch, &qc->init_peer_addr))
+ return 0;
+
+ return 1;
+}
+
+static int need_notifier_for_domain_flags(uint64_t domain_flags)
+{
+ return (domain_flags & SSL_DOMAIN_FLAG_THREAD_ASSISTED) != 0
+ || ((domain_flags & SSL_DOMAIN_FLAG_MULTI_THREAD) != 0
+ && (domain_flags & SSL_DOMAIN_FLAG_BLOCKING) != 0);
+}
+
+QUIC_NEEDS_LOCK
+static int create_channel(QUIC_CONNECTION *qc, SSL_CTX *ctx)
+{
+ QUIC_ENGINE_ARGS engine_args = {0};
+ QUIC_PORT_ARGS port_args = {0};
+
+ engine_args.libctx = ctx->libctx;
+ engine_args.propq = ctx->propq;
+#if defined(OPENSSL_THREADS)
+ engine_args.mutex = qc->mutex;
+#endif
+
+ if (need_notifier_for_domain_flags(ctx->domain_flags))
+ engine_args.reactor_flags |= QUIC_REACTOR_FLAG_USE_NOTIFIER;
+
+ qc->engine = ossl_quic_engine_new(&engine_args);
+ if (qc->engine == NULL) {
+ QUIC_RAISE_NON_NORMAL_ERROR(NULL, ERR_R_INTERNAL_ERROR, NULL);
+ return 0;
+ }
+
+ port_args.channel_ctx = ctx;
+ qc->port = ossl_quic_engine_create_port(qc->engine, &port_args);
+ if (qc->port == NULL) {
+ QUIC_RAISE_NON_NORMAL_ERROR(NULL, ERR_R_INTERNAL_ERROR, NULL);
+ ossl_quic_engine_free(qc->engine);
+ return 0;
+ }
+
+ qc->ch = ossl_quic_port_create_outgoing(qc->port, qc->tls);
+ if (qc->ch == NULL) {
+ QUIC_RAISE_NON_NORMAL_ERROR(NULL, ERR_R_INTERNAL_ERROR, NULL);
+ ossl_quic_port_free(qc->port);
+ ossl_quic_engine_free(qc->engine);
+ return 0;
+ }
+
+ return 1;
+}
+
+/*
+ * Configures a channel with the information we have accumulated via calls made
+ * to us from the application prior to starting a handshake attempt.
+ */
+QUIC_NEEDS_LOCK
+static int ensure_channel_started(QCTX *ctx)
+{
+ QUIC_CONNECTION *qc = ctx->qc;
+
+ if (!qc->started) {
+ if (!configure_channel(qc)) {
+ QUIC_RAISE_NON_NORMAL_ERROR(ctx, ERR_R_INTERNAL_ERROR,
+ "failed to configure channel");
+ return 0;
+ }
+
+ if (!ossl_quic_channel_start(qc->ch)) {
+ ossl_quic_channel_restore_err_state(qc->ch);
+ QUIC_RAISE_NON_NORMAL_ERROR(ctx, ERR_R_INTERNAL_ERROR,
+ "failed to start channel");
+ return 0;
+ }
+
+#if !defined(OPENSSL_NO_QUIC_THREAD_ASSIST)
+ if (qc->is_thread_assisted)
+ if (!ossl_quic_thread_assist_init_start(&qc->thread_assist, qc->ch)) {
+ QUIC_RAISE_NON_NORMAL_ERROR(ctx, ERR_R_INTERNAL_ERROR,
+ "failed to start assist thread");
+ return 0;
+ }
+#endif
+ }
+
+ qc->started = 1;
+ return 1;
+}
+
+QUIC_NEEDS_LOCK
+static int quic_do_handshake(QCTX *ctx)
+{
+ int ret;
+ QUIC_CONNECTION *qc = ctx->qc;
+ QUIC_PORT *port;
+ BIO *net_rbio, *net_wbio;
+
+ if (ossl_quic_channel_is_handshake_complete(qc->ch))
+ /* Handshake already completed. */
+ return 1;
+
+ if (!quic_mutation_allowed(qc, /*req_active=*/0))
+ return QUIC_RAISE_NON_NORMAL_ERROR(ctx, SSL_R_PROTOCOL_IS_SHUTDOWN, NULL);
+
+ if (qc->as_server != qc->as_server_state) {
+ QUIC_RAISE_NON_NORMAL_ERROR(ctx, ERR_R_PASSED_INVALID_ARGUMENT, NULL);
+ return -1; /* Non-protocol error */
+ }
+
+ port = ossl_quic_obj_get0_port(ctx->obj);
+ net_rbio = ossl_quic_port_get_net_rbio(port);
+ net_wbio = ossl_quic_port_get_net_wbio(port);
+ if (net_rbio == NULL || net_wbio == NULL) {
+ /* Need read and write BIOs. */
+ QUIC_RAISE_NON_NORMAL_ERROR(ctx, SSL_R_BIO_NOT_SET, NULL);
+ return -1; /* Non-protocol error */
+ }
+
+ if (!qc->started && ossl_quic_port_is_addressed_w(port)
+ && BIO_ADDR_family(&qc->init_peer_addr) == AF_UNSPEC) {
+ /*
+ * We are trying to connect and are using addressed mode, which means we
+ * need an initial peer address; if we do not have a peer address yet,
+ * we should try to autodetect one.
+ *
+ * We do this as late as possible because some BIOs (e.g. BIO_s_connect)
+ * may not be able to provide us with a peer address until they have
+ * finished their own processing. They may not be able to perform this
+ * processing until an application has finished configuring that BIO
+ * (e.g. with setter calls), which might happen after SSL_set_bio is
+ * called.
+ */
+ if (!csm_analyse_init_peer_addr(net_wbio, &qc->init_peer_addr))
+ /* best effort */
+ BIO_ADDR_clear(&qc->init_peer_addr);
+ else
+ ossl_quic_channel_set_peer_addr(qc->ch, &qc->init_peer_addr);
+ }
+
+ if (!qc->started
+ && ossl_quic_port_is_addressed_w(port)
+ && BIO_ADDR_family(&qc->init_peer_addr) == AF_UNSPEC) {
+ /*
+ * If we still don't have a peer address in addressed mode, we can't do
+ * anything.
+ */
+ QUIC_RAISE_NON_NORMAL_ERROR(ctx, SSL_R_REMOTE_PEER_ADDRESS_NOT_SET, NULL);
+ return -1; /* Non-protocol error */
+ }
+
+ /*
+ * Start connection process. Note we may come here multiple times in
+ * non-blocking mode, which is fine.
+ */
+ if (!ensure_channel_started(ctx)) /* raises on failure */
+ return -1; /* Non-protocol error */
+
+ if (ossl_quic_channel_is_handshake_complete(qc->ch))
+ /* The handshake is now done. */
+ return 1;
+
+ if (!qctx_blocking(ctx)) {
+ /* Try to advance the reactor. */
+ qctx_maybe_autotick(ctx);
+
+ if (ossl_quic_channel_is_handshake_complete(qc->ch))
+ /* The handshake is now done. */
+ return 1;
+
+ if (ossl_quic_channel_is_term_any(qc->ch)) {
+ QUIC_RAISE_NON_NORMAL_ERROR(ctx, SSL_R_PROTOCOL_IS_SHUTDOWN, NULL);
+ return 0;
+ } else if (ossl_quic_obj_desires_blocking(&qc->obj)) {
+ /*
+ * As a special case when doing a handshake when blocking mode is
+ * desired yet not available, see if the network BIOs have become
+ * poll descriptor-enabled. This supports BIOs such as BIO_s_connect
+ * which do late creation of socket FDs and therefore cannot expose
+ * a poll descriptor until after a network BIO is set on the QCSO.
+ */
+ ossl_quic_engine_update_poll_descriptors(qc->obj.engine, /*force=*/1);
+ }
+ }
+
+ /*
+ * We are either in blocking mode or just entered it due to the code above.
+ */
+ if (qctx_blocking(ctx)) {
+ /* In blocking mode, wait for the handshake to complete. */
+ struct quic_handshake_wait_args args;
+
+ args.qc = qc;
+
+ ret = block_until_pred(ctx, quic_handshake_wait, &args, 0);
+ if (!quic_mutation_allowed(qc, /*req_active=*/1)) {
+ QUIC_RAISE_NON_NORMAL_ERROR(ctx, SSL_R_PROTOCOL_IS_SHUTDOWN, NULL);
+ return 0; /* Shutdown before completion */
+ } else if (ret <= 0) {
+ QUIC_RAISE_NON_NORMAL_ERROR(ctx, ERR_R_INTERNAL_ERROR, NULL);
+ return -1; /* Non-protocol error */
+ }
+
+ if (tls_wants_non_io_retry(qc)) {
+ QUIC_RAISE_NORMAL_ERROR(ctx, SSL_get_error(qc->tls, 0));
+ return -1;
+ }
+
+ assert(ossl_quic_channel_is_handshake_complete(qc->ch));
+ return 1;
+ }
+
+ if (tls_wants_non_io_retry(qc)) {
+ QUIC_RAISE_NORMAL_ERROR(ctx, SSL_get_error(qc->tls, 0));
+ return -1;
+ }
+
+ /*
+ * Otherwise, indicate that the handshake isn't done yet.
+ * We can only get here in non-blocking mode.
+ */
+ QUIC_RAISE_NORMAL_ERROR(ctx, SSL_ERROR_WANT_READ);
+ return -1; /* Non-protocol error */
+}
+
+QUIC_TAKES_LOCK
+int ossl_quic_do_handshake(SSL *s)
+{
+ int ret;
+ QCTX ctx;
+
+ if (!expect_quic_cs(s, &ctx))
+ return 0;
+
+ qctx_lock_for_io(&ctx);
+
+ ret = quic_do_handshake(&ctx);
+ qctx_unlock(&ctx);
+ return ret;
+}
+
+/* SSL_connect */
+int ossl_quic_connect(SSL *s)
+{
+ /* Ensure we are in connect state (no-op if non-idle). */
+ if (!ossl_quic_set_connect_state(s, 1))
+ return -1;
+
+ /* Begin or continue the handshake */
+ return ossl_quic_do_handshake(s);
+}
+
+/* SSL_accept */
+int ossl_quic_accept(SSL *s)
+{
+ /* Ensure we are in accept state (no-op if non-idle). */
+ if (!ossl_quic_set_accept_state(s, 1))
+ return -1;
+
+ /* Begin or continue the handshake */
+ return ossl_quic_do_handshake(s);
+}
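+
+/*
+ * Usage sketch (illustrative only): a minimal client handshake using the
+ * entrypoints above. Error handling is elided and the connected datagram BIO
+ * `dbio` is assumed to have been created elsewhere (e.g. from BIO_s_dgram()).
+ *
+ *     SSL_CTX *ctx = SSL_CTX_new(OSSL_QUIC_client_method());
+ *     SSL *ssl = SSL_new(ctx);
+ *     static const unsigned char alpn[] = { 2, 'h', '3' };
+ *
+ *     SSL_set_alpn_protos(ssl, alpn, sizeof(alpn)); // QUIC requires ALPN
+ *     SSL_set_bio(ssl, dbio, dbio);
+ *
+ *     while (SSL_connect(ssl) != 1) {
+ *         int err = SSL_get_error(ssl, 0);
+ *
+ *         if (err != SSL_ERROR_WANT_READ && err != SSL_ERROR_WANT_WRITE)
+ *             break;                  // fatal; inspect the error queue
+ *         // In non-blocking mode, wait for socket readiness here.
+ *     }
+ */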
+
+/*
+ * QUIC Front-End I/O API: Stream Lifecycle Operations
+ * ===================================================
+ *
+ * SSL_stream_new => ossl_quic_conn_stream_new
+ *
+ */
+
+/*
+ * Try to create the default XSO if it doesn't already exist. Returns 1 if the
+ * default XSO was created. Returns 0 if it was not (e.g. because it already
+ * exists). Note that this is NOT an error condition.
+ */
+QUIC_NEEDS_LOCK
+static int qc_try_create_default_xso_for_write(QCTX *ctx)
+{
+ uint64_t flags = 0;
+ QUIC_CONNECTION *qc = ctx->qc;
+
+ if (qc->default_xso_created
+ || qc->default_stream_mode == SSL_DEFAULT_STREAM_MODE_NONE)
+ /*
+ * We only do this once. If the user detaches a previously created
+ * default XSO we don't auto-create another one.
+ */
+ return QUIC_RAISE_NON_NORMAL_ERROR(ctx, SSL_R_NO_STREAM, NULL);
+
+ /* Create a locally-initiated stream. */
+ if (qc->default_stream_mode == SSL_DEFAULT_STREAM_MODE_AUTO_UNI)
+ flags |= SSL_STREAM_FLAG_UNI;
+
+ qc_set_default_xso(qc, (QUIC_XSO *)quic_conn_stream_new(ctx, flags,
+ /*needs_lock=*/0),
+ /*touch=*/0);
+ if (qc->default_xso == NULL)
+ return QUIC_RAISE_NON_NORMAL_ERROR(ctx, ERR_R_INTERNAL_ERROR, NULL);
+
+ qc_touch_default_xso(qc);
+ return 1;
+}
+
+struct quic_wait_for_stream_args {
+ QUIC_CONNECTION *qc;
+ QUIC_STREAM *qs;
+ QCTX *ctx;
+ uint64_t expect_id;
+};
+
+QUIC_NEEDS_LOCK
+static int quic_wait_for_stream(void *arg)
+{
+ struct quic_wait_for_stream_args *args = arg;
+
+ if (!quic_mutation_allowed(args->qc, /*req_active=*/1)) {
+ /* If connection is torn down due to an error while blocking, stop. */
+ QUIC_RAISE_NON_NORMAL_ERROR(args->ctx, SSL_R_PROTOCOL_IS_SHUTDOWN, NULL);
+ return -1;
+ }
+
+ args->qs = ossl_quic_stream_map_get_by_id(ossl_quic_channel_get_qsm(args->qc->ch),
+ args->expect_id | QUIC_STREAM_DIR_BIDI);
+ if (args->qs == NULL)
+ args->qs = ossl_quic_stream_map_get_by_id(ossl_quic_channel_get_qsm(args->qc->ch),
+ args->expect_id | QUIC_STREAM_DIR_UNI);
+
+ if (args->qs != NULL)
+ return 1; /* stream now exists */
+
+ return 0; /* did not get a stream, keep trying */
+}
+
+QUIC_NEEDS_LOCK
+static int qc_wait_for_default_xso_for_read(QCTX *ctx, int peek)
+{
+ /* Called on a QCSO and we don't currently have a default stream. */
+ uint64_t expect_id;
+ QUIC_CONNECTION *qc = ctx->qc;
+ QUIC_STREAM *qs;
+ int res;
+ struct quic_wait_for_stream_args wargs;
+ OSSL_RTT_INFO rtt_info;
+
+ /*
+ * If default stream functionality is disabled or we already detached
+ * one, don't make another default stream and just fail.
+ */
+ if (qc->default_xso_created
+ || qc->default_stream_mode == SSL_DEFAULT_STREAM_MODE_NONE)
+ return QUIC_RAISE_NON_NORMAL_ERROR(ctx, SSL_R_NO_STREAM, NULL);
+
+ /*
+ * The peer may have opened a stream since we last ticked. So tick and
+ * see if the stream with ordinal 0 (remote, bidi/uni based on stream
+ * mode) exists yet. QUIC stream IDs must be allocated in order, so the
+ * first stream created by a peer must have an ordinal of 0.
+ */
+ expect_id = qc->as_server
+ ? QUIC_STREAM_INITIATOR_CLIENT
+ : QUIC_STREAM_INITIATOR_SERVER;
+
+ qs = ossl_quic_stream_map_get_by_id(ossl_quic_channel_get_qsm(qc->ch),
+ expect_id | QUIC_STREAM_DIR_BIDI);
+ if (qs == NULL)
+ qs = ossl_quic_stream_map_get_by_id(ossl_quic_channel_get_qsm(qc->ch),
+ expect_id | QUIC_STREAM_DIR_UNI);
+
+ if (qs == NULL) {
+ qctx_maybe_autotick(ctx);
+
+ qs = ossl_quic_stream_map_get_by_id(ossl_quic_channel_get_qsm(qc->ch),
+ expect_id);
+ }
+
+ if (qs == NULL) {
+ if (peek)
+ return 0;
+
+ if (ossl_quic_channel_is_term_any(qc->ch)) {
+ return QUIC_RAISE_NON_NORMAL_ERROR(ctx, SSL_R_PROTOCOL_IS_SHUTDOWN, NULL);
+ } else if (!qctx_blocking(ctx)) {
+ /* Non-blocking mode, so just bail immediately. */
+ return QUIC_RAISE_NORMAL_ERROR(ctx, SSL_ERROR_WANT_READ);
+ }
+
+ /* Block until we have a stream. */
+ wargs.qc = qc;
+ wargs.qs = NULL;
+ wargs.ctx = ctx;
+ wargs.expect_id = expect_id;
+
+ res = block_until_pred(ctx, quic_wait_for_stream, &wargs, 0);
+ if (res == 0)
+ return QUIC_RAISE_NON_NORMAL_ERROR(ctx, ERR_R_INTERNAL_ERROR, NULL);
+ else if (res < 0 || wargs.qs == NULL)
+ /* quic_wait_for_stream raised error here */
+ return 0;
+
+ qs = wargs.qs;
+ }
+
+ /*
+ * We now have qs != NULL. Remove it from the incoming stream queue so that
+ * it isn't also returned by any future SSL_accept_stream calls.
+ */
+ ossl_statm_get_rtt_info(ossl_quic_channel_get_statm(qc->ch), &rtt_info);
+ ossl_quic_stream_map_remove_from_accept_queue(ossl_quic_channel_get_qsm(qc->ch),
+ qs, rtt_info.smoothed_rtt);
+
+ /*
+ * Now make qs the default stream, creating the necessary XSO.
+ */
+ qc_set_default_xso(qc, create_xso_from_stream(qc, qs), /*touch=*/0);
+ if (qc->default_xso == NULL)
+ return QUIC_RAISE_NON_NORMAL_ERROR(ctx, ERR_R_INTERNAL_ERROR, NULL);
+
+ qc_touch_default_xso(qc); /* inhibits default XSO */
+ return 1;
+}
+
+QUIC_NEEDS_LOCK
+static QUIC_XSO *create_xso_from_stream(QUIC_CONNECTION *qc, QUIC_STREAM *qs)
+{
+ QUIC_XSO *xso = NULL;
+
+ if ((xso = OPENSSL_zalloc(sizeof(*xso))) == NULL) {
+ QUIC_RAISE_NON_NORMAL_ERROR(NULL, ERR_R_CRYPTO_LIB, NULL);
+ goto err;
+ }
+
+ if (!ossl_quic_obj_init(&xso->obj, qc->obj.ssl.ctx, SSL_TYPE_QUIC_XSO,
+ &qc->obj.ssl, NULL, NULL)) {
+ QUIC_RAISE_NON_NORMAL_ERROR(NULL, ERR_R_INTERNAL_ERROR, NULL);
+ goto err;
+ }
+
+ /* XSO refs QC */
+ if (!SSL_up_ref(&qc->obj.ssl)) {
+ QUIC_RAISE_NON_NORMAL_ERROR(NULL, ERR_R_SSL_LIB, NULL);
+ goto err;
+ }
+
+ xso->conn = qc;
+ xso->ssl_mode = qc->default_ssl_mode;
+ xso->ssl_options
+ = qc->default_ssl_options & OSSL_QUIC_PERMITTED_OPTIONS_STREAM;
+ xso->last_error = SSL_ERROR_NONE;
+
+ xso->stream = qs;
+
+ ++qc->num_xso;
+ xso_update_options(xso);
+ return xso;
+
+err:
+ OPENSSL_free(xso);
+ return NULL;
+}
+
+struct quic_new_stream_wait_args {
+ QUIC_CONNECTION *qc;
+ int is_uni;
+};
+
+static int quic_new_stream_wait(void *arg)
+{
+ struct quic_new_stream_wait_args *args = arg;
+ QUIC_CONNECTION *qc = args->qc;
+
+ if (!quic_mutation_allowed(qc, /*req_active=*/1))
+ return -1;
+
+ if (ossl_quic_channel_is_new_local_stream_admissible(qc->ch, args->is_uni))
+ return 1;
+
+ return 0;
+}
+
+/* locking depends on need_lock */
+static SSL *quic_conn_stream_new(QCTX *ctx, uint64_t flags, int need_lock)
+{
+ int ret;
+ QUIC_CONNECTION *qc = ctx->qc;
+ QUIC_XSO *xso = NULL;
+ QUIC_STREAM *qs = NULL;
+ int is_uni = ((flags & SSL_STREAM_FLAG_UNI) != 0);
+ int no_blocking = ((flags & SSL_STREAM_FLAG_NO_BLOCK) != 0);
+ int advance = ((flags & SSL_STREAM_FLAG_ADVANCE) != 0);
+
+ if (need_lock)
+ qctx_lock(ctx);
+
+ if (!quic_mutation_allowed(qc, /*req_active=*/0)) {
+ QUIC_RAISE_NON_NORMAL_ERROR(ctx, SSL_R_PROTOCOL_IS_SHUTDOWN, NULL);
+ goto err;
+ }
+
+ if (!advance
+ && !ossl_quic_channel_is_new_local_stream_admissible(qc->ch, is_uni)) {
+ struct quic_new_stream_wait_args args;
+
+ /*
+ * Stream count flow control currently doesn't permit this stream to be
+ * opened.
+ */
+ if (no_blocking || !qctx_blocking(ctx)) {
+ QUIC_RAISE_NON_NORMAL_ERROR(ctx, SSL_R_STREAM_COUNT_LIMITED, NULL);
+ goto err;
+ }
+
+ args.qc = qc;
+ args.is_uni = is_uni;
+
+ /* Blocking mode - wait until we can get a stream. */
+ ret = block_until_pred(ctx, quic_new_stream_wait, &args, 0);
+ if (!quic_mutation_allowed(qc, /*req_active=*/1)) {
+ QUIC_RAISE_NON_NORMAL_ERROR(ctx, SSL_R_PROTOCOL_IS_SHUTDOWN, NULL);
+ goto err; /* Shutdown before completion */
+ } else if (ret <= 0) {
+ QUIC_RAISE_NON_NORMAL_ERROR(ctx, ERR_R_INTERNAL_ERROR, NULL);
+ goto err; /* Non-protocol error */
+ }
+ }
+
+ qs = ossl_quic_channel_new_stream_local(qc->ch, is_uni);
+ if (qs == NULL) {
+ QUIC_RAISE_NON_NORMAL_ERROR(ctx, ERR_R_INTERNAL_ERROR, NULL);
+ goto err;
+ }
+
+ xso = create_xso_from_stream(qc, qs);
+ if (xso == NULL)
+ goto err;
+
+ qc_touch_default_xso(qc); /* inhibits default XSO */
+ if (need_lock)
+ qctx_unlock(ctx);
+
+ return &xso->obj.ssl;
+
+err:
+ OPENSSL_free(xso);
+ ossl_quic_stream_map_release(ossl_quic_channel_get_qsm(qc->ch), qs);
+ if (need_lock)
+ qctx_unlock(ctx);
+
+ return NULL;
+}
+
+QUIC_TAKES_LOCK
+SSL *ossl_quic_conn_stream_new(SSL *s, uint64_t flags)
+{
+ QCTX ctx;
+
+ if (!expect_quic_conn_only(s, &ctx))
+ return NULL;
+
+ return quic_conn_stream_new(&ctx, flags, /*need_lock=*/1);
+}
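+
+/*
+ * Usage sketch (illustrative only): opening an extra unidirectional stream on
+ * an established connection via the public SSL_new_stream() wrapper for the
+ * function above.
+ *
+ *     SSL *qs = SSL_new_stream(conn, SSL_STREAM_FLAG_UNI);
+ *
+ *     if (qs != NULL) {
+ *         size_t written = 0;
+ *
+ *         SSL_write_ex(qs, "hello", 5, &written);
+ *         SSL_free(qs);   // frees the stream object; the QCSO remains live
+ *     }
+ */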
+
+/*
+ * QUIC Front-End I/O API: Steady-State Operations
+ * ===============================================
+ *
+ * Here we dispatch calls to the steady-state front-end I/O API functions; that
+ * is, the functions used during the established phase of a QUIC connection
+ * (e.g. SSL_read, SSL_write).
+ *
+ * Each function must handle both blocking and non-blocking modes. As discussed
+ * above, all QUIC I/O is implemented using non-blocking mode internally.
+ *
+ * SSL_get_error => partially implemented by ossl_quic_get_error
+ * SSL_want => ossl_quic_want
+ * (BIO/)SSL_read => ossl_quic_read
+ * (BIO/)SSL_write => ossl_quic_write
+ * SSL_pending => ossl_quic_pending
+ * SSL_stream_conclude => ossl_quic_conn_stream_conclude
+ * SSL_key_update => ossl_quic_key_update
+ */
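+
+/*
+ * Usage sketch (illustrative only): the standard non-blocking read pattern an
+ * application uses against the steady-state functions below. handle_data() is
+ * application-defined.
+ *
+ *     unsigned char buf[4096];
+ *     size_t nread = 0;
+ *
+ *     for (;;) {
+ *         if (SSL_read_ex(ssl, buf, sizeof(buf), &nread)) {
+ *             handle_data(buf, nread);
+ *             continue;
+ *         }
+ *
+ *         if (SSL_get_error(ssl, 0) == SSL_ERROR_WANT_READ) {
+ *             SSL_handle_events(ssl);  // or poll for readiness
+ *             continue;
+ *         }
+ *
+ *         break; // SSL_ERROR_ZERO_RETURN (FIN) or a fatal error
+ *     }
+ */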
+
+/* SSL_get_error */
+int ossl_quic_get_error(const SSL *s, int i)
+{
+ QCTX ctx;
+ int net_error, last_error;
+
+    /* SSL_get_error() should not raise new errors */
+ if (!is_quic_cs(s, &ctx, 0 /* suppress errors */))
+ return SSL_ERROR_SSL;
+
+ qctx_lock(&ctx);
+ net_error = ossl_quic_channel_net_error(ctx.qc->ch);
+ last_error = ctx.is_stream ? ctx.xso->last_error : ctx.qc->last_error;
+ qctx_unlock(&ctx);
+
+ if (net_error)
+ return SSL_ERROR_SYSCALL;
+
+ return last_error;
+}
+
+/* Converts a code returned by SSL_get_error to a code returned by SSL_want. */
+static int error_to_want(int error)
+{
+ switch (error) {
+ case SSL_ERROR_WANT_CONNECT: /* never used - UDP is connectionless */
+ case SSL_ERROR_WANT_ACCEPT: /* never used - UDP is connectionless */
+ case SSL_ERROR_ZERO_RETURN:
+ default:
+ return SSL_NOTHING;
+
+ case SSL_ERROR_WANT_READ:
+ return SSL_READING;
+
+ case SSL_ERROR_WANT_WRITE:
+ return SSL_WRITING;
+
+ case SSL_ERROR_WANT_RETRY_VERIFY:
+ return SSL_RETRY_VERIFY;
+
+ case SSL_ERROR_WANT_CLIENT_HELLO_CB:
+ return SSL_CLIENT_HELLO_CB;
+
+ case SSL_ERROR_WANT_X509_LOOKUP:
+ return SSL_X509_LOOKUP;
+ }
+}
+
+/* SSL_want */
+int ossl_quic_want(const SSL *s)
+{
+ QCTX ctx;
+ int w;
+
+ if (!expect_quic_cs(s, &ctx))
+ return SSL_NOTHING;
+
+ qctx_lock(&ctx);
+
+ w = error_to_want(ctx.is_stream ? ctx.xso->last_error : ctx.qc->last_error);
+
+ qctx_unlock(&ctx);
+ return w;
+}
+
+/*
+ * SSL_write
+ * ---------
+ *
+ * The set of functions below provide the implementation of the public SSL_write
+ * function. We must handle:
+ *
+ * - both blocking and non-blocking operation at the application level,
+ * depending on how we are configured;
+ *
+ * - SSL_MODE_ENABLE_PARTIAL_WRITE being on or off;
+ *
+ * - SSL_MODE_ACCEPT_MOVING_WRITE_BUFFER.
+ *
+ */
+QUIC_NEEDS_LOCK
+static void quic_post_write(QUIC_XSO *xso, int did_append,
+ int did_append_all, uint64_t flags,
+ int do_tick)
+{
+ /*
+ * We have appended at least one byte to the stream.
+ * Potentially mark stream as active, depending on FC.
+ */
+ if (did_append)
+ ossl_quic_stream_map_update_state(ossl_quic_channel_get_qsm(xso->conn->ch),
+ xso->stream);
+
+ if (did_append_all && (flags & SSL_WRITE_FLAG_CONCLUDE) != 0)
+ ossl_quic_sstream_fin(xso->stream->sstream);
+
+ /*
+ * Try and send.
+ *
+ * TODO(QUIC FUTURE): It is probably inefficient to try and do this
+ * immediately, plus we should eventually consider Nagle's algorithm.
+ */
+ if (do_tick)
+ ossl_quic_reactor_tick(ossl_quic_channel_get_reactor(xso->conn->ch), 0);
+}
+
+struct quic_write_again_args {
+ QUIC_XSO *xso;
+ const unsigned char *buf;
+ size_t len;
+ size_t total_written;
+ int err;
+ uint64_t flags;
+};
+
+/*
+ * Absolute maximum write buffer size, enforced to prevent a rogue peer from
+ * deliberately inducing DoS. This has been chosen based on the optimal buffer
+ * size for an RTT of 500ms and a bandwidth of 100 Mb/s: the bandwidth-delay
+ * product is 100 Mbit/s x 0.5 s = 50 Mbit = 6.25 MB, which we round down to
+ * 6 MiB.
+ */
+#define MAX_WRITE_BUF_SIZE (6 * 1024 * 1024)
+
+/*
+ * Ensure spare buffer space available (up until a limit, at least).
+ */
+QUIC_NEEDS_LOCK
+static int sstream_ensure_spare(QUIC_SSTREAM *sstream, uint64_t spare)
+{
+ size_t cur_sz = ossl_quic_sstream_get_buffer_size(sstream);
+ size_t avail = ossl_quic_sstream_get_buffer_avail(sstream);
+ size_t spare_ = (spare > SIZE_MAX) ? SIZE_MAX : (size_t)spare;
+ size_t new_sz, growth;
+
+ if (spare_ <= avail || cur_sz == MAX_WRITE_BUF_SIZE)
+ return 1;
+
+ growth = spare_ - avail;
+ if (cur_sz + growth > MAX_WRITE_BUF_SIZE)
+ new_sz = MAX_WRITE_BUF_SIZE;
+ else
+ new_sz = cur_sz + growth;
+
+ return ossl_quic_sstream_set_buffer_size(sstream, new_sz);
+}
+
+/*
+ * Append to a QUIC_STREAM's QUIC_SSTREAM, ensuring buffer space is expanded
+ * as needed according to flow control.
+ */
+QUIC_NEEDS_LOCK
+static int xso_sstream_append(QUIC_XSO *xso, const unsigned char *buf,
+ size_t len, size_t *actual_written)
+{
+ QUIC_SSTREAM *sstream = xso->stream->sstream;
+ uint64_t cur = ossl_quic_sstream_get_cur_size(sstream);
+ uint64_t cwm = ossl_quic_txfc_get_cwm(&xso->stream->txfc);
+ uint64_t permitted = (cwm >= cur ? cwm - cur : 0);
+
+ if (len > permitted)
+ len = (size_t)permitted;
+
+ if (!sstream_ensure_spare(sstream, len))
+ return 0;
+
+ return ossl_quic_sstream_append(sstream, buf, len, actual_written);
+}
+
+QUIC_NEEDS_LOCK
+static int quic_write_again(void *arg)
+{
+ struct quic_write_again_args *args = arg;
+ size_t actual_written = 0;
+
+ if (!quic_mutation_allowed(args->xso->conn, /*req_active=*/1))
+ /* If connection is torn down due to an error while blocking, stop. */
+ return -2;
+
+ if (!quic_validate_for_write(args->xso, &args->err))
+ /*
+ * Stream may have become invalid for write due to connection events
+ * while we blocked.
+ */
+ return -2;
+
+ args->err = ERR_R_INTERNAL_ERROR;
+ if (!xso_sstream_append(args->xso, args->buf, args->len, &actual_written))
+ return -2;
+
+ quic_post_write(args->xso, actual_written > 0,
+ args->len == actual_written, args->flags, 0);
+
+ args->buf += actual_written;
+ args->len -= actual_written;
+ args->total_written += actual_written;
+
+ if (args->len == 0)
+ /* Written everything, done. */
+ return 1;
+
+ /* Not written everything yet, keep trying. */
+ return 0;
+}
+
+QUIC_NEEDS_LOCK
+static int quic_write_blocking(QCTX *ctx, const void *buf, size_t len,
+ uint64_t flags, size_t *written)
+{
+ int res;
+ QUIC_XSO *xso = ctx->xso;
+ struct quic_write_again_args args;
+ size_t actual_written = 0;
+
+ /* First make a best effort to append as much of the data as possible. */
+ if (!xso_sstream_append(xso, buf, len, &actual_written)) {
+ /* Stream already finished or allocation error. */
+ *written = 0;
+ return QUIC_RAISE_NON_NORMAL_ERROR(ctx, ERR_R_INTERNAL_ERROR, NULL);
+ }
+
+ quic_post_write(xso, actual_written > 0, actual_written == len, flags, 1);
+
+ /*
+ * Record however much data we wrote
+ */
+ *written = actual_written;
+
+ if (actual_written == len) {
+ /* Managed to append everything on the first try. */
+ return 1;
+ }
+
+ /*
+ * We did not manage to append all of the data immediately, so the stream
+ * buffer has probably filled up. This means we need to block until some of
+ * it is freed up.
+ */
+ args.xso = xso;
+ args.buf = (const unsigned char *)buf + actual_written;
+ args.len = len - actual_written;
+ args.total_written = 0;
+ args.err = ERR_R_INTERNAL_ERROR;
+ args.flags = flags;
+
+ res = block_until_pred(ctx, quic_write_again, &args, 0);
+ if (res <= 0) {
+ if (!quic_mutation_allowed(xso->conn, /*req_active=*/1))
+ return QUIC_RAISE_NON_NORMAL_ERROR(ctx, SSL_R_PROTOCOL_IS_SHUTDOWN, NULL);
+ else
+ return QUIC_RAISE_NON_NORMAL_ERROR(ctx, args.err, NULL);
+ }
+
+    /*
+     * While we were blocked waiting for buffer space to become available,
+     * args.total_written accumulated however much of the remaining data we
+     * managed to write (necessarily no more than the len parameter passed in,
+     * less the initial append). Add it to the amount written by the initial
+     * xso_sstream_append call above.
+     */
+ *written += args.total_written;
+ return 1;
+}
+
+/*
+ * Functions to manage All-or-Nothing (AON) (that is, non-ENABLE_PARTIAL_WRITE)
+ * write semantics.
+ */
+static void aon_write_begin(QUIC_XSO *xso, const unsigned char *buf,
+ size_t buf_len, size_t already_sent)
+{
+ assert(!xso->aon_write_in_progress);
+
+ xso->aon_write_in_progress = 1;
+ xso->aon_buf_base = buf;
+ xso->aon_buf_pos = already_sent;
+ xso->aon_buf_len = buf_len;
+}
+
+static void aon_write_finish(QUIC_XSO *xso)
+{
+ xso->aon_write_in_progress = 0;
+ xso->aon_buf_base = NULL;
+ xso->aon_buf_pos = 0;
+ xso->aon_buf_len = 0;
+}
+
+QUIC_NEEDS_LOCK
+static int quic_write_nonblocking_aon(QCTX *ctx, const void *buf,
+ size_t len, uint64_t flags,
+ size_t *written)
+{
+ QUIC_XSO *xso = ctx->xso;
+ const void *actual_buf;
+ size_t actual_len, actual_written = 0;
+ int accept_moving_buffer
+ = ((xso->ssl_mode & SSL_MODE_ACCEPT_MOVING_WRITE_BUFFER) != 0);
+
+ if (xso->aon_write_in_progress) {
+ /*
+ * We are in the middle of an AON write (i.e., a previous write did not
+ * manage to append all data to the SSTREAM and we have Enable Partial
+ * Write (EPW) mode disabled.)
+ */
+ if ((!accept_moving_buffer && xso->aon_buf_base != buf)
+ || len != xso->aon_buf_len)
+ /*
+ * Pointer must not have changed if we are not in accept moving
+ * buffer mode. Length must never change.
+ */
+ return QUIC_RAISE_NON_NORMAL_ERROR(ctx, SSL_R_BAD_WRITE_RETRY, NULL);
+
+ actual_buf = (unsigned char *)buf + xso->aon_buf_pos;
+ actual_len = len - xso->aon_buf_pos;
+ assert(actual_len > 0);
+ } else {
+ actual_buf = buf;
+ actual_len = len;
+ }
+
+ /* First make a best effort to append as much of the data as possible. */
+ if (!xso_sstream_append(xso, actual_buf, actual_len, &actual_written)) {
+ /* Stream already finished or allocation error. */
+ *written = 0;
+ return QUIC_RAISE_NON_NORMAL_ERROR(ctx, ERR_R_INTERNAL_ERROR, NULL);
+ }
+
+ quic_post_write(xso, actual_written > 0, actual_written == actual_len,
+ flags, qctx_should_autotick(ctx));
+
+ if (actual_written == actual_len) {
+ /* We have sent everything. */
+ if (xso->aon_write_in_progress) {
+ /*
+ * We have sent everything, and we were in the middle of an AON
+ * write. The output write length is the total length of the AON
+ * buffer, not however many bytes we managed to write to the stream
+ * in this call.
+ */
+ *written = xso->aon_buf_len;
+ aon_write_finish(xso);
+ } else {
+ *written = actual_written;
+ }
+
+ return 1;
+ }
+
+ if (xso->aon_write_in_progress) {
+ /*
+ * AON write is in progress but we have not written everything yet. We
+ * may have managed to send zero bytes, or some number of bytes less
+ * than the total remaining which need to be appended during this
+ * AON operation.
+ */
+ xso->aon_buf_pos += actual_written;
+ assert(xso->aon_buf_pos < xso->aon_buf_len);
+ return QUIC_RAISE_NORMAL_ERROR(ctx, SSL_ERROR_WANT_WRITE);
+ }
+
+ /*
+ * Not in an existing AON operation but partial write is not enabled, so we
+ * need to begin a new AON operation. However we needn't bother if we didn't
+ * actually append anything.
+ */
+ if (actual_written > 0)
+ aon_write_begin(xso, buf, len, actual_written);
+
+ /*
+ * AON - We do not publicly admit to having appended anything until AON
+ * completes.
+ */
+ *written = 0;
+ return QUIC_RAISE_NORMAL_ERROR(ctx, SSL_ERROR_WANT_WRITE);
+}
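+
+/*
+ * Usage sketch (illustrative only): the retry contract the AON path above
+ * imposes on a non-blocking caller which has not enabled
+ * SSL_MODE_ENABLE_PARTIAL_WRITE: on SSL_ERROR_WANT_WRITE, retry with the same
+ * length and (unless SSL_MODE_ACCEPT_MOVING_WRITE_BUFFER is set) the same
+ * buffer pointer.
+ *
+ *     size_t written = 0;
+ *
+ *     while (!SSL_write_ex(ssl, buf, len, &written)) {
+ *         if (SSL_get_error(ssl, 0) != SSL_ERROR_WANT_WRITE)
+ *             break;                  // fatal
+ *         SSL_handle_events(ssl);     // let the stack drain buffered data
+ *     }
+ *     // On success written == len; AON never reports a partial write.
+ */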
+
+QUIC_NEEDS_LOCK
+static int quic_write_nonblocking_epw(QCTX *ctx, const void *buf, size_t len,
+ uint64_t flags, size_t *written)
+{
+ QUIC_XSO *xso = ctx->xso;
+
+ /* Simple best effort operation. */
+ if (!xso_sstream_append(xso, buf, len, written)) {
+ /* Stream already finished or allocation error. */
+ *written = 0;
+ return QUIC_RAISE_NON_NORMAL_ERROR(ctx, ERR_R_INTERNAL_ERROR, NULL);
+ }
+
+ quic_post_write(xso, *written > 0, *written == len, flags,
+ qctx_should_autotick(ctx));
+
+ if (*written == 0)
+ /* SSL_write_ex returns 0 if it didn't write anything. */
+ return QUIC_RAISE_NORMAL_ERROR(ctx, SSL_ERROR_WANT_WRITE);
+
+ return 1;
+}
+
+QUIC_NEEDS_LOCK
+static int quic_validate_for_write(QUIC_XSO *xso, int *err)
+{
+ QUIC_STREAM_MAP *qsm;
+
+ if (xso == NULL || xso->stream == NULL) {
+ *err = ERR_R_INTERNAL_ERROR;
+ return 0;
+ }
+
+ switch (xso->stream->send_state) {
+ default:
+ case QUIC_SSTREAM_STATE_NONE:
+ *err = SSL_R_STREAM_RECV_ONLY;
+ return 0;
+
+ case QUIC_SSTREAM_STATE_READY:
+ qsm = ossl_quic_channel_get_qsm(xso->conn->ch);
+
+ if (!ossl_quic_stream_map_ensure_send_part_id(qsm, xso->stream)) {
+ *err = ERR_R_INTERNAL_ERROR;
+ return 0;
+ }
+
+ /* FALLTHROUGH */
+ case QUIC_SSTREAM_STATE_SEND:
+ case QUIC_SSTREAM_STATE_DATA_SENT:
+ if (ossl_quic_sstream_get_final_size(xso->stream->sstream, NULL)) {
+ *err = SSL_R_STREAM_FINISHED;
+ return 0;
+ }
+ return 1;
+
+ case QUIC_SSTREAM_STATE_DATA_RECVD:
+ *err = SSL_R_STREAM_FINISHED;
+ return 0;
+
+ case QUIC_SSTREAM_STATE_RESET_SENT:
+ case QUIC_SSTREAM_STATE_RESET_RECVD:
+ *err = SSL_R_STREAM_RESET;
+ return 0;
+ }
+}
+
+QUIC_TAKES_LOCK
+int ossl_quic_write_flags(SSL *s, const void *buf, size_t len,
+ uint64_t flags, size_t *written)
+{
+ int ret;
+ QCTX ctx;
+ int partial_write, err;
+
+ *written = 0;
+
+ if (len == 0) {
+ /* Do not autocreate default XSO for zero-length writes. */
+ if (!expect_quic_cs(s, &ctx))
+ return 0;
+
+ qctx_lock_for_io(&ctx);
+ } else {
+ if (!expect_quic_with_stream_lock(s, /*remote_init=*/0, /*io=*/1, &ctx))
+ return 0;
+ }
+
+ partial_write = ((ctx.xso != NULL)
+ ? ((ctx.xso->ssl_mode & SSL_MODE_ENABLE_PARTIAL_WRITE) != 0) : 0);
+
+ if ((flags & ~SSL_WRITE_FLAG_CONCLUDE) != 0) {
+ ret = QUIC_RAISE_NON_NORMAL_ERROR(&ctx, SSL_R_UNSUPPORTED_WRITE_FLAG, NULL);
+ goto out;
+ }
+
+ if (!quic_mutation_allowed(ctx.qc, /*req_active=*/0)) {
+ ret = QUIC_RAISE_NON_NORMAL_ERROR(&ctx, SSL_R_PROTOCOL_IS_SHUTDOWN, NULL);
+ goto out;
+ }
+
+ /*
+ * If we haven't finished the handshake, try to advance it.
+ * We don't accept writes until the handshake is completed.
+ */
+ if (quic_do_handshake(&ctx) < 1) {
+ ret = 0;
+ goto out;
+ }
+
+ /* Ensure correct stream state, stream send part not concluded, etc. */
+ if (len > 0 && !quic_validate_for_write(ctx.xso, &err)) {
+ ret = QUIC_RAISE_NON_NORMAL_ERROR(&ctx, err, NULL);
+ goto out;
+ }
+
+ if (len == 0) {
+ if ((flags & SSL_WRITE_FLAG_CONCLUDE) != 0)
+ quic_post_write(ctx.xso, 0, 1, flags,
+ qctx_should_autotick(&ctx));
+
+ ret = 1;
+ goto out;
+ }
+
+ if (qctx_blocking(&ctx))
+ ret = quic_write_blocking(&ctx, buf, len, flags, written);
+ else if (partial_write)
+ ret = quic_write_nonblocking_epw(&ctx, buf, len, flags, written);
+ else
+ ret = quic_write_nonblocking_aon(&ctx, buf, len, flags, written);
+
+out:
+ qctx_unlock(&ctx);
+ return ret;
+}
+
+QUIC_TAKES_LOCK
+int ossl_quic_write(SSL *s, const void *buf, size_t len, size_t *written)
+{
+ return ossl_quic_write_flags(s, buf, len, 0, written);
+}
+
+/*
+ * SSL_read
+ * --------
+ */
+struct quic_read_again_args {
+ QCTX *ctx;
+ QUIC_STREAM *stream;
+ void *buf;
+ size_t len;
+ size_t *bytes_read;
+ int peek;
+};
+
+QUIC_NEEDS_LOCK
+static int quic_validate_for_read(QUIC_XSO *xso, int *err, int *eos)
+{
+ QUIC_STREAM_MAP *qsm;
+
+ *eos = 0;
+
+ if (xso == NULL || xso->stream == NULL) {
+ *err = ERR_R_INTERNAL_ERROR;
+ return 0;
+ }
+
+ switch (xso->stream->recv_state) {
+ default:
+ case QUIC_RSTREAM_STATE_NONE:
+ *err = SSL_R_STREAM_SEND_ONLY;
+ return 0;
+
+ case QUIC_RSTREAM_STATE_RECV:
+ case QUIC_RSTREAM_STATE_SIZE_KNOWN:
+ case QUIC_RSTREAM_STATE_DATA_RECVD:
+ return 1;
+
+ case QUIC_RSTREAM_STATE_DATA_READ:
+ *eos = 1;
+ return 0;
+
+ case QUIC_RSTREAM_STATE_RESET_RECVD:
+ qsm = ossl_quic_channel_get_qsm(xso->conn->ch);
+ ossl_quic_stream_map_notify_app_read_reset_recv_part(qsm, xso->stream);
+
+ /* FALLTHROUGH */
+ case QUIC_RSTREAM_STATE_RESET_READ:
+ *err = SSL_R_STREAM_RESET;
+ return 0;
+ }
+}
+
+QUIC_NEEDS_LOCK
+static int quic_read_actual(QCTX *ctx,
+ QUIC_STREAM *stream,
+ void *buf, size_t buf_len,
+ size_t *bytes_read,
+ int peek)
+{
+ int is_fin = 0, err, eos;
+ QUIC_CONNECTION *qc = ctx->qc;
+
+ if (!quic_validate_for_read(ctx->xso, &err, &eos)) {
+ if (eos) {
+ ctx->xso->retired_fin = 1;
+ return QUIC_RAISE_NORMAL_ERROR(ctx, SSL_ERROR_ZERO_RETURN);
+ } else {
+ return QUIC_RAISE_NON_NORMAL_ERROR(ctx, err, NULL);
+ }
+ }
+
+ if (peek) {
+ if (!ossl_quic_rstream_peek(stream->rstream, buf, buf_len,
+ bytes_read, &is_fin))
+ return QUIC_RAISE_NON_NORMAL_ERROR(ctx, ERR_R_INTERNAL_ERROR, NULL);
+
+ } else {
+ if (!ossl_quic_rstream_read(stream->rstream, buf, buf_len,
+ bytes_read, &is_fin))
+ return QUIC_RAISE_NON_NORMAL_ERROR(ctx, ERR_R_INTERNAL_ERROR, NULL);
+ }
+
+ if (!peek) {
+ if (*bytes_read > 0) {
+ /*
+ * We have read at least one byte from the stream. Inform stream-level
+ * RXFC of the retirement of controlled bytes. Update the active stream
+ * status (the RXFC may now want to emit a frame granting more credit to
+ * the peer).
+ */
+ OSSL_RTT_INFO rtt_info;
+
+ ossl_statm_get_rtt_info(ossl_quic_channel_get_statm(qc->ch), &rtt_info);
+
+ if (!ossl_quic_rxfc_on_retire(&stream->rxfc, *bytes_read,
+ rtt_info.smoothed_rtt))
+ return QUIC_RAISE_NON_NORMAL_ERROR(ctx, ERR_R_INTERNAL_ERROR, NULL);
+ }
+
+ if (is_fin && !peek) {
+ QUIC_STREAM_MAP *qsm = ossl_quic_channel_get_qsm(ctx->qc->ch);
+
+ ossl_quic_stream_map_notify_totally_read(qsm, ctx->xso->stream);
+ }
+
+ if (*bytes_read > 0)
+ ossl_quic_stream_map_update_state(ossl_quic_channel_get_qsm(qc->ch),
+ stream);
+ }
+
+ if (*bytes_read == 0 && is_fin) {
+ ctx->xso->retired_fin = 1;
+ return QUIC_RAISE_NORMAL_ERROR(ctx, SSL_ERROR_ZERO_RETURN);
+ }
+
+ return 1;
+}
+
+QUIC_NEEDS_LOCK
+static int quic_read_again(void *arg)
+{
+ struct quic_read_again_args *args = arg;
+
+ if (!quic_mutation_allowed(args->ctx->qc, /*req_active=*/1)) {
+ /* If connection is torn down due to an error while blocking, stop. */
+ QUIC_RAISE_NON_NORMAL_ERROR(args->ctx, SSL_R_PROTOCOL_IS_SHUTDOWN, NULL);
+ return -1;
+ }
+
+ if (!quic_read_actual(args->ctx, args->stream,
+ args->buf, args->len, args->bytes_read,
+ args->peek))
+ return -1;
+
+ if (*args->bytes_read > 0)
+ /* got at least one byte, the SSL_read op can finish now */
+ return 1;
+
+ return 0; /* did not read anything, keep trying */
+}
+
+QUIC_TAKES_LOCK
+static int quic_read(SSL *s, void *buf, size_t len, size_t *bytes_read, int peek)
+{
+ int ret, res;
+ QCTX ctx;
+ struct quic_read_again_args args;
+
+ *bytes_read = 0;
+
+ if (!expect_quic_cs(s, &ctx))
+ return 0;
+
+ qctx_lock_for_io(&ctx);
+
+ /* If we haven't finished the handshake, try to advance it. */
+ if (quic_do_handshake(&ctx) < 1) {
+ ret = 0; /* ossl_quic_do_handshake raised error here */
+ goto out;
+ }
+
+ if (ctx.xso == NULL) {
+ /*
+ * Called on a QCSO and we don't currently have a default stream.
+ *
+ * Wait until we get a stream initiated by the peer (blocking mode) or
+ * fail if we don't have one yet (non-blocking mode).
+ */
+ if (!qc_wait_for_default_xso_for_read(&ctx, /*peek=*/0)) {
+ ret = 0; /* error already raised here */
+ goto out;
+ }
+
+ ctx.xso = ctx.qc->default_xso;
+ }
+
+ if (!quic_read_actual(&ctx, ctx.xso->stream, buf, len, bytes_read, peek)) {
+ ret = 0; /* quic_read_actual raised error here */
+ goto out;
+ }
+
+ if (*bytes_read > 0) {
+ /*
+ * Even though we succeeded, tick the reactor here to ensure we are
+ * handling other aspects of the QUIC connection.
+ */
+ if (quic_mutation_allowed(ctx.qc, /*req_active=*/0))
+ qctx_maybe_autotick(&ctx);
+
+ ret = 1;
+ } else if (!quic_mutation_allowed(ctx.qc, /*req_active=*/0)) {
+ ret = QUIC_RAISE_NON_NORMAL_ERROR(&ctx, SSL_R_PROTOCOL_IS_SHUTDOWN, NULL);
+ goto out;
+ } else if (qctx_blocking(&ctx)) {
+ /*
+ * We were not able to read anything immediately, so our stream
+ * buffer is empty. This means we need to block until we get
+ * at least one byte.
+ */
+ args.ctx = &ctx;
+ args.stream = ctx.xso->stream;
+ args.buf = buf;
+ args.len = len;
+ args.bytes_read = bytes_read;
+ args.peek = peek;
+
+ res = block_until_pred(&ctx, quic_read_again, &args, 0);
+ if (res == 0) {
+ ret = QUIC_RAISE_NON_NORMAL_ERROR(&ctx, ERR_R_INTERNAL_ERROR, NULL);
+ goto out;
+ } else if (res < 0) {
+ ret = 0; /* quic_read_again raised error here */
+ goto out;
+ }
+
+ ret = 1;
+ } else {
+ /*
+ * We did not get any bytes and are not in blocking mode.
+ * Tick to see if this delivers any more.
+ */
+ qctx_maybe_autotick(&ctx);
+
+ /* Try the read again. */
+ if (!quic_read_actual(&ctx, ctx.xso->stream, buf, len, bytes_read, peek)) {
+ ret = 0; /* quic_read_actual raised error here */
+ goto out;
+ }
+
+ if (*bytes_read > 0)
+ ret = 1; /* Succeeded this time. */
+ else
+ ret = QUIC_RAISE_NORMAL_ERROR(&ctx, SSL_ERROR_WANT_READ);
+ }
+
+out:
+ qctx_unlock(&ctx);
+ return ret;
+}
+
+int ossl_quic_read(SSL *s, void *buf, size_t len, size_t *bytes_read)
+{
+ return quic_read(s, buf, len, bytes_read, 0);
+}
+
+int ossl_quic_peek(SSL *s, void *buf, size_t len, size_t *bytes_read)
+{
+ return quic_read(s, buf, len, bytes_read, 1);
+}
+
+/*
+ * SSL_pending
+ * -----------
+ */
+
+QUIC_TAKES_LOCK
+static size_t ossl_quic_pending_int(const SSL *s, int check_channel)
+{
+ QCTX ctx;
+ size_t avail = 0;
+
+ if (!expect_quic_cs(s, &ctx))
+ return 0;
+
+ qctx_lock(&ctx);
+
+ if (!ctx.qc->started)
+ goto out;
+
+ if (ctx.xso == NULL) {
+ /* No XSO yet, but there might be a default XSO eligible to be created. */
+ if (qc_wait_for_default_xso_for_read(&ctx, /*peek=*/1)) {
+ ctx.xso = ctx.qc->default_xso;
+ } else {
+ QUIC_RAISE_NON_NORMAL_ERROR(&ctx, SSL_R_NO_STREAM, NULL);
+ goto out;
+ }
+ }
+
+ if (ctx.xso->stream == NULL) {
+ QUIC_RAISE_NON_NORMAL_ERROR(&ctx, ERR_R_INTERNAL_ERROR, NULL);
+ goto out;
+ }
+
+ if (check_channel)
+ avail = ossl_quic_stream_recv_pending(ctx.xso->stream,
+ /*include_fin=*/1)
+ || ossl_quic_channel_has_pending(ctx.qc->ch)
+ || ossl_quic_channel_is_term_any(ctx.qc->ch);
+ else
+ avail = ossl_quic_stream_recv_pending(ctx.xso->stream,
+ /*include_fin=*/0);
+
+out:
+ qctx_unlock(&ctx);
+ return avail;
+}
+
+size_t ossl_quic_pending(const SSL *s)
+{
+ return ossl_quic_pending_int(s, /*check_channel=*/0);
+}
+
+int ossl_quic_has_pending(const SSL *s)
+{
+ /* Do we have app-side pending data or pending URXEs or RXEs? */
+ return ossl_quic_pending_int(s, /*check_channel=*/1) > 0;
+}
+
+/*
+ * SSL_stream_conclude
+ * -------------------
+ */
+QUIC_TAKES_LOCK
+int ossl_quic_conn_stream_conclude(SSL *s)
+{
+ QCTX ctx;
+ QUIC_STREAM *qs;
+ int err;
+ int ret;
+
+ if (!expect_quic_with_stream_lock(s, /*remote_init=*/0, /*io=*/0, &ctx))
+ return 0;
+
+ qs = ctx.xso->stream;
+
+ if (!quic_mutation_allowed(ctx.qc, /*req_active=*/1)) {
+ ret = QUIC_RAISE_NON_NORMAL_ERROR(&ctx, SSL_R_PROTOCOL_IS_SHUTDOWN, NULL);
+ qctx_unlock(&ctx);
+ return ret;
+ }
+
+ if (!quic_validate_for_write(ctx.xso, &err)) {
+ ret = QUIC_RAISE_NON_NORMAL_ERROR(&ctx, err, NULL);
+ qctx_unlock(&ctx);
+ return ret;
+ }
+
+ if (ossl_quic_sstream_get_final_size(qs->sstream, NULL)) {
+ qctx_unlock(&ctx);
+ return 1;
+ }
+
+ ossl_quic_sstream_fin(qs->sstream);
+ quic_post_write(ctx.xso, 1, 0, 0, qctx_should_autotick(&ctx));
+ qctx_unlock(&ctx);
+ return 1;
+}
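+
+/*
+ * Usage sketch (illustrative only): signalling end-of-stream after a final
+ * write via the public SSL_stream_conclude() wrapper for the function above.
+ * This queues FIN; the peer eventually observes SSL_ERROR_ZERO_RETURN.
+ *
+ *     size_t written = 0;
+ *
+ *     if (SSL_write_ex(ssl, msg, msg_len, &written))
+ *         SSL_stream_conclude(ssl, 0);
+ */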
+
+/*
+ * SSL_inject_net_dgram
+ * --------------------
+ */
+QUIC_TAKES_LOCK
+int SSL_inject_net_dgram(SSL *s, const unsigned char *buf,
+ size_t buf_len,
+ const BIO_ADDR *peer,
+ const BIO_ADDR *local)
+{
+ int ret = 0;
+ QCTX ctx;
+ QUIC_DEMUX *demux;
+ QUIC_PORT *port;
+
+ if (!expect_quic_csl(s, &ctx))
+ return 0;
+
+ qctx_lock(&ctx);
+
+ port = ossl_quic_obj_get0_port(ctx.obj);
+ if (port == NULL) {
+ QUIC_RAISE_NON_NORMAL_ERROR(&ctx, ERR_R_UNSUPPORTED, NULL);
+ goto err;
+ }
+
+ demux = ossl_quic_port_get0_demux(port);
+ ret = ossl_quic_demux_inject(demux, buf, buf_len, peer, local);
+
+err:
+ qctx_unlock(&ctx);
+ return ret;
+}
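+
+/*
+ * Usage sketch (illustrative only): an application performing its own socket
+ * I/O can feed received datagrams to the demuxer with the function above.
+ * `peer` would be populated from recvfrom()/recvmsg() metadata.
+ *
+ *     unsigned char dgram[1500];
+ *     ssize_t n = recv(fd, dgram, sizeof(dgram), 0);
+ *
+ *     if (n > 0)
+ *         SSL_inject_net_dgram(ssl, dgram, (size_t)n, peer, NULL);
+ */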
+
+/*
+ * SSL_get0_connection
+ * -------------------
+ */
+SSL *ossl_quic_get0_connection(SSL *s)
+{
+ QCTX ctx;
+
+ if (!expect_quic_cs(s, &ctx))
+ return NULL;
+
+ return &ctx.qc->obj.ssl;
+}
+
+/*
+ * SSL_get0_listener
+ * -----------------
+ */
+SSL *ossl_quic_get0_listener(SSL *s)
+{
+ QCTX ctx;
+
+ if (!expect_quic_csl(s, &ctx))
+ return NULL;
+
+ return ctx.ql != NULL ? &ctx.ql->obj.ssl : NULL;
+}
+
+/*
+ * SSL_get0_domain
+ * ---------------
+ */
+SSL *ossl_quic_get0_domain(SSL *s)
+{
+ QCTX ctx;
+
+ if (!expect_quic_any(s, &ctx))
+ return NULL;
+
+ return ctx.qd != NULL ? &ctx.qd->obj.ssl : NULL;
+}
+
+/*
+ * SSL_get_domain_flags
+ * --------------------
+ */
+int ossl_quic_get_domain_flags(const SSL *ssl, uint64_t *domain_flags)
+{
+ QCTX ctx;
+
+ if (!expect_quic_any(ssl, &ctx))
+ return 0;
+
+ if (domain_flags != NULL)
+ *domain_flags = ctx.obj->domain_flags;
+
+ return 1;
+}
+
+/*
+ * SSL_get_stream_type
+ * -------------------
+ */
+int ossl_quic_get_stream_type(SSL *s)
+{
+ QCTX ctx;
+
+ if (!expect_quic_cs(s, &ctx))
+ return SSL_STREAM_TYPE_BIDI;
+
+ if (ctx.xso == NULL) {
+ /*
+ * If deferred XSO creation has yet to occur, proceed according to the
+ * default stream mode. If AUTO_BIDI or AUTO_UNI is set, we cannot know
+ * what kind of stream will be created yet, so return BIDI on the basis
+ * that at this time, the client still has the option of calling
+ * SSL_read() or SSL_write() first.
+ */
+ if (ctx.qc->default_xso_created
+ || ctx.qc->default_stream_mode == SSL_DEFAULT_STREAM_MODE_NONE)
+ return SSL_STREAM_TYPE_NONE;
+ else
+ return SSL_STREAM_TYPE_BIDI;
+ }
+
+ if (ossl_quic_stream_is_bidi(ctx.xso->stream))
+ return SSL_STREAM_TYPE_BIDI;
+
+ if (ossl_quic_stream_is_server_init(ctx.xso->stream) != ctx.qc->as_server)
+ return SSL_STREAM_TYPE_READ;
+ else
+ return SSL_STREAM_TYPE_WRITE;
+}
+
+/*
+ * SSL_get_stream_id
+ * -----------------
+ */
+QUIC_TAKES_LOCK
+uint64_t ossl_quic_get_stream_id(SSL *s)
+{
+ QCTX ctx;
+ uint64_t id;
+
+ if (!expect_quic_with_stream_lock(s, /*remote_init=*/-1, /*io=*/0, &ctx))
+ return UINT64_MAX;
+
+ id = ctx.xso->stream->id;
+ qctx_unlock(&ctx);
+
+ return id;
+}
+
+/*
+ * SSL_is_stream_local
+ * -------------------
+ */
+QUIC_TAKES_LOCK
+int ossl_quic_is_stream_local(SSL *s)
+{
+ QCTX ctx;
+ int is_local;
+
+ if (!expect_quic_with_stream_lock(s, /*remote_init=*/-1, /*io=*/0, &ctx))
+ return -1;
+
+ is_local = ossl_quic_stream_is_local_init(ctx.xso->stream);
+ qctx_unlock(&ctx);
+
+ return is_local;
+}
+
+/*
+ * SSL_set_default_stream_mode
+ * ---------------------------
+ */
+QUIC_TAKES_LOCK
+int ossl_quic_set_default_stream_mode(SSL *s, uint32_t mode)
+{
+ QCTX ctx;
+
+ if (!expect_quic_conn_only(s, &ctx))
+ return 0;
+
+ qctx_lock(&ctx);
+
+ if (ctx.qc->default_xso_created) {
+ qctx_unlock(&ctx);
+ return QUIC_RAISE_NON_NORMAL_ERROR(&ctx, ERR_R_SHOULD_NOT_HAVE_BEEN_CALLED,
+ "too late to change default stream mode");
+ }
+
+ switch (mode) {
+ case SSL_DEFAULT_STREAM_MODE_NONE:
+ case SSL_DEFAULT_STREAM_MODE_AUTO_BIDI:
+ case SSL_DEFAULT_STREAM_MODE_AUTO_UNI:
+ ctx.qc->default_stream_mode = mode;
+ break;
+ default:
+ qctx_unlock(&ctx);
+ return QUIC_RAISE_NON_NORMAL_ERROR(&ctx, ERR_R_PASSED_INVALID_ARGUMENT,
+ "bad default stream type");
+ }
+
+ qctx_unlock(&ctx);
+ return 1;
+}
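+
+/*
+ * Usage sketch (illustrative only): disabling the default stream before the
+ * handshake so that all streams are created explicitly.
+ *
+ *     SSL_set_default_stream_mode(conn, SSL_DEFAULT_STREAM_MODE_NONE);
+ *     ...
+ *     SSL *qs = SSL_new_stream(conn, 0);  // explicit bidirectional stream
+ */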
+
+/*
+ * SSL_detach_stream
+ * -----------------
+ */
+QUIC_TAKES_LOCK
+SSL *ossl_quic_detach_stream(SSL *s)
+{
+ QCTX ctx;
+ QUIC_XSO *xso = NULL;
+
+ if (!expect_quic_conn_only(s, &ctx))
+ return NULL;
+
+ qctx_lock(&ctx);
+
+ /* Calling this function inhibits default XSO autocreation. */
+ /* QC ref to any default XSO is transferred to us and to caller. */
+ qc_set_default_xso_keep_ref(ctx.qc, NULL, /*touch=*/1, &xso);
+
+ qctx_unlock(&ctx);
+
+ return xso != NULL ? &xso->obj.ssl : NULL;
+}
+
+/*
+ * SSL_attach_stream
+ * -----------------
+ */
+QUIC_TAKES_LOCK
+int ossl_quic_attach_stream(SSL *conn, SSL *stream)
+{
+ QCTX ctx;
+ QUIC_XSO *xso;
+ int nref;
+
+ if (!expect_quic_conn_only(conn, &ctx))
+ return 0;
+
+ if (stream == NULL || stream->type != SSL_TYPE_QUIC_XSO)
+ return QUIC_RAISE_NON_NORMAL_ERROR(&ctx, ERR_R_PASSED_NULL_PARAMETER,
+ "stream to attach must be a valid QUIC stream");
+
+ xso = (QUIC_XSO *)stream;
+
+ qctx_lock(&ctx);
+
+ if (ctx.qc->default_xso != NULL) {
+ qctx_unlock(&ctx);
+ return QUIC_RAISE_NON_NORMAL_ERROR(&ctx, ERR_R_SHOULD_NOT_HAVE_BEEN_CALLED,
+ "connection already has a default stream");
+ }
+
+ /*
+ * It is a caller error for the XSO being attached as a default XSO to have
+ * more than one ref.
+ */
+ if (!CRYPTO_GET_REF(&xso->obj.ssl.references, &nref)) {
+ qctx_unlock(&ctx);
+ return QUIC_RAISE_NON_NORMAL_ERROR(&ctx, ERR_R_INTERNAL_ERROR,
+ "ref");
+ }
+
+ if (nref != 1) {
+ qctx_unlock(&ctx);
+ return QUIC_RAISE_NON_NORMAL_ERROR(&ctx, ERR_R_PASSED_INVALID_ARGUMENT,
+ "stream being attached must have "
+ "only 1 reference");
+ }
+
+ /* Caller's reference to the XSO is transferred to us. */
+ /* Calling this function inhibits default XSO autocreation. */
+ qc_set_default_xso(ctx.qc, xso, /*touch=*/1);
+
+ qctx_unlock(&ctx);
+ return 1;
+}
+
+/*
+ * SSL_set_incoming_stream_policy
+ * ------------------------------
+ */
+QUIC_NEEDS_LOCK
+static int qc_get_effective_incoming_stream_policy(QUIC_CONNECTION *qc)
+{
+ switch (qc->incoming_stream_policy) {
+ case SSL_INCOMING_STREAM_POLICY_AUTO:
+ if ((qc->default_xso == NULL && !qc->default_xso_created)
+ || qc->default_stream_mode == SSL_DEFAULT_STREAM_MODE_NONE)
+ return SSL_INCOMING_STREAM_POLICY_ACCEPT;
+ else
+ return SSL_INCOMING_STREAM_POLICY_REJECT;
+
+ default:
+ return qc->incoming_stream_policy;
+ }
+}
+
+QUIC_NEEDS_LOCK
+static void qc_update_reject_policy(QUIC_CONNECTION *qc)
+{
+ int policy = qc_get_effective_incoming_stream_policy(qc);
+ int enable_reject = (policy == SSL_INCOMING_STREAM_POLICY_REJECT);
+
+ ossl_quic_channel_set_incoming_stream_auto_reject(qc->ch,
+ enable_reject,
+ qc->incoming_stream_aec);
+}
+
+QUIC_TAKES_LOCK
+int ossl_quic_set_incoming_stream_policy(SSL *s, int policy,
+ uint64_t aec)
+{
+ int ret = 1;
+ QCTX ctx;
+
+ if (!expect_quic_conn_only(s, &ctx))
+ return 0;
+
+ qctx_lock(&ctx);
+
+ switch (policy) {
+ case SSL_INCOMING_STREAM_POLICY_AUTO:
+ case SSL_INCOMING_STREAM_POLICY_ACCEPT:
+ case SSL_INCOMING_STREAM_POLICY_REJECT:
+ ctx.qc->incoming_stream_policy = policy;
+ ctx.qc->incoming_stream_aec = aec;
+ break;
+
+ default:
+ QUIC_RAISE_NON_NORMAL_ERROR(&ctx, ERR_R_PASSED_INVALID_ARGUMENT, NULL);
+ ret = 0;
+ break;
+ }
+
+ qc_update_reject_policy(ctx.qc);
+ qctx_unlock(&ctx);
+ return ret;
+}
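+
+/*
+ * Usage sketch (illustrative only): accepting peer-initiated streams. The
+ * policy set via the function above (public name
+ * SSL_set_incoming_stream_policy()) controls whether incoming streams are
+ * queued for SSL_accept_stream() or auto-rejected with the given error code.
+ *
+ *     SSL_set_incoming_stream_policy(conn, SSL_INCOMING_STREAM_POLICY_ACCEPT,
+ *                                    0);
+ *
+ *     for (;;) {
+ *         SSL *qs = SSL_accept_stream(conn, SSL_ACCEPT_STREAM_NO_BLOCK);
+ *
+ *         if (qs == NULL)
+ *             break;          // nothing queued right now
+ *         serve_stream(qs);   // application-defined; frees qs when done
+ *     }
+ */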
+
+/*
+ * SSL_get_value, SSL_set_value
+ * ----------------------------
+ */
+QUIC_TAKES_LOCK
+static int qc_getset_idle_timeout(QCTX *ctx, uint32_t class_,
+ uint64_t *p_value_out, uint64_t *p_value_in)
+{
+ int ret = 0;
+ uint64_t value_out = 0, value_in;
+
+ qctx_lock(ctx);
+
+ switch (class_) {
+ case SSL_VALUE_CLASS_FEATURE_REQUEST:
+ value_out = ossl_quic_channel_get_max_idle_timeout_request(ctx->qc->ch);
+
+ if (p_value_in != NULL) {
+ value_in = *p_value_in;
+ if (value_in > OSSL_QUIC_VLINT_MAX) {
+ QUIC_RAISE_NON_NORMAL_ERROR(ctx, ERR_R_PASSED_INVALID_ARGUMENT,
+ NULL);
+ goto err;
+ }
+
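+            /*
+             * The requested timeout is carried in our transport parameters,
+             * so it cannot be changed once they have been generated.
+             */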
+ if (ossl_quic_channel_have_generated_transport_params(ctx->qc->ch)) {
+ QUIC_RAISE_NON_NORMAL_ERROR(ctx, SSL_R_FEATURE_NOT_RENEGOTIABLE,
+ NULL);
+ goto err;
+ }
+
+ ossl_quic_channel_set_max_idle_timeout_request(ctx->qc->ch, value_in);
+ }
+ break;
+
+ case SSL_VALUE_CLASS_FEATURE_PEER_REQUEST:
+ case SSL_VALUE_CLASS_FEATURE_NEGOTIATED:
+ if (p_value_in != NULL) {
+ QUIC_RAISE_NON_NORMAL_ERROR(ctx, SSL_R_UNSUPPORTED_CONFIG_VALUE_OP,
+ NULL);
+ goto err;
+ }
+
+ if (!ossl_quic_channel_is_handshake_complete(ctx->qc->ch)) {
+ QUIC_RAISE_NON_NORMAL_ERROR(ctx, SSL_R_FEATURE_NEGOTIATION_NOT_COMPLETE,
+ NULL);
+ goto err;
+ }
+
+ value_out = (class_ == SSL_VALUE_CLASS_FEATURE_NEGOTIATED)
+ ? ossl_quic_channel_get_max_idle_timeout_actual(ctx->qc->ch)
+ : ossl_quic_channel_get_max_idle_timeout_peer_request(ctx->qc->ch);
+ break;
+
+ default:
+ QUIC_RAISE_NON_NORMAL_ERROR(ctx, SSL_R_UNSUPPORTED_CONFIG_VALUE_CLASS,
+ NULL);
+ goto err;
+ }
+
+ ret = 1;
+err:
+ qctx_unlock(ctx);
+ if (ret && p_value_out != NULL)
+ *p_value_out = value_out;
+
+ return ret;
+}
+
+QUIC_TAKES_LOCK
+static int qc_get_stream_avail(QCTX *ctx, uint32_t class_,
+ int is_uni, int is_remote,
+ uint64_t *value)
+{
+ int ret = 0;
+
+ if (class_ != SSL_VALUE_CLASS_GENERIC) {
+ QUIC_RAISE_NON_NORMAL_ERROR(ctx, SSL_R_UNSUPPORTED_CONFIG_VALUE_CLASS,
+ NULL);
+ return 0;
+ }
+
+ qctx_lock(ctx);
+
+ *value = is_remote
+ ? ossl_quic_channel_get_remote_stream_count_avail(ctx->qc->ch, is_uni)
+ : ossl_quic_channel_get_local_stream_count_avail(ctx->qc->ch, is_uni);
+
+ ret = 1;
+ qctx_unlock(ctx);
+ return ret;
+}
+
+QUIC_NEEDS_LOCK
+static int qctx_should_autotick(QCTX *ctx)
+{
+ int event_handling_mode;
+ QUIC_OBJ *obj = ctx->obj;
+
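+    /*
+     * Resolve SSL_VALUE_EVENT_HANDLING_MODE_INHERIT by walking up the parent
+     * chain (e.g. stream -> connection -> listener/domain) until an object
+     * with an explicit setting is found; INHERIT at the root of the chain
+     * implies implicit (autotick) handling.
+     */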
+    for (; (event_handling_mode = obj->event_handling_mode)
+               == SSL_VALUE_EVENT_HANDLING_MODE_INHERIT
+           && obj->parent_obj != NULL;
+         obj = obj->parent_obj);
+
+ return event_handling_mode != SSL_VALUE_EVENT_HANDLING_MODE_EXPLICIT;
+}
+
+QUIC_NEEDS_LOCK
+static void qctx_maybe_autotick(QCTX *ctx)
+{
+ if (!qctx_should_autotick(ctx))
+ return;
+
+ ossl_quic_reactor_tick(ossl_quic_obj_get0_reactor(ctx->obj), 0);
+}
+
+QUIC_TAKES_LOCK
+static int qc_getset_event_handling(QCTX *ctx, uint32_t class_,
+ uint64_t *p_value_out,
+ uint64_t *p_value_in)
+{
+ int ret = 0;
+ uint64_t value_out = 0;
+
+ qctx_lock(ctx);
+
+ if (class_ != SSL_VALUE_CLASS_GENERIC) {
+ QUIC_RAISE_NON_NORMAL_ERROR(ctx, SSL_R_UNSUPPORTED_CONFIG_VALUE_CLASS,
+ NULL);
+ goto err;
+ }
+
+ if (p_value_in != NULL) {
+ switch (*p_value_in) {
+ case SSL_VALUE_EVENT_HANDLING_MODE_INHERIT:
+ case SSL_VALUE_EVENT_HANDLING_MODE_IMPLICIT:
+ case SSL_VALUE_EVENT_HANDLING_MODE_EXPLICIT:
+ break;
+ default:
+ QUIC_RAISE_NON_NORMAL_ERROR(ctx, ERR_R_PASSED_INVALID_ARGUMENT,
+ NULL);
+ goto err;
+ }
+
+ value_out = *p_value_in;
+ ctx->obj->event_handling_mode = (int)value_out;
+ } else {
+ value_out = ctx->obj->event_handling_mode;
+ }
+
+ ret = 1;
+err:
+ qctx_unlock(ctx);
+ if (ret && p_value_out != NULL)
+ *p_value_out = value_out;
+
+ return ret;
+}
+
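+/*
+ * Common implementation for the stream write buffer statistics getters; the
+ * statistic reported is selected via the getter callback.
+ */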
+QUIC_TAKES_LOCK
+static int qc_get_stream_write_buf_stat(QCTX *ctx, uint32_t class_,
+ uint64_t *p_value_out,
+ size_t (*getter)(QUIC_SSTREAM *sstream))
+{
+ int ret = 0;
+ size_t value = 0;
+
+ qctx_lock(ctx);
+
+ if (class_ != SSL_VALUE_CLASS_GENERIC) {
+ QUIC_RAISE_NON_NORMAL_ERROR(ctx, SSL_R_UNSUPPORTED_CONFIG_VALUE_CLASS,
+ NULL);
+ goto err;
+ }
+
+ if (ctx->xso == NULL) {
+ QUIC_RAISE_NON_NORMAL_ERROR(ctx, SSL_R_NO_STREAM, NULL);
+ goto err;
+ }
+
+ if (!ossl_quic_stream_has_send(ctx->xso->stream)) {
+ QUIC_RAISE_NON_NORMAL_ERROR(ctx, SSL_R_STREAM_RECV_ONLY, NULL);
+ goto err;
+ }
+
+ if (ossl_quic_stream_has_send_buffer(ctx->xso->stream))
+ value = getter(ctx->xso->stream->sstream);
+
+ ret = 1;
+err:
+ qctx_unlock(ctx);
+ *p_value_out = (uint64_t)value;
+ return ret;
+}
+
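+/*
+ * Select the object check appropriate for a given value ID: stream-related
+ * values may be queried on either a connection or a stream object, while all
+ * other values require a connection object.
+ */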
+QUIC_NEEDS_LOCK
+static int expect_quic_for_value(SSL *s, QCTX *ctx, uint32_t id)
+{
+ switch (id) {
+ case SSL_VALUE_EVENT_HANDLING_MODE:
+ case SSL_VALUE_STREAM_WRITE_BUF_SIZE:
+ case SSL_VALUE_STREAM_WRITE_BUF_USED:
+ case SSL_VALUE_STREAM_WRITE_BUF_AVAIL:
+ return expect_quic_cs(s, ctx);
+ default:
+ return expect_quic_conn_only(s, ctx);
+ }
+}
+
+QUIC_TAKES_LOCK
+int ossl_quic_get_value_uint(SSL *s, uint32_t class_, uint32_t id,
+ uint64_t *value)
+{
+ QCTX ctx;
+
+ if (!expect_quic_for_value(s, &ctx, id))
+ return 0;
+
+ if (value == NULL)
+ return QUIC_RAISE_NON_NORMAL_ERROR(&ctx,
+ ERR_R_PASSED_INVALID_ARGUMENT, NULL);
+
+ switch (id) {
+ case SSL_VALUE_QUIC_IDLE_TIMEOUT:
+ return qc_getset_idle_timeout(&ctx, class_, value, NULL);
+
+ case SSL_VALUE_QUIC_STREAM_BIDI_LOCAL_AVAIL:
+ return qc_get_stream_avail(&ctx, class_, /*uni=*/0, /*remote=*/0, value);
+ case SSL_VALUE_QUIC_STREAM_BIDI_REMOTE_AVAIL:
+ return qc_get_stream_avail(&ctx, class_, /*uni=*/0, /*remote=*/1, value);
+ case SSL_VALUE_QUIC_STREAM_UNI_LOCAL_AVAIL:
+ return qc_get_stream_avail(&ctx, class_, /*uni=*/1, /*remote=*/0, value);
+ case SSL_VALUE_QUIC_STREAM_UNI_REMOTE_AVAIL:
+ return qc_get_stream_avail(&ctx, class_, /*uni=*/1, /*remote=*/1, value);
+
+ case SSL_VALUE_EVENT_HANDLING_MODE:
+ return qc_getset_event_handling(&ctx, class_, value, NULL);
+
+ case SSL_VALUE_STREAM_WRITE_BUF_SIZE:
+ return qc_get_stream_write_buf_stat(&ctx, class_, value,
+ ossl_quic_sstream_get_buffer_size);
+ case SSL_VALUE_STREAM_WRITE_BUF_USED:
+ return qc_get_stream_write_buf_stat(&ctx, class_, value,
+ ossl_quic_sstream_get_buffer_used);
+ case SSL_VALUE_STREAM_WRITE_BUF_AVAIL:
+ return qc_get_stream_write_buf_stat(&ctx, class_, value,
+ ossl_quic_sstream_get_buffer_avail);
+
+ default:
+ return QUIC_RAISE_NON_NORMAL_ERROR(&ctx,
+ SSL_R_UNSUPPORTED_CONFIG_VALUE, NULL);
+ }
+
+ return 1;
+}
+
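+/*
+ * Illustrative sketch (not part of this file; use() is a placeholder):
+ * applications reach these getters via the public SSL_get_value_uint()
+ * wrapper, for example:
+ *
+ *     uint64_t avail = 0;
+ *
+ *     if (SSL_get_value_uint(conn, SSL_VALUE_CLASS_GENERIC,
+ *                            SSL_VALUE_QUIC_STREAM_BIDI_LOCAL_AVAIL,
+ *                            &avail))
+ *         use(avail);
+ */
+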
+QUIC_TAKES_LOCK
+int ossl_quic_set_value_uint(SSL *s, uint32_t class_, uint32_t id,
+ uint64_t value)
+{
+ QCTX ctx;
+
+ if (!expect_quic_for_value(s, &ctx, id))
+ return 0;
+
+ switch (id) {
+ case SSL_VALUE_QUIC_IDLE_TIMEOUT:
+ return qc_getset_idle_timeout(&ctx, class_, NULL, &value);
+
+ case SSL_VALUE_EVENT_HANDLING_MODE:
+ return qc_getset_event_handling(&ctx, class_, NULL, &value);
+
+ default:
+ return QUIC_RAISE_NON_NORMAL_ERROR(&ctx,
+ SSL_R_UNSUPPORTED_CONFIG_VALUE, NULL);
+ }
+
+ return 1;
+}
+
+/*
+ * SSL_accept_stream
+ * -----------------
+ */
+struct wait_for_incoming_stream_args {
+ QCTX *ctx;
+ QUIC_STREAM *qs;
+};
+
+QUIC_NEEDS_LOCK
+static int wait_for_incoming_stream(void *arg)
+{
+ struct wait_for_incoming_stream_args *args = arg;
+ QUIC_CONNECTION *qc = args->ctx->qc;
+ QUIC_STREAM_MAP *qsm = ossl_quic_channel_get_qsm(qc->ch);
+
+ if (!quic_mutation_allowed(qc, /*req_active=*/1)) {
+ /* If connection is torn down due to an error while blocking, stop. */
+ QUIC_RAISE_NON_NORMAL_ERROR(args->ctx, SSL_R_PROTOCOL_IS_SHUTDOWN, NULL);
+ return -1;
+ }
+
+ args->qs = ossl_quic_stream_map_peek_accept_queue(qsm);
+ if (args->qs != NULL)
+ return 1; /* got a stream */
+
+ return 0; /* did not get a stream, keep trying */
+}
+
+QUIC_TAKES_LOCK
+SSL *ossl_quic_accept_stream(SSL *s, uint64_t flags)
+{
+ QCTX ctx;
+ int ret;
+ SSL *new_s = NULL;
+ QUIC_STREAM_MAP *qsm;
+ QUIC_STREAM *qs;
+ QUIC_XSO *xso;
+ OSSL_RTT_INFO rtt_info;
+
+ if (!expect_quic_conn_only(s, &ctx))
+ return NULL;
+
+ qctx_lock(&ctx);
+
+ if (qc_get_effective_incoming_stream_policy(ctx.qc)
+ == SSL_INCOMING_STREAM_POLICY_REJECT) {
+ QUIC_RAISE_NON_NORMAL_ERROR(&ctx, ERR_R_SHOULD_NOT_HAVE_BEEN_CALLED, NULL);
+ goto out;
+ }
+
+ qsm = ossl_quic_channel_get_qsm(ctx.qc->ch);
+
+ qs = ossl_quic_stream_map_peek_accept_queue(qsm);
+ if (qs == NULL) {
+ if (qctx_blocking(&ctx)
+ && (flags & SSL_ACCEPT_STREAM_NO_BLOCK) == 0) {
+ struct wait_for_incoming_stream_args args;
+
+ args.ctx = &ctx;
+ args.qs = NULL;
+
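+            /*
+             * block_until_pred() returns 1 once the predicate fires, 0 on
+             * internal failure, and a negative value if the predicate aborts
+             * (e.g. the connection is torn down while we block).
+             */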
+ ret = block_until_pred(&ctx, wait_for_incoming_stream, &args, 0);
+ if (ret == 0) {
+ QUIC_RAISE_NON_NORMAL_ERROR(&ctx, ERR_R_INTERNAL_ERROR, NULL);
+ goto out;
+ } else if (ret < 0 || args.qs == NULL) {
+ goto out;
+ }
+
+ qs = args.qs;
+ } else {
+ goto out;
+ }
+ }
+
+ xso = create_xso_from_stream(ctx.qc, qs);
+ if (xso == NULL)
+ goto out;
+
+ ossl_statm_get_rtt_info(ossl_quic_channel_get_statm(ctx.qc->ch), &rtt_info);
+ ossl_quic_stream_map_remove_from_accept_queue(qsm, qs,
+ rtt_info.smoothed_rtt);
+ new_s = &xso->obj.ssl;
+
+    /* Calling this function inhibits default XSO autocreation. */
+    qc_touch_default_xso(ctx.qc);
+
+out:
+ qctx_unlock(&ctx);
+ return new_s;
+}
+
+/*
+ * SSL_get_accept_stream_queue_len
+ * -------------------------------
+ */
+QUIC_TAKES_LOCK
+size_t ossl_quic_get_accept_stream_queue_len(SSL *s)
+{
+ QCTX ctx;
+ size_t v;
+
+ if (!expect_quic_conn_only(s, &ctx))
+ return 0;
+
+ qctx_lock(&ctx);
+
+ v = ossl_quic_stream_map_get_total_accept_queue_len(ossl_quic_channel_get_qsm(ctx.qc->ch));
+
+ qctx_unlock(&ctx);
+ return v;
+}
+
+/*
+ * SSL_stream_reset
+ * ----------------
+ */
+int ossl_quic_stream_reset(SSL *ssl,
+ const SSL_STREAM_RESET_ARGS *args,
+ size_t args_len)
+{
+ QCTX ctx;
+ QUIC_STREAM_MAP *qsm;
+ QUIC_STREAM *qs;
+ uint64_t error_code;
+ int ok, err;
+
+ if (!expect_quic_with_stream_lock(ssl, /*remote_init=*/0, /*io=*/0, &ctx))
+ return 0;
+
+ qsm = ossl_quic_channel_get_qsm(ctx.qc->ch);
+ qs = ctx.xso->stream;
+ error_code = (args != NULL ? args->quic_error_code : 0);
+
+ if (!quic_validate_for_write(ctx.xso, &err)) {
+ ok = QUIC_RAISE_NON_NORMAL_ERROR(&ctx, err, NULL);
+ goto err;
+ }
+
+ ok = ossl_quic_stream_map_reset_stream_send_part(qsm, qs, error_code);
+ if (ok)
+ ctx.xso->requested_reset = 1;
+
+err:
+ qctx_unlock(&ctx);
+ return ok;
+}
+
+/*
+ * SSL_get_stream_read_state
+ * -------------------------
+ */
+static void quic_classify_stream(QUIC_CONNECTION *qc,
+ QUIC_STREAM *qs,
+ int is_write,
+ int *state,
+ uint64_t *app_error_code)
+{
+ int local_init;
+ uint64_t final_size;
+
+ local_init = (ossl_quic_stream_is_server_init(qs) == qc->as_server);
+
+ if (app_error_code != NULL)
+ *app_error_code = UINT64_MAX;
+    else
+        app_error_code = &final_size; /* value is discarded; reuse as scratch */
+
+ if (!ossl_quic_stream_is_bidi(qs) && local_init != is_write) {
+ /*
+ * Unidirectional stream and this direction of transmission doesn't
+ * exist.
+ */
+ *state = SSL_STREAM_STATE_WRONG_DIR;
+ } else if (ossl_quic_channel_is_term_any(qc->ch)) {
+ /* Connection already closed. */
+ *state = SSL_STREAM_STATE_CONN_CLOSED;
+ } else if (!is_write && qs->recv_state == QUIC_RSTREAM_STATE_DATA_READ) {
+ /* Application has read a FIN. */
+ *state = SSL_STREAM_STATE_FINISHED;
+ } else if ((!is_write && qs->stop_sending)
+ || (is_write && ossl_quic_stream_send_is_reset(qs))) {
+ /*
+ * Stream has been reset locally. FIN takes precedence over this for the
+ * read case as the application need not care if the stream is reset
+ * after a FIN has been successfully processed.
+ */
+ *state = SSL_STREAM_STATE_RESET_LOCAL;
+ *app_error_code = !is_write
+ ? qs->stop_sending_aec
+ : qs->reset_stream_aec;
+ } else if ((!is_write && ossl_quic_stream_recv_is_reset(qs))
+ || (is_write && qs->peer_stop_sending)) {
+        /* Stream has been reset remotely. */
+ *state = SSL_STREAM_STATE_RESET_REMOTE;
+ *app_error_code = !is_write
+ ? qs->peer_reset_stream_aec
+ : qs->peer_stop_sending_aec;
+ } else if (is_write && ossl_quic_sstream_get_final_size(qs->sstream,
+ &final_size)) {
+ /*
+ * Stream has been finished. Stream reset takes precedence over this for
+ * the write case as peer may not have received all data.
+ */
+ *state = SSL_STREAM_STATE_FINISHED;
+ } else {
+ /* Stream still healthy. */
+ *state = SSL_STREAM_STATE_OK;
+ }
+}
+
+static int quic_get_stream_state(SSL *ssl, int is_write)
+{
+ QCTX ctx;
+ int state;
+
+ if (!expect_quic_with_stream_lock(ssl, /*remote_init=*/-1, /*io=*/0, &ctx))
+ return SSL_STREAM_STATE_NONE;
+
+ quic_classify_stream(ctx.qc, ctx.xso->stream, is_write, &state, NULL);
+ qctx_unlock(&ctx);
+ return state;
+}
+
+int ossl_quic_get_stream_read_state(SSL *ssl)
+{
+ return quic_get_stream_state(ssl, /*is_write=*/0);
+}
+
+/*
+ * SSL_get_stream_write_state
+ * --------------------------
+ */
+int ossl_quic_get_stream_write_state(SSL *ssl)
+{
+ return quic_get_stream_state(ssl, /*is_write=*/1);
+}
+
+/*
+ * SSL_get_stream_read_error_code
+ * ------------------------------
+ */
+static int quic_get_stream_error_code(SSL *ssl, int is_write,
+ uint64_t *app_error_code)
+{
+ QCTX ctx;
+ int state;
+
+ if (!expect_quic_with_stream_lock(ssl, /*remote_init=*/-1, /*io=*/0, &ctx))
+ return -1;
+
+    quic_classify_stream(ctx.qc, ctx.xso->stream, is_write,
+                         &state, app_error_code);
+
+ qctx_unlock(&ctx);
+ switch (state) {
+ case SSL_STREAM_STATE_FINISHED:
+ return 0;
+ case SSL_STREAM_STATE_RESET_LOCAL:
+ case SSL_STREAM_STATE_RESET_REMOTE:
+ return 1;
+ default:
+ return -1;
+ }
+}
+
+int ossl_quic_get_stream_read_error_code(SSL *ssl, uint64_t *app_error_code)
+{
+ return quic_get_stream_error_code(ssl, /*is_write=*/0, app_error_code);
+}
+
+/*
+ * SSL_get_stream_write_error_code
+ * -------------------------------
+ */
+int ossl_quic_get_stream_write_error_code(SSL *ssl, uint64_t *app_error_code)
+{
+ return quic_get_stream_error_code(ssl, /*is_write=*/1, app_error_code);
+}
+
+/*
+ * Write buffer size mutation
+ * --------------------------
+ */
+int ossl_quic_set_write_buffer_size(SSL *ssl, size_t size)
+{
+ int ret = 0;
+ QCTX ctx;
+
+ if (!expect_quic_with_stream_lock(ssl, /*remote_init=*/-1, /*io=*/0, &ctx))
+ return 0;
+
+ if (!ossl_quic_stream_has_send(ctx.xso->stream)) {
+ /* Called on a unidirectional receive-only stream - error. */
+ QUIC_RAISE_NON_NORMAL_ERROR(&ctx, ERR_R_SHOULD_NOT_HAVE_BEEN_CALLED, NULL);
+ goto out;
+ }
+
+ if (!ossl_quic_stream_has_send_buffer(ctx.xso->stream)) {
+ /*
+ * If the stream has a send part but we have disposed of it because we
+ * no longer need it, this is a no-op.
+ */
+ ret = 1;
+ goto out;
+ }
+
+ if (!ossl_quic_sstream_set_buffer_size(ctx.xso->stream->sstream, size)) {
+ QUIC_RAISE_NON_NORMAL_ERROR(&ctx, ERR_R_INTERNAL_ERROR, NULL);
+ goto out;
+ }
+
+ ret = 1;
+
+out:
+ qctx_unlock(&ctx);
+ return ret;
+}
+
+/*
+ * SSL_get_conn_close_info
+ * -----------------------
+ */
+int ossl_quic_get_conn_close_info(SSL *ssl,
+ SSL_CONN_CLOSE_INFO *info,
+ size_t info_len)
+{
+ QCTX ctx;
+ const QUIC_TERMINATE_CAUSE *tc;
+
+ if (!expect_quic_conn_only(ssl, &ctx))
+ return -1;
+
+ tc = ossl_quic_channel_get_terminate_cause(ctx.qc->ch);
+ if (tc == NULL)
+ return 0;
+
+ info->error_code = tc->error_code;
+ info->frame_type = tc->frame_type;
+ info->reason = tc->reason;
+ info->reason_len = tc->reason_len;
+ info->flags = 0;
+ if (!tc->remote)
+ info->flags |= SSL_CONN_CLOSE_FLAG_LOCAL;
+ if (!tc->app)
+ info->flags |= SSL_CONN_CLOSE_FLAG_TRANSPORT;
+ return 1;
+}
+
+/*
+ * SSL_key_update
+ * --------------
+ */
+int ossl_quic_key_update(SSL *ssl, int update_type)
+{
+ QCTX ctx;
+
+ if (!expect_quic_conn_only(ssl, &ctx))
+ return 0;
+
+ switch (update_type) {
+ case SSL_KEY_UPDATE_NOT_REQUESTED:
+        /*
+         * QUIC signals a peer key update implicitly by triggering a local
+         * spontaneous TXKU. Silently upgrade this to SSL_KEY_UPDATE_REQUESTED.
+         */
+ case SSL_KEY_UPDATE_REQUESTED:
+ break;
+
+ default:
+ QUIC_RAISE_NON_NORMAL_ERROR(&ctx, ERR_R_PASSED_INVALID_ARGUMENT, NULL);
+ return 0;
+ }
+
+ qctx_lock(&ctx);
+
+ /* Attempt to perform a TXKU. */
+ if (!ossl_quic_channel_trigger_txku(ctx.qc->ch)) {
+ QUIC_RAISE_NON_NORMAL_ERROR(&ctx, SSL_R_TOO_MANY_KEY_UPDATES, NULL);
+ qctx_unlock(&ctx);
+ return 0;
+ }
+
+ qctx_unlock(&ctx);
+ return 1;
+}
+
+/*
+ * SSL_get_key_update_type
+ * -----------------------
+ */
+int ossl_quic_get_key_update_type(const SSL *s)
+{
+ /*
+ * We always handle key updates immediately so a key update is never
+ * pending.
+ */
+ return SSL_KEY_UPDATE_NONE;
+}
+
+/**
+ * @brief Allocates an SSL object for a user from a QUIC channel.
+ *
+ * This function creates a new QUIC_CONNECTION object based on an incoming
+ * connection associated with the provided QUIC_LISTENER. If the connection
+ * creation fails, the function returns NULL. Otherwise, it returns a pointer
+ * to the SSL object associated with the newly created connection.
+ *
+ * Note: This function is a port callback registered from
+ * ossl_quic_new_listener and ossl_quic_new_listener_from; it allows the
+ * user_ssl object to be pre-allocated when a channel is created rather than
+ * when it is accepted.
+ *
+ * @param ch Pointer to the QUIC_CHANNEL representing the incoming connection.
+ * @param arg Pointer to a QUIC_LISTENER used to create the connection.
+ *
+ * @return Pointer to the SSL object on success, or NULL on failure.
+ */
+static SSL *alloc_port_user_ssl(QUIC_CHANNEL *ch, void *arg)
+{
+ QUIC_LISTENER *ql = arg;
+ QUIC_CONNECTION *qc = create_qc_from_incoming_conn(ql, ch);
+
+ return (qc == NULL) ? NULL : &qc->obj.ssl;
+}
+
+/*
+ * QUIC Front-End I/O API: Listeners
+ * =================================
+ */
+
+/*
+ * SSL_new_listener
+ * ----------------
+ */
+SSL *ossl_quic_new_listener(SSL_CTX *ctx, uint64_t flags)
+{
+ QUIC_LISTENER *ql = NULL;
+ QUIC_ENGINE_ARGS engine_args = {0};
+ QUIC_PORT_ARGS port_args = {0};
+
+ if ((ql = OPENSSL_zalloc(sizeof(*ql))) == NULL) {
+ QUIC_RAISE_NON_NORMAL_ERROR(NULL, ERR_R_CRYPTO_LIB, NULL);
+ goto err;
+ }
+
+#if defined(OPENSSL_THREADS)
+ if ((ql->mutex = ossl_crypto_mutex_new()) == NULL) {
+ QUIC_RAISE_NON_NORMAL_ERROR(NULL, ERR_R_CRYPTO_LIB, NULL);
+ goto err;
+ }
+#endif
+
+ engine_args.libctx = ctx->libctx;
+ engine_args.propq = ctx->propq;
+#if defined(OPENSSL_THREADS)
+ engine_args.mutex = ql->mutex;
+#endif
+
+ if (need_notifier_for_domain_flags(ctx->domain_flags))
+ engine_args.reactor_flags |= QUIC_REACTOR_FLAG_USE_NOTIFIER;
+
+ if ((ql->engine = ossl_quic_engine_new(&engine_args)) == NULL) {
+ QUIC_RAISE_NON_NORMAL_ERROR(NULL, ERR_R_INTERNAL_ERROR, NULL);
+ goto err;
+ }
+
+ port_args.channel_ctx = ctx;
+ port_args.is_multi_conn = 1;
+ port_args.get_conn_user_ssl = alloc_port_user_ssl;
+ port_args.user_ssl_arg = ql;
+ if ((flags & SSL_LISTENER_FLAG_NO_VALIDATE) == 0)
+ port_args.do_addr_validation = 1;
+ ql->port = ossl_quic_engine_create_port(ql->engine, &port_args);
+ if (ql->port == NULL) {
+ QUIC_RAISE_NON_NORMAL_ERROR(NULL, ERR_R_INTERNAL_ERROR, NULL);
+ goto err;
+ }
+
+ /* TODO(QUIC FUTURE): Implement SSL_LISTENER_FLAG_NO_ACCEPT */
+
+ ossl_quic_port_set_allow_incoming(ql->port, 1);
+
+ /* Initialise the QUIC_LISTENER's object header. */
+ if (!ossl_quic_obj_init(&ql->obj, ctx, SSL_TYPE_QUIC_LISTENER, NULL,
+ ql->engine, ql->port))
+ goto err;
+
+ return &ql->obj.ssl;
+
+err:
+    if (ql != NULL) {
+        ossl_quic_engine_free(ql->engine);
+#if defined(OPENSSL_THREADS)
+        ossl_crypto_mutex_free(&ql->mutex);
+#endif
+    }
+ OPENSSL_free(ql);
+ return NULL;
+}
+
+/*
+ * SSL_new_listener_from
+ * ---------------------
+ */
+SSL *ossl_quic_new_listener_from(SSL *ssl, uint64_t flags)
+{
+ QCTX ctx;
+ QUIC_LISTENER *ql = NULL;
+ QUIC_PORT_ARGS port_args = {0};
+
+ if (!expect_quic_domain(ssl, &ctx))
+ return NULL;
+
+ if (!SSL_up_ref(&ctx.qd->obj.ssl))
+ return NULL;
+
+ qctx_lock(&ctx);
+
+ if ((ql = OPENSSL_zalloc(sizeof(*ql))) == NULL) {
+ QUIC_RAISE_NON_NORMAL_ERROR(NULL, ERR_R_CRYPTO_LIB, NULL);
+ goto err;
+ }
+
+ port_args.channel_ctx = ssl->ctx;
+ port_args.is_multi_conn = 1;
+ port_args.get_conn_user_ssl = alloc_port_user_ssl;
+ port_args.user_ssl_arg = ql;
+ if ((flags & SSL_LISTENER_FLAG_NO_VALIDATE) == 0)
+ port_args.do_addr_validation = 1;
+ ql->port = ossl_quic_engine_create_port(ctx.qd->engine, &port_args);
+ if (ql->port == NULL) {
+ QUIC_RAISE_NON_NORMAL_ERROR(NULL, ERR_R_INTERNAL_ERROR, NULL);
+ goto err;
+ }
+
+ ql->domain = ctx.qd;
+ ql->engine = ctx.qd->engine;
+#if defined(OPENSSL_THREADS)
+ ql->mutex = ctx.qd->mutex;
+#endif
+
+    /*
+     * TODO(QUIC FUTURE): Implement SSL_LISTENER_FLAG_NO_ACCEPT
+     * Given that we have APIs to create client SSL objects from server SSL
+     * objects (see SSL_new_from_listener), we aspire to a flag that allows
+     * the creation of the latter while not permitting any connections to be
+     * accepted. This is a placeholder for the implementation of that flag.
+     */
+
+ ossl_quic_port_set_allow_incoming(ql->port, 1);
+
+ /* Initialise the QUIC_LISTENER's object header. */
+ if (!ossl_quic_obj_init(&ql->obj, ssl->ctx, SSL_TYPE_QUIC_LISTENER,
+ &ctx.qd->obj.ssl, NULL, ql->port))
+ goto err;
+
+ qctx_unlock(&ctx);
+ return &ql->obj.ssl;
+
+err:
+ if (ql != NULL)
+ ossl_quic_port_free(ql->port);
+
+ OPENSSL_free(ql);
+ qctx_unlock(&ctx);
+ SSL_free(&ctx.qd->obj.ssl);
+
+ return NULL;
+}
+
+/*
+ * SSL_new_from_listener
+ * ---------------------
+ * code here is derived from ossl_quic_new(). The `ssl` argument is
+ * a listener object which already comes with QUIC port/engine. The newly
+ * created QUIC connection object (QCSO) is going to share the port/engine
+ * with listener (`ssl`). The `ssl` also becomes a parent of QCSO created
+ * by this function. The caller uses QCSO instance to connect to
+ * remote QUIC server.
+ *
+ * The QCSO created here requires us to also create a channel so we
+ * can connect to remote server.
+ */
+SSL *ossl_quic_new_from_listener(SSL *ssl, uint64_t flags)
+{
+ QCTX ctx;
+ QUIC_CONNECTION *qc = NULL;
+ QUIC_LISTENER *ql;
+ SSL_CONNECTION *sc = NULL;
+
+ if (flags != 0)
+ return NULL;
+
+ if (!expect_quic_listener(ssl, &ctx))
+ return NULL;
+
+ if (!SSL_up_ref(&ctx.ql->obj.ssl))
+ return NULL;
+
+ qctx_lock(&ctx);
+
+ ql = ctx.ql;
+
+    /*
+     * Listener (server) contexts don't typically allocate a token cache
+     * because they have no need to store tokens, but here we are using a
+     * server side ctx as a client, so we should allocate one now.
+     */
+ if (ssl->ctx->tokencache == NULL)
+ if ((ssl->ctx->tokencache = ossl_quic_new_token_store()) == NULL)
+ goto err;
+
+ if ((qc = OPENSSL_zalloc(sizeof(*qc))) == NULL) {
+ QUIC_RAISE_NON_NORMAL_ERROR(NULL, ERR_R_CRYPTO_LIB, NULL);
+ goto err;
+ }
+
+    /*
+     * NOTE: Setting a listener here is needed so qc_cleanup() does the right
+     * thing; pointing qc->listener at ql avoids premature destruction of the
+     * port in qc_cleanup().
+     */
+ qc->listener = ql;
+ qc->engine = ql->engine;
+ qc->port = ql->port;
+#if defined(OPENSSL_THREADS)
+ /* this is the engine mutex */
+ qc->mutex = ql->mutex;
+#endif
+#if !defined(OPENSSL_NO_QUIC_THREAD_ASSIST)
+ qc->is_thread_assisted
+ = ((ql->obj.domain_flags & SSL_DOMAIN_FLAG_THREAD_ASSISTED) != 0);
+#endif
+
+ /* Create the handshake layer. */
+ qc->tls = ossl_ssl_connection_new_int(ql->obj.ssl.ctx, NULL, TLS_method());
+ if (qc->tls == NULL || (sc = SSL_CONNECTION_FROM_SSL(qc->tls)) == NULL) {
+ QUIC_RAISE_NON_NORMAL_ERROR(NULL, ERR_R_INTERNAL_ERROR, NULL);
+ goto err;
+ }
+ sc->s3.flags |= TLS1_FLAGS_QUIC | TLS1_FLAGS_QUIC_INTERNAL;
+
+ qc->default_ssl_options = OSSL_QUIC_PERMITTED_OPTIONS;
+ qc->last_error = SSL_ERROR_NONE;
+
+    /*
+     * Create the channel. This is a QCSO, so we don't expect to accept
+     * connections on it. On success the channel assumes ownership of tls;
+     * we need to keep a reference for qc.
+     */
+    qc->ch = ossl_quic_port_create_outgoing(qc->port, qc->tls);
+    if (qc->ch == NULL) {
+        QUIC_RAISE_NON_NORMAL_ERROR(NULL, ERR_R_INTERNAL_ERROR, NULL);
+        goto err;
+    }
+
+ ossl_quic_channel_set_msg_callback(qc->ch, ql->obj.ssl.ctx->msg_callback, &qc->obj.ssl);
+ ossl_quic_channel_set_msg_callback_arg(qc->ch, ql->obj.ssl.ctx->msg_callback_arg);
+
+    /*
+     * We deliberately pass NULL for engine and port, because we don't want
+     * to turn the QCSO we create here into an event leader or a port leader.
+     * Both those roles are already occupied by the listener (`ssl`) we use
+     * to create the new QCSO here.
+     */
+ if (!ossl_quic_obj_init(&qc->obj, ql->obj.ssl.ctx,
+ SSL_TYPE_QUIC_CONNECTION,
+ &ql->obj.ssl, NULL, NULL)) {
+ QUIC_RAISE_NON_NORMAL_ERROR(NULL, ERR_R_INTERNAL_ERROR, NULL);
+ goto err;
+ }
+
+ /* Initialise libssl APL-related state. */
+ qc->default_stream_mode = SSL_DEFAULT_STREAM_MODE_AUTO_BIDI;
+ qc->default_ssl_mode = qc->obj.ssl.ctx->mode;
+ qc->default_ssl_options = qc->obj.ssl.ctx->options & OSSL_QUIC_PERMITTED_OPTIONS;
+ qc->incoming_stream_policy = SSL_INCOMING_STREAM_POLICY_AUTO;
+ qc->last_error = SSL_ERROR_NONE;
+
+ qc_update_reject_policy(qc);
+
+ qctx_unlock(&ctx);
+
+ return &qc->obj.ssl;
+
+err:
+ if (qc != NULL) {
+ qc_cleanup(qc, /* have_lock= */ 0);
+ OPENSSL_free(qc);
+ }
+ qctx_unlock(&ctx);
+ SSL_free(&ctx.ql->obj.ssl);
+
+ return NULL;
+}
+
+/*
+ * SSL_listen
+ * ----------
+ */
+QUIC_NEEDS_LOCK
+static int ql_listen(QUIC_LISTENER *ql)
+{
+ if (ql->listening)
+ return 1;
+
+ ossl_quic_port_set_allow_incoming(ql->port, 1);
+ ql->listening = 1;
+ return 1;
+}
+
+QUIC_TAKES_LOCK
+int ossl_quic_listen(SSL *ssl)
+{
+ QCTX ctx;
+ int ret;
+
+ if (!expect_quic_listener(ssl, &ctx))
+ return 0;
+
+ qctx_lock_for_io(&ctx);
+
+ ret = ql_listen(ctx.ql);
+
+ qctx_unlock(&ctx);
+ return ret;
+}
+
+/*
+ * SSL_accept_connection
+ * ---------------------
+ */
+static int quic_accept_connection_wait(void *arg)
+{
+ QUIC_PORT *port = arg;
+
+ if (!ossl_quic_port_is_running(port))
+ return -1;
+
+ if (ossl_quic_port_have_incoming(port))
+ return 1;
+
+ return 0;
+}
+
+QUIC_TAKES_LOCK
+SSL *ossl_quic_accept_connection(SSL *ssl, uint64_t flags)
+{
+ int ret;
+ QCTX ctx;
+ SSL *conn_ssl = NULL;
+ SSL_CONNECTION *conn = NULL;
+ QUIC_CHANNEL *new_ch = NULL;
+ QUIC_CONNECTION *qc;
+ int no_block = ((flags & SSL_ACCEPT_CONNECTION_NO_BLOCK) != 0);
+
+ if (!expect_quic_listener(ssl, &ctx))
+ return NULL;
+
+ qctx_lock_for_io(&ctx);
+
+ if (!ql_listen(ctx.ql))
+ goto out;
+
+ /* Wait for an incoming connection if needed. */
+ new_ch = ossl_quic_port_pop_incoming(ctx.ql->port);
+ if (new_ch == NULL && ossl_quic_port_is_running(ctx.ql->port)) {
+ if (!no_block && qctx_blocking(&ctx)) {
+ ret = block_until_pred(&ctx, quic_accept_connection_wait,
+ ctx.ql->port, 0);
+ if (ret < 1)
+ goto out;
+ } else {
+ qctx_maybe_autotick(&ctx);
+ }
+
+ if (!ossl_quic_port_is_running(ctx.ql->port))
+ goto out;
+
+ new_ch = ossl_quic_port_pop_incoming(ctx.ql->port);
+ }
+
+ if (new_ch == NULL && ossl_quic_port_is_running(ctx.ql->port)) {
+        /* Still no connection queued; give the reactor a tick and retry. */
+ ossl_quic_reactor_tick(ossl_quic_engine_get0_reactor(ctx.ql->engine), 0);
+
+ new_ch = ossl_quic_port_pop_incoming(ctx.ql->port);
+ }
+
+    /*
+     * port_make_channel pre-allocates our user_ssl for us for each newly
+     * created channel, so once we pop the new channel from the port above
+     * we just need to extract it.
+     */
+ if (new_ch == NULL
+ || (conn_ssl = ossl_quic_channel_get0_tls(new_ch)) == NULL
+ || (conn = SSL_CONNECTION_FROM_SSL(conn_ssl)) == NULL
+ || (conn_ssl = SSL_CONNECTION_GET_USER_SSL(conn)) == NULL)
+ goto out;
+ qc = (QUIC_CONNECTION *)conn_ssl;
+ qc->listener = ctx.ql;
+ qc->pending = 0;
+ if (!SSL_up_ref(&ctx.ql->obj.ssl)) {
+ SSL_free(conn_ssl);
+ SSL_free(ossl_quic_channel_get0_tls(new_ch));
+ conn_ssl = NULL;
+ }
+
+out:
+ qctx_unlock(&ctx);
+ return conn_ssl;
+}
+
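+/*
+ * Illustrative server-side usage of the public wrappers (not part of this
+ * file; handle() is a placeholder):
+ *
+ *     SSL *listener = SSL_new_listener(ctx, 0);
+ *
+ *     if (listener != NULL && SSL_listen(listener))
+ *         for (;;) {
+ *             SSL *conn = SSL_accept_connection(listener, 0);
+ *
+ *             if (conn == NULL)
+ *                 break;
+ *             handle(conn);
+ *         }
+ */
+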
+static QUIC_CONNECTION *create_qc_from_incoming_conn(QUIC_LISTENER *ql, QUIC_CHANNEL *ch)
+{
+ QUIC_CONNECTION *qc = NULL;
+
+ if ((qc = OPENSSL_zalloc(sizeof(*qc))) == NULL) {
+ QUIC_RAISE_NON_NORMAL_ERROR(NULL, ERR_R_CRYPTO_LIB, NULL);
+ goto err;
+ }
+
+ if (!ossl_quic_obj_init(&qc->obj, ql->obj.ssl.ctx,
+ SSL_TYPE_QUIC_CONNECTION,
+ &ql->obj.ssl, NULL, NULL)) {
+ QUIC_RAISE_NON_NORMAL_ERROR(NULL, ERR_R_INTERNAL_ERROR, NULL);
+ goto err;
+ }
+
+ ossl_quic_channel_get_peer_addr(ch, &qc->init_peer_addr); /* best effort */
+ qc->pending = 1;
+ qc->engine = ql->engine;
+ qc->port = ql->port;
+ qc->ch = ch;
+#if defined(OPENSSL_THREADS)
+ qc->mutex = ql->mutex;
+#endif
+ qc->tls = ossl_quic_channel_get0_tls(ch);
+ qc->started = 1;
+ qc->as_server = 1;
+ qc->as_server_state = 1;
+ qc->default_stream_mode = SSL_DEFAULT_STREAM_MODE_AUTO_BIDI;
+ qc->default_ssl_options = ql->obj.ssl.ctx->options & OSSL_QUIC_PERMITTED_OPTIONS;
+ qc->incoming_stream_policy = SSL_INCOMING_STREAM_POLICY_AUTO;
+ qc->last_error = SSL_ERROR_NONE;
+ qc_update_reject_policy(qc);
+ return qc;
+
+err:
+ OPENSSL_free(qc);
+ return NULL;
+}
+
+DEFINE_LHASH_OF_EX(QUIC_TOKEN);
+
+struct ssl_token_store_st {
+ LHASH_OF(QUIC_TOKEN) *cache;
+ CRYPTO_REF_COUNT references;
+ CRYPTO_MUTEX *mutex;
+};
+
+static unsigned long quic_token_hash(const QUIC_TOKEN *item)
+{
+ return (unsigned long)ossl_fnv1a_hash(item->hashkey, item->hashkey_len);
+}
+
+static int quic_token_cmp(const QUIC_TOKEN *a, const QUIC_TOKEN *b)
+{
+ if (a->hashkey_len != b->hashkey_len)
+ return 1;
+ return memcmp(a->hashkey, b->hashkey, a->hashkey_len);
+}
+
+SSL_TOKEN_STORE *ossl_quic_new_token_store(void)
+{
+ int ok = 0;
+ SSL_TOKEN_STORE *newcache = OPENSSL_zalloc(sizeof(SSL_TOKEN_STORE));
+
+ if (newcache == NULL)
+ goto out;
+
+ newcache->cache = lh_QUIC_TOKEN_new(quic_token_hash, quic_token_cmp);
+ if (newcache->cache == NULL)
+ goto out;
+
+#if defined(OPENSSL_THREADS)
+ if ((newcache->mutex = ossl_crypto_mutex_new()) == NULL)
+ goto out;
+#endif
+
+ if (!CRYPTO_NEW_REF(&newcache->references, 1))
+ goto out;
+
+ ok = 1;
+out:
+ if (!ok) {
+ ossl_quic_free_token_store(newcache);
+ newcache = NULL;
+ }
+ return newcache;
+}
+
+static void free_this_token(QUIC_TOKEN *tok)
+{
+ ossl_quic_free_peer_token(tok);
+}
+
+void ossl_quic_free_token_store(SSL_TOKEN_STORE *hdl)
+{
+ int refs;
+
+ if (hdl == NULL)
+ return;
+
+ if (!CRYPTO_DOWN_REF(&hdl->references, &refs))
+ return;
+
+ if (refs > 0)
+ return;
+
+ /* last reference, we can clean up */
+ ossl_crypto_mutex_free(&hdl->mutex);
+ lh_QUIC_TOKEN_doall(hdl->cache, free_this_token);
+ lh_QUIC_TOKEN_free(hdl->cache);
+ CRYPTO_FREE_REF(&hdl->references);
+ OPENSSL_free(hdl);
+}
+
+/**
+ * @brief build a new QUIC_TOKEN
+ *
+ * This function creates a new token storage structure for saving in our
+ * tokencache
+ *
+ * In an effort to make allocation and freeing of these tokens a bit faster,
+ * we do them in a single allocation in this format:
+ * +---------------+ --\
+ * | hashkey * |---| |
+ * | hashkey_len | | | QUIC_TOKEN
+ * | token * |---|--| |
+ * | token_len | | | |
+ * +---------------+<--| | --/
+ * | hashkey buf | |
+ * | | |
+ * |---------------|<-----|
+ * | token buf |
+ * | |
+ * +---------------+
+ *
+ * @param peer - the peer address that sent the token
+ * @param token - the buffer holding the token
+ * @param token_len - the size of token
+ *
+ * @returns a QUIC_TOKEN pointer or NULL on error
+ */
+static QUIC_TOKEN *ossl_quic_build_new_token(BIO_ADDR *peer, uint8_t *token,
+ size_t token_len)
+{
+ QUIC_TOKEN *new_token;
+ size_t hashkey_len = 0;
+ size_t addr_len = 0;
+ int family;
+ unsigned short port;
+ int *famptr;
+ unsigned short *portptr;
+ uint8_t *addrptr;
+
+ if ((token != NULL && token_len == 0) || (token == NULL && token_len != 0))
+ return NULL;
+
+ if (!BIO_ADDR_rawaddress(peer, NULL, &addr_len))
+ return NULL;
+ family = BIO_ADDR_family(peer);
+ port = BIO_ADDR_rawport(peer);
+
+ hashkey_len += sizeof(int); /* hashkey(family) */
+ hashkey_len += sizeof(unsigned short); /* hashkey(port) */
+ hashkey_len += addr_len; /* hashkey(address) */
+
+ new_token = OPENSSL_zalloc(sizeof(QUIC_TOKEN) + hashkey_len + token_len);
+ if (new_token == NULL)
+ return NULL;
+
+ if (!CRYPTO_NEW_REF(&new_token->references, 1)) {
+ OPENSSL_free(new_token);
+ return NULL;
+ }
+
+ new_token->hashkey_len = hashkey_len;
+ /* hashkey is allocated inline, immediately after the QUIC_TOKEN struct */
+ new_token->hashkey = (uint8_t *)(new_token + 1);
+ /* token buffer follows the hashkey in the inline allocation */
+ new_token->token = new_token->hashkey + hashkey_len;
+ new_token->token_len = token_len;
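+    /* hashkey layout: [int family][unsigned short port][raw address bytes] */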
+ famptr = (int *)new_token->hashkey;
+ portptr = (unsigned short *)(famptr + 1);
+ addrptr = (uint8_t *)(portptr + 1);
+ *famptr = family;
+ *portptr = port;
+ if (!BIO_ADDR_rawaddress(peer, addrptr, NULL)) {
+ ossl_quic_free_peer_token(new_token);
+ return NULL;
+ }
+ if (token != NULL)
+ memcpy(new_token->token, token, token_len);
+ return new_token;
+}
+
+int ossl_quic_set_peer_token(SSL_CTX *ctx, BIO_ADDR *peer,
+ const uint8_t *token, size_t token_len)
+{
+ SSL_TOKEN_STORE *c = ctx->tokencache;
+ QUIC_TOKEN *tok, *old = NULL;
+
+ if (ctx->tokencache == NULL)
+ return 0;
+
+ tok = ossl_quic_build_new_token(peer, (uint8_t *)token, token_len);
+ if (tok == NULL)
+ return 0;
+
+ /* we might be sharing this cache, lock it */
+ ossl_crypto_mutex_lock(c->mutex);
+
+ old = lh_QUIC_TOKEN_retrieve(c->cache, tok);
+ if (old != NULL) {
+ lh_QUIC_TOKEN_delete(c->cache, old);
+ ossl_quic_free_peer_token(old);
+ }
+ lh_QUIC_TOKEN_insert(c->cache, tok);
+
+ ossl_crypto_mutex_unlock(c->mutex);
+ return 1;
+}
+
+int ossl_quic_get_peer_token(SSL_CTX *ctx, BIO_ADDR *peer,
+ QUIC_TOKEN **token)
+{
+ SSL_TOKEN_STORE *c = ctx->tokencache;
+ QUIC_TOKEN *key = NULL;
+ QUIC_TOKEN *tok = NULL;
+ int ret;
+ int rc = 0;
+
+ if (c == NULL)
+ return 0;
+
+ key = ossl_quic_build_new_token(peer, NULL, 0);
+ if (key == NULL)
+ return 0;
+
+ ossl_crypto_mutex_lock(c->mutex);
+ tok = lh_QUIC_TOKEN_retrieve(c->cache, key);
+ if (tok != NULL) {
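+        /*
+         * Hand the caller its own reference; it must be released with
+         * ossl_quic_free_peer_token().
+         */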
+ *token = tok;
+ CRYPTO_UP_REF(&tok->references, &ret);
+ rc = 1;
+ }
+
+ ossl_crypto_mutex_unlock(c->mutex);
+ ossl_quic_free_peer_token(key);
+ return rc;
+}
+
+void ossl_quic_free_peer_token(QUIC_TOKEN *token)
+{
+    int refs = 0;
+
+    if (token == NULL)
+        return;
+
+    if (!CRYPTO_DOWN_REF(&token->references, &refs))
+ return;
+
+ if (refs > 0)
+ return;
+
+ CRYPTO_FREE_REF(&token->references);
+ OPENSSL_free(token);
+}
+
+/*
+ * SSL_get_accept_connection_queue_len
+ * -----------------------------------
+ */
+QUIC_TAKES_LOCK
+size_t ossl_quic_get_accept_connection_queue_len(SSL *ssl)
+{
+ QCTX ctx;
+ int ret;
+
+ if (!expect_quic_listener(ssl, &ctx))
+ return 0;
+
+ qctx_lock(&ctx);
+
+ ret = ossl_quic_port_get_num_incoming_channels(ctx.ql->port);
+
+ qctx_unlock(&ctx);
+ return ret;
+}
+
+/*
+ * QUIC Front-End I/O API: Domains
+ * ===============================
+ */
+
+/*
+ * SSL_new_domain
+ * --------------
+ */
+SSL *ossl_quic_new_domain(SSL_CTX *ctx, uint64_t flags)
+{
+ QUIC_DOMAIN *qd = NULL;
+ QUIC_ENGINE_ARGS engine_args = {0};
+ uint64_t domain_flags;
+
+    /*
+     * If the caller specifies any threading model flag, it replaces the
+     * SSL_CTX defaults; otherwise the given flags are merged with them.
+     */
+    if ((flags & (SSL_DOMAIN_FLAG_SINGLE_THREAD
+                  | SSL_DOMAIN_FLAG_MULTI_THREAD
+                  | SSL_DOMAIN_FLAG_THREAD_ASSISTED)) != 0)
+        domain_flags = flags;
+    else
+        domain_flags = ctx->domain_flags | flags;
+
+ if (!ossl_adjust_domain_flags(domain_flags, &domain_flags))
+ return NULL;
+
+ if ((qd = OPENSSL_zalloc(sizeof(*qd))) == NULL) {
+ QUIC_RAISE_NON_NORMAL_ERROR(NULL, ERR_R_CRYPTO_LIB, NULL);
+ return NULL;
+ }
+
+#if defined(OPENSSL_THREADS)
+ if ((qd->mutex = ossl_crypto_mutex_new()) == NULL) {
+ QUIC_RAISE_NON_NORMAL_ERROR(NULL, ERR_R_CRYPTO_LIB, NULL);
+ goto err;
+ }
+#endif
+
+ engine_args.libctx = ctx->libctx;
+ engine_args.propq = ctx->propq;
+#if defined(OPENSSL_THREADS)
+ engine_args.mutex = qd->mutex;
+#endif
+
+ if (need_notifier_for_domain_flags(domain_flags))
+ engine_args.reactor_flags |= QUIC_REACTOR_FLAG_USE_NOTIFIER;
+
+ if ((qd->engine = ossl_quic_engine_new(&engine_args)) == NULL) {
+ QUIC_RAISE_NON_NORMAL_ERROR(NULL, ERR_R_INTERNAL_ERROR, NULL);
+ goto err;
+ }
+
+ /* Initialise the QUIC_DOMAIN's object header. */
+ if (!ossl_quic_obj_init(&qd->obj, ctx, SSL_TYPE_QUIC_DOMAIN, NULL,
+ qd->engine, NULL))
+ goto err;
+
+ ossl_quic_obj_set_domain_flags(&qd->obj, domain_flags);
+ return &qd->obj.ssl;
+
+err:
+ ossl_quic_engine_free(qd->engine);
+#if defined(OPENSSL_THREADS)
+ ossl_crypto_mutex_free(&qd->mutex);
+#endif
+ OPENSSL_free(qd);
+ return NULL;
+}
+
+/*
+ * QUIC Front-End I/O API: SSL_CTX Management
+ * ==========================================
+ */
+
+long ossl_quic_ctx_ctrl(SSL_CTX *ctx, int cmd, long larg, void *parg)
+{
+ switch (cmd) {
+ default:
+ return ssl3_ctx_ctrl(ctx, cmd, larg, parg);
+ }
+}
+
+long ossl_quic_callback_ctrl(SSL *s, int cmd, void (*fp) (void))
+{
+ QCTX ctx;
+
+ if (!expect_quic_conn_only(s, &ctx))
+ return 0;
+
+ switch (cmd) {
+ case SSL_CTRL_SET_MSG_CALLBACK:
+ ossl_quic_channel_set_msg_callback(ctx.qc->ch, (ossl_msg_cb)fp,
+ &ctx.qc->obj.ssl);
+        /* This callback also needs to be set on the internal SSL object. */
+        return ssl3_callback_ctrl(ctx.qc->tls, cmd, fp);
+
+ default:
+ /* Probably a TLS related ctrl. Defer to our internal SSL object */
+ return ssl3_callback_ctrl(ctx.qc->tls, cmd, fp);
+ }
+}
+
+long ossl_quic_ctx_callback_ctrl(SSL_CTX *ctx, int cmd, void (*fp) (void))
+{
+ return ssl3_ctx_callback_ctrl(ctx, cmd, fp);
+}
+
+int ossl_quic_renegotiate_check(SSL *ssl, int initok)
+{
+ /* We never do renegotiation. */
+ return 0;
+}
+
+const SSL_CIPHER *ossl_quic_get_cipher_by_char(const unsigned char *p)
+{
+ const SSL_CIPHER *ciph = ssl3_get_cipher_by_char(p);
+
+ if ((ciph->algorithm2 & SSL_QUIC) == 0)
+ return NULL;
+
+ return ciph;
+}
+
+/*
+ * These functions define the TLSv1.2 (and below) ciphers that are supported by
+ * the SSL_METHOD. Since QUIC only supports TLSv1.3 we don't support any.
+ */
+
+int ossl_quic_num_ciphers(void)
+{
+ return 0;
+}
+
+const SSL_CIPHER *ossl_quic_get_cipher(unsigned int u)
+{
+ return NULL;
+}
+
+/*
+ * SSL_get_shutdown()
+ * ------------------
+ */
+int ossl_quic_get_shutdown(const SSL *s)
+{
+ QCTX ctx;
+ int shut = 0;
+
+ if (!expect_quic_conn_only(s, &ctx))
+ return 0;
+
+ if (ossl_quic_channel_is_term_any(ctx.qc->ch)) {
+ shut |= SSL_SENT_SHUTDOWN;
+ if (!ossl_quic_channel_is_closing(ctx.qc->ch))
+ shut |= SSL_RECEIVED_SHUTDOWN;
+ }
+
+ return shut;
+}
+
+/*
+ * QUIC Polling Support APIs
+ * =========================
+ */
+
+/* Do we have the R (read) condition? */
+QUIC_NEEDS_LOCK
+static int test_poll_event_r(QUIC_XSO *xso)
+{
+ int fin = 0;
+ size_t avail = 0;
+
+    /*
+     * If a stream has had the FIN bit set on the last packet received, then
+     * we need to return 1 here to raise SSL_POLL_EVENT_R, so that the stream
+     * can have its completion detected and be closed gracefully by an
+     * application. However, if the client reads the data via SSL_read[_ex],
+     * that API provides no stream status; as a result the stream state moves
+     * to QUIC_RSTREAM_STATE_DATA_READ and the receive buffer, which stored
+     * the FIN state, is freed, so it is not directly knowable here. Instead,
+     * check for the stream state being QUIC_RSTREAM_STATE_DATA_READ, which
+     * is only set if the last stream frame received had the FIN bit set and
+     * the client read the data. This catches our poll/read/poll case.
+     */
+ if (xso->stream->recv_state == QUIC_RSTREAM_STATE_DATA_READ)
+ return 1;
+
+ return ossl_quic_stream_has_recv_buffer(xso->stream)
+ && ossl_quic_rstream_available(xso->stream->rstream, &avail, &fin)
+ && (avail > 0 || (fin && !xso->retired_fin));
+}
+
+/* Do we have the ER (exception: read) condition? */
+QUIC_NEEDS_LOCK
+static int test_poll_event_er(QUIC_XSO *xso)
+{
+ return ossl_quic_stream_has_recv(xso->stream)
+ && ossl_quic_stream_recv_is_reset(xso->stream)
+ && !xso->retired_fin;
+}
+
+/* Do we have the W (write) condition? */
+QUIC_NEEDS_LOCK
+static int test_poll_event_w(QUIC_XSO *xso)
+{
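+    /*
+     * Writeable only if: the connection is not shutting down, the stream
+     * still has a send buffer with room available, no final size (FIN) has
+     * been set, stream-level flow control credit remains (CWM above the
+     * current send size) and connection mutation is allowed.
+     */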
+ return !xso->conn->shutting_down
+ && ossl_quic_stream_has_send_buffer(xso->stream)
+ && ossl_quic_sstream_get_buffer_avail(xso->stream->sstream)
+ && !ossl_quic_sstream_get_final_size(xso->stream->sstream, NULL)
+ && ossl_quic_txfc_get_cwm(&xso->stream->txfc)
+ > ossl_quic_sstream_get_cur_size(xso->stream->sstream)
+ && quic_mutation_allowed(xso->conn, /*req_active=*/1);
+}
+
+/* Do we have the EW (exception: write) condition? */
+QUIC_NEEDS_LOCK
+static int test_poll_event_ew(QUIC_XSO *xso)
+{
+ return ossl_quic_stream_has_send(xso->stream)
+ && xso->stream->peer_stop_sending
+ && !xso->requested_reset
+ && !xso->conn->shutting_down;
+}
+
+/* Do we have the EC (exception: connection) condition? */
+QUIC_NEEDS_LOCK
+static int test_poll_event_ec(QUIC_CONNECTION *qc)
+{
+ return ossl_quic_channel_is_term_any(qc->ch);
+}
+
+/* Do we have the ECD (exception: connection drained) condition? */
+QUIC_NEEDS_LOCK
+static int test_poll_event_ecd(QUIC_CONNECTION *qc)
+{
+ return ossl_quic_channel_is_terminated(qc->ch);
+}
+
+/* Do we have the IS (incoming: stream) condition? */
+QUIC_NEEDS_LOCK
+static int test_poll_event_is(QUIC_CONNECTION *qc, int is_uni)
+{
+ return ossl_quic_stream_map_get_accept_queue_len(ossl_quic_channel_get_qsm(qc->ch),
+ is_uni);
+}
+
+/* Do we have the OS (outgoing: stream) condition? */
+QUIC_NEEDS_LOCK
+static int test_poll_event_os(QUIC_CONNECTION *qc, int is_uni)
+{
+ /* Is it currently possible for us to make an outgoing stream? */
+ return quic_mutation_allowed(qc, /*req_active=*/1)
+ && ossl_quic_channel_get_local_stream_count_avail(qc->ch, is_uni) > 0;
+}
+
+/* Do we have the EL (exception: listener) condition? */
+QUIC_NEEDS_LOCK
+static int test_poll_event_el(QUIC_LISTENER *ql)
+{
+ return !ossl_quic_port_is_running(ql->port);
+}
+
+/* Do we have the IC (incoming: connection) condition? */
+QUIC_NEEDS_LOCK
+static int test_poll_event_ic(QUIC_LISTENER *ql)
+{
+ return ossl_quic_port_get_num_incoming_channels(ql->port) > 0;
+}
+
+QUIC_TAKES_LOCK
+int ossl_quic_conn_poll_events(SSL *ssl, uint64_t events, int do_tick,
+ uint64_t *p_revents)
+{
+ QCTX ctx;
+ uint64_t revents = 0;
+
+ if (!expect_quic_csl(ssl, &ctx))
+ return 0;
+
+ qctx_lock(&ctx);
+
+ if (ctx.qc != NULL && !ctx.qc->started) {
+ /* We can only try to write on non-started connection. */
+ if ((events & SSL_POLL_EVENT_W) != 0)
+ revents |= SSL_POLL_EVENT_W;
+ goto end;
+ }
+
+ if (do_tick)
+ ossl_quic_reactor_tick(ossl_quic_obj_get0_reactor(ctx.obj), 0);
+
+ if (ctx.xso != NULL) {
+ /* SSL object has a stream component. */
+
+ if ((events & SSL_POLL_EVENT_R) != 0
+ && test_poll_event_r(ctx.xso))
+ revents |= SSL_POLL_EVENT_R;
+
+ if ((events & SSL_POLL_EVENT_ER) != 0
+ && test_poll_event_er(ctx.xso))
+ revents |= SSL_POLL_EVENT_ER;
+
+ if ((events & SSL_POLL_EVENT_W) != 0
+ && test_poll_event_w(ctx.xso))
+ revents |= SSL_POLL_EVENT_W;
+
+ if ((events & SSL_POLL_EVENT_EW) != 0
+ && test_poll_event_ew(ctx.xso))
+ revents |= SSL_POLL_EVENT_EW;
+ }
+
+ if (ctx.qc != NULL && !ctx.is_stream) {
+ if ((events & SSL_POLL_EVENT_EC) != 0
+ && test_poll_event_ec(ctx.qc))
+ revents |= SSL_POLL_EVENT_EC;
+
+ if ((events & SSL_POLL_EVENT_ECD) != 0
+ && test_poll_event_ecd(ctx.qc))
+ revents |= SSL_POLL_EVENT_ECD;
+
+ if ((events & SSL_POLL_EVENT_ISB) != 0
+ && test_poll_event_is(ctx.qc, /*uni=*/0))
+ revents |= SSL_POLL_EVENT_ISB;
+
+ if ((events & SSL_POLL_EVENT_ISU) != 0
+ && test_poll_event_is(ctx.qc, /*uni=*/1))
+ revents |= SSL_POLL_EVENT_ISU;
+
+ if ((events & SSL_POLL_EVENT_OSB) != 0
+ && test_poll_event_os(ctx.qc, /*uni=*/0))
+ revents |= SSL_POLL_EVENT_OSB;
+
+ if ((events & SSL_POLL_EVENT_OSU) != 0
+ && test_poll_event_os(ctx.qc, /*uni=*/1))
+ revents |= SSL_POLL_EVENT_OSU;
+ }
+
+ if (ctx.is_listener) {
+ if ((events & SSL_POLL_EVENT_EL) != 0
+ && test_poll_event_el(ctx.ql))
+ revents |= SSL_POLL_EVENT_EL;
+
+ if ((events & SSL_POLL_EVENT_IC) != 0
+ && test_poll_event_ic(ctx.ql))
+ revents |= SSL_POLL_EVENT_IC;
+ }
+
+ end:
+ qctx_unlock(&ctx);
+ *p_revents = revents;
+ return 1;
+}
+
+QUIC_TAKES_LOCK
+int ossl_quic_get_notifier_fd(SSL *ssl)
+{
+ QCTX ctx;
+ QUIC_REACTOR *rtor;
+ RIO_NOTIFIER *nfy;
+ int nfd = -1;
+
+ if (!expect_quic_any(ssl, &ctx))
+ return -1;
+
+ qctx_lock(&ctx);
+ rtor = ossl_quic_obj_get0_reactor(ctx.obj);
+ nfy = ossl_quic_reactor_get0_notifier(rtor);
+ if (nfy == NULL)
+ goto end;
+ nfd = ossl_rio_notifier_as_fd(nfy);
+
+ end:
+ qctx_unlock(&ctx);
+ return nfd;
+}
+
+QUIC_TAKES_LOCK
+void ossl_quic_enter_blocking_section(SSL *ssl, QUIC_REACTOR_WAIT_CTX *wctx)
+{
+ QCTX ctx;
+ QUIC_REACTOR *rtor;
+
+ if (!expect_quic_any(ssl, &ctx))
+ return;
+
+ qctx_lock(&ctx);
+ rtor = ossl_quic_obj_get0_reactor(ctx.obj);
+ ossl_quic_reactor_wait_ctx_enter(wctx, rtor);
+ qctx_unlock(&ctx);
+}
+
+QUIC_TAKES_LOCK
+void ossl_quic_leave_blocking_section(SSL *ssl, QUIC_REACTOR_WAIT_CTX *wctx)
+{
+ QCTX ctx;
+ QUIC_REACTOR *rtor;
+
+ if (!expect_quic_any(ssl, &ctx))
+ return;
+
+ qctx_lock(&ctx);
+ rtor = ossl_quic_obj_get0_reactor(ctx.obj);
+ ossl_quic_reactor_wait_ctx_leave(wctx, rtor);
+ qctx_unlock(&ctx);
+}
+
+/*
+ * Internal Testing APIs
+ * =====================
+ */
+
+QUIC_CHANNEL *ossl_quic_conn_get_channel(SSL *s)
+{
+ QCTX ctx;
+
+ if (!expect_quic_conn_only(s, &ctx))
+ return NULL;
+
+ return ctx.qc->ch;
+}
+
+int ossl_quic_set_diag_title(SSL_CTX *ctx, const char *title)
+{
+#ifndef OPENSSL_NO_QLOG
+ OPENSSL_free(ctx->qlog_title);
+ ctx->qlog_title = NULL;
+
+ if (title == NULL)
+ return 1;
+
+ if ((ctx->qlog_title = OPENSSL_strdup(title)) == NULL)
+ return 0;
+#endif
+
+ return 1;
+}
diff --git a/crypto/openssl/ssl/quic/quic_lcidm.c b/crypto/openssl/ssl/quic/quic_lcidm.c
new file mode 100644
index 000000000000..f31fd101a1f3
--- /dev/null
+++ b/crypto/openssl/ssl/quic/quic_lcidm.c
@@ -0,0 +1,617 @@
+/*
+ * Copyright 2023-2025 The OpenSSL Project Authors. All Rights Reserved.
+ *
+ * Licensed under the Apache License 2.0 (the "License"). You may not use
+ * this file except in compliance with the License. You can obtain a copy
+ * in the file LICENSE in the source distribution or at
+ * https://www.openssl.org/source/license.html
+ */
+
+#include "internal/quic_lcidm.h"
+#include "internal/quic_types.h"
+#include "internal/quic_vlint.h"
+#include "internal/common.h"
+#include "crypto/siphash.h"
+#include <openssl/lhash.h>
+#include <openssl/rand.h>
+#include <openssl/err.h>
+
+/*
+ * QUIC Local Connection ID Manager
+ * ================================
+ */
+
+typedef struct quic_lcidm_conn_st QUIC_LCIDM_CONN;
+
+enum {
+ LCID_TYPE_ODCID, /* This LCID is the ODCID from the peer */
+ LCID_TYPE_INITIAL, /* This is our Initial SCID */
+    LCID_TYPE_NCID      /* This LCID was issued via an NCID frame */
+};
+
+typedef struct quic_lcid_st {
+ QUIC_CONN_ID cid;
+ uint64_t seq_num;
+
+ /* copy of the hash key from lcidm */
+ uint64_t *hash_key;
+
+ /* Back-pointer to the owning QUIC_LCIDM_CONN structure. */
+ QUIC_LCIDM_CONN *conn;
+
+ /* LCID_TYPE_* */
+ unsigned int type : 2;
+} QUIC_LCID;
+
+DEFINE_LHASH_OF_EX(QUIC_LCID);
+DEFINE_LHASH_OF_EX(QUIC_LCIDM_CONN);
+
+struct quic_lcidm_conn_st {
+ size_t num_active_lcid;
+ LHASH_OF(QUIC_LCID) *lcids;
+ void *opaque;
+ QUIC_LCID *odcid_lcid_obj;
+ uint64_t next_seq_num;
+
+ /* Have we enrolled an ODCID? */
+ unsigned int done_odcid : 1;
+};
+
+struct quic_lcidm_st {
+ OSSL_LIB_CTX *libctx;
+ uint64_t hash_key[2]; /* random key for siphash */
+ LHASH_OF(QUIC_LCID) *lcids; /* (QUIC_CONN_ID) -> (QUIC_LCID *) */
+ LHASH_OF(QUIC_LCIDM_CONN) *conns; /* (void *opaque) -> (QUIC_LCIDM_CONN *) */
+ size_t lcid_len; /* Length in bytes for all LCIDs */
+#ifdef FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION
+ QUIC_CONN_ID next_lcid;
+#endif
+};
+
+static unsigned long lcid_hash(const QUIC_LCID *lcid_obj)
+{
+ SIPHASH siphash = {0, };
+ unsigned long hashval = 0;
+
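+    /*
+     * Keyed hashing (SipHash with a random per-LCIDM key) prevents inputs we
+     * do not fully control (e.g. peer-supplied ODCIDs) from engineering hash
+     * table collisions.
+     */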
+ if (!SipHash_set_hash_size(&siphash, sizeof(unsigned long)))
+ goto out;
+ if (!SipHash_Init(&siphash, (uint8_t *)lcid_obj->hash_key, 0, 0))
+ goto out;
+ SipHash_Update(&siphash, lcid_obj->cid.id, lcid_obj->cid.id_len);
+ if (!SipHash_Final(&siphash, (unsigned char *)&hashval,
+ sizeof(unsigned long)))
+ goto out;
+out:
+ return hashval;
+}
+
+static int lcid_comp(const QUIC_LCID *a, const QUIC_LCID *b)
+{
+ return !ossl_quic_conn_id_eq(&a->cid, &b->cid);
+}
+
+static unsigned long lcidm_conn_hash(const QUIC_LCIDM_CONN *conn)
+{
+ return (unsigned long)(uintptr_t)conn->opaque;
+}
+
+static int lcidm_conn_comp(const QUIC_LCIDM_CONN *a, const QUIC_LCIDM_CONN *b)
+{
+ return a->opaque != b->opaque;
+}
+
+QUIC_LCIDM *ossl_quic_lcidm_new(OSSL_LIB_CTX *libctx, size_t lcid_len)
+{
+ QUIC_LCIDM *lcidm = NULL;
+
+ if (lcid_len > QUIC_MAX_CONN_ID_LEN)
+ goto err;
+
+ if ((lcidm = OPENSSL_zalloc(sizeof(*lcidm))) == NULL)
+ goto err;
+
+ /* generate a random key for the hash tables hash function */
+ if (!RAND_bytes_ex(libctx, (unsigned char *)&lcidm->hash_key,
+ sizeof(uint64_t) * 2, 0))
+ goto err;
+
+ if ((lcidm->lcids = lh_QUIC_LCID_new(lcid_hash, lcid_comp)) == NULL)
+ goto err;
+
+ if ((lcidm->conns = lh_QUIC_LCIDM_CONN_new(lcidm_conn_hash,
+ lcidm_conn_comp)) == NULL)
+ goto err;
+
+ lcidm->libctx = libctx;
+ lcidm->lcid_len = lcid_len;
+ return lcidm;
+
+err:
+ if (lcidm != NULL) {
+ lh_QUIC_LCID_free(lcidm->lcids);
+ lh_QUIC_LCIDM_CONN_free(lcidm->conns);
+ OPENSSL_free(lcidm);
+ }
+ return NULL;
+}
+
+static void lcidm_delete_conn(QUIC_LCIDM *lcidm, QUIC_LCIDM_CONN *conn);
+
+static void lcidm_delete_conn_(QUIC_LCIDM_CONN *conn, void *arg)
+{
+ lcidm_delete_conn((QUIC_LCIDM *)arg, conn);
+}
+
+void ossl_quic_lcidm_free(QUIC_LCIDM *lcidm)
+{
+ if (lcidm == NULL)
+ return;
+
+ /*
+ * Calling OPENSSL_lh_delete during a doall call is unsafe with our
+ * current LHASH implementation for several reasons:
+ *
+ * - firstly, because deletes can cause the hashtable to be contracted,
+ * resulting in rehashing which might cause items in later buckets to
+ * move to earlier buckets, which might cause doall to skip an item,
+ * resulting in a memory leak;
+ *
+ * - secondly, because doall in general is not safe across hashtable
+ * size changes, as it caches hashtable size and pointer values
+ * while operating.
+ *
+ * The fix for this is to disable hashtable contraction using the following
+ * call, which guarantees that no rehashing will occur so long as we only
+ * call delete and not insert.
+ */
+ lh_QUIC_LCIDM_CONN_set_down_load(lcidm->conns, 0);
+
+ lh_QUIC_LCIDM_CONN_doall_arg(lcidm->conns, lcidm_delete_conn_, lcidm);
+
+ lh_QUIC_LCID_free(lcidm->lcids);
+ lh_QUIC_LCIDM_CONN_free(lcidm->conns);
+ OPENSSL_free(lcidm);
+}
+
+static QUIC_LCID *lcidm_get0_lcid(const QUIC_LCIDM *lcidm, const QUIC_CONN_ID *lcid)
+{
+ QUIC_LCID key;
+
+ key.cid = *lcid;
+ key.hash_key = (uint64_t *)lcidm->hash_key;
+
+ if (key.cid.id_len > QUIC_MAX_CONN_ID_LEN)
+ return NULL;
+
+ return lh_QUIC_LCID_retrieve(lcidm->lcids, &key);
+}
+
+static QUIC_LCIDM_CONN *lcidm_get0_conn(const QUIC_LCIDM *lcidm, void *opaque)
+{
+ QUIC_LCIDM_CONN key;
+
+ key.opaque = opaque;
+
+ return lh_QUIC_LCIDM_CONN_retrieve(lcidm->conns, &key);
+}
+
+static QUIC_LCIDM_CONN *lcidm_upsert_conn(const QUIC_LCIDM *lcidm, void *opaque)
+{
+ QUIC_LCIDM_CONN *conn = lcidm_get0_conn(lcidm, opaque);
+
+ if (conn != NULL)
+ return conn;
+
+ if ((conn = OPENSSL_zalloc(sizeof(*conn))) == NULL)
+ goto err;
+
+ if ((conn->lcids = lh_QUIC_LCID_new(lcid_hash, lcid_comp)) == NULL)
+ goto err;
+
+ conn->opaque = opaque;
+
+ lh_QUIC_LCIDM_CONN_insert(lcidm->conns, conn);
+ if (lh_QUIC_LCIDM_CONN_error(lcidm->conns))
+ goto err;
+
+ return conn;
+
+err:
+ if (conn != NULL) {
+ lh_QUIC_LCID_free(conn->lcids);
+ OPENSSL_free(conn);
+ }
+ return NULL;
+}
+
+static void lcidm_delete_conn_lcid(QUIC_LCIDM *lcidm, QUIC_LCID *lcid_obj)
+{
+ lh_QUIC_LCID_delete(lcidm->lcids, lcid_obj);
+ lh_QUIC_LCID_delete(lcid_obj->conn->lcids, lcid_obj);
+ assert(lcid_obj->conn->num_active_lcid > 0);
+ --lcid_obj->conn->num_active_lcid;
+ OPENSSL_free(lcid_obj);
+}
+
+/* doall_arg wrapper */
+static void lcidm_delete_conn_lcid_(QUIC_LCID *lcid_obj, void *arg)
+{
+ lcidm_delete_conn_lcid((QUIC_LCIDM *)arg, lcid_obj);
+}
+
+static void lcidm_delete_conn(QUIC_LCIDM *lcidm, QUIC_LCIDM_CONN *conn)
+{
+ /* See comment in ossl_quic_lcidm_free */
+ lh_QUIC_LCID_set_down_load(conn->lcids, 0);
+
+ lh_QUIC_LCID_doall_arg(conn->lcids, lcidm_delete_conn_lcid_, lcidm);
+ lh_QUIC_LCIDM_CONN_delete(lcidm->conns, conn);
+ lh_QUIC_LCID_free(conn->lcids);
+ OPENSSL_free(conn);
+}
+
+static QUIC_LCID *lcidm_conn_new_lcid(QUIC_LCIDM *lcidm, QUIC_LCIDM_CONN *conn,
+ const QUIC_CONN_ID *lcid)
+{
+ QUIC_LCID *lcid_obj = NULL;
+
+ if (lcid->id_len > QUIC_MAX_CONN_ID_LEN)
+ return NULL;
+
+ if ((lcid_obj = OPENSSL_zalloc(sizeof(*lcid_obj))) == NULL)
+ goto err;
+
+ lcid_obj->cid = *lcid;
+ lcid_obj->conn = conn;
+ lcid_obj->hash_key = lcidm->hash_key;
+
+ lh_QUIC_LCID_insert(conn->lcids, lcid_obj);
+ if (lh_QUIC_LCID_error(conn->lcids))
+ goto err;
+
+ lh_QUIC_LCID_insert(lcidm->lcids, lcid_obj);
+ if (lh_QUIC_LCID_error(lcidm->lcids)) {
+ lh_QUIC_LCID_delete(conn->lcids, lcid_obj);
+ goto err;
+ }
+
+ ++conn->num_active_lcid;
+ return lcid_obj;
+
+err:
+ OPENSSL_free(lcid_obj);
+ return NULL;
+}
+
+size_t ossl_quic_lcidm_get_lcid_len(const QUIC_LCIDM *lcidm)
+{
+ return lcidm->lcid_len;
+}
+
+size_t ossl_quic_lcidm_get_num_active_lcid(const QUIC_LCIDM *lcidm,
+ void *opaque)
+{
+ QUIC_LCIDM_CONN *conn;
+
+ conn = lcidm_get0_conn(lcidm, opaque);
+ if (conn == NULL)
+ return 0;
+
+ return conn->num_active_lcid;
+}
+
+static int lcidm_generate_cid(QUIC_LCIDM *lcidm,
+ QUIC_CONN_ID *cid)
+{
+#ifdef FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION
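+    /*
+     * Fuzzing builds use deterministic, sequential CIDs so that fuzzer runs
+     * are reproducible; normal builds generate random CIDs below.
+     */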
+ int i;
+
+ lcidm->next_lcid.id_len = (unsigned char)lcidm->lcid_len;
+ *cid = lcidm->next_lcid;
+
+ for (i = lcidm->lcid_len - 1; i >= 0; --i)
+ if (++lcidm->next_lcid.id[i] != 0)
+ break;
+
+ return 1;
+#else
+ return ossl_quic_gen_rand_conn_id(lcidm->libctx, lcidm->lcid_len, cid);
+#endif
+}
+
+static int lcidm_generate(QUIC_LCIDM *lcidm,
+ void *opaque,
+ unsigned int type,
+ QUIC_CONN_ID *lcid_out,
+ uint64_t *seq_num)
+{
+ QUIC_LCIDM_CONN *conn;
+ QUIC_LCID key, *lcid_obj;
+ size_t i;
+#define MAX_RETRIES 8
+
+ if ((conn = lcidm_upsert_conn(lcidm, opaque)) == NULL)
+ return 0;
+
+ if ((type == LCID_TYPE_INITIAL && conn->next_seq_num > 0)
+ || conn->next_seq_num > OSSL_QUIC_VLINT_MAX)
+ return 0;
+
+ i = 0;
+ do {
+ if (i++ >= MAX_RETRIES)
+ /*
+ * Too many retries; should not happen but if it does, don't loop
+ * endlessly.
+ */
+ return 0;
+
+ if (!lcidm_generate_cid(lcidm, lcid_out))
+ return 0;
+
+ key.cid = *lcid_out;
+ key.hash_key = lcidm->hash_key;
+
+ /* If a collision occurs, retry. */
+ } while (lh_QUIC_LCID_retrieve(lcidm->lcids, &key) != NULL);
+
+ if ((lcid_obj = lcidm_conn_new_lcid(lcidm, conn, lcid_out)) == NULL)
+ return 0;
+
+ lcid_obj->seq_num = conn->next_seq_num;
+ lcid_obj->type = type;
+
+ if (seq_num != NULL)
+ *seq_num = lcid_obj->seq_num;
+
+ ++conn->next_seq_num;
+ return 1;
+}
+
+int ossl_quic_lcidm_enrol_odcid(QUIC_LCIDM *lcidm,
+ void *opaque,
+ const QUIC_CONN_ID *initial_odcid)
+{
+ QUIC_LCIDM_CONN *conn;
+ QUIC_LCID key, *lcid_obj;
+
+ if (initial_odcid == NULL || initial_odcid->id_len < QUIC_MIN_ODCID_LEN
+ || initial_odcid->id_len > QUIC_MAX_CONN_ID_LEN)
+ return 0;
+
+ if ((conn = lcidm_upsert_conn(lcidm, opaque)) == NULL)
+ return 0;
+
+ if (conn->done_odcid)
+ return 0;
+
+ key.cid = *initial_odcid;
+ key.hash_key = lcidm->hash_key;
+ if (lh_QUIC_LCID_retrieve(lcidm->lcids, &key) != NULL)
+ return 0;
+
+ if ((lcid_obj = lcidm_conn_new_lcid(lcidm, conn, initial_odcid)) == NULL)
+ return 0;
+
+ lcid_obj->seq_num = LCIDM_ODCID_SEQ_NUM;
+ lcid_obj->type = LCID_TYPE_ODCID;
+
+ conn->odcid_lcid_obj = lcid_obj;
+ conn->done_odcid = 1;
+ return 1;
+}
+
+int ossl_quic_lcidm_generate_initial(QUIC_LCIDM *lcidm,
+ void *opaque,
+ QUIC_CONN_ID *initial_lcid)
+{
+ return lcidm_generate(lcidm, opaque, LCID_TYPE_INITIAL,
+ initial_lcid, NULL);
+}
+
+int ossl_quic_lcidm_bind_channel(QUIC_LCIDM *lcidm, void *opaque,
+ const QUIC_CONN_ID *lcid)
+{
+ QUIC_LCIDM_CONN *conn;
+ QUIC_LCID *lcid_obj;
+
+    /*
+     * The plan is simple: make sure the lcid is still unused, then do the
+     * same business as ossl_quic_lcidm_generate_initial() does, except that
+     * we use the given lcid instead of generating a new one.
+     */
+ if (ossl_quic_lcidm_lookup(lcidm, lcid, NULL, NULL) != 0)
+ return 0;
+
+ if ((conn = lcidm_upsert_conn(lcidm, opaque)) == NULL)
+ return 0;
+
+ if ((lcid_obj = lcidm_conn_new_lcid(lcidm, conn, lcid)) == NULL) {
+ lcidm_delete_conn(lcidm, conn);
+ return 0;
+ }
+
+ lcid_obj->seq_num = conn->next_seq_num;
+ lcid_obj->type = LCID_TYPE_INITIAL;
+ conn->next_seq_num++;
+
+ return 1;
+}
+
+int ossl_quic_lcidm_generate(QUIC_LCIDM *lcidm,
+ void *opaque,
+ OSSL_QUIC_FRAME_NEW_CONN_ID *ncid_frame)
+{
+ ncid_frame->seq_num = 0;
+ ncid_frame->retire_prior_to = 0;
+
+ return lcidm_generate(lcidm, opaque, LCID_TYPE_NCID,
+ &ncid_frame->conn_id,
+ &ncid_frame->seq_num);
+}
+
+int ossl_quic_lcidm_retire_odcid(QUIC_LCIDM *lcidm, void *opaque)
+{
+ QUIC_LCIDM_CONN *conn;
+
+ if ((conn = lcidm_upsert_conn(lcidm, opaque)) == NULL)
+ return 0;
+
+ if (conn->odcid_lcid_obj == NULL)
+ return 0;
+
+ lcidm_delete_conn_lcid(lcidm, conn->odcid_lcid_obj);
+ conn->odcid_lcid_obj = NULL;
+ return 1;
+}
+
+struct retire_args {
+ QUIC_LCID *earliest_seq_num_lcid_obj;
+ uint64_t earliest_seq_num, retire_prior_to;
+};
+
+static void retire_for_conn(QUIC_LCID *lcid_obj, void *arg)
+{
+ struct retire_args *args = arg;
+
+ /* ODCID LCID cannot be retired via this API */
+ if (lcid_obj->type == LCID_TYPE_ODCID
+ || lcid_obj->seq_num >= args->retire_prior_to)
+ return;
+
+ if (lcid_obj->seq_num < args->earliest_seq_num) {
+ args->earliest_seq_num = lcid_obj->seq_num;
+ args->earliest_seq_num_lcid_obj = lcid_obj;
+ }
+}
+
+int ossl_quic_lcidm_retire(QUIC_LCIDM *lcidm,
+ void *opaque,
+ uint64_t retire_prior_to,
+ const QUIC_CONN_ID *containing_pkt_dcid,
+ QUIC_CONN_ID *retired_lcid,
+ uint64_t *retired_seq_num,
+ int *did_retire)
+{
+ QUIC_LCIDM_CONN key, *conn;
+ struct retire_args args = {0};
+
+ key.opaque = opaque;
+
+ if (did_retire == NULL)
+ return 0;
+
+ *did_retire = 0;
+ if ((conn = lh_QUIC_LCIDM_CONN_retrieve(lcidm->conns, &key)) == NULL)
+ return 1;
+
+ args.retire_prior_to = retire_prior_to;
+ args.earliest_seq_num = UINT64_MAX;
+
+ lh_QUIC_LCID_doall_arg(conn->lcids, retire_for_conn, &args);
+ if (args.earliest_seq_num_lcid_obj == NULL)
+ return 1;
+
+ if (containing_pkt_dcid != NULL
+ && ossl_quic_conn_id_eq(&args.earliest_seq_num_lcid_obj->cid,
+ containing_pkt_dcid))
+ return 0;
+
+ *did_retire = 1;
+ if (retired_lcid != NULL)
+ *retired_lcid = args.earliest_seq_num_lcid_obj->cid;
+ if (retired_seq_num != NULL)
+ *retired_seq_num = args.earliest_seq_num_lcid_obj->seq_num;
+
+ lcidm_delete_conn_lcid(lcidm, args.earliest_seq_num_lcid_obj);
+ return 1;
+}
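+
+/*
+ * For illustration (a sketch; conn_opaque and pkt_dcid are hypothetical
+ * caller-side names): since at most one LCID is retired per call, a caller
+ * processing a Retire Prior To value would typically loop until *did_retire
+ * becomes 0:
+ *
+ *     int did_retire = 1;
+ *
+ *     while (did_retire)
+ *         if (!ossl_quic_lcidm_retire(lcidm, conn_opaque, retire_prior_to,
+ *                                     pkt_dcid, NULL, NULL, &did_retire))
+ *             goto err;  // e.g. peer retired the CID carrying the request
+ */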
+
+int ossl_quic_lcidm_cull(QUIC_LCIDM *lcidm, void *opaque)
+{
+ QUIC_LCIDM_CONN key, *conn;
+
+ key.opaque = opaque;
+
+ if ((conn = lh_QUIC_LCIDM_CONN_retrieve(lcidm->conns, &key)) == NULL)
+ return 0;
+
+ lcidm_delete_conn(lcidm, conn);
+ return 1;
+}
+
+int ossl_quic_lcidm_lookup(QUIC_LCIDM *lcidm,
+ const QUIC_CONN_ID *lcid,
+ uint64_t *seq_num,
+ void **opaque)
+{
+ QUIC_LCID *lcid_obj;
+
+ if (lcid == NULL)
+ return 0;
+
+ if ((lcid_obj = lcidm_get0_lcid(lcidm, lcid)) == NULL)
+ return 0;
+
+ if (seq_num != NULL)
+ *seq_num = lcid_obj->seq_num;
+
+ if (opaque != NULL)
+ *opaque = lcid_obj->conn->opaque;
+
+ return 1;
+}
+
+int ossl_quic_lcidm_debug_remove(QUIC_LCIDM *lcidm,
+ const QUIC_CONN_ID *lcid)
+{
+ QUIC_LCID key, *lcid_obj;
+
+ key.cid = *lcid;
+ key.hash_key = lcidm->hash_key;
+ if ((lcid_obj = lh_QUIC_LCID_retrieve(lcidm->lcids, &key)) == NULL)
+ return 0;
+
+ lcidm_delete_conn_lcid(lcidm, lcid_obj);
+ return 1;
+}
+
+int ossl_quic_lcidm_debug_add(QUIC_LCIDM *lcidm, void *opaque,
+ const QUIC_CONN_ID *lcid,
+ uint64_t seq_num)
+{
+ QUIC_LCIDM_CONN *conn;
+ QUIC_LCID key, *lcid_obj;
+
+ if (lcid == NULL || lcid->id_len > QUIC_MAX_CONN_ID_LEN)
+ return 0;
+
+ if ((conn = lcidm_upsert_conn(lcidm, opaque)) == NULL)
+ return 0;
+
+ key.cid = *lcid;
+ key.hash_key = lcidm->hash_key;
+ if (lh_QUIC_LCID_retrieve(lcidm->lcids, &key) != NULL)
+ return 0;
+
+ if ((lcid_obj = lcidm_conn_new_lcid(lcidm, conn, lcid)) == NULL)
+ return 0;
+
+ lcid_obj->seq_num = seq_num;
+ lcid_obj->type = LCID_TYPE_NCID;
+ return 1;
+}
+
+int ossl_quic_lcidm_get_unused_cid(QUIC_LCIDM *lcidm, QUIC_CONN_ID *cid)
+{
+ int i;
+
+ for (i = 0; i < 10; i++) {
+ if (lcidm_generate_cid(lcidm, cid)
+ && lcidm_get0_lcid(lcidm, cid) == NULL)
+            return 1; /* not found <=> randomly generated cid is unused */
+ }
+
+ return 0;
+}
diff --git a/crypto/openssl/ssl/quic/quic_local.h b/crypto/openssl/ssl/quic/quic_local.h
new file mode 100644
index 000000000000..97029ae9c482
--- /dev/null
+++ b/crypto/openssl/ssl/quic/quic_local.h
@@ -0,0 +1,342 @@
+/*
+ * Copyright 2022-2025 The OpenSSL Project Authors. All Rights Reserved.
+ *
+ * Licensed under the Apache License 2.0 (the "License"). You may not use
+ * this file except in compliance with the License. You can obtain a copy
+ * in the file LICENSE in the source distribution or at
+ * https://www.openssl.org/source/license.html
+ */
+
+#ifndef OSSL_QUIC_LOCAL_H
+# define OSSL_QUIC_LOCAL_H
+
+# include <openssl/ssl.h>
+# include "internal/quic_ssl.h" /* QUIC_CONNECTION */
+# include "internal/quic_txp.h"
+# include "internal/quic_statm.h"
+# include "internal/quic_demux.h"
+# include "internal/quic_record_rx.h"
+# include "internal/quic_tls.h"
+# include "internal/quic_fc.h"
+# include "internal/quic_stream.h"
+# include "internal/quic_channel.h"
+# include "internal/quic_reactor.h"
+# include "internal/quic_thread_assist.h"
+# include "../ssl_local.h"
+# include "quic_obj_local.h"
+
+# ifndef OPENSSL_NO_QUIC
+
+/*
+ * QUIC stream SSL object (QSSO) type. This implements the API personality layer
+ * for QSSO objects, wrapping the QUIC-native QUIC_STREAM object and tracking
+ * state required by the libssl API personality.
+ */
+struct quic_xso_st {
+ /* QUIC_OBJ common header, including SSL object common header. */
+ QUIC_OBJ obj;
+
+ /* The connection this stream is associated with. Always non-NULL. */
+ QUIC_CONNECTION *conn;
+
+ /* The stream object. Always non-NULL for as long as the XSO exists. */
+ QUIC_STREAM *stream;
+
+ /* The application has retired a FIN (i.e. SSL_ERROR_ZERO_RETURN). */
+ unsigned int retired_fin : 1;
+
+ /*
+ * The application has requested a reset. Not set for reflexive
+ * STREAM_RESETs caused by peer STOP_SENDING.
+ */
+ unsigned int requested_reset : 1;
+
+ /*
+ * This state tracks SSL_write all-or-nothing (AON) write semantics
+ * emulation.
+ *
+ * Example chronology:
+ *
+ * t=0: aon_write_in_progress=0
+ * t=1: SSL_write(ssl, b1, l1) called;
+ * too big to enqueue into sstream at once, SSL_ERROR_WANT_WRITE;
+ * aon_write_in_progress=1; aon_buf_base=b1; aon_buf_len=l1;
+ * aon_buf_pos < l1 (depends on how much room was in sstream);
+ * t=2: SSL_write(ssl, b2, l2);
+ * b2 must equal b1 (validated unless ACCEPT_MOVING_WRITE_BUFFER)
+ * l2 must equal l1 (always validated)
+ * append into sstream from [b2 + aon_buf_pos, b2 + aon_buf_len)
+ * if done, aon_write_in_progress=0
+ *
+ */
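+    /*
+     * From the application's perspective, the AON emulation means a short
+     * write must be retried with the same buffer and length (a sketch, not
+     * code from this file):
+     *
+     *     while ((ret = SSL_write(ssl, buf, len)) <= 0) {
+     *         if (SSL_get_error(ssl, ret) != SSL_ERROR_WANT_WRITE)
+     *             break;  // hard error
+     *         // wait for writability, then retry with identical buf and len
+     *     }
+     */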
+ /* Is an AON write in progress? */
+ unsigned int aon_write_in_progress : 1;
+
+ /*
+ * The base buffer pointer the caller passed us for the initial AON write
+ * call. We use this for validation purposes unless
+ * ACCEPT_MOVING_WRITE_BUFFER is enabled.
+ *
+ * NOTE: We never dereference this, as the caller might pass a different
+ * (but identical) buffer if using ACCEPT_MOVING_WRITE_BUFFER. It is for
+ * validation by pointer comparison only.
+ */
+ const unsigned char *aon_buf_base;
+ /* The total length of the AON buffer being sent, in bytes. */
+ size_t aon_buf_len;
+ /*
+ * The position in the AON buffer up to which we have successfully sent data
+ * so far.
+ */
+ size_t aon_buf_pos;
+
+ /* SSL_set_mode */
+ uint32_t ssl_mode;
+
+ /* SSL_set_options */
+ uint64_t ssl_options;
+
+ /*
+ * Last 'normal' error during an app-level I/O operation, used by
+ * SSL_get_error(); used to track data-path errors like SSL_ERROR_WANT_READ
+ * and SSL_ERROR_WANT_WRITE.
+ */
+ int last_error;
+};
+
+/*
+ * QUIC connection SSL object (QCSO) type. This implements the API personality
+ * layer for QCSO objects, wrapping the QUIC-native QUIC_CHANNEL object.
+ */
+struct quic_conn_st {
+ /*
+ * QUIC_OBJ is a common header for QUIC APL objects, allowing objects of
+ * these different types to be disambiguated at runtime and providing some
+ * common fields.
+ *
+ * Note: This must come first in the QUIC_CONNECTION structure.
+ */
+ QUIC_OBJ obj;
+
+ SSL *tls;
+
+ /* The QLSO this connection belongs to, if any. */
+ QUIC_LISTENER *listener;
+
+ /* The QDSO this connection belongs to, if any. */
+ QUIC_DOMAIN *domain;
+
+ /* The QUIC engine representing the QUIC event domain. */
+ QUIC_ENGINE *engine;
+
+ /* The QUIC port representing the QUIC listener and socket. */
+ QUIC_PORT *port;
+
+ /*
+ * The QUIC channel providing the core QUIC connection implementation. Note
+ * that this is not instantiated until we actually start trying to do the
+ * handshake. This is to allow us to gather information like whether we are
+ * going to be in client or server mode before committing to instantiating
+ * the channel, since we want to determine the channel arguments based on
+ * that.
+ *
+ * The channel remains available after connection termination until the SSL
+ * object is freed, thus (ch != NULL) iff (started == 1).
+ */
+ QUIC_CHANNEL *ch;
+
+ /*
+ * The mutex used to synchronise access to the QUIC_CHANNEL. We own this but
+ * provide it to the channel.
+ */
+#if defined(OPENSSL_THREADS)
+ CRYPTO_MUTEX *mutex;
+#endif
+
+ /*
+ * If we have a default stream attached, this is the internal XSO
+ * object. If there is no default stream, this is NULL.
+ */
+ QUIC_XSO *default_xso;
+
+ /* Initial peer L4 address. */
+ BIO_ADDR init_peer_addr;
+
+# ifndef OPENSSL_NO_QUIC_THREAD_ASSIST
+ /* Manages thread for QUIC thread assisted mode. */
+ QUIC_THREAD_ASSIST thread_assist;
+# endif
+
+ /* Number of XSOs allocated. Includes the default XSO, if any. */
+ size_t num_xso;
+
+ /* Have we started? */
+ unsigned int started : 1;
+
+ /*
+ * This is 1 if we were instantiated using a QUIC server method
+ * (for future use).
+ */
+ unsigned int as_server : 1;
+
+ /*
+ * Has the application called SSL_set_accept_state? We require this to be
+ * congruent with the value of as_server.
+ */
+ unsigned int as_server_state : 1;
+
+ /* Are we using thread assisted mode? Never changes after init. */
+ unsigned int is_thread_assisted : 1;
+
+ /* Have we created a default XSO yet? */
+ unsigned int default_xso_created : 1;
+
+ /*
+ * Pre-TERMINATING shutdown phase in which we are flushing streams.
+ * Monotonically transitions to 1.
+ * New streams cannot be created in this state.
+ */
+ unsigned int shutting_down : 1;
+
+ /* Have we probed the BIOs for addressing support? */
+ unsigned int addressing_probe_done : 1;
+
+ /* Are we using addressed mode (BIO_sendmmsg with non-NULL peer)? */
+ unsigned int addressed_mode_w : 1;
+ unsigned int addressed_mode_r : 1;
+
+ /* Flag to indicate waiting on accept queue */
+ unsigned int pending : 1;
+
+ /* Default stream type. Defaults to SSL_DEFAULT_STREAM_MODE_AUTO_BIDI. */
+ uint32_t default_stream_mode;
+
+ /* SSL_set_mode. This is not used directly but inherited by new XSOs. */
+ uint32_t default_ssl_mode;
+
+ /* SSL_set_options. This is not used directly but inherited by new XSOs. */
+ uint64_t default_ssl_options;
+
+ /* SSL_set_incoming_stream_policy. */
+ int incoming_stream_policy;
+ uint64_t incoming_stream_aec;
+
+ /*
+ * Last 'normal' error during an app-level I/O operation, used by
+ * SSL_get_error(); used to track data-path errors like SSL_ERROR_WANT_READ
+ * and SSL_ERROR_WANT_WRITE.
+ */
+ int last_error;
+};
+
+/*
+ * QUIC listener SSL object (QLSO) type. This implements the API personality
+ * layer for QLSO objects, wrapping the QUIC-native QUIC_PORT object.
+ */
+struct quic_listener_st {
+ /* QUIC_OBJ common header, including SSL object common header. */
+ QUIC_OBJ obj;
+
+ /* The QDSO this connection belongs to, if any. */
+ QUIC_DOMAIN *domain;
+
+ /* The QUIC engine representing the QUIC event domain. */
+ QUIC_ENGINE *engine;
+
+ /* The QUIC port representing the QUIC listener and socket. */
+ QUIC_PORT *port;
+
+#if defined(OPENSSL_THREADS)
+ /*
+ * The mutex used to synchronise access to the QUIC_ENGINE. We own this but
+ * provide it to the engine.
+ */
+ CRYPTO_MUTEX *mutex;
+#endif
+
+ /* Have we started listening yet? */
+ unsigned int listening : 1;
+};
+
+/*
+ * QUIC domain SSL object (QDSO) type. This implements the API personality layer
+ * for QDSO objects, wrapping the QUIC-native QUIC_ENGINE object.
+ */
+struct quic_domain_st {
+ /* QUIC_OBJ common header, including SSL object common header. */
+ QUIC_OBJ obj;
+
+ /* The QUIC engine representing the QUIC event domain. */
+ QUIC_ENGINE *engine;
+
+#if defined(OPENSSL_THREADS)
+ /*
+ * The mutex used to synchronise access to the QUIC_ENGINE. We own this but
+ * provide it to the engine.
+ */
+ CRYPTO_MUTEX *mutex;
+#endif
+};
+
+/* Internal calls to the QUIC CSM which come from various places. */
+int ossl_quic_conn_on_handshake_confirmed(QUIC_CONNECTION *qc);
+
+/*
+ * To be called when a protocol violation occurs. The connection is torn down
+ * with the given error code, which should be a OSSL_QUIC_ERR_* value. Reason
+ * string is optional and copied if provided. frame_type should be 0 if not
+ * applicable.
+ */
+void ossl_quic_conn_raise_protocol_error(QUIC_CONNECTION *qc,
+ uint64_t error_code,
+ uint64_t frame_type,
+ const char *reason);
+
+void ossl_quic_conn_on_remote_conn_close(QUIC_CONNECTION *qc,
+ OSSL_QUIC_FRAME_CONN_CLOSE *f);
+
+# define OSSL_QUIC_ANY_VERSION 0xFFFFF
+# endif
+
+# define IMPLEMENT_quic_meth_func(version, func_name, q_accept, \
+ q_connect, enc_data) \
+const SSL_METHOD *func_name(void) \
+ { \
+ static const SSL_METHOD func_name##_data= { \
+ version, \
+ 0, \
+ 0, \
+ ossl_quic_new, \
+ ossl_quic_free, \
+ ossl_quic_reset, \
+ ossl_quic_init, \
+ NULL /* clear */, \
+ ossl_quic_deinit, \
+ q_accept, \
+ q_connect, \
+ ossl_quic_read, \
+ ossl_quic_peek, \
+ ossl_quic_write, \
+ NULL /* shutdown */, \
+ NULL /* renegotiate */, \
+ ossl_quic_renegotiate_check, \
+ NULL /* read_bytes */, \
+ NULL /* write_bytes */, \
+ NULL /* dispatch_alert */, \
+ ossl_quic_ctrl, \
+ ossl_quic_ctx_ctrl, \
+ ossl_quic_get_cipher_by_char, \
+ NULL /* put_cipher_by_char */, \
+ ossl_quic_pending, \
+ ossl_quic_num_ciphers, \
+ ossl_quic_get_cipher, \
+ tls1_default_timeout, \
+ &enc_data, \
+ ssl_undefined_void_function, \
+ ossl_quic_callback_ctrl, \
+ ossl_quic_ctx_callback_ctrl, \
+ }; \
+ return &func_name##_data; \
+ }
+
+#endif
diff --git a/crypto/openssl/ssl/quic/quic_method.c b/crypto/openssl/ssl/quic/quic_method.c
new file mode 100644
index 000000000000..8092855efc61
--- /dev/null
+++ b/crypto/openssl/ssl/quic/quic_method.c
@@ -0,0 +1,27 @@
+/*
+ * Copyright 2022-2025 The OpenSSL Project Authors. All Rights Reserved.
+ *
+ * Licensed under the Apache License 2.0 (the "License"). You may not use
+ * this file except in compliance with the License. You can obtain a copy
+ * in the file LICENSE in the source distribution or at
+ * https://www.openssl.org/source/license.html
+ */
+
+#include <openssl/macros.h>
+#include <openssl/objects.h>
+#include "quic_local.h"
+
+IMPLEMENT_quic_meth_func(OSSL_QUIC_ANY_VERSION,
+ OSSL_QUIC_client_method,
+ ssl_undefined_function,
+ ossl_quic_connect, ssl3_undef_enc_method)
+
+IMPLEMENT_quic_meth_func(OSSL_QUIC_ANY_VERSION,
+ OSSL_QUIC_client_thread_method,
+ ssl_undefined_function,
+ ossl_quic_connect, ssl3_undef_enc_method)
+
+IMPLEMENT_quic_meth_func(OSSL_QUIC_ANY_VERSION,
+ OSSL_QUIC_server_method,
+ ossl_quic_accept,
+ ssl_undefined_function, ssl3_undef_enc_method)
diff --git a/crypto/openssl/ssl/quic/quic_obj.c b/crypto/openssl/ssl/quic/quic_obj.c
new file mode 100644
index 000000000000..6c31c8c93314
--- /dev/null
+++ b/crypto/openssl/ssl/quic/quic_obj.c
@@ -0,0 +1,137 @@
+/*
+ * Copyright 2024-2025 The OpenSSL Project Authors. All Rights Reserved.
+ *
+ * Licensed under the Apache License 2.0 (the "License"). You may not use
+ * this file except in compliance with the License. You can obtain a copy
+ * in the file LICENSE in the source distribution or at
+ * https://www.openssl.org/source/license.html
+ */
+
+#include "quic_obj_local.h"
+#include "quic_local.h"
+#include "internal/ssl_unwrap.h"
+
+static int obj_update_cache(QUIC_OBJ *obj);
+
+int ossl_quic_obj_init(QUIC_OBJ *obj,
+ SSL_CTX *ctx,
+ int type,
+ SSL *parent_obj,
+ QUIC_ENGINE *engine,
+ QUIC_PORT *port)
+{
+ int is_event_leader = (engine != NULL);
+ int is_port_leader = (port != NULL);
+
+ if (!ossl_assert(obj != NULL && !obj->init_done && SSL_TYPE_IS_QUIC(type)
+ && (parent_obj == NULL || IS_QUIC(parent_obj))))
+ return 0;
+
+ /* Event leader is always the root object. */
+ if (!ossl_assert(!is_event_leader || parent_obj == NULL))
+ return 0;
+
+ if (!ossl_ssl_init(&obj->ssl, ctx, ctx->method, type))
+ goto err;
+
+ obj->domain_flags = ctx->domain_flags;
+ obj->parent_obj = (QUIC_OBJ *)parent_obj;
+ obj->is_event_leader = is_event_leader;
+ obj->is_port_leader = is_port_leader;
+ obj->engine = engine;
+ obj->port = port;
+ obj->req_blocking_mode = QUIC_BLOCKING_MODE_INHERIT;
+ if (!obj_update_cache(obj))
+ goto err;
+
+ obj->init_done = 1;
+ return 1;
+
+err:
+ obj->is_event_leader = 0;
+ obj->is_port_leader = 0;
+ return 0;
+}
+
+static int obj_update_cache(QUIC_OBJ *obj)
+{
+ QUIC_OBJ *p;
+
+ for (p = obj; p != NULL && !p->is_event_leader;
+ p = p->parent_obj)
+ if (!ossl_assert(p == obj || p->init_done))
+ return 0;
+
+ if (!ossl_assert(p != NULL))
+ return 0;
+
+ /*
+ * Offset of ->ssl is guaranteed to be 0 but the NULL check makes ubsan
+ * happy.
+ */
+ obj->cached_event_leader = p;
+ obj->engine = p->engine;
+
+ for (p = obj; p != NULL && !p->is_port_leader;
+ p = p->parent_obj);
+
+ obj->cached_port_leader = p;
+ obj->port = (p != NULL) ? p->port : NULL;
+ return 1;
+}
+
+SSL_CONNECTION *ossl_quic_obj_get0_handshake_layer(QUIC_OBJ *obj)
+{
+ assert(obj != NULL && obj->init_done);
+
+ if (obj->ssl.type != SSL_TYPE_QUIC_CONNECTION)
+ return NULL;
+
+ return SSL_CONNECTION_FROM_SSL_ONLY(((QUIC_CONNECTION *)obj)->tls);
+}
+
+/* (Returns a cached result.) */
+int ossl_quic_obj_can_support_blocking(const QUIC_OBJ *obj)
+{
+ QUIC_REACTOR *rtor;
+
+ assert(obj != NULL);
+ rtor = ossl_quic_obj_get0_reactor(obj);
+
+ if ((obj->domain_flags
+ & (SSL_DOMAIN_FLAG_LEGACY_BLOCKING | SSL_DOMAIN_FLAG_BLOCKING)) == 0)
+ return 0;
+
+ return ossl_quic_reactor_can_poll_r(rtor)
+ || ossl_quic_reactor_can_poll_w(rtor);
+}
+
+int ossl_quic_obj_desires_blocking(const QUIC_OBJ *obj)
+{
+ unsigned int req_blocking_mode;
+
+ assert(obj != NULL);
+ for (; (req_blocking_mode = obj->req_blocking_mode) == QUIC_BLOCKING_MODE_INHERIT
+ && obj->parent_obj != NULL; obj = obj->parent_obj);
+
+ return req_blocking_mode != QUIC_BLOCKING_MODE_NONBLOCKING;
+}
+
+int ossl_quic_obj_blocking(const QUIC_OBJ *obj)
+{
+ assert(obj != NULL);
+
+ if (!ossl_quic_obj_desires_blocking(obj))
+ return 0;
+
+ ossl_quic_engine_update_poll_descriptors(ossl_quic_obj_get0_engine(obj),
+ /*force=*/0);
+ return ossl_quic_obj_can_support_blocking(obj);
+}
+
+void ossl_quic_obj_set_blocking_mode(QUIC_OBJ *obj, unsigned int mode)
+{
+ assert(obj != NULL);
+
+ obj->req_blocking_mode = mode;
+}
diff --git a/crypto/openssl/ssl/quic/quic_obj_local.h b/crypto/openssl/ssl/quic/quic_obj_local.h
new file mode 100644
index 000000000000..9f8729d32590
--- /dev/null
+++ b/crypto/openssl/ssl/quic/quic_obj_local.h
@@ -0,0 +1,341 @@
+/*
+ * Copyright 2024-2025 The OpenSSL Project Authors. All Rights Reserved.
+ *
+ * Licensed under the Apache License 2.0 (the "License"). You may not use
+ * this file except in compliance with the License. You can obtain a copy
+ * in the file LICENSE in the source distribution or at
+ * https://www.openssl.org/source/license.html
+ */
+
+#ifndef OSSL_QUIC_OBJ_LOCAL_H
+# define OSSL_QUIC_OBJ_LOCAL_H
+
+# include <openssl/ssl.h>
+# include "internal/quic_predef.h"
+# include "internal/quic_engine.h"
+# include "../ssl_local.h"
+
+# ifndef OPENSSL_NO_QUIC
+
+/*
+ * QUIC Object Structure.
+ *
+ * In the libssl APL, we have QLSOs, QCSOs and QSSOs, and in the future might
+ * choose to introduce QDSOs. There are also roles such as Port Leader and Event
+ * Leader which can be assumed by these different types under different
+ * circumstances — in other words, whether an APL object is a Port or Event
+ * Leader is not a static function of its type and these roles can 'float'
+ * dynamically depending on the circumstances under which an APL object was
+ * created.
+ *
+ * The QUIC_OBJ is a base type for QUIC APL objects which provides functionality
+ * common to all QUIC objects and which supports having different APL objects
+ * dynamically assume leader roles. It can therefore be seen as an extension of
+ * the SSL base class and extends the SSL object for QUIC APL objects. This
+ * avoids duplication of functionality for different types of QUIC object and
+ * allows access to common responsibilities of different types of APL object
+ * without regard to the kind of APL object we are dealing with.
+ *
+ * The "inheritance" hierarchy is as follows:
+ *
+ * SSL
+ * SSL_CONNECTION
+ * QUIC_OBJ
+ * QUIC_DOMAIN (QDSO) -> QUIC_ENGINE *E
+ * QUIC_LISTENER (QLSO) -> QUIC_PORT eP
+ * QUIC_CONNECTION (QCSO) -> QUIC_CHANNEL epCs
+ * QUIC_XSO (QSSO) -> QUIC_STREAM S
+ *
+ * Legend:
+ *
+ * *: Not currently modelled in the APL, though QUIC_ENGINE exists internally.
+ *
+ * E: Always an event leader if it exists.
+ * e: Potentially an event leader (namely if it is the root APL object in a
+ * hierarchy).
+ *
+ * P: Always a port leader if it exists.
+ * p: Potentially a port leader (namely if there is no port leader above it).
+ *
+ * C: Always a connection leader.
+ *
+ * s: Potentially usable as a stream (if it has a default stream attached).
+ * S: Always has the stream role if it exists.
+ *
+ * This structure must come at the start of a QUIC object structure definition.
+ *
+ * ssl->type still determines the actual object type. An SSL object
+ * pointer s can be safely cast to (QUIC_OBJ *) iff IS_QUIC(s) is true.
+ */
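+/*
+ * For example (a sketch): given an arbitrary SSL *s, the QUIC_OBJ view is
+ * obtained safely as
+ *
+ *     if (IS_QUIC(s)) {
+ *         QUIC_OBJ *obj = (QUIC_OBJ *)s;
+ *         ...
+ *     }
+ *
+ * rather than by casting unconditionally.
+ */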
+struct quic_obj_st {
+ /* SSL object common header. */
+ struct ssl_st ssl;
+
+ /*
+ * Pointer to a parent APL object in a QUIC APL object hierarchy, or NULL if
+ * this is the root object.
+ */
+ QUIC_OBJ *parent_obj;
+
+ /* invariant: != NULL */
+ QUIC_OBJ *cached_event_leader;
+ /* invariant: != NULL iff this is a port leader or subsidiary object */
+ QUIC_OBJ *cached_port_leader;
+
+ /*
+ * Points to the QUIC_ENGINE instance. Always equals
+ * cached_event_leader->engine. The containing_obj APL object owns this
+ * instance iff is_event_leader is set, otherwise it is an additional
+ * reference cached for convenience. Unlike port this is never NULL because
+ * a QUIC domain is always rooted in an event leader.
+ */
+ QUIC_ENGINE *engine;
+
+ /*
+ * Points to the QUIC_PORT instance applicable to the containing_obj APL
+ * object, or NULL if we are not at or below a port leader. Always equals
+ * cached_port_leader->port. The containing_obj APL object owns this
+ * instance iff is_port_leader is set, otherwise it is an additional
+ * reference cached for convenience.
+ */
+ QUIC_PORT *port;
+
+ /* SSL_DOMAIN_FLAG values taken from SSL_CTX at construction time. */
+ uint64_t domain_flags;
+
+ unsigned int init_done : 1;
+ unsigned int is_event_leader : 1;
+ unsigned int is_port_leader : 1;
+
+ /*
+ * Blocking mode configuration is handled generically through QUIC_OBJ as it
+ * by default inherits from the parent SSL object.
+ */
+ unsigned int req_blocking_mode : 2; /* QUIC_BLOCKING_MODE */
+
+ /* Event handling mode. One of SSL_QUIC_VALUE_EVENT_HANDLING. */
+ unsigned int event_handling_mode : 2;
+};
+
+enum {
+ QUIC_BLOCKING_MODE_INHERIT,
+ QUIC_BLOCKING_MODE_NONBLOCKING,
+ QUIC_BLOCKING_MODE_BLOCKING
+};
+
+/*
+ * Core Functions and Inlines
+ * ==========================
+ */
+
+/*
+ * Initialises a QUIC_OBJ structure with zero or more roles active. Returns 1
+ * on success or 0 on failure.
+ *
+ * ctx: A SSL_CTX used to initialise the SSL base object structure.
+ *
+ * type: A SSL_TYPE_* value designating the SSL object type.
+ *
+ * parent_obj: NULL if this is the root APL object in a new hierarchy, or a
+ * pointer to the parent APL object otherwise.
+ *
+ * engine: If non-NULL, this object becomes the Event Leader. parent_obj must be
+ * NULL iff this is non-NULL as currently the Event Leader is always the root in
+ * an APL object hierarchy. If NULL, the contextually applicable engine is
+ * determined by using parent_obj and ancestors to find the Event Leader.
+ *
+ * port: If non-NULL, this object becomes a Port Leader. If NULL, the
+ * contextually applicable port (if any) is determined by using parent_obj and
+ * ancestors to find the Port Leader.
+ */
+int ossl_quic_obj_init(QUIC_OBJ *obj,
+ SSL_CTX *ctx,
+ int type,
+ SSL *parent_obj,
+ QUIC_ENGINE *engine,
+ QUIC_PORT *port);
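+
+/*
+ * For example (a sketch; the ql and SSL_TYPE_QUIC_LISTENER names are
+ * illustrative): a root object assuming both the Event Leader and Port
+ * Leader roles is initialised with both engine and port and no parent:
+ *
+ *     if (!ossl_quic_obj_init(&ql->obj, ctx, SSL_TYPE_QUIC_LISTENER,
+ *                             NULL, engine, port))
+ *         goto err;
+ */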
+
+/*
+ * Returns a pointer to the handshake layer object which should be accessible on
+ * obj for purposes of handshake API autoforwarding, if any.
+ *
+ * This returns NULL if a handshake layer SSL object is available but should not
+ * be used for autoforwarding purposes, for example on a QSSO.
+ */
+SSL_CONNECTION *ossl_quic_obj_get0_handshake_layer(QUIC_OBJ *obj);
+
+/*
+ * Returns a pointer to the SSL base object structure. Returns NULL if obj is
+ * NULL. If obj is non-NULL, it must be initialised.
+ */
+static ossl_inline ossl_unused SSL *
+ossl_quic_obj_get0_ssl(QUIC_OBJ *obj)
+{
+ /*
+ * ->ssl is guaranteed to have an offset of 0 but the NULL check here makes
+ * ubsan happy.
+ */
+ if (!ossl_assert(obj != NULL))
+ return NULL;
+
+ return &obj->ssl;
+}
+
+/*
+ * Determines the applicable engine and return a pointer to it. Never returns
+ * NULL.
+ */
+static ossl_inline ossl_unused QUIC_ENGINE *
+ossl_quic_obj_get0_engine(const QUIC_OBJ *obj)
+{
+ assert(obj->init_done);
+ assert(obj->engine != NULL);
+ return obj->engine;
+}
+
+/* Determines the applicable port (if any) and returns a pointer to it. */
+static ossl_inline ossl_unused QUIC_PORT *
+ossl_quic_obj_get0_port(const QUIC_OBJ *obj)
+{
+ assert(obj->init_done);
+ return obj->port;
+}
+
+/* Returns 1 iff this leader structure represents an event leader. */
+static ossl_inline ossl_unused int
+ossl_quic_obj_is_event_leader(const QUIC_OBJ *obj)
+{
+ return obj->is_event_leader;
+}
+
+/*
+ * Similar to ossl_quic_obj_get0_engine, but only returns a non-NULL value if
+ * the obj object itself is an event leader, rather than one of its ancestors.
+ */
+static ossl_inline ossl_unused QUIC_ENGINE *
+ossl_quic_obj_get0_engine_local(const QUIC_OBJ *obj)
+{
+ return ossl_quic_obj_is_event_leader(obj)
+ ? ossl_quic_obj_get0_engine(obj) : NULL;
+}
+
+/* Returns 1 iff this leader structure represents a port leader. */
+static ossl_inline ossl_unused int
+ossl_quic_obj_is_port_leader(const QUIC_OBJ *obj)
+{
+ return obj->is_port_leader;
+}
+
+/*
+ * Similar to ossl_quic_obj_get0_port, but only returns a non-NULL value if
+ * the obj object itself is a port leader, rather than one of its ancestors.
+ */
+static ossl_inline ossl_unused QUIC_PORT *
+ossl_quic_obj_get0_port_local(const QUIC_OBJ *obj)
+{
+ return ossl_quic_obj_is_port_leader(obj)
+ ? ossl_quic_obj_get0_port(obj) : NULL;
+}
+
+/*
+ * Return 1 if we are currently capable of supporting blocking mode (regardless
+ * of whether it is actually turned on).
+ */
+int ossl_quic_obj_can_support_blocking(const QUIC_OBJ *obj);
+
+/*
+ * Returns 1 if we *desire* to do blocking I/O, regardless of whether it will
+ * actually be used (e.g. because it cannot currently be supported).
+ */
+int ossl_quic_obj_desires_blocking(const QUIC_OBJ *obj);
+
+/*
+ * Return 1 if an API call directly to the given object should use blocking mode
+ * and 0 otherwise.
+ */
+int ossl_quic_obj_blocking(const QUIC_OBJ *obj);
+
+/*
+ * Set the (requested) blocking mode, which might or might not be honoured
+ * depending on whether the BIO configuration can support it. Argument is a
+ * QUIC_BLOCKING_MODE value. If the top-level object in a QSO hierarchy is set
+ * to QUIC_BLOCKING_MODE_INHERIT, defaults to blocking mode.
+ */
+void ossl_quic_obj_set_blocking_mode(QUIC_OBJ *obj, unsigned int mode);
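+
+/*
+ * For example (a sketch; the mapping shown is an assumption about how the
+ * public API is wired up): disabling blocking I/O on a QCSO might be
+ * expressed as
+ *
+ *     ossl_quic_obj_set_blocking_mode(&qc->obj,
+ *                                     QUIC_BLOCKING_MODE_NONBLOCKING);
+ *
+ * with child QSSOs left at QUIC_BLOCKING_MODE_INHERIT so that they follow
+ * the parent's choice.
+ */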
+
+/*
+ * Convenience Inlines
+ * ===================
+ *
+ * These inlines are expressed in terms of the core functions and inlines above.
+ */
+
+/* Get a pointer to the QUIC domain mutex. Always returns non-NULL. */
+static ossl_inline ossl_unused CRYPTO_MUTEX *
+ossl_quic_obj_get0_mutex(const QUIC_OBJ *obj)
+{
+ return ossl_quic_engine_get0_mutex(ossl_quic_obj_get0_engine(obj));
+}
+
+/*
+ * Get a reference to the reactor applicable to a leader. Always returns
+ * non-NULL.
+ */
+static ossl_inline ossl_unused QUIC_REACTOR *
+ossl_quic_obj_get0_reactor(const QUIC_OBJ *obj)
+{
+ return ossl_quic_engine_get0_reactor(ossl_quic_obj_get0_engine(obj));
+}
+
+/* Get a reference to the OSSL_LIB_CTX pointer applicable to a leader. */
+static ossl_inline ossl_unused OSSL_LIB_CTX *
+ossl_quic_obj_get0_libctx(const QUIC_OBJ *obj)
+{
+ return ossl_quic_engine_get0_libctx(ossl_quic_obj_get0_engine(obj));
+}
+
+/* Get a reference to the propq pointer applicable to a leader. */
+static ossl_inline ossl_unused const char *
+ossl_quic_obj_get0_propq(const QUIC_OBJ *obj)
+{
+ return ossl_quic_engine_get0_propq(ossl_quic_obj_get0_engine(obj));
+}
+
+/*
+ * Returns the APL object pointer to the event leader in a hierarchy. Always
+ * returns non-NULL.
+ */
+static ossl_inline ossl_unused SSL *
+ossl_quic_obj_get0_event_leader(const QUIC_OBJ *obj)
+{
+ assert(obj->init_done);
+ return obj->cached_event_leader != NULL
+ ? &obj->cached_event_leader->ssl
+ : NULL;
+}
+
+/*
+ * Returns the APL object pointer to the port leader in a hierarchy, or NULL
+ * if the hierarchy does not currently have a port leader.
+ */
+static ossl_inline ossl_unused SSL *
+ossl_quic_obj_get0_port_leader(const QUIC_OBJ *obj)
+{
+ assert(obj->init_done);
+ return obj->cached_port_leader != NULL
+ ? &obj->cached_port_leader->ssl
+ : NULL;
+}
+
+/*
+ * Change the domain flags. Should only be called immediately after
+ * ossl_quic_obj_init().
+ */
+static ossl_inline ossl_unused void
+ossl_quic_obj_set_domain_flags(QUIC_OBJ *obj, uint64_t domain_flags)
+{
+ obj->domain_flags = domain_flags;
+}
+
+# endif
+#endif
diff --git a/crypto/openssl/ssl/quic/quic_port.c b/crypto/openssl/ssl/quic/quic_port.c
new file mode 100644
index 000000000000..d6e6d4d25cb5
--- /dev/null
+++ b/crypto/openssl/ssl/quic/quic_port.c
@@ -0,0 +1,1747 @@
+/*
+ * Copyright 2023-2025 The OpenSSL Project Authors. All Rights Reserved.
+ *
+ * Licensed under the Apache License 2.0 (the "License"). You may not use
+ * this file except in compliance with the License. You can obtain a copy
+ * in the file LICENSE in the source distribution or at
+ * https://www.openssl.org/source/license.html
+ */
+
+#include "internal/quic_port.h"
+#include "internal/quic_channel.h"
+#include "internal/quic_lcidm.h"
+#include "internal/quic_srtm.h"
+#include "internal/quic_txp.h"
+#include "internal/ssl_unwrap.h"
+#include "quic_port_local.h"
+#include "quic_channel_local.h"
+#include "quic_engine_local.h"
+#include "quic_local.h"
+#include "../ssl_local.h"
+#include <openssl/rand.h>
+
+/*
+ * QUIC Port Structure
+ * ===================
+ */
+#define INIT_DCID_LEN 8
+
+static int port_init(QUIC_PORT *port);
+static void port_cleanup(QUIC_PORT *port);
+static OSSL_TIME get_time(void *arg);
+static void port_default_packet_handler(QUIC_URXE *e, void *arg,
+ const QUIC_CONN_ID *dcid);
+static void port_rx_pre(QUIC_PORT *port);
+
+/**
+ * @struct validation_token
+ * @brief Represents a validation token for secure connection handling.
+ *
+ * This struct is used to store information related to a validation token.
+ *
+ * @var validation_token::is_retry
+ * True iff this validation token is for a token sent in a RETRY packet.
+ * Otherwise, this token is from a NEW_TOKEN packet. Iff this value is true,
+ * then ODCID and RSCID are set.
+ *
+ * @var validation_token::timestamp
+ * Time that the validation token was minted.
+ *
+ * @var validation_token::odcid
+ * An original connection ID (`QUIC_CONN_ID`) used to identify the QUIC
+ * connection. This ID helps associate the token with a specific connection.
+ * This will only be valid for validation tokens from RETRY packets.
+ *
+ * @var validation_token::rscid
+ * DCID that the client will use as the DCID of the subsequent initial packet
+ * i.e. the "new" DCID.
+ * This will only be valid for validation tokens from RETRY packets.
+ *
+ * @var validation_token::remote_addr_len
+ * Length of the following character array.
+ *
+ * @var validation_token::remote_addr
+ * A character array holding the raw address of the client requesting the
+ * connection.
+ */
+typedef struct validation_token {
+ OSSL_TIME timestamp;
+ QUIC_CONN_ID odcid;
+ QUIC_CONN_ID rscid;
+ size_t remote_addr_len;
+ unsigned char *remote_addr;
+ unsigned char is_retry;
+} QUIC_VALIDATION_TOKEN;
+
+/*
+ * Maximum length of a marshalled validation token.
+ *
+ * - timestamp is 8 bytes
+ * - odcid and rscid are maximally 42 bytes in total
+ * - remote_addr_len is a size_t (8 bytes)
+ * - remote_addr is in the worst case 110 bytes (in the case of using a
+ * maximally sized AF_UNIX socket)
+ * - is_retry is a single byte
+ */
+#define MARSHALLED_TOKEN_MAX_LEN 169
+
+/*
+ * Maximum length of an encrypted marshalled validation token.
+ *
+ * This will include the size of the marshalled validation token plus a 16 byte
+ * tag and a 12 byte IV, so in total 197 bytes.
+ */
+#define ENCRYPTED_TOKEN_MAX_LEN (MARSHALLED_TOKEN_MAX_LEN + 16 + 12)
+
+DEFINE_LIST_OF_IMPL(ch, QUIC_CHANNEL);
+DEFINE_LIST_OF_IMPL(incoming_ch, QUIC_CHANNEL);
+DEFINE_LIST_OF_IMPL(port, QUIC_PORT);
+
+QUIC_PORT *ossl_quic_port_new(const QUIC_PORT_ARGS *args)
+{
+ QUIC_PORT *port;
+
+ if ((port = OPENSSL_zalloc(sizeof(QUIC_PORT))) == NULL)
+ return NULL;
+
+ port->engine = args->engine;
+ port->channel_ctx = args->channel_ctx;
+ port->is_multi_conn = args->is_multi_conn;
+ port->validate_addr = args->do_addr_validation;
+ port->get_conn_user_ssl = args->get_conn_user_ssl;
+ port->user_ssl_arg = args->user_ssl_arg;
+
+ if (!port_init(port)) {
+ OPENSSL_free(port);
+ return NULL;
+ }
+
+ return port;
+}
+
+void ossl_quic_port_free(QUIC_PORT *port)
+{
+ if (port == NULL)
+ return;
+
+ port_cleanup(port);
+ OPENSSL_free(port);
+}
+
+static int port_init(QUIC_PORT *port)
+{
+ size_t rx_short_dcid_len = (port->is_multi_conn ? INIT_DCID_LEN : 0);
+ int key_len;
+ EVP_CIPHER *cipher = NULL;
+ unsigned char *token_key = NULL;
+ int ret = 0;
+
+ if (port->engine == NULL || port->channel_ctx == NULL)
+ goto err;
+
+ if ((port->err_state = OSSL_ERR_STATE_new()) == NULL)
+ goto err;
+
+ if ((port->demux = ossl_quic_demux_new(/*BIO=*/NULL,
+ /*Short CID Len=*/rx_short_dcid_len,
+ get_time, port)) == NULL)
+ goto err;
+
+ ossl_quic_demux_set_default_handler(port->demux,
+ port_default_packet_handler,
+ port);
+
+ if ((port->srtm = ossl_quic_srtm_new(port->engine->libctx,
+ port->engine->propq)) == NULL)
+ goto err;
+
+ if ((port->lcidm = ossl_quic_lcidm_new(port->engine->libctx,
+ rx_short_dcid_len)) == NULL)
+ goto err;
+
+ port->rx_short_dcid_len = (unsigned char)rx_short_dcid_len;
+ port->tx_init_dcid_len = INIT_DCID_LEN;
+ port->state = QUIC_PORT_STATE_RUNNING;
+
+ ossl_list_port_insert_tail(&port->engine->port_list, port);
+ port->on_engine_list = 1;
+ port->bio_changed = 1;
+
+ /* Generate random key for token encryption */
+ if ((port->token_ctx = EVP_CIPHER_CTX_new()) == NULL
+ || (cipher = EVP_CIPHER_fetch(port->engine->libctx,
+ "AES-256-GCM", NULL)) == NULL
+ || !EVP_EncryptInit_ex(port->token_ctx, cipher, NULL, NULL, NULL)
+ || (key_len = EVP_CIPHER_CTX_get_key_length(port->token_ctx)) <= 0
+ || (token_key = OPENSSL_malloc(key_len)) == NULL
+ || !RAND_bytes_ex(port->engine->libctx, token_key, key_len, 0)
+ || !EVP_EncryptInit_ex(port->token_ctx, NULL, NULL, token_key, NULL))
+ goto err;
+
+ ret = 1;
+err:
+ EVP_CIPHER_free(cipher);
+ OPENSSL_free(token_key);
+ if (!ret)
+ port_cleanup(port);
+ return ret;
+}
+
+static void port_cleanup(QUIC_PORT *port)
+{
+ assert(ossl_list_ch_num(&port->channel_list) == 0);
+
+ ossl_quic_demux_free(port->demux);
+ port->demux = NULL;
+
+ ossl_quic_srtm_free(port->srtm);
+ port->srtm = NULL;
+
+ ossl_quic_lcidm_free(port->lcidm);
+ port->lcidm = NULL;
+
+ OSSL_ERR_STATE_free(port->err_state);
+ port->err_state = NULL;
+
+ if (port->on_engine_list) {
+ ossl_list_port_remove(&port->engine->port_list, port);
+ port->on_engine_list = 0;
+ }
+
+ EVP_CIPHER_CTX_free(port->token_ctx);
+ port->token_ctx = NULL;
+}
+
+static void port_transition_failed(QUIC_PORT *port)
+{
+ if (port->state == QUIC_PORT_STATE_FAILED)
+ return;
+
+ port->state = QUIC_PORT_STATE_FAILED;
+}
+
+int ossl_quic_port_is_running(const QUIC_PORT *port)
+{
+ return port->state == QUIC_PORT_STATE_RUNNING;
+}
+
+QUIC_ENGINE *ossl_quic_port_get0_engine(QUIC_PORT *port)
+{
+ return port->engine;
+}
+
+QUIC_REACTOR *ossl_quic_port_get0_reactor(QUIC_PORT *port)
+{
+ return ossl_quic_engine_get0_reactor(port->engine);
+}
+
+QUIC_DEMUX *ossl_quic_port_get0_demux(QUIC_PORT *port)
+{
+ return port->demux;
+}
+
+CRYPTO_MUTEX *ossl_quic_port_get0_mutex(QUIC_PORT *port)
+{
+ return ossl_quic_engine_get0_mutex(port->engine);
+}
+
+OSSL_TIME ossl_quic_port_get_time(QUIC_PORT *port)
+{
+ return ossl_quic_engine_get_time(port->engine);
+}
+
+static OSSL_TIME get_time(void *port)
+{
+ return ossl_quic_port_get_time((QUIC_PORT *)port);
+}
+
+int ossl_quic_port_get_rx_short_dcid_len(const QUIC_PORT *port)
+{
+ return port->rx_short_dcid_len;
+}
+
+int ossl_quic_port_get_tx_init_dcid_len(const QUIC_PORT *port)
+{
+ return port->tx_init_dcid_len;
+}
+
+size_t ossl_quic_port_get_num_incoming_channels(const QUIC_PORT *port)
+{
+ return ossl_list_incoming_ch_num(&port->incoming_channel_list);
+}
+
+/*
+ * QUIC Port: Network BIO Configuration
+ * ====================================
+ */
+
+/* Determines whether we can support a given poll descriptor. */
+static int validate_poll_descriptor(const BIO_POLL_DESCRIPTOR *d)
+{
+ if (d->type == BIO_POLL_DESCRIPTOR_TYPE_SOCK_FD && d->value.fd < 0) {
+ ERR_raise(ERR_LIB_SSL, ERR_R_PASSED_INVALID_ARGUMENT);
+ return 0;
+ }
+
+ return 1;
+}
+
+BIO *ossl_quic_port_get_net_rbio(QUIC_PORT *port)
+{
+ return port->net_rbio;
+}
+
+BIO *ossl_quic_port_get_net_wbio(QUIC_PORT *port)
+{
+ return port->net_wbio;
+}
+
+static int port_update_poll_desc(QUIC_PORT *port, BIO *net_bio, int for_write)
+{
+ BIO_POLL_DESCRIPTOR d = {0};
+
+ if (net_bio == NULL
+ || (!for_write && !BIO_get_rpoll_descriptor(net_bio, &d))
+ || (for_write && !BIO_get_wpoll_descriptor(net_bio, &d)))
+ /* Non-pollable BIO */
+ d.type = BIO_POLL_DESCRIPTOR_TYPE_NONE;
+
+ if (!validate_poll_descriptor(&d))
+ return 0;
+
+ /*
+ * TODO(QUIC MULTIPORT): We currently only support one port per
+ * engine/domain. This is necessary because QUIC_REACTOR currently supports
+ * only a single pollable. In the future, once complete polling
+ * infrastructure has been implemented, this limitation can be removed.
+ *
+ * For now, just update the descriptor on the engine's reactor as we are
+ * guaranteed to be the only port under it.
+ */
+ if (for_write)
+ ossl_quic_reactor_set_poll_w(&port->engine->rtor, &d);
+ else
+ ossl_quic_reactor_set_poll_r(&port->engine->rtor, &d);
+
+ return 1;
+}
+
+int ossl_quic_port_update_poll_descriptors(QUIC_PORT *port, int force)
+{
+ int ok = 1;
+
+ if (!force && !port->bio_changed)
+ return 0;
+
+ if (!port_update_poll_desc(port, port->net_rbio, /*for_write=*/0))
+ ok = 0;
+
+ if (!port_update_poll_desc(port, port->net_wbio, /*for_write=*/1))
+ ok = 0;
+
+ port->bio_changed = 0;
+ return ok;
+}
+
+/*
+ * We need to determine our addressing mode. There are basically two ways we can
+ * use L4 addresses:
+ *
+ * - Addressed mode, in which our BIO_sendmmsg calls have destination
+ * addresses attached to them which we expect the underlying network BIO to
+ * handle;
+ *
+ * - Unaddressed mode, in which the BIO provided to us on the network side
+ * neither provides us with L4 addresses nor is capable of honouring ones we
+ * provide. We don't know where the QUIC traffic we send ends up exactly and
+ * trust the application to know what it is doing.
+ *
+ * Addressed mode is preferred because it enables support for connection
+ * migration, multipath, etc. in the future. Addressed mode is automatically
+ * enabled if we are using e.g. BIO_s_datagram, with or without BIO_s_connect.
+ *
+ * If we are passed a BIO_s_dgram_pair (or some custom BIO) we may have to use
+ * unaddressed mode unless that BIO supports capability flags indicating it can
+ * provide and honour L4 addresses.
+ *
+ * Our strategy for determining address mode is simple: we probe the underlying
+ * network BIOs for their capabilities. If the network BIOs support what we
+ * need, we use addressed mode. Otherwise, we use unaddressed mode.
+ *
+ * If addressed mode is chosen, we require an initial peer address to be set. If
+ * this is not set, we fail. If unaddressed mode is used, we do not require
+ * this, as such an address is superfluous, though it can be set if desired.
+ */
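+/*
+ * For example (a sketch): an application supplying a BIO_s_dgram_pair which
+ * can honour destination addresses and provide source addresses can opt in
+ * to addressed mode by declaring those capabilities before giving us the
+ * BIO:
+ *
+ *     BIO_dgram_set_caps(bio, BIO_DGRAM_CAP_HANDLES_DST_ADDR
+ *                             | BIO_DGRAM_CAP_PROVIDES_SRC_ADDR);
+ */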
+static void port_update_addressing_mode(QUIC_PORT *port)
+{
+ long rcaps = 0, wcaps = 0;
+
+ if (port->net_rbio != NULL)
+ rcaps = BIO_dgram_get_effective_caps(port->net_rbio);
+
+ if (port->net_wbio != NULL)
+ wcaps = BIO_dgram_get_effective_caps(port->net_wbio);
+
+ port->addressed_mode_r = ((rcaps & BIO_DGRAM_CAP_PROVIDES_SRC_ADDR) != 0);
+ port->addressed_mode_w = ((wcaps & BIO_DGRAM_CAP_HANDLES_DST_ADDR) != 0);
+ port->bio_changed = 1;
+}
+
+int ossl_quic_port_is_addressed_r(const QUIC_PORT *port)
+{
+ return port->addressed_mode_r;
+}
+
+int ossl_quic_port_is_addressed_w(const QUIC_PORT *port)
+{
+ return port->addressed_mode_w;
+}
+
+int ossl_quic_port_is_addressed(const QUIC_PORT *port)
+{
+ return ossl_quic_port_is_addressed_r(port) && ossl_quic_port_is_addressed_w(port);
+}
+
+/*
+ * QUIC_PORT does not ref any BIO it is provided with, nor is any ref
+ * transferred to it. The caller (e.g., QUIC_CONNECTION) is responsible for
+ * ensuring the BIO lasts until the channel is freed or the BIO is switched out
+ * for another BIO by a subsequent successful call to this function.
+ */
+int ossl_quic_port_set_net_rbio(QUIC_PORT *port, BIO *net_rbio)
+{
+ if (port->net_rbio == net_rbio)
+ return 1;
+
+ if (!port_update_poll_desc(port, net_rbio, /*for_write=*/0))
+ return 0;
+
+ ossl_quic_demux_set_bio(port->demux, net_rbio);
+ port->net_rbio = net_rbio;
+ port_update_addressing_mode(port);
+ return 1;
+}
+
+int ossl_quic_port_set_net_wbio(QUIC_PORT *port, BIO *net_wbio)
+{
+ QUIC_CHANNEL *ch;
+
+ if (port->net_wbio == net_wbio)
+ return 1;
+
+ if (!port_update_poll_desc(port, net_wbio, /*for_write=*/1))
+ return 0;
+
+ OSSL_LIST_FOREACH(ch, ch, &port->channel_list)
+ ossl_qtx_set_bio(ch->qtx, net_wbio);
+
+ port->net_wbio = net_wbio;
+ port_update_addressing_mode(port);
+ return 1;
+}
+
+SSL_CTX *ossl_quic_port_get_channel_ctx(QUIC_PORT *port)
+{
+ return port->channel_ctx;
+}
+
+/*
+ * QUIC Port: Channel Lifecycle
+ * ============================
+ */
+
+static SSL *port_new_handshake_layer(QUIC_PORT *port, QUIC_CHANNEL *ch)
+{
+ SSL *tls = NULL;
+ SSL_CONNECTION *tls_conn = NULL;
+ SSL *user_ssl = NULL;
+ QUIC_CONNECTION *qc = NULL;
+ QUIC_LISTENER *ql = NULL;
+
+ /*
+ * It only makes sense to call this function if we know how to associate
+ * the handshake layer we are about to create with some user_ssl object.
+ */
+ if (!ossl_assert(port->get_conn_user_ssl != NULL))
+ return NULL;
+ user_ssl = port->get_conn_user_ssl(ch, port->user_ssl_arg);
+ if (user_ssl == NULL)
+ return NULL;
+ qc = (QUIC_CONNECTION *)user_ssl;
+ ql = (QUIC_LISTENER *)port->user_ssl_arg;
+
+ /*
+ * We expect the user_ssl to be newly created so it must not have an
+ * existing qc->tls
+ */
+ if (!ossl_assert(qc->tls == NULL)) {
+ SSL_free(user_ssl);
+ return NULL;
+ }
+
+ tls = ossl_ssl_connection_new_int(port->channel_ctx, user_ssl, TLS_method());
+ qc->tls = tls;
+ if (tls == NULL || (tls_conn = SSL_CONNECTION_FROM_SSL(tls)) == NULL) {
+ SSL_free(user_ssl);
+ return NULL;
+ }
+
+ if (ql != NULL && ql->obj.ssl.ctx->new_pending_conn_cb != NULL)
+ if (!ql->obj.ssl.ctx->new_pending_conn_cb(ql->obj.ssl.ctx, user_ssl,
+ ql->obj.ssl.ctx->new_pending_conn_arg)) {
+ SSL_free(user_ssl);
+ return NULL;
+ }
+
+ /* Override the user_ssl of the inner connection. */
+ tls_conn->s3.flags |= TLS1_FLAGS_QUIC | TLS1_FLAGS_QUIC_INTERNAL;
+
+ /* Restrict options derived from the SSL_CTX. */
+ tls_conn->options &= OSSL_QUIC_PERMITTED_OPTIONS_CONN;
+ tls_conn->pha_enabled = 0;
+ return tls;
+}
+
+static QUIC_CHANNEL *port_make_channel(QUIC_PORT *port, SSL *tls, OSSL_QRX *qrx,
+ int is_server, int is_tserver)
+{
+ QUIC_CHANNEL_ARGS args = {0};
+ QUIC_CHANNEL *ch;
+
+ args.port = port;
+ args.is_server = is_server;
+ args.lcidm = port->lcidm;
+ args.srtm = port->srtm;
+ args.qrx = qrx;
+ args.is_tserver_ch = is_tserver;
+
+ /*
+     * Creating a new channel is made a bit tricky here as there is a
+     * bit of a circular dependency. Initializing a channel requires that
+     * the ch->tls and optionally the qlog_title be configured prior to
+     * initialization, but we need the channel at least partially configured
+ * to create the new handshake layer, so we have to do this in a few steps.
+ */
+
+ /*
+     * Start by allocating and provisioning as much of the channel as we can.
+ */
+ ch = ossl_quic_channel_alloc(&args);
+ if (ch == NULL)
+ return NULL;
+
+ /*
+     * Fix up the channel's TLS connection here before we init the channel.
+ */
+ ch->tls = (tls != NULL) ? tls : port_new_handshake_layer(port, ch);
+
+ if (ch->tls == NULL) {
+ OPENSSL_free(ch);
+ return NULL;
+ }
+
+#ifndef OPENSSL_NO_QLOG
+ /*
+     * If we're using qlog, make sure the TLS object gets configured properly.
+ */
+ ch->use_qlog = 1;
+ if (ch->tls->ctx->qlog_title != NULL) {
+ if ((ch->qlog_title = OPENSSL_strdup(ch->tls->ctx->qlog_title)) == NULL) {
+ OPENSSL_free(ch);
+ return NULL;
+ }
+ }
+#endif
+
+ /*
+ * And finally init the channel struct
+ */
+ if (!ossl_quic_channel_init(ch)) {
+ OPENSSL_free(ch);
+ return NULL;
+ }
+
+ ossl_qtx_set_bio(ch->qtx, port->net_wbio);
+ return ch;
+}
+
+QUIC_CHANNEL *ossl_quic_port_create_outgoing(QUIC_PORT *port, SSL *tls)
+{
+ return port_make_channel(port, tls, NULL, /* is_server= */ 0,
+ /* is_tserver= */ 0);
+}
+
+QUIC_CHANNEL *ossl_quic_port_create_incoming(QUIC_PORT *port, SSL *tls)
+{
+ QUIC_CHANNEL *ch;
+
+ assert(port->tserver_ch == NULL);
+
+ /*
+     * Pass NULL for qrx to indicate the port will create the QRX
+ * later in port_default_packet_handler() when calling port_bind_channel().
+ */
+ ch = port_make_channel(port, tls, NULL, /* is_server= */ 1,
+ /* is_tserver_ch */ 1);
+ port->tserver_ch = ch;
+ port->allow_incoming = 1;
+ return ch;
+}
+
+QUIC_CHANNEL *ossl_quic_port_pop_incoming(QUIC_PORT *port)
+{
+ QUIC_CHANNEL *ch;
+
+ ch = ossl_list_incoming_ch_head(&port->incoming_channel_list);
+ if (ch == NULL)
+ return NULL;
+
+ ossl_list_incoming_ch_remove(&port->incoming_channel_list, ch);
+ return ch;
+}
+
+int ossl_quic_port_have_incoming(QUIC_PORT *port)
+{
+ return ossl_list_incoming_ch_head(&port->incoming_channel_list) != NULL;
+}
+
+void ossl_quic_port_drop_incoming(QUIC_PORT *port)
+{
+ QUIC_CHANNEL *ch;
+ SSL *tls;
+ SSL *user_ssl;
+ SSL_CONNECTION *sc;
+
+ for (;;) {
+ ch = ossl_quic_port_pop_incoming(port);
+ if (ch == NULL)
+ break;
+
+ tls = ossl_quic_channel_get0_tls(ch);
+ /*
+ * The user ssl may or may not have been created via the
+         * get_conn_user_ssl callback in the QUIC stack; the two cases are
+         * distinguished by whether the user_ssl and tls pointers differ. If
+         * they do, then the user_ssl needs freeing here, which sends us
+         * through ossl_quic_free, which then drops the actual ch->tls ref
+         * and frees the channel.
+ */
+ sc = SSL_CONNECTION_FROM_SSL(tls);
+ if (sc == NULL)
+ break;
+
+ user_ssl = SSL_CONNECTION_GET_USER_SSL(sc);
+ if (user_ssl == tls) {
+ ossl_quic_channel_free(ch);
+ SSL_free(tls);
+ } else {
+ SSL_free(user_ssl);
+ }
+ }
+}
+
+void ossl_quic_port_set_allow_incoming(QUIC_PORT *port, int allow_incoming)
+{
+ port->allow_incoming = allow_incoming;
+}
+
+/*
+ * QUIC Port: Ticker-Mutator
+ * =========================
+ */
+
+/*
+ * Tick function for this port. This does everything related to network I/O for
+ * this port's network BIOs, and services child channels.
+ */
+void ossl_quic_port_subtick(QUIC_PORT *port, QUIC_TICK_RESULT *res,
+ uint32_t flags)
+{
+ QUIC_CHANNEL *ch;
+
+ res->net_read_desired = ossl_quic_port_is_running(port);
+ res->net_write_desired = 0;
+ res->notify_other_threads = 0;
+ res->tick_deadline = ossl_time_infinite();
+
+ if (!port->engine->inhibit_tick) {
+ /* Handle any incoming data from network. */
+ if (ossl_quic_port_is_running(port))
+ port_rx_pre(port);
+
+ /* Iterate through all channels and service them. */
+ OSSL_LIST_FOREACH(ch, ch, &port->channel_list) {
+ QUIC_TICK_RESULT subr = {0};
+
+ ossl_quic_channel_subtick(ch, &subr, flags);
+ ossl_quic_tick_result_merge_into(res, &subr);
+ }
+ }
+}
+
+/* Process incoming datagrams, if any. */
+static void port_rx_pre(QUIC_PORT *port)
+{
+ int ret;
+
+ /*
+ * Originally, this check (don't RX before we have sent anything if we are
+ * not a server, because there can't be anything) was just intended as a
+ * minor optimisation. However, it is actually required on Windows, and
+ * removing this check will cause Windows to break.
+ *
+ * The reason is that under Win32, recvfrom() does not work on a UDP socket
+ * which has not had bind() called (???). However, calling sendto() will
+ * automatically bind an unbound UDP socket. Therefore, if we call a Winsock
+ * recv-type function before calling a Winsock send-type function, that call
+ * will fail with WSAEINVAL, which we will regard as a permanent network
+ * error.
+ *
+ * Therefore, this check is essential as we do not require our API users to
+ * bind a socket first when using the API in client mode.
+ */
+ if (!port->allow_incoming && !port->have_sent_any_pkt)
+ return;
+
+ /*
+ * Get DEMUX to BIO_recvmmsg from the network and queue incoming datagrams
+ * to the appropriate QRX instances.
+ */
+ ret = ossl_quic_demux_pump(port->demux);
+ if (ret == QUIC_DEMUX_PUMP_RES_PERMANENT_FAIL)
+ /*
+ * We don't care about transient failure, but permanent failure means we
+ * should tear down the port. All connections skip straight to the
+ * Terminated state as there is no point trying to send CONNECTION_CLOSE
+ * frames if the network BIO is not operating correctly.
+ */
+ ossl_quic_port_raise_net_error(port, NULL);
+}
+
+/*
+ * Handles an incoming connection request and potentially decides to make a
+ * connection from it. If a new connection is made, the new channel is written
+ * to *new_ch.
+ */
+static void port_bind_channel(QUIC_PORT *port, const BIO_ADDR *peer,
+ const QUIC_CONN_ID *scid, const QUIC_CONN_ID *dcid,
+ const QUIC_CONN_ID *odcid, OSSL_QRX *qrx,
+ QUIC_CHANNEL **new_ch)
+{
+ QUIC_CHANNEL *ch;
+
+ /*
+ * If we're running with a simulated tserver, it will already have
+ * a dummy channel created, use that instead
+ */
+ if (port->tserver_ch != NULL) {
+ ch = port->tserver_ch;
+ port->tserver_ch = NULL;
+ ossl_quic_channel_bind_qrx(ch, qrx);
+ ossl_qrx_set_msg_callback(ch->qrx, ch->msg_callback,
+ ch->msg_callback_ssl);
+ ossl_qrx_set_msg_callback_arg(ch->qrx, ch->msg_callback_arg);
+ } else {
+ ch = port_make_channel(port, NULL, qrx, /* is_server= */ 1,
+ /* is_tserver */ 0);
+ }
+
+ if (ch == NULL)
+ return;
+
+ /*
+     * If we weren't provided with a qrx here, we just created one, which
+     * means we need to set our initial secret now. Normally that isn't
+     * needed, as the initial secret gets added when we send our first
+     * server hello, but if we get a huge client hello crossing multiple
+     * datagrams, we don't get a chance to do that, and datagrams after the
+     * first won't get decoded properly for lack of secrets.
+ */
+ if (qrx == NULL)
+ if (!ossl_quic_provide_initial_secret(ch->port->engine->libctx,
+ ch->port->engine->propq,
+ dcid, /* is_server */ 1,
+ ch->qrx, NULL))
+ return;
+
+ if (odcid->id_len != 0) {
+ /*
+ * If we have an odcid, then we went through server address validation
+ * and as such, this channel need not conform to the 3x validation cap
+ * See RFC 9000 s. 8.1
+ */
+ ossl_quic_tx_packetiser_set_validated(ch->txp);
+ if (!ossl_quic_bind_channel(ch, peer, scid, dcid, odcid)) {
+ ossl_quic_channel_free(ch);
+ return;
+ }
+ } else {
+ /*
+ * No odcid means we didn't do server validation, so we need to
+ * generate a cid via ossl_quic_channel_on_new_conn
+ */
+ if (!ossl_quic_channel_on_new_conn(ch, peer, scid, dcid)) {
+ ossl_quic_channel_free(ch);
+ return;
+ }
+ }
+
+ ossl_list_incoming_ch_insert_tail(&port->incoming_channel_list, ch);
+ *new_ch = ch;
+}
+
+static int port_try_handle_stateless_reset(QUIC_PORT *port, const QUIC_URXE *e)
+{
+ size_t i;
+ const unsigned char *data = ossl_quic_urxe_data(e);
+ void *opaque = NULL;
+
+ /*
+ * Perform some fast and cheap checks for a packet not being a stateless
+ * reset token. RFC 9000 s. 10.3 specifies this layout for stateless
+ * reset packets:
+ *
+ * Stateless Reset {
+ * Fixed Bits (2) = 1,
+ * Unpredictable Bits (38..),
+ * Stateless Reset Token (128),
+ * }
+ *
+ * It also specifies:
+ * However, endpoints MUST treat any packet ending in a valid
+ * stateless reset token as a Stateless Reset, as other QUIC
+ * versions might allow the use of a long header.
+ *
+ * We can rapidly check for the minimum length and that the first pair
+ * of bits in the first byte are 01 or 11.
+ *
+     * The function returns 1 if the packet is a stateless reset packet and
+     * 0 if it isn't.
+ */
+ if (e->data_len < QUIC_STATELESS_RESET_TOKEN_LEN + 5
+ || (0100 & *data) != 0100)
+ return 0;
+
+ for (i = 0;; ++i) {
+ if (!ossl_quic_srtm_lookup(port->srtm,
+ (QUIC_STATELESS_RESET_TOKEN *)(data + e->data_len
+ - sizeof(QUIC_STATELESS_RESET_TOKEN)),
+ i, &opaque, NULL))
+ break;
+
+ assert(opaque != NULL);
+ ossl_quic_channel_on_stateless_reset((QUIC_CHANNEL *)opaque);
+ }
+
+ return i > 0;
+}
+
+static void cleanup_validation_token(QUIC_VALIDATION_TOKEN *token)
+{
+ OPENSSL_free(token->remote_addr);
+}
+
+/**
+ * @brief Generates a validation token for a RETRY/NEW_TOKEN packet.
+ *
+ * @param peer Address of the client peer receiving the packet.
+ * @param odcid DCID of the connection attempt.
+ * @param rscid Retry source connection ID of the connection attempt.
+ * @param token Address of token to fill data.
+ * @param is_retry Whether the token is for a RETRY packet (1) or a
+ * NEW_TOKEN packet (0).
+ *
+ * @return 1 if validation token is filled successfully, 0 otherwise.
+ */
+static int generate_token(BIO_ADDR *peer, QUIC_CONN_ID odcid,
+ QUIC_CONN_ID rscid, QUIC_VALIDATION_TOKEN *token,
+ int is_retry)
+{
+ token->is_retry = is_retry;
+ token->timestamp = ossl_time_now();
+ token->remote_addr = NULL;
+ token->odcid = odcid;
+ token->rscid = rscid;
+
+ if (!BIO_ADDR_rawaddress(peer, NULL, &token->remote_addr_len)
+ || token->remote_addr_len == 0
+ || (token->remote_addr = OPENSSL_malloc(token->remote_addr_len)) == NULL
+ || !BIO_ADDR_rawaddress(peer, token->remote_addr,
+ &token->remote_addr_len)) {
+ cleanup_validation_token(token);
+ return 0;
+ }
+
+ return 1;
+}
+
+/**
+ * @brief Marshals a validation token into a new buffer.
+ *
+ * |buffer| should already be allocated and at least MARSHALLED_TOKEN_MAX_LEN
+ * bytes long. Stores the length of data stored in |buffer| in |buffer_len|.
+ *
+ * @param token Validation token.
+ * @param buffer Address to store the marshalled token.
+ * @param buffer_len Size of data stored in |buffer|.
+ *
+ * @return 1 if the token is marshalled successfully, 0 otherwise.
+ */
+static int marshal_validation_token(QUIC_VALIDATION_TOKEN *token,
+ unsigned char *buffer, size_t *buffer_len)
+{
+ WPACKET wpkt = {0};
+ BUF_MEM *buf_mem = BUF_MEM_new();
+
+ if (buffer == NULL || buf_mem == NULL
+ || (token->is_retry != 0 && token->is_retry != 1)) {
+ BUF_MEM_free(buf_mem);
+ return 0;
+ }
+
+ if (!WPACKET_init(&wpkt, buf_mem)
+ || !WPACKET_memset(&wpkt, token->is_retry, 1)
+ || !WPACKET_memcpy(&wpkt, &token->timestamp,
+ sizeof(token->timestamp))
+ || (token->is_retry
+ && (!WPACKET_sub_memcpy_u8(&wpkt, &token->odcid.id,
+ token->odcid.id_len)
+ || !WPACKET_sub_memcpy_u8(&wpkt, &token->rscid.id,
+ token->rscid.id_len)))
+ || !WPACKET_sub_memcpy_u8(&wpkt, token->remote_addr, token->remote_addr_len)
+ || !WPACKET_get_total_written(&wpkt, buffer_len)
+ || *buffer_len > MARSHALLED_TOKEN_MAX_LEN
+ || !WPACKET_finish(&wpkt)) {
+ WPACKET_cleanup(&wpkt);
+ BUF_MEM_free(buf_mem);
+ return 0;
+ }
+
+ memcpy(buffer, buf_mem->data, *buffer_len);
+ BUF_MEM_free(buf_mem);
+ return 1;
+}
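+
+/*
+ * For reference, the layout produced above (and consumed by
+ * parse_validation_token() below) is, with sizes in bytes:
+ *
+ *     [is_retry:1][timestamp:8]
+ *     [odcid_len:1][odcid][rscid_len:1][rscid]    (present iff is_retry)
+ *     [addr_len:1][remote_addr]
+ */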
+
+/**
+ * @brief Encrypts a validation token using AES-256-GCM
+ *
+ * @param port The QUIC port containing the encryption key
+ * @param plaintext The data to encrypt
+ * @param pt_len Length of the plaintext
+ * @param ciphertext Buffer to receive encrypted data. If NULL, ct_len will be
+ * set to the required buffer size and function returns
+ * immediately.
+ * @param ct_len Pointer to size_t that will receive the ciphertext length.
+ * This also includes bytes for QUIC_RETRY_INTEGRITY_TAG_LEN.
+ *
+ * @return 1 on success, 0 on failure
+ *
+ * The ciphertext format is:
+ * [EVP_GCM_IV_LEN bytes IV][encrypted data][EVP_GCM_TAG_LEN bytes tag]
+ */
+static int encrypt_validation_token(const QUIC_PORT *port,
+ const unsigned char *plaintext,
+ size_t pt_len,
+ unsigned char *ciphertext,
+ size_t *ct_len)
+{
+ int iv_len, len, ret = 0;
+ size_t tag_len;
+ unsigned char *iv = ciphertext, *data, *tag;
+
+ if ((tag_len = EVP_CIPHER_CTX_get_tag_length(port->token_ctx)) == 0
+ || (iv_len = EVP_CIPHER_CTX_get_iv_length(port->token_ctx)) <= 0)
+ goto err;
+
+ *ct_len = iv_len + pt_len + tag_len + QUIC_RETRY_INTEGRITY_TAG_LEN;
+ if (ciphertext == NULL) {
+ ret = 1;
+ goto err;
+ }
+
+ data = ciphertext + iv_len;
+ tag = data + pt_len;
+
+ if (!RAND_bytes_ex(port->engine->libctx, ciphertext, iv_len, 0)
+ || !EVP_EncryptInit_ex(port->token_ctx, NULL, NULL, NULL, iv)
+ || !EVP_EncryptUpdate(port->token_ctx, data, &len, plaintext, pt_len)
+ || !EVP_EncryptFinal_ex(port->token_ctx, data + pt_len, &len)
+ || !EVP_CIPHER_CTX_ctrl(port->token_ctx, EVP_CTRL_GCM_GET_TAG, tag_len, tag))
+ goto err;
+
+ ret = 1;
+err:
+ return ret;
+}
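+
+/*
+ * Callers use a query-then-fill pattern with this function: a first call with
+ * a NULL ciphertext buffer obtains the required size, then a second call
+ * performs the encryption, e.g. (a minimal sketch, error handling elided):
+ *
+ *     unsigned char ct[ENCRYPTED_TOKEN_MAX_LEN];
+ *     size_t ct_len;
+ *
+ *     if (!encrypt_validation_token(port, pt, pt_len, NULL, &ct_len)
+ *         || ct_len > sizeof(ct)
+ *         || !encrypt_validation_token(port, pt, pt_len, ct, &ct_len))
+ *         goto err;
+ */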
+
+/**
+ * @brief Decrypts a validation token using AES-256-GCM
+ *
+ * @param port The QUIC port containing the decryption key
+ * @param ciphertext The encrypted data (including IV and tag)
+ * @param ct_len Length of the ciphertext
+ * @param plaintext Buffer to receive decrypted data. If NULL, pt_len will be
+ * set to the required buffer size.
+ * @param pt_len Pointer to size_t that will receive the plaintext length
+ *
+ * @return 1 on success, 0 on failure
+ *
+ * Expected ciphertext format:
+ * [EVP_GCM_IV_LEN bytes IV][encrypted data][EVP_GCM_TAG_LEN bytes tag]
+ */
+static int decrypt_validation_token(const QUIC_PORT *port,
+ const unsigned char *ciphertext,
+ size_t ct_len,
+ unsigned char *plaintext,
+ size_t *pt_len)
+{
+ int iv_len, len = 0, ret = 0;
+ size_t tag_len;
+ const unsigned char *iv = ciphertext, *data, *tag;
+
+ if ((tag_len = EVP_CIPHER_CTX_get_tag_length(port->token_ctx)) == 0
+ || (iv_len = EVP_CIPHER_CTX_get_iv_length(port->token_ctx)) <= 0)
+ goto err;
+
+ /* Prevent decryption of a buffer that is not within reasonable bounds */
+ if (ct_len < (iv_len + tag_len) || ct_len > ENCRYPTED_TOKEN_MAX_LEN)
+ goto err;
+
+ *pt_len = ct_len - iv_len - tag_len;
+ if (plaintext == NULL) {
+ ret = 1;
+ goto err;
+ }
+
+ data = ciphertext + iv_len;
+ tag = ciphertext + ct_len - tag_len;
+
+ if (!EVP_DecryptInit_ex(port->token_ctx, NULL, NULL, NULL, iv)
+ || !EVP_DecryptUpdate(port->token_ctx, plaintext, &len, data,
+ ct_len - iv_len - tag_len)
+ || !EVP_CIPHER_CTX_ctrl(port->token_ctx, EVP_CTRL_GCM_SET_TAG, tag_len,
+ (void *)tag)
+ || !EVP_DecryptFinal_ex(port->token_ctx, plaintext + len, &len))
+ goto err;
+
+ ret = 1;
+
+err:
+ return ret;
+}
+
+/**
+ * @brief Parses contents of a buffer into a validation token.
+ *
+ * |token| should already be initialized. Performs some basic sanity checks.
+ *
+ * @param token Validation token to fill data in.
+ * @param buf Buffer of previously marshaled validation token.
+ * @param buf_len Length of |buf|.
+ *
+ * @return 1 if the token was parsed successfully, 0 otherwise.
+ */
+static int parse_validation_token(QUIC_VALIDATION_TOKEN *token,
+ const unsigned char *buf, size_t buf_len)
+{
+ PACKET pkt, subpkt;
+
+ if (buf == NULL || token == NULL)
+ return 0;
+
+ token->remote_addr = NULL;
+
+ if (!PACKET_buf_init(&pkt, buf, buf_len)
+ || !PACKET_copy_bytes(&pkt, &token->is_retry, sizeof(token->is_retry))
+ || !(token->is_retry == 0 || token->is_retry == 1)
+ || !PACKET_copy_bytes(&pkt, (unsigned char *)&token->timestamp,
+ sizeof(token->timestamp))
+ || (token->is_retry
+ && (!PACKET_get_length_prefixed_1(&pkt, &subpkt)
+ || (token->odcid.id_len = (unsigned char)PACKET_remaining(&subpkt))
+ > QUIC_MAX_CONN_ID_LEN
+ || !PACKET_copy_bytes(&subpkt,
+ (unsigned char *)&token->odcid.id,
+ token->odcid.id_len)
+ || !PACKET_get_length_prefixed_1(&pkt, &subpkt)
+ || (token->rscid.id_len = (unsigned char)PACKET_remaining(&subpkt))
+ > QUIC_MAX_CONN_ID_LEN
+ || !PACKET_copy_bytes(&subpkt, (unsigned char *)&token->rscid.id,
+ token->rscid.id_len)))
+ || !PACKET_get_length_prefixed_1(&pkt, &subpkt)
+ || (token->remote_addr_len = PACKET_remaining(&subpkt)) == 0
+ || (token->remote_addr = OPENSSL_malloc(token->remote_addr_len)) == NULL
+ || !PACKET_copy_bytes(&subpkt, token->remote_addr, token->remote_addr_len)
+ || PACKET_remaining(&pkt) != 0) {
+ cleanup_validation_token(token);
+ return 0;
+ }
+
+ return 1;
+}
+
+/**
+ * @brief Sends a QUIC Retry packet to a client.
+ *
+ * This function constructs and sends a Retry packet to the specified client
+ * using the provided connection header information. The Retry packet
+ * includes a generated validation token and a new connection ID, following
+ * the QUIC protocol specifications for connection establishment.
+ *
+ * @param port Pointer to the QUIC port from which to send the packet.
+ * @param peer Address of the client peer receiving the packet.
+ * @param client_hdr Header of the client's initial packet, containing
+ * connection IDs and other relevant information.
+ *
+ * This function performs the following steps:
+ * - Generates a validation token for the client.
+ * - Sets the destination and source connection IDs.
+ * - Calculates the integrity tag and sets the token length.
+ * - Encodes and sends the packet via the BIO network interface.
+ *
+ * Error handling is included for failures in CID generation, encoding, and
+ * network transmission.
+ */
+static void port_send_retry(QUIC_PORT *port,
+ BIO_ADDR *peer,
+ QUIC_PKT_HDR *client_hdr)
+{
+ BIO_MSG msg[1];
+ /*
+     * The buffer is used both for marshalling the token and for the Retry
+     * packet. Its size must not be less than MARSHALLED_TOKEN_MAX_LEN.
+ */
+ unsigned char buffer[512];
+ unsigned char ct_buf[ENCRYPTED_TOKEN_MAX_LEN];
+ WPACKET wpkt;
+ size_t written, token_buf_len, ct_len;
+ QUIC_PKT_HDR hdr = {0};
+ QUIC_VALIDATION_TOKEN token = {0};
+ int ok;
+
+ if (!ossl_assert(sizeof(buffer) >= MARSHALLED_TOKEN_MAX_LEN))
+ return;
+ /*
+     * RFC 9000 s. 17.2.5.1 (Sending a Retry Packet):
+     * the dst conn ID is the src conn ID we got from the client;
+     * the src conn ID comes from the local conn ID manager.
+ */
+ memset(&hdr, 0, sizeof(QUIC_PKT_HDR));
+ hdr.dst_conn_id = client_hdr->src_conn_id;
+ /*
+     * This is a random connection ID; we expect the client to send this ID
+     * with its next Initial packet, which will also carry the token we
+     * generate here.
+ */
+ ok = ossl_quic_lcidm_get_unused_cid(port->lcidm, &hdr.src_conn_id);
+ if (ok == 0)
+ goto err;
+
+ memset(&token, 0, sizeof(QUIC_VALIDATION_TOKEN));
+
+ /* Generate retry validation token */
+ if (!generate_token(peer, client_hdr->dst_conn_id,
+ hdr.src_conn_id, &token, 1)
+ || !marshal_validation_token(&token, buffer, &token_buf_len)
+ || !encrypt_validation_token(port, buffer, token_buf_len, NULL,
+ &ct_len)
+ || ct_len > ENCRYPTED_TOKEN_MAX_LEN
+ || !encrypt_validation_token(port, buffer, token_buf_len, ct_buf,
+ &ct_len)
+ || !ossl_assert(ct_len >= QUIC_RETRY_INTEGRITY_TAG_LEN))
+ goto err;
+
+ hdr.dst_conn_id = client_hdr->src_conn_id;
+ hdr.type = QUIC_PKT_TYPE_RETRY;
+ hdr.fixed = 1;
+ hdr.version = 1;
+ hdr.len = ct_len;
+ hdr.data = ct_buf;
+ ok = ossl_quic_calculate_retry_integrity_tag(port->engine->libctx,
+ port->engine->propq, &hdr,
+ &client_hdr->dst_conn_id,
+ ct_buf + ct_len
+ - QUIC_RETRY_INTEGRITY_TAG_LEN);
+ if (ok == 0)
+ goto err;
+
+ hdr.token = hdr.data;
+ hdr.token_len = hdr.len;
+
+ msg[0].data = buffer;
+ msg[0].peer = peer;
+ msg[0].local = NULL;
+ msg[0].flags = 0;
+
+ ok = WPACKET_init_static_len(&wpkt, buffer, sizeof(buffer), 0);
+ if (ok == 0)
+ goto err;
+
+ ok = ossl_quic_wire_encode_pkt_hdr(&wpkt, client_hdr->dst_conn_id.id_len,
+ &hdr, NULL);
+ if (ok == 0)
+ goto err;
+
+ ok = WPACKET_get_total_written(&wpkt, &msg[0].data_len);
+ if (ok == 0)
+ goto err;
+
+ ok = WPACKET_finish(&wpkt);
+ if (ok == 0)
+ goto err;
+
+ /*
+     * TODO(QUIC FUTURE): need to retry this in the event it returns EAGAIN
+ * on a non-blocking BIO
+ */
+ if (!BIO_sendmmsg(port->net_wbio, msg, sizeof(BIO_MSG), 1, 0, &written))
+ ERR_raise_data(ERR_LIB_SSL, SSL_R_QUIC_NETWORK_ERROR,
+ "port retry send failed due to network BIO I/O error");
+
+err:
+ cleanup_validation_token(&token);
+}
+
+/**
+ * @brief Sends a QUIC Version Negotiation packet to the specified peer.
+ *
+ * This function constructs and sends a Version Negotiation packet using
+ * the connection IDs from the client's initial packet header. The
+ * Version Negotiation packet indicates support for QUIC version 1.
+ *
+ * @param port Pointer to the QUIC_PORT structure representing the port
+ * context used for network communication.
+ * @param peer Pointer to the BIO_ADDR structure specifying the address
+ * of the peer to which the Version Negotiation packet
+ * will be sent.
+ * @param client_hdr Pointer to the QUIC_PKT_HDR structure containing the
+ * client's packet header used to extract connection IDs.
+ *
+ * @note The function will raise an error if sending the message fails.
+ */
+static void port_send_version_negotiation(QUIC_PORT *port, BIO_ADDR *peer,
+ QUIC_PKT_HDR *client_hdr)
+{
+ BIO_MSG msg[1];
+ unsigned char buffer[1024];
+ QUIC_PKT_HDR hdr;
+ WPACKET wpkt;
+ uint32_t supported_versions[1];
+ size_t written;
+ size_t i;
+
+ memset(&hdr, 0, sizeof(QUIC_PKT_HDR));
+ /*
+ * Reverse the source and dst conn ids
+ */
+ hdr.dst_conn_id = client_hdr->src_conn_id;
+ hdr.src_conn_id = client_hdr->dst_conn_id;
+
+ /*
+     * This is our list of supported protocol versions.
+     * Currently only QUIC_VERSION_1 is supported.
+ */
+ supported_versions[0] = QUIC_VERSION_1;
+
+ /*
+ * Fill out the header fields
+     * Note: Version negotiation packets must, unlike
+     * other packet types, have a version of 0
+ */
+ hdr.type = QUIC_PKT_TYPE_VERSION_NEG;
+ hdr.version = 0;
+ hdr.token = 0;
+ hdr.token_len = 0;
+ hdr.len = sizeof(supported_versions);
+ hdr.data = (unsigned char *)supported_versions;
+
+ msg[0].data = buffer;
+ msg[0].peer = peer;
+ msg[0].local = NULL;
+ msg[0].flags = 0;
+
+ if (!WPACKET_init_static_len(&wpkt, buffer, sizeof(buffer), 0))
+ return;
+
+ if (!ossl_quic_wire_encode_pkt_hdr(&wpkt, client_hdr->dst_conn_id.id_len,
+ &hdr, NULL))
+ return;
+
+ /*
+ * Add the array of supported versions to the end of the packet
+ */
+ for (i = 0; i < OSSL_NELEM(supported_versions); i++) {
+ if (!WPACKET_put_bytes_u32(&wpkt, supported_versions[i]))
+ return;
+ }
+
+ if (!WPACKET_get_total_written(&wpkt, &msg[0].data_len))
+ return;
+
+ if (!WPACKET_finish(&wpkt))
+ return;
+
+ /*
+ * Send it back to the client attempting to connect
+ * TODO(QUIC FUTURE): Need to handle the EAGAIN case here, if the
+     * BIO_sendmmsg call fails in a retryable manner
+ */
+ if (!BIO_sendmmsg(port->net_wbio, msg, sizeof(BIO_MSG), 1, 0, &written))
+ ERR_raise_data(ERR_LIB_SSL, SSL_R_QUIC_NETWORK_ERROR,
+ "port version negotiation send failed");
+}
+
+/**
+ * @brief Definitions of token lifetimes
+ *
+ * RETRY tokens are only valid for 10 seconds
+ * NEW_TOKEN tokens have a lifetime of 3600 sec (1 hour)
+ */
+
+#define RETRY_LIFETIME 10
+#define NEW_TOKEN_LIFETIME 3600
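+
+/*
+ * Token age is measured from the timestamp embedded in the token; a sketch
+ * mirroring the expiry check in port_validate_token() below:
+ *
+ *     time_diff = ossl_time2seconds(ossl_time_abs_difference(token.timestamp,
+ *                                                            now));
+ *     expired = (token.is_retry && time_diff > RETRY_LIFETIME)
+ *               || (!token.is_retry && time_diff > NEW_TOKEN_LIFETIME);
+ */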
+/**
+ * @brief Validates a received token in a QUIC packet header.
+ *
+ * This function checks the validity of a token contained in the provided
+ * QUIC packet header (`QUIC_PKT_HDR *hdr`). The validation process involves
+ * verifying that the token matches an expected format and value. If the
+ * token is from a RETRY packet, the function extracts the original connection
+ * ID (ODCID)/original source connection ID (SCID) and stores it in the provided
+ * parameters. If the token is from a NEW_TOKEN packet, the values will be
+ * derived instead.
+ *
+ * @param hdr Pointer to the QUIC packet header containing the token.
+ * @param port Pointer to the QUIC port from which to send the packet.
+ * @param peer Address of the client peer receiving the packet.
+ * @param odcid Pointer to the connection ID structure to store the ODCID if the
+ * token is valid.
+ * @param scid Pointer to the connection ID structure to store the SCID if the
+ * token is valid.
+ * @param gen_new_token Set to 1 if the caller should send a NEW_TOKEN frame
+ * to the client, 0 otherwise.
+ *
+ * @return 1 if the token is valid and ODCID/SCID are successfully set.
+ * 0 otherwise.
+ *
+ * The function performs the following checks:
+ * - Token length meets the required minimum.
+ * - Buffer matches expected format.
+ * - Peer address matches previous connection address.
+ * - Token has not expired. Currently set to 10 seconds for tokens from RETRY
+ *   packets and 60 minutes for tokens from NEW_TOKEN frames. This may be
+ * configurable in the future.
+ */
+static int port_validate_token(QUIC_PKT_HDR *hdr, QUIC_PORT *port,
+ BIO_ADDR *peer, QUIC_CONN_ID *odcid,
+ QUIC_CONN_ID *scid, uint8_t *gen_new_token)
+{
+ int ret = 0;
+ QUIC_VALIDATION_TOKEN token = { 0 };
+ uint64_t time_diff;
+ size_t remote_addr_len, dec_token_len;
+ unsigned char *remote_addr = NULL, dec_token[MARSHALLED_TOKEN_MAX_LEN];
+ OSSL_TIME now = ossl_time_now();
+
+ *gen_new_token = 0;
+
+ if (!decrypt_validation_token(port, hdr->token, hdr->token_len, NULL,
+ &dec_token_len)
+ || dec_token_len > MARSHALLED_TOKEN_MAX_LEN
+ || !decrypt_validation_token(port, hdr->token, hdr->token_len,
+ dec_token, &dec_token_len)
+ || !parse_validation_token(&token, dec_token, dec_token_len))
+ goto err;
+
+ /*
+ * Validate token timestamp. Current time should not be before the token
+ * timestamp.
+ */
+ if (ossl_time_compare(now, token.timestamp) < 0)
+ goto err;
+ time_diff = ossl_time2seconds(ossl_time_abs_difference(token.timestamp,
+ now));
+ if ((token.is_retry && time_diff > RETRY_LIFETIME)
+ || (!token.is_retry && time_diff > NEW_TOKEN_LIFETIME))
+ goto err;
+
+ /* Validate remote address */
+ if (!BIO_ADDR_rawaddress(peer, NULL, &remote_addr_len)
+ || remote_addr_len != token.remote_addr_len
+ || (remote_addr = OPENSSL_malloc(remote_addr_len)) == NULL
+ || !BIO_ADDR_rawaddress(peer, remote_addr, &remote_addr_len)
+ || memcmp(remote_addr, token.remote_addr, remote_addr_len) != 0)
+ goto err;
+
+ /*
+ * Set ODCID and SCID. If the token is from a RETRY packet, retrieve both
+ * from the token. Otherwise, generate a new ODCID and use the header's
+ * source connection ID for SCID.
+ */
+ if (token.is_retry) {
+ /*
+         * We're parsing a packet header before it has gone through AEAD
+         * validation here, so there is a chance we are dealing with corrupted
+         * data. Make sure the DCID encoded in the token matches the header's
+         * DCID to mitigate that.
+ * TODO(QUIC FUTURE): Consider handling AEAD validation at the port
+ * level rather than the QRX/channel level to eliminate the need for
+ * this.
+ */
+ if (token.rscid.id_len != hdr->dst_conn_id.id_len
+ || memcmp(&token.rscid.id, &hdr->dst_conn_id.id,
+ token.rscid.id_len) != 0)
+ goto err;
+ *odcid = token.odcid;
+ *scid = token.rscid;
+ } else {
+ if (!ossl_quic_lcidm_get_unused_cid(port->lcidm, odcid))
+ goto err;
+ *scid = hdr->src_conn_id;
+ }
+
+ /*
+ * Determine if we need to send a NEW_TOKEN frame
+ * If we validated a retry token, we should always
+ * send a NEW_TOKEN frame to the client
+ *
+     * If, however, we validated a NEW_TOKEN, which may be
+ * reused multiple times, only send a NEW_TOKEN frame
+ * if the existing received token has less than 10% of its lifetime
+ * remaining. This prevents us from constantly sending
+ * NEW_TOKEN frames on every connection when not needed
+ */
+ if (token.is_retry) {
+ *gen_new_token = 1;
+ } else {
+ if (time_diff > ((NEW_TOKEN_LIFETIME * 9) / 10))
+ *gen_new_token = 1;
+ }
+
+ ret = 1;
+err:
+ cleanup_validation_token(&token);
+ OPENSSL_free(remote_addr);
+ return ret;
+}
+
+static void generate_new_token(QUIC_CHANNEL *ch, BIO_ADDR *peer)
+{
+ QUIC_CONN_ID rscid = { 0 };
+ QUIC_VALIDATION_TOKEN token;
+ unsigned char buffer[ENCRYPTED_TOKEN_MAX_LEN];
+ unsigned char *ct_buf;
+ size_t ct_len;
+ size_t token_buf_len = 0;
+
+ /* Clients never send a NEW_TOKEN */
+ if (!ch->is_server)
+ return;
+
+ ct_buf = OPENSSL_zalloc(ENCRYPTED_TOKEN_MAX_LEN);
+ if (ct_buf == NULL)
+ return;
+
+ /*
+     * NEW_TOKEN tokens may be used for multiple subsequent connections
+     * within their timeout period, so don't reserve an rscid here
+     * like we do for retry tokens; instead, just fill it with random
+     * data, as we won't use it anyway.
+ */
+ rscid.id_len = 8;
+ if (!RAND_bytes_ex(ch->port->engine->libctx, rscid.id, 8, 0)) {
+ OPENSSL_free(ct_buf);
+ return;
+ }
+
+ memset(&token, 0, sizeof(QUIC_VALIDATION_TOKEN));
+
+ if (!generate_token(peer, ch->init_dcid, rscid, &token, 0)
+ || !marshal_validation_token(&token, buffer, &token_buf_len)
+ || !encrypt_validation_token(ch->port, buffer, token_buf_len, NULL,
+ &ct_len)
+ || ct_len > ENCRYPTED_TOKEN_MAX_LEN
+ || !encrypt_validation_token(ch->port, buffer, token_buf_len, ct_buf,
+ &ct_len)
+ || !ossl_assert(ct_len >= QUIC_RETRY_INTEGRITY_TAG_LEN)) {
+ OPENSSL_free(ct_buf);
+ cleanup_validation_token(&token);
+ return;
+ }
+
+ ch->pending_new_token = ct_buf;
+ ch->pending_new_token_len = ct_len;
+
+ cleanup_validation_token(&token);
+}
+
+/*
+ * This is called by the demux when we get a packet not destined for any known
+ * DCID.
+ */
+static void port_default_packet_handler(QUIC_URXE *e, void *arg,
+ const QUIC_CONN_ID *dcid)
+{
+ QUIC_PORT *port = arg;
+ PACKET pkt;
+ QUIC_PKT_HDR hdr;
+ QUIC_CHANNEL *ch = NULL, *new_ch = NULL;
+ QUIC_CONN_ID odcid, scid;
+ uint8_t gen_new_token = 0;
+ OSSL_QRX *qrx = NULL;
+ OSSL_QRX *qrx_src = NULL;
+ OSSL_QRX_ARGS qrx_args = {0};
+ uint64_t cause_flags = 0;
+ OSSL_QRX_PKT *qrx_pkt = NULL;
+
+ /* Don't handle anything if we are no longer running. */
+ if (!ossl_quic_port_is_running(port))
+ goto undesirable;
+
+ if (port_try_handle_stateless_reset(port, e))
+ goto undesirable;
+
+ if (dcid != NULL
+ && ossl_quic_lcidm_lookup(port->lcidm, dcid, NULL,
+ (void **)&ch)) {
+ assert(ch != NULL);
+ ossl_quic_channel_inject(ch, e);
+ return;
+ }
+
+ /*
+ * If we have an incoming packet which doesn't match any existing connection
+ * we assume this is an attempt to make a new connection.
+ */
+ if (!port->allow_incoming)
+ goto undesirable;
+
+ /*
+ * We have got a packet for an unknown DCID. This might be an attempt to
+ * open a new connection.
+ */
+ if (e->data_len < QUIC_MIN_INITIAL_DGRAM_LEN)
+ goto undesirable;
+
+ if (!PACKET_buf_init(&pkt, ossl_quic_urxe_data(e), e->data_len))
+ goto undesirable;
+
+ /*
+ * We set short_conn_id_len to SIZE_MAX here which will cause the decode
+ * operation to fail if we get a 1-RTT packet. This is fine since we only
+ * care about Initial packets.
+ */
+ if (!ossl_quic_wire_decode_pkt_hdr(&pkt, SIZE_MAX, 1, 0, &hdr, NULL,
+ &cause_flags)) {
+ /*
+ * If we fail due to a bad version, we know the packet up to the version
+ * number was decoded, and we use it below to send a version
+ * negotiation packet
+ */
+ if ((cause_flags & QUIC_PKT_HDR_DECODE_BAD_VERSION) == 0)
+ goto undesirable;
+ }
+
+ switch (hdr.version) {
+ case QUIC_VERSION_1:
+ break;
+
+ case QUIC_VERSION_NONE:
+ default:
+
+ /*
+ * If we get here, then we have a bogus version, and might need
+ * to send a version negotiation packet. According to
+ * RFC 9000 s. 6 and 14.1, we only do so however, if the UDP datagram
+ * is a minimum of 1200 bytes in size
+ */
+ if (e->data_len < 1200)
+ goto undesirable;
+
+ /*
+         * If we don't get a supported version, respond with a version
+ * negotiation packet, and discard
+ * TODO(QUIC FUTURE): Rate limit the reception of these
+ */
+ port_send_version_negotiation(port, &e->peer, &hdr);
+ goto undesirable;
+ }
+
+ /*
+ * We only care about Initial packets which might be trying to establish a
+ * connection.
+ */
+ if (hdr.type != QUIC_PKT_TYPE_INITIAL)
+ goto undesirable;
+
+ odcid.id_len = 0;
+
+ /*
+     * Create qrx now so we can check the integrity of a packet
+     * which does not belong to any channel.
+ */
+ qrx_args.libctx = port->engine->libctx;
+ qrx_args.demux = port->demux;
+ qrx_args.short_conn_id_len = dcid->id_len;
+ qrx_args.max_deferred = 32;
+ qrx = ossl_qrx_new(&qrx_args);
+ if (qrx == NULL)
+ goto undesirable;
+
+ /*
+ * Derive secrets for qrx only.
+ */
+ if (!ossl_quic_provide_initial_secret(port->engine->libctx,
+ port->engine->propq,
+ &hdr.dst_conn_id,
+ /* is_server */ 1,
+ qrx, NULL))
+ goto undesirable;
+
+ if (ossl_qrx_validate_initial_packet(qrx, e, (const QUIC_CONN_ID *)dcid) == 0)
+ goto undesirable;
+
+ if (port->validate_addr == 0) {
+ /*
+         * Forget qrx, because it becomes (almost) useless here. We must let
+         * the channel create a new QRX for the connection ID the server
+         * chooses. The validation keys for the new DCID will be derived by
+         * ossl_quic_channel_on_new_conn() when we create the channel.
+         * See RFC 9000 section 7.2 (negotiating connection IDs) to better
+         * understand what's going on here.
+         *
+         * Why is qrx only almost useless? Because it remembers the packets we
+         * just validated. Those packets must be injected into the channel we
+         * are going to create. We use the qrx_src alias so we can read packets
+         * from qrx and inject them into the channel.
+ */
+ qrx_src = qrx;
+ qrx = NULL;
+ }
+ /*
+     * TODO(QUIC FUTURE): there should be some logic similar to accounting
+     * for half-open states in TCP. If we reach a certain threshold, then we
+     * want to validate clients.
+ */
+ if (port->validate_addr == 1 && hdr.token == NULL) {
+ port_send_retry(port, &e->peer, &hdr);
+ goto undesirable;
+ }
+
+ /*
+     * Note that even if we don't enforce the sending of Retry packets for
+     * server address validation, we may still get a token if we sent
+     * a NEW_TOKEN frame during a prior connection, which we should still
+     * validate here.
+ */
+ if (hdr.token != NULL
+ && port_validate_token(&hdr, port, &e->peer,
+ &odcid, &scid,
+ &gen_new_token) == 0) {
+ /*
+ * RFC 9000 s 8.1.3
+ * When a server receives an Initial packet with an address
+ * validation token, it MUST attempt to validate the token,
+ * unless it has already completed address validation.
+ * If the token is invalid, then the server SHOULD proceed as
+ * if the client did not have a validated address,
+ * including potentially sending a Retry packet
+ * Note: If address validation is disabled, just act like
+ * the request is valid
+ */
+ if (port->validate_addr == 1) {
+ /*
+             * Again: we should consider saving the initial encryption level
+             * secrets to the token here to save some CPU cycles.
+ */
+ port_send_retry(port, &e->peer, &hdr);
+ goto undesirable;
+ }
+
+ /*
+         * The client is subject to the amplification limit until it
+         * completes the handshake.
+         *
+         * Forget qrx so the channel can create a new one
+         * with valid initial encryption level keys.
+ */
+ qrx_src = qrx;
+ qrx = NULL;
+ }
+
+ port_bind_channel(port, &e->peer, &scid, &hdr.dst_conn_id,
+ &odcid, qrx, &new_ch);
+
+ /*
+     * If the packet validates, it is moved to the channel we've just bound
+     * to the port.
+ */
+ if (new_ch == NULL)
+ goto undesirable;
+
+ /*
+ * Generate a token for sending in a later NEW_TOKEN frame
+ */
+ if (gen_new_token == 1)
+ generate_new_token(new_ch, &e->peer);
+
+ if (qrx != NULL) {
+ /*
+ * The qrx belongs to channel now, so don't free it.
+ */
+ qrx = NULL;
+ } else {
+ /*
+         * We still need to salvage packets from the almost-forgotten qrx
+         * and pass them to the channel.
+ */
+ while (ossl_qrx_read_pkt(qrx_src, &qrx_pkt) == 1)
+ ossl_quic_channel_inject_pkt(new_ch, qrx_pkt);
+ ossl_qrx_update_pn_space(qrx_src, new_ch->qrx);
+ }
+
+ /*
+     * If execution reaches this point, then the packet was validated by
+     * ossl_qrx_validate_initial_packet(). Keep in mind that
+     * ossl_qrx_validate_initial_packet() decrypts the packet in order to
+     * validate it. If packet validation was successful (and it was, because
+     * we are here), then that function puts the packet on qrx->rx_pending.
+     * We must not call ossl_qrx_inject_urxe() here now, because we don't want
+     * to insert the packet into qrx->urx_pending, which holds packets waiting
+     * for decryption.
+     *
+     * We are going to call ossl_quic_demux_release_urxe() to dispose of the
+     * buffer, which still holds encrypted data.
+ */
+
+undesirable:
+ ossl_qrx_free(qrx);
+ ossl_qrx_free(qrx_src);
+ ossl_quic_demux_release_urxe(port->demux, e);
+}
+
+void ossl_quic_port_raise_net_error(QUIC_PORT *port,
+ QUIC_CHANNEL *triggering_ch)
+{
+ QUIC_CHANNEL *ch;
+
+ if (!ossl_quic_port_is_running(port))
+ return;
+
+ /*
+ * Immediately capture any triggering error on the error stack, with a
+ * cover error.
+ */
+ ERR_raise_data(ERR_LIB_SSL, SSL_R_QUIC_NETWORK_ERROR,
+ "port failed due to network BIO I/O error");
+ OSSL_ERR_STATE_save(port->err_state);
+
+ port_transition_failed(port);
+
+ /* Give the triggering channel (if any) the first notification. */
+ if (triggering_ch != NULL)
+ ossl_quic_channel_raise_net_error(triggering_ch);
+
+ OSSL_LIST_FOREACH(ch, ch, &port->channel_list)
+ if (ch != triggering_ch)
+ ossl_quic_channel_raise_net_error(ch);
+}
+
+void ossl_quic_port_restore_err_state(const QUIC_PORT *port)
+{
+ ERR_clear_error();
+ OSSL_ERR_STATE_restore(port->err_state);
+}
diff --git a/crypto/openssl/ssl/quic/quic_port_local.h b/crypto/openssl/ssl/quic/quic_port_local.h
new file mode 100644
index 000000000000..e36272a94d61
--- /dev/null
+++ b/crypto/openssl/ssl/quic/quic_port_local.h
@@ -0,0 +1,123 @@
+/*
+ * Copyright 2023-2025 The OpenSSL Project Authors. All Rights Reserved.
+ *
+ * Licensed under the Apache License 2.0 (the "License"). You may not use
+ * this file except in compliance with the License. You can obtain a copy
+ * in the file LICENSE in the source distribution or at
+ * https://www.openssl.org/source/license.html
+ */
+
+#ifndef OSSL_QUIC_PORT_LOCAL_H
+# define OSSL_QUIC_PORT_LOCAL_H
+
+# include "internal/quic_port.h"
+# include "internal/quic_reactor.h"
+# include "internal/list.h"
+
+# ifndef OPENSSL_NO_QUIC
+
+/*
+ * QUIC Port Structure
+ * ===================
+ *
+ * QUIC port internals. It is intended that only the QUIC_PORT and QUIC_CHANNEL
+ * implementation be allowed to access this structure directly.
+ *
+ * Other components should not include this header.
+ */
+DECLARE_LIST_OF(ch, QUIC_CHANNEL);
+DECLARE_LIST_OF(incoming_ch, QUIC_CHANNEL);
+
+/* A port is always in one of the following states: */
+enum {
+ /* Initial and steady state. */
+ QUIC_PORT_STATE_RUNNING,
+
+ /*
+ * Terminal state indicating port is no longer functioning. There are no
+ * transitions out of this state. May be triggered by e.g. a permanent
+ * network BIO error.
+ */
+ QUIC_PORT_STATE_FAILED
+};
+
+struct quic_port_st {
+ /* The engine which this port is a child of. */
+ QUIC_ENGINE *engine;
+
+ /*
+ * QUIC_ENGINE keeps the ports which belong to it on a list for bookkeeping
+ * purposes.
+ */
+ OSSL_LIST_MEMBER(port, QUIC_PORT);
+
+ SSL * (*get_conn_user_ssl)(QUIC_CHANNEL *ch, void *arg);
+ void *user_ssl_arg;
+
+ /* Used to create handshake layer objects inside newly created channels. */
+ SSL_CTX *channel_ctx;
+
+ /* Network-side read and write BIOs. */
+ BIO *net_rbio, *net_wbio;
+
+ /* RX demuxer. We register incoming DCIDs with this. */
+ QUIC_DEMUX *demux;
+
+ /* List of all child channels. */
+ OSSL_LIST(ch) channel_list;
+
+ /*
+ * Queue of unaccepted incoming channels. Each such channel is also on
+ * channel_list.
+ */
+ OSSL_LIST(incoming_ch) incoming_channel_list;
+
+ /* Special TSERVER channel. To be removed in the future. */
+ QUIC_CHANNEL *tserver_ch;
+
+ /* LCIDM used for incoming packet routing by DCID. */
+ QUIC_LCIDM *lcidm;
+
+ /* SRTM used for incoming packet routing by SRT. */
+ QUIC_SRTM *srtm;
+
+ /* Port-level permanent errors (causing failure state) are stored here. */
+ ERR_STATE *err_state;
+
+ /* DCID length used for incoming short header packets. */
+ unsigned char rx_short_dcid_len;
+ /* For clients, CID length used for outgoing Initial packets. */
+ unsigned char tx_init_dcid_len;
+
+ /* Port state (QUIC_PORT_STATE_*). */
+ unsigned int state : 1;
+
+ /* Is this port created to support multiple connections? */
+ unsigned int is_multi_conn : 1;
+
+    /* Is this port doing server address validation? */
+ unsigned int validate_addr : 1;
+
+ /* Has this port sent any packet of any kind yet? */
+ unsigned int have_sent_any_pkt : 1;
+
+ /* Does this port allow incoming connections? */
+ unsigned int allow_incoming : 1;
+
+ /* Are we on the QUIC_ENGINE linked list of ports? */
+ unsigned int on_engine_list : 1;
+
+ /* Are we using addressed mode (BIO_sendmmsg with non-NULL peer)? */
+ unsigned int addressed_mode_w : 1;
+ unsigned int addressed_mode_r : 1;
+
+ /* Has the BIO been changed since we last updated reactor pollability? */
+ unsigned int bio_changed : 1;
+
+ /* AES-256 GCM context for token encryption */
+ EVP_CIPHER_CTX *token_ctx;
+};
+
+# endif
+
+#endif
diff --git a/crypto/openssl/ssl/quic/quic_rcidm.c b/crypto/openssl/ssl/quic/quic_rcidm.c
new file mode 100644
index 000000000000..18f7e8096b20
--- /dev/null
+++ b/crypto/openssl/ssl/quic/quic_rcidm.c
@@ -0,0 +1,688 @@
+/*
+ * Copyright 2023-2024 The OpenSSL Project Authors. All Rights Reserved.
+ *
+ * Licensed under the Apache License 2.0 (the "License"). You may not use
+ * this file except in compliance with the License. You can obtain a copy
+ * in the file LICENSE in the source distribution or at
+ * https://www.openssl.org/source/license.html
+ */
+
+#include "internal/quic_rcidm.h"
+#include "internal/priority_queue.h"
+#include "internal/list.h"
+#include "internal/common.h"
+
+/*
+ * QUIC Remote Connection ID Manager
+ * =================================
+ *
+ * We can receive an arbitrary number of RCIDs via NCID frames. Periodically, we
+ * may desire (for example for anti-connection fingerprinting reasons, etc.)
+ * to switch to a new RCID according to some arbitrary policy such as the number
+ * of packets we have sent.
+ *
+ * When we do this we should move to the next RCID in the sequence of received
+ * RCIDs ordered by sequence number. For example, if a peer sends us three NCID
+ * frames with sequence numbers 10, 11, 12, we should seek to consume these
+ * RCIDs in order.
+ *
+ * However, due to the possibility of packet reordering in the network, NCID
+ * frames might be received out of order. Thus if a peer sends us NCID frames
+ * with sequence numbers 12, 10, 11, we should still consume the RCID with
+ * sequence number 10 before consuming the RCIDs with sequence numbers 11 or 12.
+ *
+ * We use a priority queue for this purpose.
+ */
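+
+/*
+ * For example (a minimal sketch; only the relevant NCID fields are shown),
+ * if NCID frames arrive out of order:
+ *
+ *     OSSL_QUIC_FRAME_NEW_CONN_ID ncid = {0};
+ *
+ *     ncid.conn_id = cid_a; ncid.seq_num = 12;
+ *     ossl_quic_rcidm_add_from_ncid(rcidm, &ncid);
+ *     ncid.conn_id = cid_b; ncid.seq_num = 10;
+ *     ossl_quic_rcidm_add_from_ncid(rcidm, &ncid);
+ *     ncid.conn_id = cid_c; ncid.seq_num = 11;
+ *     ossl_quic_rcidm_add_from_ncid(rcidm, &ncid);
+ *
+ * the priority queue is keyed on seq_num, so when a new current RCID is
+ * selected, the RCID with sequence number 10 is preferred before those with
+ * sequence numbers 11 and 12.
+ */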
+static void rcidm_update(QUIC_RCIDM *rcidm);
+static void rcidm_set_preferred_rcid(QUIC_RCIDM *rcidm,
+ const QUIC_CONN_ID *rcid);
+
+#define PACKETS_PER_RCID 10000
+
+#define INITIAL_SEQ_NUM 0
+#define PREF_ADDR_SEQ_NUM 1
+
+/*
+ * RCID
+ * ====
+ *
+ * The RCID structure is used to track RCIDs which have sequence numbers (i.e.,
+ * INITIAL, PREF_ADDR and NCID type RCIDs). The RCIDs without sequence numbers
+ * (Initial ODCIDs and Retry ODCIDs), hereafter referred to as unnumbered RCIDs,
+ * can logically be viewed as their own type of RCID but are tracked separately
+ * as singletons without needing a discrete structure.
+ *
+ * At any given time an RCID object is in one of these states:
+ *
+ *
+ * (start)
+ * |
+ * [add]
+ * |
+ * _____v_____ ___________ ____________
+ * | | | | | |
+ * | PENDING | --[select]--> | CURRENT | --[retire]--> | RETIRING |
+ * |___________| |___________| |____________|
+ * |
+ * [pop]
+ * |
+ * v
+ * (fin)
+ *
+ * The transition through the states is monotonic and irreversible.
+ * The RCID object is freed when it is popped.
+ *
+ * PENDING
+ * Invariants:
+ * rcid->state == RCID_STATE_PENDING;
+ * rcid->pq_idx != SIZE_MAX (debug assert only);
+ * the RCID is not the current RCID, rcidm->cur_rcid != rcid;
+ * the RCID is in the priority queue;
+ * the RCID is not in the retiring_list.
+ *
+ * CURRENT
+ * Invariants:
+ * rcid->state == RCID_STATE_CUR;
+ * rcid->pq_idx == SIZE_MAX (debug assert only);
+ * the RCID is the current RCID, rcidm->cur_rcid == rcid;
+ * the RCID is not in the priority queue;
+ * the RCID is not in the retiring_list.
+ *
+ * RETIRING
+ * Invariants:
+ * rcid->state == RCID_STATE_RETIRING;
+ * rcid->pq_idx == SIZE_MAX (debug assert only);
+ * the RCID is not the current RCID, rcidm->cur_rcid != rcid;
+ * the RCID is not in the priority queue;
+ * the RCID is in the retiring_list.
+ *
+ * Invariant: At most one RCID object is in the CURRENT state at any one time.
+ *
+ * (If no RCID object is in the CURRENT state, this means either
+ * an unnumbered RCID is being used as the preferred RCID
+ * or we currently have no preferred RCID.)
+ *
+ * All of the above states can be considered substates of the 'ACTIVE' state
+ * for an RCID as specified in RFC 9000. A CID only ceases to be active
+ * when we send a RETIRE_CONN_ID frame, which is the responsibility of the
+ * user of the RCIDM and happens after the above state machine is terminated.
+ */
+enum {
+ RCID_STATE_PENDING,
+ RCID_STATE_CUR,
+ RCID_STATE_RETIRING
+};
+
+enum {
+    RCID_TYPE_INITIAL, /* CID is from a peer INITIAL packet (seq 0) */
+ RCID_TYPE_PREF_ADDR, /* CID is from a preferred_address TPARAM (seq 1) */
+ RCID_TYPE_NCID /* CID is from a NCID frame */
+ /*
+ * INITIAL_ODCID and RETRY_ODCID also conceptually exist but are tracked
+ * separately.
+ */
+};
+
+typedef struct rcid_st {
+ OSSL_LIST_MEMBER(retiring, struct rcid_st); /* valid iff RETIRING */
+
+ QUIC_CONN_ID cid; /* The actual CID string for this RCID */
+ uint64_t seq_num;
+ size_t pq_idx; /* Index of entry into priority queue */
+ unsigned int state : 2; /* RCID_STATE_* */
+ unsigned int type : 2; /* RCID_TYPE_* */
+} RCID;
+
+DEFINE_PRIORITY_QUEUE_OF(RCID);
+DEFINE_LIST_OF(retiring, RCID);
+
+/*
+ * RCID Manager
+ * ============
+ *
+ * The following "business logic" invariants also apply to the RCIDM
+ * as a whole:
+ *
+ * Invariant: An RCID of INITIAL type has a sequence number of 0.
+ * Invariant: An RCID of PREF_ADDR type has a sequence number of 1.
+ *
+ * Invariant: There is never more than one Initial ODCID
+ * added throughout the lifetime of an RCIDM.
+ * Invariant: There is never more than one Retry ODCID
+ * added throughout the lifetime of an RCIDM.
+ * Invariant: There is never more than one INITIAL RCID created
+ * throughout the lifetime of an RCIDM.
+ * Invariant: There is never more than one PREF_ADDR RCID created
+ * throughout the lifetime of an RCIDM.
+ * Invariant: No INITIAL or PREF_ADDR RCID may be added after
+ * the handshake is completed.
+ *
+ */
+struct quic_rcidm_st {
+ /*
+ * The current RCID we prefer to use (value undefined if
+ * !have_preferred_rcid).
+ *
+ * This is preferentially set to a numbered RCID (represented by an RCID
+ * object) if we have one (in which case preferred_rcid == cur_rcid->cid);
+ * otherwise it is set to one of the unnumbered RCIDs (the Initial ODCID or
+ * Retry ODCID) if available (and cur_rcid == NULL).
+ */
+ QUIC_CONN_ID preferred_rcid;
+
+ /*
+ * These are initialized if the corresponding added_ flags are set.
+ */
+ QUIC_CONN_ID initial_odcid, retry_odcid;
+
+ /*
+ * Total number of packets sent since we last made a packet count-based RCID
+ * update decision.
+ */
+ uint64_t packets_sent;
+
+ /* Number of post-handshake RCID changes we have performed. */
+ uint64_t num_changes;
+
+ /*
+ * The Retire Prior To watermark value; max(retire_prior_to) of all received
+ * NCID frames.
+ */
+ uint64_t retire_prior_to;
+
+ /* (SORT BY seq_num ASC) -> (RCID *) */
+ PRIORITY_QUEUE_OF(RCID) *rcids;
+
+ /*
+ * Current RCID object we are using. This may differ from the first item in
+ * the priority queue if we received NCID frames out of order. For example
+ * if we get seq 5, switch to it immediately, then get seq 4, we want to
+ * keep using seq 5 until we decide to roll again rather than immediately
+ * switch to seq 4. Never points to an object on the retiring_list.
+ */
+ RCID *cur_rcid;
+
+ /*
+ * When a RCID becomes pending-retirement, it is moved to the retiring_list,
+ * then freed when it is popped from the retired queue. We use a list for
+ * this rather than a priority queue as the order in which items are freed
+ * does not matter. We always append to the tail of the list in order to
+ * maintain the guarantee that the head (if present) only changes when a
+ * caller calls pop().
+ */
+ OSSL_LIST(retiring) retiring_list;
+
+ /* Number of entries on the retiring_list. */
+ size_t num_retiring;
+
+ /* preferred_rcid has been changed? */
+ unsigned int preferred_rcid_changed : 1;
+
+ /* Do we have any RCID we can use currently? */
+ unsigned int have_preferred_rcid : 1;
+
+ /* QUIC handshake has been completed? */
+ unsigned int handshake_complete : 1;
+
+    /* odcid was set (not necessarily still valid as an RCID)? */
+    unsigned int added_initial_odcid : 1;
+    /* retry_odcid was set (not necessarily still valid as an RCID)? */
+ unsigned int added_retry_odcid : 1;
+ /* An initial RCID was added as an RCID structure? */
+ unsigned int added_initial_rcid : 1;
+    /* Has an RCID roll been manually requested? */
+ unsigned int roll_requested : 1;
+};
+
+/*
+ * Caller must periodically pop retired RCIDs and handle them. If the caller
+ * fails to do so, fail safely rather than start exhibiting integer rollover.
+ * Limit the total number of numbered RCIDs to an implausibly large but safe
+ * value.
+ */
+#define MAX_NUMBERED_RCIDS (SIZE_MAX / 2)
+
+static void rcidm_transition_rcid(QUIC_RCIDM *rcidm, RCID *rcid,
+ unsigned int state);
+
+/* Check invariants of an RCID */
+static void rcidm_check_rcid(QUIC_RCIDM *rcidm, RCID *rcid)
+{
+ assert(rcid->state == RCID_STATE_PENDING
+ || rcid->state == RCID_STATE_CUR
+ || rcid->state == RCID_STATE_RETIRING);
+ assert((rcid->state == RCID_STATE_PENDING)
+ == (rcid->pq_idx != SIZE_MAX));
+ assert((rcid->state == RCID_STATE_CUR)
+ == (rcidm->cur_rcid == rcid));
+ assert((ossl_list_retiring_next(rcid) != NULL
+ || ossl_list_retiring_prev(rcid) != NULL
+ || ossl_list_retiring_head(&rcidm->retiring_list) == rcid)
+ == (rcid->state == RCID_STATE_RETIRING));
+ assert(rcid->type != RCID_TYPE_INITIAL || rcid->seq_num == 0);
+ assert(rcid->type != RCID_TYPE_PREF_ADDR || rcid->seq_num == 1);
+ assert(rcid->seq_num <= OSSL_QUIC_VLINT_MAX);
+ assert(rcid->cid.id_len > 0 && rcid->cid.id_len <= QUIC_MAX_CONN_ID_LEN);
+ assert(rcid->seq_num >= rcidm->retire_prior_to
+ || rcid->state == RCID_STATE_RETIRING);
+ assert(rcidm->num_changes == 0 || rcidm->handshake_complete);
+ assert(rcid->state != RCID_STATE_RETIRING || rcidm->num_retiring > 0);
+}
+
+static int rcid_cmp(const RCID *a, const RCID *b)
+{
+ if (a->seq_num < b->seq_num)
+ return -1;
+ if (a->seq_num > b->seq_num)
+ return 1;
+ return 0;
+}
+
+QUIC_RCIDM *ossl_quic_rcidm_new(const QUIC_CONN_ID *initial_odcid)
+{
+ QUIC_RCIDM *rcidm;
+
+ if ((rcidm = OPENSSL_zalloc(sizeof(*rcidm))) == NULL)
+ return NULL;
+
+ if ((rcidm->rcids = ossl_pqueue_RCID_new(rcid_cmp)) == NULL) {
+ OPENSSL_free(rcidm);
+ return NULL;
+ }
+
+ if (initial_odcid != NULL) {
+ rcidm->initial_odcid = *initial_odcid;
+ rcidm->added_initial_odcid = 1;
+ }
+
+ rcidm_update(rcidm);
+ return rcidm;
+}
+
+void ossl_quic_rcidm_free(QUIC_RCIDM *rcidm)
+{
+ RCID *rcid, *rnext;
+
+ if (rcidm == NULL)
+ return;
+
+ OPENSSL_free(rcidm->cur_rcid);
+ while ((rcid = ossl_pqueue_RCID_pop(rcidm->rcids)) != NULL)
+ OPENSSL_free(rcid);
+
+ OSSL_LIST_FOREACH_DELSAFE(rcid, rnext, retiring, &rcidm->retiring_list)
+ OPENSSL_free(rcid);
+
+ ossl_pqueue_RCID_free(rcidm->rcids);
+ OPENSSL_free(rcidm);
+}
+
+static void rcidm_set_preferred_rcid(QUIC_RCIDM *rcidm,
+ const QUIC_CONN_ID *rcid)
+{
+ if (rcid == NULL) {
+ rcidm->preferred_rcid_changed = 1;
+ rcidm->have_preferred_rcid = 0;
+ return;
+ }
+
+ if (ossl_quic_conn_id_eq(&rcidm->preferred_rcid, rcid))
+ return;
+
+ rcidm->preferred_rcid = *rcid;
+ rcidm->preferred_rcid_changed = 1;
+ rcidm->have_preferred_rcid = 1;
+}
+
+/*
+ * RCID Lifecycle Management
+ * =========================
+ */
+static RCID *rcidm_create_rcid(QUIC_RCIDM *rcidm, uint64_t seq_num,
+ const QUIC_CONN_ID *cid,
+ unsigned int type)
+{
+ RCID *rcid;
+
+ if (cid->id_len < 1 || cid->id_len > QUIC_MAX_CONN_ID_LEN
+ || seq_num > OSSL_QUIC_VLINT_MAX
+ || ossl_pqueue_RCID_num(rcidm->rcids) + rcidm->num_retiring
+ > MAX_NUMBERED_RCIDS)
+ return NULL;
+
+ if ((rcid = OPENSSL_zalloc(sizeof(*rcid))) == NULL)
+ return NULL;
+
+ rcid->seq_num = seq_num;
+ rcid->cid = *cid;
+ rcid->type = type;
+
+ if (rcid->seq_num >= rcidm->retire_prior_to) {
+ rcid->state = RCID_STATE_PENDING;
+
+ if (!ossl_pqueue_RCID_push(rcidm->rcids, rcid, &rcid->pq_idx)) {
+ OPENSSL_free(rcid);
+ return NULL;
+ }
+ } else {
+ /* RCID is immediately retired upon creation. */
+ rcid->state = RCID_STATE_RETIRING;
+ rcid->pq_idx = SIZE_MAX;
+ ossl_list_retiring_insert_tail(&rcidm->retiring_list, rcid);
+ ++rcidm->num_retiring;
+ }
+
+ rcidm_check_rcid(rcidm, rcid);
+ return rcid;
+}
+
+static void rcidm_transition_rcid(QUIC_RCIDM *rcidm, RCID *rcid,
+ unsigned int state)
+{
+ unsigned int old_state = rcid->state;
+
+ assert(state >= old_state && state <= RCID_STATE_RETIRING);
+ rcidm_check_rcid(rcidm, rcid);
+ if (state == old_state)
+ return;
+
+ if (rcidm->cur_rcid != NULL && state == RCID_STATE_CUR) {
+ rcidm_transition_rcid(rcidm, rcidm->cur_rcid, RCID_STATE_RETIRING);
+ assert(rcidm->cur_rcid == NULL);
+ }
+
+ if (old_state == RCID_STATE_PENDING) {
+ ossl_pqueue_RCID_remove(rcidm->rcids, rcid->pq_idx);
+ rcid->pq_idx = SIZE_MAX;
+ }
+
+ rcid->state = state;
+
+ if (state == RCID_STATE_CUR) {
+ rcidm->cur_rcid = rcid;
+ } else if (state == RCID_STATE_RETIRING) {
+ if (old_state == RCID_STATE_CUR)
+ rcidm->cur_rcid = NULL;
+
+ ossl_list_retiring_insert_tail(&rcidm->retiring_list, rcid);
+ ++rcidm->num_retiring;
+ }
+
+ rcidm_check_rcid(rcidm, rcid);
+}
+
+static void rcidm_free_rcid(QUIC_RCIDM *rcidm, RCID *rcid)
+{
+ if (rcid == NULL)
+ return;
+
+ rcidm_check_rcid(rcidm, rcid);
+
+ switch (rcid->state) {
+ case RCID_STATE_PENDING:
+ ossl_pqueue_RCID_remove(rcidm->rcids, rcid->pq_idx);
+ break;
+ case RCID_STATE_CUR:
+ rcidm->cur_rcid = NULL;
+ break;
+ case RCID_STATE_RETIRING:
+ ossl_list_retiring_remove(&rcidm->retiring_list, rcid);
+ --rcidm->num_retiring;
+ break;
+ default:
+ assert(0);
+ break;
+ }
+
+ OPENSSL_free(rcid);
+}
+
+static void rcidm_handle_retire_prior_to(QUIC_RCIDM *rcidm,
+ uint64_t retire_prior_to)
+{
+ RCID *rcid;
+
+ if (retire_prior_to <= rcidm->retire_prior_to)
+ return;
+
+ /*
+ * Retire the current RCID (if any) if it is affected.
+ */
+ if (rcidm->cur_rcid != NULL && rcidm->cur_rcid->seq_num < retire_prior_to)
+ rcidm_transition_rcid(rcidm, rcidm->cur_rcid, RCID_STATE_RETIRING);
+
+ /*
+ * Any other RCIDs needing retirement will be at the start of the priority
+ * queue, so just stop once we see a higher sequence number exceeding the
+ * threshold.
+ */
+ while ((rcid = ossl_pqueue_RCID_peek(rcidm->rcids)) != NULL
+ && rcid->seq_num < retire_prior_to)
+ rcidm_transition_rcid(rcidm, rcid, RCID_STATE_RETIRING);
+
+ rcidm->retire_prior_to = retire_prior_to;
+}
+
+/*
+ * Decision Logic
+ * ==============
+ */
+
+static void rcidm_roll(QUIC_RCIDM *rcidm)
+{
+ RCID *rcid;
+
+ if ((rcid = ossl_pqueue_RCID_peek(rcidm->rcids)) == NULL)
+ return;
+
+ rcidm_transition_rcid(rcidm, rcid, RCID_STATE_CUR);
+
+ ++rcidm->num_changes;
+ rcidm->roll_requested = 0;
+
+ if (rcidm->packets_sent >= PACKETS_PER_RCID)
+ rcidm->packets_sent %= PACKETS_PER_RCID;
+ else
+ rcidm->packets_sent = 0;
+}
+
+static void rcidm_update(QUIC_RCIDM *rcidm)
+{
+ RCID *rcid;
+
+ /*
+ * If we have no current numbered RCID but have one or more pending, use it.
+ */
+ if (rcidm->cur_rcid == NULL
+ && (rcid = ossl_pqueue_RCID_peek(rcidm->rcids)) != NULL) {
+ rcidm_transition_rcid(rcidm, rcid, RCID_STATE_CUR);
+ assert(rcidm->cur_rcid != NULL);
+ }
+
+ /* Prefer use of any current numbered RCID we have, if possible. */
+ if (rcidm->cur_rcid != NULL) {
+ rcidm_check_rcid(rcidm, rcidm->cur_rcid);
+ rcidm_set_preferred_rcid(rcidm, &rcidm->cur_rcid->cid);
+ return;
+ }
+
+ /*
+ * If there are no RCIDs from NCID frames we can use, go through the various
+ * kinds of bootstrapping RCIDs we can use in order of priority.
+ */
+ if (rcidm->added_retry_odcid && !rcidm->handshake_complete) {
+ rcidm_set_preferred_rcid(rcidm, &rcidm->retry_odcid);
+ return;
+ }
+
+ if (rcidm->added_initial_odcid && !rcidm->handshake_complete) {
+ rcidm_set_preferred_rcid(rcidm, &rcidm->initial_odcid);
+ return;
+ }
+
+ /* We don't know of any usable RCIDs */
+ rcidm_set_preferred_rcid(rcidm, NULL);
+}
+
+static int rcidm_should_roll(QUIC_RCIDM *rcidm)
+{
+ /*
+ * Always switch as soon as possible if handshake completes;
+ * and every n packets after handshake completes or the last roll; and
+ * whenever manually requested.
+ */
+ return rcidm->handshake_complete
+ && (rcidm->num_changes == 0
+ || rcidm->packets_sent >= PACKETS_PER_RCID
+ || rcidm->roll_requested);
+}
+
+static void rcidm_tick(QUIC_RCIDM *rcidm)
+{
+ if (rcidm_should_roll(rcidm))
+ rcidm_roll(rcidm);
+
+ rcidm_update(rcidm);
+}
+
+/*
+ * Events
+ * ======
+ */
+void ossl_quic_rcidm_on_handshake_complete(QUIC_RCIDM *rcidm)
+{
+ if (rcidm->handshake_complete)
+ return;
+
+ rcidm->handshake_complete = 1;
+ rcidm_tick(rcidm);
+}
+
+void ossl_quic_rcidm_on_packet_sent(QUIC_RCIDM *rcidm, uint64_t num_packets)
+{
+ if (num_packets == 0)
+ return;
+
+ rcidm->packets_sent += num_packets;
+ rcidm_tick(rcidm);
+}
+
+void ossl_quic_rcidm_request_roll(QUIC_RCIDM *rcidm)
+{
+ rcidm->roll_requested = 1;
+ rcidm_tick(rcidm);
+}
+
+/*
+ * Mutation Operations
+ * ===================
+ */
+int ossl_quic_rcidm_add_from_initial(QUIC_RCIDM *rcidm,
+ const QUIC_CONN_ID *rcid)
+{
+ RCID *rcid_obj;
+
+ if (rcidm->added_initial_rcid || rcidm->handshake_complete)
+ return 0;
+
+ rcid_obj = rcidm_create_rcid(rcidm, INITIAL_SEQ_NUM,
+ rcid, RCID_TYPE_INITIAL);
+ if (rcid_obj == NULL)
+ return 0;
+
+ rcidm->added_initial_rcid = 1;
+ rcidm_tick(rcidm);
+ return 1;
+}
+
+int ossl_quic_rcidm_add_from_server_retry(QUIC_RCIDM *rcidm,
+ const QUIC_CONN_ID *retry_odcid)
+{
+ if (rcidm->added_retry_odcid || rcidm->handshake_complete)
+ return 0;
+
+ rcidm->retry_odcid = *retry_odcid;
+ rcidm->added_retry_odcid = 1;
+ rcidm_tick(rcidm);
+ return 1;
+}
+
+int ossl_quic_rcidm_add_from_ncid(QUIC_RCIDM *rcidm,
+ const OSSL_QUIC_FRAME_NEW_CONN_ID *ncid)
+{
+ RCID *rcid;
+
+ rcid = rcidm_create_rcid(rcidm, ncid->seq_num, &ncid->conn_id, RCID_TYPE_NCID);
+ if (rcid == NULL)
+ return 0;
+
+ rcidm_handle_retire_prior_to(rcidm, ncid->retire_prior_to);
+ rcidm_tick(rcidm);
+ return 1;
+}
+
+/*
+ * Queries
+ * =======
+ */
+
+static int rcidm_get_retire(QUIC_RCIDM *rcidm, uint64_t *seq_num, int peek)
+{
+ RCID *rcid = ossl_list_retiring_head(&rcidm->retiring_list);
+
+ if (rcid == NULL)
+ return 0;
+
+ if (seq_num != NULL)
+ *seq_num = rcid->seq_num;
+
+ if (!peek)
+ rcidm_free_rcid(rcidm, rcid);
+
+ return 1;
+}
+
+int ossl_quic_rcidm_pop_retire_seq_num(QUIC_RCIDM *rcidm,
+ uint64_t *seq_num)
+{
+ return rcidm_get_retire(rcidm, seq_num, /*peek=*/0);
+}
+
+int ossl_quic_rcidm_peek_retire_seq_num(QUIC_RCIDM *rcidm,
+ uint64_t *seq_num)
+{
+ return rcidm_get_retire(rcidm, seq_num, /*peek=*/1);
+}
+
+int ossl_quic_rcidm_get_preferred_tx_dcid(QUIC_RCIDM *rcidm,
+ QUIC_CONN_ID *tx_dcid)
+{
+ if (!rcidm->have_preferred_rcid)
+ return 0;
+
+ *tx_dcid = rcidm->preferred_rcid;
+ return 1;
+}
+
+int ossl_quic_rcidm_get_preferred_tx_dcid_changed(QUIC_RCIDM *rcidm,
+ int clear)
+{
+ int r = rcidm->preferred_rcid_changed;
+
+ if (clear)
+ rcidm->preferred_rcid_changed = 0;
+
+ return r;
+}
+
+size_t ossl_quic_rcidm_get_num_active(const QUIC_RCIDM *rcidm)
+{
+ return ossl_pqueue_RCID_num(rcidm->rcids)
+ + (rcidm->cur_rcid != NULL ? 1 : 0)
+ + ossl_quic_rcidm_get_num_retiring(rcidm);
+}
+
+size_t ossl_quic_rcidm_get_num_retiring(const QUIC_RCIDM *rcidm)
+{
+ return rcidm->num_retiring;
+}
diff --git a/crypto/openssl/ssl/quic/quic_reactor.c b/crypto/openssl/ssl/quic/quic_reactor.c
new file mode 100644
index 000000000000..bca46011f2e9
--- /dev/null
+++ b/crypto/openssl/ssl/quic/quic_reactor.c
@@ -0,0 +1,590 @@
+/*
+ * Copyright 2022-2025 The OpenSSL Project Authors. All Rights Reserved.
+ *
+ * Licensed under the Apache License 2.0 (the "License"). You may not use
+ * this file except in compliance with the License. You can obtain a copy
+ * in the file LICENSE in the source distribution or at
+ * https://www.openssl.org/source/license.html
+ */
+#include "internal/quic_reactor.h"
+#include "internal/common.h"
+#include "internal/thread_arch.h"
+#include <assert.h>
+
+/*
+ * Core I/O Reactor Framework
+ * ==========================
+ */
+static void rtor_notify_other_threads(QUIC_REACTOR *rtor);
+
+int ossl_quic_reactor_init(QUIC_REACTOR *rtor,
+ void (*tick_cb)(QUIC_TICK_RESULT *res, void *arg,
+ uint32_t flags),
+ void *tick_cb_arg,
+ CRYPTO_MUTEX *mutex,
+ OSSL_TIME initial_tick_deadline,
+ uint64_t flags)
+{
+ rtor->poll_r.type = BIO_POLL_DESCRIPTOR_TYPE_NONE;
+ rtor->poll_w.type = BIO_POLL_DESCRIPTOR_TYPE_NONE;
+ rtor->net_read_desired = 0;
+ rtor->net_write_desired = 0;
+ rtor->can_poll_r = 0;
+ rtor->can_poll_w = 0;
+ rtor->tick_deadline = initial_tick_deadline;
+
+ rtor->tick_cb = tick_cb;
+ rtor->tick_cb_arg = tick_cb_arg;
+ rtor->mutex = mutex;
+
+ rtor->cur_blocking_waiters = 0;
+
+ if ((flags & QUIC_REACTOR_FLAG_USE_NOTIFIER) != 0) {
+ if (!ossl_rio_notifier_init(&rtor->notifier))
+ return 0;
+
+ if ((rtor->notifier_cv = ossl_crypto_condvar_new()) == NULL) {
+ ossl_rio_notifier_cleanup(&rtor->notifier);
+ return 0;
+ }
+
+ rtor->have_notifier = 1;
+ } else {
+ rtor->have_notifier = 0;
+ }
+
+ return 1;
+}
+
+void ossl_quic_reactor_cleanup(QUIC_REACTOR *rtor)
+{
+ if (rtor == NULL)
+ return;
+
+ if (rtor->have_notifier) {
+ ossl_rio_notifier_cleanup(&rtor->notifier);
+ rtor->have_notifier = 0;
+
+ ossl_crypto_condvar_free(&rtor->notifier_cv);
+ }
+}
+
+void ossl_quic_reactor_set_poll_r(QUIC_REACTOR *rtor, const BIO_POLL_DESCRIPTOR *r)
+{
+ if (r == NULL)
+ rtor->poll_r.type = BIO_POLL_DESCRIPTOR_TYPE_NONE;
+ else
+ rtor->poll_r = *r;
+
+ rtor->can_poll_r
+ = ossl_quic_reactor_can_support_poll_descriptor(rtor, &rtor->poll_r);
+}
+
+void ossl_quic_reactor_set_poll_w(QUIC_REACTOR *rtor, const BIO_POLL_DESCRIPTOR *w)
+{
+ if (w == NULL)
+ rtor->poll_w.type = BIO_POLL_DESCRIPTOR_TYPE_NONE;
+ else
+ rtor->poll_w = *w;
+
+ rtor->can_poll_w
+ = ossl_quic_reactor_can_support_poll_descriptor(rtor, &rtor->poll_w);
+}
+
+const BIO_POLL_DESCRIPTOR *ossl_quic_reactor_get_poll_r(const QUIC_REACTOR *rtor)
+{
+ return &rtor->poll_r;
+}
+
+const BIO_POLL_DESCRIPTOR *ossl_quic_reactor_get_poll_w(const QUIC_REACTOR *rtor)
+{
+ return &rtor->poll_w;
+}
+
+int ossl_quic_reactor_can_support_poll_descriptor(const QUIC_REACTOR *rtor,
+ const BIO_POLL_DESCRIPTOR *d)
+{
+ return d->type == BIO_POLL_DESCRIPTOR_TYPE_SOCK_FD;
+}
+
+int ossl_quic_reactor_can_poll_r(const QUIC_REACTOR *rtor)
+{
+ return rtor->can_poll_r;
+}
+
+int ossl_quic_reactor_can_poll_w(const QUIC_REACTOR *rtor)
+{
+ return rtor->can_poll_w;
+}
+
+int ossl_quic_reactor_net_read_desired(QUIC_REACTOR *rtor)
+{
+ return rtor->net_read_desired;
+}
+
+int ossl_quic_reactor_net_write_desired(QUIC_REACTOR *rtor)
+{
+ return rtor->net_write_desired;
+}
+
+OSSL_TIME ossl_quic_reactor_get_tick_deadline(QUIC_REACTOR *rtor)
+{
+ return rtor->tick_deadline;
+}
+
+int ossl_quic_reactor_tick(QUIC_REACTOR *rtor, uint32_t flags)
+{
+ QUIC_TICK_RESULT res = {0};
+
+ /*
+ * Note that the tick callback cannot fail; this is intentional. Arguably it
+ * does not make that much sense for ticking to 'fail' (in the sense of an
+ * explicit error indicated to the user) because ticking is by its nature
+ * best effort. If something fatal happens with a connection we can report
+ * it on the next actual application I/O call.
+ */
+ rtor->tick_cb(&res, rtor->tick_cb_arg, flags);
+
+ rtor->net_read_desired = res.net_read_desired;
+ rtor->net_write_desired = res.net_write_desired;
+ rtor->tick_deadline = res.tick_deadline;
+ if (res.notify_other_threads)
+ rtor_notify_other_threads(rtor);
+
+ return 1;
+}
+
+RIO_NOTIFIER *ossl_quic_reactor_get0_notifier(QUIC_REACTOR *rtor)
+{
+ return rtor->have_notifier ? &rtor->notifier : NULL;
+}
+
+/*
+ * Blocking I/O Adaptation Layer
+ * =============================
+ */
+
+/*
+ * Utility which can be used to poll on up to two FDs, plus an optional
+ * notifier FD. This is designed to support use of split FDs (e.g. with
+ * SSL_set_rfd and SSL_set_wfd where different FDs are used for read and
+ * write).
+ *
+ * Generally use of poll(2) is preferred where available. Windows, however,
+ * hasn't traditionally offered poll(2), only select(2). WSAPoll() was
+ * introduced in Vista but has seemingly been buggy until relatively recent
+ * versions of Windows 10. Moreover we support XP so this is not a suitable
+ * target anyway. However, the traditional issues with select(2) turn out not to
+ * be an issue on Windows; whereas traditional *NIX select(2) uses a bitmap of
+ * FDs (and thus is limited in the magnitude of the FDs expressible), Windows
+ * select(2) is very different. In Windows, socket handles are not allocated
+ * contiguously from zero and thus this bitmap approach was infeasible. Thus in
+ * adapting the Berkeley sockets API to Windows a different approach was taken
+ * whereby the fd_set contains a fixed length array of socket handles and an
+ * integer indicating how many entries are valid; thus Windows select()
+ * ironically is actually much more like *NIX poll(2) than *NIX select(2). In
+ * any case, this means that the relevant limit for Windows select() is the
+ * number of FDs being polled, not the magnitude of those FDs. Since we poll
+ * at most three FDs here, this limit does not concern us.
+ *
+ * Usage: rfd and wfd may be the same or different. Either or both may also be
+ * -1. If rfd_want_read is 1, rfd is polled for readability, and if
+ * wfd_want_write is 1, wfd is polled for writability. Note that since any
+ * passed FD is always polled for error conditions, setting rfd_want_read=0 and
+ * wfd_want_write=0 is not the same as passing -1 for both FDs.
+ *
+ * deadline is a timestamp to return at. If it is ossl_time_infinite(), the call
+ * never times out.
+ *
+ * Returns 0 on error and 1 on success. Timeout expiry is considered a success
+ * condition. We don't elaborate our return values here because the way we are
+ * actually using this doesn't currently care.
+ *
+ * If mutex is non-NULL, it is assumed to be held for write and is unlocked for
+ * the duration of the call.
+ *
+ * Precondition: mutex is NULL or is held for write (unchecked)
+ * Postcondition: mutex is NULL or is held for write (unless
+ * CRYPTO_THREAD_write_lock fails)
+ */
+static int poll_two_fds(int rfd, int rfd_want_read,
+ int wfd, int wfd_want_write,
+ int notify_rfd,
+ OSSL_TIME deadline,
+ CRYPTO_MUTEX *mutex)
+{
+#if defined(OPENSSL_SYS_WINDOWS) || !defined(POLLIN)
+ fd_set rfd_set, wfd_set, efd_set;
+ OSSL_TIME now, timeout;
+ struct timeval tv, *ptv;
+ int maxfd, pres;
+
+# ifndef OPENSSL_SYS_WINDOWS
+ /*
+ * On Windows there is no relevant limit to the magnitude of a fd value (see
+ * above). On *NIX the fd_set uses a bitmap and we must check the limit.
+ */
+ if (rfd >= FD_SETSIZE || wfd >= FD_SETSIZE)
+ return 0;
+# endif
+
+ FD_ZERO(&rfd_set);
+ FD_ZERO(&wfd_set);
+ FD_ZERO(&efd_set);
+
+ if (rfd != INVALID_SOCKET && rfd_want_read)
+ openssl_fdset(rfd, &rfd_set);
+ if (wfd != INVALID_SOCKET && wfd_want_write)
+ openssl_fdset(wfd, &wfd_set);
+
+ /* Always check for error conditions. */
+ if (rfd != INVALID_SOCKET)
+ openssl_fdset(rfd, &efd_set);
+ if (wfd != INVALID_SOCKET)
+ openssl_fdset(wfd, &efd_set);
+
+ /* Check for notifier FD readability. */
+ if (notify_rfd != INVALID_SOCKET) {
+ openssl_fdset(notify_rfd, &rfd_set);
+ openssl_fdset(notify_rfd, &efd_set);
+ }
+
+ maxfd = rfd;
+ if (wfd > maxfd)
+ maxfd = wfd;
+ if (notify_rfd > maxfd)
+ maxfd = notify_rfd;
+
+ if (!ossl_assert(rfd != INVALID_SOCKET || wfd != INVALID_SOCKET
+ || !ossl_time_is_infinite(deadline)))
+ /* Do not block forever; should not happen. */
+ return 0;
+
+ /*
+     * The mutex dance (unlock/re-lock around poll/select) is
+     * potentially problematic. This may create a situation where
+     * two threads arrive at select/poll with the same file
+ * descriptors. We just need to be aware of this.
+ */
+# if defined(OPENSSL_THREADS)
+ if (mutex != NULL)
+ ossl_crypto_mutex_unlock(mutex);
+# endif
+
+ do {
+ /*
+ * select expects a timeout, not a deadline, so do the conversion.
+ * Update for each call to ensure the correct value is used if we repeat
+ * due to EINTR.
+ */
+ if (ossl_time_is_infinite(deadline)) {
+ ptv = NULL;
+ } else {
+ now = ossl_time_now();
+ /*
+ * ossl_time_subtract saturates to zero so we don't need to check if
+ * now > deadline.
+ */
+ timeout = ossl_time_subtract(deadline, now);
+ tv = ossl_time_to_timeval(timeout);
+ ptv = &tv;
+ }
+
+ pres = select(maxfd + 1, &rfd_set, &wfd_set, &efd_set, ptv);
+ } while (pres == -1 && get_last_socket_error_is_eintr());
+
+# if defined(OPENSSL_THREADS)
+ if (mutex != NULL)
+ ossl_crypto_mutex_lock(mutex);
+# endif
+
+ return pres < 0 ? 0 : 1;
+#else
+ int pres, timeout_ms;
+ OSSL_TIME now, timeout;
+ struct pollfd pfds[3] = {0};
+ size_t npfd = 0;
+
+ if (rfd == wfd) {
+ pfds[npfd].fd = rfd;
+ pfds[npfd].events = (rfd_want_read ? POLLIN : 0)
+ | (wfd_want_write ? POLLOUT : 0);
+ if (rfd >= 0 && pfds[npfd].events != 0)
+ ++npfd;
+ } else {
+ pfds[npfd].fd = rfd;
+ pfds[npfd].events = (rfd_want_read ? POLLIN : 0);
+ if (rfd >= 0 && pfds[npfd].events != 0)
+ ++npfd;
+
+ pfds[npfd].fd = wfd;
+ pfds[npfd].events = (wfd_want_write ? POLLOUT : 0);
+ if (wfd >= 0 && pfds[npfd].events != 0)
+ ++npfd;
+ }
+
+ if (notify_rfd >= 0) {
+ pfds[npfd].fd = notify_rfd;
+ pfds[npfd].events = POLLIN;
+ ++npfd;
+ }
+
+ if (!ossl_assert(npfd != 0 || !ossl_time_is_infinite(deadline)))
+ /* Do not block forever; should not happen. */
+ return 0;
+
+# if defined(OPENSSL_THREADS)
+ if (mutex != NULL)
+ ossl_crypto_mutex_unlock(mutex);
+# endif
+
+ do {
+ if (ossl_time_is_infinite(deadline)) {
+ timeout_ms = -1;
+ } else {
+ now = ossl_time_now();
+ timeout = ossl_time_subtract(deadline, now);
+ timeout_ms = ossl_time2ms(timeout);
+ }
+
+ pres = poll(pfds, npfd, timeout_ms);
+ } while (pres == -1 && get_last_socket_error_is_eintr());
+
+# if defined(OPENSSL_THREADS)
+ if (mutex != NULL)
+ ossl_crypto_mutex_lock(mutex);
+# endif
+
+ return pres < 0 ? 0 : 1;
+#endif
+}
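+
+/*
+ * Illustrative sketch (hypothetical, not part of this file): the deadline to
+ * timeout conversion and EINTR retry pattern used above, reduced to plain
+ * POSIX poll(2). Recomputing the timeout on every iteration keeps the
+ * deadline accurate if the call is interrupted by a signal.
+ */
+#if 0
+# include <poll.h>
+# include <time.h>
+# include <errno.h>
+# include <limits.h>
+
+static int poll_until_deadline(struct pollfd *pfds, nfds_t npfd,
+ const struct timespec *deadline)
+{
+ int pres;
+
+ do {
+ struct timespec now;
+ long long timeout_ms;
+
+ clock_gettime(CLOCK_MONOTONIC, &now);
+ timeout_ms = (long long)(deadline->tv_sec - now.tv_sec) * 1000
+ + (deadline->tv_nsec - now.tv_nsec) / 1000000;
+ if (timeout_ms < 0)
+ timeout_ms = 0; /* saturate, as ossl_time_subtract does */
+ if (timeout_ms > INT_MAX)
+ timeout_ms = INT_MAX; /* clamp for poll's int argument */
+
+ pres = poll(pfds, npfd, (int)timeout_ms);
+ } while (pres == -1 && errno == EINTR);
+
+ return pres;
+}
+#endif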
+
+static int poll_descriptor_to_fd(const BIO_POLL_DESCRIPTOR *d, int *fd)
+{
+ if (d == NULL || d->type == BIO_POLL_DESCRIPTOR_TYPE_NONE) {
+ *fd = INVALID_SOCKET;
+ return 1;
+ }
+
+ if (d->type != BIO_POLL_DESCRIPTOR_TYPE_SOCK_FD
+ || d->value.fd == INVALID_SOCKET)
+ return 0;
+
+ *fd = d->value.fd;
+ return 1;
+}
+
+/*
+ * Poll up to two abstract poll descriptors, as well as an optional notify FD.
+ * Currently we only support poll descriptors which represent FDs.
+ *
+ * If mutex is non-NULL, it is assumed to be a lock currently held for write and is
+ * unlocked for the duration of any wait.
+ *
+ * Precondition: mutex is NULL or is held for write (unchecked)
+ * Postcondition: mutex is NULL or is held for write (unless
+ * CRYPTO_THREAD_write_lock fails)
+ */
+static int poll_two_descriptors(const BIO_POLL_DESCRIPTOR *r, int r_want_read,
+ const BIO_POLL_DESCRIPTOR *w, int w_want_write,
+ int notify_rfd,
+ OSSL_TIME deadline,
+ CRYPTO_MUTEX *mutex)
+{
+ int rfd, wfd;
+
+ if (!poll_descriptor_to_fd(r, &rfd)
+ || !poll_descriptor_to_fd(w, &wfd))
+ return 0;
+
+ return poll_two_fds(rfd, r_want_read, wfd, w_want_write,
+ notify_rfd, deadline, mutex);
+}
+
+/*
+ * Notify other threads currently blocking in
+ * ossl_quic_reactor_block_until_pred() calls that a predicate they are using
+ * might now be met due to state changes.
+ *
+ * This function must be called after state changes which might cause a
+ * predicate in another thread to now be met (i.e., ticking). It is a no-op if
+ * inter-thread notification is not being used.
+ *
+ * The reactor mutex must be held while calling this function.
+ */
+static void rtor_notify_other_threads(QUIC_REACTOR *rtor)
+{
+ if (!rtor->have_notifier)
+ return;
+
+ /*
+ * This function is called when we have done anything on this thread which
+ * might allow a predicate for a block_until_pred call on another thread to
+ * now be met.
+ *
+ * When this happens, we need to wake those threads using the notifier.
+ * However, we do not want to wake *this* thread (if/when it subsequently
+ * enters block_until_pred) due to the notifier FD becoming readable.
+ * Therefore, signal the notifier, and use a CV to detect when all other
+ * threads have woken.
+ */
+
+ if (rtor->cur_blocking_waiters == 0)
+ /* Nothing to do in this case. */
+ return;
+
+ /* Signal the notifier to wake up all threads. */
+ if (!rtor->signalled_notifier) {
+ ossl_rio_notifier_signal(&rtor->notifier);
+ rtor->signalled_notifier = 1;
+ }
+
+ /*
+ * Wait on the CV until all threads have finished the first phase of the
+ * wakeup process and the last thread out has taken responsibility for
+ * unsignalling the notifier.
+ */
+ while (rtor->signalled_notifier)
+ ossl_crypto_condvar_wait(rtor->notifier_cv, rtor->mutex);
+}
+
+/*
+ * Block until a predicate function evaluates to true.
+ *
+ * If mutex is non-NULL, it is assumed to be a lock currently held for write and is
+ * unlocked for the duration of any wait.
+ *
+ * Precondition: Must hold channel write lock (unchecked)
+ * Precondition: mutex is NULL or is held for write (unchecked)
+ * Postcondition: mutex is NULL or is held for write (unless
+ * CRYPTO_THREAD_write_lock fails)
+ */
+int ossl_quic_reactor_block_until_pred(QUIC_REACTOR *rtor,
+ int (*pred)(void *arg), void *pred_arg,
+ uint32_t flags)
+{
+ int res, net_read_desired, net_write_desired, notifier_fd;
+ OSSL_TIME tick_deadline;
+
+ notifier_fd
+ = (rtor->have_notifier ? ossl_rio_notifier_as_fd(&rtor->notifier)
+ : INVALID_SOCKET);
+
+ for (;;) {
+ if ((flags & SKIP_FIRST_TICK) != 0)
+ flags &= ~SKIP_FIRST_TICK;
+ else
+ /* best effort */
+ ossl_quic_reactor_tick(rtor, 0);
+
+ if ((res = pred(pred_arg)) != 0)
+ return res;
+
+ net_read_desired = ossl_quic_reactor_net_read_desired(rtor);
+ net_write_desired = ossl_quic_reactor_net_write_desired(rtor);
+ tick_deadline = ossl_quic_reactor_get_tick_deadline(rtor);
+ if (!net_read_desired && !net_write_desired
+ && ossl_time_is_infinite(tick_deadline))
+ /* Can't wait if there is nothing to wait for. */
+ return 0;
+
+ ossl_quic_reactor_enter_blocking_section(rtor);
+
+ res = poll_two_descriptors(ossl_quic_reactor_get_poll_r(rtor),
+ net_read_desired,
+ ossl_quic_reactor_get_poll_w(rtor),
+ net_write_desired,
+ notifier_fd,
+ tick_deadline,
+ rtor->mutex);
+
+ /*
+ * We have now exited the OS poller call. The notifier may still be
+ * signalled (rtor->signalled_notifier) and other threads may still be
+ * blocking. This means that cur_blocking_waiters may still be non-zero.
+ * As such, we cannot unsignal the notifier until all threads have had an
+ * opportunity to wake up.
+ *
+ * At the same time, we cannot simply unsignal once we see
+ * cur_blocking_waiters reach zero, because that condition may never occur
+ * reliably. Consider the following scenario:
+ *
+ * T1 enters block_until_pred, cur_blocking_waiters -> 1
+ * T2 enters block_until_pred, cur_blocking_waiters -> 2
+ * T3 enters block_until_pred, cur_blocking_waiters -> 3
+ *
+ * T4 enters block_until_pred, does not block, ticks,
+ * sees that cur_blocking_waiters > 0 and signals the notifier
+ *
+ * T3 wakes, cur_blocking_waiters -> 2
+ * T3's predicate is not satisfied, cur_blocking_waiters -> 3, blocks again
+ *
+ * Notifier is still signalled, so T3 immediately wakes again
+ * and is stuck repeating the above steps.
+ *
+ * T1, T2 are also woken by the notifier but never see
+ * cur_blocking_waiters drop to 0, so never unsignal the notifier.
+ *
+ * As such, a two-phase approach is used to unsignal the notifier:
+ *
+ * First, all of the poll_two_descriptors() calls on all threads are
+ * allowed to exit due to the notifier being signalled.
+ *
+ * Second, the thread which happens to decrement cur_blocking_waiters to 0
+ * unsignals the notifier and is then responsible for broadcasting on a CV
+ * to indicate to the other threads that the synchronised wakeup has been
+ * completed. The other threads wait for this CV to be signalled.
+ */
+ ossl_quic_reactor_leave_blocking_section(rtor);
+
+ if (!res)
+ /*
+ * If the call succeeded we don't care why it woke up (timeout or FD
+ * readiness); we just tick the reactor and start trying to do I/O
+ * again. If poll_two_descriptors() returned 0, however, this is some
+ * other non-timeout failure and we stop here.
+ *
+ * TODO(QUIC FUTURE): In the future we could avoid unnecessary
+ * syscalls by not retrying network I/O that isn't ready based
+ * on the result of the poll call. However this might be difficult
+ * because it requires we do the call to poll(2) or equivalent
+ * syscall ourselves, whereas in the general case the application
+ * does the polling and just calls SSL_handle_events().
+ * Implementing this optimisation in the future will probably
+ * therefore require API changes.
+ */
+ return 0;
+ }
+
+ return res;
+}
+
+void ossl_quic_reactor_enter_blocking_section(QUIC_REACTOR *rtor)
+{
+ ++rtor->cur_blocking_waiters;
+}
+
+void ossl_quic_reactor_leave_blocking_section(QUIC_REACTOR *rtor)
+{
+ assert(rtor->cur_blocking_waiters > 0);
+ --rtor->cur_blocking_waiters;
+
+ if (rtor->have_notifier && rtor->signalled_notifier) {
+ if (rtor->cur_blocking_waiters == 0) {
+ ossl_rio_notifier_unsignal(&rtor->notifier);
+ rtor->signalled_notifier = 0;
+
+ /*
+ * Release the other threads which have woken up (and possibly
+ * rtor_notify_other_threads as well).
+ */
+ ossl_crypto_condvar_broadcast(rtor->notifier_cv);
+ } else {
+ /* We are not the last waiter out - so wait for that one. */
+ while (rtor->signalled_notifier)
+ ossl_crypto_condvar_wait(rtor->notifier_cv, rtor->mutex);
+ }
+ }
+}
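+
+/*
+ * Illustrative sketch (hypothetical, not upstream code): the "last waiter
+ * out" pattern implemented above, reduced to plain pthreads. All names here
+ * are invented. The final thread to leave clears the signalled flag and
+ * broadcasts; earlier leavers wait on the CV so that no thread can re-enter
+ * the poller while a stale signal is still pending.
+ */
+#if 0
+# include <pthread.h>
+# include <assert.h>
+
+struct waiter_group {
+ pthread_mutex_t mtx;
+ pthread_cond_t cv;
+ int waiters; /* threads currently blocked in the poller */
+ int signalled; /* is a synchronised wakeup in progress? */
+};
+
+/* Called with mtx held, after a thread returns from its poll call. */
+static void waiter_leave(struct waiter_group *wg)
+{
+ assert(wg->waiters > 0);
+ --wg->waiters;
+
+ if (!wg->signalled)
+ return;
+
+ if (wg->waiters == 0) {
+ wg->signalled = 0; /* phase two: unsignal... */
+ pthread_cond_broadcast(&wg->cv); /* ...and release the others */
+ } else {
+ while (wg->signalled) /* wait for the last waiter out */
+ pthread_cond_wait(&wg->cv, &wg->mtx);
+ }
+}
+#endif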
diff --git a/crypto/openssl/ssl/quic/quic_reactor_wait_ctx.c b/crypto/openssl/ssl/quic/quic_reactor_wait_ctx.c
new file mode 100644
index 000000000000..00efac715603
--- /dev/null
+++ b/crypto/openssl/ssl/quic/quic_reactor_wait_ctx.c
@@ -0,0 +1,85 @@
+/*
+ * Copyright 2024-2025 The OpenSSL Project Authors. All Rights Reserved.
+ *
+ * Licensed under the Apache License 2.0 (the "License"). You may not use
+ * this file except in compliance with the License. You can obtain a copy
+ * in the file LICENSE in the source distribution or at
+ * https://www.openssl.org/source/license.html
+ */
+#include "internal/quic_reactor_wait_ctx.h"
+#include "internal/common.h"
+#include "internal/thread_arch.h"
+#include <assert.h>
+
+struct quic_reactor_wait_slot_st {
+ OSSL_LIST_MEMBER(quic_reactor_wait_slot, QUIC_REACTOR_WAIT_SLOT);
+ QUIC_REACTOR *rtor; /* primary key */
+ size_t blocking_count; /* datum */
+};
+
+DEFINE_LIST_OF_IMPL(quic_reactor_wait_slot, QUIC_REACTOR_WAIT_SLOT);
+
+void ossl_quic_reactor_wait_ctx_init(QUIC_REACTOR_WAIT_CTX *ctx)
+{
+ ossl_list_quic_reactor_wait_slot_init(&ctx->slots);
+}
+
+static void slot_activate(QUIC_REACTOR_WAIT_SLOT *slot)
+{
+ if (++slot->blocking_count == 1)
+ ossl_quic_reactor_enter_blocking_section(slot->rtor);
+}
+
+static void slot_deactivate(QUIC_REACTOR_WAIT_SLOT *slot)
+{
+ assert(slot->blocking_count > 0);
+
+ if (--slot->blocking_count > 0)
+ return;
+
+ ossl_quic_reactor_leave_blocking_section(slot->rtor);
+}
+
+int ossl_quic_reactor_wait_ctx_enter(QUIC_REACTOR_WAIT_CTX *ctx,
+ QUIC_REACTOR *rtor)
+{
+ QUIC_REACTOR_WAIT_SLOT *slot;
+
+ OSSL_LIST_FOREACH(slot, quic_reactor_wait_slot, &ctx->slots)
+ if (slot->rtor == rtor)
+ break;
+
+ if (slot == NULL) {
+ if ((slot = OPENSSL_zalloc(sizeof(QUIC_REACTOR_WAIT_SLOT))) == NULL)
+ return 0;
+
+ slot->rtor = rtor;
+ ossl_list_quic_reactor_wait_slot_insert_tail(&ctx->slots, slot);
+ }
+
+ slot_activate(slot);
+ return 1;
+}
+
+void ossl_quic_reactor_wait_ctx_leave(QUIC_REACTOR_WAIT_CTX *ctx,
+ QUIC_REACTOR *rtor)
+{
+ QUIC_REACTOR_WAIT_SLOT *slot;
+
+ OSSL_LIST_FOREACH(slot, quic_reactor_wait_slot, &ctx->slots)
+ if (slot->rtor == rtor)
+ break;
+
+ assert(slot != NULL);
+ slot_deactivate(slot);
+}
+
+void ossl_quic_reactor_wait_ctx_cleanup(QUIC_REACTOR_WAIT_CTX *ctx)
+{
+ QUIC_REACTOR_WAIT_SLOT *slot, *nslot;
+
+ OSSL_LIST_FOREACH_DELSAFE(slot, nslot, quic_reactor_wait_slot, &ctx->slots) {
+ assert(slot->blocking_count == 0);
+ OPENSSL_free(slot);
+ }
+}
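+
+/*
+ * Illustrative usage sketch (hypothetical): how a blocking call might
+ * bracket a reactor wait with a wait context. rtor is assumed to be a valid
+ * QUIC_REACTOR and error handling is elided.
+ */
+#if 0
+static void example_blocking_wait(QUIC_REACTOR_WAIT_CTX *ctx,
+ QUIC_REACTOR *rtor)
+{
+ ossl_quic_reactor_wait_ctx_init(ctx);
+
+ if (ossl_quic_reactor_wait_ctx_enter(ctx, rtor)) {
+ /* ...block on the reactor, e.g. via block_until_pred... */
+ ossl_quic_reactor_wait_ctx_leave(ctx, rtor);
+ }
+
+ ossl_quic_reactor_wait_ctx_cleanup(ctx);
+}
+#endif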
diff --git a/crypto/openssl/ssl/quic/quic_record_rx.c b/crypto/openssl/ssl/quic/quic_record_rx.c
new file mode 100644
index 000000000000..1a8194b396d7
--- /dev/null
+++ b/crypto/openssl/ssl/quic/quic_record_rx.c
@@ -0,0 +1,1603 @@
+/*
+ * Copyright 2022-2025 The OpenSSL Project Authors. All Rights Reserved.
+ *
+ * Licensed under the Apache License 2.0 (the "License"). You may not use
+ * this file except in compliance with the License. You can obtain a copy
+ * in the file LICENSE in the source distribution or at
+ * https://www.openssl.org/source/license.html
+ */
+
+#include <openssl/ssl.h>
+#include "internal/quic_record_rx.h"
+#include "quic_record_shared.h"
+#include "internal/common.h"
+#include "internal/list.h"
+#include "../ssl_local.h"
+
+/*
+ * Mark a packet in a bitfield.
+ *
+ * pkt_idx: index of packet within datagram.
+ */
+static ossl_inline void pkt_mark(uint64_t *bitf, size_t pkt_idx)
+{
+ assert(pkt_idx < QUIC_MAX_PKT_PER_URXE);
+ *bitf |= ((uint64_t)1) << pkt_idx;
+}
+
+/* Returns 1 if a packet is in the bitfield. */
+static ossl_inline int pkt_is_marked(const uint64_t *bitf, size_t pkt_idx)
+{
+ assert(pkt_idx < QUIC_MAX_PKT_PER_URXE);
+ return (*bitf & (((uint64_t)1) << pkt_idx)) != 0;
+}
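+
+/*
+ * Illustrative sketch (hypothetical): round trip of the bitfield helpers
+ * above, marking the third packet in a datagram as processed.
+ */
+#if 0
+static void example_pkt_mark(void)
+{
+ uint64_t processed = 0;
+
+ pkt_mark(&processed, 2);
+ assert(pkt_is_marked(&processed, 2));
+ assert(!pkt_is_marked(&processed, 3));
+}
+#endif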
+
+/*
+ * RXE
+ * ===
+ *
+ * RX Entries (RXEs) store processed (i.e., decrypted) data received from the
+ * network. One RXE is used per received QUIC packet.
+ */
+typedef struct rxe_st RXE;
+
+struct rxe_st {
+ OSSL_QRX_PKT pkt;
+ OSSL_LIST_MEMBER(rxe, RXE);
+ size_t data_len, alloc_len, refcount;
+
+ /* Extra fields for per-packet information. */
+ QUIC_PKT_HDR hdr; /* data/len are decrypted payload */
+
+ /* Decoded packet number. */
+ QUIC_PN pn;
+
+ /* Addresses copied from URXE. */
+ BIO_ADDR peer, local;
+
+ /* Time we received the packet (not when we processed it). */
+ OSSL_TIME time;
+
+ /* Total length of the datagram which contained this packet. */
+ size_t datagram_len;
+
+ /*
+ * The key epoch the packet was received with. Always 0 for non-1-RTT
+ * packets.
+ */
+ uint64_t key_epoch;
+
+ /*
+ * Monotonically increases with each datagram received.
+ * For diagnostic use only.
+ */
+ uint64_t datagram_id;
+
+ /*
+ * alloc_len allocated bytes (of which data_len bytes are valid) follow this
+ * structure.
+ */
+};
+
+DEFINE_LIST_OF(rxe, RXE);
+typedef OSSL_LIST(rxe) RXE_LIST;
+
+static ossl_inline unsigned char *rxe_data(const RXE *e)
+{
+ return (unsigned char *)(e + 1);
+}
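+
+/*
+ * Illustrative sketch (hypothetical): the single-allocation idiom used by
+ * RXE, where a variable-length payload is carved out directly after the
+ * fixed-size header in one allocation; (b + 1) points just past the struct.
+ */
+#if 0
+struct blob {
+ size_t len;
+ /* len bytes of payload follow this structure */
+};
+
+static struct blob *blob_new(size_t len)
+{
+ struct blob *b = OPENSSL_malloc(sizeof(*b) + len);
+
+ if (b != NULL)
+ b->len = len;
+ return b;
+}
+
+static unsigned char *blob_data(struct blob *b)
+{
+ return (unsigned char *)(b + 1);
+}
+#endif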
+
+/*
+ * QRL
+ * ===
+ */
+struct ossl_qrx_st {
+ OSSL_LIB_CTX *libctx;
+ const char *propq;
+
+ /* Demux to receive datagrams from. */
+ QUIC_DEMUX *demux;
+
+ /* Length of connection IDs used in short-header packets in bytes. */
+ size_t short_conn_id_len;
+
+ /* Maximum number of deferred datagrams buffered at any one time. */
+ size_t max_deferred;
+
+ /* Current count of deferred datagrams. */
+ size_t num_deferred;
+
+ /*
+ * List of URXEs which are filled with received encrypted data.
+ * These are returned to the DEMUX's free list as they are processed.
+ */
+ QUIC_URXE_LIST urx_pending;
+
+ /*
+ * List of URXEs which we could not decrypt immediately and which are being
+ * kept in case they can be decrypted later.
+ */
+ QUIC_URXE_LIST urx_deferred;
+
+ /*
+ * List of RXEs which are not currently in use. These are moved
+ * to the pending list as they are filled.
+ */
+ RXE_LIST rx_free;
+
+ /*
+ * List of RXEs which are filled with decrypted packets ready to be passed
+ * to the user. A RXE is removed from all lists inside the QRL when passed
+ * to the user, then returned to the free list when the user returns it.
+ */
+ RXE_LIST rx_pending;
+
+ /* Largest PN we have received and processed in a given PN space. */
+ QUIC_PN largest_pn[QUIC_PN_SPACE_NUM];
+
+ /* Per encryption-level state. */
+ OSSL_QRL_ENC_LEVEL_SET el_set;
+
+ /* Bytes we have received since this counter was last cleared. */
+ uint64_t bytes_received;
+
+ /*
+ * Number of forged packets we have received since the QRX was instantiated.
+ * Note that as per RFC 9001, this is connection-level state; it is not per
+ * EL and is not reset by a key update.
+ */
+ uint64_t forged_pkt_count;
+
+ /*
+ * The PN the current key epoch started at, inclusive.
+ */
+ uint64_t cur_epoch_start_pn;
+
+ /* Validation callback. */
+ ossl_qrx_late_validation_cb *validation_cb;
+ void *validation_cb_arg;
+
+ /* Key update callback. */
+ ossl_qrx_key_update_cb *key_update_cb;
+ void *key_update_cb_arg;
+
+ /* Initial key phase. For debugging use only; always 0 in real use. */
+ unsigned char init_key_phase_bit;
+
+ /* Are we allowed to process 1-RTT packets yet? */
+ unsigned char allow_1rtt;
+
+ /* Message callback related arguments */
+ ossl_msg_cb msg_callback;
+ void *msg_callback_arg;
+ SSL *msg_callback_ssl;
+};
+
+static RXE *qrx_ensure_free_rxe(OSSL_QRX *qrx, size_t alloc_len);
+static int qrx_validate_hdr_early(OSSL_QRX *qrx, RXE *rxe,
+ const QUIC_CONN_ID *first_dcid);
+static int qrx_relocate_buffer(OSSL_QRX *qrx, RXE **prxe, size_t *pi,
+ const unsigned char **pptr, size_t buf_len);
+static int qrx_validate_hdr(OSSL_QRX *qrx, RXE *rxe);
+static RXE *qrx_reserve_rxe(RXE_LIST *rxl, RXE *rxe, size_t n);
+static int qrx_decrypt_pkt_body(OSSL_QRX *qrx, unsigned char *dst,
+ const unsigned char *src,
+ size_t src_len, size_t *dec_len,
+ const unsigned char *aad, size_t aad_len,
+ QUIC_PN pn, uint32_t enc_level,
+ unsigned char key_phase_bit,
+ uint64_t *rx_key_epoch);
+static int qrx_validate_hdr_late(OSSL_QRX *qrx, RXE *rxe);
+static uint32_t rxe_determine_pn_space(RXE *rxe);
+static void ignore_res(int x);
+
+OSSL_QRX *ossl_qrx_new(const OSSL_QRX_ARGS *args)
+{
+ OSSL_QRX *qrx;
+ size_t i;
+
+ if (args->demux == NULL || args->max_deferred == 0)
+ return NULL;
+
+ qrx = OPENSSL_zalloc(sizeof(OSSL_QRX));
+ if (qrx == NULL)
+ return NULL;
+
+ for (i = 0; i < OSSL_NELEM(qrx->largest_pn); ++i)
+ qrx->largest_pn[i] = args->init_largest_pn[i];
+
+ qrx->libctx = args->libctx;
+ qrx->propq = args->propq;
+ qrx->demux = args->demux;
+ qrx->short_conn_id_len = args->short_conn_id_len;
+ qrx->init_key_phase_bit = args->init_key_phase_bit;
+ qrx->max_deferred = args->max_deferred;
+ return qrx;
+}
+
+static void qrx_cleanup_rxl(RXE_LIST *l)
+{
+ RXE *e, *enext;
+
+ for (e = ossl_list_rxe_head(l); e != NULL; e = enext) {
+ enext = ossl_list_rxe_next(e);
+ ossl_list_rxe_remove(l, e);
+ OPENSSL_free(e);
+ }
+}
+
+static void qrx_cleanup_urxl(OSSL_QRX *qrx, QUIC_URXE_LIST *l)
+{
+ QUIC_URXE *e, *enext;
+
+ for (e = ossl_list_urxe_head(l); e != NULL; e = enext) {
+ enext = ossl_list_urxe_next(e);
+ ossl_list_urxe_remove(l, e);
+ ossl_quic_demux_release_urxe(qrx->demux, e);
+ }
+}
+
+void ossl_qrx_update_pn_space(OSSL_QRX *src, OSSL_QRX *dst)
+{
+ size_t i;
+
+ for (i = 0; i < QUIC_PN_SPACE_NUM; i++)
+ dst->largest_pn[i] = src->largest_pn[i];
+}
+
+void ossl_qrx_free(OSSL_QRX *qrx)
+{
+ uint32_t i;
+
+ if (qrx == NULL)
+ return;
+
+ /* Free RXE queue data. */
+ qrx_cleanup_rxl(&qrx->rx_free);
+ qrx_cleanup_rxl(&qrx->rx_pending);
+ qrx_cleanup_urxl(qrx, &qrx->urx_pending);
+ qrx_cleanup_urxl(qrx, &qrx->urx_deferred);
+
+ /* Drop keying material and crypto resources. */
+ for (i = 0; i < QUIC_ENC_LEVEL_NUM; ++i)
+ ossl_qrl_enc_level_set_discard(&qrx->el_set, i);
+
+ OPENSSL_free(qrx);
+}
+
+void ossl_qrx_inject_urxe(OSSL_QRX *qrx, QUIC_URXE *urxe)
+{
+ /* Initialize our own fields inside the URXE and add to the pending list. */
+ urxe->processed = 0;
+ urxe->hpr_removed = 0;
+ urxe->deferred = 0;
+ ossl_list_urxe_insert_tail(&qrx->urx_pending, urxe);
+
+ if (qrx->msg_callback != NULL)
+ qrx->msg_callback(0, OSSL_QUIC1_VERSION, SSL3_RT_QUIC_DATAGRAM, urxe + 1,
+ urxe->data_len, qrx->msg_callback_ssl,
+ qrx->msg_callback_arg);
+}
+
+void ossl_qrx_inject_pkt(OSSL_QRX *qrx, OSSL_QRX_PKT *pkt)
+{
+ RXE *rxe = (RXE *)pkt;
+
+ /*
+ * port_default_packet_handler() uses ossl_qrx_read_pkt() to obtain pkt,
+ * so the packet arrives here with a refcount of 1.
+ */
+ ossl_qrx_pkt_orphan(pkt);
+ if (ossl_assert(rxe->refcount == 0))
+ ossl_list_rxe_insert_tail(&qrx->rx_pending, rxe);
+}
+
+/*
+ * qrx_validate_initial_pkt() is derived from qrx_process_pkt(). Unlike
+ * qrx_process_pkt(), it can process an INITIAL packet only; all other
+ * packets are discarded. This allows port_default_packet_handler() to
+ * validate an incoming packet. If the packet is not valid,
+ * port_default_packet_handler() must discard it instead of creating a new
+ * channel for it.
+ */
+static int qrx_validate_initial_pkt(OSSL_QRX *qrx, QUIC_URXE *urxe,
+ const QUIC_CONN_ID *first_dcid,
+ size_t datagram_len)
+{
+ PACKET pkt, orig_pkt;
+ RXE *rxe;
+ size_t i = 0, aad_len = 0, dec_len = 0;
+ const unsigned char *sop;
+ unsigned char *dst;
+ QUIC_PKT_HDR_PTRS ptrs;
+ uint32_t pn_space;
+ OSSL_QRL_ENC_LEVEL *el = NULL;
+ uint64_t rx_key_epoch = UINT64_MAX;
+
+ if (!PACKET_buf_init(&pkt, ossl_quic_urxe_data(urxe), urxe->data_len))
+ return 0;
+
+ orig_pkt = pkt;
+ sop = PACKET_data(&pkt);
+
+ /*
+ * Get a free RXE. If we need to allocate a new one, use the packet length
+ * as a good ballpark figure.
+ */
+ rxe = qrx_ensure_free_rxe(qrx, PACKET_remaining(&pkt));
+ if (rxe == NULL)
+ return 0;
+
+ /*
+ * We expect an INITIAL packet only, so it is OK to pass
+ * short_conn_id_len as 0.
+ */
+ if (!ossl_quic_wire_decode_pkt_hdr(&pkt,
+ 0, /* short_conn_id_len */
+ 1, /* need second decode */
+ 0, /* nodata -> want to read data */
+ &rxe->hdr, &ptrs,
+ NULL))
+ goto malformed;
+
+ if (rxe->hdr.type != QUIC_PKT_TYPE_INITIAL)
+ goto malformed;
+
+ if (!qrx_validate_hdr_early(qrx, rxe, NULL))
+ goto malformed;
+
+ if (ossl_qrl_enc_level_set_have_el(&qrx->el_set, QUIC_ENC_LEVEL_INITIAL) != 1)
+ goto malformed;
+
+ if (rxe->hdr.type == QUIC_PKT_TYPE_INITIAL) {
+ const unsigned char *token = rxe->hdr.token;
+
+ /*
+ * This may change the value of rxe and change the value of the token
+ * pointer as well. So we must make a temporary copy of the pointer to
+ * the token, and then copy it back into the new location of the rxe
+ */
+ if (!qrx_relocate_buffer(qrx, &rxe, &i, &token, rxe->hdr.token_len))
+ goto malformed;
+
+ rxe->hdr.token = token;
+ }
+
+ pkt = orig_pkt;
+
+ el = ossl_qrl_enc_level_set_get(&qrx->el_set, QUIC_ENC_LEVEL_INITIAL, 1);
+ assert(el != NULL); /* Already checked above */
+
+ if (!ossl_quic_hdr_protector_decrypt(&el->hpr, &ptrs))
+ goto malformed;
+
+ /*
+ * We have removed header protection, so don't attempt to do it again if
+ * the packet gets deferred and processed again.
+ */
+ pkt_mark(&urxe->hpr_removed, 0);
+
+ /* Decode the now unprotected header. */
+ if (ossl_quic_wire_decode_pkt_hdr(&pkt, 0,
+ 0, 0, &rxe->hdr, NULL, NULL) != 1)
+ goto malformed;
+
+ /* Validate header and decode PN. */
+ if (!qrx_validate_hdr(qrx, rxe))
+ goto malformed;
+
+ /*
+ * The AAD data is the entire (unprotected) packet header including the PN.
+ * The packet header has been unprotected in place, so we can just reuse the
+ * PACKET buffer. The header ends where the payload begins.
+ */
+ aad_len = rxe->hdr.data - sop;
+
+ /* Ensure the RXE buffer size is adequate for our payload. */
+ if ((rxe = qrx_reserve_rxe(&qrx->rx_free, rxe, rxe->hdr.len + i)) == NULL)
+ goto malformed;
+
+ /*
+ * We decrypt the packet body to immediately after the token at the start of
+ * the RXE buffer (where present).
+ *
+ * Do the decryption from the PACKET (which points into URXE memory) to our
+ * RXE payload (single-copy decryption), then fixup the pointers in the
+ * header to point to our new buffer.
+ *
+ * If decryption fails this is considered a permanent error; we defer
+ * packets we don't yet have decryption keys for above, so if this fails,
+ * something has gone wrong with the handshake process or a packet has been
+ * corrupted.
+ */
+ dst = (unsigned char *)rxe_data(rxe) + i;
+ if (!qrx_decrypt_pkt_body(qrx, dst, rxe->hdr.data, rxe->hdr.len,
+ &dec_len, sop, aad_len, rxe->pn, QUIC_ENC_LEVEL_INITIAL,
+ rxe->hdr.key_phase, &rx_key_epoch))
+ goto malformed;
+
+ /*
+ * -----------------------------------------------------
+ * IMPORTANT: ANYTHING ABOVE THIS LINE IS UNVERIFIED
+ * AND MUST BE TIMING-CHANNEL SAFE.
+ * -----------------------------------------------------
+ *
+ * At this point, we have successfully authenticated the AEAD tag and no
+ * longer need to worry about exposing the PN, PN length or Key Phase bit in
+ * timing channels. Invoke any configured validation callback to allow for
+ * rejection of duplicate PNs.
+ */
+ if (!qrx_validate_hdr_late(qrx, rxe))
+ goto malformed;
+
+ pkt_mark(&urxe->processed, 0);
+
+ /*
+ * Update header to point to the decrypted buffer, which may be shorter
+ * due to AEAD tags, block padding, etc.
+ */
+ rxe->hdr.data = dst;
+ rxe->hdr.len = dec_len;
+ rxe->data_len = dec_len;
+ rxe->datagram_len = datagram_len;
+ rxe->key_epoch = rx_key_epoch;
+
+ /* We processed the PN successfully, so update largest processed PN. */
+ pn_space = rxe_determine_pn_space(rxe);
+ if (rxe->pn > qrx->largest_pn[pn_space])
+ qrx->largest_pn[pn_space] = rxe->pn;
+
+ /* Copy across network addresses and RX time from URXE to RXE. */
+ rxe->peer = urxe->peer;
+ rxe->local = urxe->local;
+ rxe->time = urxe->time;
+ rxe->datagram_id = urxe->datagram_id;
+
+ /*
+ * The packet is decrypted; move it to the rx_pending queue, where it
+ * waits to be further processed by ch_rx().
+ */
+ ossl_list_rxe_remove(&qrx->rx_free, rxe);
+ ossl_list_rxe_insert_tail(&qrx->rx_pending, rxe);
+
+ return 1;
+
+malformed:
+ /* caller (port_default_packet_handler()) should discard urxe */
+ return 0;
+}
+
+int ossl_qrx_validate_initial_packet(OSSL_QRX *qrx, QUIC_URXE *urxe,
+ const QUIC_CONN_ID *dcid)
+{
+ urxe->processed = 0;
+ urxe->hpr_removed = 0;
+ urxe->deferred = 0;
+
+ return qrx_validate_initial_pkt(qrx, urxe, dcid, urxe->data_len);
+}
+
+static void qrx_requeue_deferred(OSSL_QRX *qrx)
+{
+ QUIC_URXE *e;
+
+ while ((e = ossl_list_urxe_head(&qrx->urx_deferred)) != NULL) {
+ ossl_list_urxe_remove(&qrx->urx_deferred, e);
+ ossl_list_urxe_insert_tail(&qrx->urx_pending, e);
+ }
+}
+
+int ossl_qrx_provide_secret(OSSL_QRX *qrx, uint32_t enc_level,
+ uint32_t suite_id, EVP_MD *md,
+ const unsigned char *secret, size_t secret_len)
+{
+ if (enc_level >= QUIC_ENC_LEVEL_NUM)
+ return 0;
+
+ if (!ossl_qrl_enc_level_set_provide_secret(&qrx->el_set,
+ qrx->libctx,
+ qrx->propq,
+ enc_level,
+ suite_id,
+ md,
+ secret,
+ secret_len,
+ qrx->init_key_phase_bit,
+ /*is_tx=*/0))
+ return 0;
+
+ /*
+ * Any packets we previously could not decrypt, we may now be able to
+ * decrypt, so move any datagrams containing deferred packets from the
+ * deferred to the pending queue.
+ */
+ qrx_requeue_deferred(qrx);
+ return 1;
+}
+
+int ossl_qrx_discard_enc_level(OSSL_QRX *qrx, uint32_t enc_level)
+{
+ if (enc_level >= QUIC_ENC_LEVEL_NUM)
+ return 0;
+
+ ossl_qrl_enc_level_set_discard(&qrx->el_set, enc_level);
+ return 1;
+}
+
+/* Returns 1 if there are one or more pending RXEs. */
+int ossl_qrx_processed_read_pending(OSSL_QRX *qrx)
+{
+ return !ossl_list_rxe_is_empty(&qrx->rx_pending);
+}
+
+/* Returns 1 if there are yet-unprocessed packets. */
+int ossl_qrx_unprocessed_read_pending(OSSL_QRX *qrx)
+{
+ return !ossl_list_urxe_is_empty(&qrx->urx_pending)
+ || !ossl_list_urxe_is_empty(&qrx->urx_deferred);
+}
+
+/* Pop the next pending RXE. Returns NULL if no RXE is pending. */
+static RXE *qrx_pop_pending_rxe(OSSL_QRX *qrx)
+{
+ RXE *rxe = ossl_list_rxe_head(&qrx->rx_pending);
+
+ if (rxe == NULL)
+ return NULL;
+
+ ossl_list_rxe_remove(&qrx->rx_pending, rxe);
+ return rxe;
+}
+
+/* Allocate a new RXE. */
+static RXE *qrx_alloc_rxe(size_t alloc_len)
+{
+ RXE *rxe;
+
+ if (alloc_len >= SIZE_MAX - sizeof(RXE))
+ return NULL;
+
+ rxe = OPENSSL_malloc(sizeof(RXE) + alloc_len);
+ if (rxe == NULL)
+ return NULL;
+
+ ossl_list_rxe_init_elem(rxe);
+ rxe->alloc_len = alloc_len;
+ rxe->data_len = 0;
+ rxe->refcount = 0;
+ return rxe;
+}
+
+/*
+ * Ensures there is at least one RXE in the RX free list, allocating a new entry
+ * if necessary. The returned RXE is in the RX free list; it is not popped.
+ *
+ * alloc_len is a hint which may be used to determine the RXE size if allocation
+ * is necessary. Returns NULL on allocation failure.
+ */
+static RXE *qrx_ensure_free_rxe(OSSL_QRX *qrx, size_t alloc_len)
+{
+ RXE *rxe;
+
+ if (ossl_list_rxe_head(&qrx->rx_free) != NULL)
+ return ossl_list_rxe_head(&qrx->rx_free);
+
+ rxe = qrx_alloc_rxe(alloc_len);
+ if (rxe == NULL)
+ return NULL;
+
+ ossl_list_rxe_insert_tail(&qrx->rx_free, rxe);
+ return rxe;
+}
+
+/*
+ * Resize the data buffer attached to an RXE to be n bytes in size. The address
+ * of the RXE might change; the new address is returned, or NULL on failure, in
+ * which case the original RXE remains valid.
+ */
+static RXE *qrx_resize_rxe(RXE_LIST *rxl, RXE *rxe, size_t n)
+{
+ RXE *rxe2, *p;
+
+ /* Should never happen. */
+ if (rxe == NULL)
+ return NULL;
+
+ if (n >= SIZE_MAX - sizeof(RXE))
+ return NULL;
+
+ /* Remove the item from the list to avoid accessing freed memory */
+ p = ossl_list_rxe_prev(rxe);
+ ossl_list_rxe_remove(rxl, rxe);
+
+ /* Should never resize an RXE which has been handed out. */
+ if (!ossl_assert(rxe->refcount == 0))
+ return NULL;
+
+ /*
+ * NOTE: We do not clear old memory, although it does contain decrypted
+ * data.
+ */
+ rxe2 = OPENSSL_realloc(rxe, sizeof(RXE) + n);
+ if (rxe2 == NULL) {
+ /* Resize failed, restore old allocation. */
+ if (p == NULL)
+ ossl_list_rxe_insert_head(rxl, rxe);
+ else
+ ossl_list_rxe_insert_after(rxl, p, rxe);
+ return NULL;
+ }
+
+ if (p == NULL)
+ ossl_list_rxe_insert_head(rxl, rxe2);
+ else
+ ossl_list_rxe_insert_after(rxl, p, rxe2);
+
+ rxe2->alloc_len = n;
+ return rxe2;
+}
+
+/*
+ * Ensure the data buffer attached to an RXE is at least n bytes in size.
+ * Returns NULL on failure.
+ */
+static RXE *qrx_reserve_rxe(RXE_LIST *rxl,
+ RXE *rxe, size_t n)
+{
+ if (rxe->alloc_len >= n)
+ return rxe;
+
+ return qrx_resize_rxe(rxl, rxe, n);
+}
+
+/* Return a RXE handed out to the user back to our freelist. */
+static void qrx_recycle_rxe(OSSL_QRX *qrx, RXE *rxe)
+{
+ /* RXE should not be in any list */
+ assert(ossl_list_rxe_prev(rxe) == NULL && ossl_list_rxe_next(rxe) == NULL);
+ rxe->pkt.hdr = NULL;
+ rxe->pkt.peer = NULL;
+ rxe->pkt.local = NULL;
+ ossl_list_rxe_insert_tail(&qrx->rx_free, rxe);
+}
+
+/*
+ * Given a pointer to a pointer pointing to a buffer and the size of that
+ * buffer, copy the buffer into *prxe, expanding the RXE if necessary (its
+ * pointer may change due to realloc). *pi is the offset in bytes to copy the
+ * buffer to, and on success is updated to be the offset pointing after the
+ * copied buffer. *pptr is updated to point to the new location of the buffer.
+ */
+static int qrx_relocate_buffer(OSSL_QRX *qrx, RXE **prxe, size_t *pi,
+ const unsigned char **pptr, size_t buf_len)
+{
+ RXE *rxe;
+ unsigned char *dst;
+
+ if (!buf_len)
+ return 1;
+
+ if ((rxe = qrx_reserve_rxe(&qrx->rx_free, *prxe, *pi + buf_len)) == NULL)
+ return 0;
+
+ *prxe = rxe;
+ dst = (unsigned char *)rxe_data(rxe) + *pi;
+
+ memcpy(dst, *pptr, buf_len);
+ *pi += buf_len;
+ *pptr = dst;
+ return 1;
+}
+
+static uint32_t qrx_determine_enc_level(const QUIC_PKT_HDR *hdr)
+{
+ switch (hdr->type) {
+ case QUIC_PKT_TYPE_INITIAL:
+ return QUIC_ENC_LEVEL_INITIAL;
+ case QUIC_PKT_TYPE_HANDSHAKE:
+ return QUIC_ENC_LEVEL_HANDSHAKE;
+ case QUIC_PKT_TYPE_0RTT:
+ return QUIC_ENC_LEVEL_0RTT;
+ case QUIC_PKT_TYPE_1RTT:
+ return QUIC_ENC_LEVEL_1RTT;
+
+ default:
+ assert(0);
+ case QUIC_PKT_TYPE_RETRY:
+ case QUIC_PKT_TYPE_VERSION_NEG:
+ return QUIC_ENC_LEVEL_INITIAL; /* not used */
+ }
+}
+
+static uint32_t rxe_determine_pn_space(RXE *rxe)
+{
+ uint32_t enc_level;
+
+ enc_level = qrx_determine_enc_level(&rxe->hdr);
+ return ossl_quic_enc_level_to_pn_space(enc_level);
+}
+
+static int qrx_validate_hdr_early(OSSL_QRX *qrx, RXE *rxe,
+ const QUIC_CONN_ID *first_dcid)
+{
+ /* Ensure version is what we want. */
+ if (rxe->hdr.version != QUIC_VERSION_1
+ && rxe->hdr.version != QUIC_VERSION_NONE)
+ return 0;
+
+ /* Clients should never receive 0-RTT packets. */
+ if (rxe->hdr.type == QUIC_PKT_TYPE_0RTT)
+ return 0;
+
+ /* Version negotiation and retry packets must be the first packet. */
+ if (first_dcid != NULL && !ossl_quic_pkt_type_can_share_dgram(rxe->hdr.type))
+ return 0;
+
+ /*
+ * If this is not the first packet in a datagram, the destination connection
+ * ID must match the one in that packet.
+ */
+ if (first_dcid != NULL) {
+ if (!ossl_assert(first_dcid->id_len < QUIC_MAX_CONN_ID_LEN)
+ || !ossl_quic_conn_id_eq(first_dcid,
+ &rxe->hdr.dst_conn_id))
+ return 0;
+ }
+
+ return 1;
+}
+
+/* Validate header and decode PN. */
+static int qrx_validate_hdr(OSSL_QRX *qrx, RXE *rxe)
+{
+ int pn_space = rxe_determine_pn_space(rxe);
+
+ if (!ossl_quic_wire_decode_pkt_hdr_pn(rxe->hdr.pn, rxe->hdr.pn_len,
+ qrx->largest_pn[pn_space],
+ &rxe->pn))
+ return 0;
+
+ return 1;
+}
+
+/* Late packet header validation. */
+static int qrx_validate_hdr_late(OSSL_QRX *qrx, RXE *rxe)
+{
+ int pn_space = rxe_determine_pn_space(rxe);
+
+ /*
+ * Allow our user to decide whether to discard the packet before we try and
+ * decrypt it.
+ */
+ if (qrx->validation_cb != NULL
+ && !qrx->validation_cb(rxe->pn, pn_space, qrx->validation_cb_arg))
+ return 0;
+
+ return 1;
+}
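+
+/*
+ * Illustrative sketch (hypothetical): a late validation callback which
+ * rejects duplicate PNs, registered via ossl_qrx_set_late_validation_cb().
+ * seen_pn_set and its helpers are invented for illustration.
+ */
+#if 0
+static int example_late_validation(QUIC_PN pn, int pn_space, void *arg)
+{
+ struct seen_pn_set *seen = arg;
+
+ if (seen_pn_set_contains(seen, pn_space, pn))
+ return 0; /* duplicate; the QRX treats the packet as malformed */
+
+ seen_pn_set_add(seen, pn_space, pn);
+ return 1;
+}
+#endif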
+
+/*
+ * Retrieves the correct cipher context for an EL and key phase. Writes the key
+ * epoch number actually used for packet decryption to *rx_key_epoch.
+ */
+static size_t qrx_get_cipher_ctx_idx(OSSL_QRX *qrx, OSSL_QRL_ENC_LEVEL *el,
+ uint32_t enc_level,
+ unsigned char key_phase_bit,
+ uint64_t *rx_key_epoch,
+ int *is_old_key)
+{
+ size_t idx;
+
+ *is_old_key = 0;
+
+ if (enc_level != QUIC_ENC_LEVEL_1RTT) {
+ *rx_key_epoch = 0;
+ return 0;
+ }
+
+ if (!ossl_assert(key_phase_bit <= 1))
+ return SIZE_MAX;
+
+ /*
+ * RFC 9001 requires that we not create timing channels which could reveal
+ * the decrypted value of the Key Phase bit. We usually handle this by
+ * keeping the cipher contexts for both the current and next key epochs
+ * around, so that we just select a cipher context blindly using the key
+ * phase bit, which is time-invariant.
+ *
+ * In the COOLDOWN state, we only have one keyslot/cipher context. RFC 9001
+ * suggests an implementation strategy to avoid creating a timing channel in
+ * this case:
+ *
+ * Endpoints can use randomized packet protection keys in place of
+ * discarded keys when key updates are not yet permitted.
+ *
+ * Rather than use a randomised key, we simply use our existing key as it
+ * will fail AEAD verification anyway. This avoids the need to keep around a
+ * dedicated garbage key.
+ *
+ * Note: Accessing different cipher contexts is technically not
+ * timing-channel safe due to microarchitectural side channels, but this is
+ * the best we can reasonably do and appears to be directly suggested by the
+ * RFC.
+ */
+ idx = (el->state == QRL_EL_STATE_PROV_COOLDOWN ? el->key_epoch & 1
+ : key_phase_bit);
+
+ /*
+ * We also need to determine the key epoch number which this index
+ * corresponds to. This is so we can report the key epoch number in the
+ * OSSL_QRX_PKT structure, which callers need to validate whether it was OK
+ * for a packet to be sent using a given key epoch's keys.
+ */
+ switch (el->state) {
+ case QRL_EL_STATE_PROV_NORMAL:
+ /*
+ * If we are in the NORMAL state, usually the KP bit will match the LSB
+ * of our key epoch, meaning no new key update is being signalled. If it
+ * does not match, this means the packet (purports to) belong to
+ * the next key epoch.
+ *
+ * IMPORTANT: The AEAD tag has not been verified yet when this function
+ * is called, so this code must be timing-channel safe, hence use of
+ * XOR. Moreover, the value output below is not yet authenticated.
+ */
+ *rx_key_epoch
+ = el->key_epoch + ((el->key_epoch & 1) ^ (uint64_t)key_phase_bit);
+ break;
+
+ case QRL_EL_STATE_PROV_UPDATING:
+ /*
+ * If we are in the UPDATING state, usually the KP bit will match the
+ * LSB of our key epoch. If it does not match, this means that the
+ * packet (purports to) belong to the previous key epoch.
+ *
+ * As above, must be timing-channel safe.
+ */
+ *is_old_key = (el->key_epoch & 1) ^ (uint64_t)key_phase_bit;
+ *rx_key_epoch = el->key_epoch - (uint64_t)*is_old_key;
+ break;
+
+ case QRL_EL_STATE_PROV_COOLDOWN:
+ /*
+ * If we are in COOLDOWN, there is only one key epoch we can possibly
+ * decrypt with, so just try that. If AEAD decryption fails, the
+ * value we output here isn't used anyway.
+ */
+ *rx_key_epoch = el->key_epoch;
+ break;
+ }
+
+ return idx;
+}
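+
+/*
+ * Illustrative worked example (hypothetical) of the branch-free epoch
+ * reconstruction above for the NORMAL state. With key_epoch = 4 (LSB 0), a
+ * KP bit of 0 yields 4 + (0 ^ 0) = 4 (no update signalled), while a KP bit
+ * of 1 yields 4 + (0 ^ 1) = 5 (the packet purports to use the next epoch).
+ */
+#if 0
+static uint64_t epoch_for_normal_state(uint64_t key_epoch,
+ unsigned char key_phase_bit)
+{
+ return key_epoch + ((key_epoch & 1) ^ (uint64_t)key_phase_bit);
+}
+#endif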
+
+/*
+ * Tries to decrypt a packet payload.
+ *
+ * Returns 1 on success or 0 on failure (which is permanent). The payload is
+ * decrypted from src and written to dst. The buffer dst must be of at least
+ * src_len bytes in length. The actual length of the output in bytes is written
+ * to *dec_len on success, which will always be equal to or less than (usually
+ * less than) src_len.
+ */
+static int qrx_decrypt_pkt_body(OSSL_QRX *qrx, unsigned char *dst,
+ const unsigned char *src,
+ size_t src_len, size_t *dec_len,
+ const unsigned char *aad, size_t aad_len,
+ QUIC_PN pn, uint32_t enc_level,
+ unsigned char key_phase_bit,
+ uint64_t *rx_key_epoch)
+{
+ int l = 0, l2 = 0, is_old_key, nonce_len;
+ unsigned char nonce[EVP_MAX_IV_LENGTH];
+ size_t i, cctx_idx;
+ OSSL_QRL_ENC_LEVEL *el = ossl_qrl_enc_level_set_get(&qrx->el_set,
+ enc_level, 1);
+ EVP_CIPHER_CTX *cctx;
+
+ if (src_len > INT_MAX || aad_len > INT_MAX)
+ return 0;
+
+ /* We should not have been called if we do not have key material. */
+ if (!ossl_assert(el != NULL))
+ return 0;
+
+ if (el->tag_len >= src_len)
+ return 0;
+
+ /*
+ * If we have failed to authenticate a certain number of ciphertexts, refuse
+ * to decrypt any more ciphertexts.
+ */
+ if (qrx->forged_pkt_count >= ossl_qrl_get_suite_max_forged_pkt(el->suite_id))
+ return 0;
+
+ cctx_idx = qrx_get_cipher_ctx_idx(qrx, el, enc_level, key_phase_bit,
+ rx_key_epoch, &is_old_key);
+ if (!ossl_assert(cctx_idx < OSSL_NELEM(el->cctx)))
+ return 0;
+
+ if (is_old_key && pn >= qrx->cur_epoch_start_pn)
+ /*
+ * RFC 9001 s. 5.5: Once an endpoint successfully receives a packet with
+ * a given PN, it MUST discard all packets in the same PN space with
+ * higher PNs if they cannot be successfully unprotected with the same
+ * key, or -- if there is a key update -- a subsequent packet protection
+ * key.
+ *
+ * In other words, once a PN x triggers a KU, it is invalid for us to
+ * receive a packet with a newer PN y (y > x) using the old keys.
+ */
+ return 0;
+
+ cctx = el->cctx[cctx_idx];
+
+ /* Construct nonce (nonce=IV ^ PN). */
+ nonce_len = EVP_CIPHER_CTX_get_iv_length(cctx);
+ if (!ossl_assert(nonce_len >= (int)sizeof(QUIC_PN)))
+ return 0;
+
+ memcpy(nonce, el->iv[cctx_idx], nonce_len);
+ for (i = 0; i < sizeof(QUIC_PN); ++i)
+ nonce[nonce_len - i - 1] ^= (unsigned char)(pn >> (i * 8));
+
+ /* type and key will already have been setup; feed the IV. */
+ if (EVP_CipherInit_ex(cctx, NULL,
+ NULL, NULL, nonce, /*enc=*/0) != 1)
+ return 0;
+
+ /* Feed the AEAD tag we got so the cipher can validate it. */
+ if (EVP_CIPHER_CTX_ctrl(cctx, EVP_CTRL_AEAD_SET_TAG,
+ el->tag_len,
+ (unsigned char *)src + src_len - el->tag_len) != 1)
+ return 0;
+
+ /* Feed AAD data. */
+ if (EVP_CipherUpdate(cctx, NULL, &l, aad, aad_len) != 1)
+ return 0;
+
+ /* Feed encrypted packet body. */
+ if (EVP_CipherUpdate(cctx, dst, &l, src, src_len - el->tag_len) != 1)
+ return 0;
+
+#ifdef FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION
+ /*
+ * Throw away what we just decrypted and just use the ciphertext instead
+ * (which should be unencrypted)
+ */
+ memcpy(dst, src, l);
+
+ /* Pretend to authenticate the tag but ignore it */
+ if (EVP_CipherFinal_ex(cctx, NULL, &l2) != 1) {
+ /* We don't care */
+ }
+#else
+ /* Ensure authentication succeeded. */
+ if (EVP_CipherFinal_ex(cctx, NULL, &l2) != 1) {
+ /* Authentication failed, increment failed auth counter. */
+ ++qrx->forged_pkt_count;
+ return 0;
+ }
+#endif
+
+ *dec_len = l;
+ return 1;
+}
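+
+/*
+ * Illustrative sketch (hypothetical) of the nonce construction above, per
+ * RFC 9001 s. 5.3: the packet number is left-padded to the IV length and
+ * XORed into the low-order bytes of the IV. Assumes iv_len >= sizeof(pn).
+ */
+#if 0
+static void build_nonce(unsigned char *nonce, const unsigned char *iv,
+ size_t iv_len, uint64_t pn)
+{
+ size_t i;
+
+ memcpy(nonce, iv, iv_len);
+ for (i = 0; i < sizeof(pn); ++i)
+ nonce[iv_len - i - 1] ^= (unsigned char)(pn >> (i * 8));
+}
+#endif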
+
+static ossl_inline void ignore_res(int x)
+{
+ /* No-op. */
+}
+
+static void qrx_key_update_initiated(OSSL_QRX *qrx, QUIC_PN pn)
+{
+ if (!ossl_qrl_enc_level_set_key_update(&qrx->el_set, QUIC_ENC_LEVEL_1RTT))
+ /* We are already in RXKU, so we don't call the callback again. */
+ return;
+
+ qrx->cur_epoch_start_pn = pn;
+
+ if (qrx->key_update_cb != NULL)
+ qrx->key_update_cb(pn, qrx->key_update_cb_arg);
+}
+
+/* Process a single packet in a datagram. */
+static int qrx_process_pkt(OSSL_QRX *qrx, QUIC_URXE *urxe,
+ PACKET *pkt, size_t pkt_idx,
+ QUIC_CONN_ID *first_dcid,
+ size_t datagram_len)
+{
+ RXE *rxe;
+ const unsigned char *eop = NULL;
+ size_t i, aad_len = 0, dec_len = 0;
+ PACKET orig_pkt = *pkt;
+ const unsigned char *sop = PACKET_data(pkt);
+ unsigned char *dst;
+ char need_second_decode = 0, already_processed = 0;
+ QUIC_PKT_HDR_PTRS ptrs;
+ uint32_t pn_space, enc_level;
+ OSSL_QRL_ENC_LEVEL *el = NULL;
+ uint64_t rx_key_epoch = UINT64_MAX;
+
+ /*
+ * Get a free RXE. If we need to allocate a new one, use the packet length
+ * as a good ballpark figure.
+ */
+ rxe = qrx_ensure_free_rxe(qrx, PACKET_remaining(pkt));
+ if (rxe == NULL)
+ return 0;
+
+ /* Have we already processed this packet? */
+ if (pkt_is_marked(&urxe->processed, pkt_idx))
+ already_processed = 1;
+
+ /*
+ * Decode the header into the RXE structure. We first decrypt and read the
+ * unprotected part of the packet header (unless we already removed header
+ * protection, in which case we decode all of it).
+ */
+ need_second_decode = !pkt_is_marked(&urxe->hpr_removed, pkt_idx);
+ if (!ossl_quic_wire_decode_pkt_hdr(pkt,
+ qrx->short_conn_id_len,
+ need_second_decode, 0, &rxe->hdr, &ptrs,
+ NULL))
+ goto malformed;
+
+ /*
+ * Our successful decode above included an intelligible length and the
+ * PACKET is now pointing to the end of the QUIC packet.
+ */
+ eop = PACKET_data(pkt);
+
+ /*
+ * Make a note of the first packet's DCID so we can later ensure the
+ * destination connection IDs of all packets in a datagram match.
+ */
+ if (pkt_idx == 0)
+ *first_dcid = rxe->hdr.dst_conn_id;
+
+ /*
+ * Early header validation. Since we now know the packet length, we can also
+ * now skip over it if we already processed it.
+ */
+ if (already_processed
+ || !qrx_validate_hdr_early(qrx, rxe, pkt_idx == 0 ? NULL : first_dcid))
+ /*
+ * Already processed packets are handled identically to malformed
+ * packets; i.e., they are ignored.
+ */
+ goto malformed;
+
+ if (!ossl_quic_pkt_type_is_encrypted(rxe->hdr.type)) {
+ /*
+ * Version negotiation and retry packets are a special case. They do not
+ * contain a payload which needs decrypting and have no header
+ * protection.
+ */
+
+ /* Just copy the payload from the URXE to the RXE. */
+ if ((rxe = qrx_reserve_rxe(&qrx->rx_free, rxe, rxe->hdr.len)) == NULL)
+ /*
+ * Allocation failure. EOP will be pointing to the end of the
+ * datagram so processing of this datagram will end here.
+ */
+ goto malformed;
+
+ /* We are now committed to returning the packet. */
+ memcpy(rxe_data(rxe), rxe->hdr.data, rxe->hdr.len);
+ pkt_mark(&urxe->processed, pkt_idx);
+
+ rxe->hdr.data = rxe_data(rxe);
+ rxe->pn = QUIC_PN_INVALID;
+
+ rxe->data_len = rxe->hdr.len;
+ rxe->datagram_len = datagram_len;
+ rxe->key_epoch = 0;
+ rxe->peer = urxe->peer;
+ rxe->local = urxe->local;
+ rxe->time = urxe->time;
+ rxe->datagram_id = urxe->datagram_id;
+
+ /* Move RXE to pending. */
+ ossl_list_rxe_remove(&qrx->rx_free, rxe);
+ ossl_list_rxe_insert_tail(&qrx->rx_pending, rxe);
+ return 0; /* success, did not defer */
+ }
+
+ /* Determine encryption level of packet. */
+ enc_level = qrx_determine_enc_level(&rxe->hdr);
+
+ /* If we do not have keying material for this encryption level yet, defer. */
+ switch (ossl_qrl_enc_level_set_have_el(&qrx->el_set, enc_level)) {
+ case 1:
+ /* We have keys. */
+ if (enc_level == QUIC_ENC_LEVEL_1RTT && !qrx->allow_1rtt)
+ /*
+ * But we cannot process 1-RTT packets until the handshake is
+ * completed (RFC 9000 s. 5.7).
+ */
+ goto cannot_decrypt;
+
+ break;
+ case 0:
+ /* No keys yet. */
+ goto cannot_decrypt;
+ default:
+ /* We already discarded keys for this EL, we will never process this.*/
+ goto malformed;
+ }
+
+ /*
+ * We will copy any token included in the packet to the start of our RXE
+ * data buffer (so that we don't reference the URXE buffer any more and can
+ * recycle it). Track our position in the RXE buffer by index instead of
+ * pointer as the pointer may change as reallocs occur.
+ */
+ i = 0;
+
+ /*
+ * rxe->hdr.data is now pointing at the (encrypted) packet payload. rxe->hdr
+ * also has fields pointing into the PACKET buffer which will be going away
+ * soon (the URXE will be reused for another incoming packet).
+ *
+ * Firstly, relocate some of these fields into the RXE as needed.
+ *
+ * Relocate token buffer and fix pointer.
+ */
+ if (rxe->hdr.type == QUIC_PKT_TYPE_INITIAL) {
+ const unsigned char *token = rxe->hdr.token;
+
+ /*
+ * This may change the value of rxe and change the value of the token
+ * pointer as well. So we must make a temporary copy of the pointer to
+ * the token, and then copy it back into the new location of the rxe
+ */
+ if (!qrx_relocate_buffer(qrx, &rxe, &i, &token, rxe->hdr.token_len))
+ goto malformed;
+
+ rxe->hdr.token = token;
+ }
+
+ /* Now remove header protection. */
+ *pkt = orig_pkt;
+
+ el = ossl_qrl_enc_level_set_get(&qrx->el_set, enc_level, 1);
+ assert(el != NULL); /* Already checked above */
+
+ if (need_second_decode) {
+ if (!ossl_quic_hdr_protector_decrypt(&el->hpr, &ptrs))
+ goto malformed;
+
+ /*
+ * We have removed header protection, so don't attempt to do it again if
+ * the packet gets deferred and processed again.
+ */
+ pkt_mark(&urxe->hpr_removed, pkt_idx);
+
+ /* Decode the now unprotected header. */
+ if (ossl_quic_wire_decode_pkt_hdr(pkt, qrx->short_conn_id_len,
+ 0, 0, &rxe->hdr, NULL, NULL) != 1)
+ goto malformed;
+ }
+
+ /* Validate header and decode PN. */
+ if (!qrx_validate_hdr(qrx, rxe))
+ goto malformed;
+
+ if (qrx->msg_callback != NULL)
+ qrx->msg_callback(0, OSSL_QUIC1_VERSION, SSL3_RT_QUIC_PACKET, sop,
+ eop - sop - rxe->hdr.len, qrx->msg_callback_ssl,
+ qrx->msg_callback_arg);
+
+ /*
+ * The AAD data is the entire (unprotected) packet header including the PN.
+ * The packet header has been unprotected in place, so we can just reuse the
+ * PACKET buffer. The header ends where the payload begins.
+ */
+ aad_len = rxe->hdr.data - sop;
+
+ /* Ensure the RXE buffer size is adequate for our payload. */
+ if ((rxe = qrx_reserve_rxe(&qrx->rx_free, rxe, rxe->hdr.len + i)) == NULL) {
+ /*
+ * Allocation failure, treat as malformed and do not bother processing
+ * any further packets in the datagram as they are likely to also
+ * encounter allocation failures.
+ */
+ eop = NULL;
+ goto malformed;
+ }
+
+ /*
+ * We decrypt the packet body to immediately after the token at the start of
+ * the RXE buffer (where present).
+ *
+ * Do the decryption from the PACKET (which points into URXE memory) to our
+ * RXE payload (single-copy decryption), then fixup the pointers in the
+ * header to point to our new buffer.
+ *
+ * If decryption fails this is considered a permanent error; we defer
+ * packets we don't yet have decryption keys for above, so if this fails,
+ * something has gone wrong with the handshake process or a packet has been
+ * corrupted.
+ */
+ dst = (unsigned char *)rxe_data(rxe) + i;
+ if (!qrx_decrypt_pkt_body(qrx, dst, rxe->hdr.data, rxe->hdr.len,
+ &dec_len, sop, aad_len, rxe->pn, enc_level,
+ rxe->hdr.key_phase, &rx_key_epoch))
+ goto malformed;
+
+ /*
+ * -----------------------------------------------------
+ * IMPORTANT: ANYTHING ABOVE THIS LINE IS UNVERIFIED
+ * AND MUST BE TIMING-CHANNEL SAFE.
+ * -----------------------------------------------------
+ *
+ * At this point, we have successfully authenticated the AEAD tag and no
+ * longer need to worry about exposing the PN, PN length or Key Phase bit in
+ * timing channels. Invoke any configured validation callback to allow for
+ * rejection of duplicate PNs.
+ */
+ if (!qrx_validate_hdr_late(qrx, rxe))
+ goto malformed;
+
+ /* Check for a Key Phase bit differing from our expectation. */
+ if (rxe->hdr.type == QUIC_PKT_TYPE_1RTT
+ && rxe->hdr.key_phase != (el->key_epoch & 1))
+ qrx_key_update_initiated(qrx, rxe->pn);
+
+ /*
+ * We have now successfully decrypted the packet payload. If there are
+ * additional packets in the datagram, it is possible we will fail to
+ * decrypt them and need to defer them until we have some key material we
+ * don't currently possess. If this happens, the URXE will be moved to the
+ * deferred queue. Since a URXE corresponds to one datagram, which may
+ * contain multiple packets, we must ensure any packets we have already
+ * processed in the URXE are not processed again (this is an RFC
+ * requirement). We do this by marking the nth packet in the datagram as
+ * processed.
+ *
+ * We are now committed to returning this decrypted packet to the user,
+ * meaning we now consider the packet processed and must mark it
+ * accordingly.
+ */
+ pkt_mark(&urxe->processed, pkt_idx);
+
+ /*
+ * Update header to point to the decrypted buffer, which may be shorter
+ * due to AEAD tags, block padding, etc.
+ */
+ rxe->hdr.data = dst;
+ rxe->hdr.len = dec_len;
+ rxe->data_len = dec_len;
+ rxe->datagram_len = datagram_len;
+ rxe->key_epoch = rx_key_epoch;
+
+ /* We processed the PN successfully, so update largest processed PN. */
+ pn_space = rxe_determine_pn_space(rxe);
+ if (rxe->pn > qrx->largest_pn[pn_space])
+ qrx->largest_pn[pn_space] = rxe->pn;
+
+ /* Copy across network addresses and RX time from URXE to RXE. */
+ rxe->peer = urxe->peer;
+ rxe->local = urxe->local;
+ rxe->time = urxe->time;
+ rxe->datagram_id = urxe->datagram_id;
+
+ /* Move RXE to pending. */
+ ossl_list_rxe_remove(&qrx->rx_free, rxe);
+ ossl_list_rxe_insert_tail(&qrx->rx_pending, rxe);
+ return 0; /* success, did not defer; not distinguished from failure */
+
+cannot_decrypt:
+ /*
+ * We cannot process this packet right now (but might be able to later). We
+ * MUST attempt to process any other packets in the datagram, so defer it
+ * and skip over it.
+ */
+ assert(eop != NULL && eop >= PACKET_data(pkt));
+ /*
+ * We don't care if this fails as it will just result in the packet being at
+ * the end of the datagram buffer.
+ */
+ ignore_res(PACKET_forward(pkt, eop - PACKET_data(pkt)));
+ return 1; /* deferred */
+
+malformed:
+ if (eop != NULL) {
+ /*
+ * This packet cannot be processed and will never be processable. We
+ * were at least able to decode its header and determine its length, so
+ * we can skip over it and try to process any subsequent packets in the
+ * datagram.
+ *
+ * Mark as processed as an optimization.
+ */
+ assert(eop >= PACKET_data(pkt));
+ pkt_mark(&urxe->processed, pkt_idx);
+ /* We don't care if this fails (see above) */
+ ignore_res(PACKET_forward(pkt, eop - PACKET_data(pkt)));
+ } else {
+ /*
+ * This packet cannot be processed and will never be processable.
+ * Because even its header is not intelligible, we cannot examine any
+ * further packets in the datagram because its length cannot be
+ * discerned.
+ *
+ * Advance over the entire remainder of the datagram, and mark it as
+ * processed as an optimization.
+ */
+ pkt_mark(&urxe->processed, pkt_idx);
+ /* We don't care if this fails (see above) */
+ ignore_res(PACKET_forward(pkt, PACKET_remaining(pkt)));
+ }
+ return 0; /* failure, did not defer; not distinguished from success */
+}
+
+/* Process a datagram which was received. */
+static int qrx_process_datagram(OSSL_QRX *qrx, QUIC_URXE *e,
+ const unsigned char *data,
+ size_t data_len)
+{
+ int have_deferred = 0;
+ PACKET pkt;
+ size_t pkt_idx = 0;
+ QUIC_CONN_ID first_dcid = { 255 };
+
+ qrx->bytes_received += data_len;
+
+ if (!PACKET_buf_init(&pkt, data, data_len))
+ return 0;
+
+ for (; PACKET_remaining(&pkt) > 0; ++pkt_idx) {
+ /*
+ * A packet smaller than the minimum possible QUIC packet size is not
+ * considered valid. We also ignore more than a certain number of
+ * packets within the same datagram.
+ */
+ if (PACKET_remaining(&pkt) < QUIC_MIN_VALID_PKT_LEN
+ || pkt_idx >= QUIC_MAX_PKT_PER_URXE)
+ break;
+
+ /*
+ * We note whether packet processing resulted in a deferral since
+ * this means we need to move the URXE to the deferred list rather
+ * than the free list after we're finished dealing with it for now.
+ *
+ * However, we don't otherwise care here whether processing succeeded or
+ * failed, as the RFC says even if a packet in a datagram is malformed,
+ * we should still try to process any packets following it.
+ *
+ * In the case where the packet is so malformed that we can't determine
+ * its length, qrx_process_pkt() takes care of advancing to the end of
+ * the datagram, so we will exit the loop automatically in this case.
+ */
+ if (qrx_process_pkt(qrx, e, &pkt, pkt_idx, &first_dcid, data_len))
+ have_deferred = 1;
+ }
+
+ /* Only report whether there were any deferrals. */
+ return have_deferred;
+}
+
+/* Process a single pending URXE. */
+static int qrx_process_one_urxe(OSSL_QRX *qrx, QUIC_URXE *e)
+{
+ int was_deferred;
+
+ /* The next URXE we process should be at the head of the pending list. */
+ if (!ossl_assert(e == ossl_list_urxe_head(&qrx->urx_pending)))
+ return 0;
+
+ /*
+ * Attempt to process the datagram. The return value indicates only if
+ * processing of the datagram was deferred. If we failed to process the
+ * datagram, we do not attempt to process it again and silently eat the
+ * error.
+ */
+ was_deferred = qrx_process_datagram(qrx, e, ossl_quic_urxe_data(e),
+ e->data_len);
+
+ /*
+ * Remove the URXE from the pending list and return it to
+ * either the free or deferred list.
+ */
+ ossl_list_urxe_remove(&qrx->urx_pending, e);
+ if (was_deferred > 0 &&
+ (e->deferred || qrx->num_deferred < qrx->max_deferred)) {
+ ossl_list_urxe_insert_tail(&qrx->urx_deferred, e);
+ if (!e->deferred) {
+ e->deferred = 1;
+ ++qrx->num_deferred;
+ }
+ } else {
+ if (e->deferred) {
+ e->deferred = 0;
+ --qrx->num_deferred;
+ }
+ ossl_quic_demux_release_urxe(qrx->demux, e);
+ }
+
+ return 1;
+}
+
+/* Process any pending URXEs to generate pending RXEs. */
+static int qrx_process_pending_urxl(OSSL_QRX *qrx)
+{
+ QUIC_URXE *e;
+
+ while ((e = ossl_list_urxe_head(&qrx->urx_pending)) != NULL)
+ if (!qrx_process_one_urxe(qrx, e))
+ return 0;
+
+ return 1;
+}
+
+int ossl_qrx_read_pkt(OSSL_QRX *qrx, OSSL_QRX_PKT **ppkt)
+{
+ RXE *rxe;
+
+ if (!ossl_qrx_processed_read_pending(qrx)) {
+ if (!qrx_process_pending_urxl(qrx))
+ return 0;
+
+ if (!ossl_qrx_processed_read_pending(qrx))
+ return 0;
+ }
+
+ rxe = qrx_pop_pending_rxe(qrx);
+ if (!ossl_assert(rxe != NULL))
+ return 0;
+
+ assert(rxe->refcount == 0);
+ rxe->refcount = 1;
+
+ rxe->pkt.hdr = &rxe->hdr;
+ rxe->pkt.pn = rxe->pn;
+ rxe->pkt.time = rxe->time;
+ rxe->pkt.datagram_len = rxe->datagram_len;
+ rxe->pkt.peer
+ = BIO_ADDR_family(&rxe->peer) != AF_UNSPEC ? &rxe->peer : NULL;
+ rxe->pkt.local
+ = BIO_ADDR_family(&rxe->local) != AF_UNSPEC ? &rxe->local : NULL;
+ rxe->pkt.key_epoch = rxe->key_epoch;
+ rxe->pkt.datagram_id = rxe->datagram_id;
+ rxe->pkt.qrx = qrx;
+ *ppkt = &rxe->pkt;
+
+ return 1;
+}
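+
+/*
+ * Illustrative usage sketch (hypothetical): a typical consumer drains the
+ * QRX and releases each packet when done with it; ossl_qrx_pkt_up_ref() can
+ * be used if a packet must outlive the loop body.
+ */
+#if 0
+static void example_drain(OSSL_QRX *qrx)
+{
+ OSSL_QRX_PKT *pkt;
+
+ while (ossl_qrx_read_pkt(qrx, &pkt)) {
+ /* ...inspect pkt->hdr, pkt->pn, pkt->peer... */
+ ossl_qrx_pkt_release(pkt);
+ }
+}
+#endif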
+
+void ossl_qrx_pkt_release(OSSL_QRX_PKT *pkt)
+{
+ RXE *rxe;
+
+ if (pkt == NULL)
+ return;
+
+ rxe = (RXE *)pkt;
+ assert(rxe->refcount > 0);
+ if (--rxe->refcount == 0)
+ qrx_recycle_rxe(pkt->qrx, rxe);
+}
+
+void ossl_qrx_pkt_orphan(OSSL_QRX_PKT *pkt)
+{
+ RXE *rxe;
+
+ if (pkt == NULL)
+ return;
+ rxe = (RXE *)pkt;
+ assert(rxe->refcount > 0);
+ rxe->refcount--;
+ assert(ossl_list_rxe_prev(rxe) == NULL && ossl_list_rxe_next(rxe) == NULL);
+}
+
+void ossl_qrx_pkt_up_ref(OSSL_QRX_PKT *pkt)
+{
+ RXE *rxe = (RXE *)pkt;
+
+ assert(rxe->refcount > 0);
+ ++rxe->refcount;
+}
+
+uint64_t ossl_qrx_get_bytes_received(OSSL_QRX *qrx, int clear)
+{
+ uint64_t v = qrx->bytes_received;
+
+ if (clear)
+ qrx->bytes_received = 0;
+
+ return v;
+}
+
+int ossl_qrx_set_late_validation_cb(OSSL_QRX *qrx,
+ ossl_qrx_late_validation_cb *cb,
+ void *cb_arg)
+{
+ qrx->validation_cb = cb;
+ qrx->validation_cb_arg = cb_arg;
+ return 1;
+}
+
+int ossl_qrx_set_key_update_cb(OSSL_QRX *qrx,
+ ossl_qrx_key_update_cb *cb,
+ void *cb_arg)
+{
+ qrx->key_update_cb = cb;
+ qrx->key_update_cb_arg = cb_arg;
+ return 1;
+}
+
+uint64_t ossl_qrx_get_key_epoch(OSSL_QRX *qrx)
+{
+ OSSL_QRL_ENC_LEVEL *el = ossl_qrl_enc_level_set_get(&qrx->el_set,
+ QUIC_ENC_LEVEL_1RTT, 1);
+
+ return el == NULL ? UINT64_MAX : el->key_epoch;
+}
+
+int ossl_qrx_key_update_timeout(OSSL_QRX *qrx, int normal)
+{
+ OSSL_QRL_ENC_LEVEL *el = ossl_qrl_enc_level_set_get(&qrx->el_set,
+ QUIC_ENC_LEVEL_1RTT, 1);
+
+ if (el == NULL)
+ return 0;
+
+ if (el->state == QRL_EL_STATE_PROV_UPDATING
+ && !ossl_qrl_enc_level_set_key_update_done(&qrx->el_set,
+ QUIC_ENC_LEVEL_1RTT))
+ return 0;
+
+ if (normal && el->state == QRL_EL_STATE_PROV_COOLDOWN
+ && !ossl_qrl_enc_level_set_key_cooldown_done(&qrx->el_set,
+ QUIC_ENC_LEVEL_1RTT))
+ return 0;
+
+ return 1;
+}
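+
+/*
+ * Illustrative sketch (hypothetical): wiring up the key update callback.
+ * The QRX invokes the callback when a peer-initiated key update is first
+ * detected; the owner is then expected to call ossl_qrx_key_update_timeout()
+ * once it is safe to complete the UPDATING and COOLDOWN transitions. A real
+ * implementation would defer that call with a timer (not shown); it is made
+ * inline here only for illustration.
+ */
+#if 0
+static void example_on_rxku(QUIC_PN pn, void *arg)
+{
+ OSSL_QRX *qrx = arg;
+
+ /* In real code, schedule a timer and make this call when it fires. */
+ ossl_qrx_key_update_timeout(qrx, /*normal=*/1);
+}
+
+static void example_setup(OSSL_QRX *qrx)
+{
+ ossl_qrx_set_key_update_cb(qrx, example_on_rxku, qrx);
+}
+#endif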
+
+uint64_t ossl_qrx_get_cur_forged_pkt_count(OSSL_QRX *qrx)
+{
+ return qrx->forged_pkt_count;
+}
+
+uint64_t ossl_qrx_get_max_forged_pkt_count(OSSL_QRX *qrx,
+ uint32_t enc_level)
+{
+ OSSL_QRL_ENC_LEVEL *el = ossl_qrl_enc_level_set_get(&qrx->el_set,
+ enc_level, 1);
+
+ return el == NULL ? UINT64_MAX
+ : ossl_qrl_get_suite_max_forged_pkt(el->suite_id);
+}
+
+void ossl_qrx_allow_1rtt_processing(OSSL_QRX *qrx)
+{
+ if (qrx->allow_1rtt)
+ return;
+
+ qrx->allow_1rtt = 1;
+ qrx_requeue_deferred(qrx);
+}
+
+void ossl_qrx_set_msg_callback(OSSL_QRX *qrx, ossl_msg_cb msg_callback,
+ SSL *msg_callback_ssl)
+{
+ qrx->msg_callback = msg_callback;
+ qrx->msg_callback_ssl = msg_callback_ssl;
+}
+
+void ossl_qrx_set_msg_callback_arg(OSSL_QRX *qrx, void *msg_callback_arg)
+{
+ qrx->msg_callback_arg = msg_callback_arg;
+}
+
+size_t ossl_qrx_get_short_hdr_conn_id_len(OSSL_QRX *qrx)
+{
+ return qrx->short_conn_id_len;
+}
diff --git a/crypto/openssl/ssl/quic/quic_record_shared.c b/crypto/openssl/ssl/quic/quic_record_shared.c
new file mode 100644
index 000000000000..a3fd51db6dcf
--- /dev/null
+++ b/crypto/openssl/ssl/quic/quic_record_shared.c
@@ -0,0 +1,489 @@
+#include "quic_record_shared.h"
+#include "internal/quic_record_util.h"
+#include "internal/common.h"
+#include "../ssl_local.h"
+
+/* Constants used for key derivation in QUIC v1. */
+static const unsigned char quic_v1_iv_label[] = {
+ 0x71, 0x75, 0x69, 0x63, 0x20, 0x69, 0x76 /* "quic iv" */
+};
+static const unsigned char quic_v1_key_label[] = {
+ 0x71, 0x75, 0x69, 0x63, 0x20, 0x6b, 0x65, 0x79 /* "quic key" */
+};
+static const unsigned char quic_v1_hp_label[] = {
+ 0x71, 0x75, 0x69, 0x63, 0x20, 0x68, 0x70 /* "quic hp" */
+};
+static const unsigned char quic_v1_ku_label[] = {
+ 0x71, 0x75, 0x69, 0x63, 0x20, 0x6b, 0x75 /* "quic ku" */
+};
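+
+/*
+ * Per RFC 9001 s. 5.1, each label above is used with the TLS 1.3
+ * HKDF-Expand-Label construction to derive material from an EL's traffic
+ * secret, e.g.:
+ *
+ *   key = HKDF-Expand-Label(secret, "quic key", "", cipher_key_len)
+ *   iv  = HKDF-Expand-Label(secret, "quic iv",  "", cipher_iv_len)
+ *   hp  = HKDF-Expand-Label(secret, "quic hp",  "", hp_key_len)
+ */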
+
+OSSL_QRL_ENC_LEVEL *ossl_qrl_enc_level_set_get(OSSL_QRL_ENC_LEVEL_SET *els,
+ uint32_t enc_level,
+ int require_prov)
+{
+ OSSL_QRL_ENC_LEVEL *el;
+
+ if (!ossl_assert(enc_level < QUIC_ENC_LEVEL_NUM))
+ return NULL;
+
+ el = &els->el[enc_level];
+
+ if (require_prov)
+ switch (el->state) {
+ case QRL_EL_STATE_PROV_NORMAL:
+ case QRL_EL_STATE_PROV_UPDATING:
+ case QRL_EL_STATE_PROV_COOLDOWN:
+ break;
+ default:
+ return NULL;
+ }
+
+ return el;
+}
+
+int ossl_qrl_enc_level_set_have_el(OSSL_QRL_ENC_LEVEL_SET *els,
+ uint32_t enc_level)
+{
+ OSSL_QRL_ENC_LEVEL *el = ossl_qrl_enc_level_set_get(els, enc_level, 0);
+
+ switch (el->state) {
+ case QRL_EL_STATE_UNPROV:
+ return 0;
+ case QRL_EL_STATE_PROV_NORMAL:
+ case QRL_EL_STATE_PROV_UPDATING:
+ case QRL_EL_STATE_PROV_COOLDOWN:
+ return 1;
+ default:
+ case QRL_EL_STATE_DISCARDED:
+ return -1;
+ }
+}
+
+int ossl_qrl_enc_level_set_has_keyslot(OSSL_QRL_ENC_LEVEL_SET *els,
+ uint32_t enc_level,
+ unsigned char tgt_state,
+ size_t keyslot)
+{
+ OSSL_QRL_ENC_LEVEL *el = ossl_qrl_enc_level_set_get(els, enc_level, 0);
+
+ if (!ossl_assert(el != NULL && keyslot < 2))
+ return 0;
+
+ switch (tgt_state) {
+ case QRL_EL_STATE_PROV_NORMAL:
+ case QRL_EL_STATE_PROV_UPDATING:
+ return enc_level == QUIC_ENC_LEVEL_1RTT || keyslot == 0;
+ case QRL_EL_STATE_PROV_COOLDOWN:
+ assert(enc_level == QUIC_ENC_LEVEL_1RTT);
+ return keyslot == (el->key_epoch & 1);
+ default:
+ return 0;
+ }
+}
+
+static void el_teardown_keyslot(OSSL_QRL_ENC_LEVEL_SET *els,
+ uint32_t enc_level,
+ size_t keyslot)
+{
+ OSSL_QRL_ENC_LEVEL *el = ossl_qrl_enc_level_set_get(els, enc_level, 0);
+
+ if (!ossl_qrl_enc_level_set_has_keyslot(els, enc_level, el->state, keyslot))
+ return;
+
+ if (el->cctx[keyslot] != NULL) {
+ EVP_CIPHER_CTX_free(el->cctx[keyslot]);
+ el->cctx[keyslot] = NULL;
+ }
+
+ OPENSSL_cleanse(el->iv[keyslot], sizeof(el->iv[keyslot]));
+}
+
+static int el_setup_keyslot(OSSL_QRL_ENC_LEVEL_SET *els,
+ uint32_t enc_level,
+ unsigned char tgt_state,
+ size_t keyslot,
+ const unsigned char *secret,
+ size_t secret_len)
+{
+ OSSL_QRL_ENC_LEVEL *el = ossl_qrl_enc_level_set_get(els, enc_level, 0);
+ unsigned char key[EVP_MAX_KEY_LENGTH];
+ size_t key_len = 0, iv_len = 0;
+ const char *cipher_name = NULL;
+ EVP_CIPHER *cipher = NULL;
+ EVP_CIPHER_CTX *cctx = NULL;
+
+ if (!ossl_assert(el != NULL
+ && ossl_qrl_enc_level_set_has_keyslot(els, enc_level,
+ tgt_state, keyslot))) {
+ ERR_raise(ERR_LIB_SSL, ERR_R_PASSED_INVALID_ARGUMENT);
+ return 0;
+ }
+
+ cipher_name = ossl_qrl_get_suite_cipher_name(el->suite_id);
+ iv_len = ossl_qrl_get_suite_cipher_iv_len(el->suite_id);
+ key_len = ossl_qrl_get_suite_cipher_key_len(el->suite_id);
+ if (cipher_name == NULL) {
+ ERR_raise(ERR_LIB_SSL, ERR_R_INTERNAL_ERROR);
+ return 0;
+ }
+
+ if (secret_len != ossl_qrl_get_suite_secret_len(el->suite_id)
+ || secret_len > EVP_MAX_KEY_LENGTH) {
+ ERR_raise(ERR_LIB_SSL, ERR_R_INTERNAL_ERROR);
+ return 0;
+ }
+
+ assert(el->cctx[keyslot] == NULL);
+
+ /* Derive "quic iv" key. */
+ if (!tls13_hkdf_expand_ex(el->libctx, el->propq,
+ el->md,
+ secret,
+ quic_v1_iv_label,
+ sizeof(quic_v1_iv_label),
+ NULL, 0,
+ el->iv[keyslot], iv_len, 1))
+ goto err;
+
+ /* Derive "quic key" key. */
+ if (!tls13_hkdf_expand_ex(el->libctx, el->propq,
+ el->md,
+ secret,
+ quic_v1_key_label,
+ sizeof(quic_v1_key_label),
+ NULL, 0,
+ key, key_len, 1))
+ goto err;
+
+ /* Create and initialise cipher context. */
+ if ((cipher = EVP_CIPHER_fetch(el->libctx, cipher_name, el->propq)) == NULL) {
+ ERR_raise(ERR_LIB_SSL, ERR_R_EVP_LIB);
+ goto err;
+ }
+
+ if ((cctx = EVP_CIPHER_CTX_new()) == NULL) {
+ ERR_raise(ERR_LIB_SSL, ERR_R_EVP_LIB);
+ goto err;
+ }
+
+ if (!ossl_assert(iv_len == (size_t)EVP_CIPHER_get_iv_length(cipher))
+ || !ossl_assert(key_len == (size_t)EVP_CIPHER_get_key_length(cipher))) {
+ ERR_raise(ERR_LIB_SSL, ERR_R_INTERNAL_ERROR);
+ goto err;
+ }
+
+ /* IV will be changed on RX/TX so we don't need to use a real value here. */
+ if (!EVP_CipherInit_ex(cctx, cipher, NULL, key, el->iv[keyslot], 0)) {
+ ERR_raise(ERR_LIB_SSL, ERR_R_EVP_LIB);
+ goto err;
+ }
+
+ el->cctx[keyslot] = cctx;
+
+ /* Zeroize intermediate keys. */
+ OPENSSL_cleanse(key, sizeof(key));
+ EVP_CIPHER_free(cipher);
+ return 1;
+
+ err:
+ EVP_CIPHER_CTX_free(cctx);
+ EVP_CIPHER_free(cipher);
+ OPENSSL_cleanse(el->iv[keyslot], sizeof(el->iv[keyslot]));
+ OPENSSL_cleanse(key, sizeof(key));
+ return 0;
+}
+
+int ossl_qrl_enc_level_set_provide_secret(OSSL_QRL_ENC_LEVEL_SET *els,
+ OSSL_LIB_CTX *libctx,
+ const char *propq,
+ uint32_t enc_level,
+ uint32_t suite_id,
+ EVP_MD *md,
+ const unsigned char *secret,
+ size_t secret_len,
+ unsigned char init_key_phase_bit,
+ int is_tx)
+{
+ OSSL_QRL_ENC_LEVEL *el = ossl_qrl_enc_level_set_get(els, enc_level, 0);
+ unsigned char ku_key[EVP_MAX_KEY_LENGTH], hpr_key[EVP_MAX_KEY_LENGTH];
+ int have_ks0 = 0, have_ks1 = 0, own_md = 0;
+ const char *md_name = ossl_qrl_get_suite_md_name(suite_id);
+ size_t hpr_key_len, init_keyslot;
+
+ if (el == NULL
+ || md_name == NULL
+ || init_key_phase_bit > 1 || is_tx < 0 || is_tx > 1
+ || (init_key_phase_bit > 0 && enc_level != QUIC_ENC_LEVEL_1RTT)) {
+ ERR_raise(ERR_LIB_SSL, ERR_R_PASSED_INVALID_ARGUMENT);
+ return 0;
+ }
+
+ if (enc_level == QUIC_ENC_LEVEL_INITIAL
+ && el->state == QRL_EL_STATE_PROV_NORMAL) {
+ /*
+ * Sometimes the INITIAL EL needs to be reprovisioned, namely if a
+ * connection retry occurs. Exceptionally, if the caller wants to
+ * reprovision the INITIAL EL, tear it down as usual and then override
+ * the state so it can be provisioned again.
+ */
+ ossl_qrl_enc_level_set_discard(els, enc_level);
+ el->state = QRL_EL_STATE_UNPROV;
+ }
+
+ if (el->state != QRL_EL_STATE_UNPROV) {
+ ERR_raise(ERR_LIB_SSL, ERR_R_INTERNAL_ERROR);
+ return 0;
+ }
+
+ init_keyslot = is_tx ? 0 : init_key_phase_bit;
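+ /*
+ * TX always ciphers with keyslot 0, whereas RX keeps keys for two
+ * successive epochs with the keyslot index matching an epoch's Key Phase
+ * bit (epoch & 1); hence an RX EL provisioned with an initial key phase
+ * of 1 starts in keyslot 1.
+ */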
+ hpr_key_len = ossl_qrl_get_suite_hdr_prot_key_len(suite_id);
+ if (hpr_key_len == 0) {
+ ERR_raise(ERR_LIB_SSL, ERR_R_INTERNAL_ERROR);
+ return 0;
+ }
+
+ if (md == NULL) {
+ md = EVP_MD_fetch(libctx, md_name, propq);
+ if (md == NULL) {
+ ERR_raise(ERR_LIB_SSL, ERR_R_EVP_LIB);
+ return 0;
+ }
+
+ own_md = 1;
+ }
+
+ el->libctx = libctx;
+ el->propq = propq;
+ el->md = md;
+ el->suite_id = suite_id;
+ el->tag_len = ossl_qrl_get_suite_cipher_tag_len(suite_id);
+ el->op_count = 0;
+ el->key_epoch = (uint64_t)init_key_phase_bit;
+ el->is_tx = (unsigned char)is_tx;
+
+ /* Derive "quic hp" key. */
+ if (!tls13_hkdf_expand_ex(libctx, propq,
+ md,
+ secret,
+ quic_v1_hp_label,
+ sizeof(quic_v1_hp_label),
+ NULL, 0,
+ hpr_key, hpr_key_len, 1))
+ goto err;
+
+ /* Setup KS0 (or KS1 if init_key_phase_bit), our initial keyslot. */
+ if (!el_setup_keyslot(els, enc_level, QRL_EL_STATE_PROV_NORMAL,
+ init_keyslot, secret, secret_len))
+ goto err;
+
+ have_ks0 = 1;
+
+ if (enc_level == QUIC_ENC_LEVEL_1RTT) {
+ /* Derive "quic ku" key (the epoch 1 secret). */
+ if (!tls13_hkdf_expand_ex(libctx, propq,
+ md,
+ secret,
+ quic_v1_ku_label,
+ sizeof(quic_v1_ku_label),
+ NULL, 0,
+ is_tx ? el->ku : ku_key, secret_len, 1))
+ goto err;
+
+ if (!is_tx) {
+ /* Setup KS1 (or KS0 if init_key_phase_bit), our next keyslot. */
+ if (!el_setup_keyslot(els, enc_level, QRL_EL_STATE_PROV_NORMAL,
+ !init_keyslot, ku_key, secret_len))
+ goto err;
+
+ have_ks1 = 1;
+
+ /* Derive NEXT "quic ku" key (the epoch 2 secret). */
+ if (!tls13_hkdf_expand_ex(libctx, propq,
+ md,
+ ku_key,
+ quic_v1_ku_label,
+ sizeof(quic_v1_ku_label),
+ NULL, 0,
+ el->ku, secret_len, 1))
+ goto err;
+ }
+ }
+
+ /* Setup header protection context. */
+ if (!ossl_quic_hdr_protector_init(&el->hpr,
+ libctx, propq,
+ ossl_qrl_get_suite_hdr_prot_cipher_id(suite_id),
+ hpr_key, hpr_key_len))
+ goto err;
+
+ /*
+ * We are now provisioned: KS0 has our current key (for key epoch 0), KS1
+ * has our next key (for key epoch 1, in the case of the 1-RTT EL only), and
+ * el->ku has the secret which will be used to generate keys for key epoch
+ * 2.
+ */
+ OPENSSL_cleanse(hpr_key, sizeof(hpr_key));
+ OPENSSL_cleanse(ku_key, sizeof(ku_key));
+ el->state = QRL_EL_STATE_PROV_NORMAL;
+ return 1;
+
+ err:
+ el->suite_id = 0;
+ el->md = NULL;
+ OPENSSL_cleanse(hpr_key, sizeof(hpr_key));
+ OPENSSL_cleanse(ku_key, sizeof(ku_key));
+ OPENSSL_cleanse(el->ku, sizeof(el->ku));
+ if (have_ks0)
+ el_teardown_keyslot(els, enc_level, init_keyslot);
+ if (have_ks1)
+ el_teardown_keyslot(els, enc_level, !init_keyslot);
+ if (own_md)
+ EVP_MD_free(md);
+ return 0;
+}
+
+int ossl_qrl_enc_level_set_key_update(OSSL_QRL_ENC_LEVEL_SET *els,
+ uint32_t enc_level)
+{
+ OSSL_QRL_ENC_LEVEL *el = ossl_qrl_enc_level_set_get(els, enc_level, 0);
+ size_t secret_len;
+ unsigned char new_ku[EVP_MAX_KEY_LENGTH];
+
+ if (el == NULL || !ossl_assert(enc_level == QUIC_ENC_LEVEL_1RTT)) {
+ ERR_raise(ERR_LIB_SSL, ERR_R_PASSED_INVALID_ARGUMENT);
+ return 0;
+ }
+
+ if (el->state != QRL_EL_STATE_PROV_NORMAL) {
+ ERR_raise(ERR_LIB_SSL, ERR_R_INTERNAL_ERROR);
+ return 0;
+ }
+
+ if (!el->is_tx) {
+ /*
+ * We already have the key for the next epoch, so just move to using it.
+ */
+ ++el->key_epoch;
+ el->state = QRL_EL_STATE_PROV_UPDATING;
+ return 1;
+ }
+
+ /*
+ * TX case. For the TX side we use only keyslot 0; it replaces the old key
+ * immediately.
+ */
+ secret_len = ossl_qrl_get_suite_secret_len(el->suite_id);
+
+ /* Derive NEXT "quic ku" key (the epoch n+1 secret). */
+ if (!tls13_hkdf_expand_ex(el->libctx, el->propq,
+ el->md, el->ku,
+ quic_v1_ku_label,
+ sizeof(quic_v1_ku_label),
+ NULL, 0,
+ new_ku, secret_len, 1))
+ return 0;
+
+ el_teardown_keyslot(els, enc_level, 0);
+
+ /* Setup keyslot for CURRENT "quic ku" key. */
+ if (!el_setup_keyslot(els, enc_level, QRL_EL_STATE_PROV_NORMAL,
+ 0, el->ku, secret_len))
+ return 0;
+
+ ++el->key_epoch;
+ el->op_count = 0;
+ memcpy(el->ku, new_ku, secret_len);
+ /* Remain in PROV_NORMAL state */
+ return 1;
+}
+
+/* Transitions from PROV_UPDATING to PROV_COOLDOWN. */
+int ossl_qrl_enc_level_set_key_update_done(OSSL_QRL_ENC_LEVEL_SET *els,
+ uint32_t enc_level)
+{
+ OSSL_QRL_ENC_LEVEL *el = ossl_qrl_enc_level_set_get(els, enc_level, 0);
+
+ if (el == NULL || !ossl_assert(enc_level == QUIC_ENC_LEVEL_1RTT)) {
+ ERR_raise(ERR_LIB_SSL, ERR_R_PASSED_INVALID_ARGUMENT);
+ return 0;
+ }
+
+ /* No new key yet, but erase key material to aid PFS. */
+ el_teardown_keyslot(els, enc_level, ~el->key_epoch & 1);
+ el->state = QRL_EL_STATE_PROV_COOLDOWN;
+ return 1;
+}
+
+/*
+ * Transitions from PROV_COOLDOWN to PROV_NORMAL. (If in PROV_UPDATING,
+ * auto-transitions to PROV_COOLDOWN first.)
+ */
+int ossl_qrl_enc_level_set_key_cooldown_done(OSSL_QRL_ENC_LEVEL_SET *els,
+ uint32_t enc_level)
+{
+ OSSL_QRL_ENC_LEVEL *el = ossl_qrl_enc_level_set_get(els, enc_level, 0);
+ size_t secret_len;
+ unsigned char new_ku[EVP_MAX_KEY_LENGTH];
+
+ if (el == NULL || !ossl_assert(enc_level == QUIC_ENC_LEVEL_1RTT)) {
+ ERR_raise(ERR_LIB_SSL, ERR_R_PASSED_INVALID_ARGUMENT);
+ return 0;
+ }
+
+ if (el->state == QRL_EL_STATE_PROV_UPDATING
+ && !ossl_qrl_enc_level_set_key_update_done(els, enc_level)) {
+ ERR_raise(ERR_LIB_SSL, ERR_R_INTERNAL_ERROR);
+ return 0;
+ }
+
+ if (el->state != QRL_EL_STATE_PROV_COOLDOWN) {
+ ERR_raise(ERR_LIB_SSL, ERR_R_INTERNAL_ERROR);
+ return 0;
+ }
+
+ secret_len = ossl_qrl_get_suite_secret_len(el->suite_id);
+
+ if (!el_setup_keyslot(els, enc_level, QRL_EL_STATE_PROV_NORMAL,
+ ~el->key_epoch & 1, el->ku, secret_len))
+ return 0;
+
+ /* Derive NEXT "quic ku" key (the epoch n+1 secret). */
+ if (!tls13_hkdf_expand_ex(el->libctx, el->propq,
+ el->md,
+ el->ku,
+ quic_v1_ku_label,
+ sizeof(quic_v1_ku_label),
+ NULL, 0,
+ new_ku, secret_len, 1)) {
+ el_teardown_keyslot(els, enc_level, ~el->key_epoch & 1);
+ return 0;
+ }
+
+ memcpy(el->ku, new_ku, secret_len);
+ el->state = QRL_EL_STATE_PROV_NORMAL;
+ return 1;
+}
+
+/*
+ * Discards keying material for a given encryption level. Transitions from any
+ * state to DISCARDED.
+ */
+void ossl_qrl_enc_level_set_discard(OSSL_QRL_ENC_LEVEL_SET *els,
+ uint32_t enc_level)
+{
+ OSSL_QRL_ENC_LEVEL *el = ossl_qrl_enc_level_set_get(els, enc_level, 0);
+
+ if (el == NULL || el->state == QRL_EL_STATE_DISCARDED)
+ return;
+
+ if (ossl_qrl_enc_level_set_have_el(els, enc_level) == 1) {
+ ossl_quic_hdr_protector_cleanup(&el->hpr);
+
+ el_teardown_keyslot(els, enc_level, 0);
+ el_teardown_keyslot(els, enc_level, 1);
+ }
+
+ EVP_MD_free(el->md);
+ el->md = NULL;
+ el->state = QRL_EL_STATE_DISCARDED;
+}
diff --git a/crypto/openssl/ssl/quic/quic_record_shared.h b/crypto/openssl/ssl/quic/quic_record_shared.h
new file mode 100644
index 000000000000..e8c9e28e92a4
--- /dev/null
+++ b/crypto/openssl/ssl/quic/quic_record_shared.h
@@ -0,0 +1,150 @@
+/*
+ * Copyright 2022 The OpenSSL Project Authors. All Rights Reserved.
+ *
+ * Licensed under the Apache License 2.0 (the "License"). You may not use
+ * this file except in compliance with the License. You can obtain a copy
+ * in the file LICENSE in the source distribution or at
+ * https://www.openssl.org/source/license.html
+ */
+
+#ifndef OSSL_QUIC_RECORD_SHARED_H
+# define OSSL_QUIC_RECORD_SHARED_H
+
+# include <openssl/ssl.h>
+# include "internal/quic_types.h"
+# include "internal/quic_wire_pkt.h"
+
+/*
+ * QUIC Record Layer EL Management Utilities
+ * =========================================
+ *
+ * This defines a structure for managing the cryptographic state at a given
+ * encryption level, as this functionality is shared between QRX and QTX. For
+ * QRL use only.
+ */
+
+/*
+ * States an EL can be in. The Updating and Cooldown states are used by RX only;
+ * a TX EL in the Provisioned state is always in the Normal substate.
+ *
+ * Key material is available if in the Provisioned state.
+ */
+#define QRL_EL_STATE_UNPROV 0 /* Unprovisioned (initial state) */
+#define QRL_EL_STATE_PROV_NORMAL 1 /* Provisioned - Normal */
+#define QRL_EL_STATE_PROV_UPDATING 2 /* Provisioned - Updating */
+#define QRL_EL_STATE_PROV_COOLDOWN 3 /* Provisioned - Cooldown */
+#define QRL_EL_STATE_DISCARDED 4 /* Discarded (terminal state) */
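+
+/*
+ * Sketch of the RX-side state machine implied by the functions declared
+ * below:
+ *
+ *   UNPROV --provide_secret--> PROV_NORMAL --key_update--> PROV_UPDATING
+ *   PROV_UPDATING --key_update_done--> PROV_COOLDOWN
+ *   PROV_COOLDOWN --key_cooldown_done--> PROV_NORMAL
+ *   (any state) --discard--> DISCARDED
+ *
+ * A TX-side key update replaces the key immediately, so a TX EL stays in
+ * PROV_NORMAL for its whole provisioned life.
+ */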
+
+typedef struct ossl_qrl_enc_level_st {
+ /*
+ * Cryptographic context used to apply and remove header protection from
+ * packet headers.
+ */
+ QUIC_HDR_PROTECTOR hpr;
+
+ /* Hash function used for key derivation. */
+ EVP_MD *md;
+
+ /* Context used for packet body ciphering. One for each keyslot. */
+ EVP_CIPHER_CTX *cctx[2];
+
+ OSSL_LIB_CTX *libctx;
+ const char *propq;
+
+ /*
+ * Key epoch, essentially the number of times we have done a key update.
+ *
+ * The least significant bit of this is therefore by definition the current
+ * Key Phase bit value.
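+ *
+ * For an RX EL the keyslot holding a given epoch's key follows the same
+ * parity: keys for epoch n are kept in keyslot (n & 1) (see the keyslot
+ * handling in quic_record_shared.c).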
+ */
+ uint64_t key_epoch;
+
+ /* Usage counter. The caller maintains this. Used by TX side only. */
+ uint64_t op_count;
+
+ /* QRL_SUITE_* value. */
+ uint32_t suite_id;
+
+ /* Length of authentication tag. */
+ uint32_t tag_len;
+
+ /* Current EL state. */
+ unsigned char state; /* QRL_EL_STATE_* */
+
+ /* 1 if for TX, else RX. Initialised when secret provided. */
+ unsigned char is_tx;
+
+ /* IV used to construct nonces for AEAD packet body ciphering. */
+ unsigned char iv[2][EVP_MAX_IV_LENGTH];
+
+ /*
+ * Secret for next key epoch.
+ */
+ unsigned char ku[EVP_MAX_KEY_LENGTH];
+} OSSL_QRL_ENC_LEVEL;
+
+typedef struct ossl_qrl_enc_level_set_st {
+ OSSL_QRL_ENC_LEVEL el[QUIC_ENC_LEVEL_NUM];
+} OSSL_QRL_ENC_LEVEL_SET;
+
+/*
+ * Returns 1 if we have key material for a given encryption level (that is, if
+ * we are in the PROVISIONED state), 0 if we do not yet have material (we are in
+ * the UNPROVISIONED state) and -1 if the EL is discarded (we are in the
+ * DISCARDED state).
+ */
+int ossl_qrl_enc_level_set_have_el(OSSL_QRL_ENC_LEVEL_SET *els,
+ uint32_t enc_level);
+
+/*
+ * Returns EL in a set. If enc_level is not a valid QUIC_ENC_LEVEL_* value,
+ * returns NULL. If require_prov is 1, returns NULL if the EL is not in
+ * the PROVISIONED state; otherwise, the returned EL may be in any state.
+ */
+OSSL_QRL_ENC_LEVEL *ossl_qrl_enc_level_set_get(OSSL_QRL_ENC_LEVEL_SET *els,
+ uint32_t enc_level,
+ int require_prov);
+
+/* Provide secret to an EL. md may be NULL. */
+int ossl_qrl_enc_level_set_provide_secret(OSSL_QRL_ENC_LEVEL_SET *els,
+ OSSL_LIB_CTX *libctx,
+ const char *propq,
+ uint32_t enc_level,
+ uint32_t suite_id,
+ EVP_MD *md,
+ const unsigned char *secret,
+ size_t secret_len,
+ unsigned char init_key_phase_bit,
+ int is_tx);
+
+/*
+ * Returns 1 if the given keyslot index is currently valid for a given EL and EL
+ * state.
+ */
+int ossl_qrl_enc_level_set_has_keyslot(OSSL_QRL_ENC_LEVEL_SET *els,
+ uint32_t enc_level,
+ unsigned char tgt_state,
+ size_t keyslot);
+
+/* Perform a key update. Transitions from PROV_NORMAL to PROV_UPDATING. */
+int ossl_qrl_enc_level_set_key_update(OSSL_QRL_ENC_LEVEL_SET *els,
+ uint32_t enc_level);
+
+/* Transitions from PROV_UPDATING to PROV_COOLDOWN. */
+int ossl_qrl_enc_level_set_key_update_done(OSSL_QRL_ENC_LEVEL_SET *els,
+ uint32_t enc_level);
+
+/*
+ * Transitions from PROV_COOLDOWN to PROV_NORMAL. (If in PROV_UPDATING,
+ * auto-transitions to PROV_COOLDOWN first.)
+ */
+int ossl_qrl_enc_level_set_key_cooldown_done(OSSL_QRL_ENC_LEVEL_SET *els,
+ uint32_t enc_level);
+
+/*
+ * Discard an EL. No secret can be provided for the EL ever again.
+ */
+void ossl_qrl_enc_level_set_discard(OSSL_QRL_ENC_LEVEL_SET *els,
+ uint32_t enc_level);
+
+#endif
diff --git a/crypto/openssl/ssl/quic/quic_record_tx.c b/crypto/openssl/ssl/quic/quic_record_tx.c
new file mode 100644
index 000000000000..ae37353a9b26
--- /dev/null
+++ b/crypto/openssl/ssl/quic/quic_record_tx.c
@@ -0,0 +1,1105 @@
+/*
+ * Copyright 2022-2025 The OpenSSL Project Authors. All Rights Reserved.
+ *
+ * Licensed under the Apache License 2.0 (the "License"). You may not use
+ * this file except in compliance with the License. You can obtain a copy
+ * in the file LICENSE in the source distribution or at
+ * https://www.openssl.org/source/license.html
+ */
+
+#include "internal/quic_record_tx.h"
+#include "internal/qlog_event_helpers.h"
+#include "internal/bio_addr.h"
+#include "internal/common.h"
+#include "quic_record_shared.h"
+#include "internal/list.h"
+#include "../ssl_local.h"
+
+/*
+ * TXE
+ * ===
+ * Encrypted packets awaiting transmission are kept in TX Entries (TXEs), which
+ * are queued in linked lists just like the RXEs on the QRX side.
+ */
+typedef struct txe_st TXE;
+
+struct txe_st {
+ OSSL_LIST_MEMBER(txe, TXE);
+ size_t data_len, alloc_len;
+
+ /*
+ * Destination and local addresses, as applicable. Both of these are only
+ * used if the family is not AF_UNSPEC.
+ */
+ BIO_ADDR peer, local;
+
+ /*
+ * alloc_len allocated bytes (of which data_len bytes are valid) follow this
+ * structure.
+ */
+};
+
+DEFINE_LIST_OF(txe, TXE);
+typedef OSSL_LIST(txe) TXE_LIST;
+
+static ossl_inline unsigned char *txe_data(const TXE *e)
+{
+ return (unsigned char *)(e + 1);
+}
+
+/*
+ * QTX
+ * ===
+ */
+struct ossl_qtx_st {
+ OSSL_LIB_CTX *libctx;
+ const char *propq;
+
+ /* Per encryption-level state. */
+ OSSL_QRL_ENC_LEVEL_SET el_set;
+
+ /* TX BIO. */
+ BIO *bio;
+
+ /* QLOG instance retrieval callback if in use, or NULL. */
+ QLOG *(*get_qlog_cb)(void *arg);
+ void *get_qlog_cb_arg;
+
+ /* TX maximum datagram payload length. */
+ size_t mdpl;
+
+ /*
+ * List of TXEs which are not currently in use. These are moved to the
+ * pending list (possibly via the TXE under construction, cons, first) as
+ * they are filled.
+ */
+ TXE_LIST free;
+
+ /*
+ * List of TXEs which are filled with completed datagrams ready to be
+ * transmitted.
+ */
+ TXE_LIST pending;
+ size_t pending_count; /* items in list */
+ size_t pending_bytes; /* sum(txe->data_len) in pending */
+
+ /*
+ * TXE which is under construction for coalescing purposes, if any.
+ * This TXE is neither on the free nor pending list. Once the datagram
+ * is completed, it is moved to the pending list.
+ */
+ TXE *cons;
+ size_t cons_count; /* num packets */
+
+ /*
+ * Number of packets transmitted in this key epoch. Used to enforce AEAD
+ * confidentiality limit.
+ */
+ uint64_t epoch_pkt_count;
+
+ /* Datagram counter. Increases monotonically per datagram (not per packet). */
+ uint64_t datagram_count;
+
+ ossl_mutate_packet_cb mutatecb;
+ ossl_finish_mutate_cb finishmutatecb;
+ void *mutatearg;
+
+ /* Message callback related arguments */
+ ossl_msg_cb msg_callback;
+ void *msg_callback_arg;
+ SSL *msg_callback_ssl;
+};
+
+/* Instantiates a new QTX. */
+OSSL_QTX *ossl_qtx_new(const OSSL_QTX_ARGS *args)
+{
+ OSSL_QTX *qtx;
+
+ if (args->mdpl < QUIC_MIN_INITIAL_DGRAM_LEN)
+ return NULL;
+
+ qtx = OPENSSL_zalloc(sizeof(OSSL_QTX));
+ if (qtx == NULL)
+ return NULL;
+
+ qtx->libctx = args->libctx;
+ qtx->propq = args->propq;
+ qtx->bio = args->bio;
+ qtx->mdpl = args->mdpl;
+ qtx->get_qlog_cb = args->get_qlog_cb;
+ qtx->get_qlog_cb_arg = args->get_qlog_cb_arg;
+
+ return qtx;
+}
+
+static void qtx_cleanup_txl(TXE_LIST *l)
+{
+ TXE *e, *enext;
+
+ for (e = ossl_list_txe_head(l); e != NULL; e = enext) {
+ enext = ossl_list_txe_next(e);
+ OPENSSL_free(e);
+ }
+}
+
+/* Frees the QTX. */
+void ossl_qtx_free(OSSL_QTX *qtx)
+{
+ uint32_t i;
+
+ if (qtx == NULL)
+ return;
+
+ /* Free TXE queue data. */
+ qtx_cleanup_txl(&qtx->pending);
+ qtx_cleanup_txl(&qtx->free);
+ OPENSSL_free(qtx->cons);
+
+ /* Drop keying material and crypto resources. */
+ for (i = 0; i < QUIC_ENC_LEVEL_NUM; ++i)
+ ossl_qrl_enc_level_set_discard(&qtx->el_set, i);
+
+ OPENSSL_free(qtx);
+}
+
+/* Set mutator callbacks for test framework support */
+void ossl_qtx_set_mutator(OSSL_QTX *qtx, ossl_mutate_packet_cb mutatecb,
+ ossl_finish_mutate_cb finishmutatecb, void *mutatearg)
+{
+ qtx->mutatecb = mutatecb;
+ qtx->finishmutatecb = finishmutatecb;
+ qtx->mutatearg = mutatearg;
+}
+
+void ossl_qtx_set_qlog_cb(OSSL_QTX *qtx, QLOG *(*get_qlog_cb)(void *arg),
+ void *get_qlog_cb_arg)
+{
+ qtx->get_qlog_cb = get_qlog_cb;
+ qtx->get_qlog_cb_arg = get_qlog_cb_arg;
+}
+
+int ossl_qtx_provide_secret(OSSL_QTX *qtx,
+ uint32_t enc_level,
+ uint32_t suite_id,
+ EVP_MD *md,
+ const unsigned char *secret,
+ size_t secret_len)
+{
+ if (enc_level >= QUIC_ENC_LEVEL_NUM)
+ return 0;
+
+ return ossl_qrl_enc_level_set_provide_secret(&qtx->el_set,
+ qtx->libctx,
+ qtx->propq,
+ enc_level,
+ suite_id,
+ md,
+ secret,
+ secret_len,
+ 0,
+ /*is_tx=*/1);
+}
+
+int ossl_qtx_discard_enc_level(OSSL_QTX *qtx, uint32_t enc_level)
+{
+ if (enc_level >= QUIC_ENC_LEVEL_NUM)
+ return 0;
+
+ ossl_qrl_enc_level_set_discard(&qtx->el_set, enc_level);
+ return 1;
+}
+
+int ossl_qtx_is_enc_level_provisioned(OSSL_QTX *qtx, uint32_t enc_level)
+{
+ return ossl_qrl_enc_level_set_get(&qtx->el_set, enc_level, 1) != NULL;
+}
+
+/* Allocate a new TXE. */
+static TXE *qtx_alloc_txe(size_t alloc_len)
+{
+ TXE *txe;
+
+ if (alloc_len >= SIZE_MAX - sizeof(TXE))
+ return NULL;
+
+ txe = OPENSSL_malloc(sizeof(TXE) + alloc_len);
+ if (txe == NULL)
+ return NULL;
+
+ ossl_list_txe_init_elem(txe);
+ txe->alloc_len = alloc_len;
+ txe->data_len = 0;
+ return txe;
+}
+
+/*
+ * Ensures there is at least one TXE in the free list, allocating a new entry
+ * if necessary. The returned TXE is in the free list; it is not popped.
+ *
+ * alloc_len is a hint which may be used to determine the TXE size if allocation
+ * is necessary. Returns NULL on allocation failure.
+ */
+static TXE *qtx_ensure_free_txe(OSSL_QTX *qtx, size_t alloc_len)
+{
+ TXE *txe;
+
+ txe = ossl_list_txe_head(&qtx->free);
+ if (txe != NULL)
+ return txe;
+
+ txe = qtx_alloc_txe(alloc_len);
+ if (txe == NULL)
+ return NULL;
+
+ ossl_list_txe_insert_tail(&qtx->free, txe);
+ return txe;
+}
+
+/*
+ * Resize the data buffer attached to a TXE to be n bytes in size. The address
+ * of the TXE might change; the new address is returned, or NULL on failure, in
+ * which case the original TXE remains valid.
+ */
+static TXE *qtx_resize_txe(OSSL_QTX *qtx, TXE_LIST *txl, TXE *txe, size_t n)
+{
+ TXE *txe2, *p;
+
+ /* Should never happen. */
+ if (txe == NULL)
+ return NULL;
+
+ if (n >= SIZE_MAX - sizeof(TXE))
+ return NULL;
+
+ /* Remove the item from the list to avoid accessing freed memory */
+ p = ossl_list_txe_prev(txe);
+ ossl_list_txe_remove(txl, txe);
+
+ /*
+ * NOTE: We do not clear the old memory, although it may still contain
+ * packet data.
+ */
+ txe2 = OPENSSL_realloc(txe, sizeof(TXE) + n);
+ if (txe2 == NULL) {
+ if (p == NULL)
+ ossl_list_txe_insert_head(txl, txe);
+ else
+ ossl_list_txe_insert_after(txl, p, txe);
+ return NULL;
+ }
+
+ if (p == NULL)
+ ossl_list_txe_insert_head(txl, txe2);
+ else
+ ossl_list_txe_insert_after(txl, p, txe2);
+
+ if (qtx->cons == txe)
+ qtx->cons = txe2;
+
+ txe2->alloc_len = n;
+ return txe2;
+}
+
+/*
+ * Ensure the data buffer attached to a TXE is at least n bytes in size.
+ * Returns NULL on failure.
+ */
+static TXE *qtx_reserve_txe(OSSL_QTX *qtx, TXE_LIST *txl,
+ TXE *txe, size_t n)
+{
+ if (txe->alloc_len >= n)
+ return txe;
+
+ return qtx_resize_txe(qtx, txl, txe, n);
+}
+
+/* Move a TXE from pending to free. */
+static void qtx_pending_to_free(OSSL_QTX *qtx)
+{
+ TXE *txe = ossl_list_txe_head(&qtx->pending);
+
+ assert(txe != NULL);
+ ossl_list_txe_remove(&qtx->pending, txe);
+ --qtx->pending_count;
+ qtx->pending_bytes -= txe->data_len;
+ ossl_list_txe_insert_tail(&qtx->free, txe);
+}
+
+/* Add a TXE not currently in any list to the pending list. */
+static void qtx_add_to_pending(OSSL_QTX *qtx, TXE *txe)
+{
+ ossl_list_txe_insert_tail(&qtx->pending, txe);
+ ++qtx->pending_count;
+ qtx->pending_bytes += txe->data_len;
+}
+
+struct iovec_cur {
+ const OSSL_QTX_IOVEC *iovec;
+ size_t num_iovec, idx, byte_off, bytes_remaining;
+};
+
+static size_t iovec_total_bytes(const OSSL_QTX_IOVEC *iovec,
+ size_t num_iovec)
+{
+ size_t i, l = 0;
+
+ for (i = 0; i < num_iovec; ++i)
+ l += iovec[i].buf_len;
+
+ return l;
+}
+
+static void iovec_cur_init(struct iovec_cur *cur,
+ const OSSL_QTX_IOVEC *iovec,
+ size_t num_iovec)
+{
+ cur->iovec = iovec;
+ cur->num_iovec = num_iovec;
+ cur->idx = 0;
+ cur->byte_off = 0;
+ cur->bytes_remaining = iovec_total_bytes(iovec, num_iovec);
+}
+
+/*
+ * Get an extent of bytes from the iovec cursor. *buf is set to point to the
+ * buffer and the number of bytes in length of the buffer is returned. This
+ * value may be less than the max_buf_len argument. If no more data is
+ * available, returns 0.
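+ *
+ * A typical caller loop (consume() being a hypothetical sink) looks like:
+ *
+ *   while ((l = iovec_cur_get_buffer(&cur, &buf, SIZE_MAX)) > 0)
+ *       consume(buf, l);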
+ */
+static size_t iovec_cur_get_buffer(struct iovec_cur *cur,
+ const unsigned char **buf,
+ size_t max_buf_len)
+{
+ size_t l;
+
+ if (max_buf_len == 0) {
+ *buf = NULL;
+ return 0;
+ }
+
+ for (;;) {
+ if (cur->idx >= cur->num_iovec)
+ return 0;
+
+ l = cur->iovec[cur->idx].buf_len - cur->byte_off;
+ if (l > max_buf_len)
+ l = max_buf_len;
+
+ if (l > 0) {
+ *buf = cur->iovec[cur->idx].buf + cur->byte_off;
+ cur->byte_off += l;
+ cur->bytes_remaining -= l;
+ return l;
+ }
+
+ /*
+ * Zero-length iovec entry or we already consumed all of it, try the
+ * next iovec.
+ */
+ ++cur->idx;
+ cur->byte_off = 0;
+ }
+}
+
+/* Determines the size of the AEAD output given the input size. */
+int ossl_qtx_calculate_ciphertext_payload_len(OSSL_QTX *qtx, uint32_t enc_level,
+ size_t plaintext_len,
+ size_t *ciphertext_len)
+{
+ OSSL_QRL_ENC_LEVEL *el
+ = ossl_qrl_enc_level_set_get(&qtx->el_set, enc_level, 1);
+ size_t tag_len;
+
+ if (el == NULL) {
+ *ciphertext_len = 0;
+ return 0;
+ }
+
+ /*
+ * We currently only support ciphers with a 1:1 mapping between plaintext
+ * and ciphertext size, save for the authentication tag.
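+ *
+ * For example, AES-128-GCM appends a 16-byte tag, so a 1000 byte
+ * plaintext yields a 1016 byte ciphertext payload.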
+ */
+ tag_len = ossl_qrl_get_suite_cipher_tag_len(el->suite_id);
+
+ *ciphertext_len = plaintext_len + tag_len;
+ return 1;
+}
+
+/* Determines the size of the AEAD input given the output size. */
+int ossl_qtx_calculate_plaintext_payload_len(OSSL_QTX *qtx, uint32_t enc_level,
+ size_t ciphertext_len,
+ size_t *plaintext_len)
+{
+ OSSL_QRL_ENC_LEVEL *el
+ = ossl_qrl_enc_level_set_get(&qtx->el_set, enc_level, 1);
+ size_t tag_len;
+
+ if (el == NULL) {
+ *plaintext_len = 0;
+ return 0;
+ }
+
+ tag_len = ossl_qrl_get_suite_cipher_tag_len(el->suite_id);
+
+ if (ciphertext_len <= tag_len) {
+ *plaintext_len = 0;
+ return 0;
+ }
+
+ *plaintext_len = ciphertext_len - tag_len;
+ return 1;
+}
+
+/* Any other error (including packet being too big for MDPL). */
+#define QTX_FAIL_GENERIC (-1)
+
+/*
+ * Returned where there is insufficient room in the datagram to write the
+ * packet.
+ */
+#define QTX_FAIL_INSUFFICIENT_LEN (-2)
+
+static int qtx_write_hdr(OSSL_QTX *qtx, const QUIC_PKT_HDR *hdr, TXE *txe,
+ QUIC_PKT_HDR_PTRS *ptrs)
+{
+ WPACKET wpkt;
+ size_t l = 0;
+ unsigned char *data = txe_data(txe) + txe->data_len;
+
+ if (!WPACKET_init_static_len(&wpkt, data, txe->alloc_len - txe->data_len, 0))
+ return 0;
+
+ if (!ossl_quic_wire_encode_pkt_hdr(&wpkt, hdr->dst_conn_id.id_len,
+ hdr, ptrs)
+ || !WPACKET_get_total_written(&wpkt, &l)) {
+ WPACKET_finish(&wpkt);
+ return 0;
+ }
+ WPACKET_finish(&wpkt);
+
+ if (qtx->msg_callback != NULL)
+ qtx->msg_callback(1, OSSL_QUIC1_VERSION, SSL3_RT_QUIC_PACKET, data, l,
+ qtx->msg_callback_ssl, qtx->msg_callback_arg);
+
+ txe->data_len += l;
+
+ return 1;
+}
+
+static int qtx_encrypt_into_txe(OSSL_QTX *qtx, struct iovec_cur *cur, TXE *txe,
+ uint32_t enc_level, QUIC_PN pn,
+ const unsigned char *hdr, size_t hdr_len,
+ QUIC_PKT_HDR_PTRS *ptrs)
+{
+ int l = 0, l2 = 0, nonce_len;
+ OSSL_QRL_ENC_LEVEL *el
+ = ossl_qrl_enc_level_set_get(&qtx->el_set, enc_level, 1);
+ unsigned char nonce[EVP_MAX_IV_LENGTH];
+ size_t i;
+ EVP_CIPHER_CTX *cctx = NULL;
+
+ /* We should not have been called if we do not have key material. */
+ if (!ossl_assert(el != NULL)) {
+ ERR_raise(ERR_LIB_SSL, ERR_R_INTERNAL_ERROR);
+ return 0;
+ }
+
+ /*
+ * Have we already encrypted the maximum number of packets using the current
+ * key?
+ */
+ if (el->op_count >= ossl_qrl_get_suite_max_pkt(el->suite_id)) {
+ ERR_raise(ERR_LIB_SSL, SSL_R_MAXIMUM_ENCRYPTED_PKTS_REACHED);
+ return 0;
+ }
+
+ /*
+ * TX key update is simpler than for RX; once we initiate a key update, we
+ * never need the old keys, as we never deliberately send a packet with old
+ * keys. Thus the EL always uses keyslot 0 for the TX side.
+ */
+ cctx = el->cctx[0];
+ if (!ossl_assert(cctx != NULL)) {
+ ERR_raise(ERR_LIB_SSL, ERR_R_INTERNAL_ERROR);
+ return 0;
+ }
+
+ /* Construct nonce (nonce=IV ^ PN). */
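+ /*
+ * Per RFC 9001 s. 5.3, the packet number is left-padded with zeroes to
+ * the IV length and XORed into the IV; e.g. with a 12-byte IV and
+ * PN 0x2a, only the last IV byte changes: nonce[11] = iv[11] ^ 0x2a.
+ */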
+ nonce_len = EVP_CIPHER_CTX_get_iv_length(cctx);
+ if (!ossl_assert(nonce_len >= (int)sizeof(QUIC_PN))) {
+ ERR_raise(ERR_LIB_SSL, ERR_R_INTERNAL_ERROR);
+ return 0;
+ }
+
+ memcpy(nonce, el->iv[0], (size_t)nonce_len);
+ for (i = 0; i < sizeof(QUIC_PN); ++i)
+ nonce[nonce_len - i - 1] ^= (unsigned char)(pn >> (i * 8));
+
+ /* Type and key will already have been set up; feed the IV. */
+ if (EVP_CipherInit_ex(cctx, NULL, NULL, NULL, nonce, /*enc=*/1) != 1) {
+ ERR_raise(ERR_LIB_SSL, ERR_R_EVP_LIB);
+ return 0;
+ }
+
+ /* Feed AAD data. */
+ if (EVP_CipherUpdate(cctx, NULL, &l, hdr, hdr_len) != 1) {
+ ERR_raise(ERR_LIB_SSL, ERR_R_EVP_LIB);
+ return 0;
+ }
+
+ /* Encrypt plaintext directly into TXE. */
+ for (;;) {
+ const unsigned char *src;
+ size_t src_len;
+
+ src_len = iovec_cur_get_buffer(cur, &src, SIZE_MAX);
+ if (src_len == 0)
+ break;
+
+ if (EVP_CipherUpdate(cctx, txe_data(txe) + txe->data_len,
+ &l, src, src_len) != 1) {
+ ERR_raise(ERR_LIB_SSL, ERR_R_EVP_LIB);
+ return 0;
+ }
+
+#ifdef FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION
+ /* Ignore what we just encrypted and overwrite it with the plaintext */
+ memcpy(txe_data(txe) + txe->data_len, src, l);
+#endif
+
+ assert(l > 0 && src_len == (size_t)l);
+ txe->data_len += src_len;
+ }
+
+ /* Finalise and get tag. */
+ if (EVP_CipherFinal_ex(cctx, NULL, &l2) != 1) {
+ ERR_raise(ERR_LIB_SSL, ERR_R_EVP_LIB);
+ return 0;
+ }
+
+ if (EVP_CIPHER_CTX_ctrl(cctx, EVP_CTRL_AEAD_GET_TAG,
+ el->tag_len, txe_data(txe) + txe->data_len) != 1) {
+ ERR_raise(ERR_LIB_SSL, ERR_R_EVP_LIB);
+ return 0;
+ }
+
+ txe->data_len += el->tag_len;
+
+ /* Apply header protection. */
+ if (!ossl_quic_hdr_protector_encrypt(&el->hpr, ptrs))
+ return 0;
+
+ ++el->op_count;
+ return 1;
+}
+
+/*
+ * Append a packet to the TXE buffer, serializing and encrypting it in the
+ * process.
+ */
+static int qtx_write(OSSL_QTX *qtx, const OSSL_QTX_PKT *pkt, TXE *txe,
+ uint32_t enc_level, QUIC_PKT_HDR *hdr,
+ const OSSL_QTX_IOVEC *iovec, size_t num_iovec)
+{
+ int ret, needs_encrypt;
+ size_t hdr_len, pred_hdr_len, payload_len, pkt_len, space_left;
+ size_t min_len, orig_data_len;
+ struct iovec_cur cur;
+ QUIC_PKT_HDR_PTRS ptrs;
+ unsigned char *hdr_start;
+ OSSL_QRL_ENC_LEVEL *el = NULL;
+
+ /*
+ * Determine if the packet needs encryption and the minimum conceivable
+ * serialization length.
+ */
+ if (!ossl_quic_pkt_type_is_encrypted(hdr->type)) {
+ needs_encrypt = 0;
+ min_len = QUIC_MIN_VALID_PKT_LEN;
+ } else {
+ needs_encrypt = 1;
+ min_len = QUIC_MIN_VALID_PKT_LEN_CRYPTO;
+ el = ossl_qrl_enc_level_set_get(&qtx->el_set, enc_level, 1);
+ if (!ossl_assert(el != NULL)) /* should already have been checked */
+ return 0;
+ }
+
+ orig_data_len = txe->data_len;
+ space_left = txe->alloc_len - txe->data_len;
+ if (space_left < min_len) {
+ /* Not even a possibility of it fitting. */
+ ret = QTX_FAIL_INSUFFICIENT_LEN;
+ goto err;
+ }
+
+ /* Set some fields in the header we are responsible for. */
+ if (hdr->type == QUIC_PKT_TYPE_1RTT)
+ hdr->key_phase = (unsigned char)(el->key_epoch & 1);
+
+ /* Walk the iovecs to determine actual input payload length. */
+ iovec_cur_init(&cur, iovec, num_iovec);
+
+ if (cur.bytes_remaining == 0) {
+ /* No zero-length payloads allowed. */
+ ret = QTX_FAIL_GENERIC;
+ goto err;
+ }
+
+ /* Determine encrypted payload length. */
+ if (needs_encrypt)
+ ossl_qtx_calculate_ciphertext_payload_len(qtx, enc_level,
+ cur.bytes_remaining,
+ &payload_len);
+ else
+ payload_len = cur.bytes_remaining;
+
+ /* Determine header length. */
+ hdr->data = NULL;
+ hdr->len = payload_len;
+ pred_hdr_len = ossl_quic_wire_get_encoded_pkt_hdr_len(hdr->dst_conn_id.id_len,
+ hdr);
+ if (pred_hdr_len == 0) {
+ ret = QTX_FAIL_GENERIC;
+ goto err;
+ }
+
+ /* We now definitively know our packet length. */
+ pkt_len = pred_hdr_len + payload_len;
+
+ if (pkt_len > space_left) {
+ ret = QTX_FAIL_INSUFFICIENT_LEN;
+ goto err;
+ }
+
+ if (ossl_quic_pkt_type_has_pn(hdr->type)) {
+ if (!ossl_quic_wire_encode_pkt_hdr_pn(pkt->pn,
+ hdr->pn,
+ hdr->pn_len)) {
+ ret = QTX_FAIL_GENERIC;
+ goto err;
+ }
+ }
+
+ /* Append the header to the TXE. */
+ hdr_start = txe_data(txe) + txe->data_len;
+ if (!qtx_write_hdr(qtx, hdr, txe, &ptrs)) {
+ ret = QTX_FAIL_GENERIC;
+ goto err;
+ }
+
+ hdr_len = (txe_data(txe) + txe->data_len) - hdr_start;
+ assert(hdr_len == pred_hdr_len);
+
+ if (!needs_encrypt) {
+ /* Just copy the payload across. */
+ const unsigned char *src;
+ size_t src_len;
+
+ for (;;) {
+ /* Buffer length has already been checked above. */
+ src_len = iovec_cur_get_buffer(&cur, &src, SIZE_MAX);
+ if (src_len == 0)
+ break;
+
+ memcpy(txe_data(txe) + txe->data_len, src, src_len);
+ txe->data_len += src_len;
+ }
+ } else {
+ /* Encrypt into TXE. */
+ if (!qtx_encrypt_into_txe(qtx, &cur, txe, enc_level, pkt->pn,
+ hdr_start, hdr_len, &ptrs)) {
+ ret = QTX_FAIL_GENERIC;
+ goto err;
+ }
+
+ assert(txe->data_len - orig_data_len == pkt_len);
+ }
+
+ return 1;
+
+err:
+ /*
+ * Restore original length so we don't leave a half-written packet in the
+ * TXE.
+ */
+ txe->data_len = orig_data_len;
+ return ret;
+}
+
+static TXE *qtx_ensure_cons(OSSL_QTX *qtx)
+{
+ TXE *txe = qtx->cons;
+
+ if (txe != NULL)
+ return txe;
+
+ txe = qtx_ensure_free_txe(qtx, qtx->mdpl);
+ if (txe == NULL)
+ return NULL;
+
+ ossl_list_txe_remove(&qtx->free, txe);
+ qtx->cons = txe;
+ qtx->cons_count = 0;
+ txe->data_len = 0;
+ return txe;
+}
+
+static QLOG *qtx_get_qlog(OSSL_QTX *qtx)
+{
+ if (qtx->get_qlog_cb == NULL)
+ return NULL;
+
+ return qtx->get_qlog_cb(qtx->get_qlog_cb_arg);
+}
+
+static int qtx_mutate_write(OSSL_QTX *qtx, const OSSL_QTX_PKT *pkt, TXE *txe,
+ uint32_t enc_level)
+{
+ int ret;
+ QUIC_PKT_HDR *hdr;
+ const OSSL_QTX_IOVEC *iovec;
+ size_t num_iovec;
+
+ /* If we are running tests then the mutation callback may be non-NULL */
+ if (qtx->mutatecb != NULL) {
+ if (!qtx->mutatecb(pkt->hdr, pkt->iovec, pkt->num_iovec, &hdr,
+ &iovec, &num_iovec, qtx->mutatearg))
+ return QTX_FAIL_GENERIC;
+ } else {
+ hdr = pkt->hdr;
+ iovec = pkt->iovec;
+ num_iovec = pkt->num_iovec;
+ }
+
+ ret = qtx_write(qtx, pkt, txe, enc_level,
+ hdr, iovec, num_iovec);
+ if (ret == 1)
+ ossl_qlog_event_transport_packet_sent(qtx_get_qlog(qtx), hdr, pkt->pn,
+ iovec, num_iovec,
+ qtx->datagram_count);
+
+ if (qtx->finishmutatecb != NULL)
+ qtx->finishmutatecb(qtx->mutatearg);
+
+ return ret;
+}
+
+static int addr_eq(const BIO_ADDR *a, const BIO_ADDR *b)
+{
+ return ((a == NULL || BIO_ADDR_family(a) == AF_UNSPEC)
+ && (b == NULL || BIO_ADDR_family(b) == AF_UNSPEC))
+ || (a != NULL && b != NULL && memcmp(a, b, sizeof(*a)) == 0);
+}
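+
+/*
+ * Illustrative caller pattern (not taken from this file; pkts/n are
+ * hypothetical) for coalescing several packets into a single datagram:
+ *
+ *   for (i = 0; i < n; ++i) {
+ *       pkts[i].flags |= OSSL_QTX_PKT_FLAG_COALESCE;
+ *       if (!ossl_qtx_write_pkt(qtx, &pkts[i]))
+ *           goto err;
+ *   }
+ *
+ *   ossl_qtx_finish_dgram(qtx);        (seal the datagram)
+ *   (void)ossl_qtx_flush_net(qtx);     (hand datagrams to the network BIO)
+ */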
+
+int ossl_qtx_write_pkt(OSSL_QTX *qtx, const OSSL_QTX_PKT *pkt)
+{
+ int ret;
+ int coalescing = (pkt->flags & OSSL_QTX_PKT_FLAG_COALESCE) != 0;
+ int was_coalescing;
+ TXE *txe;
+ uint32_t enc_level;
+
+ /* Must have EL configured, must have header. */
+ if (pkt->hdr == NULL)
+ return 0;
+
+ enc_level = ossl_quic_pkt_type_to_enc_level(pkt->hdr->type);
+
+ /* Some packet types must be in a packet all by themselves. */
+ if (!ossl_quic_pkt_type_can_share_dgram(pkt->hdr->type))
+ ossl_qtx_finish_dgram(qtx);
+ else if (enc_level >= QUIC_ENC_LEVEL_NUM
+ || ossl_qrl_enc_level_set_have_el(&qtx->el_set, enc_level) != 1) {
+ /* All other packet types are encrypted. */
+ return 0;
+ }
+
+ was_coalescing = (qtx->cons != NULL && qtx->cons->data_len > 0);
+ if (was_coalescing)
+ if (!addr_eq(&qtx->cons->peer, pkt->peer)
+ || !addr_eq(&qtx->cons->local, pkt->local)) {
+ /* Must stop coalescing if addresses have changed */
+ ossl_qtx_finish_dgram(qtx);
+ was_coalescing = 0;
+ }
+
+ for (;;) {
+ /*
+ * Start a new coalescing session or continue using the existing one and
+ * serialize/encrypt the packet. We always encrypt packets as soon as
+ * our caller gives them to us, which relieves the caller of any need to
+ * keep the plaintext around.
+ */
+ txe = qtx_ensure_cons(qtx);
+ if (txe == NULL)
+ return 0; /* allocation failure */
+
+ /*
+ * Ensure the TXE has at least MDPL bytes allocated. Reallocation should
+ * only be necessary here if the MDPL has increased.
+ */
+ if (!qtx_reserve_txe(qtx, NULL, txe, qtx->mdpl))
+ return 0;
+
+ if (!was_coalescing) {
+ /* Set addresses in TXE. */
+ if (pkt->peer != NULL) {
+ if (!BIO_ADDR_copy(&txe->peer, pkt->peer))
+ return 0;
+ } else {
+ BIO_ADDR_clear(&txe->peer);
+ }
+
+ if (pkt->local != NULL) {
+ if (!BIO_ADDR_copy(&txe->local, pkt->local))
+ return 0;
+ } else {
+ BIO_ADDR_clear(&txe->local);
+ }
+ }
+
+ ret = qtx_mutate_write(qtx, pkt, txe, enc_level);
+ if (ret == 1) {
+ break;
+ } else if (ret == QTX_FAIL_INSUFFICIENT_LEN) {
+ if (was_coalescing) {
+ /*
+ * We failed due to insufficient length, so end the current
+ * datagram and try again.
+ */
+ ossl_qtx_finish_dgram(qtx);
+ was_coalescing = 0;
+ } else {
+ /*
+ * We failed due to insufficient length even though we were not
+ * coalescing (we started with an empty datagram), so any future
+ * attempt to write this packet must also fail.
+ */
+ return 0;
+ }
+ } else {
+ return 0; /* other error */
+ }
+ }
+
+ ++qtx->cons_count;
+
+ /*
+ * Some packet types cannot have another packet come after them.
+ */
+ if (ossl_quic_pkt_type_must_be_last(pkt->hdr->type))
+ coalescing = 0;
+
+ if (!coalescing)
+ ossl_qtx_finish_dgram(qtx);
+
+ return 1;
+}
+
+/*
+ * Finish any incomplete datagrams for transmission which were flagged for
+ * coalescing. If there is no current coalescing datagram, this is a no-op.
+ */
+void ossl_qtx_finish_dgram(OSSL_QTX *qtx)
+{
+ TXE *txe = qtx->cons;
+
+ if (txe == NULL)
+ return;
+
+ if (txe->data_len == 0)
+ /*
+ * If we did not put anything in the datagram, just move it back to the
+ * free list.
+ */
+ ossl_list_txe_insert_tail(&qtx->free, txe);
+ else
+ qtx_add_to_pending(qtx, txe);
+
+ qtx->cons = NULL;
+ qtx->cons_count = 0;
+ ++qtx->datagram_count;
+}
+
+static void txe_to_msg(TXE *txe, BIO_MSG *msg)
+{
+ msg->data = txe_data(txe);
+ msg->data_len = txe->data_len;
+ msg->flags = 0;
+ msg->peer
+ = BIO_ADDR_family(&txe->peer) != AF_UNSPEC ? &txe->peer : NULL;
+ msg->local
+ = BIO_ADDR_family(&txe->local) != AF_UNSPEC ? &txe->local : NULL;
+}
+
+#define MAX_MSGS_PER_SEND 32
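+
+/*
+ * Pending datagrams are passed to the network in batches of up to
+ * MAX_MSGS_PER_SEND via a single BIO_sendmmsg() call, which amortises
+ * per-call overhead where the underlying BIO can map this onto a batched
+ * syscall such as sendmmsg(2).
+ */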
+
+int ossl_qtx_flush_net(OSSL_QTX *qtx)
+{
+ BIO_MSG msg[MAX_MSGS_PER_SEND];
+ size_t wr, i, total_written = 0;
+ TXE *txe;
+ int res;
+
+ if (ossl_list_txe_head(&qtx->pending) == NULL)
+ return QTX_FLUSH_NET_RES_OK; /* Nothing to send. */
+
+ if (qtx->bio == NULL)
+ return QTX_FLUSH_NET_RES_PERMANENT_FAIL;
+
+ for (;;) {
+ for (txe = ossl_list_txe_head(&qtx->pending), i = 0;
+ txe != NULL && i < OSSL_NELEM(msg);
+ txe = ossl_list_txe_next(txe), ++i)
+ txe_to_msg(txe, &msg[i]);
+
+ if (i == 0)
+ /* Nothing to send. */
+ break;
+
+ ERR_set_mark();
+ res = BIO_sendmmsg(qtx->bio, msg, sizeof(BIO_MSG), i, 0, &wr);
+ if (res && wr == 0) {
+ /*
+ * Treat 0 messages sent as a transient error and just stop for now.
+ */
+ ERR_clear_last_mark();
+ break;
+ } else if (!res) {
+ /*
+ * We did not get anything, so further calls will probably not
+ * succeed either.
+ */
+ if (BIO_err_is_non_fatal(ERR_peek_last_error())) {
+ /* Transient error, just stop for now, clearing the error. */
+ ERR_pop_to_mark();
+ break;
+ } else {
+ /* Non-transient error, fail and do not clear the error. */
+ ERR_clear_last_mark();
+ return QTX_FLUSH_NET_RES_PERMANENT_FAIL;
+ }
+ }
+
+ ERR_clear_last_mark();
+
+ /*
+ * Remove everything which was successfully sent from the pending queue.
+ */
+ for (i = 0; i < wr; ++i) {
+ if (qtx->msg_callback != NULL)
+ qtx->msg_callback(1, OSSL_QUIC1_VERSION, SSL3_RT_QUIC_DATAGRAM,
+ msg[i].data, msg[i].data_len,
+ qtx->msg_callback_ssl,
+ qtx->msg_callback_arg);
+ qtx_pending_to_free(qtx);
+ }
+
+ total_written += wr;
+ }
+
+ return total_written > 0
+ ? QTX_FLUSH_NET_RES_OK
+ : QTX_FLUSH_NET_RES_TRANSIENT_FAIL;
+}
+
+int ossl_qtx_pop_net(OSSL_QTX *qtx, BIO_MSG *msg)
+{
+ TXE *txe = ossl_list_txe_head(&qtx->pending);
+
+ if (txe == NULL)
+ return 0;
+
+ txe_to_msg(txe, msg);
+ qtx_pending_to_free(qtx);
+ return 1;
+}
+
+void ossl_qtx_set_bio(OSSL_QTX *qtx, BIO *bio)
+{
+ qtx->bio = bio;
+}
+
+int ossl_qtx_set_mdpl(OSSL_QTX *qtx, size_t mdpl)
+{
+ if (mdpl < QUIC_MIN_INITIAL_DGRAM_LEN)
+ return 0;
+
+ qtx->mdpl = mdpl;
+ return 1;
+}
+
+size_t ossl_qtx_get_mdpl(OSSL_QTX *qtx)
+{
+ return qtx->mdpl;
+}
+
+size_t ossl_qtx_get_queue_len_datagrams(OSSL_QTX *qtx)
+{
+ return qtx->pending_count;
+}
+
+size_t ossl_qtx_get_queue_len_bytes(OSSL_QTX *qtx)
+{
+ return qtx->pending_bytes;
+}
+
+size_t ossl_qtx_get_cur_dgram_len_bytes(OSSL_QTX *qtx)
+{
+ return qtx->cons != NULL ? qtx->cons->data_len : 0;
+}
+
+size_t ossl_qtx_get_unflushed_pkt_count(OSSL_QTX *qtx)
+{
+ return qtx->cons_count;
+}
+
+int ossl_qtx_trigger_key_update(OSSL_QTX *qtx)
+{
+ return ossl_qrl_enc_level_set_key_update(&qtx->el_set,
+ QUIC_ENC_LEVEL_1RTT);
+}
+
+uint64_t ossl_qtx_get_cur_epoch_pkt_count(OSSL_QTX *qtx, uint32_t enc_level)
+{
+ OSSL_QRL_ENC_LEVEL *el;
+
+ el = ossl_qrl_enc_level_set_get(&qtx->el_set, enc_level, 1);
+ if (el == NULL)
+ return UINT64_MAX;
+
+ return el->op_count;
+}
+
+uint64_t ossl_qtx_get_max_epoch_pkt_count(OSSL_QTX *qtx, uint32_t enc_level)
+{
+ OSSL_QRL_ENC_LEVEL *el;
+
+ el = ossl_qrl_enc_level_set_get(&qtx->el_set, enc_level, 1);
+ if (el == NULL)
+ return UINT64_MAX;
+
+ return ossl_qrl_get_suite_max_pkt(el->suite_id);
+}
+
+void ossl_qtx_set_msg_callback(OSSL_QTX *qtx, ossl_msg_cb msg_callback,
+ SSL *msg_callback_ssl)
+{
+ qtx->msg_callback = msg_callback;
+ qtx->msg_callback_ssl = msg_callback_ssl;
+}
+
+void ossl_qtx_set_msg_callback_arg(OSSL_QTX *qtx, void *msg_callback_arg)
+{
+ qtx->msg_callback_arg = msg_callback_arg;
+}
+
+uint64_t ossl_qtx_get_key_epoch(OSSL_QTX *qtx)
+{
+ OSSL_QRL_ENC_LEVEL *el;
+
+ el = ossl_qrl_enc_level_set_get(&qtx->el_set, QUIC_ENC_LEVEL_1RTT, 1);
+ if (el == NULL)
+ return 0;
+
+ return el->key_epoch;
+}
diff --git a/crypto/openssl/ssl/quic/quic_record_util.c b/crypto/openssl/ssl/quic/quic_record_util.c
new file mode 100644
index 000000000000..d7c3cece888d
--- /dev/null
+++ b/crypto/openssl/ssl/quic/quic_record_util.c
@@ -0,0 +1,287 @@
+/*
+ * Copyright 2022-2024 The OpenSSL Project Authors. All Rights Reserved.
+ *
+ * Licensed under the Apache License 2.0 (the "License"). You may not use
+ * this file except in compliance with the License. You can obtain a copy
+ * in the file LICENSE in the source distribution or at
+ * https://www.openssl.org/source/license.html
+ */
+
+#include "internal/quic_record_util.h"
+#include "internal/quic_record_rx.h"
+#include "internal/quic_record_tx.h"
+#include "internal/quic_wire_pkt.h"
+#include "../ssl_local.h"
+#include <openssl/kdf.h>
+#include <openssl/core_names.h>
+
+/*
+ * QUIC Key Derivation Utilities
+ * =============================
+ */
+int ossl_quic_hkdf_extract(OSSL_LIB_CTX *libctx,
+ const char *propq,
+ const EVP_MD *md,
+ const unsigned char *salt, size_t salt_len,
+ const unsigned char *ikm, size_t ikm_len,
+ unsigned char *out, size_t out_len)
+{
+ int ret = 0;
+ EVP_KDF *kdf = NULL;
+ EVP_KDF_CTX *kctx = NULL;
+ OSSL_PARAM params[8], *p = params;
+ int key_check = 0;
+ int mode = EVP_PKEY_HKDEF_MODE_EXTRACT_ONLY;
+ const char *md_name;
+
+ if ((md_name = EVP_MD_get0_name(md)) == NULL
+ || (kdf = EVP_KDF_fetch(libctx, OSSL_KDF_NAME_HKDF, propq)) == NULL
+ || (kctx = EVP_KDF_CTX_new(kdf)) == NULL)
+ goto err;
+
+ /*
+ * According to RFC 9000, the length of the destination connection ID must
+ * be at least 8 bytes. This means the destination connection ID may still
+ * be shorter than the minimum key length which the FIPS provider requires
+ * for HKDF.
+ *
+ * Therefore, we need to set `key-check` to zero to allow using the
+ * destination connection ID as IKM.
+ */
+ *p++ = OSSL_PARAM_construct_int(OSSL_KDF_PARAM_FIPS_KEY_CHECK, &key_check);
+ *p++ = OSSL_PARAM_construct_int(OSSL_KDF_PARAM_MODE, &mode);
+ *p++ = OSSL_PARAM_construct_utf8_string(OSSL_KDF_PARAM_DIGEST,
+ (char *)md_name, 0);
+ *p++ = OSSL_PARAM_construct_octet_string(OSSL_KDF_PARAM_SALT,
+ (unsigned char *)salt, salt_len);
+ *p++ = OSSL_PARAM_construct_octet_string(OSSL_KDF_PARAM_KEY,
+ (unsigned char *)ikm, ikm_len);
+ *p++ = OSSL_PARAM_construct_end();
+
+ ret = EVP_KDF_derive(kctx, out, out_len, params);
+
+err:
+ EVP_KDF_CTX_free(kctx);
+ EVP_KDF_free(kdf);
+ return ret;
+}
+
+/* Constants used for key derivation in QUIC v1. */
+static const unsigned char quic_client_in_label[] = {
+ 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x20, 0x69, 0x6e /* "client in" */
+};
+static const unsigned char quic_server_in_label[] = {
+ 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x20, 0x69, 0x6e /* "server in" */
+};
+
+/* Salt used to derive Initial packet protection keys (RFC 9001 Section 5.2). */
+static const unsigned char quic_v1_initial_salt[] = {
+ 0x38, 0x76, 0x2c, 0xf7, 0xf5, 0x59, 0x34, 0xb3, 0x4d, 0x17,
+ 0x9a, 0xe6, 0xa4, 0xc8, 0x0c, 0xad, 0xcc, 0xbb, 0x7f, 0x0a
+};
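+
+/*
+ * Per RFC 9001 s. 5.2, initial_secret = HKDF-Extract(initial_salt,
+ * client_dst_connection_id); the directional "client in" and "server in"
+ * secrets are then expanded from it, as done in
+ * ossl_quic_provide_initial_secret() below.
+ */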
+
+int ossl_quic_provide_initial_secret(OSSL_LIB_CTX *libctx,
+ const char *propq,
+ const QUIC_CONN_ID *dst_conn_id,
+ int is_server,
+ struct ossl_qrx_st *qrx,
+ struct ossl_qtx_st *qtx)
+{
+ unsigned char initial_secret[32];
+ unsigned char client_initial_secret[32], server_initial_secret[32];
+ unsigned char *rx_secret, *tx_secret;
+ EVP_MD *sha256;
+
+ if (qrx == NULL && qtx == NULL)
+ return 1;
+
+ /* Initial encryption always uses SHA-256. */
+ if ((sha256 = EVP_MD_fetch(libctx, "SHA256", propq)) == NULL)
+ return 0;
+
+ if (is_server) {
+ rx_secret = client_initial_secret;
+ tx_secret = server_initial_secret;
+ } else {
+ rx_secret = server_initial_secret;
+ tx_secret = client_initial_secret;
+ }
+
+ /* Derive initial secret from destination connection ID. */
+ if (!ossl_quic_hkdf_extract(libctx, propq,
+ sha256,
+ quic_v1_initial_salt,
+ sizeof(quic_v1_initial_salt),
+ dst_conn_id->id,
+ dst_conn_id->id_len,
+ initial_secret,
+ sizeof(initial_secret)))
+ goto err;
+
+ /* Derive "client in" secret. */
+ if (((qtx != NULL && tx_secret == client_initial_secret)
+ || (qrx != NULL && rx_secret == client_initial_secret))
+ && !tls13_hkdf_expand_ex(libctx, propq,
+ sha256,
+ initial_secret,
+ quic_client_in_label,
+ sizeof(quic_client_in_label),
+ NULL, 0,
+ client_initial_secret,
+ sizeof(client_initial_secret), 1))
+ goto err;
+
+ /* Derive "server in" secret. */
+ if (((qtx != NULL && tx_secret == server_initial_secret)
+ || (qrx != NULL && rx_secret == server_initial_secret))
+ && !tls13_hkdf_expand_ex(libctx, propq,
+ sha256,
+ initial_secret,
+ quic_server_in_label,
+ sizeof(quic_server_in_label),
+ NULL, 0,
+ server_initial_secret,
+ sizeof(server_initial_secret), 1))
+ goto err;
+
+ /* Setup RX EL. Initial encryption always uses AES-128-GCM. */
+ if (qrx != NULL
+ && !ossl_qrx_provide_secret(qrx, QUIC_ENC_LEVEL_INITIAL,
+ QRL_SUITE_AES128GCM,
+ sha256,
+ rx_secret,
+ sizeof(server_initial_secret)))
+ goto err;
+
+ /*
+ * ossl_qrx_provide_secret takes ownership of our ref to SHA256, so if we
+ * are initialising both sides, get a new ref for the following call for the
+ * TX side.
+ */
+ if (qrx != NULL && qtx != NULL && !EVP_MD_up_ref(sha256)) {
+ sha256 = NULL;
+ goto err;
+ }
+
+ /* Setup TX cipher. */
+ if (qtx != NULL
+ && !ossl_qtx_provide_secret(qtx, QUIC_ENC_LEVEL_INITIAL,
+ QRL_SUITE_AES128GCM,
+ sha256,
+ tx_secret,
+ sizeof(server_initial_secret)))
+ goto err;
+
+ return 1;
+
+err:
+ EVP_MD_free(sha256);
+ return 0;
+}
+
+/*
+ * QUIC Record Layer Ciphersuite Info
+ * ==================================
+ */
+
+struct suite_info {
+ const char *cipher_name, *md_name;
+ uint32_t secret_len, cipher_key_len, cipher_iv_len, cipher_tag_len;
+ uint32_t hdr_prot_key_len, hdr_prot_cipher_id;
+ uint64_t max_pkt, max_forged_pkt;
+};
+
+static const struct suite_info suite_aes128gcm = {
+ "AES-128-GCM", "SHA256", 32, 16, 12, 16, 16,
+ QUIC_HDR_PROT_CIPHER_AES_128,
+ ((uint64_t)1) << 23, /* Limits as prescribed by RFC 9001 */
+ ((uint64_t)1) << 52,
+};
+
+static const struct suite_info suite_aes256gcm = {
+ "AES-256-GCM", "SHA384", 48, 32, 12, 16, 32,
+ QUIC_HDR_PROT_CIPHER_AES_256,
+ ((uint64_t)1) << 23, /* Limits as prescribed by RFC 9001 */
+ ((uint64_t)1) << 52,
+};
+
+static const struct suite_info suite_chacha20poly1305 = {
+ "ChaCha20-Poly1305", "SHA256", 32, 32, 12, 16, 32,
+ QUIC_HDR_PROT_CIPHER_CHACHA,
+ /* Do not use UINT64_MAX here as this represents an invalid value */
+ UINT64_MAX - 1, /* No applicable limit for this suite (RFC 9001) */
+ ((uint64_t)1) << 36, /* Limit as prescribed by RFC 9001 */
+};
+
+static const struct suite_info *get_suite(uint32_t suite_id)
+{
+ switch (suite_id) {
+ case QRL_SUITE_AES128GCM:
+ return &suite_aes128gcm;
+ case QRL_SUITE_AES256GCM:
+ return &suite_aes256gcm;
+ case QRL_SUITE_CHACHA20POLY1305:
+ return &suite_chacha20poly1305;
+ default:
+ return NULL;
+ }
+}
+
+const char *ossl_qrl_get_suite_cipher_name(uint32_t suite_id)
+{
+ const struct suite_info *c = get_suite(suite_id);
+ return c != NULL ? c->cipher_name : NULL;
+}
+
+const char *ossl_qrl_get_suite_md_name(uint32_t suite_id)
+{
+ const struct suite_info *c = get_suite(suite_id);
+ return c != NULL ? c->md_name : NULL;
+}
+
+uint32_t ossl_qrl_get_suite_secret_len(uint32_t suite_id)
+{
+ const struct suite_info *c = get_suite(suite_id);
+ return c != NULL ? c->secret_len : 0;
+}
+
+uint32_t ossl_qrl_get_suite_cipher_key_len(uint32_t suite_id)
+{
+ const struct suite_info *c = get_suite(suite_id);
+ return c != NULL ? c->cipher_key_len : 0;
+}
+
+uint32_t ossl_qrl_get_suite_cipher_iv_len(uint32_t suite_id)
+{
+ const struct suite_info *c = get_suite(suite_id);
+ return c != NULL ? c->cipher_iv_len : 0;
+}
+
+uint32_t ossl_qrl_get_suite_cipher_tag_len(uint32_t suite_id)
+{
+ const struct suite_info *c = get_suite(suite_id);
+ return c != NULL ? c->cipher_tag_len : 0;
+}
+
+uint32_t ossl_qrl_get_suite_hdr_prot_cipher_id(uint32_t suite_id)
+{
+ const struct suite_info *c = get_suite(suite_id);
+ return c != NULL ? c->hdr_prot_cipher_id : 0;
+}
+
+uint32_t ossl_qrl_get_suite_hdr_prot_key_len(uint32_t suite_id)
+{
+ const struct suite_info *c = get_suite(suite_id);
+ return c != NULL ? c->hdr_prot_key_len : 0;
+}
+
+uint64_t ossl_qrl_get_suite_max_pkt(uint32_t suite_id)
+{
+ const struct suite_info *c = get_suite(suite_id);
+ return c != NULL ? c->max_pkt : UINT64_MAX;
+}
+
+uint64_t ossl_qrl_get_suite_max_forged_pkt(uint32_t suite_id)
+{
+ const struct suite_info *c = get_suite(suite_id);
+ return c != NULL ? c->max_forged_pkt : UINT64_MAX;
+}
diff --git a/crypto/openssl/ssl/quic/quic_rstream.c b/crypto/openssl/ssl/quic/quic_rstream.c
new file mode 100644
index 000000000000..dd3dbf756b44
--- /dev/null
+++ b/crypto/openssl/ssl/quic/quic_rstream.c
@@ -0,0 +1,295 @@
+/*
+ * Copyright 2022-2023 The OpenSSL Project Authors. All Rights Reserved.
+ *
+ * Licensed under the Apache License 2.0 (the "License"). You may not use
+ * this file except in compliance with the License. You can obtain a copy
+ * in the file LICENSE in the source distribution or at
+ * https://www.openssl.org/source/license.html
+ */
+#include <openssl/err.h>
+#include "internal/common.h"
+#include "internal/time.h"
+#include "internal/quic_stream.h"
+#include "internal/quic_sf_list.h"
+#include "internal/ring_buf.h"
+
+struct quic_rstream_st {
+ SFRAME_LIST fl;
+ QUIC_RXFC *rxfc;
+ OSSL_STATM *statm;
+ UINT_RANGE head_range;
+ struct ring_buf rbuf;
+};
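+
+/*
+ * Frame data queued on the SFRAME_LIST lives in one of two places: either
+ * it is still referenced in the OSSL_QRX_PKT it arrived in, or it has been
+ * copied into the ring buffer rbuf by ossl_quic_rstream_move_to_rbuf(), in
+ * which case the stored data pointer is NULL and readers fetch the bytes
+ * from rbuf by logical offset instead.
+ */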
+
+QUIC_RSTREAM *ossl_quic_rstream_new(QUIC_RXFC *rxfc,
+ OSSL_STATM *statm, size_t rbuf_size)
+{
+ QUIC_RSTREAM *ret = OPENSSL_zalloc(sizeof(*ret));
+
+ if (ret == NULL)
+ return NULL;
+
+ ring_buf_init(&ret->rbuf);
+ if (!ring_buf_resize(&ret->rbuf, rbuf_size, 0)) {
+ OPENSSL_free(ret);
+ return NULL;
+ }
+
+ ossl_sframe_list_init(&ret->fl);
+ ret->rxfc = rxfc;
+ ret->statm = statm;
+ return ret;
+}
+
+void ossl_quic_rstream_free(QUIC_RSTREAM *qrs)
+{
+ int cleanse;
+
+ if (qrs == NULL)
+ return;
+
+ cleanse = qrs->fl.cleanse;
+ ossl_sframe_list_destroy(&qrs->fl);
+ ring_buf_destroy(&qrs->rbuf, cleanse);
+ OPENSSL_free(qrs);
+}
+
+int ossl_quic_rstream_queue_data(QUIC_RSTREAM *qrs, OSSL_QRX_PKT *pkt,
+ uint64_t offset,
+ const unsigned char *data, uint64_t data_len,
+ int fin)
+{
+ UINT_RANGE range;
+
+ if ((data == NULL && data_len != 0) || (data_len == 0 && fin == 0)) {
+ /* empty frame allowed only at the end of the stream */
+ ERR_raise(ERR_LIB_SSL, ERR_R_INTERNAL_ERROR);
+ return 0;
+ }
+
+ range.start = offset;
+ range.end = offset + data_len;
+
+ return ossl_sframe_list_insert(&qrs->fl, &range, pkt, data, fin);
+}
+
+static int read_internal(QUIC_RSTREAM *qrs, unsigned char *buf, size_t size,
+ size_t *readbytes, int *fin, int drop)
+{
+ void *iter = NULL;
+ UINT_RANGE range;
+ const unsigned char *data;
+ uint64_t offset = 0;
+ size_t readbytes_ = 0;
+ int fin_ = 0, ret = 1;
+
+ while (ossl_sframe_list_peek(&qrs->fl, &iter, &range, &data, &fin_)) {
+ size_t l = (size_t)(range.end - range.start);
+
+ if (l > size) {
+ l = size;
+ fin_ = 0;
+ }
+ offset = range.start + l;
+ if (l == 0)
+ break;
+
+ if (data == NULL) {
+ size_t max_len;
+
+ data = ring_buf_get_ptr(&qrs->rbuf, range.start, &max_len);
+ if (!ossl_assert(data != NULL))
+ return 0;
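+ /*
+ * The ring buffer is physically contiguous except at its wrap point,
+ * so a logical range spans at most two extents; if the first extent
+ * is short, copy it and fetch the remainder from the wrapped part.
+ */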
+ if (max_len < l) {
+ memcpy(buf, data, max_len);
+ size -= max_len;
+ buf += max_len;
+ readbytes_ += max_len;
+ l -= max_len;
+ data = ring_buf_get_ptr(&qrs->rbuf, range.start + max_len,
+ &max_len);
+ if (!ossl_assert(data != NULL) || !ossl_assert(max_len >= l))
+ return 0;
+ }
+ }
+
+ memcpy(buf, data, l);
+ size -= l;
+ buf += l;
+ readbytes_ += l;
+ if (size == 0)
+ break;
+ }
+
+ if (drop && offset != 0) {
+ ret = ossl_sframe_list_drop_frames(&qrs->fl, offset);
+ ring_buf_cpop_range(&qrs->rbuf, 0, offset - 1, qrs->fl.cleanse);
+ }
+
+ if (ret) {
+ *readbytes = readbytes_;
+ *fin = fin_;
+ }
+
+ return ret;
+}
+
+static OSSL_TIME get_rtt(QUIC_RSTREAM *qrs)
+{
+ OSSL_TIME rtt;
+
+ if (qrs->statm != NULL) {
+ OSSL_RTT_INFO rtt_info;
+
+ ossl_statm_get_rtt_info(qrs->statm, &rtt_info);
+ rtt = rtt_info.smoothed_rtt;
+ } else {
+ rtt = ossl_time_zero();
+ }
+ return rtt;
+}
+
+int ossl_quic_rstream_read(QUIC_RSTREAM *qrs, unsigned char *buf, size_t size,
+ size_t *readbytes, int *fin)
+{
+ OSSL_TIME rtt = get_rtt(qrs);
+
+ if (!read_internal(qrs, buf, size, readbytes, fin, 1))
+ return 0;
+
+ if (qrs->rxfc != NULL
+ && !ossl_quic_rxfc_on_retire(qrs->rxfc, *readbytes, rtt))
+ return 0;
+
+ return 1;
+}
+
+int ossl_quic_rstream_peek(QUIC_RSTREAM *qrs, unsigned char *buf, size_t size,
+ size_t *readbytes, int *fin)
+{
+ return read_internal(qrs, buf, size, readbytes, fin, 0);
+}
+
+int ossl_quic_rstream_available(QUIC_RSTREAM *qrs, size_t *avail, int *fin)
+{
+ void *iter = NULL;
+ UINT_RANGE range;
+ const unsigned char *data;
+ uint64_t avail_ = 0;
+
+ while (ossl_sframe_list_peek(&qrs->fl, &iter, &range, &data, fin))
+ avail_ += range.end - range.start;
+
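+    /* Clamp where size_t is narrower than uint64_t (e.g. 32-bit targets). */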
+#if SIZE_MAX < UINT64_MAX
+ *avail = avail_ > SIZE_MAX ? SIZE_MAX : (size_t)avail_;
+#else
+ *avail = (size_t)avail_;
+#endif
+ return 1;
+}
+
+int ossl_quic_rstream_get_record(QUIC_RSTREAM *qrs,
+ const unsigned char **record, size_t *rec_len,
+ int *fin)
+{
+ const unsigned char *record_ = NULL;
+ size_t rec_len_, max_len;
+
+ if (!ossl_sframe_list_lock_head(&qrs->fl, &qrs->head_range, &record_, fin)) {
+ /* No head frame to lock and return */
+ *record = NULL;
+ *rec_len = 0;
+ return 1;
+ }
+
+ /* if final empty frame, we drop it immediately */
+ if (qrs->head_range.end == qrs->head_range.start) {
+ if (!ossl_assert(*fin))
+ return 0;
+ if (!ossl_sframe_list_drop_frames(&qrs->fl, qrs->head_range.end))
+ return 0;
+ }
+
+ rec_len_ = (size_t)(qrs->head_range.end - qrs->head_range.start);
+
+ if (record_ == NULL && rec_len_ != 0) {
+ record_ = ring_buf_get_ptr(&qrs->rbuf, qrs->head_range.start,
+ &max_len);
+ if (!ossl_assert(record_ != NULL))
+ return 0;
+ if (max_len < rec_len_) {
+ rec_len_ = max_len;
+ qrs->head_range.end = qrs->head_range.start + max_len;
+ }
+ }
+
+ *rec_len = rec_len_;
+ *record = record_;
+ return 1;
+}
+
+int ossl_quic_rstream_release_record(QUIC_RSTREAM *qrs, size_t read_len)
+{
+ uint64_t offset;
+
+ if (!ossl_sframe_list_is_head_locked(&qrs->fl))
+ return 0;
+
+ if (read_len > qrs->head_range.end - qrs->head_range.start) {
+ if (read_len != SIZE_MAX)
+ return 0;
+ offset = qrs->head_range.end;
+ } else {
+ offset = qrs->head_range.start + read_len;
+ }
+
+ if (!ossl_sframe_list_drop_frames(&qrs->fl, offset))
+ return 0;
+
+ if (offset > 0)
+ ring_buf_cpop_range(&qrs->rbuf, 0, offset - 1, qrs->fl.cleanse);
+
+ if (qrs->rxfc != NULL) {
+ OSSL_TIME rtt = get_rtt(qrs);
+
+ if (!ossl_quic_rxfc_on_retire(qrs->rxfc, offset, rtt))
+ return 0;
+ }
+
+ return 1;
+}
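+
+/*
+ * Illustrative sketch (hypothetical caller, not part of the upstream file):
+ * the zero-copy read pattern pairing ossl_quic_rstream_get_record() with
+ * ossl_quic_rstream_release_record(). Passing SIZE_MAX releases the whole
+ * locked record, as handled above; a smaller value releases only a prefix.
+ */
+static int rstream_zero_copy_example(QUIC_RSTREAM *qrs)
+{
+    const unsigned char *record = NULL;
+    size_t rec_len = 0;
+    int fin = 0;
+
+    if (!ossl_quic_rstream_get_record(qrs, &record, &rec_len, &fin))
+        return 0;
+
+    if (record == NULL)
+        return 1; /* no contiguous data available yet */
+
+    /* ... process rec_len bytes at |record| in place ... */
+
+    return ossl_quic_rstream_release_record(qrs, SIZE_MAX);
+}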
+
+static int write_at_ring_buf_cb(uint64_t logical_offset,
+ const unsigned char *buf,
+ size_t buf_len,
+ void *cb_arg)
+{
+ struct ring_buf *rbuf = cb_arg;
+
+ return ring_buf_write_at(rbuf, logical_offset, buf, buf_len);
+}
+
+int ossl_quic_rstream_move_to_rbuf(QUIC_RSTREAM *qrs)
+{
+ if (ring_buf_avail(&qrs->rbuf) == 0)
+ return 0;
+ return ossl_sframe_list_move_data(&qrs->fl,
+ write_at_ring_buf_cb, &qrs->rbuf);
+}
+
+int ossl_quic_rstream_resize_rbuf(QUIC_RSTREAM *qrs, size_t rbuf_size)
+{
+ if (ossl_sframe_list_is_head_locked(&qrs->fl))
+ return 0;
+
+ if (!ring_buf_resize(&qrs->rbuf, rbuf_size, qrs->fl.cleanse))
+ return 0;
+
+ return 1;
+}
+
+void ossl_quic_rstream_set_cleanse(QUIC_RSTREAM *qrs, int cleanse)
+{
+ qrs->fl.cleanse = cleanse;
+}
diff --git a/crypto/openssl/ssl/quic/quic_rx_depack.c b/crypto/openssl/ssl/quic/quic_rx_depack.c
new file mode 100644
index 000000000000..f800d8984193
--- /dev/null
+++ b/crypto/openssl/ssl/quic/quic_rx_depack.c
@@ -0,0 +1,1479 @@
+/*
+ * Copyright 2022-2025 The OpenSSL Project Authors. All Rights Reserved.
+ *
+ * Licensed under the Apache License 2.0 (the "License"). You may not use
+ * this file except in compliance with the License. You can obtain a copy
+ * in the file LICENSE in the source distribution or at
+ * https://www.openssl.org/source/license.html
+ */
+
+#include "internal/packet_quic.h"
+#include "internal/nelem.h"
+#include "internal/quic_wire.h"
+#include "internal/quic_record_rx.h"
+#include "internal/quic_ackm.h"
+#include "internal/quic_rx_depack.h"
+#include "internal/quic_error.h"
+#include "internal/quic_fc.h"
+#include "internal/quic_channel.h"
+#include "internal/sockets.h"
+
+#include "quic_local.h"
+#include "quic_channel_local.h"
+#include "../ssl_local.h"
+
+/*
+ * Helper functions to process different frame types.
+ *
+ * Typically, those that are ACK eliciting take an OSSL_ACKM_RX_PKT pointer
+ * argument; the few that are not ACK eliciting do not. This makes them
+ * verifiable against the tables where this is specified.
+ */
+static int depack_do_implicit_stream_create(QUIC_CHANNEL *ch,
+ uint64_t stream_id,
+ uint64_t frame_type,
+ QUIC_STREAM **result);
+
+static int depack_do_frame_padding(PACKET *pkt)
+{
+ /* We ignore this frame */
+ ossl_quic_wire_decode_padding(pkt);
+ return 1;
+}
+
+static int depack_do_frame_ping(PACKET *pkt, QUIC_CHANNEL *ch,
+ uint32_t enc_level,
+ OSSL_ACKM_RX_PKT *ackm_data)
+{
+ /* We ignore this frame, apart from eliciting an ACK */
+ if (!ossl_quic_wire_decode_frame_ping(pkt)) {
+ ossl_quic_channel_raise_protocol_error(ch,
+ OSSL_QUIC_ERR_FRAME_ENCODING_ERROR,
+ OSSL_QUIC_FRAME_TYPE_PING,
+ "decode error");
+ return 0;
+ }
+
+ ossl_quic_tx_packetiser_schedule_ack_eliciting(ch->txp, enc_level);
+ return 1;
+}
+
+static int depack_do_frame_ack(PACKET *pkt, QUIC_CHANNEL *ch,
+ int packet_space, OSSL_TIME received,
+ uint64_t frame_type,
+ OSSL_QRX_PKT *qpacket)
+{
+ OSSL_QUIC_FRAME_ACK ack;
+ OSSL_QUIC_ACK_RANGE *p;
+ uint64_t total_ranges = 0;
+ uint32_t ack_delay_exp = ch->rx_ack_delay_exp;
+
+ if (!ossl_quic_wire_peek_frame_ack_num_ranges(pkt, &total_ranges)
+ /* In case sizeof(uint64_t) > sizeof(size_t) */
+ || total_ranges > SIZE_MAX / sizeof(OSSL_QUIC_ACK_RANGE))
+ goto malformed;
+
+ if (ch->num_ack_range_scratch < (size_t)total_ranges) {
+ if ((p = OPENSSL_realloc(ch->ack_range_scratch,
+ sizeof(OSSL_QUIC_ACK_RANGE)
+ * (size_t)total_ranges)) == NULL)
+ goto malformed;
+
+ ch->ack_range_scratch = p;
+ ch->num_ack_range_scratch = (size_t)total_ranges;
+ }
+
+ ack.ack_ranges = ch->ack_range_scratch;
+ ack.num_ack_ranges = (size_t)total_ranges;
+
+ if (!ossl_quic_wire_decode_frame_ack(pkt, ack_delay_exp, &ack, NULL))
+ goto malformed;
+
+ if (qpacket->hdr->type == QUIC_PKT_TYPE_1RTT
+ && (qpacket->key_epoch < ossl_qrx_get_key_epoch(ch->qrx)
+ || ch->rxku_expected)
+ && ack.ack_ranges[0].end >= ch->txku_pn) {
+ /*
+ * RFC 9001 s. 6.2: An endpoint that receives an acknowledgment that is
+ * carried in a packet protected with old keys where any acknowledged
+ * packet was protected with newer keys MAY treat that as a connection
+ * error of type KEY_UPDATE_ERROR.
+ *
+ * Two cases to handle here:
+ *
+ * - We did spontaneous TXKU, the peer has responded in kind and we
+ * have detected RXKU; !ch->rxku_expected, but then it sent a packet
+ * with old keys acknowledging a packet in the new key epoch.
+ *
+ * This also covers the case where we got RXKU and triggered
+ * solicited TXKU, and then for some reason the peer sent an ACK of
+ * a PN in our new TX key epoch with old keys.
+ *
+ * - We did spontaneous TXKU; ch->txku_pn is the starting PN of our
+ * new TX key epoch; the peer has not initiated a solicited TXKU in
+ * response (so we have not detected RXKU); in this case the RX key
+ * epoch has not incremented and ch->rxku_expected is still 1.
+ */
+ ossl_quic_channel_raise_protocol_error(ch,
+ OSSL_QUIC_ERR_KEY_UPDATE_ERROR,
+ frame_type,
+ "acked packet which initiated a "
+ "key update without a "
+ "corresponding key update");
+ return 0;
+ }
+
+ if (!ossl_ackm_on_rx_ack_frame(ch->ackm, &ack,
+ packet_space, received))
+ goto malformed;
+
+ ++ch->diag_num_rx_ack;
+ return 1;
+
+malformed:
+ ossl_quic_channel_raise_protocol_error(ch,
+ OSSL_QUIC_ERR_FRAME_ENCODING_ERROR,
+ frame_type,
+ "decode error");
+ return 0;
+}
+
+static int depack_do_frame_reset_stream(PACKET *pkt,
+ QUIC_CHANNEL *ch,
+ OSSL_ACKM_RX_PKT *ackm_data)
+{
+ OSSL_QUIC_FRAME_RESET_STREAM frame_data;
+ QUIC_STREAM *stream = NULL;
+ uint64_t fce;
+
+ if (!ossl_quic_wire_decode_frame_reset_stream(pkt, &frame_data)) {
+ ossl_quic_channel_raise_protocol_error(ch,
+ OSSL_QUIC_ERR_FRAME_ENCODING_ERROR,
+ OSSL_QUIC_FRAME_TYPE_RESET_STREAM,
+ "decode error");
+ return 0;
+ }
+
+ if (!depack_do_implicit_stream_create(ch, frame_data.stream_id,
+ OSSL_QUIC_FRAME_TYPE_RESET_STREAM,
+ &stream))
+ return 0; /* error already raised for us */
+
+ if (stream == NULL)
+ return 1; /* old deleted stream, not a protocol violation, ignore */
+
+ if (!ossl_quic_stream_has_recv(stream)) {
+ ossl_quic_channel_raise_protocol_error(ch,
+ OSSL_QUIC_ERR_STREAM_STATE_ERROR,
+ OSSL_QUIC_FRAME_TYPE_RESET_STREAM,
+ "RESET_STREAM frame for "
+ "TX only stream");
+ return 0;
+ }
+
+ /*
+ * The final size field of the RESET_STREAM frame must be used to determine
+ * how much flow control credit the aborted stream was considered to have
+ * consumed.
+ *
+ * We also need to ensure that if we already have a final size for the
+ * stream, the RESET_STREAM frame's Final Size field matches this; we SHOULD
+ * terminate the connection otherwise (RFC 9000 s. 4.5). The RXFC takes care
+ * of this for us.
+ */
+ if (!ossl_quic_rxfc_on_rx_stream_frame(&stream->rxfc,
+ frame_data.final_size, /*is_fin=*/1)) {
+ ossl_quic_channel_raise_protocol_error(ch,
+ OSSL_QUIC_ERR_INTERNAL_ERROR,
+ OSSL_QUIC_FRAME_TYPE_RESET_STREAM,
+ "internal error (flow control)");
+ return 0;
+ }
+
+ /* Has a flow control error occurred? */
+ fce = ossl_quic_rxfc_get_error(&stream->rxfc, 0);
+ if (fce != OSSL_QUIC_ERR_NO_ERROR) {
+ ossl_quic_channel_raise_protocol_error(ch,
+ fce,
+ OSSL_QUIC_FRAME_TYPE_RESET_STREAM,
+ "flow control violation");
+ return 0;
+ }
+
+ /*
+ * Depending on the receive part state this is handled either as a reset
+ * transition or a no-op (e.g. if a reset has already been received before,
+ * or the application already retired a FIN). Best effort - there are no
+ * protocol error conditions we need to check for here.
+ */
+ ossl_quic_stream_map_notify_reset_recv_part(&ch->qsm, stream,
+ frame_data.app_error_code,
+ frame_data.final_size);
+
+ ossl_quic_stream_map_update_state(&ch->qsm, stream);
+ return 1;
+}
+
+static int depack_do_frame_stop_sending(PACKET *pkt,
+ QUIC_CHANNEL *ch,
+ OSSL_ACKM_RX_PKT *ackm_data)
+{
+ OSSL_QUIC_FRAME_STOP_SENDING frame_data;
+ QUIC_STREAM *stream = NULL;
+
+ if (!ossl_quic_wire_decode_frame_stop_sending(pkt, &frame_data)) {
+ ossl_quic_channel_raise_protocol_error(ch,
+ OSSL_QUIC_ERR_FRAME_ENCODING_ERROR,
+ OSSL_QUIC_FRAME_TYPE_STOP_SENDING,
+ "decode error");
+ return 0;
+ }
+
+ if (!depack_do_implicit_stream_create(ch, frame_data.stream_id,
+ OSSL_QUIC_FRAME_TYPE_STOP_SENDING,
+ &stream))
+ return 0; /* error already raised for us */
+
+ if (stream == NULL)
+ return 1; /* old deleted stream, not a protocol violation, ignore */
+
+ if (!ossl_quic_stream_has_send(stream)) {
+ ossl_quic_channel_raise_protocol_error(ch,
+ OSSL_QUIC_ERR_STREAM_STATE_ERROR,
+ OSSL_QUIC_FRAME_TYPE_STOP_SENDING,
+ "STOP_SENDING frame for "
+ "RX only stream");
+ return 0;
+ }
+
+ stream->peer_stop_sending = 1;
+ stream->peer_stop_sending_aec = frame_data.app_error_code;
+
+ /*
+ * RFC 9000 s. 3.5: Receiving a STOP_SENDING frame means we must respond in
+ * turn with a RESET_STREAM frame for the same part of the stream. The other
+ * part is unaffected.
+ */
+ ossl_quic_stream_map_reset_stream_send_part(&ch->qsm, stream,
+ frame_data.app_error_code);
+ return 1;
+}
+
+static int depack_do_frame_crypto(PACKET *pkt, QUIC_CHANNEL *ch,
+ OSSL_QRX_PKT *parent_pkt,
+ OSSL_ACKM_RX_PKT *ackm_data,
+ uint64_t *datalen)
+{
+ OSSL_QUIC_FRAME_CRYPTO f;
+ QUIC_RSTREAM *rstream;
+ QUIC_RXFC *rxfc;
+
+ *datalen = 0;
+
+ if (!ossl_quic_wire_decode_frame_crypto(pkt, 0, &f)) {
+ ossl_quic_channel_raise_protocol_error(ch,
+ OSSL_QUIC_ERR_FRAME_ENCODING_ERROR,
+ OSSL_QUIC_FRAME_TYPE_CRYPTO,
+ "decode error");
+ return 0;
+ }
+
+ if (f.len == 0)
+ return 1; /* nothing to do */
+
+ rstream = ch->crypto_recv[ackm_data->pkt_space];
+ if (!ossl_assert(rstream != NULL))
+ /*
+ * This should not happen; we should only have a NULL stream here if
+ * the EL has been discarded, and if the EL has been discarded we
+ * shouldn't be here.
+ */
+ return 0;
+
+ rxfc = &ch->crypto_rxfc[ackm_data->pkt_space];
+
+ if (!ossl_quic_rxfc_on_rx_stream_frame(rxfc, f.offset + f.len,
+ /*is_fin=*/0)) {
+ ossl_quic_channel_raise_protocol_error(ch,
+ OSSL_QUIC_ERR_INTERNAL_ERROR,
+ OSSL_QUIC_FRAME_TYPE_CRYPTO,
+ "internal error (crypto RXFC)");
+ return 0;
+ }
+
+ if (ossl_quic_rxfc_get_error(rxfc, 0) != OSSL_QUIC_ERR_NO_ERROR) {
+ ossl_quic_channel_raise_protocol_error(ch, OSSL_QUIC_ERR_CRYPTO_BUFFER_EXCEEDED,
+ OSSL_QUIC_FRAME_TYPE_CRYPTO,
+ "exceeded maximum crypto buffer");
+ return 0;
+ }
+
+ if (!ossl_quic_rstream_queue_data(rstream, parent_pkt,
+ f.offset, f.data, f.len, 0)) {
+ ossl_quic_channel_raise_protocol_error(ch,
+ OSSL_QUIC_ERR_INTERNAL_ERROR,
+ OSSL_QUIC_FRAME_TYPE_CRYPTO,
+ "internal error (rstream queue)");
+ return 0;
+ }
+
+ ch->did_crypto_frame = 1;
+ *datalen = f.len;
+
+ return 1;
+}
+
+static int depack_do_frame_new_token(PACKET *pkt, QUIC_CHANNEL *ch,
+ OSSL_ACKM_RX_PKT *ackm_data)
+{
+ const uint8_t *token;
+ size_t token_len;
+
+ if (!ossl_quic_wire_decode_frame_new_token(pkt, &token, &token_len)) {
+ ossl_quic_channel_raise_protocol_error(ch,
+ OSSL_QUIC_ERR_FRAME_ENCODING_ERROR,
+ OSSL_QUIC_FRAME_TYPE_NEW_TOKEN,
+ "decode error");
+ return 0;
+ }
+
+ if (token_len == 0) {
+ /*
+ * RFC 9000 s. 19.7: "A client MUST treat receipt of a NEW_TOKEN frame
+ * with an empty Token field as a connection error of type
+ * FRAME_ENCODING_ERROR."
+ */
+ ossl_quic_channel_raise_protocol_error(ch,
+ OSSL_QUIC_ERR_FRAME_ENCODING_ERROR,
+ OSSL_QUIC_FRAME_TYPE_NEW_TOKEN,
+ "zero-length NEW_TOKEN");
+ return 0;
+ }
+
+ /* store the new token in our token cache */
+ if (!ossl_quic_set_peer_token(ossl_quic_port_get_channel_ctx(ch->port),
+ &ch->cur_peer_addr, token, token_len))
+ return 0;
+
+ return 1;
+}
+
+/*
+ * Returns 1 if no protocol violation has occurred. In this case *result will be
+ * non-NULL unless this is an old deleted stream and we should ignore the frame
+ * causing this function to be called. Returns 0 on protocol violation.
+ */
+static int depack_do_implicit_stream_create(QUIC_CHANNEL *ch,
+ uint64_t stream_id,
+ uint64_t frame_type,
+ QUIC_STREAM **result)
+{
+ QUIC_STREAM *stream;
+ uint64_t peer_role, stream_ordinal;
+ uint64_t *p_next_ordinal_local, *p_next_ordinal_remote;
+ QUIC_RXFC *max_streams_fc;
+ int is_uni, is_remote_init;
+
+ stream = ossl_quic_stream_map_get_by_id(&ch->qsm, stream_id);
+ if (stream != NULL) {
+ *result = stream;
+ return 1;
+ }
+
+ /*
+ * If we do not yet have a stream with the given ID, there are three
+ * possibilities:
+ *
+ * (a) The stream ID is for a remotely-created stream and the peer
+ * is creating a stream.
+ *
+ * (b) The stream ID is for a locally-created stream which has
+ * previously been deleted.
+ *
+ * (c) The stream ID is for a locally-created stream which does
+ * not exist yet. This is a protocol violation and we must
+ * terminate the connection in this case.
+ *
+ * We distinguish between (b) and (c) using the stream ID allocator
+ * variable. Since stream ordinals are allocated monotonically, we
+ * simply determine if the stream ordinal is in the future.
+ */
+ peer_role = ch->is_server
+ ? QUIC_STREAM_INITIATOR_CLIENT
+ : QUIC_STREAM_INITIATOR_SERVER;
+
+ is_remote_init = ((stream_id & QUIC_STREAM_INITIATOR_MASK) == peer_role);
+ is_uni = ((stream_id & QUIC_STREAM_DIR_MASK) == QUIC_STREAM_DIR_UNI);
+
+ stream_ordinal = stream_id >> 2;
+
+ if (is_remote_init) {
+ /*
+ * Peer-created stream which does not yet exist. Create it. QUIC stream
+ * ordinals within a given stream type MUST be used in sequence and
+ * receiving a STREAM frame for ordinal n must implicitly create streams
+ * with ordinals [0, n) within that stream type even if no explicit
+ * STREAM frames are received for those ordinals.
+ */
+ p_next_ordinal_remote = is_uni
+ ? &ch->next_remote_stream_ordinal_uni
+ : &ch->next_remote_stream_ordinal_bidi;
+
+ /* Check this isn't violating stream count flow control. */
+ max_streams_fc = is_uni
+ ? &ch->max_streams_uni_rxfc
+ : &ch->max_streams_bidi_rxfc;
+
+ if (!ossl_quic_rxfc_on_rx_stream_frame(max_streams_fc,
+ stream_ordinal + 1,
+ /*is_fin=*/0)) {
+ ossl_quic_channel_raise_protocol_error(ch,
+ OSSL_QUIC_ERR_INTERNAL_ERROR,
+ frame_type,
+ "internal error (stream count RXFC)");
+ return 0;
+ }
+
+ if (ossl_quic_rxfc_get_error(max_streams_fc, 0) != OSSL_QUIC_ERR_NO_ERROR) {
+ ossl_quic_channel_raise_protocol_error(ch, OSSL_QUIC_ERR_STREAM_LIMIT_ERROR,
+ frame_type,
+ "exceeded maximum allowed streams");
+ return 0;
+ }
+
+ /*
+ * Create the named stream and any streams coming before it yet to be
+ * created.
+ */
+ while (*p_next_ordinal_remote <= stream_ordinal) {
+ uint64_t cur_stream_id = (*p_next_ordinal_remote << 2) |
+ (stream_id
+ & (QUIC_STREAM_DIR_MASK | QUIC_STREAM_INITIATOR_MASK));
+
+ stream = ossl_quic_channel_new_stream_remote(ch, cur_stream_id);
+ if (stream == NULL) {
+ ossl_quic_channel_raise_protocol_error(ch,
+ OSSL_QUIC_ERR_INTERNAL_ERROR,
+ frame_type,
+ "internal error (stream allocation)");
+ return 0;
+ }
+
+ ++*p_next_ordinal_remote;
+ }
+
+ *result = stream;
+ } else {
+ /* Locally-created stream which does not yet exist. */
+ p_next_ordinal_local = is_uni
+ ? &ch->next_local_stream_ordinal_uni
+ : &ch->next_local_stream_ordinal_bidi;
+
+ if (stream_ordinal >= *p_next_ordinal_local) {
+ /*
+ * We never created this stream yet, this is a protocol
+ * violation.
+ */
+ ossl_quic_channel_raise_protocol_error(ch,
+ OSSL_QUIC_ERR_STREAM_STATE_ERROR,
+ frame_type,
+ "STREAM frame for nonexistent "
+ "stream");
+ return 0;
+ }
+
+ /*
+ * Otherwise this is for an old locally-initiated stream which we
+ * have subsequently deleted. Ignore the data; it may simply be a
+ * retransmission. We already take care of notifying the peer of the
+ * termination of the stream during the stream deletion lifecycle.
+ */
+ *result = NULL;
+ }
+
+ return 1;
+}
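+
+/*
+ * Illustrative sketch (not part of the upstream file): how the low two bits
+ * of a stream ID encode its initiator and direction, and how the ordinal is
+ * recovered. This is the layout the implicit creation logic above relies on.
+ */
+static void stream_id_layout_example(void)
+{
+    uint64_t stream_id = 6; /* 0b110: client-initiated, unidirectional */
+    uint64_t ordinal = stream_id >> 2; /* == 1 */
+    int client_init = (stream_id & QUIC_STREAM_INITIATOR_MASK)
+                      == QUIC_STREAM_INITIATOR_CLIENT; /* true */
+    int is_uni = (stream_id & QUIC_STREAM_DIR_MASK)
+                 == QUIC_STREAM_DIR_UNI; /* true */
+
+    (void)ordinal; (void)client_init; (void)is_uni;
+}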
+
+static int depack_do_frame_stream(PACKET *pkt, QUIC_CHANNEL *ch,
+ OSSL_QRX_PKT *parent_pkt,
+ OSSL_ACKM_RX_PKT *ackm_data,
+ uint64_t frame_type,
+ uint64_t *datalen)
+{
+ OSSL_QUIC_FRAME_STREAM frame_data;
+ QUIC_STREAM *stream;
+ uint64_t fce;
+ size_t rs_avail;
+ int rs_fin = 0;
+
+ *datalen = 0;
+
+ if (!ossl_quic_wire_decode_frame_stream(pkt, 0, &frame_data)) {
+ ossl_quic_channel_raise_protocol_error(ch,
+ OSSL_QUIC_ERR_FRAME_ENCODING_ERROR,
+ frame_type,
+ "decode error");
+ return 0;
+ }
+
+ if (!depack_do_implicit_stream_create(ch, frame_data.stream_id,
+ frame_type, &stream))
+ return 0; /* protocol error raised by above call */
+
+ if (stream == NULL)
+ /*
+ * Data for old stream which is not a protocol violation but should be
+ * ignored, so stop here.
+ */
+ return 1;
+
+ if (!ossl_quic_stream_has_recv(stream)) {
+ ossl_quic_channel_raise_protocol_error(ch,
+ OSSL_QUIC_ERR_STREAM_STATE_ERROR,
+ frame_type,
+ "STREAM frame for TX only "
+ "stream");
+ return 0;
+ }
+
+ /* Notify stream flow controller. */
+ if (!ossl_quic_rxfc_on_rx_stream_frame(&stream->rxfc,
+ frame_data.offset + frame_data.len,
+ frame_data.is_fin)) {
+ ossl_quic_channel_raise_protocol_error(ch,
+ OSSL_QUIC_ERR_INTERNAL_ERROR,
+ frame_type,
+ "internal error (flow control)");
+ return 0;
+ }
+
+ /* Has a flow control error occurred? */
+ fce = ossl_quic_rxfc_get_error(&stream->rxfc, 0);
+ if (fce != OSSL_QUIC_ERR_NO_ERROR) {
+ ossl_quic_channel_raise_protocol_error(ch,
+ fce,
+ frame_type,
+ "flow control violation");
+ return 0;
+ }
+
+ switch (stream->recv_state) {
+ case QUIC_RSTREAM_STATE_RECV:
+ case QUIC_RSTREAM_STATE_SIZE_KNOWN:
+ /*
+ * It only makes sense to process incoming STREAM frames in these
+ * states.
+ */
+ break;
+
+ case QUIC_RSTREAM_STATE_DATA_RECVD:
+ case QUIC_RSTREAM_STATE_DATA_READ:
+ case QUIC_RSTREAM_STATE_RESET_RECVD:
+ case QUIC_RSTREAM_STATE_RESET_READ:
+ default:
+ /*
+ * We have no use for STREAM frames once the receive part reaches any of
+ * these states, so just ignore.
+ */
+ return 1;
+ }
+
+ /* If we are in RECV, auto-transition to SIZE_KNOWN on FIN. */
+ if (frame_data.is_fin
+ && !ossl_quic_stream_recv_get_final_size(stream, NULL)) {
+
+ /* State was already checked above, so can't fail. */
+ ossl_quic_stream_map_notify_size_known_recv_part(&ch->qsm, stream,
+ frame_data.offset
+ + frame_data.len);
+ }
+
+ /*
+ * If we requested STOP_SENDING do not bother buffering the data. Note that
+ * this must happen after RXFC checks above as even if we sent STOP_SENDING
+ * we must still enforce correct flow control (RFC 9000 s. 3.5).
+ */
+ if (stream->stop_sending)
+ return 1; /* not an error - packet reordering, etc. */
+
+ /*
+ * The receive stream buffer may or may not choose to consume the data
+ * without copying by reffing the OSSL_QRX_PKT. In this case
+ * ossl_qrx_pkt_release() will be eventually called when the data is no
+ * longer needed.
+ *
+ * It is OK for the peer to send us a zero-length non-FIN STREAM frame,
+ * which is a no-op, aside from the fact that it ensures the stream exists.
+ * In this case we have nothing to report to the receive buffer.
+ */
+ if ((frame_data.len > 0 || frame_data.is_fin)
+ && !ossl_quic_rstream_queue_data(stream->rstream, parent_pkt,
+ frame_data.offset,
+ frame_data.data,
+ frame_data.len,
+ frame_data.is_fin)) {
+ ossl_quic_channel_raise_protocol_error(ch,
+ OSSL_QUIC_ERR_INTERNAL_ERROR,
+ frame_type,
+ "internal error (rstream queue)");
+ return 0;
+ }
+
+ /*
+ * rs_fin will be 1 only if we can read all data up to and including the FIN
+ * without any gaps before it; this implies we have received all data. Avoid
+ * calling ossl_quic_rstream_available() where it is not necessary as it is
+ * more expensive.
+ */
+ if (stream->recv_state == QUIC_RSTREAM_STATE_SIZE_KNOWN
+ && !ossl_quic_rstream_available(stream->rstream, &rs_avail, &rs_fin)) {
+ ossl_quic_channel_raise_protocol_error(ch,
+ OSSL_QUIC_ERR_INTERNAL_ERROR,
+ frame_type,
+ "internal error (rstream available)");
+ return 0;
+ }
+
+ if (rs_fin)
+ ossl_quic_stream_map_notify_totally_received(&ch->qsm, stream);
+
+ *datalen = frame_data.len;
+
+ return 1;
+}
+
+static void update_streams(QUIC_STREAM *s, void *arg)
+{
+ QUIC_CHANNEL *ch = arg;
+
+ ossl_quic_stream_map_update_state(&ch->qsm, s);
+}
+
+static void update_streams_bidi(QUIC_STREAM *s, void *arg)
+{
+ QUIC_CHANNEL *ch = arg;
+
+ if (!ossl_quic_stream_is_bidi(s))
+ return;
+
+ ossl_quic_stream_map_update_state(&ch->qsm, s);
+}
+
+static void update_streams_uni(QUIC_STREAM *s, void *arg)
+{
+ QUIC_CHANNEL *ch = arg;
+
+ if (ossl_quic_stream_is_bidi(s))
+ return;
+
+ ossl_quic_stream_map_update_state(&ch->qsm, s);
+}
+
+static int depack_do_frame_max_data(PACKET *pkt, QUIC_CHANNEL *ch,
+ OSSL_ACKM_RX_PKT *ackm_data)
+{
+ uint64_t max_data = 0;
+
+ if (!ossl_quic_wire_decode_frame_max_data(pkt, &max_data)) {
+ ossl_quic_channel_raise_protocol_error(ch,
+ OSSL_QUIC_ERR_FRAME_ENCODING_ERROR,
+ OSSL_QUIC_FRAME_TYPE_MAX_DATA,
+ "decode error");
+ return 0;
+ }
+
+ ossl_quic_txfc_bump_cwm(&ch->conn_txfc, max_data);
+ ossl_quic_stream_map_visit(&ch->qsm, update_streams, ch);
+ return 1;
+}
+
+static int depack_do_frame_max_stream_data(PACKET *pkt,
+ QUIC_CHANNEL *ch,
+ OSSL_ACKM_RX_PKT *ackm_data)
+{
+ uint64_t stream_id = 0;
+ uint64_t max_stream_data = 0;
+ QUIC_STREAM *stream;
+
+ if (!ossl_quic_wire_decode_frame_max_stream_data(pkt, &stream_id,
+ &max_stream_data)) {
+ ossl_quic_channel_raise_protocol_error(ch,
+ OSSL_QUIC_ERR_FRAME_ENCODING_ERROR,
+ OSSL_QUIC_FRAME_TYPE_MAX_STREAM_DATA,
+ "decode error");
+ return 0;
+ }
+
+ if (!depack_do_implicit_stream_create(ch, stream_id,
+ OSSL_QUIC_FRAME_TYPE_MAX_STREAM_DATA,
+ &stream))
+ return 0; /* error already raised for us */
+
+ if (stream == NULL)
+ return 1; /* old deleted stream, not a protocol violation, ignore */
+
+ if (!ossl_quic_stream_has_send(stream)) {
+ ossl_quic_channel_raise_protocol_error(ch,
+ OSSL_QUIC_ERR_STREAM_STATE_ERROR,
+ OSSL_QUIC_FRAME_TYPE_MAX_STREAM_DATA,
+ "MAX_STREAM_DATA for TX only "
+ "stream");
+ return 0;
+ }
+
+ ossl_quic_txfc_bump_cwm(&stream->txfc, max_stream_data);
+ ossl_quic_stream_map_update_state(&ch->qsm, stream);
+ return 1;
+}
+
+static int depack_do_frame_max_streams(PACKET *pkt,
+ QUIC_CHANNEL *ch,
+ OSSL_ACKM_RX_PKT *ackm_data,
+ uint64_t frame_type)
+{
+ uint64_t max_streams = 0;
+
+ if (!ossl_quic_wire_decode_frame_max_streams(pkt, &max_streams)) {
+ ossl_quic_channel_raise_protocol_error(ch,
+ OSSL_QUIC_ERR_FRAME_ENCODING_ERROR,
+ frame_type,
+ "decode error");
+ return 0;
+ }
+
+ if (max_streams > (((uint64_t)1) << 60)) {
+ ossl_quic_channel_raise_protocol_error(ch,
+ OSSL_QUIC_ERR_FRAME_ENCODING_ERROR,
+ frame_type,
+ "invalid max streams value");
+ return 0;
+ }
+
+ switch (frame_type) {
+ case OSSL_QUIC_FRAME_TYPE_MAX_STREAMS_BIDI:
+ if (max_streams > ch->max_local_streams_bidi)
+ ch->max_local_streams_bidi = max_streams;
+
+ /* Some streams may now be able to send. */
+ ossl_quic_stream_map_visit(&ch->qsm, update_streams_bidi, ch);
+ break;
+ case OSSL_QUIC_FRAME_TYPE_MAX_STREAMS_UNI:
+ if (max_streams > ch->max_local_streams_uni)
+ ch->max_local_streams_uni = max_streams;
+
+ /* Some streams may now be able to send. */
+ ossl_quic_stream_map_visit(&ch->qsm, update_streams_uni, ch);
+ break;
+ default:
+ ossl_quic_channel_raise_protocol_error(ch,
+ OSSL_QUIC_ERR_FRAME_ENCODING_ERROR,
+ frame_type,
+ "decode error");
+ return 0;
+ }
+
+ return 1;
+}
+
+static int depack_do_frame_data_blocked(PACKET *pkt,
+ QUIC_CHANNEL *ch,
+ OSSL_ACKM_RX_PKT *ackm_data)
+{
+ uint64_t max_data = 0;
+
+ if (!ossl_quic_wire_decode_frame_data_blocked(pkt, &max_data)) {
+ ossl_quic_channel_raise_protocol_error(ch,
+ OSSL_QUIC_ERR_FRAME_ENCODING_ERROR,
+ OSSL_QUIC_FRAME_TYPE_DATA_BLOCKED,
+ "decode error");
+ return 0;
+ }
+
+ /* No-op - informative/debugging frame. */
+ return 1;
+}
+
+static int depack_do_frame_stream_data_blocked(PACKET *pkt,
+ QUIC_CHANNEL *ch,
+ OSSL_ACKM_RX_PKT *ackm_data)
+{
+ uint64_t stream_id = 0;
+ uint64_t max_data = 0;
+ QUIC_STREAM *stream;
+
+ if (!ossl_quic_wire_decode_frame_stream_data_blocked(pkt, &stream_id,
+ &max_data)) {
+ ossl_quic_channel_raise_protocol_error(ch,
+ OSSL_QUIC_ERR_FRAME_ENCODING_ERROR,
+ OSSL_QUIC_FRAME_TYPE_STREAM_DATA_BLOCKED,
+ "decode error");
+ return 0;
+ }
+
+ /*
+ * This is an informative/debugging frame, so we don't have to do anything,
+ * but it does trigger stream creation.
+ */
+ if (!depack_do_implicit_stream_create(ch, stream_id,
+ OSSL_QUIC_FRAME_TYPE_STREAM_DATA_BLOCKED,
+ &stream))
+ return 0; /* error already raised for us */
+
+ if (stream == NULL)
+ return 1; /* old deleted stream, not a protocol violation, ignore */
+
+ if (!ossl_quic_stream_has_recv(stream)) {
+ /*
+ * RFC 9000 s. 19.14: "An endpoint that receives a STREAM_DATA_BLOCKED
+ * frame for a send-only stream MUST terminate the connection with error
+ * STREAM_STATE_ERROR."
+ */
+ ossl_quic_channel_raise_protocol_error(ch,
+ OSSL_QUIC_ERR_STREAM_STATE_ERROR,
+ OSSL_QUIC_FRAME_TYPE_STREAM_DATA_BLOCKED,
+ "STREAM_DATA_BLOCKED frame for "
+ "TX only stream");
+ return 0;
+ }
+
+ /* No-op - informative/debugging frame. */
+ return 1;
+}
+
+static int depack_do_frame_streams_blocked(PACKET *pkt,
+ QUIC_CHANNEL *ch,
+ OSSL_ACKM_RX_PKT *ackm_data,
+ uint64_t frame_type)
+{
+ uint64_t max_data = 0;
+
+ if (!ossl_quic_wire_decode_frame_streams_blocked(pkt, &max_data)) {
+ ossl_quic_channel_raise_protocol_error(ch,
+ OSSL_QUIC_ERR_FRAME_ENCODING_ERROR,
+ frame_type,
+ "decode error");
+ return 0;
+ }
+
+ if (max_data > (((uint64_t)1) << 60)) {
+ /*
+ * RFC 9000 s. 19.14: "This value cannot exceed 2**60, as it is not
+ * possible to encode stream IDs larger than 2**62 - 1. Receipt of a
+ * frame that encodes a larger stream ID MUST be treated as a connection
+ * error of type STREAM_LIMIT_ERROR or FRAME_ENCODING_ERROR."
+ */
+ ossl_quic_channel_raise_protocol_error(ch,
+ OSSL_QUIC_ERR_STREAM_LIMIT_ERROR,
+ frame_type,
+ "invalid stream count limit");
+ return 0;
+ }
+
+ /* No-op - informative/debugging frame. */
+ return 1;
+}
+
+static int depack_do_frame_new_conn_id(PACKET *pkt,
+ QUIC_CHANNEL *ch,
+ OSSL_ACKM_RX_PKT *ackm_data)
+{
+ OSSL_QUIC_FRAME_NEW_CONN_ID frame_data;
+
+ if (!ossl_quic_wire_decode_frame_new_conn_id(pkt, &frame_data)) {
+ ossl_quic_channel_raise_protocol_error(ch,
+ OSSL_QUIC_ERR_FRAME_ENCODING_ERROR,
+ OSSL_QUIC_FRAME_TYPE_NEW_CONN_ID,
+ "decode error");
+ return 0;
+ }
+
+ ossl_quic_channel_on_new_conn_id(ch, &frame_data);
+
+ return 1;
+}
+
+static int depack_do_frame_retire_conn_id(PACKET *pkt,
+ QUIC_CHANNEL *ch,
+ OSSL_ACKM_RX_PKT *ackm_data)
+{
+ uint64_t seq_num;
+
+ if (!ossl_quic_wire_decode_frame_retire_conn_id(pkt, &seq_num)) {
+ ossl_quic_channel_raise_protocol_error(ch,
+ OSSL_QUIC_ERR_FRAME_ENCODING_ERROR,
+ OSSL_QUIC_FRAME_TYPE_RETIRE_CONN_ID,
+ "decode error");
+ return 0;
+ }
+
+ /*
+ * RFC 9000 s. 19.16: "An endpoint cannot send this frame if it was provided
+ * with a zero-length connection ID by its peer. An endpoint that provides a
+ * zero-length connection ID MUST treat receipt of a RETIRE_CONNECTION_ID
+ * frame as a connection error of type PROTOCOL_VIOLATION."
+ *
+ * Since we always use a zero-length SCID as a client, there is no case
+ * where it is valid for a server to send this. Our server support is
+ * currently non-conformant and for internal testing use; simply handle it
+ * as a no-op in this case.
+ *
+ * TODO(QUIC FUTURE): Revise and implement correctly for server support.
+ */
+ if (!ch->is_server) {
+ ossl_quic_channel_raise_protocol_error(ch,
+ OSSL_QUIC_ERR_PROTOCOL_VIOLATION,
+ OSSL_QUIC_FRAME_TYPE_RETIRE_CONN_ID,
+ "conn has zero-length CID");
+ return 0;
+ }
+
+ return 1;
+}
+
+static void free_path_response(unsigned char *buf, size_t buf_len, void *arg)
+{
+ OPENSSL_free(buf);
+}
+
+static int depack_do_frame_path_challenge(PACKET *pkt,
+ QUIC_CHANNEL *ch,
+ OSSL_ACKM_RX_PKT *ackm_data)
+{
+ uint64_t frame_data = 0;
+ unsigned char *encoded = NULL;
+ size_t encoded_len;
+ WPACKET wpkt;
+
+ if (!ossl_quic_wire_decode_frame_path_challenge(pkt, &frame_data)) {
+ ossl_quic_channel_raise_protocol_error(ch,
+ OSSL_QUIC_ERR_FRAME_ENCODING_ERROR,
+ OSSL_QUIC_FRAME_TYPE_PATH_CHALLENGE,
+ "decode error");
+ return 0;
+ }
+
+ /*
+ * RFC 9000 s. 8.2.2: On receiving a PATH_CHALLENGE frame, an endpoint MUST
+ * respond by echoing the data contained in the PATH_CHALLENGE frame in a
+ * PATH_RESPONSE frame.
+ *
+ * TODO(QUIC FUTURE): We should try to avoid allocation here in the future.
+ */
+ encoded_len = sizeof(uint64_t) + 1;
+ if ((encoded = OPENSSL_malloc(encoded_len)) == NULL)
+ goto err;
+
+ if (!WPACKET_init_static_len(&wpkt, encoded, encoded_len, 0))
+ goto err;
+
+ if (!ossl_quic_wire_encode_frame_path_response(&wpkt, frame_data)) {
+ WPACKET_cleanup(&wpkt);
+ goto err;
+ }
+
+ WPACKET_finish(&wpkt);
+
+ if (!ossl_quic_cfq_add_frame(ch->cfq, 0, QUIC_PN_SPACE_APP,
+ OSSL_QUIC_FRAME_TYPE_PATH_RESPONSE,
+ QUIC_CFQ_ITEM_FLAG_UNRELIABLE,
+ encoded, encoded_len,
+ free_path_response, NULL))
+ goto err;
+
+ return 1;
+
+err:
+ OPENSSL_free(encoded);
+ ossl_quic_channel_raise_protocol_error(ch, OSSL_QUIC_ERR_INTERNAL_ERROR,
+ OSSL_QUIC_FRAME_TYPE_PATH_CHALLENGE,
+ "internal error");
+ return 0;
+}
+
+static int depack_do_frame_path_response(PACKET *pkt,
+ QUIC_CHANNEL *ch,
+ OSSL_ACKM_RX_PKT *ackm_data)
+{
+ uint64_t frame_data = 0;
+
+ if (!ossl_quic_wire_decode_frame_path_response(pkt, &frame_data)) {
+ ossl_quic_channel_raise_protocol_error(ch,
+ OSSL_QUIC_ERR_FRAME_ENCODING_ERROR,
+ OSSL_QUIC_FRAME_TYPE_PATH_RESPONSE,
+ "decode error");
+ return 0;
+ }
+
+ /* TODO(QUIC MULTIPATH): ADD CODE to send |frame_data| to the ch manager */
+
+ return 1;
+}
+
+static int depack_do_frame_conn_close(PACKET *pkt, QUIC_CHANNEL *ch,
+ uint64_t frame_type)
+{
+ OSSL_QUIC_FRAME_CONN_CLOSE frame_data;
+
+ if (!ossl_quic_wire_decode_frame_conn_close(pkt, &frame_data)) {
+ ossl_quic_channel_raise_protocol_error(ch,
+ OSSL_QUIC_ERR_FRAME_ENCODING_ERROR,
+ frame_type,
+ "decode error");
+ return 0;
+ }
+
+ ossl_quic_channel_on_remote_conn_close(ch, &frame_data);
+ return 1;
+}
+
+static int depack_do_frame_handshake_done(PACKET *pkt,
+ QUIC_CHANNEL *ch,
+ OSSL_ACKM_RX_PKT *ackm_data)
+{
+ if (!ossl_quic_wire_decode_frame_handshake_done(pkt)) {
+ /* This can fail only with an internal error. */
+ ossl_quic_channel_raise_protocol_error(ch,
+ OSSL_QUIC_ERR_INTERNAL_ERROR,
+ OSSL_QUIC_FRAME_TYPE_HANDSHAKE_DONE,
+ "internal error (decode frame handshake done)");
+ return 0;
+ }
+
+ ossl_quic_channel_on_handshake_confirmed(ch);
+ return 1;
+}
+
+/* Main frame processor */
+
+static int depack_process_frames(QUIC_CHANNEL *ch, PACKET *pkt,
+ OSSL_QRX_PKT *parent_pkt, uint32_t enc_level,
+ OSSL_TIME received, OSSL_ACKM_RX_PKT *ackm_data)
+{
+ uint32_t pkt_type = parent_pkt->hdr->type;
+ uint32_t packet_space = ossl_quic_enc_level_to_pn_space(enc_level);
+
+ if (PACKET_remaining(pkt) == 0) {
+ /*
+ * RFC 9000 s. 12.4: An endpoint MUST treat receipt of a packet
+ * containing no frames as a connection error of type
+ * PROTOCOL_VIOLATION.
+ */
+ ossl_quic_channel_raise_protocol_error(ch,
+ OSSL_QUIC_ERR_PROTOCOL_VIOLATION,
+ 0,
+ "empty packet payload");
+ return 0;
+ }
+
+ while (PACKET_remaining(pkt) > 0) {
+ int was_minimal;
+ uint64_t frame_type;
+ const unsigned char *sof = NULL;
+ uint64_t datalen = 0;
+
+ if (ch->msg_callback != NULL)
+ sof = PACKET_data(pkt);
+
+ if (!ossl_quic_wire_peek_frame_header(pkt, &frame_type, &was_minimal)) {
+ ossl_quic_channel_raise_protocol_error(ch,
+ OSSL_QUIC_ERR_PROTOCOL_VIOLATION,
+ 0,
+ "malformed frame header");
+ return 0;
+ }
+
+ if (!was_minimal) {
+ ossl_quic_channel_raise_protocol_error(ch,
+ OSSL_QUIC_ERR_PROTOCOL_VIOLATION,
+ frame_type,
+ "non-minimal frame type encoding");
+ return 0;
+ }
+
+ /*
+ * There are only a few frame types which are not ACK-eliciting. Handle
+ * these centrally to make error handling cases more resilient, as we
+ * should tell the ACKM about an ACK-eliciting frame even if it was not
+ * successfully handled.
+ */
+ switch (frame_type) {
+ case OSSL_QUIC_FRAME_TYPE_PADDING:
+ case OSSL_QUIC_FRAME_TYPE_ACK_WITHOUT_ECN:
+ case OSSL_QUIC_FRAME_TYPE_ACK_WITH_ECN:
+ case OSSL_QUIC_FRAME_TYPE_CONN_CLOSE_TRANSPORT:
+ case OSSL_QUIC_FRAME_TYPE_CONN_CLOSE_APP:
+ break;
+ default:
+ ackm_data->is_ack_eliciting = 1;
+ break;
+ }
+
+ switch (frame_type) {
+ case OSSL_QUIC_FRAME_TYPE_PING:
+ /* Allowed in all packet types */
+ if (!depack_do_frame_ping(pkt, ch, enc_level, ackm_data))
+ return 0;
+ break;
+ case OSSL_QUIC_FRAME_TYPE_PADDING:
+ /* Allowed in all packet types */
+ if (!depack_do_frame_padding(pkt))
+ return 0;
+ break;
+
+ case OSSL_QUIC_FRAME_TYPE_ACK_WITHOUT_ECN:
+ case OSSL_QUIC_FRAME_TYPE_ACK_WITH_ECN:
+ /* ACK frames are valid everywhere except in 0RTT packets */
+ if (pkt_type == QUIC_PKT_TYPE_0RTT) {
+ ossl_quic_channel_raise_protocol_error(ch,
+ OSSL_QUIC_ERR_PROTOCOL_VIOLATION,
+ frame_type,
+ "ACK not valid in 0-RTT");
+ return 0;
+ }
+ if (!depack_do_frame_ack(pkt, ch, packet_space, received,
+ frame_type, parent_pkt))
+ return 0;
+ break;
+
+ case OSSL_QUIC_FRAME_TYPE_RESET_STREAM:
+ /* RESET_STREAM frames are valid in 0RTT and 1RTT packets */
+ if (pkt_type != QUIC_PKT_TYPE_0RTT
+ && pkt_type != QUIC_PKT_TYPE_1RTT) {
+ ossl_quic_channel_raise_protocol_error(ch,
+ OSSL_QUIC_ERR_PROTOCOL_VIOLATION,
+ frame_type,
+ "RESET_STREAM not valid in "
+ "INITIAL/HANDSHAKE");
+ return 0;
+ }
+ if (!depack_do_frame_reset_stream(pkt, ch, ackm_data))
+ return 0;
+ break;
+ case OSSL_QUIC_FRAME_TYPE_STOP_SENDING:
+ /* STOP_SENDING frames are valid in 0RTT and 1RTT packets */
+ if (pkt_type != QUIC_PKT_TYPE_0RTT
+ && pkt_type != QUIC_PKT_TYPE_1RTT) {
+ ossl_quic_channel_raise_protocol_error(ch,
+ OSSL_QUIC_ERR_PROTOCOL_VIOLATION,
+ frame_type,
+ "STOP_SENDING not valid in "
+ "INITIAL/HANDSHAKE");
+ return 0;
+ }
+ if (!depack_do_frame_stop_sending(pkt, ch, ackm_data))
+ return 0;
+ break;
+ case OSSL_QUIC_FRAME_TYPE_CRYPTO:
+ /* CRYPTO frames are valid everywhere except in 0RTT packets */
+ if (pkt_type == QUIC_PKT_TYPE_0RTT) {
+ ossl_quic_channel_raise_protocol_error(ch,
+ OSSL_QUIC_ERR_PROTOCOL_VIOLATION,
+ frame_type,
+ "CRYPTO frame not valid in 0-RTT");
+ return 0;
+ }
+ if (!depack_do_frame_crypto(pkt, ch, parent_pkt, ackm_data, &datalen))
+ return 0;
+ break;
+ case OSSL_QUIC_FRAME_TYPE_NEW_TOKEN:
+ /* NEW_TOKEN frames are valid in 1RTT packets */
+ if (pkt_type != QUIC_PKT_TYPE_1RTT) {
+ ossl_quic_channel_raise_protocol_error(ch,
+ OSSL_QUIC_ERR_PROTOCOL_VIOLATION,
+ frame_type,
+ "NEW_TOKEN valid only in 1-RTT");
+ return 0;
+ }
+
+ /*
+ * RFC 9000 s. 19.7: "A server MUST treat receipt of a NEW_TOKEN
+ * frame as a connection error of type PROTOCOL_VIOLATION."
+ */
+ if (ch->is_server) {
+ ossl_quic_channel_raise_protocol_error(ch,
+ OSSL_QUIC_ERR_PROTOCOL_VIOLATION,
+ frame_type,
+ "NEW_TOKEN can only be sent by a server");
+ return 0;
+ }
+
+ if (!depack_do_frame_new_token(pkt, ch, ackm_data))
+ return 0;
+ break;
+
+ case OSSL_QUIC_FRAME_TYPE_STREAM:
+ case OSSL_QUIC_FRAME_TYPE_STREAM_FIN:
+ case OSSL_QUIC_FRAME_TYPE_STREAM_LEN:
+ case OSSL_QUIC_FRAME_TYPE_STREAM_LEN_FIN:
+ case OSSL_QUIC_FRAME_TYPE_STREAM_OFF:
+ case OSSL_QUIC_FRAME_TYPE_STREAM_OFF_FIN:
+ case OSSL_QUIC_FRAME_TYPE_STREAM_OFF_LEN:
+ case OSSL_QUIC_FRAME_TYPE_STREAM_OFF_LEN_FIN:
+ /* STREAM frames are valid in 0RTT and 1RTT packets */
+ if (pkt_type != QUIC_PKT_TYPE_0RTT
+ && pkt_type != QUIC_PKT_TYPE_1RTT) {
+ ossl_quic_channel_raise_protocol_error(ch,
+ OSSL_QUIC_ERR_PROTOCOL_VIOLATION,
+ frame_type,
+ "STREAM valid only in 0/1-RTT");
+ return 0;
+ }
+ if (!depack_do_frame_stream(pkt, ch, parent_pkt, ackm_data,
+ frame_type, &datalen))
+ return 0;
+ break;
+
+ case OSSL_QUIC_FRAME_TYPE_MAX_DATA:
+ /* MAX_DATA frames are valid in 0RTT and 1RTT packets */
+ if (pkt_type != QUIC_PKT_TYPE_0RTT
+ && pkt_type != QUIC_PKT_TYPE_1RTT) {
+ ossl_quic_channel_raise_protocol_error(ch,
+ OSSL_QUIC_ERR_PROTOCOL_VIOLATION,
+ frame_type,
+ "MAX_DATA valid only in 0/1-RTT");
+ return 0;
+ }
+ if (!depack_do_frame_max_data(pkt, ch, ackm_data))
+ return 0;
+ break;
+ case OSSL_QUIC_FRAME_TYPE_MAX_STREAM_DATA:
+ /* MAX_STREAM_DATA frames are valid in 0RTT and 1RTT packets */
+ if (pkt_type != QUIC_PKT_TYPE_0RTT
+ && pkt_type != QUIC_PKT_TYPE_1RTT) {
+ ossl_quic_channel_raise_protocol_error(ch,
+ OSSL_QUIC_ERR_PROTOCOL_VIOLATION,
+ frame_type,
+ "MAX_STREAM_DATA valid only in 0/1-RTT");
+ return 0;
+ }
+ if (!depack_do_frame_max_stream_data(pkt, ch, ackm_data))
+ return 0;
+ break;
+
+ case OSSL_QUIC_FRAME_TYPE_MAX_STREAMS_BIDI:
+ case OSSL_QUIC_FRAME_TYPE_MAX_STREAMS_UNI:
+ /* MAX_STREAMS frames are valid in 0RTT and 1RTT packets */
+ if (pkt_type != QUIC_PKT_TYPE_0RTT
+ && pkt_type != QUIC_PKT_TYPE_1RTT) {
+ ossl_quic_channel_raise_protocol_error(ch,
+ OSSL_QUIC_ERR_PROTOCOL_VIOLATION,
+ frame_type,
+ "MAX_STREAMS valid only in 0/1-RTT");
+ return 0;
+ }
+ if (!depack_do_frame_max_streams(pkt, ch, ackm_data,
+ frame_type))
+ return 0;
+ break;
+
+ case OSSL_QUIC_FRAME_TYPE_DATA_BLOCKED:
+ /* DATA_BLOCKED frames are valid in 0RTT and 1RTT packets */
+ if (pkt_type != QUIC_PKT_TYPE_0RTT
+ && pkt_type != QUIC_PKT_TYPE_1RTT) {
+ ossl_quic_channel_raise_protocol_error(ch,
+ OSSL_QUIC_ERR_PROTOCOL_VIOLATION,
+ frame_type,
+ "DATA_BLOCKED valid only in 0/1-RTT");
+ return 0;
+ }
+ if (!depack_do_frame_data_blocked(pkt, ch, ackm_data))
+ return 0;
+ break;
+ case OSSL_QUIC_FRAME_TYPE_STREAM_DATA_BLOCKED:
+ /* STREAM_DATA_BLOCKED frames are valid in 0RTT and 1RTT packets */
+ if (pkt_type != QUIC_PKT_TYPE_0RTT
+ && pkt_type != QUIC_PKT_TYPE_1RTT) {
+ ossl_quic_channel_raise_protocol_error(ch,
+ OSSL_QUIC_ERR_PROTOCOL_VIOLATION,
+ frame_type,
+ "STREAM_DATA_BLOCKED valid only in 0/1-RTT");
+ return 0;
+ }
+ if (!depack_do_frame_stream_data_blocked(pkt, ch, ackm_data))
+ return 0;
+ break;
+
+ case OSSL_QUIC_FRAME_TYPE_STREAMS_BLOCKED_BIDI:
+ case OSSL_QUIC_FRAME_TYPE_STREAMS_BLOCKED_UNI:
+ /* STREAMS_BLOCKED frames are valid in 0RTT and 1RTT packets */
+ if (pkt_type != QUIC_PKT_TYPE_0RTT
+ && pkt_type != QUIC_PKT_TYPE_1RTT) {
+ ossl_quic_channel_raise_protocol_error(ch,
+ OSSL_QUIC_ERR_PROTOCOL_VIOLATION,
+ frame_type,
+                                                       "STREAMS_BLOCKED valid only in 0/1-RTT");
+ return 0;
+ }
+ if (!depack_do_frame_streams_blocked(pkt, ch, ackm_data,
+ frame_type))
+ return 0;
+ break;
+
+ case OSSL_QUIC_FRAME_TYPE_NEW_CONN_ID:
+ /* NEW_CONN_ID frames are valid in 0RTT and 1RTT packets */
+ if (pkt_type != QUIC_PKT_TYPE_0RTT
+ && pkt_type != QUIC_PKT_TYPE_1RTT) {
+ ossl_quic_channel_raise_protocol_error(ch,
+ OSSL_QUIC_ERR_PROTOCOL_VIOLATION,
+ frame_type,
+ "NEW_CONN_ID valid only in 0/1-RTT");
+                return 0;
+            }
+ if (!depack_do_frame_new_conn_id(pkt, ch, ackm_data))
+ return 0;
+ break;
+ case OSSL_QUIC_FRAME_TYPE_RETIRE_CONN_ID:
+ /* RETIRE_CONN_ID frames are valid in 0RTT and 1RTT packets */
+ if (pkt_type != QUIC_PKT_TYPE_0RTT
+ && pkt_type != QUIC_PKT_TYPE_1RTT) {
+ ossl_quic_channel_raise_protocol_error(ch,
+ OSSL_QUIC_ERR_PROTOCOL_VIOLATION,
+ frame_type,
+ "RETIRE_CONN_ID valid only in 0/1-RTT");
+ return 0;
+ }
+ if (!depack_do_frame_retire_conn_id(pkt, ch, ackm_data))
+ return 0;
+ break;
+ case OSSL_QUIC_FRAME_TYPE_PATH_CHALLENGE:
+ /* PATH_CHALLENGE frames are valid in 0RTT and 1RTT packets */
+ if (pkt_type != QUIC_PKT_TYPE_0RTT
+ && pkt_type != QUIC_PKT_TYPE_1RTT) {
+ ossl_quic_channel_raise_protocol_error(ch,
+ OSSL_QUIC_ERR_PROTOCOL_VIOLATION,
+ frame_type,
+ "PATH_CHALLENGE valid only in 0/1-RTT");
+ return 0;
+ }
+ if (!depack_do_frame_path_challenge(pkt, ch, ackm_data))
+ return 0;
+            break;
+ case OSSL_QUIC_FRAME_TYPE_PATH_RESPONSE:
+ /* PATH_RESPONSE frames are valid in 1RTT packets */
+ if (pkt_type != QUIC_PKT_TYPE_1RTT) {
+ ossl_quic_channel_raise_protocol_error(ch,
+ OSSL_QUIC_ERR_PROTOCOL_VIOLATION,
+ frame_type,
+                                                       "PATH_RESPONSE valid only in 1-RTT");
+ return 0;
+ }
+ if (!depack_do_frame_path_response(pkt, ch, ackm_data))
+ return 0;
+ break;
+
+ case OSSL_QUIC_FRAME_TYPE_CONN_CLOSE_APP:
+ /* CONN_CLOSE_APP frames are valid in 0RTT and 1RTT packets */
+ if (pkt_type != QUIC_PKT_TYPE_0RTT
+ && pkt_type != QUIC_PKT_TYPE_1RTT) {
+ ossl_quic_channel_raise_protocol_error(ch,
+ OSSL_QUIC_ERR_PROTOCOL_VIOLATION,
+ frame_type,
+ "CONN_CLOSE (APP) valid only in 0/1-RTT");
+ return 0;
+ }
+ /* FALLTHRU */
+ case OSSL_QUIC_FRAME_TYPE_CONN_CLOSE_TRANSPORT:
+ /* CONN_CLOSE_TRANSPORT frames are valid in all packets */
+ if (!depack_do_frame_conn_close(pkt, ch, frame_type))
+ return 0;
+ break;
+
+ case OSSL_QUIC_FRAME_TYPE_HANDSHAKE_DONE:
+ /* HANDSHAKE_DONE frames are valid in 1RTT packets */
+ if (pkt_type != QUIC_PKT_TYPE_1RTT) {
+ ossl_quic_channel_raise_protocol_error(ch,
+ OSSL_QUIC_ERR_PROTOCOL_VIOLATION,
+ frame_type,
+ "HANDSHAKE_DONE valid only in 1-RTT");
+ return 0;
+ }
+ if (!depack_do_frame_handshake_done(pkt, ch, ackm_data))
+ return 0;
+ break;
+
+ default:
+ /* Unknown frame type */
+ ossl_quic_channel_raise_protocol_error(ch,
+ OSSL_QUIC_ERR_FRAME_ENCODING_ERROR,
+ frame_type,
+ "Unknown frame type received");
+ return 0;
+ }
+
+ if (ch->msg_callback != NULL) {
+            int ctype = SSL3_RT_QUIC_FRAME_FULL;
+            size_t framelen = PACKET_data(pkt) - sof;
+
+ if (frame_type == OSSL_QUIC_FRAME_TYPE_PADDING) {
+ ctype = SSL3_RT_QUIC_FRAME_PADDING;
+ } else if (OSSL_QUIC_FRAME_TYPE_IS_STREAM(frame_type)
+ || frame_type == OSSL_QUIC_FRAME_TYPE_CRYPTO) {
+ ctype = SSL3_RT_QUIC_FRAME_HEADER;
+ framelen -= (size_t)datalen;
+ }
+
+ ch->msg_callback(0, OSSL_QUIC1_VERSION, ctype, sof, framelen,
+ ch->msg_callback_ssl, ch->msg_callback_arg);
+ }
+ }
+
+ return 1;
+}
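+
+/*
+ * Illustrative sketch (hypothetical application code, not part of the
+ * upstream file): the msg_callback invoked above is the one an application
+ * installs via the public SSL_set_msg_callback() API, e.g.
+ * SSL_set_msg_callback(ssl, example_quic_msg_cb). STREAM and CRYPTO frames
+ * are reported as SSL3_RT_QUIC_FRAME_HEADER with the payload excluded from
+ * |len|; PADDING as SSL3_RT_QUIC_FRAME_PADDING; everything else as
+ * SSL3_RT_QUIC_FRAME_FULL. Assumes <stdio.h> is available.
+ */
+static void example_quic_msg_cb(int write_p, int version, int content_type,
+                                const void *buf, size_t len, SSL *ssl,
+                                void *arg)
+{
+    if (content_type == SSL3_RT_QUIC_FRAME_FULL
+        || content_type == SSL3_RT_QUIC_FRAME_HEADER
+        || content_type == SSL3_RT_QUIC_FRAME_PADDING)
+        fprintf(stderr, "%s QUIC frame (%zu bytes)\n",
+                write_p ? "TX" : "RX", len);
+}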
+
+QUIC_NEEDS_LOCK
+int ossl_quic_handle_frames(QUIC_CHANNEL *ch, OSSL_QRX_PKT *qpacket)
+{
+ PACKET pkt;
+ OSSL_ACKM_RX_PKT ackm_data;
+ uint32_t enc_level;
+ size_t dgram_len = qpacket->datagram_len;
+
+ if (ch == NULL)
+ return 0;
+
+ ch->did_crypto_frame = 0;
+
+    /* Initialize |ackm_data| */
+ memset(&ackm_data, 0, sizeof(ackm_data));
+ /*
+ * ASSUMPTION: All packets that aren't special case have a
+ * packet number.
+ */
+ ackm_data.pkt_num = qpacket->pn;
+ ackm_data.time = qpacket->time;
+ enc_level = ossl_quic_pkt_type_to_enc_level(qpacket->hdr->type);
+ if (enc_level >= QUIC_ENC_LEVEL_NUM)
+ /*
+ * Retry and Version Negotiation packets should not be passed to this
+ * function.
+ */
+ return 0;
+
+ ackm_data.pkt_space = ossl_quic_enc_level_to_pn_space(enc_level);
+
+    /*
+     * RFC 9000 s. 8.1: We can consider the connection validated if we
+     * receive a packet from the client protected via handshake keys, meaning
+     * that the amplification limit no longer applies (i.e., we can mark the
+     * connection as validated). Otherwise, add the size of this packet to
+     * the unvalidated credit for the connection.
+     */
+ if (enc_level == QUIC_ENC_LEVEL_HANDSHAKE)
+ ossl_quic_tx_packetiser_set_validated(ch->txp);
+ else
+ ossl_quic_tx_packetiser_add_unvalidated_credit(ch->txp, dgram_len);
+
+ /* Now that special cases are out of the way, parse frames */
+ if (!PACKET_buf_init(&pkt, qpacket->hdr->data, qpacket->hdr->len)
+ || !depack_process_frames(ch, &pkt, qpacket,
+ enc_level,
+ qpacket->time,
+ &ackm_data))
+ return 0;
+
+ ossl_ackm_on_rx_packet(ch->ackm, &ackm_data);
+
+ return 1;
+}
diff --git a/crypto/openssl/ssl/quic/quic_sf_list.c b/crypto/openssl/ssl/quic/quic_sf_list.c
new file mode 100644
index 000000000000..0541a2ab6371
--- /dev/null
+++ b/crypto/openssl/ssl/quic/quic_sf_list.c
@@ -0,0 +1,334 @@
+/*
+ * Copyright 2022-2023 The OpenSSL Project Authors. All Rights Reserved.
+ *
+ * Licensed under the Apache License 2.0 (the "License"). You may not use
+ * this file except in compliance with the License. You can obtain a copy
+ * in the file LICENSE in the source distribution or at
+ * https://www.openssl.org/source/license.html
+ */
+
+#include "internal/uint_set.h"
+#include "internal/common.h"
+#include "internal/quic_sf_list.h"
+
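+/*
+ * A single received stream frame fragment. Frames are kept on a
+ * doubly-linked list ordered by range.start; |pkt| holds a reference on the
+ * QRX packet owning |data| until the data is copied out or moved elsewhere.
+ */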
+struct stream_frame_st {
+ struct stream_frame_st *prev, *next;
+ UINT_RANGE range;
+ OSSL_QRX_PKT *pkt;
+ const unsigned char *data;
+};
+
+static void stream_frame_free(SFRAME_LIST *fl, STREAM_FRAME *sf)
+{
+ if (fl->cleanse && sf->data != NULL)
+ OPENSSL_cleanse((unsigned char *)sf->data,
+ (size_t)(sf->range.end - sf->range.start));
+ ossl_qrx_pkt_release(sf->pkt);
+ OPENSSL_free(sf);
+}
+
+static STREAM_FRAME *stream_frame_new(UINT_RANGE *range, OSSL_QRX_PKT *pkt,
+ const unsigned char *data)
+{
+ STREAM_FRAME *sf = OPENSSL_zalloc(sizeof(*sf));
+
+ if (sf == NULL)
+ return NULL;
+
+ if (pkt != NULL)
+ ossl_qrx_pkt_up_ref(pkt);
+
+ sf->range = *range;
+ sf->pkt = pkt;
+ sf->data = data;
+
+ return sf;
+}
+
+void ossl_sframe_list_init(SFRAME_LIST *fl)
+{
+ memset(fl, 0, sizeof(*fl));
+}
+
+void ossl_sframe_list_destroy(SFRAME_LIST *fl)
+{
+ STREAM_FRAME *sf, *next_frame;
+
+ for (sf = fl->head; sf != NULL; sf = next_frame) {
+ next_frame = sf->next;
+ stream_frame_free(fl, sf);
+ }
+}
+
+static int append_frame(SFRAME_LIST *fl, UINT_RANGE *range,
+ OSSL_QRX_PKT *pkt,
+ const unsigned char *data)
+{
+ STREAM_FRAME *new_frame;
+
+ if ((new_frame = stream_frame_new(range, pkt, data)) == NULL)
+ return 0;
+ new_frame->prev = fl->tail;
+ if (fl->tail != NULL)
+ fl->tail->next = new_frame;
+ fl->tail = new_frame;
+ ++fl->num_frames;
+ return 1;
+}
+
+int ossl_sframe_list_insert(SFRAME_LIST *fl, UINT_RANGE *range,
+ OSSL_QRX_PKT *pkt,
+ const unsigned char *data, int fin)
+{
+ STREAM_FRAME *sf, *new_frame, *prev_frame, *next_frame;
+#ifndef NDEBUG
+ uint64_t curr_end = fl->tail != NULL ? fl->tail->range.end
+ : fl->offset;
+
+    /*
+     * FINAL_SIZE_ERROR is already enforced by QUIC FC; the assert below
+     * merely documents the invariant.
+     */
+ assert((!fin || curr_end <= range->end)
+ && (!fl->fin || curr_end >= range->end));
+#endif
+
+ if (fl->offset >= range->end)
+ goto end;
+
+ /* nothing there yet */
+ if (fl->tail == NULL) {
+ fl->tail = fl->head = stream_frame_new(range, pkt, data);
+ if (fl->tail == NULL)
+ return 0;
+
+ ++fl->num_frames;
+ goto end;
+ }
+
+ /* optimize insertion at the end */
+ if (fl->tail->range.start < range->start) {
+ if (fl->tail->range.end >= range->end)
+ goto end;
+
+ if (!append_frame(fl, range, pkt, data))
+ return 0;
+ goto end;
+ }
+
+ prev_frame = NULL;
+ for (sf = fl->head; sf != NULL && sf->range.start < range->start;
+ sf = sf->next)
+ prev_frame = sf;
+
+ if (!ossl_assert(sf != NULL))
+ /* frame list invariant broken */
+ return 0;
+
+ if (prev_frame != NULL && prev_frame->range.end >= range->end)
+ goto end;
+
+    /*
+     * Create the new frame before dropping any existing overlapping frames,
+     * so that an allocation failure leaves the list unmodified. The new
+     * frame may itself still be freed below if it turns out to be redundant.
+     */
+ new_frame = stream_frame_new(range, pkt, data);
+ if (new_frame == NULL)
+ return 0;
+
+ for (next_frame = sf;
+ next_frame != NULL && next_frame->range.end <= range->end;) {
+ STREAM_FRAME *drop_frame = next_frame;
+
+ next_frame = next_frame->next;
+ if (next_frame != NULL)
+ next_frame->prev = drop_frame->prev;
+ if (prev_frame != NULL)
+ prev_frame->next = drop_frame->next;
+ if (fl->head == drop_frame)
+ fl->head = next_frame;
+ if (fl->tail == drop_frame)
+ fl->tail = prev_frame;
+ --fl->num_frames;
+ stream_frame_free(fl, drop_frame);
+ }
+
+ if (next_frame != NULL) {
+ /* check whether the new_frame is redundant because there is no gap */
+ if (prev_frame != NULL
+ && next_frame->range.start <= prev_frame->range.end) {
+ stream_frame_free(fl, new_frame);
+ goto end;
+ }
+ next_frame->prev = new_frame;
+ } else {
+ fl->tail = new_frame;
+ }
+
+ new_frame->next = next_frame;
+ new_frame->prev = prev_frame;
+
+ if (prev_frame != NULL)
+ prev_frame->next = new_frame;
+ else
+ fl->head = new_frame;
+
+ ++fl->num_frames;
+
+ end:
+ fl->fin = fin || fl->fin;
+
+ return 1;
+}
+
+int ossl_sframe_list_peek(const SFRAME_LIST *fl, void **iter,
+ UINT_RANGE *range, const unsigned char **data,
+ int *fin)
+{
+ STREAM_FRAME *sf = *iter;
+ uint64_t start;
+
+ if (sf == NULL) {
+ start = fl->offset;
+ sf = fl->head;
+ } else {
+ start = sf->range.end;
+ sf = sf->next;
+ }
+
+ range->start = start;
+
+ if (sf == NULL || sf->range.start > start
+ || !ossl_assert(start < sf->range.end)) {
+ range->end = start;
+ *data = NULL;
+ *iter = NULL;
+ /* set fin only if we are at the end */
+ *fin = sf == NULL ? fl->fin : 0;
+ return 0;
+ }
+
+ range->end = sf->range.end;
+ if (sf->data != NULL)
+ *data = sf->data + (start - sf->range.start);
+ else
+ *data = NULL;
+ *fin = sf->next == NULL ? fl->fin : 0;
+ *iter = sf;
+ return 1;
+}
+
+int ossl_sframe_list_drop_frames(SFRAME_LIST *fl, uint64_t limit)
+{
+ STREAM_FRAME *sf;
+
+ /* offset cannot move back or past the data received */
+ if (!ossl_assert(limit >= fl->offset)
+ || !ossl_assert(fl->tail == NULL
+ || limit <= fl->tail->range.end)
+ || !ossl_assert(fl->tail != NULL
+ || limit == fl->offset))
+ return 0;
+
+ fl->offset = limit;
+
+ for (sf = fl->head; sf != NULL && sf->range.end <= limit;) {
+ STREAM_FRAME *drop_frame = sf;
+
+ sf = sf->next;
+ --fl->num_frames;
+ stream_frame_free(fl, drop_frame);
+ }
+ fl->head = sf;
+
+ if (sf != NULL)
+ sf->prev = NULL;
+ else
+ fl->tail = NULL;
+
+ fl->head_locked = 0;
+
+ return 1;
+}
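+
+/*
+ * Illustrative sketch (hypothetical caller, not part of the upstream file):
+ * walking the contiguous prefix with ossl_sframe_list_peek() and retiring it
+ * with ossl_sframe_list_drop_frames(), mirroring read_internal() in
+ * quic_rstream.c. |data| may be NULL for ranges whose bytes were moved out
+ * via ossl_sframe_list_move_data().
+ */
+static uint64_t sframe_consume_example(SFRAME_LIST *fl)
+{
+    void *iter = NULL;
+    UINT_RANGE range;
+    const unsigned char *data;
+    int fin = 0;
+    uint64_t limit = fl->offset;
+
+    while (ossl_sframe_list_peek(fl, &iter, &range, &data, &fin)) {
+        /* ... consume bytes in [range.start, range.end) from |data| ... */
+        limit = range.end;
+    }
+
+    if (limit > fl->offset && !ossl_sframe_list_drop_frames(fl, limit))
+        return 0;
+
+    return limit; /* the new fl->offset */
+}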
+
+int ossl_sframe_list_lock_head(SFRAME_LIST *fl, UINT_RANGE *range,
+ const unsigned char **data,
+ int *fin)
+{
+ int ret;
+ void *iter = NULL;
+
+ if (fl->head_locked)
+ return 0;
+
+ ret = ossl_sframe_list_peek(fl, &iter, range, data, fin);
+ if (ret)
+ fl->head_locked = 1;
+ return ret;
+}
+
+int ossl_sframe_list_is_head_locked(SFRAME_LIST *fl)
+{
+ return fl->head_locked;
+}
+
+int ossl_sframe_list_move_data(SFRAME_LIST *fl,
+ sframe_list_write_at_cb *write_at_cb,
+ void *cb_arg)
+{
+ STREAM_FRAME *sf = fl->head, *prev_frame = NULL;
+ uint64_t limit = fl->offset;
+
+ if (sf == NULL)
+ return 1;
+
+ if (fl->head_locked)
+ sf = sf->next;
+
+ for (; sf != NULL; sf = sf->next) {
+ size_t len;
+ const unsigned char *data = sf->data;
+
+ if (limit < sf->range.start)
+ limit = sf->range.start;
+
+ if (data != NULL) {
+ if (limit > sf->range.start)
+ data += (size_t)(limit - sf->range.start);
+ len = (size_t)(sf->range.end - limit);
+
+ if (!write_at_cb(limit, data, len, cb_arg))
+ /* data did not fit */
+ return 0;
+
+ if (fl->cleanse)
+ OPENSSL_cleanse((unsigned char *)sf->data,
+ (size_t)(sf->range.end - sf->range.start));
+
+ /* release the packet */
+ sf->data = NULL;
+ ossl_qrx_pkt_release(sf->pkt);
+ sf->pkt = NULL;
+ }
+
+ limit = sf->range.end;
+
+ /* merge contiguous frames */
+ if (prev_frame != NULL
+ && prev_frame->range.end >= sf->range.start) {
+ prev_frame->range.end = sf->range.end;
+ prev_frame->next = sf->next;
+
+ if (sf->next != NULL)
+ sf->next->prev = prev_frame;
+ else
+ fl->tail = prev_frame;
+
+ --fl->num_frames;
+ stream_frame_free(fl, sf);
+ sf = prev_frame;
+ continue;
+ }
+
+ prev_frame = sf;
+ }
+
+ return 1;
+}
diff --git a/crypto/openssl/ssl/quic/quic_srt_gen.c b/crypto/openssl/ssl/quic/quic_srt_gen.c
new file mode 100644
index 000000000000..233e4aa628cb
--- /dev/null
+++ b/crypto/openssl/ssl/quic/quic_srt_gen.c
@@ -0,0 +1,84 @@
+/*
+ * Copyright 2023 The OpenSSL Project Authors. All Rights Reserved.
+ *
+ * Licensed under the Apache License 2.0 (the "License"). You may not use
+ * this file except in compliance with the License. You can obtain a copy
+ * in the file LICENSE in the source distribution or at
+ * https://www.openssl.org/source/license.html
+ */
+#include "internal/quic_srt_gen.h"
+#include <openssl/core_names.h>
+#include <openssl/evp.h>
+
+struct quic_srt_gen_st {
+ EVP_MAC *mac;
+ EVP_MAC_CTX *mac_ctx;
+};
+
+/*
+ * Simple HMAC-SHA256-based stateless reset token generator.
+ */
+
+QUIC_SRT_GEN *ossl_quic_srt_gen_new(OSSL_LIB_CTX *libctx, const char *propq,
+ const unsigned char *key, size_t key_len)
+{
+ QUIC_SRT_GEN *srt_gen;
+ OSSL_PARAM params[3], *p = params;
+
+ if ((srt_gen = OPENSSL_zalloc(sizeof(*srt_gen))) == NULL)
+ return NULL;
+
+ if ((srt_gen->mac = EVP_MAC_fetch(libctx, "HMAC", propq)) == NULL)
+ goto err;
+
+ if ((srt_gen->mac_ctx = EVP_MAC_CTX_new(srt_gen->mac)) == NULL)
+ goto err;
+
+ *p++ = OSSL_PARAM_construct_utf8_string(OSSL_MAC_PARAM_DIGEST, "SHA256", 7);
+ if (propq != NULL)
+ *p++ = OSSL_PARAM_construct_utf8_string(OSSL_MAC_PARAM_PROPERTIES,
+ (char *)propq, 0);
+ *p++ = OSSL_PARAM_construct_end();
+
+ if (!EVP_MAC_init(srt_gen->mac_ctx, key, key_len, params))
+ goto err;
+
+ return srt_gen;
+
+err:
+ ossl_quic_srt_gen_free(srt_gen);
+ return NULL;
+}
+
+void ossl_quic_srt_gen_free(QUIC_SRT_GEN *srt_gen)
+{
+ if (srt_gen == NULL)
+ return;
+
+ EVP_MAC_CTX_free(srt_gen->mac_ctx);
+ EVP_MAC_free(srt_gen->mac);
+ OPENSSL_free(srt_gen);
+}
+
+int ossl_quic_srt_gen_calculate_token(QUIC_SRT_GEN *srt_gen,
+ const QUIC_CONN_ID *dcid,
+ QUIC_STATELESS_RESET_TOKEN *token)
+{
+ size_t outl = 0;
+ unsigned char mac[SHA256_DIGEST_LENGTH];
+
+ if (!EVP_MAC_init(srt_gen->mac_ctx, NULL, 0, NULL))
+ return 0;
+
+ if (!EVP_MAC_update(srt_gen->mac_ctx, (const unsigned char *)dcid->id,
+ dcid->id_len))
+ return 0;
+
+ if (!EVP_MAC_final(srt_gen->mac_ctx, mac, &outl, sizeof(mac))
+ || outl != sizeof(mac))
+ return 0;
+
+ assert(sizeof(mac) >= sizeof(token->token));
+ memcpy(token->token, mac, sizeof(token->token));
+ return 1;
+}
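+
+/*
+ * Illustrative sketch (hypothetical caller, not part of the upstream file):
+ * deriving the stateless reset token for a connection ID. The key would
+ * normally be a per-process random secret; the all-zero key and CID here are
+ * only for brevity.
+ */
+static int srt_gen_example(OSSL_LIB_CTX *libctx)
+{
+    static const unsigned char key[32] = { 0 };
+    QUIC_CONN_ID cid = { 0 };
+    QUIC_STATELESS_RESET_TOKEN token;
+    QUIC_SRT_GEN *gen = ossl_quic_srt_gen_new(libctx, NULL, key, sizeof(key));
+    int ok;
+
+    if (gen == NULL)
+        return 0;
+
+    cid.id_len = 8; /* cid.id bytes left zero for the example */
+    ok = ossl_quic_srt_gen_calculate_token(gen, &cid, &token);
+    ossl_quic_srt_gen_free(gen);
+    return ok;
+}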
diff --git a/crypto/openssl/ssl/quic/quic_srtm.c b/crypto/openssl/ssl/quic/quic_srtm.c
new file mode 100644
index 000000000000..3d0bfd97c7e0
--- /dev/null
+++ b/crypto/openssl/ssl/quic/quic_srtm.c
@@ -0,0 +1,565 @@
+/*
+ * Copyright 2023-2024 The OpenSSL Project Authors. All Rights Reserved.
+ *
+ * Licensed under the Apache License 2.0 (the "License"). You may not use
+ * this file except in compliance with the License. You can obtain a copy
+ * in the file LICENSE in the source distribution or at
+ * https://www.openssl.org/source/license.html
+ */
+
+#include "internal/quic_srtm.h"
+#include "internal/common.h"
+#include <openssl/lhash.h>
+#include <openssl/core_names.h>
+#include <openssl/rand.h>
+
+/*
+ * QUIC Stateless Reset Token Manager
+ * ==================================
+ */
+typedef struct srtm_item_st SRTM_ITEM;
+
+#define BLINDED_SRT_LEN 16
+
+DEFINE_LHASH_OF_EX(SRTM_ITEM);
+
+/*
+ * The SRTM is implemented using two LHASH instances: one mapping opaque
+ * pointers to item structures, and another mapping an SRT-derived value to
+ * an item structure. Multiple items with different seq_num values under a
+ * given opaque, and duplicate SRTs, are handled using sorted singly-linked
+ * lists.
+ *
+ * The O(n) insert and lookup performance is tolerated on the basis that the
+ * total number of entries for a given opaque (total number of extant CIDs for a
+ * connection) should be quite small, and the QUIC protocol allows us to place a
+ * hard limit on this via the active_connection_id_limit TPARAM. Thus there is
+ * no risk of a large number of SRTs needing to be registered under a given
+ * opaque.
+ *
+ * It is expected one SRTM will exist per QUIC_PORT and track all SRTs across
+ * all connections for that QUIC_PORT.
+ */
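+
+/*
+ * Illustrative usage sketch (not part of the build; error handling omitted).
+ * A port registers one token per (connection, CID sequence number) pair and
+ * matches candidate tokens from incoming packets against the table:
+ *
+ *   QUIC_SRTM *srtm = ossl_quic_srtm_new(libctx, NULL);
+ *
+ *   ossl_quic_srtm_add(srtm, conn, seq_num, &token);
+ *
+ *   if (ossl_quic_srtm_lookup(srtm, &candidate, 0, &opaque, &seq_num))
+ *       ...candidate is a known stateless reset token for 'opaque'...
+ *
+ *   ossl_quic_srtm_cull(srtm, conn);     (drops all tokens for a connection)
+ *   ossl_quic_srtm_free(srtm);
+ */
+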
+struct srtm_item_st {
+ SRTM_ITEM *next_by_srt_blinded; /* SORT BY opaque DESC */
+ SRTM_ITEM *next_by_seq_num; /* SORT BY seq_num DESC */
+ void *opaque; /* \__ unique identity for item */
+ uint64_t seq_num; /* / */
+ QUIC_STATELESS_RESET_TOKEN srt;
+ unsigned char srt_blinded[BLINDED_SRT_LEN]; /* H(srt) */
+
+#ifdef FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION
+ uint32_t debug_token;
+#endif
+};
+
+struct quic_srtm_st {
+ /* Crypto context used to calculate blinded SRTs H(srt). */
+ EVP_CIPHER_CTX *blind_ctx; /* kept with key */
+
+ LHASH_OF(SRTM_ITEM) *items_fwd; /* (opaque) -> SRTM_ITEM */
+ LHASH_OF(SRTM_ITEM) *items_rev; /* (H(srt)) -> SRTM_ITEM */
+
+ /*
+ * Monotonically transitions to 1 in event of allocation failure. The only
+ * valid operation on such an object is to free it.
+ */
+ unsigned int alloc_failed : 1;
+};
+
+static unsigned long items_fwd_hash(const SRTM_ITEM *item)
+{
+ return (unsigned long)(uintptr_t)item->opaque;
+}
+
+static int items_fwd_cmp(const SRTM_ITEM *a, const SRTM_ITEM *b)
+{
+ return a->opaque != b->opaque;
+}
+
+static unsigned long items_rev_hash(const SRTM_ITEM *item)
+{
+ /*
+ * srt_blinded has already been through a crypto-grade hash function, so we
+ * can just use bits from that.
+ */
+ unsigned long l;
+
+ memcpy(&l, item->srt_blinded, sizeof(l));
+ return l;
+}
+
+static int items_rev_cmp(const SRTM_ITEM *a, const SRTM_ITEM *b)
+{
+ /*
+ * We don't need to use CRYPTO_memcmp here as the relationship of
+ * srt_blinded to srt is already cryptographically obfuscated.
+ */
+ return memcmp(a->srt_blinded, b->srt_blinded, sizeof(a->srt_blinded));
+}
+
+static int srtm_check_lh(QUIC_SRTM *srtm, LHASH_OF(SRTM_ITEM) *lh)
+{
+ if (lh_SRTM_ITEM_error(lh)) {
+ srtm->alloc_failed = 1;
+ return 0;
+ }
+
+ return 1;
+}
+
+QUIC_SRTM *ossl_quic_srtm_new(OSSL_LIB_CTX *libctx, const char *propq)
+{
+ QUIC_SRTM *srtm = NULL;
+ unsigned char key[16];
+ EVP_CIPHER *ecb = NULL;
+
+ if (RAND_priv_bytes_ex(libctx, key, sizeof(key), sizeof(key) * 8) != 1)
+ goto err;
+
+ if ((srtm = OPENSSL_zalloc(sizeof(*srtm))) == NULL)
+ return NULL;
+
+ /* Use AES-128-ECB as a permutation over 128-bit SRTs. */
+ if ((ecb = EVP_CIPHER_fetch(libctx, "AES-128-ECB", propq)) == NULL)
+ goto err;
+
+ if ((srtm->blind_ctx = EVP_CIPHER_CTX_new()) == NULL)
+ goto err;
+
+ if (!EVP_EncryptInit_ex2(srtm->blind_ctx, ecb, key, NULL, NULL))
+ goto err;
+
+ EVP_CIPHER_free(ecb);
+ ecb = NULL;
+
+ /* Create mappings. */
+ if ((srtm->items_fwd = lh_SRTM_ITEM_new(items_fwd_hash, items_fwd_cmp)) == NULL
+ || (srtm->items_rev = lh_SRTM_ITEM_new(items_rev_hash, items_rev_cmp)) == NULL)
+ goto err;
+
+ return srtm;
+
+err:
+ /*
+ * No cleansing of key needed as blinding exists only for side channel
+ * mitigation.
+ */
+ ossl_quic_srtm_free(srtm);
+ EVP_CIPHER_free(ecb);
+ return NULL;
+}
+
+static void srtm_free_each(SRTM_ITEM *ihead)
+{
+ SRTM_ITEM *inext, *item = ihead;
+
+ for (item = item->next_by_seq_num; item != NULL; item = inext) {
+ inext = item->next_by_seq_num;
+ OPENSSL_free(item);
+ }
+
+ OPENSSL_free(ihead);
+}
+
+void ossl_quic_srtm_free(QUIC_SRTM *srtm)
+{
+ if (srtm == NULL)
+ return;
+
+ lh_SRTM_ITEM_free(srtm->items_rev);
+ if (srtm->items_fwd != NULL) {
+ lh_SRTM_ITEM_doall(srtm->items_fwd, srtm_free_each);
+ lh_SRTM_ITEM_free(srtm->items_fwd);
+ }
+
+ EVP_CIPHER_CTX_free(srtm->blind_ctx);
+ OPENSSL_free(srtm);
+}
+
+/*
+ * Find a SRTM_ITEM by (opaque, seq_num). Returns NULL if no match.
+ * If head_p is non-NULL, writes the head of the relevant opaque list (if any)
+ * to *head_p.
+ * If prev_p is non-NULL, writes the node preceding the match to *prev_p, or
+ * NULL if the match is the first item in its list.
+ */
+static SRTM_ITEM *srtm_find(QUIC_SRTM *srtm, void *opaque, uint64_t seq_num,
+ SRTM_ITEM **head_p, SRTM_ITEM **prev_p)
+{
+ SRTM_ITEM key, *item = NULL, *prev = NULL;
+
+ key.opaque = opaque;
+
+ item = lh_SRTM_ITEM_retrieve(srtm->items_fwd, &key);
+ if (head_p != NULL)
+ *head_p = item;
+
+ for (; item != NULL; prev = item, item = item->next_by_seq_num)
+ if (item->seq_num == seq_num) {
+ break;
+ } else if (item->seq_num < seq_num) {
+ /*
+ * List is sorted in descending order so there can't be any match
+ * after this.
+ */
+ item = NULL;
+ break;
+ }
+
+ if (prev_p != NULL)
+ *prev_p = prev;
+
+ return item;
+}
+
+/*
+ * Inserts a SRTM_ITEM into the singly-linked by-sequence-number list. The new
+ * head pointer is written to *new_head (which may be the same as the old
+ * head).
+ */
+static void sorted_insert_seq_num(SRTM_ITEM *head, SRTM_ITEM *item, SRTM_ITEM **new_head)
+{
+ uint64_t seq_num = item->seq_num;
+ SRTM_ITEM *cur = head, **fixup = new_head;
+
+ *new_head = head;
+
+ while (cur != NULL && cur->seq_num > seq_num) {
+ fixup = &cur->next_by_seq_num;
+ cur = cur->next_by_seq_num;
+ }
+
+ item->next_by_seq_num = *fixup;
+ *fixup = item;
+}
+
+/*
+ * Inserts a SRTM_ITEM into the singly-linked by-SRT list. The new head
+ * pointer is written to *new_head (which may be the same as the old head).
+ */
+static void sorted_insert_srt(SRTM_ITEM *head, SRTM_ITEM *item, SRTM_ITEM **new_head)
+{
+ uintptr_t opaque = (uintptr_t)item->opaque;
+ SRTM_ITEM *cur = head, **fixup = new_head;
+
+ *new_head = head;
+
+ while (cur != NULL && (uintptr_t)cur->opaque > opaque) {
+ fixup = &cur->next_by_srt_blinded;
+ cur = cur->next_by_srt_blinded;
+ }
+
+ item->next_by_srt_blinded = *fixup;
+ *fixup = item;
+}
+
+/*
+ * Computes the blinded SRT value used for internal lookup for side channel
+ * mitigation purposes. We compute this once as a cached value when an SRTM_ITEM
+ * is formed.
+ */
+static int srtm_compute_blinded(QUIC_SRTM *srtm, SRTM_ITEM *item,
+ const QUIC_STATELESS_RESET_TOKEN *token)
+{
+ int outl = 0;
+
+ /*
+ * We use AES-128-ECB as a permutation using a random key to facilitate
+ * blinding for side-channel purposes. Encrypt the token as a single AES
+ * block.
+ */
+ if (!EVP_EncryptUpdate(srtm->blind_ctx, item->srt_blinded, &outl,
+ (const unsigned char *)token, sizeof(*token)))
+ return 0;
+
+ if (!ossl_assert(outl == sizeof(*token)))
+ return 0;
+
+ return 1;
+}
+
+int ossl_quic_srtm_add(QUIC_SRTM *srtm, void *opaque, uint64_t seq_num,
+ const QUIC_STATELESS_RESET_TOKEN *token)
+{
+ SRTM_ITEM *item = NULL, *head = NULL, *new_head, *r_item;
+
+ if (srtm->alloc_failed)
+ return 0;
+
+ /* (opaque, seq_num) duplicates not allowed */
+ if ((item = srtm_find(srtm, opaque, seq_num, &head, NULL)) != NULL)
+ return 0;
+
+ if ((item = OPENSSL_zalloc(sizeof(*item))) == NULL)
+ return 0;
+
+ item->opaque = opaque;
+ item->seq_num = seq_num;
+ item->srt = *token;
+ if (!srtm_compute_blinded(srtm, item, &item->srt)) {
+ OPENSSL_free(item);
+ return 0;
+ }
+
+ /* Add to forward mapping. */
+ if (head == NULL) {
+ /* First item under this opaque */
+ lh_SRTM_ITEM_insert(srtm->items_fwd, item);
+ if (!srtm_check_lh(srtm, srtm->items_fwd)) {
+ OPENSSL_free(item);
+ return 0;
+ }
+ } else {
+ sorted_insert_seq_num(head, item, &new_head);
+ if (new_head != head) { /* head changed, update in lhash */
+ lh_SRTM_ITEM_insert(srtm->items_fwd, new_head);
+ if (!srtm_check_lh(srtm, srtm->items_fwd)) {
+ OPENSSL_free(item);
+ return 0;
+ }
+ }
+ }
+
+ /* Add to reverse mapping. */
+ r_item = lh_SRTM_ITEM_retrieve(srtm->items_rev, item);
+ if (r_item == NULL) {
+ /* First item under this blinded SRT */
+ lh_SRTM_ITEM_insert(srtm->items_rev, item);
+ if (!srtm_check_lh(srtm, srtm->items_rev))
+ /*
+ * Can't free the item now as we would have to undo the insertion
+ * into the forward mapping which would require an insert operation
+             * to restore the previous value, which might also fail. However,
+ * the item will be freed OK when we free the entire SRTM.
+ */
+ return 0;
+ } else {
+ sorted_insert_srt(r_item, item, &new_head);
+ if (new_head != r_item) { /* head changed, update in lhash */
+ lh_SRTM_ITEM_insert(srtm->items_rev, new_head);
+ if (!srtm_check_lh(srtm, srtm->items_rev))
+ /* As above. */
+ return 0;
+ }
+ }
+
+ return 1;
+}
+
+/* Remove item from reverse mapping. */
+static int srtm_remove_from_rev(QUIC_SRTM *srtm, SRTM_ITEM *item)
+{
+ SRTM_ITEM *rh_item;
+
+ rh_item = lh_SRTM_ITEM_retrieve(srtm->items_rev, item);
+ assert(rh_item != NULL);
+ if (rh_item == item) {
+ /*
+ * Change lhash to point to item after this one, or remove the entry if
+ * this is the last one.
+ */
+ if (item->next_by_srt_blinded != NULL) {
+ lh_SRTM_ITEM_insert(srtm->items_rev, item->next_by_srt_blinded);
+ if (!srtm_check_lh(srtm, srtm->items_rev))
+ return 0;
+ } else {
+ lh_SRTM_ITEM_delete(srtm->items_rev, item);
+ }
+ } else {
+ /* Find our entry in the SRT list */
+ for (; rh_item->next_by_srt_blinded != item;
+ rh_item = rh_item->next_by_srt_blinded);
+ rh_item->next_by_srt_blinded = item->next_by_srt_blinded;
+ }
+
+ return 1;
+}
+
+int ossl_quic_srtm_remove(QUIC_SRTM *srtm, void *opaque, uint64_t seq_num)
+{
+ SRTM_ITEM *item, *prev = NULL;
+
+ if (srtm->alloc_failed)
+ return 0;
+
+ if ((item = srtm_find(srtm, opaque, seq_num, NULL, &prev)) == NULL)
+ /* No match */
+ return 0;
+
+ /* Remove from forward mapping. */
+ if (prev == NULL) {
+ /*
+ * Change lhash to point to item after this one, or remove the entry if
+ * this is the last one.
+ */
+ if (item->next_by_seq_num != NULL) {
+ lh_SRTM_ITEM_insert(srtm->items_fwd, item->next_by_seq_num);
+ if (!srtm_check_lh(srtm, srtm->items_fwd))
+ return 0;
+ } else {
+ lh_SRTM_ITEM_delete(srtm->items_fwd, item);
+ }
+ } else {
+ prev->next_by_seq_num = item->next_by_seq_num;
+ }
+
+ /* Remove from reverse mapping. */
+ if (!srtm_remove_from_rev(srtm, item))
+ return 0;
+
+ OPENSSL_free(item);
+ return 1;
+}
+
+int ossl_quic_srtm_cull(QUIC_SRTM *srtm, void *opaque)
+{
+ SRTM_ITEM key, *item = NULL, *inext, *ihead;
+
+ key.opaque = opaque;
+
+ if (srtm->alloc_failed)
+ return 0;
+
+ if ((ihead = lh_SRTM_ITEM_retrieve(srtm->items_fwd, &key)) == NULL)
+ return 1; /* nothing removed is a success condition */
+
+ for (item = ihead; item != NULL; item = inext) {
+ inext = item->next_by_seq_num;
+ if (item != ihead) {
+ srtm_remove_from_rev(srtm, item);
+ OPENSSL_free(item);
+ }
+ }
+
+ lh_SRTM_ITEM_delete(srtm->items_fwd, ihead);
+ srtm_remove_from_rev(srtm, ihead);
+ OPENSSL_free(ihead);
+ return 1;
+}
+
+int ossl_quic_srtm_lookup(QUIC_SRTM *srtm,
+ const QUIC_STATELESS_RESET_TOKEN *token,
+ size_t idx,
+ void **opaque, uint64_t *seq_num)
+{
+ SRTM_ITEM key, *item;
+
+ if (srtm->alloc_failed)
+ return 0;
+
+ if (!srtm_compute_blinded(srtm, &key, token))
+ return 0;
+
+ item = lh_SRTM_ITEM_retrieve(srtm->items_rev, &key);
+ for (; idx > 0 && item != NULL; --idx, item = item->next_by_srt_blinded);
+ if (item == NULL)
+ return 0;
+
+ if (opaque != NULL)
+ *opaque = item->opaque;
+ if (seq_num != NULL)
+ *seq_num = item->seq_num;
+
+ return 1;
+}
+
+#ifdef FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION
+
+static uint32_t token_next = 0x5eadbeef;
+static size_t tokens_seen;
+
+struct check_args {
+ uint32_t token;
+ int mode;
+};
+
+static void check_mark(SRTM_ITEM *item, void *arg)
+{
+ struct check_args *arg_ = arg;
+ uint32_t token = arg_->token;
+ uint64_t prev_seq_num = 0;
+ void *prev_opaque = NULL;
+ int have_prev = 0;
+
+ assert(item != NULL);
+
+ while (item != NULL) {
+ if (have_prev) {
+ assert(!(item->opaque == prev_opaque && item->seq_num == prev_seq_num));
+ if (!arg_->mode)
+ assert(item->opaque != prev_opaque || item->seq_num < prev_seq_num);
+ }
+
+ ++tokens_seen;
+ item->debug_token = token;
+ prev_opaque = item->opaque;
+ prev_seq_num = item->seq_num;
+ have_prev = 1;
+
+ if (arg_->mode)
+ item = item->next_by_srt_blinded;
+ else
+ item = item->next_by_seq_num;
+ }
+}
+
+static void check_count(SRTM_ITEM *item, void *arg)
+{
+ struct check_args *arg_ = arg;
+ uint32_t token = arg_->token;
+
+ assert(item != NULL);
+
+ while (item != NULL) {
+ ++tokens_seen;
+ assert(item->debug_token == token);
+
+ if (arg_->mode)
+ item = item->next_by_seq_num;
+ else
+ item = item->next_by_srt_blinded;
+ }
+}
+
+#endif
+
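+/*
+ * Debug-only consistency check (a no-op outside of fuzzing builds). It marks
+ * every item reachable via the forward mapping with a fresh token, verifies
+ * that exactly the same number of items carrying that token is reachable via
+ * the reverse mapping, and then repeats the process in the other direction.
+ */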
+void ossl_quic_srtm_check(const QUIC_SRTM *srtm)
+{
+#ifdef FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION
+ struct check_args args = {0};
+ size_t tokens_expected, tokens_expected_old;
+
+ args.token = token_next;
+ ++token_next;
+
+ assert(srtm != NULL);
+ assert(srtm->blind_ctx != NULL);
+ assert(srtm->items_fwd != NULL);
+ assert(srtm->items_rev != NULL);
+
+ tokens_seen = 0;
+ lh_SRTM_ITEM_doall_arg(srtm->items_fwd, check_mark, &args);
+
+ tokens_expected = tokens_seen;
+ tokens_seen = 0;
+ lh_SRTM_ITEM_doall_arg(srtm->items_rev, check_count, &args);
+
+ assert(tokens_seen == tokens_expected);
+ tokens_expected_old = tokens_expected;
+
+ args.token = token_next;
+ ++token_next;
+
+ args.mode = 1;
+ tokens_seen = 0;
+ lh_SRTM_ITEM_doall_arg(srtm->items_rev, check_mark, &args);
+
+ tokens_expected = tokens_seen;
+ tokens_seen = 0;
+ lh_SRTM_ITEM_doall_arg(srtm->items_fwd, check_count, &args);
+
+ assert(tokens_seen == tokens_expected);
+ assert(tokens_seen == tokens_expected_old);
+#endif
+}
diff --git a/crypto/openssl/ssl/quic/quic_sstream.c b/crypto/openssl/ssl/quic/quic_sstream.c
new file mode 100644
index 000000000000..1f0b5497fce5
--- /dev/null
+++ b/crypto/openssl/ssl/quic/quic_sstream.c
@@ -0,0 +1,424 @@
+/*
+ * Copyright 2022-2023 The OpenSSL Project Authors. All Rights Reserved.
+ *
+ * Licensed under the Apache License 2.0 (the "License"). You may not use
+ * this file except in compliance with the License. You can obtain a copy
+ * in the file LICENSE in the source distribution or at
+ * https://www.openssl.org/source/license.html
+ */
+
+#include "internal/quic_stream.h"
+#include "internal/uint_set.h"
+#include "internal/common.h"
+#include "internal/ring_buf.h"
+
+/*
+ * QUIC Send Stream
+ * ================
+ */
+struct quic_sstream_st {
+ struct ring_buf ring_buf;
+
+ /*
+ * Any logical byte in the stream is in one of these states:
+ *
+ * - NEW: The byte has not yet been transmitted, or has been lost and is
+ * in need of retransmission.
+ *
+ * - IN_FLIGHT: The byte has been transmitted but is awaiting
+ * acknowledgement. We continue to store the data in case we return
+ * to the NEW state.
+ *
+ * - ACKED: The byte has been acknowledged and we can cease storing it.
+ * We do not necessarily cull it immediately, so there may be a delay
+ * between reaching the ACKED state and the buffer space actually being
+ * recycled.
+ *
+ * A logical byte in the stream is
+ *
+ * - in the NEW state if it is in new_set;
+     *   - in the ACKED state if it is in acked_set
+     *     (and may or may not have been culled);
+     *   - in the IN_FLIGHT state otherwise.
+ *
+ * Invariant: No logical byte is ever in both new_set and acked_set.
+ */
+ UINT_SET new_set, acked_set;
+
+ /*
+ * The current size of the stream is ring_buf.head_offset. If
+ * have_final_size is true, this is also the final size of the stream.
+ */
+ unsigned int have_final_size : 1;
+ unsigned int sent_final_size : 1;
+ unsigned int acked_final_size : 1;
+ unsigned int cleanse : 1;
+};
+
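+/*
+ * Illustrative lifecycle of a byte range (not part of the build; error
+ * handling omitted, with start/end denoting the range actually sent):
+ *
+ *   size_t consumed;
+ *
+ *   ossl_quic_sstream_append(qss, buf, buf_len, &consumed);     bytes -> NEW
+ *   ossl_quic_sstream_get_stream_frame(qss, 0, &hdr, iov, &num_iov);
+ *   ossl_quic_sstream_mark_transmitted(qss, start, end);    NEW -> IN_FLIGHT
+ *
+ * and then, depending on the fate of the packet, either:
+ *
+ *   ossl_quic_sstream_mark_acked(qss, start, end);        IN_FLIGHT -> ACKED
+ *   ossl_quic_sstream_mark_lost(qss, start, end);         IN_FLIGHT -> NEW
+ */
+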
+static void qss_cull(QUIC_SSTREAM *qss);
+
+QUIC_SSTREAM *ossl_quic_sstream_new(size_t init_buf_size)
+{
+ QUIC_SSTREAM *qss;
+
+ qss = OPENSSL_zalloc(sizeof(QUIC_SSTREAM));
+ if (qss == NULL)
+ return NULL;
+
+ ring_buf_init(&qss->ring_buf);
+ if (!ring_buf_resize(&qss->ring_buf, init_buf_size, 0)) {
+ ring_buf_destroy(&qss->ring_buf, 0);
+ OPENSSL_free(qss);
+ return NULL;
+ }
+
+ ossl_uint_set_init(&qss->new_set);
+ ossl_uint_set_init(&qss->acked_set);
+ return qss;
+}
+
+void ossl_quic_sstream_free(QUIC_SSTREAM *qss)
+{
+ if (qss == NULL)
+ return;
+
+ ossl_uint_set_destroy(&qss->new_set);
+ ossl_uint_set_destroy(&qss->acked_set);
+ ring_buf_destroy(&qss->ring_buf, qss->cleanse);
+ OPENSSL_free(qss);
+}
+
+int ossl_quic_sstream_get_stream_frame(QUIC_SSTREAM *qss,
+ size_t skip,
+ OSSL_QUIC_FRAME_STREAM *hdr,
+ OSSL_QTX_IOVEC *iov,
+ size_t *num_iov)
+{
+ size_t num_iov_ = 0, src_len = 0, total_len = 0, i;
+ uint64_t max_len;
+ const unsigned char *src = NULL;
+ UINT_SET_ITEM *range = ossl_list_uint_set_head(&qss->new_set);
+
+ if (*num_iov < 2)
+ return 0;
+
+ for (i = 0; i < skip && range != NULL; ++i)
+ range = ossl_list_uint_set_next(range);
+
+ if (range == NULL) {
+ if (i < skip)
+ /* Don't return FIN for infinitely increasing skip */
+ return 0;
+
+ /* No new bytes to send, but we might have a FIN */
+ if (!qss->have_final_size || qss->sent_final_size)
+ return 0;
+
+ hdr->offset = qss->ring_buf.head_offset;
+ hdr->len = 0;
+ hdr->is_fin = 1;
+ *num_iov = 0;
+ return 1;
+ }
+
+ /*
+ * We can only send a contiguous range of logical bytes in a single
+ * stream frame, so limit ourselves to the range of the first set entry.
+ *
+     * The set never contains two adjacent entries (they are coalesced into
+     * one), so we don't have to worry about that here.
+ */
+ max_len = range->range.end - range->range.start + 1;
+
+ for (i = 0;; ++i) {
+ if (total_len >= max_len)
+ break;
+
+ if (!ring_buf_get_buf_at(&qss->ring_buf,
+ range->range.start + total_len,
+ &src, &src_len))
+ return 0;
+
+ if (src_len == 0)
+ break;
+
+ assert(i < 2);
+
+ if (total_len + src_len > max_len)
+ src_len = (size_t)(max_len - total_len);
+
+ iov[num_iov_].buf = src;
+ iov[num_iov_].buf_len = src_len;
+
+ total_len += src_len;
+ ++num_iov_;
+ }
+
+ hdr->offset = range->range.start;
+ hdr->len = total_len;
+ hdr->is_fin = qss->have_final_size
+ && hdr->offset + hdr->len == qss->ring_buf.head_offset;
+
+ *num_iov = num_iov_;
+ return 1;
+}
+
+int ossl_quic_sstream_has_pending(QUIC_SSTREAM *qss)
+{
+ OSSL_QUIC_FRAME_STREAM shdr;
+ OSSL_QTX_IOVEC iov[2];
+ size_t num_iov = OSSL_NELEM(iov);
+
+ return ossl_quic_sstream_get_stream_frame(qss, 0, &shdr, iov, &num_iov);
+}
+
+uint64_t ossl_quic_sstream_get_cur_size(QUIC_SSTREAM *qss)
+{
+ return qss->ring_buf.head_offset;
+}
+
+int ossl_quic_sstream_mark_transmitted(QUIC_SSTREAM *qss,
+ uint64_t start,
+ uint64_t end)
+{
+ UINT_RANGE r;
+
+ r.start = start;
+ r.end = end;
+
+ if (!ossl_uint_set_remove(&qss->new_set, &r))
+ return 0;
+
+ return 1;
+}
+
+int ossl_quic_sstream_mark_transmitted_fin(QUIC_SSTREAM *qss,
+ uint64_t final_size)
+{
+ /*
+ * We do not really need final_size since we already know the size of the
+ * stream, but this serves as a sanity check.
+ */
+ if (!qss->have_final_size || final_size != qss->ring_buf.head_offset)
+ return 0;
+
+ qss->sent_final_size = 1;
+ return 1;
+}
+
+int ossl_quic_sstream_mark_lost(QUIC_SSTREAM *qss,
+ uint64_t start,
+ uint64_t end)
+{
+ UINT_RANGE r;
+ r.start = start;
+ r.end = end;
+
+ /*
+ * We lost a range of stream data bytes, so reinsert them into the new set,
+ * so that they are returned once more by ossl_quic_sstream_get_stream_frame.
+ */
+ if (!ossl_uint_set_insert(&qss->new_set, &r))
+ return 0;
+
+ return 1;
+}
+
+int ossl_quic_sstream_mark_lost_fin(QUIC_SSTREAM *qss)
+{
+ if (qss->acked_final_size)
+ /* Does not make sense to lose a FIN after it has been ACKed */
+ return 0;
+
+ /* FIN was lost, so we need to transmit it again. */
+ qss->sent_final_size = 0;
+ return 1;
+}
+
+int ossl_quic_sstream_mark_acked(QUIC_SSTREAM *qss,
+ uint64_t start,
+ uint64_t end)
+{
+ UINT_RANGE r;
+ r.start = start;
+ r.end = end;
+
+ if (!ossl_uint_set_insert(&qss->acked_set, &r))
+ return 0;
+
+ qss_cull(qss);
+ return 1;
+}
+
+int ossl_quic_sstream_mark_acked_fin(QUIC_SSTREAM *qss)
+{
+ if (!qss->have_final_size)
+ /* Cannot ack final size before we have a final size */
+ return 0;
+
+ qss->acked_final_size = 1;
+ return 1;
+}
+
+void ossl_quic_sstream_fin(QUIC_SSTREAM *qss)
+{
+ if (qss->have_final_size)
+ return;
+
+ qss->have_final_size = 1;
+}
+
+int ossl_quic_sstream_get_final_size(QUIC_SSTREAM *qss, uint64_t *final_size)
+{
+ if (!qss->have_final_size)
+ return 0;
+
+ if (final_size != NULL)
+ *final_size = qss->ring_buf.head_offset;
+
+ return 1;
+}
+
+int ossl_quic_sstream_append(QUIC_SSTREAM *qss,
+ const unsigned char *buf,
+ size_t buf_len,
+ size_t *consumed)
+{
+ size_t l, consumed_ = 0;
+ UINT_RANGE r;
+ struct ring_buf old_ring_buf = qss->ring_buf;
+
+ if (qss->have_final_size) {
+ *consumed = 0;
+ return 0;
+ }
+
+ /*
+ * Note: It is assumed that ossl_quic_sstream_append will be called during a
+ * call to e.g. SSL_write and this function is therefore designed to support
+ * such semantics. In particular, the buffer pointed to by buf is only
+ * assumed to be valid for the duration of this call, therefore we must copy
+ * the data here. We will later copy-and-encrypt the data during packet
+     * encryption, so this is a two-copy design. Supporting a one-copy design
+     * in the future would require applications to use a different kind of
+     * API, with corresponding enhancements to this code.
+ */
+ while (buf_len > 0) {
+ l = ring_buf_push(&qss->ring_buf, buf, buf_len);
+ if (l == 0)
+ break;
+
+ buf += l;
+ buf_len -= l;
+ consumed_ += l;
+ }
+
+ if (consumed_ > 0) {
+ r.start = old_ring_buf.head_offset;
+ r.end = r.start + consumed_ - 1;
+ assert(r.end + 1 == qss->ring_buf.head_offset);
+ if (!ossl_uint_set_insert(&qss->new_set, &r)) {
+ qss->ring_buf = old_ring_buf;
+ *consumed = 0;
+ return 0;
+ }
+ }
+
+ *consumed = consumed_;
+ return 1;
+}
+
+static void qss_cull(QUIC_SSTREAM *qss)
+{
+ UINT_SET_ITEM *h = ossl_list_uint_set_head(&qss->acked_set);
+
+ /*
+ * Potentially cull data from our ring buffer. This can happen once data has
+ * been ACKed and we know we are never going to have to transmit it again.
+ *
+ * Since we use a ring buffer design for simplicity, we cannot cull byte n +
+ * k (for k > 0) from the ring buffer until byte n has also been culled.
+     * This means that if parts of the stream get acknowledged out of order, we
+     * might keep around some data we technically no longer need. The
+ * impact of this is likely to be small and limited to quite a short
+ * duration, and doesn't justify the use of a more complex design.
+ */
+
+ /*
+ * We only need to check the first range entry in the integer set because we
+ * can only cull contiguous areas at the start of the ring buffer anyway.
+ */
+ if (h != NULL)
+ ring_buf_cpop_range(&qss->ring_buf, h->range.start, h->range.end,
+ qss->cleanse);
+}
+
+int ossl_quic_sstream_set_buffer_size(QUIC_SSTREAM *qss, size_t num_bytes)
+{
+ return ring_buf_resize(&qss->ring_buf, num_bytes, qss->cleanse);
+}
+
+size_t ossl_quic_sstream_get_buffer_size(QUIC_SSTREAM *qss)
+{
+ return qss->ring_buf.alloc;
+}
+
+size_t ossl_quic_sstream_get_buffer_used(QUIC_SSTREAM *qss)
+{
+ return ring_buf_used(&qss->ring_buf);
+}
+
+size_t ossl_quic_sstream_get_buffer_avail(QUIC_SSTREAM *qss)
+{
+ return ring_buf_avail(&qss->ring_buf);
+}
+
+int ossl_quic_sstream_is_totally_acked(QUIC_SSTREAM *qss)
+{
+ UINT_RANGE r;
+ uint64_t cur_size;
+
+ if (qss->have_final_size && !qss->acked_final_size)
+ return 0;
+
+ if (ossl_quic_sstream_get_cur_size(qss) == 0)
+ return 1;
+
+ if (ossl_list_uint_set_num(&qss->acked_set) != 1)
+ return 0;
+
+ r = ossl_list_uint_set_head(&qss->acked_set)->range;
+ cur_size = qss->ring_buf.head_offset;
+
+ /*
+ * The invariants of UINT_SET guarantee a single list element if we have a
+ * single contiguous range, which is what we should have if everything has
+ * been acked.
+ */
+ assert(r.end + 1 <= cur_size);
+ return r.start == 0 && r.end + 1 == cur_size;
+}
+
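+/*
+ * Truncates the iovec array so that it describes exactly len bytes. For
+ * example, given two buffers of 10 bytes each and len == 12, the first
+ * buf_len stays at 10, the second becomes 2, and any subsequent buffers
+ * would be set to 0.
+ */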
+void ossl_quic_sstream_adjust_iov(size_t len,
+ OSSL_QTX_IOVEC *iov,
+ size_t num_iov)
+{
+ size_t running = 0, i, iovlen;
+
+ for (i = 0, running = 0; i < num_iov; ++i) {
+ iovlen = iov[i].buf_len;
+
+ if (running >= len)
+ iov[i].buf_len = 0;
+ else if (running + iovlen > len)
+ iov[i].buf_len = len - running;
+
+ running += iovlen;
+ }
+}
+
+void ossl_quic_sstream_set_cleanse(QUIC_SSTREAM *qss, int cleanse)
+{
+ qss->cleanse = cleanse;
+}
diff --git a/crypto/openssl/ssl/quic/quic_statm.c b/crypto/openssl/ssl/quic/quic_statm.c
new file mode 100644
index 000000000000..f1e0f6591411
--- /dev/null
+++ b/crypto/openssl/ssl/quic/quic_statm.c
@@ -0,0 +1,76 @@
+/*
+ * Copyright 2022-2023 The OpenSSL Project Authors. All Rights Reserved.
+ *
+ * Licensed under the Apache License 2.0 (the "License"). You may not use
+ * this file except in compliance with the License. You can obtain a copy
+ * in the file LICENSE in the source distribution or at
+ * https://www.openssl.org/source/license.html
+ */
+
+#include "internal/quic_statm.h"
+
+void ossl_statm_update_rtt(OSSL_STATM *statm,
+ OSSL_TIME ack_delay,
+ OSSL_TIME override_latest_rtt)
+{
+ OSSL_TIME adjusted_rtt, latest_rtt = override_latest_rtt;
+
+ /* Use provided RTT value, or else last RTT value. */
+ if (ossl_time_is_zero(latest_rtt))
+ latest_rtt = statm->latest_rtt;
+ else
+ statm->latest_rtt = latest_rtt;
+
+ if (!statm->have_first_sample) {
+ statm->min_rtt = latest_rtt;
+ statm->smoothed_rtt = latest_rtt;
+ statm->rtt_variance = ossl_time_divide(latest_rtt, 2);
+ statm->have_first_sample = 1;
+ return;
+ }
+
+ /* Update minimum RTT. */
+ if (ossl_time_compare(latest_rtt, statm->min_rtt) < 0)
+ statm->min_rtt = latest_rtt;
+
+ /*
+ * Enforcement of max_ack_delay is the responsibility of
+ * the caller as it is context-dependent.
+ */
+
+ adjusted_rtt = latest_rtt;
+ if (ossl_time_compare(latest_rtt, ossl_time_add(statm->min_rtt, ack_delay)) >= 0)
+ adjusted_rtt = ossl_time_subtract(latest_rtt, ack_delay);
+
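+    /*
+     * RFC 9002 s. 5.3 exponentially-weighted moving averages:
+     *
+     *   rttvar       = 3/4 * rttvar + 1/4 * |smoothed_rtt - adjusted_rtt|
+     *   smoothed_rtt = 7/8 * smoothed_rtt + 1/8 * adjusted_rtt
+     */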
+ statm->rtt_variance = ossl_time_divide(ossl_time_add(ossl_time_multiply(statm->rtt_variance, 3),
+ ossl_time_abs_difference(statm->smoothed_rtt,
+ adjusted_rtt)), 4);
+ statm->smoothed_rtt = ossl_time_divide(ossl_time_add(ossl_time_multiply(statm->smoothed_rtt, 7),
+ adjusted_rtt), 8);
+}
+
+/* kInitialRtt: initial RTT value recommended by RFC 9002. */
+#define K_INITIAL_RTT ossl_ms2time(333)
+
+int ossl_statm_init(OSSL_STATM *statm)
+{
+ statm->smoothed_rtt = K_INITIAL_RTT;
+ statm->latest_rtt = ossl_time_zero();
+ statm->min_rtt = ossl_time_infinite();
+ statm->rtt_variance = ossl_time_divide(K_INITIAL_RTT, 2);
+ statm->have_first_sample = 0;
+ return 1;
+}
+
+void ossl_statm_destroy(OSSL_STATM *statm)
+{
+ /* No-op. */
+}
+
+void ossl_statm_get_rtt_info(OSSL_STATM *statm, OSSL_RTT_INFO *rtt_info)
+{
+ rtt_info->min_rtt = statm->min_rtt;
+ rtt_info->latest_rtt = statm->latest_rtt;
+ rtt_info->smoothed_rtt = statm->smoothed_rtt;
+ rtt_info->rtt_variance = statm->rtt_variance;
+}
diff --git a/crypto/openssl/ssl/quic/quic_stream_map.c b/crypto/openssl/ssl/quic/quic_stream_map.c
new file mode 100644
index 000000000000..64700b09d95e
--- /dev/null
+++ b/crypto/openssl/ssl/quic/quic_stream_map.c
@@ -0,0 +1,861 @@
+/*
+ * Copyright 2022-2024 The OpenSSL Project Authors. All Rights Reserved.
+ *
+ * Licensed under the Apache License 2.0 (the "License"). You may not use
+ * this file except in compliance with the License. You can obtain a copy
+ * in the file LICENSE in the source distribution or at
+ * https://www.openssl.org/source/license.html
+ */
+
+#include "internal/quic_stream_map.h"
+#include "internal/nelem.h"
+
+/*
+ * QUIC Stream Map
+ * ===============
+ */
+DEFINE_LHASH_OF_EX(QUIC_STREAM);
+
+static void shutdown_flush_done(QUIC_STREAM_MAP *qsm, QUIC_STREAM *qs);
+
+/* Circular list management. */
+static void list_insert_tail(QUIC_STREAM_LIST_NODE *l,
+ QUIC_STREAM_LIST_NODE *n)
+{
+ /* Must not be in list. */
+ assert(n->prev == NULL && n->next == NULL
+ && l->prev != NULL && l->next != NULL);
+
+ n->prev = l->prev;
+ n->prev->next = n;
+ l->prev = n;
+ n->next = l;
+}
+
+static void list_remove(QUIC_STREAM_LIST_NODE *l,
+ QUIC_STREAM_LIST_NODE *n)
+{
+ assert(n->prev != NULL && n->next != NULL
+ && n->prev != n && n->next != n);
+
+ n->prev->next = n->next;
+ n->next->prev = n->prev;
+ n->next = n->prev = NULL;
+}
+
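+/*
+ * Returns the QUIC_STREAM following node n in the circular list l, recovering
+ * the stream from its embedded list node via a container_of-style offset
+ * calculation (off is the offset of the node member within QUIC_STREAM). The
+ * list head l is a sentinel and is skipped over, so iteration wraps around
+ * from the last element to the first; NULL is returned only if the list is
+ * empty. Callers detect a complete cycle by comparing against the first
+ * element they were given.
+ */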
+static QUIC_STREAM *list_next(QUIC_STREAM_LIST_NODE *l, QUIC_STREAM_LIST_NODE *n,
+ size_t off)
+{
+ assert(n->prev != NULL && n->next != NULL
+ && (n == l || (n->prev != n && n->next != n))
+ && l->prev != NULL && l->next != NULL);
+
+ n = n->next;
+
+ if (n == l)
+ n = n->next;
+ if (n == l)
+ return NULL;
+
+ assert(n != NULL);
+
+ return (QUIC_STREAM *)(((char *)n) - off);
+}
+
+#define active_next(l, s) list_next((l), &(s)->active_node, \
+ offsetof(QUIC_STREAM, active_node))
+#define accept_next(l, s) list_next((l), &(s)->accept_node, \
+ offsetof(QUIC_STREAM, accept_node))
+#define ready_for_gc_next(l, s) list_next((l), &(s)->ready_for_gc_node, \
+ offsetof(QUIC_STREAM, ready_for_gc_node))
+#define accept_head(l) list_next((l), (l), \
+ offsetof(QUIC_STREAM, accept_node))
+#define ready_for_gc_head(l) list_next((l), (l), \
+ offsetof(QUIC_STREAM, ready_for_gc_node))
+
+static unsigned long hash_stream(const QUIC_STREAM *s)
+{
+ return (unsigned long)s->id;
+}
+
+static int cmp_stream(const QUIC_STREAM *a, const QUIC_STREAM *b)
+{
+ if (a->id < b->id)
+ return -1;
+ if (a->id > b->id)
+ return 1;
+ return 0;
+}
+
+int ossl_quic_stream_map_init(QUIC_STREAM_MAP *qsm,
+ uint64_t (*get_stream_limit_cb)(int uni, void *arg),
+ void *get_stream_limit_cb_arg,
+ QUIC_RXFC *max_streams_bidi_rxfc,
+ QUIC_RXFC *max_streams_uni_rxfc,
+ int is_server)
+{
+ qsm->map = lh_QUIC_STREAM_new(hash_stream, cmp_stream);
+ qsm->active_list.prev = qsm->active_list.next = &qsm->active_list;
+ qsm->accept_list.prev = qsm->accept_list.next = &qsm->accept_list;
+ qsm->ready_for_gc_list.prev = qsm->ready_for_gc_list.next
+ = &qsm->ready_for_gc_list;
+ qsm->rr_stepping = 1;
+ qsm->rr_counter = 0;
+ qsm->rr_cur = NULL;
+
+ qsm->num_accept_bidi = 0;
+ qsm->num_accept_uni = 0;
+ qsm->num_shutdown_flush = 0;
+
+ qsm->get_stream_limit_cb = get_stream_limit_cb;
+ qsm->get_stream_limit_cb_arg = get_stream_limit_cb_arg;
+ qsm->max_streams_bidi_rxfc = max_streams_bidi_rxfc;
+ qsm->max_streams_uni_rxfc = max_streams_uni_rxfc;
+ qsm->is_server = is_server;
+ return 1;
+}
+
+static void release_each(QUIC_STREAM *stream, void *arg)
+{
+ QUIC_STREAM_MAP *qsm = arg;
+
+ ossl_quic_stream_map_release(qsm, stream);
+}
+
+void ossl_quic_stream_map_cleanup(QUIC_STREAM_MAP *qsm)
+{
+ ossl_quic_stream_map_visit(qsm, release_each, qsm);
+
+ lh_QUIC_STREAM_free(qsm->map);
+ qsm->map = NULL;
+}
+
+void ossl_quic_stream_map_visit(QUIC_STREAM_MAP *qsm,
+ void (*visit_cb)(QUIC_STREAM *stream, void *arg),
+ void *visit_cb_arg)
+{
+ lh_QUIC_STREAM_doall_arg(qsm->map, visit_cb, visit_cb_arg);
+}
+
+QUIC_STREAM *ossl_quic_stream_map_alloc(QUIC_STREAM_MAP *qsm,
+ uint64_t stream_id,
+ int type)
+{
+ QUIC_STREAM *s;
+ QUIC_STREAM key;
+
+ key.id = stream_id;
+
+ s = lh_QUIC_STREAM_retrieve(qsm->map, &key);
+ if (s != NULL)
+ return NULL;
+
+ s = OPENSSL_zalloc(sizeof(*s));
+ if (s == NULL)
+ return NULL;
+
+ s->id = stream_id;
+ s->type = type;
+ s->as_server = qsm->is_server;
+ s->send_state = (ossl_quic_stream_is_local_init(s)
+ || ossl_quic_stream_is_bidi(s))
+ ? QUIC_SSTREAM_STATE_READY
+ : QUIC_SSTREAM_STATE_NONE;
+ s->recv_state = (!ossl_quic_stream_is_local_init(s)
+ || ossl_quic_stream_is_bidi(s))
+ ? QUIC_RSTREAM_STATE_RECV
+ : QUIC_RSTREAM_STATE_NONE;
+
+ s->send_final_size = UINT64_MAX;
+
+ lh_QUIC_STREAM_insert(qsm->map, s);
+ return s;
+}
+
+void ossl_quic_stream_map_release(QUIC_STREAM_MAP *qsm, QUIC_STREAM *stream)
+{
+ if (stream == NULL)
+ return;
+
+ if (stream->active_node.next != NULL)
+ list_remove(&qsm->active_list, &stream->active_node);
+ if (stream->accept_node.next != NULL)
+ list_remove(&qsm->accept_list, &stream->accept_node);
+ if (stream->ready_for_gc_node.next != NULL)
+ list_remove(&qsm->ready_for_gc_list, &stream->ready_for_gc_node);
+
+ ossl_quic_sstream_free(stream->sstream);
+ stream->sstream = NULL;
+
+ ossl_quic_rstream_free(stream->rstream);
+ stream->rstream = NULL;
+
+ lh_QUIC_STREAM_delete(qsm->map, stream);
+ OPENSSL_free(stream);
+}
+
+QUIC_STREAM *ossl_quic_stream_map_get_by_id(QUIC_STREAM_MAP *qsm,
+ uint64_t stream_id)
+{
+ QUIC_STREAM key;
+
+ key.id = stream_id;
+
+ return lh_QUIC_STREAM_retrieve(qsm->map, &key);
+}
+
+static void stream_map_mark_active(QUIC_STREAM_MAP *qsm, QUIC_STREAM *s)
+{
+ if (s->active)
+ return;
+
+ list_insert_tail(&qsm->active_list, &s->active_node);
+
+ if (qsm->rr_cur == NULL)
+ qsm->rr_cur = s;
+
+ s->active = 1;
+}
+
+static void stream_map_mark_inactive(QUIC_STREAM_MAP *qsm, QUIC_STREAM *s)
+{
+ if (!s->active)
+ return;
+
+ if (qsm->rr_cur == s)
+ qsm->rr_cur = active_next(&qsm->active_list, s);
+ if (qsm->rr_cur == s)
+ qsm->rr_cur = NULL;
+
+ list_remove(&qsm->active_list, &s->active_node);
+
+ s->active = 0;
+}
+
+void ossl_quic_stream_map_set_rr_stepping(QUIC_STREAM_MAP *qsm, size_t stepping)
+{
+ qsm->rr_stepping = stepping;
+ qsm->rr_counter = 0;
+}
+
+static int stream_has_data_to_send(QUIC_STREAM *s)
+{
+ OSSL_QUIC_FRAME_STREAM shdr;
+ OSSL_QTX_IOVEC iov[2];
+ size_t num_iov;
+ uint64_t fc_credit, fc_swm, fc_limit;
+
+ switch (s->send_state) {
+ case QUIC_SSTREAM_STATE_READY:
+ case QUIC_SSTREAM_STATE_SEND:
+ case QUIC_SSTREAM_STATE_DATA_SENT:
+ /*
+ * We can still have data to send in DATA_SENT due to retransmissions,
+ * etc.
+ */
+ break;
+ default:
+ return 0; /* Nothing to send. */
+ }
+
+ /*
+ * We cannot determine if we have data to send simply by checking if
+ * ossl_quic_txfc_get_credit() is zero, because we may also have older
+ * stream data we need to retransmit. The SSTREAM returns older data first,
+ * so we do a simple comparison of the next chunk the SSTREAM wants to send
+ * against the TXFC CWM.
+ */
+ num_iov = OSSL_NELEM(iov);
+ if (!ossl_quic_sstream_get_stream_frame(s->sstream, 0, &shdr, iov,
+ &num_iov))
+ return 0;
+
+ fc_credit = ossl_quic_txfc_get_credit(&s->txfc, 0);
+ fc_swm = ossl_quic_txfc_get_swm(&s->txfc);
+ fc_limit = fc_swm + fc_credit;
+
+ return (shdr.is_fin && shdr.len == 0) || shdr.offset < fc_limit;
+}
+
+static ossl_unused int qsm_send_part_permits_gc(const QUIC_STREAM *qs)
+{
+ switch (qs->send_state) {
+ case QUIC_SSTREAM_STATE_NONE:
+ case QUIC_SSTREAM_STATE_DATA_RECVD:
+ case QUIC_SSTREAM_STATE_RESET_RECVD:
+ return 1;
+ default:
+ return 0;
+ }
+}
+
+static int qsm_ready_for_gc(QUIC_STREAM_MAP *qsm, QUIC_STREAM *qs)
+{
+ int recv_stream_fully_drained = 0; /* TODO(QUIC FUTURE): Optimisation */
+
+ /*
+ * If sstream has no FIN, we auto-reset it at marked-for-deletion time, so
+ * we don't need to worry about that here.
+ */
+ assert(!qs->deleted
+ || !ossl_quic_stream_has_send(qs)
+ || ossl_quic_stream_send_is_reset(qs)
+ || ossl_quic_stream_send_get_final_size(qs, NULL));
+
+ return
+ qs->deleted
+ && (!ossl_quic_stream_has_recv(qs)
+ || recv_stream_fully_drained
+ || qs->acked_stop_sending)
+ && (!ossl_quic_stream_has_send(qs)
+ || qs->send_state == QUIC_SSTREAM_STATE_DATA_RECVD
+ || qs->send_state == QUIC_SSTREAM_STATE_RESET_RECVD);
+}
+
+int ossl_quic_stream_map_is_local_allowed_by_stream_limit(QUIC_STREAM_MAP *qsm,
+ uint64_t stream_ordinal,
+ int is_uni)
+{
+ uint64_t stream_limit;
+
+ if (qsm->get_stream_limit_cb == NULL)
+ return 1;
+
+ stream_limit = qsm->get_stream_limit_cb(is_uni, qsm->get_stream_limit_cb_arg);
+ return stream_ordinal < stream_limit;
+}
+
+void ossl_quic_stream_map_update_state(QUIC_STREAM_MAP *qsm, QUIC_STREAM *s)
+{
+ int should_be_active, allowed_by_stream_limit = 1;
+
+ if (ossl_quic_stream_is_server_init(s) == qsm->is_server) {
+ int is_uni = !ossl_quic_stream_is_bidi(s);
+ uint64_t stream_ordinal = s->id >> 2;
+
+ allowed_by_stream_limit
+ = ossl_quic_stream_map_is_local_allowed_by_stream_limit(qsm,
+ stream_ordinal,
+ is_uni);
+ }
+
+ if (s->send_state == QUIC_SSTREAM_STATE_DATA_SENT
+ && ossl_quic_sstream_is_totally_acked(s->sstream))
+ ossl_quic_stream_map_notify_totally_acked(qsm, s);
+ else if (s->shutdown_flush
+ && s->send_state == QUIC_SSTREAM_STATE_SEND
+ && ossl_quic_sstream_is_totally_acked(s->sstream))
+ shutdown_flush_done(qsm, s);
+
+ if (!s->ready_for_gc) {
+ s->ready_for_gc = qsm_ready_for_gc(qsm, s);
+ if (s->ready_for_gc)
+ list_insert_tail(&qsm->ready_for_gc_list, &s->ready_for_gc_node);
+ }
+
+ should_be_active
+ = allowed_by_stream_limit
+ && !s->ready_for_gc
+ && ((ossl_quic_stream_has_recv(s)
+ && !ossl_quic_stream_recv_is_reset(s)
+ && (s->recv_state == QUIC_RSTREAM_STATE_RECV
+ && (s->want_max_stream_data
+ || ossl_quic_rxfc_has_cwm_changed(&s->rxfc, 0))))
+ || s->want_stop_sending
+ || s->want_reset_stream
+ || (!s->peer_stop_sending && stream_has_data_to_send(s)));
+
+ if (should_be_active)
+ stream_map_mark_active(qsm, s);
+ else
+ stream_map_mark_inactive(qsm, s);
+}
+
+/*
+ * Stream Send Part State Management
+ * =================================
+ */
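+
+/*
+ * Summary of the send part state machine implemented by the functions below
+ * (cf. RFC 9000 s. 3.1):
+ *
+ *   READY ---> SEND ---> DATA_SENT ---> DATA_RECVD
+ *               |            |
+ *               +-----+------+
+ *                     v
+ *                RESET_SENT ---> RESET_RECVD
+ */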
+
+int ossl_quic_stream_map_ensure_send_part_id(QUIC_STREAM_MAP *qsm,
+ QUIC_STREAM *qs)
+{
+ switch (qs->send_state) {
+ case QUIC_SSTREAM_STATE_NONE:
+ /* Stream without send part - caller error. */
+ return 0;
+
+ case QUIC_SSTREAM_STATE_READY:
+ /*
+ * We always allocate a stream ID upfront, so we don't need to do it
+ * here.
+ */
+ qs->send_state = QUIC_SSTREAM_STATE_SEND;
+ return 1;
+
+ default:
+ /* Nothing to do. */
+ return 1;
+ }
+}
+
+int ossl_quic_stream_map_notify_all_data_sent(QUIC_STREAM_MAP *qsm,
+ QUIC_STREAM *qs)
+{
+ switch (qs->send_state) {
+ default:
+ /* Wrong state - caller error. */
+ case QUIC_SSTREAM_STATE_NONE:
+ /* Stream without send part - caller error. */
+ return 0;
+
+ case QUIC_SSTREAM_STATE_SEND:
+ if (!ossl_quic_sstream_get_final_size(qs->sstream, &qs->send_final_size))
+ return 0;
+
+ qs->send_state = QUIC_SSTREAM_STATE_DATA_SENT;
+ return 1;
+ }
+}
+
+static void shutdown_flush_done(QUIC_STREAM_MAP *qsm, QUIC_STREAM *qs)
+{
+ if (!qs->shutdown_flush)
+ return;
+
+ assert(qsm->num_shutdown_flush > 0);
+ qs->shutdown_flush = 0;
+ --qsm->num_shutdown_flush;
+}
+
+int ossl_quic_stream_map_notify_totally_acked(QUIC_STREAM_MAP *qsm,
+ QUIC_STREAM *qs)
+{
+ switch (qs->send_state) {
+ default:
+ /* Wrong state - caller error. */
+ case QUIC_SSTREAM_STATE_NONE:
+ /* Stream without send part - caller error. */
+ return 0;
+
+ case QUIC_SSTREAM_STATE_DATA_SENT:
+ qs->send_state = QUIC_SSTREAM_STATE_DATA_RECVD;
+ /* We no longer need a QUIC_SSTREAM in this state. */
+ ossl_quic_sstream_free(qs->sstream);
+ qs->sstream = NULL;
+
+ shutdown_flush_done(qsm, qs);
+ return 1;
+ }
+}
+
+int ossl_quic_stream_map_reset_stream_send_part(QUIC_STREAM_MAP *qsm,
+ QUIC_STREAM *qs,
+ uint64_t aec)
+{
+ switch (qs->send_state) {
+ default:
+ case QUIC_SSTREAM_STATE_NONE:
+ /*
+ * RESET_STREAM pertains to sending part only, so we cannot reset a
+ * receive-only stream.
+ */
+ case QUIC_SSTREAM_STATE_DATA_RECVD:
+ /*
+ * RFC 9000 s. 3.3: A sender MUST NOT [...] send RESET_STREAM from a
+ * terminal state. If the stream has already finished normally and the
+ * peer has acknowledged this, we cannot reset it.
+ */
+ return 0;
+
+ case QUIC_SSTREAM_STATE_READY:
+ if (!ossl_quic_stream_map_ensure_send_part_id(qsm, qs))
+ return 0;
+
+ /* FALLTHROUGH */
+ case QUIC_SSTREAM_STATE_SEND:
+ /*
+ * If we already have a final size (e.g. because we are coming from
+ * DATA_SENT), we have to be consistent with that, so don't change it.
+ * If we don't already have a final size, determine a final size value.
+ * This is the value which we will end up using for a RESET_STREAM frame
+ * for flow control purposes. We could send the stream size (total
+ * number of bytes appended to QUIC_SSTREAM by the application), but it
+ * is in our interest to exclude any bytes we have not actually
+ * transmitted yet, to avoid unnecessarily consuming flow control
+ * credit. We can get this from the TXFC.
+ */
+ qs->send_final_size = ossl_quic_txfc_get_swm(&qs->txfc);
+
+ /* FALLTHROUGH */
+ case QUIC_SSTREAM_STATE_DATA_SENT:
+ qs->reset_stream_aec = aec;
+ qs->want_reset_stream = 1;
+ qs->send_state = QUIC_SSTREAM_STATE_RESET_SENT;
+
+ ossl_quic_sstream_free(qs->sstream);
+ qs->sstream = NULL;
+
+ shutdown_flush_done(qsm, qs);
+ ossl_quic_stream_map_update_state(qsm, qs);
+ return 1;
+
+ case QUIC_SSTREAM_STATE_RESET_SENT:
+ case QUIC_SSTREAM_STATE_RESET_RECVD:
+ /*
+ * Idempotent - no-op. In any case, do not send RESET_STREAM again - as
+ * mentioned, we must not send it from a terminal state.
+ */
+ return 1;
+ }
+}
+
+int ossl_quic_stream_map_notify_reset_stream_acked(QUIC_STREAM_MAP *qsm,
+ QUIC_STREAM *qs)
+{
+ switch (qs->send_state) {
+ default:
+ /* Wrong state - caller error. */
+ case QUIC_SSTREAM_STATE_NONE:
+ /* Stream without send part - caller error. */
+ return 0;
+
+ case QUIC_SSTREAM_STATE_RESET_SENT:
+ qs->send_state = QUIC_SSTREAM_STATE_RESET_RECVD;
+ return 1;
+
+ case QUIC_SSTREAM_STATE_RESET_RECVD:
+ /* Already in the correct state. */
+ return 1;
+ }
+}
+
+/*
+ * Stream Receive Part State Management
+ * ====================================
+ */
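+
+/*
+ * Summary of the receive part state machine implemented by the functions
+ * below (cf. RFC 9000 s. 3.2):
+ *
+ *   RECV ---> SIZE_KNOWN ---> DATA_RECVD ---> DATA_READ
+ *    |            |               |
+ *    +-----+------+---------------+
+ *          v
+ *     RESET_RECVD ---> RESET_READ
+ */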
+
+int ossl_quic_stream_map_notify_size_known_recv_part(QUIC_STREAM_MAP *qsm,
+ QUIC_STREAM *qs,
+ uint64_t final_size)
+{
+ switch (qs->recv_state) {
+ default:
+ /* Wrong state - caller error. */
+ case QUIC_RSTREAM_STATE_NONE:
+ /* Stream without receive part - caller error. */
+ return 0;
+
+ case QUIC_RSTREAM_STATE_RECV:
+ qs->recv_state = QUIC_RSTREAM_STATE_SIZE_KNOWN;
+ return 1;
+ }
+}
+
+int ossl_quic_stream_map_notify_totally_received(QUIC_STREAM_MAP *qsm,
+ QUIC_STREAM *qs)
+{
+ switch (qs->recv_state) {
+ default:
+ /* Wrong state - caller error. */
+ case QUIC_RSTREAM_STATE_NONE:
+ /* Stream without receive part - caller error. */
+ return 0;
+
+ case QUIC_RSTREAM_STATE_SIZE_KNOWN:
+ qs->recv_state = QUIC_RSTREAM_STATE_DATA_RECVD;
+ qs->want_stop_sending = 0;
+ return 1;
+ }
+}
+
+int ossl_quic_stream_map_notify_totally_read(QUIC_STREAM_MAP *qsm,
+ QUIC_STREAM *qs)
+{
+ switch (qs->recv_state) {
+ default:
+ /* Wrong state - caller error. */
+ case QUIC_RSTREAM_STATE_NONE:
+ /* Stream without receive part - caller error. */
+ return 0;
+
+ case QUIC_RSTREAM_STATE_DATA_RECVD:
+ qs->recv_state = QUIC_RSTREAM_STATE_DATA_READ;
+
+ /* QUIC_RSTREAM is no longer needed */
+ ossl_quic_rstream_free(qs->rstream);
+ qs->rstream = NULL;
+ return 1;
+ }
+}
+
+int ossl_quic_stream_map_notify_reset_recv_part(QUIC_STREAM_MAP *qsm,
+ QUIC_STREAM *qs,
+ uint64_t app_error_code,
+ uint64_t final_size)
+{
+ uint64_t prev_final_size;
+
+ switch (qs->recv_state) {
+ default:
+ case QUIC_RSTREAM_STATE_NONE:
+ /* Stream without receive part - caller error. */
+ return 0;
+
+ case QUIC_RSTREAM_STATE_RECV:
+ case QUIC_RSTREAM_STATE_SIZE_KNOWN:
+ case QUIC_RSTREAM_STATE_DATA_RECVD:
+ if (ossl_quic_stream_recv_get_final_size(qs, &prev_final_size)
+ && prev_final_size != final_size)
+ /* Cannot change previous final size. */
+ return 0;
+
+ qs->recv_state = QUIC_RSTREAM_STATE_RESET_RECVD;
+ qs->peer_reset_stream_aec = app_error_code;
+
+ /* RFC 9000 s. 3.3: No point sending STOP_SENDING if already reset. */
+ qs->want_stop_sending = 0;
+
+ /* QUIC_RSTREAM is no longer needed */
+ ossl_quic_rstream_free(qs->rstream);
+ qs->rstream = NULL;
+
+ ossl_quic_stream_map_update_state(qsm, qs);
+ return 1;
+
+ case QUIC_RSTREAM_STATE_DATA_READ:
+ /*
+ * If we already retired the FIN to the application this is moot
+ * - just ignore.
+ */
+ case QUIC_RSTREAM_STATE_RESET_RECVD:
+ case QUIC_RSTREAM_STATE_RESET_READ:
+ /* Could be a reordered/retransmitted frame - just ignore. */
+ return 1;
+ }
+}
+
+int ossl_quic_stream_map_notify_app_read_reset_recv_part(QUIC_STREAM_MAP *qsm,
+ QUIC_STREAM *qs)
+{
+ switch (qs->recv_state) {
+ default:
+ /* Wrong state - caller error. */
+ case QUIC_RSTREAM_STATE_NONE:
+ /* Stream without receive part - caller error. */
+ return 0;
+
+ case QUIC_RSTREAM_STATE_RESET_RECVD:
+ qs->recv_state = QUIC_RSTREAM_STATE_RESET_READ;
+ return 1;
+ }
+}
+
+int ossl_quic_stream_map_stop_sending_recv_part(QUIC_STREAM_MAP *qsm,
+ QUIC_STREAM *qs,
+ uint64_t aec)
+{
+ if (qs->stop_sending)
+ return 0;
+
+ switch (qs->recv_state) {
+ default:
+ case QUIC_RSTREAM_STATE_NONE:
+ /* Send-only stream, so this makes no sense. */
+ case QUIC_RSTREAM_STATE_DATA_RECVD:
+ case QUIC_RSTREAM_STATE_DATA_READ:
+ /*
+ * Not really any point in STOP_SENDING if we already received all data.
+ */
+ case QUIC_RSTREAM_STATE_RESET_RECVD:
+ case QUIC_RSTREAM_STATE_RESET_READ:
+ /*
+ * RFC 9000 s. 3.5: "STOP_SENDING SHOULD only be sent for a stream that
+ * has not been reset by the peer."
+ *
+ * No point in STOP_SENDING if the peer already reset their send part.
+ */
+ return 0;
+
+ case QUIC_RSTREAM_STATE_RECV:
+ case QUIC_RSTREAM_STATE_SIZE_KNOWN:
+ /*
+ * RFC 9000 s. 3.5: "If the stream is in the Recv or Size Known state,
+ * the transport SHOULD signal this by sending a STOP_SENDING frame to
+ * prompt closure of the stream in the opposite direction."
+ *
+ * Note that it does make sense to send STOP_SENDING for a receive part
+ * of a stream which has a known size (because we have received a FIN)
+ * but which still has other (previous) stream data yet to be received.
+ */
+ break;
+ }
+
+ qs->stop_sending = 1;
+ qs->stop_sending_aec = aec;
+ return ossl_quic_stream_map_schedule_stop_sending(qsm, qs);
+}
+
+/* Called to mark STOP_SENDING for generation, or regeneration after loss. */
+int ossl_quic_stream_map_schedule_stop_sending(QUIC_STREAM_MAP *qsm, QUIC_STREAM *qs)
+{
+ if (!qs->stop_sending)
+ return 0;
+
+ /*
+ * Ignore the call as a no-op if already scheduled, or in a state
+ * where it makes no sense to send STOP_SENDING.
+ */
+ if (qs->want_stop_sending)
+ return 1;
+
+ switch (qs->recv_state) {
+ default:
+ return 1; /* ignore */
+ case QUIC_RSTREAM_STATE_RECV:
+ case QUIC_RSTREAM_STATE_SIZE_KNOWN:
+ /*
+ * RFC 9000 s. 3.5: "An endpoint is expected to send another
+ * STOP_SENDING frame if a packet containing a previous STOP_SENDING is
+ * lost. However, once either all stream data or a RESET_STREAM frame
+ * has been received for the stream -- that is, the stream is in any
+ * state other than "Recv" or "Size Known" -- sending a STOP_SENDING
+ * frame is unnecessary."
+ */
+ break;
+ }
+
+ qs->want_stop_sending = 1;
+ ossl_quic_stream_map_update_state(qsm, qs);
+ return 1;
+}
+
+QUIC_STREAM *ossl_quic_stream_map_peek_accept_queue(QUIC_STREAM_MAP *qsm)
+{
+ return accept_head(&qsm->accept_list);
+}
+
+void ossl_quic_stream_map_push_accept_queue(QUIC_STREAM_MAP *qsm,
+ QUIC_STREAM *s)
+{
+ list_insert_tail(&qsm->accept_list, &s->accept_node);
+ if (ossl_quic_stream_is_bidi(s))
+ ++qsm->num_accept_bidi;
+ else
+ ++qsm->num_accept_uni;
+}
+
+static QUIC_RXFC *qsm_get_max_streams_rxfc(QUIC_STREAM_MAP *qsm, QUIC_STREAM *s)
+{
+ return ossl_quic_stream_is_bidi(s)
+ ? qsm->max_streams_bidi_rxfc
+ : qsm->max_streams_uni_rxfc;
+}
+
+void ossl_quic_stream_map_remove_from_accept_queue(QUIC_STREAM_MAP *qsm,
+ QUIC_STREAM *s,
+ OSSL_TIME rtt)
+{
+ QUIC_RXFC *max_streams_rxfc;
+
+ list_remove(&qsm->accept_list, &s->accept_node);
+ if (ossl_quic_stream_is_bidi(s))
+ --qsm->num_accept_bidi;
+ else
+ --qsm->num_accept_uni;
+
+ if ((max_streams_rxfc = qsm_get_max_streams_rxfc(qsm, s)) != NULL)
+ (void)ossl_quic_rxfc_on_retire(max_streams_rxfc, 1, rtt);
+}
+
+size_t ossl_quic_stream_map_get_accept_queue_len(QUIC_STREAM_MAP *qsm, int is_uni)
+{
+ return is_uni ? qsm->num_accept_uni : qsm->num_accept_bidi;
+}
+
+size_t ossl_quic_stream_map_get_total_accept_queue_len(QUIC_STREAM_MAP *qsm)
+{
+ return ossl_quic_stream_map_get_accept_queue_len(qsm, /*is_uni=*/0)
+ + ossl_quic_stream_map_get_accept_queue_len(qsm, /*is_uni=*/1);
+}
+
+void ossl_quic_stream_map_gc(QUIC_STREAM_MAP *qsm)
+{
+    QUIC_STREAM *qs;
+
+    /*
+     * ossl_quic_stream_map_release() unlinks the stream from the
+     * ready-for-GC list, so we can simply keep releasing the head of the list
+     * until the list is empty.
+     */
+    while ((qs = ready_for_gc_head(&qsm->ready_for_gc_list)) != NULL)
+        ossl_quic_stream_map_release(qsm, qs);
+}
+
+static int eligible_for_shutdown_flush(QUIC_STREAM *qs)
+{
+ /*
+ * We only care about servicing the send part of a stream (if any) during
+ * shutdown flush. We make sure we flush a stream if it is either
+ * non-terminated or was terminated normally such as via
+ * SSL_stream_conclude. A stream which was terminated via a reset is not
+ * flushed, and we will have thrown away the send buffer in that case
+ * anyway.
+ */
+ switch (qs->send_state) {
+ case QUIC_SSTREAM_STATE_SEND:
+ case QUIC_SSTREAM_STATE_DATA_SENT:
+ return !ossl_quic_sstream_is_totally_acked(qs->sstream);
+ default:
+ return 0;
+ }
+}
+
+static void begin_shutdown_flush_each(QUIC_STREAM *qs, void *arg)
+{
+ QUIC_STREAM_MAP *qsm = arg;
+
+ if (!eligible_for_shutdown_flush(qs) || qs->shutdown_flush)
+ return;
+
+ qs->shutdown_flush = 1;
+ ++qsm->num_shutdown_flush;
+}
+
+void ossl_quic_stream_map_begin_shutdown_flush(QUIC_STREAM_MAP *qsm)
+{
+ qsm->num_shutdown_flush = 0;
+
+ ossl_quic_stream_map_visit(qsm, begin_shutdown_flush_each, qsm);
+}
+
+int ossl_quic_stream_map_is_shutdown_flush_finished(QUIC_STREAM_MAP *qsm)
+{
+ return qsm->num_shutdown_flush == 0;
+}
+
+/*
+ * QUIC Stream Iterator
+ * ====================
+ */
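+
+/*
+ * The iterator walks the active stream list starting from the map's current
+ * round-robin pointer. If advance_rr is set, that pointer is advanced by one
+ * stream once every rr_stepping initialisations, so that over time each
+ * active stream gets an equal chance of being serviced first.
+ */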
+void ossl_quic_stream_iter_init(QUIC_STREAM_ITER *it, QUIC_STREAM_MAP *qsm,
+ int advance_rr)
+{
+ it->qsm = qsm;
+ it->stream = it->first_stream = qsm->rr_cur;
+ if (advance_rr && it->stream != NULL
+ && ++qsm->rr_counter >= qsm->rr_stepping) {
+ qsm->rr_counter = 0;
+ qsm->rr_cur = active_next(&qsm->active_list, qsm->rr_cur);
+ }
+}
+
+void ossl_quic_stream_iter_next(QUIC_STREAM_ITER *it)
+{
+ if (it->stream == NULL)
+ return;
+
+ it->stream = active_next(&it->qsm->active_list, it->stream);
+ if (it->stream == it->first_stream)
+ it->stream = NULL;
+}
diff --git a/crypto/openssl/ssl/quic/quic_thread_assist.c b/crypto/openssl/ssl/quic/quic_thread_assist.c
new file mode 100644
index 000000000000..5315bf1662c6
--- /dev/null
+++ b/crypto/openssl/ssl/quic/quic_thread_assist.c
@@ -0,0 +1,148 @@
+/*
+ * Copyright 2023-2025 The OpenSSL Project Authors. All Rights Reserved.
+ *
+ * Licensed under the Apache License 2.0 (the "License"). You may not use
+ * this file except in compliance with the License. You can obtain a copy
+ * in the file LICENSE in the source distribution or at
+ * https://www.openssl.org/source/license.html
+ */
+
+#include <openssl/macros.h>
+#include "quic_local.h"
+#include "internal/time.h"
+#include "internal/thread.h"
+#include "internal/thread_arch.h"
+#include "internal/quic_thread_assist.h"
+
+#if !defined(OPENSSL_NO_QUIC_THREAD_ASSIST)
+
+/* Main loop for the QUIC assist thread. */
+static unsigned int assist_thread_main(void *arg)
+{
+ QUIC_THREAD_ASSIST *qta = arg;
+ CRYPTO_MUTEX *m = ossl_quic_channel_get_mutex(qta->ch);
+ QUIC_REACTOR *rtor;
+ QUIC_ENGINE *eng = ossl_quic_channel_get0_engine(qta->ch);
+
+ ossl_crypto_mutex_lock(m);
+
+ rtor = ossl_quic_channel_get_reactor(qta->ch);
+
+ for (;;) {
+ OSSL_TIME deadline;
+
+ if (qta->teardown)
+ break;
+
+ deadline = ossl_quic_reactor_get_tick_deadline(rtor);
+ /*
+ * ossl_crypto_condvar_wait_timeout needs to use real time for the
+         * deadline.
+ */
+ deadline = ossl_quic_engine_make_real_time(eng, deadline);
+ ossl_crypto_condvar_wait_timeout(qta->cv, m, deadline);
+
+ /*
+ * We have now been woken up. This can be for one of the following
+ * reasons:
+ *
+         * - we have been asked to tear down (qta->teardown is set);
+         * - the tick deadline has passed;
+         * - the tick deadline has changed.
+ *
+ * For robustness, this loop also handles spurious wakeups correctly
+ * (which does not require any extra code).
+ */
+ if (qta->teardown)
+ break;
+
+ ossl_quic_reactor_tick(rtor, QUIC_REACTOR_TICK_FLAG_CHANNEL_ONLY);
+ }
+
+ ossl_crypto_mutex_unlock(m);
+ return 1;
+}
+
+int ossl_quic_thread_assist_init_start(QUIC_THREAD_ASSIST *qta,
+ QUIC_CHANNEL *ch)
+{
+ CRYPTO_MUTEX *mutex = ossl_quic_channel_get_mutex(ch);
+
+ if (mutex == NULL)
+ return 0;
+
+ qta->ch = ch;
+ qta->teardown = 0;
+ qta->joined = 0;
+
+ qta->cv = ossl_crypto_condvar_new();
+ if (qta->cv == NULL)
+ return 0;
+
+ qta->t = ossl_crypto_thread_native_start(assist_thread_main,
+ qta, /*joinable=*/1);
+ if (qta->t == NULL) {
+ ossl_crypto_condvar_free(&qta->cv);
+ return 0;
+ }
+
+ return 1;
+}
+
+int ossl_quic_thread_assist_stop_async(QUIC_THREAD_ASSIST *qta)
+{
+ if (!qta->teardown) {
+ qta->teardown = 1;
+ ossl_crypto_condvar_signal(qta->cv);
+ }
+
+ return 1;
+}
+
+int ossl_quic_thread_assist_wait_stopped(QUIC_THREAD_ASSIST *qta)
+{
+ CRYPTO_THREAD_RETVAL rv;
+ CRYPTO_MUTEX *m = ossl_quic_channel_get_mutex(qta->ch);
+
+ if (qta->joined)
+ return 1;
+
+ if (!ossl_quic_thread_assist_stop_async(qta))
+ return 0;
+
+ ossl_crypto_mutex_unlock(m);
+
+ if (!ossl_crypto_thread_native_join(qta->t, &rv)) {
+ ossl_crypto_mutex_lock(m);
+ return 0;
+ }
+
+ qta->joined = 1;
+
+ ossl_crypto_mutex_lock(m);
+ return 1;
+}
+
+int ossl_quic_thread_assist_cleanup(QUIC_THREAD_ASSIST *qta)
+{
+ if (!ossl_assert(qta->joined))
+ return 0;
+
+ ossl_crypto_condvar_free(&qta->cv);
+ ossl_crypto_thread_native_clean(qta->t);
+
+ qta->ch = NULL;
+ qta->t = NULL;
+ return 1;
+}
+
+int ossl_quic_thread_assist_notify_deadline_changed(QUIC_THREAD_ASSIST *qta)
+{
+ if (qta->teardown)
+ return 0;
+
+ ossl_crypto_condvar_signal(qta->cv);
+ return 1;
+}
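+
+/*
+ * Illustrative lifecycle from the owning channel's perspective (not part of
+ * the build; locking requirements as per the functions above):
+ *
+ *   QUIC_THREAD_ASSIST qta;
+ *
+ *   ossl_quic_thread_assist_init_start(&qta, ch);
+ *   ...the assist thread now ticks the channel as deadlines expire; call
+ *      ossl_quic_thread_assist_notify_deadline_changed() whenever the tick
+ *      deadline moves...
+ *   ossl_quic_thread_assist_wait_stopped(&qta);   (requests teardown, joins)
+ *   ossl_quic_thread_assist_cleanup(&qta);
+ */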
+
+#endif
diff --git a/crypto/openssl/ssl/quic/quic_tls.c b/crypto/openssl/ssl/quic/quic_tls.c
new file mode 100644
index 000000000000..ccc053203d4f
--- /dev/null
+++ b/crypto/openssl/ssl/quic/quic_tls.c
@@ -0,0 +1,949 @@
+/*
+ * Copyright 2022-2025 The OpenSSL Project Authors. All Rights Reserved.
+ *
+ * Licensed under the Apache License 2.0 (the "License"). You may not use
+ * this file except in compliance with the License. You can obtain a copy
+ * in the file LICENSE in the source distribution or at
+ * https://www.openssl.org/source/license.html
+ */
+#include <openssl/ssl.h>
+#include "internal/recordmethod.h"
+#include "internal/quic_tls.h"
+#include "../ssl_local.h"
+#include "internal/quic_record_util.h"
+#include "internal/quic_error.h"
+#include "internal/quic_types.h"
+#include "internal/ssl_unwrap.h"
+
+#define QUIC_TLS_FATAL(rl, ad, err) \
+ do { \
+ if ((rl) != NULL) (rl)->alert = (ad); \
+ ERR_raise(ERR_LIB_SSL, (err)); \
+ if ((rl) != NULL) (rl)->qtls->inerror = 1; \
+    } while (0)
+
+struct quic_tls_st {
+ QUIC_TLS_ARGS args;
+
+ /*
+     * Transport parameters which the client should send. The buffer's
+     * lifetime must exceed the lifetime of the QUIC_TLS object.
+ */
+ const unsigned char *local_transport_params;
+ size_t local_transport_params_len;
+
+ ERR_STATE *error_state;
+
+ /*
+ * QUIC error code (usually in the TLS Alert-mapped CRYPTO_ERR range). Valid
+ * only if inerror is 1.
+ */
+ uint64_t error_code;
+
+ /*
+ * Error message with static storage duration. Valid only if inerror is 1.
+ * Should be suitable for encapsulation in a CONNECTION_CLOSE frame.
+ */
+ const char *error_msg;
+
+ /* Whether our SSL object for TLS has been configured for use in QUIC */
+ unsigned int configured : 1;
+
+ /* Set if we have hit any error state */
+ unsigned int inerror : 1;
+
+ /* Set if the handshake has completed */
+ unsigned int complete : 1;
+
+ /* Set if we have consumed the local transport parameters yet. */
+ unsigned int local_transport_params_consumed : 1;
+};
+
+struct ossl_record_layer_st {
+ QUIC_TLS *qtls;
+
+ /* Protection level */
+ int level;
+
+ /* Only used for retry flags */
+ BIO *dummybio;
+
+ /* Number of bytes written so far if we are part way through a write */
+ size_t written;
+
+ /* If we are part way through a write, a copy of the template */
+ OSSL_RECORD_TEMPLATE template;
+
+ /*
+ * If we hit an error, what alert code should be used
+ */
+ int alert;
+
+ /* Amount of crypto stream data we read in the last call to quic_read_record */
+ size_t recread;
+
+ /* Amount of crypto stream data read but not yet released */
+ size_t recunreleased;
+
+ /* Callbacks */
+ OSSL_FUNC_rlayer_msg_callback_fn *msg_callback;
+ void *cbarg;
+};
+
+static int quic_set1_bio(OSSL_RECORD_LAYER *rl, BIO *bio);
+static int quic_free(OSSL_RECORD_LAYER *r);
+
+static int
+quic_new_record_layer(OSSL_LIB_CTX *libctx, const char *propq, int vers,
+ int role, int direction, int level, uint16_t epoch,
+ unsigned char *secret, size_t secretlen,
+ unsigned char *key, size_t keylen, unsigned char *iv,
+ size_t ivlen, unsigned char *mackey, size_t mackeylen,
+ const EVP_CIPHER *ciph, size_t taglen,
+ int mactype,
+ const EVP_MD *md, COMP_METHOD *comp,
+ const EVP_MD *kdfdigest, BIO *prev, BIO *transport,
+ BIO *next, BIO_ADDR *local, BIO_ADDR *peer,
+ const OSSL_PARAM *settings, const OSSL_PARAM *options,
+ const OSSL_DISPATCH *fns, void *cbarg, void *rlarg,
+ OSSL_RECORD_LAYER **retrl)
+{
+ OSSL_RECORD_LAYER *rl = OPENSSL_zalloc(sizeof(*rl));
+ int qdir;
+ uint32_t suite_id = 0;
+
+ if (rl == NULL) {
+ QUIC_TLS_FATAL(rl, SSL_AD_INTERNAL_ERROR, ERR_R_INTERNAL_ERROR);
+ return 0;
+ }
+
+ rl->qtls = (QUIC_TLS *)rlarg;
+ rl->level = level;
+ if (!quic_set1_bio(rl, transport)) {
+ QUIC_TLS_FATAL(rl, SSL_AD_INTERNAL_ERROR, ERR_R_INTERNAL_ERROR);
+ goto err;
+ }
+ rl->cbarg = cbarg;
+ *retrl = rl;
+
+ if (fns != NULL) {
+ for (; fns->function_id != 0; fns++) {
+            switch (fns->function_id) {
+            case OSSL_FUNC_RLAYER_MSG_CALLBACK:
+ rl->msg_callback = OSSL_FUNC_rlayer_msg_callback(fns);
+ break;
+ default:
+ /* Just ignore anything we don't understand */
+ break;
+ }
+ }
+ }
+
+ if (level == OSSL_RECORD_PROTECTION_LEVEL_NONE)
+ return 1;
+
+ if (direction == OSSL_RECORD_DIRECTION_READ)
+ qdir = 0;
+ else
+ qdir = 1;
+
+ if (rl->qtls->args.ossl_quic) {
+#ifndef OPENSSL_NO_QUIC
+ /*
+         * We only look up the suite_id/MD for internal callers; they are not
+         * used in the public API. We assume that a third-party QUIC stack
+         * will want to figure this out for itself (e.g. so that it can add
+         * new ciphersuites at a different pace to us).
+ */
+ if (EVP_CIPHER_is_a(ciph, "AES-128-GCM")) {
+ suite_id = QRL_SUITE_AES128GCM;
+ } else if (EVP_CIPHER_is_a(ciph, "AES-256-GCM")) {
+ suite_id = QRL_SUITE_AES256GCM;
+ } else if (EVP_CIPHER_is_a(ciph, "CHACHA20-POLY1305")) {
+ suite_id = QRL_SUITE_CHACHA20POLY1305;
+ } else {
+ QUIC_TLS_FATAL(rl, SSL_AD_INTERNAL_ERROR, SSL_R_UNKNOWN_CIPHER_TYPE);
+ goto err;
+ }
+
+ /* We pass a ref to the md in a successful yield_secret_cb call */
+ /* TODO(QUIC FUTURE): This cast is horrible. We should try and remove it */
+ if (!EVP_MD_up_ref((EVP_MD *)kdfdigest)) {
+ QUIC_TLS_FATAL(rl, SSL_AD_INTERNAL_ERROR, ERR_R_INTERNAL_ERROR);
+ goto err;
+ }
+#else
+ if (!ossl_assert("Should not happen" == NULL))
+ goto err;
+#endif
+ } else {
+ kdfdigest = NULL;
+ }
+
+ if (!rl->qtls->args.yield_secret_cb(level, qdir, suite_id,
+ (EVP_MD *)kdfdigest, secret, secretlen,
+ rl->qtls->args.yield_secret_cb_arg)) {
+ QUIC_TLS_FATAL(rl, SSL_AD_INTERNAL_ERROR, ERR_R_INTERNAL_ERROR);
+ EVP_MD_free((EVP_MD *)kdfdigest);
+ goto err;
+ }
+
+ return 1;
+ err:
+ *retrl = NULL;
+ quic_free(rl);
+ return 0;
+}
+
+static int quic_free(OSSL_RECORD_LAYER *rl)
+{
+ if (rl == NULL)
+ return 1;
+
+ BIO_free(rl->dummybio);
+ OPENSSL_free(rl);
+ return 1;
+}
+
+static int quic_unprocessed_read_pending(OSSL_RECORD_LAYER *rl)
+{
+ /*
+ * Read ahead isn't really a thing for QUIC so we never have unprocessed
+ * data pending
+ */
+ return 0;
+}
+
+static int quic_processed_read_pending(OSSL_RECORD_LAYER *rl)
+{
+ /*
+     * This is currently only ever used by SSL_has_pending() to check
+     * whether we have more records that we want to supply to the upper
+     * layers.
+ *
+ * We only ever supply 1 record at a time to the upper layers, and
+ * SSL_has_pending() will go via the QUIC method not the TLS method so that
+ * use case doesn't apply here.
+ * Therefore we can ignore this for now and always return 0. We might
+ * eventually want to change this to check in the receive buffers to see if
+ * we have any more data pending.
+ */
+ return 0;
+}
+
+static size_t quic_get_max_records(OSSL_RECORD_LAYER *rl, uint8_t type,
+ size_t len,
+ size_t maxfrag, size_t *preffrag)
+{
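+    /*
+     * We only ever write one record at a time into the QUIC crypto stream,
+     * so there is no benefit in coalescing multiple records (see the
+     * matching assertion in quic_write_records()).
+     */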
+ return 1;
+}
+
+static int quic_write_records(OSSL_RECORD_LAYER *rl,
+ OSSL_RECORD_TEMPLATE *template,
+ size_t numtempl)
+{
+ size_t consumed;
+ unsigned char alert;
+
+ if (!ossl_assert(numtempl == 1)) {
+ /* How could this be? quic_get_max_records() always returns 1 */
+ QUIC_TLS_FATAL(rl, SSL_AD_INTERNAL_ERROR, ERR_R_INTERNAL_ERROR);
+ return OSSL_RECORD_RETURN_FATAL;
+ }
+
+ BIO_clear_retry_flags(rl->dummybio);
+
+ if (rl->msg_callback != NULL) {
+ unsigned char dummyrec[SSL3_RT_HEADER_LENGTH];
+
+ /*
+ * For the purposes of the callback we "pretend" to be normal TLS,
+ * and manufacture a dummy record header
+ */
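+        /*
+         * The manufactured header mimics the 5 byte TLS record header:
+         * byte 0 is the content type, bytes 1-2 are the protocol version
+         * and bytes 3-4 are the record length in network byte order.
+         */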
+ dummyrec[0] = (rl->level == OSSL_RECORD_PROTECTION_LEVEL_NONE)
+ ? template->type
+ : SSL3_RT_APPLICATION_DATA;
+ dummyrec[1] = (unsigned char)((template->version >> 8) & 0xff);
+ dummyrec[2] = (unsigned char)(template->version & 0xff);
+ /*
+ * We assume that buflen is always <= UINT16_MAX. Since this is
+ * generated by libssl itself we actually expect it to never
+ * exceed SSL3_RT_MAX_PLAIN_LENGTH - so it should be a safe
+ * assumption
+ */
+ dummyrec[3] = (unsigned char)((template->buflen >> 8) & 0xff);
+ dummyrec[4] = (unsigned char)(template->buflen & 0xff);
+
+ rl->msg_callback(1, TLS1_3_VERSION, SSL3_RT_HEADER, dummyrec,
+ SSL3_RT_HEADER_LENGTH, rl->cbarg);
+
+ if (rl->level != OSSL_RECORD_PROTECTION_LEVEL_NONE) {
+ rl->msg_callback(1, TLS1_3_VERSION, SSL3_RT_INNER_CONTENT_TYPE,
+ &template->type, 1, rl->cbarg);
+ }
+ }
+
+ switch (template->type) {
+ case SSL3_RT_ALERT:
+ if (template->buflen != 2) {
+ /*
+ * We assume that libssl always sends both bytes of an alert to
+             * us in one go, and never fragments it. If we ever get anything
+             * other than exactly 2 bytes then something has gone very wrong.
+ */
+ QUIC_TLS_FATAL(rl, SSL_AD_INTERNAL_ERROR, SSL_R_BAD_VALUE);
+ return OSSL_RECORD_RETURN_FATAL;
+ }
+ /*
+ * Byte 0 is the alert level (we ignore it) and byte 1 is the alert
+ * description that we are actually interested in.
+ */
+ alert = template->buf[1];
+
+ if (!rl->qtls->args.alert_cb(rl->qtls->args.alert_cb_arg, alert)) {
+ QUIC_TLS_FATAL(rl, SSL_AD_INTERNAL_ERROR, ERR_R_INTERNAL_ERROR);
+ return OSSL_RECORD_RETURN_FATAL;
+ }
+ break;
+
+ case SSL3_RT_HANDSHAKE:
+ /*
+ * We expect this to only fail on some fatal error (e.g. malloc
+ * failure)
+ */
+ if (!rl->qtls->args.crypto_send_cb(template->buf + rl->written,
+ template->buflen - rl->written,
+ &consumed,
+ rl->qtls->args.crypto_send_cb_arg)) {
+ QUIC_TLS_FATAL(rl, SSL_AD_INTERNAL_ERROR, ERR_R_INTERNAL_ERROR);
+ return OSSL_RECORD_RETURN_FATAL;
+ }
+ /*
+ * We might have written less than we wanted to if we have filled the
+ * send stream buffer.
+ */
+ if (consumed + rl->written != template->buflen) {
+ if (!ossl_assert(consumed + rl->written < template->buflen)) {
+ QUIC_TLS_FATAL(rl, SSL_AD_INTERNAL_ERROR, ERR_R_INTERNAL_ERROR);
+ return OSSL_RECORD_RETURN_FATAL;
+ }
+
+ /*
+ * We've not written everything we wanted to. Take a copy of the
+ * template, remember how much we wrote so far and signal a retry.
+ * The buffer supplied in the template is guaranteed to be the same
+ * on a retry for handshake data
+ */
+ rl->written += consumed;
+ rl->template = *template;
+ BIO_set_retry_write(rl->dummybio);
+
+ return OSSL_RECORD_RETURN_RETRY;
+ }
+ rl->written = 0;
+ break;
+
+ default:
+ /* Anything else is unexpected and an error */
+ QUIC_TLS_FATAL(rl, SSL_AD_INTERNAL_ERROR, ERR_R_INTERNAL_ERROR);
+ return OSSL_RECORD_RETURN_FATAL;
+ }
+
+ return OSSL_RECORD_RETURN_SUCCESS;
+}
+
+static int quic_retry_write_records(OSSL_RECORD_LAYER *rl)
+{
+ return quic_write_records(rl, &rl->template, 1);
+}
+
+static int quic_read_record(OSSL_RECORD_LAYER *rl, void **rechandle,
+ int *rversion, uint8_t *type, const unsigned char **data,
+ size_t *datalen, uint16_t *epoch,
+ unsigned char *seq_num)
+{
+ if (rl->recread != 0 || rl->recunreleased != 0)
+ return OSSL_RECORD_RETURN_FATAL;
+
+ BIO_clear_retry_flags(rl->dummybio);
+
+ if (!rl->qtls->args.crypto_recv_rcd_cb(data, datalen,
+ rl->qtls->args.crypto_recv_rcd_cb_arg)) {
+ QUIC_TLS_FATAL(rl, SSL_AD_INTERNAL_ERROR, ERR_R_INTERNAL_ERROR);
+ return OSSL_RECORD_RETURN_FATAL;
+ }
+
+ if (*datalen == 0) {
+ BIO_set_retry_read(rl->dummybio);
+ return OSSL_RECORD_RETURN_RETRY;
+ }
+
+ *rechandle = rl;
+ *rversion = TLS1_3_VERSION;
+ *type = SSL3_RT_HANDSHAKE;
+ rl->recread = rl->recunreleased = *datalen;
+ /* epoch/seq_num are not relevant for TLS */
+
+ if (rl->msg_callback != NULL) {
+ unsigned char dummyrec[SSL3_RT_HEADER_LENGTH];
+
+ /*
+ * For the purposes of the callback we "pretend" to be normal TLS,
+ * and manufacture a dummy record header
+ */
+ dummyrec[0] = (rl->level == OSSL_RECORD_PROTECTION_LEVEL_NONE)
+ ? SSL3_RT_HANDSHAKE
+ : SSL3_RT_APPLICATION_DATA;
+ dummyrec[1] = (unsigned char)((TLS1_2_VERSION >> 8) & 0xff);
+ dummyrec[2] = (unsigned char)(TLS1_2_VERSION & 0xff);
+ /*
+ * *datalen will always fit into 2 bytes because our original buffer
+ * size is less than that.
+ */
+ dummyrec[3] = (unsigned char)((*datalen >> 8) & 0xff);
+ dummyrec[4] = (unsigned char)(*datalen & 0xff);
+
+ rl->msg_callback(0, TLS1_3_VERSION, SSL3_RT_HEADER, dummyrec,
+ SSL3_RT_HEADER_LENGTH, rl->cbarg);
+ rl->msg_callback(0, TLS1_3_VERSION, SSL3_RT_INNER_CONTENT_TYPE, type, 1,
+ rl->cbarg);
+ }
+
+ return OSSL_RECORD_RETURN_SUCCESS;
+}
+
+static int quic_release_record(OSSL_RECORD_LAYER *rl, void *rechandle,
+ size_t length)
+{
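+    /*
+     * The upper layers may release a record in several chunks. We only tell
+     * the QUIC crypto stream to release the underlying data once the final
+     * outstanding chunk is released, at which point all recread bytes are
+     * retired in one go.
+     */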
+ if (!ossl_assert(rl->recread > 0)
+ || !ossl_assert(rl->recunreleased <= rl->recread)
+ || !ossl_assert(rl == rechandle)
+ || !ossl_assert(length <= rl->recunreleased)) {
+ QUIC_TLS_FATAL(rl, SSL_AD_INTERNAL_ERROR, ERR_R_INTERNAL_ERROR);
+ return OSSL_RECORD_RETURN_FATAL;
+ }
+
+ if (rl->recunreleased == length) {
+ if (!rl->qtls->args.crypto_release_rcd_cb(rl->recread,
+ rl->qtls->args.crypto_release_rcd_cb_arg)) {
+ QUIC_TLS_FATAL(rl, SSL_AD_INTERNAL_ERROR, ERR_R_INTERNAL_ERROR);
+ return OSSL_RECORD_RETURN_FATAL;
+ }
+ rl->recread = 0;
+ }
+ rl->recunreleased -= length;
+ return OSSL_RECORD_RETURN_SUCCESS;
+}
+
+static int quic_get_alert_code(OSSL_RECORD_LAYER *rl)
+{
+ return rl->alert;
+}
+
+static int quic_set_protocol_version(OSSL_RECORD_LAYER *rl, int version)
+{
+    /* We only support TLSv1.3, so it's bad if we negotiate anything else */
+ if (!ossl_assert(version == TLS1_3_VERSION)) {
+ QUIC_TLS_FATAL(rl, SSL_AD_INTERNAL_ERROR, ERR_R_INTERNAL_ERROR);
+ return 0;
+ }
+
+ return 1;
+}
+
+static void quic_set_plain_alerts(OSSL_RECORD_LAYER *rl, int allow)
+{
+ /* We don't care */
+}
+
+static void quic_set_first_handshake(OSSL_RECORD_LAYER *rl, int first)
+{
+ /* We don't care */
+}
+
+static void quic_set_max_pipelines(OSSL_RECORD_LAYER *rl, size_t max_pipelines)
+{
+ /* We don't care */
+}
+
+static void quic_get_state(OSSL_RECORD_LAYER *rl, const char **shortstr,
+ const char **longstr)
+{
+ /*
+ * According to the docs, valid read state strings are: "RH"/"read header",
+ * "RB"/"read body", and "unknown"/"unknown". We don't read records in quite
+ * that way, so we report every "normal" state as "read header". In the
+ * event of error then we report "unknown".
+ */
+
+ if (rl->qtls->inerror) {
+ if (shortstr != NULL)
+ *shortstr = "unknown";
+ if (longstr != NULL)
+ *longstr = "unknown";
+ } else {
+ if (shortstr != NULL)
+ *shortstr = "RH";
+ if (longstr != NULL)
+ *longstr = "read header";
+ }
+}
+
+static int quic_set_options(OSSL_RECORD_LAYER *rl, const OSSL_PARAM *options)
+{
+ /*
+ * We don't support any options yet - but we might do at some point so
+ * this could be useful.
+ */
+ return 1;
+}
+
+static const COMP_METHOD *quic_get_compression(OSSL_RECORD_LAYER *rl)
+{
+ /* We only support TLSv1.3 which doesn't have compression */
+ return NULL;
+}
+
+static void quic_set_max_frag_len(OSSL_RECORD_LAYER *rl, size_t max_frag_len)
+{
+ /* This really doesn't make any sense for QUIC. Ignore it */
+}
+
+static int quic_alloc_buffers(OSSL_RECORD_LAYER *rl)
+{
+ /*
+ * This is a hint only. We don't support it (yet), so just ignore the
+ * request
+ */
+ return 1;
+}
+
+static int quic_free_buffers(OSSL_RECORD_LAYER *rl)
+{
+ /*
+ * This is a hint only. We don't support it (yet), so just ignore the
+ * request
+ */
+ return 1;
+}
+
+static int quic_set1_bio(OSSL_RECORD_LAYER *rl, BIO *bio)
+{
+ if (bio != NULL && !BIO_up_ref(bio))
+ return 0;
+ BIO_free(rl->dummybio);
+ rl->dummybio = bio;
+
+ return 1;
+}
+
+/*
+ * Never called functions
+ *
+ * Due to the way we are configured and used we never expect any of the next set
+ * of functions to be called. Therefore we set them to always fail.
+ */
+
+static size_t quic_app_data_pending(OSSL_RECORD_LAYER *rl)
+{
+ QUIC_TLS_FATAL(rl, SSL_AD_INTERNAL_ERROR, ERR_R_SHOULD_NOT_HAVE_BEEN_CALLED);
+ return (size_t)ossl_assert(0);
+}
+
+static size_t quic_get_max_record_overhead(OSSL_RECORD_LAYER *rl)
+{
+ QUIC_TLS_FATAL(rl, SSL_AD_INTERNAL_ERROR, ERR_R_SHOULD_NOT_HAVE_BEEN_CALLED);
+ return (size_t)ossl_assert(0);
+}
+
+static int quic_increment_sequence_ctr(OSSL_RECORD_LAYER *rl)
+{
+ QUIC_TLS_FATAL(rl, SSL_AD_INTERNAL_ERROR, ERR_R_SHOULD_NOT_HAVE_BEEN_CALLED);
+ return ossl_assert(0);
+}
+
+/* End of never called functions */
+
+static const OSSL_RECORD_METHOD quic_tls_record_method = {
+ quic_new_record_layer,
+ quic_free,
+ quic_unprocessed_read_pending,
+ quic_processed_read_pending,
+ quic_app_data_pending, /* Never called */
+ quic_get_max_records,
+ quic_write_records,
+ quic_retry_write_records,
+ quic_read_record,
+ quic_release_record,
+ quic_get_alert_code,
+ quic_set1_bio,
+ quic_set_protocol_version,
+ quic_set_plain_alerts,
+ quic_set_first_handshake,
+ quic_set_max_pipelines,
+ NULL, /* set_in_init: Optional - we don't need it */
+ quic_get_state,
+ quic_set_options,
+ quic_get_compression,
+ quic_set_max_frag_len,
+ quic_get_max_record_overhead, /* Never called */
+ quic_increment_sequence_ctr, /* Never called */
+ quic_alloc_buffers,
+ quic_free_buffers
+};
+
+static int add_transport_params_cb(SSL *s, unsigned int ext_type,
+ unsigned int context,
+ const unsigned char **out, size_t *outlen,
+ X509 *x, size_t chainidx, int *al,
+ void *add_arg)
+{
+ QUIC_TLS *qtls = add_arg;
+
+ *out = qtls->local_transport_params;
+ *outlen = qtls->local_transport_params_len;
+ qtls->local_transport_params_consumed = 1;
+ return 1;
+}
+
+static void free_transport_params_cb(SSL *s, unsigned int ext_type,
+ unsigned int context,
+ const unsigned char *out,
+ void *add_arg)
+{
+}
+
+static int parse_transport_params_cb(SSL *s, unsigned int ext_type,
+ unsigned int context,
+ const unsigned char *in,
+ size_t inlen, X509 *x,
+ size_t chainidx,
+ int *al, void *parse_arg)
+{
+ QUIC_TLS *qtls = parse_arg;
+
+ return qtls->args.got_transport_params_cb(in, inlen,
+ qtls->args.got_transport_params_cb_arg);
+}
+
+QUIC_TLS *ossl_quic_tls_new(const QUIC_TLS_ARGS *args)
+{
+ QUIC_TLS *qtls;
+
+ if (args->crypto_send_cb == NULL
+ || args->crypto_recv_rcd_cb == NULL
+ || args->crypto_release_rcd_cb == NULL) {
+ ERR_raise(ERR_LIB_SSL, ERR_R_PASSED_NULL_PARAMETER);
+ return NULL;
+ }
+
+ qtls = OPENSSL_zalloc(sizeof(*qtls));
+ if (qtls == NULL)
+ return NULL;
+
+ if (args->ossl_quic && (qtls->error_state = OSSL_ERR_STATE_new()) == NULL) {
+ OPENSSL_free(qtls);
+ return NULL;
+ }
+
+ qtls->args = *args;
+ return qtls;
+}
+
+void ossl_quic_tls_free(QUIC_TLS *qtls)
+{
+ if (qtls == NULL)
+ return;
+ OSSL_ERR_STATE_free(qtls->error_state);
+ OPENSSL_free(qtls);
+}
+
+static int raise_error(QUIC_TLS *qtls, uint64_t error_code,
+ const char *error_msg,
+ const char *src_file,
+ int src_line,
+ const char *src_func)
+{
+ /*
+ * When QTLS fails, add a "cover letter" error with information, potentially
+ * with any underlying libssl errors underneath it (but our cover error may
+ * be the only error in some cases). Then capture this into an ERR_STATE so
+ * we can report it later if need be when the QUIC_CHANNEL asks for it.
+ * For external QUIC TLS we just raise the error.
+ */
+ ERR_new();
+ ERR_set_debug(src_file, src_line, src_func);
+ ERR_set_error(ERR_LIB_SSL, SSL_R_QUIC_HANDSHAKE_LAYER_ERROR,
+ "handshake layer error, error code %llu (0x%llx) (\"%s\")",
+                  (unsigned long long)error_code,
+                  (unsigned long long)error_code, error_msg);
+
+ if (qtls->args.ossl_quic) {
+ OSSL_ERR_STATE_save_to_mark(qtls->error_state);
+
+ /*
+ * We record the error information reported via the QUIC protocol
+ * separately.
+ */
+ qtls->error_code = error_code;
+ qtls->error_msg = error_msg;
+ qtls->inerror = 1;
+
+ ERR_pop_to_mark();
+ }
+ return 0;
+}
+
+#define RAISE_ERROR(qtls, error_code, error_msg) \
+ raise_error((qtls), (error_code), (error_msg), \
+ OPENSSL_FILE, OPENSSL_LINE, OPENSSL_FUNC)
+
+#ifndef OPENSSL_NO_QUIC
+# define RAISE_INTERNAL_ERROR(qtls) \
+ RAISE_ERROR((qtls), OSSL_QUIC_ERR_INTERNAL_ERROR, "internal error")
+#else
+# define RAISE_INTERNAL_ERROR(qtls) \
+ RAISE_ERROR((qtls), 0x01, "internal error")
+#endif
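+/*
+ * Note that OSSL_QUIC_ERR_INTERNAL_ERROR is the RFC 9000 INTERNAL_ERROR
+ * transport error code (0x01), so both variants of the macro report the same
+ * value on the wire; the no-quic variant merely avoids a dependency on the
+ * QUIC error code definitions.
+ */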
+
+int ossl_quic_tls_configure(QUIC_TLS *qtls)
+{
+ SSL_CONNECTION *sc = SSL_CONNECTION_FROM_SSL(qtls->args.s);
+ BIO *nullbio;
+
+ if (sc == NULL || !SSL_set_min_proto_version(qtls->args.s, TLS1_3_VERSION))
+ return RAISE_INTERNAL_ERROR(qtls);
+
+ nullbio = BIO_new(BIO_s_null());
+ if (nullbio == NULL)
+ return RAISE_INTERNAL_ERROR(qtls);
+
+ /*
+ * Our custom record layer doesn't use the BIO - but libssl generally
+ * expects one to be present.
+ */
+ SSL_set_bio(qtls->args.s, nullbio, nullbio);
+
+ SSL_clear_options(qtls->args.s, SSL_OP_ENABLE_MIDDLEBOX_COMPAT);
+ ossl_ssl_set_custom_record_layer(sc, &quic_tls_record_method, qtls);
+
+ if (!ossl_tls_add_custom_ext_intern(NULL, &sc->cert->custext,
+ qtls->args.is_server ? ENDPOINT_SERVER
+ : ENDPOINT_CLIENT,
+ TLSEXT_TYPE_quic_transport_parameters,
+ SSL_EXT_TLS1_3_ONLY
+ | SSL_EXT_CLIENT_HELLO
+ | SSL_EXT_TLS1_3_ENCRYPTED_EXTENSIONS,
+ add_transport_params_cb,
+ free_transport_params_cb, qtls,
+ parse_transport_params_cb, qtls))
+ return 0;
+
+ sc->s3.flags |= TLS1_FLAGS_QUIC;
+
+ return 1;
+}
+
+#ifndef OPENSSL_NO_QUIC
+int ossl_quic_tls_tick(QUIC_TLS *qtls)
+{
+ int ret, err;
+ const unsigned char *alpn;
+ unsigned int alpnlen;
+
+ if (qtls->inerror)
+ return 0;
+
+ /*
+ * SSL_get_error does not truly know what the cause of an SSL_read failure
+ * is and to some extent guesses based on contextual information. In
+ * particular, if there is _any_ ERR on the error stack, SSL_ERROR_SSL or
+ * SSL_ERROR_SYSCALL will be returned no matter what and there is no
+ * possibility of SSL_ERROR_WANT_READ/WRITE being returned, even if that was
+ * the actual cause of the SSL_read() failure.
+ *
+ * This means that ordinarily, the below code might not work right if the
+ * application has any ERR on the error stack. In order to make this code
+ * perform correctly regardless of prior ERR state, we use a variant of
+ * SSL_get_error() which ignores the error stack. However, some ERRs are
+ * raised by SSL_read() and actually indicate that something has gone wrong
+ * during the call to SSL_read(). We therefore adopt a strategy of marking
+ * the ERR stack and seeing if any errors get appended during the call to
+     * SSL_read(). If any are, we assume SSL_read() has raised an error and
+ * that we should use normal SSL_get_error() handling.
+ *
+ * NOTE: Ensure all escape paths from this function call
+ * ERR_clear_to_mark(). The RAISE macros handle this in failure cases.
+ */
+ ERR_set_mark();
+
+ if (!qtls->configured) {
+ SSL_CONNECTION *sc = SSL_CONNECTION_FROM_SSL(qtls->args.s);
+ SSL_CTX *sctx;
+
+ if (sc == NULL)
+ return RAISE_INTERNAL_ERROR(qtls);
+ sctx = SSL_CONNECTION_GET_CTX(sc);
+
+ /*
+ * No matter how the user has configured us, there are certain
+ * requirements for QUIC-TLS that we enforce
+ */
+
+ /* ALPN is a requirement for QUIC and must be set */
+ if (qtls->args.is_server) {
+ if (sctx->ext.alpn_select_cb == NULL)
+ return RAISE_INTERNAL_ERROR(qtls);
+ } else {
+ if (sc->ext.alpn == NULL || sc->ext.alpn_len == 0)
+ return RAISE_ERROR(qtls, OSSL_QUIC_ERR_CRYPTO_NO_APP_PROTO,
+ "ALPN must be configured when using QUIC");
+ }
+
+ if (!ossl_quic_tls_configure(qtls))
+ return RAISE_INTERNAL_ERROR(qtls);
+
+ sc->s3.flags |= TLS1_FLAGS_QUIC_INTERNAL;
+
+ if (qtls->args.is_server)
+ SSL_set_accept_state(qtls->args.s);
+ else
+ SSL_set_connect_state(qtls->args.s);
+
+ qtls->configured = 1;
+ }
+
+ if (qtls->complete)
+ /*
+ * There should never be app data to read, but calling SSL_read() will
+ * ensure any post-handshake messages are processed.
+ */
+ ret = SSL_read(qtls->args.s, NULL, 0);
+ else
+ ret = SSL_do_handshake(qtls->args.s);
+
+ if (ret <= 0) {
+ err = ossl_ssl_get_error(qtls->args.s, ret,
+ /*check_err=*/ERR_count_to_mark() > 0);
+
+ switch (err) {
+ case SSL_ERROR_WANT_READ:
+ case SSL_ERROR_WANT_WRITE:
+ case SSL_ERROR_WANT_CLIENT_HELLO_CB:
+ case SSL_ERROR_WANT_X509_LOOKUP:
+ case SSL_ERROR_WANT_RETRY_VERIFY:
+ ERR_pop_to_mark();
+ return 1;
+
+ default:
+ return RAISE_INTERNAL_ERROR(qtls);
+ }
+ }
+
+ if (!qtls->complete) {
+ /* Validate that we have ALPN */
+ SSL_get0_alpn_selected(qtls->args.s, &alpn, &alpnlen);
+ if (alpn == NULL || alpnlen == 0)
+ return RAISE_ERROR(qtls, OSSL_QUIC_ERR_CRYPTO_NO_APP_PROTO,
+ "no application protocol negotiated");
+
+ qtls->complete = 1;
+ ERR_pop_to_mark();
+ return qtls->args.handshake_complete_cb(qtls->args.handshake_complete_cb_arg);
+ }
+
+ ERR_pop_to_mark();
+ return 1;
+}
+#endif
+
+void ossl_quic_tls_clear(QUIC_TLS *qtls)
+{
+ if (qtls == NULL)
+ return;
+ qtls->local_transport_params_consumed = 0;
+}
+
+int ossl_quic_tls_set_transport_params(QUIC_TLS *qtls,
+ const unsigned char *transport_params,
+ size_t transport_params_len)
+{
+ if (qtls->local_transport_params_consumed)
+ return 0;
+
+ qtls->local_transport_params = transport_params;
+ qtls->local_transport_params_len = transport_params_len;
+ return 1;
+}
+
+int ossl_quic_tls_get_error(QUIC_TLS *qtls,
+ uint64_t *error_code,
+ const char **error_msg,
+ ERR_STATE **error_state)
+{
+ if (qtls->inerror) {
+ *error_code = qtls->error_code;
+ *error_msg = qtls->error_msg;
+ *error_state = qtls->error_state;
+ }
+
+ return qtls->inerror;
+}
+
+/*
+ * Returns true if the last handshake record message we processed was a
+ * CertificateRequest
+ */
+int ossl_quic_tls_is_cert_request(QUIC_TLS *qtls)
+{
+ SSL_CONNECTION *sc = SSL_CONNECTION_FROM_SSL(qtls->args.s);
+
+ if (sc == NULL)
+ return 0;
+
+ return sc->s3.tmp.message_type == SSL3_MT_CERTIFICATE_REQUEST;
+}
+
+/*
+ * Returns true if the last session associated with the connection has an
+ * invalid max_early_data value for QUIC.
+ */
+int ossl_quic_tls_has_bad_max_early_data(QUIC_TLS *qtls)
+{
+ uint32_t max_early_data = SSL_get0_session(qtls->args.s)->ext.max_early_data;
+
+ /*
+     * If max_early_data was present we always ensure a non-zero value is
+     * stored in the session for QUIC. Therefore if max_early_data == 0 here
+     * we can be confident that it was not present in the NewSessionTicket.
+     * RFC 9001 requires that, when used with QUIC, max_early_data is either
+     * absent or exactly 0xffffffff, so anything else is invalid.
+     */
+ return max_early_data != 0xffffffff && max_early_data != 0;
+}
+
+int ossl_quic_tls_set_early_data_enabled(QUIC_TLS *qtls, int enabled)
+{
+ SSL_CONNECTION *sc = SSL_CONNECTION_FROM_SSL(qtls->args.s);
+
+ if (sc == NULL || !SSL_IS_QUIC_HANDSHAKE(sc) || !SSL_in_before(qtls->args.s))
+ return 0;
+
+ if (!enabled) {
+ sc->max_early_data = 0;
+ sc->early_data_state = SSL_EARLY_DATA_NONE;
+ return 1;
+ }
+
+ if (sc->server) {
+ sc->max_early_data = 0xffffffff;
+ sc->early_data_state = SSL_EARLY_DATA_ACCEPTING;
+ return 1;
+ }
+
+ if ((sc->session == NULL || sc->session->ext.max_early_data != 0xffffffff)
+ && sc->psk_use_session_cb == NULL)
+ return 0;
+
+ sc->early_data_state = SSL_EARLY_DATA_CONNECTING;
+ return 1;
+}
diff --git a/crypto/openssl/ssl/quic/quic_tls_api.c b/crypto/openssl/ssl/quic/quic_tls_api.c
new file mode 100644
index 000000000000..ea8924f2c416
--- /dev/null
+++ b/crypto/openssl/ssl/quic/quic_tls_api.c
@@ -0,0 +1,208 @@
+/*
+ * Copyright 2024-2025 The OpenSSL Project Authors. All Rights Reserved.
+ *
+ * Licensed under the Apache License 2.0 (the "License"). You may not use
+ * this file except in compliance with the License. You can obtain a copy
+ * in the file LICENSE in the source distribution or at
+ * https://www.openssl.org/source/license.html
+ */
+
+#include <openssl/ssl.h>
+#include "internal/ssl_unwrap.h"
+#include "internal/quic_tls.h"
+#include "../ssl_local.h"
+
+static int crypto_send_cb(const unsigned char *buf, size_t buf_len,
+ size_t *consumed, void *arg)
+{
+ SSL *s = (SSL *)arg;
+ SSL_CONNECTION *sc = SSL_CONNECTION_FROM_SSL(s);
+
+ if (sc == NULL)
+ return 0;
+ return sc->qtcb.crypto_send_cb(s, buf, buf_len, consumed, sc->qtarg);
+}
+
+static int crypto_recv_rcd_cb(const unsigned char **buf, size_t *bytes_read,
+ void *arg)
+{
+ SSL *s = (SSL *)arg;
+ SSL_CONNECTION *sc = SSL_CONNECTION_FROM_SSL(s);
+
+ if (sc == NULL)
+ return 0;
+ return sc->qtcb.crypto_recv_rcd_cb(s, buf, bytes_read, sc->qtarg);
+}
+
+static int crypto_release_rcd_cb(size_t bytes_read, void *arg)
+{
+ SSL *s = (SSL *)arg;
+ SSL_CONNECTION *sc = SSL_CONNECTION_FROM_SSL(s);
+
+ if (sc == NULL)
+ return 0;
+ return sc->qtcb.crypto_release_rcd_cb(s, bytes_read, sc->qtarg);
+}
+
+static int yield_secret_cb(uint32_t prot_level, int direction,
+ uint32_t suite_id, EVP_MD *md,
+ const unsigned char *secret, size_t secret_len,
+ void *arg)
+{
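+    /*
+     * suite_id and md are deliberately not forwarded: the public QUIC TLS
+     * callback API leaves ciphersuite interpretation to the third-party QUIC
+     * stack (see the corresponding comment in quic_tls.c).
+     */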
+ SSL *s = (SSL *)arg;
+ SSL_CONNECTION *sc = SSL_CONNECTION_FROM_SSL(s);
+
+ if (sc == NULL)
+ return 0;
+ return sc->qtcb.yield_secret_cb(s, prot_level, direction,
+ secret, secret_len, sc->qtarg);
+}
+
+static int got_transport_params_cb(const unsigned char *params,
+ size_t params_len,
+ void *arg)
+{
+ SSL *s = (SSL *)arg;
+ SSL_CONNECTION *sc = SSL_CONNECTION_FROM_SSL(s);
+
+ if (sc == NULL)
+ return 0;
+ return sc->qtcb.got_transport_params_cb(s, params, params_len, sc->qtarg);
+}
+
+static int alert_cb(void *arg, unsigned char alert_code)
+{
+ SSL *s = (SSL *)arg;
+ SSL_CONNECTION *sc = SSL_CONNECTION_FROM_SSL(s);
+
+ if (sc == NULL)
+ return 0;
+ return sc->qtcb.alert_cb(s, alert_code, sc->qtarg);
+}
+
+static int tls_callbacks_from_dispatch(OSSL_QUIC_TLS_CALLBACKS *qtcb,
+ const OSSL_DISPATCH *qtdis)
+{
+ for (; qtdis->function_id != 0; qtdis++) {
+ switch (qtdis->function_id) {
+ case OSSL_FUNC_SSL_QUIC_TLS_CRYPTO_SEND:
+ if (qtcb->crypto_send_cb == NULL)
+ qtcb->crypto_send_cb = OSSL_FUNC_SSL_QUIC_TLS_crypto_send(qtdis);
+ break;
+ case OSSL_FUNC_SSL_QUIC_TLS_CRYPTO_RECV_RCD:
+ if (qtcb->crypto_recv_rcd_cb == NULL)
+ qtcb->crypto_recv_rcd_cb =
+ OSSL_FUNC_SSL_QUIC_TLS_crypto_recv_rcd(qtdis);
+ break;
+ case OSSL_FUNC_SSL_QUIC_TLS_CRYPTO_RELEASE_RCD:
+ if (qtcb->crypto_release_rcd_cb == NULL)
+ qtcb->crypto_release_rcd_cb =
+ OSSL_FUNC_SSL_QUIC_TLS_crypto_release_rcd(qtdis);
+ break;
+ case OSSL_FUNC_SSL_QUIC_TLS_YIELD_SECRET:
+ if (qtcb->yield_secret_cb == NULL)
+ qtcb->yield_secret_cb =
+ OSSL_FUNC_SSL_QUIC_TLS_yield_secret(qtdis);
+ break;
+ case OSSL_FUNC_SSL_QUIC_TLS_GOT_TRANSPORT_PARAMS:
+ if (qtcb->got_transport_params_cb == NULL)
+ qtcb->got_transport_params_cb =
+ OSSL_FUNC_SSL_QUIC_TLS_got_transport_params(qtdis);
+ break;
+ case OSSL_FUNC_SSL_QUIC_TLS_ALERT:
+ if (qtcb->alert_cb == NULL)
+ qtcb->alert_cb =
+ OSSL_FUNC_SSL_QUIC_TLS_alert(qtdis);
+ break;
+ }
+ }
+
+ if (qtcb->crypto_send_cb == NULL
+ || qtcb->crypto_recv_rcd_cb == NULL
+ || qtcb->crypto_release_rcd_cb == NULL
+ || qtcb->yield_secret_cb == NULL
+ || qtcb->got_transport_params_cb == NULL
+ || qtcb->alert_cb == NULL) {
+ ERR_raise(ERR_LIB_SSL, SSL_R_MISSING_QUIC_TLS_FUNCTIONS);
+ return 0;
+ }
+
+ return 1;
+}
+
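+/*
+ * Illustrative only (not part of this file): a third-party QUIC stack
+ * registers its callbacks by building an OSSL_DISPATCH table and passing it
+ * to SSL_set_quic_tls_cbs(). The my_* functions below are hypothetical
+ * application callbacks matching the OSSL_FUNC_SSL_QUIC_TLS_* signatures:
+ *
+ *     static const OSSL_DISPATCH qtdis[] = {
+ *         { OSSL_FUNC_SSL_QUIC_TLS_CRYPTO_SEND,
+ *           (void (*)(void))my_crypto_send },
+ *         { OSSL_FUNC_SSL_QUIC_TLS_CRYPTO_RECV_RCD,
+ *           (void (*)(void))my_crypto_recv_rcd },
+ *         { OSSL_FUNC_SSL_QUIC_TLS_CRYPTO_RELEASE_RCD,
+ *           (void (*)(void))my_crypto_release_rcd },
+ *         { OSSL_FUNC_SSL_QUIC_TLS_YIELD_SECRET,
+ *           (void (*)(void))my_yield_secret },
+ *         { OSSL_FUNC_SSL_QUIC_TLS_GOT_TRANSPORT_PARAMS,
+ *           (void (*)(void))my_got_transport_params },
+ *         { OSSL_FUNC_SSL_QUIC_TLS_ALERT,
+ *           (void (*)(void))my_alert },
+ *         { 0, NULL }
+ *     };
+ *
+ *     if (!SSL_set_quic_tls_cbs(ssl, qtdis, app_arg))
+ *         ... handle the failure ...
+ */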
+int SSL_set_quic_tls_cbs(SSL *s, const OSSL_DISPATCH *qtdis, void *arg)
+{
+ SSL_CONNECTION *sc = SSL_CONNECTION_FROM_SSL(s);
+ QUIC_TLS_ARGS qtlsargs;
+
+ if (!SSL_is_tls(s)) {
+ ERR_raise(ERR_LIB_SSL, ERR_R_SHOULD_NOT_HAVE_BEEN_CALLED);
+ return 0;
+ }
+
+ if (!tls_callbacks_from_dispatch(&sc->qtcb, qtdis))
+ /* ERR_raise already called */
+ return 0;
+
+ sc->qtarg = arg;
+
+ ossl_quic_tls_free(sc->qtls);
+ qtlsargs.s = s;
+ qtlsargs.crypto_send_cb = crypto_send_cb;
+ qtlsargs.crypto_send_cb_arg = s;
+ qtlsargs.crypto_recv_rcd_cb = crypto_recv_rcd_cb;
+ qtlsargs.crypto_recv_rcd_cb_arg = s;
+ qtlsargs.crypto_release_rcd_cb = crypto_release_rcd_cb;
+ qtlsargs.crypto_release_rcd_cb_arg = s;
+ qtlsargs.yield_secret_cb = yield_secret_cb;
+ qtlsargs.yield_secret_cb_arg = s;
+ qtlsargs.got_transport_params_cb = got_transport_params_cb;
+ qtlsargs.got_transport_params_cb_arg = s;
+ qtlsargs.handshake_complete_cb = NULL;
+ qtlsargs.handshake_complete_cb_arg = NULL;
+ qtlsargs.alert_cb = alert_cb;
+ qtlsargs.alert_cb_arg = s;
+ qtlsargs.is_server = sc->server;
+ qtlsargs.ossl_quic = 0;
+ sc->qtls = ossl_quic_tls_new(&qtlsargs);
+ if (sc->qtls == NULL)
+ return 0;
+
+ if (!ossl_quic_tls_configure(sc->qtls))
+ return 0;
+
+ return 1;
+}
+
+int SSL_set_quic_tls_transport_params(SSL *s,
+ const unsigned char *params,
+ size_t params_len)
+{
+ SSL_CONNECTION *sc = SSL_CONNECTION_FROM_SSL(s);
+
+ if (sc == NULL)
+ return 0;
+
+ if (sc->qtls == NULL) {
+ ERR_raise(ERR_LIB_SSL, ERR_R_SHOULD_NOT_HAVE_BEEN_CALLED);
+ return 0;
+ }
+
+ return ossl_quic_tls_set_transport_params(sc->qtls, params, params_len);
+}
+
+int SSL_set_quic_tls_early_data_enabled(SSL *s, int enabled)
+{
+ SSL_CONNECTION *sc = SSL_CONNECTION_FROM_SSL(s);
+
+ if (!SSL_is_tls(s)) {
+ ERR_raise(ERR_LIB_SSL, ERR_R_SHOULD_NOT_HAVE_BEEN_CALLED);
+ return 0;
+ }
+
+ if (sc->qtls == NULL) {
+ ERR_raise(ERR_LIB_SSL, ERR_R_SHOULD_NOT_HAVE_BEEN_CALLED);
+ return 0;
+ }
+
+ return ossl_quic_tls_set_early_data_enabled(sc->qtls, enabled);
+}
diff --git a/crypto/openssl/ssl/quic/quic_trace.c b/crypto/openssl/ssl/quic/quic_trace.c
new file mode 100644
index 000000000000..936707a573f1
--- /dev/null
+++ b/crypto/openssl/ssl/quic/quic_trace.c
@@ -0,0 +1,650 @@
+/*
+ * Copyright 2023-2025 The OpenSSL Project Authors. All Rights Reserved.
+ *
+ * Licensed under the Apache License 2.0 (the "License"). You may not use
+ * this file except in compliance with the License. You can obtain a copy
+ * in the file LICENSE in the source distribution or at
+ * https://www.openssl.org/source/license.html
+ */
+
+#include <openssl/bio.h>
+#include "../ssl_local.h"
+#include "internal/quic_trace.h"
+#include "internal/quic_ssl.h"
+#include "internal/quic_channel.h"
+#include "internal/quic_wire_pkt.h"
+#include "internal/quic_wire.h"
+#include "internal/ssl_unwrap.h"
+
+static const char *packet_type(int type)
+{
+ switch (type) {
+ case QUIC_PKT_TYPE_INITIAL:
+ return "Initial";
+
+ case QUIC_PKT_TYPE_0RTT:
+ return "0RTT";
+
+ case QUIC_PKT_TYPE_HANDSHAKE:
+ return "Handshake";
+
+ case QUIC_PKT_TYPE_RETRY:
+ return "Retry";
+
+ case QUIC_PKT_TYPE_1RTT:
+ return "1RTT";
+
+ case QUIC_PKT_TYPE_VERSION_NEG:
+ return "VersionNeg";
+
+ default:
+ return "Unknown";
+ }
+}
+
+/* Print a non-NUL-terminated string to a BIO */
+static void put_str(BIO *bio, char *str, size_t slen)
+{
+ size_t i;
+
+ for (i = 0; i < slen; i++)
+ BIO_printf(bio, "%c", str[i]);
+}
+
+static void put_data(BIO *bio, const uint8_t *data, size_t datalen)
+{
+ size_t i;
+
+ for (i = 0; i < datalen; i++)
+ BIO_printf(bio, "%02x", data[i]);
+}
+
+static void put_conn_id(BIO *bio, QUIC_CONN_ID *id)
+{
+ if (id->id_len == 0) {
+ BIO_puts(bio, "<zero length id>");
+ return;
+ }
+
+ BIO_puts(bio, "0x");
+ put_data(bio, id->id, id->id_len);
+}
+
+static void put_token(BIO *bio, const uint8_t *token, size_t token_len)
+{
+ if (token_len == 0)
+ BIO_puts(bio, "<zero length token>");
+ else
+ put_data(bio, token, token_len);
+}
+
+static int frame_ack(BIO *bio, PACKET *pkt)
+{
+ OSSL_QUIC_FRAME_ACK ack;
+ OSSL_QUIC_ACK_RANGE *ack_ranges = NULL;
+ uint64_t total_ranges = 0;
+ uint64_t i;
+ int ret = 0;
+
+ if (!ossl_quic_wire_peek_frame_ack_num_ranges(pkt, &total_ranges)
+ /* In case sizeof(uint64_t) > sizeof(size_t) */
+ || total_ranges > SIZE_MAX / sizeof(ack_ranges[0])
+ || (ack_ranges = OPENSSL_zalloc(sizeof(ack_ranges[0])
+ * (size_t)total_ranges)) == NULL)
+ return ret;
+
+ ack.ack_ranges = ack_ranges;
+ ack.num_ack_ranges = (size_t)total_ranges;
+
+ /* Ack delay exponent is 0, so we can get the raw delay time below */
+ if (!ossl_quic_wire_decode_frame_ack(pkt, 0, &ack, NULL))
+ goto end;
+
+ BIO_printf(bio, " Largest acked: %llu\n",
+ (unsigned long long)ack.ack_ranges[0].end);
+ BIO_printf(bio, " Ack delay (raw) %llu\n",
+ (unsigned long long)ossl_time2ticks(ack.delay_time));
+ BIO_printf(bio, " Ack range count: %llu\n",
+ (unsigned long long)total_ranges - 1);
+ BIO_printf(bio, " First ack range: %llu\n",
+ (unsigned long long)(ack.ack_ranges[0].end
+ - ack.ack_ranges[0].start));
+ for (i = 1; i < total_ranges; i++) {
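+        /*
+         * RFC 9000 s. 19.3.1 encodes each range after the first as a Gap
+         * followed by an ACK Range Length. The Gap field is the number of
+         * unacknowledged packets between two ranges minus one, hence the
+         * "- 2" when recovering it from the decoded absolute packet numbers
+         * below.
+         */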
+ BIO_printf(bio, " Gap: %llu\n",
+ (unsigned long long)(ack.ack_ranges[i - 1].start
+ - ack.ack_ranges[i].end - 2));
+ BIO_printf(bio, " Ack range len: %llu\n",
+ (unsigned long long)(ack.ack_ranges[i].end
+ - ack.ack_ranges[i].start));
+ }
+
+ ret = 1;
+end:
+ OPENSSL_free(ack_ranges);
+ return ret;
+}
+
+static int frame_reset_stream(BIO *bio, PACKET *pkt)
+{
+ OSSL_QUIC_FRAME_RESET_STREAM frame_data;
+
+ if (!ossl_quic_wire_decode_frame_reset_stream(pkt, &frame_data))
+ return 0;
+
+ BIO_printf(bio, " Stream id: %llu\n",
+ (unsigned long long)frame_data.stream_id);
+ BIO_printf(bio, " App Protocol Error Code: %llu\n",
+ (unsigned long long)frame_data.app_error_code);
+ BIO_printf(bio, " Final size: %llu\n",
+ (unsigned long long)frame_data.final_size);
+
+ return 1;
+}
+
+static int frame_stop_sending(BIO *bio, PACKET *pkt)
+{
+ OSSL_QUIC_FRAME_STOP_SENDING frame_data;
+
+ if (!ossl_quic_wire_decode_frame_stop_sending(pkt, &frame_data))
+ return 0;
+
+ BIO_printf(bio, " Stream id: %llu\n",
+ (unsigned long long)frame_data.stream_id);
+ BIO_printf(bio, " App Protocol Error Code: %llu\n",
+ (unsigned long long)frame_data.app_error_code);
+
+ return 1;
+}
+
+static int frame_crypto(BIO *bio, PACKET *pkt)
+{
+ OSSL_QUIC_FRAME_CRYPTO frame_data;
+
+ if (!ossl_quic_wire_decode_frame_crypto(pkt, 1, &frame_data))
+ return 0;
+
+ BIO_printf(bio, " Offset: %llu\n", (unsigned long long)frame_data.offset);
+ BIO_printf(bio, " Len: %llu\n", (unsigned long long)frame_data.len);
+
+ return 1;
+}
+
+static int frame_new_token(BIO *bio, PACKET *pkt)
+{
+ const uint8_t *token;
+ size_t token_len;
+
+ if (!ossl_quic_wire_decode_frame_new_token(pkt, &token, &token_len))
+ return 0;
+
+ BIO_puts(bio, " Token: ");
+ put_token(bio, token, token_len);
+ BIO_puts(bio, "\n");
+
+ return 1;
+}
+
+static int frame_stream(BIO *bio, PACKET *pkt, uint64_t frame_type)
+{
+    OSSL_QUIC_FRAME_STREAM frame_data;
+
+    BIO_puts(bio, "Stream");
+    switch (frame_type) {
+ case OSSL_QUIC_FRAME_TYPE_STREAM:
+ BIO_puts(bio, "\n");
+ break;
+
+ case OSSL_QUIC_FRAME_TYPE_STREAM_FIN:
+ BIO_puts(bio, " (Fin)\n");
+ break;
+
+ case OSSL_QUIC_FRAME_TYPE_STREAM_LEN:
+ BIO_puts(bio, " (Len)\n");
+ break;
+
+ case OSSL_QUIC_FRAME_TYPE_STREAM_LEN_FIN:
+ BIO_puts(bio, " (Len, Fin)\n");
+ break;
+
+ case OSSL_QUIC_FRAME_TYPE_STREAM_OFF:
+ BIO_puts(bio, " (Off)\n");
+ break;
+
+ case OSSL_QUIC_FRAME_TYPE_STREAM_OFF_FIN:
+ BIO_puts(bio, " (Off, Fin)\n");
+ break;
+
+ case OSSL_QUIC_FRAME_TYPE_STREAM_OFF_LEN:
+ BIO_puts(bio, " (Off, Len)\n");
+ break;
+
+ case OSSL_QUIC_FRAME_TYPE_STREAM_OFF_LEN_FIN:
+ BIO_puts(bio, " (Off, Len, Fin)\n");
+ break;
+
+ default:
+ return 0;
+ }
+
+ if (!ossl_quic_wire_decode_frame_stream(pkt, 1, &frame_data))
+ return 0;
+
+ BIO_printf(bio, " Stream id: %llu\n",
+ (unsigned long long)frame_data.stream_id);
+ BIO_printf(bio, " Offset: %llu\n",
+ (unsigned long long)frame_data.offset);
+ /*
+ * It would be nice to find a way of passing the implicit length through
+ * to the msg_callback. But this is not currently possible.
+ */
+ if (frame_data.has_explicit_len)
+ BIO_printf(bio, " Len: %llu\n", (unsigned long long)frame_data.len);
+ else
+ BIO_puts(bio, " Len: <implicit length>\n");
+
+ return 1;
+}
+
+static int frame_max_data(BIO *bio, PACKET *pkt)
+{
+ uint64_t max_data = 0;
+
+ if (!ossl_quic_wire_decode_frame_max_data(pkt, &max_data))
+ return 0;
+
+ BIO_printf(bio, " Max Data: %llu\n", (unsigned long long)max_data);
+
+ return 1;
+}
+
+static int frame_max_stream_data(BIO *bio, PACKET *pkt)
+{
+ uint64_t stream_id = 0;
+ uint64_t max_stream_data = 0;
+
+ if (!ossl_quic_wire_decode_frame_max_stream_data(pkt, &stream_id,
+ &max_stream_data))
+ return 0;
+
+    BIO_printf(bio, "    Stream id: %llu\n", (unsigned long long)stream_id);
+    BIO_printf(bio, "    Max Stream Data: %llu\n",
+               (unsigned long long)max_stream_data);
+
+ return 1;
+}
+
+static int frame_max_streams(BIO *bio, PACKET *pkt)
+{
+ uint64_t max_streams = 0;
+
+ if (!ossl_quic_wire_decode_frame_max_streams(pkt, &max_streams))
+ return 0;
+
+ BIO_printf(bio, " Max Streams: %llu\n", (unsigned long long)max_streams);
+
+ return 1;
+}
+
+static int frame_data_blocked(BIO *bio, PACKET *pkt)
+{
+ uint64_t max_data = 0;
+
+ if (!ossl_quic_wire_decode_frame_data_blocked(pkt, &max_data))
+ return 0;
+
+ BIO_printf(bio, " Max Data: %llu\n", (unsigned long long)max_data);
+
+ return 1;
+}
+
+static int frame_stream_data_blocked(BIO *bio, PACKET *pkt)
+{
+ uint64_t stream_id = 0;
+ uint64_t max_data = 0;
+
+ if (!ossl_quic_wire_decode_frame_stream_data_blocked(pkt, &stream_id,
+ &max_data))
+ return 0;
+
+ BIO_printf(bio, " Stream id: %llu\n", (unsigned long long)stream_id);
+ BIO_printf(bio, " Max Data: %llu\n", (unsigned long long)max_data);
+
+ return 1;
+}
+
+static int frame_streams_blocked(BIO *bio, PACKET *pkt)
+{
+    uint64_t max_streams = 0;
+
+    if (!ossl_quic_wire_decode_frame_streams_blocked(pkt, &max_streams))
+        return 0;
+
+    BIO_printf(bio, "    Max Streams: %llu\n", (unsigned long long)max_streams);
+
+ return 1;
+}
+
+static int frame_new_conn_id(BIO *bio, PACKET *pkt)
+{
+ OSSL_QUIC_FRAME_NEW_CONN_ID frame_data;
+
+ if (!ossl_quic_wire_decode_frame_new_conn_id(pkt, &frame_data))
+ return 0;
+
+ BIO_printf(bio, " Sequence Number: %llu\n",
+ (unsigned long long)frame_data.seq_num);
+ BIO_printf(bio, " Retire prior to: %llu\n",
+ (unsigned long long)frame_data.retire_prior_to);
+ BIO_puts(bio, " Connection id: ");
+ put_conn_id(bio, &frame_data.conn_id);
+ BIO_puts(bio, "\n Stateless Reset Token: ");
+ put_data(bio, frame_data.stateless_reset.token,
+ sizeof(frame_data.stateless_reset.token));
+ BIO_puts(bio, "\n");
+
+ return 1;
+}
+
+static int frame_retire_conn_id(BIO *bio, PACKET *pkt)
+{
+ uint64_t seq_num;
+
+ if (!ossl_quic_wire_decode_frame_retire_conn_id(pkt, &seq_num))
+ return 0;
+
+ BIO_printf(bio, " Sequence Number: %llu\n", (unsigned long long)seq_num);
+
+ return 1;
+}
+
+static int frame_path_challenge(BIO *bio, PACKET *pkt)
+{
+ uint64_t data = 0;
+
+ if (!ossl_quic_wire_decode_frame_path_challenge(pkt, &data))
+ return 0;
+
+ BIO_printf(bio, " Data: %016llx\n", (unsigned long long)data);
+
+ return 1;
+}
+
+static int frame_path_response(BIO *bio, PACKET *pkt)
+{
+ uint64_t data = 0;
+
+ if (!ossl_quic_wire_decode_frame_path_response(pkt, &data))
+ return 0;
+
+ BIO_printf(bio, " Data: %016llx\n", (unsigned long long)data);
+
+ return 1;
+}
+
+static int frame_conn_closed(BIO *bio, PACKET *pkt)
+{
+ OSSL_QUIC_FRAME_CONN_CLOSE frame_data;
+
+ if (!ossl_quic_wire_decode_frame_conn_close(pkt, &frame_data))
+ return 0;
+
+ BIO_printf(bio, " Error Code: %llu\n",
+ (unsigned long long)frame_data.error_code);
+ BIO_puts(bio, " Reason: ");
+ put_str(bio, frame_data.reason, frame_data.reason_len);
+ BIO_puts(bio, "\n");
+
+ return 1;
+}
+
+static int trace_frame_data(BIO *bio, PACKET *pkt)
+{
+ uint64_t frame_type;
+
+ if (!ossl_quic_wire_peek_frame_header(pkt, &frame_type, NULL))
+ return 0;
+
+ switch (frame_type) {
+ case OSSL_QUIC_FRAME_TYPE_PING:
+ BIO_puts(bio, "Ping\n");
+ if (!ossl_quic_wire_decode_frame_ping(pkt))
+ return 0;
+ break;
+
+ case OSSL_QUIC_FRAME_TYPE_PADDING:
+ BIO_puts(bio, "Padding\n");
+ ossl_quic_wire_decode_padding(pkt);
+ break;
+
+ case OSSL_QUIC_FRAME_TYPE_ACK_WITHOUT_ECN:
+ case OSSL_QUIC_FRAME_TYPE_ACK_WITH_ECN:
+ BIO_puts(bio, "Ack ");
+ if (frame_type == OSSL_QUIC_FRAME_TYPE_ACK_WITH_ECN)
+ BIO_puts(bio, " (with ECN)\n");
+ else
+ BIO_puts(bio, " (without ECN)\n");
+ if (!frame_ack(bio, pkt))
+ return 0;
+ break;
+
+ case OSSL_QUIC_FRAME_TYPE_RESET_STREAM:
+ BIO_puts(bio, "Reset stream\n");
+ if (!frame_reset_stream(bio, pkt))
+ return 0;
+ break;
+
+ case OSSL_QUIC_FRAME_TYPE_STOP_SENDING:
+ BIO_puts(bio, "Stop sending\n");
+ if (!frame_stop_sending(bio, pkt))
+ return 0;
+ break;
+
+ case OSSL_QUIC_FRAME_TYPE_CRYPTO:
+ BIO_puts(bio, "Crypto\n");
+ if (!frame_crypto(bio, pkt))
+ return 0;
+ break;
+
+ case OSSL_QUIC_FRAME_TYPE_NEW_TOKEN:
+ BIO_puts(bio, "New token\n");
+ if (!frame_new_token(bio, pkt))
+ return 0;
+ break;
+
+ case OSSL_QUIC_FRAME_TYPE_STREAM:
+ case OSSL_QUIC_FRAME_TYPE_STREAM_FIN:
+ case OSSL_QUIC_FRAME_TYPE_STREAM_LEN:
+ case OSSL_QUIC_FRAME_TYPE_STREAM_LEN_FIN:
+ case OSSL_QUIC_FRAME_TYPE_STREAM_OFF:
+ case OSSL_QUIC_FRAME_TYPE_STREAM_OFF_FIN:
+ case OSSL_QUIC_FRAME_TYPE_STREAM_OFF_LEN:
+ case OSSL_QUIC_FRAME_TYPE_STREAM_OFF_LEN_FIN:
+ /* frame_stream() prints the frame type string */
+ if (!frame_stream(bio, pkt, frame_type))
+ return 0;
+ break;
+
+ case OSSL_QUIC_FRAME_TYPE_MAX_DATA:
+ BIO_puts(bio, "Max data\n");
+ if (!frame_max_data(bio, pkt))
+ return 0;
+ break;
+
+ case OSSL_QUIC_FRAME_TYPE_MAX_STREAM_DATA:
+ BIO_puts(bio, "Max stream data\n");
+ if (!frame_max_stream_data(bio, pkt))
+ return 0;
+ break;
+
+ case OSSL_QUIC_FRAME_TYPE_MAX_STREAMS_BIDI:
+ case OSSL_QUIC_FRAME_TYPE_MAX_STREAMS_UNI:
+ BIO_puts(bio, "Max streams ");
+ if (frame_type == OSSL_QUIC_FRAME_TYPE_MAX_STREAMS_BIDI)
+ BIO_puts(bio, " (Bidi)\n");
+ else
+ BIO_puts(bio, " (Uni)\n");
+ if (!frame_max_streams(bio, pkt))
+ return 0;
+ break;
+
+ case OSSL_QUIC_FRAME_TYPE_DATA_BLOCKED:
+ BIO_puts(bio, "Data blocked\n");
+ if (!frame_data_blocked(bio, pkt))
+ return 0;
+ break;
+
+ case OSSL_QUIC_FRAME_TYPE_STREAM_DATA_BLOCKED:
+ BIO_puts(bio, "Stream data blocked\n");
+ if (!frame_stream_data_blocked(bio, pkt))
+ return 0;
+ break;
+
+ case OSSL_QUIC_FRAME_TYPE_STREAMS_BLOCKED_BIDI:
+ case OSSL_QUIC_FRAME_TYPE_STREAMS_BLOCKED_UNI:
+ BIO_puts(bio, "Streams blocked");
+ if (frame_type == OSSL_QUIC_FRAME_TYPE_STREAMS_BLOCKED_BIDI)
+ BIO_puts(bio, " (Bidi)\n");
+ else
+ BIO_puts(bio, " (Uni)\n");
+ if (!frame_streams_blocked(bio, pkt))
+ return 0;
+ break;
+
+ case OSSL_QUIC_FRAME_TYPE_NEW_CONN_ID:
+ BIO_puts(bio, "New conn id\n");
+ if (!frame_new_conn_id(bio, pkt))
+ return 0;
+ break;
+
+ case OSSL_QUIC_FRAME_TYPE_RETIRE_CONN_ID:
+ BIO_puts(bio, "Retire conn id\n");
+ if (!frame_retire_conn_id(bio, pkt))
+ return 0;
+ break;
+
+ case OSSL_QUIC_FRAME_TYPE_PATH_CHALLENGE:
+ BIO_puts(bio, "Path challenge\n");
+ if (!frame_path_challenge(bio, pkt))
+ return 0;
+ break;
+
+ case OSSL_QUIC_FRAME_TYPE_PATH_RESPONSE:
+ BIO_puts(bio, "Path response\n");
+ if (!frame_path_response(bio, pkt))
+ return 0;
+ break;
+
+ case OSSL_QUIC_FRAME_TYPE_CONN_CLOSE_APP:
+ case OSSL_QUIC_FRAME_TYPE_CONN_CLOSE_TRANSPORT:
+ BIO_puts(bio, "Connection close");
+ if (frame_type == OSSL_QUIC_FRAME_TYPE_CONN_CLOSE_APP)
+ BIO_puts(bio, " (app)\n");
+ else
+ BIO_puts(bio, " (transport)\n");
+ if (!frame_conn_closed(bio, pkt))
+ return 0;
+ break;
+
+ case OSSL_QUIC_FRAME_TYPE_HANDSHAKE_DONE:
+ BIO_puts(bio, "Handshake done\n");
+ if (!ossl_quic_wire_decode_frame_handshake_done(pkt))
+ return 0;
+ break;
+
+ default:
+ return 0;
+ }
+
+ if (PACKET_remaining(pkt) != 0)
+ BIO_puts(bio, " <unexpected trailing frame data skipped>\n");
+
+ return 1;
+}
+
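+/*
+ * A minimal usage sketch (illustrative only): ossl_quic_trace() has the
+ * SSL_set_msg_callback() callback signature, with the output BIO supplied as
+ * the callback argument:
+ *
+ *     SSL_set_msg_callback(ssl, ossl_quic_trace);
+ *     SSL_set_msg_callback_arg(ssl, bio);
+ *
+ * It returns 0 for any content type it does not recognise so that callers can
+ * fall back to SSL_trace() for ordinary TLS records.
+ */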
+int ossl_quic_trace(int write_p, int version, int content_type,
+ const void *buf, size_t msglen, SSL *ssl, void *arg)
+{
+ BIO *bio = arg;
+ PACKET pkt;
+ size_t id_len = 0;
+ QUIC_CHANNEL *ch;
+
+ switch (content_type) {
+ case SSL3_RT_QUIC_DATAGRAM:
+ BIO_puts(bio, write_p ? "Sent" : "Received");
+ /*
+         * Unfortunately there is no way of receiving auxiliary information
+         * about the datagram, such as the peer address, through the
+         * msg_callback API.
+ */
+ BIO_printf(bio, " Datagram\n Length: %zu\n", msglen);
+ break;
+
+ case SSL3_RT_QUIC_PACKET:
+ {
+ QUIC_PKT_HDR hdr;
+ size_t i;
+
+ if (!PACKET_buf_init(&pkt, buf, msglen))
+ return 0;
+ /* Decode the packet header */
+ ch = ossl_quic_conn_get_channel(ssl);
+ id_len = ossl_quic_channel_get_short_header_conn_id_len(ch);
+ if (ossl_quic_wire_decode_pkt_hdr(&pkt, id_len, 0, 1, &hdr, NULL,
+ NULL) != 1)
+ return 0;
+
+ BIO_puts(bio, write_p ? "Sent" : "Received");
+ BIO_puts(bio, " Packet\n");
+ BIO_printf(bio, " Packet Type: %s\n", packet_type(hdr.type));
+ if (hdr.type != QUIC_PKT_TYPE_1RTT)
+ BIO_printf(bio, " Version: 0x%08lx\n",
+ (unsigned long)hdr.version);
+ BIO_puts(bio, " Destination Conn Id: ");
+ put_conn_id(bio, &hdr.dst_conn_id);
+ BIO_puts(bio, "\n");
+ if (hdr.type != QUIC_PKT_TYPE_1RTT) {
+ BIO_puts(bio, " Source Conn Id: ");
+ put_conn_id(bio, &hdr.src_conn_id);
+ BIO_puts(bio, "\n");
+ }
+ BIO_printf(bio, " Payload length: %zu\n", hdr.len);
+ if (hdr.type == QUIC_PKT_TYPE_INITIAL) {
+ BIO_puts(bio, " Token: ");
+ put_token(bio, hdr.token, hdr.token_len);
+ BIO_puts(bio, "\n");
+ }
+ if (hdr.type != QUIC_PKT_TYPE_VERSION_NEG
+ && hdr.type != QUIC_PKT_TYPE_RETRY) {
+ BIO_puts(bio, " Packet Number: 0x");
+ /* Will always be at least 1 byte */
+ for (i = 0; i < hdr.pn_len; i++)
+ BIO_printf(bio, "%02x", hdr.pn[i]);
+ BIO_puts(bio, "\n");
+ }
+ break;
+ }
+
+ case SSL3_RT_QUIC_FRAME_PADDING:
+ case SSL3_RT_QUIC_FRAME_FULL:
+ case SSL3_RT_QUIC_FRAME_HEADER:
+ {
+ BIO_puts(bio, write_p ? "Sent" : "Received");
+ BIO_puts(bio, " Frame: ");
+
+ if (!PACKET_buf_init(&pkt, buf, msglen))
+ return 0;
+ if (!trace_frame_data(bio, &pkt)) {
+ BIO_puts(bio, " <error processing frame data>\n");
+ return 0;
+ }
+ }
+ break;
+
+ default:
+ /* Unrecognised content_type. We defer to SSL_trace */
+ return 0;
+ }
+
+ return 1;
+}
diff --git a/crypto/openssl/ssl/quic/quic_tserver.c b/crypto/openssl/ssl/quic/quic_tserver.c
new file mode 100644
index 000000000000..f7106b2fef37
--- /dev/null
+++ b/crypto/openssl/ssl/quic/quic_tserver.c
@@ -0,0 +1,584 @@
+/*
+ * Copyright 2022-2025 The OpenSSL Project Authors. All Rights Reserved.
+ *
+ * Licensed under the Apache License 2.0 (the "License"). You may not use
+ * this file except in compliance with the License. You can obtain a copy
+ * in the file LICENSE in the source distribution or at
+ * https://www.openssl.org/source/license.html
+ */
+
+#include "internal/quic_tserver.h"
+#include "internal/quic_channel.h"
+#include "internal/quic_statm.h"
+#include "internal/quic_port.h"
+#include "internal/quic_engine.h"
+#include "internal/common.h"
+#include "internal/time.h"
+#include "quic_local.h"
+
+/*
+ * QUIC Test Server Module
+ * =======================
+ */
+struct quic_tserver_st {
+ QUIC_TSERVER_ARGS args;
+
+ /* Dummy SSL object for this QUIC connection for use by msg_callback */
+ SSL *ssl;
+
+ /*
+ * The QUIC engine, port and channel providing the core QUIC connection
+ * implementation.
+ */
+ QUIC_ENGINE *engine;
+ QUIC_PORT *port;
+ QUIC_CHANNEL *ch;
+
+ /* The mutex we give to the QUIC channel. */
+ CRYPTO_MUTEX *mutex;
+
+ /* SSL_CTX for creating the underlying TLS connection */
+ SSL_CTX *ctx;
+
+ /* SSL for the underlying TLS connection */
+ SSL *tls;
+
+ /* Are we connected to a peer? */
+ unsigned int connected : 1;
+};
+
+static int alpn_select_cb(SSL *ssl, const unsigned char **out,
+ unsigned char *outlen, const unsigned char *in,
+ unsigned int inlen, void *arg)
+{
+ QUIC_TSERVER *srv = arg;
+ static const unsigned char alpndeflt[] = {
+ 8, 'o', 's', 's', 'l', 't', 'e', 's', 't'
+ };
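+    /*
+     * ALPN protocol lists are length-prefixed: the leading 8 is the length of
+     * the protocol name "ossltest" which follows it.
+     */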
+ const unsigned char *alpn;
+ size_t alpnlen;
+
+ if (srv->args.alpn == NULL) {
+ alpn = alpndeflt;
+ alpnlen = sizeof(alpndeflt);
+ } else {
+ alpn = srv->args.alpn;
+ alpnlen = srv->args.alpnlen;
+ }
+
+ if (SSL_select_next_proto((unsigned char **)out, outlen, alpn, alpnlen,
+ in, inlen) != OPENSSL_NPN_NEGOTIATED)
+ return SSL_TLSEXT_ERR_ALERT_FATAL;
+
+ return SSL_TLSEXT_ERR_OK;
+}
+
+QUIC_TSERVER *ossl_quic_tserver_new(const QUIC_TSERVER_ARGS *args,
+ const char *certfile, const char *keyfile)
+{
+ QUIC_TSERVER *srv = NULL;
+ QUIC_ENGINE_ARGS engine_args = {0};
+ QUIC_PORT_ARGS port_args = {0};
+ QUIC_CONNECTION *qc = NULL;
+
+ if (args->net_rbio == NULL || args->net_wbio == NULL)
+ goto err;
+
+ if ((srv = OPENSSL_zalloc(sizeof(*srv))) == NULL)
+ goto err;
+
+ srv->args = *args;
+
+#if defined(OPENSSL_THREADS)
+ if ((srv->mutex = ossl_crypto_mutex_new()) == NULL)
+ goto err;
+#endif
+
+ if (args->ctx != NULL)
+ srv->ctx = args->ctx;
+ else
+ srv->ctx = SSL_CTX_new_ex(srv->args.libctx, srv->args.propq,
+ TLS_method());
+ if (srv->ctx == NULL)
+ goto err;
+
+ if (certfile != NULL
+ && SSL_CTX_use_certificate_file(srv->ctx, certfile, SSL_FILETYPE_PEM) <= 0)
+ goto err;
+
+ if (keyfile != NULL
+ && SSL_CTX_use_PrivateKey_file(srv->ctx, keyfile, SSL_FILETYPE_PEM) <= 0)
+ goto err;
+
+ SSL_CTX_set_alpn_select_cb(srv->ctx, alpn_select_cb, srv);
+
+ srv->tls = SSL_new(srv->ctx);
+ if (srv->tls == NULL)
+ goto err;
+
+ engine_args.libctx = srv->args.libctx;
+ engine_args.propq = srv->args.propq;
+ engine_args.mutex = srv->mutex;
+
+ if ((srv->engine = ossl_quic_engine_new(&engine_args)) == NULL)
+ goto err;
+
+ ossl_quic_engine_set_time_cb(srv->engine, srv->args.now_cb,
+ srv->args.now_cb_arg);
+
+ port_args.channel_ctx = srv->ctx;
+ port_args.is_multi_conn = 1;
+ port_args.do_addr_validation = 1;
+ if ((srv->port = ossl_quic_engine_create_port(srv->engine, &port_args)) == NULL)
+ goto err;
+
+ if ((srv->ch = ossl_quic_port_create_incoming(srv->port, srv->tls)) == NULL)
+ goto err;
+
+ if (!ossl_quic_port_set_net_rbio(srv->port, srv->args.net_rbio)
+ || !ossl_quic_port_set_net_wbio(srv->port, srv->args.net_wbio))
+ goto err;
+
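+    /*
+     * Manufacture a dummy QUIC_CONNECTION wrapping the channel purely so that
+     * msg_callback invocations have an SSL object to report against.
+     */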
+ qc = OPENSSL_zalloc(sizeof(*qc));
+ if (qc == NULL)
+ goto err;
+ srv->ssl = (SSL *)qc;
+ qc->ch = srv->ch;
+ srv->ssl->type = SSL_TYPE_QUIC_CONNECTION;
+
+ return srv;
+
+err:
+ if (srv != NULL) {
+ if (args->ctx == NULL)
+ SSL_CTX_free(srv->ctx);
+ SSL_free(srv->tls);
+ ossl_quic_channel_free(srv->ch);
+ ossl_quic_port_free(srv->port);
+ ossl_quic_engine_free(srv->engine);
+#if defined(OPENSSL_THREADS)
+ ossl_crypto_mutex_free(&srv->mutex);
+#endif
+ OPENSSL_free(qc);
+ }
+
+ OPENSSL_free(srv);
+ return NULL;
+}
+
+void ossl_quic_tserver_free(QUIC_TSERVER *srv)
+{
+ if (srv == NULL)
+ return;
+
+ SSL_free(srv->tls);
+ ossl_quic_channel_free(srv->ch);
+ ossl_quic_port_free(srv->port);
+ ossl_quic_engine_free(srv->engine);
+ BIO_free_all(srv->args.net_rbio);
+ BIO_free_all(srv->args.net_wbio);
+ OPENSSL_free(srv->ssl);
+ SSL_CTX_free(srv->ctx);
+#if defined(OPENSSL_THREADS)
+ ossl_crypto_mutex_free(&srv->mutex);
+#endif
+ OPENSSL_free(srv);
+}
+
+/* Set mutator callbacks for test framework support */
+int ossl_quic_tserver_set_plain_packet_mutator(QUIC_TSERVER *srv,
+ ossl_mutate_packet_cb mutatecb,
+ ossl_finish_mutate_cb finishmutatecb,
+ void *mutatearg)
+{
+ return ossl_quic_channel_set_mutator(srv->ch, mutatecb, finishmutatecb,
+ mutatearg);
+}
+
+int ossl_quic_tserver_set_handshake_mutator(QUIC_TSERVER *srv,
+ ossl_statem_mutate_handshake_cb mutate_handshake_cb,
+ ossl_statem_finish_mutate_handshake_cb finish_mutate_handshake_cb,
+ void *mutatearg)
+{
+ return ossl_statem_set_mutator(ossl_quic_channel_get0_ssl(srv->ch),
+ mutate_handshake_cb,
+ finish_mutate_handshake_cb,
+ mutatearg);
+}
+
+int ossl_quic_tserver_tick(QUIC_TSERVER *srv)
+{
+ ossl_quic_reactor_tick(ossl_quic_channel_get_reactor(srv->ch), 0);
+
+ if (ossl_quic_channel_is_active(srv->ch))
+ srv->connected = 1;
+
+ return 1;
+}
+
+int ossl_quic_tserver_is_connected(QUIC_TSERVER *srv)
+{
+ return ossl_quic_channel_is_active(srv->ch);
+}
+
+/* Returns 1 if the server is in any terminating or terminated state */
+int ossl_quic_tserver_is_term_any(const QUIC_TSERVER *srv)
+{
+ return ossl_quic_channel_is_term_any(srv->ch);
+}
+
+const QUIC_TERMINATE_CAUSE *
+ossl_quic_tserver_get_terminate_cause(const QUIC_TSERVER *srv)
+{
+ return ossl_quic_channel_get_terminate_cause(srv->ch);
+}
+
+/* Returns 1 if the server is in a terminated state */
+int ossl_quic_tserver_is_terminated(const QUIC_TSERVER *srv)
+{
+ return ossl_quic_channel_is_terminated(srv->ch);
+}
+
+size_t ossl_quic_tserver_get_short_header_conn_id_len(const QUIC_TSERVER *srv)
+{
+ return ossl_quic_channel_get_short_header_conn_id_len(srv->ch);
+}
+
+int ossl_quic_tserver_is_handshake_confirmed(const QUIC_TSERVER *srv)
+{
+ return ossl_quic_channel_is_handshake_confirmed(srv->ch);
+}
+
+int ossl_quic_tserver_read(QUIC_TSERVER *srv,
+ uint64_t stream_id,
+ unsigned char *buf,
+ size_t buf_len,
+ size_t *bytes_read)
+{
+ int is_fin = 0;
+ QUIC_STREAM *qs;
+
+ qs = ossl_quic_stream_map_get_by_id(ossl_quic_channel_get_qsm(srv->ch),
+ stream_id);
+ if (qs == NULL) {
+ int is_client_init
+ = ((stream_id & QUIC_STREAM_INITIATOR_MASK)
+ == QUIC_STREAM_INITIATOR_CLIENT);
+
+ /*
+ * A client-initiated stream might spontaneously come into existence, so
+ * allow trying to read on a client-initiated stream before it exists,
+ * assuming the connection is still active.
+ * Otherwise, fail.
+ */
+ if (!is_client_init || !ossl_quic_channel_is_active(srv->ch))
+ return 0;
+
+ *bytes_read = 0;
+ return 1;
+ }
+
+ if (qs->recv_state == QUIC_RSTREAM_STATE_DATA_READ
+ || !ossl_quic_stream_has_recv_buffer(qs))
+ return 0;
+
+ if (!ossl_quic_rstream_read(qs->rstream, buf, buf_len,
+ bytes_read, &is_fin))
+ return 0;
+
+ if (*bytes_read > 0) {
+ /*
+ * We have read at least one byte from the stream. Inform stream-level
+ * RXFC of the retirement of controlled bytes. Update the active stream
+ * status (the RXFC may now want to emit a frame granting more credit to
+ * the peer).
+ */
+ OSSL_RTT_INFO rtt_info;
+
+ ossl_statm_get_rtt_info(ossl_quic_channel_get_statm(srv->ch), &rtt_info);
+
+ if (!ossl_quic_rxfc_on_retire(&qs->rxfc, *bytes_read,
+ rtt_info.smoothed_rtt))
+ return 0;
+ }
+
+ if (is_fin)
+ ossl_quic_stream_map_notify_totally_read(ossl_quic_channel_get_qsm(srv->ch),
+ qs);
+
+ if (*bytes_read > 0)
+ ossl_quic_stream_map_update_state(ossl_quic_channel_get_qsm(srv->ch), qs);
+
+ return 1;
+}
+
+int ossl_quic_tserver_has_read_ended(QUIC_TSERVER *srv, uint64_t stream_id)
+{
+ QUIC_STREAM *qs;
+ unsigned char buf[1];
+ size_t bytes_read = 0;
+ int is_fin = 0;
+
+ qs = ossl_quic_stream_map_get_by_id(ossl_quic_channel_get_qsm(srv->ch),
+ stream_id);
+
+ if (qs == NULL)
+ return 0;
+
+ if (qs->recv_state == QUIC_RSTREAM_STATE_DATA_READ)
+ return 1;
+
+ if (!ossl_quic_stream_has_recv_buffer(qs))
+ return 0;
+
+ /*
+     * Even if we are not yet in the DATA_READ state, we should still return 1
+     * if there is a lone FIN (but no more data) remaining to be retired from
+     * the RSTREAM, for example because ossl_quic_tserver_read() has not been
+     * called since the FIN was received.
+ */
+ if (!ossl_quic_rstream_peek(qs->rstream, buf, sizeof(buf),
+ &bytes_read, &is_fin))
+ return 0;
+
+ if (is_fin && bytes_read == 0) {
+ /* If we have a FIN awaiting retirement and no data before it... */
+ /* Let RSTREAM know we've consumed this FIN. */
+ if (!ossl_quic_rstream_read(qs->rstream, buf, sizeof(buf),
+ &bytes_read, &is_fin))
+ return 0;
+
+ assert(is_fin && bytes_read == 0);
+ assert(qs->recv_state == QUIC_RSTREAM_STATE_DATA_RECVD);
+
+ ossl_quic_stream_map_notify_totally_read(ossl_quic_channel_get_qsm(srv->ch),
+ qs);
+ ossl_quic_stream_map_update_state(ossl_quic_channel_get_qsm(srv->ch), qs);
+ return 1;
+ }
+
+ return 0;
+}
+
+int ossl_quic_tserver_write(QUIC_TSERVER *srv,
+ uint64_t stream_id,
+ const unsigned char *buf,
+ size_t buf_len,
+ size_t *bytes_written)
+{
+ QUIC_STREAM *qs;
+
+ if (!ossl_quic_channel_is_active(srv->ch))
+ return 0;
+
+ qs = ossl_quic_stream_map_get_by_id(ossl_quic_channel_get_qsm(srv->ch),
+ stream_id);
+ if (qs == NULL || !ossl_quic_stream_has_send_buffer(qs))
+ return 0;
+
+ if (!ossl_quic_sstream_append(qs->sstream,
+ buf, buf_len, bytes_written))
+ return 0;
+
+ if (*bytes_written > 0)
+ /*
+ * We have appended at least one byte to the stream. Potentially mark
+ * the stream as active, depending on FC.
+ */
+ ossl_quic_stream_map_update_state(ossl_quic_channel_get_qsm(srv->ch), qs);
+
+ /* Try and send. */
+ ossl_quic_tserver_tick(srv);
+ return 1;
+}
+
+int ossl_quic_tserver_conclude(QUIC_TSERVER *srv, uint64_t stream_id)
+{
+ QUIC_STREAM *qs;
+
+ if (!ossl_quic_channel_is_active(srv->ch))
+ return 0;
+
+ qs = ossl_quic_stream_map_get_by_id(ossl_quic_channel_get_qsm(srv->ch),
+ stream_id);
+ if (qs == NULL || !ossl_quic_stream_has_send_buffer(qs))
+ return 0;
+
+ if (!ossl_quic_sstream_get_final_size(qs->sstream, NULL)) {
+ ossl_quic_sstream_fin(qs->sstream);
+ ossl_quic_stream_map_update_state(ossl_quic_channel_get_qsm(srv->ch), qs);
+ }
+
+ ossl_quic_tserver_tick(srv);
+ return 1;
+}
+
+int ossl_quic_tserver_stream_new(QUIC_TSERVER *srv,
+ int is_uni,
+ uint64_t *stream_id)
+{
+ QUIC_STREAM *qs;
+
+ if (!ossl_quic_channel_is_active(srv->ch))
+ return 0;
+
+ if ((qs = ossl_quic_channel_new_stream_local(srv->ch, is_uni)) == NULL)
+ return 0;
+
+ *stream_id = qs->id;
+ return 1;
+}
+
+BIO *ossl_quic_tserver_get0_rbio(QUIC_TSERVER *srv)
+{
+ return srv->args.net_rbio;
+}
+
+SSL_CTX *ossl_quic_tserver_get0_ssl_ctx(QUIC_TSERVER *srv)
+{
+ return srv->ctx;
+}
+
+int ossl_quic_tserver_stream_has_peer_stop_sending(QUIC_TSERVER *srv,
+ uint64_t stream_id,
+ uint64_t *app_error_code)
+{
+ QUIC_STREAM *qs;
+
+ qs = ossl_quic_stream_map_get_by_id(ossl_quic_channel_get_qsm(srv->ch),
+ stream_id);
+ if (qs == NULL)
+ return 0;
+
+ if (qs->peer_stop_sending && app_error_code != NULL)
+ *app_error_code = qs->peer_stop_sending_aec;
+
+ return qs->peer_stop_sending;
+}
+
+int ossl_quic_tserver_stream_has_peer_reset_stream(QUIC_TSERVER *srv,
+ uint64_t stream_id,
+ uint64_t *app_error_code)
+{
+ QUIC_STREAM *qs;
+
+ qs = ossl_quic_stream_map_get_by_id(ossl_quic_channel_get_qsm(srv->ch),
+ stream_id);
+ if (qs == NULL)
+ return 0;
+
+ if (ossl_quic_stream_recv_is_reset(qs) && app_error_code != NULL)
+ *app_error_code = qs->peer_reset_stream_aec;
+
+ return ossl_quic_stream_recv_is_reset(qs);
+}
+
+int ossl_quic_tserver_set_new_local_cid(QUIC_TSERVER *srv,
+ const QUIC_CONN_ID *conn_id)
+{
+ /* Replace existing local connection ID in the QUIC_CHANNEL */
+ return ossl_quic_channel_replace_local_cid(srv->ch, conn_id);
+}
+
+uint64_t ossl_quic_tserver_pop_incoming_stream(QUIC_TSERVER *srv)
+{
+ QUIC_STREAM_MAP *qsm = ossl_quic_channel_get_qsm(srv->ch);
+ QUIC_STREAM *qs = ossl_quic_stream_map_peek_accept_queue(qsm);
+
+ if (qs == NULL)
+ return UINT64_MAX;
+
+ ossl_quic_stream_map_remove_from_accept_queue(qsm, qs, ossl_time_zero());
+
+ return qs->id;
+}
+
+int ossl_quic_tserver_is_stream_totally_acked(QUIC_TSERVER *srv,
+ uint64_t stream_id)
+{
+ QUIC_STREAM *qs;
+
+ qs = ossl_quic_stream_map_get_by_id(ossl_quic_channel_get_qsm(srv->ch),
+ stream_id);
+ if (qs == NULL)
+ return 1;
+
+ return ossl_quic_sstream_is_totally_acked(qs->sstream);
+}
+
+int ossl_quic_tserver_get_net_read_desired(QUIC_TSERVER *srv)
+{
+ return ossl_quic_reactor_net_read_desired(
+ ossl_quic_channel_get_reactor(srv->ch));
+}
+
+int ossl_quic_tserver_get_net_write_desired(QUIC_TSERVER *srv)
+{
+ return ossl_quic_reactor_net_write_desired(
+ ossl_quic_channel_get_reactor(srv->ch));
+}
+
+OSSL_TIME ossl_quic_tserver_get_deadline(QUIC_TSERVER *srv)
+{
+ return ossl_quic_reactor_get_tick_deadline(
+ ossl_quic_channel_get_reactor(srv->ch));
+}
+
+int ossl_quic_tserver_shutdown(QUIC_TSERVER *srv, uint64_t app_error_code)
+{
+ ossl_quic_channel_local_close(srv->ch, app_error_code, NULL);
+
+ if (ossl_quic_channel_is_terminated(srv->ch))
+ return 1;
+
+ ossl_quic_reactor_tick(ossl_quic_channel_get_reactor(srv->ch), 0);
+
+ return ossl_quic_channel_is_terminated(srv->ch);
+}
+
+int ossl_quic_tserver_ping(QUIC_TSERVER *srv)
+{
+ if (ossl_quic_channel_is_terminated(srv->ch))
+ return 0;
+
+ if (!ossl_quic_channel_ping(srv->ch))
+ return 0;
+
+ ossl_quic_reactor_tick(ossl_quic_channel_get_reactor(srv->ch), 0);
+ return 1;
+}
+
+QUIC_CHANNEL *ossl_quic_tserver_get_channel(QUIC_TSERVER *srv)
+{
+ return srv->ch;
+}
+
+void ossl_quic_tserver_set_msg_callback(QUIC_TSERVER *srv,
+ void (*f)(int write_p, int version,
+ int content_type,
+ const void *buf, size_t len,
+ SSL *ssl, void *arg),
+ void *arg)
+{
+ ossl_quic_channel_set_msg_callback(srv->ch, f, srv->ssl);
+ ossl_quic_channel_set_msg_callback_arg(srv->ch, arg);
+ SSL_set_msg_callback(srv->tls, f);
+ SSL_set_msg_callback_arg(srv->tls, arg);
+}
+
+int ossl_quic_tserver_new_ticket(QUIC_TSERVER *srv)
+{
+ return SSL_new_session_ticket(srv->tls);
+}
+
+int ossl_quic_tserver_set_max_early_data(QUIC_TSERVER *srv,
+ uint32_t max_early_data)
+{
+ return SSL_set_max_early_data(srv->tls, max_early_data);
+}
+
+void ossl_quic_tserver_set_psk_find_session_cb(QUIC_TSERVER *srv,
+ SSL_psk_find_session_cb_func cb)
+{
+ SSL_set_psk_find_session_callback(srv->tls, cb);
+}
diff --git a/crypto/openssl/ssl/quic/quic_txp.c b/crypto/openssl/ssl/quic/quic_txp.c
new file mode 100644
index 000000000000..10d842bce0bf
--- /dev/null
+++ b/crypto/openssl/ssl/quic/quic_txp.c
@@ -0,0 +1,3256 @@
+/*
+ * Copyright 2022-2025 The OpenSSL Project Authors. All Rights Reserved.
+ *
+ * Licensed under the Apache License 2.0 (the "License"). You may not use
+ * this file except in compliance with the License. You can obtain a copy
+ * in the file LICENSE in the source distribution or at
+ * https://www.openssl.org/source/license.html
+ */
+
+#include "internal/quic_txp.h"
+#include "internal/quic_fifd.h"
+#include "internal/quic_stream_map.h"
+#include "internal/quic_error.h"
+#include "internal/common.h"
+#include <openssl/err.h>
+
+#define MIN_CRYPTO_HDR_SIZE 3
+
+#define MIN_FRAME_SIZE_HANDSHAKE_DONE 1
+#define MIN_FRAME_SIZE_MAX_DATA 2
+#define MIN_FRAME_SIZE_ACK 5
+#define MIN_FRAME_SIZE_CRYPTO (MIN_CRYPTO_HDR_SIZE + 1)
+#define MIN_FRAME_SIZE_STREAM 3 /* minimum useful size (for non-FIN) */
+#define MIN_FRAME_SIZE_MAX_STREAMS_BIDI 2
+#define MIN_FRAME_SIZE_MAX_STREAMS_UNI 2
+
+/*
+ * Packet Archetypes
+ * =================
+ */
+
+/* Generate normal packets containing most frame types, subject to EL. */
+#define TX_PACKETISER_ARCHETYPE_NORMAL 0
+
+/*
+ * A probe packet is different in that:
+ * - It bypasses CC, but *is* counted as in flight for purposes of CC;
+ * - It must be ACK-eliciting.
+ */
+#define TX_PACKETISER_ARCHETYPE_PROBE 1
+
+/*
+ * An ACK-only packet is different in that:
+ * - It bypasses CC, and is considered a 'non-inflight' packet;
+ * - It may not contain anything other than an ACK frame, not even padding.
+ */
+#define TX_PACKETISER_ARCHETYPE_ACK_ONLY 2
+
+#define TX_PACKETISER_ARCHETYPE_NUM 3
+
+struct ossl_quic_tx_packetiser_st {
+ OSSL_QUIC_TX_PACKETISER_ARGS args;
+
+ /*
+ * Opaque initial token blob provided by caller. TXP frees using the
+ * callback when it is no longer needed.
+ */
+ const unsigned char *initial_token;
+ size_t initial_token_len;
+ ossl_quic_initial_token_free_fn *initial_token_free_cb;
+ void *initial_token_free_cb_arg;
+
+ /* Subcomponents of the TXP that we own. */
+ QUIC_FIFD fifd; /* QUIC Frame-in-Flight Dispatcher */
+
+ /* Internal state. */
+ uint64_t next_pn[QUIC_PN_SPACE_NUM]; /* Next PN to use in given PN space. */
+ OSSL_TIME last_tx_time; /* Last time a packet was generated, or 0. */
+
+ size_t unvalidated_credit; /* Limit of data we can send until validated */
+
+ /* Internal state - frame (re)generation flags. */
+ unsigned int want_handshake_done : 1;
+ unsigned int want_max_data : 1;
+ unsigned int want_max_streams_bidi : 1;
+ unsigned int want_max_streams_uni : 1;
+
+ /* Internal state - frame (re)generation flags - per PN space. */
+ unsigned int want_ack : QUIC_PN_SPACE_NUM;
+ unsigned int force_ack_eliciting : QUIC_PN_SPACE_NUM;
+
+ /*
+ * Internal state - connection close terminal state.
+ * Once this is set, it is not unset, unlike other want_ flags; we keep
+ * sending it in every packet.
+ */
+ unsigned int want_conn_close : 1;
+
+ /* Has the handshake been completed? */
+ unsigned int handshake_complete : 1;
+
+ OSSL_QUIC_FRAME_CONN_CLOSE conn_close_frame;
+
+ /*
+ * Counts of the number of bytes received and sent while in the closing
+ * state.
+ */
+ uint64_t closing_bytes_recv;
+ uint64_t closing_bytes_xmit;
+
+ /* Internal state - packet assembly. */
+ struct txp_el {
+ unsigned char *scratch; /* scratch buffer for packet assembly */
+ size_t scratch_len; /* number of bytes allocated for scratch */
+ OSSL_QTX_IOVEC *iovec; /* scratch iovec array for use with QTX */
+ size_t alloc_iovec; /* size of iovec array */
+ } el[QUIC_ENC_LEVEL_NUM];
+
+ /* Message callback related arguments */
+ ossl_msg_cb msg_callback;
+ void *msg_callback_arg;
+ SSL *msg_callback_ssl;
+
+ /* Callbacks. */
+ void (*ack_tx_cb)(const OSSL_QUIC_FRAME_ACK *ack,
+ uint32_t pn_space,
+ void *arg);
+ void *ack_tx_cb_arg;
+};
+
+/*
+ * The TX helper records state used while generating frames into packets. It
+ * enables serialization into the packet to be done "transactionally" where
+ * serialization of a frame can be rolled back if it fails midway (e.g. if it
+ * does not fit).
+ */
+struct tx_helper {
+ OSSL_QUIC_TX_PACKETISER *txp;
+ /*
+ * The Maximum Packet Payload Length in bytes. This is the amount of
+ * space we have to generate frames into.
+ */
+ size_t max_ppl;
+ /*
+ * Number of bytes we have generated so far.
+ */
+ size_t bytes_appended;
+ /*
+ * Number of scratch bytes in txp->scratch we have used so far. Some iovecs
+ * will reference this scratch buffer. When we need to use more of it (e.g.
+ * when we need to put frame headers somewhere), we append to the scratch
+ * buffer, resizing if necessary, and increase this accordingly.
+ */
+ size_t scratch_bytes;
+ /*
+ * Bytes reserved in the MaxPPL budget. We keep this number of bytes spare
+ * until reserve_allowed is set to 1. Currently this is always at most 1, as
+ * a PING frame takes up one byte and this mechanism is only used to ensure
+ * we can encode a PING frame if we have been asked to ensure a packet is
+ * ACK-eliciting and we are unsure if we are going to add any other
+ * ACK-eliciting frames before we reach our MaxPPL budget.
+ */
+ size_t reserve;
+ /*
+ * Number of iovecs we have currently appended. This is the number of
+ * entries valid in txp->iovec.
+ */
+ size_t num_iovec;
+ /* The EL this TX helper is being used for. */
+ uint32_t enc_level;
+ /*
+ * Whether we are allowed to make use of the reserve bytes in our MaxPPL
+ * budget. This is used to ensure we have room to append a PING frame later
+ * if we need to. Once we know we will not need to append a PING frame, this
+ * is set to 1.
+ */
+ unsigned int reserve_allowed : 1;
+ /*
+ * Set to 1 if we have appended a STREAM frame with an implicit length. If
+ * this happens we should never append another frame after that frame as it
+ * cannot be validly encoded. This is just a safety check.
+ */
+ unsigned int done_implicit : 1;
+ struct {
+ /*
+ * The fields in this structure are valid if active is set, which means
+ * that a serialization transaction is currently in progress.
+ */
+ unsigned char *data;
+ WPACKET wpkt;
+ unsigned int active : 1;
+ } txn;
+};
+
+static void tx_helper_rollback(struct tx_helper *h);
+static int txp_el_ensure_iovec(struct txp_el *el, size_t num);
+
+/* Initialises the TX helper. */
+static int tx_helper_init(struct tx_helper *h, OSSL_QUIC_TX_PACKETISER *txp,
+ uint32_t enc_level, size_t max_ppl, size_t reserve)
+{
+ if (reserve > max_ppl)
+ return 0;
+
+ h->txp = txp;
+ h->enc_level = enc_level;
+ h->max_ppl = max_ppl;
+ h->reserve = reserve;
+ h->num_iovec = 0;
+ h->bytes_appended = 0;
+ h->scratch_bytes = 0;
+ h->reserve_allowed = 0;
+ h->done_implicit = 0;
+ h->txn.data = NULL;
+ h->txn.active = 0;
+
+ if (max_ppl > h->txp->el[enc_level].scratch_len) {
+ unsigned char *scratch;
+
+ scratch = OPENSSL_realloc(h->txp->el[enc_level].scratch, max_ppl);
+ if (scratch == NULL)
+ return 0;
+
+ h->txp->el[enc_level].scratch = scratch;
+ h->txp->el[enc_level].scratch_len = max_ppl;
+ }
+
+ return 1;
+}
+
+static void tx_helper_cleanup(struct tx_helper *h)
+{
+ if (h->txn.active)
+ tx_helper_rollback(h);
+
+ h->txp = NULL;
+}
+
+static void tx_helper_unrestrict(struct tx_helper *h)
+{
+ h->reserve_allowed = 1;
+}
+
+/*
+ * Append an extent of memory to the iovec list. The memory must remain
+ * allocated until we finish generating the packet and call the QTX.
+ *
+ * In general, the buffers passed to this function will be from one of two
+ * ranges:
+ *
+ * - Application data contained in stream buffers managed elsewhere
+ * in the QUIC stack; or
+ *
+ * - Control frame data appended into txp->scratch using tx_helper_begin and
+ * tx_helper_commit.
+ */
+static int tx_helper_append_iovec(struct tx_helper *h,
+ const unsigned char *buf,
+ size_t buf_len)
+{
+ struct txp_el *el = &h->txp->el[h->enc_level];
+
+ if (buf_len == 0)
+ return 1;
+
+ if (!ossl_assert(!h->done_implicit))
+ return 0;
+
+ if (!txp_el_ensure_iovec(el, h->num_iovec + 1))
+ return 0;
+
+ el->iovec[h->num_iovec].buf = buf;
+ el->iovec[h->num_iovec].buf_len = buf_len;
+
+ ++h->num_iovec;
+ h->bytes_appended += buf_len;
+ return 1;
+}
+
+/*
+ * How many more bytes of space do we have left in our plaintext packet payload?
+ */
+static size_t tx_helper_get_space_left(struct tx_helper *h)
+{
+ return h->max_ppl
+ - (h->reserve_allowed ? 0 : h->reserve) - h->bytes_appended;
+}
+
+/*
+ * Begin a control frame serialization transaction. This allows the
+ * serialization of the control frame to be backed out if it turns out it won't
+ * fit. Write the control frame to the returned WPACKET. Ensure you always
+ * call tx_helper_rollback or tx_helper_commit (or tx_helper_cleanup). Returns
+ * NULL on failure.
+ */
+static WPACKET *tx_helper_begin(struct tx_helper *h)
+{
+ size_t space_left, len;
+ unsigned char *data;
+ struct txp_el *el = &h->txp->el[h->enc_level];
+
+ if (!ossl_assert(!h->txn.active))
+ return NULL;
+
+ if (!ossl_assert(!h->done_implicit))
+ return NULL;
+
+ data = (unsigned char *)el->scratch + h->scratch_bytes;
+ len = el->scratch_len - h->scratch_bytes;
+
+ space_left = tx_helper_get_space_left(h);
+ if (!ossl_assert(space_left <= len))
+ return NULL;
+
+ if (!WPACKET_init_static_len(&h->txn.wpkt, data, len, 0))
+ return NULL;
+
+ if (!WPACKET_set_max_size(&h->txn.wpkt, space_left)) {
+ WPACKET_cleanup(&h->txn.wpkt);
+ return NULL;
+ }
+
+ h->txn.data = data;
+ h->txn.active = 1;
+ return &h->txn.wpkt;
+}
+
+static void tx_helper_end(struct tx_helper *h, int success)
+{
+ if (success)
+ WPACKET_finish(&h->txn.wpkt);
+ else
+ WPACKET_cleanup(&h->txn.wpkt);
+
+ h->txn.active = 0;
+ h->txn.data = NULL;
+}
+
+/* Abort a control frame serialization transaction. */
+static void tx_helper_rollback(struct tx_helper *h)
+{
+ if (!h->txn.active)
+ return;
+
+ tx_helper_end(h, 0);
+}
+
+/* Commit a control frame. */
+static int tx_helper_commit(struct tx_helper *h)
+{
+ size_t l = 0;
+
+ if (!h->txn.active)
+ return 0;
+
+ if (!WPACKET_get_total_written(&h->txn.wpkt, &l)) {
+ tx_helper_end(h, 0);
+ return 0;
+ }
+
+ if (!tx_helper_append_iovec(h, h->txn.data, l)) {
+ tx_helper_end(h, 0);
+ return 0;
+ }
+
+ if (h->txp->msg_callback != NULL && l > 0) {
+ uint64_t ftype;
+ int ctype = SSL3_RT_QUIC_FRAME_FULL;
+ PACKET pkt;
+
+ if (!PACKET_buf_init(&pkt, h->txn.data, l)
+ || !ossl_quic_wire_peek_frame_header(&pkt, &ftype, NULL)) {
+ tx_helper_end(h, 0);
+ return 0;
+ }
+
+ if (ftype == OSSL_QUIC_FRAME_TYPE_PADDING)
+ ctype = SSL3_RT_QUIC_FRAME_PADDING;
+ else if (OSSL_QUIC_FRAME_TYPE_IS_STREAM(ftype)
+ || ftype == OSSL_QUIC_FRAME_TYPE_CRYPTO)
+ ctype = SSL3_RT_QUIC_FRAME_HEADER;
+
+ h->txp->msg_callback(1, OSSL_QUIC1_VERSION, ctype, h->txn.data, l,
+ h->txp->msg_callback_ssl,
+ h->txp->msg_callback_arg);
+ }
+
+ h->scratch_bytes += l;
+ tx_helper_end(h, 1);
+ return 1;
+}
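+
+/*
+ * Illustrative usage of the transaction mechanism above (a hypothetical
+ * caller; encode_frame() stands in for the ossl_quic_wire_encode_* calls
+ * used by the real frame generators later in this file):
+ *
+ *     WPACKET *wpkt = tx_helper_begin(h);
+ *
+ *     if (wpkt == NULL)
+ *         return 0;                  (fatal error)
+ *
+ *     if (!encode_frame(wpkt)) {
+ *         tx_helper_rollback(h);     (frame did not fit; nothing appended)
+ *         return 1;
+ *     }
+ *
+ *     return tx_helper_commit(h);    (appends iovec, advances scratch)
+ */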
+
+struct archetype_data {
+ unsigned int allow_ack : 1;
+ unsigned int allow_ping : 1;
+ unsigned int allow_crypto : 1;
+ unsigned int allow_handshake_done : 1;
+ unsigned int allow_path_challenge : 1;
+ unsigned int allow_path_response : 1;
+ unsigned int allow_new_conn_id : 1;
+ unsigned int allow_retire_conn_id : 1;
+ unsigned int allow_stream_rel : 1;
+ unsigned int allow_conn_fc : 1;
+ unsigned int allow_conn_close : 1;
+ unsigned int allow_cfq_other : 1;
+ unsigned int allow_new_token : 1;
+ unsigned int allow_force_ack_eliciting : 1;
+ unsigned int allow_padding : 1;
+ unsigned int require_ack_eliciting : 1;
+ unsigned int bypass_cc : 1;
+};
+
+struct txp_pkt_geom {
+ size_t cmpl, cmppl, hwm, pkt_overhead;
+ uint32_t archetype;
+ struct archetype_data adata;
+};
+
+struct txp_pkt {
+ struct tx_helper h;
+ int h_valid;
+ QUIC_TXPIM_PKT *tpkt;
+ QUIC_STREAM *stream_head;
+ QUIC_PKT_HDR phdr;
+ struct txp_pkt_geom geom;
+ int force_pad;
+};
+
+static QUIC_SSTREAM *get_sstream_by_id(uint64_t stream_id, uint32_t pn_space,
+ void *arg);
+static void on_regen_notify(uint64_t frame_type, uint64_t stream_id,
+ QUIC_TXPIM_PKT *pkt, void *arg);
+static void on_confirm_notify(uint64_t frame_type, uint64_t stream_id,
+ QUIC_TXPIM_PKT *pkt, void *arg);
+static void on_sstream_updated(uint64_t stream_id, void *arg);
+static int sstream_is_pending(QUIC_SSTREAM *sstream);
+static int txp_should_try_staging(OSSL_QUIC_TX_PACKETISER *txp,
+ uint32_t enc_level,
+ uint32_t archetype,
+ uint64_t cc_limit,
+ uint32_t *conn_close_enc_level);
+static size_t txp_determine_pn_len(OSSL_QUIC_TX_PACKETISER *txp);
+static int txp_determine_ppl_from_pl(OSSL_QUIC_TX_PACKETISER *txp,
+ size_t pl,
+ uint32_t enc_level,
+ size_t hdr_len,
+ size_t *r);
+static size_t txp_get_mdpl(OSSL_QUIC_TX_PACKETISER *txp);
+static int txp_generate_for_el(OSSL_QUIC_TX_PACKETISER *txp,
+ struct txp_pkt *pkt,
+ int chosen_for_conn_close);
+static int txp_pkt_init(struct txp_pkt *pkt, OSSL_QUIC_TX_PACKETISER *txp,
+ uint32_t enc_level, uint32_t archetype,
+ size_t running_total);
+static void txp_pkt_cleanup(struct txp_pkt *pkt, OSSL_QUIC_TX_PACKETISER *txp);
+static int txp_pkt_postgen_update_pkt_overhead(struct txp_pkt *pkt,
+ OSSL_QUIC_TX_PACKETISER *txp);
+static int txp_pkt_append_padding(struct txp_pkt *pkt,
+ OSSL_QUIC_TX_PACKETISER *txp, size_t num_bytes);
+static int txp_pkt_commit(OSSL_QUIC_TX_PACKETISER *txp, struct txp_pkt *pkt,
+ uint32_t archetype, int *txpim_pkt_reffed);
+static uint32_t txp_determine_archetype(OSSL_QUIC_TX_PACKETISER *txp,
+ uint64_t cc_limit);
+
+/**
+ * Sets the validated state of a QUIC TX packetiser.
+ *
+ * This function marks the provided QUIC TX packetiser as having its credit
+ * fully validated by setting its `unvalidated_credit` field to `SIZE_MAX`.
+ *
+ * @param txp A pointer to the OSSL_QUIC_TX_PACKETISER structure to update.
+ */
+void ossl_quic_tx_packetiser_set_validated(OSSL_QUIC_TX_PACKETISER *txp)
+{
+ txp->unvalidated_credit = SIZE_MAX;
+}
+
+/**
+ * Adds unvalidated credit to a QUIC TX packetiser.
+ *
+ * This function increases the unvalidated credit of the provided QUIC TX
+ * packetiser. If the current unvalidated credit is not `SIZE_MAX`, the
+ * function adds three times the specified `credit` value, ensuring it does
+ * not exceed the maximum allowable value (`SIZE_MAX - 1`). If the addition
+ * would cause an overflow, the unvalidated credit is capped at
+ * `SIZE_MAX - 1`. If the current unvalidated credit is already `SIZE_MAX`,
+ * the function does nothing.
+ *
+ * @param txp A pointer to the OSSL_QUIC_TX_PACKETISER structure to update.
+ * @param credit The base amount of credit; three times this value is added.
+ */
+void ossl_quic_tx_packetiser_add_unvalidated_credit(OSSL_QUIC_TX_PACKETISER *txp,
+ size_t credit)
+{
+ if (txp->unvalidated_credit != SIZE_MAX) {
+ if ((SIZE_MAX - txp->unvalidated_credit) > (credit * 3))
+ txp->unvalidated_credit += credit * 3;
+ else
+ txp->unvalidated_credit = SIZE_MAX - 1;
+ }
+}
+
+/**
+ * Consumes unvalidated credit from a QUIC TX packetiser.
+ *
+ * This function decreases the unvalidated credit of the specified
+ * QUIC TX packetiser by the given `credit` value. If the unvalidated credit
+ * is set to `SIZE_MAX`, the function does nothing, as `SIZE_MAX` represents
+ * an unlimited credit state.
+ *
+ * @param txp A pointer to the OSSL_QUIC_TX_PACKETISER structure to update.
+ * @param credit The amount of credit to consume.
+ */
+void ossl_quic_tx_packetiser_consume_unvalidated_credit(OSSL_QUIC_TX_PACKETISER *txp,
+ size_t credit)
+{
+ if (txp->unvalidated_credit != SIZE_MAX) {
+ if (txp->unvalidated_credit < credit)
+ txp->unvalidated_credit = 0;
+ else
+ txp->unvalidated_credit -= credit;
+ }
+}
+
+/**
+ * Checks if the QUIC TX packetiser has sufficient unvalidated credit.
+ *
+ * This function determines whether the unvalidated credit of the specified
+ * QUIC TX packetiser exceeds the required credit value (`req_credit`).
+ * If the unvalidated credit is greater than `req_credit`, the function
+ * returns 1 (true); otherwise, it returns 0 (false).
+ *
+ * @param txp A pointer to the OSSL_QUIC_TX_PACKETISER structure to check.
+ * @param req_credit The required credit value to compare against.
+ *
+ * @return 1 if the unvalidated credit exceeds `req_credit`, 0 otherwise.
+ */
+int ossl_quic_tx_packetiser_check_unvalidated_credit(OSSL_QUIC_TX_PACKETISER *txp,
+ size_t req_credit)
+{
+ return (txp->unvalidated_credit > req_credit);
+}
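+
+/*
+ * Illustrative credit lifecycle (hypothetical numbers): per the RFC 9000
+ * s. 8.1 anti-amplification limit, receiving a 1200-byte datagram from an
+ * unvalidated peer entitles us to send at most 3600 bytes:
+ *
+ *     ossl_quic_tx_packetiser_add_unvalidated_credit(txp, 1200);
+ *                                           (unvalidated_credit == 3600)
+ *     ossl_quic_tx_packetiser_check_unvalidated_credit(txp, 1200);
+ *                                           (returns 1, as 3600 > 1200)
+ *     ossl_quic_tx_packetiser_consume_unvalidated_credit(txp, 1200);
+ *                                           (unvalidated_credit == 2400)
+ *
+ * Once the peer address is validated, ossl_quic_tx_packetiser_set_validated()
+ * removes the limit by setting the credit to SIZE_MAX.
+ */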
+
+OSSL_QUIC_TX_PACKETISER *ossl_quic_tx_packetiser_new(const OSSL_QUIC_TX_PACKETISER_ARGS *args)
+{
+ OSSL_QUIC_TX_PACKETISER *txp;
+
+ if (args == NULL
+ || args->qtx == NULL
+ || args->txpim == NULL
+ || args->cfq == NULL
+ || args->ackm == NULL
+ || args->qsm == NULL
+ || args->conn_txfc == NULL
+ || args->conn_rxfc == NULL
+ || args->max_streams_bidi_rxfc == NULL
+ || args->max_streams_uni_rxfc == NULL
+ || args->protocol_version == 0) {
+ ERR_raise(ERR_LIB_SSL, ERR_R_PASSED_NULL_PARAMETER);
+ return NULL;
+ }
+
+ txp = OPENSSL_zalloc(sizeof(*txp));
+ if (txp == NULL)
+ return NULL;
+
+ txp->args = *args;
+ txp->last_tx_time = ossl_time_zero();
+
+ if (!ossl_quic_fifd_init(&txp->fifd,
+ txp->args.cfq, txp->args.ackm, txp->args.txpim,
+ get_sstream_by_id, txp,
+ on_regen_notify, txp,
+ on_confirm_notify, txp,
+ on_sstream_updated, txp,
+ args->get_qlog_cb,
+ args->get_qlog_cb_arg)) {
+ OPENSSL_free(txp);
+ return NULL;
+ }
+
+ return txp;
+}
+
+void ossl_quic_tx_packetiser_free(OSSL_QUIC_TX_PACKETISER *txp)
+{
+ uint32_t enc_level;
+
+ if (txp == NULL)
+ return;
+
+ ossl_quic_tx_packetiser_set_initial_token(txp, NULL, 0, NULL, NULL);
+ ossl_quic_fifd_cleanup(&txp->fifd);
+ OPENSSL_free(txp->conn_close_frame.reason);
+
+ for (enc_level = QUIC_ENC_LEVEL_INITIAL;
+ enc_level < QUIC_ENC_LEVEL_NUM;
+ ++enc_level) {
+ OPENSSL_free(txp->el[enc_level].iovec);
+ OPENSSL_free(txp->el[enc_level].scratch);
+ }
+
+ OPENSSL_free(txp);
+}
+
+/*
+ * Determine if an Initial packet token length is reasonable based on the
+ * current MDPL, returning 1 if it is OK.
+ *
+ * The real PMTU to the peer could differ from our (pessimistic) understanding
+ * of the PMTU, therefore it is possible we could receive an Initial token from
+ * a server in a Retry packet which is bigger than the MDPL. In this case it is
+ * impossible for us ever to make forward progress and we need to error out
+ * and fail the connection attempt.
+ *
+ * The specific boundary condition is complex: for example, after the size of
+ * the Initial token, there are the Initial packet header overheads and then
+ * encryption/AEAD tag overheads. After that, the minimum room for frame data in
+ * order to guarantee forward progress must be guaranteed. For example, a crypto
+ * stream needs to always be able to serialize at least one byte in a CRYPTO
+ * frame in order to make forward progress. Because the offset field of a CRYPTO
+ * frame uses a variable-length integer, the number of bytes needed to ensure
+ * this also varies.
+ *
+ * Rather than trying to get this boundary condition check actually right,
+ * require a reasonable amount of slack to avoid pathological behaviours. (After
+ * all, transmitting a CRYPTO stream one byte at a time is probably not
+ * desirable anyway.)
+ *
+ * We choose 160 bytes as the required margin, which is double the rough
+ * estimation of the minimum we would require to guarantee forward progress
+ * under worst case packet overheads.
+ */
+#define TXP_REQUIRED_TOKEN_MARGIN 160
+
+static int txp_check_token_len(size_t token_len, size_t mdpl)
+{
+ if (token_len == 0)
+ return 1;
+
+ if (token_len >= mdpl)
+ return 0;
+
+ if (TXP_REQUIRED_TOKEN_MARGIN >= mdpl)
+ /* (should not be possible because MDPL must be at least 1200) */
+ return 0;
+
+ if (token_len > mdpl - TXP_REQUIRED_TOKEN_MARGIN)
+ return 0;
+
+ return 1;
+}
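+
+/*
+ * Worked example (assuming the minimum allowed MDPL of 1200 bytes): with
+ * TXP_REQUIRED_TOKEN_MARGIN of 160, txp_check_token_len() accepts token
+ * lengths up to 1200 - 160 = 1040 bytes and rejects anything larger, since
+ * a larger token could leave too little room in an Initial packet for even
+ * a minimal CRYPTO frame to make forward progress.
+ */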
+
+int ossl_quic_tx_packetiser_set_initial_token(OSSL_QUIC_TX_PACKETISER *txp,
+ const unsigned char *token,
+ size_t token_len,
+ ossl_quic_initial_token_free_fn *free_cb,
+ void *free_cb_arg)
+{
+ if (!txp_check_token_len(token_len, txp_get_mdpl(txp)))
+ return 0;
+
+ if (txp->initial_token != NULL && txp->initial_token_free_cb != NULL)
+ txp->initial_token_free_cb(txp->initial_token, txp->initial_token_len,
+ txp->initial_token_free_cb_arg);
+
+ txp->initial_token = token;
+ txp->initial_token_len = token_len;
+ txp->initial_token_free_cb = free_cb;
+ txp->initial_token_free_cb_arg = free_cb_arg;
+ return 1;
+}
+
+int ossl_quic_tx_packetiser_set_protocol_version(OSSL_QUIC_TX_PACKETISER *txp,
+ uint32_t protocol_version)
+{
+ txp->args.protocol_version = protocol_version;
+ return 1;
+}
+
+int ossl_quic_tx_packetiser_set_cur_dcid(OSSL_QUIC_TX_PACKETISER *txp,
+ const QUIC_CONN_ID *dcid)
+{
+ if (dcid == NULL) {
+ ERR_raise(ERR_LIB_SSL, ERR_R_PASSED_NULL_PARAMETER);
+ return 0;
+ }
+
+ txp->args.cur_dcid = *dcid;
+ return 1;
+}
+
+int ossl_quic_tx_packetiser_set_cur_scid(OSSL_QUIC_TX_PACKETISER *txp,
+ const QUIC_CONN_ID *scid)
+{
+ if (scid == NULL) {
+ ERR_raise(ERR_LIB_SSL, ERR_R_PASSED_NULL_PARAMETER);
+ return 0;
+ }
+
+ txp->args.cur_scid = *scid;
+ return 1;
+}
+
+/* Change the destination L4 address the TXP uses to send datagrams. */
+int ossl_quic_tx_packetiser_set_peer(OSSL_QUIC_TX_PACKETISER *txp,
+ const BIO_ADDR *peer)
+{
+ if (peer == NULL) {
+ BIO_ADDR_clear(&txp->args.peer);
+ return 1;
+ }
+
+ return BIO_ADDR_copy(&txp->args.peer, peer);
+}
+
+void ossl_quic_tx_packetiser_set_ack_tx_cb(OSSL_QUIC_TX_PACKETISER *txp,
+ void (*cb)(const OSSL_QUIC_FRAME_ACK *ack,
+ uint32_t pn_space,
+ void *arg),
+ void *cb_arg)
+{
+ txp->ack_tx_cb = cb;
+ txp->ack_tx_cb_arg = cb_arg;
+}
+
+void ossl_quic_tx_packetiser_set_qlog_cb(OSSL_QUIC_TX_PACKETISER *txp,
+ QLOG *(*get_qlog_cb)(void *arg),
+ void *get_qlog_cb_arg)
+{
+ ossl_quic_fifd_set_qlog_cb(&txp->fifd, get_qlog_cb, get_qlog_cb_arg);
+}
+
+int ossl_quic_tx_packetiser_discard_enc_level(OSSL_QUIC_TX_PACKETISER *txp,
+ uint32_t enc_level)
+{
+ if (enc_level >= QUIC_ENC_LEVEL_NUM) {
+ ERR_raise(ERR_LIB_SSL, ERR_R_PASSED_INVALID_ARGUMENT);
+ return 0;
+ }
+
+ if (enc_level != QUIC_ENC_LEVEL_0RTT)
+ txp->args.crypto[ossl_quic_enc_level_to_pn_space(enc_level)] = NULL;
+
+ return 1;
+}
+
+void ossl_quic_tx_packetiser_notify_handshake_complete(OSSL_QUIC_TX_PACKETISER *txp)
+{
+ txp->handshake_complete = 1;
+}
+
+void ossl_quic_tx_packetiser_schedule_handshake_done(OSSL_QUIC_TX_PACKETISER *txp)
+{
+ txp->want_handshake_done = 1;
+}
+
+void ossl_quic_tx_packetiser_schedule_ack_eliciting(OSSL_QUIC_TX_PACKETISER *txp,
+ uint32_t pn_space)
+{
+ txp->force_ack_eliciting |= (1UL << pn_space);
+}
+
+void ossl_quic_tx_packetiser_schedule_ack(OSSL_QUIC_TX_PACKETISER *txp,
+ uint32_t pn_space)
+{
+ txp->want_ack |= (1UL << pn_space);
+}
+
+#define TXP_ERR_INTERNAL 0 /* Internal (e.g. alloc) error */
+#define TXP_ERR_SUCCESS 1 /* Success */
+#define TXP_ERR_SPACE 2 /* Not enough room for another packet */
+#define TXP_ERR_INPUT 3 /* Invalid/malformed input */
+
+/*
+ * Generates a datagram by polling the various ELs to determine if they want to
+ * generate any frames, and generating a datagram which coalesces packets for
+ * any ELs which do.
+ */
+int ossl_quic_tx_packetiser_generate(OSSL_QUIC_TX_PACKETISER *txp,
+ QUIC_TXP_STATUS *status)
+{
+ /*
+ * Called to generate one or more datagrams, each containing one or more
+ * packets.
+ *
+ * There are some tricky things to note here:
+ *
+ * - The TXP is only concerned with generating encrypted packets;
+ * other packets use a different path.
+ *
+ * - Any datagram containing an Initial packet must have a payload length
+ * (DPL) of at least 1200 bytes. This padding need not necessarily be
+ * found in the Initial packet.
+ *
+ * - It is desirable to be able to coalesce an Initial packet
+ * with a Handshake packet. Since, before generating the Handshake
+ * packet, we do not know how long it will be, we cannot know the
+ * correct amount of padding to ensure a DPL of at least 1200 bytes.
+ * Thus this padding must be added to the Handshake packet (or whatever
+ * packet is the last in the datagram).
+ *
+ * - However, at the time that we generate the Initial packet,
+ * we do not actually know for sure that we will be followed
+ * in the datagram by another packet. For example, suppose we have
+ * some queued data (e.g. crypto stream data for the HANDSHAKE EL)
+ * which it looks like we will want to send on the HANDSHAKE EL.
+ * We could assume padding will be placed in the Handshake packet
+ * subsequently and avoid adding any padding to the Initial packet
+ * (which would leave no room for the Handshake packet in the
+ * datagram).
+ *
+ * However, this is not actually a safe assumption. Suppose that we
+ * are using a link with a MDPL of 1200 bytes, the minimum allowed by
+ * QUIC. Suppose that the Initial packet consumes 1195 bytes in total.
+ * Since it is not possible to fit a Handshake packet in just 5 bytes,
+ * upon trying to add a Handshake packet after generating the Initial
+ * packet, we will discover we have no room to fit it! This is not a
+ * problem in itself as another datagram can be sent subsequently, but
+ * it is a problem because we were counting on using that packet to hold
+ * the essential padding. But if we have already finished encrypting
+ * the Initial packet, we cannot go and add padding to it anymore.
+ * This leaves us stuck.
+ *
+ * Because of this, we have to plan multiple packets simultaneously, such
+ * that we can start generating a Handshake (or 0-RTT or 1-RTT, or so on)
+ * packet while still having the option to go back and add padding to the
+ * Initial packet if it turns out to be needed.
+ *
+ * Trying to predict ahead of time (e.g. during Initial packet generation)
+ * whether we will successfully generate a subsequent packet is fraught with
+ * error as it relies on a large number of variables:
+ *
+ * - Do we have room to fit a packet header? (Consider that due to
+ * variable-length integer encoding this is highly variable and can even
+ * depend on payload length due to a variable-length Length field.)
+ *
+ * - Can we fit even a single one of the frames we want to put in this
+ * packet in the packet? (Each frame type has a bespoke encoding. While
+ * our encodings of some frame types are adaptive based on the available
+ * room - e.g. STREAM frames - ultimately all frame types have some
+ * absolute minimum number of bytes to be successfully encoded. For
+ * example, if after an Initial packet there is enough room to encode
+ * only one byte of frame data, it is quite likely we can't send any of
+ * the frames we wanted to send.) While this is not strictly a problem
+ * because we could just fill the packet with padding frames, this is a
+ * pointless packet and is wasteful.
+ *
+ * Thus we adopt a multi-phase architecture:
+ *
+ * 1. Archetype Selection: Determine desired packet archetype.
+ *
+ * 2. Packet Staging: Generation of packet information and packet payload
+ * data (frame data) into staging areas.
+ *
+ * 3. Packet Adjustment: Adjustment of staged packets, adding padding to
+ * the staged packets if needed.
+ *
+ * 4. Commit: The packets are sent to the QTX and recorded as having been
+ * sent to the FIFM.
+ *
+ */
+ int res = 0, rc;
+ uint32_t archetype, enc_level;
+ uint32_t conn_close_enc_level = QUIC_ENC_LEVEL_NUM;
+ struct txp_pkt pkt[QUIC_ENC_LEVEL_NUM];
+ size_t pkts_done = 0;
+ uint64_t cc_limit = txp->args.cc_method->get_tx_allowance(txp->args.cc_data);
+ int need_padding = 0, txpim_pkt_reffed;
+
+ memset(status, 0, sizeof(*status));
+
+ for (enc_level = QUIC_ENC_LEVEL_INITIAL;
+ enc_level < QUIC_ENC_LEVEL_NUM;
+ ++enc_level)
+ pkt[enc_level].h_valid = 0;
+
+ /*
+ * Should not be needed, but a sanity check in case anyone else has been
+ * using the QTX.
+ */
+ ossl_qtx_finish_dgram(txp->args.qtx);
+
+ /* 1. Archetype Selection */
+ archetype = txp_determine_archetype(txp, cc_limit);
+
+ /* 2. Packet Staging */
+ for (enc_level = QUIC_ENC_LEVEL_INITIAL;
+ enc_level < QUIC_ENC_LEVEL_NUM;
+ ++enc_level) {
+ size_t running_total = (enc_level > QUIC_ENC_LEVEL_INITIAL)
+ ? pkt[enc_level - 1].geom.hwm : 0;
+
+ pkt[enc_level].geom.hwm = running_total;
+
+ if (!txp_should_try_staging(txp, enc_level, archetype, cc_limit,
+ &conn_close_enc_level))
+ continue;
+
+ if (!txp_pkt_init(&pkt[enc_level], txp, enc_level, archetype,
+ running_total))
+ /*
+ * If this fails this is not a fatal error - it means the geometry
+ * planning determined there was not enough space for another
+ * packet. So just proceed with what we've already planned for.
+ */
+ break;
+
+ rc = txp_generate_for_el(txp, &pkt[enc_level],
+ conn_close_enc_level == enc_level);
+ if (rc != TXP_ERR_SUCCESS)
+ goto out;
+
+ if (pkt[enc_level].force_pad)
+ /*
+ * txp_generate_for_el emitted a frame which forces packet padding.
+ */
+ need_padding = 1;
+
+ pkt[enc_level].geom.hwm = running_total
+ + pkt[enc_level].h.bytes_appended
+ + pkt[enc_level].geom.pkt_overhead;
+ }
+
+ /* 3. Packet Adjustment */
+ if (pkt[QUIC_ENC_LEVEL_INITIAL].h_valid
+ && pkt[QUIC_ENC_LEVEL_INITIAL].h.bytes_appended > 0)
+ /*
+ * We have an Initial packet in this datagram, so we need to make sure
+ * the total size of the datagram is adequate.
+ */
+ need_padding = 1;
+
+ if (need_padding) {
+ size_t total_dgram_size = 0;
+ const size_t min_dpl = QUIC_MIN_INITIAL_DGRAM_LEN;
+ uint32_t pad_el = QUIC_ENC_LEVEL_NUM;
+
+ for (enc_level = QUIC_ENC_LEVEL_INITIAL;
+ enc_level < QUIC_ENC_LEVEL_NUM;
+ ++enc_level)
+ if (pkt[enc_level].h_valid && pkt[enc_level].h.bytes_appended > 0) {
+ if (pad_el == QUIC_ENC_LEVEL_NUM
+ /*
+ * We might not be able to add padding, for example if we
+ * are using the ACK_ONLY archetype.
+ */
+ && pkt[enc_level].geom.adata.allow_padding
+ && !pkt[enc_level].h.done_implicit)
+ pad_el = enc_level;
+
+ txp_pkt_postgen_update_pkt_overhead(&pkt[enc_level], txp);
+ total_dgram_size += pkt[enc_level].geom.pkt_overhead
+ + pkt[enc_level].h.bytes_appended;
+ }
+
+ if (pad_el != QUIC_ENC_LEVEL_NUM && total_dgram_size < min_dpl) {
+ size_t deficit = min_dpl - total_dgram_size;
+
+ if (!txp_pkt_append_padding(&pkt[pad_el], txp, deficit))
+ goto out;
+
+ total_dgram_size += deficit;
+
+ /*
+ * Padding frames make a packet ineligible for being a non-inflight
+ * packet.
+ */
+ pkt[pad_el].tpkt->ackm_pkt.is_inflight = 1;
+ }
+
+ /*
+ * If we have failed to make a datagram of adequate size, for example
+ * because we have a padding requirement but are using the ACK_ONLY
+ * archetype (because we are CC limited), which precludes us from
+ * sending padding, give up on generating the datagram - there is
+ * nothing we can do.
+ */
+ if (total_dgram_size < min_dpl) {
+ res = 1;
+ goto out;
+ }
+ }
+
+ /* 4. Commit */
+ for (enc_level = QUIC_ENC_LEVEL_INITIAL;
+ enc_level < QUIC_ENC_LEVEL_NUM;
+ ++enc_level) {
+
+ if (!pkt[enc_level].h_valid)
+ /* Did not attempt to generate a packet for this EL. */
+ continue;
+
+ if (pkt[enc_level].h.bytes_appended == 0)
+ /* Nothing was generated for this EL, so skip. */
+ continue;
+
+ if (!ossl_quic_tx_packetiser_check_unvalidated_credit(txp,
+ pkt[enc_level].h.bytes_appended)) {
+ res = TXP_ERR_SPACE;
+ goto out;
+ }
+ ossl_quic_tx_packetiser_consume_unvalidated_credit(txp, pkt[enc_level].h.bytes_appended);
+
+ rc = txp_pkt_commit(txp, &pkt[enc_level], archetype,
+ &txpim_pkt_reffed);
+ if (rc) {
+ status->sent_ack_eliciting
+ = status->sent_ack_eliciting
+ || pkt[enc_level].tpkt->ackm_pkt.is_ack_eliciting;
+
+ if (enc_level == QUIC_ENC_LEVEL_HANDSHAKE)
+ status->sent_handshake
+ = (pkt[enc_level].h_valid
+ && pkt[enc_level].h.bytes_appended > 0);
+ }
+
+ if (txpim_pkt_reffed)
+ pkt[enc_level].tpkt = NULL; /* don't free */
+
+ if (!rc)
+ goto out;
+
+ ++pkts_done;
+
+ }
+
+ /* Flush & Cleanup */
+ res = 1;
+out:
+ ossl_qtx_finish_dgram(txp->args.qtx);
+
+ for (enc_level = QUIC_ENC_LEVEL_INITIAL;
+ enc_level < QUIC_ENC_LEVEL_NUM;
+ ++enc_level)
+ txp_pkt_cleanup(&pkt[enc_level], txp);
+
+ status->sent_pkt = pkts_done;
+
+ return res;
+}
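+
+/*
+ * Illustrative caller sketch (hypothetical; in reality the channel's tick
+ * logic drives this): a caller typically invokes generation once per tick
+ * and inspects the returned status, for example to learn whether an
+ * ACK-eliciting packet went out:
+ *
+ *     QUIC_TXP_STATUS status;
+ *
+ *     if (!ossl_quic_tx_packetiser_generate(txp, &status))
+ *         return handle_fatal_error();   (hypothetical handler)
+ *
+ *     if (status.sent_ack_eliciting)
+ *         ...                            (e.g. rearm loss/PTO timers)
+ */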
+
+static const struct archetype_data archetypes[QUIC_ENC_LEVEL_NUM][TX_PACKETISER_ARCHETYPE_NUM] = {
+ /* EL 0(INITIAL) */
+ {
+ /* EL 0(INITIAL) - Archetype 0(NORMAL) */
+ {
+ /*allow_ack =*/ 1,
+ /*allow_ping =*/ 1,
+ /*allow_crypto =*/ 1,
+ /*allow_handshake_done =*/ 0,
+ /*allow_path_challenge =*/ 0,
+ /*allow_path_response =*/ 0,
+ /*allow_new_conn_id =*/ 0,
+ /*allow_retire_conn_id =*/ 0,
+ /*allow_stream_rel =*/ 0,
+ /*allow_conn_fc =*/ 0,
+ /*allow_conn_close =*/ 1,
+ /*allow_cfq_other =*/ 0,
+ /*allow_new_token =*/ 0,
+ /*allow_force_ack_eliciting =*/ 1,
+ /*allow_padding =*/ 1,
+ /*require_ack_eliciting =*/ 0,
+ /*bypass_cc =*/ 0,
+ },
+ /* EL 0(INITIAL) - Archetype 1(PROBE) */
+ {
+ /*allow_ack =*/ 1,
+ /*allow_ping =*/ 1,
+ /*allow_crypto =*/ 1,
+ /*allow_handshake_done =*/ 0,
+ /*allow_path_challenge =*/ 0,
+ /*allow_path_response =*/ 0,
+ /*allow_new_conn_id =*/ 0,
+ /*allow_retire_conn_id =*/ 0,
+ /*allow_stream_rel =*/ 0,
+ /*allow_conn_fc =*/ 0,
+ /*allow_conn_close =*/ 1,
+ /*allow_cfq_other =*/ 0,
+ /*allow_new_token =*/ 0,
+ /*allow_force_ack_eliciting =*/ 1,
+ /*allow_padding =*/ 1,
+ /*require_ack_eliciting =*/ 1,
+ /*bypass_cc =*/ 1,
+ },
+ /* EL 0(INITIAL) - Archetype 2(ACK_ONLY) */
+ {
+ /*allow_ack =*/ 1,
+ /*allow_ping =*/ 0,
+ /*allow_crypto =*/ 0,
+ /*allow_handshake_done =*/ 0,
+ /*allow_path_challenge =*/ 0,
+ /*allow_path_response =*/ 0,
+ /*allow_new_conn_id =*/ 0,
+ /*allow_retire_conn_id =*/ 0,
+ /*allow_stream_rel =*/ 0,
+ /*allow_conn_fc =*/ 0,
+ /*allow_conn_close =*/ 0,
+ /*allow_cfq_other =*/ 0,
+ /*allow_new_token =*/ 0,
+ /*allow_force_ack_eliciting =*/ 1,
+ /*allow_padding =*/ 0,
+ /*require_ack_eliciting =*/ 0,
+ /*bypass_cc =*/ 1,
+ },
+ },
+ /* EL 1(0RTT) */
+ {
+ /* EL 1(0RTT) - Archetype 0(NORMAL) */
+ {
+ /*allow_ack =*/ 0,
+ /*allow_ping =*/ 1,
+ /*allow_crypto =*/ 0,
+ /*allow_handshake_done =*/ 0,
+ /*allow_path_challenge =*/ 0,
+ /*allow_path_response =*/ 0,
+ /*allow_new_conn_id =*/ 1,
+ /*allow_retire_conn_id =*/ 1,
+ /*allow_stream_rel =*/ 1,
+ /*allow_conn_fc =*/ 1,
+ /*allow_conn_close =*/ 1,
+ /*allow_cfq_other =*/ 0,
+ /*allow_new_token =*/ 0,
+ /*allow_force_ack_eliciting =*/ 0,
+ /*allow_padding =*/ 1,
+ /*require_ack_eliciting =*/ 0,
+ /*bypass_cc =*/ 0,
+ },
+ /* EL 1(0RTT) - Archetype 1(PROBE) */
+ {
+ /*allow_ack =*/ 0,
+ /*allow_ping =*/ 1,
+ /*allow_crypto =*/ 0,
+ /*allow_handshake_done =*/ 0,
+ /*allow_path_challenge =*/ 0,
+ /*allow_path_response =*/ 0,
+ /*allow_new_conn_id =*/ 1,
+ /*allow_retire_conn_id =*/ 1,
+ /*allow_stream_rel =*/ 1,
+ /*allow_conn_fc =*/ 1,
+ /*allow_conn_close =*/ 1,
+ /*allow_cfq_other =*/ 0,
+ /*allow_new_token =*/ 0,
+ /*allow_force_ack_eliciting =*/ 0,
+ /*allow_padding =*/ 1,
+ /*require_ack_eliciting =*/ 1,
+ /*bypass_cc =*/ 1,
+ },
+ /* EL 1(0RTT) - Archetype 2(ACK_ONLY) */
+ {
+ /*allow_ack =*/ 0,
+ /*allow_ping =*/ 0,
+ /*allow_crypto =*/ 0,
+ /*allow_handshake_done =*/ 0,
+ /*allow_path_challenge =*/ 0,
+ /*allow_path_response =*/ 0,
+ /*allow_new_conn_id =*/ 0,
+ /*allow_retire_conn_id =*/ 0,
+ /*allow_stream_rel =*/ 0,
+ /*allow_conn_fc =*/ 0,
+ /*allow_conn_close =*/ 0,
+ /*allow_cfq_other =*/ 0,
+ /*allow_new_token =*/ 0,
+ /*allow_force_ack_eliciting =*/ 0,
+ /*allow_padding =*/ 0,
+ /*require_ack_eliciting =*/ 0,
+ /*bypass_cc =*/ 1,
+ },
+ },
+ /* EL (HANDSHAKE) */
+ {
+ /* EL 2(HANDSHAKE) - Archetype 0(NORMAL) */
+ {
+ /*allow_ack =*/ 1,
+ /*allow_ping =*/ 1,
+ /*allow_crypto =*/ 1,
+ /*allow_handshake_done =*/ 0,
+ /*allow_path_challenge =*/ 0,
+ /*allow_path_response =*/ 0,
+ /*allow_new_conn_id =*/ 0,
+ /*allow_retire_conn_id =*/ 0,
+ /*allow_stream_rel =*/ 0,
+ /*allow_conn_fc =*/ 0,
+ /*allow_conn_close =*/ 1,
+ /*allow_cfq_other =*/ 0,
+ /*allow_new_token =*/ 0,
+ /*allow_force_ack_eliciting =*/ 1,
+ /*allow_padding =*/ 1,
+ /*require_ack_eliciting =*/ 0,
+ /*bypass_cc =*/ 0,
+ },
+ /* EL 2(HANDSHAKE) - Archetype 1(PROBE) */
+ {
+ /*allow_ack =*/ 1,
+ /*allow_ping =*/ 1,
+ /*allow_crypto =*/ 1,
+ /*allow_handshake_done =*/ 0,
+ /*allow_path_challenge =*/ 0,
+ /*allow_path_response =*/ 0,
+ /*allow_new_conn_id =*/ 0,
+ /*allow_retire_conn_id =*/ 0,
+ /*allow_stream_rel =*/ 0,
+ /*allow_conn_fc =*/ 0,
+ /*allow_conn_close =*/ 1,
+ /*allow_cfq_other =*/ 0,
+ /*allow_new_token =*/ 0,
+ /*allow_force_ack_eliciting =*/ 1,
+ /*allow_padding =*/ 1,
+ /*require_ack_eliciting =*/ 1,
+ /*bypass_cc =*/ 1,
+ },
+ /* EL 2(HANDSHAKE) - Archetype 2(ACK_ONLY) */
+ {
+ /*allow_ack =*/ 1,
+ /*allow_ping =*/ 0,
+ /*allow_crypto =*/ 0,
+ /*allow_handshake_done =*/ 0,
+ /*allow_path_challenge =*/ 0,
+ /*allow_path_response =*/ 0,
+ /*allow_new_conn_id =*/ 0,
+ /*allow_retire_conn_id =*/ 0,
+ /*allow_stream_rel =*/ 0,
+ /*allow_conn_fc =*/ 0,
+ /*allow_conn_close =*/ 0,
+ /*allow_cfq_other =*/ 0,
+ /*allow_new_token =*/ 0,
+ /*allow_force_ack_eliciting =*/ 1,
+ /*allow_padding =*/ 0,
+ /*require_ack_eliciting =*/ 0,
+ /*bypass_cc =*/ 1,
+ },
+ },
+ /* EL 3(1RTT) */
+ {
+ /* EL 3(1RTT) - Archetype 0(NORMAL) */
+ {
+ /*allow_ack =*/ 1,
+ /*allow_ping =*/ 1,
+ /*allow_crypto =*/ 1,
+ /*allow_handshake_done =*/ 1,
+ /*allow_path_challenge =*/ 0,
+ /*allow_path_response =*/ 1,
+ /*allow_new_conn_id =*/ 1,
+ /*allow_retire_conn_id =*/ 1,
+ /*allow_stream_rel =*/ 1,
+ /*allow_conn_fc =*/ 1,
+ /*allow_conn_close =*/ 1,
+ /*allow_cfq_other =*/ 1,
+ /*allow_new_token =*/ 1,
+ /*allow_force_ack_eliciting =*/ 1,
+ /*allow_padding =*/ 1,
+ /*require_ack_eliciting =*/ 0,
+ /*bypass_cc =*/ 0,
+ },
+ /* EL 3(1RTT) - Archetype 1(PROBE) */
+ {
+ /*allow_ack =*/ 1,
+ /*allow_ping =*/ 1,
+ /*allow_crypto =*/ 1,
+ /*allow_handshake_done =*/ 1,
+ /*allow_path_challenge =*/ 0,
+ /*allow_path_response =*/ 1,
+ /*allow_new_conn_id =*/ 1,
+ /*allow_retire_conn_id =*/ 1,
+ /*allow_stream_rel =*/ 1,
+ /*allow_conn_fc =*/ 1,
+ /*allow_conn_close =*/ 1,
+ /*allow_cfq_other =*/ 1,
+ /*allow_new_token =*/ 1,
+ /*allow_force_ack_eliciting =*/ 1,
+ /*allow_padding =*/ 1,
+ /*require_ack_eliciting =*/ 1,
+ /*bypass_cc =*/ 1,
+ },
+ /* EL 3(1RTT) - Archetype 2(ACK_ONLY) */
+ {
+ /*allow_ack =*/ 1,
+ /*allow_ping =*/ 0,
+ /*allow_crypto =*/ 0,
+ /*allow_handshake_done =*/ 0,
+ /*allow_path_challenge =*/ 0,
+ /*allow_path_response =*/ 0,
+ /*allow_new_conn_id =*/ 0,
+ /*allow_retire_conn_id =*/ 0,
+ /*allow_stream_rel =*/ 0,
+ /*allow_conn_fc =*/ 0,
+ /*allow_conn_close =*/ 0,
+ /*allow_cfq_other =*/ 0,
+ /*allow_new_token =*/ 0,
+ /*allow_force_ack_eliciting =*/ 1,
+ /*allow_padding =*/ 0,
+ /*require_ack_eliciting =*/ 0,
+ /*bypass_cc =*/ 1,
+ }
+ }
+};
+
+static int txp_get_archetype_data(uint32_t enc_level,
+ uint32_t archetype,
+ struct archetype_data *a)
+{
+ if (enc_level >= QUIC_ENC_LEVEL_NUM
+ || archetype >= TX_PACKETISER_ARCHETYPE_NUM)
+ return 0;
+
+ /* No need to avoid copying this as it should not exceed one int in size. */
+ *a = archetypes[enc_level][archetype];
+ return 1;
+}
+
+static int txp_determine_geometry(OSSL_QUIC_TX_PACKETISER *txp,
+ uint32_t archetype,
+ uint32_t enc_level,
+ size_t running_total,
+ QUIC_PKT_HDR *phdr,
+ struct txp_pkt_geom *geom)
+{
+ size_t mdpl, cmpl, hdr_len;
+
+ /* Get information about packet archetype. */
+ if (!txp_get_archetype_data(enc_level, archetype, &geom->adata))
+ return 0;
+
+ /* Assemble packet header. */
+ phdr->type = ossl_quic_enc_level_to_pkt_type(enc_level);
+ phdr->spin_bit = 0;
+ phdr->pn_len = txp_determine_pn_len(txp);
+ phdr->partial = 0;
+ phdr->fixed = 1;
+ phdr->reserved = 0;
+ phdr->version = txp->args.protocol_version;
+ phdr->dst_conn_id = txp->args.cur_dcid;
+ phdr->src_conn_id = txp->args.cur_scid;
+
+ /*
+ * We need to know the length of the payload to get an accurate header
+ * length for non-1RTT packets, because the Length field found in
+ * Initial/Handshake/0-RTT packets uses a variable-length encoding. However,
+ * we don't have a good idea of the length of our payload, because the
+ * length of the payload depends on the room in the datagram after fitting
+ * the header, which depends on the size of the header.
+ *
+ * In general, it does not matter if a packet is slightly shorter (because
+ * e.g. we predicted use of a 2-byte length field, but ended up only needing
+ * a 1-byte length field). However this does matter for Initial packets
+ * which must be at least 1200 bytes, which is also the assumed default MTU;
+ * therefore in many cases Initial packets will be padded to 1200 bytes,
+ * which means if we overestimated the header size, we will be short by a
+ * few bytes and the server will ignore the packet for being too short. In
+ * this case, however, such packets always *will* be padded to meet 1200
+ * bytes, which requires a 2-byte length field, so we don't actually need to
+ * worry about this. Thus we estimate the header length assuming a 2-byte
+ * length field here, which should in practice work well in all cases.
+ */
+ phdr->len = OSSL_QUIC_VLINT_2B_MAX - phdr->pn_len;
+
+ if (enc_level == QUIC_ENC_LEVEL_INITIAL) {
+ phdr->token = txp->initial_token;
+ phdr->token_len = txp->initial_token_len;
+ } else {
+ phdr->token = NULL;
+ phdr->token_len = 0;
+ }
+
+ hdr_len = ossl_quic_wire_get_encoded_pkt_hdr_len(phdr->dst_conn_id.id_len,
+ phdr);
+ if (hdr_len == 0)
+ return 0;
+
+ /* MDPL: Maximum datagram payload length. */
+ mdpl = txp_get_mdpl(txp);
+
+ /*
+ * CMPL: Maximum encoded packet size we can put into this datagram given any
+ * previous packets coalesced into it.
+ */
+ if (running_total > mdpl)
+ /* Should not be possible, but if it happens: */
+ cmpl = 0;
+ else
+ cmpl = mdpl - running_total;
+
+ /* CMPPL: Maximum amount we can put into the current packet payload */
+ if (!txp_determine_ppl_from_pl(txp, cmpl, enc_level, hdr_len, &geom->cmppl))
+ return 0;
+
+ geom->cmpl = cmpl;
+ geom->pkt_overhead = cmpl - geom->cmppl;
+ geom->archetype = archetype;
+ return 1;
+}
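+
+/*
+ * Worked example of the geometry terms above (hypothetical numbers): with
+ * an MDPL of 1200 bytes and running_total == 300 bytes of previously
+ * coalesced packets, CMPL = 1200 - 300 = 900. If the packet header plus
+ * AEAD tag overhead comes to 60 bytes, then CMPPL (the room for plaintext
+ * frame data) is 840 and pkt_overhead = CMPL - CMPPL = 60.
+ */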
+
+static uint32_t txp_determine_archetype(OSSL_QUIC_TX_PACKETISER *txp,
+ uint64_t cc_limit)
+{
+ OSSL_ACKM_PROBE_INFO *probe_info
+ = ossl_ackm_get0_probe_request(txp->args.ackm);
+ uint32_t pn_space;
+
+ /*
+ * If ACKM has requested probe generation (e.g. due to PTO), we generate a
+ * Probe-archetype packet. Actually, we determine archetype on a
+ * per-datagram basis, so if any EL wants a probe, do a pass in which
+ * we try and generate a probe (if needed) for all ELs.
+ */
+ if (probe_info->anti_deadlock_initial > 0
+ || probe_info->anti_deadlock_handshake > 0)
+ return TX_PACKETISER_ARCHETYPE_PROBE;
+
+ for (pn_space = QUIC_PN_SPACE_INITIAL;
+ pn_space < QUIC_PN_SPACE_NUM;
+ ++pn_space)
+ if (probe_info->pto[pn_space] > 0)
+ return TX_PACKETISER_ARCHETYPE_PROBE;
+
+ /*
+ * If we are out of CC budget, we cannot send a normal packet,
+ * but we can do an ACK-only packet (potentially, if we
+ * want to send an ACK).
+ */
+ if (cc_limit == 0)
+ return TX_PACKETISER_ARCHETYPE_ACK_ONLY;
+
+ /* All other packets. */
+ return TX_PACKETISER_ARCHETYPE_NORMAL;
+}
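+
+/*
+ * Example outcomes of the selection above (hypothetical inputs):
+ *
+ *     probe requested?   cc_limit   resulting archetype
+ *     yes                any        TX_PACKETISER_ARCHETYPE_PROBE
+ *     no                 0          TX_PACKETISER_ARCHETYPE_ACK_ONLY
+ *     no                 > 0        TX_PACKETISER_ARCHETYPE_NORMAL
+ */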
+
+static int txp_should_try_staging(OSSL_QUIC_TX_PACKETISER *txp,
+ uint32_t enc_level,
+ uint32_t archetype,
+ uint64_t cc_limit,
+ uint32_t *conn_close_enc_level)
+{
+ struct archetype_data a;
+ uint32_t pn_space = ossl_quic_enc_level_to_pn_space(enc_level);
+ QUIC_CFQ_ITEM *cfq_item;
+
+ if (!ossl_qtx_is_enc_level_provisioned(txp->args.qtx, enc_level))
+ return 0;
+
+ if (!txp_get_archetype_data(enc_level, archetype, &a))
+ return 0;
+
+ if (!a.bypass_cc && cc_limit == 0)
+ /* CC not allowing us to send. */
+ return 0;
+
+ /*
+ * We can produce CONNECTION_CLOSE frames on any EL in principle, which
+ * means we need to choose which EL we would prefer to use. After a
+ * connection is fully established we have only one provisioned EL and this
+ * is a non-issue. Where multiple ELs are provisioned, it is possible the
+ * peer does not have the keys for the EL yet, which suggests in general it
+ * is preferable to use the lowest EL which is still provisioned.
+ *
+ * However (RFC 9000 s. 10.2.3 & 12.5) we are also required to not send
+ * application CONNECTION_CLOSE frames in non-1-RTT ELs, so as to not
+ * potentially leak application data on a connection which has yet to be
+ * authenticated. Thus when we have an application CONNECTION_CLOSE frame
+ * queued and need to send it on a non-1-RTT EL, we have to convert it
+ * into a transport CONNECTION_CLOSE frame which contains no application
+ * data. Since this loses information, it suggests we should use the 1-RTT
+ * EL to avoid this if possible, even if a lower EL is also available.
+ *
+ * At the same time, just because we have the 1-RTT EL provisioned locally
+ * does not necessarily mean the peer does, for example if a handshake
+ * CRYPTO frame has been lost. It is fairly important that CONNECTION_CLOSE
+ * is signalled in a way we know our peer can decrypt, as we stop processing
+ * connection retransmission logic for real after connection close and
+ * simply 'blindly' retransmit the same CONNECTION_CLOSE frame.
+ *
+ * This is not a major concern for clients, since if a client has a 1-RTT EL
+ * provisioned the server is guaranteed to also have a 1-RTT EL provisioned.
+ *
+ * TODO(QUIC FUTURE): Revisit this when we have reached a decision on how
+ * best to implement this.
+ */
+ if (*conn_close_enc_level > enc_level
+ && *conn_close_enc_level != QUIC_ENC_LEVEL_1RTT)
+ *conn_close_enc_level = enc_level;
+
+ /* Do we need to send a PTO probe? */
+ if (a.allow_force_ack_eliciting) {
+ OSSL_ACKM_PROBE_INFO *probe_info
+ = ossl_ackm_get0_probe_request(txp->args.ackm);
+
+ if ((enc_level == QUIC_ENC_LEVEL_INITIAL
+ && probe_info->anti_deadlock_initial > 0)
+ || (enc_level == QUIC_ENC_LEVEL_HANDSHAKE
+ && probe_info->anti_deadlock_handshake > 0)
+ || probe_info->pto[pn_space] > 0)
+ return 1;
+ }
+
+ /* Does the crypto stream for this EL want to produce anything? */
+ if (a.allow_crypto && sstream_is_pending(txp->args.crypto[pn_space]))
+ return 1;
+
+ /* Does the ACKM for this PN space want to produce anything? */
+ if (a.allow_ack && (ossl_ackm_is_ack_desired(txp->args.ackm, pn_space)
+ || (txp->want_ack & (1UL << pn_space)) != 0))
+ return 1;
+
+ /* Do we need to force emission of an ACK-eliciting packet? */
+ if (a.allow_force_ack_eliciting
+ && (txp->force_ack_eliciting & (1UL << pn_space)) != 0)
+ return 1;
+
+ /* Does the connection-level RXFC want to produce a frame? */
+ if (a.allow_conn_fc && (txp->want_max_data
+ || ossl_quic_rxfc_has_cwm_changed(txp->args.conn_rxfc, 0)))
+ return 1;
+
+ /* Do we want to produce a MAX_STREAMS frame? */
+ if (a.allow_conn_fc
+ && (txp->want_max_streams_bidi
+ || ossl_quic_rxfc_has_cwm_changed(txp->args.max_streams_bidi_rxfc,
+ 0)
+ || txp->want_max_streams_uni
+ || ossl_quic_rxfc_has_cwm_changed(txp->args.max_streams_uni_rxfc,
+ 0)))
+ return 1;
+
+ /* Do we want to produce a HANDSHAKE_DONE frame? */
+ if (a.allow_handshake_done && txp->want_handshake_done)
+ return 1;
+
+ /* Do we want to produce a CONNECTION_CLOSE frame? */
+ if (a.allow_conn_close && txp->want_conn_close &&
+ *conn_close_enc_level == enc_level)
+ /*
+ * This is a bit of a special case since CONNECTION_CLOSE can appear in
+ * most packet types, and when we decide we want to send it this status
+ * isn't tied to a specific EL. So if we want to send it, we send it
+ * only on the lowest non-dropped EL.
+ */
+ return 1;
+
+ /* Does the CFQ have any frames queued for this PN space? */
+ if (enc_level != QUIC_ENC_LEVEL_0RTT)
+ for (cfq_item = ossl_quic_cfq_get_priority_head(txp->args.cfq, pn_space);
+ cfq_item != NULL;
+ cfq_item = ossl_quic_cfq_item_get_priority_next(cfq_item, pn_space)) {
+ uint64_t frame_type = ossl_quic_cfq_item_get_frame_type(cfq_item);
+
+ switch (frame_type) {
+ case OSSL_QUIC_FRAME_TYPE_NEW_CONN_ID:
+ if (a.allow_new_conn_id)
+ return 1;
+ break;
+ case OSSL_QUIC_FRAME_TYPE_RETIRE_CONN_ID:
+ if (a.allow_retire_conn_id)
+ return 1;
+ break;
+ case OSSL_QUIC_FRAME_TYPE_NEW_TOKEN:
+ if (a.allow_new_token)
+ return 1;
+ break;
+ case OSSL_QUIC_FRAME_TYPE_PATH_RESPONSE:
+ if (a.allow_path_response)
+ return 1;
+ break;
+ default:
+ if (a.allow_cfq_other)
+ return 1;
+ break;
+ }
+ }
+
+ if (a.allow_stream_rel && txp->handshake_complete) {
+ QUIC_STREAM_ITER it;
+
+ /*
+ * If there are any active streams, 0/1-RTT wants to produce a packet.
+ * Whether a stream is on the active list is required to be precise
+ * (i.e., a stream is never on the active list if we cannot produce a
+ * frame for it), and all stream-related frames are governed by
+ * a.allow_stream_rel (i.e., if we can send one type of stream-related
+ * frame, we can send any of them), so we don't need to inspect
+ * individual streams on the active list, just confirm that the active
+ * list is non-empty.
+ */
+ ossl_quic_stream_iter_init(&it, txp->args.qsm, 0);
+ if (it.stream != NULL)
+ return 1;
+ }
+
+ return 0;
+}
+
+static int sstream_is_pending(QUIC_SSTREAM *sstream)
+{
+ OSSL_QUIC_FRAME_STREAM hdr;
+ OSSL_QTX_IOVEC iov[2];
+ size_t num_iov = OSSL_NELEM(iov);
+
+ return ossl_quic_sstream_get_stream_frame(sstream, 0, &hdr, iov, &num_iov);
+}
+
+/* Determine how many bytes we should use for the encoded PN. */
+static size_t txp_determine_pn_len(OSSL_QUIC_TX_PACKETISER *txp)
+{
+ return 4; /* TODO(QUIC FUTURE) */
+}
+
+/* Determine plaintext packet payload length from payload length. */
+static int txp_determine_ppl_from_pl(OSSL_QUIC_TX_PACKETISER *txp,
+ size_t pl,
+ uint32_t enc_level,
+ size_t hdr_len,
+ size_t *r)
+{
+ if (pl < hdr_len)
+ return 0;
+
+ pl -= hdr_len;
+
+ if (!ossl_qtx_calculate_plaintext_payload_len(txp->args.qtx, enc_level,
+ pl, &pl))
+ return 0;
+
+ *r = pl;
+ return 1;
+}
+
+static size_t txp_get_mdpl(OSSL_QUIC_TX_PACKETISER *txp)
+{
+ return ossl_qtx_get_mdpl(txp->args.qtx);
+}
+
+static QUIC_SSTREAM *get_sstream_by_id(uint64_t stream_id, uint32_t pn_space,
+ void *arg)
+{
+ OSSL_QUIC_TX_PACKETISER *txp = arg;
+ QUIC_STREAM *s;
+
+ if (stream_id == UINT64_MAX)
+ return txp->args.crypto[pn_space];
+
+ s = ossl_quic_stream_map_get_by_id(txp->args.qsm, stream_id);
+ if (s == NULL)
+ return NULL;
+
+ return s->sstream;
+}
+
+static void on_regen_notify(uint64_t frame_type, uint64_t stream_id,
+ QUIC_TXPIM_PKT *pkt, void *arg)
+{
+ OSSL_QUIC_TX_PACKETISER *txp = arg;
+
+ switch (frame_type) {
+ case OSSL_QUIC_FRAME_TYPE_HANDSHAKE_DONE:
+ txp->want_handshake_done = 1;
+ break;
+ case OSSL_QUIC_FRAME_TYPE_MAX_DATA:
+ txp->want_max_data = 1;
+ break;
+ case OSSL_QUIC_FRAME_TYPE_MAX_STREAMS_BIDI:
+ txp->want_max_streams_bidi = 1;
+ break;
+ case OSSL_QUIC_FRAME_TYPE_MAX_STREAMS_UNI:
+ txp->want_max_streams_uni = 1;
+ break;
+ case OSSL_QUIC_FRAME_TYPE_ACK_WITH_ECN:
+ txp->want_ack |= (1UL << pkt->ackm_pkt.pkt_space);
+ break;
+ case OSSL_QUIC_FRAME_TYPE_MAX_STREAM_DATA:
+ {
+ QUIC_STREAM *s
+ = ossl_quic_stream_map_get_by_id(txp->args.qsm, stream_id);
+
+ if (s == NULL)
+ return;
+
+ s->want_max_stream_data = 1;
+ ossl_quic_stream_map_update_state(txp->args.qsm, s);
+ }
+ break;
+ case OSSL_QUIC_FRAME_TYPE_STOP_SENDING:
+ {
+ QUIC_STREAM *s
+ = ossl_quic_stream_map_get_by_id(txp->args.qsm, stream_id);
+
+ if (s == NULL)
+ return;
+
+ ossl_quic_stream_map_schedule_stop_sending(txp->args.qsm, s);
+ }
+ break;
+ case OSSL_QUIC_FRAME_TYPE_RESET_STREAM:
+ {
+ QUIC_STREAM *s
+ = ossl_quic_stream_map_get_by_id(txp->args.qsm, stream_id);
+
+ if (s == NULL)
+ return;
+
+ s->want_reset_stream = 1;
+ ossl_quic_stream_map_update_state(txp->args.qsm, s);
+ }
+ break;
+ default:
+ assert(0);
+ break;
+ }
+}
+
+static int txp_need_ping(OSSL_QUIC_TX_PACKETISER *txp,
+ uint32_t pn_space,
+ const struct archetype_data *adata)
+{
+ return adata->allow_ping
+ && (adata->require_ack_eliciting
+ || (txp->force_ack_eliciting & (1UL << pn_space)) != 0);
+}
+
+static int txp_pkt_init(struct txp_pkt *pkt, OSSL_QUIC_TX_PACKETISER *txp,
+ uint32_t enc_level, uint32_t archetype,
+ size_t running_total)
+{
+ uint32_t pn_space = ossl_quic_enc_level_to_pn_space(enc_level);
+
+ if (!txp_determine_geometry(txp, archetype, enc_level,
+ running_total, &pkt->phdr, &pkt->geom))
+ return 0;
+
+ /*
+ * Initialise TX helper. If we must be ACK eliciting, reserve 1 byte for
+ * PING.
+ */
+ if (!tx_helper_init(&pkt->h, txp, enc_level,
+ pkt->geom.cmppl,
+ txp_need_ping(txp, pn_space, &pkt->geom.adata) ? 1 : 0))
+ return 0;
+
+ pkt->h_valid = 1;
+ pkt->tpkt = NULL;
+ pkt->stream_head = NULL;
+ pkt->force_pad = 0;
+ return 1;
+}
+
+static void txp_pkt_cleanup(struct txp_pkt *pkt, OSSL_QUIC_TX_PACKETISER *txp)
+{
+ if (!pkt->h_valid)
+ return;
+
+ tx_helper_cleanup(&pkt->h);
+ pkt->h_valid = 0;
+
+ if (pkt->tpkt != NULL) {
+ ossl_quic_txpim_pkt_release(txp->args.txpim, pkt->tpkt);
+ pkt->tpkt = NULL;
+ }
+}
+
+static int txp_pkt_postgen_update_pkt_overhead(struct txp_pkt *pkt,
+ OSSL_QUIC_TX_PACKETISER *txp)
+{
+ /*
+ * After we have staged and generated our packets, but before we commit
+ * them, it is possible for the estimated packet overhead (packet header +
+ * AEAD tag size) to shrink slightly because we generated a short packet
+ * whose length can be represented in fewer bytes as a variable-length
+ * integer than we were (pessimistically) budgeting for. We need to account
+ * for this to ensure that we get our padding calculation exactly right.
+ *
+ * Update pkt_overhead to be accurate now that we know how much data is
+ * going in a packet.
+ */
+ size_t hdr_len, ciphertext_len;
+
+ if (pkt->h.enc_level == QUIC_ENC_LEVEL_INITIAL)
+ /*
+ * Don't update overheads for the INITIAL EL - we have not finished
+ * appending padding to it and would potentially miscalculate the
+ * correct padding if we now update the pkt_overhead field to switch to
+ * e.g. a 1-byte length field in the packet header. Since we are padding
+ * to QUIC_MIN_INITIAL_DGRAM_LEN which requires a 2-byte length field,
+ * this is guaranteed to be moot anyway. See comment in
+ * txp_determine_geometry for more information.
+ */
+ return 1;
+
+ if (!ossl_qtx_calculate_ciphertext_payload_len(txp->args.qtx, pkt->h.enc_level,
+ pkt->h.bytes_appended,
+ &ciphertext_len))
+ return 0;
+
+ pkt->phdr.len = ciphertext_len;
+
+ hdr_len = ossl_quic_wire_get_encoded_pkt_hdr_len(pkt->phdr.dst_conn_id.id_len,
+ &pkt->phdr);
+
+ pkt->geom.pkt_overhead = hdr_len + ciphertext_len - pkt->h.bytes_appended;
+ return 1;
+}
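+
+/*
+ * Illustrative numbers: QUIC varints encode 0..63 in one byte and 64..16383
+ * in two, so if we budgeted pessimistically for a two-byte Length field but
+ * the final ciphertext length is <= 63, the encoded header shrinks by one
+ * byte, and pkt_overhead must shrink with it for the padding calculation to
+ * match the real wire size.
+ */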
+
+static void on_confirm_notify(uint64_t frame_type, uint64_t stream_id,
+ QUIC_TXPIM_PKT *pkt, void *arg)
+{
+ OSSL_QUIC_TX_PACKETISER *txp = arg;
+
+ switch (frame_type) {
+ case OSSL_QUIC_FRAME_TYPE_STOP_SENDING:
+ {
+ QUIC_STREAM *s
+ = ossl_quic_stream_map_get_by_id(txp->args.qsm, stream_id);
+
+ if (s == NULL)
+ return;
+
+ s->acked_stop_sending = 1;
+ ossl_quic_stream_map_update_state(txp->args.qsm, s);
+ }
+ break;
+ case OSSL_QUIC_FRAME_TYPE_RESET_STREAM:
+ {
+ QUIC_STREAM *s
+ = ossl_quic_stream_map_get_by_id(txp->args.qsm, stream_id);
+
+ if (s == NULL)
+ return;
+
+ /*
+ * We must already be in RESET_SENT or RESET_RECVD if we are
+ * here, so we don't need to check state here.
+ */
+ ossl_quic_stream_map_notify_reset_stream_acked(txp->args.qsm, s);
+ ossl_quic_stream_map_update_state(txp->args.qsm, s);
+ }
+ break;
+ default:
+ assert(0);
+ break;
+ }
+}
+
+static int txp_pkt_append_padding(struct txp_pkt *pkt,
+ OSSL_QUIC_TX_PACKETISER *txp, size_t num_bytes)
+{
+ WPACKET *wpkt;
+
+ if (num_bytes == 0)
+ return 1;
+
+ if (!ossl_assert(pkt->h_valid))
+ return 0;
+
+ if (!ossl_assert(pkt->tpkt != NULL))
+ return 0;
+
+ wpkt = tx_helper_begin(&pkt->h);
+ if (wpkt == NULL)
+ return 0;
+
+ if (!ossl_quic_wire_encode_padding(wpkt, num_bytes)) {
+ tx_helper_rollback(&pkt->h);
+ return 0;
+ }
+
+ if (!tx_helper_commit(&pkt->h))
+ return 0;
+
+ pkt->tpkt->ackm_pkt.num_bytes += num_bytes;
+ /* Cannot be non-inflight if we have a PADDING frame */
+ pkt->tpkt->ackm_pkt.is_inflight = 1;
+ return 1;
+}
+
+static void on_sstream_updated(uint64_t stream_id, void *arg)
+{
+ OSSL_QUIC_TX_PACKETISER *txp = arg;
+ QUIC_STREAM *s;
+
+ s = ossl_quic_stream_map_get_by_id(txp->args.qsm, stream_id);
+ if (s == NULL)
+ return;
+
+ ossl_quic_stream_map_update_state(txp->args.qsm, s);
+}
+
+/*
+ * Returns 1 if we can send that many bytes in closing state, 0 otherwise.
+ * Also updates the bytes-sent state when it returns success.
+ */
+static int try_commit_conn_close(OSSL_QUIC_TX_PACKETISER *txp, size_t n)
+{
+ int res;
+
+ /* We can always send the first connection close frame */
+ if (txp->closing_bytes_recv == 0)
+ return 1;
+
+ /*
+ * RFC 9000 s. 10.2.1 Closing Connection State:
+ * To avoid being used for an amplification attack, such
+ * endpoints MUST limit the cumulative size of packets it sends
+ * to three times the cumulative size of the packets that are
+ * received and attributed to the connection.
+ * and:
+ * An endpoint in the closing state MUST either discard packets
+ * received from an unvalidated address or limit the cumulative
+ * size of packets it sends to an unvalidated address to three
+ * times the size of packets it receives from that address.
+ */
+ res = txp->closing_bytes_xmit + n <= txp->closing_bytes_recv * 3;
+
+ /*
+ * Attribute the bytes to the connection, if we are allowed to send them
+ * and this isn't the first closing frame.
+ */
+ if (res && txp->closing_bytes_recv != 0)
+ txp->closing_bytes_xmit += n;
+ return res;
+}
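+
+/*
+ * Worked example of the limit above: if we have received 1200 bytes since
+ * entering the closing state (closing_bytes_recv == 1200) and have already
+ * sent 2400 (closing_bytes_xmit == 2400), a further 1200-byte transmission
+ * is permitted (2400 + 1200 <= 3 * 1200) but a 1201-byte one is not.
+ */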
+
+void ossl_quic_tx_packetiser_record_received_closing_bytes(
+ OSSL_QUIC_TX_PACKETISER *txp, size_t n)
+{
+ txp->closing_bytes_recv += n;
+}
+
+static int txp_generate_pre_token(OSSL_QUIC_TX_PACKETISER *txp,
+ struct txp_pkt *pkt,
+ int chosen_for_conn_close,
+ int *can_be_non_inflight)
+{
+ const uint32_t enc_level = pkt->h.enc_level;
+ const uint32_t pn_space = ossl_quic_enc_level_to_pn_space(enc_level);
+ const struct archetype_data *a = &pkt->geom.adata;
+ QUIC_TXPIM_PKT *tpkt = pkt->tpkt;
+ struct tx_helper *h = &pkt->h;
+ const OSSL_QUIC_FRAME_ACK *ack;
+ OSSL_QUIC_FRAME_ACK ack2;
+
+ tpkt->ackm_pkt.largest_acked = QUIC_PN_INVALID;
+
+ /* ACK Frames (Regenerate) */
+ if (a->allow_ack
+ && tx_helper_get_space_left(h) >= MIN_FRAME_SIZE_ACK
+ && (((txp->want_ack & (1UL << pn_space)) != 0)
+ || ossl_ackm_is_ack_desired(txp->args.ackm, pn_space))
+ && (ack = ossl_ackm_get_ack_frame(txp->args.ackm, pn_space)) != NULL) {
+ WPACKET *wpkt = tx_helper_begin(h);
+
+ if (wpkt == NULL)
+ return 0;
+
+ /* We do not currently support ECN */
+ ack2 = *ack;
+ ack2.ecn_present = 0;
+
+ if (ossl_quic_wire_encode_frame_ack(wpkt,
+ txp->args.ack_delay_exponent,
+ &ack2)) {
+ if (!tx_helper_commit(h))
+ return 0;
+
+ tpkt->had_ack_frame = 1;
+
+ if (ack->num_ack_ranges > 0)
+ tpkt->ackm_pkt.largest_acked = ack->ack_ranges[0].end;
+
+ if (txp->ack_tx_cb != NULL)
+ txp->ack_tx_cb(&ack2, pn_space, txp->ack_tx_cb_arg);
+ } else {
+ tx_helper_rollback(h);
+ }
+ }
+
+ /* CONNECTION_CLOSE Frames (Regenerate) */
+ if (a->allow_conn_close && txp->want_conn_close && chosen_for_conn_close) {
+ WPACKET *wpkt = tx_helper_begin(h);
+ OSSL_QUIC_FRAME_CONN_CLOSE f, *pf = &txp->conn_close_frame;
+ size_t l;
+
+ if (wpkt == NULL)
+ return 0;
+
+ /*
+ * Application CONNECTION_CLOSE frames may only be sent in the
+ * Application PN space, as otherwise they may be sent before a
+ * connection is authenticated and leak application data. Therefore, if
+ * we need to send a CONNECTION_CLOSE frame in another PN space and were
+ * given an application CONNECTION_CLOSE frame, convert it into a
+ * transport CONNECTION_CLOSE frame, removing any sensitive application
+ * data.
+ *
+ * RFC 9000 s. 10.2.3: "A CONNECTION_CLOSE of type 0x1d MUST be replaced
+ * by a CONNECTION_CLOSE of type 0x1c when sending the frame in Initial
+ * or Handshake packets. Otherwise, information about the application
+ * state might be revealed. Endpoints MUST clear the value of the Reason
+ * Phrase field and SHOULD use the APPLICATION_ERROR code when
+ * converting to a CONNECTION_CLOSE of type 0x1c."
+ */
+ if (pn_space != QUIC_PN_SPACE_APP && pf->is_app) {
+ pf = &f;
+ pf->is_app = 0;
+ pf->frame_type = 0;
+ pf->error_code = OSSL_QUIC_ERR_APPLICATION_ERROR;
+ pf->reason = NULL;
+ pf->reason_len = 0;
+ }
+
+ if (ossl_quic_wire_encode_frame_conn_close(wpkt, pf)
+ && WPACKET_get_total_written(wpkt, &l)
+ && try_commit_conn_close(txp, l)) {
+ if (!tx_helper_commit(h))
+ return 0;
+
+ tpkt->had_conn_close = 1;
+ *can_be_non_inflight = 0;
+ } else {
+ tx_helper_rollback(h);
+ }
+ }
+
+ return 1;
+}
+
+static int try_len(size_t space_left, size_t orig_len,
+ size_t base_hdr_len, size_t lenbytes,
+ uint64_t maxn, size_t *hdr_len, size_t *payload_len)
+{
+ size_t n;
+ size_t maxn_ = maxn > SIZE_MAX ? SIZE_MAX : (size_t)maxn;
+
+ *hdr_len = base_hdr_len + lenbytes;
+
+ if (orig_len == 0 && space_left >= *hdr_len) {
+ *payload_len = 0;
+ return 1;
+ }
+
+ n = orig_len;
+ if (n > maxn_)
+ n = maxn_;
+ if (n + *hdr_len > space_left)
+ n = (space_left >= *hdr_len) ? space_left - *hdr_len : 0;
+
+ *payload_len = n;
+ return n > 0;
+}
+
+static int determine_len(size_t space_left, size_t orig_len,
+ size_t base_hdr_len,
+ uint64_t *hlen, uint64_t *len)
+{
+ int ok = 0;
+ size_t chosen_payload_len = 0;
+ size_t chosen_hdr_len = 0;
+ size_t payload_len[4], hdr_len[4];
+ int i, valid[4] = {0};
+
+ valid[0] = try_len(space_left, orig_len, base_hdr_len,
+ 1, OSSL_QUIC_VLINT_1B_MAX,
+ &hdr_len[0], &payload_len[0]);
+ valid[1] = try_len(space_left, orig_len, base_hdr_len,
+ 2, OSSL_QUIC_VLINT_2B_MAX,
+ &hdr_len[1], &payload_len[1]);
+ valid[2] = try_len(space_left, orig_len, base_hdr_len,
+ 4, OSSL_QUIC_VLINT_4B_MAX,
+ &hdr_len[2], &payload_len[2]);
+ valid[3] = try_len(space_left, orig_len, base_hdr_len,
+ 8, OSSL_QUIC_VLINT_8B_MAX,
+ &hdr_len[3], &payload_len[3]);
+
+ for (i = OSSL_NELEM(valid) - 1; i >= 0; --i)
+ if (valid[i] && payload_len[i] >= chosen_payload_len) {
+ chosen_payload_len = payload_len[i];
+ chosen_hdr_len = hdr_len[i];
+ ok = 1;
+ }
+
+ *hlen = chosen_hdr_len;
+ *len = chosen_payload_len;
+ return ok;
+}
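+
+/*
+ * Illustration of the trade-off determine_len() resolves (assuming ample
+ * pending data): with space_left == 70 and base_hdr_len == 2, a one-byte
+ * length field gives hdr_len == 3 but caps the payload at 63 (the largest
+ * one-byte varint), using 66 bytes in total; a two-byte length field gives
+ * hdr_len == 4 and allows a 66-byte payload, filling all 70 bytes. The
+ * descending loop with >= means that, among encodings yielding the same
+ * payload size, the shortest length field is chosen.
+ */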
+
+/*
+ * Given a CRYPTO frame header with accurate chdr->len and a budget
+ * (space_left), try to find the optimal value of chdr->len to fill as much of
+ * the budget as possible. This is slightly hairy because larger values of
+ * chdr->len cause larger encoded sizes of the length field of the frame, which
+ * in turn mean less space available for payload data. We check all possible
+ * encodings and choose the optimal encoding.
+ */
+static int determine_crypto_len(struct tx_helper *h,
+ OSSL_QUIC_FRAME_CRYPTO *chdr,
+ size_t space_left,
+ uint64_t *hlen,
+ uint64_t *len)
+{
+ size_t orig_len;
+ size_t base_hdr_len; /* CRYPTO header length without length field */
+
+ if (chdr->len > SIZE_MAX)
+ return 0;
+
+ orig_len = (size_t)chdr->len;
+
+ chdr->len = 0;
+ base_hdr_len = ossl_quic_wire_get_encoded_frame_len_crypto_hdr(chdr);
+ chdr->len = orig_len;
+ if (base_hdr_len == 0)
+ return 0;
+
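+    /*
+     * The encoded header length above was computed with chdr->len == 0,
+     * which occupies one byte as a varint; drop that byte to obtain the
+     * header size with no length field at all. try_len() then re-adds 1, 2,
+     * 4 or 8 bytes for each candidate length-field encoding.
+     */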
+ --base_hdr_len;
+
+ return determine_len(space_left, orig_len, base_hdr_len, hlen, len);
+}
+
+static int determine_stream_len(struct tx_helper *h,
+ OSSL_QUIC_FRAME_STREAM *shdr,
+ size_t space_left,
+ uint64_t *hlen,
+ uint64_t *len)
+{
+ size_t orig_len;
+ size_t base_hdr_len; /* STREAM header length without length field */
+
+ if (shdr->len > SIZE_MAX)
+ return 0;
+
+ orig_len = (size_t)shdr->len;
+
+ shdr->len = 0;
+ base_hdr_len = ossl_quic_wire_get_encoded_frame_len_stream_hdr(shdr);
+ shdr->len = orig_len;
+ if (base_hdr_len == 0)
+ return 0;
+
+ if (shdr->has_explicit_len)
+ --base_hdr_len;
+
+ return determine_len(space_left, orig_len, base_hdr_len, hlen, len);
+}
+
+static int txp_generate_crypto_frames(OSSL_QUIC_TX_PACKETISER *txp,
+ struct txp_pkt *pkt,
+ int *have_ack_eliciting)
+{
+ const uint32_t enc_level = pkt->h.enc_level;
+ const uint32_t pn_space = ossl_quic_enc_level_to_pn_space(enc_level);
+ QUIC_TXPIM_PKT *tpkt = pkt->tpkt;
+ struct tx_helper *h = &pkt->h;
+ size_t num_stream_iovec;
+ OSSL_QUIC_FRAME_STREAM shdr = {0};
+ OSSL_QUIC_FRAME_CRYPTO chdr = {0};
+ OSSL_QTX_IOVEC iov[2];
+ uint64_t hdr_bytes;
+ WPACKET *wpkt;
+ QUIC_TXPIM_CHUNK chunk = {0};
+    size_t i, j, space_left;
+
+ for (i = 0;; ++i) {
+ space_left = tx_helper_get_space_left(h);
+
+ if (space_left < MIN_FRAME_SIZE_CRYPTO)
+ return 1; /* no point trying */
+
+ /* Do we have any CRYPTO data waiting? */
+ num_stream_iovec = OSSL_NELEM(iov);
+ if (!ossl_quic_sstream_get_stream_frame(txp->args.crypto[pn_space],
+ i, &shdr, iov,
+ &num_stream_iovec))
+ return 1; /* nothing to do */
+
+ /* Convert STREAM frame header to CRYPTO frame header */
+ chdr.offset = shdr.offset;
+ chdr.len = shdr.len;
+
+ if (chdr.len == 0)
+ return 1; /* nothing to do */
+
+ /* Find best fit (header length, payload length) combination. */
+ if (!determine_crypto_len(h, &chdr, space_left, &hdr_bytes,
+ &chdr.len))
+ return 1; /* can't fit anything */
+
+ /*
+ * Truncate IOVs to match our chosen length.
+ *
+ * The length cannot be more than SIZE_MAX because this length comes
+ * from our send stream buffer.
+ */
+ ossl_quic_sstream_adjust_iov((size_t)chdr.len, iov, num_stream_iovec);
+
+ /*
+ * Ensure we have enough iovecs allocated (1 for the header, up to 2 for
+ * the stream data.)
+ */
+ if (!txp_el_ensure_iovec(&txp->el[enc_level], h->num_iovec + 3))
+ return 0; /* alloc error */
+
+ /* Encode the header. */
+ wpkt = tx_helper_begin(h);
+ if (wpkt == NULL)
+ return 0; /* alloc error */
+
+ if (!ossl_quic_wire_encode_frame_crypto_hdr(wpkt, &chdr)) {
+ tx_helper_rollback(h);
+ return 1; /* can't fit */
+ }
+
+ if (!tx_helper_commit(h))
+ return 0; /* alloc error */
+
+ /* Add payload iovecs to the helper (infallible). */
+        for (j = 0; j < num_stream_iovec; ++j)
+            tx_helper_append_iovec(h, iov[j].buf, iov[j].buf_len);
+
+ *have_ack_eliciting = 1;
+ tx_helper_unrestrict(h); /* no longer need PING */
+
+ /* Log chunk to TXPIM. */
+ chunk.stream_id = UINT64_MAX; /* crypto stream */
+ chunk.start = chdr.offset;
+ chunk.end = chdr.offset + chdr.len - 1;
+ chunk.has_fin = 0; /* Crypto stream never ends */
+ if (!ossl_quic_txpim_pkt_append_chunk(tpkt, &chunk))
+ return 0; /* alloc error */
+ }
+}
+
+struct chunk_info {
+ OSSL_QUIC_FRAME_STREAM shdr;
+ uint64_t orig_len;
+ OSSL_QTX_IOVEC iov[2];
+ size_t num_stream_iovec;
+ int valid;
+};
+
+static int txp_plan_stream_chunk(OSSL_QUIC_TX_PACKETISER *txp,
+ struct tx_helper *h,
+ QUIC_SSTREAM *sstream,
+ QUIC_TXFC *stream_txfc,
+ size_t skip,
+ struct chunk_info *chunk,
+ uint64_t consumed)
+{
+ uint64_t fc_credit, fc_swm, fc_limit;
+
+ chunk->num_stream_iovec = OSSL_NELEM(chunk->iov);
+ chunk->valid = ossl_quic_sstream_get_stream_frame(sstream, skip,
+ &chunk->shdr,
+ chunk->iov,
+ &chunk->num_stream_iovec);
+ if (!chunk->valid)
+ return 1;
+
+ if (!ossl_assert(chunk->shdr.len > 0 || chunk->shdr.is_fin))
+ /* Should only have 0-length chunk if FIN */
+ return 0;
+
+ chunk->orig_len = chunk->shdr.len;
+
+ /* Clamp according to connection and stream-level TXFC. */
+ fc_credit = ossl_quic_txfc_get_credit(stream_txfc, consumed);
+ fc_swm = ossl_quic_txfc_get_swm(stream_txfc);
+ fc_limit = fc_swm + fc_credit;
+
+ if (chunk->shdr.len > 0 && chunk->shdr.offset + chunk->shdr.len > fc_limit) {
+ chunk->shdr.len = (fc_limit <= chunk->shdr.offset)
+ ? 0 : fc_limit - chunk->shdr.offset;
+ chunk->shdr.is_fin = 0;
+ }
+
+ if (chunk->shdr.len == 0 && !chunk->shdr.is_fin) {
+ /*
+ * Nothing to do due to TXFC. Since SSTREAM returns chunks in ascending
+ * order of offset we don't need to check any later chunks, so stop
+ * iterating here.
+ */
+ chunk->valid = 0;
+ return 1;
+ }
+
+ return 1;
+}
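+
+/*
+ * Clamping example: if the stream's sent watermark (fc_swm) is 1000 and 200
+ * bytes of credit remain, fc_limit is 1200; a chunk at offset 1100 with
+ * length 300 is truncated to 100 bytes and its FIN bit is cleared, since
+ * the FIN would no longer fall at the end of the data we may send.
+ */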
+
+/*
+ * Returns 0 on fatal error (e.g. allocation failure), 1 on success.
+ * *packet_full is set to 1 if there is no longer enough room for another STREAM
+ * frame.
+ */
+static int txp_generate_stream_frames(OSSL_QUIC_TX_PACKETISER *txp,
+ struct txp_pkt *pkt,
+ uint64_t id,
+ QUIC_SSTREAM *sstream,
+ QUIC_TXFC *stream_txfc,
+ QUIC_STREAM *next_stream,
+ int *have_ack_eliciting,
+ int *packet_full,
+ uint64_t *new_credit_consumed,
+ uint64_t conn_consumed)
+{
+ int rc = 0;
+ struct chunk_info chunks[2] = {0};
+ const uint32_t enc_level = pkt->h.enc_level;
+ QUIC_TXPIM_PKT *tpkt = pkt->tpkt;
+ struct tx_helper *h = &pkt->h;
+ OSSL_QUIC_FRAME_STREAM *shdr;
+ WPACKET *wpkt;
+ QUIC_TXPIM_CHUNK chunk;
+ size_t i, j, space_left;
+ int can_fill_payload, use_explicit_len;
+ int could_have_following_chunk;
+ uint64_t orig_len;
+ uint64_t hdr_len_implicit, payload_len_implicit;
+ uint64_t hdr_len_explicit, payload_len_explicit;
+ uint64_t fc_swm, fc_new_hwm;
+
+ fc_swm = ossl_quic_txfc_get_swm(stream_txfc);
+ fc_new_hwm = fc_swm;
+
+ /*
+ * Load the first two chunks if any offered by the send stream. We retrieve
+ * the next chunk in advance so we can determine if we need to send any more
+ * chunks from the same stream after this one, which is needed when
+ * determining when we can use an implicit length in a STREAM frame.
+ */
+ for (i = 0; i < 2; ++i) {
+ if (!txp_plan_stream_chunk(txp, h, sstream, stream_txfc, i, &chunks[i],
+ conn_consumed))
+ goto err;
+
+ if (i == 0 && !chunks[i].valid) {
+ /* No chunks, nothing to do. */
+ rc = 1;
+ goto err;
+ }
+ chunks[i].shdr.stream_id = id;
+ }
+
+ for (i = 0;; ++i) {
+ space_left = tx_helper_get_space_left(h);
+
+ if (!chunks[i % 2].valid) {
+ /* Out of chunks; we're done. */
+ rc = 1;
+ goto err;
+ }
+
+ if (space_left < MIN_FRAME_SIZE_STREAM) {
+ *packet_full = 1;
+ rc = 1;
+ goto err;
+ }
+
+ if (!ossl_assert(!h->done_implicit))
+ /*
+ * Logic below should have ensured we didn't append an
+             * implicit-length frame unless we filled the packet or didn't have
+ * another stream to handle, so this should not be possible.
+ */
+ goto err;
+
+ shdr = &chunks[i % 2].shdr;
+ orig_len = chunks[i % 2].orig_len;
+ if (i > 0)
+ /* Load next chunk for lookahead. */
+ if (!txp_plan_stream_chunk(txp, h, sstream, stream_txfc, i + 1,
+ &chunks[(i + 1) % 2], conn_consumed))
+ goto err;
+
+ /*
+ * Find best fit (header length, payload length) combination for if we
+ * use an implicit length.
+ */
+ shdr->has_explicit_len = 0;
+ hdr_len_implicit = payload_len_implicit = 0;
+ if (!determine_stream_len(h, shdr, space_left,
+ &hdr_len_implicit, &payload_len_implicit)) {
+ *packet_full = 1;
+ rc = 1;
+ goto err; /* can't fit anything */
+ }
+
+ /*
+ * If there is a next stream, we don't use the implicit length so we can
+ * add more STREAM frames after this one, unless there is enough data
+ * for this STREAM frame to fill the packet.
+ */
+ can_fill_payload = (hdr_len_implicit + payload_len_implicit
+ >= space_left);
+
+ /*
+         * Is there a stream after this one, or another chunk pending
+ * transmission in this stream?
+ */
+ could_have_following_chunk
+ = (next_stream != NULL || chunks[(i + 1) % 2].valid);
+
+ /* Choose between explicit or implicit length representations. */
+ use_explicit_len = !((can_fill_payload || !could_have_following_chunk)
+ && !pkt->force_pad);
+
+ if (use_explicit_len) {
+ /*
+ * Find best fit (header length, payload length) combination for if
+ * we use an explicit length.
+ */
+ shdr->has_explicit_len = 1;
+ hdr_len_explicit = payload_len_explicit = 0;
+ if (!determine_stream_len(h, shdr, space_left,
+ &hdr_len_explicit, &payload_len_explicit)) {
+ *packet_full = 1;
+ rc = 1;
+ goto err; /* can't fit anything */
+ }
+
+ shdr->len = payload_len_explicit;
+ } else {
+ *packet_full = 1;
+ shdr->has_explicit_len = 0;
+ shdr->len = payload_len_implicit;
+ }
+
+ /* If this is a FIN, don't keep filling the packet with more FINs. */
+ if (shdr->is_fin)
+ chunks[(i + 1) % 2].valid = 0;
+
+ /*
+ * We are now committed to our length (shdr->len can't change).
+ * If we truncated the chunk, clear the FIN bit.
+ */
+ if (shdr->len < orig_len)
+ shdr->is_fin = 0;
+
+ /* Truncate IOVs to match our chosen length. */
+ ossl_quic_sstream_adjust_iov((size_t)shdr->len, chunks[i % 2].iov,
+ chunks[i % 2].num_stream_iovec);
+
+ /*
+ * Ensure we have enough iovecs allocated (1 for the header, up to 2 for
+ * the stream data.)
+ */
+ if (!txp_el_ensure_iovec(&txp->el[enc_level], h->num_iovec + 3))
+ goto err; /* alloc error */
+
+ /* Encode the header. */
+ wpkt = tx_helper_begin(h);
+ if (wpkt == NULL)
+ goto err; /* alloc error */
+
+ if (!ossl_assert(ossl_quic_wire_encode_frame_stream_hdr(wpkt, shdr))) {
+ /* (Should not be possible.) */
+ tx_helper_rollback(h);
+ *packet_full = 1;
+ rc = 1;
+ goto err; /* can't fit */
+ }
+
+ if (!tx_helper_commit(h))
+ goto err; /* alloc error */
+
+ /* Add payload iovecs to the helper (infallible). */
+ for (j = 0; j < chunks[i % 2].num_stream_iovec; ++j)
+ tx_helper_append_iovec(h, chunks[i % 2].iov[j].buf,
+ chunks[i % 2].iov[j].buf_len);
+
+ *have_ack_eliciting = 1;
+ tx_helper_unrestrict(h); /* no longer need PING */
+ if (!shdr->has_explicit_len)
+ h->done_implicit = 1;
+
+ /* Log new TXFC credit which was consumed. */
+ if (shdr->len > 0 && shdr->offset + shdr->len > fc_new_hwm)
+ fc_new_hwm = shdr->offset + shdr->len;
+
+ /* Log chunk to TXPIM. */
+ chunk.stream_id = shdr->stream_id;
+ chunk.start = shdr->offset;
+ chunk.end = shdr->offset + shdr->len - 1;
+ chunk.has_fin = shdr->is_fin;
+ chunk.has_stop_sending = 0;
+ chunk.has_reset_stream = 0;
+ if (!ossl_quic_txpim_pkt_append_chunk(tpkt, &chunk))
+ goto err; /* alloc error */
+
+ if (shdr->len < orig_len) {
+ /*
+ * If we did not serialize all of this chunk we definitely do not
+ * want to try the next chunk
+ */
+ rc = 1;
+ goto err;
+ }
+ }
+
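+    /*
+     * Reached on both success and failure paths: report any TXFC credit
+     * newly consumed by the frames we serialized so the caller can keep
+     * connection-level flow control accounting accurate in either case.
+     */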
+err:
+ *new_credit_consumed = fc_new_hwm - fc_swm;
+ return rc;
+}
+
+static void txp_enlink_tmp(QUIC_STREAM **tmp_head, QUIC_STREAM *stream)
+{
+ stream->txp_next = *tmp_head;
+ *tmp_head = stream;
+}
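+
+/*
+ * Note that this prepends, so the temporary list is walked in reverse order
+ * of stream processing; the consumers in txp_pkt_commit() do not depend on
+ * the ordering (TXPIM sorts its chunk records on retrieval).
+ */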
+
+static int txp_generate_stream_related(OSSL_QUIC_TX_PACKETISER *txp,
+ struct txp_pkt *pkt,
+ int *have_ack_eliciting,
+ QUIC_STREAM **tmp_head)
+{
+ QUIC_STREAM_ITER it;
+ WPACKET *wpkt;
+ uint64_t cwm;
+ QUIC_STREAM *stream, *snext;
+ struct tx_helper *h = &pkt->h;
+ uint64_t conn_consumed = 0;
+
+ for (ossl_quic_stream_iter_init(&it, txp->args.qsm, 1);
+ it.stream != NULL;) {
+
+ stream = it.stream;
+ ossl_quic_stream_iter_next(&it);
+ snext = it.stream;
+
+ stream->txp_sent_fc = 0;
+ stream->txp_sent_stop_sending = 0;
+ stream->txp_sent_reset_stream = 0;
+ stream->txp_blocked = 0;
+ stream->txp_txfc_new_credit_consumed = 0;
+
+ /* Stream Abort Frames (STOP_SENDING, RESET_STREAM) */
+ if (stream->want_stop_sending) {
+ OSSL_QUIC_FRAME_STOP_SENDING f;
+
+ wpkt = tx_helper_begin(h);
+ if (wpkt == NULL)
+ return 0; /* alloc error */
+
+ f.stream_id = stream->id;
+ f.app_error_code = stream->stop_sending_aec;
+ if (!ossl_quic_wire_encode_frame_stop_sending(wpkt, &f)) {
+ tx_helper_rollback(h); /* can't fit */
+ txp_enlink_tmp(tmp_head, stream);
+ break;
+ }
+
+ if (!tx_helper_commit(h))
+ return 0; /* alloc error */
+
+ *have_ack_eliciting = 1;
+ tx_helper_unrestrict(h); /* no longer need PING */
+ stream->txp_sent_stop_sending = 1;
+ }
+
+ if (stream->want_reset_stream) {
+ OSSL_QUIC_FRAME_RESET_STREAM f;
+
+ if (!ossl_assert(stream->send_state == QUIC_SSTREAM_STATE_RESET_SENT))
+ return 0;
+
+ wpkt = tx_helper_begin(h);
+ if (wpkt == NULL)
+ return 0; /* alloc error */
+
+ f.stream_id = stream->id;
+ f.app_error_code = stream->reset_stream_aec;
+ if (!ossl_quic_stream_send_get_final_size(stream, &f.final_size))
+ return 0; /* should not be possible */
+
+ if (!ossl_quic_wire_encode_frame_reset_stream(wpkt, &f)) {
+ tx_helper_rollback(h); /* can't fit */
+ txp_enlink_tmp(tmp_head, stream);
+ break;
+ }
+
+ if (!tx_helper_commit(h))
+ return 0; /* alloc error */
+
+ *have_ack_eliciting = 1;
+ tx_helper_unrestrict(h); /* no longer need PING */
+ stream->txp_sent_reset_stream = 1;
+
+ /*
+ * The final size of the stream as indicated by RESET_STREAM is used
+ * to ensure a consistent view of flow control state by both
+ * parties; if we happen to send a RESET_STREAM that consumes more
+ * flow control credit, make sure we account for that.
+ */
+ if (!ossl_assert(f.final_size <= ossl_quic_txfc_get_swm(&stream->txfc)))
+ return 0;
+
+ stream->txp_txfc_new_credit_consumed
+ = f.final_size - ossl_quic_txfc_get_swm(&stream->txfc);
+ }
+
+ /*
+ * Stream Flow Control Frames (MAX_STREAM_DATA)
+ *
+ * RFC 9000 s. 13.3: "An endpoint SHOULD stop sending MAX_STREAM_DATA
+ * frames when the receiving part of the stream enters a "Size Known" or
+ * "Reset Recvd" state." -- In practice, RECV is the only state
+ * in which it makes sense to generate more MAX_STREAM_DATA frames.
+ */
+ if (stream->recv_state == QUIC_RSTREAM_STATE_RECV
+ && (stream->want_max_stream_data
+ || ossl_quic_rxfc_has_cwm_changed(&stream->rxfc, 0))) {
+
+ wpkt = tx_helper_begin(h);
+ if (wpkt == NULL)
+ return 0; /* alloc error */
+
+ cwm = ossl_quic_rxfc_get_cwm(&stream->rxfc);
+
+ if (!ossl_quic_wire_encode_frame_max_stream_data(wpkt, stream->id,
+ cwm)) {
+ tx_helper_rollback(h); /* can't fit */
+ txp_enlink_tmp(tmp_head, stream);
+ break;
+ }
+
+ if (!tx_helper_commit(h))
+ return 0; /* alloc error */
+
+ *have_ack_eliciting = 1;
+ tx_helper_unrestrict(h); /* no longer need PING */
+ stream->txp_sent_fc = 1;
+ }
+
+ /*
+ * Stream Data Frames (STREAM)
+ *
+ * RFC 9000 s. 3.3: A sender MUST NOT send a STREAM [...] frame for a
+ * stream in the "Reset Sent" state [or any terminal state]. We don't
+ * send any more STREAM frames if we are sending, have sent, or are
+ * planning to send, RESET_STREAM. The other terminal state is Data
+ * Recvd, but txp_generate_stream_frames() is guaranteed to generate
+ * nothing in this case.
+ */
+ if (ossl_quic_stream_has_send_buffer(stream)
+ && !ossl_quic_stream_send_is_reset(stream)) {
+ int packet_full = 0;
+
+ if (!ossl_assert(!stream->want_reset_stream))
+ return 0;
+
+ if (!txp_generate_stream_frames(txp, pkt,
+ stream->id, stream->sstream,
+ &stream->txfc,
+ snext,
+ have_ack_eliciting,
+ &packet_full,
+ &stream->txp_txfc_new_credit_consumed,
+ conn_consumed)) {
+ /* Fatal error (allocation, etc.) */
+ txp_enlink_tmp(tmp_head, stream);
+ return 0;
+ }
+ conn_consumed += stream->txp_txfc_new_credit_consumed;
+
+ if (packet_full) {
+ txp_enlink_tmp(tmp_head, stream);
+ break;
+ }
+ }
+
+ txp_enlink_tmp(tmp_head, stream);
+ }
+
+ return 1;
+}
+
+static int txp_generate_for_el(OSSL_QUIC_TX_PACKETISER *txp,
+ struct txp_pkt *pkt,
+ int chosen_for_conn_close)
+{
+ int rc = TXP_ERR_SUCCESS;
+ const uint32_t enc_level = pkt->h.enc_level;
+ const uint32_t pn_space = ossl_quic_enc_level_to_pn_space(enc_level);
+ int have_ack_eliciting = 0, done_pre_token = 0;
+ const struct archetype_data a = pkt->geom.adata;
+ /*
+ * Cleared if we encode any non-ACK-eliciting frame type which rules out the
+     * packet being a non-inflight frame. This means any frame other than an ACK
+ * frame, even PADDING frames. ACK eliciting frames always cause a packet to
+ * become ineligible for non-inflight treatment so it is not necessary to
+ * clear this in cases where have_ack_eliciting is set, as it is ignored in
+ * that case.
+ */
+ int can_be_non_inflight = 1;
+ QUIC_CFQ_ITEM *cfq_item;
+ QUIC_TXPIM_PKT *tpkt = NULL;
+ struct tx_helper *h = &pkt->h;
+
+ /* Maximum PN reached? */
+ if (!ossl_quic_pn_valid(txp->next_pn[pn_space]))
+ goto fatal_err;
+
+ if (!ossl_assert(pkt->tpkt == NULL))
+ goto fatal_err;
+
+ if ((pkt->tpkt = tpkt = ossl_quic_txpim_pkt_alloc(txp->args.txpim)) == NULL)
+ goto fatal_err;
+
+ /*
+ * Frame Serialization
+ * ===================
+ *
+ * We now serialize frames into the packet in descending order of priority.
+ */
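+    /*
+     * Roughly, the order below is: HANDSHAKE_DONE, MAX_DATA, MAX_STREAMS
+     * (bidi then uni), CFQ-queued frames (NEW_CONNECTION_ID,
+     * RETIRE_CONNECTION_ID, NEW_TOKEN, PATH_RESPONSE, ...), ACK and
+     * CONNECTION_CLOSE (generated before any NEW_TOKEN frame if one is
+     * queued), CRYPTO, stream-related frames, and finally PING if the packet
+     * still needs to be made ACK-eliciting.
+     */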
+
+ /* HANDSHAKE_DONE (Regenerate) */
+ if (a.allow_handshake_done && txp->want_handshake_done
+ && tx_helper_get_space_left(h) >= MIN_FRAME_SIZE_HANDSHAKE_DONE) {
+ WPACKET *wpkt = tx_helper_begin(h);
+
+ if (wpkt == NULL)
+ goto fatal_err;
+
+ if (ossl_quic_wire_encode_frame_handshake_done(wpkt)) {
+ tpkt->had_handshake_done_frame = 1;
+ have_ack_eliciting = 1;
+
+ if (!tx_helper_commit(h))
+ goto fatal_err;
+
+ tx_helper_unrestrict(h); /* no longer need PING */
+ } else {
+ tx_helper_rollback(h);
+ }
+ }
+
+ /* MAX_DATA (Regenerate) */
+ if (a.allow_conn_fc
+ && (txp->want_max_data
+ || ossl_quic_rxfc_has_cwm_changed(txp->args.conn_rxfc, 0))
+ && tx_helper_get_space_left(h) >= MIN_FRAME_SIZE_MAX_DATA) {
+ WPACKET *wpkt = tx_helper_begin(h);
+ uint64_t cwm = ossl_quic_rxfc_get_cwm(txp->args.conn_rxfc);
+
+ if (wpkt == NULL)
+ goto fatal_err;
+
+ if (ossl_quic_wire_encode_frame_max_data(wpkt, cwm)) {
+ tpkt->had_max_data_frame = 1;
+ have_ack_eliciting = 1;
+
+ if (!tx_helper_commit(h))
+ goto fatal_err;
+
+ tx_helper_unrestrict(h); /* no longer need PING */
+ } else {
+ tx_helper_rollback(h);
+ }
+ }
+
+ /* MAX_STREAMS_BIDI (Regenerate) */
+ if (a.allow_conn_fc
+ && (txp->want_max_streams_bidi
+ || ossl_quic_rxfc_has_cwm_changed(txp->args.max_streams_bidi_rxfc, 0))
+ && tx_helper_get_space_left(h) >= MIN_FRAME_SIZE_MAX_STREAMS_BIDI) {
+ WPACKET *wpkt = tx_helper_begin(h);
+ uint64_t max_streams
+ = ossl_quic_rxfc_get_cwm(txp->args.max_streams_bidi_rxfc);
+
+ if (wpkt == NULL)
+ goto fatal_err;
+
+ if (ossl_quic_wire_encode_frame_max_streams(wpkt, /*is_uni=*/0,
+ max_streams)) {
+ tpkt->had_max_streams_bidi_frame = 1;
+ have_ack_eliciting = 1;
+
+ if (!tx_helper_commit(h))
+ goto fatal_err;
+
+ tx_helper_unrestrict(h); /* no longer need PING */
+ } else {
+ tx_helper_rollback(h);
+ }
+ }
+
+ /* MAX_STREAMS_UNI (Regenerate) */
+ if (a.allow_conn_fc
+ && (txp->want_max_streams_uni
+ || ossl_quic_rxfc_has_cwm_changed(txp->args.max_streams_uni_rxfc, 0))
+ && tx_helper_get_space_left(h) >= MIN_FRAME_SIZE_MAX_STREAMS_UNI) {
+ WPACKET *wpkt = tx_helper_begin(h);
+ uint64_t max_streams
+ = ossl_quic_rxfc_get_cwm(txp->args.max_streams_uni_rxfc);
+
+ if (wpkt == NULL)
+ goto fatal_err;
+
+ if (ossl_quic_wire_encode_frame_max_streams(wpkt, /*is_uni=*/1,
+ max_streams)) {
+ tpkt->had_max_streams_uni_frame = 1;
+ have_ack_eliciting = 1;
+
+ if (!tx_helper_commit(h))
+ goto fatal_err;
+
+ tx_helper_unrestrict(h); /* no longer need PING */
+ } else {
+ tx_helper_rollback(h);
+ }
+ }
+
+ /* GCR Frames */
+ for (cfq_item = ossl_quic_cfq_get_priority_head(txp->args.cfq, pn_space);
+ cfq_item != NULL;
+ cfq_item = ossl_quic_cfq_item_get_priority_next(cfq_item, pn_space)) {
+ uint64_t frame_type = ossl_quic_cfq_item_get_frame_type(cfq_item);
+ const unsigned char *encoded = ossl_quic_cfq_item_get_encoded(cfq_item);
+ size_t encoded_len = ossl_quic_cfq_item_get_encoded_len(cfq_item);
+
+ switch (frame_type) {
+ case OSSL_QUIC_FRAME_TYPE_NEW_CONN_ID:
+ if (!a.allow_new_conn_id)
+ continue;
+ break;
+ case OSSL_QUIC_FRAME_TYPE_RETIRE_CONN_ID:
+ if (!a.allow_retire_conn_id)
+ continue;
+ break;
+ case OSSL_QUIC_FRAME_TYPE_NEW_TOKEN:
+ if (!a.allow_new_token)
+ continue;
+
+ /*
+ * NEW_TOKEN frames are handled via GCR, but some
+ * Regenerate-strategy frames should come before them (namely
+ * ACK, CONNECTION_CLOSE, PATH_CHALLENGE and PATH_RESPONSE). If
+ * we find a NEW_TOKEN frame, do these now. If there are no
+ * NEW_TOKEN frames in the GCR queue we will handle these below.
+ */
+ if (!done_pre_token)
+ if (txp_generate_pre_token(txp, pkt,
+ chosen_for_conn_close,
+ &can_be_non_inflight))
+ done_pre_token = 1;
+
+ break;
+ case OSSL_QUIC_FRAME_TYPE_PATH_RESPONSE:
+ if (!a.allow_path_response)
+ continue;
+
+ /*
+ * RFC 9000 s. 8.2.2: An endpoint MUST expand datagrams that
+ * contain a PATH_RESPONSE frame to at least the smallest
+ * allowed maximum datagram size of 1200 bytes.
+ */
+ pkt->force_pad = 1;
+ break;
+ default:
+ if (!a.allow_cfq_other)
+ continue;
+ break;
+ }
+
+ /*
+ * If the frame is too big, don't try to schedule any more GCR frames in
+ * this packet rather than sending subsequent ones out of order.
+ */
+ if (encoded_len > tx_helper_get_space_left(h))
+ break;
+
+ if (!tx_helper_append_iovec(h, encoded, encoded_len))
+ goto fatal_err;
+
+ ossl_quic_txpim_pkt_add_cfq_item(tpkt, cfq_item);
+
+ if (ossl_quic_frame_type_is_ack_eliciting(frame_type)) {
+ have_ack_eliciting = 1;
+ tx_helper_unrestrict(h); /* no longer need PING */
+ }
+ }
+
+ /*
+ * If we didn't generate ACK, CONNECTION_CLOSE, PATH_CHALLENGE or
+ * PATH_RESPONSE (as desired) before, do so now.
+ */
+ if (!done_pre_token)
+ if (txp_generate_pre_token(txp, pkt,
+ chosen_for_conn_close,
+ &can_be_non_inflight))
+ done_pre_token = 1;
+
+ /* CRYPTO Frames */
+ if (a.allow_crypto)
+ if (!txp_generate_crypto_frames(txp, pkt, &have_ack_eliciting))
+ goto fatal_err;
+
+ /* Stream-specific frames */
+ if (a.allow_stream_rel && txp->handshake_complete)
+ if (!txp_generate_stream_related(txp, pkt,
+ &have_ack_eliciting,
+ &pkt->stream_head))
+ goto fatal_err;
+
+ /* PING */
+ tx_helper_unrestrict(h);
+
+ if (!have_ack_eliciting && txp_need_ping(txp, pn_space, &a)) {
+ WPACKET *wpkt;
+
+ assert(h->reserve > 0);
+ wpkt = tx_helper_begin(h);
+ if (wpkt == NULL)
+ goto fatal_err;
+
+ if (!ossl_quic_wire_encode_frame_ping(wpkt)
+ || !tx_helper_commit(h))
+ /*
+ * We treat a request to be ACK-eliciting as a requirement, so this
+ * is an error.
+ */
+ goto fatal_err;
+
+ have_ack_eliciting = 1;
+ }
+
+ /* PADDING is added by ossl_quic_tx_packetiser_generate(). */
+
+ /*
+ * ACKM Data
+ * =========
+ */
+ if (have_ack_eliciting)
+ can_be_non_inflight = 0;
+
+ /* ACKM Data */
+ tpkt->ackm_pkt.num_bytes = h->bytes_appended + pkt->geom.pkt_overhead;
+ tpkt->ackm_pkt.pkt_num = txp->next_pn[pn_space];
+ /* largest_acked is set in txp_generate_pre_token */
+ tpkt->ackm_pkt.pkt_space = pn_space;
+ tpkt->ackm_pkt.is_inflight = !can_be_non_inflight;
+ tpkt->ackm_pkt.is_ack_eliciting = have_ack_eliciting;
+ tpkt->ackm_pkt.is_pto_probe = 0;
+ tpkt->ackm_pkt.is_mtu_probe = 0;
+ tpkt->ackm_pkt.time = txp->args.now(txp->args.now_arg);
+ tpkt->pkt_type = pkt->phdr.type;
+
+ /* Done. */
+ return rc;
+
+fatal_err:
+ /*
+ * Handler for fatal errors, i.e. errors causing us to abort the entire
+ * packet rather than just one frame. Examples of such errors include
+ * allocation errors.
+ */
+ if (tpkt != NULL) {
+ ossl_quic_txpim_pkt_release(txp->args.txpim, tpkt);
+ pkt->tpkt = NULL;
+ }
+ return TXP_ERR_INTERNAL;
+}
+
+/*
+ * Commits and queues a packet for transmission. There is no backing out after
+ * this.
+ *
+ * This:
+ *
+ * - Sends the packet to the QTX for encryption and transmission;
+ *
+ * - Records the packet as having been transmitted in FIFM. ACKM is informed,
+ * etc. and the TXPIM record is filed.
+ *
+ * - Informs various subsystems of frames that were sent and clears frame
+ * wanted flags so that we do not generate the same frames again.
+ *
+ * Assumptions:
+ *
+ * - pkt is a txp_pkt for the correct EL;
+ *
+ * - pkt->tpkt is valid;
+ *
+ * - pkt->tpkt->ackm_pkt has been fully filled in;
+ *
+ * - Stream chunk records have been appended to pkt->tpkt for STREAM and
+ * CRYPTO frames, but not for RESET_STREAM or STOP_SENDING frames;
+ *
+ * - The chosen stream list for the packet can be fully walked from
+ * pkt->stream_head using stream->txp_next;
+ *
+ * - pkt->has_ack_eliciting is set correctly.
+ *
+ */
+static int txp_pkt_commit(OSSL_QUIC_TX_PACKETISER *txp,
+ struct txp_pkt *pkt,
+ uint32_t archetype,
+ int *txpim_pkt_reffed)
+{
+ int rc = 1;
+ uint32_t enc_level = pkt->h.enc_level;
+ uint32_t pn_space = ossl_quic_enc_level_to_pn_space(enc_level);
+ QUIC_TXPIM_PKT *tpkt = pkt->tpkt;
+ QUIC_STREAM *stream;
+ OSSL_QTX_PKT txpkt;
+ struct archetype_data a;
+
+ *txpim_pkt_reffed = 0;
+
+ /* Cannot send a packet with an empty payload. */
+ if (pkt->h.bytes_appended == 0)
+ return 0;
+
+ if (!txp_get_archetype_data(enc_level, archetype, &a))
+ return 0;
+
+ /* Packet Information for QTX */
+ txpkt.hdr = &pkt->phdr;
+ txpkt.iovec = txp->el[enc_level].iovec;
+ txpkt.num_iovec = pkt->h.num_iovec;
+ txpkt.local = NULL;
+ txpkt.peer = BIO_ADDR_family(&txp->args.peer) == AF_UNSPEC
+ ? NULL : &txp->args.peer;
+ txpkt.pn = txp->next_pn[pn_space];
+ txpkt.flags = OSSL_QTX_PKT_FLAG_COALESCE; /* always try to coalesce */
+
+ /* Generate TXPIM chunks representing STOP_SENDING and RESET_STREAM frames. */
+ for (stream = pkt->stream_head; stream != NULL; stream = stream->txp_next)
+ if (stream->txp_sent_stop_sending || stream->txp_sent_reset_stream) {
+ /* Log STOP_SENDING/RESET_STREAM chunk to TXPIM. */
+ QUIC_TXPIM_CHUNK chunk;
+
+ chunk.stream_id = stream->id;
+ chunk.start = UINT64_MAX;
+ chunk.end = 0;
+ chunk.has_fin = 0;
+ chunk.has_stop_sending = stream->txp_sent_stop_sending;
+ chunk.has_reset_stream = stream->txp_sent_reset_stream;
+ if (!ossl_quic_txpim_pkt_append_chunk(tpkt, &chunk))
+ return 0; /* alloc error */
+ }
+
+ /* Dispatch to FIFD. */
+ if (!ossl_quic_fifd_pkt_commit(&txp->fifd, tpkt))
+ return 0;
+
+ /*
+ * Transmission and Post-Packet Generation Bookkeeping
+ * ===================================================
+ *
+ * No backing out anymore - at this point the ACKM has recorded the packet
+ * as having been sent, so we need to increment our next PN counter, or
+ * the ACKM will complain when we try to record a duplicate packet with
+ * the same PN later. At this point actually sending the packet may still
+ * fail. In this unlikely event it will simply be handled as though it
+ * were a lost packet.
+ */
+ ++txp->next_pn[pn_space];
+ *txpim_pkt_reffed = 1;
+
+ /* Send the packet. */
+ if (!ossl_qtx_write_pkt(txp->args.qtx, &txpkt))
+ return 0;
+
+ /*
+ * Record FC and stream abort frames as sent; deactivate streams which no
+ * longer have anything to do.
+ */
+ for (stream = pkt->stream_head; stream != NULL; stream = stream->txp_next) {
+ if (stream->txp_sent_fc) {
+ stream->want_max_stream_data = 0;
+ ossl_quic_rxfc_has_cwm_changed(&stream->rxfc, 1);
+ }
+
+ if (stream->txp_sent_stop_sending)
+ stream->want_stop_sending = 0;
+
+ if (stream->txp_sent_reset_stream)
+ stream->want_reset_stream = 0;
+
+ if (stream->txp_txfc_new_credit_consumed > 0) {
+ if (!ossl_assert(ossl_quic_txfc_consume_credit(&stream->txfc,
+ stream->txp_txfc_new_credit_consumed)))
+ /*
+ * Should not be possible, but we should continue with our
+ * bookkeeping as we have already committed the packet to the
+ * FIFD. Just change the value we return.
+ */
+ rc = 0;
+
+ stream->txp_txfc_new_credit_consumed = 0;
+ }
+
+ /*
+ * If we no longer need to generate any flow control (MAX_STREAM_DATA),
+ * STOP_SENDING or RESET_STREAM frames, nor any STREAM frames (because
+ * the stream is drained of data or TXFC-blocked), we can mark the
+ * stream as inactive.
+ */
+ ossl_quic_stream_map_update_state(txp->args.qsm, stream);
+
+ if (ossl_quic_stream_has_send_buffer(stream)
+ && !ossl_quic_sstream_has_pending(stream->sstream)
+ && ossl_quic_sstream_get_final_size(stream->sstream, NULL))
+ /*
+ * Transition to DATA_SENT if stream has a final size and we have
+ * sent all data.
+ */
+ ossl_quic_stream_map_notify_all_data_sent(txp->args.qsm, stream);
+ }
+
+ /* We have now sent the packet, so update state accordingly. */
+ if (tpkt->ackm_pkt.is_ack_eliciting)
+ txp->force_ack_eliciting &= ~(1UL << pn_space);
+
+ if (tpkt->had_handshake_done_frame)
+ txp->want_handshake_done = 0;
+
+ if (tpkt->had_max_data_frame) {
+ txp->want_max_data = 0;
+ ossl_quic_rxfc_has_cwm_changed(txp->args.conn_rxfc, 1);
+ }
+
+ if (tpkt->had_max_streams_bidi_frame) {
+ txp->want_max_streams_bidi = 0;
+ ossl_quic_rxfc_has_cwm_changed(txp->args.max_streams_bidi_rxfc, 1);
+ }
+
+ if (tpkt->had_max_streams_uni_frame) {
+ txp->want_max_streams_uni = 0;
+ ossl_quic_rxfc_has_cwm_changed(txp->args.max_streams_uni_rxfc, 1);
+ }
+
+ if (tpkt->had_ack_frame)
+ txp->want_ack &= ~(1UL << pn_space);
+
+ if (tpkt->had_conn_close)
+ txp->want_conn_close = 0;
+
+ /*
+ * Decrement probe request counts if we have sent a packet that meets
+ * the requirement of a probe, namely being ACK-eliciting.
+ */
+ if (tpkt->ackm_pkt.is_ack_eliciting) {
+ OSSL_ACKM_PROBE_INFO *probe_info
+ = ossl_ackm_get0_probe_request(txp->args.ackm);
+
+ if (enc_level == QUIC_ENC_LEVEL_INITIAL
+ && probe_info->anti_deadlock_initial > 0)
+ --probe_info->anti_deadlock_initial;
+
+ if (enc_level == QUIC_ENC_LEVEL_HANDSHAKE
+ && probe_info->anti_deadlock_handshake > 0)
+ --probe_info->anti_deadlock_handshake;
+
+ if (a.allow_force_ack_eliciting /* (i.e., not for 0-RTT) */
+ && probe_info->pto[pn_space] > 0)
+ --probe_info->pto[pn_space];
+ }
+
+ return rc;
+}
+
+/* Ensure the iovec array is at least num elements long. */
+static int txp_el_ensure_iovec(struct txp_el *el, size_t num)
+{
+ OSSL_QTX_IOVEC *iovec;
+
+ if (el->alloc_iovec >= num)
+ return 1;
+
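+    /*
+     * Grow by doubling (starting at 8) rather than to the exact requested
+     * size; callers here only ever ask for a small constant number of
+     * elements beyond the current count (num_iovec + 3), so the doubled
+     * allocation covers the request.
+     */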
+ num = el->alloc_iovec != 0 ? el->alloc_iovec * 2 : 8;
+
+ iovec = OPENSSL_realloc(el->iovec, sizeof(OSSL_QTX_IOVEC) * num);
+ if (iovec == NULL)
+ return 0;
+
+ el->iovec = iovec;
+ el->alloc_iovec = num;
+ return 1;
+}
+
+int ossl_quic_tx_packetiser_schedule_conn_close(OSSL_QUIC_TX_PACKETISER *txp,
+ const OSSL_QUIC_FRAME_CONN_CLOSE *f)
+{
+ char *reason = NULL;
+ size_t reason_len = f->reason_len;
+ size_t max_reason_len = txp_get_mdpl(txp) / 2;
+
+ if (txp->want_conn_close)
+ return 0;
+
+ /*
+ * Arbitrarily limit the length of the reason length string to half of the
+ * MDPL.
+ */
+ if (reason_len > max_reason_len)
+ reason_len = max_reason_len;
+
+ if (reason_len > 0) {
+ reason = OPENSSL_memdup(f->reason, reason_len);
+ if (reason == NULL)
+ return 0;
+ }
+
+ txp->conn_close_frame = *f;
+ txp->conn_close_frame.reason = reason;
+ txp->conn_close_frame.reason_len = reason_len;
+ txp->want_conn_close = 1;
+ return 1;
+}
+
+void ossl_quic_tx_packetiser_set_msg_callback(OSSL_QUIC_TX_PACKETISER *txp,
+ ossl_msg_cb msg_callback,
+ SSL *msg_callback_ssl)
+{
+ txp->msg_callback = msg_callback;
+ txp->msg_callback_ssl = msg_callback_ssl;
+}
+
+void ossl_quic_tx_packetiser_set_msg_callback_arg(OSSL_QUIC_TX_PACKETISER *txp,
+ void *msg_callback_arg)
+{
+ txp->msg_callback_arg = msg_callback_arg;
+}
+
+QUIC_PN ossl_quic_tx_packetiser_get_next_pn(OSSL_QUIC_TX_PACKETISER *txp,
+ uint32_t pn_space)
+{
+ if (pn_space >= QUIC_PN_SPACE_NUM)
+ return UINT64_MAX;
+
+ return txp->next_pn[pn_space];
+}
+
+OSSL_TIME ossl_quic_tx_packetiser_get_deadline(OSSL_QUIC_TX_PACKETISER *txp)
+{
+ /*
+ * TXP-specific deadline computations which rely on TXP innards. This is in
+ * turn relied on by the QUIC_CHANNEL code to determine the channel event
+ * handling deadline.
+ */
+ OSSL_TIME deadline = ossl_time_infinite();
+ uint32_t enc_level, pn_space;
+
+ /*
+ * ACK generation is not CC-gated - packets containing only ACKs are allowed
+ * to bypass CC. We want to generate ACK frames even if we are currently
+ * restricted by CC so the peer knows we have received data. The generate
+ * call will take care of selecting the correct packet archetype.
+ */
+ for (enc_level = QUIC_ENC_LEVEL_INITIAL;
+ enc_level < QUIC_ENC_LEVEL_NUM;
+ ++enc_level)
+ if (ossl_qtx_is_enc_level_provisioned(txp->args.qtx, enc_level)) {
+ pn_space = ossl_quic_enc_level_to_pn_space(enc_level);
+ deadline = ossl_time_min(deadline,
+ ossl_ackm_get_ack_deadline(txp->args.ackm, pn_space));
+ }
+
+ /* When will CC let us send more? */
+ if (txp->args.cc_method->get_tx_allowance(txp->args.cc_data) == 0)
+ deadline = ossl_time_min(deadline,
+ txp->args.cc_method->get_wakeup_deadline(txp->args.cc_data));
+
+ return deadline;
+}
diff --git a/crypto/openssl/ssl/quic/quic_txpim.c b/crypto/openssl/ssl/quic/quic_txpim.c
new file mode 100644
index 000000000000..04b25ee47a04
--- /dev/null
+++ b/crypto/openssl/ssl/quic/quic_txpim.c
@@ -0,0 +1,229 @@
+/*
+ * Copyright 2022-2023 The OpenSSL Project Authors. All Rights Reserved.
+ *
+ * Licensed under the Apache License 2.0 (the "License"). You may not use
+ * this file except in compliance with the License. You can obtain a copy
+ * in the file LICENSE in the source distribution or at
+ * https://www.openssl.org/source/license.html
+ */
+
+#include "internal/quic_txpim.h"
+#include <stdlib.h>
+
+typedef struct quic_txpim_pkt_ex_st QUIC_TXPIM_PKT_EX;
+
+struct quic_txpim_pkt_ex_st {
+ QUIC_TXPIM_PKT public;
+ QUIC_TXPIM_PKT_EX *prev, *next;
+ QUIC_TXPIM_CHUNK *chunks;
+ size_t num_chunks, alloc_chunks;
+ unsigned int chunks_need_sort : 1;
+};
+
+typedef struct quic_txpim_pkt_ex_list {
+ QUIC_TXPIM_PKT_EX *head, *tail;
+} QUIC_TXPIM_PKT_EX_LIST;
+
+struct quic_txpim_st {
+ QUIC_TXPIM_PKT_EX_LIST free_list;
+ size_t in_use;
+};
+
+#define MAX_ALLOC_CHUNKS 512
+
+QUIC_TXPIM *ossl_quic_txpim_new(void)
+{
+ QUIC_TXPIM *txpim = OPENSSL_zalloc(sizeof(*txpim));
+
+ if (txpim == NULL)
+ return NULL;
+
+ return txpim;
+}
+
+static void free_list(QUIC_TXPIM_PKT_EX_LIST *l)
+{
+ QUIC_TXPIM_PKT_EX *n, *nnext;
+
+ for (n = l->head; n != NULL; n = nnext) {
+ nnext = n->next;
+
+ OPENSSL_free(n->chunks);
+ OPENSSL_free(n);
+ }
+
+ l->head = l->tail = NULL;
+}
+
+void ossl_quic_txpim_free(QUIC_TXPIM *txpim)
+{
+ if (txpim == NULL)
+ return;
+
+ assert(txpim->in_use == 0);
+ free_list(&txpim->free_list);
+ OPENSSL_free(txpim);
+}
+
+static void list_remove(QUIC_TXPIM_PKT_EX_LIST *l, QUIC_TXPIM_PKT_EX *n)
+{
+ if (l->head == n)
+ l->head = n->next;
+ if (l->tail == n)
+ l->tail = n->prev;
+ if (n->prev != NULL)
+ n->prev->next = n->next;
+ if (n->next != NULL)
+ n->next->prev = n->prev;
+ n->prev = n->next = NULL;
+}
+
+static void list_insert_tail(QUIC_TXPIM_PKT_EX_LIST *l, QUIC_TXPIM_PKT_EX *n)
+{
+ n->prev = l->tail;
+ n->next = NULL;
+ l->tail = n;
+ if (n->prev != NULL)
+ n->prev->next = n;
+ if (l->head == NULL)
+ l->head = n;
+}
+
+static QUIC_TXPIM_PKT_EX *txpim_get_free(QUIC_TXPIM *txpim)
+{
+ QUIC_TXPIM_PKT_EX *ex = txpim->free_list.head;
+
+ if (ex != NULL)
+ return ex;
+
+ ex = OPENSSL_zalloc(sizeof(*ex));
+ if (ex == NULL)
+ return NULL;
+
+ list_insert_tail(&txpim->free_list, ex);
+ return ex;
+}
+
+static void txpim_clear(QUIC_TXPIM_PKT_EX *ex)
+{
+ memset(&ex->public.ackm_pkt, 0, sizeof(ex->public.ackm_pkt));
+ ossl_quic_txpim_pkt_clear_chunks(&ex->public);
+ ex->public.retx_head = NULL;
+ ex->public.fifd = NULL;
+ ex->public.had_handshake_done_frame = 0;
+ ex->public.had_max_data_frame = 0;
+ ex->public.had_max_streams_bidi_frame = 0;
+ ex->public.had_max_streams_uni_frame = 0;
+ ex->public.had_ack_frame = 0;
+ ex->public.had_conn_close = 0;
+}
+
+QUIC_TXPIM_PKT *ossl_quic_txpim_pkt_alloc(QUIC_TXPIM *txpim)
+{
+ QUIC_TXPIM_PKT_EX *ex = txpim_get_free(txpim);
+
+ if (ex == NULL)
+ return NULL;
+
+ txpim_clear(ex);
+ list_remove(&txpim->free_list, ex);
+ ++txpim->in_use;
+ return &ex->public;
+}
+
+void ossl_quic_txpim_pkt_release(QUIC_TXPIM *txpim, QUIC_TXPIM_PKT *fpkt)
+{
+ QUIC_TXPIM_PKT_EX *ex = (QUIC_TXPIM_PKT_EX *)fpkt;
+
+ assert(txpim->in_use > 0);
+ --txpim->in_use;
+ list_insert_tail(&txpim->free_list, ex);
+}
+
+void ossl_quic_txpim_pkt_add_cfq_item(QUIC_TXPIM_PKT *fpkt,
+ QUIC_CFQ_ITEM *item)
+{
+ item->pkt_next = fpkt->retx_head;
+ item->pkt_prev = NULL;
+ fpkt->retx_head = item;
+}
+
+void ossl_quic_txpim_pkt_clear_chunks(QUIC_TXPIM_PKT *fpkt)
+{
+ QUIC_TXPIM_PKT_EX *ex = (QUIC_TXPIM_PKT_EX *)fpkt;
+
+ ex->num_chunks = 0;
+}
+
+int ossl_quic_txpim_pkt_append_chunk(QUIC_TXPIM_PKT *fpkt,
+ const QUIC_TXPIM_CHUNK *chunk)
+{
+ QUIC_TXPIM_PKT_EX *ex = (QUIC_TXPIM_PKT_EX *)fpkt;
+ QUIC_TXPIM_CHUNK *new_chunk;
+ size_t new_alloc_chunks = ex->alloc_chunks;
+
+ if (ex->num_chunks == ex->alloc_chunks) {
+ new_alloc_chunks = (ex->alloc_chunks == 0) ? 4 : ex->alloc_chunks * 8 / 5;
+ if (new_alloc_chunks > MAX_ALLOC_CHUNKS)
+ new_alloc_chunks = MAX_ALLOC_CHUNKS;
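+        /*
+         * If the clamped size is no larger than what is already in use we
+         * have hit the MAX_ALLOC_CHUNKS ceiling and the append must fail.
+         */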
+ if (ex->num_chunks == new_alloc_chunks)
+ return 0;
+
+ new_chunk = OPENSSL_realloc(ex->chunks,
+ new_alloc_chunks * sizeof(QUIC_TXPIM_CHUNK));
+ if (new_chunk == NULL)
+ return 0;
+
+ ex->chunks = new_chunk;
+ ex->alloc_chunks = new_alloc_chunks;
+ }
+
+ ex->chunks[ex->num_chunks++] = *chunk;
+ ex->chunks_need_sort = 1;
+ return 1;
+}
+
+static int compare(const void *a, const void *b)
+{
+ const QUIC_TXPIM_CHUNK *ac = a, *bc = b;
+
+ if (ac->stream_id < bc->stream_id)
+ return -1;
+ else if (ac->stream_id > bc->stream_id)
+ return 1;
+
+ if (ac->start < bc->start)
+ return -1;
+ else if (ac->start > bc->start)
+ return 1;
+
+ return 0;
+}
+
+const QUIC_TXPIM_CHUNK *ossl_quic_txpim_pkt_get_chunks(const QUIC_TXPIM_PKT *fpkt)
+{
+ QUIC_TXPIM_PKT_EX *ex = (QUIC_TXPIM_PKT_EX *)fpkt;
+
+ if (ex->chunks_need_sort) {
+ /*
+ * List of chunks will generally be very small so there is no issue
+ * simply sorting here.
+ */
+ qsort(ex->chunks, ex->num_chunks, sizeof(QUIC_TXPIM_CHUNK), compare);
+ ex->chunks_need_sort = 0;
+ }
+
+ return ex->chunks;
+}
+
+size_t ossl_quic_txpim_pkt_get_num_chunks(const QUIC_TXPIM_PKT *fpkt)
+{
+ QUIC_TXPIM_PKT_EX *ex = (QUIC_TXPIM_PKT_EX *)fpkt;
+
+ return ex->num_chunks;
+}
+
+size_t ossl_quic_txpim_get_in_use(const QUIC_TXPIM *txpim)
+{
+ return txpim->in_use;
+}
diff --git a/crypto/openssl/ssl/quic/quic_types.c b/crypto/openssl/ssl/quic/quic_types.c
new file mode 100644
index 000000000000..4ff3ae6580b5
--- /dev/null
+++ b/crypto/openssl/ssl/quic/quic_types.c
@@ -0,0 +1,29 @@
+/*
+ * Copyright 2023 The OpenSSL Project Authors. All Rights Reserved.
+ *
+ * Licensed under the Apache License 2.0 (the "License"). You may not use
+ * this file except in compliance with the License. You can obtain a copy
+ * in the file LICENSE in the source distribution or at
+ * https://www.openssl.org/source/license.html
+ */
+
+#include "internal/quic_types.h"
+#include <openssl/rand.h>
+#include <openssl/err.h>
+
+int ossl_quic_gen_rand_conn_id(OSSL_LIB_CTX *libctx, size_t len,
+ QUIC_CONN_ID *cid)
+{
+ if (len > QUIC_MAX_CONN_ID_LEN)
+ return 0;
+
+ cid->id_len = (unsigned char)len;
+
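+    /*
+     * The final argument to RAND_bytes_ex() is the requested security
+     * strength in bits.
+     */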
+ if (RAND_bytes_ex(libctx, cid->id, len, len * 8) != 1) {
+ ERR_raise(ERR_LIB_SSL, ERR_R_RAND_LIB);
+ cid->id_len = 0;
+ return 0;
+ }
+
+ return 1;
+}
diff --git a/crypto/openssl/ssl/quic/quic_wire.c b/crypto/openssl/ssl/quic/quic_wire.c
new file mode 100644
index 000000000000..a7c7667797d9
--- /dev/null
+++ b/crypto/openssl/ssl/quic/quic_wire.c
@@ -0,0 +1,1078 @@
+/*
+ * Copyright 2022-2024 The OpenSSL Project Authors. All Rights Reserved.
+ *
+ * Licensed under the Apache License 2.0 (the "License"). You may not use
+ * this file except in compliance with the License. You can obtain a copy
+ * in the file LICENSE in the source distribution or at
+ * https://www.openssl.org/source/license.html
+ */
+
+#include <openssl/macros.h>
+#include <openssl/objects.h>
+#include "internal/quic_ssl.h"
+#include "internal/quic_vlint.h"
+#include "internal/quic_wire.h"
+#include "internal/quic_error.h"
+
+OSSL_SAFE_MATH_UNSIGNED(uint64_t, uint64_t)
+
+int ossl_quic_frame_ack_contains_pn(const OSSL_QUIC_FRAME_ACK *ack, QUIC_PN pn)
+{
+ size_t i;
+
+ for (i = 0; i < ack->num_ack_ranges; ++i)
+ if (pn >= ack->ack_ranges[i].start
+ && pn <= ack->ack_ranges[i].end)
+ return 1;
+
+ return 0;
+}
+
+/*
+ * QUIC Wire Format Encoding
+ * =========================
+ */
+
+int ossl_quic_wire_encode_padding(WPACKET *pkt, size_t num_bytes)
+{
+ /*
+ * PADDING is frame type zero, which as a variable-length integer is
+ * represented as a single zero byte. As an optimisation, just use memset.
+ */
+ return WPACKET_memset(pkt, 0, num_bytes);
+}
+
+static int encode_frame_hdr(WPACKET *pkt, uint64_t frame_type)
+{
+ return WPACKET_quic_write_vlint(pkt, frame_type);
+}
+
+int ossl_quic_wire_encode_frame_ping(WPACKET *pkt)
+{
+ return encode_frame_hdr(pkt, OSSL_QUIC_FRAME_TYPE_PING);
+}
+
+int ossl_quic_wire_encode_frame_ack(WPACKET *pkt,
+ uint32_t ack_delay_exponent,
+ const OSSL_QUIC_FRAME_ACK *ack)
+{
+ uint64_t frame_type = ack->ecn_present ? OSSL_QUIC_FRAME_TYPE_ACK_WITH_ECN
+ : OSSL_QUIC_FRAME_TYPE_ACK_WITHOUT_ECN;
+
+ uint64_t largest_ackd, first_ack_range, ack_delay_enc;
+ uint64_t i, num_ack_ranges = ack->num_ack_ranges;
+ OSSL_TIME delay;
+
+ if (num_ack_ranges == 0)
+ return 0;
+
+ delay = ossl_time_divide(ossl_time_divide(ack->delay_time, OSSL_TIME_US),
+ (uint64_t)1 << ack_delay_exponent);
+ ack_delay_enc = ossl_time2ticks(delay);
+
+ largest_ackd = ack->ack_ranges[0].end;
+ first_ack_range = ack->ack_ranges[0].end - ack->ack_ranges[0].start;
+
+ if (!encode_frame_hdr(pkt, frame_type)
+ || !WPACKET_quic_write_vlint(pkt, largest_ackd)
+ || !WPACKET_quic_write_vlint(pkt, ack_delay_enc)
+ || !WPACKET_quic_write_vlint(pkt, num_ack_ranges - 1)
+ || !WPACKET_quic_write_vlint(pkt, first_ack_range))
+ return 0;
+
+ for (i = 1; i < num_ack_ranges; ++i) {
+ uint64_t gap, range_len;
+
+ gap = ack->ack_ranges[i - 1].start - ack->ack_ranges[i].end - 2;
+ range_len = ack->ack_ranges[i].end - ack->ack_ranges[i].start;
+
+ if (!WPACKET_quic_write_vlint(pkt, gap)
+ || !WPACKET_quic_write_vlint(pkt, range_len))
+ return 0;
+ }
+
+ if (ack->ecn_present)
+ if (!WPACKET_quic_write_vlint(pkt, ack->ect0)
+ || !WPACKET_quic_write_vlint(pkt, ack->ect1)
+ || !WPACKET_quic_write_vlint(pkt, ack->ecnce))
+ return 0;
+
+ return 1;
+}
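+
+/*
+ * Encoding example: ranges are supplied in descending PN order. For the
+ * ranges [10..12] and [5..6], Largest Acknowledged is 12, the First ACK
+ * Range is 2 (12 - 10), the Gap to the next range is 10 - 6 - 2 == 2
+ * (packets 7, 8 and 9 are unacknowledged) and that range's length is 1
+ * (6 - 5).
+ */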
+
+int ossl_quic_wire_encode_frame_reset_stream(WPACKET *pkt,
+ const OSSL_QUIC_FRAME_RESET_STREAM *f)
+{
+ if (!encode_frame_hdr(pkt, OSSL_QUIC_FRAME_TYPE_RESET_STREAM)
+ || !WPACKET_quic_write_vlint(pkt, f->stream_id)
+ || !WPACKET_quic_write_vlint(pkt, f->app_error_code)
+ || !WPACKET_quic_write_vlint(pkt, f->final_size))
+ return 0;
+
+ return 1;
+}
+
+int ossl_quic_wire_encode_frame_stop_sending(WPACKET *pkt,
+ const OSSL_QUIC_FRAME_STOP_SENDING *f)
+{
+ if (!encode_frame_hdr(pkt, OSSL_QUIC_FRAME_TYPE_STOP_SENDING)
+ || !WPACKET_quic_write_vlint(pkt, f->stream_id)
+ || !WPACKET_quic_write_vlint(pkt, f->app_error_code))
+ return 0;
+
+ return 1;
+}
+
+int ossl_quic_wire_encode_frame_crypto_hdr(WPACKET *pkt,
+ const OSSL_QUIC_FRAME_CRYPTO *f)
+{
+ if (!encode_frame_hdr(pkt, OSSL_QUIC_FRAME_TYPE_CRYPTO)
+ || !WPACKET_quic_write_vlint(pkt, f->offset)
+ || !WPACKET_quic_write_vlint(pkt, f->len))
+ return 0;
+
+ return 1;
+}
+
+size_t ossl_quic_wire_get_encoded_frame_len_crypto_hdr(const OSSL_QUIC_FRAME_CRYPTO *f)
+{
+ size_t a, b, c;
+
+ a = ossl_quic_vlint_encode_len(OSSL_QUIC_FRAME_TYPE_CRYPTO);
+ b = ossl_quic_vlint_encode_len(f->offset);
+ c = ossl_quic_vlint_encode_len(f->len);
+ if (a == 0 || b == 0 || c == 0)
+ return 0;
+
+ return a + b + c;
+}
+
+void *ossl_quic_wire_encode_frame_crypto(WPACKET *pkt,
+ const OSSL_QUIC_FRAME_CRYPTO *f)
+{
+ unsigned char *p = NULL;
+
+ if (!ossl_quic_wire_encode_frame_crypto_hdr(pkt, f)
+ || f->len > SIZE_MAX /* sizeof(uint64_t) > sizeof(size_t)? */
+ || !WPACKET_allocate_bytes(pkt, (size_t)f->len, &p))
+ return NULL;
+
+ if (f->data != NULL)
+ memcpy(p, f->data, (size_t)f->len);
+
+ return p;
+}
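+
+/*
+ * The returned pointer addresses the payload area within the packet buffer;
+ * when f->data is NULL nothing is copied and the caller is expected to
+ * write (size_t)f->len bytes there itself.
+ */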
+
+int ossl_quic_wire_encode_frame_new_token(WPACKET *pkt,
+ const unsigned char *token,
+ size_t token_len)
+{
+ if (!encode_frame_hdr(pkt, OSSL_QUIC_FRAME_TYPE_NEW_TOKEN)
+ || !WPACKET_quic_write_vlint(pkt, token_len)
+ || !WPACKET_memcpy(pkt, token, token_len))
+ return 0;
+
+ return 1;
+}
+
+int ossl_quic_wire_encode_frame_stream_hdr(WPACKET *pkt,
+ const OSSL_QUIC_FRAME_STREAM *f)
+{
+ uint64_t frame_type = OSSL_QUIC_FRAME_TYPE_STREAM;
+
+ if (f->offset != 0)
+ frame_type |= OSSL_QUIC_FRAME_FLAG_STREAM_OFF;
+ if (f->has_explicit_len)
+ frame_type |= OSSL_QUIC_FRAME_FLAG_STREAM_LEN;
+ if (f->is_fin)
+ frame_type |= OSSL_QUIC_FRAME_FLAG_STREAM_FIN;
+
+ if (!encode_frame_hdr(pkt, frame_type)
+ || !WPACKET_quic_write_vlint(pkt, f->stream_id))
+ return 0;
+
+ if (f->offset != 0 && !WPACKET_quic_write_vlint(pkt, f->offset))
+ return 0;
+
+ if (f->has_explicit_len && !WPACKET_quic_write_vlint(pkt, f->len))
+ return 0;
+
+ return 1;
+}
+
+size_t ossl_quic_wire_get_encoded_frame_len_stream_hdr(const OSSL_QUIC_FRAME_STREAM *f)
+{
+ size_t a, b, c, d;
+
+ a = ossl_quic_vlint_encode_len(OSSL_QUIC_FRAME_TYPE_STREAM);
+ b = ossl_quic_vlint_encode_len(f->stream_id);
+ if (a == 0 || b == 0)
+ return 0;
+
+ if (f->offset > 0) {
+ c = ossl_quic_vlint_encode_len(f->offset);
+ if (c == 0)
+ return 0;
+ } else {
+ c = 0;
+ }
+
+ if (f->has_explicit_len) {
+ d = ossl_quic_vlint_encode_len(f->len);
+ if (d == 0)
+ return 0;
+ } else {
+ d = 0;
+ }
+
+ return a + b + c + d;
+}
+
+void *ossl_quic_wire_encode_frame_stream(WPACKET *pkt,
+ const OSSL_QUIC_FRAME_STREAM *f)
+{
+ unsigned char *p = NULL;
+
+ if (!ossl_quic_wire_encode_frame_stream_hdr(pkt, f)
+ || f->len > SIZE_MAX /* sizeof(uint64_t) > sizeof(size_t)? */)
+ return NULL;
+
+ if (!WPACKET_allocate_bytes(pkt, (size_t)f->len, &p))
+ return NULL;
+
+ if (f->data != NULL)
+ memcpy(p, f->data, (size_t)f->len);
+
+ return p;
+}
+
+int ossl_quic_wire_encode_frame_max_data(WPACKET *pkt,
+ uint64_t max_data)
+{
+ if (!encode_frame_hdr(pkt, OSSL_QUIC_FRAME_TYPE_MAX_DATA)
+ || !WPACKET_quic_write_vlint(pkt, max_data))
+ return 0;
+
+ return 1;
+}
+
+int ossl_quic_wire_encode_frame_max_stream_data(WPACKET *pkt,
+ uint64_t stream_id,
+ uint64_t max_data)
+{
+ if (!encode_frame_hdr(pkt, OSSL_QUIC_FRAME_TYPE_MAX_STREAM_DATA)
+ || !WPACKET_quic_write_vlint(pkt, stream_id)
+ || !WPACKET_quic_write_vlint(pkt, max_data))
+ return 0;
+
+ return 1;
+}
+
+int ossl_quic_wire_encode_frame_max_streams(WPACKET *pkt,
+ char is_uni,
+ uint64_t max_streams)
+{
+ if (!encode_frame_hdr(pkt, is_uni ? OSSL_QUIC_FRAME_TYPE_MAX_STREAMS_UNI
+ : OSSL_QUIC_FRAME_TYPE_MAX_STREAMS_BIDI)
+ || !WPACKET_quic_write_vlint(pkt, max_streams))
+ return 0;
+
+ return 1;
+}
+
+int ossl_quic_wire_encode_frame_data_blocked(WPACKET *pkt,
+ uint64_t max_data)
+{
+ if (!encode_frame_hdr(pkt, OSSL_QUIC_FRAME_TYPE_DATA_BLOCKED)
+ || !WPACKET_quic_write_vlint(pkt, max_data))
+ return 0;
+
+ return 1;
+}
+
+int ossl_quic_wire_encode_frame_stream_data_blocked(WPACKET *pkt,
+ uint64_t stream_id,
+ uint64_t max_stream_data)
+{
+ if (!encode_frame_hdr(pkt, OSSL_QUIC_FRAME_TYPE_STREAM_DATA_BLOCKED)
+ || !WPACKET_quic_write_vlint(pkt, stream_id)
+ || !WPACKET_quic_write_vlint(pkt, max_stream_data))
+ return 0;
+
+ return 1;
+}
+
+int ossl_quic_wire_encode_frame_streams_blocked(WPACKET *pkt,
+ char is_uni,
+ uint64_t max_streams)
+{
+ if (!encode_frame_hdr(pkt, is_uni ? OSSL_QUIC_FRAME_TYPE_STREAMS_BLOCKED_UNI
+ : OSSL_QUIC_FRAME_TYPE_STREAMS_BLOCKED_BIDI)
+ || !WPACKET_quic_write_vlint(pkt, max_streams))
+ return 0;
+
+ return 1;
+}
+
+int ossl_quic_wire_encode_frame_new_conn_id(WPACKET *pkt,
+ const OSSL_QUIC_FRAME_NEW_CONN_ID *f)
+{
+ if (f->conn_id.id_len < 1
+ || f->conn_id.id_len > QUIC_MAX_CONN_ID_LEN)
+ return 0;
+
+ if (!encode_frame_hdr(pkt, OSSL_QUIC_FRAME_TYPE_NEW_CONN_ID)
+ || !WPACKET_quic_write_vlint(pkt, f->seq_num)
+ || !WPACKET_quic_write_vlint(pkt, f->retire_prior_to)
+ || !WPACKET_put_bytes_u8(pkt, f->conn_id.id_len)
+ || !WPACKET_memcpy(pkt, f->conn_id.id, f->conn_id.id_len)
+ || !WPACKET_memcpy(pkt, f->stateless_reset.token,
+ sizeof(f->stateless_reset.token)))
+ return 0;
+
+ return 1;
+}
+
+int ossl_quic_wire_encode_frame_retire_conn_id(WPACKET *pkt,
+ uint64_t seq_num)
+{
+ if (!encode_frame_hdr(pkt, OSSL_QUIC_FRAME_TYPE_RETIRE_CONN_ID)
+ || !WPACKET_quic_write_vlint(pkt, seq_num))
+ return 0;
+
+ return 1;
+}
+
+int ossl_quic_wire_encode_frame_path_challenge(WPACKET *pkt,
+ uint64_t data)
+{
+ if (!encode_frame_hdr(pkt, OSSL_QUIC_FRAME_TYPE_PATH_CHALLENGE)
+ || !WPACKET_put_bytes_u64(pkt, data))
+ return 0;
+
+ return 1;
+}
+
+int ossl_quic_wire_encode_frame_path_response(WPACKET *pkt,
+ uint64_t data)
+{
+ if (!encode_frame_hdr(pkt, OSSL_QUIC_FRAME_TYPE_PATH_RESPONSE)
+ || !WPACKET_put_bytes_u64(pkt, data))
+ return 0;
+
+ return 1;
+}
+
+int ossl_quic_wire_encode_frame_conn_close(WPACKET *pkt,
+ const OSSL_QUIC_FRAME_CONN_CLOSE *f)
+{
+ if (!encode_frame_hdr(pkt, f->is_app ? OSSL_QUIC_FRAME_TYPE_CONN_CLOSE_APP
+ : OSSL_QUIC_FRAME_TYPE_CONN_CLOSE_TRANSPORT)
+ || !WPACKET_quic_write_vlint(pkt, f->error_code))
+ return 0;
+
+ /*
+ * RFC 9000 s. 19.19: The application-specific variant of CONNECTION_CLOSE
+ * (type 0x1d) does not include this field.
+ */
+ if (!f->is_app && !WPACKET_quic_write_vlint(pkt, f->frame_type))
+ return 0;
+
+ if (!WPACKET_quic_write_vlint(pkt, f->reason_len)
+ || !WPACKET_memcpy(pkt, f->reason, f->reason_len))
+ return 0;
+
+ return 1;
+}
+
+int ossl_quic_wire_encode_frame_handshake_done(WPACKET *pkt)
+{
+ return encode_frame_hdr(pkt, OSSL_QUIC_FRAME_TYPE_HANDSHAKE_DONE);
+}
+
+unsigned char *ossl_quic_wire_encode_transport_param_bytes(WPACKET *pkt,
+ uint64_t id,
+ const unsigned char *value,
+ size_t value_len)
+{
+ unsigned char *b = NULL;
+
+ if (!WPACKET_quic_write_vlint(pkt, id)
+ || !WPACKET_quic_write_vlint(pkt, value_len))
+ return NULL;
+
+ if (value_len == 0)
+ b = WPACKET_get_curr(pkt);
+ else if (!WPACKET_allocate_bytes(pkt, value_len, (unsigned char **)&b))
+ return NULL;
+
+ if (value != NULL)
+ memcpy(b, value, value_len);
+
+ return b;
+}
+
+int ossl_quic_wire_encode_transport_param_int(WPACKET *pkt,
+ uint64_t id,
+ uint64_t value)
+{
+ if (!WPACKET_quic_write_vlint(pkt, id)
+ || !WPACKET_quic_write_vlint(pkt, ossl_quic_vlint_encode_len(value))
+ || !WPACKET_quic_write_vlint(pkt, value))
+ return 0;
+
+ return 1;
+}
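+
+/*
+ * Worked example: encoding id 0x04 with value 1000 emits 0x04 (id), 0x02
+ * (length of the inner varint) and 0x43 0xe8 (1000 as a two-byte varint);
+ * i.e. the integer is itself varint-encoded inside the generic
+ * length-prefixed envelope produced above.
+ */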
+
+int ossl_quic_wire_encode_transport_param_cid(WPACKET *wpkt,
+ uint64_t id,
+ const QUIC_CONN_ID *cid)
+{
+ if (cid->id_len > QUIC_MAX_CONN_ID_LEN)
+ return 0;
+
+ if (ossl_quic_wire_encode_transport_param_bytes(wpkt, id,
+ cid->id,
+ cid->id_len) == NULL)
+ return 0;
+
+ return 1;
+}
+
+/*
+ * QUIC Wire Format Decoding
+ * =========================
+ */
+int ossl_quic_wire_peek_frame_header(PACKET *pkt, uint64_t *type,
+ int *was_minimal)
+{
+ return PACKET_peek_quic_vlint_ex(pkt, type, was_minimal);
+}
+
+int ossl_quic_wire_skip_frame_header(PACKET *pkt, uint64_t *type)
+{
+ return PACKET_get_quic_vlint(pkt, type);
+}
+
+static int expect_frame_header_mask(PACKET *pkt,
+ uint64_t expected_frame_type,
+ uint64_t mask_bits,
+ uint64_t *actual_frame_type)
+{
+ uint64_t actual_frame_type_;
+
+ if (!ossl_quic_wire_skip_frame_header(pkt, &actual_frame_type_)
+ || (actual_frame_type_ & ~mask_bits) != expected_frame_type)
+ return 0;
+
+ if (actual_frame_type != NULL)
+ *actual_frame_type = actual_frame_type_;
+
+ return 1;
+}
+
+static int expect_frame_header(PACKET *pkt, uint64_t expected_frame_type)
+{
+ uint64_t actual_frame_type;
+
+ if (!ossl_quic_wire_skip_frame_header(pkt, &actual_frame_type)
+ || actual_frame_type != expected_frame_type)
+ return 0;
+
+ return 1;
+}
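+
+/*
+ * Example: ACK frames use the types 0x02 (without ECN) and 0x03 (with ECN),
+ * which differ only in the low bit. Calling expect_frame_header_mask() with
+ * an expected type of 0x02 and mask_bits of 1 therefore matches both
+ * variants, and the caller can recover the exact type seen via
+ * *actual_frame_type.
+ */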
+
+int ossl_quic_wire_peek_frame_ack_num_ranges(const PACKET *orig_pkt,
+ uint64_t *total_ranges)
+{
+ PACKET pkt = *orig_pkt;
+ uint64_t ack_range_count, i;
+
+ if (!expect_frame_header_mask(&pkt, OSSL_QUIC_FRAME_TYPE_ACK_WITHOUT_ECN,
+ 1, NULL)
+ || !PACKET_skip_quic_vlint(&pkt)
+ || !PACKET_skip_quic_vlint(&pkt)
+ || !PACKET_get_quic_vlint(&pkt, &ack_range_count))
+ return 0;
+
+ /*
+     * Ensure that the number of ACK ranges listed in the ACK frame header is
+     * actually present in the frame data. This naturally bounds the number
+     * of ACK ranges by the MDPL, and therefore by the MTU, ensuring we do
+     * not allocate memory for an excessive number of ACK ranges.
+ */
+ for (i = 0; i < ack_range_count; ++i)
+ if (!PACKET_skip_quic_vlint(&pkt)
+ || !PACKET_skip_quic_vlint(&pkt))
+ return 0;
+
+ /* (cannot overflow because QUIC vlints can only encode up to 2**62-1) */
+ *total_ranges = ack_range_count + 1;
+ return 1;
+}
+
+int ossl_quic_wire_decode_frame_ack(PACKET *pkt,
+                                    uint32_t ack_delay_exponent,
+                                    OSSL_QUIC_FRAME_ACK *ack,
+                                    uint64_t *total_ranges)
+{
+ uint64_t frame_type, largest_ackd, ack_delay_raw;
+ uint64_t ack_range_count, first_ack_range, start, end, i;
+
+ /* This call matches both ACK_WITHOUT_ECN and ACK_WITH_ECN. */
+ if (!expect_frame_header_mask(pkt, OSSL_QUIC_FRAME_TYPE_ACK_WITHOUT_ECN,
+ 1, &frame_type)
+ || !PACKET_get_quic_vlint(pkt, &largest_ackd)
+ || !PACKET_get_quic_vlint(pkt, &ack_delay_raw)
+ || !PACKET_get_quic_vlint(pkt, &ack_range_count)
+ || !PACKET_get_quic_vlint(pkt, &first_ack_range))
+ return 0;
+
+ if (first_ack_range > largest_ackd)
+ return 0;
+
+ if (ack_range_count > SIZE_MAX /* sizeof(uint64_t) > sizeof(size_t)? */)
+ return 0;
+
+ start = largest_ackd - first_ack_range;
+
+ if (ack != NULL) {
+ int err = 0;
+ ack->delay_time
+ = ossl_time_multiply(ossl_ticks2time(OSSL_TIME_US),
+ safe_mul_uint64_t(ack_delay_raw,
+ (uint64_t)1 << ack_delay_exponent,
+ &err));
+ if (err)
+ ack->delay_time = ossl_time_infinite();
+
+ if (ack->num_ack_ranges > 0) {
+ ack->ack_ranges[0].end = largest_ackd;
+ ack->ack_ranges[0].start = start;
+ }
+ }
+
+ for (i = 0; i < ack_range_count; ++i) {
+ uint64_t gap, len;
+
+ if (!PACKET_get_quic_vlint(pkt, &gap)
+ || !PACKET_get_quic_vlint(pkt, &len))
+ return 0;
+
+ end = start - gap - 2;
+ if (start < gap + 2 || len > end)
+ return 0;
+
+ if (ack != NULL && i + 1 < ack->num_ack_ranges) {
+ ack->ack_ranges[i + 1].start = start = end - len;
+ ack->ack_ranges[i + 1].end = end;
+ }
+ }
+
+ if (ack != NULL && ack_range_count + 1 < ack->num_ack_ranges)
+ ack->num_ack_ranges = (size_t)ack_range_count + 1;
+
+ if (total_ranges != NULL)
+ *total_ranges = ack_range_count + 1;
+
+ if (frame_type == OSSL_QUIC_FRAME_TYPE_ACK_WITH_ECN) {
+ uint64_t ect0, ect1, ecnce;
+
+ if (!PACKET_get_quic_vlint(pkt, &ect0)
+ || !PACKET_get_quic_vlint(pkt, &ect1)
+ || !PACKET_get_quic_vlint(pkt, &ecnce))
+ return 0;
+
+ if (ack != NULL) {
+ ack->ect0 = ect0;
+ ack->ect1 = ect1;
+ ack->ecnce = ecnce;
+ ack->ecn_present = 1;
+ }
+ } else if (ack != NULL) {
+ ack->ecn_present = 0;
+ }
+
+ return 1;
+}
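+
+/*
+ * Worked example of the range arithmetic above: a Largest Acknowledged of
+ * 100 with a First ACK Range of 5 yields the range [95, 100]. A following
+ * Gap of 2 and Length of 3 then give end = 95 - 2 - 2 = 91 and
+ * start = 91 - 3 = 88, i.e. the range [88, 91], with PNs 92..94 left
+ * unacknowledged (RFC 9000 s. 19.3.1).
+ */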
+
+int ossl_quic_wire_decode_frame_reset_stream(PACKET *pkt,
+ OSSL_QUIC_FRAME_RESET_STREAM *f)
+{
+ if (!expect_frame_header(pkt, OSSL_QUIC_FRAME_TYPE_RESET_STREAM)
+ || !PACKET_get_quic_vlint(pkt, &f->stream_id)
+ || !PACKET_get_quic_vlint(pkt, &f->app_error_code)
+ || !PACKET_get_quic_vlint(pkt, &f->final_size))
+ return 0;
+
+ return 1;
+}
+
+int ossl_quic_wire_decode_frame_stop_sending(PACKET *pkt,
+ OSSL_QUIC_FRAME_STOP_SENDING *f)
+{
+ if (!expect_frame_header(pkt, OSSL_QUIC_FRAME_TYPE_STOP_SENDING)
+ || !PACKET_get_quic_vlint(pkt, &f->stream_id)
+ || !PACKET_get_quic_vlint(pkt, &f->app_error_code))
+ return 0;
+
+ return 1;
+}
+
+int ossl_quic_wire_decode_frame_crypto(PACKET *pkt,
+ int nodata,
+ OSSL_QUIC_FRAME_CRYPTO *f)
+{
+ if (!expect_frame_header(pkt, OSSL_QUIC_FRAME_TYPE_CRYPTO)
+ || !PACKET_get_quic_vlint(pkt, &f->offset)
+ || !PACKET_get_quic_vlint(pkt, &f->len)
+ || f->len > SIZE_MAX /* sizeof(uint64_t) > sizeof(size_t)? */)
+ return 0;
+
+ if (f->offset + f->len > (((uint64_t)1) << 62) - 1)
+ /* RFC 9000 s. 19.6 */
+ return 0;
+
+ if (nodata) {
+ f->data = NULL;
+ } else {
+ if (PACKET_remaining(pkt) < f->len)
+ return 0;
+
+ f->data = PACKET_data(pkt);
+
+ if (!PACKET_forward(pkt, (size_t)f->len))
+ return 0;
+ }
+
+ return 1;
+}
+
+int ossl_quic_wire_decode_frame_new_token(PACKET *pkt,
+ const unsigned char **token,
+ size_t *token_len)
+{
+ uint64_t token_len_;
+
+ if (!expect_frame_header(pkt, OSSL_QUIC_FRAME_TYPE_NEW_TOKEN)
+ || !PACKET_get_quic_vlint(pkt, &token_len_))
+ return 0;
+
+ if (token_len_ > SIZE_MAX)
+ return 0;
+
+ *token = PACKET_data(pkt);
+ *token_len = (size_t)token_len_;
+
+ if (!PACKET_forward(pkt, (size_t)token_len_))
+ return 0;
+
+ return 1;
+}
+
+int ossl_quic_wire_decode_frame_stream(PACKET *pkt,
+ int nodata,
+ OSSL_QUIC_FRAME_STREAM *f)
+{
+ uint64_t frame_type;
+
+ /* This call matches all STREAM values (low 3 bits are masked). */
+ if (!expect_frame_header_mask(pkt, OSSL_QUIC_FRAME_TYPE_STREAM,
+ OSSL_QUIC_FRAME_FLAG_STREAM_MASK,
+ &frame_type)
+ || !PACKET_get_quic_vlint(pkt, &f->stream_id))
+ return 0;
+
+ if ((frame_type & OSSL_QUIC_FRAME_FLAG_STREAM_OFF) != 0) {
+ if (!PACKET_get_quic_vlint(pkt, &f->offset))
+ return 0;
+ } else {
+ f->offset = 0;
+ }
+
+ f->has_explicit_len = ((frame_type & OSSL_QUIC_FRAME_FLAG_STREAM_LEN) != 0);
+ f->is_fin = ((frame_type & OSSL_QUIC_FRAME_FLAG_STREAM_FIN) != 0);
+
+ if (f->has_explicit_len) {
+ if (!PACKET_get_quic_vlint(pkt, &f->len))
+ return 0;
+ } else {
+ if (nodata)
+ f->len = 0;
+ else
+ f->len = PACKET_remaining(pkt);
+ }
+
+ /*
+ * RFC 9000 s. 19.8: "The largest offset delivered on a stream -- the sum of
+ * the offset and data length -- cannot exceed 2**62 - 1, as it is not
+ * possible to provide flow control credit for that data."
+ */
+ if (f->offset + f->len > (((uint64_t)1) << 62) - 1)
+ return 0;
+
+ if (nodata) {
+ f->data = NULL;
+ } else {
+ f->data = PACKET_data(pkt);
+
+ if (f->len > SIZE_MAX /* sizeof(uint64_t) > sizeof(size_t)? */
+ || !PACKET_forward(pkt, (size_t)f->len))
+ return 0;
+ }
+
+ return 1;
+}
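+
+/*
+ * Example: STREAM frame types occupy 0x08..0x0f, the low three bits acting
+ * as flags: OFF (0x04) selects an explicit Offset field, LEN (0x02) an
+ * explicit Length field, and FIN (0x01) marks the end of the stream. Type
+ * 0x0e therefore carries Offset and Length fields but does not terminate
+ * the stream.
+ */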
+
+int ossl_quic_wire_decode_frame_max_data(PACKET *pkt,
+ uint64_t *max_data)
+{
+ if (!expect_frame_header(pkt, OSSL_QUIC_FRAME_TYPE_MAX_DATA)
+ || !PACKET_get_quic_vlint(pkt, max_data))
+ return 0;
+
+ return 1;
+}
+
+int ossl_quic_wire_decode_frame_max_stream_data(PACKET *pkt,
+ uint64_t *stream_id,
+ uint64_t *max_stream_data)
+{
+ if (!expect_frame_header(pkt, OSSL_QUIC_FRAME_TYPE_MAX_STREAM_DATA)
+ || !PACKET_get_quic_vlint(pkt, stream_id)
+ || !PACKET_get_quic_vlint(pkt, max_stream_data))
+ return 0;
+
+ return 1;
+}
+
+int ossl_quic_wire_decode_frame_max_streams(PACKET *pkt,
+ uint64_t *max_streams)
+{
+ /* This call matches both MAX_STREAMS_BIDI and MAX_STREAMS_UNI. */
+ if (!expect_frame_header_mask(pkt, OSSL_QUIC_FRAME_TYPE_MAX_STREAMS_BIDI,
+ 1, NULL)
+ || !PACKET_get_quic_vlint(pkt, max_streams))
+ return 0;
+
+ return 1;
+}
+
+int ossl_quic_wire_decode_frame_data_blocked(PACKET *pkt,
+ uint64_t *max_data)
+{
+ if (!expect_frame_header(pkt, OSSL_QUIC_FRAME_TYPE_DATA_BLOCKED)
+ || !PACKET_get_quic_vlint(pkt, max_data))
+ return 0;
+
+ return 1;
+}
+
+int ossl_quic_wire_decode_frame_stream_data_blocked(PACKET *pkt,
+ uint64_t *stream_id,
+ uint64_t *max_stream_data)
+{
+ if (!expect_frame_header(pkt, OSSL_QUIC_FRAME_TYPE_STREAM_DATA_BLOCKED)
+ || !PACKET_get_quic_vlint(pkt, stream_id)
+ || !PACKET_get_quic_vlint(pkt, max_stream_data))
+ return 0;
+
+ return 1;
+}
+
+int ossl_quic_wire_decode_frame_streams_blocked(PACKET *pkt,
+ uint64_t *max_streams)
+{
+ /* This call matches both STREAMS_BLOCKED_BIDI and STREAMS_BLOCKED_UNI. */
+ if (!expect_frame_header_mask(pkt, OSSL_QUIC_FRAME_TYPE_STREAMS_BLOCKED_BIDI,
+ 1, NULL)
+ || !PACKET_get_quic_vlint(pkt, max_streams))
+ return 0;
+
+ return 1;
+}
+
+int ossl_quic_wire_decode_frame_new_conn_id(PACKET *pkt,
+ OSSL_QUIC_FRAME_NEW_CONN_ID *f)
+{
+ unsigned int len;
+
+ if (!expect_frame_header(pkt, OSSL_QUIC_FRAME_TYPE_NEW_CONN_ID)
+ || !PACKET_get_quic_vlint(pkt, &f->seq_num)
+ || !PACKET_get_quic_vlint(pkt, &f->retire_prior_to)
+ || f->seq_num < f->retire_prior_to
+ || !PACKET_get_1(pkt, &len)
+ || len < 1
+ || len > QUIC_MAX_CONN_ID_LEN)
+ return 0;
+
+ f->conn_id.id_len = (unsigned char)len;
+ if (!PACKET_copy_bytes(pkt, f->conn_id.id, len))
+ return 0;
+
+ /* Clear unused bytes to allow consistent memcmp. */
+ if (len < QUIC_MAX_CONN_ID_LEN)
+ memset(f->conn_id.id + len, 0, QUIC_MAX_CONN_ID_LEN - len);
+
+ if (!PACKET_copy_bytes(pkt, f->stateless_reset.token,
+ sizeof(f->stateless_reset.token)))
+ return 0;
+
+ return 1;
+}
+
+int ossl_quic_wire_decode_frame_retire_conn_id(PACKET *pkt,
+ uint64_t *seq_num)
+{
+ if (!expect_frame_header(pkt, OSSL_QUIC_FRAME_TYPE_RETIRE_CONN_ID)
+ || !PACKET_get_quic_vlint(pkt, seq_num))
+ return 0;
+
+ return 1;
+}
+
+int ossl_quic_wire_decode_frame_path_challenge(PACKET *pkt,
+ uint64_t *data)
+{
+ if (!expect_frame_header(pkt, OSSL_QUIC_FRAME_TYPE_PATH_CHALLENGE)
+ || !PACKET_get_net_8(pkt, data))
+ return 0;
+
+ return 1;
+}
+
+int ossl_quic_wire_decode_frame_path_response(PACKET *pkt,
+ uint64_t *data)
+{
+ if (!expect_frame_header(pkt, OSSL_QUIC_FRAME_TYPE_PATH_RESPONSE)
+ || !PACKET_get_net_8(pkt, data))
+ return 0;
+
+ return 1;
+}
+
+int ossl_quic_wire_decode_frame_conn_close(PACKET *pkt,
+ OSSL_QUIC_FRAME_CONN_CLOSE *f)
+{
+ uint64_t frame_type, reason_len;
+
+ /* This call matches both CONN_CLOSE_TRANSPORT and CONN_CLOSE_APP. */
+ if (!expect_frame_header_mask(pkt, OSSL_QUIC_FRAME_TYPE_CONN_CLOSE_TRANSPORT,
+ 1, &frame_type)
+ || !PACKET_get_quic_vlint(pkt, &f->error_code))
+ return 0;
+
+ f->is_app = ((frame_type & 1) != 0);
+
+ if (!f->is_app) {
+ if (!PACKET_get_quic_vlint(pkt, &f->frame_type))
+ return 0;
+ } else {
+ f->frame_type = 0;
+ }
+
+ if (!PACKET_get_quic_vlint(pkt, &reason_len)
+ || reason_len > SIZE_MAX)
+ return 0;
+
+ if (!PACKET_get_bytes(pkt, (const unsigned char **)&f->reason,
+ (size_t)reason_len))
+ return 0;
+
+ f->reason_len = (size_t)reason_len;
+ return 1;
+}
+
+size_t ossl_quic_wire_decode_padding(PACKET *pkt)
+{
+ const unsigned char *start = PACKET_data(pkt), *end = PACKET_end(pkt),
+ *p = start;
+
+ while (p < end && *p == 0)
+ ++p;
+
+ if (!PACKET_forward(pkt, p - start))
+ return 0;
+
+ return p - start;
+}
+
+int ossl_quic_wire_decode_frame_ping(PACKET *pkt)
+{
+ return expect_frame_header(pkt, OSSL_QUIC_FRAME_TYPE_PING);
+}
+
+int ossl_quic_wire_decode_frame_handshake_done(PACKET *pkt)
+{
+ return expect_frame_header(pkt, OSSL_QUIC_FRAME_TYPE_HANDSHAKE_DONE);
+}
+
+int ossl_quic_wire_peek_transport_param(PACKET *pkt, uint64_t *id)
+{
+ return PACKET_peek_quic_vlint(pkt, id);
+}
+
+const unsigned char *ossl_quic_wire_decode_transport_param_bytes(PACKET *pkt,
+ uint64_t *id,
+ size_t *len)
+{
+ uint64_t len_;
+ const unsigned char *b = NULL;
+ uint64_t id_;
+
+ if (!PACKET_get_quic_vlint(pkt, &id_)
+ || !PACKET_get_quic_vlint(pkt, &len_))
+ return NULL;
+
+ if (len_ > SIZE_MAX
+ || !PACKET_get_bytes(pkt, (const unsigned char **)&b, (size_t)len_))
+ return NULL;
+
+ *len = (size_t)len_;
+ if (id != NULL)
+ *id = id_;
+ return b;
+}
+
+int ossl_quic_wire_decode_transport_param_int(PACKET *pkt,
+ uint64_t *id,
+ uint64_t *value)
+{
+ PACKET sub;
+
+ sub.curr = ossl_quic_wire_decode_transport_param_bytes(pkt,
+ id, &sub.remaining);
+ if (sub.curr == NULL)
+ return 0;
+
+ if (!PACKET_get_quic_vlint(&sub, value))
+ return 0;
+
+ if (PACKET_remaining(&sub) > 0)
+ return 0;
+
+ return 1;
+}
+
+int ossl_quic_wire_decode_transport_param_cid(PACKET *pkt,
+ uint64_t *id,
+ QUIC_CONN_ID *cid)
+{
+ const unsigned char *body;
+ size_t len = 0;
+
+ body = ossl_quic_wire_decode_transport_param_bytes(pkt, id, &len);
+ if (body == NULL || len > QUIC_MAX_CONN_ID_LEN)
+ return 0;
+
+ cid->id_len = (unsigned char)len;
+ memcpy(cid->id, body, cid->id_len);
+ return 1;
+}
+
+int ossl_quic_wire_decode_transport_param_preferred_addr(PACKET *pkt,
+ QUIC_PREFERRED_ADDR *p)
+{
+ const unsigned char *body;
+ uint64_t id;
+ size_t len = 0;
+ PACKET pkt2;
+ unsigned int ipv4_port, ipv6_port, cidl;
+
+ body = ossl_quic_wire_decode_transport_param_bytes(pkt, &id, &len);
+ if (body == NULL
+ || len < QUIC_MIN_ENCODED_PREFERRED_ADDR_LEN
+ || len > QUIC_MAX_ENCODED_PREFERRED_ADDR_LEN
+ || id != QUIC_TPARAM_PREFERRED_ADDR)
+ return 0;
+
+ if (!PACKET_buf_init(&pkt2, body, len))
+ return 0;
+
+ if (!PACKET_copy_bytes(&pkt2, p->ipv4, sizeof(p->ipv4))
+ || !PACKET_get_net_2(&pkt2, &ipv4_port)
+ || !PACKET_copy_bytes(&pkt2, p->ipv6, sizeof(p->ipv6))
+ || !PACKET_get_net_2(&pkt2, &ipv6_port)
+ || !PACKET_get_1(&pkt2, &cidl)
+ || cidl > QUIC_MAX_CONN_ID_LEN
+ || !PACKET_copy_bytes(&pkt2, p->cid.id, cidl)
+ || !PACKET_copy_bytes(&pkt2, p->stateless_reset.token,
+ sizeof(p->stateless_reset.token)))
+ return 0;
+
+ p->ipv4_port = (uint16_t)ipv4_port;
+ p->ipv6_port = (uint16_t)ipv6_port;
+ p->cid.id_len = (unsigned char)cidl;
+ return 1;
+}
+
+const char *ossl_quic_frame_type_to_string(uint64_t frame_type)
+{
+ switch (frame_type) {
+#define X(name) case OSSL_QUIC_FRAME_TYPE_##name: return #name;
+ X(PADDING)
+ X(PING)
+ X(ACK_WITHOUT_ECN)
+ X(ACK_WITH_ECN)
+ X(RESET_STREAM)
+ X(STOP_SENDING)
+ X(CRYPTO)
+ X(NEW_TOKEN)
+ X(MAX_DATA)
+ X(MAX_STREAM_DATA)
+ X(MAX_STREAMS_BIDI)
+ X(MAX_STREAMS_UNI)
+ X(DATA_BLOCKED)
+ X(STREAM_DATA_BLOCKED)
+ X(STREAMS_BLOCKED_BIDI)
+ X(STREAMS_BLOCKED_UNI)
+ X(NEW_CONN_ID)
+ X(RETIRE_CONN_ID)
+ X(PATH_CHALLENGE)
+ X(PATH_RESPONSE)
+ X(CONN_CLOSE_TRANSPORT)
+ X(CONN_CLOSE_APP)
+ X(HANDSHAKE_DONE)
+ X(STREAM)
+ X(STREAM_FIN)
+ X(STREAM_LEN)
+ X(STREAM_LEN_FIN)
+ X(STREAM_OFF)
+ X(STREAM_OFF_FIN)
+ X(STREAM_OFF_LEN)
+ X(STREAM_OFF_LEN_FIN)
+#undef X
+ default:
+ return NULL;
+ }
+}
+
+const char *ossl_quic_err_to_string(uint64_t error_code)
+{
+ switch (error_code) {
+#define X(name) case OSSL_QUIC_ERR_##name: return #name;
+ X(NO_ERROR)
+ X(INTERNAL_ERROR)
+ X(CONNECTION_REFUSED)
+ X(FLOW_CONTROL_ERROR)
+ X(STREAM_LIMIT_ERROR)
+ X(STREAM_STATE_ERROR)
+ X(FINAL_SIZE_ERROR)
+ X(FRAME_ENCODING_ERROR)
+ X(TRANSPORT_PARAMETER_ERROR)
+ X(CONNECTION_ID_LIMIT_ERROR)
+ X(PROTOCOL_VIOLATION)
+ X(INVALID_TOKEN)
+ X(APPLICATION_ERROR)
+ X(CRYPTO_BUFFER_EXCEEDED)
+ X(KEY_UPDATE_ERROR)
+ X(AEAD_LIMIT_REACHED)
+ X(NO_VIABLE_PATH)
+#undef X
+ default:
+ return NULL;
+ }
+}
diff --git a/crypto/openssl/ssl/quic/quic_wire_pkt.c b/crypto/openssl/ssl/quic/quic_wire_pkt.c
new file mode 100644
index 000000000000..b6ee898eddf7
--- /dev/null
+++ b/crypto/openssl/ssl/quic/quic_wire_pkt.c
@@ -0,0 +1,962 @@
+/*
+ * Copyright 2022-2025 The OpenSSL Project Authors. All Rights Reserved.
+ *
+ * Licensed under the Apache License 2.0 (the "License"). You may not use
+ * this file except in compliance with the License. You can obtain a copy
+ * in the file LICENSE in the source distribution or at
+ * https://www.openssl.org/source/license.html
+ */
+
+#include <openssl/err.h>
+#include "internal/common.h"
+#include "internal/quic_wire_pkt.h"
+
+int ossl_quic_hdr_protector_init(QUIC_HDR_PROTECTOR *hpr,
+ OSSL_LIB_CTX *libctx,
+ const char *propq,
+ uint32_t cipher_id,
+ const unsigned char *quic_hp_key,
+ size_t quic_hp_key_len)
+{
+ const char *cipher_name = NULL;
+
+ switch (cipher_id) {
+ case QUIC_HDR_PROT_CIPHER_AES_128:
+ cipher_name = "AES-128-ECB";
+ break;
+ case QUIC_HDR_PROT_CIPHER_AES_256:
+ cipher_name = "AES-256-ECB";
+ break;
+ case QUIC_HDR_PROT_CIPHER_CHACHA:
+ cipher_name = "ChaCha20";
+ break;
+ default:
+ ERR_raise(ERR_LIB_SSL, ERR_R_UNSUPPORTED);
+ return 0;
+ }
+
+ hpr->cipher_ctx = EVP_CIPHER_CTX_new();
+ if (hpr->cipher_ctx == NULL) {
+ ERR_raise(ERR_LIB_SSL, ERR_R_EVP_LIB);
+ return 0;
+ }
+
+ hpr->cipher = EVP_CIPHER_fetch(libctx, cipher_name, propq);
+ if (hpr->cipher == NULL
+ || quic_hp_key_len != (size_t)EVP_CIPHER_get_key_length(hpr->cipher)) {
+ ERR_raise(ERR_LIB_SSL, ERR_R_EVP_LIB);
+ goto err;
+ }
+
+ if (!EVP_CipherInit_ex(hpr->cipher_ctx, hpr->cipher, NULL,
+ quic_hp_key, NULL, 1)) {
+ ERR_raise(ERR_LIB_SSL, ERR_R_EVP_LIB);
+ goto err;
+ }
+
+ hpr->libctx = libctx;
+ hpr->propq = propq;
+ hpr->cipher_id = cipher_id;
+ return 1;
+
+err:
+ ossl_quic_hdr_protector_cleanup(hpr);
+ return 0;
+}
+
+void ossl_quic_hdr_protector_cleanup(QUIC_HDR_PROTECTOR *hpr)
+{
+ EVP_CIPHER_CTX_free(hpr->cipher_ctx);
+ hpr->cipher_ctx = NULL;
+
+ EVP_CIPHER_free(hpr->cipher);
+ hpr->cipher = NULL;
+}
+
+static int hdr_generate_mask(QUIC_HDR_PROTECTOR *hpr,
+ const unsigned char *sample, size_t sample_len,
+ unsigned char *mask)
+{
+ int l = 0;
+ unsigned char dst[16];
+ static const unsigned char zeroes[5] = {0};
+ size_t i;
+
+ if (hpr->cipher_id == QUIC_HDR_PROT_CIPHER_AES_128
+ || hpr->cipher_id == QUIC_HDR_PROT_CIPHER_AES_256) {
+ if (sample_len < 16) {
+ ERR_raise(ERR_LIB_SSL, ERR_R_PASSED_INVALID_ARGUMENT);
+ return 0;
+ }
+
+ if (!EVP_CipherInit_ex(hpr->cipher_ctx, NULL, NULL, NULL, NULL, 1)
+ || !EVP_CipherUpdate(hpr->cipher_ctx, dst, &l, sample, 16)) {
+ ERR_raise(ERR_LIB_SSL, ERR_R_EVP_LIB);
+ return 0;
+ }
+
+ for (i = 0; i < 5; ++i)
+ mask[i] = dst[i];
+ } else if (hpr->cipher_id == QUIC_HDR_PROT_CIPHER_CHACHA) {
+ if (sample_len < 16) {
+ ERR_raise(ERR_LIB_SSL, ERR_R_PASSED_INVALID_ARGUMENT);
+ return 0;
+ }
+
+ if (!EVP_CipherInit_ex(hpr->cipher_ctx, NULL, NULL, NULL, sample, 1)
+ || !EVP_CipherUpdate(hpr->cipher_ctx, mask, &l,
+ zeroes, sizeof(zeroes))) {
+ ERR_raise(ERR_LIB_SSL, ERR_R_EVP_LIB);
+ return 0;
+ }
+ } else {
+ ERR_raise(ERR_LIB_SSL, ERR_R_INTERNAL_ERROR);
+ assert(0);
+ return 0;
+ }
+
+#ifdef FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION
+    /* No matter what we did above, we use the same mask in fuzzing mode. */
+ memset(mask, 0, 5);
+#endif
+
+ return 1;
+}
+
+int ossl_quic_hdr_protector_decrypt(QUIC_HDR_PROTECTOR *hpr,
+ QUIC_PKT_HDR_PTRS *ptrs)
+{
+ return ossl_quic_hdr_protector_decrypt_fields(hpr,
+ ptrs->raw_sample,
+ ptrs->raw_sample_len,
+ ptrs->raw_start,
+ ptrs->raw_pn);
+}
+
+int ossl_quic_hdr_protector_decrypt_fields(QUIC_HDR_PROTECTOR *hpr,
+ const unsigned char *sample,
+ size_t sample_len,
+ unsigned char *first_byte,
+ unsigned char *pn_bytes)
+{
+ unsigned char mask[5], pn_len, i;
+
+ if (!hdr_generate_mask(hpr, sample, sample_len, mask))
+ return 0;
+
+ *first_byte ^= mask[0] & ((*first_byte & 0x80) != 0 ? 0xf : 0x1f);
+ pn_len = (*first_byte & 0x3) + 1;
+
+ for (i = 0; i < pn_len; ++i)
+ pn_bytes[i] ^= mask[i + 1];
+
+ return 1;
+}
+
+int ossl_quic_hdr_protector_encrypt(QUIC_HDR_PROTECTOR *hpr,
+ QUIC_PKT_HDR_PTRS *ptrs)
+{
+ return ossl_quic_hdr_protector_encrypt_fields(hpr,
+ ptrs->raw_sample,
+ ptrs->raw_sample_len,
+ ptrs->raw_start,
+ ptrs->raw_pn);
+}
+
+int ossl_quic_hdr_protector_encrypt_fields(QUIC_HDR_PROTECTOR *hpr,
+ const unsigned char *sample,
+ size_t sample_len,
+ unsigned char *first_byte,
+ unsigned char *pn_bytes)
+{
+ unsigned char mask[5], pn_len, i;
+
+ if (!hdr_generate_mask(hpr, sample, sample_len, mask))
+ return 0;
+
+ pn_len = (*first_byte & 0x3) + 1;
+ for (i = 0; i < pn_len; ++i)
+ pn_bytes[i] ^= mask[i + 1];
+
+ *first_byte ^= mask[0] & ((*first_byte & 0x80) != 0 ? 0xf : 0x1f);
+ return 1;
+}
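+
+/*
+ * Worked example (RFC 9001 s. 5.4.1): given a mask m[0..4], protection XORs
+ * the low 5 bits of a short header's first byte (low 4 bits for long
+ * headers) with m[0], and each PN byte i with m[i + 1]. Encryption reads
+ * pn_len from the still-cleartext first byte before masking it, while
+ * decryption must unmask the first byte first to learn pn_len, which is why
+ * the two functions above apply the same XORs in opposite orders.
+ */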
+
+int ossl_quic_wire_decode_pkt_hdr(PACKET *pkt,
+ size_t short_conn_id_len,
+ int partial,
+ int nodata,
+ QUIC_PKT_HDR *hdr,
+ QUIC_PKT_HDR_PTRS *ptrs,
+ uint64_t *fail_cause)
+{
+ unsigned int b0;
+ unsigned char *pn = NULL;
+ size_t l = PACKET_remaining(pkt);
+
+ if (fail_cause != NULL)
+ *fail_cause = QUIC_PKT_HDR_DECODE_DECODE_ERR;
+
+ if (ptrs != NULL) {
+ ptrs->raw_start = (unsigned char *)PACKET_data(pkt);
+ ptrs->raw_sample = NULL;
+ ptrs->raw_sample_len = 0;
+ ptrs->raw_pn = NULL;
+ }
+
+ if (l < QUIC_MIN_VALID_PKT_LEN
+ || !PACKET_get_1(pkt, &b0))
+ return 0;
+
+ hdr->partial = partial;
+ hdr->unused = 0;
+ hdr->reserved = 0;
+
+ if ((b0 & 0x80) == 0) {
+ /* Short header. */
+ if (short_conn_id_len > QUIC_MAX_CONN_ID_LEN)
+ return 0;
+
+ if ((b0 & 0x40) == 0 /* fixed bit not set? */
+ || l < QUIC_MIN_VALID_PKT_LEN_CRYPTO)
+ return 0;
+
+ hdr->type = QUIC_PKT_TYPE_1RTT;
+ hdr->fixed = 1;
+ hdr->spin_bit = (b0 & 0x20) != 0;
+ if (partial) {
+ hdr->key_phase = 0; /* protected, zero for now */
+ hdr->pn_len = 0; /* protected, zero for now */
+ hdr->reserved = 0; /* protected, zero for now */
+ } else {
+ hdr->key_phase = (b0 & 0x04) != 0;
+ hdr->pn_len = (b0 & 0x03) + 1;
+ hdr->reserved = (b0 & 0x18) >> 3;
+ }
+
+ /* Copy destination connection ID field to header structure. */
+ if (!PACKET_copy_bytes(pkt, hdr->dst_conn_id.id, short_conn_id_len))
+ return 0;
+
+ hdr->dst_conn_id.id_len = (unsigned char)short_conn_id_len;
+
+ /*
+ * Skip over the PN. If this is a partial decode, the PN length field
+ * currently has header protection applied. Thus we do not know the
+ * length of the PN but we are allowed to assume it is 4 bytes long at
+ * this stage.
+ */
+ memset(hdr->pn, 0, sizeof(hdr->pn));
+ pn = (unsigned char *)PACKET_data(pkt);
+ if (partial) {
+ if (!PACKET_forward(pkt, sizeof(hdr->pn)))
+ return 0;
+ } else {
+ if (!PACKET_copy_bytes(pkt, hdr->pn, hdr->pn_len))
+ return 0;
+ }
+
+ /* Fields not used in short-header packets. */
+ hdr->version = 0;
+ hdr->src_conn_id.id_len = 0;
+ hdr->token = NULL;
+ hdr->token_len = 0;
+
+ /*
+         * Short-header packets always come last in a datagram, so the
+         * length is the remainder of the buffer.
+ */
+ hdr->len = PACKET_remaining(pkt);
+ hdr->data = PACKET_data(pkt);
+
+ /*
+ * Skip over payload. Since this is a short header packet, which cannot
+ * be followed by any other kind of packet, this advances us to the end
+ * of the datagram.
+ */
+ if (!PACKET_forward(pkt, hdr->len))
+ return 0;
+ } else {
+ /* Long header. */
+ unsigned long version;
+ unsigned int dst_conn_id_len, src_conn_id_len, raw_type;
+
+ if (!PACKET_get_net_4(pkt, &version))
+ return 0;
+
+ /*
+         * All QUIC packets must have the fixed bit set, with the sole
+         * exception of Version Negotiation packets.
+ */
+ if (version != 0 && (b0 & 0x40) == 0)
+ return 0;
+
+ if (!PACKET_get_1(pkt, &dst_conn_id_len)
+ || dst_conn_id_len > QUIC_MAX_CONN_ID_LEN
+ || !PACKET_copy_bytes(pkt, hdr->dst_conn_id.id, dst_conn_id_len)
+ || !PACKET_get_1(pkt, &src_conn_id_len)
+ || src_conn_id_len > QUIC_MAX_CONN_ID_LEN
+ || !PACKET_copy_bytes(pkt, hdr->src_conn_id.id, src_conn_id_len))
+ return 0;
+
+ hdr->version = (uint32_t)version;
+ hdr->dst_conn_id.id_len = (unsigned char)dst_conn_id_len;
+ hdr->src_conn_id.id_len = (unsigned char)src_conn_id_len;
+
+ if (version == 0) {
+ /*
+ * Version negotiation packet. Version negotiation packets are
+             * identified by a version field of 0; the type bits in the first
+             * byte may take any value and are ignored.
+ */
+ hdr->type = QUIC_PKT_TYPE_VERSION_NEG;
+ hdr->fixed = (b0 & 0x40) != 0;
+
+ hdr->data = PACKET_data(pkt);
+ hdr->len = PACKET_remaining(pkt);
+
+ /*
+             * Version negotiation packets must contain an array of u32s, so
+             * their payload length must be divisible by 4.
+ */
+ if ((hdr->len % 4) != 0)
+ return 0;
+
+ /* Version negotiation packets are always fully decoded. */
+ hdr->partial = 0;
+
+ /* Fields not used in version negotiation packets. */
+ hdr->pn_len = 0;
+ hdr->spin_bit = 0;
+ hdr->key_phase = 0;
+ hdr->token = NULL;
+ hdr->token_len = 0;
+ memset(hdr->pn, 0, sizeof(hdr->pn));
+
+ if (!PACKET_forward(pkt, hdr->len))
+ return 0;
+ } else if (version != QUIC_VERSION_1) {
+ if (fail_cause != NULL)
+ *fail_cause |= QUIC_PKT_HDR_DECODE_BAD_VERSION;
+ /* Unknown version, do not decode. */
+ return 0;
+ } else {
+ if (l < QUIC_MIN_VALID_PKT_LEN_CRYPTO)
+ return 0;
+
+ /* Get long packet type and decode to QUIC_PKT_TYPE_*. */
+ raw_type = ((b0 >> 4) & 0x3);
+
+ switch (raw_type) {
+ case 0:
+ hdr->type = QUIC_PKT_TYPE_INITIAL;
+ break;
+ case 1:
+ hdr->type = QUIC_PKT_TYPE_0RTT;
+ break;
+ case 2:
+ hdr->type = QUIC_PKT_TYPE_HANDSHAKE;
+ break;
+ case 3:
+ hdr->type = QUIC_PKT_TYPE_RETRY;
+ break;
+ }
+
+ hdr->pn_len = 0;
+ hdr->fixed = 1;
+
+ /* Fields not used in long-header packets. */
+ hdr->spin_bit = 0;
+ hdr->key_phase = 0;
+
+ if (hdr->type == QUIC_PKT_TYPE_INITIAL) {
+ /* Initial packet. */
+ uint64_t token_len;
+
+ if (!PACKET_get_quic_vlint(pkt, &token_len)
+ || token_len > SIZE_MAX
+ || !PACKET_get_bytes(pkt, &hdr->token, (size_t)token_len))
+ return 0;
+
+ hdr->token_len = (size_t)token_len;
+ if (token_len == 0)
+ hdr->token = NULL;
+ } else {
+ hdr->token = NULL;
+ hdr->token_len = 0;
+ }
+
+ if (hdr->type == QUIC_PKT_TYPE_RETRY) {
+ /* Retry packet. */
+ hdr->data = PACKET_data(pkt);
+ hdr->len = PACKET_remaining(pkt);
+
+ /* Retry packets are always fully decoded. */
+ hdr->partial = 0;
+
+ /* Unused bits in Retry header. */
+ hdr->unused = b0 & 0x0f;
+
+ /* Fields not used in Retry packets. */
+ memset(hdr->pn, 0, sizeof(hdr->pn));
+
+ if (!PACKET_forward(pkt, hdr->len))
+ return 0;
+ } else {
+ /* Initial, 0-RTT or Handshake packet. */
+ uint64_t len;
+
+ hdr->pn_len = partial ? 0 : ((b0 & 0x03) + 1);
+ hdr->reserved = partial ? 0 : ((b0 & 0x0C) >> 2);
+
+ if (!PACKET_get_quic_vlint(pkt, &len)
+ || len < sizeof(hdr->pn))
+ return 0;
+
+ if (!nodata && len > PACKET_remaining(pkt))
+ return 0;
+
+ /*
+ * Skip over the PN. If this is a partial decode, the PN length
+ * field currently has header protection applied. Thus we do not
+ * know the length of the PN but we are allowed to assume it is
+ * 4 bytes long at this stage.
+ */
+ pn = (unsigned char *)PACKET_data(pkt);
+ memset(hdr->pn, 0, sizeof(hdr->pn));
+ if (partial) {
+ if (!PACKET_forward(pkt, sizeof(hdr->pn)))
+ return 0;
+
+ hdr->len = (size_t)(len - sizeof(hdr->pn));
+ } else {
+ if (!PACKET_copy_bytes(pkt, hdr->pn, hdr->pn_len))
+ return 0;
+
+ hdr->len = (size_t)(len - hdr->pn_len);
+ }
+
+ if (nodata) {
+ hdr->data = NULL;
+ } else {
+ hdr->data = PACKET_data(pkt);
+
+ /* Skip over packet body. */
+ if (!PACKET_forward(pkt, hdr->len))
+ return 0;
+ }
+ }
+ }
+ }
+
+ if (ptrs != NULL) {
+ ptrs->raw_pn = pn;
+ if (pn != NULL) {
+ ptrs->raw_sample = pn + 4;
+ ptrs->raw_sample_len = PACKET_end(pkt) - ptrs->raw_sample;
+ }
+ }
+
+ /*
+     * Good decode; clear the generic DECODE_ERR flag.
+ */
+ if (fail_cause != NULL)
+ *fail_cause &= ~QUIC_PKT_HDR_DECODE_DECODE_ERR;
+
+ return 1;
+}
+
+int ossl_quic_wire_encode_pkt_hdr(WPACKET *pkt,
+ size_t short_conn_id_len,
+ const QUIC_PKT_HDR *hdr,
+ QUIC_PKT_HDR_PTRS *ptrs)
+{
+ unsigned char b0;
+ size_t off_start, off_sample, off_pn;
+ unsigned char *start = WPACKET_get_curr(pkt);
+
+ if (!WPACKET_get_total_written(pkt, &off_start))
+ return 0;
+
+ if (ptrs != NULL) {
+ /* ptrs would not be stable on non-static WPACKET */
+ if (!ossl_assert(pkt->staticbuf != NULL))
+ return 0;
+ ptrs->raw_start = NULL;
+ ptrs->raw_sample = NULL;
+ ptrs->raw_sample_len = 0;
+        ptrs->raw_pn = NULL;
+ }
+
+ /* Cannot serialize a partial header, or one whose DCID length is wrong. */
+ if (hdr->partial
+ || (hdr->type == QUIC_PKT_TYPE_1RTT
+ && hdr->dst_conn_id.id_len != short_conn_id_len))
+ return 0;
+
+ if (hdr->type == QUIC_PKT_TYPE_1RTT) {
+ /* Short header. */
+
+ /*
+ * Cannot serialize a header whose DCID length is wrong, or with an
+ * invalid PN length.
+ */
+ if (hdr->dst_conn_id.id_len != short_conn_id_len
+ || short_conn_id_len > QUIC_MAX_CONN_ID_LEN
+ || hdr->pn_len < 1 || hdr->pn_len > 4)
+ return 0;
+
+ b0 = (hdr->spin_bit << 5)
+ | (hdr->key_phase << 2)
+ | (hdr->pn_len - 1)
+ | (hdr->reserved << 3)
+ | 0x40; /* fixed bit */
+
+ if (!WPACKET_put_bytes_u8(pkt, b0)
+ || !WPACKET_memcpy(pkt, hdr->dst_conn_id.id, short_conn_id_len)
+ || !WPACKET_get_total_written(pkt, &off_pn)
+ || !WPACKET_memcpy(pkt, hdr->pn, hdr->pn_len))
+ return 0;
+ } else {
+ /* Long header. */
+ unsigned int raw_type;
+
+ if (hdr->dst_conn_id.id_len > QUIC_MAX_CONN_ID_LEN
+ || hdr->src_conn_id.id_len > QUIC_MAX_CONN_ID_LEN)
+ return 0;
+
+ if (ossl_quic_pkt_type_has_pn(hdr->type)
+ && (hdr->pn_len < 1 || hdr->pn_len > 4))
+ return 0;
+
+ switch (hdr->type) {
+ case QUIC_PKT_TYPE_VERSION_NEG:
+ if (hdr->version != 0)
+ return 0;
+
+ /* Version negotiation packets use zero for the type bits */
+ raw_type = 0;
+ break;
+
+ case QUIC_PKT_TYPE_INITIAL: raw_type = 0; break;
+ case QUIC_PKT_TYPE_0RTT: raw_type = 1; break;
+ case QUIC_PKT_TYPE_HANDSHAKE: raw_type = 2; break;
+ case QUIC_PKT_TYPE_RETRY: raw_type = 3; break;
+ default:
+ return 0;
+ }
+
+ b0 = (raw_type << 4) | 0x80; /* long */
+ if (hdr->type != QUIC_PKT_TYPE_VERSION_NEG || hdr->fixed)
+ b0 |= 0x40; /* fixed */
+ if (ossl_quic_pkt_type_has_pn(hdr->type)) {
+ b0 |= hdr->pn_len - 1;
+ b0 |= (hdr->reserved << 2);
+ }
+ if (hdr->type == QUIC_PKT_TYPE_RETRY)
+ b0 |= hdr->unused;
+
+ if (!WPACKET_put_bytes_u8(pkt, b0)
+ || !WPACKET_put_bytes_u32(pkt, hdr->version)
+ || !WPACKET_put_bytes_u8(pkt, hdr->dst_conn_id.id_len)
+ || !WPACKET_memcpy(pkt, hdr->dst_conn_id.id,
+ hdr->dst_conn_id.id_len)
+ || !WPACKET_put_bytes_u8(pkt, hdr->src_conn_id.id_len)
+ || !WPACKET_memcpy(pkt, hdr->src_conn_id.id,
+ hdr->src_conn_id.id_len))
+ return 0;
+
+ if (hdr->type == QUIC_PKT_TYPE_VERSION_NEG) {
+ if (hdr->len > 0 && !WPACKET_reserve_bytes(pkt, hdr->len, NULL))
+ return 0;
+
+ return 1;
+ }
+
+ if (hdr->type == QUIC_PKT_TYPE_INITIAL) {
+ if (!WPACKET_quic_write_vlint(pkt, hdr->token_len)
+ || !WPACKET_memcpy(pkt, hdr->token, hdr->token_len))
+ return 0;
+ }
+
+ if (hdr->type == QUIC_PKT_TYPE_RETRY) {
+ if (!WPACKET_memcpy(pkt, hdr->token, hdr->token_len))
+ return 0;
+ return 1;
+ }
+
+ if (!WPACKET_quic_write_vlint(pkt, hdr->len + hdr->pn_len)
+ || !WPACKET_get_total_written(pkt, &off_pn)
+ || !WPACKET_memcpy(pkt, hdr->pn, hdr->pn_len))
+ return 0;
+ }
+
+ if (hdr->len > 0 && !WPACKET_reserve_bytes(pkt, hdr->len, NULL))
+ return 0;
+
+ off_sample = off_pn + 4;
+
+ if (ptrs != NULL) {
+ ptrs->raw_start = start;
+ ptrs->raw_sample = start + (off_sample - off_start);
+ ptrs->raw_sample_len
+ = WPACKET_get_curr(pkt) + hdr->len - ptrs->raw_sample;
+ ptrs->raw_pn = start + (off_pn - off_start);
+ }
+
+ return 1;
+}
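+
+/*
+ * Note on the sample position used above and in decoding: per RFC 9001
+ * s. 5.4.2 the header protection sample is taken starting 4 bytes after the
+ * first byte of the Packet Number field, regardless of the actual PN
+ * length, hence off_sample = off_pn + 4 (and raw_sample = pn + 4 on the
+ * decode side).
+ */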
+
+int ossl_quic_wire_get_encoded_pkt_hdr_len(size_t short_conn_id_len,
+ const QUIC_PKT_HDR *hdr)
+{
+ size_t len = 0, enclen;
+
+ /* Cannot serialize a partial header, or one whose DCID length is wrong. */
+ if (hdr->partial
+ || (hdr->type == QUIC_PKT_TYPE_1RTT
+ && hdr->dst_conn_id.id_len != short_conn_id_len))
+ return 0;
+
+ if (hdr->type == QUIC_PKT_TYPE_1RTT) {
+ /* Short header. */
+
+ /*
+ * Cannot serialize a header whose DCID length is wrong, or with an
+ * invalid PN length.
+ */
+ if (hdr->dst_conn_id.id_len != short_conn_id_len
+ || short_conn_id_len > QUIC_MAX_CONN_ID_LEN
+ || hdr->pn_len < 1 || hdr->pn_len > 4)
+ return 0;
+
+ return 1 + short_conn_id_len + hdr->pn_len;
+ } else {
+ /* Long header. */
+ if (hdr->dst_conn_id.id_len > QUIC_MAX_CONN_ID_LEN
+ || hdr->src_conn_id.id_len > QUIC_MAX_CONN_ID_LEN)
+ return 0;
+
+ len += 1 /* Initial byte */ + 4 /* Version */
+ + 1 + hdr->dst_conn_id.id_len /* DCID Len, DCID */
+ + 1 + hdr->src_conn_id.id_len /* SCID Len, SCID */
+ ;
+
+ if (ossl_quic_pkt_type_has_pn(hdr->type)) {
+ if (hdr->pn_len < 1 || hdr->pn_len > 4)
+ return 0;
+
+ len += hdr->pn_len;
+ }
+
+ if (hdr->type == QUIC_PKT_TYPE_INITIAL) {
+ enclen = ossl_quic_vlint_encode_len(hdr->token_len);
+ if (!enclen)
+ return 0;
+
+ len += enclen + hdr->token_len;
+ }
+
+ if (!ossl_quic_pkt_type_must_be_last(hdr->type)) {
+ enclen = ossl_quic_vlint_encode_len(hdr->len + hdr->pn_len);
+ if (!enclen)
+ return 0;
+
+ len += enclen;
+ }
+
+ return len;
+ }
+}
+
+int ossl_quic_wire_get_pkt_hdr_dst_conn_id(const unsigned char *buf,
+ size_t buf_len,
+ size_t short_conn_id_len,
+ QUIC_CONN_ID *dst_conn_id)
+{
+ unsigned char b0;
+ size_t blen;
+
+ if (buf_len < QUIC_MIN_VALID_PKT_LEN
+ || short_conn_id_len > QUIC_MAX_CONN_ID_LEN)
+ return 0;
+
+ b0 = buf[0];
+ if ((b0 & 0x80) != 0) {
+ /*
+         * Long header. To begin with we need 6 bytes (initial byte, 4
+         * version bytes, DCID length byte); this is covered by the buf_len
+         * test above.
+ */
+
+ /*
+ * If the version field is non-zero (meaning that this is not a Version
+ * Negotiation packet), the fixed bit must be set.
+ */
+ if ((buf[1] || buf[2] || buf[3] || buf[4]) && (b0 & 0x40) == 0)
+ return 0;
+
+ blen = (size_t)buf[5]; /* DCID Length */
+ if (blen > QUIC_MAX_CONN_ID_LEN
+ || buf_len < QUIC_MIN_VALID_PKT_LEN + blen)
+ return 0;
+
+ dst_conn_id->id_len = (unsigned char)blen;
+ memcpy(dst_conn_id->id, buf + 6, blen);
+ return 1;
+ } else {
+ /* Short header. */
+ if ((b0 & 0x40) == 0)
+ /* Fixed bit not set, not a valid QUIC packet header. */
+ return 0;
+
+ if (buf_len < QUIC_MIN_VALID_PKT_LEN_CRYPTO + short_conn_id_len)
+ return 0;
+
+ dst_conn_id->id_len = (unsigned char)short_conn_id_len;
+ memcpy(dst_conn_id->id, buf + 1, short_conn_id_len);
+ return 1;
+ }
+}
+
+int ossl_quic_wire_decode_pkt_hdr_pn(const unsigned char *enc_pn,
+ size_t enc_pn_len,
+ QUIC_PN largest_pn,
+ QUIC_PN *res_pn)
+{
+ int64_t expected_pn, truncated_pn, candidate_pn, pn_win, pn_hwin, pn_mask;
+
+ switch (enc_pn_len) {
+ case 1:
+ truncated_pn = enc_pn[0];
+ break;
+ case 2:
+ truncated_pn = ((QUIC_PN)enc_pn[0] << 8)
+ | (QUIC_PN)enc_pn[1];
+ break;
+ case 3:
+ truncated_pn = ((QUIC_PN)enc_pn[0] << 16)
+ | ((QUIC_PN)enc_pn[1] << 8)
+ | (QUIC_PN)enc_pn[2];
+ break;
+ case 4:
+ truncated_pn = ((QUIC_PN)enc_pn[0] << 24)
+ | ((QUIC_PN)enc_pn[1] << 16)
+ | ((QUIC_PN)enc_pn[2] << 8)
+ | (QUIC_PN)enc_pn[3];
+ break;
+ default:
+ return 0;
+ }
+
+ /* Implemented as per RFC 9000 Section A.3. */
+ expected_pn = largest_pn + 1;
+ pn_win = ((int64_t)1) << (enc_pn_len * 8);
+ pn_hwin = pn_win / 2;
+ pn_mask = pn_win - 1;
+ candidate_pn = (expected_pn & ~pn_mask) | truncated_pn;
+ if (candidate_pn <= expected_pn - pn_hwin
+ && candidate_pn < (((int64_t)1) << 62) - pn_win)
+ *res_pn = candidate_pn + pn_win;
+ else if (candidate_pn > expected_pn + pn_hwin
+ && candidate_pn >= pn_win)
+ *res_pn = candidate_pn - pn_win;
+ else
+ *res_pn = candidate_pn;
+ return 1;
+}
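+
+/*
+ * Worked example (from RFC 9000 s. A.3): with largest_pn 0xa82f30ea and a
+ * two-byte encoded PN of 0x9b32, expected_pn is 0xa82f30eb, pn_win is
+ * 0x10000 and candidate_pn is 0xa82f9b32. The candidate lies within half a
+ * window of expected_pn, so *res_pn is 0xa82f9b32.
+ */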
+
+/* From RFC 9000 Section A.2. Simplified implementation. */
+int ossl_quic_wire_determine_pn_len(QUIC_PN pn,
+ QUIC_PN largest_acked)
+{
+ uint64_t num_unacked
+ = (largest_acked == QUIC_PN_INVALID) ? pn + 1 : pn - largest_acked;
+
+ /*
+     * num_unacked \in [0,     2**7]  -> 1 byte
+     * num_unacked \in (2**7,  2**15] -> 2 bytes
+     * num_unacked \in (2**15, 2**23] -> 3 bytes
+     * num_unacked \in (2**23, ...]   -> 4 bytes
+ */
+
+ if (num_unacked <= (1U<<7)) return 1;
+ if (num_unacked <= (1U<<15)) return 2;
+ if (num_unacked <= (1U<<23)) return 3;
+ return 4;
+}
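+
+/*
+ * Worked example: with largest_acked 0x100 and pn 0x1234, num_unacked is
+ * 0x1134 (4404), which exceeds 2**7 but not 2**15, so a two-byte encoded PN
+ * is selected.
+ */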
+
+int ossl_quic_wire_encode_pkt_hdr_pn(QUIC_PN pn,
+ unsigned char *enc_pn,
+ size_t enc_pn_len)
+{
+ switch (enc_pn_len) {
+ case 1:
+ enc_pn[0] = (unsigned char)pn;
+ break;
+ case 2:
+ enc_pn[1] = (unsigned char)pn;
+ enc_pn[0] = (unsigned char)(pn >> 8);
+ break;
+ case 3:
+ enc_pn[2] = (unsigned char)pn;
+ enc_pn[1] = (unsigned char)(pn >> 8);
+ enc_pn[0] = (unsigned char)(pn >> 16);
+ break;
+ case 4:
+ enc_pn[3] = (unsigned char)pn;
+ enc_pn[2] = (unsigned char)(pn >> 8);
+ enc_pn[1] = (unsigned char)(pn >> 16);
+ enc_pn[0] = (unsigned char)(pn >> 24);
+ break;
+ default:
+ return 0;
+ }
+
+ return 1;
+}
+
+int ossl_quic_validate_retry_integrity_tag(OSSL_LIB_CTX *libctx,
+ const char *propq,
+ const QUIC_PKT_HDR *hdr,
+ const QUIC_CONN_ID *client_initial_dcid)
+{
+ unsigned char expected_tag[QUIC_RETRY_INTEGRITY_TAG_LEN];
+ const unsigned char *actual_tag;
+
+ if (hdr == NULL || hdr->len < QUIC_RETRY_INTEGRITY_TAG_LEN)
+ return 0;
+
+ if (!ossl_quic_calculate_retry_integrity_tag(libctx, propq,
+ hdr, client_initial_dcid,
+ expected_tag))
+ return 0;
+
+ actual_tag = hdr->data + hdr->len - QUIC_RETRY_INTEGRITY_TAG_LEN;
+
+ return !CRYPTO_memcmp(expected_tag, actual_tag,
+ QUIC_RETRY_INTEGRITY_TAG_LEN);
+}
+
+/* RFC 9001 s. 5.8 */
+static const unsigned char retry_integrity_key[] = {
+ 0xbe, 0x0c, 0x69, 0x0b, 0x9f, 0x66, 0x57, 0x5a,
+ 0x1d, 0x76, 0x6b, 0x54, 0xe3, 0x68, 0xc8, 0x4e
+};
+
+static const unsigned char retry_integrity_nonce[] = {
+ 0x46, 0x15, 0x99, 0xd3, 0x5d, 0x63, 0x2b, 0xf2,
+ 0x23, 0x98, 0x25, 0xbb
+};
+
+int ossl_quic_calculate_retry_integrity_tag(OSSL_LIB_CTX *libctx,
+ const char *propq,
+ const QUIC_PKT_HDR *hdr,
+ const QUIC_CONN_ID *client_initial_dcid,
+ unsigned char *tag)
+{
+ EVP_CIPHER *cipher = NULL;
+ EVP_CIPHER_CTX *cctx = NULL;
+ int ok = 0, l = 0, l2 = 0, wpkt_valid = 0;
+ WPACKET wpkt;
+    /* Worst case length of the Retry Pseudo-Packet header is 68 bytes. */
+ unsigned char buf[128];
+ QUIC_PKT_HDR hdr2;
+ size_t hdr_enc_len = 0;
+
+ if (hdr->type != QUIC_PKT_TYPE_RETRY || hdr->version == 0
+ || hdr->len < QUIC_RETRY_INTEGRITY_TAG_LEN
+ || hdr->data == NULL
+ || client_initial_dcid == NULL || tag == NULL
+ || client_initial_dcid->id_len > QUIC_MAX_CONN_ID_LEN) {
+ ERR_raise(ERR_LIB_SSL, ERR_R_PASSED_INVALID_ARGUMENT);
+ goto err;
+ }
+
+ /*
+ * Do not reserve packet body in WPACKET. Retry packet header
+ * does not contain a Length field so this does not affect
+ * the serialized packet header.
+ */
+ hdr2 = *hdr;
+ hdr2.len = 0;
+
+    /* Assemble retry pseudo-packet. */
+ if (!WPACKET_init_static_len(&wpkt, buf, sizeof(buf), 0)) {
+ ERR_raise(ERR_LIB_SSL, ERR_R_CRYPTO_LIB);
+ goto err;
+ }
+
+ wpkt_valid = 1;
+
+ /* Prepend original DCID to the packet. */
+ if (!WPACKET_put_bytes_u8(&wpkt, client_initial_dcid->id_len)
+ || !WPACKET_memcpy(&wpkt, client_initial_dcid->id,
+ client_initial_dcid->id_len)) {
+ ERR_raise(ERR_LIB_SSL, ERR_R_CRYPTO_LIB);
+ goto err;
+ }
+
+ /* Encode main retry header. */
+ if (!ossl_quic_wire_encode_pkt_hdr(&wpkt, hdr2.dst_conn_id.id_len,
+ &hdr2, NULL))
+ goto err;
+
+ if (!WPACKET_get_total_written(&wpkt, &hdr_enc_len)) {
+ ERR_raise(ERR_LIB_SSL, ERR_R_CRYPTO_LIB);
+ goto err;
+ }
+
+ /* Create and initialise cipher context. */
+ /* TODO(QUIC FUTURE): Cipher fetch caching. */
+ if ((cipher = EVP_CIPHER_fetch(libctx, "AES-128-GCM", propq)) == NULL) {
+ ERR_raise(ERR_LIB_SSL, ERR_R_EVP_LIB);
+ goto err;
+ }
+
+ if ((cctx = EVP_CIPHER_CTX_new()) == NULL) {
+ ERR_raise(ERR_LIB_SSL, ERR_R_EVP_LIB);
+ goto err;
+ }
+
+ if (!EVP_CipherInit_ex(cctx, cipher, NULL,
+ retry_integrity_key, retry_integrity_nonce, /*enc=*/1)) {
+ ERR_raise(ERR_LIB_SSL, ERR_R_EVP_LIB);
+ goto err;
+ }
+
+ /* Feed packet header as AAD data. */
+ if (EVP_CipherUpdate(cctx, NULL, &l, buf, hdr_enc_len) != 1) {
+ ERR_raise(ERR_LIB_SSL, ERR_R_EVP_LIB);
+ goto err;
+ }
+
+ /* Feed packet body as AAD data. */
+ if (EVP_CipherUpdate(cctx, NULL, &l, hdr->data,
+ hdr->len - QUIC_RETRY_INTEGRITY_TAG_LEN) != 1) {
+ ERR_raise(ERR_LIB_SSL, ERR_R_EVP_LIB);
+ goto err;
+ }
+
+ /* Finalise and get tag. */
+ if (EVP_CipherFinal_ex(cctx, NULL, &l2) != 1) {
+ ERR_raise(ERR_LIB_SSL, ERR_R_EVP_LIB);
+ goto err;
+ }
+
+ if (EVP_CIPHER_CTX_ctrl(cctx, EVP_CTRL_AEAD_GET_TAG,
+ QUIC_RETRY_INTEGRITY_TAG_LEN,
+ tag) != 1) {
+ ERR_raise(ERR_LIB_SSL, ERR_R_EVP_LIB);
+ goto err;
+ }
+
+ ok = 1;
+err:
+ EVP_CIPHER_free(cipher);
+ EVP_CIPHER_CTX_free(cctx);
+ if (wpkt_valid)
+ WPACKET_finish(&wpkt);
+
+ return ok;
+}
diff --git a/crypto/openssl/ssl/quic/uint_set.c b/crypto/openssl/ssl/quic/uint_set.c
new file mode 100644
index 000000000000..faca906003b0
--- /dev/null
+++ b/crypto/openssl/ssl/quic/uint_set.c
@@ -0,0 +1,332 @@
+/*
+ * Copyright 2022-2023 The OpenSSL Project Authors. All Rights Reserved.
+ *
+ * Licensed under the Apache License 2.0 (the "License"). You may not use
+ * this file except in compliance with the License. You can obtain a copy
+ * in the file LICENSE in the source distribution or at
+ * https://www.openssl.org/source/license.html
+ */
+
+#include "internal/uint_set.h"
+#include "internal/common.h"
+#include <assert.h>
+
+/*
+ * uint64_t Integer Sets
+ * =====================
+ *
+ * This data structure supports the following operations:
+ *
+ * Insert Range: Adds an inclusive range of integers [start, end]
+ * to the set. Equivalent to Insert for each number
+ * in the range.
+ *
+ * Remove Range: Removes an inclusive range of integers [start, end]
+ * from the set. Not all of the range need already be in
+ * the set, but any part of the range in the set is removed.
+ *
+ * Query: Is an integer in the data structure?
+ *
+ * The data structure can be iterated.
+ *
+ * For greater efficiency in tracking large numbers of contiguous integers, we
+ * track integer ranges rather than individual integers. The data structure
+ * manages a list of integer ranges [[start, end]...]. Internally this is
+ * implemented as a doubly linked sorted list of range structures, which are
+ * automatically split and merged as necessary.
+ *
+ * This data structure requires O(n) traversal of the list for insertion,
+ * removal and query when we are not adding/removing ranges which are near the
+ * beginning or end of the set of ranges. For the applications for which this
+ * data structure is used (e.g. QUIC PN tracking for ACK generation), it is
+ * expected that the number of integer ranges needed at any given time will
+ * generally be small and that most operations will be close to the beginning or
+ * end of the range.
+ *
+ * Invariant: The data structure is always sorted in ascending order by value.
+ *
+ * Invariant: No two adjacent ranges ever 'border' one another (have no
+ * numerical gap between them) as the data structure always ensures
+ * such ranges are merged.
+ *
+ * Invariant: No two ranges ever overlap.
+ *
+ * Invariant: No range [a, b] ever has a > b.
+ *
+ * Invariant: Since ranges are represented using inclusive bounds, no range
+ * item inside the data structure can represent a span of zero
+ * integers.
+ */
+void ossl_uint_set_init(UINT_SET *s)
+{
+ ossl_list_uint_set_init(s);
+}
+
+void ossl_uint_set_destroy(UINT_SET *s)
+{
+ UINT_SET_ITEM *x, *xnext;
+
+ for (x = ossl_list_uint_set_head(s); x != NULL; x = xnext) {
+ xnext = ossl_list_uint_set_next(x);
+ OPENSSL_free(x);
+ }
+}
+
+/* Possible merge of x, prev(x) */
+static void uint_set_merge_adjacent(UINT_SET *s, UINT_SET_ITEM *x)
+{
+ UINT_SET_ITEM *xprev = ossl_list_uint_set_prev(x);
+
+ if (xprev == NULL)
+ return;
+
+ if (x->range.start - 1 != xprev->range.end)
+ return;
+
+ x->range.start = xprev->range.start;
+ ossl_list_uint_set_remove(s, xprev);
+ OPENSSL_free(xprev);
+}
+
+static uint64_t u64_min(uint64_t x, uint64_t y)
+{
+ return x < y ? x : y;
+}
+
+static uint64_t u64_max(uint64_t x, uint64_t y)
+{
+ return x > y ? x : y;
+}
+
+/*
+ * Returns 1 if there exists an integer x which falls within both ranges a and
+ * b.
+ */
+static int uint_range_overlaps(const UINT_RANGE *a,
+ const UINT_RANGE *b)
+{
+ return u64_min(a->end, b->end)
+ >= u64_max(a->start, b->start);
+}
+
+static UINT_SET_ITEM *create_set_item(uint64_t start, uint64_t end)
+{
+ UINT_SET_ITEM *x = OPENSSL_malloc(sizeof(UINT_SET_ITEM));
+
+ if (x == NULL)
+ return NULL;
+
+ ossl_list_uint_set_init_elem(x);
+ x->range.start = start;
+ x->range.end = end;
+ return x;
+}
+
+int ossl_uint_set_insert(UINT_SET *s, const UINT_RANGE *range)
+{
+ UINT_SET_ITEM *x, *xnext, *z, *zprev, *f;
+ uint64_t start = range->start, end = range->end;
+
+ if (!ossl_assert(start <= end))
+ return 0;
+
+ if (ossl_list_uint_set_is_empty(s)) {
+ /* Nothing in the set yet, so just add this range. */
+ x = create_set_item(start, end);
+ if (x == NULL)
+ return 0;
+ ossl_list_uint_set_insert_head(s, x);
+ return 1;
+ }
+
+ z = ossl_list_uint_set_tail(s);
+ if (start > z->range.end) {
+ /*
+ * Range is after the latest range in the set, so append.
+ *
+ * Note: The case where the range is before the earliest range in the
+ * set is handled as a degenerate case of the final case below. See
+ * optimization note (*) below.
+ */
+ if (z->range.end + 1 == start) {
+ z->range.end = end;
+ return 1;
+ }
+
+ x = create_set_item(start, end);
+ if (x == NULL)
+ return 0;
+ ossl_list_uint_set_insert_tail(s, x);
+ return 1;
+ }
+
+ f = ossl_list_uint_set_head(s);
+ if (start <= f->range.start && end >= z->range.end) {
+ /*
+ * New range dwarfs all ranges in our set.
+ *
+ * Free everything except the first range in the set, which we scavenge
+ * and reuse.
+ */
+ x = ossl_list_uint_set_head(s);
+ x->range.start = start;
+ x->range.end = end;
+ for (x = ossl_list_uint_set_next(x); x != NULL; x = xnext) {
+ xnext = ossl_list_uint_set_next(x);
+            ossl_list_uint_set_remove(s, x);
+            OPENSSL_free(x);
+        }
+ return 1;
+ }
+
+ /*
+ * Walk backwards since we will most often be inserting at the end. As an
+ * optimization, test the head node first and skip iterating over the
+ * entire list if we are inserting at the start. The assumption is that
+ * insertion at the start and end of the space will be the most common
+ * operations. (*)
+ */
+ z = end < f->range.start ? f : z;
+
+ for (; z != NULL; z = zprev) {
+ zprev = ossl_list_uint_set_prev(z);
+
+ /* An existing range dwarfs our new range (optimisation). */
+ if (z->range.start <= start && z->range.end >= end)
+ return 1;
+
+ if (uint_range_overlaps(&z->range, range)) {
+ /*
+ * Our new range overlaps an existing range, or possibly several
+ * existing ranges.
+ */
+ UINT_SET_ITEM *ovend = z;
+
+ ovend->range.end = u64_max(end, z->range.end);
+
+ /* Get earliest overlapping range. */
+ while (zprev != NULL && uint_range_overlaps(&zprev->range, range)) {
+ z = zprev;
+ zprev = ossl_list_uint_set_prev(z);
+ }
+
+ ovend->range.start = u64_min(start, z->range.start);
+
+ /* Replace sequence of nodes z..ovend with updated ovend only. */
+ while (z != ovend) {
+ z = ossl_list_uint_set_next(x = z);
+ ossl_list_uint_set_remove(s, x);
+ OPENSSL_free(x);
+ }
+ break;
+ } else if (end < z->range.start
+ && (zprev == NULL || start > zprev->range.end)) {
+ if (z->range.start == end + 1) {
+ /* We can extend the following range backwards. */
+ z->range.start = start;
+
+ /*
+ * If this closes a gap we now need to merge
+ * consecutive nodes.
+ */
+ uint_set_merge_adjacent(s, z);
+ } else if (zprev != NULL && zprev->range.end + 1 == start) {
+ /* We can extend the preceding range forwards. */
+ zprev->range.end = end;
+
+ /*
+ * If this closes a gap we now need to merge
+ * consecutive nodes.
+ */
+ uint_set_merge_adjacent(s, z);
+ } else {
+ /*
+ * The new interval is between intervals without overlapping or
+ * touching them, so insert between, preserving sort.
+ */
+ x = create_set_item(start, end);
+ if (x == NULL)
+ return 0;
+ ossl_list_uint_set_insert_before(s, z, x);
+ }
+ break;
+ }
+ }
+
+ return 1;
+}
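+
+/*
+ * Behavioural sketch: starting from an empty set, inserting [1, 3] and then
+ * [5, 9] leaves two list nodes; a further insert of [4, 4] extends [5, 9]
+ * backwards to [4, 9], and uint_set_merge_adjacent() then collapses the now
+ * bordering nodes into the single range [1, 9].
+ */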
+
+int ossl_uint_set_remove(UINT_SET *s, const UINT_RANGE *range)
+{
+ UINT_SET_ITEM *z, *zprev, *y;
+ uint64_t start = range->start, end = range->end;
+
+ if (!ossl_assert(start <= end))
+ return 0;
+
+ /* Walk backwards since we will most often be removing at the end. */
+ for (z = ossl_list_uint_set_tail(s); z != NULL; z = zprev) {
+ zprev = ossl_list_uint_set_prev(z);
+
+ if (start > z->range.end)
+ /* No overlapping ranges can exist beyond this point, so stop. */
+ break;
+
+ if (start <= z->range.start && end >= z->range.end) {
+ /*
+ * The range being removed dwarfs this range, so it should be
+ * removed.
+ */
+ ossl_list_uint_set_remove(s, z);
+ OPENSSL_free(z);
+ } else if (start <= z->range.start && end >= z->range.start) {
+ /*
+ * The range being removed includes start of this range, but does
+ * not cover the entire range (as this would be caught by the case
+ * above). Shorten the range.
+ */
+ assert(end < z->range.end);
+ z->range.start = end + 1;
+ } else if (end >= z->range.end) {
+ /*
+ * The range being removed includes the end of this range, but does
+ * not cover the entire range (as this would be caught by the case
+ * above). Shorten the range. We can also stop iterating.
+ */
+ assert(start > z->range.start);
+ assert(start > 0);
+ z->range.end = start - 1;
+ break;
+ } else if (start > z->range.start && end < z->range.end) {
+ /*
+ * The range being removed falls entirely in this range, so cut it
+ * into two. Cases where a zero-length range would be created are
+ * handled by the above cases.
+ */
+            y = create_set_item(end + 1, z->range.end);
+            if (y == NULL)
+                return 0;
+
+            ossl_list_uint_set_insert_after(s, z, y);
+ z->range.end = start - 1;
+ break;
+ } else {
+ /* Assert no partial overlap; all cases should be covered above. */
+ assert(!uint_range_overlaps(&z->range, range));
+ }
+ }
+
+ return 1;
+}
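+
+/*
+ * Behavioural sketch: removing [4, 6] from a set holding the single range
+ * [1, 9] splits it in two: the existing node is truncated to [1, 3] and a
+ * new node [7, 9] is inserted after it.
+ */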
+
+int ossl_uint_set_query(const UINT_SET *s, uint64_t v)
+{
+ UINT_SET_ITEM *x;
+
+ if (ossl_list_uint_set_is_empty(s))
+ return 0;
+
+ for (x = ossl_list_uint_set_tail(s); x != NULL; x = ossl_list_uint_set_prev(x))
+ if (x->range.start <= v && x->range.end >= v)
+ return 1;
+ else if (x->range.end < v)
+ return 0;
+
+ return 0;
+}