2022-11-30 22:21:00 +08:00
|
|
|
/*
|
2025-03-12 21:35:59 +08:00
|
|
|
* Copyright 2022-2025 The OpenSSL Project Authors. All Rights Reserved.
|
2022-11-30 22:21:00 +08:00
|
|
|
*
|
|
|
|
* Licensed under the Apache License 2.0 (the "License"). You may not use
|
|
|
|
* this file except in compliance with the License. You can obtain a copy
|
|
|
|
* in the file LICENSE in the source distribution or at
|
|
|
|
* https://www.openssl.org/source/license.html
|
|
|
|
*/
|
|
|
|
|
2022-12-01 00:41:31 +08:00
|
|
|
#include <assert.h>
|
2023-03-14 01:36:24 +08:00
|
|
|
#include <openssl/configuration.h>
|
2022-12-13 00:52:50 +08:00
|
|
|
#include <openssl/bio.h>
|
2023-09-23 22:38:13 +08:00
|
|
|
#include "internal/e_os.h" /* For struct timeval */
|
2022-11-30 22:21:00 +08:00
|
|
|
#include "quictestlib.h"
|
2023-03-14 01:36:24 +08:00
|
|
|
#include "ssltestlib.h"
|
2022-11-30 22:21:00 +08:00
|
|
|
#include "../testutil.h"
|
2023-03-14 01:36:24 +08:00
|
|
|
#if defined(OPENSSL_THREADS) && !defined(CRYPTO_TDEBUG)
|
|
|
|
# include "../threadstest.h"
|
|
|
|
#endif
|
2023-08-17 22:23:36 +08:00
|
|
|
#include "internal/quic_ssl.h"
|
2022-12-01 00:41:31 +08:00
|
|
|
#include "internal/quic_wire_pkt.h"
|
|
|
|
#include "internal/quic_record_tx.h"
|
2022-12-07 00:44:09 +08:00
|
|
|
#include "internal/quic_error.h"
|
2022-12-02 23:52:21 +08:00
|
|
|
#include "internal/packet.h"
|
2023-08-01 19:22:58 +08:00
|
|
|
#include "internal/tsan_assist.h"
|
2022-12-01 00:41:31 +08:00
|
|
|
|
|
|
|
#define GROWTH_ALLOWANCE 1024
|
2022-11-30 22:21:00 +08:00
|
|
|
|
2023-10-02 18:47:08 +08:00
|
|
|
/*
 * Arguments shared with noise_msg_callback() when the noisy datagram BIO
 * filter is in use (QTEST_FLAG_NOISE).
 */
struct noise_args_data_st {
    BIO *cbio;      /* client network BIO chain (noise filter at the head) */
    BIO *sbio;      /* server network BIO chain (noise filter at the head) */
    BIO *tracebio;  /* destination for SSL_trace() output, may be NULL */
    int flags;      /* QTEST_FLAG_* flags the connection was created with */
};
|
|
|
|
|
2023-02-06 23:34:05 +08:00
|
|
|
/*
 * Fault injector state. Created alongside the test endpoints by
 * qtest_create_quic_objects() (or stand-alone by qtest_create_injector())
 * and used to register callbacks that observe/mutate traffic between the
 * test client and the QUIC_TSERVER.
 */
struct qtest_fault {
    /* The server instance this injector is attached to */
    QUIC_TSERVER *qtserv;

    /* Plain packet mutations */
    /* Header for the plaintext packet */
    QUIC_PKT_HDR pplainhdr;
    /* iovec for the plaintext packet data buffer */
    OSSL_QTX_IOVEC pplainio;
    /* Allocated size of the plaintext packet data buffer */
    size_t pplainbuf_alloc;
    /* Callback fired (with pplaincbarg) for each plaintext packet */
    qtest_fault_on_packet_plain_cb pplaincb;
    void *pplaincbarg;

    /* Handshake message mutations */
    /* Handshake message buffer */
    unsigned char *handbuf;
    /* Allocated size of the handshake message buffer */
    size_t handbufalloc;
    /* Actual length of the handshake message */
    size_t handbuflen;
    /* Callback fired (with handshakecbarg) for each handshake message */
    qtest_fault_on_handshake_cb handshakecb;
    void *handshakecbarg;
    /* Callback fired (with encextcbarg) for EncryptedExtensions messages */
    qtest_fault_on_enc_ext_cb encextcb;
    void *encextcbarg;

    /* Cipher packet mutations */
    qtest_fault_on_packet_cipher_cb pciphercb;
    void *pciphercbarg;

    /* Datagram mutations */
    qtest_fault_on_datagram_cb datagramcb;
    void *datagramcbarg;
    /* The currently processed message */
    BIO_MSG msg;
    /* Allocated size of msg data buffer */
    size_t msgalloc;

    /* Arguments for noise_msg_callback() when QTEST_FLAG_NOISE is used */
    struct noise_args_data_st noiseargs;
};
|
|
|
|
|
Attempt to fix occasional failure of quicapi test in ci
https://github.com/openssl/openssl/actions/runs/15214054228/job/42795224720
the theory I have for the cause of this failure is:
1. qtest_create_quic_connection_ex is called for the client
2. The client is in blocking mode, so we fall into the conditional on line 512
3. We create the server thread on line 519, which is non-blocking
4. The scheduler in the failing case, lets the server run ahead of the client
5. Server thread enters qtest_create_quic_connection_ex and iterates steps
6-9 in the do_while loop starting on line 530
6. Server calls qtest_add_time
7. Server calls ossl_quic_tserver_tick
8. Server calls ossl_quic_tserver_is_term_any, received NULL return
9. Server calls qtest_wait_for_timeout
10. Eventually qtest_wait_for_timeout returns zero, and the server jumps to
the error label, returning zero to globserverret, and the thread exits
11. Client thread regains the cpu, and attempts to call SSL_connect, which
fails, as the server is no longer listening
12. We fall into the error case on line 556, and SSL_get_error returns
SSL_ERROR_SSL, which causes clienterr to get set to 1
13. We exit the do{} while loop on line 581, and do the TEST_true check on
line 593. The server having exited wait_for_thread returns true, but
globserverret is still zero from step 10 above, and so the test fails
I can't prove this is the case, as the test only appears to fail in CI,
and we can't dump verbose logging there, lest we affect the timing of
the tests, so this is just a theory, but it seems to fit the
observations we have.
Attempting to fix this, by creating a thread interlock with a condition
variable that blocks the server from ticking the quic reactor until such
time as the client is about to call SSL_connect to prevent the race
condition
Reviewed-by: Saša Nedvědický <sashan@openssl.org>
Reviewed-by: Tomas Mraz <tomas@openssl.org>
(Merged from https://github.com/openssl/openssl/pull/27704)
2025-05-27 21:36:35 +08:00
|
|
|
#if defined(OPENSSL_THREADS) && !defined(CRYPTO_TDEBUG)
|
|
|
|
static int client_ready = 0;
|
|
|
|
static CRYPTO_CONDVAR *client_ready_cond = NULL;
|
|
|
|
static CRYPTO_MUTEX *client_ready_mutex = NULL;
|
|
|
|
#endif
|
|
|
|
|
2022-12-02 23:52:21 +08:00
|
|
|
static void packet_plain_finish(void *arg);
|
|
|
|
static void handshake_finish(void *arg);
|
2023-11-04 00:56:40 +08:00
|
|
|
static OSSL_TIME qtest_get_time(void);
|
|
|
|
static void qtest_reset_time(void);
|
2022-12-02 23:52:21 +08:00
|
|
|
|
2023-09-18 23:55:52 +08:00
|
|
|
/* Nonzero when QTEST_FLAG_FAKE_TIME is in effect for the current test */
static int using_fake_time = 0;
/* The manually advanced fake clock; protected by fake_now_lock */
static OSSL_TIME fake_now;
static CRYPTO_RWLOCK *fake_now_lock = NULL;
/* Reference point for the qtest stopwatch helpers */
static OSSL_TIME start_time;
|
2023-07-03 23:58:46 +08:00
|
|
|
|
|
|
|
/* "Now" callback handed to the QUIC objects; proxies the shared test clock. */
static OSSL_TIME fake_now_cb(void *arg)
{
    return qtest_get_time();
}
|
|
|
|
|
2023-10-02 18:47:08 +08:00
|
|
|
/*
 * Message callback installed on both endpoints when noise is in use.
 *
 * If a PING frame is seen, the noise filters on both sides are asked to back
 * off, since a PING suggests the connection is struggling under the induced
 * noise. Optionally also traces client-side messages.
 */
static void noise_msg_callback(int write_p, int version, int content_type,
                               const void *buf, size_t len, SSL *ssl,
                               void *arg)
{
    struct noise_args_data_st *args = (struct noise_args_data_st *)arg;

    if (content_type == SSL3_RT_QUIC_FRAME_FULL) {
        PACKET pkt;
        uint64_t frame_type;

        /* On any parse failure we give up entirely (no tracing either) */
        if (!PACKET_buf_init(&pkt, buf, len)
                || !ossl_quic_wire_peek_frame_header(&pkt, &frame_type, NULL))
            return;

        if (frame_type == OSSL_QUIC_FRAME_TYPE_PING) {
            /*
             * If either endpoint issues a ping frame then we are in danger
             * of our noise being too much such that the connection itself
             * fails. We back off on the noise for a bit to avoid that.
             */
            (void)BIO_ctrl(args->cbio, BIO_CTRL_NOISE_BACK_OFF, 1, NULL);
            (void)BIO_ctrl(args->sbio, BIO_CTRL_NOISE_BACK_OFF, 1, NULL);
        }
    }

#ifndef OPENSSL_NO_SSL_TRACE
    if ((args->flags & QTEST_FLAG_CLIENT_TRACE) != 0
            && !SSL_is_server(ssl))
        SSL_trace(write_p, version, content_type, buf, len, ssl,
                  args->tracebio);
#endif
}
|
|
|
|
|
2023-03-14 01:36:24 +08:00
|
|
|
int qtest_create_quic_objects(OSSL_LIB_CTX *libctx, SSL_CTX *clientctx,
|
2023-07-28 22:32:57 +08:00
|
|
|
SSL_CTX *serverctx, char *certfile, char *keyfile,
|
2023-07-03 23:58:46 +08:00
|
|
|
int flags, QUIC_TSERVER **qtserv, SSL **cssl,
|
2023-09-19 23:40:25 +08:00
|
|
|
QTEST_FAULT **fault, BIO **tracebio)
|
2022-11-30 22:21:00 +08:00
|
|
|
{
|
|
|
|
/* ALPN value as recognised by QUIC_TSERVER */
|
|
|
|
unsigned char alpn[] = { 8, 'o', 's', 's', 'l', 't', 'e', 's', 't' };
|
|
|
|
QUIC_TSERVER_ARGS tserver_args = {0};
|
2022-12-13 00:52:50 +08:00
|
|
|
BIO *cbio = NULL, *sbio = NULL, *fisbio = NULL;
|
2022-11-30 22:21:00 +08:00
|
|
|
BIO_ADDR *peeraddr = NULL;
|
|
|
|
struct in_addr ina = {0};
|
2023-09-19 23:40:25 +08:00
|
|
|
BIO *tmpbio = NULL;
|
2025-01-30 02:18:57 +08:00
|
|
|
QTEST_DATA *bdata = NULL;
|
|
|
|
|
Attempt to fix occasional failure of quicapi test in ci
https://github.com/openssl/openssl/actions/runs/15214054228/job/42795224720
the theory I have for the cause of this failure is:
1. qtest_create_quic_connection_ex is called for the client
2. The client is in blocking mode, so we fall into the conditional on line 512
3. We create the server thread on line 519, which is non-blocking
4. The scheduler in the failing case, lets the server run ahead of the client
5. Server thread enters qtest_create_quic_connection_ex and iterates steps
6-9 in the do_while loop starting on line 530
6. Server calls qtest_add_time
7. Server calls ossl_quic_tserver_tick
8. Server calls ossl_quic_tserver_is_term_any, received NULL return
9. Server calls qtest_wait_for_timeout
10. Eventually qtest_wait_for_timeout returns zero, adn the server jumps to
the error label, returning zero to globservret, and the thread exits
11. Client thread regains the cpu, and attempts to call SSL_connect, which
fails, as the server is no longer listening
12. We fall into the error case on line 556, and SSL_get_error returns
SSL_ERROR_SSL, which causes clienterr to get set to 1
13. We exit the do{} while loop on line 581, and do the TEST_true check on
line 593. The server having exited wait_for_thread returns true, but
globserverret is still zero from step 10 above, and so the test fails
I can't prove this is the case, as the test only appears to fail in CI,
and we can't dump verbose logging there, lest we affect the timing of
the tests, so this is just a theory, but it seems to fit the
observations we have.
Attempting to fix this, by creating a thread interlock with a condition
variable that blocks the server from ticking the quic reactor until such
time as the client is about to call SSL_connect to prevent the race
condition
Reviewed-by: Saša Nedvědický <sashan@openssl.org>
Reviewed-by: Tomas Mraz <tomas@openssl.org>
(Merged from https://github.com/openssl/openssl/pull/27704)
2025-05-27 21:36:35 +08:00
|
|
|
#if defined(OPENSSL_THREADS) && !defined(CRYPTO_TDEBUG)
|
|
|
|
if (client_ready_cond == NULL) {
|
|
|
|
client_ready_cond = ossl_crypto_condvar_new();
|
|
|
|
if (client_ready_cond == NULL)
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (client_ready_mutex == NULL) {
|
|
|
|
client_ready_mutex = ossl_crypto_mutex_new();
|
|
|
|
if (client_ready_mutex == NULL) {
|
|
|
|
ossl_crypto_condvar_free(&client_ready_cond);
|
|
|
|
client_ready_cond = NULL;
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
#endif
|
|
|
|
|
2025-01-30 02:18:57 +08:00
|
|
|
bdata = OPENSSL_zalloc(sizeof(QTEST_DATA));
|
|
|
|
if (bdata == NULL)
|
|
|
|
return 0;
|
2022-11-30 22:21:00 +08:00
|
|
|
|
|
|
|
*qtserv = NULL;
|
2023-07-04 23:28:41 +08:00
|
|
|
if (*cssl == NULL) {
|
|
|
|
*cssl = SSL_new(clientctx);
|
|
|
|
if (!TEST_ptr(*cssl))
|
|
|
|
return 0;
|
|
|
|
}
|
2022-11-30 22:21:00 +08:00
|
|
|
|
2023-10-02 18:47:08 +08:00
|
|
|
if (fault != NULL) {
|
|
|
|
*fault = OPENSSL_zalloc(sizeof(**fault));
|
|
|
|
if (*fault == NULL)
|
|
|
|
goto err;
|
2025-01-30 02:18:57 +08:00
|
|
|
bdata->fault = *fault;
|
2023-10-02 18:47:08 +08:00
|
|
|
}
|
|
|
|
|
2023-09-26 19:14:56 +08:00
|
|
|
#ifndef OPENSSL_NO_SSL_TRACE
|
2023-09-19 23:40:25 +08:00
|
|
|
if ((flags & QTEST_FLAG_CLIENT_TRACE) != 0) {
|
|
|
|
tmpbio = BIO_new_fp(stdout, BIO_NOCLOSE);
|
|
|
|
if (!TEST_ptr(tmpbio))
|
|
|
|
goto err;
|
|
|
|
|
|
|
|
SSL_set_msg_callback(*cssl, SSL_trace);
|
|
|
|
SSL_set_msg_callback_arg(*cssl, tmpbio);
|
|
|
|
}
|
2023-09-26 19:14:56 +08:00
|
|
|
#endif
|
2023-09-19 23:40:25 +08:00
|
|
|
if (tracebio != NULL)
|
|
|
|
*tracebio = tmpbio;
|
|
|
|
|
2022-11-30 22:21:00 +08:00
|
|
|
/* SSL_set_alpn_protos returns 0 for success! */
|
|
|
|
if (!TEST_false(SSL_set_alpn_protos(*cssl, alpn, sizeof(alpn))))
|
|
|
|
goto err;
|
|
|
|
|
2023-03-14 01:36:24 +08:00
|
|
|
if (!TEST_ptr(peeraddr = BIO_ADDR_new()))
|
2022-11-30 22:21:00 +08:00
|
|
|
goto err;
|
|
|
|
|
2023-07-03 23:58:46 +08:00
|
|
|
if ((flags & QTEST_FLAG_BLOCK) != 0) {
|
2023-03-14 01:36:24 +08:00
|
|
|
#if !defined(OPENSSL_NO_POSIX_IO)
|
|
|
|
int cfd, sfd;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* For blocking mode we need to create actual sockets rather than doing
|
|
|
|
* everything in memory
|
|
|
|
*/
|
|
|
|
if (!TEST_true(create_test_sockets(&cfd, &sfd, SOCK_DGRAM, peeraddr)))
|
|
|
|
goto err;
|
|
|
|
cbio = BIO_new_dgram(cfd, 1);
|
|
|
|
if (!TEST_ptr(cbio)) {
|
|
|
|
close(cfd);
|
|
|
|
close(sfd);
|
|
|
|
goto err;
|
|
|
|
}
|
|
|
|
sbio = BIO_new_dgram(sfd, 1);
|
|
|
|
if (!TEST_ptr(sbio)) {
|
|
|
|
close(sfd);
|
|
|
|
goto err;
|
|
|
|
}
|
|
|
|
#else
|
2022-11-30 22:21:00 +08:00
|
|
|
goto err;
|
2023-03-14 01:36:24 +08:00
|
|
|
#endif
|
|
|
|
} else {
|
2024-11-26 20:36:52 +08:00
|
|
|
BIO_ADDR *localaddr = NULL;
|
|
|
|
|
2023-03-14 01:36:24 +08:00
|
|
|
if (!TEST_true(BIO_new_bio_dgram_pair(&cbio, 0, &sbio, 0)))
|
|
|
|
goto err;
|
2022-11-30 22:21:00 +08:00
|
|
|
|
2023-03-14 01:36:24 +08:00
|
|
|
if (!TEST_true(BIO_dgram_set_caps(cbio, BIO_DGRAM_CAP_HANDLES_DST_ADDR))
|
|
|
|
|| !TEST_true(BIO_dgram_set_caps(sbio, BIO_DGRAM_CAP_HANDLES_DST_ADDR)))
|
|
|
|
goto err;
|
2022-11-30 22:21:00 +08:00
|
|
|
|
2024-11-26 20:36:52 +08:00
|
|
|
if (!TEST_ptr(localaddr = BIO_ADDR_new()))
|
|
|
|
goto err;
|
|
|
|
/* Dummy client local addresses */
|
|
|
|
if (!TEST_true(BIO_ADDR_rawmake(localaddr, AF_INET, &ina, sizeof(ina),
|
|
|
|
htons(0)))) {
|
|
|
|
BIO_ADDR_free(localaddr);
|
|
|
|
goto err;
|
|
|
|
}
|
|
|
|
if (!TEST_int_eq(BIO_dgram_set0_local_addr(cbio, localaddr), 1)) {
|
|
|
|
BIO_ADDR_free(localaddr);
|
|
|
|
goto err;
|
|
|
|
}
|
2023-03-14 01:36:24 +08:00
|
|
|
/* Dummy server address */
|
|
|
|
if (!TEST_true(BIO_ADDR_rawmake(peeraddr, AF_INET, &ina, sizeof(ina),
|
|
|
|
htons(0))))
|
|
|
|
goto err;
|
|
|
|
}
|
|
|
|
|
2023-09-19 19:21:27 +08:00
|
|
|
if ((flags & QTEST_FLAG_PACKET_SPLIT) != 0) {
|
|
|
|
BIO *pktsplitbio = BIO_new(bio_f_pkt_split_dgram_filter());
|
|
|
|
|
|
|
|
if (!TEST_ptr(pktsplitbio))
|
|
|
|
goto err;
|
|
|
|
cbio = BIO_push(pktsplitbio, cbio);
|
2025-01-30 02:18:57 +08:00
|
|
|
BIO_set_data(pktsplitbio, bdata);
|
2023-09-19 23:52:00 +08:00
|
|
|
|
|
|
|
pktsplitbio = BIO_new(bio_f_pkt_split_dgram_filter());
|
|
|
|
if (!TEST_ptr(pktsplitbio))
|
|
|
|
goto err;
|
|
|
|
sbio = BIO_push(pktsplitbio, sbio);
|
2025-01-30 02:18:57 +08:00
|
|
|
BIO_set_data(pktsplitbio, bdata);
|
2023-09-19 19:21:27 +08:00
|
|
|
}
|
|
|
|
|
2023-09-14 17:59:47 +08:00
|
|
|
if ((flags & QTEST_FLAG_NOISE) != 0) {
|
2023-10-02 18:47:08 +08:00
|
|
|
BIO *noisebio;
|
2024-02-14 19:45:15 +08:00
|
|
|
struct bio_noise_now_cb_st now_cb = { fake_now_cb, NULL };
|
2023-10-02 18:47:08 +08:00
|
|
|
|
|
|
|
/*
|
|
|
|
* It is an error to not have a QTEST_FAULT object when introducing noise
|
|
|
|
*/
|
|
|
|
if (!TEST_ptr(fault))
|
|
|
|
goto err;
|
|
|
|
|
|
|
|
noisebio = BIO_new(bio_f_noisy_dgram_filter());
|
2023-09-14 17:59:47 +08:00
|
|
|
|
|
|
|
if (!TEST_ptr(noisebio))
|
|
|
|
goto err;
|
|
|
|
cbio = BIO_push(noisebio, cbio);
|
2024-02-14 19:45:15 +08:00
|
|
|
if ((flags & QTEST_FLAG_FAKE_TIME) != 0) {
|
|
|
|
if (!TEST_int_eq(BIO_ctrl(cbio, BIO_CTRL_NOISE_SET_NOW_CB,
|
|
|
|
0, &now_cb), 1))
|
|
|
|
goto err;
|
|
|
|
}
|
2023-09-19 23:52:00 +08:00
|
|
|
|
|
|
|
noisebio = BIO_new(bio_f_noisy_dgram_filter());
|
|
|
|
|
|
|
|
if (!TEST_ptr(noisebio))
|
|
|
|
goto err;
|
|
|
|
sbio = BIO_push(noisebio, sbio);
|
2024-02-14 19:45:15 +08:00
|
|
|
if ((flags & QTEST_FLAG_FAKE_TIME) != 0) {
|
|
|
|
if (!TEST_int_eq(BIO_ctrl(sbio, BIO_CTRL_NOISE_SET_NOW_CB,
|
|
|
|
0, &now_cb), 1))
|
|
|
|
goto err;
|
|
|
|
}
|
2025-01-30 03:19:35 +08:00
|
|
|
|
Extend backoff period in noisydgram BIO users
Initially tests that were written which make use of the noisy dgram BIO,
were done under the assumption that, despite any packet mangling done by
the noisy dgram bio, the connection would still be established. This
was initiall guaranteed by configuring the BIO to avoid
corrupting/dropping/duplicating/re-injecting the first packet received,
thus ensuring that the client and server hello frames would make it to
the peer successfully.
This implicitly made the assumption that the client and server hellos
were contained within a single datagram, which until recently was true.
However, with the introduction of ML-KEM keyshares, the above assumption
no longer holds. Large ML-KEM keyshares generally expand these TLS
messages accross multiple datagrams, and so it is now possible that
those initial records can become corrupted/lost etc, leading to
unexpected connection failures.
Lets fix it by restoring the guarantee that these tests were written
under by making the backoff time configurable to a number of frames, and
configuring the quic connection objects used in the test to not drop the
first two initial frames, once again guaranteeing that the client and
server hello arrive at the peer uncorrupted, so that we get a good
connection established.
Fixes #27103
Reviewed-by: Matt Caswell <matt@openssl.org>
Reviewed-by: Saša Nedvědický <sashan@openssl.org>
Reviewed-by: Tomas Mraz <tomas@openssl.org>
(Merged from https://github.com/openssl/openssl/pull/27169)
2025-03-26 23:17:31 +08:00
|
|
|
(void)BIO_ctrl(sbio, BIO_CTRL_NOISE_BACK_OFF, 2, NULL);
|
2023-10-02 18:47:08 +08:00
|
|
|
|
|
|
|
(*fault)->noiseargs.cbio = cbio;
|
|
|
|
(*fault)->noiseargs.sbio = sbio;
|
|
|
|
(*fault)->noiseargs.tracebio = tmpbio;
|
|
|
|
(*fault)->noiseargs.flags = flags;
|
|
|
|
|
|
|
|
SSL_set_msg_callback(*cssl, noise_msg_callback);
|
|
|
|
SSL_set_msg_callback_arg(*cssl, &(*fault)->noiseargs);
|
2023-09-14 17:59:47 +08:00
|
|
|
}
|
|
|
|
|
2023-03-14 01:36:24 +08:00
|
|
|
SSL_set_bio(*cssl, cbio, cbio);
|
2022-11-30 22:21:00 +08:00
|
|
|
|
2023-07-03 23:58:46 +08:00
|
|
|
if (!TEST_true(SSL_set_blocking_mode(*cssl,
|
|
|
|
(flags & QTEST_FLAG_BLOCK) != 0 ? 1 : 0)))
|
2022-11-30 22:21:00 +08:00
|
|
|
goto err;
|
|
|
|
|
2023-08-23 15:19:01 +08:00
|
|
|
if (!TEST_true(SSL_set1_initial_peer_addr(*cssl, peeraddr)))
|
2022-11-30 22:21:00 +08:00
|
|
|
goto err;
|
|
|
|
|
2023-06-06 23:25:12 +08:00
|
|
|
fisbio = BIO_new(qtest_get_bio_method());
|
2022-12-13 00:52:50 +08:00
|
|
|
if (!TEST_ptr(fisbio))
|
|
|
|
goto err;
|
|
|
|
|
2025-01-30 02:18:57 +08:00
|
|
|
BIO_set_data(fisbio, bdata);
|
2022-12-13 00:52:50 +08:00
|
|
|
|
2023-09-20 23:24:37 +08:00
|
|
|
if (!BIO_up_ref(sbio))
|
2022-11-30 22:21:00 +08:00
|
|
|
goto err;
|
2023-09-20 23:24:37 +08:00
|
|
|
if (!TEST_ptr(BIO_push(fisbio, sbio))) {
|
|
|
|
BIO_free(sbio);
|
|
|
|
goto err;
|
|
|
|
}
|
2022-12-13 00:52:50 +08:00
|
|
|
|
2023-03-14 01:36:24 +08:00
|
|
|
tserver_args.libctx = libctx;
|
2022-12-13 00:52:50 +08:00
|
|
|
tserver_args.net_rbio = sbio;
|
|
|
|
tserver_args.net_wbio = fisbio;
|
2023-06-08 19:18:38 +08:00
|
|
|
tserver_args.alpn = NULL;
|
2023-07-28 22:32:57 +08:00
|
|
|
if (serverctx != NULL && !TEST_true(SSL_CTX_up_ref(serverctx)))
|
|
|
|
goto err;
|
|
|
|
tserver_args.ctx = serverctx;
|
2023-11-04 00:56:40 +08:00
|
|
|
if (fake_now_lock == NULL) {
|
|
|
|
fake_now_lock = CRYPTO_THREAD_lock_new();
|
|
|
|
if (fake_now_lock == NULL)
|
|
|
|
goto err;
|
|
|
|
}
|
2023-07-03 23:58:46 +08:00
|
|
|
if ((flags & QTEST_FLAG_FAKE_TIME) != 0) {
|
2023-09-18 23:55:52 +08:00
|
|
|
using_fake_time = 1;
|
2023-11-04 00:56:40 +08:00
|
|
|
qtest_reset_time();
|
2023-07-03 23:58:46 +08:00
|
|
|
tserver_args.now_cb = fake_now_cb;
|
2024-09-13 23:00:22 +08:00
|
|
|
(void)ossl_quic_set_override_now_cb(*cssl, fake_now_cb, NULL);
|
2023-09-18 23:55:52 +08:00
|
|
|
} else {
|
|
|
|
using_fake_time = 0;
|
2023-07-03 23:58:46 +08:00
|
|
|
}
|
2022-11-30 22:21:00 +08:00
|
|
|
|
|
|
|
if (!TEST_ptr(*qtserv = ossl_quic_tserver_new(&tserver_args, certfile,
|
2022-12-13 00:52:50 +08:00
|
|
|
keyfile)))
|
2022-11-30 22:21:00 +08:00
|
|
|
goto err;
|
|
|
|
|
2025-01-30 02:18:57 +08:00
|
|
|
bdata->short_conn_id_len = ossl_quic_tserver_get_short_header_conn_id_len(*qtserv);
|
2022-12-13 00:52:50 +08:00
|
|
|
/* Ownership of fisbio and sbio is now held by *qtserv */
|
|
|
|
sbio = NULL;
|
|
|
|
fisbio = NULL;
|
2022-11-30 22:21:00 +08:00
|
|
|
|
2023-10-02 18:47:08 +08:00
|
|
|
if ((flags & QTEST_FLAG_NOISE) != 0)
|
|
|
|
ossl_quic_tserver_set_msg_callback(*qtserv, noise_msg_callback,
|
|
|
|
&(*fault)->noiseargs);
|
|
|
|
|
2022-12-13 00:52:50 +08:00
|
|
|
if (fault != NULL)
|
2022-11-30 22:21:00 +08:00
|
|
|
(*fault)->qtserv = *qtserv;
|
|
|
|
|
|
|
|
BIO_ADDR_free(peeraddr);
|
|
|
|
|
|
|
|
return 1;
|
|
|
|
err:
|
2023-07-28 22:32:57 +08:00
|
|
|
SSL_CTX_free(tserver_args.ctx);
|
2022-11-30 22:21:00 +08:00
|
|
|
BIO_ADDR_free(peeraddr);
|
2023-09-19 23:52:00 +08:00
|
|
|
BIO_free_all(cbio);
|
2023-09-20 23:24:37 +08:00
|
|
|
BIO_free_all(fisbio);
|
2023-09-19 23:52:00 +08:00
|
|
|
BIO_free_all(sbio);
|
2022-11-30 22:21:00 +08:00
|
|
|
SSL_free(*cssl);
|
2023-03-14 01:36:24 +08:00
|
|
|
*cssl = NULL;
|
2022-11-30 22:21:00 +08:00
|
|
|
ossl_quic_tserver_free(*qtserv);
|
|
|
|
if (fault != NULL)
|
|
|
|
OPENSSL_free(*fault);
|
2025-01-30 02:18:57 +08:00
|
|
|
OPENSSL_free(bdata);
|
2023-09-19 23:40:25 +08:00
|
|
|
BIO_free(tmpbio);
|
|
|
|
if (tracebio != NULL)
|
|
|
|
*tracebio = NULL;
|
2022-11-30 22:21:00 +08:00
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2023-07-03 23:58:46 +08:00
|
|
|
/* Advance the shared fake clock by |millis| milliseconds (thread-safe). */
void qtest_add_time(uint64_t millis)
{
    if (CRYPTO_THREAD_write_lock(fake_now_lock)) {
        fake_now = ossl_time_add(fake_now, ossl_ms2time(millis));
        CRYPTO_THREAD_unlock(fake_now_lock);
    }
}
|
|
|
|
|
|
|
|
static OSSL_TIME qtest_get_time(void)
|
|
|
|
{
|
|
|
|
OSSL_TIME ret;
|
|
|
|
|
|
|
|
if (!CRYPTO_THREAD_read_lock(fake_now_lock))
|
|
|
|
return ossl_time_zero();
|
|
|
|
ret = fake_now;
|
|
|
|
CRYPTO_THREAD_unlock(fake_now_lock);
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
|
|
|
static void qtest_reset_time(void)
|
|
|
|
{
|
|
|
|
if (!CRYPTO_THREAD_write_lock(fake_now_lock))
|
|
|
|
return;
|
|
|
|
fake_now = ossl_time_zero();
|
|
|
|
CRYPTO_THREAD_unlock(fake_now_lock);
|
|
|
|
/* zero time can have a special meaning, bump it */
|
|
|
|
qtest_add_time(1);
|
2023-07-03 23:58:46 +08:00
|
|
|
}
|
|
|
|
|
2024-02-14 19:45:15 +08:00
|
|
|
void qtest_start_stopwatch(void)
|
|
|
|
{
|
|
|
|
start_time = qtest_get_time();
|
|
|
|
}
|
|
|
|
|
|
|
|
uint64_t qtest_get_stopwatch_time(void)
|
|
|
|
{
|
|
|
|
return ossl_time2ms(ossl_time_subtract(qtest_get_time(), start_time));
|
|
|
|
}
|
|
|
|
|
2023-06-06 23:25:12 +08:00
|
|
|
/*
 * Create a stand-alone fault injector bound to |ts|.
 * Returns NULL on allocation failure; the caller owns the result.
 */
QTEST_FAULT *qtest_create_injector(QUIC_TSERVER *ts)
{
    QTEST_FAULT *fault = OPENSSL_zalloc(sizeof(*fault));

    if (fault != NULL)
        fault->qtserv = ts;

    return fault;
}
|
|
|
|
|
2023-03-14 01:36:24 +08:00
|
|
|
/*
 * Report whether this build can run the blocking-mode tests (requires POSIX
 * I/O for real sockets and thread support for the server thread).
 */
int qtest_supports_blocking(void)
{
#if !defined(OPENSSL_NO_POSIX_IO) && defined(OPENSSL_THREADS) && !defined(CRYPTO_TDEBUG)
    return 1;
#else
    return 0;
#endif
}
|
|
|
|
|
2022-11-30 22:21:00 +08:00
|
|
|
#define MAXLOOPS 1000
|
|
|
|
|
2023-03-14 01:36:24 +08:00
|
|
|
#if defined(OPENSSL_THREADS) && !defined(CRYPTO_TDEBUG)
|
|
|
|
static int globserverret = 0;
|
2023-08-31 23:18:28 +08:00
|
|
|
static TSAN_QUALIFIER int abortserverthread = 0;
|
2023-03-14 01:36:24 +08:00
|
|
|
static QUIC_TSERVER *globtserv;
|
|
|
|
static const thread_t thread_zero;
|
|
|
|
static void run_server_thread(void)
|
|
|
|
{
|
|
|
|
/*
|
|
|
|
* This will operate in a busy loop because the server does not block,
|
|
|
|
* but should be acceptable because it is local and we expect this to be
|
|
|
|
* fast
|
|
|
|
*/
|
|
|
|
globserverret = qtest_create_quic_connection(globtserv, NULL);
|
|
|
|
}
|
|
|
|
#endif
|
|
|
|
|
2023-09-19 23:52:00 +08:00
|
|
|
/*
 * Wait (or advance fake time) until the next client/server event deadline.
 * Returns 1 if progress is possible (data pending, blocking mode, or a
 * finite timeout was consumed) and 0 if both endpoints are idle forever or
 * the client timeout could not be queried.
 */
int qtest_wait_for_timeout(SSL *s, QUIC_TSERVER *qtserv)
{
    struct timeval tv;
    OSSL_TIME client_timeout, server_timeout, earliest, now;
    int is_infinite;

    /* Blocking clients wait internally; nothing for us to do */
    if (s == NULL || SSL_get_blocking_mode(s))
        return 1;

    /* If either side already has data queued there is no need to wait */
    if (BIO_pending(SSL_get_rbio(s)) > 0
            || BIO_pending(ossl_quic_tserver_get0_rbio(qtserv)) > 0)
        return 1;

    /*
     * Neither endpoint has data waiting to be read. We assume data transmission
     * is instantaneous due to using mem based BIOs, so there is no data "in
     * flight" and no more data will be sent by either endpoint until some time
     * based event has occurred. Therefore, wait for a timeout to occur. This
     * might happen if we are using the noisy BIO and datagrams have been lost.
     */
    if (!SSL_get_event_timeout(s, &tv, &is_infinite))
        return 0;

    now = using_fake_time ? qtest_get_time() : ossl_time_now();

    client_timeout = is_infinite ? ossl_time_infinite()
                                 : ossl_time_from_timeval(tv);
    server_timeout = ossl_time_subtract(ossl_quic_tserver_get_deadline(qtserv),
                                        now);
    earliest = ossl_time_min(client_timeout, server_timeout);
    if (ossl_time_is_infinite(earliest))
        return 0;

    /* Either sleep for real or just advance the fake clock */
    if (using_fake_time)
        qtest_add_time(ossl_time2ms(earliest));
    else
        OSSL_sleep(ossl_time2ms(earliest));

    return 1;
}
|
|
|
|
|
2023-08-31 23:18:28 +08:00
|
|
|
int qtest_create_quic_connection_ex(QUIC_TSERVER *qtserv, SSL *clientssl,
|
|
|
|
int wanterr)
|
2022-11-30 22:21:00 +08:00
|
|
|
{
|
2023-08-16 02:53:32 +08:00
|
|
|
int retc = -1, rets = 0, abortctr = 0, ret = 0;
|
2022-11-30 22:21:00 +08:00
|
|
|
int clienterr = 0, servererr = 0;
|
2023-03-14 01:36:24 +08:00
|
|
|
#if defined(OPENSSL_THREADS) && !defined(CRYPTO_TDEBUG)
|
Attempt to fix occasional failure of quicapi test in ci
https://github.com/openssl/openssl/actions/runs/15214054228/job/42795224720
the theory I have for the cause of this failure is:
1. qtest_create_quic_connection_ex is called for the client
2. The client is in blocking mode, so we fall into the conditional on line 512
3. We create the server thread on line 519, which is non-blocking
4. The scheduler in the failing case, lets the server run ahead of the client
5. Server thread enters qtest_create_quic_connection_ex and iterates steps
6-9 in the do_while loop starting on line 530
6. Server calls qtest_add_time
7. Server calls ossl_quic_tserver_tick
8. Server calls ossl_quic_tserver_is_term_any, received NULL return
9. Server calls qtest_wait_for_timeout
10. Eventually qtest_wait_for_timeout returns zero, and the server jumps to
the error label, returning zero to globserverret, and the thread exits
11. Client thread regains the cpu, and attempts to call SSL_connect, which
fails, as the server is no longer listening
12. We fall into the error case on line 556, and SSL_get_error returns
SSL_ERROR_SSL, which causes clienterr to get set to 1
13. We exit the do{} while loop on line 581, and do the TEST_true check on
line 593. The server having exited wait_for_thread returns true, but
globserverret is still zero from step 10 above, and so the test fails
I can't prove this is the case, as the test only appears to fail in CI,
and we can't dump verbose logging there, lest we affect the timing of
the tests, so this is just a theory, but it seems to fit the
observations we have.
Attempting to fix this, by creating a thread interlock with a condition
variable that blocks the server from ticking the quic reactor until such
time as the client is about to call SSL_connect to prevent the race
condition
Reviewed-by: Saša Nedvědický <sashan@openssl.org>
Reviewed-by: Tomas Mraz <tomas@openssl.org>
(Merged from https://github.com/openssl/openssl/pull/27704)
2025-05-27 21:36:35 +08:00
|
|
|
|
2023-03-14 01:36:24 +08:00
|
|
|
/*
|
|
|
|
* Pointless initialisation to avoid bogus compiler warnings about using
|
|
|
|
* t uninitialised
|
|
|
|
*/
|
|
|
|
thread_t t = thread_zero;
|
2023-08-31 23:18:28 +08:00
|
|
|
|
|
|
|
if (clientssl != NULL)
|
|
|
|
abortserverthread = 0;
|
Attempt to fix occasional failure of quicapi test in ci
https://github.com/openssl/openssl/actions/runs/15214054228/job/42795224720
the theory I have for the cause of this failure is:
1. qtest_create_quic_connection_ex is called for the client
2. The client is in blocking mode, so we fall into the conditional on line 512
3. We create the server thread on line 519, which is non-blocking
4. The scheduler in the failing case, lets the server run ahead of the client
5. Server thread enters qtest_create_quic_connection_ex and iterates steps
6-9 in the do_while loop starting on line 530
6. Server calls qtest_add_time
7. Server calls ossl_quic_tserver_tick
8. Server calls ossl_quic_tserver_is_term_any, received NULL return
9. Server calls qtest_wait_for_timeout
10. Eventually qtest_wait_for_timeout returns zero, and the server jumps to
the error label, returning zero to globserverret, and the thread exits
11. Client thread regains the cpu, and attempts to call SSL_connect, which
fails, as the server is no longer listening
12. We fall into the error case on line 556, and SSL_get_error returns
SSL_ERROR_SSL, which causes clienterr to get set to 1
13. We exit the do{} while loop on line 581, and do the TEST_true check on
line 593. The server having exited wait_for_thread returns true, but
globserverret is still zero from step 10 above, and so the test fails
I can't prove this is the case, as the test only appears to fail in CI,
and we can't dump verbose logging there, lest we affect the timing of
the tests, so this is just a theory, but it seems to fit the
observations we have.
Attempting to fix this, by creating a thread interlock with a condition
variable that blocks the server from ticking the quic reactor until such
time as the client is about to call SSL_connect to prevent the race
condition
Reviewed-by: Saša Nedvědický <sashan@openssl.org>
Reviewed-by: Tomas Mraz <tomas@openssl.org>
(Merged from https://github.com/openssl/openssl/pull/27704)
2025-05-27 21:36:35 +08:00
|
|
|
|
|
|
|
/*
|
|
|
|
* Only set the client_ready flag to zero if we are the client
|
|
|
|
*/
|
|
|
|
if (clientssl != NULL) {
|
|
|
|
ossl_crypto_mutex_lock(client_ready_mutex);
|
|
|
|
client_ready = 0;
|
|
|
|
ossl_crypto_mutex_unlock(client_ready_mutex);
|
|
|
|
}
|
2023-03-14 01:36:24 +08:00
|
|
|
#endif
|
|
|
|
|
|
|
|
if (!TEST_ptr(qtserv)) {
|
|
|
|
goto err;
|
|
|
|
} else if (clientssl == NULL) {
|
|
|
|
retc = 1;
|
|
|
|
} else if (SSL_get_blocking_mode(clientssl) > 0) {
|
|
|
|
#if defined(OPENSSL_THREADS) && !defined(CRYPTO_TDEBUG)
|
|
|
|
/*
|
|
|
|
* clientssl is blocking. We will need a thread to complete the
|
|
|
|
* connection
|
|
|
|
*/
|
|
|
|
globtserv = qtserv;
|
|
|
|
if (!TEST_true(run_thread(&t, run_server_thread)))
|
|
|
|
goto err;
|
|
|
|
|
|
|
|
qtserv = NULL;
|
|
|
|
rets = 1;
|
|
|
|
#else
|
|
|
|
TEST_error("No thread support in this build");
|
|
|
|
goto err;
|
|
|
|
#endif
|
|
|
|
}
|
2022-11-30 22:21:00 +08:00
|
|
|
|
|
|
|
do {
|
2023-08-16 02:53:32 +08:00
|
|
|
if (!clienterr && retc <= 0) {
|
|
|
|
int err;
|
|
|
|
|
Attempt to fix occasional failure of quicapi test in ci
https://github.com/openssl/openssl/actions/runs/15214054228/job/42795224720
the theory I have for the cause of this failure is:
1. qtest_create_quic_connection_ex is called for the client
2. The client is in blocking mode, so we fall into the conditional on line 512
3. We create the server thread on line 519, which is non-blocking
4. The scheduler in the failing case, lets the server run ahead of the client
5. Server thread enters qtest_create_quic_connection_ex and iterates steps
6-9 in the do_while loop starting on line 530
6. Server calls qtest_add_time
7. Server calls ossl_quic_tserver_tick
8. Server calls ossl_quic_tserver_is_term_any, received NULL return
9. Server calls qtest_wait_for_timeout
10. Eventually qtest_wait_for_timeout returns zero, and the server jumps to
the error label, returning zero to globservret, and the thread exits
11. Client thread regains the cpu, and attempts to call SSL_connect, which
fails, as the server is no longer listening
12. We fall into the error case on line 556, and SSL_get_error returns
SSL_ERROR_SSL, which causes clienterr to get set to 1
13. We exit the do{} while loop on line 581, and do the TEST_true check on
line 593. The server having exited wait_for_thread returns true, but
globserverret is still zero from step 10 above, and so the test fails
I can't prove this is the case, as the test only appears to fail in CI,
and we can't dump verbose logging there, lest we affect the timing of
the tests, so this is just a theory, but it seems to fit the
observations we have.
Attempting to fix this, by creating a thread interlock with a condition
variable that blocks the server from ticking the quic reactor until such
time as the client is about to call SSL_connect to prevent the race
condition
Reviewed-by: Saša Nedvědický <sashan@openssl.org>
Reviewed-by: Tomas Mraz <tomas@openssl.org>
(Merged from https://github.com/openssl/openssl/pull/27704)
2025-05-27 21:36:35 +08:00
|
|
|
#if defined(OPENSSL_THREADS) && !defined(CRYPTO_TDEBUG)
|
|
|
|
ossl_crypto_mutex_lock(client_ready_mutex);
|
|
|
|
client_ready = 1;
|
|
|
|
ossl_crypto_condvar_broadcast(client_ready_cond);
|
|
|
|
ossl_crypto_mutex_unlock(client_ready_mutex);
|
|
|
|
#endif
|
2022-11-30 22:21:00 +08:00
|
|
|
retc = SSL_connect(clientssl);
|
2023-08-16 02:53:32 +08:00
|
|
|
if (retc <= 0) {
|
2022-11-30 22:21:00 +08:00
|
|
|
err = SSL_get_error(clientssl, retc);
|
|
|
|
|
2023-08-31 23:18:28 +08:00
|
|
|
if (err == wanterr) {
|
|
|
|
retc = 1;
|
|
|
|
#if defined(OPENSSL_THREADS) && !defined(CRYPTO_TDEBUG)
|
|
|
|
if (qtserv == NULL && rets > 0)
|
|
|
|
tsan_store(&abortserverthread, 1);
|
|
|
|
else
|
|
|
|
#endif
|
|
|
|
rets = 1;
|
|
|
|
} else {
|
|
|
|
if (err != SSL_ERROR_WANT_READ
|
|
|
|
&& err != SSL_ERROR_WANT_WRITE) {
|
|
|
|
TEST_info("SSL_connect() failed %d, %d", retc, err);
|
|
|
|
TEST_openssl_errors();
|
|
|
|
clienterr = 1;
|
|
|
|
}
|
2023-08-16 02:53:32 +08:00
|
|
|
}
|
|
|
|
}
|
2022-11-30 22:21:00 +08:00
|
|
|
}
|
|
|
|
|
2023-09-19 23:52:00 +08:00
|
|
|
qtest_add_time(1);
|
|
|
|
if (clientssl != NULL)
|
2023-05-04 02:01:12 +08:00
|
|
|
SSL_handle_events(clientssl);
|
Attempt to fix occasional failure of quicapi test in ci
https://github.com/openssl/openssl/actions/runs/15214054228/job/42795224720
the theory I have for the cause of this failure is:
1. qtest_create_quic_connection_ex is called for the client
2. The client is in blocking mode, so we fall into the conditional on line 512
3. We create the server thread on line 519, which is non-blocking
4. The scheduler in the failing case, lets the server run ahead of the client
5. Server thread enters qtest_create_quic_connection_ex and iterates steps
6-9 in the do_while loop starting on line 530
6. Server calls qtest_add_time
7. Server calls ossl_quic_tserver_tick
8. Server calls ossl_quic_tserver_is_term_any, received NULL return
9. Server calls qtest_wait_for_timeout
10. Eventually qtest_wait_for_timeout returns zero, adn the server jumps to
the error label, returning zero to globservret, and the thread exits
11. Client thread regains the cpu, and attempts to call SSL_connect, which
fails, as the server is no longer listening
12. We fall into the error case on line 556, and SSL_get_error returns
SSL_ERROR_SSL, which causes clienterr to get set to 1
13. We exit the do{} while loop on line 581, and do the TEST_true check on
line 593. The server having exited wait_for_thread returns true, but
globserverret is still zero from step 10 above, and so the test fails
I can't prove this is the case, as the test only appears to fail in CI,
and we can't dump verbose logging there, lest we affect the timing of
the tests, so this is just a theory, but it seems to fit the
observations we have.
Attempting to fix this, by creating a thread interlock with a condition
variable that blocks the server from ticking the quic reactor until such
time as the client is about to call SSL_connect to prevent the race
condition
Reviewed-by: Saša Nedvědický <sashan@openssl.org>
Reviewed-by: Tomas Mraz <tomas@openssl.org>
(Merged from https://github.com/openssl/openssl/pull/27704)
2025-05-27 21:36:35 +08:00
|
|
|
if (qtserv != NULL) {
|
|
|
|
#if defined(OPENSSL_THREADS) && !defined(CRYPTO_TDEBUG)
|
|
|
|
ossl_crypto_mutex_lock(client_ready_mutex);
|
|
|
|
for (;;) {
|
|
|
|
if (client_ready == 1)
|
|
|
|
break;
|
|
|
|
ossl_crypto_condvar_wait(client_ready_cond, client_ready_mutex);
|
|
|
|
}
|
|
|
|
ossl_crypto_mutex_unlock(client_ready_mutex);
|
|
|
|
#endif
|
2023-09-19 23:52:00 +08:00
|
|
|
ossl_quic_tserver_tick(qtserv);
|
Attempt to fix occasional failure of quicapi test in ci
https://github.com/openssl/openssl/actions/runs/15214054228/job/42795224720
the theory I have for the cause of this failure is:
1. qtest_create_quic_connection_ex is called for the client
2. The client is in blocking mode, so we fall into the conditional on line 512
3. We create the server thread on line 519, which is non-blocking
4. The scheduler in the failing case, lets the server run ahead of the client
5. Server thread enters qtest_create_quic_connection_ex and iterates steps
6-9 in the do_while loop starting on line 530
6. Server calls qtest_add_time
7. Server calls ossl_quic_tserver_tick
8. Server calls ossl_quic_tserver_is_term_any, received NULL return
9. Server calls qtest_wait_for_timeout
10. Eventually qtest_wait_for_timeout returns zero, adn the server jumps to
the error label, returning zero to globservret, and the thread exits
11. Client thread regains the cpu, and attempts to call SSL_connect, which
fails, as the server is no longer listening
12. We fall into the error case on line 556, and SSL_get_error returns
SSL_ERROR_SSL, which causes clienterr to get set to 1
13. We exit the do{} while loop on line 581, and do the TEST_true check on
line 593. The server having exited wait_for_thread returns true, but
globserverret is still zero from step 10 above, and so the test fails
I can't prove this is the case, as the test only appears to fail in CI,
and we can't dump verbose logging there, lest we affect the timing of
the tests, so this is just a theory, but it seems to fit the
observations we have.
Attempting to fix this, by creating a thread interlock with a condition
variable that blocks the server from ticking the quic reactor until such
time as the client is about to call SSL_connect to prevent the race
condition
Reviewed-by: Saša Nedvědický <sashan@openssl.org>
Reviewed-by: Tomas Mraz <tomas@openssl.org>
(Merged from https://github.com/openssl/openssl/pull/27704)
2025-05-27 21:36:35 +08:00
|
|
|
}
|
2023-08-31 23:18:28 +08:00
|
|
|
|
2022-12-07 00:44:09 +08:00
|
|
|
if (!servererr && rets <= 0) {
|
2023-02-06 23:34:05 +08:00
|
|
|
servererr = ossl_quic_tserver_is_term_any(qtserv);
|
2022-12-07 00:44:09 +08:00
|
|
|
if (!servererr)
|
2023-01-11 01:52:18 +08:00
|
|
|
rets = ossl_quic_tserver_is_handshake_confirmed(qtserv);
|
2022-11-30 22:21:00 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
if (clienterr && servererr)
|
|
|
|
goto err;
|
|
|
|
|
2023-03-14 01:36:24 +08:00
|
|
|
if (clientssl != NULL && ++abortctr == MAXLOOPS) {
|
2022-11-30 22:21:00 +08:00
|
|
|
TEST_info("No progress made");
|
|
|
|
goto err;
|
|
|
|
}
|
2023-09-18 23:55:52 +08:00
|
|
|
|
2023-09-19 23:52:00 +08:00
|
|
|
if ((retc <= 0 && !clienterr) || (rets <= 0 && !servererr)) {
|
|
|
|
if (!qtest_wait_for_timeout(clientssl, qtserv))
|
|
|
|
goto err;
|
|
|
|
}
|
2023-08-31 23:18:28 +08:00
|
|
|
} while ((retc <= 0 && !clienterr)
|
|
|
|
|| (rets <= 0 && !servererr
|
|
|
|
#if defined(OPENSSL_THREADS) && !defined(CRYPTO_TDEBUG)
|
|
|
|
&& !tsan_load(&abortserverthread)
|
|
|
|
#endif
|
|
|
|
));
|
2022-11-30 22:21:00 +08:00
|
|
|
|
2023-03-14 01:36:24 +08:00
|
|
|
if (qtserv == NULL && rets > 0) {
|
|
|
|
#if defined(OPENSSL_THREADS) && !defined(CRYPTO_TDEBUG)
|
Attempt to fix occasional failure of quicapi test in ci
https://github.com/openssl/openssl/actions/runs/15214054228/job/42795224720
the theory I have for the cause of this failure is:
1. qtest_create_quic_connection_ex is called for the client
2. The client is in blocking mode, so we fall into the conditional on line 512
3. We create the server thread on line 519, which is non-blocking
4. The scheduler in the failing case, lets the server run ahead of the client
5. Server thread enters qtest_create_quic_connection_ex and iterates steps
6-9 in the do_while loop starting on line 530
6. Server calls qtest_add_time
7. Server calls ossl_quic_tserver_tick
8. Server calls ossl_quic_tserver_is_term_any, received NULL return
9. Server calls qtest_wait_for_timeout
10. Eventually qtest_wait_for_timeout returns zero, adn the server jumps to
the error label, returning zero to globservret, and the thread exits
11. Client thread regains the cpu, and attempts to call SSL_connect, which
fails, as the server is no longer listening
12. We fall into the error case on line 556, and SSL_get_error returns
SSL_ERROR_SSL, which causes clienterr to get set to 1
13. We exit the do{} while loop on line 581, and do the TEST_true check on
line 593. The server having exited wait_for_thread returns true, but
globserverret is still zero from step 10 above, and so the test fails
I can't prove this is the case, as the test only appears to fail in CI,
and we can't dump verbose logging there, lest we affect the timing of
the tests, so this is just a theory, but it seems to fit the
observations we have.
Attempting to fix this, by creating a thread interlock with a condition
variable that blocks the server from ticking the quic reactor until such
time as the client is about to call SSL_connect to prevent the race
condition
Reviewed-by: Saša Nedvědický <sashan@openssl.org>
Reviewed-by: Tomas Mraz <tomas@openssl.org>
(Merged from https://github.com/openssl/openssl/pull/27704)
2025-05-27 21:36:35 +08:00
|
|
|
/*
|
|
|
|
* Make sure we unblock the server before we wait on completion here
|
|
|
|
* in case it didn't happen in the connect loop above
|
|
|
|
*/
|
|
|
|
ossl_crypto_mutex_lock(client_ready_mutex);
|
|
|
|
client_ready = 1;
|
|
|
|
ossl_crypto_condvar_broadcast(client_ready_cond);
|
|
|
|
ossl_crypto_mutex_unlock(client_ready_mutex);
|
2023-03-14 01:36:24 +08:00
|
|
|
if (!TEST_true(wait_for_thread(t)) || !TEST_true(globserverret))
|
|
|
|
goto err;
|
|
|
|
#else
|
|
|
|
TEST_error("Should not happen");
|
|
|
|
goto err;
|
|
|
|
#endif
|
|
|
|
}
|
|
|
|
|
2022-12-07 00:44:09 +08:00
|
|
|
if (!clienterr && !servererr)
|
|
|
|
ret = 1;
|
2022-11-30 22:21:00 +08:00
|
|
|
err:
|
|
|
|
return ret;
|
|
|
|
}
|
2022-12-01 00:41:31 +08:00
/*
 * Create a QUIC connection between qtserv and clientssl and wait for the
 * handshake to complete. Convenience wrapper expecting no specific SSL
 * error (SSL_ERROR_NONE). Returns 1 on success, 0 on failure.
 */
int qtest_create_quic_connection(QUIC_TSERVER *qtserv, SSL *clientssl)
{
    return qtest_create_quic_connection_ex(qtserv, clientssl, SSL_ERROR_NONE);
}
|
|
#if defined(OPENSSL_THREADS) && !defined(CRYPTO_TDEBUG)
/* Flag (tsan-qualified) telling the shutdown thread to stop ticking */
static TSAN_QUALIFIER int shutdowndone;

/*
 * Thread body: keep the test server's event loop ticking while the blocking
 * client completes SSL_shutdown() on the other thread.
 */
static void run_server_shutdown_thread(void)
{
    /*
     * This will operate in a busy loop because the server does not block,
     * but should be acceptable because it is local and we expect this to be
     * fast
     */
    do
        ossl_quic_tserver_tick(globtserv);
    while (!tsan_load(&shutdowndone));
}
#endif
|
|
/*
 * Shut down an established client/server connection pair.
 *
 * If clientssl is in blocking mode a helper thread keeps the server ticking
 * while the client blocks in SSL_shutdown(); otherwise the server is ticked
 * from this thread in a busy loop. Returns 1 on a clean shutdown, 0 on error.
 */
int qtest_shutdown(QUIC_TSERVER *qtserv, SSL *clientssl)
{
    int tickserver = 1;
    int ret = 0;
#if defined(OPENSSL_THREADS) && !defined(CRYPTO_TDEBUG)
    /*
     * Pointless initialisation to avoid bogus compiler warnings about using
     * t uninitialised
     */
    thread_t t = thread_zero;

    /* Release the client-ready interlock set up for connection creation */
    ossl_crypto_condvar_free(&client_ready_cond);
    client_ready_cond = NULL;
    ossl_crypto_mutex_free(&client_ready_mutex);
    client_ready_mutex = NULL;
#endif

    if (SSL_get_blocking_mode(clientssl) > 0) {
#if defined(OPENSSL_THREADS) && !defined(CRYPTO_TDEBUG)
        /*
         * clientssl is blocking. We will need a thread to complete the
         * connection
         */
        globtserv = qtserv;
        shutdowndone = 0;
        if (!TEST_true(run_thread(&t, run_server_shutdown_thread)))
            return 0;

        /* The helper thread ticks the server, so this thread must not */
        tickserver = 0;
#else
        TEST_error("No thread support in this build");
        return 0;
#endif
    }

    /* Busy loop in non-blocking mode. It should be quick because its local */
    for (;;) {
        int rc = SSL_shutdown(clientssl);

        if (rc == 1) {
            ret = 1;
            break;
        }

        /* Negative return means a fatal error: give up */
        if (rc < 0)
            break;

        if (tickserver)
            ossl_quic_tserver_tick(qtserv);
    }

#if defined(OPENSSL_THREADS) && !defined(CRYPTO_TDEBUG)
    /* Tell the helper thread (if any) to stop, then reap it */
    tsan_store(&shutdowndone, 1);
    if (!tickserver) {
        if (!TEST_true(wait_for_thread(t)))
            ret = 0;
    }
#endif

    return ret;
}
|
|
|
2023-01-12 00:04:25 +08:00
|
|
|
int qtest_check_server_transport_err(QUIC_TSERVER *qtserv, uint64_t code)
|
2022-12-07 00:44:09 +08:00
|
|
|
{
|
2023-04-19 02:30:55 +08:00
|
|
|
const QUIC_TERMINATE_CAUSE *cause;
|
2022-12-07 00:44:09 +08:00
|
|
|
|
|
|
|
ossl_quic_tserver_tick(qtserv);
|
|
|
|
|
|
|
|
/*
|
2023-01-12 00:04:25 +08:00
|
|
|
* Check that the server has closed with the specified code from the client
|
2022-12-07 00:44:09 +08:00
|
|
|
*/
|
2023-01-12 00:04:25 +08:00
|
|
|
if (!TEST_true(ossl_quic_tserver_is_term_any(qtserv)))
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
cause = ossl_quic_tserver_get_terminate_cause(qtserv);
|
2023-04-19 02:30:55 +08:00
|
|
|
if (!TEST_ptr(cause)
|
|
|
|
|| !TEST_true(cause->remote)
|
2023-06-06 23:25:10 +08:00
|
|
|
|| !TEST_false(cause->app)
|
2023-04-19 02:30:55 +08:00
|
|
|
|| !TEST_uint64_t_eq(cause->error_code, code))
|
2022-12-07 00:44:09 +08:00
|
|
|
return 0;
|
|
|
|
|
|
|
|
return 1;
|
|
|
|
}
|
|
|
|
|
/*
 * Check that the server terminated with a PROTOCOL_VIOLATION transport error
 * raised by the client. Returns 1 on match, 0 otherwise.
 */
int qtest_check_server_protocol_err(QUIC_TSERVER *qtserv)
{
    return qtest_check_server_transport_err(qtserv, OSSL_QUIC_ERR_PROTOCOL_VIOLATION);
}
|
|
|
/*
 * Check that the server terminated with a FRAME_ENCODING_ERROR transport
 * error raised by the client. Returns 1 on match, 0 otherwise.
 */
int qtest_check_server_frame_encoding_err(QUIC_TSERVER *qtserv)
{
    return qtest_check_server_transport_err(qtserv, OSSL_QUIC_ERR_FRAME_ENCODING_ERROR);
}
|
|
|
2023-02-06 23:34:05 +08:00
|
|
|
void qtest_fault_free(QTEST_FAULT *fault)
|
2022-12-01 00:41:31 +08:00
|
|
|
{
|
|
|
|
if (fault == NULL)
|
|
|
|
return;
|
|
|
|
|
2022-12-02 23:52:21 +08:00
|
|
|
packet_plain_finish(fault);
|
|
|
|
handshake_finish(fault);
|
|
|
|
|
2022-12-01 00:41:31 +08:00
|
|
|
OPENSSL_free(fault);
|
|
|
|
}
|
|
|
|
|
|
|
|
static int packet_plain_mutate(const QUIC_PKT_HDR *hdrin,
|
|
|
|
const OSSL_QTX_IOVEC *iovecin, size_t numin,
|
|
|
|
QUIC_PKT_HDR **hdrout,
|
|
|
|
const OSSL_QTX_IOVEC **iovecout,
|
|
|
|
size_t *numout,
|
|
|
|
void *arg)
|
|
|
|
{
|
2023-02-06 23:34:05 +08:00
|
|
|
QTEST_FAULT *fault = arg;
|
2022-12-01 00:41:31 +08:00
|
|
|
size_t i, bufsz = 0;
|
|
|
|
unsigned char *cur;
|
2025-02-19 07:03:39 +08:00
|
|
|
int grow_allowance;
|
2022-12-01 00:41:31 +08:00
|
|
|
|
|
|
|
/* Coalesce our data into a single buffer */
|
|
|
|
|
|
|
|
/* First calculate required buffer size */
|
|
|
|
for (i = 0; i < numin; i++)
|
|
|
|
bufsz += iovecin[i].buf_len;
|
|
|
|
|
|
|
|
fault->pplainio.buf_len = bufsz;
|
|
|
|
|
2025-02-19 07:03:39 +08:00
|
|
|
/*
|
|
|
|
* 1200 is QUIC payload length we use
|
|
|
|
* bufsz is what we got from txp
|
|
|
|
* 16 is the length of tag added by encryption
|
|
|
|
* 14 long header (we assume token length is 0,
|
|
|
|
* which is fine for server not so fine for client)
|
|
|
|
*/
|
|
|
|
grow_allowance = 1200 - bufsz - 16 - 14;
|
|
|
|
grow_allowance -= hdrin->dst_conn_id.id_len;
|
|
|
|
grow_allowance -= hdrin->src_conn_id.id_len;
|
|
|
|
assert(grow_allowance >= 0);
|
|
|
|
bufsz += grow_allowance;
|
2022-12-01 00:41:31 +08:00
|
|
|
|
|
|
|
fault->pplainio.buf = cur = OPENSSL_malloc(bufsz);
|
|
|
|
if (cur == NULL) {
|
|
|
|
fault->pplainio.buf_len = 0;
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
fault->pplainbuf_alloc = bufsz;
|
|
|
|
|
|
|
|
/* Copy in the data from the input buffers */
|
|
|
|
for (i = 0; i < numin; i++) {
|
|
|
|
memcpy(cur, iovecin[i].buf, iovecin[i].buf_len);
|
|
|
|
cur += iovecin[i].buf_len;
|
|
|
|
}
|
|
|
|
|
|
|
|
fault->pplainhdr = *hdrin;
|
|
|
|
|
2025-02-19 07:03:39 +08:00
|
|
|
/*
|
|
|
|
* Cast below is safe because we allocated the buffer
|
|
|
|
* mutation is best effort. we can inject frame if
|
|
|
|
* there is enough space. If there is not enough space
|
|
|
|
* we must give up.
|
|
|
|
*/
|
|
|
|
if (fault->pplaincb != NULL)
|
|
|
|
fault->pplaincb(fault, &fault->pplainhdr,
|
|
|
|
(unsigned char *)fault->pplainio.buf,
|
|
|
|
fault->pplainio.buf_len, fault->pplaincbarg);
|
2022-12-01 00:41:31 +08:00
|
|
|
|
|
|
|
*hdrout = &fault->pplainhdr;
|
|
|
|
*iovecout = &fault->pplainio;
|
|
|
|
*numout = 1;
|
|
|
|
|
|
|
|
return 1;
|
|
|
|
}
|
|
|
|
|
|
|
|
static void packet_plain_finish(void *arg)
|
|
|
|
{
|
2023-02-06 23:34:05 +08:00
|
|
|
QTEST_FAULT *fault = arg;
|
2022-12-01 00:41:31 +08:00
|
|
|
|
|
|
|
/* Cast below is safe because we allocated the buffer */
|
|
|
|
OPENSSL_free((unsigned char *)fault->pplainio.buf);
|
|
|
|
fault->pplainio.buf_len = 0;
|
|
|
|
fault->pplainbuf_alloc = 0;
|
2022-12-02 23:52:21 +08:00
|
|
|
fault->pplainio.buf = NULL;
|
2022-12-01 00:41:31 +08:00
|
|
|
}
|
|
|
|
|
/*
 * Register a callback to be invoked for each plaintext packet the test
 * server sends, installing the plain-packet mutator on the server.
 * Returns the result of installing the mutator (1 success, 0 failure).
 */
int qtest_fault_set_packet_plain_listener(QTEST_FAULT *fault,
                                          qtest_fault_on_packet_plain_cb pplaincb,
                                          void *pplaincbarg)
{
    fault->pplaincb = pplaincb;
    fault->pplaincbarg = pplaincbarg;

    return ossl_quic_tserver_set_plain_packet_mutator(fault->qtserv,
                                                      packet_plain_mutate,
                                                      packet_plain_finish,
                                                      fault);
}
|
|
|
|
|
|
/* To be called from a packet_plain_listener callback */
|
2023-02-06 23:34:05 +08:00
|
|
|
int qtest_fault_resize_plain_packet(QTEST_FAULT *fault, size_t newlen)
|
2022-12-01 00:41:31 +08:00
|
|
|
{
|
|
|
|
unsigned char *buf;
|
|
|
|
size_t oldlen = fault->pplainio.buf_len;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Alloc'd size should always be non-zero, so if this fails we've been
|
|
|
|
* incorrectly called
|
|
|
|
*/
|
|
|
|
if (fault->pplainbuf_alloc == 0)
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
if (newlen > fault->pplainbuf_alloc) {
|
|
|
|
/* This exceeds our growth allowance. Fail */
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Cast below is safe because we allocated the buffer */
|
|
|
|
buf = (unsigned char *)fault->pplainio.buf;
|
|
|
|
|
|
|
|
if (newlen > oldlen) {
|
|
|
|
/* Extend packet with 0 bytes */
|
|
|
|
memset(buf + oldlen, 0, newlen - oldlen);
|
|
|
|
} /* else we're truncating or staying the same */
|
|
|
|
|
|
|
|
fault->pplainio.buf_len = newlen;
|
|
|
|
fault->pplainhdr.len = newlen;
|
|
|
|
|
|
|
|
return 1;
|
|
|
|
}
|
2022-12-02 23:52:21 +08:00
|
|
|
|
2023-01-17 23:16:42 +08:00
|
|
|
/*
|
|
|
|
* Prepend frame data into a packet. To be called from a packet_plain_listener
|
|
|
|
* callback
|
|
|
|
*/
|
2023-06-06 23:25:12 +08:00
|
|
|
int qtest_fault_prepend_frame(QTEST_FAULT *fault, const unsigned char *frame,
|
2023-02-06 23:34:05 +08:00
|
|
|
size_t frame_len)
|
2023-01-17 23:16:42 +08:00
|
|
|
{
|
|
|
|
unsigned char *buf;
|
|
|
|
size_t old_len;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Alloc'd size should always be non-zero, so if this fails we've been
|
|
|
|
* incorrectly called
|
|
|
|
*/
|
|
|
|
if (fault->pplainbuf_alloc == 0)
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
/* Cast below is safe because we allocated the buffer */
|
|
|
|
buf = (unsigned char *)fault->pplainio.buf;
|
|
|
|
old_len = fault->pplainio.buf_len;
|
|
|
|
|
|
|
|
/* Extend the size of the packet by the size of the new frame */
|
2023-02-06 23:34:05 +08:00
|
|
|
if (!TEST_true(qtest_fault_resize_plain_packet(fault,
|
|
|
|
old_len + frame_len)))
|
2023-01-17 23:16:42 +08:00
|
|
|
return 0;
|
|
|
|
|
|
|
|
memmove(buf + frame_len, buf, old_len);
|
|
|
|
memcpy(buf, frame, frame_len);
|
|
|
|
|
|
|
|
return 1;
|
|
|
|
}
|
|
|
|
|
/*
 * Handshake message mutator installed on the test server.
 *
 * Copies the outgoing handshake message into a growable buffer
 * (GROWTH_ALLOWANCE bytes of headroom), dispatches message-type specific
 * callbacks (currently only EncryptedExtensions), then the generic
 * handshake callback, and returns the possibly modified message via
 * msgout/msgoutlen. Returns 1 on success, 0 on failure. On failure the
 * buffer remains owned by fault->handbuf and is freed by handshake_finish().
 */
static int handshake_mutate(const unsigned char *msgin, size_t msginlen,
                            unsigned char **msgout, size_t *msgoutlen,
                            void *arg)
{
    QTEST_FAULT *fault = arg;
    unsigned char *buf;
    unsigned long payloadlen;
    unsigned int msgtype;
    PACKET pkt;

    buf = OPENSSL_malloc(msginlen + GROWTH_ALLOWANCE);
    if (buf == NULL)
        return 0;

    fault->handbuf = buf;
    fault->handbuflen = msginlen;
    fault->handbufalloc = msginlen + GROWTH_ALLOWANCE;
    memcpy(buf, msgin, msginlen);

    /* Parse the 1-byte type / 3-byte length handshake message header */
    if (!PACKET_buf_init(&pkt, buf, msginlen)
            || !PACKET_get_1(&pkt, &msgtype)
            || !PACKET_get_net_3(&pkt, &payloadlen)
            || PACKET_remaining(&pkt) != payloadlen)
        return 0;

    /* Parse specific message types */
    switch (msgtype) {
    case SSL3_MT_ENCRYPTED_EXTENSIONS:
    {
        QTEST_ENCRYPTED_EXTENSIONS ee;

        if (fault->encextcb == NULL)
            break;

        /*
         * The EncryptedExtensions message is very simple. It just has an
         * extensions block in it and nothing else.
         */
        ee.extensions = (unsigned char *)PACKET_data(&pkt);
        ee.extensionslen = payloadlen;
        if (!fault->encextcb(fault, &ee, payloadlen, fault->encextcbarg))
            return 0;
    }
    /* fall through */

    default:
        /* No specific handlers for these message types yet */
        break;
    }

    /* Generic whole-message callback runs after any type-specific one */
    if (fault->handshakecb != NULL
            && !fault->handshakecb(fault, buf, fault->handbuflen,
                                   fault->handshakecbarg))
        return 0;

    *msgout = buf;
    *msgoutlen = fault->handbuflen;

    return 1;
}
|
|
|
|
|
|
static void handshake_finish(void *arg)
|
|
|
|
{
|
2023-02-06 23:34:05 +08:00
|
|
|
QTEST_FAULT *fault = arg;
|
2022-12-02 23:52:21 +08:00
|
|
|
|
|
|
|
OPENSSL_free(fault->handbuf);
|
|
|
|
fault->handbuf = NULL;
|
|
|
|
}
|
|
|
|
|
/*
 * Register a callback invoked for every outgoing handshake message on the
 * test server, installing the handshake mutator. Returns 1 on success,
 * 0 on failure.
 */
int qtest_fault_set_handshake_listener(QTEST_FAULT *fault,
                                       qtest_fault_on_handshake_cb handshakecb,
                                       void *handshakecbarg)
{
    fault->handshakecb = handshakecb;
    fault->handshakecbarg = handshakecbarg;

    return ossl_quic_tserver_set_handshake_mutator(fault->qtserv,
                                                   handshake_mutate,
                                                   handshake_finish,
                                                   fault);
}
|
|
|
/*
 * Register a callback invoked specifically for the server's
 * EncryptedExtensions handshake message; shares the same handshake mutator
 * as qtest_fault_set_handshake_listener(). Returns 1 on success, 0 on
 * failure.
 */
int qtest_fault_set_hand_enc_ext_listener(QTEST_FAULT *fault,
                                          qtest_fault_on_enc_ext_cb encextcb,
                                          void *encextcbarg)
{
    fault->encextcb = encextcb;
    fault->encextcbarg = encextcbarg;

    return ossl_quic_tserver_set_handshake_mutator(fault->qtserv,
                                                   handshake_mutate,
                                                   handshake_finish,
                                                   fault);
}
|
|
|
|
|
|
/* To be called from a handshake_listener callback */
|
2023-02-06 23:34:05 +08:00
|
|
|
int qtest_fault_resize_handshake(QTEST_FAULT *fault, size_t newlen)
|
2022-12-02 23:52:21 +08:00
|
|
|
{
|
|
|
|
unsigned char *buf;
|
|
|
|
size_t oldlen = fault->handbuflen;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Alloc'd size should always be non-zero, so if this fails we've been
|
|
|
|
* incorrectly called
|
|
|
|
*/
|
|
|
|
if (fault->handbufalloc == 0)
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
if (newlen > fault->handbufalloc) {
|
|
|
|
/* This exceeds our growth allowance. Fail */
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
buf = (unsigned char *)fault->handbuf;
|
|
|
|
|
|
|
|
if (newlen > oldlen) {
|
|
|
|
/* Extend packet with 0 bytes */
|
|
|
|
memset(buf + oldlen, 0, newlen - oldlen);
|
|
|
|
} /* else we're truncating or staying the same */
|
|
|
|
|
|
|
|
fault->handbuflen = newlen;
|
|
|
|
return 1;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* To be called from message specific listener callbacks */
|
2023-02-06 23:34:05 +08:00
|
|
|
int qtest_fault_resize_message(QTEST_FAULT *fault, size_t newlen)
|
2022-12-02 23:52:21 +08:00
|
|
|
{
|
|
|
|
/* First resize the underlying message */
|
2023-02-06 23:34:05 +08:00
|
|
|
if (!qtest_fault_resize_handshake(fault, newlen + SSL3_HM_HEADER_LENGTH))
|
2022-12-02 23:52:21 +08:00
|
|
|
return 0;
|
|
|
|
|
|
|
|
/* Fixup the handshake message header */
|
|
|
|
fault->handbuf[1] = (unsigned char)((newlen >> 16) & 0xff);
|
|
|
|
fault->handbuf[2] = (unsigned char)((newlen >> 8) & 0xff);
|
|
|
|
fault->handbuf[3] = (unsigned char)((newlen ) & 0xff);
|
|
|
|
|
|
|
|
return 1;
|
|
|
|
}
|
|
|
|
|
2023-02-06 23:34:05 +08:00
|
|
|
int qtest_fault_delete_extension(QTEST_FAULT *fault,
|
|
|
|
unsigned int exttype, unsigned char *ext,
|
2023-10-26 18:37:21 +08:00
|
|
|
size_t *extlen,
|
|
|
|
BUF_MEM *old_ext)
|
2022-12-02 23:52:21 +08:00
|
|
|
{
|
|
|
|
PACKET pkt, sub, subext;
|
2023-10-26 18:37:21 +08:00
|
|
|
WPACKET old_ext_wpkt;
|
2022-12-02 23:52:21 +08:00
|
|
|
unsigned int type;
|
|
|
|
const unsigned char *start, *end;
|
2023-10-26 18:37:21 +08:00
|
|
|
size_t newlen, w;
|
2022-12-07 00:44:09 +08:00
|
|
|
size_t msglen = fault->handbuflen;
|
2022-12-02 23:52:21 +08:00
|
|
|
|
|
|
|
if (!PACKET_buf_init(&pkt, ext, *extlen))
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
/* Extension block starts with 2 bytes for extension block length */
|
|
|
|
if (!PACKET_as_length_prefixed_2(&pkt, &sub))
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
do {
|
|
|
|
start = PACKET_data(&sub);
|
|
|
|
if (!PACKET_get_net_2(&sub, &type)
|
2022-12-07 00:44:09 +08:00
|
|
|
|| !PACKET_get_length_prefixed_2(&sub, &subext))
|
2022-12-02 23:52:21 +08:00
|
|
|
return 0;
|
|
|
|
} while (type != exttype);
|
|
|
|
|
|
|
|
/* Found it */
|
|
|
|
end = PACKET_data(&sub);
|
|
|
|
|
2023-10-26 18:37:21 +08:00
|
|
|
if (old_ext != NULL) {
|
|
|
|
if (!WPACKET_init(&old_ext_wpkt, old_ext))
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
if (!WPACKET_memcpy(&old_ext_wpkt, PACKET_data(&subext),
|
|
|
|
PACKET_remaining(&subext))
|
|
|
|
|| !WPACKET_get_total_written(&old_ext_wpkt, &w)) {
|
|
|
|
WPACKET_cleanup(&old_ext_wpkt);
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
WPACKET_finish(&old_ext_wpkt);
|
|
|
|
old_ext->length = w;
|
|
|
|
}
|
|
|
|
|
2022-12-02 23:52:21 +08:00
|
|
|
/*
|
|
|
|
* If we're not the last extension we need to move the rest earlier. The
|
|
|
|
* cast below is safe because we own the underlying buffer and we're no
|
|
|
|
* longer making PACKET calls.
|
|
|
|
*/
|
|
|
|
if (end < ext + *extlen)
|
|
|
|
memmove((unsigned char *)start, end, end - start);
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Calculate new extensions payload length =
|
|
|
|
* Original length
|
|
|
|
* - 2 extension block length bytes
|
|
|
|
* - length of removed extension
|
|
|
|
*/
|
|
|
|
newlen = *extlen - 2 - (end - start);
|
|
|
|
|
|
|
|
/* Fixup the length bytes for the extension block */
|
|
|
|
ext[0] = (unsigned char)((newlen >> 8) & 0xff);
|
|
|
|
ext[1] = (unsigned char)((newlen ) & 0xff);
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Length of the whole extension block is the new payload length plus the
|
|
|
|
* 2 bytes for the length
|
|
|
|
*/
|
|
|
|
*extlen = newlen + 2;
|
|
|
|
|
|
|
|
/* We can now resize the message */
|
2022-12-07 00:44:09 +08:00
|
|
|
if ((size_t)(end - start) + SSL3_HM_HEADER_LENGTH > msglen)
|
|
|
|
return 0; /* Should not happen */
|
|
|
|
msglen -= (end - start) + SSL3_HM_HEADER_LENGTH;
|
2023-02-06 23:34:05 +08:00
|
|
|
if (!qtest_fault_resize_message(fault, msglen))
|
2022-12-02 23:52:21 +08:00
|
|
|
return 0;
|
|
|
|
|
|
|
|
return 1;
|
|
|
|
}
|
2022-12-13 00:52:50 +08:00
|
|
|
|
|
|
|
#define BIO_TYPE_CIPHER_PACKET_FILTER (0x80 | BIO_TYPE_FILTER)
|
|
|
|
|
|
|
|
static BIO_METHOD *pcipherbiometh = NULL;
|
|
|
|
|
|
|
|
# define BIO_MSG_N(array, stride, n) (*(BIO_MSG *)((char *)(array) + (n)*(stride)))
|
|
|
|
|
|
|
|
static int pcipher_sendmmsg(BIO *b, BIO_MSG *msg, size_t stride,
|
|
|
|
size_t num_msg, uint64_t flags,
|
|
|
|
size_t *num_processed)
|
|
|
|
{
|
|
|
|
BIO *next = BIO_next(b);
|
|
|
|
ossl_ssize_t ret = 0;
|
|
|
|
size_t i = 0, tmpnump;
|
|
|
|
QUIC_PKT_HDR hdr;
|
|
|
|
PACKET pkt;
|
2023-01-11 02:55:05 +08:00
|
|
|
unsigned char *tmpdata;
|
2025-01-30 02:18:57 +08:00
|
|
|
QTEST_DATA *bdata = NULL;
|
2022-12-13 00:52:50 +08:00
|
|
|
|
|
|
|
if (next == NULL)
|
|
|
|
return 0;
|
|
|
|
|
2025-01-30 02:18:57 +08:00
|
|
|
bdata = BIO_get_data(b);
|
|
|
|
if (bdata == NULL || bdata->fault == NULL
|
|
|
|
|| (bdata->fault->pciphercb == NULL && bdata->fault->datagramcb == NULL))
|
2022-12-13 00:52:50 +08:00
|
|
|
return BIO_sendmmsg(next, msg, stride, num_msg, flags, num_processed);
|
|
|
|
|
|
|
|
if (num_msg == 0) {
|
|
|
|
*num_processed = 0;
|
|
|
|
return 1;
|
|
|
|
}
|
|
|
|
|
|
|
|
for (i = 0; i < num_msg; ++i) {
|
2025-01-30 02:18:57 +08:00
|
|
|
bdata->fault->msg = BIO_MSG_N(msg, stride, i);
|
2022-12-13 00:52:50 +08:00
|
|
|
|
|
|
|
/* Take a copy of the data so that callbacks can modify it */
|
2025-01-30 02:18:57 +08:00
|
|
|
tmpdata = OPENSSL_malloc(bdata->fault->msg.data_len + GROWTH_ALLOWANCE);
|
2023-01-11 02:55:05 +08:00
|
|
|
if (tmpdata == NULL)
|
2022-12-13 00:52:50 +08:00
|
|
|
return 0;
|
2025-01-30 02:18:57 +08:00
|
|
|
memcpy(tmpdata, bdata->fault->msg.data, bdata->fault->msg.data_len);
|
|
|
|
bdata->fault->msg.data = tmpdata;
|
|
|
|
bdata->fault->msgalloc = bdata->fault->msg.data_len + GROWTH_ALLOWANCE;
|
2023-01-11 02:55:05 +08:00
|
|
|
|
2025-01-30 02:18:57 +08:00
|
|
|
if (bdata->fault->pciphercb != NULL) {
|
|
|
|
if (!PACKET_buf_init(&pkt, bdata->fault->msg.data, bdata->fault->msg.data_len))
|
2023-01-11 02:55:05 +08:00
|
|
|
return 0;
|
|
|
|
|
|
|
|
do {
|
|
|
|
if (!ossl_quic_wire_decode_pkt_hdr(&pkt,
|
2025-01-30 02:18:57 +08:00
|
|
|
bdata->short_conn_id_len,
|
|
|
|
1, 0, &hdr, NULL, NULL))
|
2023-01-11 02:55:05 +08:00
|
|
|
goto out;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* hdr.data is const - but its our buffer so casting away the
|
|
|
|
* const is safe
|
|
|
|
*/
|
2025-01-30 02:18:57 +08:00
|
|
|
if (!bdata->fault->pciphercb(bdata->fault, &hdr,
|
|
|
|
(unsigned char *)hdr.data, hdr.len,
|
|
|
|
bdata->fault->pciphercbarg))
|
2023-01-11 02:55:05 +08:00
|
|
|
goto out;
|
|
|
|
|
|
|
|
/*
|
2023-07-18 03:07:59 +08:00
|
|
|
* At the moment modifications to hdr by the callback
|
2023-01-11 02:55:05 +08:00
|
|
|
* are ignored. We might need to rewrite the QUIC header to
|
|
|
|
* enable tests to change this. We also don't yet have a
|
|
|
|
* mechanism for the callback to change the encrypted data
|
|
|
|
* length. It's not clear if that's needed or not.
|
|
|
|
*/
|
|
|
|
} while (PACKET_remaining(&pkt) > 0);
|
|
|
|
}
|
2022-12-13 00:52:50 +08:00
|
|
|
|
2025-01-30 02:18:57 +08:00
|
|
|
if (bdata->fault->datagramcb != NULL
|
|
|
|
&& !bdata->fault->datagramcb(bdata->fault, &bdata->fault->msg, stride,
|
|
|
|
bdata->fault->datagramcbarg))
|
2023-01-11 02:55:05 +08:00
|
|
|
goto out;
|
2022-12-13 00:52:50 +08:00
|
|
|
|
2025-01-30 02:18:57 +08:00
|
|
|
if (!BIO_sendmmsg(next, &bdata->fault->msg, stride, 1, flags, &tmpnump)) {
|
2022-12-13 00:52:50 +08:00
|
|
|
*num_processed = i;
|
|
|
|
goto out;
|
|
|
|
}
|
|
|
|
|
2025-01-30 02:18:57 +08:00
|
|
|
OPENSSL_free(bdata->fault->msg.data);
|
|
|
|
bdata->fault->msg.data = NULL;
|
|
|
|
bdata->fault->msgalloc = 0;
|
2022-12-13 00:52:50 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
*num_processed = i;
|
|
|
|
out:
|
2023-02-06 23:34:05 +08:00
|
|
|
ret = i > 0;
|
2025-01-30 02:18:57 +08:00
|
|
|
OPENSSL_free(bdata->fault->msg.data);
|
|
|
|
bdata->fault->msg.data = NULL;
|
2022-12-13 00:52:50 +08:00
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
|
|
|
static long pcipher_ctrl(BIO *b, int cmd, long larg, void *parg)
|
|
|
|
{
|
|
|
|
BIO *next = BIO_next(b);
|
|
|
|
|
|
|
|
if (next == NULL)
|
|
|
|
return -1;
|
|
|
|
|
|
|
|
return BIO_ctrl(next, cmd, larg, parg);
|
|
|
|
}
|
|
|
|
|
2025-01-30 02:18:57 +08:00
|
|
|
static int pcipher_destroy(BIO *b)
|
|
|
|
{
|
|
|
|
OPENSSL_free(BIO_get_data(b));
|
|
|
|
return 1;
|
|
|
|
}
|
|
|
|
|
2023-06-06 23:25:12 +08:00
|
|
|
BIO_METHOD *qtest_get_bio_method(void)
|
2022-12-13 00:52:50 +08:00
|
|
|
{
|
|
|
|
BIO_METHOD *tmp;
|
|
|
|
|
|
|
|
if (pcipherbiometh != NULL)
|
|
|
|
return pcipherbiometh;
|
|
|
|
|
|
|
|
tmp = BIO_meth_new(BIO_TYPE_CIPHER_PACKET_FILTER, "Cipher Packet Filter");
|
|
|
|
|
|
|
|
if (!TEST_ptr(tmp))
|
|
|
|
return NULL;
|
|
|
|
|
|
|
|
if (!TEST_true(BIO_meth_set_sendmmsg(tmp, pcipher_sendmmsg))
|
2025-01-30 02:18:57 +08:00
|
|
|
|| !TEST_true(BIO_meth_set_ctrl(tmp, pcipher_ctrl))
|
|
|
|
|| !TEST_true(BIO_meth_set_destroy(tmp, pcipher_destroy)))
|
2022-12-13 00:52:50 +08:00
|
|
|
goto err;
|
|
|
|
|
|
|
|
pcipherbiometh = tmp;
|
|
|
|
tmp = NULL;
|
|
|
|
err:
|
|
|
|
BIO_meth_free(tmp);
|
|
|
|
return pcipherbiometh;
|
|
|
|
}
|
|
|
|
|
2023-02-06 23:34:05 +08:00
|
|
|
int qtest_fault_set_packet_cipher_listener(QTEST_FAULT *fault,
|
|
|
|
qtest_fault_on_packet_cipher_cb pciphercb,
|
|
|
|
void *pciphercbarg)
|
2022-12-13 00:52:50 +08:00
|
|
|
{
|
|
|
|
fault->pciphercb = pciphercb;
|
|
|
|
fault->pciphercbarg = pciphercbarg;
|
|
|
|
|
|
|
|
return 1;
|
2023-01-11 02:55:05 +08:00
|
|
|
}
|
|
|
|
|
2023-02-06 23:34:05 +08:00
|
|
|
int qtest_fault_set_datagram_listener(QTEST_FAULT *fault,
|
|
|
|
qtest_fault_on_datagram_cb datagramcb,
|
|
|
|
void *datagramcbarg)
|
2023-01-11 02:55:05 +08:00
|
|
|
{
|
|
|
|
fault->datagramcb = datagramcb;
|
|
|
|
fault->datagramcbarg = datagramcbarg;
|
|
|
|
|
|
|
|
return 1;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* To be called from a datagram_listener callback */
|
2023-02-06 23:34:05 +08:00
|
|
|
int qtest_fault_resize_datagram(QTEST_FAULT *fault, size_t newlen)
|
2023-01-11 02:55:05 +08:00
|
|
|
{
|
|
|
|
if (newlen > fault->msgalloc)
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
if (newlen > fault->msg.data_len)
|
|
|
|
memset((unsigned char *)fault->msg.data + fault->msg.data_len, 0,
|
|
|
|
newlen - fault->msg.data_len);
|
|
|
|
|
|
|
|
fault->msg.data_len = newlen;
|
|
|
|
|
|
|
|
return 1;
|
|
|
|
}
|
2023-09-19 18:52:42 +08:00
|
|
|
|
2024-02-14 19:45:15 +08:00
|
|
|
int qtest_fault_set_bw_limit(QTEST_FAULT *fault,
|
|
|
|
size_t ctos_bw, size_t stoc_bw,
|
|
|
|
int noise_rate)
|
|
|
|
{
|
|
|
|
BIO *sbio = fault->noiseargs.sbio;
|
|
|
|
BIO *cbio = fault->noiseargs.cbio;
|
|
|
|
|
|
|
|
if (!TEST_ptr(sbio) || !TEST_ptr(cbio))
|
|
|
|
return 0;
|
|
|
|
if (!TEST_int_eq(BIO_ctrl(sbio, BIO_CTRL_NOISE_RATE, noise_rate, NULL), 1))
|
|
|
|
return 0;
|
|
|
|
if (!TEST_int_eq(BIO_ctrl(cbio, BIO_CTRL_NOISE_RATE, noise_rate, NULL), 1))
|
|
|
|
return 0;
|
|
|
|
/* We set the bandwidth limit on the sending side */
|
|
|
|
if (!TEST_int_eq(BIO_ctrl(cbio, BIO_CTRL_NOISE_SEND_BANDWIDTH,
|
|
|
|
(long)ctos_bw, NULL), 1))
|
|
|
|
return 0;
|
|
|
|
if (!TEST_int_eq(BIO_ctrl(sbio, BIO_CTRL_NOISE_SEND_BANDWIDTH,
|
|
|
|
(long)stoc_bw, NULL), 1))
|
|
|
|
return 0;
|
|
|
|
return 1;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2023-09-19 18:52:42 +08:00
|
|
|
int bio_msg_copy(BIO_MSG *dst, BIO_MSG *src)
|
|
|
|
{
|
|
|
|
/*
|
|
|
|
* Note it is assumed that the originally allocated data sizes for dst and
|
|
|
|
* src are the same
|
|
|
|
*/
|
|
|
|
memcpy(dst->data, src->data, src->data_len);
|
|
|
|
dst->data_len = src->data_len;
|
|
|
|
dst->flags = src->flags;
|
|
|
|
if (dst->local != NULL) {
|
|
|
|
if (src->local != NULL) {
|
2023-09-21 19:16:38 +08:00
|
|
|
if (!TEST_true(BIO_ADDR_copy(dst->local, src->local)))
|
2023-09-19 18:52:42 +08:00
|
|
|
return 0;
|
|
|
|
} else {
|
|
|
|
BIO_ADDR_clear(dst->local);
|
|
|
|
}
|
|
|
|
}
|
2023-09-21 19:16:38 +08:00
|
|
|
if (!TEST_true(BIO_ADDR_copy(dst->peer, src->peer)))
|
2023-09-19 18:52:42 +08:00
|
|
|
return 0;
|
|
|
|
|
|
|
|
return 1;
|
|
|
|
}
|