in quic/s2n-quic-transport/src/endpoint/mod.rs [960:1240]
fn create_client_connection(
&mut self,
request: endpoint::connect::Request,
timestamp: Timestamp,
) -> Result<(), connection::Error> {
assert!(
Cfg::ENDPOINT_TYPE.is_client(),
"only client endpoints can be created with client configurations"
);
let endpoint::connect::Request {
connect:
endpoint::connect::Connect {
remote_address,
server_name: hostname,
deduplicate,
},
sender,
} = request;
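// Allocate an internal connection ID, used to key this connection in the endpoint's
// internal maps.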
let internal_connection_id = self.connection_id_generator.generate_id();
if deduplicate && !Cfg::DcEndpoint::ENABLED {
// FIXME: Deduplication causes us to return full `Connection` handles to the application
// via the `sender`. Currently, though, only a single `Connection` is supported; only
// multiple `Handle`s are allowed (largely because we only store one waker in various
// places internally).
//
// Just prevent that configuration for now.
return Err(connection::Error::invalid_configuration(
"Requested connection deduplication which is not supported without (unstable) dc provider configured",
));
}
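// When deduplication is requested, try to attach this request to an in-progress or
// already-established connection to the same destination before opening a new one.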
let open_registry = if deduplicate {
match self.connection_id_mapper.lazy_open(
internal_connection_id,
endpoint::connect::Connect {
remote_address,
server_name: hostname.clone(),
deduplicate,
},
) {
Ok(existing) => {
if let Err(sender) = self
.connections
.register_sender_for_client_connection(&existing, sender)
{
// If we are in the Ok branch of lazy_open, then the connection already exists
// in open_request_map (i.e., it is currently open). If we failed to register
// the sender, then the connection is already fully open. So, we need to look up
// the connection handle and send it.
let handle = self
.connections
.get_connection_handle(&existing)
.expect("handle exists for active connection");
let _ = sender.send(Ok(handle));
return Ok(());
} else {
// Done; the sender will be notified with a Result<handle> once the connection opens.
return Ok(());
}
}
Err(registry) => Some(registry),
}
} else {
None
};
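// Generate the local (source) connection ID, its optional expiration time, and the
// handshake rotation policy, as configured by the connection ID format provider.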
let local_connection_id = self
.config
.context()
.connection_id_format
.generate(&ConnectionInfo::new(&remote_address));
let local_connection_id_expiration_time = self
.config
.context()
.connection_id_format
.lifetime()
.map(|duration| timestamp + duration);
let rotate_handshake_connection_id = self
.config
.context()
.connection_id_format
.rotate_handshake_connection_id();
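// Register the local connection ID with the endpoint's connection ID mapper so that
// packets arriving for this ID can be routed to the connection.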
let local_id_registry = {
// TODO: the client currently generates a random stateless_reset_token but doesn't
// transmit it. Refactor `create_local_id_registry` to instead accept None for
// stateless_reset_token.
let stateless_reset_token = self
.config
.context()
.stateless_reset_token_generator
.generate(local_connection_id.as_bytes());
self.connection_id_mapper.create_local_id_registry(
internal_connection_id,
&local_connection_id,
local_connection_id_expiration_time,
stateless_reset_token,
rotate_handshake_connection_id,
)
};
let endpoint_context = self.config.context();
//= https://www.rfc-editor.org/rfc/rfc9000#section-7.2
//# When an Initial packet is sent by a client that has not previously
//# received an Initial or Retry packet from the server, the client
//# populates the Destination Connection ID field with an unpredictable
//# value.
let original_destination_connection_id = {
let mut data = [0u8; InitialId::MIN_LEN];
endpoint_context
.random_generator
.public_random_fill(&mut data);
InitialId::try_from_bytes(&data).expect("InitialId creation failed.")
};
//= https://www.rfc-editor.org/rfc/rfc9000#section-10.3
//# Note that clients cannot use the
//# stateless_reset_token transport parameter because their transport
//# parameters do not have confidentiality protection.
//
// The original_destination_connection_id is a random value used to establish the
// connection. Since the connection is not yet secured, the client must not set a
// stateless_reset_token.
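// Create the registry that tracks connection IDs issued by the peer.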
let peer_id_registry = self
.connection_id_mapper
.create_client_peer_id_registry(internal_connection_id, rotate_handshake_connection_id);
//= https://www.rfc-editor.org/rfc/rfc9000#section-15
//# This version of the specification is identified by the number
//# 0x00000001.
let quic_version = 0x00000001;
let meta = event::builder::ConnectionMeta {
endpoint_type: Cfg::ENDPOINT_TYPE,
id: internal_connection_id.into(),
timestamp,
};
let supervisor_context = supervisor::Context::new(
self.connections.handshake_connections(),
self.connections.len(),
&remote_address,
true,
);
let mut event_context = endpoint_context.event_subscriber.create_connection_context(
&meta.clone().into_event(),
&event::builder::ConnectionInfo {}.into_event(),
);
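// Build the client transport parameters; initial_source_connection_id must match the
// Source Connection ID the client sends in its first Initial packet.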
let mut transport_parameters = ClientTransportParameters {
initial_source_connection_id: Some(local_connection_id.into()),
..Default::default()
};
let limits = endpoint_context
.connection_limits
.on_connection(&LimitsInfo::new(&remote_address));
let mut endpoint_publisher = event::EndpointPublisherSubscriber::new(
event::builder::EndpointMeta {
endpoint_type: Cfg::ENDPOINT_TYPE,
timestamp,
},
Some(quic_version),
endpoint_context.event_subscriber,
);
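// Resolve the MTU configuration for this path; an invalid configuration fails the
// connection attempt and emits an endpoint event.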
let mtu_config = endpoint_context
.mtu
.config(&remote_address)
.map_err(|_err| {
let error = connection::Error::invalid_configuration(
"MTU provider produced an invalid MTU configuration",
);
endpoint_publisher.on_endpoint_connection_attempt_failed(
event::builder::EndpointConnectionAttemptFailed { error },
);
error
})?;
let mut publisher = event::ConnectionPublisherSubscriber::new(
meta,
quic_version,
endpoint_context.event_subscriber,
&mut event_context,
);
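// Instantiate the congestion controller for the initial path using the resolved MTU
// configuration and remote address.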
let congestion_controller = {
let path_info = congestion_controller::PathInfo::new(&mtu_config, &remote_address);
endpoint_context
.congestion_controller
.new_congestion_controller(path_info)
};
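// Populate the remaining transport parameters: connection limits, datagram support,
// the active connection ID limit, and (if enabled) the dc supported versions.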
transport_parameters.load_limits(&limits);
transport_parameters.max_datagram_frame_size = endpoint_context
.datagram
.max_datagram_frame_size(&PreConnectionInfo::new())
.try_into()
.expect("Failed to convert max_datagram_frame_size");
transport_parameters.active_connection_id_limit = s2n_quic_core::varint::VarInt::from(
connection::peer_id_registry::ACTIVE_CONNECTION_ID_LIMIT,
)
.try_into()
.unwrap();
if Cfg::DcEndpoint::ENABLED {
transport_parameters.dc_supported_versions =
DcSupportedVersions::for_client(dc::SUPPORTED_VERSIONS);
}
//= https://www.rfc-editor.org/rfc/rfc9000#section-7.2
//# The Destination Connection ID field from the first Initial packet
//# sent by a client is used to determine packet protection keys for
//# Initial packets.
//
// Use the randomly generated `original_destination_connection_id` to derive the
// Initial packet protection keys.
let (initial_key, initial_header_key) =
<<Cfg::TLSEndpoint as tls::Endpoint>::Session as CryptoSuite>::InitialKey::new_client(
original_destination_connection_id.as_bytes(),
);
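// Create the TLS client session, handing it the encoded transport parameters and the
// server name.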
let tls_session = endpoint_context
.tls
// TODO: should SNI be optional? rustls expects an SNI but other TLS providers don't seem
// to require this value.
.new_client_session(
&transport_parameters,
hostname.expect("application should provide a valid server name"),
);
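// Initialize the packet space manager with the Initial keys derived above.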
let space_manager = PacketSpaceManager::new(
original_destination_connection_id,
tls_session,
initial_key,
initial_header_key,
timestamp,
&mut publisher,
);
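// Create a wakeup handle so the connection can be scheduled for processing by the
// endpoint, and a path handle for the remote address.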
let wakeup_handle = self
.wakeup_queue
.create_wakeup_handle(internal_connection_id);
let path_handle =
<<Cfg as endpoint::Config>::PathHandle as path::Handle>::from_remote_address(
remote_address,
);
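// Assemble all per-connection state into the connection parameters.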
let connection_parameters = connection::Parameters {
internal_connection_id,
local_id_registry,
peer_id_registry,
space_manager,
wakeup_handle,
peer_connection_id: original_destination_connection_id.into(),
local_connection_id,
path_handle,
congestion_controller,
timestamp,
quic_version,
limits,
mtu_config,
event_context,
supervisor_context: &supervisor_context,
event_subscriber: endpoint_context.event_subscriber,
datagram_endpoint: endpoint_context.datagram,
dc_endpoint: endpoint_context.dc,
open_registry,
limits_endpoint: endpoint_context.connection_limits,
};
let connection = <Cfg as crate::endpoint::Config>::Connection::new(connection_parameters)?;
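// Store the connection in the connection container along with the `sender`, which
// will be notified with the connection handle (or an error) once the connection opens.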
self.connections
.insert_client_connection(connection, internal_connection_id, sender);
Ok(())
}