refactor(connlib): use events for pushing updated resource list (#5035)

The API of connlib is designed around a uni-directional dataflow where
commands flow one way and events flow the other way. By design, this
creates a system of eventual consistency: We don't exactly know when
connlib will emit an event. This is important because it gives us
flexibility in what the internals of connlib look like. It also forces
the downstream apps to be able to handle any event at any point, which
avoids bugs where clients rely on a particular ordering that may just be
an implementation detail.

To achieve all of this, it is important that we don't introduce APIs
with return values. As soon as a function returns a value, it commits to
being able to compute this return value _synchronously_. Any refactoring
that may make the computation of the return value asynchronous is then a
breaking change.

Consequently, APIs like `handle_timeout` should never return a value.
Instead, they should queue an event that the layer above reacts to
accordingly.
Author: Thomas Eizinger
Date: 2024-05-21 07:20:45 +10:00
Committed by: GitHub
Parent: 361aafb746
Commit: 3fba5745ee
3 changed files with 26 additions and 14 deletions
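
To make the pattern concrete, here is a minimal, self-contained sketch in the
spirit of this change. All names (`State`, `Event`, `poll_event`) are
illustrative stand-ins rather than connlib's actual API: instead of
`handle_timeout` reporting a change through a `bool` return value, the state
machine queues an event and the caller drains the queue whenever it polls.

use std::collections::VecDeque;
use std::time::Instant;

// Illustrative stand-ins; the real types in connlib are richer.
#[derive(Debug)]
enum Event {
    ResourcesChanged,
}

#[derive(Default)]
struct State {
    buffered_events: VecDeque<Event>,
}

impl State {
    // Before: `fn handle_timeout(&mut self, now: Instant) -> bool` committed to
    // computing "did the resource list change?" synchronously.
    // After: no return value; a change is reported as a queued event instead.
    fn handle_timeout(&mut self, _now: Instant) {
        // ... expire timers, drive connections, etc. ...
        // If the resource list changed as a side effect, queue an event.
        self.buffered_events.push_back(Event::ResourcesChanged);
    }

    // The layer above drains events at a time of its own choosing.
    fn poll_event(&mut self) -> Option<Event> {
        self.buffered_events.pop_front()
    }
}

fn main() {
    let mut state = State::default();
    state.handle_timeout(Instant::now());
    while let Some(event) = state.poll_event() {
        // e.g. forward to the UI / the layer above
        println!("{event:?}");
    }
}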

File 1 of 3

@@ -147,6 +147,17 @@ where
     .send(PHOENIX_TOPIC, EgressMessages::ReuseConnection(connection));
 }
 }
+firezone_tunnel::ClientEvent::ResourcesChanged { resources } => {
+    // Note: This may look a bit weird: We are reading an event from the tunnel and yet delegate back to the tunnel here.
+    // Couldn't the tunnel just do this internally?
+    // Technically, yes.
+    // But, we are only accessing the callbacks here which _eventually_ will be removed from `Tunnel`.
+    // At that point, the tunnel has to emit this event and we need to handle it without delegating back to the tunnel.
+    // We only access the callbacks here because `Tunnel` already has them and the callbacks are the current way of talking to the UI.
+    // At a later point, we will probably map to another event here that gets pushed further up.
+    self.tunnel.callbacks.on_update_resources(resources)
+}
 }
 }
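
A side note on the comment above: it hints that, once the callbacks are removed
from `Tunnel`, this arm would map the tunnel event into an event of the
eventloop's own that gets pushed further up, instead of calling
`on_update_resources` directly. A rough sketch of that shape, with entirely
made-up names (`TunnelEvent`, `AppEvent`, a `ResourceDescription` stand-in),
not code from this PR:

use std::collections::VecDeque;

// Stand-in for the real resource description type.
#[derive(Debug)]
struct ResourceDescription;

// Event coming out of the tunnel layer.
enum TunnelEvent {
    ResourcesChanged { resources: Vec<ResourceDescription> },
}

// Event the eventloop emits towards the app / UI layer.
#[derive(Debug)]
enum AppEvent {
    ResourcesChanged { resources: Vec<ResourceDescription> },
}

#[derive(Default)]
struct Eventloop {
    pending_app_events: VecDeque<AppEvent>,
}

impl Eventloop {
    // Translate the lower-level event instead of reaching back into the tunnel;
    // the dataflow stays uni-directional all the way up.
    fn handle_tunnel_event(&mut self, event: TunnelEvent) {
        match event {
            TunnelEvent::ResourcesChanged { resources } => self
                .pending_app_events
                .push_back(AppEvent::ResourcesChanged { resources }),
        }
    }
}

fn main() {
    let mut eventloop = Eventloop::default();
    eventloop.handle_tunnel_event(TunnelEvent::ResourcesChanged { resources: vec![] });
    assert!(matches!(
        eventloop.pending_app_events.pop_front(),
        Some(AppEvent::ResourcesChanged { .. })
    ));
}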

File 2 of 3

@@ -859,9 +859,7 @@ impl ClientState {
     earliest(self.next_dns_refresh, self.node.poll_timeout())
 }
-/// Returns whether resources statuses have updated
-pub fn handle_timeout(&mut self, now: Instant) -> bool {
-    let mut resources_updated = false;
+pub fn handle_timeout(&mut self, now: Instant) {
     self.node.handle_timeout(now);
     match self.next_dns_refresh {
@@ -899,7 +897,10 @@ impl ClientState {
 match event {
     snownet::Event::ConnectionFailed(id) => {
         self.cleanup_connected_gateway(&id);
-        resources_updated = true;
+        self.buffered_events
+            .push_back(ClientEvent::ResourcesChanged {
+                resources: self.resources(),
+            });
     }
     snownet::Event::NewIceCandidate {
         connection,
@@ -921,12 +922,13 @@ impl ClientState {
     }),
     snownet::Event::ConnectionEstablished(id) => {
         self.update_site_status_by_gateway(&id, Status::Online);
-        resources_updated = true;
+        self.buffered_events
+            .push_back(ClientEvent::ResourcesChanged {
+                resources: self.resources(),
+            });
     }
 }
 }
-resources_updated
 }
 fn update_site_status_by_gateway(&mut self, gateway_id: &GatewayId, status: Status) {

File 3 of 3

@@ -6,6 +6,7 @@
 use boringtun::x25519::StaticSecret;
 use chrono::Utc;
 use connlib_shared::{
+    callbacks,
     messages::{ClientId, GatewayId, Relay, RelayId, ResourceId, ReuseConnection},
     Callbacks, Result,
 };
@@ -118,13 +119,7 @@ where
     self.device_read_buf.as_mut(),
 )? {
     Poll::Ready(io::Input::Timeout(timeout)) => {
-        let resources_updated = self.role_state.handle_timeout(timeout);
-        if resources_updated {
-            self.callbacks
-                .on_update_resources(self.role_state.resources());
-        }
+        self.role_state.handle_timeout(timeout);
         continue;
     }
     Poll::Ready(io::Input::Device(packet)) => {
@@ -262,6 +257,10 @@ pub enum ClientEvent {
 RefreshResources {
     connections: Vec<ReuseConnection>,
 },
+/// The list of resources has changed and UI clients may have to be updated.
+ResourcesChanged {
+    resources: Vec<callbacks::ResourceDescription>,
+},
 }
 pub enum GatewayEvent {