mirror of
https://github.com/outbackdingo/firezone.git
synced 2026-01-27 10:18:54 +00:00
refactor: rename reconnect to reset (#6057)
Connection roaming within `connlib` has changed a fair bit since we introduced the `reconnect` function. The new implementation is basically a hard-reset of all state within `connlib`. Renaming this function across all layers makes this more obvious. Resolves: #6038.
This commit is contained in:
@@ -25,5 +25,5 @@ object ConnlibSession {
|
||||
fd: Int,
|
||||
): Boolean
|
||||
|
||||
external fun reconnect(connlibSession: Long): Boolean
|
||||
external fun reset(connlibSession: Long): Boolean
|
||||
}
|
||||
|
||||
@@ -33,7 +33,7 @@ class NetworkMonitor(private val tunnelService: TunnelService) : ConnectivityMan
|
||||
|
||||
if (lastNetwork != network) {
|
||||
lastNetwork = network
|
||||
ConnlibSession.reconnect(tunnelService.connlibSessionPtr!!)
|
||||
ConnlibSession.reset(tunnelService.connlibSessionPtr!!)
|
||||
}
|
||||
|
||||
// Release mutex lock
|
||||
|
||||
@@ -497,13 +497,13 @@ pub unsafe extern "system" fn Java_dev_firezone_android_tunnel_ConnlibSession_se
|
||||
/// at any point before or during operation of this function.
|
||||
#[allow(non_snake_case)]
|
||||
#[no_mangle]
|
||||
pub unsafe extern "system" fn Java_dev_firezone_android_tunnel_ConnlibSession_reconnect(
|
||||
pub unsafe extern "system" fn Java_dev_firezone_android_tunnel_ConnlibSession_reset(
|
||||
_: JNIEnv,
|
||||
_: JClass,
|
||||
session_ptr: jlong,
|
||||
) {
|
||||
let session = &*(session_ptr as *const SessionWrapper);
|
||||
session.inner.reconnect();
|
||||
session.inner.reset();
|
||||
}
|
||||
|
||||
/// # Safety
|
||||
|
||||
@@ -52,7 +52,7 @@ mod ffi {
|
||||
callback_handler: CallbackHandler,
|
||||
) -> Result<WrappedSession, String>;
|
||||
|
||||
fn reconnect(&mut self);
|
||||
fn reset(&mut self);
|
||||
|
||||
// Set system DNS resolvers
|
||||
//
|
||||
@@ -225,8 +225,8 @@ impl WrappedSession {
|
||||
})
|
||||
}
|
||||
|
||||
fn reconnect(&mut self) {
|
||||
self.inner.reconnect()
|
||||
fn reset(&mut self) {
|
||||
self.inner.reset()
|
||||
}
|
||||
|
||||
fn set_dns(&mut self, dns_servers: String) {
|
||||
|
||||
@@ -32,7 +32,7 @@ pub struct Eventloop<C: Callbacks> {
|
||||
/// Commands that can be sent to the [`Eventloop`].
|
||||
pub enum Command {
|
||||
Stop,
|
||||
Reconnect,
|
||||
Reset,
|
||||
SetDns(Vec<IpAddr>),
|
||||
SetTun(Box<dyn Tun>),
|
||||
}
|
||||
@@ -71,7 +71,7 @@ where
|
||||
self.tunnel.set_tun(tun);
|
||||
continue;
|
||||
}
|
||||
Poll::Ready(Some(Command::Reconnect)) => {
|
||||
Poll::Ready(Some(Command::Reset)) => {
|
||||
self.portal.reconnect();
|
||||
if let Err(e) = self.tunnel.reset() {
|
||||
tracing::warn!("Failed to reconnect tunnel: {e}");
|
||||
|
||||
@@ -60,19 +60,19 @@ impl Session {
|
||||
Self { channel: tx }
|
||||
}
|
||||
|
||||
/// Attempts to reconnect a [`Session`].
|
||||
/// Reset a [`Session`].
|
||||
///
|
||||
/// Reconnecting a session will:
|
||||
/// Resetting a session will:
|
||||
///
|
||||
/// - Close and re-open a connection to the portal.
|
||||
/// - Refresh all allocations
|
||||
/// - Rebind local UDP sockets
|
||||
/// - Delete all allocations.
|
||||
/// - Rebind local UDP sockets.
|
||||
///
|
||||
/// # Implementation note
|
||||
///
|
||||
/// The reason we rebind the UDP sockets are:
|
||||
///
|
||||
/// 1. On MacOS, as socket bound to the unspecified IP cannot send to interfaces attached after the socket has been created.
|
||||
/// 1. On MacOS, a socket bound to the unspecified IP cannot send to interfaces attached after the socket has been created.
|
||||
/// 2. Switching between networks changes the 3-tuple of the client.
|
||||
/// The TURN protocol identifies a client's allocation based on the 3-tuple.
|
||||
/// Consequently, an allocation is invalid after switching networks and we clear the state.
|
||||
@@ -80,8 +80,8 @@ impl Session {
|
||||
/// However, if the user would now change _back_ to the previous network,
|
||||
/// the TURN server would recognise the old allocation but the client already lost all its state associated with it.
|
||||
/// To avoid race-conditions like this, we rebind the sockets to a new port.
|
||||
pub fn reconnect(&self) {
|
||||
let _ = self.channel.send(Command::Reconnect);
|
||||
pub fn reset(&self) {
|
||||
let _ = self.channel.send(Command::Reset);
|
||||
}
|
||||
|
||||
/// Sets a new set of upstream DNS servers for this [`Session`].
|
||||
|
||||
@@ -808,7 +808,7 @@ async fn run_controller(
|
||||
() = com_worker.notified() => {
|
||||
if controller.status.connlib_is_up() {
|
||||
tracing::debug!("Internet up/down changed, calling `Session::reconnect`");
|
||||
controller.ipc_client.reconnect().await?;
|
||||
controller.ipc_client.reset().await?;
|
||||
}
|
||||
},
|
||||
result = dns_listener.notified() => {
|
||||
|
||||
@@ -79,10 +79,10 @@ impl Client {
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub(crate) async fn reconnect(&mut self) -> Result<()> {
|
||||
self.send_msg(&IpcClientMsg::Reconnect)
|
||||
pub(crate) async fn reset(&mut self) -> Result<()> {
|
||||
self.send_msg(&IpcClientMsg::Reset)
|
||||
.await
|
||||
.context("Couldn't send Reconnect")?;
|
||||
.context("Couldn't send Reset")?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
|
||||
@@ -72,7 +72,7 @@ impl Default for Cmd {
|
||||
pub enum ClientMsg {
|
||||
Connect { api_url: String, token: String },
|
||||
Disconnect,
|
||||
Reconnect,
|
||||
Reset,
|
||||
SetDns(Vec<IpAddr>),
|
||||
}
|
||||
|
||||
@@ -382,11 +382,7 @@ impl<'a> Handler<'a> {
|
||||
tracing::error!("Error - Got Disconnect when we're already not connected");
|
||||
}
|
||||
}
|
||||
ClientMsg::Reconnect => self
|
||||
.connlib
|
||||
.as_mut()
|
||||
.context("No connlib session")?
|
||||
.reconnect(),
|
||||
ClientMsg::Reset => self.connlib.as_mut().context("No connlib session")?.reset(),
|
||||
ClientMsg::SetDns(v) => self
|
||||
.connlib
|
||||
.as_mut()
|
||||
|
||||
@@ -206,7 +206,7 @@ mod tests {
|
||||
.expect("Error while waiting for next IPC client");
|
||||
while let Some(req) = rx.next().await {
|
||||
let req = req.expect("Error while reading from IPC client");
|
||||
ensure!(req == IpcClientMsg::Reconnect);
|
||||
ensure!(req == IpcClientMsg::Reset);
|
||||
tx.send(&IpcServerMsg::OnUpdateResources(vec![]))
|
||||
.await
|
||||
.expect("Error while writing to IPC client");
|
||||
@@ -222,7 +222,7 @@ mod tests {
|
||||
.await
|
||||
.context("Error while connecting to IPC server")?;
|
||||
|
||||
let req = IpcClientMsg::Reconnect;
|
||||
let req = IpcClientMsg::Reset;
|
||||
for _ in 0..10 {
|
||||
tx.send(&req)
|
||||
.await
|
||||
|
||||
@@ -215,7 +215,7 @@ pub fn run_only_headless_client() -> Result<()> {
|
||||
},
|
||||
() = hangup => {
|
||||
tracing::info!("Caught SIGHUP");
|
||||
session.reconnect();
|
||||
session.reset();
|
||||
continue;
|
||||
},
|
||||
result = dns_changed => {
|
||||
@@ -225,8 +225,8 @@ pub fn run_only_headless_client() -> Result<()> {
|
||||
continue;
|
||||
},
|
||||
() = network_changed => {
|
||||
tracing::info!("Network change, reconnecting Session");
|
||||
session.reconnect();
|
||||
tracing::info!("Network change, resetting Session");
|
||||
session.reset();
|
||||
continue;
|
||||
},
|
||||
cb = cb_rx.next() => cb.context("cb_rx unexpectedly ran empty")?,
|
||||
|
||||
@@ -52,7 +52,7 @@ class Adapter {
|
||||
/// Track our last fetched DNS resolvers to know whether to tell connlib they've updated
|
||||
private var lastFetchedResolvers: [String] = []
|
||||
|
||||
/// Used to avoid needlessly sending reconnects to connlib
|
||||
/// Used to avoid needlessly sending resets to connlib
|
||||
private var primaryInterfaceName: String?
|
||||
|
||||
/// Private queue used to ensure consistent ordering among path update and connlib callbacks
|
||||
@@ -260,7 +260,7 @@ extension Adapter {
|
||||
// If our primary interface changes, we can be certain the old socket shouldn't be
|
||||
// used anymore.
|
||||
if path.availableInterfaces.first?.name != primaryInterfaceName {
|
||||
session.reconnect()
|
||||
session.reset()
|
||||
primaryInterfaceName = path.availableInterfaces.first?.name
|
||||
}
|
||||
|
||||
|
||||
Reference in New Issue
Block a user