From 7e0fa50cae2400a30117fe65aa2c569d2ce6a3ad Mon Sep 17 00:00:00 2001 From: Thomas Eizinger Date: Thu, 3 Oct 2024 07:14:51 +1000 Subject: [PATCH] fix(connlib): handle silently rebooted / disconnected relays (#6666) Our relays are essential for connectivity because they also perform STUN for us through which we learn our server-reflexive address. Thus, we must at all times have at least one relay that we can reach in order to establish a connection. The portal tracks the connectivity to the relays for us and in case any of them go down, sends us a `relays_presence` message, meaning we can stop using that relay and migrate any relayed connections to a new one. This works well for as long as we are connected to the portal while the relay is rebooting / going-down. If we are not currently connected to the portal and a relay we are using reboots, we don't learn about it. At least if we are actively using it, the connection will fail and further attempted communication with the relay will time-out and we will stop using it. In case we aren't currently using the relay, this gets a bit trickier. If we aren't using the relay but it rebooted while we were partitioned from the portal, logging in again might return the same relay to us in the `init` message, but this time with different credentials. The first bug that we are fixing in this PR is that we previously ignored those credentials because we already knew about the relay, thinking that we can still use our existing credentials. The fix here is to also compare the credentials and ditch the local state if they differ. The second bug identified during fixing the first one is that we need to pro-actively probe whether all other relays that we know about are actually still responsive. For that, we issue a `REFRESH` message to them. If that one times-out or fails otherwise, we will remove that one from our list of `Allocation`s too. To fix the 2nd bug, several changes were necessary: 1. 
We lower the log-level of `Disconnecting from relay` from ERROR to WARN. Any ERROR emitted during a test-run fails our test-suite, which is what partially motivated this. The test suite builds on the assumption that ERRORs are fatal and thus should never happen during our tests. This change surfaces that disconnecting from a relay can indeed happen during normal operation, which justifies lowering this to WARN. Users should at a minimum monitor at WARN to be alerted about problems. 2. We reduce the total backoff duration for requests to relays from 60s to 10s. The current 60s result in a total of 8 retries. UDP is unreliable but it isn't THAT unreliable to justify retrying everything for 60s. We also use a 10s timeout for ICE, which means these are now aligned to better match each other. We had to change the max backoff duration because we only idle-spin for at most 10s in the tests and thus the current 60s were too long to detect that a relay actually disappeared. 3. We had to shuffle around some function calls to make sure all intermediary event buffers are emptied at the right point in time to make the test deterministic. Fixes: #6648. 
--- rust/connlib/snownet/src/allocation.rs | 29 ++- rust/connlib/snownet/src/backoff.rs | 8 +- rust/connlib/snownet/src/node.rs | 199 ++++++++++++++---- .../tunnel/proptest-regressions/tests.txt | 5 + rust/connlib/tunnel/src/client.rs | 18 +- rust/connlib/tunnel/src/gateway.rs | 12 +- rust/connlib/tunnel/src/tests/reference.rs | 53 +++-- rust/connlib/tunnel/src/tests/strategies.rs | 6 +- rust/connlib/tunnel/src/tests/sut.rs | 100 +++++---- rust/connlib/tunnel/src/tests/transition.rs | 5 + 10 files changed, 304 insertions(+), 131 deletions(-) diff --git a/rust/connlib/snownet/src/allocation.rs b/rust/connlib/snownet/src/allocation.rs index 4fa3ffbd5..f489b2176 100644 --- a/rust/connlib/snownet/src/allocation.rs +++ b/rust/connlib/snownet/src/allocation.rs @@ -259,9 +259,7 @@ impl Allocation { tracing::debug!("Refreshing allocation"); - // Allocation is not suspended here, we check as part of `handle_input` whether we need to `ALLOCATE` or `REFRESH` - self.active_socket = None; - self.send_binding_requests(); + self.authenticate_and_queue(make_refresh_request(self.software.clone()), None); } #[tracing::instrument(level = "debug", skip_all, fields(%from, tid, method, class, rtt))] @@ -620,6 +618,7 @@ impl Allocation { // If we fail to queue the refresh message because we've exceeded our backoff, give up. if !queued && is_refresh { + self.active_socket = None; // The socket seems to no longer be reachable. 
self.invalidate_allocation(); } @@ -788,6 +787,16 @@ impl Allocation { self.credentials.is_some() } + pub fn matches_credentials(&self, username: &Username, password: &str) -> bool { + self.credentials + .as_ref() + .is_some_and(|c| &c.username == username && c.password == password) + } + + pub fn matches_socket(&self, socket: &RelaySocket) -> bool { + &self.server == socket + } + fn log_update(&self, now: Instant) { tracing::info!( srflx_ip4 = ?self.ip4_srflx_candidate.as_ref().map(|c| c.addr()), @@ -814,6 +823,8 @@ impl Allocation { } fn invalidate_allocation(&mut self) { + tracing::info!(active_socket = ?self.active_socket, "Invalidating allocation"); + if let Some(candidate) = self.ip4_allocation.take() { self.events.push_back(CandidateEvent::Invalid(candidate)) } @@ -1933,10 +1944,6 @@ mod tests { allocation.refresh_with_same_credentials(); - let binding = allocation.next_message().unwrap(); - assert_eq!(binding.method(), BINDING); - allocation.handle_test_input_ip4(&binding_response(&binding, PEER1), Instant::now()); - let refresh = allocation.next_message().unwrap(); assert_eq!(refresh.method(), REFRESH); @@ -2033,10 +2040,6 @@ mod tests { allocation.refresh_with_same_credentials(); - let binding = allocation.next_message().unwrap(); - assert_eq!(binding.method(), BINDING); - allocation.handle_test_input_ip4(&binding_response(&binding, PEER1), Instant::now()); - let refresh = allocation.next_message().unwrap(); allocation.handle_test_input_ip4(&allocation_mismatch(&refresh), Instant::now()); @@ -2337,10 +2340,6 @@ mod tests { let now = now + Duration::from_secs(1); allocation.refresh(now); - let binding = allocation.next_message().unwrap(); - assert_eq!(binding.method(), BINDING); - allocation.handle_test_input_ip4(&binding_response(&binding, PEER1), Instant::now()); - // If the relay is restarted, our current credentials will be invalid. Simulate with an "unauthorized" response". 
let now = now + Duration::from_secs(1); let refresh = allocation.next_message().unwrap(); diff --git a/rust/connlib/snownet/src/backoff.rs b/rust/connlib/snownet/src/backoff.rs index f7db0959c..036729d99 100644 --- a/rust/connlib/snownet/src/backoff.rs +++ b/rust/connlib/snownet/src/backoff.rs @@ -24,7 +24,7 @@ pub fn new( multiplier: backoff::default::MULTIPLIER, max_interval: Duration::from_millis(backoff::default::MAX_INTERVAL_MILLIS), start_time: now, - max_elapsed_time: Some(Duration::from_secs(60)), + max_elapsed_time: Some(Duration::from_secs(10)), clock: ManualClock { now }, } } @@ -33,7 +33,7 @@ pub fn new( /// /// The current strategy is multiplying the previous interval by 1.5 and adding them up. #[cfg(test)] -pub fn steps(start: Instant) -> [Instant; 8] { +pub fn steps(start: Instant) -> [Instant; 4] { fn secs(secs: f64) -> Duration { Duration::from_nanos((secs * 1_000_000_000.0) as u64) } @@ -43,9 +43,5 @@ pub fn steps(start: Instant) -> [Instant; 8] { start + secs(1.0 + 1.5), start + secs(1.0 + 1.5 + 2.25), start + secs(1.0 + 1.5 + 2.25 + 3.375), - start + secs(1.0 + 1.5 + 2.25 + 3.375 + 5.0625), - start + secs(1.0 + 1.5 + 2.25 + 3.375 + 5.0625 + 7.59375), - start + secs(1.0 + 1.5 + 2.25 + 3.375 + 5.0625 + 7.59375 + 11.390625), - start + secs(1.0 + 1.5 + 2.25 + 3.375 + 5.0625 + 7.59375 + 11.390625 + 17.0859375), ] } diff --git a/rust/connlib/snownet/src/node.rs b/rust/connlib/snownet/src/node.rs index 363ccd5f6..869cb897d 100644 --- a/rust/connlib/snownet/src/node.rs +++ b/rust/connlib/snownet/src/node.rs @@ -13,10 +13,11 @@ use ip_packet::{ConvertibleIpv4Packet, ConvertibleIpv6Packet, IpPacket, IpPacket use itertools::Itertools as _; use rand::rngs::StdRng; use rand::seq::IteratorRandom; -use rand::{random, SeedableRng}; +use rand::{random, Rng, SeedableRng}; use secrecy::{ExposeSecret, Secret}; use sha2::Digest; use std::borrow::Cow; +use std::collections::btree_map::Entry; use std::collections::{BTreeMap, BTreeSet}; use std::hash::Hash; use 
std::marker::PhantomData; @@ -266,7 +267,7 @@ where } #[tracing::instrument(level = "info", skip_all, fields(%cid))] - pub fn remove_remote_candidate(&mut self, cid: TId, candidate: String) { + pub fn remove_remote_candidate(&mut self, cid: TId, candidate: String, now: Instant) { let candidate = match Candidate::from_sdp_string(&candidate) { Ok(c) => c, Err(e) => { @@ -277,6 +278,7 @@ where if let Some(agent) = self.connections.agent_mut(cid) { agent.invalidate_candidate(&candidate); + agent.handle_timeout(now); // We may have invalidated the last candidate, ensure we check our nomination state. } } @@ -423,7 +425,11 @@ where /// /// As such, it ends up being cleaner to "drain" all lower-level components of their events, transmits etc within this function. pub fn handle_timeout(&mut self, now: Instant) { - self.bindings_and_allocations_drain_events(); + for allocation in self.allocations.values_mut() { + allocation.handle_timeout(now); + } + + self.allocations_drain_events(); for (id, connection) in self.connections.iter_established_mut() { connection.handle_timeout(id, now, &mut self.allocations, &mut self.buffered_transmits); @@ -433,10 +439,6 @@ where connection.handle_timeout(id, now); } - for allocation in self.allocations.values_mut() { - allocation.handle_timeout(now); - } - let next_reset = *self.next_rate_limiter_reset.get_or_insert(now); if now >= next_reset { @@ -447,12 +449,14 @@ where self.allocations .retain(|rid, allocation| match allocation.can_be_freed() { Some(e) => { - tracing::error!(%rid, "Disconnecting from relay; {e}"); + tracing::warn!(%rid, "Disconnecting from relay; {e}"); false } None => true, }); + self.connections + .check_relays_available(&self.allocations, &mut self.rng); self.connections.gc(&mut self.pending_events); } @@ -485,21 +489,18 @@ where now: Instant, ) { // First, invalidate all candidates from relays that we should stop using. 
- for rid in to_remove { - let Some(allocation) = self.allocations.remove(&rid) else { + for rid in &to_remove { + let Some(allocation) = self.allocations.remove(rid) else { tracing::debug!(%rid, "Cannot delete unknown allocation"); continue; }; - for (cid, agent, _guard) in self.connections.agents_mut() { - for candidate in allocation - .current_candidates() - .filter(|c| c.kind() == CandidateKind::Relayed) - { - remove_local_candidate(cid, agent, &candidate, &mut self.pending_events); - } - } + invalidate_allocation_candidates( + &mut self.connections, + &allocation, + &mut self.pending_events, + ); tracing::info!(%rid, address = ?allocation.server(), "Removed TURN server"); } @@ -515,24 +516,61 @@ where continue; }; - if self.allocations.contains_key(rid) { - tracing::info!(%rid, address = ?server, "Skipping known TURN server"); - continue; + match self.allocations.entry(*rid) { + Entry::Vacant(v) => { + v.insert(Allocation::new( + *server, + username, + password.clone(), + realm, + now, + self.session_id.clone(), + )); + + tracing::info!(%rid, address = ?server, "Added new TURN server"); + } + Entry::Occupied(mut o) => { + let allocation = o.get(); + + if allocation.matches_credentials(&username, password) + && allocation.matches_socket(server) + { + tracing::info!(%rid, address = ?server, "Skipping known TURN server"); + continue; + } + + invalidate_allocation_candidates( + &mut self.connections, + allocation, + &mut self.pending_events, + ); + + o.insert(Allocation::new( + *server, + username, + password.clone(), + realm, + now, + self.session_id.clone(), + )); + + tracing::info!(%rid, address = ?server, "Replaced TURN server"); + } } + } - self.allocations.insert( - *rid, - Allocation::new( - *server, - username, - password.clone(), - realm, - now, - self.session_id.clone(), - ), - ); + let newly_added_relays = to_add + .iter() + .map(|(id, _, _, _, _)| *id) + .collect::>(); - tracing::info!(%rid, address = ?server, "Added new TURN server"); + // Third, 
check if other relays are still present. + for (_, previous_allocation) in self + .allocations + .iter_mut() + .filter(|(id, _)| !newly_added_relays.contains(id)) + { + previous_allocation.refresh(now); } } @@ -742,13 +780,14 @@ where })) } - fn bindings_and_allocations_drain_events(&mut self) { - let allocation_events = self - .allocations - .iter_mut() - .flat_map(|(rid, allocation)| Some((*rid, allocation.poll_event()?))); + fn allocations_drain_events(&mut self) { + let allocation_events = self.allocations.iter_mut().flat_map(|(rid, allocation)| { + std::iter::from_fn(|| allocation.poll_event()).map(|e| (*rid, e)) + }); for (rid, event) in allocation_events { + tracing::trace!(%rid, ?event); + match event { CandidateEvent::New(candidate) if candidate.kind() == CandidateKind::ServerReflexive => @@ -774,7 +813,11 @@ where /// Sample a relay to use for a new connection. fn sample_relay(&mut self) -> Option { - self.allocations.keys().copied().choose(&mut self.rng) + let rid = self.allocations.keys().copied().choose(&mut self.rng)?; + + tracing::debug!(%rid, "Sampled relay"); + + Some(rid) } } @@ -1019,6 +1062,66 @@ where }); } + fn check_relays_available( + &mut self, + allocations: &BTreeMap, + rng: &mut impl Rng, + ) { + // For initial connections, we can just update the relay to be used. + for (_, c) in self.iter_initial_mut() { + if c.relay.is_some_and(|r| allocations.contains_key(&r)) { + continue; + } + + let _guard = c.span.enter(); + + let Some(new_rid) = allocations.keys().copied().choose(rng) else { + continue; + }; + + tracing::info!(old_rid = ?c.relay, %new_rid, "Updating relay"); + c.relay = Some(new_rid); + } + + // For established connections, we check if we are currently using the relay. + for (_, c) in self.iter_established_mut() { + let _guard = c.span.enter(); + + use ConnectionState::*; + let peer_socket = match &mut c.state { + Connected { peer_socket, .. 
} | Idle { peer_socket } => peer_socket, + Failed => continue, + Connecting { + relay: maybe_relay, .. + } => { + let Some(relay) = maybe_relay else { + continue; + }; + + if allocations.contains_key(relay) { + continue; + } + + tracing::debug!("Selected relay disconnected during ICE; connection may fail"); + *maybe_relay = None; + continue; + } + }; + + let relay = match peer_socket { + PeerSocket::Direct { .. } => continue, // Don't care if relay of direct connection disappears, we weren't using it anyway. + PeerSocket::Relay { relay, .. } => relay, + }; + + if allocations.contains_key(relay) { + continue; // Our relay is still there, no problems. + } + + tracing::info!("Connection failed (relay disconnected)"); + c.state = ConnectionState::Failed; + } + } + fn stats(&self) -> impl Iterator + '_ { self.established.iter().map(move |(id, c)| (*id, c.stats)) } @@ -1166,6 +1269,24 @@ fn add_local_candidate( } } +fn invalidate_allocation_candidates( + connections: &mut Connections, + allocation: &Allocation, + pending_events: &mut VecDeque>, +) where + TId: Eq + Hash + Copy + Ord + fmt::Display, + RId: Copy + Eq + Hash + PartialEq + Ord + fmt::Debug + fmt::Display, +{ + for (cid, agent, _guard) in connections.agents_mut() { + for candidate in allocation + .current_candidates() + .filter(|c| c.kind() == CandidateKind::Relayed) + { + remove_local_candidate(cid, agent, &candidate, pending_events); + } + } +} + fn remove_local_candidate( id: TId, agent: &mut IceAgent, diff --git a/rust/connlib/tunnel/proptest-regressions/tests.txt b/rust/connlib/tunnel/proptest-regressions/tests.txt index bfbf99892..7cf198ed3 100644 --- a/rust/connlib/tunnel/proptest-regressions/tests.txt +++ b/rust/connlib/tunnel/proptest-regressions/tests.txt @@ -107,3 +107,8 @@ cc 4f5a2f6c9162963e20d82f9e6dfddca6992401ae65287776b1c3a736f7e1f1f7 cc f0ffe4d3c6a019810f4dc87fb0f741c9b76d5756c0077edeee657ec3a5193df9 cc ebb6357451ca734f198bc35ced587cfa51eea4df815a75ac57b3476e6aa3fe71 cc 
aa50269a0b4c691fd00812648f5d26853e3f2581939c1c3d35c4aff2811ee2a4 # shrinks to (ReferenceState { client: Host { inner: RefClient { id: aa161b58-2acd-0f88-a1ff-6af707903c09, key: PrivateKey("9fd04b63de8d3ae6e83d039369bf29de01fab81f31cafe5732e186cdd9ca5118"), known_hosts: {"api.firez.one": [2bf4:75e5:1edd:1eca:8e47:1e7f:7706:cdce]}, tunnel_ip4: 100.64.0.1, tunnel_ip6: fd00:2021:1111::, ipv4_routes: {Ipv4Network { network_address: 100.96.0.0, netmask: 11 }, Ipv4Network { network_address: 100.100.111.0, netmask: 24 }}, ipv6_routes: {Ipv6Network { network_address: fd00:2021:1111:8000::, netmask: 107 }, Ipv6Network { network_address: fd00:2021:1111:8000:100:100:111:0, netmask: 120 }} }, ip4: None, ip6: Some(2001:db80::40), default_port: 13004, latency: 285ms }, gateways: {1b82ddd6-902f-d519-394f-92ae107fec08: Host { inner: RefGateway { key: PrivateKey("ce2232673aa01c948004881d3c86ed13212393f8aaea40a7e853c494a645189e") }, ip4: Some(203.0.113.25), ip6: Some(2001:db80::b), default_port: 39793, latency: 125ms }, 3476f787-9f99-9cfb-845d-6bc3068085b8: Host { inner: RefGateway { key: PrivateKey("ed9647b905876947cbc98cfbce2362215b76a76fbeec72eea179280a0685b0c5") }, ip4: Some(203.0.113.90), ip6: Some(2001:db80::37), default_port: 42103, latency: 82ms }, 3a154caa-8804-7461-9a86-81614e2bafe0: Host { inner: RefGateway { key: PrivateKey("8659e1d83a85b5f2bc8d6321af680364f1969f1d51ab2c1f2307ed8573afe751") }, ip4: Some(203.0.113.4), ip6: Some(2001:db80::42), default_port: 16650, latency: 124ms }, e16813f6-f291-9c92-7385-2ddf57598b07: Host { inner: RefGateway { key: PrivateKey("56d997c3583de22abfbe5cbf2f30ebe1883823f757dec45037db25c577a9fd59") }, ip4: Some(203.0.113.92), ip6: Some(2001:db80::32), default_port: 14859, latency: 38ms }}, relays: {5a54264a-0e72-0e93-897d-8ddd92da7424: Host { inner: 5725775471353133246, ip4: Some(203.0.113.97), ip6: Some(2001:db80::10), default_port: 3478, latency: 45ms }}, dns_servers: {1AEA7C457539FE1D6979A3E69BA2117D: Host { inner: RefDns, ip4: 
Some(217.255.130.74), ip6: None, default_port: 53, latency: 36ms }, 33ECD04A65AC03B0DBBE5949AAC04019: Host { inner: RefDns, ip4: Some(225.49.6.141), ip6: None, default_port: 53, latency: 32ms }, A173FB62F64BEDE5A4649C9E15363674: Host { inner: RefDns, ip4: Some(246.68.236.104), ip6: None, default_port: 53, latency: 23ms }, F59A62324FA1DB3C5B3D82765D679D3F: Host { inner: RefDns, ip4: None, ip6: Some(::ffff:115.190.101.132), default_port: 53, latency: 46ms }}, portal: StubPortal { gateways_by_site: {2475243d-3a89-de38-9b22-a6a89f161466: {1b82ddd6-902f-d519-394f-92ae107fec08}, 24ca4825-0e32-2e43-fb5f-a00b66c729b0: {3476f787-9f99-9cfb-845d-6bc3068085b8}, db992e5e-afbb-a6ac-d143-8207a10e66cc: {3a154caa-8804-7461-9a86-81614e2bafe0, e16813f6-f291-9c92-7385-2ddf57598b07}}, cidr_resources: {14ed97c5-a780-eef3-a0f6-874f61f737a8: ResourceDescriptionCidr { id: 14ed97c5-a780-eef3-a0f6-874f61f737a8, address: V4(Ipv4Network { network_address: 127.0.0.0, netmask: 25 }), name: "bvujezlta", address_description: None, sites: [Site { id: 24ca4825-0e32-2e43-fb5f-a00b66c729b0, name: "tviv" }] }, 44bac9fa-202f-34b3-b673-3c43b1d232b6: ResourceDescriptionCidr { id: 44bac9fa-202f-34b3-b673-3c43b1d232b6, address: V6(Ipv6Network { network_address: ::ffff:127.0.0.0, netmask: 122 }), name: "xhnb", address_description: None, sites: [Site { id: 2475243d-3a89-de38-9b22-a6a89f161466, name: "xdlakl" }] }, f2b00be1-cdca-4675-2378-a7ff2ecf9f68: ResourceDescriptionCidr { id: f2b00be1-cdca-4675-2378-a7ff2ecf9f68, address: V4(Ipv4Network { network_address: 80.174.231.1, netmask: 32 }), name: "estabe", address_description: None, sites: [Site { id: 2475243d-3a89-de38-9b22-a6a89f161466, name: "xdlakl" }] }, ffe8a2ab-0a71-d306-98da-8123af83e162: ResourceDescriptionCidr { id: ffe8a2ab-0a71-d306-98da-8123af83e162, address: V6(Ipv6Network { network_address: 520e:e55f:10df:6d4a:add5:e191:3def:fbb0, netmask: 125 }), name: "nbheoefpb", address_description: None, sites: [Site { id: 
24ca4825-0e32-2e43-fb5f-a00b66c729b0, name: "tviv" }] }}, dns_resources: {a58cff08-68cf-e48c-cbef-e52a472cd27b: ResourceDescriptionDns { id: a58cff08-68cf-e48c-cbef-e52a472cd27b, address: "**.efyyd.apgalu.avsm", name: "gfrz", address_description: Some("bwzhi"), sites: [Site { id: db992e5e-afbb-a6ac-d143-8207a10e66cc, name: "wewshsy" }] }, af51261a-a158-5734-3752-871a27a655e2: ResourceDescriptionDns { id: af51261a-a158-5734-3752-871a27a655e2, address: "**.rnjqja.whmjsb.wzix", name: "efsqxffipr", address_description: Some("qgylk"), sites: [Site { id: 24ca4825-0e32-2e43-fb5f-a00b66c729b0, name: "tviv" }] }}, internet_resource: ResourceDescriptionInternet { name: "Internet Resource", id: b4a20971-d535-9810-9745-6b90fed299bd, sites: [Site { id: 24ca4825-0e32-2e43-fb5f-a00b66c729b0, name: "tviv" }] } }, drop_direct_client_traffic: true, global_dns_records: {Name(etcww.efyyd.apgalu.avsm.): {2001:db80::f9}, Name(slqfgi.efyyd.apgalu.avsm.): {198.51.100.73}, Name(yhtb.efyyd.apgalu.avsm.): {198.51.100.19, 198.51.100.35, 198.51.100.159, 2001:db80::3e, 2001:db80::e0}, Name(wnysm.kll.): {0.0.0.0, 18.168.101.152, 127.0.0.1}, Name(yvkgu.pkgc.): {119.85.130.192, ::ffff:37.204.219.209, ::ffff:74.157.127.159, 2e16:188:431b:3296:d16f:5cd7:a4f7:7f0d, 87a0:47fa:103f:510c:3fc0:44fd:3168:4ad5}, Name(hml.kzais.ugedo.): {::ffff:133.198.237.87, ::ffff:162.175.130.36, 43f7:2392:8e05:e8db:4f48:c307:ec8b:712}, Name(ctxysv.rnjqja.whmjsb.wzix.): {198.51.100.118}}, network: RoutingTable { routes: {(V4(Ipv4Network { network_address: 203.0.113.4, netmask: 32 }), Gateway(3a154caa-8804-7461-9a86-81614e2bafe0)), (V4(Ipv4Network { network_address: 203.0.113.25, netmask: 32 }), Gateway(1b82ddd6-902f-d519-394f-92ae107fec08)), (V4(Ipv4Network { network_address: 203.0.113.90, netmask: 32 }), Gateway(3476f787-9f99-9cfb-845d-6bc3068085b8)), (V4(Ipv4Network { network_address: 203.0.113.92, netmask: 32 }), Gateway(e16813f6-f291-9c92-7385-2ddf57598b07)), (V4(Ipv4Network { network_address: 203.0.113.97, netmask: 
32 }), Relay(5a54264a-0e72-0e93-897d-8ddd92da7424)), (V4(Ipv4Network { network_address: 217.255.130.74, netmask: 32 }), DnsServer(1AEA7C457539FE1D6979A3E69BA2117D)), (V4(Ipv4Network { network_address: 225.49.6.141, netmask: 32 }), DnsServer(33ECD04A65AC03B0DBBE5949AAC04019)), (V4(Ipv4Network { network_address: 246.68.236.104, netmask: 32 }), DnsServer(A173FB62F64BEDE5A4649C9E15363674)), (V6(Ipv6Network { network_address: ::ffff:115.190.101.132, netmask: 128 }), DnsServer(F59A62324FA1DB3C5B3D82765D679D3F)), (V6(Ipv6Network { network_address: 2001:db80::b, netmask: 128 }), Gateway(1b82ddd6-902f-d519-394f-92ae107fec08)), (V6(Ipv6Network { network_address: 2001:db80::10, netmask: 128 }), Relay(5a54264a-0e72-0e93-897d-8ddd92da7424)), (V6(Ipv6Network { network_address: 2001:db80::32, netmask: 128 }), Gateway(e16813f6-f291-9c92-7385-2ddf57598b07)), (V6(Ipv6Network { network_address: 2001:db80::37, netmask: 128 }), Gateway(3476f787-9f99-9cfb-845d-6bc3068085b8)), (V6(Ipv6Network { network_address: 2001:db80::40, netmask: 128 }), Client(aa161b58-2acd-0f88-a1ff-6af707903c09)), (V6(Ipv6Network { network_address: 2001:db80::42, netmask: 128 }), Gateway(3a154caa-8804-7461-9a86-81614e2bafe0))} } }, [ActivateResource(Dns(ResourceDescriptionDns { id: a58cff08-68cf-e48c-cbef-e52a472cd27b, address: "**.efyyd.apgalu.avsm", name: "gfrz", address_description: Some("bwzhi"), sites: [Site { id: db992e5e-afbb-a6ac-d143-8207a10e66cc, name: "wewshsy" }] })), SendDnsQueries([DnsQuery { domain: Name(slqfgi.efyyd.apgalu.avsm.), r_type: Rtype::AAAA, query_id: 8803, dns_server: [::ffff:115.190.101.132]:53 }, DnsQuery { domain: Name(yhtb.efyyd.apgalu.avsm.), r_type: Rtype::A, query_id: 32250, dns_server: [::ffff:115.190.101.132]:53 }]), SendICMPPacketToDnsResource { src: fd00:2021:1111::, dst: Name(slqfgi.efyyd.apgalu.avsm.), seq: 0, identifier: 0, payload: 0 }, SendICMPPacketToDnsResource { src: fd00:2021:1111::, dst: Name(slqfgi.efyyd.apgalu.avsm.), seq: 0, identifier: 0, payload: 0 }], None) 
+cc e73cf48159abce0e86f167fd5fabd987054d03c20ccd9d42f67c85097ac214f2 # shrinks to (ReferenceState { client: Host { inner: RefClient { id: 00000000-0000-0000-0000-000000000000, key: PrivateKey("823f770b0314a62c2adc3c432c09661322080b59493e4c16884100c09f3a1faf"), known_hosts: {"api.firezone.dev": [0.0.0.0]}, tunnel_ip4: 100.64.0.1, tunnel_ip6: fd00:2021:1111::, ipv4_routes: {Ipv4Network { network_address: 100.96.0.0, netmask: 11 }, Ipv4Network { network_address: 100.100.111.0, netmask: 24 }}, ipv6_routes: {Ipv6Network { network_address: fd00:2021:1111:8000::, netmask: 107 }, Ipv6Network { network_address: fd00:2021:1111:8000:100:100:111:0, netmask: 120 }} }, ip4: Some(203.0.113.6), ip6: None, default_port: 59563, latency: 244ms }, gateways: {3808f2e5-e9ab-8342-a512-e17b61067e78: Host { inner: RefGateway { key: PrivateKey("1873cd719ce6d622c7c350348cdec09e6d81fd64ca8a53c62598f7ff6fa1143d") }, ip4: Some(203.0.113.5), ip6: Some(2001:db80::38), default_port: 21307, latency: 16ms }, 977ffdb5-0f63-520d-5832-05141efa0abb: Host { inner: RefGateway { key: PrivateKey("4152d4180c41afced00154032679e6f8f9e5306b888e0218a39758f19e3db539") }, ip4: Some(203.0.113.36), ip6: Some(2001:db80::36), default_port: 61077, latency: 193ms }, 9af8b5d3-f369-f930-e364-46c0e80fea76: Host { inner: RefGateway { key: PrivateKey("d465457449f8787df68f6937360ff6af6176a43e70e8c03afa5122cfeea95d1a") }, ip4: Some(203.0.113.2), ip6: Some(2001:db80::41), default_port: 62310, latency: 107ms }, 9f9cc4ba-3946-a963-cbbf-c7b7f0f34bed: Host { inner: RefGateway { key: PrivateKey("2e1599b90dde57e85b3dc0050cd13da70d34a9e208f95f39ab1868d1b20ebdcf") }, ip4: Some(203.0.113.54), ip6: Some(2001:db80::7), default_port: 3201, latency: 26ms }, d6f0c484-64ba-0662-ef2d-3bf77a1b6ce7: Host { inner: RefGateway { key: PrivateKey("e388723539e7ba8025686698d6c8aa16788181443f49ad590f30fbc6305ea2bb") }, ip4: Some(203.0.113.13), ip6: Some(2001:db80::a), default_port: 18005, latency: 169ms }}, relays: {a8bcd955-2b63-7aa7-55c3-12bf84edb531: 
Host { inner: 2599781976767244549, ip4: Some(203.0.113.71), ip6: Some(2001:db80::60), default_port: 3478, latency: 48ms }, b98763cf-a409-a2e6-bbe1-02e730c092ad: Host { inner: 820565940652546070, ip4: Some(203.0.113.27), ip6: Some(2001:db80::3f), default_port: 3478, latency: 18ms }}, dns_servers: {32BC7C03A6CF4C4B12C6FCEDDA2325C7: Host { inner: RefDns, ip4: Some(57.77.75.67), ip6: None, default_port: 53, latency: 12ms }, 466A73234F61D762FBB2972232E0584C: Host { inner: RefDns, ip4: Some(224.252.195.243), ip6: None, default_port: 53, latency: 14ms }, 6FE40104DCB3B225DB5EA74A929E706D: Host { inner: RefDns, ip4: Some(233.83.144.49), ip6: None, default_port: 53, latency: 22ms }, A11711315EAF84A2CBB7F1B1D06487E3: Host { inner: RefDns, ip4: None, ip6: Some(::ffff:135.56.39.28), default_port: 53, latency: 44ms }, E53B036CD2B18CE2FF8AFBB3ED36D28C: Host { inner: RefDns, ip4: None, ip6: Some(::ffff:222.170.119.242), default_port: 53, latency: 46ms }, F1F40CD17008FCA42FC3F2B45716E7C4: Host { inner: RefDns, ip4: None, ip6: Some(fd8d:9f89:6a80:64e1:6f00:642:dc9c:d657), default_port: 53, latency: 44ms }}, portal: StubPortal { gateways_by_site: {07a69a08-a349-4880-6ca8-3de36221e6c8: {3808f2e5-e9ab-8342-a512-e17b61067e78}, be372c3d-9b83-4e3a-2a50-5ba6b0caf161: {d6f0c484-64ba-0662-ef2d-3bf77a1b6ce7}, dec56e47-e81c-e3c1-d5c2-bb9f3156fbe1: {977ffdb5-0f63-520d-5832-05141efa0abb, 9af8b5d3-f369-f930-e364-46c0e80fea76, 9f9cc4ba-3946-a963-cbbf-c7b7f0f34bed}}, cidr_resources: {f2c014f0-cebd-4c20-45bf-b71376af8f3c: ResourceDescriptionCidr { id: f2c014f0-cebd-4c20-45bf-b71376af8f3c, address: V6(Ipv6Network { network_address: ::ffff:127.0.0.1, netmask: 128 }), name: "tefq", address_description: None, sites: [Site { id: 07a69a08-a349-4880-6ca8-3de36221e6c8, name: "wdzysojl" }] }}, dns_resources: {6c083f09-0171-c27c-da37-dedb026cb4b1: ResourceDescriptionDns { id: 6c083f09-0171-c27c-da37-dedb026cb4b1, address: "*.cvhhul.zdr.uox", name: "bwfzitjgrb", address_description: Some("csawgpkc"), sites: 
[Site { id: dec56e47-e81c-e3c1-d5c2-bb9f3156fbe1, name: "wsjb" }] }}, internet_resource: ResourceDescriptionInternet { name: "Internet Resource", id: 0a1acd67-3978-9c6e-f50a-457eabad0a1f, sites: [Site { id: dec56e47-e81c-e3c1-d5c2-bb9f3156fbe1, name: "wsjb" }] } }, drop_direct_client_traffic: true, global_dns_records: {Name(vwqh.emtqfx.): {222.180.35.244}, Name(ehn.cvhhul.zdr.uox.): {198.51.100.69, 2001:db80::50, 2001:db80::cb}, Name(mbmcc.cvhhul.zdr.uox.): {198.51.100.26, 2001:db80::bb}, Name(nkvkvb.cvhhul.zdr.uox.): {198.51.100.127, 2001:db80::da}, Name(spnnm.dsn.vpn.): {127.0.0.1, ::ffff:249.210.75.162}}, network: RoutingTable { routes: {(V4(Ipv4Network { network_address: 57.77.75.67, netmask: 32 }), DnsServer(32BC7C03A6CF4C4B12C6FCEDDA2325C7)), (V4(Ipv4Network { network_address: 203.0.113.2, netmask: 32 }), Gateway(9af8b5d3-f369-f930-e364-46c0e80fea76)), (V4(Ipv4Network { network_address: 203.0.113.5, netmask: 32 }), Gateway(3808f2e5-e9ab-8342-a512-e17b61067e78)), (V4(Ipv4Network { network_address: 203.0.113.6, netmask: 32 }), Client(00000000-0000-0000-0000-000000000000)), (V4(Ipv4Network { network_address: 203.0.113.13, netmask: 32 }), Gateway(d6f0c484-64ba-0662-ef2d-3bf77a1b6ce7)), (V4(Ipv4Network { network_address: 203.0.113.27, netmask: 32 }), Relay(b98763cf-a409-a2e6-bbe1-02e730c092ad)), (V4(Ipv4Network { network_address: 203.0.113.36, netmask: 32 }), Gateway(977ffdb5-0f63-520d-5832-05141efa0abb)), (V4(Ipv4Network { network_address: 203.0.113.54, netmask: 32 }), Gateway(9f9cc4ba-3946-a963-cbbf-c7b7f0f34bed)), (V4(Ipv4Network { network_address: 203.0.113.71, netmask: 32 }), Relay(a8bcd955-2b63-7aa7-55c3-12bf84edb531)), (V4(Ipv4Network { network_address: 224.252.195.243, netmask: 32 }), DnsServer(466A73234F61D762FBB2972232E0584C)), (V4(Ipv4Network { network_address: 233.83.144.49, netmask: 32 }), DnsServer(6FE40104DCB3B225DB5EA74A929E706D)), (V6(Ipv6Network { network_address: ::ffff:135.56.39.28, netmask: 128 }), DnsServer(A11711315EAF84A2CBB7F1B1D06487E3)), 
(V6(Ipv6Network { network_address: ::ffff:222.170.119.242, netmask: 128 }), DnsServer(E53B036CD2B18CE2FF8AFBB3ED36D28C)), (V6(Ipv6Network { network_address: 2001:db80::7, netmask: 128 }), Gateway(9f9cc4ba-3946-a963-cbbf-c7b7f0f34bed)), (V6(Ipv6Network { network_address: 2001:db80::a, netmask: 128 }), Gateway(d6f0c484-64ba-0662-ef2d-3bf77a1b6ce7)), (V6(Ipv6Network { network_address: 2001:db80::36, netmask: 128 }), Gateway(977ffdb5-0f63-520d-5832-05141efa0abb)), (V6(Ipv6Network { network_address: 2001:db80::38, netmask: 128 }), Gateway(3808f2e5-e9ab-8342-a512-e17b61067e78)), (V6(Ipv6Network { network_address: 2001:db80::3f, netmask: 128 }), Relay(b98763cf-a409-a2e6-bbe1-02e730c092ad)), (V6(Ipv6Network { network_address: 2001:db80::41, netmask: 128 }), Gateway(9af8b5d3-f369-f930-e364-46c0e80fea76)), (V6(Ipv6Network { network_address: 2001:db80::60, netmask: 128 }), Relay(a8bcd955-2b63-7aa7-55c3-12bf84edb531)), (V6(Ipv6Network { network_address: fd8d:9f89:6a80:64e1:6f00:642:dc9c:d657, netmask: 128 }), DnsServer(F1F40CD17008FCA42FC3F2B45716E7C4))} } }, [ActivateResource(Cidr(ResourceDescriptionCidr { id: f2c014f0-cebd-4c20-45bf-b71376af8f3c, address: V6(Ipv6Network { network_address: ::ffff:127.0.0.1, netmask: 128 }), name: "tefq", address_description: None, sites: [Site { id: 07a69a08-a349-4880-6ca8-3de36221e6c8, name: "wdzysojl" }] })), ActivateResource(Dns(ResourceDescriptionDns { id: 6c083f09-0171-c27c-da37-dedb026cb4b1, address: "*.cvhhul.zdr.uox", name: "bwfzitjgrb", address_description: Some("csawgpkc"), sites: [Site { id: dec56e47-e81c-e3c1-d5c2-bb9f3156fbe1, name: "wsjb" }] })), DeactivateResource(6c083f09-0171-c27c-da37-dedb026cb4b1), UpdateUpstreamDnsServers([233.83.144.49:53, [fd8d:9f89:6a80:64e1:6f00:642:dc9c:d657]:53]), UpdateUpstreamDnsServers([224.252.195.243:53, [::ffff:135.56.39.28]:53, [::ffff:222.170.119.242]:53]), SendICMPPacketToCidrResource { src: fd00:2021:1111::, dst: ::ffff:127.0.0.1, seq: 0, identifier: 0, payload: 0 }, 
DeployNewRelays({b98763cf-a409-a2e6-bbe1-02e730c092ad: Host { inner: 0, ip4: Some(203.0.113.3), ip6: Some(2001:db80::2), default_port: 3478, latency: 18ms }}), SendICMPPacketToCidrResource { src: fd00:2021:1111::, dst: ::ffff:127.0.0.1, seq: 0, identifier: 0, payload: 0 }], None) +cc c55d4a415f5f65da57a49e5a56f1553d4b72c1533bbc69f481393cc0299520f7 # shrinks to (ReferenceState { client: Host { inner: RefClient { id: 00000000-0000-0000-0000-000000000000, key: PrivateKey("cb2d041253141b38ca18002c012b2e240909bb31421f1314108a5237042e0a0e"), known_hosts: {"api.firezone.dev": [0.0.0.0]}, tunnel_ip4: 100.64.0.1, tunnel_ip6: fd00:2021:1111::, ipv4_routes: {Ipv4Network { network_address: 100.96.0.0, netmask: 11 }, Ipv4Network { network_address: 100.100.111.0, netmask: 24 }}, ipv6_routes: {Ipv6Network { network_address: fd00:2021:1111:8000::, netmask: 107 }, Ipv6Network { network_address: fd00:2021:1111:8000:100:100:111:0, netmask: 120 }} }, ip4: Some(203.0.113.2), ip6: None, default_port: 43494, latency: 81ms }, gateways: {d2422792-ecee-5c2c-d938-7141af405dec: Host { inner: RefGateway { key: PrivateKey("10413277c735340a90fa3c0942d5e18a8531b4e63702d1978b87f1419ce3cfc9") }, ip4: Some(203.0.113.53), ip6: Some(2001:db80::14), default_port: 35963, latency: 149ms }}, relays: {549313d8-bed1-e205-b1f9-b7d313278143: Host { inner: 6741266820879122076, ip4: Some(203.0.113.33), ip6: Some(2001:db80::5), default_port: 3478, latency: 43ms }, c865db05-7b0f-0fd2-c892-e923fed0ec43: Host { inner: 2585533426197656492, ip4: Some(203.0.113.8), ip6: Some(2001:db80::46), default_port: 3478, latency: 23ms }}, dns_servers: {4C75A51342F5C316DD27C2354CF679DB: Host { inner: RefDns, ip4: Some(0.0.0.0), ip6: None, default_port: 53, latency: 36ms }, 6B8D28A41DDC108C2B538C1E72CA5351: Host { inner: RefDns, ip4: Some(90.217.45.54), ip6: None, default_port: 53, latency: 23ms }, 91DFAC29CFC2E12A1DD8C4D6AF391763: Host { inner: RefDns, ip4: None, ip6: Some(::ffff:127.0.0.1), default_port: 53, latency: 24ms }, 
F35329313B7CE7003308DCD1F7605549: Host { inner: RefDns, ip4: None, ip6: Some(66db:9a41:6e18:b2fb:2d0c:abcd:88f8:7bdf), default_port: 53, latency: 21ms }}, portal: StubPortal { gateways_by_site: {f599a654-b76a-872b-d4c9-989771a13262: {d2422792-ecee-5c2c-d938-7141af405dec}}, cidr_resources: {0b724551-5de5-1705-5ade-6579cb60acf4: ResourceDescriptionCidr { id: 0b724551-5de5-1705-5ade-6579cb60acf4, address: V6(Ipv6Network { network_address: ::ffff:152.64.97.233, netmask: 128 }), name: "pqqlooq", address_description: None, sites: [Site { id: f599a654-b76a-872b-d4c9-989771a13262, name: "dkkoo" }] }}, dns_resources: {8cf0a3b1-be99-6dfe-b796-fba194f22113: ResourceDescriptionDns { id: 8cf0a3b1-be99-6dfe-b796-fba194f22113, address: "*.garkr.rocno", name: "vxclzkpdi", address_description: None, sites: [Site { id: f599a654-b76a-872b-d4c9-989771a13262, name: "dkkoo" }] }}, internet_resource: ResourceDescriptionInternet { name: "Internet Resource", id: bb6cd6a8-5025-b625-21ed-4d5b17a5e72b, sites: [Site { id: f599a654-b76a-872b-d4c9-989771a13262, name: "dkkoo" }] } }, drop_direct_client_traffic: true, global_dns_records: {Name(chb.garkr.rocno.): {198.51.100.162, 198.51.100.163, 2001:db80::2e, 2001:db80::83}}, network: RoutingTable { routes: {(V4(Ipv4Network { network_address: 0.0.0.0, netmask: 32 }), DnsServer(4C75A51342F5C316DD27C2354CF679DB)), (V4(Ipv4Network { network_address: 90.217.45.54, netmask: 32 }), DnsServer(6B8D28A41DDC108C2B538C1E72CA5351)), (V4(Ipv4Network { network_address: 203.0.113.2, netmask: 32 }), Client(00000000-0000-0000-0000-000000000000)), (V4(Ipv4Network { network_address: 203.0.113.8, netmask: 32 }), Relay(c865db05-7b0f-0fd2-c892-e923fed0ec43)), (V4(Ipv4Network { network_address: 203.0.113.33, netmask: 32 }), Relay(549313d8-bed1-e205-b1f9-b7d313278143)), (V4(Ipv4Network { network_address: 203.0.113.53, netmask: 32 }), Gateway(d2422792-ecee-5c2c-d938-7141af405dec)), (V6(Ipv6Network { network_address: ::ffff:127.0.0.1, netmask: 128 }), 
DnsServer(91DFAC29CFC2E12A1DD8C4D6AF391763)), (V6(Ipv6Network { network_address: 2001:db80::5, netmask: 128 }), Relay(549313d8-bed1-e205-b1f9-b7d313278143)), (V6(Ipv6Network { network_address: 2001:db80::14, netmask: 128 }), Gateway(d2422792-ecee-5c2c-d938-7141af405dec)), (V6(Ipv6Network { network_address: 2001:db80::46, netmask: 128 }), Relay(c865db05-7b0f-0fd2-c892-e923fed0ec43)), (V6(Ipv6Network { network_address: 66db:9a41:6e18:b2fb:2d0c:abcd:88f8:7bdf, netmask: 128 }), DnsServer(F35329313B7CE7003308DCD1F7605549))} } }, [RebootRelaysWhilePartitioned({549313d8-bed1-e205-b1f9-b7d313278143: Host { inner: 0, ip4: Some(203.0.113.1), ip6: Some(2001:db80::b), default_port: 3478, latency: 24ms }}), ActivateResource(Cidr(ResourceDescriptionCidr { id: 0b724551-5de5-1705-5ade-6579cb60acf4, address: V6(Ipv6Network { network_address: ::ffff:152.64.97.233, netmask: 128 }), name: "pqqlooq", address_description: None, sites: [Site { id: f599a654-b76a-872b-d4c9-989771a13262, name: "dkkoo" }] })), SendICMPPacketToCidrResource { src: fd00:2021:1111::, dst: ::ffff:152.64.97.233, seq: 0, identifier: 0, payload: 0 }, SendICMPPacketToCidrResource { src: fd00:2021:1111::, dst: ::ffff:152.64.97.233, seq: 0, identifier: 0, payload: 0 }], None) +cc 5f3732130f35138485e1e0f36aa6e2fbcc934687c1c6aa23025c21ac56b51ec3 # shrinks to (ReferenceState { client: Host { inner: RefClient { id: 00000000-0000-0000-0000-000000000000, key: PrivateKey("0000000000000000000000000000000000000000000000000000000000000000"), known_hosts: {"api.firezone.dev": [0.0.0.0]}, tunnel_ip4: 100.64.0.1, tunnel_ip6: fd00:2021:1111::, ipv4_routes: {Ipv4Network { network_address: 100.96.0.0, netmask: 11 }, Ipv4Network { network_address: 100.100.111.0, netmask: 24 }}, ipv6_routes: {Ipv6Network { network_address: fd00:2021:1111:8000::, netmask: 107 }, Ipv6Network { network_address: fd00:2021:1111:8000:100:100:111:0, netmask: 120 }} }, ip4: Some(203.0.113.1), ip6: None, default_port: 1, latency: 10ms }, gateways: 
{b088fd78-1f5f-1069-42a3-fabcf5a3f8c5: Host { inner: RefGateway { key: PrivateKey("000000000000000000000000000000275acf37c45c00b4fc2c61b664faa6d52c") }, ip4: Some(203.0.113.43), ip6: Some(2001:db80::21), default_port: 28364, latency: 41ms }, ce2b3014-e205-d153-44be-257997a1ab8e: Host { inner: RefGateway { key: PrivateKey("c5cb3ed3cfc33543357300ca21d3fe0176f36c198fcde65bcf629c4595bced17") }, ip4: Some(203.0.113.32), ip6: Some(2001:db80::29), default_port: 59978, latency: 125ms }}, relays: {55f1ca3e-e53f-05d4-2620-7714c9dc1d6a: Host { inner: 8647444976938059545, ip4: Some(203.0.113.97), ip6: Some(2001:db80::57), default_port: 3478, latency: 25ms }, 89fbb18a-85d5-5fe1-dd7f-018733c19047: Host { inner: 11217482414423846813, ip4: Some(203.0.113.54), ip6: Some(2001:db80::10), default_port: 3478, latency: 12ms }}, dns_servers: {2FC5E7686EB5D8826AB0E4606E701106: Host { inner: RefDns, ip4: Some(0.0.0.0), ip6: None, default_port: 53, latency: 46ms }, 3B39769455557C340D65409EB430101B: Host { inner: RefDns, ip4: Some(98.172.139.128), ip6: None, default_port: 53, latency: 31ms }, 7CFB955E334A3E98193EEF6D0177DEF8: Host { inner: RefDns, ip4: None, ip6: Some(::ffff:9.219.243.11), default_port: 53, latency: 34ms }, D7669F8EA05560DEB9FA06E4891D9BB1: Host { inner: RefDns, ip4: None, ip6: Some(::ffff:212.211.33.93), default_port: 53, latency: 35ms }, E2C3E37214F5443E8F4AF9694495472C: Host { inner: RefDns, ip4: None, ip6: Some(daef:cec:9c71:ad2d:2fa3:e0ef:397f:cba9), default_port: 53, latency: 37ms }}, portal: StubPortal { gateways_by_site: {4c4e8354-eebe-155f-591f-cecb5052aa43: {b088fd78-1f5f-1069-42a3-fabcf5a3f8c5}, ef6a613a-730e-2ef1-d229-a69b830ffca2: {ce2b3014-e205-d153-44be-257997a1ab8e}}, cidr_resources: {2140a248-3213-da0b-5436-ba47b9c77535: ResourceDescriptionCidr { id: 2140a248-3213-da0b-5436-ba47b9c77535, address: V4(Ipv4Network { network_address: 127.0.0.0, netmask: 28 }), name: "qfnveks", address_description: Some("hrwjk"), sites: [Site { id: 
ef6a613a-730e-2ef1-d229-a69b830ffca2, name: "skatgap" }] }, 67100749-067d-3ba7-7e20-b244da81f406: ResourceDescriptionCidr { id: 67100749-067d-3ba7-7e20-b244da81f406, address: V4(Ipv4Network { network_address: 23.127.162.0, netmask: 29 }), name: "fmezyvgcmc", address_description: None, sites: [Site { id: ef6a613a-730e-2ef1-d229-a69b830ffca2, name: "skatgap" }] }}, dns_resources: {30b6533e-1259-5a9e-a885-36a06b52898f: ResourceDescriptionDns { id: 30b6533e-1259-5a9e-a885-36a06b52898f, address: "*.mjiqor.tsbkx", name: "kcixofklir", address_description: None, sites: [Site { id: ef6a613a-730e-2ef1-d229-a69b830ffca2, name: "skatgap" }] }, 51fa178c-956b-51de-4c00-736fd5b0b719: ResourceDescriptionDns { id: 51fa178c-956b-51de-4c00-736fd5b0b719, address: "nvcmq.alkv", name: "xgrfj", address_description: None, sites: [Site { id: ef6a613a-730e-2ef1-d229-a69b830ffca2, name: "skatgap" }] }}, internet_resource: ResourceDescriptionInternet { name: "Internet Resource", id: 85abd791-b6df-cc9e-37e7-8c151494237a, sites: [Site { id: 4c4e8354-eebe-155f-591f-cecb5052aa43, name: "ubjdwszssr" }] } }, drop_direct_client_traffic: true, global_dns_records: {Name(nvcmq.alkv.): {198.51.100.125, 198.51.100.182, 198.51.100.228, 198.51.100.241, 2001:db80::68}, Name(ffgya.gqrnj.): {127.0.0.1, ::ffff:0.0.0.0, ::ffff:127.0.0.1}, Name(awg.kfn.): {0.0.0.0, 78.152.42.122, 143.204.186.151, 231.198.159.251, ::ffff:227.117.122.165}, Name(ccnq.myrfd.mno.): {127.0.0.1, c6f9:5b3d:2a89:805b:dd68:3d0d:76bd:d786}, Name(mwx.xwqzp.mquea.): {178.142.181.30, ::ffff:127.0.0.1, ::ffff:147.54.88.245, ::ffff:162.225.220.211}, Name(ljbxv.mjiqor.tsbkx.): {198.51.100.75, 2001:db80::95, 2001:db80::be}, Name(paodvg.mjiqor.tsbkx.): {198.51.100.17, 198.51.100.130, 198.51.100.182}, Name(qviwog.mjiqor.tsbkx.): {198.51.100.192, 198.51.100.244, 2001:db80::97}}, network: RoutingTable { routes: {(V4(Ipv4Network { network_address: 0.0.0.0, netmask: 32 }), DnsServer(2FC5E7686EB5D8826AB0E4606E701106)), (V4(Ipv4Network { network_address: 
98.172.139.128, netmask: 32 }), DnsServer(3B39769455557C340D65409EB430101B)), (V4(Ipv4Network { network_address: 203.0.113.1, netmask: 32 }), Client(00000000-0000-0000-0000-000000000000)), (V4(Ipv4Network { network_address: 203.0.113.32, netmask: 32 }), Gateway(ce2b3014-e205-d153-44be-257997a1ab8e)), (V4(Ipv4Network { network_address: 203.0.113.43, netmask: 32 }), Gateway(b088fd78-1f5f-1069-42a3-fabcf5a3f8c5)), (V4(Ipv4Network { network_address: 203.0.113.54, netmask: 32 }), Relay(89fbb18a-85d5-5fe1-dd7f-018733c19047)), (V4(Ipv4Network { network_address: 203.0.113.97, netmask: 32 }), Relay(55f1ca3e-e53f-05d4-2620-7714c9dc1d6a)), (V6(Ipv6Network { network_address: ::ffff:9.219.243.11, netmask: 128 }), DnsServer(7CFB955E334A3E98193EEF6D0177DEF8)), (V6(Ipv6Network { network_address: ::ffff:212.211.33.93, netmask: 128 }), DnsServer(D7669F8EA05560DEB9FA06E4891D9BB1)), (V6(Ipv6Network { network_address: 2001:db80::10, netmask: 128 }), Relay(89fbb18a-85d5-5fe1-dd7f-018733c19047)), (V6(Ipv6Network { network_address: 2001:db80::21, netmask: 128 }), Gateway(b088fd78-1f5f-1069-42a3-fabcf5a3f8c5)), (V6(Ipv6Network { network_address: 2001:db80::29, netmask: 128 }), Gateway(ce2b3014-e205-d153-44be-257997a1ab8e)), (V6(Ipv6Network { network_address: 2001:db80::57, netmask: 128 }), Relay(55f1ca3e-e53f-05d4-2620-7714c9dc1d6a)), (V6(Ipv6Network { network_address: daef:cec:9c71:ad2d:2fa3:e0ef:397f:cba9, netmask: 128 }), DnsServer(E2C3E37214F5443E8F4AF9694495472C))} } }, [ActivateResource(Cidr(ResourceDescriptionCidr { id: 67100749-067d-3ba7-7e20-b244da81f406, address: V4(Ipv4Network { network_address: 23.127.162.0, netmask: 29 }), name: "fmezyvgcmc", address_description: None, sites: [Site { id: ef6a613a-730e-2ef1-d229-a69b830ffca2, name: "skatgap" }] })), PartitionRelaysFromPortal, SendICMPPacketToCidrResource { src: 100.64.0.1, dst: 23.127.162.0, seq: 0, identifier: 0, payload: 0 }, SendICMPPacketToCidrResource { src: 100.64.0.1, dst: 23.127.162.0, seq: 0, identifier: 0, payload: 0 
}], None) +cc 643f3cd8c2def55affe30ad690465a09514d475bef2df5cbda65f98c3a414f76 +cc eb667925801c37305d723dc1460cf0c94b4ad6be0f5e0392216dad33a43e892f diff --git a/rust/connlib/tunnel/src/client.rs b/rust/connlib/tunnel/src/client.rs index fbcdb0d24..64bb7f8d0 100644 --- a/rust/connlib/tunnel/src/client.rs +++ b/rust/connlib/tunnel/src/client.rs @@ -150,7 +150,8 @@ impl ClientTunnel { } pub fn remove_ice_candidate(&mut self, conn_id: GatewayId, ice_candidate: String) { - self.role_state.remove_ice_candidate(conn_id, ice_candidate); + self.role_state + .remove_ice_candidate(conn_id, ice_candidate, Instant::now()); } pub fn on_routing_details( @@ -501,8 +502,14 @@ impl ClientState { self.node.add_remote_candidate(conn_id, ice_candidate, now); } - pub fn remove_ice_candidate(&mut self, conn_id: GatewayId, ice_candidate: String) { - self.node.remove_remote_candidate(conn_id, ice_candidate); + pub fn remove_ice_candidate( + &mut self, + conn_id: GatewayId, + ice_candidate: String, + now: Instant, + ) { + self.node + .remove_remote_candidate(conn_id, ice_candidate, now); } #[tracing::instrument(level = "trace", skip_all, fields(%resource_id))] @@ -906,10 +913,10 @@ impl ClientState { pub fn handle_timeout(&mut self, now: Instant) { self.node.handle_timeout(now); + self.drain_node_events(); + self.mangled_dns_queries.retain(|_, exp| now < *exp); self.forwarded_dns_queries.retain(|_, (_, exp)| now < *exp); - - self.drain_node_events(); } fn maybe_update_tun_routes(&mut self) { @@ -1256,6 +1263,7 @@ impl ClientState { now: Instant, ) { self.node.update_relays(to_remove, &to_add, now); + self.drain_node_events(); // Ensure all state changes are fully-propagated. 
} } diff --git a/rust/connlib/tunnel/src/gateway.rs b/rust/connlib/tunnel/src/gateway.rs index 538d73b58..ca7589135 100644 --- a/rust/connlib/tunnel/src/gateway.rs +++ b/rust/connlib/tunnel/src/gateway.rs @@ -125,7 +125,8 @@ impl GatewayTunnel { } pub fn remove_ice_candidate(&mut self, conn_id: ClientId, ice_candidate: String) { - self.role_state.remove_ice_candidate(conn_id, ice_candidate); + self.role_state + .remove_ice_candidate(conn_id, ice_candidate, Instant::now()); } } @@ -245,8 +246,9 @@ impl GatewayState { self.node.add_remote_candidate(conn_id, ice_candidate, now); } - pub fn remove_ice_candidate(&mut self, conn_id: ClientId, ice_candidate: String) { - self.node.remove_remote_candidate(conn_id, ice_candidate); + pub fn remove_ice_candidate(&mut self, conn_id: ClientId, ice_candidate: String, now: Instant) { + self.node + .remove_remote_candidate(conn_id, ice_candidate, now); } /// Accept a connection request from a client. @@ -330,6 +332,7 @@ impl GatewayState { pub fn handle_timeout(&mut self, now: Instant, utc_now: DateTime) { self.node.handle_timeout(now); + self.drain_node_events(); match self.next_expiry_resources_check { Some(next_expiry_resources_check) if now >= next_expiry_resources_check => { @@ -344,7 +347,9 @@ impl GatewayState { None => self.next_expiry_resources_check = Some(now + EXPIRE_RESOURCES_INTERVAL), Some(_) => {} } + } + fn drain_node_events(&mut self) { let mut added_ice_candidates = BTreeMap::>::default(); let mut removed_ice_candidates = BTreeMap::>::default(); @@ -417,6 +422,7 @@ impl GatewayState { now: Instant, ) { self.node.update_relays(to_remove, &to_add, now); + self.drain_node_events() } } diff --git a/rust/connlib/tunnel/src/tests/reference.rs b/rust/connlib/tunnel/src/tests/reference.rs index 40e03b497..83407e39e 100644 --- a/rust/connlib/tunnel/src/tests/reference.rs +++ b/rust/connlib/tunnel/src/tests/reference.rs @@ -2,7 +2,7 @@ use super::{ composite_strategy::CompositeStrategy, sim_client::*, sim_dns::*, 
sim_gateway::*, sim_net::*, strategies::*, stub_portal::StubPortal, transition::*, }; -use crate::dns::is_subdomain; +use crate::{dns::is_subdomain, proptest::relay_id}; use connlib_shared::{ messages::{ client::{self, ResourceDescription}, @@ -67,7 +67,7 @@ impl ReferenceStateMachine for ReferenceState { system_dns_servers(dns_servers.values().cloned().collect()), upstream_dns_servers(dns_servers.values().cloned().collect()), ); - let relays = relays(); + let relays = relays(relay_id()); let global_dns_records = global_dns_records(); // Start out with a set of global DNS records so we have something to resolve outside of DNS resources. let drop_direct_client_traffic = any::(); @@ -195,8 +195,15 @@ impl ReferenceStateMachine for ReferenceState { sample::select(resource_ids).prop_map(Transition::DeactivateResource) }) .with(1, roam_client()) - .with(1, relays().prop_map(Transition::DeployNewRelays)) + .with(1, relays(relay_id()).prop_map(Transition::DeployNewRelays)) .with(1, Just(Transition::PartitionRelaysFromPortal)) + .with( + 1, + relays(sample::select( + state.relays.keys().copied().collect::>(), + )) + .prop_map(Transition::RebootRelaysWhilePartitioned), + ) .with(1, Just(Transition::ReconnectPortal)) .with(1, Just(Transition::Idle)) .with_if_not_empty(1, state.client.inner().all_resource_ids(), |resources_id| { @@ -454,22 +461,9 @@ impl ReferenceStateMachine for ReferenceState { // We do re-add all resources though so depending on the order they are added in, overlapping CIDR resources may change. state.client.exec_mut(|c| c.readd_all_resources()); } - Transition::DeployNewRelays(new_relays) => { - // Always take down all relays because we can't know which one was sampled for the connection. 
- for relay in state.relays.values() { - state.network.remove_host(relay); - } - state.relays.clear(); - - for (rid, new_relay) in new_relays { - state.relays.insert(*rid, new_relay.clone()); - debug_assert!(state.network.add_host(*rid, new_relay)); - } - - // In case we were using the relays, all connections will be cut and require us to make a new one. - if state.drop_direct_client_traffic { - state.client.exec_mut(|client| client.reset_connections()); - } + Transition::DeployNewRelays(new_relays) => state.deploy_new_relays(new_relays), + Transition::RebootRelaysWhilePartitioned(new_relays) => { + state.deploy_new_relays(new_relays) } Transition::Idle => {} Transition::PartitionRelaysFromPortal => { @@ -620,7 +614,8 @@ impl ReferenceStateMachine for ReferenceState { Transition::DeactivateResource(r) => { state.client.inner().all_resource_ids().contains(r) } - Transition::DeployNewRelays(new_relays) => { + Transition::RebootRelaysWhilePartitioned(new_relays) + | Transition::DeployNewRelays(new_relays) => { let mut additional_routes = RoutingTable::default(); for (rid, relay) in new_relays { if !additional_routes.add_host(*rid, relay) { @@ -672,6 +667,24 @@ impl ReferenceState { all_resources } + + fn deploy_new_relays(&mut self, new_relays: &BTreeMap>) { + // Always take down all relays because we can't know which one was sampled for the connection. + for relay in self.relays.values() { + self.network.remove_host(relay); + } + self.relays.clear(); + + for (rid, new_relay) in new_relays { + self.relays.insert(*rid, new_relay.clone()); + debug_assert!(self.network.add_host(*rid, new_relay)); + } + + // In case we were using the relays, all connections will be cut and require us to make a new one. 
+ if self.drop_direct_client_traffic { + self.client.exec_mut(|client| client.reset_connections()); + } + } } pub(crate) fn private_key() -> impl Strategy { diff --git a/rust/connlib/tunnel/src/tests/strategies.rs b/rust/connlib/tunnel/src/tests/strategies.rs index 37c5a3e90..8fa6df72d 100644 --- a/rust/connlib/tunnel/src/tests/strategies.rs +++ b/rust/connlib/tunnel/src/tests/strategies.rs @@ -109,8 +109,10 @@ pub(crate) fn stub_portal() -> impl Strategy { ) } -pub(crate) fn relays() -> impl Strategy>> { - collection::btree_map(relay_id(), ref_relay_host(), 1..=2) +pub(crate) fn relays( + id: impl Strategy, +) -> impl Strategy>> { + collection::btree_map(id, ref_relay_host(), 1..=2) } /// Sample a list of DNS servers. diff --git a/rust/connlib/tunnel/src/tests/sut.rs b/rust/connlib/tunnel/src/tests/sut.rs index a0e7990d7..7bd995b9b 100644 --- a/rust/connlib/tunnel/src/tests/sut.rs +++ b/rust/connlib/tunnel/src/tests/sut.rs @@ -273,28 +273,10 @@ impl TunnelTest { }); } Transition::DeployNewRelays(new_relays) => { - for relay in state.relays.values() { - state.network.remove_host(relay); - } + // If we are connected to the portal, we will learn, which ones went down, i.e. `relays_presence`. + let to_remove = state.relays.keys().copied().collect(); - let online = new_relays - .into_iter() - .map(|(rid, relay)| (rid, relay.map(SimRelay::new, debug_span!("relay", %rid)))) - .collect::>(); - - for (rid, relay) in &online { - debug_assert!(state.network.add_host(*rid, relay)); - } - - state.client.exec_mut(|c| { - c.update_relays(state.relays.keys().copied(), online.iter(), now); - }); - for gateway in state.gateways.values_mut() { - gateway.exec_mut(|g| { - g.update_relays(state.relays.keys().copied(), online.iter(), now) - }); - } - state.relays = online; // Override all relays. 
+ state.deploy_new_relays(new_relays, now, to_remove); } Transition::Idle => { const IDLE_DURATION: Duration = Duration::from_secs(6 * 60); // Ensure idling twice in a row puts us in the 10-15 minute window where TURN data channels are cooling down. @@ -334,6 +316,8 @@ impl TunnelTest { // 2. Advance state to ensure this is reflected. state.advance(ref_state, &mut buffered_transmits); + let now = state.flux_capacitor.now(); + // 3. Reconnect all relays. state .client @@ -342,6 +326,12 @@ impl TunnelTest { gateway.exec_mut(|g| g.update_relays(iter::empty(), state.relays.iter(), now)); } } + Transition::RebootRelaysWhilePartitioned(new_relays) => { + // If we are partitioned from the portal, we will only learn which relays to use, potentially replacing existing ones. + let to_remove = Vec::default(); + + state.deploy_new_relays(new_relays, now, to_remove); + } }; state.advance(ref_state, &mut buffered_transmits); @@ -390,6 +380,24 @@ impl TunnelTest { self.handle_timeout(&ref_state.global_dns_records, buffered_transmits); let now = self.flux_capacitor.now(); + for (id, gateway) in self.gateways.iter_mut() { + let Some(event) = gateway.exec_mut(|g| g.sut.poll_event()) else { + continue; + }; + + on_gateway_event(*id, event, &mut self.client, now); + continue 'outer; + } + if let Some(event) = self.client.exec_mut(|c| c.sut.poll_event()) { + self.on_client_event( + self.client.inner().id, + event, + &ref_state.portal, + &ref_state.global_dns_records, + ); + continue; + } + for (_, relay) in self.relays.iter_mut() { let Some(message) = relay.exec_mut(|r| r.sut.next_command()) else { continue; @@ -435,28 +443,10 @@ impl TunnelTest { continue 'outer; } - for (id, gateway) in self.gateways.iter_mut() { - let Some(event) = gateway.exec_mut(|g| g.sut.poll_event()) else { - continue; - }; - - on_gateway_event(*id, event, &mut self.client, now); - continue 'outer; - } - if let Some(transmit) = self.client.exec_mut(|sim| sim.sut.poll_transmit()) { 
buffered_transmits.push_from(transmit, &self.client, now); continue; } - if let Some(event) = self.client.exec_mut(|c| c.sut.poll_event()) { - self.on_client_event( - self.client.inner().id, - event, - &ref_state.portal, - &ref_state.global_dns_records, - ); - continue; - } self.client.exec_mut(|sim| { while let Some(packet) = sim.sut.poll_packets() { sim.on_received_packet(packet) @@ -654,7 +644,7 @@ impl TunnelTest { gateway.exec_mut(|g| { for candidate in candidates { - g.sut.remove_ice_candidate(src, candidate) + g.sut.remove_ice_candidate(src, candidate, now) } }) } @@ -797,6 +787,34 @@ impl TunnelTest { } } } + + fn deploy_new_relays( + &mut self, + new_relays: BTreeMap>, + now: Instant, + to_remove: Vec, + ) { + for relay in self.relays.values() { + self.network.remove_host(relay); + } + + let online = new_relays + .into_iter() + .map(|(rid, relay)| (rid, relay.map(SimRelay::new, debug_span!("relay", %rid)))) + .collect::>(); + + for (rid, relay) in &online { + debug_assert!(self.network.add_host(*rid, relay)); + } + + self.client.exec_mut(|c| { + c.update_relays(to_remove.iter().copied(), online.iter(), now); + }); + for gateway in self.gateways.values_mut() { + gateway.exec_mut(|g| g.update_relays(to_remove.iter().copied(), online.iter(), now)); + } + self.relays = online; // Override all relays. + } } fn on_gateway_event( @@ -813,7 +831,7 @@ fn on_gateway_event( }), GatewayEvent::RemovedIceCandidates { candidates, .. } => client.exec_mut(|c| { for candidate in candidates { - c.sut.remove_ice_candidate(src, candidate) + c.sut.remove_ice_candidate(src, candidate, now) } }), GatewayEvent::RefreshDns { .. } => todo!(), diff --git a/rust/connlib/tunnel/src/tests/transition.rs b/rust/connlib/tunnel/src/tests/transition.rs index 10a7686ca..969fe059e 100644 --- a/rust/connlib/tunnel/src/tests/transition.rs +++ b/rust/connlib/tunnel/src/tests/transition.rs @@ -84,6 +84,11 @@ pub(crate) enum Transition { /// Idle connlib for a while. 
Idle, + + /// Simulate all relays rebooting while we are network partitioned from the portal. + /// + /// In this case, we won't receive a `relays_presence` but instead we will receive relays with the same ID yet different credentials. + RebootRelaysWhilePartitioned(BTreeMap>), } #[derive(Debug, Clone)]