CI: add a flow that tests client-to-resource ping (#1729)

This PR fixes a number of small things to enable a new flow that tests a
client pinging a resource within docker compose.

Masquerade/forwarding is enabled directly in the container for now; this
might change in the future.

Also added README instructions for running this locally.
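
Roughly, the new CI job (shown in full further down) boils down to the following steps when reproduced locally; the build step uses `docker buildx bake` since the workflow drives it through `docker/bake-action`:

```sh
# Build the images referenced by docker-compose.yml without pushing them
docker buildx bake -f docker-compose.yml

# Seed the portal database
docker compose run elixir /bin/sh -c "cd apps/domain && mix ecto.seed"

# Bring the stack up in the background, then check that the client can reach the resource
docker compose up -d
docker compose exec -it client ping 172.20.0.100 -c 5
```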

---------

Signed-off-by: Gabi <gabrielalejandro7@gmail.com>
Co-authored-by: Jamil <jamilbk@users.noreply.github.com>
Author: Gabi
Date: 2023-07-05 00:17:26 -03:00
Committed by: GitHub
Parent: 9cb024e787
Commit: eb5fc34f35
13 changed files with 101 additions and 36 deletions

.github/workflows/integration-tests.yml (new file, 33 additions)
View File

@@ -0,0 +1,33 @@
name: Integration Tests
on:
merge_group:
types: [checks_requested]
pull_request:
jobs:
test-basic-flow:
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@v3
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v2
- name: Build images
uses: docker/bake-action@v3
with:
files: docker-compose.yml
push: false
- name: Seed database
run: docker compose run elixir /bin/sh -c "cd apps/domain && mix ecto.seed"
- name: Start docker compose in the background
run: docker compose up -d
- name: Test that client can ping resource
# FIXME: When the client sends its first packet trying to connect but there's no relay yet,
# the portal responds with an error and we don't continue the flow until we receive a success
# response with all the relays, so for now we just sleep here, waiting for the relay to have
# its presence tracked by the portal.
# The next fix will be:
# * If the relay list comes back as an error, retry a few times
# * If it still comes back as an error, try a direct connection (local network)
# Right now this works because we wait for the relay to be up and running before starting the client.
run: docker compose exec -it client ping 172.20.0.100 -c 5
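
Until that retry logic lands in the client, a more defensive version of this step could poll the ping itself rather than relying only on the relay healthcheck ordering; a sketch, not part of this PR:

```sh
# Hypothetical hardening of the CI step: retry for up to ~30 seconds before failing
for i in $(seq 1 10); do
  docker compose exec client ping -c 1 172.20.0.100 && exit 0
  sleep 3
done
echo "client could not reach the resource" >&2
exit 1
```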

View File

@@ -113,7 +113,7 @@ jobs:
with:
name: connlib-android
path: |
./connlib-${{ needs.draft-release.outputs.tag_name }}.aar
./rust/connlib-${{ needs.draft-release.outputs.tag_name }}.aar
build-apple:
needs:

View File

@@ -8,7 +8,7 @@ started.
- [Overview](#overview)
- [Quick Start](#quick-start)
- [Docker Setup](#docker-setup)
- [Docker Caveat](#docker-caveat)
- [Test With Docker](#test-with-docker)
- [Bootstrapping](#bootstrapping)
- [Ensure Everything Works](#ensure-everything-works)
- [Developer Environment Setup](#developer-environment-setup)
@@ -57,32 +57,38 @@ We recommend [Docker Desktop](https://docs.docker.com/engine/install/#desktop)
even if you're developing on Linux. This is what the Firezone core devs use and
comes with `compose` included.
#### Docker Caveat
#### Test With Docker
Routing packets from the host's WireGuard client through the Firezone compose
cluster and out to the external network will not work. This is because Docker
Desktop
[rewrites the source address from containers to appear as if they originated the
host](https://www.docker.com/blog/how-docker-desktop-networking-works-under-the-hood/)
, causing a routing loop:
When you want to test every component together, the ideal way to go is to use Docker.
1. Packet originates on Host
1. Enters WireGuard client tunnel
1. Forwarding through the Docker bridge net
1. Forward to the Firezone container `127.0.0.1:51820`
1. Firezone sends packet back out
1. Docker bridge net, Docker rewrites src IP to Host's LAN IP, (d'oh!)
1. Docker sends packet out to Host ->
1. Packet now has same src IP and dest IP as step 1 above, and the cycle
continues
To do this you first need a seeded database; for that, follow the steps in the
[Elixir README](elixir/readme#running-control-plane-for-local-development).
Then you can run:
```sh
# To start all the components
docker compose up -d --build
However, packets destined for Firezone compose cluster IPs (`172.28.0.0/16`)
reach their destination through the tunnel just fine. Because of this, it's
recommended to use `172.28.0.0/16` for your `AllowedIPs` parameter when using
host-based WireGuard clients with Firezone running under Docker Desktop.
# To check the logs
docker compose logs -f
```
Routing packets from _another_ host on the local network, through your development
machine, and out to the external Internet should work as well.
After this you will have the following running:
* A portal
* A gateway connected to the portal
* A client connected to the portal
* A relay connected to the portal
* A resource on a network shared only with the gateway
  * Its IP is `172.20.0.100`
(and any other dependencies these need to run)
```sh
# To test that a client can ping the resource
docker compose exec -it client ping 172.20.0.100
# You can also get a shell directly in the client container
docker compose exec -it client /bin/sh
```
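
Not part of the README diff above, but a quick way to confirm that all of these services actually came up (the `gateway` service name is assumed from the compose file):

```sh
# List service state and health for the whole stack
docker compose ps

# Tail a single service if something looks off (e.g. the gateway)
docker compose logs -f gateway
```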
### Bootstrapping

View File

@@ -135,6 +135,8 @@ services:
depends_on:
api:
condition: 'service_healthy'
relay:
condition: 'service_healthy'
networks:
app:
ipv4_address: 172.28.0.100
@@ -144,6 +146,7 @@ services:
FZ_URL: "ws://api:8081/"
FZ_SECRET: "SFMyNTY.g2gDaAJtAAAAJDNjZWYwNTY2LWFkZmQtNDhmZS1hMGYxLTU4MDY3OTYwOGY2Zm0AAABAamp0enhSRkpQWkdCYy1vQ1o5RHkyRndqd2FIWE1BVWRwenVScjJzUnJvcHg3NS16bmhfeHBfNWJUNU9uby1yYm4GAFvAb_mIAWIAAVGA.1DaY3H3fKzW5sqcciJqlHyG0uFctzOewofsVRGS7NrQ"
RUST_LOG: gateway=trace,firezone_gateway_connlib=trace,firezone_tunnel=trace,libs_common=trace,warn
ENABLE_MASQUERADE: 1
build:
context: rust
dockerfile: Dockerfile.dev
@@ -169,7 +172,8 @@ services:
image: alpine:3.18
command: tail -f /dev/null
networks:
- resources
resources:
ipv4_address: 172.20.0.100
relay:
environment:
@@ -185,6 +189,12 @@ services:
args:
PACKAGE: relay
image: firezone-relay
healthcheck:
test: [ "CMD-SHELL", "lsof -i UDP | grep relay" ]
start_period: 20s
interval: 30s
retries: 5
timeout: 5s
depends_on:
api:
condition: 'service_healthy'
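
For local debugging, the new healthcheck can also be exercised by hand; a sketch, assuming the service is named `relay` in docker-compose.yml:

```sh
# Ask Docker for the computed health status of the relay container
docker inspect --format '{{ .State.Health.Status }}' "$(docker compose ps -q relay)"

# Or run the same probe the healthcheck uses, inside the container
docker compose exec relay sh -c "lsof -i UDP | grep relay"
```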

View File

@@ -14,11 +14,12 @@ FROM debian:11.7-slim
ARG PACKAGE
WORKDIR /app/
COPY --from=BUILDER /usr/local/bin/$PACKAGE .
COPY ./docker-init.sh .
ENV RUST_BACKTRACE=1
ENV PATH "/app:$PATH"
ENV PACKAGE_NAME ${PACKAGE}
RUN apt-get update -y \
&& apt-get install -y iputils-ping \
&& apt-get install -y iputils-ping iptables lsof \
&& apt-get clean \
&& rm -rf /var/lib/apt/lists/*
# Some black magic here:
@@ -28,6 +29,6 @@ RUN apt-get update -y \
# this means that anything after the first argument will be ignored
# if we ever combine this with `CMD` in exec form, so always use shell form
# (Note: we could use shell form here, but this is the same thing made explicit)
ENTRYPOINT ["/bin/sh", "-c", "$PACKAGE_NAME $0"]
ENTRYPOINT ["/bin/sh", "-c", "./docker-init.sh && $PACKAGE_NAME $0"]
# *sigh* if we don't add this $0 becomes /bin/sh in the command above
CMD [""]
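
To make the `$0` trick concrete: with `PACKAGE_NAME=relay`, running the image with one extra argument expands roughly like this (an illustration, not part of the diff):

```sh
# `docker run firezone-relay --help` effectively becomes:
/bin/sh -c './docker-init.sh && $PACKAGE_NAME $0' '--help'
# Inside the -c string, $0 expands to '--help', so the relay binary receives it as
# its only argument; any further CMD arguments ($1, $2, ...) are silently dropped.
```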

View File

@@ -277,6 +277,14 @@ where
d.on_open(Box::new(move || {
tracing::trace!("new data channel opened!");
Box::pin(async move {
{
let mut iface_config = tunnel.iface_config.lock().await;
for ip in &peer.ips {
if let Err(e) = iface_config.add_route(ip).await {
tunnel.callbacks.on_error(&e, Recoverable);
}
}
}
if let Err(e) = tunnel.handle_channel_open(data_channel, index, peer).await
{
tunnel.callbacks.on_error(&e, Recoverable);

View File

@@ -219,7 +219,7 @@ where
{
let mut iface_config = self.iface_config.lock().await;
for ip in resource_description.ips() {
if let Err(err) = iface_config.add_route(ip).await {
if let Err(err) = iface_config.add_route(&ip).await {
self.callbacks.on_error(&err, Fatal);
}
}

View File

@@ -14,7 +14,7 @@ pub(crate) struct ResourceTable {
dns_name: HashMap<String, NonNull<ResourceDescription>>,
}
// SAFETY: We actually hold a `Vec` internally that the poitners points to
// SAFETY: We actually hold a hashmap internally that the pointers point to
unsafe impl Send for ResourceTable {}
// SAFETY: we don't allow interior mutability of the pointers we hold, in fact we don't allow ANY mutability!
// (this is part of the reason why the API is so limiting, it is easier to reason about.
@@ -69,7 +69,7 @@ impl ResourceTable {
self.id_table.remove(&id);
}
fn cleaup_resource(&mut self, resource_description: &ResourceDescription) {
fn cleanup_resource(&mut self, resource_description: &ResourceDescription) {
match resource_description {
ResourceDescription::Dns(r) => {
if let Some(res) = self.id_table.get(&r.id) {
@@ -123,7 +123,7 @@ impl ResourceTable {
// For soundness it's very important that this API only takes a resource_description
// doing this, we can assume that when removing a resource from the id table we have all the info
// about all the o
// about all the tables.
/// Inserts a new resource_description
///
/// If the id was used previously the old value will be deleted.
@@ -132,7 +132,7 @@ impl ResourceTable {
///
/// This is done so that we don't have dangling values.
pub fn insert(&mut self, resource_description: ResourceDescription) {
self.cleaup_resource(&resource_description);
self.cleanup_resource(&resource_description);
let id = resource_description.id();
self.id_table.insert(id, resource_description);
// we just inserted it we can unwrap

View File

@@ -69,7 +69,7 @@ impl IfaceDevice {
}
impl IfaceConfig {
pub async fn add_route(&mut self, route: IpNetwork) -> Result<()> {
pub async fn add_route(&mut self, route: &IpNetwork) -> Result<()> {
tracing::error!("`add_route` unimplemented on Android: `{route:#?}`");
Ok(())
}

View File

@@ -262,7 +262,7 @@ impl IfaceDevice {
// So, these functions take a mutable &self, this is not necessary in theory but it's correct!
impl IfaceConfig {
pub async fn add_route(&mut self, route: IpNetwork) -> Result<()> {
pub async fn add_route(&mut self, route: &IpNetwork) -> Result<()> {
tracing::error!("`add_route` unimplemented on macOS: `{route:#?}`");
Ok(())
}

View File

@@ -177,7 +177,7 @@ fn get_last_error() -> Error {
}
impl IfaceConfig {
pub async fn add_route(&mut self, route: IpNetwork) -> Result<()> {
pub async fn add_route(&mut self, route: &IpNetwork) -> Result<()> {
let req = self
.0
.handle

View File

@@ -16,7 +16,7 @@ impl IfaceConfig {
todo!()
}
pub async fn add_route(&mut self, _route: IpNetwork) -> Result<()> {
pub async fn add_route(&mut self, _route: &IpNetwork) -> Result<()> {
todo!()
}
}

rust/docker-init.sh (new executable file, 7 additions)
View File

@@ -0,0 +1,7 @@
#!/bin/sh
if [ "$ENABLE_MASQUERADE" = "1" ]; then
IFACE="utun"
iptables -A FORWARD -i $IFACE -j ACCEPT
iptables -A FORWARD -o $IFACE -j ACCEPT
iptables -t nat -A POSTROUTING -o eth+ -j MASQUERADE
fi
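
If masquerading doesn't behave as expected, the rules installed by this script can be inspected from inside the container; a sketch, assuming the compose service is named `gateway` and was started with `ENABLE_MASQUERADE=1`:

```sh
# Show the forwarding rules added for the tunnel interface
docker compose exec gateway iptables -S FORWARD

# Show the NAT rule that masquerades traffic leaving via eth*
docker compose exec gateway iptables -t nat -S POSTROUTING
```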