connlib: moves it to the main firezone library

This brings connlib from its own separate repo into firezone's monorepo.
    
 On top of bringing connlib in, we also add a unified Dockerfile for all
 Rust binaries and a docker-compose setup that can run a headless client,
 a relay and a gateway, which will eventually exercise the whole flow
 between a client and a resource. To make this work, we also incorporated
 some Elixir scripts that generate portal tokens for those components.
This commit is contained in:
Gabi
2023-06-23 19:39:58 -03:00
committed by GitHub
parent e039f1919d
commit e9be4b9ef5
87 changed files with 7218 additions and 295 deletions

46
.github/workflows/publish_connlib.yml vendored Normal file
View File

@@ -0,0 +1,46 @@
# Publishes the connlib Android AAR to GitHub Packages when a release is published.
name: Publish packages to GitHub Packages
on:
  release:
    types: [published]
jobs:
  # Noop: XCFramework is attached to release already in build workflow
  # publish-apple:
  publish-android:
    runs-on: ubuntu-latest
    defaults:
      run:
        working-directory: ./rust
    permissions:
      contents: read
      packages: write
    steps:
      - uses: actions/checkout@v3
      - uses: actions/setup-java@v3
        with:
          java-version: '17'
          distribution: 'adopt'
      - uses: Swatinem/rust-cache@v2
        with:
          workspaces: ./rust
      - name: Setup toolchain
        # Installs the toolchain pinned in rust-toolchain.toml.
        run: rustup show
      - name: Validate Gradle wrapper
        uses: gradle/wrapper-validation-action@v1
      # Guard against publishing an AAR whose Gradle version differs from the git tag.
      - name: Sanity check tag equals AAR version
        run: |
          pkg_version=$(awk -F ' = ' '$1 ~ /version/ { gsub(/[\"]/, "", $2); printf("%s",$2); exit; }' connlib/android/lib/build.gradle.kts)
          if [[ "${{ github.ref_name }}" = "$pkg_version" ]]; then
            echo "Github ref name ${{ github.ref_name }} equals parsed package version $pkg_version. Continuing..."
          else
            echo "Github ref name ${{ github.ref_name }} differs from parsed package version $pkg_version! Aborting..."
            exit 1
          fi
      - name: Publish package
        uses: gradle/gradle-build-action@v2
        with:
          # NOTE(review): this path is relative to the workspace root, not the
          # ./rust working-directory default (which only applies to `run` steps);
          # confirm it should not be rust/connlib/clients/android.
          build-root-directory: android
          arguments: publish
        env:
          # Fix: GITHUB_ACTOR is not a repository secret, so reading it via
          # secrets.* yields an empty value; use the built-in actor context.
          GITHUB_ACTOR: ${{ github.actor }}
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}

View File

@@ -12,8 +12,18 @@ concurrency:
cancel-in-progress: true
jobs:
test:
name: Test all crates
draft-release:
runs-on: ubuntu-latest
outputs:
tag_name: ${{ steps.release_drafter.outputs.tag_name }}
steps:
- uses: release-drafter/release-drafter@v5
id: release_drafter
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
test-relay:
name: Test relay
runs-on: ubuntu-latest
defaults:
run:
@@ -28,15 +38,130 @@ jobs:
- uses: Swatinem/rust-cache@v2
with:
workspaces: ./rust
- run: cargo fmt -- --check
- run: cargo doc --no-deps --document-private-items
- run: cargo fmt -p relay -- --check
- run: cargo doc -p relay --no-deps --document-private-items
env:
RUSTDOCFLAGS: "-D warnings"
- run: cargo clippy --all-targets --all-features -- -D warnings
- run: cargo clippy -p relay --all-targets --all-features -- -D warnings
- run: cargo test
cross: # cross is separate from test because cross-compiling yields different artifacts and we cannot reuse the cache.
name: Cross compile all crates
test-connlib:
needs: draft-release
name: Connlib checks
strategy:
matrix:
runs-on:
- ubuntu-20.04
- ubuntu-22.04
- macos-11
- macos-12
- windows-2019
- windows-2022
runs-on: ${{ matrix.runs-on }}
defaults:
run:
working-directory: ./rust
steps:
- name: Checkout
uses: actions/checkout@v3
- name: Update toolchain
run: rustup show
- uses: Swatinem/rust-cache@v2
- name: Run connlib checks and tests
run: |
cargo check --workspace --exclude relay
cargo clippy --workspace --exclude relay -- -D clippy::all
cargo test --workspace --exclude relay
build-android:
needs:
- test-connlib
- draft-release
runs-on: ubuntu-latest
permissions:
contents: read
strategy:
matrix:
rust: [stable]
steps:
- uses: actions/checkout@v3
- uses: Swatinem/rust-cache@v2
- name: Update toolchain
run: rustup show
- uses: actions/cache@v3
with:
path: |
~/rust/connlib/clients/android/.gradle/caches
~/rust/connlib/clients/android/.gradle/wrapper
key: ${{ runner.os }}-gradle-${{ hashFiles('**/*.gradle*', '**/gradle-wrapper.properties') }}
restore-keys: |
${{ runner.os }}-gradle-
- uses: actions/setup-java@v3
with:
java-version: '17'
distribution: 'adopt'
- name: Validate Gradle wrapper
uses: gradle/wrapper-validation-action@v1
- name: Assemble Release
uses: gradle/gradle-build-action@v2
with:
arguments: build assembleRelease
build-root-directory: rust/connlib/clients/android
- name: Move artifact
run: |
mv ./rust/connlib/clients/android/lib/build/outputs/aar/lib-release.aar ./connlib-${{ needs.draft-release.outputs.tag_name }}.aar
- uses: actions/upload-artifact@v3
with:
name: connlib-android
path: |
./connlib-${{ needs.draft-release.outputs.tag_name }}.aar
build-apple:
needs:
- test-connlib
- draft-release
runs-on: macos-latest
permissions:
contents: read
strategy:
matrix:
rust: [stable]
steps:
- uses: actions/checkout@v3
- uses: Swatinem/rust-cache@v2
- name: Update toolchain
run: rustup show
- name: Setup lipo
run: cargo install cargo-lipo
- uses: actions/cache@v3
with:
path: apple/.build
key: ${{ runner.os }}-spm-${{ hashFiles('**/Package.resolved') }}
restore-keys: |
${{ runner.os }}-spm-
- name: Build Connlib.xcframework.zip
env:
CONFIGURATION: Release
PROJECT_DIR: .
working-directory: ./rust/connlib/clients/apple
run: |
# build-xcframework.sh calls build-rust.sh indirectly via `xcodebuild`, but it pollutes the environment
# to the point that it causes the `ring` build to fail for the aarch64-apple-darwin target. So, explicitly
# build first. See https://github.com/briansmith/ring/issues/1332
./build-rust.sh
./build-xcframework.sh
mv Connlib.xcframework.zip ../../../Connlib-${{ needs.draft-release.outputs.tag_name }}.xcframework.zip
mv Connlib.xcframework.zip.checksum.txt ../../../Connlib-${{ needs.draft-release.outputs.tag_name }}.xcframework.zip.checksum.txt
- uses: actions/upload-artifact@v3
with:
name: connlib-apple
path: |
./Connlib-${{ needs.draft-release.outputs.tag_name }}.xcframework.zip
./Connlib-${{ needs.draft-release.outputs.tag_name }}.xcframework.zip.checksum.txt
cross-relay: # cross is separate from test because cross-compiling yields different artifacts and we cannot reuse the cache.
name: Cross compile relay
runs-on: ubuntu-latest
defaults:
run:
@@ -66,7 +191,6 @@ jobs:
# This implicitly triggers installation of the toolchain in the `rust-toolchain.toml` file.
# If we don't do this here, our cache action will compute a cache key based on the Rust version shipped on GitHub's runner which might differ from the one we use.
- run: rustup show
- uses: Swatinem/rust-cache@v2
with:
workspaces: ./rust

20
NOTICE.txt Normal file
View File

@@ -0,0 +1,20 @@
NOTICES AND INFORMATION
Do Not Translate or Localize
This software incorporates material from third parties.
Please refer to this document for the license terms of the components that this product depends on and uses.
===
This product depends on and uses Boringtun source code:
Copyright (c) 2019 Cloudflare, Inc. All rights reserved.
Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

View File

@@ -90,12 +90,78 @@ services:
OUTBOUND_EMAIL_ADAPTER_OPTS: "{\"api_key\":\"7da7d1cd-111c-44a7-b5ac-4027b9d230e5\"}"
# Seeds
STATIC_SEEDS: "true"
# Client info
USER_AGENT: "iOS/12.5 (iPhone) connlib/0.7.412"
depends_on:
postgres:
condition: 'service_healthy'
networks:
- app
client:
environment:
FZ_URL: "ws://api:8081/"
FZ_SECRET: "SFMyNTY.g2gDaANkAAhpZGVudGl0eW0AAAAkN2RhN2QxY2QtMTExYy00NGE3LWI1YWMtNDAyN2I5ZDIzMGU1bQAAACDZI3ehOZSu3JOSMREkvzrtKjs8jkrW6fpbVw9opDYmi24GANjCD-qIAWIB4TOA.XhoLEDjIzuv1SXEVUV6lfIHW12n5-J5aBDUKCl8ovMk"
build:
context: rust
args:
PACKAGE: headless
image: firezone-headless
cap_add:
- NET_ADMIN
sysctls:
- net.ipv6.conf.all.disable_ipv6=0
devices:
- "/dev/net/tun:/dev/net/tun"
depends_on:
- api
networks:
app:
ipv4_address: 172.28.0.100
gateway:
environment:
FZ_URL: "ws://api:8081/"
FZ_SECRET: "SFMyNTY.g2gDaAJtAAAAJDNjZWYwNTY2LWFkZmQtNDhmZS1hMGYxLTU4MDY3OTYwOGY2Zm0AAABAamp0enhSRkpQWkdCYy1vQ1o5RHkyRndqd2FIWE1BVWRwenVScjJzUnJvcHg3NS16bmhfeHBfNWJUNU9uby1yYm4GAJXr4emIAWIAAVGA.jz0s-NohxgdAXeRMjIQ9kLBOyd7CmKXWi2FHY-Op8GM"
build:
context: rust
args:
PACKAGE: gateway
image: firezone-gateway
cap_add:
- NET_ADMIN
sysctls:
- net.ipv4.ip_forward=1
- net.ipv4.conf.all.src_valid_mark=1
- net.ipv6.conf.all.disable_ipv6=0
devices:
- "/dev/net/tun:/dev/net/tun"
depends_on:
- api
networks:
- app
relay:
environment:
PUBLIC_IP4_ADDR: 172.28.0.101
LISTEN_IP4_ADDR: 172.28.0.101
PORTAL_WS_URL: "ws://api:8081/"
PORTAL_TOKEN: "SFMyNTY.g2gDaAJtAAAAJDcyODZiNTNkLTA3M2UtNGM0MS05ZmYxLWNjODQ1MWRhZDI5OW0AAABARVg3N0dhMEhLSlVWTGdjcE1yTjZIYXRkR25mdkFEWVFyUmpVV1d5VHFxdDdCYVVkRVUzbzktRmJCbFJkSU5JS24GAMDq4emIAWIAAVGA.fLlZsUMS0VJ4RCN146QzUuINmGubpsxoyIf3uhRHdiQ"
ports:
- "3478/udp"
- "49152-65535/udp"
build:
context: rust
args:
PACKAGE: relay
image: firezone-relay
depends_on:
- api
networks:
app:
ipv4_address: 172.28.0.101
command: "--allow-insecure-ws"
api:
build:
context: elixir

6
elixir/.gitignore vendored
View File

@@ -1,6 +1,3 @@
# macOS cruft
.DS_Store
# HTTPS dev certs
priv/pki/authorities/local/
@@ -16,9 +13,6 @@ deps/
# If the VM crashes, it generates a dump, let's ignore it too.
erl_crash.dump
# If NPM crashes, it generates a log, let's ignore it too.
npm-debug.log
# The directory NPM downloads your dependencies sources to.
/assets/node_modules/

1
gateway_variables.env Normal file
View File

@@ -0,0 +1 @@
FZ_SECRET=SFMyNTY.g2gDaAJtAAAAJGFkMTBjNTVhLTNiYTUtNDdjYy04YTZkLWJlNmE1NWJlN2FlN20AAABAcUpCSGNzRGtkVVdUUjJyaGQ0c2dGSFZ3U0d1ZnJncWFIV0dwNXFsOU5nUWR4RVRTbk9ycW1GbnN6cDVZWk50N24GAJzge-WIAWIAAVGA.au7yCBGyycngufVpdgPEGf4OtjzIx01k4-JSXVEALtk

1
headless_variables.env Normal file
View File

@@ -0,0 +1 @@
FZ_SECRET=SFMyNTY.g2gDaANkAAhpZGVudGl0eW0AAAAkN2VhMGVkYzQtZjlkNy00NzlhLWE2OWQtYWM3NTZkN2QyYzk4bQAAACBzHhbd6lPr9SMx_HaE6mqiHZqvmV2wEcmmYUdtbi6xjW4GALGZe-WIAWIACTqA.zsVviEWg7VlEjHBf5krjxQ1wvj-TzrYdPBJfDbY3NnE

1
relay_variables.env Normal file
View File

@@ -0,0 +1 @@
PORTAL_TOKEN=SFMyNTY.g2gDaAJtAAAAJDQ0NWNkMWY2LThkMzMtNDJlOC1hMDQ0LWMzYTVlMmEyNTU0NW0AAABAWmk4c1JMX2FWWjdyZUZoUll5b3BVZ09qRV85aHVjajF2ZGlCSjg4Q0RXOUw4MzBOdmVCU3pSdXg0MFhtazlEcG4GAGD1e-WIAWIAAVGA.BMTyT0jfmPvn_WEWn9AjxvVuv5BhdGNWslaHgzuCATA

1038
rust/Cargo.lock generated

File diff suppressed because it is too large Load Diff

View File

@@ -1,2 +1,18 @@
[workspace]
members = ["relay", "phoenix-channel"]
members = [
"relay",
"phoenix-channel",
"connlib/clients/android",
"connlib/clients/apple",
"connlib/clients/headless",
"connlib/libs/tunnel",
"connlib/libs/client",
"connlib/libs/gateway",
"connlib/libs/common",
"connlib/gateway",
"connlib/macros",
]
[workspace.dependencies]
boringtun = { git = "https://github.com/cloudflare/boringtun", rev = "878385f", default-features = false }
swift-bridge = { git = "https://github.com/chinedufn/swift-bridge.git", rev = "4fbd30f" }

21
rust/Dockerfile Normal file
View File

@@ -0,0 +1,21 @@
# Multi-stage build: compile the requested workspace package as a statically
# linked musl binary, then ship it in a minimal Alpine image.
FROM rust:1.70-slim as BUILDER
# Name of the cargo package to build (e.g. relay, gateway, headless),
# supplied by docker-compose via build args.
ARG PACKAGE
WORKDIR /build/
COPY . ./
# BuildKit cache mounts keep the target dir, the cargo registry and the
# rustup home warm between builds.
# NOTE(review): the musl target is not installed explicitly here; presumably
# rust-toolchain.toml lists x86_64-unknown-linux-musl so `cargo build`
# resolves it — confirm.
RUN --mount=type=cache,target=./target \
--mount=type=cache,target=/usr/local/cargo/registry \
--mount=type=cache,target=/usr/local/rustup \
apt update && apt install -y musl-tools && \
cargo build -p $PACKAGE --release --target x86_64-unknown-linux-musl
# ./target only exists inside the cache mount, so the binary must be moved
# out of it in a RUN step that mounts the same cache.
RUN --mount=type=cache,target=./target \
mv ./target/x86_64-unknown-linux-musl/release/$PACKAGE /usr/local/bin/$PACKAGE
FROM alpine:3.18
ARG PACKAGE
WORKDIR /app/
COPY --from=BUILDER /usr/local/bin/$PACKAGE .
ENV RUST_BACKTRACE=1
ENV PATH "/app:$PATH"
# ARG values are not visible at container runtime; bake the package name
# into an ENV so the shell-form CMD below can expand it.
ENV PACKAGE_NAME ${PACKAGE}
CMD ${PACKAGE_NAME}

170
rust/connlib/.gitignore vendored Normal file
View File

@@ -0,0 +1,170 @@
### Android ###
# Gradle files
.gradle/
build/
# Local configuration file (sdk path, etc)
local.properties
# Log/OS Files
*.log
# Android Studio generated files and folders
captures/
.externalNativeBuild/
.cxx/
*.apk
output.json
# IntelliJ
*.iml
.idea/
misc.xml
deploymentTargetDropDown.xml
render.experimental.xml
# Keystore files
*.jks
*.keystore
# Google Services (e.g. APIs or Firebase)
google-services.json
# Android Profiling
*.hprof
### Android Patch ###
gen-external-apklibs
# Replacement of .externalNativeBuild directories introduced
# with Android Studio 3.5.
### Kotlin ###
# Compiled class file
*.class
# Log file
# BlueJ files
*.ctxt
# Mobile Tools for Java (J2ME)
.mtj.tmp/
# Package Files #
*.jar
*.war
*.nar
*.ear
*.zip
*.tar.gz
*.rar
# virtual machine crash logs, see http://www.java.com/en/download/help/error_hotspot.xml
hs_err_pid*
replay_pid*
### AndroidStudio ###
# Covers files to be ignored for android development using Android Studio.
# Built application files
*.ap_
*.aab
# Files for the ART/Dalvik VM
*.dex
# Java class files
# Generated files
bin/
gen/
out/
# Gradle files
.gradle
# Signing files
.signing/
# Local configuration file (sdk path, etc)
# Proguard folder generated by Eclipse
proguard/
# Log Files
# Android Studio
build/
/*/local.properties
out/
production/
.navigation/
*.ipr
*~
*.swp
# Keystore files
# Google Services (e.g. APIs or Firebase)
# google-services.json
# Android Patch
# External native build folder generated in Android Studio 2.2 and later
.externalNativeBuild
# NDK
obj/
# IntelliJ IDEA
*.iws
# User-specific configurations
.idea/caches/
.idea/libraries/
.idea/shelf/
.idea/workspace.xml
.idea/tasks.xml
.idea/.name
.idea/compiler.xml
.idea/copyright/profiles_settings.xml
.idea/encodings.xml
.idea/misc.xml
.idea/modules.xml
.idea/scopes/scope_settings.xml
.idea/dictionaries
.idea/vcs.xml
.idea/jsLibraryMappings.xml
.idea/datasources.xml
.idea/dataSources.ids
.idea/sqlDataSources.xml
.idea/dynamic.xml
.idea/uiDesigner.xml
.idea/assetWizardSettings.xml
.idea/gradle.xml
.idea/jarRepositories.xml
.idea/navEditor.xml
## Plugin-specific files:
# mpeltonen/sbt-idea plugin
.idea_modules/
# Crashlytics plugin (for Android Studio and IntelliJ)
com_crashlytics_export_strings.xml
crashlytics.properties
crashlytics-build.properties
fabric.properties
### AndroidStudio Patch ###
!clients/android/gradle/wrapper/gradle-wrapper.jar
### Apple ###
.build/
DerivedData/
xcuserdata/
*.xcuserstate
Firezone/Developer.xcconfig

31
rust/connlib/README.md Normal file
View File

@@ -0,0 +1,31 @@
# Connlib
Firezone's connectivity library shared by all clients.
## 🚧 Disclaimer 🚧
**NOTE**: This repository is undergoing heavy construction. You could say we're
_Building In The Open™_ in true open source spirit. Do not attempt to use
anything released here until this notice is removed. You have been warned.
## Building Connlib
1. You'll need a Rust toolchain installed if you don't have one already. We
recommend following the instructions at https://rustup.rs.
1. `rustup show` will install all needed targets since they are added to `rust-toolchain.toml`.
1. Follow the relevant instructions for your platform:
1. [Apple](#apple)
1. [Android](#android)
1. [Linux](#linux)
1. [Windows](#windows)
### Apple
Connlib should build successfully with recent macOS and Xcode versions assuming
you have Rust installed. If not, open a PR with the notes you found.
### Android
### Linux
### Windows

View File

@@ -0,0 +1,14 @@
[package]
name = "connlib-android"
version = "0.1.6"
edition = "2021"
[dependencies]
jni = { version = "0.21.1", features = ["invocation"] }
firezone-client-connlib = { path = "../../libs/client" }
log = "0.4"
android_logger = "0.13"
[lib]
name = "connlib"
crate-type = ["cdylib"]

View File

@@ -0,0 +1,9 @@
plugins {
id("org.mozilla.rust-android-gradle.rust-android") version "0.9.3"
id("com.android.library") version "7.4.2" apply false
id("org.jetbrains.kotlin.android") version "1.7.21" apply false
}
tasks.register("clean",Delete::class) {
delete(rootProject.buildDir)
}

View File

@@ -0,0 +1,3 @@
android.useAndroidX=true
kotlin.code.style=official
org.gradle.jvmargs=-Xmx2048m -Dfile.encoding=UTF-8

Binary file not shown.

View File

@@ -0,0 +1,6 @@
distributionBase=GRADLE_USER_HOME
distributionPath=wrapper/dists
distributionUrl=https\://services.gradle.org/distributions/gradle-7.5-bin.zip
networkTimeout=10000
zipStoreBase=GRADLE_USER_HOME
zipStorePath=wrapper/dists

244
rust/connlib/clients/android/gradlew vendored Executable file
View File

@@ -0,0 +1,244 @@
#!/bin/sh
#
# Copyright © 2015-2021 the original authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
##############################################################################
#
# Gradle start up script for POSIX generated by Gradle.
#
# Important for running:
#
# (1) You need a POSIX-compliant shell to run this script. If your /bin/sh is
# noncompliant, but you have some other compliant shell such as ksh or
# bash, then to run this script, type that shell name before the whole
# command line, like:
#
# ksh Gradle
#
# Busybox and similar reduced shells will NOT work, because this script
# requires all of these POSIX shell features:
# * functions;
# * expansions «$var», «${var}», «${var:-default}», «${var+SET}»,
# «${var#prefix}», «${var%suffix}», and «$( cmd )»;
# * compound commands having a testable exit status, especially «case»;
# * various built-in commands including «command», «set», and «ulimit».
#
# Important for patching:
#
# (2) This script targets any POSIX shell, so it avoids extensions provided
# by Bash, Ksh, etc; in particular arrays are avoided.
#
# The "traditional" practice of packing multiple parameters into a
# space-separated string is a well documented source of bugs and security
# problems, so this is (mostly) avoided, by progressively accumulating
# options in "$@", and eventually passing that to Java.
#
# Where the inherited environment variables (DEFAULT_JVM_OPTS, JAVA_OPTS,
# and GRADLE_OPTS) rely on word-splitting, this is performed explicitly;
# see the in-line comments for details.
#
# There are tweaks for specific operating systems such as AIX, CygWin,
# Darwin, MinGW, and NonStop.
#
# (3) This script is generated from the Groovy template
# https://github.com/gradle/gradle/blob/HEAD/subprojects/plugins/src/main/resources/org/gradle/api/internal/plugins/unixStartScript.txt
# within the Gradle project.
#
# You can find Gradle at https://github.com/gradle/gradle/.
#
##############################################################################
# Attempt to set APP_HOME
# Resolve links: $0 may be a link
app_path=$0
# Need this for daisy-chained symlinks.
while
APP_HOME=${app_path%"${app_path##*/}"} # leaves a trailing /; empty if no leading path
[ -h "$app_path" ]
do
ls=$( ls -ld "$app_path" )
link=${ls#*' -> '}
case $link in #(
/*) app_path=$link ;; #(
*) app_path=$APP_HOME$link ;;
esac
done
# This is normally unused
# shellcheck disable=SC2034
APP_BASE_NAME=${0##*/}
APP_HOME=$( cd "${APP_HOME:-./}" && pwd -P ) || exit
# Add default JVM options here. You can also use JAVA_OPTS and GRADLE_OPTS to pass JVM options to this script.
DEFAULT_JVM_OPTS='"-Xmx64m" "-Xms64m"'
# Use the maximum available, or set MAX_FD != -1 to use that value.
MAX_FD=maximum
warn () {
echo "$*"
} >&2
die () {
echo
echo "$*"
echo
exit 1
} >&2
# OS specific support (must be 'true' or 'false').
cygwin=false
msys=false
darwin=false
nonstop=false
case "$( uname )" in #(
CYGWIN* ) cygwin=true ;; #(
Darwin* ) darwin=true ;; #(
MSYS* | MINGW* ) msys=true ;; #(
NONSTOP* ) nonstop=true ;;
esac
CLASSPATH=$APP_HOME/gradle/wrapper/gradle-wrapper.jar
# Determine the Java command to use to start the JVM.
if [ -n "$JAVA_HOME" ] ; then
if [ -x "$JAVA_HOME/jre/sh/java" ] ; then
# IBM's JDK on AIX uses strange locations for the executables
JAVACMD=$JAVA_HOME/jre/sh/java
else
JAVACMD=$JAVA_HOME/bin/java
fi
if [ ! -x "$JAVACMD" ] ; then
die "ERROR: JAVA_HOME is set to an invalid directory: $JAVA_HOME
Please set the JAVA_HOME variable in your environment to match the
location of your Java installation."
fi
else
JAVACMD=java
which java >/dev/null 2>&1 || die "ERROR: JAVA_HOME is not set and no 'java' command could be found in your PATH.
Please set the JAVA_HOME variable in your environment to match the
location of your Java installation."
fi
# Increase the maximum file descriptors if we can.
if ! "$cygwin" && ! "$darwin" && ! "$nonstop" ; then
case $MAX_FD in #(
max*)
# In POSIX sh, ulimit -H is undefined. That's why the result is checked to see if it worked.
# shellcheck disable=SC3045
MAX_FD=$( ulimit -H -n ) ||
warn "Could not query maximum file descriptor limit"
esac
case $MAX_FD in #(
'' | soft) :;; #(
*)
# In POSIX sh, ulimit -n is undefined. That's why the result is checked to see if it worked.
# shellcheck disable=SC3045
ulimit -n "$MAX_FD" ||
warn "Could not set maximum file descriptor limit to $MAX_FD"
esac
fi
# Collect all arguments for the java command, stacking in reverse order:
# * args from the command line
# * the main class name
# * -classpath
# * -D...appname settings
# * --module-path (only if needed)
# * DEFAULT_JVM_OPTS, JAVA_OPTS, and GRADLE_OPTS environment variables.
# For Cygwin or MSYS, switch paths to Windows format before running java
if "$cygwin" || "$msys" ; then
APP_HOME=$( cygpath --path --mixed "$APP_HOME" )
CLASSPATH=$( cygpath --path --mixed "$CLASSPATH" )
JAVACMD=$( cygpath --unix "$JAVACMD" )
# Now convert the arguments - kludge to limit ourselves to /bin/sh
for arg do
if
case $arg in #(
-*) false ;; # don't mess with options #(
/?*) t=${arg#/} t=/${t%%/*} # looks like a POSIX filepath
[ -e "$t" ] ;; #(
*) false ;;
esac
then
arg=$( cygpath --path --ignore --mixed "$arg" )
fi
# Roll the args list around exactly as many times as the number of
# args, so each arg winds up back in the position where it started, but
# possibly modified.
#
# NB: a `for` loop captures its iteration list before it begins, so
# changing the positional parameters here affects neither the number of
# iterations, nor the values presented in `arg`.
shift # remove old arg
set -- "$@" "$arg" # push replacement arg
done
fi
# Collect all arguments for the java command;
# * $DEFAULT_JVM_OPTS, $JAVA_OPTS, and $GRADLE_OPTS can contain fragments of
# shell script including quotes and variable substitutions, so put them in
# double quotes to make sure that they get re-expanded; and
# * put everything else in single quotes, so that it's not re-expanded.
set -- \
"-Dorg.gradle.appname=$APP_BASE_NAME" \
-classpath "$CLASSPATH" \
org.gradle.wrapper.GradleWrapperMain \
"$@"
# Stop when "xargs" is not available.
if ! command -v xargs >/dev/null 2>&1
then
die "xargs is not available"
fi
# Use "xargs" to parse quoted args.
#
# With -n1 it outputs one arg per line, with the quotes and backslashes removed.
#
# In Bash we could simply go:
#
# readarray ARGS < <( xargs -n1 <<<"$var" ) &&
# set -- "${ARGS[@]}" "$@"
#
# but POSIX shell has neither arrays nor command substitution, so instead we
# post-process each arg (as a line of input to sed) to backslash-escape any
# character that might be a shell metacharacter, then use eval to reverse
# that process (while maintaining the separation between arguments), and wrap
# the whole thing up as a single "set" statement.
#
# This will of course break if any of these variables contains a newline or
# an unmatched quote.
#
eval "set -- $(
printf '%s\n' "$DEFAULT_JVM_OPTS $JAVA_OPTS $GRADLE_OPTS" |
xargs -n1 |
sed ' s~[^-[:alnum:]+,./:=@_]~\\&~g; ' |
tr '\n' ' '
)" '"$@"'
exec "$JAVACMD" "$@"

View File

@@ -0,0 +1,92 @@
@rem
@rem Copyright 2015 the original author or authors.
@rem
@rem Licensed under the Apache License, Version 2.0 (the "License");
@rem you may not use this file except in compliance with the License.
@rem You may obtain a copy of the License at
@rem
@rem https://www.apache.org/licenses/LICENSE-2.0
@rem
@rem Unless required by applicable law or agreed to in writing, software
@rem distributed under the License is distributed on an "AS IS" BASIS,
@rem WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
@rem See the License for the specific language governing permissions and
@rem limitations under the License.
@rem
@if "%DEBUG%"=="" @echo off
@rem ##########################################################################
@rem
@rem Gradle startup script for Windows
@rem
@rem ##########################################################################
@rem Set local scope for the variables with windows NT shell
if "%OS%"=="Windows_NT" setlocal
set DIRNAME=%~dp0
if "%DIRNAME%"=="" set DIRNAME=.
@rem This is normally unused
set APP_BASE_NAME=%~n0
set APP_HOME=%DIRNAME%
@rem Resolve any "." and ".." in APP_HOME to make it shorter.
for %%i in ("%APP_HOME%") do set APP_HOME=%%~fi
@rem Add default JVM options here. You can also use JAVA_OPTS and GRADLE_OPTS to pass JVM options to this script.
set DEFAULT_JVM_OPTS="-Xmx64m" "-Xms64m"
@rem Find java.exe
if defined JAVA_HOME goto findJavaFromJavaHome
set JAVA_EXE=java.exe
%JAVA_EXE% -version >NUL 2>&1
if %ERRORLEVEL% equ 0 goto execute
echo.
echo ERROR: JAVA_HOME is not set and no 'java' command could be found in your PATH.
echo.
echo Please set the JAVA_HOME variable in your environment to match the
echo location of your Java installation.
goto fail
:findJavaFromJavaHome
set JAVA_HOME=%JAVA_HOME:"=%
set JAVA_EXE=%JAVA_HOME%/bin/java.exe
if exist "%JAVA_EXE%" goto execute
echo.
echo ERROR: JAVA_HOME is set to an invalid directory: %JAVA_HOME%
echo.
echo Please set the JAVA_HOME variable in your environment to match the
echo location of your Java installation.
goto fail
:execute
@rem Setup the command line
set CLASSPATH=%APP_HOME%\gradle\wrapper\gradle-wrapper.jar
@rem Execute Gradle
"%JAVA_EXE%" %DEFAULT_JVM_OPTS% %JAVA_OPTS% %GRADLE_OPTS% "-Dorg.gradle.appname=%APP_BASE_NAME%" -classpath "%CLASSPATH%" org.gradle.wrapper.GradleWrapperMain %*
:end
@rem End local scope for the variables with windows NT shell
if %ERRORLEVEL% equ 0 goto mainEnd
:fail
rem Set variable GRADLE_EXIT_CONSOLE if you need the _script_ return code instead of
rem the _cmd.exe /c_ return code!
set EXIT_CODE=%ERRORLEVEL%
if %EXIT_CODE% equ 0 set EXIT_CODE=1
if not ""=="%GRADLE_EXIT_CONSOLE%" exit %EXIT_CODE%
exit /b %EXIT_CODE%
:mainEnd
if "%OS%"=="Windows_NT" endlocal
:omega

View File

@@ -0,0 +1,94 @@
plugins {
id("org.mozilla.rust-android-gradle.rust-android")
id("com.android.library")
id("kotlin-android")
id("org.jetbrains.kotlin.android")
`maven-publish`
}
afterEvaluate {
publishing {
publications {
create<MavenPublication>("release") {
groupId = "dev.firezone"
artifactId = "connlib"
version = "0.1.6"
from(components["release"])
}
}
}
}
publishing {
repositories {
maven {
url = uri("https://maven.pkg.github.com/firezone/connlib")
name = "GitHubPackages"
credentials {
username = System.getenv("GITHUB_ACTOR")
password = System.getenv("GITHUB_TOKEN")
}
}
}
}
android {
namespace = "dev.firezone.connlib"
compileSdk = 33
defaultConfig {
minSdk = 29
targetSdk = 33
consumerProguardFiles("consumer-rules.pro")
testInstrumentationRunner = "androidx.test.runner.AndroidJUnitRunner"
}
externalNativeBuild {
cmake {
version = "3.22.1"
}
}
ndkVersion = "25.2.9519653"
buildTypes {
getByName("release") {
isMinifyEnabled = false
proguardFiles(getDefaultProguardFile("proguard-android-optimize.txt"), "proguard-rules.pro")
}
}
compileOptions {
sourceCompatibility(JavaVersion.VERSION_1_8)
targetCompatibility(JavaVersion.VERSION_1_8)
}
kotlinOptions {
jvmTarget = "1.8"
}
publishing {
singleVariant("release")
}
}
dependencies {
implementation("androidx.core:core-ktx:1.7.0")
implementation("androidx.test.ext:junit-gtest:1.0.0-alpha01")
implementation("com.android.ndk.thirdparty:googletest:1.11.0-beta-1")
implementation(fileTree(mapOf("dir" to "libs", "include" to listOf("*.jar"))))
implementation("org.jetbrains.kotlin:kotlin-stdlib:1.7.21")
testImplementation("junit:junit:4.13.2")
androidTestImplementation("androidx.test.ext:junit:1.1.3")
androidTestImplementation("androidx.test.espresso:espresso-core:3.4.0")
}
apply(plugin = "org.mozilla.rust-android-gradle.rust-android")
cargo {
prebuiltToolchains = true
verbose = true
module = "../"
libname = "connlib"
targets = listOf("arm", "arm64", "x86", "x86_64")
}
tasks.whenTaskAdded {
if (name.startsWith("javaPreCompile")) {
dependsOn(tasks.named("cargoBuild"))
}
}

View File

@@ -0,0 +1,4 @@
<?xml version="1.0" encoding="utf-8"?>
<manifest xmlns:android="http://schemas.android.com/apk/res/android">
</manifest>

View File

@@ -0,0 +1,5 @@
package dev.firezone.connlib

// JNI surface for connlib's native logging. The implementation lives in Rust
// (Java_dev_firezone_connlib_Logger_init in the connlib cdylib).
public object Logger {
// Initializes the native Android logger; call once after the library is loaded.
public external fun init()
}

View File

@@ -0,0 +1,8 @@
package dev.firezone.connlib

// JNI surface for connlib session management; each function is implemented in
// Rust (Java_dev_firezone_connlib_Session_* in the connlib cdylib).
public object Session {
// Opens a session against the portal; returns an opaque native session handle.
public external fun connect(portalURL: String, token: String, callback: Any): Long
// Tears down the session identified by the native handle.
public external fun disconnect(session: Long): Boolean
// Forces the native side to rebind its sockets (e.g. after a network change).
public external fun bumpSockets(session: Long): Boolean
public external fun disableSomeRoamingForBrokenMobileSemantics(session: Long): Boolean
}

View File

@@ -0,0 +1,19 @@
package dev.firezone.connlib
import android.util.Log
// Minimal VpnService subclass: currently only logs lifecycle events and
// otherwise defers entirely to android.net.VpnService.
public class VpnService : android.net.VpnService() {
public override fun onCreate() {
super.onCreate()
Log.d("Connlib", "VpnService.onCreate")
}
public override fun onDestroy() {
super.onDestroy()
Log.d("Connlib", "VpnService.onDestroy")
}
public override fun onStartCommand(intent: android.content.Intent?, flags: Int, startId: Int): Int {
Log.d("Connlib", "VpnService.onStartCommand")
return super.onStartCommand(intent, flags, startId)
}
}

View File

@@ -0,0 +1,9 @@
package dev.firezone.connlib
import org.junit.Test
import org.junit.Assert.*
class LoggerTest {
// TODO
}

View File

@@ -0,0 +1,9 @@
package dev.firezone.connlib
import org.junit.Test
import org.junit.Assert.*
class SessionTest {
// TODO
}

View File

@@ -0,0 +1,10 @@
package dev.firezone.connlib
import org.junit.Test
import org.junit.Assert.*
class VpnServiceTest {
// TODO
}

View File

@@ -0,0 +1,21 @@
# Add project specific ProGuard rules here.
# You can control the set of applied configuration files using the
# proguardFiles setting in build.gradle.
#
# For more details, see
# http://developer.android.com/guide/developing/tools/proguard.html
# If your project uses WebView with JS, uncomment the following
# and specify the fully qualified class name to the JavaScript interface
# class:
#-keepclassmembers class fqcn.of.javascript.interface.for.webview {
# public *;
#}
# Uncomment this to preserve the line number information for
# debugging stack traces.
#-keepattributes SourceFile,LineNumberTable
# If you keep the line number information, uncomment this to
# hide the original source file name.
#-renamesourcefileattribute SourceFile

View File

@@ -0,0 +1,17 @@
// Gradle settings for the Connlib Android wrapper project.
pluginManagement {
    repositories {
        gradlePluginPortal()
        google()
        mavenCentral()
    }
}
dependencyResolutionManagement {
    // Fail the build if any module declares its own repositories; every
    // dependency must resolve from the ones listed here.
    repositoriesMode.set(RepositoriesMode.FAIL_ON_PROJECT_REPOS)
    repositories {
        google()
        mavenCentral()
    }
}
rootProject.name = "Connlib"
// Single library module containing the JNI bindings and Kotlin API.
include(":lib")

View File

@@ -0,0 +1,128 @@
#[macro_use]
extern crate log;
extern crate android_logger;
extern crate jni;
use self::jni::JNIEnv;
use android_logger::Config;
use firezone_client_connlib::{
Callbacks, Error, ErrorType, ResourceList, Session, TunnelAddresses,
};
use jni::objects::{JClass, JObject, JString, JValue};
use log::LevelFilter;
/// Initializes Android logging for connlib.
///
/// This should be called once after the library is loaded by the system.
#[allow(non_snake_case)]
#[no_mangle]
pub extern "system" fn Java_dev_firezone_connlib_Logger_init(_: JNIEnv, _: JClass) {
    // Debug builds log everything; release builds only warnings and above.
    let level = if cfg!(debug_assertions) {
        LevelFilter::Trace
    } else {
        LevelFilter::Warn
    };
    android_logger::init_once(Config::default().with_max_level(level).with_tag("connlib"))
}
/// Callback sink handed to connlib. An uninhabited enum suffices because
/// every `Callbacks` method here is an associated function — no per-instance
/// state is carried.
pub enum CallbackHandler {}

impl Callbacks for CallbackHandler {
    // Not yet implemented: calling this panics via `todo!()`.
    fn on_update_resources(_resource_list: ResourceList) {
        todo!()
    }

    // Not yet implemented: calling this panics via `todo!()`.
    // (The "adresses" spelling comes from the trait in firezone-client-connlib.)
    fn on_set_tunnel_adresses(_tunnel_addresses: TunnelAddresses) {
        todo!()
    }

    // Not yet implemented: calling this panics via `todo!()`.
    fn on_error(_error: &Error, _error_type: ErrorType) {
        todo!()
    }
}
/// # Safety
/// Pointers must be valid
#[allow(non_snake_case)]
#[no_mangle]
pub unsafe extern "system" fn Java_dev_firezone_connlib_Session_connect(
mut env: JNIEnv,
_class: JClass,
portal_url: JString,
portal_token: JString,
callback: JObject,
) -> *const Session<CallbackHandler> {
let portal_url: String = env.get_string(&portal_url).unwrap().into();
let portal_token: String = env.get_string(&portal_token).unwrap().into();
let session = Box::new(
Session::connect::<CallbackHandler>(portal_url.as_str(), portal_token).expect("TODO!"),
);
// TODO: Get actual IPs returned from portal based on this device
let tunnelAddressesJSON = "[{\"tunnel_ipv4\": \"100.100.1.1\", \"tunnel_ipv6\": \"fd00:0222:2011:1111:6def:1001:fe67:0012\"}]";
let tunnel_addresses = env.new_string(tunnelAddressesJSON).unwrap();
match env.call_method(
callback,
"onSetTunnelAddresses",
"(Ljava/lang/String;)Z",
&[JValue::from(&tunnel_addresses)],
) {
Ok(res) => trace!("onSetTunnelAddresses returned {:?}", res),
Err(e) => error!("Failed to call setTunnelAddresses: {:?}", e),
}
Box::into_raw(session)
}
/// Disconnects and frees a session created by `connect`.
///
/// # Safety
/// `session_ptr` must be null or a pointer previously returned by `connect`
/// that has not already been passed to this function — it is consumed here
/// and must not be used afterwards.
#[allow(non_snake_case)]
#[no_mangle]
pub unsafe extern "system" fn Java_dev_firezone_connlib_Session_disconnect(
    _env: JNIEnv,
    _: JClass,
    session_ptr: *mut Session<CallbackHandler>,
) -> bool {
    if session_ptr.is_null() {
        return false;
    }
    // SAFETY: checked non-null above; the caller guarantees the pointer came
    // from `Box::into_raw` in `connect` and is not reused. Reconstituting the
    // Box frees the session when it drops — previously the allocation leaked.
    let mut session = unsafe { Box::from_raw(session_ptr) };
    session.disconnect()
}
/// # Safety
/// Pointers must be valid
#[allow(non_snake_case)]
#[no_mangle]
pub unsafe extern "system" fn Java_dev_firezone_connlib_Session_bump_sockets(
session_ptr: *const Session<CallbackHandler>,
) -> bool {
if session_ptr.is_null() {
return false;
}
unsafe { (*session_ptr).bump_sockets() };
// TODO: See https://github.com/WireGuard/wireguard-apple/blob/2fec12a6e1f6e3460b6ee483aa00ad29cddadab1/Sources/WireGuardKitGo/api-apple.go#LL197C6-L197C50
true
}
/// # Safety
/// Pointers must be valid
#[allow(non_snake_case)]
#[no_mangle]
pub unsafe extern "system" fn Java_dev_firezone_connlib_disable_some_roaming_for_broken_mobile_semantics(
session_ptr: *const Session<CallbackHandler>,
) -> bool {
if session_ptr.is_null() {
return false;
}
unsafe { (*session_ptr).disable_some_roaming_for_broken_mobile_semantics() };
// TODO: See https://github.com/WireGuard/wireguard-apple/blob/2fec12a6e1f6e3460b6ee483aa00ad29cddadab1/Sources/WireGuardKitGo/api-apple.go#LL197C6-L197C50
true
}

31
rust/connlib/clients/apple/.gitignore vendored Normal file
View File

@@ -0,0 +1,31 @@
.DS_Store
# Rust
/target
Cargo.lock
### Xcode ###
xcuserdata/
/.build
/Packages
DerivedData/
.swiftpm/config/registries.json
.swiftpm/xcode/package.xcworkspace/contents.xcworkspacedata
.netrc
*.xcarchive/
*.xcframework/
*.checksum.txt
### Xcode Patch ###
*.xcodeproj/*
!*.xcodeproj/project.pbxproj
!*.xcodeproj/xcshareddata/
!*.xcodeproj/project.xcworkspace/
!*.xcworkspace/contents.xcworkspacedata
/*.gcno
**/xcshareddata/WorkspaceSettings.xcsettings
## Xcode 8 and earlier
*.xcscmblueprint
*.xccheckout

View File

@@ -0,0 +1,16 @@
# Apple wrapper crate: builds connlib as a static library for the XCFramework.
[package]
name = "connlib-apple"
version = "0.1.6"
edition = "2021"

[build-dependencies]
# Fork pinned on the `fix-already-declared` branch — presumably until the
# patch lands upstream in swift-bridge; TODO confirm and repoint to crates.io.
swift-bridge-build = { git = "https://github.com/conectado/swift-bridge.git", branch = "fix-already-declared" }

[dependencies]
libc = "0.2"
swift-bridge = { workspace = true }
# Core connlib client library this crate exposes to Swift.
firezone-client-connlib = { path = "../../libs/client" }

[lib]
# Static library named `libconnlib.a`, linked into the Apple framework target.
name = "connlib"
crate-type = ["staticlib"]

View File

@@ -0,0 +1,19 @@
# Connlib Apple Wrapper
Apple Package wrapper for Connlib distributed as a binary XCFramework for
inclusion in the Firezone Apple client.
## Prerequisites
1. Install [stable Rust](https://www.rust-lang.org/tools/install) for your
platform
1. Install `llvm` from Homebrew:
```
brew install llvm
```
This fixes build issues with Apple's command line tools. See
https://github.com/briansmith/ring/issues/1374

View File

@@ -0,0 +1,298 @@
//
// Adapter.swift
// (c) 2023 Firezone, Inc.
// LICENSE: Apache-2.0
//
import Foundation
import NetworkExtension
import os.log
/// Errors the `Adapter` can report to its completion handlers.
public enum AdapterError: Error {
    /// Failure to perform an operation in such state.
    case invalidState

    /// Failure to set network settings.
    case setNetworkSettings(Error)
}

/// Enum representing internal state of the `Adapter`.
private enum State {
    /// The tunnel is stopped
    case stopped

    /// The tunnel is up and running
    case started(_ handle: WrappedSession)

    /// The tunnel is temporarily shutdown due to device going offline
    case temporaryShutdown
}
// Loosely inspired from WireGuardAdapter from WireGuardKit.
//
// Owns the connlib session lifecycle for the packet tunnel provider:
// start/stop, applying NEPacketTunnelNetworkSettings, and reacting to
// network path changes.
public class Adapter {
    private let logger = Logger(subsystem: "dev.firezone.firezone", category: "packet-tunnel")

    // Maintain a handle to the currently instantiated tunnel adapter 🤮
    public static var currentAdapter: Adapter?

    // Maintain a reference to the initialized callback handler
    public static var callbackHandler: CallbackHandler?

    // Latest applied NETunnelProviderNetworkSettings; read on resume from
    // temporary shutdown and by CallbackHandler to preserve routes.
    public var lastNetworkSettings: NEPacketTunnelNetworkSettings?

    /// Packet tunnel provider.
    private weak var packetTunnelProvider: NEPacketTunnelProvider?

    /// Network routes monitor.
    private var networkMonitor: NWPathMonitor?

    /// Private queue used to synchronize access to `Adapter` members.
    private let workQueue = DispatchQueue(label: "FirezoneAdapterWorkQueue")

    /// Adapter state.
    private var state: State = .stopped

    public init(with packetTunnelProvider: NEPacketTunnelProvider) {
        self.packetTunnelProvider = packetTunnelProvider
        // There must be a better way than making this a static class var...
        Self.currentAdapter = self
        Self.callbackHandler = CallbackHandler(adapter: self)
    }

    deinit {
        // Remove static var reference
        Self.currentAdapter = nil

        // Cancel network monitor
        networkMonitor?.cancel()

        // Shutdown the tunnel
        if case .started(let wrappedSession) = self.state {
            self.logger.log(level: .debug, "\(#function)")
            wrappedSession.disconnect()
        }
    }

    /// Start the tunnel.
    /// - Parameters:
    ///   - completionHandler: called with `nil` on success or an
    ///     `AdapterError` describing why startup failed.
    public func start(completionHandler: @escaping (AdapterError?) -> Void) throws {
        workQueue.async {
            guard case .stopped = self.state else {
                completionHandler(.invalidState)
                return
            }

            let networkMonitor = NWPathMonitor()
            networkMonitor.pathUpdateHandler = { [weak self] path in
                self?.didReceivePathUpdate(path: path)
            }
            networkMonitor.start(queue: self.workQueue)

            do {
                // Apply empty routes first; connlib callbacks fill them in later.
                try self.setNetworkSettings(self.generateNetworkSettings(ipv4Routes: [], ipv6Routes: []))
                // TODO: portal URL and token are hard-coded for development.
                // NOTE(review): this call site has no `try`, but the one in
                // didReceivePathUpdate does — confirm whether
                // WrappedSession.connect throws.
                self.state = .started(
                    WrappedSession.connect(
                        "http://localhost:4568",
                        "test-token",
                        Self.callbackHandler!
                    )
                )
                self.networkMonitor = networkMonitor
                completionHandler(nil)
            } catch let error as AdapterError {
                networkMonitor.cancel()
                completionHandler(error)
            } catch {
                // setNetworkSettings only throws AdapterError; anything else is a bug.
                fatalError()
            }
        }
    }

    /// Stop the tunnel and tear down the network monitor.
    public func stop(completionHandler: @escaping (AdapterError?) -> Void) {
        workQueue.async {
            switch self.state {
            case .started(let wrappedSession):
                wrappedSession.disconnect()
            case .temporaryShutdown:
                break
            case .stopped:
                completionHandler(.invalidState)
                return
            }

            self.networkMonitor?.cancel()
            self.networkMonitor = nil

            self.state = .stopped
            completionHandler(nil)
        }
    }

    /// Builds the NEPacketTunnelNetworkSettings for the given interface
    /// addresses and routes.
    public func generateNetworkSettings(
        addresses4: [String] = ["100.100.111.2"], addresses6: [String] = ["fd00:0222:2011:1111::2"],
        ipv4Routes: [NEIPv4Route], ipv6Routes: [NEIPv6Route]
    )
        -> NEPacketTunnelNetworkSettings
    {
        // The destination IP that connlib will assign our DNS proxy to.
        let dnsSentinel = "1.1.1.1"

        // We can probably do better than this; see https://www.rfc-editor.org/info/rfc4821
        // But stick with something simple for now. 1280 is the minimum that will work for IPv6.
        let mtu = 1280

        // TODO: replace these with IPs returned from the connect call to portal
        let subnetmask = "255.192.0.0"
        let networkPrefixLength = NSNumber(value: 64)

        /* iOS requires a tunnel endpoint, whereas in WireGuard it's valid for
         * a tunnel to have no endpoint, or for there to be many endpoints, in
         * which case, displaying a single one in settings doesn't really
         * make sense. So, we fill it in with this placeholder, which is not
         * a valid IP address that will actually route over the Internet.
         */
        let networkSettings = NEPacketTunnelNetworkSettings(tunnelRemoteAddress: "127.0.0.1")

        let dnsSettings = NEDNSSettings(servers: [dnsSentinel])
        // All DNS queries must first go through the tunnel's DNS
        dnsSettings.matchDomains = [""]
        networkSettings.dnsSettings = dnsSettings
        networkSettings.mtu = NSNumber(value: mtu)

        let ipv4Settings = NEIPv4Settings(
            addresses: addresses4,
            subnetMasks: [subnetmask])
        ipv4Settings.includedRoutes = ipv4Routes
        networkSettings.ipv4Settings = ipv4Settings

        let ipv6Settings = NEIPv6Settings(
            addresses: addresses6,
            networkPrefixLengths: [networkPrefixLength])
        ipv6Settings.includedRoutes = ipv6Routes
        networkSettings.ipv6Settings = ipv6Settings

        return networkSettings
    }

    /// Synchronously applies `networkSettings` to the packet tunnel provider.
    /// - Throws: `AdapterError.setNetworkSettings` when the system reports an
    ///   error before the internal timeout elapses.
    public func setNetworkSettings(_ networkSettings: NEPacketTunnelNetworkSettings) throws {
        var systemError: Error?
        let condition = NSCondition()

        // Activate the condition
        condition.lock()
        defer { condition.unlock() }

        self.packetTunnelProvider?.setTunnelNetworkSettings(networkSettings) { error in
            systemError = error
            condition.signal()
        }

        // Packet tunnel's `setTunnelNetworkSettings` times out in certain
        // scenarios & never calls the given callback.
        let setTunnelNetworkSettingsTimeout: TimeInterval = 5  // seconds

        if condition.wait(until: Date().addingTimeInterval(setTunnelNetworkSettingsTimeout)) {
            if let systemError = systemError {
                throw AdapterError.setNetworkSettings(systemError)
            }
        }
        // NOTE(review): on timeout `systemError` stays nil and the settings are
        // still cached below even though we can't know they were applied —
        // confirm this is intended.

        // Save the latest applied network settings if there was no error.
        // (Fixed: the condition was inverted and cached settings only on error,
        // which broke resume-from-temporaryShutdown and route preservation.)
        if systemError == nil {
            self.lastNetworkSettings = networkSettings
        }
    }

    /// Update runtime configuration.
    /// - Parameters:
    ///   - ipv4Routes: IPv4 routes to send through the tunnel.
    ///   - ipv6Routes: IPv6 routes to send through the tunnel.
    ///   - completionHandler: completion handler.
    public func update(
        ipv4Routes: [NEIPv4Route], ipv6Routes: [NEIPv6Route],
        completionHandler: @escaping (AdapterError?) -> Void
    ) {
        workQueue.async {
            if case .stopped = self.state {
                completionHandler(.invalidState)
                return
            }

            // Tell the system that the tunnel is going to reconnect using new WireGuard
            // configuration.
            // This will broadcast the `NEVPNStatusDidChange` notification to the GUI process.
            self.packetTunnelProvider?.reasserting = true
            defer {
                self.packetTunnelProvider?.reasserting = false
            }

            do {
                try self.setNetworkSettings(
                    self.generateNetworkSettings(ipv4Routes: ipv4Routes, ipv6Routes: ipv6Routes))

                // Re-assert the current state; `.stopped` was excluded above.
                switch self.state {
                case .started(let wrappedSession):
                    self.state = .started(wrappedSession)
                case .temporaryShutdown:
                    self.state = .temporaryShutdown
                case .stopped:
                    fatalError()
                }

                completionHandler(nil)
            } catch let error as AdapterError {
                completionHandler(error)
            } catch {
                fatalError()
            }
        }
    }

    // Reacts to network path changes: bump sockets while online, pause the
    // backend while offline, and reconnect when connectivity returns.
    private func didReceivePathUpdate(path: Network.NWPath) {
        #if os(macOS)
            if case .started(let wrappedSession) = self.state {
                wrappedSession.bumpSockets()
            }
        #elseif os(iOS)
            switch self.state {
            case .started(let wrappedSession):
                if path.status == .satisfied {
                    wrappedSession.disableSomeRoamingForBrokenMobileSemantics()
                    wrappedSession.bumpSockets()
                } else {
                    self.logger.log(level: .debug, "Connectivity offline, pausing backend.")
                    self.state = .temporaryShutdown
                    wrappedSession.disconnect()
                }
            case .temporaryShutdown:
                guard path.status == .satisfied else { return }
                self.logger.log(level: .debug, "Connectivity online, resuming backend.")
                do {
                    // Requires lastNetworkSettings cached by setNetworkSettings.
                    try self.setNetworkSettings(self.lastNetworkSettings!)
                    self.state = .started(
                        try WrappedSession.connect("http://localhost:4568", "test-token", Self.callbackHandler!)
                    )
                } catch {
                    self.logger.log(level: .debug, "Failed to restart backend: \(error.localizedDescription)")
                }
            case .stopped:
                // no-op
                break
            }
        #else
            #error("Unsupported")
        #endif
    }
}

View File

@@ -0,0 +1,7 @@
// Umbrella bridging header: exposes the swift-bridge generated C interfaces
// (core glue plus the connlib-apple bridge) to Swift.
#ifndef BridgingHeader_h
#define BridgingHeader_h

#include <connlib/SwiftBridgeCore.h>
#include <connlib/connlib-apple.h>

#endif

View File

@@ -0,0 +1,93 @@
//
// Callbacks.swift
// connlib
//
// Created by Jamil Bou Kheir on 4/3/23.
//
import NetworkExtension
import os.log
/// Adopt to be notified whenever connlib reports an updated resource list.
public protocol CallbackHandlerDelegate: AnyObject {
    func didUpdateResources(_ resourceList: ResourceList)
}
/// Receives callbacks from connlib and translates them into tunnel
/// network-setting updates on the `Adapter`.
public class CallbackHandler {
    // TODO: Add a table view property here to update?
    // Adapter whose network settings are re-applied on each callback.
    var adapter: Adapter?

    // Optional observer notified on every resource-list update.
    public weak var delegate: CallbackHandlerDelegate?

    init(adapter: Adapter) {
        self.adapter = adapter
    }

    /// Called by connlib when the resource list changes. Returns true when the
    /// resulting network settings were applied successfully.
    func onUpdateResources(resourceList: ResourceList) -> Bool {
        // If there's any entity that assigned itself as this callbackHandler's
        // delegate, it will be called every time this `onUpdateResources` method
        // is, allowing that entity to react to resource updates and do whatever
        // they want.
        delegate?.didUpdateResources(resourceList)

        let addresses4 =
            self.adapter?.lastNetworkSettings?.ipv4Settings?.addresses ?? ["100.100.111.2"]
        // NOTE(review): this fallback uses prefix fd00:0222:2021:… while
        // Adapter.generateNetworkSettings defaults to fd00:0222:2011:… —
        // confirm which prefix is intended.
        let addresses6 =
            self.adapter?.lastNetworkSettings?.ipv6Settings?.addresses ?? [
                "fd00:0222:2021:1111::2"
            ]

        // TODO: Use actual passed in resources to achieve split tunnel
        let ipv4Routes = [NEIPv4Route(destinationAddress: "100.64.0.0", subnetMask: "255.192.0.0")]
        let ipv6Routes = [
            NEIPv6Route(destinationAddress: "fd00:0222:2021:1111::0", networkPrefixLength: 64)
        ]

        return setTunnelSettingsKeepingSomeExisting(
            addresses4: addresses4, addresses6: addresses6, ipv4Routes: ipv4Routes, ipv6Routes: ipv6Routes
        )
    }

    /// Called by connlib with this device's tunnel interface addresses; keeps
    /// whatever routes are currently applied.
    func onSetTunnelAddresses(tunnelAddresses: TunnelAddresses) -> Bool {
        let addresses4 = [tunnelAddresses.address4.toString()]
        let addresses6 = [tunnelAddresses.address6.toString()]

        let ipv4Routes =
            Adapter.currentAdapter?.lastNetworkSettings?.ipv4Settings?.includedRoutes ?? []
        let ipv6Routes =
            Adapter.currentAdapter?.lastNetworkSettings?.ipv6Settings?.includedRoutes ?? []

        return setTunnelSettingsKeepingSomeExisting(
            addresses4: addresses4, addresses6: addresses6, ipv4Routes: ipv4Routes, ipv6Routes: ipv6Routes
        )
    }

    // Applies new interface addresses via the adapter while preserving the
    // supplied routes; returns false when the adapter is missing or the
    // settings could not be applied.
    private func setTunnelSettingsKeepingSomeExisting(
        addresses4: [String], addresses6: [String], ipv4Routes: [NEIPv4Route], ipv6Routes: [NEIPv6Route]
    ) -> Bool {
        let logger = Logger(subsystem: "dev.firezone.firezone", category: "packet-tunnel")
        if self.adapter != nil {
            do {
                /* If the tunnel interface addresses are being updated, it's impossible for the tunnel to
                   stay up due to the way WireGuard works. Still, we try not to change the tunnel's routes
                   here Just In Case.
                 */
                try self.adapter!.setNetworkSettings(
                    self.adapter!.generateNetworkSettings(
                        addresses4: addresses4,
                        addresses6: addresses6,
                        ipv4Routes: ipv4Routes,
                        ipv6Routes: ipv6Routes
                    )
                )
                return true
            } catch let error {
                logger.log(level: .debug, "Error setting adapter settings: \(String(describing: error))")
                return false
            }
        } else {
            logger.log(level: .debug, "Adapter not initialized!")
            return false
        }
    }
}

View File

@@ -0,0 +1,2 @@
*
!.gitignore

View File

@@ -0,0 +1,18 @@
//
// connlib.h
// connlib
//
// Created by Jamil Bou Kheir on 4/3/23.
//
#import <Foundation/Foundation.h>
//! Project version number for connlib.
FOUNDATION_EXPORT double connlibVersionNumber;
//! Project version string for connlib.
FOUNDATION_EXPORT const unsigned char connlibVersionString[];
// In this header, you should import all the public headers of your framework using statements like #import <connlib/PublicHeader.h>
#import <connlib/BridgingHeader.h>

View File

@@ -0,0 +1,60 @@
#!/bin/bash

##################################################
# We call this from an Xcode run script.
#
# Cross-compiles the connlib static library for the
# active Xcode platform via `cargo lipo`.
##################################################

set -ex

if [[ -z "$PROJECT_DIR" ]]; then
  echo "Must provide PROJECT_DIR environment variable set to the Xcode project directory." 1>&2
  exit 1
fi

# Quoted: Xcode project paths commonly contain spaces.
cd "$PROJECT_DIR"

# Default PLATFORM_NAME to macosx if not set.
: "${PLATFORM_NAME:=macosx}"

export PATH="$HOME/.cargo/bin:$PATH"

base_dir=$(xcrun --sdk "$PLATFORM_NAME" --show-sdk-path)

# See https://github.com/briansmith/ring/issues/1332
export LIBRARY_PATH="${base_dir}/usr/lib"
export INCLUDE_PATH="${base_dir}/usr/include"
export CFLAGS="-L ${LIBRARY_PATH} -I ${INCLUDE_PATH}"
export RUSTFLAGS="-C link-arg=-F$base_dir/System/Library/Frameworks"

# Map the Xcode platform to the Rust target triples to build.
case "$PLATFORM_NAME" in
  macosx)
    TARGETS="aarch64-apple-darwin,x86_64-apple-darwin"
    ;;
  iphonesimulator)
    TARGETS="aarch64-apple-ios-sim,x86_64-apple-ios"
    ;;
  iphoneos)
    TARGETS="aarch64-apple-ios"
    ;;
  *)
    echo "Unsupported platform: $PLATFORM_NAME"
    exit 1
    ;;
esac

# if [ $ENABLE_PREVIEWS == "NO" ]; then

if [[ $CONFIGURATION == "Release" ]]; then
  echo "BUILDING FOR RELEASE ($TARGETS)"
  cargo lipo --release --manifest-path ./Cargo.toml --targets $TARGETS
else
  echo "BUILDING FOR DEBUG ($TARGETS)"
  cargo lipo --manifest-path ./Cargo.toml --targets $TARGETS
fi

# else
#   echo "Skipping the script because of preview mode"
# fi

View File

@@ -0,0 +1,33 @@
#!/bin/bash

# Builds the Connlib framework for macOS, iOS device, and the iOS simulator,
# then bundles the three archives into a single distributable XCFramework and
# records its Swift Package checksum.
set -ex

for sdk in macosx iphoneos iphonesimulator; do
  echo "Building for $sdk"
  xcodebuild archive \
  -scheme Connlib \
  -destination "generic/platform=$sdk" \
  -sdk $sdk \
  -archivePath ./connlib-$sdk \
  SKIP_INSTALL=NO \
  BUILD_LIBRARY_FOR_DISTRIBUTION=YES
done

xcodebuild -create-xcframework \
  -framework ./connlib-iphoneos.xcarchive/Products/Library/Frameworks/connlib.framework \
  -framework ./connlib-iphonesimulator.xcarchive/Products/Library/Frameworks/connlib.framework \
  -framework ./connlib-macosx.xcarchive/Products/Library/Frameworks/connlib.framework \
  -output ./Connlib.xcframework

echo "Build successful. Removing temporary archives"
rm -rf ./connlib-iphoneos.xcarchive
rm -rf ./connlib-iphonesimulator.xcarchive
rm -rf ./connlib-macosx.xcarchive

echo "Computing checksum"
# `swift package compute-checksum` must run inside a package directory, so an
# empty manifest is created temporarily and removed afterwards.
touch Package.swift
zip -r -y Connlib.xcframework.zip Connlib.xcframework
swift package compute-checksum Connlib.xcframework.zip > Connlib.xcframework.zip.checksum.txt
rm Package.swift
rm -rf Connlib.xcframework

View File

@@ -0,0 +1,14 @@
/// Environment variable Xcode sets with the active build configuration;
/// the build script re-runs whenever it changes.
const XCODE_CONFIGURATION_ENV: &str = "CONFIGURATION";

/// Generates the Swift/C glue for the bridge modules listed in `bridges`,
/// writing the concatenated output into the Swift package sources.
fn main() {
    let out_dir = "Sources/Connlib/Generated";

    let bridges = vec!["src/lib.rs"];
    for path in &bridges {
        // Rebuild the generated glue whenever a bridge module changes.
        println!("cargo:rerun-if-changed={path}");
    }
    println!("cargo:rerun-if-env-changed={XCODE_CONFIGURATION_ENV}");

    swift_bridge_build::parse_bridges(bridges)
        .write_all_concatenated(out_dir, env!("CARGO_PKG_NAME"));
}

View File

@@ -0,0 +1,466 @@
// !$*UTF8*$!
{
archiveVersion = 1;
classes = {
};
objectVersion = 56;
objects = {
/* Begin PBXBuildFile section */
8D46EDDF29DBC29800FF01CA /* Adapter.swift in Sources */ = {isa = PBXBuildFile; fileRef = 8D46EDD729DBC29800FF01CA /* Adapter.swift */; };
8D46EDE029DBC29800FF01CA /* CallbackHandler.swift in Sources */ = {isa = PBXBuildFile; fileRef = 8D46EDD829DBC29800FF01CA /* CallbackHandler.swift */; };
8D967B2B29DBA064000B9D58 /* libconnlib.a in Frameworks */ = {isa = PBXBuildFile; fileRef = 8D967B2A29DBA03F000B9D58 /* libconnlib.a */; };
8DA207F829DBD80C00703A4A /* connlib-apple.swift in Sources */ = {isa = PBXBuildFile; fileRef = 8DA207F329DBD80C00703A4A /* connlib-apple.swift */; };
8DA207F929DBD80C00703A4A /* connlib-apple.h in Headers */ = {isa = PBXBuildFile; fileRef = 8DA207F429DBD80C00703A4A /* connlib-apple.h */; settings = {ATTRIBUTES = (Public, ); }; };
8DA207FA29DBD80C00703A4A /* .gitignore in Resources */ = {isa = PBXBuildFile; fileRef = 8DA207F529DBD80C00703A4A /* .gitignore */; };
8DA207FC29DBD80C00703A4A /* SwiftBridgeCore.swift in Sources */ = {isa = PBXBuildFile; fileRef = 8DA207F729DBD80C00703A4A /* SwiftBridgeCore.swift */; };
8DA207FD29DBD86100703A4A /* SwiftBridgeCore.h in Headers */ = {isa = PBXBuildFile; fileRef = 8DA207F629DBD80C00703A4A /* SwiftBridgeCore.h */; settings = {ATTRIBUTES = (Public, ); }; };
8DA207FE29DBD86100703A4A /* connlib.h in Headers */ = {isa = PBXBuildFile; fileRef = 8D4BADD129DBD6CC00940F0D /* connlib.h */; settings = {ATTRIBUTES = (Public, ); }; };
8DA207FF29DBD86100703A4A /* BridgingHeader.h in Headers */ = {isa = PBXBuildFile; fileRef = 8D46EDD629DBC29800FF01CA /* BridgingHeader.h */; settings = {ATTRIBUTES = (Public, ); }; };
/* End PBXBuildFile section */
/* Begin PBXFileReference section */
8D209DCE29DBE96B00B68D27 /* Security.framework */ = {isa = PBXFileReference; lastKnownFileType = wrapper.framework; name = Security.framework; path = Platforms/iPhoneOS.platform/Developer/SDKs/iPhoneOS16.4.sdk/System/Library/Frameworks/Security.framework; sourceTree = DEVELOPER_DIR; };
8D46EDD629DBC29800FF01CA /* BridgingHeader.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = BridgingHeader.h; sourceTree = "<group>"; };
8D46EDD729DBC29800FF01CA /* Adapter.swift */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.swift; path = Adapter.swift; sourceTree = "<group>"; };
8D46EDD829DBC29800FF01CA /* CallbackHandler.swift */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.swift; path = CallbackHandler.swift; sourceTree = "<group>"; };
8D4BADD129DBD6CC00940F0D /* connlib.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = connlib.h; sourceTree = "<group>"; };
8D7D983129DB8437007B8198 /* connlib.framework */ = {isa = PBXFileReference; explicitFileType = wrapper.framework; includeInIndex = 0; path = connlib.framework; sourceTree = BUILT_PRODUCTS_DIR; };
8D967B2629DB9A3B000B9D58 /* build-rust.sh */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text.script.sh; path = "build-rust.sh"; sourceTree = "<group>"; };
8D967B2A29DBA03F000B9D58 /* libconnlib.a */ = {isa = PBXFileReference; lastKnownFileType = archive.ar; name = libconnlib.a; path = target/universal/debug/libconnlib.a; sourceTree = "<group>"; };
8DA207F329DBD80C00703A4A /* connlib-apple.swift */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.swift; path = "connlib-apple.swift"; sourceTree = "<group>"; };
8DA207F429DBD80C00703A4A /* connlib-apple.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = "connlib-apple.h"; sourceTree = "<group>"; };
8DA207F529DBD80C00703A4A /* .gitignore */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text; path = .gitignore; sourceTree = "<group>"; };
8DA207F629DBD80C00703A4A /* SwiftBridgeCore.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = SwiftBridgeCore.h; sourceTree = "<group>"; };
8DA207F729DBD80C00703A4A /* SwiftBridgeCore.swift */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.swift; path = SwiftBridgeCore.swift; sourceTree = "<group>"; };
/* End PBXFileReference section */
/* Begin PBXFrameworksBuildPhase section */
8D7D982E29DB8437007B8198 /* Frameworks */ = {
isa = PBXFrameworksBuildPhase;
buildActionMask = 2147483647;
files = (
8D967B2B29DBA064000B9D58 /* libconnlib.a in Frameworks */,
);
runOnlyForDeploymentPostprocessing = 0;
};
/* End PBXFrameworksBuildPhase section */
/* Begin PBXGroup section */
8D46EDCE29DBC29800FF01CA /* Connlib */ = {
isa = PBXGroup;
children = (
8DA207F129DBD80C00703A4A /* Generated */,
8D4BADD129DBD6CC00940F0D /* connlib.h */,
8D46EDD629DBC29800FF01CA /* BridgingHeader.h */,
8D46EDD729DBC29800FF01CA /* Adapter.swift */,
8D46EDD829DBC29800FF01CA /* CallbackHandler.swift */,
);
path = Connlib;
sourceTree = "<group>";
};
8D7D982729DB8437007B8198 = {
isa = PBXGroup;
children = (
8D967B3E29DBA34C000B9D58 /* Tests */,
8D967B3D29DBA344000B9D58 /* Sources */,
8D967B2629DB9A3B000B9D58 /* build-rust.sh */,
8D7D983229DB8437007B8198 /* Products */,
8D967B2929DBA03F000B9D58 /* Frameworks */,
);
sourceTree = "<group>";
};
8D7D983229DB8437007B8198 /* Products */ = {
isa = PBXGroup;
children = (
8D7D983129DB8437007B8198 /* connlib.framework */,
);
name = Products;
sourceTree = "<group>";
};
8D967B2929DBA03F000B9D58 /* Frameworks */ = {
isa = PBXGroup;
children = (
8D209DCE29DBE96B00B68D27 /* Security.framework */,
8D967B2A29DBA03F000B9D58 /* libconnlib.a */,
);
name = Frameworks;
sourceTree = "<group>";
};
8D967B3D29DBA344000B9D58 /* Sources */ = {
isa = PBXGroup;
children = (
8D46EDCE29DBC29800FF01CA /* Connlib */,
);
path = Sources;
sourceTree = "<group>";
};
8D967B3E29DBA34C000B9D58 /* Tests */ = {
isa = PBXGroup;
children = (
);
path = Tests;
sourceTree = "<group>";
};
8DA207F129DBD80C00703A4A /* Generated */ = {
isa = PBXGroup;
children = (
8DA207F229DBD80C00703A4A /* connlib-apple */,
8DA207F529DBD80C00703A4A /* .gitignore */,
8DA207F629DBD80C00703A4A /* SwiftBridgeCore.h */,
8DA207F729DBD80C00703A4A /* SwiftBridgeCore.swift */,
);
path = Generated;
sourceTree = "<group>";
};
8DA207F229DBD80C00703A4A /* connlib-apple */ = {
isa = PBXGroup;
children = (
8DA207F329DBD80C00703A4A /* connlib-apple.swift */,
8DA207F429DBD80C00703A4A /* connlib-apple.h */,
);
path = "connlib-apple";
sourceTree = "<group>";
};
/* End PBXGroup section */
/* Begin PBXHeadersBuildPhase section */
8D7D982C29DB8437007B8198 /* Headers */ = {
isa = PBXHeadersBuildPhase;
buildActionMask = 2147483647;
files = (
8DA207F929DBD80C00703A4A /* connlib-apple.h in Headers */,
8DA207FD29DBD86100703A4A /* SwiftBridgeCore.h in Headers */,
8DA207FE29DBD86100703A4A /* connlib.h in Headers */,
8DA207FF29DBD86100703A4A /* BridgingHeader.h in Headers */,
);
runOnlyForDeploymentPostprocessing = 0;
};
/* End PBXHeadersBuildPhase section */
/* Begin PBXNativeTarget section */
8D7D983029DB8437007B8198 /* connlib */ = {
isa = PBXNativeTarget;
buildConfigurationList = 8D7D984529DB8437007B8198 /* Build configuration list for PBXNativeTarget "connlib" */;
buildPhases = (
8D7D982C29DB8437007B8198 /* Headers */,
8D967B2829DB9A91000B9D58 /* ShellScript */,
8D7D982D29DB8437007B8198 /* Sources */,
8D7D982E29DB8437007B8198 /* Frameworks */,
8D7D982F29DB8437007B8198 /* Resources */,
);
buildRules = (
);
dependencies = (
);
name = connlib;
productName = connlib;
productReference = 8D7D983129DB8437007B8198 /* connlib.framework */;
productType = "com.apple.product-type.framework";
};
/* End PBXNativeTarget section */
/* Begin PBXProject section */
8D7D982829DB8437007B8198 /* Project object */ = {
isa = PBXProject;
attributes = {
BuildIndependentTargetsInParallel = 1;
LastSwiftUpdateCheck = 1430;
LastUpgradeCheck = 1430;
TargetAttributes = {
8D7D983029DB8437007B8198 = {
CreatedOnToolsVersion = 14.3;
};
};
};
buildConfigurationList = 8D7D982B29DB8437007B8198 /* Build configuration list for PBXProject "connlib" */;
compatibilityVersion = "Xcode 14.0";
developmentRegion = en;
hasScannedForEncodings = 0;
knownRegions = (
en,
Base,
);
mainGroup = 8D7D982729DB8437007B8198;
productRefGroup = 8D7D983229DB8437007B8198 /* Products */;
projectDirPath = "";
projectRoot = "";
targets = (
8D7D983029DB8437007B8198 /* connlib */,
);
};
/* End PBXProject section */
/* Begin PBXResourcesBuildPhase section */
8D7D982F29DB8437007B8198 /* Resources */ = {
isa = PBXResourcesBuildPhase;
buildActionMask = 2147483647;
files = (
8DA207FA29DBD80C00703A4A /* .gitignore in Resources */,
);
runOnlyForDeploymentPostprocessing = 0;
};
/* End PBXResourcesBuildPhase section */
/* Begin PBXShellScriptBuildPhase section */
8D967B2829DB9A91000B9D58 /* ShellScript */ = {
isa = PBXShellScriptBuildPhase;
buildActionMask = 2147483647;
files = (
);
inputFileListPaths = (
);
inputPaths = (
);
outputFileListPaths = (
);
outputPaths = (
);
runOnlyForDeploymentPostprocessing = 0;
shellPath = /bin/sh;
shellScript = "./build-rust.sh\n";
};
/* End PBXShellScriptBuildPhase section */
/* Begin PBXSourcesBuildPhase section */
8D7D982D29DB8437007B8198 /* Sources */ = {
isa = PBXSourcesBuildPhase;
buildActionMask = 2147483647;
files = (
8DA207F829DBD80C00703A4A /* connlib-apple.swift in Sources */,
8D46EDDF29DBC29800FF01CA /* Adapter.swift in Sources */,
8D46EDE029DBC29800FF01CA /* CallbackHandler.swift in Sources */,
8DA207FC29DBD80C00703A4A /* SwiftBridgeCore.swift in Sources */,
);
runOnlyForDeploymentPostprocessing = 0;
};
/* End PBXSourcesBuildPhase section */
/* Begin XCBuildConfiguration section */
8D7D984329DB8437007B8198 /* Debug */ = {
isa = XCBuildConfiguration;
buildSettings = {
ALWAYS_SEARCH_USER_PATHS = NO;
CLANG_ANALYZER_NONNULL = YES;
CLANG_ANALYZER_NUMBER_OBJECT_CONVERSION = YES_AGGRESSIVE;
CLANG_CXX_LANGUAGE_STANDARD = "gnu++20";
CLANG_ENABLE_MODULES = YES;
CLANG_ENABLE_OBJC_ARC = YES;
CLANG_ENABLE_OBJC_WEAK = YES;
CLANG_WARN_BLOCK_CAPTURE_AUTORELEASING = YES;
CLANG_WARN_BOOL_CONVERSION = YES;
CLANG_WARN_COMMA = YES;
CLANG_WARN_CONSTANT_CONVERSION = YES;
CLANG_WARN_DEPRECATED_OBJC_IMPLEMENTATIONS = YES;
CLANG_WARN_DIRECT_OBJC_ISA_USAGE = YES_ERROR;
CLANG_WARN_DOCUMENTATION_COMMENTS = YES;
CLANG_WARN_EMPTY_BODY = YES;
CLANG_WARN_ENUM_CONVERSION = YES;
CLANG_WARN_INFINITE_RECURSION = YES;
CLANG_WARN_INT_CONVERSION = YES;
CLANG_WARN_NON_LITERAL_NULL_CONVERSION = YES;
CLANG_WARN_OBJC_IMPLICIT_RETAIN_SELF = YES;
CLANG_WARN_OBJC_LITERAL_CONVERSION = YES;
CLANG_WARN_OBJC_ROOT_CLASS = YES_ERROR;
CLANG_WARN_QUOTED_INCLUDE_IN_FRAMEWORK_HEADER = YES;
CLANG_WARN_RANGE_LOOP_ANALYSIS = YES;
CLANG_WARN_STRICT_PROTOTYPES = YES;
CLANG_WARN_SUSPICIOUS_MOVE = YES;
CLANG_WARN_UNGUARDED_AVAILABILITY = YES_AGGRESSIVE;
CLANG_WARN_UNREACHABLE_CODE = YES;
CLANG_WARN__DUPLICATE_METHOD_MATCH = YES;
COPY_PHASE_STRIP = NO;
CURRENT_PROJECT_VERSION = 1;
DEBUG_INFORMATION_FORMAT = dwarf;
ENABLE_STRICT_OBJC_MSGSEND = YES;
ENABLE_TESTABILITY = YES;
GCC_C_LANGUAGE_STANDARD = gnu11;
GCC_DYNAMIC_NO_PIC = NO;
GCC_NO_COMMON_BLOCKS = YES;
GCC_OPTIMIZATION_LEVEL = 0;
GCC_PREPROCESSOR_DEFINITIONS = (
"DEBUG=1",
"$(inherited)",
);
GCC_WARN_64_TO_32_BIT_CONVERSION = YES;
GCC_WARN_ABOUT_RETURN_TYPE = YES_ERROR;
GCC_WARN_UNDECLARED_SELECTOR = YES;
GCC_WARN_UNINITIALIZED_AUTOS = YES_AGGRESSIVE;
GCC_WARN_UNUSED_FUNCTION = YES;
GCC_WARN_UNUSED_VARIABLE = YES;
MTL_ENABLE_DEBUG_INFO = INCLUDE_SOURCE;
MTL_FAST_MATH = YES;
ONLY_ACTIVE_ARCH = YES;
SWIFT_ACTIVE_COMPILATION_CONDITIONS = DEBUG;
SWIFT_OPTIMIZATION_LEVEL = "-Onone";
VERSIONING_SYSTEM = "apple-generic";
VERSION_INFO_PREFIX = "";
};
name = Debug;
};
8D7D984429DB8437007B8198 /* Release */ = {
isa = XCBuildConfiguration;
buildSettings = {
ALWAYS_SEARCH_USER_PATHS = NO;
CLANG_ANALYZER_NONNULL = YES;
CLANG_ANALYZER_NUMBER_OBJECT_CONVERSION = YES_AGGRESSIVE;
CLANG_CXX_LANGUAGE_STANDARD = "gnu++20";
CLANG_ENABLE_MODULES = YES;
CLANG_ENABLE_OBJC_ARC = YES;
CLANG_ENABLE_OBJC_WEAK = YES;
CLANG_WARN_BLOCK_CAPTURE_AUTORELEASING = YES;
CLANG_WARN_BOOL_CONVERSION = YES;
CLANG_WARN_COMMA = YES;
CLANG_WARN_CONSTANT_CONVERSION = YES;
CLANG_WARN_DEPRECATED_OBJC_IMPLEMENTATIONS = YES;
CLANG_WARN_DIRECT_OBJC_ISA_USAGE = YES_ERROR;
CLANG_WARN_DOCUMENTATION_COMMENTS = YES;
CLANG_WARN_EMPTY_BODY = YES;
CLANG_WARN_ENUM_CONVERSION = YES;
CLANG_WARN_INFINITE_RECURSION = YES;
CLANG_WARN_INT_CONVERSION = YES;
CLANG_WARN_NON_LITERAL_NULL_CONVERSION = YES;
CLANG_WARN_OBJC_IMPLICIT_RETAIN_SELF = YES;
CLANG_WARN_OBJC_LITERAL_CONVERSION = YES;
CLANG_WARN_OBJC_ROOT_CLASS = YES_ERROR;
CLANG_WARN_QUOTED_INCLUDE_IN_FRAMEWORK_HEADER = YES;
CLANG_WARN_RANGE_LOOP_ANALYSIS = YES;
CLANG_WARN_STRICT_PROTOTYPES = YES;
CLANG_WARN_SUSPICIOUS_MOVE = YES;
CLANG_WARN_UNGUARDED_AVAILABILITY = YES_AGGRESSIVE;
CLANG_WARN_UNREACHABLE_CODE = YES;
CLANG_WARN__DUPLICATE_METHOD_MATCH = YES;
COPY_PHASE_STRIP = NO;
CURRENT_PROJECT_VERSION = 1;
DEBUG_INFORMATION_FORMAT = "dwarf-with-dsym";
ENABLE_NS_ASSERTIONS = NO;
ENABLE_STRICT_OBJC_MSGSEND = YES;
GCC_C_LANGUAGE_STANDARD = gnu11;
GCC_NO_COMMON_BLOCKS = YES;
GCC_WARN_64_TO_32_BIT_CONVERSION = YES;
GCC_WARN_ABOUT_RETURN_TYPE = YES_ERROR;
GCC_WARN_UNDECLARED_SELECTOR = YES;
GCC_WARN_UNINITIALIZED_AUTOS = YES_AGGRESSIVE;
GCC_WARN_UNUSED_FUNCTION = YES;
GCC_WARN_UNUSED_VARIABLE = YES;
MTL_ENABLE_DEBUG_INFO = NO;
MTL_FAST_MATH = YES;
SWIFT_COMPILATION_MODE = wholemodule;
SWIFT_OPTIMIZATION_LEVEL = "-O";
VERSIONING_SYSTEM = "apple-generic";
VERSION_INFO_PREFIX = "";
};
name = Release;
};
8D7D984629DB8437007B8198 /* Debug */ = {
isa = XCBuildConfiguration;
buildSettings = {
BUILD_LIBRARY_FOR_DISTRIBUTION = YES;
CODE_SIGN_IDENTITY = "Apple Development";
CODE_SIGN_STYLE = Automatic;
CURRENT_PROJECT_VERSION = 1;
DEFINES_MODULE = YES;
DEVELOPMENT_TEAM = "";
DYLIB_COMPATIBILITY_VERSION = 1;
DYLIB_CURRENT_VERSION = 1;
DYLIB_INSTALL_NAME_BASE = "@rpath";
ENABLE_MODULE_VERIFIER = YES;
GENERATE_INFOPLIST_FILE = YES;
INFOPLIST_KEY_NSHumanReadableCopyright = "";
INSTALL_PATH = "$(LOCAL_LIBRARY_DIR)/Frameworks";
IPHONEOS_DEPLOYMENT_TARGET = 15.6;
LD_RUNPATH_SEARCH_PATHS = (
"@executable_path/Frameworks",
"@loader_path/Frameworks",
);
"LD_RUNPATH_SEARCH_PATHS[sdk=macosx*]" = (
"@executable_path/../Frameworks",
"@loader_path/Frameworks",
);
LIBRARY_SEARCH_PATHS = "$(PROJECT_DIR)/target/universal/debug";
MACOSX_DEPLOYMENT_TARGET = 12.4;
MARKETING_VERSION = 1.0;
MODULE_VERIFIER_SUPPORTED_LANGUAGES = "objective-c objective-c++";
MODULE_VERIFIER_SUPPORTED_LANGUAGE_STANDARDS = "gnu11 gnu++20";
PRODUCT_BUNDLE_IDENTIFIER = dev.firezone.connlib;
PRODUCT_NAME = "$(TARGET_NAME:c99extidentifier)";
PROVISIONING_PROFILE_SPECIFIER = "";
SDKROOT = auto;
SKIP_INSTALL = YES;
SUPPORTED_PLATFORMS = "iphoneos iphonesimulator macosx";
SWIFT_EMIT_LOC_STRINGS = YES;
SWIFT_VERSION = 5.0;
TARGETED_DEVICE_FAMILY = "1,2";
};
name = Debug;
};
8D7D984729DB8437007B8198 /* Release */ = {
isa = XCBuildConfiguration;
buildSettings = {
BUILD_LIBRARY_FOR_DISTRIBUTION = YES;
CODE_SIGN_IDENTITY = "Apple Development";
CODE_SIGN_STYLE = Automatic;
CURRENT_PROJECT_VERSION = 1;
DEFINES_MODULE = YES;
DEVELOPMENT_TEAM = "";
DYLIB_COMPATIBILITY_VERSION = 1;
DYLIB_CURRENT_VERSION = 1;
DYLIB_INSTALL_NAME_BASE = "@rpath";
ENABLE_MODULE_VERIFIER = YES;
GENERATE_INFOPLIST_FILE = YES;
INFOPLIST_KEY_NSHumanReadableCopyright = "";
INSTALL_PATH = "$(LOCAL_LIBRARY_DIR)/Frameworks";
IPHONEOS_DEPLOYMENT_TARGET = 15.6;
LD_RUNPATH_SEARCH_PATHS = (
"@executable_path/Frameworks",
"@loader_path/Frameworks",
);
"LD_RUNPATH_SEARCH_PATHS[sdk=macosx*]" = (
"@executable_path/../Frameworks",
"@loader_path/Frameworks",
);
LIBRARY_SEARCH_PATHS = (
"$(PROJECT_DIR)/target/universal/release",
"$(PROJECT_DIR)/target/universal/debug",
);
MACOSX_DEPLOYMENT_TARGET = 12.4;
MARKETING_VERSION = 1.0;
MODULE_VERIFIER_SUPPORTED_LANGUAGES = "objective-c objective-c++";
MODULE_VERIFIER_SUPPORTED_LANGUAGE_STANDARDS = "gnu11 gnu++20";
PRODUCT_BUNDLE_IDENTIFIER = dev.firezone.connlib;
PRODUCT_NAME = "$(TARGET_NAME:c99extidentifier)";
PROVISIONING_PROFILE_SPECIFIER = "";
SDKROOT = auto;
SKIP_INSTALL = YES;
SUPPORTED_PLATFORMS = "iphoneos iphonesimulator macosx";
SWIFT_EMIT_LOC_STRINGS = YES;
SWIFT_VERSION = 5.0;
TARGETED_DEVICE_FAMILY = "1,2";
};
name = Release;
};
/* End XCBuildConfiguration section */
/* Begin XCConfigurationList section */
8D7D982B29DB8437007B8198 /* Build configuration list for PBXProject "connlib" */ = {
isa = XCConfigurationList;
buildConfigurations = (
8D7D984329DB8437007B8198 /* Debug */,
8D7D984429DB8437007B8198 /* Release */,
);
defaultConfigurationIsVisible = 0;
defaultConfigurationName = Release;
};
8D7D984529DB8437007B8198 /* Build configuration list for PBXNativeTarget "connlib" */ = {
isa = XCConfigurationList;
buildConfigurations = (
8D7D984629DB8437007B8198 /* Debug */,
8D7D984729DB8437007B8198 /* Release */,
);
defaultConfigurationIsVisible = 0;
defaultConfigurationName = Release;
};
/* End XCConfigurationList section */
};
rootObject = 8D7D982829DB8437007B8198 /* Project object */;
}

View File

@@ -0,0 +1,7 @@
<?xml version="1.0" encoding="UTF-8"?>
<Workspace
version = "1.0">
<FileRef
location = "self:">
</FileRef>
</Workspace>

View File

@@ -0,0 +1,8 @@
<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
<plist version="1.0">
<dict>
<key>IDEDidComputeMac32BitWarning</key>
<true/>
</dict>
</plist>

View File

@@ -0,0 +1,66 @@
<?xml version="1.0" encoding="UTF-8"?>
<Scheme
LastUpgradeVersion = "1430"
version = "1.7">
<BuildAction
parallelizeBuildables = "YES"
buildImplicitDependencies = "YES">
<BuildActionEntries>
<BuildActionEntry
buildForTesting = "YES"
buildForRunning = "YES"
buildForProfiling = "YES"
buildForArchiving = "YES"
buildForAnalyzing = "YES">
<BuildableReference
BuildableIdentifier = "primary"
BlueprintIdentifier = "8D7D983029DB8437007B8198"
BuildableName = "connlib.framework"
BlueprintName = "connlib"
ReferencedContainer = "container:connlib.xcodeproj">
</BuildableReference>
</BuildActionEntry>
</BuildActionEntries>
</BuildAction>
<TestAction
buildConfiguration = "Debug"
selectedDebuggerIdentifier = "Xcode.DebuggerFoundation.Debugger.LLDB"
selectedLauncherIdentifier = "Xcode.DebuggerFoundation.Launcher.LLDB"
shouldUseLaunchSchemeArgsEnv = "YES"
shouldAutocreateTestPlan = "YES">
</TestAction>
<LaunchAction
buildConfiguration = "Debug"
selectedDebuggerIdentifier = "Xcode.DebuggerFoundation.Debugger.LLDB"
selectedLauncherIdentifier = "Xcode.DebuggerFoundation.Launcher.LLDB"
launchStyle = "0"
useCustomWorkingDirectory = "NO"
ignoresPersistentStateOnLaunch = "NO"
debugDocumentVersioning = "YES"
debugServiceExtension = "internal"
allowLocationSimulation = "YES">
</LaunchAction>
<ProfileAction
buildConfiguration = "Release"
shouldUseLaunchSchemeArgsEnv = "YES"
savedToolIdentifier = ""
useCustomWorkingDirectory = "NO"
debugDocumentVersioning = "YES">
<MacroExpansion>
<BuildableReference
BuildableIdentifier = "primary"
BlueprintIdentifier = "8D7D983029DB8437007B8198"
BuildableName = "connlib.framework"
BlueprintName = "connlib"
ReferencedContainer = "container:connlib.xcodeproj">
</BuildableReference>
</MacroExpansion>
</ProfileAction>
<AnalyzeAction
buildConfiguration = "Debug">
</AnalyzeAction>
<ArchiveAction
buildConfiguration = "Release"
revealArchiveInOrganizer = "YES">
</ArchiveAction>
</Scheme>

View File

@@ -0,0 +1,115 @@
// Swift bridge generated code triggers this below
#![allow(improper_ctypes)]
#![cfg(any(target_os = "macos", target_os = "ios"))]
use firezone_client_connlib::{
Callbacks, Error, ErrorType, ResourceList, Session, SwiftConnlibError, SwiftErrorType,
TunnelAddresses,
};
/// FFI surface between connlib and the Swift client, generated by `swift-bridge`.
#[swift_bridge::bridge]
mod ffi {
    /// Resource list flattened into a single comma-separated string for Swift.
    #[swift_bridge(swift_repr = "struct")]
    struct ResourceList {
        resources: String,
    }

    // TODO: Allegedly not FFI safe, but works
    /// IPv4/IPv6 tunnel addresses, stringified for transport across the bridge.
    #[swift_bridge(swift_repr = "struct")]
    struct TunnelAddresses {
        address4: String,
        address6: String,
    }

    // `already_declared`: these enums are defined in libs_common (see the
    // `use firezone_client_connlib::...` import) and only referenced here.
    #[swift_bridge(already_declared)]
    enum SwiftConnlibError {}
    #[swift_bridge(already_declared)]
    enum SwiftErrorType {}

    // Rust functions callable from Swift.
    extern "Rust" {
        type WrappedSession;

        #[swift_bridge(associated_to = WrappedSession)]
        fn connect(portal_url: String, token: String) -> Result<WrappedSession, SwiftConnlibError>;

        #[swift_bridge(swift_name = "bumpSockets")]
        fn bump_sockets(&self) -> bool;

        #[swift_bridge(swift_name = "disableSomeRoamingForBrokenMobileSemantics")]
        fn disable_some_roaming_for_broken_mobile_semantics(&self) -> bool;

        fn disconnect(&mut self) -> bool;
    }

    // Swift callbacks invoked from Rust (see `CallbackHandler` below).
    extern "Swift" {
        type Opaque;

        #[swift_bridge(swift_name = "onUpdateResources")]
        fn on_update_resources(resourceList: ResourceList);

        #[swift_bridge(swift_name = "onSetTunnelAddresses")]
        fn on_set_tunnel_addresses(tunnelAddresses: TunnelAddresses);

        #[swift_bridge(swift_name = "onError")]
        fn on_error(error: SwiftConnlibError, error_type: SwiftErrorType);
    }
}
/// Converts the library-level resource list into its FFI shape by joining all
/// resource strings with commas (the bridge only carries a single `String`).
impl From<ResourceList> for ffi::ResourceList {
    fn from(value: ResourceList) -> Self {
        let resources = value.resources.join(",");
        ffi::ResourceList { resources }
    }
}
/// Converts tunnel addresses into their FFI shape by stringifying both
/// the IPv4 and IPv6 addresses.
impl From<TunnelAddresses> for ffi::TunnelAddresses {
    fn from(value: TunnelAddresses) -> Self {
        let address4 = value.address4.to_string();
        let address6 = value.address6.to_string();
        ffi::TunnelAddresses { address4, address6 }
    }
}
/// This is used by the apple client to interact with our code.
pub struct WrappedSession {
    // Underlying connlib session; its events are routed to Swift via `CallbackHandler`.
    session: Session<CallbackHandler>,
}
/// Zero-sized handler that forwards connlib callbacks across the FFI bridge to Swift.
struct CallbackHandler;

impl Callbacks for CallbackHandler {
    fn on_update_resources(resource_list: ResourceList) {
        ffi::on_update_resources(resource_list.into());
    }

    // NOTE(review): "adresses" is misspelled in the `Callbacks` trait itself;
    // the name here must match the trait until it is fixed upstream.
    fn on_set_tunnel_adresses(tunnel_addresses: TunnelAddresses) {
        ffi::on_set_tunnel_addresses(tunnel_addresses.into());
    }

    fn on_error(error: &Error, error_type: ErrorType) {
        ffi::on_error(error.into(), error_type.into());
    }
}
impl WrappedSession {
    /// Connects to the portal at `portal_url` using `token`, wiring all
    /// session events through [CallbackHandler] to Swift.
    fn connect(portal_url: String, token: String) -> Result<Self, SwiftConnlibError> {
        let session = Session::connect::<CallbackHandler>(portal_url.as_str(), token)?;
        Ok(Self { session })
    }

    // Presumably meant to re-bind sockets after a network change, mirroring
    // wireguard-apple's bumpSockets — unimplemented, will panic if called.
    fn bump_sockets(&self) -> bool {
        // TODO: See https://github.com/WireGuard/wireguard-apple/blob/2fec12a6e1f6e3460b6ee483aa00ad29cddadab1/Sources/WireGuardKitGo/api-apple.go#L177
        todo!()
    }

    // Mirrors the wireguard-apple roaming workaround — unimplemented, will panic if called.
    fn disable_some_roaming_for_broken_mobile_semantics(&self) -> bool {
        // TODO: See https://github.com/WireGuard/wireguard-apple/blob/2fec12a6e1f6e3460b6ee483aa00ad29cddadab1/Sources/WireGuardKitGo/api-apple.go#LL197C6-L197C50
        todo!()
    }

    /// Disconnects the underlying session, returning the session's own status flag.
    fn disconnect(&mut self) -> bool {
        self.session.disconnect()
    }
}

View File

@@ -0,0 +1,14 @@
[package]
name = "headless"
version = "0.1.0"
edition = "2021"
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[dependencies]
firezone-client-connlib = { path = "../../libs/client" }
url = { version = "2.3.1", default-features = false }
tracing-subscriber = { version = "0.3" }
tracing = { version = "0.1" }
anyhow = { version = "1.0" }
clap = { version = "4.3", features = ["derive"] }

View File

@@ -0,0 +1,70 @@
use anyhow::{Context, Result};
use clap::Parser;
use std::str::FromStr;
use firezone_client_connlib::{
get_user_agent, Callbacks, Error, ErrorType, ResourceList, Session, TunnelAddresses,
};
use url::Url;
/// Callback handler for the headless client.
///
/// Uninhabited on purpose: every `Callbacks` method below is an associated
/// function (no `self`), so no instance is ever constructed.
enum CallbackHandler {}

impl Callbacks for CallbackHandler {
    // Resource updates are not surfaced anywhere yet — calling this panics.
    fn on_update_resources(_resource_list: ResourceList) {
        todo!()
    }

    // NOTE(review): "adresses" is misspelled in the `Callbacks` trait; the name
    // here must match the trait. Unimplemented — calling this panics.
    fn on_set_tunnel_adresses(_tunnel_addresses: TunnelAddresses) {
        todo!()
    }

    // Recoverable errors are logged; fatal ones abort the process.
    fn on_error(error: &Error, error_type: ErrorType) {
        match error_type {
            ErrorType::Recoverable => tracing::warn!("Encountered error: {error}"),
            ErrorType::Fatal => panic!("Encountered fatal error: {error}"),
        }
    }
}
// Environment variable holding the portal websocket URL.
const URL_ENV_VAR: &str = "FZ_URL";
// Environment variable holding the portal auth token.
const SECRET_ENV_VAR: &str = "FZ_SECRET";

/// Headless client entry point: connects to the portal using env-provided
/// credentials and blocks until Ctrl-C.
fn main() -> Result<()> {
    tracing_subscriber::fmt::init();

    let cli = Cli::parse();

    // `--print-agent` short-circuits: print the user agent string and exit.
    if cli.print_agent {
        println!("{}", get_user_agent());
        return Ok(());
    }

    // TODO: allow passing as arg vars
    let url = parse_env_var::<Url>(URL_ENV_VAR)?;
    let secret = parse_env_var::<String>(SECRET_ENV_VAR)?;

    // TODO: This is disgusting
    let mut session = Session::<CallbackHandler>::connect::<CallbackHandler>(url, secret).unwrap();
    tracing::info!("Started new session");

    // Block until Ctrl-C, then tear the session down.
    session.wait_for_ctrl_c().unwrap();
    session.disconnect();

    Ok(())
}
/// Reads the environment variable `key` and parses it into a `T`, attaching
/// context for both failure modes (variable unset, parse failure).
fn parse_env_var<T>(key: &str) -> Result<T>
where
    T: FromStr,
    T::Err: std::error::Error + Send + Sync + 'static,
{
    std::env::var(key)
        .with_context(|| format!("`{key}` env variable is unset"))?
        .parse()
        .with_context(|| format!("failed to parse {key} env variable"))
}
// probably will change this to a subcommand in the future
// Command-line arguments for the headless client.
// (Deliberately using `//` comments: clap turns `///` doc comments into
// user-visible help/about text, which would change CLI output.)
#[derive(Parser)]
#[command(author, version, about, long_about = None)]
struct Cli {
    // When set, print the user-agent string and exit without connecting.
    #[arg(short, long)]
    print_agent: bool,
}

View File

@@ -0,0 +1,13 @@
[package]
name = "gateway"
version = "0.1.0"
edition = "2021"
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[dependencies]
firezone-gateway-connlib = { path = "../libs/gateway" }
url = { version = "2.3.1", default-features = false }
tracing-subscriber = { version = "0.3" }
tracing = { version = "0.1" }
anyhow = { version = "1.0" }

View File

@@ -0,0 +1,54 @@
use anyhow::{Context, Result};
use std::str::FromStr;
use firezone_gateway_connlib::{
Callbacks, Error, ErrorType, ResourceList, Session, TunnelAddresses,
};
use url::Url;
/// Callback handler for the gateway binary.
///
/// Uninhabited on purpose: every `Callbacks` method below is an associated
/// function (no `self`), so no instance is ever constructed.
enum CallbackHandler {}

impl Callbacks for CallbackHandler {
    // Resource updates are not surfaced anywhere yet — calling this panics.
    fn on_update_resources(_resource_list: ResourceList) {
        todo!()
    }

    // NOTE(review): "adresses" is misspelled in the `Callbacks` trait; the name
    // here must match the trait. Unimplemented — calling this panics.
    fn on_set_tunnel_adresses(_tunnel_addresses: TunnelAddresses) {
        todo!()
    }

    // Recoverable errors are logged; fatal ones abort the process.
    fn on_error(error: &Error, error_type: ErrorType) {
        match error_type {
            ErrorType::Recoverable => tracing::warn!("Encountered error: {error}"),
            ErrorType::Fatal => panic!("Encountered fatal error: {error}"),
        }
    }
}
// Environment variable holding the portal websocket URL.
const URL_ENV_VAR: &str = "FZ_URL";
// Environment variable holding the portal auth token.
const SECRET_ENV_VAR: &str = "FZ_SECRET";

/// Gateway entry point: connects to the portal using env-provided credentials
/// and blocks until Ctrl-C.
fn main() -> Result<()> {
    tracing_subscriber::fmt::init();

    // TODO: allow passing as arg vars
    let url = parse_env_var::<Url>(URL_ENV_VAR)?;
    let secret = parse_env_var::<String>(SECRET_ENV_VAR)?;

    // TODO: This is disgusting
    let mut session = Session::<CallbackHandler>::connect::<CallbackHandler>(url, secret).unwrap();

    // Block until Ctrl-C, then tear the session down.
    session.wait_for_ctrl_c().unwrap();
    session.disconnect();

    Ok(())
}
/// Reads the environment variable `key` and parses it into a `T`, attaching
/// context for both failure modes (variable unset, parse failure).
// NOTE: duplicated in the headless client binary; candidate for a shared crate.
fn parse_env_var<T>(key: &str) -> Result<T>
where
    T: FromStr,
    T::Err: std::error::Error + Send + Sync + 'static,
{
    std::env::var(key)
        .with_context(|| format!("`{key}` env variable is unset"))?
        .parse()
        .with_context(|| format!("failed to parse {key} env variable"))
}

View File

@@ -0,0 +1,16 @@
[package]
name = "firezone-client-connlib"
version = "0.1.0"
edition = "2021"
[dependencies]
tokio = { version = "1.27", default-features = false, features = ["sync"] }
tracing = { version = "0.1", default-features = false, features = ["std", "attributes"] }
async-trait = { version = "0.1", default-features = false }
libs-common = { path = "../common" }
firezone-tunnel = { path = "../tunnel" }
serde = { version = "1.0", default-features = false, features = ["std", "derive"] }
boringtun = { workspace = true }
[dev-dependencies]
serde_json = { version = "1.0", default-features = false, features = ["std"] }

View File

@@ -0,0 +1,191 @@
use std::{marker::PhantomData, sync::Arc, time::Duration};
use crate::messages::{Connect, EgressMessages, InitClient, Messages, Relays};
use boringtun::x25519::StaticSecret;
use libs_common::{
error_type::ErrorType::{Fatal, Recoverable},
messages::{Id, ResourceDescription},
Callbacks, ControlSession, Result,
};
use async_trait::async_trait;
use firezone_tunnel::{ControlSignal, Tunnel};
use tokio::sync::mpsc::{channel, Receiver, Sender};
/// Capacity of the internal egress-message channel.
const INTERNAL_CHANNEL_SIZE: usize = 256;

#[async_trait]
impl ControlSignal for ControlSignaler {
    /// Asks the portal for the relay list of `resource`; the answer arrives
    /// asynchronously as a `Relays` ingress message.
    async fn signal_connection_to(&self, resource: &ResourceDescription) -> Result<()> {
        let request = EgressMessages::ListRelays {
            resource_id: resource.id(),
        };
        self.internal_sender.send(request).await?;
        Ok(())
    }
}
/// Implementation of [ControlSession] for clients.
pub struct ControlPlane<C: Callbacks> {
    // Shared tunnel that performs the actual connection/packet work.
    tunnel: Arc<Tunnel<ControlSignaler, C>>,
    // Cloneable handle used to push egress messages to the portal.
    control_signaler: ControlSignaler,
    // NOTE(review): `C` already appears in `tunnel`'s type, so this
    // PhantomData looks redundant — confirm before removing.
    _phantom: PhantomData<C>,
}

/// Cloneable sender of egress messages toward the portal channel.
#[derive(Clone)]
struct ControlSignaler {
    internal_sender: Arc<Sender<EgressMessages>>,
}
impl<C: Callbacks> ControlPlane<C>
where
    C: Send + Sync + 'static,
{
    /// Event loop: handles portal messages as they arrive and fires a stats
    /// event every 10 seconds; exits when both select arms are disabled
    /// (i.e. the channel has closed).
    #[tracing::instrument(level = "trace", skip(self))]
    async fn start(mut self, mut receiver: Receiver<Messages>) {
        let mut interval = tokio::time::interval(Duration::from_secs(10));
        loop {
            tokio::select! {
                Some(msg) = receiver.recv() => self.handle_message(msg).await,
                _ = interval.tick() => self.stats_event().await,
                else => break
            }
        }
    }

    /// Handles the portal's `init` message: configures the tunnel interface,
    /// then registers the initial set of resources.
    #[tracing::instrument(level = "trace", skip_all)]
    async fn init(
        &mut self,
        InitClient {
            interface,
            resources,
        }: InitClient,
    ) {
        if let Err(e) = self.tunnel.set_interface(&interface).await {
            tracing::error!("Couldn't initialize interface: {e}");
            // A broken interface makes the whole session unusable.
            C::on_error(&e, Fatal);
            return;
        }

        for resource_description in resources {
            self.add_resource(resource_description).await
        }

        tracing::info!("Firezoned Started!");
    }

    /// Handles the portal's `connect` message, feeding the gateway's SDP and
    /// public key back into the tunnel for the pending connection.
    #[tracing::instrument(level = "trace", skip(self))]
    async fn connect(
        &mut self,
        Connect {
            rtc_sdp,
            resource_id,
            gateway_public_key,
        }: Connect,
    ) {
        // NOTE(review): `recieved_offer_response` is misspelled in the tunnel API.
        if let Err(e) = self
            .tunnel
            .recieved_offer_response(resource_id, rtc_sdp, gateway_public_key.0.into())
            .await
        {
            C::on_error(&e, Recoverable);
        }
    }

    /// Registers a resource with the tunnel.
    #[tracing::instrument(level = "trace", skip(self))]
    async fn add_resource(&self, resource_description: ResourceDescription) {
        self.tunnel.add_resource(resource_description).await;
    }

    // Unimplemented — panics if the portal sends `resource_removed`.
    #[tracing::instrument(level = "trace", skip(self))]
    fn remove_resource(&self, id: Id) {
        todo!()
    }

    // Unimplemented — panics if the portal sends `resource_updated`.
    #[tracing::instrument(level = "trace", skip(self))]
    fn update_resource(&self, resource_description: ResourceDescription) {
        todo!()
    }

    /// Handles the relay list for a resource: asks the tunnel to build a
    /// connection request and forwards it to the portal. Runs on a spawned
    /// task so the event loop is not blocked; on any failure the half-open
    /// connection state is cleaned up first.
    #[tracing::instrument(level = "trace", skip(self))]
    fn relays(
        &self,
        Relays {
            resource_id,
            relays,
        }: Relays,
    ) {
        let tunnel = Arc::clone(&self.tunnel);
        let control_signaler = self.control_signaler.clone();
        tokio::spawn(async move {
            match tunnel.request_connection(resource_id, relays).await {
                Ok(connection_request) => {
                    if let Err(err) = control_signaler
                        .internal_sender
                        .send(EgressMessages::RequestConnection(connection_request))
                        .await
                    {
                        tunnel.cleanup_connection(resource_id);
                        C::on_error(&err.into(), Recoverable);
                    }
                }
                Err(err) => {
                    tunnel.cleanup_connection(resource_id);
                    C::on_error(&err, Recoverable);
                }
            }
        });
    }

    /// Dispatches a unified portal message to its specific handler.
    #[tracing::instrument(level = "trace", skip(self))]
    pub(super) async fn handle_message(&mut self, msg: Messages) {
        match msg {
            Messages::Init(init) => self.init(init).await,
            Messages::Relays(connection_details) => self.relays(connection_details),
            Messages::Connect(connect) => self.connect(connect).await,
            Messages::ResourceAdded(resource) => self.add_resource(resource).await,
            Messages::ResourceRemoved(resource) => self.remove_resource(resource.id),
            Messages::ResourceUpdated(resource) => self.update_resource(resource),
        }
    }

    /// Periodic stats hook; currently a no-op.
    #[tracing::instrument(level = "trace", skip(self))]
    pub(super) async fn stats_event(&mut self) {
        // TODO
    }
}
#[async_trait]
impl<C: Callbacks + Sync + Send + 'static> ControlSession<Messages, EgressMessages>
    for ControlPlane<C>
{
    /// Builds the tunnel and control plane, spawns the event loop, and returns
    /// the channel endpoints the session uses to talk to it: a sender for
    /// ingress messages and a receiver for egress messages.
    #[tracing::instrument(level = "trace", skip(private_key))]
    async fn start(
        private_key: StaticSecret,
    ) -> Result<(Sender<Messages>, Receiver<EgressMessages>)> {
        // This is kinda hacky, the buffer size is 1 so that we make sure that we
        // process one message at a time, blocking if a previous message hasn't been
        // processed, to force queue ordering.
        let (sender, receiver) = channel::<Messages>(1);

        let (internal_sender, internal_receiver) = channel(INTERNAL_CHANNEL_SIZE);
        let internal_sender = Arc::new(internal_sender);
        let control_signaler = ControlSignaler { internal_sender };
        let tunnel = Arc::new(Tunnel::new(private_key, control_signaler.clone()).await?);

        let control_plane = ControlPlane::<C> {
            tunnel,
            control_signaler,
            _phantom: PhantomData,
        };

        // Drive the control plane's event loop on its own task; it ends when
        // `sender` is dropped and the channel closes.
        tokio::spawn(async move { control_plane.start(receiver).await });

        Ok((sender, internal_receiver))
    }

    // Phoenix topic clients join; also used as the topic in the message tests.
    fn socket_path() -> &'static str {
        "device"
    }
}

View File

@@ -0,0 +1,21 @@
//! Main connlib library for clients.
use control::ControlPlane;
use messages::EgressMessages;
use messages::IngressMessages;
mod control;
mod messages;
/// Session type for clients.
///
/// The parameter `C` is the [Callbacks] implementation notified of session events.
///
/// For more information see libs_common docs on [Session][libs_common::Session].
pub type Session<C> =
    libs_common::Session<ControlPlane<C>, IngressMessages, EgressMessages, ReplyMessages, Messages>;
pub use libs_common::{
error::SwiftConnlibError,
error_type::{ErrorType, SwiftErrorType},
get_user_agent, Callbacks, Error, ResourceList, TunnelAddresses,
};
use messages::Messages;
use messages::ReplyMessages;

View File

@@ -0,0 +1,274 @@
use firezone_tunnel::RTCSessionDescription;
use serde::{Deserialize, Serialize};
use libs_common::messages::{Id, Interface, Key, Relay, RequestConnection, ResourceDescription};
/// Payload of the portal's `init` message: the interface configuration plus
/// the initial set of resources.
#[derive(Debug, PartialEq, Eq, Deserialize, Serialize, Clone)]
pub struct InitClient {
    pub interface: Interface,
    // Omitted from the wire format when empty; defaults to empty on deserialize.
    #[serde(skip_serializing_if = "Vec::is_empty", default)]
    pub resources: Vec<ResourceDescription>,
}

/// Payload of `resource_removed`: just the id of the resource to drop.
#[derive(Debug, Deserialize, Serialize, Clone, PartialEq, Eq)]
pub struct RemoveResource {
    pub id: Id,
}
/// Payload of the portal's `connect` message: the gateway's answer for a
/// previously requested resource connection.
#[derive(Debug, Deserialize, Serialize, Clone)]
pub struct Connect {
    // SDP answer used to complete the WebRTC connection setup.
    pub rtc_sdp: RTCSessionDescription,
    pub resource_id: Id,
    pub gateway_public_key: Key,
}

// Just because RTCSessionDescription doesn't implement partialeq
// NOTE: equality deliberately ignores `rtc_sdp` — two `Connect`s with the same
// resource and key but different SDPs compare equal.
impl PartialEq for Connect {
    fn eq(&self, other: &Self) -> bool {
        self.resource_id == other.resource_id && self.gateway_public_key == other.gateway_public_key
    }
}

impl Eq for Connect {}

/// List of relays
#[derive(Debug, Deserialize, Serialize, Clone, PartialEq, Eq)]
pub struct Relays {
    /// Resource id corresponding to the relay
    pub resource_id: Id,
    /// The actual list of relays
    pub relays: Vec<Relay>,
}
// These messages are the messages that can be received
// by a client.
#[derive(Debug, Deserialize, Serialize, Clone, PartialEq, Eq)]
#[serde(rename_all = "snake_case", tag = "event", content = "payload")]
// TODO: We will need to re-visit webrtc-rs
#[allow(clippy::large_enum_variant)]
pub enum IngressMessages {
    // Interface config plus the initial resource set.
    Init(InitClient),
    // Gateway's answer (SDP + public key) for a pending connection.
    Connect(Connect),
    // Resources: arrive in an orderly fashion
    ResourceAdded(ResourceDescription),
    ResourceRemoved(RemoveResource),
    ResourceUpdated(ResourceDescription),
}

/// The replies that can arrive from the channel by a client
#[derive(Debug, Deserialize, Serialize, Clone, PartialEq, Eq)]
#[serde(untagged)]
pub enum ReplyMessages {
    Relays(Relays),
}

/// The totality of all messages (might have a macro in the future to derive the other types)
// Union of [IngressMessages] and [ReplyMessages]; this is what the control
// plane's event loop consumes.
#[derive(Debug, Clone, PartialEq, Eq)]
#[allow(clippy::large_enum_variant)]
pub enum Messages {
    Init(InitClient),
    Relays(Relays),
    Connect(Connect),
    // Resources: arrive in an orderly fashion
    ResourceAdded(ResourceDescription),
    ResourceRemoved(RemoveResource),
    ResourceUpdated(ResourceDescription),
}
impl From<IngressMessages> for Messages {
fn from(value: IngressMessages) -> Self {
match value {
IngressMessages::Init(m) => Self::Init(m),
IngressMessages::Connect(m) => Self::Connect(m),
IngressMessages::ResourceAdded(m) => Self::ResourceAdded(m),
IngressMessages::ResourceRemoved(m) => Self::ResourceRemoved(m),
IngressMessages::ResourceUpdated(m) => Self::ResourceUpdated(m),
}
}
}
impl From<ReplyMessages> for Messages {
fn from(value: ReplyMessages) -> Self {
match value {
ReplyMessages::Relays(m) => Self::Relays(m),
}
}
}
// These messages can be sent from a client to a control pane
#[derive(Debug, Deserialize, Serialize, Clone, PartialEq, Eq)]
#[serde(rename_all = "snake_case", tag = "event", content = "payload")]
// TODO: We will need to re-visit webrtc-rs
#[allow(clippy::large_enum_variant)]
pub enum EgressMessages {
    // Ask the portal for relays for a resource; answered by a `Relays` reply.
    ListRelays { resource_id: Id },
    // Forward the tunnel's connection request (offer) to the portal.
    RequestConnection(RequestConnection),
}
#[cfg(test)]
mod test {
    use libs_common::{
        control::PhoenixMessage,
        messages::{
            Interface, Relay, ResourceDescription, ResourceDescriptionCidr, ResourceDescriptionDns,
            Stun, Turn,
        },
    };

    use crate::messages::{EgressMessages, Relays, ReplyMessages};

    use super::{IngressMessages, InitClient};

    // TODO: request_connection tests

    /// The portal's `init` payload (interface + cidr/dns resources)
    /// deserializes into the expected [IngressMessages::Init].
    #[test]
    fn init_phoenix_message() {
        let m = PhoenixMessage::new(
            "device",
            IngressMessages::Init(InitClient {
                interface: Interface {
                    ipv4: "100.72.112.111".parse().unwrap(),
                    ipv6: "fd00:2011:1111::13:efb9".parse().unwrap(),
                    upstream_dns: vec![],
                },
                resources: vec![
                    ResourceDescription::Cidr(ResourceDescriptionCidr {
                        id: "73037362-715d-4a83-a749-f18eadd970e6".parse().unwrap(),
                        address: "172.172.0.0/16".parse().unwrap(),
                        name: "172.172.0.0/16".to_string(),
                    }),
                    ResourceDescription::Dns(ResourceDescriptionDns {
                        id: "03000143-e25e-45c7-aafb-144990e57dcd".parse().unwrap(),
                        address: "gitlab.mycorp.com".to_string(),
                        ipv4: "100.126.44.50".parse().unwrap(),
                        ipv6: "fd00:2011:1111::e:7758".parse().unwrap(),
                        name: "gitlab.mycorp.com".to_string(),
                    }),
                ],
            }),
        );
        let message = r#"{
            "event": "init",
            "payload": {
                "interface": {
                    "ipv4": "100.72.112.111",
                    "ipv6": "fd00:2011:1111::13:efb9",
                    "upstream_dns": []
                },
                "resources": [
                    {
                        "address": "172.172.0.0/16",
                        "id": "73037362-715d-4a83-a749-f18eadd970e6",
                        "name": "172.172.0.0/16",
                        "type": "cidr"
                    },
                    {
                        "address": "gitlab.mycorp.com",
                        "id": "03000143-e25e-45c7-aafb-144990e57dcd",
                        "ipv4": "100.126.44.50",
                        "ipv6": "fd00:2011:1111::e:7758",
                        "name": "gitlab.mycorp.com",
                        "type": "dns"
                    }
                ]
            },
            "ref": null,
            "topic": "device"
        }"#;
        let ingress_message: PhoenixMessage<IngressMessages, ReplyMessages> =
            serde_json::from_str(message).unwrap();
        assert_eq!(m, ingress_message);
    }

    /// `list_relays` egress messages serialize to the event/payload shape the
    /// portal expects.
    #[test]
    fn list_relays_message() {
        let m = PhoenixMessage::<EgressMessages, ()>::new(
            "device",
            EgressMessages::ListRelays {
                resource_id: "f16ecfa0-a94f-4bfd-a2ef-1cc1f2ef3da3".parse().unwrap(),
            },
        );
        let message = r#"
            {
                "event": "list_relays",
                "payload": {
                    "resource_id": "f16ecfa0-a94f-4bfd-a2ef-1cc1f2ef3da3"
                },
                "ref":null,
                "topic": "device"
            }
        "#;
        let egress_message = serde_json::from_str(message).unwrap();
        assert_eq!(m, egress_message);
    }

    /// A `phx_reply` carrying stun/turn relays deserializes into the expected
    /// [ReplyMessages::Relays].
    #[test]
    fn list_relays_reply() {
        let m = PhoenixMessage::<IngressMessages, ReplyMessages>::new_reply(
            "device",
            ReplyMessages::Relays(Relays {
                resource_id: "f16ecfa0-a94f-4bfd-a2ef-1cc1f2ef3da3".parse().unwrap(),
                relays: vec![
                    Relay::Stun(Stun {
                        uri: "stun:189.172.73.111:3478".to_string(),
                    }),
                    Relay::Turn(Turn {
                        expires_at: 1686629954,
                        uri: "turn:189.172.73.111:3478".to_string(),
                        username: "1686629954:C7I74wXYFdFugMYM".to_string(),
                        password: "OXXRDJ7lJN1cm+4+2BWgL87CxDrvpVrn5j3fnJHye98".to_string(),
                    }),
                    Relay::Stun(Stun {
                        uri: "stun:::1:3478".to_string(),
                    }),
                    Relay::Turn(Turn {
                        expires_at: 1686629954,
                        uri: "turn:::1:3478".to_string(),
                        username: "1686629954:dpHxHfNfOhxPLfMG".to_string(),
                        password: "8Wtb+3YGxO6ia23JUeSEfZ2yFD6RhGLkbgZwqjebyKY".to_string(),
                    }),
                ],
            }),
        );
        let message = r#"
            {
                "ref":null,
                "topic":"device",
                "event": "phx_reply",
                "payload": {
                    "response": {
                        "relays": [
                            {
                                "type":"stun",
                                "uri":"stun:189.172.73.111:3478"
                            },
                            {
                                "expires_at": 1686629954,
                                "password": "OXXRDJ7lJN1cm+4+2BWgL87CxDrvpVrn5j3fnJHye98",
                                "type": "turn",
                                "uri": "turn:189.172.73.111:3478",
                                "username":"1686629954:C7I74wXYFdFugMYM"
                            },
                            {
                                "type": "stun",
                                "uri": "stun:::1:3478"
                            },
                            {
                                "expires_at": 1686629954,
                                "password": "8Wtb+3YGxO6ia23JUeSEfZ2yFD6RhGLkbgZwqjebyKY",
                                "type": "turn",
                                "uri": "turn:::1:3478",
                                "username": "1686629954:dpHxHfNfOhxPLfMG"
                            }],
                        "resource_id": "f16ecfa0-a94f-4bfd-a2ef-1cc1f2ef3da3"
                    },
                    "status":"ok"
                }
            }"#;
        let reply_message = serde_json::from_str(message).unwrap();
        assert_eq!(m, reply_message);
    }
}

View File

@@ -0,0 +1,36 @@
[package]
name = "libs-common"
version = "0.1.0"
edition = "2021"
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[features]
jni-bindings = ["boringtun/jni-bindings"]
[dependencies]
base64 = { version = "0.21", default-features = false, features = ["std"] }
serde = { version = "1.0", default-features = false, features = ["derive", "std"] }
futures = { version = "0.3", default-features = false, features = ["std", "async-await", "executor"] }
futures-util = { version = "0.3", default-features = false, features = ["std", "async-await", "async-await-macro"] }
tokio-tungstenite = { version = "0.18", default-features = false, features = ["connect", "handshake"] }
webrtc = { version = "0.8" }
uuid = { version = "1.3", default-features = false, features = ["std", "v4", "serde"] }
thiserror = { version = "1.0", default-features = false }
tracing = { version = "0.1", default-features = false, features = ["std", "attributes"] }
serde_json = { version = "1.0", default-features = false, features = ["std"] }
tokio = { version = "1.28", default-features = false, features = ["rt", "rt-multi-thread"]}
url = { version = "2.3.1", default-features = false }
rand_core = { version = "0.6.4", default-features = false, features = ["std"] }
async-trait = { version = "0.1", default-features = false }
backoff = { version = "0.4", default-features = false }
ip_network = { version = "0.4", default-features = false, features = ["serde"] }
boringtun = { workspace = true }
os_info = { version = "3", default-features = false }
macros = { path = "../../macros" }
[target.'cfg(any(target_os = "macos", target_os = "ios"))'.dependencies]
swift-bridge = { workspace = true }
[target.'cfg(target_os = "linux")'.dependencies]
rtnetlink = { version = "0.12", default-features = false, features = ["tokio_socket"] }

View File

@@ -0,0 +1,334 @@
//! Control protocol related module.
//!
//! This modules contains the logic for handling in and out messages through the control plane.
//! Handling of the message itself can be found in the other lib crates.
//!
//! Entrypoint for this module is [PhoenixChannel].
use std::{marker::PhantomData, time::Duration};
use base64::Engine;
use futures::{
channel::mpsc::{channel, Receiver, Sender},
TryStreamExt,
};
use futures_util::{Future, SinkExt, StreamExt};
use rand_core::{OsRng, RngCore};
use serde::{de::DeserializeOwned, Deserialize, Serialize};
use tokio_tungstenite::{
connect_async,
tungstenite::{self, handshake::client::Request},
};
use tungstenite::Message;
use url::Url;
use crate::{get_user_agent, Error, Result};
const CHANNEL_SIZE: usize = 1_000;
/// Main struct to interact with the control-protocol channel.
///
/// After creating a new `PhoenixChannel` using [PhoenixChannel::new] you need to
/// use [start][PhoenixChannel::start] for the channel to do anything.
///
/// If you want to send something through the channel you need to obtain a [PhoenixSender] through
/// [PhoenixChannel::sender]; this already clones the sender, so no need to clone it after you obtain it.
///
/// When [PhoenixChannel::start] is called a new websocket is created that will listen for messages from the control plane
/// based on the parameters passed on [new][PhoenixChannel::new]; from then on, any messages sent with a sender
/// obtained by [PhoenixChannel::sender] will be forwarded to the websocket up to the control plane. Ingress messages
/// will be passed on to the `handler` provided in [PhoenixChannel::new].
///
/// The future returned by [PhoenixChannel::start] will finish when the websocket closes (by an error), meaning that if you
/// `await` it, it will block until you use `close` in a [PhoenixSender], the portal closes the connection, or something goes wrong.
pub struct PhoenixChannel<F, I, R, M> {
    // Portal websocket endpoint to connect to.
    uri: Url,
    // Callback invoked for each decoded ingress message.
    handler: F,
    // Endpoints of the outgoing-message channel; see `sender` docs above.
    sender: Sender<Message>,
    receiver: Receiver<Message>,
    // I: ingress type, R: reply type, M: unified message type.
    _phantom: PhantomData<(I, R, M)>,
}
// This is basically the same request tungstenite builds itself, except we add
// extra headers (namely User-Agent).
fn make_request(uri: &Url) -> Result<Request> {
    let host = uri.host().ok_or(Error::UriError)?;
    let host = match uri.port() {
        Some(port) => format!("{host}:{port}"),
        None => host.to_string(),
    };

    // Random nonce for the websocket handshake key.
    let mut nonce = [0u8; 16];
    OsRng.fill_bytes(&mut nonce);
    let key = base64::engine::general_purpose::STANDARD.encode(nonce);

    let request = Request::builder()
        .method("GET")
        .header("Host", host)
        .header("Connection", "Upgrade")
        .header("Upgrade", "websocket")
        .header("Sec-WebSocket-Version", "13")
        .header("Sec-WebSocket-Key", key)
        .header("User-Agent", get_user_agent())
        .uri(uri.as_str())
        .body(())?;
    Ok(request)
}
impl<F, Fut, I, R, M> PhoenixChannel<F, I, R, M>
where
    I: DeserializeOwned,
    R: DeserializeOwned,
    M: From<I> + From<R>,
    F: Fn(M) -> Fut,
    Fut: Future<Output = ()> + Send + 'static,
{
    /// Starts the tunnel with the parameters given in [Self::new].
    ///
    // (Note: we could add a generic list of messages but this is easier)
    /// Additionally, you can add a list of topics to join after connection ASAP.
    ///
    /// See [struct-level docs][PhoenixChannel] for more info.
    #[tracing::instrument(level = "trace", skip(self))]
    pub async fn start(&mut self, topics: Vec<String>) -> Result<()> {
        tracing::trace!("Trying to connect to the portal...");
        let (ws_stream, _) = connect_async(make_request(&self.uri)?).await?;
        tracing::trace!("Successfully connected to portal");
        // Split the stream so ingress processing and egress forwarding run concurrently below.
        let (mut write, read) = ws_stream.split();
        let mut sender = self.sender();
        let Self {
            handler, receiver, ..
        } = self;
        let process_messages = read.try_for_each(|message| async {
            Self::message_process(handler, message).await;
            Ok(())
        });
        // Would we like to do write.send_all(futures::stream(Message::text(...))) ?
        // yes.
        // but since write is taken by reference rust doesn't believe this future is sendable anymore
        // so this works for now, since we only use it with 1 topic.
        for topic in topics {
            write
                .send(Message::Text(
                    // We don't care about the reply type when serializing
                    serde_json::to_string(&PhoenixMessage::<_, ()>::new(
                        topic,
                        EgressControlMessage::PhxJoin(Empty {}),
                    ))
                    .expect("we should always be able to serialize a join topic message"),
                ))
                .await?;
        }
        // TODO: is Forward cancel safe?
        // I would assume it is and that's the advantage over
        // while let Some(item) = receiver.next().await { write.send(item) } ...
        // but double check this!
        // If it's not cancel safe this means an item can be consumed and never sent.
        // Furthermore can this also happen if write errors out? *that* I'd assume is possible...
        // What option is left? write a new future to forward items.
        // For now we should never assume that an item arrived the portal because we sent it!
        let send_messages = receiver.map(Ok).forward(write);
        // Phoenix requires periodic heartbeats or the server drops the channel.
        let phoenix_heartbeat = tokio::spawn(async move {
            let mut timer = tokio::time::interval(Duration::from_secs(30));
            loop {
                timer.tick().await;
                let Ok(_) = sender.send("phoenix", EgressControlMessage::Heartbeat(Empty {})).await else { break };
            }
        });
        futures_util::pin_mut!(process_messages, send_messages);
        // processing messages should be quick otherwise it'd block sending messages.
        // we could remove this limitation by spawning a separate task for each of these.
        let result = futures::future::select(process_messages, send_messages)
            .await
            .factor_first()
            .0;
        phoenix_heartbeat.abort();
        result?;
        Ok(())
    }
    /// Decodes one raw websocket message and dispatches it:
    /// application payloads and ok-replies go to `handler`; error replies are logged.
    #[tracing::instrument(level = "trace", skip(handler))]
    async fn message_process(handler: &F, message: tungstenite::Message) {
        tracing::trace!("{message:?}");
        match message.into_text() {
            Ok(m_str) => match serde_json::from_str::<PhoenixMessage<I, R>>(&m_str) {
                Ok(m) => match m.payload {
                    Payload::Message(m) => handler(m.into()).await,
                    Payload::Reply(status) => match status {
                        ReplyMessage::PhxReply(phx_reply) => match phx_reply {
                            // TODO: Here we should pass error info to a subscriber
                            PhxReply::Error(info) => tracing::error!("Portal error: {info:?}"),
                            PhxReply::Ok(reply) => match reply {
                                OkReply::NoMessage(Empty {}) => {
                                    tracing::trace!("Phoenix status message")
                                }
                                OkReply::Message(m) => handler(m.into()).await,
                            },
                        },
                        ReplyMessage::PhxError(Empty {}) => tracing::error!("Phoenix error"),
                    },
                },
                Err(e) => {
                    tracing::error!("Error deserializing message {m_str}: {e:?}");
                }
            },
            _ => tracing::error!("Received message that is not text"),
        }
    }
    /// Obtains a new sender that can be used to send messages with this [PhoenixChannel] to the portal.
    ///
    /// Note that for the sender to relay any message, the future returned by [PhoenixChannel::start] needs to be polled (await it),
    /// and [PhoenixChannel::start] takes `&mut self`, meaning you need to get the sender before running [PhoenixChannel::start].
    pub fn sender(&self) -> PhoenixSender {
        PhoenixSender {
            sender: self.sender.clone(),
        }
    }
    /// Creates a new [PhoenixChannel] not started yet.
    ///
    /// # Parameters:
    /// - `uri`: Portal's websocket uri
    /// - `handler`: The handler that will be called for each received message.
    ///
    /// For more info see [struct-level docs][PhoenixChannel].
    pub fn new(uri: Url, handler: F) -> Self {
        let (sender, receiver) = channel(CHANNEL_SIZE);
        Self {
            sender,
            receiver,
            uri,
            handler,
            _phantom: PhantomData,
        }
    }
}
#[derive(Debug, PartialEq, Eq, Deserialize, Serialize, Clone)]
#[serde(untagged)]
enum Payload<T, R> {
    // We might want other type for the reply message
    // but that makes everything even more convoluted!
    // and we need to think how to make this whole mess less convoluted.
    // NOTE(review): `untagged` makes serde try variants in declaration order;
    // reordering these variants may change which one a payload deserializes into.
    Reply(ReplyMessage<R>),
    Message(T),
}
/// Envelope for every message exchanged over the Phoenix channel.
#[derive(Debug, PartialEq, Eq, Clone, Deserialize, Serialize)]
pub struct PhoenixMessage<T, R> {
    /// Phoenix topic this message belongs to.
    topic: String,
    #[serde(flatten)]
    payload: Payload<T, R>,
    /// Phoenix message reference (serialized as "ref"); always `None` for messages we build.
    #[serde(rename = "ref")]
    reference: Option<i32>,
}
impl<T, R> PhoenixMessage<T, R> {
    /// Builds an egress message for `topic` carrying `payload`.
    pub fn new(topic: impl Into<String>, payload: T) -> Self {
        let payload = Payload::Message(payload);
        Self {
            topic: topic.into(),
            payload,
            reference: None,
        }
    }
    /// Builds an "ok" reply message for `topic` carrying `payload`.
    pub fn new_reply(topic: impl Into<String>, payload: R) -> Self {
        // There has to be a better way :\
        let reply = ReplyMessage::PhxReply(PhxReply::Ok(OkReply::Message(payload)));
        Self {
            topic: topic.into(),
            payload: Payload::Reply(reply),
            reference: None,
        }
    }
}
// Awful hack to get serde_json to generate an empty "{}" instead of using "null"
#[derive(Debug, Deserialize, Serialize, PartialEq, Eq, Clone)]
#[serde(deny_unknown_fields)]
struct Empty {}
/// Control messages we send to the portal: topic joins and heartbeats.
#[derive(Debug, Deserialize, Serialize, Clone)]
#[serde(rename_all = "snake_case", tag = "event", content = "payload")]
enum EgressControlMessage {
    PhxJoin(Empty),
    Heartbeat(Empty),
}
/// Reply messages received from the portal (serialized as phx_reply / phx_error events).
#[derive(Debug, Deserialize, Serialize, Clone, PartialEq, Eq)]
#[serde(rename_all = "snake_case", tag = "event", content = "payload")]
enum ReplyMessage<T> {
    PhxReply(PhxReply<T>),
    PhxError(Empty),
}
/// Body of an ok-reply: either an application message or an empty status ack.
#[derive(Debug, Deserialize, Serialize, Clone, PartialEq, Eq)]
#[serde(untagged)]
enum OkReply<T> {
    Message(T),
    NoMessage(Empty),
}
/// Error detail attached to an error reply.
#[derive(Debug, Deserialize, Serialize, Clone, PartialEq, Eq)]
#[serde(rename_all = "snake_case")]
enum ErrorInfo {
    Reason(String),
    Offline,
}
/// Status portion of a phx_reply ("ok" with a response, or "error" with info).
#[derive(Debug, Deserialize, Serialize, Clone, PartialEq, Eq)]
#[serde(rename_all = "snake_case", tag = "status", content = "response")]
enum PhxReply<T> {
    Ok(OkReply<T>),
    Error(ErrorInfo),
}
/// You can use this sender to send messages through a `PhoenixChannel`.
///
/// Messages won't be sent unless [PhoenixChannel::start] is running; internally
/// this sends messages through a futures channel that are then forwarded in the [PhoenixChannel] event loop.
pub struct PhoenixSender {
    /// Clone of the channel's egress queue (see [PhoenixChannel::sender]).
    sender: Sender<Message>,
}
impl PhoenixSender {
    /// Serializes `payload` as a Phoenix message for `topic` and queues it
    /// for delivery by the running [PhoenixChannel::start] loop.
    ///
    /// # Parameters
    /// - topic: Phoenix topic
    /// - payload: Message's payload
    pub async fn send(&mut self, topic: impl Into<String>, payload: impl Serialize) -> Result<()> {
        // The reply type is irrelevant when serializing, so use `()`.
        let message = serde_json::to_string(&PhoenixMessage::<_, ()>::new(topic, payload))?;
        self.sender.send(Message::text(message)).await?;
        Ok(())
    }
    /// Join a phoenix topic, meaning that after this method is invoked [PhoenixChannel] will
    /// receive messages from that topic, given that upstream accepts you into the given topic.
    pub async fn join_topic(&mut self, topic: impl Into<String>) -> Result<()> {
        let join = EgressControlMessage::PhxJoin(Empty {});
        self.send(topic, join).await
    }
    /// Closes the [PhoenixChannel]
    pub async fn close(&mut self) -> Result<()> {
        self.sender.send(Message::Close(None)).await?;
        self.sender.close().await?;
        Ok(())
    }
}

View File

@@ -0,0 +1,119 @@
//! Error module.
use base64::{DecodeError, DecodeSliceError};
use boringtun::noise::errors::WireGuardError;
use macros::SwiftEnum;
use thiserror::Error;
/// Unified Result type to use across connlib.
pub type Result<T> = std::result::Result<T, ConnlibError>;
/// Unified error type to use across connlib.
///
/// `SwiftEnum` derives a mirrored FFI-friendly enum ([SwiftConnlibError]).
#[derive(Error, Debug, SwiftEnum)]
pub enum ConnlibError {
    /// Standard IO error.
    #[error(transparent)]
    Io(#[from] std::io::Error),
    /// Error while decoding a base64 value.
    #[error("There was an error while decoding a base64 value: {0}")]
    Base64DecodeError(#[from] DecodeError),
    /// Error while decoding a base64 value from a slice.
    #[error("There was an error while decoding a base64 value: {0}")]
    Base64DecodeSliceError(#[from] DecodeSliceError),
    /// Request error for websocket connection.
    #[error("Error forming request: {0}")]
    RequestError(#[from] tokio_tungstenite::tungstenite::http::Error),
    /// Error during websocket connection.
    #[error("Portal connection error: {0}")]
    PortalConnectionError(#[from] tokio_tungstenite::tungstenite::error::Error),
    /// Provided string was not formatted as a URL.
    #[error("Badly formatted URI")]
    UriError,
    /// Serde's serialize error.
    #[error(transparent)]
    SerializeError(#[from] serde_json::Error),
    /// Webrtc error
    #[error("ICE-related error: {0}")]
    IceError(#[from] webrtc::Error),
    /// Webrtc error regarding data channel.
    #[error("ICE-data error: {0}")]
    IceDataError(#[from] webrtc::data::Error),
    /// Error while sending through an async channel.
    #[error("Error sending message through an async channel")]
    SendChannelError,
    /// Error when trying to establish connection between peers.
    #[error("Error while establishing connection between peers")]
    ConnectionEstablishError,
    /// Error related to wireguard protocol.
    #[error("Wireguard error")]
    WireguardError(WireGuardError),
    /// Expected an initialized runtime but there was none.
    #[error("Expected runtime to be initialized")]
    NoRuntime,
    /// Tried to access a resource which didn't exist.
    #[error("Tried to access an undefined resource")]
    UnknownResource,
    /// Error regarding our own control protocol.
    #[error("Control plane protocol error. Unexpected messages or message order.")]
    ControlProtocolError,
    /// Error when reading system's interface
    #[error("Error while reading system's interface")]
    IfaceRead(std::io::Error),
    /// Glob for errors without a type.
    #[error("Other error: {0}")]
    Other(&'static str),
    /// Invalid tunnel name
    #[error("Invalid tunnel name")]
    InvalidTunnelName,
    /// Raw netlink error (Linux-only interface management).
    #[cfg(target_os = "linux")]
    #[error(transparent)]
    NetlinkError(rtnetlink::Error),
    /// Io translation of netlink error
    /// The IO version is easier to interpret
    /// We maintain a different variant from the standard IO for this to keep more context
    #[error("IO netlink error: {0}")]
    NetlinkErrorIo(std::io::Error),
    /// No iface found
    #[error("No iface found")]
    NoIface,
    /// No MTU found
    #[error("No MTU found")]
    NoMtu,
}
/// Type auto-generated by [SwiftEnum] intended to be used with rust-swift-bridge.
/// All the variants come from [ConnlibError], reference that for documentation.
pub use swift_ffi::SwiftConnlibError;
#[cfg(target_os = "linux")]
impl From<rtnetlink::Error> for ConnlibError {
    fn from(err: rtnetlink::Error) -> Self {
        match err {
            // Translate the inner netlink error into an io::Error, which is easier
            // to interpret, while keeping a dedicated variant to preserve context
            // (see `ConnlibError::NetlinkErrorIo`).
            rtnetlink::Error::NetlinkError(err) => Self::NetlinkErrorIo(err.to_io()),
            err => Self::NetlinkError(err),
        }
    }
}
/// Wrap wireguard protocol errors into the unified error type.
impl From<WireGuardError> for ConnlibError {
    fn from(e: WireGuardError) -> Self {
        Self::WireguardError(e)
    }
}
/// Glob conversion for ad-hoc static error strings.
impl From<&'static str> for ConnlibError {
    fn from(e: &'static str) -> Self {
        Self::Other(e)
    }
}
/// Tokio channel send failures all collapse into `SendChannelError`.
impl<T> From<tokio::sync::mpsc::error::SendError<T>> for ConnlibError {
    fn from(_: tokio::sync::mpsc::error::SendError<T>) -> Self {
        Self::SendChannelError
    }
}
/// Futures channel send failures all collapse into `SendChannelError`.
impl From<futures::channel::mpsc::SendError> for ConnlibError {
    fn from(_: futures::channel::mpsc::SendError) -> Self {
        Self::SendChannelError
    }
}

View File

@@ -0,0 +1,20 @@
//! Module that contains the Error-Type that hints how to handle an error to upper layers.
use macros::SwiftEnum;
/// This indicates whether the produced error is something recoverable or fatal.
/// Fatal/Recoverable only indicates how the client should handle the error.
///
/// Any of the errors in [ConnlibError][crate::error::ConnlibError] could be of any [ErrorType] depending on the circumstances.
#[derive(Debug, Clone, Copy, SwiftEnum)]
pub enum ErrorType {
    /// Recoverable means that the session can continue
    /// e.g. Failed to send an SDP
    Recoverable,
    /// Fatal error means that the session should stop and start again,
    /// generally after user input, such as clicking connect once more.
    /// e.g. Max number of retries was reached when trying to connect to the portal.
    Fatal,
}
/// Auto generated enum by [SwiftEnum], all variants come from [ErrorType]
/// reference that for docs.
pub use swift_ffi::SwiftErrorType;

View File

@@ -0,0 +1,29 @@
//! This crates contains shared types and behavior between all the other libraries.
//!
//! This includes types provided by external crates, i.e. [boringtun] to make sure that
//! we are using the same version across our own crates.
pub mod error;
pub mod error_type;
mod session;
pub mod control;
pub mod messages;
pub use error::ConnlibError as Error;
pub use error::Result;
pub use session::{Callbacks, ControlSession, ResourceList, Session, TunnelAddresses};
/// Crate version baked in at compile time; reported in the User-Agent string.
const VERSION: &str = env!("CARGO_PKG_VERSION");
/// Library name reported in the User-Agent string.
const LIB_NAME: &str = "connlib";
/// Builds the User-Agent string (`"<os-type>/<os-version> connlib/<version>"`)
/// sent to the portal when opening the websocket connection.
pub fn get_user_agent() -> String {
    let info = os_info::get();
    format!("{}/{} {LIB_NAME}/{VERSION}", info.os_type(), info.version())
}

View File

@@ -0,0 +1,160 @@
//! Message types that are used by both the gateway and client.
use std::net::{IpAddr, Ipv4Addr, Ipv6Addr};
use ip_network::IpNetwork;
use serde::{Deserialize, Serialize};
use uuid::Uuid;
use webrtc::peer_connection::sdp::session_description::RTCSessionDescription;
mod key;
pub use key::Key;
/// General type for handling portal's id (UUID v4)
pub type Id = Uuid;
/// Represents a wireguard peer.
#[derive(Debug, PartialEq, Eq, Deserialize, Serialize, Clone)]
pub struct Peer {
    /// Keepalive: How often to send a keep alive message.
    pub persistent_keepalive: Option<u16>,
    /// Peer's public key.
    pub public_key: Key,
    /// Peer's Ipv4 (only 1 ipv4 per peer for now and mandatory).
    pub ipv4: Ipv4Addr,
    /// Peer's Ipv6 (only 1 ipv6 per peer for now and mandatory).
    pub ipv6: Ipv6Addr,
    /// Preshared key for the given peer.
    pub preshared_key: Key,
}
/// Represent a connection request from a client to a given resource.
///
/// While this is a client-only message it's hosted in common since the tunnel
/// makes use of this message type.
#[derive(Debug, Deserialize, Serialize, Clone)]
pub struct RequestConnection {
    /// Resource id the request is for.
    pub resource_id: Id,
    /// The preshared key the client generated for the connection that it is trying to establish.
    pub device_preshared_key: Key,
    /// Client's local RTC Session Description that the client will use for this connection.
    pub device_rtc_session_description: RTCSessionDescription,
}
// Custom PartialEq that ignores `device_rtc_session_description`
// (RTCSessionDescription does not implement PartialEq/Eq).
impl PartialEq for RequestConnection {
    fn eq(&self, other: &Self) -> bool {
        self.resource_id == other.resource_id
            && self.device_preshared_key == other.device_preshared_key
    }
}
impl Eq for RequestConnection {}
/// A resource the client can be granted access to: either a DNS name or a CIDR.
#[derive(Debug, Deserialize, Serialize, Clone, PartialEq, Eq)]
#[serde(tag = "type", rename_all = "snake_case")]
pub enum ResourceDescription {
    Dns(ResourceDescriptionDns),
    Cidr(ResourceDescriptionCidr),
}
/// Description of a resource that maps to a DNS record.
#[derive(Debug, Deserialize, Serialize, Clone, PartialEq, Eq)]
pub struct ResourceDescriptionDns {
    /// Resource's id.
    pub id: Id,
    /// Internal resource's domain name.
    pub address: String,
    /// Resource's ipv4 mapping.
    ///
    /// Note that this is not the actual ipv4 for the resource not even wireguard's ipv4 for the resource.
    /// This is just the mapping we use internally between a resource and its ip for intercepting packets.
    pub ipv4: Ipv4Addr,
    /// Resource's ipv6 mapping.
    ///
    /// Note that this is not the actual ipv6 for the resource not even wireguard's ipv6 for the resource.
    /// This is just the mapping we use internally between a resource and its ip for intercepting packets.
    pub ipv6: Ipv6Addr,
    /// Name of the resource.
    ///
    /// Used only for display.
    pub name: String,
}
impl ResourceDescription {
pub fn ips(&self) -> Vec<IpNetwork> {
match self {
ResourceDescription::Dns(r) => vec![r.ipv4.into(), r.ipv6.into()],
ResourceDescription::Cidr(r) => vec![r.address],
}
}
pub fn id(&self) -> Id {
match self {
ResourceDescription::Dns(r) => r.id,
ResourceDescription::Cidr(r) => r.id,
}
}
}
/// Description of a resource that maps to a CIDR.
#[derive(Debug, Deserialize, Serialize, Clone, PartialEq, Eq)]
pub struct ResourceDescriptionCidr {
    /// Resource's id.
    pub id: Id,
    /// CIDR that this resource points to.
    pub address: IpNetwork,
    /// Name of the resource.
    ///
    /// Used only for display.
    pub name: String,
}
/// Represents a wireguard interface configuration.
///
/// Note that the ips are /32 for ipv4 and /128 for ipv6.
/// This is done to minimize collisions and we update the routing table manually.
#[derive(Debug, Deserialize, Serialize, Clone, PartialEq, Eq)]
pub struct Interface {
    /// Interface's Ipv4.
    pub ipv4: Ipv4Addr,
    /// Interface's Ipv6.
    pub ipv6: Ipv6Addr,
    /// DNS that will be used to query for DNS that aren't within our resource list.
    #[serde(skip_serializing_if = "Vec::is_empty")]
    #[serde(default)]
    pub upstream_dns: Vec<IpAddr>,
}
/// A single relay
#[derive(Debug, Deserialize, Serialize, Clone, PartialEq, Eq)]
#[serde(tag = "type", rename_all = "snake_case")]
pub enum Relay {
    /// STUN type of relay
    Stun(Stun),
    /// TURN type of relay
    Turn(Turn),
}
/// Represent a TURN relay
#[derive(Debug, Deserialize, Serialize, Clone, PartialEq, Eq)]
pub struct Turn {
    // TODO: DateTime
    /// Expire time of the username/password in unix millisecond timestamp UTC
    pub expires_at: u64,
    /// URI of the relay
    pub uri: String,
    /// Username for the relay
    pub username: String,
    // TODO: SecretString
    /// Password for the relay
    pub password: String,
}
/// Stun kind of relay
#[derive(Debug, Deserialize, Serialize, Clone, PartialEq, Eq)]
pub struct Stun {
    /// URI for the relay
    pub uri: String,
}

View File

@@ -0,0 +1,54 @@
use base64::{display::Base64Display, engine::general_purpose::STANDARD, Engine};
use serde::{de, Deserialize, Deserializer, Serialize, Serializer};
use std::{fmt, str::FromStr};
use crate::Error;
/// Length in bytes of a wireguard key (x25519 public/private/preshared keys are 32 bytes).
const KEY_SIZE: usize = 32;
/// A `Key` struct to hold interface or peer keys as bytes. This type is
/// deserialized from a base64 encoded string. It can also be serialized back
/// into an encoded string.
#[derive(Clone, Copy, Debug, Default, PartialEq, Eq)]
pub struct Key(pub [u8; KEY_SIZE]);
impl FromStr for Key {
    type Err = Error;

    /// Decodes a standard-base64 string into exactly [KEY_SIZE] raw bytes.
    fn from_str(s: &str) -> Result<Self, Self::Err> {
        let mut buf = [0u8; KEY_SIZE];
        match STANDARD.decode_slice(s, &mut buf)? {
            // A valid key decodes to the full buffer; anything shorter is rejected.
            KEY_SIZE => Ok(Self(buf)),
            _ => Err(base64::DecodeError::InvalidLength.into()),
        }
    }
}
impl fmt::Display for Key {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(f, "{}", Base64Display::new(&self.0, &STANDARD))
}
}
impl<'de> Deserialize<'de> for Key {
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
where
D: Deserializer<'de>,
{
let s = String::deserialize(deserializer)?;
s.parse().map_err(de::Error::custom)
}
}
impl Serialize for Key {
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
where
S: Serializer,
{
serializer.collect_str(&self)
}
}

View File

@@ -0,0 +1,241 @@
use async_trait::async_trait;
use backoff::{backoff::Backoff, ExponentialBackoffBuilder};
use boringtun::x25519::{PublicKey, StaticSecret};
use rand_core::OsRng;
use std::{
marker::PhantomData,
net::{Ipv4Addr, Ipv6Addr},
};
use tokio::{
runtime::Runtime,
sync::mpsc::{Receiver, Sender},
};
use url::Url;
use crate::{control::PhoenixChannel, error_type::ErrorType, messages::Key, Error, Result};
// TODO: Not the most tidy trait for a control-plane.
/// Trait that represents a control-plane.
#[async_trait]
pub trait ControlSession<T, U> {
    /// Start control-plane with the given private-key in the background.
    /// Returns a sender for ingress messages and a receiver for egress messages.
    async fn start(private_key: StaticSecret) -> Result<(Sender<T>, Receiver<U>)>;
    /// Either "gateway" or "client" used to get the control-plane URL.
    fn socket_path() -> &'static str;
}
// TODO: Currently I'm using Session for both gateway and clients
// however, gateway could use the runtime directly and could make things easier
// so revisit this.
/// A session is the entry-point for connlib, maintains the runtime and the tunnel.
///
/// A session is created using [Session::connect], then to stop a session we use [Session::disconnect].
pub struct Session<T, U, V, R, M> {
    /// Owned tokio runtime; `None` once [Session::disconnect] has been called.
    runtime: Option<Runtime>,
    /// Generic parameters are only used at the type level (see [Session::connect]).
    _phantom: PhantomData<(T, U, V, R, M)>,
}
/// Resource list that will be displayed to the users.
pub struct ResourceList {
    pub resources: Vec<String>,
}
/// Tunnel addresses to be surfaced to the client apps.
pub struct TunnelAddresses {
    /// IPv4 Address.
    pub address4: Ipv4Addr,
    /// IPv6 Address.
    pub address6: Ipv6Addr,
}
// TODO: evaluate making these callbacks non-static methods.
/// Traits that will be used by connlib to callback the client upper layers.
pub trait Callbacks {
    /// Called when there's a change in the resource list.
    fn on_update_resources(resource_list: ResourceList);
    /// Called when the tunnel address is set.
    // NOTE(review): method name has a typo ("adresses") but renaming would break
    // all implementors — fix in a coordinated change.
    fn on_set_tunnel_adresses(tunnel_addresses: TunnelAddresses);
    /// Called when there's an error.
    ///
    /// # Parameters
    /// - `error`: The actual error that happened.
    /// - `error_type`: Whether the error should terminate the session or not.
    fn on_error(error: &Error, error_type: ErrorType);
}
/// Evaluates `$result`; on `Err`, reports a fatal error through callback type `$c`
/// and returns from the enclosing function.
macro_rules! fatal_error {
    ($result:expr, $c:ty) => {
        match $result {
            Ok(res) => res,
            Err(e) => {
                <$c>::on_error(&e, ErrorType::Fatal);
                return;
            }
        }
    };
}
impl<T, U, V, R, M> Session<T, U, V, R, M>
where
    T: ControlSession<M, V>,
    U: for<'de> serde::Deserialize<'de> + std::fmt::Debug + Send + 'static,
    R: for<'de> serde::Deserialize<'de> + std::fmt::Debug + Send + 'static,
    V: serde::Serialize + Send + 'static,
    M: From<U> + From<R> + Send + 'static + std::fmt::Debug,
{
    /// Block on waiting for ctrl+c to terminate the runtime.
    /// (Used for the gateways).
    pub fn wait_for_ctrl_c(&mut self) -> Result<()> {
        self.runtime
            .as_ref()
            .ok_or(Error::NoRuntime)?
            .block_on(async {
                tokio::signal::ctrl_c().await?;
                Ok(())
            })
    }
    /// Starts a session in the background.
    ///
    /// This will:
    /// 1. Create and start a tokio runtime
    /// 2. Connect to the control plane to the portal
    /// 3. Start the tunnel in the background and forward control plane messages to it.
    ///
    /// The generic parameter `C` should implement all the handlers and that's how errors will be surfaced.
    ///
    /// On a fatal error you should call `[Session::disconnect]` and start a new one.
    // TODO: token should be something like SecretString but we need to think about FFI compatibility
    pub fn connect<C: Callbacks>(portal_url: impl TryInto<Url>, token: String) -> Result<Self> {
        // TODO: We could use tokio::runtime::current() to get the current runtime
        // which could work with swift-rust that already runs a runtime. But IDK if that will work
        // in all platforms, a couple of new threads shouldn't bother anyone.
        // Big question here however is how do we get the result? We could block here await the result and spawn a new task.
        // but then platforms should know that this function is blocking.
        let portal_url = portal_url.try_into().map_err(|_| Error::UriError)?;
        let runtime = tokio::runtime::Builder::new_multi_thread()
            .enable_all()
            .build()?;
        runtime.spawn(async move {
            // A fresh wireguard identity is generated per session.
            let private_key = StaticSecret::random_from_rng(OsRng);
            let self_id = uuid::Uuid::new_v4();
            let connect_url = fatal_error!(get_websocket_path(portal_url, token, T::socket_path(), &Key(PublicKey::from(&private_key).to_bytes()), &self_id.to_string()), C);
            let (sender, mut receiver) = fatal_error!(T::start(private_key).await, C);
            // Every portal message is forwarded into the control-plane's channel.
            let mut connection = PhoenixChannel::<_, U, R, M>::new(connect_url, move |msg| {
                let sender = sender.clone();
                async move {
                    tracing::trace!("Received message: {msg:?}");
                    if let Err(e) = sender.send(msg).await {
                        tracing::warn!("Received a message after handler already closed: {e}. Probably message received during session clean up.");
                    }
                }
            });
            // Used to send internal messages
            let mut internal_sender = connection.sender();
            let topic = T::socket_path().to_string();
            let topic_send = topic.clone();
            // Reconnect loop: retries with exponential backoff until the backoff gives up.
            tokio::spawn(async move {
                let mut exponential_backoff = ExponentialBackoffBuilder::default().build();
                loop {
                    let result = connection.start(vec![topic.clone()]).await;
                    if let Some(t) = exponential_backoff.next_backoff() {
                        tracing::warn!("Error during connection to the portal, retrying in {} seconds", t.as_secs());
                        match result {
                            Ok(()) => C::on_error(&tokio_tungstenite::tungstenite::Error::ConnectionClosed.into(), ErrorType::Recoverable),
                            Err(e) => C::on_error(&e, ErrorType::Recoverable)
                        }
                        tokio::time::sleep(t).await;
                    } else {
                        tracing::error!("Connection to the portal error, check your internet or the status of the portal.\nDisconnecting interface.");
                        match result {
                            Ok(()) => C::on_error(&crate::Error::PortalConnectionError(tokio_tungstenite::tungstenite::Error::ConnectionClosed), ErrorType::Fatal),
                            Err(e) => C::on_error(&e, ErrorType::Fatal)
                        }
                        break;
                    }
                }
            });
            // TODO: Implement Sink for PhoenixEvent (created from a PhoenixSender event + topic)
            // that way we can simply do receiver.forward(sender)
            // Forward control-plane egress messages up to the portal.
            tokio::spawn(async move {
                while let Some(message) = receiver.recv().await {
                    if let Err(err) = internal_sender.send(&topic_send, message).await {
                        tracing::error!("Channel already closed when trying to send message: {err}. Probably trying to send a message during session clean up.");
                    }
                }
            });
        });
        Ok(Self {
            runtime: Some(runtime),
            _phantom: PhantomData,
        })
    }
    /// Cleanup a [Session].
    ///
    /// For now this just drops the runtime, which should drop all pending tasks.
    /// Further cleanup should be done here. (Otherwise we can just drop [Session]).
    pub fn disconnect(&mut self) -> bool {
        // 1. Close the websocket connection
        // 2. Free the device handle (UNIX)
        // 3. Close the file descriptor (UNIX)
        // 4. Remove the mapping
        // The way we cleanup the tasks is we drop the runtime
        // this means we don't need to keep track of different tasks
        // but if any of the tasks never yields this will block forever!
        // So always yield and if you spawn a blocking tasks rewrite this.
        // Furthermore, we will depend on Drop impls to do the list above so,
        // implement them :)
        self.runtime = None;
        true
    }
    /// TODO
    pub fn bump_sockets(&self) -> bool {
        true
    }
    /// TODO
    pub fn disable_some_roaming_for_broken_mobile_semantics(&self) -> bool {
        true
    }
}
fn get_websocket_path(
mut url: Url,
secret: String,
mode: &str,
public_key: &Key,
external_id: &str,
) -> Result<Url> {
{
let mut paths = url.path_segments_mut().map_err(|_| Error::UriError)?;
paths.pop_if_empty();
paths.push(mode);
paths.push("websocket");
}
{
let mut query_pairs = url.query_pairs_mut();
query_pairs.clear();
query_pairs.append_pair("token", &secret);
query_pairs.append_pair("public_key", &public_key.to_string());
query_pairs.append_pair("external_id", external_id);
query_pairs.append_pair("name_suffix", "todo");
}
Ok(url)
}

View File

@@ -0,0 +1,16 @@
[package]
name = "firezone-gateway-connlib"
version = "0.1.0"
edition = "2021"
[dependencies]
libs-common = { path = "../common" }
async-trait = { version = "0.1", default-features = false }
firezone-tunnel = { path = "../tunnel" }
tokio = { version = "1.27", default-features = false, features = ["sync"] }
tracing = { version = "0.1", default-features = false, features = ["std", "attributes"] }
serde = { version = "1.0", default-features = false, features = ["std", "derive"] }
boringtun = { workspace = true }
[dev-dependencies]
serde_json = { version = "1.0", default-features = false, features = ["std"] }

View File

@@ -0,0 +1,159 @@
use std::{sync::Arc, time::Duration};
use firezone_tunnel::{ControlSignal, Tunnel};
use boringtun::x25519::StaticSecret;
use libs_common::{
error_type::ErrorType::{Fatal, Recoverable},
messages::ResourceDescription,
Callbacks, ControlSession, Result,
};
use tokio::sync::mpsc::{channel, Receiver, Sender};
use super::messages::{
ConnectionReady, EgressMessages, IngressMessages, InitGateway, RequestConnection,
};
use async_trait::async_trait;
/// Capacity of the egress channel between the control-plane and the portal sender task.
const INTERNAL_CHANNEL_SIZE: usize = 256;
/// Gateway-side control-plane: reacts to portal messages and drives the tunnel.
pub struct ControlPlane<C: Callbacks> {
    tunnel: Arc<Tunnel<ControlSignaler, C>>,
    control_signaler: ControlSignaler,
}
/// Cloneable handle used by the tunnel to send egress messages to the portal.
#[derive(Clone)]
struct ControlSignaler {
    internal_sender: Arc<Sender<EgressMessages>>,
}
#[async_trait]
impl ControlSignal for ControlSignaler {
    /// Gateways never originate connections to resources, so this is a logged no-op.
    async fn signal_connection_to(&self, resource: &ResourceDescription) -> Result<()> {
        tracing::warn!("A message to network resource: {resource:?} was discarded, gateways aren't meant to be used as clients.");
        Ok(())
    }
}
impl<C: Callbacks> ControlPlane<C>
where
    C: Send + Sync + 'static,
{
    /// Event loop: handles incoming portal messages and emits periodic stats.
    #[tracing::instrument(level = "trace", skip(self))]
    async fn start(mut self, mut receiver: Receiver<IngressMessages>) {
        let mut interval = tokio::time::interval(Duration::from_secs(10));
        loop {
            tokio::select! {
                Some(msg) = receiver.recv() => self.handle_message(msg).await,
                _ = interval.tick() => self.stats_event().await,
                else => break
            }
        }
    }
    /// Configures the tunnel interface from the portal's init message;
    /// a failure here is fatal for the session.
    #[tracing::instrument(level = "trace", skip_all)]
    async fn init(&mut self, init: InitGateway) {
        if let Err(e) = self.tunnel.set_interface(&init.interface).await {
            tracing::error!("Couldn't initialize interface: {e}");
            C::on_error(&e, Fatal);
            return;
        }
        // TODO: Enable masquerading here.
        tracing::info!("Firezoned Started!");
    }
    /// Negotiates a peer connection for a client's request in a background task,
    /// answering with a ConnectionReady on success and cleaning up on failure.
    #[tracing::instrument(level = "trace", skip(self))]
    fn connection_request(&self, connection_request: RequestConnection) {
        let tunnel = Arc::clone(&self.tunnel);
        let control_signaler = self.control_signaler.clone();
        tokio::spawn(async move {
            match tunnel
                .set_peer_connection_request(
                    connection_request.device.rtc_session_description,
                    connection_request.device.peer.into(),
                    connection_request.relays,
                    connection_request.device.id,
                )
                .await
            {
                Ok(gateway_rtc_sdp) => {
                    if let Err(err) = control_signaler
                        .internal_sender
                        .send(EgressMessages::ConnectionReady(ConnectionReady {
                            client_id: connection_request.device.id,
                            gateway_rtc_sdp,
                        }))
                        .await
                    {
                        tunnel.cleanup_peer_connection(connection_request.device.id);
                        C::on_error(&err.into(), Recoverable);
                    }
                }
                Err(err) => {
                    tunnel.cleanup_peer_connection(connection_request.device.id);
                    C::on_error(&err, Recoverable);
                }
            }
        });
    }
    /// Not implemented yet: panics if the portal sends an AddResource.
    #[tracing::instrument(level = "trace", skip(self))]
    fn add_resource(&self, resource: ResourceDescription) {
        todo!()
    }
    /// Dispatches one portal message to the matching handler above.
    #[tracing::instrument(level = "trace", skip(self))]
    pub(super) async fn handle_message(&mut self, msg: IngressMessages) {
        match msg {
            IngressMessages::Init(init) => self.init(init).await,
            IngressMessages::RequestConnection(connection_request) => {
                self.connection_request(connection_request)
            }
            IngressMessages::AddResource(resource) => self.add_resource(resource),
            IngressMessages::RemoveResource(_) => todo!(),
            IngressMessages::UpdateResource(_) => todo!(),
        }
    }
    /// Placeholder for periodic metrics reporting.
    #[tracing::instrument(level = "trace", skip(self))]
    pub(super) async fn stats_event(&mut self) {
        tracing::debug!("TODO: STATS EVENT");
    }
}
#[async_trait]
impl<C: Callbacks> ControlSession<IngressMessages, EgressMessages> for ControlPlane<C>
where
    C: Send + Sync + 'static,
{
    /// Builds the tunnel and spawns the control-plane event loop; returns the
    /// channel endpoints the [Session] uses to exchange messages with the portal.
    #[tracing::instrument(level = "trace", skip(private_key))]
    async fn start(
        private_key: StaticSecret,
    ) -> Result<(Sender<IngressMessages>, Receiver<EgressMessages>)> {
        // This is kinda hacky, the buffer size is 1 so that we make sure that we
        // process one message at a time, blocking if a previous message haven't been processed
        // to force queue ordering.
        // (couldn't find any other guarantee of the ordering of message)
        let (sender, receiver) = channel::<IngressMessages>(1);
        let (internal_sender, internal_receiver) = channel(INTERNAL_CHANNEL_SIZE);
        let internal_sender = Arc::new(internal_sender);
        let control_signaler = ControlSignaler { internal_sender };
        let tunnel = Arc::new(Tunnel::<_, C>::new(private_key, control_signaler.clone()).await?);
        let control_plane = ControlPlane {
            tunnel,
            control_signaler,
        };
        // TODO: We should have some kind of callback from clients to surface errors here
        tokio::spawn(async move { control_plane.start(receiver).await });
        Ok((sender, internal_receiver))
    }
    /// Portal socket path for gateways (used to build the websocket URL).
    fn socket_path() -> &'static str {
        "gateway"
    }
}

View File

@@ -0,0 +1,21 @@
//! Main connlib library for gateway.
use control::ControlPlane;
use messages::EgressMessages;
use messages::IngressMessages;
mod control;
mod messages;
/// Session type for gateway.
///
/// For more information see libs_common docs on [Session][libs_common::Session].
// TODO: Still working on gateway messages
pub type Session<C> = libs_common::Session<
    ControlPlane<C>, // control-plane implementation (T)
    IngressMessages, // ingress message type (U)
    EgressMessages,  // egress message type (V)
    // NOTE(review): the last two parameters (reply type R and unified type M)
    // reuse IngressMessages while the gateway protocol is still in flux.
    IngressMessages,
    IngressMessages,
>;
pub use libs_common::{error_type::ErrorType, Callbacks, Error, ResourceList, TunnelAddresses};

View File

@@ -0,0 +1,138 @@
use std::net::IpAddr;
use firezone_tunnel::RTCSessionDescription;
use libs_common::messages::{Id, Interface, Peer, Relay, ResourceDescription};
use serde::{Deserialize, Serialize};
// TODO: Should this have a resource?
/// Payload of the `init` ingress event: the tunnel interface configuration
/// plus the masquerade flags for each IP family.
#[derive(Debug, PartialEq, Eq, Deserialize, Serialize, Clone)]
pub struct InitGateway {
    pub interface: Interface,
    pub ipv4_masquerade_enabled: bool,
    pub ipv6_masquerade_enabled: bool,
}
/// Actor on whose behalf a connection is requested; only its id is carried.
#[derive(Debug, Deserialize, Serialize, Clone, PartialEq, Eq)]
pub struct Actor {
    pub id: Id,
}
/// Client device initiating a connection: its id, the SDP offer it produced
/// and its wireguard peer parameters.
#[derive(Debug, Deserialize, Serialize, Clone)]
pub struct Device {
    pub id: Id,
    pub rtc_session_description: RTCSessionDescription,
    pub peer: Peer,
}
// `rtc_session_description` is deliberately excluded from equality because
// `RTCSessionDescription` implements neither `PartialEq` nor `Eq`; this will
// probably change in the future.
impl PartialEq for Device {
    fn eq(&self, other: &Self) -> bool {
        let same_id = self.id == other.id;
        same_id && self.peer == other.peer
    }
}

impl Eq for Device {}
/// Connection request relayed to a gateway: who is asking (`actor`/`device`),
/// which resource is targeted, and which relays may be used for ICE.
#[derive(Debug, Deserialize, Serialize, Clone, PartialEq, Eq)]
pub struct RequestConnection {
    pub actor: Actor,
    pub relays: Vec<Relay>,
    pub resource: ResourceDescription,
    pub device: Device,
}
/// A destination expressed either as a DNS name or as a set of IP addresses.
// NOTE(review): not referenced elsewhere in this file; presumably used by
// other gateway messages — confirm before removing.
#[derive(Debug, Deserialize, Serialize, Clone, PartialEq, Eq)]
pub enum Destination {
    DnsName(String),
    Ip(Vec<IpAddr>),
}
/// Batch of per-peer traffic metrics reported by the gateway.
#[derive(Debug, Deserialize, Serialize, Clone, PartialEq, Eq)]
pub struct Metrics {
    peers_metrics: Vec<Metric>,
}
/// Traffic counters for one (client, resource) pair.
// NOTE(review): counters are u32 and will wrap past ~4 GiB — confirm whether
// the portal expects deltas or cumulative totals.
#[derive(Debug, Deserialize, Serialize, Clone, PartialEq, Eq)]
pub struct Metric {
    pub client_id: Id,
    pub resource_id: Id,
    pub rx_bytes: u32,
    pub tx_bytes: u32,
}
/// Payload of the `remove_resource` event: id of the resource to drop.
#[derive(Debug, Deserialize, Serialize, Clone, PartialEq, Eq)]
pub struct RemoveResource {
    pub id: Id,
}
// Messages a gateway can receive from the control plane over its channel.
// Serialized with an adjacent `event`/`payload` tag pair (Phoenix-style).
#[derive(Debug, Deserialize, Serialize, Clone, PartialEq, Eq)]
#[serde(rename_all = "snake_case", tag = "event", content = "payload")]
// TODO: We will need to re-visit webrtc-rs
#[allow(clippy::large_enum_variant)]
pub enum IngressMessages {
    Init(InitGateway),
    RequestConnection(RequestConnection),
    AddResource(ResourceDescription),
    RemoveResource(RemoveResource),
    UpdateResource(ResourceDescription),
}
// These messages can be sent from a gateway
// to the control plane.
#[derive(Debug, Deserialize, Serialize, Clone)]
#[serde(rename_all = "snake_case", tag = "event", content = "payload")]
// TODO: We will need to re-visit webrtc-rs
#[allow(clippy::large_enum_variant)]
pub enum EgressMessages {
    ConnectionReady(ConnectionReady),
    Metrics(Metrics),
}
/// Sent once the gateway has an SDP answer ready for a given client.
#[derive(Debug, Deserialize, Serialize, Clone)]
pub struct ConnectionReady {
    pub client_id: Id,
    pub gateway_rtc_sdp: RTCSessionDescription,
}
#[cfg(test)]
mod test {
    use libs_common::{control::PhoenixMessage, messages::Interface};

    use super::{IngressMessages, InitGateway};

    /// Deserializing a raw `init` Phoenix message must produce the same value
    /// as the hand-constructed `IngressMessages::Init`.
    #[test]
    fn init_phoenix_message() {
        let m = PhoenixMessage::new(
            "gateway:83d28051-324e-48fe-98ed-19690899b3b6",
            IngressMessages::Init(InitGateway {
                interface: Interface {
                    ipv4: "100.115.164.78".parse().unwrap(),
                    ipv6: "fd00:2011:1111::2c:f6ab".parse().unwrap(),
                    upstream_dns: vec![],
                },
                ipv4_masquerade_enabled: true,
                ipv6_masquerade_enabled: true,
            }),
        );
        // Note: `upstream_dns` is absent from the JSON, exercising its default.
        let message = r#"{
"event": "init",
"payload": {
"interface": {
"ipv4": "100.115.164.78",
"ipv6": "fd00:2011:1111::2c:f6ab"
},
"ipv4_masquerade_enabled": true,
"ipv6_masquerade_enabled": true
},
"ref": null,
"topic": "gateway:83d28051-324e-48fe-98ed-19690899b3b6"
}"#;
        let ingress_message: PhoenixMessage<IngressMessages, ()> =
            serde_json::from_str(message).unwrap();
        assert_eq!(m, ingress_message);
    }
}

View File

@@ -0,0 +1,40 @@
[package]
name = "firezone-tunnel"
version = "0.1.0"
edition = "2021"
[dependencies]
async-trait = { version = "0.1", default-features = false }
tokio = { version = "1.27", default-features = false, features = ["rt", "rt-multi-thread", "sync"] }
thiserror = { version = "1.0", default-features = false }
rand_core = { version = "0.6", default-features = false, features = ["getrandom"] }
serde = { version = "1.0", default-features = false, features = ["derive", "std"] }
futures = { version = "0.3", default-features = false, features = ["std", "async-await", "executor"] }
futures-util = { version = "0.3", default-features = false, features = ["std", "async-await", "async-await-macro"] }
tracing = { version = "0.1", default-features = false, features = ["std", "attributes"] }
parking_lot = { version = "0.12", default-features = false }
bytes = { version = "1.4", default-features = false, features = ["std"] }
itertools = { version = "0.10", default-features = false, features = ["use_std"] }
libs-common = { path = "../common" }
libc = { version = "0.2", default-features = false, features = ["std", "const-extern-fn", "extra_traits"] }
ip_network = { version = "0.4", default-features = false }
ip_network_table = { version = "0.2", default-features = false }
boringtun = { workspace = true }
# TODO: research replacing for https://github.com/algesten/str0m
webrtc = { version = "0.8" }
# Linux tunnel dependencies
[target.'cfg(target_os = "linux")'.dependencies]
netlink-packet-route = { version = "0.15", default-features = false }
netlink-packet-core = { version = "0.5", default-features = false }
rtnetlink = { version = "0.12", default-features = false, features = ["tokio_socket"] }
# Android tunnel dependencies
[target.'cfg(target_os = "android")'.dependencies]
android_logger = "0.13"
log = "0.4.14"
# Windows tunnel dependencies
[target.'cfg(target_os = "windows")'.dependencies]
wintun = "0.2.1"

View File

@@ -0,0 +1,314 @@
use boringtun::{
noise::Tunn,
x25519::{PublicKey, StaticSecret},
};
use std::sync::Arc;
use libs_common::{
error_type::ErrorType::Recoverable,
messages::{Id, Key, Relay, RequestConnection},
Callbacks, Error, Result,
};
use rand_core::OsRng;
use webrtc::{
data_channel::RTCDataChannel,
ice_transport::{ice_credential_type::RTCIceCredentialType, ice_server::RTCIceServer},
peer_connection::{
configuration::RTCConfiguration, peer_connection_state::RTCPeerConnectionState,
sdp::session_description::RTCSessionDescription, RTCPeerConnection,
},
};
use crate::{peer::Peer, ControlSignal, PeerConfig, Tunnel};
impl<C: ControlSignal, CB: Callbacks> Tunnel<C, CB>
where
    C: Send + Sync + 'static,
    CB: Send + Sync + 'static,
{
    /// Wires an opened WebRTC data channel into wireguard: builds the `Tunn`
    /// state for the peer, registers it under each of its IPs and starts the
    /// per-peer packet forwarding task.
    async fn handle_channel_open(
        self: &Arc<Self>,
        data_channel: Arc<RTCDataChannel>,
        index: u32,
        peer_config: PeerConfig,
    ) -> Result<()> {
        // FIXME: `expect("TODO")` — detach() is available because
        // `detach_data_channels()` is enabled on the SettingEngine; a real
        // error should be surfaced instead of panicking.
        let channel = data_channel.detach().await.expect("TODO");
        let tunn = Tunn::new(
            self.private_key.clone(),
            peer_config.public_key,
            Some(peer_config.preshared_key.to_bytes()),
            peer_config.persistent_keepalive,
            index,
            None,
        )?;
        let peer = Arc::new(Peer::from_config(
            tunn,
            index,
            &peer_config,
            Arc::clone(&channel),
        ));
        {
            // Scope the write lock so it is released before spawning the
            // peer handler below.
            let mut peers_by_ip = self.peers_by_ip.write();
            for ip in peer_config.ips {
                peers_by_ip.insert(ip, Arc::clone(&peer));
            }
        }
        self.start_peer_handler(Arc::clone(&peer));
        Ok(())
    }

    /// Creates a new `RTCPeerConnection` configured with the given relays as
    /// ICE servers (STUN or TURN-with-credentials).
    #[tracing::instrument(level = "trace", skip(self))]
    async fn initialize_peer_request(
        self: &Arc<Self>,
        relays: Vec<Relay>,
    ) -> Result<Arc<RTCPeerConnection>> {
        let config = RTCConfiguration {
            ice_servers: relays
                .into_iter()
                .map(|srv| match srv {
                    Relay::Stun(stun) => RTCIceServer {
                        urls: vec![stun.uri],
                        ..Default::default()
                    },
                    Relay::Turn(turn) => RTCIceServer {
                        urls: vec![turn.uri],
                        username: turn.username,
                        credential: turn.password,
                        // TODO: check what this is used for
                        credential_type: RTCIceCredentialType::Password,
                    },
                })
                .collect(),
            ..Default::default()
        };
        let peer_connection = Arc::new(self.webrtc_api.new_peer_connection(config).await?);
        // NOTE(review): this no-op handler is overwritten by
        // `set_connection_state_update` at both call sites.
        peer_connection.on_peer_connection_state_change(Box::new(|_s| {
            Box::pin(async {
                // Respond with failure to control plane and remove peer
            })
        }));
        Ok(peer_connection)
    }

    /// Logs connection-state transitions; currently only warns on `Failed`.
    #[tracing::instrument(level = "trace", skip(self))]
    fn handle_connection_state_update(self: &Arc<Self>, state: RTCPeerConnectionState) {
        tracing::trace!("Peer Connection State has changed: {state}");
        if state == RTCPeerConnectionState::Failed {
            // Wait until PeerConnection has had no network activity for 30 seconds or another failure. It may be reconnected using an ICE Restart.
            // Use webrtc.PeerConnectionStateDisconnected if you are interested in detecting faster timeout.
            // Note that the PeerConnection may come back from PeerConnectionStateDisconnected.
            tracing::warn!("Peer Connection has gone to failed exiting");
        }
    }

    /// Installs the connection-state callback, routing state changes to
    /// [Self::handle_connection_state_update].
    #[tracing::instrument(level = "trace", skip(self))]
    fn set_connection_state_update(self: &Arc<Self>, peer_connection: &Arc<RTCPeerConnection>) {
        let tunnel = Arc::clone(self);
        peer_connection.on_peer_connection_state_change(Box::new(
            move |state: RTCPeerConnectionState| {
                let tunnel = Arc::clone(&tunnel);
                Box::pin(async move { tunnel.handle_connection_state_update(state) })
            },
        ));
    }

    /// Initiate an ice connection request.
    ///
    /// Given a resource id and a list of relay creates a [RequestConnection]
    /// and prepares the tunnel to handle the connection once initiated.
    ///
    /// # Note
    /// This function blocks until all ICE candidates are gathered so it might block for a long time.
    ///
    /// # Parameters
    /// - `resource_id`: Id of the resource we are going to request the connection to.
    /// - `relays`: The list of relays used for that connection.
    ///
    /// # Returns
    /// A [RequestConnection] that should be sent to the gateway through the control-plane.
    #[tracing::instrument(level = "trace", skip(self))]
    pub async fn request_connection(
        self: &Arc<Self>,
        resource_id: Id,
        relays: Vec<Relay>,
    ) -> Result<RequestConnection> {
        let peer_connection = self.initialize_peer_request(relays).await?;
        self.set_connection_state_update(&peer_connection);
        let data_channel = peer_connection.create_data_channel("data", None).await?;
        let d = Arc::clone(&data_channel);
        let tunnel = Arc::clone(self);
        // A fresh preshared key is generated per connection; the gateway
        // receives it inside the returned [RequestConnection].
        let preshared_key = StaticSecret::random_from_rng(OsRng);
        let p_key = preshared_key.clone();
        // FIXME: `expect("TODO")` — the resource should exist at this point,
        // but an unknown id would panic here.
        let resource_description = tunnel
            .resources
            .read()
            .get_by_id(&resource_id)
            .expect("TODO")
            .clone();
        data_channel.on_open(Box::new(move || {
            tracing::trace!("new data channel opened!");
            Box::pin(async move {
                let index = tunnel.next_index();
                // The gateway's public key is delivered out-of-band via
                // `recieved_offer_response`; without it the wireguard
                // handshake cannot be built.
                let Some(gateway_public_key) = tunnel.gateway_public_keys.lock().remove(&resource_id) else {
                    tunnel.cleanup_connection(resource_id);
                    tracing::warn!("Opened ICE channel with gateway without ever receiving public key");
                    CB::on_error(&Error::ControlProtocolError, Recoverable);
                    return;
                };
                let peer_config = PeerConfig {
                    persistent_keepalive: None,
                    public_key: gateway_public_key,
                    ips: resource_description.ips(),
                    preshared_key: p_key,
                };
                if let Err(e) = tunnel.handle_channel_open(d, index, peer_config).await {
                    tracing::error!("Couldn't establish wireguard link after channel was opened: {e}");
                    CB::on_error(&e, Recoverable);
                    tunnel.cleanup_connection(resource_id);
                }
                tunnel.awaiting_connection.lock().remove(&resource_id);
            })
        }));
        let offer = peer_connection.create_offer(None).await?;
        let mut gather_complete = peer_connection.gathering_complete_promise().await;
        peer_connection.set_local_description(offer).await?;
        // FIXME: timeout here! (but probably don't even bother because we need to implement ICE trickle)
        let _ = gather_complete.recv().await;
        let local_description = peer_connection
            .local_description()
            .await
            .expect("set_local_description was just called above");
        self.peer_connections
            .lock()
            .insert(resource_id, peer_connection);
        Ok(RequestConnection {
            resource_id,
            device_preshared_key: Key(preshared_key.to_bytes()),
            device_rtc_session_description: local_description,
        })
    }

    /// Called when a response to [Tunnel::request_connection] is ready.
    ///
    /// Once this is called if everything goes fine a new tunnel should be started between the 2 peers.
    ///
    /// # Parameters
    /// - `resource_id`: Id of the resource that responded.
    /// - `rtc_sdp`: Remote SDP.
    /// - `gateway_public_key`: Public key of the gateway that is handling that resource for this connection.
    // NOTE(review): name is misspelled ("recieved"); renaming would break
    // callers, so it is kept as-is for now.
    #[tracing::instrument(level = "trace", skip(self))]
    pub async fn recieved_offer_response(
        self: &Arc<Self>,
        resource_id: Id,
        rtc_sdp: RTCSessionDescription,
        gateway_public_key: PublicKey,
    ) -> Result<()> {
        let peer_connection = self
            .peer_connections
            .lock()
            .get(&resource_id)
            .ok_or(Error::UnknownResource)?
            .clone();
        // Stash the gateway key first; the data channel's on_open handler
        // (set in request_connection) picks it up.
        self.gateway_public_keys
            .lock()
            .insert(resource_id, gateway_public_key);
        peer_connection.set_remote_description(rtc_sdp).await?;
        Ok(())
    }

    /// Removes client's id from connections we are expecting.
    pub fn cleanup_peer_connection(self: &Arc<Self>, client_id: Id) {
        self.peer_connections.lock().remove(&client_id);
    }

    /// Accept a connection request from a client.
    ///
    /// Sets a connection to a remote SDP, creates the local SDP
    /// and returns it.
    ///
    /// # Note
    ///
    /// This function blocks until it gathers all the ICE candidates
    /// so it might block for a long time.
    ///
    /// # Parameters
    /// - `sdp_session`: Remote session description.
    /// - `peer`: Configuration for the remote peer.
    /// - `relays`: List of relays to use with this connection.
    /// - `client_id`: UUID of the remote client.
    ///
    /// # Returns
    /// An [RTCSessionDescription] of the local sdp, with candidates gathered.
    pub async fn set_peer_connection_request(
        self: &Arc<Self>,
        sdp_session: RTCSessionDescription,
        peer: PeerConfig,
        relays: Vec<Relay>,
        client_id: Id,
    ) -> Result<RTCSessionDescription> {
        let peer_connection = self.initialize_peer_request(relays).await?;
        let index = self.next_index();
        let tunnel = Arc::clone(self);
        self.peer_connections
            .lock()
            .insert(client_id, Arc::clone(&peer_connection));
        self.set_connection_state_update(&peer_connection);
        // Gateway side: the client creates the data channel, we react to it.
        peer_connection.on_data_channel(Box::new(move |d| {
            tracing::trace!("data channel created!");
            let data_channel = Arc::clone(&d);
            let peer = peer.clone();
            let tunnel = Arc::clone(&tunnel);
            Box::pin(async move {
                d.on_open(Box::new(move || {
                    tracing::trace!("new data channel opened!");
                    Box::pin(async move {
                        if let Err(e) = tunnel.handle_channel_open(data_channel, index, peer).await
                        {
                            CB::on_error(&e, Recoverable);
                            tracing::error!(
                                "Couldn't establish wireguard link after opening channel: {e}"
                            );
                            // Note: handle_channel_open can only error out before insert to peers_by_ip
                            // otherwise we would need to clean that up too!
                            tunnel.peer_connections.lock().remove(&client_id);
                        }
                    })
                }))
            })
        }));
        peer_connection.set_remote_description(sdp_session).await?;
        // Kick off gathering before creating/setting the answer, then wait
        // for it to complete so the returned SDP contains all candidates.
        let mut gather_complete = peer_connection.gathering_complete_promise().await;
        let answer = peer_connection.create_answer(None).await?;
        peer_connection.set_local_description(answer).await?;
        let _ = gather_complete.recv().await;
        let local_desc = peer_connection
            .local_description()
            .await
            .ok_or(Error::ConnectionEstablishError)?;
        Ok(local_desc)
    }

    /// Clean up a connection to a resource.
    pub fn cleanup_connection(&self, resource_id: Id) {
        self.awaiting_connection.lock().remove(&resource_id);
        self.peer_connections.lock().remove(&resource_id);
    }
}

View File

@@ -0,0 +1,70 @@
use std::sync::Arc;
use libs_common::{Error, Result};
use tokio::io::unix::AsyncFd;
use crate::tun::{IfaceConfig, IfaceDevice};
/// Async handle for packet I/O on the TUN device, backed by tokio's `AsyncFd`
/// readiness notifications over the shared [IfaceDevice].
#[derive(Debug)]
pub(crate) struct DeviceChannel(AsyncFd<Arc<IfaceDevice>>);
impl DeviceChannel {
    /// Returns the MTU currently reported by the underlying interface.
    pub(crate) async fn mtu(&self) -> Result<usize> {
        self.0.get_ref().mtu().await
    }

    /// Reads one packet from the TUN device into `out`, awaiting read
    /// readiness and retrying on spurious wakeups. Returns the number of
    /// bytes read.
    pub(crate) async fn read(&self, out: &mut [u8]) -> std::io::Result<usize> {
        loop {
            let mut guard = self.0.readable().await?;
            match guard.try_io(|inner| {
                inner.get_ref().read(out).map_err(|err| match err {
                    // `try_io` needs the raw io::Error to classify WouldBlock;
                    // any non-IfaceRead error here is a programming error.
                    Error::IfaceRead(e) => e,
                    _ => panic!("Unexpected error while trying to read network interface"),
                })
            }) {
                Ok(result) => break result.map(|e| e.len()),
                Err(_would_block) => continue,
            }
        }
    }

    /// Shared readiness/retry loop for `write4`/`write6`, which are identical
    /// except for which device write is invoked. A device return of 0 is
    /// mapped to the last OS error.
    async fn write(&self, buf: &[u8], use_v4: bool) -> std::io::Result<usize> {
        loop {
            let mut guard = self.0.writable().await?;
            match guard.try_io(|inner| {
                let dev = inner.get_ref();
                let written = if use_v4 { dev.write4(buf) } else { dev.write6(buf) };
                match written {
                    0 => Err(std::io::Error::last_os_error()),
                    i => Ok(i),
                }
            }) {
                Ok(result) => break result,
                Err(_would_block) => continue,
            }
        }
    }

    /// Writes an IPv4 packet to the device; returns bytes written.
    pub(crate) async fn write4(&self, buf: &[u8]) -> std::io::Result<usize> {
        self.write(buf, true).await
    }

    /// Writes an IPv6 packet to the device; returns bytes written.
    pub(crate) async fn write6(&self, buf: &[u8]) -> std::io::Result<usize> {
        self.write(buf, false).await
    }
}
/// Creates the TUN device ("utun"), puts it in non-blocking mode, and returns
/// the configuration handle together with the async packet channel.
pub(crate) async fn create_iface() -> Result<(IfaceConfig, DeviceChannel)> {
    let dev = Arc::new(IfaceDevice::new("utun").await?.set_non_blocking()?);
    let device_channel = DeviceChannel(AsyncFd::new(Arc::clone(&dev))?);
    Ok((IfaceConfig(dev), device_channel))
}

View File

@@ -0,0 +1,27 @@
use crate::tun::IfaceConfig;
use libs_common::Result;
/// Windows stub of the TUN packet channel; not implemented yet.
#[derive(Debug)]
pub(crate) struct DeviceChannel;
// All methods mirror the unix implementation's signatures but are stubs.
impl DeviceChannel {
    pub(crate) async fn mtu(&self) -> Result<usize> {
        todo!()
    }

    pub(crate) async fn read(&self, _out: &mut [u8]) -> std::io::Result<usize> {
        todo!()
    }

    pub(crate) async fn write4(&self, _buf: &[u8]) -> std::io::Result<usize> {
        todo!()
    }

    pub(crate) async fn write6(&self, _buf: &[u8]) -> std::io::Result<usize> {
        todo!()
    }
}
/// Windows stub; interface creation is not implemented yet.
pub(crate) async fn create_iface() -> Result<(IfaceConfig, DeviceChannel)> {
    todo!()
}

View File

@@ -0,0 +1,61 @@
use rand_core::{OsRng, RngCore};
// A basic linear-feedback shift register implemented as xorshift, used to
// distribute peer indexes across the 24-bit address space reserved for peer
// identification.
// The purpose is to obscure the total number of peers using the system and to
// ensure it requires a non-trivial amount of processing power and/or samples
// to guess other peers' indices. Anything more ambitious than this is wasted
// with only 24 bits of space.
pub(crate) struct IndexLfsr {
    initial: u32, // seed the sequence started from; used to detect wrap-around
    lfsr: u32,    // current LFSR state (always non-zero)
    mask: u32,    // random mask XORed into every produced index
}
impl IndexLfsr {
    /// Generate a random 24-bit nonzero integer
    fn random_index() -> u32 {
        const LFSR_MAX: u32 = 0xffffff; // 24-bit seed
        loop {
            let i = OsRng.next_u32() & LFSR_MAX;
            if i > 0 {
                // LFSR seed must be non-zero
                break i;
            }
        }
    }

    /// Generate the next value in the pseudorandom sequence
    pub(crate) fn next(&mut self) -> u32 {
        // 24-bit polynomial for randomness. This is arbitrarily chosen to
        // inject bitflips into the value.
        const LFSR_POLY: u32 = 0xd80000; // 24-bit polynomial
        debug_assert_ne!(self.lfsr, 0);
        let value = self.lfsr - 1; // lfsr will never have value of 0
        // Galois LFSR step: shift right, conditionally XOR in the polynomial
        // depending on the bit shifted out.
        self.lfsr = (self.lfsr >> 1) ^ ((0u32.wrapping_sub(self.lfsr & 1u32)) & LFSR_POLY);
        // Returning to the initial seed means the sequence has cycled and the
        // index space is exhausted.
        assert!(self.lfsr != self.initial, "Too many peers created");
        // Mask the output so consecutive indices don't expose the raw sequence.
        value ^ self.mask
    }
}
impl Default for IndexLfsr {
fn default() -> Self {
let seed = Self::random_index();
IndexLfsr {
initial: seed,
lfsr: seed,
mask: Self::random_index(),
}
}
}
/// Checks that a packet carries the receiver index we expect for this peer
/// (the low 8 bits are not part of the index), warning on mismatch.
pub(crate) fn check_packet_index(recv_idx: u32, expected_idx: u32) -> bool {
    let index_matches = (recv_idx >> 8) == expected_idx;
    if !index_matches {
        tracing::warn!("receiver index doesn't match peer index, something fishy is going on");
    }
    index_matches
}

View File

@@ -0,0 +1,511 @@
//! Connlib tunnel implementation.
//!
//! This is both the wireguard and ICE implementation that should work in tandem.
//! [Tunnel] is the main entry-point for this crate.
use ip_network::IpNetwork;
use ip_network_table::IpNetworkTable;
use boringtun::{
noise::{
errors::WireGuardError, handshake::parse_handshake_anon, rate_limiter::RateLimiter,
Packet, Tunn, TunnResult,
},
x25519::{PublicKey, StaticSecret},
};
use libs_common::{
error_type::ErrorType::{Fatal, Recoverable},
Callbacks,
};
use async_trait::async_trait;
use bytes::Bytes;
use itertools::Itertools;
use parking_lot::{Mutex, RwLock};
use peer::Peer;
use resource_table::ResourceTable;
use tokio::time::MissedTickBehavior;
use webrtc::{
api::{
interceptor_registry::register_default_interceptors, media_engine::MediaEngine,
setting_engine::SettingEngine, APIBuilder, API,
},
interceptor::registry::Registry,
peer_connection::RTCPeerConnection,
};
use std::{
collections::{HashMap, HashSet},
marker::PhantomData,
net::IpAddr,
sync::Arc,
time::Duration,
};
use libs_common::{
messages::{Id, Interface as InterfaceConfig, ResourceDescription},
Result,
};
use device_channel::{create_iface, DeviceChannel};
use tun::IfaceConfig;
pub use webrtc::peer_connection::sdp::session_description::RTCSessionDescription;
use index::{check_packet_index, IndexLfsr};
mod control_protocol;
mod index;
mod peer;
mod resource_table;

// TODO: For now all tunnel implementations are the same
// will divide when we start introducing differences.
// Platform-specific TUN device implementation, selected at compile time via
// `#[path]`.
#[cfg(target_os = "windows")]
#[path = "tun_win.rs"]
mod tun;

#[cfg(any(target_os = "macos", target_os = "ios"))]
#[path = "tun_darwin.rs"]
mod tun;

#[cfg(target_os = "linux")]
#[path = "tun_linux.rs"]
mod tun;

#[cfg(target_os = "android")]
#[path = "tun_android.rs"]
mod tun;

// Async device channel: a real implementation for unix-likes, stubs on
// windows.
#[cfg(any(
    target_os = "macos",
    target_os = "ios",
    target_os = "linux",
    target_os = "android"
))]
#[path = "device_channel_unix.rs"]
mod device_channel;

#[cfg(target_os = "windows")]
#[path = "device_channel_win.rs"]
mod device_channel;
/// How often the handshake rate limiter's packet counters are reset.
const RESET_PACKET_COUNT_INTERVAL: Duration = Duration::from_secs(1);
/// How often per-peer wireguard timers are serviced.
// NOTE(review): name contains a typo ("TIEMRS"); kept because it is referenced
// elsewhere in this file.
const REFRESH_PEERS_TIEMRS_INTERVAL: Duration = Duration::from_secs(1);
// Note: Taken from boringtun
const HANDSHAKE_RATE_LIMIT: u64 = 100;
/// Maximum UDP payload size; used to size packet scratch buffers.
const MAX_UDP_SIZE: usize = (1 << 16) - 1;
/// Represents the tunnel's actual peer config.
/// Obtained from libs_common's Peer.
#[derive(Clone)]
pub struct PeerConfig {
    pub(crate) persistent_keepalive: Option<u16>,
    pub(crate) public_key: PublicKey,
    // Networks routed to this peer (both IPv4 and IPv6).
    pub(crate) ips: Vec<IpNetwork>,
    pub(crate) preshared_key: StaticSecret,
}
impl From<libs_common::messages::Peer> for PeerConfig {
fn from(value: libs_common::messages::Peer) -> Self {
Self {
persistent_keepalive: value.persistent_keepalive,
public_key: value.public_key.0.into(),
ips: vec![value.ipv4.into(), value.ipv6.into()],
preshared_key: value.preshared_key.0.into(),
}
}
}
/// Trait used for out-going signals to control plane that are **required** to be made from inside the tunnel.
///
/// Generally, we try to return from the functions here rather than using this callback.
#[async_trait]
pub trait ControlSignal {
    /// Signals to the control plane an intent to initiate a connection to the given resource.
    ///
    /// Used when a packet is found addressed to a resource we have no connection established with,
    /// but which is within the list of resources available for the client.
    async fn signal_connection_to(&self, resource: &ResourceDescription) -> Result<()>;
}
/// Tunnel is a wireguard state machine that uses webrtc's ICE channels instead of UDP sockets
/// to communicate between peers.
pub struct Tunnel<C: ControlSignal, CB: Callbacks> {
    // Generator for unique 24-bit peer indices.
    next_index: Mutex<IndexLfsr>,
    // We use a tokio's mutex here since it makes things easier and we only need it
    // during init, so the performance hit is negligible
    iface_config: tokio::sync::Mutex<IfaceConfig>,
    // Async packet I/O handle for the TUN device.
    device_channel: Arc<DeviceChannel>,
    // Rate limiter for wireguard handshake packets.
    rate_limiter: Arc<RateLimiter>,
    private_key: StaticSecret,
    public_key: PublicKey,
    // Longest-prefix-match table routing destination IPs to peers.
    peers_by_ip: RwLock<IpNetworkTable<Arc<Peer>>>,
    // Open peer connections keyed by resource/client id.
    peer_connections: Mutex<HashMap<Id, Arc<RTCPeerConnection>>>,
    // Ids for which a connection is being set up; prevents duplicate requests.
    awaiting_connection: Mutex<HashSet<Id>>,
    webrtc_api: API,
    // Resources reachable through this tunnel.
    resources: RwLock<ResourceTable>,
    control_signaler: C,
    // Gateway public keys stashed until the data channel opens.
    gateway_public_keys: Mutex<HashMap<Id, PublicKey>>,
    // `CB` is only used for its associated callback functions.
    _phantom: PhantomData<CB>,
}
impl<C: ControlSignal, CB: Callbacks> Tunnel<C, CB>
where
C: Send + Sync + 'static,
CB: Send + Sync + 'static,
{
/// Creates a new tunnel.
///
/// # Parameters
/// - `private_key`: wireguard's private key.
/// - `control_signaler`: this is used to send SDP from the tunnel to the control plane.
#[tracing::instrument(level = "trace", skip(private_key, control_signaler))]
pub async fn new(private_key: StaticSecret, control_signaler: C) -> Result<Self> {
let public_key = (&private_key).into();
let rate_limiter = Arc::new(RateLimiter::new(&public_key, HANDSHAKE_RATE_LIMIT));
let peers_by_ip = RwLock::new(IpNetworkTable::new());
let next_index = Default::default();
let (iface_config, device_channel) = create_iface().await?;
let iface_config = tokio::sync::Mutex::new(iface_config);
let device_channel = Arc::new(device_channel);
let peer_connections = Default::default();
let resources = Default::default();
let awaiting_connection = Default::default();
let gateway_public_keys = Default::default();
// ICE
let mut media_engine = MediaEngine::default();
// Register default codecs (TODO: We need this?)
media_engine.register_default_codecs()?;
let mut registry = Registry::new();
registry = register_default_interceptors(registry, &mut media_engine)?;
let mut setting_engine = SettingEngine::default();
setting_engine.detach_data_channels();
// TODO: Enable UDPMultiplex (had some problems before)
let webrtc_api = APIBuilder::new()
.with_media_engine(media_engine)
.with_interceptor_registry(registry)
.with_setting_engine(setting_engine)
.build();
Ok(Self {
gateway_public_keys,
rate_limiter,
private_key,
peer_connections,
public_key,
peers_by_ip,
next_index,
webrtc_api,
iface_config,
device_channel,
resources,
awaiting_connection,
control_signaler,
_phantom: PhantomData,
})
}
/// Adds a the given resource to the tunnel.
///
/// Once added, when a packet for the resource is intercepted a new data channel will be created
/// and packets will be wrapped with wireguard and sent through it.
#[tracing::instrument(level = "trace", skip(self))]
pub async fn add_resource(&self, resource_description: ResourceDescription) {
{
let mut iface_config = self.iface_config.lock().await;
for ip in resource_description.ips() {
if let Err(err) = iface_config.add_route(ip).await {
CB::on_error(&err, Fatal);
}
}
}
self.resources.write().insert(resource_description);
}
/// Sets the interface configuration and starts background tasks.
#[tracing::instrument(level = "trace", skip(self))]
pub async fn set_interface(self: &Arc<Self>, config: &InterfaceConfig) -> Result<()> {
{
let mut iface_config = self.iface_config.lock().await;
iface_config
.set_iface_config(config)
.await
.expect("Couldn't initiate interface");
iface_config
.up()
.await
.expect("Couldn't initiate interface");
}
self.start_timers();
self.start_iface_handler();
tracing::trace!("Started background loops");
Ok(())
}
async fn peer_refresh(peer: &Peer, dst_buf: &mut [u8; MAX_UDP_SIZE]) {
let update_timers_result = peer.update_timers(&mut dst_buf[..]);
match update_timers_result {
TunnResult::Done => {}
TunnResult::Err(WireGuardError::ConnectionExpired) => {
tracing::error!("Connection expired");
}
TunnResult::Err(e) => tracing::error!(message = "Timer error", error = ?e),
TunnResult::WriteToNetwork(packet) => peer.send_infallible::<CB>(packet).await,
_ => panic!("Unexpected result from update_timers"),
};
}
fn start_rate_limiter_refresh_timer(self: &Arc<Self>) {
let rate_limiter = self.rate_limiter.clone();
tokio::spawn(async move {
let mut interval = tokio::time::interval(RESET_PACKET_COUNT_INTERVAL);
interval.set_missed_tick_behavior(MissedTickBehavior::Delay);
loop {
rate_limiter.reset_count();
interval.tick().await;
}
});
}
fn start_peers_refresh_timer(self: &Arc<Self>) {
let tunnel = self.clone();
tokio::spawn(async move {
let mut interval = tokio::time::interval(REFRESH_PEERS_TIEMRS_INTERVAL);
interval.set_missed_tick_behavior(MissedTickBehavior::Delay);
let mut dst_buf = [0u8; MAX_UDP_SIZE];
loop {
let peers: Vec<_> = tunnel
.peers_by_ip
.read()
.iter()
.map(|p| p.1)
.unique_by(|p| p.index)
.cloned()
.collect();
for peer in peers {
Self::peer_refresh(&peer, &mut dst_buf).await;
}
interval.tick().await;
}
});
}
fn start_timers(self: &Arc<Self>) {
self.start_rate_limiter_refresh_timer();
self.start_peers_refresh_timer();
}
fn is_wireguard_packet_ok(&self, parsed_packet: &Packet, peer: &Peer) -> bool {
match &parsed_packet {
Packet::HandshakeInit(p) => {
parse_handshake_anon(&self.private_key, &self.public_key, p).is_ok()
}
Packet::HandshakeResponse(p) => check_packet_index(p.receiver_idx, peer.index),
Packet::PacketCookieReply(p) => check_packet_index(p.receiver_idx, peer.index),
Packet::PacketData(p) => check_packet_index(p.receiver_idx, peer.index),
}
}
fn start_peer_handler(self: &Arc<Self>, peer: Arc<Peer>) {
let tunnel = Arc::clone(self);
tokio::spawn(async move {
let mut src_buf = [0u8; MAX_UDP_SIZE];
let mut dst_buf = [0u8; MAX_UDP_SIZE];
// Loop while we have packets on the anonymous connection
while let Ok(size) = peer.channel.read(&mut src_buf[..]).await {
tracing::trace!("read {size} bytes from peer");
// The rate limiter initially checks mac1 and mac2, and optionally asks to send a cookie
let parsed_packet = match tunnel.rate_limiter.verify_packet(
// TODO: Some(addr.ip()) webrtc doesn't expose easily the underlying data channel remote ip
// so for now we don't use it. but we need it for rate limiter although we probably not need it since the data channel
// will only be established to authenticated peers, so the portal could already prevent being ddos'd
// but maybe in that cased we can drop this rate_limiter all together and just use decapsulate
None,
&src_buf[..size],
&mut dst_buf,
) {
Ok(packet) => packet,
Err(TunnResult::WriteToNetwork(cookie)) => {
peer.send_infallible::<CB>(cookie).await;
continue;
}
Err(_) => continue,
};
if !tunnel.is_wireguard_packet_ok(&parsed_packet, &peer) {
continue;
}
let decapsulate_result = peer.tunnel.lock().decapsulate(
// TODO: See comment above
None,
&src_buf[..size],
&mut dst_buf[..],
);
// We found a peer, use it to decapsulate the message+
let mut flush = false;
match decapsulate_result {
TunnResult::Done => {}
TunnResult::Err(_) => continue,
TunnResult::WriteToNetwork(packet) => {
flush = true;
peer.send_infallible::<CB>(packet).await;
}
TunnResult::WriteToTunnelV4(packet, addr) => {
if peer.is_allowed(addr) {
tunnel.write4_device_infallible(packet).await;
}
}
TunnResult::WriteToTunnelV6(packet, addr) => {
if peer.is_allowed(addr) {
tunnel.write6_device_infallible(packet).await;
}
}
};
if flush {
// Flush pending queue
while let TunnResult::WriteToNetwork(packet) = {
let res = peer.tunnel.lock().decapsulate(None, &[], &mut dst_buf[..]);
res
} {
peer.send_infallible::<CB>(packet).await;
}
}
}
});
}
async fn write4_device_infallible(&self, packet: &[u8]) {
if let Err(e) = self.device_channel.write4(packet).await {
CB::on_error(&e.into(), Recoverable);
}
}
async fn write6_device_infallible(&self, packet: &[u8]) {
if let Err(e) = self.device_channel.write6(packet).await {
CB::on_error(&e.into(), Recoverable);
}
}
fn get_resource(&self, buff: &[u8]) -> Option<ResourceDescription> {
// TODO: Check if DNS packet, in that case parse and get dns
let addr = Tunn::dst_address(buff)?;
let resources = self.resources.read();
match addr {
IpAddr::V4(ipv4) => resources.get_by_ip(ipv4).cloned(),
IpAddr::V6(ipv6) => resources.get_by_ip(ipv6).cloned(),
}
}
/// Spawns the task that pumps packets from the tun interface into the
/// per-peer wireguard tunnels: read a packet from the iface, find the peer
/// whose allowed-ips cover its destination, encapsulate, and ship the
/// ciphertext over that peer's data channel. If no peer matches but a known
/// resource does, a new connection to that resource is initiated instead.
fn start_iface_handler(self: &Arc<Self>) {
    let dev = self.clone();
    tokio::spawn(async move {
        loop {
            // Fresh buffers per iteration; `src` holds the cleartext packet
            // read from the iface, `dst` receives the encapsulated output.
            let mut src = [0u8; MAX_UDP_SIZE];
            let mut dst = [0u8; MAX_UDP_SIZE];
            let res = {
                // TODO: We should check here if what we read is a whole packet
                // there's no docs on tun device on when a whole packet is read, is it \n or another thing?
                // found some comments saying that a single read syscall represents a single packet but no docs on that
                // See https://stackoverflow.com/questions/18461365/how-to-read-packet-by-packet-from-linux-tun-tap
                // NOTE(review): the MTU is re-queried on every iteration; if that
                // involves a syscall/netlink round-trip per packet, consider caching.
                match dev.device_channel.mtu().await {
                    Ok(mtu) => match dev.device_channel.read(&mut src[..mtu]).await {
                        Ok(res) => res,
                        Err(err) => {
                            // Read errors are reported but do not kill the pump loop.
                            tracing::error!("Couldn't read packet from interface: {err}");
                            CB::on_error(&err.into(), Recoverable);
                            continue;
                        }
                    },
                    Err(err) => {
                        tracing::error!("Couldn't obtain iface mtu: {err}");
                        CB::on_error(&err, Recoverable);
                        continue;
                    }
                }
            };
            // Packets without a parseable IP destination are silently dropped.
            let dst_addr = match Tunn::dst_address(&src[..res]) {
                Some(addr) => addr,
                None => continue,
            };
            let (encapsulate_result, channel) = {
                // Scope the read lock: it must be released before the awaits below.
                let peers_by_ip = dev.peers_by_ip.read();
                match peers_by_ip.longest_match(dst_addr).map(|p| p.1) {
                    Some(peer) => (
                        peer.tunnel.lock().encapsulate(&src[..res], &mut dst[..]),
                        peer.channel.clone(),
                    ),
                    None => {
                        // We can buffer requests here but will drop them for now and let the upper layer reliability protocol handle this
                        if let Some(resource) = dev.get_resource(&src[..res]) {
                            // We have awaiting connection to prevent a race condition where
                            // create_peer_connection hasn't added the thing to peer_connections
                            // and we are finding another packet to the same address (otherwise we would just use peer_connections here)
                            let mut awaiting_connection = dev.awaiting_connection.lock();
                            let id = resource.id();
                            if !awaiting_connection.contains(&id) {
                                tracing::trace!("Found new intent to send packets to resource with resource-ip: {dst_addr}, initializing connection...");

                                awaiting_connection.insert(id);
                                let dev = Arc::clone(&dev);

                                // Signal the connection attempt off-task so the pump keeps running.
                                tokio::spawn(async move {
                                    if let Err(e) = dev
                                        .control_signaler
                                        .signal_connection_to(&resource)
                                        .await
                                    {
                                        // Not a deadlock because this is a different task
                                        dev.awaiting_connection.lock().remove(&id);
                                        tracing::error!("couldn't start protocol for new connection to resource: {e}");
                                        CB::on_error(&e, Recoverable);
                                    }
                                });
                            }
                        }
                        continue;
                    }
                }
            };
            match encapsulate_result {
                // Done: boringtun consumed the packet without producing output
                // (e.g. it was queued pending a handshake).
                TunnResult::Done => {
                    tracing::trace!(
                        "tunnel for resource corresponding to {dst_addr} was finalized"
                    );
                }
                TunnResult::Err(e) => {
                    tracing::error!(message = "Encapsulate error for resource corresponding to {dst_addr}", error = ?e);
                    CB::on_error(&e.into(), Recoverable);
                }
                TunnResult::WriteToNetwork(packet) => {
                    tracing::trace!("writing iface packet to peer: {dst_addr}");
                    if let Err(e) = channel.write(&Bytes::copy_from_slice(packet)).await {
                        tracing::error!("Couldn't write packet to channel: {e}");
                        CB::on_error(&e.into(), Recoverable);
                    }
                }
                // encapsulate never returns WriteToTunnel* variants, so this is a bug if hit.
                _ => panic!("Unexpected result from encapsulate"),
            };
        }
    });
}
fn next_index(&self) -> u32 {
self.next_index.lock().next()
}
}

View File

@@ -0,0 +1,65 @@
use std::{net::IpAddr, sync::Arc};
use bytes::Bytes;
use ip_network::IpNetwork;
use ip_network_table::IpNetworkTable;
use boringtun::noise::{Tunn, TunnResult};
use libs_common::{
error_type::ErrorType,
Callbacks,
};
use parking_lot::Mutex;
use webrtc::data::data_channel::DataChannel;
use super::PeerConfig;
/// State for a single connected wireguard peer reached over a webrtc data channel.
pub(crate) struct Peer {
    // Wireguard session state; behind a mutex because encapsulate/decapsulate need `&mut Tunn`.
    pub tunnel: Mutex<Tunn>,
    // Index assigned to this peer — presumably unique per tunnel; confirm against the allocator.
    pub index: u32,
    // Longest-prefix-match table of the networks this peer is allowed to use (see `is_allowed`).
    pub allowed_ips: IpNetworkTable<()>,
    // Data channel the encapsulated packets are written to.
    pub channel: Arc<DataChannel>,
}
impl Peer {
    /// Writes `data` to the peer's data channel; failures are logged and
    /// reported through the callback handler instead of being propagated.
    pub(crate) async fn send_infallible<CB: Callbacks>(&self, data: &[u8]) {
        let payload = Bytes::copy_from_slice(data);
        if let Err(e) = self.channel.write(&payload).await {
            tracing::error!("Couldn't send packet to connected peer: {e}");
            CB::on_error(&e.into(), ErrorType::Recoverable);
        }
    }

    /// Builds a [`Peer`] from a wireguard session and the negotiated peer config.
    pub(crate) fn from_config(
        tunnel: Tunn,
        index: u32,
        config: &PeerConfig,
        channel: Arc<DataChannel>,
    ) -> Self {
        Self::new(Mutex::new(tunnel), index, config.ips.clone(), channel)
    }

    /// Creates a peer, indexing `ips` into a longest-prefix-match table for `is_allowed`.
    pub(crate) fn new(
        tunnel: Mutex<Tunn>,
        index: u32,
        ips: Vec<IpNetwork>,
        channel: Arc<DataChannel>,
    ) -> Peer {
        let mut allowed_ips = IpNetworkTable::new();
        for network in ips {
            allowed_ips.insert(network, ());
        }

        Peer {
            tunnel,
            index,
            allowed_ips,
            channel,
        }
    }

    /// Advances the wireguard timers; any handshake/keepalive bytes that must
    /// be sent are written into `dst`.
    pub(crate) fn update_timers<'a>(&self, dst: &'a mut [u8]) -> TunnResult<'a> {
        self.tunnel.lock().update_timers(dst)
    }

    /// Whether `addr` falls inside any of this peer's allowed networks.
    pub(crate) fn is_allowed(&self, addr: impl Into<IpAddr>) -> bool {
        self.allowed_ips.longest_match(addr).is_some()
    }
}

View File

@@ -0,0 +1,151 @@
//! A resource table is a custom type that allows us to store a resource under an id and possibly multiple ips or even network ranges
use std::{collections::HashMap, net::IpAddr, ptr::NonNull};
use ip_network_table::IpNetworkTable;
use libs_common::messages::{Id, ResourceDescription};
/// The resource table type
///
/// This is specifically crafted for our use case, so the API is particularly made for us and not generic.
///
/// A single owning map (`id_table`) holds every [`ResourceDescription`]; the ip and
/// dns indexes only store the resource's `Id` and resolve through `id_table`.
///
/// The previous revision stored raw `NonNull` pointers into `id_table`'s values and
/// needed `unsafe impl Send/Sync`. That was unsound: `HashMap::insert` may rehash and
/// move every value, leaving all stored pointers dangling (UB on the next lookup).
/// Indexing by `Id` keeps the exact same public API with no `unsafe` at all.
pub(crate) struct ResourceTable {
    id_table: HashMap<Id, ResourceDescription>,
    network_table: IpNetworkTable<Id>,
    dns_name: HashMap<String, Id>,
}

impl Default for ResourceTable {
    fn default() -> ResourceTable {
        ResourceTable::new()
    }
}

impl ResourceTable {
    /// Creates a new `ResourceTable`
    pub fn new() -> ResourceTable {
        ResourceTable {
            network_table: IpNetworkTable::new(),
            id_table: HashMap::new(),
            dns_name: HashMap::new(),
        }
    }

    /// Gets the resource by ip
    pub fn get_by_ip(&self, ip: impl Into<IpAddr>) -> Option<&ResourceDescription> {
        self.network_table
            .longest_match(ip)
            .and_then(|(_, id)| self.id_table.get(id))
    }

    /// Gets the resource by id
    pub fn get_by_id(&self, id: &Id) -> Option<&ResourceDescription> {
        self.id_table.get(id)
    }

    /// Removes the resource stored under `id` (if any) together with all of its
    /// index entries, keeping the three maps consistent.
    fn remove_by_id(&mut self, id: Id) {
        if let Some(res) = self.id_table.remove(&id) {
            match res {
                ResourceDescription::Dns(r) => {
                    self.dns_name.remove(&r.address);
                    self.network_table.remove(r.ipv4);
                    self.network_table.remove(r.ipv6);
                }
                ResourceDescription::Cidr(r) => {
                    self.network_table.remove(r.address);
                }
            }
        }
    }

    /// Removes every previously stored resource that collides with
    /// `resource_description` on id, dns name or any ip, so that no index entry
    /// is left dangling after the subsequent insert.
    fn cleanup_resource(&mut self, resource_description: &ResourceDescription) {
        // Same-id collision first, then any overlapping index entries.
        self.remove_by_id(resource_description.id());
        match resource_description {
            ResourceDescription::Dns(r) => {
                if let Some(id) = self.dns_name.remove(&r.address) {
                    self.remove_by_id(id);
                }
                if let Some(id) = self.network_table.remove(r.ipv4) {
                    self.remove_by_id(id);
                }
                if let Some(id) = self.network_table.remove(r.ipv6) {
                    self.remove_by_id(id);
                }
            }
            ResourceDescription::Cidr(r) => {
                if let Some(id) = self.network_table.remove(r.address) {
                    self.remove_by_id(id);
                }
            }
        }
    }

    /// Inserts a new resource_description
    ///
    /// If the id was used previously the old value will be deleted.
    /// Same goes if any of the ip matches exactly an old ip or dns name.
    /// This means that a match in IP or dns name will discard all old values.
    ///
    /// This is done so that we don't have dangling values.
    pub fn insert(&mut self, resource_description: ResourceDescription) {
        self.cleanup_resource(&resource_description);
        let id = resource_description.id();
        match &resource_description {
            ResourceDescription::Dns(r) => {
                self.network_table.insert(r.ipv4, id);
                self.network_table.insert(r.ipv6, id);
                self.dns_name.insert(r.address.clone(), id);
            }
            ResourceDescription::Cidr(r) => {
                self.network_table.insert(r.address, id);
            }
        }
        self.id_table.insert(id, resource_description);
    }
}

View File

@@ -0,0 +1,20 @@
use super::InterfaceConfig;
use libs_common::Result;
// Stub tunnel-device implementation for platforms without a native backend yet;
// every operation is `todo!()`.
#[derive(Debug)]
pub(crate) struct IfaceConfig(pub(crate) Arc<IfaceDevice>);

// Placeholder device handle; holds no fd/state on this platform.
// NOTE(review): `Arc` does not appear in the visible imports of this stub —
// confirm `std::sync::Arc` is brought into scope elsewhere in this file.
#[derive(Debug)]
pub(crate) struct IfaceDevice;

impl IfaceConfig {
    // It's easier to not make these functions async, setting these should not block the thread for too long
    // NOTE(review): the other platform backends expose these as `async fn` (plus an
    // `add_route`) — confirm callers compile against this sync variant.
    #[tracing::instrument(level = "trace", skip(self))]
    pub fn set_iface_config(&mut self, _config: &InterfaceConfig) -> Result<()> {
        todo!()
    }

    pub fn up(&mut self) -> Result<()> {
        todo!()
    }
}

View File

@@ -0,0 +1,284 @@
use ip_network::IpNetwork;
use libc::{
close, connect, ctl_info, fcntl, getsockopt, ioctl, iovec, msghdr, recvmsg, sendmsg, sockaddr,
sockaddr_ctl, sockaddr_in, socket, socklen_t, AF_INET, AF_INET6, AF_SYSTEM, AF_SYS_CONTROL,
CTLIOCGINFO, F_GETFL, F_SETFL, IF_NAMESIZE, IPPROTO_IP, O_NONBLOCK, PF_SYSTEM, SOCK_DGRAM,
SOCK_STREAM, SYSPROTO_CONTROL, UTUN_OPT_IFNAME,
};
use libs_common::{Error, Result};
use std::{
ffi::{c_int, c_short, c_uchar},
io,
mem::{size_of, size_of_val},
os::fd::{AsRawFd, RawFd},
sync::Arc,
};
use super::InterfaceConfig;
// Kernel-control name of darwin's utun driver; `new()` resolves it to an id via CTLIOCGINFO.
const CTRL_NAME: &[u8] = b"com.apple.net.utun_control";
// SIOCGIFMTU ioctl request number — hand-encoded because libc doesn't export it for darwin.
const SIOCGIFMTU: u64 = 0x0000_0000_c020_6933;

// Configuration handle wrapping the shared device.
#[derive(Debug)]
pub(crate) struct IfaceConfig(pub(crate) Arc<IfaceDevice>);

// Owns the utun kernel-control socket fd.
#[derive(Debug)]
pub(crate) struct IfaceDevice {
    fd: RawFd,
}

impl AsRawFd for IfaceDevice {
    fn as_raw_fd(&self) -> RawFd {
        self.fd
    }
}

impl Drop for IfaceDevice {
    // Close the fd when the device is dropped so the utun interface is released.
    fn drop(&mut self) {
        unsafe { close(self.fd) };
    }
}

// For some reason this is not available in libc for darwin :c
#[allow(non_camel_case_types)]
#[repr(C)]
pub struct ifreq {
    ifr_name: [c_uchar; IF_NAMESIZE],
    ifr_ifru: IfrIfru,
}

// Mirrors the C `ifr_ifru` union from <net/if.h>; only `ifru_mtu` is read here (in `mtu()`).
#[repr(C)]
union IfrIfru {
    ifru_addr: sockaddr,
    ifru_addr_v4: sockaddr_in,
    ifru_addr_v6: sockaddr_in,
    ifru_dstaddr: sockaddr,
    ifru_broadaddr: sockaddr,
    ifru_flags: c_short,
    ifru_metric: c_int,
    ifru_mtu: c_int,
    ifru_phys: c_int,
    ifru_media: c_int,
    ifru_intval: c_int,
    ifru_wake_flags: u32,
    ifru_route_refcnt: u32,
    ifru_cap: [c_int; 2],
    ifru_functional_type: u32,
}
// On Darwin tunnel can only be named utunXXX
/// Parses a `utunN` interface name into the control-socket unit number:
/// bare `"utun"` maps to 0 and `"utunN"` maps to `N + 1` (the unit that
/// `new()` passes as `sc_unit` — presumably 0 lets the kernel pick; confirm).
/// Anything else is rejected as an invalid tunnel name.
pub fn parse_utun_name(name: &str) -> Result<u32> {
    let Some(suffix) = name.strip_prefix("utun") else {
        return Err(Error::InvalidTunnelName);
    };

    if suffix.is_empty() {
        // The name is simply "utun"
        return Ok(0);
    }

    // Everything past utun should represent an integer index
    suffix
        .parse::<u32>()
        .map(|x| x + 1)
        .map_err(|_| Error::InvalidTunnelName)
}
impl IfaceDevice {
    /// Writes one IP packet to the utun fd, prepending the 4-byte
    /// protocol-family header that darwin utun devices expect.
    /// Returns the number of payload bytes reported by `sendmsg`, or 0 on error.
    fn write(&self, src: &[u8], af: u8) -> usize {
        let mut hdr = [0, 0, 0, af];
        let mut iov = [
            iovec {
                iov_base: hdr.as_mut_ptr() as _,
                iov_len: hdr.len(),
            },
            iovec {
                iov_base: src.as_ptr() as _,
                iov_len: src.len(),
            },
        ];

        let msg_hdr = msghdr {
            msg_name: std::ptr::null_mut(),
            msg_namelen: 0,
            msg_iov: &mut iov[0],
            msg_iovlen: iov.len() as _,
            msg_control: std::ptr::null_mut(),
            msg_controllen: 0,
            msg_flags: 0,
        };

        match unsafe { sendmsg(self.fd, &msg_hdr, 0) } {
            -1 => 0,
            n => n as usize,
        }
    }

    /// Opens a kernel-control socket for `com.apple.net.utun_control` and
    /// connects it to the utun unit parsed out of `name`.
    ///
    /// The fd is closed on every error path so a failed construction does not
    /// leak it, and errno is captured *before* `close` (which may clobber it).
    pub async fn new(name: &str) -> Result<Self> {
        let idx = parse_utun_name(name)?;

        let fd = match unsafe { socket(PF_SYSTEM, SOCK_DGRAM, SYSPROTO_CONTROL) } {
            -1 => return Err(get_last_error()),
            fd => fd,
        };

        let mut info = ctl_info {
            ctl_id: 0,
            ctl_name: [0; 96],
        };

        info.ctl_name[..CTRL_NAME.len()]
            // SAFETY: We only care about maintaining the same byte value not the same value,
            // meaning that the slice &[u8] here is just a blob of bytes for us, we need this conversion
            // just because `c_char` is i8 (for some reason).
            // One thing I don't like about this is that `ctl_name` is actually a nul-terminated string,
            // which we are only getting because `CTRL_NAME` is less than 96 bytes long and we are 0-value
            // initializing the array we should be using a CStr to be explicit... but this is slightly easier.
            .copy_from_slice(unsafe { &*(CTRL_NAME as *const [u8] as *const [i8]) });

        if unsafe { ioctl(fd, CTLIOCGINFO, &mut info as *mut ctl_info) } < 0 {
            // Capture errno before `close` can overwrite it.
            let err = get_last_error();
            unsafe { close(fd) };
            return Err(err);
        }

        let addr = sockaddr_ctl {
            sc_len: size_of::<sockaddr_ctl>() as u8,
            sc_family: AF_SYSTEM as u8,
            ss_sysaddr: AF_SYS_CONTROL as u16,
            sc_id: info.ctl_id,
            sc_unit: idx,
            sc_reserved: Default::default(),
        };

        if unsafe {
            connect(
                fd,
                &addr as *const sockaddr_ctl as _,
                size_of_val(&addr) as _,
            )
        } < 0
        {
            let err = get_last_error();
            unsafe { close(fd) };
            return Err(err);
        }

        Ok(Self { fd })
    }

    /// Puts the fd into non-blocking mode, consuming and returning the device.
    pub fn set_non_blocking(self) -> Result<Self> {
        match unsafe { fcntl(self.fd, F_GETFL) } {
            -1 => Err(get_last_error()),
            flags => match unsafe { fcntl(self.fd, F_SETFL, flags | O_NONBLOCK) } {
                -1 => Err(get_last_error()),
                _ => Ok(self),
            },
        }
    }

    /// Returns the kernel-assigned interface name (e.g. `utun3`) via
    /// `UTUN_OPT_IFNAME`; the reported length includes the trailing NUL,
    /// which is stripped.
    pub fn name(&self) -> Result<String> {
        let mut tunnel_name = [0u8; 256];
        let mut tunnel_name_len = tunnel_name.len() as socklen_t;

        if unsafe {
            getsockopt(
                self.fd,
                SYSPROTO_CONTROL,
                UTUN_OPT_IFNAME,
                tunnel_name.as_mut_ptr() as _,
                &mut tunnel_name_len,
            )
        } < 0
            || tunnel_name_len == 0
        {
            return Err(get_last_error());
        }

        Ok(String::from_utf8_lossy(&tunnel_name[..(tunnel_name_len - 1) as usize]).to_string())
    }

    /// Get the current MTU value
    ///
    /// Queries SIOCGIFMTU through a throwaway `AF_INET` socket; the helper
    /// socket is always closed, including on the ioctl error path (the
    /// previous revision leaked it there).
    pub async fn mtu(&self) -> Result<usize> {
        // Resolve the interface name first so a failure here cannot leak the
        // helper socket created below.
        let name = self.name()?;

        let fd = match unsafe { socket(AF_INET, SOCK_STREAM, IPPROTO_IP) } {
            -1 => return Err(get_last_error()),
            fd => fd,
        };

        let iface_name: &[u8] = name.as_ref();
        let mut ifr = ifreq {
            ifr_name: [0; IF_NAMESIZE],
            ifr_ifru: IfrIfru { ifru_mtu: 0 },
        };

        ifr.ifr_name[..iface_name.len()].copy_from_slice(iface_name);

        if unsafe { ioctl(fd, SIOCGIFMTU, &ifr) } < 0 {
            // Capture errno before `close` can overwrite it.
            let err = get_last_error();
            unsafe { close(fd) };
            return Err(err);
        }

        unsafe { close(fd) };

        Ok(unsafe { ifr.ifr_ifru.ifru_mtu } as _)
    }

    pub fn write4(&self, src: &[u8]) -> usize {
        self.write(src, AF_INET as u8)
    }

    pub fn write6(&self, src: &[u8]) -> usize {
        self.write(src, AF_INET6 as u8)
    }

    /// Reads one packet into `dst`, discarding the 4-byte utun protocol-family
    /// header. Short reads (header only) yield an empty slice.
    pub fn read<'a>(&self, dst: &'a mut [u8]) -> Result<&'a mut [u8]> {
        let mut hdr = [0u8; 4];
        let mut iov = [
            iovec {
                iov_base: hdr.as_mut_ptr() as _,
                iov_len: hdr.len(),
            },
            iovec {
                iov_base: dst.as_mut_ptr() as _,
                iov_len: dst.len(),
            },
        ];

        let mut msg_hdr = msghdr {
            msg_name: std::ptr::null_mut(),
            msg_namelen: 0,
            msg_iov: &mut iov[0],
            msg_iovlen: iov.len() as _,
            msg_control: std::ptr::null_mut(),
            msg_controllen: 0,
            msg_flags: 0,
        };

        match unsafe { recvmsg(self.fd, &mut msg_hdr, 0) } {
            -1 => Err(Error::IfaceRead(io::Error::last_os_error())),
            0..=4 => Ok(&mut dst[..0]),
            n => Ok(&mut dst[..(n - 4) as usize]),
        }
    }
}
// These take `&mut self` even though nothing requires it yet — it keeps the
// signatures honest for when the TODOs below actually mutate interface state.
impl IfaceConfig {
    /// Applies `config` to the interface. TODO: not implemented on darwin yet.
    #[tracing::instrument(level = "trace", skip(self))]
    pub async fn set_iface_config(&mut self, config: &InterfaceConfig) -> Result<()> {
        // TODO
        Ok(())
    }

    /// Brings the interface up. TODO: not implemented on darwin yet.
    pub async fn up(&mut self) -> Result<()> {
        // TODO
        Ok(())
    }

    /// Adds `route` to the interface. Unimplemented on darwin; the parameter is
    /// underscore-prefixed (matching the platform-stub convention) until the
    /// body exists.
    pub async fn add_route(&mut self, _route: IpNetwork) -> Result<()> {
        todo!()
    }
}
// Wraps the current errno (as left by the last libc call) into our error type.
fn get_last_error() -> Error {
    Error::Io(io::Error::last_os_error())
}

View File

@@ -0,0 +1,272 @@
use futures::TryStreamExt;
use ip_network::IpNetwork;
use libc::{
close, fcntl, ioctl, open, read, sockaddr, sockaddr_in, write, F_GETFL, F_SETFL,
IFF_MULTI_QUEUE, IFF_NO_PI, IFF_TUN, IFNAMSIZ, O_NONBLOCK, O_RDWR,
};
use libs_common::{Error, Result};
use netlink_packet_route::rtnl::link::nlas::Nla;
use rtnetlink::{new_connection, Handle};
use std::{
ffi::{c_int, c_short, c_uchar},
io,
os::fd::{AsRawFd, RawFd},
sync::Arc,
};
use super::InterfaceConfig;
// Configuration handle wrapping the shared device.
#[derive(Debug)]
pub(crate) struct IfaceConfig(pub(crate) Arc<IfaceDevice>);

// TUNSETIFF ioctl request number (attach fd to a tun interface).
const TUNSETIFF: u64 = 0x4004_54ca;
// NUL-terminated path handed straight to `open(2)`.
const TUN_FILE: &[u8] = b"/dev/net/tun\0";
// rtnetlink route attributes used by `add_route`.
const RT_SCOPE_LINK: u8 = 253;
const RT_PROT_UNSPEC: u8 = 0;

// Mirrors the C `ifr_ifru` union from <linux/if.h>; only `ifru_flags` is set here (in `new()`).
#[repr(C)]
union IfrIfru {
    ifru_addr: sockaddr,
    ifru_addr_v4: sockaddr_in,
    ifru_addr_v6: sockaddr_in,
    ifru_dstaddr: sockaddr,
    ifru_broadaddr: sockaddr,
    ifru_flags: c_short,
    ifru_metric: c_int,
    ifru_mtu: c_int,
    ifru_phys: c_int,
    ifru_media: c_int,
    ifru_intval: c_int,
    ifru_wake_flags: u32,
    ifru_route_refcnt: u32,
    ifru_cap: [c_int; 2],
    ifru_functional_type: u32,
}

// C-layout `ifreq` passed to the TUNSETIFF ioctl.
#[repr(C)]
pub struct ifreq {
    ifr_name: [c_uchar; IFNAMSIZ],
    ifr_ifru: IfrIfru,
}

// Owns the tun fd plus the rtnetlink handle/connection used for configuration.
#[derive(Debug)]
pub struct IfaceDevice {
    fd: RawFd,
    handle: Handle,
    // Background task driving the netlink connection; aborted on drop.
    connection: tokio::task::JoinHandle<()>,
    interface_index: u32,
}

impl Drop for IfaceDevice {
    fn drop(&mut self) {
        // Stop the netlink pump before releasing the tun fd.
        self.connection.abort();
        unsafe { close(self.fd) };
    }
}

impl AsRawFd for IfaceDevice {
    fn as_raw_fd(&self) -> RawFd {
        self.fd
    }
}
impl IfaceDevice {
fn write(&self, buf: &[u8]) -> usize {
match unsafe { write(self.fd, buf.as_ptr() as _, buf.len() as _) } {
-1 => 0,
n => n as usize,
}
}
pub async fn new(name: &str) -> Result<IfaceDevice> {
let fd = match unsafe { open(TUN_FILE.as_ptr() as _, O_RDWR) } {
-1 => return Err(get_last_error()),
fd => fd,
};
let iface_name = name.as_bytes();
let mut ifr = ifreq {
ifr_name: [0; IFNAMSIZ],
ifr_ifru: IfrIfru {
ifru_flags: (IFF_TUN | IFF_NO_PI | IFF_MULTI_QUEUE) as _,
},
};
if iface_name.len() >= ifr.ifr_name.len() {
return Err(Error::InvalidTunnelName);
}
ifr.ifr_name[..iface_name.len()].copy_from_slice(iface_name);
if unsafe { ioctl(fd, TUNSETIFF as _, &ifr) } < 0 {
return Err(get_last_error());
}
let name = name.to_string();
let (connection, handle, _) = new_connection()?;
let join_handle = tokio::spawn(connection);
let interface_index = handle
.link()
.get()
.match_name(name.clone())
.execute()
.try_next()
.await?
.ok_or(Error::NoIface)?
.header
.index;
Ok(Self {
fd,
handle,
connection: join_handle,
interface_index,
})
}
pub fn set_non_blocking(self) -> Result<Self> {
match unsafe { fcntl(self.fd, F_GETFL) } {
-1 => Err(get_last_error()),
flags => match unsafe { fcntl(self.fd, F_SETFL, flags | O_NONBLOCK) } {
-1 => Err(get_last_error()),
_ => Ok(self),
},
}
}
/// Get the current MTU value
pub async fn mtu(&self) -> Result<usize> {
while let Ok(Some(msg)) = self
.handle
.link()
.get()
.match_index(self.interface_index)
.execute()
.try_next()
.await
{
for nla in msg.nlas {
if let Nla::Mtu(mtu) = nla {
return Ok(mtu as usize);
}
}
}
Err(Error::NoMtu)
}
pub fn write4(&self, src: &[u8]) -> usize {
self.write(src)
}
pub fn write6(&self, src: &[u8]) -> usize {
self.write(src)
}
pub fn read<'a>(&self, dst: &'a mut [u8]) -> Result<&'a mut [u8]> {
match unsafe { read(self.fd, dst.as_mut_ptr() as _, dst.len()) } {
-1 => Err(Error::IfaceRead(io::Error::last_os_error())),
n => Ok(&mut dst[..n as usize]),
}
}
}
// Wraps the current errno (as left by the last libc call) into our error type.
fn get_last_error() -> Error {
    Error::Io(io::Error::last_os_error())
}
impl IfaceConfig {
    /// Adds an on-link route for `route` out of the tun interface via rtnetlink.
    ///
    /// NOTE(review): both `source_prefix` and `destination_prefix` are set to the
    /// same network — source-prefix routing is unusual here; confirm it is intended.
    pub async fn add_route(&mut self, route: IpNetwork) -> Result<()> {
        let req = self
            .0
            .handle
            .route()
            .add()
            .output_interface(self.0.interface_index)
            .protocol(RT_PROT_UNSPEC)
            .scope(RT_SCOPE_LINK);
        match route {
            IpNetwork::V4(ipnet) => {
                req.v4()
                    .source_prefix(ipnet.network_address(), ipnet.netmask())
                    .destination_prefix(ipnet.network_address(), ipnet.netmask())
                    .execute()
                    .await?
            }
            IpNetwork::V6(ipnet) => {
                req.v6()
                    .source_prefix(ipnet.network_address(), ipnet.netmask())
                    .destination_prefix(ipnet.network_address(), ipnet.netmask())
                    .execute()
                    .await?
            }
        }
        /*
        TODO: This works for ignoring the error but the route isn't added afterwards
        let's try removing all routes on init for the given interface I think that will work.
        match res {
            Ok(_)
            | Err(rtnetlink::Error::NetlinkError(netlink_packet_core::error::ErrorMessage {
                code: NETLINK_ERROR_FILE_EXISTS,
                ..
            })) => Ok(()),
            Err(err) => Err(err.into()),
        }
        */
        Ok(())
    }

    /// Replaces the interface's addresses with the configured ipv4 (/32) and
    /// ipv6 (/128): all existing addresses on the link are deleted first, then
    /// the two tunnel addresses are added.
    #[tracing::instrument(level = "trace", skip(self))]
    pub async fn set_iface_config(&mut self, config: &InterfaceConfig) -> Result<()> {
        // Flush every address currently assigned to the link.
        let ips = self
            .0
            .handle
            .address()
            .get()
            .set_link_index_filter(self.0.interface_index)
            .execute();
        ips.try_for_each(|ip| self.0.handle.address().del(ip).execute())
            .await?;
        self.0
            .handle
            .address()
            .add(self.0.interface_index, config.ipv4.into(), 32)
            .execute()
            .await?;
        // TODO: Disable this when ipv6 is disabled
        self.0
            .handle
            .address()
            .add(self.0.interface_index, config.ipv6.into(), 128)
            .execute()
            .await?;
        //TODO!
        /*
        let name: String = self.name.clone().try_into()?;
        for dns in &config.dns {
            //resolvconf::set_dns(&name, dns).await?;
        }
        */
        //nftables::enable_masquerade((config.ipv4_masquerade, config.ipv6_masquerade)).await?;
        Ok(())
    }

    /// Brings the link up via netlink.
    pub async fn up(&mut self) -> Result<()> {
        self.0
            .handle
            .link()
            .set(self.0.interface_index)
            .up()
            .execute()
            .await?;
        Ok(())
    }
}

View File

@@ -0,0 +1,22 @@
use super::InterfaceConfig;
use ip_network::IpNetwork;
use libs_common::Result;
// Stub tunnel-interface configuration for platforms without a native backend
// yet; every operation is `todo!()`.
#[derive(Debug)]
pub(crate) struct IfaceConfig;

impl IfaceConfig {
    // It's easier to not make these functions async, setting these should not block the thread for too long
    /// Applies the interface configuration. Unimplemented on this platform.
    #[tracing::instrument(level = "trace", skip(self))]
    pub async fn set_iface_config(&mut self, _config: &InterfaceConfig) -> Result<()> {
        todo!()
    }

    /// Brings the interface up. Unimplemented on this platform.
    pub async fn up(&mut self) -> Result<()> {
        todo!()
    }

    /// Adds a route to the interface. Unimplemented on this platform; the
    /// parameter is underscore-prefixed (matching `_config` above) to silence
    /// the unused-variable warning until the body exists.
    pub async fn add_route(&mut self, _route: IpNetwork) -> Result<()> {
        todo!()
    }
}

View File

@@ -0,0 +1,12 @@
[package]
name = "macros"
version = "0.1.0"
edition = "2021"
[lib]
proc-macro = true
[dependencies]
syn = { version = "2.0" }
proc-macro2 = { version = "1.0" }
quote = { version = "1.0" }

View File

@@ -0,0 +1,108 @@
#![recursion_limit = "128"]
extern crate proc_macro;
use proc_macro2::{Span, TokenStream};
use quote::quote;
use syn::{Data, DeriveInput, Fields};
/// Macro that generates a new enum with only the discriminants of another enum within a module that implements swift_bridge.
///
/// This is a workaround to create an error type compatible with swift that can be converted from the original error type.
/// It implements `From<OriginalEnum>` so the idea is that you can call a swift ffi function `handle_error(err.into());`
///
/// This makes a lot of assumptions about the types it's being implemented on. Since we control those types, it is not meant
/// to be a public macro. (However, be careful if you reuse it somewhere else! This is based on strum's EnumDiscriminants, so you
/// can check there for an actual proper implementation.)
///
/// IMPORTANT!: You need to include swift_bridge::bridge for macos and ios targets so this doesn't error out.
#[proc_macro_derive(SwiftEnum)]
pub fn swift_enum(input: proc_macro::TokenStream) -> proc_macro::TokenStream {
    // Parse the annotated item, expand, and surface any expansion error as a
    // compile error at the call site instead of panicking.
    let ast = syn::parse_macro_input!(input as DeriveInput);
    let toks = swift_enum_inner(&ast).unwrap_or_else(|err| err.to_compile_error());
    toks.into()
}
/// Expands `#[derive(SwiftEnum)]`: emits a `swift_ffi` module containing a
/// fieldless mirror enum `Swift<Name>` (one variant per variant of the input
/// enum), plus `From<&Name>` / `From<Name>` conversions. On apple targets the
/// module is annotated with `swift_bridge::bridge` and the `From` impls are
/// compiled in; elsewhere only the plain module is emitted.
fn swift_enum_inner(ast: &DeriveInput) -> syn::Result<TokenStream> {
    let name = &ast.ident;
    let vis = &ast.vis;

    // Only enums have discriminants to mirror.
    let variants = match &ast.data {
        Data::Enum(v) => &v.variants,
        _ => {
            return Err(syn::Error::new(
                Span::call_site(),
                // Fixed grammar in the user-facing compile error ("only support" -> "only supports").
                "This macro only supports enums.",
            ))
        }
    };

    // The mirror enum's variants: just the names, all payloads dropped.
    let discriminants: Vec<_> = variants
        .into_iter()
        .map(|v| {
            let ident = &v.ident;
            quote! {#ident}
        })
        .collect();

    let enum_name = syn::Ident::new(&format!("Swift{}", name), Span::call_site());
    let mod_name = syn::Ident::new("swift_ffi", Span::call_site());

    // One match arm per variant, ignoring any fields the variant carries.
    let arms = variants
        .iter()
        .map(|variant| {
            let ident = &variant.ident;
            let params = match &variant.fields {
                Fields::Unit => quote! {},
                Fields::Unnamed(_fields) => {
                    quote! { (..) }
                }
                Fields::Named(_fields) => {
                    quote! { { .. } }
                }
            };
            quote! { #name::#ident #params => #mod_name::#enum_name::#ident }
        })
        .collect::<Vec<_>>();

    let from_fn_body = quote! { match val { #(#arms),* } };

    let impl_from_ref = {
        quote! {
            impl<'a> ::core::convert::From<&'a #name> for #mod_name::#enum_name {
                fn from(val: &'a #name) -> Self {
                    #from_fn_body
                }
            }
        }
    };

    let impl_from = {
        quote! {
            impl ::core::convert::From<#name> for #mod_name::#enum_name {
                fn from(val: #name) -> Self {
                    #from_fn_body
                }
            }
        }
    };

    // If we wanted to expose this function we should have another crate that actually also includes
    // swift_bridge. but since we are only using this inside our crates we can just make sure we include it.
    Ok(quote! {
        #[cfg_attr(any(target_os = "macos", target_os = "ios"), swift_bridge::bridge)]
        #vis mod #mod_name {
            pub enum #enum_name {
                #(#discriminants),*
            }
        }

        #[cfg(any(target_os = "macos", target_os = "ios"))]
        #impl_from_ref

        #[cfg(any(target_os = "macos", target_os = "ios"))]
        #impl_from
    })
}

View File

@@ -1,23 +0,0 @@
# syntax=docker/dockerfile:1.5-labs
FROM rust:1.70-slim as builder
WORKDIR /workspace
ADD . .
RUN --mount=type=cache,target=./target \
--mount=type=cache,target=/usr/local/cargo/registry \
--mount=type=cache,target=/usr/local/rustup \
rustup target add x86_64-unknown-linux-musl && \
cargo build --release --bin relay --target x86_64-unknown-linux-musl
RUN --mount=type=cache,target=./target \
mv ./target/x86_64-unknown-linux-musl/release/relay /usr/local/bin/relay
FROM scratch
COPY --from=builder /usr/local/bin/relay /usr/local/bin/relay
ENV RUST_BACKTRACE=1
EXPOSE 3478/udp
EXPOSE 49152-65535/udp
# This purposely does not include an `init` process. Use `docker run --init` for proper signal handling.
ENTRYPOINT ["relay"]

View File

@@ -29,7 +29,7 @@ url = "2.4.0"
serde = { version = "1.0.163", features = ["derive"] }
[dev-dependencies]
webrtc = "0.7.2"
webrtc = { version = "0.8" }
redis = { version = "0.23.0", default-features = false, features = ["tokio-comp"] }
difference = "2.0.0"

View File

@@ -34,6 +34,9 @@ struct Args {
/// If omitted, the relay server will start immediately, otherwise we first log on and wait for the `init` message.
#[arg(long, env)]
portal_ws_url: Option<Url>,
/// Token generated by the portal to authorize websocket connection
#[arg(long, env)]
portal_token: Option<String>,
/// Whether to allow connecting to the portal over an insecure connection.
#[arg(long)]
allow_insecure_ws: bool,
@@ -44,6 +47,28 @@ struct Args {
rng_seed: Option<u64>,
}
// TODO: Code repetition from common
// TODO: Code repetition from common
/// Rewrites `url` into the relay's portal websocket endpoint: appends the
/// `relay/websocket` path segments and replaces the query string with the
/// given `token` and `ipv4` pairs.
fn get_websocket_path(mut url: Url, token: String, ipv4: Ipv4Addr) -> Result<Url> {
    {
        let mut segments = url
            .path_segments_mut()
            .map_err(|_| anyhow!("invalid url"))
            .context("No url base found while trying to format the portal's URL")?;
        segments.pop_if_empty().push("relay").push("websocket");
    }

    // Drop any pre-existing query parameters before appending ours.
    url.query_pairs_mut()
        .clear()
        .append_pair("token", &token)
        .append_pair("ipv4", &ipv4.to_string());

    Ok(url)
}
#[tokio::main]
async fn main() -> Result<()> {
let args = Args::parse();
@@ -70,7 +95,13 @@ async fn main() -> Result<()> {
.append_pair("ipv4", &args.listen_ip4_addr.to_string());
let mut channel = PhoenixChannel::<InboundPortalMessage, ()>::connect(
portal_url.clone(),
get_websocket_path(
portal_url.clone(),
args.portal_token.ok_or(anyhow!(
"PORTAL_TOKEN must be set if you're setting a PORTAL_WS_URL"
))?,
args.listen_ip4_addr,
)?,
format!("relay/{}", env!("CARGO_PKG_VERSION")),
)
.await

View File

@@ -1,4 +1,16 @@
[toolchain]
channel = "1.70.0"
components = ["rustfmt", "clippy"]
targets = ["x86_64-unknown-linux-musl"]
targets = [
"x86_64-unknown-linux-musl",
"x86_64-linux-android",
"arm-linux-androideabi",
"aarch64-linux-android",
"armv7-linux-androideabi",
"i686-linux-android",
"aarch64-apple-ios-sim",
"aarch64-apple-ios",
"aarch64-apple-darwin",
"x86_64-apple-ios",
"x86_64-apple-darwin",
]