Mirror of https://github.com/optim-enterprises-bv/openlan-cgw.git
Synced 2025-10-29 17:32:21 +00:00

Merge pull request #112 from Telecominfraproject/next
1.2 Rel next->main PR

Cargo.lock (generated, 1010 lines changed)
File diff suppressed because it is too large

Cargo.toml (57 lines changed)
@@ -5,38 +5,47 @@ edition = "2021"
 
 [dependencies]
 serde = { version = "1.0.144", features = ["derive"] }
-serde_json = "1.0.85"
-env_logger = "0.11.3"
-log = "0.4.20"
+serde_json = { version = "1.0.85" }
+env_logger = { version = "0.11.3" }
+log = { version = "0.4.20" }
 tokio = { version = "1.34.0", features = ["full"] }
 tokio-stream = { version = "0.1.15", features = ["full"] }
 tokio-tungstenite = { version = "0.23.0" }
-tokio-rustls = "0.26.0"
-tokio-postgres = { version = "0.7.10", features = ["with-eui48-1"]}
-tokio-pg-mapper = "0.2.0"
-tungstenite = { version = "0.23.0"}
+tokio-rustls = { version = "0.26.0" }
+tokio-postgres = { version = "0.7.10", features = ["with-eui48-1"] }
+tokio-postgres-rustls = { version = "0.12.0" }
+tokio-pg-mapper = { version = "0.2.0" }
+tungstenite = { version = "0.23.0" }
 futures-util = { version = "0.3.0", default-features = false }
-futures-channel = "0.3.0"
+futures-channel = { version = "0.3.0" }
 futures-executor = { version = "0.3.0", optional = true }
-futures = "0.3.0"
-rlimit = "0.10.1"
-tonic = "0.11.0"
-prost = "0.12"
-rdkafka = "0.36.2"
-eui48 = { version = "1.1.0", features = ["serde"]}
+futures = { version = "0.3.0" }
+rlimit = { version = "0.10.1" }
+tonic = { version = "0.11.0" }
+prost = { version = "0.12" }
+rdkafka = { version = "0.36.2" }
+eui48 = { version = "1.1.0", features = ["serde"] }
 uuid = { version = "1.6.1", features = ["serde"] }
-redis-async = "0.17.2"
-warp = "0.3.7"
+redis = { version = "0.25.3", features = [
+    "tokio-rustls-comp",
+    "tls-rustls-insecure",
+] }
+warp = { version = "0.3.7" }
 prometheus = { version = "0.13.4", features = ["process"] }
-lazy_static = "1.4.0"
+lazy_static = { version = "1.4.0" }
 petgraph = { version = "0.6.4", features = ["stable_graph"] }
-flate2 = "1.0.28"
-base64 = "0.22.0"
-rustls-pemfile = "2.1.2"
-rustls-pki-types = "1.7.0"
-x509-parser = "0.16.0"
-chrono = "0.4.38"
-derive_more = "0.99.17"
+flate2 = { version = "1.0.28" }
+base64 = { version = "0.22.0" }
+rustls-pemfile = { version = "2.1.2" }
+rustls-pki-types = { version = "1.7.0" }
+x509-parser = { version = "0.16.0" }
+chrono = { version = "0.4.38" }
+derive_more = { version = "0.99.17" }
+reqwest = { version = "0.12.5", features = ["json"] }
+jsonschema = { version = "0.18.0" }
+url = { version = "2.5.2" }
+nix = { version = "0.29.0", features = ["net"] }
+murmur2 = { version = "0.1.0" }
 
 [build-dependencies]
 tonic-build = "0.11.0"
Dockerfile

@@ -20,7 +20,7 @@ RUN mkdir -p /usr/src/openlan-cgw
 # Set the working directory
 WORKDIR /usr/src/openlan-cgw
 COPY src src
-COPY build.rs Cargo.toml Cargo.lock .
+COPY build.rs Cargo.toml Cargo.lock ./
 
 #RUN cargo build --target x86_64-unknown-linux-gnu --release && \
 RUN cargo build --target x86_64-unknown-linux-gnu && \
Makefile (24 lines changed)

@@ -16,9 +16,9 @@ CGW_BUILD_ENV_IMG_TAG := $(shell cat Dockerfile | sha1sum | awk '{print substr($
 
 CGW_BUILD_ENV_IMG_CONTAINER_NAME := "cgw_build_env"
 
-.PHONY: all cgw-app cgw-build-env-img cgw-img stop clean run
+.PHONY: all cgw-app cgw-build-env-img cgw-img stop clean run run_docker_services start-multi-cgw stop-multi-cgw run-tests
 
-all: cgw-build-env-img cgw-img
+all: start-multi-cgw
 	@echo "uCentral CGW build app (container) done"
 
 # Executed inside build-env
@@ -48,7 +48,7 @@ cgw-img: stop cgw-build-env-img
 	.
 	@echo Docker build done;
 
-stop:
+stop: stop-multi-cgw
 	@echo "Stopping / removing container ${CGW_IMG_CONTAINER_NAME}"
 	@docker stop ${CGW_IMG_CONTAINER_NAME} > /dev/null 2>&1 || true;
 	@docker container rm ${CGW_IMG_CONTAINER_NAME} > /dev/null 2>&1 || true;
@@ -62,5 +62,21 @@ clean: stop
 	@docker rmi ${CGW_BUILD_ENV_IMG_ID}:${CGW_BUILD_ENV_IMG_TAG} >/dev/null 2>&1 || true
 	@echo Done!
 
-run: stop cgw-img
+run: stop cgw-img run_docker_services
 	@./run_cgw.sh "${CGW_IMG_ID}:${CGW_IMG_TAG}" ${CGW_IMG_CONTAINER_NAME}
+
+start-multi-cgw: cgw-img
+	@pushd ./utils/docker
+	@python3 StartMultiCGW.py --start
+	@popd
+
+stop-multi-cgw:
+	@pushd ./utils/docker
+	@python3 StartMultiCGW.py --stop
+	@popd
+
+run_docker_services:
+	@cd ./utils/docker/ && docker compose up -d
+
+run-tests:
+	@cd ./tests && ./run.sh
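For quick reference, a typical local workflow with the targets above might look like the following illustrative sequence (the targets themselves take care of image builds and service startup):

```console
$ make              # default target: build images and start the multi-CGW compose stack
$ make run-tests    # run the python test suite from ./tests against the running stack
$ make stop         # stop/remove the CGW container and the multi-CGW compose stack
```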
README.md (155 lines changed)

@@ -4,7 +4,19 @@ CGW, like OWGW, manages device (Access Points and OpenLan switches) that impleme
 The main reasoning behind a new implementation of the GW is the horizontal scalability.
 # Dependencies (runtime)
 CGW requires a set of tools and services to operate and function. Some of them are embedded into the application itself and require no external utilities,
 while others are required to be running for the CGW to operate.
+
+**NOTE**: while at runtime CGW depends on services like Kafka, Redis and PGSQL, the *make* / *make all* targets
+build a complete out-of-the-box setup with default configs and container params:
+- Kafka, Redis and PGSQL containers are created and attached to the default - automatically created - *docker_cgw_multi_instances_network* network;
+  all three (plus one additional container - *init-broker-container* - needed for Kafka topic initialization) are part of a single docker compose file.
+- CGW, while also part of the same docker compose file, is partially generated:
+  multiple CGW instances can be created within a single compose file,
+  and thus the container details are generated.
+
+More information about the compose generation can be found in the
+'Automated multi-CGW instances start/stop with Docker Compose' topic.
+
 ## gRPC
 CGW utilizes gRPC to communicate with other CGW instances (referred to as Shards). This functionality does not depend on any external third-party services.
 ## Kafka
@@ -35,6 +47,7 @@ FOREIGN KEY(infra_group_id) REFERENCES infrastructure_groups(id) ON DELETE CASCA
 ## Redis
 Fast in-memory DB that CGW uses to store all needed runtime information (InfraGroup assigned CGW id, remote CGW info - IP, gRPC port etc)
 # Building
+*NOTE:* The following target builds CGW and also starts up the required services with default config and params
 ```console
 $ make all
 ```
@@ -44,9 +57,11 @@ Two new docker images will be generated on host system:
 # Running
 The following script can be used to launch the CGW app
 ```console
-$ make run
+$ make
 ```
-Command creates and executed (starts) docker container name 'openlan_cgw'
+Command creates and starts a docker container group consisting of the cgw services
+as well as the third-party services they depend on (redis, kafka, pgsql)
+
 To stop the container from running (remove it) use the following cmd:
 ```console
 $ make stop
@@ -55,59 +70,89 @@ Running application with default arguments might not be desired behavior.
 And thus the run script utilizes the following list of *environment* variables that you can define before running it to alter the behavior of the app.
 The following is a list of environment variables you can define to configure cgw-app behavior:
 ```
-CGW_ID - Shard ID
-CGW_GRPC_LISTENING_IP - IP to bind gRPC server to (listens for gRPC requests from remote CGWs)
-CGW_GRPC_LISTENING_PORT - Port to bind gRPC server to (listens for gRPC requests from remote CGWs)
-CGW_GRPC_PUBLIC_HOST - IP or hostname for Redis record (remote CGWs will connect to this particular shard through provided host record;
-                       it's up to deployment config whether remote CGW#1 will be able to access this CGW#0, for example, through provided hostname/IP)
-CGW_GRPC_PUBLIC_PORT - PORT for Redis record
-CGW_WSS_IP - IP to bind websocket server to (listens for incoming WSS connections from underlying devices - infrastructures)
-CGW_WSS_PORT - PORT to bind WSS server to
-CGW_WSS_CAS - Web socket CAS certificate file name
-CGW_WSS_CERT - Web socket server certificate file name
-CGW_WSS_KEY - Web socket server private key file name
-CGW_KAFKA_HOST - IP or hostname of remote KAFKA server to connect to (NB API)
-CGW_KAFKA_PORT - PORT of remote KAFKA server to connect to
-CGW_DB_HOST - IP or hostname of remote database server to connect to
-CGW_DB_PORT - PORT of remote database server to connect to
-CGW_DB_USER - PSQL DB username (credentials) to use upon connect to DB
-CGW_DB_PASS - PSQL DB password (credentials) to use upon connect to DB
-CGW_REDIS_HOST - IP or hostname of remote redis-db server to connect to
-CGW_REDIS_PORT - PORT of remote redis-db server to connect to
-CGW_LOG_LEVEL - Log level to start CGW application with (debug, info)
-CGW_METRICS_PORT - PORT of metrics to connect to
-CGW_CERTS_PATH - Path to certificates located on host machine
-CGW_ALLOW_CERT_MISMATCH - Allow client certificate CN and device MAC address mismatch (used for OWLS)
+CGW_ID - Shard ID
+CGW_GROUPS_CAPACITY - The CGW instance groups capacity
+CGW_GROUPS_THRESHOLD - The CGW instance groups threshold
+CGW_GROUP_INFRAS_CAPACITY - The devices capacity for a group
+CGW_GRPC_LISTENING_IP - IP to bind gRPC server to (listens for gRPC requests from remote CGWs)
+CGW_GRPC_LISTENING_PORT - Port to bind gRPC server to (listens for gRPC requests from remote CGWs)
+CGW_GRPC_PUBLIC_HOST - IP or hostname for Redis record (remote CGWs will connect to this particular shard through provided host record;
+                       it's up to deployment config whether remote CGW#1 will be able to access this CGW#0, for example, through provided hostname/IP)
+CGW_GRPC_PUBLIC_PORT - PORT for Redis record
+CGW_WSS_IP - IP to bind websocket server to (listens for incoming WSS connections from underlying devices - infrastructures)
+CGW_WSS_PORT - PORT to bind WSS server to
+CGW_WSS_CAS - Web socket CAS certificate file name
+CGW_WSS_CERT - Web socket server certificate file name
+CGW_WSS_KEY - Web socket server private key file name
+CGW_KAFKA_HOST - IP or hostname of remote KAFKA server to connect to (NB API)
+CGW_KAFKA_PORT - PORT of remote KAFKA server to connect to
+CGW_DB_HOST - IP or hostname of remote database server to connect to
+CGW_DB_PORT - PORT of remote database server to connect to
+CGW_DB_USER - PSQL DB username (credentials) to use upon connect to DB
+CGW_DB_PASS - PSQL DB password (credentials) to use upon connect to DB
+CGW_DB_TLS - Utilize TLS connection with DB server
+CGW_REDIS_HOST - IP or hostname of remote redis-db server to connect to
+CGW_REDIS_PORT - PORT of remote redis-db server to connect to
+CGW_REDIS_USERNAME - REDIS username (credentials) to use upon connect
+CGW_REDIS_PASSWORD - REDIS password (credentials) to use upon connect
+CGW_REDIS_TLS - Utilize TLS connection with REDIS server
+CGW_LOG_LEVEL - Log level to start CGW application with (debug, info)
+CGW_METRICS_PORT - PORT of metrics to connect to
+CGW_CERTS_PATH - Path to certificates located on host machine
+CGW_ALLOW_CERT_MISMATCH - Allow client certificate CN and device MAC address mismatch (used for OWLS)
+CGW_NB_INFRA_CERTS_DIR - Path to NB infrastructure (Redis, PostgreSQL) certificates located on host machine
+CGW_NB_INFRA_TLS - Utilize TLS connection with NB infrastructure (Redis, PostgreSQL);
+                   if enabled, the CGW_DB_TLS and CGW_REDIS_TLS values will be ignored and
+                   the TLS connection will be used for the Redis and PostgreSQL connections
+CGW_UCENTRAL_AP_DATAMODEL_URI - Path to AP Config message JSON Validation schema:
+                                1. URI in format "http[s]://<path>", e.g. https://somewhere.com/schema.json
+                                2. Path to local file "<path>", e.g. /etc/host/schema.json
+CGW_UCENTRAL_SWITCH_DATAMODEL_URI - Path to Switch Config message JSON Validation schema
 ```
 
 Example of properly configured list of env variables to start CGW:
 ```console
 $ export | grep CGW
-declare -x CGW_DB_HOST="localhost"            # PSQL server is located at the local host
+declare -x CGW_DB_HOST="localhost"
 declare -x CGW_DB_PORT="5432"
-declare -x CGW_DB_USERNAME="cgw"              # PSQL login credentials (username) default 'cgw' will be used
-declare -x CGW_DB_PASS="123"                  # PSQL login credentials (password) default '123' will be used
-declare -x CGW_GRPC_LISTENING_IP="127.0.0.1"  # Local default subnet is 127.0.0.1/24
+declare -x CGW_DB_USERNAME="cgw"
+declare -x CGW_DB_PASS="123"
+declare -x CGW_DB_TLS="no"
+declare -x CGW_GRPC_LISTENING_IP="127.0.0.1"
 declare -x CGW_GRPC_LISTENING_PORT="50051"
 declare -x CGW_GRPC_PUBLIC_HOST="localhost"
 declare -x CGW_GRPC_PUBLIC_PORT="50051"
 declare -x CGW_ID="0"
-declare -x CGW_KAFKA_HOST="localhost"         # Kafka is located at the local host
+declare -x CGW_KAFKA_HOST="localhost"
 declare -x CGW_KAFKA_PORT="9092"
 declare -x CGW_LOG_LEVEL="debug"
-declare -x CGW_REDIS_HOST="localhost"         # Redis server can be found at the local host
+declare -x CGW_REDIS_HOST="localhost"
 declare -x CGW_REDIS_PORT="6379"
+declare -x CGW_REDIS_USERNAME="cgw"
+declare -x CGW_REDIS_PASSWORD="123"
+declare -x CGW_REDIS_TLS="no"
 declare -x CGW_METRICS_PORT="8080"
-declare -x CGW_WSS_IP="0.0.0.0"               # Accept WSS connections at all interfaces / subnets
+declare -x CGW_WSS_IP="0.0.0.0"
 declare -x CGW_WSS_PORT="15002"
 declare -x CGW_WSS_CAS="cas.pem"
 declare -x CGW_WSS_CERT="cert.pem"
 declare -x CGW_WSS_KEY="key.pem"
-declare -x CGW_CERTS_PATH="/etc/ssl/certs"    # Path to certificates located on host machine
-declare -x CGW_ALLOW_CERT_MISMATCH="no"       # Allow client certificate CN and device MAC address mismatch
+declare -x CGW_CERTS_PATH="/etc/ssl/certs"
+declare -x CGW_ALLOW_CERT_MISMATCH="no"
+declare -x CGW_NB_INFRA_CERTS_PATH="/etc/nb_infra_certs"
+declare -x CGW_NB_INFRA_TLS="no"
+declare -x CGW_UCENTRAL_AP_DATAMODEL_URI="https://raw.githubusercontent.com/Telecominfraproject/wlan-ucentral-schema/main/ucentral.schema.json"
+declare -x CGW_UCENTRAL_SWITCH_DATAMODEL_URI="https://raw.githubusercontent.com/Telecominfraproject/ols-ucentral-schema/main/ucentral.schema.json"
+declare -x CGW_GROUPS_CAPACITY=1000
+declare -x CGW_GROUPS_THRESHOLD=50
+declare -x CGW_GROUP_INFRAS_CAPACITY=2000
 ```
 # Certificates
-The CGW uses a number of certificates to provide security.
+The CGW uses two different sets of certificate configuration:
+1. AP/Switch connectivity (southbound)
+2. Infrastructure connectivity (northbound)
+
+The AP/Switch connectivity uses a number of certificates to provide security (mTLS).
 There are 2 types of certificates required for a normal deployment:
 1. Server certificates
 2. Client certificates
@@ -119,3 +164,41 @@ There are several environment variable to configure certificates path and names
 2. CGW_WSS_KEY - CGW WSS Private Key
 3. CGW_WSS_CAS - Chain certificates to validate client (root/issuer)
 4. CGW_CERTS_PATH - path to certificates located on host machine
+
+The infrastructure connectivity uses a root certs store - a directory with trusted certificates.
+The environment variable to configure the certificates path:
+1. CGW_NB_INFRA_CERTS_PATH - path to certificates located on host machine
+
+# Automated Testing
+Automated python-based tests are located inside the *tests* directory.
+Currently, tests should be run manually by changing PWD to *tests* and launching the helper script *run.sh*:
+```console
+cd ./tests
+./run.sh
+```
+or using the make target (added for convenience):
+```console
+make run-tests
+```
+*NOTE:* currently, tests are not running inside a container.
+To make sure tests can communicate with the CGW environment, tests currently
+reach the environment through ports exposed to the host system,
+e.g. for WSS the tests try to reach 'wss://localhost:15002' by default, and so on.
+
+# Automated multi-CGW instances start/stop with Docker Compose
+Automated multi-CGW start/stop is based on the "docker-compose-template.yml.j2" file located inside the *utils/docker* directory.
+To bring up multiple (minimum 1) CGW instances, the templated "docker-compose-template.yml.j2" file is used.
+The "StartMultiCGW.py" script located inside the *utils/docker* directory is used to:
+1. Stop all running Docker Composes.
+2. Update/generate certificates.
+3. Generate the "docker-compose-multi-cgw.yml" Docker Compose file that includes:
+   - Kafka service
+   - Redis service
+   - PostgreSQL service
+   - [N] CGW instances
+4. Start Docker Compose using the generated "docker-compose-multi-cgw.yml" file.
+
+The "CGW_INSTANCES_NUM" environment variable is used to specify the number of CGW instances.
+The Makefile has 2 targets to start/stop Docker Compose (see the usage sketch after this section):
+- start-multi-cgw
+- stop-multi-cgw
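As referenced above, a minimal usage sketch of the multi-CGW targets; the instance count of 3 is just an example value:

```console
$ export CGW_INSTANCES_NUM=3   # number of CGW shards rendered into docker-compose-multi-cgw.yml
$ make start-multi-cgw         # regenerate certs, generate the compose file and start it
$ make stop-multi-cgw          # stop all generated compose services
```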
api/cnc_api.yaml (new file, 204 lines)

@@ -0,0 +1,204 @@
|
||||
---
|
||||
# Kafka 'CnC' (default) topic API list to interact with CGW infrastructure
|
||||
# The following objects define the layout of messages one can push into
|
||||
# 'CnC' topic.
|
||||
# The result messages for each of the request can be found in the
|
||||
# 'cnc_res_api.yaml' file.
|
||||
infrastructure_group_create:
|
||||
description:
|
||||
Create a single infrastructure group and assign it to any available shard.
|
||||
The decision of assignment will be made by whatever shard that is processing request.
|
||||
Request generates corresponding 'infrastructure_group_create_response' response.
|
||||
type:
|
||||
type: string
|
||||
enum:
|
||||
- infrastructure_group_create
|
||||
infra_group_id:
|
||||
description:
|
||||
The 'infra_group_id', while represented as a string, is actually a stringified digit.
|
||||
type: string
|
||||
uuid:
|
||||
description:
|
||||
The underlying unique identifier of the request.
|
||||
The caller can expect a response with the same UUID value,
|
||||
effectively matching a request with a response, due to
|
||||
async nature of the Kafka bus.
|
||||
type: string
|
||||
format: uuid
|
||||
|
||||
infrastructure_group_create_to_shard:
|
||||
description:
|
||||
Create a single infrastructure group and assign it to a specific shard.
|
||||
While group's being assigned to specific shard, the handling of this request
|
||||
will be made by whatever shard that's received the request.
|
||||
Request generates corresponding 'infrastructure_group_create_response' response.
|
||||
(same as for 'infrastructure_group_create').
|
||||
type:
|
||||
type: string
|
||||
enum:
|
||||
- infrastructure_group_create_to_shard
|
||||
infra_group_id:
|
||||
description:
|
||||
The 'infra_group_id', while represented as a string, is actually a stringified digit.
|
||||
type: string
|
||||
shard_id:
|
||||
description:
|
||||
Specific shard (CGW) id that this group should be assigned to.
|
||||
If shard does not exist, request will fail.
|
||||
type: integer
|
||||
uuid:
|
||||
description:
|
||||
The underlying unique identifier of the request.
|
||||
The caller can expect a response with the same UUID value,
|
||||
effectively matching a request with a response, due to
|
||||
async nature of the Kafka bus.
|
||||
type: string
|
||||
format: uuid
|
||||
|
||||
infrastructure_group_delete:
|
||||
description:
|
||||
Destroy previously created infrastructure group.
|
||||
NOTE - also deassigns (if any) underlying assigned infras.
|
||||
Also works if any assigned infra is already connected to CGW.
|
||||
Request generates corresponding 'infrastructure_group_delete_response' response.
|
||||
type:
|
||||
type: string
|
||||
enum:
|
||||
- infrastructure_group_delete
|
||||
infra_group_id:
|
||||
description:
|
||||
The 'infra_group_id', while represented as a string, is actually a stringified digit.
|
||||
type: string
|
||||
uuid:
|
||||
description:
|
||||
The underlying unique identifier of the request.
|
||||
The caller can expect a response with the same UUID value,
|
||||
effectively matching a request with a response, due to
|
||||
async nature of the Kafka bus.
|
||||
type: string
|
||||
format: uuid
|
||||
|
||||
infrastructure_group_infras_add:
|
||||
description:
|
||||
Assign list of infras to specified group.
|
||||
Request generates corresponding 'infrastructure_group_infras_add_response' response.
|
||||
type:
|
||||
type: string
|
||||
enum:
|
||||
- infrastructure_group_infras_add
|
||||
infra_group_id:
|
||||
description:
|
||||
The 'infra_group_id', while represented as a string, is actually a stringified digit.
|
||||
type: string
|
||||
infra_group_infras:
|
||||
description:
|
||||
Array of infras (MAC address / mac serial, any form is accepted) that
|
||||
should get assigned to specified infra group.
|
||||
type: array
|
||||
items:
|
||||
type: string
|
||||
uuid:
|
||||
description:
|
||||
The underlying unique identifier of the request.
|
||||
The caller can expect a response with the same UUID value,
|
||||
effectively matching a request with a response, due to
|
||||
async nature of the Kafka bus.
|
||||
type: string
|
||||
format: uuid
|
||||
|
||||
infrastructure_group_infras_del:
|
||||
description:
|
||||
De-assign list of infras from specified group.
|
||||
Any connected infras will become un-assigned and thus - unaddressable.
|
||||
It's up to the caller to make sure to reassign them (if needed).
|
||||
Request generates corresponding 'infrastructure_group_infras_del_response' response.
|
||||
type:
|
||||
type: string
|
||||
enum:
|
||||
- infrastructure_group_infras_del
|
||||
infra_group_id:
|
||||
description:
|
||||
The 'infra_group_id', while represented as a string, is actually a stringified digit.
|
||||
type: string
|
||||
infra_group_infras:
|
||||
description:
|
||||
Array of infras (MAC address / mac serial, any form is accepted) that
|
||||
should get deassigned from specified infra group.
|
||||
type: array
|
||||
items:
|
||||
type: string
|
||||
uuid:
|
||||
description:
|
||||
The underlying unique identifier of the request.
|
||||
The caller can expect a response with the same UUID value,
|
||||
effectively matching a request with a response, due to
|
||||
async nature of the Kafka bus.
|
||||
type: string
|
||||
format: uuid
|
||||
|
||||
infrastructure_group_infra_message_enqueue:
|
||||
description:
|
||||
Enqueue a uCentral request for the specified infra device.
|
||||
This does not result in immediate execution of the underlying request,
|
||||
but rather caches the request in internal message queue, and the request
|
||||
will get executed whenever device is ready to receive one.
|
||||
Request generates corresponding 'infrastructure_group_infra_message_enqueue_response' response.
|
||||
Whenever the request gets completed (executed by the device),
|
||||
the corresponding 'infra_request_result' is also generated.
|
||||
type:
|
||||
type: string
|
||||
enum:
|
||||
- infrastructure_group_infra_message_enqueue
|
||||
infra_group_id:
|
||||
description:
|
||||
The 'infra_group_id', while represented as a string, is actually a stringified digit.
|
||||
type: string
|
||||
infra_group_infra:
|
||||
description:
|
||||
MAC (serial) of the infra to sink message down to.
|
||||
Must be a part of infra_group, either way request will fail
|
||||
with corresponding fail message.
|
||||
type: string
|
||||
msg:
|
||||
description:
|
||||
Complete uCentral-formatted JSON document request to the uCentral device.
|
||||
Should include the method and ID.
|
||||
type: object
|
||||
timeout:
|
||||
description:
|
||||
Timeout value for how long the execution should take.
|
||||
Whenever it elapses, the msg queue gets completely flushed
|
||||
and the timeout messages - 'infra_request_result' with status 'failed' are generated.
|
||||
type: string
|
||||
uuid:
|
||||
description:
|
||||
The underlying unique identifier of the request.
|
||||
The caller can expect a response with the same UUID value,
|
||||
effectively matching a request with a response, due to
|
||||
async nature of the Kafka bus.
|
||||
type: string
|
||||
format: uuid
|
||||
|
||||
rebalance_groups:
|
||||
description:
|
||||
Rebalance all infrastructure groups among all currently running/available
|
||||
CGW shards.
|
||||
Request generates corresponding 'rebalance_groups_response' response.
|
||||
type:
|
||||
type: string
|
||||
enum:
|
||||
- rebalance_groups
|
||||
infra_group_id:
|
||||
description:
|
||||
The 'infra_group_id', while represented as a string, is actually a stringified digit.
|
||||
The 'infra_group_id', while required, can be set to any value, as it's currently omitted
|
||||
and ignored.
|
||||
type: string
|
||||
uuid:
|
||||
description:
|
||||
The underlying unique identifier of the request.
|
||||
The caller can expect a response with the same UUID value,
|
||||
effectively matching a request with a response, due to
|
||||
async nature of the Kafka bus.
|
||||
type: string
|
||||
format: uuid
|
||||
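To make the request layout above concrete, here is a minimal, hypothetical producer sketch in Rust using the rdkafka and serde_json crates already listed in Cargo.toml. It publishes an 'infrastructure_group_infras_add' request to the default 'CnC' topic on a broker at localhost:9092; the group id, MAC and UUID values are placeholders, and using the group id as the record key is an assumption rather than a documented requirement:

```rust
use rdkafka::config::ClientConfig;
use rdkafka::producer::{FutureProducer, FutureRecord};
use std::time::Duration;

#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    // Plain (non-TLS) connection to a local broker; adjust for a real deployment.
    let producer: FutureProducer = ClientConfig::new()
        .set("bootstrap.servers", "localhost:9092")
        .create()?;

    // Request body following the 'infrastructure_group_infras_add' layout above.
    let payload = serde_json::json!({
        "type": "infrastructure_group_infras_add",
        "infra_group_id": "1",
        "infra_group_infras": ["02:00:00:00:00:00"],
        "uuid": "3b241101-e2bb-4255-8caf-4136c566a964"
    })
    .to_string();

    // Key selection is an assumption here; CGW replies may return a
    // 'kafka_partition_key' to use for follow-up requests (see cnc_res_api.yaml).
    producer
        .send(
            FutureRecord::to("CnC").key("1").payload(&payload),
            Duration::from_secs(5),
        )
        .await
        .map_err(|(err, _msg)| err)?;

    Ok(())
}
```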
api/cnc_res_api.yaml (new file, 253 lines)

@@ -0,0 +1,253 @@
|
||||
---
|
||||
# Kafka 'CnC_Res' (default) topic API list that defines response messages format
|
||||
# for each of the CnC request.
|
||||
# The request messages for which these results are generated can be found in the
|
||||
# 'cnc_api.yaml' file.
|
||||
infrastructure_group_create_response:
|
||||
description:
|
||||
Response to corresponding 'infrastructure_group_create'
|
||||
or 'infrastructure_group_create_to_shard' request(s).
|
||||
type:
|
||||
type: string
|
||||
enum:
|
||||
- infrastructure_group_create_response
|
||||
reporter_shard_id:
|
||||
description:
|
||||
ID of the shard that handled request and generated this response.
|
||||
type: integer
|
||||
infra_group_id:
|
||||
type: integer
|
||||
uuid:
|
||||
description:
|
||||
The underlying unique identifier of the request, to which
|
||||
this response is being generated.
|
||||
type: string
|
||||
format: uuid
|
||||
success:
|
||||
type: boolean
|
||||
error_message:
|
||||
description:
|
||||
Error message reporting why the request failed.
|
||||
Non-empty only if 'success' is false (e.g. request failed).
|
||||
type: string
|
||||
|
||||
infrastructure_group_delete_response:
|
||||
description:
|
||||
Response to corresponding 'infrastructure_group_delete' request.
|
||||
type:
|
||||
type: string
|
||||
enum:
|
||||
- infrastructure_group_delete_response
|
||||
reporter_shard_id:
|
||||
description:
|
||||
ID of the shard that handled request and generated this response.
|
||||
type: integer
|
||||
infra_group_id:
|
||||
type: integer
|
||||
uuid:
|
||||
description:
|
||||
The underlying unique identifier of the request, to which
|
||||
this response is being generated.
|
||||
type: string
|
||||
format: uuid
|
||||
success:
|
||||
type: boolean
|
||||
error_message:
|
||||
type: string
|
||||
description:
|
||||
Error message reporting why the request failed.
|
||||
Non-empty only if 'success' is false (e.g. request failed).
|
||||
|
||||
infrastructure_group_infras_add_response:
|
||||
description:
|
||||
Response to corresponding 'infrastructure_group_infras_add' request.
|
||||
type:
|
||||
type: string
|
||||
enum:
|
||||
- infrastructure_group_infras_add_response
|
||||
reporter_shard_id:
|
||||
description:
|
||||
ID of the shard that handled request and generated this response.
|
||||
type: integer
|
||||
infra_group_id:
|
||||
type: integer
|
||||
uuid:
|
||||
type: string
|
||||
format: uuid
|
||||
description:
|
||||
The underlying unique identifier of the request, to which
|
||||
this response is being generated.
|
||||
success:
|
||||
type: boolean
|
||||
error_message:
|
||||
type: string
|
||||
description:
|
||||
Error message reporting why the request failed.
|
||||
Non-empty only if 'success' is false (e.g. request failed).
|
||||
failed_infras:
|
||||
description:
|
||||
Array of infras (MAC address / mac serial, any form is accepted) that
|
||||
were not added (failed to be added) to the specified infra group.
|
||||
Potential cause - infra is a duplicate (already member of specified group),
|
||||
or it is already a member of some other group and should be removed
|
||||
from old group first.
|
||||
type: array
|
||||
items:
|
||||
type: string
|
||||
kafka_partition_key:
|
||||
description:
|
||||
CGW can return a special string value - kafka partition key,
|
||||
that can be used by generating consecutive CnC request,
|
||||
that will result in direct addressing of the shard that replied
|
||||
to the original request.
|
||||
It's an optimization technique to overcome the need of
|
||||
using relaying mechanism all the time.
|
||||
NOTE - this kafka key in replies _could_ be used by the callers,
|
||||
but it's not required. It's optional.
|
||||
Can be empty.
|
||||
Can be present even if request failed.
|
||||
type: string
|
||||
|
||||
infrastructure_group_infras_del_response:
|
||||
description:
|
||||
Response to corresponding 'infrastructure_group_infras_del' request.
|
||||
type:
|
||||
type: string
|
||||
enum:
|
||||
- infrastructure_group_infras_del_response
|
||||
reporter_shard_id:
|
||||
description:
|
||||
ID of the shard that handled request and generated this response.
|
||||
type: integer
|
||||
infra_group_id:
|
||||
type: integer
|
||||
uuid:
|
||||
type: string
|
||||
format: uuid
|
||||
description:
|
||||
The underlying unique identifier of the request, to which
|
||||
this response is being generated.
|
||||
success:
|
||||
type: boolean
|
||||
error_message:
|
||||
type: string
|
||||
description:
|
||||
Error message reporting why the request failed.
|
||||
Non-empty only if 'success' is false (e.g. request failed).
|
||||
failed_infras:
|
||||
description:
|
||||
Array of infras (MAC address / mac serial, any form is accepted) that
|
||||
were not removed (failed to be removed) from the specified infra group.
|
||||
Potential cause - infra is not member of specified group.
|
||||
type: array
|
||||
items:
|
||||
type: string
|
||||
kafka_partition_key:
|
||||
description:
|
||||
CGW can return a special string value - kafka partition key,
|
||||
that can be used by generating consecutive CnC request,
|
||||
that will result in direct addressing of the shard that replied
|
||||
to the original request.
|
||||
It's an optimization technique to overcome the need of
|
||||
using relaying mechanism all the time.
|
||||
NOTE - this kafka key in replies _could_ be used by the callers,
|
||||
but it's not required. It's optional.
|
||||
Can be empty.
|
||||
Can be present even if request failed.
|
||||
type: string
|
||||
|
||||
infrastructure_group_infra_message_enqueue_response:
|
||||
description:
|
||||
Response to corresponding 'infrastructure_group_infra_message_enqueue' request.
|
||||
type:
|
||||
type: string
|
||||
enum:
|
||||
- infrastructure_group_infra_message_enqueue_response
|
||||
reporter_shard_id:
|
||||
description:
|
||||
ID of the shard that handled request and generated this response.
|
||||
type: integer
|
||||
infra_group_id:
|
||||
type: integer
|
||||
uuid:
|
||||
type: string
|
||||
format: uuid
|
||||
description:
|
||||
The underlying unique identifier of the request, to which
|
||||
this response is being generated.
|
||||
success:
|
||||
type: boolean
|
||||
error_message:
|
||||
type: string
|
||||
description:
|
||||
Error message reporting why the request failed.
|
||||
Non-empty only if 'success' is false (e.g. request failed).
|
||||
kafka_partition_key:
|
||||
description:
|
||||
CGW can return a special string value - kafka partition key,
|
||||
that can be used by generating consecutive CnC request,
|
||||
that will result in direct addressing of the shard that replied
|
||||
to the original request.
|
||||
It's an optimization technique to overcome the need of
|
||||
using relaying mechanism all the time.
|
||||
NOTE - this kafka key in replies _could_ be used by the callers,
|
||||
but it's not required. It's optional.
|
||||
Can be empty.
|
||||
Can be present even if request failed.
|
||||
type: string
|
||||
|
||||
infra_request_result:
|
||||
description:
|
||||
Result of the underlying 'infrastructure_group_infra_message_enqueue' request execution.
|
||||
This result is generated whenever underlying infra finishes and responds
|
||||
to the request with status 'result' message that CGW handles internally.
|
||||
type:
|
||||
type: string
|
||||
enum:
|
||||
- infra_request_result
|
||||
reporter_shard_id:
|
||||
description:
|
||||
ID of the shard that handled request and generated this response.
|
||||
type: integer
|
||||
infra_group_id:
|
||||
type: integer
|
||||
uuid:
|
||||
type: string
|
||||
format: uuid
|
||||
description:
|
||||
The underlying unique identifier of the request, to which
|
||||
this response is being generated.
|
||||
success:
|
||||
type: boolean
|
||||
error_message:
|
||||
type: string
|
||||
description:
|
||||
Error message reporting why the request failed.
|
||||
Non-empty only if 'success' is false (e.g. request failed).
|
||||
|
||||
rebalance_groups_response:
|
||||
description:
|
||||
Response to corresponding 'rebalance_groups' request.
|
||||
type:
|
||||
type: string
|
||||
enum:
|
||||
- rebalance_groups_response
|
||||
reporter_shard_id:
|
||||
type: integer
|
||||
description:
|
||||
ID of the shard that handled request and generated this response.
|
||||
infra_group_id:
|
||||
type: integer
|
||||
uuid:
|
||||
type: string
|
||||
format: uuid
|
||||
description:
|
||||
The underlying unique identifier of the request, to which
|
||||
this response is being generated.
|
||||
success:
|
||||
type: boolean
|
||||
error_message:
|
||||
type: string
|
||||
description:
|
||||
Error message reporting why the request failed.
|
||||
Non-empty only if 'success' is false (e.g. request failed).
|
||||
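As an illustration of how a caller might correlate these responses with requests, below is a small, hypothetical consumer sketch in Rust (again using rdkafka and serde_json from Cargo.toml). It reads the default 'CnC_Res' topic and matches the 'uuid' field against the UUID of a previously sent request; the consumer group name, broker address and UUID are placeholder values:

```rust
use rdkafka::config::ClientConfig;
use rdkafka::consumer::{Consumer, StreamConsumer};
use rdkafka::Message;

#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    let consumer: StreamConsumer = ClientConfig::new()
        .set("bootstrap.servers", "localhost:9092")
        .set("group.id", "cnc-res-reader") // placeholder consumer group
        .set("auto.offset.reset", "earliest")
        .create()?;
    consumer.subscribe(&["CnC_Res"])?;

    // UUID of the request we are waiting for (placeholder value).
    let awaited_uuid = "3b241101-e2bb-4255-8caf-4136c566a964";

    loop {
        let msg = consumer.recv().await?;
        let Some(payload) = msg.payload() else { continue };
        let value: serde_json::Value = serde_json::from_slice(payload)?;

        // Correlate the async reply with the original request by UUID.
        if value["uuid"] == awaited_uuid {
            println!(
                "type={} success={} error={}",
                value["type"], value["success"], value["error_message"]
            );
            break;
        }
    }
    Ok(())
}
```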
api/events.yaml (new file, 194 lines)

@@ -0,0 +1,194 @@
|
||||
---
|
||||
# List of events and messages that CGW can raise as a reaction to different
|
||||
# events that happen within CGW (for example infra device connection).
|
||||
infra_join:
|
||||
description:
|
||||
Event, that CGW generates whenever assigned infra successfully connects to CGW.
|
||||
type:
|
||||
type: string
|
||||
enum:
|
||||
- infra_join
|
||||
reporter_shard_id:
|
||||
description:
|
||||
ID of the shard that handled request and generated this response.
|
||||
type: integer
|
||||
infra_group_id:
|
||||
type: integer
|
||||
infra_group_infra:
|
||||
description:
|
||||
MAC (serial) of the infra that successfully connected to CGW.
|
||||
type: string
|
||||
infra_public_ip:
|
||||
description:
|
||||
Peer address of the connected infra, as seen on the socket level
|
||||
of the CGW.
|
||||
type: string
|
||||
|
||||
infra_leave:
|
||||
description:
|
||||
Event that CGW generates whenever an assigned infra disconnects from the CGW.
|
||||
type:
|
||||
type: string
|
||||
enum:
|
||||
- infra_leave
|
||||
reporter_shard_id:
|
||||
description:
|
||||
ID of the shard that handled request and generated this response.
|
||||
type: integer
|
||||
infra_group_id:
|
||||
type: integer
|
||||
infra_group_infra:
|
||||
description:
|
||||
MAC (serial) of the infra that successfully connected to CGW.
|
||||
type: string
|
||||
|
||||
unassigned_infra_connection:
|
||||
description:
|
||||
Event, that CGW generates whenever un-assigned infra successfully connects to CGW.
|
||||
type:
|
||||
type: string
|
||||
enum:
|
||||
- unassigned_infra_connection
|
||||
reporter_shard_id:
|
||||
description:
|
||||
ID of the shard that handled request and generated this response.
|
||||
type: integer
|
||||
group_owner_shard_id:
|
||||
description:
|
||||
ID of the shard that is the actual owner of the infra group.
|
||||
type: integer
|
||||
infra_group_infra:
|
||||
description:
|
||||
MAC (serial) of the infra that successfully connected to CGW.
|
||||
type: string
|
||||
|
||||
foreign_infra_connection:
|
||||
description:
|
||||
Event, that CGW generates whenever foreign (assigned, but connected
|
||||
to the wrong CGW instance) infra successfully connects to CGW.
|
||||
type:
|
||||
type: string
|
||||
enum:
|
||||
- foreign_infra_connection
|
||||
reporter_shard_id:
|
||||
description:
|
||||
ID of the shard that handled request and generated this response.
|
||||
type: integer
|
||||
infra_group_id:
|
||||
type: integer
|
||||
infra_group_infra:
|
||||
description:
|
||||
MAC (serial) of the infra that successfully connected to CGW.
|
||||
type: string
|
||||
|
||||
infrastructure_group_infra_capabilities_changed:
|
||||
description:
|
||||
Event, that CGW generates whenever CGW detects capabilities change
|
||||
of the connected assigned infra.
|
||||
type:
|
||||
type: string
|
||||
enum:
|
||||
- infrastructure_group_infra_capabilities_changed
|
||||
reporter_shard_id:
|
||||
description:
|
||||
ID of the shard that handled request and generated this response.
|
||||
type: integer
|
||||
infra_group_id:
|
||||
type: integer
|
||||
infra_group_infra:
|
||||
description:
|
||||
MAC (serial) of the infra that successfully connected to CGW.
|
||||
type: string
|
||||
changes:
|
||||
description:
|
||||
List of detected delta changes / diff in capabilities.
|
||||
type: array
|
||||
items:
|
||||
type: object
|
||||
properties:
|
||||
changed:
|
||||
description:
|
||||
String-value representing value that changed
|
||||
type: string
|
||||
old:
|
||||
type: string
|
||||
new:
|
||||
type: string
|
||||
|
||||
ap_client_join:
|
||||
description:
|
||||
Event, that CGW generates whenever it detects topology
|
||||
change - a new WiFi client connection.
|
||||
type:
|
||||
type: string
|
||||
enum:
|
||||
- ap_client_join
|
||||
infra_group_id:
|
||||
type: integer
|
||||
infra_group_infra:
|
||||
description:
|
||||
MAC (serial) of the infra (AP) whose WiFi SSID the client connected to.
|
||||
type: string
|
||||
client:
|
||||
description:
|
||||
MAC (serial) of the infra client that joined.
|
||||
type: string
|
||||
ssid:
|
||||
description:
|
||||
SSID that the underlying infra client joined.
|
||||
type: string
|
||||
band:
|
||||
description:
|
||||
Band on which the underlying infra client joined.
|
||||
type: string
|
||||
|
||||
ap_client_leave:
|
||||
description:
|
||||
Event, that CGW generates whenever it detects topology
|
||||
change - a WiFi client disconnect.
|
||||
type:
|
||||
type: string
|
||||
enum:
|
||||
- ap_client_leave
|
||||
infra_group_id:
|
||||
type: integer
|
||||
infra_group_infra:
|
||||
description:
|
||||
MAC (serial) of the infra (AP) whose WiFi SSID the client disconnected from.
|
||||
type: string
|
||||
client:
|
||||
description:
|
||||
MAC (serial) of the infra client that disconnected.
|
||||
type: string
|
||||
band:
|
||||
description:
|
||||
Band on which the underlying infra client disconnected.
|
||||
type: string
|
||||
|
||||
ap_client_migrate:
|
||||
description:
|
||||
Event, that CGW generates whenever it detects topology
|
||||
change - existing WiFi client migrating from one infra to another
|
||||
(the WiFi client connects to AP_1 while it was connected to AP_0).
|
||||
type:
|
||||
type: string
|
||||
enum:
|
||||
- ap_client_migrate
|
||||
infra_group_id:
|
||||
type: integer
|
||||
to_infra_group_infra_device:
|
||||
description:
|
||||
MAC (serial) of the destination infra to which the WiFi client is migrating.
|
||||
type: string
|
||||
client:
|
||||
description:
|
||||
MAC (serial) of the infra client that joined.
|
||||
type: string
|
||||
to_ssid:
|
||||
description:
|
||||
Destination SSID that the underlying infra client is migrating to.
|
||||
type: string
|
||||
to_band:
|
||||
description:
|
||||
Destination band on which the underlying infra client is migrating.
|
||||
type: string
|
||||
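For readers consuming these events, the sketch below shows one possible way to model the 'infra_join' event with serde (already a dependency in Cargo.toml). The struct is hypothetical - CGW's internal types are not shown here - and the sample values are placeholders:

```rust
use serde::Deserialize;

// Hypothetical mirror of the 'infra_join' event layout documented above.
#[derive(Debug, Deserialize)]
struct InfraJoinEvent {
    #[serde(rename = "type")]
    kind: String,
    reporter_shard_id: i32,
    infra_group_id: i32,
    infra_group_infra: String,
    infra_public_ip: String,
}

fn main() -> serde_json::Result<()> {
    // Example payload with placeholder values.
    let raw = r#"{
        "type": "infra_join",
        "reporter_shard_id": 0,
        "infra_group_id": 1,
        "infra_group_infra": "02:00:00:00:00:00",
        "infra_public_ip": "198.51.100.7:51222"
    }"#;

    let event: InfraJoinEvent = serde_json::from_str(raw)?;
    println!("{event:?}");
    Ok(())
}
```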
run_cgw.sh (190 lines changed)

@@ -1,12 +1,15 @@
|
||||
#!/bin/bash
|
||||
|
||||
DEFAULT_ID=0
|
||||
DEFAULT_LOG_LEVEL="info"
|
||||
DEFAULT_LOG_LEVEL="debug"
|
||||
DEFAULT_GROUPS_CAPACITY=1000
|
||||
DEFAULT_GROUPS_THRESHOLD=50
|
||||
DEFAULT_GROUP_INFRAS_CAPACITY=2000
|
||||
|
||||
# By default - use default subnet's SRC ip to listen to gRPC requests
|
||||
DEFAULT_GRPC_LISTENING_IP="0.0.0.0"
|
||||
DEFAULT_GRPC_LISTENING_PORT=50051
|
||||
DEFAULT_GRPC_PUBLIC_HOST="localhost"
|
||||
DEFAULT_GRPC_PUBLIC_HOST="openlan_cgw"
|
||||
DEFAULT_GRPC_PUBLIC_PORT=50051
|
||||
|
||||
# By default - listen to all interfaces
|
||||
@@ -14,33 +17,46 @@ DEFAULT_WSS_IP="0.0.0.0"
|
||||
DEFAULT_WSS_PORT=15002
|
||||
DEFAULT_WSS_T_NUM=4
|
||||
|
||||
DEFAULT_CERTS_PATH="/etc/ssl/certs"
|
||||
DEFAULT_CERTS_PATH="`realpath ./utils/cert_generator/certs/server/`"
|
||||
DEFAULT_CLIENT_CERTS_PATH="`realpath ./utils/cert_generator/certs/client/`"
|
||||
DEFAULT_WSS_CAS="cas.pem"
|
||||
DEFAULT_WSS_CERT="cert.pem"
|
||||
DEFAULT_WSS_KEY="key.pem"
|
||||
DEFAULT_CLIENT_CERT="base.crt"
|
||||
DEFAULT_CLIENT_KEY="base.key"
|
||||
|
||||
DEFAULT_KAFKA_HOST="localhost"
|
||||
DEFAULT_KAFKA_HOST="docker-broker-1"
|
||||
DEFAULT_KAFKA_PORT=9092
|
||||
DEFAULT_KAFKA_CONSUME_TOPIC="CnC"
|
||||
DEFAULT_KAFKA_PRODUCE_TOPIC="CnC_Res"
|
||||
|
||||
DEFAULT_DB_HOST="localhost"
|
||||
DEFAULT_DB_HOST="docker-postgresql-1"
|
||||
DEFAULT_DB_PORT=5432
|
||||
DEFAULT_DB_NAME="cgw"
|
||||
DEFAULT_DB_USER="cgw"
|
||||
DEFAULT_DB_PASW="123"
|
||||
DEFAULT_DB_TLS="no"
|
||||
|
||||
DEFAULT_REDIS_HOST="localhost"
|
||||
DEFAULT_REDIS_HOST="docker-redis-1"
|
||||
DEFAULT_REDIS_PORT=6379
|
||||
DEFAULT_REDIS_TLS="no"
|
||||
|
||||
DEFAULT_METRICS_PORT=8080
|
||||
|
||||
CONTAINTER_CERTS_VOLUME="/etc/cgw/certs"
|
||||
CONTAINTER_NB_INFRA_CERTS_VOLUME="/etc/cgw/nb_infra/certs"
|
||||
DEFAULT_NB_INFRA_TLS="no"
|
||||
|
||||
DEFAULT_ALLOW_CERT_MISMATCH="no"
|
||||
DEFAULT_ALLOW_CERT_MISMATCH="yes"
|
||||
|
||||
DEFAULT_UCENTRAL_AP_DATAMODEL_URI="https://raw.githubusercontent.com/Telecominfraproject/wlan-ucentral-schema/main/ucentral.schema.json"
|
||||
DEFAULT_UCENTRAL_SWITCH_DATAMODEL_URI="https://raw.githubusercontent.com/Telecominfraproject/ols-ucentral-schema/main/ucentral.schema.json"
|
||||
|
||||
export CGW_LOG_LEVEL="${CGW_LOG_LEVEL:-$DEFAULT_LOG_LEVEL}"
|
||||
export CGW_ID="${CGW_ID:-$DEFAULT_ID}"
|
||||
export CGW_GROUPS_CAPACITY="${CGW_GROUPS_CAPACITY:-$DEFAULT_GROUPS_CAPACITY}"
|
||||
export CGW_GROUPS_THRESHOLD="${CGW_GROUPS_THRESHOLD:-$DEFAULT_GROUPS_THRESHOLD}"
|
||||
export CGW_GROUP_INFRAS_CAPACITY="${CGW_GROUP_INFRAS_CAPACITY:-$DEFAULT_GROUP_INFRAS_CAPACITY}"
|
||||
export CGW_WSS_IP="${CGW_WSS_IP:-$DEFAULT_WSS_IP}"
|
||||
export CGW_WSS_PORT="${CGW_WSS_PORT:-$DEFAULT_WSS_PORT}"
|
||||
export DEFAULT_WSS_THREAD_NUM="${DEFAULT_WSS_THREAD_NUM:-$DEFAULT_WSS_T_NUM}"
|
||||
@@ -60,58 +76,126 @@ export CGW_DB_PORT="${CGW_DB_PORT:-$DEFAULT_DB_PORT}"
|
||||
export CGW_DB_NAME="${CGW_DB_NAME:-$DEFAULT_DB_NAME}"
|
||||
export CGW_DB_USERNAME="${CGW_DB_USER:-$DEFAULT_DB_USER}"
|
||||
export CGW_DB_PASSWORD="${CGW_DB_PASS:-$DEFAULT_DB_PASW}"
|
||||
export CGW_DB_TLS="${CGW_DB_TLS:-$DEFAULT_DB_TLS}"
|
||||
export CGW_REDIS_HOST="${CGW_REDIS_HOST:-$DEFAULT_REDIS_HOST}"
|
||||
export CGW_REDIS_PORT="${CGW_REDIS_PORT:-$DEFAULT_REDIS_PORT}"
|
||||
export CGW_REDIS_TLS="${CGW_REDIS_TLS:-$DEFAULT_REDIS_TLS}"
|
||||
export CGW_METRICS_PORT="${CGW_METRICS_PORT:-$DEFAULT_METRICS_PORT}"
|
||||
export CGW_CERTS_PATH="${CGW_CERTS_PATH:-$DEFAULT_CERTS_PATH}"
|
||||
export CGW_ALLOW_CERT_MISMATCH="${CGW_ALLOW_CERT_MISMATCH:-$DEFAULT_ALLOW_CERT_MISMATCH}"
|
||||
export CGW_NB_INFRA_CERTS_PATH="${CGW_NB_INFRA_CERTS_PATH:-$DEFAULT_CERTS_PATH}"
|
||||
export CGW_NB_INFRA_TLS="${CGW_NB_INFRA_TLS:-$DEFAULT_NB_INFRA_TLS}"
|
||||
export CGW_UCENTRAL_AP_DATAMODEL_URI="${CGW_UCENTRAL_AP_DATAMODEL_URI:-$DEFAULT_UCENTRAL_AP_DATAMODEL_URI}"
|
||||
export CGW_UCENTRAL_SWITCH_DATAMODEL_URI="${CGW_UCENTRAL_SWITCH_DATAMODEL_URI:-$DEFAULT_UCENTRAL_SWITCH_DATAMODEL_URI}"
|
||||
export RUST_BACKTRACE=1
|
||||
|
||||
if [ -z "${CGW_REDIS_USERNAME}" ]; then
|
||||
export CGW_REDIS_USERNAME="${CGW_REDIS_USERNAME}"
|
||||
fi
|
||||
|
||||
if [ -z "${CGW_REDIS_PASSWORD}" ]; then
|
||||
export CGW_REDIS_PASSWORD="${CGW_REDIS_PASSWORD}"
|
||||
fi
|
||||
|
||||
if [ ! -f $CGW_CERTS_PATH/$CGW_WSS_CERT ] ||
|
||||
[ ! -f $CGW_CERTS_PATH/$CGW_WSS_KEY ] ||
|
||||
[ ! -f $CGW_CERTS_PATH/$CGW_WSS_CAS ] ||
|
||||
[ ! -f $DEFAULT_CLIENT_CERTS_PATH/$DEFAULT_CLIENT_CERT ] ||
|
||||
[ ! -f $DEFAULT_CLIENT_CERTS_PATH/$DEFAULT_CLIENT_KEY ]; then
|
||||
echo "WARNING: at specified path $CGW_CERTS_PATH either CAS, CERT or KEY is missing!"
|
||||
echo "WARNING: changing source folder for certificates to default: $DEFAULT_CERTS_PATH and generating self-signed..."
|
||||
export CGW_CERTS_PATH="$DEFAULT_CERTS_PATH";
|
||||
export CGW_WSS_CAS="$DEFAULT_WSS_CAS"
|
||||
export CGW_WSS_CERT="$DEFAULT_WSS_CERT"
|
||||
export CGW_WSS_KEY="$DEFAULT_WSS_KEY"
|
||||
export CGW_NB_INFRA_CERTS_PATH="$DEFAULT_CERTS_PATH"
|
||||
|
||||
cd ./utils/cert_generator/ && \
|
||||
rm ./certs/ca/*crt 2>&1 >/dev/null; \
|
||||
rm ./certs/ca/*key 2>&1 >/dev/null; \
|
||||
rm ./certs/server/*crt 2>&1 >/dev/null; \
|
||||
rm ./certs/server/*key 2>&1 >/dev/null; \
|
||||
rm ./certs/client/*crt 2>&1 >/dev/null; \
|
||||
rm ./certs/client/*key 2>&1 >/dev/null; \
|
||||
./generate_certs.sh -a && \
|
||||
./generate_certs.sh -s && \
|
||||
./generate_certs.sh -c 1 -m 02:00:00:00:00:00 && \
|
||||
cp ./certs/ca/ca.crt $DEFAULT_CERTS_PATH/$DEFAULT_WSS_CAS && \
|
||||
cp ./certs/server/gw.crt $DEFAULT_CERTS_PATH/cert.pem && \
|
||||
cp ./certs/server/gw.key $DEFAULT_CERTS_PATH/key.pem && \
|
||||
cp ./certs/client/*crt $DEFAULT_CLIENT_CERTS_PATH/$DEFAULT_CLIENT_CERT && \
|
||||
cp ./certs/client/*key $DEFAULT_CLIENT_CERTS_PATH/$DEFAULT_CLIENT_KEY && \
|
||||
echo "Generating self-signed certificates done!"
|
||||
fi
|
||||
|
||||
echo "Starting CGW..."
|
||||
echo "CGW LOG LEVEL : $CGW_LOG_LEVEL"
|
||||
echo "CGW ID : $CGW_ID"
|
||||
echo "CGW WSS THREAD NUM : $DEFAULT_WSS_THREAD_NUM"
|
||||
echo "CGW WSS IP/PORT : $CGW_WSS_IP:$CGW_WSS_PORT"
|
||||
echo "CGW WSS CAS : $CGW_WSS_CAS"
|
||||
echo "CGW WSS CERT : $CGW_WSS_CERT"
|
||||
echo "CGW WSS KEY : $CGW_WSS_KEY"
|
||||
echo "CGW GRPC PUBLIC HOST/PORT : $CGW_GRPC_PUBLIC_HOST:$CGW_GRPC_PUBLIC_PORT"
|
||||
echo "CGW GRPC LISTENING IP/PORT : $CGW_GRPC_LISTENING_IP:$CGW_GRPC_LISTENING_PORT"
|
||||
echo "CGW KAFKA HOST/PORT : $CGW_KAFKA_HOST:$CGW_KAFKA_PORT"
|
||||
echo "CGW KAFKA TOPIC : $CGW_KAFKA_CONSUME_TOPIC:$CGW_KAFKA_PRODUCE_TOPIC"
|
||||
echo "CGW DB NAME : $CGW_DB_NAME"
|
||||
echo "CGW DB HOST/PORT : $CGW_DB_HOST:$CGW_DB_PORT"
|
||||
echo "CGW REDIS HOST/PORT : $CGW_REDIS_HOST:$CGW_REDIS_PORT"
|
||||
echo "CGW METRICS PORT : $CGW_METRICS_PORT"
|
||||
echo "CGW CERTS PATH : $CGW_CERTS_PATH"
|
||||
echo "CGW ALLOW CERT MISMATCH : $CGW_ALLOW_CERT_MISMATCH"
|
||||
echo "CGW LOG LEVEL : $CGW_LOG_LEVEL"
|
||||
echo "CGW ID : $CGW_ID"
|
||||
echo "CGW GROUPS CAPACITY/THRESHOLD : $CGW_GROUPS_CAPACITY:$CGW_GROUPS_THRESHOLD"
|
||||
echo "CGW GROUP INFRAS CAPACITY : $CGW_GROUP_INFRAS_CAPACITY"
|
||||
echo "CGW WSS THREAD NUM : $DEFAULT_WSS_THREAD_NUM"
|
||||
echo "CGW WSS IP/PORT : $CGW_WSS_IP:$CGW_WSS_PORT"
|
||||
echo "CGW WSS CAS : $CGW_WSS_CAS"
|
||||
echo "CGW WSS CERT : $CGW_WSS_CERT"
|
||||
echo "CGW WSS KEY : $CGW_WSS_KEY"
|
||||
echo "CGW GRPC PUBLIC HOST/PORT : $CGW_GRPC_PUBLIC_HOST:$CGW_GRPC_PUBLIC_PORT"
|
||||
echo "CGW GRPC LISTENING IP/PORT : $CGW_GRPC_LISTENING_IP:$CGW_GRPC_LISTENING_PORT"
|
||||
echo "CGW KAFKA HOST/PORT : $CGW_KAFKA_HOST:$CGW_KAFKA_PORT"
|
||||
echo "CGW KAFKA TOPIC : $CGW_KAFKA_CONSUME_TOPIC:$CGW_KAFKA_PRODUCE_TOPIC"
|
||||
echo "CGW DB NAME : $CGW_DB_NAME"
|
||||
echo "CGW DB HOST/PORT : $CGW_DB_HOST:$CGW_DB_PORT"
|
||||
echo "CGW DB TLS : $CGW_DB_TLS"
|
||||
echo "CGW REDIS HOST/PORT : $CGW_REDIS_HOST:$CGW_REDIS_PORT"
|
||||
echo "CGW REDIS TLS : $CGW_REDIS_TLS"
|
||||
echo "CGW METRICS PORT : $CGW_METRICS_PORT"
|
||||
echo "CGW CERTS PATH : $CGW_CERTS_PATH"
|
||||
echo "CGW ALLOW CERT MISMATCH : $CGW_ALLOW_CERT_MISMATCH"
|
||||
echo "CGW NB INFRA CERTS PATH : $CGW_NB_INFRA_CERTS_PATH"
|
||||
echo "CGW NB INFRA TLS : $CGW_NB_INFRA_TLS"
|
||||
echo "CGW UCENTRAL AP DATAMODEL URI : $CGW_UCENTRAL_AP_DATAMODEL_URI"
|
||||
echo "CGW UCENTRAL SWITCH DATAMODEL URI : $CGW_UCENTRAL_SWITCH_DATAMODEL_URI"
|
||||
|
||||
docker run \
|
||||
--cap-add=SYS_PTRACE --security-opt seccomp=unconfined \
|
||||
-v $CGW_CERTS_PATH:$CONTAINTER_CERTS_VOLUME \
|
||||
-e CGW_LOG_LEVEL \
|
||||
-e CGW_ID \
|
||||
-e CGW_WSS_IP \
|
||||
-e CGW_WSS_PORT \
|
||||
-e DEFAULT_WSS_THREAD_NUM \
|
||||
-e CGW_WSS_CAS \
|
||||
-e CGW_WSS_CERT \
|
||||
-e CGW_WSS_KEY \
|
||||
-e CGW_GRPC_LISTENING_IP \
|
||||
-e CGW_GRPC_LISTENING_PORT \
|
||||
-e CGW_GRPC_PUBLIC_HOST \
|
||||
-e CGW_GRPC_PUBLIC_PORT \
|
||||
-e CGW_KAFKA_HOST \
|
||||
-e CGW_KAFKA_PORT \
|
||||
-e CGW_KAFKA_CONSUME_TOPIC \
|
||||
-e CGW_KAFKA_PRODUCE_TOPIC \
|
||||
-e CGW_DB_NAME \
|
||||
-e CGW_DB_HOST \
|
||||
-e CGW_DB_PORT \
|
||||
-e CGW_DB_USERNAME \
|
||||
-e CGW_DB_PASSWORD \
|
||||
-e CGW_REDIS_HOST \
|
||||
-e CGW_REDIS_PORT \
|
||||
-e CGW_FEATURE_TOPOMAP_ENABLE \
|
||||
-e CGW_METRICS_PORT \
|
||||
-e CGW_ALLOW_CERT_MISMATCH \
|
||||
-d -t --network=host --name $2 $1 ucentral-cgw
|
||||
-p $CGW_WSS_PORT:$CGW_WSS_PORT \
|
||||
-p $CGW_GRPC_PUBLIC_PORT:$CGW_GRPC_PUBLIC_PORT \
|
||||
-p $CGW_METRICS_PORT:$CGW_METRICS_PORT \
|
||||
--cap-add=SYS_PTRACE --security-opt seccomp=unconfined \
|
||||
-v $CGW_CERTS_PATH:$CONTAINTER_CERTS_VOLUME \
|
||||
-v $CGW_NB_INFRA_CERTS_PATH:$CONTAINTER_NB_INFRA_CERTS_VOLUME \
|
||||
-e CGW_LOG_LEVEL \
|
||||
-e CGW_ID \
|
||||
-e CGW_GROUPS_CAPACITY \
|
||||
-e CGW_GROUPS_THRESHOLD \
|
||||
-e CGW_GROUP_INFRAS_CAPACITY \
|
||||
-e CGW_WSS_IP \
|
||||
-e CGW_WSS_PORT \
|
||||
-e DEFAULT_WSS_THREAD_NUM \
|
||||
-e CGW_WSS_CAS \
|
||||
-e CGW_WSS_CERT \
|
||||
-e CGW_WSS_KEY \
|
||||
-e CGW_GRPC_LISTENING_IP \
|
||||
-e CGW_GRPC_LISTENING_PORT \
|
||||
-e CGW_GRPC_PUBLIC_HOST \
|
||||
-e CGW_GRPC_PUBLIC_PORT \
|
||||
-e CGW_KAFKA_HOST \
|
||||
-e CGW_KAFKA_PORT \
|
||||
-e CGW_KAFKA_CONSUME_TOPIC \
|
||||
-e CGW_KAFKA_PRODUCE_TOPIC \
|
||||
-e CGW_DB_NAME \
|
||||
-e CGW_DB_HOST \
|
||||
-e CGW_DB_PORT \
|
||||
-e CGW_DB_USERNAME \
|
||||
-e CGW_DB_PASSWORD \
|
||||
-e CGW_DB_TLS \
|
||||
-e CGW_REDIS_HOST \
|
||||
-e CGW_REDIS_PORT \
|
||||
-e CGW_REDIS_USERNAME \
|
||||
-e CGW_REDIS_PASSWORD \
|
||||
-e CGW_REDIS_TLS \
|
||||
-e CGW_FEATURE_TOPOMAP_ENABLE='1' \
|
||||
-e CGW_METRICS_PORT \
|
||||
-e CGW_ALLOW_CERT_MISMATCH \
|
||||
-e CGW_NB_INFRA_TLS \
|
||||
-e CGW_UCENTRAL_AP_DATAMODEL_URI \
|
||||
-e CGW_UCENTRAL_SWITCH_DATAMODEL_URI \
|
||||
-d -t --network=docker_cgw_network --name $2 $1 ucentral-cgw
|
||||
|
||||
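For reference, run_cgw.sh is normally invoked by the *run* Makefile target, which passes the image reference as the first argument and the container name as the second. A manual invocation might look like the sketch below - the image tag shown is hypothetical, and any CGW_* variable exported beforehand overrides the script defaults listed above:

```console
$ export CGW_ID=1
$ export CGW_WSS_PORT=15003
$ ./run_cgw.sh "openlan_cgw_img:latest" openlan_cgw_1
```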
src/cgw_app_args.rs (new file, 651 lines)

@@ -0,0 +1,651 @@
|
||||
use std::{
|
||||
env,
|
||||
net::Ipv4Addr,
|
||||
path::{Path, PathBuf},
|
||||
str::FromStr,
|
||||
};
|
||||
|
||||
use url::Url;
|
||||
|
||||
use crate::{
|
||||
cgw_errors::{Error, Result},
|
||||
AppCoreLogLevel,
|
||||
};
|
||||
|
||||
const CGW_DEFAULT_ID: i32 = 0;
|
||||
const CGW_DEFAULT_GROUPS_CAPACITY: i32 = 1000;
|
||||
const CGW_DEFAULT_GROUPS_THRESHOLD: i32 = 50;
|
||||
const CGW_DEFAULT_GROUP_INFRAS_CAPACITY: i32 = 2000;
|
||||
const CGW_DEFAULT_WSS_T_NUM: usize = 4;
|
||||
const CGW_DEFAULT_LOG_LEVEL: AppCoreLogLevel = AppCoreLogLevel::Debug;
|
||||
const CGW_DEFAULT_WSS_IP: Ipv4Addr = Ipv4Addr::new(0, 0, 0, 0);
|
||||
const CGW_DEFAULT_WSS_PORT: u16 = 15002;
|
||||
const CGW_DEFAULT_WSS_CAS: &str = "cas.pem";
|
||||
const CGW_DEFAULT_WSS_CERT: &str = "cert.pem";
|
||||
const CGW_DEFAULT_WSS_KEY: &str = "key.pem";
|
||||
const CGW_DEFAULT_GRPC_LISTENING_IP: Ipv4Addr = Ipv4Addr::new(0, 0, 0, 0);
|
||||
const CGW_DEFAULT_GRPC_LISTENING_PORT: u16 = 50051;
|
||||
const CGW_DEFAULT_GRPC_PUBLIC_HOST: &str = "localhost";
|
||||
const CGW_DEFAULT_GRPC_PUBLIC_PORT: u16 = 50051;
|
||||
const CGW_DEFAULT_KAFKA_HOST: &str = "localhost";
|
||||
const CGW_DEFAULT_KAFKA_PORT: u16 = 9092;
|
||||
const CGW_DEFAULT_KAFKA_CONSUME_TOPIC: &str = "CnC";
|
||||
const CGW_DEFAULT_KAFKA_PRODUCE_TOPIC: &str = "CnC_Res";
|
||||
const CGW_DEFAULT_DB_HOST: &str = "localhost";
|
||||
const CGW_DEFAULT_DB_PORT: u16 = 6379;
|
||||
const CGW_DEFAULT_DB_NAME: &str = "cgw";
|
||||
const CGW_DEFAULT_DB_USERNAME: &str = "cgw";
|
||||
const CGW_DEFAULT_DB_PASSWORD: &str = "123";
|
||||
const CGW_DEFAULT_DB_TLS: &str = "no";
|
||||
const CGW_DEFAULT_REDIS_HOST: &str = "localhost";
|
||||
const CGW_DEFAULT_REDIS_PORT: u16 = 6379;
|
||||
const CGW_DEFAULT_REDIS_TLS: &str = "no";
|
||||
const CGW_DEFAULT_ALLOW_CERT_MISMATCH: &str = "no";
|
||||
const CGW_DEFAULT_METRICS_PORT: u16 = 8080;
|
||||
const CGW_DEFAULT_TOPOMAP_STATE: bool = false;
|
||||
const CGW_DEFAULT_NB_INFRA_TLS: &str = "no";
|
||||
const CGW_DEFAULT_UCENTRAL_AP_DATAMODEL_URI: &str = "https://raw.githubusercontent.com/Telecominfraproject/wlan-ucentral-schema/main/ucentral.schema.json";
|
||||
const CGW_DEFAULT_UCENTRAL_SWITCH_DATAMODEL_URI: &str = "https://raw.githubusercontent.com/Telecominfraproject/ols-ucentral-schema/main/ucentral.schema.json";
|
||||
|
||||
pub struct CGWWSSArgs {
|
||||
/// Number of thread in a threadpool dedicated for handling secure websocket connections
|
||||
pub wss_t_num: usize,
|
||||
/// IP to listen for incoming WSS connection
|
||||
pub wss_ip: Ipv4Addr,
|
||||
/// PORT to listen for incoming WSS connection
|
||||
pub wss_port: u16,
|
||||
/// WSS CAS certificate (contains root and issuer certificates)
|
||||
pub wss_cas: String,
|
||||
/// WSS certificate
|
||||
pub wss_cert: String,
|
||||
/// WSS private key
|
||||
pub wss_key: String,
|
||||
/// Allow Missmatch
|
||||
pub allow_mismatch: bool,
|
||||
}
|
||||
|
||||
impl CGWWSSArgs {
|
||||
fn parse() -> Result<CGWWSSArgs> {
|
||||
let wss_t_num: usize = match env::var("DEFAULT_WSS_THREAD_NUM") {
|
||||
Ok(val) => match val.parse() {
|
||||
Ok(v) => v,
|
||||
Err(e) => {
|
||||
return Err(Error::AppArgsParser(format!(
|
||||
"Failed to parse DEFAULT_WSS_THREAD_NUM! Invalid value: {val}! Error: {e}"
|
||||
)));
|
||||
}
|
||||
},
|
||||
Err(_) => CGW_DEFAULT_WSS_T_NUM,
|
||||
};
|
||||
|
||||
let wss_ip: Ipv4Addr = match env::var("CGW_WSS_IP") {
|
||||
Ok(val) => match Ipv4Addr::from_str(val.as_str()) {
|
||||
Ok(v) => v,
|
||||
Err(e) => {
|
||||
return Err(Error::AppArgsParser(format!(
|
||||
"Failed to parse CGW_WSS_IP! Invalid value: {val}! Error: {e}"
|
||||
)));
|
||||
}
|
||||
},
|
||||
Err(_) => CGW_DEFAULT_WSS_IP,
|
||||
};
|
||||
|
||||
let wss_port: u16 = match env::var("CGW_WSS_PORT") {
|
||||
Ok(val) => match val.parse() {
|
||||
Ok(v) => v,
|
||||
Err(e) => {
|
||||
return Err(Error::AppArgsParser(format!(
|
||||
"Failed to parse CGW_WSS_PORT! Invalid value: {val}! Error: {e}"
|
||||
)));
|
||||
}
|
||||
},
|
||||
Err(_) => CGW_DEFAULT_WSS_PORT,
|
||||
};
|
||||
|
||||
let wss_cas: String = env::var("CGW_WSS_CAS").unwrap_or(CGW_DEFAULT_WSS_CAS.to_string());
|
||||
let wss_cert: String = env::var("CGW_WSS_CERT").unwrap_or(CGW_DEFAULT_WSS_CERT.to_string());
|
||||
let wss_key: String = env::var("CGW_WSS_KEY").unwrap_or(CGW_DEFAULT_WSS_KEY.to_string());
|
||||
|
||||
let mismatch: String = env::var("CGW_ALLOW_CERT_MISMATCH")
|
||||
.unwrap_or(CGW_DEFAULT_ALLOW_CERT_MISMATCH.to_string());
|
||||
let allow_mismatch = mismatch == "yes";
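// Note: only the exact value "yes" enables mismatch mode; any other value
// (or an unset variable, given the "no" default) keeps the strict check of
// the device MAC against the client certificate CN.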
|
||||
|
||||
Ok(CGWWSSArgs {
|
||||
wss_t_num,
|
||||
wss_ip,
|
||||
wss_port,
|
||||
wss_cas,
|
||||
wss_cert,
|
||||
wss_key,
|
||||
allow_mismatch,
|
||||
})
|
||||
}
|
||||
}
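// A minimal standalone sketch of the env-var parsing pattern repeated by the
// parse() implementations in this file; the helper name below is an
// illustrative assumption, not something CGW itself defines.
fn parse_env_or<T: std::str::FromStr>(name: &str, default: T) -> Result<T>
where
    T::Err: std::fmt::Display,
{
    match std::env::var(name) {
        // Variable is set: parse it, surfacing a descriptive error on failure.
        Ok(val) => val.parse::<T>().map_err(|e| {
            Error::AppArgsParser(format!(
                "Failed to parse {name}! Invalid value: {val}! Error: {e}"
            ))
        }),
        // Variable is not set: fall back to the provided default.
        Err(_) => Ok(default),
    }
}
//
// Example usage (the default shown is a placeholder, not CGW's actual default):
//     let wss_port: u16 = parse_env_or("CGW_WSS_PORT", 15002)?;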
|
||||
|
||||
pub struct CGWGRPCArgs {
|
||||
/// IP to listen for incoming GRPC connection
|
||||
pub grpc_listening_ip: Ipv4Addr,
|
||||
/// PORT to listen for incoming GRPC connection
|
||||
pub grpc_listening_port: u16,
|
||||
/// Public IP or hostname of the gRPC endpoint (as stored in the Redis record)
|
||||
pub grpc_public_host: String,
|
||||
/// Public PORT of the gRPC endpoint (as stored in the Redis record)
|
||||
pub grpc_public_port: u16,
|
||||
}
|
||||
|
||||
impl CGWGRPCArgs {
|
||||
fn parse() -> Result<CGWGRPCArgs> {
|
||||
let grpc_listening_ip: Ipv4Addr = match env::var("CGW_GRPC_LISTENING_IP") {
|
||||
Ok(val) => match Ipv4Addr::from_str(val.as_str()) {
|
||||
Ok(v) => v,
|
||||
Err(e) => {
|
||||
return Err(Error::AppArgsParser(format!(
|
||||
"Failed to parse CGW_GRPC_LISTENING_IP! Invalid value: {val}! Error: {e}"
|
||||
)));
|
||||
}
|
||||
},
|
||||
Err(_) => CGW_DEFAULT_GRPC_LISTENING_IP,
|
||||
};
|
||||
|
||||
let grpc_listening_port: u16 = match env::var("CGW_GRPC_LISTENING_PORT") {
|
||||
Ok(val) => match val.parse() {
|
||||
Ok(v) => v,
|
||||
Err(e) => {
|
||||
return Err(Error::AppArgsParser(format!(
|
||||
"Failed to parse CGW_GRPC_LISTENING_PORT! Invalid value: {val}! Error: {e}"
|
||||
)));
|
||||
}
|
||||
},
|
||||
Err(_) => CGW_DEFAULT_GRPC_LISTENING_PORT,
|
||||
};
|
||||
|
||||
let grpc_public_host: String = match env::var("CGW_GRPC_PUBLIC_HOST") {
|
||||
Ok(val) => {
|
||||
// 1. Try to parse variable into IpAddress
|
||||
match Ipv4Addr::from_str(val.as_str()) {
|
||||
// 2. If parsed - return IpAddress as String value
|
||||
Ok(ip) => ip.to_string(),
|
||||
// 3. If parse failed - probably hostname specified
|
||||
Err(_e) => val,
|
||||
}
|
||||
}
|
||||
// Env. variable is not setup - use default value
|
||||
Err(_) => CGW_DEFAULT_GRPC_PUBLIC_HOST.to_string(),
|
||||
};
|
||||
|
||||
let grpc_public_port: u16 = match env::var("CGW_GRPC_PUBLIC_PORT") {
|
||||
Ok(val) => match val.parse() {
|
||||
Ok(v) => v,
|
||||
Err(e) => {
|
||||
return Err(Error::AppArgsParser(format!(
|
||||
"Failed to parse CGW_GRPC_PUBLIC_PORT! Invalid value: {val}! Error: {e}"
|
||||
)));
|
||||
}
|
||||
},
|
||||
Err(_) => CGW_DEFAULT_GRPC_PUBLIC_PORT,
|
||||
};
|
||||
|
||||
Ok(CGWGRPCArgs {
|
||||
grpc_listening_ip,
|
||||
grpc_listening_port,
|
||||
grpc_public_host,
|
||||
grpc_public_port,
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
pub struct CGWKafkaArgs {
|
||||
/// IP or hostname to connect to KAFKA broker
|
||||
pub kafka_host: String,
|
||||
/// PORT to connect to KAFKA broker
|
||||
pub kafka_port: u16,
|
||||
/// KAFKA topic from where to consume messages
|
||||
#[allow(unused)]
|
||||
pub kafka_consume_topic: String,
|
||||
/// KAFKA topic where to produce messages
|
||||
#[allow(unused)]
|
||||
pub kafka_produce_topic: String,
|
||||
}
|
||||
|
||||
impl CGWKafkaArgs {
|
||||
fn parse() -> Result<CGWKafkaArgs> {
|
||||
let kafka_host: String = match env::var("CGW_KAFKA_HOST") {
|
||||
Ok(val) => {
|
||||
// 1. Try to parse variable into IpAddress
|
||||
match Ipv4Addr::from_str(val.as_str()) {
|
||||
// 2. If parsed - return IpAddress as String value
|
||||
Ok(ip) => ip.to_string(),
|
||||
// 3. If parse failed - probably hostname specified
|
||||
Err(_e) => val,
|
||||
}
|
||||
}
|
||||
// Env. variable is not setup - use default value
|
||||
Err(_) => CGW_DEFAULT_KAFKA_HOST.to_string(),
|
||||
};
|
||||
|
||||
let kafka_port: u16 = match env::var("CGW_KAFKA_PORT") {
|
||||
Ok(val) => match val.parse() {
|
||||
Ok(v) => v,
|
||||
Err(e) => {
|
||||
return Err(Error::AppArgsParser(format!(
|
||||
"Failed to parse CGW_KAFKA_PORT! Invalid value: {val}! Error: {e}"
|
||||
)));
|
||||
}
|
||||
},
|
||||
Err(_) => CGW_DEFAULT_KAFKA_PORT,
|
||||
};
|
||||
|
||||
let kafka_consume_topic: String = env::var("CGW_KAFKA_CONSUMER_TOPIC")
|
||||
.unwrap_or(CGW_DEFAULT_KAFKA_CONSUME_TOPIC.to_string());
|
||||
let kafka_produce_topic: String = env::var("CGW_KAFKA_PRODUCER_TOPIC")
|
||||
.unwrap_or(CGW_DEFAULT_KAFKA_PRODUCE_TOPIC.to_string());
|
||||
|
||||
Ok(CGWKafkaArgs {
|
||||
kafka_host,
|
||||
kafka_port,
|
||||
kafka_consume_topic,
|
||||
kafka_produce_topic,
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
pub struct CGWDBArgs {
|
||||
/// IP or hostname to connect to DB (PSQL)
|
||||
pub db_host: String,
|
||||
/// PORT to connect to DB (PSQL)
|
||||
pub db_port: u16,
|
||||
/// DB name to connect to (PSQL)
|
||||
pub db_name: String,
|
||||
/// DB username used for the connection to DB (PSQL)
|
||||
pub db_username: String,
|
||||
/// DB user password used for the connection to DB (PSQL)
|
||||
pub db_password: String,
|
||||
/// Utilize TLS connection with DB server
|
||||
pub db_tls: bool,
|
||||
}
|
||||
|
||||
impl CGWDBArgs {
|
||||
fn parse() -> Result<CGWDBArgs> {
|
||||
let db_host: String = match env::var("CGW_DB_HOST") {
|
||||
Ok(val) => {
|
||||
// 1. Try to parse variable into IpAddress
|
||||
match Ipv4Addr::from_str(val.as_str()) {
|
||||
// 2. If parsed - return IpAddress as String value
|
||||
Ok(ip) => ip.to_string(),
|
||||
// 3. If parse failed - probably hostname specified
|
||||
Err(_e) => val,
|
||||
}
|
||||
}
|
||||
// Env. variable is not setup - use default value
|
||||
Err(_) => CGW_DEFAULT_DB_HOST.to_string(),
|
||||
};
|
||||
|
||||
let db_port: u16 = match env::var("CGW_DB_PORT") {
|
||||
Ok(val) => match val.parse() {
|
||||
Ok(v) => v,
|
||||
Err(e) => {
|
||||
return Err(Error::AppArgsParser(format!(
|
||||
"Failed to parse CGW_DB_PORT! Invalid value: {val}! Error: {e}"
|
||||
)));
|
||||
}
|
||||
},
|
||||
Err(_) => CGW_DEFAULT_DB_PORT,
|
||||
};
|
||||
|
||||
let db_name: String = env::var("CGW_DB_NAME").unwrap_or(CGW_DEFAULT_DB_NAME.to_string());
|
||||
let db_username: String =
|
||||
env::var("CGW_DB_USERNAME").unwrap_or(CGW_DEFAULT_DB_USERNAME.to_string());
|
||||
let db_password: String =
|
||||
env::var("CGW_DB_PASSWORD").unwrap_or(CGW_DEFAULT_DB_PASSWORD.to_string());
|
||||
|
||||
let db_tls_var: String = env::var("CGW_DB_TLS").unwrap_or(CGW_DEFAULT_DB_TLS.to_string());
|
||||
let db_tls = db_tls_var == "yes";
|
||||
|
||||
Ok(CGWDBArgs {
|
||||
db_host,
|
||||
db_port,
|
||||
db_name,
|
||||
db_username,
|
||||
db_password,
|
||||
db_tls,
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
pub struct CGWRedisArgs {
|
||||
/// IP or hostname to connect to REDIS
|
||||
pub redis_host: String,
|
||||
/// PORT to connect to REDIS
|
||||
pub redis_port: u16,
|
||||
/// REDIS username
|
||||
pub redis_username: Option<String>,
|
||||
/// REDIS password
|
||||
pub redis_password: Option<String>,
|
||||
/// Utilize TLS connection with the Redis server
|
||||
pub redis_tls: bool,
|
||||
}
|
||||
|
||||
impl CGWRedisArgs {
|
||||
fn parse() -> Result<CGWRedisArgs> {
|
||||
let redis_host: String = match env::var("CGW_REDIS_HOST") {
|
||||
Ok(val) => {
|
||||
// 1. Try to parse variable into IpAddress
|
||||
match Ipv4Addr::from_str(val.as_str()) {
|
||||
// 2. If parsed - return IpAddress as String value
|
||||
Ok(ip) => ip.to_string(),
|
||||
// 3. If parse failed - probably hostname specified
|
||||
Err(_e) => val,
|
||||
}
|
||||
}
|
||||
// Env. variable is not setup - use default value
|
||||
Err(_) => CGW_DEFAULT_REDIS_HOST.to_string(),
|
||||
};
|
||||
|
||||
let redis_port: u16 = match env::var("CGW_REDIS_PORT") {
|
||||
Ok(val) => match val.parse() {
|
||||
Ok(v) => v,
|
||||
Err(e) => {
|
||||
return Err(Error::AppArgsParser(format!(
|
||||
"Failed to parse CGW_REDIS_PORT! Invalid value: {val}! Error: {e}"
|
||||
)));
|
||||
}
|
||||
},
|
||||
Err(_) => CGW_DEFAULT_REDIS_PORT,
|
||||
};
|
||||
|
||||
let redis_username: Option<String> = match env::var("CGW_REDIS_USERNAME") {
|
||||
Ok(username) => {
|
||||
if username.is_empty() {
|
||||
None
|
||||
} else {
|
||||
Some(username)
|
||||
}
|
||||
}
|
||||
Err(_) => None,
|
||||
};
|
||||
|
||||
let redis_password: Option<String> = match env::var("CGW_REDIS_PASSWORD") {
|
||||
Ok(password) => {
|
||||
if password.is_empty() {
|
||||
None
|
||||
} else {
|
||||
Some(password)
|
||||
}
|
||||
}
|
||||
Err(_) => None,
|
||||
};
|
||||
|
||||
let redis_tls_var: String =
|
||||
env::var("CGW_REDIS_TLS").unwrap_or(CGW_DEFAULT_REDIS_TLS.to_string());
|
||||
let redis_tls = redis_tls_var == "yes";
|
||||
|
||||
Ok(CGWRedisArgs {
|
||||
redis_host,
|
||||
redis_port,
|
||||
redis_username,
|
||||
redis_password,
|
||||
redis_tls,
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
pub struct CGWMetricsArgs {
|
||||
// PORT to connect to Metrics
|
||||
pub metrics_port: u16,
|
||||
}
|
||||
|
||||
impl CGWMetricsArgs {
|
||||
fn parse() -> Result<CGWMetricsArgs> {
|
||||
let metrics_port: u16 = match env::var("CGW_METRICS_PORT") {
|
||||
Ok(val) => match val.parse() {
|
||||
Ok(v) => v,
|
||||
Err(e) => {
|
||||
return Err(Error::AppArgsParser(format!(
|
||||
"Failed to parse CGW_METRICS_PORT! Invalid value: {val}! Error: {e}"
|
||||
)));
|
||||
}
|
||||
},
|
||||
Err(_) => CGW_DEFAULT_METRICS_PORT,
|
||||
};
|
||||
|
||||
Ok(CGWMetricsArgs { metrics_port })
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Clone, Debug)]
|
||||
pub enum CGWValionSchemaRef {
|
||||
SchemaUri(Url),
|
||||
SchemaPath(PathBuf),
|
||||
}
|
||||
|
||||
#[derive(Clone)]
|
||||
pub struct CGWValidationSchemaArgs {
|
||||
// URI to AP data model schema
|
||||
pub ap_schema_uri: CGWValionSchemaRef,
|
||||
// URI to Switch data model schema
|
||||
pub switch_schema_uri: CGWValionSchemaRef,
|
||||
}
|
||||
|
||||
impl CGWValidationSchemaArgs {
|
||||
fn parse() -> Result<CGWValidationSchemaArgs> {
|
||||
let ap_schema_uri: CGWValionSchemaRef = match env::var("CGW_UCENTRAL_AP_DATAMODEL_URI") {
|
||||
Ok(uri) => {
|
||||
// CGW_UCENTRAL_AP_DATAMODEL_URI is set
|
||||
if Path::new(&uri).exists() {
|
||||
// CGW_UCENTRAL_AP_DATAMODEL_URI - is path to local file and file exist
|
||||
match PathBuf::from_str(&uri) {
|
||||
Ok(path) => CGWValionSchemaRef::SchemaPath(path),
|
||||
Err(e) => {
|
||||
return Err(Error::AppArgsParser(format!(
|
||||
"Failed to parse CGW_UCENTRAL_AP_DATAMODEL_URI! Invalid URI: {uri}! Error: {e}"
|
||||
)));
|
||||
}
|
||||
}
|
||||
} else {
|
||||
match Url::parse(&uri) {
|
||||
Ok(url) => CGWValionSchemaRef::SchemaUri(url),
|
||||
Err(e) => {
|
||||
return Err(Error::AppArgsParser(format!(
|
||||
"Failed to parse CGW_UCENTRAL_AP_DATAMODEL_URI! Invalid URI: {uri}! Error: {e}"
|
||||
)));
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
// Environment variable was not set - use default
|
||||
Err(_e) => match Url::parse(CGW_DEFAULT_UCENTRAL_AP_DATAMODEL_URI) {
|
||||
// CGW_UCENTRAL_AP_DATAMODEL_URI was not set - try to use default
|
||||
Ok(uri) => CGWValionSchemaRef::SchemaUri(uri),
|
||||
Err(e) => {
|
||||
return Err(Error::AppArgsParser(format!(
|
||||
"Failed to parse default CGW_UCENTRAL_AP_DATAMODEL_URI! Invalid URI: {CGW_DEFAULT_UCENTRAL_AP_DATAMODEL_URI}! Error: {e}"
|
||||
)));
|
||||
}
|
||||
},
|
||||
};
|
||||
|
||||
let switch_schema_uri: CGWValionSchemaRef = match env::var(
|
||||
"CGW_UCENTRAL_SWITCH_DATAMODEL_URI",
|
||||
) {
|
||||
Ok(uri) => {
|
||||
// CGW_UCENTRAL_SWITCH_DATAMODEL_URI is set
|
||||
if Path::new(&uri).exists() {
|
||||
match PathBuf::from_str(&uri) {
|
||||
Ok(path) => CGWValionSchemaRef::SchemaPath(path),
|
||||
Err(e) => {
|
||||
return Err(Error::AppArgsParser(format!(
|
||||
"Failed to parse CGW_UCENTRAL_SWITCH_DATAMODEL_URI! Invalid URI: {uri}! Error: {e}"
|
||||
)));
|
||||
}
|
||||
}
|
||||
} else {
|
||||
match Url::parse(&uri) {
|
||||
Ok(url) => CGWValionSchemaRef::SchemaUri(url),
|
||||
Err(e) => {
|
||||
return Err(Error::AppArgsParser(format!(
|
||||
"Failed to parse CGW_UCENTRAL_SWITCH_DATAMODEL_URI! Invalid URI: {uri}! Error: {e}"
|
||||
)));
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
// Environment variable was not set - use default
|
||||
Err(_e) => match Url::from_str(CGW_DEFAULT_UCENTRAL_SWITCH_DATAMODEL_URI) {
|
||||
Ok(url) => CGWValionSchemaRef::SchemaUri(url),
|
||||
Err(e) => {
|
||||
return Err(Error::AppArgsParser(format!(
|
||||
"Failed to parse default CGW_UCENTRAL_SWITCH_DATAMODEL_URI! Invalid value: {CGW_DEFAULT_UCENTRAL_SWITCH_DATAMODEL_URI}! Error: {e}"
|
||||
)));
|
||||
}
|
||||
},
|
||||
};
|
||||
|
||||
Ok(CGWValidationSchemaArgs {
|
||||
ap_schema_uri,
|
||||
switch_schema_uri,
|
||||
})
|
||||
}
|
||||
}
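// Illustrative examples of the two accepted forms for the data model schema
// variables (the local path below is hypothetical):
//
//   CGW_UCENTRAL_AP_DATAMODEL_URI=/etc/cgw/ucentral.schema.json    -> SchemaPath (existing local file)
//   CGW_UCENTRAL_AP_DATAMODEL_URI=https://example.com/schema.json  -> SchemaUri  (anything else that parses as a URL)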
|
||||
|
||||
pub struct AppArgs {
|
||||
/// Loglevel of application
|
||||
pub log_level: AppCoreLogLevel,
|
||||
|
||||
/// CGW unique identifier (i32)
|
||||
pub cgw_id: i32,
|
||||
|
||||
/// CGW groups capacity (i32)
|
||||
pub cgw_groups_capacity: i32,
|
||||
|
||||
/// CGW groups threshold (i32)
|
||||
pub cgw_groups_threshold: i32,
|
||||
|
||||
/// CGW group infras capacity (i32)
|
||||
pub cgw_group_infras_capacity: i32,
|
||||
|
||||
/// Topomap feature status (enabled/disabled)
|
||||
pub feature_topomap_enabled: bool,
|
||||
|
||||
/// CGW Websocket args
|
||||
pub wss_args: CGWWSSArgs,
|
||||
|
||||
/// CGW GRPC args
|
||||
pub grpc_args: CGWGRPCArgs,
|
||||
|
||||
/// CGW Kafka args
|
||||
pub kafka_args: CGWKafkaArgs,
|
||||
|
||||
/// CGW DB args
|
||||
pub db_args: CGWDBArgs,
|
||||
|
||||
/// CGW Redis args
|
||||
pub redis_args: CGWRedisArgs,
|
||||
|
||||
/// CGW Metrics args
|
||||
pub metrics_args: CGWMetricsArgs,
|
||||
|
||||
/// CGW Validation schema URI args
|
||||
pub validation_schema: CGWValidationSchemaArgs,
|
||||
}
|
||||
|
||||
impl AppArgs {
|
||||
pub fn parse() -> Result<Self> {
|
||||
let log_level: AppCoreLogLevel = match env::var("CGW_LOG_LEVEL") {
|
||||
Ok(val) => match val.parse() {
|
||||
Ok(v) => v,
|
||||
Err(_e) => {
|
||||
return Err(Error::AppArgsParser(format!(
|
||||
"Failed to parse CGW_LOG_LEVEL! Invalid value: {val}! Error: (unknown)"
|
||||
)));
|
||||
}
|
||||
},
|
||||
Err(_) => CGW_DEFAULT_LOG_LEVEL,
|
||||
};
|
||||
|
||||
let cgw_id: i32 = match env::var("CGW_ID") {
|
||||
Ok(val) => match val.parse() {
|
||||
Ok(v) => v,
|
||||
Err(e) => {
|
||||
return Err(Error::AppArgsParser(format!(
|
||||
"Failed to parse CGW_ID! Invalid value: {val}! Error: {e}"
|
||||
)));
|
||||
}
|
||||
},
|
||||
Err(_) => CGW_DEFAULT_ID,
|
||||
};
|
||||
|
||||
let cgw_groups_capacity: i32 = match env::var("CGW_GROUPS_CAPACITY") {
|
||||
Ok(val) => match val.parse() {
|
||||
Ok(v) => v,
|
||||
Err(e) => {
|
||||
return Err(Error::AppArgsParser(format!(
|
||||
"Failed to parse CGW_GROUPS_CAPACITY! Invalid value: {val}! Error: {e}"
|
||||
)));
|
||||
}
|
||||
},
|
||||
Err(_) => CGW_DEFAULT_GROUPS_CAPACITY,
|
||||
};
|
||||
|
||||
let cgw_groups_threshold: i32 = match env::var("CGW_GROUPS_THRESHOLD") {
|
||||
Ok(val) => match val.parse() {
|
||||
Ok(v) => v,
|
||||
Err(e) => {
|
||||
return Err(Error::AppArgsParser(format!(
|
||||
"Failed to parse CGW_GROUPS_CAPACITY! Invalid value: {val}! Error: {e}"
|
||||
)));
|
||||
}
|
||||
},
|
||||
Err(_) => CGW_DEFAULT_GROUPS_THRESHOLD,
|
||||
};
|
||||
|
||||
let cgw_group_infras_capacity: i32 = match env::var("CGW_GROUP_INFRAS_CAPACITY") {
|
||||
Ok(val) => match val.parse() {
|
||||
Ok(v) => v,
|
||||
Err(e) => {
|
||||
return Err(Error::AppArgsParser(format!(
|
||||
"Failed to parse CGW_GROUP_INFRAS_CAPACITY! Invalid value: {val}! Error: {e}"
|
||||
)));
|
||||
}
|
||||
},
|
||||
Err(_) => CGW_DEFAULT_GROUP_INFRAS_CAPACITY,
|
||||
};
|
||||
|
||||
let feature_topomap_enabled: bool = match env::var("CGW_FEATURE_TOPOMAP_ENABLE") {
|
||||
Ok(_) => true,
|
||||
Err(_) => CGW_DEFAULT_TOPOMAP_STATE,
|
||||
};
|
||||
|
||||
let nb_infra_tls_var: String =
|
||||
env::var("CGW_NB_INFRA_TLS").unwrap_or(CGW_DEFAULT_NB_INFRA_TLS.to_string());
|
||||
let nb_infra_tls = nb_infra_tls_var == "yes";
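// Setting CGW_NB_INFRA_TLS=yes force-enables TLS towards both Redis and the
// DB (PSQL), overriding their individual CGW_REDIS_TLS / CGW_DB_TLS flags
// (see the override block below).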
|
||||
|
||||
let wss_args = CGWWSSArgs::parse()?;
|
||||
let grpc_args = CGWGRPCArgs::parse()?;
|
||||
let kafka_args = CGWKafkaArgs::parse()?;
|
||||
let mut db_args = CGWDBArgs::parse()?;
|
||||
let mut redis_args = CGWRedisArgs::parse()?;
|
||||
let metrics_args = CGWMetricsArgs::parse()?;
|
||||
let validation_schema = CGWValidationSchemaArgs::parse()?;
|
||||
|
||||
if nb_infra_tls {
|
||||
redis_args.redis_tls = nb_infra_tls;
|
||||
db_args.db_tls = nb_infra_tls;
|
||||
}
|
||||
|
||||
Ok(AppArgs {
|
||||
log_level,
|
||||
cgw_id,
|
||||
feature_topomap_enabled,
|
||||
wss_args,
|
||||
grpc_args,
|
||||
kafka_args,
|
||||
db_args,
|
||||
redis_args,
|
||||
metrics_args,
|
||||
validation_schema,
|
||||
cgw_groups_capacity,
|
||||
cgw_groups_threshold,
|
||||
cgw_group_infras_capacity,
|
||||
})
|
||||
}
|
||||
}
|
||||
@@ -2,6 +2,7 @@ use crate::{
|
||||
cgw_connection_server::{CGWConnectionServer, CGWConnectionServerReqMsg},
|
||||
cgw_device::{CGWDeviceCapabilities, CGWDeviceType},
|
||||
cgw_errors::{Error, Result},
|
||||
cgw_nb_api_listener::cgw_construct_infra_request_result_msg,
|
||||
cgw_ucentral_messages_queue_manager::{
|
||||
CGWUCentralMessagesQueueItem, CGWUCentralMessagesQueueState, CGW_MESSAGES_QUEUE,
|
||||
MESSAGE_TIMEOUT_DURATION,
|
||||
@@ -19,6 +20,8 @@ use futures_util::{
|
||||
stream::{SplitSink, SplitStream},
|
||||
FutureExt, SinkExt, StreamExt,
|
||||
};
|
||||
use uuid::Uuid;
|
||||
|
||||
use std::{net::SocketAddr, str::FromStr, sync::Arc};
|
||||
use tokio::{
|
||||
net::TcpStream,
|
||||
@@ -63,24 +66,34 @@ enum CGWUCentralMessageProcessorState {
|
||||
ResultPending,
|
||||
}
|
||||
|
||||
impl std::fmt::Display for CGWUCentralMessageProcessorState {
|
||||
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
|
||||
match self {
|
||||
CGWUCentralMessageProcessorState::Idle => write!(f, "Idle"),
|
||||
CGWUCentralMessageProcessorState::ResultPending => write!(f, "ResultPending"),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
pub struct CGWConnectionProcessor {
|
||||
cgw_server: Arc<CGWConnectionServer>,
|
||||
pub serial: MacAddress,
|
||||
pub addr: SocketAddr,
|
||||
pub idx: i64,
|
||||
pub group_id: i32,
|
||||
pub feature_topomap_enabled: bool,
|
||||
pub device_type: CGWDeviceType,
|
||||
}
|
||||
|
||||
impl CGWConnectionProcessor {
|
||||
pub fn new(server: Arc<CGWConnectionServer>, conn_idx: i64, addr: SocketAddr) -> Self {
|
||||
pub fn new(server: Arc<CGWConnectionServer>, addr: SocketAddr) -> Self {
|
||||
let conn_processor: CGWConnectionProcessor = CGWConnectionProcessor {
|
||||
cgw_server: server.clone(),
|
||||
serial: MacAddress::default(),
|
||||
addr,
|
||||
idx: conn_idx,
|
||||
group_id: 0,
|
||||
feature_topomap_enabled: server.feature_topomap_enabled,
|
||||
// Default to AP; this is safe, as the value is overwritten later on
|
||||
device_type: CGWDeviceType::CGWDeviceAP,
|
||||
};
|
||||
|
||||
conn_processor
|
||||
@@ -92,7 +105,24 @@ impl CGWConnectionProcessor {
|
||||
client_cn: MacAddress,
|
||||
allow_mismatch: bool,
|
||||
) -> Result<()> {
|
||||
let ws_stream = tokio_tungstenite::accept_async(tls_stream).await?;
|
||||
let ws_stream = tokio::select! {
|
||||
_val = tokio_tungstenite::accept_async(tls_stream) => {
|
||||
match _val {
|
||||
Ok(s) => s,
|
||||
Err(e) => {
|
||||
error!("Failed to accept TLS stream from: {}! Reason: {}. Closing connection",
|
||||
self.addr, e);
|
||||
return Err(Error::ConnectionProcessor("Failed to accept TLS stream!"));
|
||||
}
|
||||
}
|
||||
}
|
||||
// TODO: configurable duration (upon server creation)
|
||||
_val = sleep(Duration::from_millis(15000)) => {
|
||||
error!("Failed to accept TLS stream from: {}! Closing connection", self.addr);
|
||||
return Err(Error::ConnectionProcessor("Failed to accept TLS stream for too long"));
|
||||
}
|
||||
|
||||
};
|
||||
|
||||
let (sink, mut stream) = ws_stream.split();
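// Note: the tokio::select! above bounds the TLS/WebSocket handshake to roughly
// 15 seconds, so a peer that never completes the handshake is dropped instead
// of pinning this task indefinitely.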
|
||||
|
||||
@@ -105,15 +135,15 @@ impl CGWConnectionProcessor {
|
||||
match _val {
|
||||
Some(m) => m,
|
||||
None => {
|
||||
error!("no connect message received from {}, closing connection", self.addr);
|
||||
error!("No connect message received from: {}! Closing connection!", self.addr);
|
||||
return Err(Error::ConnectionProcessor("No connect message received"));
|
||||
}
|
||||
}
|
||||
}
|
||||
// TODO: configurable duration (upon server creation)
|
||||
_val = sleep(Duration::from_millis(30000)) => {
|
||||
error!("no message received from {}, closing connection", self.addr);
|
||||
return Err(Error::ConnectionProcessor("No message receive for too long"));
|
||||
error!("No message received from: {}! Closing connection", self.addr);
|
||||
return Err(Error::ConnectionProcessor("No message received for too long"));
|
||||
}
|
||||
};
|
||||
|
||||
@@ -123,7 +153,7 @@ impl CGWConnectionProcessor {
|
||||
Ok(m) => m,
|
||||
Err(e) => {
|
||||
error!(
|
||||
"established connection with device, but failed to receive any messages\n{e}"
|
||||
"Established connection with device, but failed to receive any messages! Error: {e}"
|
||||
);
|
||||
return Err(Error::ConnectionProcessor(
|
||||
"Established connection with device, but failed to receive any messages",
|
||||
@@ -139,7 +169,7 @@ impl CGWConnectionProcessor {
|
||||
}
|
||||
Err(_e) => {
|
||||
error!(
|
||||
"failed to recv connect message from {}, closing connection",
|
||||
"Failed to receive connect message from: {}! Closing connection!",
|
||||
self.addr
|
||||
);
|
||||
return Err(Error::ConnectionProcessor(
|
||||
@@ -160,14 +190,17 @@ impl CGWConnectionProcessor {
|
||||
));
|
||||
} else {
|
||||
debug!(
|
||||
"The client MAC address {} and clinet certificate CN {} chech passed!",
|
||||
"The client MAC address {} and clinet certificate CN {} chech passed",
|
||||
evt.serial.to_hex_string(),
|
||||
client_cn.to_hex_string()
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
debug!("Done Parse Connect Event {}", evt.serial.to_hex_string());
|
||||
debug!(
|
||||
"Parse Connect Event done! Device serial: {}",
|
||||
evt.serial.to_hex_string()
|
||||
);
|
||||
|
||||
let mut caps: CGWDeviceCapabilities = Default::default();
|
||||
match evt.evt_type {
|
||||
@@ -180,47 +213,54 @@ impl CGWConnectionProcessor {
|
||||
caps.label_macaddr = c.capabilities.label_macaddr;
|
||||
}
|
||||
_ => warn!(
|
||||
"Device {} is not abiding the protocol: first message - CONNECT - expected",
|
||||
"Device {} is not abiding the protocol! First message expected to receive: CONNECT!",
|
||||
evt.serial
|
||||
),
|
||||
}
|
||||
|
||||
self.serial = evt.serial;
|
||||
let device_type = CGWDeviceType::from_str(caps.platform.as_str())?;
|
||||
|
||||
self.device_type = match CGWDeviceType::from_str(caps.platform.as_str()) {
|
||||
Ok(dev_type) => dev_type,
|
||||
Err(_) => {
|
||||
warn!("Failed to parse device {} type!", self.serial);
|
||||
return Err(Error::ConnectionProcessor("Failed to parse device type"));
|
||||
}
|
||||
};
|
||||
|
||||
// TODO: we accepted tls stream and split the WS into RX TX part,
|
||||
// now we have to ASK cgw_connection_server's permission whether
|
||||
// we can proceed on with this underlying connection.
|
||||
// cgw_connection_server has the authoritative decision on whether
|
||||
// we can proceed.
|
||||
debug!("Sending ACK req for {}", self.serial);
|
||||
debug!("Sending ACK request for device serial: {}", self.serial);
|
||||
let (mbox_tx, mut mbox_rx) = unbounded_channel::<CGWConnectionProcessorReqMsg>();
|
||||
let msg = CGWConnectionServerReqMsg::AddNewConnection(evt.serial, caps, mbox_tx);
|
||||
let msg = CGWConnectionServerReqMsg::AddNewConnection(evt.serial, self.addr, caps, mbox_tx);
|
||||
self.cgw_server
|
||||
.enqueue_mbox_message_to_cgw_server(msg)
|
||||
.await;
|
||||
|
||||
let ack = mbox_rx.recv().await;
|
||||
debug!("GOT ACK resp for {}", self.serial);
|
||||
debug!("Got ACK response for device serial: {}", self.serial);
|
||||
if let Some(m) = ack {
|
||||
match m {
|
||||
CGWConnectionProcessorReqMsg::AddNewConnectionAck(gid) => {
|
||||
debug!(
|
||||
"websocket connection established: {} {} gid {gid}",
|
||||
"WebSocket connection established! Address: {}, serial: {} gid {gid}",
|
||||
self.addr, evt.serial
|
||||
);
|
||||
self.group_id = gid;
|
||||
}
|
||||
_ => {
|
||||
return Err(Error::ConnectionProcessor(
|
||||
"Unexpected response from server, expected ACK/NOT ACK)",
|
||||
"Unexpected response from server! Expected: ACK/NOT ACK",
|
||||
));
|
||||
}
|
||||
}
|
||||
} else {
|
||||
info!("connection server declined connection, websocket connection {} {} cannot be established",
|
||||
info!("Connection server declined connection! WebSocket connection for address: {}, serial: {} cannot be established!",
|
||||
self.addr, evt.serial);
|
||||
return Err(Error::ConnectionProcessor("Websocker connection declined"));
|
||||
return Err(Error::ConnectionProcessor("WebSocket connection declined"));
|
||||
}
|
||||
|
||||
// Remove device from disconnected device list
|
||||
@@ -247,13 +287,10 @@ impl CGWConnectionProcessor {
|
||||
queue_lock
|
||||
.set_device_queue_state(&evt.serial, CGWUCentralMessagesQueueState::RxTx)
|
||||
.await;
|
||||
} else {
|
||||
queue_lock.create_device_messages_queue(&evt.serial).await;
|
||||
}
|
||||
}
|
||||
|
||||
self.process_connection(stream, sink, mbox_rx, device_type)
|
||||
.await;
|
||||
self.process_connection(stream, sink, mbox_rx).await;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
@@ -261,13 +298,14 @@ impl CGWConnectionProcessor {
|
||||
async fn process_wss_rx_msg(
|
||||
&self,
|
||||
msg: std::result::Result<Message, tungstenite::error::Error>,
|
||||
device_type: CGWDeviceType,
|
||||
fsm_state: &mut CGWUCentralMessageProcessorState,
|
||||
pending_req_id: u64,
|
||||
pending_req_uuid: Uuid,
|
||||
) -> Result<CGWConnectionState> {
|
||||
// Make sure we always track, as accurately as possible, the time
// the event was received (where needed).
|
||||
let timestamp = Local::now();
|
||||
let mut kafaka_msg: String = String::new();
|
||||
|
||||
match msg {
|
||||
Ok(msg) => match msg {
|
||||
@@ -275,19 +313,37 @@ impl CGWConnectionProcessor {
|
||||
return Ok(CGWConnectionState::ClosedGracefully);
|
||||
}
|
||||
Text(payload) => {
|
||||
if let Ok(evt) =
|
||||
cgw_ucentral_event_parse(&device_type, &payload, timestamp.timestamp())
|
||||
{
|
||||
if let Ok(evt) = cgw_ucentral_event_parse(
|
||||
&self.device_type,
|
||||
self.feature_topomap_enabled,
|
||||
&payload,
|
||||
timestamp.timestamp(),
|
||||
) {
|
||||
kafaka_msg.clone_from(&payload);
|
||||
if let CGWUCentralEventType::State(_) = evt.evt_type {
|
||||
if let Some(decompressed) = evt.decompressed.clone() {
|
||||
kafaka_msg = decompressed;
|
||||
}
|
||||
if self.feature_topomap_enabled {
|
||||
let topo_map = CGWUCentralTopologyMap::get_ref();
|
||||
topo_map.process_state_message(&device_type, evt).await;
|
||||
topo_map.debug_dump_map().await;
|
||||
|
||||
// TODO: remove this Arc clone:
|
||||
// Dirty hack for now: pass Arc ref of srv to topo map;
|
||||
// Future rework and refactoring would require to separate
|
||||
// NB api from being an internal obj of conn_server to be a
|
||||
// standalone (singleton?) object.
|
||||
topo_map.enqueue_event(
|
||||
evt,
|
||||
self.device_type,
|
||||
self.serial,
|
||||
self.group_id,
|
||||
self.cgw_server.clone(),
|
||||
);
|
||||
}
|
||||
} else if let CGWUCentralEventType::Reply(content) = evt.evt_type {
|
||||
if *fsm_state != CGWUCentralMessageProcessorState::ResultPending {
|
||||
error!(
|
||||
"Unexpected FSM {:?} state! Expected: ResultPending",
|
||||
"Unexpected FSM state: {}! Expected: ResultPending",
|
||||
*fsm_state
|
||||
);
|
||||
}
|
||||
@@ -300,20 +356,40 @@ impl CGWConnectionProcessor {
|
||||
}
|
||||
|
||||
*fsm_state = CGWUCentralMessageProcessorState::Idle;
|
||||
debug!("Got reply event for pending request id: {}", pending_req_id);
|
||||
debug!("Got reply event for pending request id: {pending_req_id}");
|
||||
if let Ok(resp) = cgw_construct_infra_request_result_msg(
|
||||
self.cgw_server.get_local_id(),
|
||||
pending_req_uuid,
|
||||
pending_req_id,
|
||||
true,
|
||||
None,
|
||||
) {
|
||||
self.cgw_server
|
||||
.enqueue_mbox_message_from_cgw_to_nb_api(self.group_id, resp);
|
||||
} else {
|
||||
error!("Failed to construct rebalance_group message!");
|
||||
}
|
||||
} else if let CGWUCentralEventType::RealtimeEvent(_) = evt.evt_type {
|
||||
if self.feature_topomap_enabled {
|
||||
let topo_map = CGWUCentralTopologyMap::get_ref();
|
||||
topo_map
|
||||
.process_device_topology_event(&device_type, evt)
|
||||
.await;
|
||||
topo_map.debug_dump_map().await;
|
||||
// TODO: remove this Arc clone:
|
||||
// Dirty hack for now: pass Arc ref of srv to topo map;
|
||||
// Future rework and refactoring would require to separate
|
||||
// NB api from being an internal obj of conn_server to be a
|
||||
// standalone (singleton?) object.
|
||||
topo_map.enqueue_event(
|
||||
evt,
|
||||
self.device_type,
|
||||
self.serial,
|
||||
self.group_id,
|
||||
self.cgw_server.clone(),
|
||||
);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
self.cgw_server
|
||||
.enqueue_mbox_message_from_device_to_nb_api_c(self.group_id, payload)?;
|
||||
.enqueue_mbox_message_from_device_to_nb_api_c(self.group_id, kafaka_msg)?;
|
||||
return Ok(CGWConnectionState::IsActive);
|
||||
}
|
||||
Ping(_t) => {
|
||||
@@ -342,11 +418,11 @@ impl CGWConnectionProcessor {
|
||||
let processor_mac = self.serial;
|
||||
match msg {
|
||||
CGWConnectionProcessorReqMsg::AddNewConnectionShouldClose => {
|
||||
debug!("MBOX_IN: AddNewConnectionShouldClose, processor (mac:{processor_mac}) (ACK OK)");
|
||||
debug!("process_sink_mbox_rx_msg: AddNewConnectionShouldClose, processor (mac:{processor_mac}) (ACK OK)");
|
||||
return Ok(CGWConnectionState::IsForcedToClose);
|
||||
}
|
||||
CGWConnectionProcessorReqMsg::SinkRequestToDevice(pload) => {
|
||||
debug!("MBOX_IN: SinkRequestToDevice, processor (mac:{processor_mac}) req for (mac:{}) payload:{}",
|
||||
debug!("process_sink_mbox_rx_msg: SinkRequestToDevice, processor (mac: {processor_mac}) request for (mac: {}) payload: {}",
|
||||
pload.command.serial,
|
||||
pload.message.clone(),
|
||||
);
|
||||
@@ -354,15 +430,15 @@ impl CGWConnectionProcessor {
|
||||
}
|
||||
CGWConnectionProcessorReqMsg::GroupIdChanged(new_group_id) => {
|
||||
debug!(
|
||||
"Mac {} received gid {} -> {} change request",
|
||||
"Received GroupID change message: mac {} - old gid {} : new gid {}",
|
||||
self.serial, self.group_id, new_group_id
|
||||
);
|
||||
self.group_id = new_group_id;
|
||||
}
|
||||
_ => {
|
||||
warn!("Received unknown mbox message {:?}", msg);
|
||||
warn!("Received unknown mbox message: {:?}!", msg);
|
||||
return Err(Error::ConnectionProcessor(
|
||||
"Sink MBOX: received unexpected message",
|
||||
"Connection processor (Sink MBOX): received unexpected message",
|
||||
));
|
||||
}
|
||||
}
|
||||
@@ -394,7 +470,6 @@ impl CGWConnectionProcessor {
|
||||
mut stream: SStream,
|
||||
mut sink: SSink,
|
||||
mut mbox_rx: UnboundedReceiver<CGWConnectionProcessorReqMsg>,
|
||||
device_type: CGWDeviceType,
|
||||
) {
|
||||
#[derive(Debug)]
|
||||
enum WakeupReason {
|
||||
@@ -406,6 +481,7 @@ impl CGWConnectionProcessor {
|
||||
}
|
||||
|
||||
let device_mac = self.serial;
|
||||
let mut pending_req_uuid = Uuid::default();
|
||||
let mut pending_req_id: u64 = 0;
|
||||
let mut pending_req_type: CGWUCentralCommandType;
|
||||
let mut fsm_state = CGWUCentralMessageProcessorState::Idle;
|
||||
@@ -438,19 +514,23 @@ impl CGWConnectionProcessor {
|
||||
if let Some(queue_msg) = queue_lock.dequeue_device_message(&device_mac).await {
|
||||
// Get message from queue, start measure requet processing time
|
||||
start_time = Instant::now();
|
||||
|
||||
pending_req_id = queue_msg.command.id;
|
||||
pending_req_type = queue_msg.command.cmd_type.clone();
|
||||
pending_req_uuid = queue_msg.uuid;
|
||||
|
||||
let timeout = match queue_msg.timeout {
|
||||
Some(secs) => Duration::from_secs(secs),
|
||||
None => MESSAGE_TIMEOUT_DURATION,
|
||||
};
|
||||
|
||||
wakeup_reason = WakeupReason::MboxRx(Some(
|
||||
CGWConnectionProcessorReqMsg::SinkRequestToDevice(queue_msg),
|
||||
));
|
||||
|
||||
// Set new pending request timeout value
|
||||
queue_lock
|
||||
.set_device_last_req_info(
|
||||
&device_mac,
|
||||
pending_req_id,
|
||||
MESSAGE_TIMEOUT_DURATION,
|
||||
)
|
||||
.set_device_last_req_info(&device_mac, pending_req_id, timeout)
|
||||
.await;
|
||||
|
||||
debug!("Got pending request with id: {}", pending_req_id);
|
||||
@@ -515,7 +595,7 @@ impl CGWConnectionProcessor {
|
||||
}
|
||||
|
||||
// Doesn't matter if connection was closed or terminated
|
||||
// Do message queue timeout tick and cleanup queue dut to timeout\
|
||||
// Do message queue timeout tick and cleanup queue due to timeout
|
||||
// Or decrease timer value - on connection termination - background task
|
||||
// is responsible to cleanup queue
|
||||
if fsm_state == CGWUCentralMessageProcessorState::ResultPending {
|
||||
@@ -527,7 +607,25 @@ impl CGWConnectionProcessor {
|
||||
.await
|
||||
{
|
||||
let queue_lock = CGW_MESSAGES_QUEUE.read().await;
|
||||
queue_lock.clear_device_message_queue(&device_mac).await;
|
||||
let flushed_reqs = queue_lock.clear_device_message_queue(&device_mac).await;
|
||||
|
||||
for req in flushed_reqs {
|
||||
if let Ok(resp) = cgw_construct_infra_request_result_msg(
|
||||
self.cgw_server.get_local_id(),
|
||||
req.uuid,
|
||||
req.command.id,
|
||||
false,
|
||||
Some(format!(
|
||||
"Reques flushed from infra queue {device_mac} due to previous request timeout!"
|
||||
)),
|
||||
) {
|
||||
// Currently the Device Queue Manager does not store the infra's GID
|
||||
self.cgw_server
|
||||
.enqueue_mbox_message_from_cgw_to_nb_api(self.group_id, resp);
|
||||
} else {
|
||||
error!("Failed to construct message!");
|
||||
}
|
||||
}
|
||||
|
||||
// reset request duration, request id and queue state
|
||||
pending_req_id = 0;
|
||||
@@ -538,6 +636,18 @@ impl CGWConnectionProcessor {
|
||||
.set_device_last_req_info(&device_mac, 0, Duration::ZERO)
|
||||
.await;
|
||||
fsm_state = CGWUCentralMessageProcessorState::Idle;
|
||||
if let Ok(resp) = cgw_construct_infra_request_result_msg(
|
||||
self.cgw_server.get_local_id(),
|
||||
pending_req_uuid,
|
||||
pending_req_id,
|
||||
false,
|
||||
Some(format!("Request timed out")),
|
||||
) {
|
||||
self.cgw_server
|
||||
.enqueue_mbox_message_from_cgw_to_nb_api(self.group_id, resp);
|
||||
} else {
|
||||
error!("Failed to construct rebalance_group message!");
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -545,7 +655,7 @@ impl CGWConnectionProcessor {
|
||||
let rc = match wakeup_reason {
|
||||
WakeupReason::WSSRxMsg(res) => {
|
||||
last_contact = Instant::now();
|
||||
self.process_wss_rx_msg(res, device_type, &mut fsm_state, pending_req_id)
|
||||
self.process_wss_rx_msg(res, &mut fsm_state, pending_req_id, pending_req_uuid)
|
||||
.await
|
||||
}
|
||||
WakeupReason::MboxRx(mbox_message) => {
|
||||
@@ -572,26 +682,26 @@ impl CGWConnectionProcessor {
|
||||
return;
|
||||
} else if let CGWConnectionState::ClosedGracefully = state {
|
||||
warn!(
|
||||
"Remote client {} closed connection gracefully",
|
||||
"Remote client {} closed connection gracefully!",
|
||||
self.serial.to_hex_string()
|
||||
);
|
||||
return self.send_connection_close_event().await;
|
||||
} else if let CGWConnectionState::IsStale = state {
|
||||
warn!(
|
||||
"Remote client {} closed due to inactivity",
|
||||
"Remote client {} closed due to inactivity!",
|
||||
self.serial.to_hex_string()
|
||||
);
|
||||
return self.send_connection_close_event().await;
|
||||
} else if let CGWConnectionState::IsDead = state {
|
||||
warn!(
|
||||
"Remote client {} connection is dead",
|
||||
"Remote client {} connection is dead!",
|
||||
self.serial.to_hex_string()
|
||||
);
|
||||
return self.send_connection_close_event().await;
|
||||
}
|
||||
}
|
||||
Err(e) => {
|
||||
warn!("{:?}", e);
|
||||
warn!("Connection processor closed! Error: {e}");
|
||||
return self.send_connection_close_event().await;
|
||||
}
|
||||
}
|
||||
@@ -604,7 +714,7 @@ impl CGWConnectionProcessor {
|
||||
.enqueue_mbox_message_to_cgw_server(msg)
|
||||
.await;
|
||||
debug!(
|
||||
"MBOX_OUT: ConnectionClosed, processor (mac:{})",
|
||||
"MBOX_OUT: ConnectionClosed, processor (mac: {})",
|
||||
self.serial.to_hex_string()
|
||||
);
|
||||
}
|
||||
|
||||
File diff suppressed because it is too large
Load Diff
@@ -1,5 +1,6 @@
|
||||
use crate::AppArgs;
|
||||
use crate::cgw_app_args::CGWDBArgs;
|
||||
|
||||
use crate::cgw_tls::cgw_tls_create_db_connect;
|
||||
use crate::{
|
||||
cgw_errors::{Error, Result},
|
||||
cgw_metrics::{CGWMetrics, CGWMetricsHealthComponent, CGWMetricsHealthComponentStatus},
|
||||
@@ -7,7 +8,8 @@ use crate::{
|
||||
|
||||
use eui48::MacAddress;
|
||||
|
||||
use tokio_postgres::{row::Row, Client, NoTls};
|
||||
use tokio_postgres::NoTls;
|
||||
use tokio_postgres::{row::Row, Client};
|
||||
|
||||
#[derive(Clone)]
|
||||
pub struct CGWDBInfra {
|
||||
@@ -51,40 +53,82 @@ pub struct CGWDBAccessor {
|
||||
}
|
||||
|
||||
impl CGWDBAccessor {
|
||||
pub async fn new(app_args: &AppArgs) -> Result<Self> {
|
||||
pub async fn new(db_args: &CGWDBArgs) -> Result<Self> {
|
||||
let conn_str = format!(
|
||||
"host={host} port={port} user={user} dbname={db} password={pass} connect_timeout=10",
|
||||
host = app_args.db_host,
|
||||
port = app_args.db_port,
|
||||
user = app_args.db_username,
|
||||
db = app_args.db_name,
|
||||
pass = app_args.db_password
|
||||
"sslmode={sslmode} host={host} port={port} user={user} dbname={db} password={pass} connect_timeout=10",
|
||||
host = db_args.db_host,
|
||||
port = db_args.db_port,
|
||||
user = db_args.db_username,
|
||||
db = db_args.db_name,
|
||||
pass = db_args.db_password,
|
||||
sslmode = match db_args.db_tls {
|
||||
true => "require",
|
||||
false => "disable",
|
||||
}
|
||||
);
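// For illustration, with the defaults defined earlier (host "localhost",
// port 6379, user/db "cgw", password "123", TLS disabled) the resulting
// connection string is roughly:
//   "sslmode=disable host=localhost port=6379 user=cgw dbname=cgw password=123 connect_timeout=10"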
|
||||
debug!(
|
||||
"Trying to connect to DB ({}:{})...\nConn args {}",
|
||||
app_args.db_host, app_args.db_port, conn_str
|
||||
"Trying to connect to remote db ({}:{})...\nConnection args: {}",
|
||||
db_args.db_host, db_args.db_port, conn_str
|
||||
);
|
||||
|
||||
let (client, connection) = match tokio_postgres::connect(&conn_str, NoTls).await {
|
||||
Ok((cl, conn)) => (cl, conn),
|
||||
Err(e) => {
|
||||
error!("Failed to establish connection with DB, reason: {:?}", e);
|
||||
return Err(Error::DbAccessor("Failed to establish connection with DB"));
|
||||
}
|
||||
};
|
||||
let client: Client;
|
||||
if db_args.db_tls {
|
||||
let tls = match cgw_tls_create_db_connect().await {
|
||||
Ok(tls_connect) => tls_connect,
|
||||
Err(e) => {
|
||||
error!("Failed to build TLS connection with remote DB! Error: {e}");
|
||||
return Err(Error::DbAccessor(
|
||||
"Failed to build TLS connection with remote DB",
|
||||
));
|
||||
}
|
||||
};
|
||||
|
||||
tokio::spawn(async move {
|
||||
if let Err(e) = connection.await {
|
||||
let err_msg = format!("Connection to DB broken: {}", e);
|
||||
error!("{}", err_msg);
|
||||
CGWMetrics::get_ref()
|
||||
.change_component_health_status(
|
||||
CGWMetricsHealthComponent::DBConnection,
|
||||
CGWMetricsHealthComponentStatus::NotReady(err_msg),
|
||||
)
|
||||
.await;
|
||||
}
|
||||
});
|
||||
let (db_client, connection) = match tokio_postgres::connect(&conn_str, tls).await {
|
||||
Ok((cl, conn)) => (cl, conn),
|
||||
Err(e) => {
|
||||
error!("Failed to establish connection with DB! Error: {e}");
|
||||
return Err(Error::DbAccessor("Failed to establish connection with DB"));
|
||||
}
|
||||
};
|
||||
|
||||
tokio::spawn(async move {
|
||||
if let Err(e) = connection.await {
|
||||
let err_msg = format!("Connection to DB broken! Error: {e}");
|
||||
error!("{}", err_msg);
|
||||
CGWMetrics::get_ref()
|
||||
.change_component_health_status(
|
||||
CGWMetricsHealthComponent::DBConnection,
|
||||
CGWMetricsHealthComponentStatus::NotReady(err_msg),
|
||||
)
|
||||
.await;
|
||||
}
|
||||
});
|
||||
|
||||
client = db_client;
|
||||
} else {
|
||||
let (db_client, connection) = match tokio_postgres::connect(&conn_str, NoTls).await {
|
||||
Ok((cl, conn)) => (cl, conn),
|
||||
Err(e) => {
|
||||
error!("Failed to establish connection with DB! Error: {e}");
|
||||
return Err(Error::DbAccessor("Failed to establish connection with DB"));
|
||||
}
|
||||
};
|
||||
|
||||
tokio::spawn(async move {
|
||||
if let Err(e) = connection.await {
|
||||
let err_msg = format!("Connection to DB broken! Error: {e}");
|
||||
error!("{}", err_msg);
|
||||
CGWMetrics::get_ref()
|
||||
.change_component_health_status(
|
||||
CGWMetricsHealthComponent::DBConnection,
|
||||
CGWMetricsHealthComponentStatus::NotReady(err_msg),
|
||||
)
|
||||
.await;
|
||||
}
|
||||
});
|
||||
|
||||
client = db_client;
|
||||
}
|
||||
|
||||
tokio::spawn(async move {
|
||||
CGWMetrics::get_ref()
|
||||
@@ -115,7 +159,7 @@ impl CGWDBAccessor {
|
||||
let q = match self.cl.prepare("INSERT INTO infrastructure_groups (id, reserved_size, actual_size) VALUES ($1, $2, $3)").await {
|
||||
Ok(c) => c,
|
||||
Err(e) => {
|
||||
error!("Failed to prepare query (new infra group) for insertion, reason: {:?}", e);
|
||||
error!("Failed to prepare query (new infra group) for insertion! Error: {e}");
|
||||
return Err(Error::DbAccessor("Insert new infra group failed"));
|
||||
}
|
||||
};
|
||||
@@ -127,11 +171,7 @@ impl CGWDBAccessor {
|
||||
match res {
|
||||
Ok(_n) => Ok(()),
|
||||
Err(e) => {
|
||||
error!(
|
||||
"Failed to insert a new infra group {}: {:?}",
|
||||
g.id,
|
||||
e.to_string()
|
||||
);
|
||||
error!("Failed to insert a new infra group {}! Error: {}", g.id, e);
|
||||
Err(Error::DbAccessor("Insert new infra group failed"))
|
||||
}
|
||||
}
|
||||
@@ -146,10 +186,7 @@ impl CGWDBAccessor {
|
||||
{
|
||||
Ok(c) => c,
|
||||
Err(e) => {
|
||||
error!(
|
||||
"Failed to prepare query (del infra group) for removal, reason: {:?}",
|
||||
e
|
||||
);
|
||||
error!("Failed to prepare query (del infra group) for removal! Error: {e}");
|
||||
return Err(Error::DbAccessor("Insert new infra group failed"));
|
||||
}
|
||||
};
|
||||
@@ -166,7 +203,7 @@ impl CGWDBAccessor {
|
||||
}
|
||||
}
|
||||
Err(e) => {
|
||||
error!("Failed to delete an infra group {gid}: {:?}", e.to_string());
|
||||
error!("Failed to delete an infra group {gid}! Error: {e}");
|
||||
Err(Error::DbAccessor("Delete infra group failed"))
|
||||
}
|
||||
}
|
||||
@@ -227,7 +264,7 @@ impl CGWDBAccessor {
|
||||
{
|
||||
Ok(c) => c,
|
||||
Err(e) => {
|
||||
error!("Failed to insert new infra, reason: {:?}", e);
|
||||
error!("Failed to insert new infra! Error: {e}");
|
||||
return Err(Error::DbAccessor("Failed to insert new infra"));
|
||||
}
|
||||
};
|
||||
@@ -239,7 +276,7 @@ impl CGWDBAccessor {
|
||||
match res {
|
||||
Ok(_n) => Ok(()),
|
||||
Err(e) => {
|
||||
error!("Failed to insert a new infra: {:?}", e.to_string());
|
||||
error!("Failed to insert new infra! Error: {e}");
|
||||
Err(Error::DbAccessor("Insert new infra failed"))
|
||||
}
|
||||
}
|
||||
@@ -249,7 +286,7 @@ impl CGWDBAccessor {
|
||||
let q = match self.cl.prepare("DELETE FROM infras WHERE mac = $1").await {
|
||||
Ok(c) => c,
|
||||
Err(e) => {
|
||||
error!("Failed to delete infra, reason: {:?}", e);
|
||||
error!("Failed to delete infra! Error: {e}");
|
||||
return Err(Error::DbAccessor("Failed to delete infra from DB"));
|
||||
}
|
||||
};
|
||||
@@ -261,12 +298,12 @@ impl CGWDBAccessor {
|
||||
Ok(())
|
||||
} else {
|
||||
Err(Error::DbAccessor(
|
||||
"Failed to delete infra from DB: MAC does not exist",
|
||||
"Failed to delete infra from DB: MAC does not exist!",
|
||||
))
|
||||
}
|
||||
}
|
||||
Err(e) => {
|
||||
error!("Failed to delete infra: {:?}", e.to_string());
|
||||
error!("Failed to delete infra! Error: {e}");
|
||||
Err(Error::DbAccessor("Delete infra failed"))
|
||||
}
|
||||
}
|
||||
@@ -286,7 +323,7 @@ impl CGWDBAccessor {
|
||||
Some(list)
|
||||
}
|
||||
Err(e) => {
|
||||
error!("Failed to retrieve infras from DB, reason: {:?}", e);
|
||||
error!("Failed to retrieve infras from DB! Error: {e}");
|
||||
None
|
||||
}
|
||||
}
|
||||
|
||||
@@ -3,10 +3,12 @@ use std::str::FromStr;
|
||||
|
||||
use serde::{Deserialize, Serialize};
|
||||
|
||||
#[derive(Clone, Copy)]
|
||||
#[derive(Clone, Copy, Debug, Default, Deserialize, Serialize)]
|
||||
pub enum CGWDeviceType {
|
||||
CGWDeviceAP,
|
||||
CGWDeviceSwitch,
|
||||
#[default]
|
||||
CGWDeviceUnknown,
|
||||
}
|
||||
|
||||
impl FromStr for CGWDeviceType {
|
||||
@@ -16,6 +18,7 @@ impl FromStr for CGWDeviceType {
|
||||
match s {
|
||||
"ap" => Ok(CGWDeviceType::CGWDeviceAP),
|
||||
"switch" => Ok(CGWDeviceType::CGWDeviceSwitch),
|
||||
"unknown" => Ok(CGWDeviceType::CGWDeviceUnknown),
|
||||
_ => Err(()),
|
||||
}
|
||||
}
|
||||
@@ -45,17 +48,19 @@ pub struct CGWDeviceCapabilities {
|
||||
|
||||
impl CGWDeviceCapabilities {
|
||||
pub fn update_device_capabilities(&mut self, new_capabilities: &CGWDeviceCapabilities) {
|
||||
self.firmware = new_capabilities.firmware.clone();
|
||||
self.firmware.clone_from(&new_capabilities.firmware);
|
||||
self.uuid = new_capabilities.uuid;
|
||||
self.compatible = new_capabilities.compatible.clone();
|
||||
self.model = new_capabilities.model.clone();
|
||||
self.platform = new_capabilities.platform.clone();
|
||||
self.label_macaddr = new_capabilities.label_macaddr.clone();
|
||||
self.compatible.clone_from(&new_capabilities.compatible);
|
||||
self.model.clone_from(&new_capabilities.model);
|
||||
self.platform.clone_from(&new_capabilities.platform);
|
||||
self.label_macaddr
|
||||
.clone_from(&new_capabilities.label_macaddr);
|
||||
}
|
||||
}
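// Note: clone_from() can reuse the destination String's existing allocation
// instead of dropping it and allocating a fresh clone, which is why the plain
// `x = y.clone()` assignments above were rewritten.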
|
||||
|
||||
#[derive(Clone, Default, Deserialize, Serialize)]
|
||||
pub struct CGWDevice {
|
||||
dev_type: CGWDeviceType,
|
||||
state: CGWDeviceState,
|
||||
group_id: i32,
|
||||
remains_in_db: bool,
|
||||
@@ -64,12 +69,14 @@ pub struct CGWDevice {
|
||||
|
||||
impl CGWDevice {
|
||||
pub fn new(
|
||||
dev_type: CGWDeviceType,
|
||||
state: CGWDeviceState,
|
||||
group_id: i32,
|
||||
remains_in_db: bool,
|
||||
capabilities: CGWDeviceCapabilities,
|
||||
) -> CGWDevice {
|
||||
CGWDevice {
|
||||
dev_type,
|
||||
state,
|
||||
group_id,
|
||||
remains_in_db,
|
||||
@@ -77,6 +84,14 @@ impl CGWDevice {
|
||||
}
|
||||
}
|
||||
|
||||
pub fn set_device_type(&mut self, dev_type: CGWDeviceType) {
|
||||
self.dev_type = dev_type;
|
||||
}
|
||||
|
||||
pub fn get_device_type(&self) -> CGWDeviceType {
|
||||
self.dev_type
|
||||
}
|
||||
|
||||
pub fn set_device_state(&mut self, new_state: CGWDeviceState) {
|
||||
self.state = new_state;
|
||||
}
|
||||
|
||||
@@ -32,10 +32,7 @@ impl CGWDevicesCache {
|
||||
|
||||
pub fn add_device(&mut self, key: &MacAddress, value: &CGWDevice) -> bool {
|
||||
let status: bool = if self.check_device_exists(key) {
|
||||
debug!(
|
||||
"Failed to add device {}. Requested item already exist.",
|
||||
key
|
||||
);
|
||||
debug!("Failed to add device {}. Requested item already exist", key);
|
||||
false
|
||||
} else {
|
||||
self.cache.insert(*key, value.clone());
|
||||
@@ -51,7 +48,7 @@ impl CGWDevicesCache {
|
||||
true
|
||||
} else {
|
||||
debug!(
|
||||
"Failed to del device {}. Requested item does not exist.",
|
||||
"Failed to del device {}. Requested item does not exist",
|
||||
key
|
||||
);
|
||||
false
|
||||
@@ -61,10 +58,10 @@ impl CGWDevicesCache {
|
||||
}
|
||||
|
||||
pub fn check_device_exists(&self, key: &MacAddress) -> bool {
|
||||
self.cache.get(key).is_some()
|
||||
self.cache.contains_key(key)
|
||||
}
|
||||
|
||||
pub fn get_device(&mut self, key: &MacAddress) -> Option<&mut CGWDevice> {
|
||||
pub fn get_device_mut(&mut self, key: &MacAddress) -> Option<&mut CGWDevice> {
|
||||
if let Some(value) = self.cache.get_mut(key) {
|
||||
Some(value)
|
||||
} else {
|
||||
@@ -72,6 +69,14 @@ impl CGWDevicesCache {
|
||||
}
|
||||
}
|
||||
|
||||
pub fn get_device(&self, key: &MacAddress) -> Option<&CGWDevice> {
|
||||
if let Some(value) = self.cache.get(key) {
|
||||
Some(value)
|
||||
} else {
|
||||
None
|
||||
}
|
||||
}
|
||||
|
||||
#[allow(dead_code)]
|
||||
pub fn get_device_id(&self, key: &MacAddress) -> Option<i32> {
|
||||
self.cache.get(key).map(|value| value.get_device_group_id())
|
||||
@@ -83,13 +88,19 @@ impl CGWDevicesCache {
|
||||
}
|
||||
}
|
||||
|
||||
pub fn flush_all(&mut self) {
|
||||
self.cache.clear();
|
||||
}
|
||||
|
||||
pub fn dump_devices_cache(&self) {
|
||||
// Debug print - simply ignore errors if any!
|
||||
if let Ok(json_output) = serde_json::to_string_pretty(&self) {
|
||||
debug!("Cache: {}", json_output);
|
||||
|
||||
if let Ok(mut fd) = File::create("/var/devices_cache.json") {
|
||||
let _ = fd.write_all(json_output.as_bytes());
|
||||
if let Err(e) = fd.write_all(json_output.as_bytes()) {
|
||||
error!("Failed to dump CGW device chache data! Error: {e}");
|
||||
}
|
||||
}
|
||||
};
|
||||
}
|
||||
|
||||
@@ -1,4 +1,5 @@
|
||||
use derive_more::From;
|
||||
use eui48::MacAddressFormat;
|
||||
|
||||
pub type Result<T> = core::result::Result<T, Error>;
|
||||
|
||||
@@ -15,14 +16,22 @@ pub enum Error {
|
||||
|
||||
RemoteDiscoveryFailedInfras(Vec<eui48::MacAddress>),
|
||||
|
||||
Tcp(String),
|
||||
|
||||
Tls(String),
|
||||
|
||||
Redis(String),
|
||||
|
||||
UCentralParser(&'static str),
|
||||
|
||||
UCentralMessagesQueue(&'static str),
|
||||
UCentralValidator(String),
|
||||
|
||||
UCentralMessagesQueue(String),
|
||||
|
||||
AppArgsParser(String),
|
||||
|
||||
Runtime(String),
|
||||
|
||||
// -- Externals
|
||||
#[from]
|
||||
Io(std::io::Error),
|
||||
@@ -63,9 +72,6 @@ pub enum Error {
|
||||
#[from]
|
||||
InvalidUri(warp::http::uri::InvalidUri),
|
||||
|
||||
#[from]
|
||||
RedisAsync(redis_async::error::Error),
|
||||
|
||||
#[from]
|
||||
StaticStr(&'static str),
|
||||
|
||||
@@ -74,17 +80,48 @@ pub enum Error {
|
||||
|
||||
#[from]
|
||||
Tungstenite(tungstenite::Error),
|
||||
|
||||
#[from]
|
||||
Empty(()),
|
||||
}
|
||||
|
||||
impl ToString for Error {
|
||||
fn to_string(&self) -> String {
|
||||
impl std::fmt::Display for Error {
|
||||
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
|
||||
match self {
|
||||
Error::AppArgsParser(message) => message.clone(),
|
||||
Error::Tls(message) => message.clone(),
|
||||
_ => format!("{:?}", self),
|
||||
Error::AppArgsParser(message)
|
||||
| Error::Tls(message)
|
||||
| Error::ConnectionServer(message)
|
||||
| Error::Runtime(message)
|
||||
| Error::Redis(message)
|
||||
| Error::Tcp(message)
|
||||
| Error::UCentralMessagesQueue(message)
|
||||
| Error::UCentralValidator(message) => write!(f, "{}", message),
|
||||
Error::ConnectionProcessor(message)
|
||||
| Error::DbAccessor(message)
|
||||
| Error::RemoteDiscovery(message)
|
||||
| Error::UCentralParser(message)
|
||||
| Error::StaticStr(message) => write!(f, "{}", message),
|
||||
Error::Io(io_error) => write!(f, "{}", io_error),
|
||||
Error::ClientVerifierBuilder(verifier_error) => write!(f, "{}", verifier_error),
|
||||
Error::TokioPostgres(psql_error) => write!(f, "{}", psql_error),
|
||||
Error::TokioRustls(rustls_error) => write!(f, "{}", rustls_error),
|
||||
Error::TokioSync(sync_error) => write!(f, "{}", sync_error),
|
||||
Error::IpAddressParse(ip_parse_error) => write!(f, "{}", ip_parse_error),
|
||||
Error::MacAddressParse(mac_parse_error) => write!(f, "{}", mac_parse_error),
|
||||
Error::ParseInt(int_error) => write!(f, "{}", int_error),
|
||||
Error::TryFromInt(try_from_int_error) => write!(f, "{}", try_from_int_error),
|
||||
Error::Prometheus(prometheus_error) => write!(f, "{}", prometheus_error),
|
||||
Error::SerdeJson(serde_error) => write!(f, "{}", serde_error),
|
||||
Error::Kafka(kafka_error) => write!(f, "{}", kafka_error),
|
||||
Error::InvalidUri(uri_error) => write!(f, "{}", uri_error),
|
||||
Error::Tonic(tonic_error) => write!(f, "{}", tonic_error),
|
||||
Error::Tungstenite(tungstenite_error) => write!(f, "{}", tungstenite_error),
|
||||
Error::RemoteDiscoveryFailedInfras(vec) => {
|
||||
let result = vec
|
||||
.iter()
|
||||
.map(|obj| obj.to_string(MacAddressFormat::HexString))
|
||||
.collect::<Vec<_>>()
|
||||
.join(", ");
|
||||
|
||||
write!(f, "{}", result)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
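// Illustrative note: with Display implemented above, call sites can format
// these errors directly via "{e}" (or e.to_string()), matching the
// "...! Error: {e}" log-message style used throughout this patch.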
|
||||
|
||||
@@ -27,6 +27,13 @@ lazy_static! {
|
||||
"Max threshold (extra capacity) of groups this shard can handle"
|
||||
)
|
||||
.expect("metric can be created");
|
||||
pub static ref GROUP_INFRAS_CAPACITY: IntGauge = IntGauge::new(
|
||||
"cgw_group_ifras_capacity",
|
||||
"Max limit (capacity) of infras the group can handle"
|
||||
)
|
||||
.expect("metric can be created");
|
||||
pub static ref GROUP_INFRAS_ASSIGNED_NUM: Arc<RwLock<HashMap<i32, IntGauge>>> =
|
||||
Arc::new(RwLock::new(HashMap::new()));
|
||||
pub static ref CONNECTIONS_NUM: IntGauge = IntGauge::new(
|
||||
"cgw_connections_num",
|
||||
"Number of successfully established WSS connections (underlying Infra connections)"
|
||||
@@ -88,19 +95,17 @@ impl fmt::Display for CGWMetricsHealthComponentStatus {
|
||||
pub enum CGWMetricsCounterType {
|
||||
ActiveCGWNum,
|
||||
GroupsAssignedNum,
|
||||
#[allow(dead_code)]
|
||||
GroupsCapacity,
|
||||
#[allow(dead_code)]
|
||||
GroupsThreshold,
|
||||
GroupInfrasCapacity,
|
||||
GroupInfrasAssignedNum,
|
||||
ConnectionsNum,
|
||||
}
|
||||
|
||||
pub enum CGWMetricsCounterOpType {
|
||||
Inc,
|
||||
#[allow(dead_code)]
|
||||
IncBy(i64),
|
||||
Dec,
|
||||
#[allow(dead_code)]
|
||||
DecBy(i64),
|
||||
Set(i64),
|
||||
}
|
||||
@@ -146,14 +151,9 @@ impl CGWMetrics {
|
||||
CGWMetricsHealthComponentStatus::NotReady("Application is starting".to_string()),
|
||||
);
|
||||
|
||||
// TODO: remove: W/A for now, as currently capacity / threshold
|
||||
// is non-configurable
|
||||
GROUPS_CAPACITY.set(1000i64);
|
||||
GROUPS_THRESHOLD.set(50i64);
|
||||
|
||||
tokio::spawn(async move {
|
||||
if let Err(err) = register_custom_metrics() {
|
||||
warn!("Failed to register CGW Metrics: {:?}", err);
|
||||
if let Err(e) = register_custom_metrics() {
|
||||
warn!("Failed to register CGW Metrics! Error: {e}");
|
||||
return;
|
||||
};
|
||||
|
||||
@@ -199,12 +199,17 @@ impl CGWMetrics {
|
||||
},
|
||||
CGWMetricsCounterType::GroupsCapacity => {
|
||||
if let CGWMetricsCounterOpType::Set(v) = op {
|
||||
ACTIVE_CGW_NUM.set(v);
|
||||
GROUPS_CAPACITY.set(v);
|
||||
}
|
||||
}
|
||||
CGWMetricsCounterType::GroupsThreshold => {
|
||||
if let CGWMetricsCounterOpType::Set(v) = op {
|
||||
ACTIVE_CGW_NUM.set(v);
|
||||
GROUPS_THRESHOLD.set(v);
|
||||
}
|
||||
}
|
||||
CGWMetricsCounterType::GroupInfrasCapacity => {
|
||||
if let CGWMetricsCounterOpType::Set(v) = op {
|
||||
GROUP_INFRAS_CAPACITY.set(v);
|
||||
}
|
||||
}
|
||||
CGWMetricsCounterType::ConnectionsNum => match op {
|
||||
@@ -216,6 +221,62 @@ impl CGWMetrics {
|
||||
}
|
||||
_ => {}
|
||||
},
|
||||
_ => {}
|
||||
}
|
||||
}
|
||||
|
||||
pub async fn change_group_counter(
|
||||
&self,
|
||||
group_id: i32,
|
||||
counter: CGWMetricsCounterType,
|
||||
op: CGWMetricsCounterOpType,
|
||||
) {
|
||||
if let CGWMetricsCounterType::GroupInfrasAssignedNum = counter {
|
||||
let mut lock = GROUP_INFRAS_ASSIGNED_NUM.write().await;
|
||||
|
||||
if let Some(counter) = lock.get(&group_id) {
|
||||
match op {
|
||||
CGWMetricsCounterOpType::Inc => {
|
||||
counter.inc();
|
||||
}
|
||||
CGWMetricsCounterOpType::Dec => {
|
||||
counter.dec();
|
||||
}
|
||||
CGWMetricsCounterOpType::IncBy(inc_val) => {
|
||||
counter.add(inc_val);
|
||||
}
|
||||
CGWMetricsCounterOpType::DecBy(dec_val) => {
|
||||
counter.sub(dec_val);
|
||||
}
|
||||
_ => {}
|
||||
}
|
||||
} else if let Ok(counter) = IntGauge::new(
|
||||
format!("cgw_group_{group_id}_infras_assigned_num"),
|
||||
"Number of infras assigned to this particular group",
|
||||
) {
|
||||
if REGISTRY.register(Box::new(counter.clone())).is_ok() {
|
||||
match op {
|
||||
CGWMetricsCounterOpType::Inc => counter.set(1),
|
||||
CGWMetricsCounterOpType::IncBy(set_val)
|
||||
| CGWMetricsCounterOpType::Set(set_val) => counter.set(set_val),
|
||||
_ => counter.set(0),
|
||||
}
|
||||
lock.insert(group_id, counter);
|
||||
} else {
|
||||
error!("Failed to register GroupInfrasAssignedNum metric for GID {group_id}!");
|
||||
}
|
||||
} else {
|
||||
error!("Failed to create GroupInfrasAssignedNum metric for GID {group_id}");
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
pub async fn delete_group_counter(&self, group_id: i32) {
|
||||
let mut lock = GROUP_INFRAS_ASSIGNED_NUM.write().await;
|
||||
if let Some(counter) = lock.remove(&group_id) {
|
||||
if let Err(e) = REGISTRY.unregister(Box::new(counter)) {
|
||||
error!("Failed to deregister GroupInfrasAssignedNum metric for GID {group_id}! Error: {e}");
|
||||
}
|
||||
}
|
||||
}
|
||||
}
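// Standalone sketch of the per-group gauge registration pattern used by
// change_group_counter() above; the metric name here is illustrative only.
fn register_group_gauge(
    registry: &prometheus::Registry,
    group_id: i32,
) -> Option<prometheus::IntGauge> {
    let gauge = prometheus::IntGauge::new(
        format!("example_group_{group_id}_infras_assigned_num"),
        "Number of infras assigned to this particular group",
    )
    .ok()?;
    // Registration can fail (e.g. on a duplicate name); treat that as a soft
    // error, mirroring the handling above.
    registry.register(Box::new(gauge.clone())).ok()?;
    Some(gauge)
}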
|
||||
@@ -229,6 +290,8 @@ fn register_custom_metrics() -> Result<()> {
|
||||
|
||||
REGISTRY.register(Box::new(GROUPS_THRESHOLD.clone()))?;
|
||||
|
||||
REGISTRY.register(Box::new(GROUP_INFRAS_CAPACITY.clone()))?;
|
||||
|
||||
REGISTRY.register(Box::new(CONNECTIONS_NUM.clone()))?;
|
||||
|
||||
Ok(())
|
||||
@@ -268,12 +331,12 @@ async fn metrics_handler() -> std::result::Result<impl Reply, Rejection> {
|
||||
|
||||
let mut buffer = Vec::new();
|
||||
if let Err(e) = encoder.encode(®ISTRY.gather(), &mut buffer) {
|
||||
error!("could not encode custom metrics: {}", e);
|
||||
error!("Could not encode custom metrics! Error: {e}");
|
||||
};
|
||||
let mut res = match String::from_utf8(buffer.clone()) {
|
||||
Ok(v) => v,
|
||||
Err(e) => {
|
||||
error!("custom metrics could not be from_utf8'd: {}", e);
|
||||
error!("Custom metrics could not be from_utf8'd! Error: {e}");
|
||||
String::default()
|
||||
}
|
||||
};
|
||||
@@ -281,12 +344,12 @@ async fn metrics_handler() -> std::result::Result<impl Reply, Rejection> {
|
||||
|
||||
let mut buffer = Vec::new();
|
||||
if let Err(e) = encoder.encode(&prometheus::gather(), &mut buffer) {
|
||||
error!("could not encode prometheus metrics: {}", e);
|
||||
error!("Could not encode prometheus metrics! Error: {e}");
|
||||
};
|
||||
let res_custom = match String::from_utf8(buffer.clone()) {
|
||||
Ok(v) => v,
|
||||
Err(e) => {
|
||||
error!("prometheus metrics could not be from_utf8'd: {}", e);
|
||||
error!("Prometheus metrics could not be from_utf8'd! Error: {e}");
|
||||
String::default()
|
||||
}
|
||||
};
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
use crate::cgw_app_args::CGWKafkaArgs;
|
||||
use crate::cgw_device::OldNew;
|
||||
use crate::cgw_ucentral_parser::CGWDeviceChange;
|
||||
use crate::AppArgs;
|
||||
|
||||
use crate::cgw_connection_server::{CGWConnectionNBAPIReqMsg, CGWConnectionNBAPIReqMsgOrigin};
|
||||
use crate::cgw_errors::{Error, Result};
|
||||
@@ -8,6 +8,7 @@ use crate::cgw_metrics::{CGWMetrics, CGWMetricsHealthComponent, CGWMetricsHealth
|
||||
|
||||
use eui48::MacAddress;
|
||||
use futures::stream::TryStreamExt;
|
||||
use murmur2::murmur2;
|
||||
use rdkafka::client::ClientContext;
|
||||
use rdkafka::config::{ClientConfig, RDKafkaLogLevel};
|
||||
use rdkafka::error::KafkaResult;
|
||||
@@ -17,8 +18,10 @@ use rdkafka::{
|
||||
consumer::{stream_consumer::StreamConsumer, Consumer, ConsumerContext, Rebalance},
|
||||
producer::{FutureProducer, FutureRecord},
|
||||
};
|
||||
use serde::Serialize;
|
||||
use serde::{Deserialize, Serialize};
|
||||
use std::collections::HashMap;
|
||||
use std::net::SocketAddr;
|
||||
use std::ops::Range;
|
||||
use std::sync::Arc;
|
||||
use tokio::{
|
||||
runtime::{Builder, Runtime},
|
||||
@@ -35,7 +38,7 @@ type CGWCNCProducerType = FutureProducer;
|
||||
pub struct InfraGroupCreateResponse {
|
||||
pub r#type: &'static str,
|
||||
pub infra_group_id: i32,
|
||||
pub infra_name: String,
|
||||
pub reporter_shard_id: i32,
|
||||
pub uuid: Uuid,
|
||||
pub success: bool,
|
||||
pub error_message: Option<String>,
|
||||
@@ -45,60 +48,79 @@ pub struct InfraGroupCreateResponse {
|
||||
pub struct InfraGroupDeleteResponse {
|
||||
pub r#type: &'static str,
|
||||
pub infra_group_id: i32,
|
||||
pub reporter_shard_id: i32,
|
||||
pub uuid: Uuid,
|
||||
pub success: bool,
|
||||
pub error_message: Option<String>,
|
||||
}
|
||||
|
||||
#[derive(Debug, Serialize)]
|
||||
pub struct InfraGroupDeviceAddResponse {
|
||||
pub struct InfraGroupInfrasAddResponse {
|
||||
pub r#type: &'static str,
|
||||
pub infra_group_id: i32,
|
||||
pub infra_group_infra_devices: Vec<MacAddress>,
|
||||
pub failed_infras: Vec<MacAddress>,
|
||||
pub reporter_shard_id: i32,
|
||||
pub uuid: Uuid,
|
||||
pub success: bool,
|
||||
pub error_message: Option<String>,
|
||||
pub kafka_partition_key: Option<String>,
|
||||
}
|
||||
|
||||
#[derive(Debug, Serialize)]
|
||||
pub struct InfraGroupDeviceDelResponse {
|
||||
pub struct InfraGroupInfrasDelResponse {
|
||||
pub r#type: &'static str,
|
||||
pub infra_group_id: i32,
|
||||
pub infra_group_infra_devices: Vec<MacAddress>,
|
||||
pub failed_infras: Vec<MacAddress>,
|
||||
pub reporter_shard_id: i32,
|
||||
pub uuid: Uuid,
|
||||
pub success: bool,
|
||||
pub error_message: Option<String>,
|
||||
pub kafka_partition_key: Option<String>,
|
||||
}
|
||||
|
||||
#[derive(Debug, Serialize)]
|
||||
pub struct InfraGroupDeviceMessageEnqueueResponse {
|
||||
pub struct InfraGroupInfraMessageEnqueueResponse {
|
||||
pub r#type: &'static str,
|
||||
pub reporter_shard_id: i32,
|
||||
pub uuid: Uuid,
|
||||
pub success: bool,
|
||||
pub error_message: Option<String>,
|
||||
pub kafka_partition_key: Option<String>,
|
||||
}
|
||||
|
||||
#[derive(Debug, Serialize)]
|
||||
pub struct InfraGroupInfraRequestResult {
|
||||
pub r#type: &'static str,
|
||||
pub reporter_shard_id: i32,
|
||||
pub uuid: Uuid,
|
||||
pub id: u64,
|
||||
pub success: bool,
|
||||
pub error_message: Option<String>,
|
||||
}
|
||||
|
||||
#[derive(Debug, Serialize)]
|
||||
pub struct RebalanceGroupsResponse {
|
||||
pub r#type: &'static str,
|
||||
pub infra_group_id: i32,
|
||||
pub reporter_shard_id: i32,
|
||||
pub uuid: Uuid,
|
||||
pub success: bool,
|
||||
pub error_message: Option<String>,
|
||||
}
|
||||
|
||||
#[derive(Debug, Serialize)]
|
||||
pub struct InfraGroupDeviceCapabilitiesChanged {
|
||||
pub struct InfraGroupInfraCapabilitiesChanged {
|
||||
pub r#type: &'static str,
|
||||
pub infra_group_id: i32,
|
||||
pub infra_group_infra_device: MacAddress,
|
||||
pub infra_group_infra: MacAddress,
|
||||
pub changes: Vec<CGWDeviceChange>,
|
||||
pub reporter_shard_id: i32,
|
||||
}
|
||||
|
||||
#[derive(Debug, Serialize)]
|
||||
pub struct UnassignedInfraConnection {
|
||||
pub r#type: &'static str,
|
||||
pub infra_group_infra_device: MacAddress,
|
||||
pub infra_group_infra: MacAddress,
|
||||
pub reporter_shard_id: i32,
|
||||
}
|
||||
|
||||
@@ -106,22 +128,68 @@ pub struct UnassignedInfraConnection {
|
||||
pub struct ForeignInfraConnection {
|
||||
pub r#type: &'static str,
|
||||
pub infra_group_id: i32,
|
||||
pub infra_group_infra_device: MacAddress,
|
||||
pub infra_group_infra: MacAddress,
|
||||
pub reporter_shard_id: i32,
|
||||
pub group_owner_shard_id: i32,
|
||||
}
|
||||
|
||||
#[derive(Debug, Serialize, Deserialize)]
|
||||
pub struct APClientJoinMessage {
|
||||
pub r#type: &'static str,
|
||||
pub infra_group_id: i32,
|
||||
pub client: MacAddress,
|
||||
pub infra_group_infra: MacAddress,
|
||||
pub ssid: String,
|
||||
pub band: String,
|
||||
}
|
||||
|
||||
#[derive(Debug, Serialize, Deserialize)]
|
||||
pub struct APClientLeaveMessage {
|
||||
pub r#type: &'static str,
|
||||
pub infra_group_id: i32,
|
||||
pub client: MacAddress,
|
||||
pub infra_group_infra: MacAddress,
|
||||
pub band: String,
|
||||
}
|
||||
|
||||
#[derive(Debug, Serialize, Deserialize)]
|
||||
pub struct APClientMigrateMessage {
|
||||
pub r#type: &'static str,
|
||||
pub infra_group_id: i32,
|
||||
pub client: MacAddress,
|
||||
pub to_infra_group_infra_device: MacAddress,
|
||||
pub to_ssid: String,
|
||||
pub to_band: String,
|
||||
}
|
||||
|
||||
#[derive(Debug, Serialize, Deserialize)]
|
||||
pub struct InfraJoinMessage {
|
||||
pub r#type: &'static str,
|
||||
pub infra_group_id: i32,
|
||||
pub infra_group_infra: MacAddress,
|
||||
pub infra_public_ip: SocketAddr,
|
||||
pub reporter_shard_id: i32,
|
||||
}
|
||||
|
||||
#[derive(Debug, Serialize, Deserialize)]
|
||||
pub struct InfraLeaveMessage {
|
||||
pub r#type: &'static str,
|
||||
pub infra_group_id: i32,
|
||||
pub infra_group_infra: MacAddress,
|
||||
pub reporter_shard_id: i32,
|
||||
}
|
||||
|
||||
pub fn cgw_construct_infra_group_create_response(
|
||||
infra_group_id: i32,
|
||||
infra_name: String,
|
||||
reporter_shard_id: i32,
|
||||
uuid: Uuid,
|
||||
success: bool,
|
||||
error_message: Option<String>,
|
||||
) -> Result<String> {
|
||||
let group_create = InfraGroupCreateResponse {
|
||||
r#type: "infrastructure_group_create",
|
||||
r#type: "infrastructure_group_create_response",
|
||||
infra_group_id,
|
||||
infra_name,
|
||||
reporter_shard_id,
|
||||
uuid,
|
||||
success,
|
||||
error_message,
|
||||
@@ -132,12 +200,14 @@ pub fn cgw_construct_infra_group_create_response(
|
||||
|
||||
pub fn cgw_construct_infra_group_delete_response(
|
||||
infra_group_id: i32,
|
||||
reporter_shard_id: i32,
|
||||
uuid: Uuid,
|
||||
success: bool,
|
||||
error_message: Option<String>,
|
||||
) -> Result<String> {
|
||||
let group_delete = InfraGroupDeleteResponse {
|
||||
r#type: "infrastructure_group_delete",
|
||||
r#type: "infrastructure_group_delete_response",
|
||||
reporter_shard_id,
|
||||
infra_group_id,
|
||||
uuid,
|
||||
success,
|
||||
@@ -147,54 +217,66 @@ pub fn cgw_construct_infra_group_delete_response(
|
||||
Ok(serde_json::to_string(&group_delete)?)
|
||||
}
|
||||
|
||||
pub fn cgw_construct_infra_group_device_add_response(
|
||||
pub fn cgw_construct_infra_group_infras_add_response(
|
||||
infra_group_id: i32,
|
||||
infra_group_infra_devices: Vec<MacAddress>,
|
||||
failed_infras: Vec<MacAddress>,
|
||||
reporter_shard_id: i32,
|
||||
uuid: Uuid,
|
||||
success: bool,
|
||||
error_message: Option<String>,
|
||||
kafka_partition_key: Option<String>,
|
||||
) -> Result<String> {
|
||||
let dev_add = InfraGroupDeviceAddResponse {
|
||||
r#type: "infrastructure_group_device_add_response",
|
||||
let dev_add = InfraGroupInfrasAddResponse {
|
||||
r#type: "infrastructure_group_infras_add_response",
|
||||
infra_group_id,
|
||||
infra_group_infra_devices,
|
||||
failed_infras,
|
||||
reporter_shard_id,
|
||||
uuid,
|
||||
success,
|
||||
error_message,
|
||||
kafka_partition_key,
|
||||
};
|
||||
|
||||
Ok(serde_json::to_string(&dev_add)?)
|
||||
}
|
||||
|
||||
pub fn cgw_construct_infra_group_device_del_response(
|
||||
pub fn cgw_construct_infra_group_infras_del_response(
|
||||
infra_group_id: i32,
|
||||
infra_group_infra_devices: Vec<MacAddress>,
|
||||
failed_infras: Vec<MacAddress>,
|
||||
reporter_shard_id: i32,
|
||||
uuid: Uuid,
|
||||
success: bool,
|
||||
error_message: Option<String>,
|
||||
kafka_partition_key: Option<String>,
|
||||
) -> Result<String> {
|
||||
let dev_del = InfraGroupDeviceDelResponse {
|
||||
r#type: "infrastructure_group_device_del_response",
|
||||
let dev_del = InfraGroupInfrasDelResponse {
|
||||
r#type: "infrastructure_group_infras_del_response",
|
||||
infra_group_id,
|
||||
infra_group_infra_devices,
|
||||
failed_infras,
|
||||
reporter_shard_id,
|
||||
uuid,
|
||||
success,
|
||||
error_message,
|
||||
kafka_partition_key,
|
||||
};
|
||||
|
||||
Ok(serde_json::to_string(&dev_del)?)
|
||||
}
|
||||
|
||||
pub fn cgw_construct_device_enqueue_response(
|
||||
pub fn cgw_construct_infra_enqueue_response(
|
||||
reporter_shard_id: i32,
|
||||
uuid: Uuid,
|
||||
success: bool,
|
||||
error_message: Option<String>,
|
||||
kafka_partition_key: Option<String>,
|
||||
) -> Result<String> {
|
||||
let dev_enq_resp = InfraGroupDeviceMessageEnqueueResponse {
|
||||
r#type: "infrastructure_group_device_message_enqueu_response",
|
||||
let dev_enq_resp = InfraGroupInfraMessageEnqueueResponse {
|
||||
r#type: "infrastructure_group_infra_message_enqueue_response",
|
||||
reporter_shard_id,
|
||||
uuid,
|
||||
success,
|
||||
error_message,
|
||||
kafka_partition_key,
|
||||
};
|
||||
|
||||
Ok(serde_json::to_string(&dev_enq_resp)?)
|
||||
@@ -202,6 +284,7 @@ pub fn cgw_construct_device_enqueue_response(
|
||||
|
||||
pub fn cgw_construct_rebalance_group_response(
|
||||
infra_group_id: i32,
|
||||
reporter_shard_id: i32,
|
||||
uuid: Uuid,
|
||||
success: bool,
|
||||
error_message: Option<String>,
|
||||
@@ -209,6 +292,7 @@ pub fn cgw_construct_rebalance_group_response(
|
||||
let rebalanse_resp = RebalanceGroupsResponse {
|
||||
r#type: "rebalance_groups_response",
|
||||
infra_group_id,
|
||||
reporter_shard_id,
|
||||
uuid,
|
||||
success,
|
||||
error_message,
|
||||
@@ -217,10 +301,11 @@ pub fn cgw_construct_rebalance_group_response(
|
||||
Ok(serde_json::to_string(&rebalanse_resp)?)
|
||||
}
|
||||
|
||||
pub fn cgw_construct_device_capabilities_changed_msg(
|
||||
infra_group_infra_device: MacAddress,
|
||||
pub fn cgw_construct_infra_capabilities_changed_msg(
|
||||
infra_group_infra: MacAddress,
|
||||
infra_group_id: i32,
|
||||
diff: &HashMap<String, OldNew>,
|
||||
reporter_shard_id: i32,
|
||||
) -> Result<String> {
|
||||
let mut changes: Vec<CGWDeviceChange> = Vec::new();
|
||||
|
||||
@@ -232,23 +317,24 @@ pub fn cgw_construct_device_capabilities_changed_msg(
|
||||
});
|
||||
}
|
||||
|
||||
let dev_cap_msg = InfraGroupDeviceCapabilitiesChanged {
|
||||
r#type: "infrastructure_group_device_capabilities_changed",
|
||||
let dev_cap_msg = InfraGroupInfraCapabilitiesChanged {
|
||||
r#type: "infrastructure_group_infra_capabilities_changed",
|
||||
infra_group_id,
|
||||
infra_group_infra_device,
|
||||
infra_group_infra,
|
||||
changes,
|
||||
reporter_shard_id,
|
||||
};
|
||||
|
||||
Ok(serde_json::to_string(&dev_cap_msg)?)
|
||||
}
|
||||
|
||||
pub fn cgw_construct_unassigned_infra_connection_msg(
|
||||
infra_group_infra_device: MacAddress,
|
||||
infra_group_infra: MacAddress,
|
||||
reporter_shard_id: i32,
|
||||
) -> Result<String> {
|
||||
let unassigned_infra_msg = UnassignedInfraConnection {
|
||||
r#type: "unassigned_infra_connection",
|
||||
infra_group_infra_device,
|
||||
infra_group_infra,
|
||||
reporter_shard_id,
|
||||
};
|
||||
|
||||
@@ -257,14 +343,14 @@ pub fn cgw_construct_unassigned_infra_connection_msg(
|
||||
|
||||
pub fn cgw_construct_foreign_infra_connection_msg(
|
||||
infra_group_id: i32,
|
||||
infra_group_infra_device: MacAddress,
|
||||
infra_group_infra: MacAddress,
|
||||
reporter_shard_id: i32,
|
||||
group_owner_shard_id: i32,
|
||||
) -> Result<String> {
|
||||
let foreign_infra_msg = ForeignInfraConnection {
|
||||
r#type: "foreign_infra_connection",
|
||||
infra_group_id,
|
||||
infra_group_infra_device,
|
||||
infra_group_infra,
|
||||
reporter_shard_id,
|
||||
group_owner_shard_id,
|
||||
};
|
||||
@@ -272,11 +358,231 @@ pub fn cgw_construct_foreign_infra_connection_msg(
|
||||
Ok(serde_json::to_string(&foreign_infra_msg)?)
|
||||
}
|
||||
|
||||
struct CustomContext;
|
||||
pub fn cgw_construct_client_join_msg(
|
||||
infra_group_id: i32,
|
||||
client: MacAddress,
|
||||
infra_group_infra: MacAddress,
|
||||
ssid: String,
|
||||
band: String,
|
||||
) -> Result<String> {
|
||||
let client_join_msg = APClientJoinMessage {
|
||||
r#type: "ap_client_join",
|
||||
infra_group_id,
|
||||
client,
|
||||
infra_group_infra,
|
||||
ssid,
|
||||
band,
|
||||
};
|
||||
|
||||
Ok(serde_json::to_string(&client_join_msg)?)
|
||||
}
|
||||
|
||||
pub fn cgw_construct_client_leave_msg(
|
||||
infra_group_id: i32,
|
||||
client: MacAddress,
|
||||
infra_group_infra: MacAddress,
|
||||
band: String,
|
||||
) -> Result<String> {
|
||||
let client_join_msg = APClientLeaveMessage {
|
||||
r#type: "ap_client_leave",
|
||||
infra_group_id,
|
||||
client,
|
||||
infra_group_infra,
|
||||
band,
|
||||
};
|
||||
|
||||
Ok(serde_json::to_string(&client_join_msg)?)
|
||||
}
|
||||
|
||||
pub fn cgw_construct_client_migrate_msg(
|
||||
infra_group_id: i32,
|
||||
client: MacAddress,
|
||||
to_infra_group_infra_device: MacAddress,
|
||||
to_ssid: String,
|
||||
to_band: String,
|
||||
) -> Result<String> {
|
||||
let client_migrate_msg = APClientMigrateMessage {
|
||||
r#type: "ap_client_migrate",
|
||||
infra_group_id,
|
||||
client,
|
||||
to_infra_group_infra_device,
|
||||
to_ssid,
|
||||
to_band,
|
||||
};
|
||||
|
||||
Ok(serde_json::to_string(&client_migrate_msg)?)
|
||||
}
|
||||
|
||||
pub fn cgw_construct_infra_join_msg(
|
||||
infra_group_id: i32,
|
||||
infra_group_infra: MacAddress,
|
||||
infra_public_ip: SocketAddr,
|
||||
reporter_shard_id: i32,
|
||||
) -> Result<String> {
|
||||
let infra_join_msg = InfraJoinMessage {
|
||||
r#type: "infra_join",
|
||||
infra_group_id,
|
||||
infra_group_infra,
|
||||
infra_public_ip,
|
||||
reporter_shard_id,
|
||||
};
|
||||
|
||||
Ok(serde_json::to_string(&infra_join_msg)?)
|
||||
}
|
||||
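For illustration, a hedged usage sketch of the constructor above; the MAC and socket address values are placeholders, and the exact serialized forms of MacAddress/SocketAddr depend on their serde implementations.

// Hypothetical call; the resulting JSON looks roughly like:
// {"type":"infra_join","infra_group_id":100,"infra_group_infra":"02-00-00-00-00-01",
//  "infra_public_ip":"198.51.100.10:49152","reporter_shard_id":0}
let msg = cgw_construct_infra_join_msg(
    100,
    MacAddress::from_str("02:00:00:00:00:01")?,
    "198.51.100.10:49152".parse()?,
    0,
)?;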
|
||||
pub fn cgw_construct_infra_leave_msg(
|
||||
infra_group_id: i32,
|
||||
infra_group_infra: MacAddress,
|
||||
reporter_shard_id: i32,
|
||||
) -> Result<String> {
|
||||
let infra_leave_msg = InfraLeaveMessage {
|
||||
r#type: "infra_leave",
|
||||
infra_group_id,
|
||||
infra_group_infra,
|
||||
reporter_shard_id,
|
||||
};
|
||||
|
||||
Ok(serde_json::to_string(&infra_leave_msg)?)
|
||||
}
|
||||
|
||||
pub fn cgw_construct_infra_request_result_msg(
|
||||
reporter_shard_id: i32,
|
||||
uuid: Uuid,
|
||||
id: u64,
|
||||
success: bool,
|
||||
error_message: Option<String>,
|
||||
) -> Result<String> {
|
||||
let infra_request_result = InfraGroupInfraRequestResult {
|
||||
r#type: "infra_request_result",
|
||||
reporter_shard_id,
|
||||
uuid,
|
||||
id,
|
||||
success,
|
||||
error_message,
|
||||
};
|
||||
|
||||
Ok(serde_json::to_string(&infra_request_result)?)
|
||||
}
|
||||
|
||||
struct CGWConsumerContextData {
|
||||
// Tuple consisting of a physical partition id (0, 1, 2, etc.)
|
||||
// and the corresponding _kafka routing key_, or just kafka key,
|
||||
// that can be used with this topic to reach the specified partition.
|
||||
// It can be used to optimize CGW to GID to Kafka topic mapping,
|
||||
// e.g. cloud has knowledge of what kafka key to use, to direct
|
||||
// a NB message to a specific CGW, without the need of
|
||||
// always falling back to the relaying mechanism.
|
||||
// P.S. this optimization technique does not necessarily
|
||||
// make relaying obsolete. Relaying is still used to
|
||||
// forward at least one (first) NB request from
|
||||
// the shard that received the message to the designated
|
||||
// recipient. Whenever recipient shard receives the NB
|
||||
// request and sends response back to NB services,
|
||||
// shard should reply back with routing_key included.
|
||||
// It's up to cloud (NB services) then to use specified
|
||||
// kafka key to make sure the kafka message reaches
|
||||
// recipient shard in exactly one hop (direct forwarding),
|
||||
// or omit kafka key completely to once again use the
|
||||
// relaying mechanism.
|
||||
partition_mapping: HashMap<u32, String>,
|
||||
assigned_partition_list: Vec<u32>,
|
||||
last_used_key_idx: u32,
|
||||
partition_num: usize,
|
||||
|
||||
// A bit ugly, but we need a way to get
|
||||
// consumer (to retrieve partition num) whenever
|
||||
// client->context rebalance callback is being called.
|
||||
consumer_client: Option<Arc<CGWCNCConsumerType>>,
|
||||
}
|
||||
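To make the routing-key optimization described above concrete, a rough NB-side sketch: a service that remembered the kafka_partition_key reported by the owning shard can publish follow-up requests with that key, so the default partitioner lands them on that shard's partition. The topic name and payload are placeholders, and falling back to relaying on failure is an assumption.

use rdkafka::producer::{FutureProducer, FutureRecord};
use std::time::Duration;

async fn send_direct(producer: &FutureProducer, routing_key: &str, payload: &str) {
    // `routing_key` is the kafka_partition_key previously returned by the shard.
    let record = FutureRecord::to("CnC") // topic name is an assumption
        .key(routing_key)
        .payload(payload);
    if let Err((e, _)) = producer.send(record, Duration::from_secs(5)).await {
        // The NB service could drop the cached key here and fall back to relaying.
        eprintln!("direct send failed: {e}");
    }
}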
|
||||
struct CustomContext {
|
||||
ctx_data: std::sync::RwLock<CGWConsumerContextData>,
|
||||
}
|
||||
|
||||
impl CGWConsumerContextData {
|
||||
fn recalculate_partition_to_key_mapping(&mut self, partition_num: usize) {
|
||||
const DEFAULT_HASH_SEED: u32 = 0x9747b28c;
|
||||
|
||||
// The factor of 10 is selected to cover >=15000 partitions,
|
||||
// meaning with 15K partitions, this algorithm can still
|
||||
// confidently cover all 15K partitions with unique
|
||||
// kafka string-keys.
|
||||
// Even then, anything past 10K partitions per topic
|
||||
// would be overkill in the first place, hence
|
||||
// this algo should be sufficient.
|
||||
let loop_range = Range {
|
||||
start: 0,
|
||||
end: partition_num * 10,
|
||||
};
|
||||
let mut key_map: HashMap<u32, String> = HashMap::new();
|
||||
|
||||
for x in loop_range {
|
||||
let key_str = x.to_string();
|
||||
let key_bytes = key_str.as_bytes();
|
||||
|
||||
if key_map.len() == partition_num {
|
||||
break;
|
||||
}
|
||||
|
||||
// The default partitioner uses the following formula:
|
||||
// toPositive(murmur2(keyBytes)) % numPartitions
|
||||
let hash_res = murmur2(key_bytes, DEFAULT_HASH_SEED) & 0x7fffffff;
|
||||
let part_idx = hash_res.rem_euclid(partition_num as u32);
|
||||
|
||||
if !key_map.contains_key(&part_idx) {
|
||||
debug!("Inserted key '{key_str}' for '{part_idx}' partition");
|
||||
key_map.insert(part_idx, key_str);
|
||||
}
|
||||
}
|
||||
|
||||
info!(
|
||||
"Filled {} unique keys for {} of partitions",
|
||||
key_map.len(),
|
||||
partition_num
|
||||
);
|
||||
|
||||
if key_map.len() != partition_num {
|
||||
// All this means is that some partition X has
|
||||
// no corresponding 1:1 kafka key.
|
||||
// From the CGW perspective, this means that the application
|
||||
// will always instruct NB to use a set of keys that
|
||||
// we were able to map, ignoring any other un-mapped
|
||||
// partitions, rendering them unused completely.
|
||||
// But it's still up to NB to either use or ignore the
|
||||
// routing kafka key provided by CGW.
|
||||
warn!("Filled fulfill all range of kafka topics for 1:1 mapping, some partitions will not be mapped!");
|
||||
}
|
||||
|
||||
self.partition_mapping = key_map;
|
||||
}
|
||||
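A minimal standalone check of the mapping rule used above (the Kafka default-partitioner formula), assuming only the murmur2 crate; it simply prints which partition a handful of string keys would land on for a fixed partition count.

use murmur2::murmur2;

const DEFAULT_HASH_SEED: u32 = 0x9747b28c;

fn partition_for_key(key: &str, partition_num: u32) -> u32 {
    // toPositive(murmur2(keyBytes)) % numPartitions
    (murmur2(key.as_bytes(), DEFAULT_HASH_SEED) & 0x7fffffff) % partition_num
}

fn main() {
    // With, say, 4 partitions, keys "0".."9" spread over partitions 0..3.
    for k in 0..10u32 {
        let key = k.to_string();
        println!("key {key} -> partition {}", partition_for_key(&key, 4));
    }
}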
|
||||
fn get_partition_info(&mut self) -> (Vec<u32>, HashMap<u32, String>) {
|
||||
(
|
||||
self.assigned_partition_list.clone(),
|
||||
self.partition_mapping.clone(),
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
impl ClientContext for CustomContext {}
|
||||
|
||||
impl ConsumerContext for CustomContext {
|
||||
fn pre_rebalance(&self, rebalance: &Rebalance<'_>) {
|
||||
debug!("Pre rebalance entry");
|
||||
|
||||
// We need to make sure that before
|
||||
// we're _actually_ assigned a partition list,
|
||||
// we don't fool any internal code that depends
|
||||
// on the topic list: zero it out when not
|
||||
// ready, and return anything only when it's
|
||||
// available.
|
||||
if let Ok(mut ctx) = self.ctx_data.write() {
|
||||
ctx.partition_mapping.clear();
|
||||
ctx.assigned_partition_list.clear();
|
||||
ctx.last_used_key_idx = 0;
|
||||
ctx.partition_num = 0;
|
||||
}
|
||||
|
||||
let mut part_list = String::new();
|
||||
if let rdkafka::consumer::Rebalance::Assign(partitions) = rebalance {
|
||||
for x in partitions.elements() {
|
||||
@@ -296,15 +602,52 @@ impl ConsumerContext for CustomContext {
|
||||
}
|
||||
|
||||
fn post_rebalance(&self, rebalance: &Rebalance<'_>) {
|
||||
let mut assigned_partition_list: Vec<u32> = Vec::new();
|
||||
let mut part_list = String::new();
|
||||
|
||||
if let rdkafka::consumer::Rebalance::Assign(partitions) = rebalance {
|
||||
for x in partitions.elements() {
|
||||
part_list += &(x.partition().to_string() + " ");
|
||||
assigned_partition_list.push(x.partition() as u32);
|
||||
}
|
||||
debug!("post_rebalance callback, assigned partition(s): {part_list}");
|
||||
}
|
||||
|
||||
if let Ok(mut ctx) = self.ctx_data.write() {
|
||||
if let Some(consumer) = &ctx.consumer_client {
|
||||
if let Ok(metadata) =
|
||||
consumer.fetch_metadata(Some(CONSUMER_TOPICS[0]), Duration::from_millis(2000))
|
||||
{
|
||||
let topic = &metadata.topics()[0];
|
||||
let partition_num: usize = topic.partitions().len();
|
||||
|
||||
debug!("topic: {}, partitions: {}", topic.name(), partition_num);
|
||||
|
||||
// We recalculate mapping only if the underlying
|
||||
// _number_ of partitions has changed.
|
||||
// Also, the underlying assignment to a specific
|
||||
// partitions is irrelevant itself,
|
||||
// as key:partition mapping changes only whenever
|
||||
// underlying number of partitions is altered.
|
||||
//
|
||||
// This also means that the underlying block
|
||||
// will get executed at least once throughout the
|
||||
// CGW lifetime - at least once upon startup,
|
||||
// whenever _this_ CGW consumer group
|
||||
// consumer instance - CGW shard - is being
|
||||
// assigned a list of partitions to consume from.
|
||||
if ctx.partition_num != partition_num {
|
||||
ctx.partition_num = partition_num;
|
||||
ctx.assigned_partition_list = assigned_partition_list;
|
||||
|
||||
ctx.recalculate_partition_to_key_mapping(partition_num);
|
||||
}
|
||||
} else {
|
||||
warn!("Tried to fetch consumer metadata but failed. CGW will not be able to reply with optimized Kafka key for efficient routing!");
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
part_list.clear();
|
||||
|
||||
if let rdkafka::consumer::Rebalance::Revoke(partitions) = rebalance {
|
||||
@@ -324,13 +667,13 @@ impl ConsumerContext for CustomContext {
|
||||
});
|
||||
}
|
||||
|
||||
fn commit_callback(&self, _result: KafkaResult<()>, _offsets: &TopicPartitionList) {
|
||||
fn commit_callback(&self, result: KafkaResult<()>, _offsets: &TopicPartitionList) {
|
||||
let mut part_list = String::new();
|
||||
for x in _offsets.elements() {
|
||||
part_list += &(x.partition().to_string() + " ");
|
||||
}
|
||||
debug!("commit_callback callback, partition(s): {part_list}");
|
||||
debug!("Consumer callback: commited offset");
|
||||
debug!("Consumer callback: commiting offsets: {:?}", result);
|
||||
}
|
||||
}
|
||||
|
||||
@@ -343,28 +686,33 @@ struct CGWCNCProducer {
|
||||
}
|
||||
|
||||
struct CGWCNCConsumer {
|
||||
c: CGWCNCConsumerType,
|
||||
c: Arc<CGWCNCConsumerType>,
|
||||
}
|
||||
|
||||
impl CGWCNCConsumer {
|
||||
pub fn new(app_args: &AppArgs) -> Result<Self> {
|
||||
let consum: CGWCNCConsumerType = Self::create_consumer(app_args)?;
|
||||
pub fn new(cgw_id: i32, kafka_args: &CGWKafkaArgs) -> Result<Self> {
|
||||
let consum = Self::create_consumer(cgw_id, kafka_args)?;
|
||||
Ok(CGWCNCConsumer { c: consum })
|
||||
}
|
||||
|
||||
fn create_consumer(app_args: &AppArgs) -> Result<CGWCNCConsumerType> {
|
||||
let context = CustomContext;
|
||||
fn create_consumer(cgw_id: i32, kafka_args: &CGWKafkaArgs) -> Result<Arc<CGWCNCConsumerType>> {
|
||||
let context = CustomContext {
|
||||
ctx_data: std::sync::RwLock::new(CGWConsumerContextData {
|
||||
partition_mapping: HashMap::new(),
|
||||
assigned_partition_list: Vec::new(),
|
||||
last_used_key_idx: 0u32,
|
||||
partition_num: 0usize,
|
||||
consumer_client: None,
|
||||
}),
|
||||
};
|
||||
|
||||
let consumer: CGWCNCConsumerType = match ClientConfig::new()
|
||||
.set("group.id", GROUP_ID)
|
||||
.set(
|
||||
"client.id",
|
||||
GROUP_ID.to_string() + &app_args.cgw_id.to_string(),
|
||||
)
|
||||
.set("group.instance.id", app_args.cgw_id.to_string())
|
||||
.set("client.id", GROUP_ID.to_string() + &cgw_id.to_string())
|
||||
.set("group.instance.id", cgw_id.to_string())
|
||||
.set(
|
||||
"bootstrap.servers",
|
||||
app_args.kafka_host.clone() + ":" + &app_args.kafka_port.to_string(),
|
||||
kafka_args.kafka_host.clone() + ":" + &kafka_args.kafka_port.to_string(),
|
||||
)
|
||||
.set("enable.partition.eof", "false")
|
||||
.set("session.timeout.ms", "6000")
|
||||
@@ -376,39 +724,47 @@ impl CGWCNCConsumer {
|
||||
{
|
||||
Ok(c) => c,
|
||||
Err(e) => {
|
||||
error!("Failed to create kafka consumer from config: {:?}", e);
|
||||
error!("Failed to create kafka consumer from config! Error: {e}");
|
||||
return Err(Error::Kafka(e));
|
||||
}
|
||||
};
|
||||
|
||||
let consumer = Arc::new(consumer);
|
||||
// Store the consumer handle in the context (needed by the rebalance callbacks)
|
||||
let consumer_clone = consumer.clone();
|
||||
|
||||
debug!(
|
||||
"(consumer) (producer) Created lazy connection to kafka broker ({}:{})...",
|
||||
app_args.kafka_host, app_args.kafka_port,
|
||||
"(consumer) Created lazy connection to kafka broker ({}:{})...",
|
||||
kafka_args.kafka_host, kafka_args.kafka_port,
|
||||
);
|
||||
|
||||
if let Err(e) = consumer.subscribe(&CONSUMER_TOPICS) {
|
||||
error!(
|
||||
"Kafka consumer was unable to subscribe to {:?}",
|
||||
"Kafka consumer was unable to subscribe to {:?}! Error: {e}",
|
||||
CONSUMER_TOPICS
|
||||
);
|
||||
return Err(Error::Kafka(e));
|
||||
};
|
||||
|
||||
if let Ok(mut ctx) = consumer.context().ctx_data.write() {
|
||||
ctx.consumer_client = Some(consumer_clone);
|
||||
}
|
||||
|
||||
Ok(consumer)
|
||||
}
|
||||
}
|
||||
|
||||
impl CGWCNCProducer {
|
||||
pub fn new(app_args: &AppArgs) -> Result<Self> {
|
||||
let prod: CGWCNCProducerType = Self::create_producer(app_args)?;
|
||||
pub fn new(kafka_args: &CGWKafkaArgs) -> Result<Self> {
|
||||
let prod: CGWCNCProducerType = Self::create_producer(kafka_args)?;
|
||||
Ok(CGWCNCProducer { p: prod })
|
||||
}
|
||||
|
||||
fn create_producer(app_args: &AppArgs) -> Result<CGWCNCProducerType> {
|
||||
fn create_producer(kafka_args: &CGWKafkaArgs) -> Result<CGWCNCProducerType> {
|
||||
let producer: FutureProducer = match ClientConfig::new()
|
||||
.set(
|
||||
"bootstrap.servers",
|
||||
app_args.kafka_host.clone() + ":" + &app_args.kafka_port.to_string(),
|
||||
kafka_args.kafka_host.clone() + ":" + &kafka_args.kafka_port.to_string(),
|
||||
)
|
||||
.set("message.timeout.ms", "5000")
|
||||
.create()
|
||||
@@ -422,7 +778,7 @@ impl CGWCNCProducer {
|
||||
|
||||
debug!(
|
||||
"(producer) Created lazy connection to kafka broker ({}:{})...",
|
||||
app_args.kafka_host, app_args.kafka_port,
|
||||
kafka_args.kafka_host, kafka_args.kafka_port,
|
||||
);
|
||||
|
||||
Ok(producer)
|
||||
@@ -433,12 +789,17 @@ pub struct CGWNBApiClient {
|
||||
working_runtime_handle: Runtime,
|
||||
cgw_server_tx_mbox: CGWConnectionServerMboxTx,
|
||||
prod: CGWCNCProducer,
|
||||
consumer: Arc<CGWCNCConsumer>,
|
||||
// TBD: split different implementors through a defined trait,
|
||||
// that implements async R W operations?
|
||||
}
|
||||
|
||||
impl CGWNBApiClient {
|
||||
pub fn new(app_args: &AppArgs, cgw_tx: &CGWConnectionServerMboxTx) -> Result<Arc<Self>> {
|
||||
pub fn new(
|
||||
cgw_id: i32,
|
||||
kafka_args: &CGWKafkaArgs,
|
||||
cgw_tx: &CGWConnectionServerMboxTx,
|
||||
) -> Result<Arc<Self>> {
|
||||
let working_runtime_h = Builder::new_multi_thread()
|
||||
.worker_threads(1)
|
||||
.thread_name("cgw-nb-api-l")
|
||||
@@ -446,14 +807,16 @@ impl CGWNBApiClient {
|
||||
.enable_all()
|
||||
.build()?;
|
||||
|
||||
let consumer: Arc<CGWCNCConsumer> = Arc::new(CGWCNCConsumer::new(cgw_id, kafka_args)?);
|
||||
let consumer_clone = consumer.clone();
|
||||
let cl = Arc::new(CGWNBApiClient {
|
||||
working_runtime_handle: working_runtime_h,
|
||||
cgw_server_tx_mbox: cgw_tx.clone(),
|
||||
prod: CGWCNCProducer::new(app_args)?,
|
||||
prod: CGWCNCProducer::new(kafka_args)?,
|
||||
consumer: consumer_clone,
|
||||
});
|
||||
|
||||
let cl_clone = cl.clone();
|
||||
let consumer: CGWCNCConsumer = CGWCNCConsumer::new(app_args)?;
|
||||
cl.working_runtime_handle.spawn(async move {
|
||||
loop {
|
||||
let cl_clone = cl_clone.clone();
|
||||
@@ -470,7 +833,7 @@ impl CGWNBApiClient {
|
||||
None => "",
|
||||
Some(Ok(s)) => s,
|
||||
Some(Err(e)) => {
|
||||
warn!("Error while deserializing message payload: {:?}", e);
|
||||
warn!("Error while deserializing message payload! Error: {e}");
|
||||
""
|
||||
}
|
||||
};
|
||||
@@ -479,7 +842,7 @@ impl CGWNBApiClient {
|
||||
None => "",
|
||||
Some(Ok(s)) => s,
|
||||
Some(Err(e)) => {
|
||||
warn!("Error while deserializing message payload: {:?}", e);
|
||||
warn!("Deserializing message payload failed! Error: {e}");
|
||||
""
|
||||
}
|
||||
};
|
||||
@@ -492,13 +855,36 @@ impl CGWNBApiClient {
|
||||
Ok(())
|
||||
}
|
||||
});
|
||||
let _ = stream_processor.await;
|
||||
|
||||
if let Err(e) = stream_processor.await {
|
||||
error!("Failed to create NB API Client! Error: {e}");
|
||||
}
|
||||
}
|
||||
});
|
||||
|
||||
Ok(cl)
|
||||
}
|
||||
|
||||
pub fn get_partition_to_local_shard_mapping(&self) -> Vec<(u32, String)> {
|
||||
let mut return_vec: Vec<(u32, String)> = Vec::new();
|
||||
if let Ok(mut ctx) = self.consumer.c.context().ctx_data.write() {
|
||||
let (assigned_partition_list, mut partition_mapping) = ctx.get_partition_info();
|
||||
|
||||
if !partition_mapping.is_empty()
|
||||
&& ctx.partition_num > 0
|
||||
&& !assigned_partition_list.is_empty()
|
||||
{
|
||||
for x in assigned_partition_list {
|
||||
if let Some(key) = partition_mapping.remove(&x) {
|
||||
return_vec.push((x, key));
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return_vec
|
||||
}
|
||||
|
||||
pub async fn enqueue_mbox_message_from_cgw_server(&self, key: String, payload: String) {
|
||||
let produce_future = self.prod.p.send(
|
||||
FutureRecord::to(PRODUCER_TOPICS)
|
||||
@@ -508,17 +894,20 @@ impl CGWNBApiClient {
|
||||
);
|
||||
|
||||
if let Err((e, _)) = produce_future.await {
|
||||
error!("{:?}", e)
|
||||
error!("{e}")
|
||||
}
|
||||
}
|
||||
|
||||
async fn enqueue_mbox_message_to_cgw_server(&self, key: String, payload: String) {
|
||||
debug!("MBOX_OUT: EnqueueNewMessageFromNBAPIListener, k:{key}");
|
||||
debug!("MBOX_OUT: EnqueueNewMessageFromNBAPIListener, key: {key}");
|
||||
let msg = CGWConnectionNBAPIReqMsg::EnqueueNewMessageFromNBAPIListener(
|
||||
key,
|
||||
payload,
|
||||
CGWConnectionNBAPIReqMsgOrigin::FromNBAPI,
|
||||
);
|
||||
let _ = self.cgw_server_tx_mbox.send(msg);
|
||||
|
||||
if let Err(e) = self.cgw_server_tx_mbox.send(msg) {
|
||||
error!("Failed to send message to CGW server (remote)! Error: {e}");
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
File diff suppressed because it is too large
@@ -1,4 +1,4 @@
|
||||
use crate::AppArgs;
|
||||
use crate::cgw_app_args::CGWGRPCArgs;
|
||||
|
||||
pub mod cgw_remote {
|
||||
tonic::include_proto!("cgw.remote");
|
||||
@@ -52,11 +52,11 @@ pub struct CGWRemoteServer {
|
||||
}
|
||||
|
||||
impl CGWRemoteServer {
|
||||
pub fn new(app_args: &AppArgs) -> Self {
|
||||
pub fn new(cgw_id: i32, grpc_args: &CGWGRPCArgs) -> Self {
|
||||
let remote_cfg = CGWRemoteConfig::new(
|
||||
app_args.cgw_id,
|
||||
app_args.grpc_listening_ip,
|
||||
app_args.grpc_listening_port,
|
||||
cgw_id,
|
||||
grpc_args.grpc_listening_ip,
|
||||
grpc_args.grpc_listening_port,
|
||||
);
|
||||
CGWRemoteServer { cfg: remote_cfg }
|
||||
}
|
||||
@@ -76,8 +76,10 @@ impl CGWRemoteServer {
|
||||
self.cfg.remote_id, self.cfg.server_ip, self.cfg.server_port
|
||||
);
|
||||
|
||||
let res = grpc_srv.serve(self.cfg.to_socket_addr()).await;
|
||||
error!("grpc server returned {:?}", res);
|
||||
// end of GRPC server build / start declaration
|
||||
if let Err(e) = grpc_srv.serve(self.cfg.to_socket_addr()).await {
|
||||
error!("gRPC server failed! Error: {e}");
|
||||
};
|
||||
|
||||
// end of gRPC server build / start declaration
|
||||
}
|
||||
}
|
||||
|
||||
113
src/cgw_runtime.rs
Normal file
@@ -0,0 +1,113 @@
|
||||
use crate::cgw_errors::{Error, Result};
|
||||
|
||||
use lazy_static::lazy_static;
|
||||
|
||||
use std::{
|
||||
collections::HashMap,
|
||||
sync::{
|
||||
atomic::{AtomicUsize, Ordering},
|
||||
Arc, Mutex,
|
||||
},
|
||||
};
|
||||
|
||||
use tokio::runtime::{Builder, Runtime};
|
||||
|
||||
#[derive(Hash, Eq, PartialEq, Debug)]
|
||||
pub enum CGWRuntimeType {
|
||||
WssRxTx,
|
||||
MboxInternal,
|
||||
MboxNbApiRx,
|
||||
MboxNbApiTx,
|
||||
MboxRelay,
|
||||
QueueTimeout,
|
||||
}
|
||||
|
||||
lazy_static! {
|
||||
static ref RUNTIMES: Mutex<HashMap<CGWRuntimeType, Arc<Runtime>>> = Mutex::new(HashMap::new());
|
||||
}
|
||||
|
||||
pub fn cgw_initialize_runtimes(wss_t_num: usize) -> Result<()> {
|
||||
let wss_runtime_handle = Arc::new(
|
||||
Builder::new_multi_thread()
|
||||
.worker_threads(wss_t_num)
|
||||
.thread_name_fn(|| {
|
||||
static ATOMIC_ID: AtomicUsize = AtomicUsize::new(0);
|
||||
let id = ATOMIC_ID.fetch_add(1, Ordering::SeqCst);
|
||||
format!("cgw-wss-t-{}", id)
|
||||
})
|
||||
.thread_stack_size(3 * 1024 * 1024)
|
||||
.enable_all()
|
||||
.build()?,
|
||||
);
|
||||
let internal_mbox_runtime_handle = Arc::new(
|
||||
Builder::new_multi_thread()
|
||||
.worker_threads(1)
|
||||
.thread_name("cgw-mbox")
|
||||
.thread_stack_size(1024 * 1024)
|
||||
.enable_all()
|
||||
.build()?,
|
||||
);
|
||||
let nb_api_mbox_rx_runtime_handle = Arc::new(
|
||||
Builder::new_multi_thread()
|
||||
.worker_threads(1)
|
||||
.thread_name("cgw-mbox-nbapi")
|
||||
.thread_stack_size(1024 * 1024)
|
||||
.enable_all()
|
||||
.build()?,
|
||||
);
|
||||
let nb_api_mbox_tx_runtime_handle = Arc::new(
|
||||
Builder::new_multi_thread()
|
||||
.worker_threads(1)
|
||||
.thread_name("cgw-mbox-nbapi-tx")
|
||||
.thread_stack_size(1024 * 1024)
|
||||
.enable_all()
|
||||
.build()?,
|
||||
);
|
||||
let relay_msg_mbox_runtime_handle = Arc::new(
|
||||
Builder::new_multi_thread()
|
||||
.worker_threads(1)
|
||||
.thread_name("cgw-relay-mbox-nbapi")
|
||||
.thread_stack_size(1024 * 1024)
|
||||
.enable_all()
|
||||
.build()?,
|
||||
);
|
||||
let queue_timeout_handle = Arc::new(
|
||||
Builder::new_multi_thread()
|
||||
.worker_threads(1)
|
||||
.thread_name("cgw-queue-timeout")
|
||||
.thread_stack_size(1024 * 1024)
|
||||
.enable_all()
|
||||
.build()?,
|
||||
);
|
||||
|
||||
let mut runtimes = match RUNTIMES.lock() {
|
||||
Ok(runtimes_lock) => runtimes_lock,
|
||||
Err(e) => {
|
||||
return Err(Error::Runtime(format!(
|
||||
"Failed to get runtimes lock! Error: {e}"
|
||||
)));
|
||||
}
|
||||
};
|
||||
|
||||
runtimes.insert(CGWRuntimeType::WssRxTx, wss_runtime_handle);
|
||||
runtimes.insert(CGWRuntimeType::MboxInternal, internal_mbox_runtime_handle);
|
||||
runtimes.insert(CGWRuntimeType::MboxNbApiRx, nb_api_mbox_rx_runtime_handle);
|
||||
runtimes.insert(CGWRuntimeType::MboxNbApiTx, nb_api_mbox_tx_runtime_handle);
|
||||
runtimes.insert(CGWRuntimeType::MboxRelay, relay_msg_mbox_runtime_handle);
|
||||
runtimes.insert(CGWRuntimeType::QueueTimeout, queue_timeout_handle);
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub fn cgw_get_runtime(runtime_type: CGWRuntimeType) -> Result<Option<Arc<Runtime>>> {
|
||||
let runtimes = match RUNTIMES.lock() {
|
||||
Ok(runtimes_lock) => runtimes_lock,
|
||||
Err(e) => {
|
||||
return Err(Error::Runtime(format!(
|
||||
"Failed to get runtimes lock! Error: {e}"
|
||||
)));
|
||||
}
|
||||
};
|
||||
|
||||
Ok(runtimes.get(&runtime_type).cloned())
|
||||
}
|
||||
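A small usage sketch for the accessor above: callers fetch a named runtime and spawn work onto it, handling both the not-yet-initialized case and the poisoned-lock error.

// Assumes cgw_initialize_runtimes() has already been called at startup.
match cgw_get_runtime(CGWRuntimeType::MboxInternal) {
    Ok(Some(rt)) => {
        let _ = rt.spawn(async {
            // mailbox processing work goes here
        });
    }
    Ok(None) => warn!("Internal mbox runtime is not initialized yet!"),
    Err(e) => error!("Failed to get runtime! Error: {e}"),
}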
148
src/cgw_tls.rs
@@ -1,10 +1,17 @@
|
||||
use crate::cgw_app_args::CGWWSSArgs;
|
||||
use crate::cgw_errors::{collect_results, Error, Result};
|
||||
use crate::AppArgs;
|
||||
|
||||
use base64::prelude::BASE64_STANDARD;
|
||||
use base64::Engine;
|
||||
use eui48::MacAddress;
|
||||
use rustls_pki_types::{CertificateDer, PrivateKeyDer};
|
||||
use std::fs;
|
||||
use std::io::{BufRead, Read};
|
||||
use std::path::Path;
|
||||
use std::{fs::File, io::BufReader, str::FromStr, sync::Arc};
|
||||
use tokio::net::TcpStream;
|
||||
use tokio_postgres_rustls::MakeRustlsConnect;
|
||||
use tokio_rustls::rustls;
|
||||
use tokio_rustls::{
|
||||
rustls::{server::WebPkiClientVerifier, RootCertStore, ServerConfig},
|
||||
server::TlsStream,
|
||||
@@ -13,35 +20,60 @@ use tokio_rustls::{
|
||||
use x509_parser::parse_x509_certificate;
|
||||
|
||||
const CGW_TLS_CERTIFICATES_PATH: &str = "/etc/cgw/certs";
|
||||
const CGW_TLS_NB_INFRA_CERTS_PATH: &str = "/etc/cgw/nb_infra/certs";
|
||||
|
||||
pub async fn cgw_tls_read_certs(cert_file: &str) -> Result<Vec<CertificateDer<'static>>> {
|
||||
let file = match File::open(cert_file) {
|
||||
async fn cgw_tls_read_file(file_path: &str) -> Result<Vec<u8>> {
|
||||
let mut file = match File::open(file_path) {
|
||||
Ok(f) => f,
|
||||
Err(e) => {
|
||||
return Err(Error::Tls(format!(
|
||||
"Failed to open TLS certificate file: {}. Error: {}",
|
||||
cert_file, e
|
||||
"Failed to open TLS certificate/key file: {file_path}! Error: {e}"
|
||||
)));
|
||||
}
|
||||
};
|
||||
|
||||
let mut reader = BufReader::new(file);
|
||||
let metadata = match fs::metadata(file_path) {
|
||||
Ok(meta) => meta,
|
||||
Err(e) => {
|
||||
return Err(Error::Tls(format!(
|
||||
"Failed to read file {file_path} metadata! Error: {e}"
|
||||
)));
|
||||
}
|
||||
};
|
||||
|
||||
let mut buffer = vec![0; metadata.len() as usize];
|
||||
if let Err(e) = file.read_exact(&mut buffer) {
|
||||
return Err(Error::Tls(format!(
|
||||
"Failed to read {} file. Error: {}",
|
||||
file_path, e
|
||||
)));
|
||||
}
|
||||
|
||||
let decoded_buffer = {
|
||||
if let Ok(d) = BASE64_STANDARD.decode(buffer.clone()) {
|
||||
info!(
|
||||
"Cert file {} is base64 encoded, trying to use decoded.",
|
||||
file_path
|
||||
);
|
||||
d
|
||||
} else {
|
||||
buffer
|
||||
}
|
||||
};
|
||||
|
||||
Ok(decoded_buffer)
|
||||
}
|
||||
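A quick illustration of the fallback above: the loader accepts either plain PEM or a file whose entire contents are base64-wrapped PEM. The certificate path and the temp-file handling are assumptions made just for this sketch, which could be driven from a tokio test.

use base64::prelude::BASE64_STANDARD;
use base64::Engine;

async fn check_base64_fallback() -> Result<()> {
    // Start from an existing plain PEM bundle (path is an assumption).
    let pem = std::fs::read("/etc/cgw/certs/cas.pem")?;

    // Produce a base64-wrapped copy of the same file.
    let tmp = std::env::temp_dir().join("cas.b64");
    std::fs::write(&tmp, BASE64_STANDARD.encode(&pem))?;

    // Both variants should yield the same certificate chain.
    let plain = cgw_tls_read_certs("/etc/cgw/certs/cas.pem").await?;
    let decoded = cgw_tls_read_certs(tmp.to_str().unwrap_or_default()).await?;
    assert_eq!(plain.len(), decoded.len());

    Ok(())
}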
|
||||
pub async fn cgw_tls_read_certs(cert_file: &str) -> Result<Vec<CertificateDer<'static>>> {
|
||||
let buffer = cgw_tls_read_file(cert_file).await?;
|
||||
let mut reader = BufReader::new(buffer.as_slice());
|
||||
|
||||
collect_results(rustls_pemfile::certs(&mut reader))
|
||||
}
|
||||
|
||||
pub async fn cgw_tls_read_private_key(private_key_file: &str) -> Result<PrivateKeyDer<'static>> {
|
||||
let file = match File::open(private_key_file) {
|
||||
Ok(f) => f,
|
||||
Err(e) => {
|
||||
return Err(Error::Tls(format!(
|
||||
"Failed to open TLS private key file: {}. Error: {}",
|
||||
private_key_file, e
|
||||
)));
|
||||
}
|
||||
};
|
||||
|
||||
let mut reader = BufReader::new(file);
|
||||
let buffer = cgw_tls_read_file(private_key_file).await?;
|
||||
let mut reader = BufReader::new(buffer.as_slice());
|
||||
|
||||
match rustls_pemfile::private_key(&mut reader) {
|
||||
Ok(ret_pk) => match ret_pk {
|
||||
@@ -52,8 +84,7 @@ pub async fn cgw_tls_read_private_key(private_key_file: &str) -> Result<PrivateK
|
||||
))),
|
||||
},
|
||||
Err(e) => Err(Error::Tls(format!(
|
||||
"Failed to read private key from file: {}. Error: {}",
|
||||
private_key_file, e
|
||||
"Failed to read private key from file: {private_key_file}! Error: {e}"
|
||||
))),
|
||||
}
|
||||
}
|
||||
@@ -85,8 +116,7 @@ pub async fn cgw_tls_get_cn_from_stream(stream: &TlsStream<TcpStream>) -> Result
|
||||
Ok(mac) => return Ok(mac),
|
||||
Err(e) => {
|
||||
return Err(Error::Tls(format!(
|
||||
"Failed to parse clien CN/MAC. Error: {}",
|
||||
e
|
||||
"Failed to parse clien CN/MAC! Error: {e}"
|
||||
)))
|
||||
}
|
||||
};
|
||||
@@ -95,42 +125,42 @@ pub async fn cgw_tls_get_cn_from_stream(stream: &TlsStream<TcpStream>) -> Result
|
||||
}
|
||||
Err(e) => {
|
||||
return Err(Error::Tls(format!(
|
||||
"Failed to read peer comman name. Error: {}",
|
||||
e
|
||||
"Failed to read peer common name (CN)! Error: {e}"
|
||||
)));
|
||||
}
|
||||
}
|
||||
|
||||
Err(Error::Tls("Failed to read peer comman name!".to_string()))
|
||||
Err(Error::Tls("Failed to read peer common name!".to_string()))
|
||||
}
|
||||
pub async fn cgw_tls_create_acceptor(args: &AppArgs) -> Result<TlsAcceptor> {
|
||||
|
||||
pub async fn cgw_tls_create_acceptor(wss_args: &CGWWSSArgs) -> Result<TlsAcceptor> {
|
||||
// Read root/issuer certs.
|
||||
let cas_path = format!("{}/{}", CGW_TLS_CERTIFICATES_PATH, args.wss_cas);
|
||||
let cas_path = format!("{}/{}", CGW_TLS_CERTIFICATES_PATH, wss_args.wss_cas);
|
||||
let cas = match cgw_tls_read_certs(cas_path.as_str()).await {
|
||||
Ok(cas_pem) => cas_pem,
|
||||
Err(e) => {
|
||||
error!("{}", e.to_string());
|
||||
error!("{e}");
|
||||
return Err(e);
|
||||
}
|
||||
};
|
||||
|
||||
// Read cert.
|
||||
let cert_path = format!("{}/{}", CGW_TLS_CERTIFICATES_PATH, args.wss_cert);
|
||||
let cert_path = format!("{}/{}", CGW_TLS_CERTIFICATES_PATH, wss_args.wss_cert);
|
||||
let mut cert = match cgw_tls_read_certs(cert_path.as_str()).await {
|
||||
Ok(cert_pem) => cert_pem,
|
||||
Err(e) => {
|
||||
error!("{}", e.to_string());
|
||||
error!("{e}");
|
||||
return Err(e);
|
||||
}
|
||||
};
|
||||
cert.extend(cas.clone());
|
||||
|
||||
// Read private key.
|
||||
let key_path = format!("{}/{}", CGW_TLS_CERTIFICATES_PATH, args.wss_key);
|
||||
let key_path = format!("{}/{}", CGW_TLS_CERTIFICATES_PATH, wss_args.wss_key);
|
||||
let key = match cgw_tls_read_private_key(key_path.as_str()).await {
|
||||
Ok(pkey) => pkey,
|
||||
Err(e) => {
|
||||
error!("{}", e.to_string());
|
||||
error!("{e}");
|
||||
return Err(e);
|
||||
}
|
||||
};
|
||||
@@ -142,7 +172,7 @@ pub async fn cgw_tls_create_acceptor(args: &AppArgs) -> Result<TlsAcceptor> {
|
||||
let client_verifier = match WebPkiClientVerifier::builder(Arc::new(roots)).build() {
|
||||
Ok(verifier) => verifier,
|
||||
Err(e) => {
|
||||
error!("Failed to build client verifier: {}", e.to_string());
|
||||
error!("Failed to build client verifier! Error: {e}");
|
||||
return Err(Error::Tls("Failed to build client verifier!".to_string()));
|
||||
}
|
||||
};
|
||||
@@ -154,7 +184,7 @@ pub async fn cgw_tls_create_acceptor(args: &AppArgs) -> Result<TlsAcceptor> {
|
||||
{
|
||||
Ok(server_config) => server_config,
|
||||
Err(e) => {
|
||||
error!("Failed to build server config: {}", e.to_string());
|
||||
error!("Failed to build server config! Error: {e}");
|
||||
return Err(Error::Tls("Failed to build server config!".to_string()));
|
||||
}
|
||||
};
|
||||
@@ -162,3 +192,55 @@ pub async fn cgw_tls_create_acceptor(args: &AppArgs) -> Result<TlsAcceptor> {
|
||||
// Create the TLS acceptor.
|
||||
Ok(TlsAcceptor::from(Arc::new(config)))
|
||||
}
|
||||
|
||||
pub async fn cgw_read_root_certs_dir() -> Result<Vec<u8>> {
|
||||
let mut certs_vec = Vec::new();
|
||||
|
||||
// Read the directory entries
|
||||
for entry in fs::read_dir(Path::new(CGW_TLS_NB_INFRA_CERTS_PATH))? {
|
||||
let entry = entry?;
|
||||
let path = entry.path();
|
||||
|
||||
// Check if the entry is a file and has a .crt extension (or other extensions if needed)
|
||||
if path.is_file() {
|
||||
let extension = path.extension().and_then(|ext| ext.to_str());
|
||||
if extension == Some("crt") || extension == Some("pem") {
|
||||
let cert_contents = fs::read(path)?;
|
||||
certs_vec.extend(cert_contents);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
Ok(certs_vec)
|
||||
}
|
||||
|
||||
pub async fn cgw_get_root_certs_store() -> Result<RootCertStore> {
|
||||
let certs = cgw_read_root_certs_dir().await?;
|
||||
|
||||
let buf = &mut certs.as_slice() as &mut dyn BufRead;
|
||||
let certs = rustls_pemfile::certs(buf);
|
||||
let mut root_cert_store = rustls::RootCertStore::empty();
|
||||
for cert in certs.flatten() {
|
||||
if let Err(e) = root_cert_store.add(cert.clone()) {
|
||||
error!("Failed do add cert {:?} to root store! Error: {e}", cert);
|
||||
}
|
||||
}
|
||||
|
||||
Ok(root_cert_store)
|
||||
}
|
||||
|
||||
pub async fn cgw_tls_create_db_connect() -> Result<MakeRustlsConnect> {
|
||||
let root_store = match cgw_get_root_certs_store().await {
|
||||
Ok(certs) => certs,
|
||||
Err(e) => {
|
||||
error!("{}", e.to_string());
|
||||
return Err(e);
|
||||
}
|
||||
};
|
||||
|
||||
let config = rustls::ClientConfig::builder()
|
||||
.with_root_certificates(root_store)
|
||||
.with_no_client_auth();
|
||||
|
||||
Ok(tokio_postgres_rustls::MakeRustlsConnect::new(config))
|
||||
}
|
||||
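A hedged sketch of how the connector above could be handed to tokio-postgres; the connection parameters are placeholders, and the caller is expected to keep driving the connection object.

let tls = cgw_tls_create_db_connect().await?;

// Placeholder connection parameters; real values come from CGW's DB configuration.
let (client, connection) = tokio_postgres::connect(
    "host=localhost port=5432 user=cgw dbname=cgw sslmode=require",
    tls,
)
.await?;

// The connection performs the actual I/O and must be polled to completion.
tokio::spawn(async move {
    if let Err(e) = connection.await {
        error!("Postgres connection error! Error: {e}");
    }
});

let _rows = client.query("SELECT 1", &[]).await?;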
|
||||
@@ -13,13 +13,13 @@ use crate::cgw_ucentral_parser::{
|
||||
CGWUCentralEventRealtimeEventWClientJoin, CGWUCentralEventRealtimeEventWClientLeave,
|
||||
CGWUCentralEventReply, CGWUCentralEventState, CGWUCentralEventStateClients,
|
||||
CGWUCentralEventStateClientsData, CGWUCentralEventStateClientsType,
|
||||
CGWUCentralEventStateLLDPData, CGWUCentralEventStateLinks, CGWUCentralEventType,
|
||||
CGWUCentralJRPCMessage,
|
||||
CGWUCentralEventStateLLDPData, CGWUCentralEventStateLinks, CGWUCentralEventStatePort,
|
||||
CGWUCentralEventType, CGWUCentralJRPCMessage,
|
||||
};
|
||||
|
||||
fn parse_lldp_data(
|
||||
lldp_peers: &Map<String, Value>,
|
||||
links: &mut Vec<CGWUCentralEventStateLinks>,
|
||||
links: &mut HashMap<CGWUCentralEventStatePort, Vec<CGWUCentralEventStateLinks>>,
|
||||
) -> Result<()> {
|
||||
let directions = [
|
||||
(
|
||||
@@ -54,12 +54,15 @@ fn parse_lldp_data(
|
||||
.ok_or_else(|| Error::UCentralParser("Failed to prase port"))?
|
||||
.to_string();
|
||||
|
||||
links.push(CGWUCentralEventStateLinks {
|
||||
local_port,
|
||||
let local_port = CGWUCentralEventStatePort::PhysicalWiredPort(local_port);
|
||||
|
||||
let clients_data = CGWUCentralEventStateLinks {
|
||||
remote_serial,
|
||||
remote_port,
|
||||
is_downstream,
|
||||
});
|
||||
};
|
||||
|
||||
links.insert(local_port, vec![clients_data]);
|
||||
}
|
||||
}
|
||||
|
||||
@@ -108,7 +111,7 @@ fn parse_wireless_ssids_info(
|
||||
|
||||
fn parse_wireless_clients_data(
|
||||
ssids: &Vec<Value>,
|
||||
links: &mut Vec<CGWUCentralEventStateClients>,
|
||||
links: &mut HashMap<CGWUCentralEventStatePort, Vec<CGWUCentralEventStateClients>>,
|
||||
upstream_ifaces: &[String],
|
||||
ssids_map: &HashMap<String, (String, String)>,
|
||||
timestamp: i64,
|
||||
@@ -119,7 +122,7 @@ fn parse_wireless_clients_data(
|
||||
if let Value::String(port) = &ssid["iface"] {
|
||||
port.clone()
|
||||
} else {
|
||||
warn!("Failed to retrieve local_port for {:?}", ssid);
|
||||
warn!("Failed to retrieve local_port for {:?}!", ssid);
|
||||
continue;
|
||||
}
|
||||
};
|
||||
@@ -130,7 +133,7 @@ fn parse_wireless_clients_data(
|
||||
}
|
||||
|
||||
if !ssid.contains_key("associations") {
|
||||
warn!("Failed to retrieve associations for {local_port}");
|
||||
warn!("Failed to retrieve associations for local port {local_port}!");
|
||||
continue;
|
||||
}
|
||||
|
||||
@@ -154,7 +157,7 @@ fn parse_wireless_clients_data(
|
||||
if let Some(v) = ssids_map.get(&bssid_value) {
|
||||
(v.0.clone(), v.1.clone())
|
||||
} else {
|
||||
warn!("Failed to get ssid/band value for {bssid_value}");
|
||||
warn!("Failed to get ssid/band value for {bssid_value}!");
|
||||
continue;
|
||||
}
|
||||
};
|
||||
@@ -172,23 +175,28 @@ fn parse_wireless_clients_data(
|
||||
})?;
|
||||
}
|
||||
|
||||
links.push(CGWUCentralEventStateClients {
|
||||
let local_port = CGWUCentralEventStatePort::WirelessPort(ssid, band);
|
||||
|
||||
let clients_data = CGWUCentralEventStateClients {
|
||||
client_type: CGWUCentralEventStateClientsType::Wireless(
|
||||
// Track timestamp of initial connection:
|
||||
// if we receive state evt <now>, substract
|
||||
// connected since from it, to get
|
||||
// original connection timestamp.
|
||||
timestamp - ts,
|
||||
ssid,
|
||||
band,
|
||||
),
|
||||
local_port: local_port.clone(),
|
||||
remote_serial,
|
||||
// TODO: rework remote_port to have Band, RSSI, chan etc
|
||||
// for an edge.
|
||||
remote_port: "<Wireless-client>".to_string(),
|
||||
is_downstream: true,
|
||||
});
|
||||
};
|
||||
|
||||
if let Some(ref mut existing_vec) = links.get_mut(&local_port) {
|
||||
existing_vec.push(clients_data);
|
||||
} else {
|
||||
links.insert(local_port, vec![clients_data]);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -200,7 +208,7 @@ fn parse_wireless_clients_data(
|
||||
|
||||
fn parse_wired_clients_data(
|
||||
clients: &Vec<Value>,
|
||||
links: &mut Vec<CGWUCentralEventStateClients>,
|
||||
links: &mut HashMap<CGWUCentralEventStatePort, Vec<CGWUCentralEventStateClients>>,
|
||||
upstream_ifaces: &[String],
|
||||
timestamp: i64,
|
||||
) -> Result<()> {
|
||||
@@ -211,14 +219,14 @@ fn parse_wired_clients_data(
|
||||
Some(s) => s.to_string(),
|
||||
None => {
|
||||
warn!(
|
||||
"Failed to get clients port string for {:?}, skipping",
|
||||
"Failed to get clients port string for {:?}, skipping!",
|
||||
client
|
||||
);
|
||||
continue;
|
||||
}
|
||||
}
|
||||
} else {
|
||||
warn!("Failed to parse clients port for {:?}, skipping", client);
|
||||
warn!("Failed to parse clients port for {:?}, skipping!", client);
|
||||
continue;
|
||||
}
|
||||
};
|
||||
@@ -241,23 +249,30 @@ fn parse_wired_clients_data(
|
||||
continue;
|
||||
}
|
||||
|
||||
let local_port = CGWUCentralEventStatePort::PhysicalWiredPort(local_port);
|
||||
|
||||
let remote_serial = MacAddress::from_str(
|
||||
client["mac"]
|
||||
.as_str()
|
||||
.ok_or_else(|| Error::UCentralParser("Failed to parse mac address"))?,
|
||||
)?;
|
||||
|
||||
links.push(CGWUCentralEventStateClients {
|
||||
let clients_data: CGWUCentralEventStateClients = CGWUCentralEventStateClients {
|
||||
// Wired clients don't have <connected since> data.
|
||||
// Treat <now> as latest connected ts.
|
||||
client_type: CGWUCentralEventStateClientsType::Wired(timestamp),
|
||||
local_port,
|
||||
remote_serial,
|
||||
// TODO: rework remote_port to have speed / duplex characteristics
|
||||
// for an edge.
|
||||
remote_port: "<Wired-client>".to_string(),
|
||||
is_downstream: true,
|
||||
});
|
||||
};
|
||||
|
||||
if let Some(ref mut existing_vec) = links.get_mut(&local_port) {
|
||||
existing_vec.push(clients_data);
|
||||
} else {
|
||||
links.insert(local_port, vec![clients_data]);
|
||||
}
|
||||
}
|
||||
|
||||
Ok(())
|
||||
@@ -265,7 +280,7 @@ fn parse_wired_clients_data(
|
||||
|
||||
fn parse_interface_data(
|
||||
interface: &Map<String, Value>,
|
||||
links: &mut Vec<CGWUCentralEventStateClients>,
|
||||
links: &mut HashMap<CGWUCentralEventStatePort, Vec<CGWUCentralEventStateClients>>,
|
||||
upstream_ifaces: &[String],
|
||||
timestamp: i64,
|
||||
) -> Result<()> {
|
||||
@@ -304,7 +319,11 @@ fn parse_link_state_data(
|
||||
}
|
||||
}
|
||||
|
||||
fn parse_state_event_data(map: CGWUCentralJRPCMessage, timestamp: i64) -> Result<CGWUCentralEvent> {
|
||||
fn parse_state_event_data(
|
||||
feature_topomap_enabled: bool,
|
||||
map: CGWUCentralJRPCMessage,
|
||||
timestamp: i64,
|
||||
) -> Result<CGWUCentralEvent> {
|
||||
if !map.contains_key("params") {
|
||||
return Err(Error::UCentralParser(
|
||||
"Invalid state event received: params is missing",
|
||||
@@ -317,7 +336,7 @@ fn parse_state_event_data(map: CGWUCentralJRPCMessage, timestamp: i64) -> Result
|
||||
let decoded_data = match BASE64_STANDARD.decode(compressed_data) {
|
||||
Ok(d) => d,
|
||||
Err(e) => {
|
||||
warn!("Failed to decode base64+zip state evt {e}");
|
||||
warn!("Failed to decode base64+zip state evt! Error: {e}");
|
||||
return Err(Error::UCentralParser(
|
||||
"Failed to decode base64+zip state evt",
|
||||
));
|
||||
@@ -326,7 +345,7 @@ fn parse_state_event_data(map: CGWUCentralJRPCMessage, timestamp: i64) -> Result
|
||||
let mut d = ZlibDecoder::new(&decoded_data[..]);
|
||||
let mut unzipped_data = String::new();
|
||||
if let Err(e) = d.read_to_string(&mut unzipped_data) {
|
||||
warn!("Failed to decompress decrypted state message {e}");
|
||||
warn!("Failed to decompress decrypted state message! Error: {e}");
|
||||
return Err(Error::UCentralParser(
|
||||
"Failed to decompress decrypted state message",
|
||||
));
|
||||
@@ -335,49 +354,86 @@ fn parse_state_event_data(map: CGWUCentralJRPCMessage, timestamp: i64) -> Result
|
||||
let state_map: CGWUCentralJRPCMessage = match serde_json::from_str(&unzipped_data) {
|
||||
Ok(m) => m,
|
||||
Err(e) => {
|
||||
error!("Failed to parse input state message {e}");
|
||||
error!("Failed to parse input state message! Error: {e}");
|
||||
return Err(Error::UCentralParser("Failed to parse input state message"));
|
||||
}
|
||||
};
|
||||
|
||||
let serial = MacAddress::from_str(
|
||||
state_map["serial"]
|
||||
.as_str()
|
||||
.ok_or_else(|| Error::UCentralParser("Failed to parse mac address"))?,
|
||||
)?;
|
||||
let serial = {
|
||||
if let Value::String(mac) = ¶ms["serial"] {
|
||||
MacAddress::from_str(mac)?
|
||||
} else if let Value::String(mac) = &state_map["serial"] {
|
||||
MacAddress::from_str(mac)?
|
||||
} else {
|
||||
return Err(Error::UCentralParser(
|
||||
"Failed to parse state: mac address is missing",
|
||||
));
|
||||
}
|
||||
};
|
||||
|
||||
if let Value::Object(state_map) = &state_map["state"] {
|
||||
let mut lldp_links: Vec<CGWUCentralEventStateLinks> = Vec::new();
|
||||
let mut clients_links: Vec<CGWUCentralEventStateClients> = Vec::new();
|
||||
let mut lldp_links: HashMap<
|
||||
CGWUCentralEventStatePort,
|
||||
Vec<CGWUCentralEventStateLinks>,
|
||||
> = HashMap::new();
|
||||
let mut clients_links: HashMap<
|
||||
CGWUCentralEventStatePort,
|
||||
Vec<CGWUCentralEventStateClients>,
|
||||
> = HashMap::new();
|
||||
|
||||
if state_map.contains_key("lldp-peers") {
|
||||
if let Value::Object(v) = &state_map["lldp-peers"] {
|
||||
parse_lldp_data(v, &mut lldp_links)?;
|
||||
if feature_topomap_enabled {
|
||||
if state_map.contains_key("lldp-peers") {
|
||||
if let Value::Object(v) = &state_map["lldp-peers"] {
|
||||
parse_lldp_data(v, &mut lldp_links)?;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
let mut upstream_ifaces: Vec<String> = Vec::new();
|
||||
let mut downstream_ifaces: Vec<String> = Vec::new();
|
||||
let mut upstream_ifaces: Vec<String> = Vec::new();
|
||||
let mut downstream_ifaces: Vec<String> = Vec::new();
|
||||
|
||||
if state_map.contains_key("link-state") {
|
||||
if let Value::Object(obj) = &state_map["link-state"] {
|
||||
parse_link_state_data(obj, &mut upstream_ifaces, &mut downstream_ifaces);
|
||||
if state_map.contains_key("link-state") {
|
||||
if let Value::Object(obj) = &state_map["link-state"] {
|
||||
parse_link_state_data(obj, &mut upstream_ifaces, &mut downstream_ifaces);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if let Value::Array(arr) = &state_map["interfaces"] {
|
||||
for interface in arr {
|
||||
if let Value::Object(iface) = interface {
|
||||
parse_interface_data(
|
||||
iface,
|
||||
&mut clients_links,
|
||||
&upstream_ifaces,
|
||||
timestamp,
|
||||
)?;
|
||||
if let Value::Array(arr) = &state_map["interfaces"] {
|
||||
for interface in arr {
|
||||
if let Value::Object(iface) = interface {
|
||||
parse_interface_data(
|
||||
iface,
|
||||
&mut clients_links,
|
||||
&upstream_ifaces,
|
||||
timestamp,
|
||||
)?;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Replace compressed data
|
||||
let mut origin_msg = map.clone();
|
||||
let params_value = match Value::from_str(unzipped_data.as_str()) {
|
||||
Ok(val) => val,
|
||||
Err(_e) => {
|
||||
return Err(Error::ConnectionProcessor(
|
||||
"Failed to cast decompressed message to JSON Value",
|
||||
));
|
||||
}
|
||||
};
|
||||
if let Some(value) = origin_msg.get_mut("params") {
|
||||
*value = params_value;
|
||||
}
|
||||
|
||||
let kafka_msg = match serde_json::to_string(&origin_msg) {
|
||||
Ok(msg) => msg,
|
||||
Err(_e) => {
|
||||
return Err(Error::ConnectionProcessor(
|
||||
"Failed to create decompressed Event message",
|
||||
));
|
||||
}
|
||||
};
|
||||
|
||||
let state_event = CGWUCentralEvent {
|
||||
serial,
|
||||
evt_type: CGWUCentralEventType::State(CGWUCentralEventState {
|
||||
@@ -388,6 +444,7 @@ fn parse_state_event_data(map: CGWUCentralJRPCMessage, timestamp: i64) -> Result
|
||||
links: clients_links,
|
||||
},
|
||||
}),
|
||||
decompressed: Some(kafka_msg),
|
||||
};
|
||||
|
||||
return Ok(state_event);
|
||||
@@ -397,13 +454,24 @@ fn parse_state_event_data(map: CGWUCentralJRPCMessage, timestamp: i64) -> Result
|
||||
"Parsed, decompressed state message but failed to find state object",
|
||||
));
|
||||
} else if let Value::Object(state_map) = &params["state"] {
let serial = MacAddress::from_str(
params["serial"]
.as_str()
.ok_or_else(|| Error::UCentralParser("Failed to parse mac address"))?,
)?;
let mut lldp_links: Vec<CGWUCentralEventStateLinks> = Vec::new();
let mut clients_links: Vec<CGWUCentralEventStateClients> = Vec::new();
let serial = {
if let Value::String(mac) = &params["serial"] {
MacAddress::from_str(mac)?
} else if let Value::String(mac) = &state_map["serial"] {
MacAddress::from_str(mac)?
} else {
return Err(Error::UCentralParser(
"Failed to parse state: mac address is missing",
));
}
};

let mut lldp_links: HashMap<CGWUCentralEventStatePort, Vec<CGWUCentralEventStateLinks>> =
|
||||
HashMap::new();
|
||||
let mut clients_links: HashMap<
|
||||
CGWUCentralEventStatePort,
|
||||
Vec<CGWUCentralEventStateClients>,
|
||||
> = HashMap::new();
|
||||
|
||||
if state_map.contains_key("lldp-peers") {
|
||||
if let Value::Object(v) = &state_map["lldp-peers"] {
|
||||
@@ -438,6 +506,7 @@ fn parse_state_event_data(map: CGWUCentralJRPCMessage, timestamp: i64) -> Result
|
||||
links: clients_links,
|
||||
},
|
||||
}),
|
||||
decompressed: None,
|
||||
};
|
||||
|
||||
return Ok(state_event);
|
||||
@@ -485,7 +554,7 @@ fn parse_realtime_event_data(
|
||||
};
|
||||
|
||||
if events.len() < 2 {
|
||||
warn!("Received malformed event: number of event values < 2");
|
||||
warn!("Received malformed event: number of event values < 2!");
|
||||
return Err(Error::UCentralParser(
|
||||
"Received malformed event: number of event values < 2",
|
||||
));
|
||||
@@ -496,14 +565,14 @@ fn parse_realtime_event_data(
|
||||
match &events[0] {
|
||||
Value::Number(ts) => {
|
||||
if ts.as_i64().is_none() {
|
||||
warn!("Received malformed event: missing timestamp");
|
||||
warn!("Received malformed event: missing timestamp!");
|
||||
return Err(Error::UCentralParser(
|
||||
"Received malformed event: missing timestamp",
|
||||
));
|
||||
}
|
||||
}
|
||||
_ => {
|
||||
warn!("Received malformed event: missing timestamp");
|
||||
warn!("Received malformed event: missing timestamp!");
|
||||
return Err(Error::UCentralParser(
|
||||
"Received malformed event: missing timestamp",
|
||||
));
|
||||
@@ -513,7 +582,7 @@ fn parse_realtime_event_data(
|
||||
let event_data = match &events[1] {
|
||||
Value::Object(v) => v,
|
||||
_ => {
|
||||
warn!("Received malformed event: missing timestamp");
|
||||
warn!("Received malformed event: missing timestamp!");
|
||||
return Err(Error::UCentralParser(
|
||||
"Received malformed event: missing timestamp",
|
||||
));
|
||||
@@ -521,7 +590,7 @@ fn parse_realtime_event_data(
|
||||
};
|
||||
|
||||
if !event_data.contains_key("type") {
|
||||
warn!("Received malformed event: missing type");
|
||||
warn!("Received malformed event: missing type!");
|
||||
return Err(Error::UCentralParser(
|
||||
"Received malformed event: missing type",
|
||||
));
|
||||
@@ -530,7 +599,7 @@ fn parse_realtime_event_data(
|
||||
let evt_type = match &event_data["type"] {
|
||||
Value::String(t) => t,
|
||||
_ => {
|
||||
warn!("Received malformed event: type is of wrongful underlying format/type");
|
||||
warn!("Received malformed event: type is of wrongful underlying format/type!");
|
||||
return Err(Error::UCentralParser(
|
||||
"Received malformed event: type is of wrongful underlying format/type",
|
||||
));
|
||||
@@ -540,7 +609,7 @@ fn parse_realtime_event_data(
|
||||
let evt_payload = match &event_data["payload"] {
|
||||
Value::Object(d) => d,
|
||||
_ => {
|
||||
warn!("Received malformed event: payload is of wrongful underlying format/type");
|
||||
warn!("Received malformed event: payload is of wrongful underlying format/type!");
|
||||
return Err(Error::UCentralParser(
|
||||
"Received malformed event: payload is of wrongful underlying format/type",
|
||||
));
|
||||
@@ -555,7 +624,7 @@ fn parse_realtime_event_data(
|
||||
|| !evt_payload.contains_key("rssi")
|
||||
|| !evt_payload.contains_key("channel")
|
||||
{
|
||||
warn!("Received malformed client.join event: band, rssi, ssid, channel and client are required");
|
||||
warn!("Received malformed client.join event: band, rssi, ssid, channel and client are required!");
|
||||
return Err(Error::UCentralParser("Received malformed client.join event: band, rssi, ssid, channel and client are required"));
|
||||
}
|
||||
|
||||
@@ -563,7 +632,7 @@ fn parse_realtime_event_data(
|
||||
match &evt_payload["band"] {
|
||||
Value::String(s) => s,
|
||||
_ => {
|
||||
warn!("Received malformed client.join event: band is of wrongful underlying format/type");
|
||||
warn!("Received malformed client.join event: band is of wrongful underlying format/type!");
|
||||
return Err(Error::UCentralParser(
|
||||
"Received malformed client.join event: band is of wrongful underlying format/type",
|
||||
));
|
||||
@@ -575,7 +644,7 @@ fn parse_realtime_event_data(
|
||||
Value::String(s) => match MacAddress::from_str(s.as_str()) {
|
||||
Ok(v) => v,
|
||||
Err(_) => {
|
||||
warn!("Received malformed client.join event: client is a malformed MAC address");
|
||||
warn!("Received malformed client.join event: client is a malformed MAC address!");
|
||||
return Err(Error::UCentralParser(
|
||||
"Received malformed client.join event: client is a malformed MAC address",
|
||||
));
|
||||
@@ -593,7 +662,7 @@ fn parse_realtime_event_data(
|
||||
match &evt_payload["ssid"] {
|
||||
Value::String(s) => s,
|
||||
_ => {
|
||||
warn!("Received malformed client.join event: ssid is of wrongful underlying format/type");
|
||||
warn!("Received malformed client.join event: ssid is of wrongful underlying format/type!");
|
||||
return Err(Error::UCentralParser(
|
||||
"Received malformed client.join event: ssid is of wrongful underlying format/type",
|
||||
));
|
||||
@@ -612,7 +681,7 @@ fn parse_realtime_event_data(
|
||||
}
|
||||
},
|
||||
_ => {
|
||||
warn!("Received malformed client.join event: rssi is of wrongful underlying format/type");
|
||||
warn!("Received malformed client.join event: rssi is of wrongful underlying format/type!");
|
||||
return Err(Error::UCentralParser(
|
||||
"Received malformed client.join event: rssi is of wrongful underlying format/type",
|
||||
));
|
||||
@@ -631,7 +700,7 @@ fn parse_realtime_event_data(
|
||||
}
|
||||
},
|
||||
_ => {
|
||||
warn!("Received malformed client.join event: channel is of wrongful underlying format/type");
|
||||
warn!("Received malformed client.join event: channel is of wrongful underlying format/type!");
|
||||
return Err(Error::UCentralParser(
|
||||
"Received malformed client.join event: channel is of wrongful underlying format/type",
|
||||
));
|
||||
@@ -655,6 +724,7 @@ fn parse_realtime_event_data(
|
||||
},
|
||||
),
|
||||
}),
|
||||
decompressed: None,
|
||||
})
|
||||
}
|
||||
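// Editor's note: an illustrative, self-contained condensation (not part of
// this commit) of the per-field presence/type checks performed above for a
// client.join-style payload, using serde_json accessors. Names are ours.
use serde_json::{json, Value};

fn parse_join_payload(payload: &Value) -> Result<(String, i64, u64), String> {
    let band = payload["band"]
        .as_str()
        .ok_or("band is missing or of a wrongful underlying format/type")?
        .to_string();
    let rssi = payload["rssi"]
        .as_i64()
        .ok_or("rssi is missing or of a wrongful underlying format/type")?;
    let channel = payload["channel"]
        .as_u64()
        .ok_or("channel is missing or of a wrongful underlying format/type")?;
    Ok((band, rssi, channel))
}

fn main() {
    let payload = json!({ "band": "5G", "rssi": -47, "channel": 36 });
    println!("{:?}", parse_join_payload(&payload));
}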
"client.leave" => {
|
||||
@@ -662,7 +732,7 @@ fn parse_realtime_event_data(
|
||||
|| !evt_payload.contains_key("client")
|
||||
|| !evt_payload.contains_key("connected_time")
|
||||
{
|
||||
warn!("Received malformed client.leave event: client, band and connected_time is required");
|
||||
warn!("Received malformed client.leave event: client, band and connected_time is required!");
|
||||
return Err(Error::UCentralParser("Received malformed client.leave event: client, band and connected_time is required"));
|
||||
}
|
||||
|
||||
@@ -670,7 +740,7 @@ fn parse_realtime_event_data(
|
||||
match &evt_payload["band"] {
|
||||
Value::String(s) => s,
|
||||
_ => {
|
||||
warn!("Received malformed client.leave event: band is of wrongful underlying format/type");
|
||||
warn!("Received malformed client.leave event: band is of wrongful underlying format/type!");
|
||||
return Err(Error::UCentralParser(
|
||||
"Received malformed client.leave event: band is of wrongful underlying format/type",
|
||||
));
|
||||
@@ -682,14 +752,14 @@ fn parse_realtime_event_data(
|
||||
Value::String(s) => match MacAddress::from_str(s.as_str()) {
|
||||
Ok(v) => v,
|
||||
Err(_) => {
|
||||
warn!("Received malformed client.leave event: client is a malformed MAC address");
|
||||
warn!("Received malformed client.leave event: client is a malformed MAC address!");
|
||||
return Err(Error::UCentralParser(
|
||||
"Received malformed client.leave event: client is a malformed MAC address",
|
||||
));
|
||||
}
|
||||
},
|
||||
_ => {
|
||||
warn!("Received malformed client.leave event: client is of wrongful underlying format/type");
|
||||
warn!("Received malformed client.leave event: client is of wrongful underlying format/type!");
|
||||
return Err(Error::UCentralParser(
|
||||
"Received malformed client.leave event: client is of wrongful underlying format/type",
|
||||
));
|
||||
@@ -708,7 +778,7 @@ fn parse_realtime_event_data(
|
||||
}
|
||||
},
|
||||
_ => {
|
||||
warn!("Received malformed client.leave event: connected_time is of wrongful underlying format/type");
|
||||
warn!("Received malformed client.leave event: connected_time is of wrongful underlying format/type!");
|
||||
return Err(Error::UCentralParser(
|
||||
"Received malformed client.leave event: connected_time is of wrongful underlying format/type",
|
||||
));
|
||||
@@ -734,32 +804,37 @@ fn parse_realtime_event_data(
|
||||
},
|
||||
),
|
||||
}),
|
||||
decompressed: None,
|
||||
})
|
||||
}
|
||||
_ => {
|
||||
warn!("Received unknown event: {evt_type}");
|
||||
warn!("Received unknown event: {evt_type}!");
|
||||
Err(Error::UCentralParser("Received unknown event"))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
pub fn cgw_ucentral_ap_parse_message(message: &str, timestamp: i64) -> Result<CGWUCentralEvent> {
|
||||
pub fn cgw_ucentral_ap_parse_message(
|
||||
feature_topomap_enabled: bool,
|
||||
message: &str,
|
||||
timestamp: i64,
|
||||
) -> Result<CGWUCentralEvent> {
|
||||
let map: CGWUCentralJRPCMessage = match serde_json::from_str(message) {
|
||||
Ok(m) => m,
|
||||
Err(e) => {
|
||||
error!("Failed to parse input json {e}");
|
||||
error!("Failed to parse input json! Error: {e}");
|
||||
return Err(Error::UCentralParser("Failed to parse input json"));
|
||||
}
|
||||
};
|
||||
|
||||
if map.contains_key("method") {
|
||||
let method = map["method"].as_str().ok_or_else(|| {
|
||||
warn!("Received malformed JSONRPC msg");
|
||||
warn!("Received malformed JSONRPC msg!");
|
||||
Error::UCentralParser("JSONRPC field is missing in message")
|
||||
})?;
|
||||
if method == "log" {
|
||||
let params = map.get("params").ok_or_else(|| {
|
||||
warn!("Received JRPC <method> without params.");
|
||||
warn!("Received JRPC <method> without params!");
|
||||
Error::UCentralParser("Received JRPC <method> without params")
|
||||
})?;
|
||||
let serial = MacAddress::from_str(
|
||||
@@ -775,6 +850,7 @@ pub fn cgw_ucentral_ap_parse_message(message: &str, timestamp: i64) -> Result<CG
|
||||
log: params["log"].to_string(),
|
||||
severity: serde_json::from_value(params["severity"].clone())?,
|
||||
}),
|
||||
decompressed: None,
|
||||
};
|
||||
|
||||
return Ok(log_event);
|
||||
@@ -802,29 +878,39 @@ pub fn cgw_ucentral_ap_parse_message(message: &str, timestamp: i64) -> Result<CG
|
||||
uuid: 1,
|
||||
capabilities: caps,
|
||||
}),
|
||||
decompressed: None,
|
||||
};
|
||||
|
||||
return Ok(connect_event);
|
||||
} else if method == "state" {
|
||||
return parse_state_event_data(map, timestamp);
|
||||
return parse_state_event_data(feature_topomap_enabled, map, timestamp);
|
||||
} else if method == "event" {
|
||||
return parse_realtime_event_data(map, timestamp);
|
||||
if feature_topomap_enabled {
|
||||
return parse_realtime_event_data(map, timestamp);
|
||||
} else {
|
||||
return Err(Error::UCentralParser(
|
||||
"Received unexpected event while topo map feature is disabled",
|
||||
));
|
||||
}
|
||||
}
|
||||
} else if map.contains_key("result") {
|
||||
if !map.contains_key("id") {
|
||||
warn!("Received JRPC <result> without id.");
|
||||
return Err(Error::UCentralParser("Received JRPC <result> without id"));
|
||||
if let Value::Object(result) = &map["result"] {
|
||||
if !result.contains_key("id") {
|
||||
warn!("Received JRPC <result> without id!");
|
||||
return Err(Error::UCentralParser("Received JRPC <result> without id"));
|
||||
}
|
||||
|
||||
let id = result["id"]
|
||||
.as_u64()
|
||||
.ok_or_else(|| Error::UCentralParser("Failed to parse id"))?;
|
||||
let reply_event = CGWUCentralEvent {
|
||||
serial: Default::default(),
|
||||
evt_type: CGWUCentralEventType::Reply(CGWUCentralEventReply { id }),
|
||||
decompressed: None,
|
||||
};
|
||||
|
||||
return Ok(reply_event);
|
||||
}
|
||||
|
||||
let id = map["id"]
|
||||
.as_u64()
|
||||
.ok_or_else(|| Error::UCentralParser("Failed to parse id"))?;
|
||||
let reply_event = CGWUCentralEvent {
|
||||
serial: Default::default(),
|
||||
evt_type: CGWUCentralEventType::Reply(CGWUCentralEventReply { id }),
|
||||
};
|
||||
|
||||
return Ok(reply_event);
|
||||
}
|
||||
|
||||
Err(Error::UCentralParser("Failed to parse event/method"))
|
||||
|
||||
@@ -3,7 +3,7 @@ use std::collections::{HashMap, VecDeque};
use std::sync::Arc;
use std::time::Duration;
use tokio::sync::RwLock;
use tokio::time;
use uuid::Uuid;

use crate::cgw_errors::{Error, Result};
use crate::cgw_ucentral_parser::{CGWUCentralCommand, CGWUCentralCommandType};
@@ -63,11 +63,11 @@ impl CGWUCentralMessagesQueue {
}

fn insert_item(&mut self, index: usize, value: CGWUCentralMessagesQueueItem) {
self.queue.insert(index, value)
self.queue.insert(index, value);
}

fn push_back_item(&mut self, value: CGWUCentralMessagesQueueItem) {
self.queue.push_back(value)
self.queue.push_back(value);
}

fn queue_len(&self) -> usize {
@@ -79,17 +79,29 @@ impl CGWUCentralMessagesQueue {
pub struct CGWUCentralMessagesQueueItem {
pub command: CGWUCentralCommand,
pub message: String,
pub uuid: Uuid,
pub timeout: Option<u64>,
}

impl CGWUCentralMessagesQueueItem {
pub fn new(command: CGWUCentralCommand, message: String) -> CGWUCentralMessagesQueueItem {
CGWUCentralMessagesQueueItem { command, message }
pub fn new(
command: CGWUCentralCommand,
message: String,
uuid: Uuid,
timeout: Option<u64>,
) -> CGWUCentralMessagesQueueItem {
CGWUCentralMessagesQueueItem {
command,
message,
uuid,
timeout,
}
}
}

pub struct CGWUCentralMessagesQueueManager {
|
||||
queue: Arc<RwLock<HashMap<MacAddress, Arc<RwLock<CGWUCentralMessagesQueue>>>>>,
|
||||
disconnected_devices: Arc<RwLock<HashMap<MacAddress, ()>>>,
|
||||
disconnected_devices: Arc<RwLock<HashMap<MacAddress, i32>>>,
|
||||
}
|
||||
|
||||
const MESSAGE_QUEUE_REBOOT_MSG_INDEX: usize = 0;
|
||||
@@ -106,7 +118,7 @@ lazy_static! {
|
||||
MacAddress,
|
||||
Arc<RwLock<CGWUCentralMessagesQueue>>,
|
||||
>::new(),)),
|
||||
disconnected_devices: Arc::new(RwLock::new(HashMap::<MacAddress, ()>::new()))
|
||||
disconnected_devices: Arc::new(RwLock::new(HashMap::<MacAddress, i32>::new()))
|
||||
}));
|
||||
}
|
||||
|
||||
@@ -119,7 +131,7 @@ lazy_static! {
|
||||
impl CGWUCentralMessagesQueueManager {
|
||||
pub async fn create_device_messages_queue(&self, device_mac: &MacAddress) {
|
||||
if !self.check_messages_queue_exists(device_mac).await {
|
||||
debug!("Create queue message for device: {}", device_mac);
|
||||
debug!("Create queue message for device: {device_mac}");
|
||||
let new_queue: Arc<RwLock<CGWUCentralMessagesQueue>> =
|
||||
Arc::new(RwLock::new(CGWUCentralMessagesQueue::new()));
|
||||
|
||||
@@ -139,76 +151,75 @@ impl CGWUCentralMessagesQueueManager {
|
||||
|
||||
pub async fn delete_device_messages_queue(&self, device_mac: &MacAddress) {
|
||||
let mut write_lock = self.queue.write().await;
|
||||
debug!("Remove queue message for device: {}", device_mac);
|
||||
|
||||
match write_lock.remove(device_mac) {
|
||||
Some(_) => {}
|
||||
None => {
|
||||
error!(
|
||||
"Trying to delete message queue for unexisting device: {}",
|
||||
device_mac
|
||||
);
|
||||
}
|
||||
}
|
||||
debug!("Remove queue message for device: {device_mac}");
|
||||
write_lock.remove(device_mac);
|
||||
}
|
||||
|
||||
pub async fn clear_device_message_queue(&self, device_mac: &MacAddress) {
|
||||
debug!("Flush device {} queue due to timeout!", device_mac);
|
||||
let container_lock = self.queue.read().await;
|
||||
pub async fn clear_device_message_queue(
|
||||
&self,
|
||||
infra_mac: &MacAddress,
|
||||
) -> Vec<CGWUCentralMessagesQueueItem> {
|
||||
debug!("Flush infra {infra_mac} queue due to timeout");
|
||||
let mut reqs: Vec<CGWUCentralMessagesQueueItem> = Vec::new();
|
||||
|
||||
if let Some(device_msg_queue) = container_lock.get(device_mac) {
|
||||
let mut write_lock = device_msg_queue.write().await;
|
||||
write_lock
|
||||
.queue
|
||||
.retain_mut(|item: &mut CGWUCentralMessagesQueueItem| {
|
||||
match item.command.cmd_type {
|
||||
CGWUCentralCommandType::Reboot
|
||||
| CGWUCentralCommandType::Configure
|
||||
| CGWUCentralCommandType::None => {
|
||||
*item = CGWUCentralMessagesQueueItem::default();
|
||||
true
|
||||
}
|
||||
_ => false,
|
||||
}
|
||||
});
|
||||
while let Some(msg) = self.dequeue_device_message(infra_mac).await {
|
||||
reqs.push(msg);
|
||||
}
|
||||
|
||||
reqs
|
||||
}
|
||||
|
||||
pub async fn push_device_message(
|
||||
&self,
|
||||
device_mac: MacAddress,
|
||||
value: CGWUCentralMessagesQueueItem,
|
||||
) -> Result<()> {
|
||||
// 1. Get current message type
|
||||
let new_cmd_type: CGWUCentralCommandType = value.command.cmd_type.clone();
|
||||
|
||||
// 2. Message queue for device exist -> get mutable ref
|
||||
) -> Result<Option<CGWUCentralMessagesQueueItem>> {
|
||||
// 1. Message queue for device exist -> get mutable ref
|
||||
self.create_device_messages_queue(&device_mac).await;
|
||||
let container_lock = self.queue.read().await;
|
||||
|
||||
let mut replaced_request: Option<CGWUCentralMessagesQueueItem> = None;
|
||||
|
||||
let mut device_msg_queue = container_lock
|
||||
.get(&device_mac)
|
||||
.ok_or_else(|| Error::UCentralMessagesQueue("Failed to get device message queue"))?
|
||||
.ok_or_else(|| {
|
||||
Error::UCentralMessagesQueue("Failed to get device message queue".to_string())
|
||||
})?
|
||||
.write()
|
||||
.await;
|
||||
let queue_state = device_msg_queue.get_state();
|
||||
|
||||
debug!(
|
||||
"Push message for device: {}, queue state {:?}, command type {:?}",
|
||||
device_mac, queue_state, new_cmd_type
|
||||
device_mac, queue_state, value.command.cmd_type
|
||||
);
|
||||
|
||||
// Check Queue Message state
|
||||
// 2. Check Queue Message state
|
||||
match queue_state {
|
||||
CGWUCentralMessagesQueueState::RxTx | CGWUCentralMessagesQueueState::Rx => {
|
||||
match new_cmd_type {
|
||||
match value.command.cmd_type {
|
||||
// 3. If new message type == Reboot then replace message under reserved index
|
||||
CGWUCentralCommandType::Reboot => {
|
||||
device_msg_queue.remove_item(MESSAGE_QUEUE_REBOOT_MSG_INDEX);
|
||||
if let Some(current_reboot) =
|
||||
device_msg_queue.remove_item(MESSAGE_QUEUE_REBOOT_MSG_INDEX)
|
||||
{
|
||||
if current_reboot.command != CGWUCentralCommand::default() {
|
||||
replaced_request = Some(current_reboot);
|
||||
}
|
||||
}
|
||||
|
||||
device_msg_queue.insert_item(MESSAGE_QUEUE_REBOOT_MSG_INDEX, value);
|
||||
}
|
||||
// 4. If new message type == Configure then replace message under reserved index
|
||||
CGWUCentralCommandType::Configure => {
|
||||
device_msg_queue.remove_item(MESSAGE_QUEUE_CONFIGURE_MSG_INDEX);
|
||||
if let Some(current_configure) =
|
||||
device_msg_queue.remove_item(MESSAGE_QUEUE_CONFIGURE_MSG_INDEX)
|
||||
{
|
||||
if current_configure.command != CGWUCentralCommand::default() {
|
||||
replaced_request = Some(current_configure);
|
||||
}
|
||||
}
|
||||
|
||||
device_msg_queue.insert_item(MESSAGE_QUEUE_CONFIGURE_MSG_INDEX, value);
|
||||
}
|
||||
// 5. If new message type == Other then push it back to queue
|
||||
@@ -218,14 +229,16 @@ impl CGWUCentralMessagesQueueManager {
|
||||
}
|
||||
}
|
||||
CGWUCentralMessagesQueueState::Discard | CGWUCentralMessagesQueueState::Unknown => {
|
||||
debug!(
|
||||
let err_msg: String = format!(
|
||||
"Device {} queue is in {:?} state - drop request {}",
|
||||
device_mac, queue_state, value.command.id
|
||||
);
|
||||
debug!("{err_msg}");
|
||||
return Err(Error::UCentralMessagesQueue(err_msg));
|
||||
}
|
||||
}
|
||||
|
||||
Ok(())
|
||||
Ok(replaced_request)
|
||||
}
|
||||
|
||||
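// Editor's note: an illustrative, self-contained mock (not part of this
// commit) of the replace-or-append behaviour push_device_message() applies to
// Reboot / Configure requests above: at most one of each kind stays queued,
// and the superseded request is handed back to the caller. Names are ours.
use std::collections::VecDeque;

#[derive(Debug)]
struct QueueItem {
    kind: &'static str,
    payload: String,
}

fn push(queue: &mut VecDeque<QueueItem>, item: QueueItem) -> Option<QueueItem> {
    if matches!(item.kind, "reboot" | "configure") {
        if let Some(pos) = queue.iter().position(|queued| queued.kind == item.kind) {
            // A request of this kind is already pending: swap it out and
            // return the superseded one for error reporting.
            return Some(std::mem::replace(&mut queue[pos], item));
        }
        // Keep reboot/configure ahead of ordinary requests.
        queue.push_front(item);
        return None;
    }
    queue.push_back(item);
    None
}

fn main() {
    let mut q: VecDeque<QueueItem> = VecDeque::new();
    let _ = push(&mut q, QueueItem { kind: "configure", payload: "cfg-v1".into() });
    let superseded = push(&mut q, QueueItem { kind: "configure", payload: "cfg-v2".into() });
    println!("superseded: {superseded:?}");
    println!("queued:     {q:?}");
}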
pub async fn check_messages_queue_exists(&self, device_mac: &MacAddress) -> bool {
|
||||
@@ -361,25 +374,21 @@ impl CGWUCentralMessagesQueueManager {
|
||||
}
|
||||
}
|
||||
|
||||
pub async fn device_disconnected(&self, device_mac: &MacAddress) {
|
||||
pub async fn device_disconnected(&self, infra_mac: &MacAddress, infra_gid: i32) {
|
||||
let mut disconnected_lock = self.disconnected_devices.write().await;
|
||||
disconnected_lock.insert(*device_mac, ());
|
||||
disconnected_lock.insert(*infra_mac, infra_gid);
|
||||
}
|
||||
|
||||
pub async fn device_connected(&self, device_mac: &MacAddress) {
|
||||
self.remove_disconnected_device_timeout(device_mac).await;
|
||||
}
|
||||
|
||||
async fn remove_disconnected_device_timeout(&self, device_mac: &MacAddress) {
|
||||
pub async fn device_connected(&self, infra_mac: &MacAddress) {
|
||||
let mut disconnected_lock = self.disconnected_devices.write().await;
|
||||
disconnected_lock.remove(device_mac);
|
||||
disconnected_lock.remove(infra_mac);
|
||||
}
|
||||
|
||||
pub async fn device_request_tick(&self, device_mac: &MacAddress, elapsed: Duration) -> bool {
|
||||
pub async fn device_request_tick(&self, infra_mac: &MacAddress, elapsed: Duration) -> bool {
|
||||
let mut expired: bool = false;
|
||||
let container_read_lock = self.queue.read().await;
|
||||
|
||||
if let Some(device_queue) = container_read_lock.get(device_mac) {
|
||||
if let Some(device_queue) = container_read_lock.get(infra_mac) {
|
||||
let mut write_lock = device_queue.write().await;
|
||||
write_lock.last_req_timeout = write_lock.last_req_timeout.saturating_sub(elapsed);
|
||||
|
||||
@@ -391,45 +400,45 @@ impl CGWUCentralMessagesQueueManager {
|
||||
expired
|
||||
}
|
||||
|
||||
async fn iterate_over_disconnected_devices(&self) {
|
||||
let mut devices_to_flush: Vec<MacAddress> = Vec::<MacAddress>::new();
|
||||
pub async fn iterate_over_disconnected_devices(
|
||||
&self,
|
||||
) -> HashMap<MacAddress, Vec<(i32, CGWUCentralMessagesQueueItem)>> {
|
||||
let mut devices_to_flush: Vec<(MacAddress, i32)> = Vec::<(MacAddress, i32)>::new();
|
||||
|
||||
{
|
||||
// 1. Check if disconnected device message queue is empty
|
||||
// If not empty - just do tick
|
||||
// Else - disconnected device and it queue should be removed
|
||||
let container_read_lock = self.disconnected_devices.read().await;
|
||||
for (device_mac, _) in container_read_lock.iter() {
|
||||
if self.get_device_messages_queue_len(device_mac).await > 0 {
|
||||
for (infra_mac, infra_gid) in container_read_lock.iter() {
|
||||
if self.get_device_messages_queue_len(infra_mac).await > 0 {
|
||||
// If device request is timed out - device and it queue should be removed
|
||||
if self
|
||||
.device_request_tick(device_mac, TIMEOUT_MANAGER_DURATION)
|
||||
.device_request_tick(infra_mac, TIMEOUT_MANAGER_DURATION)
|
||||
.await
|
||||
{
|
||||
devices_to_flush.push(*device_mac);
|
||||
devices_to_flush.push((*infra_mac, *infra_gid));
|
||||
}
|
||||
} else {
|
||||
devices_to_flush.push(*device_mac);
|
||||
devices_to_flush.push((*infra_mac, *infra_gid));
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
let mut failed_requests: HashMap<MacAddress, Vec<(i32, CGWUCentralMessagesQueueItem)>> =
|
||||
HashMap::new();
|
||||
// 2. Remove disconnected device and it queue
|
||||
let mut container_write_lock = self.disconnected_devices.write().await;
|
||||
for device_mac in devices_to_flush.iter() {
|
||||
self.delete_device_messages_queue(device_mac).await;
|
||||
container_write_lock.remove(device_mac);
|
||||
for (infra_mac, infra_gid) in devices_to_flush.iter() {
|
||||
let mut reqs: Vec<(i32, CGWUCentralMessagesQueueItem)> = Vec::new();
|
||||
while let Some(msg) = self.dequeue_device_message(infra_mac).await {
|
||||
reqs.push((*infra_gid, msg));
|
||||
}
|
||||
failed_requests.insert(*infra_mac, reqs);
|
||||
self.delete_device_messages_queue(infra_mac).await;
|
||||
container_write_lock.remove(infra_mac);
|
||||
}
|
||||
}
|
||||
|
||||
pub async fn start_queue_timeout_manager(&self) {
|
||||
loop {
|
||||
// Wait for 10 seconds
|
||||
time::sleep(TIMEOUT_MANAGER_DURATION).await;
|
||||
|
||||
// iterate over disconnected devices
|
||||
let queue_lock = CGW_MESSAGES_QUEUE.read().await;
|
||||
queue_lock.iterate_over_disconnected_devices().await;
|
||||
}
|
||||
failed_requests
|
||||
}
|
||||
}
|
||||
|
||||
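// Editor's note: an illustrative, self-contained sketch (not part of this
// commit) of what iterate_over_disconnected_devices() now returns: timed-out
// infra queues are drained and the leftover requests are grouped per MAC so
// the caller can report them as failed. Types and names below are stand-ins.
use std::collections::{HashMap, VecDeque};

type Mac = String;

#[derive(Debug)]
struct PendingRequest {
    id: u64,
}

fn drain_timed_out(
    queues: &mut HashMap<Mac, VecDeque<PendingRequest>>,
    timed_out: &[Mac],
) -> HashMap<Mac, Vec<PendingRequest>> {
    let mut failed: HashMap<Mac, Vec<PendingRequest>> = HashMap::new();
    for mac in timed_out {
        if let Some(mut queue) = queues.remove(mac) {
            failed.insert(mac.clone(), queue.drain(..).collect());
        }
    }
    failed
}

fn main() {
    let mut queues = HashMap::new();
    queues.insert(
        "00:11:22:33:44:55".to_string(),
        VecDeque::from(vec![PendingRequest { id: 1 }, PendingRequest { id: 2 }]),
    );
    let failed = drain_timed_out(&mut queues, &["00:11:22:33:44:55".to_string()]);
    println!("{failed:?}");
}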
@@ -1,11 +1,18 @@
|
||||
use std::fs::File;
|
||||
use std::io::BufReader;
|
||||
use std::path::Path;
|
||||
use std::str::FromStr;
|
||||
use std::{collections::HashMap, fmt};
|
||||
|
||||
use eui48::MacAddress;
|
||||
|
||||
use jsonschema::JSONSchema;
|
||||
use serde::{Deserialize, Serialize};
|
||||
use serde_json::{Map, Value};
|
||||
use tokio_tungstenite::tungstenite::protocol::Message;
|
||||
use url::Url;
|
||||
|
||||
use crate::cgw_app_args::{CGWValidationSchemaArgs, CGWValionSchemaRef};
|
||||
use crate::cgw_errors::{Error, Result};
|
||||
|
||||
use crate::{
|
||||
@@ -15,6 +22,76 @@ use crate::{
|
||||
|
||||
pub type CGWUCentralJRPCMessage = Map<String, Value>;
|
||||
|
||||
pub struct CGWUCentralConfigValidators {
|
||||
ap_schema: JSONSchema,
|
||||
switch_schema: JSONSchema,
|
||||
}
|
||||
|
||||
impl CGWUCentralConfigValidators {
|
||||
pub fn new(uris: CGWValidationSchemaArgs) -> Result<CGWUCentralConfigValidators> {
|
||||
let ap_schema = cgw_initialize_json_validator(uris.ap_schema_uri)?;
|
||||
let switch_schema = cgw_initialize_json_validator(uris.switch_schema_uri)?;
|
||||
|
||||
Ok(CGWUCentralConfigValidators {
|
||||
ap_schema,
|
||||
switch_schema,
|
||||
})
|
||||
}
|
||||
|
||||
pub fn validate_config_message(&self, message: &str, device_type: CGWDeviceType) -> Result<()> {
|
||||
let msg: CGWUCentralJRPCMessage = match serde_json::from_str(message) {
|
||||
Ok(m) => m,
|
||||
Err(e) => {
|
||||
error!("Failed to parse input json {e}");
|
||||
return Err(Error::UCentralParser("Failed to parse input json"));
|
||||
}
|
||||
};
|
||||
|
||||
let config = match msg.get("params") {
Some(cfg) => cfg,
None => {
error!("Failed to get configs, invalid config message received");
return Err(Error::UCentralParser(
"Failed to get configs, invalid config message received",
));
}
};

let config = match config.get("config") {
Some(cfg) => cfg,
None => {
error!("Failed to get config params, invalid config message received");
return Err(Error::UCentralParser(
"Failed to get config params, invalid config message received",
));
}
};
|
||||
let result = match device_type {
|
||||
CGWDeviceType::CGWDeviceAP => self.ap_schema.validate(config),
|
||||
CGWDeviceType::CGWDeviceSwitch => self.switch_schema.validate(config),
|
||||
CGWDeviceType::CGWDeviceUnknown => {
|
||||
error!("Failed to validate configure message for device type unknown");
|
||||
return Err(Error::UCentralParser(
|
||||
"Failed to validate configure message for device type unknown",
|
||||
));
|
||||
}
|
||||
};
|
||||
|
||||
let mut json_errors: String = String::new();
|
||||
if let Err(errors) = result {
|
||||
for error in errors {
|
||||
json_errors += &format!("JSON: Validation error: {}\n", error);
|
||||
json_errors += &format!("JSON: Instance path: {}\n", error.instance_path);
|
||||
}
|
||||
error!("{json_errors}");
|
||||
return Err(Error::UCentralValidator(json_errors));
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug, Default, Deserialize, Serialize, PartialEq)]
|
||||
pub struct CGWUCentralEventLog {
|
||||
pub serial: MacAddress,
|
||||
@@ -40,7 +117,6 @@ pub struct CGWUCentralEventConnect {
|
||||
|
||||
#[derive(Debug, Default, Deserialize, Serialize, PartialEq)]
|
||||
pub struct CGWUCentralEventStateLinks {
|
||||
pub local_port: String,
|
||||
#[serde(skip)]
|
||||
pub remote_serial: MacAddress,
|
||||
pub remote_port: String,
|
||||
@@ -51,8 +127,8 @@ pub struct CGWUCentralEventStateLinks {
|
||||
pub enum CGWUCentralEventStateClientsType {
|
||||
// Timestamp
|
||||
Wired(i64),
|
||||
// Timestamp, Ssid, Band
|
||||
Wireless(i64, String, String),
|
||||
// Timestamp
|
||||
Wireless(i64),
|
||||
// VID
|
||||
FDBClient(u16),
|
||||
}
|
||||
@@ -60,7 +136,6 @@ pub enum CGWUCentralEventStateClientsType {
|
||||
#[derive(Debug, Deserialize, Serialize, PartialEq)]
|
||||
pub struct CGWUCentralEventStateClients {
|
||||
pub client_type: CGWUCentralEventStateClientsType,
|
||||
pub local_port: String,
|
||||
#[serde(skip)]
|
||||
pub remote_serial: MacAddress,
|
||||
pub remote_port: String,
|
||||
@@ -70,13 +145,42 @@ pub struct CGWUCentralEventStateClients {
|
||||
#[derive(Debug, Default, Deserialize, Serialize, PartialEq)]
|
||||
pub struct CGWUCentralEventStateLLDPData {
|
||||
// links reported by the device:
|
||||
pub links: Vec<CGWUCentralEventStateLinks>,
|
||||
// local port (key), vector of links (value)
|
||||
pub links: HashMap<CGWUCentralEventStatePort, Vec<CGWUCentralEventStateLinks>>,
|
||||
}
|
||||
|
||||
#[derive(Debug, Default, Deserialize, Serialize, PartialEq)]
|
||||
pub struct CGWUCentralEventStateClientsData {
|
||||
// links reported by the device (wired and wireless):
|
||||
pub links: Vec<CGWUCentralEventStateClients>,
|
||||
// Composed into hashmap of Port(key), and vector of links
|
||||
// seen on this particular port.
|
||||
pub links: HashMap<CGWUCentralEventStatePort, Vec<CGWUCentralEventStateClients>>,
|
||||
}
|
||||
|
||||
// One 'slice' / part of edge (Mac + port);
|
||||
// To make a proper complete edge two parts needed:
|
||||
// SRC -> DST
|
||||
#[derive(Clone, Debug, Eq, Hash, PartialEq, Deserialize, Serialize)]
|
||||
pub enum CGWUCentralEventStatePort {
|
||||
// Physical port description (port name)
|
||||
#[serde(skip)]
|
||||
PhysicalWiredPort(String),
|
||||
// Wireless port description (ssid, band)
|
||||
#[serde(skip)]
|
||||
WirelessPort(String, String),
|
||||
}
|
||||
|
||||
impl fmt::Display for CGWUCentralEventStatePort {
|
||||
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
||||
match self {
|
||||
CGWUCentralEventStatePort::PhysicalWiredPort(port) => {
|
||||
write!(f, "{port}")
|
||||
}
|
||||
CGWUCentralEventStatePort::WirelessPort(ssid, band) => {
|
||||
write!(f, "WirelessClient({ssid},{band})")
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
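// Editor's note: an illustrative, self-contained sketch (not part of this
// commit) of how the per-port keying above is meant to be used: several links
// or clients can hang off one CGWUCentralEventStatePort-style key, and the
// Display impl gives the port a stable textual form. The enum below is a
// stand-in, not the crate's type.
use std::collections::HashMap;
use std::fmt;

#[derive(Clone, Debug, Eq, Hash, PartialEq)]
enum Port {
    PhysicalWiredPort(String),
    WirelessPort(String, String), // (ssid, band)
}

impl fmt::Display for Port {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        match self {
            Port::PhysicalWiredPort(port) => write!(f, "{port}"),
            Port::WirelessPort(ssid, band) => write!(f, "WirelessClient({ssid},{band})"),
        }
    }
}

fn main() {
    let mut links: HashMap<Port, Vec<&str>> = HashMap::new();
    links
        .entry(Port::PhysicalWiredPort("eth0".into()))
        .or_default()
        .push("aa:bb:cc:dd:ee:ff");
    links
        .entry(Port::WirelessPort("guest".into(), "5G".into()))
        .or_default()
        .push("aa:bb:cc:dd:ee:01");
    for (port, peers) in &links {
        println!("{port}: {peers:?}");
    }
}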
|
||||
#[derive(Debug, Default, Deserialize, Serialize, PartialEq)]
|
||||
@@ -146,6 +250,7 @@ pub enum CGWUCentralEventType {
|
||||
pub struct CGWUCentralEvent {
|
||||
pub serial: MacAddress,
|
||||
pub evt_type: CGWUCentralEventType,
|
||||
pub decompressed: Option<String>,
|
||||
}
|
||||
|
||||
#[derive(Deserialize, Debug, Serialize)]
|
||||
@@ -162,7 +267,7 @@ pub enum CGWUCentralCommandType {
|
||||
Powercycle,
|
||||
Upgrade,
|
||||
Factory,
|
||||
Prm,
|
||||
Rrm,
|
||||
Leds,
|
||||
Trace,
|
||||
Wifiscan,
|
||||
@@ -188,7 +293,7 @@ impl FromStr for CGWUCentralCommandType {
|
||||
"powercycle" => Ok(CGWUCentralCommandType::Powercycle),
|
||||
"upgrade" => Ok(CGWUCentralCommandType::Upgrade),
|
||||
"factory" => Ok(CGWUCentralCommandType::Factory),
|
||||
"rrm" => Ok(CGWUCentralCommandType::Prm),
|
||||
"rrm" => Ok(CGWUCentralCommandType::Rrm),
|
||||
"leds" => Ok(CGWUCentralCommandType::Leds),
|
||||
"trace" => Ok(CGWUCentralCommandType::Trace),
|
||||
"wifiscan" => Ok(CGWUCentralCommandType::Wifiscan),
|
||||
@@ -222,12 +327,12 @@ pub fn cgw_ucentral_parse_connect_event(message: Message) -> Result<CGWUCentralE
|
||||
|
||||
let map: CGWUCentralJRPCMessage = serde_json::from_str(&msg)?;
|
||||
if !map.contains_key("jsonrpc") {
|
||||
warn!("Received malformed JSONRPC msg");
|
||||
warn!("Received malformed JSONRPC msg!");
|
||||
return Err(Error::UCentralParser("JSONRPC field is missing in message"));
|
||||
}
|
||||
|
||||
let method = map["method"].as_str().ok_or_else(|| {
|
||||
warn!("Received malformed JSONRPC msg.");
|
||||
warn!("Received malformed JSONRPC msg!");
|
||||
Error::UCentralParser("method field is missing in message")
|
||||
})?;
|
||||
|
||||
@@ -238,7 +343,7 @@ pub fn cgw_ucentral_parse_connect_event(message: Message) -> Result<CGWUCentralE
|
||||
}
|
||||
|
||||
let params = map.get("params").ok_or_else(|| {
|
||||
warn!("Received JSONRPC <method> without params");
|
||||
warn!("Received JSONRPC <method> without params!");
|
||||
Error::UCentralParser("Received JSONRPC <method> without params")
|
||||
})?;
|
||||
|
||||
@@ -262,6 +367,7 @@ pub fn cgw_ucentral_parse_connect_event(message: Message) -> Result<CGWUCentralE
|
||||
uuid: 1,
|
||||
capabilities: caps,
|
||||
}),
|
||||
decompressed: None,
|
||||
};
|
||||
|
||||
Ok(event)
|
||||
@@ -271,28 +377,28 @@ pub fn cgw_ucentral_parse_command_message(message: &str) -> Result<CGWUCentralCo
|
||||
let map: CGWUCentralJRPCMessage = match serde_json::from_str(message) {
|
||||
Ok(m) => m,
|
||||
Err(e) => {
|
||||
error!("Failed to parse input json {e}");
|
||||
error!("Failed to parse input json! Error: {e}");
|
||||
return Err(Error::UCentralParser("Failed to parse input json"));
|
||||
}
|
||||
};
|
||||
|
||||
if !map.contains_key("jsonrpc") {
|
||||
warn!("Received malformed JSONRPC msg");
|
||||
warn!("Received malformed JSONRPC msg!");
|
||||
return Err(Error::UCentralParser("JSONRPC field is missing in message"));
|
||||
}
|
||||
|
||||
if !map.contains_key("method") {
|
||||
warn!("Received malformed JSONRPC msg");
|
||||
warn!("Received malformed JSONRPC msg!");
|
||||
return Err(Error::UCentralParser("method field is missing in message"));
|
||||
}
|
||||
|
||||
if !map.contains_key("params") {
|
||||
warn!("Received malformed JSONRPC msg");
|
||||
warn!("Received malformed JSONRPC msg!");
|
||||
return Err(Error::UCentralParser("params field is missing in message"));
|
||||
}
|
||||
|
||||
if !map.contains_key("id") {
|
||||
warn!("Received malformed JSONRPC msg");
|
||||
warn!("Received malformed JSONRPC msg!");
|
||||
return Err(Error::UCentralParser("id field is missing in message"));
|
||||
}
|
||||
|
||||
@@ -327,11 +433,90 @@ pub fn cgw_ucentral_parse_command_message(message: &str) -> Result<CGWUCentralCo
|
||||
|
||||
pub fn cgw_ucentral_event_parse(
|
||||
device_type: &CGWDeviceType,
|
||||
feature_topomap_enabled: bool,
|
||||
message: &str,
|
||||
timestamp: i64,
|
||||
) -> Result<CGWUCentralEvent> {
|
||||
match device_type {
|
||||
CGWDeviceType::CGWDeviceAP => cgw_ucentral_ap_parse_message(message, timestamp),
|
||||
CGWDeviceType::CGWDeviceSwitch => cgw_ucentral_switch_parse_message(message, timestamp),
|
||||
CGWDeviceType::CGWDeviceAP => {
|
||||
cgw_ucentral_ap_parse_message(feature_topomap_enabled, message, timestamp)
|
||||
}
|
||||
CGWDeviceType::CGWDeviceSwitch => {
|
||||
cgw_ucentral_switch_parse_message(feature_topomap_enabled, message, timestamp)
|
||||
}
|
||||
CGWDeviceType::CGWDeviceUnknown => Err(Error::UCentralParser(
|
||||
"Failed to parse event message for device type unknown",
|
||||
)),
|
||||
}
|
||||
}
|
||||
|
||||
fn cgw_get_json_validation_schema(schema_ref: CGWValionSchemaRef) -> Result<serde_json::Value> {
|
||||
match schema_ref {
|
||||
CGWValionSchemaRef::SchemaUri(url) => cgw_download_json_validation_schemas(url),
|
||||
CGWValionSchemaRef::SchemaPath(path) => cgw_load_json_validation_schemas(path.as_path()),
|
||||
}
|
||||
}
|
||||
|
||||
fn cgw_download_json_validation_schemas(url: Url) -> Result<serde_json::Value> {
|
||||
let client = reqwest::blocking::Client::new();
|
||||
let response = match client.get(url.clone()).send() {
|
||||
Ok(r) => match r.text() {
|
||||
Ok(t) => t,
|
||||
Err(e) => {
|
||||
return Err(Error::UCentralValidator(format!(
|
||||
"Failed to convert response from target URI {url} to text fromat: {e}"
|
||||
)));
|
||||
}
|
||||
},
|
||||
Err(e) => {
|
||||
return Err(Error::UCentralValidator(format!(
|
||||
"Failed to receive response from target URI {url}: {e}"
|
||||
)));
|
||||
}
|
||||
};
|
||||
|
||||
match serde_json::from_str(&response) {
|
||||
Ok(json_schema) => Ok(json_schema),
|
||||
Err(e) => Err(Error::UCentralValidator(format!(
|
||||
"Failed to deserialize text response from target URI {url}: {e}"
|
||||
))),
|
||||
}
|
||||
}
|
||||
|
||||
fn cgw_load_json_validation_schemas(path: &Path) -> Result<serde_json::Value> {
|
||||
let file = match File::open(path) {
|
||||
Ok(f) => f,
|
||||
Err(e) => {
|
||||
return Err(Error::UCentralValidator(format!(
|
||||
"Failed to open TLS certificate file: {}. Error: {}",
|
||||
path.display(),
|
||||
e
|
||||
)));
|
||||
}
|
||||
};
|
||||
|
||||
let reader = BufReader::new(file);
|
||||
match serde_json::from_reader(reader) {
|
||||
Ok(json_schema) => Ok(json_schema),
|
||||
Err(e) => Err(Error::UCentralValidator(format!(
|
||||
"Failed to read JSON schema from file {}! Error: {e}",
|
||||
path.display()
|
||||
))),
|
||||
}
|
||||
}
|
||||
|
||||
pub fn cgw_initialize_json_validator(schema_ref: CGWValionSchemaRef) -> Result<JSONSchema> {
|
||||
let schema = match cgw_get_json_validation_schema(schema_ref) {
|
||||
Ok(sch) => sch,
|
||||
Err(e) => {
|
||||
return Err(Error::UCentralValidator(e.to_string()));
|
||||
}
|
||||
};
|
||||
|
||||
match JSONSchema::compile(&schema) {
|
||||
Ok(json_schema) => Ok(json_schema),
|
||||
Err(e) => Err(Error::UCentralValidator(format!(
|
||||
"Failed to compile input schema to validation tree: {e}",
|
||||
))),
|
||||
}
|
||||
}
|
||||
|
||||
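// Editor's note: an illustrative, self-contained sketch (not part of this
// commit) of how the compiled validators above are exercised, using the same
// jsonschema 0.18 calls that appear in this file (JSONSchema::compile and
// validate, with instance_path on each reported error). The schema is a toy.
use jsonschema::JSONSchema;
use serde_json::json;

fn main() {
    let schema = json!({
        "type": "object",
        "required": ["uuid"],
        "properties": { "uuid": { "type": "integer" } }
    });
    let compiled = JSONSchema::compile(&schema).expect("schema must compile");

    let config = json!({ "uuid": "not-an-integer" });
    if let Err(errors) = compiled.validate(&config) {
        for error in errors {
            println!("JSON: Validation error: {error}");
            println!("JSON: Instance path: {}", error.instance_path);
        }
    }
}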
@@ -1,19 +1,19 @@
|
||||
use eui48::MacAddress;
|
||||
use serde_json::Value;
|
||||
use std::str::FromStr;
|
||||
use std::{collections::HashMap, str::FromStr};
|
||||
|
||||
use crate::cgw_errors::{Error, Result};
|
||||
|
||||
use crate::cgw_ucentral_parser::{
|
||||
CGWUCentralEvent, CGWUCentralEventLog, CGWUCentralEventState, CGWUCentralEventStateClients,
|
||||
CGWUCentralEventStateClientsData, CGWUCentralEventStateClientsType,
|
||||
CGWUCentralEventStateLLDPData, CGWUCentralEventStateLinks, CGWUCentralEventType,
|
||||
CGWUCentralJRPCMessage,
|
||||
CGWUCentralEvent, CGWUCentralEventLog, CGWUCentralEventReply, CGWUCentralEventState,
|
||||
CGWUCentralEventStateClients, CGWUCentralEventStateClientsData,
|
||||
CGWUCentralEventStateClientsType, CGWUCentralEventStateLLDPData, CGWUCentralEventStateLinks,
|
||||
CGWUCentralEventStatePort, CGWUCentralEventType, CGWUCentralJRPCMessage,
|
||||
};
|
||||
|
||||
fn parse_lldp_data(
|
||||
data: &Value,
|
||||
links: &mut Vec<CGWUCentralEventStateLinks>,
|
||||
links: &mut HashMap<CGWUCentralEventStatePort, Vec<CGWUCentralEventStateLinks>>,
|
||||
upstream_port: &Option<String>,
|
||||
) -> Result<()> {
|
||||
if let Value::Object(map) = data {
|
||||
@@ -52,18 +52,22 @@ fn parse_lldp_data(
|
||||
}
|
||||
};
|
||||
|
||||
links.push(CGWUCentralEventStateLinks {
|
||||
local_port,
|
||||
let local_port = CGWUCentralEventStatePort::PhysicalWiredPort(local_port);
|
||||
|
||||
let clients_data = CGWUCentralEventStateLinks {
|
||||
remote_serial,
|
||||
remote_port,
|
||||
is_downstream,
|
||||
});
|
||||
};
|
||||
|
||||
links.insert(local_port, vec![clients_data]);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/*
|
||||
Example of "mac-forwarding-table" in json format:
|
||||
...
|
||||
@@ -74,10 +78,9 @@ Example of "mac-forwarding-table" in json format:
|
||||
},
|
||||
...
|
||||
*/
|
||||
|
||||
fn parse_fdb_data(
|
||||
data: &Value,
|
||||
links: &mut Vec<CGWUCentralEventStateClients>,
|
||||
links: &mut HashMap<CGWUCentralEventStatePort, Vec<CGWUCentralEventStateClients>>,
|
||||
upstream_port: &Option<String>,
|
||||
) -> Result<()> {
|
||||
if let Value::Object(map) = data {
|
||||
@@ -90,29 +93,46 @@ fn parse_fdb_data(
|
||||
}
|
||||
}
|
||||
|
||||
let local_port = CGWUCentralEventStatePort::PhysicalWiredPort(local_port);
|
||||
|
||||
// We iterate on a per-port basis, means this is our first
|
||||
// iteration on this particular port, safe to create empty
|
||||
// vec and populate it later on.
|
||||
links.insert(local_port.clone(), Vec::new());
|
||||
|
||||
let mut existing_vec = links.get_mut(&local_port);
|
||||
|
||||
for (k, v) in port.iter() {
|
||||
let vid = {
|
||||
match u16::from_str(k.as_str()) {
|
||||
Ok(v) => v,
|
||||
Err(_e) => {
|
||||
warn!("Failed to convert vid {k} to u16");
|
||||
Err(e) => {
|
||||
warn!("Failed to convert vid {k} to u16! Error: {e}");
|
||||
continue;
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
if let Value::Array(macs) = v {
|
||||
for mac in macs.iter() {
|
||||
let remote_serial =
|
||||
MacAddress::from_str(mac.as_str().ok_or_else(|| {
|
||||
Error::UCentralParser("Failed to parse mac address")
|
||||
})?)?;
|
||||
links.push(CGWUCentralEventStateClients {
|
||||
|
||||
let clients_data = CGWUCentralEventStateClients {
|
||||
client_type: CGWUCentralEventStateClientsType::FDBClient(vid),
|
||||
local_port: local_port.clone(),
|
||||
remote_serial,
|
||||
remote_port: format!("<VLAN{}>", vid),
|
||||
is_downstream: true,
|
||||
});
|
||||
};
|
||||
|
||||
if let Some(ref mut existing_vec) = existing_vec {
|
||||
existing_vec.push(clients_data);
|
||||
} else {
|
||||
warn!("Unexpected: tried to push clients_data [{}:{}], while hashmap entry (key) for it does not exist!",
|
||||
local_port, clients_data.remote_port);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
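// Editor's note: an illustrative alternative (not part of this commit) to the
// insert-then-get_mut sequence above: HashMap::entry() yields the per-port
// Vec directly, so the "hashmap entry (key) for it does not exist" branch
// cannot be reached. Types below are simplified stand-ins.
use std::collections::HashMap;

fn main() {
    let mut links: HashMap<String, Vec<(u16, String)>> = HashMap::new();
    let local_port = "Ethernet0".to_string();

    // (vid, client mac) pairs discovered while walking the FDB of this port.
    for (vid, mac) in [(100u16, "aa:bb:cc:dd:ee:01"), (200u16, "aa:bb:cc:dd:ee:02")] {
        links
            .entry(local_port.clone())
            .or_default()
            .push((vid, mac.to_string()));
    }
    println!("{links:?}");
}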
@@ -124,25 +144,26 @@ fn parse_fdb_data(
|
||||
}
|
||||
|
||||
pub fn cgw_ucentral_switch_parse_message(
|
||||
feature_topomap_enabled: bool,
|
||||
message: &str,
|
||||
timestamp: i64,
|
||||
) -> Result<CGWUCentralEvent> {
|
||||
let map: CGWUCentralJRPCMessage = match serde_json::from_str(message) {
|
||||
Ok(m) => m,
|
||||
Err(e) => {
|
||||
error!("Failed to parse input json {e}");
|
||||
error!("Failed to parse input json! Error: {e}");
|
||||
return Err(Error::UCentralParser("Failed to parse input json"));
|
||||
}
|
||||
};
|
||||
|
||||
if !map.contains_key("jsonrpc") {
|
||||
warn!("Received malformed JSONRPC msg");
|
||||
warn!("Received malformed JSONRPC msg!");
|
||||
return Err(Error::UCentralParser("JSONRPC field is missing in message"));
|
||||
}
|
||||
|
||||
if map.contains_key("method") {
|
||||
let method = map["method"].as_str().ok_or_else(|| {
|
||||
warn!("Received JRPC <method> without params.");
|
||||
warn!("Received JRPC <method> without params!");
|
||||
Error::UCentralParser("Received JRPC <method> without params")
|
||||
})?;
|
||||
if method == "log" {
|
||||
@@ -162,6 +183,7 @@ pub fn cgw_ucentral_switch_parse_message(
|
||||
log: params["log"].to_string(),
|
||||
severity: serde_json::from_value(params["severity"].clone())?,
|
||||
}),
|
||||
decompressed: None,
|
||||
};
|
||||
|
||||
return Ok(log_event);
|
||||
@@ -176,34 +198,42 @@ pub fn cgw_ucentral_switch_parse_message(
|
||||
Error::UCentralParser("Failed to parse serial from params")
|
||||
})?)?;
|
||||
let mut upstream_port: Option<String> = None;
|
||||
let mut lldp_links: Vec<CGWUCentralEventStateLinks> = Vec::new();
|
||||
let mut lldp_links: HashMap<
|
||||
CGWUCentralEventStatePort,
|
||||
Vec<CGWUCentralEventStateLinks>,
|
||||
> = HashMap::new();
|
||||
|
||||
// We can reuse <clients> logic as used in AP, it's safe and OK
|
||||
// since under the hood FDB macs are basically
|
||||
// switch's <wired clients>, where underlying client type
|
||||
// (FDBClient) will have all additional met info, like VID
|
||||
let mut clients_links: Vec<CGWUCentralEventStateClients> = Vec::new();
|
||||
let mut clients_links: HashMap<
|
||||
CGWUCentralEventStatePort,
|
||||
Vec<CGWUCentralEventStateClients>,
|
||||
> = HashMap::new();
|
||||
|
||||
if state_map.contains_key("default-gateway") {
|
||||
if let Value::Array(default_gw) = &state_map["default-gateway"] {
|
||||
if let Some(gw) = default_gw.first() {
|
||||
if let Value::String(port) = &gw["out-port"] {
|
||||
upstream_port = Some(port.as_str().to_string());
|
||||
if feature_topomap_enabled {
|
||||
if state_map.contains_key("default-gateway") {
|
||||
if let Value::Array(default_gw) = &state_map["default-gateway"] {
|
||||
if let Some(gw) = default_gw.first() {
|
||||
if let Value::String(port) = &gw["out-port"] {
|
||||
upstream_port = Some(port.as_str().to_string());
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if state_map.contains_key("lldp-peers") {
|
||||
parse_lldp_data(&state_map["lldp-peers"], &mut lldp_links, &upstream_port)?;
|
||||
}
|
||||
if state_map.contains_key("lldp-peers") {
|
||||
parse_lldp_data(&state_map["lldp-peers"], &mut lldp_links, &upstream_port)?;
|
||||
}
|
||||
|
||||
if state_map.contains_key("mac-forwarding-table") {
|
||||
parse_fdb_data(
|
||||
&state_map["mac-forwarding-table"],
|
||||
&mut clients_links,
|
||||
&upstream_port,
|
||||
)?;
|
||||
if state_map.contains_key("mac-forwarding-table") {
|
||||
parse_fdb_data(
|
||||
&state_map["mac-forwarding-table"],
|
||||
&mut clients_links,
|
||||
&upstream_port,
|
||||
)?;
|
||||
}
|
||||
}
|
||||
|
||||
let state_event = CGWUCentralEvent {
|
||||
@@ -216,17 +246,32 @@ pub fn cgw_ucentral_switch_parse_message(
|
||||
links: clients_links,
|
||||
},
|
||||
}),
|
||||
decompressed: None,
|
||||
};
|
||||
|
||||
return Ok(state_event);
|
||||
}
|
||||
}
|
||||
} else if map.contains_key("result") {
|
||||
info!("Processing <result> JSONRPC msg");
|
||||
info!("{:?}", map);
|
||||
return Err(Error::UCentralParser(
|
||||
"Result handling is not yet implemented",
|
||||
));
|
||||
// For now, let's mimic AP's basic reply / result
|
||||
// format.
|
||||
if let Value::Object(result) = &map["result"] {
|
||||
if !result.contains_key("id") {
|
||||
warn!("Received JRPC <result> without id!");
|
||||
return Err(Error::UCentralParser("Received JRPC <result> without id"));
|
||||
}
|
||||
|
||||
let id = result["id"]
|
||||
.as_u64()
|
||||
.ok_or_else(|| Error::UCentralParser("Failed to parse id"))?;
|
||||
let reply_event = CGWUCentralEvent {
|
||||
serial: Default::default(),
|
||||
evt_type: CGWUCentralEventType::Reply(CGWUCentralEventReply { id }),
|
||||
decompressed: None,
|
||||
};
|
||||
|
||||
return Ok(reply_event);
|
||||
}
|
||||
}
|
||||
|
||||
Err(Error::UCentralParser("Failed to parse event/method"))
|
||||
|
||||
File diff suppressed because it is too large
505
src/main.rs
@@ -1,4 +1,5 @@
|
||||
#![warn(rust_2018_idioms)]
|
||||
mod cgw_app_args;
|
||||
mod cgw_connection_processor;
|
||||
mod cgw_connection_server;
|
||||
mod cgw_db_accessor;
|
||||
@@ -10,6 +11,7 @@ mod cgw_nb_api_listener;
|
||||
mod cgw_remote_client;
|
||||
mod cgw_remote_discovery;
|
||||
mod cgw_remote_server;
|
||||
mod cgw_runtime;
|
||||
mod cgw_tls;
|
||||
mod cgw_ucentral_ap_parser;
|
||||
mod cgw_ucentral_messages_queue_manager;
|
||||
@@ -23,6 +25,10 @@ extern crate log;
|
||||
#[macro_use]
|
||||
extern crate lazy_static;
|
||||
|
||||
use cgw_app_args::AppArgs;
|
||||
use cgw_runtime::cgw_initialize_runtimes;
|
||||
|
||||
use nix::sys::socket::{setsockopt, sockopt};
|
||||
use tokio::{
|
||||
net::TcpListener,
|
||||
runtime::{Builder, Handle, Runtime},
|
||||
@@ -31,12 +37,7 @@ use tokio::{
|
||||
time::{sleep, Duration},
|
||||
};
|
||||
|
||||
use std::{
|
||||
env,
|
||||
net::{Ipv4Addr, SocketAddr},
|
||||
str::FromStr,
|
||||
sync::Arc,
|
||||
};
|
||||
use std::{env, net::SocketAddr, str::FromStr, sync::Arc};
|
||||
|
||||
use rlimit::{setrlimit, Resource};
|
||||
|
||||
@@ -50,6 +51,14 @@ use cgw_tls::cgw_tls_create_acceptor;
|
||||
|
||||
use crate::cgw_errors::{Error, Result};
|
||||
|
||||
use tokio::net::TcpStream;
|
||||
|
||||
use std::os::unix::io::AsFd;
|
||||
|
||||
const CGW_TCP_KEEPALIVE_TIMEOUT: u32 = 30;
|
||||
const CGW_TCP_KEEPALIVE_COUNT: u32 = 3;
|
||||
const CGW_TCP_KEEPALIVE_INTERVAL: u32 = 10;
|
||||
|
||||
#[derive(Copy, Clone)]
|
||||
enum AppCoreLogLevel {
|
||||
/// Print debug-level messages and above
|
||||
@@ -70,369 +79,6 @@ impl FromStr for AppCoreLogLevel {
|
||||
}
|
||||
}
|
||||
|
||||
const CGW_DEFAULT_ID: i32 = 0;
|
||||
const CGW_DEFAULT_WSS_T_NUM: usize = 4;
|
||||
const CGW_DEFAULT_LOG_LEVEL: AppCoreLogLevel = AppCoreLogLevel::Debug;
|
||||
const CGW_DEFAULT_WSS_IP: Ipv4Addr = Ipv4Addr::new(0, 0, 0, 0);
|
||||
const CGW_DEFAULT_WSS_PORT: u16 = 15002;
|
||||
const CGW_DEFAULT_WSS_CAS: &str = "cas.pem";
|
||||
const CGW_DEFAULT_WSS_CERT: &str = "cert.pem";
|
||||
const CGW_DEFAULT_WSS_KEY: &str = "key.pem";
|
||||
const CGW_DEFAULT_GRPC_LISTENING_IP: Ipv4Addr = Ipv4Addr::new(0, 0, 0, 0);
|
||||
const CGW_DEFAULT_GRPC_LISTENING_PORT: u16 = 50051;
|
||||
const CGW_DEFAULT_GRPC_PUBLIC_HOST: &str = "localhost";
|
||||
const CGW_DEFAULT_GRPC_PUBLIC_PORT: u16 = 50051;
|
||||
const CGW_DEFAULT_KAFKA_HOST: &str = "localhost";
|
||||
const CGW_DEFAULT_KAFKA_PORT: u16 = 9092;
|
||||
const CGW_DEFAULT_KAFKA_CONSUME_TOPIC: &str = "CnC";
|
||||
const CGW_DEFAULT_KAFKA_PRODUCE_TOPIC: &str = "CnC_Res";
|
||||
const CGW_DEFAULT_DB_HOST: &str = "localhost";
|
||||
const CGW_DEFAULT_DB_PORT: u16 = 6379;
|
||||
const CGW_DEFAULT_DB_NAME: &str = "cgw";
|
||||
const CGW_DEFAULT_DB_USERNAME: &str = "cgw";
|
||||
const CGW_DEFAULT_DB_PASSWORD: &str = "123";
|
||||
const CGW_DEFAULT_REDIS_HOST: &str = "localhost";
|
||||
const CGW_DEFAULT_REDIS_PORT: u16 = 6379;
|
||||
const CGW_DEFAULT_ALLOW_CERT_MISMATCH: &str = "no";
|
||||
const CGW_DEFAULT_METRICS_PORT: u16 = 8080;
|
||||
const CGW_DEFAULT_TOPOMAP_STATE: bool = false;
|
||||
|
||||
/// CGW server
|
||||
pub struct AppArgs {
|
||||
/// Loglevel of application
|
||||
log_level: AppCoreLogLevel,
|
||||
|
||||
/// CGW unique identifier (u64)
|
||||
cgw_id: i32,
|
||||
|
||||
/// Number of thread in a threadpool dedicated for handling secure websocket connections
|
||||
wss_t_num: usize,
|
||||
/// IP to listen for incoming WSS connection
|
||||
wss_ip: Ipv4Addr,
|
||||
/// PORT to listen for incoming WSS connection
|
||||
wss_port: u16,
|
||||
/// WSS CAS certificate (contains root and issuer certificates)
|
||||
wss_cas: String,
|
||||
/// WSS certificate
|
||||
wss_cert: String,
|
||||
/// WSS private key
|
||||
wss_key: String,
|
||||
|
||||
/// IP to listen for incoming GRPC connection
|
||||
grpc_listening_ip: Ipv4Addr,
|
||||
/// PORT to listen for incoming GRPC connection
|
||||
grpc_listening_port: u16,
|
||||
/// IP or hostname for Redis Record
|
||||
grpc_public_host: String,
|
||||
/// PORT for Redis record
|
||||
grpc_public_port: u16,
|
||||
|
||||
/// IP or hostname to connect to KAFKA broker
|
||||
kafka_host: String,
|
||||
/// PORT to connect to KAFKA broker
|
||||
kafka_port: u16,
|
||||
/// KAFKA topic from where to consume messages
|
||||
#[allow(unused)]
|
||||
kafka_consume_topic: String,
|
||||
/// KAFKA topic where to produce messages
|
||||
#[allow(unused)]
|
||||
kafka_produce_topic: String,
|
||||
|
||||
/// IP or hostname to connect to DB (PSQL)
|
||||
db_host: String,
|
||||
/// PORT to connect to DB (PSQL)
|
||||
db_port: u16,
|
||||
/// DB name to connect to in DB (PSQL)
|
||||
db_name: String,
|
||||
/// DB user name use with connection to in DB (PSQL)
|
||||
db_username: String,
|
||||
/// DB user password use with connection to in DB (PSQL)
|
||||
db_password: String,
|
||||
|
||||
/// IP or hostname to connect to REDIS
|
||||
redis_host: String,
|
||||
/// PORT to connect to REDIS
|
||||
redis_port: u16,
|
||||
|
||||
/// Allow Missmatch
|
||||
allow_mismatch: bool,
|
||||
|
||||
/// PORT to connect to Metrics
|
||||
metrics_port: u16,
|
||||
|
||||
/// Topomap featue status (enabled/disabled)
|
||||
feature_topomap_enabled: bool,
|
||||
}
|
||||
|
||||
impl AppArgs {
|
||||
fn parse() -> Result<Self> {
|
||||
let log_level: AppCoreLogLevel = match env::var("CGW_LOG_LEVEL") {
|
||||
Ok(val) => match val.parse() {
|
||||
Ok(v) => v,
|
||||
Err(_e) => {
|
||||
return Err(Error::AppArgsParser(format!(
|
||||
"Failed to parse CGW_LOG_LEVEL! Invalid value: {}",
|
||||
val
|
||||
)));
|
||||
}
|
||||
},
|
||||
Err(_) => CGW_DEFAULT_LOG_LEVEL,
|
||||
};
|
||||
|
||||
let cgw_id: i32 = match env::var("CGW_ID") {
|
||||
Ok(val) => match val.parse() {
|
||||
Ok(v) => v,
|
||||
Err(_e) => {
|
||||
return Err(Error::AppArgsParser(format!(
|
||||
"Failed to parse CGW_ID! Invalid value: {}",
|
||||
val
|
||||
)));
|
||||
}
|
||||
},
|
||||
Err(_) => CGW_DEFAULT_ID,
|
||||
};
|
||||
|
||||
let wss_t_num: usize = match env::var("DEFAULT_WSS_THREAD_NUM") {
|
||||
Ok(val) => match val.parse() {
|
||||
Ok(v) => v,
|
||||
Err(_e) => {
|
||||
return Err(Error::AppArgsParser(format!(
|
||||
"Failed to parse DEFAULT_WSS_THREAD_NUM! Invalid value: {}",
|
||||
val
|
||||
)));
|
||||
}
|
||||
},
|
||||
Err(_) => CGW_DEFAULT_WSS_T_NUM,
|
||||
};
|
||||
|
||||
let wss_ip: Ipv4Addr = match env::var("CGW_WSS_IP") {
|
||||
Ok(val) => match Ipv4Addr::from_str(val.as_str()) {
|
||||
Ok(v) => v,
|
||||
Err(_e) => {
|
||||
return Err(Error::AppArgsParser(format!(
|
||||
"Failed to parse CGW_WSS_IP! Invalid value: {}",
|
||||
val
|
||||
)));
|
||||
}
|
||||
},
|
||||
Err(_) => CGW_DEFAULT_WSS_IP,
|
||||
};
|
||||
|
||||
let wss_port: u16 = match env::var("CGW_WSS_PORT") {
|
||||
Ok(val) => match val.parse() {
|
||||
Ok(v) => v,
|
||||
Err(_e) => {
|
||||
return Err(Error::AppArgsParser(format!(
|
||||
"Failed to parse CGW_WSS_PORT! Invalid value: {}",
|
||||
val
|
||||
)));
|
||||
}
|
||||
},
|
||||
Err(_) => CGW_DEFAULT_WSS_PORT,
|
||||
};
|
||||
|
||||
let wss_cas: String = env::var("CGW_WSS_CAS").unwrap_or(CGW_DEFAULT_WSS_CAS.to_string());
|
||||
let wss_cert: String = env::var("CGW_WSS_CERT").unwrap_or(CGW_DEFAULT_WSS_CERT.to_string());
|
||||
let wss_key: String = env::var("CGW_WSS_KEY").unwrap_or(CGW_DEFAULT_WSS_KEY.to_string());
|
||||
|
||||
let grpc_listening_ip: Ipv4Addr = match env::var("CGW_GRPC_LISTENING_IP") {
|
||||
Ok(val) => match Ipv4Addr::from_str(val.as_str()) {
|
||||
Ok(v) => v,
|
||||
Err(_e) => {
|
||||
return Err(Error::AppArgsParser(format!(
|
||||
"Failed to parse CGW_GRPC_LISTENING_IP! Invalid value: {}",
|
||||
val
|
||||
)));
|
||||
}
|
||||
},
|
||||
Err(_) => CGW_DEFAULT_GRPC_LISTENING_IP,
|
||||
};
|
||||
|
||||
let grpc_listening_port: u16 = match env::var("CGW_GRPC_LISTENING_PORT") {
|
||||
Ok(val) => match val.parse() {
|
||||
Ok(v) => v,
|
||||
Err(_e) => {
|
||||
return Err(Error::AppArgsParser(format!(
|
||||
"Failed to parse CGW_GRPC_LISTENING_PORT! Invalid value: {}",
|
||||
val
|
||||
)));
|
||||
}
|
||||
},
|
||||
Err(_) => CGW_DEFAULT_GRPC_LISTENING_PORT,
|
||||
};
|
||||
|
||||
let grpc_public_host: String = match env::var("CGW_GRPC_PUBLIC_HOST") {
|
||||
Ok(val) => {
|
||||
// 1. Try to parse variable into IpAddress
|
||||
match Ipv4Addr::from_str(val.as_str()) {
|
||||
// 2. If parsed - return IpAddress as String value
|
||||
Ok(ip) => ip.to_string(),
|
||||
// 3. If parse failed - probably hostname specified
|
||||
Err(_e) => val,
|
||||
}
|
||||
}
|
||||
// Env. variable is not setup - use default value
|
||||
Err(_) => CGW_DEFAULT_GRPC_PUBLIC_HOST.to_string(),
|
||||
};
|
||||
|
||||
let grpc_public_port: u16 = match env::var("CGW_GRPC_PUBLIC_PORT") {
|
||||
Ok(val) => match val.parse() {
|
||||
Ok(v) => v,
|
||||
Err(_e) => {
|
||||
return Err(Error::AppArgsParser(format!(
|
||||
"Failed to parse CGW_GRPC_PUBLIC_PORT! Invalid value: {}",
|
||||
val
|
||||
)));
|
||||
}
|
||||
},
|
||||
Err(_) => CGW_DEFAULT_GRPC_PUBLIC_PORT,
|
||||
};
|
||||
|
||||
let kafka_host: String = match env::var("CGW_KAFKA_HOST") {
|
||||
Ok(val) => {
|
||||
// 1. Try to parse variable into IpAddress
|
||||
match Ipv4Addr::from_str(val.as_str()) {
|
||||
// 2. If parsed - return IpAddress as String value
|
||||
Ok(ip) => ip.to_string(),
|
||||
// 3. If parse failed - probably hostname specified
|
||||
Err(_e) => val,
|
||||
}
|
||||
}
|
||||
// Env. variable is not set - use default value
|
||||
Err(_) => CGW_DEFAULT_KAFKA_HOST.to_string(),
|
||||
};
|
||||
|
||||
let kafka_port: u16 = match env::var("CGW_KAFKA_PORT") {
|
||||
Ok(val) => match val.parse() {
|
||||
Ok(v) => v,
|
||||
Err(_e) => {
|
||||
return Err(Error::AppArgsParser(format!(
|
||||
"Failed to parse CGW_KAFKA_PORT! Invalid value: {}",
|
||||
val
|
||||
)));
|
||||
}
|
||||
},
|
||||
Err(_) => CGW_DEFAULT_KAFKA_PORT,
|
||||
};
|
||||
|
||||
let kafka_consume_topic: String = env::var("CGW_KAFKA_CONSUMER_TOPIC")
|
||||
.unwrap_or(CGW_DEFAULT_KAFKA_CONSUME_TOPIC.to_string());
|
||||
let kafka_produce_topic: String = env::var("CGW_KAFKA_PRODUCER_TOPIC")
|
||||
.unwrap_or(CGW_DEFAULT_KAFKA_PRODUCE_TOPIC.to_string());
|
||||
|
||||
let db_host: String = match env::var("CGW_DB_HOST") {
|
||||
Ok(val) => {
|
||||
// 1. Try to parse variable into IpAddress
|
||||
match Ipv4Addr::from_str(val.as_str()) {
|
||||
// 2. If parsed - return IpAddress as String value
|
||||
Ok(ip) => ip.to_string(),
|
||||
// 3. If parse failed - probably hostname specified
|
||||
Err(_e) => val,
|
||||
}
|
||||
}
|
||||
// Env. variable is not set - use default value
|
||||
Err(_) => CGW_DEFAULT_DB_HOST.to_string(),
|
||||
};
|
||||
|
||||
let db_port: u16 = match env::var("CGW_DB_PORT") {
|
||||
Ok(val) => match val.parse() {
|
||||
Ok(v) => v,
|
||||
Err(_e) => {
|
||||
return Err(Error::AppArgsParser(format!(
|
||||
"Failed to parse CGW_DB_PORT! Invalid value: {}",
|
||||
val
|
||||
)));
|
||||
}
|
||||
},
|
||||
Err(_) => CGW_DEFAULT_DB_PORT,
|
||||
};
|
||||
|
||||
let db_name: String = env::var("CGW_DB_NAME").unwrap_or(CGW_DEFAULT_DB_NAME.to_string());
|
||||
let db_username: String =
|
||||
env::var("CGW_DB_USERNAME").unwrap_or(CGW_DEFAULT_DB_USERNAME.to_string());
|
||||
let db_password: String =
|
||||
env::var("CGW_DB_PASSWORD").unwrap_or(CGW_DEFAULT_DB_PASSWORD.to_string());
|
||||
|
||||
let redis_host: String = match env::var("CGW_REDIS_HOST") {
|
||||
Ok(val) => {
|
||||
// 1. Try to parse variable into IpAddress
|
||||
match Ipv4Addr::from_str(val.as_str()) {
|
||||
// 2. If parsed - return IpAddress as String value
|
||||
Ok(ip) => ip.to_string(),
|
||||
// 3. If parse failed - probably hostname specified
|
||||
Err(_e) => val,
|
||||
}
|
||||
}
|
||||
// Env. variable is not set - use default value
|
||||
Err(_) => CGW_DEFAULT_REDIS_HOST.to_string(),
|
||||
};
|
||||
|
||||
let redis_port: u16 = match env::var("CGW_REDIS_PORT") {
|
||||
Ok(val) => match val.parse() {
|
||||
Ok(v) => v,
|
||||
Err(_e) => {
|
||||
return Err(Error::AppArgsParser(format!(
|
||||
"Failed to parse CGW_REDIS_PORT! Invalid value: {}",
|
||||
val
|
||||
)));
|
||||
}
|
||||
},
|
||||
Err(_) => CGW_DEFAULT_REDIS_PORT,
|
||||
};
|
||||
|
||||
let mismatch: String = env::var("CGW_ALLOW_CERT_MISMATCH")
|
||||
.unwrap_or(CGW_DEFAULT_ALLOW_CERT_MISMATCH.to_string());
|
||||
let allow_mismatch = mismatch == "yes";
|
||||
|
||||
let metrics_port: u16 = match env::var("CGW_METRICS_PORT") {
|
||||
Ok(val) => match val.parse() {
|
||||
Ok(v) => v,
|
||||
Err(_e) => {
|
||||
return Err(Error::AppArgsParser(format!(
|
||||
"Failed to parse CGW_METRICS_PORT! Invalid value: {}",
|
||||
val
|
||||
)));
|
||||
}
|
||||
},
|
||||
Err(_) => CGW_DEFAULT_METRICS_PORT,
|
||||
};
|
||||
|
||||
let feature_topomap_enabled: bool = match env::var("CGW_FEATURE_TOPOMAP_ENABLE") {
|
||||
Ok(_) => true,
|
||||
Err(_) => CGW_DEFAULT_TOPOMAP_STATE,
|
||||
};
|
||||
|
||||
Ok(AppArgs {
|
||||
log_level,
|
||||
cgw_id,
|
||||
wss_t_num,
|
||||
wss_ip,
|
||||
wss_port,
|
||||
wss_cas,
|
||||
wss_cert,
|
||||
wss_key,
|
||||
grpc_listening_ip,
|
||||
grpc_listening_port,
|
||||
grpc_public_host,
|
||||
grpc_public_port,
|
||||
kafka_host,
|
||||
kafka_port,
|
||||
kafka_consume_topic,
|
||||
kafka_produce_topic,
|
||||
db_host,
|
||||
db_port,
|
||||
db_name,
|
||||
db_username,
|
||||
db_password,
|
||||
redis_host,
|
||||
redis_port,
|
||||
allow_mismatch,
|
||||
metrics_port,
|
||||
feature_topomap_enabled,
|
||||
})
|
||||
}
|
||||
}
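
Editorial note: every setting above repeats the same env::var / parse / fall-back-to-default dance. As a sketch only (parse_env_or is a hypothetical helper, not part of this patch), the pattern could be collapsed into one generic function that reuses the Error::AppArgsParser variant already used by parse():

use std::env;
use std::str::FromStr;

// Hypothetical helper (illustration only): read an environment variable and
// parse it into T, returning `default` when the variable is unset and a
// descriptive AppArgsParser error when it is set but malformed.
fn parse_env_or<T: FromStr>(name: &str, default: T) -> Result<T> {
    match env::var(name) {
        Ok(val) => val.parse::<T>().map_err(|_| {
            Error::AppArgsParser(format!("Failed to parse {name}! Invalid value: {val}"))
        }),
        Err(_) => Ok(default),
    }
}

// Example usage inside AppArgs::parse():
// let wss_port: u16 = parse_env_or("CGW_WSS_PORT", CGW_DEFAULT_WSS_PORT)?;
// let metrics_port: u16 = parse_env_or("CGW_METRICS_PORT", CGW_DEFAULT_METRICS_PORT)?;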
|
||||
|
||||
pub struct AppCore {
|
||||
cgw_server: Arc<CGWConnectionServer>,
|
||||
main_runtime_handle: Arc<Handle>,
|
||||
@@ -467,7 +113,7 @@ impl AppCore {
|
||||
let cgw_server = match CGWConnectionServer::new(&app_args).await {
|
||||
Ok(s) => s,
|
||||
Err(e) => {
|
||||
error!("Failed to create CGW server: {:?}", e);
|
||||
error!("Failed to create CGW server! Error: {e}");
|
||||
return Err(e);
|
||||
}
|
||||
};
|
||||
@@ -496,7 +142,7 @@ impl AppCore {
|
||||
let main_runtime_handle: Arc<Handle> = self.main_runtime_handle.clone();
|
||||
let core_clone = self.clone();
|
||||
|
||||
let cgw_remote_server = CGWRemoteServer::new(&self.args);
|
||||
let cgw_remote_server = CGWRemoteServer::new(self.args.cgw_id, &self.args.grpc_args);
|
||||
let cgw_srv_clone = self.cgw_server.clone();
|
||||
let cgw_con_serv = self.cgw_server.clone();
|
||||
self.grpc_server_runtime_handle.spawn(async move {
|
||||
@@ -522,36 +168,91 @@ impl AppCore {
|
||||
}
|
||||
}
|
||||
|
||||
async fn cgw_set_tcp_keepalive_options(stream: TcpStream) -> Result<TcpStream> {
|
||||
// Convert Tokio's TcpStream to std::net::TcpStream
|
||||
let std_stream = match stream.into_std() {
|
||||
Ok(stream) => stream,
|
||||
Err(e) => {
|
||||
error!("Failed to convert Tokio TcpStream into Std TcpStream");
|
||||
return Err(Error::Tcp(format!(
|
||||
"Failed to convert Tokio TcpStream into Std TcpStream: {}",
|
||||
e
|
||||
)));
|
||||
}
|
||||
};
|
||||
|
||||
// Get the raw file descriptor (socket)
|
||||
let raw_fd = std_stream.as_fd();
|
||||
|
||||
// Set the socket option to enable TCP keepalive
|
||||
if let Err(e) = setsockopt(&raw_fd, sockopt::KeepAlive, &true) {
|
||||
error!("Failed to enable TCP keepalive: {}", e);
|
||||
return Err(Error::Tcp("Failed to enable TCP keepalive".to_string()));
|
||||
}
|
||||
|
||||
// Set the TCP_KEEPIDLE option (keepalive time)
|
||||
if let Err(e) = setsockopt(&raw_fd, sockopt::TcpKeepIdle, &CGW_TCP_KEEPALIVE_TIMEOUT) {
|
||||
error!("Failed to set TCP_KEEPIDLE: {}", e);
|
||||
return Err(Error::Tcp("Failed to set TCP_KEEPIDLE".to_string()));
|
||||
}
|
||||
|
||||
// Set the TCP_KEEPCNT option (keepalive probes count)
|
||||
if let Err(e) = setsockopt(&raw_fd, sockopt::TcpKeepCount, &CGW_TCP_KEEPALIVE_COUNT) {
|
||||
error!("Failed to set TCP_KEEPINTVL: {}", e);
|
||||
return Err(Error::Tcp("Failed to set TCP_KEEPINTVL".to_string()));
|
||||
}
|
||||
|
||||
// Set the TCP_KEEPINTVL option (keepalive interval)
|
||||
if let Err(e) = setsockopt(
|
||||
&raw_fd,
|
||||
sockopt::TcpKeepInterval,
|
||||
&CGW_TCP_KEEPALIVE_INTERVAL,
|
||||
) {
|
||||
error!("Failed to set TCP_KEEPCNT: {}", e);
|
||||
return Err(Error::Tcp("Failed to set TCP_KEEPCNT".to_string()));
|
||||
}
|
||||
|
||||
// Convert the std::net::TcpStream back to Tokio's TcpStream
|
||||
let stream = match TcpStream::from_std(std_stream) {
|
||||
Ok(stream) => stream,
|
||||
Err(e) => {
|
||||
error!("Failed to convert Std TcpStream into Tokio TcpStream");
|
||||
return Err(Error::Tcp(format!(
|
||||
"Failed to convert Std TcpStream into Tokio TcpStream: {}",
|
||||
e
|
||||
)));
|
||||
}
|
||||
};
|
||||
|
||||
Ok(stream)
|
||||
}
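
Editorial note, for comparison only: the same keepalive tuning could be applied in place, without the Tokio-to-std stream round-trip, through the socket2 crate's SockRef view. This is a sketch under the assumption that socket2 were added as a dependency; it is not what this patch does, and it reuses the CGW_TCP_KEEPALIVE_* constants referenced above.

use socket2::{SockRef, TcpKeepalive};
use std::time::Duration;
use tokio::net::TcpStream;

// Sketch: configure SO_KEEPALIVE, TCP_KEEPIDLE, TCP_KEEPINTVL and TCP_KEEPCNT
// directly on the Tokio stream through a borrowed socket2 view.
fn cgw_set_tcp_keepalive_via_socket2(stream: &TcpStream) -> std::io::Result<()> {
    let keepalive = TcpKeepalive::new()
        .with_time(Duration::from_secs(CGW_TCP_KEEPALIVE_TIMEOUT as u64))
        .with_interval(Duration::from_secs(CGW_TCP_KEEPALIVE_INTERVAL as u64))
        .with_retries(CGW_TCP_KEEPALIVE_COUNT as u32);
    SockRef::from(stream).set_tcp_keepalive(&keepalive)
}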
|
||||
|
||||
async fn server_loop(app_core: Arc<AppCore>) -> Result<()> {
|
||||
debug!("server_loop entry");
|
||||
|
||||
debug!(
|
||||
"Starting WSS server, listening at {}:{}",
|
||||
app_core.args.wss_ip, app_core.args.wss_port
|
||||
app_core.args.wss_args.wss_ip, app_core.args.wss_args.wss_port
|
||||
);
|
||||
// Bind the server's socket
|
||||
let sockaddress = SocketAddr::new(
|
||||
std::net::IpAddr::V4(app_core.args.wss_ip),
|
||||
app_core.args.wss_port,
|
||||
std::net::IpAddr::V4(app_core.args.wss_args.wss_ip),
|
||||
app_core.args.wss_args.wss_port,
|
||||
);
|
||||
let listener: Arc<TcpListener> = match TcpListener::bind(sockaddress).await {
|
||||
Ok(listener) => Arc::new(listener),
|
||||
Err(e) => {
|
||||
error!(
|
||||
"Failed to bind socket address: {}. Error: {}",
|
||||
sockaddress, e
|
||||
);
|
||||
error!("Failed to bind socket address {sockaddress}! Error: {e}");
|
||||
return Err(Error::ConnectionServer(format!(
|
||||
"Failed to bind socket address: {}. Error: {}",
|
||||
sockaddress, e
|
||||
"Failed to bind socket address {sockaddress}! Error: {e}"
|
||||
)));
|
||||
}
|
||||
};
|
||||
|
||||
let tls_acceptor = match cgw_tls_create_acceptor(&app_core.args).await {
|
||||
let tls_acceptor = match cgw_tls_create_acceptor(&app_core.args.wss_args).await {
|
||||
Ok(acceptor) => acceptor,
|
||||
Err(e) => {
|
||||
error!("Failed to create TLS acceptor. Error: {}", e.to_string());
|
||||
error!("Failed to create TLS acceptor! Error: {e}");
|
||||
return Err(e);
|
||||
}
|
||||
};
|
||||
@@ -559,7 +260,7 @@ async fn server_loop(app_core: Arc<AppCore>) -> Result<()> {
|
||||
// Spawn explicitly in main thread: created task accepts connection,
|
||||
// but handling is spawned inside another threadpool runtime
|
||||
let app_core_clone = app_core.clone();
|
||||
let _ = app_core
|
||||
let result = app_core
|
||||
.main_runtime_handle
|
||||
.spawn(async move {
|
||||
let mut conn_idx: i64 = 0;
|
||||
@@ -572,16 +273,27 @@ async fn server_loop(app_core: Arc<AppCore>) -> Result<()> {
|
||||
let (socket, remote_addr) = match listener.accept().await {
|
||||
Ok((sock, addr)) => (sock, addr),
|
||||
Err(e) => {
|
||||
error!("Failed to Accept conn {e}\n");
|
||||
error!("Failed to accept connection! Error: {e}");
|
||||
continue;
|
||||
}
|
||||
};
|
||||
|
||||
info!("ACK conn: {}", conn_idx);
|
||||
let socket = match cgw_set_tcp_keepalive_options(socket).await {
|
||||
Ok(s) => s,
|
||||
Err(e) => {
|
||||
error!(
|
||||
"Failed to set TCP keepalive options. Error: {}",
|
||||
e.to_string()
|
||||
);
|
||||
break;
|
||||
}
|
||||
};
|
||||
|
||||
info!("Accept (ACK) connection: {conn_idx}, remote address: {remote_addr}");
|
||||
|
||||
app_core_clone.conn_ack_runtime_handle.spawn(async move {
|
||||
cgw_server_clone
|
||||
.ack_connection(socket, tls_acceptor_clone, remote_addr, conn_idx)
|
||||
.ack_connection(socket, tls_acceptor_clone, remote_addr)
|
||||
.await;
|
||||
});
|
||||
|
||||
@@ -590,6 +302,13 @@ async fn server_loop(app_core: Arc<AppCore>) -> Result<()> {
|
||||
})
|
||||
.await;
|
||||
|
||||
match result {
|
||||
Ok(_) => info!("Apllication finished succesfully!"),
|
||||
Err(e) => {
|
||||
error!("Application failed! Error: {e}");
|
||||
}
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
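
Editorial note: the accepted socket handed to ack_connection() is expected to be TLS-terminated and then upgraded to a WebSocket. The following is an illustrative sketch of that hand-off, not the actual CGWConnectionServer::ack_connection() implementation; it assumes the tokio-rustls TlsAcceptor produced by cgw_tls_create_acceptor() and tokio-tungstenite's accept_async().

use tokio::net::TcpStream;
use tokio_rustls::TlsAcceptor;
use tokio_tungstenite::accept_async;

// Illustration only: terminate TLS on the accepted socket, then perform the
// WebSocket handshake on top of the TLS stream.
async fn cgw_tls_ws_upgrade_sketch(socket: TcpStream, acceptor: TlsAcceptor) -> Result<()> {
    let tls_stream = acceptor
        .accept(socket)
        .await
        .map_err(|e| Error::Tcp(format!("TLS handshake failed! Error: {e}")))?;

    let _ws_stream = accept_async(tls_stream)
        .await
        .map_err(|e| Error::ConnectionServer(format!("WebSocket handshake failed! Error: {e}")))?;

    // From here the WebSocket stream would be split and driven by the
    // connection server's processing loop.
    Ok(())
}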
|
||||
|
||||
@@ -629,7 +348,7 @@ async fn main() -> Result<()> {
|
||||
Ok(app_args) => app_args,
|
||||
Err(e) => {
|
||||
setup_logger(AppCoreLogLevel::Info);
|
||||
error!("Failed to parse app args: {}", e.to_string());
|
||||
error!("Failed to parse application args! Error: {e}");
|
||||
return Err(e);
|
||||
}
|
||||
};
|
||||
@@ -637,6 +356,12 @@ async fn main() -> Result<()> {
|
||||
// Configure logger
|
||||
setup_logger(args.log_level);
|
||||
|
||||
// Initialize runtimes
|
||||
if let Err(e) = cgw_initialize_runtimes(args.wss_args.wss_t_num) {
|
||||
error!("Failed to initialize CGW runtimes! Error: {e}");
|
||||
return Err(e);
|
||||
}
|
||||
|
||||
if args.feature_topomap_enabled {
|
||||
warn!("CGW_FEATURE_TOPOMAP_ENABLE is set, TOPO MAP feature (unstable) will be enabled (realtime events / state processing) - heavy performance drop with high number of devices connected could be observed");
|
||||
}
|
||||
@@ -653,13 +378,15 @@ async fn main() -> Result<()> {
|
||||
// Spawn a task to listen for SIGHUP, SIGINT, and SIGTERM signals
|
||||
tokio::spawn(async move {
|
||||
if let Err(e) = signal_handler(shutdown_notify_clone).await {
|
||||
error!("Failed to handle signal: {:?}", e);
|
||||
error!("Failed to handle signal (SIGHUP, SIGINT, or SIGTERM)! Error: {e}");
|
||||
}
|
||||
});
|
||||
|
||||
// Make sure metrics are available <before> any of the components
|
||||
// starts up;
|
||||
CGWMetrics::get_ref().start(args.metrics_port).await?;
|
||||
CGWMetrics::get_ref()
|
||||
.start(args.metrics_args.metrics_port)
|
||||
.await?;
|
||||
let app = Arc::new(AppCore::new(args).await?);
|
||||
|
||||
app.run(shutdown_notify).await;
|
||||
|
||||
253
tests/conftest.py
Normal file
@@ -0,0 +1,253 @@
|
||||
import pytest
|
||||
import ssl
|
||||
import json
|
||||
import time
|
||||
from client_simulator.src.simulation_runner import Device as DeviceSimulator
|
||||
from kafka_producer.src.producer import Producer as KafkaProducer
|
||||
from kafka_producer.src.consumer import Consumer as KafkaConsumer
|
||||
from kafka_producer.src.admin import Admin as KafkaAdmin
|
||||
from psql_client.psql_client import PostgreSQLClient as PSQLClient
|
||||
from redis_client.redis_client import RedisClient as RedisClient
|
||||
import requests
|
||||
from typing import List, Tuple
|
||||
import random
|
||||
|
||||
|
||||
# Device connection, kafka wrappers etc
|
||||
class TestContext:
|
||||
@staticmethod
|
||||
def default_dev_sim_mac() -> str:
|
||||
return "02-00-00-00-00-00"
|
||||
|
||||
@staticmethod
|
||||
def default_kafka_group() -> str:
|
||||
return '9999'
|
||||
|
||||
@staticmethod
|
||||
def default_shard_id() -> int:
|
||||
return 0
|
||||
|
||||
@staticmethod
|
||||
def default_producer_topic() -> str:
|
||||
return 'CnC'
|
||||
|
||||
def __init__(self):
|
||||
device = DeviceSimulator(
|
||||
mac=self.default_dev_sim_mac(),
|
||||
server='wss://localhost:15002',
|
||||
ca_cert='./ca-certs/ca.crt',
|
||||
msg_interval=10, msg_size=1024,
|
||||
client_cert='./certs/base.crt', client_key='./certs/base.key', check_cert=False,
|
||||
start_event=None, stop_event=None)
|
||||
|
||||
# Server cert CN? don't care, ignore
|
||||
device.ssl_context.check_hostname = False
|
||||
device.ssl_context.verify_mode = ssl.CERT_NONE
|
||||
|
||||
# Tweak connect message to change initial FW version:
|
||||
# Any latter steps might want to change it to something else
|
||||
# (test capabilities change, for example);
|
||||
# However, we're making a fixture, hence all values must be the same
|
||||
# on the initial step.
|
||||
connect_msg = json.loads(device.messages.connect)
|
||||
connect_msg['params']['capabilities']['platform'] = "ap"
|
||||
connect_msg['params']['firmware'] = "Test_FW_A"
|
||||
connect_msg['params']['uuid'] = 1
|
||||
device.messages.connect = json.dumps(connect_msg)
|
||||
|
||||
self.device_sim = device
|
||||
|
||||
producer = KafkaProducer(db='localhost:9092', topic='CnC')
|
||||
consumer = KafkaConsumer(db='localhost:9092', topic='CnC_Res', consumer_timeout=12000)
|
||||
admin = KafkaAdmin(host='localhost', port=9092)
|
||||
|
||||
self.kafka_producer = producer
|
||||
self.kafka_consumer = consumer
|
||||
self.kafka_admin = admin
|
||||
|
||||
psql_client = PSQLClient(host="localhost", port=5432, database="cgw", user="cgw", password="123")
|
||||
self.psql_client = psql_client
|
||||
|
||||
redis_client = RedisClient(host="localhost", port=6379)
|
||||
self.redis_client = redis_client
|
||||
|
||||
@pytest.fixture(scope='function')
|
||||
def test_context():
|
||||
ctx = TestContext()
|
||||
|
||||
yield ctx
|
||||
|
||||
ctx.device_sim.disconnect()
|
||||
|
||||
# Let's make sure we destroy all groups after we're done with tests.
|
||||
if ctx.kafka_producer.is_connected():
|
||||
# 1. Destroy default group
|
||||
ctx.kafka_producer.handle_single_group_delete(ctx.default_kafka_group())
|
||||
# 2. Get all groups from Redis or PostgreSQL
|
||||
groups_list = ctx.psql_client.get_all_infrastructure_groups()
|
||||
|
||||
if groups_list is not None:
|
||||
# 3. Iterate over groups
|
||||
for group in groups_list:
|
||||
# 4. Send group_del request
|
||||
ctx.kafka_producer.handle_single_group_delete(str(group[0]))
|
||||
|
||||
# We have to clear any messages after we're done working with Kafka
|
||||
if ctx.kafka_consumer.is_connected():
|
||||
ctx.kafka_consumer.flush()
|
||||
|
||||
ctx.kafka_producer.disconnect()
|
||||
ctx.kafka_consumer.disconnect()
|
||||
ctx.kafka_admin.disconnect()
|
||||
|
||||
ctx.psql_client.disconnect()
|
||||
ctx.redis_client.disconnect()
|
||||
|
||||
@pytest.fixture(scope='function')
|
||||
def cgw_probe(test_context):
|
||||
try:
|
||||
r = requests.get("http://localhost:8080/health")
|
||||
print("CGW status: " + str(r.status_code) + ', txt:' + r.text)
|
||||
assert r is not None and r.status_code == 200, \
|
||||
f"CGW is in a bad state (health != 200), can't proceed"
|
||||
except:
|
||||
raise Exception('CGW health fetch failed (Not running?)')
|
||||
|
||||
@pytest.fixture(scope='function')
|
||||
def kafka_probe(test_context):
|
||||
try:
|
||||
test_context.kafka_producer.connect()
|
||||
test_context.kafka_consumer.connect()
|
||||
except:
|
||||
raise Exception('Failed to connect to kafka broker! Either CnC, CnC_Res topics are unavailable, or broker is down (not running)')
|
||||
|
||||
# Let's make sure default group is always deleted.
|
||||
test_context.kafka_producer.handle_single_group_delete(test_context.default_kafka_group())
|
||||
|
||||
# We have to clear any messages before we can work with kafka
|
||||
test_context.kafka_consumer.flush()
|
||||
|
||||
@pytest.fixture(scope='function')
|
||||
def kafka_admin_probe(test_context):
|
||||
try:
|
||||
test_context.kafka_admin.connect()
|
||||
except:
|
||||
raise Exception('Failed to connect to Kafka broker!')
|
||||
|
||||
@pytest.fixture(scope='function')
|
||||
def device_sim_connect(test_context):
|
||||
# Make sure we initiate connect;
|
||||
# If this thing throws - any tests that depend on this fixture would fail.
|
||||
test_context.device_sim.connect()
|
||||
|
||||
@pytest.fixture(scope='function')
|
||||
def psql_probe(test_context):
|
||||
try:
|
||||
test_context.psql_client.connect()
|
||||
except:
|
||||
raise Exception('Failed to connect to PSQL DB!')
|
||||
|
||||
@pytest.fixture(scope='function')
|
||||
def redis_probe(test_context):
|
||||
try:
|
||||
test_context.redis_client.connect()
|
||||
except:
|
||||
raise Exception('Failed to connect to Redis DB!')
|
||||
|
||||
@pytest.fixture(scope='function')
|
||||
def device_sim_reconnect(test_context):
|
||||
assert test_context.device_sim._socket is not None, \
|
||||
f"Expected websocket connection to execute a reconnect while socket is not connected!"
|
||||
|
||||
time.sleep(1)
|
||||
test_context.device_sim.disconnect()
|
||||
assert test_context.device_sim._socket is None, \
|
||||
f"Expected websocket connection to be NULL after disconnect."
|
||||
time.sleep(1)
|
||||
|
||||
test_context.device_sim.connect()
|
||||
assert test_context.device_sim._socket is not None, \
|
||||
f"Expected websocket connection NOT to be NULL after reconnect."
|
||||
|
||||
@pytest.fixture(scope='function')
|
||||
def device_sim_send_ucentral_connect(test_context):
|
||||
assert test_context.device_sim._socket is not None, \
|
||||
f"Expected websocket connection to send a connect ucentral event while socket is not connected!"
|
||||
|
||||
test_context.device_sim.send_hello(test_context.device_sim._socket)
|
||||
|
||||
|
||||
@pytest.fixture(scope='function')
|
||||
def kafka_default_infra_group(test_context):
|
||||
assert test_context.kafka_producer.is_connected(),\
|
||||
f'Cannot create default group: kafka producer is not connected to Kafka'
|
||||
|
||||
assert test_context.kafka_consumer.is_connected(),\
|
||||
f'Cannot create default group: kafka consumer is not connected to Kafka'
|
||||
|
||||
uuid_val = random.randint(1, 100)
|
||||
default_group = test_context.default_kafka_group()
|
||||
default_shard_id = test_context.default_shard_id()
|
||||
|
||||
test_context.kafka_producer.handle_single_group_create(default_group, uuid_val, default_shard_id)
|
||||
ret_msg = test_context.kafka_consumer.get_result_msg(uuid_val)
|
||||
if not ret_msg:
|
||||
print('Failed to receive create group result, was expecting ' + str(uuid_val) + ' uuid reply')
|
||||
raise Exception('Failed to receive create group result when expected')
|
||||
|
||||
if ret_msg.value['success'] is False:
|
||||
print(ret_msg.value['error_message'])
|
||||
raise Exception('Default infra group creation failed!')
|
||||
|
||||
group_info = test_context.psql_client.get_infrastructure_group(int(default_group))
|
||||
if not group_info:
|
||||
print(f'Failed to get group {default_group} from PSQL!')
|
||||
raise Exception('Default infra group creation failed!')
|
||||
|
||||
assert group_info[0] == int(default_group)
|
||||
|
||||
group_info = test_context.redis_client.get_infrastructure_group(int(default_group))
|
||||
if not group_info:
|
||||
print(f'Failed to get group {default_group} from Redis!')
|
||||
raise Exception('Default infra group creation failed!')
|
||||
|
||||
assert group_info.get('gid') == default_group
|
||||
|
||||
@pytest.fixture(scope='function')
|
||||
def kafka_default_infra(test_context):
|
||||
assert test_context.kafka_producer.is_connected(),\
|
||||
f'Cannot create default infra: kafka producer is not connected to Kafka'
|
||||
|
||||
assert test_context.kafka_consumer.is_connected(),\
|
||||
f'Cannot create default infra: kafka consumer is not connected to Kafka'
|
||||
|
||||
uuid_val = random.randint(1, 100)
|
||||
default_group = test_context.default_kafka_group()
|
||||
default_infra_mac = test_context.default_dev_sim_mac()
|
||||
default_shard_id = test_context.default_shard_id()
|
||||
|
||||
test_context.kafka_producer.handle_single_device_assign(default_group, default_infra_mac, uuid_val)
|
||||
ret_msg = test_context.kafka_consumer.get_result_msg(uuid_val)
|
||||
if ret_msg is None:
|
||||
print('Failed to receive infra assign result, was expecting ' + str(uuid_val) + ' uuid reply')
|
||||
raise Exception('Failed to receive infra assign result when expected')
|
||||
|
||||
if ret_msg.value['success'] is False:
|
||||
print(ret_msg.value['error_message'])
|
||||
raise Exception('Default infra assign failed!')
|
||||
|
||||
infra_info = test_context.psql_client.get_infra(default_infra_mac)
|
||||
if not infra_info:
|
||||
print(f'Failed to get infra {default_infra_mac} from PSQL!')
|
||||
raise Exception('Default infra assign failed!')
|
||||
|
||||
db_mac = infra_info[0]
|
||||
db_mac = db_mac.replace(":", "-", 5)
|
||||
assert db_mac == default_infra_mac
|
||||
|
||||
infra_info = test_context.redis_client.get_infra(default_shard_id, default_infra_mac)
|
||||
if not infra_info:
|
||||
print(f'Failed to get infra {default_infra_mac} from Redis!')
|
||||
raise Exception('Default infra assign failed!')
|
||||
|
||||
assert infra_info.get('group_id') == int(default_group)
|
||||
117
tests/metrics.py
Normal file
@@ -0,0 +1,117 @@
|
||||
import re
|
||||
import requests
|
||||
|
||||
|
||||
def cgw_metric_get(host: str = "localhost", port: int = 8080) -> str:
|
||||
metrics = ""
|
||||
|
||||
try:
|
||||
# Try to fetch metrics with 5 seconds timeout value
|
||||
r = requests.get(f"http://{host}:{port}/metrics", timeout=5)
|
||||
print("CGW metrics ret code: " + str(r.status_code))
|
||||
assert r is not None and r.status_code == 200, \
|
||||
f"CGW metrics is not available"
|
||||
metrics = r.text
|
||||
except Exception as e:
|
||||
print("CGW metrics: raised exception when tried to fetch metrics:" + e)
|
||||
raise Exception('CGW metrics fetch failed (Not running?)')
|
||||
|
||||
return metrics
|
||||
|
||||
|
||||
def cgw_metrics_get_active_shards_num() -> int:
|
||||
active_shards_num = 0
|
||||
metrics = cgw_metric_get()
|
||||
|
||||
match = re.search(r"cgw_active_shards_num (\d+)", metrics)
|
||||
if match:
|
||||
active_shards_num = int(match.group(1))
|
||||
print(f"Active shards num: {active_shards_num}")
|
||||
else:
|
||||
print("Active shards num not found.")
|
||||
|
||||
return active_shards_num
|
||||
|
||||
|
||||
def cgw_metrics_get_connections_num() -> int:
|
||||
wss_connections_num = 0
|
||||
metrics = cgw_metric_get()
|
||||
|
||||
match = re.search(r"cgw_connections_num (\d+)", metrics)
|
||||
if match:
|
||||
wss_connections_num = int(match.group(1))
|
||||
print(f"WSS conections num: {wss_connections_num}")
|
||||
else:
|
||||
print("WSS conections num not found.")
|
||||
|
||||
return wss_connections_num
|
||||
|
||||
|
||||
def cgw_metrics_get_groups_assigned_num() -> int:
|
||||
groups_assigned_num = 0
|
||||
metrics = cgw_metric_get()
|
||||
|
||||
match = re.search(r"cgw_groups_assigned_num (\d+)", metrics)
|
||||
if match:
|
||||
groups_assigned_num = int(match.group(1))
|
||||
print(f"Groups assigned num: {groups_assigned_num}")
|
||||
else:
|
||||
print("Groups assigned num not found.")
|
||||
|
||||
return groups_assigned_num
|
||||
|
||||
|
||||
def cgw_metrics_get_group_infras_assigned_num(group_id: int) -> int:
|
||||
group_infras_assigned_num = 0
|
||||
metrics = cgw_metric_get()
|
||||
|
||||
match = re.search(rf"cgw_group_{group_id}_infras_assigned_num (\d+)", metrics)
|
||||
if match:
|
||||
group_infras_assigned_num = int(match.group(1))
|
||||
print(f"Group {group_id} infras assigned num: {group_infras_assigned_num}")
|
||||
else:
|
||||
print(f"Group {group_id} infras assigned num not found.")
|
||||
|
||||
return group_infras_assigned_num
|
||||
|
||||
|
||||
def cgw_metrics_get_groups_capacity() -> int:
|
||||
groups_capacity = 0
|
||||
metrics = cgw_metric_get()
|
||||
|
||||
match = re.search(r"cgw_groups_capacity (\d+)", metrics)
|
||||
if match:
|
||||
groups_capacity = int(match.group(1))
|
||||
print(f"Groups capacity: {groups_capacity}")
|
||||
else:
|
||||
print("Groups capacity.")
|
||||
|
||||
return groups_capacity
|
||||
|
||||
|
||||
def cgw_metrics_get_groups_threshold() -> int:
|
||||
groups_threshold = 0
|
||||
metrics = cgw_metric_get()
|
||||
|
||||
match = re.search(r"cgw_groups_threshold (\d+)", metrics)
|
||||
if match:
|
||||
groups_threshold = int(match.group(1))
|
||||
print(f"Groups assigned num: {groups_threshold}")
|
||||
else:
|
||||
print("Groups assigned num not found.")
|
||||
|
||||
return groups_threshold
|
||||
|
||||
|
||||
def cgw_metrics_get_group_ifras_capacity() -> int:
|
||||
group_infras_capacity = 0
|
||||
metrics = cgw_metric_get()
|
||||
|
||||
match = re.search(r"cgw_group_ifras_capacity (\d+)", metrics)
|
||||
if match:
|
||||
group_infras_capacity = int(match.group(1))
|
||||
print(f"Groups capacity: {group_infras_capacity}")
|
||||
else:
|
||||
print("Groups capacity.")
|
||||
|
||||
return group_infras_capacity
|
||||
7
tests/requirements.txt
Normal file
@@ -0,0 +1,7 @@
|
||||
kafka-python==2.0.2
|
||||
websockets==13.1
|
||||
pytest==8.3.3
|
||||
randmac==0.1
|
||||
psycopg2-binary==2.9.10
|
||||
redis==5.2.0
|
||||
requests==2.32.3
|
||||
28
tests/run.sh
Executable file
@@ -0,0 +1,28 @@
|
||||
#!/bin/bash
|
||||
|
||||
# Separate exports for clearer visibility of _what exactly_
|
||||
# we're putting in python path
|
||||
|
||||
rm -rf /tmp/cgw_tests_runner;
|
||||
mkdir /tmp/cgw_tests_runner && \
|
||||
cp -rf ../tests /tmp/cgw_tests_runner/ && \
|
||||
cp -rf ../utils /tmp/cgw_tests_runner/;
|
||||
|
||||
cd /tmp/cgw_tests_runner/tests
|
||||
|
||||
export PYTHONPATH="$PYTHONPATH:$PWD"
|
||||
export PYTHONPATH="$PYTHONPATH:$PWD/../utils"
|
||||
|
||||
ln -sf ../utils/client_simulator/sim_data sim_data
|
||||
ln -sf ../utils/kafka_producer/kafka_data kafka_data
|
||||
ln -sf ../utils/cert_generator/certs/client/ certs
|
||||
ln -sf ../utils/cert_generator/certs/ca/ ca-certs
|
||||
ln -sf ../utils/client_simulator/ client_simulator
|
||||
ln -sf ../utils/kafka_producer/ kafka_producer
|
||||
ln -sf ../utils/psql_client/ psql_client
|
||||
ln -sf ../utils/redis_client/ redis_client
|
||||
|
||||
pip install -r requirements.txt
|
||||
|
||||
pytest -v
|
||||
#pytest -v -s test_cgw_infras.py
|
||||
149
tests/test_cgw_basic.py
Normal file
@@ -0,0 +1,149 @@
|
||||
import pytest
|
||||
import json
|
||||
import random
|
||||
|
||||
from metrics import cgw_metrics_get_connections_num, \
|
||||
cgw_metrics_get_groups_assigned_num, \
|
||||
cgw_metrics_get_group_infras_assigned_num
|
||||
|
||||
class TestCgwBasic:
|
||||
# Base test:
|
||||
# - test_context can be created - 'tests core' alloc / create
|
||||
# - tests can connect to kafka broker
|
||||
# - CGW is up
|
||||
# - PostgreSQL is up
|
||||
# - Redis is up
|
||||
@pytest.mark.usefixtures("test_context",
|
||||
"cgw_probe",
|
||||
"kafka_probe",
|
||||
"psql_probe",
|
||||
"redis_probe")
|
||||
def test_basic_probe(self, test_context):
|
||||
pass
|
||||
|
||||
# Base test:
|
||||
# - tests kafka client can create / receive messages through kafka bus
|
||||
# - test infra group can be successfully created
|
||||
@pytest.mark.usefixtures("test_context",
|
||||
"cgw_probe",
|
||||
"kafka_probe",
|
||||
"psql_probe",
|
||||
"redis_probe",
|
||||
"kafka_default_infra_group")
|
||||
def test_kafka_sanity(self, test_context):
|
||||
pass
|
||||
|
||||
# Base test:
|
||||
# - test infra can be added successfully to the default infra group
|
||||
@pytest.mark.usefixtures("test_context",
|
||||
"cgw_probe",
|
||||
"kafka_probe",
|
||||
"psql_probe",
|
||||
"redis_probe",
|
||||
"kafka_default_infra_group",
|
||||
"kafka_default_infra")
|
||||
def test_kafka_basic(self, test_context):
|
||||
pass
|
||||
|
||||
# Base test:
|
||||
# - certificates can be found / Used
|
||||
# - device sim can connect to CGW
|
||||
@pytest.mark.usefixtures("test_context",
|
||||
"cgw_probe",
|
||||
"device_sim_connect")
|
||||
def test_device_sim_sanity(self, test_context):
|
||||
pass
|
||||
|
||||
# Base test:
|
||||
# - device sim can send connect message to cgw
|
||||
# - kafka client can verify (pull msgs from kafka bus)
|
||||
# that simulator's indeed connected
|
||||
@pytest.mark.usefixtures("test_context",
|
||||
"cgw_probe",
|
||||
"kafka_probe",
|
||||
"device_sim_connect",
|
||||
"device_sim_send_ucentral_connect")
|
||||
def test_device_sim_base(self, test_context):
|
||||
pass
|
||||
|
||||
# Base test:
|
||||
# - unassigned infra connects to CGW, and kafka sim can validate it
|
||||
# through the <infra_join> msg
|
||||
@pytest.mark.usefixtures("test_context",
|
||||
"cgw_probe",
|
||||
"kafka_probe",
|
||||
"device_sim_connect",
|
||||
"device_sim_send_ucentral_connect")
|
||||
def test_unassigned_infra_base(self, test_context):
|
||||
join_message_received = False
|
||||
infra_is_unassigned = False
|
||||
messages = test_context.kafka_consumer.get_msgs()
|
||||
msg_mac = test_context.default_dev_sim_mac()
|
||||
|
||||
assert messages,\
|
||||
f"Failed to receive any messages (events) from sim-device, while expected connect / infra_join"
|
||||
|
||||
if not messages:
|
||||
raise Exception('Failed to receive infra assign result when expected')
|
||||
|
||||
# Expecting TWO messages to be present in the message list
|
||||
for message in messages:
|
||||
if message.value['type'] == 'infra_join' and message.key == b'0' and message.value['infra_group_infra'] == msg_mac:
|
||||
join_message_received = True
|
||||
continue
|
||||
|
||||
if message.value['type'] == 'unassigned_infra_connection' and message.key == b'0' and message.value['infra_group_infra'] == msg_mac:
|
||||
infra_is_unassigned = True
|
||||
continue
|
||||
|
||||
assert cgw_metrics_get_connections_num() == 1
|
||||
|
||||
assert join_message_received,\
|
||||
f"Failed to find 'infra_join' message for default infra MAC"
|
||||
|
||||
assert infra_is_unassigned,\
|
||||
f"Failed to find unassigned 'unassigned_infra_connection' message for default infra MAC"
|
||||
|
||||
|
||||
# Base test:
|
||||
# - assigned infra connects to CGW, and kafka sim can validate it
|
||||
# through the <infra_join> msg + kafka key
|
||||
@pytest.mark.usefixtures("test_context",
|
||||
"cgw_probe",
|
||||
"kafka_probe",
|
||||
"psql_probe",
|
||||
"redis_probe",
|
||||
"kafka_default_infra_group",
|
||||
"kafka_default_infra",
|
||||
"device_sim_connect",
|
||||
"device_sim_send_ucentral_connect")
|
||||
def test_assigned_infra_base(self, test_context):
|
||||
join_message_received = False
|
||||
infra_is_assigned = False
|
||||
messages = test_context.kafka_consumer.get_msgs()
|
||||
msg_mac = test_context.default_dev_sim_mac()
|
||||
default_group = test_context.default_kafka_group().encode('utf-8')
|
||||
|
||||
assert messages,\
|
||||
f"Failed to receive any messages (events) from sim-device, while expected connect / infra_join"
|
||||
|
||||
if not messages:
|
||||
raise Exception('Failed to receive infra assign result when expected')
|
||||
|
||||
# We can deduce whether infra's assigned by inspecting a single msg
|
||||
for message in messages:
|
||||
if message.value['type'] == 'infra_join' and message.value['infra_group_infra'] == msg_mac:
|
||||
join_message_received = True
|
||||
if message.key == default_group and str(message.value['infra_group_id']).encode('utf-8') == default_group:
|
||||
infra_is_assigned = True
|
||||
break
|
||||
|
||||
assert cgw_metrics_get_groups_assigned_num() == 1
|
||||
assert cgw_metrics_get_connections_num() == 1
|
||||
assert cgw_metrics_get_group_infras_assigned_num(int(default_group)) == 1
|
||||
|
||||
assert join_message_received,\
|
||||
f"Failed to find 'infra_join' message for default infra MAC"
|
||||
|
||||
assert infra_is_assigned,\
|
||||
f"While detected join message for default infra MAC, expected it to be assigned to group (key != default group id)"
|
||||
803
tests/test_cgw_infra_groups.py
Normal file
@@ -0,0 +1,803 @@
|
||||
import pytest
|
||||
import uuid
|
||||
import random
|
||||
|
||||
from metrics import cgw_metrics_get_groups_assigned_num, \
|
||||
cgw_metrics_get_groups_capacity, \
|
||||
cgw_metrics_get_groups_threshold
|
||||
|
||||
class TestCgwInfraGroup:
|
||||
@pytest.mark.usefixtures("test_context",
|
||||
"cgw_probe",
|
||||
"kafka_probe",
|
||||
"redis_probe",
|
||||
"psql_probe")
|
||||
def test_single_infra_group_add_del(self, test_context):
|
||||
assert test_context.kafka_producer.is_connected(),\
|
||||
f'Kafka producer is not connected to Kafka'
|
||||
|
||||
assert test_context.kafka_consumer.is_connected(),\
|
||||
f'Kafka consumer is not connected to Kafka'
|
||||
|
||||
default_shard_id = test_context.default_shard_id()
|
||||
|
||||
# Get shard info from Redis
|
||||
shard_info = test_context.redis_client.get_shard(default_shard_id)
|
||||
if not shard_info:
|
||||
print(f'Failed to get shard {default_shard_id} info from Redis!')
|
||||
raise Exception(f'Failed to get shard {default_shard_id} info from Redis!')
|
||||
|
||||
# Validate number of assigned groups
|
||||
assert int(shard_info.get('assigned_groups_num')) == cgw_metrics_get_groups_assigned_num() == 0
|
||||
|
||||
uuid_val = uuid.uuid4()
|
||||
group_id = 100
|
||||
|
||||
# Create single group
|
||||
test_context.kafka_producer.handle_single_group_create(str(group_id), uuid_val.int, default_shard_id)
|
||||
ret_msg = test_context.kafka_consumer.get_result_msg(uuid_val.int)
|
||||
if not ret_msg:
|
||||
print('Failed to receive create group result, was expecting ' + str(uuid_val.int) + ' uuid reply')
|
||||
raise Exception('Failed to receive create group result when expected')
|
||||
|
||||
assert (ret_msg.value['type'] == 'infrastructure_group_create_response')
|
||||
assert (int(ret_msg.value['infra_group_id']) == group_id)
|
||||
assert ((uuid.UUID(ret_msg.value['uuid']).int) == (uuid_val.int))
|
||||
|
||||
if ret_msg.value['success'] is False:
|
||||
print(ret_msg.value['error_message'])
|
||||
raise Exception('Infra group create failed!')
|
||||
|
||||
# Get group info from Redis
|
||||
group_info_redis = test_context.redis_client.get_infrastructure_group(group_id)
|
||||
if not group_info_redis:
|
||||
print(f'Failed to get group {group_id} info from Redis!')
|
||||
raise Exception('Infra group create failed!')
|
||||
|
||||
# Get group info from PSQL
|
||||
group_info_psql = test_context.psql_client.get_infrastructure_group(group_id)
|
||||
if not group_info_psql:
|
||||
print(f'Failed to get group {group_id} info from PSQL!')
|
||||
raise Exception('Infra group create failed!')
|
||||
|
||||
# Validate group
|
||||
assert group_info_psql[0] == int(group_info_redis.get('gid')) == group_id
|
||||
|
||||
shard_info = test_context.redis_client.get_shard(default_shard_id)
|
||||
if not shard_info:
|
||||
print(f'Failed to get shard {default_shard_id} info from Redis!')
|
||||
raise Exception(f'Failed to get shard {default_shard_id} info from Redis!!')
|
||||
|
||||
# Validate number of assigned groups
|
||||
assert int(shard_info.get('assigned_groups_num')) == cgw_metrics_get_groups_assigned_num() == 1
|
||||
|
||||
# Delete single group
|
||||
uuid_val = uuid.uuid4()
|
||||
|
||||
test_context.kafka_producer.handle_single_group_delete(str(group_id), uuid_val.int)
|
||||
ret_msg = test_context.kafka_consumer.get_result_msg(uuid_val.int)
|
||||
if not ret_msg:
|
||||
print('Failed to receive delete group result, was expecting ' + str(uuid_val.int) + ' uuid reply')
|
||||
raise Exception('Failed to receive delete group result when expected')
|
||||
|
||||
assert (ret_msg.value['type'] == 'infrastructure_group_delete_response')
|
||||
assert (int(ret_msg.value['infra_group_id']) == group_id)
|
||||
assert ((uuid.UUID(ret_msg.value['uuid']).int) == (uuid_val.int))
|
||||
|
||||
if ret_msg.value['success'] is False:
|
||||
print(ret_msg.value['error_message'])
|
||||
raise Exception('Infra group delete failed!')
|
||||
|
||||
# Get shard info from Redis
|
||||
shard_info = test_context.redis_client.get_shard(default_shard_id)
|
||||
if not shard_info:
|
||||
print(f'Failed to get shard {default_shard_id} info from Redis!')
|
||||
raise Exception(f'Failed to get shard {default_shard_id} info from Redis!')
|
||||
|
||||
# Validate group removed from Redis
|
||||
group_info_redis = test_context.redis_client.get_infrastructure_group(group_id)
|
||||
assert group_info_redis == {}
|
||||
|
||||
# Validate group removed from PSQL
|
||||
group_info_psql = test_context.psql_client.get_infrastructure_group(group_id)
|
||||
assert not group_info_psql
|
||||
|
||||
# Validate number of assigned groups
|
||||
assert int(shard_info.get('assigned_groups_num')) == cgw_metrics_get_groups_assigned_num() == 0
|
||||
|
||||
|
||||
@pytest.mark.usefixtures("test_context",
|
||||
"cgw_probe",
|
||||
"kafka_probe",
|
||||
"redis_probe",
|
||||
"psql_probe")
|
||||
def test_multiple_infra_group_add_del(self, test_context):
|
||||
assert test_context.kafka_producer.is_connected(),\
|
||||
f'Kafka producer is not connected to Kafka'
|
||||
|
||||
assert test_context.kafka_consumer.is_connected(),\
|
||||
f'Kafka consumer is not connected to Kafka'
|
||||
|
||||
default_shard_id = test_context.default_shard_id()
|
||||
|
||||
# Get shard info from Redis
|
||||
shard_info = test_context.redis_client.get_shard(default_shard_id)
|
||||
if not shard_info:
|
||||
print(f'Failed to get shard {default_shard_id} info from Redis!')
|
||||
raise Exception(f'Failed to get shard {default_shard_id} info from Redis!')
|
||||
|
||||
# Validate number of assigned groups
|
||||
assert int(shard_info.get('assigned_groups_num')) == cgw_metrics_get_groups_assigned_num() == 0
|
||||
|
||||
groups_num = random.randint(1, 10)
|
||||
|
||||
for group in range(0, groups_num):
|
||||
uuid_val = uuid.uuid4()
|
||||
group_id = (100 + group)
|
||||
|
||||
# Create single group
|
||||
test_context.kafka_producer.handle_single_group_create(str(group_id), uuid_val.int, default_shard_id)
|
||||
ret_msg = test_context.kafka_consumer.get_result_msg(uuid_val.int)
|
||||
if not ret_msg:
|
||||
print('Failed to receive create group result, was expecting ' + str(uuid_val.int) + ' uuid reply')
|
||||
raise Exception('Failed to receive create group result when expected')
|
||||
|
||||
assert (ret_msg.value['type'] == 'infrastructure_group_create_response')
|
||||
assert (int(ret_msg.value['infra_group_id']) == group_id)
|
||||
assert ((uuid.UUID(ret_msg.value['uuid']).int) == (uuid_val.int))
|
||||
|
||||
if ret_msg.value['success'] is False:
|
||||
print(ret_msg.value['error_message'])
|
||||
raise Exception('Infra group create failed!')
|
||||
|
||||
# Get group info from Redis
|
||||
group_info_redis = test_context.redis_client.get_infrastructure_group(group_id)
|
||||
if not group_info_redis:
|
||||
print(f'Failed to get group {group_id} info from Redis!')
|
||||
raise Exception('Infra group create failed!')
|
||||
|
||||
# Get group info from PSQL
|
||||
group_info_psql = test_context.psql_client.get_infrastructure_group(group_id)
|
||||
if not group_info_psql:
|
||||
print(f'Failed to get group {group_id} info from PSQL!')
|
||||
raise Exception('Infra group create failed!')
|
||||
|
||||
# Validate group
|
||||
assert group_info_psql[0] == int(group_info_redis.get('gid')) == group_id
|
||||
|
||||
shard_info = test_context.redis_client.get_shard(default_shard_id)
|
||||
if not shard_info:
|
||||
print(f'Failed to get shard {default_shard_id} info from Redis!')
|
||||
raise Exception(f'Failed to get shard {default_shard_id} info from Redis!!')
|
||||
|
||||
# Validate number of assigned groups
|
||||
assert int(shard_info.get('assigned_groups_num')) == cgw_metrics_get_groups_assigned_num() == (group + 1)
|
||||
|
||||
# Make sure assigned groups number from CGW side is expected
|
||||
assert cgw_metrics_get_groups_assigned_num() == groups_num
|
||||
|
||||
for group in range(0, groups_num):
|
||||
# Delete single group
|
||||
uuid_val = uuid.uuid4()
|
||||
group_id = (100 + group)
|
||||
|
||||
test_context.kafka_producer.handle_single_group_delete(str(group_id), uuid_val.int)
|
||||
ret_msg = test_context.kafka_consumer.get_result_msg(uuid_val.int)
|
||||
if not ret_msg:
|
||||
print('Failed to receive delete group result, was expecting ' + str(uuid_val.int) + ' uuid reply')
|
||||
raise Exception('Failed to receive delete group result when expected')
|
||||
|
||||
assert (ret_msg.value['type'] == 'infrastructure_group_delete_response')
|
||||
assert (int(ret_msg.value['infra_group_id']) == group_id)
|
||||
assert ((uuid.UUID(ret_msg.value['uuid']).int) == (uuid_val.int))
|
||||
|
||||
if ret_msg.value['success'] is False:
|
||||
print(ret_msg.value['error_message'])
|
||||
raise Exception('Infra group delete failed!')
|
||||
|
||||
# Get shard info from Redis
|
||||
shard_info = test_context.redis_client.get_shard(default_shard_id)
|
||||
if not shard_info:
|
||||
print(f'Failed to get shard {default_shard_id} info from Redis!')
|
||||
raise Exception(f'Failed to get shard {default_shard_id} info from Redis!')
|
||||
|
||||
# Validate group removed from Redis
|
||||
group_info_redis = test_context.redis_client.get_infrastructure_group(group_id)
|
||||
assert group_info_redis == {}
|
||||
|
||||
# Validate group removed from PSQL
|
||||
group_info_psql = test_context.psql_client.get_infrastructure_group(group_id)
|
||||
assert not group_info_psql
|
||||
|
||||
# Validate number of assigned groups
|
||||
assert int(shard_info.get('assigned_groups_num')) == cgw_metrics_get_groups_assigned_num() == (groups_num - (group + 1))
|
||||
|
||||
# Make sure after clean-up assigned group num is zero
|
||||
assert int(shard_info.get('assigned_groups_num')) == cgw_metrics_get_groups_assigned_num() == 0
|
||||
|
||||
|
||||
@pytest.mark.usefixtures("test_context",
|
||||
"cgw_probe",
|
||||
"kafka_probe",
|
||||
"redis_probe",
|
||||
"psql_probe")
|
||||
def test_create_existing_infra_group(self, test_context):
|
||||
assert test_context.kafka_producer.is_connected(),\
|
||||
f'Kafka producer is not connected to Kafka'
|
||||
|
||||
assert test_context.kafka_consumer.is_connected(),\
|
||||
f'Kafka consumer is not connected to Kafka'
|
||||
|
||||
default_shard_id = test_context.default_shard_id()
|
||||
|
||||
# Get shard info from Redis
|
||||
shard_info = test_context.redis_client.get_shard(default_shard_id)
|
||||
if not shard_info:
|
||||
print(f'Failed to get shard {default_shard_id} info from Redis!')
|
||||
raise Exception(f'Failed to get shard {default_shard_id} info from Redis!')
|
||||
|
||||
# Validate number of assigned groups
|
||||
assert int(shard_info.get('assigned_groups_num')) == cgw_metrics_get_groups_assigned_num() == 0
|
||||
|
||||
uuid_val = uuid.uuid4()
|
||||
group_id = 100
|
||||
|
||||
# Create single group
|
||||
test_context.kafka_producer.handle_single_group_create(str(group_id), uuid_val.int, default_shard_id)
|
||||
ret_msg = test_context.kafka_consumer.get_result_msg(uuid_val.int)
|
||||
if not ret_msg:
|
||||
print('Failed to receive create group result, was expecting ' + str(uuid_val.int) + ' uuid reply')
|
||||
raise Exception('Failed to receive create group result when expected')
|
||||
|
||||
assert (ret_msg.value['type'] == 'infrastructure_group_create_response')
|
||||
assert (int(ret_msg.value['infra_group_id']) == group_id)
|
||||
assert ((uuid.UUID(ret_msg.value['uuid']).int) == (uuid_val.int))
|
||||
|
||||
if ret_msg.value['success'] is False:
|
||||
print(ret_msg.value['error_message'])
|
||||
raise Exception('Infra group create failed!')
|
||||
|
||||
# Get group info from Redis
|
||||
group_info_redis = test_context.redis_client.get_infrastructure_group(group_id)
|
||||
if not group_info_redis:
|
||||
print(f'Failed to get group {group_id} info from Redis!')
|
||||
raise Exception('Infra group create failed!')
|
||||
|
||||
# Get group info from PSQL
|
||||
group_info_psql = test_context.psql_client.get_infrastructure_group(group_id)
|
||||
if not group_info_psql:
|
||||
print(f'Failed to get group {group_id} info from PSQL!')
|
||||
raise Exception('Infra group create failed!')
|
||||
|
||||
# Validate group
|
||||
assert group_info_psql[0] == int(group_info_redis.get('gid')) == group_id
|
||||
|
||||
shard_info = test_context.redis_client.get_shard(default_shard_id)
|
||||
if not shard_info:
|
||||
print(f'Failed to get shard {default_shard_id} info from Redis!')
|
||||
raise Exception(f'Failed to get shard {default_shard_id} info from Redis!!')
|
||||
|
||||
# Validate number of assigned groups
|
||||
assert int(shard_info.get('assigned_groups_num')) == cgw_metrics_get_groups_assigned_num() == 1
|
||||
|
||||
# Try to create the same group
|
||||
test_context.kafka_producer.handle_single_group_create(str(group_id), uuid_val.int, default_shard_id)
|
||||
ret_msg = test_context.kafka_consumer.get_result_msg(uuid_val.int)
|
||||
if not ret_msg:
|
||||
print('Failed to receive create group result, was expecting ' + str(uuid_val.int) + ' uuid reply')
|
||||
raise Exception('Failed to receive create group result when expected')
|
||||
|
||||
assert (ret_msg.value['type'] == 'infrastructure_group_create_response')
|
||||
assert (int(ret_msg.value['infra_group_id']) == group_id)
|
||||
assert ((uuid.UUID(ret_msg.value['uuid']).int) == (uuid_val.int))
|
||||
|
||||
# Expected request to be failed
|
||||
if ret_msg.value['success'] is True:
|
||||
raise Exception('Infra group create completed, while expected to be failed!')
|
||||
|
||||
shard_info = test_context.redis_client.get_shard(default_shard_id)
|
||||
if not shard_info:
|
||||
print(f'Failed to get shard {default_shard_id} info from Redis!')
|
||||
raise Exception(f'Failed to get shard {default_shard_id} info from Redis!!')
|
||||
|
||||
# Validate number of assigned groups
|
||||
assert int(shard_info.get('assigned_groups_num')) == cgw_metrics_get_groups_assigned_num() == 1
|
||||
|
||||
# Delete single group
|
||||
uuid_val = uuid.uuid4()
|
||||
|
||||
test_context.kafka_producer.handle_single_group_delete(str(group_id), uuid_val.int)
|
||||
ret_msg = test_context.kafka_consumer.get_result_msg(uuid_val.int)
|
||||
if not ret_msg:
|
||||
print('Failed to receive delete group result, was expecting ' + str(uuid_val.int) + ' uuid reply')
|
||||
raise Exception('Failed to receive delete group result when expected')
|
||||
|
||||
assert (ret_msg.value['type'] == 'infrastructure_group_delete_response')
|
||||
assert (int(ret_msg.value['infra_group_id']) == group_id)
|
||||
assert ((uuid.UUID(ret_msg.value['uuid']).int) == (uuid_val.int))
|
||||
|
||||
if ret_msg.value['success'] is False:
|
||||
print(ret_msg.value['error_message'])
|
||||
raise Exception('Infra group delete failed!')
|
||||
|
||||
# Get shard info from Redis
|
||||
shard_info = test_context.redis_client.get_shard(default_shard_id)
|
||||
if not shard_info:
|
||||
print(f'Failed to get shard {default_shard_id} info from Redis!')
|
||||
raise Exception(f'Failed to get shard {default_shard_id} info from Redis!')
|
||||
|
||||
# Validate group removed from Redis
|
||||
group_info_redis = test_context.redis_client.get_infrastructure_group(group_id)
|
||||
assert group_info_redis == {}
|
||||
|
||||
# Validate group removed from PSQL
|
||||
group_info_psql = test_context.psql_client.get_infrastructure_group(group_id)
|
||||
assert not group_info_psql
|
||||
|
||||
# Validate number of assigned groups
|
||||
assert int(shard_info.get('assigned_groups_num')) == cgw_metrics_get_groups_assigned_num() == 0
|
||||
|
||||
|
||||
@pytest.mark.usefixtures("test_context",
|
||||
"cgw_probe",
|
||||
"kafka_probe",
|
||||
"redis_probe",
|
||||
"psql_probe")
|
||||
def test_remove_not_existing_infra_group(self, test_context):
|
||||
assert test_context.kafka_producer.is_connected(),\
|
||||
f'Kafka producer is not connected to Kafka'
|
||||
|
||||
assert test_context.kafka_consumer.is_connected(),\
|
||||
f'Kafka consumer is not connected to Kafka'
|
||||
|
||||
default_shard_id = test_context.default_shard_id()
|
||||
|
||||
# Get shard info from Redis
|
||||
shard_info = test_context.redis_client.get_shard(default_shard_id)
|
||||
if not shard_info:
|
||||
print(f'Failed to get shard {default_shard_id} info from Redis!')
|
||||
raise Exception(f'Failed to get shard {default_shard_id} info from Redis!')
|
||||
|
||||
# Validate number of assigned groups
|
||||
assert int(shard_info.get('assigned_groups_num')) == cgw_metrics_get_groups_assigned_num() == 0
|
||||
|
||||
group_id = 100
|
||||
uuid_val = uuid.uuid4()
|
||||
|
||||
test_context.kafka_producer.handle_single_group_delete(str(group_id), uuid_val.int)
|
||||
ret_msg = test_context.kafka_consumer.get_result_msg(uuid_val.int)
|
||||
if not ret_msg:
|
||||
print('Failed to receive delete group result, was expecting ' + str(uuid_val.int) + ' uuid reply')
|
||||
raise Exception('Failed to receive delete group result when expected')
|
||||
|
||||
assert (ret_msg.value['type'] == 'infrastructure_group_delete_response')
|
||||
assert (int(ret_msg.value['infra_group_id']) == group_id)
|
||||
assert ((uuid.UUID(ret_msg.value['uuid']).int) == (uuid_val.int))
|
||||
|
||||
if ret_msg.value['success'] is True:
|
||||
raise Exception('Infra group delete completed, while expected to be failed!')
|
||||
|
||||
# Get shard info from Redis
|
||||
shard_info = test_context.redis_client.get_shard(default_shard_id)
|
||||
if not shard_info:
|
||||
print(f'Failed to get shard {default_shard_id} info from Redis!')
|
||||
raise Exception(f'Failed to get shard {default_shard_id} info from Redis!')
|
||||
|
||||
# Validate number of assigned groups
|
||||
assert int(shard_info.get('assigned_groups_num')) == cgw_metrics_get_groups_assigned_num() == 0
|
||||
|
||||
|
||||
@pytest.mark.usefixtures("test_context",
|
||||
"cgw_probe",
|
||||
"kafka_probe",
|
||||
"redis_probe",
|
||||
"psql_probe")
|
||||
def test_single_infra_group_add_del_to_shard(self, test_context):
|
||||
assert test_context.kafka_producer.is_connected(),\
|
||||
f'Kafka producer is not connected to Kafka'
|
||||
|
||||
assert test_context.kafka_consumer.is_connected(),\
|
||||
f'Kafka consumer is not connected to Kafka'
|
||||
|
||||
default_shard_id = test_context.default_shard_id()
|
||||
|
||||
# Get shard info from Redis
|
||||
shard_info = test_context.redis_client.get_shard(default_shard_id)
|
||||
if not shard_info:
|
||||
print(f'Failed to get shard {default_shard_id} info from Redis!')
|
||||
raise Exception(f'Failed to get shard {default_shard_id} info from Redis!')
|
||||
|
||||
# Validate number of assigned groups
|
||||
assert int(shard_info.get('assigned_groups_num')) == cgw_metrics_get_groups_assigned_num() == 0
|
||||
|
||||
uuid_val = uuid.uuid4()
|
||||
group_id = 100
|
||||
|
||||
# Create single group
|
||||
test_context.kafka_producer.handle_single_group_create(str(group_id), uuid_val.int, default_shard_id)
|
||||
ret_msg = test_context.kafka_consumer.get_result_msg(uuid_val.int)
|
||||
if not ret_msg:
|
||||
print('Failed to receive create group result, was expecting ' + str(uuid_val.int) + ' uuid reply')
|
||||
raise Exception('Failed to receive create group result when expected')
|
||||
|
||||
assert (ret_msg.value['type'] == 'infrastructure_group_create_response')
|
||||
assert (int(ret_msg.value['infra_group_id']) == group_id)
|
||||
assert ((uuid.UUID(ret_msg.value['uuid']).int) == (uuid_val.int))
|
||||
|
||||
if ret_msg.value['success'] is False:
|
||||
print(ret_msg.value['error_message'])
|
||||
raise Exception('Infra group create failed!')
|
||||
|
||||
# Get group info from Redis
|
||||
group_info_redis = test_context.redis_client.get_infrastructure_group(group_id)
|
||||
if not group_info_redis:
|
||||
print(f'Failed to get group {group_id} info from Redis!')
|
||||
raise Exception('Infra group create failed!')
|
||||
|
||||
# Get group info from PSQL
|
||||
group_info_psql = test_context.psql_client.get_infrastructure_group(group_id)
|
||||
if not group_info_psql:
|
||||
print(f'Failed to get group {group_id} info from PSQL!')
|
||||
raise Exception('Infra group create failed!')
|
||||
|
||||
# Validate group
|
||||
assert group_info_psql[0] == int(group_info_redis.get('gid')) == group_id
|
||||
|
||||
shard_info = test_context.redis_client.get_shard(default_shard_id)
|
||||
if not shard_info:
|
||||
print(f'Failed to get shard {default_shard_id} info from Redis!')
|
||||
raise Exception(f'Failed to get shard {default_shard_id} info from Redis!!')
|
||||
|
||||
# Validate number of assigned groups
|
||||
assert int(shard_info.get('assigned_groups_num')) == cgw_metrics_get_groups_assigned_num() == 1
|
||||
|
||||
# Delete single group
|
||||
uuid_val = uuid.uuid4()
|
||||
|
||||
test_context.kafka_producer.handle_single_group_delete(str(group_id), uuid_val.int)
|
||||
ret_msg = test_context.kafka_consumer.get_result_msg(uuid_val.int)
|
||||
if not ret_msg:
|
||||
print('Failed to receive delete group result, was expecting ' + str(uuid_val.int) + ' uuid reply')
|
||||
raise Exception('Failed to receive delete group result when expected')
|
||||
|
||||
assert (ret_msg.value['type'] == 'infrastructure_group_delete_response')
|
||||
assert (int(ret_msg.value['infra_group_id']) == group_id)
|
||||
assert ((uuid.UUID(ret_msg.value['uuid']).int) == (uuid_val.int))
|
||||
|
||||
if ret_msg.value['success'] is False:
|
||||
print(ret_msg.value['error_message'])
|
||||
raise Exception('Infra group delete failed!')
|
||||
|
||||
# Get shard info from Redis
|
||||
shard_info = test_context.redis_client.get_shard(default_shard_id)
|
||||
if not shard_info:
|
||||
print(f'Failed to get shard {default_shard_id} info from Redis!')
|
||||
raise Exception(f'Failed to get shard {default_shard_id} info from Redis!')
|
||||
|
||||
# Validate group removed from Redis
|
||||
group_info_redis = test_context.redis_client.get_infrastructure_group(group_id)
|
||||
assert group_info_redis == {}
|
||||
|
||||
# Validate group removed from PSQL
|
||||
group_info_psql = test_context.psql_client.get_infrastructure_group(group_id)
|
||||
assert not group_info_psql
|
||||
|
||||
# Validate number of assigned groups
|
||||
assert int(shard_info.get('assigned_groups_num')) == cgw_metrics_get_groups_assigned_num() == 0
|
||||
|
||||
|
||||
@pytest.mark.usefixtures("test_context",
"cgw_probe",
"kafka_probe",
"redis_probe",
"psql_probe")
def test_multiple_infra_group_add_del_to_shard(self, test_context):
assert test_context.kafka_producer.is_connected(),\
f'Kafka producer is not connected to Kafka'

assert test_context.kafka_consumer.is_connected(),\
f'Kafka consumer is not connected to Kafka'

default_shard_id = test_context.default_shard_id()

# Get shard info from Redis
shard_info = test_context.redis_client.get_shard(default_shard_id)
if not shard_info:
print(f'Failed to get shard {default_shard_id} info from Redis!')
raise Exception(f'Failed to get shard {default_shard_id} info from Redis!')

# Validate number of assigned groups
assert int(shard_info.get('assigned_groups_num')) == cgw_metrics_get_groups_assigned_num() == 0

groups_num = random.randint(1, 10)

for group in range(0, groups_num):
uuid_val = uuid.uuid4()
group_id = (100 + group)

# Create single group
test_context.kafka_producer.handle_single_group_create(str(group_id), uuid_val.int, default_shard_id)
ret_msg = test_context.kafka_consumer.get_result_msg(uuid_val.int)
if not ret_msg:
print('Failed to receive create group result, was expecting ' + str(uuid_val.int) + ' uuid reply')
raise Exception('Failed to receive create group result when expected')

assert (ret_msg.value['type'] == 'infrastructure_group_create_response')
assert (int(ret_msg.value['infra_group_id']) == group_id)
assert ((uuid.UUID(ret_msg.value['uuid']).int) == (uuid_val.int))

if ret_msg.value['success'] is False:
print(ret_msg.value['error_message'])
raise Exception('Infra group create failed!')

# Get group info from Redis
group_info_redis = test_context.redis_client.get_infrastructure_group(group_id)
if not group_info_redis:
print(f'Failed to get group {group_id} info from Redis!')
raise Exception('Infra group create failed!')

# Get group info from PSQL
group_info_psql = test_context.psql_client.get_infrastructure_group(group_id)
if not group_info_psql:
print(f'Failed to get group {group_id} info from PSQL!')
raise Exception('Infra group create failed!')

# Validate group
assert group_info_psql[0] == int(group_info_redis.get('gid')) == group_id

shard_info = test_context.redis_client.get_shard(default_shard_id)
if not shard_info:
print(f'Failed to get shard {default_shard_id} info from Redis!')
raise Exception(f'Failed to get shard {default_shard_id} info from Redis!')

# Validate number of assigned groups
assert int(shard_info.get('assigned_groups_num')) == cgw_metrics_get_groups_assigned_num() == (group + 1)

# Make sure the number of assigned groups reported by CGW is as expected
assert int(shard_info.get('assigned_groups_num')) == cgw_metrics_get_groups_assigned_num() == groups_num

for group in range(0, groups_num):
# Delete single group
uuid_val = uuid.uuid4()
group_id = (100 + group)

test_context.kafka_producer.handle_single_group_delete(str(group_id), uuid_val.int)
ret_msg = test_context.kafka_consumer.get_result_msg(uuid_val.int)
if not ret_msg:
print('Failed to receive delete group result, was expecting ' + str(uuid_val.int) + ' uuid reply')
raise Exception('Failed to receive delete group result when expected')

assert (ret_msg.value['type'] == 'infrastructure_group_delete_response')
assert (int(ret_msg.value['infra_group_id']) == group_id)
assert ((uuid.UUID(ret_msg.value['uuid']).int) == (uuid_val.int))

if ret_msg.value['success'] is False:
print(ret_msg.value['error_message'])
raise Exception('Infra group delete failed!')

# Get shard info from Redis
shard_info = test_context.redis_client.get_shard(default_shard_id)
if not shard_info:
print(f'Failed to get shard {default_shard_id} info from Redis!')
raise Exception(f'Failed to get shard {default_shard_id} info from Redis!')

# Validate group removed from Redis
group_info_redis = test_context.redis_client.get_infrastructure_group(group_id)
assert group_info_redis == {}

# Validate group removed from PSQL
group_info_psql = test_context.psql_client.get_infrastructure_group(group_id)
assert not group_info_psql

# Validate number of assigned groups
assert int(shard_info.get('assigned_groups_num')) == cgw_metrics_get_groups_assigned_num() == (groups_num - (group + 1))

# Make sure the assigned group count is zero after clean-up
assert int(shard_info.get('assigned_groups_num')) == cgw_metrics_get_groups_assigned_num() == 0

@pytest.mark.usefixtures("test_context",
"cgw_probe",
"kafka_probe",
"redis_probe",
"psql_probe")
def test_single_infra_group_add_to_not_existing_shard(self, test_context):
assert test_context.kafka_producer.is_connected(),\
f'Kafka producer is not connected to Kafka'

assert test_context.kafka_consumer.is_connected(),\
f'Kafka consumer is not connected to Kafka'

default_shard_id = test_context.default_shard_id()

# Get shard info from Redis
shard_info = test_context.redis_client.get_shard(default_shard_id)
if not shard_info:
print(f'Failed to get shard {default_shard_id} info from Redis!')
raise Exception(f'Failed to get shard {default_shard_id} info from Redis!')

# Validate number of assigned groups
assert int(shard_info.get('assigned_groups_num')) == cgw_metrics_get_groups_assigned_num() == 0

uuid_val = uuid.uuid4()
group_id = 100
shard_id = 2

# Create single group
test_context.kafka_producer.handle_single_group_create(str(group_id), uuid_val.int, shard_id)
ret_msg = test_context.kafka_consumer.get_result_msg(uuid_val.int)
if not ret_msg:
print('Failed to receive create group result, was expecting ' + str(uuid_val.int) + ' uuid reply')
raise Exception('Failed to receive create group result when expected')

assert (ret_msg.value['type'] == 'infrastructure_group_create_response')
assert (int(ret_msg.value['infra_group_id']) == group_id)
assert ((uuid.UUID(ret_msg.value['uuid']).int) == (uuid_val.int))

if ret_msg.value['success'] is True:
print(ret_msg.value['error_message'])
raise Exception('Infra group create succeeded, while it was expected to fail!')

# Get shard info from Redis
shard_info = test_context.redis_client.get_shard(default_shard_id)
if not shard_info:
print(f'Failed to get shard {default_shard_id} info from Redis!')
raise Exception(f'Failed to get shard {default_shard_id} info from Redis!')

# Validate group was not added to Redis
group_info_redis = test_context.redis_client.get_infrastructure_group(group_id)
assert group_info_redis == {}

# Validate group was not added to PSQL
group_info_psql = test_context.psql_client.get_infrastructure_group(group_id)
assert not group_info_psql

# Validate number of assigned groups
assert int(shard_info.get('assigned_groups_num')) == cgw_metrics_get_groups_assigned_num() == 0

@pytest.mark.usefixtures("test_context",
"cgw_probe",
"kafka_probe",
"redis_probe",
"psql_probe")
def test_infra_group_capacity_overflow(self, test_context):
assert test_context.kafka_producer.is_connected(),\
f'Kafka producer is not connected to Kafka'

assert test_context.kafka_consumer.is_connected(),\
f'Kafka consumer is not connected to Kafka'

default_shard_id = test_context.default_shard_id()

# Get shard info from Redis
shard_info = test_context.redis_client.get_shard(default_shard_id)
if not shard_info:
print(f'Failed to get shard {default_shard_id} info from Redis!')
raise Exception(f'Failed to get shard {default_shard_id} info from Redis!')

# Validate number of assigned groups
assert int(shard_info.get('assigned_groups_num')) == cgw_metrics_get_groups_assigned_num() == 0

groups_capacity = cgw_metrics_get_groups_capacity()
groups_threshold = cgw_metrics_get_groups_threshold()

groups_num = (groups_capacity + groups_threshold)
# Create the maximum allowed number of groups
for group in range(0, groups_num):
uuid_val = uuid.uuid4()
group_id = (100 + group)

# Create single group
test_context.kafka_producer.handle_single_group_create(str(group_id), uuid_val.int, default_shard_id)
ret_msg = test_context.kafka_consumer.get_result_msg(uuid_val.int)
if not ret_msg:
print('Failed to receive create group result, was expecting ' + str(uuid_val.int) + ' uuid reply')
raise Exception('Failed to receive create group result when expected')

assert (ret_msg.value['type'] == 'infrastructure_group_create_response')
assert (int(ret_msg.value['infra_group_id']) == group_id)
assert ((uuid.UUID(ret_msg.value['uuid']).int) == (uuid_val.int))

if ret_msg.value['success'] is False:
print(ret_msg.value['error_message'])
raise Exception('Infra group create failed!')

# Get group info from Redis
group_info_redis = test_context.redis_client.get_infrastructure_group(group_id)
if not group_info_redis:
print(f'Failed to get group {group_id} info from Redis!')
raise Exception('Infra group create failed!')

# Get group info from PSQL
group_info_psql = test_context.psql_client.get_infrastructure_group(group_id)
if not group_info_psql:
print(f'Failed to get group {group_id} info from PSQL!')
raise Exception('Infra group create failed!')

# Validate group
assert group_info_psql[0] == int(group_info_redis.get('gid')) == group_id

shard_info = test_context.redis_client.get_shard(default_shard_id)
if not shard_info:
print(f'Failed to get shard {default_shard_id} info from Redis!')
raise Exception(f'Failed to get shard {default_shard_id} info from Redis!')

# Validate number of assigned groups
assert int(shard_info.get('assigned_groups_num')) == cgw_metrics_get_groups_assigned_num() == (group + 1)

# Make sure we reached the MAX number of groups assigned to CGW
assert int(shard_info.get('assigned_groups_num')) == cgw_metrics_get_groups_assigned_num() == groups_num

# Try to create an additional group to simulate group capacity overflow
group_to_fail_id = 2024
uuid_val = uuid.uuid4()
test_context.kafka_producer.handle_single_group_create(str(group_to_fail_id), uuid_val.int, default_shard_id)
ret_msg = test_context.kafka_consumer.get_result_msg(uuid_val.int)
if not ret_msg:
print('Failed to receive create group result, was expecting ' + str(uuid_val.int) + ' uuid reply')
raise Exception('Failed to receive create group result when expected')

assert (ret_msg.value['type'] == 'infrastructure_group_create_response')
assert (int(ret_msg.value['infra_group_id']) == group_to_fail_id)
assert ((uuid.UUID(ret_msg.value['uuid']).int) == (uuid_val.int))

if ret_msg.value['success'] is True:
print(ret_msg.value['error_message'])
raise Exception('Infra group create succeeded, while it was expected to fail due to capacity overflow!')

# Validate group was not added to Redis
group_info_redis = test_context.redis_client.get_infrastructure_group(group_to_fail_id)
assert group_info_redis == {}

# Validate group was not added to PSQL
group_info_psql = test_context.psql_client.get_infrastructure_group(group_to_fail_id)
assert not group_info_psql

# Get shard info
shard_info = test_context.redis_client.get_shard(default_shard_id)
if not shard_info:
print(f'Failed to get shard {default_shard_id} info from Redis!')
raise Exception(f'Failed to get shard {default_shard_id} info from Redis!')

# Double-check the number of groups assigned to CGW
assert int(shard_info.get('assigned_groups_num')) == cgw_metrics_get_groups_assigned_num() == groups_num

# Clean up all remaining groups
for group in range(0, groups_num):
# Delete single group
uuid_val = uuid.uuid4()
group_id = (100 + group)

test_context.kafka_producer.handle_single_group_delete(str(group_id), uuid_val.int)
ret_msg = test_context.kafka_consumer.get_result_msg(uuid_val.int)
if not ret_msg:
print('Failed to receive delete group result, was expecting ' + str(uuid_val.int) + ' uuid reply')
raise Exception('Failed to receive delete group result when expected')

assert (ret_msg.value['type'] == 'infrastructure_group_delete_response')
assert (int(ret_msg.value['infra_group_id']) == group_id)
assert ((uuid.UUID(ret_msg.value['uuid']).int) == (uuid_val.int))

if ret_msg.value['success'] is False:
print(ret_msg.value['error_message'])
raise Exception('Infra group delete failed!')

# Get shard info from Redis
shard_info = test_context.redis_client.get_shard(default_shard_id)
if not shard_info:
print(f'Failed to get shard {default_shard_id} info from Redis!')
raise Exception(f'Failed to get shard {default_shard_id} info from Redis!')

# Validate group removed from Redis
group_info_redis = test_context.redis_client.get_infrastructure_group(group_id)
assert group_info_redis == {}

# Validate group removed from PSQL
group_info_psql = test_context.psql_client.get_infrastructure_group(group_id)
assert not group_info_psql

# Validate number of assigned groups
assert int(shard_info.get('assigned_groups_num')) == cgw_metrics_get_groups_assigned_num() == (groups_num - (group + 1))

assert int(shard_info.get('assigned_groups_num')) == cgw_metrics_get_groups_assigned_num() == 0
1244
tests/test_cgw_infras.py
Normal file
File diff suppressed because it is too large
Load Diff
95
tests/test_cgw_infras_msg_queue.py
Normal file
@@ -0,0 +1,95 @@
import pytest
import uuid
import random
from randmac import RandMac
import time

from metrics import cgw_metrics_get_active_shards_num, \
cgw_metrics_get_groups_assigned_num, \
cgw_metrics_get_group_infras_assigned_num, \
cgw_metrics_get_group_ifras_capacity, \
cgw_metrics_get_connections_num


class TestCgwInfrasMsgQueue:
@pytest.mark.usefixtures("test_context",
"cgw_probe",
"kafka_probe",
"redis_probe",
"psql_probe",
"kafka_default_infra_group",
"kafka_default_infra",
"device_sim_connect",
"device_sim_send_ucentral_connect")
def test_infra_msg_reboot(self, test_context):
assert test_context.kafka_producer.is_connected(),\
f'Cannot create default group: kafka producer is not connected to Kafka'

assert test_context.kafka_consumer.is_connected(),\
f'Cannot create default group: kafka consumer is not connected to Kafka'

# Simulate at least 1 sec sleep before checking metrics
# Without it, tests can sometimes fail
# NOTE: more complex tests might avoid waiting
# by making sure to wait / recv the infra_join msg.
time.sleep(1)

assert cgw_metrics_get_active_shards_num() >= 1
assert cgw_metrics_get_connections_num() == 1

shard_info = test_context.redis_client.get_shard(0)
if not shard_info:
print(f'Failed to get shard 0 info from Redis!')
raise Exception('Failed to get shard 0 info from Redis!')

assert int(shard_info.get('assigned_groups_num')) == cgw_metrics_get_groups_assigned_num() == 1

uuid_val = uuid.uuid4()
request_id = random.randint(1, 100)
default_group = test_context.default_kafka_group()
default_infra_mac = test_context.default_dev_sim_mac()

msg = test_context.kafka_producer.device_message_reboot(default_infra_mac, id=request_id)
test_context.kafka_producer.handle_single_device_message(msg, default_group, default_infra_mac, uuid_val.int)
wss_recv_msg = test_context.device_sim.get_single_message(test_context.device_sim._socket)
kafka_ret_msg = test_context.kafka_consumer.get_result_msg(uuid_val.int)
assert wss_recv_msg is not None,\
f'Failed to receive any message while one was expected'

assert wss_recv_msg["method"] == msg["method"] == "reboot"
assert wss_recv_msg["id"] == msg["id"]
assert wss_recv_msg["params"]["serial"] == msg["params"]["serial"]

reboot_response = test_context.device_sim.messages.from_json(test_context.device_sim.messages.reboot_response)
reboot_response["result"]["id"] = wss_recv_msg["id"]

test_context.device_sim._socket.send(test_context.device_sim.messages.to_json(reboot_response))
kafka_result_msg = test_context.kafka_consumer.get_infra_request_result_msg(uuid_val.int)

assert kafka_result_msg is not None,\
f'Expected to receive enqueue request result, found none!'

assert kafka_result_msg.value['success'],\
f'Expected result message to have status successful (success=True)!'

assert kafka_result_msg.value["id"] == request_id

test_context.device_sim.disconnect()

# Simulate at least 1 sec sleep before checking metrics
time.sleep(1)
assert cgw_metrics_get_connections_num() == 0
assert test_context.device_sim._socket is None, \
f"Expected websocket connection to be NULL after disconnect."

# Chill for a while before reconnecting
time.sleep(2)

test_context.device_sim.connect()
test_context.device_sim.send_hello(test_context.device_sim._socket)

# Simulate at least 1 sec sleep before checking metrics
time.sleep(1)
assert cgw_metrics_get_connections_num() == 1
assert test_context.device_sim._socket is not None, \
f"Expected websocket connection NOT to be NULL after reconnect."
200
tests/test_cgw_malformed_packets.py
Normal file
@@ -0,0 +1,200 @@
|
||||
import pytest
|
||||
import uuid
|
||||
|
||||
from kafka_producer.src.utils import MalformedMessage
|
||||
|
||||
class TestCgwMalformedPackets:
|
||||
@pytest.mark.usefixtures("test_context",
|
||||
"cgw_probe",
|
||||
"kafka_probe",
|
||||
"redis_probe",
|
||||
"psql_probe")
|
||||
def test_malformed_infra_group_add(self, test_context):
|
||||
assert test_context.kafka_producer.is_connected(),\
|
||||
f'kafka producer is not connected to Kafka'
|
||||
|
||||
assert test_context.kafka_consumer.is_connected(),\
|
||||
f'kafka consumer is not connected to Kafka'
|
||||
|
||||
uuid_val = uuid.uuid4()
|
||||
expected_uuid = uuid.UUID('00000000-0000-0000-0000-000000000000')
|
||||
group_id = 100
|
||||
|
||||
message = MalformedMessage()
|
||||
|
||||
# Create single group
|
||||
test_context.kafka_producer.conn.send(test_context.default_producer_topic(), \
|
||||
message.group_create(uuid_val.int), bytes(str(group_id), encoding="utf-8"))
|
||||
ret_msg = test_context.kafka_consumer.get_result_msg(expected_uuid.int)
|
||||
if not ret_msg:
|
||||
print('Failed to receive create group result, was expecting ' + str(uuid_val.int) + ' uuid reply')
|
||||
raise Exception('Failed to receive create group result when expected')
|
||||
|
||||
assert (ret_msg.value['type'] == 'infrastructure_group_infra_message_enqueue_response')
|
||||
|
||||
if ret_msg.value['success'] is True:
|
||||
raise Exception('Infra group create succeeded while it was expected to fail! A malformed packet was sent!')
|
||||
|
||||
|
||||
@pytest.mark.usefixtures("test_context",
|
||||
"cgw_probe",
|
||||
"kafka_probe",
|
||||
"redis_probe",
|
||||
"psql_probe")
|
||||
def test_malformed_infra_group_add_to_shard(self, test_context):
|
||||
assert test_context.kafka_producer.is_connected(),\
|
||||
f'kafka producer is not connected to Kafka'
|
||||
|
||||
assert test_context.kafka_consumer.is_connected(),\
|
||||
f'kafka consumer is not connected to Kafka'
|
||||
|
||||
expected_uuid = uuid.UUID('00000000-0000-0000-0000-000000000000')
|
||||
uuid_val = uuid.uuid4()
|
||||
group_id = 100
|
||||
|
||||
message = MalformedMessage()
|
||||
default_shard_id = test_context.default_shard_id()
|
||||
|
||||
# Create single group
|
||||
test_context.kafka_producer.conn.send(test_context.default_producer_topic(), \
|
||||
message.group_create_to_shard(default_shard_id, uuid_val.int), bytes(str(group_id), encoding="utf-8"))
|
||||
ret_msg = test_context.kafka_consumer.get_result_msg(expected_uuid.int)
|
||||
if not ret_msg:
|
||||
print('Failed to receive create group result, was expecting ' + str(uuid_val.int) + ' uuid reply')
|
||||
raise Exception('Failed to receive create group result when expected')
|
||||
|
||||
assert (ret_msg.value['type'] == 'infrastructure_group_infra_message_enqueue_response')
|
||||
|
||||
if ret_msg.value['success'] is True:
|
||||
raise Exception('Infra group create succeeded while it was expected to fail! A malformed packet was sent!')
|
||||
|
||||
|
||||
@pytest.mark.usefixtures("test_context",
|
||||
"cgw_probe",
|
||||
"kafka_probe",
|
||||
"redis_probe",
|
||||
"psql_probe")
|
||||
def test_malformed_infra_group_del(self, test_context):
|
||||
assert test_context.kafka_producer.is_connected(),\
|
||||
f'kafka producer is not connected to Kafka'
|
||||
|
||||
assert test_context.kafka_consumer.is_connected(),\
|
||||
f'kafka consumer is not connected to Kafka'
|
||||
|
||||
expected_uuid = uuid.UUID('00000000-0000-0000-0000-000000000000')
|
||||
uuid_val = uuid.uuid4()
|
||||
group_id = 100
|
||||
|
||||
message = MalformedMessage()
|
||||
|
||||
# Delete single group
|
||||
test_context.kafka_producer.conn.send(test_context.default_producer_topic(), \
|
||||
message.group_delete(uuid_val.int), bytes(str(group_id), encoding="utf-8"))
|
||||
ret_msg = test_context.kafka_consumer.get_result_msg(expected_uuid.int)
|
||||
if not ret_msg:
|
||||
print('Failed to receive delete group result, was expecting ' + str(uuid_val.int) + ' uuid reply')
|
||||
raise Exception('Failed to receive delete group result when expected')
|
||||
|
||||
assert (ret_msg.value['type'] == 'infrastructure_group_infra_message_enqueue_response')
|
||||
|
||||
if ret_msg.value['success'] is True:
|
||||
raise Exception('Infra group delete succeeded while it was expected to fail! A malformed packet was sent!')
|
||||
|
||||
|
||||
@pytest.mark.usefixtures("test_context",
|
||||
"cgw_probe",
|
||||
"kafka_probe",
|
||||
"redis_probe",
|
||||
"psql_probe")
|
||||
def test_malformed_infras_add(self, test_context):
|
||||
assert test_context.kafka_producer.is_connected(),\
|
||||
f'kafka producer is not connected to Kafka'
|
||||
|
||||
assert test_context.kafka_consumer.is_connected(),\
|
||||
f'kafka consumer is not connected to Kafka'
|
||||
|
||||
expected_uuid = uuid.UUID('00000000-0000-0000-0000-000000000000')
|
||||
uuid_val = uuid.uuid4()
|
||||
group_id = 100
|
||||
|
||||
message = MalformedMessage()
|
||||
infra_mac = "11-22-33-44-55-66"
|
||||
|
||||
# Add infra to group
|
||||
test_context.kafka_producer.conn.send(test_context.default_producer_topic(), \
|
||||
message.add_dev_to_group([infra_mac], uuid_val.int), bytes(str(group_id), encoding="utf-8"))
|
||||
ret_msg = test_context.kafka_consumer.get_result_msg(expected_uuid.int)
|
||||
if not ret_msg:
|
||||
print('Failed to receive infras add result, was expecting ' + str(uuid_val.int) + ' uuid reply')
raise Exception('Failed to receive infras add result when expected')
|
||||
|
||||
assert (ret_msg.value['type'] == 'infrastructure_group_infra_message_enqueue_response')
|
||||
|
||||
if ret_msg.value['success'] is True:
|
||||
raise Exception('Infras add succeeded while it was expected to fail! A malformed packet was sent!')
|
||||
|
||||
|
||||
@pytest.mark.usefixtures("test_context",
|
||||
"cgw_probe",
|
||||
"kafka_probe",
|
||||
"redis_probe",
|
||||
"psql_probe")
|
||||
def test_malformed_infras_del(self, test_context):
|
||||
assert test_context.kafka_producer.is_connected(),\
|
||||
f'kafka producer is not connected to Kafka'
|
||||
|
||||
assert test_context.kafka_consumer.is_connected(),\
|
||||
f'kafka consumer is not connected to Kafka'
|
||||
|
||||
expected_uuid = uuid.UUID('00000000-0000-0000-0000-000000000000')
|
||||
uuid_val = uuid.uuid4()
|
||||
group_id = 100
|
||||
|
||||
message = MalformedMessage()
|
||||
infra_mac = "11-22-33-44-55-66"
|
||||
|
||||
# Remove infra from group
|
||||
test_context.kafka_producer.conn.send(test_context.default_producer_topic(), \
|
||||
message.remove_dev_from_group([infra_mac], uuid_val.int), bytes(str(group_id), encoding="utf-8"))
|
||||
ret_msg = test_context.kafka_consumer.get_result_msg(expected_uuid.int)
|
||||
if not ret_msg:
|
||||
print('Failed to receive infras del result, was expecting ' + str(uuid_val.int) + ' uuid reply')
raise Exception('Failed to receive infras del result when expected')
|
||||
|
||||
assert (ret_msg.value['type'] == 'infrastructure_group_infra_message_enqueue_response')
|
||||
|
||||
if ret_msg.value['success'] is True:
|
||||
raise Exception('Infras del succeeded while it was expected to fail! A malformed packet was sent!')
|
||||
|
||||
|
||||
@pytest.mark.usefixtures("test_context",
|
||||
"cgw_probe",
|
||||
"kafka_probe",
|
||||
"redis_probe",
|
||||
"psql_probe")
|
||||
def test_malformed_infra_msg(self, test_context):
|
||||
assert test_context.kafka_producer.is_connected(),\
|
||||
f'kafka producer is not connected to Kafka'
|
||||
|
||||
assert test_context.kafka_consumer.is_connected(),\
|
||||
f'kafka consumer is not connected to Kafka'
|
||||
|
||||
expected_uuid = uuid.UUID('00000000-0000-0000-0000-000000000000')
|
||||
uuid_val = uuid.uuid4()
|
||||
group_id = 100
|
||||
|
||||
message = MalformedMessage()
|
||||
infra_mac = "11-22-33-44-55-66"
|
||||
|
||||
# Send infra message
|
||||
test_context.kafka_producer.conn.send(test_context.default_producer_topic(), \
|
||||
message.to_device(infra_mac, uuid_val.int), bytes(str(group_id), encoding="utf-8"))
|
||||
ret_msg = test_context.kafka_consumer.get_result_msg(expected_uuid.int)
|
||||
if not ret_msg:
|
||||
print('Failed to receive infra message result, was expecting ' + str(uuid_val.int) + ' uuid reply')
raise Exception('Failed to receive infra message result when expected')
|
||||
|
||||
assert (ret_msg.value['type'] == 'infrastructure_group_infra_message_enqueue_response')
|
||||
|
||||
if ret_msg.value['success'] is True:
|
||||
raise Exception('Infra message succeeded while it was expected to fail! A malformed packet was sent!')
|
||||
601
tests/test_cgw_multi_instances.py
Normal file
@@ -0,0 +1,601 @@
|
||||
import pytest
|
||||
import uuid
|
||||
|
||||
from kafka_producer.src.admin import Message
|
||||
|
||||
from metrics import cgw_metrics_get_groups_assigned_num, \
|
||||
cgw_metrics_get_group_infras_assigned_num, \
|
||||
cgw_metrics_get_active_shards_num
|
||||
|
||||
|
||||
class TestCgwMultiInstances:
|
||||
@pytest.mark.usefixtures("test_context",
|
||||
"cgw_probe",
|
||||
"kafka_probe",
|
||||
"kafka_admin_probe",
|
||||
"redis_probe",
|
||||
"psql_probe")
|
||||
def test_relay_infra_add_del(self, test_context):
|
||||
"""
|
||||
This test case verifies the CGW message relaying mechanism:
1) Create group 100 on Shard ID 0
2) Calculate the Shard ID to Kafka partition map
3) Send an infra assign message for group id 100 to Shard ID N,
where N is the number of running CGW instances - 1
4) The infra assign message is received by a Shard ID that does not own group 100
5) The message is expected to be relayed to Shard ID 0
6) Validate the reported Shard ID from the infra add response message
7) Repeat steps 3-6 for the infra deassign message
"""
|
||||
|
||||
assert test_context.kafka_producer.is_connected(),\
|
||||
f'Kafka producer is not connected to Kafka'
|
||||
|
||||
assert test_context.kafka_consumer.is_connected(),\
|
||||
f'Kafka consumer is not connected to Kafka'
|
||||
|
||||
assert test_context.kafka_admin.is_connected(),\
|
||||
f'Kafka admin is not connected to Kafka'
|
||||
|
||||
active_shards_num = cgw_metrics_get_active_shards_num()
|
||||
|
||||
# This test case requires at least 2 CGW instances
# To avoid test failure in a single-CGW environment - skip the test
if active_shards_num <= 1:
pytest.skip(f"Not enough CGW instances to proceed with test! Expected >= 2, actually running - {active_shards_num}. Skip test.")
|
||||
|
||||
default_shard_id = test_context.default_shard_id()
|
||||
|
||||
# Get shard info from Redis
|
||||
shard_info = test_context.redis_client.get_shard(default_shard_id)
|
||||
if not shard_info:
|
||||
print(f'Failed to get shard {default_shard_id} info from Redis!')
|
||||
raise Exception(f'Failed to get shard {default_shard_id} info from Redis!')
|
||||
|
||||
# Validate number of assigned groups
|
||||
# assert int(shard_info.get('assigned_groups_num')) == cgw_metrics_get_groups_assigned_num() == 0
|
||||
|
||||
uuid_val = uuid.uuid4()
|
||||
group_id = 100
|
||||
|
||||
# Create single group
|
||||
test_context.kafka_producer.handle_single_group_create(str(group_id), uuid_val.int, default_shard_id)
|
||||
ret_msg = test_context.kafka_consumer.get_result_msg(uuid_val.int)
|
||||
if not ret_msg:
|
||||
print('Failed to receive create group result, was expecting ' + str(uuid_val.int) + ' uuid reply')
|
||||
raise Exception('Failed to receive create group result when expected')
|
||||
|
||||
assert (ret_msg.value['type'] == 'infrastructure_group_create_response')
|
||||
assert (int(ret_msg.value['infra_group_id']) == group_id)
|
||||
assert ((uuid.UUID(ret_msg.value['uuid']).int) == (uuid_val.int))
|
||||
|
||||
if ret_msg.value['success'] is False:
|
||||
print(ret_msg.value['error_message'])
|
||||
raise Exception('Infra group create failed!')
|
||||
|
||||
# Get group info from Redis
|
||||
group_info_redis = test_context.redis_client.get_infrastructure_group(group_id)
|
||||
if not group_info_redis:
|
||||
print(f'Failed to get group {group_id} info from Redis!')
|
||||
raise Exception('Infra group create failed!')
|
||||
|
||||
# Get group info from PSQL
|
||||
group_info_psql = test_context.psql_client.get_infrastructure_group(group_id)
|
||||
if not group_info_psql:
|
||||
print(f'Failed to get group {group_id} info from PSQL!')
|
||||
raise Exception('Infra group create failed!')
|
||||
|
||||
# Validate group
|
||||
assert group_info_psql[0] == int(group_info_redis.get('gid')) == group_id
|
||||
|
||||
shard_info = test_context.redis_client.get_shard(default_shard_id)
|
||||
if not shard_info:
|
||||
print(f'Failed to get shard {default_shard_id} info from Redis!')
|
||||
raise Exception(f'Failed to get shard {default_shard_id} info from Redis!!')
|
||||
|
||||
# Validate number of assigned groups
|
||||
# assert int(shard_info.get('assigned_groups_num')) == cgw_metrics_get_groups_assigned_num() == 1
|
||||
|
||||
# Get highest CGW ID assigned partition
|
||||
partitions = test_context.kafka_admin.get_topic_partitions_for_cgw_id('CnC', ['CGW'], (active_shards_num - 1))
|
||||
assert len(partitions) > 0
|
||||
|
||||
# Infra add to Group
|
||||
# Send message to CGW that does not own group - to force CGW message relay mechanism
|
||||
infra_mac = "11-22-33-44-55-66"
|
||||
message = Message()
|
||||
test_context.kafka_producer.conn.send(test_context.default_producer_topic(), message.add_dev_to_group(str(group_id), [infra_mac], uuid_val.int), bytes(str(group_id), encoding="utf-8"), partition=partitions[0])
|
||||
|
||||
ret_msg = test_context.kafka_consumer.get_result_msg(uuid_val.int)
|
||||
if ret_msg is None:
|
||||
print('Failed to receive infra assign result, was expecting ' + str(uuid_val.int) + ' uuid reply')
|
||||
raise Exception('Failed to receive infra assign result when expected')
|
||||
|
||||
if ret_msg.value['success'] is False:
|
||||
print(ret_msg.value['error_message'])
|
||||
raise Exception('Infra assign failed!')
|
||||
|
||||
assert (ret_msg.value['type'] == 'infrastructure_group_infras_add_response')
|
||||
assert (int(ret_msg.value["infra_group_id"]) == group_id)
|
||||
assert ((uuid.UUID(ret_msg.value['uuid']).int) == (uuid_val.int))
|
||||
# We don't expect to have even a single 'failed_infra',
|
||||
# because the overall command succeeded
|
||||
assert (len(list(ret_msg.value["failed_infras"])) == 0)
|
||||
assert (ret_msg.value["reporter_shard_id"] == default_shard_id)
|
||||
|
||||
# Get group info from Redis
|
||||
group_info_redis = test_context.redis_client.get_infrastructure_group(group_id)
|
||||
if not group_info_redis:
|
||||
print(f'Failed to get group {group_id} info from Redis!')
|
||||
raise Exception('Infra assign failed!')
|
||||
|
||||
# Get group info from PSQL
|
||||
group_info_psql = test_context.psql_client.get_infrastructure_group(group_id)
|
||||
if not group_info_psql:
|
||||
print(f'Failed to get group {group_id} info from PSQL!')
|
||||
raise Exception('Infra assign failed!')
|
||||
|
||||
# Validate infras assigned number
|
||||
assert int(group_info_redis.get('infras_assigned')) == cgw_metrics_get_group_infras_assigned_num(group_id) == 1
|
||||
|
||||
# Get infra info from Redis Infra Cache
|
||||
infra_info_redis = test_context.redis_client.get_infra(default_shard_id, infra_mac)
|
||||
if not infra_info_redis:
|
||||
print(f'Failed to get infra {infra_mac} info from Redis!')
|
||||
raise Exception('Infra assign failed!')
|
||||
|
||||
# Get infra info from PSQL
|
||||
infra_info_psql = test_context.psql_client.get_infra(infra_mac)
|
||||
if not infra_info_psql:
|
||||
print(f'Failed to get infra {infra_mac} info from PSQL!')
|
||||
raise Exception('Infra assign failed!')
|
||||
|
||||
# Validate infra assigned group id
|
||||
assert infra_info_psql[1] == int(infra_info_redis.get('group_id')) == group_id
|
||||
|
||||
# Infra del
|
||||
# Send message to CGW that does not own group - to force CGW message relay mechanism
|
||||
uuid_val = uuid.uuid4()
|
||||
test_context.kafka_producer.conn.send(test_context.default_producer_topic(), \
|
||||
message.remove_dev_from_group(str(group_id), [infra_mac], uuid_val.int), bytes(str(group_id), encoding="utf-8"), partition=partitions[0])
|
||||
ret_msg = test_context.kafka_consumer.get_result_msg(uuid_val.int)
|
||||
if ret_msg is None:
|
||||
print('Failed to receive infra deassign result, was expecting ' + str(uuid_val.int) + ' uuid reply')
|
||||
raise Exception('Failed to receive infra deassign result when expected')
|
||||
|
||||
if ret_msg.value['success'] is False:
|
||||
print(ret_msg.value['error_message'])
|
||||
raise Exception('Infra deassign failed!')
|
||||
|
||||
assert (ret_msg.value['type'] == 'infrastructure_group_infras_del_response')
|
||||
assert (int(ret_msg.value["infra_group_id"]) == group_id)
|
||||
assert ((uuid.UUID(ret_msg.value['uuid']).int) == (uuid_val.int))
|
||||
# We don't expect to have even a single 'failed_infra',
|
||||
# because the overall command succeeded
|
||||
assert (len(list(ret_msg.value["failed_infras"])) == 0)
|
||||
assert (ret_msg.value["reporter_shard_id"] == default_shard_id)
|
||||
|
||||
# Validate infra removed from Redis Infra Cache
|
||||
infra_info_redis = test_context.redis_client.get_infra(default_shard_id, infra_mac)
|
||||
assert infra_info_redis is None
|
||||
|
||||
# Validate infra removed from PSQL
|
||||
infra_info_psql = test_context.psql_client.get_infra(infra_mac)
|
||||
assert infra_info_psql is None
|
||||
|
||||
# Get group info from Redis
|
||||
group_info_redis = test_context.redis_client.get_infrastructure_group(group_id)
|
||||
if not group_info_redis:
|
||||
print(f'Failed to get group {group_id} info from Redis!')
|
||||
raise Exception('Infra deassign failed!')
|
||||
|
||||
# Get group info from PSQL
|
||||
group_info_psql = test_context.psql_client.get_infrastructure_group(group_id)
|
||||
if not group_info_psql:
|
||||
print(f'Failed to get group {group_id} info from PSQL!')
|
||||
raise Exception('Infra deassign failed!')
|
||||
|
||||
# Validate number of assigned infra number
|
||||
assert int(group_info_redis.get('infras_assigned')) == cgw_metrics_get_group_infras_assigned_num(group_id) == 0
|
||||
|
||||
# Delete single group
|
||||
uuid_val = uuid.uuid4()
|
||||
|
||||
test_context.kafka_producer.handle_single_group_delete(str(group_id), uuid_val.int)
|
||||
ret_msg = test_context.kafka_consumer.get_result_msg(uuid_val.int)
|
||||
if not ret_msg:
|
||||
print('Failed to receive delete group result, was expecting ' + str(uuid_val.int) + ' uuid reply')
|
||||
raise Exception('Failed to receive delete group result when expected')
|
||||
|
||||
assert (ret_msg.value['type'] == 'infrastructure_group_delete_response')
|
||||
assert (int(ret_msg.value['infra_group_id']) == group_id)
|
||||
assert ((uuid.UUID(ret_msg.value['uuid']).int) == (uuid_val.int))
|
||||
|
||||
if ret_msg.value['success'] is False:
|
||||
print(ret_msg.value['error_message'])
|
||||
raise Exception('Infra group delete failed!')
|
||||
|
||||
# Get shard info from Redis
|
||||
shard_info = test_context.redis_client.get_shard(default_shard_id)
|
||||
if not shard_info:
|
||||
print(f'Failed to get shard {default_shard_id} info from Redis!')
|
||||
raise Exception(f'Failed to get shard {default_shard_id} info from Redis!')
|
||||
|
||||
# Validate group removed from Redis
|
||||
group_info_redis = test_context.redis_client.get_infrastructure_group(group_id)
|
||||
assert group_info_redis == {}
|
||||
|
||||
# Validate group removed from PSQL
|
||||
group_info_psql = test_context.psql_client.get_infrastructure_group(group_id)
|
||||
assert not group_info_psql
|
||||
|
||||
# Validate number of assigned groups
|
||||
# assert int(shard_info.get('assigned_groups_num')) == cgw_metrics_get_groups_assigned_num() == 0
|
||||
|
||||
|
||||
@pytest.mark.usefixtures("test_context",
|
||||
"cgw_probe",
|
||||
"kafka_probe",
|
||||
"kafka_admin_probe",
|
||||
"redis_probe",
|
||||
"psql_probe")
|
||||
def test_single_group_to_shard_0_send_to_shard_1(self, test_context):
"""
This test case verifies CGW interconnection and the group create/delete mechanism:
1) Calculate the Shard ID to Kafka partition map
2) Prepare a group create message for Shard ID 0 (group_id = 100), but send it to the Kafka partition for Shard ID 1
3) The message is expected to be processed by Shard ID 1
4) Validate the reported Shard ID from the group create response message (expected to be Shard ID 1)
5) Repeat steps 2-4 for the group delete message
"""
|
||||
|
||||
assert test_context.kafka_producer.is_connected(),\
|
||||
f'Kafka producer is not connected to Kafka'
|
||||
|
||||
assert test_context.kafka_consumer.is_connected(),\
|
||||
f'Kafka consumer is not connected to Kafka'
|
||||
|
||||
assert test_context.kafka_admin.is_connected(),\
|
||||
f'Kafka admin is not connected to Kafka'
|
||||
|
||||
active_shards_num = cgw_metrics_get_active_shards_num()
|
||||
|
||||
# This test case requires at least 2 CGW instances
# To avoid test failure in a single-CGW environment - skip the test
if active_shards_num <= 1:
pytest.skip(f"Not enough CGW instances to proceed with test! Expected >= 2, actually running - {active_shards_num}. Skip test.")
|
||||
|
||||
default_shard_id = test_context.default_shard_id()
|
||||
expected_reporter_shard_id: int = 1
|
||||
|
||||
# Get shard info from Redis
|
||||
shard_info = test_context.redis_client.get_shard(default_shard_id)
|
||||
if not shard_info:
|
||||
print(f'Failed to get shard {default_shard_id} info from Redis!')
|
||||
raise Exception(f'Failed to get shard {default_shard_id} info from Redis!')
|
||||
|
||||
# Validate number of assigned groups
|
||||
# assert int(shard_info.get('assigned_groups_num')) == cgw_metrics_get_groups_assigned_num() == 0
|
||||
|
||||
uuid_val = uuid.uuid4()
|
||||
group_id = 100
|
||||
|
||||
# Get highest CGW ID assigned partition
|
||||
partitions = test_context.kafka_admin.get_topic_partitions_for_cgw_id('CnC', ['CGW'], expected_reporter_shard_id)
|
||||
assert len(partitions) > 0
|
||||
|
||||
message = Message()
|
||||
|
||||
# Create single group
|
||||
test_context.kafka_producer.conn.send(test_context.default_producer_topic(), \
|
||||
message.group_create_to_shard(str(group_id), default_shard_id, uuid_val.int), bytes(str(group_id), encoding="utf-8"), partition=partitions[0])
|
||||
ret_msg = test_context.kafka_consumer.get_result_msg(uuid_val.int)
|
||||
if not ret_msg:
|
||||
print('Failed to receive create group result, was expecting ' + str(uuid_val.int) + ' uuid reply')
|
||||
raise Exception('Failed to receive create group result when expected')
|
||||
|
||||
assert (ret_msg.value['type'] == 'infrastructure_group_create_response')
|
||||
assert (int(ret_msg.value['infra_group_id']) == group_id)
|
||||
assert ((uuid.UUID(ret_msg.value['uuid']).int) == (uuid_val.int))
|
||||
assert (ret_msg.value["reporter_shard_id"] == expected_reporter_shard_id)
|
||||
|
||||
if ret_msg.value['success'] is False:
|
||||
print(ret_msg.value['error_message'])
|
||||
raise Exception('Infra group create failed!')
|
||||
|
||||
# Get group info from Redis
|
||||
group_info_redis = test_context.redis_client.get_infrastructure_group(group_id)
|
||||
if not group_info_redis:
|
||||
print(f'Failed to get group {group_id} info from Redis!')
|
||||
raise Exception('Infra group create failed!')
|
||||
|
||||
# Get group info from PSQL
|
||||
group_info_psql = test_context.psql_client.get_infrastructure_group(group_id)
|
||||
if not group_info_psql:
|
||||
print(f'Failed to get group {group_id} info from PSQL!')
|
||||
raise Exception('Infra group create failed!')
|
||||
|
||||
# Validate group
|
||||
assert group_info_psql[0] == int(group_info_redis.get('gid')) == group_id
|
||||
assert int(group_info_redis.get('shard_id')) == default_shard_id
|
||||
|
||||
shard_info = test_context.redis_client.get_shard(default_shard_id)
|
||||
if not shard_info:
|
||||
print(f'Failed to get shard {default_shard_id} info from Redis!')
|
||||
raise Exception(f'Failed to get shard {default_shard_id} info from Redis!!')
|
||||
|
||||
# Validate number of assigned groups
|
||||
# assert int(shard_info.get('assigned_groups_num')) == cgw_metrics_get_groups_assigned_num() == 1
|
||||
|
||||
# Delete single group
|
||||
uuid_val = uuid.uuid4()
|
||||
|
||||
test_context.kafka_producer.conn.send(test_context.default_producer_topic(), \
|
||||
message.group_delete(str(group_id), uuid_val.int), bytes(str(group_id), encoding="utf-8"), partition=partitions[0])
|
||||
ret_msg = test_context.kafka_consumer.get_result_msg(uuid_val.int)
|
||||
if not ret_msg:
|
||||
print('Failed to receive delete group result, was expecting ' + str(uuid_val.int) + ' uuid reply')
|
||||
raise Exception('Failed to receive delete group result when expected')
|
||||
|
||||
assert (ret_msg.value['type'] == 'infrastructure_group_delete_response')
|
||||
assert (int(ret_msg.value['infra_group_id']) == group_id)
|
||||
assert ((uuid.UUID(ret_msg.value['uuid']).int) == (uuid_val.int))
|
||||
assert (ret_msg.value["reporter_shard_id"] == expected_reporter_shard_id)
|
||||
|
||||
if ret_msg.value['success'] is False:
|
||||
print(ret_msg.value['error_message'])
|
||||
raise Exception('Infra group delete failed!')
|
||||
|
||||
# Get shard info from Redis
|
||||
shard_info = test_context.redis_client.get_shard(default_shard_id)
|
||||
if not shard_info:
|
||||
print(f'Failed to get shard {default_shard_id} info from Redis!')
|
||||
raise Exception(f'Failed to get shard {default_shard_id} info from Redis!')
|
||||
|
||||
# Validate group removed from Redis
|
||||
group_info_redis = test_context.redis_client.get_infrastructure_group(group_id)
|
||||
assert group_info_redis == {}
|
||||
|
||||
# Validate group removed from PSQL
|
||||
group_info_psql = test_context.psql_client.get_infrastructure_group(group_id)
|
||||
assert not group_info_psql
|
||||
|
||||
# Validate number of assigned groups
|
||||
# assert int(shard_info.get('assigned_groups_num')) == cgw_metrics_get_groups_assigned_num() == 0
|
||||
|
||||
|
||||
@pytest.mark.usefixtures("test_context",
|
||||
"cgw_probe",
|
||||
"kafka_probe",
|
||||
"kafka_admin_probe",
|
||||
"redis_probe",
|
||||
"psql_probe")
|
||||
def test_single_group_to_shard_1_send_to_shard_1(self, test_context):
"""
This test case verifies CGW interconnection and the group create/delete mechanism:
1) Calculate the Shard ID to Kafka partition map
2) Prepare a group create message for Shard ID 1 (group_id = 100) and send it to the Kafka partition for Shard ID 1
3) The message is expected to be processed by Shard ID 1
4) Validate the reported Shard ID from the group create response message (expected to be Shard ID 1)
5) Repeat steps 2-4 for the group delete message
"""
|
||||
|
||||
assert test_context.kafka_producer.is_connected(),\
|
||||
f'Kafka producer is not connected to Kafka'
|
||||
|
||||
assert test_context.kafka_consumer.is_connected(),\
|
||||
f'Kafka consumer is not connected to Kafka'
|
||||
|
||||
assert test_context.kafka_admin.is_connected(),\
|
||||
f'Kafka admin is not connected to Kafka'
|
||||
|
||||
active_shards_num = cgw_metrics_get_active_shards_num()
|
||||
|
||||
# This test case requires at least 2 CGW instances
# To avoid test failure in a single-CGW environment - skip the test
if active_shards_num <= 1:
pytest.skip(f"Not enough CGW instances to proceed with test! Expected >= 2, actually running - {active_shards_num}. Skip test.")
|
||||
|
||||
shard_id = 1
|
||||
expected_reporter_shard_id: int = 1
|
||||
|
||||
# Get shard info from Redis
|
||||
shard_info = test_context.redis_client.get_shard(shard_id)
|
||||
if not shard_info:
|
||||
print(f'Failed to get shard {shard_id} info from Redis!')
|
||||
raise Exception(f'Failed to get shard {shard_id} info from Redis!')
|
||||
|
||||
# Validate number of assigned groups
|
||||
# assert int(shard_info.get('assigned_groups_num')) == cgw_metrics_get_groups_assigned_num() == 0
|
||||
|
||||
uuid_val = uuid.uuid4()
|
||||
group_id = 100
|
||||
|
||||
# Get highest CGW ID assigned partition
|
||||
partitions = test_context.kafka_admin.get_topic_partitions_for_cgw_id('CnC', ['CGW'], expected_reporter_shard_id)
|
||||
assert len(partitions) > 0
|
||||
|
||||
message = Message()
|
||||
|
||||
# Create single group
|
||||
test_context.kafka_producer.conn.send(test_context.default_producer_topic(), \
|
||||
message.group_create_to_shard(str(group_id), shard_id, uuid_val.int), bytes(str(group_id), encoding="utf-8"), partition=partitions[0])
|
||||
ret_msg = test_context.kafka_consumer.get_result_msg(uuid_val.int)
|
||||
if not ret_msg:
|
||||
print('Failed to receive create group result, was expecting ' + str(uuid_val.int) + ' uuid reply')
|
||||
raise Exception('Failed to receive create group result when expected')
|
||||
|
||||
assert (ret_msg.value['type'] == 'infrastructure_group_create_response')
|
||||
assert (int(ret_msg.value['infra_group_id']) == group_id)
|
||||
assert ((uuid.UUID(ret_msg.value['uuid']).int) == (uuid_val.int))
|
||||
assert (ret_msg.value["reporter_shard_id"] == expected_reporter_shard_id)
|
||||
|
||||
if ret_msg.value['success'] is False:
|
||||
print(ret_msg.value['error_message'])
|
||||
raise Exception('Infra group create failed!')
|
||||
|
||||
# Get group info from Redis
|
||||
group_info_redis = test_context.redis_client.get_infrastructure_group(group_id)
|
||||
if not group_info_redis:
|
||||
print(f'Failed to get group {group_id} info from Redis!')
|
||||
raise Exception('Infra group create failed!')
|
||||
|
||||
# Get group info from PSQL
|
||||
group_info_psql = test_context.psql_client.get_infrastructure_group(group_id)
|
||||
if not group_info_psql:
|
||||
print(f'Failed to get group {group_id} info from PSQL!')
|
||||
raise Exception('Infra group create failed!')
|
||||
|
||||
# Validate group
|
||||
assert group_info_psql[0] == int(group_info_redis.get('gid')) == group_id
|
||||
assert int(group_info_redis.get('shard_id')) == shard_id
|
||||
|
||||
shard_info = test_context.redis_client.get_shard(shard_id)
|
||||
if not shard_info:
|
||||
print(f'Failed to get shard {shard_id} info from Redis!')
|
||||
raise Exception(f'Failed to get shard {shard_id} info from Redis!!')
|
||||
|
||||
# Validate number of assigned groups
|
||||
# assert int(shard_info.get('assigned_groups_num')) == cgw_metrics_get_groups_assigned_num() == 1
|
||||
|
||||
# Delete single group
|
||||
uuid_val = uuid.uuid4()
|
||||
|
||||
test_context.kafka_producer.conn.send(test_context.default_producer_topic(), \
|
||||
message.group_delete(str(group_id), uuid_val.int), bytes(str(group_id), encoding="utf-8"), partition=partitions[0])
|
||||
ret_msg = test_context.kafka_consumer.get_result_msg(uuid_val.int)
|
||||
if not ret_msg:
|
||||
print('Failed to receive delete group result, was expecting ' + str(uuid_val.int) + ' uuid reply')
|
||||
raise Exception('Failed to receive delete group result when expected')
|
||||
|
||||
assert (ret_msg.value['type'] == 'infrastructure_group_delete_response')
|
||||
assert (int(ret_msg.value['infra_group_id']) == group_id)
|
||||
assert ((uuid.UUID(ret_msg.value['uuid']).int) == (uuid_val.int))
|
||||
assert (ret_msg.value["reporter_shard_id"] == expected_reporter_shard_id)
|
||||
|
||||
if ret_msg.value['success'] is False:
|
||||
print(ret_msg.value['error_message'])
|
||||
raise Exception('Infra group delete failed!')
|
||||
|
||||
# Get shard info from Redis
|
||||
shard_info = test_context.redis_client.get_shard(shard_id)
|
||||
if not shard_info:
|
||||
print(f'Failed to get shard {shard_id} info from Redis!')
|
||||
raise Exception(f'Failed to get shard {shard_id} info from Redis!')
|
||||
|
||||
# Validate group removed from Redis
|
||||
group_info_redis = test_context.redis_client.get_infrastructure_group(group_id)
|
||||
assert group_info_redis == {}
|
||||
|
||||
# Validate group removed from PSQL
|
||||
group_info_psql = test_context.psql_client.get_infrastructure_group(group_id)
|
||||
assert not group_info_psql
|
||||
|
||||
# Validate number of assigned groups
|
||||
# assert int(shard_info.get('assigned_groups_num')) == cgw_metrics_get_groups_assigned_num() == 0
|
||||
|
||||
|
||||
@pytest.mark.usefixtures("test_context",
|
||||
"cgw_probe",
|
||||
"kafka_probe",
|
||||
"kafka_admin_probe",
|
||||
"redis_probe",
|
||||
"psql_probe")
|
||||
def test_single_group_to_shard_any_send_to_shard_1(self, test_context):
"""
This test case verifies CGW interconnection and the group create/delete mechanism:
1) Calculate the Shard ID to Kafka partition map
2) Prepare a group create message (group_id = 100, Shard ID not specified) and send it to the Kafka partition for Shard ID 1
3) The message is expected to be processed by Shard ID 1
4) Validate the reported Shard ID from the group create response message (expected to be Shard ID 1)
5) Repeat steps 2-4 for the group delete message
"""
|
||||
|
||||
assert test_context.kafka_producer.is_connected(),\
|
||||
f'Kafka producer is not connected to Kafka'
|
||||
|
||||
assert test_context.kafka_consumer.is_connected(),\
|
||||
f'Kafka consumer is not connected to Kafka'
|
||||
|
||||
assert test_context.kafka_admin.is_connected(),\
|
||||
f'Kafka admin is not connected to Kafka'
|
||||
|
||||
active_shards_num = cgw_metrics_get_active_shards_num()
|
||||
|
||||
# This test case requires at least 2 CGW instances
# To avoid test failure in a single-CGW environment - skip the test
if active_shards_num <= 1:
pytest.skip(f"Not enough CGW instances to proceed with test! Expected >= 2, actually running - {active_shards_num}. Skip test.")
|
||||
|
||||
expected_reporter_shard_id: int = 1
|
||||
|
||||
uuid_val = uuid.uuid4()
|
||||
group_id = 100
|
||||
|
||||
# Get highest CGW ID assigned partition
|
||||
partitions = test_context.kafka_admin.get_topic_partitions_for_cgw_id('CnC', ['CGW'], expected_reporter_shard_id)
|
||||
assert len(partitions) > 0
|
||||
|
||||
message = Message()
|
||||
|
||||
# Create single group
|
||||
test_context.kafka_producer.conn.send(test_context.default_producer_topic(), \
|
||||
message.group_create(str(group_id), uuid_val.int), bytes(str(group_id), encoding="utf-8"), partition=partitions[0])
|
||||
ret_msg = test_context.kafka_consumer.get_result_msg(uuid_val.int)
|
||||
if not ret_msg:
|
||||
print('Failed to receive create group result, was expecting ' + str(uuid_val.int) + ' uuid reply')
|
||||
raise Exception('Failed to receive create group result when expected')
|
||||
|
||||
assert (ret_msg.value['type'] == 'infrastructure_group_create_response')
|
||||
assert (int(ret_msg.value['infra_group_id']) == group_id)
|
||||
assert ((uuid.UUID(ret_msg.value['uuid']).int) == (uuid_val.int))
|
||||
assert (ret_msg.value["reporter_shard_id"] == expected_reporter_shard_id)
|
||||
|
||||
if ret_msg.value['success'] is False:
|
||||
print(ret_msg.value['error_message'])
|
||||
raise Exception('Infra group create failed!')
|
||||
|
||||
# Get group info from Redis
|
||||
group_info_redis = test_context.redis_client.get_infrastructure_group(group_id)
|
||||
if not group_info_redis:
|
||||
print(f'Failed to get group {group_id} info from Redis!')
|
||||
raise Exception('Infra group create failed!')
|
||||
|
||||
# Get group info from PSQL
|
||||
group_info_psql = test_context.psql_client.get_infrastructure_group(group_id)
|
||||
if not group_info_psql:
|
||||
print(f'Failed to get group {group_id} info from PSQL!')
|
||||
raise Exception('Infra group create failed!')
|
||||
|
||||
# Validate group
|
||||
assert group_info_psql[0] == int(group_info_redis.get('gid')) == group_id
|
||||
|
||||
# Delete single group
|
||||
uuid_val = uuid.uuid4()
|
||||
|
||||
test_context.kafka_producer.conn.send(test_context.default_producer_topic(), \
|
||||
message.group_delete(str(group_id), uuid_val.int), bytes(str(group_id), encoding="utf-8"), partition=partitions[0])
|
||||
ret_msg = test_context.kafka_consumer.get_result_msg(uuid_val.int)
|
||||
if not ret_msg:
|
||||
print('Failed to receive delete group result, was expecting ' + str(uuid_val.int) + ' uuid reply')
|
||||
raise Exception('Failed to receive delete group result when expected')
|
||||
|
||||
assert (ret_msg.value['type'] == 'infrastructure_group_delete_response')
|
||||
assert (int(ret_msg.value['infra_group_id']) == group_id)
|
||||
assert ((uuid.UUID(ret_msg.value['uuid']).int) == (uuid_val.int))
|
||||
assert (ret_msg.value["reporter_shard_id"] == expected_reporter_shard_id)
|
||||
|
||||
if ret_msg.value['success'] is False:
|
||||
print(ret_msg.value['error_message'])
|
||||
raise Exception('Infra group delete failed!')
|
||||
|
||||
# Validate group removed from Redis
|
||||
group_info_redis = test_context.redis_client.get_infrastructure_group(group_id)
|
||||
assert group_info_redis == {}
|
||||
|
||||
# Validate group removed from PSQL
|
||||
group_info_psql = test_context.psql_client.get_infrastructure_group(group_id)
|
||||
assert not group_info_psql
|
||||
|
||||
# Validate number of assigned groups
|
||||
# assert int(shard_info.get('assigned_groups_num')) == cgw_metrics_get_groups_assigned_num() == 0
|
||||
1
utils/cert_generator/certs/client/macs.txt
Normal file
File diff suppressed because one or more lines are too long
@@ -1,5 +0,0 @@
|
||||
{"connect": {"jsonrpc":"2.0","method":"connect","params":{"serial":"MAC","firmware":"Rel 1.6 build 1","uuid":1692198868,"capabilities":{"compatible":"x86_64-kvm_x86_64-r0","model":"DellEMC-S5248f-P-25G-DPB","platform":"switch","label_macaddr":"MAC"}}},
|
||||
"state": {"jsonrpc": "2.0", "method": "state", "params": {"serial": "MAC","uuid": 1692198868, "request_uuid": null, "state": {}}},
|
||||
"reboot_response": {"jsonrpc": "2.0", "result": {"serial": "MAC", "status": {"error": 0, "text": "", "when": 0}, "id": "ID"}},
|
||||
"log": {"jsonrpc": "2.0", "method": "log", "params": {"serial": "MAC", "log": "", "severity": 7, "data": {}}}
|
||||
}
|
||||
1
utils/client_simulator/macs.txt
Normal file
File diff suppressed because one or more lines are too long
@@ -1 +1 @@
|
||||
websockets==12.0
|
||||
websockets==13.1
|
||||
|
||||
@@ -9,7 +9,7 @@ CA_CERT_PATH=./tipcerts
|
||||
CLIENT_CERT_PATH=$(pwd)/../cert_generator/certs/client
|
||||
CLIENT_CERT_PATH=./tipcerts
|
||||
#--no-cert-check
|
||||
python3 single.py --mac "$MAC" \
|
||||
PYTHONPATH="$PYTHONPATH:$PWD:$PWD/src/" python3 single.py --mac "$MAC" \
|
||||
--server "$URL" \
|
||||
--ca-cert "$CA_CERT_PATH/ca.crt" \
|
||||
--client-certs-path "$CLIENT_CERT_PATH" \
|
||||
|
||||
102
utils/client_simulator/sim_data/message_templates.json
Normal file
File diff suppressed because one or more lines are too long
@@ -3,8 +3,11 @@ from .utils import get_msg_templates, Args
|
||||
from .log import logger
|
||||
from websockets.sync import client
|
||||
from websockets.exceptions import ConnectionClosedOK, ConnectionClosedError, ConnectionClosed
|
||||
from websockets.frames import *
|
||||
from typing import List
|
||||
import multiprocessing
|
||||
import socket
|
||||
import struct
|
||||
import threading
|
||||
import resource
|
||||
import string
|
||||
@@ -20,13 +23,17 @@ import re
|
||||
|
||||
class Message:
|
||||
def __init__(self, mac: str, size: int):
|
||||
tmp_mac = copy.deepcopy(mac)
|
||||
tmp_mac = tmp_mac.replace(":", "")
|
||||
self.templates = get_msg_templates()
|
||||
self.connect = json.dumps(self.templates["connect"]).replace("MAC", mac)
|
||||
self.connect = json.dumps(self.templates["connect"]).replace("MAC", tmp_mac)
|
||||
self.state = json.dumps(self.templates["state"]).replace("MAC", mac)
|
||||
self.reboot_response = json.dumps(self.templates["reboot_response"]).replace("MAC", mac)
|
||||
self.log = copy.deepcopy(self.templates["log"])
|
||||
self.log["params"]["data"] = {"msg": ''.join(random.choices(string.ascii_uppercase + string.digits, k=size))}
|
||||
self.log = json.dumps(self.log).replace("MAC", mac)
|
||||
self.join = json.dumps(self.templates["join"]).replace("MAC", mac)
|
||||
self.leave = json.dumps(self.templates["leave"]).replace("MAC", mac)
|
||||
|
||||
@staticmethod
|
||||
def to_json(msg) -> str:
|
||||
@@ -61,6 +68,9 @@ class Device:
|
||||
self.ssl_context.check_hostname = False
|
||||
self.ssl_context.verify_mode = ssl.CERT_NONE
|
||||
|
||||
def send_ping(self, socket: client.ClientConnection):
|
||||
socket.ping()
|
||||
|
||||
def send_hello(self, socket: client.ClientConnection):
|
||||
logger.debug(self.messages.connect)
|
||||
socket.send(self.messages.connect)
|
||||
@@ -68,6 +78,24 @@ class Device:
|
||||
def send_log(self, socket: client.ClientConnection):
|
||||
socket.send(self.messages.log)
|
||||
|
||||
def send_state(self, socket: client.ClientConnection):
|
||||
socket.send(self.messages.state)
|
||||
|
||||
def send_join(self, socket: client.ClientConnection):
|
||||
socket.send(self.messages.join)
|
||||
|
||||
def send_leave(self, socket: client.ClientConnection):
|
||||
socket.send(self.messages.leave)
|
||||
|
||||
def get_single_message(self, socket: client.ClientConnection):
|
||||
try:
|
||||
msg = socket.recv(self.interval)
|
||||
return self.messages.from_json(msg)
|
||||
except TimeoutError:
|
||||
return None
|
||||
except:
|
||||
raise
|
||||
|
||||
def handle_messages(self, socket: client.ClientConnection):
|
||||
try:
|
||||
msg = socket.recv(self.interval)
|
||||
@@ -98,7 +126,9 @@ class Device:
|
||||
|
||||
def connect(self):
|
||||
if self._socket is None:
|
||||
self._socket = client.connect(self.server_addr, ssl_context=self.ssl_context, open_timeout=7200)
|
||||
# 20 seconds is more than enough to establish a connection and
# exchange handshakes.
|
||||
self._socket = client.connect(self.server_addr, ssl=self.ssl_context, open_timeout=20, close_timeout=20)
|
||||
return self._socket
|
||||
|
||||
def disconnect(self):
|
||||
@@ -117,7 +147,8 @@ class Device:
|
||||
logger.error("Connection to GW is lost. Trying to reconnect...")
|
||||
self.connect()
|
||||
if time.time() - start > self.interval:
|
||||
logger.info(f"Sent log")
|
||||
logger.info(f"Device sim heartbeat")
|
||||
self.send_state(self._socket)
|
||||
self.send_log(self._socket)
|
||||
start = time.time()
|
||||
self.handle_messages(self._socket)
|
||||
@@ -140,7 +171,8 @@ class Device:
|
||||
logger.error("Connection to GW is lost. Trying to reconnect...")
|
||||
self.connect()
|
||||
if time.time() - start > self.interval:
|
||||
logger.info(f"Sent log")
|
||||
logger.info(f"Device sim heartbeat")
|
||||
self.send_state(self._socket)
|
||||
self.send_log(self._socket)
|
||||
start = time.time()
|
||||
self.handle_messages(self._socket)
|
||||
@@ -150,11 +182,16 @@ class Device:
|
||||
|
||||
|
||||
def get_avail_mac_addrs(path, mask="XX:XX:XX:XX:XX:XX"):
|
||||
_mask = "".join(("[0-9a-fA-F]" if c == "X" else c) for c in mask.upper())
|
||||
certs = sorted(os.listdir(path))
|
||||
macs = set(cert.split(".")[0] for cert in certs if "crt" in cert and re.match(_mask, cert))
|
||||
return list(macs)
|
||||
mask = mask.upper()
|
||||
_mask = "".join(("[0-9a-fA-F]" if c == "X" else c) for c in mask)
|
||||
macs = open(path + '/macs.txt', 'r').read().split()
|
||||
macs = list(macs)
|
||||
new_macs = list()
|
||||
for mac in macs:
|
||||
if re.match(_mask, mac):
|
||||
new_macs.append(mac)
|
||||
|
||||
return new_macs
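# Illustrative usage (hypothetical path and mask, not part of this change):
#   get_avail_mac_addrs("../cert_generator/certs/client", mask="02:XX:XX:XX:XX:XX")
# reads whitespace-separated MAC addresses from <path>/macs.txt and returns only
# the entries matching the 02:* prefix.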
|
||||
|
||||
def update_fd_limit():
|
||||
soft, hard = resource.getrlimit(resource.RLIMIT_NOFILE)
|
||||
@@ -178,8 +215,8 @@ def process(args: Args, mask: str, start_event: multiprocessing.Event, stop_even
|
||||
update_fd_limit()
|
||||
|
||||
devices = [Device(mac, args.server, args.ca_path, args.msg_interval, args.msg_size,
|
||||
os.path.join(args.cert_path, f"{mac}.crt"),
|
||||
os.path.join(args.cert_path, f"{mac}.key"),
|
||||
os.path.join(args.cert_path, f"base.crt"),
|
||||
os.path.join(args.cert_path, f"base.key"),
|
||||
args.check_cert,
|
||||
start_event, stop_event)
|
||||
for mac, _ in zip(macs, range(args.number_of_connections))]
|
||||
|
||||
@@ -7,7 +7,7 @@ import re
|
||||
import os
|
||||
|
||||
|
||||
TEMPLATE_LOCATION = "./data/message_templates.json"
|
||||
TEMPLATE_LOCATION = "./sim_data/message_templates.json"
|
||||
|
||||
|
||||
@dataclass
|
||||
|
||||
393
utils/docker/StartMultiCGW.py
Normal file
@@ -0,0 +1,393 @@
|
||||
import argparse
|
||||
import os
|
||||
import subprocess
|
||||
import shutil
|
||||
from typing import Final
|
||||
|
||||
from jinja2 import Environment, FileSystemLoader
|
||||
|
||||
# CGW Docker image & container params
|
||||
DEFAULT_NUMBER_OF_CGW_INSTANCES: Final[int] = 1
|
||||
DOCKER_COMPOSE_TEMPLATE_FILE_NAME: Final[str] = "docker-compose-template.yml.j2"
|
||||
DOCKER_COMPOSE_MULTI_CGW_FILE_NAME: Final[str] = "docker-compose-multi-cgw.yml"
|
||||
CGW_IMAGE_BASE_NAME: Final[str] = "openlan-cgw-img"
|
||||
CGW_CONTAINER_BASE_NAME: Final[str] = "openlan_cgw"
|
||||
|
||||
# CGW params
|
||||
DEFAULT_CGW_BASE_ID: Final[int] = 0
|
||||
DEFAULT_LOG_LEVEL: Final[str] = "debug"
|
||||
|
||||
# CGW groups & group infras params
|
||||
DEFAULT_GROUPS_CAPACITY: Final[int] = 1000
|
||||
DEFAULT_GROUPS_THRESHOLD: Final[int] = 50
|
||||
DEFAULT_GROUP_INFRAS_CAPACITY: Final[int] = 2000
|
||||
|
||||
# GRPC params
|
||||
DEFAULT_GRPC_LISTENING_IP: Final[str] = "0.0.0.0"
|
||||
DEFAULT_GRPC_LISTENING_BASE_PORT: Final[int] = 50051
|
||||
DEFAULT_GRPC_PUBLIC_BASE_PORT: Final[int] = 50051
|
||||
DEFAULT_GRPC_PUBLIC_HOST: Final[str] = "openlan_cgw"
|
||||
|
||||
# WSS params
|
||||
DEFAULT_WSS_IP: Final[str] = "0.0.0.0"
|
||||
DEFAULT_WSS_BASE_PORT: Final[int] = 15002
|
||||
DEFAULT_WSS_T_NUM: Final[int] = 4
|
||||
DEFAULT_WSS_CAS: Final[str] = "cas.pem"
|
||||
DEFAULT_WSS_CERT: Final[str] = "cert.pem"
|
||||
DEFAULT_WSS_KEY: Final[str] = "key.pem"
|
||||
|
||||
# Kafka params
|
||||
DEFAULT_KAFKA_HOST: Final[str] = "docker-broker-1"
|
||||
DEFAULT_KAFKA_PORT: Final[int] = 9092
|
||||
DEFAULT_KAFKA_CONSUME_TOPIC: Final[str] = "CnC"
|
||||
DEFAULT_KAFKA_PRODUCE_TOPIC: Final[str] = "CnC_Res"
|
||||
|
||||
# DB params
|
||||
DEFAULT_DB_HOST: Final[str] = "docker-postgresql-1"
|
||||
DEFAULT_DB_PORT: Final[int] = 5432
|
||||
DEFAULT_DB_NAME: Final[str] = "cgw"
|
||||
DEFAULT_DB_USER: Final[str] = "cgw"
|
||||
DEFAULT_DB_PASW: Final[str] = "123"
|
||||
DEFAULT_DB_TLS: Final[str] = "no"
|
||||
|
||||
# Redis params
|
||||
DEFAULT_REDIS_HOST: Final[str] = "docker-redis-1"
|
||||
DEFAULT_REDIS_PORT: Final[int] = 6379
|
||||
DEFAULT_REDIS_TLS: Final[str] = "no"
|
||||
DEFAULT_REDIS_USERNAME: Final[str] = ""
|
||||
DEFAULT_REDIS_PASSWORD: Final[str] = ""
|
||||
|
||||
# Metrics params
|
||||
DEFAULT_METRICS_BASE_PORT: Final[int] = 8080
|
||||
|
||||
# TLS params: cert volumes
|
||||
DEFAULT_CERTS_PATH="../cert_generator/certs/server/"
|
||||
DEFAULT_CLIENT_CERTS_PATH="../cert_generator/certs/client/"
|
||||
|
||||
CONTAINTER_CERTS_VOLUME: Final[str] = "/etc/cgw/certs"
|
||||
CONTAINTER_NB_INFRA_CERTS_VOLUME: Final[str] = "/etc/cgw/nb_infra/certs"
|
||||
|
||||
# Cert & key files name
|
||||
DEFAULT_CERT_GENERATOR_PATH="../cert_generator"
|
||||
|
||||
DEFAULT_WSS_CAS="cas.pem"
|
||||
DEFAULT_WSS_CERT="cert.pem"
|
||||
DEFAULT_WSS_KEY="key.pem"
|
||||
DEFAULT_CLIENT_CERT="base.crt"
|
||||
DEFAULT_CLIENT_KEY="base.key"
|
||||
|
||||
# TLS params
|
||||
DEFAULT_NB_INFRA_TLS: Final[str] = "no"
|
||||
DEFAULT_ALLOW_CERT_MISMATCH: Final[str] = "yes"
|
||||
|
||||
# UCentral params
|
||||
DEFAULT_UCENTRAL_AP_DATAMODEL_URI: Final[str] = "https://raw.githubusercontent.com/Telecominfraproject/wlan-ucentral-schema/main/ucentral.schema.json"
|
||||
DEFAULT_UCENTRAL_SWITCH_DATAMODEL_URI: Final[str] = "https://raw.githubusercontent.com/Telecominfraproject/ols-ucentral-schema/main/ucentral.schema.json"
|
||||
|
||||
|
||||
def get_realpath(base_path) -> str:
|
||||
"""
|
||||
Get absolute path from base
|
||||
"""
|
||||
return str(os.path.realpath(base_path))
|
||||
|
||||
|
||||
# Certificates update
|
||||
def certificates_update(certs_path: str = DEFAULT_CERTS_PATH, client_certs_path: str = DEFAULT_CLIENT_CERTS_PATH):
|
||||
"""
|
||||
Generate server & client certificates
|
||||
"""
|
||||
missing_files = any(
|
||||
not os.path.isfile(os.path.join(certs_path, file))
|
||||
for file in [DEFAULT_WSS_CERT, DEFAULT_WSS_KEY, DEFAULT_WSS_CAS]
|
||||
) or any (
|
||||
not os.path.isfile(os.path.join(client_certs_path, file))
|
||||
for file in [DEFAULT_CLIENT_CERT, DEFAULT_CLIENT_KEY]
|
||||
)
|
||||
|
||||
if missing_files:
|
||||
print(f"WARNING: At specified path {certs_path}, either CAS, CERT, or KEY is missing!")
|
||||
print(f"WARNING: Changing source folder for certificates to default: {client_certs_path} and generating self-signed...")
|
||||
|
||||
cert_gen_path = get_realpath(DEFAULT_CERT_GENERATOR_PATH)
|
||||
|
||||
# Clean up old certificates
|
||||
cert_subfolders = ["ca", "server", "client"]
|
||||
for subfolder in cert_subfolders:
|
||||
cert_folder = os.path.join(cert_gen_path, "certs", subfolder)
|
||||
for file in os.listdir(cert_folder):
|
||||
if file.endswith((".crt", ".key")):
|
||||
os.remove(os.path.join(cert_folder, file))
|
||||
|
||||
# Generate new certificates
|
||||
try:
|
||||
# Save current working directory
|
||||
original_dir = os.getcwd()
|
||||
|
||||
os.chdir(cert_gen_path)
|
||||
print(f"Changed directory to: {os.getcwd()}")
|
||||
|
||||
cert_gen_script = "./generate_certs.sh"
|
||||
|
||||
subprocess.run([cert_gen_script, "-a"], check=True)
|
||||
subprocess.run([cert_gen_script, "-s"], check=True)
|
||||
subprocess.run([cert_gen_script, "-c", "1", "-m", "02:00:00:00:00:00"], check=True)
|
||||
|
||||
# Copy generated certificates to default paths
|
||||
shutil.copy(os.path.join(cert_gen_path, "certs", "ca", "ca.crt"), os.path.join(DEFAULT_CERTS_PATH, DEFAULT_WSS_CAS))
|
||||
shutil.copy(os.path.join(cert_gen_path, "certs", "server", "gw.crt"), os.path.join(DEFAULT_CERTS_PATH, DEFAULT_WSS_CERT))
|
||||
shutil.copy(os.path.join(cert_gen_path, "certs", "server", "gw.key"), os.path.join(DEFAULT_CERTS_PATH, DEFAULT_WSS_KEY))
|
||||
|
||||
for client_file in os.listdir(os.path.join(cert_gen_path, "certs", "client")):
|
||||
if client_file.endswith(".crt"):
|
||||
shutil.copy(
|
||||
os.path.join(cert_gen_path, "certs", "client", client_file),
|
||||
os.path.join(DEFAULT_CLIENT_CERTS_PATH, DEFAULT_CLIENT_CERT)
|
||||
)
|
||||
elif client_file.endswith(".key"):
|
||||
shutil.copy(
|
||||
os.path.join(cert_gen_path, "certs", "client", client_file),
|
||||
os.path.join(DEFAULT_CLIENT_CERTS_PATH, DEFAULT_CLIENT_KEY)
|
||||
)
|
||||
|
||||
print("Generating self-signed certificates done!")
|
||||
except subprocess.CalledProcessError as e:
|
||||
print(f"Error while generating certificates: {e}")
|
||||
finally:
|
||||
# Change back to the original directory
|
||||
os.chdir(original_dir)
|
||||
print(f"Returned to original directory: {os.getcwd()}")
|
||||
|
||||
|
||||
# Jinja2 template generator
|
||||
def get_cgw_image_base_name() -> str:
|
||||
"""
|
||||
Returns CGW Docker image base name
|
||||
"""
|
||||
return CGW_IMAGE_BASE_NAME
|
||||
|
||||
|
||||
def get_cgw_image_tag() -> str:
|
||||
"""
|
||||
Returns CGW Docker image tag
|
||||
"""
|
||||
tag = None
|
||||
|
||||
try:
|
||||
# Check if there are any uncommitted changes (ignoring untracked files)
|
||||
status_output = subprocess.run(
|
||||
["git", "status", "--porcelain", "--untracked-files=no"],
|
||||
stdout=subprocess.PIPE,
|
||||
stderr=subprocess.PIPE,
|
||||
text=True,
|
||||
check=True
|
||||
).stdout.strip()
|
||||
|
||||
# Get the short commit hash
|
||||
commit_hash = subprocess.run(
|
||||
["git", "rev-parse", "--short", "HEAD"],
|
||||
stdout=subprocess.PIPE,
|
||||
stderr=subprocess.PIPE,
|
||||
text=True,
|
||||
check=True
|
||||
).stdout.strip()
|
||||
|
||||
# Append '-dirty' if there are uncommitted changes
|
||||
if status_output:
|
||||
tag = f"{commit_hash}-dirty"
|
||||
else:
|
||||
tag = commit_hash
|
||||
|
||||
except subprocess.CalledProcessError as e:
|
||||
print(f"Error: {e.stderr.strip()}")
|
||||
|
||||
return tag
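# Illustrative outcome: on a clean checkout this returns a short hash such as
# "a1b2c3d", with uncommitted tracked changes it returns "a1b2c3d-dirty", and
# if the git commands fail it returns None (the compose template then renders
# a null image tag).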
|
||||
|
||||
|
||||
def get_cgw_container_base_name() -> str:
|
||||
"""
|
||||
Returns CGW Docker container base name
|
||||
"""
|
||||
return CGW_CONTAINER_BASE_NAME
|
||||
|
||||
|
||||
def get_cgw_instances_num() -> int:
|
||||
"""
|
||||
Returns CGW instances number from env. variable,
|
||||
or default value "DEFAULT_NUMBER_OF_CGW_INSTANCES"
|
||||
"""
|
||||
|
||||
# Number of clients from an environment variable or fallback to default
|
||||
number_of_cgw_instances = int(os.getenv("CGW_INSTANCES_NUM", DEFAULT_NUMBER_OF_CGW_INSTANCES))
|
||||
|
||||
return number_of_cgw_instances
|
||||
|
||||
|
||||
def remove_docker_compose_multi_cgw_file(docker_compose_multi_cgw_file: str = DOCKER_COMPOSE_MULTI_CGW_FILE_NAME) -> None:
|
||||
"""
|
||||
Remove "docker-compose-multi-cgw.yml" file
|
||||
"""
|
||||
|
||||
if os.path.isfile(docker_compose_multi_cgw_file):
|
||||
try:
|
||||
os.remove(docker_compose_multi_cgw_file)
|
||||
except Exception as e:
|
||||
print(f"Error: Filed to remove file {docker_compose_multi_cgw_file}! Error: {e}")
|
||||
|
||||
|
||||
def generate_docker_compose_file(instances_num: int,
|
||||
docker_compose_template_file: str = DOCKER_COMPOSE_TEMPLATE_FILE_NAME,
|
||||
docker_compose_multi_cgw_file: str = DOCKER_COMPOSE_MULTI_CGW_FILE_NAME):
|
||||
"""
|
||||
Generate docker compose file based on template
|
||||
"""
|
||||
|
||||
# 1. Get CGW image name
|
||||
image_name = get_cgw_image_base_name()
|
||||
|
||||
# 2. Get CGW image tag
|
||||
image_tag = get_cgw_image_tag()
|
||||
|
||||
# 3. Get CGW container name
|
||||
container_name = get_cgw_container_base_name()
|
||||
|
||||
# 4. Get certs realpath
|
||||
certs_realpath = get_realpath(DEFAULT_CERTS_PATH)
|
||||
|
||||
print(f'Generate Docker Compose file!')
|
||||
print(f'\tNumber of CGW instances: {instances_num}')
|
||||
print(f'\tCGW image name : {image_name}')
|
||||
print(f'\tCGW image tag : {image_tag}')
|
||||
print(f'\tCGW container name : {container_name}')
|
||||
|
||||
# 4. Load the Jinja2 template
|
||||
env = Environment(loader=FileSystemLoader(searchpath="."))
|
||||
template = env.get_template(docker_compose_template_file)
|
||||
|
||||
# 5. Render the template with the variable
|
||||
output = template.render(cgw_instances_num = instances_num,
|
||||
cgw_image_name = image_name,
|
||||
cgw_image_tag = image_tag,
|
||||
cgw_container_name = container_name,
|
||||
cgw_base_id = DEFAULT_CGW_BASE_ID,
|
||||
cgw_grpc_listening_ip = DEFAULT_GRPC_LISTENING_IP,
|
||||
cgw_grpc_listening_base_port = DEFAULT_GRPC_LISTENING_BASE_PORT,
|
||||
cgw_grpc_public_host = DEFAULT_GRPC_PUBLIC_HOST,
|
||||
cgw_grpc_public_base_port = DEFAULT_GRPC_PUBLIC_BASE_PORT,
|
||||
cgw_db_host = DEFAULT_DB_HOST,
|
||||
cgw_db_port = DEFAULT_DB_PORT,
|
||||
cgw_db_name = DEFAULT_DB_NAME,
|
||||
cgw_db_username = DEFAULT_DB_USER,
|
||||
cgw_db_password = DEFAULT_DB_PASW,
|
||||
cgw_db_tls = DEFAULT_DB_TLS,
|
||||
cgw_kafka_host = DEFAULT_KAFKA_HOST,
|
||||
cgw_kafka_port = DEFAULT_KAFKA_PORT,
|
||||
cgw_kafka_consumer_topic = DEFAULT_KAFKA_CONSUME_TOPIC,
|
||||
cgw_kafka_producer_topic = DEFAULT_KAFKA_PRODUCE_TOPIC,
|
||||
cgw_log_level = DEFAULT_LOG_LEVEL,
|
||||
cgw_redis_host = DEFAULT_REDIS_HOST,
|
||||
cgw_redis_port = DEFAULT_REDIS_PORT,
|
||||
cgw_redis_tls = DEFAULT_REDIS_TLS,
|
||||
cgw_redis_username = DEFAULT_REDIS_USERNAME,
|
||||
cgw_redis_password = DEFAULT_REDIS_PASSWORD,
|
||||
cgw_metrics_base_port = DEFAULT_METRICS_BASE_PORT,
|
||||
cgw_wss_ip = DEFAULT_WSS_IP,
|
||||
cgw_wss_base_port = DEFAULT_WSS_BASE_PORT,
|
||||
cgw_wss_cas = DEFAULT_WSS_CAS,
|
||||
cgw_wss_cert = DEFAULT_WSS_CERT,
|
||||
cgw_wss_key = DEFAULT_WSS_KEY,
|
||||
cgw_wss_t_num = DEFAULT_WSS_T_NUM,
|
||||
cgw_ucentral_ap_datamodel_uri = DEFAULT_UCENTRAL_AP_DATAMODEL_URI,
|
||||
cgw_ucentral_switch_datamodel_uri = DEFAULT_UCENTRAL_SWITCH_DATAMODEL_URI,
|
||||
cgw_groups_capacity = DEFAULT_GROUPS_CAPACITY,
|
||||
cgw_groups_threshold = DEFAULT_GROUPS_THRESHOLD,
|
||||
cgw_group_infras_capacity = DEFAULT_GROUP_INFRAS_CAPACITY,
|
||||
cgw_allow_certs_missmatch = DEFAULT_ALLOW_CERT_MISMATCH,
|
||||
cgw_nb_infra_tls = DEFAULT_NB_INFRA_TLS,
|
||||
container_certs_voulume = CONTAINTER_CERTS_VOLUME,
|
||||
container_nb_infra_certs_voulume = CONTAINTER_NB_INFRA_CERTS_VOLUME,
|
||||
default_certs_path = certs_realpath)
|
||||
|
||||
# 6. Save the rendered template as docker-compose.yml
|
||||
with open(docker_compose_multi_cgw_file, "w") as f:
|
||||
f.write(output)
|
||||
|
||||
|
||||
def docker_compose_up(docker_compose_file: str = "docker-compose.yml"):
|
||||
"""
|
||||
Runs `docker compose up` with the specified docker-compose file.
|
||||
|
||||
:param docker_compose_file: Path to the docker-compose file (optional).
|
||||
"""
|
||||
|
||||
if docker_compose_file:
|
||||
if not os.path.isfile(docker_compose_file):
|
||||
print(f"Error: The specified compose file '{docker_compose_file}' does not exist.")
|
||||
return
|
||||
cmd = ["docker", "compose", "--file", docker_compose_file, "up", "-d"]
|
||||
else:
|
||||
cmd = ["docker", "compose", "up", "-d"]
|
||||
|
||||
try:
|
||||
print(f"Running command: {' '.join(cmd)}")
|
||||
subprocess.run(cmd, check=True)
|
||||
print("Docker Compose started successfully.")
|
||||
except subprocess.CalledProcessError as e:
|
||||
print(f"Error: Failed to run docker compose up. {e}")
|
||||
|
||||
|
||||
def docker_compose_down(docker_compose_file: str = "docker-compose.yml"):
|
||||
"""
|
||||
Runs `docker compose down` with the specified docker-compose file.
|
||||
|
||||
:param docker_compose_file: Path to the docker-compose file (optional).
|
||||
"""
|
||||
|
||||
if docker_compose_file:
|
||||
if not os.path.isfile(docker_compose_file):
|
||||
print(f"The specified compose file '{docker_compose_file}' does not exist.")
|
||||
return
|
||||
cmd = ["docker", "compose", "--file", docker_compose_file, "down"]
|
||||
else:
|
||||
cmd = ["docker", "compose", "down"]
|
||||
|
||||
try:
|
||||
print(f"Running command: {' '.join(cmd)}")
|
||||
subprocess.run(cmd, check=True)
|
||||
print("Docker Compose stopped successfully.")
|
||||
except subprocess.CalledProcessError as e:
|
||||
print(f"Error: Failed to run docker compose down. {e}")
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
# Create the parser
|
||||
parser = argparse.ArgumentParser(description="Demo application to parse arguments.")
|
||||
|
||||
# Add arguments
|
||||
parser.add_argument("--start", action="store_true", help="Stop all Docker Composes. Clean up and generate new compose file. Start Docker Compose.")
|
||||
parser.add_argument("--stop", action="store_true", help="Stop all Docker Composes.")
|
||||
parser.add_argument("--generate-compose", action="store_true", help="Generate new Docker Compose file.")
|
||||
|
||||
# Parse the arguments
|
||||
args = parser.parse_args()
|
||||
|
||||
if args.start or args.stop:
|
||||
# 1. Try to stop default docker compose
|
||||
docker_compose_down()
|
||||
|
||||
# 2. Try to stop multi cgw docker compose
|
||||
docker_compose_down(DOCKER_COMPOSE_MULTI_CGW_FILE_NAME)
|
||||
|
||||
if args.start or args.generate_compose:
|
||||
# 3. Remove old multi cgw docker compose file
|
||||
remove_docker_compose_multi_cgw_file()
|
||||
|
||||
# 4. Update certificates
|
||||
certificates_update()
|
||||
|
||||
# 5. Generate new multi cgw docker compose file
|
||||
generate_docker_compose_file(get_cgw_instances_num())
|
||||
|
||||
if args.start:
|
||||
# 5. Try to start multi cgw docker compose
|
||||
docker_compose_up(DOCKER_COMPOSE_MULTI_CGW_FILE_NAME)
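# Example invocations (illustrative):
#   CGW_INSTANCES_NUM=3 python3 StartMultiCGW.py --start
#   python3 StartMultiCGW.py --generate-compose    # only (re)render the compose file
#   python3 StartMultiCGW.py --stop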
|
||||
58
utils/docker/certs/server.crt
Normal file
@@ -0,0 +1,58 @@
|
||||
-----BEGIN CERTIFICATE-----
|
||||
MIIE+jCCAuKgAwIBAgIUKNl5fnws7cASsTTcKxmACp8L+mMwDQYJKoZIhvcNAQEL
|
||||
BQAwDTELMAkGA1UEAwwCQ0EwHhcNMjQwNzE3MTYxODA1WhcNMjUwNzE3MTYxODA1
|
||||
WjAUMRIwEAYDVQQDDAlsb2NhbGhvc3QwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAw
|
||||
ggIKAoICAQC/DfDxcFkyH2tjS5hbEnDPF7cLFd03UsgiqPTgLwGdyYnNYURIGMxs
|
||||
gl243FkycmYRz1OsEY1+zf71vi0XNulfdl83+1t+hZcEt1HGzXrO92smznHdDBL7
|
||||
1sqDWO00n1XSmWHo7J9qIWjuvKoBwE5lS25ghSzuZWP/P5WBvOa/2wUyfOdB33DV
|
||||
8CiHCBjf1C2tp+sqw5kFKT0v7lSZTLnCMRH0VlzRc/1CHHrVn7VyvpN618QDJ4D2
|
||||
AjSxs+uGhlu7ppWYXdB34bVJT0ffT0KkPnOQ6pKJ8uLXxMpSfgATHERZfigXZ5yb
|
||||
yTMx/Rwclil828icwowizmGs8AXrI7vk/g4t4JJ74yGIc0A+HKsQ2Gu193pmwQNH
|
||||
pOctN7NMnF+Lvcqf7j3vIWCLVSJDXUom28VAr/egfr3K5xEfipaiePlAUV6PF90k
|
||||
Ou3p/kHlNS9dkqx4tM1z6surq4znr3+oq4Ldr+1UNRnBLc/yT01IwdbD2NKPTSJI
|
||||
yO9uybx0yU04zJC6ZhlZb+Z0kOtF1+T2jzbSjFKIlxoDBUlXonGVW4TXh5S1CUFH
|
||||
FxyJfsPjocayOYikCb6chTj+9aiTaWJI8txLUO7ejvIINZRg7C1hgBobETePcbvZ
|
||||
gRFQCbcQUh/6yRTVY+3aOPhf/DcKmb/eM9ijsQxsZaAYrOnu8xXkYQIDAQABo0sw
|
||||
STAJBgNVHRMEAjAAMBMGA1UdJQQMMAoGCCsGAQUFBwMBMAsGA1UdDwQEAwIFoDAa
|
||||
BgNVHREEEzARgglsb2NhbGhvc3SHBH8AAAEwDQYJKoZIhvcNAQELBQADggIBAJrs
|
||||
4xelo+wV3kNoqztngdgAFnoNmxE7xN1c/XrdiIc/xY/xQz61lAUF5ouyy1ma/VB9
|
||||
a9/5exYWiXW8TpZM7CwccYk7ShDpoIZvUTj4YwNo2F9zlYRsgTjk1ekYz0PKsQGz
|
||||
v8dF9xmUYn86bGITu+MlJ4konXdJ2riaV2Cx6LwErLXZ0mGxmHNQ/r49QsApEtFf
|
||||
Y3P+2uYkmSV8UVvRTClyWoxVbojAyAXBv8K5/5/Yuq/NEdl36w6P+gVTXcK3eXy9
|
||||
2tktdHd1qAowQFAcdn5h+nmrnYMCdD3F3wu0CLDUFVunY6lQg5Gxsft3s95kbucT
|
||||
qFrl1Fr1xOaU4pCLwMcUZ6sjURH1jAiSWMdtM1imRXuQDDFvD4DQAktg9UR5UTlM
|
||||
0NFO1GXIMTA9y2MxQeZTUC7zEF7QRjXJ/9xW6VBK+1iFDyzYa+7iFb7Gsqeu5WHv
|
||||
p5fiZe6JKN93XzzWcCWEg98IIS01yTGiygkyO2yWOeZ4xEWQuT7p4gi3e5oydBgC
|
||||
kVsd6slWlJtDP7iGYOwJXucw6pvKMGYj7Ol4kxPosyIb9SV42cp52JAIgPLKTaHn
|
||||
kmrwIfYV+Zn7xoX8FERg8/Oua0mOeeij9JREoYwm5XV52Fhv3p7qDSqe3HIEL6ft
|
||||
SfHCEEAf2JEwQj9R+QWhPrWAeNPUqTeianr8Ks9e
|
||||
-----END CERTIFICATE-----
|
||||
-----BEGIN CERTIFICATE-----
|
||||
MIIE5DCCAsygAwIBAgIUZwOLM/yaHJBJ8XDvAxE/P3xDFqgwDQYJKoZIhvcNAQEL
|
||||
BQAwDTELMAkGA1UEAwwCQ0EwHhcNMjQwNzE3MTYxNjAxWhcNMjUwNzE3MTYxNjAx
|
||||
WjANMQswCQYDVQQDDAJDQTCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIB
|
||||
AKg69y4KkyMzE0WNPSlwHfF90dy+oqQ9OETVEeSkNx3ImDqx07tdv/z8QZRjFTSy
|
||||
1OI7S3jSpcinTFlCCoa1mNcEe7Fut2u63YPENRj/lLZpPxo0S168A9yUjkN4c36u
|
||||
zNyLirBxtqmuxp/4jJ4I0nyUN3eVpsdLztR7mMRm0vvaU5DjeevmtXvk4aiAwLov
|
||||
uffnQhte9hGY/ZWfbG7EZ4+okMP7m6W6XWxaMShknzxWQONmCv8wkEA3yEUhvyzl
|
||||
8abpj3SO5vaW6eHmr7sGXMDx6DHs3UCH9Bk3kBmylzttSRH3p4fBtYZle11dlC7N
|
||||
Ks+QFijfTkN5kIHyuaUFieGKkzeHnc+ACJMUultTBdKwOCZMAk4IUkwG8G5LYozH
|
||||
I2e5FRWlvUJS4WgF8Vy6dfCSjALlGES62JM0hc6hnqZmKR3A2xkxTM3vcZElq/w1
|
||||
Ibi6ezqbO5QmqBuoNGIgsphuF8cYtdla30FQumQpN2WuyWFkBUwkjMJDooWdQeJT
|
||||
pAntBP+Gx+PjY5329uMf4Q0Q7JGBXO9LVeRGgav8sJDG4EadXzgtigP9nyjy1L3b
|
||||
QHPkMoqPXYLSqSC2hxxbeiHPA2Kn10/Wx90MSbE81CS4LSoHVMSQ7B2pc3zHkcTn
|
||||
PnQuQdDFzFKXeso6JCI66dPCBGnVHIhhfbjgplXUvlkNAgMBAAGjPDA6MAwGA1Ud
|
||||
EwQFMAMBAf8wCwYDVR0PBAQDAgEGMB0GA1UdDgQWBBS3FvhJKWuL/EX+hMJi6i37
|
||||
q9OZCjANBgkqhkiG9w0BAQsFAAOCAgEAU9sjWemPhkPPdDc4Ph280gvQKWfLhfx/
|
||||
pzWtKHmsMPgz4XglQRzj5yHFe5RW0dTUk6FGHgxaIXQaRoP1L2T8k2Z4GLjUNyXP
|
||||
nUEoYgs4OpnZytg+rgHr3cu7qocM9lSi1ZoE4XomijJhe5okREU7tZvj1pHL1Zmn
|
||||
TEUXIx8ktl4VDgDXiSS7QXQ9W2chs2gYyxxWeNUyHckURNzbCDnJlkuOBdsXV7eV
|
||||
v7D60JHP2Pem7zrLhV/G8P7JaRVfY2ZTZB2lH7tKBEhYIpa5muG0JjjyoDkXyOfB
|
||||
s6VX0xjt8ny+E7wklYJb3TQRJRkOFn08HvhV9Jycs2HvpjSsdHBjn+NbF1ea74TI
|
||||
2brgW1GjR30+H5mJPsbr79Qriu0ibuwor/+u8UvPrEaeXpbUp+QJ0XAtzXNqrsV3
|
||||
yCYtIvDstOrxV7M3hYbZyoBiq92dgwVyMAydUMxMn9EAQBqfpen9Hh7pnET9ORNU
|
||||
QcJumJDf4YUdAgB6qpTW54LhmhwQOm2zTGuMUdARYGmVNfZbd7HSW/JVJcYtFjus
|
||||
obiozqvXQVuu1g52JmcpWAlxJLXSFABPifaCI7zy1+XZi4ppzSNKG4+N82QkM5xr
|
||||
KyATGQnS67yAdYzjBz8bsA0vf72oVVOaWLxCmVpj25mf+G+HCYjnwtWlRgH64tBL
|
||||
vQLbQZQgNHo=
|
||||
-----END CERTIFICATE-----
|
||||
52
utils/docker/certs/server.key
Normal file
@@ -0,0 +1,52 @@
|
||||
-----BEGIN PRIVATE KEY-----
|
||||
MIIJQwIBADANBgkqhkiG9w0BAQEFAASCCS0wggkpAgEAAoICAQC/DfDxcFkyH2tj
|
||||
S5hbEnDPF7cLFd03UsgiqPTgLwGdyYnNYURIGMxsgl243FkycmYRz1OsEY1+zf71
|
||||
vi0XNulfdl83+1t+hZcEt1HGzXrO92smznHdDBL71sqDWO00n1XSmWHo7J9qIWju
|
||||
vKoBwE5lS25ghSzuZWP/P5WBvOa/2wUyfOdB33DV8CiHCBjf1C2tp+sqw5kFKT0v
|
||||
7lSZTLnCMRH0VlzRc/1CHHrVn7VyvpN618QDJ4D2AjSxs+uGhlu7ppWYXdB34bVJ
|
||||
T0ffT0KkPnOQ6pKJ8uLXxMpSfgATHERZfigXZ5ybyTMx/Rwclil828icwowizmGs
|
||||
8AXrI7vk/g4t4JJ74yGIc0A+HKsQ2Gu193pmwQNHpOctN7NMnF+Lvcqf7j3vIWCL
|
||||
VSJDXUom28VAr/egfr3K5xEfipaiePlAUV6PF90kOu3p/kHlNS9dkqx4tM1z6sur
|
||||
q4znr3+oq4Ldr+1UNRnBLc/yT01IwdbD2NKPTSJIyO9uybx0yU04zJC6ZhlZb+Z0
|
||||
kOtF1+T2jzbSjFKIlxoDBUlXonGVW4TXh5S1CUFHFxyJfsPjocayOYikCb6chTj+
|
||||
9aiTaWJI8txLUO7ejvIINZRg7C1hgBobETePcbvZgRFQCbcQUh/6yRTVY+3aOPhf
|
||||
/DcKmb/eM9ijsQxsZaAYrOnu8xXkYQIDAQABAoICAQCt7LYURW+dtoafTTk0ZzQ1
|
||||
AuTKfav16jFxhBfwYjp5dvgw9MQhUhn/Cirh2A6HYydSPUhxk0UZU9QvyGHqCT4o
|
||||
fm0uXG+tXVXeoDgc4ABVm117ZWK4lX5OrvmK2xCN4CNT5fgBADAbgLCy7SNjFrsH
|
||||
ccTYr0P4/moq+qpTAjGaJHu1u4kXKZ7h05BBZPioAtNWjFAEjS2nSiR+ltuC9xsA
|
||||
EoXCxkAXGR1L6vtTr+GRxLYjlXrGWFcJHXb08tKNk5C84mi63WSjTeEoDLlkLBaB
|
||||
yKySF0kxteAWBvscX8IXo5sBUVyT+enS1DX74uyNhZHdLYOSWXUTVogK1DK/Hbe0
|
||||
qCLg1pRsPycwzhmI39pvnoZOIedwOk2oGMu7yy9wlsXQnQfY92hoHt+/HOnIrRxX
|
||||
x86XrSdY9SX+r20PN8iVnybODAO7LA6LEBNAcpaudnU8NinqvCfhK/sk76YMIkWo
|
||||
OCuPx2iQ7sEwDUXfvW3MUs2orbxleCZE25j/vFjMiEhe3MnCnf2RF4TzKD4/WvlW
|
||||
UYfXzbP9BjyjsyOkWagigXNSMKjcO/VmejCbBGiLjFl6YOtLWFU+x/6J2eU1/b26
|
||||
F18zBbDMAc7gtUHYP1JWMMD7Lg5XFfsOlmo3W4RFHKtd0nUyrkMwBCAT+i44gFBL
|
||||
sNVkYW9GdHsg2ll+BjeBhQKCAQEA+5yyGk7yFxenvmLmveLL6qXdVsEKAFAo8c9A
|
||||
kmbrdkWZsmSInVZ2tpo93fS1IS4N0em9tZHkThTJuuoQrbFWQ+jzkzVdmErUFl2/
|
||||
3x9VxS51zxZ/ZrJbNpkIxQe/2m5Xr02J2pyJQsc4N/28Jmr8sRzs1ZUR+ep7v5Zq
|
||||
EXIHx5BLskVj9x/pQC+I9tlxyojJ5MtEFWTHM3oLGeDHIGovO+EW3r2v5lqsug0d
|
||||
s/81tKoLa2mrq6p7j+MpuFc0X5b98QG+ucZWtA3CNYyCXbNdC4qtl8zeqRIh3qXC
|
||||
9ajWHUgiB9TJSIjcM/Auymd0KTlkElmXFZ+ZUnvKr/OGpsEj6wKCAQEAwmLj5U60
|
||||
NukQuqfoHSWvRCzIRjakuavytiY5NXEguEuTqHT+UOt0UWyref9dn5/dEW8BXACs
|
||||
1K4wT4Pa1PvuNRrQ4dYkWPwOx/FHSaGyCe/b5MVsVMrw3iCRUk23zK8hL2iMXKHc
|
||||
BeSJ0QJ+Kgb/k8iFdTpX6zqYImuNwdgjT1+0ndIl0HM/jG2U2yBumNH8BS0usYJW
|
||||
g7We4lYEXqtyxDJiTFcuchmFdtknLRIUOwWZAhgI8eOt+UpGH3hTGeUS0T2g/531
|
||||
ABX22XLkHeo7KOTD+pembbHulgvFNpLA9EI0EPMQQAOBgTKaY7EeiDUsQkP/L5d6
|
||||
BJoJhkza0uVh4wKCAQEAksQwT5BFPpsZycA+//xPHixqE7S+dLhNad+Ottc5+d9X
|
||||
a+uglMZesNz/wXyAOz516UAC3Oqg1brigRkPaYHL5Aj6K6AxXCgp1nIQ/cF1cnNL
|
||||
XOSuo+Tdt9dEekmu62b51tPP2aj9l+pFLMPhADff9h/9NgiiV7kjPforHMn7J7lP
|
||||
rkkzqm7+y+Xuaq8j5RQtUDwRFrmWSLyjxRCMlqfZrX/6qyrSc/foUQ5diSUQ2rVl
|
||||
u94DuTrUoHXNXC3h6vBUaESwUAUVhimQY1P2p1l8qMLXx3hFWTGueAUQ1+MIIkR1
|
||||
NUQ1tQ3ABLvRT1dRNsq3SMzhiEd0U9zJuiC/Jn36yQKCAQAYtpHauWLYCFGEKYyt
|
||||
B/l8ZWUg6BmRMXcuCTYEwVkzlQg0xor+prCnGXXDkN/KR3zHlqFJnRxb/blOoqjT
|
||||
oyPpxHsB+0OrvH/0k4xIpDIKaWA/eYoITbTJyMIxAIh5kVpauKP/suRSK3gKBpMb
|
||||
rMAZfcjZ2o0K7uwglCP1nREAKl7AIdOE6OIPbG8cXMcyzp+H2PKyxqtRG2oTxHPR
|
||||
xWJV50HwCrVw9CWvsnP0mvPPfSqyxXN9rUCVDQhVP+rww9rcl0U8ukxHsoMrqhuu
|
||||
YfUbgdoYpecW0yROFzj/czDs3O3Zqc1LFicE0fYm7oG/N2NlGVf8KPnuU9caJ/M6
|
||||
FMeZAoIBAD/ItfYpsh+UrxC0g7QlE7XoTIuC0k1nlfWFsx9fGTCLM7D13bpf5nzR
|
||||
7JExiplU/HV3HHrEvXqufOedmIE6MSPvZzAj3z2Lqq7w7NtHk5GSImkZqs9onh8i
|
||||
fhQChAYNg1DMXTVu6c0HX23EGMu0ySe+knOE+KJQeZqmxXR28xaTai4u8HjZ7jWA
|
||||
7qX3NrlPGU8l1uZKMuT9kLkLhaRDHba4CLiZvt88uRYkSR9DotJud7lmuTdiUuN9
|
||||
L9/wVeS30N6g4bfOxv4+wkXR3oEZj+DtmdopeRKAb9kqJa/+yc9/uT7g6ePQnR4H
|
||||
vLXrRz8CyZat8oyhEGQIsEHQaMxS6YQ=
|
||||
-----END PRIVATE KEY-----
|
||||
52
utils/docker/certs/server_redis.key
Normal file
@@ -0,0 +1,52 @@
|
||||
-----BEGIN PRIVATE KEY-----
|
||||
MIIJQwIBADANBgkqhkiG9w0BAQEFAASCCS0wggkpAgEAAoICAQC/DfDxcFkyH2tj
|
||||
S5hbEnDPF7cLFd03UsgiqPTgLwGdyYnNYURIGMxsgl243FkycmYRz1OsEY1+zf71
|
||||
vi0XNulfdl83+1t+hZcEt1HGzXrO92smznHdDBL71sqDWO00n1XSmWHo7J9qIWju
|
||||
vKoBwE5lS25ghSzuZWP/P5WBvOa/2wUyfOdB33DV8CiHCBjf1C2tp+sqw5kFKT0v
|
||||
7lSZTLnCMRH0VlzRc/1CHHrVn7VyvpN618QDJ4D2AjSxs+uGhlu7ppWYXdB34bVJ
|
||||
T0ffT0KkPnOQ6pKJ8uLXxMpSfgATHERZfigXZ5ybyTMx/Rwclil828icwowizmGs
|
||||
8AXrI7vk/g4t4JJ74yGIc0A+HKsQ2Gu193pmwQNHpOctN7NMnF+Lvcqf7j3vIWCL
|
||||
VSJDXUom28VAr/egfr3K5xEfipaiePlAUV6PF90kOu3p/kHlNS9dkqx4tM1z6sur
|
||||
q4znr3+oq4Ldr+1UNRnBLc/yT01IwdbD2NKPTSJIyO9uybx0yU04zJC6ZhlZb+Z0
|
||||
kOtF1+T2jzbSjFKIlxoDBUlXonGVW4TXh5S1CUFHFxyJfsPjocayOYikCb6chTj+
|
||||
9aiTaWJI8txLUO7ejvIINZRg7C1hgBobETePcbvZgRFQCbcQUh/6yRTVY+3aOPhf
|
||||
/DcKmb/eM9ijsQxsZaAYrOnu8xXkYQIDAQABAoICAQCt7LYURW+dtoafTTk0ZzQ1
|
||||
AuTKfav16jFxhBfwYjp5dvgw9MQhUhn/Cirh2A6HYydSPUhxk0UZU9QvyGHqCT4o
|
||||
fm0uXG+tXVXeoDgc4ABVm117ZWK4lX5OrvmK2xCN4CNT5fgBADAbgLCy7SNjFrsH
|
||||
ccTYr0P4/moq+qpTAjGaJHu1u4kXKZ7h05BBZPioAtNWjFAEjS2nSiR+ltuC9xsA
|
||||
EoXCxkAXGR1L6vtTr+GRxLYjlXrGWFcJHXb08tKNk5C84mi63WSjTeEoDLlkLBaB
|
||||
yKySF0kxteAWBvscX8IXo5sBUVyT+enS1DX74uyNhZHdLYOSWXUTVogK1DK/Hbe0
|
||||
qCLg1pRsPycwzhmI39pvnoZOIedwOk2oGMu7yy9wlsXQnQfY92hoHt+/HOnIrRxX
|
||||
x86XrSdY9SX+r20PN8iVnybODAO7LA6LEBNAcpaudnU8NinqvCfhK/sk76YMIkWo
|
||||
OCuPx2iQ7sEwDUXfvW3MUs2orbxleCZE25j/vFjMiEhe3MnCnf2RF4TzKD4/WvlW
|
||||
UYfXzbP9BjyjsyOkWagigXNSMKjcO/VmejCbBGiLjFl6YOtLWFU+x/6J2eU1/b26
|
||||
F18zBbDMAc7gtUHYP1JWMMD7Lg5XFfsOlmo3W4RFHKtd0nUyrkMwBCAT+i44gFBL
|
||||
sNVkYW9GdHsg2ll+BjeBhQKCAQEA+5yyGk7yFxenvmLmveLL6qXdVsEKAFAo8c9A
|
||||
kmbrdkWZsmSInVZ2tpo93fS1IS4N0em9tZHkThTJuuoQrbFWQ+jzkzVdmErUFl2/
|
||||
3x9VxS51zxZ/ZrJbNpkIxQe/2m5Xr02J2pyJQsc4N/28Jmr8sRzs1ZUR+ep7v5Zq
|
||||
EXIHx5BLskVj9x/pQC+I9tlxyojJ5MtEFWTHM3oLGeDHIGovO+EW3r2v5lqsug0d
|
||||
s/81tKoLa2mrq6p7j+MpuFc0X5b98QG+ucZWtA3CNYyCXbNdC4qtl8zeqRIh3qXC
|
||||
9ajWHUgiB9TJSIjcM/Auymd0KTlkElmXFZ+ZUnvKr/OGpsEj6wKCAQEAwmLj5U60
|
||||
NukQuqfoHSWvRCzIRjakuavytiY5NXEguEuTqHT+UOt0UWyref9dn5/dEW8BXACs
|
||||
1K4wT4Pa1PvuNRrQ4dYkWPwOx/FHSaGyCe/b5MVsVMrw3iCRUk23zK8hL2iMXKHc
|
||||
BeSJ0QJ+Kgb/k8iFdTpX6zqYImuNwdgjT1+0ndIl0HM/jG2U2yBumNH8BS0usYJW
|
||||
g7We4lYEXqtyxDJiTFcuchmFdtknLRIUOwWZAhgI8eOt+UpGH3hTGeUS0T2g/531
|
||||
ABX22XLkHeo7KOTD+pembbHulgvFNpLA9EI0EPMQQAOBgTKaY7EeiDUsQkP/L5d6
|
||||
BJoJhkza0uVh4wKCAQEAksQwT5BFPpsZycA+//xPHixqE7S+dLhNad+Ottc5+d9X
|
||||
a+uglMZesNz/wXyAOz516UAC3Oqg1brigRkPaYHL5Aj6K6AxXCgp1nIQ/cF1cnNL
|
||||
XOSuo+Tdt9dEekmu62b51tPP2aj9l+pFLMPhADff9h/9NgiiV7kjPforHMn7J7lP
|
||||
rkkzqm7+y+Xuaq8j5RQtUDwRFrmWSLyjxRCMlqfZrX/6qyrSc/foUQ5diSUQ2rVl
|
||||
u94DuTrUoHXNXC3h6vBUaESwUAUVhimQY1P2p1l8qMLXx3hFWTGueAUQ1+MIIkR1
|
||||
NUQ1tQ3ABLvRT1dRNsq3SMzhiEd0U9zJuiC/Jn36yQKCAQAYtpHauWLYCFGEKYyt
|
||||
B/l8ZWUg6BmRMXcuCTYEwVkzlQg0xor+prCnGXXDkN/KR3zHlqFJnRxb/blOoqjT
|
||||
oyPpxHsB+0OrvH/0k4xIpDIKaWA/eYoITbTJyMIxAIh5kVpauKP/suRSK3gKBpMb
|
||||
rMAZfcjZ2o0K7uwglCP1nREAKl7AIdOE6OIPbG8cXMcyzp+H2PKyxqtRG2oTxHPR
|
||||
xWJV50HwCrVw9CWvsnP0mvPPfSqyxXN9rUCVDQhVP+rww9rcl0U8ukxHsoMrqhuu
|
||||
YfUbgdoYpecW0yROFzj/czDs3O3Zqc1LFicE0fYm7oG/N2NlGVf8KPnuU9caJ/M6
|
||||
FMeZAoIBAD/ItfYpsh+UrxC0g7QlE7XoTIuC0k1nlfWFsx9fGTCLM7D13bpf5nzR
|
||||
7JExiplU/HV3HHrEvXqufOedmIE6MSPvZzAj3z2Lqq7w7NtHk5GSImkZqs9onh8i
|
||||
fhQChAYNg1DMXTVu6c0HX23EGMu0ySe+knOE+KJQeZqmxXR28xaTai4u8HjZ7jWA
|
||||
7qX3NrlPGU8l1uZKMuT9kLkLhaRDHba4CLiZvt88uRYkSR9DotJud7lmuTdiUuN9
|
||||
L9/wVeS30N6g4bfOxv4+wkXR3oEZj+DtmdopeRKAb9kqJa/+yc9/uT7g6ePQnR4H
|
||||
vLXrRz8CyZat8oyhEGQIsEHQaMxS6YQ=
|
||||
-----END PRIVATE KEY-----
|
||||
131
utils/docker/docker-compose-template.yml.j2
Normal file
@@ -0,0 +1,131 @@
|
||||
services:
|
||||
broker:
|
||||
image: docker.io/bitnami/kafka:latest
|
||||
ports:
|
||||
- "9092:9092"
|
||||
- "9094:9094"
|
||||
environment:
|
||||
- KAFKA_ENABLE_KRAFT=yes
|
||||
- KAFKA_CFG_PROCESS_ROLES=broker,controller
|
||||
- KAFKA_CFG_CONTROLLER_LISTENER_NAMES=CONTROLLER
|
||||
- KAFKA_CFG_LISTENERS=PLAINTEXT://:9092,CONTROLLER://:9093,EXTERNAL://:9094
|
||||
- KAFKA_CFG_LISTENER_SECURITY_PROTOCOL_MAP=CONTROLLER:PLAINTEXT,PLAINTEXT:PLAINTEXT,EXTERNAL:PLAINTEXT
|
||||
- KAFKA_CFG_ADVERTISED_LISTENERS=PLAINTEXT://docker-broker-1:9092,EXTERNAL://kafka_b:9094
|
||||
- KAFKA_BROKER_ID=1
|
||||
- KAFKA_CFG_CONTROLLER_QUORUM_VOTERS=1@docker-broker-1:9093
|
||||
- ALLOW_PLAINTEXT_LISTENER=yes
|
||||
- KAFKA_CFG_NODE_ID=1
|
||||
- KAFKA_AUTO_CREATE_TOPICS_ENABLE=true
|
||||
- BITNAMI_DEBUG=yes
|
||||
- KAFKA_CFG_NUM_PARTITIONS=2
|
||||
healthcheck:
|
||||
test: ["CMD-SHELL", "kafka-topics.sh --bootstrap-server localhost:9092 --list"]
|
||||
interval: 10s
|
||||
timeout: 5s
|
||||
retries: 3
|
||||
start_period: 5s
|
||||
networks:
|
||||
- cgw_multi_instances_network
|
||||
|
||||
postgresql:
|
||||
image: "postgres:latest"
|
||||
ports:
|
||||
- "5432:5432"
|
||||
user: postgres
|
||||
command:
|
||||
- "postgres"
|
||||
- "-c"
|
||||
- "max_connections=400"
|
||||
- "-c"
|
||||
- "shared_buffers=20MB"
|
||||
env_file:
|
||||
- postgresql.env
|
||||
restart: always
|
||||
volumes:
|
||||
- ./postgresql/init-db.sh:/docker-entrypoint-initdb.d/init-db.sh
|
||||
networks:
|
||||
- cgw_multi_instances_network
|
||||
|
||||
redis:
|
||||
image: 'bitnami/redis:latest'
|
||||
ports:
|
||||
- "6379:6379"
|
||||
environment:
|
||||
- ALLOW_EMPTY_PASSWORD=yes
|
||||
networks:
|
||||
- cgw_multi_instances_network
|
||||
init-broker-container:
|
||||
image: docker.io/bitnami/kafka:latest
|
||||
depends_on:
|
||||
- broker
|
||||
entrypoint: [ '/bin/sh', '-c' ]
|
||||
command: |
|
||||
"
|
||||
# Rather than a fixed "sleep 15", use this command
# to block the init container until the Kafka broker is ready.
|
||||
kafka-topics --bootstrap-server broker:9092 --list
|
||||
|
||||
# create CnC and CnC_Res topics
|
||||
kafka-topics.sh --create --partitions {{ cgw_instances_num }} --bootstrap-server broker:9092 --topic CnC
|
||||
kafka-topics.sh --create --bootstrap-server broker:9092 --partitions 2 --topic CnC_Res
|
||||
"
|
||||
networks:
|
||||
- cgw_multi_instances_network
|
||||
|
||||
{% for i in range(0, cgw_instances_num) %}
|
||||
cgw_instance_{{ i }}:
|
||||
image: {{ cgw_image_name }}:{{ cgw_image_tag }}
|
||||
container_name: {{ cgw_container_name }}_{{ i }}
|
||||
ports:
|
||||
- "{{ cgw_wss_base_port + i }}:{{ cgw_wss_base_port + i }}"
|
||||
- "{{ cgw_grpc_public_base_port + i }}:{{ cgw_grpc_public_base_port + i }}"
|
||||
- "{{ cgw_metrics_base_port + i }}:{{ cgw_metrics_base_port + i }}"
|
||||
environment:
|
||||
- CGW_DB_HOST={{ cgw_db_host }}
|
||||
- CGW_DB_PORT={{ cgw_db_port }}
|
||||
- CGW_DB_NAME={{ cgw_db_name }}
|
||||
- CGW_DB_USERNAME={{ cgw_db_username }}
|
||||
- CGW_DB_PASS={{ cgw_db_password }}
|
||||
- CGW_DB_TLS={{ cgw_db_tls }}
|
||||
- CGW_GRPC_LISTENING_IP={{ cgw_grpc_listening_ip }}
|
||||
- CGW_GRPC_LISTENING_PORT={{ cgw_grpc_listening_base_port + i }}
|
||||
- CGW_GRPC_PUBLIC_HOST={{ cgw_grpc_public_host }}_{{ i }}
|
||||
- CGW_GRPC_PUBLIC_PORT={{ cgw_grpc_public_base_port + i }}
|
||||
- CGW_ID={{ cgw_base_id + i }}
|
||||
- CGW_KAFKA_HOST={{ cgw_kafka_host }}
|
||||
- CGW_KAFKA_PORT={{ cgw_kafka_port }}
|
||||
- CGW_KAFKA_CONSUME_TOPIC={{ cgw_kafka_consumer_topic }}
|
||||
- CGW_KAFKA_PRODUCE_TOPIC={{ cgw_kafka_producer_topic }}
|
||||
- CGW_LOG_LEVEL={{ cgw_log_level }}
|
||||
- CGW_REDIS_HOST={{ cgw_redis_host }}
|
||||
- CGW_REDIS_PORT={{ cgw_redis_port }}
|
||||
- CGW_REDIS_TLS={{ cgw_redis_tls }}
|
||||
- CGW_REDIS_USERNAME={{ cgw_redis_username }}
|
||||
- CGW_REDIS_PASSWORD={{ cgw_redis_password }}
|
||||
- CGW_METRICS_PORT={{ cgw_metrics_base_port + i }}
|
||||
- CGW_WSS_IP={{ cgw_wss_ip }}
|
||||
- CGW_WSS_PORT={{ cgw_wss_base_port + i }}
|
||||
- CGW_WSS_CAS={{ cgw_wss_cas }}
|
||||
- CGW_WSS_CERT={{ cgw_wss_cert }}
|
||||
- CGW_WSS_KEY={{ cgw_wss_key }}
|
||||
- DEFAULT_WSS_THREAD_NUM={{ cgw_wss_t_num }}
|
||||
- CGW_ALLOW_CERT_MISMATCH={{ cgw_allow_certs_missmatch }}
|
||||
- CGW_NB_INFRA_TLS={{ cgw_nb_infra_tls }}
|
||||
- CGW_UCENTRAL_AP_DATAMODEL_URI={{ cgw_ucentral_ap_datamodel_uri }}
|
||||
- CGW_UCENTRAL_SWITCH_DATAMODEL_URI={{ cgw_ucentral_switch_datamodel_uri }}
|
||||
- CGW_GROUPS_CAPACITY={{ cgw_groups_capacity }}
|
||||
- CGW_GROUPS_THRESHOLD={{ cgw_groups_threshold }}
|
||||
- CGW_GROUP_INFRAS_CAPACITY={{ cgw_group_infras_capacity }}
|
||||
- CGW_FEATURE_TOPOMAP_ENABLE='1'
|
||||
depends_on:
|
||||
broker:
|
||||
condition: service_healthy
|
||||
volumes:
|
||||
- {{ default_certs_path }}:{{ container_certs_voulume }}
|
||||
- {{ default_certs_path }}:{{ container_nb_infra_certs_voulume }}
|
||||
networks:
|
||||
- cgw_multi_instances_network
|
||||
{% endfor %}
|
||||
|
||||
networks:
|
||||
cgw_multi_instances_network:
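# Rendered-port example (illustrative): with cgw_instances_num = 2 and the default
# base ports (WSS 15002, gRPC 50051, metrics 8080), cgw_instance_0 exposes
# 15002/50051/8080 and cgw_instance_1 exposes 15003/50052/8081.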
|
||||
@@ -12,9 +12,9 @@ services:
|
||||
- KAFKA_CFG_CONTROLLER_LISTENER_NAMES=CONTROLLER
|
||||
- KAFKA_CFG_LISTENERS=PLAINTEXT://:9092,CONTROLLER://:9093,EXTERNAL://:9094
|
||||
- KAFKA_CFG_LISTENER_SECURITY_PROTOCOL_MAP=CONTROLLER:PLAINTEXT,PLAINTEXT:PLAINTEXT,EXTERNAL:PLAINTEXT
|
||||
- KAFKA_CFG_ADVERTISED_LISTENERS=PLAINTEXT://127.0.0.1:9092,EXTERNAL://kafka_b:9094
|
||||
- KAFKA_CFG_ADVERTISED_LISTENERS=PLAINTEXT://docker-broker-1:9092,EXTERNAL://kafka_b:9094
|
||||
- KAFKA_BROKER_ID=1
|
||||
- KAFKA_CFG_CONTROLLER_QUORUM_VOTERS=1@127.0.0.1:9093
|
||||
- KAFKA_CFG_CONTROLLER_QUORUM_VOTERS=1@docker-broker-1:9093
|
||||
- ALLOW_PLAINTEXT_LISTENER=yes
|
||||
- KAFKA_CFG_NODE_ID=1
|
||||
- KAFKA_AUTO_CREATE_TOPICS_ENABLE=true
|
||||
@@ -26,23 +26,27 @@ services:
|
||||
timeout: 5s
|
||||
retries: 3
|
||||
start_period: 5s
|
||||
|
||||
networks:
|
||||
- cgw_network
|
||||
|
||||
postgresql:
|
||||
image: "postgres:latest"
|
||||
ports:
|
||||
- "5432:5432"
|
||||
user: postgres
|
||||
command:
|
||||
- "postgres"
|
||||
- "-c"
|
||||
- "max_connections=400"
|
||||
- "-c"
|
||||
- "shared_buffers=20MB"
|
||||
- "shared_buffers=20MB"
|
||||
env_file:
|
||||
- postgresql.env
|
||||
restart: always
|
||||
volumes:
|
||||
- ./postgresql/init-db.sh:/docker-entrypoint-initdb.d/init-db.sh
|
||||
networks:
|
||||
- cgw_network
|
||||
|
||||
redis:
|
||||
image: 'bitnami/redis:latest'
|
||||
@@ -50,3 +54,25 @@ services:
|
||||
- "6379:6379"
|
||||
environment:
|
||||
- ALLOW_EMPTY_PASSWORD=yes
|
||||
networks:
|
||||
- cgw_network
|
||||
init-broker-container:
|
||||
image: docker.io/bitnami/kafka:latest
|
||||
depends_on:
|
||||
- broker
|
||||
entrypoint: [ '/bin/sh', '-c' ]
|
||||
command: |
|
||||
"
|
||||
# rather than giving sleep 15 use this
|
||||
# to block init container to wait for Kafka broker to be ready
|
||||
kafka-topics --bootstrap-server broker:9092 --list
|
||||
|
||||
# create CnC and CnC_Res topics
|
||||
kafka-topics.sh --create --partitions 2 --bootstrap-server broker:9092 --topic CnC
|
||||
kafka-topics.sh --create --bootstrap-server broker:9092 --partitions 2 --topic CnC_Res
|
||||
"
|
||||
networks:
|
||||
- cgw_network
|
||||
|
||||
networks:
|
||||
cgw_network:
|
||||
|
||||
@@ -4,6 +4,10 @@ set -e
|
||||
psql -v ON_ERROR_STOP=1 --username "$POSTGRES_USER" <<-EOSQL
|
||||
CREATE USER $CGW_DB_USER WITH ENCRYPTED PASSWORD '$CGW_DB_PASSWORD';
|
||||
CREATE DATABASE $CGW_DB OWNER $CGW_DB_USER;
|
||||
\c $CGW_DB;
|
||||
CREATE TABLE infrastructure_groups ( id INT PRIMARY KEY, reserved_size INT, actual_size INT);
|
||||
CREATE TABLE infras ( mac MACADDR PRIMARY KEY, infra_group_id INT, FOREIGN KEY(infra_group_id) REFERENCES infrastructure_groups(id) ON DELETE CASCADE);
|
||||
ALTER DATABASE $CGW_DB OWNER TO $CGW_DB_USER;
|
||||
ALTER TABLE infrastructure_groups OWNER TO $CGW_DB_USER;
|
||||
ALTER TABLE infras OWNER TO $CGW_DB_USER;
|
||||
EOSQL
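# Note (illustrative, not part of this change): since infras.infra_group_id
# references infrastructure_groups(id) with ON DELETE CASCADE, deleting a group
# row also removes every infra MAC assigned to it, e.g.:
#   DELETE FROM infrastructure_groups WHERE id = 1;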
|
||||
|
||||
125
utils/kafka_producer/fill_groups.sh
Executable file
@@ -0,0 +1,125 @@
|
||||
#!/bin/bash
|
||||
################################################################################
|
||||
|
||||
kafka_host="NULL"
|
||||
starting_mac_offset="NULL"
|
||||
mac_max_num=0
|
||||
total_macs_generated=0
|
||||
num_of_groups_filled=0
|
||||
out_fname="/tmp/gid_gen.sh"
|
||||
gid_offset=0
|
||||
|
||||
################################################################################
|
||||
|
||||
function incr_mac() {
|
||||
in_mac=$1
|
||||
in_incr_num=$2
|
||||
in_incr_num=$(($in_incr_num + 1))
|
||||
mac=$(echo $in_mac | tr -d ':')
|
||||
macadd=$(( 0x$mac + $in_incr_num ))
|
||||
macnew=$(printf "%012X" $macadd | sed 's/../&:/g;s/:$//')
|
||||
echo $macnew
|
||||
}
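# Worked example (illustrative): `incr_mac 00:00:00:00:00:01 40` adds 41
# (the increment plus one) to the numeric MAC value and prints 00:00:00:00:00:2A.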
|
||||
|
||||
function usage() {
|
||||
usage="$(basename "$0") [-o <starting_mac_offset>] [-s ip:port] [-c n] [-g n] -- randomly generate and fill KAFKA BUS with groups and infras list create / add messages
|
||||
|
||||
where:
|
||||
-o Starting mac offset (e.g. 00:00:00:00:00:01)
|
||||
-s KAFKA server (in a IP:PORT fashion)
|
||||
-c infras (mac addresses) number to create
|
||||
-g group ID offset to start from"
|
||||
echo "$usage"
|
||||
}
|
||||
|
||||
function check_args() {
|
||||
if [ $starting_mac_offset = "NULL" ] ; then
|
||||
echo "ERR: Starting mac offset ('-o' option) is NULL (unset)"
|
||||
echo
|
||||
usage
|
||||
exit 4;
|
||||
fi
|
||||
|
||||
if [ $mac_max_num -eq 0 ] ; then
|
||||
echo "ERR: Num of macs to create has to be greater than zero ('-c' option is NULL - unset)"
|
||||
echo
|
||||
usage
|
||||
exit 5;
|
||||
fi
|
||||
|
||||
if [ $kafka_host = "NULL" ] ; then
|
||||
echo "ERR: Kafka host has to be set ('-s' option is NULL - unset)"
|
||||
echo
|
||||
usage
|
||||
exit 6;
|
||||
fi
|
||||
|
||||
if ! [ -e ./main.py ] ; then
|
||||
echo "Failed to find main.py file of kafka_producer!"
|
||||
exit 2;
|
||||
fi
|
||||
|
||||
if ! [ `realpath main.py | grep "kafka_producer"` ] ; then
|
||||
echo -e \
|
||||
"Found main.py but it seems invalid:\n"\
|
||||
"expected it to be inside 'kafka_producer' dir.\n" \
|
||||
"$0 should be executed from that folder as well.";
|
||||
exit 2
|
||||
fi
|
||||
}
|
||||
|
||||
################################################################################
|
||||
|
||||
while getopts ':o:hs:c:g:' option; do
|
||||
case "$option" in
|
||||
h) usage
|
||||
exit
|
||||
;;
|
||||
o) starting_mac_offset=$OPTARG
|
||||
echo -e "OPT_IN: starting mac offset will be used: '$starting_mac_offset'"
|
||||
;;
|
||||
s) kafka_host=$OPTARG
|
||||
echo -e "OPT_IN: Kafka host:port will be used: '$kafka_host'"
|
||||
;;
|
||||
c) mac_max_num=$OPTARG
|
||||
echo -e "OPT_IN: Will create the following random macs num: '$mac_max_num'"
|
||||
;;
|
||||
g) gid_offset=$OPTARG
|
||||
echo -e "OPT_IN: Will start creating groups starting from the following GID offset: '$gid_offset'"
|
||||
;;
|
||||
:) printf "missing argument for -%s\n" "$OPTARG" >&2
|
||||
echo "$usage" >&2
|
||||
exit 1
|
||||
;;
|
||||
\?) printf "illegal option: -%s\n" "$OPTARG" >&2
|
||||
echo "$usage" >&2
|
||||
exit 1
|
||||
;;
|
||||
esac
|
||||
done
|
||||
check_args
|
||||
################################################################################
|
||||
|
||||
echo "#!/bin/bash" > $out_fname
|
||||
echo >> $out_fname
|
||||
|
||||
mac_offset=$starting_mac_offset
|
||||
num_of_groups_filled=$gid_offset
|
||||
while true ; do
|
||||
echo "Processing mac offset $mac_offset..."
|
||||
random_mac_num=`seq 40 200 | sort -R | head -1` && echo "Generated mac num for gid $num_of_groups_filled - $random_mac_num"
|
||||
echo "python3 ./main.py -s $kafka_host -c 1 --new-group $num_of_groups_filled 0 generated_group_$num_of_groups_filled""_infra_num_$random_mac_num" >> $out_fname
|
||||
echo "python3 ./main.py -s $kafka_host -d $num_of_groups_filled '$mac_offset^$random_mac_num'" >> $out_fname
|
||||
total_macs_generated=$((total_macs_generated + random_mac_num + 1))
|
||||
num_of_groups_filled=$((num_of_groups_filled + 1))
|
||||
if [ $total_macs_generated -ge $mac_max_num ]; then
|
||||
break;
|
||||
fi
|
||||
mac_offset=`incr_mac $mac_offset $random_mac_num`
|
||||
done
|
||||
echo "Created '$total_macs_generated' infra entries dispersed among '$((num_of_groups_filled - gid_offset))' number of groups"
|
||||
|
||||
chmod +x $out_fname
|
||||
|
||||
echo
|
||||
echo "Output file generated, and can be launched: $out_fname"
|
||||
48
utils/kafka_producer/kafka_data/cfg_ap_basic.json
Normal file
@@ -0,0 +1,48 @@
|
||||
{
|
||||
"jsonrpc": "2.0",
|
||||
"method": "configure",
|
||||
"params": {
|
||||
"serial": "MAC_PLACEHOLDER",
|
||||
"uuid": UUID_PLACEHOLDER,
|
||||
"when": 0,
|
||||
"config": {
|
||||
"ethernet": [
|
||||
{
|
||||
"enabled": true,
|
||||
"select-ports": [
|
||||
"LAN1"
|
||||
]
|
||||
},
|
||||
{
|
||||
"enabled": true,
|
||||
"select-ports": [
|
||||
"LAN2"
|
||||
]
|
||||
}
|
||||
],
|
||||
"interfaces": [
|
||||
{
|
||||
"ethernet": [
|
||||
{
|
||||
"select-ports": [
|
||||
"WAN*",
|
||||
"LAN1",
|
||||
"LAN2"
|
||||
]
|
||||
}
|
||||
],
|
||||
"ipv4": {
|
||||
"addressing": "dynamic"
|
||||
},
|
||||
"name": "WAN",
|
||||
"role": "upstream",
|
||||
"services": [
|
||||
"lldp",
|
||||
"mdns"
|
||||
]
|
||||
}
|
||||
],
|
||||
"uuid": UUID_PLACEHOLDER
|
||||
}
|
||||
}
|
||||
}
|
||||
48
utils/kafka_producer/kafka_data/cfg_ap_basic_invalid.json
Normal file
@@ -0,0 +1,48 @@
|
||||
{
|
||||
"jsonrpc": "2.0",
|
||||
"method": "configure",
|
||||
"params": {
|
||||
"serial": "MAC_PLACEHOLDER",
|
||||
"uuid": UUID_PLACEHOLDER,
|
||||
"when": WHEN_PLACEHOLDER,
|
||||
"config": {
|
||||
"ethernet": [
|
||||
{
|
||||
"enabled": "YES, TOTALLY ENABLE IT",
|
||||
"select-ports": [
|
||||
"LAN1"
|
||||
]
|
||||
},
|
||||
{
|
||||
"enabled": true,
|
||||
"select-ports": [
|
||||
"LAN2"
|
||||
]
|
||||
}
|
||||
],
|
||||
"interfaces": [
|
||||
{
|
||||
"ethernet": [
|
||||
{
|
||||
"select-ports": [
|
||||
"WAN*",
|
||||
"LAN1",
|
||||
"LAN2"
|
||||
]
|
||||
}
|
||||
],
|
||||
"ipv4": {
|
||||
"addressing": "dynamic"
|
||||
},
|
||||
"name": "WAN",
|
||||
"role": "upstream",
|
||||
"services": [
|
||||
"lldp",
|
||||
"mdns"
|
||||
]
|
||||
}
|
||||
],
|
||||
"uuid": UUID_PLACEHOLDER
|
||||
}
|
||||
}
|
||||
}
|
||||
56
utils/kafka_producer/kafka_data/cfg_switch_basic.json
Normal file
@@ -0,0 +1,56 @@
|
||||
{
|
||||
"jsonrpc": "2.0",
|
||||
"method": "configure",
|
||||
"params": {
|
||||
"serial": "MAC_PLACEHOLDER",
|
||||
"uuid": UUID_PLACEHOLDER,
|
||||
"when": 0,
|
||||
"config": {
|
||||
"services": {},
|
||||
"ethernet": [
|
||||
{
|
||||
"select-ports": [
|
||||
"Ethernet*"
|
||||
],
|
||||
"speed": 1000,
|
||||
"duplex": "full",
|
||||
"enabled": true,
|
||||
"poe": {
|
||||
"admin-mode": true
|
||||
}
|
||||
}
|
||||
],
|
||||
"interfaces": [
|
||||
{
|
||||
"vlan": {
|
||||
"id": 1,
|
||||
"proto": "802.1q"
|
||||
},
|
||||
"ethernet": [
|
||||
{
|
||||
"select-ports": [
|
||||
"Ethernet*"
|
||||
],
|
||||
"vlan-tag": "un-tagged"
|
||||
}
|
||||
],
|
||||
"role": "upstream",
|
||||
"name": "mgmt-vlan"
|
||||
}
|
||||
],
|
||||
"switch": {
|
||||
"loop-detection": {
|
||||
"instances": [
|
||||
{
|
||||
"enabled": true,
|
||||
"id": 1,
|
||||
"priority": 32768
|
||||
}
|
||||
],
|
||||
"protocol": "rpvstp"
|
||||
}
|
||||
},
|
||||
"uuid": UUID_PLACEHOLDER
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -0,0 +1,56 @@
|
||||
{
|
||||
"jsonrpc": "2.0",
|
||||
"method": "configure",
|
||||
"params": {
|
||||
"serial": "MAC_PLACEHOLDER",
|
||||
"uuid": UUID_PLACEHOLDER,
|
||||
"when": 0,
|
||||
"config": {
|
||||
"services": {},
|
||||
"ethernet": [
|
||||
{
|
||||
"select-ports": [
|
||||
"Ethernet*"
|
||||
],
|
||||
"speed": 1000,
|
||||
"duplex": "full",
|
||||
"enabled": "YES, TOTALLY ENABLE IT",
|
||||
"poe": {
|
||||
"admin-mode": true
|
||||
}
|
||||
}
|
||||
],
|
||||
"interfaces": [
|
||||
{
|
||||
"vlan": {
|
||||
"id": 1,
|
||||
"proto": "802.1q"
|
||||
},
|
||||
"ethernet": [
|
||||
{
|
||||
"select-ports": [
|
||||
"Ethernet*"
|
||||
],
|
||||
"vlan-tag": "un-tagged"
|
||||
}
|
||||
],
|
||||
"role": "upstream",
|
||||
"name": "mgmt-vlan"
|
||||
}
|
||||
],
|
||||
"switch": {
|
||||
"loop-detection": {
|
||||
"instances": [
|
||||
{
|
||||
"enabled": true,
|
||||
"id": 1,
|
||||
"priority": 32768
|
||||
}
|
||||
],
|
||||
"protocol": "rpvstp"
|
||||
}
|
||||
},
|
||||
"uuid": UUID_PLACEHOLDER
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -0,0 +1,32 @@
|
||||
{
|
||||
"add_group": {
|
||||
"type": "infrastructure_group_create",
|
||||
"uuid": "290d06b6-8eba-11ee-8005-aabbccddeeff"
|
||||
},
|
||||
"add_group_to_shard": {
|
||||
"type": "infrastructure_group_create_to_shard",
|
||||
"shard_id": 0,
|
||||
"uuid": "290d06b6-8eba-11ee-8005-aabbccddeeff"
|
||||
},
|
||||
"del_group": {
|
||||
"type": "infrastructure_group_delete",
|
||||
"uuid": "290d06b6-8eba-11ee-8005-aabbccddeeff"
|
||||
},
|
||||
"add_to_group": {
|
||||
"type": "infrastructure_group_infras_add",
|
||||
"infra_group_infras": [],
|
||||
"uuid": "290d06b6-8eba-11ee-8005-aabbccddeeff"
|
||||
},
|
||||
"del_from_group": {
|
||||
"type": "infrastructure_group_infras_del",
|
||||
"infra_group_infras": [],
|
||||
"uuid": "290d06b6-8eba-11ee-8005-aabbccddeeff"
|
||||
},
|
||||
"message_infra": {
|
||||
"type": "infrastructure_group_infra_message_enqueue",
|
||||
"infra_group_infra": "mac",
|
||||
"msg": {},
|
||||
"uuid": "290d06b6-8eba-11ee-8005-aabbccddeeff",
|
||||
"timeout": 60
|
||||
}
|
||||
}
|
||||
@@ -2,8 +2,12 @@
|
||||
"add_group": {
|
||||
"type": "infrastructure_group_create",
|
||||
"infra_group_id": "key",
|
||||
"infra_name": "name",
|
||||
"infra_shard_id": 0,
|
||||
"uuid": "290d06b6-8eba-11ee-8005-aabbccddeeff"
|
||||
},
|
||||
"add_group_to_shard": {
|
||||
"type": "infrastructure_group_create_to_shard",
|
||||
"infra_group_id": "key",
|
||||
"shard_id": 0,
|
||||
"uuid": "290d06b6-8eba-11ee-8005-aabbccddeeff"
|
||||
},
|
||||
"del_group": {
|
||||
@@ -12,22 +16,23 @@
|
||||
"uuid": "290d06b6-8eba-11ee-8005-aabbccddeeff"
|
||||
},
|
||||
"add_to_group": {
|
||||
"type": "infrastructure_group_device_add",
|
||||
"type": "infrastructure_group_infras_add",
|
||||
"infra_group_id": "key",
|
||||
"infra_group_infra_devices": [],
|
||||
"infra_group_infras": [],
|
||||
"uuid": "290d06b6-8eba-11ee-8005-aabbccddeeff"
|
||||
},
|
||||
"del_from_group": {
|
||||
"type": "infrastructure_group_device_del",
|
||||
"type": "infrastructure_group_infras_del",
|
||||
"infra_group_id": "key",
|
||||
"infra_group_infra_devices": [],
|
||||
"infra_group_infras": [],
|
||||
"uuid": "290d06b6-8eba-11ee-8005-aabbccddeeff"
|
||||
},
|
||||
"message_device": {
|
||||
"type": "infrastructure_group_device_message",
|
||||
"message_infra": {
|
||||
"type": "infrastructure_group_infra_message_enqueue",
|
||||
"infra_group_id": "key",
|
||||
"mac": "mac",
|
||||
"infra_group_infra": "mac",
|
||||
"msg": {},
|
||||
"uuid": "290d06b6-8eba-11ee-8005-aabbccddeeff"
|
||||
"uuid": "290d06b6-8eba-11ee-8005-aabbccddeeff",
|
||||
"timeout": 60
|
||||
}
|
||||
}
|
||||
}
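A minimal sketch of how one of these templates could be filled in and published with kafka-python; the broker address and producer wiring are illustrative assumptions, while the topic name (CnC), field names and group-id record key follow values used elsewhere in this change:

import json
import uuid

from kafka import KafkaProducer

producer = KafkaProducer(bootstrap_servers="localhost:9092")

# Fill the "add_group" template and key the record by the group id, as the tests do.
msg = {
    "type": "infrastructure_group_create",
    "infra_group_id": "1",
    "uuid": str(uuid.uuid4()),
}
producer.send("CnC", json.dumps(msg).encode("utf-8"), key=b"1")
producer.flush()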
|
||||
3
utils/kafka_producer/run.sh
Executable file
@@ -0,0 +1,3 @@
|
||||
#!/bin/bash
|
||||
|
||||
PYTHONPATH="$PYTHONPATH:$PWD:$PWD/src/" python3 main.py $@
|
||||
12
utils/kafka_producer/single_group_add.sh
Executable file
@@ -0,0 +1,12 @@
|
||||
#!/bin/bash
|
||||
|
||||
# Although we're looking for broker at localhost,
|
||||
# broker can still direct us to some <docker-broker-1>
|
||||
# so it's up to the caller to either run this script inside
|
||||
# the same network instance as broker, or create a static
|
||||
# hostname entry to point <docker-broker-1>, for example,
|
||||
# to whenever it resides.
|
||||
#
|
||||
# ARGS:
|
||||
# $1 - group id
|
||||
./run.sh -s localhost:9092 -c 1 --new-group $1
|
||||
12
utils/kafka_producer/single_group_del.sh
Executable file
@@ -0,0 +1,12 @@
|
||||
#!/bin/bash
|
||||
|
||||
# Although we're looking for broker at localhost,
|
||||
# broker can still direct us to some <docker-broker-1>
|
||||
# so it's up to the caller to either run this script inside
|
||||
# the same network instance as broker, or create a static
|
||||
# hostname entry to point <docker-broker-1>, for example,
|
||||
# to whenever it resides.
|
||||
#
|
||||
# ARGS:
|
||||
# $1 - group id
|
||||
./run.sh -s localhost:9092 --rm-group $1
|
||||
13
utils/kafka_producer/single_infra_add.sh
Executable file
@@ -0,0 +1,13 @@
|
||||
#!/bin/bash
|
||||
|
||||
# Although we're looking for broker at localhost,
|
||||
# broker can still direct us to some <docker-broker-1>
|
||||
# so it's up to the caller to either run this script inside
|
||||
# the same network instance as broker, or create a static
|
||||
# hostname entry to point <docker-broker-1>, for example,
|
||||
# to whenever it resides.
|
||||
#
|
||||
# ARGS:
|
||||
# $1 - group id
|
||||
# $2 - mac address
|
||||
./run.sh -s localhost:9092 --assign-to-group $1 "$2^1"
|
||||
13
utils/kafka_producer/single_infra_del.sh
Executable file
@@ -0,0 +1,13 @@
|
||||
#!/bin/bash
|
||||
|
||||
# Although we're looking for broker at localhost,
|
||||
# broker can still direct us to some <docker-broker-1>
|
||||
# so it's up to the caller to either run this script inside
|
||||
# the same network instance as broker, or create a static
|
||||
# hostname entry to point <docker-broker-1>, for example,
|
||||
# to whenever it resides.
|
||||
#
|
||||
# ARGS:
|
||||
# $1 - group id
|
||||
# $2 - mac address
|
||||
./run.sh -s localhost:9092 --remove-from-group $1 "$2^4"
|
||||
14
utils/kafka_producer/single_infra_msg.sh
Executable file
@@ -0,0 +1,14 @@
|
||||
#!/bin/sh
|
||||
|
||||
# Although we're looking for broker at localhost,
|
||||
# broker can still direct us to some <docker-broker-1>
|
||||
# so it's up to the caller to either run this script inside
|
||||
# the same network instance as broker, or create a static
|
||||
# hostname entry to point <docker-broker-1>, for example,
|
||||
# to whenever it resides.
|
||||
#
|
||||
# ARGS:
|
||||
# $1 - group id
|
||||
# $2 - mac address
|
||||
# $3 - file name containing complete uCentral request
|
||||
./run.sh --send-to-group $1 --send-to-mac $2^1 -c 1 -m "`cat $3`" 2>/dev/null
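# Example (illustrative): push the basic AP config from kafka_data/ to the infra
# 02:00:00:00:00:00 in group 1:
#   ./single_infra_msg.sh 1 02:00:00:00:00:00 kafka_data/cfg_ap_basic.json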
|
||||
49
utils/kafka_producer/src/admin.py
Normal file
@@ -0,0 +1,49 @@
from kafka import KafkaAdminClient
from .utils import Message

__all__ = ['Message']

class Admin:
    def __init__(self, host: str, port: int):
        self.host = host
        self.port = port
        self.connection = None

    def connect(self):
        """Connect to Kafka."""
        try:
            self.connection = KafkaAdminClient(bootstrap_servers=f'{self.host}:{self.port}')
            print("Connection successful")
        except:
            print("Error: Unable to connect to the kafka.")

    def disconnect(self) -> None:
        """Close the Kafka connection."""

        if self.is_connected() is False:
            return
        self.connection.close()
        self.connection = None
        print("admin: disconnected from kafka")

    def is_connected(self) -> bool:
        """Check if the Kafka connection is established."""
        return self.connection is not None

    def get_topic_partitions_for_cgw_id(self, topic: str, group: list, cgw_id: int) -> list:
        """Return the list of partitions assigned to a specific CGW shard ID for a specific topic."""
        partitions_list = []

        description = self.connection.describe_consumer_groups(group)

        for group_info in description:
            for member in group_info.members:
                if member.client_id == f'CGW{cgw_id}':
                    partitions = member.member_assignment.partitions()

                    for partition in partitions:
                        if partition.topic == topic:
                            part_id = partition.partition
                            partitions_list.append(part_id)

        return partitions_list
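A minimal usage sketch of the Admin helper above (the topic and consumer-group names are illustrative and must match the running CGW deployment):

admin = Admin("localhost", 9092)
admin.connect()
if admin.is_connected():
    # Partitions of the command topic currently owned by shard CGW0
    partitions = admin.get_topic_partitions_for_cgw_id("CnC", ["cgw_default_consumer_group"], 0)
    print(partitions)
admin.disconnect()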
@@ -30,8 +30,8 @@ def time(input: str) -> float:
def parse_args():
    parser = argparse.ArgumentParser(description="Creates entries in kafka.")

    parser.add_argument("-g", "--new-group", metavar=("GROUP-ID", "SHARD-ID", "NAME"),
                        nargs=3, action="append",
    parser.add_argument("-g", "--new-group", metavar=("GROUP-ID"),
                        nargs=1, action="append",
                        help="create a new group")
    parser.add_argument("-G", "--rm-group", metavar=("GROUP-ID"),
                        nargs=1, action="append",
@@ -92,11 +92,11 @@ def parse_args():
        send_to_macs=parsed_args.send_to_mac,
    )
    if parsed_args.new_group is not None:
        for group, shard, name in parsed_args.new_group:
        for (group,) in parsed_args.new_group:
            try:
                args.add_groups.append((group, int(shard), name))
                args.add_groups.append(group)
            except ValueError:
                parser.error(f"--new-group: failed to parse shard id \"{shard}\"")
                parser.error(f"--new-group: failed to parse {group}")
    if parsed_args.rm_group is not None:
        for (group,) in parsed_args.rm_group:
            args.del_groups.append(group)
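The --new-group option now takes a single GROUP-ID instead of the old GROUP-ID/SHARD-ID/NAME triple. A quick way to see the new parsed shape (a standalone sketch, independent of the file above):

import argparse

parser = argparse.ArgumentParser(description="Creates entries in kafka.")
parser.add_argument("-g", "--new-group", metavar=("GROUP-ID"),
                    nargs=1, action="append", help="create a new group")

parsed = parser.parse_args(["-g", "2000", "-g", "2001"])
# parsed.new_group == [['2000'], ['2001']] -- each entry is just the group id;
# shard id and group name are no longer supplied on the command line.
print(parsed.new_group)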
161
utils/kafka_producer/src/consumer.py
Normal file
@@ -0,0 +1,161 @@
from .utils import Message, MacRange
from .log import logger

from typing import List, Tuple
from kafka.structs import OffsetAndMetadata
import kafka
import time
import uuid
import sys
import re
import json


class Consumer:
    def __init__(self, db: str, topic: str, consumer_timeout: int) -> None:
        self.db = db
        self.conn = None
        self.topic = topic
        self.consumer_timeout = consumer_timeout
        self.message = Message()

    def __enter__(self) -> kafka.KafkaConsumer:
        return self.connect()

    def __exit__(self, exc_type, exc_val, exc_tb):
        self.disconnect()


    def connect(self) -> kafka.KafkaConsumer:
        if self.is_connected() is False:
            self.conn = kafka.KafkaConsumer(self.topic,
                                            bootstrap_servers=self.db,
                                            client_id="consumer_1",
                                            group_id="cgw_tests_consumer",
                                            auto_offset_reset='latest',
                                            enable_auto_commit=True,
                                            consumer_timeout_ms=self.consumer_timeout,
                                            value_deserializer=lambda m: json.loads(m.decode('utf-8')))
            logger.info("consumer: connected to kafka")
        else:
            logger.info("consumer: already connected to kafka")
        return self.conn

    def disconnect(self) -> None:
        if self.is_connected() is False:
            return
        self.conn.close()
        logger.info("consumer: disconnected from kafka")
        self.conn = None

    def is_connected(self) -> bool:
        return self.conn is not None

    def flush(self, timeout_ms: int = 1000):
        assert self.is_connected(), \
            f"consumer: Cannot flush kafka topic while not connected!"

        while True:
            # We explicitly use get_single_msg instead of <get_msgs>
            # to make sure we return as soon as we find result,
            # without waiting for potential T/O
            message = self.get_single_msg(timeout_ms=timeout_ms)
            if message is None:
                break

            logger.debug("Flushed kafka msg: %s key=%s value=%s ts=%s" %
                         (message.topic, message.key, message.value, message.timestamp))

    def get_msgs(self, timeout_ms: int = 12000):
        res_list = []

        assert self.is_connected(),\
            f"consumer: Cannot get Kafka result msg, Not connected!"

        while True:
            # We explicitly use get_single_msg instead of <get_msgs>
            # to make sure we return as soon as we find result,
            # without waiting for potential T/O
            message = self.get_single_msg(timeout_ms=timeout_ms)
            if message is None:
                break

            res_list.append(message)
            logger.debug("consumer: Recv kafka msg: %s key=%s value=%s ts=%s" %
                         (message.topic, message.key, message.value, message.timestamp))

        return res_list

    def get_infra_request_result_msg(self, uuid_val: int, timeout_ms: int = 12000):
        res_uuid = str(uuid.UUID(int=uuid_val))

        assert self.is_connected(),\
            f"consumer: Cannot get Kafka result msg, Not connected!"

        while True:
            # We explicitly use get_single_msg instead of <get_msgs>
            # to make sure we return as soon as we find result,
            # without waiting for potential T/O
            message = self.get_single_msg(timeout_ms=timeout_ms)
            if message is None:
                break

            logger.debug("Flushed kafka msg: %s key=%s value=%s ts=%s" %
                         (message.topic, message.key, message.value, message.timestamp))
            if 'uuid' in message.value.keys():
                if res_uuid == message.value['uuid'] and message.value['type'] == 'infra_request_result':
                    return message
        return None

    def get_result_msg(self, uuid_val: int, timeout_ms: int = 12000):
        res_uuid = str(uuid.UUID(int=uuid_val))

        assert self.is_connected(),\
            f"consumer: Cannot get Kafka result msg, Not connected!"

        while True:
            # We explicitly use get_single_msg instead of <get_msgs>
            # to make sure we return as soon as we find result,
            # without waiting for potential T/O
            message = self.get_single_msg(timeout_ms=timeout_ms)
            if message is None:
                break

            logger.debug("Flushed kafka msg: %s key=%s value=%s ts=%s" %
                         (message.topic, message.key, message.value, message.timestamp))
            if 'uuid' in message.value.keys():
                if res_uuid == message.value['uuid']:
                    return message
        return None

    def get_single_msg(self, timeout_ms: int = 12000):
        assert self.is_connected(),\
            f"consumer: Cannot get Kafka result msg, Not connected!"

        msg = self.conn.poll(timeout_ms=timeout_ms, max_records=1)
        for partition, msgs in msg.items():
            for m in msgs:
                return m

        return None

    def get_msg_by_substring(self, substring: int, uuid_val: int, timeout_ms: int = 12000):
        res_uuid = uuid.UUID(int=uuid_val)

        assert self.is_connected(),\
            f"Cannot get Kafka result msg, Not connected!"

        while True:
            # We explicitly use get_single_msg instead of <get_msgs>
            # to make sure we return as soon as we find result,
            # without waiting for potential T/O
            message = self.get_single_msg(timeout_ms=timeout_ms)
            if message is None:
                break

            if re.search(substring, message.value):
                logger.debug("Found '%s' in kafka msg: %s key=%s value=%s ts=%s" %
                             (substring, message.topic, message.key, message.value, message.timestamp))
                return message

        return None
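A minimal usage sketch of the Consumer above (broker address, results topic name and uuid value are illustrative):

consumer = Consumer("localhost:9092", "CnC_Res", consumer_timeout=12000)
with consumer:
    consumer.flush()                                   # drop stale results first
    res = consumer.get_result_msg(uuid_val=1, timeout_ms=12000)
    if res is not None:
        print("request 1 answered:", res.value)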
@@ -1,4 +1,4 @@
from .utils import Message, MacRange
from .utils import Message, MacRange, UCentralConfigRequest
from .log import logger

from typing import List, Tuple
@@ -6,14 +6,123 @@ import kafka
import time
import uuid
import sys
import random
import json


class Producer:
    @staticmethod
    def device_message_reboot(mac: str, id: int = None):
        msg = {}
        params = {}

        if mac is None:
            raise Exception('Cannot format message without MAC specified')

        if id is None:
            id = 1

        params["serial"] = mac
        params["when"] = 0

        msg["jsonrpc"] = "2.0"
        msg["method"] = "reboot"
        msg["params"] = params
        msg["id"] = id

        return msg

    @staticmethod
    def device_message_factory(mac: str, id: int = None, keep_rediretor: bool = None):
        msg = {}
        params = {}

        if mac is None:
            raise Exception('Cannot format message without MAC specified')

        if id is None:
            id = 1

        if keep_rediretor is None:
            keep_rediretor = True

        params["serial"] = mac
        params["when"] = 0
        params["keep_rediretor"] = keep_rediretor

        msg["jsonrpc"] = "2.0"
        msg["method"] = "factory"
        msg["params"] = params
        msg["id"] = id

        return msg

    @staticmethod
    def device_message_ping(mac: str, id: int = None):
        msg = {}
        params = {}

        if mac is None:
            raise Exception('Cannot format message without MAC specified')

        if id is None:
            id = 1

        params["serial"] = mac

        msg["jsonrpc"] = "2.0"
        msg["method"] = "ping"
        msg["params"] = params
        msg["id"] = id

        return msg

    def device_message_config_ap_basic(self, mac: str, id: int = None) -> str:
        if mac is None:
            raise Exception('Cannot format message without MAC specified')

        if id is None:
            id = 1

        msg = self.ucentral_configs.get_ap_basic_cfg(mac, id);
        return json.loads(msg)

    def device_message_config_ap_basic_invalid(self, mac: str, id: int = None) -> str:
        if mac is None:
            raise Exception('Cannot format message without MAC specified')

        if id is None:
            id = 1

        msg = self.ucentral_configs.get_ap_basic_invalid_cfg(mac, id);
        return json.loads(msg)

    def device_message_config_switch_basic(self, mac: str, id: int = None) -> str:
        if mac is None:
            raise Exception('Cannot format message without MAC specified')

        if id is None:
            id = 1

        msg = self.ucentral_configs.get_switch_basic_cfg(mac, id);
        return json.loads(msg)

    def device_message_config_switch_basic_invalid(self, mac: str, id: int = None) -> str:
        if mac is None:
            raise Exception('Cannot format message without MAC specified')

        if id is None:
            id = 1

        msg = self.ucentral_configs.get_switch_basic_invalid_cfg(mac, id);
        return json.loads(msg)

    def __init__(self, db: str, topic: str) -> None:
        self.db = db
        self.conn = None
        self.topic = topic
        self.message = Message()
        self.ucentral_configs = UCentralConfigRequest()

    def __enter__(self) -> kafka.KafkaProducer:
        return self.connect()
@@ -22,28 +131,105 @@ class Producer:
        self.disconnect()

    def connect(self) -> kafka.KafkaProducer:
        if self.conn is None:
            self.conn = kafka.KafkaProducer(bootstrap_servers=self.db, client_id="producer")
            logger.info("connected to kafka")
        if self.is_connected() is False:
            self.conn = kafka.KafkaProducer(
                bootstrap_servers=self.db,
                client_id="producer",
                max_block_ms=12000,
                request_timeout_ms=12000)
            logger.info("producer: connected to kafka")
        else:
            logger.info("already connected to kafka")
            logger.info("producer: already connected to kafka")
            raise Exception('')
        return self.conn

    def disconnect(self) -> None:
        if self.conn is None:
        if self.is_connected() is False:
            return
        self.conn.close()
        logger.info("disconnected from kafka")
        logger.info("producer: disconnected from kafka")
        self.conn = None

    def handle_group_creation(self, create: List[Tuple[str, int, str]], delete: List[str]) -> None:
    def is_connected(self) -> bool:
        return self.conn is not None

    def handle_single_group_delete(self, group: str, uuid_val: int = None):
        if group is None:
            raise Exception('producer: Cannot destroy group without group_id specified!')

        self.conn.send(self.topic, self.message.group_delete(group, uuid_val),
                       bytes(group, encoding="utf-8"))
        self.conn.flush()

    def handle_single_group_create(self, group: str, uuid_val: int = None, shard_id: int = None):
        if group is None:
            raise Exception('producer: Cannot create new group without group id specified!')

        if shard_id is None:
            self.conn.send(self.topic, self.message.group_create(group, uuid_val),
                           bytes(group, encoding="utf-8"))
        else:
            self.conn.send(self.topic, self.message.group_create_to_shard(group, shard_id, uuid_val),
                           bytes(group, encoding="utf-8"))
        self.conn.flush()

    def handle_group_creation(self, create: List[str], delete: List[str]) -> None:
        with self as conn:
            for group, shard_id, name in create:
                conn.send(self.topic, self.message.group_create(group, shard_id, name),
            for group in create:
                conn.send(self.topic, self.message.group_create(group),
                          bytes(group, encoding="utf-8"))
            for group in delete:
                conn.send(self.topic, self.message.group_delete(group),
                          bytes(group, encoding="utf-8"))
            conn.flush()

    def handle_single_device_assign(self, group: str, mac: str, uuid_val: int):
        if group is None:
            raise Exception('producer: Cannot assign infra to group without group id specified!')

        if mac is None:
            raise Exception('producer: Cannot assign infra to group without infra MAC specified!')

        mac_range = MacRange(mac)

        self.conn.send(self.topic, self.message.add_dev_to_group(group, mac_range, uuid_val),
                       bytes(group, encoding="utf-8"))
        self.conn.flush()

    def handle_single_device_deassign(self, group: str, mac: str, uuid_val: int):
        if group is None:
            raise Exception('Cannot deassign infra from group without group id specified!')

        if mac is None:
            raise Exception('Cannot deassign infra from group without infra MAC specified!')

        mac_range = MacRange(mac)

        self.conn.send(self.topic, self.message.remove_dev_from_group(group, mac_range, uuid_val),
                       bytes(group, encoding="utf-8"))
        self.conn.flush()

    def handle_multiple_devices_assign(self, group: str, mac_list: list, uuid_val: int):
        if group is None:
            raise Exception('producer: Cannot assign infra to group without group id specified!')

        if mac_list is None:
            raise Exception('producer: Cannot assign infra to group without infra MAC list specified!')

        self.conn.send(self.topic, self.message.add_devices_to_group(group, mac_list, uuid_val),
                       bytes(group, encoding="utf-8"))
        self.conn.flush()

    def handle_multiple_devices_deassign(self, group: str, mac_list: list, uuid_val: int):
        if group is None:
            raise Exception('Cannot deassign infra from group without group id specified!')

        if mac_list is None:
            raise Exception('Cannot deassign infra from group without infra MAC list specified!')

        self.conn.send(self.topic, self.message.remove_dev_from_group(group, mac_list, uuid_val),
                       bytes(group, encoding="utf-8"))
        self.conn.flush()

    def handle_device_assignment(self, add: List[Tuple[str, MacRange]], remove: List[Tuple[str, MacRange]]) -> None:
        with self as conn:
@@ -54,6 +240,12 @@ class Producer:
            for group, mac_range in remove:
                conn.send(self.topic, self.message.remove_dev_from_group(group, mac_range),
                          bytes(group, encoding="utf-8"))
            conn.flush()

    def handle_single_device_message(self, message: dict, group: str, mac: str, uuid_val: int) -> None:
        self.conn.send(self.topic, self.message.to_device(group, mac, message, 0, uuid_val),
                       bytes(group, encoding="utf-8"))
        self.conn.flush()

    def handle_device_messages(self, message: dict, group: str, mac_range: MacRange,
                               count: int, time_s: int, interval_s: int) -> None:
@@ -69,6 +261,7 @@ class Producer:
            for mac in mac_range:
                conn.send(self.topic, self.message.to_device(group, mac, message, seq),
                          bytes(group, encoding="utf-8"))
            conn.flush()
            #time.sleep(interval_s)
            #if time.time() > end:
            #    break
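A minimal usage sketch of the Producer above (broker address, command topic, group id, MAC and uuid values are illustrative; run from utils/kafka_producer so the kafka_data templates resolve):

producer = Producer("localhost:9092", "CnC")
producer.connect()
producer.handle_single_group_create("2000", uuid_val=1)                         # new empty group
producer.handle_single_device_assign("2000", "00:00:00:00:00:01", uuid_val=2)   # add one infra
reboot = Producer.device_message_reboot("00:00:00:00:00:01")
producer.handle_single_device_message(reboot, "2000", "00:00:00:00:00:01", uuid_val=3)
producer.disconnect()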
@@ -1,6 +1,5 @@
from dataclasses import dataclass
from typing import List, Tuple
from typing import Tuple
import copy
import json
import uuid
@@ -23,6 +22,8 @@ class MacRange:
    Raises ValueError
    """
    def __init__(self, input: str = "XX:XX:XX:XX:XX:XX") -> None:
        input = input.replace("-", ":", 5)

        self.__base_as_num, self.__len = self.__parse_input(input.upper())
        self.__idx = 0

@@ -53,7 +54,9 @@ class MacRange:

    @staticmethod
    def mac2num(mac: str) -> int:
        return int(mac.replace(":", ""), base=16)
        mac = mac.replace(":", "", 5)
        mac = mac.replace("-", "", 5)
        return int(mac, base=16)

    @staticmethod
    def num2mac(mac: int) -> str:
@@ -76,19 +79,65 @@ class MacRange:
            return self.mac2num(base), int(count)
        return self.mac2num(input), 1

class UCentralConfigRequest:
    TEMPLATE_FILE_AP_BASIC = "./kafka_data/cfg_ap_basic.json"
    TEMPLATE_FILE_AP_BASIC_INVALID = "./kafka_data/cfg_ap_basic_invalid.json"
    TEMPLATE_FILE_SWITCH_BASIC = "./kafka_data/cfg_switch_basic.json"
    TEMPLATE_FILE_SWITCH_BASIC_INVALID = "./kafka_data/cfg_switch_basic_invalid.json"

    @staticmethod
    def parse_uuid(uuid_val = None) -> str:
        if uuid_val is None:
            return str(1)

        return str(uuid_val)

    def __init__(self) -> None:
        with open(self.TEMPLATE_FILE_AP_BASIC) as f:
            self.ap_basic = f.read()
        with open(self.TEMPLATE_FILE_AP_BASIC_INVALID) as f:
            self.ap_basic_invalid = f.read()
        with open(self.TEMPLATE_FILE_SWITCH_BASIC) as f:
            self.switch_basic = f.read()
        with open(self.TEMPLATE_FILE_SWITCH_BASIC_INVALID) as f:
            self.switch_basic_invalid = f.read()

    def get_ap_basic_cfg(self, mac: str, uuid_val = None):
        req = copy.deepcopy(self.ap_basic);
        req = req.replace("MAC_PLACEHOLDER", mac)
        req = req.replace("UUID_PLACEHOLDER", UCentralConfigRequest.parse_uuid(uuid_val))
        return req

    def get_ap_basic_invalid_cfg(self, mac: str, uuid_val = None):
        req = copy.deepcopy(self.ap_basic_invalid);
        req = req.replace("MAC_PLACEHOLDER", mac)
        req = req.replace("UUID_PLACEHOLDER", UCentralConfigRequest.parse_uuid(uuid_val))
        return req

    def get_switch_basic_cfg(self, mac: str, uuid_val = None):
        req = copy.deepcopy(self.switch_basic);
        req = req.replace("MAC_PLACEHOLDER", mac)
        req = req.replace("UUID_PLACEHOLDER", UCentralConfigRequest.parse_uuid(uuid_val))
        return req

    def get_switch_basic_invalid_cfg(self, mac: str, uuid_val = None):
        req = copy.deepcopy(self.switch_basic_invalid);
        req = req.replace("MAC_PLACEHOLDER", mac)
        req = req.replace("UUID_PLACEHOLDER", UCentralConfigRequest.parse_uuid(uuid_val))
        return req

class Message:
    TEMPLATE_FILE = "./data/message_template.json"
    TEMPLATE_FILE = "./kafka_data/message_template.json"
    GROUP_ADD = "add_group"
    GROUP_ADD_TO_SHARD = "add_group_to_shard"
    GROUP_DEL = "del_group"
    DEV_TO_GROUP = "add_to_group"
    DEV_FROM_GROUP = "del_from_group"
    TO_DEVICE = "message_device"
    TO_DEVICE = "message_infra"
    GROUP_ID = "infra_group_id"
    GROUP_NAME = "infra_name"
    SHARD_ID = "infra_shard_id"
    DEV_LIST = "infra_group_infra_devices"
    MAC = "mac"
    SHARD_ID = "shard_id"
    DEV_LIST = "infra_group_infras"
    MAC = "infra_group_infra"
    DATA = "msg"
    MSG_UUID = "uuid"

@@ -96,35 +145,61 @@ class Message:
        with open(self.TEMPLATE_FILE) as f:
            self.templates = json.loads(f.read())

    def group_create(self, id: str, shard_id: int, name: str) -> bytes:
    @staticmethod
    def parse_uuid(uuid_val = None) -> str:
        if uuid_val is None:
            return str(uuid.uuid1())

        return str(uuid.UUID(int=uuid_val))

    def group_create(self, id: str, uuid_val: int = None) -> bytes:
        msg = copy.copy(self.templates[self.GROUP_ADD])
        msg[self.GROUP_ID] = id
        msg[self.SHARD_ID] = shard_id
        msg[self.GROUP_NAME] = name
        msg[self.MSG_UUID] = str(uuid.uuid1())
        msg[self.MSG_UUID] = Message.parse_uuid(uuid_val)
        return json.dumps(msg).encode('utf-8')

    def group_delete(self, id: str) -> bytes:
    def group_create_to_shard(self, id: str, shard_id: int, uuid_val: int = None) -> bytes:
        msg = copy.copy(self.templates[self.GROUP_ADD_TO_SHARD])
        msg[self.GROUP_ID] = id
        msg[self.SHARD_ID] = shard_id
        msg[self.MSG_UUID] = Message.parse_uuid(uuid_val)
        return json.dumps(msg).encode('utf-8')

    def group_delete(self, id: str, uuid_val: int = None) -> bytes:
        msg = copy.copy(self.templates[self.GROUP_DEL])
        msg[self.GROUP_ID] = id
        msg[self.MSG_UUID] = str(uuid.uuid1())
        msg[self.MSG_UUID] = Message.parse_uuid(uuid_val)
        return json.dumps(msg).encode('utf-8')

    def add_dev_to_group(self, id: str, mac_range: MacRange) -> bytes:
    def add_dev_to_group(self, id: str, mac_range: MacRange, uuid_val: int = None) -> bytes:
        msg = copy.copy(self.templates[self.DEV_TO_GROUP])
        msg[self.GROUP_ID] = id
        msg[self.DEV_LIST] = list(mac_range)
        msg[self.MSG_UUID] = str(uuid.uuid1())
        msg[self.MSG_UUID] = Message.parse_uuid(uuid_val)
        return json.dumps(msg).encode('utf-8')

    def remove_dev_from_group(self, id: str, mac_range: MacRange) -> bytes:
    def remove_dev_from_group(self, id: str, mac_range: MacRange, uuid_val: int = None) -> bytes:
        msg = copy.copy(self.templates[self.DEV_FROM_GROUP])
        msg[self.GROUP_ID] = id
        msg[self.DEV_LIST] = list(mac_range)
        msg[self.MSG_UUID] = str(uuid.uuid1())
        msg[self.MSG_UUID] = Message.parse_uuid(uuid_val)
        return json.dumps(msg).encode('utf-8')

    def to_device(self, id: str, mac: str, data, sequence: int = 0):
    def add_devices_to_group(self, id: str, mac_list: list, uuid_val: int = None) -> bytes:
        msg = copy.copy(self.templates[self.DEV_TO_GROUP])
        msg[self.GROUP_ID] = id
        msg[self.DEV_LIST] = mac_list
        msg[self.MSG_UUID] = Message.parse_uuid(uuid_val)
        return json.dumps(msg).encode('utf-8')

    def remove_devices_from_group(self, id: str, mac_list: list, uuid_val: int = None) -> bytes:
        msg = copy.copy(self.templates[self.DEV_FROM_GROUP])
        msg[self.GROUP_ID] = id
        msg[self.DEV_LIST] = mac_list
        msg[self.MSG_UUID] = Message.parse_uuid(uuid_val)
        return json.dumps(msg).encode('utf-8')

    def to_device(self, id: str, mac: str, data, sequence: int = 0, uuid_val: int = None):
        msg = copy.copy(self.templates[self.TO_DEVICE])
        msg[self.GROUP_ID] = id
        msg[self.MAC] = mac
@@ -132,7 +207,73 @@ class Message:
            msg[self.DATA] = data
        else:
            msg[self.DATA] = {"data": data}
        msg[self.MSG_UUID] = str(uuid.uuid1(node=MacRange.mac2num(mac), clock_seq=sequence))
        #msg[self.MSG_UUID] = str(uuid.uuid1(node=MacRange.mac2num(mac), clock_seq=sequence))
        msg[self.MSG_UUID] = Message.parse_uuid(uuid_val)
        return json.dumps(msg).encode('utf-8')


class MalformedMessage:
    TEMPLATE_FILE = "./kafka_data/malformed_message_template.json"
    GROUP_ADD = "add_group"
    GROUP_ADD_TO_SHARD = "add_group_to_shard"
    GROUP_DEL = "del_group"
    DEV_TO_GROUP = "add_to_group"
    DEV_FROM_GROUP = "del_from_group"
    TO_DEVICE = "message_infra"
    SHARD_ID = "shard_id"
    DEV_LIST = "infra_group_infras"
    MAC = "infra_group_infra"
    DATA = "msg"
    MSG_UUID = "uuid"

    def __init__(self) -> None:
        with open(self.TEMPLATE_FILE) as f:
            self.templates = json.loads(f.read())

    @staticmethod
    def parse_uuid(uuid_val = None) -> str:
        if uuid_val is None:
            return str(uuid.uuid1())

        return str(uuid.UUID(int=uuid_val))

    def group_create(self, uuid_val: int = None) -> bytes:
        msg = copy.copy(self.templates[self.GROUP_ADD])
        msg[self.MSG_UUID] = Message.parse_uuid(uuid_val)
        return json.dumps(msg).encode('utf-8')

    def group_create_to_shard(self, shard_id: int, uuid_val: int = None) -> bytes:
        msg = copy.copy(self.templates[self.GROUP_ADD_TO_SHARD])
        msg[self.SHARD_ID] = shard_id
        msg[self.MSG_UUID] = Message.parse_uuid(uuid_val)
        return json.dumps(msg).encode('utf-8')

    def group_delete(self, uuid_val: int = None) -> bytes:
        msg = copy.copy(self.templates[self.GROUP_DEL])
        msg[self.MSG_UUID] = Message.parse_uuid(uuid_val)
        return json.dumps(msg).encode('utf-8')

    def add_dev_to_group(self, mac_range: MacRange, uuid_val: int = None) -> bytes:
        msg = copy.copy(self.templates[self.DEV_TO_GROUP])
        msg[self.DEV_LIST] = list(mac_range)
        msg[self.MSG_UUID] = Message.parse_uuid(uuid_val)
        return json.dumps(msg).encode('utf-8')

    def remove_dev_from_group(self, mac_range: MacRange, uuid_val: int = None) -> bytes:
        msg = copy.copy(self.templates[self.DEV_FROM_GROUP])
        msg[self.DEV_LIST] = list(mac_range)
        msg[self.MSG_UUID] = Message.parse_uuid(uuid_val)
        return json.dumps(msg).encode('utf-8')

    def to_device(self, mac: str, data, uuid_val: int = None):
        msg = copy.copy(self.templates[self.TO_DEVICE])
        msg[self.MAC] = mac
        if type(data) is dict:
            msg[self.DATA] = data
        else:
            msg[self.DATA] = {"data": data}

        msg[self.MSG_UUID] = Message.parse_uuid(uuid_val)
        return json.dumps(msg).encode('utf-8')
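Two small behaviours of the helpers above worth illustrating (a sketch; the exact MAC string formatting depends on num2mac, which is outside this hunk):

# "<base MAC>^<count>" expands to <count> consecutive MAC addresses
for mac in MacRange("00-00-00-00-00-01^3"):
    print(mac)                     # three consecutive addresses starting at the base

# integer uuid_val values become fixed, reproducible UUID strings
print(Message.parse_uuid(5))       # 00000000-0000-0000-0000-000000000005
print(Message.parse_uuid())        # random time-based uuid1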
126
utils/psql_client/psql_client.py
Normal file
@@ -0,0 +1,126 @@
import psycopg2
from psycopg2 import OperationalError, sql
from typing import List, Tuple

class PostgreSQLClient:
    def __init__(self, host: str, port: int, database: str, user: str, password: str):
        """Initialize the PostgreSQL client with the connection parameters."""
        self.host = host
        self.database = database
        self.user = user
        self.password = password
        self.port = port
        self.connection = None
        self.cursor = None

    def connect(self):
        """Connect to the PostgreSQL database."""
        try:
            self.connection = psycopg2.connect(
                host=self.host,
                database=self.database,
                user=self.user,
                password=self.password,
                port=self.port
            )
            self.cursor = self.connection.cursor()
            print("Connection successful")
        except OperationalError as e:
            print(f"Error: Unable to connect to the database. {e}")

    def execute_query(self, query: str, params=None):
        """Execute a single query (SELECT, INSERT, UPDATE, DELETE, etc.)."""
        if not self.cursor:
            print("Error: No database connection established.")
            return None

        try:
            # Use sql.SQL for parameterized queries to avoid SQL injection
            if params:
                self.cursor.execute(sql.SQL(query), params)
            else:
                self.cursor.execute(query)
            self.connection.commit()
            print("Query executed successfully")
        except Exception as e:
            print(f"Error executing query: {e}")
            self.connection.rollback()
            return None

    def fetchone(self):
        """Fetch one row from the last executed query (used with SELECT)."""
        result = None

        if self.cursor:
            try:
                result = self.cursor.fetchone()
            except Exception as e:
                print(f"Error executing fetchone: {e}")
        else:
            print("Error: No database connection or query executed.")

        return result

    def fetchall(self):
        """Fetch all rows from the last executed query (used with SELECT)."""
        result = None

        if self.cursor:
            try:
                result = self.cursor.fetchall()
            except Exception as e:
                print(f"Error executing fetchall: {e}")
        else:
            print("Error: No database connection or query executed.")

        return result

    def get_infrastructure_group(self, group_id: int) -> Tuple[int, int, int]:
        """Fetch group record by group id."""
        group_info = tuple()

        self.execute_query(f"select * from infrastructure_groups WHERE id = {group_id};")
        group_info = self.fetchone()

        return group_info

    def get_all_infrastructure_groups(self) -> List[Tuple[int, int, int]]:
        """Fetch all group records."""
        group_list = list()

        self.execute_query(f"select * from infrastructure_groups;")
        group_list = self.fetchall()

        return group_list

    def get_infra(self, mac: str) -> Tuple[str, int]:
        """Fetch infra record by infra mac."""
        infra_info = None

        self.execute_query(f"select * from infras WHERE mac = \'{mac}\';")
        infra_info = self.fetchone()

        # change mac format from "XX:XX:XX:XX:XX:XX" to "XX-XX-XX-XX-XX-XX"
        if infra_info:
            temp_infra = list(infra_info)
            temp_infra[0] = temp_infra[0].replace(":", "-", 5)
            infra_info = tuple(temp_infra)

        return infra_info

    def get_infras_by_group_id(self, group_id) -> List[Tuple[str, int]]:
        """Fetch infra records by group id."""
        infras_info = None

        self.execute_query(f"select * from infras WHERE infra_group_id = \'{group_id}\';")
        infras_info = self.fetchall()

        return infras_info

    def disconnect(self):
        """Close the cursor and connection."""
        if self.cursor:
            self.cursor.close()
        if self.connection:
            self.connection.close()
        print("Connection closed.")
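A minimal usage sketch of the client above (host, credentials and database name are illustrative and must match the CGW PostgreSQL instance):

client = PostgreSQLClient(host="localhost", port=5432,
                          database="cgw", user="cgw", password="123")
client.connect()
print(client.get_all_infrastructure_groups())      # all group rows
print(client.get_infras_by_group_id(2000))         # infras of one group
print(client.get_infra("00:00:00:00:00:01"))       # single infra, MAC re-formatted with '-'
client.disconnect()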
100
utils/redis_client/redis_client.py
Normal file
@@ -0,0 +1,100 @@
import redis
import json

class RedisClient:
    def __init__(self, host: str, port: int):
        """Initialize the Redis client with the connection parameters."""
        self.host = host
        self.port = port
        self.connection = None

    def connect(self):
        """Connect to the Redis database."""
        try:
            # Establish connection to Redis server
            self.connection = redis.StrictRedis(
                host=self.host, port=self.port,
                db=0, decode_responses=True, socket_timeout=5.0,
                socket_connect_timeout=2.0)
            # Check if the connection is successful
            self.connection.ping()
            print(f"Connected to Redis server at {self.host}:{self.port}")
        except redis.ConnectionError as e:
            print(f"Unable to connect to Redis: {e}")
            self.connection = None

    def select_db(self, db_id: int):
        """Selects a different database using the SELECT command."""
        if self.connection:
            try:
                self.connection.select(db_id)
                print(f"Switched to database {db_id}")
            except redis.RedisError as e:
                print(f"Error selecting database {db_id}: {e}")
        else:
            print("Redis client not connected.")

    def get_infrastructure_group(self, group_id: int) -> dict:
        return self.connection.hgetall(f"group_id_{group_id}")

    def get_shard(self, shard_id: int) -> dict:
        return self.connection.hgetall(f"shard_id_{shard_id}")

    def get_infra(self, shard_id: int, mac: str) -> dict:
        infra = None
        self.select_db(1)

        infra = self.connection.get(f"shard_id_{shard_id}|{mac}")
        if infra:
            infra = json.loads(infra)

        self.select_db(0)

        return infra

    def get(self, key: str) -> str:
        """Gets the value of a key."""
        result = None

        if self.connection:
            try:
                result = self.connection.get(key)
            except redis.RedisError as e:
                print(f"Error getting {key}: {e}")
        else:
            print("Redis client not connected.")

        return result

    def hgetall(self, hash_name: str) -> dict:
        """Gets all fields and values from a Redis hash."""
        result = None

        if self.connection:
            try:
                result = self.connection.hgetall(hash_name)
            except redis.RedisError as e:
                print(f"Error getting all fields from hash {hash_name}: {e}")
        else:
            print("Redis client not connected.")

        return result

    def exists(self, key: str) -> bool:
        """Checks if a key exists in Redis."""
        if self.connection:
            try:
                return self.connection.exists(key)
            except redis.RedisError as e:
                print(f"Error checking existence of {key}: {e}")
        else:
            print("Redis client not connected.")
        return False

    def disconnect(self):
        """Closes the Redis connection."""
        if self.connection:
            self.connection.close()
            print("Connection closed.")
        else:
            print("No active Redis client connection to close.")
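A minimal usage sketch of the client above (host, port, group and shard ids, and the MAC are illustrative):

client = RedisClient("localhost", 6379)
client.connect()
print(client.get_infrastructure_group(2000))       # hash "group_id_2000" in db 0
print(client.get_shard(0))                         # hash "shard_id_0" in db 0
print(client.get_infra(0, "00:00:00:00:00:01"))    # JSON value in db 1, keyed by shard and MAC
client.disconnect()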