Merge pull request #109 from Telecominfraproject/feat/initial_isolated_tests_execution
Feat/initial isolated tests execution
Makefile
@@ -16,9 +16,9 @@ CGW_BUILD_ENV_IMG_TAG := $(shell cat Dockerfile | sha1sum | awk '{print substr($
 
 CGW_BUILD_ENV_IMG_CONTAINER_NAME := "cgw_build_env"
 
-.PHONY: all cgw-app cgw-build-env-img cgw-img stop clean run run_docker_services start-multi-cgw
+.PHONY: all cgw-app cgw-build-env-img cgw-img stop clean run run_docker_services start-multi-cgw run-tests
 
-all: cgw-build-env-img run_docker_services run
+all: start-multi-cgw
 	@echo "uCentral CGW build app (container) done"
 
 # Executed inside build-env
@@ -48,7 +48,7 @@ cgw-img: stop cgw-build-env-img
 		.
 	@echo Docker build done;
 
-stop:
+stop: stop-multi-cgw
 	@echo "Stopping / removing container ${CGW_IMG_CONTAINER_NAME}"
 	@docker stop ${CGW_IMG_CONTAINER_NAME} > /dev/null 2>&1 || true;
 	@docker container rm ${CGW_IMG_CONTAINER_NAME} > /dev/null 2>&1 || true;
@@ -77,3 +77,6 @@ stop-multi-cgw:
 
 run_docker_services:
 	@cd ./utils/docker/ && docker compose up -d
+
+run-tests:
+	@cd ./tests && ./run.sh
README.md
@@ -8,10 +8,14 @@ while others are required to be running for the CGW to operate.
 
 **NOTE**: while runtime CGW depends on services like kafka, redis and PGSQL, the *make* / *make all* targets
 would build a complete out-of-the-box setup with default configs and container params:
-- Kafka, Redis, PGSQL containers would be created and attached to the default - automatically created - *docker_cgw_network* network;
-  All three (and one additional - *init-broker-container* - needed for kafka topics initialization) will be created as part of a single
-  container project group.
-- CGW will be created as a separate standalone container, attached to the same *docker_cgw_network* network;
+- Kafka, Redis, PGSQL containers would be created and attached to the default - automatically created - *docker_cgw_multi_instances_network* network;
+  All three (and one additional - *init-broker-container* - needed for kafka topics initialization) are part of a single docker compose file.
+- CGW, while also part of the same docker compose file, is partially generated.
+  The reason is that multiple CGW instances can be created within a single compose file,
+  and thus the container details are generated.
+
+  More information about the compose generation can be found in the
+  'Automated multi-CGW instances start/stop with Docker Compose' topic.
 
 ## gRPC
 CGW utilizes gRPC to communicate with other CGW instances (referred to as Shards). This functionality does not depend on some external thirdparty services.
@@ -53,9 +57,11 @@ Two new docker images will be generated on host system:
 # Running
 The following script can be used to launch the CGW app
 ```console
-$ make run
+$ make
 ```
-The command creates and starts a docker container named 'openlan_cgw'
+The command creates and starts a docker container group consisting of the CGW services
+as well as the third-party services they depend on (redis, kafka, pgsql)
 
 To stop the container from running (remove it) use the following cmd:
 ```console
 $ make stop
@@ -170,9 +176,14 @@ Currently, tests should be run manually by changin PWD to *tests* and launching
 cd ./test
 ./run.sh
 ```
+or using the make target (added for convenience):
+```console
+make run-tests
+```
 *NOTE:* currently, tests are not running inside a container.
 This means that it's up to the caller to make sure tests can communicate with whatever CGW deployment is used, as well as the thirdparty services.
 E.g. tests running inside the *host* environment must be able to communicate with CGW, Redis, Kafka, PGSQL etc.
 To make sure tests can communicate with the CGW environment, tests are currently
 reaching the environment through ports exposed to the host system.
 e.g. for WSS - tests try to reach 'wss://localhost:15002' by default and so on.
 
 # Automated multi-CGW instances start/stop with Docker Compose
 Automated multi-CGW start/stop is based on the "docker-compose-template.yml.j2" file located inside the *utils/docker* directory.
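The testing note above says the tests reach the deployment through host-exposed ports, with only the WSS endpoint (wss://localhost:15002) named explicitly. The following pre-flight check is a minimal sketch that is not part of the repository; anything beyond the WSS port is an assumption about the local setup.

```python
# Hypothetical pre-flight helper (not part of the repository): verify that the
# host-exposed endpoints the tests rely on are reachable before running them.
import socket

def endpoint_reachable(host: str, port: int, timeout: float = 5.0) -> bool:
    """Return True if a TCP connection to host:port can be established."""
    try:
        with socket.create_connection((host, port), timeout=timeout):
            return True
    except OSError:
        return False

if __name__ == "__main__":
    # Only the WSS port (15002) is stated in the README; the ports for Redis,
    # Kafka and PGSQL depend on how the docker compose setup exposes them.
    ok = endpoint_reachable("localhost", 15002)
    print(f"CGW WSS on localhost:15002: {'reachable' if ok else 'NOT reachable'}")
```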
@@ -105,7 +105,24 @@ impl CGWConnectionProcessor {
         client_cn: MacAddress,
         allow_mismatch: bool,
     ) -> Result<()> {
-        let ws_stream = tokio_tungstenite::accept_async(tls_stream).await?;
+        let ws_stream = tokio::select! {
+            _val = tokio_tungstenite::accept_async(tls_stream) => {
+                match _val {
+                    Ok(s) => s,
+                    Err(e) => {
+                        error!("Failed to accept TLS stream from: {}! Reason: {}. Closing connection",
+                            self.addr, e);
+                        return Err(Error::ConnectionProcessor("Failed to accept TLS stream!"));
+                    }
+                }
+            }
+            // TODO: configurable duration (upon server creation)
+            _val = sleep(Duration::from_millis(15000)) => {
+                error!("Failed to accept TLS stream from: {}! Closing connection", self.addr);
+                return Err(Error::ConnectionProcessor("Failed to accept TLS stream for too long"));
+            }
+
+        };
 
         let (sink, mut stream) = ws_stream.split();
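The change above bounds how long the server waits for the WebSocket accept instead of blocking indefinitely. For readers of the Python test side, here is a rough analogue of the same timeout-guard idea (illustrative only, not from the repository), using asyncio.wait_for:

```python
# Illustrative analogue (not from the repository) of the timeout guard above:
# bound the time spent waiting for a handshake instead of blocking forever.
import asyncio

async def accept_with_timeout(handshake, timeout_s: float = 15.0):
    """Await a handshake coroutine, giving up after timeout_s seconds."""
    try:
        return await asyncio.wait_for(handshake, timeout=timeout_s)
    except asyncio.TimeoutError:
        raise ConnectionError("Failed to accept stream: handshake took too long")

async def _demo():
    async def never_completes():
        await asyncio.sleep(3600)  # simulate a peer that never finishes the handshake

    try:
        await accept_with_timeout(never_completes(), timeout_s=0.1)
    except ConnectionError as e:
        print(e)

asyncio.run(_demo())
```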
@@ -5,10 +5,10 @@ use std::{collections::HashMap, str::FromStr};
 use crate::cgw_errors::{Error, Result};
 
 use crate::cgw_ucentral_parser::{
-    CGWUCentralEvent, CGWUCentralEventLog, CGWUCentralEventState, CGWUCentralEventStateClients,
-    CGWUCentralEventStateClientsData, CGWUCentralEventStateClientsType,
-    CGWUCentralEventStateLLDPData, CGWUCentralEventStateLinks, CGWUCentralEventStatePort,
-    CGWUCentralEventType, CGWUCentralJRPCMessage, CGWUCentralEventReply
+    CGWUCentralEvent, CGWUCentralEventLog, CGWUCentralEventReply, CGWUCentralEventState,
+    CGWUCentralEventStateClients, CGWUCentralEventStateClientsData,
+    CGWUCentralEventStateClientsType, CGWUCentralEventStateLLDPData, CGWUCentralEventStateLinks,
+    CGWUCentralEventStatePort, CGWUCentralEventType, CGWUCentralJRPCMessage,
 };
 
 fn parse_lldp_data(
@@ -6,12 +6,14 @@ def cgw_metric_get(host: str = "localhost", port: int = 8080) -> str:
     metrics = ""
 
     try:
-        r = requests.get(f"http://{host}:{port}/metrics")
-        print("CGW metrics: " + str(r.status_code) + ', txt:' + r.text)
+        # Try to fetch metrics with a 5 second timeout value
+        r = requests.get(f"http://{host}:{port}/metrics", timeout=5)
+        print("CGW metrics ret code: " + str(r.status_code))
         assert r is not None and r.status_code == 200, \
             f"CGW metrics is not available"
         metrics = r.text
-    except:
+    except Exception as e:
+        print("CGW metrics: raised exception when tried to fetch metrics: " + str(e))
         raise Exception('CGW metrics fetch failed (Not running?)')
 
     return metrics
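A minimal usage sketch of the helper above, assuming it lives in a module importable as `metrics` (the module name is an assumption) and that CGW serves its metrics on localhost:8080 as in the function's defaults:

```python
# Usage sketch (assumptions: the helper above is importable as
# metrics.cgw_metric_get, and CGW serves metrics on localhost:8080).
from metrics import cgw_metric_get

metrics_text = cgw_metric_get(host="localhost", port=8080)

# Prometheus-style exposition text: one "name{labels} value" line per metric;
# lines starting with '#' are HELP/TYPE comments.
for line in metrics_text.splitlines():
    if line and not line.startswith("#"):
        print(line)
```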
@@ -4,3 +4,4 @@ pytest==8.3.3
 randmac==0.1
 psycopg2-binary==2.9.10
 redis==5.2.0
+requests==2.32.3
@@ -2,6 +2,14 @@
 
 # Separate exports for clearer visibility of _what exactly_
 # we're putting in python path
 
+rm -rf /tmp/cgw_tests_runner;
+mkdir /tmp/cgw_tests_runner && \
+	cp -rf ../tests /tmp/cgw_tests_runner/ && \
+	cp -rf ../utils /tmp/cgw_tests_runner/;
+
+cd /tmp/cgw_tests_runner/tests
+
 export PYTHONPATH="$PYTHONPATH:$PWD"
 export PYTHONPATH="$PYTHONPATH:$PWD/../utils"
@@ -126,7 +126,9 @@ class Device:
 
     def connect(self):
        if self._socket is None:
-            self._socket = client.connect(self.server_addr, ssl=self.ssl_context, open_timeout=7200)
+            # 20 seconds is more than enough to establish the connection and
+            # exchange handshakes.
+            self._socket = client.connect(self.server_addr, ssl=self.ssl_context, open_timeout=20, close_timeout=20)
        return self._socket
 
     def disconnect(self):
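A standalone sketch of the same bounded-connect idea, assuming the async `websockets` client and the host-exposed endpoint wss://localhost:15002 from the README; the relaxed certificate handling is an assumption about a self-signed test setup:

```python
# Illustrative sketch (not from the repository): open a WSS connection to the
# host-exposed CGW endpoint with bounded open/close timeouts so an unreachable
# endpoint fails quickly instead of hanging the test run.
import asyncio
import ssl

import websockets  # pip install websockets

async def probe_wss(uri: str = "wss://localhost:15002") -> None:
    ssl_ctx = ssl.create_default_context()
    # Assumption: the test deployment uses self-signed certificates.
    ssl_ctx.check_hostname = False
    ssl_ctx.verify_mode = ssl.CERT_NONE

    async with websockets.connect(uri, ssl=ssl_ctx,
                                  open_timeout=20, close_timeout=20):
        print(f"connected to {uri}")

asyncio.run(probe_wss())
```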
@@ -132,7 +132,11 @@ class Producer:
 
     def connect(self) -> kafka.KafkaProducer:
         if self.is_connected() is False:
-            self.conn = kafka.KafkaProducer(bootstrap_servers=self.db, client_id="producer")
+            self.conn = kafka.KafkaProducer(
+                bootstrap_servers=self.db,
+                client_id="producer",
+                max_block_ms=12000,
+                request_timeout_ms=12000)
             logger.info("producer: connected to kafka")
         else:
             logger.info("producer: already connected to kafka")
@@ -12,7 +12,10 @@ class RedisClient:
         """Connect to the Redis database."""
         try:
             # Establish connection to Redis server
-            self.connection = redis.StrictRedis(host=self.host, port=self.port, db=0, decode_responses=True)
+            self.connection = redis.StrictRedis(
+                host=self.host, port=self.port,
+                db=0, decode_responses=True, socket_timeout=5.0,
+                socket_connect_timeout=2.0)
             # Check if the connection is successful
             self.connection.ping()
             print(f"Connected to Redis server at {self.host}:{self.port}")