Compare commits

..

1 Commits

Author SHA1 Message Date
jacky_chang
7df000f08c support EC platform 2023-10-26 11:11:14 +08:00
105 changed files with 3584 additions and 28059 deletions

6
.gitignore vendored
View File

@@ -5,9 +5,3 @@ src/docker/ucentral-client
*.orig
*.rej
docker/*
# Test artifacts and generated files
tests/config-parser/test-config-parser
tests/config-parser/test-report.*
tests/config-parser/test-results.txt
output/

View File

@@ -1,12 +1,9 @@
FROM debian:bullseye
FROM debian:buster
LABEL Description="Ucentral client (Build) environment"
ARG HOME /root
ARG SCHEMA="4.1.0-rc1"
ARG SCHEMA_VERSION="v${SCHEMA}"
ARG SCHEMA_ZIP_FILE="${SCHEMA_VERSION}.zip"
ARG SCHEMA_UNZIPPED="ols-ucentral-schema-${SCHEMA}"
ARG OLS_SCHEMA_SRC="https://github.com/Telecominfraproject/ols-ucentral-schema/archive/refs/tags/${SCHEMA_ZIP_FILE}"
ARG EXTERNAL_LIBS ${HOME}/ucentral-external-libs
SHELL ["/bin/bash", "-c"]
RUN apt-get update -q -y && apt-get -q -y --no-install-recommends install \
@@ -18,26 +15,19 @@ RUN apt-get update -q -y && apt-get -q -y --no-install-recommends install \
libcurl4-openssl-dev \
libev-dev \
libssl-dev \
libnl-route-3-dev \
libnl-3-dev \
apt-utils \
git \
wget \
autoconf \
libtool \
pkg-config \
libjsoncpp-dev \
unzip \
python3 \
python3-jsonschema
libjsoncpp-dev
RUN git config --global http.sslverify false
RUN git clone https://github.com/DaveGamble/cJSON.git ${HOME}/ucentral-external-libs/cJSON/
RUN git clone https://libwebsockets.org/repo/libwebsockets ${HOME}/ucentral-external-libs/libwebsockets/
RUN git clone --recurse-submodules -b v1.50.0 --depth 1 --shallow-submodules https://github.com/grpc/grpc ${HOME}/ucentral-external-libs/grpc/
RUN git clone --recursive --branch v7.1.4 https://github.com/zhaojh329/rtty.git ${HOME}/ucentral-external-libs/rtty/
ADD ${OLS_SCHEMA_SRC} /tmp/
# The following libs should be prebuilt in docker-build-env img to speed-up
# recompilation of only the ucentral-client itself
@@ -49,8 +39,6 @@ RUN cd ${HOME}/ucentral-external-libs/cJSON/ && \
make install
RUN cd ${HOME}/ucentral-external-libs/libwebsockets/ && \
git branch --all && \
git checkout a9b8fe7ebf61b8c0e7891e06e70d558412933a33 && \
mkdir build && \
cd build && \
cmake .. && \
@@ -72,8 +60,3 @@ RUN cd ${HOME}/ucentral-external-libs/rtty/ && \
cd build && \
cmake .. && \
make -j4
RUN unzip /tmp/${SCHEMA_ZIP_FILE} -d ${HOME}/ucentral-external-libs/
RUN cd ${HOME}/ucentral-external-libs/ && \
mv ${SCHEMA_UNZIPPED} ols-ucentral-schema

28
LICENSE
View File

@@ -1,28 +0,0 @@
BSD 3-Clause License
Copyright (c) 2024, Telecom Infra Project
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
3. Neither the name of the copyright holder nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

View File

@@ -7,7 +7,8 @@ IMG_ID := "ucentral-client-build-env"
IMG_TAG := $(shell cat Dockerfile | sha1sum | awk '{print substr($$1,0,11);}')
CONTAINER_NAME := "ucentral_client_build_env"
.PHONY: all clean build-host-env build-final-deb build-ucentral-docker-img run-host-env run-ucentral-docker-img
.PHONY: all clean build-host-env build-final-deb build-ucentral-docker-img run-host-env run-ucentral-docker-img \
plat-ec plat-ec-clean
all: build-host-env build-ucentral-app build-ucentral-docker-img build-final-deb
@@ -20,9 +21,9 @@ build-host-env:
docker build --file Dockerfile --tag ${IMG_ID}:${IMG_TAG} docker
@echo Docker build done;
@echo Saving docker img to local archive...;
if [ ! -f output/docker-ucentral-client-build-env-${IMG_TAG}.gz ] ; then \
if [ ! -f output/docker-ucentral-client-build-env-${IMG_TAG}.gz ] ; then
docker save ${IMG_ID}:${IMG_TAG} | gzip -c - > \
output/docker-ucentral-client-build-env-${IMG_TAG}.gz; \
output/docker-ucentral-client-build-env-${IMG_TAG}.gz;
fi
@echo Docker save done...;
@@ -32,7 +33,6 @@ run-host-env: build-host-env
docker run -d -t --name ${CONTAINER_NAME} \
-v $(realpath ./):/root/ols-nos \
--env UCENTRAL_PLATFORM=$(UCENTRAL_PLATFORM) \
--env PLATFORM_REVISION="$(PLATFORM_REVISION)" \
${IMG_ID}:${IMG_TAG} \
bash
@@ -50,13 +50,8 @@ build-ucentral-app: run-host-env
@echo Running ucentralclient docker-build-env container to build ucentral-client...;
docker exec -t ${CONTAINER_NAME} /root/ols-nos/docker-build-client.sh
docker cp ${CONTAINER_NAME}:/root/deliverables/ src/docker/
# copy the schema version, if it is there
docker cp ${CONTAINER_NAME}:/root/ucentral-external-libs/ols-ucentral-schema/schema.json src/docker/ || true
docker container stop ${CONTAINER_NAME} > /dev/null 2>&1 || true;
docker container rm ${CONTAINER_NAME} > /dev/null 2>&1 || true;
if [ -f version.json ]; then \
cp version.json src/docker/; \
fi
build-ucentral-docker-img: build-ucentral-app
pushd src
@@ -66,8 +61,8 @@ build-ucentral-docker-img: build-ucentral-app
OLDIMG=$$(docker images --format "{{.ID}}" ucentral-client:latest)
docker build --file docker/Dockerfile --tag ucentral-client:latest docker
NEWIMG=$$(docker images --format "{{.ID}}" ucentral-client:latest)
if [ -n "$$OLDIMG" ] && [ ! "$$OLDIMG" = "$$NEWIMG" ]; then \
docker image rm $$OLDIMG; \
if [ -n "$$OLDIMG" ] && [ ! "$$OLDIMG" = "$$NEWIMG" ]; then
docker image rm $$OLDIMG
fi
docker save ucentral-client:latest |gzip -c - > docker-ucentral-client.gz
popd
@@ -86,6 +81,9 @@ build-final-deb: build-ucentral-docker-img
@echo
@echo "ucentral client deb pkg is available under ./output/ dir"
plat-ec:
src/ec-private/build.sh
clean:
docker container stop ${CONTAINER_NAME} > /dev/null 2>&1 || true;
docker container rm ${CONTAINER_NAME} > /dev/null 2>&1 || true;
@@ -96,11 +94,19 @@ clean:
rm -rf src/docker/deliverables || true;
rm -rf src/docker/lib* || true;
rm -rf src/docker/ucentral-client || true;
rm -rf src/docker/version.json || true;
rm -rf src/docker/schema.json || true;
rm -rf src/debian/ucentral-client.substvars 2>/dev/null || true;
rm -rf src/debian/shasta-ucentral-client.debhelper.log 2>/dev/null || true;
rm -rf src/debian/.debhelper src/debian/ucentral-client 2>/dev/null || true;
rm -rf src/debian/shasta-ucentral-client* 2>/dev/null || true;
rm -rf src/debian/debhelper-build-stamp* 2>/dev/null || true;
rm -rf src/debian/files shasta_1.0_amd64.changes shasta_1.0_amd64.buildinfo 2>/dev/null || true;
plat-ec-clean:
rm -rf src/ec-private/cjson
rm -rf src/ec-private/curl
rm -rf src/ec-private/libwebsockets
rm -rf src/ec-private/openssl
rm -rf src/ec-private/openssl
rm -rf src/ec-private/ecapi/build
rm -rf src/ec-private/ucentral
rm -rf output

View File

@@ -1,188 +0,0 @@
# Quick Start: Testing Guide
## TL;DR
```bash
# Test all configs with human-readable output
./run-config-tests.sh
# Generate HTML report
./run-config-tests.sh html
# Test single config
./run-config-tests.sh human ECS4150-TM.json
# Results are in: output/
```
## Common Commands
### Test All Configurations
```bash
./run-config-tests.sh human # Console output with colors
./run-config-tests.sh html # Interactive HTML report
./run-config-tests.sh json # Machine-readable JSON
```
### Test Single Configuration
```bash
./run-config-tests.sh human cfg0.json
./run-config-tests.sh html ECS4150-ACL.json
./run-config-tests.sh json ECS4150-TM.json
```
### View Results
```bash
# Open HTML report in browser
open output/test-report.html # macOS
xdg-open output/test-report.html # Linux
# View text results
cat output/test-results.txt
# Parse JSON results
cat output/test-report.json | jq '.summary'
```
## What the Script Does
1. ✅ Checks Docker is running
2. ✅ Builds Docker environment (only if needed)
3. ✅ Starts/reuses container
4. ✅ Runs tests inside container
5. ✅ Copies results to `output/` directory
6. ✅ Shows summary
## Output Formats
| Format | Use Case | Output File |
|--------|----------|-------------|
| `human` | Interactive development, debugging | `output/test-results.txt` |
| `html` | Reports, sharing, presentations | `output/test-report.html` |
| `json` | CI/CD, automation, metrics | `output/test-report.json` |
## First Run vs Subsequent Runs
**First Run (cold start):**
- Builds Docker environment: ~10 minutes (one-time)
- Runs tests: ~30 seconds
- **Total: ~10 minutes**
**Subsequent Runs (warm start):**
- Reuses environment: ~2 seconds
- Runs tests: ~30 seconds
- **Total: ~30 seconds**
## Troubleshooting
### Docker not running
```bash
# Start Docker Desktop (macOS/Windows)
# OR
sudo systemctl start docker # Linux
```
### Permission denied
```bash
chmod +x run-config-tests.sh
```
### Config not found
```bash
# List available configs
ls config-samples/*.json
```
## CI/CD Integration
### Exit Codes
- `0` = All tests passed ✅
- `1` = Tests failed ❌
- `2` = System error ⚠️
### Example Pipeline
```yaml
- name: Run tests
run: ./run-config-tests.sh json
- name: Check results
run: |
if [ $? -eq 0 ]; then
echo "✅ All tests passed"
else
echo "❌ Tests failed"
exit 1
fi
```
## Available Test Configs
```bash
# List all configs
ls -1 config-samples/*.json | xargs -n1 basename
# Common test configs:
cfg0.json # Basic config
ECS4150-TM.json # Traffic management
ECS4150-ACL.json # Access control lists
ECS4150STP_RSTP.json # Spanning tree
ECS4150_IGMP_Snooping.json # IGMP snooping
ECS4150_POE.json # Power over Ethernet
ECS4150_VLAN.json # VLAN configuration
```
## What Gets Tested
✅ JSON schema validation (structure, types, constraints)
✅ Parser validation (actual C parser implementation)
✅ Property tracking (configured vs unknown properties)
✅ Feature coverage (implemented vs documented features)
✅ Error handling (invalid configs, missing fields)
## Quick Reference
| Task | Command |
|------|---------|
| Test everything | `./run-config-tests.sh` |
| HTML report | `./run-config-tests.sh html` |
| JSON output | `./run-config-tests.sh json` |
| Single config | `./run-config-tests.sh human cfg0.json` |
| View HTML | `open output/test-report.html` |
| View results | `cat output/test-results.txt` |
| Parse JSON | `cat output/test-report.json \| jq` |
## Full Documentation
- **TEST_RUNNER_README.md** - Complete script documentation
- **TESTING_FRAMEWORK.md** - Testing framework overview
- **tests/config-parser/TEST_CONFIG_README.md** - Detailed testing guide
- **TEST_CONFIG_PARSER_DESIGN.md** - Test framework architecture
- **tests/MAINTENANCE.md** - Maintenance procedures
- **README.md** - Project overview and build instructions
## Directory Structure
```
ols-ucentral-client/
├── run-config-tests.sh ← Test runner script
├── output/ ← Test results go here
├── config-samples/ ← Test configurations
└── tests/
├── config-parser/
│ ├── test-config-parser.c ← Test implementation
│ ├── test-stubs.c ← Platform stubs
│ ├── config-parser.h ← Test header
│ ├── Makefile ← Test build system
│ └── TEST_CONFIG_README.md ← Detailed guide
├── schema/
│ ├── validate-schema.py ← Schema validator
│ └── SCHEMA_VALIDATOR_README.md
├── tools/ ← Property database tools
└── MAINTENANCE.md ← Maintenance procedures
```
---
**Need help?** Check TEST_RUNNER_README.md for troubleshooting and advanced usage.

View File

@@ -124,56 +124,3 @@ bash ./partition_script.sh ./
Once certificates are installed and partition is created, rebooting the device is required.
After reboot and uCentral start, service creates <TCA> volume upon start based on physical partition
(by-label provided by udev - /dev/disk/by-label/ONIE-TIP-CA-CERT) automatically.
# Testing
The repository includes a comprehensive testing framework for configuration validation:
## Running Tests
**Quick Start:**
```bash
# Using the test runner script (recommended)
./run-config-tests.sh
# Generate HTML report
./run-config-tests.sh html
# Or run tests directly in the tests directory
cd tests/config-parser
make test-config-full
```
**Docker-based Testing (recommended for consistency):**
```bash
# Run in Docker environment
docker exec ucentral_client_build_env bash -c \
"cd /root/ols-nos/tests/config-parser && make test-config-full"
```
## Test Framework
The testing framework validates configurations through two layers:
1. **Schema Validation** - JSON structure validation against uCentral schema
2. **Parser Testing** - Actual C parser implementation testing with property tracking
Tests are organized in the `tests/` directory:
- `tests/config-parser/` - Configuration parser tests
- `tests/schema/` - Schema validation
- `tests/tools/` - Property database generation tools
- `tests/unit/` - Unit tests
## Documentation
- **[TESTING_FRAMEWORK.md](TESTING_FRAMEWORK.md)** - Testing overview and quick reference
- **[tests/README.md](tests/README.md)** - Complete testing documentation
- **[tests/config-parser/TEST_CONFIG_README.md](tests/config-parser/TEST_CONFIG_README.md)** - Detailed testing guide
- **[tests/MAINTENANCE.md](tests/MAINTENANCE.md)** - Schema and property database maintenance
## Test Configuration
Test configurations are located in `config-samples/`:
- 21 positive test configurations covering various features
- 4 negative test configurations for error handling validation
- JSON schema: `config-samples/ucentral.schema.pretty.json`

View File

@@ -1,461 +0,0 @@
# Configuration Testing Framework
## Overview
The OLS uCentral Client includes a comprehensive configuration testing framework that provides two-layer validation of JSON configurations:
1. **Schema Validation** - Structural validation against the uCentral JSON schema
2. **Parser Testing** - Implementation validation of the C parser with property tracking
This framework enables automated testing, continuous integration, and tracking of configuration feature implementation status.
## Documentation Index
This testing framework includes multiple documentation files, each serving a specific purpose:
### Primary Documentation
1. **[tests/config-parser/TEST_CONFIG_README.md](tests/config-parser/TEST_CONFIG_README.md)** - Complete testing framework guide
- Overview of two-layer validation approach
- Quick start and running tests
- Property tracking system
- Configuration-specific validators
- Test output interpretation
- CI/CD integration
- **Start here** for understanding the testing framework
2. **[tests/schema/SCHEMA_VALIDATOR_README.md](tests/schema/SCHEMA_VALIDATOR_README.md)** - Schema validator detailed documentation
- Standalone validator usage
- Command-line interface
- Programmatic API
- Porting guide for other repositories
- Common validation errors
- **Start here** for schema validation specifics
3. **[tests/MAINTENANCE.md](tests/MAINTENANCE.md)** - Maintenance procedures guide
- Schema update procedures
- Property database update procedures
- Version synchronization
- Testing after updates
- Troubleshooting common issues
- **Start here** when updating schema or property database
4. **[TEST_CONFIG_PARSER_DESIGN.md](TEST_CONFIG_PARSER_DESIGN.md)** - Test framework architecture
- Multi-layer validation design
- Property metadata system (628 properties)
- Property inspection engine
- Test execution flow diagrams
- Data structures and algorithms
- Output format implementations
- **Start here** for understanding the test framework internals
### Supporting Documentation
5. **[README.md](README.md)** - Project overview and build instructions
- Build system architecture
- Platform abstraction layer
- Testing framework integration
- Deployment instructions
## Quick Reference
### Running Tests
**RECOMMENDED: Use the test runner script** (handles Docker automatically):
```bash
# Test all configurations (human-readable output)
./run-config-tests.sh
# Generate HTML report
./run-config-tests.sh html
# Generate JSON report
./run-config-tests.sh json
# Test single configuration
./run-config-tests.sh human cfg0.json
```
**Alternative: Run tests directly in Docker** (manual Docker management):
```bash
# Build the Docker environment first (if not already built)
make build-host-env
# Run all tests (schema + parser) - RECOMMENDED
docker exec ucentral_client_build_env bash -c \
"cd /root/ols-nos/tests/config-parser && make test-config-full"
# Run individual test suites
docker exec ucentral_client_build_env bash -c \
"cd /root/ols-nos/tests/config-parser && make validate-schema"
docker exec ucentral_client_build_env bash -c \
"cd /root/ols-nos/tests/config-parser && make test-config"
# Generate test reports
docker exec ucentral_client_build_env bash -c \
"cd /root/ols-nos/tests/config-parser && make test-config-html"
# Copy report files out of container to view
docker cp ucentral_client_build_env:/root/ols-nos/tests/config-parser/test-report.html output/
```
**Alternative: Run tests locally** (may have OS-specific dependencies):
```bash
# Navigate to test directory
cd tests/config-parser
# Run all tests (schema + parser)
make test-config-full
# Run individual test suites
make validate-schema # Schema validation only
make test-config # Parser tests only
# Generate test reports
make test-config-html # HTML report (browser-viewable)
make test-config-json # JSON report (machine-readable)
make test-config-junit # JUnit XML (CI/CD integration)
```
**Note:** Running tests in Docker is the preferred method as it provides a consistent, reproducible environment regardless of your host OS (macOS, Linux, Windows).
### Key Files
**Test Implementation:**
- `tests/config-parser/test-config-parser.c` - Parser test framework with property tracking (628 properties)
- `tests/config-parser/test-stubs.c` - Platform function stubs for testing
- `tests/schema/validate-schema.py` - Standalone schema validator
- `tests/config-parser/config-parser.h` - Test header exposing cfg_parse()
**Configuration Files:**
- `config-samples/ols.ucentral.schema.pretty.json` - uCentral JSON schema (human-readable)
- `config-samples/*.json` - Test configuration files (25+ configs)
- `config-samples/*invalid*.json` - Negative test cases
**Build System:**
- `tests/config-parser/Makefile` - Test targets and build rules
- `run-config-tests.sh` - Test runner script (recommended)
**Production Code (Minimal Changes):**
- `src/ucentral-client/proto.c` - Added TEST_STATIC macro (2 lines changed)
- `src/ucentral-client/include/router-utils.h` - Added extern declarations (minor change)
## Features
### Schema Validation
- Validates JSON structure against official uCentral schema
- Checks property types, required fields, constraints
- Standalone tool, no dependencies on C code
- Exit codes for CI/CD integration
### Parser Testing
- Tests actual C parser implementation
- Multiple output formats (human-readable, HTML, JSON, JUnit XML)
- Interactive HTML reports with detailed analysis
- Machine-readable JSON for automation
- JUnit XML for CI/CD integration
- Validates configuration processing and struct population
- Configuration-specific validators for business logic
- Memory leak detection
- Hardware constraint validation
### Property Tracking System
- Database of 628 properties and their processing status
- Tracks which properties are parsed by which functions
- Identifies unimplemented features
- Status classification: CONFIGURED, IGNORED, SYSTEM, INVALID, Unknown
- Property usage reports across all test configurations
- 199 properties (32%) with line number references
### Two-Layer Validation Strategy
**Why Both Layers?**
Each layer catches different types of errors:
- **Schema catches**: Type mismatches, missing required fields, constraint violations
- **Parser catches**: Implementation bugs, hardware limits, cross-field dependencies
- **Property tracking catches**: Missing implementations, platform-specific features
See TEST_CONFIG_README.md section "Two-Layer Validation Strategy" for detailed explanation.
## Test Coverage
Current test suite includes:
- 25+ configuration files covering various features
- Positive tests (configs that should parse successfully)
- Negative tests (configs that should fail)
- Feature-specific validators for critical configurations
- Platform stub with 54-port simulation (matches ECS4150 hardware)
- All tests currently passing (25/25)
### Tested Features
- Port configuration (enable/disable, speed, duplex)
- VLAN configuration and membership
- Spanning Tree Protocol (STP, RSTP, PVST, RPVST)
- IGMP Snooping
- Power over Ethernet (PoE)
- IEEE 802.1X Authentication
- DHCP Relay
- Static routing
- System configuration (timezone, hostname, etc.)
### Platform-Specific Features (Schema-Valid, Platform Implementation Required)
- LLDP (Link Layer Discovery Protocol)
- LACP (Link Aggregation Control Protocol)
- ACLs (Access Control Lists)
- DHCP Snooping
- Loop Detection
- Port Mirroring
- Voice VLAN
These features pass schema validation but show as "Unknown" in property reports, indicating they require platform-specific implementation.
## Changes from Base Repository
The testing framework was added with minimal impact to production code:
### New Files Added
1. `tests/config-parser/test-config-parser.c` - Complete test framework with 628-property database
2. `tests/config-parser/test-stubs.c` - Platform stubs
3. `tests/schema/validate-schema.py` - Schema validator
4. `tests/config-parser/config-parser.h` - Test header
5. `tests/config-parser/TEST_CONFIG_README.md` - Framework documentation
6. `tests/schema/SCHEMA_VALIDATOR_README.md` - Validator documentation
7. `tests/MAINTENANCE.md` - Maintenance procedures
8. `tests/config-parser/Makefile` - Test build system
9. `TESTING_FRAMEWORK.md` - This file (documentation index)
10. `TEST_CONFIG_PARSER_DESIGN.md` - Test framework architecture and design
11. `run-config-tests.sh` - Test runner script
### Modified Files
1. `src/ucentral-client/proto.c` - Added TEST_STATIC macro pattern (2 lines)
```c
// Changed from:
static struct plat_cfg *cfg_parse(...)
// Changed to:
#ifdef UCENTRAL_TESTING
#define TEST_STATIC
#else
#define TEST_STATIC static
#endif
TEST_STATIC struct plat_cfg *cfg_parse(...)
```
This allows test code to call cfg_parse() while keeping it static in production builds.
2. `src/ucentral-client/include/router-utils.h` - Added extern declarations
- Exposed necessary functions for test stubs
3. `src/ucentral-client/Makefile` - Removed test targets (moved to tests/config-parser/Makefile)
- Production Makefile now focuses only on deliverable code
### Configuration Files
- Added `config-samples/cfg_invalid_*.json` - Negative test cases
- Added `config-samples/ECS4150_*.json` - Feature-specific test configs
- No changes to existing valid configurations
### Zero Impact on Production
- Production builds: No functional changes, cfg_parse() remains static
- Test builds: cfg_parse() becomes visible with -DUCENTRAL_TESTING flag
- No ABI changes, no performance impact
- No runtime dependencies added
## Integration with Development Workflow
### During Development
```bash
# 1. Make code changes to proto.c
vi src/ucentral-client/proto.c
# 2. Run tests using test runner script
./run-config-tests.sh
# 3. Review property tracking report
# Check for unimplemented features or errors
# 4. If adding new parser function, update property database
vi tests/config-parser/test-config-parser.c
# Add property entries for new function
# 5. Create test configuration
vi config-samples/test-new-feature.json
# 6. Retest
./run-config-tests.sh
```
### Before Committing
```bash
# Ensure all tests pass
./run-config-tests.sh
# Generate full HTML report for review
./run-config-tests.sh html
open output/test-report.html
# Check for property database accuracy
# Review "Property Usage Report" section in HTML report
# Look for unexpected "Unknown" properties
```
### In CI/CD Pipeline
```yaml
test-configurations:
stage: test
script:
- ./run-config-tests.sh json
artifacts:
paths:
- output/test-report.json
- output/test-report.html
when: always
```
## Property Database Management
The property database is a critical component tracking which JSON properties are parsed by which functions.
### Database Structure
```c
static struct property_metadata properties[] = {
{
.path = "interfaces.ethernet.enabled",
.status = PROP_CONFIGURED,
.source_file = "proto.c",
.source_function = "cfg_ethernet_parse",
.source_line = 1119,
.notes = "Enable/disable ethernet interface"
},
// ... 628 total entries (199 with line numbers) ...
};
```
### Key Rules
1. **Only track properties for functions that exist in this repository's proto.c**
2. **Remove entries when parser functions are removed**
3. **Add entries immediately when adding new parser functions**
4. **Use accurate function names** - different platforms may use different names
5. **Properties not in database show as "Unknown"** - this is correct for platform-specific features
See MAINTENANCE.md for complete property database update procedures.
## Schema Management
The schema file defines what configurations are structurally valid.
### Schema Location
- `config-samples/ucentral.schema.pretty.json` - Human-readable version (recommended)
- `config-samples/ols.ucentral.schema.json` - Compact version
### Schema Source
Schema is maintained in the external [ols-ucentral-schema](https://github.com/Telecominfraproject/ols-ucentral-schema) repository.
### Schema Updates
When ols-ucentral-schema releases a new version:
1. Copy new schema to config-samples/
2. Run schema validation on all test configs
3. Fix any configs that fail new requirements
4. Document breaking changes
5. Update property database if new properties are implemented
See MAINTENANCE.md section "Schema Update Procedures" for complete process.
## Platform-Specific Repositories
This is the **base repository** providing the core framework. Platform-specific repositories (like Edgecore EC platform) can:
1. **Fork the test framework** - Copy test files to their repository
2. **Extend property database** - Add entries for platform-specific parser functions
3. **Add platform configs** - Create configs testing platform features
4. **Maintain separate tracking** - Properties "Unknown" in base become "CONFIGURED" in platform
### Example: LLDP Property Status
**In base repository (this repo):**
```
Property: interfaces.ethernet.lldp
Status: Unknown (not in property database)
Note: May require platform-specific implementation
```
**In Edgecore EC platform repository:**
```
Property: interfaces.ethernet.lldp
Parser: cfg_ethernet_lldp_parse()
Status: CONFIGURED
Note: Per-interface LLDP transmit/receive configuration
```
Each platform tracks only the properties it actually implements.
## Troubleshooting
### Common Issues
**Tests fail in Docker but pass locally:**
- Check schema file exists in container
- Verify paths are correct in container environment
- Rebuild container: `make build-host-env`
**Property shows as "Unknown" when it should be CONFIGURED:**
- Verify parser function exists: `grep "function_name" proto.c`
- Check property path matches JSON exactly
- Ensure property entry is in properties[] array
**Schema validation fails for valid config:**
- Schema may be outdated - check version
- Config may use vendor extensions not in base schema
- Validate against specific schema: `./validate-schema.py config.json --schema /path/to/schema.json`
See MAINTENANCE.md "Troubleshooting" section for complete troubleshooting guide.
## Documentation Maintenance
When updating the testing framework:
1. **Update relevant documentation:**
- New features → TEST_CONFIG_README.md
- Schema changes → MAINTENANCE.md + SCHEMA_VALIDATOR_README.md
- Property database changes → MAINTENANCE.md + TEST_CONFIG_README.md
- Build changes → README.md
2. **Keep version information current:**
- Update compatibility matrices
- Document breaking changes
- Maintain changelogs
3. **Update examples:**
- Refresh command output examples
- Update property counts
- Keep test results current
## Contributing
When contributing to the testing framework:
1. **Maintain property database accuracy** - Update when changing parser functions
2. **Add test configurations** - Create configs demonstrating new features
3. **Update documentation** - Keep docs synchronized with code changes
4. **Follow conventions** - Use established patterns for validators and property entries
5. **Test thoroughly** - Run full test suite before committing
## License
BSD-3-Clause (same as parent project)
## See Also
- **[tests/config-parser/TEST_CONFIG_README.md](tests/config-parser/TEST_CONFIG_README.md)** - Complete testing framework guide
- **[TEST_CONFIG_PARSER_DESIGN.md](TEST_CONFIG_PARSER_DESIGN.md)** - Test framework architecture and design
- **[tests/schema/SCHEMA_VALIDATOR_README.md](tests/schema/SCHEMA_VALIDATOR_README.md)** - Schema validator documentation
- **[tests/MAINTENANCE.md](tests/MAINTENANCE.md)** - Update procedures and troubleshooting
- **[TEST_RUNNER_README.md](TEST_RUNNER_README.md)** - Test runner script documentation
- **[QUICK_START_TESTING.md](QUICK_START_TESTING.md)** - Quick start guide
- **[README.md](README.md)** - Project overview and build instructions
- **ols-ucentral-schema repository** - Official schema source

View File

@@ -1,308 +0,0 @@
# Design of test-config-parser.c
The `tests/config-parser/test-config-parser.c` file implements a comprehensive configuration testing framework with a sophisticated multi-layered design. This document describes the architecture and implementation details.
## 1. **Core Architecture: Multi-Layer Validation**
The framework validates configurations through three complementary layers:
### Layer 1: Schema Validation
- Invokes external `tests/schema/validate-schema.py` to verify JSON structure against uCentral schema
- Catches: JSON syntax errors, type mismatches, missing required fields, constraint violations
- If schema validation fails, parsing is skipped to ensure clean error isolation
### Layer 2: Parser Testing
- Calls production `cfg_parse()` function from `src/ucentral-client/proto.c`
- Tests actual C parser implementation with real platform data structures
- Catches: Parser bugs, memory issues, hardware constraints, cross-field dependencies
### Layer 3: Property Tracking
- Deep recursive inspection of JSON tree to classify every property
- Maps properties to property metadata database (628 properties total)
- Tracks which properties are CONFIGURED, IGNORED, INVALID, UNKNOWN, etc.
- 199 properties (32%) include line number references to proto.c
## 2. **Property Metadata System**
### Property Database Structure
```c
struct property_metadata {
const char *path; // JSON path: "ethernet[].speed"
enum property_status status; // CONFIGURED, IGNORED, UNKNOWN, etc.
const char *source_file; // Where processed: "proto.c"
const char *source_function; // Function: "cfg_ethernet_parse"
int source_line; // Line number in proto.c (if available)
const char *notes; // Context/rationale
};
```
**Database contains 628 entries** documenting:
- Which properties are actively parsed (PROP_CONFIGURED)
- Which are intentionally ignored (PROP_IGNORED)
- Which need platform implementation (PROP_UNKNOWN)
- Which are structural containers (PROP_SYSTEM)
- Line numbers for 199 properties (32%) showing exact parsing locations
### Property Status Classification
- **PROP_CONFIGURED**: Successfully processed by parser
- **PROP_MISSING**: Required but absent
- **PROP_IGNORED**: Present but intentionally not processed
- **PROP_INVALID**: Invalid value (out of bounds, wrong type)
- **PROP_INCOMPLETE**: Missing required sub-fields
- **PROP_UNKNOWN**: Needs manual classification/testing (may require platform implementation)
- **PROP_SYSTEM**: Structural container (not leaf value)
## 3. **Property Inspection Engine**
### scan_json_tree_recursive() (lines 1399-1459)
Recursive descent through JSON tree:
1. Traverses entire JSON configuration structure
2. For each property, builds full dot-notation path (e.g., `"interfaces[].ipv4.subnet[].prefix"`)
3. Looks up property in metadata database via `lookup_property_metadata()`
4. Records property validation result with status, value, source location
5. Continues recursion into nested objects/arrays
### lookup_property_metadata() (lines 1314-1348)
Smart property matching:
1. Normalizes path by replacing `[N]` with `[]` (e.g., `ethernet[5].speed` → `ethernet[].speed`)
2. Searches property database for matching canonical path
3. Returns metadata if found, NULL if unknown property
### scan_for_unprocessed_properties() (lines 1666-1765)
Legacy unprocessed property detection:
- Checks properties against known property lists at each config level
- Reports properties that exist in JSON but aren't in "known" lists
- Used alongside property database for comprehensive coverage
## 4. **Test Execution Flow**
### Main Test Function: test_config_file() (lines 1790-1963)
```
┌─────────────────────────────────────────┐
│ 1. Schema Validation │
│ - validate_against_schema() │
│ - If fails: mark test, skip parsing │
└──────────────┬──────────────────────────┘
┌─────────────────────────────────────────┐
│ 2. JSON Parsing │
│ - read_json_file() │
│ - cJSON_Parse() │
└──────────────┬──────────────────────────┘
┌─────────────────────────────────────────┐
│ 3. Feature Detection │
│ - detect_json_features() │
│ - Find LLDP, ACL, LACP, etc. │
└──────────────┬──────────────────────────┘
┌─────────────────────────────────────────┐
│ 4. Property Inspection │
│ - scan_json_tree_recursive() │
│ - Build property validation list │
└──────────────┬──────────────────────────┘
┌─────────────────────────────────────────┐
│ 5. Parser Invocation │
│ - cfg = cfg_parse(json) │
│ - Invoke production parser │
└──────────────┬──────────────────────────┘
┌─────────────────────────────────────────┐
│ 6. Feature Statistics │
│ - update_feature_statistics() │
│ - Count ports, VLANs, features │
└──────────────┬──────────────────────────┘
┌─────────────────────────────────────────┐
│ 7. Validation (Optional) │
│ - run_validator() for specific │
│ configs (cfg0, PoE, DHCP, etc.) │
└──────────────┬──────────────────────────┘
┌─────────────────────────────────────────┐
│ 8. Result Recording │
│ - finalize_test_result() │
│ - Store in linked list │
└─────────────────────────────────────────┘
```
## 5. **Data Structures**
### test_result (lines 94-128)
Per-test result tracking:
```c
struct test_result {
char filename[256];
int passed;
char error_message[512];
int ports_configured, vlans_configured;
int unprocessed_properties;
// Property counters
int properties_configured;
int properties_missing;
int properties_ignored;
// ... etc
// Feature presence flags
int has_port_config, has_vlan_config;
int has_stp, has_igmp, has_poe;
// ... etc
// Linked list of property validations
struct property_validation *property_validations;
struct test_result *next;
};
```
### property_validation (lines 85-92)
Individual property validation record:
```c
struct property_validation {
char path[128]; // "unit.hostname"
enum property_status status;
char value[512]; // "\"switch01\""
char details[256]; // Additional context
char source_location[128]; // "proto.c:cfg_unit_parse()"
struct property_validation *next;
};
```
## 6. **Feature Statistics Tracking**
### Global Statistics (lines 40-56)
```c
struct feature_stats {
int configs_with_ports;
int configs_with_vlans;
int configs_with_stp;
int configs_with_igmp;
int configs_with_poe;
int configs_with_ieee8021x;
int configs_with_dhcp_relay;
int configs_with_lldp; // JSON-detected
int configs_with_acl; // JSON-detected
int configs_with_lacp; // JSON-detected
// ... etc
};
```
**Two detection methods:**
1. **Parser-based**: Check `plat_cfg` structure for configured values (ports, VLANs, STP mode)
2. **JSON-based**: Detect schema-valid features in JSON that may not be parsed (LLDP, ACL, LACP)
## 7. **Output Formats** (lines 26-31)
### OUTPUT_HUMAN (default)
- Colorful console output with emojis
- Detailed property analysis
- Processing summaries
- Feature statistics
### OUTPUT_JSON (lines 2015-2097)
- Machine-readable JSON report
- Full test results with property details
- CI/CD integration friendly
### OUTPUT_HTML (lines 2099+)
- Interactive web report
- Full test details with styling
- Browser-viewable (982KB typical size)
### OUTPUT_JUNIT (planned)
- JUnit XML format for Jenkins/GitLab CI
## 8. **Validator Registry** (lines 302-343)
Optional per-config validators for deep validation:
```c
static const struct config_validator validators[] = {
{ "cfg0.json", validate_cfg0, "Port disable configuration" },
{ "cfg5_poe.json", validate_cfg_poe, "PoE configuration" },
{ "cfg6_dhcp.json", validate_cfg_dhcp, "DHCP relay" },
// ... etc
};
```
Validators inspect `plat_cfg` structure to verify specific features were correctly parsed.
## 9. **Test Discovery** (lines 1968-2010)
`test_directory()` auto-discovers test configs:
- Scans directory for `.json` files
- Skips `schema.json`, `Readme.json`
- Invokes `test_config_file()` for each config
## 10. **Key Design Patterns**
### Negative Test Support (lines 445-458)
```c
static int is_negative_test(const char *filename) {
if (strstr(filename, "invalid") != NULL) return 1;
if (strstr(filename, "ECS4150_port_isoltaon.json") != NULL) return 1;
return 0;
}
```
Configs expected to fail are marked as "PASS" if parsing fails.
### Schema-First Validation (lines 1818-1836)
Schema validation is a **prerequisite** for parser testing. If schema fails, parser is never invoked, ensuring clean error isolation.
### Linked List Result Storage (lines 221-242)
All test results stored in linked list for:
- Multiple output format generation from same data
- Summary statistics calculation
- Report generation after all tests complete
## 11. **Critical Integration Points**
### With Production Code (minimal impact):
- **proto.c**: Uses `cfg_parse()` exposed via `TEST_STATIC` macro
- **ucentral-log.h**: Registers `test_log_callback()` to capture parser errors (lines 134-160)
- **ucentral-platform.h**: Inspects `struct plat_cfg` to verify parsing results
### With Schema Validator:
- **tests/schema/validate-schema.py**: External Python script invoked via `system()` call
- Schema path: `config-samples/ols.ucentral.schema.pretty.json`
## 12. **Property Database Maintenance Rules**
**Critical Rule**:
> The property database must only contain entries for parser functions that exist in this repository's proto.c. Do not add entries for platform-specific functions that don't exist in the base implementation.
This keeps the base repository clean and allows platform-specific forks to extend the database with their own implementations.
---
## Summary
The design elegantly separates concerns:
1. **Schema layer** validates JSON structure (delegated to Python)
2. **Parser layer** tests C implementation (calls production code)
3. **Property layer** tracks implementation status (metadata database)
4. **Validation layer** verifies specific features (optional validators)
5. **Reporting layer** generates multiple output formats
The property metadata database is the **crown jewel** - it documents the implementation status of all 628 configuration properties, enabling automated detection of unimplemented features and validation of parser coverage.
## Related Documentation
For additional information about the testing framework:
- **TESTING_FRAMEWORK.md** - Overview and documentation index
- **tests/config-parser/TEST_CONFIG_README.md** - Complete testing framework guide
- **tests/schema/SCHEMA_VALIDATOR_README.md** - Schema validator documentation
- **tests/MAINTENANCE.md** - Schema and property database update procedures
- **TEST_RUNNER_README.md** - Test runner script documentation
- **QUICK_START_TESTING.md** - Quick start guide
- **README.md** - Project overview and testing framework integration

View File

@@ -1,469 +0,0 @@
# Test Runner Script Documentation
## Overview
`run-config-tests.sh` is a comprehensive Docker-based test runner for uCentral configuration validation. It automates the entire testing workflow: building the Docker environment, running tests with various output formats, and copying results to the host.
## Features
- **Automatic Docker Environment Management**
- Builds Docker environment only when needed (checks Dockerfile SHA)
- Starts/reuses existing containers intelligently
- No manual Docker commands required
- **Multiple Output Formats**
- **human**: Human-readable console output with colors and detailed analysis
- **html**: Interactive HTML report with test results and property tracking
- **json**: Machine-readable JSON for automation and metrics
- **Flexible Testing**
- Test all configurations in one run
- Test a single configuration file
- Automatic result file naming and organization
- **Production-Ready**
- Exit codes for CI/CD integration (0 = pass, non-zero = fail/issues)
- Colored output for readability
- Comprehensive error handling
- Results automatically copied to `output/` directory
## Usage
### Basic Syntax
```bash
./run-config-tests.sh [format] [config-file]
```
**Parameters:**
- `format` (optional): Output format - `html`, `json`, or `human` (default: `human`)
- `config-file` (optional): Specific config file to test (default: test all configs)
### Examples
#### Test All Configurations
```bash
# Human-readable output (default)
./run-config-tests.sh
# Human-readable output (explicit)
./run-config-tests.sh human
# HTML report
./run-config-tests.sh html
# JSON output
./run-config-tests.sh json
```
#### Test Single Configuration
```bash
# Test single config with human output
./run-config-tests.sh human cfg0.json
# Test single config with HTML report
./run-config-tests.sh html ECS4150-TM.json
# Test single config with JSON output
./run-config-tests.sh json ECS4150-ACL.json
```
## Output Files
All output files are saved to the `output/` directory in the repository root.
### Output File Naming
**All Configs:**
- `test-results.txt` - Human-readable output
- `test-report.html` - HTML report
- `test-report.json` - JSON output
**Single Config:**
- `test-results-{config-name}.txt` - Human-readable output
- `test-report-{config-name}.html` - HTML report
- `test-results-{config-name}.json` - JSON output
### Output Directory Structure
```
output/
├── test-results.txt # All configs, human format
├── test-report.html # All configs, HTML format
├── test-report.json # All configs, JSON format
├── test-results-cfg0.txt # Single config results
├── test-report-ECS4150-TM.html # Single config HTML
└── test-results-ECS4150-ACL.json # Single config JSON
```
## How It Works
### Workflow Steps
1. **Docker Check**: Verifies Docker daemon is running
2. **Environment Build**: Builds Docker environment if needed (caches based on Dockerfile SHA)
3. **Container Start**: Starts or reuses existing container
4. **Test Execution**: Runs tests inside container with specified format
5. **Result Copy**: Copies output files from container to host `output/` directory
6. **Summary**: Displays test summary and output file locations
### Docker Environment Management
The script intelligently manages the Docker environment:
```
Dockerfile unchanged → Skip build (use existing image)
Dockerfile modified → Build new image with new SHA tag
Container exists → Reuse existing container
Container missing → Create new container
Container stopped → Start existing container
```
This ensures fast subsequent runs while detecting when rebuilds are necessary.
## Output Format Details
### Human Format (default)
Human-readable console output with:
- Color-coded pass/fail indicators
- Detailed error messages
- Property usage reports
- Feature coverage analysis
- Schema validation results
**Best for:** Interactive development, debugging, manual testing
**Example:**
```
[TEST] config-samples/cfg0.json
✓ PASS - Schema validation
✓ PASS - Parser validation
Properties: 42 configured, 5 unknown
Total tests: 37
Passed: 37
Failed: 0
```
### HTML Format
Interactive web report with:
- Test result summary table
- Pass/fail status with colors
- Expandable test details
- Property tracking information
- Feature coverage matrix
- Timestamp and metadata
**Best for:** Test reports, sharing results, archiving, presentations
**Open with:**
```bash
open output/test-report.html # macOS
xdg-open output/test-report.html # Linux
start output/test-report.html # Windows
```
### JSON Format
Machine-readable structured data with:
- Test results array
- Pass/fail status
- Error details
- Property usage data
- Timestamps
- Exit codes
**Best for:** CI/CD integration, automation, metrics, analysis
**Structure:**
```json
{
"summary": {
"total": 37,
"passed": 37,
"failed": 0,
"timestamp": "2025-12-15T10:30:00Z"
},
"tests": [
{
"config": "cfg0.json",
"passed": true,
"schema_valid": true,
"parser_valid": true,
"properties": { "configured": 42, "unknown": 5 }
}
]
}
```
## Exit Codes
The script uses exit codes for CI/CD integration:
- `0` - All tests passed successfully
- `1` - Some tests failed or had validation errors
- `2` - System errors (Docker not running, file not found, etc.)
**CI/CD Example:**
```bash
./run-config-tests.sh json
if [ $? -eq 0 ]; then
echo "All tests passed!"
else
echo "Tests failed, see output/test-report.json"
exit 1
fi
```
## Performance
### First Run (Cold Start)
```
Build Docker environment: 5-10 minutes (one-time)
Run all config tests: 10-30 seconds
Total first run: ~10 minutes
```
### Subsequent Runs (Warm Start)
```
Environment check: 1-2 seconds (skipped if unchanged)
Container startup: 1-2 seconds (or reuse running container)
Run all config tests: 10-30 seconds
Total subsequent run: ~15 seconds
```
### Single Config Test
```
Test single config: 1-3 seconds
Total time: ~5 seconds (with running container)
```
## Troubleshooting
### Docker Not Running
**Error:**
```
✗ Docker is not running. Please start Docker and try again.
```
**Solution:**
- Start Docker Desktop (macOS/Windows)
- Start Docker daemon: `sudo systemctl start docker` (Linux)
### Container Build Failed
**Error:**
```
✗ Failed to build environment
```
**Solution:**
```bash
# Clean Docker and rebuild
docker system prune -a
make clean
./run-config-tests.sh
```
### Config File Not Found
**Error:**
```
✗ Config file not found in container: myconfig.json
```
**Solution:**
- Check available configs: `ls config-samples/*.json`
- Ensure config file is in `config-samples/` directory
- Use correct filename (case-sensitive)
### Test Output Not Copied
**Error:**
```
⚠ Output file not found in container: test-report.html
```
**Solution:**
- Check test execution logs for errors
- Verify test completed successfully inside container
- Try running tests manually: `docker exec ucentral_client_build_env bash -c "cd /root/ols-nos/tests/config-parser && make test-config"`
### Permission Denied
**Error:**
```
Permission denied: ./run-config-tests.sh
```
**Solution:**
```bash
chmod +x run-config-tests.sh
```
## Integration with Existing Workflows
### With Makefile
The script is independent of the Makefile but uses the same Docker infrastructure:
```bash
# Build environment (Makefile or script)
make build-host-env
# OR let script build it automatically
# Run tests (script provides better output management)
./run-config-tests.sh html
```
### With CI/CD
#### GitHub Actions
```yaml
name: Configuration Tests
on: [push, pull_request]
jobs:
test:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
- name: Run config tests
run: ./run-config-tests.sh json
- name: Upload test results
uses: actions/upload-artifact@v3
with:
name: test-results
path: output/test-report.json
```
#### GitLab CI
```yaml
test-configs:
stage: test
script:
- ./run-config-tests.sh json
artifacts:
paths:
- output/test-report.json
when: always
```
#### Jenkins
```groovy
stage('Test Configurations') {
steps {
sh './run-config-tests.sh html'
publishHTML([
reportDir: 'output',
reportFiles: 'test-report.html',
reportName: 'Config Test Report'
])
}
}
```
### With Git Hooks
**Pre-commit hook** (test before commit):
```bash
#!/bin/bash
# .git/hooks/pre-commit
echo "Running configuration tests..."
./run-config-tests.sh human
if [ $? -ne 0 ]; then
echo "Tests failed. Commit aborted."
exit 1
fi
```
## Advanced Usage
### Custom Output Directory
Modify the `OUTPUT_DIR` variable in the script:
```bash
# Edit run-config-tests.sh
OUTPUT_DIR="$SCRIPT_DIR/my-custom-output"
```
### Test Specific Config Pattern
```bash
# Test all ACL configs
for config in config-samples/*ACL*.json; do
./run-config-tests.sh json "$(basename $config)"
done
```
### Parallel Testing (Multiple Containers)
```bash
# Start multiple containers for parallel testing
docker exec ucentral_client_build_env_1 bash -c "cd /root/ols-nos/src/ucentral-client && ./test-config-parser config1.json" &
docker exec ucentral_client_build_env_2 bash -c "cd /root/ols-nos/src/ucentral-client && ./test-config-parser config2.json" &
wait
```
### Automated Report Generation
```bash
# Generate all format reports
for format in human html json; do
./run-config-tests.sh $format
done
# Timestamp reports
mv output/test-report.html output/test-report-$(date +%Y%m%d-%H%M%S).html
```
## Comparison with Direct Make Commands
| Feature | run-config-tests.sh | Direct Make |
|---------|---------------------|-------------|
| Docker management | Automatic | Manual |
| Output to host | Automatic | Manual copy |
| Format selection | Command-line arg | Multiple make targets |
| Single config test | Built-in | Manual setup |
| Result organization | Automatic | Manual |
| Error handling | Comprehensive | Basic |
| CI/CD ready | Yes (exit codes) | Requires scripting |
**Recommendation:** Use `run-config-tests.sh` for all testing workflows. It provides a better user experience and handles Docker complexity automatically.
## Related Documentation
- **TESTING_FRAMEWORK.md** - Overview of testing framework
- **tests/config-parser/TEST_CONFIG_README.md** - Complete testing guide
- **TEST_CONFIG_PARSER_DESIGN.md** - Test framework architecture
- **tests/MAINTENANCE.md** - Schema and property database maintenance
- **QUICK_START_TESTING.md** - Quick start guide
- **README.md** - Project overview and build instructions
## Support
For issues or questions:
1. Check troubleshooting section above
2. Review test output in `output/` directory
3. Check Docker container logs: `docker logs ucentral_client_build_env`
4. File issue in repository issue tracker
## Version
Script version: 1.0.0
Last updated: 2025-12-15
Compatible with: uCentral schema 4.1.0-rc1 and later

View File

@@ -11,7 +11,7 @@ cfg2:
cfg3:
Bring ports 1 up, 2 up (Ethernet1, Ethernet2) (admin state);
Destroy any VLAN that is not in the list (in this particular CFG - create VLAN 10,
destroy any other, except for MGMT VLAN 1 - it's not being altered by the
destroy any other, except for MGMT VLAN 1 - it's not being altered by the
uCentral app itself);
Create VLAN 10;
Set VLAN 10 memberlist with the following ports: Ethernet1, Ethernet2;
@@ -39,7 +39,6 @@ cfg5_poe:
- detection mode is 4pt-dot3af;
- power limit is 99900mW (e.g. max per port);
- priority is LOW;
cfg7_ieee80211x.json:
Following json file configures the given topology:
+-----------------+
@@ -65,33 +64,3 @@ cfg7_ieee80211x.json:
to be the same for the given (10.10.20.0/24) network.
.1x client also must have valid credentials (both the client and the RADIUS server
must have the same client credentials configured).
cfg_igmp.json:
Configure igmp snooping and querier on VLAN 1.
Configure igmp static groups:
- 230.1.1.1 with egress port Ethernet1
- 230.2.2.2 with egress ports Ethernet2 & Ethernet3
cfg_rpvstp.json:
Configure VLAN 1;
Configure VLAN 2;
Configure rapid per-vlan STP on VLAN 1 with priority 32768;
Disable STP on VLAN 2.
cfg_port_isolation.json:
Configure port isolation with Ethernet1 as uplink and
Ethernet2 & Ethernet3 as downlink
cfg_services_log.json:
Enable syslog with these parameters:
- remote host addr
- remote host port
- log severity (priority):
* emerg: 0
* alert: 1
* crit: 2
* error: 3
* warning: 4
* notice: 5
* info: 6
* debug: 7

File diff suppressed because it is too large Load Diff

View File

@@ -1,581 +0,0 @@
{
"strict": false,
"uuid": 1765383961,
"unit":
{
"hostname": "MJH-4150",
"leds-active": true,
"random-password": false,
"usage-threshold": 95
},
"ethernet":
[
{
"select-ports":
[
"Ethernet23"
],
"speed": 1000,
"duplex": "full",
"enabled": true,
"poe":
{
"admin-mode": true
},
"lacp-config":
{
"lacp-enable": false,
"lacp-role": "actor",
"lacp-mode": "passive",
"lacp-port-admin-key": 1,
"lacp-port-priority": 32768,
"lacp-system-priority": 32768,
"lacp-timeout": "long"
},
"lldp-interface-config":
{
"lldp-admin-status": "rx-tx",
"lldp-basic-tlv-mgmt-ip-v4": true,
"lldp-basic-tlv-mgmt-ip-v6": true,
"lldp-basic-tlv-port-descr": true,
"lldp-basic-tlv-sys-capab": true,
"lldp-basic-tlv-sys-descr": true,
"lldp-basic-tlv-sys-name": true,
"lldp-dot1-tlv-proto-ident": true,
"lldp-dot1-tlv-proto-vid": true,
"lldp-dot1-tlv-pvid": true,
"lldp-dot1-tlv-vlan-name": true,
"lldp-dot3-tlv-link-agg": true,
"lldp-dot3-tlv-mac-phy": true,
"lldp-dot3-tlv-max-frame": true,
"lldp-dot3-tlv-poe": true,
"lldp-med-location-civic-addr":
{
"lldp-med-location-civic-addr-admin-status": true,
"lldp-med-location-civic-country-code": "CA",
"lldp-med-location-civic-device-type": 1,
"lldp-med-location-civic-ca":
[
{
"lldp-med-location-civic-ca-type": 29,
"lldp-med-location-civic-ca-value": "Mike-WFH"
}
]
},
"lldp-med-notification": true,
"lldp-med-tlv-ext-poe": true,
"lldp-med-tlv-inventory": true,
"lldp-med-tlv-location": true,
"lldp-med-tlv-med-cap": true,
"lldp-med-tlv-network-policy": true,
"lldp-notification": true
},
"dhcp-snoop-port":
{
"dhcp-snoop-port-trust": true,
"dhcp-snoop-port-client-limit": 16,
"dhcp-snoop-port-circuit-id": "1-5c17834a98a0-24"
},
"edge-port": false
},
{
"select-ports":
[
"Ethernet2"
],
"speed": 1000,
"duplex": "full",
"enabled": true,
"poe":
{
"admin-mode": true
},
"lacp-config":
{
"lacp-enable": false,
"lacp-role": "actor",
"lacp-mode": "passive",
"lacp-port-admin-key": 1,
"lacp-port-priority": 32768,
"lacp-system-priority": 32768,
"lacp-timeout": "long"
},
"lldp-interface-config":
{
"lldp-admin-status": "rx-tx",
"lldp-basic-tlv-mgmt-ip-v4": true,
"lldp-basic-tlv-mgmt-ip-v6": true,
"lldp-basic-tlv-port-descr": true,
"lldp-basic-tlv-sys-capab": true,
"lldp-basic-tlv-sys-descr": true,
"lldp-basic-tlv-sys-name": true,
"lldp-dot1-tlv-proto-ident": true,
"lldp-dot1-tlv-proto-vid": true,
"lldp-dot1-tlv-pvid": true,
"lldp-dot1-tlv-vlan-name": true,
"lldp-dot3-tlv-link-agg": true,
"lldp-dot3-tlv-mac-phy": true,
"lldp-dot3-tlv-max-frame": true,
"lldp-dot3-tlv-poe": true,
"lldp-med-location-civic-addr":
{
"lldp-med-location-civic-addr-admin-status": true,
"lldp-med-location-civic-country-code": "CA",
"lldp-med-location-civic-device-type": 1,
"lldp-med-location-civic-ca":
[
{
"lldp-med-location-civic-ca-type": 29,
"lldp-med-location-civic-ca-value": "Mike-WFH"
}
]
},
"lldp-med-notification": true,
"lldp-med-tlv-ext-poe": true,
"lldp-med-tlv-inventory": true,
"lldp-med-tlv-location": true,
"lldp-med-tlv-med-cap": true,
"lldp-med-tlv-network-policy": true,
"lldp-notification": true
},
"dhcp-snoop-port":
{
"dhcp-snoop-port-trust": true,
"dhcp-snoop-port-client-limit": 16,
"dhcp-snoop-port-circuit-id": "1-5c17834a98a0-3"
},
"edge-port": false
},
{
"select-ports":
[
"Ethernet24",
"Ethernet25",
"Ethernet26",
"Ethernet27"
],
"speed": 10000,
"duplex": "full",
"enabled": true,
"dhcp-snoop-port":
{
"dhcp-snoop-port-trust": true
},
"edge-port": false
},
{
"select-ports":
[
"Ethernet0"
],
"speed": 1000,
"duplex": "full",
"enabled": true,
"poe":
{
"admin-mode": true
},
"lacp-config":
{
"lacp-enable": false,
"lacp-role": "actor",
"lacp-mode": "passive",
"lacp-port-admin-key": 1,
"lacp-port-priority": 32768,
"lacp-system-priority": 32768,
"lacp-timeout": "long"
},
"lldp-interface-config":
{
"lldp-admin-status": "rx-tx",
"lldp-basic-tlv-mgmt-ip-v4": true,
"lldp-basic-tlv-mgmt-ip-v6": true,
"lldp-basic-tlv-port-descr": true,
"lldp-basic-tlv-sys-capab": true,
"lldp-basic-tlv-sys-descr": true,
"lldp-basic-tlv-sys-name": true,
"lldp-dot1-tlv-proto-ident": true,
"lldp-dot1-tlv-proto-vid": true,
"lldp-dot1-tlv-pvid": true,
"lldp-dot1-tlv-vlan-name": true,
"lldp-dot3-tlv-link-agg": true,
"lldp-dot3-tlv-mac-phy": true,
"lldp-dot3-tlv-max-frame": true,
"lldp-dot3-tlv-poe": true,
"lldp-med-location-civic-addr":
{
"lldp-med-location-civic-addr-admin-status": true,
"lldp-med-location-civic-country-code": "CA",
"lldp-med-location-civic-device-type": 1,
"lldp-med-location-civic-ca":
[
{
"lldp-med-location-civic-ca-type": 29,
"lldp-med-location-civic-ca-value": "Mike-WFH"
}
]
},
"lldp-med-notification": true,
"lldp-med-tlv-ext-poe": true,
"lldp-med-tlv-inventory": true,
"lldp-med-tlv-location": true,
"lldp-med-tlv-med-cap": true,
"lldp-med-tlv-network-policy": true,
"lldp-notification": true
},
"dhcp-snoop-port":
{
"dhcp-snoop-port-trust": true,
"dhcp-snoop-port-client-limit": 16,
"dhcp-snoop-port-circuit-id": "1-5c17834a98a0-1"
},
"edge-port": false
},
{
"select-ports":
[
"Ethernet4"
],
"speed": 1000,
"duplex": "full",
"enabled": true,
"poe":
{
"admin-mode": true
},
"lacp-config":
{
"lacp-enable": false,
"lacp-role": "actor",
"lacp-mode": "passive",
"lacp-port-admin-key": 1,
"lacp-port-priority": 32768,
"lacp-system-priority": 32768,
"lacp-timeout": "long"
},
"lldp-interface-config":
{
"lldp-admin-status": "rx-tx",
"lldp-basic-tlv-mgmt-ip-v4": true,
"lldp-basic-tlv-mgmt-ip-v6": true,
"lldp-basic-tlv-port-descr": true,
"lldp-basic-tlv-sys-capab": true,
"lldp-basic-tlv-sys-descr": true,
"lldp-basic-tlv-sys-name": true,
"lldp-dot1-tlv-proto-ident": true,
"lldp-dot1-tlv-proto-vid": true,
"lldp-dot1-tlv-pvid": true,
"lldp-dot1-tlv-vlan-name": true,
"lldp-dot3-tlv-link-agg": true,
"lldp-dot3-tlv-mac-phy": true,
"lldp-dot3-tlv-max-frame": true,
"lldp-dot3-tlv-poe": true,
"lldp-med-location-civic-addr":
{
"lldp-med-location-civic-addr-admin-status": true,
"lldp-med-location-civic-country-code": "CA",
"lldp-med-location-civic-device-type": 1,
"lldp-med-location-civic-ca":
[
{
"lldp-med-location-civic-ca-type": 29,
"lldp-med-location-civic-ca-value": "Mike-WFH"
}
]
},
"lldp-med-notification": true,
"lldp-med-tlv-ext-poe": true,
"lldp-med-tlv-inventory": true,
"lldp-med-tlv-location": true,
"lldp-med-tlv-med-cap": true,
"lldp-med-tlv-network-policy": true,
"lldp-notification": true
},
"dhcp-snoop-port":
{
"dhcp-snoop-port-trust": true,
"dhcp-snoop-port-client-limit": 16,
"dhcp-snoop-port-circuit-id": "1-5c17834a98a0-5"
},
"edge-port": false
},
{
"select-ports":
[
"Ethernet1",
"Ethernet3"
],
"speed": 1000,
"duplex": "full",
"enabled": true,
"poe":
{
"admin-mode": true
},
"lacp-config":
{
"lacp-enable": false,
"lacp-role": "actor",
"lacp-mode": "passive",
"lacp-port-admin-key": 1,
"lacp-port-priority": 32768,
"lacp-system-priority": 32768,
"lacp-timeout": "long"
},
"edge-port": false
},
{
"select-ports":
[
"Ethernet5",
"Ethernet7",
"Ethernet8",
"Ethernet9",
"Ethernet10",
"Ethernet11",
"Ethernet12",
"Ethernet13",
"Ethernet14",
"Ethernet15",
"Ethernet16",
"Ethernet17",
"Ethernet18",
"Ethernet19",
"Ethernet20",
"Ethernet21",
"Ethernet22"
],
"speed": 1000,
"duplex": "full",
"enabled": true,
"poe":
{
"admin-mode": true
},
"dhcp-snoop-port":
{
"dhcp-snoop-port-trust": true
},
"edge-port": false
},
{
"select-ports":
[
"Ethernet6"
],
"speed": 1000,
"duplex": "full",
"enabled": true,
"poe":
{
"admin-mode": true
},
"lacp-config":
{
"lacp-enable": false,
"lacp-role": "actor",
"lacp-mode": "passive",
"lacp-port-admin-key": 1,
"lacp-port-priority": 32768,
"lacp-system-priority": 32768,
"lacp-timeout": "long"
},
"lldp-interface-config":
{
"lldp-admin-status": "rx-tx",
"lldp-basic-tlv-mgmt-ip-v4": true,
"lldp-basic-tlv-mgmt-ip-v6": true,
"lldp-basic-tlv-port-descr": true,
"lldp-basic-tlv-sys-capab": true,
"lldp-basic-tlv-sys-descr": true,
"lldp-basic-tlv-sys-name": true,
"lldp-dot1-tlv-proto-ident": true,
"lldp-dot1-tlv-proto-vid": true,
"lldp-dot1-tlv-pvid": true,
"lldp-dot1-tlv-vlan-name": true,
"lldp-dot3-tlv-link-agg": true,
"lldp-dot3-tlv-mac-phy": true,
"lldp-dot3-tlv-max-frame": true,
"lldp-dot3-tlv-poe": true,
"lldp-med-location-civic-addr":
{
"lldp-med-location-civic-addr-admin-status": true,
"lldp-med-location-civic-country-code": "CA",
"lldp-med-location-civic-device-type": 1,
"lldp-med-location-civic-ca":
[
{
"lldp-med-location-civic-ca-type": 29,
"lldp-med-location-civic-ca-value": "Mike-WFH"
}
]
},
"lldp-med-notification": true,
"lldp-med-tlv-ext-poe": true,
"lldp-med-tlv-inventory": true,
"lldp-med-tlv-location": true,
"lldp-med-tlv-med-cap": true,
"lldp-med-tlv-network-policy": true,
"lldp-notification": true
},
"dhcp-snoop-port":
{
"dhcp-snoop-port-trust": true,
"dhcp-snoop-port-client-limit": 16,
"dhcp-snoop-port-circuit-id": "1-5c17834a98a0-7"
},
"edge-port": false
}
],
"switch":
{
"loop-detection":
{
"protocol": "stp",
"instances":
[
{
"enabled": true,
"priority": 32768,
"forward_delay": 15,
"hello_time": 2,
"max_age": 20
}
]
},
"trunk-balance-method": "src-dst-mac",
"jumbo-frames": false,
"dhcp-snooping":
{
"dhcp-snoop-enable": true,
"dhcp-snoop-rate-limit": 1000,
"dhcp-snoop-mac-verify": true,
"dhcp-snoop-inf-opt-82": true,
"dhcp-snoop-inf-opt-encode-subopt": true,
"dhcp-snoop-inf-opt-remoteid": "5c17834a98a0",
"dhcp-snoop-inf-opt-policy": "drop"
},
"lldp-global-config":
{
"lldp-enable": true,
"lldp-holdtime-multiplier": 3,
"lldp-med-fast-start-count": 5,
"lldp-refresh-interval": 60,
"lldp-reinit-delay": 5,
"lldp-tx-delay": 5,
"lldp-notification-interval": 10
},
"mc-lag": false,
"arp-inspect":
{
"ip-arp-inspect": false
}
},
"interfaces":
[
{
"name": "VLAN1",
"role": "upstream",
"services":
[
"lldp",
"ssh"
],
"vlan":
{
"id": 1,
"proto": "802.1q"
},
"ethernet":
[
{
"select-ports":
[
"Ethernet0",
"Ethernet1",
"Ethernet2",
"Ethernet3",
"Ethernet4",
"Ethernet5",
"Ethernet6",
"Ethernet7",
"Ethernet8",
"Ethernet9",
"Ethernet10",
"Ethernet11",
"Ethernet12",
"Ethernet13",
"Ethernet14",
"Ethernet15",
"Ethernet16",
"Ethernet17",
"Ethernet18",
"Ethernet19",
"Ethernet20",
"Ethernet21",
"Ethernet22",
"Ethernet23",
"Ethernet24",
"Ethernet25",
"Ethernet26",
"Ethernet27"
],
"vlan-tag": "un-tagged",
"pvid": true
}
],
"ipv4":
{
"addressing": "dynamic",
"send-hostname": true,
"dhcp-snoop-vlan-enable": true
}
}
],
"services":
{
"lldp":
{
"describe": "MJH-4150",
"location": "Mike-WFH"
},
"ssh":
{
"port": 22,
"password-authentication": true,
"enable": true
},
"log":
{
"host": "192.168.2.38",
"port": 514,
"proto": "udp",
"size": 1000,
"priority": 7
},
"snmp":
{
"enabled": true
}
},
"metrics":
{
"statistics":
{
"interval": 60,
"types":
[
"lldp",
"clients"
]
},
"health":
{
"interval": 60,
"dhcp-local": true,
"dhcp-remote": false,
"dns-local": true,
"dns-remote": true
}
}
}

View File

@@ -1,15 +1,70 @@
{
"ethernet": [
{
"duplex": "full",
"enabled": false,
"speed": 1000,
"select-ports": [
"Ethernet*"
]
}
],
"interfaces": [],
"services": {},
"uuid": 1
}
{
"ethernet": [
{
"duplex": "full",
"enabled": false,
"speed": 1000,
"select-ports": [
"Ethernet0",
"Ethernet1",
"Ethernet2",
"Ethernet3",
"Ethernet4",
"Ethernet5",
"Ethernet6",
"Ethernet7",
"Ethernet8",
"Ethernet9",
"Ethernet10",
"Ethernet11",
"Ethernet12",
"Ethernet13",
"Ethernet14",
"Ethernet15",
"Ethernet16",
"Ethernet17",
"Ethernet18",
"Ethernet19",
"Ethernet20",
"Ethernet21",
"Ethernet22",
"Ethernet23",
"Ethernet24",
"Ethernet25",
"Ethernet26",
"Ethernet27",
"Ethernet28",
"Ethernet29",
"Ethernet30",
"Ethernet31",
"Ethernet32",
"Ethernet33",
"Ethernet34",
"Ethernet35",
"Ethernet36",
"Ethernet37",
"Ethernet38",
"Ethernet39",
"Ethernet40",
"Ethernet41",
"Ethernet42",
"Ethernet43",
"Ethernet44",
"Ethernet45",
"Ethernet46",
"Ethernet47",
"Ethernet48",
"Ethernet52",
"Ethernet56",
"Ethernet60",
"Ethernet64",
"Ethernet68",
"Ethernet72",
"Ethernet76"
]
}
],
"interfaces": [],
"services": {},
"uuid": 1
}

View File

@@ -1,16 +1,16 @@
{
"ethernet": [
{
"duplex": "full",
"enabled": true,
"speed": 1000,
"select-ports": [
"Ethernet1",
"Ethernet2"
]
}
],
"interfaces": [],
"services": {},
"uuid": 1
}
{
"ethernet": [
{
"duplex": "full",
"enabled": true,
"speed": 1000,
"select-ports": [
"Ethernet1",
"Ethernet2"
]
}
],
"interfaces": [],
"services": {},
"uuid": 1
}

View File

@@ -1,23 +1,23 @@
{
"ethernet": [
{
"duplex": "full",
"enabled": true,
"speed": 1000,
"select-ports": [
"Ethernet1"
]
},
{
"duplex": "full",
"enabled": false,
"select-ports": [
"Ethernet2"
],
"speed": 1000
}
],
"interfaces": [],
"services": {},
"uuid": 2
}
{
"ethernet": [
{
"duplex": "full",
"enabled": true,
"speed": 1000,
"select-ports": [
"Ethernet1"
]
},
{
"duplex": "full",
"enabled": false,
"select-ports": [
"Ethernet2"
],
"speed": 1000
}
],
"interfaces": [],
"services": {},
"uuid": 2
}

View File

@@ -1,35 +1,35 @@
{
"ethernet": [
{
"duplex": "full",
"enabled": true,
"speed": 1000,
"select-ports": [
"Ethernet1",
"Ethernet2"
]
}
],
"interfaces": [
{
"vlan": {
"id": 10,
"proto": "802.1q"
},
"ethernet": [
{
"select-ports": [
"Ethernet1",
"Ethernet2"
],
"vlan-tag": "tagged"
}
],
"name": "mgmt",
"role": "upstream",
"services": []
}
],
"services": {},
"uuid": 3
}
{
"ethernet": [
{
"duplex": "full",
"enabled": true,
"speed": 1000,
"select-ports": [
"Ethernet1",
"Ethernet2"
]
}
],
"interfaces": [
{
"vlan": {
"id": 10,
"proto": "802.1q"
},
"ethernet": [
{
"select-ports": [
"Ethernet1",
"Ethernet2"
],
"vlan-tag": "tagged"
}
],
"name": "mgmt",
"role": "upstream",
"services": []
}
],
"services": {},
"uuid": 3
}

View File

@@ -1,51 +1,51 @@
{
"ethernet": [
{
"duplex": "full",
"enabled": true,
"speed": 1000,
"select-ports": [
"Ethernet1",
"Ethernet2"
]
}
],
"interfaces": [
{
"vlan": {
"id": 10,
"proto": "802.1q"
},
"ethernet": [
{
"select-ports": [
"Ethernet1"
],
"vlan-tag": "tagged"
}
],
"name": "mgmt",
"role": "upstream",
"services": []
},
{
"vlan": {
"id": 100,
"proto": "802.1q"
},
"ethernet": [
{
"select-ports": [
"Ethernet2"
],
"vlan-tag": "tagged"
}
],
"name": "mgmt",
"role": "upstream",
"services": []
}
],
"services": {},
"uuid": 3
}
{
"ethernet": [
{
"duplex": "full",
"enabled": true,
"speed": 1000,
"select-ports": [
"Ethernet1",
"Ethernet2"
]
}
],
"interfaces": [
{
"vlan": {
"id": 10,
"proto": "802.1q"
},
"ethernet": [
{
"select-ports": [
"Ethernet1"
],
"vlan-tag": "tagged"
}
],
"name": "mgmt",
"role": "upstream",
"services": []
},
{
"vlan": {
"id": 100,
"proto": "802.1q"
},
"ethernet": [
{
"select-ports": [
"Ethernet2"
],
"vlan-tag": "tagged"
}
],
"name": "mgmt",
"role": "upstream",
"services": []
}
],
"services": {},
"uuid": 3
}

View File

@@ -17,11 +17,7 @@
{
"ipv4": {
"addressing": "static",
"subnet": [
{
"prefix": "20.20.20.20/24"
}
],
"subnet": "20.20.20.20/24",
"dhcp": {
"relay-server": "172.20.254.8",
"circuit-id-format": "{Name}:{VLAN-ID}"
@@ -48,11 +44,7 @@
{
"ipv4": {
"addressing": "static",
"subnet": [
{
"prefix": "30.30.30.30/24"
}
],
"subnet": "30.30.30.30/24",
"dhcp": {
"relay-server": "172.20.10.12",
"circuit-id-format": "{Name}:{VLAN-ID}"
@@ -79,11 +71,7 @@
{
"ipv4": {
"addressing": "static",
"subnet": [
{
"prefix": "172.20.10.181/24"
}
]
"subnet": "172.20.10.181/24"
},
"vlan": {
"id": 20,

View File

@@ -50,11 +50,7 @@
},
"ipv4": {
"addressing": "static",
"subnet": [
{
"prefix": "10.10.20.100/24"
}
]
"subnet": "10.10.20.100/24"
},
"ethernet": [
{
@@ -74,11 +70,7 @@
},
"ipv4": {
"addressing": "static",
"subnet": [
{
"prefix": "10.10.50.100/24"
}
]
"subnet": "10.10.50.100/24"
},
"ethernet": [
{

View File

@@ -1,64 +0,0 @@
{
"ethernet": [
{
"select-ports": [
"Ethernet*"
],
"speed": 1000,
"duplex": "full",
"enabled": true,
"poe": {
"admin-mode": true
}
}
],
"interfaces": [
{
"vlan": {
"id": 1,
"proto": "802.1q"
},
"ethernet": [
{
"select-ports": [
"Ethernet*"
],
"vlan-tag": "un-tagged"
}
],
"ipv4": {
"multicast": {
"igmp": {
"querier-enable": true,
"query-interval": 60,
"snooping-enable": true,
"version": 3,
"static-mcast-groups": [
{
"address": "230.1.1.1",
"egress-ports": [
"Ethernet1"
]
},
{
"address": "230.2.2.2",
"egress-ports": [
"Ethernet2",
"Ethernet3"
]
}
]
}
},
"subnet": [
{
"prefix": "1.1.1.1/24"
}
]
},
"role": "upstream",
"name": "mgmt-vlan"
}
],
"uuid": 1
}

View File

@@ -1,530 +0,0 @@
{
"ethernet": [
{
"duplex": "full",
"enabled": true,
"poe": {
"admin-mode": true,
"detection": "2pt-dot3af",
"priority": "high"
},
"select-ports": [
"Ethernet0"
],
"speed": 1000
},
{
"duplex": "full",
"enabled": true,
"poe": {
"admin-mode": true,
"detection": "2pt-dot3af",
"priority": "high"
},
"select-ports": [
"Ethernet1"
],
"speed": 1000
},
{
"duplex": "full",
"enabled": true,
"poe": {
"admin-mode": true,
"detection": "2pt-dot3af",
"priority": "high"
},
"select-ports": [
"Ethernet2"
],
"speed": 1000
},
{
"duplex": "full",
"enabled": true,
"poe": {
"admin-mode": true,
"detection": "2pt-dot3af",
"priority": "high"
},
"select-ports": [
"Ethernet3"
],
"speed": 1000
},
{
"duplex": "full",
"enabled": true,
"poe": {
"admin-mode": true,
"detection": "2pt-dot3af",
"priority": "high"
},
"select-ports": [
"Ethernet4"
],
"speed": 1000
},
{
"duplex": "full",
"enabled": true,
"poe": {
"admin-mode": true,
"detection": "2pt-dot3af",
"priority": "high"
},
"select-ports": [
"Ethernet5"
],
"speed": 1000
},
{
"duplex": "full",
"enabled": true,
"poe": {
"admin-mode": true,
"detection": "2pt-dot3af",
"priority": "high"
},
"select-ports": [
"Ethernet6"
],
"speed": 1000
},
{
"duplex": "full",
"enabled": true,
"poe": {
"admin-mode": true,
"detection": "2pt-dot3af",
"priority": "high"
},
"select-ports": [
"Ethernet7"
],
"speed": 1000
},
{
"duplex": "full",
"enabled": true,
"poe": {
"admin-mode": true,
"detection": "2pt-dot3af",
"priority": "high"
},
"select-ports": [
"Ethernet8"
],
"speed": 1000
},
{
"duplex": "full",
"enabled": true,
"poe": {
"admin-mode": true,
"detection": "2pt-dot3af",
"priority": "high"
},
"select-ports": [
"Ethernet9"
],
"speed": 1000
},
{
"duplex": "full",
"enabled": true,
"poe": {
"admin-mode": true,
"detection": "2pt-dot3af",
"priority": "high"
},
"select-ports": [
"Ethernet10"
],
"speed": 1000
},
{
"duplex": "full",
"enabled": true,
"poe": {
"admin-mode": true,
"detection": "2pt-dot3af",
"priority": "high"
},
"select-ports": [
"Ethernet11"
],
"speed": 1000
},
{
"duplex": "full",
"enabled": true,
"poe": {
"admin-mode": true,
"detection": "2pt-dot3af",
"priority": "high"
},
"select-ports": [
"Ethernet12"
],
"speed": 1000
},
{
"duplex": "full",
"enabled": true,
"poe": {
"admin-mode": true,
"detection": "2pt-dot3af",
"priority": "high"
},
"select-ports": [
"Ethernet13"
],
"speed": 1000
},
{
"duplex": "full",
"enabled": true,
"poe": {
"admin-mode": true,
"detection": "2pt-dot3af",
"priority": "high"
},
"select-ports": [
"Ethernet14"
],
"speed": 1000
},
{
"duplex": "full",
"enabled": true,
"poe": {
"admin-mode": true,
"detection": "2pt-dot3af",
"priority": "high"
},
"select-ports": [
"Ethernet15"
],
"speed": 1000
},
{
"duplex": "full",
"enabled": true,
"poe": {
"admin-mode": true,
"detection": "2pt-dot3af",
"priority": "high"
},
"select-ports": [
"Ethernet16"
],
"speed": 1000
},
{
"duplex": "full",
"enabled": true,
"poe": {
"admin-mode": true,
"detection": "2pt-dot3af",
"priority": "high"
},
"select-ports": [
"Ethernet17"
],
"speed": 1000
},
{
"duplex": "full",
"enabled": true,
"poe": {
"admin-mode": true,
"detection": "2pt-dot3af",
"priority": "high"
},
"select-ports": [
"Ethernet18"
],
"speed": 1000
},
{
"duplex": "full",
"enabled": true,
"poe": {
"admin-mode": true,
"detection": "2pt-dot3af",
"priority": "high"
},
"select-ports": [
"Ethernet19"
],
"speed": 1000
},
{
"duplex": "full",
"enabled": true,
"poe": {
"admin-mode": true,
"detection": "2pt-dot3af",
"priority": "high"
},
"select-ports": [
"Ethernet20"
],
"speed": 1000
},
{
"duplex": "full",
"enabled": true,
"poe": {
"admin-mode": true,
"detection": "2pt-dot3af",
"priority": "high"
},
"select-ports": [
"Ethernet21"
],
"speed": 1000
},
{
"duplex": "full",
"enabled": true,
"poe": {
"admin-mode": true,
"detection": "2pt-dot3af",
"priority": "high"
},
"select-ports": [
"Ethernet22"
],
"speed": 1000
},
{
"duplex": "full",
"enabled": true,
"poe": {
"admin-mode": true,
"detection": "2pt-dot3af",
"priority": "high"
},
"select-ports": [
"Ethernet23"
],
"speed": 1000
},
{
"duplex": "full",
"enabled": true,
"poe": {
"admin-mode": true,
"detection": "2pt-dot3af",
"priority": "high"
},
"select-ports": [
"Ethernet24"
],
"speed": 10000
},
{
"duplex": "full",
"enabled": true,
"poe": {
"admin-mode": true,
"detection": "2pt-dot3af",
"priority": "high"
},
"select-ports": [
"Ethernet25"
],
"speed": 10000
},
{
"duplex": "full",
"enabled": true,
"poe": {
"admin-mode": true,
"detection": "2pt-dot3af",
"priority": "high"
},
"select-ports": [
"Ethernet26"
],
"speed": 10000
},
{
"duplex": "full",
"enabled": true,
"poe": {
"admin-mode": true,
"detection": "2pt-dot3af",
"priority": "high"
},
"select-ports": [
"Ethernet27"
],
"speed": 10000
}
],
"interfaces": [
{
"ethernet": [
{
"select-ports": [
"Ethernet0",
"Ethernet5",
"Ethernet6",
"Ethernet7",
"Ethernet8",
"Ethernet9",
"Ethernet10",
"Ethernet11",
"Ethernet12",
"Ethernet13",
"Ethernet14",
"Ethernet15",
"Ethernet16",
"Ethernet17",
"Ethernet18",
"Ethernet19",
"Ethernet20",
"Ethernet21",
"Ethernet22",
"Ethernet23",
"Ethernet24",
"Ethernet25",
"Ethernet26",
"Ethernet27"
],
"vlan-tag": "un-tagged"
}
],
"ipv4": {
"addressing": "dynamic"
},
"name": "VLAN1",
"vlan": {
"id": 1
}
},
{
"ethernet": [
{ "pvid": true,
"select-ports": [
"Ethernet1",
"Ethernet2",
"Ethernet3",
"Ethernet4"
],
"vlan-tag": "un-tagged"
},
{
"select-ports": [
"Ethernet0"
],
"vlan-tag": "tagged"
}
],
"ipv4": {
"addressing": "static",
"subnet": [
{
"prefix": "10.1.12.157/24"
}
]
},
"name": "VLAN100",
"vlan": {
"id": 100,
"proto": "802.1q"
}
},
{
"ethernet": [
{
"select-ports": [
"Ethernet5",
"Ethernet6",
"Ethernet8"
],
"vlan-tag": "un-tagged"
}
],
"ipv4": {
"addressing": "static",
"multicast": {
"igmp": {
"fast-leave-enable": true,
"last-member-query-interval": 33,
"max-response-time": 11,
"querier-enable": true,
"query-interval": 14,
"snooping-enable": true,
"static-mcast-groups": [
{
"address": "229.229.229.1",
"egress-ports": [
"Ethernet5",
"Ethernet6",
"Ethernet8"
]
}
],
"version": 3
}
}
},
"role": "upstream",
"services": [
"ssh",
"lldp"
],
"vlan": {
"id": 500,
"proto": "802.1q"
}
}
],
"metrics": {
"dhcp-snooping": {
"filters": [
"ack",
"discover",
"offer",
"request",
"solicit",
"reply",
"renew"
]
},
"health": {
"interval": 60
},
"statistics": {
"interval": 300,
"types": ["lldp",
"clients"
]
}
},
"services": {
"http": {
"enable": true
},
"ssh": {
"enable": true
},
"lldp": {
"describe": "uCentral",
"location": "universe"
}
},
"unit": {
"leds-active": true,
"usage-threshold": 90
},
"uuid": 1719887774
}

View File

@@ -1,13 +0,0 @@
{
"ethernet": [
{
"duplex": "full",
"enabled": true,
"speed": 1000,
"select-ports": ["Ethernet1"]
}
],
"interfaces": "not-an-array",
"services": {},
"uuid": 1
}

View File

@@ -1,13 +0,0 @@
{
"ethernet": [
{
"duplex": "full",
"enabled": true,
"speed": 1000,
"select-ports": [
"Ethernet1"
]
}
],
"uuid": 1
}

View File

@@ -1,13 +0,0 @@
{
"ethernet": [
{
"duplex": "full",
"enabled": true,
"speed": 1000,
"select-ports": ["Ethernet1"]
}
],
"interfaces": [],
"services": ["should-be-object"],
"uuid": 1
}

View File

@@ -1,11 +0,0 @@
{
"ethernet": {
"duplex": "full",
"enabled": true,
"speed": 1000,
"select-ports": ["Ethernet1"]
},
"interfaces": [],
"services": {},
"uuid": 1
}

File diff suppressed because it is too large Load Diff

View File

@@ -1,54 +0,0 @@
{
"ethernet": [
{
"select-ports": [
"Ethernet*"
],
"speed": 1000,
"duplex": "full",
"enabled": true,
"poe": {
"admin-mode": true
}
}
],
"interfaces": [
{
"vlan": {
"id": 1,
"proto": "802.1q"
},
"ethernet": [
{
"select-ports": [
"Ethernet*"
],
"vlan-tag": "un-tagged"
}
],
"role": "upstream",
"name": "mgmt-vlan"
}
],
"switch": {
"port-isolation": {
"sessions": [
{
"id": 1,
"uplink": {
"interface-list": [
"Ethernet1"
]
},
"downlink": {
"interface-list": [
"Ethernet2",
"Ethernet3"
]
}
}
]
}
},
"uuid": 1
}

View File

@@ -1,66 +0,0 @@
{
"ethernet": [
{
"select-ports": [
"Ethernet*"
],
"speed": 1000,
"duplex": "full",
"enabled": true,
"poe": {
"admin-mode": true
}
}
],
"interfaces": [
{
"vlan": {
"id": 1,
"proto": "802.1q"
},
"ethernet": [
{
"select-ports": [
"Ethernet*"
],
"vlan-tag": "un-tagged"
}
],
"role": "upstream",
"name": "mgmt-vlan"
},
{
"vlan": {
"id": 2,
"proto": "802.1q"
},
"ethernet": [
{
"select-ports": [
"Ethernet*"
],
"vlan-tag": "tagged"
}
],
"role": "upstream",
"name": "mgmt-vlan"
}
],
"switch": {
"loop-detection": {
"protocol": "rpvstp",
"instances": [
{
"id": 1,
"enabled": true,
"priority": 32768
},
{
"id": 2,
"enabled": false
}
]
}
},
"uuid": 1
}

View File

@@ -1,43 +0,0 @@
{
"ethernet": [
{
"select-ports": [
"Ethernet*"
],
"speed": 1000,
"duplex": "full",
"enabled": true,
"poe": {
"admin-mode": true
}
}
],
"interfaces": [
{
"vlan": {
"id": 1,
"proto": "802.1q"
},
"ethernet": [
{
"select-ports": [
"Ethernet*"
],
"vlan-tag": "un-tagged"
}
],
"role": "upstream",
"name": "mgmt-vlan"
}
],
"services": {
"log": {
"port": 2000,
"priority": 7,
"size": 1000,
"host": "192.168.1.10",
"proto": "udp"
}
},
"uuid": 1
}

View File

@@ -1,165 +0,0 @@
{
"ethernet": [
{
"duplex": "full",
"enabled": true,
"poe": {
"admin-mode": false,
"power-limit": 12345
},
"select-ports": [
"Ethernet1",
"Ethernet2"
],
"speed": 1000
}
],
"interfaces": [
{
"ethernet": [
{
"pvid": true,
"select-ports": [
"Ethernet1"
],
"vlan-tag": "un-tagged"
}
],
"ipv4": {
"addressing": "static",
"dhcp": {
"circuit-id-format": "{VLAN-ID}",
"relay-server": "192.168.5.1"
},
"subnet": [
{
"prefix": "192.168.2.254/24"
}
]
},
"name": "vlan_2",
"vlan": {
"id": 2
}
},
{
"ethernet": [
{
"pvid": true,
"select-ports": [
"Ethernet2"
],
"vlan-tag": "un-tagged"
}
],
"ipv4": {
"addressing": "static",
"dhcp": {
"circuit-id-format": "{VLAN-ID}",
"relay-server": "192.168.5.1"
},
"subnet": [
{
"prefix": "192.168.3.254/24"
}
]
},
"name": "vlan_3",
"vlan": {
"id": 3
}
},
{
"ethernet": [
{
"pvid": true,
"select-ports": [
"Ethernet4"
],
"vlan-tag": "un-tagged"
}
],
"ipv4": {
"addressing": "static",
"subnet": [
{
"prefix": "192.168.5.254/24"
}
]
},
"name": "vlan_5",
"vlan": {
"id": 5
}
},
{
"ethernet": [
{
"select-ports": [
"Ethernet8",
"Ethernet9"
],
"vlan-tag": "un-tagged"
}
],
"ipv4": {
"addressing": "dynamic"
},
"name": "vlan_1234",
"vlan": {
"id": 1234
}
}
],
"metrics": {
"dhcp-snooping": {
"filters": [
"ack",
"discover",
"offer",
"request",
"solicit",
"reply",
"renew"
]
},
"health": {
"interval": 600
},
"statistics": {
"interval": 1200,
"types": []
}
},
"services": {
"http": {
"enable": true
},
"ssh": {
"enable": true
},
"telnet": {
"enable": true
}
},
"switch": {
"loop-detection": {
"instances": [
{
"enabled": true,
"forward_delay": 15,
"hello_time": 3,
"id": 20,
"max_age": 20,
"priority": 32768
}
],
"protocol": "rstp"
}
},
"unit": {
"leds-active": true,
"usage-threshold": 95
},
"uuid": 1713842091
}

View File

@@ -1,96 +0,0 @@
{
"ethernet": [
{
"duplex": "full",
"enabled": true,
"select-ports": [
"Ethernet*"
],
"speed": 1000
}
],
"interfaces": [
{
"ethernet": [
{
"select-ports": [
"Ethernet*"
],
"vlan-tag": "un-tagged"
}
],
"ipv4": {
"addressing": "dynamic"
},
"name": "VLAN1",
"vlan": {
"id": 1
}
},
{
"ethernet": [
{
"select-ports": [
"Ethernet1"
],
"vlan-tag": "un-tagged"
}
],
"ipv4": {
"voice-vlan-intf-config": {
"voice-vlan-intf-detect-voice": "lldp",
"voice-vlan-intf-mode": "auto",
"voice-vlan-intf-priority": 3,
"voice-vlan-intf-security": true
}
}
}
],
"metrics": {
"health": {
"interval": 300
},
"statistics": {
"interval": 300,
"types": []
}
},
"services": {
"http": {
"enable": true
},
"https": {
"enable": true
},
"ssh": {
"enable": false
},
"telnet": {
"enable": false
}
},
"switch": {
"voice-vlan-config": {
"voice-vlan-ageing-time": 1440,
"voice-vlan-enable": true,
"voice-vlan-id": 100,
"voice-vlan-oui-config": [
{
"voice-vlan-oui-description": "Cisco VoIP Phone",
"voice-vlan-oui-mac": "00:1B:44:11:3A:B7",
"voice-vlan-oui-mask": "FF:FF:FF:00:00:00"
},
{
"voice-vlan-oui-description": "Polycom VoIP Phone",
"voice-vlan-oui-mac": "00:0E:8F:12:34:56",
"voice-vlan-oui-mask": "FF:FF:FF:00:00:00"
}
]
}
},
"unit": {
"leds-active": true,
"usage-threshold": 90
},
"uuid": 1730796040
}

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

View File

@@ -1,286 +0,0 @@
#!/bin/bash
#
# run-config-tests.sh - Run uCentral configuration tests in Docker
#
# Usage: ./run-config-tests.sh [format] [config-file]
#
# Arguments:
# format - Output format: html, json, human (default: human)
# config-file - Optional specific config file to test (default: all configs)
#
# Examples:
# ./run-config-tests.sh human # Test all configs, human output
# ./run-config-tests.sh html # Test all configs, HTML report
# ./run-config-tests.sh json cfg0.json # Test single config, JSON output
#
# Abort on the first failing command.
set -e

# ANSI colour codes consumed by the print_* helpers below.
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
NC='\033[0m' # No Color

# Fixed locations: this script's own directory, the build container's name,
# and the paths used inside that container.
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
CONTAINER_NAME="ucentral_client_build_env"
BUILD_DIR="/root/ols-nos/tests/config-parser"
CONFIG_DIR="/root/ols-nos/config-samples"
OUTPUT_DIR="$SCRIPT_DIR/output"
DOCKERFILE_PATH="$SCRIPT_DIR/Dockerfile"

# Command-line arguments: output format, plus an optional single config file.
FORMAT="${1:-human}"
SINGLE_CONFIG="${2:-}"

# Reject anything but the three supported output formats.
if [[ "$FORMAT" != html && "$FORMAT" != json && "$FORMAT" != human ]]; then
    echo -e "${RED}Error: Invalid format '$FORMAT'. Must be 'html', 'json', or 'human'${NC}"
    echo "Usage: $0 [html|json|human] [config-file]"
    exit 1
fi
# Function to print status messages
# Print an informational status line with a blue "==>" prefix.
print_status() {
    echo -e "${BLUE}==>${NC} $1"
}

# Print a success message with a green check mark.
# NOTE(review): the marker glyphs between the colour codes were lost to an
# encoding issue in the original (the codes wrapped an empty string);
# restored here — confirm against the script's intended output.
print_success() {
    echo -e "${GREEN}✓${NC} $1"
}

# Print a warning message with a yellow warning sign.
print_warning() {
    echo -e "${YELLOW}⚠${NC} $1"
}

# Print an error message with a red cross.
print_error() {
    echo -e "${RED}✗${NC} $1"
}
# Function to check if Docker is running
check_docker() {
if ! docker info > /dev/null 2>&1; then
print_error "Docker is not running. Please start Docker and try again."
exit 1
fi
print_success "Docker is running"
}
# Function to check if container exists
container_exists() {
docker ps -a --format '{{.Names}}' | grep -q "^${CONTAINER_NAME}$"
}
# Function to check if container is running
container_running() {
docker ps --format '{{.Names}}' | grep -q "^${CONTAINER_NAME}$"
}
# Function to get Dockerfile SHA
get_dockerfile_sha() {
if [ -f "$DOCKERFILE_PATH" ]; then
shasum -a 1 "$DOCKERFILE_PATH" | awk '{print $1}' | cut -c1-8
else
echo "unknown"
fi
}
# Function to build Docker environment if needed
build_environment() {
local current_sha=$(get_dockerfile_sha)
local image_tag="ucentral-build-env:${current_sha}"
# Check if image exists
if docker images --format '{{.Repository}}:{{.Tag}}' | grep -q "^${image_tag}$"; then
print_success "Build environment image already exists (${image_tag})"
return 0
fi
print_status "Building Docker build environment..."
print_status "This may take several minutes on first run..."
if make build-host-env; then
print_success "Build environment created"
else
print_error "Failed to build environment"
exit 1
fi
}
# Function to start container if not running
start_container() {
if container_running; then
print_success "Container is already running"
return 0
fi
if container_exists; then
print_status "Starting existing container..."
docker start "$CONTAINER_NAME" > /dev/null
print_success "Container started"
else
print_status "Creating and starting new container..."
if make run-host-env; then
print_success "Container created and started"
else
print_error "Failed to start container"
exit 1
fi
fi
# Wait for container to be ready
sleep 2
}
# Function to run tests in Docker
run_tests() {
local test_cmd=""
local output_file=""
local copy_files=()
if [ -n "$SINGLE_CONFIG" ]; then
print_status "Running test for single config: $SINGLE_CONFIG"
# Verify config exists in container
if ! docker exec "$CONTAINER_NAME" bash -c "test -f $CONFIG_DIR/$SINGLE_CONFIG"; then
print_error "Config file not found in container: $SINGLE_CONFIG"
print_status "Available configs:"
docker exec "$CONTAINER_NAME" bash -c "ls $CONFIG_DIR/*.json 2>/dev/null | xargs -n1 basename" || true
exit 1
fi
case "$FORMAT" in
html)
output_file="test-report-${SINGLE_CONFIG%.json}.html"
test_cmd="cd $BUILD_DIR && make test-config-parser && LD_LIBRARY_PATH=/usr/local/lib ./test-config-parser --html $CONFIG_DIR/$SINGLE_CONFIG > $BUILD_DIR/$output_file"
copy_files=("$output_file")
;;
json)
output_file="test-results-${SINGLE_CONFIG%.json}.json"
test_cmd="cd $BUILD_DIR && make test-config-parser && LD_LIBRARY_PATH=/usr/local/lib ./test-config-parser --json $CONFIG_DIR/$SINGLE_CONFIG > $BUILD_DIR/$output_file"
copy_files=("$output_file")
;;
human)
output_file="test-results-${SINGLE_CONFIG%.json}.txt"
test_cmd="cd $BUILD_DIR && make test-config-parser && LD_LIBRARY_PATH=/usr/local/lib ./test-config-parser $CONFIG_DIR/$SINGLE_CONFIG 2>&1 | tee $BUILD_DIR/$output_file"
copy_files=("$output_file")
;;
esac
else
print_status "Running tests for all configurations (format: $FORMAT)"
case "$FORMAT" in
html)
output_file="test-report.html"
test_cmd="cd $BUILD_DIR && make test-config-html"
copy_files=("$output_file")
;;
json)
output_file="test-report.json"
test_cmd="cd $BUILD_DIR && make test-config-json"
copy_files=("$output_file")
;;
human)
output_file="test-results.txt"
test_cmd="cd $BUILD_DIR && make test-config-full 2>&1 | tee $BUILD_DIR/$output_file"
copy_files=("$output_file")
;;
esac
fi
print_status "Executing tests in container..."
echo ""
# Run the test command
if docker exec "$CONTAINER_NAME" bash -c "$test_cmd"; then
print_success "Tests completed successfully"
TEST_EXIT_CODE=0
else
TEST_EXIT_CODE=$?
print_warning "Tests completed with issues (exit code: $TEST_EXIT_CODE)"
fi
echo ""
# Create output directory if it doesn't exist
mkdir -p "$OUTPUT_DIR"
# Copy output files from container to host
for file in "${copy_files[@]}"; do
if docker exec "$CONTAINER_NAME" bash -c "test -f $BUILD_DIR/$file"; then
print_status "Copying $file from container to host..."
docker cp "$CONTAINER_NAME:$BUILD_DIR/$file" "$OUTPUT_DIR/$file"
print_success "Output saved: $OUTPUT_DIR/$file"
# Show file info
local file_size=$(du -h "$OUTPUT_DIR/$file" | cut -f1)
echo " Size: $file_size"
else
print_warning "Output file not found in container: $file"
fi
done
return $TEST_EXIT_CODE
}
# Function to print summary
print_summary() {
local exit_code=$1
echo ""
echo "========================================"
echo "Test Run Summary"
echo "========================================"
echo "Format: $FORMAT"
if [ -n "$SINGLE_CONFIG" ]; then
echo "Config: $SINGLE_CONFIG"
else
echo "Config: All configurations"
fi
echo "Output Dir: $OUTPUT_DIR"
echo ""
if [ $exit_code -eq 0 ]; then
print_success "All tests passed!"
else
print_warning "Some tests failed or had issues"
fi
echo ""
echo "Output files:"
ls -lh "$OUTPUT_DIR" | tail -n +2 | while read -r line; do
echo " $line"
done
}
# Main execution
main() {
print_status "uCentral Configuration Test Runner"
echo ""
# Check prerequisites
check_docker
# Build environment if needed
build_environment
# Start container if needed
start_container
# Run tests
run_tests
TEST_RESULT=$?
# Print summary
print_summary $TEST_RESULT
exit $TEST_RESULT
}
# Run main function
main

View File

@@ -36,7 +36,6 @@ override_dh_install:
# home folder.
mkdir -p ${INSTALL}/home/admin
cp scripts/OLS_NOS_fixups.script ${INSTALL}/usr/local/lib
cp scripts/OLS_NOS_upgrade_override.script ${INSTALL}/usr/local/lib
cp docker-ucentral-client.gz ${INSTALL}/usr/local/lib
# Install Vlan1 in-band management configuration
mkdir -p ${INSTALL}/etc/network/interfaces.d/

View File

@@ -21,8 +21,6 @@ COPY /ucentral-client /usr/local/bin/ucentral-client
COPY /rtty /usr/local/bin/
COPY /lib* /usr/local/lib/
COPY /version.jso[n] /etc/
COPY /schema.jso[n] /etc/
RUN ldconfig
RUN ls -l /usr/local/bin/ucentral-client

33
src/ec-private/README.md Executable file
View File

@@ -0,0 +1,33 @@
# Ucentral for EC
The Ucentral solution for EC consists of the following parts:
* `ecapi`: a library to communicate with EC via SNMP
# Compiling
## EC Build for Target Device
First build the full EC image for your target device:
* `cd EC_VOB/project_build_environment/<target device>`
* `./make_all`
If this is successful, you can proceed to the next step.
## Build Environment
To successfully build the required components, the build environment variables must be prepared:
* `cd EC_VOB/project_build_environment/<target device>`
* `cd utils`
* `. build_env_init`
## Building All Components
Assuming you have checked out [ols-ucentral-src]:
* `cd [ols-ucentral-src]`
* Run `make plat-ec`, which should successfully compile all components
## Creating EC Firmware with Ucentral
After building everything up:
* Check the `output` directory, it should contain all required binaries in appropriate subdirectories
* Copy over these directories to your `EC_VOB/project_build_environment/<target device>/user/thirdpty/ucentral`

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

141
src/ec-private/build.sh Executable file
View File

@@ -0,0 +1,141 @@
#!/bin/bash
#
# Cross-compile the ucentral client and its third-party dependencies for EC
# platforms.  Must be run after sourcing the EC `build_env_init` script so
# PROJECT_NAME, SOURCE_PATH, TOOLCHAIN_PATH, CROSS_COMPILE and D_MODEL_NAME
# are exported.  Artifacts land under ./output/{usr/sbin,lib}.

UCENTRAL_DIR=${PWD}
EC_BUILD_DIR=${PWD}/src/ec-private
OUT_DIR=${UCENTRAL_DIR}/output
BIN_DIR=${OUT_DIR}/usr/sbin
LIB_DIR=${OUT_DIR}/lib
LIB_OPENSSL=openssl-1.1.1q
LIB_WEBSOCKETS=libwebsockets-4.1.4
LIB_CURL=curl-7.83.1
LIB_CJSON=cJSON-1.7.15

echo "+++++++++++++++++ check EC build environment +++++++++++++++++"
if [ ! "${PROJECT_NAME}" ] || [ ! "${SOURCE_PATH}" ]; then
    echo "Error! Please source 'build_env_init' for your build environment."
    # BUGFIX: was a bare `exit` (status 0), which let callers miss the failure.
    exit 1
fi

cp -af ${UCENTRAL_DIR}/src/ucentral-client/* ${EC_BUILD_DIR}/ucentral-client

# Recreate the output tree from scratch (mkdir -p is idempotent, so no
# existence check is needed after the rm).
rm -rf ${OUT_DIR}
mkdir -p ${BIN_DIR} ${LIB_DIR}

# The trailing " ." is intentional: ${C_COMPILER} is expanded UNQUOTED on the
# cmake command lines below, so it supplies both -DCMAKE_C_COMPILER=<gcc>
# and the in-source build directory argument ".".
C_COMPILER="${TOOLCHAIN_PATH}/${CROSS_COMPILE}gcc ."

echo "+++++++++++++++++ openssl +++++++++++++++++"
cd ${EC_BUILD_DIR}
if [ ! -d openssl ]; then
    tar -xf ./archive/${LIB_OPENSSL}.tar.gz
    mv ${LIB_OPENSSL} openssl
fi
# Map the EC model to an openssl Configure target.
model_name=${D_MODEL_NAME}
if [ "$model_name" == 'ECS4130_AC5' ]; then
    platform=linux-aarch64
elif [ "$model_name" == 'ECS4125_10P' ]; then
    platform=linux-mips32
else
    echo "Error! The model ${model_name} is not in the support lists, please check."
    exit 1
fi
cd openssl
./Configure ${platform} --cross-compile-prefix=${CROSS_COMPILE} no-idea no-mdc2 no-rc5 no-ssl2 no-ssl3
# BUGFIX: was `make -j${nproc}` — $nproc is an unset shell variable, so make
# ran with an unbounded -j.  $(nproc) is the intended command substitution.
make -j"$(nproc)"
if [ "$?" -eq "0" ]; then
    cp -af libssl.so.1.1 libcrypto.so.1.1 ${LIB_DIR}
fi

echo "+++++++++++++++++ libwebsockets +++++++++++++++++"
cd ${EC_BUILD_DIR}
if [ ! -d libwebsockets ]; then
    tar -xf ./archive/${LIB_WEBSOCKETS}.tar.gz
    mv ${LIB_WEBSOCKETS} libwebsockets
    patch -s -N -p1 -d libwebsockets/lib < ./patch/libwebsockets/${LIB_WEBSOCKETS}.patch
fi
cd libwebsockets
# ${C_COMPILER} deliberately expands to "<gcc> ." — compiler plus source dir.
cmake \
    -DOPENSSL_ROOT_DIR=${EC_BUILD_DIR}/openssl \
    -DCMAKE_C_COMPILER=${C_COMPILER}
make -j"$(nproc)"
if [ "$?" -eq "0" ]; then
    cp -af lib/libwebsockets.so.17 ${LIB_DIR}
fi

echo "+++++++++++++++++ curl +++++++++++++++++"
cd ${EC_BUILD_DIR}
if [ ! -d curl ]; then
    tar -xf ./archive/${LIB_CURL}.tar.xz
    mv ${LIB_CURL} curl
    patch -s -N -p1 -d curl < ./patch/curl/${LIB_CURL}.patch
fi
cd curl
cmake -DCMAKE_C_COMPILER=${C_COMPILER} -DCMAKE_SHARED_LINKER_FLAGS=-L${EC_BUILD_DIR}/openssl
make
if [ "$?" -eq "0" ]; then
    cp -af ./lib/libcurl.so ${LIB_DIR}
    cp -af ./src/curl ${BIN_DIR}
fi

echo "+++++++++++++++++ cjson +++++++++++++++++"
cd ${EC_BUILD_DIR}
if [ ! -d cjson ]; then
    tar -xf ./archive/${LIB_CJSON}.tar.gz
    mv ${LIB_CJSON} cjson
fi
cd cjson
cmake -DCMAKE_C_COMPILER=${C_COMPILER}
make
if [ "$?" -eq "0" ]; then
    cp -af ./libcjson.so.1.7.15 ${LIB_DIR}
    cd ${LIB_DIR}
    # ucentral-client links against the soname libcjson.so.1.
    mv libcjson.so.1.7.15 libcjson.so.1
fi

echo "+++++++++++++++++ ecapi +++++++++++++++++"
cd ${EC_BUILD_DIR}/ecapi
# BUGFIX: was plain `mkdir`, which failed (noisily) on every rebuild.
mkdir -p ${EC_BUILD_DIR}/ecapi/build
cd ${EC_BUILD_DIR}/ecapi/build
cmake -DCMAKE_C_COMPILER=${C_COMPILER} ..
make
if [ "$?" -eq "0" ]; then
    cp -af libecapi.so ${LIB_DIR}
fi

echo "+++++++++++++++++ ucentral-client +++++++++++++++++"
# NOTE(review): cwd here is still ${EC_BUILD_DIR}/ecapi/build, so the
# relative `ucentral` check never matches; mkdir -p keeps it harmless.
if [ ! -d ucentral ]; then
    mkdir -p ${EC_BUILD_DIR}/ucentral
fi
cp -af ${UCENTRAL_DIR}/src/ucentral-client ${EC_BUILD_DIR}/ucentral/ucentral-client
cp -af ${EC_BUILD_DIR}/patch/ucentral/* ${EC_BUILD_DIR}/ucentral
mkdir -p ${EC_BUILD_DIR}/ucentral/build
cd ${EC_BUILD_DIR}/ucentral/build
cmake -DCMAKE_C_COMPILER=${C_COMPILER} ..
make
if [ "$?" -eq "0" ]; then
    cp -af ucentral-client ${BIN_DIR}
fi

echo "+++++++++++++++++ Strip target binaries +++++++++++++++++"
${TOOLCHAIN_PATH}/${CROSS_COMPILE}strip ${BIN_DIR}/*
${TOOLCHAIN_PATH}/${CROSS_COMPILE}strip ${LIB_DIR}/*

View File

@@ -0,0 +1,31 @@
# Build libecapi: a shared library that communicates with the EC platform
# over SNMP (see src/ec-private/README.md).  The EC build environment must
# be sourced first so SOURCE_PATH, PROJECT_PATH, PROJECT_NAME and
# D_MODEL_NAME are exported.
cmake_minimum_required(VERSION 2.6)
PROJECT(ecapi C)
ADD_DEFINITIONS(-Os -ggdb -Wall -Werror --std=gnu99 -Wmissing-declarations)
SET(CMAKE_SHARED_LIBRARY_LINK_C_FLAGS "")
# Local API headers plus the EC tree's system/MIB headers.
INCLUDE_DIRECTORIES(${CMAKE_CURRENT_SOURCE_DIR}/include)
INCLUDE_DIRECTORIES($ENV{SOURCE_PATH}/sysinclude)
INCLUDE_DIRECTORIES($ENV{SOURCE_PATH}/sysinclude/mibconstants)
INCLUDE_DIRECTORIES($ENV{SOURCE_PATH}/sysinclude/oem/$ENV{PROJECT_NAME})
INCLUDE_DIRECTORIES($ENV{PROJECT_PATH}/user/thirdpty/lua/net-snmp-5.4.4/include)
INCLUDE_DIRECTORIES($ENV{PROJECT_PATH}/user/thirdpty/lua/net-snmp-5.4.4/agent/mibgroup)
#LINK_DIRECTORIES(${CMAKE_CURRENT_SOURCE_DIR}/src/snmp)
# Link against the net-snmp library built inside the EC tree.
FIND_LIBRARY(netsnmp_library netsnmp $ENV{PROJECT_PATH}/user/thirdpty/lua/net-snmp-5.4.4/snmplib/.libs)
#INCLUDE (CheckSymbolExists)
#CHECK_SYMBOL_EXISTS(SYS_getrandom syscall.h getrandom)
# NOTE(review): presumably the ECS4130_AC5 (aarch64) build needs byte-order
# adjustment in the SNMP glue — confirm against the users of ENDIANNESS_ADJUST.
if ($ENV{D_MODEL_NAME} STREQUAL ECS4130_AC5)
add_definitions(-DENDIANNESS_ADJUST)
endif()
# Each sub-list appends its sources to LIB_SOURCES.
INCLUDE(snmp/CMakeLists.txt)
INCLUDE(generic/CMakeLists.txt)
ADD_LIBRARY(ecapi SHARED ${LIB_SOURCES})
TARGET_LINK_LIBRARIES(ecapi ${netsnmp_library})

View File

@@ -0,0 +1,3 @@
# Sources for the "generic" (non-SNMP) part of libecapi; appended to the
# LIB_SOURCES list consumed by the parent CMakeLists.
list(APPEND LIB_SOURCES
${CMAKE_CURRENT_LIST_DIR}/api_print.c
)

View File

@@ -0,0 +1,27 @@
#include "api_print.h"

/* Runtime switch consulted by the print_debug() macro in api_print.h. */
static bool debug_on = false;

/* Enable or disable debug output. */
void print_set_debug(bool on) {
	debug_on = on;
}

/* Report whether debug output is currently enabled. */
bool print_is_debug(void) {
	return debug_on;
}

View File

@@ -0,0 +1,40 @@
#ifndef API_CONFIG_H
#define API_CONFIG_H

#include <stdbool.h>
#include <stdint.h>

/* Port duplex mode. */
typedef enum {
	DPX_HALF = 0,
	DPX_FULL,
} duplex_t;

/* Physical media / forced-speed selection.
 * NOTE(review): the explicit values and gaps presumably mirror an EC MIB
 * enum — confirm before adding members. */
typedef enum {
	M_NONE = 1,
	M_SFP_FORCED_1000 = 7,
	M_SFP_FORCED_10G = 8,
} media_t;

/* Per-port VLAN membership kind. */
typedef enum {
	VL_NONE = 0,
	VL_TAGGED,
	VL_UNTAGGED,
	VL_FORBIDDEN
} vlan_membership_t;

/* Begin a configuration transaction; returns an opaque handle to pass to
 * the add_* functions and commit_config_transaction().
 * BUGFIX: declared with (void) — the original empty parameter list is an
 * unprototyped declaration in C and disables argument checking. */
void *open_config_transaction(void);

/* Apply all changes queued on the transaction handle. */
void commit_config_transaction(void *tr);

/* Queue a speed/duplex change for ethernet port eth_num. */
void add_eth_speed(void *tr, uint16_t eth_num, uint32_t speed, duplex_t duplex);

/* Queue a media selection change for ethernet port eth_num. */
void add_eth_media(void *tr, uint16_t eth_num, media_t media);

/* Queue creation/update of an L2 VLAN with its member port lists. */
void add_l2_vlan(void *tr, uint16_t vlan_id,
	uint16_t *tagged_members,    // NULL terminated array / NULL if not required
	uint16_t *un_tagged_members, // NULL terminated array / NULL if not required
	uint16_t *forbidden_members, // NULL terminated array / NULL if not required
	uint16_t *pvid_ports         // NULL terminated array / NULL if not required
);
#endif

View File

@@ -0,0 +1,8 @@
#ifndef API_CONSTS_H
#define API_CONSTS_H
/*
 * Common return codes for the ecapi functions.  Values deliberately match
 * net-snmp's STAT_SUCCESS (0) / STAT_ERROR (1) / STAT_TIMEOUT (2) so SNMP
 * helper results can be passed through unchanged.
 */
#define STATUS_SUCCESS 0
#define STATUS_ERROR 1
#define STATUS_TIMEOUT 2
#endif

View File

@@ -0,0 +1,15 @@
#ifndef API_DEVICEID_H
#define API_DEVICEID_H
/*
 * Device identity / inventory queries backed by SNMP (see snmp/device.c).
 * All functions return a STATUS_* code from api_consts.h.
 * NOTE(review): the guard says API_DEVICEID_H while sources include this
 * file as "api_device.h" — harmless, but worth aligning.
 */
#include <stdint.h>
#include "api_consts.h"

/* Base MAC as 12 lowercase hex chars, NUL-terminated (mac_len >= 13). */
int dev_get_main_mac(char *mac, int mac_len);
/* Unit serial number string. */
int dev_get_serial(char *serial, int serial_len);
/* Running firmware (opcode) version string. */
int dev_get_fw_version(char *fw, int fw_len);
/* System uptime in seconds. */
int dev_get_uptime(uint32_t *up);
/* Fills vlan_arr with configured VLAN IDs; *num receives the count. */
int dev_get_vlan_list(int *vlan_arr, int *num);
/* Byte length of the VLAN egress-port bitmask used by the agent. */
int dev_get_vlan_mask_len(int *len);
/* Number of PoE-capable ports. */
int dev_get_poe_port_num(int *num);
/* Byte length of the port-capabilities value in the private MIB. */
int dev_get_port_capabilities_val_len(int *len);
#endif

View File

@@ -0,0 +1,13 @@
#ifndef API_PRINT_H
#define API_PRINT_H
/*
 * Minimal logging shim: print_debug() writes to stdout only when the
 * runtime debug flag is on (see api_print.c); print_err() always writes
 * to stderr.
 */
#include <stdio.h>
#include <stdbool.h>

/* Enable/disable debug output at runtime. */
void print_set_debug(bool on);
/* Current state of the debug flag. */
bool print_is_debug(void);

/* Wrapped in do { } while (0) so the macro expands to exactly one
 * statement.  The previous bare `if (...) { ... }` form silently captured
 * a following `else` (dangling-else) when used un-braced. */
#define print_debug(...) \
	do { \
		if (print_is_debug()) { \
			fprintf(stdout, __VA_ARGS__); \
		} \
	} while (0)

/* Errors are printed unconditionally. */
#define print_err(...) fprintf(stderr, __VA_ARGS__)
#endif

View File

@@ -0,0 +1,9 @@
#ifndef API_SESSION_H
#define API_SESSION_H
/*
 * Lifetime of the underlying management session (currently an SNMP
 * session to the local agent — see snmp/session.c).
 */
#include "api_consts.h"

/* Open the session.  Returns STATUS_SUCCESS (0) on success. */
int session_start(void);
/* Close the session opened by session_start(). */
void session_close(void);
#endif

View File

@@ -0,0 +1,36 @@
#ifndef API_STATS_H
#define API_STATS_H
/*
 * Per-interface statistics retrieval (implemented over SNMP in
 * snmp/stats.c).
 */
#include <stdint.h>
#include <stdbool.h>
#include "api_consts.h"

#define IF_LOCATION_SIZE 16
#define IF_NAME_SIZE 32

/* Traffic counters for one interface.  64-bit fields come from the
 * ifXTable HC counters; 32-bit ones from the classic ifTable. */
typedef struct {
uint32_t collisions;
uint64_t multicast ;   /* rx + tx multicast packets */
uint64_t rx_bytes;
uint32_t rx_dropped;
uint32_t rx_errors;
uint64_t rx_packets;   /* unicast + multicast + broadcast */
uint64_t tx_bytes;
uint32_t tx_dropped;
uint32_t tx_errors;
uint64_t tx_packets;   /* unicast + multicast + broadcast */
} counters_t;

/* One front-panel interface.  NOTE: get_ethernet_stats() strides through
 * arrays of this struct by sizeof(interface_t), so field layout matters. */
typedef struct {
char location[IF_LOCATION_SIZE];  /* zero-based port index as a string */
char name[IF_NAME_SIZE];
uint32_t uptime;                  /* seconds since last link change */
uint32_t speed_dpx_status;        /* private-MIB speed/duplex code */
counters_t counters;
} interface_t;

/* Number of Ethernet ports (leading ethernetCsmacd ifTable entries). */
int get_ethernet_count(int *eth_count);
/* Fill eths[0..eth_count-1] with names, status and counters. */
int get_ethernet_stats(interface_t *eths, int eth_count);
/* Allocates (*vlans) with malloc; caller frees.  *vlan_count set. */
int get_vlans(uint16_t **vlans, int *vlan_count);
#endif

View File

@@ -0,0 +1,41 @@
#ifndef OID_DEFINE_H
#define OID_DEFINE_H
/*
 * OID constants for the private Accton MIB (rooted at
 * SYS_ADPT_PRIVATEMIB_OID / SYSTEM_OID) and a few standard MIB-2 nodes.
 * Numeric `oid` arrays are used with snmph_get(); the string forms feed
 * snmp_parse_oid() in the set/walk helpers.
 *
 * NOTE(review): `const static` arrays in a header give every including
 * translation unit its own copy — consider moving definitions to a .c
 * file if the duplication matters.
 */
#include <sys_adpt.h>
#include <net-snmp/net-snmp-config.h>
#include <net-snmp/net-snmp-includes.h>

const static oid O_MAIN_MAC[] = { SYS_ADPT_PRIVATEMIB_OID, 1, 5, 6, 1, 0 };
const static oid O_SERIAL[] = { SYS_ADPT_PRIVATEMIB_OID, 1, 1, 3, 1, 10, 1 };
const static oid O_OPCODE_VERSION[] = { SYS_ADPT_PRIVATEMIB_OID, 1, 1, 5, 4, 0 };
const static oid O_SYS_UPTIME[] = { 1, 3, 6, 1, 2, 1, 1, 3, 0 };           /* sysUpTime.0 */
const static oid O_VLAN_STATUS[] = { 1, 3, 6, 1, 2, 1, 17, 7, 1, 4, 3, 1, 5};  /* dot1qVlanStaticRowStatus */
const static oid O_POE_PORT_ENABLE[] ={1, 3, 6, 1, 2, 1, 105, 1, 1, 1, 3, 1};  /* pethPsePortAdminEnable */
/* NOTE(review): "CPAPBILITIES" is a typo for "CAPABILITIES"; renaming
 * would touch every user, so it is only flagged here. */
const static oid O_PORT_CPAPBILITIES[] = { SYS_ADPT_PRIVATEMIB_OID, 1, 2, 1, 1, 6, 1 };

#define O_FACTORY_DEFAULT SYSTEM_OID"1.24.2.1.1.4.1.70.97.99.116.111.114.121.95.68.101.102.97.117.108.116.95.67.111.110.102.105.103.46.99.102.103"
#define O_FW_UPGRADE_MGMT SYSTEM_OID"1.24.6.1.0"
#define O_DEVICE_MODEL SYSTEM_OID"1.1.5.1.0"
#define O_DEVICE_COMPANY SYSTEM_OID"1.1.5.2.0"
#define O_STR_POE_PORT_ENABLE "1.3.6.1.2.1.105.1.1.1.3.1"
#define O_STR_POE_MAX_POWER SYSTEM_OID"1.28.6.1.13.1"
#define O_STR_POE_USAGE_THRESHOLD "1.3.6.1.2.1.105.1.3.1.1.5.1"
#define O_STR_IF_ADMIN_STATUS "1.3.6.1.2.1.2.2.1.7"
#define O_STR_PORT_CPAPBILITIES SYSTEM_OID"1.2.1.1.6"
#define O_STR_PVID "1.3.6.1.2.1.17.7.1.4.5.1.1"
#define O_STR_VLAN_NAME "1.3.6.1.2.1.17.7.1.4.3.1.1"
#define O_STR_VLAN_EGRESS "1.3.6.1.2.1.17.7.1.4.3.1.2"
#define O_STR_VLAN_STATUS "1.3.6.1.2.1.17.7.1.4.3.1.5"
#define O_STR_VLAN_UNTAGGED "1.3.6.1.2.1.17.7.1.4.3.1.4"
/* copy-mgt nodes used for config save / firmware transfer */
#define O_STR_COPY_SRC_TYPE SYSTEM_OID"1.24.1.1.0"
#define O_STR_COPY_DST_TYPE SYSTEM_OID"1.24.1.3.0"
#define O_STR_COPY_DST_NAME SYSTEM_OID"1.24.1.4.0"
#define O_STR_COPY_FILE_TYPE SYSTEM_OID"1.24.1.5.0"
#define O_STR_COPY_ACTION SYSTEM_OID"1.24.1.8.0"
/* NTP / SNTP configuration nodes */
#define O_NTP_STATUS SYSTEM_OID"1.23.5.1.0"
#define O_SNTP_STATUS SYSTEM_OID"1.23.1.1.0"
#define O_SNTP_INTERVAL SYSTEM_OID"1.23.1.3.0"
#define O_SNTP_SERVER_TYPE SYSTEM_OID"1.23.1.4.1.4"
#define O_SNTP_SERVER_ADDR SYSTEM_OID"1.23.1.4.1.5"
#endif

View File

@@ -0,0 +1,25 @@
#ifndef SNMP_HELPER_H
#define SNMP_HELPER_H
#include <net-snmp/net-snmp-config.h>
#include <net-snmp/net-snmp-includes.h>
#include "oid_define.h"
int snmph_session_start(void);
void snmph_session_close(void);
int snmph_get(const oid *req_oid, size_t req_oid_len, struct snmp_pdu **response);
int snmph_get_argstr(const char *oid_str, struct snmp_pdu **response);
int snmph_get_single_string(const oid *req_oid, size_t req_oid_len, char *buf, int buf_len);
int snmph_get_bulk(const oid *req_oid, size_t req_oid_len, int max, struct snmp_pdu **response);
int snmph_set(const char *oid_str, char type, char *value);
int snmph_set_array(const char *oid_str, char type, const u_char *value, size_t len);
int snmph_walk(const char *oid_str, void *buf, int *num);
enum snmp_walk_node {
SNMP_WALK_NODE_NONE,
SNMP_WALK_NODE_VLAN_STATUS,
SNMP_WALK_NODE_POE_PORT_ENABLE,
};
#endif

View File

@@ -0,0 +1,7 @@
# SNMP-backed implementation of the ecapi surface.
list(APPEND LIB_SOURCES
${CMAKE_CURRENT_LIST_DIR}/device.c
${CMAKE_CURRENT_LIST_DIR}/helper.c
${CMAKE_CURRENT_LIST_DIR}/session.c
${CMAKE_CURRENT_LIST_DIR}/stats.c
)

View File

@@ -0,0 +1,96 @@
#include <sys_adpt.h>
#include "api_device.h"
#include "snmp_helper.h"
/*
 * Fetch the unit's base MAC and normalise it in place to 12 lowercase
 * hex digits (e.g. "001122aabbcc").  The agent returns the address as
 * "XX-XX-XX-XX-XX-XX"; mac_len must be at least 13.
 * Returns STAT_SUCCESS or the error from the SNMP helper.
 */
int dev_get_main_mac(char *mac, int mac_len) {
	int rc = snmph_get_single_string(O_MAIN_MAC, OID_LENGTH(O_MAIN_MAC), mac, mac_len);
	if (rc != STAT_SUCCESS)
		return rc;
	/* Compact "XX-XX-XX-..." by copying each digit pair down over the
	 * separators: source pairs start at 3, 6, 9, ... */
	int dst = 2;
	int src;
	for (src = 3; src < 17; src += 3) {
		mac[dst++] = mac[src];
		mac[dst++] = mac[src + 1];
	}
	mac[12] = 0;
	/* Lowercase any A-F digits in place. */
	char *p;
	for (p = mac; *p; p++) {
		if (*p >= 'A' && *p <= 'Z')
			*p = (char)(*p + ('a' - 'A'));
	}
	return STAT_SUCCESS;
}
/* Serial number string from the private MIB; NUL-terminated in `serial`. */
int dev_get_serial(char *serial, int serial_len) {
	return snmph_get_single_string(O_SERIAL, OID_LENGTH(O_SERIAL), serial, serial_len);
}

/* Running firmware (opcode) version string; NUL-terminated in `fw`. */
int dev_get_fw_version(char *fw, int fw_len) {
	return snmph_get_single_string(O_OPCODE_VERSION, OID_LENGTH(O_OPCODE_VERSION), fw, fw_len);
}
/*
 * System uptime in whole seconds, rounded to nearest.
 * sysUpTime.0 is a TimeTicks value in hundredths of a second.
 * Returns STATUS_SUCCESS or the SNMP helper's error code.
 */
int dev_get_uptime(uint32_t *up) {
	struct snmp_pdu *response = NULL;
	int status = snmph_get(O_SYS_UPTIME, OID_LENGTH(O_SYS_UPTIME), &response);
	if (status != STATUS_SUCCESS) return status;
	long ticks = response->variables->val.integer[0];
	/* BUG FIX: the old `ticks / 100 + 0.5` did the integer division
	 * first, so the +0.5 never rounded anything.  Round in integers. */
	*up = (uint32_t)((ticks + 50) / 100);
	snmp_free_pdu(response);
	return STATUS_SUCCESS;
}
/*
 * Collect all configured VLAN IDs by walking the VLAN static-row-status
 * column.  `vlan_arr` receives the IDs, *num the count.
 */
int dev_get_vlan_list(int *vlan_arr, int *num) {
	return snmph_walk(O_STR_VLAN_STATUS, vlan_arr, num);
}
/*
 * Byte length of the VLAN egress-port bitmask, probed by reading the
 * egress mask of VLAN 1 and taking the value's length.
 * Returns STATUS_SUCCESS and sets *len, or the SNMP helper's error.
 */
int dev_get_vlan_mask_len(int *len) {
	char oidstr[MAX_OID_LEN];
	struct snmp_pdu *response = NULL;	/* init: helper may fail before assigning */
	/* snprintf instead of sprintf to bound the write. */
	snprintf(oidstr, sizeof(oidstr), "%s.%d", O_STR_VLAN_EGRESS, 1);
	int status = snmph_get_argstr(oidstr, &response);
	if (status != STAT_SUCCESS) {
		fprintf(stderr, "Could not retrieve vlan mask length.\n");
		return status;
	}
	*len = response->variables->val_len;
	snmp_free_pdu(response);	/* BUG FIX: was leaked on the success path */
	return STATUS_SUCCESS;
}
int dev_get_poe_port_num(int *num) {
int status;
status = snmph_walk(O_STR_POE_PORT_ENABLE, 0, num);
return status;
}
/*
 * Byte length of the private-MIB port-capabilities value (port 1).
 * Returns the SNMP helper's status; *len is set only on success.
 */
int dev_get_port_capabilities_val_len(int *len) {
	struct snmp_pdu *response = NULL;	/* init: helper may fail before assigning */
	int status = snmph_get(O_PORT_CPAPBILITIES, OID_LENGTH(O_PORT_CPAPBILITIES), &response);
	/* BUG FIX: free only on success — snmph_get() already frees the
	 * response on its own error paths, so the old unconditional
	 * snmp_free_pdu() double-freed it. */
	if (status == STATUS_SUCCESS) {
		*len = response->variables->val_len;
		snmp_free_pdu(response);
	}
	return status;
}

View File

@@ -0,0 +1,340 @@
/* MODULE NAME: snmp_helper.c
* PURPOSE:
* for ucentral middleware process.
*
* NOTES:
*
* REASON:
* Description:
* HISTORY
* 2023/02/03 - Saulius P., Created
*
* Copyright(C) Accton Corporation, 2023
*/
/* INCLUDE FILE DECLARATIONS
*/
#include <math.h>
#include "snmp_helper.h"
#include "api_print.h"
/* Shared SNMP session state: `session` holds the parameters, `ss` the
 * open handle used by every helper in this file. */
static struct snmp_session session, *ss;

/*
 * Open the module-wide SNMPv2c session to the local agent using the
 * "private" (read-write) community.
 * Returns STAT_SUCCESS on success, STAT_ERROR if snmp_open() fails.
 */
int snmph_session_start(void) {
	init_snmp("ucmw_snmp");
	snmp_sess_init(&session);
	session.peername = "127.0.0.1";
	session.version = SNMP_VERSION_2c;
	session.community = (unsigned char *)"private";
	session.community_len = strlen((char *)session.community);
	ss = snmp_open(&session);
	return ss ? STAT_SUCCESS : STAT_ERROR;
}
/*
 * SNMP SET of a single value given as text.
 * `oid_str` is parsed with snmp_parse_oid(); `type` is the snmpset-style
 * type character ('i', 's', 'x', ...); `value` its textual form.
 * Returns 0 on success, -1 on OID/value parse failure, 1 on timeout or
 * transport error, 2 when the agent returns an error status.
 */
int snmph_set(const char *oid_str, char type, char *value) {
	netsnmp_pdu *pdu, *response = NULL;
	size_t name_length = MAX_OID_LEN;
	oid name[MAX_OID_LEN];
	int status, exitval = 0;

	pdu = snmp_pdu_create(SNMP_MSG_SET);
	if (snmp_parse_oid(oid_str, name, &name_length) == NULL) {
		snmp_perror(oid_str);
		snmp_free_pdu(pdu);	/* BUG FIX: request pdu was leaked here */
		return -1;
	}
	if (snmp_add_var(pdu, name, name_length, type, value)) {
		snmp_perror(oid_str);
		snmp_free_pdu(pdu);	/* BUG FIX: request pdu was leaked here */
		return -1;
	}
	/* snmp_synch_response() takes ownership of `pdu`. */
	status = snmp_synch_response(ss, pdu, &response);
	if (status == STAT_SUCCESS) {
		if (response->errstat != SNMP_ERR_NOERROR) {
			fprintf(stderr, "Error in packet.\nReason: %s\n",
				snmp_errstring(response->errstat));
			exitval = 2;
		}
	} else if (status == STAT_TIMEOUT) {
		fprintf(stderr, "Timeout: No Response from %s\n",
			session.peername);
		exitval = 1;
	} else { /* status == STAT_ERROR */
		snmp_sess_perror("snmpset", ss);
		exitval = 1;
	}
	if (response)
		snmp_free_pdu(response);
	return exitval;
}
/*
 * SNMP SET of a raw byte-array value (e.g. a VLAN port bitmask).
 * Unlike snmph_set(), the value is passed as `len` raw bytes with an
 * explicit ASN type, via snmp_pdu_add_variable().
 * Returns 0 on success, -1 on parse/encode failure, 1 on timeout or
 * transport error, 2 when the agent returns an error status.
 */
int snmph_set_array(const char *oid_str, char type, const u_char *value, size_t len) {
	netsnmp_pdu *pdu, *response = NULL;
	size_t name_length = MAX_OID_LEN;
	oid name[MAX_OID_LEN];
	int status, exitval = 0;

	pdu = snmp_pdu_create(SNMP_MSG_SET);
	if (snmp_parse_oid(oid_str, name, &name_length) == NULL) {
		snmp_perror(oid_str);
		snmp_free_pdu(pdu);	/* BUG FIX: request pdu was leaked here */
		return -1;
	}
	if (!snmp_pdu_add_variable(pdu, name, name_length, type, value, len)) {
		snmp_perror(oid_str);
		snmp_free_pdu(pdu);	/* BUG FIX: request pdu was leaked here */
		return -1;
	}
	/* snmp_synch_response() takes ownership of `pdu`. */
	status = snmp_synch_response(ss, pdu, &response);
	if (status == STAT_SUCCESS) {
		if (response->errstat != SNMP_ERR_NOERROR) {
			fprintf(stderr, "Error in packet.\nReason: %s\n",
				snmp_errstring(response->errstat));
			exitval = 2;
		}
	} else if (status == STAT_TIMEOUT) {
		fprintf(stderr, "Timeout: No Response from %s\n",
			session.peername);
		exitval = 1;
	} else { /* status == STAT_ERROR */
		snmp_sess_perror("snmpset", ss);
		exitval = 1;
	}
	if (response)
		snmp_free_pdu(response);
	return exitval;
}
/*
 * Synchronous SNMP GET of one numeric OID.
 * On success (STAT_SUCCESS) the caller owns *response and must free it
 * with snmp_free_pdu().  On any failure the response is freed here and
 * *response is set to NULL, so a caller's defensive snmp_free_pdu() is
 * harmless instead of a double free.
 */
int snmph_get(const oid *req_oid, size_t req_oid_len, struct snmp_pdu **response) {
	struct snmp_pdu *request = snmp_pdu_create(SNMP_MSG_GET);
	snmp_add_null_var(request, req_oid, req_oid_len);
	int status = snmp_synch_response(ss, request, response);
	if (!(*response)) {
		print_err("Error 2: empty SNMP response\n");
		return STAT_ERROR;
	}
	if (status != STAT_SUCCESS) {
		print_err("Error 3: bad response status: %d\n", status);
		/* BUG FIX: the old code freed the pdu here and then FELL
		 * THROUGH to dereference it again — use-after-free. */
		snmp_free_pdu(*response);
		*response = NULL;
		return status;
	}
	if ((*response)->errstat != SNMP_ERR_NOERROR) {
		print_err("Error 1, response with error: %d, %ld\n", status, (*response)->errstat);
		snmp_free_pdu(*response);
		*response = NULL;
		return STAT_ERROR;
	}
	if (!(*response)->variables) {
		print_err("Error 4: empty variable list in response\n");
		snmp_free_pdu(*response);
		*response = NULL;
		return STAT_ERROR;
	}
	print_debug("Default return: %d\n", status);
	return status;
}
int snmph_get_argstr(const char *oid_str, struct snmp_pdu **response) {
oid name[MAX_OID_LEN];
size_t name_length = MAX_OID_LEN;
if (snmp_parse_oid(oid_str, name, &name_length) == NULL) {
snmp_perror(oid_str);
return -1;
}
struct snmp_pdu *request = snmp_pdu_create(SNMP_MSG_GET);
snmp_add_null_var(request, name, name_length);
int status = snmp_synch_response(ss, request, response);
if (*response && (*response)->errstat != SNMP_ERR_NOERROR) {
print_err("Error 1, response with error: %d, %ld\n", status, (*response)->errstat);
snmp_free_pdu(*response);
return STAT_ERROR;
}
if (!(*response)) {
print_err("Error 2: empty SNMP response\n");
return STAT_ERROR;
}
if (status != STAT_SUCCESS) {
print_err("Error 3: bad response status: %d\n", status);
snmp_free_pdu(*response);
}
if (!(*response)->variables) {
print_err("Error 4: empty variable list in response\n");
snmp_free_pdu(*response);
return STAT_ERROR;
}
print_debug("Default return: %d\n", status);
return status;
}
/*
 * GET one OID and copy its octet-string value into `buf`, always
 * NUL-terminated (truncated to buf_len - 1 bytes if necessary).
 * Returns STAT_SUCCESS or the error from snmph_get().
 */
int snmph_get_single_string(const oid *req_oid, size_t req_oid_len, char *buf, int buf_len) {
	struct snmp_pdu *response = NULL;
	int status = snmph_get(req_oid, req_oid_len, &response);
	if (status != STAT_SUCCESS) {
		return status;
	}
	memset(buf, 0, buf_len);
	/* BUG FIX: strncpy with fmin(buf_len, val_len) could copy exactly
	 * buf_len bytes and leave buf without a terminator; also fmin()
	 * is floating-point.  Cap at buf_len - 1 with integer math. */
	size_t copy_len = response->variables->val_len;
	if (copy_len > (size_t)(buf_len - 1))
		copy_len = (size_t)(buf_len - 1);
	memcpy(buf, (char *)response->variables->val.string, copy_len);
	snmp_free_pdu(response);
	return STAT_SUCCESS;
}
/*
 * Synchronous GETBULK: up to `max` repetitions of the subtree at
 * req_oid, no non-repeaters.  Same ownership contract as snmph_get():
 * caller frees *response on success; freed and NULLed here on failure.
 */
int snmph_get_bulk(const oid *req_oid, size_t req_oid_len, int max, struct snmp_pdu **response) {
	struct snmp_pdu *request = snmp_pdu_create(SNMP_MSG_GETBULK);
	request->non_repeaters = 0;
	request->max_repetitions = max;
	snmp_add_null_var(request, req_oid, req_oid_len);
	int status = snmp_synch_response(ss, request, response);
	if (status == STAT_ERROR)	/* was the magic literal 1 */
		snmp_sess_perror("snmpbulkget", ss);
	if (!(*response)) {
		print_err("Error 2: empty bulk response\n");
		return STAT_ERROR;
	}
	if (status != STAT_SUCCESS) {
		print_err("Error 3, bad bulk status: %d\n", status);
		/* BUG FIX: previously freed the pdu and fell through to use
		 * it again (use-after-free); now free, NULL and return. */
		snmp_free_pdu(*response);
		*response = NULL;
		return status;
	}
	if ((*response)->errstat != SNMP_ERR_NOERROR) {
		print_err("Error 1, bulk response error: %d, %ld\n", status, (*response)->errstat);
		snmp_free_pdu(*response);
		*response = NULL;
		return STAT_ERROR;
	}
	if (!(*response)->variables) {
		print_err("Error 4, empty bulk variables\n");
		snmp_free_pdu(*response);
		*response = NULL;
		return STAT_ERROR;
	}
	print_debug("Default bulk return: %d\n", status);
	return status;
}
/*
 * GETNEXT-based walk over the subtree at `oid_str`.
 * Only two subtrees are understood (selected by string-comparing the
 * OID): VLAN row status — VLAN IDs are appended to the int array `buf`
 * — and PoE port-enable, where rows are only counted (buf may be NULL).
 * *num receives the number of rows seen.  Any other OID yields a single
 * GETNEXT and *num == 0.
 * Returns 0 on a completed walk, 1 on timeout/transport error, -1 if
 * the OID string cannot be parsed.
 * NOTE(review): the fixed sub-id indexes name[12]/name[13] and name[10]
 * assume the exact lengths of O_VLAN_STATUS / O_POE_PORT_ENABLE.
 */
int snmph_walk(const char *oid_str, void *buf, int *num) {
	netsnmp_pdu *pdu, *response = NULL;
	netsnmp_variable_list *vars;
	oid name[MAX_OID_LEN];
	size_t name_length = MAX_OID_LEN;
	int running = 1;
	int status = 0;
	enum snmp_walk_node node = SNMP_WALK_NODE_NONE;
	if (snmp_parse_oid(oid_str, name, &name_length) == NULL) {
		snmp_perror(oid_str);
		return -1;
	}
	/* Dispatch on the textual OID to pick the row handler. */
	if (!strcmp(oid_str, O_STR_VLAN_STATUS))
		node = SNMP_WALK_NODE_VLAN_STATUS;
	else if (!strcmp(oid_str, O_STR_POE_PORT_ENABLE))
		node = SNMP_WALK_NODE_POE_PORT_ENABLE;
	*num = 0;
	while (running) {
		/* create PDU for GETNEXT request and add object name to request */
		pdu = snmp_pdu_create(SNMP_MSG_GETNEXT);
		snmp_add_null_var(pdu, name, name_length);
		/* do the request */
		status = snmp_synch_response(ss, pdu, &response);
		if (status == STAT_SUCCESS) {
			if (response->errstat == SNMP_ERR_NOERROR) {
				/* check resulting variables; stop once the returned OID
				 * leaves the requested column */
				for (vars = response->variables; vars;
					vars = vars->next_variable) {
					if (node == SNMP_WALK_NODE_VLAN_STATUS)
					{
						/* still in the row-status column: last sub-id is the VLAN ID */
						if ((vars->name[12]==O_VLAN_STATUS[12]) && (vars->name_length==(OID_LENGTH(O_VLAN_STATUS)+1)))
						{
							((int*)buf)[(*num)++] = vars->name[13];
						}
						else
							running = 0;
					}
					else if (node == SNMP_WALK_NODE_POE_PORT_ENABLE)
					{
						/* count rows only; values are not stored */
						if ((vars->name[10]==O_POE_PORT_ENABLE[10]) && (vars->name_length==(OID_LENGTH(O_POE_PORT_ENABLE)+1)))
						{
							(*num)++;
						}
						else
							running = 0;
					}
					else
						running = 0;
					/* advance the cursor to the OID just returned */
					memmove((char *) name, (char *) vars->name, vars->name_length * sizeof(oid));
					name_length = vars->name_length;
					//print_variable(vars->name, vars->name_length, vars);
				}
			} else {
				running = 0;
			}
		} else if (status == STAT_TIMEOUT) {
			fprintf(stderr, "Timeout: No Response from %s\n",
				session.peername);
			running = 0;
			status = 1;
		} else { /* status == STAT_ERROR */
			snmp_sess_perror("snmpwalk", ss);
			running = 0;
			status = 1;
		}
		if (response)
			snmp_free_pdu(response);
	}
	return status;
}
/* Close the module-wide session opened by snmph_session_start(). */
void snmph_session_close(void) {
	snmp_close(ss);
}

View File

@@ -0,0 +1,10 @@
#include "api_session.h"
#include "snmp_helper.h"

/* Public session API: thin wrappers over the SNMP helper so callers of
 * ecapi need not know the transport is SNMP. */

/* Open the management session; returns STATUS_SUCCESS (0) on success. */
int session_start() {
	return snmph_session_start();
}

/* Close the management session. */
void session_close() {
	snmph_session_close();
}

250
src/ec-private/ecapi/snmp/stats.c Executable file
View File

@@ -0,0 +1,250 @@
#include <sys_adpt.h>
#include "api_device.h"
#include "api_stats.h"
#include "snmp_helper.h"
#include "if-mib/ifTable/ifTable_constants.h"
/* MIB-2 ifTable / ifXTable columns plus private-MIB per-port nodes used
 * by the statistics readers below.  32-bit counters come from ifTable
 * (RFC 1213); 64-bit HC counters from ifXTable (RFC 2863). */
const static oid O_IF_COUNT[] = { 1, 3, 6, 1, 2, 1, 2, 1, 0 };          /* ifNumber.0 */
const static oid O_IF_TYPE[] = { 1, 3, 6, 1, 2, 1, 2, 2, 1, 3 };        /* ifType */
// const static oid O_IF_LAST_CHANGE[] = { 1, 3, 6, 1, 2, 1, 2, 2, 1, 9 };
const static oid O_IF_UPTIME[] = { SYS_ADPT_PRIVATEMIB_OID, 1, 2, 1, 1, 19 };        /* private: port uptime */
const static oid O_SPEED_DPX_STATUS[] = { SYS_ADPT_PRIVATEMIB_OID, 1, 2, 1, 1, 8 };  /* private: speed/duplex code */
const static oid OID_IF_NAME[] = { SYS_ADPT_PRIVATEMIB_OID, 1, 2, 1, 1, 2 };         /* private: port name */
const static oid O_IF_RX_BYTES_64[] = { 1, 3, 6, 1, 2, 1, 31, 1, 1, 1, 6 };    /* ifHCInOctets */
const static oid O_IF_RX_DISCARD_PKTS[] = { 1, 3, 6, 1, 2, 1, 2, 2, 1, 13 };   /* ifInDiscards */
const static oid O_IF_RX_ERROR_PKTS[] = { 1, 3, 6, 1, 2, 1, 2, 2, 1, 14 };     /* ifInErrors */
const static oid O_IF_RX_U_PKTS_64[] = { 1, 3, 6, 1, 2, 1, 31, 1, 1, 1, 7 }; // Unicast packets
const static oid O_IF_RX_MUL_PKTS_64[] = { 1, 3, 6, 1, 2, 1, 31, 1, 1, 1, 8 }; // Multicast packets
const static oid O_IF_RX_BR_PKTS_64[] = { 1, 3, 6, 1, 2, 1, 31, 1, 1, 1, 9 };  /* broadcast packets */
const static oid O_IF_TX_BYTES_64[] = { 1, 3, 6, 1, 2, 1, 31, 1, 1, 1, 10 };   /* ifHCOutOctets */
const static oid O_IF_TX_DISCARD_PKTS[] = { 1, 3, 6, 1, 2, 1, 2, 2, 1, 19 };   /* ifOutDiscards */
const static oid O_IF_TX_ERROR_PKTS[] = { 1, 3, 6, 1, 2, 1, 2, 2, 1, 20 };     /* ifOutErrors */
const static oid O_IF_TX_U_PKTS_64[] = { 1, 3, 6, 1, 2, 1, 31, 1, 1, 1, 11 }; // Unicast packets
const static oid O_IF_TX_MUL_PKTS_64[] = { 1, 3, 6, 1, 2, 1, 31, 1, 1, 1, 12 }; // Multicast packets
const static oid O_IF_TX_BR_PKTS_64[] = { 1, 3, 6, 1, 2, 1, 31, 1, 1, 1, 13 }; /* broadcast packets */
int get_ethernet_count(int *eth_count) {
struct snmp_pdu *response;
// printf("Try to retrieve IF count...\n");
int status = snmph_get(O_IF_COUNT, OID_LENGTH(O_IF_COUNT), &response);
// printf("Retrieved: %d\n", status);
if (status != STAT_SUCCESS) {
// printf("Could not retrieve interfaces count\n");
return status;
}
// printf("Interfaces: %ld\n", response->variables->val.integer[0]);
long int max_if = response->variables->val.integer[0];
snmp_free_pdu(response);
struct variable_list *vars;
status = snmph_get_bulk(O_IF_TYPE, OID_LENGTH(O_IF_TYPE), max_if, &response);
if (status != STAT_SUCCESS) {
// printf("Could not retrieve types\n");
return STATUS_ERROR;
}
*eth_count = 0;
for(vars = response->variables; vars; vars = vars->next_variable) {
// print_variable(vars->name, vars->name_length, vars);
if (vars->val.integer[0] == IANAIFTYPE_ETHERNETCSMACD) {
(*eth_count)++;
} else {
break;
}
}
snmp_free_pdu(response);
return STATUS_SUCCESS;
}
/*
 * Bulk-fetch one 32-bit per-port column and scatter the values into an
 * array of interface_t records: `val` points at the target field inside
 * eths[0], and successive ports are written at interface_t-sized strides
 * from it.  With `aggregate` set, values are added to the field instead
 * of overwriting it (used to sum packet-class counters).
 */
static int fill_ethernet_stats_32(const oid *req_oid, size_t req_oid_len, int max, uint32_t *val, bool aggregate) {
	struct snmp_pdu *response = NULL;	/* init: helper may fail before assigning */
	struct variable_list *vars;
	int status = snmph_get_bulk(req_oid, req_oid_len, max, &response);
	if (status != STATUS_SUCCESS) return status;
	uint32_t *addr;
	int i = 0;
	for (vars = response->variables; vars; vars = vars->next_variable) {
		/* BUG FIX: was memcpy() of the first 4 bytes of the `long`
		 * val.integer points at — wrong on big-endian 64-bit targets
		 * (the ENDIANNESS_ADJUST platform).  A value assignment is
		 * correct on every byte order. */
		uint32_t local_val = (uint32_t)vars->val.integer[0];
		addr = (uint32_t *)((char *)val + (sizeof(interface_t) * (i++)));
		if (aggregate) {
			*addr += local_val;
		} else {
			*addr = local_val;
		}
	}
	snmp_free_pdu(response);
	return STATUS_SUCCESS;
}
/*
 * Bulk-fetch one 64-bit HC counter column and scatter it into an array
 * of interface_t records, striding by sizeof(interface_t) from the
 * field pointed at by `val` (same scheme as fill_ethernet_stats_32).
 * With `aggregate` set, values are added instead of assigned.
 * NOTE(review): the memcpy reinterprets net-snmp's counter64 {high, low}
 * pair as one uint64_t; ENDIANNESS_ADJUST starts the copy at .low on the
 * big-endian build.  Correctness depends on the platform layout —
 * confirm on any new target.
 */
static int fill_ethernet_stats_64(const oid *req_oid, size_t req_oid_len, int max, uint64_t *val, bool aggregate) {
	struct snmp_pdu *response;
	struct variable_list *vars;
	int status = snmph_get_bulk(req_oid, req_oid_len, max, &response);
	if (status != STATUS_SUCCESS) return status;
	uint64_t *addr = val;
	uint64_t local_val = 0;
	int i = 0;
	for(vars = response->variables; vars; vars = vars->next_variable) {
#ifdef ENDIANNESS_ADJUST
		memcpy(&local_val, &vars->val.counter64[0].low, sizeof(uint64_t));
#else
		memcpy(&local_val, &vars->val.counter64[0], sizeof(uint64_t));
#endif
		/* advance by whole interface_t records so each port's value
		 * lands in the same struct field */
		addr = (uint64_t *) ((char *) val + (sizeof(interface_t) * (i++)));
		if (aggregate) {
			*addr += local_val;
		} else {
			*addr = local_val;
		}
		// addr = (uint64_t *) ((char *) addr + sizeof(interface_t));
	}
	snmp_free_pdu(response);
	return STATUS_SUCCESS;
}
/*
 * Populate eths[0..eth_count-1] with name, location, uptime, speed/duplex
 * and traffic counters.  Uses the field-scatter helpers above, which
 * stride through the array by sizeof(interface_t).
 * Returns STATUS_SUCCESS or STATUS_ERROR.
 */
int get_ethernet_stats(interface_t *eths, int eth_count) {
	uint32_t uptime;
	if (dev_get_uptime(&uptime) != STATUS_SUCCESS) return STATUS_ERROR;
	/***************** Interface uptime / speed-duplex *****************/
	if (fill_ethernet_stats_32(O_IF_UPTIME, OID_LENGTH(O_IF_UPTIME), eth_count, &eths[0].uptime, false) != STATUS_SUCCESS) return STATUS_ERROR;
	if (fill_ethernet_stats_32(O_SPEED_DPX_STATUS, OID_LENGTH(O_SPEED_DPX_STATUS), eth_count, &eths[0].speed_dpx_status, false) != STATUS_SUCCESS) return STATUS_ERROR;
	int i;
	for (i = 0; i < eth_count; i++) {
		if (eths[i].uptime) {
			eths[i].uptime /= 100;	/* TimeTicks (10 ms) -> seconds */
		}
		snprintf(eths[i].location, IF_LOCATION_SIZE, "%d", i);
	}
	/***************** Interface names *****************/
	struct snmp_pdu *response = NULL;	/* init: helper may fail before assigning */
	struct variable_list *vars;
	int status = snmph_get_bulk(OID_IF_NAME, OID_LENGTH(OID_IF_NAME), eth_count, &response);
	if (status != STATUS_SUCCESS) return status;
	i = 0;
	/* BUG FIX: the loop condition was `vars || i < eth_count`, which kept
	 * iterating — and dereferencing a NULL `vars` — when the agent
	 * returned fewer names than eth_count.  Also NUL-terminate each name,
	 * which the old strncpy did not guarantee. */
	for (vars = response->variables; vars && i < eth_count; vars = vars->next_variable) {
		size_t n = vars->val_len;
		if (n > IF_NAME_SIZE - 1)
			n = IF_NAME_SIZE - 1;
		memcpy(eths[i].name, (char *)vars->val.string, n);
		eths[i].name[n] = '\0';
		i++;
	}
	snmp_free_pdu(response);
	/***************** Bytes (octets) *****************/
	if (fill_ethernet_stats_64(O_IF_RX_BYTES_64, OID_LENGTH(O_IF_RX_BYTES_64), eth_count, &eths[0].counters.rx_bytes, false) != STATUS_SUCCESS) return STATUS_ERROR;
	if (fill_ethernet_stats_64(O_IF_TX_BYTES_64, OID_LENGTH(O_IF_TX_BYTES_64), eth_count, &eths[0].counters.tx_bytes, false) != STATUS_SUCCESS) return STATUS_ERROR;
	/***************** Packets *****************/
	if (fill_ethernet_stats_64(O_IF_RX_MUL_PKTS_64, OID_LENGTH(O_IF_RX_MUL_PKTS_64), eth_count, &eths[0].counters.rx_packets, false) != STATUS_SUCCESS) return STATUS_ERROR;
	if (fill_ethernet_stats_64(O_IF_TX_MUL_PKTS_64, OID_LENGTH(O_IF_TX_MUL_PKTS_64), eth_count, &eths[0].counters.tx_packets, false) != STATUS_SUCCESS) return STATUS_ERROR;
	// "Multicast is the sum of rx+tx multicast packets"
	for (i = 0; i < eth_count; i++) {
		eths[i].counters.multicast = eths[i].counters.rx_packets + eths[i].counters.tx_packets;
	}
	// All packets is a sum (aggregate == true) of unicast, multicast and broadcast packets
	if (fill_ethernet_stats_64(O_IF_RX_U_PKTS_64, OID_LENGTH(O_IF_RX_U_PKTS_64), eth_count, &eths[0].counters.rx_packets, true) != STATUS_SUCCESS) return STATUS_ERROR;
	if (fill_ethernet_stats_64(O_IF_RX_BR_PKTS_64, OID_LENGTH(O_IF_RX_BR_PKTS_64), eth_count, &eths[0].counters.rx_packets, true) != STATUS_SUCCESS) return STATUS_ERROR;
	if (fill_ethernet_stats_64(O_IF_TX_U_PKTS_64, OID_LENGTH(O_IF_TX_U_PKTS_64), eth_count, &eths[0].counters.tx_packets, true) != STATUS_SUCCESS) return STATUS_ERROR;
	if (fill_ethernet_stats_64(O_IF_TX_BR_PKTS_64, OID_LENGTH(O_IF_TX_BR_PKTS_64), eth_count, &eths[0].counters.tx_packets, true) != STATUS_SUCCESS) return STATUS_ERROR;
	/***************** Errors *****************/
	if (fill_ethernet_stats_32(O_IF_RX_ERROR_PKTS, OID_LENGTH(O_IF_RX_ERROR_PKTS), eth_count, &eths[0].counters.rx_errors, false) != STATUS_SUCCESS) return STATUS_ERROR;
	if (fill_ethernet_stats_32(O_IF_TX_ERROR_PKTS, OID_LENGTH(O_IF_TX_ERROR_PKTS), eth_count, &eths[0].counters.tx_errors, false) != STATUS_SUCCESS) return STATUS_ERROR;
	/***************** Dropped *****************/
	if (fill_ethernet_stats_32(O_IF_RX_DISCARD_PKTS, OID_LENGTH(O_IF_RX_DISCARD_PKTS), eth_count, &eths[0].counters.rx_dropped, false) != STATUS_SUCCESS) return STATUS_ERROR;
	if (fill_ethernet_stats_32(O_IF_TX_DISCARD_PKTS, OID_LENGTH(O_IF_TX_DISCARD_PKTS), eth_count, &eths[0].counters.tx_dropped, false) != STATUS_SUCCESS) return STATUS_ERROR;
	return STATUS_SUCCESS;
}
/*
 * Enumerate VLAN interfaces from the ifTable.
 * Rows with ifType l2vlan/l3ipvlan are collected; the VLAN ID is the
 * row's last OID sub-id minus 1000 (the platform's VLAN ifIndex offset).
 * On success *vlans is a malloc'd array the caller must free and
 * *vlan_count its length.  Returns STATUS_SUCCESS or an error code.
 */
int get_vlans(uint16_t **vlans, int *vlan_count) {
	struct snmp_pdu *response = NULL;	/* init: helper may fail before assigning */
	struct variable_list *vars;
	int status = snmph_get(O_IF_COUNT, OID_LENGTH(O_IF_COUNT), &response);
	if (status != STAT_SUCCESS) {
		printf("Could not retrieve interfaces count\n");
		return status;
	}
	long int max_if = response->variables->val.integer[0];
	snmp_free_pdu(response);	/* BUG FIX: was leaked when the pointer was reused below */
	response = NULL;
	status = snmph_get_bulk(O_IF_TYPE, OID_LENGTH(O_IF_TYPE), max_if, &response);
	if (status != STAT_SUCCESS) {
		return STATUS_ERROR;
	}
	/* First pass: count VLAN rows so the result can be sized exactly. */
	*vlan_count = 0;
	for (vars = response->variables; vars; vars = vars->next_variable) {
		if (vars->val.integer[0] == IANAIFTYPE_L2VLAN || vars->val.integer[0] == IANAIFTYPE_L3IPVLAN) {
			(*vlan_count)++;
		}
	}
	(*vlans) = malloc(sizeof(uint16_t) * (*vlan_count));
	if (*vlans == NULL && *vlan_count > 0) {	/* malloc failure */
		snmp_free_pdu(response);
		return STATUS_ERROR;
	}
	/* Second pass: record the VLAN IDs. */
	int i = 0;
	for (vars = response->variables; vars; vars = vars->next_variable) {
		if (vars->val.integer[0] == IANAIFTYPE_L2VLAN || vars->val.integer[0] == IANAIFTYPE_L3IPVLAN) {
			(*vlans)[i++] = (uint16_t)((int)vars->name[vars->name_length - 1] - 1000);
		}
	}
	snmp_free_pdu(response);	/* BUG FIX: was leaked on return */
	return STATUS_SUCCESS;
}

View File

@@ -0,0 +1,78 @@
diff -Nuar a/CMakeLists.txt b/CMakeLists.txt
--- a/CMakeLists.txt 2023-07-21 09:53:57.450424222 +0800
+++ b/CMakeLists.txt 2023-07-21 11:36:15.395258277 +0800
@@ -1,4 +1,4 @@
-#***************************************************************************
+#***************************************************************************
# _ _ ____ _
# Project ___| | | | _ \| |
# / __| | | | |_) | |
@@ -185,9 +185,9 @@
mark_as_advanced(CURL_DISABLE_HTTP_AUTH)
option(CURL_DISABLE_IMAP "disables IMAP" OFF)
mark_as_advanced(CURL_DISABLE_IMAP)
-option(CURL_DISABLE_LDAP "disables LDAP" OFF)
+option(CURL_DISABLE_LDAP "disables LDAP" ON)
mark_as_advanced(CURL_DISABLE_LDAP)
-option(CURL_DISABLE_LDAPS "disables LDAPS" OFF)
+option(CURL_DISABLE_LDAPS "disables LDAPS" ON)
mark_as_advanced(CURL_DISABLE_LDAPS)
option(CURL_DISABLE_LIBCURL_OPTION "disables --libcurl option from the curl tool" OFF)
mark_as_advanced(CURL_DISABLE_LIBCURL_OPTION)
@@ -433,7 +433,7 @@
endif()
if(CURL_USE_OPENSSL)
- find_package(OpenSSL REQUIRED)
+ #find_package(OpenSSL REQUIRED)
set(SSL_ENABLED ON)
set(USE_OPENSSL ON)
@@ -441,7 +441,7 @@
# version of CMake. This allows our dependents to get our dependencies
# transitively.
if(NOT CMAKE_VERSION VERSION_LESS 3.4)
- list(APPEND CURL_LIBS OpenSSL::SSL OpenSSL::Crypto)
+ #list(APPEND CURL_LIBS OpenSSL::SSL OpenSSL::Crypto)
else()
list(APPEND CURL_LIBS ${OPENSSL_LIBRARIES})
include_directories(${OPENSSL_INCLUDE_DIR})
@@ -595,7 +595,7 @@
set(CMAKE_REQUIRED_LIBRARIES ${OPENSSL_LIBRARIES})
check_library_exists_concat(${CMAKE_LDAP_LIB} ldap_init HAVE_LIBLDAP)
check_library_exists_concat(${CMAKE_LBER_LIB} ber_init HAVE_LIBLBER)
-
+
set(CMAKE_REQUIRED_INCLUDES_BAK ${CMAKE_REQUIRED_INCLUDES})
set(CMAKE_LDAP_INCLUDE_DIR "" CACHE STRING "Path to LDAP include directory")
if(CMAKE_LDAP_INCLUDE_DIR)
@@ -1369,12 +1369,16 @@
add_subdirectory(docs)
endif()
+INCLUDE_DIRECTORIES(../openssl/include)
+FIND_LIBRARY(openssl ssl ../openssl)
+
add_subdirectory(lib)
if(BUILD_CURL_EXE)
add_subdirectory(src)
endif()
+
cmake_dependent_option(BUILD_TESTING "Build tests"
ON "PERL_FOUND;NOT CURL_DISABLE_TESTS"
OFF)
diff -Nuar a/src/CMakeLists.txt b/src/CMakeLists.txt
--- a/src/CMakeLists.txt 2023-07-21 13:47:10.160906907 +0800
+++ b/src/CMakeLists.txt 2023-07-21 13:49:45.205682320 +0800
@@ -98,6 +98,9 @@
#Build curl executable
target_link_libraries(${EXE_NAME} libcurl ${CURL_LIBS})
+target_link_libraries(${EXE_NAME} -lssl)
+target_link_libraries(${EXE_NAME} -lcrypto)
+target_link_libraries(${EXE_NAME} ${CMAKE_SHARED_LINKER_FLAGS})
################################################################################

View File

@@ -0,0 +1,14 @@
--- a/CMakeLists.txt 2020-10-26 04:31:31.000000000 -0700
+++ b/CMakeLists.txt 2023-04-10 20:15:13.399705011 -0700
@@ -102,8 +102,9 @@
# ideally we want to use pipe2()
-
-CHECK_C_SOURCE_COMPILES("#define _GNU_SOURCE\n#include <unistd.h>\nint main(void) {int fd[2];\n return pipe2(fd, 0);\n}\n" LWS_HAVE_PIPE2)
+# jacky
+# comment out this line, use pipe() instead of pipe2()
+#CHECK_C_SOURCE_COMPILES("#define _GNU_SOURCE\n#include <unistd.h>\nint main(void) {int fd[2];\n return pipe2(fd, 0);\n}\n" LWS_HAVE_PIPE2)
# tcp keepalive needs this on linux to work practically... but it only exists
# after kernel 2.6.37

View File

@@ -0,0 +1,49 @@
# Top-level build for the EC-platform ucentral-client executable.
# NOTE(review): VERSION 2.6 is deprecated in current CMake; consider
# raising the minimum once the vendor toolchain allows it.
cmake_minimum_required(VERSION 2.6)
PROJECT(ucentral-client C)
# --copy-dt-needed-entries: let the linker pull in transitive shared-lib
# dependencies (old-style indirect linking).
SET(CMAKE_SHARED_LIBRARY_LINK_C_FLAGS "-Wl,--copy-dt-needed-entries")
SET(LDFLAGS -fopenmp -Wl,--copy-dt-needed-entries)
# Headers: in-tree, sibling third-party trees, and the platform SDK
# (SOURCE_PATH / PROJECT_PATH come from the vendor build environment).
INCLUDE_DIRECTORIES(src/include)
INCLUDE_DIRECTORIES(../)
INCLUDE_DIRECTORIES(../curl/include)
INCLUDE_DIRECTORIES(../libwebsockets/include)
INCLUDE_DIRECTORIES(../openssl/include)
INCLUDE_DIRECTORIES(ucentral-client/include)
INCLUDE_DIRECTORIES(ucentral-client)
INCLUDE_DIRECTORIES(src/include)
INCLUDE_DIRECTORIES(${CMAKE_CURRENT_LIST_DIR}/../ecapi/include)
INCLUDE_DIRECTORIES($ENV{SOURCE_PATH}/sysinclude)
INCLUDE_DIRECTORIES($ENV{SOURCE_PATH}/sysinclude/mibconstants)
INCLUDE_DIRECTORIES($ENV{SOURCE_PATH}/sysinclude/oem/$ENV{PROJECT_NAME})
INCLUDE_DIRECTORIES($ENV{PROJECT_PATH}/user/thirdpty/lua/net-snmp-5.4.4/include)
# Select the EC platform backend in the shared client sources.
add_definitions(-DPLAT_EC)
# Per-model feature flags and private-MIB root OID.
if ($ENV{D_MODEL_NAME} STREQUAL ECS4130_AC5)
add_definitions(-DENDIANNESS_ADJUST)
add_definitions(-DNOT_SUPPORT_CAP_2500)
add_definitions(-DNOT_SUPPORT_NTP_DOMAIN_NAME)
add_definitions(-DSYSTEM_OID="1.3.6.1.4.1.259.10.1.55.")
elseif ($ENV{D_MODEL_NAME} STREQUAL ECS4125_10P)
add_definitions(-DSYSTEM_OID="1.3.6.1.4.1.259.10.1.57.")
else()
message(FATAL_ERROR "not support $ENV{D_MODEL_NAME}")
endif()
# Sub-lists append to UC_SOURCES / PLAT_SOURCES.
INCLUDE(ucentral-client/CMakeLists.txt)
INCLUDE(ucentral-client/platform/ec/CMakeLists.txt)
# Pre-built sibling libraries (built by the docker env, not this project).
FIND_LIBRARY(cjson cjson ../cjson)
FIND_LIBRARY(curl curl ../curl/lib)
FIND_LIBRARY(openssl ssl ../openssl)
FIND_LIBRARY(websockets websockets ../libwebsockets/lib)
FIND_LIBRARY(crypto crypto ../openssl)
FIND_LIBRARY(ecapi_library ecapi ../ecapi/build)
FIND_LIBRARY(netsnmp_library netsnmp $ENV{PROJECT_PATH}/user/thirdpty/lua/net-snmp-5.4.4/snmplib/.libs)
ADD_EXECUTABLE(ucentral-client ${UC_SOURCES} ${PLAT_SOURCES})
TARGET_LINK_LIBRARIES(ucentral-client ${cjson} ${curl} ${openssl} ${crypto} ${websockets} ${netsnmp_library} ${ecapi_library})

View File

@@ -0,0 +1,9 @@
# Platform-independent ucentral-client core sources.
list(APPEND UC_SOURCES
${CMAKE_CURRENT_LIST_DIR}/proto.c
${CMAKE_CURRENT_LIST_DIR}/router-utils.c
${CMAKE_CURRENT_LIST_DIR}/ucentral-json-parser.c
${CMAKE_CURRENT_LIST_DIR}/ucentral-log.c
${CMAKE_CURRENT_LIST_DIR}/ucentral-client.c
${CMAKE_CURRENT_LIST_DIR}/inet_net_pton.c
)

View File

@@ -0,0 +1,200 @@
/*
* Copyright (c) 1996,1999 by Internet Software Consortium.
*
* Permission to use, copy, modify, and distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND INTERNET SOFTWARE CONSORTIUM DISCLAIMS
* ALL WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL INTERNET SOFTWARE
* CONSORTIUM BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
* DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
* PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
* ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS
* SOFTWARE.
*/
#include <sys/types.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <arpa/inet.h>
#include <assert.h>
#include <ctype.h>
#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <stdlib.h>
#ifdef SPRINTF_CHAR
# define SPRINTF(x) strlen(sprintf/**/x)
#else
# define SPRINTF(x) ((size_t)sprintf x)
#endif
static int inet_net_pton_ipv4 (const char *src, u_char *dst,
size_t size) __THROW;
# define __rawmemchr strchr
/*
* static int
* inet_net_pton(af, src, dst, size)
* convert network number from presentation to network format.
* accepts hex octets, hex strings, decimal octets, and /CIDR.
* "size" is in bytes and describes "dst".
* return:
* number of bits, either imputed classfully or specified with /CIDR,
* or -1 if some failure occurred (check errno). ENOENT means it was
* not a valid network specification.
* author:
* Paul Vixie (ISC), June 1996
*/
/*
 * Convert a network number from presentation to network format.
 * Only AF_INET is supported in this port; any other family returns -1.
 * Returns the prefix bit count (classful or /CIDR) or -1 on failure.
 */
int
inet_net_pton (int af, const char *src, void *dst, size_t size)
{
	if (af == AF_INET)
		return inet_net_pton_ipv4(src, dst, size);
	/* EAFNOSUPPORT would normally be set here; errno handling was
	 * deliberately dropped in this port (see the commented __set_errno). */
	return (-1);
}
/*
* static int
* inet_net_pton_ipv4(src, dst, size)
* convert IPv4 network number from presentation to network format.
* accepts hex octets, hex strings, decimal octets, and /CIDR.
* "size" is in bytes and describes "dst".
* return:
* number of bits, either imputed classfully or specified with /CIDR,
* or -1 if some failure occurred (check errno). ENOENT means it was
* not an IPv4 network specification.
* note:
* network byte order assumed. this means 192.5.5.240/28 has
* 0b11110000 in its fourth octet.
* author:
* Paul Vixie (ISC), June 1996
*/
/*
 * static int
 * inet_net_pton_ipv4(src, dst, size)
 *	convert IPv4 network number from presentation to network format.
 *	accepts hex octets, hex strings, decimal octets, and /CIDR.
 *	"size" is in bytes and describes "dst".
 * return:
 *	number of bits, either imputed classfully or specified with /CIDR,
 *	or -1 if some failure occurred (check errno).  ENOENT means it was
 *	not an IPv4 network specification.
 * note:
 *	network byte order assumed.  this means 192.5.5.240/28 has
 *	0b11110000 in its fourth octet.
 * author:
 *	Paul Vixie (ISC), June 1996
 */
static int
inet_net_pton_ipv4 (const char *src, u_char *dst, size_t size)
{
	static const char xdigits[] = "0123456789abcdef";
	int n, ch, tmp, dirty, bits;
	const u_char *odst = dst;	/* start of output; detects "wrote nothing" */

	ch = *src++;
	if (ch == '0' && (src[0] == 'x' || src[0] == 'X')
	    && isascii(src[1]) && isxdigit(src[1])) {
		/* Hexadecimal: Eat nybble string. */
		/* size is size_t (unsigned): "<= 0" really means "== 0". */
		if (size <= 0)
			goto emsgsize;
		dirty = 0;
		tmp = 0;		/* To calm down gcc. */
		src++;			/* skip x or X. */
		while (isxdigit((ch = *src++))) {
			ch = _tolower(ch);
			/* __rawmemchr is #defined to strchr above; the offset
			 * into xdigits is the nybble's numeric value. */
			n = (const char *) __rawmemchr(xdigits, ch) - xdigits;
			assert(n >= 0 && n <= 15);
			if (dirty == 0)
				tmp = n;
			else
				tmp = (tmp << 4) | n;
			if (++dirty == 2) {	/* two nybbles -> one byte out */
				if (size-- <= 0)
					goto emsgsize;
				*dst++ = (u_char) tmp;
				dirty = 0;
			}
		}
		if (dirty) {	/* Odd trailing nybble?  Pad into high half. */
			if (size-- <= 0)
				goto emsgsize;
			*dst++ = (u_char) (tmp << 4);
		}
	} else if (isascii(ch) && isdigit(ch)) {
		/* Decimal: eat dotted digit string. */
		for (;;) {
			tmp = 0;
			do {
				/* ch is a decimal digit, so the lookup lands
				 * within the first ten entries of xdigits. */
				n = ((const char *) __rawmemchr(xdigits, ch)
				     - xdigits);
				assert(n >= 0 && n <= 9);
				tmp *= 10;
				tmp += n;
				if (tmp > 255)	/* octet out of range */
					goto enoent;
			} while (isascii((ch = *src++)) && isdigit(ch));
			if (size-- <= 0)
				goto emsgsize;
			*dst++ = (u_char) tmp;
			if (ch == '\0' || ch == '/')
				break;
			if (ch != '.')
				goto enoent;
			ch = *src++;
			if (!isascii(ch) || !isdigit(ch))
				goto enoent;
		}
	} else
		goto enoent;

	bits = -1;
	if (ch == '/' && isascii(src[0]) && isdigit(src[0]) && dst > odst) {
		/* CIDR width specifier.  Nothing can follow it. */
		ch = *src++;	/* Skip over the /. */
		bits = 0;
		do {
			n = (const char *) __rawmemchr(xdigits, ch) - xdigits;
			assert(n >= 0 && n <= 9);
			bits *= 10;
			bits += n;
		} while (isascii((ch = *src++)) && isdigit(ch));
		if (ch != '\0')
			goto enoent;
		if (bits > 32)
			goto emsgsize;
	}

	/* Fiery death and destruction unless we prefetched EOS. */
	if (ch != '\0')
		goto enoent;

	/* If nothing was written to the destination, we found no address. */
	if (dst == odst)
		goto enoent;
	/* If no CIDR spec was given, infer width from net class. */
	if (bits == -1) {
		if (*odst >= 240)	/* Class E */
			bits = 32;
		else if (*odst >= 224)	/* Class D */
			/* NOTE(review): 4 matches the historic BIND/ISC source
			 * this was lifted from, but glibc later changed the
			 * Class D width to 8 -- confirm which is intended. */
			bits = 4;
		else if (*odst >= 192)	/* Class C */
			bits = 24;
		else if (*odst >= 128)	/* Class B */
			bits = 16;
		else			/* Class A */
			bits = 8;
		/* If imputed mask is narrower than specified octets, widen. */
		if (bits >= 8 && bits < ((dst - odst) * 8))
			bits = (dst - odst) * 8;
	}
	/* Extend network to cover the actual mask. */
	while (bits > ((dst - odst) * 8)) {
		if (size-- <= 0)
			goto emsgsize;
		*dst++ = '\0';
	}
	return (bits);

 enoent:
	/* BUGFIX: the internal __set_errno() calls were commented out, so
	 * failures left errno untouched even though the function comment
	 * tells callers to check errno.  Set errno directly instead. */
	errno = ENOENT;
	return (-1);

 emsgsize:
	errno = EMSGSIZE;
	return (-1);
}

View File

@@ -4,8 +4,3 @@ ntp server 1.pool.ntp.org prefer true
ntp server 2.pool.ntp.org prefer true
ntp server 3.pool.ntp.org prefer true
ntp authenticate
ip dhcp snooping
ip dhcp snooping Vlan1
ntp source-interface Vlan 1
interface range Ethernet 0-100
no ip dhcp snooping trust

View File

@@ -1,2 +0,0 @@
configure terminal
no ip vrf mgmt

View File

@@ -11,7 +11,6 @@ start() {
fi
cp /usr/local/lib/OLS_NOS_fixups.script /home/admin/OLS_NOS_fixups.script
cp /usr/local/lib/OLS_NOS_upgrade_override.script /home/admin/OLS_NOS_upgrade_override.script
if [ $(systemctl is-active config-setup.service) == "active" ]; then
# do nothing on service restart
@@ -30,23 +29,11 @@ start() {
}
wait() {
test -d /var/lib/ucentral || mkdir /var/lib/ucentral
# Wait for at least one Vlan to be created - a signal that telemetry is up.
# Even if vlan table is empty, private 3967 will be allocated with all
# ports in it.
while ! ls /sys/class/net/Vlan* &>/dev/null; do sleep 1; done
# Detect first boot on this version
# Run upgrade overrides before fixups
conf_upgrade_md5sum=$(md5sum /home/admin/OLS_NOS_upgrade_override.script | cut -d ' ' -f1)
if test "$conf_upgrade_md5sum" != "$(test -f /var/lib/ucentral/upgrade-override.md5sum && cat /var/lib/ucentral/upgrade-override.md5sum)"; then
sudo -u admin -- bash "sonic-cli" "/home/admin/OLS_NOS_upgrade_override.script"
echo -n "$conf_upgrade_md5sum" >/var/lib/ucentral/upgrade-override.md5sum
fi
sudo touch /etc/default/in-band-dhcp
# Temporary NTP fixup / WA: configure a list of default NTP servers.
# Should mature into a default-config option to make sure board has right
# time upon any boot (especially first time).
@@ -61,8 +48,6 @@ wait() {
# NOTE: alternatively we could use ifplugd. This also handle del/add scenario
ifup Vlan1 || true
config vlan dhcp 1 enable
# There's an issue with containers starting before DNS server is configured:
# resolv.conf gets copied from host to container upon container start.
# This means that if resolv.conf gets altered (on host) after the container has been
@@ -78,19 +63,9 @@ wait() {
# This also means that we won't start up until this URI is accessible.
while ! curl clientauth.one.digicert.com &>/dev/null; do sleep 1; done
# Enable DHCP trusting for uplink (Vlan1) iface
# It's needed to forward DHCP Discover (and replies) from/to DHCP server
# of (untrusted) port clients (EthernetX) of the same Vlan (Vlan1).
# Without this fix underlying Vlan members wouldn't be able to receive
# DHCP-lease IP
trusted_dhcp_if=`sudo -u admin -- bash "sonic-cli" "-c" "show ip arp" | grep -Eo "Ethernet[0-9]+"`
sudo -u admin -- "echo" "configure terminal" > /home/admin/fixup_scr.script
sudo -u admin -- "echo" "interface $trusted_dhcp_if" >> /home/admin/fixup_scr.script
sudo -u admin -- "echo" "ip dhcp snooping trust" >> /home/admin/fixup_scr.script
sudo -u admin -- bash "sonic-cli" "/home/admin/fixup_scr.script"
# change admin password
# NOTE: This could lead to access escalation, if you got image from running device
test -d /var/lib/ucentral || mkdir /var/lib/ucentral
if ! test -f /var/lib/ucentral/admin-cred.changed; then
#ADMIN_PASSWD=`openssl rand -hex 10`
ADMIN_PASSWD=broadcom

View File

@@ -154,11 +154,6 @@
"vlanid": "1"
}
},
"VLAN_INTERFACE": {
"Vlan1": {
"dhcp": "enable"
}
},
"VLAN_MEMBER": {
{% for port in PORT %}
"Vlan1|{{port}}": {
@@ -169,6 +164,11 @@
"INTERFACE": {
"Vlan1": {}
},
"MGMT_VRF_CONFIG": {
"vrf_global": {
"mgmtVrfEnabled": "true"
}
},
"VRF": {
"default": {
"enabled": "true"

View File

@@ -1,8 +1,3 @@
# Production build system for uCentral client
# Configuration parser tests have been moved to tests/config-parser/Makefile
# Unit tests remain here for backward compatibility with original repository structure.
# See TESTING_FRAMEWORK.md for complete test documentation.
.PHONY: test
export CFLAGS+= -Werror -Wall -Wextra
@@ -36,5 +31,4 @@ test-ucentral-json-parser: test-ucentral-json-parser.o ucentral-json-parser.o
./test-ucentral-json-parser 2>/dev/null
clean:
rm -f ucentral-client *.o test-ucentral-json-parser 2>/dev/null
$(MAKE) -C platform/${PLATFORM} clean
rm -f ucentral-client 2>/dev/null

View File

@@ -2,12 +2,11 @@
#include <netinet/in.h>
/* Fixed: Changed 'key' to proper struct definition with semicolon */
struct ucentral_router_fib_key {
/* TODO vrf */
struct in_addr prefix;
int prefix_len;
};
} key;
struct ucentral_router_fib_info { /* Destination info */
enum {
@@ -47,16 +46,16 @@ struct ucentral_router {
struct ucentral_router_fib_db_apply_args {
/* plat whould check info to determine if node channged */
int (*upd_cb)(const struct ucentral_router_fib_node *old_node,
int (*upd_cb)(const struct ucentral_router_fib_node *old,
int olen,
const struct ucentral_router_fib_node *new_node,
const struct ucentral_router_fib_node *new,
int nlen,
void *arg);
/* prefix = new, info = new */
int (*add_cb)(const struct ucentral_router_fib_node *new_node,
int (*add_cb)(const struct ucentral_router_fib_node *new,
int len, void *arg);
/* prefix = none */
int (*del_cb)(const struct ucentral_router_fib_node *old_node,
int (*del_cb)(const struct ucentral_router_fib_node *old,
int len, void *arg);
void *arg;
};
@@ -70,27 +69,26 @@ int ucentral_router_fib_db_append(struct ucentral_router *r,
struct ucentral_router_fib_node *n);
int ucentral_router_fib_key_cmp(const struct ucentral_router_fib_key *a,
const struct ucentral_router_fib_key *b);
int ucentral_router_fib_info_cmp(const struct ucentral_router_fib_info *a,
const struct ucentral_router_fib_info *b);
bool ucentral_router_fib_info_cmp(const struct ucentral_router_fib_info *a,
const struct ucentral_router_fib_info *b);
#define router_db_get(R, I) (I < (R)->len ? &(R)->arr[(I)] : NULL)
#define diff_case_upd(DIFF) (!(DIFF))
#define diff_case_del(DIFF) ((DIFF) > 0)
#define diff_case_add(DIFF) ((DIFF) < 0)
#define router_db_diff_get(NEW, OLD, INEW, IOLD) \
(IOLD) == (OLD)->len \
? -1 \
: (INEW) == (NEW)->len \
? 1 \
: ucentral_router_fib_key_cmp(&(NEW)->arr[(INEW)].key, &(OLD)->arr[(IOLD)].key)
#define for_router_db_diff_CASE_UPD(DIFF) if (!(DIFF))
#define for_router_db_diff_CASE_DEL(DIFF) if ((DIFF) > 0)
#define for_router_db_diff_CASE_ADD(DIFF) if ((DIFF) < 0)
#define for_router_db_diff(NEW, OLD, INEW, IOLD, DIFF) \
for ((INEW) = 0, (IOLD) = 0, (DIFF) = 0; \
\
((IOLD) != (OLD)->len || (INEW) != (NEW)->len); \
\
(DIFF) == 0 ? ++(INEW) && ++(IOLD) : 0, \
(DIFF) > 0 ? ++(IOLD) : 0, \
(DIFF) < 0 ? ++(INEW) : 0 \
for ((INEW) = 0, (IOLD) = 0, (NEW)->sorted ? 0 : ucentral_router_fib_db_sort((NEW)), (OLD)->sorted ? 0 : ucentral_router_fib_db_sort((OLD)); \
((IOLD) != (OLD)->len || (INEW) != (NEW)->len) && \
(( \
(DIFF) = (IOLD) == (OLD)->len ? -1 : (INEW) == (NEW)->len ? 1 : ucentral_router_fib_key_cmp(&(NEW)->arr[(INEW)].key, &(OLD)->arr[(IOLD)].key) \
) || 1); \
(DIFF) == 0 ? ++(INEW) && ++(IOLD) : 0, (DIFF) > 0 ? ++(IOLD) : 0, (DIFF) < 0 ? ++(INEW) : 0\
)
/*
* ((DIFF) == 0 && ++(INEW) && ++(IOLD)) || \
* ((DIFF) > 0 && ++(IOLD)) || \
* ((DIFF) < 0 && ++(INEW)) \
*/

View File

@@ -28,6 +28,37 @@ void uc_log_send_cb_register(void (*cb)(const char *, int sv));
void uc_log_severity_set(enum uc_log_component c, int sv);
void uc_log(enum uc_log_component c, int sv, const char *fmt, ...);
#ifdef PLAT_EC
#define UC_LOG_INFO(...) \
do { \
syslog(LOG_INFO, __VA_ARGS__); \
uc_log(UC_LOG_COMPONENT, UC_LOG_SV_INFO, __VA_ARGS__); \
} while (0)
#define UC_LOG_DBG(FMT, ...) \
do { \
syslog(LOG_DEBUG, "%s:%u: " FMT, __func__, \
(unsigned)__LINE__, ##__VA_ARGS__); \
uc_log(UC_LOG_COMPONENT, UC_LOG_SV_DEBUG, \
FMT, ##__VA_ARGS__); \
} while (0)
#define UC_LOG_ERR(FMT, ...) \
do { \
syslog(LOG_ERR, "%s:%u: " FMT, __func__, \
(unsigned)__LINE__, ##__VA_ARGS__); \
uc_log(UC_LOG_COMPONENT, UC_LOG_SV_ERR, \
FMT, ##__VA_ARGS__); \
} while (0)
#define UC_LOG_CRIT(FMT, ...) \
do { \
syslog(LOG_CRIT, "%s:%u: " FMT, __func__, \
(unsigned)__LINE__, ##__VA_ARGS__); \
uc_log(UC_LOG_COMPONENT, UC_LOG_SV_CRIT, \
FMT, ##__VA_ARGS__); \
} while (0)
#else
#define UC_LOG_INFO(...) \
do { \
syslog(LOG_INFO, __VA_ARGS__); \
@@ -57,5 +88,6 @@ void uc_log(enum uc_log_component c, int sv, const char *fmt, ...);
uc_log(UC_LOG_COMPONENT, UC_LOG_SV_CRIT, \
FMT __VA_OPT__(, ) __VA_ARGS__); \
} while (0)
#endif
#endif

View File

@@ -22,6 +22,9 @@ extern "C" {
#define MAX_NUM_OF_PORTS (100)
#define PORT_MAX_NAME_LEN (32)
#ifdef PLAT_EC
#define VLAN_MAX_NAME_LEN PORT_MAX_NAME_LEN
#endif
#define RTTY_CFG_FIELD_STR_MAX_LEN (64)
#define PLATFORM_INFO_STR_MAX_LEN (96)
#define SYSLOG_CFG_FIELD_STR_MAX_LEN (64)
@@ -31,8 +34,6 @@ extern "C" {
#define RADIUS_CFG_DEFAULT_PRIO (1)
#define HEALTHCHEK_MESSAGE_MAX_COUNT (10)
#define HEALTHCHEK_MESSAGE_MAX_LEN (100)
#define PLATFORM_MAC_STR_SIZE (18)
#define METRICS_WIRED_CLIENTS_MAX_NUM (2000)
/*
* TODO(vb) likely we need to parse interfaces in proto to understand
@@ -41,8 +42,6 @@ extern "C" {
*/
#define PID_TO_NAME(p, name) sprintf(name, "Ethernet%hu", p)
#define NAME_TO_PID(p, name) sscanf((name), "Ethernet%hu", (p))
#define VLAN_TO_NAME(v, name) sprintf((name), "Vlan%hu", (v))
#define NAME_TO_VLAN(v, name) sscanf((name), "Vlan%hu", (v))
struct plat_vlan_memberlist;
struct plat_port_vlan;
@@ -66,18 +65,6 @@ enum plat_ieee8021x_port_host_mode {
PLAT_802_1X_PORT_HOST_MODE_SINGLE_HOST,
};
enum plat_ieee8021x_das_auth_type {
PLAT_802_1X_DAS_AUTH_TYPE_ANY,
PLAT_802_1X_DAS_AUTH_TYPE_ALL,
PLAT_802_1X_DAS_AUTH_TYPE_SESSION_KEY,
};
enum plat_igmp_version {
PLAT_IGMP_VERSION_1,
PLAT_IGMP_VERSION_2,
PLAT_IGMP_VERSION_3
};
#define UCENTRAL_PORT_LLDP_PEER_INFO_MAX_MGMT_IPS (2)
/* Interface LLDP peer's data, as defined in interface.lldp.yml*/
struct plat_port_lldp_peer_info {
@@ -91,7 +78,7 @@ struct plat_port_lldp_peer_info {
/* The chassis name that our neighbour is announcing */
char name[64];
/* The chassis MAC that our neighbour is announcing */
char mac[PLATFORM_MAC_STR_SIZE];
char mac[18];
/* The chassis description that our neighbour is announcing */
char description[512];
/* The management IPs that our neighbour is announcing */
@@ -129,7 +116,7 @@ struct plat_poe_port_state {
struct plat_ieee8021x_authenticated_client_info {
char auth_method[32];
char mac_addr[PLATFORM_MAC_STR_SIZE];
char mac_addr[18];
size_t session_time;
char username[64];
char vlan_type[32];
@@ -266,29 +253,15 @@ struct plat_port_l2 {
struct plat_ipv4 ipv4;
};
struct plat_igmp {
bool exist;
bool snooping_enabled;
bool querier_enabled;
bool fast_leave_enabled;
uint32_t query_interval;
uint32_t last_member_query_interval;
uint32_t max_response_time;
enum plat_igmp_version version;
size_t num_groups;
struct {
struct in_addr addr;
struct plat_ports_list *egress_ports_list;
} *groups;
};
struct plat_port_vlan {
struct plat_vlan_memberlist *members_list_head;
struct plat_ipv4 ipv4;
struct plat_dhcp dhcp;
struct plat_igmp igmp;
uint16_t id;
uint16_t mstp_instance;
#ifdef PLAT_EC
char name[VLAN_MAX_NAME_LEN];
#endif
};
struct plat_vlans_list {
@@ -302,6 +275,9 @@ struct plat_vlan_memberlist {
uint16_t fp_id;
} port;
bool tagged;
#ifdef PLAT_EC
bool pvid;
#endif
struct plat_vlan_memberlist *next;
};
@@ -313,18 +289,6 @@ struct plat_syslog_cfg {
char host[SYSLOG_CFG_FIELD_STR_MAX_LEN];
};
struct plat_enabled_service_cfg {
struct {
bool enabled;
} ssh;
struct telnet {
bool enabled;
} telnet;
struct {
bool enabled;
} http;
};
struct plat_rtty_cfg {
char id[RTTY_CFG_FIELD_STR_MAX_LEN];
char passwd[RTTY_CFG_FIELD_STR_MAX_LEN];
@@ -367,7 +331,6 @@ struct plat_metrics_cfg {
int lldp_enabled;
int clients_enabled;
size_t interval;
unsigned max_mac_count;
/* IE GET max length. Should be enough. */
char public_ip_lookup[2048];
} state;
@@ -380,16 +343,8 @@ struct plat_unit_poe_cfg {
bool is_usage_threshold_set;
};
struct plat_unit_system_cfg {
char password[64];
bool password_changed;
};
struct plat_unit {
struct plat_unit_poe_cfg poe;
struct plat_unit_system_cfg system;
bool mc_flood_control;
bool querier_enable;
};
enum plat_stp_mode {
@@ -421,31 +376,6 @@ struct plat_radius_hosts_list {
struct plat_radius_host host;
};
struct plat_ieee8021x_dac_host {
char hostname[RADIUS_CFG_HOSTNAME_STR_MAX_LEN];
char passkey[RADIUS_CFG_PASSKEY_STR_MAX_LEN];
};
struct plat_ieee8021x_dac_list {
struct plat_ieee8021x_dac_list *next;
struct plat_ieee8021x_dac_host host;
};
struct plat_port_isolation_session_ports {
struct plat_ports_list *ports_list;
};
struct plat_port_isolation_session {
uint64_t id;
struct plat_port_isolation_session_ports uplink;
struct plat_port_isolation_session_ports downlink;
};
struct plat_port_isolation_cfg {
struct plat_port_isolation_session *sessions;
size_t sessions_num;
};
struct plat_cfg {
struct plat_unit unit;
/* Alloc all ports, but access them only if bit is set. */
@@ -455,7 +385,6 @@ struct plat_cfg {
BITMAP_DECLARE(vlans_to_cfg, MAX_VLANS);
struct plat_metrics_cfg metrics;
struct plat_syslog_cfg *log_cfg;
struct plat_enabled_service_cfg enabled_services_cfg;
/* Port's interfaces (provide l2 iface w/o bridge caps) */
struct plat_port_l2 portsl2[MAX_NUM_OF_PORTS];
struct ucentral_router router;
@@ -464,24 +393,9 @@ struct plat_cfg {
/* Instance zero is for global instance (like common values in rstp) */
struct plat_stp_instance_cfg stp_instances[MAX_VLANS];
struct plat_radius_hosts_list *radius_hosts_list;
struct {
bool is_auth_ctrl_enabled;
bool bounce_port_ignore;
bool disable_port_ignore;
bool ignore_server_key;
bool ignore_session_key;
char server_key[RADIUS_CFG_PASSKEY_STR_MAX_LEN];
enum plat_ieee8021x_das_auth_type das_auth_type;
struct plat_ieee8021x_dac_list *das_dac_list;
} ieee8021x;
struct plat_port_isolation_cfg port_isolation_cfg;
bool ieee8021x_is_auth_ctrl_enabled;
};
struct plat_learned_mac_addr {
char port[PORT_MAX_NAME_LEN];
int vid;
char mac[PLATFORM_MAC_STR_SIZE];
};
typedef void (*plat_alarm_cb)(struct plat_alarm *);
@@ -543,6 +457,9 @@ typedef void (*plat_run_script_cb)(int err, struct plat_run_script_result *,
void *ctx);
enum {
#ifdef PLAT_EC
UCENTRAL_PORT_SPEED_NONE,
#endif
UCENTRAL_PORT_SPEED_10_E,
UCENTRAL_PORT_SPEED_100_E,
UCENTRAL_PORT_SPEED_1000_E,
@@ -555,6 +472,9 @@ enum {
};
enum {
#ifdef PLAT_EC
UCENTRAL_PORT_DUPLEX_NONE,
#endif
UCENTRAL_PORT_DUPLEX_HALF_E,
UCENTRAL_PORT_DUPLEX_FULL_E,
};
@@ -581,60 +501,17 @@ enum {
PLAT_REBOOT_CAUSE_REBOOT_CMD,
PLAT_REBOOT_CAUSE_POWERLOSS,
PLAT_REBOOT_CAUSE_CRASH,
PLAT_REBOOT_CAUSE_UNAVAILABLE,
};
enum sfp_form_factor {
UCENTRAL_SFP_FORM_FACTOR_NA = 0,
UCENTRAL_SFP_FORM_FACTOR_SFP,
UCENTRAL_SFP_FORM_FACTOR_SFP_PLUS,
UCENTRAL_SFP_FORM_FACTOR_SFP_28,
UCENTRAL_SFP_FORM_FACTOR_SFP_DD,
UCENTRAL_SFP_FORM_FACTOR_QSFP,
UCENTRAL_SFP_FORM_FACTOR_QSFP_PLUS,
UCENTRAL_SFP_FORM_FACTOR_QSFP_28,
UCENTRAL_SFP_FORM_FACTOR_QSFP_DD
};
enum sfp_link_mode {
UCENTRAL_SFP_LINK_MODE_NA = 0,
UCENTRAL_SFP_LINK_MODE_1000_X,
UCENTRAL_SFP_LINK_MODE_2500_X,
UCENTRAL_SFP_LINK_MODE_4000_SR,
UCENTRAL_SFP_LINK_MODE_10G_SR,
UCENTRAL_SFP_LINK_MODE_25G_SR,
UCENTRAL_SFP_LINK_MODE_40G_SR,
UCENTRAL_SFP_LINK_MODE_50G_SR,
UCENTRAL_SFP_LINK_MODE_100G_SR,
};
struct plat_port_transceiver_info {
char vendor_name[64];
char part_number[64];
char serial_number[64];
char revision[64];
enum sfp_form_factor form_factor;
enum sfp_link_mode *supported_link_modes;
size_t num_supported_link_modes;
float temperature;
float tx_optical_power;
float rx_optical_power;
float max_module_power;
};
struct plat_port_info {
struct plat_port_counters stats;
struct plat_port_lldp_peer_info lldp_peer_info;
struct plat_ieee8021x_port_info ieee8021x_info;
struct plat_port_transceiver_info transceiver_info;
uint32_t uptime;
uint32_t speed;
uint8_t carrier_up;
uint8_t duplex;
uint8_t has_lldp_peer_info;
uint8_t has_transceiver_info;
char name[PORT_MAX_NAME_LEN];
};
@@ -648,24 +525,6 @@ struct plat_system_info {
double load_average[3]; /* 1, 5, 15 minutes load average */
};
struct plat_iee8021x_coa_counters {
uint64_t coa_req_received;
uint64_t coa_ack_sent;
uint64_t coa_nak_sent;
uint64_t coa_ignored;
uint64_t coa_wrong_attr;
uint64_t coa_wrong_attr_value;
uint64_t coa_wrong_session_context;
uint64_t coa_administratively_prohibited_req;
};
struct plat_gw_address {
struct in_addr ip;
uint32_t metric;
char port[PORT_MAX_NAME_LEN];
char mac[PLATFORM_MAC_STR_SIZE];
};
struct plat_state_info {
struct plat_poe_state poe_state;
struct plat_poe_port_state poe_ports_state[MAX_NUM_OF_PORTS];
@@ -673,15 +532,8 @@ struct plat_state_info {
struct plat_port_info *port_info;
int port_info_count;
struct plat_port_vlan *vlan_info;
size_t vlan_info_count;
struct plat_learned_mac_addr *learned_mac_list;
size_t learned_mac_list_size;
struct plat_gw_address *gw_addr_list;
size_t gw_addr_list_size;
struct plat_system_info system_info;
struct plat_iee8021x_coa_counters ieee8021x_global_coa_counters;
};
struct plat_upgrade_info {
@@ -707,14 +559,7 @@ struct plat_event_callbacks {
plat_poe_link_faultcode_cb poe_link_faultcode_cb;
};
enum plat_script_type {
PLAT_SCRIPT_TYPE_NA = 0,
PLAT_SCRIPT_TYPE_SHELL = 1,
PLAT_SCRIPT_TYPE_DIAGNOSTICS = 2,
};
struct plat_run_script_result {
enum plat_script_type type;
const char *stdout_string;
size_t stdout_string_len;
int exit_status;
@@ -722,7 +567,7 @@ struct plat_run_script_result {
};
struct plat_run_script {
enum plat_script_type type;
const char *type;
const char *script_base64;
plat_run_script_cb cb;
void *ctx;
@@ -741,7 +586,11 @@ int plat_metrics_save(const struct plat_metrics_cfg *cfg);
int plat_metrics_restore(struct plat_metrics_cfg *cfg);
int plat_saved_config_id_get(uint64_t *id);
void plat_config_destroy(struct plat_cfg *cfg);
#ifdef PLAT_EC
int plat_factory_default(bool keep_redirector);
#else
int plat_factory_default(void);
#endif
int plat_rtty(struct plat_rtty_cfg *rtty_cfg);
int plat_upgrade(char *uri, char *signature);
@@ -772,10 +621,15 @@ int plat_run_script(struct plat_run_script *);
int plat_port_list_get(uint16_t list_size, struct plat_ports_list *ports);
int plat_port_num_get(uint16_t *num_of_active_ports);
int plat_running_img_name_get(char *str, size_t str_max_len);
int plat_revision_get(char *str, size_t str_max_len);
int
plat_reboot_cause_get(struct plat_reboot_cause *cause);
int plat_diagnostic(char *res_path);
#ifdef PLAT_EC
void clean_stats();
#endif
#ifdef __cplusplus
}
#endif

View File

@@ -1,19 +1,12 @@
plat.a: plat.o
ar crs $@ $^
plat.o: plat-gnma.o gnma/gnma.full.a netlink/netlink.full.a
plat.o: plat-gnma.o gnma/gnma.full.a
# TODO(vb) get back to this
gcc -r -nostdlib -o $@ $^
gnma/gnma.full.a:
$(MAKE) -C $(dir $@) $(notdir $@)
netlink/netlink.full.a:
$(MAKE) -C $(dir $@) $(notdir $@)
%.o: %.c
ifdef PLATFORM_REVISION
gcc -c -o $@ ${CFLAGS} -I ./ -I ../../include -D PLATFORM_REVISION='"$(PLATFORM_REVISION)"' $^
else
gcc -c -o $@ ${CFLAGS} -I ./ -I ../../include $^
endif

View File

@@ -1,7 +1,7 @@
all: gnma.a
%.o: %.c
gcc -c -o $@ ${CFLAGS} -I ./ -I../../../include -I../netlink $<
gcc -c -o $@ ${CFLAGS} -I ./ -I../../../include $<
gnma.a: gnma_common.o
ar crs $@ $^

File diff suppressed because it is too large Load Diff

View File

@@ -7,7 +7,6 @@
#define GNMA_RADIUS_CFG_HOSTNAME_STR_MAX_LEN (64)
#define GNMA_RADIUS_CFG_PASSKEY_STR_MAX_LEN (64)
#define GNMA_OK 0
#define GNMA_ERR_COMMON -1
#define GNMA_ERR_OVERFLOW -2
@@ -27,16 +26,6 @@ struct gnma_radius_host_key {
char hostname[GNMA_RADIUS_CFG_HOSTNAME_STR_MAX_LEN];
};
struct gnma_das_dac_host_key {
char hostname[GNMA_RADIUS_CFG_HOSTNAME_STR_MAX_LEN];
};
typedef enum _gnma_das_auth_type_t {
GNMA_802_1X_DAS_AUTH_TYPE_ANY,
GNMA_802_1X_DAS_AUTH_TYPE_ALL,
GNMA_802_1X_DAS_AUTH_TYPE_SESSION_KEY,
} gnma_das_auth_type_t;
struct gnma_metadata {
char platform[GNMA_METADATA_STR_MAX_LEN];
char hwsku[GNMA_METADATA_STR_MAX_LEN];
@@ -69,17 +58,6 @@ typedef enum _gnma_port_stat_type_t {
} gnma_port_stat_type_t;
typedef enum _gnma_ieee8021x_das_dac_stat_type_t {
GNMA_IEEE8021X_DAS_DAC_STAT_IN_COA_PKTS,
GNMA_IEEE8021X_DAS_DAC_STAT_OUT_COA_ACK_PKTS,
GNMA_IEEE8021X_DAS_DAC_STAT_OUT_COA_NAK_PKTS,
GNMA_IEEE8021X_DAS_DAC_STAT_IN_COA_IGNORED_PKTS,
GNMA_IEEE8021X_DAS_DAC_STAT_IN_COA_WRONG_ATTR_PKTS,
GNMA_IEEE8021X_DAS_DAC_STAT_IN_COA_WRONG_ATTR_VALUE_PKTS,
GNMA_IEEE8021X_DAS_DAC_STAT_IN_COA_WRONG_SESSION_CONTEXT_PKTS,
GNMA_IEEE8021X_DAS_DAC_STAT_IN_COA_ADMINISTRATIVELY_PROHIBITED_REQ_PKTS,
} gnma_ieee8021x_das_dac_stat_type_t;
struct gnma_alarm {
const char *id;
const char *resource;
@@ -151,9 +129,7 @@ struct gnma_route_attrs {
} connected;
struct {
uint16_t vid;
uint32_t metric;
struct in_addr gw;
struct gnma_port_key egress_port;
} nexthop;
};
};
@@ -280,47 +256,6 @@ struct gnma_vlan_member_bmap {
} vlan[GNMA_MAX_VLANS];
};
typedef enum _gnma_fdb_entry_type_t {
GNMA_FDB_ENTRY_TYPE_STATIC,
GNMA_FDB_ENTRY_TYPE_DYNAMIC,
} gnma_fdb_entry_type_t;
struct gnma_fdb_entry {
struct gnma_port_key port;
gnma_fdb_entry_type_t type;
int vid;
char mac[18];
};
typedef enum _gnma_igmp_version_t {
GNMA_IGMP_VERSION_NA = 0,
GNMA_IGMP_VERSION_1 = 1,
GNMA_IGMP_VERSION_2 = 2,
GNMA_IGMP_VERSION_3 = 3
} gnma_igmp_version_t;
struct gnma_igmp_snoop_attr {
bool enabled;
bool querier_enabled;
bool fast_leave_enabled;
uint32_t query_interval;
uint32_t last_member_query_interval;
uint32_t max_response_time;
gnma_igmp_version_t version;
};
struct gnma_igmp_static_group_attr {
struct in_addr address;
size_t num_ports;
struct gnma_port_key *egress_ports;
};
struct gnma_vlan_ip_t {
uint16_t vid;
uint16_t prefixlen;
struct in_addr address;
};
int gnma_switch_create(/* TODO id */ /* TODO: attr (adr, login, psw) */);
int gnma_port_admin_state_set(struct gnma_port_key *port_key, bool up);
int gnma_port_speed_set(struct gnma_port_key *port_key, const char *speed);
@@ -445,9 +380,6 @@ int gnma_route_remove(uint16_t vr_id /* 0 - default */,
int gnma_route_list_get(uint16_t vr_id, uint32_t *list_size,
struct gnma_ip_prefix *prefix_list,
struct gnma_route_attrs *attr_list);
int gnma_dyn_route_list_get(size_t *list_size,
struct gnma_ip_prefix *prefix_list,
struct gnma_route_attrs *attr_list);
int gnma_stp_mode_set(gnma_stp_mode_t mode, struct gnma_stp_attr *attr);
int gnma_stp_mode_get(gnma_stp_mode_t *mode, struct gnma_stp_attr *attr);
@@ -458,53 +390,23 @@ int gnma_stp_ports_enable(uint32_t list_size, struct gnma_port_key *ports_list);
int gnma_stp_instance_set(uint16_t instance, uint16_t prio,
uint32_t list_size, uint16_t *vid_list);
int gnma_stp_vids_set(uint32_t list_size, uint16_t *vid_list, bool enable);
int gnma_stp_vids_set_all(bool enable);
int gnma_stp_vids_enable(uint32_t list_size, uint16_t *vid_list);
int gnma_stp_vids_enable_all(void);
int gnma_stp_vid_set(uint16_t vid, struct gnma_stp_attr *attr);
int gnma_stp_vid_bulk_get(struct gnma_stp_attr *list, ssize_t size);
int gnma_ieee8021x_system_auth_control_set(bool is_enabled);
int gnma_ieee8021x_system_auth_control_get(bool *is_enabled);
int gnma_ieee8021x_system_auth_clients_get(char *buf, size_t buf_size);
int gnma_ieee8021x_das_bounce_port_ignore_set(bool bounce_port_ignore);
int gnma_ieee8021x_das_bounce_port_ignore_get(bool *bounce_port_ignore);
int gnma_ieee8021x_das_disable_port_ignore_set(bool disable_port_ignore);
int gnma_ieee8021x_das_disable_port_ignore_get(bool *disable_port_ignore);
int gnma_ieee8021x_das_ignore_server_key_set(bool ignore_server_key);
int gnma_ieee8021x_das_ignore_server_key_get(bool *ignore_server_key);
int gnma_ieee8021x_das_ignore_session_key_set(bool ignore_session_key);
int gnma_ieee8021x_das_ignore_session_key_get(bool *ignore_session_key);
int gnma_ieee8021x_das_auth_type_key_set(gnma_das_auth_type_t auth_type);
int gnma_ieee8021x_das_auth_type_key_get(gnma_das_auth_type_t *auth_type);
int gnma_ieee8021x_das_dac_hosts_list_get(size_t *list_size,
struct gnma_das_dac_host_key *das_dac_keys_arr);
int gnma_ieee8021x_das_dac_host_add(struct gnma_das_dac_host_key *key,
const char *passkey);
int gnma_ieee8021x_das_dac_host_remove(struct gnma_das_dac_host_key *key);
int
gnma_iee8021x_das_dac_global_stats_get(uint32_t num_of_counters,
gnma_ieee8021x_das_dac_stat_type_t *counter_ids,
uint64_t *counters);
int gnma_radius_hosts_list_get(size_t *list_size,
struct gnma_radius_host_key *hosts_list);
int gnma_radius_host_add(struct gnma_radius_host_key *key, const char *passkey,
uint16_t auth_port, uint8_t prio);
int gnma_radius_host_remove(struct gnma_radius_host_key *key);
int gnma_mac_address_list_get(size_t *list_size, struct gnma_fdb_entry *list);
int gnma_system_password_set(char *password);
int gnma_igmp_snooping_set(uint16_t vid, struct gnma_igmp_snoop_attr *attr);
int gnma_igmp_static_groups_set(uint16_t vid, size_t num_groups,
struct gnma_igmp_static_group_attr *groups);
int gnma_nei_addr_get(struct gnma_port_key *iface, struct in_addr *ip,
char *mac, size_t buf_size);
int gnma_igmp_iface_groups_get(struct gnma_port_key *iface,
char *buf, size_t *buf_size);
struct gnma_change *gnma_change_create(void);
void gnma_change_destory(struct gnma_change *);
int gnma_change_exec(struct gnma_change *);
int gnma_techsupport_start(char *res_path);
int gnma_ip_iface_addr_get(struct gnma_vlan_ip_t *address_list, size_t *list_size);

View File

@@ -1,10 +0,0 @@
# Static archive with the netlink helper code.
.PHONY: all
all: netlink.a

# Compile rule.  The previous version passed -lnl-3 -lnl-route-3 here, but
# linker inputs have no effect on a compile-only (-c) step (gcc just warns
# "linker input file unused"); link against libnl at final-link time instead.
# $(CC)/$(AR) are used so the toolchain can be overridden from the command line.
%.o: %.c
	$(CC) -c -o $@ $(CFLAGS) -I ./ -I/usr/include/libnl3 $<

netlink.a: netlink_common.o
	$(AR) crs $@ $^

# Thin archive (ar T flag) referencing netlink.a's members.
netlink.full.a: netlink.a
	$(AR) crsT $@ $^

View File

@@ -1,220 +0,0 @@
#include <sys/socket.h>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <arpa/inet.h>
#include <unistd.h>
#include <net/if.h>
#include <netlink/netlink.h>
#include <netlink/route/link.h>
#include <netlink/route/route.h>
#include <netlink/route/addr.h>
#include <errno.h>
#include <netlink_common.h>
#define BUFFER_SIZE 4096
#define likely(x) __builtin_expect(!!(x), 1)
#define unlikely(x) __builtin_expect(!!(x), 0)
#define for_each_nlmsg(n, buf, len) \
for (n = (struct nlmsghdr*)buf; \
NLMSG_OK(n, (uint32_t)len) && n->nlmsg_type != NLMSG_DONE; \
n = NLMSG_NEXT(n, len))
#define for_each_rattr(n, buf, len) \
for (n = (struct rtattr*)buf; RTA_OK(n, len); n = RTA_NEXT(n, len))
/*
 * Open a raw NETLINK_ROUTE socket and, on success, store its fd in *sock.
 * Returns 0 on success, -1 on failure (errno is left as set by socket()).
 */
static int _nl_connect(int *sock)
{
	int fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_ROUTE);

	if (fd < 0)
		return -1;

	*sock = fd;
	return 0;
}
/* Close a netlink socket previously opened by _nl_connect(). */
static void _nl_disconnect(int sock)
{
	close(sock);
}
/*
 * Send an RTM_GETADDR dump request (IPv4 family) on the given netlink
 * socket, asking the kernel for the addresses of all interfaces.
 * Returns 0 on success, -1 if sendmsg() fails.
 */
static int _nl_request_ip_send(int sock)
{
	struct sockaddr_nl sa = {.nl_family = AF_NETLINK}; /* dest: kernel (pid 0) */
	char buf[BUFFER_SIZE];
	struct ifaddrmsg *ifa;
	struct nlmsghdr *nl;
	struct msghdr msg;
	struct iovec iov;
	int res;
	memset(&msg, 0, sizeof(msg));
	memset(buf, 0, BUFFER_SIZE);
	/* Build the request in place inside buf: an nlmsghdr immediately
	 * followed by its ifaddrmsg payload. */
	nl = (struct nlmsghdr*)buf;
	nl->nlmsg_len = NLMSG_LENGTH(sizeof(struct ifaddrmsg));
	nl->nlmsg_type = RTM_GETADDR;
	/* NLM_F_ROOT requests the whole table (a dump), not a single entry. */
	nl->nlmsg_flags = NLM_F_REQUEST | NLM_F_ROOT;
	iov.iov_base = nl;
	iov.iov_len = nl->nlmsg_len;
	ifa = (struct ifaddrmsg*)NLMSG_DATA(nl);
	ifa->ifa_family = AF_INET; /* IPv4 */
	msg.msg_name = &sa;
	msg.msg_namelen = sizeof(sa);
	msg.msg_iov = &iov;
	msg.msg_iovlen = 1;
	res = sendmsg(sock, &msg, 0);
	if (res < 0)
		return -1;
	return 0;
}
/*
 * Receive one netlink datagram into buf.  *len is in/out: on entry the
 * buffer capacity, on successful return the number of bytes received.
 * Returns 0 on success, -1 if recvmsg() fails.
 */
static int _nl_response_get(int sock, void *buf, size_t *len)
{
	struct sockaddr_nl sa;
	struct msghdr msg;
	struct iovec iov;
	int received;

	memset(&sa, 0, sizeof(sa));
	sa.nl_family = AF_NETLINK;

	iov.iov_base = buf;
	iov.iov_len = *len;

	memset(&msg, 0, sizeof(msg));
	msg.msg_name = &sa;
	msg.msg_namelen = sizeof(sa);
	msg.msg_iov = &iov;
	msg.msg_iovlen = 1;

	received = recvmsg(sock, &msg, 0);
	if (received < 0)
		return -1;

	*len = received;
	return 0;
}
static int _nl_iface_addr_parse(uint32_t vid, void *buf, size_t len,
unsigned char prefixlen, struct nl_vid_addr *addr)
{
struct rtattr *rta = NULL;
for_each_rattr(rta, buf, len) {
if (rta->rta_type == IFA_LOCAL) {
memcpy(&addr->address, RTA_DATA(rta), sizeof(addr->address));
addr->vid = vid;
addr->prefixlen = prefixlen;
break;
}
}
return 0;
}
/*
 * Parse one received netlink buffer of RTM_NEWADDR messages, collecting the
 * addresses of VlanN interfaces.
 *
 * Count-or-fill contract: when addr_list is NULL or *list_size is 0 the
 * function only counts matches; otherwise it fills addr_list up to the
 * capacity given in *list_size.  On return *list_size holds the number of
 * matches seen.  Returns 0 on success, -ENODATA when the buffer ends with
 * NLMSG_DONE (dump complete), -EOVERFLOW when addr_list is too small, and
 * -1 on NLMSG_ERROR or an unresolvable interface index.
 */
static int _nl_response_addr_parse(void *buf,
				   size_t len,
				   struct nl_vid_addr *addr_list,
				   size_t *list_size)
{
	struct ifaddrmsg *iface_addr;
	struct nlmsghdr *nl = NULL;
	char ifname[IF_NAMESIZE];
	size_t num_addrs = 0;
	uint32_t vid;
	int err = 0;
	for_each_nlmsg(nl, buf, len) {
		if (nl->nlmsg_type == NLMSG_ERROR)
			return -1;
		if (nl->nlmsg_type != RTM_NEWADDR) /* only care for addr */
			continue;
		iface_addr = (struct ifaddrmsg*)NLMSG_DATA(nl);
		if (!if_indextoname(iface_addr->ifa_index, ifname))
			return -1;
		/* Only "Vlan<N>" interfaces are of interest. */
		if (sscanf(ifname, "Vlan%u", &vid) != 1)
			continue;
		if (!addr_list || *list_size == 0) { /* counting-only mode */
			num_addrs++;
			continue;
		}
		/* BUGFIX: the old check used '>' here, so when num_addrs was
		 * exactly *list_size the write below went one element past
		 * the end of addr_list.  '>=' rejects the write first. */
		if (num_addrs >= *list_size)
			return -EOVERFLOW;
		err = _nl_iface_addr_parse(vid, IFA_RTA(iface_addr), IFA_PAYLOAD(nl),
					   iface_addr->ifa_prefixlen,
					   &addr_list[num_addrs++]);
		if (err)
			break;
	}
	if (num_addrs > *list_size)
		err = -EOVERFLOW;
	*list_size = num_addrs;
	if (err)
		return err;
	/* for_each_nlmsg leaves 'len' holding the unconsumed byte count, so
	 * NLMSG_OK guards the final dereference: if the loop stopped on a
	 * truncated/malformed message rather than on NLMSG_DONE, reading
	 * nl->nlmsg_type unchecked could touch bytes past the message. */
	return (NLMSG_OK(nl, (uint32_t)len) && nl->nlmsg_type == NLMSG_DONE)
		? -ENODATA : 0;
}
/*
 * Retrieve the IPv4 addresses of all VlanN interfaces via an rtnetlink
 * RTM_GETADDR dump.
 *
 * Count-or-fill contract (mirrors _nl_response_addr_parse): pass NULL /
 * *list_size == 0 to only count; otherwise addr_list is filled up to
 * *list_size entries.  On return *list_size holds the total number found.
 * Returns 0 on success, -EOVERFLOW if the caller's array was too small,
 * other non-zero on socket/parse failure.
 */
int nl_get_ip_list(struct nl_vid_addr *addr_list, size_t *list_size)
{
	size_t buf_len, batch_size = 0, num_addrs = 0;
	char buf[BUFFER_SIZE];
	int sock = 0;
	int err;
	err = _nl_connect(&sock);
	if (err)
		return err;
	err = _nl_request_ip_send(sock);
	if (err)
		goto out;
	while (1) {
		/* BUGFIX: buf_len must be reset to the full capacity on every
		 * iteration.  _nl_response_get() overwrites it with the byte
		 * count actually received, so previously the usable receive
		 * capacity shrank after the first batch and later (larger)
		 * batches could be truncated. */
		buf_len = BUFFER_SIZE;
		err = _nl_response_get(sock, buf, &buf_len);
		if (err)
			goto out;
		/* First pass over this batch: count matches only. */
		batch_size = 0;
		err = _nl_response_addr_parse(buf, buf_len, NULL, &batch_size);
		if (err == -ENODATA) {
			/* NLMSG_DONE seen: dump complete.
			 * NOTE(review): a batch that carries both addresses
			 * and NLMSG_DONE would have its batch_size dropped
			 * here (pre-existing behavior) -- confirm whether the
			 * kernel can coalesce them on this platform. */
			err = 0;
			break;
		}
		if (err && err != -EOVERFLOW) {
			goto out;
		}
		num_addrs += batch_size;
		if (!addr_list || *list_size == 0) /* caller is only counting */
			continue;
		if (num_addrs > *list_size) {
			err = -EOVERFLOW;
			break;
		}
		/* Second pass: fill the caller's array with this batch. */
		err = _nl_response_addr_parse(buf, buf_len, &addr_list[num_addrs - batch_size], &batch_size);
		if (unlikely(err == -ENODATA)) {
			err = 0;
			break;
		}
		if (err)
			goto out;
	}
	if (num_addrs > *list_size)
		err = -EOVERFLOW;
	*list_size = num_addrs;
out:
	_nl_disconnect(sock);
	return err;
}

View File

@@ -1,12 +0,0 @@
#ifndef _NETLINK_COMMON
#define _NETLINK_COMMON

/* BUGFIX: the header used uint16_t/uint32_t/size_t without including their
 * defining headers, so it only compiled when the includer happened to pull
 * in <stdint.h>/<stddef.h> first.  Make it self-contained. */
#include <stddef.h>
#include <stdint.h>

/* One IPv4 address learned from the kernel for a "Vlan<N>" interface. */
struct nl_vid_addr {
	uint16_t vid;        /* VLAN id parsed out of the "Vlan%u" ifname */
	uint16_t prefixlen;  /* network prefix length, in bits */
	uint32_t address;    /* IPv4 address; raw IFA_LOCAL payload from
	                      * rtnetlink, presumably network byte order --
	                      * TODO confirm against consumers */
};

/*
 * Fill addr_list (capacity *list_size entries) with the addresses of all
 * VlanN interfaces; on return *list_size holds the number found.  Pass
 * NULL / 0 to only count.  Returns 0 on success, -EOVERFLOW when the
 * caller's buffer is too small, other non-zero on failure.
 */
int nl_get_ip_list(struct nl_vid_addr *addr_list, size_t *list_size);

#endif

File diff suppressed because it is too large Load Diff

View File

@@ -1,14 +0,0 @@
/* Build-revision identification for this platform.
 * PLATFORM_REVISION expands to the string "Rel 2.2 build 5".
 * The two-level XSTR/STR pair is the standard stringification trick:
 * XSTR() forces expansion of its argument before STR() applies '#',
 * so the macro *values* (not their names) end up in the string. */
#ifndef _PLAT_REVISION
#define _PLAT_REVISION
#define XSTR(x) STR(x)
#define STR(x) #x
/* Release/build numbers baked into this header. */
#define PLATFORM_REL_NUM 2.2
#define PLATFORM_BUILD_NUM 5
/* A build system may pre-define PLATFORM_REVISION (e.g. via -D) to
 * override the composed default below. */
#ifndef PLATFORM_REVISION
#define PLATFORM_REVISION "Rel " XSTR(PLATFORM_REL_NUM) " build " XSTR(PLATFORM_BUILD_NUM)
#endif
#endif

View File

@@ -0,0 +1,3 @@
# EC platform build fragment: contribute this platform's backend
# implementation (plat-ec.c) to the aggregate PLAT_SOURCES list that
# the including CMake build compiles into the platform library.
list(APPEND PLAT_SOURCES
    ${CMAKE_CURRENT_LIST_DIR}/plat-ec.c
)

File diff suppressed because it is too large Load Diff

View File

@@ -2,8 +2,4 @@ plat.a: plat-example.o
ar crs $@ $^
%.o: %.c
ifdef PLATFORM_REVISION
gcc -c -o $@ ${CFLAGS} -I ./ -I ../../include -D PLATFORM_REVISION='"$(PLATFORM_REVISION)"' $^
else
gcc -c -o $@ ${CFLAGS} -I ./ -I ../../include $^
endif

View File

@@ -2,7 +2,6 @@
#include <ucentral-platform.h>
#include <ucentral-log.h>
#include <plat-revision.h>
#define UNUSED_PARAM(param) (void)((param))
@@ -13,11 +12,7 @@ int plat_init(void)
int plat_info_get(struct plat_platform_info *info)
{
*info = (struct plat_platform_info){0};
snprintf(info->platform, sizeof info->platform, "%s", "Example Platform" );
snprintf(info->hwsku, sizeof info->hwsku, "%s", "example-platform-sku");
snprintf(info->mac, sizeof info->mac, "%s", "24:fe:9a:0f:48:f0");
UNUSED_PARAM(info);
return 0;
}
@@ -161,45 +156,10 @@ int plat_port_num_get(uint16_t *num_of_active_ports)
UNUSED_PARAM(num_of_active_ports);
return 0;
}
int plat_revision_get(char *str, size_t str_max_len)
{
snprintf(str, str_max_len, PLATFORM_REVISION);
return 0;
}
int plat_reboot_cause_get(struct plat_reboot_cause *cause)
{
UNUSED_PARAM(cause);
return 0;
}
int plat_event_subscribe(const struct plat_event_callbacks *cbs)
{
UNUSED_PARAM(cbs);
return 0;
}
void plat_event_unsubscribe(void)
{
return;
}
int plat_running_img_name_get(char *str, size_t str_max_len)
{
UNUSED_PARAM(str_max_len);
UNUSED_PARAM(str);
return 0;
}
int plat_metrics_save(const struct plat_metrics_cfg *cfg)
{
UNUSED_PARAM(cfg);
return 0;
}
int plat_metrics_restore(struct plat_metrics_cfg *cfg)
{
UNUSED_PARAM(cfg);
return 0;
}
int plat_run_script(struct plat_run_script *p)
{
UNUSED_PARAM(p);
return 0;
}

View File

@@ -1,14 +0,0 @@
/* Build-revision identification for this platform.
 * PLATFORM_REVISION expands to the string "Rel 3.2.0 build 5" via the
 * standard two-level XSTR/STR stringification trick (XSTR expands the
 * macro value before STR applies '#').
 * NOTE(review): the include guard matches the other platforms'
 * plat-revision.h; presumably only one platform header is ever on the
 * include path per build — confirm. */
#ifndef _PLAT_REVISION
#define _PLAT_REVISION
#define XSTR(x) STR(x)
#define STR(x) #x
/* Release/build numbers baked into this header. */
#define PLATFORM_REL_NUM 3.2.0
#define PLATFORM_BUILD_NUM 5
/* A build system may pre-define PLATFORM_REVISION (e.g. via -D) to
 * override the composed default below. */
#ifndef PLATFORM_REVISION
#define PLATFORM_REVISION "Rel " XSTR(PLATFORM_REL_NUM) " build " XSTR(PLATFORM_BUILD_NUM)
#endif
#endif

File diff suppressed because it is too large Load Diff

View File

@@ -70,13 +70,12 @@ int ucentral_router_fib_key_cmp(const struct ucentral_router_fib_key *a,
return 0;
}
int ucentral_router_fib_info_cmp(const struct ucentral_router_fib_info *a,
const struct ucentral_router_fib_info *b)
/* bool result, as we have no criteria to sort this */
bool ucentral_router_fib_info_cmp(const struct ucentral_router_fib_info *a,
const struct ucentral_router_fib_info *b)
{
if (a->type > b->type)
return 1;
if (a->type < b->type)
return -1;
if (a->type != b->type)
return false;
switch (a->type) {
case UCENTRAL_ROUTE_BLACKHOLE:
@@ -84,32 +83,24 @@ int ucentral_router_fib_info_cmp(const struct ucentral_router_fib_info *a,
case UCENTRAL_ROUTE_UNREACHABLE:
break;
case UCENTRAL_ROUTE_CONNECTED:
if (a->connected.vid > b->connected.vid)
return 1;
if (a->connected.vid < b->connected.vid)
return -1;
if (a->connected.vid != b->connected.vid)
return false;
break;
case UCENTRAL_ROUTE_BROADCAST:
if (a->broadcast.vid > b->broadcast.vid)
return 1;
if (a->broadcast.vid < b->broadcast.vid)
return -1;
if (a->broadcast.vid != b->broadcast.vid)
return false;
break;
case UCENTRAL_ROUTE_NH:
if (a->nh.vid > b->nh.vid)
return 1;
if (a->nh.vid < b->nh.vid)
return -1;
if (a->nh.gw.s_addr > b->nh.gw.s_addr)
return 1;
if (a->nh.gw.s_addr < b->nh.gw.s_addr)
return -1;
if (a->nh.vid != b->nh.vid)
return false;
if (a->nh.gw.s_addr != b->nh.gw.s_addr)
return false;
break;
default:
break;
}
return 0;
return true;
}
static int __fib_node_key_cmp_cb(const void *a, const void *b)

View File

@@ -22,14 +22,18 @@
#include <cjson/cJSON.h>
#include "ucentral.h"
/* WA for parser issue */
/* #include "ucentral-json-parser.h" */
#include "ucentral-json-parser.h"
#include <openssl/conf.h>
#include <openssl/err.h>
#include <openssl/pem.h>
#include <openssl/x509v3.h>
#ifdef PLAT_EC
#include "api_device.h"
#include "api_session.h"
#endif
struct per_vhost_data__minimal {
struct lws_context *context;
struct lws_vhost *vhost;
@@ -48,6 +52,7 @@ time_t conn_time;
static int conn_successfull;
struct plat_metrics_cfg ucentral_metrics;
static struct uc_json_parser parser;
static int interrupted;
static pthread_t sigthread;
@@ -65,10 +70,13 @@ lws_protocols protocols[] = {
};
struct client_config client = {
#ifdef PLAT_EC
.redirector_file = "/etc/ucentral/redirector.json",
.redirector_file_dbg = "/etc/ucentral/firstcontact.hdr",
#else
.redirector_file = "/tmp/ucentral-redirector.json",
.redirector_file_dbg = "/tmp/firstcontact.hdr",
.ols_schema_version_file = "/etc/schema.json",
.ols_client_version_file = "/etc/version.json",
#endif
.server = NULL,
.port = 15002,
.path = "/",
@@ -341,7 +349,6 @@ sul_connect_attempt(struct lws_sorted_usec_list *sul)
UC_LOG_DBG("Connected\n");
}
/* WA for parser issue
static void parse_cb(cJSON *j, void *data)
{
(void)data;
@@ -353,7 +360,6 @@ static void parse_error_cb(void *data)
(void)data;
UC_LOG_ERR("JSON config parse failed");
}
*/
static const char *redirector_host_get(void)
{
@@ -378,8 +384,12 @@ static int gateway_cert_trust(void)
static int redirector_cert_trust(void)
{
#ifdef PLAT_EC
return 1;
#else
char *v = getenv("UC_REDIRECTOR_CERT_TRUST");
return v && *v && strcmp("0", v);
#endif
}
static int
@@ -428,15 +438,12 @@ callback_broker(struct lws *wsi, enum lws_callback_reasons reason,
websocket = wsi;
connect_send();
conn_successfull = 1;
/* WA for parser issue */
/* uc_json_parser_init(&parser, parse_cb, parse_error_cb, 0); */
uc_json_parser_init(&parser, parse_cb, parse_error_cb, 0);
lws_callback_on_writable(websocket);
break;
case LWS_CALLBACK_CLIENT_RECEIVE:
/* WA for parser issue */
/* uc_json_parser_feed(&parser, in, len); */
proto_handle((char *)in);
uc_json_parser_feed(&parser, in, len);
break;
case LWS_CALLBACK_CLIENT_CONNECTION_ERROR:
@@ -450,8 +457,7 @@ callback_broker(struct lws *wsi, enum lws_callback_reasons reason,
/* fall through */
case LWS_CALLBACK_CLIENT_CLOSED:
UC_LOG_INFO("connection closed\n");
/* WA for parser issue */
/* uc_json_parser_uninit(&parser); */
uc_json_parser_uninit(&parser);
websocket = NULL;
set_conn_time();
vhd->client_wsi = NULL;
@@ -691,57 +697,6 @@ static void sigthread_create(void)
}
}
/*
 * get_updated_pass() - consume a one-shot admin password drop file.
 *
 * Reads /var/lib/ucentral/admin-cred.buf into @pass (caller buffer of
 * capacity *@len), then deletes the file so the credential is delivered
 * exactly once. On success, *@len is updated to the number of bytes read.
 *
 * Returns 0 on success; -1 if the file is absent, unreadable, empty,
 * too big (>= 64 bytes), the caller buffer is too small, or the file
 * could not be removed afterwards.
 *
 * NOTE(review): read() returns ssize_t; assigning it to the unsigned
 * password_size turns a -1 error into SIZE_MAX, which only trips the
 * "*len < password_size" check below — confirm this is intended.
 * NOTE(review): strncpy() copies exactly password_size bytes and does
 * NOT NUL-terminate @pass; verify callers pre-zero their buffer or add
 * a terminator before using @pass as a C string.
 */
static int get_updated_pass(char *pass, size_t *len) {
	char *passwd_file_path = "/var/lib/ucentral/admin-cred.buf";
	size_t password_size;
	int passwd_fd = -1;
	char password[64];
	/* No drop file means no pending password update. */
	if (access(passwd_file_path, F_OK))
		goto out;
	passwd_fd = open(passwd_file_path, O_RDONLY);
	if (passwd_fd < 0) {
		UC_LOG_ERR("Failed to open %s", passwd_file_path);
		goto out;
	}
	/* Zero the buffer so a partial read leaves it NUL-terminated. */
	memset(&password, 0, sizeof(password));
	password_size = read(passwd_fd, &password, sizeof(password));
	/* A full-buffer read means the file is at least 64 bytes: reject,
	 * since a valid password must fit with room to spare. */
	if (password_size == sizeof(password)) {
		UC_LOG_ERR("%s is too big", passwd_file_path);
		goto out_close;
	}
	if (!password_size) {
		UC_LOG_ERR("failed to read %s", passwd_file_path);
		goto out_close;
	}
	if (*len < password_size) {
		UC_LOG_ERR("out buffer is too small (%lu < %lu)",
			*len, password_size);
		goto out_close;
	}
	/* Credential consumed: close and delete the one-shot drop file
	 * so the password cannot be read again. */
	close(passwd_fd);
	passwd_fd = -1;
	if (remove(passwd_file_path)) {
		UC_LOG_ERR("Failed to remove %s", passwd_file_path);
		goto out;
	}
	strncpy(pass, password, password_size);
	*len = password_size;
	return 0;
out_close:
	close(passwd_fd);
out:
	return -1;
}
int main(void)
{
int logs = LLL_USER | LLL_ERR | LLL_WARN | LLL_NOTICE | LLL_CLIENT;
@@ -752,11 +707,13 @@ int main(void)
struct lws_context_creation_info info = {0};
bool reboot_reason_sent = false;
char *gw_host = NULL;
size_t password_len;
char password[64];
struct stat st;
int ret;
#ifdef PLAT_EC
sleep(50); // wait for system ready
#endif
sigthread_create(); /* move signal handling to a dedicated thread */
openlog("ucentral-client", LOG_CONS | LOG_NDELAY | LOG_PERROR, LOG_DAEMON);
@@ -774,6 +731,17 @@ int main(void)
uc_log_severity_set(UC_LOG_COMPONENT_CLIENT, UC_LOG_SV_ERR);
uc_log_severity_set(UC_LOG_COMPONENT_PLAT, UC_LOG_SV_ERR);
#ifdef PLAT_EC
int status = session_start();
if (status == STATUS_SUCCESS) {
UC_LOG_INFO("Successfully connected to SNMP!\n");
} else {
UC_LOG_INFO("Could not connect to SNMP!\n");
exit(EXIT_FAILURE);;
}
#endif
if (client_config_read()) {
UC_LOG_CRIT("client_config_read failed");
exit(EXIT_FAILURE);
@@ -783,18 +751,39 @@ int main(void)
UC_LOG_CRIT("Platform initialization failed");
}
plat_revision_get(client.firmware, sizeof(client.firmware));
plat_running_img_name_get(client.firmware, sizeof(client.firmware));
if ((gw_host = getenv("UC_GATEWAY_ADDRESS"))) {
client.server = strdup(gw_host);
#ifdef PLAT_EC
FILE *f = fopen(REDIRECTOR_USER_DEFINE_FILE, "r");
if (f) {
size_t cnt;
char redirector_url[256];
memset(redirector_url, 0, sizeof(redirector_url));
cnt = fread(redirector_url, 1, sizeof(redirector_url), f);
fclose(f);
client.server = redirector_url;
} else {
ret = ucentral_redirector_parse(&gw_host);
if (ret) {
/* parse failed by present redirector file, try to get redirector file from digicert */
#else
if ((gw_host = getenv("UC_GATEWAY_ADDRESS"))) {
gw_host = strdup(gw_host);
} else {
#endif
while (1) {
if (uc_loop_interrupted_get())
goto exit;
if (firstcontact()) {
UC_LOG_INFO(
"Firstcontact failed; trying again in 1 second...\n");
"Firstcontact failed; trying again in 30 second...\n");
#ifdef PLAT_EC
sleep(30);
#else
sleep(1);
#endif
continue;
}
@@ -809,6 +798,11 @@ int main(void)
} else {
client.server = gw_host;
}
#ifdef PLAT_EC
} else {
client.server = gw_host;
}
#endif
}
memset(&info, 0, sizeof info);
@@ -831,20 +825,13 @@ int main(void)
}
sigthread_context_set(context);
password_len = sizeof(password);
if (get_updated_pass(password, &password_len))
password_len = 0;
proto_start();
while (!uc_loop_interrupted_get()) {
lws_service_tsi(context, 0, 0);
if (conn_successfull) {
if (password_len) {
deviceupdate_send(password);
password_len = 0;
}
deviceupdate_send();
if (!reboot_reason_sent) {
device_rebootcause_send();
reboot_reason_sent = true;
@@ -861,5 +848,9 @@ exit:
free(gw_host);
curl_global_cleanup();
#ifdef PLAT_EC
session_close();
clean_stats();
#endif
return 0;
}

View File

@@ -175,7 +175,12 @@ void uc_json_parser_init(struct uc_json_parser *uctx, uc_json_parse_cb cb,
void uc_json_parser_uninit(struct uc_json_parser *uctx)
{
/* The function lejp_destruct() cause segmentation fault on EC platform, comment out this line when building EC platform.
* The function lejp_destruct() describes "no allocations... just let callback know what it happening".
*/
#ifndef PLAT_EC
lejp_destruct(&uctx->ctx);
#endif
free(uctx->str);
cJSON_Delete(uctx->root);
*uctx = (struct uc_json_parser){ 0 };

View File

@@ -32,6 +32,10 @@ extern "C" {
#define UCENTRAL_TMP "/tmp/ucentral.cfg"
#define UCENTRAL_LATEST "/etc/ucentral/ucentral.active"
#ifdef PLAT_EC
#define REDIRECTOR_USER_DEFINE_FILE "/etc/ucentral/redirector-user-defined"
#endif
/* It's expected that dev-id format is the following:
* 11111111-1111-1111-1111-111111111111
* and the max size of such string is 36 symbols.
@@ -41,8 +45,6 @@ extern "C" {
struct client_config {
const char *redirector_file;
const char *redirector_file_dbg;
const char *ols_client_version_file;
const char *ols_schema_version_file;
const char *server;
int16_t port;
const char *path;
@@ -62,14 +64,14 @@ extern time_t conn_time;
extern struct plat_metrics_cfg ucentral_metrics;
/* proto.c */
void proto_handle(char *cmd);
void proto_handle(cJSON *cmd);
void proto_cb_register_uc_send_msg(uc_send_msg_cb cb);
void proto_cb_register_uc_connect_msg_send(uc_send_connect_msg_cb cb);
void connect_send(void);
void ping_send(void);
void health_send(struct plat_health_info *);
void state_send(struct plat_state_info *plat_state_info);
void deviceupdate_send(const char *updated_pass);
void deviceupdate_send(void);
void device_rebootcause_send(void);
void telemetry_send(struct plat_state_info *plat_state_info);
void log_send(const char *message, int severity);

View File

@@ -1,926 +0,0 @@
# Configuration Testing Framework Maintenance Guide
This document provides procedures for maintaining the configuration testing framework as the software evolves. The framework consists of two main components that require periodic updates:
1. **Schema Files** - JSON schema definitions from ols-ucentral-schema repository
2. **Property Database** - Tracking of parsed properties in test-config-parser.c
## Table of Contents
- [Schema Update Procedures](#schema-update-procedures)
- [Property Database Update Procedures](#property-database-update-procedures)
- [Version Synchronization](#version-synchronization)
- [Testing After Updates](#testing-after-updates)
- [Troubleshooting](#troubleshooting)
---
## Schema Update Procedures
### When to Update Schema
Update the schema when:
- New version of ols-ucentral-schema is released
- New configuration properties are added to the schema
- Property definitions change (type, constraints, etc.)
- Schema validation errors appear for valid configurations
- Preparing for new feature development
### Schema Update Process
#### Step 1: Identify Current Schema Version
```bash
cd /path/to/ols-ucentral-client
# Check current schema version
head -20 config-samples/ucentral.schema.pretty.json | grep -i version
# Note the current version for rollback if needed
```
#### Step 2: Obtain New Schema
**Option A: From ols-ucentral-schema Repository**
```bash
# Clone or update the schema repository
cd /tmp
git clone https://github.com/Telecominfraproject/ols-ucentral-schema.git
# OR
cd /path/to/existing/ols-ucentral-schema
git pull origin main
# Check available versions/tags
git tag -l
# Checkout specific version (recommended for stability)
git checkout v4.2.0
# Copy schema to your repository
cp ols.ucentral.schema.json /path/to/ols-ucentral-client/config-samples/
cp ucentral.schema.pretty.json /path/to/ols-ucentral-client/config-samples/
```
**Option B: From Build Artifacts**
If schema is embedded in builds:
```bash
# Extract from build artifacts
cd /path/to/ols-ucentral-client
# Schema may be copied during build process - check Makefile
```
#### Step 3: Validate Schema File
```bash
cd /path/to/ols-ucentral-client/src/ucentral-client
# Verify schema is valid JSON
python3 -c "import json; json.load(open('../../config-samples/ucentral.schema.pretty.json'))"
# Test that validator can load it
python3 validate-schema.py --schema ../../config-samples/ucentral.schema.pretty.json \
../../config-samples/cfg0.json
```
#### Step 4: Test Against Existing Configurations
```bash
# Run schema validation on all test configs
make validate-schema
# Review results for new validation errors
```
**Expected outcomes:**
- **All valid**: Schema is backward compatible
- **Some failures**: Schema may have added requirements or changed definitions
- **Many failures**: Schema may be incompatible - review changes carefully
#### Step 5: Address Validation Failures
If existing valid configs now fail validation:
**A. Investigate Schema Changes**
```bash
# Compare old and new schema
cd config-samples
diff <(jq . ols.ucentral.schema.json.old) <(jq . ols.ucentral.schema.json) > schema-changes.diff
# Look for:
# - New required fields
# - Changed property types
# - New constraints (min/max, enums)
# - Removed properties
```
**B. Update Affected Configurations**
```bash
# For each failing config:
# 1. Review the validation error
./validate-schema.py ../../config-samples/failing-config.json
# 2. Fix the configuration to meet new requirements
vi ../../config-samples/failing-config.json
# 3. Revalidate
./validate-schema.py ../../config-samples/failing-config.json
```
**C. Document Breaking Changes**
Create or update SCHEMA_CHANGES.md:
```markdown
## Schema Update: v4.1.0 → v4.2.0
### Breaking Changes
- `unit.timezone` now required (was optional)
- `interfaces.ethernet.speed` changed from string to enum
- New required field: `switch.system-name`
### Configurations Updated
- cfg0.json - Added unit.timezone
- ECS4150-TM.json - Changed speed format
- All configs - Added switch.system-name with default value
### Migration Guide
For existing configurations:
1. Add `"timezone": "UTC"` to unit section
- Change speed: `"1000"` → `"1G"`
3. Add `"system-name": "switch"` to switch section
```
#### Step 6: Update Schema Reference in Code
If schema location or format changed:
```bash
# Update validate-schema.py search paths if needed
vi tests/schema/validate-schema.py
# Update _find_default_schema() method:
# script_dir / "../../config-samples/ucentral.schema.pretty.json",
# script_dir / "../../config-samples/ols.ucentral.schema.json",
```
#### Step 7: Commit Schema Update
```bash
cd /path/to/ols-ucentral-client
# Add updated schema
git add config-samples/ols.ucentral.schema*.json
# Add any fixed configurations
git add config-samples/*.json
# Add documentation
git add src/ucentral-client/SCHEMA_CHANGES.md
# Commit with clear message
git commit -m "Update uCentral schema to v4.2.0
- Updated schema from ols-ucentral-schema v4.2.0
- Fixed 5 test configurations for new requirements
- Added timezone, updated speed enums, added system-name
- See SCHEMA_CHANGES.md for migration guide"
```
### Schema Rollback Procedure
If new schema causes major issues:
```bash
# Revert to previous schema
git checkout HEAD~1 -- config-samples/ols.ucentral.schema*.json
# Or restore from backup
cp config-samples/ucentral.schema.pretty.json.backup \
config-samples/ucentral.schema.pretty.json
# Verify old schema works
make validate-schema
```
---
## Property Database Update Procedures
### When to Update Property Database
Update the property database when:
- New parser functions added to proto.c
- Existing parser functions modified (name change, scope change)
- Properties removed from parser implementation
- Parser refactoring changes function organization
- Adding support for new configuration features
### Property Database Update Process
#### Step 1: Identify Parser Changes
**A. New Feature Development**
If you're actively developing:
```bash
# You know what functions you added
# Example: Added cfg_port_mirroring_parse()
grep -n "cfg_port_mirroring_parse" src/ucentral-client/proto.c
```
**B. Code Update/Merge**
If updating from upstream or merging branches:
```bash
cd /path/to/ols-ucentral-client/src/ucentral-client
# Compare parser functions between versions
git diff HEAD~1 proto.c | grep "^+.*cfg_.*_parse"
# List all current parser functions
grep -n "^static.*cfg_.*_parse\|^cfg_.*_parse" proto.c | awk '{print $3}' | sort
```
**C. Comprehensive Audit**
Periodically audit all functions:
```bash
# Extract all cfg_*_parse functions from proto.c
grep -o "cfg_[a-z_]*_parse" proto.c | sort -u > current-functions.txt
# Extract all parser_function references from property database
grep "parser_function" test-config-parser.c | \
sed 's/.*"\(cfg_[^"]*\)".*/\1/' | sort -u > database-functions.txt
# Find functions in proto.c but NOT in database (missing entries)
comm -23 current-functions.txt database-functions.txt > missing-from-database.txt
# Find functions in database but NOT in proto.c (invalid entries)
comm -13 current-functions.txt database-functions.txt > invalid-in-database.txt
# Review both files
cat missing-from-database.txt
cat invalid-in-database.txt
```
#### Step 2: Remove Invalid Property Entries
For functions that no longer exist:
**A. Identify Properties to Remove**
```bash
# For each invalid function, find its properties
INVALID_FUNC="cfg_old_feature_parse"
grep -n "\"$INVALID_FUNC\"" test-config-parser.c
# This shows line numbers of all property entries using this function
```
**B. Remove Property Entries**
```python
#!/usr/bin/env python3
# remove-properties.py - Helper script to remove property entries
import sys
import re
if len(sys.argv) < 2:
print("Usage: ./remove-properties.py <function_name>")
sys.exit(1)
function_name = sys.argv[1]
with open('test-config-parser.c', 'r') as f:
lines = f.readlines()
# Find and remove entries for this function
output_lines = []
skip_entry = False
brace_count = 0
for line in lines:
# Check if this line starts a property entry with our function
if f'parser_function = "{function_name}"' in line:
# Find the start of this struct (previous { )
# Mark to skip this entire entry
skip_entry = True
# Walk back to find the opening brace
idx = len(output_lines) - 1
while idx >= 0:
if '{' in output_lines[idx]:
output_lines = output_lines[:idx]
break
idx -= 1
continue
if skip_entry:
if '},' in line:
skip_entry = False
continue
output_lines.append(line)
with open('test-config-parser.c', 'w') as f:
f.writelines(output_lines)
print(f"Removed entries for {function_name}")
```
**Usage:**
```bash
cd /path/to/ols-ucentral-client/src/ucentral-client
# Remove entries for obsolete function
python3 remove-properties.py cfg_old_feature_parse
# Verify compilation still works
make clean
make test-config-parser
```
**Manual Removal Alternative:**
```bash
# Edit test-config-parser.c
vi test-config-parser.c
# Search for the function name: /cfg_old_feature_parse
# Delete the entire property entry (from opening { to closing },)
# Example - DELETE THIS ENTIRE BLOCK:
# {
# .path = "some.old.property",
# .parser_function = "cfg_old_feature_parse()",
# .status = PROP_CONFIGURED,
# .notes = "Old feature"
# },
```
#### Step 3: Add New Property Entries
For new parser functions:
**A. Determine What Properties the Function Handles**
```bash
# Read the function to understand what it parses
vi proto.c
# Search for: /cfg_new_feature_parse
# Look for cJSON_GetObjectItem calls to find property names
grep -A 50 "cfg_new_feature_parse" proto.c | grep "cJSON_GetObjectItem"
# Example output:
# cJSON_GetObjectItem(obj, "enabled")
# cJSON_GetObjectItem(obj, "mode")
# cJSON_GetObjectItem(obj, "timeout")
```
**B. Determine Property Paths**
Property paths follow JSON structure:
```
services.new-feature.enabled → "services.new-feature.enabled"
interfaces.ethernet.speed → "interfaces.ethernet.speed"
switch.spanning-tree.enabled → "switch.spanning-tree.enabled"
```
For array items:
```
interfaces.ethernet[].name → "interfaces.ethernet.name"
vlans[].id → "vlans.id"
```
**C. Add Property Entries to Database**
```bash
vi test-config-parser.c
# Find the properties[] array definition
# Add new entries in logical grouping with related properties
# Template:
# {
# .path = "full.property.path",
# .parser_function = "cfg_function_name()",
# .status = PROP_CONFIGURED,
# .notes = "Description of what this property does"
# },
```
**Example Addition:**
```c
static struct property_info properties[] = {
// ... existing entries ...
// Port Mirroring Configuration (NEW)
{
.path = "services.port-mirroring.enabled",
.parser_function = "cfg_port_mirroring_parse()",
.status = PROP_CONFIGURED,
.notes = "Enable/disable port mirroring service"
},
{
.path = "services.port-mirroring.sessions",
.parser_function = "cfg_port_mirroring_parse()",
.status = PROP_CONFIGURED,
.notes = "Array of mirroring session configurations"
},
{
.path = "services.port-mirroring.sessions.id",
.parser_function = "cfg_port_mirroring_parse()",
.status = PROP_CONFIGURED,
.notes = "Session identifier (1-4)"
},
{
.path = "services.port-mirroring.sessions.source-ports",
.parser_function = "cfg_port_mirroring_parse()",
.status = PROP_CONFIGURED,
.notes = "Array of source port names to mirror"
},
{
.path = "services.port-mirroring.sessions.destination-port",
.parser_function = "cfg_port_mirroring_parse()",
.status = PROP_CONFIGURED,
.notes = "Destination port name for mirrored traffic"
},
{
.path = "services.port-mirroring.sessions.direction",
.parser_function = "cfg_port_mirroring_parse()",
.status = PROP_CONFIGURED,
.notes = "Mirror direction: rx, tx, or both"
},
// ... rest of entries ...
};
```
**Guidelines for Property Entries:**
1. **Grouping**: Keep related properties together with a comment header
2. **Ordering**: Follow JSON structure hierarchy (parent before children)
3. **Naming**: Use exact JSON property names (hyphens, not underscores)
4. **Status**: Use PROP_CONFIGURED for actively parsed properties
5. **Notes**: Provide clear, concise description including valid values/ranges
6. **Arrays**: Use singular form without [] in path (e.g., "sessions.id" not "sessions[].id")
#### Step 4: Verify Property Database Accuracy
```bash
cd /path/to/ols-ucentral-client/src/ucentral-client
# Rebuild test suite
make clean
make test-config-parser
# Run tests to see property usage report
make test-config
# Review the [PROPERTY USAGE REPORT] section
# Check for:
# - "Unknown (not in property database)" - missing entries
# - Properties with correct parser_function references
# - Properties marked as CONFIGURED that should be
```
#### Step 5: Test with Configurations Using New Properties
**A. Create Test Configuration**
```bash
cd config-samples
# Create test config demonstrating new feature
cat > test-new-feature.json <<'EOF'
{
"uuid": 1,
"unit": {
"name": "test-new-feature",
"timezone": "UTC"
},
"interfaces": {
"ethernet": [
{"name": "Ethernet0", "enabled": true}
]
},
"services": {
"port-mirroring": {
"enabled": true,
"sessions": [
{
"id": 1,
"source-ports": ["Ethernet0", "Ethernet1"],
"destination-port": "Ethernet10",
"direction": "both"
}
]
}
}
}
EOF
```
**B. Validate Configuration**
```bash
cd /path/to/ols-ucentral-client/src/ucentral-client
# Schema validation
./validate-schema.py ../../config-samples/test-new-feature.json
# Parser test
./test-config-parser ../../config-samples/test-new-feature.json
# Check property report shows properties as CONFIGURED
make test-config | grep -A 5 "port-mirroring"
```
#### Step 6: Document Property Database Changes
Create or update PROPERTY_DATABASE_CHANGES.md:
```markdown
## Property Database Update: 2025-12-12
### Added Properties
- `services.port-mirroring.*` (6 properties)
- Parser: cfg_port_mirroring_parse()
- Feature: Port mirroring/SPAN configuration
- Test config: test-new-feature.json
### Removed Properties
- `services.legacy-feature.*` (4 properties)
- Reason: cfg_legacy_feature_parse() removed in commit abc123
- Migration: Feature deprecated, no replacement
### Modified Properties
- `switch.spanning-tree.mode`
- Changed parser: cfg_stp_parse() → cfg_spanning_tree_parse()
- Reason: Parser function renamed for consistency
```
#### Step 7: Commit Property Database Changes
```bash
cd /path/to/ols-ucentral-client
# Add modified test file
git add tests/config-parser/test-config-parser.c
# Add test configuration if created
git add config-samples/test-new-feature.json
# Add documentation if created
git add tests/PROPERTY_DATABASE_CHANGES.md
# Commit
git commit -m "Update property database for port mirroring feature
- Added 6 property entries for services.port-mirroring
- Properties handled by cfg_port_mirroring_parse()
- Added test-new-feature.json demonstrating configuration
- All tests passing"
```
### Property Database Maintenance Best Practices
1. **Update Immediately**: When adding new parser functions, add property entries immediately
2. **Remove Promptly**: When removing parser functions, clean up property entries in same commit
3. **Test Always**: Run full test suite after any property database changes
4. **Document Changes**: Maintain changelog of database modifications
5. **Review Periodically**: Audit database accuracy quarterly or after major updates
6. **Platform Sync**: If porting to platform repos, document platform-specific additions
---
## Version Synchronization
### Keeping Schema and Property Database in Sync
The schema and property database serve different but complementary purposes:
**Schema** - Defines what's structurally valid
**Property Database** - Tracks what's actually implemented
### Version Compatibility Matrix
Maintain a compatibility matrix:
```markdown
## Version Compatibility
| Client Version | Schema Version | Property Count | Notes |
|----------------|----------------|----------------|-------|
| 1.0.0 | v4.0.0 | 420 | Initial release |
| 1.1.0 | v4.1.0 | 450 | Added STP, IGMP |
| 1.2.0 | v4.1.0 | 465 | Added PoE, 802.1X |
| 2.0.0 | v4.2.0 | 510 | Major feature update |
```
### Update Coordination
When updating both schema and property database:
1. **Schema First**: Update schema, verify existing configs
2. **Implement Features**: Add parser functions for new schema properties
3. **Update Database**: Add property entries for new implementations
4. **Test Thoroughly**: Run complete test suite
5. **Document Together**: Update documentation explaining the changes
### Tracking Implementation Status
Use property reports to track implementation progress:
```bash
# Generate property usage report
make test-config > report.txt
# Count properties by status
grep "Status: CONFIGURED" report.txt | wc -l
grep "Status: Unknown" report.txt | wc -l
# Identify unimplemented schema properties
grep "Unknown (not in property database)" report.txt
```
---
## Testing After Updates
### Complete Test Sequence
After updating schema or property database:
```bash
cd /path/to/ols-ucentral-client
# 1. Clean build
cd src/ucentral-client
make clean
# 2. Rebuild test tools
make test-config-parser
# 3. Validate schema file
python3 -c "import json; json.load(open('../../config-samples/ucentral.schema.pretty.json'))"
# 4. Run schema validation
make validate-schema
# 5. Run parser tests
make test-config
# 6. Run full test suite
make test-config-full
# 7. Review property usage report
make test-config | grep -A 100 "PROPERTY USAGE REPORT"
```
### Validation Checklist
- [ ] Schema file is valid JSON
- [ ] Schema validator loads successfully
- [ ] All positive test configs pass schema validation
- [ ] All negative test configs fail schema validation (expected)
- [ ] All positive test configs pass parser tests
- [ ] Property database has no references to non-existent functions
- [ ] New properties appear in usage report with correct status
- [ ] No unexpected "Unknown" properties for implemented features
- [ ] Test results match expectations (pass/fail counts)
- [ ] No memory leaks (valgrind if available)
### Regression Testing
Keep baseline test results:
```bash
# Save baseline before changes
make test-config-full > test-results-baseline.txt
# After changes, compare
make test-config-full > test-results-new.txt
diff test-results-baseline.txt test-results-new.txt
# Expected differences:
# - New properties in usage report
# - Updated parser function references
# - New test configs results
# Unexpected differences:
# - Previously passing tests now fail
# - Properties changing status unexpectedly
# - Parser errors on existing configs
```
---
## Troubleshooting
### Schema Update Issues
**Issue: Schema validation fails for all configs**
Possible causes:
- Schema file is corrupted or invalid JSON
- Schema path incorrect in validate-schema.py
- Schema format changed (Draft-7 vs Draft-4)
Resolution:
```bash
# Verify schema is valid JSON
python3 -m json.tool config-samples/ucentral.schema.pretty.json > /dev/null
# Check schema path detection
python3 validate-schema.py --schema ../../config-samples/ucentral.schema.pretty.json \
../../config-samples/cfg0.json
# Compare schema $schema property
grep '$schema' config-samples/ucentral.schema.pretty.json
```
**Issue: New schema rejects previously valid configs**
Possible causes:
- Schema added new required fields
- Schema changed property types
- Schema added constraints (min/max, enums)
Resolution:
```bash
# Get detailed error
./validate-schema.py ../../config-samples/failing-config.json
# Compare schemas to find changes
diff old-schema.json new-schema.json
# Update configs to meet new requirements
```
### Property Database Issues
**Issue: Compilation fails after property database update**
Possible causes:
- Syntax error in property entry (missing comma, quote)
- Invalid struct member
- Property array not properly terminated
Resolution:
```bash
# Check compilation error message
make test-config-parser 2>&1 | head -20
# Common fixes:
# - Add missing comma after previous entry
# - Ensure notes string has closing quote
# - Check .path, .parser_function, .status, .notes are all present
```
**Issue: Tests fail after property database update**
Possible causes:
- Removed properties still referenced elsewhere
- Added properties with wrong parser function
- Property paths don't match JSON structure
Resolution:
```bash
# Run specific config test
./test-config-parser ../../config-samples/failing-config.json
# Check property usage report
make test-config | grep -A 10 "property-name"
# Verify parser function exists
grep "function_name" proto.c
```
**Issue: Properties showing as "Unknown" after adding to database**
Possible causes:
- Property path doesn't match JSON exactly
- Parser function name has typo
- Property entry not in properties[] array
Resolution:
```bash
# Check property path in JSON
cat config-samples/test-config.json | jq '.path.to.property'
# Verify parser function name exactly
grep -n "cfg_function_name" proto.c
# Ensure property entry is within properties[] array bounds
# (Check that it's before the closing }; )
```
### General Maintenance Issues
**Issue: Test results inconsistent between runs**
Possible causes:
- Configs modified between runs
- Schema file changed
- Test order dependency (should not happen)
Resolution:
```bash
# Check for uncommitted changes
git status
# Verify schema hasn't changed
git diff config-samples/ucentral.schema.pretty.json
# Run tests multiple times
for i in {1..3}; do make test-config; done
```
**Issue: Docker environment tests fail but local tests pass**
Possible causes:
- Different schema version in container
- Path differences
- Missing dependencies
Resolution:
```bash
# Check schema in container
docker exec ucentral_client_build_env bash -c \
"cat /root/ols-nos/config-samples/ucentral.schema.pretty.json" | head -20
# Verify paths in container
docker exec ucentral_client_build_env bash -c \
"cd /root/ols-nos/src/ucentral-client && ls -la ../../config-samples/*.json"
# Rebuild container if needed
make clean
make build-host-env
```
---
## Quick Reference
### Schema Update Commands
```bash
# Check current version
head -20 config-samples/ucentral.schema.pretty.json | grep version
# Update schema
cp /path/to/new/schema.json config-samples/ucentral.schema.pretty.json
# Validate schema
python3 -m json.tool config-samples/ucentral.schema.pretty.json > /dev/null
# Test with configs
make validate-schema
```
### Property Database Commands
```bash
# List all parser functions
grep -n "^static.*cfg_.*_parse\|^cfg_.*_parse" proto.c | awk '{print $3}' | sort
# Find function in database
grep -n "function_name" test-config-parser.c
# Rebuild and test
make clean && make test-config
# View property report
make test-config | grep -A 100 "PROPERTY USAGE REPORT"
```
### Testing Commands
```bash
# Full test suite
make test-config-full
# Schema only
make validate-schema
# Parser only
make test-config
# Single config
./test-config-parser ../../config-samples/specific-config.json
```
---
## See Also
- **TEST_CONFIG_README.md** - Testing framework documentation
- **SCHEMA_VALIDATOR_README.md** - Schema validator documentation
- **proto.c** - Parser implementation
- **test-config-parser.c** - Property database location
- **ols-ucentral-schema repository** - Official schema source

View File

@@ -1,451 +0,0 @@
# Configuration Testing Framework
## Overview
The OLS uCentral Client includes a comprehensive configuration testing framework that provides two-layer validation of JSON configurations:
1. **Schema Validation** - Structural validation against the uCentral JSON schema
2. **Parser Testing** - Implementation validation of the C parser with property tracking
This framework enables automated testing, continuous integration, and tracking of configuration feature implementation status.
## Documentation Index
This testing framework includes multiple documentation files, each serving a specific purpose:
### Primary Documentation
1. **[TEST_CONFIG_README.md](src/ucentral-client/TEST_CONFIG_README.md)** - Complete testing framework guide
- Overview of two-layer validation approach
- Quick start and running tests
- Property tracking system
- Configuration-specific validators
- Test output interpretation
- CI/CD integration
- **Start here** for understanding the testing framework
2. **[SCHEMA_VALIDATOR_README.md](src/ucentral-client/SCHEMA_VALIDATOR_README.md)** - Schema validator detailed documentation
- Standalone validator usage
- Command-line interface
- Programmatic API
- Porting guide for other repositories
- Common validation errors
- **Start here** for schema validation specifics
3. **[MAINTENANCE.md](src/ucentral-client/MAINTENANCE.md)** - Maintenance procedures guide
- Schema update procedures
- Property database update procedures
- Version synchronization
- Testing after updates
- Troubleshooting common issues
- **Start here** when updating schema or property database
4. **[TEST_CONFIG_PARSER_DESIGN.md](TEST_CONFIG_PARSER_DESIGN.md)** - Test framework architecture
- Multi-layer validation design
- Property metadata system (560+ entries)
- Property inspection engine
- Test execution flow diagrams
- Data structures and algorithms
- Output format implementations
- **Start here** for understanding the test framework internals
### Supporting Documentation
5. **[CLAUDE.md](CLAUDE.md)** - Project overview and AI assistant guidance
- Build system architecture
- Platform abstraction layer
- Testing framework integration
- References to all test-related files
## Quick Reference
### Running Tests
**RECOMMENDED: Run tests inside Docker build environment** to eliminate OS-specific issues (works on macOS, Linux, Windows):
```bash
# Build the Docker environment first (if not already built)
make build-host-env
# Run all tests (schema + parser) - RECOMMENDED
docker exec ucentral_client_build_env bash -c \
"cd /root/ols-nos/src/ucentral-client && make test-config-full"
# Run individual test suites
docker exec ucentral_client_build_env bash -c \
"cd /root/ols-nos/src/ucentral-client && make validate-schema"
docker exec ucentral_client_build_env bash -c \
"cd /root/ols-nos/src/ucentral-client && make test-config"
docker exec ucentral_client_build_env bash -c \
"cd /root/ols-nos/src/ucentral-client && make test"
# Generate test reports
docker exec ucentral_client_build_env bash -c \
"cd /root/ols-nos/src/ucentral-client && make test-config-html"
docker exec ucentral_client_build_env bash -c \
"cd /root/ols-nos/src/ucentral-client && make test-config-json"
# Copy report files out of container to view
docker cp ucentral_client_build_env:/root/ols-nos/src/ucentral-client/test-report.html ./
docker cp ucentral_client_build_env:/root/ols-nos/src/ucentral-client/test-report.json ./
```
**Alternative: Run tests locally** (may have OS-specific dependencies):
```bash
# Navigate to test directory
cd src/ucentral-client
# Run all tests (schema + parser)
make test-config-full
# Run individual test suites
make validate-schema # Schema validation only
make test-config # Parser tests only
make test # Unit tests
# Generate test reports
make test-config-html # HTML report (browser-viewable)
make test-config-json # JSON report (machine-readable)
make test-config-junit # JUnit XML (CI/CD integration)
```
**Note:** Running tests in Docker is the preferred method as it provides a consistent, reproducible environment regardless of your host OS (macOS, Linux, Windows).
### Key Files
**Test Implementation:**
- `tests/config-parser/test-config-parser.c` (3445 lines) - Parser test framework with property tracking
- `tests/config-parser/test-stubs.c` (214 lines) - Platform function stubs for testing
- `tests/schema/validate-schema.py` (305 lines) - Standalone schema validator
- `src/ucentral-client/include/config-parser.h` - Test header exposing cfg_parse()
**Configuration Files:**
- `config-samples/ucentral.schema.pretty.json` - uCentral JSON schema (human-readable)
- `config-samples/ols.ucentral.schema.json` - uCentral JSON schema (compact)
- `config-samples/*.json` - Test configuration files (37+ configs)
- `config-samples/*invalid*.json` - Negative test cases
**Build System:**
- `src/ucentral-client/Makefile` - Test targets and build rules
**Production Code (Minimal Changes):**
- `src/ucentral-client/proto.c` - Added TEST_STATIC macro (2 lines changed)
- `src/ucentral-client/include/router-utils.h` - Added extern declarations (minor change)
## Features
### Schema Validation
- Validates JSON structure against official uCentral schema
- Checks property types, required fields, constraints
- Standalone tool, no dependencies on C code
- Exit codes for CI/CD integration
### Parser Testing
- Tests actual C parser implementation
- Multiple output formats (human-readable, HTML, JSON, JUnit XML)
- Interactive HTML reports with detailed analysis
- Machine-readable JSON for automation
- JUnit XML for CI/CD integration
- Validates configuration processing and struct population
- Configuration-specific validators for business logic
- Memory leak detection
- Hardware constraint validation
### Property Tracking System
- Database of 450+ properties and their processing status
- Tracks which properties are parsed by which functions
- Identifies unimplemented features
- Status classification: CONFIGURED, IGNORED, SYSTEM, INVALID, Unknown
- Property usage reports across all test configurations
### Two-Layer Validation Strategy
**Why Both Layers?**
Each layer catches different types of errors:
- **Schema catches**: Type mismatches, missing required fields, constraint violations
- **Parser catches**: Implementation bugs, hardware limits, cross-field dependencies
- **Property tracking catches**: Missing implementations, platform-specific features
See TEST_CONFIG_README.md section "Two-Layer Validation Strategy" for detailed explanation.
## Test Coverage
Current test suite includes:
- 37+ configuration files covering various features
- Positive tests (configs that should parse successfully)
- Negative tests (configs that should fail)
- Feature-specific validators for critical configurations
- Platform stub with 54-port simulation (matches ECS4150 hardware)
### Tested Features
- Port configuration (enable/disable, speed, duplex)
- VLAN configuration and membership
- Spanning Tree Protocol (STP, RSTP, PVST, RPVST)
- IGMP Snooping
- Power over Ethernet (PoE)
- IEEE 802.1X Authentication
- DHCP Relay
- Static routing
- System configuration (timezone, hostname, etc.)
### Platform-Specific Features (Schema-Valid, Platform Implementation Required)
- LLDP (Link Layer Discovery Protocol)
- LACP (Link Aggregation Control Protocol)
- ACLs (Access Control Lists)
- DHCP Snooping
- Loop Detection
- Port Mirroring
- Voice VLAN
These features pass schema validation but show as "Unknown" in property reports, indicating they require platform-specific implementation.
## Changes from Base Repository
The testing framework was added with minimal impact to production code:
### New Files Added
1. `tests/config-parser/test-config-parser.c` - Complete test framework (3445 lines)
2. `tests/config-parser/test-stubs.c` - Platform stubs (214 lines)
3. `tests/schema/validate-schema.py` - Schema validator (305 lines)
4. `src/ucentral-client/include/config-parser.h` - Test header
5. `tests/config-parser/TEST_CONFIG_README.md` - Framework documentation
6. `tests/schema/SCHEMA_VALIDATOR_README.md` - Validator documentation
7. `tests/MAINTENANCE.md` - Maintenance procedures
8. `tests/config-parser/Makefile` - Test build system
9. `tests/tools/` - Property database generation tools
10. `TESTING_FRAMEWORK.md` - This file (documentation index)
11. `TEST_CONFIG_PARSER_DESIGN.md` - Test framework architecture and design
### Modified Files
1. `src/ucentral-client/proto.c` - Added TEST_STATIC macro pattern (2 lines)
```c
// Changed from:
static struct plat_cfg *cfg_parse(...)
// Changed to:
#ifdef UCENTRAL_TESTING
#define TEST_STATIC
#else
#define TEST_STATIC static
#endif
TEST_STATIC struct plat_cfg *cfg_parse(...)
```
This allows test code to call cfg_parse() while keeping it static in production builds.
2. `src/ucentral-client/include/router-utils.h` - Added extern declarations
- Exposed necessary functions for test stubs
3. `src/ucentral-client/Makefile` - Added test targets
```makefile
test-config-parser: # Build parser test tool
test-config: # Run parser tests
validate-schema: # Run schema validation
test-config-full: # Run both schema + parser tests
```
### Configuration Files
- Added `config-samples/cfg_invalid_*.json` - Negative test cases
- Added `config-samples/ECS4150_*.json` - Feature-specific test configs
- No changes to existing valid configurations
### Zero Impact on Production
- Production builds: No functional changes, cfg_parse() remains static
- Test builds: cfg_parse() becomes visible with -DUCENTRAL_TESTING flag
- No ABI changes, no performance impact
- No runtime dependencies added
## Integration with Development Workflow
### During Development
```bash
# 1. Make code changes to proto.c
vi src/ucentral-client/proto.c
# 2. Run tests
cd src/ucentral-client
make test-config-full
# 3. Review property tracking report
# Check for unimplemented features or errors
# 4. If adding new parser function, update property database
vi test-config-parser.c
# Add property entries for new function
# 5. Create test configuration
vi ../../config-samples/test-new-feature.json
# 6. Retest
make test-config-full
```
### Before Committing
```bash
# Ensure all tests pass
cd src/ucentral-client
make clean
make test-config-full
# Check for property database accuracy
make test-config | grep -A 50 "PROPERTY USAGE REPORT"
# Look for unexpected "Unknown" properties
```
### In CI/CD Pipeline
```yaml
test-configurations:
stage: test
script:
- make build-host-env
- docker exec ucentral_client_build_env bash -c
"cd /root/ols-nos/src/ucentral-client && make test-config-full"
artifacts:
paths:
- src/ucentral-client/test-results.txt
```
## Property Database Management
The property database is a critical component tracking which JSON properties are parsed by which functions.
### Database Structure
```c
static struct property_info properties[] = {
{
.path = "interfaces.ethernet.enabled",
.parser_function = "cfg_ethernet_parse()",
.status = PROP_CONFIGURED,
.notes = "Enable/disable ethernet interface"
},
// ... 450+ more entries ...
};
```
### Key Rules
1. **Only track properties for functions that exist in this repository's proto.c**
2. **Remove entries when parser functions are removed**
3. **Add entries immediately when adding new parser functions**
4. **Use accurate function names** - different platforms may use different names
5. **Properties not in database show as "Unknown"** - this is correct for platform-specific features
See MAINTENANCE.md for complete property database update procedures.
## Schema Management
The schema file defines what configurations are structurally valid.
### Schema Location
- `config-samples/ucentral.schema.pretty.json` - Human-readable version (recommended)
- `config-samples/ols.ucentral.schema.json` - Compact version
### Schema Source
Schema is maintained in the external [ols-ucentral-schema](https://github.com/Telecominfraproject/ols-ucentral-schema) repository.
### Schema Updates
When ols-ucentral-schema releases a new version:
1. Copy new schema to config-samples/
2. Run schema validation on all test configs
3. Fix any configs that fail new requirements
4. Document breaking changes
5. Update property database if new properties are implemented
See MAINTENANCE.md section "Schema Update Procedures" for complete process.
## Platform-Specific Repositories
This is the **base repository** providing the core framework. Platform-specific repositories (like Edgecore EC platform) can:
1. **Fork the test framework** - Copy test files to their repository
2. **Extend property database** - Add entries for platform-specific parser functions
3. **Add platform configs** - Create configs testing platform features
4. **Maintain separate tracking** - Properties "Unknown" in base become "CONFIGURED" in platform
### Example: LLDP Property Status
**In base repository (this repo):**
```
Property: interfaces.ethernet.lldp
Status: Unknown (not in property database)
Note: May require platform-specific implementation
```
**In Edgecore EC platform repository:**
```
Property: interfaces.ethernet.lldp
Parser: cfg_ethernet_lldp_parse()
Status: CONFIGURED
Note: Per-interface LLDP transmit/receive configuration
```
Each platform tracks only the properties it actually implements.
## Troubleshooting
### Common Issues
**Tests fail in Docker but pass locally:**
- Check schema file exists in container
- Verify paths are correct in container environment
- Rebuild container: `make build-host-env`
**Property shows as "Unknown" when it should be CONFIGURED:**
- Verify parser function exists: `grep "function_name" proto.c`
- Check property path matches JSON exactly
- Ensure property entry is in properties[] array
**Schema validation fails for valid config:**
- Schema may be outdated - check version
- Config may use vendor extensions not in base schema
- Validate against specific schema: `./validate-schema.py config.json --schema /path/to/schema.json`
See MAINTENANCE.md "Troubleshooting" section for complete troubleshooting guide.
## Documentation Maintenance
When updating the testing framework:
1. **Update relevant documentation:**
- New features → TEST_CONFIG_README.md
- Schema changes → MAINTENANCE.md + SCHEMA_VALIDATOR_README.md
- Property database changes → MAINTENANCE.md + TEST_CONFIG_README.md
- Build changes → CLAUDE.md
2. **Keep version information current:**
- Update compatibility matrices
- Document breaking changes
- Maintain changelogs
3. **Update examples:**
- Refresh command output examples
- Update property counts
- Keep test results current
## Contributing
When contributing to the testing framework:
1. **Maintain property database accuracy** - Update when changing parser functions
2. **Add test configurations** - Create configs demonstrating new features
3. **Update documentation** - Keep docs synchronized with code changes
4. **Follow conventions** - Use established patterns for validators and property entries
5. **Test thoroughly** - Run full test suite before committing
## License
BSD-3-Clause (same as parent project)
## See Also
- **TEST_CONFIG_README.md** - Complete testing framework guide
- **TEST_CONFIG_PARSER_DESIGN.md** - Test framework architecture and design
- **SCHEMA_VALIDATOR_README.md** - Schema validator documentation
- **MAINTENANCE.md** - Update procedures and troubleshooting
- **CLAUDE.md** - Project overview and build system
- **ols-ucentral-schema repository** - Official schema source

View File

@@ -1,97 +0,0 @@
# Configuration Parser Test Suite Makefile
#
# Builds and runs the uCentral configuration parser tests.  Intended to be
# invoked from tests/config-parser/; production objects are reused from
# SRC_DIR via a recursive $(MAKE).

# Every command-style target must be declared phony, or a stray file with
# the same name would silently satisfy it.  ("all" and "help" were missing
# from this list before.)
.PHONY: all test test-config validate-schema test-config-full test-config-html test-config-json test-config-junit clean help

# Compiler flags (exported so the recursive $(MAKE) into SRC_DIR sees them)
export CFLAGS += -Werror -Wall -Wextra

# Paths relative to tests/config-parser/
SRC_DIR = ../../src/ucentral-client
CONFIG_SAMPLES = ../../config-samples
SCHEMA_VALIDATOR = ../schema/validate-schema.py

# Production objects linked into the test binary
SRC_OBJS = $(SRC_DIR)/ucentral-json-parser.o \
           $(SRC_DIR)/ucentral-log.o \
           $(SRC_DIR)/router-utils.o \
           $(SRC_DIR)/base64.o

# Default target
all: test-config-parser

# -DUCENTRAL_TESTING makes proto.c's TEST_STATIC macro expand to nothing,
# exposing the normally-static cfg_parse() to the test harness.
test-config-parser.o: test-config-parser.c
	gcc -c -o $@ $(CFLAGS) -DUCENTRAL_TESTING -I $(SRC_DIR) -I $(SRC_DIR)/include -I ./ $<

proto-test.o: $(SRC_DIR)/proto.c
	gcc -c -o $@ $(CFLAGS) -DUCENTRAL_TESTING -I $(SRC_DIR) -I $(SRC_DIR)/include $<

test-stubs.o: test-stubs.c
	gcc -c -o $@ $(CFLAGS) -DUCENTRAL_TESTING -I $(SRC_DIR) -I $(SRC_DIR)/include $<

# Delegate production objects to the production Makefile (keeps one source
# of truth for their build flags and dependencies).
$(SRC_DIR)/%.o: $(SRC_DIR)/%.c
	$(MAKE) -C $(SRC_DIR) $*.o

# Link test binary.  g++ is used for the link because jsoncpp requires the
# C++ runtime; all test objects themselves are plain C.
test-config-parser: test-config-parser.o proto-test.o test-stubs.o $(SRC_OBJS)
	g++ -o $@ $^ -lcurl -lwebsockets -lcjson -lssl -lcrypto -lpthread -ljsoncpp -lresolv

# Run configuration parser tests
test-config:
	@echo "========= running config parser tests ========="
	$(MAKE) test-config-parser
	LD_LIBRARY_PATH=/usr/local/lib ./test-config-parser $(CONFIG_SAMPLES)
	@echo "========= config parser tests completed ========"

# Run schema validation.  "|| true" is deliberate: it lets the combined
# test-config-full target continue even when some sample configs fail
# schema validation; the validator's own summary still reports failures.
validate-schema:
	@echo "========= running schema validation ========="
	@python3 $(SCHEMA_VALIDATOR) $(CONFIG_SAMPLES) || true
	@echo "========= schema validation completed ========"

# Combined validation: schema + parser
test-config-full: validate-schema test-config
	@echo "========= all validation completed ========"

# Generate HTML test report
test-config-html:
	@echo "========= generating HTML test report ========="
	$(MAKE) test-config-parser
	LD_LIBRARY_PATH=/usr/local/lib ./test-config-parser --html $(CONFIG_SAMPLES) > test-report.html
	@echo "========= HTML report generated: test-report.html ========"

# Generate JSON test report
test-config-json:
	@echo "========= generating JSON test report ========="
	$(MAKE) test-config-parser
	LD_LIBRARY_PATH=/usr/local/lib ./test-config-parser --json $(CONFIG_SAMPLES) > test-report.json
	@echo "========= JSON report generated: test-report.json ========"

# Generate JUnit XML test report
test-config-junit:
	@echo "========= generating JUnit XML test report ========="
	$(MAKE) test-config-parser
	LD_LIBRARY_PATH=/usr/local/lib ./test-config-parser --junit $(CONFIG_SAMPLES) > test-report.xml
	@echo "========= JUnit XML report generated: test-report.xml ========"

# Clean test artifacts (only files this Makefile creates)
clean:
	$(RM) test-config-parser proto-test.o test-config-parser.o test-stubs.o
	$(RM) test-report.html test-report.json test-report.xml test-results.txt
	@echo "Test artifacts cleaned"

# Help target
help:
	@echo "Configuration Parser Test Suite"
	@echo ""
	@echo "Available targets:"
	@echo " test-config - Run configuration parser tests"
	@echo " validate-schema - Run schema validation only"
	@echo " test-config-full - Run both schema validation and parser tests"
	@echo " test-config-html - Generate HTML test report"
	@echo " test-config-json - Generate JSON test report"
	@echo " test-config-junit - Generate JUnit XML test report"
	@echo " clean - Remove test artifacts"
	@echo " help - Show this help message"

File diff suppressed because it is too large Load Diff

View File

@@ -1,28 +0,0 @@
/* SPDX-License-Identifier: BSD-3-Clause */

/*
 * config-parser.h - test-only interface to the configuration parser.
 *
 * Production builds keep cfg_parse() static inside proto.c; when compiled
 * with -DUCENTRAL_TESTING the TEST_STATIC macro exposes the symbol, and
 * this header gives test code a prototype to call it through.
 */
#ifndef CONFIG_PARSER_H
#define CONFIG_PARSER_H

#include <cjson/cJSON.h>
#include <ucentral-platform.h>

#ifdef __cplusplus
extern "C" {
#endif

/**
 * cfg_parse - Parse a JSON configuration object into a platform configuration
 * @config: cJSON object containing the configuration
 *
 * Parses the uCentral configuration JSON and populates a struct plat_cfg
 * with the parsed values. The caller is responsible for freeing the returned
 * structure using plat_config_destroy() and free().
 *
 * Returns: Pointer to allocated plat_cfg on success, NULL on failure
 */
struct plat_cfg *cfg_parse(cJSON *config);

#ifdef __cplusplus
}
#endif

#endif /* CONFIG_PARSER_H */

File diff suppressed because it is too large Load Diff

View File

@@ -1,213 +0,0 @@
/* SPDX-License-Identifier: BSD-3-Clause */
/*
* Test Stubs for Configuration Parser Tests
*
* Provides stub/mock implementations of global variables and functions
* that proto.c references but are not needed for testing cfg_parse()
*/
#include <time.h>
#include <string.h>
#include "ucentral.h"
/*
 * Minimal stub definition for struct blob
 * proto.c uses this type in many functions, but these functions are not
 * actually called during cfg_parse() testing. We just need the type to exist
 * so proto.c can compile.
 */
struct blob {
	char *data;	/* opaque payload pointer; unused by the cfg_parse() tests */
	size_t len;	/* payload length in bytes */
};
/* Stub global variables needed by proto.c */

/*
 * Placeholder client configuration.  proto.c links against this symbol;
 * the values here are inert dummies — the cfg_parse() tests never use
 * them to contact a server (see the stub rationale in the file header).
 */
struct client_config client = {
	.redirector_file = "/tmp/test",
	.redirector_file_dbg = "/tmp/test",
	.ols_client_version_file = "/tmp/test",
	.ols_schema_version_file = "/tmp/test",
	.server = "test.example.com",
	.port = 443,
	.path = "/",
	.serial = "TEST123456",
	.CN = "test",
	.firmware = "1.0.0",
	.devid = "00000000-0000-0000-0000-000000000000",
	.selfsigned = 0,
	.debug = 0
};
/* Connection timestamp referenced by proto.c; never updated in tests. */
time_t conn_time = 0;
/* Metrics configuration referenced by proto.c; zero-initialised. */
struct plat_metrics_cfg ucentral_metrics = {0};
/* Stub platform functions that cfg_parse() needs */

/*
 * Report the number of front-panel ports on the simulated switch.
 *
 * The stub models the ECS4150-54P/54T hardware, which exposes 54 ports
 * (Ethernet0 through Ethernet53).  An earlier revision returned 100,
 * which matched no real model; using 54 means test configurations that
 * use port ranges or the "Ethernet*" wildcard are validated against a
 * realistic 0-53 port range.
 */
int plat_port_num_get(uint16_t *num_of_active_ports)
{
	enum { ECS4150_NUM_PORTS = 54 };

	*num_of_active_ports = ECS4150_NUM_PORTS;
	return 0; /* the stub always succeeds */
}
/*
 * Populate a caller-allocated linked list with synthetic port names
 * (Ethernet0, Ethernet1, ...).  Stops early if the list has fewer than
 * list_size nodes.  Always reports success.
 */
int plat_port_list_get(uint16_t list_size, struct plat_ports_list *ports)
{
	struct plat_ports_list *node = ports;
	uint16_t idx = 0;

	while (idx < list_size && node != NULL) {
		snprintf(node->name, PORT_MAX_NAME_LEN, "Ethernet%u", idx);
		node = node->next;
		idx++;
	}
	return 0; /* the stub always succeeds */
}
/* No-op teardown for platform config objects in the test build. */
void plat_config_destroy(struct plat_cfg *cfg)
{
	(void)cfg; /* nothing was allocated by the stubs, nothing to free */
}
/* Additional platform function stubs needed by proto.c */

/* Report the id of the last saved configuration; the stub never saves one. */
int plat_saved_config_id_get(uint64_t *id)
{
	const uint64_t no_saved_config = 0;

	*id = no_saved_config;
	return 0;
}
/* Fill in fixed placeholder identity strings for the simulated platform. */
int plat_info_get(struct plat_platform_info *info)
{
	/* An all-zero MAC keeps any downstream formatting code happy. */
	strncpy(info->mac, "00:00:00:00:00:00", sizeof(info->mac) - 1);
	strncpy(info->hwsku, "test", sizeof(info->hwsku) - 1);
	strncpy(info->platform, "test", sizeof(info->platform) - 1);
	return 0;
}
/* Report success without touching the metrics configuration. */
int plat_metrics_restore(struct plat_metrics_cfg *cfg)
{
	(void)cfg;
	return 0;
}

/* Poll-cancellation hooks: the test build never starts any pollers. */
void plat_state_poll_stop(void)
{
}

void plat_health_poll_stop(void)
{
}

void plat_telemetry_poll_stop(void)
{
}

void plat_upgrade_poll_stop(void)
{
}
/*
 * Poll-registration hooks.  The test build discards both the callback and
 * the period so no background activity runs while cfg_parse() is tested.
 */
void plat_state_poll(void (*cb)(struct plat_state_info *), int period_sec)
{
	(void)cb; (void)period_sec;
}

void plat_health_poll(void (*cb)(struct plat_health_info *), int period_sec)
{
	(void)cb; (void)period_sec;
}

void plat_telemetry_poll(void (*cb)(struct plat_state_info *), int period_sec)
{
	(void)cb; (void)period_sec;
}
/* Nothing is buffered in the test build, so flushing logs is a no-op. */
void plat_log_flush(void)
{
}

/* Pretend any configuration applies cleanly. */
int plat_config_apply(struct plat_cfg *cfg, uint32_t id)
{
	(void)cfg; (void)id;
	return 0;
}

/* Pretend a previously saved configuration was restored. */
int plat_config_restore(void)
{
	return 0;
}

/* Pretend the configuration was persisted under the given id. */
int plat_config_save(uint64_t id)
{
	(void)id;
	return 0;
}

/* Pretend the metrics configuration was persisted. */
int plat_metrics_save(const struct plat_metrics_cfg *cfg)
{
	(void)cfg;
	return 0;
}
/* No log lines are ever buffered by the stubs, so there is nothing to pop. */
char *plat_log_pop_concatenate(void)
{
	return NULL;
}

/* Pretend the reboot request was accepted. */
int plat_reboot(void)
{
	return 0;
}

/* Pretend the factory-default request was accepted. */
int plat_factory_default(void)
{
	return 0;
}

/* Accept an rtty session request without opening a session. */
int plat_rtty(struct plat_rtty_cfg *rtty_cfg)
{
	(void)rtty_cfg;
	return 0;
}

/* Accept a firmware upgrade request without downloading anything. */
int plat_upgrade(char *uri, char *signature)
{
	(void)uri; (void)signature;
	return 0;
}

/* Discard the upgrade-poll registration; tests never poll. */
void plat_upgrade_poll(int (*cb)(struct plat_upgrade_info *), int period_sec)
{
	(void)cb; (void)period_sec;
}

/* Accept script-execution requests without running anything. */
int plat_run_script(struct plat_run_script *script)
{
	(void)script;
	return 0;
}
/* Report an "unavailable" reboot cause with a placeholder description. */
int plat_reboot_cause_get(struct plat_reboot_cause *cause)
{
	strncpy(cause->desc, "test", sizeof(cause->desc) - 1);
	cause->ts = 0;
	cause->cause = PLAT_REBOOT_CAUSE_UNAVAILABLE;
	return 0;
}
/* Accept any event-callback registration without storing it. */
int plat_event_subscribe(const struct plat_event_callbacks *cbs)
{
	(void)cbs;
	return 0;
}

/* Nothing was subscribed by the stub, so there is nothing to undo. */
void plat_event_unsubscribe(void)
{
}

View File

@@ -1,205 +0,0 @@
# uCentral Schema Validator
A modular, portable tool for validating JSON configuration files against the uCentral schema.
## Features
- **Standalone Operation**: Works independently without external dependencies beyond Python 3 + jsonschema
- **Modular Design**: Easy to port to other repositories (EC, etc.)
- **Multiple Output Formats**: Human-readable and machine-readable JSON
- **Directory Validation**: Validate entire directories of configs at once
- **CI/CD Ready**: Exit codes suitable for automated testing
- **Schema Auto-Detection**: Automatically finds schema in common locations
## Installation
The validator requires Python 3 and the `jsonschema` module:
```bash
# In Docker build environment (already installed)
pip3 install jsonschema
# On host system
pip3 install jsonschema
```
## Usage
### Basic Usage
```bash
# Validate a single file
python3 validate-schema.py config.json
# Validate all configs in a directory
python3 validate-schema.py ../../config-samples/
# Specify custom schema
python3 validate-schema.py config.json --schema path/to/schema.json
```
### Output Formats
```bash
# Human-readable output (default)
python3 validate-schema.py config.json
# Machine-readable JSON output
python3 validate-schema.py config.json --format json > report.json
```
### Via Makefile
```bash
# Schema validation only
make validate-schema
# Configuration parser tests only
make test-config
# Both schema validation + parser tests
make test-config-full
```
## Exit Codes
- `0`: All configurations are valid
- `1`: One or more configurations failed validation
- `2`: File/schema errors (file not found, invalid schema, etc.)
## Output Examples
### Valid Configuration
```
✓ Valid: cfg0.json
```
### Invalid Configuration
```
✗ Invalid: bad-config.json
Found 2 validation error(s):
Error 1:
Path: $.ethernet
Message: {'speed': 1000} is not of type 'array'
Validator: type
Error 2:
Path: $.interfaces[0].vlan.id
Message: 5000 is greater than the maximum of 4094
Validator: maximum
```
### Directory Summary
```
Summary: 37 file(s) checked, 34 valid, 3 invalid
```
## Integration with test-config-parser.c
The test-config-parser.c tool automatically calls the schema validator before running parser tests. This provides two-layer validation:
1. **Layer 1 (Schema)**: Structural validation - is the JSON valid per schema?
2. **Layer 2 (Parser)**: Implementation validation - can proto.c process it?
## Porting to Other Repositories
The validator is designed to be repository-agnostic. To port to another repository:
1. Copy `validate-schema.py` to the target repository
2. Ensure the schema file is in one of the search paths, or specify with `--schema`
3. Update Makefile targets if desired
### Default Schema Search Paths
Relative to the validator script location:
- `../../config-samples/ucentral.schema.pretty.json`
- `../../config-samples/ucentral.schema.json`
- `../../../config-samples/ucentral.schema.pretty.json`
- `./ucentral.schema.json`
### Example: Porting to EC Repository
```bash
# Copy validator
cp validate-schema.py /path/to/ec-repo/src/ucentral-client/
# Use with EC's schema location
cd /path/to/ec-repo/src/ucentral-client
python3 validate-schema.py --schema ../../config-tests/schema.json ../../config-tests/
```
## Python API
The `SchemaValidator` class can be imported and used programmatically:
```python
from validate_schema import SchemaValidator
# Initialize validator
validator = SchemaValidator(schema_path="/path/to/schema.json")
# Validate a file
is_valid, errors = validator.validate_file("config.json")
# Validate a config dict
config = {"uuid": 123, "ethernet": []}
is_valid, errors = validator.validate_config(config)
# Validate directory
results = validator.validate_directory("/path/to/configs")
```
## Common Validation Errors
### Type Errors
```
$.ethernet is not of type 'array'
```
**Fix**: Ensure `ethernet` is an array: `"ethernet": [...]`
### Out of Range
```
$.interfaces[0].vlan.id is greater than the maximum of 4094
```
**Fix**: VLAN IDs must be between 1-4094
### Required Property Missing
```
'uuid' is a required property
```
**Fix**: Add the required field: `"uuid": 1234567890`
### Additional Properties Not Allowed
```
Additional properties are not allowed ('unknown_field' was unexpected)
```
**Fix**: Remove the field or check spelling
## Files
- **validate-schema.py**: Standalone schema validator script (305 lines)
- **Makefile**: Build targets for schema validation
- **test-config-parser.c**: Enhanced with schema validation integration
- **SCHEMA_VALIDATOR_README.md**: This documentation
## See Also
- [TEST_CONFIG_README.md](TEST_CONFIG_README.md) - Configuration parser testing guide
- [ucentral.schema.pretty.json](../../config-samples/ucentral.schema.pretty.json) - Official uCentral schema
## License
BSD-3-Clause

View File

@@ -1,275 +0,0 @@
#!/usr/bin/env python3
"""
uCentral Configuration Schema Validator
A modular, standalone tool for validating JSON configuration files against
the uCentral schema. Can be used independently or integrated into test suites.
Usage:
# Validate a single file
./validate-schema.py config.json
# Validate with specific schema
./validate-schema.py config.json --schema path/to/schema.json
# Validate directory of configs
./validate-schema.py config-dir/
# Machine-readable JSON output
./validate-schema.py config.json --format json
# Exit code: 0 = all valid, 1 = validation errors, 2 = file/schema errors
Author: Generated for OLS uCentral Client
License: BSD-3-Clause
"""
import sys
import json
import argparse
import os
from pathlib import Path
from typing import Dict, List, Tuple, Optional
try:
import jsonschema
from jsonschema import Draft7Validator, validators
except ImportError:
print("ERROR: jsonschema module not found. Install with: pip3 install jsonschema", file=sys.stderr)
sys.exit(2)
class SchemaValidator:
    """Modular schema validator for uCentral configurations.

    Designed to be portable across repositories: beyond a best-effort
    search for the schema near this script, it has no dependency on a
    particular file layout.
    """

    def __init__(self, schema_path: Optional[str] = None):
        """Load the schema and build a Draft-7 validator.

        Args:
            schema_path: Path to a JSON schema file.  When None, the schema
                is searched for in common locations relative to this script.
        """
        self.schema_path = schema_path
        self.schema = None
        self.validator = None
        self._load_schema()

    def _find_default_schema(self) -> Optional[str]:
        """Return the first schema found in well-known locations, else None."""
        here = Path(__file__).parent
        candidates = (
            here / "../../config-samples/ucentral.schema.pretty.json",
            here / "../../config-samples/ucentral.schema.json",
            here / "../../../config-samples/ucentral.schema.pretty.json",
            here / "ucentral.schema.json",
        )
        return next(
            (str(c.resolve()) for c in candidates if c.exists()),
            None,
        )

    def _load_schema(self):
        """Read the schema file and construct the jsonschema validator."""
        if self.schema_path is None:
            self.schema_path = self._find_default_schema()
        if self.schema_path is None:
            raise FileNotFoundError(
                "Could not find schema file. Please specify --schema path"
            )
        try:
            with open(self.schema_path, 'r') as f:
                self.schema = json.load(f)
        except json.JSONDecodeError as e:
            raise ValueError(f"Invalid JSON in schema file {self.schema_path}: {e}")
        except FileNotFoundError:
            raise FileNotFoundError(f"Schema file not found: {self.schema_path}")
        # Draft-7 is the dialect the uCentral schema targets.
        self.validator = Draft7Validator(self.schema)

    def validate_file(self, config_path: str) -> Tuple[bool, List[Dict]]:
        """Validate a single configuration file against the schema."""
        try:
            with open(config_path, 'r') as f:
                parsed = json.load(f)
        except json.JSONDecodeError as e:
            # Unparsable JSON is reported in the same error-dict shape as
            # schema violations so callers need only one code path.
            return False, [{
                'path': '$',
                'message': f'Invalid JSON: {e}',
                'validator': 'json_parse'
            }]
        except FileNotFoundError:
            return False, [{
                'path': '$',
                'message': f'File not found: {config_path}',
                'validator': 'file_access'
            }]
        return self.validate_config(parsed)

    def validate_config(self, config: Dict) -> Tuple[bool, List[Dict]]:
        """Validate an in-memory configuration object against the schema."""
        def dollar_path(parts):
            # jsonschema reports locations as sequences of keys/indices;
            # render them as a "$."-rooted dotted path ("$" for the root).
            return '$.' + '.'.join(str(p) for p in parts) if parts else '$'

        findings = []
        # Sorting by string form keeps the error order deterministic.
        for err in sorted(self.validator.iter_errors(config), key=str):
            findings.append({
                'path': dollar_path(err.absolute_path),
                'message': err.message,
                'validator': err.validator,
                'schema_path': ('.'.join(str(p) for p in err.absolute_schema_path)
                                if err.absolute_schema_path else '$')
            })
        return not findings, findings

    def validate_directory(self, dir_path: str, pattern: str = "*.json") -> Dict[str, Tuple[bool, List[Dict]]]:
        """Validate every matching JSON file in a directory.

        Files whose name contains "schema" are skipped so the schema is
        never validated against itself.
        """
        base = Path(dir_path)
        if not base.is_dir():
            raise NotADirectoryError(f"Not a directory: {dir_path}")
        results = {}
        for candidate in sorted(base.glob(pattern)):
            if 'schema' in candidate.name.lower():
                continue
            results[candidate.name] = self.validate_file(str(candidate))
        return results
def format_human_output(filename: str, is_valid: bool, errors: List[Dict]) -> str:
    """Format validation results for one file in human-readable form.

    Args:
        filename: Name of the validated file, shown in the header line.
        is_valid: True when the file passed validation.
        errors: Error dicts as produced by SchemaValidator
            (keys: 'path', 'message', 'validator', ...).

    Returns:
        A multi-line report string.
    """
    output = []
    if is_valid:
        # Bug fix: the filename parameter was previously ignored and the
        # literal placeholder "(unknown)" was printed instead.
        output.append(f"✓ Valid: {filename}")
    else:
        output.append(f"✗ Invalid: {filename}")
        output.append(f"  Found {len(errors)} validation error(s):")
        for i, error in enumerate(errors, 1):
            output.append(f"\n  Error {i}:")
            output.append(f"    Path: {error['path']}")
            output.append(f"    Message: {error['message']}")
            if error.get('validator'):
                output.append(f"    Validator: {error['validator']}")
    return '\n'.join(output)
def format_json_output(results: Dict[str, Tuple[bool, List[Dict]]]) -> str:
    """Render the full validation result set as pretty-printed JSON.

    Args:
        results: Mapping of file name -> (is_valid, errors).

    Returns:
        JSON text with a 'summary' block (total/valid/invalid counts)
        and a per-file 'results' block.
    """
    valid_count = sum(1 for ok, _ in results.values() if ok)
    report = {
        'summary': {
            'total': len(results),
            'valid': valid_count,
            'invalid': len(results) - valid_count,
        },
        'results': {
            name: {'valid': ok, 'errors': errs}
            for name, (ok, errs) in results.items()
        },
    }
    return json.dumps(report, indent=2)
def main():
    """Standalone CLI entry point.

    Parses arguments, validates the given file or directory against the
    schema, prints a report in the requested format, and returns the
    process exit code: 0 when everything validated, 1 when any file
    failed validation, 2 on usage/environment errors.
    """
    arg_parser = argparse.ArgumentParser(
        description='Validate uCentral JSON configurations against schema',
        formatter_class=argparse.RawDescriptionHelpFormatter,
        epilog="""
Examples:
%(prog)s config.json
%(prog)s config.json --schema my-schema.json
%(prog)s config-samples/
%(prog)s config-samples/ --format json > report.json
"""
    )
    arg_parser.add_argument('path',
                            help='Configuration file or directory to validate')
    arg_parser.add_argument('--schema', '-s',
                            help='Path to JSON schema file (auto-detected if not specified)')
    arg_parser.add_argument('--format', '-f',
                            choices=['human', 'json'],
                            default='human',
                            help='Output format (default: human)')
    arg_parser.add_argument('--pattern', '-p',
                            default='*.json',
                            help='File pattern for directory validation (default: *.json)')
    args = arg_parser.parse_args()

    # Build the validator first; a broken or missing schema is fatal.
    try:
        validator = SchemaValidator(args.schema)
    except (FileNotFoundError, ValueError) as exc:
        print(f"ERROR: {exc}", file=sys.stderr)
        return 2

    target = Path(args.path)
    if not target.exists():
        print(f"ERROR: Path not found: {args.path}", file=sys.stderr)
        return 2

    # Dispatch on target type; results maps file name -> (is_valid, errors).
    results = {}
    if target.is_file():
        results[target.name] = validator.validate_file(args.path)
    elif target.is_dir():
        try:
            results = validator.validate_directory(args.path, args.pattern)
        except NotADirectoryError as exc:
            print(f"ERROR: {exc}", file=sys.stderr)
            return 2
    else:
        print(f"ERROR: Path is neither file nor directory: {args.path}", file=sys.stderr)
        return 2

    # Emit the report in the chosen format.
    if args.format == 'json':
        print(format_json_output(results))
    else:
        for name, (ok, errs) in results.items():
            print(format_human_output(name, ok, errs))
            print()  # Blank line between files
        # Summary for multiple files
        if len(results) > 1:
            ok_count = sum(1 for ok, _ in results.values() if ok)
            print(f"Summary: {len(results)} file(s) checked, {ok_count} valid, {len(results) - ok_count} invalid")

    # Exit code: 0 if all valid, 1 if any invalid
    return 0 if all(ok for ok, _ in results.values()) else 1
# Script entry: propagate main()'s return value as the process exit status.
if __name__ == '__main__':
    sys.exit(main())

Some files were not shown because too many files have changed in this diff Show More