Mirror of https://github.com/Telecominfraproject/ols-ucentral-client.git
Synced 2026-01-27 02:21:38 +00:00

Merge pull request #24 from Telecominfraproject/OLS-915-schema-verification-enhancement

[OLS-915] Schema Verification Enhancement: enhanced schema verification and added 'make clean' before each test run to force a correct binary rebuild when switching between stub and platform modes.
@@ -3,14 +3,14 @@
 ## TL;DR
 
 ```bash
-# Test all configs with human-readable output
+# Test all configs with human-readable output (default)
 ./run-config-tests.sh
 
 # Generate HTML report
-./run-config-tests.sh html
+./run-config-tests.sh --format html
 
-# Test single config
-./run-config-tests.sh human ECS4150-TM.json
+# Test single config with HTML output
+./run-config-tests.sh --format html cfg0.json
 
 # Results are in: output/
 ```
@@ -20,17 +20,27 @@
 ### Test All Configurations
 
 ```bash
-./run-config-tests.sh human # Console output with colors
-./run-config-tests.sh html # Interactive HTML report
-./run-config-tests.sh json # Machine-readable JSON
+# Stub mode (default - fast, proto.c parsing only)
+./run-config-tests.sh # Console output with colors (default)
+./run-config-tests.sh --format html # Interactive HTML report
+./run-config-tests.sh --format json # Machine-readable JSON
+
+# Platform mode (integration testing with platform code)
+./run-config-tests.sh --mode platform # Console output
+./run-config-tests.sh --mode platform --format html # HTML report
 ```
 
 ### Test Single Configuration
 
 ```bash
-./run-config-tests.sh human cfg0.json
-./run-config-tests.sh html ECS4150-ACL.json
-./run-config-tests.sh json ECS4150-TM.json
+# Stub mode (default)
+./run-config-tests.sh cfg0.json # Human output (default)
+./run-config-tests.sh --format html cfg0.json # HTML report
+./run-config-tests.sh --format json cfg0.json # JSON output
+
+# Platform mode
+./run-config-tests.sh --mode platform cfg0.json # Human output
+./run-config-tests.sh --mode platform --format html cfg0.json # HTML report
 ```
 
 ### View Results
@@ -106,7 +116,7 @@ ls config-samples/*.json
 ### Example Pipeline
 ```yaml
 - name: Run tests
-  run: ./run-config-tests.sh json
+  run: ./run-config-tests.sh --format json
 - name: Check results
   run: |
     if [ $? -eq 0 ]; then
@@ -141,14 +151,34 @@ ECS4150_VLAN.json # VLAN configuration
 ✅ Feature coverage (implemented vs documented features)
 ✅ Error handling (invalid configs, missing fields)
 
+## Test Modes
+
+### Stub Mode (Default - Fast)
+- Tests proto.c parsing only
+- Uses simple platform stubs
+- Shows base properties only
+- Execution time: ~30 seconds
+- Use for: Quick validation, CI/CD
+
+### Platform Mode (Integration)
+- Tests proto.c + platform code (plat-gnma.c)
+- Uses platform implementation with mocks
+- Shows base AND platform properties
+- Tracks hardware application functions
+- Execution time: ~45 seconds
+- Use for: Platform-specific validation
+
 ## Quick Reference
 
 | Task | Command |
 |------|---------|
-| Test everything | `./run-config-tests.sh` |
-| HTML report | `./run-config-tests.sh html` |
-| JSON output | `./run-config-tests.sh json` |
-| Single config | `./run-config-tests.sh human cfg0.json` |
+| Test everything (stub) | `./run-config-tests.sh` |
+| Test everything (platform) | `./run-config-tests.sh --mode platform` |
+| HTML report | `./run-config-tests.sh --format html` |
+| JSON output | `./run-config-tests.sh --format json` |
+| Single config | `./run-config-tests.sh cfg0.json` |
+| Single config HTML | `./run-config-tests.sh -f html cfg0.json` |
+| Platform mode single config | `./run-config-tests.sh -m platform cfg0.json` |
 | View HTML | `open output/test-report.html` |
 | View results | `cat output/test-results.txt` |
 | Parse JSON | `cat output/test-report.json \| jq` |
@@ -169,7 +169,7 @@ The repository includes a comprehensive testing framework for configuration vali
 ./run-config-tests.sh
 
 # Generate HTML report
-./run-config-tests.sh html
+./run-config-tests.sh --format html
 
 # Or run tests directly in the tests directory
 cd tests/config-parser
@@ -42,7 +42,7 @@ This testing framework includes multiple documentation files, each serving a spe
 
 4. **[TEST_CONFIG_PARSER_DESIGN.md](TEST_CONFIG_PARSER_DESIGN.md)** - Test framework architecture
    - Multi-layer validation design
-   - Property metadata system (628 properties)
+   - Property metadata system (398 schema properties)
    - Property inspection engine
    - Test execution flow diagrams
    - Data structures and algorithms
@@ -64,17 +64,18 @@ This testing framework includes multiple documentation files, each serving a spe
 **RECOMMENDED: Use the test runner script** (handles Docker automatically):
 
 ```bash
-# Test all configurations (human-readable output)
-./run-config-tests.sh
+# Test all configurations in STUB mode (default - fast, proto.c only)
+./run-config-tests.sh # Human-readable output
+./run-config-tests.sh --format html # HTML report
+./run-config-tests.sh --format json # JSON report
 
-# Generate HTML report
-./run-config-tests.sh html
-
-# Generate JSON report
-./run-config-tests.sh json
+# Test all configurations in PLATFORM mode (integration testing)
+./run-config-tests.sh --mode platform # Human-readable output
+./run-config-tests.sh --mode platform --format html # HTML report
 
 # Test single configuration
-./run-config-tests.sh human cfg0.json
+./run-config-tests.sh cfg0.json # Stub mode
+./run-config-tests.sh --mode platform cfg0.json # Platform mode
 ```
 
 **Alternative: Run tests directly in Docker** (manual Docker management):
@@ -83,21 +84,29 @@ This testing framework includes multiple documentation files, each serving a spe
 # Build the Docker environment first (if not already built)
 make build-host-env
 
-# Run all tests (schema + parser) - RECOMMENDED
+# Run all tests in STUB mode (default - fast)
 docker exec ucentral_client_build_env bash -c \
   "cd /root/ols-nos/tests/config-parser && make test-config-full"
 
-# Run individual test suites
+# Run all tests in PLATFORM mode (integration)
+docker exec ucentral_client_build_env bash -c \
+  "cd /root/ols-nos/tests/config-parser && make test-config-full USE_PLATFORM=brcm-sonic"
+
+# Run individual test suites (stub mode)
 docker exec ucentral_client_build_env bash -c \
   "cd /root/ols-nos/tests/config-parser && make validate-schema"
 
 docker exec ucentral_client_build_env bash -c \
   "cd /root/ols-nos/tests/config-parser && make test-config"
 
-# Generate test reports
+# Generate test reports (stub mode)
 docker exec ucentral_client_build_env bash -c \
   "cd /root/ols-nos/tests/config-parser && make test-config-html"
 
+# Generate test reports (platform mode)
+docker exec ucentral_client_build_env bash -c \
+  "cd /root/ols-nos/tests/config-parser && make test-config-html USE_PLATFORM=brcm-sonic"
+
 # Copy report files out of container to view
 docker cp ucentral_client_build_env:/root/ols-nos/tests/config-parser/test-report.html output/
 ```
@@ -108,17 +117,24 @@ docker cp ucentral_client_build_env:/root/ols-nos/tests/config-parser/test-repor
 # Navigate to test directory
 cd tests/config-parser
 
-# Run all tests (schema + parser)
+# Run all tests in STUB mode (default)
 make test-config-full
 
+# Run all tests in PLATFORM mode
+make test-config-full USE_PLATFORM=brcm-sonic
+
 # Run individual test suites
 make validate-schema # Schema validation only
 make test-config # Parser tests only
 
-# Generate test reports
+# Generate test reports (stub mode)
 make test-config-html # HTML report (browser-viewable)
 make test-config-json # JSON report (machine-readable)
 make test-config-junit # JUnit XML (CI/CD integration)
+
+# Generate test reports (platform mode)
+make test-config-html USE_PLATFORM=brcm-sonic
+make test-config-json USE_PLATFORM=brcm-sonic
 ```
 
 **Note:** Running tests in Docker is the preferred method as it provides a consistent, reproducible environment regardless of your host OS (macOS, Linux, Windows).
@@ -164,12 +180,12 @@ make test-config-junit # JUnit XML (CI/CD integration)
 - Hardware constraint validation
 
 ### Property Tracking System
-- Database of 628 properties and their processing status
+- Database of all schema properties and their implementation status (398 canonical properties)
 - Tracks which properties are parsed by which functions
 - Identifies unimplemented features
 - Status classification: CONFIGURED, IGNORED, SYSTEM, INVALID, Unknown
 - Property usage reports across all test configurations
-- 199 properties (32%) with line number references
+- Properties with line numbers are implemented, line_number=0 means not yet implemented
 
 ### Two-Layer Validation Strategy
@@ -297,7 +313,7 @@ vi config-samples/test-new-feature.json
 ./run-config-tests.sh
 
 # Generate full HTML report for review
-./run-config-tests.sh html
+./run-config-tests.sh --format html
 open output/test-report.html
 
 # Check for property database accuracy
@@ -310,7 +326,7 @@ open output/test-report.html
 test-configurations:
   stage: test
   script:
-    - ./run-config-tests.sh json
+    - ./run-config-tests.sh --format json
   artifacts:
     paths:
       - output/test-report.json
@@ -333,7 +349,7 @@ static struct property_metadata properties[] = {
         .source_line = 1119,
         .notes = "Enable/disable ethernet interface"
     },
-    // ... 628 total entries (199 with line numbers) ...
+    // ... entries for all 398 schema properties (with line numbers for implemented properties) ...
 };
 ```
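Since each database entry records the proto.c line that parses the property (with 0 meaning not yet implemented), parser coverage can be audited mechanically. The following is a minimal, illustrative sketch of such an audit; the helper itself is not part of the framework and assumes only the `.source_line = N` field layout shown in the snippet above:

```python
#!/usr/bin/env python3
"""Illustrative coverage audit for a generated property database
(e.g. tests/config-parser/property-database-base.c). Assumes entries
carry a `.source_line = N` field, with 0 meaning "not yet implemented"."""
import re
import sys

def audit(database_path: str) -> None:
    text = open(database_path).read()
    # Collect every .source_line value in the C initializer list
    lines = [int(n) for n in re.findall(r'\.source_line\s*=\s*(\d+)', text)]
    implemented = sum(1 for n in lines if n > 0)
    print(f"{len(lines)} properties in database")
    print(f"{implemented} implemented (source_line > 0)")
    print(f"{len(lines) - implemented} not yet implemented (source_line = 0)")

if __name__ == '__main__':
    audit(sys.argv[1])
```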
@@ -18,9 +18,9 @@ The framework validates configurations through three complementary layers:
 
 ### Layer 3: Property Tracking
 - Deep recursive inspection of JSON tree to classify every property
-- Maps properties to property metadata database (628 properties total)
+- Maps properties to property metadata database (398 schema properties)
 - Tracks which properties are CONFIGURED, IGNORED, INVALID, UNKNOWN, etc.
-- 199 properties (32%) include line number references to proto.c
+- Properties with line numbers are implemented in proto.c; line_number=0 means not yet implemented
 
 ## 2. **Property Metadata System**
@@ -36,12 +36,12 @@ struct property_metadata {
 };
 ```
 
-**Database contains 628 entries** documenting:
-- Which properties are actively parsed (PROP_CONFIGURED)
+**Database contains entries for all 398 schema properties** documenting:
+- Which properties are actively parsed (PROP_CONFIGURED with line numbers)
+- Which are not yet implemented (line_number=0)
 - Which are intentionally ignored (PROP_IGNORED)
 - Which need platform implementation (PROP_UNKNOWN)
 - Which are structural containers (PROP_SYSTEM)
-- Line numbers for 199 properties (32%) showing exact parsing locations
 
 ### Property Status Classification
 - **PROP_CONFIGURED**: Successfully processed by parser
@@ -293,7 +293,7 @@ The design elegantly separates concerns:
 4. **Validation layer** verifies specific features (optional validators)
 5. **Reporting layer** generates multiple output formats
 
-The property metadata database is the **crown jewel** - it documents the implementation status of 560+ configuration properties, enabling automated detection of unimplemented features and validation of parser coverage.
+The property metadata database is the **crown jewel** - it documents the implementation status of all 398 schema properties, enabling automated detection of unimplemented features and validation of parser coverage.
 
 ## Related Documentation
@@ -32,11 +32,16 @@
 ### Basic Syntax
 
 ```bash
-./run-config-tests.sh [format] [config-file]
+./run-config-tests.sh [OPTIONS] [config-file]
 ```
 
-**Parameters:**
-- `format` (optional): Output format - `html`, `json`, or `human` (default: `human`)
+**Options:**
+- `-f, --format FORMAT`: Output format - `html`, `json`, or `human` (default: `human`)
+- `-m, --mode MODE`: Test mode - `stub` or `platform` (default: `stub`)
+- `-p, --platform NAME`: Platform name for platform mode (default: `brcm-sonic`)
+- `-h, --help`: Show help message
+
+**Arguments:**
 - `config-file` (optional): Specific config file to test (default: test all configs)
 
 ### Examples
@@ -47,27 +52,32 @@
 # Human-readable output (default)
 ./run-config-tests.sh
 
-# Human-readable output (explicit)
-./run-config-tests.sh human
-
 # HTML report
-./run-config-tests.sh html
+./run-config-tests.sh --format html
+# OR short form:
+./run-config-tests.sh -f html
 
 # JSON output
-./run-config-tests.sh json
+./run-config-tests.sh --format json
+# OR short form:
+./run-config-tests.sh -f json
 ```
 
 #### Test Single Configuration
 
 ```bash
-# Test single config with human output
-./run-config-tests.sh human cfg0.json
+# Test single config with human output (default)
+./run-config-tests.sh cfg0.json
 
 # Test single config with HTML report
-./run-config-tests.sh html ECS4150-TM.json
+./run-config-tests.sh --format html cfg0.json
+# OR short form:
+./run-config-tests.sh -f html cfg0.json
 
 # Test single config with JSON output
-./run-config-tests.sh json ECS4150-ACL.json
+./run-config-tests.sh --format json cfg0.json
+# OR short form:
+./run-config-tests.sh -f json cfg0.json
 ```
 
 ## Output Files
@@ -210,7 +220,7 @@ The script uses exit codes for CI/CD integration:
 
 **CI/CD Example:**
 ```bash
-./run-config-tests.sh json
+./run-config-tests.sh --format json
 if [ $? -eq 0 ]; then
     echo "All tests passed!"
 else
@@ -321,7 +331,7 @@ make build-host-env
 # OR let script build it automatically
 
 # Run tests (script provides better output management)
-./run-config-tests.sh html
+./run-config-tests.sh --format html
 ```
 
 ### With CI/CD
@@ -338,7 +348,7 @@ jobs:
     steps:
       - uses: actions/checkout@v3
       - name: Run config tests
-        run: ./run-config-tests.sh json
+        run: ./run-config-tests.sh --format json
       - name: Upload test results
         uses: actions/upload-artifact@v3
         with:
@@ -352,7 +362,7 @@ jobs:
 test-configs:
   stage: test
   script:
-    - ./run-config-tests.sh json
+    - ./run-config-tests.sh --format json
   artifacts:
     paths:
       - output/test-report.json
@@ -364,7 +374,7 @@ test-configs:
 ```groovy
 stage('Test Configurations') {
     steps {
-        sh './run-config-tests.sh html'
+        sh './run-config-tests.sh --format html'
         publishHTML([
             reportDir: 'output',
             reportFiles: 'test-report.html',
@@ -382,7 +392,7 @@ stage('Test Configurations') {
 # .git/hooks/pre-commit
 
 echo "Running configuration tests..."
-./run-config-tests.sh human
+./run-config-tests.sh
 
 if [ $? -ne 0 ]; then
     echo "Tests failed. Commit aborted."
@@ -406,7 +416,7 @@ OUTPUT_DIR="$SCRIPT_DIR/my-custom-output"
 ```bash
 # Test all ACL configs
 for config in config-samples/*ACL*.json; do
-    ./run-config-tests.sh json "$(basename $config)"
+    ./run-config-tests.sh --format json "$(basename $config)"
 done
 ```
@@ -424,7 +434,7 @@ wait
 ```bash
 # Generate all format reports
 for format in human html json; do
-    ./run-config-tests.sh $format
+    ./run-config-tests.sh --format $format
 done
 
 # Timestamp reports
@@ -240,8 +240,8 @@ run_tests() {
         exit 1
     fi
 
-    # Build test binary with appropriate mode
-    build_cmd="cd $BUILD_DIR && make test-config-parser $use_platform_flag"
+    # Build test binary with appropriate mode (clean first to ensure correct flags)
+    build_cmd="cd $BUILD_DIR && make clean && make test-config-parser $use_platform_flag"
 
     case "$FORMAT" in
         html)
@@ -266,17 +266,17 @@ run_tests() {
     case "$FORMAT" in
         html)
            output_file="test-report.html"
-            test_cmd="cd $BUILD_DIR && make test-config-html $use_platform_flag"
+            test_cmd="cd $BUILD_DIR && make clean && make test-config-html $use_platform_flag"
            copy_files=("$output_file")
            ;;
        json)
            output_file="test-report.json"
-            test_cmd="cd $BUILD_DIR && make test-config-json $use_platform_flag"
+            test_cmd="cd $BUILD_DIR && make clean && make test-config-json $use_platform_flag"
            copy_files=("$output_file")
            ;;
        human)
            output_file="test-results.txt"
-            test_cmd="cd $BUILD_DIR && make test-config-full $use_platform_flag 2>&1 | tee $BUILD_DIR/$output_file"
+            test_cmd="cd $BUILD_DIR && make clean && make test-config-full $use_platform_flag 2>&1 | tee $BUILD_DIR/$output_file"
            copy_files=("$output_file")
            ;;
     esac
@@ -62,15 +62,13 @@ SCHEMA_SOURCE="../../config-samples/ucentral.schema.pretty.json"
 # SCHEMA_SOURCE="../../ols-ucentral-schema/schema"
 
 # 2. Extract all properties from schema
-# For JSON schema file:
-python3 -c "import json; print('\n'.join(sorted(set(
-    k for d in json.load(open('$SCHEMA_SOURCE'))['properties'].values()
-    for k in d.get('properties', {}).keys()
-))))" | sed 's/\[\]$//' > /tmp/all-schema-properties.txt
+# For JSON schema file (recommended):
+python3 extract-schema-properties.py ../../config-samples/ucentral.schema.pretty.json \
+    2>/dev/null > /tmp/all-schema-properties.txt
 
 # For YAML schema directory (if using ols-ucentral-schema repo):
 # python3 extract-schema-properties.py ../../ols-ucentral-schema/schema ucentral.yml \
-#     2>/dev/null | sed 's/\[\]$//' > /tmp/all-schema-properties.txt
+#     2>/dev/null > /tmp/all-schema-properties.txt
 
 # 3. Generate base database (proto.c)
 python3 generate-database-from-schema.py \
@@ -78,21 +76,17 @@ python3 generate-database-from-schema.py \
     /tmp/all-schema-properties.txt \
     /tmp/base-database-new.c
 
-# 4. Fix array name
-sed -i '' 's/property_database\[\]/base_property_database[]/' \
-    /tmp/base-database-new.c
-
-# 5. Generate platform database (plat-gnma.c)
+# 4. Generate platform database (plat-gnma.c)
 python3 generate-platform-database-from-schema.py \
     ../../src/ucentral-client/platform/brcm-sonic/plat-gnma.c \
     /tmp/all-schema-properties.txt \
     /tmp/platform-database-new.c
 
-# 6. Install new databases
+# 5. Install new databases
 cp /tmp/base-database-new.c ../config-parser/property-database-base.c
 cp /tmp/platform-database-new.c ../config-parser/property-database-platform-brcm-sonic.c
 
-# 7. Test in Docker
+# 6. Test in Docker
 docker exec ucentral_client_build_env bash -c \
     "cd /root/ols-nos/tests/config-parser && make clean && make test-config-full"
 ```
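The extraction step walks the schema's `properties` tree recursively and emits one dotted path per property, with `[]` marking array items; this is the same path convention the validator's `_extract_schema_properties` method (shown later in this change) uses. A minimal standalone sketch of that logic, simplified to skip `$ref` resolution:

```python
#!/usr/bin/env python3
"""Illustrative sketch of the schema property extraction step.
Simplified: the real extraction (see _extract_schema_properties below)
also resolves #/$defs/... references."""
import json
import sys

def walk(schema: dict, path: str = "") -> set:
    props = set()
    for name, sub in schema.get('properties', {}).items():
        prop_path = f"{path}.{name}" if path else name
        props.add(prop_path)
        props |= walk(sub, prop_path)
    if 'items' in schema:
        # Array items get the [] suffix: ethernet -> ethernet[].speed
        props |= walk(schema['items'], f"{path}[]" if path else "[]")
    return props

if __name__ == '__main__':
    with open(sys.argv[1]) as f:  # e.g. ucentral.schema.pretty.json
        schema = json.load(f)
    print('\n'.join(sorted(walk(schema))))
```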
@@ -127,16 +121,13 @@ Generates `property-database-base.c` from proto.c:
 ```bash
 cd tests/tools
 
-# Extract schema properties from included JSON file (strip trailing [])
-python3 -c "import json; print('\n'.join(sorted(set(
-    k for d in json.load(open('../../config-samples/ucentral.schema.pretty.json'))['properties'].values()
-    for k in d.get('properties', {}).keys()
-))))" | sed 's/\[\]$//' > /tmp/schema-props.txt
+# Extract schema properties from included JSON file
+python3 extract-schema-properties.py ../../config-samples/ucentral.schema.pretty.json \
+    2>/dev/null > /tmp/schema-props.txt
 
 # OR if using YAML from ols-ucentral-schema repository:
-# python3 extract-schema-properties.py \
-#     ../../ols-ucentral-schema/schema ucentral.yml 2>/dev/null | \
-#     sed 's/\[\]$//' > /tmp/schema-props.txt
+# python3 extract-schema-properties.py ../../ols-ucentral-schema/schema ucentral.yml \
+#     2>/dev/null > /tmp/schema-props.txt
 
 # Generate database
 python3 generate-database-from-schema.py \
@@ -144,8 +135,7 @@ python3 generate-database-from-schema.py \
     /tmp/schema-props.txt \
     /tmp/base-db.c
 
-# Fix array name and install
-sed -i '' 's/property_database\[\]/base_property_database[]/' /tmp/base-db.c
+# Install database
 cp /tmp/base-db.c ../config-parser/property-database-base.c
 ```
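Conceptually, the generator cross-references each schema property against proto.c and records where (if anywhere) the parser handles it. The sketch below illustrates that idea only; the output field layout and the plain substring matching are simplifications for illustration, not the real generator's behavior:

```python
#!/usr/bin/env python3
"""Illustrative sketch of the generator's core idea: for each schema
property, find the first proto.c line that names it (0 if absent).
The matching heuristic and emitted fields are simplified."""
import sys

def generate(proto_c: str, props_file: str, out_file: str) -> None:
    proto_lines = open(proto_c).read().splitlines()
    props = [p.strip() for p in open(props_file) if p.strip()]
    with open(out_file, 'w') as out:
        out.write("static struct property_metadata base_property_database[] = {\n")
        for prop in props:
            leaf = prop.split('.')[-1].rstrip('[]')
            # First proto.c line mentioning the leaf name, 0 if never parsed
            line_no = next((i for i, l in enumerate(proto_lines, 1)
                            if f'"{leaf}"' in l), 0)
            status = "PROP_CONFIGURED" if line_no else "PROP_UNKNOWN"
            out.write(f'    {{ .path = "{prop}", .status = {status}, '
                      f'.source_line = {line_no} }},\n')
        out.write("};\n")

if __name__ == '__main__':
    generate(*sys.argv[1:4])  # proto.c schema-props.txt output.c
```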
@@ -53,6 +53,25 @@ This testing framework includes multiple documentation files, each serving a spe
 
 ## Quick Reference
 
+### Test Modes
+
+The testing framework supports two modes:
+
+**Stub Mode (Default - Fast)**
+- Tests proto.c parsing only
+- Uses simple platform stubs (test-stubs.c)
+- Shows base properties only (proto.c)
+- Fast execution (~30 seconds)
+- Use for: Quick validation, CI/CD pipelines
+
+**Platform Mode (Integration)**
+- Tests proto.c + platform implementation (plat-gnma.c)
+- Uses platform code with hardware mocks
+- Shows base AND platform properties (proto.c → plat-gnma.c)
+- Tracks hardware application functions called
+- Slower execution (~45 seconds)
+- Use for: Platform-specific validation, integration testing
+
 ### Running Tests
 
 **RECOMMENDED: Run tests inside Docker build environment** to eliminate OS-specific issues (works on macOS, Linux, Windows):
@@ -61,10 +80,14 @@ This testing framework includes multiple documentation files, each serving a spe
 # Build the Docker environment first (if not already built)
 make build-host-env
 
-# Run all tests (schema + parser) - RECOMMENDED
+# Run all tests in STUB mode (default - fast)
 docker exec ucentral_client_build_env bash -c \
   "cd /root/ols-nos/tests/config-parser && make test-config-full"
 
+# Run all tests in PLATFORM mode (integration)
+docker exec ucentral_client_build_env bash -c \
+  "cd /root/ols-nos/tests/config-parser && make test-config-full USE_PLATFORM=brcm-sonic"
+
 # Run individual test suites
 docker exec ucentral_client_build_env bash -c \
   "cd /root/ols-nos/tests/config-parser && make validate-schema"
@@ -93,18 +116,25 @@ docker cp ucentral_client_build_env:/root/ols-nos/tests/config-parser/test-resul
 # Navigate to test directory
 cd tests/config-parser
 
-# Run all tests (schema + parser)
+# Run all tests in STUB mode (default)
 make test-config-full
 
+# Run all tests in PLATFORM mode
+make test-config-full USE_PLATFORM=brcm-sonic
+
 # Run individual test suites
 make validate-schema # Schema validation only
 make test-config # Parser tests only
 make test # Unit tests
 
-# Generate test reports
+# Generate test reports (stub mode)
 make test-config-html # HTML report (browser-viewable)
 make test-config-json # JSON report (machine-readable)
 make test-config-junit # JUnit XML (CI/CD integration)
+
+# Generate test reports (platform mode)
+make test-config-html USE_PLATFORM=brcm-sonic
+make test-config-json USE_PLATFORM=brcm-sonic
 ```
 
 **Note:** Running tests in Docker is the preferred method as it provides a consistent, reproducible environment regardless of your host OS (macOS, Linux, Windows).
@@ -114,7 +144,7 @@ make test-config-junit # JUnit XML (CI/CD integration)
 **Test Implementation:**
 - `tests/config-parser/test-config-parser.c` (3445 lines) - Parser test framework with property tracking
 - `tests/config-parser/test-stubs.c` (214 lines) - Platform function stubs for testing
-- `tests/schema/validate-schema.py` (305 lines) - Standalone schema validator
+- `tests/schema/validate-schema.py` (649 lines) - Standalone schema validator with undefined property detection
 - `tests/config-parser/config-parser.h` - Test header exposing cfg_parse()
 
 **Configuration Files:**
@@ -150,7 +180,7 @@ make test-config-junit # JUnit XML (CI/CD integration)
 - Hardware constraint validation
 
 ### Property Tracking System
-- Database of 450+ properties and their processing status
+- Database of all schema properties and their implementation status (398 canonical properties)
 - Tracks which properties are parsed by which functions
 - Identifies unimplemented features
 - Status classification: CONFIGURED, IGNORED, SYSTEM, INVALID, Unknown
@@ -206,8 +236,8 @@ The testing framework was added with minimal impact to production code:
 ### New Files Added
 1. `tests/config-parser/test-config-parser.c` - Complete test framework (3445 lines)
 2. `tests/config-parser/test-stubs.c` - Platform stubs (214 lines)
-3. `tests/schema/validate-schema.py` - Schema validator (305 lines)
-4. `src/ucentral-client/include/config-parser.h` - Test header
+3. `tests/schema/validate-schema.py` - Schema validator (649 lines)
+4. `tests/config-parser/config-parser.h` - Test header
 5. `tests/config-parser/TEST_CONFIG_README.md` - Framework documentation
 6. `tests/schema/SCHEMA_VALIDATOR_README.md` - Validator documentation
 7. `tests/MAINTENANCE.md` - Maintenance procedures
@@ -318,7 +348,7 @@ static struct property_info properties[] = {
         .status = PROP_CONFIGURED,
         .notes = "Enable/disable ethernet interface"
     },
-    // ... 450+ more entries ...
+    // ... entries for all 398 schema properties ...
 };
 ```
@@ -156,7 +156,7 @@ endif
 # Run schema validation
 validate-schema:
 	@echo "========= Schema Validation ========="
-	@python3 $(SCHEMA_VALIDATOR) $(CONFIG_SAMPLES) || true
+	@python3 $(SCHEMA_VALIDATOR) $(CONFIG_SAMPLES) --check-undefined || true
 	@echo "========= Schema validation completed ========"
 
 # Combined validation: schema + parser
@@ -1,15 +1,21 @@
 # uCentral Schema Validator
 
-A modular, portable tool for validating JSON configuration files against the uCentral schema.
+A modular, portable tool for validating JSON configuration files against the uCentral schema with advanced undefined property detection and typo suggestions.
 
 ## Features
 
-- **Standalone Operation**: Works independently without external dependencies beyond Python 3 + jsonschema
-- **Modular Design**: Easy to port to other repositories (platform-specific implementations, etc.)
 - **Schema Validation**: Full JSON Schema Draft-7 validation (types, enums, constraints, etc.)
+- **Undefined Property Detection**: Identifies properties in config not defined in schema
+- **Smart Typo Detection**: Suggests corrections for likely misspellings
+  - Separator mismatches: `lldp_admin_status` → `lldp-admin-status` (underscore vs dash)
+  - Case mismatches: `lacpEnable` → `lacp-enable` (camelCase vs dash-case)
+  - Similar spelling: Edit distance analysis with confidence scoring
 - **Multiple Output Formats**: Human-readable and machine-readable JSON
 - **Directory Validation**: Validate entire directories of configs at once
-- **CI/CD Ready**: Exit codes suitable for automated testing
+- **CI/CD Ready**: Exit codes and strict mode for pipeline integration
 - **Schema Auto-Detection**: Automatically finds schema in common locations
+- **Modular Design**: Easy to port to other repositories (platform-specific implementations, etc.)
+- **Standalone Operation**: Works independently without external dependencies beyond Python 3 + jsonschema
 
 ## Installation
@@ -28,24 +34,50 @@ pip3 install jsonschema
 ### Basic Usage
 
 ```bash
-# Validate a single file
+# Schema validation only (default behavior)
 python3 validate-schema.py config.json
 
+# Check for undefined properties (informational warnings)
+python3 validate-schema.py config.json --check-undefined
+
+# Strict mode: treat undefined properties as errors (for CI/CD)
+python3 validate-schema.py config.json --strict-schema
+
 # Validate all configs in a directory
-python3 validate-schema.py ../../config-samples/
+python3 validate-schema.py ../../config-samples/ --check-undefined
 
 # Specify custom schema
 python3 validate-schema.py config.json --schema path/to/schema.json
 ```
 
+### Understanding Undefined Properties
+
+**Undefined properties are NOT validation errors** - they are informational warnings that help identify:
+
+1. **Typos/Misspellings**: Properties that won't be applied even though config is valid
+   - Example: `lldp_admin_status` instead of `lldp-admin-status`
+   - Example: `lacpEnable` instead of `lacp-enable`
+
+2. **Vendor-Specific Extensions**: ODM/vendor proprietary properties not in schema
+   - Risk: May change without notice, not portable across platforms
+   - Recommendation: Document and coordinate with schema maintainers
+
+3. **Deprecated Properties**: Properties removed from newer schema versions
+   - Check schema version compatibility
+
+**When to use each mode:**
+- **Default mode** (no flags): Standard validation, undefined properties ignored
+- **`--check-undefined`**: Development mode, see warnings but don't fail builds
+- **`--strict-schema`**: CI/CD enforcement mode, fail on any undefined properties
+
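The paths being compared follow a simple convention: nested keys are dot-joined and array elements collapse to `[]`. A small sketch of that flattening (it mirrors the validator's `_extract_config_properties` method, shown later in this change), with an illustrative config fragment:

```python
# Illustrative: how config properties flatten into the paths the
# undefined-property check compares against the schema.
def flatten(config, path=""):
    props = set()
    if isinstance(config, dict):
        for key, value in config.items():
            prop_path = f"{path}.{key}" if path else key
            props.add(prop_path)
            props |= flatten(value, prop_path)
    elif isinstance(config, list):
        for item in config:  # array elements collapse to []
            props |= flatten(item, f"{path}[]" if path else "[]")
    return props

config = {"ethernet": [{"lldp_admin_status": "up"}]}
print(sorted(flatten(config)))
# ['ethernet', 'ethernet[].lldp_admin_status']
# 'ethernet[].lldp_admin_status' is not in the schema, so it is flagged
# with a suggestion to use 'lldp-admin-status' instead.
```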
 ### Output Formats
 
 ```bash
 # Human-readable output (default)
-python3 validate-schema.py config.json
+python3 validate-schema.py config.json --check-undefined
 
 # Machine-readable JSON output
-python3 validate-schema.py config.json --format json > report.json
+python3 validate-schema.py config.json --check-undefined --format json > report.json
 ```
 
 ### Via Makefile
@@ -64,21 +96,58 @@ make test-config-full
 ## Exit Codes
 
 - `0`: All configurations are valid
-- `1`: One or more configurations failed validation
+  - Undefined properties don't affect exit code unless `--strict-schema` is used
+- `1`: Validation errors OR (strict mode AND undefined properties found)
+- `2`: File/schema errors (file not found, invalid schema, etc.)
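A CI wrapper can branch on these codes directly; a minimal sketch using only the documented exit codes (the config directory path here is illustrative):

```python
# Illustrative CI wrapper around the documented exit codes.
import subprocess
import sys

result = subprocess.run(
    ["python3", "validate-schema.py", "config-samples/", "--strict-schema"])

if result.returncode == 0:
    print("All configurations valid")
elif result.returncode == 1:
    print("Validation errors (or undefined properties in strict mode)")
    sys.exit(1)
else:  # 2: file/schema errors such as a missing schema file
    print("Could not run validation - check file paths and schema")
    sys.exit(2)
```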
 
 ## Output Examples
 
-### Valid Configuration
+### Valid Configuration (Schema Only)
 
 ```
-✓ Valid: cfg0.json
+✓ Schema Valid: cfg0.json
 ```
 
-### Invalid Configuration
+### Valid Configuration with Undefined Properties Check
 
 ```
-✗ Invalid: bad-config.json
+✓ Schema Valid: cfg0.json
+✓ All properties defined in schema
+```
+
+### Configuration with Undefined Properties
+
+```
+✓ Schema Valid: test_config.json
+
+ℹ️  Undefined Properties (informational):
+   Found 3 property/properties not in schema
+   These may be:
+   • Typos/misspellings (won't be applied even though config is valid)
+   • Vendor-specific extensions (not portable, may change)
+   • Deprecated properties (check schema version)
+
+   1. ethernet[].lldp_admin_status
+      → Not defined in schema
+      → Possible matches:
+        ✓ ethernet[].lldp-interface-config.lldp-admin-status (use '-' not '_')
+
+   2. ethernet[].lacpEnable
+      → Not defined in schema
+      → Possible matches:
+        ✓ ethernet[].lacp-config.lacp-enable (use dash-case not camelCase)
+        ? interfaces[].ipv4.ip-arp-inspect-vlan.vlan-enable (similar spelling)
+
+   3. ethernet[].custom-property
+      → Not defined in schema
+```
+
+**Note**: This config is schema-valid (exit code 0), but has informational warnings about undefined properties.
+
+### Invalid Configuration (Schema Errors)
+
+```
+✗ Schema Invalid: bad-config.json
 Found 2 validation error(s):
 
 Error 1:
@@ -92,10 +161,13 @@ make test-config-full
       Validator: maximum
 ```
 
+**Note**: This config fails schema validation (exit code 1).
+
 ### Directory Summary
 
 ```
 Summary: 37 file(s) checked, 34 valid, 3 invalid
+  5 file(s) with undefined properties
 ```
 
 ## Integration with test-config-parser.c
@@ -143,43 +215,133 @@ is_valid, errors = validator.validate_config(config)
 results = validator.validate_directory("/path/to/configs")
 ```
 
+## Typo Detection Features
+
+The validator includes intelligent typo detection that identifies naming mistakes and suggests corrections:
+
+### Separator Mismatches
+
+**Underscore vs Dash:**
+```
+ethernet[].lldp_admin_status   ❌ Underscore
+ethernet[].lldp-admin-status   ✓ Correct (dash-case)
+```
+
+**camelCase vs dash-case:**
+```
+ethernet[].lacpEnable   ❌ camelCase
+ethernet[].lacp-enable  ✓ Correct (dash-case)
+```
+
+### Similar Spelling
+
+Uses Levenshtein distance algorithm to find properties with similar spelling:
+```
+services.logSettings.enabled   ❌ Not in schema
+services.log.enabled           ✓ Possible match (edit distance: 1)
+```
+
+### Confidence Levels
+
+- **✓ High confidence**: Exact match after normalization (separator/case fix)
+- **? Medium confidence**: Similar spelling (edit distance 2-3)
+- **No suggestion**: No similar properties found (likely vendor-specific)
+
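The two mechanisms can be exercised in isolation. A short sketch, assuming the helper functions from validate-schema.py (defined later in this change) have been pasted into a REPL or module:

```python
# Exercising the two detection mechanisms from validate-schema.py.
# levenshtein_distance, normalize_separator, and detect_naming_issue
# are defined later in this change; paste them in to try this.

# Separator/case normalization: both typo styles normalize to dash-case
print(normalize_separator("lldp_admin_status"))  # lldp-admin-status
print(normalize_separator("lacpEnable"))         # lacp-enable

# Edit distance backs the "similar spelling" suggestions
print(levenshtein_distance("color", "colour"))   # 1

# Putting it together: classify a config property against a schema path
issue = detect_naming_issue("ethernet[].lacpEnable",
                            "ethernet[].lacp-config.lacp-enable")
print(issue)
# {'type': 'separator_mismatch', 'detail': 'camelCase_vs_dash', 'confidence': 'high'}
```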
 ## Common Validation Errors
 
-### Type Errors
+### Schema Validation Errors (Exit Code 1)
 
+These are actual schema violations that must be fixed:
+
+**Type Errors:**
 ```
 $.ethernet is not of type 'array'
 ```
 
 **Fix**: Ensure `ethernet` is an array: `"ethernet": [...]`
 
-### Out of Range
-
+**Out of Range:**
 ```
 $.interfaces[0].vlan.id is greater than the maximum of 4094
 ```
 
 **Fix**: VLAN IDs must be between 1-4094
 
-### Required Property Missing
-
+**Required Property Missing:**
 ```
 'uuid' is a required property
 ```
 
 **Fix**: Add the required field: `"uuid": 1234567890`
 
-### Additional Properties Not Allowed
-
+**Additional Properties Not Allowed:**
 ```
 Additional properties are not allowed ('unknown_field' was unexpected)
 ```
+**Fix**: Remove the field or check spelling (only if schema has `additionalProperties: false`)
 
-**Fix**: Remove the field or check spelling
+### Undefined Properties (Exit Code 0 by default)
+
+These are informational warnings that don't cause validation failure:
+
+**Typo/Misspelling:**
+```
+ℹ️  ethernet[].lldp_admin_status not in schema
+   Suggestion: ethernet[].lldp-interface-config.lldp-admin-status
+```
+**Impact**: Property won't be applied even though config is valid
+
+**Vendor Extension:**
+```
+ℹ️  ethernet[].edgecore-specific-property not in schema
+   No suggestions found
+```
+**Impact**: Not portable, may change without notice
 
 ## CI/CD Integration
 
+### Basic Pipeline
+
+```yaml
+validate-configs:
+  stage: test
+  script:
+    # Standard validation (undefined properties are warnings)
+    - python3 validate-schema.py config-samples/
+  artifacts:
+    when: on_failure
+    paths:
+      - validation-report.json
+```
+
+### Strict Enforcement Pipeline
+
+```yaml
+validate-configs-strict:
+  stage: test
+  script:
+    # Strict mode: fail on undefined properties
+    - python3 validate-schema.py config-samples/ --strict-schema
+    # Generate JSON report for analysis
+    - python3 validate-schema.py config-samples/ --check-undefined --format json > report.json
+  artifacts:
+    when: always
+    paths:
+      - report.json
+```
+
+### Development Workflow
+
+```bash
+# Before committing: check for typos
+python3 validate-schema.py my-config.json --check-undefined
+
+# Review suggestions and fix obvious typos
+# Document any intentional vendor-specific properties
+
+# CI/CD will enforce with --strict-schema
+```
+
 ## Files
 
-- **validate-schema.py**: Standalone schema validator script (305 lines)
+- **validate-schema.py**: Standalone schema validator script (649 lines)
 - **Makefile**: Build targets for schema validation
 - **test-config-parser.c**: Enhanced with schema validation integration
 - **SCHEMA_VALIDATOR_README.md**: This documentation
@@ -6,9 +6,15 @@ A modular, standalone tool for validating JSON configuration files against
 the uCentral schema. Can be used independently or integrated into test suites.
 
 Usage:
-    # Validate a single file
+    # Validate a single file (schema validation only)
     ./validate-schema.py config.json
 
+    # Check for undefined properties (informational, doesn't affect exit code)
+    ./validate-schema.py config.json --check-undefined
+
+    # Strict mode: treat undefined properties as errors (for CI/CD)
+    ./validate-schema.py config.json --strict-schema
+
     # Validate with specific schema
     ./validate-schema.py config.json --schema path/to/schema.json
@@ -18,7 +24,19 @@ Usage:
     # Machine-readable JSON output
     ./validate-schema.py config.json --format json
 
-    # Exit code: 0 = all valid, 1 = validation errors, 2 = file/schema errors
+Exit codes:
+    0 = all valid (undefined properties don't affect this unless --strict-schema)
+    1 = validation errors OR (strict mode AND undefined properties found)
+    2 = file/schema errors (file not found, invalid schema, etc.)
+
+Undefined Properties:
+    Properties in config but not defined in schema are INFORMATIONAL warnings,
+    not validation errors. They may indicate:
+    • Typos/misspellings (property won't be applied even though config is valid)
+    • Vendor-specific extensions (not portable across platforms)
+    • Deprecated properties (check schema version)
+
+    Use --strict-schema in CI/CD pipelines to enforce schema compliance.
 
 Author: Generated for OLS uCentral Client
 License: BSD-3-Clause
@@ -29,7 +47,8 @@ import json
 import argparse
 import os
 from pathlib import Path
-from typing import Dict, List, Tuple, Optional
+from typing import Dict, List, Tuple, Optional, Set
+from collections import defaultdict
 
 try:
     import jsonschema
@@ -39,6 +58,84 @@ except ImportError:
     sys.exit(2)
 
 
+def levenshtein_distance(s1: str, s2: str) -> int:
+    """
+    Calculate Levenshtein distance between two strings.
+    Returns minimum number of single-character edits needed to change s1 into s2.
+    """
+    if len(s1) < len(s2):
+        return levenshtein_distance(s2, s1)
+
+    if len(s2) == 0:
+        return len(s1)
+
+    previous_row = range(len(s2) + 1)
+    for i, c1 in enumerate(s1):
+        current_row = [i + 1]
+        for j, c2 in enumerate(s2):
+            # Cost of insertions, deletions, or substitutions
+            insertions = previous_row[j + 1] + 1
+            deletions = current_row[j] + 1
+            substitutions = previous_row[j] + (c1 != c2)
+            current_row.append(min(insertions, deletions, substitutions))
+        previous_row = current_row
+
+    return previous_row[-1]
+
+
+def normalize_separator(s: str, target: str = '-') -> str:
+    """Convert string separators. Default converts underscores and camelCase to dashes."""
+    # Handle camelCase: insert dash before uppercase letters
+    result = []
+    for i, char in enumerate(s):
+        if i > 0 and char.isupper() and s[i-1].islower():
+            result.append('-')
+        result.append(char.lower())
+
+    normalized = ''.join(result)
+    # Replace underscores with target separator
+    normalized = normalized.replace('_', target)
+    return normalized
+
+
+def detect_naming_issue(config_prop: str, schema_prop: str) -> Optional[Dict[str, str]]:
+    """
+    Detect if config_prop is a likely typo/variation of schema_prop.
+    Returns dict with issue type and confidence, or None if no clear match.
+    """
+    # Extract just the property name (last component after last dot or bracket)
+    def get_prop_name(path: str) -> str:
+        # Handle array notation: ethernet[].lldp-config -> lldp-config
+        if '.' in path:
+            return path.split('.')[-1]
+        return path
+
+    config_name = get_prop_name(config_prop)
+    schema_name = get_prop_name(schema_prop)
+
+    # Check for exact match after normalization
+    config_normalized = normalize_separator(config_name, '-')
+    schema_normalized = normalize_separator(schema_name, '-')
+
+    if config_normalized == schema_normalized and config_name != schema_name:
+        # Separator mismatch (dash vs underscore vs camelCase)
+        if '_' in config_name and '-' in schema_name:
+            return {'type': 'separator_mismatch', 'detail': 'underscore_vs_dash', 'confidence': 'high'}
+        elif any(c.isupper() for c in config_name) and '-' in schema_name:
+            return {'type': 'separator_mismatch', 'detail': 'camelCase_vs_dash', 'confidence': 'high'}
+        else:
+            return {'type': 'separator_mismatch', 'detail': 'format_difference', 'confidence': 'high'}
+
+    # Check Levenshtein distance
+    distance = levenshtein_distance(config_name.lower(), schema_name.lower())
+    if distance <= 2:
+        return {'type': 'similar_spelling', 'detail': f'edit_distance_{distance}', 'confidence': 'high' if distance == 1 else 'medium'}
+    elif distance <= 3:
+        return {'type': 'similar_spelling', 'detail': f'edit_distance_{distance}', 'confidence': 'medium'}
+
+    return None
+
+
 class SchemaValidator:
     """
     Modular schema validator for uCentral configurations.
@@ -47,17 +144,23 @@ class SchemaValidator:
     It has no dependencies on specific file paths or repository structure.
     """
 
-    def __init__(self, schema_path: Optional[str] = None):
+    def __init__(self, schema_path: Optional[str] = None, check_undefined: bool = False,
+                 similarity_threshold: int = 3):
         """
         Initialize validator with schema.
 
         Args:
            schema_path: Path to JSON schema file. If None, attempts to find
                         schema in common locations relative to this script.
+            check_undefined: If True, check for properties in config not defined in schema
+            similarity_threshold: Maximum Levenshtein distance for suggesting similar properties
         """
         self.schema_path = schema_path
         self.schema = None
         self.validator = None
+        self.check_undefined = check_undefined
+        self.similarity_threshold = similarity_threshold
+        self._schema_properties = None  # Cache of all valid schema property paths
         self._load_schema()
 
     def _find_default_schema(self) -> Optional[str]:
@@ -98,7 +201,185 @@ class SchemaValidator:
         # Create validator
         self.validator = Draft7Validator(self.schema)
 
-    def validate_file(self, config_path: str) -> Tuple[bool, List[Dict]]:
+    def _extract_schema_properties(self, schema: Dict = None, path: str = "", visited: Set[str] = None) -> Set[str]:
+        """
+        Recursively extract all valid property paths from the schema.
+
+        Args:
+            schema: Schema object (or sub-schema) to process
+            path: Current path prefix
+            visited: Set of visited $ref paths to prevent infinite recursion
+
+        Returns:
+            Set of all valid property paths in the schema
+        """
+        if schema is None:
+            schema = self.schema
+
+        if visited is None:
+            visited = set()
+
+        properties = set()
+
+        # Handle $ref references
+        if '$ref' in schema:
+            ref_path = schema['$ref']
+
+            # Prevent infinite recursion
+            if ref_path in visited:
+                return properties
+            visited.add(ref_path)
+
+            # Resolve $ref (handle #/$defs/name references)
+            if ref_path.startswith('#/'):
+                ref_parts = ref_path[2:].split('/')
+                ref_schema = self.schema
+                for part in ref_parts:
+                    ref_schema = ref_schema.get(part, {})
+
+                # Recursively extract from referenced schema
+                return self._extract_schema_properties(ref_schema, path, visited)
+
+        # Handle object properties
+        if 'properties' in schema:
+            for prop_name, prop_schema in schema['properties'].items():
+                prop_path = f"{path}.{prop_name}" if path else prop_name
+                properties.add(prop_path)
+
+                # Recursively process nested properties
+                nested = self._extract_schema_properties(prop_schema, prop_path, visited.copy())
+                properties.update(nested)
+
+        # Handle arrays with items schema
+        if 'items' in schema:
+            items_schema = schema['items']
+            # Use [] notation for arrays
+            array_path = f"{path}[]" if path else "[]"
+
+            # Extract properties from array items
+            nested = self._extract_schema_properties(items_schema, array_path, visited.copy())
+            properties.update(nested)
+
+        # Handle additional properties (if true, allows any property)
+        # We don't add these to valid properties as they're wildcards
+
+        return properties
+
+    def _extract_config_properties(self, config: Dict, path: str = "") -> Set[str]:
+        """
+        Recursively extract all property paths from a configuration object.
+
+        Args:
+            config: Configuration object to analyze
+            path: Current path prefix
+
+        Returns:
+            Set of all property paths in the configuration
+        """
+        properties = set()
+
+        if isinstance(config, dict):
+            for key, value in config.items():
+                prop_path = f"{path}.{key}" if path else key
+                properties.add(prop_path)
+
+                # Recursively process nested values
+                nested = self._extract_config_properties(value, prop_path)
+                properties.update(nested)
+
+        elif isinstance(config, list):
+            # For arrays, use [] notation and process all items
+            array_path = f"{path}[]" if path else "[]"
+
+            for item in config:
+                nested = self._extract_config_properties(item, array_path)
+                properties.update(nested)
+
+        return properties
+
+    def _find_similar_properties(self, config_prop: str, schema_props: Set[str]) -> List[Dict]:
+        """
+        Find schema properties similar to a config property.
+
+        Args:
+            config_prop: Property path from configuration
+            schema_props: Set of all valid schema property paths
+
+        Returns:
+            List of suggestions with similarity information
+        """
+        suggestions = []
+
+        for schema_prop in schema_props:
+            issue = detect_naming_issue(config_prop, schema_prop)
+            if issue:
+                suggestions.append({
+                    'schema_property': schema_prop,
+                    'issue_type': issue['type'],
+                    'detail': issue['detail'],
+                    'confidence': issue['confidence']
+                })
+
+        # Sort by confidence (high first) and then alphabetically
+        confidence_order = {'high': 0, 'medium': 1, 'low': 2}
+        suggestions.sort(key=lambda x: (confidence_order.get(x['confidence'], 3), x['schema_property']))
+
+        return suggestions
+
+    def _check_undefined_properties(self, config: Dict) -> Dict[str, any]:
+        """
+        Check for properties in config that are not defined in schema.
+
+        Args:
+            config: Configuration object to check
+
+        Returns:
+            Dict with undefined property analysis results
+        """
+        # Extract all valid schema properties (with caching)
+        if self._schema_properties is None:
+            self._schema_properties = self._extract_schema_properties()
+
+        # Extract all config properties
+        config_props = self._extract_config_properties(config)
+
+        # Find undefined properties
+        undefined = []
+
+        for config_prop in sorted(config_props):
+            # Check if property or its array form exists in schema
+            is_defined = False
+
+            # Direct match
+            if config_prop in self._schema_properties:
+                is_defined = True
+            else:
+                # Check with array index normalization: ethernet[0].speed -> ethernet[].speed
+                normalized_prop = config_prop
+                import re
+                # Replace array indices with []
+                normalized_prop = re.sub(r'\[\d+\]', '[]', normalized_prop)
+
+                if normalized_prop in self._schema_properties:
+                    is_defined = True
+
+            if not is_defined:
+                # Find similar properties
+                suggestions = self._find_similar_properties(config_prop, self._schema_properties)
+
+                undefined.append({
+                    'path': config_prop,
+                    'suggestions': suggestions[:3]  # Top 3 suggestions
+                })
+
+        return {
+            'total_config_properties': len(config_props),
+            'total_schema_properties': len(self._schema_properties),
+            'undefined_count': len(undefined),
+            'undefined_properties': undefined
+        }
+
+    def validate_file(self, config_path: str) -> Tuple[bool, List[Dict], Optional[Dict]]:
         """Validate a single configuration file against the schema."""
         try:
             with open(config_path, 'r') as f:
@@ -108,18 +389,26 @@ class SchemaValidator:
                 'path': '$',
                 'message': f'Invalid JSON: {e}',
                 'validator': 'json_parse'
-            }]
+            }], None
         except FileNotFoundError:
             return False, [{
                 'path': '$',
                 'message': f'File not found: {config_path}',
                 'validator': 'file_access'
-            }]
+            }], None
 
         return self.validate_config(config)
 
-    def validate_config(self, config: Dict) -> Tuple[bool, List[Dict]]:
-        """Validate a configuration object against the schema."""
+    def validate_config(self, config: Dict) -> Tuple[bool, List[Dict], Optional[Dict]]:
+        """
+        Validate a configuration object against the schema.
+
+        Returns:
+            Tuple of (is_valid, errors, undefined_analysis)
+            - is_valid: True if no schema validation errors
+            - errors: List of schema validation errors
+            - undefined_analysis: Dict with undefined property analysis (if check_undefined=True)
+        """
         errors = []
 
         for error in sorted(self.validator.iter_errors(config), key=str):
@@ -133,9 +422,14 @@ class SchemaValidator:
                 'schema_path': '.'.join(str(p) for p in error.absolute_schema_path) if error.absolute_schema_path else '$'
             })
 
-        return len(errors) == 0, errors
+        # Check for undefined properties if enabled
+        undefined_analysis = None
+        if self.check_undefined:
+            undefined_analysis = self._check_undefined_properties(config)
+
+        return len(errors) == 0, errors, undefined_analysis
 
-    def validate_directory(self, dir_path: str, pattern: str = "*.json") -> Dict[str, Tuple[bool, List[Dict]]]:
+    def validate_directory(self, dir_path: str, pattern: str = "*.json") -> Dict[str, Tuple[bool, List[Dict], Optional[Dict]]]:
         """Validate all JSON files in a directory."""
         results = {}
         dir_path_obj = Path(dir_path)
@@ -154,14 +448,16 @@ class SchemaValidator:
     return results
 
 
-def format_human_output(filename: str, is_valid: bool, errors: List[Dict]) -> str:
+def format_human_output(filename: str, is_valid: bool, errors: List[Dict],
+                        undefined_analysis: Optional[Dict] = None) -> str:
     """Format validation results in human-readable format."""
     output = []
 
+    # Schema validation results
     if is_valid:
-        output.append(f"✓ Valid: {filename}")
+        output.append(f"✓ Schema Valid: {filename}")
     else:
-        output.append(f"✗ Invalid: {filename}")
+        output.append(f"✗ Schema Invalid: {filename}")
         output.append(f"  Found {len(errors)} validation error(s):")
 
         for i, error in enumerate(errors, 1):
@@ -171,26 +467,69 @@ def format_human_output(filename: str, is_valid: bool, errors: List[Dict]) -> str:
         if error.get('validator'):
             output.append(f"    Validator: {error['validator']}")
 
+    # Undefined properties analysis (informational warnings, not errors)
+    if undefined_analysis and undefined_analysis['undefined_count'] > 0:
+        output.append(f"\nℹ️  Undefined Properties (informational):")
+        output.append(f"  Found {undefined_analysis['undefined_count']} property/properties not in schema")
+        output.append(f"  These may be:")
+        output.append(f"  • Typos/misspellings (won't be applied even though config is valid)")
+        output.append(f"  • Vendor-specific extensions (not portable, may change)")
+        output.append(f"  • Deprecated properties (check schema version)\n")
+
+        for i, item in enumerate(undefined_analysis['undefined_properties'], 1):
+            output.append(f"  {i}. {item['path']}")
+            output.append(f"     → Not defined in schema")
+
+            if item['suggestions']:
+                output.append(f"     → Possible matches:")
+                for suggestion in item['suggestions']:
+                    confidence_icon = "✓" if suggestion['confidence'] == 'high' else "?"
+                    detail_msg = ""
+                    if suggestion['issue_type'] == 'separator_mismatch':
+                        if 'underscore_vs_dash' in suggestion['detail']:
+                            detail_msg = " (use '-' not '_')"
+                        elif 'camelCase_vs_dash' in suggestion['detail']:
+                            detail_msg = " (use dash-case not camelCase)"
+                    elif suggestion['issue_type'] == 'similar_spelling':
+                        detail_msg = f" (similar spelling)"
+
+                    output.append(f"       {confidence_icon} {suggestion['schema_property']}{detail_msg}")
+            output.append("")  # Blank line between items
+
+    elif undefined_analysis and undefined_analysis['undefined_count'] == 0:
+        output.append(f"✓ All properties defined in schema")
+
     return '\n'.join(output)
 
 
-def format_json_output(results: Dict[str, Tuple[bool, List[Dict]]]) -> str:
+def format_json_output(results: Dict[str, Tuple[bool, List[Dict], Optional[Dict]]]) -> str:
     """Format validation results as JSON."""
     output = {
         'summary': {
             'total': len(results),
-            'valid': sum(1 for is_valid, _ in results.values() if is_valid),
-            'invalid': sum(1 for is_valid, _ in results.values() if not is_valid)
+            'valid': sum(1 for is_valid, _, _ in results.values() if is_valid),
+            'invalid': sum(1 for is_valid, _, _ in results.values() if not is_valid),
+            'with_undefined_properties': sum(1 for _, _, undefined in results.values()
+                                             if undefined and undefined['undefined_count'] > 0)
         },
         'results': {}
     }
 
-    for filename, (is_valid, errors) in results.items():
-        output['results'][filename] = {
-            'valid': is_valid,
+    for filename, (is_valid, errors, undefined_analysis) in results.items():
+        result_data = {
+            'schema_valid': is_valid,
             'errors': errors
         }
 
+        if undefined_analysis:
+            result_data['schema_compliance'] = {
+                'total_config_properties': undefined_analysis['total_config_properties'],
+                'undefined_count': undefined_analysis['undefined_count'],
+                'undefined_properties': undefined_analysis['undefined_properties']
+            }
+
+        output['results'][filename] = result_data
+
     return json.dumps(output, indent=2)
@@ -219,12 +558,26 @@ Examples:
     parser.add_argument('--pattern', '-p',
                         default='*.json',
                         help='File pattern for directory validation (default: *.json)')
+    parser.add_argument('--check-undefined', '-u',
+                        action='store_true',
+                        help='Check for properties not defined in schema (informational, does not affect exit code)')
+    parser.add_argument('--strict-schema',
+                        action='store_true',
+                        help='Treat undefined properties as errors (exit code 1). Use for CI/CD enforcement. (implies --check-undefined)')
+    parser.add_argument('--similarity-threshold', '-t',
+                        type=int,
+                        default=3,
+                        help='Maximum edit distance for suggesting similar properties (default: 3)')
 
     args = parser.parse_args()
 
+    # --strict-schema implies --check-undefined
+    check_undefined = args.check_undefined or args.strict_schema
+
     # Initialize validator
     try:
-        validator = SchemaValidator(args.schema)
+        validator = SchemaValidator(args.schema, check_undefined=check_undefined,
+                                    similarity_threshold=args.similarity_threshold)
     except (FileNotFoundError, ValueError) as e:
         print(f"ERROR: {e}", file=sys.stderr)
         return 2
@@ -240,8 +593,8 @@ Examples:
     results = {}
 
     if path_obj.is_file():
-        is_valid, errors = validator.validate_file(args.path)
-        results[path_obj.name] = (is_valid, errors)
+        is_valid, errors, undefined_analysis = validator.validate_file(args.path)
+        results[path_obj.name] = (is_valid, errors, undefined_analysis)
     elif path_obj.is_dir():
         try:
             results = validator.validate_directory(args.path, args.pattern)
@@ -256,19 +609,40 @@ Examples:
     if args.format == 'json':
         print(format_json_output(results))
     else:
-        for filename, (is_valid, errors) in results.items():
-            print(format_human_output(filename, is_valid, errors))
+        for filename, (is_valid, errors, undefined_analysis) in results.items():
+            print(format_human_output(filename, is_valid, errors, undefined_analysis))
             print()  # Blank line between files
 
         # Summary for multiple files
         if len(results) > 1:
-            valid_count = sum(1 for is_valid, _ in results.values() if is_valid)
+            valid_count = sum(1 for is_valid, _, _ in results.values() if is_valid)
             invalid_count = len(results) - valid_count
-            print(f"Summary: {len(results)} file(s) checked, {valid_count} valid, {invalid_count} invalid")
+            undefined_count = sum(1 for _, _, undefined in results.values()
+                                  if undefined and undefined['undefined_count'] > 0)
 
-    # Exit code: 0 if all valid, 1 if any invalid
-    all_valid = all(is_valid for is_valid, _ in results.values())
-    return 0 if all_valid else 1
+            print(f"Summary: {len(results)} file(s) checked, {valid_count} valid, {invalid_count} invalid")
+            if check_undefined:
+                print(f"  {undefined_count} file(s) with undefined properties")
+
+    # Exit code logic
+    # Only schema validation errors cause failure by default
+    all_valid = all(is_valid for is_valid, _, _ in results.values())
+
+    # In strict mode, undefined properties also cause failure
+    has_undefined = False
+    if args.strict_schema:
+        has_undefined = any(undefined and undefined['undefined_count'] > 0
+                            for _, _, undefined in results.values())
+
+    # Exit codes:
+    # 0 = all valid (undefined properties don't affect this unless --strict-schema)
+    # 1 = validation errors OR (strict mode AND undefined properties found)
+    if not all_valid:
+        return 1
+    elif args.strict_schema and has_undefined:
+        return 1
+
+    return 0
 
 
 if __name__ == '__main__':