WIFI-13859 (#961)

* Added hfcl_ion4xi to the sanity, performance, and regression workflows and the overview page (a condensed sketch of the pattern is shown below, before the file diffs)

Signed-off-by: jitendracandela <jitendra.kushavah@candelatech.com>

* Removed hfcl_ion4xi from the overview page

Signed-off-by: jitendracandela <jitendra.kushavah@candelatech.com>

---------

Signed-off-by: jitendracandela <jitendra.kushavah@candelatech.com>
Jitendrakumar Kushavah
2024-07-01 17:12:47 +05:30
committed by GitHub
parent b8703913df
commit f1d88e5c21
3 changed files with 599 additions and 11 deletions
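The same three-step pattern is applied in each workflow below; a condensed sketch (surrounding keys and most job steps trimmed, names taken from the diffs):

# 1. Add the new model to the default ap_models value of the workflow_dispatch input:
ap_models:
  default: "cig_wf188n,...,edgecore_eap111,hfcl_ion4xi"

# 2. Add a test-hfcl-ion4xi job gated on that input (on the self-hosted testbeds it also
#    needs test-hfcl-ion4xe so the jobs run one after another):
test-hfcl-ion4xi:
  needs: ["vars", "build"]
  if: "!cancelled() && contains(fromJSON(needs.vars.outputs.ap_models), 'hfcl_ion4xi')"
  env:
    AP_MODEL: hfcl_ion4xi

# 3. Append the new job to the report and cleanup dependency lists:
report:
  needs: [vars, ..., test-edgecore-eap111, test-hfcl-ion4xi]
cleanup:
  needs: [..., test-edgecore-eap111, test-hfcl-ion4xi]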

View File (performance workflow)

@@ -22,7 +22,7 @@ on:
description: "revision of the Open Wifi Helm chart"
ap_models:
required: true
default: "cig_wf188n,cig_wf196,hfcl_ion4xe,yuncore_fap655,yuncore_ax820,edgecore_oap101-6e,edgecore_eap102,edgecore_eap101,edgecore_eap104,cig_wf186w,edgecore_eap111"
default: "cig_wf188n,cig_wf196,hfcl_ion4xe,yuncore_fap655,yuncore_ax820,edgecore_oap101-6e,edgecore_eap102,edgecore_eap101,edgecore_eap104,cig_wf186w,edgecore_eap111,hfcl_ion4xi"
description: "the AP models to test"
ap_version:
required: true
@@ -302,7 +302,7 @@ jobs:
test-hfcl-ion4xe:
needs: ["vars", "build"]
needs: ["vars", "build", "test-edgecore-eap101"]
runs-on: [ self-hosted, small ]
timeout-minutes: 1440
if: "!cancelled() && contains(fromJSON(needs.vars.outputs.ap_models), 'hfcl_ion4xe')"
@@ -478,8 +478,185 @@ jobs:
run: kubectl -n openwifi-${{ needs.vars.outputs.existing_controller }} logs deployment/owsec
test-hfcl-ion4xi:
needs: [ "vars", "build", "test-hfcl-ion4xe"]
runs-on: [ self-hosted, small ]
timeout-minutes: 1440
if: "!cancelled() && contains(fromJSON(needs.vars.outputs.ap_models), 'hfcl_ion4xi')"
env:
AP_MODEL: hfcl_ion4xi
steps:
- name: Set AP model output
id: ap_model
run: |
echo "model=${AP_MODEL}" >> $GITHUB_OUTPUT
- uses: actions/checkout@v3
- name: Set up Python
uses: actions/setup-python@v4
with:
python-version: "3.8"
# TODO WIFI-7839 delete when issue is resolved on AWS CLI side
- name: install kubectl
run: |
curl -s -LO "https://dl.k8s.io/release/v1.27.6/bin/linux/amd64/kubectl"
sudo install -o root -g root -m 0755 kubectl /usr/local/bin/kubectl
- name: install aws CLI tool
run: |
curl "https://awscli.amazonaws.com/awscli-exe-linux-x86_64.zip" -o "awscliv2.zip"
unzip awscliv2.zip
sudo ./aws/install
- name: get EKS access credentials
run: aws eks update-kubeconfig --name ${{ env.AWS_EKS_NAME }}
- name: prepare namespace name
id: namespace
run: |
NAMESPACE="performance-${{ github.run_id }}-$(echo ${{ steps.ap_model.outputs.model }} | tr '[:upper:]' '[:lower:]' | tr '_' '-')"
echo "name=${NAMESPACE}" >> $GITHUB_OUTPUT
- name: prepare configuration
run: |
cat << EOF > lab_info.json
${{ secrets.LAB_INFO_JSON }}
EOF
- name: run tests dataplane_tests
uses: ./.github/actions/run-tests
if: "!cancelled() && contains(fromJSON(needs.vars.outputs.marker_expressions), 'dataplane_tests')"
with:
namespace: ${{ steps.namespace.outputs.name }}-dtt
testbed: basic-3b
marker_expression: "performance and dataplane_tests"
configuration_file: "./lab_info.json"
testing_docker_image: tip-tip-wlan-cloud-docker-repo.jfrog.io/cloud-sdk-nightly:${{ github.run_id }}
additional_args: '-o firmware="${{ needs.vars.outputs.ap_version }}"'
allure_results_artifact_name: "allure-results-${{ steps.ap_model.outputs.model }}-dataplane_tests"
dns_records_to_resolve: "sec-${{ needs.vars.outputs.existing_controller }}.cicd.lab.wlan.tip.build gw-${{ needs.vars.outputs.existing_controller }}.cicd.lab.wlan.tip.build fms-${{ needs.vars.outputs.existing_controller }}.cicd.lab.wlan.tip.build"
# necessary because if conditionals in composite actions are currently not respected
- name: get tests logs
if: always()
continue-on-error: true
run: |
podname=$(kubectl get pods -n ${{ steps.namespace.outputs.name }}-dtt --no-headers -o custom-columns=":metadata.name" -l job-name=testing | sed "s/pod\///")
kubectl logs --timestamps -n ${{ steps.namespace.outputs.name }}-dtt $podname || true
- name: delete namespace
if: always()
continue-on-error: true
run: kubectl delete ns --ignore-not-found=true --wait ${{ steps.namespace.outputs.name }}-dtt
- name: run tests peak_throughput_tests
uses: ./.github/actions/run-tests
if: "!cancelled() && contains(fromJSON(needs.vars.outputs.marker_expressions), 'peak_throughput_tests')"
with:
namespace: ${{ steps.namespace.outputs.name }}-ssdbt
testbed: basic-3b
marker_expression: "performance and peak_throughput_tests"
configuration_file: "./lab_info.json"
testing_docker_image: tip-tip-wlan-cloud-docker-repo.jfrog.io/cloud-sdk-nightly:${{ github.run_id }}
additional_args: '-o firmware="${{ needs.vars.outputs.ap_version }}"'
allure_results_artifact_name: "allure-results-${{ steps.ap_model.outputs.model }}-peak_throughput_tests"
dns_records_to_resolve: "sec-${{ needs.vars.outputs.existing_controller }}.cicd.lab.wlan.tip.build gw-${{ needs.vars.outputs.existing_controller }}.cicd.lab.wlan.tip.build fms-${{ needs.vars.outputs.existing_controller }}.cicd.lab.wlan.tip.build"
# necessary because if conditionals in composite actions are currently not respected
- name: get tests logs
if: always()
continue-on-error: true
run: |
podname=$(kubectl get pods -n ${{ steps.namespace.outputs.name }}-ssdbt --no-headers -o custom-columns=":metadata.name" -l job-name=testing | sed "s/pod\///")
kubectl logs --timestamps -n ${{ steps.namespace.outputs.name }}-ssdbt $podname || true
- name: delete namespace
if: always()
continue-on-error: true
run: kubectl delete ns --ignore-not-found=true --wait ${{ steps.namespace.outputs.name }}-ssdbt
- name: run tests client_scale_tests
uses: ./.github/actions/run-tests
if: "!cancelled() && contains(fromJSON(needs.vars.outputs.marker_expressions), 'client_scale_tests')"
with:
namespace: ${{ steps.namespace.outputs.name }}-wct
testbed: basic-3b
marker_expression: "performance and client_scale_tests"
configuration_file: "./lab_info.json"
testing_docker_image: tip-tip-wlan-cloud-docker-repo.jfrog.io/cloud-sdk-nightly:${{ github.run_id }}
additional_args: '-o firmware="${{ needs.vars.outputs.ap_version }}"'
allure_results_artifact_name: "allure-results-${{ steps.ap_model.outputs.model }}-client_scale_tests"
dns_records_to_resolve: "sec-${{ needs.vars.outputs.existing_controller }}.cicd.lab.wlan.tip.build gw-${{ needs.vars.outputs.existing_controller }}.cicd.lab.wlan.tip.build fms-${{ needs.vars.outputs.existing_controller }}.cicd.lab.wlan.tip.build"
# necessary because if conditionals in composite actions are currently not respected
- name: get tests logs
if: always()
continue-on-error: true
run: |
podname=$(kubectl get pods -n ${{ steps.namespace.outputs.name }}-wct --no-headers -o custom-columns=":metadata.name" -l job-name=testing | sed "s/pod\///")
kubectl logs --timestamps -n ${{ steps.namespace.outputs.name }}-wct $podname || true
- name: delete namespace
if: always()
continue-on-error: true
run: kubectl delete ns --ignore-not-found=true --wait ${{ steps.namespace.outputs.name }}-wct
- name: run tests dual_band_tests
uses: ./.github/actions/run-tests
if: "!cancelled() && contains(fromJSON(needs.vars.outputs.marker_expressions), 'dual_band_tests')"
with:
namespace: ${{ steps.namespace.outputs.name }}-wct
testbed: basic-3b
marker_expression: "performance and dual_band_tests"
configuration_file: "./lab_info.json"
testing_docker_image: tip-tip-wlan-cloud-docker-repo.jfrog.io/cloud-sdk-nightly:${{ github.run_id }}
additional_args: '-o firmware="${{ needs.vars.outputs.ap_version }}"'
allure_results_artifact_name: "allure-results-${{ steps.ap_model.outputs.model }}-dual_band_tests"
dns_records_to_resolve: "sec-${{ needs.vars.outputs.existing_controller }}.cicd.lab.wlan.tip.build gw-${{ needs.vars.outputs.existing_controller }}.cicd.lab.wlan.tip.build fms-${{ needs.vars.outputs.existing_controller }}.cicd.lab.wlan.tip.build"
# necessary because if conditionals in composite actions are currently not respected
- name: get tests logs
if: always()
continue-on-error: true
run: |
podname=$(kubectl get pods -n ${{ steps.namespace.outputs.name }}-wct --no-headers -o custom-columns=":metadata.name" -l job-name=testing | sed "s/pod\///")
kubectl logs --timestamps -n ${{ steps.namespace.outputs.name }}-wct $podname || true
- name: delete namespace
if: always()
continue-on-error: true
run: kubectl delete ns --ignore-not-found=true --wait ${{ steps.namespace.outputs.name }}-wct
- name: show gw logs
if: failure()
run: kubectl -n openwifi-${{ needs.vars.outputs.existing_controller }} logs deployment/owgw
- name: show fms logs
if: failure()
run: kubectl -n openwifi-${{ needs.vars.outputs.existing_controller }} logs deployment/owfms
- name: show prov logs
if: failure()
run: kubectl -n openwifi-${{ needs.vars.outputs.existing_controller }} logs deployment/owprov
- name: show analytics logs
if: failure()
run: kubectl -n openwifi-${{ needs.vars.outputs.existing_controller }} logs deployment/owanalytics
- name: show subscription (userportal) logs
if: failure()
run: kubectl -n openwifi-${{ needs.vars.outputs.existing_controller }} logs deployment/owsub
- name: show sec logs
if: failure()
run: kubectl -n openwifi-${{ needs.vars.outputs.existing_controller }} logs deployment/owsec
test-edgecore-eap101:
needs: ["vars", "build", "test-hfcl-ion4xe"]
needs: ["vars", "build"]
runs-on: [ self-hosted, small ]
timeout-minutes: 1440
if: "!cancelled() && contains(fromJSON(needs.vars.outputs.ap_models), 'edgecore_eap101')"
@@ -2078,7 +2255,7 @@ jobs:
report:
if: "!cancelled()"
runs-on: ubuntu-latest
- needs: [vars, test-cig-wf188n, test-edgecore-oap101-6e, test-cig-wf196, test-edgecore-eap102, test-hfcl-ion4xe, test-edgecore-eap101, test-yuncore-fap655, test-edgecore-eap104, test-yuncore-ax820, test-cig-wf186w, test-edgecore-eap111]
+ needs: [vars, test-cig-wf188n, test-edgecore-oap101-6e, test-cig-wf196, test-edgecore-eap102, test-hfcl-ion4xe, test-edgecore-eap101, test-yuncore-fap655, test-edgecore-eap104, test-yuncore-ax820, test-cig-wf186w, test-edgecore-eap111, test-hfcl-ion4xi]
strategy:
fail-fast: false
matrix:
@@ -2153,7 +2330,7 @@ jobs:
# Cleanup
cleanup:
- needs: [test-cig-wf188n, test-edgecore-oap101-6e, test-cig-wf196, test-edgecore-eap102, test-hfcl-ion4xe, test-edgecore-eap101, test-yuncore-fap655, test-edgecore-eap104, test-yuncore-ax820, test-cig-wf186w, test-edgecore-eap111]
+ needs: [test-cig-wf188n, test-edgecore-oap101-6e, test-cig-wf196, test-edgecore-eap102, test-hfcl-ion4xe, test-edgecore-eap101, test-yuncore-fap655, test-edgecore-eap104, test-yuncore-ax820, test-cig-wf186w, test-edgecore-eap111, test-hfcl-ion4xi]
runs-on: ubuntu-latest
if: always()
steps:
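In this file the needs: changes do more than add the new job: they chain the three jobs so each starts only after the previous one finishes (presumably to serialize access to the shared lab testbed). The resulting chain, condensed from the hunks above with only the needs lines shown:

test-edgecore-eap101:
  needs: ["vars", "build"]
test-hfcl-ion4xe:
  needs: ["vars", "build", "test-edgecore-eap101"]
test-hfcl-ion4xi:
  needs: ["vars", "build", "test-hfcl-ion4xe"]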

View File (sanity workflow)

@@ -22,7 +22,7 @@ on:
description: "revision of the Open Wifi Helm chart"
ap_models:
required: true
default: "cig_wf188n,cig_wf196,hfcl_ion4xe,yuncore_fap655,yuncore_ax820,edgecore_oap101-6e,edgecore_eap102,edgecore_eap101,edgecore_eap104,cig_wf186w,edgecore_eap111"
default: "cig_wf188n,cig_wf196,hfcl_ion4xe,yuncore_fap655,yuncore_ax820,edgecore_oap101-6e,edgecore_eap102,edgecore_eap101,edgecore_eap104,cig_wf186w,edgecore_eap111,hfcl_ion4xi"
description: "the AP models to test"
ap_version:
required: true
@@ -752,6 +752,95 @@ jobs:
if: failure()
run: kubectl -n openwifi-qa01 logs deployment/owsec
test-hfcl-ion4xi:
needs: [ "vars", "build" ]
runs-on: ubuntu-latest
if: "!cancelled() && contains(fromJSON(needs.vars.outputs.ap_models), 'hfcl_ion4xi')"
env:
AP_MODEL: hfcl_ion4xi
steps:
- name: Set AP model output
id: ap_model
run: |
echo "model=${AP_MODEL}" >> $GITHUB_OUTPUT
- uses: actions/checkout@v3
- name: Set up Python
uses: actions/setup-python@v4
with:
python-version: "3.8"
# TODO WIFI-7839 delete when issue is resolved on AWS CLI side
- name: install kubectl
run: |
curl -s -LO "https://dl.k8s.io/release/v1.27.6/bin/linux/amd64/kubectl"
sudo install -o root -g root -m 0755 kubectl /usr/local/bin/kubectl
- name: get EKS access credentials
run: aws eks update-kubeconfig --name ${{ env.AWS_EKS_NAME }}
- name: prepare namespace name
id: namespace
run: |
NAMESPACE="testing-${{ github.run_id }}-$(echo ${{ steps.ap_model.outputs.model }} | tr '[:upper:]' '[:lower:]' | tr '_' '-')"
echo "name=${NAMESPACE}" >> $GITHUB_OUTPUT
- name: prepare configuration
run: |
cat << EOF > lab_info.json
${{ secrets.LAB_INFO_JSON }}
EOF
- name: run tests
uses: ./.github/actions/run-tests
with:
namespace: ${{ steps.namespace.outputs.name }}
testbed: basic-3b
marker_expression: "${{ needs.vars.outputs.marker_expression }}"
configuration_file: "./lab_info.json"
testing_docker_image: tip-tip-wlan-cloud-docker-repo.jfrog.io/cloud-sdk-nightly:${{ github.run_id }}
additional_args: '-o firmware="${{ needs.vars.outputs.ap_version }}"'
allure_results_artifact_name: "allure-results-${{ steps.ap_model.outputs.model }}"
dns_records_to_resolve: "sec-${{ needs.vars.outputs.existing_controller }}.cicd.lab.wlan.tip.build gw-${{ needs.vars.outputs.existing_controller }}.cicd.lab.wlan.tip.build fms-${{ needs.vars.outputs.existing_controller }}.cicd.lab.wlan.tip.build"
# necessary because if conditionals in composite actions are currently not respected
- name: get tests logs
if: always()
continue-on-error: true
run: |
podname=$(kubectl get pods -n ${{ steps.namespace.outputs.name }} --no-headers -o custom-columns=":metadata.name" -l job-name=testing | sed "s/pod\///")
kubectl logs --timestamps -n ${{ steps.namespace.outputs.name }} $podname || true
- name: delete namespace
if: always()
continue-on-error: true
run: kubectl delete ns --ignore-not-found=true --wait ${{ steps.namespace.outputs.name }}
- name: show gw logs
if: failure()
run: kubectl -n openwifi-qa01 logs deployment/owgw
- name: show fms logs
if: failure()
run: kubectl -n openwifi-qa01 logs deployment/owfms
- name: show prov logs
if: failure()
run: kubectl -n openwifi-qa01 logs deployment/owprov
- name: show analytics logs
if: failure()
run: kubectl -n openwifi-qa01 logs deployment/owanalytics
- name: show subscription (userportal) logs
if: failure()
run: kubectl -n openwifi-qa01 logs deployment/owsub
- name: show sec logs
if: failure()
run: kubectl -n openwifi-qa01 logs deployment/owsec
test-edgecore-eap101:
needs: ["vars", "build"]
@@ -1116,7 +1205,7 @@ jobs:
report:
if: "!cancelled()"
runs-on: ubuntu-latest
- needs: [vars, test-cig-wf188n, test-cig-wf196, test-yuncore-fap655, test-yuncore-ax820, test-edgecore-eap104, test-edgecore-oap101-6e, test-hfcl-ion4xe, test-edgecore-eap101, test-edgecore-eap102, test-cig-wf186w, test-edgecore-eap111]
+ needs: [vars, test-cig-wf188n, test-cig-wf196, test-yuncore-fap655, test-yuncore-ax820, test-edgecore-eap104, test-edgecore-oap101-6e, test-hfcl-ion4xe, test-edgecore-eap101, test-edgecore-eap102, test-cig-wf186w, test-edgecore-eap111, test-hfcl-ion4xi]
strategy:
fail-fast: false
matrix:
@@ -1167,7 +1256,7 @@ jobs:
# Cleanup
cleanup:
- needs: [test-cig-wf188n, test-cig-wf196, test-yuncore-fap655, test-yuncore-ax820, test-edgecore-eap104, test-edgecore-oap101-6e, test-hfcl-ion4xe, test-edgecore-eap101, test-edgecore-eap102, test-cig-wf186w, test-edgecore-eap111]
+ needs: [test-cig-wf188n, test-cig-wf196, test-yuncore-fap655, test-yuncore-ax820, test-edgecore-eap104, test-edgecore-oap101-6e, test-hfcl-ion4xe, test-edgecore-eap101, test-edgecore-eap102, test-cig-wf186w, test-edgecore-eap111, test-hfcl-ion4xi]
runs-on: ubuntu-latest
if: always()
steps:

View File (regression workflow)

@@ -22,7 +22,7 @@ on:
description: "revision of the Open Wifi Helm chart"
ap_models:
required: true
default: "cig_wf188n,cig_wf196,hfcl_ion4xe,yuncore_fap655,yuncore_ax820,edgecore_oap101-6e,edgecore_eap102,edgecore_eap101,edgecore_eap104,cig_wf186w,edgecore_eap111"
default: "cig_wf188n,cig_wf196,hfcl_ion4xe,yuncore_fap655,yuncore_ax820,edgecore_oap101-6e,edgecore_eap102,edgecore_eap101,edgecore_eap104,cig_wf186w,edgecore_eap111,hfcl_ion4xi"
description: "the AP models to test"
ap_version:
required: true
@@ -447,6 +447,328 @@ jobs:
if: failure()
run: kubectl -n openwifi-qa01 logs deployment/owsec
test-hfcl-ion4xi:
needs: [ "vars", "build", "test-hfcl-ion4xe" ]
runs-on: [ self-hosted, small ]
timeout-minutes: 1440
if: "!cancelled() && contains(fromJSON(needs.vars.outputs.ap_models), 'hfcl_ion4xi')"
env:
AP_MODEL: hfcl_ion4xi
steps:
- name: Set AP model output
id: ap_model
run: |
echo "model=${AP_MODEL}" >> $GITHUB_OUTPUT
- uses: actions/checkout@v3
- name: Set up Python
uses: actions/setup-python@v4
with:
python-version: "3.8"
# TODO WIFI-7839 delete when issue is resolved on AWS CLI side
- name: install kubectl
run: |
curl -s -LO "https://dl.k8s.io/release/v1.27.6/bin/linux/amd64/kubectl"
sudo install -o root -g root -m 0755 kubectl /usr/local/bin/kubectl
- name: install aws CLI tool
run: |
curl "https://awscli.amazonaws.com/awscli-exe-linux-x86_64.zip" -o "awscliv2.zip"
unzip awscliv2.zip
sudo ./aws/install
- name: get EKS access credentials
run: aws eks update-kubeconfig --name ${{ env.AWS_EKS_NAME }}
- name: prepare namespace name
id: namespace
run: |
NAMESPACE="regression-${{ github.run_id }}-$(echo ${{ steps.ap_model.outputs.model }} | tr '[:upper:]' '[:lower:]' | tr '_' '-')"
echo "name=${NAMESPACE}" >> $GITHUB_OUTPUT
- name: prepare configuration
run: |
cat << EOF > lab_info.json
${{ secrets.LAB_INFO_JSON }}
EOF
- name: run dfs tests
uses: ./.github/actions/run-tests
if: "!cancelled() && contains(fromJSON(needs.vars.outputs.marker_expressions), 'dfs_tests')"
with:
namespace: ${{ steps.namespace.outputs.name }}-dfs
testbed: basic-3b
marker_expression: "ow_regression_lf and dfs_tests"
configuration_file: "./lab_info.json"
testing_docker_image: tip-tip-wlan-cloud-docker-repo.jfrog.io/cloud-sdk-nightly:${{ github.run_id }}
allure_results_artifact_name: allure-results-${{ steps.ap_model.outputs.model }}-dfs_tests
dns_records_to_resolve: "sec-${{ needs.vars.outputs.existing_controller }}.cicd.lab.wlan.tip.build gw-${{ needs.vars.outputs.existing_controller }}.cicd.lab.wlan.tip.build fms-${{ needs.vars.outputs.existing_controller }}.cicd.lab.wlan.tip.build"
# necessary because if conditionals in composite actions are currently not respected
- name: get tests logs
if: always()
continue-on-error: true
run: |
podname=$(kubectl get pods -n ${{ steps.namespace.outputs.name }}-dfs --no-headers -o custom-columns=":metadata.name" -l job-name=testing | sed "s/pod\///")
kubectl logs --timestamps -n ${{ steps.namespace.outputs.name }}-dfs $podname || true
- name: delete namespace
if: always()
continue-on-error: true
run: kubectl delete ns --ignore-not-found=true --wait ${{ steps.namespace.outputs.name }}-dfs
- name: run multipsk tests
uses: ./.github/actions/run-tests
if: "!cancelled() && contains(fromJSON(needs.vars.outputs.marker_expressions), 'multi_psk_tests')"
with:
namespace: ${{ steps.namespace.outputs.name }}-multipsk
testbed: basic-3b
marker_expression: "ow_regression_lf and multi_psk_tests"
configuration_file: "./lab_info.json"
testing_docker_image: tip-tip-wlan-cloud-docker-repo.jfrog.io/cloud-sdk-nightly:${{ github.run_id }}
allure_results_artifact_name: allure-results-${{ steps.ap_model.outputs.model }}-multi_psk_tests
dns_records_to_resolve: "sec-${{ needs.vars.outputs.existing_controller }}.cicd.lab.wlan.tip.build gw-${{ needs.vars.outputs.existing_controller }}.cicd.lab.wlan.tip.build fms-${{ needs.vars.outputs.existing_controller }}.cicd.lab.wlan.tip.build"
# necessary because if conditionals in composite actions are currently not respected
- name: get tests logs
if: always()
continue-on-error: true
run: |
podname=$(kubectl get pods -n ${{ steps.namespace.outputs.name }}-multipsk --no-headers -o custom-columns=":metadata.name" -l job-name=testing | sed "s/pod\///")
kubectl logs --timestamps -n ${{ steps.namespace.outputs.name }}-multipsk $podname || true
- name: delete namespace
if: always()
continue-on-error: true
run: kubectl delete ns --ignore-not-found=true --wait ${{ steps.namespace.outputs.name }}-multipsk
- name: run rate_limiting tests
uses: ./.github/actions/run-tests
if: "!cancelled() && contains(fromJSON(needs.vars.outputs.marker_expressions), 'rate_limiting_tests')"
with:
namespace: ${{ steps.namespace.outputs.name }}-rate-limiting
testbed: basic-3b
marker_expression: "ow_regression_lf and rate_limiting_tests"
configuration_file: "./lab_info.json"
testing_docker_image: tip-tip-wlan-cloud-docker-repo.jfrog.io/cloud-sdk-nightly:${{ github.run_id }}
allure_results_artifact_name: allure-results-${{ steps.ap_model.outputs.model }}-rate_limiting_tests
dns_records_to_resolve: "sec-${{ needs.vars.outputs.existing_controller }}.cicd.lab.wlan.tip.build gw-${{ needs.vars.outputs.existing_controller }}.cicd.lab.wlan.tip.build fms-${{ needs.vars.outputs.existing_controller }}.cicd.lab.wlan.tip.build"
# necessary because if conditionals in composite actions are currently not respected
- name: get tests logs
if: always()
continue-on-error: true
run: |
podname=$(kubectl get pods -n ${{ steps.namespace.outputs.name }}-rate-limiting --no-headers -o custom-columns=":metadata.name" -l job-name=testing | sed "s/pod\///")
kubectl logs --timestamps -n ${{ steps.namespace.outputs.name }}-rate-limiting $podname || true
- name: delete namespace
if: always()
continue-on-error: true
run: kubectl delete ns --ignore-not-found=true --wait ${{ steps.namespace.outputs.name }}-rate-limiting
- name: run rate_limiting_with_radius tests
uses: ./.github/actions/run-tests
if: "!cancelled() && contains(fromJSON(needs.vars.outputs.marker_expressions), 'rate_limiting_with_radius_tests')"
with:
namespace: ${{ steps.namespace.outputs.name }}-rate-limiting-radius
testbed: basic-3b
marker_expression: "ow_regression_lf and rate_limiting_with_radius_tests"
configuration_file: "./lab_info.json"
testing_docker_image: tip-tip-wlan-cloud-docker-repo.jfrog.io/cloud-sdk-nightly:${{ github.run_id }}
allure_results_artifact_name: allure-results-${{ steps.ap_model.outputs.model }}-rate_limiting_with_radius_tests
dns_records_to_resolve: "sec-${{ needs.vars.outputs.existing_controller }}.cicd.lab.wlan.tip.build gw-${{ needs.vars.outputs.existing_controller }}.cicd.lab.wlan.tip.build fms-${{ needs.vars.outputs.existing_controller }}.cicd.lab.wlan.tip.build"
# necessary because if conditionals in composite actions are currently not respected
- name: get tests logs
if: always()
continue-on-error: true
run: |
podname=$(kubectl get pods -n ${{ steps.namespace.outputs.name }}-rate-limiting-radius --no-headers -o custom-columns=":metadata.name" -l job-name=testing | sed "s/pod\///")
kubectl logs --timestamps -n ${{ steps.namespace.outputs.name }}-rate-limiting-radius $podname || true
- name: delete namespace
if: always()
continue-on-error: true
run: kubectl delete ns --ignore-not-found=true --wait ${{ steps.namespace.outputs.name }}-rate-limiting-radius
- name: run dynamic_vlan tests
uses: ./.github/actions/run-tests
if: "!cancelled() && contains(fromJSON(needs.vars.outputs.marker_expressions), 'dynamic_vlan_tests')"
with:
namespace: ${{ steps.namespace.outputs.name }}-dynamic-vlan
testbed: basic-3b
marker_expression: "ow_regression_lf and dynamic_vlan_tests"
configuration_file: "./lab_info.json"
testing_docker_image: tip-tip-wlan-cloud-docker-repo.jfrog.io/cloud-sdk-nightly:${{ github.run_id }}
allure_results_artifact_name: allure-results-${{ steps.ap_model.outputs.model }}-dynamic_vlan_tests
dns_records_to_resolve: "sec-${{ needs.vars.outputs.existing_controller }}.cicd.lab.wlan.tip.build gw-${{ needs.vars.outputs.existing_controller }}.cicd.lab.wlan.tip.build fms-${{ needs.vars.outputs.existing_controller }}.cicd.lab.wlan.tip.build"
# necessary because if conditionals in composite actions are currently not respected
- name: get tests logs
if: always()
continue-on-error: true
run: |
podname=$(kubectl get pods -n ${{ steps.namespace.outputs.name }}-dynamic-vlan --no-headers -o custom-columns=":metadata.name" -l job-name=testing | sed "s/pod\///")
kubectl logs --timestamps -n ${{ steps.namespace.outputs.name }}-dynamic-vlan $podname || true
- name: delete namespace
if: always()
continue-on-error: true
run: kubectl delete ns --ignore-not-found=true --wait ${{ steps.namespace.outputs.name }}-dynamic-vlan
- name: run multi_vlan tests
uses: ./.github/actions/run-tests
if: "!cancelled() && contains(fromJSON(needs.vars.outputs.marker_expressions), 'multi_vlan_tests')"
with:
namespace: ${{ steps.namespace.outputs.name }}-multi-vlan
testbed: basic-3b
marker_expression: "ow_regression_lf and multi_vlan_tests"
configuration_file: "./lab_info.json"
testing_docker_image: tip-tip-wlan-cloud-docker-repo.jfrog.io/cloud-sdk-nightly:${{ github.run_id }}
allure_results_artifact_name: allure-results-${{ steps.ap_model.outputs.model }}-multi_vlan_tests
dns_records_to_resolve: "sec-${{ needs.vars.outputs.existing_controller }}.cicd.lab.wlan.tip.build gw-${{ needs.vars.outputs.existing_controller }}.cicd.lab.wlan.tip.build fms-${{ needs.vars.outputs.existing_controller }}.cicd.lab.wlan.tip.build"
# necessary because if conditionals in composite actions are currently not respected
- name: get tests logs
if: always()
continue-on-error: true
run: |
podname=$(kubectl get pods -n ${{ steps.namespace.outputs.name }}-multi-vlan --no-headers -o custom-columns=":metadata.name" -l job-name=testing | sed "s/pod\///")
kubectl logs --timestamps -n ${{ steps.namespace.outputs.name }}-multi-vlan $podname || true
- name: delete namespace
if: always()
continue-on-error: true
run: kubectl delete ns --ignore-not-found=true --wait ${{ steps.namespace.outputs.name }}-multi-vlan
- name: run strict forwarding tests
uses: ./.github/actions/run-tests
if: "!cancelled() && contains(fromJSON(needs.vars.outputs.marker_expressions), 'strict_forwarding_tests')"
with:
namespace: ${{ steps.namespace.outputs.name }}-strict-forwarding
testbed: basic-3b
marker_expression: "ow_regression_lf and strict_forwarding_tests"
configuration_file: "./lab_info.json"
testing_docker_image: tip-tip-wlan-cloud-docker-repo.jfrog.io/cloud-sdk-nightly:${{ github.run_id }}
allure_results_artifact_name: allure-results-${{ steps.ap_model.outputs.model }}-strict_forwarding_tests
dns_records_to_resolve: "sec-${{ needs.vars.outputs.existing_controller }}.cicd.lab.wlan.tip.build gw-${{ needs.vars.outputs.existing_controller }}.cicd.lab.wlan.tip.build fms-${{ needs.vars.outputs.existing_controller }}.cicd.lab.wlan.tip.build"
# necessary because if conditionals in composite actions are currently not respected
- name: get tests logs
if: always()
continue-on-error: true
run: |
podname=$(kubectl get pods -n ${{ steps.namespace.outputs.name }}-strict-forwarding --no-headers -o custom-columns=":metadata.name" -l job-name=testing | sed "s/pod\///")
kubectl logs --timestamps -n ${{ steps.namespace.outputs.name }}-strict-forwarding $podname || true
- name: delete namespace
if: always()
continue-on-error: true
run: kubectl delete ns --ignore-not-found=true --wait ${{ steps.namespace.outputs.name }}-strict-forwarding
- name: run captive portal tests
uses: ./.github/actions/run-tests
if: "!cancelled() && contains(fromJSON(needs.vars.outputs.marker_expressions), 'advanced_captive_portal_tests')"
with:
namespace: ${{ steps.namespace.outputs.name }}-captive-portal
testbed: basic-3b
marker_expression: "ow_regression_lf and advanced_captive_portal_tests"
configuration_file: "./lab_info.json"
testing_docker_image: tip-tip-wlan-cloud-docker-repo.jfrog.io/cloud-sdk-nightly:${{ github.run_id }}
allure_results_artifact_name: allure-results-${{ steps.ap_model.outputs.model }}-advanced_captive_portal_tests
dns_records_to_resolve: "sec-${{ needs.vars.outputs.existing_controller }}.cicd.lab.wlan.tip.build gw-${{ needs.vars.outputs.existing_controller }}.cicd.lab.wlan.tip.build fms-${{ needs.vars.outputs.existing_controller }}.cicd.lab.wlan.tip.build"
# necessary because if conditionals in composite actions are currently not respected
- name: get tests logs
if: always()
continue-on-error: true
run: |
podname=$(kubectl get pods -n ${{ steps.namespace.outputs.name }}-captive-portal --no-headers -o custom-columns=":metadata.name" -l job-name=testing | sed "s/pod\///")
kubectl logs --timestamps -n ${{ steps.namespace.outputs.name }}-captive-portal $podname || true
- name: delete namespace
if: always()
continue-on-error: true
run: kubectl delete ns --ignore-not-found=true --wait ${{ steps.namespace.outputs.name }}-captive-portal
- name: run firmware upgrade & downgrade tests
uses: ./.github/actions/run-tests
if: "!cancelled() && contains(fromJSON(needs.vars.outputs.marker_expressions), 'firmware_upgrade_downgrade')"
with:
namespace: ${{ steps.namespace.outputs.name }}-fw-upgrade-downgrade
testbed: basic-3b
marker_expression: "ow_regression_lf and firmware_upgrade_downgrade"
configuration_file: "./lab_info.json"
testing_docker_image: tip-tip-wlan-cloud-docker-repo.jfrog.io/cloud-sdk-nightly:${{ github.run_id }}
allure_results_artifact_name: allure-results-${{ steps.ap_model.outputs.model }}-firmware_upgrade_downgrade
dns_records_to_resolve: "sec-${{ needs.vars.outputs.existing_controller }}.cicd.lab.wlan.tip.build gw-${{ needs.vars.outputs.existing_controller }}.cicd.lab.wlan.tip.build fms-${{ needs.vars.outputs.existing_controller }}.cicd.lab.wlan.tip.build"
# necessary because if conditionals in composite actions are currently not respected
- name: get tests logs
if: always()
continue-on-error: true
run: |
podname=$(kubectl get pods -n ${{ steps.namespace.outputs.name }}-fw-upgrade-downgrade --no-headers -o custom-columns=":metadata.name" -l job-name=testing | sed "s/pod\///")
kubectl logs --timestamps -n ${{ steps.namespace.outputs.name }}-fw-upgrade-downgrade $podname || true
- name: delete namespace
if: always()
continue-on-error: true
run: kubectl delete ns --ignore-not-found=true --wait ${{ steps.namespace.outputs.name }}-fw-upgrade-downgrade
- name: run ap support bundle tests
uses: ./.github/actions/run-tests
if: "!cancelled() && contains(fromJSON(needs.vars.outputs.marker_expressions), 'asb_tests')"
with:
namespace: ${{ steps.namespace.outputs.name }}-asb
testbed: basic-3b
marker_expression: "ow_regression_lf and asb_tests"
configuration_file: "./lab_info.json"
testing_docker_image: tip-tip-wlan-cloud-docker-repo.jfrog.io/cloud-sdk-nightly:${{ github.run_id }}
allure_results_artifact_name: allure-results-${{ steps.ap_model.outputs.model }}-asb_tests
dns_records_to_resolve: "sec-${{ needs.vars.outputs.existing_controller }}.cicd.lab.wlan.tip.build gw-${{ needs.vars.outputs.existing_controller }}.cicd.lab.wlan.tip.build fms-${{ needs.vars.outputs.existing_controller }}.cicd.lab.wlan.tip.build"
# necessary because if conditionals in composite actions are currently not respected
- name: get tests logs
if: always()
continue-on-error: true
run: |
podname=$(kubectl get pods -n ${{ steps.namespace.outputs.name }}-asb --no-headers -o custom-columns=":metadata.name" -l job-name=testing | sed "s/pod\///")
kubectl logs --timestamps -n ${{ steps.namespace.outputs.name }}-asb $podname || true
- name: delete namespace
if: always()
continue-on-error: true
run: kubectl delete ns --ignore-not-found=true --wait ${{ steps.namespace.outputs.name }}-asb
- name: show gw logs
if: failure()
run: kubectl -n openwifi-qa01 logs deployment/owgw
- name: show fms logs
if: failure()
run: kubectl -n openwifi-qa01 logs deployment/owfms
- name: show prov logs
if: failure()
run: kubectl -n openwifi-qa01 logs deployment/owprov
- name: show analytics logs
if: failure()
run: kubectl -n openwifi-qa01 logs deployment/owanalytics
- name: show subscription (userportal) logs
if: failure()
run: kubectl -n openwifi-qa01 logs deployment/owsub
- name: show sec logs
if: failure()
run: kubectl -n openwifi-qa01 logs deployment/owsec
test-edgecore-eap101:
needs: ["vars", "build"]
@@ -3677,7 +3999,7 @@ jobs:
report:
if: "!cancelled()"
runs-on: ubuntu-latest
- needs: [vars, test-hfcl-ion4xe, test-edgecore-eap101, test-yuncore-fap655, test-cig-wf188n, test-edgecore-eap102, test-edgecore-eap104, test-edgecore-oap101-6e, test-yuncore-ax820, test-cig-wf186w, test-cig-wf196, test-edgecore-eap111]
+ needs: [vars, test-hfcl-ion4xe, test-edgecore-eap101, test-yuncore-fap655, test-cig-wf188n, test-edgecore-eap102, test-edgecore-eap104, test-edgecore-oap101-6e, test-yuncore-ax820, test-cig-wf186w, test-cig-wf196, test-edgecore-eap111, test-hfcl-ion4xi]
strategy:
fail-fast: false
matrix:
@@ -3730,7 +4052,7 @@ jobs:
# Cleanup
cleanup:
- needs: [test-hfcl-ion4xe, test-edgecore-eap101, test-yuncore-fap655, test-cig-wf188n, test-edgecore-eap102, test-edgecore-eap104, test-edgecore-oap101-6e, test-yuncore-ax820, test-cig-wf186w, test-cig-wf196, test-edgecore-eap111]
+ needs: [test-hfcl-ion4xe, test-edgecore-eap101, test-yuncore-fap655, test-cig-wf188n, test-edgecore-eap102, test-edgecore-eap104, test-edgecore-oap101-6e, test-yuncore-ax820, test-cig-wf186w, test-cig-wf196, test-edgecore-eap111, test-hfcl-ion4xi]
runs-on: ubuntu-latest
if: always()
steps:
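For reference, every new job derives its Kubernetes namespaces the same way; a condensed sketch of the step with the model hard-coded (the run id in the trailing comment is hypothetical, purely for illustration):

# prefix is performance-, testing-, or regression- depending on the workflow; in the
# performance and regression workflows a marker-group suffix (-dtt, -ssdbt, -wct, -dfs,
# -multipsk, -asb, ...) is appended when the namespace is passed to the run-tests action.
- name: prepare namespace name
  id: namespace
  run: |
    # hfcl_ion4xi -> hfcl-ion4xi (lower-cased, underscores replaced by hyphens)
    NAMESPACE="regression-${{ github.run_id }}-$(echo hfcl_ion4xi | tr '[:upper:]' '[:lower:]' | tr '_' '-')"
    echo "name=${NAMESPACE}" >> $GITHUB_OUTPUT   # e.g. regression-9876543210-hfcl-ion4xi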