# wlan-testing/.github/workflows/manual.yml

name: manual pytest execution

env:
  # third parties
  DOCKER_SERVER: tip-tip-wlan-cloud-docker-repo.jfrog.io
  DOCKER_USER_NAME: wlan-testing-cicd
  DOCKER_USER_PASSWORD: ${{ secrets.DOCKER_USER_PASSWORD }}
  # AWS credentials
  AWS_EKS_NAME: tip-wlan-main
  AWS_DEFAULT_OUTPUT: json
  AWS_DEFAULT_REGION: us-east-2
  AWS_ACCOUNT_ID: ${{ secrets.AWS_ACCOUNT_ID }}
  AWS_ACCESS_KEY_ID: ${{ secrets.AWS_CLIENT_ID }}
  AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_CLIENT_KEY }}
  # Cloud SDK certs
  CACERT: ${{ secrets.CACERT }}
  CAKEY: ${{ secrets.CAKEY }}
  ALLURE_CLI_VERSION: 2.14.0

on:
  workflow_dispatch:
    inputs:
      testbed:
        default: 'basic-01'
        description: 'testbed to execute tests against'
        required: false
      firmware:
        default: 'next-latest'
        description: 'target firmware version, specified as <branch>-<commit | latest>'
        required: false
      marker_expression:
        default: ''
        description: "marker expression that will be passed to pytest's -m"
        required: false
      additional_args:
        default: ''
        description: 'additional arguments that will be appended to the pytest invocation'
        required: false
defaults:
  run:
    shell: bash

jobs:
  build:
    runs-on: ubuntu-latest
    steps:
      # checkout needed repositories
      - name: Checkout Testing repo
        uses: actions/checkout@v2
        with:
          path: wlan-testing
      - name: Checkout LANforge scripts
        uses: actions/checkout@v2
        with:
          path: wlan-lanforge-scripts
          repository: Telecominfraproject/wlan-lanforge-scripts
      - name: import LANforge scripts
        working-directory: wlan-testing
        run: ./sync_repos.bash

      # build and push docker image
      - name: docker login
        run: docker login ${{ env.DOCKER_SERVER }} -u ${{ env.DOCKER_USER_NAME }} -p ${{ env.DOCKER_USER_PASSWORD }}
      - name: build docker image
        working-directory: wlan-testing
        run: docker build -t ${{ env.DOCKER_SERVER }}/cloud-sdk-nightly:${{ github.run_id }} -f docker/Dockerfile .
      - name: push docker image
        run: docker push ${{ env.DOCKER_SERVER }}/cloud-sdk-nightly:${{ github.run_id }}
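
  # the test job runs on a self-hosted runner: it installs the required tooling
  # (a JRE for Allure, the AWS CLI, kubectl and the Allure CLI), then drives the
  # pytest run as a Kubernetes Job inside the EKS cluster and collects the results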
  test:
    runs-on: [ self-hosted, small ]
    needs: [ build ]
    steps:
      - name: install JRE
        run: |
          sudo apt-get update
          sudo apt-get install -y default-jre
      - name: install aws CLI tool
        run: |
          curl "https://awscli.amazonaws.com/awscli-exe-linux-x86_64.zip" -o "awscliv2.zip"
          unzip awscliv2.zip
          sudo ./aws/install
      - name: get EKS access credentials
        run: aws eks update-kubeconfig --name ${{ env.AWS_EKS_NAME }}
      - name: install kubectl
        run: |
          curl -LO "https://dl.k8s.io/release/$(curl -L -s https://dl.k8s.io/release/stable.txt)/bin/linux/amd64/kubectl"
          sudo install -o root -g root -m 0755 kubectl /usr/local/bin/kubectl
      - name: install Allure CLI tool
        run: |
          wget https://repo.maven.apache.org/maven2/io/qameta/allure/allure-commandline/${{ env.ALLURE_CLI_VERSION }}/allure-commandline-${{ env.ALLURE_CLI_VERSION }}.tgz
          tar -xzf allure-commandline-${{ env.ALLURE_CLI_VERSION }}.tgz
      - name: set job name
        id: job
        run: echo "::set-output name=name::manual-${{ github.run_id }}"
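      # the generated name is reused below as both the Kubernetes Job name and the
      # job-name label used to locate its pod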
      - name: prepare namespace
        id: namespace
        run: |
          NAMESPACE="testing-${{ github.run_id }}-${{ github.event.inputs.testbed }}"
          kubectl create ns $NAMESPACE
          kubectl config set-context --current --namespace=$NAMESPACE
          echo "::set-output name=name::${NAMESPACE}"
      - name: create configuration.py secret
        run: |
          cat << EOF > configuration.py
          ${{ secrets.LAB_CONFIGURATION }}
          EOF
          kubectl create secret generic configuration --from-file=configuration=./configuration.py
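      # the step below renders a Kubernetes Job manifest through a heredoc and applies it
      # with kubectl; pytest runs inside that Job's container while this step follows the
      # logs, waits for test_everything.xml to appear, copies out the Allure results, and
      # finally exits with the container's exit code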
      - name: run tests
        run: |
          cat <<EOF | kubectl apply -f -
          apiVersion: batch/v1
          kind: Job
          metadata:
            name: "${{ steps.job.outputs.name }}"
          spec:
            template:
              spec:
                containers:
                  - name: tests
                    image: ${{ env.DOCKER_SERVER }}/cloud-sdk-nightly:${{ github.run_id }}
                    command:
                      - /bin/bash
                      - -x
                      - -c
                      - |
                        cd tests
                        pytest -m "${{ github.event.inputs.marker_expression }}" -s -vvv --testbed="${{ github.event.inputs.testbed }}" -o firmware="${{ github.event.inputs.firmware }}" --alluredir=/tmp/allure-results ${{ github.event.inputs.additional_args }}
                        ret=\$?
                        # sleep some time to be able to download the Allure results
                        sleep 60
                        exit \$ret
                    volumeMounts:
                      - name: configuration
                        mountPath: "/wlan-testing/tests/configuration.py"
                        subPath: configuration
                        readOnly: true
                imagePullSecrets:
                  - name: tip-docker-registry-key
                restartPolicy: Never
                volumes:
                  - name: configuration
                    secret:
                      secretName: configuration
            backoffLimit: 0
          EOF

          # wait for pod to spawn
          sleep 1
          podname=$(kubectl get pods --no-headers -o custom-columns=":metadata.name" -l job-name="${{ steps.job.outputs.name }}" | sed "s/pod\///")
          kubectl wait "pod/$podname" --for condition=ready --timeout=600s
          kubectl logs -f $podname &

          # poll until the JUnit report shows up in the container, then copy it out
          until [ -s test_everything.xml ]
          do
            sleep 10
            kubectl cp $podname:/wlan-testing/tests/test_everything.xml test_everything.xml >/dev/null 2>&1
          done
          echo "tests completed"

          echo "downloading allure results..."
          kubectl cp $podname:/tmp/allure-results allure-results >/dev/null 2>&1

          echo "waiting for pod to exit"
          kubectl logs -f $podname >/dev/null 2>&1

          # propagate the test container's exit code as the result of this step
          exit $(kubectl get pod $podname --output="jsonpath={.status.containerStatuses[].state.terminated.exitCode}")
      - name: upload Allure results as artifact
        if: always()
        uses: actions/upload-artifact@v2
        with:
          name: allure-results
          path: allure-results
      - name: generate Allure report
        run: allure-${{ env.ALLURE_CLI_VERSION }}/bin/allure generate allure-results
      - name: upload Allure report as artifact
        uses: actions/upload-artifact@v2
        with:
          name: allure-report
          path: allure-report
      - name: cleanup
        if: always()
        run: |
          kubectl delete ns "${{ steps.namespace.outputs.name }}" --wait=true
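
  # delete the per-run image from the Docker registry once testing is done,
  # even if the build or test job failed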
  cleanup:
    needs: [ test ]
    runs-on: ubuntu-latest
    if: always()
    steps:
      - name: cleanup Docker image
        run: curl -u${{ env.DOCKER_USER_NAME }}:${{ env.DOCKER_USER_PASSWORD }} -X DELETE "https://tip.jfrog.io/artifactory/tip-wlan-cloud-docker-repo/cloud-sdk-nightly/${{ github.run_id }}"