From d4df9e8a3af3016b41f37ba4b96334433dee302a Mon Sep 17 00:00:00 2001 From: Ryan Cragun Date: Wed, 27 Sep 2023 10:53:12 -0600 Subject: [PATCH] Backport [QT-602] Run `proxy` and `agent` test scenarios (#23176) into release/1.14.x (#23302) * [QT-602] Run `proxy` and `agent` test scenarios (#23176) Update our `proxy` and `agent` scenarios to support new variants and perform baseline verification and their scenario specific verification. We integrate these updated scenarios into the pipeline by adding them to artifact samples. We've also improved the reliability of the `autopilot` and `replication` scenarios by refactoring our IP address gathering. Previously, we'd ask vault for the primary IP address and use some Terraform logic to determine followers. The leader IP address gathering script was also implicitly responsible for ensuring that a found leader was within a given group of hosts, and thus waiting for a given cluster to have a leader, and also for doing some arithmetic and outputting `replication` specific output data. We've broken these responsibilities into individual modules, improved their error messages, and fixed various races and bugs, including: * Fix a race between creating the file audit device and installing and starting vault in the `replication` scenario. * Fix how we determine our leader and follower IP addresses. We now query vault instead of a prior implementation that inferred the followers and sometimes did not allow all nodes to be an expected leader. * Fix a bug where we'd always fail on the first wrong condition in the `vault_verify_performance_replication` module. We also performed some maintenance tasks on Enos scenarios by updating our references from `oss` to `ce` to handle the naming and license changes. We also enabled `shellcheck` linting for enos module scripts. * Rename `oss` to `ce` for license and naming changes. * Convert template enos scripts to scripts that take environment variables. 
* Add `shellcheck` linting for enos module scripts. * Add additional `backend` and `seal` support to `proxy` and `agent` scenarios. * Update scenarios to include all baseline verification. * Add `proxy` and `agent` scenarios to artifact samples. * Remove IP address verification from the `vault_get_cluster_ips` modules and implement a new `vault_wait_for_leader` module. * Determine follower IP addresses by querying vault in the `vault_get_cluster_ips` module. * Move replication specific behavior out of the `vault_get_cluster_ips` module and into its own `replication_data` module. * Extend initial version support for the `upgrade` and `autopilot` scenarios. We also discovered an issue with undo_logs that has been described in VAULT-20259. As such, we've disabled the undo_logs check until it has been fixed. * actions: fix actionlint error and linting logic (#23305) Signed-off-by: Ryan Cragun --- ...build-vault-oss.yml => build-vault-ce.yml} | 0 .github/workflows/build.yml | 24 +- .github/workflows/enos-lint.yml | 30 +- .../workflows/enos-release-testing-oss.yml | 12 +- .github/workflows/enos-run-k8s.yml | 4 +- .github/workflows/test-enos-scenario-ui.yml | 2 +- enos/Makefile | 10 +- enos/enos-globals.hcl | 6 +- enos/enos-modules.hcl | 42 ++- enos/enos-samples-ce-build.hcl | 294 ++++++++++++++++++ enos/enos-samples-ce-release.hcl | 294 ++++++++++++++++++ enos/enos-samples-oss-build.hcl | 142 --------- enos/enos-samples-oss-release.hcl | 142 --------- enos/enos-scenario-agent.hcl | 272 ++++++++++++++-- enos/enos-scenario-autopilot.hcl | 121 ++++++- enos/enos-scenario-proxy.hcl | 269 ++++++++++++++-- enos/enos-scenario-replication.hcl | 146 +++++++-- enos/enos-scenario-smoke.hcl | 63 ++-- enos/enos-scenario-ui.hcl | 34 +- enos/enos-scenario-upgrade.hcl | 136 +++++--- enos/enos-terraform.hcl | 2 +- enos/enos-variables.hcl | 14 +- enos/enos.vars.hcl | 22 +- enos/k8s/enos-scenario-k8s.hcl | 10 +- enos/modules/backend_consul/main.tf | 2 +- 
enos/modules/backend_consul/variables.tf | 2 +- .../get_local_metadata/scripts/build_date.sh | 2 +- .../get_local_metadata/scripts/version.sh | 2 +- enos/modules/k8s_deploy_vault/raft-config.hcl | 8 - .../scripts/smoke-verify-replication.sh | 12 +- .../scripts/smoke-verify-ui.sh | 4 +- enos/modules/k8s_vault_verify_version/main.tf | 8 +- .../scripts/smoke-verify-version.sh | 33 +- enos/modules/replication_data/main.tf | 104 +++++++ enos/modules/vault_agent/main.tf | 14 +- .../set-up-approle-and-agent.sh | 28 +- .../vault_artifactory_artifact/locals.tf | 6 +- .../vault_artifactory_artifact/main.tf | 4 +- enos/modules/vault_cluster/main.tf | 128 ++++---- .../install-packages.sh | 14 +- .../vault-write-license.sh | 9 +- enos/modules/vault_cluster/variables.tf | 2 +- enos/modules/vault_get_cluster_ips/main.tf | 141 ++++----- .../scripts/get-follower-private-ips.sh | 53 ++++ .../scripts/get-leader-private-ip.sh | 47 +-- enos/modules/vault_proxy/main.tf | 26 +- .../set-up-approle-and-proxy.sh | 18 +- .../{templates => scripts}/use-proxy.sh | 6 +- enos/modules/vault_raft_remove_peer/main.tf | 12 +- .../raft-remove-peer.sh | 10 +- enos/modules/vault_setup_perf_primary/main.tf | 2 +- .../scripts/configure-vault-pr-primary.sh | 2 +- .../vault_unseal_nodes/scripts/unseal-node.sh | 15 +- .../scripts/wait-until-sealed.sh | 4 +- enos/modules/vault_upgrade/main.tf | 24 +- .../get-follower-public-ips.sh | 6 +- .../get-leader-public-ip.sh | 7 +- .../{templates => scripts}/restart-vault.sh | 0 .../modules/vault_verify_agent_output/main.tf | 12 +- .../scripts/verify-vault-agent-output.sh | 16 + .../templates/verify-vault-agent-output.sh | 16 - enos/modules/vault_verify_autopilot/main.tf | 14 +- .../scripts/smoke-verify-autopilot.sh | 43 +++ .../templates/smoke-verify-autopilot.sh | 37 --- .../scripts/verify-replication-status.sh | 64 ++-- .../vault_verify_raft_auto_join_voter/main.tf | 14 +- .../verify-raft-auto-join-voter.sh | 8 +- .../scripts/verify-data.sh | 13 +- 
enos/modules/vault_verify_replication/main.tf | 8 +- .../smoke-verify-replication.sh | 12 +- enos/modules/vault_verify_ui/main.tf | 8 +- .../scripts/smoke-verify-ui.sh | 21 ++ .../templates/smoke-verify-ui.sh | 17 - enos/modules/vault_verify_ui/variables.tf | 7 +- enos/modules/vault_verify_undo_logs/main.tf | 5 +- .../scripts/smoke-verify-undo-logs.sh | 42 +-- enos/modules/vault_verify_unsealed/main.tf | 11 +- .../verify-vault-node-unsealed.sh | 10 +- enos/modules/vault_verify_version/main.tf | 18 +- .../verify-cluster-version.sh | 19 +- .../scripts/smoke-enable-secrets-kv.sh | 20 +- .../scripts/smoke-write-test-data.sh | 18 +- enos/modules/vault_wait_for_leader/main.tf | 68 ++++ .../scripts/wait-for-leader.sh | 53 ++++ 84 files changed, 2392 insertions(+), 1028 deletions(-) rename .github/workflows/{build-vault-oss.yml => build-vault-ce.yml} (100%) create mode 100644 enos/enos-samples-ce-build.hcl create mode 100644 enos/enos-samples-ce-release.hcl delete mode 100644 enos/enos-samples-oss-build.hcl delete mode 100644 enos/enos-samples-oss-release.hcl create mode 100644 enos/modules/replication_data/main.tf rename enos/modules/vault_agent/{templates => scripts}/set-up-approle-and-agent.sh (70%) rename enos/modules/vault_cluster/{templates => scripts}/install-packages.sh (76%) rename enos/modules/vault_cluster/{templates => scripts}/vault-write-license.sh (76%) create mode 100644 enos/modules/vault_get_cluster_ips/scripts/get-follower-private-ips.sh rename enos/modules/vault_proxy/{templates => scripts}/set-up-approle-and-proxy.sh (78%) rename enos/modules/vault_proxy/{templates => scripts}/use-proxy.sh (86%) rename enos/modules/vault_raft_remove_peer/{templates => scripts}/raft-remove-peer.sh (66%) rename enos/modules/vault_upgrade/{templates => scripts}/get-follower-public-ips.sh (78%) rename enos/modules/vault_upgrade/{templates => scripts}/get-leader-public-ip.sh (74%) rename enos/modules/vault_upgrade/{templates => scripts}/restart-vault.sh (100%) create mode 
100644 enos/modules/vault_verify_agent_output/scripts/verify-vault-agent-output.sh delete mode 100644 enos/modules/vault_verify_agent_output/templates/verify-vault-agent-output.sh create mode 100755 enos/modules/vault_verify_autopilot/scripts/smoke-verify-autopilot.sh delete mode 100755 enos/modules/vault_verify_autopilot/templates/smoke-verify-autopilot.sh rename enos/modules/vault_verify_raft_auto_join_voter/{templates => scripts}/verify-raft-auto-join-voter.sh (62%) rename enos/modules/vault_verify_replication/{templates => scripts}/smoke-verify-replication.sh (82%) create mode 100644 enos/modules/vault_verify_ui/scripts/smoke-verify-ui.sh delete mode 100644 enos/modules/vault_verify_ui/templates/smoke-verify-ui.sh rename enos/modules/vault_verify_unsealed/{templates => scripts}/verify-vault-node-unsealed.sh (68%) rename enos/modules/vault_verify_version/{templates => scripts}/verify-cluster-version.sh (79%) create mode 100644 enos/modules/vault_wait_for_leader/main.tf create mode 100644 enos/modules/vault_wait_for_leader/scripts/wait-for-leader.sh diff --git a/.github/workflows/build-vault-oss.yml b/.github/workflows/build-vault-ce.yml similarity index 100% rename from .github/workflows/build-vault-oss.yml rename to .github/workflows/build-vault-ce.yml diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index 497e562b35..30ce6357e9 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -118,7 +118,7 @@ jobs: - goos: windows goarch: arm fail-fast: true - uses: ./.github/workflows/build-vault-oss.yml + uses: ./.github/workflows/build-vault-ce.yml with: create-packages: false goarch: ${{ matrix.goarch }} @@ -139,7 +139,7 @@ jobs: goos: [linux] goarch: [arm, arm64, 386, amd64] fail-fast: true - uses: ./.github/workflows/build-vault-oss.yml + uses: ./.github/workflows/build-vault-ce.yml with: goarch: ${{ matrix.goarch }} goos: ${{ matrix.goos }} @@ -159,7 +159,7 @@ jobs: goos: [darwin] goarch: [amd64, arm64] fail-fast: true 
- uses: ./.github/workflows/build-vault-oss.yml + uses: ./.github/workflows/build-vault-ce.yml with: create-packages: false goarch: ${{ matrix.goarch }} @@ -236,17 +236,17 @@ jobs: fail-fast: false matrix: include: - - sample-name: build_oss_linux_amd64_deb + - sample-name: build_ce_linux_amd64_deb build-artifact-name: vault_${{ needs.product-metadata.outputs.vault-version-package }}-1_amd64.deb - - sample-name: build_oss_linux_arm64_deb + - sample-name: build_ce_linux_arm64_deb build-artifact-name: vault_${{ needs.product-metadata.outputs.vault-version-package }}-1_arm64.deb - - sample-name: build_oss_linux_amd64_rpm + - sample-name: build_ce_linux_amd64_rpm build-artifact-name: vault-${{ needs.product-metadata.outputs.vault-version-package }}-1.x86_64.rpm - - sample-name: build_oss_linux_arm64_rpm + - sample-name: build_ce_linux_arm64_rpm build-artifact-name: vault-${{ needs.product-metadata.outputs.vault-version-package }}-1.aarch64.rpm - - sample-name: build_oss_linux_amd64_zip + - sample-name: build_ce_linux_amd64_zip build-artifact-name: vault_${{ needs.product-metadata.outputs.vault-version }}_linux_amd64.zip - - sample-name: build_oss_linux_arm64_zip + - sample-name: build_ce_linux_arm64_zip build-artifact-name: vault_${{ needs.product-metadata.outputs.vault-version }}_linux_arm64.zip with: build-artifact-name: ${{ matrix.build-artifact-name }} @@ -325,8 +325,8 @@ jobs: steps: - run: | tr -d '\n' <<< '${{ toJSON(needs.*.result) }}' | grep -q -v -E '(failure|cancelled)' - - notify-completed-successfully-failures-oss: + + notify-completed-successfully-failures-ce: if: ${{ always() && github.repository == 'hashicorp/vault' && needs.completed-successfully.result == 'failure' && (github.ref_name == 'main' || startsWith(github.ref_name, 'release/')) }} runs-on: ubuntu-latest permissions: @@ -346,7 +346,7 @@ jobs: with: channel-id: "C05AABYEA9Y" # sent to #feed-vault-ci-official payload: | - {"text":"OSS build failures on ${{ github.ref_name 
}}","blocks":[{"type":"header","text":{"type":"plain_text","text":":rotating_light: OSS build failures :rotating_light:","emoji":true}},{"type":"divider"},{"type":"section","text":{"type":"mrkdwn","text":"build(s) failed on ${{ github.ref_name }}"},"accessory":{"type":"button","text":{"type":"plain_text","text":"View Failing Workflow","emoji":true},"url":"${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }}"}}]} + {"text":"CE build failures on ${{ github.ref_name }}","blocks":[{"type":"header","text":{"type":"plain_text","text":":rotating_light: CE build failures :rotating_light:","emoji":true}},{"type":"divider"},{"type":"section","text":{"type":"mrkdwn","text":"build(s) failed on ${{ github.ref_name }}"},"accessory":{"type":"button","text":{"type":"plain_text","text":"View Failing Workflow","emoji":true},"url":"${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }}"}}]} notify-completed-successfully-failures-ent: if: ${{ always() && github.repository == 'hashicorp/vault-enterprise' && needs.completed-successfully.result == 'failure' && (github.ref_name == 'main' || startsWith(github.ref_name, 'release/')) }} diff --git a/.github/workflows/enos-lint.yml b/.github/workflows/enos-lint.yml index 0244fac4f5..1a562baacf 100644 --- a/.github/workflows/enos-lint.yml +++ b/.github/workflows/enos-lint.yml @@ -7,21 +7,37 @@ on: - enos/** jobs: - lint: + metadata: # Only run this workflow on pull requests from hashicorp/vault branches # as we need secrets to install enos. if: "! 
github.event.pull_request.head.repo.fork" + name: metadata runs-on: ubuntu-latest + outputs: + runs-on: ${{ steps.metadata.outputs.runs-on }} + version: ${{ steps.metadata.outputs.version }} + steps: + - uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9 # v3.5.3 + - id: set-product-version + uses: hashicorp/actions-set-product-version@v1 + - id: metadata + run: | + echo "version=${{ steps.set-product-version.outputs.product-version }}" >> "$GITHUB_OUTPUT" + github_repository="${{ github.repository }}" + if [ "${github_repository##*/}" == "vault-enterprise" ] ; then + echo 'runs-on=["self-hosted","ondemand","linux","type=c6a.4xlarge"]' >> "$GITHUB_OUTPUT" + else + echo 'runs-on="custom-linux-xl-vault-latest"' >> "$GITHUB_OUTPUT" + fi + + lint: + needs: metadata + runs-on: ${{ fromJSON(needs.metadata.outputs.runs-on) }} env: GITHUB_TOKEN: ${{ secrets.ELEVATED_GITHUB_TOKEN }} ENOS_VAR_tfc_api_token: ${{ secrets.TF_API_TOKEN }} steps: - uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9 # v3.5.3 - - name: Set Product version - id: set-product-version - uses: hashicorp/actions-set-product-version@v1 - - id: get-version - run: echo "version=${{ steps.set-product-version.outputs.product-version }}" >> "$GITHUB_OUTPUT" - uses: hashicorp/setup-terraform@v2 with: terraform_wrapper: false @@ -31,5 +47,5 @@ jobs: - name: lint working-directory: ./enos env: - ENOS_VAR_vault_product_version: ${{ steps.get-version.outputs.version }} + ENOS_VAR_vault_product_version: ${{ needs.metadata.outputs.version }} run: make lint diff --git a/.github/workflows/enos-release-testing-oss.yml b/.github/workflows/enos-release-testing-oss.yml index 10c12d7845..27558ee16c 100644 --- a/.github/workflows/enos-release-testing-oss.yml +++ b/.github/workflows/enos-release-testing-oss.yml @@ -43,17 +43,17 @@ jobs: fail-fast: false matrix: include: - - sample-name: release_oss_linux_amd64_deb + - sample-name: release_ce_linux_amd64_deb build-artifact-name: vault_${{ 
needs.product-metadata.outputs.vault-version-package }}-1_amd64.deb - - sample-name: release_oss_linux_arm64_deb + - sample-name: release_ce_linux_arm64_deb build-artifact-name: vault_${{ needs.product-metadata.outputs.vault-version-package }}-1_arm64.deb - - sample-name: release_oss_linux_amd64_rpm + - sample-name: release_ce_linux_amd64_rpm build-artifact-name: vault_${{ needs.product-metadata.outputs.vault-version-package }}-1.x86_64.rpm - - sample-name: release_oss_linux_arm64_rpm + - sample-name: release_ce_linux_arm64_rpm build-artifact-name: vault_${{ needs.product-metadata.outputs.vault-version-package }}-1.aarch64.rpm - - sample-name: release_oss_linux_amd64_zip + - sample-name: release_ce_linux_amd64_zip build-artifact-name: vault_${{ needs.product-metadata.outputs.vault-version }}_linux_amd64.zip - - sample-name: release_oss_linux_arm64_zip + - sample-name: release_ce_linux_arm64_zip build-artifact-name: vault_${{ needs.product-metadata.outputs.vault-version }}_linux_arm64.zip with: build-artifact-name: ${{ matrix.build-artifact-name }} diff --git a/.github/workflows/enos-run-k8s.yml b/.github/workflows/enos-run-k8s.yml index d1b447da97..adddba47d3 100644 --- a/.github/workflows/enos-run-k8s.yml +++ b/.github/workflows/enos-run-k8s.yml @@ -60,8 +60,8 @@ jobs: echo "image_repo=hashicorp/vault-enterprise" >> "$GITHUB_ENV" echo "image repo set to 'hashicorp/vault-enterprise'" else - echo "edition=oss" >> "$GITHUB_ENV" - echo "edition set to 'oss'" + echo "edition=ce" >> "$GITHUB_ENV" + echo "edition set to 'ce'" echo "image_repo=hashicorp/vault" >> "$GITHUB_ENV" echo "image repo set to 'hashicorp/vault'" fi diff --git a/.github/workflows/test-enos-scenario-ui.yml b/.github/workflows/test-enos-scenario-ui.yml index 3dc3270b31..fc348ce605 100644 --- a/.github/workflows/test-enos-scenario-ui.yml +++ b/.github/workflows/test-enos-scenario-ui.yml @@ -91,7 +91,7 @@ jobs: echo "${{ secrets.SSH_KEY_PRIVATE_CI }}" > ./enos/support/private_key.pem chmod 600 
./enos/support/private_key.pem - name: Set Up Vault Enterprise License - if: contains(${{ github.event.repository.name }}, 'ent') + if: contains(github.event.repository.name, 'ent') run: echo "${{ secrets.VAULT_LICENSE }}" > ./enos/support/vault.hclic || true - name: Check Chrome Installed id: chrome-check diff --git a/enos/Makefile b/enos/Makefile index 8155bfcae4..d1f933453d 100644 --- a/enos/Makefile +++ b/enos/Makefile @@ -1,5 +1,5 @@ .PHONY: default -default: check-fmt +default: check-fmt shellcheck .PHONY: check-fmt check-fmt: check-fmt-enos check-fmt-modules @@ -25,7 +25,11 @@ fmt-modules: .PHONY: validate-enos validate-enos: - enos scenario validate + enos scenario validate --timeout 30m0s .PHONY: lint -lint: check-fmt validate-enos +lint: check-fmt shellcheck validate-enos + +.PHONY: shellcheck +shellcheck: + find ./modules/ -type f -name '*.sh' | xargs shellcheck diff --git a/enos/enos-globals.hcl b/enos/enos-globals.hcl index 93268ce45d..2f89487d1a 100644 --- a/enos/enos-globals.hcl +++ b/enos/enos-globals.hcl @@ -4,7 +4,7 @@ globals { backend_tag_key = "VaultStorage" build_tags = { - "oss" = ["ui"] + "ce" = ["ui"] "ent" = ["ui", "enterprise", "ent"] "ent.fips1402" = ["ui", "enterprise", "cgo", "hsm", "fips", "fips_140_2", "ent.fips1402"] "ent.hsm" = ["ui", "enterprise", "cgo", "hsm", "venthsm"] @@ -20,9 +20,7 @@ globals { rhel = ["nc"] } sample_attributes = { - # aws_region = ["us-east-1", "us-west-2"] - # NOTE(9/18/23): use more expensive regions temporarily until AWS network outage is resolved. 
- aws_region = ["us-east-2", "us-west-1"] + aws_region = ["us-east-1", "us-west-2"] } tags = merge({ "Project Name" : var.project_name diff --git a/enos/enos-modules.hcl b/enos/enos-modules.hcl index d244d2c22c..4a7552190f 100644 --- a/enos/enos-modules.hcl +++ b/enos/enos-modules.hcl @@ -53,6 +53,10 @@ module "read_license" { source = "./modules/read_license" } +module "replication_data" { + source = "./modules/replication_data" +} + module "shutdown_node" { source = "./modules/shutdown_node" } @@ -128,9 +132,27 @@ module "vault_cluster" { module "vault_get_cluster_ips" { source = "./modules/vault_get_cluster_ips" + vault_install_dir = var.vault_install_dir + vault_instance_count = var.vault_instance_count +} + +module "vault_raft_remove_peer" { + source = "./modules/vault_raft_remove_peer" vault_install_dir = var.vault_install_dir } +module "vault_setup_perf_secondary" { + source = "./modules/vault_setup_perf_secondary" + + vault_install_dir = var.vault_install_dir +} + +module "vault_test_ui" { + source = "./modules/vault_test_ui" + + ui_run_tests = var.ui_run_tests +} + module "vault_unseal_nodes" { source = "./modules/vault_unseal_nodes" @@ -145,6 +167,7 @@ module "vault_upgrade" { vault_instance_count = var.vault_instance_count } + module "vault_verify_autopilot" { source = "./modules/vault_verify_autopilot" @@ -177,7 +200,6 @@ module "vault_verify_replication" { module "vault_verify_ui" { source = "./modules/vault_verify_ui" - vault_install_dir = var.vault_install_dir vault_instance_count = var.vault_instance_count } @@ -194,12 +216,6 @@ module "vault_setup_perf_primary" { vault_install_dir = var.vault_install_dir } -module "vault_setup_perf_secondary" { - source = "./modules/vault_setup_perf_secondary" - - vault_install_dir = var.vault_install_dir -} - module "vault_verify_read_data" { source = "./modules/vault_verify_read_data" @@ -227,13 +243,9 @@ module "vault_verify_write_data" { vault_instance_count = var.vault_instance_count } -module 
"vault_raft_remove_peer" { - source = "./modules/vault_raft_remove_peer" - vault_install_dir = var.vault_install_dir -} +module "vault_wait_for_leader" { + source = "./modules/vault_wait_for_leader" -module "vault_test_ui" { - source = "./modules/vault_test_ui" - - ui_run_tests = var.ui_run_tests + vault_install_dir = var.vault_install_dir + vault_instance_count = var.vault_instance_count } diff --git a/enos/enos-samples-ce-build.hcl b/enos/enos-samples-ce-build.hcl new file mode 100644 index 0000000000..2df3a23fb9 --- /dev/null +++ b/enos/enos-samples-ce-build.hcl @@ -0,0 +1,294 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + +sample "build_ce_linux_amd64_deb" { + attributes = global.sample_attributes + + subset "agent" { + matrix { + arch = ["amd64"] + artifact_source = ["crt"] + artifact_type = ["package"] + distro = ["ubuntu"] + edition = ["ce"] + } + } + + subset "smoke" { + matrix { + arch = ["amd64"] + artifact_source = ["crt"] + artifact_type = ["package"] + distro = ["ubuntu"] + edition = ["ce"] + } + } + + subset "proxy" { + matrix { + arch = ["amd64"] + artifact_source = ["crt"] + artifact_type = ["package"] + distro = ["ubuntu"] + edition = ["ce"] + } + } + + subset "upgrade" { + matrix { + arch = ["amd64"] + artifact_source = ["crt"] + artifact_type = ["package"] + distro = ["ubuntu"] + edition = ["ce"] + + exclude { + // Don't test from these versions in the build pipeline because of known issues + // in those older versions. 
+ initial_version = ["1.8.12", "1.9.10", "1.10.11"] + } + } + } +} + +sample "build_ce_linux_arm64_deb" { + attributes = global.sample_attributes + + subset "agent" { + matrix { + arch = ["arm64"] + artifact_source = ["crt"] + artifact_type = ["package"] + distro = ["ubuntu"] + edition = ["ce"] + } + } + + subset "smoke" { + matrix { + arch = ["arm64"] + artifact_source = ["crt"] + artifact_type = ["package"] + distro = ["ubuntu"] + edition = ["ce"] + } + } + + subset "proxy" { + matrix { + arch = ["arm64"] + artifact_source = ["crt"] + artifact_type = ["package"] + distro = ["ubuntu"] + edition = ["ce"] + } + } + + subset "upgrade" { + matrix { + arch = ["arm64"] + artifact_source = ["crt"] + artifact_type = ["package"] + distro = ["ubuntu"] + edition = ["ce"] + + exclude { + // Don't test from these versions in the build pipeline because of known issues + // in those older versions. + initial_version = ["1.8.12", "1.9.10", "1.10.11"] + } + } + } +} + +sample "build_ce_linux_arm64_rpm" { + attributes = global.sample_attributes + + subset "agent" { + matrix { + arch = ["arm64"] + artifact_source = ["crt"] + artifact_type = ["package"] + distro = ["rhel"] + edition = ["ce"] + } + } + + subset "smoke" { + matrix { + arch = ["arm64"] + artifact_source = ["crt"] + artifact_type = ["package"] + distro = ["rhel"] + edition = ["ce"] + } + } + + subset "proxy" { + matrix { + arch = ["arm64"] + artifact_source = ["crt"] + artifact_type = ["package"] + distro = ["rhel"] + edition = ["ce"] + } + } + + subset "upgrade" { + matrix { + arch = ["arm64"] + artifact_source = ["crt"] + artifact_type = ["package"] + distro = ["rhel"] + edition = ["ce"] + + exclude { + // Don't test from these versions in the build pipeline because of known issues + // in those older versions. 
+ initial_version = ["1.8.12", "1.9.10", "1.10.11"] + } + } + } +} + +sample "build_ce_linux_amd64_rpm" { + attributes = global.sample_attributes + + subset "agent" { + matrix { + arch = ["amd64"] + artifact_source = ["crt"] + artifact_type = ["package"] + distro = ["rhel"] + edition = ["ce"] + } + } + + subset "smoke" { + matrix { + arch = ["amd64"] + artifact_source = ["crt"] + artifact_type = ["package"] + distro = ["rhel"] + edition = ["ce"] + } + } + + subset "proxy" { + matrix { + arch = ["amd64"] + artifact_source = ["crt"] + artifact_type = ["package"] + distro = ["rhel"] + edition = ["ce"] + } + } + + subset "upgrade" { + matrix { + arch = ["amd64"] + artifact_source = ["crt"] + artifact_type = ["package"] + distro = ["rhel"] + edition = ["ce"] + + exclude { + // Don't test from these versions in the build pipeline because of known issues + // in those older versions. + initial_version = ["1.8.12", "1.9.10", "1.10.11"] + } + } + } +} + +sample "build_ce_linux_amd64_zip" { + attributes = global.sample_attributes + + subset "agent" { + matrix { + arch = ["amd64"] + artifact_type = ["bundle"] + artifact_source = ["crt"] + edition = ["ce"] + } + } + + subset "smoke" { + matrix { + arch = ["amd64"] + artifact_type = ["bundle"] + artifact_source = ["crt"] + edition = ["ce"] + } + } + + subset "proxy" { + matrix { + arch = ["amd64"] + artifact_type = ["bundle"] + artifact_source = ["crt"] + edition = ["ce"] + } + } + + subset "upgrade" { + matrix { + arch = ["amd64"] + artifact_type = ["bundle"] + artifact_source = ["crt"] + edition = ["ce"] + + exclude { + // Don't test from these versions in the build pipeline because of known issues + // in those older versions. 
+ initial_version = ["1.8.12", "1.9.10", "1.10.11"] + } + } + } +} + +sample "build_ce_linux_arm64_zip" { + attributes = global.sample_attributes + + subset "agent" { + matrix { + arch = ["arm64"] + artifact_source = ["crt"] + artifact_type = ["bundle"] + edition = ["ce"] + } + } + + subset "smoke" { + matrix { + arch = ["arm64"] + artifact_source = ["crt"] + artifact_type = ["bundle"] + edition = ["ce"] + } + } + + subset "proxy" { + matrix { + arch = ["arm64"] + artifact_source = ["crt"] + artifact_type = ["bundle"] + edition = ["ce"] + } + } + + subset "upgrade" { + matrix { + arch = ["arm64"] + artifact_source = ["crt"] + artifact_type = ["bundle"] + edition = ["ce"] + + exclude { + // Don't test from these versions in the build pipeline because of known issues + // in those older versions. + initial_version = ["1.8.12", "1.9.10", "1.10.11"] + } + } + } +} diff --git a/enos/enos-samples-ce-release.hcl b/enos/enos-samples-ce-release.hcl new file mode 100644 index 0000000000..6d341e174d --- /dev/null +++ b/enos/enos-samples-ce-release.hcl @@ -0,0 +1,294 @@ +# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: BUSL-1.1 + +sample "release_ce_linux_amd64_deb" { + attributes = global.sample_attributes + + subset "agent" { + matrix { + arch = ["amd64"] + artifact_source = ["artifactory"] + artifact_type = ["package"] + distro = ["ubuntu"] + edition = ["ce"] + } + } + + subset "smoke" { + matrix { + arch = ["amd64"] + artifact_source = ["artifactory"] + artifact_type = ["package"] + distro = ["ubuntu"] + edition = ["ce"] + } + } + + subset "proxy" { + matrix { + arch = ["amd64"] + artifact_source = ["artifactory"] + artifact_type = ["package"] + distro = ["ubuntu"] + edition = ["ce"] + } + } + + subset "upgrade" { + matrix { + arch = ["amd64"] + artifact_source = ["artifactory"] + artifact_type = ["package"] + distro = ["ubuntu"] + edition = ["ce"] + + exclude { + // Don't test from these versions in the release pipeline because of known issues + // in those older versions. + initial_version = ["1.8.12", "1.9.10", "1.10.11"] + } + } + } +} + +sample "release_ce_linux_arm64_deb" { + attributes = global.sample_attributes + + subset "agent" { + matrix { + arch = ["arm64"] + artifact_source = ["artifactory"] + artifact_type = ["package"] + distro = ["ubuntu"] + edition = ["ce"] + } + } + + subset "smoke" { + matrix { + arch = ["arm64"] + artifact_source = ["artifactory"] + artifact_type = ["package"] + distro = ["ubuntu"] + edition = ["ce"] + } + } + + subset "proxy" { + matrix { + arch = ["arm64"] + artifact_source = ["artifactory"] + artifact_type = ["package"] + distro = ["ubuntu"] + edition = ["ce"] + } + } + + subset "upgrade" { + matrix { + arch = ["arm64"] + artifact_source = ["artifactory"] + artifact_type = ["package"] + distro = ["ubuntu"] + edition = ["ce"] + + exclude { + // Don't test from these versions in the release pipeline because of known issues + // in those older versions. 
+ initial_version = ["1.8.12", "1.9.10", "1.10.11"] + } + } + } +} + +sample "release_ce_linux_arm64_rpm" { + attributes = global.sample_attributes + + subset "agent" { + matrix { + arch = ["arm64"] + artifact_source = ["artifactory"] + artifact_type = ["package"] + distro = ["rhel"] + edition = ["ce"] + } + } + + subset "smoke" { + matrix { + arch = ["arm64"] + artifact_source = ["artifactory"] + artifact_type = ["package"] + distro = ["rhel"] + edition = ["ce"] + } + } + + subset "proxy" { + matrix { + arch = ["arm64"] + artifact_source = ["artifactory"] + artifact_type = ["package"] + distro = ["rhel"] + edition = ["ce"] + } + } + + subset "upgrade" { + matrix { + arch = ["arm64"] + artifact_source = ["artifactory"] + artifact_type = ["package"] + distro = ["rhel"] + edition = ["ce"] + + exclude { + // Don't test from these versions in the release pipeline because of known issues + // in those older versions. + initial_version = ["1.8.12", "1.9.10", "1.10.11"] + } + } + } +} + +sample "release_ce_linux_amd64_rpm" { + attributes = global.sample_attributes + + subset "agent" { + matrix { + arch = ["amd64"] + artifact_source = ["artifactory"] + artifact_type = ["package"] + distro = ["rhel"] + edition = ["ce"] + } + } + + subset "smoke" { + matrix { + arch = ["amd64"] + artifact_source = ["artifactory"] + artifact_type = ["package"] + distro = ["rhel"] + edition = ["ce"] + } + } + + subset "proxy" { + matrix { + arch = ["amd64"] + artifact_source = ["artifactory"] + artifact_type = ["package"] + distro = ["rhel"] + edition = ["ce"] + } + } + + subset "upgrade" { + matrix { + arch = ["amd64"] + artifact_source = ["artifactory"] + artifact_type = ["package"] + distro = ["rhel"] + edition = ["ce"] + + exclude { + // Don't test from these versions in the release pipeline because of known issues + // in those older versions. 
+ initial_version = ["1.8.12", "1.9.10", "1.10.11"] + } + } + } +} + +sample "release_ce_linux_amd64_zip" { + attributes = global.sample_attributes + + subset "agent" { + matrix { + arch = ["amd64"] + artifact_type = ["bundle"] + artifact_source = ["artifactory"] + edition = ["ce"] + } + } + + subset "smoke" { + matrix { + arch = ["amd64"] + artifact_type = ["bundle"] + artifact_source = ["artifactory"] + edition = ["ce"] + } + } + + subset "proxy" { + matrix { + arch = ["amd64"] + artifact_type = ["bundle"] + artifact_source = ["artifactory"] + edition = ["ce"] + } + } + + subset "upgrade" { + matrix { + arch = ["amd64"] + artifact_type = ["bundle"] + artifact_source = ["artifactory"] + edition = ["ce"] + + exclude { + // Don't test from these versions in the release pipeline because of known issues + // in those older versions. + initial_version = ["1.8.12", "1.9.10", "1.10.11"] + } + } + } +} + +sample "release_ce_linux_arm64_zip" { + attributes = global.sample_attributes + + subset "agent" { + matrix { + arch = ["arm64"] + artifact_source = ["artifactory"] + artifact_type = ["bundle"] + edition = ["ce"] + } + } + + subset "smoke" { + matrix { + arch = ["arm64"] + artifact_source = ["artifactory"] + artifact_type = ["bundle"] + edition = ["ce"] + } + } + + subset "proxy" { + matrix { + arch = ["arm64"] + artifact_source = ["artifactory"] + artifact_type = ["bundle"] + edition = ["ce"] + } + } + + subset "upgrade" { + matrix { + arch = ["arm64"] + artifact_source = ["artifactory"] + artifact_type = ["bundle"] + edition = ["ce"] + + exclude { + // Don't test from these versions in the release pipeline because of known issues + // in those older versions. + initial_version = ["1.8.12", "1.9.10", "1.10.11"] + } + } + } +} diff --git a/enos/enos-samples-oss-build.hcl b/enos/enos-samples-oss-build.hcl deleted file mode 100644 index 3c39901a62..0000000000 --- a/enos/enos-samples-oss-build.hcl +++ /dev/null @@ -1,142 +0,0 @@ -# Copyright (c) HashiCorp, Inc. 
-# SPDX-License-Identifier: BUSL-1.1 - -sample "build_oss_linux_amd64_deb" { - attributes = global.sample_attributes - - subset "smoke" { - matrix { - arch = ["amd64"] - artifact_source = ["crt"] - artifact_type = ["package"] - distro = ["ubuntu"] - edition = ["oss"] - } - } - - subset "upgrade" { - matrix { - arch = ["amd64"] - artifact_source = ["crt"] - artifact_type = ["package"] - distro = ["ubuntu"] - edition = ["oss"] - } - } -} - -sample "build_oss_linux_arm64_deb" { - attributes = global.sample_attributes - - subset "smoke" { - matrix { - arch = ["arm64"] - artifact_source = ["crt"] - artifact_type = ["package"] - distro = ["ubuntu"] - edition = ["oss"] - } - } - - subset "upgrade" { - matrix { - arch = ["arm64"] - artifact_source = ["crt"] - artifact_type = ["package"] - distro = ["ubuntu"] - edition = ["oss"] - } - } -} - -sample "build_oss_linux_arm64_rpm" { - attributes = global.sample_attributes - - subset "smoke" { - matrix { - arch = ["arm64"] - artifact_source = ["crt"] - artifact_type = ["package"] - distro = ["rhel"] - edition = ["oss"] - } - } - - subset "upgrade" { - matrix { - arch = ["arm64"] - artifact_source = ["crt"] - artifact_type = ["package"] - distro = ["rhel"] - edition = ["oss"] - } - } -} - -sample "build_oss_linux_amd64_rpm" { - attributes = global.sample_attributes - - subset "smoke" { - matrix { - arch = ["amd64"] - artifact_source = ["crt"] - artifact_type = ["package"] - distro = ["rhel"] - edition = ["oss"] - } - } - - subset "upgrade" { - matrix { - arch = ["amd64"] - artifact_source = ["crt"] - artifact_type = ["package"] - distro = ["rhel"] - edition = ["oss"] - } - } -} - -sample "build_oss_linux_amd64_zip" { - attributes = global.sample_attributes - - subset "smoke" { - matrix { - arch = ["amd64"] - artifact_type = ["bundle"] - artifact_source = ["crt"] - edition = ["oss"] - } - } - - subset "upgrade" { - matrix { - arch = ["amd64"] - artifact_type = ["bundle"] - artifact_source = ["crt"] - edition = ["oss"] - } - } -} - 
-sample "build_oss_linux_arm64_zip" { - attributes = global.sample_attributes - - subset "smoke" { - matrix { - arch = ["arm64"] - artifact_source = ["crt"] - artifact_type = ["bundle"] - edition = ["oss"] - } - } - - subset "upgrade" { - matrix { - arch = ["arm64"] - artifact_source = ["crt"] - artifact_type = ["bundle"] - edition = ["oss"] - } - } -} diff --git a/enos/enos-samples-oss-release.hcl b/enos/enos-samples-oss-release.hcl deleted file mode 100644 index 80eaaa042a..0000000000 --- a/enos/enos-samples-oss-release.hcl +++ /dev/null @@ -1,142 +0,0 @@ -# Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: BUSL-1.1 - -sample "release_oss_linux_amd64_deb" { - attributes = global.sample_attributes - - subset "smoke" { - matrix { - arch = ["amd64"] - artifact_source = ["artifactory"] - artifact_type = ["package"] - distro = ["ubuntu"] - edition = ["oss"] - } - } - - subset "upgrade" { - matrix { - arch = ["amd64"] - artifact_source = ["artifactory"] - artifact_type = ["package"] - distro = ["ubuntu"] - edition = ["oss"] - } - } -} - -sample "release_oss_linux_arm64_deb" { - attributes = global.sample_attributes - - subset "smoke" { - matrix { - arch = ["arm64"] - artifact_source = ["artifactory"] - artifact_type = ["package"] - distro = ["ubuntu"] - edition = ["oss"] - } - } - - subset "upgrade" { - matrix { - arch = ["arm64"] - artifact_source = ["artifactory"] - artifact_type = ["package"] - distro = ["ubuntu"] - edition = ["oss"] - } - } -} - -sample "release_oss_linux_arm64_rpm" { - attributes = global.sample_attributes - - subset "smoke" { - matrix { - arch = ["arm64"] - artifact_source = ["artifactory"] - artifact_type = ["package"] - distro = ["rhel"] - edition = ["oss"] - } - } - - subset "upgrade" { - matrix { - arch = ["arm64"] - artifact_source = ["artifactory"] - artifact_type = ["package"] - distro = ["rhel"] - edition = ["oss"] - } - } -} - -sample "release_oss_linux_amd64_rpm" { - attributes = global.sample_attributes - - subset "smoke" { - 
matrix { - arch = ["amd64"] - artifact_source = ["artifactory"] - artifact_type = ["package"] - distro = ["rhel"] - edition = ["oss"] - } - } - - subset "upgrade" { - matrix { - arch = ["amd64"] - artifact_source = ["artifactory"] - artifact_type = ["package"] - distro = ["rhel"] - edition = ["oss"] - } - } -} - -sample "release_oss_linux_amd64_zip" { - attributes = global.sample_attributes - - subset "smoke" { - matrix { - arch = ["amd64"] - artifact_type = ["bundle"] - artifact_source = ["artifactory"] - edition = ["oss"] - } - } - - subset "upgrade" { - matrix { - arch = ["amd64"] - artifact_type = ["bundle"] - artifact_source = ["artifactory"] - edition = ["oss"] - } - } -} - -sample "release_oss_linux_arm64_zip" { - attributes = global.sample_attributes - - subset "smoke" { - matrix { - arch = ["arm64"] - artifact_source = ["artifactory"] - artifact_type = ["bundle"] - edition = ["oss"] - } - } - - subset "upgrade" { - matrix { - arch = ["arm64"] - artifact_source = ["artifactory"] - artifact_type = ["bundle"] - edition = ["oss"] - } - } -} diff --git a/enos/enos-scenario-agent.hcl b/enos/enos-scenario-agent.hcl index 04319036da..5a613bf602 100644 --- a/enos/enos-scenario-agent.hcl +++ b/enos/enos-scenario-agent.hcl @@ -5,8 +5,12 @@ scenario "agent" { matrix { arch = ["amd64", "arm64"] artifact_source = ["local", "crt", "artifactory"] + artifact_type = ["bundle", "package"] + backend = ["consul", "raft"] + consul_version = ["1.12.9", "1.13.9", "1.14.9", "1.15.5", "1.16.1"] distro = ["ubuntu", "rhel"] - edition = ["oss", "ent", "ent.fips1402", "ent.hsm", "ent.hsm.fips1402"] + edition = ["ce", "ent", "ent.fips1402", "ent.hsm", "ent.hsm.fips1402"] + seal = ["awskms", "shamir"] # Our local builder always creates bundles exclude { @@ -30,12 +34,18 @@ scenario "agent" { ] locals { - bundle_path = matrix.artifact_source != "artifactory" ? abspath(var.vault_artifact_path) : null + artifact_path = matrix.artifact_source != "artifactory" ? 
abspath(var.vault_artifact_path) : null enos_provider = { rhel = provider.enos.rhel ubuntu = provider.enos.ubuntu } - install_artifactory_artifact = local.bundle_path == null + manage_service = matrix.artifact_type == "bundle" + vault_install_dir = matrix.artifact_type == "bundle" ? var.vault_install_dir : global.vault_install_dir_packages[matrix.distro] + } + + step "get_local_metadata" { + skip_step = matrix.artifact_source != "local" + module = module.get_local_metadata } step "build_vault" { @@ -43,7 +53,7 @@ scenario "agent" { variables { build_tags = var.vault_local_build_tags != null ? var.vault_local_build_tags : global.build_tags[matrix.edition] - bundle_path = local.bundle_path + artifact_path = local.artifact_path goarch = matrix.arch goos = "linux" artifactory_host = matrix.artifact_source == "artifactory" ? var.artifactory_host : null @@ -52,7 +62,7 @@ scenario "agent" { artifactory_token = matrix.artifact_source == "artifactory" ? var.artifactory_token : null arch = matrix.artifact_source == "artifactory" ? matrix.arch : null product_version = var.vault_product_version - artifact_type = matrix.artifact_source == "artifactory" ? var.vault_artifact_type : null + artifact_type = matrix.artifact_type distro = matrix.artifact_source == "artifactory" ? matrix.distro : null edition = matrix.artifact_source == "artifactory" ? matrix.edition : null revision = var.vault_revision @@ -71,8 +81,19 @@ scenario "agent" { } } - step "read_license" { - skip_step = matrix.edition == "oss" + // This step reads the contents of the backend license if we're using a Consul backend and + // the edition is "ent". 
+ step "read_backend_license" { + skip_step = matrix.backend == "raft" || var.backend_edition == "ce" + module = module.read_license + + variables { + file_name = global.backend_license_path + } + } + + step "read_vault_license" { + skip_step = matrix.edition == "ce" module = module.read_license variables { @@ -97,9 +118,49 @@ scenario "agent" { } } + step "create_vault_cluster_backend_targets" { + module = matrix.backend == "consul" ? module.target_ec2_instances : module.target_ec2_shim + depends_on = [step.create_vpc] + + providers = { + enos = provider.enos.ubuntu + } + + variables { + ami_id = step.ec2_info.ami_ids["arm64"]["ubuntu"]["22.04"] + awskms_unseal_key_arn = step.create_vpc.kms_key_arn + cluster_tag_key = global.backend_tag_key + common_tags = global.tags + vpc_id = step.create_vpc.vpc_id + } + } + + step "create_backend_cluster" { + module = "backend_${matrix.backend}" + depends_on = [ + step.create_vault_cluster_backend_targets + ] + + providers = { + enos = provider.enos.ubuntu + } + + variables { + cluster_name = step.create_vault_cluster_backend_targets.cluster_name + cluster_tag_key = global.backend_tag_key + license = (matrix.backend == "consul" && var.backend_edition == "ent") ? step.read_backend_license.license : null + release = { + edition = var.backend_edition + version = matrix.consul_version + } + target_hosts = step.create_vault_cluster_backend_targets.hosts + } + } + step "create_vault_cluster" { module = module.vault_cluster depends_on = [ + step.create_backend_cluster, step.build_vault, step.create_vault_cluster_targets ] @@ -109,17 +170,42 @@ scenario "agent" { } variables { - artifactory_release = matrix.artifact_source == "artifactory" ? 
step.build_vault.vault_artifactory_release : null - awskms_unseal_key_arn = step.create_vpc.kms_key_arn - cluster_name = step.create_vault_cluster_targets.cluster_name - enable_audit_devices = var.vault_enable_audit_devices - install_dir = var.vault_install_dir - license = matrix.edition != "oss" ? step.read_license.license : null - local_artifact_path = local.bundle_path - packages = concat(global.packages, global.distro_packages[matrix.distro]) - storage_backend = "raft" - target_hosts = step.create_vault_cluster_targets.hosts - unseal_method = "shamir" + artifactory_release = matrix.artifact_source == "artifactory" ? step.build_vault.vault_artifactory_release : null + awskms_unseal_key_arn = step.create_vpc.kms_key_arn + backend_cluster_name = step.create_vault_cluster_backend_targets.cluster_name + backend_cluster_tag_key = global.backend_tag_key + cluster_name = step.create_vault_cluster_targets.cluster_name + consul_license = (matrix.backend == "consul" && var.backend_edition == "ent") ? step.read_backend_license.license : null + consul_release = matrix.backend == "consul" ? { + edition = var.backend_edition + version = matrix.consul_version + } : null + enable_audit_devices = var.vault_enable_audit_devices + install_dir = local.vault_install_dir + license = matrix.edition != "ce" ? 
step.read_vault_license.license : null + local_artifact_path = local.artifact_path + manage_service = local.manage_service + packages = concat(global.packages, global.distro_packages[matrix.distro]) + storage_backend = matrix.backend + target_hosts = step.create_vault_cluster_targets.hosts + unseal_method = matrix.seal + } + } + + // Wait for our cluster to elect a leader + step "wait_for_leader" { + module = module.vault_wait_for_leader + depends_on = [step.create_vault_cluster] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + variables { + timeout = 120 # seconds + vault_hosts = step.create_vault_cluster_targets.hosts + vault_install_dir = local.vault_install_dir + vault_root_token = step.create_vault_cluster.root_token } } @@ -128,6 +214,7 @@ scenario "agent" { depends_on = [ step.build_vault, step.create_vault_cluster, + step.wait_for_leader, ] providers = { @@ -135,6 +222,7 @@ scenario "agent" { } variables { + vault_install_dir = local.vault_install_dir vault_instances = step.create_vault_cluster_targets.hosts vault_root_token = step.create_vault_cluster.root_token vault_agent_template_destination = "/tmp/agent_output.txt" @@ -147,6 +235,7 @@ scenario "agent" { depends_on = [ step.create_vault_cluster, step.start_vault_agent, + step.wait_for_leader, ] providers = { @@ -160,7 +249,147 @@ scenario "agent" { } } - output "awkms_unseal_key_arn" { + step "get_vault_cluster_ips" { + module = module.vault_get_cluster_ips + depends_on = [step.wait_for_leader] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + variables { + vault_hosts = step.create_vault_cluster_targets.hosts + vault_install_dir = local.vault_install_dir + vault_root_token = step.create_vault_cluster.root_token + } + } + + step "verify_vault_version" { + module = module.vault_verify_version + depends_on = [step.wait_for_leader] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + variables { + vault_instances = 
step.create_vault_cluster_targets.hosts + vault_edition = matrix.edition + vault_install_dir = local.vault_install_dir + vault_product_version = matrix.artifact_source == "local" ? step.get_local_metadata.version : var.vault_product_version + vault_revision = matrix.artifact_source == "local" ? step.get_local_metadata.revision : var.vault_revision + vault_build_date = matrix.artifact_source == "local" ? step.get_local_metadata.build_date : var.vault_build_date + vault_root_token = step.create_vault_cluster.root_token + } + } + + step "verify_vault_unsealed" { + module = module.vault_verify_unsealed + depends_on = [step.wait_for_leader] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + variables { + vault_install_dir = local.vault_install_dir + vault_instances = step.create_vault_cluster_targets.hosts + } + } + + step "verify_write_test_data" { + module = module.vault_verify_write_data + depends_on = [ + step.create_vault_cluster, + step.get_vault_cluster_ips + ] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + variables { + leader_public_ip = step.get_vault_cluster_ips.leader_public_ip + leader_private_ip = step.get_vault_cluster_ips.leader_private_ip + vault_instances = step.create_vault_cluster_targets.hosts + vault_install_dir = local.vault_install_dir + vault_root_token = step.create_vault_cluster.root_token + } + } + + step "verify_raft_auto_join_voter" { + skip_step = matrix.backend != "raft" + module = module.vault_verify_raft_auto_join_voter + depends_on = [ + step.create_vault_cluster, + step.get_vault_cluster_ips + ] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + variables { + vault_install_dir = local.vault_install_dir + vault_instances = step.create_vault_cluster_targets.hosts + vault_root_token = step.create_vault_cluster.root_token + } + } + + step "verify_replication" { + module = module.vault_verify_replication + depends_on = [ + step.create_vault_cluster, + step.get_vault_cluster_ips + 
] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + variables { + vault_edition = matrix.edition + vault_install_dir = local.vault_install_dir + vault_instances = step.create_vault_cluster_targets.hosts + } + } + + step "verify_read_test_data" { + module = module.vault_verify_read_data + depends_on = [ + step.verify_write_test_data, + step.verify_replication + ] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + variables { + node_public_ips = step.get_vault_cluster_ips.follower_public_ips + vault_install_dir = local.vault_install_dir + } + } + + step "verify_ui" { + module = module.vault_verify_ui + depends_on = [step.create_vault_cluster] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + variables { + vault_instances = step.create_vault_cluster_targets.hosts + } + } + + output "audit_device_file_path" { + description = "The file path for the file audit device, if enabled" + value = step.create_vault_cluster.audit_device_file_path + } + + output "awskms_unseal_key_arn" { description = "The Vault cluster KMS key arn" value = step.create_vpc.kms_key_arn } @@ -214,9 +443,4 @@ scenario "agent" { description = "The Vault cluster unseal keys hex" value = step.create_vault_cluster.unseal_keys_hex } - - output "vault_audit_device_file_path" { - description = "The file path for the file audit device, if enabled" - value = step.create_vault_cluster.audit_device_file_path - } } diff --git a/enos/enos-scenario-autopilot.hcl b/enos/enos-scenario-autopilot.hcl index fa1960ac06..ffbbdcba15 100644 --- a/enos/enos-scenario-autopilot.hcl +++ b/enos/enos-scenario-autopilot.hcl @@ -7,7 +7,10 @@ scenario "autopilot" { artifact_source = ["local", "crt", "artifactory"] artifact_type = ["bundle", "package"] distro = ["ubuntu", "rhel"] - edition = ["ent", "ent.fips1402", "ent.hsm", "ent.hsm.fips1402"] + edition = ["ce", "ent", "ent.fips1402", "ent.hsm", "ent.hsm.fips1402"] + // NOTE: when backporting, make sure that our initial 
versions are less than that + // release branch's version. + initial_version = ["1.11.12", "1.12.11", "1.13.6", "1.14.2"] seal = ["awskms", "shamir"] # Our local builder always creates bundles @@ -114,12 +117,15 @@ scenario "autopilot" { awskms_unseal_key_arn = step.create_vpc.kms_key_arn cluster_name = step.create_vault_cluster_targets.cluster_name install_dir = local.vault_install_dir - license = matrix.edition != "oss" ? step.read_license.license : null + license = matrix.edition != "ce" ? step.read_license.license : null packages = concat(global.packages, global.distro_packages[matrix.distro]) - release = var.vault_autopilot_initial_release - storage_backend = "raft" + release = { + edition = matrix.edition + version = matrix.initial_version + } + storage_backend = "raft" storage_backend_addl_config = { - autopilot_upgrade_version = var.vault_autopilot_initial_release.version + autopilot_upgrade_version = matrix.initial_version } target_hosts = step.create_vault_cluster_targets.hosts unseal_method = matrix.seal @@ -141,7 +147,7 @@ scenario "autopilot" { } variables { - vault_instances = step.create_vault_cluster.target_hosts + vault_hosts = step.create_vault_cluster.target_hosts vault_install_dir = local.vault_install_dir vault_root_token = step.create_vault_cluster.root_token } @@ -213,7 +219,7 @@ scenario "autopilot" { force_unseal = matrix.seal == "shamir" initialize_cluster = false install_dir = local.vault_install_dir - license = matrix.edition != "oss" ? step.read_license.license : null + license = matrix.edition != "ce" ? 
step.read_license.license : null local_artifact_path = local.artifact_path manage_service = local.manage_service packages = concat(global.packages, global.distro_packages[matrix.distro]) @@ -285,8 +291,8 @@ scenario "autopilot" { } } - step "get_updated_vault_cluster_ips" { - module = module.vault_get_cluster_ips + step "wait_for_leader_in_upgrade_targets" { + module = module.vault_wait_for_leader depends_on = [ step.create_vault_cluster, step.create_vault_cluster_upgrade_targets, @@ -299,11 +305,30 @@ scenario "autopilot" { } variables { - vault_instances = step.create_vault_cluster.target_hosts - vault_install_dir = local.vault_install_dir - vault_root_token = step.create_vault_cluster.root_token - node_public_ip = step.get_vault_cluster_ips.leader_public_ip - added_vault_instances = step.upgrade_vault_cluster_with_autopilot.target_hosts + vault_install_dir = local.vault_install_dir + vault_root_token = step.create_vault_cluster.root_token + vault_hosts = step.upgrade_vault_cluster_with_autopilot.target_hosts + } + } + + step "get_updated_vault_cluster_ips" { + module = module.vault_get_cluster_ips + depends_on = [ + step.create_vault_cluster, + step.create_vault_cluster_upgrade_targets, + step.get_vault_cluster_ips, + step.upgrade_vault_cluster_with_autopilot, + step.wait_for_leader_in_upgrade_targets, + ] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + variables { + vault_hosts = step.upgrade_vault_cluster_with_autopilot.target_hosts + vault_install_dir = local.vault_install_dir + vault_root_token = step.create_vault_cluster.root_token } } @@ -388,9 +413,73 @@ scenario "autopilot" { } } + step "verify_replication" { + module = module.vault_verify_replication + depends_on = [ + step.create_vault_cluster_upgrade_targets, + step.upgrade_vault_cluster_with_autopilot, + step.verify_raft_auto_join_voter, + step.remove_old_nodes + ] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + variables { + vault_edition = matrix.edition 
+ vault_install_dir = local.vault_install_dir + vault_instances = step.upgrade_vault_cluster_with_autopilot.target_hosts + } + } + + step "verify_vault_version" { + module = module.vault_verify_version + depends_on = [ + step.create_vault_cluster_upgrade_targets, + step.upgrade_vault_cluster_with_autopilot, + step.verify_raft_auto_join_voter, + step.remove_old_nodes + ] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + variables { + vault_instances = step.upgrade_vault_cluster_with_autopilot.target_hosts + vault_edition = matrix.edition + vault_install_dir = local.vault_install_dir + vault_product_version = matrix.artifact_source == "local" ? step.get_local_metadata.version : var.vault_product_version + vault_revision = matrix.artifact_source == "local" ? step.get_local_metadata.revision : var.vault_revision + vault_build_date = matrix.artifact_source == "local" ? step.get_local_metadata.build_date : var.vault_build_date + vault_root_token = step.create_vault_cluster.root_token + } + } + + step "verify_ui" { + module = module.vault_verify_ui + depends_on = [ + step.create_vault_cluster_upgrade_targets, + step.upgrade_vault_cluster_with_autopilot, + step.verify_raft_auto_join_voter, + step.remove_old_nodes + ] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + variables { + vault_instances = step.upgrade_vault_cluster_with_autopilot.target_hosts + } + } + step "verify_undo_logs_status" { - skip_step = semverconstraint(var.vault_product_version, "<1.13.0-0") - module = module.vault_verify_undo_logs + skip_step = true + # NOTE: temporarily disable undo logs checking until it is fixed. 
See VAULT-20259 + # skip_step = semverconstraint(var.vault_product_version, "<1.13.0-0") + module = module.vault_verify_undo_logs depends_on = [ step.create_vault_cluster_upgrade_targets, step.remove_old_nodes, diff --git a/enos/enos-scenario-proxy.hcl b/enos/enos-scenario-proxy.hcl index d2a7074ae6..3cf662ffc1 100644 --- a/enos/enos-scenario-proxy.hcl +++ b/enos/enos-scenario-proxy.hcl @@ -5,8 +5,24 @@ scenario "proxy" { matrix { arch = ["amd64", "arm64"] artifact_source = ["local", "crt", "artifactory"] + artifact_type = ["bundle", "package"] + backend = ["consul", "raft"] + consul_version = ["1.12.9", "1.13.9", "1.14.9", "1.15.5", "1.16.1"] distro = ["ubuntu", "rhel"] - edition = ["oss", "ent", "ent.fips1402", "ent.hsm", "ent.hsm.fips1402"] + edition = ["ce", "ent", "ent.fips1402", "ent.hsm", "ent.hsm.fips1402"] + seal = ["awskms", "shamir"] + + # Our local builder always creates bundles + exclude { + artifact_source = ["local"] + artifact_type = ["package"] + } + + # HSM and FIPS 140-2 are only supported on amd64 + exclude { + arch = ["arm64"] + edition = ["ent.fips1402", "ent.hsm", "ent.hsm.fips1402"] + } } terraform_cli = terraform_cli.default @@ -18,11 +34,13 @@ scenario "proxy" { ] locals { - bundle_path = matrix.artifact_source != "artifactory" ? abspath(var.vault_artifact_path) : null + artifact_path = matrix.artifact_source != "artifactory" ? abspath(var.vault_artifact_path) : null enos_provider = { rhel = provider.enos.rhel ubuntu = provider.enos.ubuntu } + manage_service = matrix.artifact_type == "bundle" + vault_install_dir = matrix.artifact_type == "bundle" ? var.vault_install_dir : global.vault_install_dir_packages[matrix.distro] } step "get_local_metadata" { @@ -35,7 +53,7 @@ scenario "proxy" { variables { build_tags = var.vault_local_build_tags != null ? 
var.vault_local_build_tags : global.build_tags[matrix.edition] - bundle_path = local.bundle_path + artifact_path = local.artifact_path goarch = matrix.arch goos = "linux" artifactory_host = matrix.artifact_source == "artifactory" ? var.artifactory_host : null @@ -44,7 +62,7 @@ scenario "proxy" { artifactory_token = matrix.artifact_source == "artifactory" ? var.artifactory_token : null arch = matrix.artifact_source == "artifactory" ? matrix.arch : null product_version = var.vault_product_version - artifact_type = matrix.artifact_source == "artifactory" ? var.vault_artifact_type : null + artifact_type = matrix.artifact_type distro = matrix.artifact_source == "artifactory" ? matrix.distro : null edition = matrix.artifact_source == "artifactory" ? matrix.edition : null revision = var.vault_revision @@ -63,8 +81,19 @@ scenario "proxy" { } } - step "read_license" { - skip_step = matrix.edition == "oss" + // This step reads the contents of the backend license if we're using a Consul backend and + // the edition is "ent". + step "read_backend_license" { + skip_step = matrix.backend == "raft" || var.backend_edition == "ce" + module = module.read_license + + variables { + file_name = global.backend_license_path + } + } + + step "read_vault_license" { + skip_step = matrix.edition == "ce" module = module.read_license variables { @@ -89,9 +118,49 @@ scenario "proxy" { } } + step "create_vault_cluster_backend_targets" { + module = matrix.backend == "consul" ? 
module.target_ec2_instances : module.target_ec2_shim + depends_on = [step.create_vpc] + + providers = { + enos = provider.enos.ubuntu + } + + variables { + ami_id = step.ec2_info.ami_ids["arm64"]["ubuntu"]["22.04"] + awskms_unseal_key_arn = step.create_vpc.kms_key_arn + cluster_tag_key = global.backend_tag_key + common_tags = global.tags + vpc_id = step.create_vpc.vpc_id + } + } + + step "create_backend_cluster" { + module = "backend_${matrix.backend}" + depends_on = [ + step.create_vault_cluster_backend_targets + ] + + providers = { + enos = provider.enos.ubuntu + } + + variables { + cluster_name = step.create_vault_cluster_backend_targets.cluster_name + cluster_tag_key = global.backend_tag_key + license = (matrix.backend == "consul" && var.backend_edition == "ent") ? step.read_backend_license.license : null + release = { + edition = var.backend_edition + version = matrix.consul_version + } + target_hosts = step.create_vault_cluster_backend_targets.hosts + } + } + step "create_vault_cluster" { module = module.vault_cluster depends_on = [ + step.create_backend_cluster, step.build_vault, step.create_vault_cluster_targets ] @@ -101,17 +170,42 @@ scenario "proxy" { } variables { - artifactory_release = matrix.artifact_source == "artifactory" ? step.build_vault.vault_artifactory_release : null - awskms_unseal_key_arn = step.create_vpc.kms_key_arn - cluster_name = step.create_vault_cluster_targets.cluster_name - enable_audit_devices = var.vault_enable_audit_devices - install_dir = var.vault_install_dir - license = matrix.edition != "oss" ? step.read_license.license : null - local_artifact_path = local.bundle_path - packages = concat(global.packages, global.distro_packages[matrix.distro]) - storage_backend = "raft" - target_hosts = step.create_vault_cluster_targets.hosts - unseal_method = "shamir" + artifactory_release = matrix.artifact_source == "artifactory" ? 
step.build_vault.vault_artifactory_release : null + awskms_unseal_key_arn = step.create_vpc.kms_key_arn + backend_cluster_name = step.create_vault_cluster_backend_targets.cluster_name + backend_cluster_tag_key = global.backend_tag_key + cluster_name = step.create_vault_cluster_targets.cluster_name + consul_license = (matrix.backend == "consul" && var.backend_edition == "ent") ? step.read_backend_license.license : null + consul_release = matrix.backend == "consul" ? { + edition = var.backend_edition + version = matrix.consul_version + } : null + enable_audit_devices = var.vault_enable_audit_devices + install_dir = local.vault_install_dir + license = matrix.edition != "ce" ? step.read_vault_license.license : null + local_artifact_path = local.artifact_path + manage_service = local.manage_service + packages = concat(global.packages, global.distro_packages[matrix.distro]) + storage_backend = matrix.backend + target_hosts = step.create_vault_cluster_targets.hosts + unseal_method = matrix.seal + } + } + + // Wait for our cluster to elect a leader + step "wait_for_leader" { + module = module.vault_wait_for_leader + depends_on = [step.create_vault_cluster] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + variables { + timeout = 120 # seconds + vault_hosts = step.create_vault_cluster_targets.hosts + vault_install_dir = local.vault_install_dir + vault_root_token = step.create_vault_cluster.root_token } } @@ -127,12 +221,147 @@ scenario "proxy" { } variables { - vault_instances = step.create_vault_cluster_targets.hosts - vault_root_token = step.create_vault_cluster.root_token + vault_install_dir = local.vault_install_dir + vault_instances = step.create_vault_cluster_targets.hosts + vault_root_token = step.create_vault_cluster.root_token } } - output "awkms_unseal_key_arn" { + step "get_vault_cluster_ips" { + module = module.vault_get_cluster_ips + depends_on = [step.wait_for_leader] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + 
variables { + vault_hosts = step.create_vault_cluster_targets.hosts + vault_install_dir = local.vault_install_dir + vault_root_token = step.create_vault_cluster.root_token + } + } + + step "verify_vault_version" { + module = module.vault_verify_version + depends_on = [step.create_vault_cluster] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + variables { + vault_instances = step.create_vault_cluster_targets.hosts + vault_edition = matrix.edition + vault_install_dir = local.vault_install_dir + vault_product_version = matrix.artifact_source == "local" ? step.get_local_metadata.version : var.vault_product_version + vault_revision = matrix.artifact_source == "local" ? step.get_local_metadata.revision : var.vault_revision + vault_build_date = matrix.artifact_source == "local" ? step.get_local_metadata.build_date : var.vault_build_date + vault_root_token = step.create_vault_cluster.root_token + } + } + + step "verify_vault_unsealed" { + module = module.vault_verify_unsealed + depends_on = [step.create_vault_cluster] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + variables { + vault_install_dir = local.vault_install_dir + vault_instances = step.create_vault_cluster_targets.hosts + } + } + + step "verify_write_test_data" { + module = module.vault_verify_write_data + depends_on = [ + step.create_vault_cluster, + step.get_vault_cluster_ips + ] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + variables { + leader_public_ip = step.get_vault_cluster_ips.leader_public_ip + leader_private_ip = step.get_vault_cluster_ips.leader_private_ip + vault_instances = step.create_vault_cluster_targets.hosts + vault_install_dir = local.vault_install_dir + vault_root_token = step.create_vault_cluster.root_token + } + } + + step "verify_raft_auto_join_voter" { + skip_step = matrix.backend != "raft" + module = module.vault_verify_raft_auto_join_voter + depends_on = [step.create_vault_cluster] + + providers = { + enos = 
local.enos_provider[matrix.distro] + } + + variables { + vault_install_dir = local.vault_install_dir + vault_instances = step.create_vault_cluster_targets.hosts + vault_root_token = step.create_vault_cluster.root_token + } + } + + step "verify_replication" { + module = module.vault_verify_replication + depends_on = [step.create_vault_cluster] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + variables { + vault_edition = matrix.edition + vault_install_dir = local.vault_install_dir + vault_instances = step.create_vault_cluster_targets.hosts + } + } + + step "verify_read_test_data" { + module = module.vault_verify_read_data + depends_on = [ + step.verify_write_test_data, + step.verify_replication + ] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + variables { + node_public_ips = step.get_vault_cluster_ips.follower_public_ips + vault_install_dir = local.vault_install_dir + } + } + + step "verify_ui" { + module = module.vault_verify_ui + depends_on = [step.create_vault_cluster] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + variables { + vault_instances = step.create_vault_cluster_targets.hosts + } + } + + output "audit_device_file_path" { + description = "The file path for the file audit device, if enabled" + value = step.create_vault_cluster.audit_device_file_path + } + + output "awskms_unseal_key_arn" { description = "The Vault cluster KMS key arn" value = step.create_vpc.kms_key_arn } diff --git a/enos/enos-scenario-replication.hcl b/enos/enos-scenario-replication.hcl index e980e262c4..638011b8e6 100644 --- a/enos/enos-scenario-replication.hcl +++ b/enos/enos-scenario-replication.hcl @@ -9,7 +9,7 @@ scenario "replication" { arch = ["amd64", "arm64"] artifact_source = ["local", "crt", "artifactory"] artifact_type = ["bundle", "package"] - consul_version = ["1.14.2", "1.13.4", "1.12.7"] + consul_version = ["1.12.9", "1.13.9", "1.14.9", "1.15.5", "1.16.1"] distro = ["ubuntu", "rhel"] edition = ["ent", 
"ent.fips1402", "ent.hsm", "ent.hsm.fips1402"] primary_backend = ["raft", "consul"] @@ -48,6 +48,11 @@ scenario "replication" { vault_install_dir = matrix.artifact_type == "bundle" ? var.vault_install_dir : global.vault_install_dir_packages[matrix.distro] } + step "get_local_metadata" { + skip_step = matrix.artifact_source != "local" + module = module.get_local_metadata + } + step "build_vault" { module = "build_${matrix.artifact_source}" @@ -84,7 +89,7 @@ scenario "replication" { // This step reads the contents of the backend license if we're using a Consul backend and // the edition is "ent". step "read_backend_license" { - skip_step = (matrix.primary_backend == "raft" && matrix.secondary_backend == "raft") || var.backend_edition == "oss" + skip_step = (matrix.primary_backend == "raft" && matrix.secondary_backend == "raft") || var.backend_edition == "ce" module = module.read_license variables { @@ -241,7 +246,7 @@ scenario "replication" { } : null enable_audit_devices = var.vault_enable_audit_devices install_dir = local.vault_install_dir - license = matrix.edition != "oss" ? step.read_vault_license.license : null + license = matrix.edition != "ce" ? step.read_vault_license.license : null local_artifact_path = local.artifact_path manage_service = local.manage_service packages = concat(global.packages, global.distro_packages[matrix.distro]) @@ -298,7 +303,7 @@ scenario "replication" { } : null enable_audit_devices = var.vault_enable_audit_devices install_dir = local.vault_install_dir - license = matrix.edition != "oss" ? step.read_vault_license.license : null + license = matrix.edition != "ce" ? 
step.read_vault_license.license : null local_artifact_path = local.artifact_path manage_service = local.manage_service packages = concat(global.packages, global.distro_packages[matrix.distro]) @@ -340,6 +345,42 @@ scenario "replication" { } } + step "verify_vault_version" { + module = module.vault_verify_version + depends_on = [ + step.create_primary_cluster + ] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + variables { + vault_instances = step.create_primary_cluster_targets.hosts + vault_edition = matrix.edition + vault_install_dir = local.vault_install_dir + vault_product_version = matrix.artifact_source == "local" ? step.get_local_metadata.version : var.vault_product_version + vault_revision = matrix.artifact_source == "local" ? step.get_local_metadata.revision : var.vault_revision + vault_build_date = matrix.artifact_source == "local" ? step.get_local_metadata.build_date : var.vault_build_date + vault_root_token = step.create_primary_cluster.root_token + } + } + + step "verify_ui" { + module = module.vault_verify_ui + depends_on = [ + step.create_primary_cluster + ] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + variables { + vault_instances = step.create_primary_cluster_targets.hosts + } + } + step "get_primary_cluster_ips" { module = module.vault_get_cluster_ips depends_on = [step.verify_that_vault_primary_cluster_is_unsealed] @@ -349,12 +390,21 @@ scenario "replication" { } variables { - vault_instances = step.create_primary_cluster_targets.hosts + vault_hosts = step.create_primary_cluster_targets.hosts vault_install_dir = local.vault_install_dir vault_root_token = step.create_primary_cluster.root_token } } + step "get_primary_cluster_replication_data" { + module = module.replication_data + depends_on = [step.get_primary_cluster_ips] + + variables { + follower_hosts = step.get_primary_cluster_ips.follower_hosts + } + } + step "get_secondary_cluster_ips" { module = module.vault_get_cluster_ips depends_on = 
[step.verify_that_vault_secondary_cluster_is_unsealed] @@ -364,7 +414,7 @@ scenario "replication" { } variables { - vault_instances = step.create_secondary_cluster_targets.hosts + vault_hosts = step.create_secondary_cluster_targets.hosts vault_install_dir = local.vault_install_dir vault_root_token = step.create_secondary_cluster.root_token } @@ -542,7 +592,7 @@ scenario "replication" { force_unseal = matrix.primary_seal == "shamir" initialize_cluster = false install_dir = local.vault_install_dir - license = matrix.edition != "oss" ? step.read_vault_license.license : null + license = matrix.edition != "ce" ? step.read_vault_license.license : null local_artifact_path = local.artifact_path manage_service = local.manage_service packages = concat(global.packages, global.distro_packages[matrix.distro]) @@ -555,7 +605,7 @@ scenario "replication" { } } - step "verify_addtional_primary_nodes_are_unsealed" { + step "verify_additional_primary_nodes_are_unsealed" { module = module.vault_verify_unsealed depends_on = [step.add_additional_nodes_to_primary_cluster] @@ -575,7 +625,7 @@ scenario "replication" { depends_on = [ step.add_additional_nodes_to_primary_cluster, step.create_primary_cluster, - step.verify_addtional_primary_nodes_are_unsealed + step.verify_additional_primary_nodes_are_unsealed ] providers = { @@ -592,8 +642,8 @@ scenario "replication" { step "remove_primary_follower_1" { module = module.shutdown_node depends_on = [ - step.get_primary_cluster_ips, - step.verify_addtional_primary_nodes_are_unsealed + step.get_primary_cluster_replication_data, + step.verify_additional_primary_nodes_are_unsealed ] providers = { @@ -601,7 +651,7 @@ scenario "replication" { } variables { - node_public_ip = step.get_primary_cluster_ips.follower_public_ip_1 + node_public_ip = step.get_primary_cluster_replication_data.follower_public_ip_1 } } @@ -621,12 +671,31 @@ scenario "replication" { } } - step "get_updated_primary_cluster_ips" { - module = module.vault_get_cluster_ips + // After 
we've removed two nodes from the cluster we need to get an updated set of vault hosts + // to work with. + step "get_remaining_hosts_replication_data" { + module = module.replication_data depends_on = [ - step.add_additional_nodes_to_primary_cluster, - step.remove_primary_follower_1, - step.remove_primary_leader + step.get_primary_cluster_ips, + step.remove_primary_leader, + ] + + variables { + added_hosts = step.create_primary_cluster_additional_targets.hosts + added_hosts_count = var.vault_instance_count + initial_hosts = step.create_primary_cluster_targets.hosts + initial_hosts_count = var.vault_instance_count + removed_follower_host = step.get_primary_cluster_replication_data.follower_host_1 + removed_primary_host = step.get_primary_cluster_ips.leader_host + } + } + + // Wait for the remaining hosts in our cluster to elect a new leader. + step "wait_for_leader_in_remaining_hosts" { + module = module.vault_wait_for_leader + depends_on = [ + step.remove_primary_leader, + step.get_remaining_hosts_replication_data, ] providers = { @@ -634,17 +703,41 @@ scenario "replication" { } variables { - vault_instances = step.create_primary_cluster_targets.hosts - vault_install_dir = local.vault_install_dir - added_vault_instances = step.create_primary_cluster_additional_targets.hosts - vault_root_token = step.create_primary_cluster.root_token - node_public_ip = step.get_primary_cluster_ips.follower_public_ip_2 + timeout = 120 # seconds + vault_install_dir = local.vault_install_dir + vault_root_token = step.create_primary_cluster.root_token + vault_hosts = step.get_remaining_hosts_replication_data.remaining_hosts } } + // Get our new leader and follower IP addresses. 
+ step "get_updated_primary_cluster_ips" { + module = module.vault_get_cluster_ips + depends_on = [ + step.get_remaining_hosts_replication_data, + step.wait_for_leader_in_remaining_hosts, + ] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + variables { + vault_hosts = step.get_remaining_hosts_replication_data.remaining_hosts + vault_install_dir = local.vault_install_dir + vault_instance_count = step.get_remaining_hosts_replication_data.remaining_hosts_count + vault_root_token = step.create_primary_cluster.root_token + } + } + + // Make sure the cluster has the correct performance replication state after the new leader election. step "verify_updated_performance_replication" { - module = module.vault_verify_performance_replication - depends_on = [step.get_updated_primary_cluster_ips] + module = module.vault_verify_performance_replication + depends_on = [ + step.get_remaining_hosts_replication_data, + step.wait_for_leader_in_remaining_hosts, + step.get_updated_primary_cluster_ips, + ] providers = { enos = local.enos_provider[matrix.distro] @@ -709,6 +802,11 @@ scenario "replication" { value = step.create_secondary_cluster_targets.hosts } + output "remaining_hosts" { + description = "The Vault cluster primary hosts after removing the leader and follower" + value = step.get_remaining_hosts_replication_data.remaining_hosts + } + output "initial_primary_replication_status" { description = "The Vault primary cluster performance replication status" value = step.verify_performance_replication.primary_replication_status diff --git a/enos/enos-scenario-smoke.hcl b/enos/enos-scenario-smoke.hcl index 56c8b6b1a8..cb8e30e5b9 100644 --- a/enos/enos-scenario-smoke.hcl +++ b/enos/enos-scenario-smoke.hcl @@ -4,12 +4,12 @@ scenario "smoke" { matrix { arch = ["amd64", "arm64"] - backend = ["consul", "raft"] artifact_source = ["local", "crt", "artifactory"] artifact_type = ["bundle", "package"] - consul_version = ["1.14.2", "1.13.4", "1.12.7"] + backend = ["consul", 
"raft"] + consul_version = ["1.12.9", "1.13.9", "1.14.9", "1.15.5", "1.16.1"] distro = ["ubuntu", "rhel"] - edition = ["oss", "ent", "ent.fips1402", "ent.hsm", "ent.hsm.fips1402"] + edition = ["ce", "ent", "ent.fips1402", "ent.hsm", "ent.hsm.fips1402"] seal = ["awskms", "shamir"] # Our local builder always creates bundles @@ -84,7 +84,7 @@ scenario "smoke" { // This step reads the contents of the backend license if we're using a Consul backend and // the edition is "ent". step "read_backend_license" { - skip_step = matrix.backend == "raft" || var.backend_edition == "oss" + skip_step = matrix.backend == "raft" || var.backend_edition == "ce" module = module.read_license variables { @@ -93,7 +93,7 @@ scenario "smoke" { } step "read_vault_license" { - skip_step = matrix.edition == "oss" + skip_step = matrix.edition == "ce" module = module.read_license variables { @@ -182,7 +182,7 @@ scenario "smoke" { } : null enable_audit_devices = var.vault_enable_audit_devices install_dir = local.vault_install_dir - license = matrix.edition != "oss" ? step.read_vault_license.license : null + license = matrix.edition != "ce" ? 
step.read_vault_license.license : null local_artifact_path = local.artifact_path manage_service = local.manage_service packages = concat(global.packages, global.distro_packages[matrix.distro]) @@ -192,8 +192,9 @@ scenario "smoke" { } } - step "get_vault_cluster_ips" { - module = module.vault_get_cluster_ips + // Wait for our cluster to elect a leader + step "wait_for_leader" { + module = module.vault_wait_for_leader depends_on = [step.create_vault_cluster] providers = { @@ -201,7 +202,23 @@ scenario "smoke" { } variables { - vault_instances = step.create_vault_cluster_targets.hosts + timeout = 120 # seconds + vault_hosts = step.create_vault_cluster_targets.hosts + vault_install_dir = local.vault_install_dir + vault_root_token = step.create_vault_cluster.root_token + } + } + + step "get_vault_cluster_ips" { + module = module.vault_get_cluster_ips + depends_on = [step.wait_for_leader] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + variables { + vault_hosts = step.create_vault_cluster_targets.hosts vault_install_dir = local.vault_install_dir vault_root_token = step.create_vault_cluster.root_token } @@ -228,7 +245,7 @@ scenario "smoke" { step "verify_vault_unsealed" { module = module.vault_verify_unsealed - depends_on = [step.create_vault_cluster] + depends_on = [step.wait_for_leader] providers = { enos = local.enos_provider[matrix.distro] @@ -261,9 +278,12 @@ scenario "smoke" { } step "verify_raft_auto_join_voter" { - skip_step = matrix.backend != "raft" - module = module.vault_verify_raft_auto_join_voter - depends_on = [step.create_vault_cluster] + skip_step = matrix.backend != "raft" + module = module.vault_verify_raft_auto_join_voter + depends_on = [ + step.create_vault_cluster, + step.get_vault_cluster_ips + ] providers = { enos = local.enos_provider[matrix.distro] @@ -277,8 +297,11 @@ scenario "smoke" { } step "verify_replication" { - module = module.vault_verify_replication - depends_on = [step.create_vault_cluster] + module = 
module.vault_verify_replication + depends_on = [ + step.create_vault_cluster, + step.get_vault_cluster_ips + ] providers = { enos = local.enos_provider[matrix.distro] @@ -309,16 +332,18 @@ scenario "smoke" { } step "verify_ui" { - module = module.vault_verify_ui - depends_on = [step.create_vault_cluster] + module = module.vault_verify_ui + depends_on = [ + step.create_vault_cluster, + step.get_vault_cluster_ips + ] providers = { enos = local.enos_provider[matrix.distro] } variables { - vault_instances = step.create_vault_cluster_targets.hosts - vault_install_dir = local.vault_install_dir + vault_instances = step.create_vault_cluster_targets.hosts } } diff --git a/enos/enos-scenario-ui.hcl b/enos/enos-scenario-ui.hcl index 0be521d8fa..ea2e20f1db 100644 --- a/enos/enos-scenario-ui.hcl +++ b/enos/enos-scenario-ui.hcl @@ -3,7 +3,7 @@ scenario "ui" { matrix { - edition = ["oss", "ent"] + edition = ["ce", "ent"] backend = ["consul", "raft"] } @@ -20,12 +20,12 @@ scenario "ui" { backend_license_path = abspath(var.backend_license_path != null ? var.backend_license_path : joinpath(path.root, "./support/consul.hclic")) backend_tag_key = "VaultStorage" build_tags = { - "oss" = ["ui"] + "ce" = ["ui"] "ent" = ["ui", "enterprise", "ent"] } bundle_path = abspath(var.vault_artifact_path) distro = "ubuntu" - consul_version = "1.14.2" + consul_version = "1.16.1" seal = "awskms" tags = merge({ "Project Name" : var.project_name @@ -39,7 +39,7 @@ scenario "ui" { vault_install_dir = var.vault_install_dir vault_license_path = abspath(var.vault_license_path != null ? var.vault_license_path : joinpath(path.root, "./support/vault.hclic")) vault_tag_key = "Type" // enos_vault_start expects Type as the tag key - ui_test_filter = var.ui_test_filter != null && try(trimspace(var.ui_test_filter), "") != "" ? var.ui_test_filter : (matrix.edition == "oss") ? "!enterprise" : null + ui_test_filter = var.ui_test_filter != null && try(trimspace(var.ui_test_filter), "") != "" ? 
var.ui_test_filter : (matrix.edition == "ce") ? "!enterprise" : null } step "build_vault" { @@ -71,7 +71,7 @@ scenario "ui" { // This step reads the contents of the backend license if we're using a Consul backend and // the edition is "ent". step "read_backend_license" { - skip_step = matrix.backend == "raft" || var.backend_edition == "oss" + skip_step = matrix.backend == "raft" || var.backend_edition == "ce" module = module.read_license variables { @@ -80,7 +80,7 @@ scenario "ui" { } step "read_vault_license" { - skip_step = matrix.edition == "oss" + skip_step = matrix.edition == "ce" module = module.read_license variables { @@ -168,7 +168,7 @@ scenario "ui" { } : null enable_audit_devices = var.vault_enable_audit_devices install_dir = local.vault_install_dir - license = matrix.edition != "oss" ? step.read_vault_license.license : null + license = matrix.edition != "ce" ? step.read_vault_license.license : null local_artifact_path = local.bundle_path packages = global.distro_packages["ubuntu"] storage_backend = matrix.backend @@ -177,8 +177,26 @@ scenario "ui" { } } + // Wait for our cluster to elect a leader + step "wait_for_leader" { + module = module.vault_wait_for_leader + depends_on = [step.create_vault_cluster] + + providers = { + enos = provider.enos.ubuntu + } + + variables { + timeout = 120 # seconds + vault_hosts = step.create_vault_cluster_targets.hosts + vault_install_dir = local.vault_install_dir + vault_root_token = step.create_vault_cluster.root_token + } + } + step "test_ui" { - module = module.vault_test_ui + module = module.vault_test_ui + depends_on = [step.wait_for_leader] variables { vault_addr = step.create_vault_cluster_targets.hosts[0].public_ip diff --git a/enos/enos-scenario-upgrade.hcl b/enos/enos-scenario-upgrade.hcl index 9e231758ef..e7996f2735 100644 --- a/enos/enos-scenario-upgrade.hcl +++ b/enos/enos-scenario-upgrade.hcl @@ -4,12 +4,17 @@ scenario "upgrade" { matrix { arch = ["amd64", "arm64"] - backend = ["consul", "raft"] 
artifact_source = ["local", "crt", "artifactory"] artifact_type = ["bundle", "package"] - consul_version = ["1.14.2", "1.13.4", "1.12.7"] + backend = ["consul", "raft"] + consul_version = ["1.14.9", "1.15.5", "1.16.1"] distro = ["ubuntu", "rhel"] - edition = ["oss", "ent", "ent.fips1402", "ent.hsm", "ent.hsm.fips1402"] + edition = ["ce", "ent", "ent.fips1402", "ent.hsm", "ent.hsm.fips1402"] + // NOTE: when backporting the initial version make sure we don't include initial versions that + // are a higher minor version than our release candidate. Also, prior to 1.11.x the + // /v1/sys/seal-status API has known issues that could cause this scenario to fail when using + // those earlier versions. + initial_version = ["1.8.12", "1.9.10", "1.10.11", "1.11.12", "1.12.11", "1.13.6", "1.14.2"] seal = ["awskms", "shamir"] # Our local builder always creates bundles @@ -23,6 +28,12 @@ scenario "upgrade" { arch = ["arm64"] edition = ["ent.fips1402", "ent.hsm", "ent.hsm.fips1402"] } + + # FIPS 140-2 editions began at 1.10 + exclude { + edition = ["ent.fips1402", "ent.hsm.fips1402"] + initial_version = ["1.8.12", "1.9.10"] + } } terraform_cli = terraform_cli.default @@ -85,7 +96,7 @@ scenario "upgrade" { // This step reads the contents of the backend license if we're using a Consul backend and // the edition is "ent". step "read_backend_license" { - skip_step = matrix.backend == "raft" || var.backend_edition == "oss" + skip_step = matrix.backend == "raft" || var.backend_edition == "ce" module = module.read_license variables { @@ -94,7 +105,7 @@ scenario "upgrade" { } step "read_vault_license" { - skip_step = matrix.edition == "oss" + skip_step = matrix.edition == "ce" module = module.read_license variables { @@ -182,12 +193,15 @@ scenario "upgrade" { } : null enable_audit_devices = var.vault_enable_audit_devices install_dir = local.vault_install_dir - license = matrix.edition != "oss" ? step.read_vault_license.license : null + license = matrix.edition != "ce" ? 
step.read_vault_license.license : null packages = concat(global.packages, global.distro_packages[matrix.distro]) - release = var.vault_upgrade_initial_release - storage_backend = matrix.backend - target_hosts = step.create_vault_cluster_targets.hosts - unseal_method = matrix.seal + release = { + edition = matrix.edition + version = matrix.initial_version + } + storage_backend = matrix.backend + target_hosts = step.create_vault_cluster_targets.hosts + unseal_method = matrix.seal } } @@ -200,7 +214,7 @@ scenario "upgrade" { } variables { - vault_instances = step.create_vault_cluster_targets.hosts + vault_hosts = step.create_vault_cluster_targets.hosts vault_install_dir = local.vault_install_dir vault_root_token = step.create_vault_cluster.root_token } @@ -210,7 +224,7 @@ scenario "upgrade" { module = module.vault_verify_write_data depends_on = [ step.create_vault_cluster, - step.get_vault_cluster_ips + step.get_vault_cluster_ips, ] providers = { @@ -232,6 +246,7 @@ scenario "upgrade" { module = module.vault_upgrade depends_on = [ step.create_vault_cluster, + step.verify_write_test_data, ] providers = { @@ -249,11 +264,49 @@ scenario "upgrade" { } } + // Wait for our upgraded cluster to elect a leader + step "wait_for_leader_after_upgrade" { + module = module.vault_wait_for_leader + depends_on = [ + step.create_vault_cluster, + step.upgrade_vault, + ] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + variables { + timeout = 120 # seconds + vault_hosts = step.create_vault_cluster_targets.hosts + vault_install_dir = local.vault_install_dir + vault_root_token = step.create_vault_cluster.root_token + } + } + + step "get_updated_vault_cluster_ips" { + module = module.vault_get_cluster_ips + depends_on = [ + step.create_vault_cluster, + step.upgrade_vault, + step.wait_for_leader_after_upgrade, + ] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + variables { + vault_hosts = step.create_vault_cluster_targets.hosts + vault_install_dir = 
local.vault_install_dir + vault_root_token = step.create_vault_cluster.root_token + } + } + step "verify_vault_version" { module = module.vault_verify_version depends_on = [ - step.create_backend_cluster, - step.upgrade_vault, + step.get_updated_vault_cluster_ips, ] providers = { @@ -271,30 +324,10 @@ scenario "upgrade" { } } - step "get_updated_vault_cluster_ips" { - module = module.vault_get_cluster_ips - depends_on = [ - step.create_vault_cluster, - step.upgrade_vault - ] - - providers = { - enos = local.enos_provider[matrix.distro] - } - - variables { - vault_instances = step.create_vault_cluster_targets.hosts - vault_install_dir = local.vault_install_dir - vault_root_token = step.create_vault_cluster.root_token - } - } - step "verify_vault_unsealed" { module = module.vault_verify_unsealed depends_on = [ - step.create_vault_cluster, step.get_updated_vault_cluster_ips, - step.upgrade_vault, ] providers = { @@ -329,8 +362,7 @@ scenario "upgrade" { skip_step = matrix.backend != "raft" module = module.vault_verify_raft_auto_join_voter depends_on = [ - step.create_backend_cluster, - step.upgrade_vault, + step.get_updated_vault_cluster_ips, ] providers = { @@ -344,6 +376,38 @@ scenario "upgrade" { } } + step "verify_replication" { + module = module.vault_verify_replication + depends_on = [ + step.get_updated_vault_cluster_ips, + ] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + variables { + vault_edition = matrix.edition + vault_install_dir = local.vault_install_dir + vault_instances = step.create_vault_cluster_targets.hosts + } + } + + step "verify_ui" { + module = module.vault_verify_ui + depends_on = [ + step.get_updated_vault_cluster_ips, + ] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + variables { + vault_instances = step.create_vault_cluster_targets.hosts + } + } + output "audit_device_file_path" { description = "The file path for the file audit device, if enabled" value = 
step.create_vault_cluster.audit_device_file_path diff --git a/enos/enos-terraform.hcl b/enos/enos-terraform.hcl index 95c89fdf24..c8e03fac26 100644 --- a/enos/enos-terraform.hcl +++ b/enos/enos-terraform.hcl @@ -11,7 +11,7 @@ terraform_cli "default" { /* provider_installation { dev_overrides = { - "app.terraform.io/hashicorp-qti/enos" = abspath("../../enos-provider") + "app.terraform.io/hashicorp-qti/enos" = abspath("../../enos-provider/dist") } direct {} } diff --git a/enos/enos-variables.hcl b/enos/enos-variables.hcl index c212230ab8..12a9c961f3 100644 --- a/enos/enos-variables.hcl +++ b/enos/enos-variables.hcl @@ -48,7 +48,7 @@ variable "aws_ssh_private_key_path" { variable "backend_edition" { description = "The backend release edition if applicable" type = string - default = "oss" // or "ent" + default = "ce" // or "ent" } variable "backend_instance_type" { @@ -122,14 +122,6 @@ variable "vault_artifact_type" { default = "bundle" } -variable "vault_autopilot_initial_release" { - description = "The Vault release to deploy before upgrading with autopilot" - default = { - edition = "ent" - version = "1.11.0" - } -} - variable "vault_artifact_path" { description = "Path to CRT generated or local vault.zip bundle" type = string @@ -161,7 +153,7 @@ variable "vault_instance_count" { } variable "vault_license_path" { - description = "The path to a valid Vault enterprise edition license. This is only required for non-oss editions" + description = "The path to a valid Vault enterprise edition license. This is only required for non-ce editions" type = string default = null } @@ -193,7 +185,7 @@ variable "vault_revision" { variable "vault_upgrade_initial_release" { description = "The Vault release to deploy before upgrading" default = { - edition = "oss" + edition = "ce" // Vault 1.10.5 has a known issue with retry_join. 
version = "1.10.4" } diff --git a/enos/enos.vars.hcl b/enos/enos.vars.hcl index 7720d11b4d..96e2b3612f 100644 --- a/enos/enos.vars.hcl +++ b/enos/enos.vars.hcl @@ -24,7 +24,7 @@ # aws_ssh_private_key_path = "./support/private_key.pem" # backend_edition is the backend (consul) release edition if applicable to the scenario. -# backend_edition = "oss" +# backend_edition = "ce" # backend_license_path is the license for the backend if applicable (Consul Enterprise)". # backend_license_path = "./support/consul.hclic" @@ -75,14 +75,6 @@ # It should be 'package' for .deb or # .rpm package and 'bundle' for .zip bundles" # vault_artifact_type = "bundle" -# vault_autopilot_initial_release is the version of Vault to deploy before doing an autopilot upgrade -# to the test artifact. -# vault_autopilot_initial_release = { -# edition = "ent" -# version = "1.11.0" -# } -# } - # vault_build_date is the build date for Vault artifact. Some validations will require the binary build # date to match" # vault_build_date = "2023-07-07T14:06:37Z" // make ci-get-date for example @@ -108,7 +100,7 @@ # vault_instance_count = 3 # vault_license_path is the path to a valid Vault enterprise edition license. -# This is only required for non-oss editions" +# This is only required for non-ce editions" # vault_license_path = "./support/vault.hclic" # vault_local_build_tags override the build tags we pass to the Go compiler for builder:local variants. @@ -122,16 +114,6 @@ # binary and cluster to report this version. # vault_product_version = "1.15.0" -# vault_upgrade_initial_release is the Vault release to deploy before upgrading. - # vault_revision is the git sha of Vault artifact we are testing. Some validations will expect the vault # binary and cluster to report this revision. # vault_revision = "df733361af26f8bb29b63704168bbc5ab8d083de" - -# vault_upgrade_initial_release is the Vault release to deploy before doing an in-place upgrade. 
-# vault_upgrade_initial_release = { -# edition = "oss" -# // Vault 1.10.5 has a known issue with retry_join. -# version = "1.10.4" -# } -# } diff --git a/enos/k8s/enos-scenario-k8s.hcl b/enos/k8s/enos-scenario-k8s.hcl index 6d013b4fea..e40c187059 100644 --- a/enos/k8s/enos-scenario-k8s.hcl +++ b/enos/k8s/enos-scenario-k8s.hcl @@ -3,7 +3,7 @@ scenario "k8s" { matrix { - edition = ["oss", "ent"] + edition = ["ce", "ent"] } terraform_cli = terraform_cli.default @@ -17,7 +17,7 @@ scenario "k8s" { locals { image_path = abspath(var.vault_docker_image_archive) - image_repo = var.vault_image_repository != null ? var.vault_image_repository : matrix.edition == "oss" ? "hashicorp/vault" : "hashicorp/vault-enterprise" + image_repo = var.vault_image_repository != null ? var.vault_image_repository : matrix.edition == "ce" ? "hashicorp/vault" : "hashicorp/vault-enterprise" image_tag = replace(var.vault_product_version, "+ent", "-ent") // The additional '-0' is required in the constraint since without it, the semver function will @@ -27,7 +27,7 @@ scenario "k8s" { } step "read_license" { - skip_step = matrix.edition == "oss" + skip_step = matrix.edition == "ce" module = module.read_license variables { @@ -66,7 +66,7 @@ scenario "k8s" { kubeconfig_base64 = step.create_kind_cluster.kubeconfig_base64 vault_edition = matrix.edition vault_log_level = var.vault_log_level - ent_license = matrix.edition != "oss" ? step.read_license.license : null + ent_license = matrix.edition != "ce" ? 
step.read_license.license : null } depends_on = [step.load_docker_image, step.create_kind_cluster] @@ -101,7 +101,7 @@ scenario "k8s" { step "verify_ui" { module = module.k8s_verify_ui - skip_step = matrix.edition == "oss" + skip_step = matrix.edition == "ce" variables { vault_pods = step.deploy_vault.vault_pods diff --git a/enos/modules/backend_consul/main.tf b/enos/modules/backend_consul/main.tf index 2f9f351819..75b4169161 100644 --- a/enos/modules/backend_consul/main.tf +++ b/enos/modules/backend_consul/main.tf @@ -7,7 +7,7 @@ terraform { required_providers { enos = { source = "app.terraform.io/hashicorp-qti/enos" - version = ">= 0.4.0" + version = ">= 0.4.4" } } } diff --git a/enos/modules/backend_consul/variables.tf b/enos/modules/backend_consul/variables.tf index 496fc9967d..34a96d8535 100644 --- a/enos/modules/backend_consul/variables.tf +++ b/enos/modules/backend_consul/variables.tf @@ -63,7 +63,7 @@ variable "release" { description = "Consul release version and edition to install from releases.hashicorp.com" default = { version = "1.15.3" - edition = "oss" + edition = "ce" } } diff --git a/enos/modules/get_local_metadata/scripts/build_date.sh b/enos/modules/get_local_metadata/scripts/build_date.sh index f701cba851..ea63c74d8e 100755 --- a/enos/modules/get_local_metadata/scripts/build_date.sh +++ b/enos/modules/get_local_metadata/scripts/build_date.sh @@ -1,4 +1,4 @@ -#!/bin/env bash +#!/usr/bin/env bash # Copyright (c) HashiCorp, Inc. # SPDX-License-Identifier: BUSL-1.1 diff --git a/enos/modules/get_local_metadata/scripts/version.sh b/enos/modules/get_local_metadata/scripts/version.sh index 514cb32df5..6b910c404e 100755 --- a/enos/modules/get_local_metadata/scripts/version.sh +++ b/enos/modules/get_local_metadata/scripts/version.sh @@ -1,4 +1,4 @@ -#!/bin/env bash +#!/usr/bin/env bash # Copyright (c) HashiCorp, Inc. 
# SPDX-License-Identifier: BUSL-1.1 diff --git a/enos/modules/k8s_deploy_vault/raft-config.hcl b/enos/modules/k8s_deploy_vault/raft-config.hcl index b624dad80f..423390b2d1 100644 --- a/enos/modules/k8s_deploy_vault/raft-config.hcl +++ b/enos/modules/k8s_deploy_vault/raft-config.hcl @@ -7,14 +7,6 @@ listener "tcp" { storage "raft" { path = "/vault/data" - autopilot { - cleanup_dead_servers = "true" - last_contact_threshold = "200ms" - last_contact_failure_threshold = "10m" - max_trailing_logs = 250000 - min_quorum = 5 - server_stabilization_time = "10s" - } } service_registration "kubernetes" {} diff --git a/enos/modules/k8s_vault_verify_replication/scripts/smoke-verify-replication.sh b/enos/modules/k8s_vault_verify_replication/scripts/smoke-verify-replication.sh index b69874f8f1..96fdf6a320 100755 --- a/enos/modules/k8s_vault_verify_replication/scripts/smoke-verify-replication.sh +++ b/enos/modules/k8s_vault_verify_replication/scripts/smoke-verify-replication.sh @@ -1,4 +1,4 @@ -#!/usr/bin/env bash +#!/usr/bin/env bash # Copyright (c) HashiCorp, Inc. # SPDX-License-Identifier: BUSL-1.1 @@ -9,14 +9,14 @@ set -e fail() { - echo "$1" 1>&2 - exit 1 + echo "$1" 1>&2 + exit 1 } -# Replication STATUS endpoint should have data.mode disabled for OSS release -if [ "$VAULT_EDITION" == "oss" ]; then +# Replication STATUS endpoint should have data.mode disabled for CE release +if [ "$VAULT_EDITION" == "ce" ]; then if [ "$(echo "${STATUS}" | jq -r '.data.mode')" != "disabled" ]; then - fail "replication data mode is not disabled for OSS release!" + fail "replication data mode is not disabled for CE release!" 
fi else if [ "$(echo "${STATUS}" | jq -r '.data.dr')" == "" ]; then diff --git a/enos/modules/k8s_vault_verify_ui/scripts/smoke-verify-ui.sh b/enos/modules/k8s_vault_verify_ui/scripts/smoke-verify-ui.sh index 68533a9bec..4372a53086 100755 --- a/enos/modules/k8s_vault_verify_ui/scripts/smoke-verify-ui.sh +++ b/enos/modules/k8s_vault_verify_ui/scripts/smoke-verify-ui.sh @@ -6,8 +6,8 @@ set -e fail() { - echo "$1" 1>&2 - exit 1 + echo "$1" 1>&2 + exit 1 } if [ "${REDIRECT_URL}" != "http://localhost:8200/ui/" ]; then diff --git a/enos/modules/k8s_vault_verify_version/main.tf b/enos/modules/k8s_vault_verify_version/main.tf index e67481a90d..3faf497b29 100644 --- a/enos/modules/k8s_vault_verify_version/main.tf +++ b/enos/modules/k8s_vault_verify_version/main.tf @@ -12,7 +12,7 @@ terraform { locals { instances = toset([for idx in range(var.vault_instance_count) : tostring(idx)]) - expected_version = var.vault_edition == "oss" ? var.vault_product_version : "${var.vault_product_version}-ent" + expected_version = var.vault_edition == "ce" ? 
var.vault_product_version : "${var.vault_product_version}-ent" } resource "enos_remote_exec" "release_info" { @@ -38,13 +38,13 @@ resource "enos_local_exec" "smoke-verify-version" { for_each = enos_remote_exec.release_info environment = { - VAULT_STATUS = jsonencode(jsondecode(each.value.stdout).status) ACTUAL_VERSION = jsondecode(each.value.stdout).version + BUILD_DATE = var.vault_build_date + CHECK_BUILD_DATE = var.check_build_date EXPECTED_VERSION = var.vault_product_version, VAULT_EDITION = var.vault_edition, VAULT_REVISION = var.vault_product_revision, - CHECK_BUILD_DATE = var.check_build_date - BUILD_DATE = var.vault_build_date + VAULT_STATUS = jsonencode(jsondecode(each.value.stdout).status) } scripts = [abspath("${path.module}/scripts/smoke-verify-version.sh")] diff --git a/enos/modules/k8s_vault_verify_version/scripts/smoke-verify-version.sh b/enos/modules/k8s_vault_verify_version/scripts/smoke-verify-version.sh index 09b43b2ac3..4e8fc944db 100755 --- a/enos/modules/k8s_vault_verify_version/scripts/smoke-verify-version.sh +++ b/enos/modules/k8s_vault_verify_version/scripts/smoke-verify-version.sh @@ -8,38 +8,39 @@ set -e fail() { - echo "$1" 1>&2 - exit 1 + echo "$1" 1>&2 + exit 1 } if [[ "${CHECK_BUILD_DATE}" == "false" ]]; then expected_build_date="" else - build_date="${BUILD_DATE}" - if [[ "${build_date}" == "" ]]; then - build_date=$(echo "${VAULT_STATUS}" | jq -Mr .build_date) + cfg_build_date="${BUILD_DATE}" + if [[ "${cfg_build_date}" == "" ]]; then + cfg_build_date=$(echo "${VAULT_STATUS}" | jq -Mr .build_date) fi - expected_build_date=", built $build_date" + expected_build_date=", built $cfg_build_date" fi vault_expected_version="Vault v${EXPECTED_VERSION} (${VAULT_REVISION})" case "${VAULT_EDITION}" in - oss) version_expected="${vault_expected_version}${expected_build_date}";; - ent) version_expected="${vault_expected_version}${expected_build_date}";; - ent.hsm) version_expected="${vault_expected_version}${expected_build_date} (cgo)";; - 
ent.fips1402) version_expected="${vault_expected_version}${expected_build_date} (cgo)" ;; - ent.hsm.fips1402) version_expected="${vault_expected_version}${expected_build_date} (cgo)" ;; + ce) version_expected="${vault_expected_version}${expected_build_date}";; + ent) version_expected="${vault_expected_version}${expected_build_date}";; + ent.hsm) version_expected="${vault_expected_version}${expected_build_date} (cgo)";; + ent.fips1402) version_expected="${vault_expected_version}${expected_build_date} (cgo)" ;; + ent.hsm.fips1402) version_expected="${vault_expected_version}${expected_build_date} (cgo)" ;; *) fail "(${VAULT_EDITION}) does not match any known Vault editions" esac version_expected_nosha=$(echo "$version_expected" | awk '!($3="")' | sed 's/ / /' | sed -e 's/[[:space:]]*$//') if [[ "${ACTUAL_VERSION}" == "$version_expected_nosha" ]] || [[ "${ACTUAL_VERSION}" == "$version_expected" ]]; then - echo "Version verification succeeded!" + echo "Version verification succeeded!" else - echo "CHECK_BUILD_DATE: ${CHECK_BUILD_DATE}" - echo "BUILD_DATE: ${BUILD_DATE}" - echo "build_date: ${build_date}" - fail "expected Version=$version_expected or $version_expected_nosha, got: ${ACTUAL_VERSION}" + echo "Version checking enabled: ${CHECK_BUILD_DATE}" 1>&2 + echo "Given build date: ${BUILD_DATE}" 1>&2 + echo "Interpreted build date: ${cfg_build_date}" 1>&2 + + fail "expected Version=$version_expected or $version_expected_nosha, got: ${ACTUAL_VERSION}" fi diff --git a/enos/modules/replication_data/main.tf b/enos/modules/replication_data/main.tf new file mode 100644 index 0000000000..dec9640837 --- /dev/null +++ b/enos/modules/replication_data/main.tf @@ -0,0 +1,104 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + +// An arithmetic module for calculating inputs and outputs for various replication steps. 
+ +// Get the first follower out of the hosts set +variable "follower_hosts" { + type = map(object({ + private_ip = string + public_ip = string + })) + default = {} +} + +output "follower_host_1" { + value = try(var.follower_hosts[0], null) +} + +output "follower_public_ip_1" { + value = try(var.follower_hosts[0].public_ip, null) +} + +output "follower_private_ip_1" { + value = try(var.follower_hosts[0].private_ip, null) +} + +output "follower_host_2" { + value = try(var.follower_hosts[1], null) +} + +output "follower_public_ip_2" { + value = try(var.follower_hosts[1].public_ip, null) +} + +output "follower_private_ip_2" { + value = try(var.follower_hosts[1].private_ip, null) +} + +// Calculate our remainder hosts after we've added and removed leader +variable "initial_hosts" { + type = map(object({ + private_ip = string + public_ip = string + })) + default = {} +} + +variable "initial_hosts_count" { + type = number + default = 0 +} + +variable "added_hosts" { + type = map(object({ + private_ip = string + public_ip = string + })) + default = {} +} + +variable "added_hosts_count" { + type = number + default = 0 +} + +variable "removed_primary_host" { + type = object({ + private_ip = string + public_ip = string + }) + default = null +} + +variable "removed_follower_host" { + type = object({ + private_ip = string + public_ip = string + }) + default = null +} + +locals { + remaining_hosts_count = max((var.initial_hosts_count + var.added_hosts_count - 2), 0) + indices = [for idx in range(local.remaining_hosts_count) : idx] + remaining_initial = setsubtract(values(var.initial_hosts), [var.removed_primary_host, var.removed_follower_host]) + remaining_hosts_list = tolist(setunion(values(var.added_hosts), local.remaining_initial)) + remaining_hosts = zipmap(local.indices, local.remaining_hosts_list) +} + +output "remaining_initial_count" { + value = length(local.remaining_initial) +} + +output "remaining_initial_hosts" { + value = local.remaining_initial +} + +output 
"remaining_hosts_count" { + value = local.remaining_hosts_count +} + +output "remaining_hosts" { + value = local.remaining_hosts +} diff --git a/enos/modules/vault_agent/main.tf b/enos/modules/vault_agent/main.tf index 3ec43ca5a1..067781e4f5 100644 --- a/enos/modules/vault_agent/main.tf +++ b/enos/modules/vault_agent/main.tf @@ -55,12 +55,14 @@ locals { } resource "enos_remote_exec" "set_up_approle_auth_and_agent" { - content = templatefile("${path.module}/templates/set-up-approle-and-agent.sh", { - vault_install_dir = var.vault_install_dir - vault_token = var.vault_root_token - vault_agent_template_destination = var.vault_agent_template_destination - vault_agent_template_contents = var.vault_agent_template_contents - }) + environment = { + VAULT_INSTALL_DIR = var.vault_install_dir, + VAULT_TOKEN = var.vault_root_token, + VAULT_AGENT_TEMPLATE_DESTINATION = var.vault_agent_template_destination, + VAULT_AGENT_TEMPLATE_CONTENTS = var.vault_agent_template_contents, + } + + scripts = [abspath("${path.module}/scripts/set-up-approle-and-agent.sh")] transport = { ssh = { diff --git a/enos/modules/vault_agent/templates/set-up-approle-and-agent.sh b/enos/modules/vault_agent/scripts/set-up-approle-and-agent.sh similarity index 70% rename from enos/modules/vault_agent/templates/set-up-approle-and-agent.sh rename to enos/modules/vault_agent/scripts/set-up-approle-and-agent.sh index 5b3810b5ee..e939ea1d7c 100644 --- a/enos/modules/vault_agent/templates/set-up-approle-and-agent.sh +++ b/enos/modules/vault_agent/scripts/set-up-approle-and-agent.sh @@ -5,7 +5,7 @@ set -e -binpath=${vault_install_dir}/vault +binpath=${VAULT_INSTALL_DIR}/vault fail() { echo "$1" 1>&2 @@ -15,14 +15,14 @@ fail() { test -x "$binpath" || fail "unable to locate vault binary at $binpath" export VAULT_ADDR='http://127.0.0.1:8200' -export VAULT_TOKEN='${vault_token}' +[[ -z "$VAULT_TOKEN" ]] && fail "VAULT_TOKEN env variable has not been set" # If approle was already enabled, disable it as we're about to 
re-enable it (the || true is so we don't fail if it doesn't already exist) $binpath auth disable approle || true -approle_create_status=$($binpath auth enable approle) +$binpath auth enable approle -approle_status=$($binpath write auth/approle/role/agent-role secret_id_ttl=700h token_num_uses=1000 token_ttl=600h token_max_ttl=700h secret_id_num_uses=1000) +$binpath write auth/approle/role/agent-role secret_id_ttl=700h token_num_uses=1000 token_ttl=600h token_max_ttl=700h secret_id_num_uses=1000 ROLEID=$($binpath read --format=json auth/approle/role/agent-role/role-id | jq -r '.data.role_id') @@ -36,8 +36,8 @@ if [[ "$SECRETID" == '' ]]; then fail "expected SECRETID to be nonempty, but it is empty" fi -echo $ROLEID > /tmp/role-id -echo $SECRETID > /tmp/secret-id +echo "$ROLEID" > /tmp/role-id +echo "$SECRETID" > /tmp/secret-id cat > /tmp/vault-agent.hcl <<- EOM pid_file = "/tmp/pidfile" @@ -51,18 +51,18 @@ vault { } cache { - enforce_consistency = "always" - use_auto_auth_token = true + enforce_consistency = "always" + use_auto_auth_token = true } listener "tcp" { - address = "127.0.0.1:8100" - tls_disable = true + address = "127.0.0.1:8100" + tls_disable = true } template { - destination = "${vault_agent_template_destination}" - contents = "${vault_agent_template_contents}" + destination = "${VAULT_AGENT_TEMPLATE_DESTINATION}" + contents = "${VAULT_AGENT_TEMPLATE_CONTENTS}" exec { command = "pkill -F /tmp/pidfile" } @@ -72,7 +72,7 @@ auto_auth { method { type = "approle" config = { - role_id_file_path = "/tmp/role-id" + role_id_file_path = "/tmp/role-id" secret_id_file_path = "/tmp/secret-id" } } @@ -89,7 +89,7 @@ EOM pkill -F /tmp/pidfile || true # If the template file already exists, remove it -rm ${vault_agent_template_destination} || true +rm "${VAULT_AGENT_TEMPLATE_DESTINATION}" || true # Run agent (it will kill itself when it finishes rendering the template) $binpath agent -config=/tmp/vault-agent.hcl > /tmp/agent-logs.txt 2>&1 diff --git 
a/enos/modules/vault_artifactory_artifact/locals.tf b/enos/modules/vault_artifactory_artifact/locals.tf index 25f2caeb18..77b4532279 100644 --- a/enos/modules/vault_artifactory_artifact/locals.tf +++ b/enos/modules/vault_artifactory_artifact/locals.tf @@ -21,14 +21,14 @@ locals { // file name prefixes for the install packages of vault for the various distributions and artifact types (package or bundle) artifact_package_release_names = { ubuntu = { - "oss" = "vault_" + "ce" = "vault_" "ent" = "vault-enterprise_", "ent.fips1402" = "vault-enterprise-fips1402_", "ent.hsm" = "vault-enterprise-hsm_", "ent.hsm.fips1402" = "vault-enterprise-hsm-fips1402_", }, rhel = { - "oss" = "vault-" + "ce" = "vault-" "ent" = "vault-enterprise-", "ent.fips1402" = "vault-enterprise-fips1402-", "ent.hsm" = "vault-enterprise-hsm-", @@ -38,7 +38,7 @@ locals { // edition --> artifact name edition artifact_name_edition = { - "oss" = "" + "ce" = "" "ent" = "" "ent.hsm" = ".hsm" "ent.fips1402" = ".fips1402" diff --git a/enos/modules/vault_artifactory_artifact/main.tf b/enos/modules/vault_artifactory_artifact/main.tf index e57d8acd9d..4fe3ba2ce1 100644 --- a/enos/modules/vault_artifactory_artifact/main.tf +++ b/enos/modules/vault_artifactory_artifact/main.tf @@ -16,10 +16,10 @@ data "enos_artifactory_item" "vault" { name = local.artifact_name host = var.artifactory_host repo = var.artifactory_repo - path = var.edition == "oss" ? "vault/*" : "vault-enterprise/*" + path = var.edition == "ce" ? "vault/*" : "vault-enterprise/*" properties = tomap({ "commit" = var.revision - "product-name" = var.edition == "oss" ? "vault" : "vault-enterprise" + "product-name" = var.edition == "ce" ? 
"vault" : "vault-enterprise" "product-version" = local.artifact_version }) } diff --git a/enos/modules/vault_cluster/main.tf b/enos/modules/vault_cluster/main.tf index a7d6b16198..3daf3eafd7 100644 --- a/enos/modules/vault_cluster/main.tf +++ b/enos/modules/vault_cluster/main.tf @@ -109,9 +109,11 @@ resource "enos_remote_exec" "install_packages" { if length(var.packages) > 0 } - content = templatefile("${path.module}/templates/install-packages.sh", { - packages = join(" ", var.packages) - }) + environment = { + PACKAGES = join(" ", var.packages) + } + + scripts = [abspath("${path.module}/scripts/install-packages.sh")] transport = { ssh = { @@ -271,59 +273,6 @@ resource "enos_vault_unseal" "leader" { } } -# We need to ensure that the directory used for audit logs is present and accessible to the vault -# user on all nodes, since logging will only happen on the leader. -resource "enos_remote_exec" "create_audit_log_dir" { - depends_on = [ - enos_bundle_install.vault, - enos_vault_unseal.leader, - ] - for_each = toset([ - for idx, host in toset(local.instances) : idx - if var.enable_audit_devices - ]) - - environment = { - LOG_FILE_PATH = local.audit_device_file_path - SERVICE_USER = local.vault_service_user - } - - scripts = [abspath("${path.module}/scripts/create_audit_log_dir.sh")] - - transport = { - ssh = { - host = var.target_hosts[each.value].public_ip - } - } -} - -resource "enos_remote_exec" "enable_audit_devices" { - depends_on = [ - enos_remote_exec.create_audit_log_dir, - enos_vault_unseal.leader, - ] - for_each = toset([ - for idx in local.leader : idx - if local.enable_audit_devices - ]) - - environment = { - VAULT_TOKEN = enos_vault_init.leader[each.key].root_token - VAULT_ADDR = "http://127.0.0.1:8200" - VAULT_BIN_PATH = local.bin_path - LOG_FILE_PATH = local.audit_device_file_path - SERVICE_USER = local.vault_service_user - } - - scripts = [abspath("${path.module}/scripts/enable_audit_logging.sh")] - - transport = { - ssh = { - host = 
var.target_hosts[each.key].public_ip - } - } -} - resource "enos_vault_unseal" "followers" { depends_on = [ enos_vault_init.leader, @@ -387,11 +336,13 @@ resource "enos_remote_exec" "vault_write_license" { enos_vault_unseal.maybe_force_unseal, ] - content = templatefile("${path.module}/templates/vault-write-license.sh", { - bin_path = local.bin_path, - root_token = coalesce(var.root_token, try(enos_vault_init.leader[0].root_token, null), "none") - license = coalesce(var.license, "none") - }) + environment = { + BIN_PATH = local.bin_path, + LICENSE = coalesce(var.license, "none") + VAULT_TOKEN = coalesce(var.root_token, try(enos_vault_init.leader[0].root_token, null), "none") + } + + scripts = [abspath("${path.module}/scripts/vault-write-license.sh")] transport = { ssh = { @@ -400,6 +351,61 @@ resource "enos_remote_exec" "vault_write_license" { } } +# We need to ensure that the directory used for audit logs is present and accessible to the vault +# user on all nodes, since logging will only happen on the leader. 
+resource "enos_remote_exec" "create_audit_log_dir" { + depends_on = [ + enos_vault_start.leader, + enos_vault_start.followers, + enos_vault_unseal.leader, + enos_vault_unseal.followers, + enos_vault_unseal.maybe_force_unseal, + ] + for_each = toset([ + for idx, host in toset(local.instances) : idx + if var.enable_audit_devices + ]) + + environment = { + LOG_FILE_PATH = local.audit_device_file_path + SERVICE_USER = local.vault_service_user + } + + scripts = [abspath("${path.module}/scripts/create_audit_log_dir.sh")] + + transport = { + ssh = { + host = var.target_hosts[each.value].public_ip + } + } +} + +resource "enos_remote_exec" "enable_audit_devices" { + depends_on = [ + enos_remote_exec.create_audit_log_dir, + ] + for_each = toset([ + for idx in local.leader : idx + if local.enable_audit_devices + ]) + + environment = { + VAULT_TOKEN = enos_vault_init.leader[each.key].root_token + VAULT_ADDR = "http://127.0.0.1:8200" + VAULT_BIN_PATH = local.bin_path + LOG_FILE_PATH = local.audit_device_file_path + SERVICE_USER = local.vault_service_user + } + + scripts = [abspath("${path.module}/scripts/enable_audit_logging.sh")] + + transport = { + ssh = { + host = var.target_hosts[each.key].public_ip + } + } +} + resource "enos_local_exec" "wait_for_install_packages" { depends_on = [ enos_remote_exec.install_packages, diff --git a/enos/modules/vault_cluster/templates/install-packages.sh b/enos/modules/vault_cluster/scripts/install-packages.sh similarity index 76% rename from enos/modules/vault_cluster/templates/install-packages.sh rename to enos/modules/vault_cluster/scripts/install-packages.sh index a87e3d8fa4..62cce439f3 100755 --- a/enos/modules/vault_cluster/templates/install-packages.sh +++ b/enos/modules/vault_cluster/scripts/install-packages.sh @@ -5,9 +5,7 @@ set -ex -o pipefail -packages="${packages}" - -if [ "$packages" == "" ] +if [ "$PACKAGES" == "" ] then echo "No dependencies to install." 
exit 0 @@ -25,14 +23,14 @@ function retry { if [ "$count" -lt "$retries" ]; then sleep "$wait" else - return "$exit" + exit "$exit" fi done return 0 } -echo "Installing Dependencies: $packages" +echo "Installing Dependencies: $PACKAGES" if [ -f /etc/debian_version ]; then # Do our best to make sure that we don't race with cloud-init. Wait a reasonable time until we # see ec2 in the sources list. Very rarely cloud-init will take longer than we wait. In that case @@ -41,8 +39,10 @@ if [ -f /etc/debian_version ]; then cd /tmp retry 5 sudo apt update - retry 5 sudo apt install -y $${packages[@]} + # shellcheck disable=2068 + retry 5 sudo apt install -y ${PACKAGES[@]} else cd /tmp - retry 7 sudo yum -y install $${packages[@]} + # shellcheck disable=2068 + retry 7 sudo yum -y install ${PACKAGES[@]} fi diff --git a/enos/modules/vault_cluster/templates/vault-write-license.sh b/enos/modules/vault_cluster/scripts/vault-write-license.sh similarity index 76% rename from enos/modules/vault_cluster/templates/vault-write-license.sh rename to enos/modules/vault_cluster/scripts/vault-write-license.sh index cc03919120..7afccd85ae 100755 --- a/enos/modules/vault_cluster/templates/vault-write-license.sh +++ b/enos/modules/vault_cluster/scripts/vault-write-license.sh @@ -3,8 +3,7 @@ # SPDX-License-Identifier: BUSL-1.1 -license='${license}' -if test $license = "none"; then +if test "$LICENSE" = "none"; then exit 0 fi @@ -29,13 +28,13 @@ function retry { } export VAULT_ADDR=http://localhost:8200 -export VAULT_TOKEN='${root_token}' +[[ -z "$VAULT_TOKEN" ]] && fail "VAULT_TOKEN env variable has not been set" # Temporary hack until we can make the unseal resource handle legacy license # setting. If we're running 1.8 and above then we shouldn't try to set a license. -ver=$(${bin_path} version) +ver=$(${BIN_PATH} version) if [[ "$(echo "$ver" |awk '{print $2}' |awk -F'.' 
'{print $2}')" -ge 8 ]]; then exit 0 fi -retry 5 ${bin_path} write /sys/license text="$license" +retry 5 "${BIN_PATH}" write /sys/license text="$LICENSE" diff --git a/enos/modules/vault_cluster/variables.tf b/enos/modules/vault_cluster/variables.tf index 554fac0e5c..83aa2a4f60 100644 --- a/enos/modules/vault_cluster/variables.tf +++ b/enos/modules/vault_cluster/variables.tf @@ -92,7 +92,7 @@ variable "consul_release" { description = "Consul release version and edition to install from releases.hashicorp.com" default = { version = "1.15.1" - edition = "oss" + edition = "ce" } } diff --git a/enos/modules/vault_get_cluster_ips/main.tf b/enos/modules/vault_get_cluster_ips/main.tf index 4b3f5fb842..fb809d8916 100644 --- a/enos/modules/vault_get_cluster_ips/main.tf +++ b/enos/modules/vault_get_cluster_ips/main.tf @@ -19,124 +19,97 @@ variable "vault_root_token" { description = "The vault root token" } -variable "node_public_ip" { - type = string - description = "The primary node public ip" - default = "" +variable "vault_instance_count" { + type = number + description = "The number of instances in the vault cluster" } -variable "vault_instances" { +variable "vault_hosts" { type = map(object({ private_ip = string public_ip = string })) - description = "The vault cluster instances that were created" -} - -variable "added_vault_instances" { - type = map(object({ - private_ip = string - public_ip = string - })) - description = "The vault cluster instances that were added" - default = {} + description = "The vault cluster hosts. These are required to map private ip addresses to public addresses." } locals { - leftover_primary_instances = var.node_public_ip != "" ? { - for k, v in var.vault_instances : k => v if contains(values(v), trimspace(var.node_public_ip)) - } : null - all_instances = var.node_public_ip != "" ? 
merge(var.added_vault_instances, local.leftover_primary_instances) : var.vault_instances - updated_instance_count = length(local.all_instances) - updated_instances = { - for idx in range(local.updated_instance_count) : idx => { - public_ip = values(local.all_instances)[idx].public_ip - private_ip = values(local.all_instances)[idx].private_ip + follower_hosts_list = [for idx in range(var.vault_instance_count - 1) : { + private_ip = local.follower_private_ips[idx] + public_ip = local.follower_public_ips[idx] } + ] + follower_hosts = { + for idx in range(var.vault_instance_count - 1) : idx => try(local.follower_hosts_list[idx], null) } - node_ip = var.node_public_ip != "" ? var.node_public_ip : local.updated_instances[0].public_ip - instance_private_ips = [ - for k, v in values(tomap(local.updated_instances)) : - tostring(v["private_ip"]) - ] - follower_public_ips = [ - for k, v in values(tomap(local.updated_instances)) : - tostring(v["public_ip"]) if v["private_ip"] != trimspace(enos_remote_exec.get_leader_private_ip.stdout) - ] - follower_private_ips = [ - for k, v in values(tomap(local.updated_instances)) : - tostring(v["private_ip"]) if v["private_ip"] != trimspace(enos_remote_exec.get_leader_private_ip.stdout) + follower_private_ips = jsondecode(enos_remote_exec.get_follower_private_ips.stdout) + follower_public_ips = [for idx in range(var.vault_instance_count) : var.vault_hosts[idx].public_ip if contains( + local.follower_private_ips, var.vault_hosts[idx].private_ip) ] + leader_host = { + private_ip = local.leader_private_ip + public_ip = local.leader_public_ip + } + leader_private_ip = trimspace(enos_remote_exec.get_leader_private_ip.stdout) + leader_public_ip = element([ + for idx in range(var.vault_instance_count) : var.vault_hosts[idx].public_ip if var.vault_hosts[idx].private_ip == local.leader_private_ip + ], 0) + private_ips = [for k, v in values(tomap(var.vault_hosts)) : tostring(v["private_ip"])] } resource "enos_remote_exec" "get_leader_private_ip" { 
environment = { - VAULT_ADDR = "http://127.0.0.1:8200" - VAULT_TOKEN = var.vault_root_token - VAULT_INSTALL_DIR = var.vault_install_dir - VAULT_INSTANCE_PRIVATE_IPS = jsonencode(local.instance_private_ips) + VAULT_ADDR = "http://127.0.0.1:8200" + VAULT_TOKEN = var.vault_root_token + VAULT_INSTALL_DIR = var.vault_install_dir } scripts = [abspath("${path.module}/scripts/get-leader-private-ip.sh")] transport = { ssh = { - host = local.node_ip + host = var.vault_hosts[0].public_ip } } } -output "leftover_primary_instances" { - value = local.leftover_primary_instances +resource "enos_remote_exec" "get_follower_private_ips" { + environment = { + VAULT_ADDR = "http://127.0.0.1:8200" + VAULT_TOKEN = var.vault_root_token + VAULT_LEADER_PRIVATE_IP = local.leader_private_ip + VAULT_INSTANCE_PRIVATE_IPS = jsonencode(local.private_ips) + VAULT_INSTALL_DIR = var.vault_install_dir + } + + scripts = [abspath("${path.module}/scripts/get-follower-private-ips.sh")] + + transport = { + ssh = { + host = var.vault_hosts[0].public_ip + } + } } -output "all_instances" { - value = local.all_instances -} - -output "updated_instance_count" { - value = local.updated_instance_count -} - -output "updated_instances" { - value = local.updated_instances -} - -output "leader_private_ip" { - value = trimspace(enos_remote_exec.get_leader_private_ip.stdout) -} - -output "leader_public_ip" { - value = element([ - for k, v in values(tomap(local.all_instances)) : - tostring(v["public_ip"]) if v["private_ip"] == trimspace(enos_remote_exec.get_leader_private_ip.stdout) - ], 0) -} - -output "vault_instance_private_ips" { - value = jsonencode(local.instance_private_ips) -} - -output "follower_public_ips" { - value = local.follower_public_ips -} - -output "follower_public_ip_1" { - value = element(local.follower_public_ips, 0) -} - -output "follower_public_ip_2" { - value = element(local.follower_public_ips, 1) +output "follower_hosts" { + value = local.follower_hosts } output "follower_private_ips" { value = 
local.follower_private_ips } -output "follower_private_ip_1" { - value = element(local.follower_private_ips, 0) +output "follower_public_ips" { + value = local.follower_public_ips } -output "follower_private_ip_2" { - value = element(local.follower_private_ips, 1) +output "leader_host" { + value = local.leader_host +} + +output "leader_private_ip" { + value = local.leader_private_ip +} + +output "leader_public_ip" { + value = local.leader_public_ip } diff --git a/enos/modules/vault_get_cluster_ips/scripts/get-follower-private-ips.sh b/enos/modules/vault_get_cluster_ips/scripts/get-follower-private-ips.sh new file mode 100644 index 0000000000..1926961174 --- /dev/null +++ b/enos/modules/vault_get_cluster_ips/scripts/get-follower-private-ips.sh @@ -0,0 +1,53 @@ +#!/usr/bin/env bash +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + + +set -e + +function fail() { + echo "$1" 1>&2 + exit 1 +} + +[[ -z "$VAULT_ADDR" ]] && fail "VAULT_ADDR env variable has not been set" +[[ -z "$VAULT_INSTALL_DIR" ]] && fail "VAULT_INSTALL_DIR env variable has not been set" +[[ -z "$VAULT_INSTANCE_PRIVATE_IPS" ]] && fail "VAULT_INSTANCE_PRIVATE_IPS env variable has not been set" +[[ -z "$VAULT_LEADER_PRIVATE_IP" ]] && fail "VAULT_LEADER_PRIVATE_IP env variable has not been set" +[[ -z "$VAULT_TOKEN" ]] && fail "VAULT_TOKEN env variable has not been set" + +binpath=${VAULT_INSTALL_DIR}/vault +test -x "$binpath" || fail "Unable to locate vault binary at $binpath" + +count=0 +retries=5 +while :; do + # Vault >= 1.10.x has the operator members. If we have that then we'll use it. + if $binpath operator -h 2>&1 | grep members &> /dev/null; then + # Get the folllowers that are part of our private ips. 
+ if followers=$($binpath operator members -format json | jq --argjson expected "$VAULT_INSTANCE_PRIVATE_IPS" -c '.Nodes | map(select(any(.; .active_node==false)) | .api_address | scan("[0-9]+.[0-9]+.[0-9]+.[0-9]+")) as $followers | $expected - ($expected - $followers)'); then + # Make sure that we got all the followers + if jq --argjson expected "$VAULT_INSTANCE_PRIVATE_IPS" --argjson followers "$followers" -ne '$expected | length as $el | $followers | length as $fl | $fl == $el-1' > /dev/null; then + echo "$followers" + exit 0 + fi + fi + else + # We're using an old version of vault so we'll just return ips that don't match the leader. + # Get the public ip addresses of the followers + if followers=$(jq --arg ip "$VAULT_LEADER_PRIVATE_IP" -c '. | map(select(.!=$ip))' <<< "$VAULT_INSTANCE_PRIVATE_IPS"); then + if [[ -n "$followers" ]]; then + echo "$followers" + exit 0 + fi + fi + fi + + wait=$((2 ** count)) + count=$((count + 1)) + if [ "$count" -lt "$retries" ]; then + sleep "$wait" + else + fail "Timed out trying to obtain the cluster followers" + fi +done diff --git a/enos/modules/vault_get_cluster_ips/scripts/get-leader-private-ip.sh b/enos/modules/vault_get_cluster_ips/scripts/get-leader-private-ip.sh index 7e1655ff84..ffea30c462 100644 --- a/enos/modules/vault_get_cluster_ips/scripts/get-leader-private-ip.sh +++ b/enos/modules/vault_get_cluster_ips/scripts/get-leader-private-ip.sh @@ -5,31 +5,42 @@ set -e -binpath=${VAULT_INSTALL_DIR}/vault -instance_ips=${VAULT_INSTANCE_PRIVATE_IPS} - function fail() { - echo "$1" 1>&2 - exit 1 + echo "$1" 1>&2 + exit 1 } +[[ -z "$VAULT_ADDR" ]] && fail "VAULT_ADDR env variable has not been set" +[[ -z "$VAULT_INSTALL_DIR" ]] && fail "VAULT_INSTALL_DIR env variable has not been set" +[[ -z "$VAULT_TOKEN" ]] && fail "VAULT_TOKEN env variable has not been set" + +binpath=${VAULT_INSTALL_DIR}/vault +test -x "$binpath" || fail "Unable to locate vault binary at $binpath" + count=0 retries=5 while :; do - # Find the leader 
private IP address - leader_private_ip=$($binpath status -format json | jq '.leader_address | rtrimstr(":8200") | ltrimstr("http://")') - match_ip=$(echo "$instance_ips" |jq -r --argjson ip "$leader_private_ip" 'map(select(. == $ip))') - - if [[ "$leader_private_ip" != 'null' ]] && [[ "$match_ip" != '[]' ]]; then - echo "$leader_private_ip" | sed 's/\"//g' - exit 0 + # Find the leader private IP address + if ip=$($binpath read sys/leader -format=json | jq -r '.data.leader_address | scan("[0-9]+.[0-9]+.[0-9]+.[0-9]+")'); then + if [[ -n "$ip" ]]; then + echo "$ip" + exit 0 fi + fi - wait=$((5 ** count)) - count=$((count + 1)) - if [ "$count" -lt "$retries" ]; then - sleep "$wait" - else - fail "leader IP address $leader_private_ip was not found in $instance_ips" + # Some older versions of vault don't support reading sys/leader. Try falling back to the cli status. + if ip=$($binpath status -format json | jq -r '.leader_address | scan("[0-9]+.[0-9]+.[0-9]+.[0-9]+")'); then + if [[ -n "$ip" ]]; then + echo "$ip" + exit 0 fi + fi + + wait=$((2 ** count)) + count=$((count + 1)) + if [ "$count" -lt "$retries" ]; then + sleep "$wait" + else + fail "Timed out trying to obtain the cluster leader" + fi done diff --git a/enos/modules/vault_proxy/main.tf b/enos/modules/vault_proxy/main.tf index 9a4ab044d7..70736c5d8d 100644 --- a/enos/modules/vault_proxy/main.tf +++ b/enos/modules/vault_proxy/main.tf @@ -52,12 +52,14 @@ locals { } resource "enos_remote_exec" "set_up_approle_auth_and_proxy" { - content = templatefile("${path.module}/templates/set-up-approle-and-proxy.sh", { - vault_install_dir = var.vault_install_dir - vault_token = var.vault_root_token - vault_proxy_pidfile = var.vault_proxy_pidfile - vault_proxy_address = local.vault_proxy_address - }) + environment = { + VAULT_INSTALL_DIR = var.vault_install_dir + VAULT_TOKEN = var.vault_root_token + VAULT_PROXY_PIDFILE = var.vault_proxy_pidfile + VAULT_PROXY_ADDRESS = local.vault_proxy_address + } + + scripts = 
[abspath("${path.module}/scripts/set-up-approle-and-proxy.sh")] transport = { ssh = { @@ -67,11 +69,13 @@ resource "enos_remote_exec" "set_up_approle_auth_and_proxy" { } resource "enos_remote_exec" "use_proxy" { - content = templatefile("${path.module}/templates/use-proxy.sh", { - vault_install_dir = var.vault_install_dir - vault_proxy_pidfile = var.vault_proxy_pidfile - vault_proxy_address = local.vault_proxy_address - }) + environment = { + VAULT_INSTALL_DIR = var.vault_install_dir + VAULT_PROXY_PIDFILE = var.vault_proxy_pidfile + VAULT_PROXY_ADDRESS = local.vault_proxy_address + } + + scripts = [abspath("${path.module}/scripts/use-proxy.sh")] transport = { ssh = { diff --git a/enos/modules/vault_proxy/templates/set-up-approle-and-proxy.sh b/enos/modules/vault_proxy/scripts/set-up-approle-and-proxy.sh similarity index 78% rename from enos/modules/vault_proxy/templates/set-up-approle-and-proxy.sh rename to enos/modules/vault_proxy/scripts/set-up-approle-and-proxy.sh index 8e04554ec3..556cb82248 100644 --- a/enos/modules/vault_proxy/templates/set-up-approle-and-proxy.sh +++ b/enos/modules/vault_proxy/scripts/set-up-approle-and-proxy.sh @@ -5,7 +5,7 @@ set -e -binpath=${vault_install_dir}/vault +binpath=${VAULT_INSTALL_DIR}/vault fail() { echo "$1" 1>&2 @@ -15,14 +15,14 @@ fail() { test -x "$binpath" || fail "unable to locate vault binary at $binpath" export VAULT_ADDR='http://127.0.0.1:8200' -export VAULT_TOKEN='${vault_token}' +[[ -z "$VAULT_TOKEN" ]] && fail "VAULT_TOKEN env variable has not been set" # If approle was already enabled, disable it as we're about to re-enable it (the || true is so we don't fail if it doesn't already exist) $binpath auth disable approle || true -approle_create_status=$($binpath auth enable approle) +$binpath auth enable approle -approle_status=$($binpath write auth/approle/role/proxy-role secret_id_ttl=700h token_num_uses=1000 token_ttl=600h token_max_ttl=700h secret_id_num_uses=1000) +$binpath write auth/approle/role/proxy-role 
secret_id_ttl=700h token_num_uses=1000 token_ttl=600h token_max_ttl=700h secret_id_num_uses=1000 ROLEID=$($binpath read --format=json auth/approle/role/proxy-role/role-id | jq -r '.data.role_id') @@ -36,14 +36,14 @@ if [[ "$SECRETID" == '' ]]; then fail "expected SECRETID to be nonempty, but it is empty" fi -echo $ROLEID > /tmp/role-id -echo $SECRETID > /tmp/secret-id +echo "$ROLEID" > /tmp/role-id +echo "$SECRETID" > /tmp/secret-id # Write the Vault Proxy's configuration to /tmp/vault-proxy.hcl # The Proxy references the fixed Vault server address of http://127.0.0.1:8200 # The Proxy itself listens at the address http://127.0.0.1:8100 cat > /tmp/vault-proxy.hcl <<- EOM -pid_file = "${vault_proxy_pidfile}" +pid_file = "${VAULT_PROXY_PIDFILE}" vault { address = "http://127.0.0.1:8200" @@ -59,7 +59,7 @@ api_proxy { } listener "tcp" { - address = "${vault_proxy_address}" + address = "${VAULT_PROXY_ADDRESS}" tls_disable = true } @@ -81,7 +81,7 @@ auto_auth { EOM # If Proxy is still running from a previous run, kill it -pkill -F "${vault_proxy_pidfile}" || true +pkill -F "${VAULT_PROXY_PIDFILE}" || true # Run proxy in the background $binpath proxy -config=/tmp/vault-proxy.hcl > /tmp/proxy-logs.txt 2>&1 & diff --git a/enos/modules/vault_proxy/templates/use-proxy.sh b/enos/modules/vault_proxy/scripts/use-proxy.sh similarity index 86% rename from enos/modules/vault_proxy/templates/use-proxy.sh rename to enos/modules/vault_proxy/scripts/use-proxy.sh index c644f55866..3e7e543e7a 100644 --- a/enos/modules/vault_proxy/templates/use-proxy.sh +++ b/enos/modules/vault_proxy/scripts/use-proxy.sh @@ -5,7 +5,7 @@ set -e -binpath=${vault_install_dir}/vault +binpath=${VAULT_INSTALL_DIR}/vault fail() { echo "$1" 1>&2 @@ -16,7 +16,7 @@ test -x "$binpath" || fail "unable to locate vault binary at $binpath" # Will cause the Vault CLI to communicate with the Vault Proxy, since it # is listening at port 8100. 
-export VAULT_ADDR='http://${vault_proxy_address}' +export VAULT_ADDR="http://${VAULT_PROXY_ADDRESS}" # Explicitly unsetting VAULT_TOKEN to make sure that the Vault Proxy's token # is used. @@ -29,4 +29,4 @@ unset VAULT_TOKEN $binpath token lookup -format=json | jq -r '.data.path' | grep -q 'auth/approle/login' # Now that we're done, kill the proxy -pkill -F "${vault_proxy_pidfile}" || true +pkill -F "${VAULT_PROXY_PIDFILE}" || true diff --git a/enos/modules/vault_raft_remove_peer/main.tf b/enos/modules/vault_raft_remove_peer/main.tf index 3d759966aa..daf025c471 100644 --- a/enos/modules/vault_raft_remove_peer/main.tf +++ b/enos/modules/vault_raft_remove_peer/main.tf @@ -56,15 +56,13 @@ resource "enos_remote_exec" "vault_raft_remove_peer" { for_each = local.instances environment = { - VAULT_TOKEN = var.vault_root_token - VAULT_ADDR = "http://localhost:8200" + REMOVE_VAULT_CLUSTER_ADDR = "${each.value.private_ip}:${var.vault_cluster_addr_port}" + VAULT_TOKEN = var.vault_root_token + VAULT_ADDR = "http://localhost:8200" + VAULT_INSTALL_DIR = var.vault_install_dir } - content = templatefile("${path.module}/templates/raft-remove-peer.sh", { - remove_vault_cluster_addr = "${each.value.private_ip}:${var.vault_cluster_addr_port}" - vault_install_dir = var.vault_install_dir - vault_local_binary_path = "${var.vault_install_dir}/vault" - }) + scripts = [abspath("${path.module}/scripts/raft-remove-peer.sh")] transport = { ssh = { diff --git a/enos/modules/vault_raft_remove_peer/templates/raft-remove-peer.sh b/enos/modules/vault_raft_remove_peer/scripts/raft-remove-peer.sh similarity index 66% rename from enos/modules/vault_raft_remove_peer/templates/raft-remove-peer.sh rename to enos/modules/vault_raft_remove_peer/scripts/raft-remove-peer.sh index 5e908bf222..4fcfa513d3 100644 --- a/enos/modules/vault_raft_remove_peer/templates/raft-remove-peer.sh +++ b/enos/modules/vault_raft_remove_peer/scripts/raft-remove-peer.sh @@ -5,15 +5,16 @@ set -e -binpath=${vault_install_dir}/vault 
- -node_addr=${remove_vault_cluster_addr} +binpath=${VAULT_INSTALL_DIR}/vault +node_addr=${REMOVE_VAULT_CLUSTER_ADDR} fail() { echo "$1" 2>&1 return 1 } +[[ -z "$VAULT_TOKEN" ]] && fail "VAULT_TOKEN env variable has not been set" + retry() { local retries=$1 shift @@ -35,8 +36,7 @@ retry() { } remove_peer() { - node_id=$($binpath operator raft list-peers -format json | jq -Mr --argjson expected "false" '.data.config.servers[] | select(.address=='\""$node_addr"\"') | select(.voter==$expected) | .node_id') - if [ "$?" != "0" ];then + if ! node_id=$("$binpath" operator raft list-peers -format json | jq -Mr --argjson expected "false" '.data.config.servers[] | select(.address=='\""$node_addr"\"') | select(.voter==$expected) | .node_id'); then fail "failed to get node id of a non-voter node" fi diff --git a/enos/modules/vault_setup_perf_primary/main.tf b/enos/modules/vault_setup_perf_primary/main.tf index 14587081e2..fbcf3676d6 100644 --- a/enos/modules/vault_setup_perf_primary/main.tf +++ b/enos/modules/vault_setup_perf_primary/main.tf @@ -39,7 +39,7 @@ resource "enos_remote_exec" "configure_pr_primary" { environment = { VAULT_ADDR = "http://127.0.0.1:8200" VAULT_TOKEN = var.vault_root_token - vault_install_dir = var.vault_install_dir + VAULT_INSTALL_DIR = var.vault_install_dir } scripts = [abspath("${path.module}/scripts/configure-vault-pr-primary.sh")] diff --git a/enos/modules/vault_setup_perf_primary/scripts/configure-vault-pr-primary.sh b/enos/modules/vault_setup_perf_primary/scripts/configure-vault-pr-primary.sh index e355050d02..2ccaf14e4d 100644 --- a/enos/modules/vault_setup_perf_primary/scripts/configure-vault-pr-primary.sh +++ b/enos/modules/vault_setup_perf_primary/scripts/configure-vault-pr-primary.sh @@ -5,7 +5,7 @@ set -e -binpath=${vault_install_dir}/vault +binpath=${VAULT_INSTALL_DIR}/vault fail() { echo "$1" 1>&2 diff --git a/enos/modules/vault_unseal_nodes/scripts/unseal-node.sh b/enos/modules/vault_unseal_nodes/scripts/unseal-node.sh index 
661af0ec0d..b02ffa52c1 100755 --- a/enos/modules/vault_unseal_nodes/scripts/unseal-node.sh +++ b/enos/modules/vault_unseal_nodes/scripts/unseal-node.sh @@ -5,23 +5,24 @@ binpath=${VAULT_INSTALL_DIR}/vault -IFS="," read -a keys <<< ${UNSEAL_KEYS} +IFS="," read -r -a keys <<< "${UNSEAL_KEYS}" function fail() { - echo "$1" 1>&2 - exit 1 + echo "$1" 1>&2 + exit 1 } + count=0 retries=5 while :; do - for key in ${keys[@]}; do + for key in "${keys[@]}"; do # Check the Vault seal status seal_status=$($binpath status -format json | jq '.sealed') - + if [[ "$seal_status" == "true" ]]; then - echo "running unseal with $key count $count with retry $retry" >> /tmp/unseal_script.out - $binpath operator unseal $key > /dev/null 2>&1 + echo "running unseal with $key count $count with retry $retries" >> /tmp/unseal_script.out + "$binpath" operator unseal "$key" > /dev/null 2>&1 else exit 0 fi diff --git a/enos/modules/vault_unseal_nodes/scripts/wait-until-sealed.sh b/enos/modules/vault_unseal_nodes/scripts/wait-until-sealed.sh index ff99a92155..5654629c5c 100644 --- a/enos/modules/vault_unseal_nodes/scripts/wait-until-sealed.sh +++ b/enos/modules/vault_unseal_nodes/scripts/wait-until-sealed.sh @@ -6,8 +6,8 @@ binpath=${VAULT_INSTALL_DIR}/vault function fail() { - echo "$1" 1>&2 - exit 1 + echo "$1" 1>&2 + exit 1 } count=0 diff --git a/enos/modules/vault_upgrade/main.tf b/enos/modules/vault_upgrade/main.tf index 1ac025e5b2..a2844ab560 100644 --- a/enos/modules/vault_upgrade/main.tf +++ b/enos/modules/vault_upgrade/main.tf @@ -92,10 +92,12 @@ resource "enos_bundle_install" "upgrade_vault_binary" { resource "enos_remote_exec" "get_leader_public_ip" { depends_on = [enos_bundle_install.upgrade_vault_binary] - content = templatefile("${path.module}/templates/get-leader-public-ip.sh", { - vault_install_dir = var.vault_install_dir, - vault_instances = jsonencode(local.instances) - }) + scripts = [abspath("${path.module}/scripts/get-leader-public-ip.sh")] + + environment = { + 
VAULT_INSTALL_DIR = var.vault_install_dir, + VAULT_INSTANCES = jsonencode(local.instances) + } transport = { ssh = { @@ -107,10 +109,12 @@ resource "enos_remote_exec" "get_leader_public_ip" { resource "enos_remote_exec" "get_follower_public_ips" { depends_on = [enos_bundle_install.upgrade_vault_binary] - content = templatefile("${path.module}/templates/get-follower-public-ips.sh", { - vault_install_dir = var.vault_install_dir, - vault_instances = jsonencode(local.instances) - }) + environment = { + VAULT_INSTALL_DIR = var.vault_install_dir, + VAULT_INSTANCES = jsonencode(local.instances) + } + + scripts = [abspath("${path.module}/scripts/get-follower-public-ips.sh")] transport = { ssh = { @@ -123,7 +127,7 @@ resource "enos_remote_exec" "restart_followers" { for_each = local.followers depends_on = [enos_remote_exec.get_follower_public_ips] - content = file("${path.module}/templates/restart-vault.sh") + scripts = [abspath("${path.module}/scripts/restart-vault.sh")] transport = { ssh = { @@ -153,7 +157,7 @@ resource "enos_vault_unseal" "followers" { resource "enos_remote_exec" "restart_leader" { depends_on = [enos_vault_unseal.followers] - content = file("${path.module}/templates/restart-vault.sh") + scripts = [abspath("${path.module}/scripts/restart-vault.sh")] transport = { ssh = { diff --git a/enos/modules/vault_upgrade/templates/get-follower-public-ips.sh b/enos/modules/vault_upgrade/scripts/get-follower-public-ips.sh similarity index 78% rename from enos/modules/vault_upgrade/templates/get-follower-public-ips.sh rename to enos/modules/vault_upgrade/scripts/get-follower-public-ips.sh index c9e1759dba..8cfa1b2fa6 100644 --- a/enos/modules/vault_upgrade/templates/get-follower-public-ips.sh +++ b/enos/modules/vault_upgrade/scripts/get-follower-public-ips.sh @@ -5,13 +5,13 @@ set -e -binpath=${vault_install_dir}/vault +binpath=${VAULT_INSTALL_DIR}/vault export VAULT_ADDR="http://localhost:8200" -instances='${vault_instances}' +instances=${VAULT_INSTANCES} # Find the 
leader -leader_address=$($binpath status -format json | jq '.leader_address | rtrimstr(":8200") | ltrimstr("http://")') +leader_address=$($binpath status -format json | jq '.leader_address | scan("[0-9]+.[0-9]+.[0-9]+.[0-9]+")') # Get the public ip addresses of the followers follower_ips=$(jq ".[] | select(.private_ip!=$leader_address) | .public_ip" <<< "$instances") diff --git a/enos/modules/vault_upgrade/templates/get-leader-public-ip.sh b/enos/modules/vault_upgrade/scripts/get-leader-public-ip.sh similarity index 74% rename from enos/modules/vault_upgrade/templates/get-leader-public-ip.sh rename to enos/modules/vault_upgrade/scripts/get-leader-public-ip.sh index 6753f7f749..40444db774 100644 --- a/enos/modules/vault_upgrade/templates/get-leader-public-ip.sh +++ b/enos/modules/vault_upgrade/scripts/get-leader-public-ip.sh @@ -5,14 +5,15 @@ set -e -binpath=${vault_install_dir}/vault +binpath=${VAULT_INSTALL_DIR}/vault export VAULT_ADDR="http://localhost:8200" -instances='${vault_instances}' +instances=${VAULT_INSTANCES} # Find the leader -leader_address=$($binpath status -format json | jq '.leader_address | rtrimstr(":8200") | ltrimstr("http://")') +leader_address=$($binpath status -format json | jq '.leader_address | scan("[0-9]+.[0-9]+.[0-9]+.[0-9]+")') # Get the public ip address of the leader leader_public=$(jq ".[] | select(.private_ip==$leader_address) | .public_ip" <<< "$instances") +#shellcheck disable=SC2001 echo "$leader_public" | sed 's/\"//g' diff --git a/enos/modules/vault_upgrade/templates/restart-vault.sh b/enos/modules/vault_upgrade/scripts/restart-vault.sh similarity index 100% rename from enos/modules/vault_upgrade/templates/restart-vault.sh rename to enos/modules/vault_upgrade/scripts/restart-vault.sh diff --git a/enos/modules/vault_verify_agent_output/main.tf b/enos/modules/vault_verify_agent_output/main.tf index 3eda9c3722..f759f30510 100644 --- a/enos/modules/vault_verify_agent_output/main.tf +++ 
b/enos/modules/vault_verify_agent_output/main.tf @@ -42,11 +42,13 @@ locals { } resource "enos_remote_exec" "verify_vault_agent_output" { - content = templatefile("${path.module}/templates/verify-vault-agent-output.sh", { - vault_agent_template_destination = var.vault_agent_template_destination - vault_agent_expected_output = var.vault_agent_expected_output - vault_instances = jsonencode(local.vault_instances) - }) + environment = { + VAULT_AGENT_TEMPLATE_DESTINATION = var.vault_agent_template_destination + VAULT_AGENT_EXPECTED_OUTPUT = var.vault_agent_expected_output + VAULT_INSTANCES = jsonencode(local.vault_instances) + } + + scripts = [abspath("${path.module}/scripts/verify-vault-agent-output.sh")] transport = { ssh = { diff --git a/enos/modules/vault_verify_agent_output/scripts/verify-vault-agent-output.sh b/enos/modules/vault_verify_agent_output/scripts/verify-vault-agent-output.sh new file mode 100644 index 0000000000..044c69130b --- /dev/null +++ b/enos/modules/vault_verify_agent_output/scripts/verify-vault-agent-output.sh @@ -0,0 +1,16 @@ +#!/usr/bin/env bash +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: BUSL-1.1 + + +set -e + +fail() { + echo "$1" 1>&2 + return 1 +} + +actual_output=$(cat "${VAULT_AGENT_TEMPLATE_DESTINATION}") +if [[ "$actual_output" != "${VAULT_AGENT_EXPECTED_OUTPUT}" ]]; then + fail "expected '${VAULT_AGENT_EXPECTED_OUTPUT}' to be the Agent output, but got: '$actual_output'" +fi diff --git a/enos/modules/vault_verify_agent_output/templates/verify-vault-agent-output.sh b/enos/modules/vault_verify_agent_output/templates/verify-vault-agent-output.sh deleted file mode 100644 index 57a5d1e01e..0000000000 --- a/enos/modules/vault_verify_agent_output/templates/verify-vault-agent-output.sh +++ /dev/null @@ -1,16 +0,0 @@ -#!/usr/bin/env bash -# Copyright (c) HashiCorp, Inc. 
-# SPDX-License-Identifier: BUSL-1.1 - - -set -e - -fail() { - echo "$1" 1>&2 - return 1 -} - -actual_output=$(cat ${vault_agent_template_destination}) -if [[ "$actual_output" != "${vault_agent_expected_output}" ]]; then - fail "expected '${vault_agent_expected_output}' to be the Agent output, but got: '$actual_output'" -fi diff --git a/enos/modules/vault_verify_autopilot/main.tf b/enos/modules/vault_verify_autopilot/main.tf index a54d24f4e2..5681e7d848 100644 --- a/enos/modules/vault_verify_autopilot/main.tf +++ b/enos/modules/vault_verify_autopilot/main.tf @@ -54,12 +54,14 @@ locals { resource "enos_remote_exec" "smoke-verify-autopilot" { for_each = local.public_ips - content = templatefile("${path.module}/templates/smoke-verify-autopilot.sh", { - vault_install_dir = var.vault_install_dir - vault_token = var.vault_root_token - vault_autopilot_upgrade_status = var.vault_autopilot_upgrade_status, - vault_autopilot_upgrade_version = var.vault_autopilot_upgrade_version, - }) + environment = { + VAULT_INSTALL_DIR = var.vault_install_dir, + VAULT_TOKEN = var.vault_root_token, + VAULT_AUTOPILOT_UPGRADE_STATUS = var.vault_autopilot_upgrade_status, + VAULT_AUTOPILOT_UPGRADE_VERSION = var.vault_autopilot_upgrade_version, + } + + scripts = [abspath("${path.module}/scripts/smoke-verify-autopilot.sh")] transport = { ssh = { diff --git a/enos/modules/vault_verify_autopilot/scripts/smoke-verify-autopilot.sh b/enos/modules/vault_verify_autopilot/scripts/smoke-verify-autopilot.sh new file mode 100755 index 0000000000..d19f453a07 --- /dev/null +++ b/enos/modules/vault_verify_autopilot/scripts/smoke-verify-autopilot.sh @@ -0,0 +1,43 @@ +#!/bin/bash +# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: BUSL-1.1 + +fail() { + echo "$1" 1>&2 + exit 1 +} + +export VAULT_ADDR="http://localhost:8200" + +[[ -z "$VAULT_AUTOPILOT_UPGRADE_STATUS" ]] && fail "VAULT_AUTOPILOT_UPGRADE_STATUS env variable has not been set" +[[ -z "$VAULT_AUTOPILOT_UPGRADE_VERSION" ]] && fail "VAULT_AUTOPILOT_UPGRADE_VERSION env variable has not been set" +[[ -z "$VAULT_TOKEN" ]] && fail "VAULT_TOKEN env variable has not been set" +[[ -z "$VAULT_INSTALL_DIR" ]] && fail "VAULT_INSTALL_DIR env variable has not been set" + +binpath=${VAULT_INSTALL_DIR}/vault +test -x "$binpath" || fail "unable to locate vault binary at $binpath" + +count=0 +retries=8 +while :; do + state=$($binpath read -format=json sys/storage/raft/autopilot/state) + status="$(jq -r '.data.upgrade_info.status' <<< "$state")" + target_version="$(jq -r '.data.upgrade_info.target_version' <<< "$state")" + + if [ "$status" = "$VAULT_AUTOPILOT_UPGRADE_STATUS" ] && [ "$target_version" = "$VAULT_AUTOPILOT_UPGRADE_VERSION" ]; then + exit 0 + fi + + wait=$((2 ** count)) + count=$((count + 1)) + if [ "$count" -lt "$retries" ]; then + echo "Expected autopilot status to be $VAULT_AUTOPILOT_UPGRADE_STATUS, got $status" + echo "Expected autopilot target_version to be $VAULT_AUTOPILOT_UPGRADE_VERSION, got $target_version" + sleep "$wait" + else + echo "$state" + echo "Expected autopilot status to be $VAULT_AUTOPILOT_UPGRADE_STATUS, got $status" + echo "Expected autopilot target_version to be $VAULT_AUTOPILOT_UPGRADE_VERSION, got $target_version" + fail "Autopilot did not get into the correct status" + fi +done diff --git a/enos/modules/vault_verify_autopilot/templates/smoke-verify-autopilot.sh b/enos/modules/vault_verify_autopilot/templates/smoke-verify-autopilot.sh deleted file mode 100755 index a1fa7174b1..0000000000 --- a/enos/modules/vault_verify_autopilot/templates/smoke-verify-autopilot.sh +++ /dev/null @@ -1,37 +0,0 @@ -#!/bin/bash -# Copyright (c) HashiCorp, Inc. 
-# SPDX-License-Identifier: BUSL-1.1 - - -token="${vault_token}" -autopilot_version="${vault_autopilot_upgrade_version}" -autopilot_status="${vault_autopilot_upgrade_status}" - -export VAULT_ADDR="http://localhost:8200" -export VAULT_TOKEN="$token" - -function fail() { - echo "$1" 1>&2 - exit 1 -} - -count=0 -retries=7 -while :; do - state=$(${vault_install_dir}/vault read -format=json sys/storage/raft/autopilot/state) - status="$(jq -r '.data.upgrade_info.status' <<< "$state")" - target_version="$(jq -r '.data.upgrade_info.target_version' <<< "$state")" - - if [ "$status" = "$autopilot_status" ] && [ "$target_version" = "$autopilot_version" ]; then - exit 0 - fi - - wait=$((2 ** count)) - count=$((count + 1)) - if [ "$count" -lt "$retries" ]; then - echo "$state" - sleep "$wait" - else - fail "Autopilot did not get into the correct status" - fi -done diff --git a/enos/modules/vault_verify_performance_replication/scripts/verify-replication-status.sh b/enos/modules/vault_verify_performance_replication/scripts/verify-replication-status.sh index 2368bd2bf2..f401242d6f 100644 --- a/enos/modules/vault_verify_performance_replication/scripts/verify-replication-status.sh +++ b/enos/modules/vault_verify_performance_replication/scripts/verify-replication-status.sh @@ -9,66 +9,76 @@ set -e -binpath=${VAULT_INSTALL_DIR}/vault - -function fail() { - echo "$1" 1>&2 - exit 1 +fail() { + echo "$1" 1>&2 + exit 1 } +[[ -z "$PRIMARY_LEADER_PRIV_IP" ]] && fail "PRIMARY_LEADER_PRIV_IP env variable has not been set" +[[ -z "$SECONDARY_LEADER_PRIV_IP" ]] && fail "SECONDARY_LEADER_PRIV_IP env variable has not been set" +[[ -z "$VAULT_ADDR" ]] && fail "VAULT_ADDR env variable has not been set" +[[ -z "$VAULT_INSTALL_DIR" ]] && fail "VAULT_INSTALL_DIR env variable has not been set" + +binpath=${VAULT_INSTALL_DIR}/vault +test -x "$binpath" || fail "unable to locate vault binary at $binpath" + retry() { local retries=$1 shift local count=0 until "$@"; do - exit=$? 
wait=$((2 ** count)) count=$((count + 1)) if [ "$count" -lt "$retries" ]; then sleep "$wait" else - return "$exit" + fail "$($binpath read -format=json sys/replication/performance/status)" fi done } -test -x "$binpath" || exit 1 - check_pr_status() { pr_status=$($binpath read -format=json sys/replication/performance/status) - cluster_state=$(echo $pr_status | jq -r '.data.state') - connection_mode=$(echo $pr_status | jq -r '.data.mode') + cluster_state=$(echo "$pr_status" | jq -r '.data.state') + connection_mode=$(echo "$pr_status" | jq -r '.data.mode') if [[ "$cluster_state" == 'idle' ]]; then - fail "replication cluster state is $cluster_state" + echo "replication cluster state is idle" 1>&2 + return 1 fi if [[ "$connection_mode" == "primary" ]]; then - connection_status=$(echo $pr_status | jq -r '.data.secondaries[0].connection_status') + connection_status=$(echo "$pr_status" | jq -r '.data.secondaries[0].connection_status') if [[ "$connection_status" == 'disconnected' ]]; then - fail "replication connection status of secondaries is $connection_status" + echo ".data.secondaries[0].connection_status from primary node is 'disconnected'" 1>&2 + return 1 fi - secondary_cluster_addr=$(echo $pr_status | jq -r '.data.secondaries[0].cluster_address') - if [[ "$secondary_cluster_addr" != "https://"${SECONDARY_LEADER_PRIV_IP}":8201" ]]; then - fail "Expected secondary cluster address $SECONDARY_LEADER_PRIV_IP got $secondary_cluster_addr " + secondary_cluster_addr=$(echo "$pr_status" | jq -r '.data.secondaries[0].cluster_address | scan("[0-9]+.[0-9]+.[0-9]+.[0-9]+")') + if [[ "$secondary_cluster_addr" != "$SECONDARY_LEADER_PRIV_IP" ]]; then + echo ".data.secondaries[0].cluster_address should have an IP address of $SECONDARY_LEADER_PRIV_IP, got: $secondary_cluster_addr" 1>&2 + return 1 fi else - connection_status=$(echo $pr_status | jq -r '.data.primaries[0].connection_status') + connection_status=$(echo "$pr_status" | jq -r '.data.primaries[0].connection_status') if [[ 
"$connection_status" == 'disconnected' ]]; then - fail "replication connection status of secondaries is $connection_status" + echo ".data.primaries[0].connection_status from secondary node is 'disconnected'" 1>&2 + return 1 fi - primary_cluster_addr=$(echo $pr_status | jq -r '.data.primaries[0].cluster_address') - if [[ "$primary_cluster_addr" != "https://"${PRIMARY_LEADER_PRIV_IP}":8201" ]]; then - fail "Expected primary cluster address $PRIMARY_LEADER_PRIV_IP got $primary_cluster_addr" + primary_cluster_addr=$(echo "$pr_status" | jq -r '.data.primaries[0].cluster_address | scan("[0-9]+.[0-9]+.[0-9]+.[0-9]+")') + if [[ "$primary_cluster_addr" != "$PRIMARY_LEADER_PRIV_IP" ]]; then + echo ".data.primaries[0].cluster_address should have an IP address of $PRIMARY_LEADER_PRIV_IP, got: $primary_cluster_addr" 1>&2 + return 1 fi - known_primary_cluster_addrs=$(echo $pr_status | jq -r '.data.known_primary_cluster_addrs') - # IFS="," read -a cluster_addr <<< ${known_primary_cluster_addrs} - if ! $(echo $known_primary_cluster_addrs |grep -q $PRIMARY_LEADER_PRIV_IP); then - fail "Primary leader address $PRIMARY_LEADER_PRIV_IP not found in Known primary cluster addresses $known_primary_cluster_addrs" + known_primary_cluster_addrs=$(echo "$pr_status" | jq -r '.data.known_primary_cluster_addrs') + if ! 
echo "$known_primary_cluster_addrs" | grep -q "$PRIMARY_LEADER_PRIV_IP"; then + echo "$PRIMARY_LEADER_PRIV_IP is not in .data.known_primary_cluster_addrs: $known_primary_cluster_addrs" 1>&2 + return 1 fi fi - echo $pr_status + + echo "$pr_status" + return 0 } # Retry a few times because it can take some time for replication to sync diff --git a/enos/modules/vault_verify_raft_auto_join_voter/main.tf b/enos/modules/vault_verify_raft_auto_join_voter/main.tf index 97a3b71f09..969877713e 100644 --- a/enos/modules/vault_verify_raft_auto_join_voter/main.tf +++ b/enos/modules/vault_verify_raft_auto_join_voter/main.tf @@ -50,12 +50,14 @@ locals { resource "enos_remote_exec" "verify_raft_auto_join_voter" { for_each = local.instances - content = templatefile("${path.module}/templates/verify-raft-auto-join-voter.sh", { - vault_cluster_addr = "${each.value.private_ip}:${var.vault_cluster_addr_port}" - vault_install_dir = var.vault_install_dir - vault_local_binary_path = "${var.vault_install_dir}/vault" - vault_token = var.vault_root_token - }) + environment = { + VAULT_CLUSTER_ADDR = "${each.value.private_ip}:${var.vault_cluster_addr_port}" + VAULT_INSTALL_DIR = var.vault_install_dir + VAULT_LOCAL_BINARY_PATH = "${var.vault_install_dir}/vault" + VAULT_TOKEN = var.vault_root_token + } + + scripts = [abspath("${path.module}/scripts/verify-raft-auto-join-voter.sh")] transport = { ssh = { diff --git a/enos/modules/vault_verify_raft_auto_join_voter/templates/verify-raft-auto-join-voter.sh b/enos/modules/vault_verify_raft_auto_join_voter/scripts/verify-raft-auto-join-voter.sh similarity index 62% rename from enos/modules/vault_verify_raft_auto_join_voter/templates/verify-raft-auto-join-voter.sh rename to enos/modules/vault_verify_raft_auto_join_voter/scripts/verify-raft-auto-join-voter.sh index 185ada9e73..c6887ae43e 100644 --- a/enos/modules/vault_verify_raft_auto_join_voter/templates/verify-raft-auto-join-voter.sh +++ 
b/enos/modules/vault_verify_raft_auto_join_voter/scripts/verify-raft-auto-join-voter.sh @@ -5,7 +5,7 @@ set -e -binpath=${vault_install_dir}/vault +binpath=${VAULT_INSTALL_DIR}/vault fail() { echo "$1" 2>&1 @@ -33,17 +33,17 @@ retry() { } check_voter_status() { - voter_status=$($binpath operator raft list-peers -format json | jq -Mr --argjson expected "true" '.data.config.servers[] | select(.address=="${vault_cluster_addr}") | .voter == $expected') + voter_status=$($binpath operator raft list-peers -format json | jq -Mr --argjson expected "true" --arg ADDR "$VAULT_CLUSTER_ADDR" '.data.config.servers[] | select(.address==$ADDR) | .voter == $expected') if [[ "$voter_status" != 'true' ]]; then - fail "expected ${vault_cluster_addr} to be raft voter, got raft status for node: $($binpath operator raft list-peers -format json | jq '.data.config.servers[] | select(.address==${vault_cluster_addr})')" + fail "expected $VAULT_CLUSTER_ADDR to be raft voter, got raft status for node: $($binpath operator raft list-peers -format json | jq -Mr --arg ADDR "$VAULT_CLUSTER_ADDR" '.data.config.servers[] | select(.address==$ADDR)')" fi } test -x "$binpath" || fail "unable to locate vault binary at $binpath" export VAULT_ADDR='http://127.0.0.1:8200' -export VAULT_TOKEN='${vault_token}' +[[ -z "$VAULT_TOKEN" ]] && fail "VAULT_TOKEN env variable has not been set" # Retry a few times because it can take some time for things to settle after # all the nodes are unsealed diff --git a/enos/modules/vault_verify_read_data/scripts/verify-data.sh b/enos/modules/vault_verify_read_data/scripts/verify-data.sh index 1f0406483e..5919aa04c3 100644 --- a/enos/modules/vault_verify_read_data/scripts/verify-data.sh +++ b/enos/modules/vault_verify_read_data/scripts/verify-data.sh @@ -24,21 +24,16 @@ function retry { return 0 } -function fail { - echo "$1" 1>&2 - exit 1 -} - -binpath=${VAULT_INSTALL_DIR}/vault - fail() { echo "$1" 1>&2 return 1 } +binpath="${VAULT_INSTALL_DIR}/vault" + test -x "$binpath" || 
fail "unable to locate vault binary at $binpath" # To keep the authentication method and module verification consistent between all # Enos scenarios we authenticate using testuser created by vault_verify_write_data module -retry 5 $binpath login -method=userpass username=testuser password=passuser1 -retry 5 $binpath kv get secret/test +retry 5 "$binpath" login -method=userpass username=testuser password=passuser1 +retry 5 "$binpath" kv get secret/test diff --git a/enos/modules/vault_verify_replication/main.tf b/enos/modules/vault_verify_replication/main.tf index 4deb572cc5..3d2f11e545 100644 --- a/enos/modules/vault_verify_replication/main.tf +++ b/enos/modules/vault_verify_replication/main.tf @@ -22,9 +22,11 @@ locals { resource "enos_remote_exec" "smoke-verify-replication" { for_each = local.instances - content = templatefile("${path.module}/templates/smoke-verify-replication.sh", { - vault_edition = var.vault_edition - }) + environment = { + VAULT_EDITION = var.vault_edition + } + + scripts = [abspath("${path.module}/scripts/smoke-verify-replication.sh")] transport = { ssh = { diff --git a/enos/modules/vault_verify_replication/templates/smoke-verify-replication.sh b/enos/modules/vault_verify_replication/scripts/smoke-verify-replication.sh similarity index 82% rename from enos/modules/vault_verify_replication/templates/smoke-verify-replication.sh rename to enos/modules/vault_verify_replication/scripts/smoke-verify-replication.sh index 666be3dfc2..5ef9afd8b1 100644 --- a/enos/modules/vault_verify_replication/templates/smoke-verify-replication.sh +++ b/enos/modules/vault_verify_replication/scripts/smoke-verify-replication.sh @@ -8,18 +8,16 @@ set -e -edition=${vault_edition} - function fail() { - echo "$1" 1>&2 - exit 1 + echo "$1" 1>&2 + exit 1 } -# Replication status endpoint should have data.mode disabled for OSS release +# Replication status endpoint should have data.mode disabled for CE release status=$(curl -s http://localhost:8200/v1/sys/replication/status) 
-if [ "$edition" == "oss" ]; then +if [ "$VAULT_EDITION" == "ce" ]; then if [ "$(jq -r '.data.mode' <<< "$status")" != "disabled" ]; then - fail "replication data mode is not disabled for OSS release!" + fail "replication data mode is not disabled for CE release!" fi else if [ "$(jq -r '.data.dr' <<< "$status")" == "" ]; then diff --git a/enos/modules/vault_verify_ui/main.tf b/enos/modules/vault_verify_ui/main.tf index 2689f78946..9eddc8f426 100644 --- a/enos/modules/vault_verify_ui/main.tf +++ b/enos/modules/vault_verify_ui/main.tf @@ -22,9 +22,11 @@ locals { resource "enos_remote_exec" "smoke-verify-ui" { for_each = local.instances - content = templatefile("${path.module}/templates/smoke-verify-ui.sh", { - vault_install_dir = var.vault_install_dir, - }) + environment = { + VAULT_ADDR = var.vault_addr, + } + + scripts = [abspath("${path.module}/scripts/smoke-verify-ui.sh")] transport = { ssh = { diff --git a/enos/modules/vault_verify_ui/scripts/smoke-verify-ui.sh b/enos/modules/vault_verify_ui/scripts/smoke-verify-ui.sh new file mode 100644 index 0000000000..25ee334ea9 --- /dev/null +++ b/enos/modules/vault_verify_ui/scripts/smoke-verify-ui.sh @@ -0,0 +1,21 @@ +#!/usr/bin/env bash +# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: BUSL-1.1 + + +set -e + +fail() { + echo "$1" 1>&2 + exit 1 +} + +url_effective=$(curl -w "%{url_effective}\n" -I -L -s -S "${VAULT_ADDR}" -o /dev/null) +expected="${VAULT_ADDR}/ui/" +if [ "${url_effective}" != "${expected}" ]; then + fail "Expecting Vault to redirect to UI.\nExpected: ${expected}\nGot: ${url_effective}" +fi + +if curl -s "${VAULT_ADDR}/ui/" | grep -q 'Vault UI is not available'; then + fail "Vault UI is not available" +fi diff --git a/enos/modules/vault_verify_ui/templates/smoke-verify-ui.sh b/enos/modules/vault_verify_ui/templates/smoke-verify-ui.sh deleted file mode 100644 index a62c44fe3b..0000000000 --- a/enos/modules/vault_verify_ui/templates/smoke-verify-ui.sh +++ /dev/null @@ -1,17 +0,0 @@ -#!/usr/bin/env bash -# Copyright (c) HashiCorp, Inc. -# SPDX-License-Identifier: BUSL-1.1 - - -set -e - -fail() { - echo "$1" 1>&2 - exit 1 -} -if [ "$(curl -s -o /dev/null -w "%%{redirect_url}" http://localhost:8200/)" != "http://localhost:8200/ui/" ]; then - fail "Port 8200 not redirecting to UI" -fi -if curl -s http://localhost:8200/ui/ | grep -q 'Vault UI is not available'; then - fail "Vault UI is not available" -fi diff --git a/enos/modules/vault_verify_ui/variables.tf b/enos/modules/vault_verify_ui/variables.tf index a109e650f1..d06d60ac96 100644 --- a/enos/modules/vault_verify_ui/variables.tf +++ b/enos/modules/vault_verify_ui/variables.tf @@ -1,11 +1,10 @@ # Copyright (c) HashiCorp, Inc. 
# SPDX-License-Identifier: BUSL-1.1 - -variable "vault_install_dir" { +variable "vault_addr" { type = string - description = "The directory where the Vault binary will be installed" - default = null + description = "The vault cluster address" + default = "http://localhost:8200" } variable "vault_instance_count" { diff --git a/enos/modules/vault_verify_undo_logs/main.tf b/enos/modules/vault_verify_undo_logs/main.tf index 1646adf4aa..04809e6174 100644 --- a/enos/modules/vault_verify_undo_logs/main.tf +++ b/enos/modules/vault_verify_undo_logs/main.tf @@ -45,8 +45,9 @@ resource "enos_remote_exec" "smoke-verify-undo-logs" { for_each = local.public_ips environment = { - VAULT_TOKEN = var.vault_root_token - VAULT_ADDR = "http://localhost:8200" + VAULT_ADDR = "http://localhost:8200" + VAULT_INSTALL_DIR = var.vault_install_dir + VAULT_TOKEN = var.vault_root_token } scripts = [abspath("${path.module}/scripts/smoke-verify-undo-logs.sh")] diff --git a/enos/modules/vault_verify_undo_logs/scripts/smoke-verify-undo-logs.sh b/enos/modules/vault_verify_undo_logs/scripts/smoke-verify-undo-logs.sh index 83c6c0ab93..080ec079a8 100644 --- a/enos/modules/vault_verify_undo_logs/scripts/smoke-verify-undo-logs.sh +++ b/enos/modules/vault_verify_undo_logs/scripts/smoke-verify-undo-logs.sh @@ -2,29 +2,35 @@ # Copyright (c) HashiCorp, Inc. 
# SPDX-License-Identifier: BUSL-1.1 - function fail() { - echo "$1" 1>&2 - exit 1 + echo "$1" 1>&2 + exit 1 } +[[ -z "$VAULT_ADDR" ]] && fail "VAULT_ADDR env variable has not been set" +[[ -z "$VAULT_INSTALL_DIR" ]] && fail "VAULT_INSTALL_DIR env variable has not been set" +[[ -z "$VAULT_TOKEN" ]] && fail "VAULT_TOKEN env variable has not been set" + +binpath=${VAULT_INSTALL_DIR}/vault +test -x "$binpath" || fail "unable to locate vault binary at $binpath" + count=0 -retries=20 +retries=5 while :; do - leader_address=$(curl -H "X-Vault-Request: true" -H "X-Vault-Token: $VAULT_TOKEN" "$VAULT_ADDR/v1/sys/leader" | jq '.leader_address' | sed 's/\"//g') - state=$(curl --header "X-Vault-Token: $VAULT_TOKEN" "$leader_address/v1/sys/metrics" | jq -r '.Gauges[] | select(.Name == "vault.core.replication.write_undo_logs")') - target_undo_logs_status="$(jq -r '.Value' <<< "$state")" + state=$($binpath read sys/metrics -format=json | jq -r '.data.Gauges[] | select(.Name == "vault.core.replication.write_undo_logs")') + target_undo_logs_status="$(jq -r '.Value' <<< "$state")" - if [ "$target_undo_logs_status" == "1" ]; then - exit 0 - fi + if [ "$target_undo_logs_status" == "1" ]; then + exit 0 + fi - wait=$((2 ** count)) - count=$((count + 1)) - if [ "$count" -lt "$retries" ]; then - echo "$state" - sleep "$wait" - else - fail "Undo_logs did not get into the correct status" - fi + wait=$((2 ** count)) + count=$((count + 1)) + if [ "$count" -lt "$retries" ]; then + echo "Waiting for vault.core.replication.write_undo_logs to have Value:1" + echo "$state" + sleep "$wait" + else + fail "Timed out waiting for vault.core.replication.write_undo_logs to have Value:1" + fi done diff --git a/enos/modules/vault_verify_unsealed/main.tf b/enos/modules/vault_verify_unsealed/main.tf index 3fa8286e79..f555047035 100644 --- a/enos/modules/vault_verify_unsealed/main.tf +++ b/enos/modules/vault_verify_unsealed/main.tf @@ -45,11 +45,12 @@ locals { resource "enos_remote_exec" "verify_node_unsealed" 
{ for_each = local.instances - content = templatefile("${path.module}/templates/verify-vault-node-unsealed.sh", { - vault_cluster_addr = "${each.value.private_ip}:${var.vault_cluster_addr_port}" - vault_install_dir = var.vault_install_dir - vault_local_binary_path = "${var.vault_install_dir}/vault" - }) + scripts = [abspath("${path.module}/scripts/verify-vault-node-unsealed.sh")] + + environment = { + VAULT_CLUSTER_ADDR = "${each.value.private_ip}:${var.vault_cluster_addr_port}" + VAULT_INSTALL_DIR = var.vault_install_dir + } transport = { ssh = { diff --git a/enos/modules/vault_verify_unsealed/templates/verify-vault-node-unsealed.sh b/enos/modules/vault_verify_unsealed/scripts/verify-vault-node-unsealed.sh similarity index 68% rename from enos/modules/vault_verify_unsealed/templates/verify-vault-node-unsealed.sh rename to enos/modules/vault_verify_unsealed/scripts/verify-vault-node-unsealed.sh index 4ae3bd2a9e..44523f2fd7 100644 --- a/enos/modules/vault_verify_unsealed/templates/verify-vault-node-unsealed.sh +++ b/enos/modules/vault_verify_unsealed/scripts/verify-vault-node-unsealed.sh @@ -4,8 +4,7 @@ set -e -# shellcheck disable=SC2154 -binpath=${vault_install_dir}/vault +binpath=${VAULT_INSTALL_DIR}/vault fail() { echo "$1" 1>&2 @@ -14,12 +13,12 @@ fail() { test -x "$binpath" || fail "unable to locate vault binary at $binpath" -export VAULT_ADDR='http://127.0.0.1:8200' +export VAULT_ADDR=http://localhost:8200 count=0 retries=4 while :; do - health_status=$(curl http://127.0.0.1:8200/v1/sys/health |jq '.') + health_status=$(curl -s "${VAULT_CLUSTER_ADDR}/v1/sys/health" |jq '.') unseal_status=$($binpath status -format json | jq -Mr --argjson expected "false" '.sealed == $expected') if [[ "$unseal_status" == 'true' ]]; then echo "$health_status" @@ -31,7 +30,6 @@ while :; do if [ "$count" -lt "$retries" ]; then sleep "$wait" else - # shellcheck disable=SC2154 - fail "expected ${vault_cluster_addr} to be unsealed, got unseal status: $unseal_status" + fail "expected 
${VAULT_CLUSTER_ADDR} to be unsealed, got unseal status: $unseal_status" fi done diff --git a/enos/modules/vault_verify_version/main.tf b/enos/modules/vault_verify_version/main.tf index 116809eb91..a26f075049 100644 --- a/enos/modules/vault_verify_version/main.tf +++ b/enos/modules/vault_verify_version/main.tf @@ -69,14 +69,16 @@ locals { resource "enos_remote_exec" "verify_all_nodes_have_updated_version" { for_each = local.instances - content = templatefile("${path.module}/templates/verify-cluster-version.sh", { - vault_install_dir = var.vault_install_dir, - vault_build_date = var.vault_build_date, - vault_version = var.vault_product_version, - vault_edition = var.vault_edition, - vault_revision = var.vault_revision, - vault_token = var.vault_root_token, - }) + environment = { + VAULT_INSTALL_DIR = var.vault_install_dir, + VAULT_BUILD_DATE = var.vault_build_date, + VAULT_VERSION = var.vault_product_version, + VAULT_EDITION = var.vault_edition, + VAULT_REVISION = var.vault_revision, + VAULT_TOKEN = var.vault_root_token, + } + + scripts = [abspath("${path.module}/scripts/verify-cluster-version.sh")] transport = { ssh = { diff --git a/enos/modules/vault_verify_version/templates/verify-cluster-version.sh b/enos/modules/vault_verify_version/scripts/verify-cluster-version.sh similarity index 79% rename from enos/modules/vault_verify_version/templates/verify-cluster-version.sh rename to enos/modules/vault_verify_version/scripts/verify-cluster-version.sh index 7bd6355a34..9ec43876af 100644 --- a/enos/modules/vault_verify_version/templates/verify-cluster-version.sh +++ b/enos/modules/vault_verify_version/scripts/verify-cluster-version.sh @@ -7,26 +7,27 @@ # revision SHA, and edition metadata. 
set -e -binpath=${vault_install_dir}/vault -edition=${vault_edition} -version=${vault_version} -sha=${vault_revision} -build_date=${vault_build_date} +binpath=${VAULT_INSTALL_DIR}/vault +edition=${VAULT_EDITION} +version=${VAULT_VERSION} +sha=${VAULT_REVISION} +build_date=${VAULT_BUILD_DATE} +# VAULT_TOKEN must also be set fail() { - echo "$1" 1>&2 - exit 1 + echo "$1" 1>&2 + exit 1 } test -x "$binpath" || fail "unable to locate vault binary at $binpath" export VAULT_ADDR='http://127.0.0.1:8200' -export VAULT_TOKEN='${vault_token}' +[[ -z "$VAULT_TOKEN" ]] && fail "VAULT_TOKEN env variable has not been set" version_expected="Vault v$version ($sha), built $build_date" case "$edition" in - *oss) ;; + *ce) ;; *ent) ;; *ent.hsm) version_expected="$version_expected (cgo)";; *ent.fips1402) version_expected="$version_expected (cgo)" ;; diff --git a/enos/modules/vault_verify_write_data/scripts/smoke-enable-secrets-kv.sh b/enos/modules/vault_verify_write_data/scripts/smoke-enable-secrets-kv.sh index b24c354e95..2e90a73527 100644 --- a/enos/modules/vault_verify_write_data/scripts/smoke-enable-secrets-kv.sh +++ b/enos/modules/vault_verify_write_data/scripts/smoke-enable-secrets-kv.sh @@ -5,7 +5,7 @@ set -e -function retry { +retry() { local retries=$1 shift local count=0 @@ -24,11 +24,15 @@ function retry { return 0 } -function fail { - echo "$1" 1>&2 - exit 1 +fail() { + echo "$1" 1>&2 + exit 1 } +[[ -z "$VAULT_ADDR" ]] && fail "VAULT_ADDR env variable has not been set" +[[ -z "$VAULT_INSTALL_DIR" ]] && fail "VAULT_INSTALL_DIR env variable has not been set" +[[ -z "$VAULT_TOKEN" ]] && fail "VAULT_TOKEN env variable has not been set" + binpath=${VAULT_INSTALL_DIR}/vault test -x "$binpath" || fail "unable to locate vault binary at $binpath" @@ -36,16 +40,16 @@ test -x "$binpath" || fail "unable to locate vault binary at $binpath" retry 5 "$binpath" status > /dev/null 2>&1 # Create user policy -retry 5 $binpath policy write reguser -< /dev/null 2>&1 +retry 5 "$binpath" auth 
enable userpass > /dev/null 2>&1 # Create new user and attach reguser policy -retry 5 $binpath write auth/userpass/users/testuser password="passuser1" policies="reguser" +retry 5 "$binpath" write auth/userpass/users/testuser password="passuser1" policies="reguser" -retry 5 $binpath secrets enable -path="secret" kv +retry 5 "$binpath" secrets enable -path="secret" kv diff --git a/enos/modules/vault_verify_write_data/scripts/smoke-write-test-data.sh b/enos/modules/vault_verify_write_data/scripts/smoke-write-test-data.sh index 50beb0d96f..4bac3b0879 100644 --- a/enos/modules/vault_verify_write_data/scripts/smoke-write-test-data.sh +++ b/enos/modules/vault_verify_write_data/scripts/smoke-write-test-data.sh @@ -5,7 +5,7 @@ set -e -function retry { +retry() { local retries=$1 shift local count=0 @@ -24,15 +24,19 @@ function retry { return 0 } -function fail { - echo "$1" 1>&2 - exit 1 +fail() { + echo "$1" 1>&2 + exit 1 } +[[ -z "$TEST_KEY" ]] && fail "TEST_KEY env variable has not been set" +[[ -z "$TEST_VALUE" ]] && fail "TEST_VALUE env variable has not been set" +[[ -z "$VAULT_ADDR" ]] && fail "VAULT_ADDR env variable has not been set" +[[ -z "$VAULT_INSTALL_DIR" ]] && fail "VAULT_INSTALL_DIR env variable has not been set" +[[ -z "$VAULT_TOKEN" ]] && fail "VAULT_TOKEN env variable has not been set" + binpath=${VAULT_INSTALL_DIR}/vault -testkey=${TEST_KEY} -testvalue=${TEST_VALUE} test -x "$binpath" || fail "unable to locate vault binary at $binpath" -retry 5 $binpath kv put secret/test $testkey=$testvalue +retry 5 "$binpath" kv put secret/test "$TEST_KEY=$TEST_VALUE" diff --git a/enos/modules/vault_wait_for_leader/main.tf b/enos/modules/vault_wait_for_leader/main.tf new file mode 100644 index 0000000000..bfeac54763 --- /dev/null +++ b/enos/modules/vault_wait_for_leader/main.tf @@ -0,0 +1,68 @@ +# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: BUSL-1.1 + +terraform { + required_providers { + enos = { + source = "app.terraform.io/hashicorp-qti/enos" + } + } +} + +variable "vault_install_dir" { + type = string + description = "The directory where the Vault binary will be installed" +} + +variable "vault_root_token" { + type = string + description = "The vault root token" +} + +variable "vault_instance_count" { + type = number + description = "The number of instances in the vault cluster" +} + +variable "vault_hosts" { + type = map(object({ + private_ip = string + public_ip = string + })) + description = "The vault cluster hosts that can be expected as a leader" +} + +variable "timeout" { + type = number + description = "The max number of seconds to wait before timing out" + default = 60 +} + +variable "retry_interval" { + type = number + description = "How many seconds to wait between each retry" + default = 2 +} + +locals { + private_ips = [for k, v in values(tomap(var.vault_hosts)) : tostring(v["private_ip"])] +} + +resource "enos_remote_exec" "wait_for_leader_in_vault_hosts" { + environment = { + RETRY_INTERVAL = var.retry_interval + TIMEOUT_SECONDS = var.timeout + VAULT_ADDR = "http://127.0.0.1:8200" + VAULT_TOKEN = var.vault_root_token + VAULT_INSTANCE_PRIVATE_IPS = jsonencode(local.private_ips) + VAULT_INSTALL_DIR = var.vault_install_dir + } + + scripts = [abspath("${path.module}/scripts/wait-for-leader.sh")] + + transport = { + ssh = { + host = var.vault_hosts[0].public_ip + } + } +} diff --git a/enos/modules/vault_wait_for_leader/scripts/wait-for-leader.sh b/enos/modules/vault_wait_for_leader/scripts/wait-for-leader.sh new file mode 100644 index 0000000000..aa4ccb8de7 --- /dev/null +++ b/enos/modules/vault_wait_for_leader/scripts/wait-for-leader.sh @@ -0,0 +1,53 @@ +#!/usr/bin/env bash +# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: BUSL-1.1 + + +set -e + +fail() { + echo "$1" 1>&2 + exit 1 +} + +[[ -z "$RETRY_INTERVAL" ]] && fail "RETRY_INTERVAL env variable has not been set" +[[ -z "$TIMEOUT_SECONDS" ]] && fail "TIMEOUT_SECONDS env variable has not been set" +[[ -z "$VAULT_ADDR" ]] && fail "VAULT_ADDR env variable has not been set" +[[ -z "$VAULT_INSTALL_DIR" ]] && fail "VAULT_INSTALL_DIR env variable has not been set" +[[ -z "$VAULT_INSTANCE_PRIVATE_IPS" ]] && fail "VAULT_INSTANCE_PRIVATE_IPS env variable has not been set" +[[ -z "$VAULT_TOKEN" ]] && fail "VAULT_TOKEN env variable has not been set" + +binpath=${VAULT_INSTALL_DIR}/vault +test -x "$binpath" || fail "unable to locate vault binary at $binpath" + +findLeaderInPrivateIPs() { + # Find the leader private IP address + local leader_private_ip + if ! leader_private_ip=$($binpath read sys/leader -format=json | jq -r '.data.leader_address | scan("[0-9]+.[0-9]+.[0-9]+.[0-9]+")') ; then + # Some older versions of vault don't support reading sys/leader. Fallback to the cli status. + if leader_private_ip=$($binpath status -format json | jq '.leader_address | scan("[0-9]+.[0-9]+.[0-9]+.[0-9]+")'); then + return 1 + fi + fi + + if isIn=$(jq -r --arg ip "$leader_private_ip" 'map(select(. == $ip)) | length == 1' <<< "$VAULT_INSTANCE_PRIVATE_IPS"); then + if [[ "$isIn" == "true" ]]; then + echo "$leader_private_ip" + return 0 + fi + fi + + return 1 +} + +begin_time=$(date +%s) +end_time=$((begin_time + TIMEOUT_SECONDS)) +while [ "$(date +%s)" -lt "$end_time" ]; do + if findLeaderInPrivateIPs; then + exit 0 + fi + + sleep "$RETRY_INTERVAL" +done + +fail "Timed out waiting for one of $VAULT_INSTANCE_PRIVATE_IPS to be leader."