Backport [QT-602] Run proxy and agent test scenarios (#23176) into release/1.14.x (#23302)

* [QT-602] Run `proxy` and `agent` test scenarios (#23176)

Update our `proxy` and `agent` scenarios to support new variants and
perform baseline verification and their scenario specific verification.
We integrate these updated scenarios into the pipeline by adding them
to artifact samples.

We've also improved the reliability of the `autopilot` and `replication`
scenarios by refactoring our IP address gathering. Previously, we'd ask
vault for the primary IP address and use some Terraform logic to determine
followers. The leader IP address gathering script was also implicitly
responsible for ensuring that a found leader was within a given group of
hosts, and thus waiting for a given cluster to have a leader, and also for
doing some arithmetic and outputting `replication` specific output data.
We've broken these responsibilities into individual modules, improved their
error messages, and fixed various races and bugs, including:
* Fix a race between creating the file audit device and installing and starting
  vault in the `replication` scenario.
* Fix how we determine our leader and follower IP addresses. We now query
  vault instead of a prior implementation that inferred the followers and sometimes
  did not allow all nodes to be an expected leader.
* Fix a bug where we'd always fail on the first wrong condition
  in the `vault_verify_performance_replication` module.

We also performed some maintenance tasks on Enos scenarios by updating our
references from `oss` to `ce` to handle the naming and license changes. We
also enabled `shellcheck` linting for enos module scripts.

* Rename `oss` to `ce` for license and naming changes.
* Convert template enos scripts to scripts that take environment
  variables.
* Add `shellcheck` linting for enos module scripts.
* Add additional `backend` and `seal` support to `proxy` and `agent`
  scenarios.
* Update scenarios to include all baseline verification.
* Add `proxy` and `agent` scenarios to artifact samples.
* Remove IP address verification from the `vault_get_cluster_ips`
  modules and implement a new `vault_wait_for_leader` module.
* Determine follower IP addresses by querying vault in the
  `vault_get_cluster_ips` module.
* Move replication specific behavior out of the `vault_get_cluster_ips`
  module and into its own `replication_data` module.
* Extend initial version support for the `upgrade` and `autopilot`
  scenarios.

We also discovered an issue with undo_logs that has been described in
VAULT-20259. As such, we've disabled the undo_logs check until
it has been fixed.

* actions: fix actionlint error and linting logic (#23305)

Signed-off-by: Ryan Cragun <me@ryan.ec>
This commit is contained in:
Ryan Cragun
2023-09-27 10:53:12 -06:00
committed by GitHub
parent e6f5015fde
commit d4df9e8a3a
84 changed files with 2392 additions and 1028 deletions

View File

@@ -118,7 +118,7 @@ jobs:
- goos: windows - goos: windows
goarch: arm goarch: arm
fail-fast: true fail-fast: true
uses: ./.github/workflows/build-vault-oss.yml uses: ./.github/workflows/build-vault-ce.yml
with: with:
create-packages: false create-packages: false
goarch: ${{ matrix.goarch }} goarch: ${{ matrix.goarch }}
@@ -139,7 +139,7 @@ jobs:
goos: [linux] goos: [linux]
goarch: [arm, arm64, 386, amd64] goarch: [arm, arm64, 386, amd64]
fail-fast: true fail-fast: true
uses: ./.github/workflows/build-vault-oss.yml uses: ./.github/workflows/build-vault-ce.yml
with: with:
goarch: ${{ matrix.goarch }} goarch: ${{ matrix.goarch }}
goos: ${{ matrix.goos }} goos: ${{ matrix.goos }}
@@ -159,7 +159,7 @@ jobs:
goos: [darwin] goos: [darwin]
goarch: [amd64, arm64] goarch: [amd64, arm64]
fail-fast: true fail-fast: true
uses: ./.github/workflows/build-vault-oss.yml uses: ./.github/workflows/build-vault-ce.yml
with: with:
create-packages: false create-packages: false
goarch: ${{ matrix.goarch }} goarch: ${{ matrix.goarch }}
@@ -236,17 +236,17 @@ jobs:
fail-fast: false fail-fast: false
matrix: matrix:
include: include:
- sample-name: build_oss_linux_amd64_deb - sample-name: build_ce_linux_amd64_deb
build-artifact-name: vault_${{ needs.product-metadata.outputs.vault-version-package }}-1_amd64.deb build-artifact-name: vault_${{ needs.product-metadata.outputs.vault-version-package }}-1_amd64.deb
- sample-name: build_oss_linux_arm64_deb - sample-name: build_ce_linux_arm64_deb
build-artifact-name: vault_${{ needs.product-metadata.outputs.vault-version-package }}-1_arm64.deb build-artifact-name: vault_${{ needs.product-metadata.outputs.vault-version-package }}-1_arm64.deb
- sample-name: build_oss_linux_amd64_rpm - sample-name: build_ce_linux_amd64_rpm
build-artifact-name: vault-${{ needs.product-metadata.outputs.vault-version-package }}-1.x86_64.rpm build-artifact-name: vault-${{ needs.product-metadata.outputs.vault-version-package }}-1.x86_64.rpm
- sample-name: build_oss_linux_arm64_rpm - sample-name: build_ce_linux_arm64_rpm
build-artifact-name: vault-${{ needs.product-metadata.outputs.vault-version-package }}-1.aarch64.rpm build-artifact-name: vault-${{ needs.product-metadata.outputs.vault-version-package }}-1.aarch64.rpm
- sample-name: build_oss_linux_amd64_zip - sample-name: build_ce_linux_amd64_zip
build-artifact-name: vault_${{ needs.product-metadata.outputs.vault-version }}_linux_amd64.zip build-artifact-name: vault_${{ needs.product-metadata.outputs.vault-version }}_linux_amd64.zip
- sample-name: build_oss_linux_arm64_zip - sample-name: build_ce_linux_arm64_zip
build-artifact-name: vault_${{ needs.product-metadata.outputs.vault-version }}_linux_arm64.zip build-artifact-name: vault_${{ needs.product-metadata.outputs.vault-version }}_linux_arm64.zip
with: with:
build-artifact-name: ${{ matrix.build-artifact-name }} build-artifact-name: ${{ matrix.build-artifact-name }}
@@ -325,8 +325,8 @@ jobs:
steps: steps:
- run: | - run: |
tr -d '\n' <<< '${{ toJSON(needs.*.result) }}' | grep -q -v -E '(failure|cancelled)' tr -d '\n' <<< '${{ toJSON(needs.*.result) }}' | grep -q -v -E '(failure|cancelled)'
notify-completed-successfully-failures-oss: notify-completed-successfully-failures-ce:
if: ${{ always() && github.repository == 'hashicorp/vault' && needs.completed-successfully.result == 'failure' && (github.ref_name == 'main' || startsWith(github.ref_name, 'release/')) }} if: ${{ always() && github.repository == 'hashicorp/vault' && needs.completed-successfully.result == 'failure' && (github.ref_name == 'main' || startsWith(github.ref_name, 'release/')) }}
runs-on: ubuntu-latest runs-on: ubuntu-latest
permissions: permissions:
@@ -346,7 +346,7 @@ jobs:
with: with:
channel-id: "C05AABYEA9Y" # sent to #feed-vault-ci-official channel-id: "C05AABYEA9Y" # sent to #feed-vault-ci-official
payload: | payload: |
{"text":"OSS build failures on ${{ github.ref_name }}","blocks":[{"type":"header","text":{"type":"plain_text","text":":rotating_light: OSS build failures :rotating_light:","emoji":true}},{"type":"divider"},{"type":"section","text":{"type":"mrkdwn","text":"build(s) failed on ${{ github.ref_name }}"},"accessory":{"type":"button","text":{"type":"plain_text","text":"View Failing Workflow","emoji":true},"url":"${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }}"}}]} {"text":"CE build failures on ${{ github.ref_name }}","blocks":[{"type":"header","text":{"type":"plain_text","text":":rotating_light: CE build failures :rotating_light:","emoji":true}},{"type":"divider"},{"type":"section","text":{"type":"mrkdwn","text":"build(s) failed on ${{ github.ref_name }}"},"accessory":{"type":"button","text":{"type":"plain_text","text":"View Failing Workflow","emoji":true},"url":"${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }}"}}]}
notify-completed-successfully-failures-ent: notify-completed-successfully-failures-ent:
if: ${{ always() && github.repository == 'hashicorp/vault-enterprise' && needs.completed-successfully.result == 'failure' && (github.ref_name == 'main' || startsWith(github.ref_name, 'release/')) }} if: ${{ always() && github.repository == 'hashicorp/vault-enterprise' && needs.completed-successfully.result == 'failure' && (github.ref_name == 'main' || startsWith(github.ref_name, 'release/')) }}

View File

@@ -7,21 +7,37 @@ on:
- enos/** - enos/**
jobs: jobs:
lint: metadata:
# Only run this workflow on pull requests from hashicorp/vault branches # Only run this workflow on pull requests from hashicorp/vault branches
# as we need secrets to install enos. # as we need secrets to install enos.
if: "! github.event.pull_request.head.repo.fork" if: "! github.event.pull_request.head.repo.fork"
name: metadata
runs-on: ubuntu-latest runs-on: ubuntu-latest
outputs:
runs-on: ${{ steps.metadata.outputs.runs-on }}
version: ${{ steps.metadata.outputs.version }}
steps:
- uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9 # v3.5.3
- id: set-product-version
uses: hashicorp/actions-set-product-version@v1
- id: metadata
run: |
echo "version=${{ steps.set-product-version.outputs.product-version }}" >> "$GITHUB_OUTPUT"
github_repository="${{ github.repository }}"
if [ "${github_repository##*/}" == "vault-enterprise" ] ; then
echo 'runs-on=["self-hosted","ondemand","linux","type=c6a.4xlarge"]' >> "$GITHUB_OUTPUT"
else
echo 'runs-on="custom-linux-xl-vault-latest"' >> "$GITHUB_OUTPUT"
fi
lint:
needs: metadata
runs-on: ${{ fromJSON(needs.metadata.outputs.runs-on) }}
env: env:
GITHUB_TOKEN: ${{ secrets.ELEVATED_GITHUB_TOKEN }} GITHUB_TOKEN: ${{ secrets.ELEVATED_GITHUB_TOKEN }}
ENOS_VAR_tfc_api_token: ${{ secrets.TF_API_TOKEN }} ENOS_VAR_tfc_api_token: ${{ secrets.TF_API_TOKEN }}
steps: steps:
- uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9 # v3.5.3 - uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9 # v3.5.3
- name: Set Product version
id: set-product-version
uses: hashicorp/actions-set-product-version@v1
- id: get-version
run: echo "version=${{ steps.set-product-version.outputs.product-version }}" >> "$GITHUB_OUTPUT"
- uses: hashicorp/setup-terraform@v2 - uses: hashicorp/setup-terraform@v2
with: with:
terraform_wrapper: false terraform_wrapper: false
@@ -31,5 +47,5 @@ jobs:
- name: lint - name: lint
working-directory: ./enos working-directory: ./enos
env: env:
ENOS_VAR_vault_product_version: ${{ steps.get-version.outputs.version }} ENOS_VAR_vault_product_version: ${{ needs.metadata.outputs.version }}
run: make lint run: make lint

View File

@@ -43,17 +43,17 @@ jobs:
fail-fast: false fail-fast: false
matrix: matrix:
include: include:
- sample-name: release_oss_linux_amd64_deb - sample-name: release_ce_linux_amd64_deb
build-artifact-name: vault_${{ needs.product-metadata.outputs.vault-version-package }}-1_amd64.deb build-artifact-name: vault_${{ needs.product-metadata.outputs.vault-version-package }}-1_amd64.deb
- sample-name: release_oss_linux_arm64_deb - sample-name: release_ce_linux_arm64_deb
build-artifact-name: vault_${{ needs.product-metadata.outputs.vault-version-package }}-1_arm64.deb build-artifact-name: vault_${{ needs.product-metadata.outputs.vault-version-package }}-1_arm64.deb
- sample-name: release_oss_linux_amd64_rpm - sample-name: release_ce_linux_amd64_rpm
build-artifact-name: vault_${{ needs.product-metadata.outputs.vault-version-package }}-1.x86_64.rpm build-artifact-name: vault_${{ needs.product-metadata.outputs.vault-version-package }}-1.x86_64.rpm
- sample-name: release_oss_linux_arm64_rpm - sample-name: release_ce_linux_arm64_rpm
build-artifact-name: vault_${{ needs.product-metadata.outputs.vault-version-package }}-1.aarch64.rpm build-artifact-name: vault_${{ needs.product-metadata.outputs.vault-version-package }}-1.aarch64.rpm
- sample-name: release_oss_linux_amd64_zip - sample-name: release_ce_linux_amd64_zip
build-artifact-name: vault_${{ needs.product-metadata.outputs.vault-version }}_linux_amd64.zip build-artifact-name: vault_${{ needs.product-metadata.outputs.vault-version }}_linux_amd64.zip
- sample-name: release_oss_linux_arm64_zip - sample-name: release_ce_linux_arm64_zip
build-artifact-name: vault_${{ needs.product-metadata.outputs.vault-version }}_linux_arm64.zip build-artifact-name: vault_${{ needs.product-metadata.outputs.vault-version }}_linux_arm64.zip
with: with:
build-artifact-name: ${{ matrix.build-artifact-name }} build-artifact-name: ${{ matrix.build-artifact-name }}

View File

@@ -60,8 +60,8 @@ jobs:
echo "image_repo=hashicorp/vault-enterprise" >> "$GITHUB_ENV" echo "image_repo=hashicorp/vault-enterprise" >> "$GITHUB_ENV"
echo "image repo set to 'hashicorp/vault-enterprise'" echo "image repo set to 'hashicorp/vault-enterprise'"
else else
echo "edition=oss" >> "$GITHUB_ENV" echo "edition=ce" >> "$GITHUB_ENV"
echo "edition set to 'oss'" echo "edition set to 'ce'"
echo "image_repo=hashicorp/vault" >> "$GITHUB_ENV" echo "image_repo=hashicorp/vault" >> "$GITHUB_ENV"
echo "image repo set to 'hashicorp/vault'" echo "image repo set to 'hashicorp/vault'"
fi fi

View File

@@ -91,7 +91,7 @@ jobs:
echo "${{ secrets.SSH_KEY_PRIVATE_CI }}" > ./enos/support/private_key.pem echo "${{ secrets.SSH_KEY_PRIVATE_CI }}" > ./enos/support/private_key.pem
chmod 600 ./enos/support/private_key.pem chmod 600 ./enos/support/private_key.pem
- name: Set Up Vault Enterprise License - name: Set Up Vault Enterprise License
if: contains(${{ github.event.repository.name }}, 'ent') if: contains(github.event.repository.name, 'ent')
run: echo "${{ secrets.VAULT_LICENSE }}" > ./enos/support/vault.hclic || true run: echo "${{ secrets.VAULT_LICENSE }}" > ./enos/support/vault.hclic || true
- name: Check Chrome Installed - name: Check Chrome Installed
id: chrome-check id: chrome-check

View File

@@ -1,5 +1,5 @@
.PHONY: default .PHONY: default
default: check-fmt default: check-fmt shellcheck
.PHONY: check-fmt .PHONY: check-fmt
check-fmt: check-fmt-enos check-fmt-modules check-fmt: check-fmt-enos check-fmt-modules
@@ -25,7 +25,11 @@ fmt-modules:
.PHONY: validate-enos .PHONY: validate-enos
validate-enos: validate-enos:
enos scenario validate enos scenario validate --timeout 30m0s
.PHONY: lint .PHONY: lint
lint: check-fmt validate-enos lint: check-fmt shellcheck validate-enos
.PHONY: shellcheck
shellcheck:
find ./modules/ -type f -name '*.sh' | xargs shellcheck

View File

@@ -4,7 +4,7 @@
globals { globals {
backend_tag_key = "VaultStorage" backend_tag_key = "VaultStorage"
build_tags = { build_tags = {
"oss" = ["ui"] "ce" = ["ui"]
"ent" = ["ui", "enterprise", "ent"] "ent" = ["ui", "enterprise", "ent"]
"ent.fips1402" = ["ui", "enterprise", "cgo", "hsm", "fips", "fips_140_2", "ent.fips1402"] "ent.fips1402" = ["ui", "enterprise", "cgo", "hsm", "fips", "fips_140_2", "ent.fips1402"]
"ent.hsm" = ["ui", "enterprise", "cgo", "hsm", "venthsm"] "ent.hsm" = ["ui", "enterprise", "cgo", "hsm", "venthsm"]
@@ -20,9 +20,7 @@ globals {
rhel = ["nc"] rhel = ["nc"]
} }
sample_attributes = { sample_attributes = {
# aws_region = ["us-east-1", "us-west-2"] aws_region = ["us-east-1", "us-west-2"]
# NOTE(9/18/23): use more expensive regions temporarily until AWS network outage is resolved.
aws_region = ["us-east-2", "us-west-1"]
} }
tags = merge({ tags = merge({
"Project Name" : var.project_name "Project Name" : var.project_name

View File

@@ -53,6 +53,10 @@ module "read_license" {
source = "./modules/read_license" source = "./modules/read_license"
} }
module "replication_data" {
source = "./modules/replication_data"
}
module "shutdown_node" { module "shutdown_node" {
source = "./modules/shutdown_node" source = "./modules/shutdown_node"
} }
@@ -128,9 +132,27 @@ module "vault_cluster" {
module "vault_get_cluster_ips" { module "vault_get_cluster_ips" {
source = "./modules/vault_get_cluster_ips" source = "./modules/vault_get_cluster_ips"
vault_install_dir = var.vault_install_dir
vault_instance_count = var.vault_instance_count
}
module "vault_raft_remove_peer" {
source = "./modules/vault_raft_remove_peer"
vault_install_dir = var.vault_install_dir vault_install_dir = var.vault_install_dir
} }
module "vault_setup_perf_secondary" {
source = "./modules/vault_setup_perf_secondary"
vault_install_dir = var.vault_install_dir
}
module "vault_test_ui" {
source = "./modules/vault_test_ui"
ui_run_tests = var.ui_run_tests
}
module "vault_unseal_nodes" { module "vault_unseal_nodes" {
source = "./modules/vault_unseal_nodes" source = "./modules/vault_unseal_nodes"
@@ -145,6 +167,7 @@ module "vault_upgrade" {
vault_instance_count = var.vault_instance_count vault_instance_count = var.vault_instance_count
} }
module "vault_verify_autopilot" { module "vault_verify_autopilot" {
source = "./modules/vault_verify_autopilot" source = "./modules/vault_verify_autopilot"
@@ -177,7 +200,6 @@ module "vault_verify_replication" {
module "vault_verify_ui" { module "vault_verify_ui" {
source = "./modules/vault_verify_ui" source = "./modules/vault_verify_ui"
vault_install_dir = var.vault_install_dir
vault_instance_count = var.vault_instance_count vault_instance_count = var.vault_instance_count
} }
@@ -194,12 +216,6 @@ module "vault_setup_perf_primary" {
vault_install_dir = var.vault_install_dir vault_install_dir = var.vault_install_dir
} }
module "vault_setup_perf_secondary" {
source = "./modules/vault_setup_perf_secondary"
vault_install_dir = var.vault_install_dir
}
module "vault_verify_read_data" { module "vault_verify_read_data" {
source = "./modules/vault_verify_read_data" source = "./modules/vault_verify_read_data"
@@ -227,13 +243,9 @@ module "vault_verify_write_data" {
vault_instance_count = var.vault_instance_count vault_instance_count = var.vault_instance_count
} }
module "vault_raft_remove_peer" { module "vault_wait_for_leader" {
source = "./modules/vault_raft_remove_peer" source = "./modules/vault_wait_for_leader"
vault_install_dir = var.vault_install_dir
}
module "vault_test_ui" { vault_install_dir = var.vault_install_dir
source = "./modules/vault_test_ui" vault_instance_count = var.vault_instance_count
ui_run_tests = var.ui_run_tests
} }

View File

@@ -0,0 +1,294 @@
# Copyright (c) HashiCorp, Inc.
# SPDX-License-Identifier: BUSL-1.1
sample "build_ce_linux_amd64_deb" {
attributes = global.sample_attributes
subset "agent" {
matrix {
arch = ["amd64"]
artifact_source = ["crt"]
artifact_type = ["package"]
distro = ["ubuntu"]
edition = ["ce"]
}
}
subset "smoke" {
matrix {
arch = ["amd64"]
artifact_source = ["crt"]
artifact_type = ["package"]
distro = ["ubuntu"]
edition = ["ce"]
}
}
subset "proxy" {
matrix {
arch = ["amd64"]
artifact_source = ["crt"]
artifact_type = ["package"]
distro = ["ubuntu"]
edition = ["ce"]
}
}
subset "upgrade" {
matrix {
arch = ["amd64"]
artifact_source = ["crt"]
artifact_type = ["package"]
distro = ["ubuntu"]
edition = ["ce"]
exclude {
// Don't test from these versions in the build pipeline because of known issues
// in those older versions.
initial_version = ["1.8.12", "1.9.10", "1.10.11"]
}
}
}
}
sample "build_ce_linux_arm64_deb" {
attributes = global.sample_attributes
subset "agent" {
matrix {
arch = ["arm64"]
artifact_source = ["crt"]
artifact_type = ["package"]
distro = ["ubuntu"]
edition = ["ce"]
}
}
subset "smoke" {
matrix {
arch = ["arm64"]
artifact_source = ["crt"]
artifact_type = ["package"]
distro = ["ubuntu"]
edition = ["ce"]
}
}
subset "proxy" {
matrix {
arch = ["arm64"]
artifact_source = ["crt"]
artifact_type = ["package"]
distro = ["ubuntu"]
edition = ["ce"]
}
}
subset "upgrade" {
matrix {
arch = ["arm64"]
artifact_source = ["crt"]
artifact_type = ["package"]
distro = ["ubuntu"]
edition = ["ce"]
exclude {
// Don't test from these versions in the build pipeline because of known issues
// in those older versions.
initial_version = ["1.8.12", "1.9.10", "1.10.11"]
}
}
}
}
sample "build_ce_linux_arm64_rpm" {
attributes = global.sample_attributes
subset "agent" {
matrix {
arch = ["arm64"]
artifact_source = ["crt"]
artifact_type = ["package"]
distro = ["rhel"]
edition = ["ce"]
}
}
subset "smoke" {
matrix {
arch = ["arm64"]
artifact_source = ["crt"]
artifact_type = ["package"]
distro = ["rhel"]
edition = ["ce"]
}
}
subset "proxy" {
matrix {
arch = ["arm64"]
artifact_source = ["crt"]
artifact_type = ["package"]
distro = ["rhel"]
edition = ["ce"]
}
}
subset "upgrade" {
matrix {
arch = ["arm64"]
artifact_source = ["crt"]
artifact_type = ["package"]
distro = ["rhel"]
edition = ["ce"]
exclude {
// Don't test from these versions in the build pipeline because of known issues
// in those older versions.
initial_version = ["1.8.12", "1.9.10", "1.10.11"]
}
}
}
}
sample "build_ce_linux_amd64_rpm" {
attributes = global.sample_attributes
subset "agent" {
matrix {
arch = ["amd64"]
artifact_source = ["crt"]
artifact_type = ["package"]
distro = ["rhel"]
edition = ["ce"]
}
}
subset "smoke" {
matrix {
arch = ["amd64"]
artifact_source = ["crt"]
artifact_type = ["package"]
distro = ["rhel"]
edition = ["ce"]
}
}
subset "proxy" {
matrix {
arch = ["amd64"]
artifact_source = ["crt"]
artifact_type = ["package"]
distro = ["rhel"]
edition = ["ce"]
}
}
subset "upgrade" {
matrix {
arch = ["amd64"]
artifact_source = ["crt"]
artifact_type = ["package"]
distro = ["rhel"]
edition = ["ce"]
exclude {
// Don't test from these versions in the build pipeline because of known issues
// in those older versions.
initial_version = ["1.8.12", "1.9.10", "1.10.11"]
}
}
}
}
sample "build_ce_linux_amd64_zip" {
attributes = global.sample_attributes
subset "agent" {
matrix {
arch = ["amd64"]
artifact_type = ["bundle"]
artifact_source = ["crt"]
edition = ["ce"]
}
}
subset "smoke" {
matrix {
arch = ["amd64"]
artifact_type = ["bundle"]
artifact_source = ["crt"]
edition = ["ce"]
}
}
subset "proxy" {
matrix {
arch = ["amd64"]
artifact_type = ["bundle"]
artifact_source = ["crt"]
edition = ["ce"]
}
}
subset "upgrade" {
matrix {
arch = ["amd64"]
artifact_type = ["bundle"]
artifact_source = ["crt"]
edition = ["ce"]
exclude {
// Don't test from these versions in the build pipeline because of known issues
// in those older versions.
initial_version = ["1.8.12", "1.9.10", "1.10.11"]
}
}
}
}
sample "build_ce_linux_arm64_zip" {
attributes = global.sample_attributes
subset "agent" {
matrix {
arch = ["arm64"]
artifact_source = ["crt"]
artifact_type = ["bundle"]
edition = ["ce"]
}
}
subset "smoke" {
matrix {
arch = ["arm64"]
artifact_source = ["crt"]
artifact_type = ["bundle"]
edition = ["ce"]
}
}
subset "proxy" {
matrix {
arch = ["arm64"]
artifact_source = ["crt"]
artifact_type = ["bundle"]
edition = ["ce"]
}
}
subset "upgrade" {
matrix {
arch = ["arm64"]
artifact_source = ["crt"]
artifact_type = ["bundle"]
edition = ["ce"]
exclude {
// Don't test from these versions in the build pipeline because of known issues
// in those older versions.
initial_version = ["1.8.12", "1.9.10", "1.10.11"]
}
}
}
}

View File

@@ -0,0 +1,294 @@
# Copyright (c) HashiCorp, Inc.
# SPDX-License-Identifier: BUSL-1.1
sample "release_ce_linux_amd64_deb" {
attributes = global.sample_attributes
subset "agent" {
matrix {
arch = ["amd64"]
artifact_source = ["artifactory"]
artifact_type = ["package"]
distro = ["ubuntu"]
edition = ["ce"]
}
}
subset "smoke" {
matrix {
arch = ["amd64"]
artifact_source = ["artifactory"]
artifact_type = ["package"]
distro = ["ubuntu"]
edition = ["ce"]
}
}
subset "proxy" {
matrix {
arch = ["amd64"]
artifact_source = ["artifactory"]
artifact_type = ["package"]
distro = ["ubuntu"]
edition = ["ce"]
}
}
subset "upgrade" {
matrix {
arch = ["amd64"]
artifact_source = ["artifactory"]
artifact_type = ["package"]
distro = ["ubuntu"]
edition = ["ce"]
exclude {
// Don't test from these versions in the release pipeline because of known issues
// in those older versions.
initial_version = ["1.8.12", "1.9.10", "1.10.11"]
}
}
}
}
sample "release_ce_linux_arm64_deb" {
attributes = global.sample_attributes
subset "agent" {
matrix {
arch = ["arm64"]
artifact_source = ["artifactory"]
artifact_type = ["package"]
distro = ["ubuntu"]
edition = ["ce"]
}
}
subset "smoke" {
matrix {
arch = ["arm64"]
artifact_source = ["artifactory"]
artifact_type = ["package"]
distro = ["ubuntu"]
edition = ["ce"]
}
}
subset "proxy" {
matrix {
arch = ["arm64"]
artifact_source = ["artifactory"]
artifact_type = ["package"]
distro = ["ubuntu"]
edition = ["ce"]
}
}
subset "upgrade" {
matrix {
arch = ["arm64"]
artifact_source = ["artifactory"]
artifact_type = ["package"]
distro = ["ubuntu"]
edition = ["ce"]
exclude {
// Don't test from these versions in the release pipeline because of known issues
// in those older versions.
initial_version = ["1.8.12", "1.9.10", "1.10.11"]
}
}
}
}
sample "release_ce_linux_arm64_rpm" {
attributes = global.sample_attributes
subset "agent" {
matrix {
arch = ["arm64"]
artifact_source = ["artifactory"]
artifact_type = ["package"]
distro = ["rhel"]
edition = ["ce"]
}
}
subset "smoke" {
matrix {
arch = ["arm64"]
artifact_source = ["artifactory"]
artifact_type = ["package"]
distro = ["rhel"]
edition = ["ce"]
}
}
subset "proxy" {
matrix {
arch = ["arm64"]
artifact_source = ["artifactory"]
artifact_type = ["package"]
distro = ["rhel"]
edition = ["ce"]
}
}
subset "upgrade" {
matrix {
arch = ["arm64"]
artifact_source = ["artifactory"]
artifact_type = ["package"]
distro = ["rhel"]
edition = ["ce"]
exclude {
// Don't test from these versions in the release pipeline because of known issues
// in those older versions.
initial_version = ["1.8.12", "1.9.10", "1.10.11"]
}
}
}
}
sample "release_ce_linux_amd64_rpm" {
attributes = global.sample_attributes
subset "agent" {
matrix {
arch = ["amd64"]
artifact_source = ["artifactory"]
artifact_type = ["package"]
distro = ["rhel"]
edition = ["ce"]
}
}
subset "smoke" {
matrix {
arch = ["amd64"]
artifact_source = ["artifactory"]
artifact_type = ["package"]
distro = ["rhel"]
edition = ["ce"]
}
}
subset "proxy" {
matrix {
arch = ["amd64"]
artifact_source = ["artifactory"]
artifact_type = ["package"]
distro = ["rhel"]
edition = ["ce"]
}
}
subset "upgrade" {
matrix {
arch = ["amd64"]
artifact_source = ["artifactory"]
artifact_type = ["package"]
distro = ["rhel"]
edition = ["ce"]
exclude {
// Don't test from these versions in the release pipeline because of known issues
// in those older versions.
initial_version = ["1.8.12", "1.9.10", "1.10.11"]
}
}
}
}
sample "release_ce_linux_amd64_zip" {
attributes = global.sample_attributes
subset "agent" {
matrix {
arch = ["amd64"]
artifact_type = ["bundle"]
artifact_source = ["artifactory"]
edition = ["ce"]
}
}
subset "smoke" {
matrix {
arch = ["amd64"]
artifact_type = ["bundle"]
artifact_source = ["artifactory"]
edition = ["ce"]
}
}
subset "proxy" {
matrix {
arch = ["amd64"]
artifact_type = ["bundle"]
artifact_source = ["artifactory"]
edition = ["ce"]
}
}
subset "upgrade" {
matrix {
arch = ["amd64"]
artifact_type = ["bundle"]
artifact_source = ["artifactory"]
edition = ["ce"]
exclude {
// Don't test from these versions in the release pipeline because of known issues
// in those older versions.
initial_version = ["1.8.12", "1.9.10", "1.10.11"]
}
}
}
}
sample "release_ce_linux_arm64_zip" {
attributes = global.sample_attributes
subset "agent" {
matrix {
arch = ["arm64"]
artifact_source = ["artifactory"]
artifact_type = ["bundle"]
edition = ["ce"]
}
}
subset "smoke" {
matrix {
arch = ["arm64"]
artifact_source = ["artifactory"]
artifact_type = ["bundle"]
edition = ["ce"]
}
}
subset "proxy" {
matrix {
arch = ["arm64"]
artifact_source = ["artifactory"]
artifact_type = ["bundle"]
edition = ["ce"]
}
}
subset "upgrade" {
matrix {
arch = ["arm64"]
artifact_source = ["artifactory"]
artifact_type = ["bundle"]
edition = ["ce"]
exclude {
// Don't test from these versions in the release pipeline because of known issues
// in those older versions.
initial_version = ["1.8.12", "1.9.10", "1.10.11"]
}
}
}
}

View File

@@ -1,142 +0,0 @@
# Copyright (c) HashiCorp, Inc.
# SPDX-License-Identifier: BUSL-1.1
sample "build_oss_linux_amd64_deb" {
attributes = global.sample_attributes
subset "smoke" {
matrix {
arch = ["amd64"]
artifact_source = ["crt"]
artifact_type = ["package"]
distro = ["ubuntu"]
edition = ["oss"]
}
}
subset "upgrade" {
matrix {
arch = ["amd64"]
artifact_source = ["crt"]
artifact_type = ["package"]
distro = ["ubuntu"]
edition = ["oss"]
}
}
}
sample "build_oss_linux_arm64_deb" {
attributes = global.sample_attributes
subset "smoke" {
matrix {
arch = ["arm64"]
artifact_source = ["crt"]
artifact_type = ["package"]
distro = ["ubuntu"]
edition = ["oss"]
}
}
subset "upgrade" {
matrix {
arch = ["arm64"]
artifact_source = ["crt"]
artifact_type = ["package"]
distro = ["ubuntu"]
edition = ["oss"]
}
}
}
sample "build_oss_linux_arm64_rpm" {
attributes = global.sample_attributes
subset "smoke" {
matrix {
arch = ["arm64"]
artifact_source = ["crt"]
artifact_type = ["package"]
distro = ["rhel"]
edition = ["oss"]
}
}
subset "upgrade" {
matrix {
arch = ["arm64"]
artifact_source = ["crt"]
artifact_type = ["package"]
distro = ["rhel"]
edition = ["oss"]
}
}
}
sample "build_oss_linux_amd64_rpm" {
attributes = global.sample_attributes
subset "smoke" {
matrix {
arch = ["amd64"]
artifact_source = ["crt"]
artifact_type = ["package"]
distro = ["rhel"]
edition = ["oss"]
}
}
subset "upgrade" {
matrix {
arch = ["amd64"]
artifact_source = ["crt"]
artifact_type = ["package"]
distro = ["rhel"]
edition = ["oss"]
}
}
}
sample "build_oss_linux_amd64_zip" {
attributes = global.sample_attributes
subset "smoke" {
matrix {
arch = ["amd64"]
artifact_type = ["bundle"]
artifact_source = ["crt"]
edition = ["oss"]
}
}
subset "upgrade" {
matrix {
arch = ["amd64"]
artifact_type = ["bundle"]
artifact_source = ["crt"]
edition = ["oss"]
}
}
}
sample "build_oss_linux_arm64_zip" {
attributes = global.sample_attributes
subset "smoke" {
matrix {
arch = ["arm64"]
artifact_source = ["crt"]
artifact_type = ["bundle"]
edition = ["oss"]
}
}
subset "upgrade" {
matrix {
arch = ["arm64"]
artifact_source = ["crt"]
artifact_type = ["bundle"]
edition = ["oss"]
}
}
}

View File

@@ -1,142 +0,0 @@
# Copyright (c) HashiCorp, Inc.
# SPDX-License-Identifier: BUSL-1.1
sample "release_oss_linux_amd64_deb" {
attributes = global.sample_attributes
subset "smoke" {
matrix {
arch = ["amd64"]
artifact_source = ["artifactory"]
artifact_type = ["package"]
distro = ["ubuntu"]
edition = ["oss"]
}
}
subset "upgrade" {
matrix {
arch = ["amd64"]
artifact_source = ["artifactory"]
artifact_type = ["package"]
distro = ["ubuntu"]
edition = ["oss"]
}
}
}
sample "release_oss_linux_arm64_deb" {
attributes = global.sample_attributes
subset "smoke" {
matrix {
arch = ["arm64"]
artifact_source = ["artifactory"]
artifact_type = ["package"]
distro = ["ubuntu"]
edition = ["oss"]
}
}
subset "upgrade" {
matrix {
arch = ["arm64"]
artifact_source = ["artifactory"]
artifact_type = ["package"]
distro = ["ubuntu"]
edition = ["oss"]
}
}
}
sample "release_oss_linux_arm64_rpm" {
attributes = global.sample_attributes
subset "smoke" {
matrix {
arch = ["arm64"]
artifact_source = ["artifactory"]
artifact_type = ["package"]
distro = ["rhel"]
edition = ["oss"]
}
}
subset "upgrade" {
matrix {
arch = ["arm64"]
artifact_source = ["artifactory"]
artifact_type = ["package"]
distro = ["rhel"]
edition = ["oss"]
}
}
}
sample "release_oss_linux_amd64_rpm" {
attributes = global.sample_attributes
subset "smoke" {
matrix {
arch = ["amd64"]
artifact_source = ["artifactory"]
artifact_type = ["package"]
distro = ["rhel"]
edition = ["oss"]
}
}
subset "upgrade" {
matrix {
arch = ["amd64"]
artifact_source = ["artifactory"]
artifact_type = ["package"]
distro = ["rhel"]
edition = ["oss"]
}
}
}
sample "release_oss_linux_amd64_zip" {
attributes = global.sample_attributes
subset "smoke" {
matrix {
arch = ["amd64"]
artifact_type = ["bundle"]
artifact_source = ["artifactory"]
edition = ["oss"]
}
}
subset "upgrade" {
matrix {
arch = ["amd64"]
artifact_type = ["bundle"]
artifact_source = ["artifactory"]
edition = ["oss"]
}
}
}
sample "release_oss_linux_arm64_zip" {
attributes = global.sample_attributes
subset "smoke" {
matrix {
arch = ["arm64"]
artifact_source = ["artifactory"]
artifact_type = ["bundle"]
edition = ["oss"]
}
}
subset "upgrade" {
matrix {
arch = ["arm64"]
artifact_source = ["artifactory"]
artifact_type = ["bundle"]
edition = ["oss"]
}
}
}

View File

@@ -5,8 +5,12 @@ scenario "agent" {
matrix { matrix {
arch = ["amd64", "arm64"] arch = ["amd64", "arm64"]
artifact_source = ["local", "crt", "artifactory"] artifact_source = ["local", "crt", "artifactory"]
artifact_type = ["bundle", "package"]
backend = ["consul", "raft"]
consul_version = ["1.12.9", "1.13.9", "1.14.9", "1.15.5", "1.16.1"]
distro = ["ubuntu", "rhel"] distro = ["ubuntu", "rhel"]
edition = ["oss", "ent", "ent.fips1402", "ent.hsm", "ent.hsm.fips1402"] edition = ["ce", "ent", "ent.fips1402", "ent.hsm", "ent.hsm.fips1402"]
seal = ["awskms", "shamir"]
# Our local builder always creates bundles # Our local builder always creates bundles
exclude { exclude {
@@ -30,12 +34,18 @@ scenario "agent" {
] ]
locals { locals {
bundle_path = matrix.artifact_source != "artifactory" ? abspath(var.vault_artifact_path) : null artifact_path = matrix.artifact_source != "artifactory" ? abspath(var.vault_artifact_path) : null
enos_provider = { enos_provider = {
rhel = provider.enos.rhel rhel = provider.enos.rhel
ubuntu = provider.enos.ubuntu ubuntu = provider.enos.ubuntu
} }
install_artifactory_artifact = local.bundle_path == null manage_service = matrix.artifact_type == "bundle"
vault_install_dir = matrix.artifact_type == "bundle" ? var.vault_install_dir : global.vault_install_dir_packages[matrix.distro]
}
step "get_local_metadata" {
skip_step = matrix.artifact_source != "local"
module = module.get_local_metadata
} }
step "build_vault" { step "build_vault" {
@@ -43,7 +53,7 @@ scenario "agent" {
variables { variables {
build_tags = var.vault_local_build_tags != null ? var.vault_local_build_tags : global.build_tags[matrix.edition] build_tags = var.vault_local_build_tags != null ? var.vault_local_build_tags : global.build_tags[matrix.edition]
bundle_path = local.bundle_path artifact_path = local.artifact_path
goarch = matrix.arch goarch = matrix.arch
goos = "linux" goos = "linux"
artifactory_host = matrix.artifact_source == "artifactory" ? var.artifactory_host : null artifactory_host = matrix.artifact_source == "artifactory" ? var.artifactory_host : null
@@ -52,7 +62,7 @@ scenario "agent" {
artifactory_token = matrix.artifact_source == "artifactory" ? var.artifactory_token : null artifactory_token = matrix.artifact_source == "artifactory" ? var.artifactory_token : null
arch = matrix.artifact_source == "artifactory" ? matrix.arch : null arch = matrix.artifact_source == "artifactory" ? matrix.arch : null
product_version = var.vault_product_version product_version = var.vault_product_version
artifact_type = matrix.artifact_source == "artifactory" ? var.vault_artifact_type : null artifact_type = matrix.artifact_type
distro = matrix.artifact_source == "artifactory" ? matrix.distro : null distro = matrix.artifact_source == "artifactory" ? matrix.distro : null
edition = matrix.artifact_source == "artifactory" ? matrix.edition : null edition = matrix.artifact_source == "artifactory" ? matrix.edition : null
revision = var.vault_revision revision = var.vault_revision
@@ -71,8 +81,19 @@ scenario "agent" {
} }
} }
step "read_license" { // This step reads the contents of the backend license if we're using a Consul backend and
skip_step = matrix.edition == "oss" // the edition is "ent".
step "read_backend_license" {
skip_step = matrix.backend == "raft" || var.backend_edition == "ce"
module = module.read_license
variables {
file_name = global.backend_license_path
}
}
step "read_vault_license" {
skip_step = matrix.edition == "ce"
module = module.read_license module = module.read_license
variables { variables {
@@ -97,9 +118,49 @@ scenario "agent" {
} }
} }
step "create_vault_cluster_backend_targets" {
module = matrix.backend == "consul" ? module.target_ec2_instances : module.target_ec2_shim
depends_on = [step.create_vpc]
providers = {
enos = provider.enos.ubuntu
}
variables {
ami_id = step.ec2_info.ami_ids["arm64"]["ubuntu"]["22.04"]
awskms_unseal_key_arn = step.create_vpc.kms_key_arn
cluster_tag_key = global.backend_tag_key
common_tags = global.tags
vpc_id = step.create_vpc.vpc_id
}
}
step "create_backend_cluster" {
module = "backend_${matrix.backend}"
depends_on = [
step.create_vault_cluster_backend_targets
]
providers = {
enos = provider.enos.ubuntu
}
variables {
cluster_name = step.create_vault_cluster_backend_targets.cluster_name
cluster_tag_key = global.backend_tag_key
license = (matrix.backend == "consul" && var.backend_edition == "ent") ? step.read_backend_license.license : null
release = {
edition = var.backend_edition
version = matrix.consul_version
}
target_hosts = step.create_vault_cluster_backend_targets.hosts
}
}
step "create_vault_cluster" { step "create_vault_cluster" {
module = module.vault_cluster module = module.vault_cluster
depends_on = [ depends_on = [
step.create_backend_cluster,
step.build_vault, step.build_vault,
step.create_vault_cluster_targets step.create_vault_cluster_targets
] ]
@@ -109,17 +170,42 @@ scenario "agent" {
} }
variables { variables {
artifactory_release = matrix.artifact_source == "artifactory" ? step.build_vault.vault_artifactory_release : null artifactory_release = matrix.artifact_source == "artifactory" ? step.build_vault.vault_artifactory_release : null
awskms_unseal_key_arn = step.create_vpc.kms_key_arn awskms_unseal_key_arn = step.create_vpc.kms_key_arn
cluster_name = step.create_vault_cluster_targets.cluster_name backend_cluster_name = step.create_vault_cluster_backend_targets.cluster_name
enable_audit_devices = var.vault_enable_audit_devices backend_cluster_tag_key = global.backend_tag_key
install_dir = var.vault_install_dir cluster_name = step.create_vault_cluster_targets.cluster_name
license = matrix.edition != "oss" ? step.read_license.license : null consul_license = (matrix.backend == "consul" && var.backend_edition == "ent") ? step.read_backend_license.license : null
local_artifact_path = local.bundle_path consul_release = matrix.backend == "consul" ? {
packages = concat(global.packages, global.distro_packages[matrix.distro]) edition = var.backend_edition
storage_backend = "raft" version = matrix.consul_version
target_hosts = step.create_vault_cluster_targets.hosts } : null
unseal_method = "shamir" enable_audit_devices = var.vault_enable_audit_devices
install_dir = local.vault_install_dir
license = matrix.edition != "ce" ? step.read_vault_license.license : null
local_artifact_path = local.artifact_path
manage_service = local.manage_service
packages = concat(global.packages, global.distro_packages[matrix.distro])
storage_backend = matrix.backend
target_hosts = step.create_vault_cluster_targets.hosts
unseal_method = matrix.seal
}
}
// Wait for our cluster to elect a leader
step "wait_for_leader" {
module = module.vault_wait_for_leader
depends_on = [step.create_vault_cluster]
providers = {
enos = local.enos_provider[matrix.distro]
}
variables {
timeout = 120 # seconds
vault_hosts = step.create_vault_cluster_targets.hosts
vault_install_dir = local.vault_install_dir
vault_root_token = step.create_vault_cluster.root_token
} }
} }
@@ -128,6 +214,7 @@ scenario "agent" {
depends_on = [ depends_on = [
step.build_vault, step.build_vault,
step.create_vault_cluster, step.create_vault_cluster,
step.wait_for_leader,
] ]
providers = { providers = {
@@ -135,6 +222,7 @@ scenario "agent" {
} }
variables { variables {
vault_install_dir = local.vault_install_dir
vault_instances = step.create_vault_cluster_targets.hosts vault_instances = step.create_vault_cluster_targets.hosts
vault_root_token = step.create_vault_cluster.root_token vault_root_token = step.create_vault_cluster.root_token
vault_agent_template_destination = "/tmp/agent_output.txt" vault_agent_template_destination = "/tmp/agent_output.txt"
@@ -147,6 +235,7 @@ scenario "agent" {
depends_on = [ depends_on = [
step.create_vault_cluster, step.create_vault_cluster,
step.start_vault_agent, step.start_vault_agent,
step.wait_for_leader,
] ]
providers = { providers = {
@@ -160,7 +249,147 @@ scenario "agent" {
} }
} }
output "awkms_unseal_key_arn" { step "get_vault_cluster_ips" {
module = module.vault_get_cluster_ips
depends_on = [step.wait_for_leader]
providers = {
enos = local.enos_provider[matrix.distro]
}
variables {
vault_hosts = step.create_vault_cluster_targets.hosts
vault_install_dir = local.vault_install_dir
vault_root_token = step.create_vault_cluster.root_token
}
}
step "verify_vault_version" {
module = module.vault_verify_version
depends_on = [step.wait_for_leader]
providers = {
enos = local.enos_provider[matrix.distro]
}
variables {
vault_instances = step.create_vault_cluster_targets.hosts
vault_edition = matrix.edition
vault_install_dir = local.vault_install_dir
vault_product_version = matrix.artifact_source == "local" ? step.get_local_metadata.version : var.vault_product_version
vault_revision = matrix.artifact_source == "local" ? step.get_local_metadata.revision : var.vault_revision
vault_build_date = matrix.artifact_source == "local" ? step.get_local_metadata.build_date : var.vault_build_date
vault_root_token = step.create_vault_cluster.root_token
}
}
step "verify_vault_unsealed" {
module = module.vault_verify_unsealed
depends_on = [step.wait_for_leader]
providers = {
enos = local.enos_provider[matrix.distro]
}
variables {
vault_install_dir = local.vault_install_dir
vault_instances = step.create_vault_cluster_targets.hosts
}
}
step "verify_write_test_data" {
module = module.vault_verify_write_data
depends_on = [
step.create_vault_cluster,
step.get_vault_cluster_ips
]
providers = {
enos = local.enos_provider[matrix.distro]
}
variables {
leader_public_ip = step.get_vault_cluster_ips.leader_public_ip
leader_private_ip = step.get_vault_cluster_ips.leader_private_ip
vault_instances = step.create_vault_cluster_targets.hosts
vault_install_dir = local.vault_install_dir
vault_root_token = step.create_vault_cluster.root_token
}
}
step "verify_raft_auto_join_voter" {
skip_step = matrix.backend != "raft"
module = module.vault_verify_raft_auto_join_voter
depends_on = [
step.create_vault_cluster,
step.get_vault_cluster_ips
]
providers = {
enos = local.enos_provider[matrix.distro]
}
variables {
vault_install_dir = local.vault_install_dir
vault_instances = step.create_vault_cluster_targets.hosts
vault_root_token = step.create_vault_cluster.root_token
}
}
step "verify_replication" {
module = module.vault_verify_replication
depends_on = [
step.create_vault_cluster,
step.get_vault_cluster_ips
]
providers = {
enos = local.enos_provider[matrix.distro]
}
variables {
vault_edition = matrix.edition
vault_install_dir = local.vault_install_dir
vault_instances = step.create_vault_cluster_targets.hosts
}
}
step "verify_read_test_data" {
module = module.vault_verify_read_data
depends_on = [
step.verify_write_test_data,
step.verify_replication
]
providers = {
enos = local.enos_provider[matrix.distro]
}
variables {
node_public_ips = step.get_vault_cluster_ips.follower_public_ips
vault_install_dir = local.vault_install_dir
}
}
step "verify_ui" {
module = module.vault_verify_ui
depends_on = [step.create_vault_cluster]
providers = {
enos = local.enos_provider[matrix.distro]
}
variables {
vault_instances = step.create_vault_cluster_targets.hosts
}
}
output "audit_device_file_path" {
description = "The file path for the file audit device, if enabled"
value = step.create_vault_cluster.audit_device_file_path
}
output "awskms_unseal_key_arn" {
description = "The Vault cluster KMS key arn" description = "The Vault cluster KMS key arn"
value = step.create_vpc.kms_key_arn value = step.create_vpc.kms_key_arn
} }
@@ -214,9 +443,4 @@ scenario "agent" {
description = "The Vault cluster unseal keys hex" description = "The Vault cluster unseal keys hex"
value = step.create_vault_cluster.unseal_keys_hex value = step.create_vault_cluster.unseal_keys_hex
} }
output "vault_audit_device_file_path" {
description = "The file path for the file audit device, if enabled"
value = step.create_vault_cluster.audit_device_file_path
}
} }

View File

@@ -7,7 +7,10 @@ scenario "autopilot" {
artifact_source = ["local", "crt", "artifactory"] artifact_source = ["local", "crt", "artifactory"]
artifact_type = ["bundle", "package"] artifact_type = ["bundle", "package"]
distro = ["ubuntu", "rhel"] distro = ["ubuntu", "rhel"]
edition = ["ent", "ent.fips1402", "ent.hsm", "ent.hsm.fips1402"] edition = ["ce", "ent", "ent.fips1402", "ent.hsm", "ent.hsm.fips1402"]
// NOTE: when backporting, make sure that our initial versions are less than that
// release branch's version.
initial_version = ["1.11.12", "1.12.11", "1.13.6", "1.14.2"]
seal = ["awskms", "shamir"] seal = ["awskms", "shamir"]
# Our local builder always creates bundles # Our local builder always creates bundles
@@ -114,12 +117,15 @@ scenario "autopilot" {
awskms_unseal_key_arn = step.create_vpc.kms_key_arn awskms_unseal_key_arn = step.create_vpc.kms_key_arn
cluster_name = step.create_vault_cluster_targets.cluster_name cluster_name = step.create_vault_cluster_targets.cluster_name
install_dir = local.vault_install_dir install_dir = local.vault_install_dir
license = matrix.edition != "oss" ? step.read_license.license : null license = matrix.edition != "ce" ? step.read_license.license : null
packages = concat(global.packages, global.distro_packages[matrix.distro]) packages = concat(global.packages, global.distro_packages[matrix.distro])
release = var.vault_autopilot_initial_release release = {
storage_backend = "raft" edition = matrix.edition
version = matrix.initial_version
}
storage_backend = "raft"
storage_backend_addl_config = { storage_backend_addl_config = {
autopilot_upgrade_version = var.vault_autopilot_initial_release.version autopilot_upgrade_version = matrix.initial_version
} }
target_hosts = step.create_vault_cluster_targets.hosts target_hosts = step.create_vault_cluster_targets.hosts
unseal_method = matrix.seal unseal_method = matrix.seal
@@ -141,7 +147,7 @@ scenario "autopilot" {
} }
variables { variables {
vault_instances = step.create_vault_cluster.target_hosts vault_hosts = step.create_vault_cluster.target_hosts
vault_install_dir = local.vault_install_dir vault_install_dir = local.vault_install_dir
vault_root_token = step.create_vault_cluster.root_token vault_root_token = step.create_vault_cluster.root_token
} }
@@ -213,7 +219,7 @@ scenario "autopilot" {
force_unseal = matrix.seal == "shamir" force_unseal = matrix.seal == "shamir"
initialize_cluster = false initialize_cluster = false
install_dir = local.vault_install_dir install_dir = local.vault_install_dir
license = matrix.edition != "oss" ? step.read_license.license : null license = matrix.edition != "ce" ? step.read_license.license : null
local_artifact_path = local.artifact_path local_artifact_path = local.artifact_path
manage_service = local.manage_service manage_service = local.manage_service
packages = concat(global.packages, global.distro_packages[matrix.distro]) packages = concat(global.packages, global.distro_packages[matrix.distro])
@@ -285,8 +291,8 @@ scenario "autopilot" {
} }
} }
step "get_updated_vault_cluster_ips" { step "wait_for_leader_in_upgrade_targets" {
module = module.vault_get_cluster_ips module = module.vault_wait_for_leader
depends_on = [ depends_on = [
step.create_vault_cluster, step.create_vault_cluster,
step.create_vault_cluster_upgrade_targets, step.create_vault_cluster_upgrade_targets,
@@ -299,11 +305,30 @@ scenario "autopilot" {
} }
variables { variables {
vault_instances = step.create_vault_cluster.target_hosts vault_install_dir = local.vault_install_dir
vault_install_dir = local.vault_install_dir vault_root_token = step.create_vault_cluster.root_token
vault_root_token = step.create_vault_cluster.root_token vault_hosts = step.upgrade_vault_cluster_with_autopilot.target_hosts
node_public_ip = step.get_vault_cluster_ips.leader_public_ip }
added_vault_instances = step.upgrade_vault_cluster_with_autopilot.target_hosts }
step "get_updated_vault_cluster_ips" {
module = module.vault_get_cluster_ips
depends_on = [
step.create_vault_cluster,
step.create_vault_cluster_upgrade_targets,
step.get_vault_cluster_ips,
step.upgrade_vault_cluster_with_autopilot,
step.wait_for_leader_in_upgrade_targets,
]
providers = {
enos = local.enos_provider[matrix.distro]
}
variables {
vault_hosts = step.upgrade_vault_cluster_with_autopilot.target_hosts
vault_install_dir = local.vault_install_dir
vault_root_token = step.create_vault_cluster.root_token
} }
} }
@@ -388,9 +413,73 @@ scenario "autopilot" {
} }
} }
step "verify_replication" {
module = module.vault_verify_replication
depends_on = [
step.create_vault_cluster_upgrade_targets,
step.upgrade_vault_cluster_with_autopilot,
step.verify_raft_auto_join_voter,
step.remove_old_nodes
]
providers = {
enos = local.enos_provider[matrix.distro]
}
variables {
vault_edition = matrix.edition
vault_install_dir = local.vault_install_dir
vault_instances = step.upgrade_vault_cluster_with_autopilot.target_hosts
}
}
step "verify_vault_version" {
module = module.vault_verify_version
depends_on = [
step.create_vault_cluster_upgrade_targets,
step.upgrade_vault_cluster_with_autopilot,
step.verify_raft_auto_join_voter,
step.remove_old_nodes
]
providers = {
enos = local.enos_provider[matrix.distro]
}
variables {
vault_instances = step.upgrade_vault_cluster_with_autopilot.target_hosts
vault_edition = matrix.edition
vault_install_dir = local.vault_install_dir
vault_product_version = matrix.artifact_source == "local" ? step.get_local_metadata.version : var.vault_product_version
vault_revision = matrix.artifact_source == "local" ? step.get_local_metadata.revision : var.vault_revision
vault_build_date = matrix.artifact_source == "local" ? step.get_local_metadata.build_date : var.vault_build_date
vault_root_token = step.create_vault_cluster.root_token
}
}
step "verify_ui" {
module = module.vault_verify_ui
depends_on = [
step.create_vault_cluster_upgrade_targets,
step.upgrade_vault_cluster_with_autopilot,
step.verify_raft_auto_join_voter,
step.remove_old_nodes
]
providers = {
enos = local.enos_provider[matrix.distro]
}
variables {
vault_instances = step.upgrade_vault_cluster_with_autopilot.target_hosts
}
}
step "verify_undo_logs_status" { step "verify_undo_logs_status" {
skip_step = semverconstraint(var.vault_product_version, "<1.13.0-0") skip_step = true
module = module.vault_verify_undo_logs # NOTE: temporarily disable undo logs checking until it is fixed. See VAULT-20259
# skip_step = semverconstraint(var.vault_product_version, "<1.13.0-0")
module = module.vault_verify_undo_logs
depends_on = [ depends_on = [
step.create_vault_cluster_upgrade_targets, step.create_vault_cluster_upgrade_targets,
step.remove_old_nodes, step.remove_old_nodes,

View File

@@ -5,8 +5,24 @@ scenario "proxy" {
matrix { matrix {
arch = ["amd64", "arm64"] arch = ["amd64", "arm64"]
artifact_source = ["local", "crt", "artifactory"] artifact_source = ["local", "crt", "artifactory"]
artifact_type = ["bundle", "package"]
backend = ["consul", "raft"]
consul_version = ["1.12.9", "1.13.9", "1.14.9", "1.15.5", "1.16.1"]
distro = ["ubuntu", "rhel"] distro = ["ubuntu", "rhel"]
edition = ["oss", "ent", "ent.fips1402", "ent.hsm", "ent.hsm.fips1402"] edition = ["ce", "ent", "ent.fips1402", "ent.hsm", "ent.hsm.fips1402"]
seal = ["awskms", "shamir"]
# Our local builder always creates bundles
exclude {
artifact_source = ["local"]
artifact_type = ["package"]
}
# HSM and FIPS 140-2 are only supported on amd64
exclude {
arch = ["arm64"]
edition = ["ent.fips1402", "ent.hsm", "ent.hsm.fips1402"]
}
} }
terraform_cli = terraform_cli.default terraform_cli = terraform_cli.default
@@ -18,11 +34,13 @@ scenario "proxy" {
] ]
locals { locals {
bundle_path = matrix.artifact_source != "artifactory" ? abspath(var.vault_artifact_path) : null artifact_path = matrix.artifact_source != "artifactory" ? abspath(var.vault_artifact_path) : null
enos_provider = { enos_provider = {
rhel = provider.enos.rhel rhel = provider.enos.rhel
ubuntu = provider.enos.ubuntu ubuntu = provider.enos.ubuntu
} }
manage_service = matrix.artifact_type == "bundle"
vault_install_dir = matrix.artifact_type == "bundle" ? var.vault_install_dir : global.vault_install_dir_packages[matrix.distro]
} }
step "get_local_metadata" { step "get_local_metadata" {
@@ -35,7 +53,7 @@ scenario "proxy" {
variables { variables {
build_tags = var.vault_local_build_tags != null ? var.vault_local_build_tags : global.build_tags[matrix.edition] build_tags = var.vault_local_build_tags != null ? var.vault_local_build_tags : global.build_tags[matrix.edition]
bundle_path = local.bundle_path artifact_path = local.artifact_path
goarch = matrix.arch goarch = matrix.arch
goos = "linux" goos = "linux"
artifactory_host = matrix.artifact_source == "artifactory" ? var.artifactory_host : null artifactory_host = matrix.artifact_source == "artifactory" ? var.artifactory_host : null
@@ -44,7 +62,7 @@ scenario "proxy" {
artifactory_token = matrix.artifact_source == "artifactory" ? var.artifactory_token : null artifactory_token = matrix.artifact_source == "artifactory" ? var.artifactory_token : null
arch = matrix.artifact_source == "artifactory" ? matrix.arch : null arch = matrix.artifact_source == "artifactory" ? matrix.arch : null
product_version = var.vault_product_version product_version = var.vault_product_version
artifact_type = matrix.artifact_source == "artifactory" ? var.vault_artifact_type : null artifact_type = matrix.artifact_type
distro = matrix.artifact_source == "artifactory" ? matrix.distro : null distro = matrix.artifact_source == "artifactory" ? matrix.distro : null
edition = matrix.artifact_source == "artifactory" ? matrix.edition : null edition = matrix.artifact_source == "artifactory" ? matrix.edition : null
revision = var.vault_revision revision = var.vault_revision
@@ -63,8 +81,19 @@ scenario "proxy" {
} }
} }
step "read_license" { // This step reads the contents of the backend license if we're using a Consul backend and
skip_step = matrix.edition == "oss" // the edition is "ent".
step "read_backend_license" {
skip_step = matrix.backend == "raft" || var.backend_edition == "ce"
module = module.read_license
variables {
file_name = global.backend_license_path
}
}
step "read_vault_license" {
skip_step = matrix.edition == "ce"
module = module.read_license module = module.read_license
variables { variables {
@@ -89,9 +118,49 @@ scenario "proxy" {
} }
} }
step "create_vault_cluster_backend_targets" {
module = matrix.backend == "consul" ? module.target_ec2_instances : module.target_ec2_shim
depends_on = [step.create_vpc]
providers = {
enos = provider.enos.ubuntu
}
variables {
ami_id = step.ec2_info.ami_ids["arm64"]["ubuntu"]["22.04"]
awskms_unseal_key_arn = step.create_vpc.kms_key_arn
cluster_tag_key = global.backend_tag_key
common_tags = global.tags
vpc_id = step.create_vpc.vpc_id
}
}
step "create_backend_cluster" {
module = "backend_${matrix.backend}"
depends_on = [
step.create_vault_cluster_backend_targets
]
providers = {
enos = provider.enos.ubuntu
}
variables {
cluster_name = step.create_vault_cluster_backend_targets.cluster_name
cluster_tag_key = global.backend_tag_key
license = (matrix.backend == "consul" && var.backend_edition == "ent") ? step.read_backend_license.license : null
release = {
edition = var.backend_edition
version = matrix.consul_version
}
target_hosts = step.create_vault_cluster_backend_targets.hosts
}
}
step "create_vault_cluster" { step "create_vault_cluster" {
module = module.vault_cluster module = module.vault_cluster
depends_on = [ depends_on = [
step.create_backend_cluster,
step.build_vault, step.build_vault,
step.create_vault_cluster_targets step.create_vault_cluster_targets
] ]
@@ -101,17 +170,42 @@ scenario "proxy" {
} }
variables { variables {
artifactory_release = matrix.artifact_source == "artifactory" ? step.build_vault.vault_artifactory_release : null artifactory_release = matrix.artifact_source == "artifactory" ? step.build_vault.vault_artifactory_release : null
awskms_unseal_key_arn = step.create_vpc.kms_key_arn awskms_unseal_key_arn = step.create_vpc.kms_key_arn
cluster_name = step.create_vault_cluster_targets.cluster_name backend_cluster_name = step.create_vault_cluster_backend_targets.cluster_name
enable_audit_devices = var.vault_enable_audit_devices backend_cluster_tag_key = global.backend_tag_key
install_dir = var.vault_install_dir cluster_name = step.create_vault_cluster_targets.cluster_name
license = matrix.edition != "oss" ? step.read_license.license : null consul_license = (matrix.backend == "consul" && var.backend_edition == "ent") ? step.read_backend_license.license : null
local_artifact_path = local.bundle_path consul_release = matrix.backend == "consul" ? {
packages = concat(global.packages, global.distro_packages[matrix.distro]) edition = var.backend_edition
storage_backend = "raft" version = matrix.consul_version
target_hosts = step.create_vault_cluster_targets.hosts } : null
unseal_method = "shamir" enable_audit_devices = var.vault_enable_audit_devices
install_dir = local.vault_install_dir
license = matrix.edition != "ce" ? step.read_vault_license.license : null
local_artifact_path = local.artifact_path
manage_service = local.manage_service
packages = concat(global.packages, global.distro_packages[matrix.distro])
storage_backend = matrix.backend
target_hosts = step.create_vault_cluster_targets.hosts
unseal_method = matrix.seal
}
}
// Wait for our cluster to elect a leader
step "wait_for_leader" {
module = module.vault_wait_for_leader
depends_on = [step.create_vault_cluster]
providers = {
enos = local.enos_provider[matrix.distro]
}
variables {
timeout = 120 # seconds
vault_hosts = step.create_vault_cluster_targets.hosts
vault_install_dir = local.vault_install_dir
vault_root_token = step.create_vault_cluster.root_token
} }
} }
@@ -127,12 +221,147 @@ scenario "proxy" {
} }
variables { variables {
vault_instances = step.create_vault_cluster_targets.hosts vault_install_dir = local.vault_install_dir
vault_root_token = step.create_vault_cluster.root_token vault_instances = step.create_vault_cluster_targets.hosts
vault_root_token = step.create_vault_cluster.root_token
} }
} }
output "awkms_unseal_key_arn" { step "get_vault_cluster_ips" {
module = module.vault_get_cluster_ips
depends_on = [step.wait_for_leader]
providers = {
enos = local.enos_provider[matrix.distro]
}
variables {
vault_hosts = step.create_vault_cluster_targets.hosts
vault_install_dir = local.vault_install_dir
vault_root_token = step.create_vault_cluster.root_token
}
}
step "verify_vault_version" {
module = module.vault_verify_version
depends_on = [step.create_vault_cluster]
providers = {
enos = local.enos_provider[matrix.distro]
}
variables {
vault_instances = step.create_vault_cluster_targets.hosts
vault_edition = matrix.edition
vault_install_dir = local.vault_install_dir
vault_product_version = matrix.artifact_source == "local" ? step.get_local_metadata.version : var.vault_product_version
vault_revision = matrix.artifact_source == "local" ? step.get_local_metadata.revision : var.vault_revision
vault_build_date = matrix.artifact_source == "local" ? step.get_local_metadata.build_date : var.vault_build_date
vault_root_token = step.create_vault_cluster.root_token
}
}
step "verify_vault_unsealed" {
module = module.vault_verify_unsealed
depends_on = [step.create_vault_cluster]
providers = {
enos = local.enos_provider[matrix.distro]
}
variables {
vault_install_dir = local.vault_install_dir
vault_instances = step.create_vault_cluster_targets.hosts
}
}
step "verify_write_test_data" {
module = module.vault_verify_write_data
depends_on = [
step.create_vault_cluster,
step.get_vault_cluster_ips
]
providers = {
enos = local.enos_provider[matrix.distro]
}
variables {
leader_public_ip = step.get_vault_cluster_ips.leader_public_ip
leader_private_ip = step.get_vault_cluster_ips.leader_private_ip
vault_instances = step.create_vault_cluster_targets.hosts
vault_install_dir = local.vault_install_dir
vault_root_token = step.create_vault_cluster.root_token
}
}
step "verify_raft_auto_join_voter" {
skip_step = matrix.backend != "raft"
module = module.vault_verify_raft_auto_join_voter
depends_on = [step.create_vault_cluster]
providers = {
enos = local.enos_provider[matrix.distro]
}
variables {
vault_install_dir = local.vault_install_dir
vault_instances = step.create_vault_cluster_targets.hosts
vault_root_token = step.create_vault_cluster.root_token
}
}
step "verify_replication" {
module = module.vault_verify_replication
depends_on = [step.create_vault_cluster]
providers = {
enos = local.enos_provider[matrix.distro]
}
variables {
vault_edition = matrix.edition
vault_install_dir = local.vault_install_dir
vault_instances = step.create_vault_cluster_targets.hosts
}
}
step "verify_read_test_data" {
module = module.vault_verify_read_data
depends_on = [
step.verify_write_test_data,
step.verify_replication
]
providers = {
enos = local.enos_provider[matrix.distro]
}
variables {
node_public_ips = step.get_vault_cluster_ips.follower_public_ips
vault_install_dir = local.vault_install_dir
}
}
step "verify_ui" {
module = module.vault_verify_ui
depends_on = [step.create_vault_cluster]
providers = {
enos = local.enos_provider[matrix.distro]
}
variables {
vault_instances = step.create_vault_cluster_targets.hosts
}
}
output "audit_device_file_path" {
description = "The file path for the file audit device, if enabled"
value = step.create_vault_cluster.audit_device_file_path
}
output "awskms_unseal_key_arn" {
description = "The Vault cluster KMS key arn" description = "The Vault cluster KMS key arn"
value = step.create_vpc.kms_key_arn value = step.create_vpc.kms_key_arn
} }

View File

@@ -9,7 +9,7 @@ scenario "replication" {
arch = ["amd64", "arm64"] arch = ["amd64", "arm64"]
artifact_source = ["local", "crt", "artifactory"] artifact_source = ["local", "crt", "artifactory"]
artifact_type = ["bundle", "package"] artifact_type = ["bundle", "package"]
consul_version = ["1.14.2", "1.13.4", "1.12.7"] consul_version = ["1.12.9", "1.13.9", "1.14.9", "1.15.5", "1.16.1"]
distro = ["ubuntu", "rhel"] distro = ["ubuntu", "rhel"]
edition = ["ent", "ent.fips1402", "ent.hsm", "ent.hsm.fips1402"] edition = ["ent", "ent.fips1402", "ent.hsm", "ent.hsm.fips1402"]
primary_backend = ["raft", "consul"] primary_backend = ["raft", "consul"]
@@ -48,6 +48,11 @@ scenario "replication" {
vault_install_dir = matrix.artifact_type == "bundle" ? var.vault_install_dir : global.vault_install_dir_packages[matrix.distro] vault_install_dir = matrix.artifact_type == "bundle" ? var.vault_install_dir : global.vault_install_dir_packages[matrix.distro]
} }
step "get_local_metadata" {
skip_step = matrix.artifact_source != "local"
module = module.get_local_metadata
}
step "build_vault" { step "build_vault" {
module = "build_${matrix.artifact_source}" module = "build_${matrix.artifact_source}"
@@ -84,7 +89,7 @@ scenario "replication" {
// This step reads the contents of the backend license if we're using a Consul backend and // This step reads the contents of the backend license if we're using a Consul backend and
// the edition is "ent". // the edition is "ent".
step "read_backend_license" { step "read_backend_license" {
skip_step = (matrix.primary_backend == "raft" && matrix.secondary_backend == "raft") || var.backend_edition == "oss" skip_step = (matrix.primary_backend == "raft" && matrix.secondary_backend == "raft") || var.backend_edition == "ce"
module = module.read_license module = module.read_license
variables { variables {
@@ -241,7 +246,7 @@ scenario "replication" {
} : null } : null
enable_audit_devices = var.vault_enable_audit_devices enable_audit_devices = var.vault_enable_audit_devices
install_dir = local.vault_install_dir install_dir = local.vault_install_dir
license = matrix.edition != "oss" ? step.read_vault_license.license : null license = matrix.edition != "ce" ? step.read_vault_license.license : null
local_artifact_path = local.artifact_path local_artifact_path = local.artifact_path
manage_service = local.manage_service manage_service = local.manage_service
packages = concat(global.packages, global.distro_packages[matrix.distro]) packages = concat(global.packages, global.distro_packages[matrix.distro])
@@ -298,7 +303,7 @@ scenario "replication" {
} : null } : null
enable_audit_devices = var.vault_enable_audit_devices enable_audit_devices = var.vault_enable_audit_devices
install_dir = local.vault_install_dir install_dir = local.vault_install_dir
license = matrix.edition != "oss" ? step.read_vault_license.license : null license = matrix.edition != "ce" ? step.read_vault_license.license : null
local_artifact_path = local.artifact_path local_artifact_path = local.artifact_path
manage_service = local.manage_service manage_service = local.manage_service
packages = concat(global.packages, global.distro_packages[matrix.distro]) packages = concat(global.packages, global.distro_packages[matrix.distro])
@@ -340,6 +345,42 @@ scenario "replication" {
} }
} }
step "verify_vault_version" {
module = module.vault_verify_version
depends_on = [
step.create_primary_cluster
]
providers = {
enos = local.enos_provider[matrix.distro]
}
variables {
vault_instances = step.create_primary_cluster_targets.hosts
vault_edition = matrix.edition
vault_install_dir = local.vault_install_dir
vault_product_version = matrix.artifact_source == "local" ? step.get_local_metadata.version : var.vault_product_version
vault_revision = matrix.artifact_source == "local" ? step.get_local_metadata.revision : var.vault_revision
vault_build_date = matrix.artifact_source == "local" ? step.get_local_metadata.build_date : var.vault_build_date
vault_root_token = step.create_primary_cluster.root_token
}
}
step "verify_ui" {
module = module.vault_verify_ui
depends_on = [
step.create_primary_cluster
]
providers = {
enos = local.enos_provider[matrix.distro]
}
variables {
vault_instances = step.create_primary_cluster_targets.hosts
}
}
step "get_primary_cluster_ips" { step "get_primary_cluster_ips" {
module = module.vault_get_cluster_ips module = module.vault_get_cluster_ips
depends_on = [step.verify_that_vault_primary_cluster_is_unsealed] depends_on = [step.verify_that_vault_primary_cluster_is_unsealed]
@@ -349,12 +390,21 @@ scenario "replication" {
} }
variables { variables {
vault_instances = step.create_primary_cluster_targets.hosts vault_hosts = step.create_primary_cluster_targets.hosts
vault_install_dir = local.vault_install_dir vault_install_dir = local.vault_install_dir
vault_root_token = step.create_primary_cluster.root_token vault_root_token = step.create_primary_cluster.root_token
} }
} }
step "get_primary_cluster_replication_data" {
module = module.replication_data
depends_on = [step.get_primary_cluster_ips]
variables {
follower_hosts = step.get_primary_cluster_ips.follower_hosts
}
}
step "get_secondary_cluster_ips" { step "get_secondary_cluster_ips" {
module = module.vault_get_cluster_ips module = module.vault_get_cluster_ips
depends_on = [step.verify_that_vault_secondary_cluster_is_unsealed] depends_on = [step.verify_that_vault_secondary_cluster_is_unsealed]
@@ -364,7 +414,7 @@ scenario "replication" {
} }
variables { variables {
vault_instances = step.create_secondary_cluster_targets.hosts vault_hosts = step.create_secondary_cluster_targets.hosts
vault_install_dir = local.vault_install_dir vault_install_dir = local.vault_install_dir
vault_root_token = step.create_secondary_cluster.root_token vault_root_token = step.create_secondary_cluster.root_token
} }
@@ -542,7 +592,7 @@ scenario "replication" {
force_unseal = matrix.primary_seal == "shamir" force_unseal = matrix.primary_seal == "shamir"
initialize_cluster = false initialize_cluster = false
install_dir = local.vault_install_dir install_dir = local.vault_install_dir
license = matrix.edition != "oss" ? step.read_vault_license.license : null license = matrix.edition != "ce" ? step.read_vault_license.license : null
local_artifact_path = local.artifact_path local_artifact_path = local.artifact_path
manage_service = local.manage_service manage_service = local.manage_service
packages = concat(global.packages, global.distro_packages[matrix.distro]) packages = concat(global.packages, global.distro_packages[matrix.distro])
@@ -555,7 +605,7 @@ scenario "replication" {
} }
} }
step "verify_addtional_primary_nodes_are_unsealed" { step "verify_additional_primary_nodes_are_unsealed" {
module = module.vault_verify_unsealed module = module.vault_verify_unsealed
depends_on = [step.add_additional_nodes_to_primary_cluster] depends_on = [step.add_additional_nodes_to_primary_cluster]
@@ -575,7 +625,7 @@ scenario "replication" {
depends_on = [ depends_on = [
step.add_additional_nodes_to_primary_cluster, step.add_additional_nodes_to_primary_cluster,
step.create_primary_cluster, step.create_primary_cluster,
step.verify_addtional_primary_nodes_are_unsealed step.verify_additional_primary_nodes_are_unsealed
] ]
providers = { providers = {
@@ -592,8 +642,8 @@ scenario "replication" {
step "remove_primary_follower_1" { step "remove_primary_follower_1" {
module = module.shutdown_node module = module.shutdown_node
depends_on = [ depends_on = [
step.get_primary_cluster_ips, step.get_primary_cluster_replication_data,
step.verify_addtional_primary_nodes_are_unsealed step.verify_additional_primary_nodes_are_unsealed
] ]
providers = { providers = {
@@ -601,7 +651,7 @@ scenario "replication" {
} }
variables { variables {
node_public_ip = step.get_primary_cluster_ips.follower_public_ip_1 node_public_ip = step.get_primary_cluster_replication_data.follower_public_ip_1
} }
} }
@@ -621,12 +671,31 @@ scenario "replication" {
} }
} }
step "get_updated_primary_cluster_ips" { // After we've removed two nodes from the cluster we need to get an updated set of vault hosts
module = module.vault_get_cluster_ips // to work with.
step "get_remaining_hosts_replication_data" {
module = module.replication_data
depends_on = [ depends_on = [
step.add_additional_nodes_to_primary_cluster, step.get_primary_cluster_ips,
step.remove_primary_follower_1, step.remove_primary_leader,
step.remove_primary_leader ]
variables {
added_hosts = step.create_primary_cluster_additional_targets.hosts
added_hosts_count = var.vault_instance_count
initial_hosts = step.create_primary_cluster_targets.hosts
initial_hosts_count = var.vault_instance_count
removed_follower_host = step.get_primary_cluster_replication_data.follower_host_1
removed_primary_host = step.get_primary_cluster_ips.leader_host
}
}
// Wait for the remaining hosts in our cluster to elect a new leader.
step "wait_for_leader_in_remaining_hosts" {
module = module.vault_wait_for_leader
depends_on = [
step.remove_primary_leader,
step.get_remaining_hosts_replication_data,
] ]
providers = { providers = {
@@ -634,17 +703,41 @@ scenario "replication" {
} }
variables { variables {
vault_instances = step.create_primary_cluster_targets.hosts timeout = 120 # seconds
vault_install_dir = local.vault_install_dir vault_install_dir = local.vault_install_dir
added_vault_instances = step.create_primary_cluster_additional_targets.hosts vault_root_token = step.create_primary_cluster.root_token
vault_root_token = step.create_primary_cluster.root_token vault_hosts = step.get_remaining_hosts_replication_data.remaining_hosts
node_public_ip = step.get_primary_cluster_ips.follower_public_ip_2
} }
} }
// Get our new leader and follower IP addresses.
step "get_updated_primary_cluster_ips" {
module = module.vault_get_cluster_ips
depends_on = [
step.get_remaining_hosts_replication_data,
step.wait_for_leader_in_remaining_hosts,
]
providers = {
enos = local.enos_provider[matrix.distro]
}
variables {
vault_hosts = step.get_remaining_hosts_replication_data.remaining_hosts
vault_install_dir = local.vault_install_dir
vault_instance_count = step.get_remaining_hosts_replication_data.remaining_hosts_count
vault_root_token = step.create_primary_cluster.root_token
}
}
// Make sure the cluster has the correct performance replication state after the new leader election.
step "verify_updated_performance_replication" { step "verify_updated_performance_replication" {
module = module.vault_verify_performance_replication module = module.vault_verify_performance_replication
depends_on = [step.get_updated_primary_cluster_ips] depends_on = [
step.get_remaining_hosts_replication_data,
step.wait_for_leader_in_remaining_hosts,
step.get_updated_primary_cluster_ips,
]
providers = { providers = {
enos = local.enos_provider[matrix.distro] enos = local.enos_provider[matrix.distro]
@@ -709,6 +802,11 @@ scenario "replication" {
value = step.create_secondary_cluster_targets.hosts value = step.create_secondary_cluster_targets.hosts
} }
output "remaining_hosts" {
description = "The Vault cluster primary hosts after removing the leader and follower"
value = step.get_remaining_hosts_replication_data.remaining_hosts
}
output "initial_primary_replication_status" { output "initial_primary_replication_status" {
description = "The Vault primary cluster performance replication status" description = "The Vault primary cluster performance replication status"
value = step.verify_performance_replication.primary_replication_status value = step.verify_performance_replication.primary_replication_status

View File

@@ -4,12 +4,12 @@
scenario "smoke" { scenario "smoke" {
matrix { matrix {
arch = ["amd64", "arm64"] arch = ["amd64", "arm64"]
backend = ["consul", "raft"]
artifact_source = ["local", "crt", "artifactory"] artifact_source = ["local", "crt", "artifactory"]
artifact_type = ["bundle", "package"] artifact_type = ["bundle", "package"]
consul_version = ["1.14.2", "1.13.4", "1.12.7"] backend = ["consul", "raft"]
consul_version = ["1.12.9", "1.13.9", "1.14.9", "1.15.5", "1.16.1"]
distro = ["ubuntu", "rhel"] distro = ["ubuntu", "rhel"]
edition = ["oss", "ent", "ent.fips1402", "ent.hsm", "ent.hsm.fips1402"] edition = ["ce", "ent", "ent.fips1402", "ent.hsm", "ent.hsm.fips1402"]
seal = ["awskms", "shamir"] seal = ["awskms", "shamir"]
# Our local builder always creates bundles # Our local builder always creates bundles
@@ -84,7 +84,7 @@ scenario "smoke" {
// This step reads the contents of the backend license if we're using a Consul backend and // This step reads the contents of the backend license if we're using a Consul backend and
// the edition is "ent". // the edition is "ent".
step "read_backend_license" { step "read_backend_license" {
skip_step = matrix.backend == "raft" || var.backend_edition == "oss" skip_step = matrix.backend == "raft" || var.backend_edition == "ce"
module = module.read_license module = module.read_license
variables { variables {
@@ -93,7 +93,7 @@ scenario "smoke" {
} }
step "read_vault_license" { step "read_vault_license" {
skip_step = matrix.edition == "oss" skip_step = matrix.edition == "ce"
module = module.read_license module = module.read_license
variables { variables {
@@ -182,7 +182,7 @@ scenario "smoke" {
} : null } : null
enable_audit_devices = var.vault_enable_audit_devices enable_audit_devices = var.vault_enable_audit_devices
install_dir = local.vault_install_dir install_dir = local.vault_install_dir
license = matrix.edition != "oss" ? step.read_vault_license.license : null license = matrix.edition != "ce" ? step.read_vault_license.license : null
local_artifact_path = local.artifact_path local_artifact_path = local.artifact_path
manage_service = local.manage_service manage_service = local.manage_service
packages = concat(global.packages, global.distro_packages[matrix.distro]) packages = concat(global.packages, global.distro_packages[matrix.distro])
@@ -192,8 +192,9 @@ scenario "smoke" {
} }
} }
step "get_vault_cluster_ips" { // Wait for our cluster to elect a leader
module = module.vault_get_cluster_ips step "wait_for_leader" {
module = module.vault_wait_for_leader
depends_on = [step.create_vault_cluster] depends_on = [step.create_vault_cluster]
providers = { providers = {
@@ -201,7 +202,23 @@ scenario "smoke" {
} }
variables { variables {
vault_instances = step.create_vault_cluster_targets.hosts timeout = 120 # seconds
vault_hosts = step.create_vault_cluster_targets.hosts
vault_install_dir = local.vault_install_dir
vault_root_token = step.create_vault_cluster.root_token
}
}
step "get_vault_cluster_ips" {
module = module.vault_get_cluster_ips
depends_on = [step.wait_for_leader]
providers = {
enos = local.enos_provider[matrix.distro]
}
variables {
vault_hosts = step.create_vault_cluster_targets.hosts
vault_install_dir = local.vault_install_dir vault_install_dir = local.vault_install_dir
vault_root_token = step.create_vault_cluster.root_token vault_root_token = step.create_vault_cluster.root_token
} }
@@ -228,7 +245,7 @@ scenario "smoke" {
step "verify_vault_unsealed" { step "verify_vault_unsealed" {
module = module.vault_verify_unsealed module = module.vault_verify_unsealed
depends_on = [step.create_vault_cluster] depends_on = [step.wait_for_leader]
providers = { providers = {
enos = local.enos_provider[matrix.distro] enos = local.enos_provider[matrix.distro]
@@ -261,9 +278,12 @@ scenario "smoke" {
} }
step "verify_raft_auto_join_voter" { step "verify_raft_auto_join_voter" {
skip_step = matrix.backend != "raft" skip_step = matrix.backend != "raft"
module = module.vault_verify_raft_auto_join_voter module = module.vault_verify_raft_auto_join_voter
depends_on = [step.create_vault_cluster] depends_on = [
step.create_vault_cluster,
step.get_vault_cluster_ips
]
providers = { providers = {
enos = local.enos_provider[matrix.distro] enos = local.enos_provider[matrix.distro]
@@ -277,8 +297,11 @@ scenario "smoke" {
} }
step "verify_replication" { step "verify_replication" {
module = module.vault_verify_replication module = module.vault_verify_replication
depends_on = [step.create_vault_cluster] depends_on = [
step.create_vault_cluster,
step.get_vault_cluster_ips
]
providers = { providers = {
enos = local.enos_provider[matrix.distro] enos = local.enos_provider[matrix.distro]
@@ -309,16 +332,18 @@ scenario "smoke" {
} }
step "verify_ui" { step "verify_ui" {
module = module.vault_verify_ui module = module.vault_verify_ui
depends_on = [step.create_vault_cluster] depends_on = [
step.create_vault_cluster,
step.get_vault_cluster_ips
]
providers = { providers = {
enos = local.enos_provider[matrix.distro] enos = local.enos_provider[matrix.distro]
} }
variables { variables {
vault_instances = step.create_vault_cluster_targets.hosts vault_instances = step.create_vault_cluster_targets.hosts
vault_install_dir = local.vault_install_dir
} }
} }

View File

@@ -3,7 +3,7 @@
scenario "ui" { scenario "ui" {
matrix { matrix {
edition = ["oss", "ent"] edition = ["ce", "ent"]
backend = ["consul", "raft"] backend = ["consul", "raft"]
} }
@@ -20,12 +20,12 @@ scenario "ui" {
backend_license_path = abspath(var.backend_license_path != null ? var.backend_license_path : joinpath(path.root, "./support/consul.hclic")) backend_license_path = abspath(var.backend_license_path != null ? var.backend_license_path : joinpath(path.root, "./support/consul.hclic"))
backend_tag_key = "VaultStorage" backend_tag_key = "VaultStorage"
build_tags = { build_tags = {
"oss" = ["ui"] "ce" = ["ui"]
"ent" = ["ui", "enterprise", "ent"] "ent" = ["ui", "enterprise", "ent"]
} }
bundle_path = abspath(var.vault_artifact_path) bundle_path = abspath(var.vault_artifact_path)
distro = "ubuntu" distro = "ubuntu"
consul_version = "1.14.2" consul_version = "1.16.1"
seal = "awskms" seal = "awskms"
tags = merge({ tags = merge({
"Project Name" : var.project_name "Project Name" : var.project_name
@@ -39,7 +39,7 @@ scenario "ui" {
vault_install_dir = var.vault_install_dir vault_install_dir = var.vault_install_dir
vault_license_path = abspath(var.vault_license_path != null ? var.vault_license_path : joinpath(path.root, "./support/vault.hclic")) vault_license_path = abspath(var.vault_license_path != null ? var.vault_license_path : joinpath(path.root, "./support/vault.hclic"))
vault_tag_key = "Type" // enos_vault_start expects Type as the tag key vault_tag_key = "Type" // enos_vault_start expects Type as the tag key
ui_test_filter = var.ui_test_filter != null && try(trimspace(var.ui_test_filter), "") != "" ? var.ui_test_filter : (matrix.edition == "oss") ? "!enterprise" : null ui_test_filter = var.ui_test_filter != null && try(trimspace(var.ui_test_filter), "") != "" ? var.ui_test_filter : (matrix.edition == "ce") ? "!enterprise" : null
} }
step "build_vault" { step "build_vault" {
@@ -71,7 +71,7 @@ scenario "ui" {
// This step reads the contents of the backend license if we're using a Consul backend and // This step reads the contents of the backend license if we're using a Consul backend and
// the edition is "ent". // the edition is "ent".
step "read_backend_license" { step "read_backend_license" {
skip_step = matrix.backend == "raft" || var.backend_edition == "oss" skip_step = matrix.backend == "raft" || var.backend_edition == "ce"
module = module.read_license module = module.read_license
variables { variables {
@@ -80,7 +80,7 @@ scenario "ui" {
} }
step "read_vault_license" { step "read_vault_license" {
skip_step = matrix.edition == "oss" skip_step = matrix.edition == "ce"
module = module.read_license module = module.read_license
variables { variables {
@@ -168,7 +168,7 @@ scenario "ui" {
} : null } : null
enable_audit_devices = var.vault_enable_audit_devices enable_audit_devices = var.vault_enable_audit_devices
install_dir = local.vault_install_dir install_dir = local.vault_install_dir
license = matrix.edition != "oss" ? step.read_vault_license.license : null license = matrix.edition != "ce" ? step.read_vault_license.license : null
local_artifact_path = local.bundle_path local_artifact_path = local.bundle_path
packages = global.distro_packages["ubuntu"] packages = global.distro_packages["ubuntu"]
storage_backend = matrix.backend storage_backend = matrix.backend
@@ -177,8 +177,26 @@ scenario "ui" {
} }
} }
// Wait for our cluster to elect a leader
step "wait_for_leader" {
module = module.vault_wait_for_leader
depends_on = [step.create_vault_cluster]
providers = {
enos = provider.enos.ubuntu
}
variables {
timeout = 120 # seconds
vault_hosts = step.create_vault_cluster_targets.hosts
vault_install_dir = local.vault_install_dir
vault_root_token = step.create_vault_cluster.root_token
}
}
step "test_ui" { step "test_ui" {
module = module.vault_test_ui module = module.vault_test_ui
depends_on = [step.wait_for_leader]
variables { variables {
vault_addr = step.create_vault_cluster_targets.hosts[0].public_ip vault_addr = step.create_vault_cluster_targets.hosts[0].public_ip

View File

@@ -4,12 +4,17 @@
scenario "upgrade" { scenario "upgrade" {
matrix { matrix {
arch = ["amd64", "arm64"] arch = ["amd64", "arm64"]
backend = ["consul", "raft"]
artifact_source = ["local", "crt", "artifactory"] artifact_source = ["local", "crt", "artifactory"]
artifact_type = ["bundle", "package"] artifact_type = ["bundle", "package"]
consul_version = ["1.14.2", "1.13.4", "1.12.7"] backend = ["consul", "raft"]
consul_version = ["1.14.9", "1.15.5", "1.16.1"]
distro = ["ubuntu", "rhel"] distro = ["ubuntu", "rhel"]
edition = ["oss", "ent", "ent.fips1402", "ent.hsm", "ent.hsm.fips1402"] edition = ["ce", "ent", "ent.fips1402", "ent.hsm", "ent.hsm.fips1402"]
// NOTE: when backporting the initial version make sure we don't include initial versions that
// are a higher minor version that our release candidate. Also, prior to 1.11.x the
// /v1/sys/seal-status API has known issues that could cause this scenario to fail when using
// those earlier versions.
initial_version = ["1.8.12", "1.9.10", "1.10.11", "1.11.12", "1.12.11", "1.13.6", "1.14.2"]
seal = ["awskms", "shamir"] seal = ["awskms", "shamir"]
# Our local builder always creates bundles # Our local builder always creates bundles
@@ -23,6 +28,12 @@ scenario "upgrade" {
arch = ["arm64"] arch = ["arm64"]
edition = ["ent.fips1402", "ent.hsm", "ent.hsm.fips1402"] edition = ["ent.fips1402", "ent.hsm", "ent.hsm.fips1402"]
} }
# FIPS 140-2 editions began at 1.10
exclude {
edition = ["ent.fips1402", "ent.hsm.fips1402"]
initial_version = ["1.8.12", "1.9.10"]
}
} }
terraform_cli = terraform_cli.default terraform_cli = terraform_cli.default
@@ -85,7 +96,7 @@ scenario "upgrade" {
// This step reads the contents of the backend license if we're using a Consul backend and // This step reads the contents of the backend license if we're using a Consul backend and
// the edition is "ent". // the edition is "ent".
step "read_backend_license" { step "read_backend_license" {
skip_step = matrix.backend == "raft" || var.backend_edition == "oss" skip_step = matrix.backend == "raft" || var.backend_edition == "ce"
module = module.read_license module = module.read_license
variables { variables {
@@ -94,7 +105,7 @@ scenario "upgrade" {
} }
step "read_vault_license" { step "read_vault_license" {
skip_step = matrix.edition == "oss" skip_step = matrix.edition == "ce"
module = module.read_license module = module.read_license
variables { variables {
@@ -182,12 +193,15 @@ scenario "upgrade" {
} : null } : null
enable_audit_devices = var.vault_enable_audit_devices enable_audit_devices = var.vault_enable_audit_devices
install_dir = local.vault_install_dir install_dir = local.vault_install_dir
license = matrix.edition != "oss" ? step.read_vault_license.license : null license = matrix.edition != "ce" ? step.read_vault_license.license : null
packages = concat(global.packages, global.distro_packages[matrix.distro]) packages = concat(global.packages, global.distro_packages[matrix.distro])
release = var.vault_upgrade_initial_release release = {
storage_backend = matrix.backend edition = matrix.edition
target_hosts = step.create_vault_cluster_targets.hosts version = matrix.initial_version
unseal_method = matrix.seal }
storage_backend = matrix.backend
target_hosts = step.create_vault_cluster_targets.hosts
unseal_method = matrix.seal
} }
} }
@@ -200,7 +214,7 @@ scenario "upgrade" {
} }
variables { variables {
vault_instances = step.create_vault_cluster_targets.hosts vault_hosts = step.create_vault_cluster_targets.hosts
vault_install_dir = local.vault_install_dir vault_install_dir = local.vault_install_dir
vault_root_token = step.create_vault_cluster.root_token vault_root_token = step.create_vault_cluster.root_token
} }
@@ -210,7 +224,7 @@ scenario "upgrade" {
module = module.vault_verify_write_data module = module.vault_verify_write_data
depends_on = [ depends_on = [
step.create_vault_cluster, step.create_vault_cluster,
step.get_vault_cluster_ips step.get_vault_cluster_ips,
] ]
providers = { providers = {
@@ -232,6 +246,7 @@ scenario "upgrade" {
module = module.vault_upgrade module = module.vault_upgrade
depends_on = [ depends_on = [
step.create_vault_cluster, step.create_vault_cluster,
step.verify_write_test_data,
] ]
providers = { providers = {
@@ -249,11 +264,49 @@ scenario "upgrade" {
} }
} }
// Wait for our upgraded cluster to elect a leader
step "wait_for_leader_after_upgrade" {
module = module.vault_wait_for_leader
depends_on = [
step.create_vault_cluster,
step.upgrade_vault,
]
providers = {
enos = local.enos_provider[matrix.distro]
}
variables {
timeout = 120 # seconds
vault_hosts = step.create_vault_cluster_targets.hosts
vault_install_dir = local.vault_install_dir
vault_root_token = step.create_vault_cluster.root_token
}
}
step "get_updated_vault_cluster_ips" {
module = module.vault_get_cluster_ips
depends_on = [
step.create_vault_cluster,
step.upgrade_vault,
step.wait_for_leader_after_upgrade,
]
providers = {
enos = local.enos_provider[matrix.distro]
}
variables {
vault_hosts = step.create_vault_cluster_targets.hosts
vault_install_dir = local.vault_install_dir
vault_root_token = step.create_vault_cluster.root_token
}
}
step "verify_vault_version" { step "verify_vault_version" {
module = module.vault_verify_version module = module.vault_verify_version
depends_on = [ depends_on = [
step.create_backend_cluster, step.get_updated_vault_cluster_ips,
step.upgrade_vault,
] ]
providers = { providers = {
@@ -271,30 +324,10 @@ scenario "upgrade" {
} }
} }
step "get_updated_vault_cluster_ips" {
module = module.vault_get_cluster_ips
depends_on = [
step.create_vault_cluster,
step.upgrade_vault
]
providers = {
enos = local.enos_provider[matrix.distro]
}
variables {
vault_instances = step.create_vault_cluster_targets.hosts
vault_install_dir = local.vault_install_dir
vault_root_token = step.create_vault_cluster.root_token
}
}
step "verify_vault_unsealed" { step "verify_vault_unsealed" {
module = module.vault_verify_unsealed module = module.vault_verify_unsealed
depends_on = [ depends_on = [
step.create_vault_cluster,
step.get_updated_vault_cluster_ips, step.get_updated_vault_cluster_ips,
step.upgrade_vault,
] ]
providers = { providers = {
@@ -329,8 +362,7 @@ scenario "upgrade" {
skip_step = matrix.backend != "raft" skip_step = matrix.backend != "raft"
module = module.vault_verify_raft_auto_join_voter module = module.vault_verify_raft_auto_join_voter
depends_on = [ depends_on = [
step.create_backend_cluster, step.get_updated_vault_cluster_ips,
step.upgrade_vault,
] ]
providers = { providers = {
@@ -344,6 +376,38 @@ scenario "upgrade" {
} }
} }
step "verify_replication" {
module = module.vault_verify_replication
depends_on = [
step.get_updated_vault_cluster_ips,
]
providers = {
enos = local.enos_provider[matrix.distro]
}
variables {
vault_edition = matrix.edition
vault_install_dir = local.vault_install_dir
vault_instances = step.create_vault_cluster_targets.hosts
}
}
step "verify_ui" {
module = module.vault_verify_ui
depends_on = [
step.get_updated_vault_cluster_ips,
]
providers = {
enos = local.enos_provider[matrix.distro]
}
variables {
vault_instances = step.create_vault_cluster_targets.hosts
}
}
output "audit_device_file_path" { output "audit_device_file_path" {
description = "The file path for the file audit device, if enabled" description = "The file path for the file audit device, if enabled"
value = step.create_vault_cluster.audit_device_file_path value = step.create_vault_cluster.audit_device_file_path

View File

@@ -11,7 +11,7 @@ terraform_cli "default" {
/* /*
provider_installation { provider_installation {
dev_overrides = { dev_overrides = {
"app.terraform.io/hashicorp-qti/enos" = abspath("../../enos-provider") "app.terraform.io/hashicorp-qti/enos" = abspath("../../enos-provider/dist")
} }
direct {} direct {}
} }

View File

@@ -48,7 +48,7 @@ variable "aws_ssh_private_key_path" {
variable "backend_edition" { variable "backend_edition" {
description = "The backend release edition if applicable" description = "The backend release edition if applicable"
type = string type = string
default = "oss" // or "ent" default = "ce" // or "ent"
} }
variable "backend_instance_type" { variable "backend_instance_type" {
@@ -122,14 +122,6 @@ variable "vault_artifact_type" {
default = "bundle" default = "bundle"
} }
variable "vault_autopilot_initial_release" {
description = "The Vault release to deploy before upgrading with autopilot"
default = {
edition = "ent"
version = "1.11.0"
}
}
variable "vault_artifact_path" { variable "vault_artifact_path" {
description = "Path to CRT generated or local vault.zip bundle" description = "Path to CRT generated or local vault.zip bundle"
type = string type = string
@@ -161,7 +153,7 @@ variable "vault_instance_count" {
} }
variable "vault_license_path" { variable "vault_license_path" {
description = "The path to a valid Vault enterprise edition license. This is only required for non-oss editions" description = "The path to a valid Vault enterprise edition license. This is only required for non-ce editions"
type = string type = string
default = null default = null
} }
@@ -193,7 +185,7 @@ variable "vault_revision" {
variable "vault_upgrade_initial_release" { variable "vault_upgrade_initial_release" {
description = "The Vault release to deploy before upgrading" description = "The Vault release to deploy before upgrading"
default = { default = {
edition = "oss" edition = "ce"
// Vault 1.10.5 has a known issue with retry_join. // Vault 1.10.5 has a known issue with retry_join.
version = "1.10.4" version = "1.10.4"
} }

View File

@@ -24,7 +24,7 @@
# aws_ssh_private_key_path = "./support/private_key.pem" # aws_ssh_private_key_path = "./support/private_key.pem"
# backend_edition is the backend (consul) release edition if applicable to the scenario. # backend_edition is the backend (consul) release edition if applicable to the scenario.
# backend_edition = "oss" # backend_edition = "ce"
# backend_license_path is the license for the backend if applicable (Consul Enterprise)". # backend_license_path is the license for the backend if applicable (Consul Enterprise)".
# backend_license_path = "./support/consul.hclic" # backend_license_path = "./support/consul.hclic"
@@ -75,14 +75,6 @@
# It should be 'package' for .deb or # .rpm package and 'bundle' for .zip bundles" # It should be 'package' for .deb or # .rpm package and 'bundle' for .zip bundles"
# vault_artifact_type = "bundle" # vault_artifact_type = "bundle"
# vault_autopilot_initial_release is the version of Vault to deploy before doing an autopilot upgrade
# to the test artifact.
# vault_autopilot_initial_release = {
# edition = "ent"
# version = "1.11.0"
# }
# }
# vault_build_date is the build date for Vault artifact. Some validations will require the binary build # vault_build_date is the build date for Vault artifact. Some validations will require the binary build
# date to match" # date to match"
# vault_build_date = "2023-07-07T14:06:37Z" // make ci-get-date for example # vault_build_date = "2023-07-07T14:06:37Z" // make ci-get-date for example
@@ -108,7 +100,7 @@
# vault_instance_count = 3 # vault_instance_count = 3
# vault_license_path is the path to a valid Vault enterprise edition license. # vault_license_path is the path to a valid Vault enterprise edition license.
# This is only required for non-oss editions" # This is only required for non-ce editions"
# vault_license_path = "./support/vault.hclic" # vault_license_path = "./support/vault.hclic"
# vault_local_build_tags override the build tags we pass to the Go compiler for builder:local variants. # vault_local_build_tags override the build tags we pass to the Go compiler for builder:local variants.
@@ -122,16 +114,6 @@
# binary and cluster to report this version. # binary and cluster to report this version.
# vault_product_version = "1.15.0" # vault_product_version = "1.15.0"
# vault_upgrade_initial_release is the Vault release to deploy before upgrading.
# vault_revision is the git sha of Vault artifact we are testing. Some validations will expect the vault # vault_revision is the git sha of Vault artifact we are testing. Some validations will expect the vault
# binary and cluster to report this revision. # binary and cluster to report this revision.
# vault_revision = "df733361af26f8bb29b63704168bbc5ab8d083de" # vault_revision = "df733361af26f8bb29b63704168bbc5ab8d083de"
# vault_upgrade_initial_release is the Vault release to deploy before doing an in-place upgrade.
# vault_upgrade_initial_release = {
# edition = "oss"
# // Vault 1.10.5 has a known issue with retry_join.
# version = "1.10.4"
# }
# }

View File

@@ -3,7 +3,7 @@
scenario "k8s" { scenario "k8s" {
matrix { matrix {
edition = ["oss", "ent"] edition = ["ce", "ent"]
} }
terraform_cli = terraform_cli.default terraform_cli = terraform_cli.default
@@ -17,7 +17,7 @@ scenario "k8s" {
locals { locals {
image_path = abspath(var.vault_docker_image_archive) image_path = abspath(var.vault_docker_image_archive)
image_repo = var.vault_image_repository != null ? var.vault_image_repository : matrix.edition == "oss" ? "hashicorp/vault" : "hashicorp/vault-enterprise" image_repo = var.vault_image_repository != null ? var.vault_image_repository : matrix.edition == "ce" ? "hashicorp/vault" : "hashicorp/vault-enterprise"
image_tag = replace(var.vault_product_version, "+ent", "-ent") image_tag = replace(var.vault_product_version, "+ent", "-ent")
// The additional '-0' is required in the constraint since without it, the semver function will // The additional '-0' is required in the constraint since without it, the semver function will
@@ -27,7 +27,7 @@ scenario "k8s" {
} }
step "read_license" { step "read_license" {
skip_step = matrix.edition == "oss" skip_step = matrix.edition == "ce"
module = module.read_license module = module.read_license
variables { variables {
@@ -66,7 +66,7 @@ scenario "k8s" {
kubeconfig_base64 = step.create_kind_cluster.kubeconfig_base64 kubeconfig_base64 = step.create_kind_cluster.kubeconfig_base64
vault_edition = matrix.edition vault_edition = matrix.edition
vault_log_level = var.vault_log_level vault_log_level = var.vault_log_level
ent_license = matrix.edition != "oss" ? step.read_license.license : null ent_license = matrix.edition != "ce" ? step.read_license.license : null
} }
depends_on = [step.load_docker_image, step.create_kind_cluster] depends_on = [step.load_docker_image, step.create_kind_cluster]
@@ -101,7 +101,7 @@ scenario "k8s" {
step "verify_ui" { step "verify_ui" {
module = module.k8s_verify_ui module = module.k8s_verify_ui
skip_step = matrix.edition == "oss" skip_step = matrix.edition == "ce"
variables { variables {
vault_pods = step.deploy_vault.vault_pods vault_pods = step.deploy_vault.vault_pods

View File

@@ -7,7 +7,7 @@ terraform {
required_providers { required_providers {
enos = { enos = {
source = "app.terraform.io/hashicorp-qti/enos" source = "app.terraform.io/hashicorp-qti/enos"
version = ">= 0.4.0" version = ">= 0.4.4"
} }
} }
} }

View File

@@ -63,7 +63,7 @@ variable "release" {
description = "Consul release version and edition to install from releases.hashicorp.com" description = "Consul release version and edition to install from releases.hashicorp.com"
default = { default = {
version = "1.15.3" version = "1.15.3"
edition = "oss" edition = "ce"
} }
} }

View File

@@ -1,4 +1,4 @@
#!/bin/env bash #!/usr/bin/env bash
# Copyright (c) HashiCorp, Inc. # Copyright (c) HashiCorp, Inc.
# SPDX-License-Identifier: BUSL-1.1 # SPDX-License-Identifier: BUSL-1.1

View File

@@ -1,4 +1,4 @@
#!/bin/env bash #!/usr/bin/env bash
# Copyright (c) HashiCorp, Inc. # Copyright (c) HashiCorp, Inc.
# SPDX-License-Identifier: BUSL-1.1 # SPDX-License-Identifier: BUSL-1.1

View File

@@ -7,14 +7,6 @@ listener "tcp" {
storage "raft" { storage "raft" {
path = "/vault/data" path = "/vault/data"
autopilot {
cleanup_dead_servers = "true"
last_contact_threshold = "200ms"
last_contact_failure_threshold = "10m"
max_trailing_logs = 250000
min_quorum = 5
server_stabilization_time = "10s"
}
} }
service_registration "kubernetes" {} service_registration "kubernetes" {}

View File

@@ -1,4 +1,4 @@
#!/usr/bin/env bash #!/usr/bin/env bash
# Copyright (c) HashiCorp, Inc. # Copyright (c) HashiCorp, Inc.
# SPDX-License-Identifier: BUSL-1.1 # SPDX-License-Identifier: BUSL-1.1
@@ -9,14 +9,14 @@
set -e set -e
fail() { fail() {
echo "$1" 1>&2 echo "$1" 1>&2
exit 1 exit 1
} }
# Replication STATUS endpoint should have data.mode disabled for OSS release # Replication STATUS endpoint should have data.mode disabled for CE release
if [ "$VAULT_EDITION" == "oss" ]; then if [ "$VAULT_EDITION" == "ce" ]; then
if [ "$(echo "${STATUS}" | jq -r '.data.mode')" != "disabled" ]; then if [ "$(echo "${STATUS}" | jq -r '.data.mode')" != "disabled" ]; then
fail "replication data mode is not disabled for OSS release!" fail "replication data mode is not disabled for CE release!"
fi fi
else else
if [ "$(echo "${STATUS}" | jq -r '.data.dr')" == "" ]; then if [ "$(echo "${STATUS}" | jq -r '.data.dr')" == "" ]; then

View File

@@ -6,8 +6,8 @@
set -e set -e
fail() { fail() {
echo "$1" 1>&2 echo "$1" 1>&2
exit 1 exit 1
} }
if [ "${REDIRECT_URL}" != "http://localhost:8200/ui/" ]; then if [ "${REDIRECT_URL}" != "http://localhost:8200/ui/" ]; then

View File

@@ -12,7 +12,7 @@ terraform {
locals { locals {
instances = toset([for idx in range(var.vault_instance_count) : tostring(idx)]) instances = toset([for idx in range(var.vault_instance_count) : tostring(idx)])
expected_version = var.vault_edition == "oss" ? var.vault_product_version : "${var.vault_product_version}-ent" expected_version = var.vault_edition == "ce" ? var.vault_product_version : "${var.vault_product_version}-ent"
} }
resource "enos_remote_exec" "release_info" { resource "enos_remote_exec" "release_info" {
@@ -38,13 +38,13 @@ resource "enos_local_exec" "smoke-verify-version" {
for_each = enos_remote_exec.release_info for_each = enos_remote_exec.release_info
environment = { environment = {
VAULT_STATUS = jsonencode(jsondecode(each.value.stdout).status)
ACTUAL_VERSION = jsondecode(each.value.stdout).version ACTUAL_VERSION = jsondecode(each.value.stdout).version
BUILD_DATE = var.vault_build_date
CHECK_BUILD_DATE = var.check_build_date
EXPECTED_VERSION = var.vault_product_version, EXPECTED_VERSION = var.vault_product_version,
VAULT_EDITION = var.vault_edition, VAULT_EDITION = var.vault_edition,
VAULT_REVISION = var.vault_product_revision, VAULT_REVISION = var.vault_product_revision,
CHECK_BUILD_DATE = var.check_build_date VAULT_STATUS = jsonencode(jsondecode(each.value.stdout).status)
BUILD_DATE = var.vault_build_date
} }
scripts = [abspath("${path.module}/scripts/smoke-verify-version.sh")] scripts = [abspath("${path.module}/scripts/smoke-verify-version.sh")]

View File

@@ -8,38 +8,39 @@
set -e set -e
fail() { fail() {
echo "$1" 1>&2 echo "$1" 1>&2
exit 1 exit 1
} }
if [[ "${CHECK_BUILD_DATE}" == "false" ]]; then if [[ "${CHECK_BUILD_DATE}" == "false" ]]; then
expected_build_date="" expected_build_date=""
else else
build_date="${BUILD_DATE}" cfg_build_date="${BUILD_DATE}"
if [[ "${build_date}" == "" ]]; then if [[ "${cfg_build_date}" == "" ]]; then
build_date=$(echo "${VAULT_STATUS}" | jq -Mr .build_date) cfg_build_date=$(echo "${VAULT_STATUS}" | jq -Mr .build_date)
fi fi
expected_build_date=", built $build_date" expected_build_date=", built $cfg_build_date"
fi fi
vault_expected_version="Vault v${EXPECTED_VERSION} (${VAULT_REVISION})" vault_expected_version="Vault v${EXPECTED_VERSION} (${VAULT_REVISION})"
case "${VAULT_EDITION}" in case "${VAULT_EDITION}" in
oss) version_expected="${vault_expected_version}${expected_build_date}";; ce) version_expected="${vault_expected_version}${expected_build_date}";;
ent) version_expected="${vault_expected_version}${expected_build_date}";; ent) version_expected="${vault_expected_version}${expected_build_date}";;
ent.hsm) version_expected="${vault_expected_version}${expected_build_date} (cgo)";; ent.hsm) version_expected="${vault_expected_version}${expected_build_date} (cgo)";;
ent.fips1402) version_expected="${vault_expected_version}${expected_build_date} (cgo)" ;; ent.fips1402) version_expected="${vault_expected_version}${expected_build_date} (cgo)" ;;
ent.hsm.fips1402) version_expected="${vault_expected_version}${expected_build_date} (cgo)" ;; ent.hsm.fips1402) version_expected="${vault_expected_version}${expected_build_date} (cgo)" ;;
*) fail "(${VAULT_EDITION}) does not match any known Vault editions" *) fail "(${VAULT_EDITION}) does not match any known Vault editions"
esac esac
version_expected_nosha=$(echo "$version_expected" | awk '!($3="")' | sed 's/ / /' | sed -e 's/[[:space:]]*$//') version_expected_nosha=$(echo "$version_expected" | awk '!($3="")' | sed 's/ / /' | sed -e 's/[[:space:]]*$//')
if [[ "${ACTUAL_VERSION}" == "$version_expected_nosha" ]] || [[ "${ACTUAL_VERSION}" == "$version_expected" ]]; then if [[ "${ACTUAL_VERSION}" == "$version_expected_nosha" ]] || [[ "${ACTUAL_VERSION}" == "$version_expected" ]]; then
echo "Version verification succeeded!" echo "Version verification succeeded!"
else else
echo "CHECK_BUILD_DATE: ${CHECK_BUILD_DATE}" echo "Version checking enabled: ${CHECK_BUILD_DATE}" 1>&2
echo "BUILD_DATE: ${BUILD_DATE}" echo "Given build date: ${BUILD_DATE}" 1>&2
echo "build_date: ${build_date}" echo "Interpreted build date: ${cfg_build_date}" 1>&2
fail "expected Version=$version_expected or $version_expected_nosha, got: ${ACTUAL_VERSION}"
fail "expected Version=$version_expected or $version_expected_nosha, got: ${ACTUAL_VERSION}"
fi fi

View File

@@ -0,0 +1,104 @@
# Copyright (c) HashiCorp, Inc.
# SPDX-License-Identifier: BUSL-1.1
// An arithmetic module for calculating inputs and outputs for various replication steps.
// Get the first follower out of the hosts set
variable "follower_hosts" {
type = map(object({
private_ip = string
public_ip = string
}))
default = {}
}
output "follower_host_1" {
value = try(var.follower_hosts[0], null)
}
output "follower_public_ip_1" {
value = try(var.follower_hosts[0].public_ip, null)
}
output "follower_private_ip_1" {
value = try(var.follower_hosts[0].private_ip, null)
}
output "follower_host_2" {
value = try(var.follower_hosts[1], null)
}
output "follower_public_ip_2" {
value = try(var.follower_hosts[1].public_ip, null)
}
output "follower_private_ip_2" {
value = try(var.follower_hosts[1].private_ip, null)
}
// Calculate our remainder hosts after we've added and removed leader
variable "initial_hosts" {
type = map(object({
private_ip = string
public_ip = string
}))
default = {}
}
variable "initial_hosts_count" {
type = number
default = 0
}
variable "added_hosts" {
type = map(object({
private_ip = string
public_ip = string
}))
default = {}
}
variable "added_hosts_count" {
type = number
default = 0
}
variable "removed_primary_host" {
type = object({
private_ip = string
public_ip = string
})
default = null
}
variable "removed_follower_host" {
type = object({
private_ip = string
public_ip = string
})
default = null
}
locals {
remaining_hosts_count = max((var.initial_hosts_count + var.added_hosts_count - 2), 0)
indices = [for idx in range(local.remaining_hosts_count) : idx]
remaining_initial = setsubtract(values(var.initial_hosts), [var.removed_primary_host, var.removed_follower_host])
remaining_hosts_list = tolist(setunion(values(var.added_hosts), local.remaining_initial))
remaining_hosts = zipmap(local.indices, local.remaining_hosts_list)
}
output "remaining_initial_count" {
value = length(local.remaining_initial)
}
output "remaining_initial_hosts" {
value = local.remaining_initial
}
output "remaining_hosts_count" {
value = local.remaining_hosts_count
}
output "remaining_hosts" {
value = local.remaining_hosts
}

View File

@@ -55,12 +55,14 @@ locals {
} }
resource "enos_remote_exec" "set_up_approle_auth_and_agent" { resource "enos_remote_exec" "set_up_approle_auth_and_agent" {
content = templatefile("${path.module}/templates/set-up-approle-and-agent.sh", { environment = {
vault_install_dir = var.vault_install_dir VAULT_INSTALL_DIR = var.vault_install_dir,
vault_token = var.vault_root_token VAULT_TOKEN = var.vault_root_token,
vault_agent_template_destination = var.vault_agent_template_destination VAULT_AGENT_TEMPLATE_DESTINATION = var.vault_agent_template_destination,
vault_agent_template_contents = var.vault_agent_template_contents VAULT_AGENT_TEMPLATE_CONTENTS = var.vault_agent_template_contents,
}) }
scripts = [abspath("${path.module}/scripts/set-up-approle-and-agent.sh")]
transport = { transport = {
ssh = { ssh = {

View File

@@ -5,7 +5,7 @@
set -e set -e
binpath=${vault_install_dir}/vault binpath=${VAULT_INSTALL_DIR}/vault
fail() { fail() {
echo "$1" 1>&2 echo "$1" 1>&2
@@ -15,14 +15,14 @@ fail() {
test -x "$binpath" || fail "unable to locate vault binary at $binpath" test -x "$binpath" || fail "unable to locate vault binary at $binpath"
export VAULT_ADDR='http://127.0.0.1:8200' export VAULT_ADDR='http://127.0.0.1:8200'
export VAULT_TOKEN='${vault_token}' [[ -z "$VAULT_TOKEN" ]] && fail "VAULT_TOKEN env variable has not been set"
# If approle was already enabled, disable it as we're about to re-enable it (the || true is so we don't fail if it doesn't already exist) # If approle was already enabled, disable it as we're about to re-enable it (the || true is so we don't fail if it doesn't already exist)
$binpath auth disable approle || true $binpath auth disable approle || true
approle_create_status=$($binpath auth enable approle) $binpath auth enable approle
approle_status=$($binpath write auth/approle/role/agent-role secret_id_ttl=700h token_num_uses=1000 token_ttl=600h token_max_ttl=700h secret_id_num_uses=1000) $binpath write auth/approle/role/agent-role secret_id_ttl=700h token_num_uses=1000 token_ttl=600h token_max_ttl=700h secret_id_num_uses=1000
ROLEID=$($binpath read --format=json auth/approle/role/agent-role/role-id | jq -r '.data.role_id') ROLEID=$($binpath read --format=json auth/approle/role/agent-role/role-id | jq -r '.data.role_id')
@@ -36,8 +36,8 @@ if [[ "$SECRETID" == '' ]]; then
fail "expected SECRETID to be nonempty, but it is empty" fail "expected SECRETID to be nonempty, but it is empty"
fi fi
echo $ROLEID > /tmp/role-id echo "$ROLEID" > /tmp/role-id
echo $SECRETID > /tmp/secret-id echo "$SECRETID" > /tmp/secret-id
cat > /tmp/vault-agent.hcl <<- EOM cat > /tmp/vault-agent.hcl <<- EOM
pid_file = "/tmp/pidfile" pid_file = "/tmp/pidfile"
@@ -51,18 +51,18 @@ vault {
} }
cache { cache {
enforce_consistency = "always" enforce_consistency = "always"
use_auto_auth_token = true use_auto_auth_token = true
} }
listener "tcp" { listener "tcp" {
address = "127.0.0.1:8100" address = "127.0.0.1:8100"
tls_disable = true tls_disable = true
} }
template { template {
destination = "${vault_agent_template_destination}" destination = "${VAULT_AGENT_TEMPLATE_DESTINATION}"
contents = "${vault_agent_template_contents}" contents = "${VAULT_AGENT_TEMPLATE_CONTENTS}"
exec { exec {
command = "pkill -F /tmp/pidfile" command = "pkill -F /tmp/pidfile"
} }
@@ -72,7 +72,7 @@ auto_auth {
method { method {
type = "approle" type = "approle"
config = { config = {
role_id_file_path = "/tmp/role-id" role_id_file_path = "/tmp/role-id"
secret_id_file_path = "/tmp/secret-id" secret_id_file_path = "/tmp/secret-id"
} }
} }
@@ -89,7 +89,7 @@ EOM
pkill -F /tmp/pidfile || true pkill -F /tmp/pidfile || true
# If the template file already exists, remove it # If the template file already exists, remove it
rm ${vault_agent_template_destination} || true rm "${VAULT_AGENT_TEMPLATE_DESTINATION}" || true
# Run agent (it will kill itself when it finishes rendering the template) # Run agent (it will kill itself when it finishes rendering the template)
$binpath agent -config=/tmp/vault-agent.hcl > /tmp/agent-logs.txt 2>&1 $binpath agent -config=/tmp/vault-agent.hcl > /tmp/agent-logs.txt 2>&1

View File

@@ -21,14 +21,14 @@ locals {
// file name prefixes for the install packages of vault for the various distributions and artifact types (package or bundle) // file name prefixes for the install packages of vault for the various distributions and artifact types (package or bundle)
artifact_package_release_names = { artifact_package_release_names = {
ubuntu = { ubuntu = {
"oss" = "vault_" "ce" = "vault_"
"ent" = "vault-enterprise_", "ent" = "vault-enterprise_",
"ent.fips1402" = "vault-enterprise-fips1402_", "ent.fips1402" = "vault-enterprise-fips1402_",
"ent.hsm" = "vault-enterprise-hsm_", "ent.hsm" = "vault-enterprise-hsm_",
"ent.hsm.fips1402" = "vault-enterprise-hsm-fips1402_", "ent.hsm.fips1402" = "vault-enterprise-hsm-fips1402_",
}, },
rhel = { rhel = {
"oss" = "vault-" "ce" = "vault-"
"ent" = "vault-enterprise-", "ent" = "vault-enterprise-",
"ent.fips1402" = "vault-enterprise-fips1402-", "ent.fips1402" = "vault-enterprise-fips1402-",
"ent.hsm" = "vault-enterprise-hsm-", "ent.hsm" = "vault-enterprise-hsm-",
@@ -38,7 +38,7 @@ locals {
// edition --> artifact name edition // edition --> artifact name edition
artifact_name_edition = { artifact_name_edition = {
"oss" = "" "ce" = ""
"ent" = "" "ent" = ""
"ent.hsm" = ".hsm" "ent.hsm" = ".hsm"
"ent.fips1402" = ".fips1402" "ent.fips1402" = ".fips1402"

View File

@@ -16,10 +16,10 @@ data "enos_artifactory_item" "vault" {
name = local.artifact_name name = local.artifact_name
host = var.artifactory_host host = var.artifactory_host
repo = var.artifactory_repo repo = var.artifactory_repo
path = var.edition == "oss" ? "vault/*" : "vault-enterprise/*" path = var.edition == "ce" ? "vault/*" : "vault-enterprise/*"
properties = tomap({ properties = tomap({
"commit" = var.revision "commit" = var.revision
"product-name" = var.edition == "oss" ? "vault" : "vault-enterprise" "product-name" = var.edition == "ce" ? "vault" : "vault-enterprise"
"product-version" = local.artifact_version "product-version" = local.artifact_version
}) })
} }

View File

@@ -109,9 +109,11 @@ resource "enos_remote_exec" "install_packages" {
if length(var.packages) > 0 if length(var.packages) > 0
} }
content = templatefile("${path.module}/templates/install-packages.sh", { environment = {
packages = join(" ", var.packages) PACKAGES = join(" ", var.packages)
}) }
scripts = [abspath("${path.module}/scripts/install-packages.sh")]
transport = { transport = {
ssh = { ssh = {
@@ -271,59 +273,6 @@ resource "enos_vault_unseal" "leader" {
} }
} }
# We need to ensure that the directory used for audit logs is present and accessible to the vault
# user on all nodes, since logging will only happen on the leader.
resource "enos_remote_exec" "create_audit_log_dir" {
depends_on = [
enos_bundle_install.vault,
enos_vault_unseal.leader,
]
for_each = toset([
for idx, host in toset(local.instances) : idx
if var.enable_audit_devices
])
environment = {
LOG_FILE_PATH = local.audit_device_file_path
SERVICE_USER = local.vault_service_user
}
scripts = [abspath("${path.module}/scripts/create_audit_log_dir.sh")]
transport = {
ssh = {
host = var.target_hosts[each.value].public_ip
}
}
}
resource "enos_remote_exec" "enable_audit_devices" {
depends_on = [
enos_remote_exec.create_audit_log_dir,
enos_vault_unseal.leader,
]
for_each = toset([
for idx in local.leader : idx
if local.enable_audit_devices
])
environment = {
VAULT_TOKEN = enos_vault_init.leader[each.key].root_token
VAULT_ADDR = "http://127.0.0.1:8200"
VAULT_BIN_PATH = local.bin_path
LOG_FILE_PATH = local.audit_device_file_path
SERVICE_USER = local.vault_service_user
}
scripts = [abspath("${path.module}/scripts/enable_audit_logging.sh")]
transport = {
ssh = {
host = var.target_hosts[each.key].public_ip
}
}
}
resource "enos_vault_unseal" "followers" { resource "enos_vault_unseal" "followers" {
depends_on = [ depends_on = [
enos_vault_init.leader, enos_vault_init.leader,
@@ -387,11 +336,13 @@ resource "enos_remote_exec" "vault_write_license" {
enos_vault_unseal.maybe_force_unseal, enos_vault_unseal.maybe_force_unseal,
] ]
content = templatefile("${path.module}/templates/vault-write-license.sh", { environment = {
bin_path = local.bin_path, BIN_PATH = local.bin_path,
root_token = coalesce(var.root_token, try(enos_vault_init.leader[0].root_token, null), "none") LICENSE = coalesce(var.license, "none")
license = coalesce(var.license, "none") VAULT_TOKEN = coalesce(var.root_token, try(enos_vault_init.leader[0].root_token, null), "none")
}) }
scripts = [abspath("${path.module}/scripts/vault-write-license.sh")]
transport = { transport = {
ssh = { ssh = {
@@ -400,6 +351,61 @@ resource "enos_remote_exec" "vault_write_license" {
} }
} }
# We need to ensure that the directory used for audit logs is present and accessible to the vault
# user on all nodes, since logging will only happen on the leader.
resource "enos_remote_exec" "create_audit_log_dir" {
depends_on = [
enos_vault_start.leader,
enos_vault_start.followers,
enos_vault_unseal.leader,
enos_vault_unseal.followers,
enos_vault_unseal.maybe_force_unseal,
]
for_each = toset([
for idx, host in toset(local.instances) : idx
if var.enable_audit_devices
])
environment = {
LOG_FILE_PATH = local.audit_device_file_path
SERVICE_USER = local.vault_service_user
}
scripts = [abspath("${path.module}/scripts/create_audit_log_dir.sh")]
transport = {
ssh = {
host = var.target_hosts[each.value].public_ip
}
}
}
resource "enos_remote_exec" "enable_audit_devices" {
depends_on = [
enos_remote_exec.create_audit_log_dir,
]
for_each = toset([
for idx in local.leader : idx
if local.enable_audit_devices
])
environment = {
VAULT_TOKEN = enos_vault_init.leader[each.key].root_token
VAULT_ADDR = "http://127.0.0.1:8200"
VAULT_BIN_PATH = local.bin_path
LOG_FILE_PATH = local.audit_device_file_path
SERVICE_USER = local.vault_service_user
}
scripts = [abspath("${path.module}/scripts/enable_audit_logging.sh")]
transport = {
ssh = {
host = var.target_hosts[each.key].public_ip
}
}
}
resource "enos_local_exec" "wait_for_install_packages" { resource "enos_local_exec" "wait_for_install_packages" {
depends_on = [ depends_on = [
enos_remote_exec.install_packages, enos_remote_exec.install_packages,

View File

@@ -5,9 +5,7 @@
set -ex -o pipefail set -ex -o pipefail
packages="${packages}" if [ "$PACKAGES" == "" ]
if [ "$packages" == "" ]
then then
echo "No dependencies to install." echo "No dependencies to install."
exit 0 exit 0
@@ -25,14 +23,14 @@ function retry {
if [ "$count" -lt "$retries" ]; then if [ "$count" -lt "$retries" ]; then
sleep "$wait" sleep "$wait"
else else
return "$exit" exit "$exit"
fi fi
done done
return 0 return 0
} }
echo "Installing Dependencies: $packages" echo "Installing Dependencies: $PACKAGES"
if [ -f /etc/debian_version ]; then if [ -f /etc/debian_version ]; then
# Do our best to make sure that we don't race with cloud-init. Wait a reasonable time until we # Do our best to make sure that we don't race with cloud-init. Wait a reasonable time until we
# see ec2 in the sources list. Very rarely cloud-init will take longer than we wait. In that case # see ec2 in the sources list. Very rarely cloud-init will take longer than we wait. In that case
@@ -41,8 +39,10 @@ if [ -f /etc/debian_version ]; then
cd /tmp cd /tmp
retry 5 sudo apt update retry 5 sudo apt update
retry 5 sudo apt install -y $${packages[@]} # shellcheck disable=2068
retry 5 sudo apt install -y ${PACKAGES[@]}
else else
cd /tmp cd /tmp
retry 7 sudo yum -y install $${packages[@]} # shellcheck disable=2068
retry 7 sudo yum -y install ${PACKAGES[@]}
fi fi

View File

@@ -3,8 +3,7 @@
# SPDX-License-Identifier: BUSL-1.1 # SPDX-License-Identifier: BUSL-1.1
license='${license}' if test "$LICENSE" = "none"; then
if test $license = "none"; then
exit 0 exit 0
fi fi
@@ -29,13 +28,13 @@ function retry {
} }
export VAULT_ADDR=http://localhost:8200 export VAULT_ADDR=http://localhost:8200
export VAULT_TOKEN='${root_token}' [[ -z "$VAULT_TOKEN" ]] && fail "VAULT_TOKEN env variable has not been set"
# Temporary hack until we can make the unseal resource handle legacy license # Temporary hack until we can make the unseal resource handle legacy license
# setting. If we're running 1.8 and above then we shouldn't try to set a license. # setting. If we're running 1.8 and above then we shouldn't try to set a license.
ver=$(${bin_path} version) ver=$(${BIN_PATH} version)
if [[ "$(echo "$ver" |awk '{print $2}' |awk -F'.' '{print $2}')" -ge 8 ]]; then if [[ "$(echo "$ver" |awk '{print $2}' |awk -F'.' '{print $2}')" -ge 8 ]]; then
exit 0 exit 0
fi fi
retry 5 ${bin_path} write /sys/license text="$license" retry 5 "${BIN_PATH}" write /sys/license text="$LICENSE"

View File

@@ -92,7 +92,7 @@ variable "consul_release" {
description = "Consul release version and edition to install from releases.hashicorp.com" description = "Consul release version and edition to install from releases.hashicorp.com"
default = { default = {
version = "1.15.1" version = "1.15.1"
edition = "oss" edition = "ce"
} }
} }

View File

@@ -19,124 +19,97 @@ variable "vault_root_token" {
description = "The vault root token" description = "The vault root token"
} }
variable "node_public_ip" { variable "vault_instance_count" {
type = string type = number
description = "The primary node public ip" description = "The number of instances in the vault cluster"
default = ""
} }
variable "vault_instances" { variable "vault_hosts" {
type = map(object({ type = map(object({
private_ip = string private_ip = string
public_ip = string public_ip = string
})) }))
description = "The vault cluster instances that were created" description = "The vault cluster hosts. These are required to map private ip addresses to public addresses."
}
variable "added_vault_instances" {
type = map(object({
private_ip = string
public_ip = string
}))
description = "The vault cluster instances that were added"
default = {}
} }
locals { locals {
leftover_primary_instances = var.node_public_ip != "" ? { follower_hosts_list = [for idx in range(var.vault_instance_count - 1) : {
for k, v in var.vault_instances : k => v if contains(values(v), trimspace(var.node_public_ip)) private_ip = local.follower_private_ips[idx]
} : null public_ip = local.follower_public_ips[idx]
all_instances = var.node_public_ip != "" ? merge(var.added_vault_instances, local.leftover_primary_instances) : var.vault_instances
updated_instance_count = length(local.all_instances)
updated_instances = {
for idx in range(local.updated_instance_count) : idx => {
public_ip = values(local.all_instances)[idx].public_ip
private_ip = values(local.all_instances)[idx].private_ip
} }
]
follower_hosts = {
for idx in range(var.vault_instance_count - 1) : idx => try(local.follower_hosts_list[idx], null)
} }
node_ip = var.node_public_ip != "" ? var.node_public_ip : local.updated_instances[0].public_ip follower_private_ips = jsondecode(enos_remote_exec.get_follower_private_ips.stdout)
instance_private_ips = [ follower_public_ips = [for idx in range(var.vault_instance_count) : var.vault_hosts[idx].public_ip if contains(
for k, v in values(tomap(local.updated_instances)) : local.follower_private_ips, var.vault_hosts[idx].private_ip)
tostring(v["private_ip"])
]
follower_public_ips = [
for k, v in values(tomap(local.updated_instances)) :
tostring(v["public_ip"]) if v["private_ip"] != trimspace(enos_remote_exec.get_leader_private_ip.stdout)
]
follower_private_ips = [
for k, v in values(tomap(local.updated_instances)) :
tostring(v["private_ip"]) if v["private_ip"] != trimspace(enos_remote_exec.get_leader_private_ip.stdout)
] ]
leader_host = {
private_ip = local.leader_private_ip
public_ip = local.leader_public_ip
}
leader_private_ip = trimspace(enos_remote_exec.get_leader_private_ip.stdout)
leader_public_ip = element([
for idx in range(var.vault_instance_count) : var.vault_hosts[idx].public_ip if var.vault_hosts[idx].private_ip == local.leader_private_ip
], 0)
private_ips = [for k, v in values(tomap(var.vault_hosts)) : tostring(v["private_ip"])]
} }
resource "enos_remote_exec" "get_leader_private_ip" { resource "enos_remote_exec" "get_leader_private_ip" {
environment = { environment = {
VAULT_ADDR = "http://127.0.0.1:8200" VAULT_ADDR = "http://127.0.0.1:8200"
VAULT_TOKEN = var.vault_root_token VAULT_TOKEN = var.vault_root_token
VAULT_INSTALL_DIR = var.vault_install_dir VAULT_INSTALL_DIR = var.vault_install_dir
VAULT_INSTANCE_PRIVATE_IPS = jsonencode(local.instance_private_ips)
} }
scripts = [abspath("${path.module}/scripts/get-leader-private-ip.sh")] scripts = [abspath("${path.module}/scripts/get-leader-private-ip.sh")]
transport = { transport = {
ssh = { ssh = {
host = local.node_ip host = var.vault_hosts[0].public_ip
} }
} }
} }
output "leftover_primary_instances" { resource "enos_remote_exec" "get_follower_private_ips" {
value = local.leftover_primary_instances environment = {
VAULT_ADDR = "http://127.0.0.1:8200"
VAULT_TOKEN = var.vault_root_token
VAULT_LEADER_PRIVATE_IP = local.leader_private_ip
VAULT_INSTANCE_PRIVATE_IPS = jsonencode(local.private_ips)
VAULT_INSTALL_DIR = var.vault_install_dir
}
scripts = [abspath("${path.module}/scripts/get-follower-private-ips.sh")]
transport = {
ssh = {
host = var.vault_hosts[0].public_ip
}
}
} }
output "all_instances" { output "follower_hosts" {
value = local.all_instances value = local.follower_hosts
}
output "updated_instance_count" {
value = local.updated_instance_count
}
output "updated_instances" {
value = local.updated_instances
}
output "leader_private_ip" {
value = trimspace(enos_remote_exec.get_leader_private_ip.stdout)
}
output "leader_public_ip" {
value = element([
for k, v in values(tomap(local.all_instances)) :
tostring(v["public_ip"]) if v["private_ip"] == trimspace(enos_remote_exec.get_leader_private_ip.stdout)
], 0)
}
output "vault_instance_private_ips" {
value = jsonencode(local.instance_private_ips)
}
output "follower_public_ips" {
value = local.follower_public_ips
}
output "follower_public_ip_1" {
value = element(local.follower_public_ips, 0)
}
output "follower_public_ip_2" {
value = element(local.follower_public_ips, 1)
} }
output "follower_private_ips" { output "follower_private_ips" {
value = local.follower_private_ips value = local.follower_private_ips
} }
output "follower_private_ip_1" { output "follower_public_ips" {
value = element(local.follower_private_ips, 0) value = local.follower_public_ips
} }
output "follower_private_ip_2" { output "leader_host" {
value = element(local.follower_private_ips, 1) value = local.leader_host
}
output "leader_private_ip" {
value = local.leader_private_ip
}
output "leader_public_ip" {
value = local.leader_public_ip
} }

View File

@@ -0,0 +1,53 @@
#!/usr/bin/env bash
# Copyright (c) HashiCorp, Inc.
# SPDX-License-Identifier: BUSL-1.1
set -e
function fail() {
echo "$1" 1>&2
exit 1
}
[[ -z "$VAULT_ADDR" ]] && fail "VAULT_ADDR env variable has not been set"
[[ -z "$VAULT_INSTALL_DIR" ]] && fail "VAULT_INSTALL_DIR env variable has not been set"
[[ -z "$VAULT_INSTANCE_PRIVATE_IPS" ]] && fail "VAULT_INSTANCE_PRIVATE_IPS env variable has not been set"
[[ -z "$VAULT_LEADER_PRIVATE_IP" ]] && fail "VAULT_LEADER_PRIVATE_IP env variable has not been set"
[[ -z "$VAULT_TOKEN" ]] && fail "VAULT_TOKEN env variable has not been set"
binpath=${VAULT_INSTALL_DIR}/vault
test -x "$binpath" || fail "Unable to locate vault binary at $binpath"
count=0
retries=5
while :; do
# Vault >= 1.10.x has the operator members. If we have that then we'll use it.
if $binpath operator -h 2>&1 | grep members &> /dev/null; then
# Get the folllowers that are part of our private ips.
if followers=$($binpath operator members -format json | jq --argjson expected "$VAULT_INSTANCE_PRIVATE_IPS" -c '.Nodes | map(select(any(.; .active_node==false)) | .api_address | scan("[0-9]+.[0-9]+.[0-9]+.[0-9]+")) as $followers | $expected - ($expected - $followers)'); then
# Make sure that we got all the followers
if jq --argjson expected "$VAULT_INSTANCE_PRIVATE_IPS" --argjson followers "$followers" -ne '$expected | length as $el | $followers | length as $fl | $fl == $el-1' > /dev/null; then
echo "$followers"
exit 0
fi
fi
else
# We're using an old version of vault so we'll just return ips that don't match the leader.
# Get the public ip addresses of the followers
if followers=$(jq --arg ip "$VAULT_LEADER_PRIVATE_IP" -c '. | map(select(.!=$ip))' <<< "$VAULT_INSTANCE_PRIVATE_IPS"); then
if [[ -n "$followers" ]]; then
echo "$followers"
exit 0
fi
fi
fi
wait=$((2 ** count))
count=$((count + 1))
if [ "$count" -lt "$retries" ]; then
sleep "$wait"
else
fail "Timed out trying to obtain the cluster followers"
fi
done

View File

@@ -5,31 +5,42 @@
set -e set -e
binpath=${VAULT_INSTALL_DIR}/vault
instance_ips=${VAULT_INSTANCE_PRIVATE_IPS}
function fail() { function fail() {
echo "$1" 1>&2 echo "$1" 1>&2
exit 1 exit 1
} }
[[ -z "$VAULT_ADDR" ]] && fail "VAULT_ADDR env variable has not been set"
[[ -z "$VAULT_INSTALL_DIR" ]] && fail "VAULT_INSTALL_DIR env variable has not been set"
[[ -z "$VAULT_TOKEN" ]] && fail "VAULT_TOKEN env variable has not been set"
binpath=${VAULT_INSTALL_DIR}/vault
test -x "$binpath" || fail "Unable to locate vault binary at $binpath"
count=0 count=0
retries=5 retries=5
while :; do while :; do
# Find the leader private IP address # Find the leader private IP address
leader_private_ip=$($binpath status -format json | jq '.leader_address | rtrimstr(":8200") | ltrimstr("http://")') if ip=$($binpath read sys/leader -format=json | jq -r '.data.leader_address | scan("[0-9]+.[0-9]+.[0-9]+.[0-9]+")'); then
match_ip=$(echo "$instance_ips" |jq -r --argjson ip "$leader_private_ip" 'map(select(. == $ip))') if [[ -n "$ip" ]]; then
echo "$ip"
if [[ "$leader_private_ip" != 'null' ]] && [[ "$match_ip" != '[]' ]]; then exit 0
echo "$leader_private_ip" | sed 's/\"//g'
exit 0
fi fi
fi
wait=$((5 ** count)) # Some older versions of vault don't support reading sys/leader. Try falling back to the cli status.
count=$((count + 1)) if ip=$($binpath status -format json | jq -r '.leader_address | scan("[0-9]+.[0-9]+.[0-9]+.[0-9]+")'); then
if [ "$count" -lt "$retries" ]; then if [[ -n "$ip" ]]; then
sleep "$wait" echo "$ip"
else exit 0
fail "leader IP address $leader_private_ip was not found in $instance_ips"
fi fi
fi
wait=$((2 ** count))
count=$((count + 1))
if [ "$count" -lt "$retries" ]; then
sleep "$wait"
else
fail "Timed out trying to obtain the cluster leader"
fi
done done

View File

@@ -52,12 +52,14 @@ locals {
} }
resource "enos_remote_exec" "set_up_approle_auth_and_proxy" { resource "enos_remote_exec" "set_up_approle_auth_and_proxy" {
content = templatefile("${path.module}/templates/set-up-approle-and-proxy.sh", { environment = {
vault_install_dir = var.vault_install_dir VAULT_INSTALL_DIR = var.vault_install_dir
vault_token = var.vault_root_token VAULT_TOKEN = var.vault_root_token
vault_proxy_pidfile = var.vault_proxy_pidfile VAULT_PROXY_PIDFILE = var.vault_proxy_pidfile
vault_proxy_address = local.vault_proxy_address VAULT_PROXY_ADDRESS = local.vault_proxy_address
}) }
scripts = [abspath("${path.module}/scripts/set-up-approle-and-proxy.sh")]
transport = { transport = {
ssh = { ssh = {
@@ -67,11 +69,13 @@ resource "enos_remote_exec" "set_up_approle_auth_and_proxy" {
} }
resource "enos_remote_exec" "use_proxy" { resource "enos_remote_exec" "use_proxy" {
content = templatefile("${path.module}/templates/use-proxy.sh", { environment = {
vault_install_dir = var.vault_install_dir VAULT_INSTALL_DIR = var.vault_install_dir
vault_proxy_pidfile = var.vault_proxy_pidfile VAULT_PROXY_PIDFILE = var.vault_proxy_pidfile
vault_proxy_address = local.vault_proxy_address VAULT_PROXY_ADDRESS = local.vault_proxy_address
}) }
scripts = [abspath("${path.module}/scripts/use-proxy.sh")]
transport = { transport = {
ssh = { ssh = {

View File

@@ -5,7 +5,7 @@
set -e set -e
binpath=${vault_install_dir}/vault binpath=${VAULT_INSTALL_DIR}/vault
fail() { fail() {
echo "$1" 1>&2 echo "$1" 1>&2
@@ -15,14 +15,14 @@ fail() {
test -x "$binpath" || fail "unable to locate vault binary at $binpath" test -x "$binpath" || fail "unable to locate vault binary at $binpath"
export VAULT_ADDR='http://127.0.0.1:8200' export VAULT_ADDR='http://127.0.0.1:8200'
export VAULT_TOKEN='${vault_token}' [[ -z "$VAULT_TOKEN" ]] && fail "VAULT_TOKEN env variable has not been set"
# If approle was already enabled, disable it as we're about to re-enable it (the || true is so we don't fail if it doesn't already exist) # If approle was already enabled, disable it as we're about to re-enable it (the || true is so we don't fail if it doesn't already exist)
$binpath auth disable approle || true $binpath auth disable approle || true
approle_create_status=$($binpath auth enable approle) $binpath auth enable approle
approle_status=$($binpath write auth/approle/role/proxy-role secret_id_ttl=700h token_num_uses=1000 token_ttl=600h token_max_ttl=700h secret_id_num_uses=1000) $binpath write auth/approle/role/proxy-role secret_id_ttl=700h token_num_uses=1000 token_ttl=600h token_max_ttl=700h secret_id_num_uses=1000
ROLEID=$($binpath read --format=json auth/approle/role/proxy-role/role-id | jq -r '.data.role_id') ROLEID=$($binpath read --format=json auth/approle/role/proxy-role/role-id | jq -r '.data.role_id')
@@ -36,14 +36,14 @@ if [[ "$SECRETID" == '' ]]; then
fail "expected SECRETID to be nonempty, but it is empty" fail "expected SECRETID to be nonempty, but it is empty"
fi fi
echo $ROLEID > /tmp/role-id echo "$ROLEID" > /tmp/role-id
echo $SECRETID > /tmp/secret-id echo "$SECRETID" > /tmp/secret-id
# Write the Vault Proxy's configuration to /tmp/vault-proxy.hcl # Write the Vault Proxy's configuration to /tmp/vault-proxy.hcl
# The Proxy references the fixed Vault server address of http://127.0.0.1:8200 # The Proxy references the fixed Vault server address of http://127.0.0.1:8200
# The Proxy itself listens at the address http://127.0.0.1:8100 # The Proxy itself listens at the address http://127.0.0.1:8100
cat > /tmp/vault-proxy.hcl <<- EOM cat > /tmp/vault-proxy.hcl <<- EOM
pid_file = "${vault_proxy_pidfile}" pid_file = "${VAULT_PROXY_PIDFILE}"
vault { vault {
address = "http://127.0.0.1:8200" address = "http://127.0.0.1:8200"
@@ -59,7 +59,7 @@ api_proxy {
} }
listener "tcp" { listener "tcp" {
address = "${vault_proxy_address}" address = "${VAULT_PROXY_ADDRESS}"
tls_disable = true tls_disable = true
} }
@@ -81,7 +81,7 @@ auto_auth {
EOM EOM
# If Proxy is still running from a previous run, kill it # If Proxy is still running from a previous run, kill it
pkill -F "${vault_proxy_pidfile}" || true pkill -F "${VAULT_PROXY_PIDFILE}" || true
# Run proxy in the background # Run proxy in the background
$binpath proxy -config=/tmp/vault-proxy.hcl > /tmp/proxy-logs.txt 2>&1 & $binpath proxy -config=/tmp/vault-proxy.hcl > /tmp/proxy-logs.txt 2>&1 &

View File

@@ -5,7 +5,7 @@
set -e set -e
binpath=${vault_install_dir}/vault binpath=${VAULT_INSTALL_DIR}/vault
fail() { fail() {
echo "$1" 1>&2 echo "$1" 1>&2
@@ -16,7 +16,7 @@ test -x "$binpath" || fail "unable to locate vault binary at $binpath"
# Will cause the Vault CLI to communicate with the Vault Proxy, since it # Will cause the Vault CLI to communicate with the Vault Proxy, since it
# is listening at port 8100. # is listening at port 8100.
export VAULT_ADDR='http://${vault_proxy_address}' export VAULT_ADDR="http://${VAULT_PROXY_ADDRESS}"
# Explicitly unsetting VAULT_TOKEN to make sure that the Vault Proxy's token # Explicitly unsetting VAULT_TOKEN to make sure that the Vault Proxy's token
# is used. # is used.
@@ -29,4 +29,4 @@ unset VAULT_TOKEN
$binpath token lookup -format=json | jq -r '.data.path' | grep -q 'auth/approle/login' $binpath token lookup -format=json | jq -r '.data.path' | grep -q 'auth/approle/login'
# Now that we're done, kill the proxy # Now that we're done, kill the proxy
pkill -F "${vault_proxy_pidfile}" || true pkill -F "${VAULT_PROXY_PIDFILE}" || true

View File

@@ -56,15 +56,13 @@ resource "enos_remote_exec" "vault_raft_remove_peer" {
for_each = local.instances for_each = local.instances
environment = { environment = {
VAULT_TOKEN = var.vault_root_token REMOVE_VAULT_CLUSTER_ADDR = "${each.value.private_ip}:${var.vault_cluster_addr_port}"
VAULT_ADDR = "http://localhost:8200" VAULT_TOKEN = var.vault_root_token
VAULT_ADDR = "http://localhost:8200"
VAULT_INSTALL_DIR = var.vault_install_dir
} }
content = templatefile("${path.module}/templates/raft-remove-peer.sh", { scripts = [abspath("${path.module}/scripts/raft-remove-peer.sh")]
remove_vault_cluster_addr = "${each.value.private_ip}:${var.vault_cluster_addr_port}"
vault_install_dir = var.vault_install_dir
vault_local_binary_path = "${var.vault_install_dir}/vault"
})
transport = { transport = {
ssh = { ssh = {

View File

@@ -5,15 +5,16 @@
set -e set -e
binpath=${vault_install_dir}/vault binpath=${VAULT_INSTALL_DIR}/vault
node_addr=${REMOVE_VAULT_CLUSTER_ADDR}
node_addr=${remove_vault_cluster_addr}
fail() { fail() {
echo "$1" 2>&1 echo "$1" 2>&1
return 1 return 1
} }
[[ -z "$VAULT_TOKEN" ]] && fail "VAULT_TOKEN env variable has not been set"
retry() { retry() {
local retries=$1 local retries=$1
shift shift
@@ -35,8 +36,7 @@ retry() {
} }
remove_peer() { remove_peer() {
node_id=$($binpath operator raft list-peers -format json | jq -Mr --argjson expected "false" '.data.config.servers[] | select(.address=='\""$node_addr"\"') | select(.voter==$expected) | .node_id') if ! node_id=$("$binpath" operator raft list-peers -format json | jq -Mr --argjson expected "false" '.data.config.servers[] | select(.address=='\""$node_addr"\"') | select(.voter==$expected) | .node_id'); then
if [ "$?" != "0" ];then
fail "failed to get node id of a non-voter node" fail "failed to get node id of a non-voter node"
fi fi

View File

@@ -39,7 +39,7 @@ resource "enos_remote_exec" "configure_pr_primary" {
environment = { environment = {
VAULT_ADDR = "http://127.0.0.1:8200" VAULT_ADDR = "http://127.0.0.1:8200"
VAULT_TOKEN = var.vault_root_token VAULT_TOKEN = var.vault_root_token
vault_install_dir = var.vault_install_dir VAULT_INSTALL_DIR = var.vault_install_dir
} }
scripts = [abspath("${path.module}/scripts/configure-vault-pr-primary.sh")] scripts = [abspath("${path.module}/scripts/configure-vault-pr-primary.sh")]

View File

@@ -5,7 +5,7 @@
set -e set -e
binpath=${vault_install_dir}/vault binpath=${VAULT_INSTALL_DIR}/vault
fail() { fail() {
echo "$1" 1>&2 echo "$1" 1>&2

View File

@@ -5,23 +5,24 @@
binpath=${VAULT_INSTALL_DIR}/vault binpath=${VAULT_INSTALL_DIR}/vault
IFS="," read -a keys <<< ${UNSEAL_KEYS} IFS="," read -r -a keys <<< "${UNSEAL_KEYS}"
function fail() { function fail() {
echo "$1" 1>&2 echo "$1" 1>&2
exit 1 exit 1
} }
count=0 count=0
retries=5 retries=5
while :; do while :; do
for key in ${keys[@]}; do for key in "${keys[@]}"; do
# Check the Vault seal status # Check the Vault seal status
seal_status=$($binpath status -format json | jq '.sealed') seal_status=$($binpath status -format json | jq '.sealed')
if [[ "$seal_status" == "true" ]]; then if [[ "$seal_status" == "true" ]]; then
echo "running unseal with $key count $count with retry $retry" >> /tmp/unseal_script.out echo "running unseal with $key count $count with retry $retries" >> /tmp/unseal_script.out
$binpath operator unseal $key > /dev/null 2>&1 "$binpath" operator unseal "$key" > /dev/null 2>&1
else else
exit 0 exit 0
fi fi

View File

@@ -6,8 +6,8 @@
binpath=${VAULT_INSTALL_DIR}/vault binpath=${VAULT_INSTALL_DIR}/vault
function fail() { function fail() {
echo "$1" 1>&2 echo "$1" 1>&2
exit 1 exit 1
} }
count=0 count=0

View File

@@ -92,10 +92,12 @@ resource "enos_bundle_install" "upgrade_vault_binary" {
resource "enos_remote_exec" "get_leader_public_ip" { resource "enos_remote_exec" "get_leader_public_ip" {
depends_on = [enos_bundle_install.upgrade_vault_binary] depends_on = [enos_bundle_install.upgrade_vault_binary]
content = templatefile("${path.module}/templates/get-leader-public-ip.sh", { scripts = [abspath("${path.module}/scripts/get-leader-public-ip.sh")]
vault_install_dir = var.vault_install_dir,
vault_instances = jsonencode(local.instances) environment = {
}) VAULT_INSTALL_DIR = var.vault_install_dir,
VAULT_INSTANCES = jsonencode(local.instances)
}
transport = { transport = {
ssh = { ssh = {
@@ -107,10 +109,12 @@ resource "enos_remote_exec" "get_leader_public_ip" {
resource "enos_remote_exec" "get_follower_public_ips" { resource "enos_remote_exec" "get_follower_public_ips" {
depends_on = [enos_bundle_install.upgrade_vault_binary] depends_on = [enos_bundle_install.upgrade_vault_binary]
content = templatefile("${path.module}/templates/get-follower-public-ips.sh", { environment = {
vault_install_dir = var.vault_install_dir, VAULT_INSTALL_DIR = var.vault_install_dir,
vault_instances = jsonencode(local.instances) VAULT_INSTANCES = jsonencode(local.instances)
}) }
scripts = [abspath("${path.module}/scripts/get-follower-public-ips.sh")]
transport = { transport = {
ssh = { ssh = {
@@ -123,7 +127,7 @@ resource "enos_remote_exec" "restart_followers" {
for_each = local.followers for_each = local.followers
depends_on = [enos_remote_exec.get_follower_public_ips] depends_on = [enos_remote_exec.get_follower_public_ips]
content = file("${path.module}/templates/restart-vault.sh") scripts = [abspath("${path.module}/scripts/restart-vault.sh")]
transport = { transport = {
ssh = { ssh = {
@@ -153,7 +157,7 @@ resource "enos_vault_unseal" "followers" {
resource "enos_remote_exec" "restart_leader" { resource "enos_remote_exec" "restart_leader" {
depends_on = [enos_vault_unseal.followers] depends_on = [enos_vault_unseal.followers]
content = file("${path.module}/templates/restart-vault.sh") scripts = [abspath("${path.module}/scripts/restart-vault.sh")]
transport = { transport = {
ssh = { ssh = {

View File

@@ -5,13 +5,13 @@
set -e set -e
binpath=${vault_install_dir}/vault binpath=${VAULT_INSTALL_DIR}/vault
export VAULT_ADDR="http://localhost:8200" export VAULT_ADDR="http://localhost:8200"
instances='${vault_instances}' instances=${VAULT_INSTANCES}
# Find the leader # Find the leader
leader_address=$($binpath status -format json | jq '.leader_address | rtrimstr(":8200") | ltrimstr("http://")') leader_address=$($binpath status -format json | jq '.leader_address | scan("[0-9]+.[0-9]+.[0-9]+.[0-9]+")')
# Get the public ip addresses of the followers # Get the public ip addresses of the followers
follower_ips=$(jq ".[] | select(.private_ip!=$leader_address) | .public_ip" <<< "$instances") follower_ips=$(jq ".[] | select(.private_ip!=$leader_address) | .public_ip" <<< "$instances")

View File

@@ -5,14 +5,15 @@
set -e set -e
binpath=${vault_install_dir}/vault binpath=${VAULT_INSTALL_DIR}/vault
export VAULT_ADDR="http://localhost:8200" export VAULT_ADDR="http://localhost:8200"
instances='${vault_instances}' instances=${VAULT_INSTANCES}
# Find the leader # Find the leader
leader_address=$($binpath status -format json | jq '.leader_address | rtrimstr(":8200") | ltrimstr("http://")') leader_address=$($binpath status -format json | jq '.leader_address | scan("[0-9]+.[0-9]+.[0-9]+.[0-9]+")')
# Get the public ip address of the leader # Get the public ip address of the leader
leader_public=$(jq ".[] | select(.private_ip==$leader_address) | .public_ip" <<< "$instances") leader_public=$(jq ".[] | select(.private_ip==$leader_address) | .public_ip" <<< "$instances")
#shellcheck disable=SC2001
echo "$leader_public" | sed 's/\"//g' echo "$leader_public" | sed 's/\"//g'

View File

@@ -42,11 +42,13 @@ locals {
} }
resource "enos_remote_exec" "verify_vault_agent_output" { resource "enos_remote_exec" "verify_vault_agent_output" {
content = templatefile("${path.module}/templates/verify-vault-agent-output.sh", { environment = {
vault_agent_template_destination = var.vault_agent_template_destination VAULT_AGENT_TEMPLATE_DESTINATION = var.vault_agent_template_destination
vault_agent_expected_output = var.vault_agent_expected_output VAULT_AGENT_EXPECTED_OUTPUT = var.vault_agent_expected_output
vault_instances = jsonencode(local.vault_instances) VAULT_INSTANCES = jsonencode(local.vault_instances)
}) }
scripts = [abspath("${path.module}/scripts/verify-vault-agent-output.sh")]
transport = { transport = {
ssh = { ssh = {

View File

@@ -0,0 +1,16 @@
#!/usr/bin/env bash
# Copyright (c) HashiCorp, Inc.
# SPDX-License-Identifier: BUSL-1.1
set -e
fail() {
echo "$1" 1>&2
return 1
}
actual_output=$(cat "${VAULT_AGENT_TEMPLATE_DESTINATION}")
if [[ "$actual_output" != "${VAULT_AGENT_EXPECTED_OUTPUT}" ]]; then
fail "expected '${VAULT_AGENT_EXPECTED_OUTPUT}' to be the Agent output, but got: '$actual_output'"
fi

View File

@@ -1,16 +0,0 @@
#!/usr/bin/env bash
# Copyright (c) HashiCorp, Inc.
# SPDX-License-Identifier: BUSL-1.1
set -e
fail() {
echo "$1" 1>&2
return 1
}
actual_output=$(cat ${vault_agent_template_destination})
if [[ "$actual_output" != "${vault_agent_expected_output}" ]]; then
fail "expected '${vault_agent_expected_output}' to be the Agent output, but got: '$actual_output'"
fi

View File

@@ -54,12 +54,14 @@ locals {
resource "enos_remote_exec" "smoke-verify-autopilot" { resource "enos_remote_exec" "smoke-verify-autopilot" {
for_each = local.public_ips for_each = local.public_ips
content = templatefile("${path.module}/templates/smoke-verify-autopilot.sh", { environment = {
vault_install_dir = var.vault_install_dir VAULT_INSTALL_DIR = var.vault_install_dir,
vault_token = var.vault_root_token VAULT_TOKEN = var.vault_root_token,
vault_autopilot_upgrade_status = var.vault_autopilot_upgrade_status, VAULT_AUTOPILOT_UPGRADE_STATUS = var.vault_autopilot_upgrade_status,
vault_autopilot_upgrade_version = var.vault_autopilot_upgrade_version, VAULT_AUTOPILOT_UPGRADE_VERSION = var.vault_autopilot_upgrade_version,
}) }
scripts = [abspath("${path.module}/scripts/smoke-verify-autopilot.sh")]
transport = { transport = {
ssh = { ssh = {

View File

@@ -0,0 +1,43 @@
#!/bin/bash
# Copyright (c) HashiCorp, Inc.
# SPDX-License-Identifier: BUSL-1.1
fail() {
echo "$1" 1>&2
exit 1
}
export VAULT_ADDR="http://localhost:8200"
[[ -z "$VAULT_AUTOPILOT_UPGRADE_STATUS" ]] && fail "VAULT_AUTOPILOT_UPGRADE_STATUS env variable has not been set"
[[ -z "$VAULT_AUTOPILOT_UPGRADE_VERSION" ]] && fail "VAULT_AUTOPILOT_UPGRADE_VERSION env variable has not been set"
[[ -z "$VAULT_TOKEN" ]] && fail "VAULT_TOKEN env variable has not been set"
[[ -z "$VAULT_INSTALL_DIR" ]] && fail "VAULT_INSTALL_DIR env variable has not been set"
binpath=${VAULT_INSTALL_DIR}/vault
test -x "$binpath" || fail "unable to locate vault binary at $binpath"
count=0
retries=8
while :; do
state=$($binpath read -format=json sys/storage/raft/autopilot/state)
status="$(jq -r '.data.upgrade_info.status' <<< "$state")"
target_version="$(jq -r '.data.upgrade_info.target_version' <<< "$state")"
if [ "$status" = "$VAULT_AUTOPILOT_UPGRADE_STATUS" ] && [ "$target_version" = "$VAULT_AUTOPILOT_UPGRADE_VERSION" ]; then
exit 0
fi
wait=$((2 ** count))
count=$((count + 1))
if [ "$count" -lt "$retries" ]; then
echo "Expected autopilot status to be $VAULT_AUTOPILOT_UPGRADE_STATUS, got $status"
echo "Expected autopilot target_version to be $VAULT_AUTOPILOT_UPGRADE_VERSION, got $target_version"
sleep "$wait"
else
echo "$state"
echo "Expected autopilot status to be $VAULT_AUTOPILOT_UPGRADE_STATUS, got $status"
echo "Expected autopilot target_version to be $VAULT_AUTOPILOT_UPGRADE_VERSION, got $target_version"
fail "Autopilot did not get into the correct status"
fi
done

View File

@@ -1,37 +0,0 @@
#!/bin/bash
# Copyright (c) HashiCorp, Inc.
# SPDX-License-Identifier: BUSL-1.1
token="${vault_token}"
autopilot_version="${vault_autopilot_upgrade_version}"
autopilot_status="${vault_autopilot_upgrade_status}"
export VAULT_ADDR="http://localhost:8200"
export VAULT_TOKEN="$token"
function fail() {
echo "$1" 1>&2
exit 1
}
count=0
retries=7
while :; do
state=$(${vault_install_dir}/vault read -format=json sys/storage/raft/autopilot/state)
status="$(jq -r '.data.upgrade_info.status' <<< "$state")"
target_version="$(jq -r '.data.upgrade_info.target_version' <<< "$state")"
if [ "$status" = "$autopilot_status" ] && [ "$target_version" = "$autopilot_version" ]; then
exit 0
fi
wait=$((2 ** count))
count=$((count + 1))
if [ "$count" -lt "$retries" ]; then
echo "$state"
sleep "$wait"
else
fail "Autopilot did not get into the correct status"
fi
done

View File

@@ -9,66 +9,76 @@
set -e set -e
binpath=${VAULT_INSTALL_DIR}/vault fail() {
echo "$1" 1>&2
function fail() { exit 1
echo "$1" 1>&2
exit 1
} }
[[ -z "$PRIMARY_LEADER_PRIV_IP" ]] && fail "PRIMARY_LEADER_PRIV_IP env variable has not been set"
[[ -z "$SECONDARY_LEADER_PRIV_IP" ]] && fail "SECONDARY_LEADER_PRIV_IP env variable has not been set"
[[ -z "$VAULT_ADDR" ]] && fail "VAULT_ADDR env variable has not been set"
[[ -z "$VAULT_INSTALL_DIR" ]] && fail "VAULT_INSTALL_DIR env variable has not been set"
binpath=${VAULT_INSTALL_DIR}/vault
test -x "$binpath" || fail "unable to locate vault binary at $binpath"
retry() { retry() {
local retries=$1 local retries=$1
shift shift
local count=0 local count=0
until "$@"; do until "$@"; do
exit=$?
wait=$((2 ** count)) wait=$((2 ** count))
count=$((count + 1)) count=$((count + 1))
if [ "$count" -lt "$retries" ]; then if [ "$count" -lt "$retries" ]; then
sleep "$wait" sleep "$wait"
else else
return "$exit" fail "$($binpath read -format=json sys/replication/performance/status)"
fi fi
done done
} }
test -x "$binpath" || exit 1
check_pr_status() { check_pr_status() {
pr_status=$($binpath read -format=json sys/replication/performance/status) pr_status=$($binpath read -format=json sys/replication/performance/status)
cluster_state=$(echo $pr_status | jq -r '.data.state') cluster_state=$(echo "$pr_status" | jq -r '.data.state')
connection_mode=$(echo $pr_status | jq -r '.data.mode') connection_mode=$(echo "$pr_status" | jq -r '.data.mode')
if [[ "$cluster_state" == 'idle' ]]; then if [[ "$cluster_state" == 'idle' ]]; then
fail "replication cluster state is $cluster_state" echo "replication cluster state is idle" 1>&2
return 1
fi fi
if [[ "$connection_mode" == "primary" ]]; then if [[ "$connection_mode" == "primary" ]]; then
connection_status=$(echo $pr_status | jq -r '.data.secondaries[0].connection_status') connection_status=$(echo "$pr_status" | jq -r '.data.secondaries[0].connection_status')
if [[ "$connection_status" == 'disconnected' ]]; then if [[ "$connection_status" == 'disconnected' ]]; then
fail "replication connection status of secondaries is $connection_status" echo ".data.secondaries[0].connection_status from primary node is 'disconnected'" 1>&2
return 1
fi fi
secondary_cluster_addr=$(echo $pr_status | jq -r '.data.secondaries[0].cluster_address') secondary_cluster_addr=$(echo "$pr_status" | jq -r '.data.secondaries[0].cluster_address | scan("[0-9]+.[0-9]+.[0-9]+.[0-9]+")')
if [[ "$secondary_cluster_addr" != "https://"${SECONDARY_LEADER_PRIV_IP}":8201" ]]; then if [[ "$secondary_cluster_addr" != "$SECONDARY_LEADER_PRIV_IP" ]]; then
fail "Expected secondary cluster address $SECONDARY_LEADER_PRIV_IP got $secondary_cluster_addr " echo ".data.secondaries[0].cluster_address should have an IP address of $SECONDARY_LEADER_PRIV_IP, got: $secondary_cluster_addr" 1>&2
return 1
fi fi
else else
connection_status=$(echo $pr_status | jq -r '.data.primaries[0].connection_status') connection_status=$(echo "$pr_status" | jq -r '.data.primaries[0].connection_status')
if [[ "$connection_status" == 'disconnected' ]]; then if [[ "$connection_status" == 'disconnected' ]]; then
fail "replication connection status of secondaries is $connection_status" echo ".data.primaries[0].connection_status from secondary node is 'disconnected'" 1>&2
return 1
fi fi
primary_cluster_addr=$(echo $pr_status | jq -r '.data.primaries[0].cluster_address') primary_cluster_addr=$(echo "$pr_status" | jq -r '.data.primaries[0].cluster_address | scan("[0-9]+.[0-9]+.[0-9]+.[0-9]+")')
if [[ "$primary_cluster_addr" != "https://"${PRIMARY_LEADER_PRIV_IP}":8201" ]]; then if [[ "$primary_cluster_addr" != "$PRIMARY_LEADER_PRIV_IP" ]]; then
fail "Expected primary cluster address $PRIMARY_LEADER_PRIV_IP got $primary_cluster_addr" echo ".data.primaries[0].cluster_address should have an IP address of $PRIMARY_LEADER_PRIV_IP, got: $primary_cluster_addr" 1>&2
return 1
fi fi
known_primary_cluster_addrs=$(echo $pr_status | jq -r '.data.known_primary_cluster_addrs') known_primary_cluster_addrs=$(echo "$pr_status" | jq -r '.data.known_primary_cluster_addrs')
# IFS="," read -a cluster_addr <<< ${known_primary_cluster_addrs} if ! echo "$known_primary_cluster_addrs" | grep -q "$PRIMARY_LEADER_PRIV_IP"; then
if ! $(echo $known_primary_cluster_addrs |grep -q $PRIMARY_LEADER_PRIV_IP); then echo "$PRIMARY_LEADER_PRIV_IP is not in .data.known_primary_cluster_addrs: $known_primary_cluster_addrs" 1>&2
fail "Primary leader address $PRIMARY_LEADER_PRIV_IP not found in Known primary cluster addresses $known_primary_cluster_addrs" return 1
fi fi
fi fi
echo $pr_status
echo "$pr_status"
return 0
} }
# Retry a few times because it can take some time for replication to sync # Retry a few times because it can take some time for replication to sync

View File

@@ -50,12 +50,14 @@ locals {
resource "enos_remote_exec" "verify_raft_auto_join_voter" { resource "enos_remote_exec" "verify_raft_auto_join_voter" {
for_each = local.instances for_each = local.instances
content = templatefile("${path.module}/templates/verify-raft-auto-join-voter.sh", { environment = {
vault_cluster_addr = "${each.value.private_ip}:${var.vault_cluster_addr_port}" VAULT_CLUSTER_ADDR = "${each.value.private_ip}:${var.vault_cluster_addr_port}"
vault_install_dir = var.vault_install_dir VAULT_INSTALL_DIR = var.vault_install_dir
vault_local_binary_path = "${var.vault_install_dir}/vault" VAULT_LOCAL_BINARY_PATH = "${var.vault_install_dir}/vault"
vault_token = var.vault_root_token VAULT_TOKEN = var.vault_root_token
}) }
scripts = [abspath("${path.module}/scripts/verify-raft-auto-join-voter.sh")]
transport = { transport = {
ssh = { ssh = {

View File

@@ -5,7 +5,7 @@
set -e set -e
binpath=${vault_install_dir}/vault binpath=${VAULT_INSTALL_DIR}/vault
fail() { fail() {
echo "$1" 2>&1 echo "$1" 2>&1
@@ -33,17 +33,17 @@ retry() {
} }
check_voter_status() { check_voter_status() {
voter_status=$($binpath operator raft list-peers -format json | jq -Mr --argjson expected "true" '.data.config.servers[] | select(.address=="${vault_cluster_addr}") | .voter == $expected') voter_status=$($binpath operator raft list-peers -format json | jq -Mr --argjson expected "true" --arg ADDR "$VAULT_CLUSTER_ADDR" '.data.config.servers[] | select(.address==$ADDR) | .voter == $expected')
if [[ "$voter_status" != 'true' ]]; then if [[ "$voter_status" != 'true' ]]; then
fail "expected ${vault_cluster_addr} to be raft voter, got raft status for node: $($binpath operator raft list-peers -format json | jq '.data.config.servers[] | select(.address==${vault_cluster_addr})')" fail "expected $VAULT_CLUSTER_ADDR to be raft voter, got raft status for node: $($binpath operator raft list-peers -format json | jq -Mr --arg ADDR "$VAULT_CLUSTER_ADDR" '.data.config.servers[] | select(.address==$ADDR)')"
fi fi
} }
test -x "$binpath" || fail "unable to locate vault binary at $binpath" test -x "$binpath" || fail "unable to locate vault binary at $binpath"
export VAULT_ADDR='http://127.0.0.1:8200' export VAULT_ADDR='http://127.0.0.1:8200'
export VAULT_TOKEN='${vault_token}' [[ -z "$VAULT_TOKEN" ]] && fail "VAULT_TOKEN env variable has not been set"
# Retry a few times because it can take some time for things to settle after # Retry a few times because it can take some time for things to settle after
# all the nodes are unsealed # all the nodes are unsealed

View File

@@ -24,21 +24,16 @@ function retry {
return 0 return 0
} }
function fail {
echo "$1" 1>&2
exit 1
}
binpath=${VAULT_INSTALL_DIR}/vault
fail() { fail() {
echo "$1" 1>&2 echo "$1" 1>&2
return 1 return 1
} }
binpath="${VAULT_INSTALL_DIR}/vault"
test -x "$binpath" || fail "unable to locate vault binary at $binpath" test -x "$binpath" || fail "unable to locate vault binary at $binpath"
# To keep the authentication method and module verification consistent between all # To keep the authentication method and module verification consistent between all
# Enos scenarios we authenticate using testuser created by vault_verify_write_data module # Enos scenarios we authenticate using testuser created by vault_verify_write_data module
retry 5 $binpath login -method=userpass username=testuser password=passuser1 retry 5 "$binpath" login -method=userpass username=testuser password=passuser1
retry 5 $binpath kv get secret/test retry 5 "$binpath" kv get secret/test

View File

@@ -22,9 +22,11 @@ locals {
resource "enos_remote_exec" "smoke-verify-replication" { resource "enos_remote_exec" "smoke-verify-replication" {
for_each = local.instances for_each = local.instances
content = templatefile("${path.module}/templates/smoke-verify-replication.sh", { environment = {
vault_edition = var.vault_edition VAULT_EDITION = var.vault_edition
}) }
scripts = [abspath("${path.module}/scripts/smoke-verify-replication.sh")]
transport = { transport = {
ssh = { ssh = {

View File

@@ -8,18 +8,16 @@
set -e set -e
edition=${vault_edition}
function fail() { function fail() {
echo "$1" 1>&2 echo "$1" 1>&2
exit 1 exit 1
} }
# Replication status endpoint should have data.mode disabled for OSS release # Replication status endpoint should have data.mode disabled for CE release
status=$(curl -s http://localhost:8200/v1/sys/replication/status) status=$(curl -s http://localhost:8200/v1/sys/replication/status)
if [ "$edition" == "oss" ]; then if [ "$VAULT_EDITION" == "ce" ]; then
if [ "$(jq -r '.data.mode' <<< "$status")" != "disabled" ]; then if [ "$(jq -r '.data.mode' <<< "$status")" != "disabled" ]; then
fail "replication data mode is not disabled for OSS release!" fail "replication data mode is not disabled for CE release!"
fi fi
else else
if [ "$(jq -r '.data.dr' <<< "$status")" == "" ]; then if [ "$(jq -r '.data.dr' <<< "$status")" == "" ]; then

View File

@@ -22,9 +22,11 @@ locals {
resource "enos_remote_exec" "smoke-verify-ui" { resource "enos_remote_exec" "smoke-verify-ui" {
for_each = local.instances for_each = local.instances
content = templatefile("${path.module}/templates/smoke-verify-ui.sh", { environment = {
vault_install_dir = var.vault_install_dir, VAULT_ADDR = var.vault_addr,
}) }
scripts = [abspath("${path.module}/scripts/smoke-verify-ui.sh")]
transport = { transport = {
ssh = { ssh = {

View File

@@ -0,0 +1,21 @@
#!/usr/bin/env bash
# Copyright (c) HashiCorp, Inc.
# SPDX-License-Identifier: BUSL-1.1
set -e
fail() {
echo "$1" 1>&2
exit 1
}
url_effective=$(curl -w "%{url_effective}\n" -I -L -s -S "${VAULT_ADDR}" -o /dev/null)
expected="${VAULT_ADDR}/ui/"
if [ "${url_effective}" != "${expected}" ]; then
fail "Expecting Vault to redirect to UI.\nExpected: ${expected}\nGot: ${url_effective}"
fi
if curl -s "${VAULT_ADDR}/ui/" | grep -q 'Vault UI is not available'; then
fail "Vault UI is not available"
fi

View File

@@ -1,17 +0,0 @@
#!/usr/bin/env bash
# Copyright (c) HashiCorp, Inc.
# SPDX-License-Identifier: BUSL-1.1
set -e
fail() {
echo "$1" 1>&2
exit 1
}
if [ "$(curl -s -o /dev/null -w "%%{redirect_url}" http://localhost:8200/)" != "http://localhost:8200/ui/" ]; then
fail "Port 8200 not redirecting to UI"
fi
if curl -s http://localhost:8200/ui/ | grep -q 'Vault UI is not available'; then
fail "Vault UI is not available"
fi

View File

@@ -1,11 +1,10 @@
# Copyright (c) HashiCorp, Inc. # Copyright (c) HashiCorp, Inc.
# SPDX-License-Identifier: BUSL-1.1 # SPDX-License-Identifier: BUSL-1.1
variable "vault_addr" {
variable "vault_install_dir" {
type = string type = string
description = "The directory where the Vault binary will be installed" description = "The vault cluster address"
default = null default = "http://localhost:8200"
} }
variable "vault_instance_count" { variable "vault_instance_count" {

View File

@@ -45,8 +45,9 @@ resource "enos_remote_exec" "smoke-verify-undo-logs" {
for_each = local.public_ips for_each = local.public_ips
environment = { environment = {
VAULT_TOKEN = var.vault_root_token VAULT_ADDR = "http://localhost:8200"
VAULT_ADDR = "http://localhost:8200" VAULT_INSTALL_DIR = var.vault_install_dir
VAULT_TOKEN = var.vault_root_token
} }
scripts = [abspath("${path.module}/scripts/smoke-verify-undo-logs.sh")] scripts = [abspath("${path.module}/scripts/smoke-verify-undo-logs.sh")]

View File

@@ -2,29 +2,35 @@
# Copyright (c) HashiCorp, Inc. # Copyright (c) HashiCorp, Inc.
# SPDX-License-Identifier: BUSL-1.1 # SPDX-License-Identifier: BUSL-1.1
function fail() { function fail() {
echo "$1" 1>&2 echo "$1" 1>&2
exit 1 exit 1
} }
[[ -z "$VAULT_ADDR" ]] && fail "VAULT_ADDR env variable has not been set"
[[ -z "$VAULT_INSTALL_DIR" ]] && fail "VAULT_INSTALL_DIR env variable has not been set"
[[ -z "$VAULT_TOKEN" ]] && fail "VAULT_TOKEN env variable has not been set"
binpath=${VAULT_INSTALL_DIR}/vault
test -x "$binpath" || fail "unable to locate vault binary at $binpath"
count=0 count=0
retries=20 retries=5
while :; do while :; do
leader_address=$(curl -H "X-Vault-Request: true" -H "X-Vault-Token: $VAULT_TOKEN" "$VAULT_ADDR/v1/sys/leader" | jq '.leader_address' | sed 's/\"//g') state=$($binpath read sys/metrics -format=json | jq -r '.data.Gauges[] | select(.Name == "vault.core.replication.write_undo_logs")')
state=$(curl --header "X-Vault-Token: $VAULT_TOKEN" "$leader_address/v1/sys/metrics" | jq -r '.Gauges[] | select(.Name == "vault.core.replication.write_undo_logs")') target_undo_logs_status="$(jq -r '.Value' <<< "$state")"
target_undo_logs_status="$(jq -r '.Value' <<< "$state")"
if [ "$target_undo_logs_status" == "1" ]; then if [ "$target_undo_logs_status" == "1" ]; then
exit 0 exit 0
fi fi
wait=$((2 ** count)) wait=$((2 ** count))
count=$((count + 1)) count=$((count + 1))
if [ "$count" -lt "$retries" ]; then if [ "$count" -lt "$retries" ]; then
echo "$state" echo "Waiting for vault.core.replication.write_undo_logs to have Value:1"
sleep "$wait" echo "$state"
else sleep "$wait"
fail "Undo_logs did not get into the correct status" else
fi fail "Timed out waiting for vault.core.replication.write_undo_logs to have Value:1"
fi
done done

View File

@@ -45,11 +45,12 @@ locals {
resource "enos_remote_exec" "verify_node_unsealed" { resource "enos_remote_exec" "verify_node_unsealed" {
for_each = local.instances for_each = local.instances
content = templatefile("${path.module}/templates/verify-vault-node-unsealed.sh", { scripts = [abspath("${path.module}/scripts/verify-vault-node-unsealed.sh")]
vault_cluster_addr = "${each.value.private_ip}:${var.vault_cluster_addr_port}"
vault_install_dir = var.vault_install_dir environment = {
vault_local_binary_path = "${var.vault_install_dir}/vault" VAULT_CLUSTER_ADDR = "${each.value.private_ip}:${var.vault_cluster_addr_port}"
}) VAULT_INSTALL_DIR = var.vault_install_dir
}
transport = { transport = {
ssh = { ssh = {

View File

@@ -4,8 +4,7 @@
set -e set -e
# shellcheck disable=SC2154 binpath=${VAULT_INSTALL_DIR}/vault
binpath=${vault_install_dir}/vault
fail() { fail() {
echo "$1" 1>&2 echo "$1" 1>&2
@@ -14,12 +13,12 @@ fail() {
test -x "$binpath" || fail "unable to locate vault binary at $binpath" test -x "$binpath" || fail "unable to locate vault binary at $binpath"
export VAULT_ADDR='http://127.0.0.1:8200' export VAULT_ADDR=http://localhost:8200
count=0 count=0
retries=4 retries=4
while :; do while :; do
health_status=$(curl http://127.0.0.1:8200/v1/sys/health |jq '.') health_status=$(curl -s "${VAULT_CLUSTER_ADDR}/v1/sys/health" |jq '.')
unseal_status=$($binpath status -format json | jq -Mr --argjson expected "false" '.sealed == $expected') unseal_status=$($binpath status -format json | jq -Mr --argjson expected "false" '.sealed == $expected')
if [[ "$unseal_status" == 'true' ]]; then if [[ "$unseal_status" == 'true' ]]; then
echo "$health_status" echo "$health_status"
@@ -31,7 +30,6 @@ while :; do
if [ "$count" -lt "$retries" ]; then if [ "$count" -lt "$retries" ]; then
sleep "$wait" sleep "$wait"
else else
# shellcheck disable=SC2154 fail "expected ${VAULT_CLUSTER_ADDR} to be unsealed, got unseal status: $unseal_status"
fail "expected ${vault_cluster_addr} to be unsealed, got unseal status: $unseal_status"
fi fi
done done

View File

@@ -69,14 +69,16 @@ locals {
resource "enos_remote_exec" "verify_all_nodes_have_updated_version" { resource "enos_remote_exec" "verify_all_nodes_have_updated_version" {
for_each = local.instances for_each = local.instances
content = templatefile("${path.module}/templates/verify-cluster-version.sh", { environment = {
vault_install_dir = var.vault_install_dir, VAULT_INSTALL_DIR = var.vault_install_dir,
vault_build_date = var.vault_build_date, VAULT_BUILD_DATE = var.vault_build_date,
vault_version = var.vault_product_version, VAULT_VERSION = var.vault_product_version,
vault_edition = var.vault_edition, VAULT_EDITION = var.vault_edition,
vault_revision = var.vault_revision, VAULT_REVISION = var.vault_revision,
vault_token = var.vault_root_token, VAULT_TOKEN = var.vault_root_token,
}) }
scripts = [abspath("${path.module}/scripts/verify-cluster-version.sh")]
transport = { transport = {
ssh = { ssh = {

View File

@@ -7,26 +7,27 @@
# revision SHA, and edition metadata. # revision SHA, and edition metadata.
set -e set -e
binpath=${vault_install_dir}/vault binpath=${VAULT_INSTALL_DIR}/vault
edition=${vault_edition} edition=${VAULT_EDITION}
version=${vault_version} version=${VAULT_VERSION}
sha=${vault_revision} sha=${VAULT_REVISION}
build_date=${vault_build_date} build_date=${VAULT_BUILD_DATE}
# VAULT_TOKEN must also be set
fail() { fail() {
echo "$1" 1>&2 echo "$1" 1>&2
exit 1 exit 1
} }
test -x "$binpath" || fail "unable to locate vault binary at $binpath" test -x "$binpath" || fail "unable to locate vault binary at $binpath"
export VAULT_ADDR='http://127.0.0.1:8200' export VAULT_ADDR='http://127.0.0.1:8200'
export VAULT_TOKEN='${vault_token}' [[ -z "$VAULT_TOKEN" ]] && fail "VAULT_TOKEN env variable has not been set"
version_expected="Vault v$version ($sha), built $build_date" version_expected="Vault v$version ($sha), built $build_date"
case "$edition" in case "$edition" in
*oss) ;; *ce) ;;
*ent) ;; *ent) ;;
*ent.hsm) version_expected="$version_expected (cgo)";; *ent.hsm) version_expected="$version_expected (cgo)";;
*ent.fips1402) version_expected="$version_expected (cgo)" ;; *ent.fips1402) version_expected="$version_expected (cgo)" ;;

View File

@@ -5,7 +5,7 @@
set -e set -e
function retry { retry() {
local retries=$1 local retries=$1
shift shift
local count=0 local count=0
@@ -24,11 +24,15 @@ function retry {
return 0 return 0
} }
function fail { fail() {
echo "$1" 1>&2 echo "$1" 1>&2
exit 1 exit 1
} }
[[ -z "$VAULT_ADDR" ]] && fail "VAULT_ADDR env variable has not been set"
[[ -z "$VAULT_INSTALL_DIR" ]] && fail "VAULT_INSTALL_DIR env variable has not been set"
[[ -z "$VAULT_TOKEN" ]] && fail "VAULT_TOKEN env variable has not been set"
binpath=${VAULT_INSTALL_DIR}/vault binpath=${VAULT_INSTALL_DIR}/vault
test -x "$binpath" || fail "unable to locate vault binary at $binpath" test -x "$binpath" || fail "unable to locate vault binary at $binpath"
@@ -36,16 +40,16 @@ test -x "$binpath" || fail "unable to locate vault binary at $binpath"
retry 5 "$binpath" status > /dev/null 2>&1 retry 5 "$binpath" status > /dev/null 2>&1
# Create user policy # Create user policy
retry 5 $binpath policy write reguser -<<EOF retry 5 "$binpath" policy write reguser -<<EOF
path "*" { path "*" {
capabilities = ["read", "list"] capabilities = ["read", "list"]
} }
EOF EOF
# Enable the userpass auth method # Enable the userpass auth method
retry 5 $binpath auth enable userpass > /dev/null 2>&1 retry 5 "$binpath" auth enable userpass > /dev/null 2>&1
# Create new user and attach reguser policy # Create new user and attach reguser policy
retry 5 $binpath write auth/userpass/users/testuser password="passuser1" policies="reguser" retry 5 "$binpath" write auth/userpass/users/testuser password="passuser1" policies="reguser"
retry 5 $binpath secrets enable -path="secret" kv retry 5 "$binpath" secrets enable -path="secret" kv

View File

@@ -5,7 +5,7 @@
set -e set -e
function retry { retry() {
local retries=$1 local retries=$1
shift shift
local count=0 local count=0
@@ -24,15 +24,19 @@ function retry {
return 0 return 0
} }
function fail { fail() {
echo "$1" 1>&2 echo "$1" 1>&2
exit 1 exit 1
} }
[[ -z "$TEST_KEY" ]] && fail "TEST_KEY env variable has not been set"
[[ -z "$TEST_VALUE" ]] && fail "TEST_VALUE env variable has not been set"
[[ -z "$VAULT_ADDR" ]] && fail "VAULT_ADDR env variable has not been set"
[[ -z "$VAULT_INSTALL_DIR" ]] && fail "VAULT_INSTALL_DIR env variable has not been set"
[[ -z "$VAULT_TOKEN" ]] && fail "VAULT_TOKEN env variable has not been set"
binpath=${VAULT_INSTALL_DIR}/vault binpath=${VAULT_INSTALL_DIR}/vault
testkey=${TEST_KEY}
testvalue=${TEST_VALUE}
test -x "$binpath" || fail "unable to locate vault binary at $binpath" test -x "$binpath" || fail "unable to locate vault binary at $binpath"
retry 5 $binpath kv put secret/test $testkey=$testvalue retry 5 "$binpath" kv put secret/test "$TEST_KEY=$TEST_VALUE"

View File

@@ -0,0 +1,68 @@
# Copyright (c) HashiCorp, Inc.
# SPDX-License-Identifier: BUSL-1.1
# Pin the enos provider, which supplies the enos_remote_exec resource used
# below to run the wait-for-leader script on a remote vault host over SSH.
terraform {
required_providers {
enos = {
source = "app.terraform.io/hashicorp-qti/enos"
}
}
}
# Inputs for the wait-for-leader module. All are forwarded to the remote
# script as environment variables (see the enos_remote_exec resource below).
variable "vault_install_dir" {
type = string
description = "The directory where the Vault binary will be installed"
}
variable "vault_root_token" {
type = string
description = "The vault root token"
}
variable "vault_instance_count" {
type = number
description = "The number of instances in the vault cluster"
}
# NOTE(review): declared as a map; the resource below indexes it with
# var.vault_hosts[0], so keys are presumably "0"-based strings — confirm
# against the caller.
variable "vault_hosts" {
type = map(object({
private_ip = string
public_ip = string
}))
description = "The vault cluster hosts that can be expected as a leader"
}
variable "timeout" {
type = number
description = "The max number of seconds to wait before timing out"
default = 60
}
variable "retry_interval" {
type = number
description = "How many seconds to wait between each retry"
default = 2
}
locals {
  # Private IPs of every host that may legitimately become leader. Passed to
  # the wait script as a JSON array via VAULT_INSTANCE_PRIVATE_IPS.
  # var.vault_hosts is already map(object) and private_ip is already string,
  # so no tomap()/tostring() coercion is needed.
  private_ips = [for host in values(var.vault_hosts) : host.private_ip]
}
# Runs scripts/wait-for-leader.sh on the first vault host until one of the
# candidate private IPs is elected leader, or TIMEOUT_SECONDS elapses.
resource "enos_remote_exec" "wait_for_leader_in_vault_hosts" {
environment = {
RETRY_INTERVAL = var.retry_interval
TIMEOUT_SECONDS = var.timeout
VAULT_ADDR = "http://127.0.0.1:8200"
VAULT_TOKEN = var.vault_root_token
# JSON array of candidate leader IPs, decoded with jq inside the script.
VAULT_INSTANCE_PRIVATE_IPS = jsonencode(local.private_ips)
VAULT_INSTALL_DIR = var.vault_install_dir
}
scripts = [abspath("${path.module}/scripts/wait-for-leader.sh")]
transport = {
ssh = {
# Map indexed with a number: Terraform coerces 0 to key "0" — assumes
# vault_hosts keys are "0"-based strings. TODO confirm with callers.
host = var.vault_hosts[0].public_ip
}
}
}

View File

@@ -0,0 +1,53 @@
#!/usr/bin/env bash
# Copyright (c) HashiCorp, Inc.
# SPDX-License-Identifier: BUSL-1.1

# Wait until one of the hosts listed in VAULT_INSTANCE_PRIVATE_IPS becomes
# the vault cluster leader, retrying every RETRY_INTERVAL seconds for up to
# TIMEOUT_SECONDS. On success the leader's private IP is written to stdout.
#
# Required environment variables:
#   RETRY_INTERVAL             - seconds to sleep between retries
#   TIMEOUT_SECONDS            - max seconds to wait before failing
#   VAULT_ADDR                 - address of the local vault listener
#   VAULT_INSTALL_DIR          - directory containing the vault binary
#   VAULT_INSTANCE_PRIVATE_IPS - JSON array of candidate leader private IPs
#   VAULT_TOKEN                - token used to query sys/leader

set -e

fail() {
  echo "$1" 1>&2
  exit 1
}

[[ -z "$RETRY_INTERVAL" ]] && fail "RETRY_INTERVAL env variable has not been set"
[[ -z "$TIMEOUT_SECONDS" ]] && fail "TIMEOUT_SECONDS env variable has not been set"
[[ -z "$VAULT_ADDR" ]] && fail "VAULT_ADDR env variable has not been set"
[[ -z "$VAULT_INSTALL_DIR" ]] && fail "VAULT_INSTALL_DIR env variable has not been set"
[[ -z "$VAULT_INSTANCE_PRIVATE_IPS" ]] && fail "VAULT_INSTANCE_PRIVATE_IPS env variable has not been set"
[[ -z "$VAULT_TOKEN" ]] && fail "VAULT_TOKEN env variable has not been set"

binpath=${VAULT_INSTALL_DIR}/vault
test -x "$binpath" || fail "unable to locate vault binary at $binpath"

# Print the leader's private IP and return 0 iff the current leader is one of
# the expected candidates in VAULT_INSTANCE_PRIVATE_IPS; return 1 otherwise.
findLeaderInPrivateIPs() {
  # Find the leader private IP address
  local leader_private_ip
  local isIn
  if ! leader_private_ip=$($binpath read sys/leader -format=json | jq -r '.data.leader_address | scan("[0-9]+\\.[0-9]+\\.[0-9]+\\.[0-9]+")') ; then
    # Some older versions of vault don't support reading sys/leader. Fall back
    # to the CLI status. Bug fixes vs prior revision: the condition here was
    # inverted (returned 1 when the fallback *succeeded*) and jq was missing
    # -r, which left quotes around the IP and broke the membership check below.
    if ! leader_private_ip=$($binpath status -format json | jq -r '.leader_address | scan("[0-9]+\\.[0-9]+\\.[0-9]+\\.[0-9]+")'); then
      return 1
    fi
  fi

  # Succeed only when the leader IP is one of our expected candidates.
  if isIn=$(jq -r --arg ip "$leader_private_ip" 'map(select(. == $ip)) | length == 1' <<< "$VAULT_INSTANCE_PRIVATE_IPS"); then
    if [[ "$isIn" == "true" ]]; then
      echo "$leader_private_ip"
      return 0
    fi
  fi

  return 1
}

begin_time=$(date +%s)
end_time=$((begin_time + TIMEOUT_SECONDS))
while [ "$(date +%s)" -lt "$end_time" ]; do
  if findLeaderInPrivateIPs; then
    exit 0
  fi

  sleep "$RETRY_INTERVAL"
done

fail "Timed out waiting for one of $VAULT_INSTANCE_PRIVATE_IPS to be leader."