diff --git a/enos/README.md b/enos/README.md index 2aef9b9e07..e5b1572d98 100644 --- a/enos/README.md +++ b/enos/README.md @@ -120,11 +120,22 @@ cluster is at the desired version, along with additional verifications. ## Autopilot The [`autopilot` scenario](./enos-scenario-autopilot.hcl) creates a Vault cluster using -the version specified in `vault_upgrade_initial_release`. Next, it creates additional -nodes with the candiate version of Vault as determined by the `builder` variant. +the version specified in `vault_upgrade_initial_release` and writes test data to the cluster. Next, it creates additional nodes with the candidate version of Vault as determined by the `vault_product_version` variable. The module uses AWS auto-join to handle discovery and unseals with auto-unseal or Shamir depending on the `seal` variant. After the new nodes have joined and been -unsealed, it waits for Autopilot to upgrade the new nodes and demote the old nodes. +unsealed, it verifies that the previously written data can be read on the new nodes. The Autopilot upgrade verification checks that the upgrade status is "await-server-removal" and that the target version is set to the version of the upgraded nodes. This test also verifies the undo_logs status on Vault versions 1.13.x. + +## Replication +The [`replication` scenario](./enos-scenario-replication.hcl) creates two 3-node Vault clusters and runs the following verification steps: + + 1. Writes data on the primary cluster + 1. Enables performance replication + 1. Verifies reading the stored data from the secondary cluster + 1. Verifies the initial replication status between both clusters + 1. Replaces the leader node and one standby node on the primary Vault cluster + 1. Verifies the updated replication status between both clusters + + This scenario verifies that the performance replication status on both clusters reports a connection_status of "connected", and that the secondary cluster's known_primaries cluster addresses are updated to the IP addresses of the active nodes in the primary Vault cluster. This scenario currently works around issues VAULT-12311 and VAULT-12309. The scenario fails when the primary storage backend is Consul due to issue VAULT-12332. # Variants Both scenarios support a matrix of variants.
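For example, a single variant combination of the `replication` scenario can be launched and later destroyed with the enos CLI. This is only a sketch: it assumes the usual `variant:value` filter syntax and picks one value per variant from the matrix blocks in the scenario file.

```shell
# Launch one variant combination of the replication scenario
enos scenario launch replication arch:amd64 artifact_source:local artifact_type:bundle \
  consul_version:1.14.2 distro:ubuntu edition:ent \
  primary_backend:raft primary_seal:awskms secondary_backend:raft secondary_seal:awskms

# Tear the same variant combination down when finished
enos scenario destroy replication arch:amd64 artifact_source:local artifact_type:bundle \
  consul_version:1.14.2 distro:ubuntu edition:ent \
  primary_backend:raft primary_seal:awskms secondary_backend:raft secondary_seal:awskms
```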
In order to achieve broad coverage while diff --git a/enos/enos-modules.hcl b/enos/enos-modules.hcl index 789fb7805e..a5eac890b9 100644 --- a/enos/enos-modules.hcl +++ b/enos/enos-modules.hcl @@ -47,10 +47,20 @@ module "get_local_metadata" { source = "./modules/get_local_metadata" } +module "generate_secondary_token" { + source = "./modules/generate_secondary_token" + + vault_install_dir = var.vault_install_dir +} + module "read_license" { source = "./modules/read_license" } +module "shutdown_node" { + source = "./modules/shutdown_node" +} + module "vault_agent" { source = "./modules/vault_agent" @@ -77,6 +87,19 @@ module "vault_cluster" { vault_install_dir = var.vault_install_dir } +module "vault_get_cluster_ips" { + source = "./modules/vault_get_cluster_ips" + + vault_install_dir = var.vault_install_dir +} + +module "vault_unseal_nodes" { + source = "./modules/vault_unseal_nodes" + + vault_install_dir = var.vault_install_dir + vault_instance_count = var.vault_instance_count +} + module "vault_upgrade" { source = "./modules/vault_upgrade" @@ -107,14 +130,14 @@ module "vault_verify_undo_logs" { } module "vault_verify_replication" { - source = "./modules/vault-verify-replication" + source = "./modules/vault_verify_replication" vault_install_dir = var.vault_install_dir vault_instance_count = var.vault_instance_count } module "vault_verify_ui" { - source = "./modules/vault-verify-ui" + source = "./modules/vault_verify_ui" vault_install_dir = var.vault_install_dir vault_instance_count = var.vault_instance_count @@ -127,6 +150,31 @@ module "vault_verify_unsealed" { vault_instance_count = var.vault_instance_count } +module "vault_setup_perf_primary" { + source = "./modules/vault_setup_perf_primary" + + vault_install_dir = var.vault_install_dir +} + +module "vault_setup_perf_secondary" { + source = "./modules/vault_setup_perf_secondary" + + vault_install_dir = var.vault_install_dir +} + +module "vault_verify_read_data" { + source = "./modules/vault_verify_read_data" + + vault_install_dir = var.vault_install_dir + vault_instance_count = var.vault_instance_count +} + +module "vault_verify_performance_replication" { + source = "./modules/vault_verify_performance_replication" + + vault_install_dir = var.vault_install_dir +} + module "vault_verify_version" { source = "./modules/vault_verify_version" @@ -134,8 +182,8 @@ module "vault_verify_version" { vault_instance_count = var.vault_instance_count } -module "vault_verify_write_test_data" { - source = "./modules/vault-verify-write-data" +module "vault_verify_write_data" { + source = "./modules/vault_verify_write_data" vault_install_dir = var.vault_install_dir vault_instance_count = var.vault_instance_count diff --git a/enos/enos-scenario-autopilot.hcl b/enos/enos-scenario-autopilot.hcl index 26615f51b0..c0fa9bf1a1 100644 --- a/enos/enos-scenario-autopilot.hcl +++ b/enos/enos-scenario-autopilot.hcl @@ -7,6 +7,12 @@ scenario "autopilot" { edition = ["ent", "ent.fips1402", "ent.hsm", "ent.hsm.fips1402"] seal = ["awskms", "shamir"] undo_logs_status = ["0", "1"] + + # Packages are not offered for the oss, ent.fips1402, and ent.hsm.fips1402 editions + exclude { + edition = ["oss", "ent.fips1402", "ent.hsm.fips1402"] + artifact_type = ["package"] + } } terraform_cli = terraform_cli.default @@ -137,6 +143,41 @@ scenario "autopilot" { module = module.get_local_metadata } + step "get_vault_cluster_ips" { + module = module.vault_get_cluster_ips + depends_on = [step.create_vault_cluster] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + 
variables { + vault_instances = step.create_vault_cluster.vault_instances + vault_install_dir = local.vault_install_dir + vault_root_token = step.create_vault_cluster.vault_root_token + } + } + + step "verify_write_test_data" { + module = module.vault_verify_write_data + depends_on = [ + step.create_vault_cluster, + step.get_vault_cluster_ips + ] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + variables { + leader_public_ip = step.get_vault_cluster_ips.leader_public_ip + leader_private_ip = step.get_vault_cluster_ips.leader_private_ip + vault_instances = step.create_vault_cluster.vault_instances + vault_install_dir = local.vault_install_dir + vault_root_token = step.create_vault_cluster.vault_root_token + } + } + step "create_autopilot_upgrade_storageconfig" { module = module.autopilot_upgrade_storageconfig @@ -150,9 +191,10 @@ scenario "autopilot" { step "upgrade_vault_cluster_with_autopilot" { module = module.vault_cluster depends_on = [ - step.create_vault_cluster, step.build_vault, + step.create_vault_cluster, step.create_autopilot_upgrade_storageconfig, + step.verify_write_test_data ] providers = { @@ -183,6 +225,27 @@ scenario "autopilot" { } } + step "get_updated_vault_cluster_ips" { + module = module.vault_get_cluster_ips + depends_on = [ + step.create_vault_cluster, + step.get_vault_cluster_ips, + step.upgrade_vault_cluster_with_autopilot + ] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + variables { + vault_instances = step.create_vault_cluster.vault_instances + vault_install_dir = local.vault_install_dir + added_vault_instances = step.upgrade_vault_cluster_with_autopilot.vault_instances + vault_root_token = step.create_vault_cluster.vault_root_token + node_public_ip = step.get_vault_cluster_ips.leader_public_ip + } + } + step "verify_vault_unsealed" { module = module.vault_verify_unsealed depends_on = [ @@ -196,8 +259,7 @@ scenario "autopilot" { variables { vault_install_dir = local.vault_install_dir - vault_instances = step.create_vault_cluster.vault_instances - vault_root_token = step.create_vault_cluster.vault_root_token + vault_instances = step.upgrade_vault_cluster_with_autopilot.vault_instances } } @@ -214,8 +276,28 @@ scenario "autopilot" { variables { vault_install_dir = local.vault_install_dir - vault_instances = step.create_vault_cluster.vault_instances - vault_root_token = step.create_vault_cluster.vault_root_token + vault_instances = step.upgrade_vault_cluster_with_autopilot.vault_instances + vault_root_token = step.upgrade_vault_cluster_with_autopilot.vault_root_token + } + } + + step "verify_read_test_data" { + module = module.vault_verify_read_data + depends_on = [ + step.get_updated_vault_cluster_ips, + step.verify_write_test_data, + step.upgrade_vault_cluster_with_autopilot, + step.verify_raft_auto_join_voter + ] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + variables { + node_public_ips = step.get_updated_vault_cluster_ips.follower_public_ips + vault_instance_count = 6 + vault_install_dir = local.vault_install_dir } } @@ -223,7 +305,7 @@ scenario "autopilot" { module = module.vault_verify_autopilot depends_on = [ step.upgrade_vault_cluster_with_autopilot, - step.verify_vault_unsealed + step.verify_raft_auto_join_voter ] providers = { @@ -244,7 +326,7 @@ scenario "autopilot" { module = module.vault_verify_undo_logs depends_on = [ step.upgrade_vault_cluster_with_autopilot, - step.verify_vault_unsealed + step.verify_autopilot_upgraded_vault_cluster ] providers = { @@ -252,11 +334,10 @@ scenario 
"autopilot" { } variables { - vault_install_dir = local.vault_install_dir - vault_autopilot_upgrade_version = matrix.artifact_source == "local" ? step.get_local_metadata.version : var.vault_product_version - vault_undo_logs_status = matrix.undo_logs_status - vault_instances = step.upgrade_vault_cluster_with_autopilot.vault_instances - vault_root_token = step.create_vault_cluster.vault_root_token + vault_install_dir = local.vault_install_dir + vault_undo_logs_status = matrix.undo_logs_status + vault_instances = step.upgrade_vault_cluster_with_autopilot.vault_instances + vault_root_token = step.create_vault_cluster.vault_root_token } } @@ -290,6 +371,21 @@ scenario "autopilot" { value = step.create_vault_cluster.vault_unseal_keys_b64 } + output "vault_cluster_recovery_key_shares" { + description = "The Vault cluster recovery key shares" + value = step.create_vault_cluster.vault_recovery_key_shares + } + + output "vault_cluster_recovery_keys_b64" { + description = "The Vault cluster recovery keys b64" + value = step.create_vault_cluster.vault_recovery_keys_b64 + } + + output "vault_cluster_recovery_keys_hex" { + description = "The Vault cluster recovery keys hex" + value = step.create_vault_cluster.vault_recovery_keys_hex + } + output "vault_cluster_unseal_keys_hex" { description = "The Vault cluster unseal keys hex" value = step.create_vault_cluster.vault_unseal_keys_hex diff --git a/enos/enos-scenario-replication.hcl b/enos/enos-scenario-replication.hcl new file mode 100644 index 0000000000..d68b83df4a --- /dev/null +++ b/enos/enos-scenario-replication.hcl @@ -0,0 +1,675 @@ +// The replication scenario configures performance replication between two Vault clusters and verifies +// known_primary_cluster_addrs are updated on secondary Vault cluster with the IP addresses of replaced +// nodes on primary Vault cluster +scenario "replication" { + matrix { + arch = ["amd64", "arm64"] + artifact_source = ["local", "crt", "artifactory"] + artifact_type = ["bundle", "package"] + consul_version = ["1.14.2", "1.13.4", "1.12.7"] + distro = ["ubuntu", "rhel"] + edition = ["ent", "ent.fips1402", "ent.hsm", "ent.hsm.fips1402"] + primary_backend = ["raft", "consul"] + primary_seal = ["awskms", "shamir"] + secondary_backend = ["raft", "consul"] + secondary_seal = ["awskms", "shamir"] + + # Packages are not offered for the oss, ent.fips1402, and ent.hsm.fips1402 editions + exclude { + edition = ["ent.fips1402", "ent.hsm.fips1402"] + artifact_type = ["package"] + } + } + + terraform_cli = terraform_cli.default + terraform = terraform.default + providers = [ + provider.aws.default, + provider.enos.ubuntu, + provider.enos.rhel + ] + + locals { + build_tags = { + "ent" = ["ui", "enterprise", "ent"] + "ent.fips1402" = ["ui", "enterprise", "cgo", "hsm", "fips", "fips_140_2", "ent.fips1402"] + "ent.hsm" = ["ui", "enterprise", "cgo", "hsm", "venthsm"] + "ent.hsm.fips1402" = ["ui", "enterprise", "cgo", "hsm", "fips", "fips_140_2", "ent.hsm.fips1402"] + } + bundle_path = matrix.artifact_source != "artifactory" ? abspath(var.vault_bundle_path) : null + dependencies_to_install = ["jq"] + enos_provider = { + rhel = provider.enos.rhel + ubuntu = provider.enos.ubuntu + } + tags = merge({ + "Project Name" : var.project_name + "Project" : "Enos", + "Environment" : "ci" + }, var.tags) + vault_instance_types = { + amd64 = "t3a.small" + arm64 = "t4g.small" + } + vault_instance_type = coalesce(var.vault_instance_type, local.vault_instance_types[matrix.arch]) + vault_license_path = abspath(var.vault_license_path != null ? 
var.vault_license_path : joinpath(path.root, "./support/vault.hclic")) + vault_install_dir_packages = { + rhel = "/bin" + ubuntu = "/usr/bin" + } + vault_install_dir = matrix.artifact_type == "bundle" ? var.vault_install_dir : local.vault_install_dir_packages[matrix.distro] + } + + step "build_vault" { + module = "build_${matrix.artifact_source}" + + variables { + build_tags = var.vault_local_build_tags != null ? var.vault_local_build_tags : local.build_tags[matrix.edition] + bundle_path = local.bundle_path + goarch = matrix.arch + goos = "linux" + artifactory_host = matrix.artifact_source == "artifactory" ? var.artifactory_host : null + artifactory_repo = matrix.artifact_source == "artifactory" ? var.artifactory_repo : null + artifactory_username = matrix.artifact_source == "artifactory" ? var.artifactory_username : null + artifactory_token = matrix.artifact_source == "artifactory" ? var.artifactory_token : null + arch = matrix.artifact_source == "artifactory" ? matrix.arch : null + product_version = var.vault_product_version + artifact_type = matrix.artifact_type + distro = matrix.artifact_source == "artifactory" ? matrix.distro : null + edition = matrix.artifact_source == "artifactory" ? matrix.edition : null + instance_type = matrix.artifact_source == "artifactory" ? local.vault_instance_type : null + revision = var.vault_revision + } + } + + step "find_azs" { + module = module.az_finder + variables { + instance_type = [ + local.vault_instance_type + ] + } + } + + step "create_vpc" { + module = module.create_vpc + depends_on = [step.find_azs] + + variables { + ami_architectures = [matrix.arch] + availability_zones = step.find_azs.availability_zones + common_tags = local.tags + } + } + + step "read_license" { + module = module.read_license + + variables { + file_name = abspath(joinpath(path.root, "./support/vault.hclic")) + } + } + + step "create_primary_backend_cluster" { + module = "backend_${matrix.primary_backend}" + depends_on = [step.create_vpc] + + providers = { + enos = provider.enos.ubuntu + } + + variables { + ami_id = step.create_vpc.ami_ids["ubuntu"][matrix.arch] + common_tags = local.tags + consul_release = { + edition = var.backend_edition + version = matrix.consul_version + } + instance_type = var.backend_instance_type + kms_key_arn = step.create_vpc.kms_key_arn + vpc_id = step.create_vpc.vpc_id + } + } + + step "create_vault_primary_cluster" { + module = module.vault_cluster + depends_on = [ + step.create_primary_backend_cluster, + step.build_vault, + ] + providers = { + enos = local.enos_provider[matrix.distro] + } + + variables { + ami_id = step.create_vpc.ami_ids[matrix.distro][matrix.arch] + common_tags = local.tags + consul_cluster_tag = step.create_primary_backend_cluster.consul_cluster_tag + consul_release = matrix.primary_backend == "consul" ? { + edition = var.backend_edition + version = matrix.consul_version + } : null + dependencies_to_install = local.dependencies_to_install + instance_type = local.vault_instance_type + kms_key_arn = step.create_vpc.kms_key_arn + storage_backend = matrix.primary_backend + unseal_method = matrix.primary_seal + vault_local_artifact_path = local.bundle_path + vault_install_dir = local.vault_install_dir + vault_artifactory_release = matrix.artifact_source == "artifactory" ? 
step.build_vault.vault_artifactory_release : null + vault_license = step.read_license.license + vpc_id = step.create_vpc.vpc_id + } + } + + step "create_secondary_backend_cluster" { + module = "backend_${matrix.secondary_backend}" + depends_on = [step.create_vpc] + + providers = { + enos = provider.enos.ubuntu + } + + variables { + ami_id = step.create_vpc.ami_ids["ubuntu"][matrix.arch] + common_tags = local.tags + consul_release = { + edition = var.backend_edition + version = matrix.consul_version + } + instance_type = var.backend_instance_type + kms_key_arn = step.create_vpc.kms_key_arn + vpc_id = step.create_vpc.vpc_id + } + } + + step "create_vault_secondary_cluster" { + module = module.vault_cluster + depends_on = [ + step.create_secondary_backend_cluster, + step.build_vault, + ] + providers = { + enos = local.enos_provider[matrix.distro] + } + + variables { + ami_id = step.create_vpc.ami_ids[matrix.distro][matrix.arch] + common_tags = local.tags + consul_cluster_tag = step.create_secondary_backend_cluster.consul_cluster_tag + consul_release = matrix.secondary_backend == "consul" ? { + edition = var.backend_edition + version = matrix.consul_version + } : null + dependencies_to_install = local.dependencies_to_install + instance_type = local.vault_instance_type + kms_key_arn = step.create_vpc.kms_key_arn + storage_backend = matrix.secondary_backend + unseal_method = matrix.secondary_seal + vault_local_artifact_path = local.bundle_path + vault_install_dir = local.vault_install_dir + vault_artifactory_release = matrix.artifact_source == "artifactory" ? step.build_vault.vault_artifactory_release : null + vault_license = step.read_license.license + vpc_id = step.create_vpc.vpc_id + } + } + + step "verify_vault_primary_unsealed" { + module = module.vault_verify_unsealed + depends_on = [ + step.create_vault_primary_cluster + ] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + variables { + vault_instances = step.create_vault_primary_cluster.vault_instances + vault_install_dir = local.vault_install_dir + } + } + + step "verify_vault_secondary_unsealed" { + module = module.vault_verify_unsealed + depends_on = [ + step.create_vault_secondary_cluster + ] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + variables { + vault_instances = step.create_vault_secondary_cluster.vault_instances + vault_install_dir = local.vault_install_dir + } + } + + step "get_primary_cluster_ips" { + module = module.vault_get_cluster_ips + depends_on = [step.verify_vault_primary_unsealed] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + variables { + vault_instances = step.create_vault_primary_cluster.vault_instances + vault_install_dir = local.vault_install_dir + vault_root_token = step.create_vault_primary_cluster.vault_root_token + } + } + + step "get_secondary_cluster_ips" { + module = module.vault_get_cluster_ips + depends_on = [step.verify_vault_secondary_unsealed] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + variables { + vault_instances = step.create_vault_secondary_cluster.vault_instances + vault_install_dir = local.vault_install_dir + vault_root_token = step.create_vault_secondary_cluster.vault_root_token + } + } + + step "verify_vault_primary_write_data" { + module = module.vault_verify_write_data + depends_on = [step.get_primary_cluster_ips] + + + providers = { + enos = local.enos_provider[matrix.distro] + } + + variables { + leader_public_ip = step.get_primary_cluster_ips.leader_public_ip + leader_private_ip = 
step.get_primary_cluster_ips.leader_private_ip + vault_instances = step.create_vault_primary_cluster.vault_instances + vault_install_dir = local.vault_install_dir + vault_root_token = step.create_vault_primary_cluster.vault_root_token + } + } + + step "configure_performance_replication_primary" { + module = module.vault_setup_perf_primary + depends_on = [ + step.get_primary_cluster_ips, + step.verify_vault_primary_write_data + ] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + variables { + primary_leader_public_ip = step.get_primary_cluster_ips.leader_public_ip + primary_leader_private_ip = step.get_primary_cluster_ips.leader_private_ip + vault_install_dir = local.vault_install_dir + vault_root_token = step.create_vault_primary_cluster.vault_root_token + } + } + + step "generate_secondary_token" { + module = module.generate_secondary_token + depends_on = [step.configure_performance_replication_primary] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + variables { + primary_leader_public_ip = step.get_primary_cluster_ips.leader_public_ip + vault_install_dir = local.vault_install_dir + vault_root_token = step.create_vault_primary_cluster.vault_root_token + } + } + + step "configure_performance_replication_secondary" { + module = module.vault_setup_perf_secondary + depends_on = [step.generate_secondary_token] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + variables { + secondary_leader_public_ip = step.get_secondary_cluster_ips.leader_public_ip + secondary_leader_private_ip = step.get_secondary_cluster_ips.leader_private_ip + vault_install_dir = local.vault_install_dir + vault_root_token = step.create_vault_secondary_cluster.vault_root_token + wrapping_token = step.generate_secondary_token.secondary_token + } + } + + // After replication is enabled, the secondary cluster followers need to be unsealed + // Secondary unseal keys are passed using the guide https://developer.hashicorp.com/vault/docs/enterprise/replication#seals + step "unseal_secondary_followers" { + module = module.vault_unseal_nodes + depends_on = [ + step.create_vault_primary_cluster, + step.create_vault_secondary_cluster, + step.get_secondary_cluster_ips, + step.configure_performance_replication_secondary + ] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + variables { + follower_public_ips = step.get_secondary_cluster_ips.follower_public_ips + vault_install_dir = local.vault_install_dir + vault_unseal_keys = matrix.primary_seal == "shamir" ? step.create_vault_primary_cluster.vault_unseal_keys_hex : step.create_vault_primary_cluster.vault_recovery_keys_hex + vault_seal_type = matrix.primary_seal == "shamir" ? 
matrix.primary_seal : matrix.secondary_seal + } + } + + step "verify_vault_secondary_unsealed_after_replication" { + module = module.vault_verify_unsealed + depends_on = [ + step.unseal_secondary_followers + ] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + variables { + vault_instances = step.create_vault_secondary_cluster.vault_instances + vault_install_dir = local.vault_install_dir + } + } + + step "verify_performance_replication" { + module = module.vault_verify_performance_replication + depends_on = [step.verify_vault_secondary_unsealed_after_replication] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + variables { + primary_leader_public_ip = step.get_primary_cluster_ips.leader_public_ip + primary_leader_private_ip = step.get_primary_cluster_ips.leader_private_ip + secondary_leader_public_ip = step.get_secondary_cluster_ips.leader_public_ip + secondary_leader_private_ip = step.get_secondary_cluster_ips.leader_private_ip + vault_install_dir = local.vault_install_dir + } + } + + step "verify_replicated_data" { + module = module.vault_verify_read_data + depends_on = [ + step.verify_performance_replication, + step.get_secondary_cluster_ips, + step.verify_vault_primary_write_data + ] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + variables { + node_public_ips = step.get_secondary_cluster_ips.follower_public_ips + vault_install_dir = local.vault_install_dir + } + } + + step "add_primary_cluster_nodes" { + module = module.vault_cluster + depends_on = [ + step.create_vpc, + step.create_primary_backend_cluster, + step.create_vault_primary_cluster, + step.verify_replicated_data + ] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + variables { + ami_id = step.create_vpc.ami_ids[matrix.distro][matrix.arch] + common_tags = local.tags + consul_cluster_tag = step.create_primary_backend_cluster.consul_cluster_tag + consul_release = matrix.primary_backend == "consul" ? { + edition = var.backend_edition + version = matrix.consul_version + } : null + dependencies_to_install = local.dependencies_to_install + instance_type = local.vault_instance_type + kms_key_arn = step.create_vpc.kms_key_arn + storage_backend = matrix.primary_backend + unseal_method = matrix.primary_seal + vault_cluster_tag = step.create_vault_primary_cluster.vault_cluster_tag + vault_init = false + vault_license = step.read_license.license + vault_local_artifact_path = local.bundle_path + vault_install_dir = local.vault_install_dir + vault_artifactory_release = matrix.artifact_source == "artifactory" ? step.build_vault.vault_artifactory_release : null + vault_node_prefix = "newprimary_node" + vault_root_token = step.create_vault_primary_cluster.vault_root_token + vault_unseal_when_no_init = matrix.primary_seal == "shamir" + vault_unseal_keys = matrix.primary_seal == "shamir" ? 
step.create_vault_primary_cluster.vault_unseal_keys_hex : null + vpc_id = step.create_vpc.vpc_id + } + } + + step "verify_add_node_unsealed" { + module = module.vault_verify_unsealed + depends_on = [step.add_primary_cluster_nodes] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + variables { + vault_instances = step.add_primary_cluster_nodes.vault_instances + vault_install_dir = local.vault_install_dir + } + } + + step "verify_raft_auto_join_voter" { + skip_step = matrix.primary_backend != "raft" + module = module.vault_verify_raft_auto_join_voter + depends_on = [ + step.add_primary_cluster_nodes, + step.create_vault_primary_cluster, + step.verify_add_node_unsealed + ] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + variables { + vault_instances = step.add_primary_cluster_nodes.vault_instances + vault_install_dir = local.vault_install_dir + vault_root_token = step.create_vault_primary_cluster.vault_root_token + } + } + + step "remove_primary_follower_1" { + module = module.shutdown_node + depends_on = [ + step.get_primary_cluster_ips, + step.verify_add_node_unsealed + ] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + variables { + node_public_ip = step.get_primary_cluster_ips.follower_public_ip_1 + } + } + + step "remove_primary_leader" { + module = module.shutdown_node + depends_on = [ + step.get_primary_cluster_ips, + step.remove_primary_follower_1 + ] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + variables { + node_public_ip = step.get_primary_cluster_ips.leader_public_ip + } + } + + step "get_updated_primary_cluster_ips" { + module = module.vault_get_cluster_ips + depends_on = [ + step.add_primary_cluster_nodes, + step.remove_primary_follower_1, + step.remove_primary_leader + ] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + variables { + vault_instances = step.create_vault_primary_cluster.vault_instances + vault_install_dir = local.vault_install_dir + added_vault_instances = step.add_primary_cluster_nodes.vault_instances + vault_root_token = step.create_vault_primary_cluster.vault_root_token + node_public_ip = step.get_primary_cluster_ips.follower_public_ip_2 + } + } + + step "verify_updated_performance_replication" { + module = module.vault_verify_performance_replication + depends_on = [step.get_updated_primary_cluster_ips] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + variables { + primary_leader_public_ip = step.get_updated_primary_cluster_ips.leader_public_ip + primary_leader_private_ip = step.get_updated_primary_cluster_ips.leader_private_ip + secondary_leader_public_ip = step.get_secondary_cluster_ips.leader_public_ip + secondary_leader_private_ip = step.get_secondary_cluster_ips.leader_private_ip + vault_install_dir = local.vault_install_dir + } + } + + output "vault_primary_cluster_pub_ips" { + description = "The Vault primary cluster public IPs" + value = step.create_vault_primary_cluster.instance_public_ips + } + + output "vault_primary_cluster_priv_ips" { + description = "The Vault primary cluster private IPs" + value = step.create_vault_primary_cluster.instance_private_ips + } + + output "vault_primary_newnode_pub_ip" { + description = "The Vault added new node on primary cluster public IP" + value = step.add_primary_cluster_nodes.instance_public_ips + } + + output "vault_primary_newnode_priv_ip" { + description = "The Vault added new node on primary cluster private IP" + value = step.add_primary_cluster_nodes.instance_private_ips + } + + 
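The replication status outputs further below surface the data that the verification steps read from `sys/replication/performance/status`. A rough manual equivalent of the secondary-side check looks like the following sketch; as in the module's verification script, only `VAULT_ADDR` is set, and the expected primary address is a placeholder.

```shell
# Run on the secondary cluster leader node
export VAULT_ADDR=http://127.0.0.1:8200
vault read -format=json sys/replication/performance/status \
  | jq -r '.data.state, .data.known_primary_cluster_addrs[]'
# Expect a state other than "idle"; after the primary leader is replaced, the list
# should contain the new leader's cluster address, e.g. https://<primary-leader-private-ip>:8201
```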
output "vault_primary_cluster_root_token" { + description = "The Vault primary cluster root token" + value = step.create_vault_primary_cluster.vault_root_token + } + + output "vault_primary_cluster_unseal_keys_b64" { + description = "The Vault primary cluster unseal keys" + value = step.create_vault_primary_cluster.vault_unseal_keys_b64 + } + + output "vault_primary_cluster_unseal_keys_hex" { + description = "The Vault primary cluster unseal keys hex" + value = step.create_vault_primary_cluster.vault_unseal_keys_hex + } + + output "vault_primary_cluster_recovery_key_shares" { + description = "The Vault primary cluster recovery key shares" + value = step.create_vault_primary_cluster.vault_recovery_key_shares + } + + output "vault_primary_cluster_recovery_keys_b64" { + description = "The Vault primary cluster recovery keys b64" + value = step.create_vault_primary_cluster.vault_recovery_keys_b64 + } + + output "vault_primary_cluster_recovery_keys_hex" { + description = "The Vault primary cluster recovery keys hex" + value = step.create_vault_primary_cluster.vault_recovery_keys_hex + } + + output "vault_secondary_cluster_pub_ips" { + description = "The Vault secondary cluster public IPs" + value = step.create_vault_secondary_cluster.instance_public_ips + } + + output "vault_secondary_cluster_priv_ips" { + description = "The Vault secondary cluster private IPs" + value = step.create_vault_secondary_cluster.instance_private_ips + } + + output "vault_primary_performance_replication_status" { + description = "The Vault primary cluster performance replication status" + value = step.verify_performance_replication.primary_replication_status + } + + output "vault_replication_known_primary_cluster_addrs" { + description = "The Vault secondary cluster performance replication status" + value = step.verify_performance_replication.known_primary_cluster_addrs + } + + output "vault_secondary_performance_replication_status" { + description = "The Vault secondary cluster performance replication status" + value = step.verify_performance_replication.secondary_replication_status + } + + output "vault_primary_updated_performance_replication_status" { + description = "The Vault updated primary cluster performance replication status" + value = step.verify_updated_performance_replication.primary_replication_status + } + + output "vault_updated_replication_known_primary_cluster_addrs" { + description = "The Vault secondary cluster performance replication status" + value = step.verify_updated_performance_replication.known_primary_cluster_addrs + } + + output "verify_secondary_updated_performance_replication_status" { + description = "The Vault updated secondary cluster performance replication status" + value = step.verify_updated_performance_replication.secondary_replication_status + } + + output "primary_replication_data_secondaries" { + description = "The Vault primary cluster secondaries connection status" + value = step.verify_performance_replication.primary_replication_data_secondaries + } + + output "secondary_replication_data_primaries" { + description = "The Vault secondary cluster primaries connection status" + value = step.verify_performance_replication.secondary_replication_data_primaries + } + + output "primary_updated_replication_data_secondaries" { + description = "The Vault updated primary cluster secondaries connection status" + value = step.verify_updated_performance_replication.primary_replication_data_secondaries + } + + output "secondary_updated_replication_data_primaries" { + description = "The 
Vault updated secondary cluster primaries connection status" + value = step.verify_updated_performance_replication.secondary_replication_data_primaries + } +} diff --git a/enos/enos-scenario-smoke.hcl b/enos/enos-scenario-smoke.hcl index 84e9dc886b..7ec49b5265 100644 --- a/enos/enos-scenario-smoke.hcl +++ b/enos/enos-scenario-smoke.hcl @@ -9,9 +9,9 @@ scenario "smoke" { edition = ["oss", "ent", "ent.fips1402", "ent.hsm", "ent.hsm.fips1402"] seal = ["awskms", "shamir"] - # Packages are not offered for the oss edition + # Packages are not offered for the oss, ent.fips1402, and ent.hsm.fips1402 editions exclude { - edition = ["oss"] + edition = ["oss", "ent.fips1402", "ent.hsm.fips1402"] artifact_type = ["package"] } } @@ -146,9 +146,13 @@ scenario "smoke" { } variables { - ami_id = step.create_vpc.ami_ids[matrix.distro][matrix.arch] - common_tags = local.tags - consul_cluster_tag = step.create_backend_cluster.consul_cluster_tag + ami_id = step.create_vpc.ami_ids[matrix.distro][matrix.arch] + common_tags = local.tags + consul_cluster_tag = step.create_backend_cluster.consul_cluster_tag + consul_release = matrix.backend == "consul" ? { + edition = var.backend_edition + version = matrix.consul_version + } : null dependencies_to_install = local.dependencies_to_install instance_type = local.vault_instance_type kms_key_arn = step.create_vpc.kms_key_arn @@ -162,6 +166,21 @@ scenario "smoke" { } } + step "get_vault_cluster_ips" { + module = module.vault_get_cluster_ips + depends_on = [step.create_vault_cluster] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + variables { + vault_instances = step.create_vault_cluster.vault_instances + vault_install_dir = local.vault_install_dir + vault_root_token = step.create_vault_cluster.vault_root_token + } + } + step "verify_vault_version" { module = module.vault_verify_version depends_on = [step.create_vault_cluster] @@ -192,6 +211,25 @@ scenario "smoke" { variables { vault_install_dir = local.vault_install_dir vault_instances = step.create_vault_cluster.vault_instances + } + } + + step "verify_write_test_data" { + module = module.vault_verify_write_data + depends_on = [ + step.create_vault_cluster, + step.get_vault_cluster_ips + ] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + variables { + leader_public_ip = step.get_vault_cluster_ips.leader_public_ip + leader_private_ip = step.get_vault_cluster_ips.leader_private_ip + vault_instances = step.create_vault_cluster.vault_instances + vault_install_dir = local.vault_install_dir vault_root_token = step.create_vault_cluster.vault_root_token } } @@ -227,6 +265,23 @@ scenario "smoke" { } } + step "verify_read_test_data" { + module = module.vault_verify_read_data + depends_on = [ + step.verify_write_test_data, + step.verify_replication + ] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + variables { + node_public_ips = step.get_vault_cluster_ips.follower_public_ips + vault_install_dir = local.vault_install_dir + } + } + step "verify_ui" { module = module.vault_verify_ui depends_on = [step.create_vault_cluster] @@ -241,21 +296,6 @@ scenario "smoke" { } } - step "verify_write_test_data" { - module = module.vault_verify_write_test_data - depends_on = [step.create_vault_cluster] - - providers = { - enos = local.enos_provider[matrix.distro] - } - - variables { - vault_instances = step.create_vault_cluster.vault_instances - vault_install_dir = local.vault_install_dir - vault_root_token = step.create_vault_cluster.vault_root_token - } - } - output 
"vault_cluster_instance_ids" { description = "The Vault cluster instance IDs" value = step.create_vault_cluster.instance_ids @@ -281,6 +321,21 @@ scenario "smoke" { value = step.create_vault_cluster.vault_root_token } + output "vault_cluster_recovery_key_shares" { + description = "The Vault cluster recovery key shares" + value = step.create_vault_cluster.vault_recovery_key_shares + } + + output "vault_cluster_recovery_keys_b64" { + description = "The Vault cluster recovery keys b64" + value = step.create_vault_cluster.vault_recovery_keys_b64 + } + + output "vault_cluster_recovery_keys_hex" { + description = "The Vault cluster recovery keys hex" + value = step.create_vault_cluster.vault_recovery_keys_hex + } + output "vault_cluster_unseal_keys_b64" { description = "The Vault cluster unseal keys" value = step.create_vault_cluster.vault_unseal_keys_b64 diff --git a/enos/enos-scenario-upgrade.hcl b/enos/enos-scenario-upgrade.hcl index 6457320a8e..e538aaacbd 100644 --- a/enos/enos-scenario-upgrade.hcl +++ b/enos/enos-scenario-upgrade.hcl @@ -9,12 +9,11 @@ scenario "upgrade" { edition = ["oss", "ent", "ent.fips1402", "ent.hsm", "ent.hsm.fips1402"] seal = ["awskms", "shamir"] - # Packages are not offered for the oss edition + # Packages are not offered for the oss, ent.fips1402, and ent.hsm.fips1402 editions exclude { - edition = ["oss"] + edition = ["oss", "ent.fips1402", "ent.hsm.fips1402"] artifact_type = ["package"] } - } terraform_cli = terraform_cli.default @@ -150,9 +149,13 @@ scenario "upgrade" { } variables { - ami_id = step.create_vpc.ami_ids[matrix.distro][matrix.arch] - common_tags = local.tags - consul_cluster_tag = step.create_backend_cluster.consul_cluster_tag + ami_id = step.create_vpc.ami_ids[matrix.distro][matrix.arch] + common_tags = local.tags + consul_cluster_tag = step.create_backend_cluster.consul_cluster_tag + consul_release = matrix.backend == "consul" ? 
{ + edition = var.backend_edition + version = matrix.consul_version + } : null dependencies_to_install = local.dependencies_to_install instance_type = local.vault_instance_type kms_key_arn = step.create_vpc.kms_key_arn @@ -165,6 +168,41 @@ scenario "upgrade" { } } + step "get_vault_cluster_ips" { + module = module.vault_get_cluster_ips + depends_on = [step.create_vault_cluster] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + variables { + vault_instances = step.create_vault_cluster.vault_instances + vault_install_dir = local.vault_install_dir + vault_root_token = step.create_vault_cluster.vault_root_token + } + } + + step "verify_write_test_data" { + module = module.vault_verify_write_data + depends_on = [ + step.create_vault_cluster, + step.get_vault_cluster_ips + ] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + variables { + leader_public_ip = step.get_vault_cluster_ips.leader_public_ip + leader_private_ip = step.get_vault_cluster_ips.leader_private_ip + vault_instances = step.create_vault_cluster.vault_instances + vault_install_dir = local.vault_install_dir + vault_root_token = step.create_vault_cluster.vault_root_token + } + } + # This step upgrades the Vault cluster to the var.vault_product_version # by getting a bundle or package of that version from the matrix.artifact_source step "upgrade_vault" { @@ -210,11 +248,11 @@ scenario "upgrade" { } } - step "verify_vault_unsealed" { - module = module.vault_verify_unsealed + step "get_updated_vault_cluster_ips" { + module = module.vault_get_cluster_ips depends_on = [ step.create_vault_cluster, - step.upgrade_vault, + step.upgrade_vault ] providers = { @@ -228,6 +266,42 @@ scenario "upgrade" { } } + step "verify_vault_unsealed" { + module = module.vault_verify_unsealed + depends_on = [ + step.create_vault_cluster, + step.get_updated_vault_cluster_ips, + step.upgrade_vault, + ] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + variables { + vault_instances = step.create_vault_cluster.vault_instances + vault_install_dir = local.vault_install_dir + } + } + + step "verify_read_test_data" { + module = module.vault_verify_read_data + depends_on = [ + step.get_updated_vault_cluster_ips, + step.verify_write_test_data, + step.verify_vault_unsealed + ] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + variables { + node_public_ips = step.get_updated_vault_cluster_ips.follower_public_ips + vault_install_dir = local.vault_install_dir + } + } + step "verify_raft_auto_join_voter" { skip_step = matrix.backend != "raft" module = module.vault_verify_raft_auto_join_voter @@ -272,6 +346,21 @@ scenario "upgrade" { value = step.create_vault_cluster.vault_root_token } + output "vault_cluster_recovery_key_shares" { + description = "The Vault cluster recovery key shares" + value = step.create_vault_cluster.vault_recovery_key_shares + } + + output "vault_cluster_recovery_keys_b64" { + description = "The Vault cluster recovery keys b64" + value = step.create_vault_cluster.vault_recovery_keys_b64 + } + + output "vault_cluster_recovery_keys_hex" { + description = "The Vault cluster recovery keys hex" + value = step.create_vault_cluster.vault_recovery_keys_hex + } + output "vault_cluster_unseal_keys_b64" { description = "The Vault cluster unseal keys" value = step.create_vault_cluster.vault_unseal_keys_b64 diff --git a/enos/modules/generate_secondary_token/main.tf b/enos/modules/generate_secondary_token/main.tf new file mode 100644 index 0000000000..fbba304bd7 --- /dev/null +++ 
b/enos/modules/generate_secondary_token/main.tf @@ -0,0 +1,52 @@ +terraform { + required_providers { + enos = { + source = "app.terraform.io/hashicorp-qti/enos" + } + random = { + source = "hashicorp/random" + version = ">= 3.4.3" + } + } +} + +variable "vault_install_dir" { + type = string + description = "The directory where the Vault binary will be installed" +} + +variable "primary_leader_public_ip" { + type = string + description = "Vault primary cluster leader Public IP address" +} + +variable "vault_root_token" { + type = string + description = "The vault root token" +} + +locals { + token_id = random_uuid.token_id.id + secondary_token = enos_remote_exec.fetch_secondary_token.stdout +} +resource "random_uuid" "token_id" {} + +resource "enos_remote_exec" "fetch_secondary_token" { + depends_on = [random_uuid.token_id] + environment = { + VAULT_ADDR = "http://127.0.0.1:8200" + VAULT_TOKEN = var.vault_root_token + } + + inline = ["${var.vault_install_dir}/vault write sys/replication/performance/primary/secondary-token id=${local.token_id} |sed -n '/^wrapping_token:/p' |awk '{print $2}'"] + + transport = { + ssh = { + host = var.primary_leader_public_ip + } + } +} + +output "secondary_token" { + value = local.secondary_token +} diff --git a/enos/modules/shutdown_node/main.tf b/enos/modules/shutdown_node/main.tf new file mode 100644 index 0000000000..0ab4617e16 --- /dev/null +++ b/enos/modules/shutdown_node/main.tf @@ -0,0 +1,22 @@ +terraform { + required_providers { + enos = { + source = "app.terraform.io/hashicorp-qti/enos" + } + } +} + +variable "node_public_ip" { + type = string + description = "Node Public IP address" +} + +resource "enos_remote_exec" "shutdown_node" { + inline = ["sudo shutdown -H --no-wall; exit 0"] + + transport = { + ssh = { + host = var.node_public_ip + } + } +} diff --git a/enos/modules/vault-verify-write-data/main.tf b/enos/modules/vault-verify-write-data/main.tf deleted file mode 100644 index 966e833f74..0000000000 --- a/enos/modules/vault-verify-write-data/main.tf +++ /dev/null @@ -1,50 +0,0 @@ - -terraform { - required_providers { - enos = { - source = "app.terraform.io/hashicorp-qti/enos" - } - } -} - -locals { - instances = { - for idx in range(var.vault_instance_count) : idx => { - public_ip = values(var.vault_instances)[idx].public_ip - private_ip = values(var.vault_instances)[idx].private_ip - } - } -} - -resource "enos_remote_exec" "smoke-enable-secrets-kv" { - - content = templatefile("${path.module}/templates/smoke-enable-secrets-kv.sh", { - vault_install_dir = var.vault_install_dir, - vault_token = var.vault_root_token, - }) - - transport = { - ssh = { - host = local.instances[0].public_ip - } - } -} - -# Verify that we can enable the k/v secrets engine and write data to it. 
-resource "enos_remote_exec" "smoke-write-test-data" { - depends_on = [enos_remote_exec.smoke-enable-secrets-kv] - for_each = local.instances - - content = templatefile("${path.module}/templates/smoke-write-test-data.sh", { - test_key = "smoke${each.key}" - test_value = "fire" - vault_install_dir = var.vault_install_dir, - vault_token = var.vault_root_token, - }) - - transport = { - ssh = { - host = each.value.public_ip - } - } -} diff --git a/enos/modules/vault-verify-write-data/variables.tf b/enos/modules/vault-verify-write-data/variables.tf deleted file mode 100644 index ac00f1091f..0000000000 --- a/enos/modules/vault-verify-write-data/variables.tf +++ /dev/null @@ -1,25 +0,0 @@ - -variable "vault_install_dir" { - type = string - description = "The directory where the Vault binary will be installed" - default = null -} - -variable "vault_instance_count" { - type = number - description = "How many vault instances are in the cluster" -} - -variable "vault_instances" { - type = map(object({ - private_ip = string - public_ip = string - })) - description = "The vault cluster instances that were created" -} - -variable "vault_root_token" { - type = string - description = "The vault root token" - default = null -} diff --git a/enos/modules/vault_get_cluster_ips/main.tf b/enos/modules/vault_get_cluster_ips/main.tf new file mode 100644 index 0000000000..dcb251358f --- /dev/null +++ b/enos/modules/vault_get_cluster_ips/main.tf @@ -0,0 +1,139 @@ +terraform { + required_providers { + enos = { + source = "app.terraform.io/hashicorp-qti/enos" + } + } +} + +variable "vault_install_dir" { + type = string + description = "The directory where the Vault binary will be installed" +} + +variable "vault_root_token" { + type = string + description = "The vault root token" +} + +variable "node_public_ip" { + type = string + description = "The primary node public ip" + default = "" +} + +variable "vault_instances" { + type = map(object({ + private_ip = string + public_ip = string + })) + description = "The vault cluster instances that were created" +} + +variable "added_vault_instances" { + type = map(object({ + private_ip = string + public_ip = string + })) + description = "The vault cluster instances that were added" + default = {} +} + +locals { + leftover_primary_instances = var.node_public_ip != "" ? { + for k, v in var.vault_instances : k => v if contains(values(v), trimspace(var.node_public_ip)) + } : null + all_instances = var.node_public_ip != "" ? merge(var.added_vault_instances, local.leftover_primary_instances) : var.vault_instances + updated_instance_count = length(local.all_instances) + updated_instances = { + for idx in range(local.updated_instance_count) : idx => { + public_ip = values(local.all_instances)[idx].public_ip + private_ip = values(local.all_instances)[idx].private_ip + } + } + node_ip = var.node_public_ip != "" ? 
var.node_public_ip : local.updated_instances[0].public_ip + instance_private_ips = [ + for k, v in values(tomap(local.updated_instances)) : + tostring(v["private_ip"]) + ] + follower_public_ips = [ + for k, v in values(tomap(local.updated_instances)) : + tostring(v["public_ip"]) if v["private_ip"] != trimspace(enos_remote_exec.get_leader_private_ip.stdout) + ] + follower_private_ips = [ + for k, v in values(tomap(local.updated_instances)) : + tostring(v["private_ip"]) if v["private_ip"] != trimspace(enos_remote_exec.get_leader_private_ip.stdout) + ] +} + +resource "enos_remote_exec" "get_leader_private_ip" { + environment = { + VAULT_ADDR = "http://127.0.0.1:8200" + VAULT_TOKEN = var.vault_root_token + VAULT_INSTALL_DIR = var.vault_install_dir + VAULT_INSTANCE_PRIVATE_IPS = jsonencode(local.instance_private_ips) + } + + scripts = ["${path.module}/scripts/get-leader-private-ip.sh"] + + transport = { + ssh = { + host = local.node_ip + } + } +} + +output "leftover_primary_instances" { + value = local.leftover_primary_instances +} + +output "all_instances" { + value = local.all_instances +} + +output "updated_instance_count" { + value = local.updated_instance_count +} + +output "updated_instances" { + value = local.updated_instances +} + +output "leader_private_ip" { + value = trimspace(enos_remote_exec.get_leader_private_ip.stdout) +} + +output "leader_public_ip" { + value = element([ + for k, v in values(tomap(local.all_instances)) : + tostring(v["public_ip"]) if v["private_ip"] == trimspace(enos_remote_exec.get_leader_private_ip.stdout) + ], 0) +} + +output "vault_instance_private_ips" { + value = jsonencode(local.instance_private_ips) +} + +output "follower_public_ips" { + value = local.follower_public_ips +} + +output "follower_public_ip_1" { + value = element(local.follower_public_ips, 0) +} + +output "follower_public_ip_2" { + value = element(local.follower_public_ips, 1) +} + +output "follower_private_ips" { + value = local.follower_private_ips +} + +output "follower_private_ip_1" { + value = element(local.follower_private_ips, 0) +} + +output "follower_private_ip_2" { + value = element(local.follower_private_ips, 1) +} diff --git a/enos/modules/vault_get_cluster_ips/scripts/get-leader-private-ip.sh b/enos/modules/vault_get_cluster_ips/scripts/get-leader-private-ip.sh new file mode 100644 index 0000000000..360e9d79d6 --- /dev/null +++ b/enos/modules/vault_get_cluster_ips/scripts/get-leader-private-ip.sh @@ -0,0 +1,32 @@ +#!/usr/bin/env bash + +set -e + +binpath=${VAULT_INSTALL_DIR}/vault +instance_ips=${VAULT_INSTANCE_PRIVATE_IPS} + +function fail() { + echo "$1" 1>&2 + exit 1 +} + +count=0 +retries=5 +while :; do + # Find the leader private IP address + leader_private_ip=$($binpath status -format json | jq '.leader_address | rtrimstr(":8200") | ltrimstr("http://")') + match_ip=$(echo $instance_ips |jq -r --argjson ip $leader_private_ip 'map(select(. 
== $ip))') + + if [[ "$leader_private_ip" != 'null' ]] && [[ "$match_ip" != '[]' ]]; then + echo "$leader_private_ip" | sed 's/\"//g' + exit 0 + fi + + wait=$((5 ** count)) + count=$((count + 1)) + if [ "$count" -lt "$retries" ]; then + sleep "$wait" + else + fail "leader IP address $leader_private_ip was not found in $instance_ips" + fi +done diff --git a/enos/modules/vault_setup_perf_primary/main.tf b/enos/modules/vault_setup_perf_primary/main.tf new file mode 100644 index 0000000000..474c255a7a --- /dev/null +++ b/enos/modules/vault_setup_perf_primary/main.tf @@ -0,0 +1,49 @@ +terraform { + required_providers { + enos = { + source = "app.terraform.io/hashicorp-qti/enos" + } + } +} + +variable "vault_cluster_addr_port" { + description = "The Raft cluster address port" + type = string + default = "8201" +} + +variable "vault_install_dir" { + type = string + description = "The directory where the Vault binary will be installed" +} + +variable "primary_leader_public_ip" { + type = string + description = "Vault primary cluster leader Public IP address" +} + +variable "primary_leader_private_ip" { + type = string + description = "Vault primary cluster leader Private IP address" +} + +variable "vault_root_token" { + type = string + description = "The vault root token" +} + +resource "enos_remote_exec" "configure_pr_primary" { + environment = { + VAULT_ADDR = "http://127.0.0.1:8200" + VAULT_TOKEN = var.vault_root_token + vault_install_dir = var.vault_install_dir + } + + scripts = ["${path.module}/scripts/configure-vault-pr-primary.sh"] + + transport = { + ssh = { + host = var.primary_leader_public_ip + } + } +} diff --git a/enos/modules/vault_setup_perf_primary/scripts/configure-vault-pr-primary.sh b/enos/modules/vault_setup_perf_primary/scripts/configure-vault-pr-primary.sh new file mode 100644 index 0000000000..679729f3f5 --- /dev/null +++ b/enos/modules/vault_setup_perf_primary/scripts/configure-vault-pr-primary.sh @@ -0,0 +1,27 @@ +#!/usr/bin/env bash + +set -e + +binpath=${vault_install_dir}/vault + +fail() { + echo "$1" 1>&2 + return 1 +} + +test -x "$binpath" || fail "unable to locate vault binary at $binpath" + +# Create superuser policy +$binpath policy write superuser -< follower + } + environment = { + VAULT_ADDR = "http://127.0.0.1:8200" + VAULT_INSTALL_DIR = var.vault_install_dir + } + + scripts = ["${path.module}/scripts/wait-until-sealed.sh"] + + transport = { + ssh = { + host = element(var.follower_public_ips, each.key) + } + } +} + +# The follower nodes on secondary replication cluster incorrectly report +# unseal progress 2/3 (Issue: https://hashicorp.atlassian.net/browse/VAULT-12309), +# so we restart the followers to clear the status and to autounseal incase of awskms seal type +resource "enos_remote_exec" "restart_followers" { + depends_on = [enos_remote_exec.wait_until_sealed] + for_each = { + for idx, follower in local.followers : idx => follower + } + + inline = ["sudo systemctl restart vault"] + + transport = { + ssh = { + host = element(var.follower_public_ips, each.key) + } + } +} + +# We cannot use the vault_unseal resouce due to the known issue +# (https://hashicorp.atlassian.net/browse/VAULT-12311). 
We use a custom +# script to allow retry for unsealing the secondary followers +resource "enos_remote_exec" "unseal_followers" { + depends_on = [enos_remote_exec.restart_followers] + # The unseal keys are required only for seal_type shamir + for_each = { + for idx, follower in local.followers : idx => follower + if var.vault_seal_type == "shamir" + } + + environment = { + VAULT_ADDR = "http://127.0.0.1:8200" + VAULT_INSTALL_DIR = var.vault_install_dir + UNSEAL_KEYS = join(",", var.vault_unseal_keys) + } + + scripts = ["${path.module}/scripts/unseal-node.sh"] + + transport = { + ssh = { + host = element(var.follower_public_ips, each.key) + } + } +} + +# This is a second attempt needed to unseal the secondary followers +# using a custom script due to get past the known issue +# (https://hashicorp.atlassian.net/browse/VAULT-12311) +resource "enos_remote_exec" "unseal_followers_again" { + depends_on = [enos_remote_exec.unseal_followers] + for_each = { + for idx, follower in local.followers : idx => follower + if var.vault_seal_type == "shamir" + } + + environment = { + VAULT_ADDR = "http://127.0.0.1:8200" + VAULT_INSTALL_DIR = var.vault_install_dir + UNSEAL_KEYS = join(",", var.vault_unseal_keys) + } + + scripts = ["${path.module}/scripts/unseal-node.sh"] + + transport = { + ssh = { + host = element(var.follower_public_ips, each.key) + } + } +} diff --git a/enos/modules/vault_unseal_nodes/scripts/unseal-node.sh b/enos/modules/vault_unseal_nodes/scripts/unseal-node.sh new file mode 100755 index 0000000000..6fe00a93de --- /dev/null +++ b/enos/modules/vault_unseal_nodes/scripts/unseal-node.sh @@ -0,0 +1,34 @@ +#!/usr/bin/env bash + +binpath=${VAULT_INSTALL_DIR}/vault + +IFS="," read -a keys <<< ${UNSEAL_KEYS} + +function fail() { + echo "$1" 1>&2 + exit 1 +} +count=0 +retries=5 +while :; do + for key in ${keys[@]}; do + + # Check the Vault seal status + seal_status=$($binpath status -format json | jq '.sealed') + + if [[ "$seal_status" == "true" ]]; then + echo "running unseal with $key count $count with retry $retry" >> /tmp/unseal_script.out + $binpath operator unseal $key > /dev/null 2>&1 + else + exit 0 + fi + done + + wait=$((1 ** count)) + count=$((count + 1)) + if [ "$count" -lt "$retries" ]; then + sleep "$wait" + else + fail "failed to unseal node" + fi +done diff --git a/enos/modules/vault_unseal_nodes/scripts/wait-until-sealed.sh b/enos/modules/vault_unseal_nodes/scripts/wait-until-sealed.sh new file mode 100644 index 0000000000..d0ebb1f067 --- /dev/null +++ b/enos/modules/vault_unseal_nodes/scripts/wait-until-sealed.sh @@ -0,0 +1,27 @@ +#!/usr/bin/env bash + +binpath=${VAULT_INSTALL_DIR}/vault + +function fail() { + echo "$1" 1>&2 + exit 1 +} + +count=0 +retries=5 +while :; do + # Check the Vault seal status + seal_status=$($binpath status -format json | jq '.sealed') + + if [[ "$seal_status" == "true" ]]; then + exit 0 + fi + + wait=$((3 ** count)) + count=$((count + 1)) + if [ "$count" -lt "$retries" ]; then + sleep "$wait" + else + fail "Expected node to be sealed" + fi +done diff --git a/enos/modules/vault_verify_performance_replication/main.tf b/enos/modules/vault_verify_performance_replication/main.tf new file mode 100644 index 0000000000..1faa36d968 --- /dev/null +++ b/enos/modules/vault_verify_performance_replication/main.tf @@ -0,0 +1,138 @@ +terraform { + required_providers { + enos = { + source = "app.terraform.io/hashicorp-qti/enos" + } + } +} + +variable "vault_cluster_addr_port" { + description = "The Raft cluster address port" + type = string + default = "8201" +} + 
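This module runs the same status script on both the primary and secondary leaders and asserts on the parsed JSON in the output preconditions further below. As a sketch only (the jq expressions mirror those preconditions and are not part of the module's actual script), the primary-side assertions amount to:

```shell
# Run on the primary cluster leader node
export VAULT_ADDR=http://127.0.0.1:8200
status=$(vault read -format=json sys/replication/performance/status)

# The primary must be in "primary" mode and actively replicating (state not "idle")
echo "$status" | jq -e '.data.mode == "primary" and .data.state != "idle"' > /dev/null \
  || { echo "primary replication is not healthy" 1>&2; exit 1; }

# The first secondary must report a "connected" connection status
echo "$status" | jq -e '.data.secondaries[0].connection_status == "connected"' > /dev/null \
  || { echo "secondary is not connected to the primary" 1>&2; exit 1; }
```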
+variable "vault_install_dir" { + type = string + description = "The directory where the Vault binary will be installed" +} + +variable "primary_leader_public_ip" { + type = string + description = "Vault primary cluster leader Public IP address" +} + +variable "primary_leader_private_ip" { + type = string + description = "Vault primary cluster leader Private IP address" +} + +variable "secondary_leader_public_ip" { + type = string + description = "Vault secondary cluster leader Public IP address" +} + +variable "secondary_leader_private_ip" { + type = string + description = "Vault secondary cluster leader Private IP address" +} + +variable "wrapping_token" { + type = string + description = "The wrapping token created on primary cluster" + default = null +} + +locals { + primary_replication_status = jsondecode(enos_remote_exec.verify_replication_on_primary.stdout) + secondary_replication_status = jsondecode(enos_remote_exec.verify_replication_on_secondary.stdout) +} + +resource "enos_remote_exec" "verify_replication_on_primary" { + environment = { + VAULT_ADDR = "http://127.0.0.1:8200" + VAULT_INSTALL_DIR = var.vault_install_dir + } + + scripts = ["${path.module}/scripts/verify-performance-replication.sh"] + + transport = { + ssh = { + host = var.primary_leader_public_ip + } + } +} + +output "primary_replication_status" { + value = local.primary_replication_status + + precondition { + condition = local.primary_replication_status.data.mode == "primary" && local.primary_replication_status.data.state != "idle" + error_message = "Vault primary cluster mode must be \"primary\" and state must not be \"idle\"." + } +} + +resource "enos_remote_exec" "verify_replication_on_secondary" { + environment = { + VAULT_ADDR = "http://127.0.0.1:8200" + VAULT_INSTALL_DIR = var.vault_install_dir + } + + scripts = ["${path.module}/scripts/verify-performance-replication.sh"] + + transport = { + ssh = { + host = var.secondary_leader_public_ip + } + } +} + +output "known_primary_cluster_addrs" { + value = local.secondary_replication_status.data.known_primary_cluster_addrs + + precondition { + condition = contains(local.secondary_replication_status.data.known_primary_cluster_addrs, "https://${var.primary_leader_private_ip}:8201") + error_message = "Vault secondary cluster known_primary_cluster_addrs must include ${var.primary_leader_private_ip}." + } +} + +output "secondary_replication_status" { + value = local.secondary_replication_status + + precondition { + condition = local.secondary_replication_status.data.mode == "secondary" && local.secondary_replication_status.data.state != "idle" + error_message = "Vault secondary cluster mode must be \"secondary\" and state must not be \"idle\"." + } +} + +output "primary_replication_data_secondaries" { + value = local.primary_replication_status.data.secondaries + + # The secondaries connection_status should be "connected" + precondition { + condition = local.primary_replication_status.data.secondaries[0].connection_status == "connected" + error_message = "connection status to primaries must be \"connected\"." + } + + # The secondaries cluster address must have the secondary leader address + precondition { + condition = local.primary_replication_status.data.secondaries[0].cluster_address == "https://${var.secondary_leader_private_ip}:8201" + error_message = "Vault secondaries cluster_address must be with ${var.secondary_leader_private_ip}." 
+ } +} + +output "secondary_replication_data_primaries" { + value = local.secondary_replication_status.data.primaries + + # The primaries connection_status should be "connected" + precondition { + condition = local.secondary_replication_status.data.primaries[0].connection_status == "connected" + error_message = "connection status to primaries must be \"connected\"." + } + + # The primaries cluster address must match the primary leader address + precondition { + condition = local.secondary_replication_status.data.primaries[0].cluster_address == "https://${var.primary_leader_private_ip}:8201" + error_message = "Vault primaries cluster_address must be https://${var.primary_leader_private_ip}:8201." + } +} diff --git a/enos/modules/vault_verify_performance_replication/scripts/verify-performance-replication.sh b/enos/modules/vault_verify_performance_replication/scripts/verify-performance-replication.sh new file mode 100644 index 0000000000..fbbca79b1b --- /dev/null +++ b/enos/modules/vault_verify_performance_replication/scripts/verify-performance-replication.sh @@ -0,0 +1,44 @@ +#!/usr/bin/env bash + +set -e + +binpath=${VAULT_INSTALL_DIR}/vault + +fail() { + echo "$1" 1>&2 + return 1 +} + +retry() { + local retries=$1 + shift + local count=0 + + until "$@"; do + exit=$? + wait=$((10 ** count)) + count=$((count + 1)) + if [ "$count" -lt "$retries" ]; then + sleep "$wait" + else + return "$exit" + fi + done + + return 0 +} + +check_pr_status() { + pr_status=$($binpath read -format=json sys/replication/performance/status) + cluster_state=$(echo "$pr_status" | jq -r '.data.state') + + if [[ "$cluster_state" == 'idle' ]]; then + fail "expected cluster state to not be idle" + fi +} + +test -x "$binpath" || fail "unable to locate vault binary at $binpath" + +# Retry a few times because it can take some time for replication to sync +retry 5 check_pr_status +echo "$pr_status" diff --git a/enos/modules/vault_verify_read_data/main.tf b/enos/modules/vault_verify_read_data/main.tf new file mode 100644 index 0000000000..8a881bb4b5 --- /dev/null +++ b/enos/modules/vault_verify_read_data/main.tf @@ -0,0 +1,45 @@ +terraform { + required_providers { + enos = { + source = "app.terraform.io/hashicorp-qti/enos" + } + } +} + +variable "vault_install_dir" { + type = string + description = "The directory where the Vault binary will be installed" +} + +variable "vault_instance_count" { + type = number + description = "How many Vault instances are in the cluster" +} + +variable "node_public_ips" { + type = list(string) + description = "Vault cluster node public IP addresses" +} + +locals { + followers = toset([for idx in range(var.vault_instance_count - 1) : tostring(idx)]) + vault_bin_path = "${var.vault_install_dir}/vault" +} + +resource "enos_remote_exec" "verify_kv_on_node" { + for_each = { + for idx, follower in local.followers : idx => follower + } + environment = { + VAULT_ADDR = "http://127.0.0.1:8200" + VAULT_INSTALL_DIR = var.vault_install_dir + } + + scripts = ["${path.module}/scripts/verify-data.sh"] + + transport = { + ssh = { + host = element(var.node_public_ips, each.key) + } + } +} diff --git a/enos/modules/vault_verify_read_data/scripts/verify-data.sh b/enos/modules/vault_verify_read_data/scripts/verify-data.sh new file mode 100644 index 0000000000..d150d8f7ef --- /dev/null +++ b/enos/modules/vault_verify_read_data/scripts/verify-data.sh @@ -0,0 +1,36 @@ +#!/usr/bin/env bash + +set -e + +function retry { + local retries=$1 + shift + local count=0 + + until "$@"; do + exit=$?
+ wait=$((2 ** count)) + count=$((count + 1)) + if [ "$count" -lt "$retries" ]; then + sleep "$wait" + else + return "$exit" + fi + done + + return 0 +} + +function fail { + echo "$1" 1>&2 + exit 1 +} + +binpath=${VAULT_INSTALL_DIR}/vault + +test -x "$binpath" || fail "unable to locate vault binary at $binpath" + +# To keep the authentication method and module verification consistent across all +# Enos scenarios, we authenticate using the testuser created by the vault_verify_write_data module +retry 5 "$binpath" login -method=userpass username=testuser password=passuser1 +retry 5 "$binpath" kv get secret/test diff --git a/enos/modules/vault-verify-replication/main.tf b/enos/modules/vault_verify_replication/main.tf similarity index 100% rename from enos/modules/vault-verify-replication/main.tf rename to enos/modules/vault_verify_replication/main.tf diff --git a/enos/modules/vault-verify-replication/templates/smoke-verify-replication.sh b/enos/modules/vault_verify_replication/templates/smoke-verify-replication.sh similarity index 100% rename from enos/modules/vault-verify-replication/templates/smoke-verify-replication.sh rename to enos/modules/vault_verify_replication/templates/smoke-verify-replication.sh diff --git a/enos/modules/vault-verify-replication/variables.tf b/enos/modules/vault_verify_replication/variables.tf similarity index 100% rename from enos/modules/vault-verify-replication/variables.tf rename to enos/modules/vault_verify_replication/variables.tf diff --git a/enos/modules/vault-verify-ui/main.tf b/enos/modules/vault_verify_ui/main.tf similarity index 100% rename from enos/modules/vault-verify-ui/main.tf rename to enos/modules/vault_verify_ui/main.tf diff --git a/enos/modules/vault-verify-ui/templates/smoke-verify-ui.sh b/enos/modules/vault_verify_ui/templates/smoke-verify-ui.sh similarity index 100% rename from enos/modules/vault-verify-ui/templates/smoke-verify-ui.sh rename to enos/modules/vault_verify_ui/templates/smoke-verify-ui.sh diff --git a/enos/modules/vault-verify-ui/variables.tf b/enos/modules/vault_verify_ui/variables.tf similarity index 100% rename from enos/modules/vault-verify-ui/variables.tf rename to enos/modules/vault_verify_ui/variables.tf diff --git a/enos/modules/vault_verify_undo_logs/main.tf b/enos/modules/vault_verify_undo_logs/main.tf index 1d16b29abc..dcde07b967 100644 --- a/enos/modules/vault_verify_undo_logs/main.tf +++ b/enos/modules/vault_verify_undo_logs/main.tf @@ -29,12 +29,6 @@ variable "vault_root_token" { description = "The vault root token" } -variable "vault_autopilot_upgrade_version" { - type = string - description = "The vault version to which autopilot upgraded Vault" - default = null -} - variable "vault_undo_logs_status" { type = string description = "An integer either 0 or 1 which indicates whether undo_logs are disabled or enabled" @@ -54,10 +48,9 @@ resource "enos_remote_exec" "smoke-verify-undo-logs" { for_each = local.public_ips environment = { - VAULT_TOKEN = var.vault_root_token - VAULT_ADDR = "http://localhost:8200" - vault_undo_logs_status = var.vault_undo_logs_status - vault_autopilot_upgrade_version = var.vault_autopilot_upgrade_version + VAULT_TOKEN = var.vault_root_token + VAULT_ADDR = "http://localhost:8200" + VAULT_UNDO_LOGS_STATUS = var.vault_undo_logs_status } scripts = [abspath("${path.module}/scripts/smoke-verify-undo-logs.sh")] diff --git a/enos/modules/vault_verify_undo_logs/scripts/smoke-verify-undo-logs.sh b/enos/modules/vault_verify_undo_logs/scripts/smoke-verify-undo-logs.sh
index efc8d0ec0b..8e72d1dec9 100644 --- a/enos/modules/vault_verify_undo_logs/scripts/smoke-verify-undo-logs.sh +++ b/enos/modules/vault_verify_undo_logs/scripts/smoke-verify-undo-logs.sh @@ -1,6 +1,6 @@ #!/bin/bash -undo_logs_status="${vault_undo_logs_status}" +undo_logs_status="${VAULT_UNDO_LOGS_STATUS}" function fail() { echo "$1" 1>&2 diff --git a/enos/modules/vault_verify_unsealed/main.tf b/enos/modules/vault_verify_unsealed/main.tf index d015adf62a..0b615295ce 100644 --- a/enos/modules/vault_verify_unsealed/main.tf +++ b/enos/modules/vault_verify_unsealed/main.tf @@ -30,11 +30,6 @@ variable "vault_instances" { description = "The vault cluster instances that were created" } -variable "vault_root_token" { - type = string - description = "The vault root token" -} - locals { instances = { for idx in range(var.vault_instance_count) : idx => { @@ -51,7 +46,6 @@ resource "enos_remote_exec" "verify_node_unsealed" { vault_cluster_addr = "${each.value.private_ip}:${var.vault_cluster_addr_port}" vault_install_dir = var.vault_install_dir vault_local_binary_path = "${var.vault_install_dir}/vault" - vault_token = var.vault_root_token }) transport = { diff --git a/enos/modules/vault_verify_unsealed/templates/verify-vault-node-unsealed.sh b/enos/modules/vault_verify_unsealed/templates/verify-vault-node-unsealed.sh index de3edd6482..aefc75ec11 100644 --- a/enos/modules/vault_verify_unsealed/templates/verify-vault-node-unsealed.sh +++ b/enos/modules/vault_verify_unsealed/templates/verify-vault-node-unsealed.sh @@ -12,9 +12,11 @@ fail() { test -x "$binpath" || fail "unable to locate vault binary at $binpath" export VAULT_ADDR='http://127.0.0.1:8200' -export VAULT_TOKEN='${vault_token}' +health_status=$(curl -s http://127.0.0.1:8200/v1/sys/health | jq '.') unseal_status=$($binpath status -format json | jq -Mr --argjson expected "false" '.sealed == $expected') if [[ "$unseal_status" != 'true' ]]; then fail "expected ${vault_cluster_addr} to be unsealed, got unseal status: $unseal_status" fi + +echo "$health_status" diff --git a/enos/modules/vault_verify_write_data/main.tf b/enos/modules/vault_verify_write_data/main.tf new file mode 100644 index 0000000000..77b700dfd0 --- /dev/null +++ b/enos/modules/vault_verify_write_data/main.tf @@ -0,0 +1,92 @@ +terraform { + required_providers { + enos = { + source = "app.terraform.io/hashicorp-qti/enos" + } + } +} + +variable "vault_install_dir" { + type = string + description = "The directory where the Vault binary will be installed" +} + +variable "vault_instance_count" { + type = number + description = "How many Vault instances are in the cluster" +} + +variable "leader_public_ip" { + type = string + description = "Vault cluster leader Public IP address" +} + +variable "leader_private_ip" { + type = string + description = "Vault cluster leader Private IP address" +} + +variable "vault_instances" { + type = map(object({ + private_ip = string + public_ip = string + })) + description = "The Vault cluster instances that were created" +} + +variable "vault_root_token" { + type = string + description = "The Vault root token" + default = null +} + +locals { + instances = { + for idx in range(var.vault_instance_count) : idx => { + public_ip = values(var.vault_instances)[idx].public_ip + private_ip = values(var.vault_instances)[idx].private_ip + } + } +} + +# We use this module to verify writing data in all Enos scenarios.
Since we cannot use the +# Vault root token to authenticate to the secondary cluster in the replication scenario, we add a regular user +# here to keep the authentication method and module verification consistent across all scenarios +resource "enos_remote_exec" "smoke-enable-secrets-kv" { + # Enable the secrets engine and create the test user on the leader node only + environment = { + VAULT_ADDR = "http://127.0.0.1:8200" + VAULT_TOKEN = var.vault_root_token + VAULT_INSTALL_DIR = var.vault_install_dir + } + + scripts = ["${path.module}/scripts/smoke-enable-secrets-kv.sh"] + + transport = { + ssh = { + host = var.leader_public_ip + } + } +} + +# Verify that we can enable the k/v secrets engine and write data to it. +resource "enos_remote_exec" "smoke-write-test-data" { + depends_on = [enos_remote_exec.smoke-enable-secrets-kv] + for_each = local.instances + + environment = { + VAULT_ADDR = "http://127.0.0.1:8200" + VAULT_TOKEN = var.vault_root_token + VAULT_INSTALL_DIR = var.vault_install_dir + TEST_KEY = "smoke${each.key}" + TEST_VALUE = "fire" + } + + scripts = ["${path.module}/scripts/smoke-write-test-data.sh"] + + transport = { + ssh = { + host = each.value.public_ip + } + } +} diff --git a/enos/modules/vault-verify-write-data/templates/smoke-enable-secrets-kv.sh b/enos/modules/vault_verify_write_data/scripts/smoke-enable-secrets-kv.sh similarity index 53% rename from enos/modules/vault-verify-write-data/templates/smoke-enable-secrets-kv.sh rename to enos/modules/vault_verify_write_data/scripts/smoke-enable-secrets-kv.sh index fb28fd9a82..a1e0e1f020 100644 --- a/enos/modules/vault-verify-write-data/templates/smoke-enable-secrets-kv.sh +++ b/enos/modules/vault_verify_write_data/scripts/smoke-enable-secrets-kv.sh @@ -26,12 +26,23 @@ function fail { exit 1 } -binpath=${vault_install_dir}/vault +binpath=${VAULT_INSTALL_DIR}/vault test -x "$binpath" || fail "unable to locate vault binary at $binpath" -export VAULT_ADDR='http://127.0.0.1:8200' -export VAULT_TOKEN='${vault_token}' - retry 5 "$binpath" status > /dev/null 2>&1 -retry 5 $binpath secrets enable -path="secret" kv + +# Create the reguser policy that lets the test user read the test data +$binpath policy write reguser -<<EOF +path "*" { + capabilities = ["read", "list"] +} +EOF + +# Enable the userpass auth method +$binpath auth enable userpass > /dev/null 2>&1 + +# Create a new user and attach the reguser policy +$binpath write auth/userpass/users/testuser password="passuser1" policies="reguser" + +$binpath secrets enable -path="secret" kv diff --git a/enos/modules/vault-verify-write-data/templates/smoke-write-test-data.sh b/enos/modules/vault_verify_write_data/scripts/smoke-write-test-data.sh similarity index 69% rename from enos/modules/vault-verify-write-data/templates/smoke-write-test-data.sh rename to enos/modules/vault_verify_write_data/scripts/smoke-write-test-data.sh index d514881425..62b357cc89 100644 --- a/enos/modules/vault-verify-write-data/templates/smoke-write-test-data.sh +++ b/enos/modules/vault_verify_write_data/scripts/smoke-write-test-data.sh @@ -26,14 +26,10 @@ function fail { exit 1 } -binpath=${vault_install_dir}/vault -testkey=${test_key} -testvalue=${test_value} +binpath=${VAULT_INSTALL_DIR}/vault +testkey=${TEST_KEY} +testvalue=${TEST_VALUE} test -x "$binpath" || fail "unable to locate vault binary at $binpath" -export VAULT_ADDR='http://127.0.0.1:8200' -export VAULT_TOKEN='${vault_token}' - -retry 5 "$binpath" status > /dev/null 2>&1 retry 5 "$binpath" kv put secret/test "$testkey=$testvalue"
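# Illustrative sketch only, not part of this diff: roughly how the vault_verify_write_data
# and vault_verify_read_data modules can be chained in a scenario so that the read step
# exercises the userpass credentials created by the write step. The step names and the
# upstream step outputs referenced here are assumptions; vault_install_dir and
# vault_instance_count are already wired in at the module declaration level.
step "verify_write_test_data" {
  module     = module.vault_verify_write_data
  depends_on = [step.create_vault_cluster]

  providers = {
    enos = local.enos_provider[matrix.distro]
  }

  variables {
    leader_public_ip  = step.get_vault_cluster_ips.leader_public_ip
    leader_private_ip = step.get_vault_cluster_ips.leader_private_ip
    vault_instances   = step.create_vault_cluster.vault_instances
    vault_root_token  = step.create_vault_cluster.vault_root_token
  }
}

step "verify_read_test_data" {
  module     = module.vault_verify_read_data
  depends_on = [step.verify_write_test_data]

  providers = {
    enos = local.enos_provider[matrix.distro]
  }

  variables {
    node_public_ips = step.get_vault_cluster_ips.follower_public_ips
  }
}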