Mirror of https://github.com/optim-enterprises-bv/vault.git, synced 2025-10-29 01:32:33 +00:00.
VAULT-28146: Add IPV6 support to enos scenarios (#27884)
* VAULT-28146: Add IPV6 support to enos scenarios

Add support for testing all raft storage scenarios and variants when running Vault with IPV6 networking. We retain our previous support for IPV4 and add a new variant, `ip_version`, which can be used to configure the IP version that we wish to test with.

It's important to note that the VPC in IPV6 mode is technically mixed and that target machines still have public IPV4 addresses associated with them. That allows us to execute our resources against them from IPV4 networks like developer machines and CI runners. Despite that, we've taken care to ensure that only IPV6 addresses are used in IPV6 mode.

Because we had previously assumed the IP version, Vault address, and listener ports in so many places, this PR is essentially a rewrite and removal of those assumptions. A few scenario improvements are also included where I encountered them while working on the IPV6 changes.

Signed-off-by: Ryan Cragun <me@ryan.ec>
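For illustration, here is a sketch of how the new variant would be selected when running a scenario with the enos CLI. The scenario name and the other filter values are assumptions chosen for the example, not something this change mandates:

    # Hypothetical invocation: exercise a raft-backed scenario with IPV6 listeners.
    enos scenario launch agent ip_version:6 backend:raft seal:awskms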
@@ -97,6 +97,7 @@ data "aws_iam_policy_document" "enos_scenario" {
"ec2:AuthorizeSecurityGroupIngress",
"ec2:CancelSpotFleetRequests",
"ec2:CancelSpotInstanceRequests",
"ec2:CreateEgressOnlyInternetGateway",
"ec2:CreateInternetGateway",
"ec2:CreateKeyPair",
"ec2:CreateFleet",

@@ -110,6 +111,7 @@ data "aws_iam_policy_document" "enos_scenario" {
"ec2:CreateTags",
"ec2:CreateVolume",
"ec2:CreateVPC",
"ec2:DeleteEgressOnlyInternetGateway",
"ec2:DeleteFleets",
"ec2:DeleteInternetGateway",
"ec2:DeleteLaunchTemplate",

@@ -125,6 +127,7 @@ data "aws_iam_policy_document" "enos_scenario" {
"ec2:DeleteVPC",
"ec2:DescribeAccountAttributes",
"ec2:DescribeAvailabilityZones",
"ec2:DescribeEgressOnlyInternetGateways",
"ec2:DescribeFleets",
"ec2:DescribeFleetHistory",
"ec2:DescribeFleetInstances",
@@ -1,5 +1,5 @@
# Copyright (c) HashiCorp, Inc.
# SPDX-License-Identifier: BUSL-1.1
// Copyright (c) HashiCorp, Inc.
// SPDX-License-Identifier: BUSL-1.1

globals {
description = {
@@ -1,5 +1,5 @@
# Copyright (c) HashiCorp, Inc.
# SPDX-License-Identifier: BUSL-1.1
// Copyright (c) HashiCorp, Inc.
// SPDX-License-Identifier: BUSL-1.1

scenario "dev_pr_replication" {
description = <<-EOF

@@ -138,6 +138,8 @@ scenario "dev_pr_replication" {
// We install vault packages from artifactory. If you wish to use one of these variants you'll
// need to configure your artifactory credentials.
use_artifactory = matrix.artifact == "deb" || matrix.artifact == "rpm"
// The IP version to use for the Vault listener and associated things.
ip_version = 4
// Zip bundles and local builds don't come with systemd units or any associated configuration.
// When this is true we'll let enos handle this for us.
manage_service = matrix.artifact == "zip" || matrix.artifact == "local"

@@ -341,6 +343,7 @@ scenario "dev_pr_replication" {
ami_id = step.ec2_info.ami_ids[matrix.arch][matrix.distro][global.distro_version[matrix.distro]]
cluster_tag_key = global.vault_tag_key
common_tags = global.tags
instance_count = try(var.vault_instance_count, 3)
seal_key_names = step.create_primary_seal_key.resource_names
vpc_id = step.create_vpc.id
}

@@ -450,12 +453,12 @@ scenario "dev_pr_replication" {
variables {
cluster_name = step.create_primary_cluster_backend_targets.cluster_name
cluster_tag_key = global.backend_tag_key
hosts = step.create_primary_cluster_backend_targets.hosts
license = matrix.primary_backend == "consul" ? step.read_backend_license.license : null
release = {
edition = var.backend_edition
version = var.dev_consul_version
}
target_hosts = step.create_primary_cluster_backend_targets.hosts
}
}

@@ -514,7 +517,9 @@ scenario "dev_pr_replication" {
version = var.dev_consul_version
} : null
enable_audit_devices = var.vault_enable_audit_devices
hosts = step.create_primary_cluster_targets.hosts
install_dir = local.vault_install_dir
ip_version = local.ip_version
license = step.read_vault_license.license
local_artifact_path = matrix.artifact == "local" ? abspath(var.vault_artifact_path) : null
manage_service = local.manage_service

@@ -523,7 +528,6 @@ scenario "dev_pr_replication" {
seal_attributes = step.create_primary_seal_key.attributes
seal_type = matrix.primary_seal
storage_backend = matrix.primary_backend
target_hosts = step.create_primary_cluster_targets.hosts
}
}

@@ -553,12 +557,12 @@ scenario "dev_pr_replication" {
variables {
cluster_name = step.create_secondary_cluster_backend_targets.cluster_name
cluster_tag_key = global.backend_tag_key
hosts = step.create_secondary_cluster_backend_targets.hosts
license = matrix.secondary_backend == "consul" ? step.read_backend_license.license : null
release = {
edition = var.backend_edition
version = var.dev_consul_version
}
target_hosts = step.create_secondary_cluster_backend_targets.hosts
}
}

@@ -616,7 +620,9 @@ scenario "dev_pr_replication" {
version = var.dev_consul_version
} : null
enable_audit_devices = var.vault_enable_audit_devices
hosts = step.create_secondary_cluster_targets.hosts
install_dir = local.vault_install_dir
ip_version = local.ip_version
license = step.read_vault_license.license
local_artifact_path = matrix.artifact == "local" ? abspath(var.vault_artifact_path) : null
manage_service = local.manage_service

@@ -625,7 +631,6 @@ scenario "dev_pr_replication" {
seal_attributes = step.create_secondary_seal_key.attributes
seal_type = matrix.secondary_seal
storage_backend = matrix.secondary_backend
target_hosts = step.create_secondary_cluster_targets.hosts
}
}

@@ -643,7 +648,8 @@ scenario "dev_pr_replication" {
}

variables {
vault_instances = step.create_primary_cluster_targets.hosts
hosts = step.create_primary_cluster_targets.hosts
vault_addr = step.create_primary_cluster.api_addr_localhost
vault_install_dir = local.vault_install_dir
}
}

@@ -662,7 +668,8 @@ scenario "dev_pr_replication" {
}

variables {
vault_instances = step.create_secondary_cluster_targets.hosts
hosts = step.create_secondary_cluster_targets.hosts
vault_addr = step.create_secondary_cluster.api_addr_localhost
vault_install_dir = local.vault_install_dir
}
}

@@ -681,7 +688,9 @@ scenario "dev_pr_replication" {
}

variables {
vault_hosts = step.create_primary_cluster_targets.hosts
hosts = step.create_primary_cluster_targets.hosts
ip_version = local.ip_version
vault_addr = step.create_primary_cluster.api_addr_localhost
vault_install_dir = local.vault_install_dir
vault_root_token = step.create_primary_cluster.root_token
}

@@ -701,7 +710,9 @@ scenario "dev_pr_replication" {
}

variables {
vault_hosts = step.create_secondary_cluster_targets.hosts
hosts = step.create_secondary_cluster_targets.hosts
ip_version = local.ip_version
vault_addr = step.create_secondary_cluster.api_addr_localhost
vault_install_dir = local.vault_install_dir
vault_root_token = step.create_secondary_cluster.root_token
}

@@ -720,9 +731,9 @@ scenario "dev_pr_replication" {
}

variables {
leader_public_ip = step.get_primary_cluster_ips.leader_public_ip
leader_private_ip = step.get_primary_cluster_ips.leader_private_ip
vault_instances = step.create_primary_cluster_targets.hosts
hosts = step.create_primary_cluster_targets.hosts
leader_host = step.get_primary_cluster_ips.leader_host
vault_addr = step.create_primary_cluster.api_addr_localhost
vault_install_dir = local.vault_install_dir
vault_root_token = step.create_primary_cluster.root_token
}

@@ -745,10 +756,10 @@ scenario "dev_pr_replication" {
}

variables {
primary_leader_public_ip = step.get_primary_cluster_ips.leader_public_ip
primary_leader_private_ip = step.get_primary_cluster_ips.leader_private_ip
vault_install_dir = local.vault_install_dir
vault_root_token = step.create_primary_cluster.root_token
primary_leader_public_ip = step.get_primary_cluster_ips.leader_public_ip
vault_addr = step.create_primary_cluster.api_addr_localhost
vault_install_dir = local.vault_install_dir
vault_root_token = step.create_primary_cluster.root_token
}
}

@@ -766,6 +777,7 @@ scenario "dev_pr_replication" {

variables {
primary_leader_public_ip = step.get_primary_cluster_ips.leader_public_ip
vault_addr = step.create_primary_cluster.api_addr_localhost
vault_install_dir = local.vault_install_dir
vault_root_token = step.create_primary_cluster.root_token
}

@@ -783,11 +795,11 @@ scenario "dev_pr_replication" {
}

variables {
secondary_leader_public_ip = step.get_secondary_cluster_ips.leader_public_ip
secondary_leader_private_ip = step.get_secondary_cluster_ips.leader_private_ip
vault_install_dir = local.vault_install_dir
vault_root_token = step.create_secondary_cluster.root_token
wrapping_token = step.generate_secondary_token.secondary_token
secondary_leader_public_ip = step.get_secondary_cluster_ips.leader_public_ip
vault_addr = step.create_secondary_cluster.api_addr_localhost
vault_install_dir = local.vault_install_dir
vault_root_token = step.create_secondary_cluster.root_token
wrapping_token = step.generate_secondary_token.secondary_token
}
}

@@ -810,10 +822,11 @@ scenario "dev_pr_replication" {
}

variables {
follower_public_ips = step.get_secondary_cluster_ips.follower_public_ips
vault_install_dir = local.vault_install_dir
vault_unseal_keys = matrix.primary_seal == "shamir" ? step.create_primary_cluster.unseal_keys_hex : step.create_primary_cluster.recovery_keys_hex
vault_seal_type = matrix.primary_seal == "shamir" ? matrix.primary_seal : matrix.secondary_seal
hosts = step.get_secondary_cluster_ips.follower_hosts
vault_addr = step.create_secondary_cluster.api_addr_localhost
vault_install_dir = local.vault_install_dir
vault_unseal_keys = matrix.primary_seal == "shamir" ? step.create_primary_cluster.unseal_keys_hex : step.create_primary_cluster.recovery_keys_hex
vault_seal_type = matrix.primary_seal == "shamir" ? matrix.primary_seal : matrix.secondary_seal
}
}

@@ -831,7 +844,8 @@ scenario "dev_pr_replication" {
}

variables {
vault_instances = step.create_secondary_cluster_targets.hosts
hosts = step.create_secondary_cluster_targets.hosts
vault_addr = step.create_primary_cluster.api_addr_localhost
vault_install_dir = local.vault_install_dir
}
}

@@ -849,11 +863,11 @@ scenario "dev_pr_replication" {
}

variables {
primary_leader_public_ip = step.get_primary_cluster_ips.leader_public_ip
primary_leader_private_ip = step.get_primary_cluster_ips.leader_private_ip
secondary_leader_public_ip = step.get_secondary_cluster_ips.leader_public_ip
secondary_leader_private_ip = step.get_secondary_cluster_ips.leader_private_ip
vault_install_dir = local.vault_install_dir
ip_version = local.ip_version
primary_leader_host = step.get_primary_cluster_ips.leader_host
secondary_leader_host = step.get_secondary_cluster_ips.leader_host
vault_addr = step.create_primary_cluster.api_addr_localhost
vault_install_dir = local.vault_install_dir
}
}
@@ -1,5 +1,5 @@
# Copyright (c) HashiCorp, Inc.
# SPDX-License-Identifier: BUSL-1.1
// Copyright (c) HashiCorp, Inc.
// SPDX-License-Identifier: BUSL-1.1

scenario "dev_single_cluster" {
description = <<-EOF

@@ -130,6 +130,8 @@ scenario "dev_single_cluster" {
// We install vault packages from artifactory. If you wish to use one of these variants you'll
// need to configure your artifactory credentials.
use_artifactory = matrix.artifact == "deb" || matrix.artifact == "rpm"
// The IP version to use for the Vault listener and associated things.
ip_version = 4
// Zip bundles and local builds don't come with systemd units or any associated configuration.
// When this is true we'll let enos handle this for us.
manage_service = matrix.artifact == "zip" || matrix.artifact == "local"

@@ -373,12 +375,12 @@ scenario "dev_single_cluster" {
variables {
cluster_name = step.create_vault_cluster_backend_targets.cluster_name
cluster_tag_key = global.backend_tag_key
hosts = step.create_vault_cluster_backend_targets.hosts
license = (matrix.backend == "consul" && var.backend_edition == "ent") ? step.read_backend_license.license : null
release = {
edition = var.backend_edition
version = var.dev_consul_version
}
target_hosts = step.create_vault_cluster_backend_targets.hosts
}
}

@@ -437,7 +439,9 @@ scenario "dev_single_cluster" {
version = var.dev_consul_version
} : null
enable_audit_devices = var.vault_enable_audit_devices
hosts = step.create_vault_cluster_targets.hosts
install_dir = local.vault_install_dir
ip_version = local.ip_version
license = matrix.edition != "ce" ? step.read_vault_license.license : null
local_artifact_path = matrix.artifact == "local" ? abspath(var.vault_artifact_path) : null
manage_service = local.manage_service

@@ -446,7 +450,6 @@ scenario "dev_single_cluster" {
seal_attributes = step.create_seal_key.attributes
seal_type = matrix.seal
storage_backend = matrix.backend
target_hosts = step.create_vault_cluster_targets.hosts
}
}

@@ -464,7 +467,7 @@ scenario "dev_single_cluster" {

output "hosts" {
description = "The Vault cluster target hosts"
value = step.create_vault_cluster.target_hosts
value = step.create_vault_cluster.hosts
}

output "private_ips" {
@@ -1,5 +1,5 @@
# Copyright (c) HashiCorp, Inc.
# SPDX-License-Identifier: BUSL-1.1
// Copyright (c) HashiCorp, Inc.
// SPDX-License-Identifier: BUSL-1.1

variable "dev_build_local_ui" {
type = bool
@@ -1,5 +1,5 @@
# Copyright (c) HashiCorp, Inc.
# SPDX-License-Identifier: BUSL-1.1
// Copyright (c) HashiCorp, Inc.
// SPDX-License-Identifier: BUSL-1.1

globals {
archs = ["amd64", "arm64"]

@@ -19,26 +19,27 @@ globals {
consul_editions = ["ce", "ent"]
consul_versions = ["1.14.11", "1.15.7", "1.16.3", "1.17.0"]
distros = ["amzn2", "leap", "rhel", "sles", "ubuntu"]
# Different distros may require different packages, or use different aliases for the same package
// Different distros may require different packages, or use different aliases for the same package
distro_packages = {
amzn2 = ["nc"]
leap = ["netcat", "openssl"]
rhel = ["nc"]
# When installing Vault RPM packages on a SLES AMI, the openssl package provided
# isn't named "openssl, which rpm doesn't know how to handle. Therefore we add the
# "correctly" named one in our package installation before installing Vault.
// When installing Vault RPM packages on a SLES AMI, the openssl package provided
// isn't named "openssl, which rpm doesn't know how to handle. Therefore we add the
// "correctly" named one in our package installation before installing Vault.
sles = ["netcat-openbsd", "openssl"]
ubuntu = ["netcat"]
}
distro_version = {
"amzn2" = var.distro_version_amzn2
"leap" = var.distro_version_leap
"rhel" = var.distro_version_rhel
"sles" = var.distro_version_sles
"ubuntu" = var.distro_version_ubuntu
amzn2 = var.distro_version_amzn2
leap = var.distro_version_leap
rhel = var.distro_version_rhel
sles = var.distro_version_sles
ubuntu = var.distro_version_ubuntu
}
editions = ["ce", "ent", "ent.fips1402", "ent.hsm", "ent.hsm.fips1402"]
enterprise_editions = [for e in global.editions : e if e != "ce"]
ip_versions = ["4", "6"]
package_manager = {
"amzn2" = "yum"
"leap" = "zypper"

@@ -47,6 +48,90 @@ globals {
"ubuntu" = "apt"
}
packages = ["jq"]
// Ports that we'll open up for ingress in the security group for all target machines.
// Port protocol maps to the IpProtocol schema: https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_IpPermission.html
ports = {
ssh : {
description = "SSH"
port = 22
protocol = "tcp"
},
vault_agent : {
description = "Vault Agent"
port = 8100
protocol = "tcp"
},
vault_proxy : {
description = "Vault Proxy"
port = 8101
protocol = "tcp"
},
vault_listener : {
description = "Vault Addr listener"
port = 8200
protocol = "tcp"
},
vault_cluster : {
description = "Vault Cluster listener"
port = 8201
protocol = "tcp"
},
consul_rpc : {
description = "Consul internal communication"
port = 8300
protocol = "tcp"
},
consul_serf_lan_tcp : {
description = "Consul Serf LAN TCP"
port = 8301
protocol = "tcp"
},
consul_serf_lan_udp : {
description = "Consul Serf LAN UDP"
port = 8301
protocol = "udp"
},
consul_serf_wan_tcp : {
description = "Consul Serf WAN TCP"
port = 8302
protocol = "tcp"
},
consul_serf_wan_udp : {
description = "Consul Serf WAN UDP"
port = 8302
protocol = "udp"
},
consul_http : {
description = "Consul HTTP API"
port = 8500
protocol = "tcp"
},
consul_https : {
description = "Consul HTTPS API"
port = 8501
protocol = "tcp"
},
consul_grpc : {
description = "Consul gRPC API"
port = 8502
protocol = "tcp"
},
consul_grpc_tls : {
description = "Consul gRPC TLS API"
port = 8503
protocol = "tcp"
},
consul_dns_tcp : {
description = "Consul TCP DNS Server"
port = 8600
protocol = "tcp"
},
consul_dns_udp : {
description = "Consul UDP DNS Server"
port = 8600
protocol = "udp"
},
}
sample_attributes = {
aws_region = ["us-east-1", "us-west-2"]
distro_version_amzn2 = ["2"]

@@ -78,5 +163,5 @@ globals {
package = "/usr/bin"
}
vault_license_path = abspath(var.vault_license_path != null ? var.vault_license_path : joinpath(path.root, "./support/vault.hclic"))
vault_tag_key = "Type" // enos_vault_start expects Type as the tag key
vault_tag_key = "vault-cluster"
}
@@ -1,5 +1,5 @@
# Copyright (c) HashiCorp, Inc.
# SPDX-License-Identifier: BUSL-1.1
// Copyright (c) HashiCorp, Inc.
// SPDX-License-Identifier: BUSL-1.1

module "autopilot_upgrade_storageconfig" {
source = "./modules/autopilot_upgrade_storageconfig"

@@ -114,7 +114,7 @@ module "stop_vault" {
source = "./modules/stop_vault"
}

# create target instances using ec2:CreateFleet
// create target instances using ec2:CreateFleet
module "target_ec2_fleet" {
source = "./modules/target_ec2_fleet"

@@ -123,25 +123,27 @@ module "target_ec2_fleet" {
ssh_keypair = var.aws_ssh_keypair_name
}

# create target instances using ec2:RunInstances
// create target instances using ec2:RunInstances
module "target_ec2_instances" {
source = "./modules/target_ec2_instances"

common_tags = var.tags
project_name = var.project_name
ssh_keypair = var.aws_ssh_keypair_name
common_tags = var.tags
ports_ingress = values(global.ports)
project_name = var.project_name
ssh_keypair = var.aws_ssh_keypair_name
}

# don't create instances but satisfy the module interface
// don't create instances but satisfy the module interface
module "target_ec2_shim" {
source = "./modules/target_ec2_shim"

common_tags = var.tags
project_name = var.project_name
ssh_keypair = var.aws_ssh_keypair_name
common_tags = var.tags
ports_ingress = values(global.ports)
project_name = var.project_name
ssh_keypair = var.aws_ssh_keypair_name
}

# create target instances using ec2:RequestSpotFleet
// create target instances using ec2:RequestSpotFleet
module "target_ec2_spot_fleet" {
source = "./modules/target_ec2_spot_fleet"

@@ -153,36 +155,34 @@ module "target_ec2_spot_fleet" {
module "vault_agent" {
source = "./modules/vault_agent"

vault_install_dir = var.vault_install_dir
vault_instance_count = var.vault_instance_count
vault_install_dir = var.vault_install_dir
vault_agent_port = global.ports["vault_agent"]["port"]
}

module "vault_proxy" {
source = "./modules/vault_proxy"

vault_install_dir = var.vault_install_dir
vault_instance_count = var.vault_instance_count
vault_install_dir = var.vault_install_dir
vault_proxy_port = global.ports["vault_proxy"]["port"]
}

module "vault_verify_agent_output" {
source = "./modules/vault_verify_agent_output"

vault_instance_count = var.vault_instance_count
}

module "vault_cluster" {
source = "./modules/vault_cluster"

install_dir = var.vault_install_dir
consul_license = var.backend_license_path == null ? null : file(abspath(var.backend_license_path))
log_level = var.vault_log_level
install_dir = var.vault_install_dir
consul_license = var.backend_license_path == null ? null : file(abspath(var.backend_license_path))
cluster_tag_key = global.vault_tag_key
log_level = var.vault_log_level
}

module "vault_get_cluster_ips" {
source = "./modules/vault_get_cluster_ips"

vault_install_dir = var.vault_install_dir
vault_instance_count = var.vault_instance_count
vault_install_dir = var.vault_install_dir
}

module "vault_raft_remove_peer" {

@@ -211,15 +211,13 @@ module "vault_test_ui" {
module "vault_unseal_nodes" {
source = "./modules/vault_unseal_nodes"

vault_install_dir = var.vault_install_dir
vault_instance_count = var.vault_instance_count
vault_install_dir = var.vault_install_dir
}

module "vault_upgrade" {
source = "./modules/vault_upgrade"

vault_install_dir = var.vault_install_dir
vault_instance_count = var.vault_instance_count
vault_install_dir = var.vault_install_dir
}

module "vault_verify_autopilot" {

@@ -227,48 +225,39 @@ module "vault_verify_autopilot" {

vault_autopilot_upgrade_status = "await-server-removal"
vault_install_dir = var.vault_install_dir
vault_instance_count = var.vault_instance_count
}

module "vault_verify_raft_auto_join_voter" {
source = "./modules/vault_verify_raft_auto_join_voter"

vault_install_dir = var.vault_install_dir
vault_instance_count = var.vault_instance_count
vault_install_dir = var.vault_install_dir
vault_cluster_addr_port = global.ports["vault_cluster"]["port"]
}

module "vault_verify_undo_logs" {
source = "./modules/vault_verify_undo_logs"

vault_install_dir = var.vault_install_dir
vault_instance_count = var.vault_instance_count
vault_install_dir = var.vault_install_dir
}

module "vault_verify_default_lcq" {
source = "./modules/vault_verify_default_lcq"

vault_autopilot_default_max_leases = "300000"
vault_instance_count = var.vault_instance_count
}

module "vault_verify_replication" {
source = "./modules/vault_verify_replication"

vault_install_dir = var.vault_install_dir
vault_instance_count = var.vault_instance_count
}

module "vault_verify_ui" {
source = "./modules/vault_verify_ui"

vault_instance_count = var.vault_instance_count
}

module "vault_verify_unsealed" {
source = "./modules/vault_verify_unsealed"

vault_install_dir = var.vault_install_dir
vault_instance_count = var.vault_instance_count
vault_install_dir = var.vault_install_dir
}

module "vault_setup_perf_primary" {

@@ -280,8 +269,7 @@ module "vault_setup_perf_primary" {
module "vault_verify_read_data" {
source = "./modules/vault_verify_read_data"

vault_install_dir = var.vault_install_dir
vault_instance_count = var.vault_instance_count
vault_install_dir = var.vault_install_dir
}

module "vault_verify_performance_replication" {

@@ -293,15 +281,13 @@ module "vault_verify_performance_replication" {
module "vault_verify_version" {
source = "./modules/vault_verify_version"

vault_install_dir = var.vault_install_dir
vault_instance_count = var.vault_instance_count
vault_install_dir = var.vault_install_dir
}

module "vault_verify_write_data" {
source = "./modules/vault_verify_write_data"

vault_install_dir = var.vault_install_dir
vault_instance_count = var.vault_instance_count
vault_install_dir = var.vault_install_dir
}

module "vault_wait_for_leader" {
@@ -1,11 +1,11 @@
# Copyright (c) HashiCorp, Inc.
# SPDX-License-Identifier: BUSL-1.1
// Copyright (c) HashiCorp, Inc.
// SPDX-License-Identifier: BUSL-1.1

provider "aws" "default" {
region = var.aws_region
}

# This default SSH user is used in RHEL, Amazon Linux, SUSE, and Leap distros
// This default SSH user is used in RHEL, Amazon Linux, SUSE, and Leap distros
provider "enos" "ec2_user" {
transport = {
ssh = {

@@ -15,7 +15,7 @@ provider "enos" "ec2_user" {
}
}

# This default SSH user is used in the Ubuntu distro
// This default SSH user is used in the Ubuntu distro
provider "enos" "ubuntu" {
transport = {
ssh = {
@@ -1,5 +1,5 @@
# Copyright (c) HashiCorp, Inc.
# SPDX-License-Identifier: BUSL-1.1
// Copyright (c) HashiCorp, Inc.
// SPDX-License-Identifier: BUSL-1.1

quality "consul_api_agent_host_read" {
description = "The /v1/agent/host Consul API returns host info for each node in the cluster"

@@ -355,6 +355,14 @@ quality "vault_license_required_ent" {
description = "Vault Enterprise requires a license in order to start"
}

quality "vault_listener_ipv4" {
description = "Vault operates on ipv4 TCP listeners"
}

quality "vault_listener_ipv6" {
description = "Vault operates on ipv6 TCP listeners"
}

quality "vault_mount_auth" {
description = "Vault mounts the auth engine"
}
@@ -1,5 +1,5 @@
# Copyright (c) HashiCorp, Inc.
# SPDX-License-Identifier: BUSL-1.1
// Copyright (c) HashiCorp, Inc.
// SPDX-License-Identifier: BUSL-1.1

sample "build_ce_linux_amd64_deb" {
attributes = global.sample_attributes
@@ -1,5 +1,5 @@
# Copyright (c) HashiCorp, Inc.
# SPDX-License-Identifier: BUSL-1.1
// Copyright (c) HashiCorp, Inc.
// SPDX-License-Identifier: BUSL-1.1

sample "release_ce_linux_amd64_deb" {
attributes = global.sample_attributes
@@ -1,5 +1,5 @@
# Copyright (c) HashiCorp, Inc.
# SPDX-License-Identifier: BUSL-1.1
// Copyright (c) HashiCorp, Inc.
// SPDX-License-Identifier: BUSL-1.1

scenario "agent" {
description = <<-EOF

@@ -28,32 +28,39 @@ scenario "agent" {
consul_version = global.consul_versions
distro = global.distros
edition = global.editions
ip_version = global.ip_versions
seal = global.seals

# Our local builder always creates bundles
// Our local builder always creates bundles
exclude {
artifact_source = ["local"]
artifact_type = ["package"]
}

# PKCS#11 can only be used on ent.hsm and ent.hsm.fips1402.
// PKCS#11 can only be used on ent.hsm and ent.hsm.fips1402.
exclude {
seal = ["pkcs11"]
edition = [for e in matrix.edition : e if !strcontains(e, "hsm")]
}

# arm64 AMIs are not offered for Leap
// arm64 AMIs are not offered for Leap
exclude {
distro = ["leap"]
arch = ["arm64"]
}

# softhsm packages not available for leap/sles. Enos support for softhsm on amzn2 is
# not implemented yet.
// softhsm packages not available for leap/sles. Enos support for softhsm on amzn2 is
// not implemented yet.
exclude {
seal = ["pkcs11"]
distro = ["amzn2", "leap", "sles"]
}

// Testing in IPV6 mode is currently implemented for integrated Raft storage only
exclude {
ip_version = ["6"]
backend = ["consul"]
}
}

terraform_cli = terraform_cli.default

@@ -109,6 +116,7 @@ scenario "agent" {

variables {
common_tags = global.tags
ip_version = matrix.ip_version
}
}

@@ -216,12 +224,12 @@ scenario "agent" {
variables {
cluster_name = step.create_vault_cluster_backend_targets.cluster_name
cluster_tag_key = global.backend_tag_key
hosts = step.create_vault_cluster_backend_targets.hosts
license = (matrix.backend == "consul" && matrix.consul_edition == "ent") ? step.read_backend_license.license : null
release = {
edition = matrix.consul_edition
version = matrix.consul_version
}
target_hosts = step.create_vault_cluster_backend_targets.hosts
}
}

@@ -252,6 +260,8 @@ scenario "agent" {
quality.vault_config_log_level,
quality.vault_config_file,
quality.vault_license_required_ent,
quality.vault_listener_ipv4,
quality.vault_listener_ipv6,
quality.vault_service_start,
quality.vault_init,
quality.vault_storage_backend_consul,

@@ -283,7 +293,9 @@ scenario "agent" {
version = matrix.consul_version
} : null
enable_audit_devices = var.vault_enable_audit_devices
hosts = step.create_vault_cluster_targets.hosts
install_dir = global.vault_install_dir[matrix.artifact_type]
ip_version = matrix.ip_version
license = matrix.edition != "ce" ? step.read_vault_license.license : null
local_artifact_path = local.artifact_path
manage_service = local.manage_service

@@ -291,7 +303,6 @@ scenario "agent" {
seal_attributes = step.create_seal_key.attributes
seal_type = matrix.seal
storage_backend = matrix.backend
target_hosts = step.create_vault_cluster_targets.hosts
}
}

@@ -317,8 +328,10 @@ scenario "agent" {
]

variables {
timeout = 120 # seconds
vault_hosts = step.create_vault_cluster_targets.hosts
hosts = step.create_vault_cluster_targets.hosts
ip_version = matrix.ip_version
timeout = 120 // seconds
vault_addr = step.create_vault_cluster.api_addr_localhost
vault_install_dir = global.vault_install_dir[matrix.artifact_type]
vault_root_token = step.create_vault_cluster.root_token
}

@@ -326,7 +339,7 @@ scenario "agent" {

step "start_vault_agent" {
description = global.description.start_vault_agent
module = "vault_agent"
module = module.vault_agent
depends_on = [
step.build_vault,
step.create_vault_cluster,

@@ -343,8 +356,10 @@ scenario "agent" {
}

variables {
hosts = step.create_vault_cluster_targets.hosts
ip_version = matrix.ip_version
vault_addr = step.create_vault_cluster.api_addr_localhost
vault_install_dir = global.vault_install_dir[matrix.artifact_type]
vault_instances = step.create_vault_cluster_targets.hosts
vault_root_token = step.create_vault_cluster.root_token
vault_agent_template_destination = "/tmp/agent_output.txt"
vault_agent_template_contents = "{{ with secret \\\"auth/token/lookup-self\\\" }}orphan={{ .Data.orphan }} display_name={{ .Data.display_name }}{{ end }}"

@@ -366,7 +381,7 @@ scenario "agent" {
}

variables {
vault_instances = step.create_vault_cluster_targets.hosts
hosts = step.create_vault_cluster_targets.hosts
vault_agent_template_destination = "/tmp/agent_output.txt"
vault_agent_expected_output = "orphan=true display_name=approle"
}

@@ -388,7 +403,9 @@ scenario "agent" {
]

variables {
vault_hosts = step.create_vault_cluster_targets.hosts
hosts = step.create_vault_cluster_targets.hosts
ip_version = matrix.ip_version
vault_addr = step.create_vault_cluster.api_addr_localhost
vault_install_dir = global.vault_install_dir[matrix.artifact_type]
vault_root_token = step.create_vault_cluster.root_token
}

@@ -410,7 +427,8 @@ scenario "agent" {
]

variables {
vault_instances = step.create_vault_cluster_targets.hosts
hosts = step.create_vault_cluster_targets.hosts
vault_addr = step.create_vault_cluster.api_addr_localhost
vault_edition = matrix.edition
vault_install_dir = global.vault_install_dir[matrix.artifact_type]
vault_product_version = matrix.artifact_source == "local" ? step.get_local_metadata.version : var.vault_product_version

@@ -436,8 +454,9 @@ scenario "agent" {
]

variables {
hosts = step.create_vault_cluster_targets.hosts
vault_addr = step.create_vault_cluster.api_addr_localhost
vault_install_dir = global.vault_install_dir[matrix.artifact_type]
vault_instances = step.create_vault_cluster_targets.hosts
}
}

@@ -461,9 +480,9 @@ scenario "agent" {
]

variables {
leader_public_ip = step.get_vault_cluster_ips.leader_public_ip
leader_private_ip = step.get_vault_cluster_ips.leader_private_ip
vault_instances = step.create_vault_cluster_targets.hosts
hosts = step.create_vault_cluster_targets.hosts
leader_host = step.get_vault_cluster_ips.leader_host
vault_addr = step.create_vault_cluster.api_addr_localhost
vault_install_dir = global.vault_install_dir[matrix.artifact_type]
vault_root_token = step.create_vault_cluster.root_token
}

@@ -485,8 +504,10 @@ scenario "agent" {
verifies = quality.vault_raft_voters

variables {
hosts = step.create_vault_cluster_targets.hosts
ip_version = matrix.ip_version
vault_addr = step.create_vault_cluster.api_addr_localhost
vault_install_dir = global.vault_install_dir[matrix.artifact_type]
vault_instances = step.create_vault_cluster_targets.hosts
vault_root_token = step.create_vault_cluster.root_token
}
}

@@ -510,9 +531,9 @@ scenario "agent" {
]

variables {
vault_edition = matrix.edition
vault_install_dir = global.vault_install_dir[matrix.artifact_type]
vault_instances = step.create_vault_cluster_targets.hosts
hosts = step.create_vault_cluster_targets.hosts
vault_addr = step.create_vault_cluster.api_addr_localhost
vault_edition = matrix.edition
}
}

@@ -531,7 +552,8 @@ scenario "agent" {
verifies = quality.vault_secrets_kv_read

variables {
node_public_ips = step.get_vault_cluster_ips.follower_public_ips
hosts = step.get_vault_cluster_ips.follower_hosts
vault_addr = step.create_vault_cluster.api_addr_localhost
vault_install_dir = global.vault_install_dir[matrix.artifact_type]
}
}

@@ -548,7 +570,8 @@ scenario "agent" {
verifies = quality.vault_ui_assets

variables {
vault_instances = step.create_vault_cluster_targets.hosts
hosts = step.create_vault_cluster_targets.hosts
vault_addr = step.create_vault_cluster.api_addr_localhost
}
}

@@ -564,7 +587,7 @@ scenario "agent" {

output "hosts" {
description = "The Vault cluster target hosts"
value = step.create_vault_cluster.target_hosts
value = step.create_vault_cluster.hosts
}

output "private_ips" {
@@ -1,5 +1,5 @@
# Copyright (c) HashiCorp, Inc.
# SPDX-License-Identifier: BUSL-1.1
// Copyright (c) HashiCorp, Inc.
// SPDX-License-Identifier: BUSL-1.1

scenario "autopilot" {
description = <<-EOF

@@ -28,44 +28,51 @@ scenario "autopilot" {
distro = global.distros
edition = global.enterprise_editions
initial_version = global.upgrade_initial_versions_ent
ip_version = global.ip_versions
seal = global.seals

# Autopilot wasn't available before 1.11.x
// Autopilot wasn't available before 1.11.x
exclude {
initial_version = [for e in matrix.initial_version : e if semverconstraint(e, "<1.11.0-0")]
}

# Our local builder always creates bundles
// Our local builder always creates bundles
exclude {
artifact_source = ["local"]
artifact_type = ["package"]
}

# There are no published versions of these artifacts yet. We'll update this to exclude older
# versions after our initial publication of these editions for arm64.
// There are no published versions of these artifacts yet. We'll update this to exclude older
// versions after our initial publication of these editions for arm64.
exclude {
arch = ["arm64"]
edition = ["ent.fips1402", "ent.hsm", "ent.hsm.fips1402"]
}

# PKCS#11 can only be used on ent.hsm and ent.hsm.fips1402.
// PKCS#11 can only be used on ent.hsm and ent.hsm.fips1402.
exclude {
seal = ["pkcs11"]
edition = [for e in matrix.edition : e if !strcontains(e, "hsm")]
}

# arm64 AMIs are not offered for Leap
// arm64 AMIs are not offered for Leap
exclude {
distro = ["leap"]
arch = ["arm64"]
}

# softhsm packages not available for leap/sles. Enos support for softhsm on amzn2 is
# not implemented yet.
// softhsm packages not available for leap/sles. Enos support for softhsm on amzn2 is
// not implemented yet.
exclude {
seal = ["pkcs11"]
distro = ["amzn2", "leap", "sles"]
}

// Testing in IPV6 mode is currently implemented for integrated Raft storage only
exclude {
ip_version = ["6"]
backend = ["consul"]
}
}

terraform_cli = terraform_cli.default

@@ -123,6 +130,7 @@ scenario "autopilot" {

variables {
common_tags = global.tags
ip_version = matrix.ip_version
}
}

@@ -163,6 +171,7 @@ scenario "autopilot" {
ami_id = step.ec2_info.ami_ids[matrix.arch][matrix.distro][global.distro_version[matrix.distro]]
cluster_tag_key = global.vault_tag_key
common_tags = global.tags
instance_count = 3
seal_key_names = step.create_seal_key.resource_names
vpc_id = step.create_vpc.id
}

@@ -178,11 +187,13 @@ scenario "autopilot" {
}

variables {
ami_id = step.ec2_info.ami_ids[matrix.arch][matrix.distro][global.distro_version[matrix.distro]]
common_tags = global.tags
cluster_name = step.create_vault_cluster_targets.cluster_name
seal_key_names = step.create_seal_key.resource_names
vpc_id = step.create_vpc.id
ami_id = step.ec2_info.ami_ids[matrix.arch][matrix.distro][global.distro_version[matrix.distro]]
common_tags = global.tags
cluster_name = step.create_vault_cluster_targets.cluster_name
cluster_tag_key = global.vault_tag_key
instance_count = 3
seal_key_names = step.create_seal_key.resource_names
vpc_id = step.create_vpc.id
}
}

@@ -216,6 +227,8 @@ scenario "autopilot" {
quality.vault_config_log_level,
quality.vault_init,
quality.vault_license_required_ent,
quality.vault_listener_ipv4,
quality.vault_listener_ipv6,
quality.vault_service_start,
quality.vault_storage_backend_consul,
quality.vault_storage_backend_raft,

@@ -238,7 +251,9 @@ scenario "autopilot" {
cluster_name = step.create_vault_cluster_targets.cluster_name
config_mode = matrix.config_mode
enable_audit_devices = var.vault_enable_audit_devices
hosts = step.create_vault_cluster_targets.hosts
install_dir = global.vault_install_dir[matrix.artifact_type]
ip_version = matrix.ip_version
license = matrix.edition != "ce" ? step.read_license.license : null
packages = concat(global.packages, global.distro_packages[matrix.distro])
release = {

@@ -251,7 +266,6 @@ scenario "autopilot" {
storage_backend_addl_config = {
autopilot_upgrade_version = matrix.initial_version
}
target_hosts = step.create_vault_cluster_targets.hosts
}
}

@@ -277,8 +291,10 @@ scenario "autopilot" {
]

variables {
timeout = 120 # seconds
vault_hosts = step.create_vault_cluster_targets.hosts
hosts = step.create_vault_cluster_targets.hosts
ip_version = matrix.ip_version
timeout = 120 // seconds
vault_addr = step.create_vault_cluster.api_addr_localhost
vault_install_dir = local.vault_install_dir
vault_root_token = step.create_vault_cluster.root_token
}

@@ -303,7 +319,9 @@ scenario "autopilot" {
]

variables {
vault_hosts = step.create_vault_cluster.target_hosts
hosts = step.create_vault_cluster.hosts
ip_version = matrix.ip_version
vault_addr = step.create_vault_cluster.api_addr_localhost
vault_install_dir = global.vault_install_dir[matrix.artifact_type]
vault_root_token = step.create_vault_cluster.root_token
}

@@ -329,9 +347,9 @@ scenario "autopilot" {
]

variables {
leader_public_ip = step.get_vault_cluster_ips.leader_public_ip
leader_private_ip = step.get_vault_cluster_ips.leader_private_ip
vault_instances = step.create_vault_cluster.target_hosts
hosts = step.create_vault_cluster.hosts
leader_host = step.get_vault_cluster_ips.leader_host
vault_addr = step.create_vault_cluster.api_addr_localhost
vault_install_dir = global.vault_install_dir[matrix.artifact_type]
vault_root_token = step.create_vault_cluster.root_token
}

@@ -364,15 +382,17 @@ scenario "autopilot" {

variables {
artifactory_release = matrix.artifact_source == "artifactory" ? step.build_vault.vault_artifactory_release : null
enable_audit_devices = var.vault_enable_audit_devices
cluster_name = step.create_vault_cluster_targets.cluster_name
config_mode = matrix.config_mode
log_level = var.vault_log_level
enable_audit_devices = var.vault_enable_audit_devices
force_unseal = matrix.seal == "shamir"
hosts = step.create_vault_cluster_upgrade_targets.hosts
initialize_cluster = false
install_dir = global.vault_install_dir[matrix.artifact_type]
ip_version = matrix.ip_version
license = matrix.edition != "ce" ? step.read_license.license : null
local_artifact_path = local.artifact_path
log_level = var.vault_log_level
manage_service = local.manage_service
packages = concat(global.packages, global.distro_packages[matrix.distro])
root_token = step.create_vault_cluster.root_token

@@ -382,7 +402,6 @@ scenario "autopilot" {
storage_backend = "raft"
storage_backend_addl_config = step.create_autopilot_upgrade_storageconfig.storage_addl_config
storage_node_prefix = "upgrade_node"
target_hosts = step.create_vault_cluster_upgrade_targets.hosts
}
}

@@ -407,8 +426,9 @@ scenario "autopilot" {
]

variables {
hosts = step.upgrade_vault_cluster_with_autopilot.hosts
vault_addr = step.upgrade_vault_cluster_with_autopilot.api_addr_localhost
vault_install_dir = global.vault_install_dir[matrix.artifact_type]
vault_instances = step.upgrade_vault_cluster_with_autopilot.target_hosts
}
}

@@ -427,8 +447,10 @@ scenario "autopilot" {
verifies = quality.vault_raft_voters

variables {
hosts = step.upgrade_vault_cluster_with_autopilot.hosts
ip_version = matrix.ip_version
vault_addr = step.upgrade_vault_cluster_with_autopilot.api_addr_localhost
vault_install_dir = global.vault_install_dir[matrix.artifact_type]
vault_instances = step.upgrade_vault_cluster_with_autopilot.target_hosts
vault_root_token = step.upgrade_vault_cluster_with_autopilot.root_token
}
}

@@ -452,10 +474,11 @@ scenario "autopilot" {
]

variables {
hosts = step.create_vault_cluster.hosts
vault_addr = step.create_vault_cluster.api_addr_localhost
vault_autopilot_upgrade_version = matrix.artifact_source == "local" ? step.get_local_metadata.version : var.vault_product_version
vault_autopilot_upgrade_status = "await-server-removal"
vault_install_dir = global.vault_install_dir[matrix.artifact_type]
vault_instances = step.create_vault_cluster.target_hosts
vault_root_token = step.upgrade_vault_cluster_with_autopilot.root_token
}
}

@@ -480,9 +503,12 @@ scenario "autopilot" {
]

variables {
hosts = step.upgrade_vault_cluster_with_autopilot.hosts
ip_version = matrix.ip_version
timeout = 120 // seconds
vault_addr = step.create_vault_cluster.api_addr_localhost
vault_install_dir = global.vault_install_dir[matrix.artifact_type]
vault_root_token = step.create_vault_cluster.root_token
vault_hosts = step.upgrade_vault_cluster_with_autopilot.target_hosts
}
}

@@ -508,7 +534,9 @@ scenario "autopilot" {
]

variables {
vault_hosts = step.upgrade_vault_cluster_with_autopilot.target_hosts
hosts = step.upgrade_vault_cluster_with_autopilot.hosts
ip_version = matrix.ip_version
vault_addr = step.create_vault_cluster.api_addr_localhost
vault_install_dir = global.vault_install_dir[matrix.artifact_type]
vault_root_token = step.create_vault_cluster.root_token
}

@@ -531,9 +559,9 @@ scenario "autopilot" {
verifies = quality.vault_secrets_kv_read

variables {
node_public_ips = step.get_updated_vault_cluster_ips.follower_public_ips
vault_instance_count = 6
vault_install_dir = global.vault_install_dir[matrix.artifact_type]
hosts = step.get_updated_vault_cluster_ips.follower_hosts
vault_addr = step.upgrade_vault_cluster_with_autopilot.api_addr_localhost
vault_install_dir = global.vault_install_dir[matrix.artifact_type]
}
}

@@ -559,11 +587,13 @@ scenario "autopilot" {
]

variables {
operator_instance = step.get_updated_vault_cluster_ips.leader_public_ip
remove_vault_instances = step.create_vault_cluster.target_hosts
vault_install_dir = global.vault_install_dir[matrix.artifact_type]
vault_instance_count = 3
vault_root_token = step.create_vault_cluster.root_token
hosts = step.create_vault_cluster.hosts
ip_version = matrix.ip_version
operator_instance = step.get_updated_vault_cluster_ips.leader_public_ip
vault_addr = step.upgrade_vault_cluster_with_autopilot.api_addr_localhost
vault_cluster_addr_port = step.upgrade_vault_cluster_with_autopilot.cluster_port
vault_install_dir = global.vault_install_dir[matrix.artifact_type]
vault_root_token = step.create_vault_cluster.root_token
}
}

@@ -580,8 +610,7 @@ scenario "autopilot" {
}

variables {
old_vault_instances = step.create_vault_cluster.target_hosts
vault_instance_count = 3
old_hosts = step.create_vault_cluster.hosts
}
}

@@ -605,10 +634,11 @@ scenario "autopilot" {
]

variables {
hosts = step.upgrade_vault_cluster_with_autopilot.hosts
vault_addr = step.upgrade_vault_cluster_with_autopilot.api_addr_localhost
vault_autopilot_upgrade_version = matrix.artifact_source == "local" ? step.get_local_metadata.version : var.vault_product_version
vault_autopilot_upgrade_status = "idle"
vault_install_dir = global.vault_install_dir[matrix.artifact_type]
vault_instances = step.upgrade_vault_cluster_with_autopilot.target_hosts
vault_root_token = step.create_vault_cluster.root_token
}
}

@@ -634,9 +664,9 @@ scenario "autopilot" {
]

variables {
vault_edition = matrix.edition
vault_install_dir = global.vault_install_dir[matrix.artifact_type]
vault_instances = step.upgrade_vault_cluster_with_autopilot.target_hosts
hosts = step.upgrade_vault_cluster_with_autopilot.hosts
vault_addr = step.upgrade_vault_cluster_with_autopilot.api_addr_localhost
vault_edition = matrix.edition
}
}

@@ -661,7 +691,8 @@ scenario "autopilot" {
]

variables {
vault_instances = step.upgrade_vault_cluster_with_autopilot.target_hosts
hosts = step.upgrade_vault_cluster_with_autopilot.hosts
vault_addr = step.upgrade_vault_cluster_with_autopilot.api_addr_localhost
vault_edition = matrix.edition
vault_install_dir = global.vault_install_dir[matrix.artifact_type]
vault_product_version = matrix.artifact_source == "local" ? step.get_local_metadata.version : var.vault_product_version

@@ -688,14 +719,15 @@ scenario "autopilot" {
verifies = quality.vault_ui_assets

variables {
vault_instances = step.upgrade_vault_cluster_with_autopilot.target_hosts
hosts = step.upgrade_vault_cluster_with_autopilot.hosts
vault_addr = step.upgrade_vault_cluster_with_autopilot.api_addr_localhost
}
}

step "verify_undo_logs_status" {
skip_step = true
# NOTE: temporarily disable undo logs checking until it is fixed. See VAULT-20259
# skip_step = semverconstraint(var.vault_product_version, "<1.13.0-0")
// NOTE: temporarily disable undo logs checking until it is fixed. See VAULT-20259
// skip_step = semverconstraint(var.vault_product_version, "<1.13.0-0")
module = module.vault_verify_undo_logs
description = <<-EOF
Verifies that undo logs is correctly enabled on newly upgraded target hosts. For this it will

@@ -716,13 +748,13 @@ scenario "autopilot" {
}

variables {
hosts = step.upgrade_vault_cluster_with_autopilot.hosts
vault_install_dir = global.vault_install_dir[matrix.artifact_type]
vault_instances = step.upgrade_vault_cluster_with_autopilot.target_hosts
vault_root_token = step.create_vault_cluster.root_token
}
}

# Verify that upgrading from a version <1.16.0 does not introduce Default LCQ
// Verify that upgrading from a version <1.16.0 does not introduce Default LCQ
step "verify_default_lcq" {
description = <<-EOF
Verify that the default max lease count is 300,000 when the upgraded nodes are running

@@ -743,7 +775,8 @@ scenario "autopilot" {
}

variables {
vault_instances = step.upgrade_vault_cluster_with_autopilot.target_hosts
hosts = step.upgrade_vault_cluster_with_autopilot.hosts
vault_addr = step.upgrade_vault_cluster_with_autopilot.api_addr_localhost
vault_root_token = step.create_vault_cluster.root_token
vault_autopilot_default_max_leases = local.vault_autopilot_default_max_leases
}

@@ -761,7 +794,7 @@ scenario "autopilot" {

output "hosts" {
description = "The Vault cluster target hosts"
value = step.create_vault_cluster.target_hosts
value = step.create_vault_cluster.hosts
}

output "private_ips" {

@@ -811,7 +844,7 @@ scenario "autopilot" {

output "upgrade_hosts" {
description = "The Vault cluster target hosts"
value = step.upgrade_vault_cluster_with_autopilot.target_hosts
value = step.upgrade_vault_cluster_with_autopilot.hosts
}

output "upgrade_private_ips" {
@@ -1,5 +1,5 @@
# Copyright (c) HashiCorp, Inc.
# SPDX-License-Identifier: BUSL-1.1
// Copyright (c) HashiCorp, Inc.
// SPDX-License-Identifier: BUSL-1.1

scenario "proxy" {
description = <<-EOF
@@ -28,32 +28,39 @@ scenario "proxy" {
consul_version = global.consul_versions
distro = global.distros
edition = global.editions
ip_version = global.ip_versions
seal = global.seals

# Our local builder always creates bundles
// Our local builder always creates bundles
exclude {
artifact_source = ["local"]
artifact_type = ["package"]
}

# PKCS#11 can only be used on ent.hsm and ent.hsm.fips1402.
// PKCS#11 can only be used on ent.hsm and ent.hsm.fips1402.
exclude {
seal = ["pkcs11"]
edition = [for e in matrix.edition : e if !strcontains(e, "hsm")]
}

# arm64 AMIs are not offered for Leap
// arm64 AMIs are not offered for Leap
exclude {
distro = ["leap"]
arch = ["arm64"]
}

# softhsm packages not available for leap/sles. Enos support for softhsm on amzn2 is
# not implemented yet.
// softhsm packages not available for leap/sles. Enos support for softhsm on amzn2 is
// not implemented yet.
exclude {
seal = ["pkcs11"]
distro = ["amzn2", "leap", "sles"]
}

// Testing in IPV6 mode is currently implemented for integrated Raft storage only
exclude {
ip_version = ["6"]
backend = ["consul"]
}
}
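// To illustrate what the new ip_version variant ultimately selects (example addresses only,
// assumed rather than taken from the target modules): the Vault listener and cluster addresses
// are rendered for the chosen IP version, roughly as follows.
// listener "tcp" {
//   address         = "[::]:8200"    // ip_version = 6
//   cluster_address = "[::]:8201"
//   // with ip_version = 4 these would be 0.0.0.0:8200 and 0.0.0.0:8201
// }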

terraform_cli = terraform_cli.default
@@ -116,6 +123,7 @@ scenario "proxy" {

variables {
common_tags = global.tags
ip_version = matrix.ip_version
}
}

@@ -223,12 +231,12 @@ scenario "proxy" {
variables {
cluster_name = step.create_vault_cluster_backend_targets.cluster_name
cluster_tag_key = global.backend_tag_key
hosts = step.create_vault_cluster_backend_targets.hosts
license = (matrix.backend == "consul" && matrix.consul_edition == "ent") ? step.read_backend_license.license : null
release = {
edition = matrix.consul_edition
version = matrix.consul_version
}
target_hosts = step.create_vault_cluster_backend_targets.hosts
}
}

@@ -260,6 +268,8 @@ scenario "proxy" {
quality.vault_config_log_level,
quality.vault_init,
quality.vault_license_required_ent,
quality.vault_listener_ipv4,
quality.vault_listener_ipv6,
quality.vault_service_start,
quality.vault_storage_backend_consul,
quality.vault_storage_backend_raft,
@@ -290,7 +300,9 @@ scenario "proxy" {
version = matrix.consul_version
} : null
enable_audit_devices = var.vault_enable_audit_devices
hosts = step.create_vault_cluster_targets.hosts
install_dir = global.vault_install_dir[matrix.artifact_type]
ip_version = matrix.ip_version
license = matrix.edition != "ce" ? step.read_vault_license.license : null
local_artifact_path = local.artifact_path
manage_service = local.manage_service
@@ -298,7 +310,6 @@ scenario "proxy" {
seal_attributes = step.create_seal_key.attributes
seal_type = matrix.seal
storage_backend = matrix.backend
target_hosts = step.create_vault_cluster_targets.hosts
}
}

@@ -318,15 +329,17 @@ scenario "proxy" {
]

variables {
timeout = 120 # seconds
vault_hosts = step.create_vault_cluster_targets.hosts
hosts = step.create_vault_cluster_targets.hosts
ip_version = matrix.ip_version
timeout = 120 // seconds
vault_addr = step.create_vault_cluster.api_addr_localhost
vault_install_dir = global.vault_install_dir[matrix.artifact_type]
vault_root_token = step.create_vault_cluster.root_token
}
}

step "start_vault_proxy" {
module = "vault_proxy"
module = module.vault_proxy
depends_on = [
step.build_vault,
step.create_vault_cluster,
@@ -343,8 +356,10 @@ scenario "proxy" {
]

variables {
hosts = step.create_vault_cluster_targets.hosts
ip_version = matrix.ip_version
vault_addr = step.create_vault_cluster.api_addr_localhost
vault_install_dir = global.vault_install_dir[matrix.artifact_type]
vault_instances = step.create_vault_cluster_targets.hosts
vault_root_token = step.create_vault_cluster.root_token
}
}
@@ -365,7 +380,9 @@ scenario "proxy" {
]

variables {
vault_hosts = step.create_vault_cluster_targets.hosts
hosts = step.create_vault_cluster_targets.hosts
ip_version = matrix.ip_version
vault_addr = step.create_vault_cluster.api_addr_localhost
vault_install_dir = global.vault_install_dir[matrix.artifact_type]
vault_root_token = step.create_vault_cluster.root_token
}
@@ -387,7 +404,8 @@ scenario "proxy" {
]

variables {
vault_instances = step.create_vault_cluster_targets.hosts
hosts = step.create_vault_cluster_targets.hosts
vault_addr = step.create_vault_cluster.api_addr_localhost
vault_edition = matrix.edition
vault_install_dir = global.vault_install_dir[matrix.artifact_type]
vault_product_version = matrix.artifact_source == "local" ? step.get_local_metadata.version : var.vault_product_version
@@ -413,8 +431,9 @@ scenario "proxy" {
]

variables {
hosts = step.create_vault_cluster_targets.hosts
vault_addr = step.create_vault_cluster.api_addr_localhost
vault_install_dir = global.vault_install_dir[matrix.artifact_type]
vault_instances = step.create_vault_cluster_targets.hosts
}
}

@@ -438,9 +457,9 @@ scenario "proxy" {
]

variables {
leader_public_ip = step.get_vault_cluster_ips.leader_public_ip
leader_private_ip = step.get_vault_cluster_ips.leader_private_ip
vault_instances = step.create_vault_cluster_targets.hosts
hosts = step.create_vault_cluster_targets.hosts
leader_host = step.get_vault_cluster_ips.leader_host
vault_addr = step.create_vault_cluster.api_addr_localhost
vault_install_dir = global.vault_install_dir[matrix.artifact_type]
vault_root_token = step.create_vault_cluster.root_token
}
@@ -459,8 +478,10 @@ scenario "proxy" {
verifies = quality.vault_raft_voters

variables {
hosts = step.create_vault_cluster_targets.hosts
ip_version = matrix.ip_version
vault_addr = step.create_vault_cluster.api_addr_localhost
vault_install_dir = global.vault_install_dir[matrix.artifact_type]
vault_instances = step.create_vault_cluster_targets.hosts
vault_root_token = step.create_vault_cluster.root_token
}
}
@@ -481,9 +502,9 @@ scenario "proxy" {
]

variables {
vault_edition = matrix.edition
vault_install_dir = global.vault_install_dir[matrix.artifact_type]
vault_instances = step.create_vault_cluster_targets.hosts
hosts = step.create_vault_cluster_targets.hosts
vault_addr = step.create_vault_cluster.api_addr_localhost
vault_edition = matrix.edition
}
}

@@ -502,7 +523,8 @@ scenario "proxy" {
verifies = quality.vault_secrets_kv_read

variables {
node_public_ips = step.get_vault_cluster_ips.follower_public_ips
hosts = step.get_vault_cluster_ips.follower_hosts
vault_addr = step.create_vault_cluster.api_addr_localhost
vault_install_dir = global.vault_install_dir[matrix.artifact_type]
}
}
@@ -519,7 +541,8 @@ scenario "proxy" {
verifies = quality.vault_ui_assets

variables {
vault_instances = step.create_vault_cluster_targets.hosts
hosts = step.create_vault_cluster_targets.hosts
vault_addr = step.create_vault_cluster.api_addr_localhost
}
}

@@ -535,7 +558,7 @@ scenario "proxy" {

output "hosts" {
description = "The Vault cluster target hosts"
value = step.create_vault_cluster.target_hosts
value = step.create_vault_cluster.hosts
}

output "private_ips" {

@@ -1,5 +1,5 @@
# Copyright (c) HashiCorp, Inc.
# SPDX-License-Identifier: BUSL-1.1
// Copyright (c) HashiCorp, Inc.
// SPDX-License-Identifier: BUSL-1.1

scenario "replication" {
description = <<-EOF
@@ -32,18 +32,19 @@ scenario "replication" {
consul_version = global.consul_versions
distro = global.distros
edition = global.enterprise_editions
ip_version = global.ip_versions
primary_backend = global.backends
primary_seal = global.seals
secondary_backend = global.backends
secondary_seal = global.seals

# Our local builder always creates bundles
// Our local builder always creates bundles
exclude {
artifact_source = ["local"]
artifact_type = ["package"]
}

# PKCS#11 can only be used on ent.hsm and ent.hsm.fips1402.
// PKCS#11 can only be used on ent.hsm and ent.hsm.fips1402.
exclude {
primary_seal = ["pkcs11"]
edition = [for e in matrix.edition : e if !strcontains(e, "hsm")]
@@ -54,14 +55,14 @@ scenario "replication" {
edition = [for e in matrix.edition : e if !strcontains(e, "hsm")]
}

# arm64 AMIs are not offered for Leap
// arm64 AMIs are not offered for Leap
exclude {
distro = ["leap"]
arch = ["arm64"]
}

# softhsm packages not available for leap/sles. Enos support for softhsm on amzn2 is
# not implemented yet.
// softhsm packages not available for leap/sles. Enos support for softhsm on amzn2 is
// not implemented yet.
exclude {
primary_seal = ["pkcs11"]
distro = ["amzn2", "leap", "sles"]
@@ -71,6 +72,17 @@ scenario "replication" {
secondary_seal = ["pkcs11"]
distro = ["amzn2", "leap", "sles"]
}

// Testing in IPV6 mode is currently implemented for integrated Raft storage only
exclude {
ip_version = ["6"]
primary_backend = ["consul"]
}

exclude {
ip_version = ["6"]
secondary_backend = ["consul"]
}
}

terraform_cli = terraform_cli.default
@@ -127,6 +139,7 @@ scenario "replication" {

variables {
common_tags = global.tags
ip_version = matrix.ip_version
}
}

@@ -184,7 +197,7 @@ scenario "replication" {
|
||||
}
|
||||
}
|
||||
|
||||
# Create all of our instances for both primary and secondary clusters
|
||||
// Create all of our instances for both primary and secondary clusters
|
||||
step "create_primary_cluster_targets" {
|
||||
description = global.description.create_vault_cluster_targets
|
||||
module = module.target_ec2_instances
|
||||
@@ -314,12 +327,12 @@ scenario "replication" {
|
||||
variables {
|
||||
cluster_name = step.create_primary_cluster_backend_targets.cluster_name
|
||||
cluster_tag_key = global.backend_tag_key
|
||||
hosts = step.create_primary_cluster_backend_targets.hosts
|
||||
license = (matrix.primary_backend == "consul" && matrix.consul_edition == "ent") ? step.read_backend_license.license : null
|
||||
release = {
|
||||
edition = matrix.consul_edition
|
||||
version = matrix.consul_version
|
||||
}
|
||||
target_hosts = step.create_primary_cluster_backend_targets.hosts
|
||||
}
|
||||
}
|
||||
|
||||
@@ -351,6 +364,8 @@ scenario "replication" {
|
||||
quality.vault_config_log_level,
|
||||
quality.vault_init,
|
||||
quality.vault_license_required_ent,
|
||||
quality.vault_listener_ipv4,
|
||||
quality.vault_listener_ipv6,
|
||||
quality.vault_service_start,
|
||||
quality.vault_storage_backend_consul,
|
||||
quality.vault_storage_backend_raft,
|
||||
@@ -381,7 +396,9 @@ scenario "replication" {
|
||||
version = matrix.consul_version
|
||||
} : null
|
||||
enable_audit_devices = var.vault_enable_audit_devices
|
||||
hosts = step.create_primary_cluster_targets.hosts
|
||||
install_dir = global.vault_install_dir[matrix.artifact_type]
|
||||
ip_version = matrix.ip_version
|
||||
license = matrix.edition != "ce" ? step.read_vault_license.license : null
|
||||
local_artifact_path = local.artifact_path
|
||||
manage_service = local.manage_service
|
||||
@@ -389,7 +406,6 @@ scenario "replication" {
|
||||
seal_attributes = step.create_primary_seal_key.attributes
|
||||
seal_type = matrix.primary_seal
|
||||
storage_backend = matrix.primary_backend
|
||||
target_hosts = step.create_primary_cluster_targets.hosts
|
||||
}
|
||||
}
|
||||
|
||||
@@ -413,8 +429,10 @@ scenario "replication" {
|
||||
]
|
||||
|
||||
variables {
|
||||
timeout = 120 # seconds
|
||||
vault_hosts = step.create_primary_cluster_targets.hosts
|
||||
hosts = step.create_primary_cluster_targets.hosts
|
||||
ip_version = matrix.ip_version
|
||||
timeout = 120 // seconds
|
||||
vault_addr = step.create_primary_cluster.api_addr_localhost
|
||||
vault_install_dir = local.vault_install_dir
|
||||
vault_root_token = step.create_primary_cluster.root_token
|
||||
}
|
||||
@@ -434,12 +452,12 @@ scenario "replication" {
|
||||
variables {
|
||||
cluster_name = step.create_secondary_cluster_backend_targets.cluster_name
|
||||
cluster_tag_key = global.backend_tag_key
|
||||
hosts = step.create_secondary_cluster_backend_targets.hosts
|
||||
license = (matrix.secondary_backend == "consul" && matrix.consul_edition == "ent") ? step.read_backend_license.license : null
|
||||
release = {
|
||||
edition = matrix.consul_edition
|
||||
version = matrix.consul_version
|
||||
}
|
||||
target_hosts = step.create_secondary_cluster_backend_targets.hosts
|
||||
}
|
||||
}
|
||||
|
||||
@@ -483,7 +501,9 @@ scenario "replication" {
|
||||
version = matrix.consul_version
|
||||
} : null
|
||||
enable_audit_devices = var.vault_enable_audit_devices
|
||||
hosts = step.create_secondary_cluster_targets.hosts
|
||||
install_dir = global.vault_install_dir[matrix.artifact_type]
|
||||
ip_version = matrix.ip_version
|
||||
license = matrix.edition != "ce" ? step.read_vault_license.license : null
|
||||
local_artifact_path = local.artifact_path
|
||||
manage_service = local.manage_service
|
||||
@@ -491,7 +511,6 @@ scenario "replication" {
|
||||
seal_attributes = step.create_secondary_seal_key.attributes
|
||||
seal_type = matrix.secondary_seal
|
||||
storage_backend = matrix.secondary_backend
|
||||
target_hosts = step.create_secondary_cluster_targets.hosts
|
||||
}
|
||||
}
|
||||
|
||||
@@ -510,8 +529,10 @@ scenario "replication" {
|
||||
]
|
||||
|
||||
variables {
|
||||
timeout = 120 # seconds
|
||||
vault_hosts = step.create_secondary_cluster_targets.hosts
|
||||
hosts = step.create_secondary_cluster_targets.hosts
|
||||
ip_version = matrix.ip_version
|
||||
timeout = 120 // seconds
|
||||
vault_addr = step.create_secondary_cluster.api_addr_localhost
|
||||
vault_install_dir = local.vault_install_dir
|
||||
vault_root_token = step.create_secondary_cluster.root_token
|
||||
}
|
||||
@@ -537,7 +558,8 @@ scenario "replication" {
|
||||
]
|
||||
|
||||
variables {
|
||||
vault_instances = step.create_primary_cluster_targets.hosts
|
||||
hosts = step.create_primary_cluster_targets.hosts
|
||||
vault_addr = step.create_primary_cluster.api_addr_localhost
|
||||
vault_install_dir = global.vault_install_dir[matrix.artifact_type]
|
||||
}
|
||||
}
|
||||
@@ -562,7 +584,8 @@ scenario "replication" {
|
||||
]
|
||||
|
||||
variables {
|
||||
vault_instances = step.create_secondary_cluster_targets.hosts
|
||||
hosts = step.create_secondary_cluster_targets.hosts
|
||||
vault_addr = step.create_secondary_cluster.api_addr_localhost
|
||||
vault_install_dir = global.vault_install_dir[matrix.artifact_type]
|
||||
}
|
||||
}
|
||||
@@ -586,7 +609,8 @@ scenario "replication" {
|
||||
]
|
||||
|
||||
variables {
|
||||
vault_instances = step.create_primary_cluster_targets.hosts
|
||||
hosts = step.create_primary_cluster_targets.hosts
|
||||
vault_addr = step.create_primary_cluster.api_addr_localhost
|
||||
vault_edition = matrix.edition
|
||||
vault_install_dir = global.vault_install_dir[matrix.artifact_type]
|
||||
vault_product_version = matrix.artifact_source == "local" ? step.get_local_metadata.version : var.vault_product_version
|
||||
@@ -611,7 +635,8 @@ scenario "replication" {
|
||||
verifies = quality.vault_ui_assets
|
||||
|
||||
variables {
|
||||
vault_instances = step.create_primary_cluster_targets.hosts
|
||||
vault_addr = step.create_primary_cluster.api_addr_localhost
|
||||
hosts = step.create_primary_cluster_targets.hosts
|
||||
}
|
||||
}

@@ -631,25 +656,14 @@ scenario "replication" {
]

variables {
vault_hosts = step.create_primary_cluster_targets.hosts
hosts = step.create_primary_cluster_targets.hosts
ip_version = matrix.ip_version
vault_addr = step.create_primary_cluster.api_addr_localhost
vault_install_dir = global.vault_install_dir[matrix.artifact_type]
vault_root_token = step.create_primary_cluster.root_token
}
}

step "get_primary_cluster_replication_data" {
description = <<-EOF
An arithmetic module that we use to determine various metadata about the leader and
follower nodes of the primary cluster so that we can correctly enable performance replication.
EOF
module = module.replication_data
depends_on = [step.get_primary_cluster_ips]

variables {
follower_hosts = step.get_primary_cluster_ips.follower_hosts
}
}
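// A hedged sketch of the interface this arithmetic module is expected to expose, inferred only
// from the references made later in this scenario (follower_host_1, follower_public_ip_1); the
// module's actual implementation is not shown in this diff and the attribute shapes are assumed.
// output "follower_host_1"      { value = var.follower_hosts["0"] }
// output "follower_public_ip_1" { value = var.follower_hosts["0"].public_ip }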

step "get_secondary_cluster_ips" {
description = global.description.get_vault_cluster_ip_addresses
module = module.vault_get_cluster_ips
@@ -666,7 +680,9 @@ scenario "replication" {
]

variables {
vault_hosts = step.create_secondary_cluster_targets.hosts
hosts = step.create_secondary_cluster_targets.hosts
ip_version = matrix.ip_version
vault_addr = step.create_secondary_cluster.api_addr_localhost
vault_install_dir = global.vault_install_dir[matrix.artifact_type]
vault_root_token = step.create_secondary_cluster.root_token
}
@@ -690,9 +706,9 @@ scenario "replication" {
]
|
||||
|
||||
variables {
|
||||
leader_public_ip = step.get_primary_cluster_ips.leader_public_ip
|
||||
leader_private_ip = step.get_primary_cluster_ips.leader_private_ip
|
||||
vault_instances = step.create_primary_cluster_targets.hosts
|
||||
hosts = step.create_primary_cluster_targets.hosts
|
||||
leader_host = step.get_primary_cluster_ips.leader_host
|
||||
vault_addr = step.create_primary_cluster.api_addr_localhost
|
||||
vault_install_dir = global.vault_install_dir[matrix.artifact_type]
|
||||
vault_root_token = step.create_primary_cluster.root_token
|
||||
}
|
||||
@@ -723,10 +739,10 @@ scenario "replication" {
|
||||
]
|
||||
|
||||
variables {
|
||||
primary_leader_public_ip = step.get_primary_cluster_ips.leader_public_ip
|
||||
primary_leader_private_ip = step.get_primary_cluster_ips.leader_private_ip
|
||||
vault_install_dir = global.vault_install_dir[matrix.artifact_type]
|
||||
vault_root_token = step.create_primary_cluster.root_token
|
||||
primary_leader_public_ip = step.get_primary_cluster_ips.leader_public_ip
|
||||
vault_addr = step.create_primary_cluster.api_addr_localhost
|
||||
vault_install_dir = global.vault_install_dir[matrix.artifact_type]
|
||||
vault_root_token = step.create_primary_cluster.root_token
|
||||
}
|
||||
}
|
||||
|
||||
@@ -747,6 +763,7 @@ scenario "replication" {
|
||||
|
||||
variables {
|
||||
primary_leader_public_ip = step.get_primary_cluster_ips.leader_public_ip
|
||||
vault_addr = step.create_primary_cluster.api_addr_localhost
|
||||
vault_install_dir = global.vault_install_dir[matrix.artifact_type]
|
||||
vault_root_token = step.create_primary_cluster.root_token
|
||||
}
|
||||
@@ -767,11 +784,11 @@ scenario "replication" {
|
||||
verifies = quality.vault_api_sys_replication_performance_secondary_enable_write
|
||||
|
||||
variables {
|
||||
secondary_leader_public_ip = step.get_secondary_cluster_ips.leader_public_ip
|
||||
secondary_leader_private_ip = step.get_secondary_cluster_ips.leader_private_ip
|
||||
vault_install_dir = global.vault_install_dir[matrix.artifact_type]
|
||||
vault_root_token = step.create_secondary_cluster.root_token
|
||||
wrapping_token = step.generate_secondary_token.secondary_token
|
||||
secondary_leader_public_ip = step.get_secondary_cluster_ips.leader_public_ip
|
||||
vault_addr = step.create_secondary_cluster.api_addr_localhost
|
||||
vault_install_dir = global.vault_install_dir[matrix.artifact_type]
|
||||
vault_root_token = step.create_secondary_cluster.root_token
|
||||
wrapping_token = step.generate_secondary_token.secondary_token
|
||||
}
|
||||
}
|
||||
|
||||
@@ -795,10 +812,11 @@ scenario "replication" {
|
||||
}
|
||||
|
||||
variables {
|
||||
follower_public_ips = step.get_secondary_cluster_ips.follower_public_ips
|
||||
vault_install_dir = global.vault_install_dir[matrix.artifact_type]
|
||||
vault_unseal_keys = matrix.primary_seal == "shamir" ? step.create_primary_cluster.unseal_keys_hex : step.create_primary_cluster.recovery_keys_hex
|
||||
vault_seal_type = matrix.primary_seal == "shamir" ? matrix.primary_seal : matrix.secondary_seal
|
||||
hosts = step.get_secondary_cluster_ips.follower_hosts
|
||||
vault_addr = step.create_secondary_cluster.api_addr_localhost
|
||||
vault_install_dir = global.vault_install_dir[matrix.artifact_type]
|
||||
vault_seal_type = matrix.primary_seal == "shamir" ? matrix.primary_seal : matrix.secondary_seal
|
||||
vault_unseal_keys = matrix.primary_seal == "shamir" ? step.create_primary_cluster.unseal_keys_hex : step.create_primary_cluster.recovery_keys_hex
|
||||
}
|
||||
}
|
||||
|
||||
@@ -821,7 +839,8 @@ scenario "replication" {
|
||||
]
|
||||
|
||||
variables {
|
||||
vault_instances = step.create_secondary_cluster_targets.hosts
|
||||
hosts = step.create_secondary_cluster_targets.hosts
|
||||
vault_addr = step.create_secondary_cluster.api_addr_localhost
|
||||
vault_install_dir = global.vault_install_dir[matrix.artifact_type]
|
||||
}
|
||||
}
|
||||
@@ -847,11 +866,11 @@ scenario "replication" {
|
||||
]
|
||||
|
||||
variables {
|
||||
primary_leader_public_ip = step.get_primary_cluster_ips.leader_public_ip
|
||||
primary_leader_private_ip = step.get_primary_cluster_ips.leader_private_ip
|
||||
secondary_leader_public_ip = step.get_secondary_cluster_ips.leader_public_ip
|
||||
secondary_leader_private_ip = step.get_secondary_cluster_ips.leader_private_ip
|
||||
vault_install_dir = global.vault_install_dir[matrix.artifact_type]
|
||||
ip_version = matrix.ip_version
|
||||
primary_leader_host = step.get_primary_cluster_ips.leader_host
|
||||
secondary_leader_host = step.get_secondary_cluster_ips.leader_host
|
||||
vault_addr = step.create_primary_cluster.api_addr_localhost
|
||||
vault_install_dir = global.vault_install_dir[matrix.artifact_type]
|
||||
}
|
||||
}
|
||||
|
||||
@@ -871,7 +890,8 @@ scenario "replication" {
|
||||
verifies = quality.vault_secrets_kv_read
|
||||
|
||||
variables {
|
||||
node_public_ips = step.get_secondary_cluster_ips.follower_public_ips
|
||||
hosts = step.get_secondary_cluster_ips.follower_hosts
|
||||
vault_addr = step.create_secondary_cluster.api_addr_localhost
|
||||
vault_install_dir = global.vault_install_dir[matrix.artifact_type]
|
||||
}
|
||||
}
|
||||
@@ -910,6 +930,8 @@ scenario "replication" {
|
||||
quality.vault_config_log_level,
|
||||
quality.vault_init,
|
||||
quality.vault_license_required_ent,
|
||||
quality.vault_listener_ipv4,
|
||||
quality.vault_listener_ipv6,
|
||||
quality.vault_service_start,
|
||||
quality.vault_storage_backend_consul,
|
||||
quality.vault_storage_backend_raft,
|
||||
@@ -941,9 +963,11 @@ scenario "replication" {
|
||||
} : null
|
||||
enable_audit_devices = var.vault_enable_audit_devices
|
||||
force_unseal = matrix.primary_seal == "shamir"
|
||||
hosts = step.create_primary_cluster_additional_targets.hosts
|
||||
// Don't init when adding nodes into the cluster.
|
||||
initialize_cluster = false
|
||||
install_dir = global.vault_install_dir[matrix.artifact_type]
|
||||
ip_version = matrix.ip_version
|
||||
license = matrix.edition != "ce" ? step.read_vault_license.license : null
|
||||
local_artifact_path = local.artifact_path
|
||||
manage_service = local.manage_service
|
||||
@@ -954,7 +978,6 @@ scenario "replication" {
|
||||
shamir_unseal_keys = matrix.primary_seal == "shamir" ? step.create_primary_cluster.unseal_keys_hex : null
|
||||
storage_backend = matrix.primary_backend
|
||||
storage_node_prefix = "newprimary_node"
|
||||
target_hosts = step.create_primary_cluster_additional_targets.hosts
|
||||
}
|
||||
}
|
||||
|
||||
@@ -975,7 +998,8 @@ scenario "replication" {
|
||||
]
|
||||
|
||||
variables {
|
||||
vault_instances = step.create_primary_cluster_additional_targets.hosts
|
||||
hosts = step.create_primary_cluster_additional_targets.hosts
|
||||
vault_addr = step.add_additional_nodes_to_primary_cluster.api_addr_localhost
|
||||
vault_install_dir = global.vault_install_dir[matrix.artifact_type]
|
||||
}
|
||||
}
|
||||
@@ -997,7 +1021,9 @@ scenario "replication" {
|
||||
verifies = quality.vault_raft_voters
|
||||
|
||||
variables {
|
||||
vault_instances = step.create_primary_cluster_additional_targets.hosts
|
||||
hosts = step.create_primary_cluster_additional_targets.hosts
|
||||
ip_version = matrix.ip_version
|
||||
vault_addr = step.create_primary_cluster.api_addr_localhost
|
||||
vault_install_dir = global.vault_install_dir[matrix.artifact_type]
|
||||
vault_root_token = step.create_primary_cluster.root_token
|
||||
}
|
||||
@@ -1010,8 +1036,7 @@ scenario "replication" {
|
||||
EOF
|
||||
module = module.shutdown_node
|
||||
depends_on = [
|
||||
step.get_primary_cluster_replication_data,
|
||||
step.verify_additional_primary_nodes_are_unsealed
|
||||
step.verify_additional_primary_nodes_are_unsealed,
|
||||
]
|
||||
|
||||
providers = {
|
||||
@@ -1019,7 +1044,7 @@ scenario "replication" {
|
||||
}
|
||||
|
||||
variables {
|
||||
node_public_ip = step.get_primary_cluster_replication_data.follower_public_ip_1
|
||||
host = step.get_primary_cluster_ips.follower_hosts["0"]
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1039,7 +1064,7 @@ scenario "replication" {
|
||||
}
|
||||
|
||||
variables {
|
||||
node_public_ip = step.get_primary_cluster_ips.leader_public_ip
|
||||
host = step.get_primary_cluster_ips.leader_host
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1048,7 +1073,7 @@ scenario "replication" {
An arithmetic module that we use to determine various metadata about the leader and
follower nodes of the primary cluster so that we can correctly enable performance replication.

We execute this again to determine information about our hosts after having forced the leader
We execute this to determine information about our hosts after having forced the leader
and a follower from the cluster.
EOF

@@ -1060,10 +1085,8 @@ scenario "replication" {

variables {
added_hosts = step.create_primary_cluster_additional_targets.hosts
added_hosts_count = var.vault_instance_count
initial_hosts = step.create_primary_cluster_targets.hosts
initial_hosts_count = var.vault_instance_count
removed_follower_host = step.get_primary_cluster_replication_data.follower_host_1
removed_follower_host = step.get_primary_cluster_ips.follower_hosts["0"]
removed_primary_host = step.get_primary_cluster_ips.leader_host
}
}
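// A rough sketch (assumption, not the module source) of the set arithmetic that produces the
// remaining_hosts and remaining_hosts_count outputs consumed by the steps below.
// locals {
//   all_hosts       = concat(values(var.initial_hosts), values(var.added_hosts))
//   remaining_list  = [for h in local.all_hosts : h if h != var.removed_follower_host && h != var.removed_primary_host]
//   remaining_hosts = { for idx, h in local.remaining_list : idx => h }
// }
// output "remaining_hosts"       { value = local.remaining_hosts }
// output "remaining_hosts_count" { value = length(local.remaining_list) }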
@@ -1086,10 +1109,12 @@ scenario "replication" {
|
||||
]
|
||||
|
||||
variables {
|
||||
timeout = 120 # seconds
|
||||
hosts = step.get_remaining_hosts_replication_data.remaining_hosts
|
||||
ip_version = matrix.ip_version
|
||||
timeout = 120 // seconds
|
||||
vault_addr = step.create_primary_cluster.api_addr_localhost
|
||||
vault_install_dir = global.vault_install_dir[matrix.artifact_type]
|
||||
vault_root_token = step.create_primary_cluster.root_token
|
||||
vault_hosts = step.get_remaining_hosts_replication_data.remaining_hosts
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1112,10 +1137,11 @@ scenario "replication" {
|
||||
]
|
||||
|
||||
variables {
|
||||
vault_hosts = step.get_remaining_hosts_replication_data.remaining_hosts
|
||||
vault_install_dir = global.vault_install_dir[matrix.artifact_type]
|
||||
vault_instance_count = step.get_remaining_hosts_replication_data.remaining_hosts_count
|
||||
vault_root_token = step.create_primary_cluster.root_token
|
||||
hosts = step.get_remaining_hosts_replication_data.remaining_hosts
|
||||
ip_version = matrix.ip_version
|
||||
vault_addr = step.create_primary_cluster.api_addr_localhost
|
||||
vault_install_dir = global.vault_install_dir[matrix.artifact_type]
|
||||
vault_root_token = step.create_primary_cluster.root_token
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1145,11 +1171,11 @@ scenario "replication" {
|
||||
]
|
||||
|
||||
variables {
|
||||
primary_leader_public_ip = step.get_updated_primary_cluster_ips.leader_public_ip
|
||||
primary_leader_private_ip = step.get_updated_primary_cluster_ips.leader_private_ip
|
||||
secondary_leader_public_ip = step.get_secondary_cluster_ips.leader_public_ip
|
||||
secondary_leader_private_ip = step.get_secondary_cluster_ips.leader_private_ip
|
||||
vault_install_dir = global.vault_install_dir[matrix.artifact_type]
|
||||
ip_version = matrix.ip_version
|
||||
primary_leader_host = step.get_updated_primary_cluster_ips.leader_host
|
||||
secondary_leader_host = step.get_secondary_cluster_ips.leader_host
|
||||
vault_addr = step.create_primary_cluster.api_addr_localhost
|
||||
vault_install_dir = global.vault_install_dir[matrix.artifact_type]
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -1,5 +1,5 @@
# Copyright (c) HashiCorp, Inc.
# SPDX-License-Identifier: BUSL-1.1
// Copyright (c) HashiCorp, Inc.
// SPDX-License-Identifier: BUSL-1.1

scenario "seal_ha" {
description = <<-EOF
@@ -31,17 +31,18 @@ scenario "seal_ha" {
consul_version = global.consul_versions
distro = global.distros
edition = global.enterprise_editions
ip_version = global.ip_versions
// Seal HA is only supported with auto-unseal devices.
primary_seal = ["awskms", "pkcs11"]
secondary_seal = ["awskms", "pkcs11"]

# Our local builder always creates bundles
// Our local builder always creates bundles
exclude {
artifact_source = ["local"]
artifact_type = ["package"]
}

# PKCS#11 can only be used on ent.hsm and ent.hsm.fips1402.
// PKCS#11 can only be used on ent.hsm and ent.hsm.fips1402.
exclude {
primary_seal = ["pkcs11"]
edition = [for e in matrix.edition : e if !strcontains(e, "hsm")]
@@ -52,14 +53,14 @@ scenario "seal_ha" {
edition = [for e in matrix.edition : e if !strcontains(e, "hsm")]
}

# arm64 AMIs are not offered for Leap
// arm64 AMIs are not offered for Leap
exclude {
distro = ["leap"]
arch = ["arm64"]
}

# softhsm packages not available for leap/sles. Enos support for softhsm on amzn2 is
# not implemented yet.
// softhsm packages not available for leap/sles. Enos support for softhsm on amzn2 is
// not implemented yet.
exclude {
primary_seal = ["pkcs11"]
distro = ["amzn2", "leap", "sles"]
@@ -69,6 +70,12 @@ scenario "seal_ha" {
secondary_seal = ["pkcs11"]
distro = ["amzn2", "leap", "sles"]
}

// Testing in IPV6 mode is currently implemented for integrated Raft storage only
exclude {
ip_version = ["6"]
backend = ["consul"]
}
}

terraform_cli = terraform_cli.default
|
||||
@@ -130,6 +137,7 @@ scenario "seal_ha" {
|
||||
|
||||
variables {
|
||||
common_tags = global.tags
|
||||
ip_version = matrix.ip_version
|
||||
}
|
||||
}
|
||||
|
||||
@@ -255,12 +263,12 @@ scenario "seal_ha" {
|
||||
variables {
|
||||
cluster_name = step.create_vault_cluster_backend_targets.cluster_name
|
||||
cluster_tag_key = global.backend_tag_key
|
||||
hosts = step.create_vault_cluster_backend_targets.hosts
|
||||
license = (matrix.backend == "consul" && matrix.consul_edition == "ent") ? step.read_backend_license.license : null
|
||||
release = {
|
||||
edition = matrix.consul_edition
|
||||
version = matrix.consul_version
|
||||
}
|
||||
target_hosts = step.create_vault_cluster_backend_targets.hosts
|
||||
}
|
||||
}
|
||||
|
||||
@@ -286,12 +294,14 @@ scenario "seal_ha" {
|
||||
quality.vault_audit_socket,
|
||||
quality.vault_audit_syslog,
|
||||
quality.vault_autojoin_aws,
|
||||
quality.vault_service_start,
|
||||
quality.vault_config_env_variables,
|
||||
quality.vault_config_file,
|
||||
quality.vault_config_log_level,
|
||||
quality.vault_init,
|
||||
quality.vault_license_required_ent,
|
||||
quality.vault_listener_ipv4,
|
||||
quality.vault_listener_ipv6,
|
||||
quality.vault_service_start,
|
||||
quality.vault_storage_backend_consul,
|
||||
quality.vault_storage_backend_raft,
|
||||
// verified in enos_vault_start resource
|
||||
@@ -321,7 +331,9 @@ scenario "seal_ha" {
|
||||
version = matrix.consul_version
|
||||
} : null
|
||||
enable_audit_devices = var.vault_enable_audit_devices
|
||||
hosts = step.create_vault_cluster_targets.hosts
|
||||
install_dir = global.vault_install_dir[matrix.artifact_type]
|
||||
ip_version = matrix.ip_version
|
||||
license = matrix.edition != "ce" ? step.read_vault_license.license : null
|
||||
local_artifact_path = local.artifact_path
|
||||
manage_service = local.manage_service
|
||||
@@ -330,7 +342,6 @@ scenario "seal_ha" {
|
||||
seal_attributes = step.create_primary_seal_key.attributes
|
||||
seal_type = matrix.primary_seal
|
||||
storage_backend = matrix.backend
|
||||
target_hosts = step.create_vault_cluster_targets.hosts
|
||||
}
|
||||
}
|
||||
|
||||
@@ -350,8 +361,10 @@ scenario "seal_ha" {
|
||||
]
|
||||
|
||||
variables {
|
||||
timeout = 120 # seconds
|
||||
vault_hosts = step.create_vault_cluster_targets.hosts
|
||||
hosts = step.create_vault_cluster_targets.hosts
|
||||
ip_version = matrix.ip_version
|
||||
timeout = 120 // seconds
|
||||
vault_addr = step.create_vault_cluster.api_addr_localhost
|
||||
vault_install_dir = global.vault_install_dir[matrix.artifact_type]
|
||||
vault_root_token = step.create_vault_cluster.root_token
|
||||
}
|
||||
@@ -373,7 +386,9 @@ scenario "seal_ha" {
|
||||
]
|
||||
|
||||
variables {
|
||||
vault_hosts = step.create_vault_cluster_targets.hosts
|
||||
hosts = step.create_vault_cluster_targets.hosts
|
||||
ip_version = matrix.ip_version
|
||||
vault_addr = step.create_vault_cluster.api_addr_localhost
|
||||
vault_install_dir = global.vault_install_dir[matrix.artifact_type]
|
||||
vault_root_token = step.create_vault_cluster.root_token
|
||||
}
|
||||
@@ -395,8 +410,9 @@ scenario "seal_ha" {
|
||||
]
|
||||
|
||||
variables {
|
||||
hosts = step.create_vault_cluster_targets.hosts
|
||||
vault_addr = step.create_vault_cluster.api_addr_localhost
|
||||
vault_install_dir = global.vault_install_dir[matrix.artifact_type]
|
||||
vault_instances = step.create_vault_cluster_targets.hosts
|
||||
}
|
||||
}
|
||||
|
||||
@@ -422,9 +438,9 @@ scenario "seal_ha" {
|
||||
]
|
||||
|
||||
variables {
|
||||
leader_public_ip = step.get_vault_cluster_ips.leader_public_ip
|
||||
leader_private_ip = step.get_vault_cluster_ips.leader_private_ip
|
||||
vault_instances = step.create_vault_cluster_targets.hosts
|
||||
hosts = step.create_vault_cluster_targets.hosts
|
||||
leader_host = step.get_vault_cluster_ips.leader_host
|
||||
vault_addr = step.create_vault_cluster.api_addr_localhost
|
||||
vault_install_dir = global.vault_install_dir[matrix.artifact_type]
|
||||
vault_root_token = step.create_vault_cluster.root_token
|
||||
}
|
||||
@@ -450,7 +466,8 @@ scenario "seal_ha" {
|
||||
]
|
||||
|
||||
variables {
|
||||
vault_hosts = step.create_vault_cluster_targets.hosts
|
||||
hosts = step.create_vault_cluster_targets.hosts
|
||||
vault_addr = step.create_vault_cluster.api_addr_localhost
|
||||
vault_install_dir = global.vault_install_dir[matrix.artifact_type]
|
||||
vault_root_token = step.create_vault_cluster.root_token
|
||||
}
|
||||
@@ -470,7 +487,7 @@ scenario "seal_ha" {
|
||||
}
|
||||
|
||||
variables {
|
||||
target_hosts = step.create_vault_cluster_targets.hosts
|
||||
hosts = step.create_vault_cluster_targets.hosts
|
||||
}
|
||||
}
|
||||
|
||||
@@ -488,7 +505,9 @@ scenario "seal_ha" {
|
||||
|
||||
variables {
|
||||
cluster_name = step.create_vault_cluster_targets.cluster_name
|
||||
hosts = step.create_vault_cluster_targets.hosts
|
||||
install_dir = global.vault_install_dir[matrix.artifact_type]
|
||||
ip_version = matrix.ip_version
|
||||
license = matrix.edition != "ce" ? step.read_vault_license.license : null
|
||||
manage_service = local.manage_service
|
||||
seal_attributes = step.create_primary_seal_key.attributes
|
||||
@@ -496,7 +515,6 @@ scenario "seal_ha" {
|
||||
seal_type = matrix.primary_seal
|
||||
seal_type_secondary = matrix.secondary_seal
|
||||
storage_backend = matrix.backend
|
||||
target_hosts = step.create_vault_cluster_targets.hosts
|
||||
}
|
||||
}
|
||||
|
||||
@@ -516,8 +534,10 @@ scenario "seal_ha" {
|
||||
]
|
||||
|
||||
variables {
|
||||
timeout = 120 # seconds
|
||||
vault_hosts = step.create_vault_cluster_targets.hosts
|
||||
hosts = step.create_vault_cluster_targets.hosts
|
||||
ip_version = matrix.ip_version
|
||||
timeout = 120 // seconds
|
||||
vault_addr = step.create_vault_cluster.api_addr_localhost
|
||||
vault_install_dir = global.vault_install_dir[matrix.artifact_type]
|
||||
vault_root_token = step.create_vault_cluster.root_token
|
||||
}
|
||||
@@ -539,7 +559,9 @@ scenario "seal_ha" {
|
||||
]
|
||||
|
||||
variables {
|
||||
vault_hosts = step.create_vault_cluster_targets.hosts
|
||||
hosts = step.create_vault_cluster_targets.hosts
|
||||
ip_version = matrix.ip_version
|
||||
vault_addr = step.create_vault_cluster.api_addr_localhost
|
||||
vault_install_dir = global.vault_install_dir[matrix.artifact_type]
|
||||
vault_root_token = step.create_vault_cluster.root_token
|
||||
}
|
||||
@@ -561,8 +583,9 @@ scenario "seal_ha" {
|
||||
]
|
||||
|
||||
variables {
|
||||
vault_install_dir = global.vault_install_dir[matrix.artifact_type]
|
||||
leader_host = step.get_leader_ip_for_step_down.leader_host
|
||||
vault_addr = step.create_vault_cluster.api_addr_localhost
|
||||
vault_install_dir = global.vault_install_dir[matrix.artifact_type]
|
||||
vault_root_token = step.create_vault_cluster.root_token
|
||||
}
|
||||
}
|
||||
@@ -583,8 +606,10 @@ scenario "seal_ha" {
|
||||
]
|
||||
|
||||
variables {
|
||||
timeout = 120 # seconds
|
||||
vault_hosts = step.create_vault_cluster_targets.hosts
|
||||
timeout = 120 // seconds
|
||||
ip_version = matrix.ip_version
|
||||
hosts = step.create_vault_cluster_targets.hosts
|
||||
vault_addr = step.create_vault_cluster.api_addr_localhost
|
||||
vault_install_dir = global.vault_install_dir[matrix.artifact_type]
|
||||
vault_root_token = step.create_vault_cluster.root_token
|
||||
}
|
||||
@@ -606,7 +631,9 @@ scenario "seal_ha" {
|
||||
]
|
||||
|
||||
variables {
|
||||
vault_hosts = step.create_vault_cluster_targets.hosts
|
||||
hosts = step.create_vault_cluster_targets.hosts
|
||||
ip_version = matrix.ip_version
|
||||
vault_addr = step.create_vault_cluster.api_addr_localhost
|
||||
vault_install_dir = global.vault_install_dir[matrix.artifact_type]
|
||||
vault_root_token = step.create_vault_cluster.root_token
|
||||
}
|
||||
@@ -628,8 +655,9 @@ scenario "seal_ha" {
|
||||
]
|
||||
|
||||
variables {
|
||||
hosts = step.create_vault_cluster_targets.hosts
|
||||
vault_addr = step.create_vault_cluster.api_addr_localhost
|
||||
vault_install_dir = global.vault_install_dir[matrix.artifact_type]
|
||||
vault_instances = step.create_vault_cluster_targets.hosts
|
||||
}
|
||||
}
|
||||
|
||||
@@ -654,7 +682,8 @@ scenario "seal_ha" {
|
||||
]
|
||||
|
||||
variables {
|
||||
vault_hosts = step.create_vault_cluster_targets.hosts
|
||||
hosts = step.create_vault_cluster_targets.hosts
|
||||
vault_addr = step.create_vault_cluster.api_addr_localhost
|
||||
vault_install_dir = global.vault_install_dir[matrix.artifact_type]
|
||||
vault_root_token = step.create_vault_cluster.root_token
|
||||
}
|
||||
@@ -677,7 +706,8 @@ scenario "seal_ha" {
|
||||
]
|
||||
|
||||
variables {
|
||||
vault_instances = step.create_vault_cluster_targets.hosts
|
||||
hosts = step.create_vault_cluster_targets.hosts
|
||||
vault_addr = step.create_vault_cluster.api_addr_localhost
|
||||
vault_edition = matrix.edition
|
||||
vault_install_dir = global.vault_install_dir[matrix.artifact_type]
|
||||
vault_product_version = matrix.artifact_source == "local" ? step.get_local_metadata.version : var.vault_product_version
|
||||
@@ -700,8 +730,10 @@ scenario "seal_ha" {
|
||||
verifies = quality.vault_raft_voters
|
||||
|
||||
variables {
|
||||
hosts = step.create_vault_cluster_targets.hosts
|
||||
ip_version = matrix.ip_version
|
||||
vault_addr = step.create_vault_cluster.api_addr_localhost
|
||||
vault_install_dir = global.vault_install_dir[matrix.artifact_type]
|
||||
vault_instances = step.create_vault_cluster_targets.hosts
|
||||
vault_root_token = step.create_vault_cluster.root_token
|
||||
}
|
||||
}
|
||||
@@ -722,9 +754,9 @@ scenario "seal_ha" {
|
||||
]
|
||||
|
||||
variables {
|
||||
vault_edition = matrix.edition
|
||||
vault_install_dir = global.vault_install_dir[matrix.artifact_type]
|
||||
vault_instances = step.create_vault_cluster_targets.hosts
|
||||
hosts = step.create_vault_cluster_targets.hosts
|
||||
vault_addr = step.create_vault_cluster.api_addr_localhost
|
||||
vault_edition = matrix.edition
|
||||
}
|
||||
}
|
||||
|
||||
@@ -741,7 +773,8 @@ scenario "seal_ha" {
|
||||
verifies = quality.vault_secrets_kv_read
|
||||
|
||||
variables {
|
||||
node_public_ips = step.get_updated_cluster_ips.follower_public_ips
|
||||
hosts = step.get_updated_cluster_ips.follower_hosts
|
||||
vault_addr = step.create_vault_cluster.api_addr_localhost
|
||||
vault_install_dir = global.vault_install_dir[matrix.artifact_type]
|
||||
}
|
||||
}
|
||||
@@ -758,7 +791,8 @@ scenario "seal_ha" {
|
||||
verifies = quality.vault_ui_assets
|
||||
|
||||
variables {
|
||||
vault_instances = step.create_vault_cluster_targets.hosts
|
||||
hosts = step.create_vault_cluster_targets.hosts
|
||||
vault_addr = step.create_vault_cluster.api_addr_localhost
|
||||
}
|
||||
}
|
||||
|
||||
@@ -776,9 +810,10 @@ scenario "seal_ha" {
verifies = quality.vault_status_seal_type

variables {
vault_install_dir = global.vault_install_dir[matrix.artifact_type]
vault_hosts = step.create_vault_cluster_targets.hosts
hosts = step.create_vault_cluster_targets.hosts
seal_type = "multiseal"
vault_addr = step.create_vault_cluster.api_addr_localhost
vault_install_dir = global.vault_install_dir[matrix.artifact_type]
}
}

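// A hedged manual equivalent of the check above (CLI output field assumed, not taken from the
// module): with both seals configured, every node should report the combined seal type.
//   $ vault status -format=json | jq -r '.type'
//   multiseal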
@@ -798,7 +833,7 @@ scenario "seal_ha" {
|
||||
}
|
||||
|
||||
variables {
|
||||
target_hosts = step.create_vault_cluster_targets.hosts
|
||||
hosts = step.create_vault_cluster_targets.hosts
|
||||
}
|
||||
}
|
||||
|
||||
@@ -820,14 +855,15 @@ scenario "seal_ha" {
|
||||
|
||||
variables {
|
||||
cluster_name = step.create_vault_cluster_targets.cluster_name
|
||||
hosts = step.create_vault_cluster_targets.hosts
|
||||
install_dir = global.vault_install_dir[matrix.artifact_type]
|
||||
ip_version = matrix.ip_version
|
||||
license = matrix.edition != "ce" ? step.read_vault_license.license : null
|
||||
manage_service = local.manage_service
|
||||
seal_alias = "secondary"
|
||||
seal_attributes = step.create_secondary_seal_key.attributes
|
||||
seal_type = matrix.secondary_seal
|
||||
storage_backend = matrix.backend
|
||||
target_hosts = step.create_vault_cluster_targets.hosts
|
||||
}
|
||||
}
|
||||
|
||||
@@ -847,8 +883,10 @@ scenario "seal_ha" {
|
||||
]
|
||||
|
||||
variables {
|
||||
timeout = 120 # seconds
|
||||
vault_hosts = step.create_vault_cluster_targets.hosts
|
||||
timeout = 120 // seconds
|
||||
ip_version = matrix.ip_version
|
||||
hosts = step.create_vault_cluster_targets.hosts
|
||||
vault_addr = step.create_vault_cluster.api_addr_localhost
|
||||
vault_install_dir = global.vault_install_dir[matrix.artifact_type]
|
||||
vault_root_token = step.create_vault_cluster.root_token
|
||||
}
|
||||
@@ -871,7 +909,9 @@ scenario "seal_ha" {
|
||||
]
|
||||
|
||||
variables {
|
||||
vault_hosts = step.create_vault_cluster_targets.hosts
|
||||
hosts = step.create_vault_cluster_targets.hosts
|
||||
ip_version = matrix.ip_version
|
||||
vault_addr = step.create_vault_cluster.api_addr_localhost
|
||||
vault_install_dir = global.vault_install_dir[matrix.artifact_type]
|
||||
vault_root_token = step.create_vault_cluster.root_token
|
||||
}
|
||||
@@ -887,8 +927,9 @@ scenario "seal_ha" {
|
||||
}
|
||||
|
||||
variables {
|
||||
hosts = step.create_vault_cluster_targets.hosts
|
||||
vault_addr = step.create_vault_cluster.api_addr_localhost
|
||||
vault_install_dir = global.vault_install_dir[matrix.artifact_type]
|
||||
vault_instances = step.create_vault_cluster_targets.hosts
|
||||
}
|
||||
}
|
||||
|
||||
@@ -905,7 +946,8 @@ scenario "seal_ha" {
|
||||
}
|
||||
|
||||
variables {
|
||||
vault_hosts = step.create_vault_cluster_targets.hosts
|
||||
hosts = step.create_vault_cluster_targets.hosts
|
||||
vault_addr = step.create_vault_cluster.api_addr_localhost
|
||||
vault_install_dir = global.vault_install_dir[matrix.artifact_type]
|
||||
vault_root_token = step.create_vault_cluster.root_token
|
||||
}
|
||||
@@ -921,7 +963,8 @@ scenario "seal_ha" {
|
||||
}
|
||||
|
||||
variables {
|
||||
node_public_ips = step.get_cluster_ips_after_migration.follower_public_ips
|
||||
hosts = step.get_cluster_ips_after_migration.follower_hosts
|
||||
vault_addr = step.create_vault_cluster.api_addr_localhost
|
||||
vault_install_dir = global.vault_install_dir[matrix.artifact_type]
|
||||
}
|
||||
}
|
||||
@@ -938,9 +981,10 @@ scenario "seal_ha" {
|
||||
}
|
||||
|
||||
variables {
|
||||
vault_install_dir = global.vault_install_dir[matrix.artifact_type]
|
||||
vault_hosts = step.create_vault_cluster_targets.hosts
|
||||
hosts = step.create_vault_cluster_targets.hosts
|
||||
seal_type = matrix.secondary_seal
|
||||
vault_addr = step.create_vault_cluster.api_addr_localhost
|
||||
vault_install_dir = global.vault_install_dir[matrix.artifact_type]
|
||||
}
|
||||
}
|
||||
|
||||
@@ -956,7 +1000,7 @@ scenario "seal_ha" {
|
||||
|
||||
output "hosts" {
|
||||
description = "The Vault cluster target hosts"
|
||||
value = step.create_vault_cluster.target_hosts
|
||||
value = step.create_vault_cluster.hosts
|
||||
}
|
||||
|
||||
output "initial_seal_rewrap" {
|
||||
|
||||
@@ -1,5 +1,5 @@
# Copyright (c) HashiCorp, Inc.
# SPDX-License-Identifier: BUSL-1.1
// Copyright (c) HashiCorp, Inc.
// SPDX-License-Identifier: BUSL-1.1

scenario "smoke" {
description = <<-EOF
@@ -27,32 +27,39 @@ scenario "smoke" {
consul_version = global.consul_versions
distro = global.distros
edition = global.editions
ip_version = global.ip_versions
seal = global.seals

# Our local builder always creates bundles
// Our local builder always creates bundles
exclude {
artifact_source = ["local"]
artifact_type = ["package"]
}

# PKCS#11 can only be used on ent.hsm and ent.hsm.fips1402.
// PKCS#11 can only be used on ent.hsm and ent.hsm.fips1402.
exclude {
seal = ["pkcs11"]
edition = [for e in matrix.edition : e if !strcontains(e, "hsm")]
}

# arm64 AMIs are not offered for Leap
// arm64 AMIs are not offered for Leap
exclude {
distro = ["leap"]
arch = ["arm64"]
}

# softhsm packages not available for leap/sles. Enos support for softhsm on amzn2 is
# not implemented yet.
// softhsm packages not available for leap/sles. Enos support for softhsm on amzn2 is
// not implemented yet.
exclude {
seal = ["pkcs11"]
distro = ["amzn2", "leap", "sles"]
}

// Testing in IPV6 mode is currently implemented for integrated Raft storage only
exclude {
ip_version = ["6"]
backend = ["consul"]
}
}

terraform_cli = terraform_cli.default
|
||||
@@ -108,6 +115,7 @@ scenario "smoke" {
|
||||
|
||||
variables {
|
||||
common_tags = global.tags
|
||||
ip_version = matrix.ip_version
|
||||
}
|
||||
}
|
||||
|
||||
@@ -213,12 +221,12 @@ scenario "smoke" {
|
||||
variables {
|
||||
cluster_name = step.create_vault_cluster_backend_targets.cluster_name
|
||||
cluster_tag_key = global.backend_tag_key
|
||||
hosts = step.create_vault_cluster_backend_targets.hosts
|
||||
license = (matrix.backend == "consul" && matrix.consul_edition == "ent") ? step.read_backend_license.license : null
|
||||
release = {
|
||||
edition = matrix.consul_edition
|
||||
version = matrix.consul_version
|
||||
}
|
||||
target_hosts = step.create_vault_cluster_backend_targets.hosts
|
||||
}
|
||||
}
|
||||
|
||||
@@ -250,6 +258,8 @@ scenario "smoke" {
|
||||
quality.vault_config_log_level,
|
||||
quality.vault_init,
|
||||
quality.vault_license_required_ent,
|
||||
quality.vault_listener_ipv4,
|
||||
quality.vault_listener_ipv6,
|
||||
quality.vault_service_start,
|
||||
quality.vault_storage_backend_consul,
|
||||
quality.vault_storage_backend_raft,
|
||||
@@ -280,7 +290,9 @@ scenario "smoke" {
|
||||
version = matrix.consul_version
|
||||
} : null
|
||||
enable_audit_devices = var.vault_enable_audit_devices
|
||||
hosts = step.create_vault_cluster_targets.hosts
|
||||
install_dir = global.vault_install_dir[matrix.artifact_type]
|
||||
ip_version = matrix.ip_version
|
||||
license = matrix.edition != "ce" ? step.read_vault_license.license : null
|
||||
local_artifact_path = local.artifact_path
|
||||
manage_service = local.manage_service
|
||||
@@ -288,7 +300,6 @@ scenario "smoke" {
|
||||
seal_attributes = step.create_seal_key.attributes
|
||||
seal_type = matrix.seal
|
||||
storage_backend = matrix.backend
|
||||
target_hosts = step.create_vault_cluster_targets.hosts
|
||||
}
|
||||
}
|
||||
|
||||
@@ -314,8 +325,10 @@ scenario "smoke" {
|
||||
]
|
||||
|
||||
variables {
|
||||
timeout = 120 # seconds
|
||||
vault_hosts = step.create_vault_cluster_targets.hosts
|
||||
timeout = 120 // seconds
|
||||
ip_version = matrix.ip_version
|
||||
hosts = step.create_vault_cluster_targets.hosts
|
||||
vault_addr = step.create_vault_cluster.api_addr_localhost
|
||||
vault_install_dir = global.vault_install_dir[matrix.artifact_type]
|
||||
vault_root_token = step.create_vault_cluster.root_token
|
||||
}
|
||||
@@ -337,7 +350,9 @@ scenario "smoke" {
|
||||
]
|
||||
|
||||
variables {
|
||||
vault_hosts = step.create_vault_cluster_targets.hosts
|
||||
hosts = step.create_vault_cluster_targets.hosts
|
||||
ip_version = matrix.ip_version
|
||||
vault_addr = step.create_vault_cluster.api_addr_localhost
|
||||
vault_install_dir = global.vault_install_dir[matrix.artifact_type]
|
||||
vault_root_token = step.create_vault_cluster.root_token
|
||||
}
|
||||
@@ -359,8 +374,9 @@ scenario "smoke" {
|
||||
]
|
||||
|
||||
variables {
|
||||
vault_install_dir = global.vault_install_dir[matrix.artifact_type]
|
||||
leader_host = step.get_leader_ip_for_step_down.leader_host
|
||||
vault_addr = step.create_vault_cluster.api_addr_localhost
|
||||
vault_install_dir = global.vault_install_dir[matrix.artifact_type]
|
||||
vault_root_token = step.create_vault_cluster.root_token
|
||||
}
|
||||
}
|
||||
@@ -381,8 +397,10 @@ scenario "smoke" {
|
||||
]
|
||||
|
||||
variables {
|
||||
timeout = 120 # seconds
|
||||
vault_hosts = step.create_vault_cluster_targets.hosts
|
||||
timeout = 120 // seconds
|
||||
ip_version = matrix.ip_version
|
||||
hosts = step.create_vault_cluster_targets.hosts
|
||||
vault_addr = step.create_vault_cluster.api_addr_localhost
|
||||
vault_install_dir = global.vault_install_dir[matrix.artifact_type]
|
||||
vault_root_token = step.create_vault_cluster.root_token
|
||||
}
|
||||
@@ -404,7 +422,9 @@ scenario "smoke" {
|
||||
]
|
||||
|
||||
variables {
|
||||
vault_hosts = step.create_vault_cluster_targets.hosts
|
||||
hosts = step.create_vault_cluster_targets.hosts
|
||||
ip_version = matrix.ip_version
|
||||
vault_addr = step.create_vault_cluster.api_addr_localhost
|
||||
vault_install_dir = global.vault_install_dir[matrix.artifact_type]
|
||||
vault_root_token = step.create_vault_cluster.root_token
|
||||
}
|
||||
@@ -426,7 +446,8 @@ scenario "smoke" {
|
||||
]
|
||||
|
||||
variables {
|
||||
vault_instances = step.create_vault_cluster_targets.hosts
|
||||
hosts = step.create_vault_cluster_targets.hosts
|
||||
vault_addr = step.create_vault_cluster.api_addr_localhost
|
||||
vault_edition = matrix.edition
|
||||
vault_install_dir = global.vault_install_dir[matrix.artifact_type]
|
||||
vault_product_version = matrix.artifact_source == "local" ? step.get_local_metadata.version : var.vault_product_version
|
||||
@@ -452,8 +473,9 @@ scenario "smoke" {
|
||||
]
|
||||
|
||||
variables {
|
||||
hosts = step.create_vault_cluster_targets.hosts
|
||||
vault_addr = step.create_vault_cluster.api_addr_localhost
|
||||
vault_install_dir = global.vault_install_dir[matrix.artifact_type]
|
||||
vault_instances = step.create_vault_cluster_targets.hosts
|
||||
}
|
||||
}
|
||||
|
||||
@@ -477,9 +499,9 @@ scenario "smoke" {
|
||||
]
|
||||
|
||||
variables {
|
||||
leader_public_ip = step.get_vault_cluster_ips.leader_public_ip
|
||||
leader_private_ip = step.get_vault_cluster_ips.leader_private_ip
|
||||
vault_instances = step.create_vault_cluster_targets.hosts
|
||||
hosts = step.create_vault_cluster_targets.hosts
|
||||
leader_host = step.get_vault_cluster_ips.leader_host
|
||||
vault_addr = step.create_vault_cluster.api_addr_localhost
|
||||
vault_install_dir = global.vault_install_dir[matrix.artifact_type]
|
||||
vault_root_token = step.create_vault_cluster.root_token
|
||||
}
|
||||
@@ -501,8 +523,10 @@ scenario "smoke" {
|
||||
verifies = quality.vault_raft_voters
|
||||
|
||||
variables {
|
||||
hosts = step.create_vault_cluster_targets.hosts
|
||||
ip_version = matrix.ip_version
|
||||
vault_addr = step.create_vault_cluster.api_addr_localhost
|
||||
vault_install_dir = global.vault_install_dir[matrix.artifact_type]
|
||||
vault_instances = step.create_vault_cluster_targets.hosts
|
||||
vault_root_token = step.create_vault_cluster.root_token
|
||||
}
|
||||
}
|
||||
@@ -526,9 +550,9 @@ scenario "smoke" {
|
||||
]
|
||||
|
||||
variables {
|
||||
vault_edition = matrix.edition
|
||||
vault_install_dir = global.vault_install_dir[matrix.artifact_type]
|
||||
vault_instances = step.create_vault_cluster_targets.hosts
|
||||
hosts = step.create_vault_cluster_targets.hosts
|
||||
vault_addr = step.create_vault_cluster.api_addr_localhost
|
||||
vault_edition = matrix.edition
|
||||
}
|
||||
}
|
||||
|
||||
@@ -547,7 +571,8 @@ scenario "smoke" {
|
||||
verifies = quality.vault_secrets_kv_read
|
||||
|
||||
variables {
|
||||
node_public_ips = step.get_vault_cluster_ips.follower_public_ips
|
||||
hosts = step.get_vault_cluster_ips.follower_hosts
|
||||
vault_addr = step.create_vault_cluster.api_addr_localhost
|
||||
vault_install_dir = global.vault_install_dir[matrix.artifact_type]
|
||||
}
|
||||
}
|
||||
@@ -567,7 +592,8 @@ scenario "smoke" {
|
||||
verifies = quality.vault_ui_assets
|
||||
|
||||
variables {
|
||||
vault_instances = step.create_vault_cluster_targets.hosts
|
||||
hosts = step.create_vault_cluster_targets.hosts
|
||||
vault_addr = step.create_vault_cluster.api_addr_localhost
|
||||
}
|
||||
}
|
||||
|
||||
@@ -583,7 +609,7 @@ scenario "smoke" {
|
||||
|
||||
output "hosts" {
|
||||
description = "The Vault cluster target hosts"
|
||||
value = step.create_vault_cluster.target_hosts
|
||||
value = step.create_vault_cluster.hosts
|
||||
}
|
||||
|
||||
output "private_ips" {
|
||||
|
||||
@@ -1,5 +1,5 @@
# Copyright (c) HashiCorp, Inc.
# SPDX-License-Identifier: BUSL-1.1
// Copyright (c) HashiCorp, Inc.
// SPDX-License-Identifier: BUSL-1.1

scenario "ui" {
description = <<-EOF
@@ -37,6 +37,7 @@ scenario "ui" {
artifact_path = abspath(var.vault_artifact_path)
distro = "ubuntu"
consul_version = "1.17.0"
ip_version = 4
seal = "awskms"
tags = merge({
"Project Name" : var.project_name
@@ -75,6 +76,7 @@ scenario "ui" {

variables {
common_tags = local.tags
ip_version = local.ip_version
}
}

@@ -177,12 +179,12 @@ scenario "ui" {
|
||||
variables {
|
||||
cluster_name = step.create_vault_cluster_backend_targets.cluster_name
|
||||
cluster_tag_key = local.backend_tag_key
|
||||
hosts = step.create_vault_cluster_backend_targets.hosts
|
||||
license = (matrix.backend == "consul" && matrix.consul_edition == "ent") ? step.read_backend_license.license : null
|
||||
release = {
|
||||
edition = matrix.consul_edition
|
||||
version = local.consul_version
|
||||
}
|
||||
target_hosts = step.create_vault_cluster_backend_targets.hosts
|
||||
}
|
||||
}
|
||||
|
||||
@@ -214,6 +216,8 @@ scenario "ui" {
|
||||
quality.vault_config_log_level,
|
||||
quality.vault_init,
|
||||
quality.vault_license_required_ent,
|
||||
quality.vault_listener_ipv4,
|
||||
quality.vault_listener_ipv6,
|
||||
quality.vault_service_start,
|
||||
quality.vault_storage_backend_consul,
|
||||
quality.vault_storage_backend_raft,
|
||||
@@ -236,20 +240,22 @@ scenario "ui" {
|
||||
backend_cluster_name = step.create_vault_cluster_backend_targets.cluster_name
|
||||
backend_cluster_tag_key = local.backend_tag_key
|
||||
cluster_name = step.create_vault_cluster_targets.cluster_name
|
||||
config_mode = "file"
|
||||
consul_license = (matrix.backend == "consul" && matrix.consul_edition == "ent") ? step.read_backend_license.license : null
|
||||
consul_release = matrix.backend == "consul" ? {
|
||||
edition = matrix.consul_edition
|
||||
version = local.consul_version
|
||||
} : null
|
||||
enable_audit_devices = var.vault_enable_audit_devices
|
||||
hosts = step.create_vault_cluster_targets.hosts
|
||||
install_dir = local.vault_install_dir
|
||||
ip_version = local.ip_version
|
||||
license = matrix.edition != "ce" ? step.read_vault_license.license : null
|
||||
local_artifact_path = local.artifact_path
|
||||
packages = global.distro_packages["ubuntu"]
|
||||
seal_name = step.create_seal_key.resource_name
|
||||
seal_attributes = step.create_seal_key.attributes
|
||||
seal_type = local.seal
|
||||
storage_backend = matrix.backend
|
||||
target_hosts = step.create_vault_cluster_targets.hosts
|
||||
}
|
||||
}
|
||||
|
||||
@@ -269,8 +275,10 @@ scenario "ui" {
|
||||
]
|
||||
|
||||
variables {
|
||||
timeout = 120 # seconds
|
||||
vault_hosts = step.create_vault_cluster_targets.hosts
|
||||
timeout = 120 // seconds
|
||||
ip_version = local.ip_version
|
||||
hosts = step.create_vault_cluster_targets.hosts
|
||||
vault_addr = step.create_vault_cluster.api_addr_localhost
|
||||
vault_install_dir = local.vault_install_dir
|
||||
vault_root_token = step.create_vault_cluster.root_token
|
||||
}
|
||||
@@ -307,7 +315,7 @@ scenario "ui" {
|
||||
|
||||
output "hosts" {
|
||||
description = "The Vault cluster target hosts"
|
||||
value = step.create_vault_cluster.target_hosts
|
||||
value = step.create_vault_cluster.hosts
|
||||
}
|
||||
|
||||
output "private_ips" {
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
# Copyright (c) HashiCorp, Inc.
|
||||
# SPDX-License-Identifier: BUSL-1.1
|
||||
// Copyright (c) HashiCorp, Inc.
|
||||
// SPDX-License-Identifier: BUSL-1.1
|
||||
|
||||
scenario "upgrade" {
|
||||
description = <<-EOF
|
||||
@@ -30,53 +30,59 @@ scenario "upgrade" {
|
||||
distro = global.distros
|
||||
edition = global.editions
|
||||
initial_version = global.upgrade_initial_versions_ce
|
||||
ip_version = global.ip_versions
|
||||
seal = global.seals
|
||||
|
||||
|
||||
# Our local builder always creates bundles
|
||||
// Our local builder always creates bundles
|
||||
exclude {
|
||||
artifact_source = ["local"]
|
||||
artifact_type = ["package"]
|
||||
}
|
||||
|
||||
# Don't upgrade from super-ancient versions in CI because there are known reliability issues
|
||||
# in those versions that have already been fixed.
|
||||
// Don't upgrade from super-ancient versions in CI because there are known reliability issues
|
||||
// in those versions that have already been fixed.
|
||||
exclude {
|
||||
initial_version = [for e in matrix.initial_version : e if semverconstraint(e, "<1.11.0-0")]
|
||||
}
|
||||
|
||||
# FIPS 140-2 editions were not supported until 1.11.x, even though there are 1.10.x binaries
|
||||
# published.
|
||||
// FIPS 140-2 editions were not supported until 1.11.x, even though there are 1.10.x binaries
|
||||
// published.
|
||||
exclude {
|
||||
edition = ["ent.fips1402", "ent.hsm.fips1402"]
|
||||
initial_version = [for e in matrix.initial_version : e if semverconstraint(e, "<1.11.0-0")]
|
||||
}
|
||||
|
||||
# There are no published versions of these artifacts yet. We'll update this to exclude older
|
||||
# versions after our initial publication of these editions for arm64.
|
||||
// There are no published versions of these artifacts yet. We'll update this to exclude older
|
||||
// versions after our initial publication of these editions for arm64.
|
||||
exclude {
|
||||
arch = ["arm64"]
|
||||
edition = ["ent.fips1402", "ent.hsm", "ent.hsm.fips1402"]
|
||||
}
|
||||
|
||||
# PKCS#11 can only be used with hsm editions
|
||||
// PKCS#11 can only be used with hsm editions
|
||||
exclude {
|
||||
seal = ["pkcs11"]
|
||||
edition = [for e in matrix.edition : e if !strcontains(e, "hsm")]
|
||||
}
|
||||
|
||||
# arm64 AMIs are not offered for Leap
|
||||
// arm64 AMIs are not offered for Leap
|
||||
exclude {
|
||||
distro = ["leap"]
|
||||
arch = ["arm64"]
|
||||
}
|
||||
|
||||
# softhsm packages not available for leap/sles. Enos support for softhsm on amzn2 is
|
||||
# not implemented yet.
|
||||
// softhsm packages not available for leap/sles. Enos support for softhsm on amzn2 is
|
||||
// not implemented yet.
|
||||
exclude {
|
||||
seal = ["pkcs11"]
|
||||
distro = ["amzn2", "leap", "sles"]
|
||||
}
|
||||
|
||||
// Testing in IPV6 mode is currently implemented for integrated Raft storage only
|
||||
exclude {
|
||||
ip_version = ["6"]
|
||||
backend = ["consul"]
|
||||
}
|
||||
}
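The new ip_version matrix variant draws its values from a shared global rather than a literal list; a minimal sketch of the assumed shape of that global follows (the authoritative definition lives in the enos globals file and may differ):

  globals {
    ip_versions = ["4", "6"]
  }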
|
||||
|
||||
terraform_cli = terraform_cli.default
|
||||
@@ -132,6 +138,7 @@ scenario "upgrade" {
|
||||
|
||||
variables {
|
||||
common_tags = global.tags
|
||||
ip_version = matrix.ip_version
|
||||
}
|
||||
}
|
||||
|
||||
@@ -239,12 +246,12 @@ scenario "upgrade" {
|
||||
variables {
|
||||
cluster_name = step.create_vault_cluster_backend_targets.cluster_name
|
||||
cluster_tag_key = global.backend_tag_key
|
||||
hosts = step.create_vault_cluster_backend_targets.hosts
|
||||
license = (matrix.backend == "consul" && matrix.consul_edition == "ent") ? step.read_backend_license.license : null
|
||||
release = {
|
||||
edition = matrix.consul_edition
|
||||
version = matrix.consul_version
|
||||
}
|
||||
target_hosts = step.create_vault_cluster_backend_targets.hosts
|
||||
}
|
||||
}
|
||||
|
||||
@@ -277,6 +284,8 @@ scenario "upgrade" {
|
||||
quality.vault_config_log_level,
|
||||
quality.vault_init,
|
||||
quality.vault_license_required_ent,
|
||||
quality.vault_listener_ipv4,
|
||||
quality.vault_listener_ipv6,
|
||||
quality.vault_service_start,
|
||||
quality.vault_storage_backend_raft,
|
||||
// verified in enos_vault_start resource
|
||||
@@ -305,7 +314,10 @@ scenario "upgrade" {
|
||||
version = matrix.consul_version
|
||||
} : null
|
||||
enable_audit_devices = var.vault_enable_audit_devices
|
||||
hosts = step.create_vault_cluster_targets.hosts
|
||||
ip_version = matrix.ip_version
|
||||
license = matrix.edition != "ce" ? step.read_vault_license.license : null
|
||||
manage_service = true # always handle systemd for released bundles
|
||||
packages = concat(global.packages, global.distro_packages[matrix.distro])
|
||||
release = {
|
||||
edition = matrix.edition
|
||||
@@ -314,7 +326,6 @@ scenario "upgrade" {
|
||||
seal_attributes = step.create_seal_key.attributes
|
||||
seal_type = matrix.seal
|
||||
storage_backend = matrix.backend
|
||||
target_hosts = step.create_vault_cluster_targets.hosts
|
||||
}
|
||||
}
|
||||
|
||||
@@ -340,8 +351,10 @@ scenario "upgrade" {
|
||||
]
|
||||
|
||||
variables {
|
||||
timeout = 120 # seconds
|
||||
vault_hosts = step.create_vault_cluster_targets.hosts
|
||||
hosts = step.create_vault_cluster_targets.hosts
|
||||
ip_version = matrix.ip_version
|
||||
timeout = 120 // seconds
|
||||
vault_addr = step.create_vault_cluster.api_addr_localhost
|
||||
// Use the install dir for our initial version, which always comes from a zip bundle
|
||||
vault_install_dir = global.vault_install_dir["bundle"]
|
||||
vault_root_token = step.create_vault_cluster.root_token
|
||||
@@ -364,7 +377,9 @@ scenario "upgrade" {
|
||||
]
|
||||
|
||||
variables {
|
||||
vault_hosts = step.create_vault_cluster_targets.hosts
|
||||
hosts = step.create_vault_cluster_targets.hosts
|
||||
ip_version = matrix.ip_version
|
||||
vault_addr = step.create_vault_cluster.api_addr_localhost
|
||||
// Use the install dir for our initial version, which always comes from a zip bundle
|
||||
vault_install_dir = global.vault_install_dir["bundle"]
|
||||
vault_root_token = step.create_vault_cluster.root_token
|
||||
@@ -391,17 +406,17 @@ scenario "upgrade" {
|
||||
]
|
||||
|
||||
variables {
|
||||
leader_public_ip = step.get_vault_cluster_ips.leader_public_ip
|
||||
leader_private_ip = step.get_vault_cluster_ips.leader_private_ip
|
||||
vault_instances = step.create_vault_cluster_targets.hosts
|
||||
hosts = step.create_vault_cluster_targets.hosts
|
||||
leader_host = step.get_vault_cluster_ips.leader_host
|
||||
vault_addr = step.create_vault_cluster.api_addr_localhost
|
||||
// Use the install dir for our initial version, which always comes from a zip bundle
|
||||
vault_install_dir = global.vault_install_dir["bundle"]
|
||||
vault_root_token = step.create_vault_cluster.root_token
|
||||
}
|
||||
}
|
||||
|
||||
# This step upgrades the Vault cluster to the var.vault_product_version
|
||||
# by getting a bundle or package of that version from the matrix.artifact_source
|
||||
// This step upgrades the Vault cluster to the var.vault_product_version
|
||||
// by getting a bundle or package of that version from the matrix.artifact_source
|
||||
step "upgrade_vault" {
|
||||
description = <<-EOF
|
||||
Perform an in-place upgrade of the Vault Cluster nodes by first installing a new version
|
||||
@@ -423,13 +438,15 @@ scenario "upgrade" {
|
||||
]
|
||||
|
||||
variables {
|
||||
vault_api_addr = "http://localhost:8200"
|
||||
vault_instances = step.create_vault_cluster_targets.hosts
|
||||
vault_local_artifact_path = local.artifact_path
|
||||
hosts = step.create_vault_cluster_targets.hosts
|
||||
ip_version = matrix.ip_version
|
||||
vault_addr = step.create_vault_cluster.api_addr_localhost
|
||||
vault_artifactory_release = matrix.artifact_source == "artifactory" ? step.build_vault.vault_artifactory_release : null
|
||||
vault_install_dir = global.vault_install_dir[matrix.artifact_type]
|
||||
vault_unseal_keys = matrix.seal == "shamir" ? step.create_vault_cluster.unseal_keys_hex : null
|
||||
vault_local_artifact_path = local.artifact_path
|
||||
vault_root_token = step.create_vault_cluster.root_token
|
||||
vault_seal_type = matrix.seal
|
||||
vault_unseal_keys = matrix.seal == "shamir" ? step.create_vault_cluster.unseal_keys_hex : null
|
||||
}
|
||||
}
|
||||
|
||||
@@ -452,8 +469,10 @@ scenario "upgrade" {
|
||||
]
|
||||
|
||||
variables {
|
||||
timeout = 120 # seconds
|
||||
vault_hosts = step.create_vault_cluster_targets.hosts
|
||||
hosts = step.create_vault_cluster_targets.hosts
|
||||
ip_version = matrix.ip_version
|
||||
timeout = 120 // seconds
|
||||
vault_addr = step.create_vault_cluster.api_addr_localhost
|
||||
vault_install_dir = global.vault_install_dir[matrix.artifact_type]
|
||||
vault_root_token = step.create_vault_cluster.root_token
|
||||
}
|
||||
@@ -475,7 +494,9 @@ scenario "upgrade" {
|
||||
]
|
||||
|
||||
variables {
|
||||
vault_hosts = step.create_vault_cluster_targets.hosts
|
||||
hosts = step.create_vault_cluster_targets.hosts
|
||||
ip_version = matrix.ip_version
|
||||
vault_addr = step.create_vault_cluster.api_addr_localhost
|
||||
vault_install_dir = global.vault_install_dir[matrix.artifact_type]
|
||||
vault_root_token = step.create_vault_cluster.root_token
|
||||
}
|
||||
@@ -497,8 +518,9 @@ scenario "upgrade" {
|
||||
]
|
||||
|
||||
variables {
|
||||
vault_install_dir = global.vault_install_dir[matrix.artifact_type]
|
||||
leader_host = step.get_leader_ip_for_step_down.leader_host
|
||||
vault_addr = step.create_vault_cluster.api_addr_localhost
|
||||
vault_install_dir = global.vault_install_dir[matrix.artifact_type]
|
||||
vault_root_token = step.create_vault_cluster.root_token
|
||||
}
|
||||
}
|
||||
@@ -519,8 +541,10 @@ scenario "upgrade" {
|
||||
]
|
||||
|
||||
variables {
|
||||
timeout = 120 # seconds
|
||||
vault_hosts = step.create_vault_cluster_targets.hosts
|
||||
hosts = step.create_vault_cluster_targets.hosts
|
||||
ip_version = matrix.ip_version
|
||||
timeout = 120 // seconds
|
||||
vault_addr = step.create_vault_cluster.api_addr_localhost
|
||||
vault_install_dir = global.vault_install_dir[matrix.artifact_type]
|
||||
vault_root_token = step.create_vault_cluster.root_token
|
||||
}
|
||||
@@ -544,7 +568,9 @@ scenario "upgrade" {
|
||||
]
|
||||
|
||||
variables {
|
||||
vault_hosts = step.create_vault_cluster_targets.hosts
|
||||
hosts = step.create_vault_cluster_targets.hosts
|
||||
ip_version = matrix.ip_version
|
||||
vault_addr = step.create_vault_cluster.api_addr_localhost
|
||||
vault_install_dir = global.vault_install_dir[matrix.artifact_type]
|
||||
vault_root_token = step.create_vault_cluster.root_token
|
||||
}
|
||||
@@ -568,7 +594,8 @@ scenario "upgrade" {
|
||||
]
|
||||
|
||||
variables {
|
||||
vault_instances = step.create_vault_cluster_targets.hosts
|
||||
hosts = step.create_vault_cluster_targets.hosts
|
||||
vault_addr = step.create_vault_cluster.api_addr_localhost
|
||||
vault_edition = matrix.edition
|
||||
vault_install_dir = global.vault_install_dir[matrix.artifact_type]
|
||||
vault_product_version = matrix.artifact_source == "local" ? step.get_local_metadata.version : var.vault_product_version
|
||||
@@ -596,7 +623,8 @@ scenario "upgrade" {
|
||||
]
|
||||
|
||||
variables {
|
||||
vault_instances = step.create_vault_cluster_targets.hosts
|
||||
hosts = step.create_vault_cluster_targets.hosts
|
||||
vault_addr = step.create_vault_cluster.api_addr_localhost
|
||||
vault_install_dir = global.vault_install_dir[matrix.artifact_type]
|
||||
}
|
||||
}
|
||||
@@ -622,7 +650,8 @@ scenario "upgrade" {
|
||||
]
|
||||
|
||||
variables {
|
||||
node_public_ips = step.get_updated_vault_cluster_ips.follower_public_ips
|
||||
hosts = step.get_updated_vault_cluster_ips.follower_hosts
|
||||
vault_addr = step.create_vault_cluster.api_addr_localhost
|
||||
vault_install_dir = global.vault_install_dir[matrix.artifact_type]
|
||||
}
|
||||
}
|
||||
@@ -642,8 +671,10 @@ scenario "upgrade" {
|
||||
verifies = quality.vault_raft_voters
|
||||
|
||||
variables {
|
||||
hosts = step.create_vault_cluster_targets.hosts
|
||||
ip_version = matrix.ip_version
|
||||
vault_addr = step.create_vault_cluster.api_addr_localhost
|
||||
vault_install_dir = global.vault_install_dir[matrix.artifact_type]
|
||||
vault_instances = step.create_vault_cluster_targets.hosts
|
||||
vault_root_token = step.create_vault_cluster.root_token
|
||||
}
|
||||
}
|
||||
@@ -666,9 +697,9 @@ scenario "upgrade" {
|
||||
]
|
||||
|
||||
variables {
|
||||
vault_edition = matrix.edition
|
||||
vault_install_dir = global.vault_install_dir[matrix.artifact_type]
|
||||
vault_instances = step.create_vault_cluster_targets.hosts
|
||||
hosts = step.create_vault_cluster_targets.hosts
|
||||
vault_addr = step.create_vault_cluster.api_addr_localhost
|
||||
vault_edition = matrix.edition
|
||||
}
|
||||
}
|
||||
|
||||
@@ -686,7 +717,8 @@ scenario "upgrade" {
|
||||
verifies = quality.vault_ui_assets
|
||||
|
||||
variables {
|
||||
vault_instances = step.create_vault_cluster_targets.hosts
|
||||
hosts = step.create_vault_cluster_targets.hosts
|
||||
vault_addr = step.create_vault_cluster.api_addr_localhost
|
||||
}
|
||||
}
|
||||
|
||||
@@ -702,7 +734,7 @@ scenario "upgrade" {
|
||||
|
||||
output "hosts" {
|
||||
description = "The Vault cluster target hosts"
|
||||
value = step.create_vault_cluster.target_hosts
|
||||
value = step.create_vault_cluster.hosts
|
||||
}
|
||||
|
||||
output "private_ips" {
|
||||
|
||||
@@ -1,17 +1,19 @@
|
||||
# Copyright (c) HashiCorp, Inc.
|
||||
# SPDX-License-Identifier: BUSL-1.1
|
||||
// Copyright (c) HashiCorp, Inc.
|
||||
// SPDX-License-Identifier: BUSL-1.1
|
||||
|
||||
terraform_cli "default" {
|
||||
plugin_cache_dir = var.terraform_plugin_cache_dir != null ? abspath(var.terraform_plugin_cache_dir) : null
|
||||
}
|
||||
|
||||
terraform_cli "dev" {
|
||||
plugin_cache_dir = var.terraform_plugin_cache_dir != null ? abspath(var.terraform_plugin_cache_dir) : null
|
||||
|
||||
/*
|
||||
provider_installation {
|
||||
dev_overrides = {
|
||||
"registry.terraform.io/hashicorp-forge/enos" = abspath("../../enos-provider/dist")
|
||||
"registry.terraform.io/hashicorp-forge/enos" = try(abspath("../../terraform-provider-enos/dist"), null)
|
||||
}
|
||||
direct {}
|
||||
}
|
||||
*/
|
||||
}
|
||||
|
||||
terraform "default" {
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
# Copyright (c) HashiCorp, Inc.
|
||||
# SPDX-License-Identifier: BUSL-1.1
|
||||
// Copyright (c) HashiCorp, Inc.
|
||||
// SPDX-License-Identifier: BUSL-1.1
|
||||
|
||||
variable "artifactory_username" {
|
||||
type = string
|
||||
@@ -130,7 +130,7 @@ variable "ui_run_tests" {
|
||||
}
|
||||
|
||||
variable "vault_artifact_type" {
|
||||
description = "The type of Vault artifact to use when installing Vault from artifactory. It should be 'package' for .deb or # .rpm package and 'bundle' for .zip bundles"
|
||||
description = "The type of Vault artifact to use when installing Vault from artifactory. It should be 'package' for .deb or .rpm package and 'bundle' for .zip bundles"
|
||||
default = "bundle"
|
||||
}
|
||||
|
||||
|
||||
@@ -1,121 +1,121 @@
|
||||
# Copyright (c) HashiCorp, Inc.
|
||||
# SPDX-License-Identifier: BUSL-1.1
|
||||
// Copyright (c) HashiCorp, Inc.
|
||||
// SPDX-License-Identifier: BUSL-1.1
|
||||
|
||||
# artifactory_username is the username to use when testing an artifact stored in artfactory.
|
||||
# artifactory_username = "yourname@hashicorp.com"
|
||||
// artifactory_username is the username to use when testing an artifact stored in artfactory.
|
||||
// artifactory_username = "yourname@hashicorp.com"
|
||||
|
||||
# artifactory_token is the token to use when authenticating to artifactory.
|
||||
# artifactory_token = "yourtoken"
|
||||
// artifactory_token is the token to use when authenticating to artifactory.
|
||||
// artifactory_token = "yourtoken"
|
||||
|
||||
# artifactory_host is the artifactory host to search for vault artifacts.
|
||||
# artifactory_host = "https://artifactory.hashicorp.engineering/artifactory"
|
||||
// artifactory_host is the artifactory host to search for vault artifacts.
|
||||
// artifactory_host = "https://artifactory.hashicorp.engineering/artifactory"
|
||||
|
||||
# artifactory_repo is the artifactory repo to search for vault artifacts.
|
||||
# artifactory_repo = "hashicorp-crt-stable-local*"
|
||||
// artifactory_repo is the artifactory repo to search for vault artifacts.
|
||||
// artifactory_repo = "hashicorp-crt-stable-local*"
|
||||
|
||||
# aws_region is the AWS region where we'll create infrastructure
|
||||
# for the smoke scenario
|
||||
# aws_region = "us-east-1"
|
||||
// aws_region is the AWS region where we'll create infrastructure
|
||||
// for the smoke scenario
|
||||
// aws_region = "us-east-1"
|
||||
|
||||
# aws_ssh_keypair_name is the AWS keypair to use for SSH
|
||||
# aws_ssh_keypair_name = "enos-ci-ssh-key"
|
||||
// aws_ssh_keypair_name is the AWS keypair to use for SSH
|
||||
// aws_ssh_keypair_name = "enos-ci-ssh-key"
|
||||
|
||||
# aws_ssh_private_key_path is the path to the AWS keypair private key
|
||||
# aws_ssh_private_key_path = "./support/private_key.pem"
|
||||
// aws_ssh_private_key_path is the path to the AWS keypair private key
|
||||
// aws_ssh_private_key_path = "./support/private_key.pem"
|
||||
|
||||
# backend_license_path is the license for the backend if applicable (Consul Enterprise)".
|
||||
# backend_license_path = "./support/consul.hclic"
|
||||
// backend_license_path is the license for the backend if applicable (Consul Enterprise)".
|
||||
// backend_license_path = "./support/consul.hclic"
|
||||
|
||||
# backend_log_level is the server log level for the backend. Supported values include 'trace',
|
||||
# 'debug', 'info', 'warn', 'error'"
|
||||
# backend_log_level = "trace"
|
||||
// backend_log_level is the server log level for the backend. Supported values include 'trace',
|
||||
// 'debug', 'info', 'warn', 'error'"
|
||||
// backend_log_level = "trace"
|
||||
|
||||
# backend_instance_type is the instance type to use for the Vault backend. Must support arm64
|
||||
# backend_instance_type = "t4g.small"
|
||||
// backend_instance_type is the instance type to use for the Vault backend. Must support arm64
|
||||
// backend_instance_type = "t4g.small"
|
||||
|
||||
# project_name is the description of the project. It will often be used to tag infrastructure
|
||||
# resources.
|
||||
# project_name = "vault-enos-integration"
|
||||
// project_name is the description of the project. It will often be used to tag infrastructure
|
||||
// resources.
|
||||
// project_name = "vault-enos-integration"
|
||||
|
||||
# distro_version_amzn2 is the version of Amazon Linux 2 to use for "distro:amzn2" variants
|
||||
# distro_version_amzn2 = "2"
|
||||
// distro_version_amzn2 is the version of Amazon Linux 2 to use for "distro:amzn2" variants
|
||||
// distro_version_amzn2 = "2"
|
||||
|
||||
# distro_version_leap is the version of openSUSE Leap to use for "distro:leap" variants
|
||||
# distro_version_leap = "15.5"
|
||||
// distro_version_leap is the version of openSUSE Leap to use for "distro:leap" variants
|
||||
// distro_version_leap = "15.5"
|
||||
|
||||
# distro_version_rhel is the version of RHEL to use for "distro:rhel" variants.
|
||||
# distro_version_rhel = "9.3" // or "8.9"
|
||||
// distro_version_rhel is the version of RHEL to use for "distro:rhel" variants.
|
||||
// distro_version_rhel = "9.3" // or "8.9"
|
||||
|
||||
# distro_version_sles is the version of SUSE SLES to use for "distro:sles" variants.
|
||||
# distro_version_sles = "v15_sp5_standard"
|
||||
// distro_version_sles is the version of SUSE SLES to use for "distro:sles" variants.
|
||||
// distro_version_sles = "v15_sp5_standard"
|
||||
|
||||
# distro_version_ubuntu is the version of ubuntu to use for "distro:ubuntu" variants
|
||||
# distro_version_ubuntu = "22.04" // or "20.04"
|
||||
// distro_version_ubuntu is the version of ubuntu to use for "distro:ubuntu" variants
|
||||
// distro_version_ubuntu = "22.04" // or "20.04"
|
||||
|
||||
# tags are a map of tags that will be applied to infrastructure resources that
|
||||
# support tagging.
|
||||
# tags = { "Project Name" : "Vault", "Something Cool" : "Value" }
|
||||
// tags are a map of tags that will be applied to infrastructure resources that
|
||||
// support tagging.
|
||||
// tags = { "Project Name" : "Vault", "Something Cool" : "Value" }
|
||||
|
||||
# terraform_plugin_cache_dir is the directory to cache Terraform modules and providers.
|
||||
# It must exist.
|
||||
# terraform_plugin_cache_dir = "/Users/<user>/.terraform/plugin-cache-dir
|
||||
// terraform_plugin_cache_dir is the directory to cache Terraform modules and providers.
|
||||
// It must exist.
|
||||
// terraform_plugin_cache_dir = "/Users/<user>/.terraform/plugin-cache-dir
|
||||
|
||||
# ui_test_filter is the test filter to limit the ui tests to execute for the ui scenario. It will
|
||||
# be appended to the ember test command as '-f=\"<filter>\"'.
|
||||
# ui_test_filter = "sometest"
|
||||
// ui_test_filter is the test filter to limit the ui tests to execute for the ui scenario. It will
|
||||
// be appended to the ember test command as '-f=\"<filter>\"'.
|
||||
// ui_test_filter = "sometest"
|
||||
|
||||
# ui_run_tests sets whether to run the UI tests or not for the ui scenario. If set to false a
|
||||
# cluster will be created but no tests will be run.
|
||||
# ui_run_tests = true
|
||||
// ui_run_tests sets whether to run the UI tests or not for the ui scenario. If set to false a
|
||||
// cluster will be created but no tests will be run.
|
||||
// ui_run_tests = true
|
||||
|
||||
# vault_artifact_path is the path to CRT generated or local vault.zip bundle. When
|
||||
# using the "builder:local" variant a bundle will be built from the current branch.
|
||||
# In CI it will use the output of the build workflow.
|
||||
# vault_artifact_path = "./dist/vault.zip"
|
||||
// vault_artifact_path is the path to CRT generated or local vault.zip bundle. When
|
||||
// using the "builder:local" variant a bundle will be built from the current branch.
|
||||
// In CI it will use the output of the build workflow.
|
||||
// vault_artifact_path = "./dist/vault.zip"
|
||||
|
||||
# vault_artifact_type is the type of Vault artifact to use when installing Vault from artifactory.
|
||||
# It should be 'package' for .deb or # .rpm package and 'bundle' for .zip bundles"
|
||||
# vault_artifact_type = "bundle"
|
||||
// vault_artifact_type is the type of Vault artifact to use when installing Vault from artifactory.
|
||||
// It should be 'package' for .deb or .rpm package and 'bundle' for .zip bundles
|
||||
// vault_artifact_type = "bundle"
|
||||
|
||||
# vault_build_date is the build date for Vault artifact. Some validations will require the binary build
|
||||
# date to match"
|
||||
# vault_build_date = "2023-07-07T14:06:37Z" // make ci-get-date for example
|
||||
// vault_build_date is the build date for Vault artifact. Some validations will require the binary build
|
||||
// date to match"
|
||||
// vault_build_date = "2023-07-07T14:06:37Z" // make ci-get-date for example
|
||||
|
||||
# vault_enable_audit_devices sets whether or not to enable every audit device. It true
|
||||
# a file audit device will be enabled at the path /var/log/vault_audit.log, the syslog
|
||||
# audit device will be enabled, and a socket audit device connecting to 127.0.0.1:9090
|
||||
# will be enabled. The netcat program is run in listening mode to provide an endpoint
|
||||
# that the socket audit device can connect to.
|
||||
# vault_enable_audit_devices = true
|
||||
// vault_enable_audit_devices sets whether or not to enable every audit device. If true
|
||||
// a file audit device will be enabled at the path /var/log/vault_audit.log, the syslog
|
||||
// audit device will be enabled, and a socket audit device connecting to 127.0.0.1:9090
|
||||
// will be enabled. The netcat program is run in listening mode to provide an endpoint
|
||||
// that the socket audit device can connect to.
|
||||
// vault_enable_audit_devices = true
|
||||
|
||||
# vault_install_dir is the directory where the vault binary will be installed on
|
||||
# the remote machines.
|
||||
# vault_install_dir = "/opt/vault/bin"
|
||||
// vault_install_dir is the directory where the vault binary will be installed on
|
||||
// the remote machines.
|
||||
// vault_install_dir = "/opt/vault/bin"
|
||||
|
||||
# vault_local_binary_path is the path of the local binary that we're upgrading to.
|
||||
# vault_local_binary_path = "./support/vault"
|
||||
// vault_local_binary_path is the path of the local binary that we're upgrading to.
|
||||
// vault_local_binary_path = "./support/vault"
|
||||
|
||||
# vault_instance_type is the instance type to use for the Vault backend
|
||||
# vault_instance_type = "t3.small"
|
||||
// vault_instance_type is the instance type to use for the Vault backend
|
||||
// vault_instance_type = "t3.small"
|
||||
|
||||
# vault_instance_count is how many instances to create for the Vault cluster.
|
||||
# vault_instance_count = 3
|
||||
// vault_instance_count is how many instances to create for the Vault cluster.
|
||||
// vault_instance_count = 3
|
||||
|
||||
# vault_license_path is the path to a valid Vault enterprise edition license.
|
||||
# This is only required for non-ce editions"
|
||||
# vault_license_path = "./support/vault.hclic"
|
||||
// vault_license_path is the path to a valid Vault enterprise edition license.
|
||||
// This is only required for non-ce editions"
|
||||
// vault_license_path = "./support/vault.hclic"
|
||||
|
||||
# vault_local_build_tags override the build tags we pass to the Go compiler for builder:local variants.
|
||||
# vault_local_build_tags = ["ui", "ent"]
|
||||
// vault_local_build_tags override the build tags we pass to the Go compiler for builder:local variants.
|
||||
// vault_local_build_tags = ["ui", "ent"]
|
||||
|
||||
# vault_log_level is the server log level for Vault logs. Supported values (in order of detail) are
|
||||
# trace, debug, info, warn, and err."
|
||||
# vault_log_level = "trace"
|
||||
// vault_log_level is the server log level for Vault logs. Supported values (in order of detail) are
|
||||
// trace, debug, info, warn, and err."
|
||||
// vault_log_level = "trace"
|
||||
|
||||
# vault_product_version is the version of Vault we are testing. Some validations will expect the vault
|
||||
# binary and cluster to report this version.
|
||||
# vault_product_version = "1.15.0"
|
||||
// vault_product_version is the version of Vault we are testing. Some validations will expect the vault
|
||||
// binary and cluster to report this version.
|
||||
// vault_product_version = "1.15.0"
|
||||
|
||||
# vault_revision is the git sha of Vault artifact we are testing. Some validations will expect the vault
|
||||
# binary and cluster to report this revision.
|
||||
# vault_revision = "df733361af26f8bb29b63704168bbc5ab8d083de"
|
||||
// vault_revision is the git sha of Vault artifact we are testing. Some validations will expect the vault
|
||||
// binary and cluster to report this revision.
|
||||
// vault_revision = "df733361af26f8bb29b63704168bbc5ab8d083de"
|
||||
|
||||
@@ -17,7 +17,7 @@ locals {
|
||||
}
|
||||
|
||||
resource "enos_bundle_install" "consul" {
|
||||
for_each = var.target_hosts
|
||||
for_each = var.hosts
|
||||
|
||||
destination = var.install_dir
|
||||
release = merge(var.release, { product = "consul" })
|
||||
@@ -40,7 +40,7 @@ resource "enos_consul_start" "consul" {
|
||||
datacenter = "dc1"
|
||||
retry_join = ["provider=aws tag_key=${var.cluster_tag_key} tag_value=${var.cluster_name}"]
|
||||
server = true
|
||||
bootstrap_expect = length(var.target_hosts)
|
||||
bootstrap_expect = length(var.hosts)
|
||||
log_level = var.log_level
|
||||
log_file = var.log_dir
|
||||
}
|
||||
@@ -50,7 +50,7 @@ resource "enos_consul_start" "consul" {
|
||||
|
||||
transport = {
|
||||
ssh = {
|
||||
host = var.target_hosts[each.key].public_ip
|
||||
host = var.hosts[each.key].public_ip
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -3,16 +3,16 @@
|
||||
|
||||
output "private_ips" {
|
||||
description = "Consul cluster target host private_ips"
|
||||
value = [for host in var.target_hosts : host.private_ip]
|
||||
value = [for host in var.hosts : host.private_ip]
|
||||
}
|
||||
|
||||
output "public_ips" {
|
||||
description = "Consul cluster target host public_ips"
|
||||
value = [for host in var.target_hosts : host.public_ip]
|
||||
value = [for host in var.hosts : host.public_ip]
|
||||
}
|
||||
|
||||
output "target_hosts" {
|
||||
output "hosts" {
|
||||
description = "The Consul cluster instances that were created"
|
||||
|
||||
value = var.target_hosts
|
||||
value = var.hosts
|
||||
}
|
||||
|
||||
@@ -25,6 +25,15 @@ variable "data_dir" {
|
||||
default = "/opt/consul/data"
|
||||
}
|
||||
|
||||
variable "hosts" {
|
||||
description = "The target machines host addresses to use for the consul cluster"
|
||||
type = map(object({
|
||||
ipv6 = string
|
||||
private_ip = string
|
||||
public_ip = string
|
||||
}))
|
||||
}
|
||||
|
||||
variable "install_dir" {
|
||||
type = string
|
||||
description = "The directory where the consul binary will be installed"
|
||||
@@ -66,11 +75,3 @@ variable "release" {
|
||||
edition = "ce"
|
||||
}
|
||||
}
|
||||
|
||||
variable "target_hosts" {
|
||||
description = "The target machines host addresses to use for the consul cluster"
|
||||
type = map(object({
|
||||
private_ip = string
|
||||
public_ip = string
|
||||
}))
|
||||
}
|
||||
|
||||
@@ -53,18 +53,18 @@ variable "release" {
|
||||
default = null
|
||||
}
|
||||
|
||||
variable "target_hosts" {
|
||||
variable "hosts" {
|
||||
default = null
|
||||
}
|
||||
|
||||
output "private_ips" {
|
||||
value = [for host in var.target_hosts : host.private_ip]
|
||||
value = [for host in var.hosts : host.private_ip]
|
||||
}
|
||||
|
||||
output "public_ips" {
|
||||
value = [for host in var.target_hosts : host.public_ip]
|
||||
value = [for host in var.hosts : host.public_ip]
|
||||
}
|
||||
|
||||
output "target_hosts" {
|
||||
value = var.target_hosts
|
||||
output "hosts" {
|
||||
value = var.hosts
|
||||
}
|
||||
|
||||
@@ -19,9 +19,11 @@ resource "random_string" "cluster_id" {
|
||||
}
|
||||
|
||||
resource "aws_vpc" "vpc" {
|
||||
cidr_block = var.cidr
|
||||
enable_dns_hostnames = true
|
||||
enable_dns_support = true
|
||||
// Always set the ipv4 cidr block as it's required in "dual-stack" VPCs which we create.
|
||||
cidr_block = var.ipv4_cidr
|
||||
enable_dns_hostnames = true
|
||||
enable_dns_support = true
|
||||
assign_generated_ipv6_cidr_block = var.ip_version == 6
|
||||
|
||||
tags = merge(
|
||||
var.common_tags,
|
||||
@@ -32,11 +34,18 @@ resource "aws_vpc" "vpc" {
|
||||
}
|
||||
|
||||
resource "aws_subnet" "subnet" {
|
||||
count = length(data.aws_availability_zones.available.names)
|
||||
vpc_id = aws_vpc.vpc.id
|
||||
cidr_block = cidrsubnet(var.cidr, 8, count.index)
|
||||
availability_zone = data.aws_availability_zones.available.names[count.index]
|
||||
count = length(data.aws_availability_zones.available.names)
|
||||
vpc_id = aws_vpc.vpc.id
|
||||
availability_zone = data.aws_availability_zones.available.names[count.index]
|
||||
|
||||
// IPV4, but since we need to support ipv4 connections from the machine running enos, we're
|
||||
// always going to need ipv4 available.
|
||||
map_public_ip_on_launch = true
|
||||
cidr_block = cidrsubnet(var.ipv4_cidr, 8, count.index)
|
||||
|
||||
// IPV6, only set these when we want to run in ipv6 mode.
|
||||
assign_ipv6_address_on_creation = var.ip_version == 6
|
||||
ipv6_cidr_block = var.ip_version == 6 ? cidrsubnet(aws_vpc.vpc.ipv6_cidr_block, 4, count.index) : null
|
||||
|
||||
tags = merge(
|
||||
var.common_tags,
|
||||
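For reference, the cidrsubnet() call above carves a per-availability-zone IPV6 block out of the Amazon-assigned VPC block. A worked example with an illustrative /56 prefix (the real block is only known at apply time):

  // cidrsubnet("2600:1f18:1234:5600::/56", 4, 0) => "2600:1f18:1234:5600::/60"
  // cidrsubnet("2600:1f18:1234:5600::/56", 4, 2) => "2600:1f18:1234:5620::/60"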
@@ -46,7 +55,7 @@ resource "aws_subnet" "subnet" {
|
||||
)
|
||||
}
|
||||
|
||||
resource "aws_internet_gateway" "igw" {
|
||||
resource "aws_internet_gateway" "ipv4" {
|
||||
vpc_id = aws_vpc.vpc.id
|
||||
|
||||
tags = merge(
|
||||
@@ -57,29 +66,43 @@ resource "aws_internet_gateway" "igw" {
|
||||
)
|
||||
}
|
||||
|
||||
resource "aws_route" "igw" {
|
||||
resource "aws_egress_only_internet_gateway" "ipv6" {
|
||||
count = var.ip_version == 6 ? 1 : 0
|
||||
vpc_id = aws_vpc.vpc.id
|
||||
}
|
||||
|
||||
resource "aws_route" "igw_ipv4" {
|
||||
route_table_id = aws_vpc.vpc.default_route_table_id
|
||||
destination_cidr_block = "0.0.0.0/0"
|
||||
gateway_id = aws_internet_gateway.igw.id
|
||||
gateway_id = aws_internet_gateway.ipv4.id
|
||||
}
|
||||
|
||||
resource "aws_route" "igw_ipv6" {
|
||||
count = var.ip_version == 6 ? 1 : 0
|
||||
route_table_id = aws_vpc.vpc.default_route_table_id
|
||||
destination_ipv6_cidr_block = "::/0"
|
||||
egress_only_gateway_id = aws_egress_only_internet_gateway.ipv6[0].id
|
||||
}
|
||||
|
||||
resource "aws_security_group" "default" {
|
||||
vpc_id = aws_vpc.vpc.id
|
||||
|
||||
ingress {
|
||||
description = "allow_ingress_from_all"
|
||||
from_port = 0
|
||||
to_port = 0
|
||||
protocol = "tcp"
|
||||
cidr_blocks = ["0.0.0.0/0"]
|
||||
description = "allow_ingress_from_all"
|
||||
from_port = 0
|
||||
to_port = 0
|
||||
protocol = "tcp"
|
||||
cidr_blocks = ["0.0.0.0/0"]
|
||||
ipv6_cidr_blocks = var.ip_version == 6 ? ["::/0"] : null
|
||||
}
|
||||
|
||||
egress {
|
||||
description = "allow_egress_from_all"
|
||||
from_port = 0
|
||||
to_port = 0
|
||||
protocol = "-1"
|
||||
cidr_blocks = ["0.0.0.0/0"]
|
||||
description = "allow_egress_from_all"
|
||||
from_port = 0
|
||||
to_port = 0
|
||||
protocol = "-1"
|
||||
cidr_blocks = ["0.0.0.0/0"]
|
||||
ipv6_cidr_blocks = var.ip_version == 6 ? ["::/0"] : null
|
||||
}
|
||||
|
||||
tags = merge(
|
||||
|
||||
@@ -6,9 +6,14 @@ output "id" {
|
||||
value = aws_vpc.vpc.id
|
||||
}
|
||||
|
||||
output "cidr" {
|
||||
description = "CIDR for whole VPC"
|
||||
value = var.cidr
|
||||
output "ipv4_cidr" {
|
||||
description = "The VPC subnet CIDR for ipv4 mode"
|
||||
value = var.ipv4_cidr
|
||||
}
|
||||
|
||||
output "ipv6_cidr" {
|
||||
description = "The VPC subnet CIDR for ipv6 mode"
|
||||
value = aws_vpc.vpc.ipv6_cidr_block
|
||||
}
|
||||
|
||||
output "cluster_id" {
|
||||
|
||||
@@ -7,10 +7,21 @@ variable "name" {
|
||||
description = "The name of the VPC"
|
||||
}
|
||||
|
||||
variable "cidr" {
|
||||
variable "ip_version" {
|
||||
type = number
|
||||
default = 4
|
||||
description = "The IP version to use for the default subnet"
|
||||
|
||||
validation {
|
||||
condition = contains([4, 6], var.ip_version)
|
||||
error_message = "The ip_version must be either 4 or 6"
|
||||
}
|
||||
}
|
||||
|
||||
variable "ipv4_cidr" {
|
||||
type = string
|
||||
default = "10.13.0.0/16"
|
||||
description = "CIDR block for the VPC"
|
||||
description = "The CIDR block for the VPC when using IPV4 mode"
|
||||
}
|
||||
|
||||
variable "environment" {
|
||||
|
||||
@@ -11,6 +11,7 @@ terraform {
|
||||
|
||||
variable "hosts" {
|
||||
type = map(object({
|
||||
ipv6 = string
|
||||
private_ip = string
|
||||
public_ip = string
|
||||
}))
|
||||
|
||||
@@ -13,6 +13,11 @@ terraform {
|
||||
}
|
||||
}
|
||||
|
||||
variable "vault_addr" {
|
||||
type = string
|
||||
description = "The local vault API listen address"
|
||||
}
|
||||
|
||||
variable "vault_install_dir" {
|
||||
type = string
|
||||
description = "The directory where the Vault binary will be installed"
|
||||
@@ -32,12 +37,13 @@ locals {
|
||||
token_id = random_uuid.token_id.id
|
||||
secondary_token = enos_remote_exec.fetch_secondary_token.stdout
|
||||
}
|
||||
|
||||
resource "random_uuid" "token_id" {}
|
||||
|
||||
resource "enos_remote_exec" "fetch_secondary_token" {
|
||||
depends_on = [random_uuid.token_id]
|
||||
environment = {
|
||||
VAULT_ADDR = "http://127.0.0.1:8200"
|
||||
VAULT_ADDR = var.vault_addr
|
||||
VAULT_TOKEN = var.vault_root_token
|
||||
}
|
||||
|
||||
|
||||
@@ -41,6 +41,7 @@ variable "packages" {
|
||||
|
||||
variable "hosts" {
|
||||
type = map(object({
|
||||
ipv6 = string
|
||||
private_ip = string
|
||||
public_ip = string
|
||||
}))
|
||||
|
||||
@@ -3,68 +3,28 @@
|
||||
|
||||
// An arithmetic module for calculating inputs and outputs for various replication steps.
|
||||
|
||||
// Get the first follower out of the hosts set
|
||||
variable "follower_hosts" {
|
||||
type = map(object({
|
||||
private_ip = string
|
||||
public_ip = string
|
||||
}))
|
||||
default = {}
|
||||
}
|
||||
|
||||
output "follower_host_1" {
|
||||
value = try(var.follower_hosts[0], null)
|
||||
}
|
||||
|
||||
output "follower_public_ip_1" {
|
||||
value = try(var.follower_hosts[0].public_ip, null)
|
||||
}
|
||||
|
||||
output "follower_private_ip_1" {
|
||||
value = try(var.follower_hosts[0].private_ip, null)
|
||||
}
|
||||
|
||||
output "follower_host_2" {
|
||||
value = try(var.follower_hosts[1], null)
|
||||
}
|
||||
|
||||
output "follower_public_ip_2" {
|
||||
value = try(var.follower_hosts[1].public_ip, null)
|
||||
}
|
||||
|
||||
output "follower_private_ip_2" {
|
||||
value = try(var.follower_hosts[1].private_ip, null)
|
||||
}
|
||||
|
||||
// Calculate our remainder hosts after we've added and removed leader
|
||||
variable "initial_hosts" {
|
||||
type = map(object({
|
||||
private_ip = string
|
||||
public_ip = string
|
||||
}))
|
||||
default = {}
|
||||
}
|
||||
|
||||
variable "initial_hosts_count" {
|
||||
type = number
|
||||
default = 0
|
||||
}
|
||||
|
||||
variable "added_hosts" {
|
||||
type = map(object({
|
||||
ipv6 = string
|
||||
private_ip = string
|
||||
public_ip = string
|
||||
}))
|
||||
default = {}
|
||||
}
|
||||
|
||||
variable "added_hosts_count" {
|
||||
type = number
|
||||
default = 0
|
||||
variable "initial_hosts" {
|
||||
description = "The initial set of Vault cluster hosts before removing and adding hosts"
|
||||
type = map(object({
|
||||
ipv6 = string
|
||||
private_ip = string
|
||||
public_ip = string
|
||||
}))
|
||||
default = {}
|
||||
}
|
||||
|
||||
variable "removed_primary_host" {
|
||||
type = object({
|
||||
ipv6 = string
|
||||
private_ip = string
|
||||
public_ip = string
|
||||
})
|
||||
@@ -73,6 +33,7 @@ variable "removed_primary_host" {
|
||||
|
||||
variable "removed_follower_host" {
|
||||
type = object({
|
||||
ipv6 = string
|
||||
private_ip = string
|
||||
public_ip = string
|
||||
})
|
||||
@@ -80,23 +41,9 @@ variable "removed_follower_host" {
|
||||
}
|
||||
|
||||
locals {
|
||||
remaining_hosts_count = max((var.initial_hosts_count + var.added_hosts_count - 2), 0)
|
||||
indices = [for idx in range(local.remaining_hosts_count) : idx]
|
||||
remaining_initial = setsubtract(values(var.initial_hosts), [var.removed_primary_host, var.removed_follower_host])
|
||||
remaining_hosts_list = tolist(setunion(values(var.added_hosts), local.remaining_initial))
|
||||
remaining_hosts = zipmap(local.indices, local.remaining_hosts_list)
|
||||
}
|
||||
|
||||
output "remaining_initial_count" {
|
||||
value = length(local.remaining_initial)
|
||||
}
|
||||
|
||||
output "remaining_initial_hosts" {
|
||||
value = local.remaining_initial
|
||||
}
|
||||
|
||||
output "remaining_hosts_count" {
|
||||
value = local.remaining_hosts_count
|
||||
remaining_initial = setsubtract(values(var.initial_hosts), [var.removed_primary_host, var.removed_follower_host])
|
||||
remaining_hosts_list = tolist(setunion(values(var.added_hosts), local.remaining_initial))
|
||||
remaining_hosts = { for idx in range(length(local.remaining_hosts_list)) : idx => local.remaining_hosts_list[idx] }
|
||||
}
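To make the arithmetic concrete, here is a sketch of the evaluated locals for a hypothetical cluster that started with three hosts, added two, and then removed the old primary and one follower (host objects abbreviated; the ordering produced by the set operations is not guaranteed):

  // remaining_initial    => the one initial host that was neither removed_primary_host nor removed_follower_host
  // remaining_hosts_list => that surviving initial host plus the two added hosts (three objects total)
  // remaining_hosts      => { 0 = <host>, 1 = <host>, 2 = <host> }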
|
||||
|
||||
output "remaining_hosts" {
|
||||
|
||||
@@ -100,6 +100,13 @@ module "target" {
|
||||
amd64 = "t3a.small"
|
||||
arm64 = "t4g.small"
|
||||
}
|
||||
ports_ingress = [
|
||||
{
|
||||
description = "SSH"
|
||||
port = 22
|
||||
protocol = "tcp"
|
||||
},
|
||||
]
|
||||
// Make sure it's not too long as we use this for aws resources that size maximums that are easy
|
||||
// to hit.
|
||||
project_name = substr("vault-ci-softhsm-${local.id}", 0, 32)
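This target only needs SSH, but cluster targets populate the same list with their service ports. A hedged sketch of what a Vault target's ports_ingress is assumed to look like (the exact list is defined elsewhere in the scenario globals):

  ports_ingress = [
    { description = "SSH", port = 22, protocol = "tcp" },
    { description = "Vault API", port = 8200, protocol = "tcp" },
    { description = "Vault cluster", port = 8201, protocol = "tcp" },
  ]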
|
||||
|
||||
@@ -9,12 +9,7 @@ terraform {
|
||||
}
|
||||
}
|
||||
|
||||
variable "vault_instance_count" {
|
||||
type = number
|
||||
description = "How many vault instances are in the cluster"
|
||||
}
|
||||
|
||||
variable "old_vault_instances" {
|
||||
variable "old_hosts" {
|
||||
type = map(object({
|
||||
private_ip = string
|
||||
public_ip = string
|
||||
@@ -22,17 +17,8 @@ variable "old_vault_instances" {
|
||||
description = "The vault cluster instances to be shutdown"
|
||||
}
|
||||
|
||||
locals {
|
||||
public_ips = {
|
||||
for idx in range(var.vault_instance_count) : idx => {
|
||||
public_ip = values(var.old_vault_instances)[idx].public_ip
|
||||
private_ip = values(var.old_vault_instances)[idx].private_ip
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
resource "enos_remote_exec" "shutdown_multiple_nodes" {
|
||||
for_each = local.public_ips
|
||||
for_each = var.old_hosts
|
||||
inline = ["sudo shutdown -H --no-wall; exit 0"]
|
||||
|
||||
transport = {
|
||||
|
||||
@@ -9,9 +9,13 @@ terraform {
|
||||
}
|
||||
}
|
||||
|
||||
variable "node_public_ip" {
|
||||
type = string
|
||||
description = "Node Public IP address"
|
||||
variable "host" {
|
||||
type = object({
|
||||
ipv6 = string
|
||||
private_ip = string
|
||||
public_ip = string
|
||||
})
|
||||
description = "The node to shut down"
|
||||
}
|
||||
|
||||
resource "enos_remote_exec" "shutdown_node" {
|
||||
@@ -19,7 +23,7 @@ resource "enos_remote_exec" "shutdown_node" {
|
||||
|
||||
transport = {
|
||||
ssh = {
|
||||
host = var.node_public_ip
|
||||
host = var.host.public_ip
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -15,6 +15,7 @@ variable "cluster_id" {
|
||||
|
||||
variable "hosts" {
|
||||
type = map(object({
|
||||
ipv6 = string
|
||||
private_ip = string
|
||||
public_ip = string
|
||||
}))
|
||||
|
||||
@@ -12,6 +12,7 @@ terraform {
|
||||
|
||||
variable "hosts" {
|
||||
type = map(object({
|
||||
ipv6 = string
|
||||
private_ip = string
|
||||
public_ip = string
|
||||
}))
|
||||
|
||||
@@ -12,6 +12,7 @@ terraform {
|
||||
|
||||
variable "hosts" {
|
||||
type = map(object({
|
||||
ipv6 = string
|
||||
private_ip = string
|
||||
public_ip = string
|
||||
}))
|
||||
|
||||
@@ -11,6 +11,7 @@ terraform {
|
||||
|
||||
variable "hosts" {
|
||||
type = map(object({
|
||||
ipv6 = string
|
||||
private_ip = string
|
||||
public_ip = string
|
||||
}))
|
||||
|
||||
@@ -7,18 +7,36 @@ terraform {
|
||||
# to the public registry
|
||||
enos = {
|
||||
source = "registry.terraform.io/hashicorp-forge/enos"
|
||||
version = ">= 0.4.10"
|
||||
version = ">= 0.5.3"
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
locals {
|
||||
api_addr_localhost = var.ip_version == 4 ? "http://127.0.0.1:${var.listener_port}" : "http://[::1]:${var.listener_port}"
|
||||
api_addrs = tolist([for h in var.hosts : {
|
||||
4 : "http://${h.public_ip}:${var.listener_port}",
|
||||
6 : "http://[${h.ipv6}]:${var.listener_port}",
|
||||
}])
|
||||
api_addrs_internal = tolist([for h in var.hosts : {
|
||||
4 : "http://${h.private_ip}:${var.listener_port}",
|
||||
6 : "http://[${h.ipv6}]:${var.listener_port}",
|
||||
}])
|
||||
bin_path = "${var.install_dir}/vault"
|
||||
cluster_addrs = tolist([for h in var.hosts : {
|
||||
4 : "http://${h.public_ip}:${var.cluster_port}",
|
||||
6 : "http://[${h.ipv6}]:${var.cluster_port}",
|
||||
}])
|
||||
cluster_addrs_internal = tolist([for h in var.hosts : {
|
||||
4 : "http://${h.private_ip}:${var.cluster_port}",
|
||||
6 : "http://[${h.ipv6}]:${var.cluster_port}",
|
||||
}])
|
||||
// In order to get Terraform to plan we have to use collections with keys that are known at plan
|
||||
// time. Here we're creating locals that keep track of index values that point to our target hosts.
|
||||
followers = toset(slice(local.instances, 1, length(local.instances)))
|
||||
instances = [for idx in range(length(var.target_hosts)) : tostring(idx)]
|
||||
leader = toset(slice(local.instances, 0, 1))
|
||||
followers = toset(slice(local.instances, 1, length(local.instances)))
|
||||
instances = [for idx in range(length(var.hosts)) : tostring(idx)]
|
||||
leader = toset(slice(local.instances, 0, 1))
|
||||
listener_address = var.ip_version == 4 ? "0.0.0.0:${var.listener_port}" : "[::]:${var.listener_port}"
|
||||
// Handle cases where we might have to distribute HSM tokens for the pkcs11 seal before starting
|
||||
// vault.
|
||||
token_base64 = try(lookup(var.seal_attributes, "token_base64", ""), "")
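A sketch of what the address locals above evaluate to for a hypothetical host with public_ip = "203.0.113.10", private_ip = "10.13.1.10", and ipv6 = "2600:1f18:1234:5600::10", using the default listener port:

  // ip_version = 4: api_addr_localhost => "http://127.0.0.1:8200"
  //                 api_addrs[i][4]    => "http://203.0.113.10:8200"
  //                 listener_address   => "0.0.0.0:8200"
  // ip_version = 6: api_addr_localhost => "http://[::1]:8200"
  //                 api_addrs[i][6]    => "http://[2600:1f18:1234:5600::10]:8200"
  //                 listener_address   => "[::]:8200"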
|
||||
@@ -94,8 +112,9 @@ locals {
|
||||
attributes = null
|
||||
}
|
||||
}
|
||||
seal_secondary = local.seals_secondary[var.seal_type_secondary]
|
||||
storage_config = [for idx, host in var.target_hosts : (var.storage_backend == "raft" ?
|
||||
seal_secondary = local.seals_secondary[var.seal_type_secondary]
|
||||
storage_address = var.ip_version == 4 ? "0.0.0.0:${var.external_storage_port}" : "[::]:${var.external_storage_port}"
|
||||
storage_attributes = [for idx, host in var.hosts : (var.storage_backend == "raft" ?
|
||||
merge(
|
||||
{
|
||||
node_id = "${var.storage_node_prefix}_${idx}"
|
||||
@@ -103,10 +122,16 @@ locals {
|
||||
var.storage_backend_attrs
|
||||
) :
|
||||
{
|
||||
address = "127.0.0.1:8500"
|
||||
address = local.storage_address
|
||||
path = "vault"
|
||||
})
|
||||
]
|
||||
storage_retry_join = {
|
||||
"raft" : {
|
||||
auto_join : "provider=aws addr_type=${var.ip_version == 4 ? "private_v4" : "public_v6"} tag_key=${var.cluster_tag_key} tag_value=${var.cluster_name}",
|
||||
auto_join_scheme : "http",
|
||||
},
|
||||
}
|
||||
}
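For a cluster whose targets carry the default cluster_tag_key, the auto_join string above renders roughly as follows (the tag value is illustrative):

  // ip_version = 4: "provider=aws addr_type=private_v4 tag_key=retry_join tag_value=vault-cluster-example"
  // ip_version = 6: "provider=aws addr_type=public_v6 tag_key=retry_join tag_value=vault-cluster-example"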
|
||||
|
||||
# You might be wondering why our start_vault module, which supports shamir, awskms, and pkcs11 seal
|
||||
@@ -141,7 +166,7 @@ module "maybe_configure_hsm" {
|
||||
source = "../softhsm_distribute_vault_keys"
|
||||
count = (var.seal_type == "pkcs11" || var.seal_type_secondary == "pkcs11") ? 1 : 0
|
||||
|
||||
hosts = var.target_hosts
|
||||
hosts = var.hosts
|
||||
token_base64 = local.token_base64
|
||||
}
|
||||
|
||||
@@ -150,7 +175,7 @@ module "maybe_configure_hsm_secondary" {
|
||||
depends_on = [module.maybe_configure_hsm]
|
||||
count = (var.seal_type == "pkcs11" || var.seal_type_secondary == "pkcs11") ? 1 : 0
|
||||
|
||||
hosts = var.target_hosts
|
||||
hosts = var.hosts
|
||||
token_base64 = local.token_base64_secondary
|
||||
}
|
||||
|
||||
@@ -165,20 +190,21 @@ resource "enos_vault_start" "leader" {
|
||||
config_mode = var.config_mode
|
||||
environment = var.environment
|
||||
config = {
|
||||
api_addr = "http://${var.target_hosts[each.value].private_ip}:8200"
|
||||
cluster_addr = "http://${var.target_hosts[each.value].private_ip}:8201"
|
||||
api_addr = local.api_addrs_internal[tonumber(each.value)][var.ip_version]
|
||||
cluster_addr = local.cluster_addrs_internal[tonumber(each.value)][var.ip_version]
|
||||
cluster_name = var.cluster_name
|
||||
listener = {
|
||||
type = "tcp"
|
||||
attributes = {
|
||||
address = "0.0.0.0:8200"
|
||||
address = local.listener_address
|
||||
tls_disable = "true"
|
||||
}
|
||||
}
|
||||
log_level = var.log_level
|
||||
storage = {
|
||||
type = var.storage_backend
|
||||
attributes = ({ for key, value in local.storage_config[each.key] : key => value })
|
||||
attributes = local.storage_attributes[each.key]
|
||||
retry_join = try(local.storage_retry_join[var.storage_backend], null)
|
||||
}
|
||||
seals = local.seals
|
||||
ui = true
|
||||
@@ -190,7 +216,7 @@ resource "enos_vault_start" "leader" {
|
||||
|
||||
transport = {
|
||||
ssh = {
|
||||
host = var.target_hosts[each.value].public_ip
|
||||
host = var.hosts[each.value].public_ip
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -206,20 +232,21 @@ resource "enos_vault_start" "followers" {
|
||||
config_mode = var.config_mode
|
||||
environment = var.environment
|
||||
config = {
|
||||
api_addr = "http://${var.target_hosts[each.value].private_ip}:8200"
|
||||
cluster_addr = "http://${var.target_hosts[each.value].private_ip}:8201"
|
||||
api_addr = local.api_addrs_internal[tonumber(each.value)][var.ip_version]
|
||||
cluster_addr = local.cluster_addrs_internal[tonumber(each.value)][var.ip_version]
|
||||
cluster_name = var.cluster_name
|
||||
listener = {
|
||||
type = "tcp"
|
||||
attributes = {
|
||||
address = "0.0.0.0:8200"
|
||||
address = local.listener_address
|
||||
tls_disable = "true"
|
||||
}
|
||||
}
|
||||
log_level = var.log_level
|
||||
storage = {
|
||||
type = var.storage_backend
|
||||
attributes = { for key, value in local.storage_config[each.key] : key => value }
|
||||
attributes = { for key, value in local.storage_attributes[each.key] : key => value }
|
||||
retry_join = try(local.storage_retry_join[var.storage_backend], null)
|
||||
}
|
||||
seals = local.seals
|
||||
ui = true
|
||||
@@ -231,7 +258,7 @@ resource "enos_vault_start" "followers" {
|
||||
|
||||
transport = {
|
||||
ssh = {
|
||||
host = var.target_hosts[each.value].public_ip
|
||||
host = var.hosts[each.value].public_ip
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,11 +1,31 @@
|
||||
# Copyright (c) HashiCorp, Inc.
|
||||
# SPDX-License-Identifier: BUSL-1.1
|
||||
|
||||
output "api_addr_localhost" {
|
||||
description = "The localhost API address"
|
||||
value = local.api_addr_localhost
|
||||
}
|
||||
|
||||
output "api_addrs" {
|
||||
description = "The external API addresses of all nodes the cluster"
|
||||
value = local.api_addrs
|
||||
}
|
||||
|
||||
output "cluster_name" {
|
||||
description = "The Vault cluster name"
|
||||
value = var.cluster_name
|
||||
}
|
||||
|
||||
output "cluster_port" {
|
||||
description = "The Vault cluster request forwarding listener port"
|
||||
value = var.cluster_port
|
||||
}
|
||||
|
||||
output "external_storage_port" {
|
||||
description = "The Vault cluster non-raft external storage port"
|
||||
value = var.external_storage_port
|
||||
}
|
||||
|
||||
output "followers" {
|
||||
description = "The follower enos_vault_start resources"
|
||||
value = enos_vault_start.followers
|
||||
@@ -16,18 +36,28 @@ output "leader" {
|
||||
value = enos_vault_start.leader
|
||||
}
|
||||
|
||||
output "ipv6s" {
|
||||
description = "Vault cluster target host ipv6s"
|
||||
value = [for host in var.hosts : host.ipv6]
|
||||
}
|
||||
|
||||
output "listener_port" {
|
||||
description = "The Vault cluster TCP listener port"
|
||||
value = var.listener_port
|
||||
}
|
||||
|
||||
output "private_ips" {
|
||||
description = "Vault cluster target host private_ips"
|
||||
value = [for host in var.target_hosts : host.private_ip]
|
||||
value = [for host in var.hosts : host.private_ip]
|
||||
}
|
||||
|
||||
output "public_ips" {
|
||||
description = "Vault cluster target host public_ips"
|
||||
value = [for host in var.target_hosts : host.public_ip]
|
||||
value = [for host in var.hosts : host.public_ip]
|
||||
}
|
||||
|
||||
output "target_hosts" {
|
||||
output "hosts" {
|
||||
description = "The vault cluster instances that were created"
|
||||
|
||||
value = var.target_hosts
|
||||
value = var.hosts
|
||||
}
|
||||
|
||||
@@ -6,6 +6,18 @@ variable "cluster_name" {
|
||||
description = "The Vault cluster name"
|
||||
}
|
||||
|
||||
variable "cluster_port" {
|
||||
type = number
|
||||
description = "The cluster port for Vault to listen on"
|
||||
default = 8201
|
||||
}
|
||||
|
||||
variable "cluster_tag_key" {
|
||||
type = string
|
||||
description = "The Vault cluster tag key"
|
||||
default = "retry_join"
|
||||
}
|
||||
|
||||
variable "config_dir" {
|
||||
type = string
|
||||
description = "The directory to use for Vault configuration"
|
||||
@@ -28,12 +40,37 @@ variable "environment" {
|
||||
default = null
|
||||
}
|
||||
|
||||
variable "external_storage_port" {
|
||||
type = number
|
||||
description = "The port to connect to when using external storage"
|
||||
default = 8500
|
||||
}
|
||||
|
||||
variable "hosts" {
|
||||
description = "The target machines host addresses to use for the Vault cluster"
|
||||
type = map(object({
|
||||
ipv6 = string
|
||||
private_ip = string
|
||||
public_ip = string
|
||||
}))
|
||||
}
|
||||
|
||||
variable "install_dir" {
|
||||
type = string
|
||||
description = "The directory where the vault binary will be installed"
|
||||
default = "/opt/vault/bin"
|
||||
}
|
||||
|
||||
variable "ip_version" {
|
||||
type = number
|
||||
description = "The IP version to use for the Vault TCP listeners"
|
||||
|
||||
validation {
|
||||
condition = contains([4, 6], var.ip_version)
|
||||
error_message = "The ip_version must be either 4 or 6"
|
||||
}
|
||||
}
|
||||
|
||||
variable "license" {
|
||||
type = string
|
||||
sensitive = true
|
||||
@@ -58,6 +95,12 @@ variable "manage_service" {
|
||||
default = true
|
||||
}
|
||||
|
||||
variable "listener_port" {
|
||||
type = number
|
||||
description = "The port for Vault to listen on"
|
||||
default = 8200
|
||||
}
|
||||
|
||||
variable "seal_alias" {
|
||||
type = string
|
||||
description = "The primary seal alias name"
|
||||
@@ -142,11 +185,3 @@ variable "storage_node_prefix" {
|
||||
description = "A prefix to use for each node in the Vault storage configuration"
|
||||
default = "node"
|
||||
}
|
||||
|
||||
variable "target_hosts" {
|
||||
description = "The target machines host addresses to use for the Vault cluster"
|
||||
type = map(object({
|
||||
private_ip = string
|
||||
public_ip = string
|
||||
}))
|
||||
}
|
||||
|
||||
@@ -18,16 +18,17 @@ variable "service_name" {
|
||||
default = "vault"
|
||||
}
|
||||
|
||||
variable "target_hosts" {
|
||||
variable "hosts" {
|
||||
description = "The target machines host addresses to use for the Vault cluster"
|
||||
type = map(object({
|
||||
ipv6 = string
|
||||
private_ip = string
|
||||
public_ip = string
|
||||
}))
|
||||
}
|
||||
|
||||
resource "enos_remote_exec" "shutdown_multiple_nodes" {
|
||||
for_each = var.target_hosts
|
||||
for_each = var.hosts
|
||||
inline = ["sudo systemctl stop ${var.service_name}.service; sleep 5"]
|
||||
|
||||
transport = {
|
||||
|
||||
@@ -10,5 +10,6 @@ output "hosts" {
|
||||
value = { for idx in range(var.instance_count) : idx => {
|
||||
public_ip = data.aws_instance.targets[idx].public_ip
|
||||
private_ip = data.aws_instance.targets[idx].private_ip
|
||||
ipv6 = try(data.aws_instance.targets[idx].ipv6_addresses[0], null)
|
||||
} }
|
||||
}
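The try() fallback matters because instances launched in IPV4 mode have no ipv6_addresses. A sketch of the resulting host object in each mode (addresses illustrative):

  // ip_version = 4: { public_ip = "203.0.113.10", private_ip = "10.13.1.10", ipv6 = null }
  // ip_version = 6: { public_ip = "203.0.113.10", private_ip = "10.13.1.10", ipv6 = "2600:1f18:1234:5600::10" }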
|
||||
|
||||
11
enos/modules/target_ec2_instances/locals.tf
Normal file
11
enos/modules/target_ec2_instances/locals.tf
Normal file
@@ -0,0 +1,11 @@
|
||||
# Copyright (c) HashiCorp, Inc.
|
||||
# SPDX-License-Identifier: BUSL-1.1
|
||||
|
||||
locals {
|
||||
hosts = { for idx in range(var.instance_count) : idx => {
|
||||
ipv6 = try(aws_instance.targets[idx].ipv6_addresses[0], "")
|
||||
public_ip = aws_instance.targets[idx].public_ip
|
||||
private_ip = aws_instance.targets[idx].private_ip
|
||||
}
|
||||
}
|
||||
}
@@ -141,78 +141,21 @@ resource "aws_security_group" "target" {
description = "Target instance security group"
vpc_id = var.vpc_id

# SSH traffic
ingress {
from_port = 22
to_port = 22
protocol = "tcp"
cidr_blocks = flatten([
formatlist("%s/32", data.enos_environment.localhost.public_ipv4_addresses),
join(",", data.aws_vpc.vpc.cidr_block_associations.*.cidr_block),
])
}
# External ingress
dynamic "ingress" {
for_each = var.ports_ingress

# Vault traffic
ingress {
from_port = 8200
to_port = 8201
protocol = "tcp"
cidr_blocks = flatten([
formatlist("%s/32", data.enos_environment.localhost.public_ipv4_addresses),
join(",", data.aws_vpc.vpc.cidr_block_associations.*.cidr_block),
formatlist("%s/32", var.ssh_allow_ips)
])
}

# Consul traffic
ingress {
from_port = 8300
to_port = 8302
protocol = "tcp"
cidr_blocks = flatten([
formatlist("%s/32", data.enos_environment.localhost.public_ipv4_addresses),
join(",", data.aws_vpc.vpc.cidr_block_associations.*.cidr_block),
])
}

ingress {
from_port = 8301
to_port = 8302
protocol = "udp"
cidr_blocks = flatten([
formatlist("%s/32", data.enos_environment.localhost.public_ipv4_addresses),
join(",", data.aws_vpc.vpc.cidr_block_associations.*.cidr_block),
])
}

ingress {
from_port = 8500
to_port = 8503
protocol = "tcp"
cidr_blocks = flatten([
formatlist("%s/32", data.enos_environment.localhost.public_ipv4_addresses),
join(",", data.aws_vpc.vpc.cidr_block_associations.*.cidr_block),
])
}

ingress {
from_port = 8600
to_port = 8600
protocol = "tcp"
cidr_blocks = flatten([
formatlist("%s/32", data.enos_environment.localhost.public_ipv4_addresses),
join(",", data.aws_vpc.vpc.cidr_block_associations.*.cidr_block),
])
}

ingress {
from_port = 8600
to_port = 8600
protocol = "udp"
cidr_blocks = flatten([
formatlist("%s/32", data.enos_environment.localhost.public_ipv4_addresses),
join(",", data.aws_vpc.vpc.cidr_block_associations.*.cidr_block),
])
content {
from_port = ingress.value.port
to_port = ingress.value.port
protocol = ingress.value.protocol
cidr_blocks = flatten([
formatlist("%s/32", data.enos_environment.localhost.public_ipv4_addresses),
join(",", data.aws_vpc.vpc.cidr_block_associations.*.cidr_block),
formatlist("%s/32", var.ssh_allow_ips)
])
ipv6_cidr_blocks = data.aws_vpc.vpc.ipv6_cidr_block != "" ? [data.aws_vpc.vpc.ipv6_cidr_block] : null
}
}

# Internal traffic
@@ -225,10 +168,11 @@ resource "aws_security_group" "target" {

# External traffic
egress {
from_port = 0
to_port = 0
protocol = "-1"
cidr_blocks = ["0.0.0.0/0"]
from_port = 0
to_port = 0
protocol = "-1"
cidr_blocks = ["0.0.0.0/0"]
ipv6_cidr_blocks = ["::/0"]
}

tags = merge(
@@ -259,11 +203,9 @@ resource "aws_instance" "targets" {
}

module "disable_selinux" {
source = "../disable_selinux"
count = var.disable_selinux == true ? 1 : 0
depends_on = [aws_instance.targets]
source = "../disable_selinux"
count = var.disable_selinux == true ? 1 : 0

hosts = { for idx in range(var.instance_count) : idx => {
public_ip = aws_instance.targets[idx].public_ip
private_ip = aws_instance.targets[idx].private_ip
} }
hosts = local.hosts
}

@@ -7,8 +7,5 @@ output "cluster_name" {

output "hosts" {
description = "The ec2 instance target hosts"
value = { for idx in range(var.instance_count) : idx => {
public_ip = aws_instance.targets[idx].public_ip
private_ip = aws_instance.targets[idx].private_ip
} }
value = local.hosts
}

@@ -24,6 +24,15 @@ variable "common_tags" {
default = { "Project" : "vault-ci" }
}

variable "ports_ingress" {
description = "Ports mappings to allow for ingress"
type = list(object({
description = string
port = number
protocol = string
}))
}
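A hypothetical value for this variable (the real list is supplied by the calling scenario):

ports_ingress = [
{ description = "SSH", port = 22, protocol = "tcp" },
{ description = "Vault API", port = 8200, protocol = "tcp" },
{ description = "Vault cluster", port = 8201, protocol = "tcp" },
]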

variable "disable_selinux" {
description = "Optionally disable SELinux for certain distros/versions"
type = bool

@@ -24,6 +24,7 @@ variable "instance_mem_max" { default = null }
variable "instance_mem_min" { default = null }
variable "instance_types" { default = null }
variable "max_price" { default = null }
variable "ports_ingress" { default = null }
variable "project_name" { default = null }
variable "seal_key_names" { default = null }
variable "ssh_allow_ips" { default = null }
@@ -46,5 +47,6 @@ output "hosts" {
value = { for idx in range(var.instance_count) : idx => {
public_ip = "null-public-${idx}"
private_ip = "null-private-${idx}"
ipv6 = "null-ipv6-${idx}"
} }
}

@@ -10,5 +10,6 @@ output "hosts" {
value = { for idx in range(var.instance_count) : idx => {
public_ip = data.aws_instance.targets[idx].public_ip
private_ip = data.aws_instance.targets[idx].private_ip
ipv6 = try(data.aws_instance.targets[idx].ipv6_addresses[0], null)
} }
}

@@ -12,6 +12,27 @@ terraform {
}
}

variable "ip_version" {
type = number
default = 4
description = "The IP version to use for the Vault TCP listeners"

validation {
condition = contains([4, 6], var.ip_version)
error_message = "The ip_version must be either 4 or 6"
}
}

variable "vault_addr" {
type = string
description = "The local vault API listen address"
}

variable "vault_agent_port" {
type = number
description = "The listener port number for the Vault Agent"
}

variable "vault_agent_template_destination" {
type = string
description = "The destination of the template rendered by Agent"
@@ -27,35 +48,28 @@ variable "vault_root_token" {
description = "The Vault root token"
}

variable "vault_instances" {
variable "hosts" {
type = map(object({
ipv6 = string
private_ip = string
public_ip = string
}))
description = "The Vault cluster instances that were created"
}

variable "vault_instance_count" {
type = number
description = "How many vault instances are in the cluster"
}

variable "vault_install_dir" {
type = string
description = "The directory where the Vault binary will be installed"
}

locals {
vault_instances = {
for idx in range(var.vault_instance_count) : idx => {
public_ip = values(var.vault_instances)[idx].public_ip
private_ip = values(var.vault_instances)[idx].private_ip
}
}
agent_listen_addr = "${var.ip_version == 4 ? "127.0.0.1" : "[::1]"}:${var.vault_agent_port}"
}
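For reference, the conditional above renders an address like "127.0.0.1:<port>" when ip_version is 4 and "[::1]:<port>" when it is 6; the brackets are required because the agent listener address is a host:port string and a bare IPv6 literal would make the port separator ambiguous.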

resource "enos_remote_exec" "set_up_approle_auth_and_agent" {
environment = {
AGENT_LISTEN_ADDR = local.agent_listen_addr,
VAULT_ADDR = var.vault_addr,
VAULT_INSTALL_DIR = var.vault_install_dir,
VAULT_TOKEN = var.vault_root_token,
VAULT_AGENT_TEMPLATE_DESTINATION = var.vault_agent_template_destination,
@@ -66,7 +80,12 @@ resource "enos_remote_exec" "set_up_approle_auth_and_agent" {

transport = {
ssh = {
host = local.vault_instances[0].public_ip
host = var.hosts[0].public_ip
}
}
}

output "vault_agent_listen_addr" {
description = "The vault agent listen address"
value = local.agent_listen_addr
}

@@ -2,21 +2,23 @@
# Copyright (c) HashiCorp, Inc.
# SPDX-License-Identifier: BUSL-1.1


set -e

binpath=${VAULT_INSTALL_DIR}/vault

fail() {
echo "$1" 1>&2
return 1
}

test -x "$binpath" || fail "unable to locate vault binary at $binpath"

export VAULT_ADDR='http://127.0.0.1:8200'
[[ -z "$AGENT_LISTEN_ADDR" ]] && fail "AGENT_LISTEN_ADDR env variable has not been set"
[[ -z "$VAULT_ADDR" ]] && fail "VAULT_ADDR env variable has not been set"
[[ -z "$VAULT_INSTALL_DIR" ]] && fail "VAULT_INSTALL_DIR env variable has not been set"
[[ -z "$VAULT_AGENT_TEMPLATE_CONTENTS" ]] && fail "VAULT_AGENT_TEMPLATE_CONTENTS env variable has not been set"
[[ -z "$VAULT_AGENT_TEMPLATE_DESTINATION" ]] && fail "VAULT_AGENT_TEMPLATE_DESTINATION env variable has not been set"
[[ -z "$VAULT_TOKEN" ]] && fail "VAULT_TOKEN env variable has not been set"

binpath=${VAULT_INSTALL_DIR}/vault
test -x "$binpath" || fail "unable to locate vault binary at $binpath"

# If approle was already enabled, disable it as we're about to re-enable it (the || true is so we don't fail if it doesn't already exist)
$binpath auth disable approle || true

@@ -43,7 +45,7 @@ cat > /tmp/vault-agent.hcl <<- EOM
pid_file = "/tmp/pidfile"

vault {
address = "http://127.0.0.1:8200"
address = "${VAULT_ADDR}"
tls_skip_verify = true
retry {
num_retries = 10
@@ -56,7 +58,7 @@ cache {
}

listener "tcp" {
address = "127.0.0.1:8100"
address = "${AGENT_LISTEN_ADDR}"
tls_disable = true
}

@@ -92,4 +94,6 @@ pkill -F /tmp/pidfile || true
rm "${VAULT_AGENT_TEMPLATE_DESTINATION}" || true

# Run agent (it will kill itself when it finishes rendering the template)
$binpath agent -config=/tmp/vault-agent.hcl > /tmp/agent-logs.txt 2>&1
if ! $binpath agent -config=/tmp/vault-agent.hcl > /tmp/agent-logs.txt 2>&1; then
fail "failed to run vault agent: $(cat /tmp/agent-logs.txt)"
fi

@@ -21,11 +21,11 @@ locals {
consul_bin_path = "${var.consul_install_dir}/consul"
enable_audit_devices = var.enable_audit_devices && var.initialize_cluster
// In order to get Terraform to plan we have to use collections with keys
// that are known at plan time. In order for our module to work our var.target_hosts
// that are known at plan time. In order for our module to work our var.hosts
// must be a map with known keys at plan time. Here we're creating locals
// that keep track of index values that point to our target hosts.
followers = toset(slice(local.instances, 1, length(local.instances)))
instances = [for idx in range(length(var.target_hosts)) : tostring(idx)]
instances = [for idx in range(length(var.hosts)) : tostring(idx)]
key_shares = {
"awskms" = null
"shamir" = 5
@@ -58,7 +58,7 @@ locals {
}

resource "enos_host_info" "hosts" {
for_each = var.target_hosts
for_each = var.hosts

transport = {
ssh = {
@@ -69,7 +69,7 @@ resource "enos_host_info" "hosts" {

resource "enos_bundle_install" "consul" {
for_each = {
for idx, host in var.target_hosts : idx => var.target_hosts[idx]
for idx, host in var.hosts : idx => var.hosts[idx]
if var.storage_backend == "consul"
}

@@ -89,12 +89,12 @@ resource "enos_bundle_install" "consul" {
module "install_packages" {
source = "../install_packages"

hosts = var.target_hosts
hosts = var.hosts
packages = var.packages
}

resource "enos_bundle_install" "vault" {
for_each = var.target_hosts
for_each = var.hosts
depends_on = [
module.install_packages, // Don't race for the package manager locks with install_packages
]
@@ -137,7 +137,7 @@ resource "enos_consul_start" "consul" {

transport = {
ssh = {
host = var.target_hosts[each.key].public_ip
host = var.hosts[each.key].public_ip
}
}
}
@@ -152,10 +152,16 @@ module "start_vault" {
]

cluster_name = var.cluster_name
cluster_port = var.cluster_port
cluster_tag_key = var.cluster_tag_key
config_dir = var.config_dir
config_mode = var.config_mode
external_storage_port = var.external_storage_port
hosts = var.hosts
install_dir = var.install_dir
ip_version = var.ip_version
license = var.license
listener_port = var.listener_port
log_level = var.log_level
manage_service = var.manage_service
seal_attributes = var.seal_attributes
@@ -166,7 +172,6 @@ module "start_vault" {
storage_backend = var.storage_backend
storage_backend_attrs = var.storage_backend_addl_config
storage_node_prefix = var.storage_node_prefix
target_hosts = var.target_hosts
}

resource "enos_vault_init" "leader" {
@@ -189,7 +194,7 @@ resource "enos_vault_init" "leader" {

transport = {
ssh = {
host = var.target_hosts[each.value].public_ip
host = var.hosts[each.value].public_ip
}
}
}
@@ -208,7 +213,7 @@ resource "enos_vault_unseal" "leader" {

transport = {
ssh = {
host = var.target_hosts[tolist(local.leader)[0]].public_ip
host = var.hosts[tolist(local.leader)[0]].public_ip
}
}
}
@@ -232,7 +237,7 @@ resource "enos_vault_unseal" "followers" {

transport = {
ssh = {
host = var.target_hosts[each.value].public_ip
host = var.hosts[each.value].public_ip
}
}
}
@@ -246,12 +251,12 @@ resource "enos_vault_unseal" "maybe_force_unseal" {
module.start_vault.followers,
]
for_each = {
for idx, host in var.target_hosts : idx => host
for idx, host in var.hosts : idx => host
if var.force_unseal && !var.initialize_cluster
}

bin_path = local.bin_path
vault_addr = "http://localhost:8200"
vault_addr = module.start_vault.api_addr_localhost
seal_type = var.seal_type
unseal_keys = coalesce(
var.shamir_unseal_keys,
@@ -272,10 +277,10 @@ resource "enos_remote_exec" "configure_login_shell_profile" {
enos_vault_init.leader,
enos_vault_unseal.leader,
]
for_each = var.target_hosts
for_each = var.hosts

environment = {
VAULT_ADDR = "http://127.0.0.1:8200"
VAULT_ADDR = module.start_vault.api_addr_localhost
VAULT_TOKEN = var.root_token != null ? var.root_token : try(enos_vault_init.leader[0].root_token, "_")
VAULT_INSTALL_DIR = var.install_dir
}
@@ -294,7 +299,7 @@ resource "enos_file" "motd" {
depends_on = [
enos_remote_exec.configure_login_shell_profile
]
for_each = var.target_hosts
for_each = var.hosts

destination = "/etc/motd"
content = <<EOF
@@ -343,7 +348,7 @@ resource "enos_remote_exec" "create_audit_log_dir" {

transport = {
ssh = {
host = var.target_hosts[each.value].public_ip
host = var.hosts[each.value].public_ip
}
}
}
@@ -364,6 +369,7 @@ resource "enos_remote_exec" "start_audit_socket_listener" {
])

environment = {
IP_VERSION = var.ip_version
NETCAT_COMMAND = local.netcat_command[enos_host_info.hosts[each.key].distro]
SOCKET_PORT = local.audit_socket_port
}
@@ -372,7 +378,7 @@ resource "enos_remote_exec" "start_audit_socket_listener" {

transport = {
ssh = {
host = var.target_hosts[each.value].public_ip
host = var.hosts[each.value].public_ip
}
}
}
@@ -388,9 +394,10 @@ resource "enos_remote_exec" "enable_audit_devices" {
])

environment = {
IP_VERSION = var.ip_version
LOG_FILE_PATH = local.audit_device_file_path
SOCKET_PORT = local.audit_socket_port
VAULT_ADDR = "http://127.0.0.1:8200"
VAULT_ADDR = module.start_vault.api_addr_localhost
VAULT_BIN_PATH = local.bin_path
VAULT_TOKEN = enos_vault_init.leader[each.key].root_token
}
@@ -399,7 +406,7 @@ resource "enos_remote_exec" "enable_audit_devices" {

transport = {
ssh = {
host = var.target_hosts[each.key].public_ip
host = var.hosts[each.key].public_ip
}
}
}

@@ -1,6 +1,16 @@
# Copyright (c) HashiCorp, Inc.
# SPDX-License-Identifier: BUSL-1.1

output "api_addr_localhost" {
description = "The localhost API address"
value = module.start_vault.api_addr_localhost
}

output "api_addrs" {
description = "The external API addresses of all nodes the cluster"
value = module.start_vault.api_addrs
}

output "audit_device_file_path" {
description = "The file path for the audit device, if enabled"
value = var.enable_audit_devices ? local.audit_device_file_path : "file audit device not enabled"
@@ -11,14 +21,48 @@ output "cluster_name" {
value = var.cluster_name
}

output "cluster_port" {
description = "The Vault cluster request forwarding listener port"
value = module.start_vault.cluster_port
}

output "external_storage_port" {
description = "The Vault cluster non-raft external storage port"
value = module.start_vault.external_storage_port
}

output "hosts" {
description = "The vault cluster instances that were created"

value = var.hosts
}

output "ipv6s" {
description = "Vault cluster target host ipv6 addresses"
value = [for host in var.hosts : host.ipv6]
}

output "keys_base64" {
value = try(module.start_vault.keys_base64, null)
}

output "keys_base64_secondary" {
value = try(module.start_vault.keys_base64_secondary, null)
}

output "listener_port" {
description = "The Vault cluster TCP listener port"
value = module.start_vault.listener_port
}

output "private_ips" {
description = "Vault cluster target host private_ips"
value = [for host in var.target_hosts : host.private_ip]
value = [for host in var.hosts : host.private_ip]
}

output "public_ips" {
description = "Vault cluster target host public_ips"
value = [for host in var.target_hosts : host.public_ip]
value = [for host in var.hosts : host.public_ip]
}

output "recovery_keys_b64" {
@@ -41,12 +85,6 @@ output "root_token" {
value = coalesce(var.root_token, try(enos_vault_init.leader[0].root_token, null), "none")
}

output "target_hosts" {
description = "The vault cluster instances that were created"

value = var.target_hosts
}

output "unseal_keys_b64" {
value = try(enos_vault_init.leader[0].unseal_keys_b64, [])
}
@@ -62,11 +100,3 @@ output "unseal_shares" {
output "unseal_threshold" {
value = try(enos_vault_init.leader[0].unseal_keys_threshold, -1)
}

output "keys_base64" {
value = try(module.start_vault.keys_base64, null)
}

output "keys_base64_secondary" {
value = try(module.start_vault.keys_base64_secondary, null)
}

@@ -9,6 +9,7 @@ fail() {
exit 1
}

[[ -z "$IP_VERSION" ]] && fail "IP_VERSION env variable has not been set"
[[ -z "$LOG_FILE_PATH" ]] && fail "LOG_FILE_PATH env variable has not been set"
[[ -z "$SOCKET_PORT" ]] && fail "SOCKET_PORT env variable has not been set"
[[ -z "$VAULT_ADDR" ]] && fail "VAULT_ADDR env variable has not been set"
@@ -24,7 +25,11 @@ enable_syslog_audit_device(){
}

enable_socket_audit_device() {
"$VAULT_BIN_PATH" audit enable socket address="127.0.0.1:$SOCKET_PORT"
if [ "$IP_VERSION" = "4" ]; then
"$VAULT_BIN_PATH" audit enable socket address="127.0.0.1:$SOCKET_PORT"
else
"$VAULT_BIN_PATH" audit enable socket address="[::1]:$SOCKET_PORT"
fi
}

main() {

@@ -9,9 +9,16 @@ fail() {
exit 1
}

[[ -z "$IP_VERSION" ]] && fail "IP_VERSION env variable has not been set"
[[ -z "$NETCAT_COMMAND" ]] && fail "NETCAT_COMMAND env variable has not been set"
[[ -z "$SOCKET_PORT" ]] && fail "SOCKET_PORT env variable has not been set"

if [ "$IP_VERSION" = "4" ]; then
export SOCKET_ADDR="127.0.0.1"
else
export SOCKET_ADDR="::1"
fi

socket_listener_procs() {
pgrep -x "${NETCAT_COMMAND}"
}
@@ -21,7 +28,17 @@ kill_socket_listener() {
}

test_socket_listener() {
"${NETCAT_COMMAND}" -zvw 2 127.0.0.1 "$SOCKET_PORT" < /dev/null
case $IP_VERSION in
4)
"${NETCAT_COMMAND}" -zvw 2 "${SOCKET_ADDR}" "$SOCKET_PORT" < /dev/null
;;
6)
"${NETCAT_COMMAND}" -6 -zvw 2 "${SOCKET_ADDR}" "$SOCKET_PORT" < /dev/null
;;
*)
fail "unknown IP_VERSION: $IP_VERSION"
;;
esac
}

start_socket_listener() {
@@ -33,7 +50,17 @@ start_socket_listener() {
# Run nc to listen on port 9090 for the socket auditor. We spawn nc
# with nohup to ensure that the listener doesn't expect a SIGHUP and
# thus block the SSH session from exiting or terminating on exit.
nohup nc -kl "$SOCKET_PORT" >> /tmp/vault-socket.log 2>&1 < /dev/null &
case $IP_VERSION in
4)
nohup nc -kl "$SOCKET_PORT" >> /tmp/vault-socket.log 2>&1 < /dev/null &
;;
6)
nohup nc -6 -kl "$SOCKET_PORT" >> /tmp/vault-socket.log 2>&1 < /dev/null &
;;
*)
fail "unknown IP_VERSION: $IP_VERSION"
;;
esac
}

read_log() {
@@ -43,7 +70,6 @@ read_log() {
}

main() {

if socket_listener_procs; then
# Clean up old nc's that might not be working
kill_socket_listener

@@ -30,6 +30,18 @@ variable "cluster_name" {
default = null
}

variable "cluster_port" {
type = number
description = "The cluster port for Vault to listen on"
default = 8201
}

variable "cluster_tag_key" {
type = string
description = "The Vault cluster tag key"
default = "retry_join"
}

variable "config_dir" {
type = string
description = "The directory to use for Vault configuration"
@@ -112,12 +124,27 @@ variable "enable_audit_devices" {
default = true
}

variable "external_storage_port" {
type = number
description = "The port to connect to when using external storage"
default = 8500
}

variable "force_unseal" {
type = bool
description = "Always unseal the Vault cluster even if we're not initializing it"
default = false
}

variable "hosts" {
description = "The target machines host addresses to use for the Vault cluster"
type = map(object({
ipv6 = string
private_ip = string
public_ip = string
}))
}

variable "initialize_cluster" {
type = bool
description = "Initialize the Vault cluster"
@@ -130,6 +157,16 @@ variable "install_dir" {
default = "/opt/vault/bin"
}

variable "ip_version" {
type = number
description = "The IP version to use for the Vault TCP listeners"

validation {
condition = contains([4, 6], var.ip_version)
error_message = "The ip_version must be either 4 or 6"
}
}

variable "license" {
type = string
sensitive = true
@@ -137,6 +174,12 @@ variable "license" {
default = null
}

variable "listener_port" {
type = number
description = "The port for Vault to listen on"
default = 8200
}

variable "local_artifact_path" {
type = string
description = "The path to a locally built vault artifact to install. It can be a zip archive, RPM, or Debian package"
@@ -246,11 +289,3 @@ variable "storage_node_prefix" {
description = "A prefix to use for each node in the Vault storage configuration"
default = "node"
}

variable "target_hosts" {
description = "The target machines host addresses to use for the Vault cluster"
type = map(object({
private_ip = string
public_ip = string
}))
}

@@ -1,6 +1,13 @@
# Copyright (c) HashiCorp, Inc.
# SPDX-License-Identifier: BUSL-1.1

/*

Given our expected hosts, determine which is currently the leader and verify that all expected
nodes are either the leader or a follower.

*/
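A minimal, hypothetical sketch of how a scenario might call this module (the module name and source path are assumptions; the inputs mirror the variables declared below):

module "vault_cluster_ips" {
source = "../vault_get_cluster_ips"

hosts             = module.vault_cluster.hosts
ip_version        = 6
vault_addr        = module.vault_cluster.api_addr_localhost
vault_install_dir = "/opt/vault/bin"
vault_root_token  = module.vault_cluster.root_token
}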

terraform {
required_providers {
enos = {
@@ -9,6 +16,30 @@ terraform {
}
}

variable "hosts" {
type = map(object({
ipv6 = string
private_ip = string
public_ip = string
}))
description = "The Vault cluster hosts that are expected to be in the cluster"
}

variable "ip_version" {
type = number
description = "The IP version used for the Vault TCP listener"

validation {
condition = contains([4, 6], var.ip_version)
error_message = "The ip_version must be either 4 or 6"
}
}

variable "vault_addr" {
type = string
description = "The local vault API listen address"
}

variable "vault_install_dir" {
type = string
description = "The directory where the Vault binary will be installed"
@@ -19,73 +50,100 @@ variable "vault_root_token" {
description = "The vault root token"
}

variable "vault_instance_count" {
type = number
description = "The number of instances in the vault cluster"
}

variable "vault_hosts" {
type = map(object({
private_ip = string
public_ip = string
}))
description = "The vault cluster hosts. These are required to map private ip addresses to public addresses."
}

locals {
follower_hosts_list = [for idx in range(var.vault_instance_count - 1) : {
private_ip = local.follower_private_ips[idx]
public_ip = local.follower_public_ips[idx]
}
follower_hosts_list = [
for idx in range(length(var.hosts)) : var.hosts[idx] if var.ip_version == 6 ?
contains(tolist(local.follower_ipv6s), var.hosts[idx].ipv6) :
contains(tolist(local.follower_private_ips), var.hosts[idx].private_ip)
]
follower_hosts = {
for idx in range(var.vault_instance_count - 1) : idx => try(local.follower_hosts_list[idx], null)
for idx in range(local.host_count - 1) : idx => try(local.follower_hosts_list[idx], null)
}
follower_private_ips = jsondecode(enos_remote_exec.get_follower_private_ips.stdout)
follower_public_ips = [for idx in range(var.vault_instance_count) : var.vault_hosts[idx].public_ip if contains(
local.follower_private_ips, var.vault_hosts[idx].private_ip)
follower_ipv6s = jsondecode(enos_remote_exec.follower_ipv6s.stdout)
follower_private_ips = jsondecode(enos_remote_exec.follower_private_ipv4s.stdout)
follower_public_ips = [for host in local.follower_hosts : host.public_ip]
host_count = length(var.hosts)
ipv6s = [for k, v in values(tomap(var.hosts)) : tostring(v["ipv6"])]
leader_host_list = [
for idx in range(length(var.hosts)) : var.hosts[idx] if var.ip_version == 6 ?
var.hosts[idx].ipv6 == local.leader_ipv6 :
var.hosts[idx].private_ip == local.leader_private_ip
]
leader_host = {
private_ip = local.leader_private_ip
public_ip = local.leader_public_ip
}
leader_private_ip = trimspace(enos_remote_exec.get_leader_private_ip.stdout)
leader_public_ip = element([
for idx in range(var.vault_instance_count) : var.vault_hosts[idx].public_ip if var.vault_hosts[idx].private_ip == local.leader_private_ip
], 0)
private_ips = [for k, v in values(tomap(var.vault_hosts)) : tostring(v["private_ip"])]
leader_host = try(local.leader_host_list[0], null)
leader_ipv6 = trimspace(enos_remote_exec.leader_ipv6.stdout)
leader_private_ip = trimspace(enos_remote_exec.leader_private_ipv4.stdout)
leader_public_ip = try(local.leader_host.public_ip, null)
private_ips = [for k, v in values(tomap(var.hosts)) : tostring(v["private_ip"])]
}

resource "enos_remote_exec" "get_leader_private_ip" {
resource "enos_remote_exec" "leader_private_ipv4" {
environment = {
VAULT_ADDR = "http://127.0.0.1:8200"
VAULT_TOKEN = var.vault_root_token
IP_VERSION = var.ip_version
VAULT_ADDR = var.vault_addr
VAULT_INSTALL_DIR = var.vault_install_dir
VAULT_TOKEN = var.vault_root_token
}

scripts = [abspath("${path.module}/scripts/get-leader-private-ip.sh")]
scripts = [abspath("${path.module}/scripts/get-leader-ipv4.sh")]

transport = {
ssh = {
host = var.vault_hosts[0].public_ip
host = var.hosts[0].public_ip
}
}
}

resource "enos_remote_exec" "get_follower_private_ips" {
resource "enos_remote_exec" "leader_ipv6" {
environment = {
VAULT_ADDR = "http://127.0.0.1:8200"
VAULT_TOKEN = var.vault_root_token
VAULT_LEADER_PRIVATE_IP = local.leader_private_ip
VAULT_INSTANCE_PRIVATE_IPS = jsonencode(local.private_ips)
VAULT_INSTALL_DIR = var.vault_install_dir
IP_VERSION = var.ip_version
VAULT_ADDR = var.vault_addr
VAULT_INSTALL_DIR = var.vault_install_dir
VAULT_TOKEN = var.vault_root_token
}

scripts = [abspath("${path.module}/scripts/get-follower-private-ips.sh")]
scripts = [abspath("${path.module}/scripts/get-leader-ipv6.sh")]

transport = {
ssh = {
host = var.vault_hosts[0].public_ip
host = var.hosts[0].public_ip
}
}
}

resource "enos_remote_exec" "follower_private_ipv4s" {
environment = {
IP_VERSION = var.ip_version
VAULT_ADDR = var.vault_addr
VAULT_INSTALL_DIR = var.vault_install_dir
VAULT_LEADER_PRIVATE_IP = local.leader_private_ip
VAULT_PRIVATE_IPS = jsonencode(local.private_ips)
VAULT_TOKEN = var.vault_root_token
}

scripts = [abspath("${path.module}/scripts/get-follower-ipv4s.sh")]

transport = {
ssh = {
host = var.hosts[0].public_ip
}
}
}

resource "enos_remote_exec" "follower_ipv6s" {
environment = {
IP_VERSION = var.ip_version
VAULT_ADDR = var.vault_addr
VAULT_INSTALL_DIR = var.vault_install_dir
VAULT_IPV6S = jsonencode(local.ipv6s)
VAULT_LEADER_IPV6 = local.leader_ipv6
VAULT_TOKEN = var.vault_root_token
}

scripts = [abspath("${path.module}/scripts/get-follower-ipv6s.sh")]

transport = {
ssh = {
host = var.hosts[0].public_ip
}
}
}
@@ -94,6 +152,10 @@ output "follower_hosts" {
value = local.follower_hosts
}

output "follower_ipv6s" {
value = local.follower_ipv6s
}

output "follower_private_ips" {
value = local.follower_private_ips
}
@@ -106,6 +168,10 @@ output "leader_host" {
value = local.leader_host
}

output "leader_ipv6" {
value = local.leader_ipv6
}

output "leader_private_ip" {
value = local.leader_private_ip
}

@@ -0,0 +1,86 @@
#!/usr/bin/env bash
# Copyright (c) HashiCorp, Inc.
# SPDX-License-Identifier: BUSL-1.1


set -e

function fail() {
echo "$1" 1>&2
exit 1
}

[[ -z "$IP_VERSION" ]] && fail "IP_VERSION env variable has not been set"
[[ -z "$VAULT_ADDR" ]] && fail "VAULT_ADDR env variable has not been set"
[[ -z "$VAULT_INSTALL_DIR" ]] && fail "VAULT_INSTALL_DIR env variable has not been set"
[[ -z "$VAULT_TOKEN" ]] && fail "VAULT_TOKEN env variable has not been set"

binpath=${VAULT_INSTALL_DIR}/vault
test -x "$binpath" || fail "Unable to locate vault binary at $binpath"

getFollowerPrivateIPsFromOperatorMembers() {
if members=$($binpath operator members -format json); then
if followers=$(echo "$members" | jq -e --argjson expected "$VAULT_PRIVATE_IPS" -c '.Nodes | map(select(any(.; .active_node==false)) | .api_address | scan("[0-9]+.[0-9]+.[0-9]+.[0-9]+")) as $followers | $expected - ($expected - $followers)'); then
# Make sure that we got all the followers
if jq -e --argjson expected "$VAULT_PRIVATE_IPS" --argjson followers "$followers" -ne '$expected | length as $el | $followers | length as $fl | $fl == $el-1' > /dev/null; then
echo "$followers"
return 0
fi
fi
fi

return 1
}
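To make the jq intersection above concrete, here is a self-contained illustration with invented sample data (not part of the module):

expected='["10.13.10.10","10.13.10.11","10.13.10.12"]'
members='{"Nodes":[
{"api_address":"http://10.13.10.10:8200","active_node":true},
{"api_address":"http://10.13.10.11:8200","active_node":false},
{"api_address":"http://10.13.10.12:8200","active_node":false}]}'
echo "$members" | jq -e --argjson expected "$expected" -c \
'.Nodes | map(select(any(.; .active_node==false)) | .api_address | scan("[0-9]+.[0-9]+.[0-9]+.[0-9]+")) as $followers | $expected - ($expected - $followers)'
# => ["10.13.10.11","10.13.10.12"]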

removeIP() {
local needle
local haystack
needle=$1
haystack=$2
if remain=$(jq -e --arg ip "$needle" -c '. | map(select(.!=$ip))' <<< "$haystack"); then
if [[ -n "$remain" ]]; then
echo "$remain"
return 0
fi
fi

return 1
}

count=0
retries=10
while :; do
case $IP_VERSION in
4)
[[ -z "$VAULT_PRIVATE_IPS" ]] && fail "VAULT_PRIVATE_IPS env variable has not been set"
[[ -z "$VAULT_LEADER_PRIVATE_IP" ]] && fail "VAULT_LEADER_PRIVATE_IP env variable has not been set"

# Vault >= 1.10.x has the operator members. If we have that then we'll use it.
if $binpath operator -h 2>&1 | grep members &> /dev/null; then
if followers=$(getFollowerPrivateIPsFromOperatorMembers); then
echo "$followers"
exit 0
fi
else
removeIP "$VAULT_LEADER_PRIVATE_IP" "$VAULT_PRIVATE_IPS"

exit $?
fi
;;
6)
echo '[]'
exit 0
;;
*)
fail "unknown IP_VERSION: $IP_VERSION"
;;
esac

wait=$((2 ** count))
count=$((count + 1))
if [ "$count" -lt "$retries" ]; then
sleep "$wait"
else
fail "Timed out trying to obtain the cluster followers"
fi
done
@@ -0,0 +1,88 @@
#!/usr/bin/env bash
# Copyright (c) HashiCorp, Inc.
# SPDX-License-Identifier: BUSL-1.1


set -e

function fail() {
echo "$1" 1>&2
exit 1
}

[[ -z "$IP_VERSION" ]] && fail "IP_VERSION env variable has not been set"
[[ -z "$VAULT_ADDR" ]] && fail "VAULT_ADDR env variable has not been set"
[[ -z "$VAULT_INSTALL_DIR" ]] && fail "VAULT_INSTALL_DIR env variable has not been set"
[[ -z "$VAULT_TOKEN" ]] && fail "VAULT_TOKEN env variable has not been set"

echo "$VAULT_IPV6S" > /tmp/vaultipv6s

binpath=${VAULT_INSTALL_DIR}/vault
test -x "$binpath" || fail "Unable to locate vault binary at $binpath"

getFollowerIPV6sFromOperatorMembers() {
if members=$($binpath operator members -format json); then
if followers=$(echo "$members" | jq -e --argjson expected "$VAULT_IPV6S" -c '.Nodes | map(select(any(.; .active_node==false)) | .api_address | scan("\\[(.+)\\]") | .[0]) as $followers | $expected - ($expected - $followers)'); then
# Make sure that we got all the followers
if jq -e --argjson expected "$VAULT_IPV6S" --argjson followers "$followers" -ne '$expected | length as $el | $followers | length as $fl | $fl == $el-1' > /dev/null; then
echo "$followers"
return 0
fi
fi
fi

return 1
}

removeIP() {
local needle
local haystack
needle=$1
haystack=$2
if remain=$(jq -e --arg ip "$needle" -c '. | map(select(.!=$ip))' <<< "$haystack"); then
if [[ -n "$remain" ]]; then
echo "$remain"
return 0
fi
fi

return 1
}

count=0
retries=10
while :; do
case $IP_VERSION in
4)
echo "[]"
exit 0
;;
6)
[[ -z "$VAULT_IPV6S" ]] && fail "VAULT_IPV6S env variable has not been set"
[[ -z "$VAULT_LEADER_IPV6" ]] && fail "VAULT_LEADER_IPV6 env variable has not been set"

# Vault >= 1.10.x has the operator members. If we have that then we'll use it.
if $binpath operator -h 2>&1 | grep members &> /dev/null; then
if followers=$(getFollowerIPV6sFromOperatorMembers); then
echo "$followers"
exit 0
fi
else
[[ -z "$VAULT_LEADER_IPV6" ]] && fail "VAULT_LEADER_IPV6 env variable has not been set"
removeIP "$VAULT_LEADER_IPV6" "$VAULT_IPV6S"
exit $?
fi
;;
*)
fail "unknown IP_VERSION: $IP_VERSION"
;;
esac

wait=$((2 ** count))
count=$((count + 1))
if [ "$count" -lt "$retries" ]; then
sleep "$wait"
else
fail "Timed out trying to obtain the cluster followers"
fi
done
@@ -1,55 +0,0 @@
#!/usr/bin/env bash
# Copyright (c) HashiCorp, Inc.
# SPDX-License-Identifier: BUSL-1.1


set -e

function fail() {
echo "$1" 1>&2
exit 1
}

[[ -z "$VAULT_ADDR" ]] && fail "VAULT_ADDR env variable has not been set"
[[ -z "$VAULT_INSTALL_DIR" ]] && fail "VAULT_INSTALL_DIR env variable has not been set"
[[ -z "$VAULT_INSTANCE_PRIVATE_IPS" ]] && fail "VAULT_INSTANCE_PRIVATE_IPS env variable has not been set"
[[ -z "$VAULT_LEADER_PRIVATE_IP" ]] && fail "VAULT_LEADER_PRIVATE_IP env variable has not been set"
[[ -z "$VAULT_TOKEN" ]] && fail "VAULT_TOKEN env variable has not been set"

binpath=${VAULT_INSTALL_DIR}/vault
test -x "$binpath" || fail "Unable to locate vault binary at $binpath"

count=0
retries=10
while :; do
# Vault >= 1.10.x has the operator members. If we have that then we'll use it.
if $binpath operator -h 2>&1 | grep members &> /dev/null; then
# Get the folllowers that are part of our private ips.
if members=$($binpath operator members -format json); then
if followers=$(echo "$members" | jq --argjson expected "$VAULT_INSTANCE_PRIVATE_IPS" -c '.Nodes | map(select(any(.; .active_node==false)) | .api_address | scan("[0-9]+.[0-9]+.[0-9]+.[0-9]+")) as $followers | $expected - ($expected - $followers)'); then
# Make sure that we got all the followers
if jq --argjson expected "$VAULT_INSTANCE_PRIVATE_IPS" --argjson followers "$followers" -ne '$expected | length as $el | $followers | length as $fl | $fl == $el-1' > /dev/null; then
echo "$followers"
exit 0
fi
fi
fi
else
# We're using an old version of vault so we'll just return ips that don't match the leader.
# Get the public ip addresses of the followers
if followers=$(jq --arg ip "$VAULT_LEADER_PRIVATE_IP" -c '. | map(select(.!=$ip))' <<< "$VAULT_INSTANCE_PRIVATE_IPS"); then
if [[ -n "$followers" ]]; then
echo "$followers"
exit 0
fi
fi
fi

wait=$((2 ** count))
count=$((count + 1))
if [ "$count" -lt "$retries" ]; then
sleep "$wait"
else
fail "Timed out trying to obtain the cluster followers"
fi
done
@@ -10,6 +10,7 @@ function fail() {
exit 1
}

[[ -z "$IP_VERSION" ]] && fail "IP_VERSION env variable has not been set"
[[ -z "$VAULT_ADDR" ]] && fail "VAULT_ADDR env variable has not been set"
[[ -z "$VAULT_INSTALL_DIR" ]] && fail "VAULT_INSTALL_DIR env variable has not been set"
[[ -z "$VAULT_TOKEN" ]] && fail "VAULT_TOKEN env variable has not been set"
@@ -17,14 +18,12 @@ function fail() {
binpath=${VAULT_INSTALL_DIR}/vault
test -x "$binpath" || fail "Unable to locate vault binary at $binpath"

count=0
retries=5
while :; do
findLeaderPrivateIP() {
# Find the leader private IP address
if ip=$($binpath read sys/leader -format=json | jq -r '.data.leader_address | scan("[0-9]+.[0-9]+.[0-9]+.[0-9]+")'); then
if [[ -n "$ip" ]]; then
echo "$ip"
exit 0
return 0
fi
fi

@@ -32,10 +31,32 @@ while :; do
if ip=$($binpath status -format json | jq -r '.leader_address | scan("[0-9]+.[0-9]+.[0-9]+.[0-9]+")'); then
if [[ -n "$ip" ]]; then
echo "$ip"
exit 0
return 0
fi
fi

return 1
}

count=0
retries=5
while :; do
case $IP_VERSION in
4)
# Find the leader private IP address
if ip=$(findLeaderPrivateIP); then
echo "$ip"
exit 0
fi
;;
6)
exit 0
;;
*)
fail "unknown IP_VERSION: $IP_VERSION"
;;
esac

wait=$((2 ** count))
count=$((count + 1))
if [ "$count" -lt "$retries" ]; then
@@ -0,0 +1,67 @@
#!/usr/bin/env bash
# Copyright (c) HashiCorp, Inc.
# SPDX-License-Identifier: BUSL-1.1


set -e

function fail() {
echo "$1" 1>&2
exit 1
}

[[ -z "$IP_VERSION" ]] && fail "IP_VERSION env variable has not been set"
[[ -z "$VAULT_ADDR" ]] && fail "VAULT_ADDR env variable has not been set"
[[ -z "$VAULT_INSTALL_DIR" ]] && fail "VAULT_INSTALL_DIR env variable has not been set"
[[ -z "$VAULT_TOKEN" ]] && fail "VAULT_TOKEN env variable has not been set"

binpath=${VAULT_INSTALL_DIR}/vault
test -x "$binpath" || fail "Unable to locate vault binary at $binpath"

findLeaderIPV6() {
# Find the leader private IP address
if ip=$($binpath read sys/leader -format=json | jq -r '.data.leader_address | scan("\\[(.+)\\]") | .[0]'); then
if [[ -n "$ip" ]]; then
echo "$ip"
return 0
fi
fi

# Some older versions of vault don't support reading sys/leader. Try falling back to the cli status.
if ip=$($binpath status -format json | jq -r '.leader_address | scan("\\[(.+)\\]") | .[0]'); then
if [[ -n "$ip" ]]; then
echo "$ip"
return 0
fi
fi

return 1
}
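As a quick, invented example of the bracket capture used above:

echo '{"data":{"leader_address":"https://[2001:db8::10]:8200"}}' \
| jq -r '.data.leader_address | scan("\\[(.+)\\]") | .[0]'
# => 2001:db8::10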

count=0
retries=5
while :; do
# Find the leader private IP address
case $IP_VERSION in
4)
exit 0
;;
6)
if ip=$(findLeaderIPV6); then
echo "$ip"
exit 0
fi
;;
*)
fail "unknown IP_VERSION: $IP_VERSION"
;;
esac

wait=$((2 ** count))
count=$((count + 1))
if [ "$count" -lt "$retries" ]; then
sleep "$wait"
else
fail "Timed out trying to obtain the cluster leader"
fi
done
@@ -12,22 +12,28 @@ terraform {
}
}

variable "vault_root_token" {
type = string
description = "The Vault root token"
}

variable "vault_instances" {
variable "hosts" {
type = map(object({
ipv6 = string
private_ip = string
public_ip = string
}))
description = "The Vault cluster instances that were created"
}

variable "vault_instance_count" {
variable "ip_version" {
type = number
description = "How many vault instances are in the cluster"
description = "The IP version to use for the Vault TCP listeners"

validation {
condition = contains([4, 6], var.ip_version)
error_message = "The ip_version must be either 4 or 6"
}
}

variable "vault_addr" {
type = string
description = "The local vault API listen address"
}

variable "vault_install_dir" {
@@ -41,29 +47,34 @@ variable "vault_proxy_pidfile" {
default = "/tmp/pidfile"
}

variable "vault_proxy_port" {
type = number
description = "The Vault Proxy listener port"
}

variable "vault_root_token" {
type = string
description = "The Vault root token"
}

locals {
vault_instances = {
for idx in range(var.vault_instance_count) : idx => {
public_ip = values(var.vault_instances)[idx].public_ip
private_ip = values(var.vault_instances)[idx].private_ip
}
}
vault_proxy_address = "127.0.0.1:8100"
vault_proxy_address = "${var.ip_version == 4 ? "127.0.0.1" : "[::1]"}:${var.vault_proxy_port}"
}

resource "enos_remote_exec" "set_up_approle_auth_and_proxy" {
environment = {
VAULT_ADDR = var.vault_addr
VAULT_INSTALL_DIR = var.vault_install_dir
VAULT_TOKEN = var.vault_root_token
VAULT_PROXY_PIDFILE = var.vault_proxy_pidfile
VAULT_PROXY_ADDRESS = local.vault_proxy_address
VAULT_PROXY_PIDFILE = var.vault_proxy_pidfile
VAULT_TOKEN = var.vault_root_token
}

scripts = [abspath("${path.module}/scripts/set-up-approle-and-proxy.sh")]

transport = {
ssh = {
host = local.vault_instances[0].public_ip
host = var.hosts[0].public_ip
}
}
}
@@ -79,7 +90,7 @@ resource "enos_remote_exec" "use_proxy" {

transport = {
ssh = {
host = local.vault_instances[0].public_ip
host = var.hosts[0].public_ip
}
}

@@ -14,7 +14,7 @@ fail() {

test -x "$binpath" || fail "unable to locate vault binary at $binpath"

export VAULT_ADDR='http://127.0.0.1:8200'
[[ -z "$VAULT_ADDR" ]] && fail "VAULT_ADDR env variable has not been set"
[[ -z "$VAULT_TOKEN" ]] && fail "VAULT_TOKEN env variable has not been set"

# If approle was already enabled, disable it as we're about to re-enable it (the || true is so we don't fail if it doesn't already exist)
@@ -33,20 +33,20 @@ fi
SECRETID=$($binpath write -f --format=json auth/approle/role/proxy-role/secret-id | jq -r '.data.secret_id')

if [[ "$SECRETID" == '' ]]; then
fail "expected SECRETID to be nonempty, but it is empty"
fail "vault write -f --format=json auth/approle/role/proxy-role/secret-id did not return a .data.secret_id"
fi

echo "$ROLEID" > /tmp/role-id
echo "$SECRETID" > /tmp/secret-id

# Write the Vault Proxy's configuration to /tmp/vault-proxy.hcl
# The Proxy references the fixed Vault server address of http://127.0.0.1:8200
# The Proxy itself listens at the address http://127.0.0.1:8100
# The Proxy references the Vault server address passed in as $VAULT_ADDR
# The Proxy itself listens at the address passed in as $VAULT_PROXY_ADDRESS
cat > /tmp/vault-proxy.hcl <<- EOM
pid_file = "${VAULT_PROXY_PIDFILE}"

vault {
address = "http://127.0.0.1:8200"
address = "${VAULT_ADDR}"
tls_skip_verify = true
retry {
num_retries = 10

@@ -5,13 +5,18 @@

set -e

binpath=${VAULT_INSTALL_DIR}/vault

fail() {
echo "$1" 1>&2
return 1
}

[[ -z "$VAULT_PROXY_ADDRESS" ]] && fail "VAULT_ADDR env variable has not been set"
|
||||
[[ -z "$VAULT_PROXY_PIDFILE" ]] && fail "VAULT_ADDR env variable has not been set"
[[ -z "$VAULT_INSTALL_DIR" ]] && fail "VAULT_INSTALL_DIR env variable has not been set"

binpath=${VAULT_INSTALL_DIR}/vault
test -x "$binpath" || fail "unable to locate vault binary at $binpath"

# Will cause the Vault CLI to communicate with the Vault Proxy, since it
@@ -26,7 +31,9 @@ unset VAULT_TOKEN
# var) to lookup the details of the Proxy's token and make sure that the
# .data.path field contains 'auth/approle/login', thus confirming that the Proxy
# automatically authenticated itself.
$binpath token lookup -format=json | jq -r '.data.path' | grep -q 'auth/approle/login'
if ! $binpath token lookup -format=json | jq -Mer --arg expected "auth/approle/login" '.data.path == $expected'; then
fail "expected proxy to automatically authenticate using 'auth/approle/login', got: '$($binpath token lookup -format=json | jq -r '.data.path')'"
fi
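For clarity, here is how the check above treats a hypothetical token lookup response (sample data only):

echo '{"data":{"path":"auth/approle/login"}}' \
| jq -Mer --arg expected "auth/approle/login" '.data.path == $expected'
# prints "true" and exits 0; any other path prints "false" and the -e flag makes jq exit non-zero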

# Now that we're done, kill the proxy
pkill -F "${VAULT_PROXY_PIDFILE}" || true

@@ -9,20 +9,23 @@ terraform {
}
}

variable "vault_cluster_addr_port" {
description = "The Raft cluster address port"
type = string
default = "8201"
variable "hosts" {
type = map(object({
ipv6 = string
private_ip = string
public_ip = string
}))
description = "The old vault nodes to be removed"
}

variable "vault_install_dir" {
type = string
description = "The directory where the Vault binary will be installed"
}

variable "vault_instance_count" {
variable "ip_version" {
type = number
description = "How many vault instances are in the cluster"
description = "The IP version used for the Vault TCP listener"

validation {
condition = contains([4, 6], var.ip_version)
error_message = "The ip_version must be either 4 or 6"
}
}

variable "operator_instance" {
@@ -30,12 +33,19 @@ variable "operator_instance" {
description = "The ip address of the operator (Voter) node"
}

variable "remove_vault_instances" {
type = map(object({
private_ip = string
public_ip = string
}))
description = "The old vault nodes to be removed"
variable "vault_addr" {
type = string
description = "The local vault API listen address"
}

variable "vault_cluster_addr_port" {
description = "The Raft cluster address port"
type = string
}

variable "vault_install_dir" {
type = string
description = "The directory where the Vault binary will be installed"
}

variable "vault_root_token" {
@@ -43,22 +53,13 @@ variable "vault_root_token" {
description = "The vault root token"
}

locals {
instances = {
for idx in range(var.vault_instance_count) : idx => {
public_ip = values(var.remove_vault_instances)[idx].public_ip
private_ip = values(var.remove_vault_instances)[idx].private_ip
}
}
}

resource "enos_remote_exec" "vault_raft_remove_peer" {
for_each = local.instances
for_each = var.hosts

environment = {
REMOVE_VAULT_CLUSTER_ADDR = "${each.value.private_ip}:${var.vault_cluster_addr_port}"
REMOVE_VAULT_CLUSTER_ADDR = "${var.ip_version == 4 ? "${each.value.private_ip}" : "[${each.value.ipv6}]"}:${var.vault_cluster_addr_port}"
VAULT_TOKEN = var.vault_root_token
VAULT_ADDR = "http://localhost:8200"
VAULT_ADDR = var.vault_addr
VAULT_INSTALL_DIR = var.vault_install_dir
}


@@ -9,25 +9,19 @@ terraform {
}
}

variable "vault_cluster_addr_port" {
description = "The Raft cluster address port"
type = string
default = "8201"
}

variable "vault_install_dir" {
type = string
description = "The directory where the Vault binary will be installed"
}

variable "primary_leader_public_ip" {
type = string
description = "Vault primary cluster leader Public IP address"
}

variable "primary_leader_private_ip" {
variable "vault_addr" {
type = string
description = "Vault primary cluster leader Private IP address"
description = "The local vault API listen address"
}

variable "vault_install_dir" {
type = string
description = "The directory where the Vault binary will be installed"
}

variable "vault_root_token" {
@@ -37,7 +31,7 @@ variable "vault_root_token" {

resource "enos_remote_exec" "configure_pr_primary" {
environment = {
VAULT_ADDR = "http://127.0.0.1:8200"
VAULT_ADDR = var.vault_addr
VAULT_TOKEN = var.vault_root_token
VAULT_INSTALL_DIR = var.vault_install_dir
}

@@ -9,25 +9,19 @@ terraform {
}
}

variable "vault_cluster_addr_port" {
description = "The Raft cluster address port"
type = string
default = "8201"
}

variable "vault_install_dir" {
type = string
description = "The directory where the Vault binary will be installed"
}

variable "secondary_leader_public_ip" {
type = string
description = "Vault secondary cluster leader Public IP address"
}

variable "secondary_leader_private_ip" {
variable "vault_addr" {
type = string
description = "Vault secondary cluster leader Private IP address"
description = "The local vault API listen address"
}

variable "vault_install_dir" {
type = string
description = "The directory where the Vault binary will be installed"
}

variable "vault_root_token" {
@@ -40,17 +34,13 @@ variable "wrapping_token" {
description = "The wrapping token created on primary cluster"
}

locals {
wrapping_token = var.wrapping_token
}

resource "enos_remote_exec" "configure_pr_secondary" {
environment = {
VAULT_ADDR = "http://127.0.0.1:8200"
VAULT_ADDR = var.vault_addr
VAULT_TOKEN = var.vault_root_token
}

inline = ["${var.vault_install_dir}/vault write sys/replication/performance/secondary/enable token=${local.wrapping_token}"]
inline = ["${var.vault_install_dir}/vault write sys/replication/performance/secondary/enable token=${var.wrapping_token}"]
|
||||
|
||||
transport = {
|
||||
ssh = {
|
||||
|
||||
@@ -9,22 +9,6 @@ terraform {
|
||||
}
|
||||
}
|
||||
|
||||
variable "vault_install_dir" {
|
||||
type = string
|
||||
description = "The directory where the Vault binary will be installed"
|
||||
}
|
||||
|
||||
variable "vault_addr" {
|
||||
type = string
|
||||
description = "The vault cluster listen address"
|
||||
default = "http://localhost:8200"
|
||||
}
|
||||
|
||||
variable "vault_root_token" {
|
||||
type = string
|
||||
description = "The vault root token"
|
||||
}
|
||||
|
||||
variable "leader_host" {
|
||||
type = object({
|
||||
private_ip = string
|
||||
@@ -34,6 +18,21 @@ variable "leader_host" {
|
||||
description = "The vault cluster host that can be expected as a leader"
|
||||
}
|
||||
|
||||
variable "vault_install_dir" {
|
||||
type = string
|
||||
description = "The directory where the Vault binary will be installed"
|
||||
}
|
||||
|
||||
variable "vault_addr" {
|
||||
type = string
|
||||
description = "The local vault API listen address"
|
||||
}
|
||||
|
||||
variable "vault_root_token" {
|
||||
type = string
|
||||
description = "The vault root token"
|
||||
}
|
||||
|
||||
resource "enos_remote_exec" "vault_operator_step_down" {
|
||||
environment = {
|
||||
VAULT_TOKEN = var.vault_root_token
|
||||
|
||||
@@ -2,7 +2,7 @@
|
||||
# SPDX-License-Identifier: BUSL-1.1
|
||||
|
||||
variable "vault_addr" {
|
||||
description = "The host address for the vault instance to test"
|
||||
description = "The local vault API listen address"
|
||||
type = string
|
||||
}
|
||||
|
||||
|
||||
@@ -10,21 +10,25 @@ terraform {
  }
}

variable "hosts" {
  type = map(object({
    ipv6       = string
    private_ip = string
    public_ip  = string
  }))
  description = "The Vault cluster hosts to unseal"
}

variable "vault_addr" {
  type        = string
  description = "The local vault API listen address"
}

variable "vault_install_dir" {
  type        = string
  description = "The directory where the Vault binary will be installed"
}

variable "vault_instance_count" {
  type        = number
  description = "How many vault instances are in the cluster"
}

variable "follower_public_ips" {
  type        = list(string)
  description = "Vault cluster follower Public IP addresses"
}

variable "vault_seal_type" {
  type        = string
  description = "The Vault seal type"
@@ -33,18 +37,15 @@ variable "vault_seal_type" {
variable "vault_unseal_keys" {}

locals {
  followers      = toset([for idx in range(var.vault_instance_count - 1) : tostring(idx)])
  vault_bin_path = "${var.vault_install_dir}/vault"
}

# After replication is enabled the secondary follower nodes are expected to be sealed,
# so we wait for the secondary follower nodes to update the seal status
resource "enos_remote_exec" "wait_until_sealed" {
  for_each = {
    for idx, follower in local.followers : idx => follower
  }
  for_each = var.hosts
  environment = {
    VAULT_ADDR        = "http://127.0.0.1:8200"
    VAULT_ADDR        = var.vault_addr
    VAULT_INSTALL_DIR = var.vault_install_dir
  }

@@ -52,25 +53,26 @@ resource "enos_remote_exec" "wait_until_sealed" {

  transport = {
    ssh = {
      host = element(var.follower_public_ips, each.key)
      host = each.value.public_ip
    }
  }
}

# The follower nodes on secondary replication cluster incorrectly report
# unseal progress 2/3 (Issue: https://hashicorp.atlassian.net/browse/VAULT-12309),
# so we restart the followers to clear the status and to autounseal incase of awskms seal type
# so we restart the followers to allow them to auto-unseal
resource "enos_remote_exec" "restart_followers" {
  depends_on = [enos_remote_exec.wait_until_sealed]
  for_each = {
    for idx, follower in local.followers : idx => follower
    for idx, host in var.hosts : idx => host
    if var.vault_seal_type != "shamir"
  }

  inline = ["sudo systemctl restart vault"]

  transport = {
    ssh = {
      host = element(var.follower_public_ips, each.key)
      host = each.value.public_ip
    }
  }
}
@@ -82,12 +84,12 @@ resource "enos_remote_exec" "unseal_followers" {
  depends_on = [enos_remote_exec.restart_followers]
  # The unseal keys are required only for seal_type shamir
  for_each = {
    for idx, follower in local.followers : idx => follower
    for idx, host in var.hosts : idx => host
    if var.vault_seal_type == "shamir"
  }

  environment = {
    VAULT_ADDR        = "http://127.0.0.1:8200"
    VAULT_ADDR        = var.vault_addr
    VAULT_INSTALL_DIR = var.vault_install_dir
    UNSEAL_KEYS       = join(",", var.vault_unseal_keys)
  }
@@ -96,7 +98,7 @@ resource "enos_remote_exec" "unseal_followers" {

  transport = {
    ssh = {
      host = element(var.follower_public_ips, each.key)
      host = each.value.public_ip
    }
  }
}
@@ -107,12 +109,12 @@ resource "enos_remote_exec" "unseal_followers" {
resource "enos_remote_exec" "unseal_followers_again" {
  depends_on = [enos_remote_exec.unseal_followers]
  for_each = {
    for idx, follower in local.followers : idx => follower
    for idx, host in var.hosts : idx => host
    if var.vault_seal_type == "shamir"
  }

  environment = {
    VAULT_ADDR        = "http://127.0.0.1:8200"
    VAULT_ADDR        = var.vault_addr
    VAULT_INSTALL_DIR = var.vault_install_dir
    UNSEAL_KEYS       = join(",", var.vault_unseal_keys)
  }
@@ -121,7 +123,7 @@ resource "enos_remote_exec" "unseal_followers_again" {

  transport = {
    ssh = {
      host = element(var.follower_public_ips, each.key)
      host = each.value.public_ip
    }
  }
}

@@ -12,33 +12,29 @@ terraform {
  }
}

variable "vault_api_addr" {
  type        = string
  description = "The API address of the Vault cluster"
}

variable "vault_install_dir" {
  type        = string
  description = "The directory where the Vault binary will be installed"
}

variable "vault_instance_count" {
  type        = number
  description = "How many vault instances are in the cluster"
}

variable "vault_instances" {
variable "hosts" {
  type = map(object({
    ipv6       = string
    private_ip = string
    public_ip  = string
  }))
  description = "The vault cluster instances that were created"
}

variable "vault_local_artifact_path" {

variable "ip_version" {
  type        = number
  description = "The IP version used for the Vault TCP listener"

  validation {
    condition     = contains([4, 6], var.ip_version)
    error_message = "The ip_version must be either 4 or 6"
  }
}

variable "vault_addr" {
  type        = string
  description = "The path to a locally built vault artifact to install"
  default     = null
  description = "The local vault API listen address"
}

variable "vault_artifactory_release" {
@@ -52,6 +48,22 @@ variable "vault_artifactory_release" {
  default     = null
}

variable "vault_install_dir" {
  type        = string
  description = "The directory where the Vault binary will be installed"
}

variable "vault_local_artifact_path" {
  type        = string
  description = "The path to a locally built vault artifact to install"
  default     = null
}

variable "vault_root_token" {
  type        = string
  description = "The vault root token"
}

variable "vault_seal_type" {
  type        = string
  description = "The Vault seal type"
@@ -64,19 +76,11 @@ variable "vault_unseal_keys" {
}

locals {
  instances = {
    for idx in range(var.vault_instance_count) : idx => {
      public_ip  = values(var.vault_instances)[idx].public_ip
      private_ip = values(var.vault_instances)[idx].private_ip
    }
  }
  followers      = toset([for idx in range(var.vault_instance_count - 1) : tostring(idx)])
  follower_ips   = compact(split(" ", enos_remote_exec.get_follower_public_ips.stdout))
  vault_bin_path = "${var.vault_install_dir}/vault"
}

resource "enos_bundle_install" "upgrade_vault_binary" {
  for_each = local.instances
  for_each = var.hosts

  destination = var.vault_install_dir
  artifactory = var.vault_artifactory_release
@@ -89,79 +93,79 @@ resource "enos_bundle_install" "upgrade_vault_binary" {
  }
}

resource "enos_remote_exec" "get_leader_public_ip" {
module "get_ip_addresses" {
  source = "../vault_get_cluster_ips"

  depends_on = [enos_bundle_install.upgrade_vault_binary]

  scripts = [abspath("${path.module}/scripts/get-leader-public-ip.sh")]

  environment = {
    VAULT_INSTALL_DIR = var.vault_install_dir,
    VAULT_INSTANCES   = jsonencode(local.instances)
  }

  transport = {
    ssh = {
      host = local.instances[0].public_ip
    }
  }
}

resource "enos_remote_exec" "get_follower_public_ips" {
  depends_on = [enos_bundle_install.upgrade_vault_binary]

  environment = {
    VAULT_INSTALL_DIR = var.vault_install_dir,
    VAULT_INSTANCES   = jsonencode(local.instances)
  }

  scripts = [abspath("${path.module}/scripts/get-follower-public-ips.sh")]

  transport = {
    ssh = {
      host = local.instances[0].public_ip
    }
  }
  hosts             = var.hosts
  ip_version        = var.ip_version
  vault_addr        = var.vault_addr
  vault_install_dir = var.vault_install_dir
  vault_root_token  = var.vault_root_token
}

resource "enos_remote_exec" "restart_followers" {
  for_each   = local.followers
  depends_on = [enos_remote_exec.get_follower_public_ips]
  for_each = module.get_ip_addresses.follower_hosts

  environment = {
    VAULT_ADDR        = var.vault_addr
    VAULT_INSTALL_DIR = var.vault_install_dir
  }

  scripts = [abspath("${path.module}/scripts/restart-vault.sh")]

  transport = {
    ssh = {
      host = trimspace(local.follower_ips[tonumber(each.key)])
      host = each.value.public_ip
    }
  }
}

resource "enos_vault_unseal" "followers" {
  depends_on = [enos_remote_exec.restart_followers]
  for_each = {
    for idx, follower in local.followers : idx => follower
    for idx, host in module.get_ip_addresses.follower_hosts : idx => host
    if var.vault_seal_type == "shamir"
  }
  depends_on = [enos_remote_exec.restart_followers]

  bin_path    = local.vault_bin_path
  vault_addr  = var.vault_api_addr
  vault_addr  = var.vault_addr
  seal_type   = var.vault_seal_type
  unseal_keys = var.vault_unseal_keys

  transport = {
    ssh = {
      host = trimspace(local.follower_ips[each.key])
      host = each.value.public_ip
    }
  }
}

module "wait_for_followers_unsealed" {
  source = "../vault_verify_unsealed"
  depends_on = [
    enos_remote_exec.restart_followers,
    enos_vault_unseal.followers,
  ]

  hosts             = module.get_ip_addresses.follower_hosts
  vault_addr        = var.vault_addr
  vault_install_dir = var.vault_install_dir
}

resource "enos_remote_exec" "restart_leader" {
  depends_on = [enos_vault_unseal.followers]
  depends_on = [module.wait_for_followers_unsealed]

  environment = {
    VAULT_ADDR        = var.vault_addr
    VAULT_INSTALL_DIR = var.vault_install_dir
  }

  scripts = [abspath("${path.module}/scripts/restart-vault.sh")]

  transport = {
    ssh = {
      host = trimspace(enos_remote_exec.get_leader_public_ip.stdout)
      host = module.get_ip_addresses.leader_public_ip
    }
  }
}
@@ -171,13 +175,13 @@ resource "enos_vault_unseal" "leader" {
  depends_on = [enos_remote_exec.restart_leader]

  bin_path    = local.vault_bin_path
  vault_addr  = var.vault_api_addr
  vault_addr  = var.vault_addr
  seal_type   = var.vault_seal_type
  unseal_keys = var.vault_unseal_keys

  transport = {
    ssh = {
      host = trimspace(enos_remote_exec.get_leader_public_ip.stdout)
      host = module.get_ip_addresses.leader_public_ip
    }
  }
}

@@ -1,19 +0,0 @@
#!/bin/bash
# Copyright (c) HashiCorp, Inc.
# SPDX-License-Identifier: BUSL-1.1

set -e

binpath=${VAULT_INSTALL_DIR}/vault
export VAULT_ADDR="http://localhost:8200"

instances=${VAULT_INSTANCES}

# Find the leader
leader_address=$($binpath status -format json | jq '.leader_address | scan("[0-9]+.[0-9]+.[0-9]+.[0-9]+")')

# Get the public ip addresses of the followers
follower_ips=$(jq ".[] | select(.private_ip!=$leader_address) | .public_ip" <<< "$instances")

echo "$follower_ips" | sed 's/\"//g' | tr '\n' ' '
@@ -1,19 +0,0 @@
#!/bin/bash
# Copyright (c) HashiCorp, Inc.
# SPDX-License-Identifier: BUSL-1.1

set -e

binpath=${VAULT_INSTALL_DIR}/vault
export VAULT_ADDR="http://localhost:8200"

instances=${VAULT_INSTANCES}

# Find the leader
leader_address=$($binpath status -format json | jq '.leader_address | scan("[0-9]+.[0-9]+.[0-9]+.[0-9]+")')

# Get the public ip address of the leader
leader_public=$(jq ".[] | select(.private_ip==$leader_address) | .public_ip" <<< "$instances")
#shellcheck disable=SC2001
echo "$leader_public" | sed 's/\"//g'
@@ -2,7 +2,43 @@
# Copyright (c) HashiCorp, Inc.
# SPDX-License-Identifier: BUSL-1.1

fail() {
  echo "$1" 1>&2
  exit 1
}

set -eux
[[ -z "$VAULT_ADDR" ]] && fail "VAULT_ADDR env variable has not been set"
binpath=${VAULT_INSTALL_DIR}/vault
test -x "$binpath" || fail "unable to locate vault binary at $binpath"

sudo systemctl restart vault
if ! out=$(sudo systemctl stop vault 2>&1); then
  echo "failed to stop vault: $out: $(sudo systemctl status vault)" 1>&2
fi

if ! out=$(sudo systemctl start vault 2>&1); then
  echo "failed to start vault: $out: $(sudo systemctl status vault)" 1>&2
fi

count=0
retries=5
while :; do
  # Check the Vault seal status
  status=$($binpath status)
  code=$?

  if [ $code == 0 ] || [ $code == 2 ]; then
    # 0 is unsealed and 2 is running but sealed
    echo "$status"
    exit 0
  fi

  printf "Waiting for Vault cluster to be ready: status code: %s, status:\n%s\n" "$code" "$status" 2>&1

  wait=$((3 ** count))
  count=$((count + 1))
  if [ "$count" -lt "$retries" ]; then
    sleep "$wait"
  else
    fail "Timed out waiting for Vault node to be ready after restart"
  fi
done

@@ -9,9 +9,13 @@ terraform {
  }
}

variable "vault_agent_template_destination" {
  type        = string
  description = "The destination of the template rendered by Agent"
variable "hosts" {
  type = map(object({
    ipv6       = string
    private_ip = string
    public_ip  = string
  }))
  description = "The vault cluster instances that were created"
}

variable "vault_agent_expected_output" {
@@ -19,40 +23,22 @@ variable "vault_agent_expected_output" {
  description = "The output that's expected in the rendered template at vault_agent_template_destination"
}

variable "vault_instance_count" {
  type        = number
  description = "How many vault instances are in the cluster"
}

variable "vault_instances" {
  type = map(object({
    private_ip = string
    public_ip  = string
  }))
  description = "The vault cluster instances that were created"
}

locals {
  vault_instances = {
    for idx in range(var.vault_instance_count) : idx => {
      public_ip  = values(var.vault_instances)[idx].public_ip
      private_ip = values(var.vault_instances)[idx].private_ip
    }
  }
variable "vault_agent_template_destination" {
  type        = string
  description = "The destination of the template rendered by Agent"
}

resource "enos_remote_exec" "verify_vault_agent_output" {
  environment = {
    VAULT_AGENT_TEMPLATE_DESTINATION = var.vault_agent_template_destination
    VAULT_AGENT_EXPECTED_OUTPUT      = var.vault_agent_expected_output
    VAULT_INSTANCES                  = jsonencode(local.vault_instances)
  }

  scripts = [abspath("${path.module}/scripts/verify-vault-agent-output.sh")]

  transport = {
    ssh = {
      host = local.vault_instances[0].public_ip
      host = var.hosts[0].public_ip
    }
  }
}

@@ -9,32 +9,18 @@ terraform {
  }
}

variable "vault_install_dir" {
  type        = string
  description = "The directory where the Vault binary will be installed"
}

variable "vault_instance_count" {
  type        = number
  description = "How many vault instances are in the cluster"
}

variable "vault_instances" {
variable "hosts" {
  type = map(object({
    ipv6       = string
    private_ip = string
    public_ip  = string
  }))
  description = "The vault cluster instances that were created"
}

variable "vault_root_token" {
variable "vault_addr" {
  type        = string
  description = "The vault root token"
}

variable "vault_autopilot_upgrade_version" {
  type        = string
  description = "The Vault upgraded version"
  description = "The local vault API listen address"
}

variable "vault_autopilot_upgrade_status" {
@@ -42,19 +28,26 @@ variable "vault_autopilot_upgrade_status" {
  description = "The autopilot upgrade expected status"
}

locals {
  public_ips = {
    for idx in range(var.vault_instance_count) : idx => {
      public_ip  = values(var.vault_instances)[idx].public_ip
      private_ip = values(var.vault_instances)[idx].private_ip
    }
  }
variable "vault_autopilot_upgrade_version" {
  type        = string
  description = "The Vault upgraded version"
}

variable "vault_install_dir" {
  type        = string
  description = "The directory where the Vault binary will be installed"
}

variable "vault_root_token" {
  type        = string
  description = "The vault root token"
}

resource "enos_remote_exec" "smoke-verify-autopilot" {
  for_each = local.public_ips
  for_each = var.hosts

  environment = {
    VAULT_ADDR                     = var.vault_addr
    VAULT_INSTALL_DIR              = var.vault_install_dir,
    VAULT_TOKEN                    = var.vault_root_token,
    VAULT_AUTOPILOT_UPGRADE_STATUS = var.vault_autopilot_upgrade_status,

@@ -7,8 +7,7 @@ fail() {
  exit 1
}

export VAULT_ADDR="http://localhost:8200"

[[ -z "$VAULT_ADDR" ]] && fail "VAULT_ADDR env variable has not been set"
[[ -z "$VAULT_AUTOPILOT_UPGRADE_STATUS" ]] && fail "VAULT_AUTOPILOT_UPGRADE_STATUS env variable has not been set"
[[ -z "$VAULT_AUTOPILOT_UPGRADE_VERSION" ]] && fail "VAULT_AUTOPILOT_UPGRADE_VERSION env variable has not been set"
[[ -z "$VAULT_TOKEN" ]] && fail "VAULT_TOKEN env variable has not been set"

@@ -9,57 +9,49 @@ terraform {
  }
}

variable "vault_instance_count" {
  type        = number
  description = "How many vault instances are in the cluster"
}

variable "vault_instances" {
variable "hosts" {
  type = map(object({
    ipv6       = string
    private_ip = string
    public_ip  = string
  }))
  description = "The vault cluster instances that were created"
}

variable "vault_root_token" {
  type        = string
  description = "The vault root token"
}

variable "vault_autopilot_default_max_leases" {
  type        = string
  description = "The autopilot upgrade expected max_leases"
}

variable "timeout" {
  type        = number
  description = "The max number of seconds to wait before timing out"
  default     = 60
}

variable "retry_interval" {
  type        = number
  description = "How many seconds to wait between each retry"
  default     = 2
}

locals {
  public_ips = {
    for idx in range(var.vault_instance_count) : idx => {
      public_ip  = values(var.vault_instances)[idx].public_ip
      private_ip = values(var.vault_instances)[idx].private_ip
    }
  }
variable "timeout" {
  type        = number
  description = "The max number of seconds to wait before timing out"
  default     = 60
}

variable "vault_addr" {
  type        = string
  description = "The local vault API listen address"
}

variable "vault_autopilot_default_max_leases" {
  type        = string
  description = "The autopilot upgrade expected max_leases"
}

variable "vault_root_token" {
  type        = string
  description = "The vault root token"
}

resource "enos_remote_exec" "smoke_verify_default_lcq" {
  for_each = local.public_ips
  for_each = var.hosts

  environment = {
    RETRY_INTERVAL  = var.retry_interval
    TIMEOUT_SECONDS = var.timeout
    VAULT_ADDR      = "http://localhost:8200"
    VAULT_ADDR      = var.vault_addr
    VAULT_TOKEN     = var.vault_root_token
    DEFAULT_LCQ     = var.vault_autopilot_default_max_leases
  }

@@ -7,6 +7,9 @@ function fail() {
  exit 1
}

# Exit early if we haven't been given an expected DEFAULT_LCQ
[[ -z "$DEFAULT_LCQ" ]] && exit 0

[[ -z "$RETRY_INTERVAL" ]] && fail "RETRY_INTERVAL env variable has not been set"
[[ -z "$TIMEOUT_SECONDS" ]] && fail "TIMEOUT_SECONDS env variable has not been set"
[[ -z "$VAULT_ADDR" ]] && fail "VAULT_ADDR env variable has not been set"

@@ -9,10 +9,37 @@ terraform {
  }
}

variable "vault_cluster_addr_port" {
  description = "The Raft cluster address port"
variable "ip_version" {
  type        = number
  description = "The IP version used for the Vault TCP listener"

  validation {
    condition     = contains([4, 6], var.ip_version)
    error_message = "The ip_version must be either 4 or 6"
  }
}

variable "primary_leader_host" {
  type = object({
    ipv6       = string
    private_ip = string
    public_ip  = string
  })
  description = "The primary cluster leader host"
}

variable "secondary_leader_host" {
  type = object({
    ipv6       = string
    private_ip = string
    public_ip  = string
  })
  description = "The secondary cluster leader host"
}

variable "vault_addr" {
  type        = string
  default     = "8201"
  description = "The local vault API listen address"
}

variable "vault_install_dir" {
@@ -20,26 +47,6 @@ variable "vault_install_dir" {
  description = "The directory where the Vault binary will be installed"
}

variable "primary_leader_public_ip" {
  type        = string
  description = "Vault primary cluster leader Public IP address"
}

variable "primary_leader_private_ip" {
  type        = string
  description = "Vault primary cluster leader Private IP address"
}

variable "secondary_leader_public_ip" {
  type        = string
  description = "Vault secondary cluster leader Public IP address"
}

variable "secondary_leader_private_ip" {
  type        = string
  description = "Vault secondary cluster leader Private IP address"
}

variable "wrapping_token" {
  type        = string
  description = "The wrapping token created on primary cluster"
@@ -47,40 +54,44 @@ variable "wrapping_token" {
}

locals {
  primary_leader_addr          = var.ip_version == 6 ? var.primary_leader_host.ipv6 : var.primary_leader_host.private_ip
  secondary_leader_addr        = var.ip_version == 6 ? var.secondary_leader_host.ipv6 : var.secondary_leader_host.private_ip
  primary_replication_status   = jsondecode(enos_remote_exec.verify_replication_status_on_primary.stdout)
  secondary_replication_status = jsondecode(enos_remote_exec.verify_replication_status_on_secondary.stdout)
}

resource "enos_remote_exec" "verify_replication_status_on_primary" {
  environment = {
    VAULT_ADDR               = "http://127.0.0.1:8200"
    VAULT_INSTALL_DIR        = var.vault_install_dir
    PRIMARY_LEADER_PRIV_IP   = var.primary_leader_private_ip
    SECONDARY_LEADER_PRIV_IP = var.secondary_leader_private_ip
    IP_VERSION               = var.ip_version
    PRIMARY_LEADER_ADDR      = local.primary_leader_addr
    SECONDARY_LEADER_ADDR    = local.secondary_leader_addr
    VAULT_ADDR               = var.vault_addr
    VAULT_INSTALL_DIR        = var.vault_install_dir
  }

  scripts = [abspath("${path.module}/scripts/verify-replication-status.sh")]

  transport = {
    ssh = {
      host = var.primary_leader_public_ip
      host = var.primary_leader_host.public_ip
    }
  }
}

resource "enos_remote_exec" "verify_replication_status_on_secondary" {
  environment = {
    VAULT_ADDR               = "http://127.0.0.1:8200"
    VAULT_INSTALL_DIR        = var.vault_install_dir
    PRIMARY_LEADER_PRIV_IP   = var.primary_leader_private_ip
    SECONDARY_LEADER_PRIV_IP = var.secondary_leader_private_ip
    IP_VERSION               = var.ip_version
    PRIMARY_LEADER_ADDR      = local.primary_leader_addr
    SECONDARY_LEADER_ADDR    = local.secondary_leader_addr
    VAULT_ADDR               = var.vault_addr
    VAULT_INSTALL_DIR        = var.vault_install_dir
  }

  scripts = [abspath("${path.module}/scripts/verify-replication-status.sh")]

  transport = {
    ssh = {
      host = var.secondary_leader_public_ip
      host = var.secondary_leader_host.public_ip
    }
  }
}

@@ -14,8 +14,9 @@ fail() {
  exit 1
}

[[ -z "$PRIMARY_LEADER_PRIV_IP" ]] && fail "PRIMARY_LEADER_PRIV_IP env variable has not been set"
[[ -z "$SECONDARY_LEADER_PRIV_IP" ]] && fail "SECONDARY_LEADER_PRIV_IP env variable has not been set"
[[ -z "$IP_VERSION" ]] && fail "IP_VERSION env variable has not been set"
[[ -z "$PRIMARY_LEADER_ADDR" ]] && fail "PRIMARY_LEADER_ADDR env variable has not been set"
[[ -z "$SECONDARY_LEADER_ADDR" ]] && fail "SECONDARY_LEADER_ADDR env variable has not been set"
[[ -z "$VAULT_ADDR" ]] && fail "VAULT_ADDR env variable has not been set"
[[ -z "$VAULT_INSTALL_DIR" ]] && fail "VAULT_INSTALL_DIR env variable has not been set"

@@ -40,8 +41,8 @@ retry() {

check_pr_status() {
  pr_status=$($binpath read -format=json sys/replication/performance/status)
  cluster_state=$(echo "$pr_status" | jq -r '.data.state')
  connection_mode=$(echo "$pr_status" | jq -r '.data.mode')
  cluster_state=$(jq -r '.data.state' <<< "$pr_status")
  connection_mode=$(jq -r '.data.mode' <<< "$pr_status")

  if [[ "$cluster_state" == 'idle' ]]; then
    echo "replication cluster state is idle" 1>&2
@@ -49,30 +50,38 @@ check_pr_status() {
  fi

  if [[ "$connection_mode" == "primary" ]]; then
    connection_status=$(echo "$pr_status" | jq -r '.data.secondaries[0].connection_status')
    connection_status=$(jq -r '.data.secondaries[0].connection_status' <<< "$pr_status")
    if [[ "$connection_status" == 'disconnected' ]]; then
      echo ".data.secondaries[0].connection_status from primary node is 'disconnected'" 1>&2
      return 1
    fi
    secondary_cluster_addr=$(echo "$pr_status" | jq -r '.data.secondaries[0].cluster_address | scan("[0-9]+.[0-9]+.[0-9]+.[0-9]+")')
    if [[ "$secondary_cluster_addr" != "$SECONDARY_LEADER_PRIV_IP" ]]; then
      echo ".data.secondaries[0].cluster_address should have an IP address of $SECONDARY_LEADER_PRIV_IP, got: $secondary_cluster_addr" 1>&2
    if [ "$IP_VERSION" == 4 ]; then
      secondary_cluster_addr=$(jq -r '.data.secondaries[0].cluster_address | scan("[0-9]+.[0-9]+.[0-9]+.[0-9]+")' <<< "$pr_status")
    else
      secondary_cluster_addr=$(jq -r '.data.secondaries[0].cluster_address | scan("\\[(.+)\\]") | .[0]' <<< "$pr_status")
    fi
    if [[ "$secondary_cluster_addr" != "$SECONDARY_LEADER_ADDR" ]]; then
      echo ".data.secondaries[0].cluster_address should have an IP address of $SECONDARY_LEADER_ADDR, got: $secondary_cluster_addr" 1>&2
      return 1
    fi
  else
    connection_status=$(echo "$pr_status" | jq -r '.data.primaries[0].connection_status')
    connection_status=$(jq -r '.data.primaries[0].connection_status' <<< "$pr_status")
    if [[ "$connection_status" == 'disconnected' ]]; then
      echo ".data.primaries[0].connection_status from secondary node is 'disconnected'" 1>&2
      return 1
    fi
    primary_cluster_addr=$(echo "$pr_status" | jq -r '.data.primaries[0].cluster_address | scan("[0-9]+.[0-9]+.[0-9]+.[0-9]+")')
    if [[ "$primary_cluster_addr" != "$PRIMARY_LEADER_PRIV_IP" ]]; then
      echo ".data.primaries[0].cluster_address should have an IP address of $PRIMARY_LEADER_PRIV_IP, got: $primary_cluster_addr" 1>&2
    if [ "$IP_VERSION" == 4 ]; then
      primary_cluster_addr=$(jq -r '.data.primaries[0].cluster_address | scan("[0-9]+.[0-9]+.[0-9]+.[0-9]+")' <<< "$pr_status")
    else
      primary_cluster_addr=$(jq -r '.data.primaries[0].cluster_address | scan("\\[(.+)\\]") | .[0]' <<< "$pr_status")
    fi
    if [[ "$primary_cluster_addr" != "$PRIMARY_LEADER_ADDR" ]]; then
      echo ".data.primaries[0].cluster_address should have an IP address of $PRIMARY_LEADER_ADDR, got: $primary_cluster_addr" 1>&2
      return 1
    fi
    known_primary_cluster_addrs=$(echo "$pr_status" | jq -r '.data.known_primary_cluster_addrs')
    if ! echo "$known_primary_cluster_addrs" | grep -q "$PRIMARY_LEADER_PRIV_IP"; then
      echo "$PRIMARY_LEADER_PRIV_IP is not in .data.known_primary_cluster_addrs: $known_primary_cluster_addrs" 1>&2
    known_primary_cluster_addrs=$(jq -r '.data.known_primary_cluster_addrs' <<< "$pr_status")
    if ! echo "$known_primary_cluster_addrs" | grep -q "$PRIMARY_LEADER_ADDR"; then
      echo "$PRIMARY_LEADER_ADDR is not in .data.known_primary_cluster_addrs: $known_primary_cluster_addrs" 1>&2
      return 1
    fi
  fi
@@ -81,5 +90,10 @@ check_pr_status() {
  return 0
}

if [ "$IP_VERSION" != 4 ] && [ "$IP_VERSION" != 6 ]; then
  fail "unsupported IP_VERSION: $IP_VERSION"
fi

# Retry for a while because it can take some time for replication to sync
retry 10 check_pr_status
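
The IPv6 branch above relies on jq's scan with a capture group to pull the host out of a bracketed cluster address. A minimal sketch of both extractions, using hypothetical addresses purely for illustration:

  # IPv6: a cluster_address such as https://[2001:db8::10]:8201; the capture group strips the brackets.
  jq -rn '"https://[2001:db8::10]:8201" | scan("\\[(.+)\\]") | .[0]'      # prints 2001:db8::10
  # IPv4: the plain scan returns the dotted-quad match itself.
  jq -rn '"https://10.0.1.5:8201" | scan("[0-9]+.[0-9]+.[0-9]+.[0-9]+")'  # prints 10.0.1.5
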

@@ -9,10 +9,33 @@ terraform {
  }
}

variable "hosts" {
  type = map(object({
    ipv6       = string
    private_ip = string
    public_ip  = string
  }))
  description = "The vault cluster instances that were created"
}

variable "ip_version" {
  type        = number
  description = "The IP version to use for the Vault TCP listeners"

  validation {
    condition     = contains([4, 6], var.ip_version)
    error_message = "The ip_version must be either 4 or 6"
  }
}

variable "vault_addr" {
  type        = string
  description = "The local vault API listen address"
}

variable "vault_cluster_addr_port" {
  description = "The Raft cluster address port"
  type        = string
  default     = "8201"
}

variable "vault_install_dir" {
@@ -20,38 +43,24 @@ variable "vault_install_dir" {
  description = "The directory where the Vault binary will be installed"
}

variable "vault_instance_count" {
  type        = number
  description = "How many vault instances are in the cluster"
}

variable "vault_instances" {
  type = map(object({
    private_ip = string
    public_ip  = string
  }))
  description = "The vault cluster instances that were created"
}

variable "vault_root_token" {
  type        = string
  description = "The vault root token"
}

locals {
  instances = {
    for idx in range(var.vault_instance_count) : idx => {
      public_ip  = values(var.vault_instances)[idx].public_ip
      private_ip = values(var.vault_instances)[idx].private_ip
    }
  }
  cluster_addrs = {
    4 : { for k, v in var.hosts : k => "${v.private_ip}:${var.vault_cluster_addr_port}" },
    6 : { for k, v in var.hosts : k => "[${v.ipv6}]:${var.vault_cluster_addr_port}" },
  }
}

resource "enos_remote_exec" "verify_raft_auto_join_voter" {
  for_each = local.instances
  for_each = var.hosts

  environment = {
    VAULT_CLUSTER_ADDR      = "${each.value.private_ip}:${var.vault_cluster_addr_port}"
    VAULT_ADDR              = var.vault_addr
    VAULT_CLUSTER_ADDR      = local.cluster_addrs[var.ip_version][each.key]
    VAULT_INSTALL_DIR       = var.vault_install_dir
    VAULT_LOCAL_BINARY_PATH = "${var.vault_install_dir}/vault"
    VAULT_TOKEN             = var.vault_root_token
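
For a hypothetical host with private_ip 10.0.1.5, ipv6 2001:db8::10, and the default port 8201, the cluster_addrs local above renders 10.0.1.5:8201 when ip_version is 4 and [2001:db8::10]:8201 when ip_version is 6, so VAULT_CLUSTER_ADDR always carries a correctly bracketed IPv6 host.
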

@@ -42,7 +42,7 @@ check_voter_status() {

test -x "$binpath" || fail "unable to locate vault binary at $binpath"

export VAULT_ADDR='http://127.0.0.1:8200'
[[ -z "$VAULT_ADDR" ]] && fail "VAULT_ADDR env variable has not been set"
[[ -z "$VAULT_TOKEN" ]] && fail "VAULT_TOKEN env variable has not been set"

# Retry a few times because it can take some time for things to settle after

@@ -9,32 +9,34 @@ terraform {
  }
}

variable "hosts" {
  type = map(object({
    ipv6       = string
    private_ip = string
    public_ip  = string
  }))
  description = "The Vault cluster instances that were created"
}

variable "vault_addr" {
  type        = string
  description = "The local vault API listen address"
}

variable "vault_install_dir" {
  type        = string
  description = "The directory where the Vault binary will be installed"
}

variable "vault_instance_count" {
  type        = number
  description = "How many vault instances are in the cluster"
}

variable "node_public_ips" {
  type        = list(string)
  description = "Vault cluster node Public IP address"
}

locals {
  followers      = toset([for idx in range(var.vault_instance_count - 1) : tostring(idx)])
  vault_bin_path = "${var.vault_install_dir}/vault"
}

resource "enos_remote_exec" "verify_kv_on_node" {
  for_each = {
    for idx, follower in local.followers : idx => follower
  }
  for_each = var.hosts

  environment = {
    VAULT_ADDR        = "http://127.0.0.1:8200"
    VAULT_ADDR        = var.vault_addr
    VAULT_INSTALL_DIR = var.vault_install_dir
  }

@@ -42,7 +44,7 @@ resource "enos_remote_exec" "verify_kv_on_node" {

  transport = {
    ssh = {
      host = element(var.node_public_ips, each.key)
      host = each.value.public_ip
    }
  }
}

@@ -10,19 +10,31 @@ terraform {
  }
}

locals {
  instances = {
    for idx in range(var.vault_instance_count) : idx => {
      public_ip  = values(var.vault_instances)[idx].public_ip
      private_ip = values(var.vault_instances)[idx].private_ip
    }
  }
variable "hosts" {
  type = map(object({
    ipv6       = string
    private_ip = string
    public_ip  = string
  }))
  description = "The vault cluster instances that were created"
}

variable "vault_addr" {
  type        = string
  description = "The local vault API listen address"
}

variable "vault_edition" {
  type        = string
  description = "The vault product edition"
  default     = null
}

resource "enos_remote_exec" "smoke-verify-replication" {
  for_each = local.instances
  for_each = var.hosts

  environment = {
    VAULT_ADDR    = var.vault_addr
    VAULT_EDITION = var.vault_edition
  }

@@ -2,10 +2,6 @@
# Copyright (c) HashiCorp, Inc.
# SPDX-License-Identifier: BUSL-1.1

# The Vault replication smoke test, documented in
# https://docs.google.com/document/d/16sjIk3hzFDPyY5A9ncxTZV_9gnpYSF1_Vx6UA1iiwgI/edit#heading=h.kgrxf0f1et25

set -e

function fail() {
@@ -13,8 +9,11 @@ function fail() {
  exit 1
}

[[ -z "$VAULT_ADDR" ]] && fail "VAULT_ADDR env variable has not been set"
[[ -z "$VAULT_EDITION" ]] && fail "VAULT_EDITION env variable has not been set"

# Replication status endpoint should have data.mode disabled for CE release
status=$(curl -s http://localhost:8200/v1/sys/replication/status)
status=$(curl "${VAULT_ADDR}/v1/sys/replication/status")
if [ "$VAULT_EDITION" == "ce" ]; then
  if [ "$(jq -r '.data.mode' <<< "$status")" != "disabled" ]; then
    fail "replication data mode is not disabled for CE release!"

@@ -1,27 +0,0 @@
# Copyright (c) HashiCorp, Inc.
# SPDX-License-Identifier: BUSL-1.1

variable "vault_edition" {
  type        = string
  description = "The vault product edition"
  default     = null
}

variable "vault_install_dir" {
  type        = string
  description = "The directory where the Vault binary will be installed"
}

variable "vault_instance_count" {
  type        = number
  description = "How many vault instances are in the cluster"
}

variable "vault_instances" {
  type = map(object({
    private_ip = string
    public_ip  = string
  }))
  description = "The vault cluster instances that were created"
}
@@ -10,17 +10,22 @@ terraform {
  }
}

locals {
  instances = {
    for idx in range(var.vault_instance_count) : idx => {
      public_ip  = values(var.vault_instances)[idx].public_ip
      private_ip = values(var.vault_instances)[idx].private_ip
    }
  }
variable "hosts" {
  type = map(object({
    ipv6       = string
    private_ip = string
    public_ip  = string
  }))
  description = "The vault cluster instances that were created"
}

variable "vault_addr" {
  type        = string
  description = "The local vault API listen address"
}

resource "enos_remote_exec" "smoke-verify-ui" {
  for_each = local.instances
  for_each = var.hosts

  environment = {
    VAULT_ADDR = var.vault_addr,

@@ -1,21 +0,0 @@
# Copyright (c) HashiCorp, Inc.
# SPDX-License-Identifier: BUSL-1.1

variable "vault_addr" {
  type        = string
  description = "The vault cluster address"
  default     = "http://localhost:8200"
}

variable "vault_instance_count" {
  type        = number
  description = "How many vault instances are in the cluster"
}

variable "vault_instances" {
  type = map(object({
    private_ip = string
    public_ip  = string
  }))
  description = "The vault cluster instances that were created"
}
@@ -9,43 +9,35 @@ terraform {
  }
}

variable "vault_install_dir" {
  type        = string
  description = "The directory where the Vault binary will be installed"
}

variable "vault_instance_count" {
  type        = number
  description = "How many vault instances are in the cluster"
}

variable "vault_instances" {
variable "hosts" {
  type = map(object({
    ipv6       = string
    private_ip = string
    public_ip  = string
  }))
  description = "The vault cluster instances that were created"
}

variable "vault_addr" {
  type        = string
  description = "The local vault API listen address"
}

variable "vault_install_dir" {
  type        = string
  description = "The directory where the Vault binary will be installed"
}

variable "vault_root_token" {
  type        = string
  description = "The vault root token"
}

locals {
  public_ips = {
    for idx in range(var.vault_instance_count) : idx => {
      public_ip  = values(var.vault_instances)[idx].public_ip
      private_ip = values(var.vault_instances)[idx].private_ip
    }
  }
}

resource "enos_remote_exec" "smoke-verify-undo-logs" {
  for_each = local.public_ips
  for_each = var.hosts

  environment = {
    VAULT_ADDR        = "http://localhost:8200"
    VAULT_ADDR        = var.vault_addr
    VAULT_INSTALL_DIR = var.vault_install_dir
    VAULT_TOKEN       = var.vault_root_token
  }

@@ -9,10 +9,18 @@ terraform {
  }
}

variable "vault_cluster_addr_port" {
  description = "The Raft cluster address port"
variable "hosts" {
  type = map(object({
    ipv6       = string
    private_ip = string
    public_ip  = string
  }))
  description = "The vault cluster instances that were created"
}

variable "vault_addr" {
  type        = string
  default     = "8201"
  description = "The local vault API listen address"
}

variable "vault_install_dir" {
@@ -20,36 +28,16 @@ variable "vault_install_dir" {
  description = "The directory where the Vault binary will be installed"
}

variable "vault_instance_count" {
  type        = number
  description = "How many vault instances are in the cluster"
}

variable "vault_instances" {
  type = map(object({
    private_ip = string
    public_ip  = string
  }))
  description = "The vault cluster instances that were created"
}

locals {
  instances = {
    for idx in range(var.vault_instance_count) : idx => {
      public_ip  = values(var.vault_instances)[idx].public_ip
      private_ip = values(var.vault_instances)[idx].private_ip
    }
  }
}

resource "enos_remote_exec" "verify_node_unsealed" {
  for_each = local.instances
  for_each = var.hosts

  scripts = [abspath("${path.module}/scripts/verify-vault-node-unsealed.sh")]

  environment = {
    VAULT_CLUSTER_ADDR = "${each.value.private_ip}:${var.vault_cluster_addr_port}"
    VAULT_INSTALL_DIR  = var.vault_install_dir
    HOST_IPV4         = each.value.public_ip
    HOST_IPV6         = each.value.ipv6
    VAULT_ADDR        = var.vault_addr
    VAULT_INSTALL_DIR = var.vault_install_dir
  }

  transport = {

@@ -4,23 +4,23 @@

set -e

binpath=${VAULT_INSTALL_DIR}/vault

fail() {
  echo "$1" 1>&2
  exit 1
}

[[ -z "$VAULT_ADDR" ]] && fail "VAULT_ADDR env variable has not been set"
[[ -z "$VAULT_INSTALL_DIR" ]] && fail "VAULT_INSTALL_DIR env variable has not been set"

binpath=${VAULT_INSTALL_DIR}/vault
test -x "$binpath" || fail "unable to locate vault binary at $binpath"

export VAULT_ADDR=http://localhost:8200

count=0
retries=4
retries=5
while :; do
  health_status=$(curl -s "${VAULT_CLUSTER_ADDR}/v1/sys/health" |jq '.')
  unseal_status=$($binpath status -format json | jq -Mr --argjson expected "false" '.sealed == $expected')
  if [[ "$unseal_status" == 'true' ]]; then
  health_status=$(curl -s "${VAULT_ADDR}/v1/sys/health" | jq '.')
  if unseal_status=$($binpath status -format json | jq -Mre --argjson expected "false" '.sealed == $expected'); then
    echo "$health_status"
    exit 0
  fi
@@ -30,6 +30,14 @@ while :; do
  if [ "$count" -lt "$retries" ]; then
    sleep "$wait"
  else
    fail "expected ${VAULT_CLUSTER_ADDR} to be unsealed, got unseal status: $unseal_status"
    if [ -n "$HOST_IPV6" ]; then
      fail "expected ${HOST_IPV6} to be unsealed, got unseal status: $unseal_status"
    else
      if [ -n "$HOST_IPV4" ]; then
        fail "expected ${HOST_IPV4} to be unsealed, got unseal status: $unseal_status"
      else
        fail "expected ${VAULT_ADDR} to be unsealed, got unseal status: $unseal_status"
      fi
    fi
  fi
done
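
The switch to jq -Mre above folds the comparison into jq's exit status: with -e, jq exits non-zero when its last output is false or null, so the if can branch on the seal check directly rather than string-comparing "true". A small illustration with made-up status documents:

  echo '{"sealed":false}' | jq -e '.sealed == false' >/dev/null && echo "node is unsealed"
  echo '{"sealed":true}'  | jq -e '.sealed == false' >/dev/null || echo "node is still sealed"
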

@@ -9,42 +9,43 @@ terraform {
  }
}

variable "hosts" {
  type = map(object({
    ipv6       = string
    private_ip = string
    public_ip  = string
  }))
  description = "The Vault cluster instances that were created"
}

variable "vault_addr" {
  type        = string
  description = "The local vault API listen address"
}

variable "vault_build_date" {
  type        = string
  description = "The Vault artifact build date"
  default     = null
}

variable "vault_edition" {
  type        = string
  description = "The Vault product edition"
  default     = null
}

variable "vault_install_dir" {
  type        = string
  description = "The directory where the Vault binary will be installed"
}

variable "vault_instance_count" {
  type        = number
  description = "How many Vault instances are in the cluster"
}

variable "vault_instances" {
  type = map(object({
    private_ip = string
    public_ip  = string
  }))
  description = "The Vault cluster instances that were created"
}

variable "vault_product_version" {
  type        = string
  description = "The Vault product version"
  default     = null
}

variable "vault_edition" {
  type        = string
  description = "The Vault product edition"
  default     = null
}

variable "vault_revision" {
  type        = string
  description = "The Vault product revision"
@@ -57,25 +58,17 @@ variable "vault_root_token" {
  default     = null
}

locals {
  instances = {
    for idx in range(var.vault_instance_count) : idx => {
      public_ip  = values(var.vault_instances)[idx].public_ip
      private_ip = values(var.vault_instances)[idx].private_ip
    }
  }
}

resource "enos_remote_exec" "verify_all_nodes_have_updated_version" {
  for_each = local.instances
  for_each = var.hosts

  environment = {
    VAULT_INSTALL_DIR = var.vault_install_dir,
    VAULT_ADDR        = var.vault_addr,
    VAULT_BUILD_DATE  = var.vault_build_date,
    VAULT_VERSION     = var.vault_product_version,
    VAULT_EDITION     = var.vault_edition,
    VAULT_INSTALL_DIR = var.vault_install_dir,
    VAULT_REVISION    = var.vault_revision,
    VAULT_TOKEN       = var.vault_root_token,
    VAULT_VERSION     = var.vault_product_version,
  }

  scripts = [abspath("${path.module}/scripts/verify-cluster-version.sh")]

@@ -2,28 +2,30 @@
# Copyright (c) HashiCorp, Inc.
# SPDX-License-Identifier: BUSL-1.1

# Verify the Vault "version" includes the correct base version, build date,
# revision SHA, and edition metadata.
set -e

binpath=${VAULT_INSTALL_DIR}/vault
edition=${VAULT_EDITION}
version=${VAULT_VERSION}
sha=${VAULT_REVISION}
build_date=${VAULT_BUILD_DATE}
# VAULT_TOKEN must also be set

fail() {
  echo "$1" 1>&2
  exit 1
}

test -x "$binpath" || fail "unable to locate vault binary at $binpath"

export VAULT_ADDR='http://127.0.0.1:8200'
[[ -z "$VAULT_ADDR" ]] && fail "VAULT_ADDR env variable has not been set"
|
||||
[[ -z "$VAULT_BUILD_DATE" ]] && fail "VAULT_TOKEN env variable has not been set"
|
||||
[[ -z "$VAULT_EDITION" ]] && fail "VAULT_TOKEN env variable has not been set"
|
||||
[[ -z "$VAULT_INSTALL_DIR" ]] && fail "VAULT_TOKEN env variable has not been set"
|
||||
[[ -z "$VAULT_REVISION" ]] && fail "VAULT_TOKEN env variable has not been set"
|
||||
[[ -z "$VAULT_TOKEN" ]] && fail "VAULT_TOKEN env variable has not been set"
|
||||
[[ -z "$VAULT_VERSION" ]] && fail "VAULT_TOKEN env variable has not been set"
|
||||
|
||||
binpath=${VAULT_INSTALL_DIR}/vault
|
||||
edition=${VAULT_EDITION}
|
||||
version=${VAULT_VERSION}
|
||||
sha=${VAULT_REVISION}
|
||||
build_date=${VAULT_BUILD_DATE}
|
||||
|
||||
test -x "$binpath" || fail "unable to locate vault binary at $binpath"
|
||||
version_expected="Vault v$version ($sha), built $build_date"
|
||||
|
||||
case "$edition" in
|
||||
|
||||
@@ -9,56 +9,48 @@ terraform {
  }
}

variable "vault_install_dir" {
  type        = string
  description = "The directory where the Vault binary will be installed"
}

variable "vault_instance_count" {
  type        = number
  description = "How many Vault instances are in the cluster"
}

variable "leader_public_ip" {
  type        = string
  description = "Vault cluster leader Public IP address"
}

variable "leader_private_ip" {
  type        = string
  description = "Vault cluster leader Private IP address"
}

variable "vault_instances" {
variable "hosts" {
  type = map(object({
    ipv6       = string
    private_ip = string
    public_ip  = string
  }))
  description = "The Vault cluster instances that were created"
}

variable "leader_host" {
  type = object({
    ipv6       = string
    private_ip = string
    public_ip  = string
  })

  description = "Vault cluster leader host"
}

variable "vault_addr" {
  type        = string
  description = "The local vault API listen address"
}

variable "vault_install_dir" {
  type        = string
  description = "The directory where the Vault binary will be installed"
}

variable "vault_root_token" {
  type        = string
  description = "The Vault root token"
  default     = null
}

locals {
  instances = {
    for idx in range(var.vault_instance_count) : idx => {
      public_ip  = values(var.vault_instances)[idx].public_ip
      private_ip = values(var.vault_instances)[idx].private_ip
    }
  }
}

# We use this module to verify write data in all Enos scenarios. Since we cannot use
# Vault token to authenticate to secondary clusters in replication scenario we add a regular user
# here to keep the authentication method and module verification consistent between all scenarios
resource "enos_remote_exec" "smoke-enable-secrets-kv" {
  # Only enable the secrets engine on the leader node
  environment = {
    VAULT_ADDR        = "http://127.0.0.1:8200"
    VAULT_ADDR        = var.vault_addr
    VAULT_TOKEN       = var.vault_root_token
    VAULT_INSTALL_DIR = var.vault_install_dir
  }
@@ -67,7 +59,7 @@ resource "enos_remote_exec" "smoke-enable-secrets-kv" {

  transport = {
    ssh = {
      host = var.leader_public_ip
      host = var.leader_host.public_ip
    }
  }
}
@@ -75,10 +67,10 @@ resource "enos_remote_exec" "smoke-enable-secrets-kv" {
# Verify that we can enable the k/v secrets engine and write data to it.
resource "enos_remote_exec" "smoke-write-test-data" {
  depends_on = [enos_remote_exec.smoke-enable-secrets-kv]
  for_each   = local.instances
  for_each   = var.hosts

  environment = {
    VAULT_ADDR        = "http://127.0.0.1:8200"
    VAULT_ADDR        = var.vault_addr
    VAULT_TOKEN       = var.vault_root_token
    VAULT_INSTALL_DIR = var.vault_install_dir
    TEST_KEY          = "smoke${each.key}"

@@ -9,6 +9,30 @@ terraform {
  }
}

variable "hosts" {
  type = map(object({
    ipv6       = string
    private_ip = string
    public_ip  = string
  }))
  description = "The vault cluster hosts that can be expected as a leader"
}

variable "ip_version" {
  type        = number
  description = "The IP version used for the Vault TCP listener"

  validation {
    condition     = contains([4, 6], var.ip_version)
    error_message = "The ip_version must be either 4 or 6"
  }
}

variable "vault_addr" {
  type        = string
  description = "The local vault API listen address"
}

variable "vault_install_dir" {
  type        = string
  description = "The directory where the Vault binary will be installed"
@@ -19,14 +43,6 @@ variable "vault_root_token" {
  description = "The vault root token"
}

variable "vault_hosts" {
  type = map(object({
    private_ip = string
    public_ip  = string
  }))
  description = "The vault cluster hosts that can be expected as a leader"
}

variable "timeout" {
  type        = number
  description = "The max number of seconds to wait before timing out"
@@ -40,15 +56,18 @@ variable "retry_interval" {
}

locals {
  private_ips = [for k, v in values(tomap(var.vault_hosts)) : tostring(v["private_ip"])]
  ipv6s       = [for k, v in values(tomap(var.hosts)) : tostring(v["ipv6"])]
  private_ips = [for k, v in values(tomap(var.hosts)) : tostring(v["private_ip"])]
}

resource "enos_remote_exec" "wait_for_leader_in_vault_hosts" {
resource "enos_remote_exec" "wait_for_leader_in_hosts" {
  environment = {
    RETRY_INTERVAL             = var.retry_interval
    IP_VERSION                 = var.ip_version
    TIMEOUT_SECONDS            = var.timeout
    VAULT_ADDR                 = "http://127.0.0.1:8200"
    RETRY_INTERVAL             = var.retry_interval
    VAULT_ADDR                 = var.vault_addr
    VAULT_TOKEN                = var.vault_root_token
    VAULT_INSTANCE_IPV6S       = jsonencode(local.ipv6s)
    VAULT_INSTANCE_PRIVATE_IPS = jsonencode(local.private_ips)
    VAULT_INSTALL_DIR          = var.vault_install_dir
  }
@@ -57,7 +76,7 @@ resource "enos_remote_exec" "wait_for_leader_in_vault_hosts" {

  transport = {
    ssh = {
      host = var.vault_hosts[0].public_ip
      host = var.hosts[0].public_ip
    }
  }
}

@@ -14,7 +14,6 @@ fail() {
[[ -z "$TIMEOUT_SECONDS" ]] && fail "TIMEOUT_SECONDS env variable has not been set"
[[ -z "$VAULT_ADDR" ]] && fail "VAULT_ADDR env variable has not been set"
[[ -z "$VAULT_INSTALL_DIR" ]] && fail "VAULT_INSTALL_DIR env variable has not been set"
[[ -z "$VAULT_INSTANCE_PRIVATE_IPS" ]] && fail "VAULT_INSTANCE_PRIVATE_IPS env variable has not been set"
[[ -z "$VAULT_TOKEN" ]] && fail "VAULT_TOKEN env variable has not been set"

binpath=${VAULT_INSTALL_DIR}/vault
@@ -40,14 +39,59 @@ findLeaderInPrivateIPs() {
  return 1
}

findLeaderInIPV6s() {
  # Find the leader private IP address
  local leader_ipv6
  if ! leader_ipv6=$($binpath read sys/leader -format=json | jq -er '.data.leader_address | scan("\\[(.+)\\]") | .[0]') ; then
    # Some older versions of vault don't support reading sys/leader. Fallback to the cli status.
    if ! leader_ipv6=$($binpath status -format json | jq -er '.leader_address | scan("\\[(.+)\\]") | .[0]'); then
      return 1
    fi
  fi

  if isIn=$(jq -er --arg ip "$leader_ipv6" 'map(select(. == $ip)) | length == 1' <<< "$VAULT_INSTANCE_IPV6S"); then
    if [[ "$isIn" == "true" ]]; then
      echo "$leader_ipv6"
      return 0
    fi
  fi

  return 1
}

begin_time=$(date +%s)
end_time=$((begin_time + TIMEOUT_SECONDS))
while [ "$(date +%s)" -lt "$end_time" ]; do
  if findLeaderInPrivateIPs; then
    exit 0
  fi
  # Look for the leader using the address family that matches IP_VERSION
  case $IP_VERSION in
    4)
      [[ -z "$VAULT_INSTANCE_PRIVATE_IPS" ]] && fail "VAULT_INSTANCE_PRIVATE_IPS env variable has not been set"
      if findLeaderInPrivateIPs; then
        exit 0
      fi
      ;;
    6)
      [[ -z "$VAULT_INSTANCE_IPV6S" ]] && fail "VAULT_INSTANCE_IPV6S env variable has not been set"
      if findLeaderInIPV6s; then
        exit 0
      fi
      ;;
    *)
      fail "unsupported IP_VERSION: $IP_VERSION"
      ;;
  esac

  sleep "$RETRY_INTERVAL"
done

fail "Timed out waiting for one of $VAULT_INSTANCE_PRIVATE_IPS to be leader."
case $IP_VERSION in
  4)
    fail "Timed out waiting for one of $VAULT_INSTANCE_PRIVATE_IPS to be leader."
    ;;
  6)
    fail "Timed out waiting for one of $VAULT_INSTANCE_IPV6S to be leader."
    ;;
  *)
    fail "Timed out waiting for leader"
    ;;
esac

Some files were not shown because too many files have changed in this diff