[QT-627] enos: add pkcs11 seal testing with softhsm (#24349)

Add support for testing the `+ent.hsm` and `+ent.hsm.fips1402` Vault editions
with `pkcs11` seal types using a shared `softhsm` token. SoftHSM2 is
a software HSM that loads seal keys from local disk via PKCS#11.
The pkcs11 seal implementation is fairly complex: we have to create
one or more shared tokens with various keys and distribute them to all
nodes in the cluster before starting Vault, and we have to ensure that
each set's labels are unique.
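
The attributes emitted by the new seal modules map directly onto the
`seal "pkcs11"` stanza in Vault Enterprise's configuration. A minimal
sketch of the resulting stanza, with illustrative values (the library
path, slot, pin, and labels are generated per cluster and will differ):

```hcl
# Sketch only: every value below comes from the module's generated
# attributes rather than being a fixed constant.
seal "pkcs11" {
  lib            = "/usr/lib/softhsm/libsofthsm2.so" # discovered libsofthsm2.so path
  slot           = "600938836"                       # decimal slot id from softhsm2-util --init-token
  pin            = "86137"                           # generated user pin
  key_label      = "vault_hsm_aes_86137"             # AES key created via pkcs11-tool
  hmac_key_label = "vault_hsm_hmac_86137"            # HMAC key created via pkcs11-tool
  generate_key   = "false"                           # keys are pre-created and distributed
}
```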

We also make a few quality-of-life updates: we use globals for
variants that don't often change and update the base versions used by
various scenarios.
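
For example, after this change a scenario matrix references the shared
globals instead of repeating literal lists (excerpted from the scenario
updates below):

```hcl
matrix {
  arch            = global.archs
  artifact_source = global.artifact_sources
  artifact_type   = global.artifact_types
  backend         = global.backends
  consul_version  = global.consul_versions
  distro          = global.distros
  edition         = global.editions
  seal            = global.seals
  seal_ha_beta    = ["true", "false"]
}
```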

* Add a `seal_pkcs11` module for creating a `pkcs11` seal key using
  `softhsm2` as our backing implementation.
* Require the latest enos provider to gain access to the `enos_user`
  resource, which ensures correct ownership and permissions of the
  `softhsm2` data directory and files.
* Add the `pkcs11` seal to all scenarios that support configuring a
  seal type.
* Extract system package installation out of the `vault_cluster` module
  and into its own reusable `install_packages` module (see the sketch
  after this list).
* Fix a bug in the local builder variant that mangled the artifact path.
  This likely slipped in during the migration to auto-version bumping.
* Fix an issue where restarting Vault nodes configured with a socket
  audit device would fail because the socket listener wasn't available
  on all nodes. Now we start the socket listener on all nodes so that
  any node can become primary and "audit" to the socket listener.
* Remove unused attributes from some verify modules.
* Go back to using cheaper AWS regions.
* Use globals for variants.
* Update the initial Vault versions for the `upgrade` and `autopilot`
  scenarios.
* Update the Consul versions for all scenarios that support a Consul
  storage backend.
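
As a rough sketch of the reuse the extracted module enables (the step
wiring here is hypothetical, but `hosts` and `packages` are the module's
actual inputs), a scenario can now install distro packages on any set of
target hosts:

```hcl
step "install_packages" {
  module     = module.install_packages
  depends_on = [step.create_vault_cluster_targets]

  variables {
    hosts    = step.create_vault_cluster_targets.hosts
    packages = concat(global.packages, global.distro_packages[matrix.distro])
  }
}
```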

Signed-off-by: Ryan Cragun <me@ryan.ec>
Ryan Cragun committed 2023-12-08 14:00:45 -07:00 (committed by GitHub)
parent 309294a25c
commit a087f7b267
43 changed files with 1555 additions and 500 deletions


@@ -2,6 +2,10 @@
 # SPDX-License-Identifier: BUSL-1.1

 globals {
+  archs            = ["amd64", "arm64"]
+  artifact_sources = ["local", "crt", "artifactory"]
+  artifact_types   = ["bundle", "package"]
+  backends         = ["consul", "raft"]
   backend_tag_key = "VaultStorage"
   build_tags = {
     "ce" = ["ui"]
@@ -10,25 +14,32 @@ globals {
     "ent.hsm"          = ["ui", "enterprise", "cgo", "hsm", "venthsm"]
     "ent.hsm.fips1402" = ["ui", "enterprise", "cgo", "hsm", "fips", "fips_140_2", "ent.hsm.fips1402"]
   }
+  consul_versions = ["1.14.11", "1.15.7", "1.16.3", "1.17.0"]
+  distros         = ["ubuntu", "rhel"]
   distro_version = {
     "rhel"   = var.rhel_distro_version
     "ubuntu" = var.ubuntu_distro_version
   }
+  editions = ["ce", "ent", "ent.fips1402", "ent.hsm", "ent.hsm.fips1402"]
   packages = ["jq"]
   distro_packages = {
     ubuntu = ["netcat"]
     rhel   = ["nc"]
   }
   sample_attributes = {
-    # NOTE(9/28/23): Temporarily use us-east-2 due to another networking issue in us-east-1
-    # aws_region = ["us-east-1", "us-west-2"]
-    aws_region = ["us-east-2", "us-west-2"]
+    aws_region = ["us-east-1", "us-west-2"]
   }
+  seals = ["awskms", "pkcs11", "shamir"]
   tags = merge({
     "Project Name" : var.project_name
     "Project" : "Enos",
     "Environment" : "ci"
   }, var.tags)
+  // NOTE: when backporting, make sure that our initial versions are less than that
+  // release branch's version. Also beware if adding versions below 1.11.x. Some scenarios
+  // that use this global might not work as expected with earlier versions. Below 1.8.x is
+  // not supported in any way.
+  upgrade_initial_versions = ["1.11.12", "1.12.11", "1.13.11", "1.14.7", "1.15.3"]
   vault_install_dir_packages = {
     rhel   = "/bin"
     ubuntu = "/usr/bin"


@@ -49,6 +49,10 @@ module "generate_secondary_token" {
   vault_install_dir = var.vault_install_dir
 }

+module "install_packages" {
+  source = "./modules/install_packages"
+}
+
 module "read_license" {
   source = "./modules/read_license"
 }
@@ -57,15 +61,24 @@ module "replication_data" {
   source = "./modules/replication_data"
 }

-module "seal_key_awskms" {
-  source = "./modules/seal_key_awskms"
+module "seal_awskms" {
+  source = "./modules/seal_awskms"

+  cluster_ssh_keypair = var.aws_ssh_keypair_name
   common_tags = var.tags
 }

-module "seal_key_shamir" {
-  source = "./modules/seal_key_shamir"
+module "seal_shamir" {
+  source = "./modules/seal_shamir"

+  cluster_ssh_keypair = var.aws_ssh_keypair_name
+  common_tags         = var.tags
+}
+
+module "seal_pkcs11" {
+  source = "./modules/seal_pkcs11"
+
+  cluster_ssh_keypair = var.aws_ssh_keypair_name
   common_tags = var.tags
 }
@@ -270,19 +283,16 @@ module "vault_wait_for_leader" {
   source = "./modules/vault_wait_for_leader"

   vault_install_dir    = var.vault_install_dir
-  vault_instance_count = var.vault_instance_count
 }

 module "vault_wait_for_seal_rewrap" {
   source = "./modules/vault_wait_for_seal_rewrap"

   vault_install_dir    = var.vault_install_dir
-  vault_instance_count = var.vault_instance_count
 }

 module "verify_seal_type" {
   source = "./modules/verify_seal_type"

   vault_install_dir    = var.vault_install_dir
-  vault_instance_count = var.vault_instance_count
 }


@@ -3,14 +3,14 @@
 scenario "agent" {
   matrix {
-    arch            = ["amd64", "arm64"]
-    artifact_source = ["local", "crt", "artifactory"]
-    artifact_type   = ["bundle", "package"]
-    backend         = ["consul", "raft"]
-    consul_version  = ["1.12.9", "1.13.9", "1.14.9", "1.15.5", "1.16.1"]
-    distro          = ["ubuntu", "rhel"]
-    edition         = ["ce", "ent", "ent.fips1402", "ent.hsm", "ent.hsm.fips1402"]
-    seal            = ["awskms", "shamir"]
+    arch            = global.archs
+    artifact_source = global.artifact_sources
+    artifact_type   = global.artifact_types
+    backend         = global.backends
+    consul_version  = global.consul_versions
+    distro          = global.distros
+    edition         = global.editions
+    seal            = global.seals
     seal_ha_beta    = ["true", "false"]

     # Our local builder always creates bundles
@@ -24,6 +24,12 @@ scenario "agent" {
       arch    = ["arm64"]
       edition = ["ent.fips1402", "ent.hsm", "ent.hsm.fips1402"]
     }
+
+    # PKCS#11 can only be used on ent.hsm and ent.hsm.fips1402.
+    exclude {
+      seal    = ["pkcs11"]
+      edition = ["ce", "ent", "ent.fips1402"]
+    }
   }

   terraform_cli = terraform_cli.default
@@ -82,15 +88,6 @@ scenario "agent" {
     }
   }

-  step "create_seal_key" {
-    module = "seal_key_${matrix.seal}"
-
-    variables {
-      cluster_id  = step.create_vpc.cluster_id
-      common_tags = global.tags
-    }
-  }
-
   // This step reads the contents of the backend license if we're using a Consul backend and
   // the edition is "ent".
   step "read_backend_license" {
@@ -111,6 +108,20 @@ scenario "agent" {
     }
   }

+  step "create_seal_key" {
+    module     = "seal_${matrix.seal}"
+    depends_on = [step.create_vpc]
+
+    providers = {
+      enos = provider.enos.ubuntu
+    }
+
+    variables {
+      cluster_id  = step.create_vpc.id
+      common_tags = global.tags
+    }
+  }
+
   step "create_vault_cluster_targets" {
     module     = module.target_ec2_instances
     depends_on = [step.create_vpc]
@@ -195,8 +206,8 @@ scenario "agent" {
       local_artifact_path = local.artifact_path
       manage_service      = local.manage_service
       packages            = concat(global.packages, global.distro_packages[matrix.distro])
+      seal_attributes     = step.create_seal_key.attributes
       seal_ha_beta        = matrix.seal_ha_beta
-      seal_key_name       = step.create_seal_key.resource_name
       seal_type           = matrix.seal
       storage_backend     = matrix.backend
       target_hosts        = step.create_vault_cluster_targets.hosts
@@ -440,9 +451,9 @@ scenario "agent" {
     value       = step.create_vault_cluster.recovery_keys_hex
   }

-  output "seal_key_name" {
-    description = "The name of the cluster seal key"
-    value       = step.create_seal_key.resource_name
+  output "seal_attributes" {
+    description = "The Vault cluster seal attributes"
+    value       = step.create_seal_key.attributes
   }

   output "unseal_keys_b64" {

@@ -3,17 +3,21 @@
 scenario "autopilot" {
   matrix {
-    arch            = ["amd64", "arm64"]
-    artifact_source = ["local", "crt", "artifactory"]
-    artifact_type   = ["bundle", "package"]
-    distro          = ["ubuntu", "rhel"]
-    edition         = ["ce", "ent", "ent.fips1402", "ent.hsm", "ent.hsm.fips1402"]
-    // NOTE: when backporting, make sure that our initial versions are less than that
-    // release branch's version.
-    initial_version = ["1.11.12", "1.12.11", "1.13.6", "1.14.2"]
-    seal            = ["awskms", "shamir"]
+    arch            = global.archs
+    artifact_source = global.artifact_sources
+    artifact_type   = global.artifact_types
+    consul_version  = global.consul_versions
+    distro          = global.distros
+    edition         = global.editions
+    initial_version = global.upgrade_initial_versions
+    seal            = global.seals
     seal_ha_beta    = ["true", "false"]

+    # Autopilot wasn't available before 1.11.x
+    exclude {
+      initial_version = ["1.8.12", "1.9.10", "1.10.11"]
+    }
+
     # Our local builder always creates bundles
     exclude {
       artifact_source = ["local"]
@@ -25,6 +29,12 @@ scenario "autopilot" {
       arch    = ["arm64"]
       edition = ["ent.fips1402", "ent.hsm", "ent.hsm.fips1402"]
     }
+
+    # PKCS#11 can only be used on ent.hsm and ent.hsm.fips1402.
+    exclude {
+      seal    = ["pkcs11"]
+      edition = ["ce", "ent", "ent.fips1402"]
+    }
   }

   terraform_cli = terraform_cli.default
@@ -78,15 +88,6 @@ scenario "autopilot" {
     }
   }

-  step "create_seal_key" {
-    module = "seal_key_${matrix.seal}"
-
-    variables {
-      cluster_id  = step.create_vpc.cluster_id
-      common_tags = global.tags
-    }
-  }
-
   step "read_license" {
     module = module.read_license
@@ -95,6 +96,20 @@ scenario "autopilot" {
     }
   }

+  step "create_seal_key" {
+    module     = "seal_${matrix.seal}"
+    depends_on = [step.create_vpc]
+
+    providers = {
+      enos = provider.enos.ubuntu
+    }
+
+    variables {
+      cluster_id  = step.create_vpc.id
+      common_tags = global.tags
+    }
+  }
+
   step "create_vault_cluster_targets" {
     module     = module.target_ec2_instances
     depends_on = [step.create_vpc]
@@ -112,6 +127,23 @@ scenario "autopilot" {
     }
   }

+  step "create_vault_cluster_upgrade_targets" {
+    module     = module.target_ec2_instances
+    depends_on = [step.create_vpc]
+
+    providers = {
+      enos = local.enos_provider[matrix.distro]
+    }
+
+    variables {
+      ami_id         = step.ec2_info.ami_ids[matrix.arch][matrix.distro][global.distro_version[matrix.distro]]
+      common_tags    = global.tags
+      cluster_name   = step.create_vault_cluster_targets.cluster_name
+      seal_key_names = step.create_seal_key.resource_names
+      vpc_id         = step.create_vpc.id
+    }
+  }
+
   step "create_vault_cluster" {
     module = module.vault_cluster
     depends_on = [
@@ -133,8 +165,8 @@ scenario "autopilot" {
         edition = matrix.edition
         version = matrix.initial_version
       }
+      seal_attributes = step.create_seal_key.attributes
       seal_ha_beta    = matrix.seal_ha_beta
-      seal_key_name   = step.create_seal_key.resource_name
       seal_type       = matrix.seal
       storage_backend = "raft"
       storage_backend_addl_config = {
@@ -192,23 +224,6 @@ scenario "autopilot" {
     }
   }

-  step "create_vault_cluster_upgrade_targets" {
-    module     = module.target_ec2_instances
-    depends_on = [step.create_vpc]
-
-    providers = {
-      enos = local.enos_provider[matrix.distro]
-    }
-
-    variables {
-      ami_id         = step.ec2_info.ami_ids[matrix.arch][matrix.distro][global.distro_version[matrix.distro]]
-      common_tags    = global.tags
-      cluster_name   = step.create_vault_cluster_targets.cluster_name
-      seal_key_names = step.create_seal_key.resource_names
-      vpc_id         = step.create_vpc.id
-    }
-  }
-
   step "upgrade_vault_cluster_with_autopilot" {
     module = module.vault_cluster
     depends_on = [
@@ -236,7 +251,7 @@ scenario "autopilot" {
       packages            = concat(global.packages, global.distro_packages[matrix.distro])
      root_token          = step.create_vault_cluster.root_token
       seal_ha_beta        = matrix.seal_ha_beta
-      seal_key_name       = step.create_seal_key.resource_name
+      seal_attributes     = step.create_seal_key.attributes
       seal_type           = matrix.seal
       shamir_unseal_keys  = matrix.seal == "shamir" ? step.create_vault_cluster.unseal_keys_hex : null
       storage_backend     = "raft"
@@ -555,9 +570,9 @@ scenario "autopilot" {
     value       = step.create_vault_cluster.recovery_keys_hex
   }

-  output "seal_key_name" {
-    description = "The Vault cluster seal key name"
-    value       = step.create_seal_key.resource_name
+  output "seal_attributes" {
+    description = "The Vault cluster seal attributes"
+    value       = step.create_seal_key.attributes
   }

   output "unseal_keys_b64" {


@@ -3,14 +3,14 @@
 scenario "proxy" {
   matrix {
-    arch            = ["amd64", "arm64"]
-    artifact_source = ["local", "crt", "artifactory"]
-    artifact_type   = ["bundle", "package"]
-    backend         = ["consul", "raft"]
-    consul_version  = ["1.12.9", "1.13.9", "1.14.9", "1.15.5", "1.16.1"]
-    distro          = ["ubuntu", "rhel"]
-    edition         = ["ce", "ent", "ent.fips1402", "ent.hsm", "ent.hsm.fips1402"]
-    seal            = ["awskms", "shamir"]
+    arch            = global.archs
+    artifact_source = global.artifact_sources
+    artifact_type   = global.artifact_types
+    backend         = global.backends
+    consul_version  = global.consul_versions
+    distro          = global.distros
+    edition         = global.editions
+    seal            = global.seals
     seal_ha_beta    = ["true", "false"]

     # Our local builder always creates bundles
@@ -24,6 +24,12 @@ scenario "proxy" {
       arch    = ["arm64"]
       edition = ["ent.fips1402", "ent.hsm", "ent.hsm.fips1402"]
     }
+
+    # PKCS#11 can only be used on ent.hsm and ent.hsm.fips1402.
+    exclude {
+      seal    = ["pkcs11"]
+      edition = ["ce", "ent", "ent.fips1402"]
+    }
   }

   terraform_cli = terraform_cli.default
@@ -82,15 +88,6 @@ scenario "proxy" {
     }
   }

-  step "create_seal_key" {
-    module = "seal_key_${matrix.seal}"
-
-    variables {
-      cluster_id  = step.create_vpc.cluster_id
-      common_tags = global.tags
-    }
-  }
-
   // This step reads the contents of the backend license if we're using a Consul backend and
   // the edition is "ent".
   step "read_backend_license" {
@@ -111,6 +108,20 @@ scenario "proxy" {
     }
   }

+  step "create_seal_key" {
+    module     = "seal_${matrix.seal}"
+    depends_on = [step.create_vpc]
+
+    providers = {
+      enos = provider.enos.ubuntu
+    }
+
+    variables {
+      cluster_id  = step.create_vpc.id
+      common_tags = global.tags
+    }
+  }
+
   step "create_vault_cluster_targets" {
     module     = module.target_ec2_instances
     depends_on = [step.create_vpc]
@@ -196,7 +207,7 @@ scenario "proxy" {
       manage_service      = local.manage_service
       packages            = concat(global.packages, global.distro_packages[matrix.distro])
       seal_ha_beta        = matrix.seal_ha_beta
-      seal_key_name       = step.create_seal_key.resource_name
+      seal_attributes     = step.create_seal_key.attributes
       seal_type           = matrix.seal
       storage_backend     = matrix.backend
       target_hosts        = step.create_vault_cluster_targets.hosts
@@ -412,9 +423,9 @@ scenario "proxy" {
     value       = step.create_vault_cluster.recovery_keys_hex
   }

-  output "seal_key_name" {
-    description = "The Vault cluster seal key name"
-    value       = step.create_seal_key.resource_name
+  output "seal_attributes" {
+    description = "The Vault cluster seal attributes"
+    value       = step.create_seal_key.attributes
   }

   output "unseal_keys_b64" {


@@ -6,17 +6,17 @@
 // nodes on primary Vault cluster
 scenario "replication" {
   matrix {
-    arch              = ["amd64", "arm64"]
-    artifact_source   = ["local", "crt", "artifactory"]
-    artifact_type     = ["bundle", "package"]
-    consul_version    = ["1.12.9", "1.13.9", "1.14.9", "1.15.5", "1.16.1"]
-    distro            = ["ubuntu", "rhel"]
-    edition           = ["ent", "ent.fips1402", "ent.hsm", "ent.hsm.fips1402"]
-    primary_backend   = ["raft", "consul"]
-    primary_seal      = ["awskms", "shamir"]
+    arch              = global.archs
+    artifact_source   = global.artifact_sources
+    artifact_type     = global.artifact_types
+    consul_version    = global.consul_versions
+    distro            = global.distros
+    edition           = global.editions
+    primary_backend   = global.backends
+    primary_seal      = global.seals
     seal_ha_beta      = ["true", "false"]
-    secondary_backend = ["raft", "consul"]
-    secondary_seal    = ["awskms", "shamir"]
+    secondary_backend = global.backends
+    secondary_seal    = global.seals

     # Our local builder always creates bundles
     exclude {
@@ -29,6 +29,17 @@ scenario "replication" {
       arch    = ["arm64"]
       edition = ["ent.fips1402", "ent.hsm", "ent.hsm.fips1402"]
     }
+
+    # PKCS#11 can only be used on ent.hsm and ent.hsm.fips1402.
+    exclude {
+      primary_seal = ["pkcs11"]
+      edition      = ["ce", "ent", "ent.fips1402"]
+    }
+
+    exclude {
+      secondary_seal = ["pkcs11"]
+      edition        = ["ce", "ent", "ent.fips1402"]
+    }
   }

   terraform_cli = terraform_cli.default
@@ -87,26 +98,6 @@ scenario "replication" {
     }
   }

-  step "create_primary_seal_key" {
-    module = "seal_key_${matrix.primary_seal}"
-
-    variables {
-      cluster_id   = step.create_vpc.cluster_id
-      cluster_meta = "primary"
-      common_tags  = global.tags
-    }
-  }
-
-  step "create_secondary_seal_key" {
-    module = "seal_key_${matrix.secondary_seal}"
-
-    variables {
-      cluster_id   = step.create_vpc.cluster_id
-      cluster_meta = "secondary"
-      common_tags  = global.tags
-    }
-  }
-
   // This step reads the contents of the backend license if we're using a Consul backend and
   // the edition is "ent".
   step "read_backend_license" {
@@ -126,6 +117,37 @@ scenario "replication" {
     }
   }

+  step "create_primary_seal_key" {
+    module     = "seal_${matrix.primary_seal}"
+    depends_on = [step.create_vpc]
+
+    providers = {
+      enos = provider.enos.ubuntu
+    }
+
+    variables {
+      cluster_id   = step.create_vpc.id
+      cluster_meta = "primary"
+      common_tags  = global.tags
+    }
+  }
+
+  step "create_secondary_seal_key" {
+    module     = "seal_${matrix.secondary_seal}"
+    depends_on = [step.create_vpc]
+
+    providers = {
+      enos = provider.enos.ubuntu
+    }
+
+    variables {
+      cluster_id      = step.create_vpc.id
+      cluster_meta    = "secondary"
+      common_tags     = global.tags
+      other_resources = step.create_primary_seal_key.resource_names
+    }
+  }
+
   # Create all of our instances for both primary and secondary clusters
   step "create_primary_cluster_targets" {
     module     = module.target_ec2_instances
@@ -270,8 +292,8 @@ scenario "replication" {
       local_artifact_path = local.artifact_path
       manage_service      = local.manage_service
       packages            = concat(global.packages, global.distro_packages[matrix.distro])
+      seal_attributes     = step.create_primary_seal_key.attributes
       seal_ha_beta        = matrix.seal_ha_beta
-      seal_key_name       = step.create_primary_seal_key.resource_name
       seal_type           = matrix.primary_seal
       storage_backend     = matrix.primary_backend
       target_hosts        = step.create_primary_cluster_targets.hosts
@@ -328,8 +350,8 @@ scenario "replication" {
       local_artifact_path = local.artifact_path
       manage_service      = local.manage_service
       packages            = concat(global.packages, global.distro_packages[matrix.distro])
+      seal_attributes     = step.create_secondary_seal_key.attributes
       seal_ha_beta        = matrix.seal_ha_beta
-      seal_key_name       = step.create_secondary_seal_key.resource_name
       seal_type           = matrix.secondary_seal
       storage_backend     = matrix.secondary_backend
       target_hosts        = step.create_secondary_cluster_targets.hosts
@@ -625,7 +647,7 @@ scenario "replication" {
       packages            = concat(global.packages, global.distro_packages[matrix.distro])
       root_token          = step.create_primary_cluster.root_token
       seal_ha_beta        = matrix.seal_ha_beta
-      seal_key_name       = step.create_primary_seal_key.resource_name
+      seal_attributes     = step.create_primary_seal_key.attributes
       seal_type           = matrix.primary_seal
       shamir_unseal_keys  = matrix.primary_seal == "shamir" ? step.create_primary_cluster.unseal_keys_hex : null
       storage_backend     = matrix.primary_backend


@@ -3,15 +3,16 @@
 scenario "seal_ha" {
   matrix {
-    arch            = ["amd64", "arm64"]
-    artifact_source = ["local", "crt", "artifactory"]
-    artifact_type   = ["bundle", "package"]
-    backend         = ["consul", "raft"]
-    consul_version  = ["1.12.9", "1.13.9", "1.14.9", "1.15.5", "1.16.1"]
-    distro          = ["ubuntu", "rhel"]
-    edition         = ["ent", "ent.fips1402", "ent.hsm", "ent.hsm.fips1402"]
-    primary_seal    = ["awskms"]
-    secondary_seal  = ["awskms"]
+    arch            = global.archs
+    artifact_source = global.artifact_sources
+    artifact_type   = global.artifact_types
+    backend         = global.backends
+    consul_version  = global.consul_versions
+    distro          = global.distros
+    edition         = global.editions
+    // Seal HA is only supported with auto-unseal devices.
+    primary_seal    = ["awskms", "pkcs11"]
+    secondary_seal  = ["awskms", "pkcs11"]

     # Our local builder always creates bundles
     exclude {
@@ -24,6 +25,17 @@ scenario "seal_ha" {
       arch    = ["arm64"]
       edition = ["ent.fips1402", "ent.hsm", "ent.hsm.fips1402"]
     }
+
+    # PKCS#11 can only be used on ent.hsm and ent.hsm.fips1402.
+    exclude {
+      primary_seal = ["pkcs11"]
+      edition      = ["ce", "ent", "ent.fips1402"]
+    }
+
+    exclude {
+      secondary_seal = ["pkcs11"]
+      edition        = ["ce", "ent", "ent.fips1402"]
+    }
   }

   terraform_cli = terraform_cli.default
@@ -83,20 +95,30 @@ scenario "seal_ha" {
   }

   step "create_primary_seal_key" {
-    module = "seal_key_${matrix.primary_seal}"
+    module     = "seal_${matrix.primary_seal}"
+    depends_on = [step.create_vpc]
+
+    providers = {
+      enos = provider.enos.ubuntu
+    }

     variables {
-      cluster_id   = step.create_vpc.cluster_id
+      cluster_id   = step.create_vpc.id
       cluster_meta = "primary"
       common_tags  = global.tags
     }
   }

   step "create_secondary_seal_key" {
-    module = "seal_key_${matrix.secondary_seal}"
+    module     = "seal_${matrix.secondary_seal}"
+    depends_on = [step.create_vpc]
+
+    providers = {
+      enos = provider.enos.ubuntu
+    }

     variables {
-      cluster_id      = step.create_vpc.cluster_id
+      cluster_id      = step.create_vpc.id
       cluster_meta    = "secondary"
       common_tags     = global.tags
       other_resources = step.create_primary_seal_key.resource_names
@@ -150,9 +172,9 @@ scenario "seal_ha" {
     variables {
       ami_id          = step.ec2_info.ami_ids["arm64"]["ubuntu"]["22.04"]
-      seal_key_names  = step.create_secondary_seal_key.resource_names
       cluster_tag_key = global.backend_tag_key
       common_tags     = global.tags
+      seal_key_names  = step.create_secondary_seal_key.resource_names
       vpc_id          = step.create_vpc.id
     }
   }
@@ -208,8 +230,8 @@ scenario "seal_ha" {
       manage_service      = local.manage_service
       packages            = concat(global.packages, global.distro_packages[matrix.distro])
       // Only configure our primary seal during our initial cluster setup
+      seal_attributes     = step.create_primary_seal_key.attributes
       seal_type           = matrix.primary_seal
-      seal_key_name       = step.create_primary_seal_key.resource_name
       storage_backend     = matrix.backend
       target_hosts        = step.create_vault_cluster_targets.hosts
     }
@@ -333,10 +355,10 @@ scenario "seal_ha" {
       install_dir               = local.vault_install_dir
       license                   = matrix.edition != "ce" ? step.read_vault_license.license : null
       manage_service            = local.manage_service
+      seal_attributes           = step.create_primary_seal_key.attributes
+      seal_attributes_secondary = step.create_secondary_seal_key.attributes
       seal_type                 = matrix.primary_seal
-      seal_key_name             = step.create_primary_seal_key.resource_name
       seal_type_secondary       = matrix.secondary_seal
-      seal_key_name_secondary   = step.create_secondary_seal_key.resource_name
       storage_backend           = matrix.backend
       target_hosts              = step.create_vault_cluster_targets.hosts
     }
@@ -539,8 +561,8 @@ scenario "seal_ha" {
       license         = matrix.edition != "ce" ? step.read_vault_license.license : null
       manage_service  = local.manage_service
       seal_alias      = "secondary"
+      seal_attributes = step.create_secondary_seal_key.attributes
       seal_type       = matrix.secondary_seal
-      seal_key_name   = step.create_secondary_seal_key.resource_name
       storage_backend = matrix.backend
       target_hosts    = step.create_vault_cluster_targets.hosts
     }
@@ -661,9 +683,19 @@ scenario "seal_ha" {
     value       = step.create_vault_cluster.target_hosts
   }

-  output "primary_seal_key_name" {
-    description = "The Vault cluster primary seal key name"
-    value       = step.create_primary_seal_key.resource_name
+  output "initial_seal_rewrap" {
+    description = "The initial seal rewrap status"
+    value       = step.wait_for_initial_seal_rewrap.stdout
+  }
+
+  output "post_migration_seal_rewrap" {
+    description = "The seal rewrap status after migrating the primary seal"
+    value       = step.wait_for_seal_rewrap_after_migration.stdout
+  }
+
+  output "primary_seal_attributes" {
+    description = "The Vault cluster primary seal attributes"
+    value       = step.create_primary_seal_key.attributes
   }

   output "private_ips" {
@@ -696,9 +728,9 @@ scenario "seal_ha" {
     value       = step.create_vault_cluster.recovery_keys_hex
   }

-  output "secondary_seal_key_name" {
-    description = "The Vault cluster secondary seal key name"
-    value       = step.create_secondary_seal_key.resource_name
+  output "secondary_seal_attributes" {
+    description = "The Vault cluster secondary seal attributes"
+    value       = step.create_secondary_seal_key.attributes
   }

   output "unseal_keys_b64" {


@@ -3,14 +3,14 @@
 scenario "smoke" {
   matrix {
-    arch            = ["amd64", "arm64"]
-    artifact_source = ["local", "crt", "artifactory"]
-    artifact_type   = ["bundle", "package"]
-    backend         = ["consul", "raft"]
-    consul_version  = ["1.12.9", "1.13.9", "1.14.9", "1.15.5", "1.16.1"]
-    distro          = ["ubuntu", "rhel"]
-    edition         = ["ce", "ent", "ent.fips1402", "ent.hsm", "ent.hsm.fips1402"]
-    seal            = ["awskms", "shamir"]
+    arch            = global.archs
+    artifact_source = global.artifact_sources
+    artifact_type   = global.artifact_types
+    backend         = global.backends
+    consul_version  = global.consul_versions
+    distro          = global.distros
+    edition         = global.editions
+    seal            = global.seals
     seal_ha_beta    = ["true", "false"]

     # Our local builder always creates bundles
@@ -24,6 +24,12 @@ scenario "smoke" {
       arch    = ["arm64"]
       edition = ["ent.fips1402", "ent.hsm", "ent.hsm.fips1402"]
     }
+
+    # PKCS#11 can only be used on ent.hsm and ent.hsm.fips1402.
+    exclude {
+      seal    = ["pkcs11"]
+      edition = ["ce", "ent", "ent.fips1402"]
+    }
   }

   terraform_cli = terraform_cli.default
@@ -82,15 +88,6 @@ scenario "smoke" {
     }
   }

-  step "create_seal_key" {
-    module = "seal_key_${matrix.seal}"
-
-    variables {
-      cluster_id  = step.create_vpc.cluster_id
-      common_tags = global.tags
-    }
-  }
-
   // This step reads the contents of the backend license if we're using a Consul backend and
   // the edition is "ent".
   step "read_backend_license" {
@@ -111,6 +108,20 @@ scenario "smoke" {
     }
   }

+  step "create_seal_key" {
+    module     = "seal_${matrix.seal}"
+    depends_on = [step.create_vpc]
+
+    providers = {
+      enos = provider.enos.ubuntu
+    }
+
+    variables {
+      cluster_id  = step.create_vpc.id
+      common_tags = global.tags
+    }
+  }
+
   step "create_vault_cluster_targets" {
     module     = module.target_ec2_instances
     depends_on = [step.create_vpc]
@@ -172,7 +183,7 @@ scenario "smoke" {
     depends_on = [
       step.create_backend_cluster,
       step.build_vault,
-      step.create_vault_cluster_targets
+      step.create_vault_cluster_targets,
     ]

     providers = {
@@ -196,7 +207,7 @@ scenario "smoke" {
       manage_service      = local.manage_service
       packages            = concat(global.packages, global.distro_packages[matrix.distro])
       seal_ha_beta        = matrix.seal_ha_beta
-      seal_key_name       = step.create_seal_key.resource_name
+      seal_attributes     = step.create_seal_key.attributes
       seal_type           = matrix.seal
       storage_backend     = matrix.backend
       target_hosts        = step.create_vault_cluster_targets.hosts
@@ -403,9 +414,9 @@ scenario "smoke" {
     value       = step.create_vault_cluster.recovery_keys_hex
   }

-  output "seal_key_name" {
-    description = "The Vault cluster seal key name"
-    value       = step.create_seal_key.name
+  output "seal_key_attributes" {
+    description = "The Vault cluster seal attributes"
+    value       = step.create_seal_key.attributes
   }

   output "unseal_keys_b64" {


@@ -3,8 +3,8 @@
 scenario "ui" {
   matrix {
+    backend      = global.backends
     edition      = ["ce", "ent"]
-    backend      = ["consul", "raft"]
     seal_ha_beta = ["true", "false"]
   }
@@ -26,7 +26,7 @@ scenario "ui" {
     }
     bundle_path    = abspath(var.vault_artifact_path)
     distro         = "ubuntu"
-    consul_version = "1.16.1"
+    consul_version = "1.17.0"
     seal           = "awskms"
     tags = merge({
       "Project Name" : var.project_name
@@ -70,7 +70,7 @@ scenario "ui" {
   }

   step "create_seal_key" {
-    module = "seal_key_${local.seal}"
+    module = "seal_${local.seal}"

     variables {
       cluster_id = step.create_vpc.cluster_id
@@ -110,7 +110,7 @@ scenario "ui" {
       ami_id          = step.ec2_info.ami_ids[local.arch][local.distro][var.ubuntu_distro_version]
       cluster_tag_key = local.vault_tag_key
       common_tags     = local.tags
-      seal_key_names  = step.create_seal_key.resource_names
+      seal_names      = step.create_seal_key.resource_names
       vpc_id          = step.create_vpc.id
     }
   }
@@ -127,7 +127,7 @@ scenario "ui" {
       ami_id          = step.ec2_info.ami_ids["arm64"]["ubuntu"]["22.04"]
       cluster_tag_key = local.backend_tag_key
       common_tags     = local.tags
-      seal_key_names  = step.create_seal_key.resource_names
+      seal_names      = step.create_seal_key.resource_names
       vpc_id          = step.create_vpc.id
     }
   }
@@ -181,7 +181,7 @@ scenario "ui" {
       local_artifact_path = local.bundle_path
       packages            = global.distro_packages["ubuntu"]
       seal_ha_beta        = matrix.seal_ha_beta
-      seal_key_name       = step.create_seal_key.resource_name
+      seal_name           = step.create_seal_key.resource_name
       seal_type           = local.seal
       storage_backend     = matrix.backend
       target_hosts        = step.create_vault_cluster_targets.hosts
@@ -263,7 +263,7 @@ scenario "ui" {
     value       = step.create_vault_cluster.root_token
   }

-  output "seal_key_name" {
+  output "seal_name" {
     description = "The Vault cluster seal key name"
     value       = step.create_seal_key.resource_name
   }


@@ -3,19 +3,21 @@
 scenario "upgrade" {
   matrix {
-    arch            = ["amd64", "arm64"]
-    artifact_source = ["local", "crt", "artifactory"]
-    artifact_type   = ["bundle", "package"]
-    backend         = ["consul", "raft"]
-    consul_version  = ["1.14.9", "1.15.5", "1.16.1"]
-    distro          = ["ubuntu", "rhel"]
-    edition         = ["ce", "ent", "ent.fips1402", "ent.hsm", "ent.hsm.fips1402"]
+    arch            = global.archs
+    artifact_source = global.artifact_sources
+    artifact_type   = global.artifact_types
+    backend         = global.backends
+    consul_version  = global.consul_versions
+    distro          = global.distros
+    edition         = global.editions
     // NOTE: when backporting the initial version make sure we don't include initial versions that
     // are a higher minor version that our release candidate. Also, prior to 1.11.x the
     // /v1/sys/seal-status API has known issues that could cause this scenario to fail when using
-    // those earlier versions.
-    initial_version = ["1.11.12", "1.12.11", "1.13.6", "1.14.2"]
-    seal            = ["awskms", "shamir"]
+    // those earlier versions, therefore support from 1.8.x to 1.10.x is unreliable. Prior to 1.8.x
+    // is not supported due to changes with vault's signaling of systemd and the enos-provider
+    // no longer supporting setting the license via the license API.
+    initial_version = global.upgrade_initial_versions
+    seal            = global.seals
     seal_ha_beta    = ["true", "false"]

     # Our local builder always creates bundles
@@ -35,6 +37,12 @@ scenario "upgrade" {
       edition         = ["ent.fips1402", "ent.hsm.fips1402"]
       initial_version = ["1.8.12", "1.9.10"]
     }
+
+    # PKCS#11 can only be used on ent.hsm and ent.hsm.fips1402.
+    exclude {
+      seal    = ["pkcs11"]
+      edition = ["ce", "ent", "ent.fips1402"]
+    }
   }

   terraform_cli = terraform_cli.default
@@ -94,15 +102,6 @@ scenario "upgrade" {
     }
   }

-  step "create_seal_key" {
-    module = "seal_key_${matrix.seal}"
-
-    variables {
-      cluster_id  = step.create_vpc.cluster_id
-      common_tags = global.tags
-    }
-  }
-
   // This step reads the contents of the backend license if we're using a Consul backend and
   // the edition is "ent".
   step "read_backend_license" {
@@ -123,6 +122,20 @@ scenario "upgrade" {
     }
   }

+  step "create_seal_key" {
+    module     = "seal_${matrix.seal}"
+    depends_on = [step.create_vpc]
+
+    providers = {
+      enos = provider.enos.ubuntu
+    }
+
+    variables {
+      cluster_id  = step.create_vpc.id
+      common_tags = global.tags
+    }
+  }
+
   step "create_vault_cluster_targets" {
     module     = module.target_ec2_instances
     depends_on = [step.create_vpc]
@@ -209,7 +222,7 @@ scenario "upgrade" {
         version = matrix.initial_version
       }
       seal_ha_beta    = matrix.seal_ha_beta
-      seal_key_name   = step.create_seal_key.resource_name
+      seal_attributes = step.create_seal_key.attributes
       seal_type       = matrix.seal
       storage_backend = matrix.backend
       target_hosts    = step.create_vault_cluster_targets.hosts
@@ -464,9 +477,9 @@ scenario "upgrade" {
     value       = step.create_vault_cluster.recovery_keys_hex
   }

-  output "seal_key_name" {
-    description = "The Vault cluster seal key name"
-    value       = step.create_seal_key.resource_name
+  output "seal_name" {
+    description = "The Vault cluster seal attributes"
+    value       = step.create_seal_key.attributes
   }

   output "unseal_keys_b64" {


@@ -9,11 +9,6 @@ terraform {
   }
 }

-variable "bundle_path" {
-  type    = string
-  default = "/tmp/vault.zip"
-}
-
 variable "build_tags" {
   type        = list(string)
   description = "The build tags to pass to the Go compiler"
@@ -36,7 +31,10 @@ variable "artifactory_repo" { default = null }
 variable "artifactory_username" { default = null }
 variable "artifactory_token" { default = null }
 variable "arch" { default = null }
-variable "artifact_path" { default = null }
+variable "artifact_path" {
+  type    = string
+  default = "/tmp/vault.zip"
+}
 variable "artifact_type" { default = null }
 variable "distro" { default = null }
 variable "edition" { default = null }
@@ -53,7 +51,7 @@ resource "enos_local_exec" "build" {
   environment = {
     BASE_VERSION = module.local_metadata.version_base
     BIN_PATH     = "dist"
-    BUNDLE_PATH  = var.bundle_path,
+    BUNDLE_PATH  = var.artifact_path,
     GO_TAGS      = join(" ", var.build_tags)
     GOARCH       = var.goarch
     GOOS         = var.goos


@@ -0,0 +1,53 @@
# Copyright (c) HashiCorp, Inc.
# SPDX-License-Identifier: BUSL-1.1
terraform {
required_providers {
enos = {
source = "app.terraform.io/hashicorp-qti/enos"
}
}
}
variable "packages" {
type = list(string)
default = []
}
variable "hosts" {
type = map(object({
private_ip = string
public_ip = string
}))
description = "The hosts to install packages on"
}
variable "timeout" {
type = number
description = "The max number of seconds to wait before timing out"
default = 120
}
variable "retry_interval" {
type = number
description = "How many seconds to wait between each retry"
default = 2
}
resource "enos_remote_exec" "install_packages" {
for_each = var.hosts
environment = {
PACKAGES = length(var.packages) >= 1 ? join(" ", var.packages) : "__skip"
RETRY_INTERVAL = var.retry_interval
TIMEOUT_SECONDS = var.timeout
}
scripts = [abspath("${path.module}/scripts/install-packages.sh")]
transport = {
ssh = {
host = each.value.public_ip
}
}
}


@@ -0,0 +1,49 @@
#!/bin/bash
# Copyright (c) HashiCorp, Inc.
# SPDX-License-Identifier: BUSL-1.1
set -e
fail() {
echo "$1" 1>&2
exit 1
}
[[ -z "$RETRY_INTERVAL" ]] && fail "RETRY_INTERVAL env variable has not been set"
[[ -z "$TIMEOUT_SECONDS" ]] && fail "TIMEOUT_SECONDS env variable has not been set"
[[ -z "$PACKAGES" ]] && fail "PACKAGES env variable has not been set"
install_packages() {
if [ "$PACKAGES" = "__skip" ]; then
return 0
fi
echo "Installing Dependencies: $PACKAGES"
if [ -f /etc/debian_version ]; then
# Do our best to make sure that we don't race with cloud-init. Wait a reasonable time until we
# see ec2 in the sources list. Very rarely cloud-init will take longer than we wait. In that case
# we'll just install our packages.
grep ec2 /etc/apt/sources.list || true
cd /tmp
sudo apt update
# shellcheck disable=2068
sudo apt install -y ${PACKAGES[@]}
else
cd /tmp
# shellcheck disable=2068
sudo yum -y install ${PACKAGES[@]}
fi
}
begin_time=$(date +%s)
end_time=$((begin_time + TIMEOUT_SECONDS))
while [ "$(date +%s)" -lt "$end_time" ]; do
if install_packages; then
exit 0
fi
sleep "$RETRY_INTERVAL"
done
fail "Timed out waiting for packages to install"


@@ -1,6 +1,14 @@
 # Copyright (c) HashiCorp, Inc.
 # SPDX-License-Identifier: BUSL-1.1

+terraform {
+  required_providers {
+    enos = {
+      source = "app.terraform.io/hashicorp-qti/enos"
+    }
+  }
+}
+
 variable "cluster_id" {
   type = string
 }
@@ -10,6 +18,11 @@ variable "cluster_meta" {
   default = null
 }

+variable "cluster_ssh_keypair" {
+  type    = string
+  default = null
+}
+
 variable "common_tags" {
   type    = map(string)
   default = null
@@ -35,22 +48,21 @@ resource "aws_kms_alias" "alias" {
   target_key_id = aws_kms_key.key.key_id
 }

-output "alias" {
-  description = "The key alias name"
-  value       = aws_kms_alias.alias.name
-}
-
-output "id" {
-  description = "The key ID"
-  value       = aws_kms_key.key.key_id
+output "attributes" {
+  description = "Seal device specific attributes"
+  value = {
+    kms_key_id = aws_kms_key.key.arn
+  }
 }

+// We output our resource name and a collection of those passed in to create a full list of key
+// resources that might be required for instance roles that are associated with some unseal types.
 output "resource_name" {
-  description = "The ARN"
+  description = "The awskms key name"
   value       = aws_kms_key.key.arn
 }

 output "resource_names" {
-  description = "The list of names"
+  description = "The list of awskms key names to associate with a role"
   value       = compact(concat([aws_kms_key.key.arn], var.other_resources))
 }


@@ -1,17 +0,0 @@
# Copyright (c) HashiCorp, Inc.
# SPDX-License-Identifier: BUSL-1.1
# A shim unseal key module for shamir seal types
variable "cluster_id" { default = null }
variable "cluster_meta" { default = null }
variable "common_tags" { default = null }
variable "names" {
type = list(string)
default = []
}
output "alias" { value = null }
output "id" { value = null }
output "resource_name" { value = null }
output "resource_names" { value = var.names }


@@ -0,0 +1,126 @@
# Copyright (c) HashiCorp, Inc.
# SPDX-License-Identifier: BUSL-1.1
/*
A seal module that emulates using a real PKCS#11 HSM. For this we'll use softhsm2. You'll
need softhsm2 and opensc installed to get access to the userspace tools and dynamic library that
Vault Enterprise will use. Here we'll take in the vault hosts and use the one of the nodes
to generate the hsm slot and the tokens, and then we'll copy the softhsm tokens to the other nodes.
Using softhsm2 and opensc is a bit complicated but here's a cheat sheet for getting started.
$ brew install softhsm opensc
or
$ sudo apt install softhsm2 opensc
Create a softhsm slot. You can use anything you want for the pin and the supervisor pin. This will
output the slot identifier, which you'll use as the `slot` parameter in the seal config.
$ softhsm2-util --init-token --free --so-pin=1234 --pin=1234 --label="seal" | grep -oE '[0-9]+$'
You can see the slots:
$ softhsm2-util --show-slots
Or use opensc's pkcs11-tool. Make sure to use your pin for the -p flag. The module that we refer
to is the location of the shared library that we need to provide to Vault Enterprise. Depending on
your platform or installation method this could be different.
$ pkcs11-tool --module /usr/local/Cellar/softhsm/2.6.1/lib/softhsm/libsofthsm2.so -a seal -p 1234 -IL
Find yours
$ find /usr/local -type f -name libsofthsm2.so -print -quit
Your tokens will be installed in the default directories.tokendir. See man softhsm2.conf(5) for
more details. On macOS from brew this is /usr/local/var/lib/softhsm/tokens/
Vault Enterprise supports creating the HSM keys, but for softhsm2 that would require us to
initialize with one node before copying the contents. So instead we'll create an HSM key and HMAC
key that we'll copy everywhere.
$ pkcs11-tool --module /usr/local/Cellar/softhsm/2.6.1/lib/softhsm/libsofthsm2.so -a seal -p 1234 --token-label seal --keygen --usage-sign --label hsm_hmac --id 1 --key-type GENERIC:32 --private --sensitive
$ pkcs11-tool --module /usr/local/Cellar/softhsm/2.6.1/lib/softhsm/libsofthsm2.so -a seal -p 1234 --token-label seal --keygen --usage-sign --label hsm_aes --id 2 --key-type AES:32 --private --sensitive --usage-wrap
Now you should be able to configure Vault Enterprise seal stanza.
*/
terraform {
required_providers {
enos = {
source = "app.terraform.io/hashicorp-qti/enos"
}
}
}
variable "cluster_id" {
type = string
description = "The VPC ID of the cluster"
}
variable "cluster_meta" {
type = string
default = null
description = "Any metadata that needs to be passed in. If we're creating multiple softhsm tokens this value could be a prior KEYS_BASE64"
}
variable "cluster_ssh_keypair" {
type = string
description = "The ssh keypair of the vault cluster. We need this to used the inherited provider for our target"
}
variable "common_tags" {
type = map(string)
default = null
}
variable "other_resources" {
type = list(string)
default = []
}
resource "random_string" "id" {
length = 8
numeric = false
special = false
upper = false
}
module "ec2_info" {
source = "../ec2_info"
}
locals {
id = "${var.cluster_id}-${random_string.id.result}"
}
module "target" {
source = "../target_ec2_instances"
ami_id = module.ec2_info.ami_ids["arm64"]["ubuntu"]["22.04"]
cluster_tag_key = local.id
common_tags = var.common_tags
instance_count = 1
instance_types = {
amd64 = "t3a.small"
arm64 = "t4g.small"
}
// Make sure it's not too long, as we use this for aws resources that have size maximums that
// are easy to hit.
project_name = substr("vault-ci-softhsm-${local.id}", 0, 32)
ssh_keypair = var.cluster_ssh_keypair
vpc_id = var.cluster_id
}
module "create_vault_keys" {
source = "../softhsm_create_vault_keys"
cluster_id = var.cluster_id
hosts = module.target.hosts
}
// Our attributes contain all required keys for the seal stanza and our base64 encoded softhsm
// token and keys.
output "attributes" {
description = "Seal device specific attributes"
value = module.create_vault_keys.all_attributes
}
// Shim for chaining seals that require IAM roles
output "resource_name" { value = null }
output "resource_names" { value = var.other_resources }


@@ -0,0 +1,27 @@
# Copyright (c) HashiCorp, Inc.
# SPDX-License-Identifier: BUSL-1.1
# A shim seal module for shamir seals. For Shamir seals the enos_vault_init resource will take care
# of creating our seal.
terraform {
required_providers {
enos = {
source = "app.terraform.io/hashicorp-qti/enos"
}
}
}
variable "cluster_id" { default = null }
variable "cluster_meta" { default = null }
variable "cluster_ssh_keypair" { default = null }
variable "common_tags" { default = null }
variable "image_id" { default = null }
variable "other_resources" {
type = list(string)
default = []
}
output "resource_name" { value = null }
output "resource_names" { value = var.other_resources }
output "attributes" { value = null }


@@ -0,0 +1,131 @@
# Copyright (c) HashiCorp, Inc.
# SPDX-License-Identifier: BUSL-1.1
terraform {
required_providers {
enos = {
source = "app.terraform.io/hashicorp-qti/enos"
}
}
}
variable "cluster_id" {
type = string
}
variable "hosts" {
type = map(object({
private_ip = string
public_ip = string
}))
description = "The hosts that will have access to the softhsm"
}
locals {
pin = resource.random_string.pin.result
aes_label = "vault_hsm_aes_${local.pin}"
hmac_label = "vault_hsm_hmac_${local.pin}"
target = tomap({ "1" = var.hosts[0] })
token = "${var.cluster_id}_${local.pin}"
}
resource "random_string" "pin" {
length = 5
lower = true
upper = false
numeric = true
special = false
}
module "install" {
source = "../softhsm_install"
hosts = local.target
include_tools = true # make sure opensc is also installed as we need it to create keys
}
module "initialize" {
source = "../softhsm_init"
depends_on = [module.install]
hosts = local.target
}
// Create our keys. Our stdout contains the required values for the pkcs11 seal stanza
// as JSON. https://developer.hashicorp.com/vault/docs/configuration/seal/pkcs11#pkcs11-parameters
resource "enos_remote_exec" "create_keys" {
depends_on = [
module.install,
module.initialize,
]
environment = {
AES_LABEL = local.aes_label
HMAC_LABEL = local.hmac_label
PIN = resource.random_string.pin.result
TOKEN_DIR = module.initialize.token_dir
TOKEN_LABEL = local.token
SO_PIN = resource.random_string.pin.result
}
scripts = [abspath("${path.module}/scripts/create-keys.sh")]
transport = {
ssh = {
host = var.hosts[0].public_ip
}
}
}
// Get our softhsm token. Stdout is a base64 encoded gzipped tarball of the softhsm token dir. This
// allows us to pass around binary data inside of Terraform's type system.
resource "enos_remote_exec" "get_keys" {
depends_on = [enos_remote_exec.create_keys]
environment = {
TOKEN_DIR = module.initialize.token_dir
}
scripts = [abspath("${path.module}/scripts/get-keys.sh")]
transport = {
ssh = {
host = var.hosts[0].public_ip
}
}
}
locals {
seal_attributes = jsondecode(resource.enos_remote_exec.create_keys.stdout)
}
output "seal_attributes" {
description = "Seal device specific attributes. Contains all required keys for the seal stanza"
value = local.seal_attributes
}
output "token_base64" {
description = "The softhsm token and keys gzipped tarball in base64"
value = enos_remote_exec.get_keys.stdout
}
output "token_dir" {
description = "The softhsm directory where tokens and keys are stored"
value = module.initialize.token_dir
}
output "token_label" {
description = "The HSM slot token label"
value = local.token
}
output "all_attributes" {
description = "Seal device specific attributes"
value = merge(
local.seal_attributes,
{
token_base64 = enos_remote_exec.get_keys.stdout,
token_dir = module.initialize.token_dir
},
)
}


@@ -0,0 +1,82 @@
#!/bin/bash
# Copyright (c) HashiCorp, Inc.
# SPDX-License-Identifier: BUSL-1.1
set -e
fail() {
echo "$1" 1>&2
exit 1
}
[[ -z "$AES_LABEL" ]] && fail "AES_LABEL env variable has not been set"
[[ -z "$HMAC_LABEL" ]] && fail "HMAC_LABEL env variable has not been set"
[[ -z "$PIN" ]] && fail "PIN env variable has not been set"
[[ -z "$SO_PIN" ]] && fail "SO_PIN env variable has not been set"
[[ -z "$TOKEN_LABEL" ]] && fail "TOKEN_LABEL env variable has not been set"
[[ -z "$TOKEN_DIR" ]] && fail "TOKEN_DIR env variable has not been set"
if ! type softhsm2-util &> /dev/null; then
fail "unable to locate softhsm2-util in PATH. Have you installed softhsm?"
fi
if ! type pkcs11-tool &> /dev/null; then
fail "unable to locate pkcs11-tool in PATH. Have you installed opensc?"
fi
# Create an HSM slot and return the slot number in decimal value.
create_slot() {
sudo softhsm2-util --init-token --free --so-pin="$SO_PIN" --pin="$PIN" --label="$TOKEN_LABEL" | grep -oE '[0-9]+$'
}
# Find the location of our softhsm shared object.
find_softhsm_so() {
sudo find /usr -type f -name libsofthsm2.so -print -quit
}
# Create a key in the slot. Args: module, key label, id number, key type
keygen() {
sudo pkcs11-tool --keygen --usage-sign --private --sensitive --usage-wrap \
--module "$1" \
-p "$PIN" \
--token-label "$TOKEN_LABEL" \
--label "$2" \
--id "$3" \
--key-type "$4"
}
# Create our softhsm slot and keys
main() {
local slot
if ! slot=$(create_slot); then
fail "failed to create softhsm token slot"
fi
local so
if ! so=$(find_softhsm_so); then
fail "unable to locate libsofthsm2.so shared object"
fi
if ! keygen "$so" "$AES_LABEL" 1 'AES:32' 1>&2; then
fail "failed to create AES key"
fi
if ! keygen "$so" "$HMAC_LABEL" 2 'GENERIC:32' 1>&2; then
fail "failed to create HMAC key"
fi
# Return our seal configuration attributes as JSON
cat <<EOF
{
"lib": "${so}",
"slot": "${slot}",
"pin": "${PIN}",
"key_label": "${AES_LABEL}",
"hmac_key_label": "${HMAC_LABEL}",
"generate_key": "false"
}
EOF
exit 0
}
main


@@ -0,0 +1,20 @@
#!/bin/bash
# Copyright (c) HashiCorp, Inc.
# SPDX-License-Identifier: BUSL-1.1
set -e
fail() {
echo "$1" 1>&2
exit 1
}
[[ -z "$TOKEN_DIR" ]] && fail "TOKEN_DIR env variable has not been set"
# Tar up our token. We have to do this as a superuser because softhsm is owned by root.
sudo tar -czf token.tgz -C "$TOKEN_DIR" .
me="$(whoami)"
sudo chown "$me:$me" token.tgz
# Write the value to STDOUT as base64 so we can handle binary data as a string
base64 -i token.tgz


@@ -0,0 +1,108 @@
# Copyright (c) HashiCorp, Inc.
# SPDX-License-Identifier: BUSL-1.1
terraform {
required_providers {
enos = {
source = "app.terraform.io/hashicorp-qti/enos"
version = ">= 0.4.9"
}
}
}
variable "hosts" {
type = map(object({
private_ip = string
public_ip = string
}))
description = "The hosts for whom we'll distribute the softhsm tokens and keys"
}
variable "token_base64" {
type = string
description = "The base64 encoded gzipped tarball of the softhsm token"
}
locals {
// The user/group name for softhsm
softhsm_groups = {
"rhel" = "ods"
"ubuntu" = "softhsm"
}
// Determine if we should skip distribution. If we haven't been passed a base64 token tarball
// we should short-circuit the rest of the module.
skip = var.token_base64 == null || var.token_base64 == "" ? true : false
}
module "install" {
// TODO: Should packages take a string instead of array so we can plan with unknown values that could change?
source = "../softhsm_install"
hosts = var.hosts
include_tools = false # we don't need opensc on machines that did not create the HSM.
}
module "initialize" {
source = "../softhsm_init"
depends_on = [module.install]
hosts = var.hosts
skip = local.skip
}
# In order for the vault service to access our keys we need to deal with file ownership. Make
# sure we have a vault user on the machine if one doesn't already exist. Our distribution script
# below will handle adding vault to the "softhsm" group and setting ownership of the tokens.
resource "enos_user" "vault" {
for_each = var.hosts
name = "vault"
home_dir = "/etc/vault.d"
shell = "/bin/false"
transport = {
ssh = {
host = each.value.public_ip
}
}
}
// Get the host information so we can ensure that the correct user/group is used for softhsm.
resource "enos_host_info" "hosts" {
for_each = var.hosts
transport = {
ssh = {
host = each.value.public_ip
}
}
}
// Distribute our softhsm token and keys to the given hosts.
resource "enos_remote_exec" "distribute_token" {
for_each = var.hosts
depends_on = [
module.initialize,
enos_user.vault,
enos_host_info.hosts,
]
environment = {
TOKEN_BASE64 = var.token_base64
TOKEN_DIR = module.initialize.token_dir
SOFTHSM_GROUP = local.softhsm_groups[enos_host_info.hosts[each.key].distro]
}
scripts = [abspath("${path.module}/scripts/distribute-token.sh")]
transport = {
ssh = {
host = each.value.public_ip
}
}
}
output "lib" {
value = module.install.lib
}
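
# A minimal sketch of wiring this module from a caller; the module label and the token source are
# illustrative:

  module "distribute_softhsm_token" {
    source       = "../softhsm_distribute_vault_keys"
    hosts        = var.target_hosts
    token_base64 = module.seal_pkcs11.token_base64
  }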

View File

@@ -0,0 +1,31 @@
#!/bin/bash
# Copyright (c) HashiCorp, Inc.
# SPDX-License-Identifier: BUSL-1.1
set -ex
fail() {
echo "$1" 1>&2
exit 1
}
# If we're not given keys we'll short circuit. This should only happen if we're skipping distribution
# because we haven't created a token or keys.
if [ -z "$TOKEN_BASE64" ]; then
echo "TOKEN_BASE64 environment variable was unset. Assuming we don't need to distribute our token" 1>&2
exit 0
fi
[[ -z "$SOFTHSM_GROUP" ]] && fail "SOFTHSM_GROUP env variable has not been set"
[[ -z "$TOKEN_DIR" ]] && fail "TOKEN_DIR env variable has not been set"
# Convert our base64 encoded gzipped tarball of the softhsm token back into a tarball.
base64 --decode - > token.tgz <<< "$TOKEN_BASE64"
# Expand it. We assume it was written with the correct directory metadata. Do this as a superuser
# because the token directory should be owned by root.
sudo tar -xvf token.tgz -C "$TOKEN_DIR"
# Make sure the vault user is in the softhsm group to get access to the tokens.
sudo usermod -aG "$SOFTHSM_GROUP" vault
sudo chown -R "vault:$SOFTHSM_GROUP" "$TOKEN_DIR"

View File

@@ -0,0 +1,81 @@
# Copyright (c) HashiCorp, Inc.
# SPDX-License-Identifier: BUSL-1.1
terraform {
required_providers {
enos = {
source = "app.terraform.io/hashicorp-qti/enos"
version = ">= 0.4.9"
}
}
}
variable "hosts" {
type = map(object({
private_ip = string
public_ip = string
}))
description = "The hosts for whom default softhsm configuration will be applied"
}
variable "skip" {
type = bool
default = false
description = "Whether or not to skip initializing softhsm"
}
locals {
// The location on disk to write the softhsm tokens to
token_dir = "/var/lib/softhsm/tokens"
// Where the default configuration is
config_paths = {
"rhel" = "/etc/softhsm2.conf"
"ubuntu" = "/etc/softhsm/softhsm2.conf"
}
host_key = element(keys(enos_host_info.hosts), 0)
config_path = local.config_paths[enos_host_info.hosts[local.host_key].distro]
}
resource "enos_host_info" "hosts" {
for_each = var.hosts
transport = {
ssh = {
host = each.value.public_ip
}
}
}
resource "enos_remote_exec" "init_softhsm" {
for_each = var.hosts
depends_on = [enos_host_info.hosts]
environment = {
CONFIG_PATH = local.config_paths[enos_host_info.hosts[each.key].distro]
TOKEN_DIR = local.token_dir
SKIP = var.skip ? "true" : "false"
}
scripts = [abspath("${path.module}/scripts/init-softhsm.sh")]
transport = {
ssh = {
host = each.value.public_ip
}
}
}
output "config_path" {
// Technically this is just the first config path of our hosts.
value = local.config_path
}
output "token_dir" {
value = local.token_dir
}
output "skipped" {
value = var.skip
}
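
# A minimal sketch of calling this module standalone, assuming the caller holds a possibly-empty
# token_base64 (names illustrative):

  module "init_softhsm" {
    source = "../softhsm_init"
    hosts  = var.hosts
    # Skip writing the config and creating the token dir when there's no token to distribute.
    skip   = var.token_base64 == null || var.token_base64 == ""
  }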

View File

@@ -0,0 +1,30 @@
#!/bin/bash
# Copyright (c) HashiCorp, Inc.
# SPDX-License-Identifier: BUSL-1.1
set -e
fail() {
echo "$1" 1>&2
exit 1
}
[[ -z "$CONFIG_PATH" ]] && fail "CONFIG_PATH env variable has not been set"
[[ -z "$TOKEN_DIR" ]] && fail "TOKEN_DIR env variable has not been set"
[[ -z "$SKIP" ]] && fail "SKIP env variable has not been set"
if [ "$SKIP" == "true" ]; then
exit 0
fi
cat <<EOF | sudo tee "$CONFIG_PATH"
directories.tokendir = $TOKEN_DIR
objectstore.backend = file
log.level = DEBUG
slots.removable = false
slots.mechanisms = ALL
library.reset_on_fork = false
EOF
sudo mkdir -p "$TOKEN_DIR"
sudo chmod 0770 "$TOKEN_DIR"

View File

@@ -0,0 +1,78 @@
# Copyright (c) HashiCorp, Inc.
# SPDX-License-Identifier: BUSL-1.1
terraform {
required_providers {
enos = {
source = "app.terraform.io/hashicorp-qti/enos"
}
}
}
variable "hosts" {
type = map(object({
private_ip = string
public_ip = string
}))
description = "The hosts that will have access to the softhsm. We assume they're all the same platform and architecture"
}
variable "include_tools" {
type = bool
default = false
description = "Install opensc pkcs11-tools along with softhsm"
}
variable "retry_interval" {
type = string
default = "2"
description = "How long to wait between retries"
}
variable "timeout" {
type = string
default = "15"
description = "How many seconds to wait before timing out"
}
locals {
packages = var.include_tools ? ["softhsm", "opensc"] : ["softhsm"]
}
module "install_softhsm" {
source = "../install_packages"
hosts = var.hosts
packages = local.packages
}
resource "enos_remote_exec" "find_shared_object" {
for_each = var.hosts
depends_on = [module.install_softhsm]
environment = {
RETRY_INTERVAL = var.retry_interval
TIMEOUT_SECONDS = var.timeout
}
scripts = [abspath("${path.module}/scripts/find-shared-object.sh")]
transport = {
ssh = {
host = each.value.public_ip
}
}
}
locals {
object_paths = compact(distinct(values(enos_remote_exec.find_shared_object)[*].stdout))
}
output "lib" {
value = local.object_paths[0]
precondition {
condition = length(local.object_paths) == 1
error_message = "SoftHSM targets cannot have different libsofthsm2.so shared object paths. Are they all the same Linux distro?"
}
}
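
# Only the host that creates the token and keys needs opensc's pkcs11-tool; cluster nodes get
# softhsm alone. A sketch of the tooling variant (module label illustrative):

  module "install_softhsm_and_tools" {
    source        = "../softhsm_install"
    hosts         = var.hosts
    include_tools = true # also install opensc so pkcs11-tool is available for key creation
  }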

View File

@@ -0,0 +1,26 @@
#!/bin/bash
# Copyright (c) HashiCorp, Inc.
# SPDX-License-Identifier: BUSL-1.1
set -e
fail() {
echo "$1" 1>&2
exit 1
}
[[ -z "$RETRY_INTERVAL" ]] && fail "RETRY_INTERVAL env variable has not been set"
[[ -z "$TIMEOUT_SECONDS" ]] && fail "TIMEOUT_SECONDS env variable has not been set"
begin_time=$(date +%s)
end_time=$((begin_time + TIMEOUT_SECONDS))
while [ "$(date +%s)" -lt "$end_time" ]; do
if so=$(sudo find /usr -type f -name libsofthsm2.so -print -quit); then
echo "$so"
exit 0
fi
sleep "$RETRY_INTERVAL"
done
fail "Timed out trying to locate libsofthsm2.so shared object"

View File

@@ -7,71 +7,89 @@ terraform {
     # to the public registry
     enos = {
       source  = "app.terraform.io/hashicorp-qti/enos"
-      version = ">= 0.4.7"
+      version = ">= 0.4.8"
     }
   }
 }

+data "enos_environment" "localhost" {}
+
 locals {
   bin_path = "${var.install_dir}/vault"
   environment = local.seal_secondary == null ? var.environment : merge(
     var.environment,
     { VAULT_ENABLE_SEAL_HA_BETA : tobool(var.seal_ha_beta) },
   )
-  // In order to get Terraform to plan we have to use collections with keys
-  // that are known at plan time. In order for our module to work our var.target_hosts
-  // must be a map with known keys at plan time. Here we're creating locals
-  // that keep track of index values that point to our target hosts.
+  // In order to get Terraform to plan we have to use collections with keys that are known at plan
+  // time. Here we're creating locals that keep track of index values that point to our target hosts.
   followers = toset(slice(local.instances, 1, length(local.instances)))
   instances = [for idx in range(length(var.target_hosts)) : tostring(idx)]
-  key_shares = {
-    "awskms" = null
-    "shamir" = 5
-  }
-  key_threshold = {
-    "awskms" = null
-    "shamir" = 3
-  }
   leader = toset(slice(local.instances, 0, 1))
-  recovery_shares = {
-    "awskms" = 5
-    "shamir" = null
-  }
-  recovery_threshold = {
-    "awskms" = 3
-    "shamir" = null
-  }
-  seals = local.seal_secondary.type == "none" ? { primary = local.seal_primary } : {
+  // Handle cases where we might have to distribute HSM tokens for the pkcs11 seal before starting
+  // vault.
+  token_base64           = try(lookup(var.seal_attributes, "token_base64", ""), "")
+  token_base64_secondary = try(lookup(var.seal_attributes_secondary, "token_base64", ""), "")
+  // This module currently supports up to two defined seals. Most of our locals logic here is for
+  // creating the correct seal configuration.
+  seals = {
     primary   = local.seal_primary
     secondary = local.seal_secondary
   }
   seals_primary = {
-    "awskms" = {
+    awskms = {
       type = "awskms"
-      attributes = {
-        name       = var.seal_alias
-        priority   = var.seal_priority
-        kms_key_id = var.seal_key_name
-      }
+      attributes = merge(
+        {
+          name     = var.seal_alias
+          priority = var.seal_priority
+        }, var.seal_attributes
+      )
     }
+    pkcs11 = {
+      type = "pkcs11"
+      attributes = merge(
+        {
+          name     = var.seal_alias
+          priority = var.seal_priority
+        },
+        // Strip out attributes that aren't supposed to be in the seal stanza, like our base64
+        // encoded softhsm blob and the token directory. We'll also inject the shared object
+        // library location that we detect on the target machines. This allows us to create the
+        // token and keys on machines that have different shared object locations.
+        merge(
+          try({ for key, val in var.seal_attributes : key => val if key != "token_base64" && key != "token_dir" }, {}),
+          try({ lib = module.maybe_configure_hsm.lib }, {})
+        ),
+      )
+    }
-    "shamir" = {
+    shamir = {
       type       = "shamir"
       attributes = null
     }
   }
   seal_primary = local.seals_primary[var.seal_type]
   seals_secondary = {
-    "awskms" = {
+    awskms = {
       type = "awskms"
-      attributes = {
-        name       = var.seal_alias_secondary
-        priority   = var.seal_priority_secondary
-        kms_key_id = var.seal_key_name_secondary
-      }
+      attributes = merge(
+        {
+          name     = var.seal_alias_secondary
+          priority = var.seal_priority_secondary
+        }, var.seal_attributes_secondary
+      )
     }
+    pkcs11 = {
+      type = "pkcs11"
+      attributes = merge(
+        {
+          name     = var.seal_alias_secondary
+          priority = var.seal_priority_secondary
+        },
+        merge(
+          try({ for key, val in var.seal_attributes_secondary : key => val if key != "token_base64" && key != "token_dir" }, {}),
+          try({ lib = module.maybe_configure_hsm_secondary.lib }, {})
+        ),
+      )
+    }
-    "none" = {
+    none = {
       type       = "none"
       attributes = null
     }
@@ -91,8 +109,54 @@ locals {
   ]
 }

+# You might be wondering why our start_vault module, which supports shamir, awskms, and pkcs11 seal
+# types, contains sub-modules that are only used for HSM. Well, each of those seal devices has
+# different requirements and as such we have some seal-specific requirements before starting Vault.
+#
+# A Shamir seal key cannot exist until Vault has already started, so this module's responsibility
+# for shamir seals is ensuring that the seal type is passed to the enos_vault_start resource.
+# That's it.
+#
+# Auto-unseal with a KMS requires that we configure the enos_vault_start resource with the correct
+# seal type and the attributes necessary to know which KMS key to use. Vault should automatically
+# unseal if we've given it the correct configuration. As long as Vault is able to access the key
+# in the KMS it should be able to start. That's normally done via roles associated with the target
+# machines, which is outside the scope of this module.
+#
+# Auto-unseal with an HSM and PKCS#11 is more complicated because a shared object library, which is
+# how we interface with the HSM, must be present on each node in order to start Vault. In the real
+# world this means an actual HSM in the same rack or data center as every node in the Vault cluster,
+# but in our case we're creating ephemeral infrastructure for these test scenarios and don't have a
+# real HSM available. We could use CloudHSM or the like, but at the time of writing CloudHSM
+# provisioning takes anywhere from 30 to 60 minutes and costs upwards of $2 an hour. That's far too
+# long and expensive for scenarios we'll run fairly frequently. Instead, we test using a software
+# HSM. Using a software HSM solves the cost and speed problems but creates a new set of problems.
+# We need to ensure every node in the cluster has access to the same "HSM", and with softhsm that
+# means the same software, configuration, tokens and keys. Our `seal_pkcs11` module takes care of
+# creating the token and keys, but that's the end of the road for that module. It's our job to
+# ensure that when we're starting Vault with a software HSM, the correct software, configuration
+# and data are available on the nodes. That's where the following two modules come in. They handle
+# installing the required software, configuring it, and distributing the key data that was passed
+# in via seal attributes.
+module "maybe_configure_hsm" {
+  source = "../softhsm_distribute_vault_keys"
+
+  hosts        = var.target_hosts
+  token_base64 = local.token_base64
+}
+
+module "maybe_configure_hsm_secondary" {
+  source     = "../softhsm_distribute_vault_keys"
+  depends_on = [module.maybe_configure_hsm]
+
+  hosts        = var.target_hosts
+  token_base64 = local.token_base64_secondary
+}
+
 resource "enos_vault_start" "leader" {
   for_each = local.leader
+  depends_on = [
+    module.maybe_configure_hsm_secondary,
+  ]

   bin_path   = local.bin_path
   config_dir = var.config_dir
@@ -167,3 +231,11 @@ resource "enos_vault_start" "followers" {
     }
   }
 }
+
+output "token_base64" {
+  value = local.token_base64
+}
+
+output "token_base64_secondary" {
+  value = local.token_base64_secondary
+}
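
# To make the attribute-stripping merge above concrete, here's a worked sketch with hypothetical
# input values: the distribution-only keys are dropped and `lib` is replaced with the path detected
# on the target machines.

  locals {
    # Hypothetical attributes as produced by the seal_pkcs11 module.
    example_seal_attributes = {
      lib            = "/usr/lib/softhsm/libsofthsm2.so"
      slot           = "600938836"
      pin            = "1234"
      key_label      = "vault_hsm_aes"
      hmac_key_label = "vault_hsm_hmac"
      generate_key   = "false"
      token_base64   = "H4sIA..."                # distribution-only, stripped
      token_dir      = "/var/lib/softhsm/tokens" # distribution-only, stripped
    }

    # Mirrors the merge() in seals_primary: every key except token_base64/token_dir
    # survives, and lib is overridden with the detected shared object path.
    example_stanza_attributes = merge(
      { for k, v in local.example_seal_attributes : k => v if k != "token_base64" && k != "token_dir" },
      { lib = "/usr/lib/x86_64-linux-gnu/softhsm/libsofthsm2.so" }
    )
  }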

View File

@@ -65,15 +65,13 @@ variable "seal_alias_secondary" {
default = "secondary" default = "secondary"
} }
variable "seal_key_name" { variable "seal_attributes" {
type = string description = "The primary auto-unseal attributes"
description = "The primary auto-unseal key name"
default = null default = null
} }
variable "seal_key_name_secondary" { variable "seal_attributes_secondary" {
type = string description = "The secondary auto-unseal attributes"
description = "The secondary auto-unseal key name"
default = null default = null
} }
@@ -95,8 +93,8 @@ variable "seal_type" {
default = "awskms" default = "awskms"
validation { validation {
condition = contains(["awskms", "shamir"], var.seal_type) condition = contains(["awskms", "pkcs11", "shamir"], var.seal_type)
error_message = "The seal_type must be either awskms or shamir. No other unseal methods are supported." error_message = "The seal_type must be either 'awskms', 'pkcs11', or 'shamir'. No other seal types are supported."
} }
} }
@@ -106,8 +104,8 @@ variable "seal_type_secondary" {
default = "none" default = "none"
validation { validation {
condition = contains(["awskms", "none"], var.seal_type_secondary) condition = contains(["awskms", "pkcs11", "none"], var.seal_type_secondary)
error_message = "The secondary_seal_type must be 'awskms' or 'none'. No other secondary unseal methods are supported." error_message = "The secondary_seal_type must be 'awskms', 'pkcs11' or 'none'. No other secondary seal types are supported."
} }
} }

View File

@@ -50,7 +50,7 @@ variable "project_name" {
variable "seal_key_names" { variable "seal_key_names" {
type = list(string) type = list(string)
description = "The key management seal key names" description = "The key management seal key names"
default = null default = []
} }
variable "ssh_allow_ips" { variable "ssh_allow_ips" {

View File

@@ -16,6 +16,7 @@ data "enos_environment" "localhost" {}
 locals {
   audit_device_file_path = "/var/log/vault/vault_audit.log"
+  audit_socket_port      = "9090"
   bin_path               = "${var.install_dir}/vault"
   consul_bin_path        = "${var.consul_install_dir}/consul"
   enable_audit_devices   = var.enable_audit_devices && var.initialize_cluster
@@ -28,19 +29,23 @@ locals {
   key_shares = {
     "awskms" = null
     "shamir" = 5
+    "pkcs11" = null
   }
   key_threshold = {
     "awskms" = null
     "shamir" = 3
+    "pkcs11" = null
   }
   leader = toset(slice(local.instances, 0, 1))
   recovery_shares = {
     "awskms" = 5
     "shamir" = null
+    "pkcs11" = 5
   }
   recovery_threshold = {
     "awskms" = 3
     "shamir" = null
+    "pkcs11" = 3
   }
   vault_service_user = "vault"
 }
@@ -76,26 +81,14 @@ resource "enos_bundle_install" "vault" {
   }
 }

-resource "enos_remote_exec" "install_packages" {
+module "install_packages" {
+  source = "../install_packages"
   depends_on = [
     enos_bundle_install.vault, // Don't race for the package manager locks with vault install
   ]
-  for_each = {
-    for idx, host in var.target_hosts : idx => var.target_hosts[idx]
-    if length(var.packages) > 0
-  }
-
-  environment = {
-    PACKAGES = join(" ", var.packages)
-  }
-
-  scripts = [abspath("${path.module}/scripts/install-packages.sh")]
-
-  transport = {
-    ssh = {
-      host = each.value.public_ip
-    }
-  }
+  hosts    = var.target_hosts
+  packages = var.packages
 }

 resource "enos_consul_start" "consul" {
@@ -138,9 +131,9 @@ module "start_vault" {
   license                   = var.license
   log_level                 = var.log_level
   manage_service            = var.manage_service
+  seal_attributes           = var.seal_attributes
+  seal_attributes_secondary = var.seal_attributes_secondary
   seal_ha_beta              = var.seal_ha_beta
-  seal_key_name             = var.seal_key_name
-  seal_key_name_secondary   = var.seal_key_name_secondary
   seal_type                 = var.seal_type
   seal_type_secondary       = var.seal_type_secondary
   service_username          = local.vault_service_user
@@ -265,7 +258,35 @@ resource "enos_remote_exec" "create_audit_log_dir" {
     SERVICE_USER = local.vault_service_user
   }

-  scripts = [abspath("${path.module}/scripts/create_audit_log_dir.sh")]
+  scripts = [abspath("${path.module}/scripts/create-audit-log-dir.sh")]

   transport = {
     ssh = {
       host = var.target_hosts[each.value].public_ip
     }
   }
 }

+# We need to ensure that the socket listener used for the audit socket device is listening on each
+# node in the cluster. If we have a leader election or vault is restarted it'll fail unless the
+# listener is running.
+resource "enos_remote_exec" "start_audit_socket_listener" {
+  depends_on = [
+    module.start_vault,
+    enos_vault_unseal.leader,
+    enos_vault_unseal.followers,
+    enos_vault_unseal.maybe_force_unseal,
+  ]
+
+  for_each = toset([
+    for idx, host in toset(local.instances) : idx
+    if var.enable_audit_devices
+  ])
+
+  environment = {
+    SOCKET_PORT = local.audit_socket_port
+  }
+
+  scripts = [abspath("${path.module}/scripts/start-audit-socket-listener.sh")]
+
+  transport = {
+    ssh = {
+      host = var.target_hosts[each.value].public_ip
+    }
+  }
+}
@@ -277,6 +298,7 @@
 resource "enos_remote_exec" "enable_audit_devices" {
   depends_on = [
     enos_remote_exec.create_audit_log_dir,
+    enos_remote_exec.start_audit_socket_listener,
   ]
   for_each = toset([
     for idx in local.leader : idx
@@ -284,14 +306,14 @@ resource "enos_remote_exec" "enable_audit_devices" {
   ])

   environment = {
-    VAULT_TOKEN    = enos_vault_init.leader[each.key].root_token
+    LOG_FILE_PATH  = local.audit_device_file_path
+    SOCKET_PORT    = local.audit_socket_port
     VAULT_ADDR     = "http://127.0.0.1:8200"
     VAULT_BIN_PATH = local.bin_path
-    LOG_FILE_PATH  = local.audit_device_file_path
-    SERVICE_USER   = local.vault_service_user
+    VAULT_TOKEN    = enos_vault_init.leader[each.key].root_token
   }

-  scripts = [abspath("${path.module}/scripts/enable_audit_logging.sh")]
+  scripts = [abspath("${path.module}/scripts/enable-audit-devices.sh")]

   transport = {
     ssh = {
@@ -299,11 +321,3 @@ resource "enos_remote_exec" "enable_audit_devices" {
     }
   }
 }
-
-resource "enos_local_exec" "wait_for_install_packages" {
-  depends_on = [
-    enos_remote_exec.install_packages,
-  ]
-
-  inline = ["true"]
-}

View File

@@ -62,3 +62,11 @@ output "unseal_shares" {
output "unseal_threshold" { output "unseal_threshold" {
value = try(enos_vault_init.leader[0].unseal_keys_threshold, -1) value = try(enos_vault_init.leader[0].unseal_keys_threshold, -1)
} }
output "keys_base64" {
value = try(module.start_vault.keys_base64, null)
}
output "keys_base64_secondary" {
value = try(module.start_vault.keys_base64_secondary, null)
}

View File

@@ -2,9 +2,16 @@
 # Copyright (c) HashiCorp, Inc.
 # SPDX-License-Identifier: BUSL-1.1

 set -eux

+fail() {
+  echo "$1" 1>&2
+  exit 1
+}
+
+[[ -z "$LOG_FILE_PATH" ]] && fail "LOG_FILE_PATH env variable has not been set"
+[[ -z "$SERVICE_USER" ]] && fail "SERVICE_USER env variable has not been set"
+
 LOG_DIR=$(dirname "$LOG_FILE_PATH")

 function retry {

View File

@@ -0,0 +1,48 @@
#!/usr/bin/env bash
# Copyright (c) HashiCorp, Inc.
# SPDX-License-Identifier: BUSL-1.1
set -exo pipefail
fail() {
echo "$1" 1>&2
exit 1
}
[[ -z "$LOG_FILE_PATH" ]] && fail "LOG_FILE_PATH env variable has not been set"
[[ -z "$SOCKET_PORT" ]] && fail "SOCKET_PORT env variable has not been set"
[[ -z "$VAULT_ADDR" ]] && fail "VAULT_ADDR env variable has not been set"
[[ -z "$VAULT_BIN_PATH" ]] && fail "VAULT_BIN_PATH env variable has not been set"
[[ -z "$VAULT_TOKEN" ]] && fail "VAULT_TOKEN env variable has not been set"
enable_file_audit_device() {
$VAULT_BIN_PATH audit enable file file_path="$LOG_FILE_PATH"
}
enable_syslog_audit_device(){
$VAULT_BIN_PATH audit enable syslog tag="vault" facility="AUTH"
}
enable_socket_audit_device() {
"$VAULT_BIN_PATH" audit enable socket address="127.0.0.1:$SOCKET_PORT"
}
main() {
if ! enable_file_audit_device; then
fail "Failed to enable vault file audit device"
fi
if ! enable_syslog_audit_device; then
fail "Failed to enable vault syslog audit device"
fi
if ! enable_socket_audit_device; then
local log
log=$(cat /tmp/vault-socket.log)
fail "Failed to enable vault socket audit device: listener log: $log"
fi
return 0
}
main

View File

@@ -1,38 +0,0 @@
#!/bin/env bash
# Copyright (c) HashiCorp, Inc.
# SPDX-License-Identifier: BUSL-1.1
set -exo pipefail
# Run nc to listen on port 9090 for the socket auditor. We spawn nc
# with nohup to ensure that the listener doesn't expect a SIGHUP and
# thus block the SSH session from exiting or terminating on exit.
# We immediately write to STDIN from /dev/null to give nc an
# immediate EOF so as to not block on expecting STDIN.
nohup nc -kl 9090 &> /dev/null < /dev/null &
# Wait for nc to be listening before we attempt to enable the socket auditor.
attempts=3
count=0
until nc -zv 127.0.0.1 9090 &> /dev/null < /dev/null; do
wait=$((2 ** count))
count=$((count + 1))
if [ "$count" -le "$attempts" ]; then
sleep "$wait"
if ! pgrep -x nc; then
nohup nc -kl 9090 &> /dev/null < /dev/null &
fi
else
echo "Timed out waiting for nc to listen on 127.0.0.1:9090" 1>&2
exit 1
fi
done
sleep 1
# Enable the auditors.
$VAULT_BIN_PATH audit enable file file_path="$LOG_FILE_PATH"
$VAULT_BIN_PATH audit enable syslog tag="vault" facility="AUTH"
$VAULT_BIN_PATH audit enable socket address="127.0.0.1:9090" || true

View File

@@ -1,48 +0,0 @@
#!/bin/bash
# Copyright (c) HashiCorp, Inc.
# SPDX-License-Identifier: BUSL-1.1
set -ex -o pipefail
if [ "$PACKAGES" == "" ]
then
echo "No dependencies to install."
exit 0
fi
function retry {
local retries=$1
shift
local count=0
until "$@"; do
exit=$?
wait=$((2 ** count))
count=$((count + 1))
if [ "$count" -lt "$retries" ]; then
sleep "$wait"
else
exit "$exit"
fi
done
return 0
}
echo "Installing Dependencies: $PACKAGES"
if [ -f /etc/debian_version ]; then
# Do our best to make sure that we don't race with cloud-init. Wait a reasonable time until we
# see ec2 in the sources list. Very rarely cloud-init will take longer than we wait. In that case
# we'll just install our packages.
retry 7 grep ec2 /etc/apt/sources.list || true
cd /tmp
retry 5 sudo apt update
# shellcheck disable=2068
retry 5 sudo apt install -y ${PACKAGES[@]}
else
cd /tmp
# shellcheck disable=2068
retry 7 sudo yum -y install ${PACKAGES[@]}
fi

View File

@@ -0,0 +1,64 @@
#!/usr/bin/env bash
# Copyright (c) HashiCorp, Inc.
# SPDX-License-Identifier: BUSL-1.1
set -exo pipefail
fail() {
echo "$1" 1>&2
exit 1
}
[[ -z "$SOCKET_PORT" ]] && fail "SOCKET_PORT env variable has not been set"
socket_listener_procs() {
pgrep -x nc
}
kill_socket_listener() {
pkill nc
}
test_socket_listener() {
nc -zvw 2 127.0.0.1 "$SOCKET_PORT" < /dev/null
}
start_socket_listener() {
if socket_listener_procs; then
test_socket_listener
return $?
fi
# Run nc to listen on the socket port for the socket auditor. We spawn nc
# with nohup so that it ignores SIGHUP and therefore doesn't block the SSH
# session from exiting or get terminated when the session ends.
nohup nc -kl "$SOCKET_PORT" >> /tmp/vault-socket.log 2>&1 < /dev/null &
}
read_log() {
local f
f=/tmp/vault-socket.log
[[ -f "$f" ]] && cat "$f"
}
main() {
if socket_listener_procs; then
# Clean up old nc's that might not be working
kill_socket_listener
fi
if ! start_socket_listener; then
fail "Failed to start audit socket listener: socket listener log: $(read_log)"
fi
# wait for nc to listen
sleep 1
if ! test_socket_listener; then
fail "Error testing socket listener: socket listener log: $(read_log)"
fi
return 0
}
main

View File

@@ -1,40 +0,0 @@
#!/bin/bash
# Copyright (c) HashiCorp, Inc.
# SPDX-License-Identifier: BUSL-1.1
if test "$LICENSE" = "none"; then
exit 0
fi
function retry {
local retries=$1
shift
local count=0
until "$@"; do
exit=$?
wait=$((2 ** count))
count=$((count + 1))
if [ "$count" -lt "$retries" ]; then
sleep "$wait"
else
return "$exit"
fi
done
return 0
}
export VAULT_ADDR=http://localhost:8200
[[ -z "$VAULT_TOKEN" ]] && fail "VAULT_TOKEN env variable has not been set"
# Temporary hack until we can make the unseal resource handle legacy license
# setting. If we're running 1.8 and above then we shouldn't try to set a license.
ver=$(${BIN_PATH} version)
if [[ "$(echo "$ver" |awk '{print $2}' |awk -F'.' '{print $2}')" -ge 8 ]]; then
exit 0
fi
retry 5 "${BIN_PATH}" write /sys/license text="$LICENSE"

View File

@@ -170,37 +170,35 @@ variable "seal_ha_beta" {
   default = true
 }

-variable "seal_key_name" {
-  type        = string
-  description = "The auto-unseal key name"
+variable "seal_attributes" {
+  description = "The auto-unseal device attributes"
   default     = null
 }

-variable "seal_key_name_secondary" {
-  type        = string
-  description = "The secondary auto-unseal key name"
+variable "seal_attributes_secondary" {
+  description = "The secondary auto-unseal device attributes"
   default     = null
 }

 variable "seal_type" {
   type        = string
-  description = "The method by which to unseal the Vault cluster"
+  description = "The primary seal device type"
   default     = "awskms"

   validation {
-    condition     = contains(["awskms", "shamir"], var.seal_type)
-    error_message = "The seal_type must be either awskms or shamir. No other unseal methods are supported."
+    condition     = contains(["awskms", "pkcs11", "shamir"], var.seal_type)
+    error_message = "The seal_type must be either 'awskms', 'pkcs11', or 'shamir'. No other seal types are supported."
   }
 }

 variable "seal_type_secondary" {
   type        = string
-  description = "A secondary HA seal method. Only supported in Vault Enterprise >= 1.15"
+  description = "A secondary HA seal device type. Only supported in Vault Enterprise >= 1.15"
   default     = "none"

   validation {
-    condition     = contains(["awskms", "none"], var.seal_type_secondary)
-    error_message = "The secondary_seal_type must be 'awskms' or 'none'. No other secondary unseal methods are supported."
+    condition     = contains(["awskms", "none", "pkcs11"], var.seal_type_secondary)
+    error_message = "The secondary_seal_type must be 'awskms', 'none', or 'pkcs11'. No other secondary seal types are supported."
   }
 }

View File

@@ -47,4 +47,4 @@ export VAULT_ADDR='http://127.0.0.1:8200'
 # Retry a few times because it can take some time for things to settle after
 # all the nodes are unsealed
-retry 5 check_voter_status
+retry 7 check_voter_status

View File

@@ -19,11 +19,6 @@ variable "vault_root_token" {
description = "The vault root token" description = "The vault root token"
} }
variable "vault_instance_count" {
type = number
description = "The number of instances in the vault cluster"
}
variable "vault_hosts" { variable "vault_hosts" {
type = map(object({ type = map(object({
private_ip = string private_ip = string

View File

@@ -19,11 +19,6 @@ variable "vault_root_token" {
description = "The vault root token" description = "The vault root token"
} }
variable "vault_instance_count" {
type = number
description = "The number of instances in the vault cluster"
}
variable "vault_hosts" { variable "vault_hosts" {
type = map(object({ type = map(object({
private_ip = string private_ip = string
@@ -46,9 +41,11 @@ variable "retry_interval" {
locals { locals {
private_ips = [for k, v in values(tomap(var.vault_hosts)) : tostring(v["private_ip"])] private_ips = [for k, v in values(tomap(var.vault_hosts)) : tostring(v["private_ip"])]
first_key = element(keys(enos_remote_exec.wait_for_seal_rewrap_to_be_completed), 0)
} }
resource "enos_remote_exec" "wait_for_seal_rewrap_to_be_completed" { resource "enos_remote_exec" "wait_for_seal_rewrap_to_be_completed" {
for_each = var.vault_hosts
environment = { environment = {
RETRY_INTERVAL = var.retry_interval RETRY_INTERVAL = var.retry_interval
TIMEOUT_SECONDS = var.timeout TIMEOUT_SECONDS = var.timeout
@@ -61,7 +58,15 @@ resource "enos_remote_exec" "wait_for_seal_rewrap_to_be_completed" {
transport = { transport = {
ssh = { ssh = {
host = var.vault_hosts[0].public_ip host = each.value.public_ip
} }
} }
} }
output "stdout" {
value = enos_remote_exec.wait_for_seal_rewrap_to_be_completed[local.first_key].stdout
}
output "stderr" {
value = enos_remote_exec.wait_for_seal_rewrap_to_be_completed[local.first_key].stdout
}

View File

@@ -51,6 +51,12 @@ waitForRewrap() {
     return 1
   fi

+  if jq -e '.entries.processed == 0' <<< "$data" &> /dev/null; then
+    echo "A seal rewrap has not been started yet. Number of processed entries is zero and a rewrap is not yet running."
+    return 1
+  fi
+
+  echo "$data"
   return 0
 }
} }

View File

@@ -14,11 +14,6 @@ variable "vault_install_dir" {
description = "The directory where the Vault binary will be installed" description = "The directory where the Vault binary will be installed"
} }
variable "vault_instance_count" {
type = number
description = "How many vault instances are in the cluster"
}
variable "vault_hosts" { variable "vault_hosts" {
type = map(object({ type = map(object({
private_ip = string private_ip = string