Merge branch 'main' into raft-wal

Josh Black committed 2023-11-06 13:17:21 -08:00
1389 changed files with 38192 additions and 17000 deletions


@@ -12,10 +12,5 @@ project {
"ui/node_modules/**",
"enos/modules/k8s_deploy_vault/raft-config.hcl",
"plugins/database/postgresql/scram/**",
# licensed under MPL - ignoring for now until the copywrite tool can support
# multiple licenses per repo.
"sdk/**",
"api/**",
"shamir/**'"
]
}

.github/.secret_scanning.yml (new file)

@@ -0,0 +1,7 @@
# Copyright (c) HashiCorp, Inc.
# SPDX-License-Identifier: BUSL-1.1
paths-ignore:
- '**/*.mdx' # any file ending in .mdx
- '**/*.md' # any file ending in .md
- '**/*_test.go' # any file ending in _test.go


@@ -29,8 +29,9 @@ else
fi
# 'git diff base...head' (three dots) shows the changes introduced on head_commit relative to the merge base (the last common commit with base_commit)
changed_dir=$(git diff $base_commit...$head_commit --name-only | awk -F"/" '{ print $1}' | uniq)
change_count=$(git diff $base_commit...$head_commit --name-only | awk -F"/" '{ print $1}' | uniq | wc -l)
# excluding the changelog directory
changed_dir=$(git diff $base_commit...$head_commit --name-only | awk -F"/" '{ print $1}' | uniq | sed '/changelog/d')
change_count=$(git diff $base_commit...$head_commit --name-only | awk -F"/" '{ print $1}' | uniq | sed '/changelog/d' | wc -l)
# There are 4 main conditions to check:
#
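The pipeline above is easier to follow in isolation. A minimal sketch, with illustrative base/head refs; the three-dot form diffs against the merge base, so only changes introduced on the head branch are counted:

```bash
# awk keeps only the top-level directory of each changed path, uniq
# collapses consecutive repeats, the sed filter drops changelog entries,
# and wc counts the directories that remain.
base_commit="origin/main"
head_commit="HEAD"
git diff "$base_commit...$head_commit" --name-only |
  awk -F"/" '{ print $1 }' |
  uniq |
  sed '/changelog/d' |
  wc -l
```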


@@ -118,7 +118,7 @@ jobs:
- goos: windows
goarch: arm
fail-fast: true
uses: ./.github/workflows/build-vault-oss.yml
uses: ./.github/workflows/build-vault-ce.yml
with:
create-packages: false
goarch: ${{ matrix.goarch }}
@@ -139,7 +139,7 @@ jobs:
goos: [linux]
goarch: [arm, arm64, 386, amd64]
fail-fast: true
uses: ./.github/workflows/build-vault-oss.yml
uses: ./.github/workflows/build-vault-ce.yml
with:
goarch: ${{ matrix.goarch }}
goos: ${{ matrix.goos }}
@@ -159,7 +159,7 @@ jobs:
goos: [darwin]
goarch: [amd64, arm64]
fail-fast: true
uses: ./.github/workflows/build-vault-oss.yml
uses: ./.github/workflows/build-vault-ce.yml
with:
create-packages: false
goarch: ${{ matrix.goarch }}
@@ -236,17 +236,17 @@ jobs:
fail-fast: false
matrix:
include:
- sample-name: build_oss_linux_amd64_deb
- sample-name: build_ce_linux_amd64_deb
build-artifact-name: vault_${{ needs.product-metadata.outputs.vault-version-package }}-1_amd64.deb
- sample-name: build_oss_linux_arm64_deb
- sample-name: build_ce_linux_arm64_deb
build-artifact-name: vault_${{ needs.product-metadata.outputs.vault-version-package }}-1_arm64.deb
- sample-name: build_oss_linux_amd64_rpm
- sample-name: build_ce_linux_amd64_rpm
build-artifact-name: vault-${{ needs.product-metadata.outputs.vault-version-package }}-1.x86_64.rpm
- sample-name: build_oss_linux_arm64_rpm
- sample-name: build_ce_linux_arm64_rpm
build-artifact-name: vault-${{ needs.product-metadata.outputs.vault-version-package }}-1.aarch64.rpm
- sample-name: build_oss_linux_amd64_zip
- sample-name: build_ce_linux_amd64_zip
build-artifact-name: vault_${{ needs.product-metadata.outputs.vault-version }}_linux_amd64.zip
- sample-name: build_oss_linux_arm64_zip
- sample-name: build_ce_linux_arm64_zip
build-artifact-name: vault_${{ needs.product-metadata.outputs.vault-version }}_linux_arm64.zip
with:
build-artifact-name: ${{ matrix.build-artifact-name }}
@@ -326,7 +326,7 @@ jobs:
- run: |
tr -d '\n' <<< '${{ toJSON(needs.*.result) }}' | grep -q -v -E '(failure|cancelled)'
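The one-liner above gates on the aggregate of all needed jobs: `needs.*.result` renders to a JSON array of result strings, which is flattened to one line and must contain no failure/cancelled entries. A sketch with an illustrative value:

```bash
# With any "failure" or "cancelled" present, the inverted grep selects
# no lines and exits non-zero, tripping the || branch.
results='["success", "skipped", "failure"]'
tr -d '\n' <<< "$results" | grep -q -v -E '(failure|cancelled)' ||
  echo "a needed job failed or was cancelled"
```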
notify-completed-successfully-failures-oss:
notify-completed-successfully-failures-ce:
if: ${{ always() && github.repository == 'hashicorp/vault' && needs.completed-successfully.result == 'failure' && (github.ref_name == 'main' || startsWith(github.ref_name, 'release/')) }}
runs-on: ubuntu-latest
permissions:
@@ -336,6 +336,13 @@ jobs:
fail-fast: false
needs:
- completed-successfully
- build-other
- build-linux
- build-darwin
- build-docker
- build-ubi
- test
- test-docker-k8s
steps:
- name: send-notification
uses: slackapi/slack-github-action@e28cf165c92ffef168d23c5c9000cffc8a25e117 # v1.24.0
@@ -344,9 +351,40 @@ jobs:
env:
SLACK_BOT_TOKEN: ${{ secrets.SLACK_BOT_TOKEN }}
with:
channel-id: "C05AABYEA9Y" # sent to #feed-vault-ci-official
channel-id: "C05AABYEA9Y" # sent to #feed-vault-ci-official, use "C05Q4D5V89W"/test-vault-ci-slack-integration for testing
payload: |
{"text":"OSS build failures on ${{ github.ref_name }}","blocks":[{"type":"header","text":{"type":"plain_text","text":":rotating_light: OSS build failures :rotating_light:","emoji":true}},{"type":"divider"},{"type":"section","text":{"type":"mrkdwn","text":"build(s) failed on ${{ github.ref_name }}"},"accessory":{"type":"button","text":{"type":"plain_text","text":"View Failing Workflow","emoji":true},"url":"${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }}"}}]}
{
"text": "CE build failures on ${{ github.ref_name }}",
"blocks": [
{
"type": "header",
"text": {
"type": "plain_text",
"text": ":rotating_light: CE build failures on ${{ github.ref_name }} :rotating_light:",
"emoji": true
}
},
{
"type": "divider"
},
{
"type": "section",
"text": {
"type": "mrkdwn",
"text": "${{ (needs.build-other.result != 'failure' && needs.build-linux.result != 'failure' && needs.build-darwin.result != 'failure' && needs.build-docker.result != 'failure' && needs.build-ubi.result != 'failure') && ':white_check_mark:' || ':x:' }} Build results\n${{ (needs.test.result != 'failure' && needs.test-docker-k8s.result != 'failure') && ':white_check_mark:' || ':x:' }} Enos tests"
},
"accessory": {
"type": "button",
"text": {
"type": "plain_text",
"text": "View Failing Workflow",
"emoji": true
},
"url": "${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }}"
}
}
]
}
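The status emoji in the payload above use the `cond && a || b` ternary idiom of GitHub Actions expressions; the shell analogue behaves the same way (result value is illustrative):

```bash
# Picks the check mark when the job did not fail, the X otherwise.
result="failure"
[ "$result" != "failure" ] && echo ':white_check_mark:' || echo ':x:'   # -> :x:
```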
notify-completed-successfully-failures-ent:
if: ${{ always() && github.repository == 'hashicorp/vault-enterprise' && needs.completed-successfully.result == 'failure' && (github.ref_name == 'main' || startsWith(github.ref_name, 'release/')) }}
@@ -358,6 +396,13 @@ jobs:
fail-fast: false
needs:
- completed-successfully
- build-other
- build-linux
- build-darwin
- build-docker
- build-ubi
- test
- test-docker-k8s
steps:
- id: vault-auth
name: Vault Authenticate
@@ -374,7 +419,38 @@ jobs:
- name: send-notification
uses: hashicorp/cloud-gha-slack-notifier@730a033037b8e603adf99ebd3085f0fdfe75e2f4 #v1
with:
channel-id: "C05AABYEA9Y" # sent to #feed-vault-ci-official
channel-id: "C05AABYEA9Y" # sent to #feed-vault-ci-official, use "C05Q4D5V89W"/test-vault-ci-slack-integration for testing
slack-bot-token: ${{ steps.secrets.outputs.SLACK_BOT_TOKEN }}
payload: |
{"text":"Enterprise build failures on ${{ github.ref_name }}","blocks":[{"type":"header","text":{"type":"plain_text","text":":rotating_light: Enterprise build failures :rotating_light:","emoji":true}},{"type":"divider"},{"type":"section","text":{"type":"mrkdwn","text":"build(s) failed on ${{ github.ref_name }}"},"accessory":{"type":"button","text":{"type":"plain_text","text":"View Failing Workflow","emoji":true},"url":"${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }}"}}]}
{
"text": "Enterprise build failures on ${{ github.ref_name }}",
"blocks": [
{
"type": "header",
"text": {
"type": "plain_text",
"text": ":rotating_light: Enterprise build failures on ${{ github.ref_name }} :rotating_light:",
"emoji": true
}
},
{
"type": "divider"
},
{
"type": "section",
"text": {
"type": "mrkdwn",
"text": "${{ (needs.build-other.result != 'failure' && needs.build-linux.result != 'failure' && needs.build-darwin.result != 'failure' && needs.build-docker.result != 'failure' && needs.build-ubi.result != 'failure') && ':white_check_mark:' || ':x:' }} Build results\n${{ (needs.test.result != 'failure' && needs.test-docker-k8s.result != 'failure') && ':white_check_mark:' || ':x:' }} Enos tests"
},
"accessory": {
"type": "button",
"text": {
"type": "plain_text",
"text": "View Failing Workflow",
"emoji": true
},
"url": "${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }}"
}
}
]
}


@@ -28,6 +28,7 @@ jobs:
compute-xlarge: ${{ steps.setup-outputs.outputs.compute-xlarge }}
enterprise: ${{ steps.setup-outputs.outputs.enterprise }}
go-tags: ${{ steps.setup-outputs.outputs.go-tags }}
checkout-ref: ${{ steps.checkout-ref-output.outputs.checkout-ref }}
steps:
- uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9 # v3.5.3
- id: setup-outputs
@@ -59,6 +60,14 @@ jobs:
with:
github-token: ${{ secrets.ELEVATED_GITHUB_TOKEN }}
no-restore: true # don't download them on a cache hit
# Control checking out HEAD instead of the default ref via a GitHub label:
# if the 'checkout-head' label is added to a PR, check out the PR's HEAD SHA; otherwise check out the default ref
- if: ${{ !contains(github.event.pull_request.labels.*.name, 'checkout-head') }}
run: echo "CHECKOUT_REF=${{ github.ref }}" >> "$GITHUB_ENV"
- if: ${{ contains(github.event.pull_request.labels.*.name, 'checkout-head') }}
run: echo "CHECKOUT_REF=${{ github.event.pull_request.head.sha }}" >> "$GITHUB_ENV"
- id: checkout-ref-output
run: echo "checkout-ref=${{ env.CHECKOUT_REF }}" >> "$GITHUB_OUTPUT"
verify-changes:
name: Verify doc-ui only PRs
@@ -85,6 +94,7 @@ jobs:
runs-on: ${{ needs.setup.outputs.compute-large }}
enterprise: ${{ needs.setup.outputs.enterprise }}
test-timing-cache-key: go-test-timing-standard
checkout-ref: ${{ needs.setup.outputs.checkout-ref }}
secrets: inherit
test-go-testonly:
@@ -105,6 +115,7 @@ jobs:
runs-on: ${{ needs.setup.outputs.compute-large }}
enterprise: ${{ needs.setup.outputs.enterprise }}
test-timing-cache-enabled: false
checkout-ref: ${{ needs.setup.outputs.checkout-ref }}
secrets: inherit
test-go-race:
@@ -131,16 +142,18 @@ jobs:
enterprise: ${{ needs.setup.outputs.enterprise }}
name: "race"
test-timing-cache-key: go-test-timing-race
checkout-ref: ${{ needs.setup.outputs.checkout-ref }}
secrets: inherit
test-go-fips:
name: Run Go tests with FIPS configuration
# Only run this job for the enterprise repo if the PR is not docs/ui only
# Only run fips on the enterprise repo, and only if it's main or a release branch
# (i.e. not a PR), or is a PR with the label "fips"
if: |
github.event.pull_request.draft == false &&
needs.setup.outputs.enterprise == 1 &&
needs.verify-changes.outputs.is_docs_change == 'false' &&
needs.verify-changes.outputs.is_ui_change == 'false'
needs.verify-changes.outputs.is_ui_change == 'false' &&
(contains(github.event.pull_request.labels.*.name, 'fips') || github.ref_name == 'main' || startsWith(github.ref_name, 'release/'))
needs:
- setup
- verify-changes
@@ -157,6 +170,7 @@ jobs:
enterprise: ${{ needs.setup.outputs.enterprise }}
name: "fips"
test-timing-cache-key: go-test-timing-fips
checkout-ref: ${{ needs.setup.outputs.checkout-ref }}
secrets: inherit
test-ui:
@@ -233,15 +247,21 @@ jobs:
make ci-bootstrap dev
- id: test-ui
name: test-ui
if: github.repository == 'hashicorp/vault-enterprise'
env:
VAULT_LICENSE: ${{ steps.secrets.outputs.VAULT_LICENSE }}
run: |
export PATH="${PWD}/bin:${PATH}"
if [ "${{ github.repository }}" == 'hashicorp/vault' ] ; then
export VAULT_LICENSE="${{ secrets.VAULT_LICENSE }}"
fi
# Run Ember tests
cd ui
mkdir -p test-results/qunit
yarn test
- id: test-ui-oss
if: github.repository == 'hashicorp/vault'
name: test-ui-oss
run: |
export PATH="${PWD}/bin:${PATH}"
# Run Ember tests
cd ui
mkdir -p test-results/qunit

.github/workflows/copywrite.yml (new file)

@@ -0,0 +1,24 @@
name: Check Copywrite Headers
on:
pull_request:
types: [opened, synchronize, reopened, ready_for_review]
push:
branches:
- main
- release/**
jobs:
copywrite:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@8ade135a41bc03ea155e62e844d188df1ea18608 # v4.1.0
- uses: hashicorp/setup-copywrite@867a1a2a064a0626db322392806428f7dc59cb3e # v1.1.2
name: Setup Copywrite
with:
version: v0.16.4
archive-checksum: c299f830e6eef7e126a3c6ef99ac6f43a3c132d830c769e0d36fa347fa1af254
- name: Check Header Compliance
run: make ci-copywriteheaders
permissions:
contents: read
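As a sketch of what the archive-checksum pin above buys: the setup action can verify the downloaded copywrite release against a known digest before trusting it. Conceptually (archive name and digest below are illustrative placeholders, not the real values):

```bash
# Fail closed if the downloaded archive does not match the pinned digest.
echo "<expected-sha256>  copywrite_0.16.4_linux_x86_64.tar.gz" | sha256sum --check -
```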


@@ -7,29 +7,47 @@ on:
- enos/**
jobs:
lint:
metadata:
# Only run this workflow on pull requests from hashicorp/vault branches
# as we need secrets to install enos.
if: "! github.event.pull_request.head.repo.fork"
name: metadata
runs-on: ubuntu-latest
outputs:
runs-on: ${{ steps.metadata.outputs.runs-on }}
version: ${{ steps.metadata.outputs.version }}
steps:
- uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9 # v3.5.3
- id: set-product-version
uses: hashicorp/actions-set-product-version@v1
- id: metadata
run: |
echo "version=${{ steps.set-product-version.outputs.product-version }}" >> "$GITHUB_OUTPUT"
github_repository="${{ github.repository }}"
if [ "${github_repository##*/}" == "vault-enterprise" ] ; then
echo 'runs-on=["self-hosted","ondemand","linux","type=c6a.4xlarge"]' >> "$GITHUB_OUTPUT"
else
echo 'runs-on="custom-linux-xl-vault-latest"' >> "$GITHUB_OUTPUT"
fi
lint:
needs: metadata
runs-on: ${{ fromJSON(needs.metadata.outputs.runs-on) }}
env:
GITHUB_TOKEN: ${{ secrets.ELEVATED_GITHUB_TOKEN }}
ENOS_VAR_tfc_api_token: ${{ secrets.TF_API_TOKEN }}
steps:
- uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9 # v3.5.3
- name: Set Product version
id: set-product-version
uses: hashicorp/actions-set-product-version@v1
- id: get-version
run: echo "version=${{ steps.set-product-version.outputs.product-version }}" >> "$GITHUB_OUTPUT"
- uses: hashicorp/setup-terraform@v2
with:
terraform_wrapper: false
- uses: hashicorp/action-setup-enos@v1
with:
github-token: ${{ secrets.ELEVATED_GITHUB_TOKEN }}
- name: Ensure shellcheck is available for linting
run: which shellcheck || (sudo apt update && sudo apt install -y shellcheck)
- name: lint
working-directory: ./enos
env:
ENOS_VAR_vault_product_version: ${{ steps.get-version.outputs.version }}
ENOS_VAR_vault_product_version: ${{ needs.metadata.outputs.version }}
run: make lint
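The repo detection in the metadata step above relies on bash suffix stripping; a standalone sketch:

```bash
# ##*/ strips everything up to the last '/', leaving the repo name.
github_repository="hashicorp/vault-enterprise"
echo "${github_repository##*/}"   # -> vault-enterprise
```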


@@ -6,19 +6,13 @@ on:
- enos-release-testing-oss
- enos-release-testing-oss::*
# cancel existing runs of the same workflow on the same ref
concurrency:
group: ${{ github.workflow }}-${{ github.head_ref || github.ref }}
cancel-in-progress: true
jobs:
product-metadata:
if: ${{ startsWith(github.event.client_payload.payload.branch, 'release/') }}
runs-on: ubuntu-latest
outputs:
vault-revision: ${{ steps.get-metadata.outputs.vault-revision }}
vault-version: ${{ steps.set-product-version.outputs.product-version }}
vault-revision: ${{ github.event.client_payload.payload.sha }}
vault-version: ${{ github.event.client_payload.payload.version }}
vault-version-package: ${{ steps.get-metadata.outputs.vault-version-package }}
steps:
- uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9 # v3.5.3
@@ -26,15 +20,11 @@ jobs:
# Check out the repository at the same Git SHA that was used to create
# the artifacts to get the correct metadata.
ref: ${{ github.event.client_payload.payload.sha }}
- name: Set Product version
id: set-product-version
uses: hashicorp/actions-set-product-version@v1
- id: get-metadata
env:
VAULT_VERSION: ${{ github.event.client_payload.payload.version }}
run: |
# shellcheck disable=SC2129
echo "vault-revision=$(make ci-get-revision)" >> "$GITHUB_OUTPUT"
echo "vault-version-package=$(echo ${{ steps.set-product-version.outputs.product-version }} | awk '{ gsub("-","~",$1); print $1 }')" >> "$GITHUB_OUTPUT"
# Get the workflow summary similar to CRT workflows
echo "vault-version-package=$(make ci-get-version-package)" >> "$GITHUB_OUTPUT"
- name: Release Artifact Info
run: |
# shellcheck disable=SC2129
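The awk rewrite removed above (now presumably encapsulated by `make ci-get-version-package`) converts pre-release dashes to tildes, since deb/rpm package versions use `~` so pre-releases sort before final releases. A standalone sketch with an illustrative version string:

```bash
# gsub replaces every '-' with '~' in the version field.
echo "1.15.0-rc1" | awk '{ gsub("-","~",$1); print $1 }'   # -> 1.15.0~rc1
```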
@@ -53,17 +43,17 @@ jobs:
fail-fast: false
matrix:
include:
- sample-name: release_oss_linux_amd64_deb
- sample-name: release_ce_linux_amd64_deb
build-artifact-name: vault_${{ needs.product-metadata.outputs.vault-version-package }}-1_amd64.deb
- sample-name: release_oss_linux_arm64_deb
- sample-name: release_ce_linux_arm64_deb
build-artifact-name: vault_${{ needs.product-metadata.outputs.vault-version-package }}-1_arm64.deb
- sample-name: release_oss_linux_amd64_rpm
- sample-name: release_ce_linux_amd64_rpm
build-artifact-name: vault_${{ needs.product-metadata.outputs.vault-version-package }}-1.x86_64.rpm
- sample-name: release_oss_linux_arm64_rpm
- sample-name: release_ce_linux_arm64_rpm
build-artifact-name: vault_${{ needs.product-metadata.outputs.vault-version-package }}-1.aarch64.rpm
- sample-name: release_oss_linux_amd64_zip
- sample-name: release_ce_linux_amd64_zip
build-artifact-name: vault_${{ needs.product-metadata.outputs.vault-version }}_linux_amd64.zip
- sample-name: release_oss_linux_arm64_zip
- sample-name: release_ce_linux_arm64_zip
build-artifact-name: vault_${{ needs.product-metadata.outputs.vault-version }}_linux_arm64.zip
with:
build-artifact-name: ${{ matrix.build-artifact-name }}


@@ -60,8 +60,8 @@ jobs:
echo "image_repo=hashicorp/vault-enterprise" >> "$GITHUB_ENV"
echo "image repo set to 'hashicorp/vault-enterprise'"
else
echo "edition=oss" >> "$GITHUB_ENV"
echo "edition set to 'oss'"
echo "edition=ce" >> "$GITHUB_ENV"
echo "edition set to 'ce'"
echo "image_repo=hashicorp/vault" >> "$GITHUB_ENV"
echo "image repo set to 'hashicorp/vault'"
fi


@@ -15,11 +15,12 @@ on:
jobs:
scan:
runs-on: ['linux', 'large']
runs-on: ${{ fromJSON(vars.RUNNER_XL) }}
# The first check ensures this doesn't run on community-contributed PRs, whose
# authors won't have the permissions to run this job.
if: ${{ (github.repository != 'hashicorp/vault' || (github.event.pull_request.head.repo.full_name == github.event.pull_request.base.repo.full_name))
&& (github.actor != 'dependabot[bot]' || github.actor != 'hc-github-team-secure-vault-core') }}
&& (github.actor != 'dependabot[bot]') && ( github.actor != 'hc-github-team-secure-vault-core') }}
steps:
- uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9 # v3.5.3
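The condition fix above is worth spelling out: for two distinct names A and B, `actor != A || actor != B` is always true, so the old guard never excluded anyone. A shell sketch of the corrected logic:

```bash
# Both inequalities must hold for the scan to run.
actor='dependabot[bot]'
if [ "$actor" != 'dependabot[bot]' ] && [ "$actor" != 'hc-github-team-secure-vault-core' ]; then
  echo "run scan"
else
  echo "skip scan"   # taken for the excluded actors
fi
```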
@@ -40,7 +41,7 @@ jobs:
repository: hashicorp/security-scanner
token: ${{ secrets.HASHIBOT_PRODSEC_GITHUB_TOKEN }}
path: security-scanner
ref: 52d94588851f38a416f11c1e727131b3c8b0dd4d
ref: main
- name: Install dependencies
shell: bash
@@ -57,7 +58,7 @@ jobs:
mv scan-plugin-codeql "$HOME/.bin"
# Semgrep
python3 -m pip install semgrep
python3 -m pip install semgrep==1.45.0
# CodeQL
LATEST=$(gh release list --repo https://github.com/github/codeql-action | cut -f 3 | sort --version-sort | tail -n1)


@@ -96,7 +96,7 @@ jobs:
echo "${{ secrets.SSH_KEY_PRIVATE_CI }}" > ./enos/support/private_key.pem
chmod 600 ./enos/support/private_key.pem
- name: Set Up Vault Enterprise License
if: contains(${{ github.event.repository.name }}, 'ent')
if: contains(github.event.repository.name, 'ent')
run: echo "${{ secrets.VAULT_LICENSE }}" > ./enos/support/vault.hclic || true
- name: Check Chrome Installed
id: chrome-check


@@ -67,6 +67,11 @@ on:
required: false
default: go-test-reports
type: string
checkout-ref:
description: The ref to use for checkout.
required: false
default: ${{ github.ref }}
type: string
env: ${{ fromJSON(inputs.env-vars) }}
@@ -78,6 +83,8 @@ jobs:
runs-on: ${{ fromJSON(inputs.runs-on) }}
steps:
- uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9 # v3.5.3
with:
ref: ${{ inputs.checkout-ref }}
- uses: ./.github/actions/set-up-go
with:
github-token: ${{ secrets.ELEVATED_GITHUB_TOKEN }}
@@ -214,6 +221,8 @@ jobs:
TIMEOUT_IN_MINUTES: ${{ inputs.timeout-minutes }}
steps:
- uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9 # v3.5.3
with:
ref: ${{ inputs.checkout-ref }}
- uses: ./.github/actions/set-up-go
with:
github-token: ${{ secrets.ELEVATED_GITHUB_TOKEN }}
@@ -255,6 +264,36 @@ jobs:
GOPRIVATE: github.com/hashicorp/*
run: time make ci-bootstrap dev
- uses: ./.github/actions/set-up-gotestsum
- name: Install gVisor
# Enterprise repo runners do not allow sudo, so can't install gVisor there yet.
if: ${{ !inputs.enterprise }}
run: |
(
set -e
ARCH="$(uname -m)"
URL="https://storage.googleapis.com/gvisor/releases/release/latest/${ARCH}"
wget --quiet "${URL}/runsc" "${URL}/runsc.sha512" \
"${URL}/containerd-shim-runsc-v1" "${URL}/containerd-shim-runsc-v1.sha512"
sha512sum -c runsc.sha512 \
-c containerd-shim-runsc-v1.sha512
rm -f -- *.sha512
chmod a+rx runsc containerd-shim-runsc-v1
sudo mv runsc containerd-shim-runsc-v1 /usr/local/bin
)
sudo tee /etc/docker/daemon.json <<EOF
{
"runtimes": {
"runsc": {
"path": "/usr/local/bin/runsc",
"runtimeArgs": [
"--host-uds=all",
"--host-fifo=open"
]
}
}
}
EOF
sudo systemctl reload docker
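Once the daemon config above is in place, a job can opt a container into the gVisor sandbox by runtime name; a sketch (image and command are illustrative):

```bash
# runsc intercepts syscalls, so uname reports gVisor's sandboxed kernel
# rather than the host's, confirming the runtime was used.
docker run --rm --runtime=runsc alpine uname -a
```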
- id: run-go-tests
name: Run Go tests
timeout-minutes: ${{ fromJSON(env.TIMEOUT_IN_MINUTES) }}
@@ -311,6 +350,9 @@ jobs:
fi
fi
export VAULT_TEST_LOG_DIR=$(pwd)/test-results/go-test/logs-${{ matrix.id }}
mkdir -p $VAULT_TEST_LOG_DIR
# shellcheck disable=SC2086 # can't quote RERUN_FAILS
GOARCH=${{ inputs.go-arch }} \
gotestsum --format=short-verbose \
@@ -434,6 +476,7 @@ jobs:
name: test-results
path: test-results/go-test
- run: |
rm -rf test-results/go-test/logs
ls -lhR test-results/go-test
find test-results/go-test -mindepth 1 -mtime +3 -delete


@@ -29,13 +29,17 @@ on:
ssh-key-name:
type: string
default: ${{ github.event.repository.name }}-ci-ssh-key
vault-version:
required: true
vault-edition:
required: false
type: string
default: oss
# The Git commit SHA used as the revision when building vault
vault-revision:
required: true
type: string
vault-version:
required: true
type: string
jobs:
metadata:
@@ -43,6 +47,7 @@ jobs:
outputs:
build-date: ${{ steps.metadata.outputs.build-date }}
sample: ${{ steps.metadata.outputs.sample }}
vault-version: ${{ steps.metadata.outputs.vault-version }}
steps:
- uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9 # v3.5.3
with:
@@ -56,6 +61,14 @@ jobs:
sample="$(enos scenario sample observe ${{ inputs.sample-name }} --chdir ./enos --min 1 --max ${{ inputs.sample-max }} --seed "$(date +%s%N)" --format json | jq -c ".observation.elements")"
echo "sample=$sample"
echo "sample=$sample" >> "$GITHUB_OUTPUT"
if [[ "${{ inputs.vault-edition }}" == "oss" ]]; then
echo "vault-version=${{ inputs.vault-version }}" >> "$GITHUB_OUTPUT"
else
# shellcheck disable=2001
vault_version="$(sed 's/+ent/+${{ inputs.vault-edition }}/g' <<< '${{ inputs.vault-version }}')"
echo "vault-version=$vault_version"
echo "vault-version=$vault_version" >> "$GITHUB_OUTPUT"
fi
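The sed rewrite above swaps the `+ent` build metadata for the requested edition suffix; a standalone sketch with illustrative values:

```bash
# Concatenated sed program becomes s/+ent/+ent.hsm/g for this edition.
vault_version="1.15.1+ent"
vault_edition="ent.hsm"
sed 's/+ent/+'"$vault_edition"'/g' <<< "$vault_version"   # -> 1.15.1+ent.hsm
```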
# Run the Enos test scenario(s)
run:
@@ -78,7 +91,7 @@ jobs:
ENOS_VAR_terraform_plugin_cache_dir: ./support/terraform-plugin-cache
ENOS_VAR_vault_artifact_path: ./support/downloads/${{ inputs.build-artifact-name }}
ENOS_VAR_vault_build_date: ${{ needs.metadata.outputs.build-date }}
ENOS_VAR_vault_product_version: ${{ inputs.vault-version }}
ENOS_VAR_vault_product_version: ${{ needs.metadata.outputs.vault-version }}
ENOS_VAR_vault_revision: ${{ inputs.vault-revision }}
ENOS_VAR_vault_license_path: ./support/vault.hclic
ENOS_DEBUG_DATA_ROOT_DIR: /tmp/enos-debug-data
@@ -115,16 +128,15 @@ jobs:
- if: contains(inputs.sample-name, 'ent')
name: Configure Vault license
run: echo "${{ secrets.VAULT_LICENSE }}" > ./enos/support/vault.hclic || true
- name: Run Enos scenario
id: run
# Continue once and retry to handle occasional blips when creating
# infrastructure.
- id: launch
name: enos scenario launch ${{ matrix.scenario.id.filter }}
# Continue once and retry to handle occasional blips when creating infrastructure.
continue-on-error: true
run: enos scenario run --timeout 60m0s --chdir ./enos ${{ matrix.scenario.id.filter }}
- name: Retry Enos scenario if necessary
id: run_retry
if: steps.run.outcome == 'failure'
run: enos scenario run --timeout 60m0s --chdir ./enos ${{ matrix.scenario.id.filter }}
run: enos scenario launch --timeout 60m0s --chdir ./enos ${{ matrix.scenario.id.filter }}
- if: steps.launch.outcome == 'failure'
id: launch_retry
name: Retry enos scenario launch ${{ matrix.scenario.id.filter }}
run: enos scenario launch --timeout 60m0s --chdir ./enos ${{ matrix.scenario.id.filter }}
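The launch/retry pair above is the GHA encoding (via continue-on-error plus an outcome check) of a plain retry-once pattern; a shell sketch with an illustrative scenario filter:

```bash
# Retry exactly once if the first launch fails, to absorb infra blips.
filter="smoke backend:raft"
enos scenario launch --timeout 60m0s --chdir ./enos $filter ||
  enos scenario launch --timeout 60m0s --chdir ./enos $filter
```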
- name: Upload Debug Data
if: failure()
uses: actions/upload-artifact@0b7f8abb1508181956e8e162db84b466c27e18ce # v3.1.2
@@ -134,11 +146,14 @@ jobs:
path: ${{ env.ENOS_DEBUG_DATA_ROOT_DIR }}
retention-days: 30
continue-on-error: true
- name: Ensure scenario has been destroyed
- if: ${{ always() }}
id: destroy
if: ${{ always() }}
# With Enos version 0.0.11 the destroy step returns an error if the infrastructure
# has already been destroyed by enos run, so temporarily set it to continue-on-error in GHA
name: enos scenario destroy ${{ matrix.scenario.id.filter }}
continue-on-error: true
run: enos scenario destroy --timeout 60m0s --chdir ./enos ${{ matrix.scenario.id.filter }}
- if: steps.destroy.outcome == 'failure'
id: destroy_retry
name: Retry enos scenario destroy ${{ matrix.scenario.id.filter }}
continue-on-error: true
run: enos scenario destroy --timeout 60m0s --chdir ./enos ${{ matrix.scenario.id.filter }}
- name: Clean up Enos runtime directories
@@ -149,28 +164,34 @@ jobs:
rm -rf /tmp/enos*
rm -rf ./enos/support
rm -rf ./enos/.enos
# Send a Slack notification to #feed-vault-enos-failures if the 'run' step fails.
# There is an incoming webhook set up on the "Enos Vault Failure Bot" Slackbot https://api.slack.com/apps/A05E31CH1LG/incoming-webhooks
- name: Send Slack notification on Enos run failure
# Send Slack notifications to #feed-vault-enos-failures if any of our enos scenario commands fail.
# There is an incoming webhook set up on the "Enos Vault Failure Bot" Slackbot:
# https://api.slack.com/apps/A05E31CH1LG/incoming-webhooks
- if: ${{ always() && ! cancelled() }}
name: Notify launch failed
uses: hashicorp/actions-slack-status@v1
if: ${{ always() && ! cancelled() }}
with:
failure-message: "An Enos scenario `run` failed. \nTriggering event: `${{ github.event_name }}` \nActor: `${{ github.actor }}`"
status: ${{ steps.run.outcome }}
failure-message: "enos scenario launch ${{ matrix.scenario.id.filter}} failed. \nTriggering event: `${{ github.event_name }}` \nActor: `${{ github.actor }}`"
status: ${{ steps.launch.outcome }}
slack-webhook-url: ${{ secrets.SLACK_WEBHOOK_URL }}
# Send a Slack notification to #feed-vault-enos-failures if the 'run_retry' step fails.
- name: Send Slack notification on Enos run_retry failure
- if: ${{ always() && ! cancelled() }}
name: Notify retry launch failed
uses: hashicorp/actions-slack-status@v1
if: ${{ always() && ! cancelled() }}
with:
failure-message: "An Enos scenario `run_retry` failed. \nTriggering event: `${{ github.event_name }}` \nActor: `${{ github.actor }}`"
status: ${{ steps.run_retry.outcome }}
failure-message: "retry enos scenario launch ${{ matrix.scenario.id.filter}} failed. \nTriggering event: `${{ github.event_name }}` \nActor: `${{ github.actor }}`"
status: ${{ steps.launch_retry.outcome }}
slack-webhook-url: ${{ secrets.SLACK_WEBHOOK_URL }}
# Send a Slack notification to #feed-vault-enos-failures if the 'destroy' step fails.
- name: Send Slack notification on Enos destroy failure
- if: ${{ always() && ! cancelled() }}
name: Notify destroy failed
uses: hashicorp/actions-slack-status@v1
if: ${{ always() && ! cancelled() }}
with:
failure-message: "An Enos scenario `destroy` failed. \nTriggering event: `${{ github.event_name }}` \nActor: `${{ github.actor }}`"
failure-message: "enos scenario destroy ${{ matrix.scenario.id.filter}} failed. \nTriggering event: `${{ github.event_name }}` \nActor: `${{ github.actor }}`"
status: ${{ steps.destroy.outcome }}
slack-webhook-url: ${{ secrets.SLACK_WEBHOOK_URL }}
- if: ${{ always() && ! cancelled() }}
name: Notify retry destroy failed
uses: hashicorp/actions-slack-status@v1
with:
failure-message: "retry enos scenario destroy ${{ matrix.scenario.id.filter}} failed. \nTriggering event: `${{ github.event_name }}` \nActor: `${{ github.actor }}`"
status: ${{ steps.destroy_retry.outcome }}
slack-webhook-url: ${{ secrets.SLACK_WEBHOOK_URL }}

.gitignore

@@ -72,6 +72,9 @@ enos/**/kubeconfig
.idea
.vscode
# VSCode debugger executable
__debug_bin*
dist/*
# ignore ctags


@@ -1 +1 @@
1.21.1
1.21.3


@@ -29,6 +29,7 @@ RestartSec=5
TimeoutStopSec=30
LimitNOFILE=65536
LimitMEMLOCK=infinity
LimitCORE=0
[Install]
WantedBy=multi-user.target
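The new LimitCORE=0 line disables core dumps for the unit, so a crashing vault process cannot write memory that may contain secrets to disk; the shell equivalent of the limit it sets:

```bash
# Cap core file size at zero, i.e. never write a core dump.
ulimit -c 0
ulimit -c   # -> 0
```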


@@ -2,6 +2,404 @@
- [v1.0.0 - v1.9.10](CHANGELOG-pre-v1.10.md)
- [v0.11.6 and earlier](CHANGELOG-v0.md)
## 1.15.1
### October 25, 2023
CHANGES:
* core: Bump Go version to 1.21.3.
IMPROVEMENTS:
* api/plugins: add `tls-server-name` arg for plugin registration [[GH-23549](https://github.com/hashicorp/vault/pull/23549)]
* auto-auth/azure: Support setting the `authenticate_from_environment` variable to "true" and "false" string literals, too. [[GH-22996](https://github.com/hashicorp/vault/pull/22996)]
* secrets-sync (enterprise): Added telemetry on number of destinations and associations per type.
* ui: Adds a warning when whitespace is detected in a key of a KV secret [[GH-23702](https://github.com/hashicorp/vault/pull/23702)]
* ui: Adds toggle to KV secrets engine value download modal to optionally stringify value in downloaded file [[GH-23747](https://github.com/hashicorp/vault/pull/23747)]
* ui: Surface warning banner if UI has stopped auto-refreshing token [[GH-23143](https://github.com/hashicorp/vault/pull/23143)]
* ui: show banner when resultant-acl check fails due to permissions or wrong namespace. [[GH-23503](https://github.com/hashicorp/vault/pull/23503)]
BUG FIXES:
* Seal HA (enterprise/beta): Fix rejection of a seal configuration change
from two to one auto seal due to persistence of the previous seal type being
"multiseal". [[GH-23573](https://github.com/hashicorp/vault/pull/23573)]
* audit: Fix bug reopening 'file' audit devices on SIGHUP. [[GH-23598](https://github.com/hashicorp/vault/pull/23598)]
* auth/aws: Fixes a panic that can occur in IAM-based login when a [client config](https://developer.hashicorp.com/vault/api-docs/auth/aws#configure-client) does not exist. [[GH-23555](https://github.com/hashicorp/vault/pull/23555)]
* command/server: Fix bug with sigusr2 where pprof files were not closed correctly [[GH-23636](https://github.com/hashicorp/vault/pull/23636)]
* events: Ignore sending context to give more time for events to send [[GH-23500](https://github.com/hashicorp/vault/pull/23500)]
* expiration: Prevent large lease loads from delaying state changes, e.g. becoming active or standby. [[GH-23282](https://github.com/hashicorp/vault/pull/23282)]
* kmip (enterprise): Improve handling of failures due to storage replication issues.
* kmip (enterprise): Return a structure in the response for query function Query Server Information.
* mongo-db: allow non-admin database for root credential rotation [[GH-23240](https://github.com/hashicorp/vault/pull/23240)]
* replication (enterprise): Fix a bug where undo logs would only get enabled on the initial node in a cluster.
* replication (enterprise): Fix a missing unlock when changing replication state
* secrets-sync (enterprise): Fixed issue where we could sync a deleted secret
* secrets/aws: update credential rotation deadline when static role rotation period is updated [[GH-23528](https://github.com/hashicorp/vault/pull/23528)]
* secrets/consul: Fix revocations when Vault has an access token using specific namespace and admin partition policies [[GH-23010](https://github.com/hashicorp/vault/pull/23010)]
* secrets/pki: Stop processing in-flight ACME verifications when an active node steps down [[GH-23278](https://github.com/hashicorp/vault/pull/23278)]
* secrets/transit (enterprise): Address an issue using sign/verify operations with managed keys returning an error about it not containing a private key
* secrets/transit (enterprise): Address panic when using GCP, AWS, or Azure managed keys for encryption operations. At this time all encryption operations for the cloud providers have been disabled; only signing operations are supported.
* secrets/transit (enterprise): Apply hashing arguments and defaults to managed key sign/verify operations
* secrets/transit: Do not allow auto rotation on managed_key key types [[GH-23723](https://github.com/hashicorp/vault/pull/23723)]
* storage/consul: fix a bug where an active node in a specific sort of network
partition could continue to write data to Consul after a new leader is elected
potentially causing data loss or corruption for keys with many concurrent
writers. For Enterprise clusters this could cause corruption of the merkle trees
leading to failure to complete merkle sync without a full re-index. [[GH-23013](https://github.com/hashicorp/vault/pull/23013)]
* ui: Assumes version 1 for kv engines when options are null because no version is specified [[GH-23585](https://github.com/hashicorp/vault/pull/23585)]
* ui: Decode the connection url for display on the connection details page [[GH-23695](https://github.com/hashicorp/vault/pull/23695)]
* ui: Fix AWS secret engine to allow empty policy_document field. [[GH-23470](https://github.com/hashicorp/vault/pull/23470)]
* ui: Fix bug where auth items were not listed when within a namespace. [[GH-23446](https://github.com/hashicorp/vault/pull/23446)]
* ui: Fix regression that broke the oktaNumberChallenge on the ui. [[GH-23565](https://github.com/hashicorp/vault/pull/23565)]
* ui: Fix the copy token button in the sidebar navigation window when in a collapsed state. [[GH-23331](https://github.com/hashicorp/vault/pull/23331)]
* ui: Fixes issue where you could not share the list view URL from the KV v2 secrets engine. [[GH-23620](https://github.com/hashicorp/vault/pull/23620)]
* ui: Fixes issue with sidebar navigation links disappearing when navigating to policies when a user is not authorized [[GH-23516](https://github.com/hashicorp/vault/pull/23516)]
* ui: Fixes issues displaying accurate TLS state in dashboard configuration details [[GH-23726](https://github.com/hashicorp/vault/pull/23726)]
## 1.15.0
### September 27, 2023
SECURITY:
* secrets/transit: fix a regression that was honoring nonces provided in non-convergent modes during encryption. This vulnerability, CVE-2023-4680, is fixed in Vault 1.14.3, 1.13.7, and 1.12.11. [[GH-22852](https://github.com/hashicorp/vault/pull/22852), [HSEC-2023-28](https://discuss.hashicorp.com/t/hcsec-2023-28-vault-s-transit-secrets-engine-allowed-nonce-specified-without-convergent-encryption/58249)]
* sentinel (enterprise): Sentinel RGP policies allowed for cross-namespace denial-of-service. This vulnerability, CVE-2023-3775, is fixed in Vault Enterprise 1.15.0, 1.14.4, and 1.13.8.[[HSEC-2023-29](https://discuss.hashicorp.com/t/hcsec-2023-29-vault-enterprise-s-sentinel-rgp-policies-allowed-for-cross-namespace-denial-of-service/58653)]
CHANGES:
* auth/alicloud: Update plugin to v0.16.0 [[GH-22646](https://github.com/hashicorp/vault/pull/22646)]
* auth/azure: Update plugin to v0.16.0 [[GH-22277](https://github.com/hashicorp/vault/pull/22277)]
* auth/azure: Update plugin to v0.16.1 [[GH-22795](https://github.com/hashicorp/vault/pull/22795)]
* auth/azure: Update plugin to v0.16.2 [[GH-23060](https://github.com/hashicorp/vault/pull/23060)]
* auth/cf: Update plugin to v0.15.1 [[GH-22758](https://github.com/hashicorp/vault/pull/22758)]
* auth/gcp: Update plugin to v0.16.1 [[GH-22612](https://github.com/hashicorp/vault/pull/22612)]
* auth/jwt: Update plugin to v0.17.0 [[GH-22678](https://github.com/hashicorp/vault/pull/22678)]
* auth/kerberos: Update plugin to v0.10.1 [[GH-22797](https://github.com/hashicorp/vault/pull/22797)]
* auth/kubernetes: Update plugin to v0.17.0 [[GH-22709](https://github.com/hashicorp/vault/pull/22709)]
* auth/kubernetes: Update plugin to v0.17.1 [[GH-22879](https://github.com/hashicorp/vault/pull/22879)]
* auth/ldap: Normalize HTTP response codes when invalid credentials are provided [[GH-21282](https://github.com/hashicorp/vault/pull/21282)]
* auth/oci: Update plugin to v0.14.2 [[GH-22805](https://github.com/hashicorp/vault/pull/22805)]
* core (enterprise): Ensure Role Governing Policies are only applied down the namespace hierarchy
* core/namespace (enterprise): Introduce the concept of high-privilege namespace (administrative namespace),
which will have access to some system backend paths that were previously only accessible in the root namespace. [[GH-21215](https://github.com/hashicorp/vault/pull/21215)]
* core: Bump Go version to 1.21.1.
* database/couchbase: Update plugin to v0.9.3 [[GH-22854](https://github.com/hashicorp/vault/pull/22854)]
* database/couchbase: Update plugin to v0.9.4 [[GH-22871](https://github.com/hashicorp/vault/pull/22871)]
* database/elasticsearch: Update plugin to v0.13.3 [[GH-22696](https://github.com/hashicorp/vault/pull/22696)]
* database/mongodbatlas: Update plugin to v0.10.1 [[GH-22655](https://github.com/hashicorp/vault/pull/22655)]
* database/redis-elasticache: Update plugin to v0.2.2 [[GH-22584](https://github.com/hashicorp/vault/pull/22584)]
* database/redis-elasticache: Update plugin to v0.2.3 [[GH-22598](https://github.com/hashicorp/vault/pull/22598)]
* database/redis: Update plugin to v0.2.2 [[GH-22654](https://github.com/hashicorp/vault/pull/22654)]
* database/snowflake: Update plugin to v0.9.0 [[GH-22516](https://github.com/hashicorp/vault/pull/22516)]
* events: Log level for processing an event dropped from info to debug. [[GH-22997](https://github.com/hashicorp/vault/pull/22997)]
* events: `data_path` will include full data path of secret, including name. [[GH-22487](https://github.com/hashicorp/vault/pull/22487)]
* replication (enterprise): Switch to non-deprecated gRPC field for resolver target host
* sdk/logical/events: `EventSender` interface method is now `SendEvent` instead of `Send`. [[GH-22487](https://github.com/hashicorp/vault/pull/22487)]
* secrets/ad: Update plugin to v0.16.1 [[GH-22856](https://github.com/hashicorp/vault/pull/22856)]
* secrets/alicloud: Update plugin to v0.15.1 [[GH-22533](https://github.com/hashicorp/vault/pull/22533)]
* secrets/azure: Update plugin to v0.16.2 [[GH-22799](https://github.com/hashicorp/vault/pull/22799)]
* secrets/azure: Update plugin to v0.16.3 [[GH-22824](https://github.com/hashicorp/vault/pull/22824)]
* secrets/gcp: Update plugin to v0.17.0 [[GH-22746](https://github.com/hashicorp/vault/pull/22746)]
* secrets/gcpkms: Update plugin to v0.15.1 [[GH-22757](https://github.com/hashicorp/vault/pull/22757)]
* secrets/keymgmt: Update plugin to v0.9.3
* secrets/kubernetes: Update plugin to v0.6.0 [[GH-22823](https://github.com/hashicorp/vault/pull/22823)]
* secrets/kv: Update plugin to v0.16.1 [[GH-22716](https://github.com/hashicorp/vault/pull/22716)]
* secrets/mongodbatlas: Update plugin to v0.10.1 [[GH-22748](https://github.com/hashicorp/vault/pull/22748)]
* secrets/openldap: Update plugin to v0.11.2 [[GH-22734](https://github.com/hashicorp/vault/pull/22734)]
* secrets/terraform: Update plugin to v0.7.3 [[GH-22907](https://github.com/hashicorp/vault/pull/22907)]
* secrets/transform (enterprise): Enforce a transformation role's max_ttl setting on encode requests; a warning will be returned if max_ttl was applied.
* storage/aerospike: Aerospike storage shouldn't be used on 32-bit architectures and is now unsupported on them. [[GH-20825](https://github.com/hashicorp/vault/pull/20825)]
* telemetry: Replace `vault.rollback.attempt.{MOUNT_POINT}` and `vault.route.rollback.{MOUNT_POINT}` metrics with `vault.rollback.attempt` and `vault.route.rollback` metrics by default. Added a telemetry configuration `add_mount_point_rollback_metrics` which, when set to true, causes vault to emit the metrics with mount points in their names. [[GH-22400](https://github.com/hashicorp/vault/pull/22400)]
FEATURES:
* **Certificate Issuance External Policy Service (CIEPS) (enterprise)**: Allow highly-customizable operator control of certificate validation and generation through the PKI Secrets Engine.
* **Copyable KV v2 paths in UI**: KV v2 secret paths are copyable for use in CLI commands or API calls [[GH-22551](https://github.com/hashicorp/vault/pull/22551)]
* **Dashboard UI**: Dashboard is now available in the UI as the new landing page. [[GH-21057](https://github.com/hashicorp/vault/pull/21057)]
* **Database Static Role Advanced TTL Management**: Adds the ability to rotate
static roles on a defined schedule. [[GH-22484](https://github.com/hashicorp/vault/pull/22484)]
* **Event System**: Add subscribe capability and subscribe_event_types to policies for events. [[GH-22474](https://github.com/hashicorp/vault/pull/22474)]
* **GCP IAM Support**: Adds support for IAM-based authentication to MySQL and PostgreSQL backends using Google Cloud SQL. [[GH-22445](https://github.com/hashicorp/vault/pull/22445)]
* **Improved KV V2 UI**: Updated and restructured secret engine for KV (version 2 only) [[GH-22559](https://github.com/hashicorp/vault/pull/22559)]
* **Merkle Tree Corruption Detection (enterprise)**: Add a new endpoint to check merkle tree corruption.
* **Plugin Containers**: Vault supports registering, managing, and running plugins inside a container on Linux. [[GH-22712](https://github.com/hashicorp/vault/pull/22712)]
* **SAML Auth Method (enterprise)**: Enable users to authenticate with Vault using their identity in a SAML Identity Provider.
* **Seal High Availability Beta (enterprise)**: operators can try out configuring more than one automatic seal for resilience against seal provider outages. Not for production use at this time.
* **Secrets Sync (enterprise)**: Add the ability to synchronize KVv2 secrets with external secrets manager solutions.
* **UI LDAP secrets engine**: Add LDAP secrets engine to the UI. [[GH-20790](https://github.com/hashicorp/vault/pull/20790)]
IMPROVEMENTS:
* Bump github.com/hashicorp/go-plugin version v1.4.9 -> v1.4.10 [[GH-20966](https://github.com/hashicorp/vault/pull/20966)]
* api: add support for cloning a Client's tls.Config. [[GH-21424](https://github.com/hashicorp/vault/pull/21424)]
* api: adding a new api sys method for replication status [[GH-20995](https://github.com/hashicorp/vault/pull/20995)]
* audit: add core audit events experiment [[GH-21628](https://github.com/hashicorp/vault/pull/21628)]
* auth/aws: Added support for signed GET requests for authenticating to vault using the aws iam method. [[GH-10961](https://github.com/hashicorp/vault/pull/10961)]
* auth/azure: Add support for azure workload identity authentication (see issue
#18257). Update go-kms-wrapping dependency to include [PR
#155](https://github.com/hashicorp/go-kms-wrapping/pull/155) [[GH-22994](https://github.com/hashicorp/vault/pull/22994)]
* auth/azure: Added Azure API configurable retry options [[GH-23059](https://github.com/hashicorp/vault/pull/23059)]
* auth/cert: Adds support for requiring hexadecimal-encoded non-string certificate extension values [[GH-21830](https://github.com/hashicorp/vault/pull/21830)]
* auth/ldap: improved login speed by adding concurrency to LDAP token group searches [[GH-22659](https://github.com/hashicorp/vault/pull/22659)]
* auto-auth/azure: Added Azure Workload Identity Federation support to auto-auth (for Vault Agent and Vault Proxy). [[GH-22264](https://github.com/hashicorp/vault/pull/22264)]
* auto-auth: added support for LDAP auto-auth [[GH-21641](https://github.com/hashicorp/vault/pull/21641)]
* aws/auth: Adds a new config field `use_sts_region_from_client` which allows for using dynamic regional sts endpoints based on Authorization header when using IAM-based authentication. [[GH-21960](https://github.com/hashicorp/vault/pull/21960)]
* command/server: add `-dev-tls-san` flag to configure subject alternative names for the certificate generated when using `-dev-tls`. [[GH-22657](https://github.com/hashicorp/vault/pull/22657)]
* core (ent) : Add field that allows lease-count namespace quotas to be inherited by child namespaces.
* core : Add field that allows rate-limit namespace quotas to be inherited by child namespaces. [[GH-22452](https://github.com/hashicorp/vault/pull/22452)]
* core/fips: Add RPM, DEB packages of FIPS 140-2 and HSM+FIPS 140-2 Vault Enterprise.
* core/quotas: Add configuration to allow skipping of expensive role calculations [[GH-22651](https://github.com/hashicorp/vault/pull/22651)]
* core: Add a new periodic metric to track the number of available policies, `vault.policy.configured.count`. [[GH-21010](https://github.com/hashicorp/vault/pull/21010)]
* core: Fix OpenAPI representation and `-output-policy` recognition of some non-standard sudo paths [[GH-21772](https://github.com/hashicorp/vault/pull/21772)]
* core: Fix regexes for `sys/raw/` and `sys/leases/lookup/` to match prevailing conventions [[GH-21760](https://github.com/hashicorp/vault/pull/21760)]
* core: Log rollback manager failures during unmount, remount to prevent replication failures on secondary clusters. [[GH-22235](https://github.com/hashicorp/vault/pull/22235)]
* core: Use a worker pool for the rollback manager. Add new metrics for the rollback manager to track the queued tasks. [[GH-22567](https://github.com/hashicorp/vault/pull/22567)]
* core: add a listener configuration "chroot_namespace" that forces requests to use a namespace hierarchy [[GH-22304](https://github.com/hashicorp/vault/pull/22304)]
* core: add a listener configuration "chroot_namespace" that forces requests to use a namespace hierarchy
* core: remove unnecessary *BarrierView field from backendEntry struct [[GH-20933](https://github.com/hashicorp/vault/pull/20933)]
* core: use Go stdlib functionalities instead of explicit byte/string conversions [[GH-21854](https://github.com/hashicorp/vault/pull/21854)]
* eventbus: updated go-eventlogger library to allow removal of nodes referenced by pipelines (used for subscriptions) [[GH-21623](https://github.com/hashicorp/vault/pull/21623)]
* events: Allow subscriptions to multiple namespaces [[GH-22540](https://github.com/hashicorp/vault/pull/22540)]
* events: Enabled by default [[GH-22815](https://github.com/hashicorp/vault/pull/22815)]
* events: WebSocket subscriptions add support for boolean filter expressions [[GH-22835](https://github.com/hashicorp/vault/pull/22835)]
* framework: Make it an error for `CreateOperation` to be defined without an `ExistenceCheck`, thereby fixing misleading `x-vault-createSupported` in OpenAPI [[GH-18492](https://github.com/hashicorp/vault/pull/18492)]
* kmip (enterprise): Add namespace lock and unlock support [[GH-21925](https://github.com/hashicorp/vault/pull/21925)]
* openapi: Better mount points for kv-v1 and kv-v2 in openapi.json [[GH-21563](https://github.com/hashicorp/vault/pull/21563)]
* openapi: Fix generated types for duration strings [[GH-20841](https://github.com/hashicorp/vault/pull/20841)]
* openapi: Fix generation of correct fields in some rarer cases [[GH-21942](https://github.com/hashicorp/vault/pull/21942)]
* openapi: Fix response definitions for list operations [[GH-21934](https://github.com/hashicorp/vault/pull/21934)]
* openapi: List operations are now given first-class representation in the OpenAPI document, rather than sometimes being overlaid with a read operation at the same path [[GH-21723](https://github.com/hashicorp/vault/pull/21723)]
* plugins: Containerized plugins can be configured to still work when running with systemd's PrivateTmp=true setting. [[GH-23215](https://github.com/hashicorp/vault/pull/23215)]
* replication (enterprise): Avoid logging warning if request is forwarded from a performance standby and not a performance secondary
* replication (enterprise): Make reindex less disruptive by allowing writes during the flush phase.
* sdk/framework: Adds replication state helper for backends to check for read-only storage [[GH-21743](https://github.com/hashicorp/vault/pull/21743)]
* secrets/database: Improves error logging for static role rotations by including the database and role names. [[GH-22253](https://github.com/hashicorp/vault/pull/22253)]
* secrets/db: Remove the `service_account_json` parameter when reading DB connection details [[GH-23256](https://github.com/hashicorp/vault/pull/23256)]
* secrets/pki: Add a parameter to allow ExtKeyUsage field usage from a role within ACME. [[GH-21702](https://github.com/hashicorp/vault/pull/21702)]
* secrets/transform (enterprise): Switch to pgx PostgreSQL driver for better timeout handling
* secrets/transit: Add support to create CSRs from keys in transit engine and import/export x509 certificates [[GH-21081](https://github.com/hashicorp/vault/pull/21081)]
* storage/dynamodb: Added three permit pool metrics for the DynamoDB backend, `pending_permits`, `active_permits`, and `pool_size`. [[GH-21742](https://github.com/hashicorp/vault/pull/21742)]
* storage/etcd: Make etcd parameter MaxCallSendMsgSize configurable [[GH-12666](https://github.com/hashicorp/vault/pull/12666)]
* storage/raft: Cap the minimum dead_server_last_contact_threshold to 1m. [[GH-22040](https://github.com/hashicorp/vault/pull/22040)]
* sys/metrics (enterprise): Adds a gauge metric that tracks whether enterprise builtin secret plugins are enabled. [[GH-21681](https://github.com/hashicorp/vault/pull/21681)]
* ui: Add API Explorer link to Sidebar, under Tools. [[GH-21578](https://github.com/hashicorp/vault/pull/21578)]
* ui: Add pagination to PKI roles, keys, issuers, and certificates list pages [[GH-23193](https://github.com/hashicorp/vault/pull/23193)]
* ui: Added allowed_domains_template field for CA type role in SSH engine [[GH-23119](https://github.com/hashicorp/vault/pull/23119)]
* ui: Adds mount configuration details to Kubernetes secrets engine configuration view [[GH-22926](https://github.com/hashicorp/vault/pull/22926)]
* ui: Adds tidy_revoked_certs to PKI tidy status page [[GH-23232](https://github.com/hashicorp/vault/pull/23232)]
* ui: Adds warning before downloading KV v2 secret values [[GH-23260](https://github.com/hashicorp/vault/pull/23260)]
* ui: Display minus icon for empty MaskedInput value. Show MaskedInput for KV secrets without values [[GH-22039](https://github.com/hashicorp/vault/pull/22039)]
* ui: JSON diff view available in "Create New Version" form for KV v2 [[GH-22593](https://github.com/hashicorp/vault/pull/22593)]
* ui: KV View Secret card will link to list view if input ends in "/" [[GH-22502](https://github.com/hashicorp/vault/pull/22502)]
* ui: Move access to KV V2 version diff view to toolbar in Version History [[GH-23200](https://github.com/hashicorp/vault/pull/23200)]
* ui: Update pki mount configuration details to match the new mount configuration details pattern [[GH-23166](https://github.com/hashicorp/vault/pull/23166)]
* ui: add example modal to policy form [[GH-21583](https://github.com/hashicorp/vault/pull/21583)]
* ui: adds allowed_user_ids field to create role form and user_ids to generate certificates form in pki [[GH-22191](https://github.com/hashicorp/vault/pull/22191)]
* ui: display CertificateCard instead of MaskedInput for certificates in PKI [[GH-22160](https://github.com/hashicorp/vault/pull/22160)]
* ui: enables create and update KV secret workflow when control group present [[GH-22471](https://github.com/hashicorp/vault/pull/22471)]
* ui: implement hashicorp design system [alert](https://helios.hashicorp.design/components/alert) component [[GH-21375](https://github.com/hashicorp/vault/pull/21375)]
* ui: update detail views that render ttl durations to display full unit instead of letter (i.e. 'days' instead of 'd') [[GH-20697](https://github.com/hashicorp/vault/pull/20697)]
* ui: update unseal and DR operation token flow components [[GH-21871](https://github.com/hashicorp/vault/pull/21871)]
* ui: upgrade Ember to 4.12 [[GH-22122](https://github.com/hashicorp/vault/pull/22122)]
DEPRECATIONS:
* auth/centrify: Centrify plugin is deprecated as of 1.15, slated for removal in 1.17 [[GH-23050](https://github.com/hashicorp/vault/pull/23050)]
BUG FIXES:
* activity (enterprise): Fix misattribution of entities to no or child namespace auth methods [[GH-18809](https://github.com/hashicorp/vault/pull/18809)]
* agent: Environment variable VAULT_CACERT_BYTES now works for Vault Agent templates. [[GH-22322](https://github.com/hashicorp/vault/pull/22322)]
* agent: Fix "generate-config" command documentation URL [[GH-21466](https://github.com/hashicorp/vault/pull/21466)]
* api/client: Fix deadlock in client.CloneWithHeaders when used alongside other client methods. [[GH-22410](https://github.com/hashicorp/vault/pull/22410)]
* api: Fix breakage with UNIX domain socket addresses introduced by newest Go versions as a security fix. [[GH-22523](https://github.com/hashicorp/vault/pull/22523)]
* audit: Prevent panic due to nil pointer receiver for audit header formatting. [[GH-22694](https://github.com/hashicorp/vault/pull/22694)]
* auth/azure: Fix intermittent 401s by preventing performance secondary clusters from rotating root credentials. [[GH-21800](https://github.com/hashicorp/vault/pull/21800)]
* auth/token, sys: Fix path-help being unavailable for some list-only endpoints [[GH-18571](https://github.com/hashicorp/vault/pull/18571)]
* auth/token: Fix parsing of `auth/token/create` fields to avoid incorrect warnings about ignored parameters [[GH-18556](https://github.com/hashicorp/vault/pull/18556)]
* awsutil: Update awsutil to v0.2.3 to fix a regression where Vault no longer
respects `AWS_ROLE_ARN`, `AWS_WEB_IDENTITY_TOKEN_FILE`, and `AWS_ROLE_SESSION_NAME`. [[GH-21951](https://github.com/hashicorp/vault/pull/21951)]
* cli: Avoid printing "Success" message when `-field` flag is provided during a `vault write`. [[GH-21546](https://github.com/hashicorp/vault/pull/21546)]
* cli: Fix the CLI failing to return wrapping information for KV PUT and PATCH operations when format is set to `table`. [[GH-22818](https://github.com/hashicorp/vault/pull/22818)]
* core (enterprise): Fix sentinel policy check logic so that sentinel
policies are not used when Sentinel feature isn't licensed.
* core (enterprise): Remove MFA Configuration for namespace when deleting namespace
* core/managed-keys (enterprise): Allow certain symmetric PKCS#11 managed key mechanisms (AES CBC with and without padding) to operate without an HMAC.
* core/metrics: vault.raft_storage.bolt.write.time should be a counter not a summary [[GH-22468](https://github.com/hashicorp/vault/pull/22468)]
* core/quotas (enterprise): Fix a case where we were applying login roles to lease count quotas in a non-login context.
Also fix a related potential deadlock. [[GH-21110](https://github.com/hashicorp/vault/pull/21110)]
* core/quotas: Only perform ResolveRoleOperation for role-based quotas and lease creation. [[GH-22597](https://github.com/hashicorp/vault/pull/22597)]
* core/quotas: Reduce overhead for role calculation when using cloud auth methods. [[GH-22583](https://github.com/hashicorp/vault/pull/22583)]
* core: Remove "expiration manager is nil on tokenstore" error log for unauth requests on DR secondary as they do not have expiration manager. [[GH-22137](https://github.com/hashicorp/vault/pull/22137)]
* core: All subloggers now reflect configured log level on reload. [[GH-22038](https://github.com/hashicorp/vault/pull/22038)]
* core: Fix bug where background thread to update locked user entries runs on DR secondaries. [[GH-22355](https://github.com/hashicorp/vault/pull/22355)]
* core: Fix readonly errors that could occur while loading mounts/auths during unseal [[GH-22362](https://github.com/hashicorp/vault/pull/22362)]
* core: Fixed an instance where incorrect route entries would get tainted. We now pre-calculate namespace specific paths to avoid this. [[GH-21470](https://github.com/hashicorp/vault/pull/21470)]
* core: Fixed issue with some durations not being properly parsed to include days. [[GH-21357](https://github.com/hashicorp/vault/pull/21357)]
* core: Fixes list password policy to include those with names containing / characters. [[GH-23155](https://github.com/hashicorp/vault/pull/23155)]
* core: fix race when updating a mount's route entry tainted status and incoming requests [[GH-21640](https://github.com/hashicorp/vault/pull/21640)]
* events: Ensure subscription resources are cleaned up on close. [[GH-23042](https://github.com/hashicorp/vault/pull/23042)]
* expiration: Fix a deadlock that could occur when a revocation failure happens while restoring leases on startup. [[GH-22374](https://github.com/hashicorp/vault/pull/22374)]
* identity/mfa: Fixes to OpenAPI representation and returned error codes for `identity/mfa/method/*` APIs [[GH-20879](https://github.com/hashicorp/vault/pull/20879)]
* identity: Remove caseSensitivityKey to prevent errors while loading groups which could result in missing groups in memDB when duplicates are found. [[GH-20965](https://github.com/hashicorp/vault/pull/20965)]
* license: Add autoloaded license path to the cache exempt list. This is to ensure the license changes on the active node is observed on the perfStandby node. [[GH-22363](https://github.com/hashicorp/vault/pull/22363)]
* openapi: Fix response schema for PKI Issue requests [[GH-21449](https://github.com/hashicorp/vault/pull/21449)]
* openapi: Fix schema definitions for PKI EAB APIs [[GH-21458](https://github.com/hashicorp/vault/pull/21458)]
* plugins: Containerized plugins can be run with mlock enabled. [[GH-23215](https://github.com/hashicorp/vault/pull/23215)]
* plugins: Fix instance where Vault could fail to kill broken/unresponsive plugins. [[GH-22914](https://github.com/hashicorp/vault/pull/22914)]
* plugins: Fix instance where broken/unresponsive plugins could cause Vault to hang. [[GH-22914](https://github.com/hashicorp/vault/pull/22914)]
* plugins: Runtime catalog returns 404 instead of 500 when reading a runtime that does not exist [[GH-23171](https://github.com/hashicorp/vault/pull/23171)]
* plugins: `vault plugin runtime list` can successfully list plugin runtimes with GET [[GH-23171](https://github.com/hashicorp/vault/pull/23171)]
* raft/autopilot: Add dr-token flag for raft autopilot cli commands [[GH-21165](https://github.com/hashicorp/vault/pull/21165)]
* replication (enterprise): Fix a bug with sync invalidation of CoreReplicatedClusterInfoPath
* replication (enterprise): Fix discovery of bad primary cluster addresses to be more reliable
* replication (enterprise): Fix panic when update-primary was called on demoted clusters using update_primary_addrs
* replication (enterprise): Fix a bug by which the atomicity of a merkle diff result could be affected, which could cause a merkle-diff & sync process to fail to switch into stream-wal mode afterwards.
* replication (enterprise): Sort cluster addresses returned by echo requests, so that primary-addrs only gets persisted when the
set of addrs changes.
* replication (enterprise): update primary cluster address after DR failover
* sdk/ldaputil: Properly escape user filters when using UPN domains
sdk/ldaputil: use EscapeLDAPValue implementation from cap/ldap [[GH-22249](https://github.com/hashicorp/vault/pull/22249)]
* secrets/azure: Fix intermittent 401s by preventing performance secondary clusters from rotating root credentials. [[GH-21631](https://github.com/hashicorp/vault/pull/21631)]
* secrets/ldap: Fix bug causing schema and password_policy to be overwritten in config. [[GH-22330](https://github.com/hashicorp/vault/pull/22330)]
* secrets/pki: Fix bug with ACME tidy, 'unable to determine acme base folder path'. [[GH-21870](https://github.com/hashicorp/vault/pull/21870)]
* secrets/pki: Fix preserving acme_account_safety_buffer on config/auto-tidy. [[GH-21870](https://github.com/hashicorp/vault/pull/21870)]
* secrets/pki: Fix removal of issuers to clean up unreferenced CRLs. [[GH-23007](https://github.com/hashicorp/vault/pull/23007)]
* secrets/pki: Prevent deleted issuers from reappearing when migrating from a version 1 bundle to a version 2 bundle (versions including 1.13.0, 1.12.2, and 1.11.6); when managed keys were removed but referenced in the Vault 1.10 legacy CA bundle, this caused the error: `no managed key found with uuid`. [[GH-21316](https://github.com/hashicorp/vault/pull/21316)]
* secrets/pki: allowed_domains are now compared in a case-insensitive manner if they use glob patterns [[GH-22126](https://github.com/hashicorp/vault/pull/22126)]
* secrets/transform (enterprise): Batch items with repeated tokens in the tokenization decode api will now contain the decoded_value element
* secrets/transform (enterprise): Fix nil panic when deleting a template with tokenization transformations present
* secrets/transform (enterprise): Fix nil panic when encoding a tokenization transformation on a non-active node
* secrets/transform (enterprise): Grab shared locks for various read operations, only escalating to write locks if work is required
* secrets/transform (enterprise): Tidy operations will be re-scheduled at a minimum of every minute, not a maximum of every minute
* secrets/transit: fix panic when providing non-PEM formatted public key for import [[GH-22753](https://github.com/hashicorp/vault/pull/22753)]
* serviceregistration: Fix bug where multiple nodes in a secondary cluster could be labelled active after updating the cluster's primary [[GH-21642](https://github.com/hashicorp/vault/pull/21642)]
* storage/consul: Consul service registration tags are now case-sensitive. [[GH-6483](https://github.com/hashicorp/vault/pull/6483)]
* storage/raft: Fix race where new follower joining can get pruned by dead server cleanup. [[GH-20986](https://github.com/hashicorp/vault/pull/20986)]
* ui (enterprise): Fix error message when generating SSH credential with control group [[GH-23025](https://github.com/hashicorp/vault/pull/23025)]
* ui: Adds missing values to details view after generating PKI certificate [[GH-21635](https://github.com/hashicorp/vault/pull/21635)]
* ui: Fix blank page or ghost secret when canceling KV secret create [[GH-22541](https://github.com/hashicorp/vault/pull/22541)]
* ui: Fix display for "Last Vault Rotation" timestamp for static database roles which was not rendering or copyable [[GH-22519](https://github.com/hashicorp/vault/pull/22519)]
* ui: Fix styling for username input when editing a user [[GH-21771](https://github.com/hashicorp/vault/pull/21771)]
* ui: Fix styling for viewing certificate in kubernetes configuration [[GH-21968](https://github.com/hashicorp/vault/pull/21968)]
* ui: Fix the issue where confirm delete dropdown is being cut off [[GH-23066](https://github.com/hashicorp/vault/pull/23066)]
* ui: Fixed an issue where editing an SSH role would clear `default_critical_options` and `default_extension` if left unchanged. [[GH-21739](https://github.com/hashicorp/vault/pull/21739)]
* ui: Fixed secrets, leases, and policies filter dropping focus after a single character [[GH-21767](https://github.com/hashicorp/vault/pull/21767)]
* ui: Fixes filter and search bug in secrets engines [[GH-23123](https://github.com/hashicorp/vault/pull/23123)]
* ui: Fixes form field label tooltip alignment [[GH-22832](https://github.com/hashicorp/vault/pull/22832)]
* ui: Fixes issue with certain navigational links incorrectly displaying in child namespaces [[GH-21562](https://github.com/hashicorp/vault/pull/21562)]
* ui: Fixes login screen display issue with Safari browser [[GH-21582](https://github.com/hashicorp/vault/pull/21582)]
* ui: Fixes problem displaying certificates issued with unsupported signature algorithms (i.e. ed25519) [[GH-21926](https://github.com/hashicorp/vault/pull/21926)]
* ui: Fixes styling of private key input when configuring an SSH key [[GH-21531](https://github.com/hashicorp/vault/pull/21531)]
* ui: Surface DOMException error when browser settings prevent localStorage. [[GH-21503](https://github.com/hashicorp/vault/pull/21503)]
* ui: correct doctype for index.html [[GH-22153](https://github.com/hashicorp/vault/pull/22153)]
* ui: don't exclude features present on license [[GH-22855](https://github.com/hashicorp/vault/pull/22855)]
* ui: fixes `max_versions` default for secret metadata unintentionally overriding kv engine defaults [[GH-22394](https://github.com/hashicorp/vault/pull/22394)]
* ui: fixes long namespace names overflow in the sidebar
* ui: fixes model defaults overwriting input value when user tries to clear form input [[GH-22458](https://github.com/hashicorp/vault/pull/22458)]
* ui: fixes text readability issue in revoke token confirmation dialog [[GH-22390](https://github.com/hashicorp/vault/pull/22390)]
## 1.14.5
### October 25, 2023
CHANGES:
* core: Bump Go version to 1.20.10.
* replication (enterprise): Switch to non-deprecated gRPC field for resolver target host
IMPROVEMENTS:
* api/plugins: add `tls-server-name` arg for plugin registration [[GH-23549](https://github.com/hashicorp/vault/pull/23549)]
* core: Use a worker pool for the rollback manager. Add new metrics for the rollback manager to track the queued tasks. [[GH-22567](https://github.com/hashicorp/vault/pull/22567)]
* ui: Adds toggle to KV secrets engine value download modal to optionally stringify value in downloaded file [[GH-23747](https://github.com/hashicorp/vault/pull/23747)]
BUG FIXES:
* command/server: Fix bug with sigusr2 where pprof files were not closed correctly [[GH-23636](https://github.com/hashicorp/vault/pull/23636)]
* events: Ignore sending context to give more time for events to send [[GH-23500](https://github.com/hashicorp/vault/pull/23500)]
* expiration: Prevent large lease loads from delaying state changes, e.g. becoming active or standby. [[GH-23282](https://github.com/hashicorp/vault/pull/23282)]
* kmip (enterprise): Improve handling of failures due to storage replication issues.
* kmip (enterprise): Return a structure in the response for query function Query Server Information.
* mongo-db: allow non-admin database for root credential rotation [[GH-23240](https://github.com/hashicorp/vault/pull/23240)]
* replication (enterprise): Fix a bug where undo logs would only get enabled on the initial node in a cluster.
* replication (enterprise): Fix a missing unlock when changing replication state
* secrets/consul: Fix revocations when Vault has an access token using specific namespace and admin partition policies [[GH-23010](https://github.com/hashicorp/vault/pull/23010)]
* secrets/pki: Stop processing in-flight ACME verifications when an active node steps down [[GH-23278](https://github.com/hashicorp/vault/pull/23278)]
* secrets/transit (enterprise): Address an issue using sign/verify operations with managed keys returning an error about it not containing a private key
* secrets/transit (enterprise): Address panic when using GCP, AWS, or Azure managed keys for encryption operations. At this time, all encryption operations for the cloud providers have been disabled; only signing operations are supported.
* secrets/transit (enterprise): Apply hashing arguments and defaults to managed key sign/verify operations
* secrets/transit: Do not allow auto rotation on managed_key key types [[GH-23723](https://github.com/hashicorp/vault/pull/23723)]
* storage/consul: fix a bug where an active node in a specific sort of network
partition could continue to write data to Consul after a new leader is elected,
potentially causing data loss or corruption for keys with many concurrent
writers. For Enterprise clusters, this could cause corruption of the merkle trees,
leading to failure to complete merkle sync without a full re-index. [[GH-23013](https://github.com/hashicorp/vault/pull/23013)]
* ui: Decode the connection url for display on the connection details page [[GH-23695](https://github.com/hashicorp/vault/pull/23695)]
* ui: Fix AWS secret engine to allow empty policy_document field. [[GH-23470](https://github.com/hashicorp/vault/pull/23470)]
* ui: Fix the copy token button in the sidebar navigation window when in a collapsed state. [[GH-23331](https://github.com/hashicorp/vault/pull/23331)]
* ui: Fixes issue with sidebar navigation links disappearing when navigating to policies when a user is not authorized [[GH-23516](https://github.com/hashicorp/vault/pull/23516)]
## 1.14.4
### September 27, 2023
SECURITY:
* sentinel (enterprise): Sentinel RGP policies allowed for cross-namespace denial-of-service. This vulnerability, CVE-2023-3775, is fixed in Vault Enterprise 1.15.0, 1.14.4, and 1.13.8. [[HSEC-2023-29](https://discuss.hashicorp.com/t/hcsec-2023-29-vault-enterprise-s-sentinel-rgp-policies-allowed-for-cross-namespace-denial-of-service/58653)]
CHANGES:
* core (enterprise): Ensure Role Governing Policies are only applied down the namespace hierarchy
IMPROVEMENTS:
* ui: Add pagination to PKI roles, keys, issuers, and certificates list pages [[GH-23193](https://github.com/hashicorp/vault/pull/23193)]
* ui: Added allowed_domains_template field for CA type role in SSH engine [[GH-23119](https://github.com/hashicorp/vault/pull/23119)]
* ui: Adds tidy_revoked_certs to PKI tidy status page [[GH-23232](https://github.com/hashicorp/vault/pull/23232)]
* ui: Adds warning before downloading KV v2 secret values [[GH-23260](https://github.com/hashicorp/vault/pull/23260)]
BUG FIXES:
* core: Fixes the password policy list endpoint to include policies whose names contain / characters. [[GH-23155](https://github.com/hashicorp/vault/pull/23155)]
* secrets/pki: Fix removal of issuers to clean up unreferenced CRLs. [[GH-23007](https://github.com/hashicorp/vault/pull/23007)]
* ui (enterprise): Fix error message when generating SSH credential with control group [[GH-23025](https://github.com/hashicorp/vault/pull/23025)]
* ui: Fix the issue where confirm delete dropdown is being cut off [[GH-23066](https://github.com/hashicorp/vault/pull/23066)]
* ui: Fixes filter and search bug in secrets engines [[GH-23123](https://github.com/hashicorp/vault/pull/23123)]
* ui: don't exclude features present on license [[GH-22855](https://github.com/hashicorp/vault/pull/22855)]
## 1.14.3
### September 13, 2023
SECURITY:
* secrets/transit: fix a regression that was honoring nonces provided in non-convergent modes during encryption. This vulnerability, CVE-2023-4680, is fixed in Vault 1.14.3, 1.13.7, and 1.12.11. [[GH-22852](https://github.com/hashicorp/vault/pull/22852), [HSEC-2023-28](https://discuss.hashicorp.com/t/hcsec-2023-28-vault-s-transit-secrets-engine-allowed-nonce-specified-without-convergent-encryption/58249)]
CHANGES:
* core: Bump Go version to 1.20.8.
FEATURES:
* **Merkle Tree Corruption Detection (enterprise)**: Add a new endpoint to check merkle tree corruption.
IMPROVEMENTS:
* auth/ldap: improved login speed by adding concurrency to LDAP token group searches [[GH-22659](https://github.com/hashicorp/vault/pull/22659)]
* core/quotas: Add configuration to allow skipping of expensive role calculations [[GH-22651](https://github.com/hashicorp/vault/pull/22651)]
* kmip (enterprise): reduce latency of KMIP operation handling
BUG FIXES:
* cli: Fix the CLI failing to return wrapping information for KV PUT and PATCH operations when format is set to `table`. [[GH-22818](https://github.com/hashicorp/vault/pull/22818)]
* core/quotas: Only perform ResolveRoleOperation for role-based quotas and lease creation. [[GH-22597](https://github.com/hashicorp/vault/pull/22597)]
* core/quotas: Reduce overhead for role calculation when using cloud auth methods. [[GH-22583](https://github.com/hashicorp/vault/pull/22583)]
* core/seal: add a workaround for potential connection [[hangs](https://github.com/Azure/azure-sdk-for-go/issues/21346)] in Azure autoseals. [[GH-22760](https://github.com/hashicorp/vault/pull/22760)]
* core: All subloggers now reflect configured log level on reload. [[GH-22038](https://github.com/hashicorp/vault/pull/22038)]
* kmip (enterprise): fix date handling error with some re-key operations
* raft/autopilot: Add dr-token flag for raft autopilot cli commands [[GH-21165](https://github.com/hashicorp/vault/pull/21165)]
* replication (enterprise): Fix discovery of bad primary cluster addresses to be more reliable
* secrets/transit: fix panic when providing non-PEM formatted public key for import [[GH-22753](https://github.com/hashicorp/vault/pull/22753)]
* ui: fixes long namespace names overflow in the sidebar
## 1.14.2
### August 30, 2023
@@ -57,9 +455,15 @@ sdk/ldaputil: use EscapeLDAPValue implementation from cap/ldap [[GH-22249](https
## 1.14.1
### July 25, 2023
SECURITY:
* auth/ldap: Normalize HTTP response codes when invalid credentials are provided to prevent user enumeration. This vulnerability, CVE-2023-3462, is fixed in Vault 1.14.1 and 1.13.5. [[GH-21282](https://github.com/hashicorp/vault/pull/21282), [HSEC-2023-24](https://discuss.hashicorp.com/t/hcsec-2023-24-vaults-ldap-auth-method-allows-for-user-enumeration/56714)]
* core/namespace (enterprise): An unhandled error in Vault Enterprise's namespace creation may cause the Vault process to crash, potentially resulting in denial of service. This vulnerability, CVE-2023-3774, is fixed in Vault Enterprise 1.14.1, 1.13.5, and 1.12.9. [[HSEC-2023-23](https://discuss.hashicorp.com/t/hcsec-2023-23-vault-enterprise-namespace-creation-may-lead-to-denial-of-service/56617)]
CHANGES:
* auth/ldap: Normalize HTTP response codes when invalid credentials are provided [[GH-21282](https://github.com/hashicorp/vault/pull/21282)]
* core/namespace (enterprise): Introduce the concept of high-privilege namespace (administrative namespace),
which will have access to some system backend paths that were previously only accessible in the root namespace. [[GH-21215](https://github.com/hashicorp/vault/pull/21215)]
* secrets/transform (enterprise): Enforce a transformation role's max_ttl setting on encode requests; a warning will be returned if max_ttl was applied.
@@ -109,6 +513,10 @@ respects `AWS_ROLE_ARN`, `AWS_WEB_IDENTITY_TOKEN_FILE`, and `AWS_ROLE_SESSION_NA
## 1.14.0
### June 21, 2023
SECURITY:
* ui: key-value v2 (kv-v2) diff viewer allowed HTML injection into the Vault web UI through key values. This vulnerability, CVE-2023-2121, is fixed in Vault 1.14.0, 1.13.3, 1.12.7, and 1.11.11. [[HSEC-2023-17](https://discuss.hashicorp.com/t/hcsec-2023-17-vault-s-kv-diff-viewer-allowed-html-injection/54814)]
BREAKING CHANGES:
* secrets/pki: Maintaining a running count of certificates will be turned off by default.
@@ -326,6 +734,34 @@ with a new entity alias to be incorrectly forwarded from perf standbys. [[GH-211
* ui: fixes key_bits and signature_bits reverting to default values when editing a pki role [[GH-20907](https://github.com/hashicorp/vault/pull/20907)]
* ui: wait for wanted message event during OIDC callback instead of using the first message event [[GH-18521](https://github.com/hashicorp/vault/pull/18521)]
## 1.13.9
### October 25, 2023
CHANGES:
* core: Bump Go version to 1.20.10.
* replication (enterprise): Switch to non-deprecated gRPC field for resolver target host
IMPROVEMENTS:
* api/plugins: add `tls-server-name` arg for plugin registration [[GH-23549](https://github.com/hashicorp/vault/pull/23549)]
* core: Use a worker pool for the rollback manager. Add new metrics for the rollback manager to track the queued tasks. [[GH-22567](https://github.com/hashicorp/vault/pull/22567)]
BUG FIXES:
* command/server: Fix bug with sigusr2 where pprof files were not closed correctly [[GH-23636](https://github.com/hashicorp/vault/pull/23636)]
* events: Ignore sending context to give more time for events to send [[GH-23500](https://github.com/hashicorp/vault/pull/23500)]
* expiration: Prevent large lease loads from delaying state changes, e.g. becoming active or standby. [[GH-23282](https://github.com/hashicorp/vault/pull/23282)]
* kmip (enterprise): Improve handling of failures due to storage replication issues.
* kmip (enterprise): Return a structure in the response for query function Query Server Information.
* mongo-db: allow non-admin database for root credential rotation [[GH-23240](https://github.com/hashicorp/vault/pull/23240)]
* replication (enterprise): Fix a bug where undo logs would only get enabled on the initial node in a cluster.
* replication (enterprise): Fix a missing unlock when changing replication state
* secrets/transit (enterprise): Address an issue using sign/verify operations with managed keys returning an error about it not containing a private key
* secrets/transit (enterprise): Address panic when using GCP, AWS, or Azure managed keys for encryption operations. At this time, all encryption operations for the cloud providers have been disabled; only signing operations are supported.
* secrets/transit (enterprise): Apply hashing arguments and defaults to managed key sign/verify operations
* secrets/transit: Do not allow auto rotation on managed_key key types [[GH-23723](https://github.com/hashicorp/vault/pull/23723)]
## 1.13.6
### August 30, 2023
@@ -365,13 +801,113 @@ sdk/ldaputil: use EscapeLDAPValue implementation from cap/ldap [[GH-22249](https
* ui: Fix blank page or ghost secret when canceling KV secret create [[GH-22541](https://github.com/hashicorp/vault/pull/22541)]
* ui: fixes `max_versions` default for secret metadata unintentionally overriding kv engine defaults [[GH-22394](https://github.com/hashicorp/vault/pull/22394)]
* ui: fixes model defaults overwriting input value when user tries to clear form input [[GH-22458](https://github.com/hashicorp/vault/pull/22458)]
## 1.13.5
### July 25, 2023
## 1.13.8
### September 27, 2023
SECURITY:
* sentinel (enterprise): Sentinel RGP policies allowed for cross-namespace denial-of-service. This vulnerability, CVE-2023-3775, is fixed in Vault Enterprise 1.15.0, 1.14.4, and 1.13.8. [[HSEC-2023-29](https://discuss.hashicorp.com/t/hcsec-2023-29-vault-enterprise-s-sentinel-rgp-policies-allowed-for-cross-namespace-denial-of-service/58653)]
CHANGES:
* core (enterprise): Ensure Role Governing Policies are only applied down the namespace hierarchy
IMPROVEMENTS:
* ui: Added allowed_domains_template field for CA type role in SSH engine [[GH-23119](https://github.com/hashicorp/vault/pull/23119)]
BUG FIXES:
* core: Fixes the password policy list endpoint to include policies whose names contain / characters. [[GH-23155](https://github.com/hashicorp/vault/pull/23155)]
* secrets/pki: Fix removal of issuers to clean up unreferenced CRLs. [[GH-23007](https://github.com/hashicorp/vault/pull/23007)]
* ui (enterprise): Fix error message when generating SSH credential with control group [[GH-23025](https://github.com/hashicorp/vault/pull/23025)]
* ui: Fixes filter and search bug on the old PKI roles page [[GH-22810](https://github.com/hashicorp/vault/pull/22810)]
* ui: don't exclude features present on license [[GH-22855](https://github.com/hashicorp/vault/pull/22855)]
## 1.13.7
### September 13, 2023
SECURITY:
* secrets/transit: fix a regression that was honoring nonces provided in non-convergent modes during encryption. This vulnerability, CVE-2023-4680, is fixed in Vault 1.14.3, 1.13.7, and 1.12.11. [[GH-22852](https://github.com/hashicorp/vault/pull/22852), [HSEC-2023-28](https://discuss.hashicorp.com/t/hcsec-2023-28-vault-s-transit-secrets-engine-allowed-nonce-specified-without-convergent-encryption/58249)]
CHANGES:
* core: Bump Go version to 1.20.8.
* database/snowflake: Update plugin to v0.7.3 [[GH-22591](https://github.com/hashicorp/vault/pull/22591)]
FEATURES:
* **Merkle Tree Corruption Detection (enterprise)**: Add a new endpoint to check merkle tree corruption.
IMPROVEMENTS:
* auth/ldap: improved login speed by adding concurrency to LDAP token group searches [[GH-22659](https://github.com/hashicorp/vault/pull/22659)]
* core/quotas: Add configuration to allow skipping of expensive role calculations [[GH-22651](https://github.com/hashicorp/vault/pull/22651)]
* kmip (enterprise): reduce latency of KMIP operation handling
BUG FIXES:
* cli: Fix the CLI failing to return wrapping information for KV PUT and PATCH operations when format is set to `table`. [[GH-22818](https://github.com/hashicorp/vault/pull/22818)]
* core/quotas: Only perform ResolveRoleOperation for role-based quotas and lease creation. [[GH-22597](https://github.com/hashicorp/vault/pull/22597)]
* core/quotas: Reduce overhead for role calculation when using cloud auth methods. [[GH-22583](https://github.com/hashicorp/vault/pull/22583)]
* core/seal: add a workaround for potential connection [[hangs](https://github.com/Azure/azure-sdk-for-go/issues/21346)] in Azure autoseals. [[GH-22760](https://github.com/hashicorp/vault/pull/22760)]
* core: All subloggers now reflect configured log level on reload. [[GH-22038](https://github.com/hashicorp/vault/pull/22038)]
* kmip (enterprise): fix date handling error with some re-key operations
* raft/autopilot: Add dr-token flag for raft autopilot cli commands [[GH-21165](https://github.com/hashicorp/vault/pull/21165)]
* replication (enterprise): Fix discovery of bad primary cluster addresses to be more reliable
## 1.13.6
### August 30, 2023
CHANGES:
* core: Bump Go version to 1.20.7.
IMPROVEMENTS:
* core: Log rollback manager failures during unmount, remount to prevent replication failures on secondary clusters. [[GH-22235](https://github.com/hashicorp/vault/pull/22235)]
* replication (enterprise): Make reindex less disruptive by allowing writes during the flush phase.
* secrets/database: Improves error logging for static role rotations by including the database and role names. [[GH-22253](https://github.com/hashicorp/vault/pull/22253)]
* storage/raft: Cap the minimum dead_server_last_contact_threshold to 1m. [[GH-22040](https://github.com/hashicorp/vault/pull/22040)]
* ui: KV View Secret card will link to list view if input ends in "/" [[GH-22502](https://github.com/hashicorp/vault/pull/22502)]
* ui: enables create and update KV secret workflow when control group present [[GH-22471](https://github.com/hashicorp/vault/pull/22471)]
BUG FIXES:
* activity (enterprise): Fix misattribution of entities to auth methods in no namespace or a child namespace [[GH-18809](https://github.com/hashicorp/vault/pull/18809)]
* api: Fix breakage with UNIX domain socket addresses introduced by newest Go versions as a security fix. [[GH-22523](https://github.com/hashicorp/vault/pull/22523)]
* core (enterprise): Remove MFA Configuration for namespace when deleting namespace
* core/quotas (enterprise): Fix a case where we were applying login roles to lease count quotas in a non-login context.
Also fix a related potential deadlock. [[GH-21110](https://github.com/hashicorp/vault/pull/21110)]
* core: Remove "expiration manager is nil on tokenstore" error log for unauth requests on DR secondary as they do not have expiration manager. [[GH-22137](https://github.com/hashicorp/vault/pull/22137)]
* core: Fix bug where background thread to update locked user entries runs on DR secondaries. [[GH-22355](https://github.com/hashicorp/vault/pull/22355)]
* core: Fix readonly errors that could occur while loading mounts/auths during unseal [[GH-22362](https://github.com/hashicorp/vault/pull/22362)]
* core: Fixed an instance where incorrect route entries would get tainted. We now pre-calculate namespace specific paths to avoid this. [[GH-21470](https://github.com/hashicorp/vault/pull/21470)]
* expiration: Fix a deadlock that could occur when a revocation failure happens while restoring leases on startup. [[GH-22374](https://github.com/hashicorp/vault/pull/22374)]
* license: Add the autoloaded license path to the cache exempt list to ensure license changes on the active node are observed on the perf standby node. [[GH-22363](https://github.com/hashicorp/vault/pull/22363)]
* replication (enterprise): Fix a bug with sync invalidation of CoreReplicatedClusterInfoPath
* replication (enterprise): Fix panic when update-primary was called on demoted clusters using update_primary_addrs
* replication (enterprise): Fix a bug by which the atomicity of a merkle diff result could be affected, which could cause a merkle-diff & sync process to fail to switch into stream-wal mode afterwards.
* sdk/ldaputil: Properly escape user filters when using UPN domains
sdk/ldaputil: use EscapeLDAPValue implementation from cap/ldap [[GH-22249](https://github.com/hashicorp/vault/pull/22249)]
* secrets/ldap: Fix bug causing schema and password_policy to be overwritten in config. [[GH-22331](https://github.com/hashicorp/vault/pull/22331)]
* secrets/transform (enterprise): Tidy operations will be re-scheduled at a minimum of every minute, not a maximum of every minute
* ui: Fix blank page or ghost secret when canceling KV secret create [[GH-22541](https://github.com/hashicorp/vault/pull/22541)]
* ui: fixes `max_versions` default for secret metadata unintentionally overriding kv engine defaults [[GH-22394](https://github.com/hashicorp/vault/pull/22394)]
* ui: fixes model defaults overwriting input value when user tries to clear form input [[GH-22458](https://github.com/hashicorp/vault/pull/22458)]
## 1.13.5
### July 25, 2023
SECURITY:
* auth/ldap: Normalize HTTP response codes when invalid credentials are provided to prevent user enumeration. This vulnerability, CVE-2023-3462, is fixed in Vault 1.14.1 and 1.13.5. [[GH-21282](https://github.com/hashicorp/vault/pull/21282), [HSEC-2023-24](https://discuss.hashicorp.com/t/hcsec-2023-24-vaults-ldap-auth-method-allows-for-user-enumeration/56714)]
* core/namespace (enterprise): An unhandled error in Vault Enterprise's namespace creation may cause the Vault process to crash, potentially resulting in denial of service. This vulnerability, CVE-2023-3774, is fixed in Vault Enterprise 1.14.1, 1.13.5, and 1.12.9. [[HSEC-2023-23](https://discuss.hashicorp.com/t/hcsec-2023-23-vault-enterprise-namespace-creation-may-lead-to-denial-of-service/56617)]
CHANGES:
* auth/ldap: Normalize HTTP response codes when invalid credentials are provided [[GH-21282](https://github.com/hashicorp/vault/pull/21282)]
* core/namespace (enterprise): Introduce the concept of high-privilege namespace (administrative namespace),
which will have access to some system backend paths that were previously only accessible in the root namespace. [[GH-21215](https://github.com/hashicorp/vault/pull/21215)]
* secrets/transform (enterprise): Enforce a transformation role's max_ttl setting on encode requests; a warning will be returned if max_ttl was applied.
@@ -905,6 +1441,26 @@ non-voter. In some scenarios this resulted in loss of quorum. [[GH-18263](https
* ui: fixes reliance on secure context (https) by removing methods using the Crypto interface [[GH-19403](https://github.com/hashicorp/vault/pull/19403)]
* ui: show Get credentials button for static roles detail page when a user has the proper permissions. [[GH-19190](https://github.com/hashicorp/vault/pull/19190)]
## 1.12.11
### September 13, 2023
SECURITY:
* secrets/transit: fix a regression that was honoring nonces provided in non-convergent modes during encryption. [[GH-22852](https://github.com/hashicorp/vault/pull/22852)]
IMPROVEMENTS:
* auth/ldap: improved login speed by adding concurrency to LDAP token group searches [[GH-22659](https://github.com/hashicorp/vault/pull/22659)]
* kmip (enterprise): reduce latency of KMIP operation handling
BUG FIXES:
* cli: Fix the CLI failing to return wrapping information for KV PUT and PATCH operations when format is set to `table`. [[GH-22818](https://github.com/hashicorp/vault/pull/22818)]
* core/quotas: Reduce overhead for role calculation when using cloud auth methods. [[GH-22583](https://github.com/hashicorp/vault/pull/22583)]
* core/seal: add a workaround for potential connection [[hangs](https://github.com/Azure/azure-sdk-for-go/issues/21346)] in Azure autoseals. [[GH-22760](https://github.com/hashicorp/vault/pull/22760)]
* raft/autopilot: Add dr-token flag for raft autopilot cli commands [[GH-21165](https://github.com/hashicorp/vault/pull/21165)]
* replication (enterprise): Fix discovery of bad primary cluster addresses to be more reliable
## 1.12.10
### August 30, 2023
@@ -942,6 +1498,10 @@ sdk/ldaputil: use EscapeLDAPValue implementation from cap/ldap [[GH-22249](https
## 1.12.9
### July 25, 2023
SECURITY:
* core/namespace (enterprise): An unhandled error in Vault Enterprise's namespace creation may cause the Vault process to crash, potentially resulting in denial of service. This vulnerability, CVE-2023-3774, is fixed in Vault Enterprise 1.14.1, 1.13.5, and 1.12.9. [[HSEC-2023-23](https://discuss.hashicorp.com/t/hcsec-2023-23-vault-enterprise-namespace-creation-may-lead-to-denial-of-service/56617)]
CHANGES:
* secrets/transform (enterprise): Enforce a transformation role's max_ttl setting on encode requests; a warning will be returned if max_ttl was applied.
@@ -1027,6 +1587,10 @@ have its own changelog entry. Fix wrong lock used in ListAuths link meta interf
## 1.12.7
### June 08, 2023
SECURITY:
* ui: key-value v2 (kv-v2) diff viewer allowed HTML injection into the Vault web UI through key values. This vulnerability, CVE-2023-2121, is fixed in Vault 1.14.0, 1.13.3, 1.12.7, and 1.11.11. [[HSEC-2023-17](https://discuss.hashicorp.com/t/hcsec-2023-17-vault-s-kv-diff-viewer-allowed-html-injection/54814)]
CHANGES:
* core: Bump Go version to 1.19.9.
@@ -1571,6 +2135,10 @@ have its own changelog entry. [[GH-21260](https://github.com/hashicorp/vault/pul
## 1.11.11
### June 08, 2023
SECURITY:
* ui: key-value v2 (kv-v2) diff viewer allowed HTML injection into the Vault web UI through key values. This vulnerability, CVE-2023-2121, is fixed in Vault 1.14.0, 1.13.3, 1.12.7, and 1.11.11. [[HSEC-2023-17](https://discuss.hashicorp.com/t/hcsec-2023-17-vault-s-kv-diff-viewer-allowed-html-injection/54814)]
CHANGES:
* core: Bump Go version to 1.19.9.
View File
@@ -22,6 +22,9 @@
/builtin/logical/postgresql/ @hashicorp/vault-ecosystem-applications
/builtin/logical/rabbitmq/ @hashicorp/vault-ecosystem-applications
# Identity Integrations (OIDC, tokens)
/vault/identity_store_oidc* @hashicorp/vault-ecosystem-applications
/plugins/ @hashicorp/vault-ecosystem
/vault/plugin_catalog.go @hashicorp/vault-ecosystem
View File
@@ -2,7 +2,7 @@
# SPDX-License-Identifier: BUSL-1.1
## DOCKERHUB DOCKERFILE ##
FROM alpine:3.15 as default
FROM alpine:3.18 as default
ARG BIN_NAME
# NAME and PRODUCT_VERSION are the name of the software in releases.hashicorp.com
View File
@@ -4,7 +4,7 @@ License text copyright (c) 2020 MariaDB Corporation Ab, All Rights Reserved.
Parameters
Licensor: HashiCorp, Inc.
Licensed Work: Vault 1.15.0. The Licensed Work is (c) 2023 HashiCorp, Inc.
Licensed Work: Vault 1.15.1. The Licensed Work is (c) 2023 HashiCorp, Inc.
Additional Use Grant: You may make production use of the Licensed Work,
provided such use does not include offering the Licensed Work
to third parties on a hosted or embedded basis which is
View File
@@ -11,8 +11,7 @@ EXTERNAL_TOOLS_CI=\
golang.org/x/tools/cmd/goimports \
github.com/golangci/revgrep/cmd/revgrep \
mvdan.cc/gofumpt \
honnef.co/go/tools/cmd/staticcheck \
github.com/bufbuild/buf/cmd/buf
honnef.co/go/tools/cmd/staticcheck
EXTERNAL_TOOLS=\
github.com/client9/misspell/cmd/misspell
GOFMT_FILES?=$$(find . -name '*.go' | grep -v pb.go | grep -v vendor)
@@ -173,10 +172,12 @@ ci-bootstrap: .ci-bootstrap
echo "Installing/Updating $$tool" ; \
GO111MODULE=off $(GO_CMD) get -u $$tool; \
done
go install github.com/bufbuild/buf/cmd/buf@v1.25.0
@touch .ci-bootstrap
# bootstrap the build by downloading additional tools that may be used by devs
bootstrap: ci-bootstrap
@sh -c "'$(CURDIR)/scripts/goversioncheck.sh' '$(GO_VERSION_MIN)'"
go generate -tags tools tools/tools.go
go install github.com/bufbuild/buf/cmd/buf@v1.25.0
@@ -316,3 +317,12 @@ ci-get-version-package:
.PHONY: ci-prepare-legal
ci-prepare-legal:
@$(CURDIR)/scripts/ci-helper.sh prepare-legal
.PHONY: ci-copywriteheaders
ci-copywriteheaders:
copywrite headers --plan
# Special case for MPL headers in /api, /sdk, and /shamir
cd api && $(CURDIR)/scripts/copywrite-exceptions.sh
cd sdk && $(CURDIR)/scripts/copywrite-exceptions.sh
cd shamir && $(CURDIR)/scripts/copywrite-exceptions.sh
8
api/.copywrite.hcl Normal file
View File
@@ -0,0 +1,8 @@
schema_version = 1
project {
license = "MPL-2.0"
copyright_year = 2023
header_ignore = []
}
View File
@@ -51,6 +51,7 @@ type PluginAPIClientMeta struct {
flagCAPath string
flagClientCert string
flagClientKey string
flagServerName string
flagInsecure bool
}
@@ -62,6 +63,7 @@ func (f *PluginAPIClientMeta) FlagSet() *flag.FlagSet {
fs.StringVar(&f.flagCAPath, "ca-path", "", "")
fs.StringVar(&f.flagClientCert, "client-cert", "", "")
fs.StringVar(&f.flagClientKey, "client-key", "", "")
fs.StringVar(&f.flagServerName, "tls-server-name", "", "")
fs.BoolVar(&f.flagInsecure, "tls-skip-verify", false, "")
return fs
@@ -70,13 +72,13 @@ func (f *PluginAPIClientMeta) FlagSet() *flag.FlagSet {
// GetTLSConfig will return a TLSConfig based off the values from the flags
func (f *PluginAPIClientMeta) GetTLSConfig() *TLSConfig {
// If we need custom TLS configuration, then set it
if f.flagCACert != "" || f.flagCAPath != "" || f.flagClientCert != "" || f.flagClientKey != "" || f.flagInsecure {
if f.flagCACert != "" || f.flagCAPath != "" || f.flagClientCert != "" || f.flagClientKey != "" || f.flagInsecure || f.flagServerName != "" {
t := &TLSConfig{
CACert: f.flagCACert,
CAPath: f.flagCAPath,
ClientCert: f.flagClientCert,
ClientKey: f.flagClientKey,
TLSServerName: "",
TLSServerName: f.flagServerName,
Insecure: f.flagInsecure,
}
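For illustration, a minimal standalone sketch (not the Vault source itself) of how the new `tls-server-name` flag feeds the client TLS config: a non-empty server name alone is now enough to trigger custom TLS configuration. The `TLSConfig` stand-in and flag names mirror the diff above.

```go
package main

import (
	"flag"
	"fmt"
)

// TLSConfig is a stand-in for the api.TLSConfig fields touched above.
type TLSConfig struct {
	TLSServerName string
	Insecure      bool
}

func main() {
	fs := flag.NewFlagSet("plugin", flag.ExitOnError)
	serverName := fs.String("tls-server-name", "", "SNI name to verify on the server certificate")
	insecure := fs.Bool("tls-skip-verify", false, "disable certificate verification")
	_ = fs.Parse([]string{"-tls-server-name", "vault.internal"}) // example args

	// Mirrors GetTLSConfig: the server name now participates in the
	// "do we need custom TLS?" check and is copied into the config.
	var cfg *TLSConfig
	if *serverName != "" || *insecure {
		cfg = &TLSConfig{TLSServerName: *serverName, Insecure: *insecure}
	}
	fmt.Printf("%+v\n", cfg)
}
```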
View File
@@ -42,6 +42,10 @@ type Secret struct {
// cubbyhole of the given token (which has a TTL of the given number of
// seconds)
WrapInfo *SecretWrapInfo `json:"wrap_info,omitempty"`
// MountType, if non-empty, provides some information about what kind
// of mount this secret came from.
MountType string `json:"mount_type,omitempty"`
}
// TokenID returns the standardized token ID (token) for the given secret.
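A hedged usage sketch of the new field: after any read through the Go client, `MountType` reports which kind of mount served the secret, and stays empty on servers that predate the field. The mount path `secret/data/app` is hypothetical.

```go
package main

import (
	"fmt"
	"log"

	"github.com/hashicorp/vault/api"
)

func main() {
	// Client config comes from VAULT_ADDR / VAULT_TOKEN in the environment.
	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}
	secret, err := client.Logical().Read("secret/data/app") // hypothetical KV v2 path
	if err != nil {
		log.Fatal(err)
	}
	if secret == nil {
		log.Fatal("no secret at path")
	}
	// Empty when the server does not return the mount_type field.
	fmt.Println("served by mount type:", secret.MountType)
}
```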
View File
@@ -1,3 +1,6 @@
// Copyright (c) HashiCorp, Inc.
// SPDX-License-Identifier: MPL-2.0
package api
import (
View File
@@ -109,6 +109,7 @@ type SealStatusResponse struct {
ClusterName string `json:"cluster_name,omitempty"`
ClusterID string `json:"cluster_id,omitempty"`
RecoverySeal bool `json:"recovery_seal"`
RecoverySealType string `json:"recovery_seal_type,omitempty"`
StorageType string `json:"storage_type,omitempty"`
HCPLinkStatus string `json:"hcp_link_status,omitempty"`
HCPLinkResourceID string `json:"hcp_link_resource_ID,omitempty"`
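A small sketch, for illustration, of reading the new field via the Go client; `RecoverySealType` is omitted from the JSON (and left empty) on servers that do not report it.

```go
package main

import (
	"fmt"
	"log"

	"github.com/hashicorp/vault/api"
)

func main() {
	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}
	status, err := client.Sys().SealStatus()
	if err != nil {
		log.Fatal(err)
	}
	// RecoverySealType is new; older servers simply leave it empty.
	fmt.Println("recovery seal:", status.RecoverySeal, "type:", status.RecoverySealType)
}
```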
View File
@@ -13,8 +13,9 @@ import (
// getDefaultOptions returns options with their default values.
func getDefaultOptions() options {
return options{
withNow: time.Now(),
withFormat: JSONFormat,
withNow: time.Now(),
withFormat: JSONFormat,
withHMACAccessor: true,
}
}
@@ -108,11 +109,7 @@ func WithFormat(f string) Option {
// WithPrefix provides an Option to represent a prefix for a file sink.
func WithPrefix(prefix string) Option {
return func(o *options) error {
prefix = strings.TrimSpace(prefix)
if prefix != "" {
o.withPrefix = prefix
}
o.withPrefix = prefix
return nil
}
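A self-contained sketch of the behavior change above, using illustrative copies of the unexported types: `withHMACAccessor` now defaults to true, and `WithPrefix` stores the value verbatim instead of dropping whitespace-only prefixes.

```go
package main

import "fmt"

type options struct {
	withPrefix       string
	withHMACAccessor bool
}

type Option func(*options) error

func getDefaultOptions() options {
	return options{withHMACAccessor: true} // new default from the diff
}

func WithPrefix(prefix string) Option {
	return func(o *options) error {
		o.withPrefix = prefix // no TrimSpace: whitespace-only prefixes survive
		return nil
	}
}

func apply(opts ...Option) options {
	o := getDefaultOptions()
	for _, fn := range opts {
		_ = fn(&o)
	}
	return o
}

func main() {
	o := apply(WithPrefix("  "))
	fmt.Printf("%q %v\n", o.withPrefix, o.withHMACAccessor) // "  " true
}
```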
View File
@@ -211,9 +211,9 @@ func TestOptions_WithPrefix(t *testing.T) {
ExpectedValue: "",
},
"whitespace": {
Value: " ",
IsErrorExpected: false,
ExpectedErrorMessage: "",
Value: " ",
IsErrorExpected: false,
ExpectedValue: " ",
},
"valid": {
Value: "test",
View File
@@ -1,3 +1,6 @@
// Copyright (c) HashiCorp, Inc.
// SPDX-License-Identifier: BUSL-1.1
package audit
import (
View File
@@ -98,7 +98,7 @@ func TestFormatJSON_formatRequest(t *testing.T) {
for name, tc := range cases {
var buf bytes.Buffer
cfg, err := NewFormatterConfig()
cfg, err := NewFormatterConfig(WithHMACAccessor(false))
require.NoError(t, err)
f, err := NewEntryFormatter(cfg, ss)
require.NoError(t, err)
View File
@@ -22,6 +22,11 @@ import (
"github.com/hashicorp/vault/sdk/logical"
)
const (
stdout = "stdout"
discard = "discard"
)
func Factory(ctx context.Context, conf *audit.BackendConfig, useEventLogger bool, headersConfig audit.HeaderFormatter) (audit.Backend, error) {
if conf.SaltConfig == nil {
return nil, fmt.Errorf("nil salt config")
@@ -39,53 +44,45 @@ func Factory(ctx context.Context, conf *audit.BackendConfig, useEventLogger bool
}
// normalize path if configured for stdout
if strings.EqualFold(path, "stdout") {
path = "stdout"
if strings.EqualFold(path, stdout) {
path = stdout
}
if strings.EqualFold(path, "discard") {
path = "discard"
if strings.EqualFold(path, discard) {
path = discard
}
format, ok := conf.Config["format"]
if !ok {
format = audit.JSONFormat.String()
}
switch format {
case audit.JSONFormat.String(), audit.JSONxFormat.String():
default:
return nil, fmt.Errorf("unknown format type %q", format)
var cfgOpts []audit.Option
if format, ok := conf.Config["format"]; ok {
cfgOpts = append(cfgOpts, audit.WithFormat(format))
}
// Check if hashing of accessor is disabled
hmacAccessor := true
if hmacAccessorRaw, ok := conf.Config["hmac_accessor"]; ok {
value, err := strconv.ParseBool(hmacAccessorRaw)
v, err := strconv.ParseBool(hmacAccessorRaw)
if err != nil {
return nil, err
}
hmacAccessor = value
cfgOpts = append(cfgOpts, audit.WithHMACAccessor(v))
}
// Check if raw logging is enabled
logRaw := false
if raw, ok := conf.Config["log_raw"]; ok {
b, err := strconv.ParseBool(raw)
v, err := strconv.ParseBool(raw)
if err != nil {
return nil, err
}
logRaw = b
cfgOpts = append(cfgOpts, audit.WithRaw(v))
}
elideListResponses := false
if elideListResponsesRaw, ok := conf.Config["elide_list_responses"]; ok {
value, err := strconv.ParseBool(elideListResponsesRaw)
v, err := strconv.ParseBool(elideListResponsesRaw)
if err != nil {
return nil, err
}
elideListResponses = value
cfgOpts = append(cfgOpts, audit.WithElision(v))
}
// Check if mode is provided
mode := os.FileMode(0o600)
if modeRaw, ok := conf.Config["mode"]; ok {
m, err := strconv.ParseUint(modeRaw, 8, 32)
@@ -95,7 +92,7 @@ func Factory(ctx context.Context, conf *audit.BackendConfig, useEventLogger bool
switch m {
case 0:
// if mode is 0000, then do not modify file mode
if path != "stdout" && path != "discard" {
if path != stdout && path != discard {
fileInfo, err := os.Stat(path)
if err != nil {
return nil, err
@@ -107,12 +104,7 @@ func Factory(ctx context.Context, conf *audit.BackendConfig, useEventLogger bool
}
}
cfg, err := audit.NewFormatterConfig(
audit.WithElision(elideListResponses),
audit.WithFormat(format),
audit.WithHMACAccessor(hmacAccessor),
audit.WithRaw(logRaw),
)
cfg, err := audit.NewFormatterConfig(cfgOpts...)
if err != nil {
return nil, err
}
@@ -136,11 +128,13 @@ func Factory(ctx context.Context, conf *audit.BackendConfig, useEventLogger bool
return nil, fmt.Errorf("error creating formatter: %w", err)
}
var w audit.Writer
switch format {
case "json":
switch b.formatConfig.RequiredFormat {
case audit.JSONFormat:
w = &audit.JSONWriter{Prefix: conf.Config["prefix"]}
case "jsonx":
case audit.JSONxFormat:
w = &audit.JSONxWriter{Prefix: conf.Config["prefix"]}
default:
return nil, fmt.Errorf("unknown format type %q", b.formatConfig.RequiredFormat)
}
fw, err := audit.NewEntryFormatterWriter(b.formatConfig, f, w)
@@ -164,16 +158,24 @@ func Factory(ctx context.Context, conf *audit.BackendConfig, useEventLogger bool
var sinkNode eventlogger.Node
switch path {
case "stdout":
sinkNode = &audit.SinkWrapper{Name: path, Sink: event.NewStdoutSinkNode(format)}
case "discard":
case stdout:
sinkNode = &audit.SinkWrapper{Name: path, Sink: event.NewStdoutSinkNode(b.formatConfig.RequiredFormat.String())}
case discard:
sinkNode = &audit.SinkWrapper{Name: path, Sink: event.NewNoopSink()}
default:
var err error
var opts []event.Option
// Check if mode is provided
if modeRaw, ok := conf.Config["mode"]; ok {
opts = append(opts, event.WithFileMode(modeRaw))
}
// The NewFileSink function attempts to open the file and will
// return an error if it can't.
n, err := event.NewFileSink(b.path, format, event.WithFileMode(strconv.FormatUint(uint64(mode), 8)))
n, err := event.NewFileSink(
b.path,
b.formatConfig.RequiredFormat.String(), opts...)
if err != nil {
return nil, fmt.Errorf("file sink creation failed for path %q: %w", path, err)
}
@@ -189,8 +191,8 @@ func Factory(ctx context.Context, conf *audit.BackendConfig, useEventLogger bool
b.nodeMap[sinkNodeID] = sinkNode
} else {
switch path {
case "stdout":
case "discard":
case stdout:
case discard:
default:
// Ensure that the file can be successfully opened for writing;
// otherwise it will be too late to catch later without problems
@@ -257,9 +259,9 @@ func (b *Backend) Salt(ctx context.Context) (*salt.Salt, error) {
func (b *Backend) LogRequest(ctx context.Context, in *logical.LogInput) error {
var writer io.Writer
switch b.path {
case "stdout":
case stdout:
writer = os.Stdout
case "discard":
case discard:
return nil
}
@@ -288,7 +290,7 @@ func (b *Backend) log(_ context.Context, buf *bytes.Buffer, writer io.Writer) er
if _, err := reader.WriteTo(writer); err == nil {
b.fileLock.Unlock()
return nil
} else if b.path == "stdout" {
} else if b.path == stdout {
b.fileLock.Unlock()
return err
}
@@ -313,9 +315,9 @@ func (b *Backend) log(_ context.Context, buf *bytes.Buffer, writer io.Writer) er
func (b *Backend) LogResponse(ctx context.Context, in *logical.LogInput) error {
var writer io.Writer
switch b.path {
case "stdout":
case stdout:
writer = os.Stdout
case "discard":
case discard:
return nil
}
@@ -337,9 +339,9 @@ func (b *Backend) LogTestMessage(ctx context.Context, in *logical.LogInput, conf
// Old behavior
var writer io.Writer
switch b.path {
case "stdout":
case stdout:
writer = os.Stdout
case "discard":
case discard:
return nil
}
@@ -389,27 +391,39 @@ func (b *Backend) open() error {
}
func (b *Backend) Reload(_ context.Context) error {
switch b.path {
case "stdout", "discard":
// When there are nodes created in the map, use the eventlogger behavior.
if len(b.nodeMap) > 0 {
for _, n := range b.nodeMap {
if n.Type() == eventlogger.NodeTypeSink {
return n.Reopen()
}
}
return nil
}
} else {
// old non-eventlogger behavior
switch b.path {
case stdout, discard:
return nil
}
b.fileLock.Lock()
defer b.fileLock.Unlock()
b.fileLock.Lock()
defer b.fileLock.Unlock()
if b.f == nil {
return b.open()
}
err := b.f.Close()
// Set to nil here so that even if we error out, on the next access open()
// will be tried
b.f = nil
if err != nil {
return err
}
if b.f == nil {
return b.open()
}
err := b.f.Close()
// Set to nil here so that even if we error out, on the next access open()
// will be tried
b.f = nil
if err != nil {
return err
}
return b.open()
}
func (b *Backend) Invalidate(_ context.Context) {
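The Factory refactor above replaces per-setting booleans with an accumulated slice of options. A hedged, self-contained sketch of that shape (the `Option` type and constructors are illustrative stand-ins for `audit.Option`):

```go
package main

import (
	"fmt"
	"strconv"
)

// Option is an illustrative stand-in for audit.Option.
type Option struct {
	name  string
	value bool
}

func WithHMACAccessor(v bool) Option { return Option{"hmac_accessor", v} }
func WithRaw(v bool) Option          { return Option{"log_raw", v} }
func WithElision(v bool) Option      { return Option{"elide_list_responses", v} }

// optsFromConfig parses each recognized key once and appends an option;
// absent keys simply keep the formatter defaults.
func optsFromConfig(conf map[string]string) ([]Option, error) {
	ctors := map[string]func(bool) Option{
		"hmac_accessor":        WithHMACAccessor,
		"log_raw":              WithRaw,
		"elide_list_responses": WithElision,
	}
	var opts []Option
	for key, mk := range ctors {
		raw, ok := conf[key]
		if !ok {
			continue
		}
		v, err := strconv.ParseBool(raw)
		if err != nil {
			return nil, fmt.Errorf("invalid boolean for %q: %w", key, err)
		}
		opts = append(opts, mk(v))
	}
	return opts, nil
}

func main() {
	opts, err := optsFromConfig(map[string]string{"log_raw": "true"})
	fmt.Println(opts, err)
}
```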
View File
@@ -12,9 +12,10 @@ import (
"sync"
"time"
"github.com/hashicorp/go-secure-stdlib/parseutil"
"github.com/hashicorp/eventlogger"
"github.com/hashicorp/go-multierror"
"github.com/hashicorp/go-secure-stdlib/parseutil"
"github.com/hashicorp/vault/audit"
"github.com/hashicorp/vault/internal/observability/event"
"github.com/hashicorp/vault/sdk/helper/salt"
@@ -38,7 +39,6 @@ func Factory(ctx context.Context, conf *audit.BackendConfig, useEventLogger bool
if !ok {
socketType = "tcp"
}
writeDeadline, ok := conf.Config["write_timeout"]
if !ok {
writeDeadline = "2s"
@@ -48,51 +48,39 @@ func Factory(ctx context.Context, conf *audit.BackendConfig, useEventLogger bool
return nil, err
}
format, ok := conf.Config["format"]
if !ok {
format = audit.JSONFormat.String()
}
switch format {
case audit.JSONFormat.String(), audit.JSONxFormat.String():
default:
return nil, fmt.Errorf("unknown format type %q", format)
var cfgOpts []audit.Option
if format, ok := conf.Config["format"]; ok {
cfgOpts = append(cfgOpts, audit.WithFormat(format))
}
// Check if hashing of accessor is disabled
hmacAccessor := true
if hmacAccessorRaw, ok := conf.Config["hmac_accessor"]; ok {
value, err := strconv.ParseBool(hmacAccessorRaw)
v, err := strconv.ParseBool(hmacAccessorRaw)
if err != nil {
return nil, err
}
hmacAccessor = value
cfgOpts = append(cfgOpts, audit.WithHMACAccessor(v))
}
// Check if raw logging is enabled
logRaw := false
if raw, ok := conf.Config["log_raw"]; ok {
b, err := strconv.ParseBool(raw)
v, err := strconv.ParseBool(raw)
if err != nil {
return nil, err
}
logRaw = b
cfgOpts = append(cfgOpts, audit.WithRaw(v))
}
elideListResponses := false
if elideListResponsesRaw, ok := conf.Config["elide_list_responses"]; ok {
value, err := strconv.ParseBool(elideListResponsesRaw)
v, err := strconv.ParseBool(elideListResponsesRaw)
if err != nil {
return nil, err
}
elideListResponses = value
cfgOpts = append(cfgOpts, audit.WithElision(v))
}
cfg, err := audit.NewFormatterConfig(
audit.WithElision(elideListResponses),
audit.WithFormat(format),
audit.WithHMACAccessor(hmacAccessor),
audit.WithRaw(logRaw),
)
cfg, err := audit.NewFormatterConfig(cfgOpts...)
if err != nil {
return nil, err
}
@@ -113,10 +101,10 @@ func Factory(ctx context.Context, conf *audit.BackendConfig, useEventLogger bool
return nil, fmt.Errorf("error creating formatter: %w", err)
}
var w audit.Writer
switch format {
case audit.JSONFormat.String():
switch b.formatConfig.RequiredFormat {
case audit.JSONFormat:
w = &audit.JSONWriter{Prefix: conf.Config["prefix"]}
case audit.JSONxFormat.String():
case audit.JSONxFormat:
w = &audit.JSONxWriter{Prefix: conf.Config["prefix"]}
}
@@ -128,6 +116,16 @@ func Factory(ctx context.Context, conf *audit.BackendConfig, useEventLogger bool
b.formatter = fw
if useEventLogger {
var opts []event.Option
if socketType, ok := conf.Config["socket_type"]; ok {
opts = append(opts, event.WithSocketType(socketType))
}
if writeDeadline, ok := conf.Config["write_timeout"]; ok {
opts = append(opts, event.WithMaxDuration(writeDeadline))
}
b.nodeIDList = make([]eventlogger.NodeID, 2)
b.nodeMap = make(map[eventlogger.NodeID]eventlogger.Node)
@@ -138,7 +136,7 @@ func Factory(ctx context.Context, conf *audit.BackendConfig, useEventLogger bool
b.nodeIDList[0] = formatterNodeID
b.nodeMap[formatterNodeID] = f
n, err := event.NewSocketSink(format, address, event.WithSocketType(socketType), event.WithMaxDuration(writeDuration.String()))
n, err := event.NewSocketSink(b.formatConfig.RequiredFormat.String(), address, opts...)
if err != nil {
return nil, fmt.Errorf("error creating socket sink node: %w", err)
}
View File
@@ -39,57 +39,45 @@ func Factory(ctx context.Context, conf *audit.BackendConfig, useEventLogger bool
tag = "vault"
}
format, ok := conf.Config["format"]
if !ok {
format = audit.JSONFormat.String()
}
switch format {
case audit.JSONFormat.String(), audit.JSONxFormat.String():
default:
return nil, fmt.Errorf("unknown format type %q", format)
var cfgOpts []audit.Option
if format, ok := conf.Config["format"]; ok {
cfgOpts = append(cfgOpts, audit.WithFormat(format))
}
// Check if hashing of accessor is disabled
hmacAccessor := true
if hmacAccessorRaw, ok := conf.Config["hmac_accessor"]; ok {
value, err := strconv.ParseBool(hmacAccessorRaw)
v, err := strconv.ParseBool(hmacAccessorRaw)
if err != nil {
return nil, err
}
hmacAccessor = value
cfgOpts = append(cfgOpts, audit.WithHMACAccessor(v))
}
// Check if raw logging is enabled
logRaw := false
if raw, ok := conf.Config["log_raw"]; ok {
b, err := strconv.ParseBool(raw)
v, err := strconv.ParseBool(raw)
if err != nil {
return nil, err
}
logRaw = b
cfgOpts = append(cfgOpts, audit.WithRaw(v))
}
elideListResponses := false
if elideListResponsesRaw, ok := conf.Config["elide_list_responses"]; ok {
value, err := strconv.ParseBool(elideListResponsesRaw)
v, err := strconv.ParseBool(elideListResponsesRaw)
if err != nil {
return nil, err
}
elideListResponses = value
cfgOpts = append(cfgOpts, audit.WithElision(v))
}
// Get the logger
logger, err := gsyslog.NewLogger(gsyslog.LOG_INFO, facility, tag)
cfg, err := audit.NewFormatterConfig(cfgOpts...)
if err != nil {
return nil, err
}
cfg, err := audit.NewFormatterConfig(
audit.WithElision(elideListResponses),
audit.WithFormat(format),
audit.WithHMACAccessor(hmacAccessor),
audit.WithRaw(logRaw),
)
// Get the logger
logger, err := gsyslog.NewLogger(gsyslog.LOG_INFO, facility, tag)
if err != nil {
return nil, err
}
@@ -108,10 +96,10 @@ func Factory(ctx context.Context, conf *audit.BackendConfig, useEventLogger bool
}
var w audit.Writer
switch format {
case audit.JSONFormat.String():
switch b.formatConfig.RequiredFormat {
case audit.JSONFormat:
w = &audit.JSONWriter{Prefix: conf.Config["prefix"]}
case audit.JSONxFormat.String():
case audit.JSONxFormat:
w = &audit.JSONxWriter{Prefix: conf.Config["prefix"]}
}
@@ -123,6 +111,17 @@ func Factory(ctx context.Context, conf *audit.BackendConfig, useEventLogger bool
b.formatter = fw
if useEventLogger {
var opts []event.Option
// Get facility or default to AUTH
if facility, ok := conf.Config["facility"]; ok {
opts = append(opts, event.WithFacility(facility))
}
if tag, ok := conf.Config["tag"]; ok {
opts = append(opts, event.WithTag(tag))
}
b.nodeIDList = make([]eventlogger.NodeID, 2)
b.nodeMap = make(map[eventlogger.NodeID]eventlogger.Node)
@@ -133,7 +132,7 @@ func Factory(ctx context.Context, conf *audit.BackendConfig, useEventLogger bool
b.nodeIDList[0] = formatterNodeID
b.nodeMap[formatterNodeID] = f
n, err := event.NewSyslogSink(format, event.WithFacility(facility), event.WithTag(tag))
n, err := event.NewSyslogSink(b.formatConfig.RequiredFormat.String(), opts...)
if err != nil {
return nil, fmt.Errorf("error creating syslog sink node: %w", err)
}
View File
@@ -99,7 +99,7 @@ func (b *backend) pathLoginResolveRole(ctx context.Context, req *logical.Request
return nil, err
}
if roleIDIndex == nil {
return logical.ErrorResponse("invalid role ID"), nil
return logical.ErrorResponse("invalid role or secret ID"), nil
}
roleName := roleIDIndex.Name
@@ -113,7 +113,7 @@ func (b *backend) pathLoginResolveRole(ctx context.Context, req *logical.Request
return nil, err
}
if role == nil {
return logical.ErrorResponse("invalid role ID"), nil
return logical.ErrorResponse("invalid role or secret ID"), nil
}
return logical.ResolveRoleResponse(roleName)
@@ -134,7 +134,7 @@ func (b *backend) pathLoginUpdate(ctx context.Context, req *logical.Request, dat
return nil, err
}
if roleIDIndex == nil {
return logical.ErrorResponse("invalid role ID"), nil
return logical.ErrorResponse("invalid role or secret ID"), nil
}
roleName := roleIDIndex.Name
@@ -148,7 +148,7 @@ func (b *backend) pathLoginUpdate(ctx context.Context, req *logical.Request, dat
return nil, err
}
if role == nil {
return logical.ErrorResponse("invalid role ID"), nil
return logical.ErrorResponse("invalid role or secret ID"), nil
}
metadata := make(map[string]string)
@@ -184,7 +184,7 @@ func (b *backend) pathLoginUpdate(ctx context.Context, req *logical.Request, dat
return nil, err
}
if entry == nil {
return logical.ErrorResponse("invalid secret id"), logical.ErrInvalidCredentials
return logical.ErrorResponse("invalid role or secret ID"), logical.ErrInvalidCredentials
}
// If a secret ID entry does not have a corresponding accessor
@@ -204,7 +204,7 @@ func (b *backend) pathLoginUpdate(ctx context.Context, req *logical.Request, dat
return nil, err
}
if entry == nil {
return logical.ErrorResponse("invalid secret id"), nil
return logical.ErrorResponse("invalid role or secret ID"), nil
}
accessorEntry, err := b.secretIDAccessorEntry(ctx, req.Storage, entry.SecretIDAccessor, role.SecretIDPrefix)
@@ -217,7 +217,7 @@ func (b *backend) pathLoginUpdate(ctx context.Context, req *logical.Request, dat
return nil, fmt.Errorf("error deleting secret ID %q from storage: %w", secretIDHMAC, err)
}
}
return logical.ErrorResponse("invalid secret id"), nil
return logical.ErrorResponse("invalid role or secret ID"), nil
}
switch {
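The change above collapses every failure path to one opaque message, so a caller can no longer tell whether the role or the secret ID was wrong. A tiny sketch using the real `sdk/logical` helper:

```go
package main

import (
	"fmt"

	"github.com/hashicorp/vault/sdk/logical"
)

func main() {
	// Unknown role ID and bad secret ID now produce identical responses,
	// removing the signal an attacker could use to enumerate roles.
	resp := logical.ErrorResponse("invalid role or secret ID")
	fmt.Println(resp.Error())
}
```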
View File
@@ -354,7 +354,7 @@ func TestAppRole_RoleDoesNotExist(t *testing.T) {
t.Fatal("Error not part of response.")
}
if !strings.Contains(errString, "invalid role ID") {
if !strings.Contains(errString, "invalid role or secret ID") {
t.Fatalf("Error was not due to invalid role ID. Error: %s", errString)
}
}
View File
@@ -923,7 +923,7 @@ func TestAppRoleSecretIDLookup(t *testing.T) {
expected := &logical.Response{
Data: map[string]interface{}{
"http_content_type": "application/json",
"http_raw_body": `{"request_id":"","lease_id":"","renewable":false,"lease_duration":0,"data":{"error":"failed to find accessor entry for secret_id_accessor: \"invalid\""},"wrap_info":null,"warnings":null,"auth":null}`,
"http_raw_body": `{"request_id":"","lease_id":"","renewable":false,"lease_duration":0,"data":{"error":"failed to find accessor entry for secret_id_accessor: \"invalid\""},"wrap_info":null,"warnings":null,"auth":null,"mount_type":""}`,
"http_status_code": 404,
},
}
View File
@@ -1504,7 +1504,7 @@ func buildCallerIdentityLoginData(request *http.Request, roleName string) (map[s
"iam_request_url": base64.StdEncoding.EncodeToString([]byte(request.URL.String())),
"iam_request_headers": base64.StdEncoding.EncodeToString(headersJson),
"iam_request_body": base64.StdEncoding.EncodeToString(requestBody),
"request_role": roleName,
"role": roleName,
}, nil
}
View File
@@ -292,7 +292,7 @@ func (b *backend) pathLoginIamGetRoleNameCallerIdAndEntity(ctx context.Context,
config, err := b.lockedClientConfigEntry(ctx, req.Storage)
if err != nil {
return "", nil, nil, logical.ErrorResponse("error getting configuration"), nil
return "", nil, nil, nil, fmt.Errorf("error getting configuration: %w", err)
}
endpoint := "https://sts.amazonaws.com"
@@ -319,23 +319,23 @@ func (b *backend) pathLoginIamGetRoleNameCallerIdAndEntity(ctx context.Context,
if config.MaxRetries >= 0 {
maxRetries = config.MaxRetries
}
}
// Extract and use a regional STS endpoint
// based on the region set in the Authorization header.
if config.UseSTSRegionFromClient {
clientSpecifiedRegion, err := awsRegionFromHeader(headers.Get("Authorization"))
if err != nil {
return "", nil, nil, logical.ErrorResponse("region missing from Authorization header"), nil
// Extract and use a regional STS endpoint
// based on the region set in the Authorization header.
if config.UseSTSRegionFromClient {
clientSpecifiedRegion, err := awsRegionFromHeader(headers.Get("Authorization"))
if err != nil {
return "", nil, nil, logical.ErrorResponse("region missing from Authorization header"), nil
}
url, err := stsRegionalEndpoint(clientSpecifiedRegion)
if err != nil {
return "", nil, nil, logical.ErrorResponse(err.Error()), nil
}
b.Logger().Debug("use_sts_region_from_client set; using region specified from header", "region", clientSpecifiedRegion)
endpoint = url
}
url, err := stsRegionalEndpoint(clientSpecifiedRegion)
if err != nil {
return "", nil, nil, logical.ErrorResponse(err.Error()), nil
}
b.Logger().Debug("use_sts_region_from_client set; using region specified from header", "region", clientSpecifiedRegion)
endpoint = url
}
b.Logger().Debug("submitting caller identity request", "endpoint", endpoint)

View File

@@ -308,6 +308,56 @@ func TestBackend_validateVaultPostRequestValues(t *testing.T) {
}
}
// TestBackend_pathLogin_NoClientConfig covers logging in via IAM auth when the
// client config does not exist. This is a regression test to cover potential
// panics when referencing the potentially-nil config in the login handler. For
// details see https://github.com/hashicorp/vault/issues/23361.
func TestBackend_pathLogin_NoClientConfig(t *testing.T) {
storage := new(logical.InmemStorage)
config := logical.TestBackendConfig()
config.StorageView = storage
b, err := Backend(config)
if err != nil {
t.Fatal(err)
}
err = b.Setup(context.Background(), config)
if err != nil {
t.Fatal(err)
}
// Intentionally left out the client configuration
roleEntry := &awsRoleEntry{
RoleID: "foo",
Version: currentRoleStorageVersion,
AuthType: iamAuthType,
}
err = b.setRole(context.Background(), storage, testValidRoleName, roleEntry)
if err != nil {
t.Fatal(err)
}
loginData, err := defaultLoginData()
if err != nil {
t.Fatal(err)
}
loginRequest := &logical.Request{
Operation: logical.UpdateOperation,
Path: "login",
Storage: storage,
Data: loginData,
Connection: &logical.Connection{},
}
resp, err := b.HandleRequest(context.Background(), loginRequest)
if err != nil {
t.Fatalf("expected nil error, got: %v", err)
}
if !resp.IsError() {
t.Fatalf("expected error response, got: %+v", resp)
}
}
// TestBackend_pathLogin_IAMHeaders tests login with iam_request_headers,
// supporting both base64 encoded string and JSON headers
func TestBackend_pathLogin_IAMHeaders(t *testing.T) {

View File

@@ -89,6 +89,27 @@ func TestSign(t *testing.T) {
}
func TestDSASignAndVerifyWithOpenSSL(t *testing.T) {
dsaPublicCert := []byte(`-----BEGIN CERTIFICATE-----
MIIDOjCCAvWgAwIBAgIEPCY/UDANBglghkgBZQMEAwIFADBsMRAwDgYDVQQGEwdV
bmtub3duMRAwDgYDVQQIEwdVbmtub3duMRAwDgYDVQQHEwdVbmtub3duMRAwDgYD
VQQKEwdVbmtub3duMRAwDgYDVQQLEwdVbmtub3duMRAwDgYDVQQDEwdVbmtub3du
MB4XDTE4MTAyMjEzNDMwN1oXDTQ2MDMwOTEzNDMwN1owbDEQMA4GA1UEBhMHVW5r
bm93bjEQMA4GA1UECBMHVW5rbm93bjEQMA4GA1UEBxMHVW5rbm93bjEQMA4GA1UE
ChMHVW5rbm93bjEQMA4GA1UECxMHVW5rbm93bjEQMA4GA1UEAxMHVW5rbm93bjCC
AbgwggEsBgcqhkjOOAQBMIIBHwKBgQD9f1OBHXUSKVLfSpwu7OTn9hG3UjzvRADD
Hj+AtlEmaUVdQCJR+1k9jVj6v8X1ujD2y5tVbNeBO4AdNG/yZmC3a5lQpaSfn+gE
exAiwk+7qdf+t8Yb+DtX58aophUPBPuD9tPFHsMCNVQTWhaRMvZ1864rYdcq7/Ii
Axmd0UgBxwIVAJdgUI8VIwvMspK5gqLrhAvwWBz1AoGBAPfhoIXWmz3ey7yrXDa4
V7l5lK+7+jrqgvlXTAs9B4JnUVlXjrrUWU/mcQcQgYC0SRZxI+hMKBYTt88JMozI
puE8FnqLVHyNKOCjrh4rs6Z1kW6jfwv6ITVi8ftiegEkO8yk8b6oUZCJqIPf4Vrl
nwaSi2ZegHtVJWQBTDv+z0kqA4GFAAKBgQDCriMPbEVBoRK4SOUeFwg7+VRf4TTp
rcOQC9IVVoCjXzuWEGrp3ZI7YWJSpFnSch4lk29RH8O0HpI/NOzKnOBtnKr782pt
1k/bJVMH9EaLd6MKnAVjrCDMYBB0MhebZ8QHY2elZZCWoqDYAcIDOsEx+m4NLErT
ypPnjS5M0jm1PKMhMB8wHQYDVR0OBBYEFC0Yt5XdM0Kc95IX8NQ8XRssGPx7MA0G
CWCGSAFlAwQDAgUAAzAAMC0CFQCIgQtrZZ9hdZG1ROhR5hc8nYEmbgIUAIlgC688
qzy/7yePTlhlpj+ahMM=
-----END CERTIFICATE-----`)
content := []byte("Hello World")
// write the content to a temp file
tmpContentFile, err := ioutil.TempFile("", "TestDSASignAndVerifyWithOpenSSL_content")

View File

@@ -1,181 +0,0 @@
//go:build go1.11 || go1.12 || go1.13 || go1.14 || go1.15
package pkcs7
import (
"crypto/x509"
"encoding/pem"
"io/ioutil"
"os"
"os/exec"
"testing"
)
func TestVerifyEC2(t *testing.T) {
fixture := UnmarshalDSATestFixture(EC2IdentityDocumentFixture)
p7, err := Parse(fixture.Input)
if err != nil {
t.Errorf("Parse encountered unexpected error: %v", err)
}
p7.Certificates = []*x509.Certificate{fixture.Certificate}
if err := p7.Verify(); err != nil {
t.Errorf("Verify failed with error: %v", err)
}
}
var EC2IdentityDocumentFixture = `
-----BEGIN PKCS7-----
MIAGCSqGSIb3DQEHAqCAMIACAQExCzAJBgUrDgMCGgUAMIAGCSqGSIb3DQEHAaCA
JIAEggGmewogICJwcml2YXRlSXAiIDogIjE3Mi4zMC4wLjI1MiIsCiAgImRldnBh
eVByb2R1Y3RDb2RlcyIgOiBudWxsLAogICJhdmFpbGFiaWxpdHlab25lIiA6ICJ1
cy1lYXN0LTFhIiwKICAidmVyc2lvbiIgOiAiMjAxMC0wOC0zMSIsCiAgImluc3Rh
bmNlSWQiIDogImktZjc5ZmU1NmMiLAogICJiaWxsaW5nUHJvZHVjdHMiIDogbnVs
bCwKICAiaW5zdGFuY2VUeXBlIiA6ICJ0Mi5taWNybyIsCiAgImFjY291bnRJZCIg
OiAiMTIxNjU5MDE0MzM0IiwKICAiaW1hZ2VJZCIgOiAiYW1pLWZjZTNjNjk2IiwK
ICAicGVuZGluZ1RpbWUiIDogIjIwMTYtMDQtMDhUMDM6MDE6MzhaIiwKICAiYXJj
aGl0ZWN0dXJlIiA6ICJ4ODZfNjQiLAogICJrZXJuZWxJZCIgOiBudWxsLAogICJy
YW1kaXNrSWQiIDogbnVsbCwKICAicmVnaW9uIiA6ICJ1cy1lYXN0LTEiCn0AAAAA
AAAxggEYMIIBFAIBATBpMFwxCzAJBgNVBAYTAlVTMRkwFwYDVQQIExBXYXNoaW5n
dG9uIFN0YXRlMRAwDgYDVQQHEwdTZWF0dGxlMSAwHgYDVQQKExdBbWF6b24gV2Vi
IFNlcnZpY2VzIExMQwIJAJa6SNnlXhpnMAkGBSsOAwIaBQCgXTAYBgkqhkiG9w0B
CQMxCwYJKoZIhvcNAQcBMBwGCSqGSIb3DQEJBTEPFw0xNjA0MDgwMzAxNDRaMCMG
CSqGSIb3DQEJBDEWBBTuUc28eBXmImAautC+wOjqcFCBVjAJBgcqhkjOOAQDBC8w
LQIVAKA54NxGHWWCz5InboDmY/GHs33nAhQ6O/ZI86NwjA9Vz3RNMUJrUPU5tAAA
AAAAAA==
-----END PKCS7-----
-----BEGIN CERTIFICATE-----
MIIC7TCCAq0CCQCWukjZ5V4aZzAJBgcqhkjOOAQDMFwxCzAJBgNVBAYTAlVTMRkw
FwYDVQQIExBXYXNoaW5ndG9uIFN0YXRlMRAwDgYDVQQHEwdTZWF0dGxlMSAwHgYD
VQQKExdBbWF6b24gV2ViIFNlcnZpY2VzIExMQzAeFw0xMjAxMDUxMjU2MTJaFw0z
ODAxMDUxMjU2MTJaMFwxCzAJBgNVBAYTAlVTMRkwFwYDVQQIExBXYXNoaW5ndG9u
IFN0YXRlMRAwDgYDVQQHEwdTZWF0dGxlMSAwHgYDVQQKExdBbWF6b24gV2ViIFNl
cnZpY2VzIExMQzCCAbcwggEsBgcqhkjOOAQBMIIBHwKBgQCjkvcS2bb1VQ4yt/5e
ih5OO6kK/n1Lzllr7D8ZwtQP8fOEpp5E2ng+D6Ud1Z1gYipr58Kj3nssSNpI6bX3
VyIQzK7wLclnd/YozqNNmgIyZecN7EglK9ITHJLP+x8FtUpt3QbyYXJdmVMegN6P
hviYt5JH/nYl4hh3Pa1HJdskgQIVALVJ3ER11+Ko4tP6nwvHwh6+ERYRAoGBAI1j
k+tkqMVHuAFcvAGKocTgsjJem6/5qomzJuKDmbJNu9Qxw3rAotXau8Qe+MBcJl/U
hhy1KHVpCGl9fueQ2s6IL0CaO/buycU1CiYQk40KNHCcHfNiZbdlx1E9rpUp7bnF
lRa2v1ntMX3caRVDdbtPEWmdxSCYsYFDk4mZrOLBA4GEAAKBgEbmeve5f8LIE/Gf
MNmP9CM5eovQOGx5ho8WqD+aTebs+k2tn92BBPqeZqpWRa5P/+jrdKml1qx4llHW
MXrs3IgIb6+hUIB+S8dz8/mmO0bpr76RoZVCXYab2CZedFut7qc3WUH9+EUAH5mw
vSeDCOUMYQR7R9LINYwouHIziqQYMAkGByqGSM44BAMDLwAwLAIUWXBlk40xTwSw
7HX32MxXYruse9ACFBNGmdX2ZBrVNGrN9N2f6ROk0k9K
-----END CERTIFICATE-----`
func TestDSASignWithOpenSSLAndVerify(t *testing.T) {
content := []byte(`
A ship in port is safe,
but that's not what ships are built for.
-- Grace Hopper`)
// write the content to a temp file
tmpContentFile, err := ioutil.TempFile("", "TestDSASignWithOpenSSLAndVerify_content")
if err != nil {
t.Fatal(err)
}
ioutil.WriteFile(tmpContentFile.Name(), content, 0o755)
// write the signer cert to a temp file
tmpSignerCertFile, err := ioutil.TempFile("", "TestDSASignWithOpenSSLAndVerify_signer")
if err != nil {
t.Fatal(err)
}
ioutil.WriteFile(tmpSignerCertFile.Name(), dsaPublicCert, 0o755)
// write the signer key to a temp file
tmpSignerKeyFile, err := ioutil.TempFile("", "TestDSASignWithOpenSSLAndVerify_key")
if err != nil {
t.Fatal(err)
}
ioutil.WriteFile(tmpSignerKeyFile.Name(), dsaPrivateKey, 0o755)
tmpSignedFile, err := ioutil.TempFile("", "TestDSASignWithOpenSSLAndVerify_signature")
if err != nil {
t.Fatal(err)
}
// call openssl to sign the content
opensslCMD := exec.Command("openssl", "smime", "-sign", "-nodetach", "-md", "sha1",
"-in", tmpContentFile.Name(), "-out", tmpSignedFile.Name(),
"-signer", tmpSignerCertFile.Name(), "-inkey", tmpSignerKeyFile.Name(),
"-certfile", tmpSignerCertFile.Name(), "-outform", "PEM")
out, err := opensslCMD.CombinedOutput()
if err != nil {
t.Fatalf("openssl command failed with %s: %s", err, out)
}
// verify the signed content
pemSignature, err := ioutil.ReadFile(tmpSignedFile.Name())
if err != nil {
t.Fatal(err)
}
t.Logf("%s\n", pemSignature)
derBlock, _ := pem.Decode(pemSignature)
if derBlock == nil {
t.Fatalf("failed to read DER block from signature PEM %s", tmpSignedFile.Name())
}
p7, err := Parse(derBlock.Bytes)
if err != nil {
t.Fatalf("Parse encountered unexpected error: %v", err)
}
if err := p7.Verify(); err != nil {
t.Fatalf("Verify failed with error: %v", err)
}
os.Remove(tmpSignerCertFile.Name()) // clean up
os.Remove(tmpSignerKeyFile.Name()) // clean up
os.Remove(tmpContentFile.Name()) // clean up
}
var dsaPrivateKey = []byte(`-----BEGIN PRIVATE KEY-----
MIIBSwIBADCCASwGByqGSM44BAEwggEfAoGBAP1/U4EddRIpUt9KnC7s5Of2EbdS
PO9EAMMeP4C2USZpRV1AIlH7WT2NWPq/xfW6MPbLm1Vs14E7gB00b/JmYLdrmVCl
pJ+f6AR7ECLCT7up1/63xhv4O1fnxqimFQ8E+4P208UewwI1VBNaFpEy9nXzrith
1yrv8iIDGZ3RSAHHAhUAl2BQjxUjC8yykrmCouuEC/BYHPUCgYEA9+GghdabPd7L
vKtcNrhXuXmUr7v6OuqC+VdMCz0HgmdRWVeOutRZT+ZxBxCBgLRJFnEj6EwoFhO3
zwkyjMim4TwWeotUfI0o4KOuHiuzpnWRbqN/C/ohNWLx+2J6ASQ7zKTxvqhRkImo
g9/hWuWfBpKLZl6Ae1UlZAFMO/7PSSoEFgIUfW4aPdQBn9gJZp2KuNpzgHzvfsE=
-----END PRIVATE KEY-----`)
var dsaPublicCert = []byte(`-----BEGIN CERTIFICATE-----
MIIDOjCCAvWgAwIBAgIEPCY/UDANBglghkgBZQMEAwIFADBsMRAwDgYDVQQGEwdV
bmtub3duMRAwDgYDVQQIEwdVbmtub3duMRAwDgYDVQQHEwdVbmtub3duMRAwDgYD
VQQKEwdVbmtub3duMRAwDgYDVQQLEwdVbmtub3duMRAwDgYDVQQDEwdVbmtub3du
MB4XDTE4MTAyMjEzNDMwN1oXDTQ2MDMwOTEzNDMwN1owbDEQMA4GA1UEBhMHVW5r
bm93bjEQMA4GA1UECBMHVW5rbm93bjEQMA4GA1UEBxMHVW5rbm93bjEQMA4GA1UE
ChMHVW5rbm93bjEQMA4GA1UECxMHVW5rbm93bjEQMA4GA1UEAxMHVW5rbm93bjCC
AbgwggEsBgcqhkjOOAQBMIIBHwKBgQD9f1OBHXUSKVLfSpwu7OTn9hG3UjzvRADD
Hj+AtlEmaUVdQCJR+1k9jVj6v8X1ujD2y5tVbNeBO4AdNG/yZmC3a5lQpaSfn+gE
exAiwk+7qdf+t8Yb+DtX58aophUPBPuD9tPFHsMCNVQTWhaRMvZ1864rYdcq7/Ii
Axmd0UgBxwIVAJdgUI8VIwvMspK5gqLrhAvwWBz1AoGBAPfhoIXWmz3ey7yrXDa4
V7l5lK+7+jrqgvlXTAs9B4JnUVlXjrrUWU/mcQcQgYC0SRZxI+hMKBYTt88JMozI
puE8FnqLVHyNKOCjrh4rs6Z1kW6jfwv6ITVi8ftiegEkO8yk8b6oUZCJqIPf4Vrl
nwaSi2ZegHtVJWQBTDv+z0kqA4GFAAKBgQDCriMPbEVBoRK4SOUeFwg7+VRf4TTp
rcOQC9IVVoCjXzuWEGrp3ZI7YWJSpFnSch4lk29RH8O0HpI/NOzKnOBtnKr782pt
1k/bJVMH9EaLd6MKnAVjrCDMYBB0MhebZ8QHY2elZZCWoqDYAcIDOsEx+m4NLErT
ypPnjS5M0jm1PKMhMB8wHQYDVR0OBBYEFC0Yt5XdM0Kc95IX8NQ8XRssGPx7MA0G
CWCGSAFlAwQDAgUAAzAAMC0CFQCIgQtrZZ9hdZG1ROhR5hc8nYEmbgIUAIlgC688
qzy/7yePTlhlpj+ahMM=
-----END CERTIFICATE-----`)
type DSATestFixture struct {
Input []byte
Certificate *x509.Certificate
}
func UnmarshalDSATestFixture(testPEMBlock string) DSATestFixture {
var result DSATestFixture
var derBlock *pem.Block
pemBlock := []byte(testPEMBlock)
for {
derBlock, pemBlock = pem.Decode(pemBlock)
if derBlock == nil {
break
}
switch derBlock.Type {
case "PKCS7":
result.Input = derBlock.Bytes
case "CERTIFICATE":
result.Certificate, _ = x509.ParseCertificate(derBlock.Bytes)
}
}
return result
}

View File

@@ -8,6 +8,7 @@ import (
"fmt"
"strings"
"github.com/hashicorp/cap/ldap"
"github.com/hashicorp/go-secure-stdlib/strutil"
"github.com/hashicorp/vault/sdk/framework"
@@ -76,82 +77,25 @@ func (b *backend) Login(ctx context.Context, req *logical.Request, username stri
return "", nil, logical.ErrorResponse("password cannot be of zero length when passwordless binds are being denied"), nil, nil
}
ldapClient := ldaputil.Client{
Logger: b.Logger(),
LDAP: ldaputil.NewLDAP(),
}
c, err := ldapClient.DialLDAP(cfg.ConfigEntry)
ldapClient, err := ldap.NewClient(ctx, ldaputil.ConvertConfig(cfg.ConfigEntry))
if err != nil {
return "", nil, logical.ErrorResponse(err.Error()), nil, nil
}
if c == nil {
return "", nil, logical.ErrorResponse("invalid connection returned from LDAP dial"), nil, nil
}
// Clean connection
defer c.Close()
defer ldapClient.Close(ctx)
userBindDN, err := ldapClient.GetUserBindDN(cfg.ConfigEntry, c, username)
c, err := ldapClient.Authenticate(ctx, username, password, ldap.WithGroups(), ldap.WithUserAttributes())
if err != nil {
if b.Logger().IsDebug() {
b.Logger().Debug("error getting user bind DN", "error", err)
if strings.Contains(err.Error(), "discovery of user bind DN failed") ||
strings.Contains(err.Error(), "unable to bind user") {
return "", nil, logical.ErrorResponse(errUserBindFailed), nil, logical.ErrInvalidCredentials
}
return "", nil, logical.ErrorResponse(errUserBindFailed), nil, logical.ErrInvalidCredentials
}
if b.Logger().IsDebug() {
b.Logger().Debug("user binddn fetched", "username", username, "binddn", userBindDN)
}
// Try to bind as the login user. This is where the actual authentication takes place.
if len(password) > 0 {
err = c.Bind(userBindDN, password)
} else {
err = c.UnauthenticatedBind(userBindDN)
}
if err != nil {
if b.Logger().IsDebug() {
b.Logger().Debug("ldap bind failed", "error", err)
}
return "", nil, logical.ErrorResponse(errUserBindFailed), nil, logical.ErrInvalidCredentials
}
// We re-bind to the BindDN if it's defined because we assume
// the BindDN should be the one to search, not the user logging in.
if cfg.BindDN != "" && cfg.BindPassword != "" {
if err := c.Bind(cfg.BindDN, cfg.BindPassword); err != nil {
if b.Logger().IsDebug() {
b.Logger().Debug("error while attempting to re-bind with the BindDN User", "error", err)
}
return "", nil, logical.ErrorResponse("ldap operation failed: failed to re-bind with the BindDN user"), nil, logical.ErrInvalidCredentials
}
if b.Logger().IsDebug() {
b.Logger().Debug("re-bound to original binddn")
}
}
userDN, err := ldapClient.GetUserDN(cfg.ConfigEntry, c, userBindDN, username)
if err != nil {
return "", nil, logical.ErrorResponse(err.Error()), nil, nil
}
if cfg.AnonymousGroupSearch {
c, err = ldapClient.DialLDAP(cfg.ConfigEntry)
if err != nil {
return "", nil, logical.ErrorResponse("ldap operation failed: failed to connect to LDAP server"), nil, nil
}
defer c.Close() // Defer closing of this connection as the deferal above closes the other defined connection
}
ldapGroups, err := ldapClient.GetLdapGroups(cfg.ConfigEntry, c, userDN, username)
if err != nil {
return "", nil, logical.ErrorResponse(err.Error()), nil, nil
}
if b.Logger().IsDebug() {
b.Logger().Debug("groups fetched from server", "num_server_groups", len(ldapGroups), "server_groups", ldapGroups)
}
ldapGroups := c.Groups
ldapResponse := &logical.Response{
Data: map[string]interface{}{},
}
@@ -162,6 +106,10 @@ func (b *backend) Login(ctx context.Context, req *logical.Request, username stri
ldapResponse.AddWarning(errString)
}
for _, warning := range c.Warnings {
ldapResponse.AddWarning(string(warning))
}
var allGroups []string
canonicalUsername := username
cs := *cfg.CaseSensitiveNames
@@ -206,13 +154,11 @@ func (b *backend) Login(ctx context.Context, req *logical.Request, username stri
return username, policies, ldapResponse, allGroups, nil
}
entityAliasAttribute, err := ldapClient.GetUserAliasAttributeValue(cfg.ConfigEntry, c, username)
if err != nil {
return "", nil, logical.ErrorResponse(err.Error()), nil, nil
}
if entityAliasAttribute == "" {
userAttrValues := c.UserAttributes[cfg.UserAttr]
if len(userAttrValues) == 0 {
return "", nil, logical.ErrorResponse("missing entity alias attribute value"), nil, nil
}
entityAliasAttribute := userAttrValues[0]
return entityAliasAttribute, policies, ldapResponse, allGroups, nil
}
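
The rewrite above replaces the hand-rolled dial/bind/re-bind/group-search sequence with a single call into hashicorp/cap's LDAP client. A minimal sketch of the consolidated flow, assuming github.com/hashicorp/cap/ldap and illustrative ClientConfig field values (the backend itself derives its config via ldaputil.ConvertConfig):

// Sketch only; error handling trimmed to the essentials.
func login(ctx context.Context, username, password string) ([]string, error) {
	client, err := ldap.NewClient(ctx, &ldap.ClientConfig{
		URLs:    []string{"ldaps://ldap.example.com"},
		UserDN:  "ou=people,dc=example,dc=com",
		GroupDN: "ou=groups,dc=example,dc=com",
	})
	if err != nil {
		return nil, err
	}
	defer client.Close(ctx)

	// One Authenticate call performs the bind plus, via the options, the
	// group and user-attribute lookups the old code strung together by hand.
	res, err := client.Authenticate(ctx, username, password,
		ldap.WithGroups(), ldap.WithUserAttributes())
	if err != nil {
		return nil, err
	}
	return res.Groups, nil
}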

View File

@@ -212,6 +212,20 @@ func (b *backend) pathStaticRolesWrite(ctx context.Context, req *logical.Request
if err != nil {
return nil, fmt.Errorf("failed to add item into the rotation queue for role %q: %w", config.Name, err)
}
} else {
// creds already exist, so all we need to do is update the rotation entry:
// the role name is the queue key and stays the same; only the stored config
// and the next rotation time change.
i, err := b.credRotationQueue.PopByKey(config.Name)
if err != nil {
return nil, fmt.Errorf("expected an item with name %q, but got an error: %w", config.Name, err)
}
i.Value = config
// update the next rotation to occur at now + the new rotation period
i.Priority = time.Now().Add(config.RotationPeriod).Unix()
err = b.credRotationQueue.Push(i)
if err != nil {
return nil, fmt.Errorf("failed to add updated item into the rotation queue for role %q: %w", config.Name, err)
}
}
return &logical.Response{

View File

@@ -124,12 +124,21 @@ func TestStaticRolesWrite(t *testing.T) {
bgCTX := context.Background()
cases := []struct {
name string
opts []awsutil.MockIAMOption
name string
// objects to return from mock IAM.
// You'll need a GetUserOutput (to validate the existence of the user being
// written), the keys the user has already been assigned, and the new key
// Vault requests.
opts []awsutil.MockIAMOption // objects to return from the mock IAM
// the name, username if updating, and rotation_period of the user. This is the inbound request the code would get.
data map[string]interface{}
expectedError bool
findUser bool
isUpdate bool
// if the request data carries the name "johnny", we'll match an existing user with a 24-hour rotation period.
isUpdate bool
newPriority int64 // next rotation time of the updated queue item; ignored if isUpdate is false. There is 5 seconds of wiggle room,
// so the delta between the old and the new rotation time should be larger than that to ensure the difference
// can be detected.
}{
{
name: "happy path",
@@ -168,7 +177,7 @@ func TestStaticRolesWrite(t *testing.T) {
expectedError: true,
},
{
name: "update existing user",
name: "update existing user, decreased rotation duration",
opts: []awsutil.MockIAMOption{
awsutil.WithGetUserOutput(&iam.GetUserOutput{User: &iam.User{UserName: aws.String("john-doe"), UserId: aws.String("unique-id")}}),
awsutil.WithListAccessKeysOutput(&iam.ListAccessKeysOutput{
@@ -187,8 +196,33 @@ func TestStaticRolesWrite(t *testing.T) {
"name": "johnny",
"rotation_period": "19m",
},
findUser: true,
isUpdate: true,
findUser: true,
isUpdate: true,
newPriority: time.Now().Add(19 * time.Minute).Unix(),
},
{
name: "update existing user, increased rotation duration",
opts: []awsutil.MockIAMOption{
awsutil.WithGetUserOutput(&iam.GetUserOutput{User: &iam.User{UserName: aws.String("john-doe"), UserId: aws.String("unique-id")}}),
awsutil.WithListAccessKeysOutput(&iam.ListAccessKeysOutput{
AccessKeyMetadata: []*iam.AccessKeyMetadata{},
IsTruncated: aws.Bool(false),
}),
awsutil.WithCreateAccessKeyOutput(&iam.CreateAccessKeyOutput{
AccessKey: &iam.AccessKey{
AccessKeyId: aws.String("abcdefghijklmnopqrstuvwxyz"),
SecretAccessKey: aws.String("zyxwvutsrqponmlkjihgfedcba"),
UserName: aws.String("john-doe"),
},
}),
},
data: map[string]interface{}{
"name": "johnny",
"rotation_period": "40h",
},
findUser: true,
isUpdate: true,
newPriority: time.Now().Add(40 * time.Hour).Unix(),
},
}
@@ -269,6 +303,11 @@ func TestStaticRolesWrite(t *testing.T) {
expectedData = staticRole
}
var actualItem *queue.Item
if c.isUpdate {
actualItem, _ = b.credRotationQueue.PopByKey(expectedData.Name)
}
if u, ok := fieldData.GetOk("username"); ok {
expectedData.Username = u.(string)
}
@@ -289,6 +328,20 @@ func TestStaticRolesWrite(t *testing.T) {
if en, an := expectedData.Name, actualData.Name; en != an {
t.Fatalf("mismatched role name, expected %q, but got %q", en, an)
}
// one-off to avoid importing/casting
abs := func(x int64) int64 {
if x < 0 {
return -x
}
return x
}
if c.isUpdate {
if ep, ap := c.newPriority, actualItem.Priority; abs(ep-ap) > 5 { // 5 second wiggle room for how long the test takes
t.Fatalf("mismatched updated priority, expected %d but got %d", ep, ap)
}
}
})
}
}

View File

@@ -5,6 +5,7 @@ package aws
import (
"context"
"errors"
"fmt"
"time"
@@ -37,13 +38,13 @@ func (b *backend) rotateExpiredStaticCreds(ctx context.Context, req *logical.Req
}
// rotateCredential pops an element from the priority queue and, if it is expired, rotates and re-pushes it.
// If a cred was rotated, it returns true, otherwise false.
func (b *backend) rotateCredential(ctx context.Context, storage logical.Storage) (rotated bool, err error) {
// If a cred was ready for rotation, it returns true, otherwise false.
func (b *backend) rotateCredential(ctx context.Context, storage logical.Storage) (wasReady bool, err error) {
// If queue is empty or first item does not need a rotation (priority is next rotation timestamp) there is nothing to do
item, err := b.credRotationQueue.Pop()
if err != nil {
// the queue is just empty, which is fine.
if err == queue.ErrEmpty {
if errors.Is(err, queue.ErrEmpty) {
return false, nil
}
return false, fmt.Errorf("failed to pop from queue for role %q: %w", item.Key, err)
@@ -62,14 +63,21 @@ func (b *backend) rotateCredential(ctx context.Context, storage logical.Storage)
err = b.createCredential(ctx, storage, cfg, true)
if err != nil {
return false, err
// put it back in the queue with a backoff
item.Priority = time.Now().Add(10 * time.Second).Unix()
innerErr := b.credRotationQueue.Push(item)
if innerErr != nil {
return true, fmt.Errorf("failed to add item into the rotation queue for role %q (%w), while attempting to recover from failure to create credential: %w", cfg.Name, innerErr, err)
}
// there was one that "should have" rotated, so we want to keep looking further down the queue
return true, err
}
// set new priority and re-queue
item.Priority = time.Now().Add(cfg.RotationPeriod).Unix()
err = b.credRotationQueue.Push(item)
if err != nil {
return false, fmt.Errorf("failed to add item into the rotation queue for role %q: %w", cfg.Name, err)
return true, fmt.Errorf("failed to add item into the rotation queue for role %q: %w", cfg.Name, err)
}
return true, nil
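
Taken together, the hunk turns rotation into a pop/check/requeue loop over the SDK priority queue, with a short backoff requeue on failure so one bad credential cannot wedge the whole rotation pass. A self-contained sketch of the same pattern using the sdk/queue package seen above:

package main

import (
	"errors"
	"fmt"
	"time"

	"github.com/hashicorp/vault/sdk/queue"
)

func rotate(key string) error { fmt.Println("rotating", key); return nil }

func main() {
	q := queue.New()
	_ = q.Push(&queue.Item{Key: "role-a", Value: "cfg", Priority: time.Now().Unix()})

	item, err := q.Pop()
	if err != nil {
		if errors.Is(err, queue.ErrEmpty) {
			return // nothing queued; not an error
		}
		panic(err)
	}
	if item.Priority > time.Now().Unix() {
		_ = q.Push(item) // not due yet; put it back untouched
		return
	}
	if err := rotate(item.Key); err != nil {
		item.Priority = time.Now().Add(10 * time.Second).Unix() // short backoff
	} else {
		item.Priority = time.Now().Add(30 * time.Minute).Unix() // next regular rotation
	}
	_ = q.Push(item)
}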

View File

@@ -349,3 +349,92 @@ func TestCreateCredential(t *testing.T) {
})
}
}
// TestRequeueOnError verifies that in the case of an error, the entry will still be in the queue for later rotation
func TestRequeueOnError(t *testing.T) {
bgCTX := context.Background()
cred := staticRoleEntry{
Name: "test",
Username: "jane-doe",
RotationPeriod: 30 * time.Minute,
}
ak := "long-access-key-id"
oldSecret := "abcdefghijklmnopqrstuvwxyz"
config := logical.TestBackendConfig()
config.StorageView = &logical.InmemStorage{}
b := Backend(config)
// go through the process of adding a key
miam, err := awsutil.NewMockIAM(
awsutil.WithListAccessKeysOutput(&iam.ListAccessKeysOutput{
AccessKeyMetadata: []*iam.AccessKeyMetadata{
{},
},
}),
// initial key to store
awsutil.WithCreateAccessKeyOutput(&iam.CreateAccessKeyOutput{
AccessKey: &iam.AccessKey{
AccessKeyId: aws.String(ak),
SecretAccessKey: aws.String(oldSecret),
},
}),
awsutil.WithGetUserOutput(&iam.GetUserOutput{
User: &iam.User{
UserId: aws.String(cred.ID),
UserName: aws.String(cred.Username),
},
}),
)(nil)
if err != nil {
t.Fatalf("couldn't initialize the mock IAM: %s", err)
}
b.iamClient = miam
err = b.createCredential(bgCTX, config.StorageView, cred, true)
if err != nil {
t.Fatalf("couldn't insert credential: %s", err)
}
// put the cred in the queue but age it out
item := &queue.Item{
Key: cred.Name,
Value: cred,
Priority: time.Now().Add(-10 * time.Minute).Unix(),
}
err = b.credRotationQueue.Push(item)
if err != nil {
t.Fatalf("couldn't push item onto queue: %s", err)
}
// update the mock iam with the next requests
miam, err = awsutil.NewMockIAM(
awsutil.WithGetUserError(errors.New("oh no")),
)(nil)
if err != nil {
t.Fatalf("couldn't initialize the mock iam: %s", err)
}
b.iamClient = miam
// now rotate, but it will fail
r, e := b.rotateCredential(bgCTX, config.StorageView)
if !r {
t.Fatalf("rotate credential should return true in this case, but it didn't")
}
if e == nil {
t.Fatalf("we expected an error when rotating a credential, but didn't get one")
}
// the queue should be updated though
i, e := b.credRotationQueue.PopByKey(cred.Name)
if e != nil {
t.Fatalf("queue error: %s", e)
}
delta := time.Now().Add(10*time.Second).Unix() - i.Priority
if delta < -5 || delta > 5 {
t.Fatalf("priority should be within 5 seconds of our backoff interval")
}
}

View File

@@ -849,6 +849,22 @@ func TestBackend_Roles(t *testing.T) {
}
}
func TestBackend_Enterprise_Diff_Namespace_Revocation(t *testing.T) {
if _, hasLicense := os.LookupEnv("CONSUL_LICENSE"); !hasLicense {
t.Skip("Skipping: No enterprise license found")
}
testBackendEntDiffNamespaceRevocation(t)
}
func TestBackend_Enterprise_Diff_Partition_Revocation(t *testing.T) {
if _, hasLicense := os.LookupEnv("CONSUL_LICENSE"); !hasLicense {
t.Skip("Skipping: No enterprise license found")
}
testBackendEntDiffPartitionRevocation(t)
}
func TestBackend_Enterprise_Namespace(t *testing.T) {
if _, hasLicense := os.LookupEnv("CONSUL_LICENSE"); !hasLicense {
t.Skip("Skipping: No enterprise license found")
@@ -865,6 +881,268 @@ func TestBackend_Enterprise_Partition(t *testing.T) {
testBackendEntPartition(t)
}
func testBackendEntDiffNamespaceRevocation(t *testing.T) {
config := logical.TestBackendConfig()
config.StorageView = &logical.InmemStorage{}
b, err := Factory(context.Background(), config)
if err != nil {
t.Fatal(err)
}
cleanup, consulConfig := consul.PrepareTestContainer(t, "", true, true)
defer cleanup()
// Perform additional Consul configuration
consulapiConfig := consulapi.DefaultNonPooledConfig()
consulapiConfig.Address = consulConfig.Address()
consulapiConfig.Token = consulConfig.Token
client, err := consulapi.NewClient(consulapiConfig)
if err != nil {
t.Fatal(err)
}
// Create Policy in default namespace to manage ACLs in a different
// namespace
nsPol := &consulapi.ACLPolicy{
Name: "diff-ns-test",
Description: "policy to test management of ACLs in one ns from another",
Rules: `namespace "ns1" {
acl="write"
}
`,
}
pol, _, err := client.ACL().PolicyCreate(nsPol, nil)
if err != nil {
t.Fatal(err)
}
// Create new Token in default namespace with new ACL
cToken, _, err := client.ACL().TokenCreate(
&consulapi.ACLToken{
Policies: []*consulapi.ACLLink{{ID: pol.ID}},
}, nil)
if err != nil {
t.Fatal(err)
}
// Write backend config
connData := map[string]interface{}{
"address": consulConfig.Address(),
"token": cToken.SecretID,
}
req := &logical.Request{
Storage: config.StorageView,
Operation: logical.UpdateOperation,
Path: "config/access",
Data: connData,
}
_, err = b.HandleRequest(context.Background(), req)
if err != nil {
t.Fatal(err)
}
// Create the role in namespace "ns1"
req.Path = "roles/test-ns"
req.Data = map[string]interface{}{
"consul_policies": []string{"ns-test"},
"lease": "6h",
"consul_namespace": "ns1",
}
_, err = b.HandleRequest(context.Background(), req)
if err != nil {
t.Fatal(err)
}
// Get Token
req.Operation = logical.ReadOperation
req.Path = "creds/test-ns"
resp, err := b.HandleRequest(context.Background(), req)
if err != nil {
t.Fatal(err)
}
if resp == nil {
t.Fatal("resp nil")
}
if resp.IsError() {
t.Fatalf("resp is error: %v", resp.Error())
}
generatedSecret := resp.Secret
generatedSecret.TTL = 6 * time.Hour
// Verify Secret
var d struct {
Token string `mapstructure:"token"`
Accessor string `mapstructure:"accessor"`
ConsulNamespace string `mapstructure:"consul_namespace"`
}
if err := mapstructure.Decode(resp.Data, &d); err != nil {
t.Fatal(err)
}
if d.ConsulNamespace != "ns1" {
t.Fatalf("Failed to access namespace")
}
// Revoke the credential
req.Operation = logical.RevokeOperation
req.Secret = generatedSecret
_, err = b.HandleRequest(context.Background(), req)
if err != nil {
t.Fatalf("Revocation failed: %v", err)
}
// Build a management client and verify that the token does not exist anymore
consulmgmtConfig := consulapi.DefaultNonPooledConfig()
consulmgmtConfig.Address = connData["address"].(string)
consulmgmtConfig.Token = connData["token"].(string)
mgmtclient, err := consulapi.NewClient(consulmgmtConfig)
if err != nil {
t.Fatal(err)
}
q := &consulapi.QueryOptions{
Datacenter: "DC1",
Namespace: "ns1",
}
_, _, err = mgmtclient.ACL().TokenRead(d.Accessor, q)
if err == nil {
t.Fatal("err: expected error")
}
}
func testBackendEntDiffPartitionRevocation(t *testing.T) {
config := logical.TestBackendConfig()
config.StorageView = &logical.InmemStorage{}
b, err := Factory(context.Background(), config)
if err != nil {
t.Fatal(err)
}
cleanup, consulConfig := consul.PrepareTestContainer(t, "", true, true)
defer cleanup()
// Perform additional Consul configuration
consulapiConfig := consulapi.DefaultNonPooledConfig()
consulapiConfig.Address = consulConfig.Address()
consulapiConfig.Token = consulConfig.Token
client, err := consulapi.NewClient(consulapiConfig)
if err != nil {
t.Fatal(err)
}
// Create Policy in default partition to manage ACLs in a different
// partition
partPol := &consulapi.ACLPolicy{
Name: "diff-part-test",
Description: "policy to test management of ACLs in one part from another",
Rules: `partition "part1" {
acl="write"
}
`,
}
pol, _, err := client.ACL().PolicyCreate(partPol, nil)
if err != nil {
t.Fatal(err)
}
// Create new Token in default partition with new ACL
cToken, _, err := client.ACL().TokenCreate(
&consulapi.ACLToken{
Policies: []*consulapi.ACLLink{{ID: pol.ID}},
}, nil)
if err != nil {
t.Fatal(err)
}
// Write backend config
connData := map[string]interface{}{
"address": consulConfig.Address(),
"token": cToken.SecretID,
}
req := &logical.Request{
Storage: config.StorageView,
Operation: logical.UpdateOperation,
Path: "config/access",
Data: connData,
}
_, err = b.HandleRequest(context.Background(), req)
if err != nil {
t.Fatal(err)
}
// Create the role in partition "part1"
req.Path = "roles/test-part"
req.Data = map[string]interface{}{
"consul_policies": []string{"part-test"},
"lease": "6h",
"partition": "part1",
}
_, err = b.HandleRequest(context.Background(), req)
if err != nil {
t.Fatal(err)
}
// Get Token
req.Operation = logical.ReadOperation
req.Path = "creds/test-part"
resp, err := b.HandleRequest(context.Background(), req)
if err != nil {
t.Fatal(err)
}
if resp == nil {
t.Fatal("resp nil")
}
if resp.IsError() {
t.Fatalf("resp is error: %v", resp.Error())
}
generatedSecret := resp.Secret
generatedSecret.TTL = 6 * time.Hour
// Verify Secret
var d struct {
Token string `mapstructure:"token"`
Accessor string `mapstructure:"accessor"`
Partition string `mapstructure:"partition"`
}
if err := mapstructure.Decode(resp.Data, &d); err != nil {
t.Fatal(err)
}
if d.Partition != "part1" {
t.Fatalf("Failed to access partition")
}
// Revoke the credential
req.Operation = logical.RevokeOperation
req.Secret = generatedSecret
_, err = b.HandleRequest(context.Background(), req)
if err != nil {
t.Fatalf("Revocation failed: %v", err)
}
// Build a management client and verify that the token does not exist anymore
consulmgmtConfig := consulapi.DefaultNonPooledConfig()
consulmgmtConfig.Address = connData["address"].(string)
consulmgmtConfig.Token = connData["token"].(string)
mgmtclient, err := consulapi.NewClient(consulmgmtConfig)
if err != nil {
t.Fatal(err)
}
q := &consulapi.QueryOptions{
Datacenter: "DC1",
Partition: "part1",
}
_, _, err = mgmtclient.ACL().TokenRead(d.Accessor, q)
if err == nil {
t.Fatal("err: expected error")
}
}
func testBackendEntNamespace(t *testing.T) {
config := logical.TestBackendConfig()
config.StorageView = &logical.InmemStorage{}

View File

@@ -7,6 +7,7 @@ import (
"context"
"fmt"
"github.com/hashicorp/consul/api"
"github.com/hashicorp/vault/sdk/framework"
"github.com/hashicorp/vault/sdk/logical"
)
@@ -84,6 +85,24 @@ func (b *backend) secretTokenRevoke(ctx context.Context, req *logical.Request, d
version = versionRaw.(string)
}
// Extract Consul Namespace and Partition info from secret
var revokeWriteOptions *api.WriteOptions
var namespace, partition string
namespaceRaw, ok := req.Data["consul_namespace"]
if ok {
namespace = namespaceRaw.(string)
}
partitionRaw, ok := req.Data["partition"]
if ok {
partition = partitionRaw.(string)
}
revokeWriteOptions = &api.WriteOptions{
Namespace: namespace,
Partition: partition,
}
switch version {
case "":
// Pre 1.4 tokens
@@ -92,7 +111,7 @@ func (b *backend) secretTokenRevoke(ctx context.Context, req *logical.Request, d
return nil, err
}
case tokenPolicyType:
_, err := c.ACL().TokenDelete(tokenRaw.(string), nil)
_, err := c.ACL().TokenDelete(tokenRaw.(string), revokeWriteOptions)
if err != nil {
return nil, err
}
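
For reference, the revocation path now threads the token's namespace and partition through api.WriteOptions; the call shape with the Consul API client looks like this (values illustrative):

wo := &api.WriteOptions{Namespace: "ns1", Partition: "part1"}
if _, err := client.ACL().TokenDelete(accessorID, wo); err != nil {
	return nil, err
}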

View File

@@ -17,12 +17,10 @@ import (
"github.com/go-test/deep"
"github.com/hashicorp/go-hclog"
mongodbatlas "github.com/hashicorp/vault-plugin-database-mongodbatlas"
"github.com/hashicorp/vault/helper/builtinplugins"
"github.com/hashicorp/vault/helper/namespace"
postgreshelper "github.com/hashicorp/vault/helper/testhelpers/postgresql"
vaulthttp "github.com/hashicorp/vault/http"
"github.com/hashicorp/vault/plugins/database/mongodb"
"github.com/hashicorp/vault/plugins/database/postgresql"
v4 "github.com/hashicorp/vault/sdk/database/dbplugin"
v5 "github.com/hashicorp/vault/sdk/database/dbplugin/v5"
@@ -36,6 +34,29 @@ import (
"github.com/mitchellh/mapstructure"
)
func getClusterPostgresDB(t *testing.T) (*vault.TestCluster, logical.SystemView) {
coreConfig := &vault.CoreConfig{
LogicalBackends: map[string]logical.Factory{
"database": Factory,
},
BuiltinRegistry: builtinplugins.Registry,
}
cluster := vault.NewTestCluster(t, coreConfig, &vault.TestClusterOptions{
HandlerFunc: vaulthttp.Handler,
})
cluster.Start()
cores := cluster.Cores
vault.TestWaitActive(t, cores[0].Core)
os.Setenv(pluginutil.PluginCACertPEMEnv, cluster.CACertPEMFile)
sys := vault.TestDynamicSystemView(cores[0].Core, nil)
vault.TestAddTestPlugin(t, cores[0].Core, "postgresql-database-plugin", consts.PluginTypeDatabase, "", "TestBackend_PluginMain_PostgresMultiplexed", []string{}, "")
return cluster, sys
}
func getCluster(t *testing.T) (*vault.TestCluster, logical.SystemView) {
coreConfig := &vault.CoreConfig{
LogicalBackends: map[string]logical.Factory{
@@ -54,29 +75,10 @@ func getCluster(t *testing.T) (*vault.TestCluster, logical.SystemView) {
os.Setenv(pluginutil.PluginCACertPEMEnv, cluster.CACertPEMFile)
sys := vault.TestDynamicSystemView(cores[0].Core, nil)
vault.TestAddTestPlugin(t, cores[0].Core, "postgresql-database-plugin", consts.PluginTypeDatabase, "", "TestBackend_PluginMain_Postgres", []string{}, "")
vault.TestAddTestPlugin(t, cores[0].Core, "postgresql-database-plugin-muxed", consts.PluginTypeDatabase, "", "TestBackend_PluginMain_PostgresMultiplexed", []string{}, "")
vault.TestAddTestPlugin(t, cores[0].Core, "mongodb-database-plugin", consts.PluginTypeDatabase, "", "TestBackend_PluginMain_Mongo", []string{}, "")
vault.TestAddTestPlugin(t, cores[0].Core, "mongodb-database-plugin-muxed", consts.PluginTypeDatabase, "", "TestBackend_PluginMain_MongoMultiplexed", []string{}, "")
vault.TestAddTestPlugin(t, cores[0].Core, "mongodbatlas-database-plugin", consts.PluginTypeDatabase, "", "TestBackend_PluginMain_MongoAtlas", []string{}, "")
vault.TestAddTestPlugin(t, cores[0].Core, "mongodbatlas-database-plugin-muxed", consts.PluginTypeDatabase, "", "TestBackend_PluginMain_MongoAtlasMultiplexed", []string{}, "")
return cluster, sys
}
func TestBackend_PluginMain_Postgres(t *testing.T) {
if os.Getenv(pluginutil.PluginVaultVersionEnv) == "" {
return
}
dbType, err := postgresql.New()
if err != nil {
t.Fatalf("Failed to initialize postgres: %s", err)
}
v5.Serve(dbType.(v5.Database))
}
func TestBackend_PluginMain_PostgresMultiplexed(t *testing.T) {
if os.Getenv(pluginutil.PluginVaultVersionEnv) == "" {
return
@@ -85,48 +87,6 @@ func TestBackend_PluginMain_PostgresMultiplexed(t *testing.T) {
v5.ServeMultiplex(postgresql.New)
}
func TestBackend_PluginMain_Mongo(t *testing.T) {
if os.Getenv(pluginutil.PluginVaultVersionEnv) == "" {
return
}
dbType, err := mongodb.New()
if err != nil {
t.Fatalf("Failed to initialize mongodb: %s", err)
}
v5.Serve(dbType.(v5.Database))
}
func TestBackend_PluginMain_MongoMultiplexed(t *testing.T) {
if os.Getenv(pluginutil.PluginVaultVersionEnv) == "" {
return
}
v5.ServeMultiplex(mongodb.New)
}
func TestBackend_PluginMain_MongoAtlas(t *testing.T) {
if os.Getenv(pluginutil.PluginUnwrapTokenEnv) == "" {
return
}
dbType, err := mongodbatlas.New()
if err != nil {
t.Fatalf("Failed to initialize mongodbatlas: %s", err)
}
v5.Serve(dbType.(v5.Database))
}
func TestBackend_PluginMain_MongoAtlasMultiplexed(t *testing.T) {
if os.Getenv(pluginutil.PluginUnwrapTokenEnv) == "" {
return
}
v5.ServeMultiplex(mongodbatlas.New)
}
func TestBackend_RoleUpgrade(t *testing.T) {
storage := &logical.InmemStorage{}
backend := &databaseBackend{}
@@ -183,7 +143,7 @@ func TestBackend_config_connection(t *testing.T) {
var resp *logical.Response
var err error
cluster, sys := getCluster(t)
cluster, sys := getClusterPostgresDB(t)
defer cluster.Cleanup()
config := logical.TestBackendConfig()
@@ -370,7 +330,7 @@ func TestBackend_config_connection(t *testing.T) {
}
func TestBackend_BadConnectionString(t *testing.T) {
cluster, sys := getCluster(t)
cluster, sys := getClusterPostgresDB(t)
defer cluster.Cleanup()
config := logical.TestBackendConfig()
@@ -419,7 +379,7 @@ func TestBackend_BadConnectionString(t *testing.T) {
}
func TestBackend_basic(t *testing.T) {
cluster, sys := getCluster(t)
cluster, sys := getClusterPostgresDB(t)
defer cluster.Cleanup()
config := logical.TestBackendConfig()
@@ -626,7 +586,7 @@ func TestBackend_basic(t *testing.T) {
}
func TestBackend_connectionCrud(t *testing.T) {
cluster, sys := getCluster(t)
cluster, sys := getClusterPostgresDB(t)
defer cluster.Cleanup()
config := logical.TestBackendConfig()
@@ -811,7 +771,7 @@ func TestBackend_connectionCrud(t *testing.T) {
}
func TestBackend_roleCrud(t *testing.T) {
cluster, sys := getCluster(t)
cluster, sys := getClusterPostgresDB(t)
defer cluster.Cleanup()
config := logical.TestBackendConfig()
@@ -1064,7 +1024,7 @@ func TestBackend_roleCrud(t *testing.T) {
}
func TestBackend_allowedRoles(t *testing.T) {
cluster, sys := getCluster(t)
cluster, sys := getClusterPostgresDB(t)
defer cluster.Cleanup()
config := logical.TestBackendConfig()
@@ -1261,7 +1221,7 @@ func TestBackend_allowedRoles(t *testing.T) {
}
func TestBackend_RotateRootCredentials(t *testing.T) {
cluster, sys := getCluster(t)
cluster, sys := getClusterPostgresDB(t)
defer cluster.Cleanup()
config := logical.TestBackendConfig()
@@ -1362,7 +1322,7 @@ func TestBackend_RotateRootCredentials(t *testing.T) {
}
func TestBackend_ConnectionURL_redacted(t *testing.T) {
cluster, sys := getCluster(t)
cluster, sys := getClusterPostgresDB(t)
t.Cleanup(cluster.Cleanup)
config := logical.TestBackendConfig()

View File

@@ -282,6 +282,7 @@ func (b *databaseBackend) connectionReadHandler() framework.OperationFunc {
delete(config.ConnectionDetails, "password")
delete(config.ConnectionDetails, "private_key")
delete(config.ConnectionDetails, "service_account_json")
return &logical.Response{
Data: structs.New(config).Map(),

View File

@@ -127,7 +127,7 @@ func TestWriteConfig_PluginVersionInStorage(t *testing.T) {
}
func TestWriteConfig_HelpfulErrorMessageWhenBuiltinOverridden(t *testing.T) {
cluster, sys := getCluster(t)
cluster, sys := getClusterPostgresDB(t)
t.Cleanup(cluster.Cleanup)
config := logical.TestBackendConfig()

View File

@@ -560,8 +560,7 @@ func (b *databaseBackend) pathStaticRoleCreateUpdate(ctx context.Context, req *l
role.StaticAccount.Username = username
rotationPeriodSecondsRaw, rotationPeriodOk := data.GetOk("rotation_period")
rotationSchedule := data.Get("rotation_schedule").(string)
rotationScheduleOk := rotationSchedule != ""
rotationScheduleRaw, rotationScheduleOk := data.GetOk("rotation_schedule")
rotationWindowSecondsRaw, rotationWindowOk := data.GetOk("rotation_window")
if rotationScheduleOk && rotationPeriodOk {
@@ -591,6 +590,7 @@ func (b *databaseBackend) pathStaticRoleCreateUpdate(ctx context.Context, req *l
}
if rotationScheduleOk {
rotationSchedule := rotationScheduleRaw.(string)
parsedSchedule, err := b.schedule.Parse(rotationSchedule)
if err != nil {
return logical.ErrorResponse("could not parse rotation_schedule", "error", err), nil
@@ -683,15 +683,13 @@ func (b *databaseBackend) pathStaticRoleCreateUpdate(ctx context.Context, req *l
}
}
_, rotationPeriodChanged := data.Raw["rotation_period"]
_, rotationScheduleChanged := data.Raw["rotation_schedule"]
if rotationPeriodChanged {
if rotationPeriodOk {
b.logger.Debug("init priority for RotationPeriod", "lvr", lvr, "next", lvr.Add(role.StaticAccount.RotationPeriod))
item.Priority = lvr.Add(role.StaticAccount.RotationPeriod).Unix()
} else if rotationScheduleChanged {
} else if rotationScheduleOk {
next := role.StaticAccount.Schedule.Next(lvr)
b.logger.Debug("init priority for Schedule", "lvr", lvr, "next", next)
item.Priority = role.StaticAccount.Schedule.Next(lvr).Unix()
item.Priority = next.Unix()
}
// Add their rotation to the queue
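
In the schedule branch, Schedule.Next(lvr) does the heavy lifting: the new priority is simply the first cron fire time after the last Vault rotation. A self-contained sketch, assuming the robfig/cron/v3 parser that the cron.Second | cron.Minute | ... options elsewhere in this diff suggest:

package main

import (
	"fmt"
	"time"

	"github.com/robfig/cron/v3"
)

func main() {
	parser := cron.NewParser(
		cron.Second | cron.Minute | cron.Hour | cron.Dom | cron.Month | cron.Dow,
	)
	sched, err := parser.Parse("0 0 */2 * * *") // every two hours, on the hour
	if err != nil {
		panic(err)
	}
	lvr := time.Now() // stands in for the role's last Vault rotation
	fmt.Println("next rotation:", sched.Next(lvr))
}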

View File

@@ -205,7 +205,7 @@ func TestBackend_Roles_CredentialTypes(t *testing.T) {
}
func TestBackend_StaticRole_Config(t *testing.T) {
cluster, sys := getCluster(t)
cluster, sys := getClusterPostgresDB(t)
defer cluster.Cleanup()
config := logical.TestBackendConfig()
@@ -470,7 +470,7 @@ func TestBackend_StaticRole_Config(t *testing.T) {
}
func TestBackend_StaticRole_ReadCreds(t *testing.T) {
cluster, sys := getCluster(t)
cluster, sys := getClusterPostgresDB(t)
defer cluster.Cleanup()
config := logical.TestBackendConfig()
@@ -650,7 +650,7 @@ func TestBackend_StaticRole_ReadCreds(t *testing.T) {
}
func TestBackend_StaticRole_Updates(t *testing.T) {
cluster, sys := getCluster(t)
cluster, sys := getClusterPostgresDB(t)
defer cluster.Cleanup()
config := logical.TestBackendConfig()
@@ -842,8 +842,114 @@ func TestBackend_StaticRole_Updates(t *testing.T) {
}
}
func TestBackend_StaticRole_Updates_RotationSchedule(t *testing.T) {
ctx := context.Background()
b, storage, mockDB := getBackend(t)
defer b.Cleanup(ctx)
configureDBMount(t, storage)
data := map[string]interface{}{
"name": "plugin-role-test-updates",
"db_name": "mockv5",
"rotation_statements": testRoleStaticUpdate,
"username": dbUser,
"rotation_schedule": "0 0 */2 * * *",
"rotation_window": "1h",
}
mockDB.On("UpdateUser", mock.Anything, mock.Anything).
Return(v5.UpdateUserResponse{}, nil).
Once()
req := &logical.Request{
Operation: logical.CreateOperation,
Path: "static-roles/plugin-role-test-updates",
Storage: storage,
Data: data,
}
resp, err := b.HandleRequest(namespace.RootContext(nil), req)
if err != nil || (resp != nil && resp.IsError()) {
t.Fatalf("err:%s resp:%#v\n", err, resp)
}
// Read the role
data = map[string]interface{}{}
req = &logical.Request{
Operation: logical.ReadOperation,
Path: "static-roles/plugin-role-test-updates",
Storage: storage,
Data: data,
}
resp, err = b.HandleRequest(namespace.RootContext(nil), req)
if err != nil || (resp != nil && resp.IsError()) {
t.Fatalf("err:%s resp:%#v\n", err, resp)
}
rotation := resp.Data["rotation_schedule"].(string)
window := resp.Data["rotation_window"].(float64)
// update rotation_schedule and window
updateData := map[string]interface{}{
"name": "plugin-role-test-updates",
"db_name": "mockv5",
"username": dbUser,
"rotation_schedule": "0 0 */1 * * *",
"rotation_window": "2h",
}
req = &logical.Request{
Operation: logical.UpdateOperation,
Path: "static-roles/plugin-role-test-updates",
Storage: storage,
Data: updateData,
}
resp, err = b.HandleRequest(namespace.RootContext(nil), req)
if err != nil || (resp != nil && resp.IsError()) {
t.Fatalf("err:%s resp:%#v\n", err, resp)
}
// re-read the role
data = map[string]interface{}{}
req = &logical.Request{
Operation: logical.ReadOperation,
Path: "static-roles/plugin-role-test-updates",
Storage: storage,
Data: data,
}
resp, err = b.HandleRequest(namespace.RootContext(nil), req)
if err != nil || (resp != nil && resp.IsError()) {
t.Fatalf("err:%s resp:%#v\n", err, resp)
}
newRotation := resp.Data["rotation_schedule"].(string)
if newRotation == rotation {
t.Fatalf("expected change in rotation, but got old value: %#v", newRotation)
}
newWindow := resp.Data["rotation_window"].(float64)
if newWindow == window {
t.Fatalf("expected change in rotation_window, but got old value: %#v", newWindow)
}
// verify that rotation_schedule is only required when creating
updateData = map[string]interface{}{
"name": "plugin-role-test-updates",
"db_name": "mockv5",
"username": dbUser,
"rotation_statements": testRoleStaticUpdateRotation,
}
req = &logical.Request{
Operation: logical.UpdateOperation,
Path: "static-roles/plugin-role-test-updates",
Storage: storage,
Data: updateData,
}
resp, err = b.HandleRequest(namespace.RootContext(nil), req)
if err != nil || (resp != nil && resp.IsError()) {
t.Fatalf("err:%s resp:%#v\n", err, resp)
}
}
func TestBackend_StaticRole_Role_name_check(t *testing.T) {
cluster, sys := getCluster(t)
cluster, sys := getClusterPostgresDB(t)
defer cluster.Cleanup()
config := logical.TestBackendConfig()

View File

@@ -27,7 +27,7 @@ const (
// - Password has been altered on the database
// - Password has not been updated in storage
func TestBackend_RotateRootCredentials_WAL_rollback(t *testing.T) {
cluster, sys := getCluster(t)
cluster, sys := getClusterPostgresDB(t)
defer cluster.Cleanup()
config := logical.TestBackendConfig()
@@ -170,7 +170,7 @@ func TestBackend_RotateRootCredentials_WAL_rollback(t *testing.T) {
// - Password has not been altered on the database
// - Password has not been updated in storage
func TestBackend_RotateRootCredentials_WAL_no_rollback_1(t *testing.T) {
cluster, sys := getCluster(t)
cluster, sys := getClusterPostgresDB(t)
defer cluster.Cleanup()
config := logical.TestBackendConfig()
@@ -274,7 +274,7 @@ func TestBackend_RotateRootCredentials_WAL_no_rollback_1(t *testing.T) {
// - Password has been altered on the database
// - Password has been updated in storage
func TestBackend_RotateRootCredentials_WAL_no_rollback_2(t *testing.T) {
cluster, sys := getCluster(t)
cluster, sys := getClusterPostgresDB(t)
defer cluster.Cleanup()
config := logical.TestBackendConfig()

View File

@@ -36,12 +36,12 @@ import (
const (
dbUser = "vaultstatictest"
dbUserDefaultPassword = "password"
testMinRotationWindowSeconds = 10
testMinRotationWindowSeconds = 5
testScheduleParseOptions = cron.Second | cron.Minute | cron.Hour | cron.Dom | cron.Month | cron.Dow
)
func TestBackend_StaticRole_Rotation_basic(t *testing.T) {
cluster, sys := getCluster(t)
cluster, sys := getClusterPostgresDB(t)
defer cluster.Cleanup()
config := logical.TestBackendConfig()
@@ -246,11 +246,175 @@ func TestBackend_StaticRole_Rotation_basic(t *testing.T) {
}
}
// TestBackend_StaticRole_Rotation_Schedule_ErrorRecover tests that failed
// rotations can successfully recover and that they do not occur outside of a
// rotation window.
func TestBackend_StaticRole_Rotation_Schedule_ErrorRecover(t *testing.T) {
cluster, sys := getClusterPostgresDB(t)
t.Cleanup(cluster.Cleanup)
config := logical.TestBackendConfig()
config.StorageView = &logical.InmemStorage{}
config.System = sys
lb, err := Factory(context.Background(), config)
if err != nil {
t.Fatal(err)
}
b, ok := lb.(*databaseBackend)
if !ok {
t.Fatal("could not convert to db backend")
}
defer b.Cleanup(context.Background())
b.schedule = &TestSchedule{}
cleanup, connURL := postgreshelper.PrepareTestContainer(t, "")
t.Cleanup(cleanup)
// create the database user
createTestPGUser(t, connURL, dbUser, dbUserDefaultPassword, testRoleStaticCreate)
verifyPgConn(t, dbUser, dbUserDefaultPassword, connURL)
// Configure a connection
connectionData := map[string]interface{}{
"connection_url": connURL,
"plugin_name": "postgresql-database-plugin",
"verify_connection": false,
"allowed_roles": []string{"*"},
"name": "plugin-test",
}
configureConnection(t, b, config.StorageView, connectionData)
// create the role that will rotate every 10 seconds;
// rotations will not be allowed more than 5s after each scheduled time
data := map[string]interface{}{
"name": "plugin-role-test",
"db_name": "plugin-test",
"rotation_statements": testRoleStaticUpdate,
"rotation_schedule": "*/10 * * * * *",
"rotation_window": "5s",
"username": dbUser,
}
req := &logical.Request{
Operation: logical.CreateOperation,
Path: "static-roles/plugin-role-test",
Storage: config.StorageView,
Data: data,
}
resp, err := b.HandleRequest(namespace.RootContext(nil), req)
if err != nil || (resp != nil && resp.IsError()) {
t.Fatalf("err:%s resp:%#v\n", err, resp)
}
// Read the creds
req = &logical.Request{
Operation: logical.ReadOperation,
Path: "static-creds/plugin-role-test",
Storage: config.StorageView,
}
resp, err = b.HandleRequest(namespace.RootContext(nil), req)
if err != nil || (resp != nil && resp.IsError()) {
t.Fatalf("err:%s resp:%#v\n", err, resp)
}
username := resp.Data["username"].(string)
originalPassword := resp.Data["password"].(string)
if username == "" || originalPassword == "" {
t.Fatalf("empty username (%s) or password (%s)", username, originalPassword)
}
// Verify username/password
verifyPgConn(t, dbUser, originalPassword, connURL)
// Set invalid connection URL so we fail to rotate
connectionData["connection_url"] = strings.Replace(connURL, "postgres:secret", "postgres:foo", 1)
configureConnection(t, b, config.StorageView, connectionData)
// determine next rotation schedules based on current test time
rotationSchedule := data["rotation_schedule"].(string)
schedule, err := b.schedule.Parse(rotationSchedule)
if err != nil {
t.Fatalf("could not parse rotation_schedule: %s", err)
}
next := schedule.Next(time.Now()) // the next rotation time we expect
time.Sleep(time.Until(next))
// Re-Read the creds after schedule expiration
req = &logical.Request{
Operation: logical.ReadOperation,
Path: "static-creds/plugin-role-test",
Storage: config.StorageView,
}
resp, err = b.HandleRequest(namespace.RootContext(nil), req)
if err != nil || (resp != nil && resp.IsError()) {
t.Fatalf("err:%s resp:%#v\n", err, resp)
}
checkPassword := resp.Data["password"].(string)
if originalPassword != checkPassword {
// should match because rotations should be failing
t.Fatalf("expected passwords to match, got (%s)", checkPassword)
}
// wait until we are outside the rotation window so that rotations will not occur
next = schedule.Next(time.Now()) // the next rotation time after now
time.Sleep(time.Until(next.Add(6 * time.Second)))
// reset to valid connection URL so we do not fail to rotate anymore
connectionData["connection_url"] = connURL
configureConnection(t, b, config.StorageView, connectionData)
// we are outside a rotation window, Re-Read the creds
req = &logical.Request{
Operation: logical.ReadOperation,
Path: "static-creds/plugin-role-test",
Storage: config.StorageView,
}
resp, err = b.HandleRequest(namespace.RootContext(nil), req)
if err != nil || (resp != nil && resp.IsError()) {
t.Fatalf("err:%s resp:%#v\n", err, resp)
}
checkPassword = resp.Data["password"].(string)
if originalPassword != checkPassword {
// should match because rotations should not occur outside the rotation window
t.Fatalf("expected passwords to match, got (%s)", checkPassword)
}
// Verify the unchanged username/password still works
verifyPgConn(t, username, checkPassword, connURL)
// sleep until the next rotation time with a buffer to ensure we had time to rotate
next = schedule.Next(time.Now()) // the next rotation time we expect
time.Sleep(time.Until(next.Add(5 * time.Second)))
// Re-Read the creds
req = &logical.Request{
Operation: logical.ReadOperation,
Path: "static-creds/plugin-role-test",
Storage: config.StorageView,
}
resp, err = b.HandleRequest(namespace.RootContext(nil), req)
if err != nil || (resp != nil && resp.IsError()) {
t.Fatalf("err:%s resp:%#v\n", err, resp)
}
checkPassword = resp.Data["password"].(string)
if originalPassword == checkPassword {
// should differ because we slept until the next rotation time
t.Fatalf("expected passwords to differ, got (%s)", checkPassword)
}
// Verify new username/password
verifyPgConn(t, username, checkPassword, connURL)
}
// Sanity check to make sure we don't allow attempts to rotate credentials
// for non-static accounts; that doesn't make sense anyway, but it doesn't hurt
// to verify we return an error
func TestBackend_StaticRole_Rotation_NonStaticError(t *testing.T) {
cluster, sys := getCluster(t)
cluster, sys := getClusterPostgresDB(t)
defer cluster.Cleanup()
config := logical.TestBackendConfig()
@@ -354,7 +518,7 @@ func TestBackend_StaticRole_Rotation_NonStaticError(t *testing.T) {
}
func TestBackend_StaticRole_Rotation_Revoke_user(t *testing.T) {
cluster, sys := getCluster(t)
cluster, sys := getClusterPostgresDB(t)
defer cluster.Cleanup()
config := logical.TestBackendConfig()
@@ -532,7 +696,7 @@ func verifyPgConn(t *testing.T, username, password, connURL string) {
//
// First scenario, WAL contains a role name that does not exist.
func TestBackend_StaticRole_Rotation_QueueWAL_discard_role_not_found(t *testing.T) {
cluster, sys := getCluster(t)
cluster, sys := getClusterPostgresDB(t)
defer cluster.Cleanup()
ctx := context.Background()
@@ -573,7 +737,7 @@ func TestBackend_StaticRole_Rotation_QueueWAL_discard_role_not_found(t *testing.
// Second scenario, WAL contains a role name that does exist, but the role's
// LastVaultRotation is greater than the WAL has
func TestBackend_StaticRole_Rotation_QueueWAL_discard_role_newer_rotation_date(t *testing.T) {
cluster, sys := getCluster(t)
cluster, sys := getClusterPostgresDB(t)
defer cluster.Cleanup()
ctx := context.Background()
@@ -846,7 +1010,7 @@ func testBackend_StaticRole_Rotations(t *testing.T, createUser userCreator, opts
}
}()
cluster, sys := getCluster(t)
cluster, sys := getClusterPostgresDB(t)
defer cluster.Cleanup()
config := logical.TestBackendConfig()
@@ -1010,7 +1174,7 @@ type createUserCommand struct {
// Demonstrates a bug fix for the credential rotation not releasing locks
func TestBackend_StaticRole_Rotation_LockRegression(t *testing.T) {
cluster, sys := getCluster(t)
cluster, sys := getClusterPostgresDB(t)
defer cluster.Cleanup()
config := logical.TestBackendConfig()
@@ -1089,7 +1253,7 @@ func TestBackend_StaticRole_Rotation_LockRegression(t *testing.T) {
}
func TestBackend_StaticRole_Rotation_Invalid_Role(t *testing.T) {
cluster, sys := getCluster(t)
cluster, sys := getClusterPostgresDB(t)
defer cluster.Cleanup()
config := logical.TestBackendConfig()
@@ -1537,6 +1701,19 @@ func capturePasswords(t *testing.T, b logical.Backend, config *logical.BackendCo
return pws
}
func configureConnection(t *testing.T, b *databaseBackend, s logical.Storage, data map[string]interface{}) {
req := &logical.Request{
Operation: logical.UpdateOperation,
Path: "config/" + data["name"].(string),
Storage: s,
Data: data,
}
resp, err := b.HandleRequest(namespace.RootContext(nil), req)
if err != nil || (resp != nil && resp.IsError()) {
t.Fatalf("err:%s resp:%#v\n", err, resp)
}
}
func newBoolPtr(b bool) *bool {
v := b
return &v

View File

@@ -1,3 +1,6 @@
// Copyright (c) HashiCorp, Inc.
// SPDX-License-Identifier: BUSL-1.1
package schedule
import (

View File

@@ -10,6 +10,7 @@ import (
"sync"
"time"
"github.com/hashicorp/vault/sdk/helper/consts"
"github.com/hashicorp/vault/sdk/logical"
)
@@ -42,6 +43,7 @@ type ChallengeValidation struct {
type ChallengeQueueEntry struct {
Identifier string
RetryAfter time.Time
NumRetries int // Tracks whether we are spinning on a corrupted challenge
}
type ACMEChallengeEngine struct {
@@ -97,7 +99,7 @@ func (ace *ACMEChallengeEngine) Run(b *backend, state *acmeState, sc *storageCon
b.Logger().Error("failed loading existing ACME challenge validations:", "err", err)
}
for true {
for {
// err == nil on shutdown.
b.Logger().Debug("Starting ACME challenge validation engine")
err := ace._run(b, state)
@@ -119,7 +121,7 @@ func (ace *ACMEChallengeEngine) _run(b *backend, state *acmeState) error {
// We want at most a certain number of workers operating to verify
// challenges.
var finishedWorkersChannels []chan bool
for true {
for {
// Wait until we've got more work to do.
select {
case <-ace.Closing:
@@ -201,12 +203,17 @@ func (ace *ACMEChallengeEngine) _run(b *backend, state *acmeState) error {
// looping through the queue until we hit a repeat.
firstIdentifier = ""
// If we are no longer the active node, break out
if b.System().ReplicationState().HasState(consts.ReplicationDRSecondary | consts.ReplicationPerformanceStandby) {
break
}
// Here, we got a piece of work that is ready to check; create a
// channel and a new goroutine and run it. Note that this still
// could have a RetryAfter date we're not aware of (e.g., if the
// cluster restarted, since we do not re-read the stored entries).
channel := make(chan bool, 1)
go ace.VerifyChallenge(runnerSC, task.Identifier, channel, config)
go ace.VerifyChallenge(runnerSC, task.Identifier, task.NumRetries, channel, config)
finishedWorkersChannels = append(finishedWorkersChannels, channel)
startedWork = true
}
@@ -305,8 +312,9 @@ func (ace *ACMEChallengeEngine) AcceptChallenge(sc *storageContext, account stri
return nil
}
func (ace *ACMEChallengeEngine) VerifyChallenge(runnerSc *storageContext, id string, finished chan bool, config *acmeConfigEntry) {
sc, _ /* cancel func */ := runnerSc.WithFreshTimeout(MaxChallengeTimeout)
func (ace *ACMEChallengeEngine) VerifyChallenge(runnerSc *storageContext, id string, validationQueueRetries int, finished chan bool, config *acmeConfigEntry) {
sc, cancel := runnerSc.WithFreshTimeout(MaxChallengeTimeout)
defer cancel()
runnerSc.Backend.Logger().Debug("Starting verification of challenge", "id", id)
if retry, retryAfter, err := ace._verifyChallenge(sc, id, config); err != nil {
@@ -316,11 +324,28 @@ func (ace *ACMEChallengeEngine) VerifyChallenge(runnerSc *storageContext, id str
sc.Backend.Logger().Error(fmt.Sprintf("ACME validation failed for %v: %v", id, err))
if retry {
validationQueueRetries++
// The retry logic within _verifyChallenge is dependent on being able to read and decode
// the ACME challenge entries. If we encounter such failures we would retry forever, so
// we have a secondary check here to detect when an entry has cycled through the
// validation queue more times than the normal retry limit would allow.
if validationQueueRetries > MaxRetryAttempts*2 {
sc.Backend.Logger().Warn("reached max error attempts within challenge queue: %v, giving up", id)
_, _, err = ace._verifyChallengeCleanup(sc, nil, id)
if err != nil {
sc.Backend.Logger().Warn("Failed cleaning up challenge entry: %v", err)
}
finished <- true
return
}
ace.ValidationLock.Lock()
defer ace.ValidationLock.Unlock()
ace.Validations.PushBack(&ChallengeQueueEntry{
Identifier: id,
RetryAfter: retryAfter,
NumRetries: validationQueueRetries,
})
// Let the validator know there's a pending challenge.
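
The re-queue-with-cap logic above reduces to a standalone sketch; `entry` and `maxRetryAttempts` below are simplified stand-ins for `ChallengeQueueEntry` and the engine's `MaxRetryAttempts`, and dropping an entry stands in for the `_verifyChallengeCleanup` call.

```go
package main

import "fmt"

const maxRetryAttempts = 5 // stand-in for the engine's MaxRetryAttempts

type entry struct {
	id         string
	numRetries int
}

// requeue re-enqueues a failed challenge unless it has already looped through
// the validation queue more than twice the normal retry budget; past the cap
// it is dropped (the real code also deletes the challenge's storage entry).
func requeue(queue []entry, e entry) []entry {
	e.numRetries++
	if e.numRetries > maxRetryAttempts*2 {
		fmt.Printf("giving up on %s after %d attempts\n", e.id, e.numRetries)
		return queue
	}
	return append(queue, e)
}

func main() {
	q := requeue(nil, entry{id: "chal-1"})
	for len(q) > 0 {
		e := q[0]
		q = requeue(q[1:], e)
	}
}
```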
@@ -343,23 +368,23 @@ func (ace *ACMEChallengeEngine) VerifyChallenge(runnerSc *storageContext, id str
func (ace *ACMEChallengeEngine) _verifyChallenge(sc *storageContext, id string, config *acmeConfigEntry) (bool, time.Time, error) {
now := time.Now()
backoffTime := now.Add(1 * time.Second)
path := acmeValidationPrefix + id
challengeEntry, err := sc.Storage.Get(sc.Context, path)
if err != nil {
return true, now, fmt.Errorf("error loading challenge %v: %w", id, err)
return true, backoffTime, fmt.Errorf("error loading challenge %v: %w", id, err)
}
if challengeEntry == nil {
// Something must've successfully cleaned up our storage entry from
// under us. Assume we don't need to rerun, else the client will
// trigger us to re-run.
err = nil
return ace._verifyChallengeCleanup(sc, err, id)
return ace._verifyChallengeCleanup(sc, nil, id)
}
var cv *ChallengeValidation
if err := challengeEntry.DecodeJSON(&cv); err != nil {
return true, now, fmt.Errorf("error decoding challenge %v: %w", id, err)
return true, backoffTime, fmt.Errorf("error decoding challenge %v: %w", id, err)
}
if now.Before(cv.RetryAfter) {
@@ -369,7 +394,7 @@ func (ace *ACMEChallengeEngine) _verifyChallenge(sc *storageContext, id string,
authzPath := getAuthorizationPath(cv.Account, cv.Authorization)
authz, err := loadAuthorizationAtPath(sc, authzPath)
if err != nil {
return true, now, fmt.Errorf("error loading authorization %v/%v for challenge %v: %w", cv.Account, cv.Authorization, id, err)
return true, backoffTime, fmt.Errorf("error loading authorization %v/%v for challenge %v: %w", cv.Account, cv.Authorization, id, err)
}
if authz.Status != ACMEAuthorizationPending {
@@ -527,7 +552,7 @@ func (ace *ACMEChallengeEngine) _verifyChallengeCleanup(sc *storageContext, err
// Remove our ChallengeValidation entry only.
if deleteErr := sc.Storage.Delete(sc.Context, acmeValidationPrefix+id); deleteErr != nil {
return true, now.Add(-1 * time.Second), fmt.Errorf("error deleting challenge %v (error prior to cleanup, if any: %v): %w", id, err, deleteErr)
return true, now.Add(1 * time.Second), fmt.Errorf("error deleting challenge %v (error prior to cleanup, if any: %v): %w", id, err, deleteErr)
}
if err != nil {

View File

@@ -18,6 +18,7 @@ import (
"github.com/hashicorp/go-secure-stdlib/nonceutil"
"github.com/hashicorp/vault/sdk/framework"
"github.com/hashicorp/vault/sdk/helper/consts"
"github.com/hashicorp/vault/sdk/logical"
)
@@ -76,6 +77,13 @@ func (a *acmeState) Initialize(b *backend, sc *storageContext) error {
return fmt.Errorf("error initializing ACME engine: %w", err)
}
if b.System().ReplicationState().HasState(consts.ReplicationDRSecondary | consts.ReplicationPerformanceStandby) {
// It is assumed that if the node later becomes the active node, the
// plugin is re-initialized, so this is safe. It also spares the node
// from loading the existing queue into memory for no reason.
b.Logger().Debug("Not on an active node, skipping starting ACME challenge validation engine")
return nil
}
// Kick off our ACME challenge validation engine.
go a.validator.Run(b, a, sc)
@@ -83,6 +91,15 @@ func (a *acmeState) Initialize(b *backend, sc *storageContext) error {
return nil
}
func (a *acmeState) Shutdown(b *backend) {
// If we aren't the active node, nothing to shutdown
if b.System().ReplicationState().HasState(consts.ReplicationDRSecondary | consts.ReplicationPerformanceStandby) {
return
}
a.validator.Closing <- struct{}{}
}
func (a *acmeState) markConfigDirty() {
a.configDirty.Store(true)
}
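
A reduced sketch of the active-node gating used by `Initialize` and `Shutdown` above. The bitmask check paraphrases `consts.ReplicationDRSecondary|consts.ReplicationPerformanceStandby` from the Vault SDK with plain local types; it is an illustration, not the SDK API.

```go
package main

import "fmt"

type replState uint32

const (
	drSecondary        replState = 1 << 0
	performanceStandby replState = 1 << 1
)

func (s replState) hasState(flags replState) bool { return s&flags != 0 }

// startEngine only launches background work on nodes that can write: DR
// secondaries and performance standbys skip the validator entirely, mirroring
// Initialize (nothing started) and Shutdown (nothing to stop) above.
func startEngine(state replState, run func()) bool {
	if state.hasState(drSecondary | performanceStandby) {
		return false
	}
	go run()
	return true
}

func main() {
	fmt.Println("started on standby?", startEngine(performanceStandby, func() {}))
	fmt.Println("started on active?", startEngine(0, func() {}))
}
```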

View File

@@ -144,6 +144,13 @@ func Backend(conf *logical.BackendConfig) *backend {
unifiedRevocationWritePathPrefix,
unifiedDeltaWALPath,
},
Binary: []string{
"ocsp", // OCSP POST
"ocsp/*", // OCSP GET
"unified-ocsp", // Unified OCSP POST
"unified-ocsp/*", // Unified OCSP GET
},
},
Paths: []*framework.Path{
@@ -430,7 +437,8 @@ func (b *backend) initialize(ctx context.Context, ir *logical.InitializationRequ
func (b *backend) cleanup(ctx context.Context) {
sc := b.makeStorageContext(ctx, b.storage)
b.acmeState.validator.Closing <- struct{}{}
b.acmeState.Shutdown(b)
b.cleanupEnt(sc)
}

View File

@@ -12,12 +12,15 @@ import (
"testing"
"time"
"github.com/hashicorp/go-secure-stdlib/parseutil"
"github.com/hashicorp/vault/api"
"github.com/hashicorp/vault/helper/constants"
vaulthttp "github.com/hashicorp/vault/http"
"github.com/hashicorp/vault/sdk/helper/testhelpers/schema"
"github.com/hashicorp/vault/sdk/logical"
"github.com/hashicorp/vault/vault"
"github.com/hashicorp/go-secure-stdlib/parseutil"
"github.com/stretchr/testify/require"
)
@@ -1436,3 +1439,90 @@ hbiiPARizZA/Tsna/9ox1qDT
require.NotNil(t, resp)
require.Empty(t, resp.Warnings)
}
func TestCRLIssuerRemoval(t *testing.T) {
t.Parallel()
ctx := context.Background()
b, s := CreateBackendWithStorage(t)
if constants.IsEnterprise {
// We don't really care about the whole cross cluster replication
// stuff, but we do want to enable unified CRLs if we can, so that
// unified CRLs get built.
_, err := CBWrite(b, s, "config/crl", map[string]interface{}{
"cross_cluster_revocation": true,
"auto_rebuild": true,
})
require.NoError(t, err, "failed enabling unified CRLs on enterprise")
}
// Create a single root, configure delta CRLs, and rotate CRLs to prep a
// starting state.
_, err := CBWrite(b, s, "root/generate/internal", map[string]interface{}{
"common_name": "Root R1",
"key_type": "ec",
})
require.NoError(t, err)
_, err = CBWrite(b, s, "config/crl", map[string]interface{}{
"enable_delta": true,
"auto_rebuild": true,
})
require.NoError(t, err)
_, err = CBRead(b, s, "crl/rotate")
require.NoError(t, err)
// List items in storage under both CRL paths so we know what is there in
// the "good" state.
crlList, err := s.List(ctx, "crls/")
require.NoError(t, err)
require.Contains(t, crlList, "config")
require.Greater(t, len(crlList), 1)
unifiedCRLList, err := s.List(ctx, "unified-crls/")
require.NoError(t, err)
require.Contains(t, unifiedCRLList, "config")
require.Greater(t, len(unifiedCRLList), 1)
// Now, create a bunch of issuers, generate CRLs, and remove them.
var keyIDs []string
var issuerIDs []string
for i := 1; i <= 25; i++ {
resp, err := CBWrite(b, s, "root/generate/internal", map[string]interface{}{
"common_name": fmt.Sprintf("Root X%v", i),
"key_type": "ec",
})
require.NoError(t, err)
require.NotNil(t, resp)
key := string(resp.Data["key_id"].(keyID))
keyIDs = append(keyIDs, key)
issuer := string(resp.Data["issuer_id"].(issuerID))
issuerIDs = append(issuerIDs, issuer)
}
_, err = CBRead(b, s, "crl/rotate")
require.NoError(t, err)
for _, issuer := range issuerIDs {
_, err := CBDelete(b, s, "issuer/"+issuer)
require.NoError(t, err)
}
for _, key := range keyIDs {
_, err := CBDelete(b, s, "key/"+key)
require.NoError(t, err)
}
// Finally list storage entries again to ensure they are cleaned up.
afterCRLList, err := s.List(ctx, "crls/")
require.NoError(t, err)
for _, entry := range crlList {
require.Contains(t, afterCRLList, entry)
}
require.Equal(t, len(afterCRLList), len(crlList))
afterUnifiedCRLList, err := s.List(ctx, "unified-crls/")
require.NoError(t, err)
for _, entry := range unifiedCRLList {
require.Contains(t, afterUnifiedCRLList, entry)
}
require.Equal(t, len(afterUnifiedCRLList), len(unifiedCRLList))
}

View File

@@ -64,9 +64,10 @@ func (ts *TestServer) setupRunner(domain string, network string) {
ContainerName: "bind9-dns-" + strings.ReplaceAll(domain, ".", "-"),
NetworkName: network,
Ports: []string{"53/udp"},
LogConsumer: func(s string) {
ts.log.Info(s)
},
// DNS container logging was disabled to reduce content within CI logs.
//LogConsumer: func(s string) {
// ts.log.Info(s)
//},
})
require.NoError(ts.t, err)
}

View File

@@ -1117,6 +1117,18 @@ func (b *backend) pathDeleteIssuer(ctx context.Context, req *logical.Request, da
response.AddWarning(msg)
}
// Finally, we need to rebuild both the local and the unified CRLs. This
// will free up any now-unnecessary space used both in the CRL config
// and by the underlying CRLs.
warnings, err := b.crlBuilder.rebuild(sc, true)
if err != nil {
return nil, err
}
for index, warning := range warnings {
response.AddWarning(fmt.Sprintf("Warning %d during CRL rebuild: %v", index+1, warning))
}
return response, nil
}

View File

@@ -930,7 +930,91 @@ func areCertificatesEqual(cert1 *x509.Certificate, cert2 *x509.Certificate) bool
return bytes.Equal(cert1.Raw, cert2.Raw)
}
func (sc *storageContext) _cleanupInternalCRLMapping(mapping *internalCRLConfigEntry, path string) error {
// Track which CRL IDs are presently referred to by issuers; any other CRL
// IDs are subject to cleanup.
//
// Unused IDs need both to be removed from this map (shrinking this
// storage entry) and to have their full CRLs removed from disk.
presentMap := make(map[crlID]bool)
for _, id := range mapping.IssuerIDCRLMap {
presentMap[id] = true
}
// Identify which CRL IDs exist and are candidates for removal;
// theoretically these three maps should be in sync, but were added
// at different times.
toRemove := make(map[crlID]bool)
for id := range mapping.CRLNumberMap {
if !presentMap[id] {
toRemove[id] = true
}
}
for id := range mapping.LastCompleteNumberMap {
if !presentMap[id] {
toRemove[id] = true
}
}
for id := range mapping.CRLExpirationMap {
if !presentMap[id] {
toRemove[id] = true
}
}
// Depending on which path we're writing this config to, we need to
// remove CRLs from the relevant folder too.
isLocal := path == storageLocalCRLConfig
baseCRLPath := "crls/"
if !isLocal {
baseCRLPath = "unified-crls/"
}
for id := range toRemove {
// Clean up space in this mapping...
delete(mapping.CRLNumberMap, id)
delete(mapping.LastCompleteNumberMap, id)
delete(mapping.CRLExpirationMap, id)
// And clean up space on disk from the fat CRL mapping.
crlPath := baseCRLPath + string(id)
deltaCRLPath := crlPath + "-delta"
if err := sc.Storage.Delete(sc.Context, crlPath); err != nil {
return fmt.Errorf("failed to delete unreferenced CRL %v: %w", id, err)
}
if err := sc.Storage.Delete(sc.Context, deltaCRLPath); err != nil {
return fmt.Errorf("failed to delete unreferenced delta CRL %v: %w", id, err)
}
}
// Lastly, some CRLs could've been partially removed from the map but
// not from disk. Check to see if we have any dangling CRLs and remove
// them too.
list, err := sc.Storage.List(sc.Context, baseCRLPath)
if err != nil {
return fmt.Errorf("failed listing all CRLs: %w", err)
}
for _, crl := range list {
if crl == "config" || strings.HasSuffix(crl, "/") {
continue
}
if presentMap[crlID(crl)] {
continue
}
// Note: baseCRLPath already ends in a slash.
if err := sc.Storage.Delete(sc.Context, baseCRLPath+crl); err != nil {
return fmt.Errorf("failed cleaning up orphaned CRL %v: %w", crl, err)
}
}
return nil
}
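
`_cleanupInternalCRLMapping` is essentially a mark-and-sweep over storage keys. Below is a self-contained sketch of the same pattern, with an in-memory map standing in for the storage backend; `sweep` and its parameters are illustrative names, not the backend's API.

```go
package main

import (
	"fmt"
	"strings"
)

// sweep deletes every CRL entry under base whose ID is not referenced by an
// issuer, skipping the config entry and sub-folders, as in the helper above.
// Delta CRLs share their parent's ID with a "-delta" suffix.
func sweep(storage map[string][]byte, base string, referenced map[string]bool) {
	for key := range storage { // deleting during range is safe in Go
		if !strings.HasPrefix(key, base) {
			continue
		}
		name := strings.TrimPrefix(key, base)
		if name == "config" || strings.Contains(name, "/") {
			continue
		}
		id := strings.TrimSuffix(name, "-delta")
		if !referenced[id] {
			delete(storage, key)
		}
	}
}

func main() {
	storage := map[string][]byte{
		"crls/config":    nil,
		"crls/aaa":       nil,
		"crls/aaa-delta": nil,
		"crls/bbb":       nil, // unreferenced; swept along with any delta
	}
	sweep(storage, "crls/", map[string]bool{"aaa": true})
	for key := range storage {
		fmt.Println("kept:", key)
	}
}
```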
func (sc *storageContext) _setInternalCRLConfig(mapping *internalCRLConfigEntry, path string) error {
if err := sc._cleanupInternalCRLMapping(mapping, path); err != nil {
return fmt.Errorf("failed to clean up internal CRL mapping: %w", err)
}
json, err := logical.StorageEntryJSON(path, mapping)
if err != nil {
return err

View File

@@ -192,7 +192,7 @@ func SubtestACMECaddy(configTemplate string, enableEAB bool) func(*testing.T, *V
// Start a cURL container.
curlRunner, err := hDocker.NewServiceRunner(hDocker.RunOptions{
ImageRepo: "docker.mirror.hashicorp.services/curlimages/curl",
ImageTag: "latest",
ImageTag: "8.4.0",
ContainerName: fmt.Sprintf("curl_test_%s", runID),
NetworkName: vaultNetwork,
Entrypoint: []string{"sleep", sleepTimer},

View File

@@ -275,6 +275,11 @@ func (b *backend) rotateIfRequired(ctx context.Context, req *logical.Request, ke
return nil
}
// We can't auto-rotate managed keys
if p.Type == keysutil.KeyType_MANAGED_KEY {
return nil
}
// Retrieve the latest version of the policy and determine if it is time to rotate.
latestKey := p.Keys[strconv.Itoa(p.LatestVersion)]
if time.Now().After(latestKey.CreationTime.Add(p.AutoRotatePeriod)) {
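
The guard above reduces to a time comparison; a hedged sketch with simplified field names (the real code reads `latestKey.CreationTime` and `p.AutoRotatePeriod`):

```go
package main

import (
	"fmt"
	"time"
)

// rotationDue reports whether a key created at creation with the given
// auto-rotate period is due for rotation at now. Managed keys are excluded
// before this check ever runs, per the guard above.
func rotationDue(creation time.Time, period time.Duration, now time.Time) bool {
	if period == 0 {
		return false // auto-rotation disabled
	}
	return now.After(creation.Add(period))
}

func main() {
	created := time.Now().Add(-48 * time.Hour)
	fmt.Println(rotationDue(created, 24*time.Hour, time.Now())) // true: past due
	fmt.Println(rotationDue(created, 0, time.Now()))            // false: disabled
}
```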

View File

@@ -7,6 +7,7 @@ import (
"context"
"crypto/rand"
"encoding/base64"
"errors"
"fmt"
"github.com/hashicorp/vault/helper/constants"
@@ -141,7 +142,23 @@ func (b *backend) pathDatakeyWrite(ctx context.Context, req *logical.Request, d
return nil, err
}
ciphertext, err := p.Encrypt(ver, context, nonce, base64.StdEncoding.EncodeToString(newKey))
var managedKeyFactory ManagedKeyFactory
if p.Type == keysutil.KeyType_MANAGED_KEY {
managedKeySystemView, ok := b.System().(logical.ManagedKeySystemView)
if !ok {
return nil, errors.New("unsupported system view")
}
managedKeyFactory = ManagedKeyFactory{
managedKeyParams: keysutil.ManagedKeyParameters{
ManagedKeySystemView: managedKeySystemView,
BackendUUID: b.backendUUID,
Context: ctx,
},
}
}
ciphertext, err := p.EncryptWithFactory(ver, context, nonce, base64.StdEncoding.EncodeToString(newKey), nil, managedKeyFactory)
if err != nil {
switch err.(type) {
case errutil.UserError:
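
A reduced sketch of the factory-selection pattern introduced above: only managed keys get a factory, and a missing managed-key system view is surfaced as an error. The types below (`keyType`, `factory`) are hypothetical stand-ins, not the real keysutil API.

```go
package main

import (
	"errors"
	"fmt"
)

type keyType int

const (
	keyTypeAES keyType = iota
	keyTypeManaged
)

// factory is a hypothetical stand-in for keysutil's managed-key factory.
type factory struct{ backendUUID string }

// encrypt mirrors the pathDatakeyWrite change above: managed keys require a
// managed-key system view, from which a factory is built and handed to the
// encryption call; other key types encrypt without one.
func encrypt(t keyType, haveManagedView bool, plaintext string) (string, error) {
	var f *factory
	if t == keyTypeManaged {
		if !haveManagedView {
			return "", errors.New("unsupported system view")
		}
		f = &factory{backendUUID: "example-uuid"} // hypothetical parameters
	}
	_ = f // the real code passes the factory to p.EncryptWithFactory here
	return fmt.Sprintf("ciphertext(%s)", plaintext), nil
}

func main() {
	out, err := encrypt(keyTypeManaged, true, "datakey")
	fmt.Println(out, err)
}
```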

View File

@@ -218,6 +218,10 @@ func (b *backend) pathKeysConfigWrite(ctx context.Context, req *logical.Request,
p.AutoRotatePeriod = autoRotatePeriod
persistNeeded = true
}
if p.Type == keysutil.KeyType_MANAGED_KEY && autoRotatePeriod != 0 {
return logical.ErrorResponse("Auto rotation can not be set for managed keys"), nil
}
}
if !persistNeeded {

View File

@@ -353,10 +353,6 @@ func (b *backend) pathSignWrite(ctx context.Context, req *logical.Request, d *fr
return logical.ErrorResponse(err.Error()), logical.ErrInvalidRequest
}
if hashAlgorithm == keysutil.HashTypeNone && (!prehashed || sigAlgorithm != "pkcs1v15") {
return logical.ErrorResponse("hash_algorithm=none requires both prehashed=true and signature_algorithm=pkcs1v15"), logical.ErrInvalidRequest
}
// Get the policy
p, _, err := b.GetPolicy(ctx, keysutil.PolicyRequest{
Storage: req.Storage,
@@ -377,6 +373,13 @@ func (b *backend) pathSignWrite(ctx context.Context, req *logical.Request, d *fr
return logical.ErrorResponse(fmt.Sprintf("key type %v does not support signing", p.Type)), logical.ErrInvalidRequest
}
// Allow managed keys to specify no hash algo without additional conditions.
if hashAlgorithm == keysutil.HashTypeNone && p.Type != keysutil.KeyType_MANAGED_KEY {
if !prehashed || sigAlgorithm != "pkcs1v15" {
return logical.ErrorResponse("hash_algorithm=none requires both prehashed=true and signature_algorithm=pkcs1v15"), logical.ErrInvalidRequest
}
}
batchInputRaw := d.Raw["batch_input"]
var batchInputItems []batchRequestSignItem
if batchInputRaw != nil {
@@ -419,8 +422,10 @@ func (b *backend) pathSignWrite(ctx context.Context, req *logical.Request, d *fr
if p.Type.HashSignatureInput() && !prehashed {
hf := keysutil.HashFuncMap[hashAlgorithm]()
hf.Write(input)
input = hf.Sum(nil)
if hf != nil {
hf.Write(input)
input = hf.Sum(nil)
}
}
contextRaw := item["context"]
@@ -606,10 +611,6 @@ func (b *backend) pathVerifyWrite(ctx context.Context, req *logical.Request, d *
return logical.ErrorResponse(err.Error()), logical.ErrInvalidRequest
}
if hashAlgorithm == keysutil.HashTypeNone && (!prehashed || sigAlgorithm != "pkcs1v15") {
return logical.ErrorResponse("hash_algorithm=none requires both prehashed=true and signature_algorithm=pkcs1v15"), logical.ErrInvalidRequest
}
// Get the policy
p, _, err := b.GetPolicy(ctx, keysutil.PolicyRequest{
Storage: req.Storage,
@@ -630,6 +631,13 @@ func (b *backend) pathVerifyWrite(ctx context.Context, req *logical.Request, d *
return logical.ErrorResponse(fmt.Sprintf("key type %v does not support verification", p.Type)), logical.ErrInvalidRequest
}
// Allow managed keys to specify no hash algo without additional conditions.
if hashAlgorithm == keysutil.HashTypeNone && p.Type != keysutil.KeyType_MANAGED_KEY {
if !prehashed || sigAlgorithm != "pkcs1v15" {
return logical.ErrorResponse("hash_algorithm=none requires both prehashed=true and signature_algorithm=pkcs1v15"), logical.ErrInvalidRequest
}
}
response := make([]batchResponseVerifyItem, len(batchInputItems))
for i, item := range batchInputItems {
@@ -657,8 +665,10 @@ func (b *backend) pathVerifyWrite(ctx context.Context, req *logical.Request, d *
if p.Type.HashSignatureInput() && !prehashed {
hf := keysutil.HashFuncMap[hashAlgorithm]()
hf.Write(input)
input = hf.Sum(nil)
if hf != nil {
hf.Write(input)
input = hf.Sum(nil)
}
}
contextRaw := item["context"]
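
The relaxed validation in the sign and verify paths above reduces to a small predicate; here is a sketch with illustrative names (`allowNoneHash` is not a real Vault function).

```go
package main

import "fmt"

// allowNoneHash mirrors the guard above: hash_algorithm=none is accepted for
// managed keys unconditionally; for all other key types it additionally
// requires prehashed=true and signature_algorithm=pkcs1v15.
func allowNoneHash(managedKey, prehashed bool, sigAlgorithm string) bool {
	if managedKey {
		return true
	}
	return prehashed && sigAlgorithm == "pkcs1v15"
}

func main() {
	fmt.Println(allowNoneHash(true, false, ""))         // managed key: allowed
	fmt.Println(allowNoneHash(false, true, "pkcs1v15")) // prehashed PKCS#1 v1.5: allowed
	fmt.Println(allowNoneHash(false, false, "pss"))     // otherwise: rejected
}
```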

3
changelog/14998.txt Normal file
View File

@@ -0,0 +1,3 @@
```release-note:improvement
ui: Update mount backend form to use selectable cards
```

4
changelog/17076.txt Normal file
View File

@@ -0,0 +1,4 @@
```release-note:improvement
core/cli: Warn when CLI requests are made without VAULT_ADDR or the -address flag set.
```

3
changelog/19811.txt Normal file
View File

@@ -0,0 +1,3 @@
```release-note:bug
cli/kv: Undelete now properly handles KV-V2 mount paths that are more than one layer deep.
```

View File

@@ -1,3 +1,3 @@
```release-note:feature
auto-auth: support ldap auth
```release-note:improvement
auto-auth: added support for LDAP auto-auth
```

View File

@@ -1,3 +0,0 @@
```release-note:bug
core: All subloggers now reflect configured log level on reload.
```

5
changelog/22185.txt Normal file
View File

@@ -0,0 +1,5 @@
```release-note:improvement
auth/ldap: introduce cap/ldap.Client for LDAP authentication
auth/ldap: deprecates `connection_timeout` in favor of `request_timeout` for timeouts
sdk/ldaputil: deprecates Client in favor of cap/ldap.Client
```

3
changelog/22333.txt Normal file
View File

@@ -0,0 +1,3 @@
```release-note:improvement
ui: Implement the Helios Design System copy button component, making copy buttons accessible
```

3
changelog/22812.txt Normal file
View File

@@ -0,0 +1,3 @@
```release-note:improvement
agent: allow users to specify files for child process stdout/stderr
```

3
changelog/22855.txt Normal file
View File

@@ -0,0 +1,3 @@
```release-note:bug
ui: don't exclude features present on license
```

3
changelog/22926.txt Normal file
View File

@@ -0,0 +1,3 @@
```release-note:improvement
ui: Adds mount configuration details to Kubernetes secrets engine configuration view
```

5
changelog/22994.txt Normal file
View File

@@ -0,0 +1,5 @@
```release-note:improvement
auth/azure: Add support for Azure workload identity authentication (see issue
#18257). Update go-kms-wrapping dependency to include [PR
#155](https://github.com/hashicorp/go-kms-wrapping/pull/155)
```

3
changelog/22996.txt Normal file
View File

@@ -0,0 +1,3 @@
```release-note:improvement
auto-auth/azure: Support setting the `authenticate_from_environment` variable to the string literals "true" and "false", in addition to boolean values.
```

4
changelog/22997.txt Normal file
View File

@@ -0,0 +1,4 @@
```release-note:change
events: Lower the log level for processing an event from info to debug.
```

3
changelog/23007.txt Normal file
View File

@@ -0,0 +1,3 @@
```release-note:bug
secrets/pki: Fix removal of issuers to clean up unreferenced CRLs.
```

3
changelog/23010.txt Normal file
View File

@@ -0,0 +1,3 @@
```release-note:bug
secrets/consul: Fix revocations when Vault's Consul access token uses namespace- and admin-partition-specific policies
```

7
changelog/23013.txt Normal file
View File

@@ -0,0 +1,7 @@
```release-note:bug
storage/consul: fix a bug where an active node in a specific sort of network
partition could continue to write data to Consul after a new leader was elected,
potentially causing data loss or corruption for keys with many concurrent
writers. For Enterprise clusters, this could corrupt the merkle trees, leading
to a failure to complete merkle sync without a full re-index.
```

5
changelog/23022.txt Normal file
View File

@@ -0,0 +1,5 @@
```release-note:improvement
core: update sys/seal-status (and the CLI command vault status) to report the
type of the seal when unsealed, as well as the type of the recovery seal when
using an auto-seal.
```

3
changelog/23025.txt Normal file
View File

@@ -0,0 +1,3 @@
```release-note:bug
ui (enterprise): Fix error message when generating SSH credential with control group
```

4
changelog/23042.txt Normal file
View File

@@ -0,0 +1,4 @@
```release-note:bug
events: Ensure subscription resources are cleaned up on close.
```

3
changelog/23047.txt Normal file
View File

@@ -0,0 +1,3 @@
```release-note:improvement
api: Add a new `mount_type` field to Vault responses, returning the mount's type (e.g. `kv` for KV v1/v2) when appropriate.
```

3
changelog/23050.txt Normal file
View File

@@ -0,0 +1,3 @@
```release-note:deprecation
auth/centrify: Centrify plugin is deprecated as of 1.15, slated for removal in 1.17
```

3
changelog/23059.txt Normal file
View File

@@ -0,0 +1,3 @@
```release-note:improvement
auth/azure: Added configurable retry options for the Azure API
```

3
changelog/23060.txt Normal file
View File

@@ -0,0 +1,3 @@
```release-note:change
auth/azure: Update plugin to v0.16.2
```

3
changelog/23066.txt Normal file
View File

@@ -0,0 +1,3 @@
```release-note:bug
ui: Fix an issue where the confirm-delete dropdown was cut off
```

3
changelog/23103.txt Normal file
View File

@@ -0,0 +1,3 @@
```release-note:bug
cap/ldap: Downgrade the go-ldap client from v3.4.5 to v3.4.4 due to a race condition
```

3
changelog/23118.txt Normal file
View File

@@ -0,0 +1,3 @@
```release-note:bug
ldaputil: Disable tests for ARM64
```

3
changelog/23119.txt Normal file
View File

@@ -0,0 +1,3 @@
```release-note:improvement
ui: Added allowed_domains_template field for CA type role in SSH engine
```

3
changelog/23123.txt Normal file
View File

@@ -0,0 +1,3 @@
```release-note:bug
ui: Fix a filter and search bug in secrets engines
```

3
changelog/23143.txt Normal file
View File

@@ -0,0 +1,3 @@
```release-note:improvement
ui: Surface a warning banner if the UI has stopped auto-refreshing the token
```

3
changelog/23155.txt Normal file
View File

@@ -0,0 +1,3 @@
```release-note:bug
core: Fix password policy listing to include policies whose names contain / characters.
```

3
changelog/23160.txt Normal file
View File

@@ -0,0 +1,3 @@
```release-note:improvement
replication: Add re-index status metric to telemetry
```

Some files were not shown because too many files have changed in this diff.