Bump boulder version to release-2023-06-12

Arjan H
2023-06-13 18:46:42 +02:00
parent 66b51217df
commit d7f4c10fd9
10 changed files with 42 additions and 116 deletions

View File

@@ -8,7 +8,7 @@ TMP_DIR=$(pwd)/tmp
rm -rf $TMP_DIR && mkdir -p $TMP_DIR/{admin,bin,logs,src}
boulderDir=$TMP_DIR/src
boulderTag="release-2023-05-22"
boulderTag="release-2023-06-12"
boulderUrl="https://github.com/letsencrypt/boulder/"
cloneDir=$(pwd)/..

View File

@@ -1,5 +1,5 @@
diff --git a/docker-compose.yml b/docker-compose.yml
-index 4fe5b4749..e70a007ef 100644
+index e63e560cd..676d94ada 100644
--- a/docker-compose.yml
+++ b/docker-compose.yml
@@ -1,8 +1,9 @@
@@ -38,10 +38,10 @@ index 4fe5b4749..e70a007ef 100644
entrypoint: labca/entrypoint.sh
working_dir: &boulder_working_dir /opt/boulder
logging:
-@@ -74,30 +74,32 @@ services:
+@@ -74,8 +74,10 @@ services:
bconsul:
- image: hashicorp/consul:1.13.1
+ image: hashicorp/consul:1.14.2
+ depends_on:
+ - control
volumes:
@@ -50,6 +50,10 @@ index 4fe5b4749..e70a007ef 100644
networks:
consulnet:
ipv4_address: 10.55.55.10
@@ -83,25 +85,24 @@ services:
ipv4_address: 10.77.77.10
rednet:
ipv4_address: 10.88.88.10
- command: "consul agent -dev -config-format=hcl -config-file=/test/consul/config.hcl"
+ command: "consul agent -dev -config-format=hcl -config-file=/opt/boulder/labca/consul/config.hcl"
@@ -68,7 +72,6 @@ index 4fe5b4749..e70a007ef 100644
+ - ldata:/opt/labca/data
+ - nginx_html:/opt/wwwstatic
+ - backup:/opt/backup
+ #- .:/boulder
+ - boulder_data:/opt/boulder/labca
expose:
- 3000
@@ -82,7 +85,7 @@ index 4fe5b4749..e70a007ef 100644
logging:
driver: "json-file"
options:
-@@ -114,37 +116,45 @@ services:
+@@ -118,37 +119,45 @@ services:
- 80:80
- 443:443
volumes:

View File

@@ -30,7 +30,7 @@ dockerComposeVersion="v2.5.0"
labcaUrl="https://github.com/hakwerk/labca/"
boulderUrl="https://github.com/letsencrypt/boulder/"
boulderTag="release-2023-05-22"
boulderTag="release-2023-06-12"
# Feature flags
flag_skip_redis=true

View File

@@ -55,11 +55,9 @@ sed -i -e "s|/hierarchy/intermediate-cert-rsa-a.pem|labca/test-ca.pem|" config/o
sed -i -e "s|/hierarchy/intermediate-cert-rsa-a.pem|labca/test-ca.pem|" config/crl-storer.json
sed -i -e "s|/hierarchy/intermediate-cert-rsa-a.pem|labca/test-ca.pem|" config/crl-updater.json
sed -i -e "s|/hierarchy/intermediate-cert-rsa-a.pem|labca/test-ca.pem|" config/ra.json
sed -i -e "s|/hierarchy/intermediate-cert-rsa-a.pem|labca/test-ca.pem|" issuer-ocsp-responder.json
sed -i -e "s|/hierarchy/intermediate-cert-rsa-a.pem|labca/test-ca.pem|" cert-ceremonies/intermediate-ocsp-rsa.yaml
sed -i -e "s|/hierarchy/intermediate-cert-rsa-a.pem|labca/test-ca.pem|" v2_integration.py
sed -i -e "s|/hierarchy/root-cert-rsa.pem|labca/test-root.pem|" cert-ceremonies/root-ceremony-rsa.yaml
sed -i -e "s|/hierarchy/root-cert-rsa.pem|labca/test-root.pem|" cert-ceremonies/intermediate-ocsp-rsa.yaml
sed -i -e "s|/hierarchy/root-cert-rsa.pem|labca/test-root.pem|" cert-ceremonies/root-crl-rsa.yaml
sed -i -e "s|/hierarchy/root-cert-rsa.pem|labca/test-root.pem|" cert-ceremonies/intermediate-ceremony-rsa.yaml
sed -i -e "s|/hierarchy/root-cert-rsa.pem|labca/test-root.pem|" config/publisher.json
sed -i -e "s|/hierarchy/root-cert-rsa.pem|labca/test-root.pem|" config/wfe2.json
@@ -79,6 +77,10 @@ sed -i -e "s/\"dnsTimeout\": \".*\"/\"dnsTimeout\": \"3s\"/" config/ra.json
sed -i -e "s/\"dnsTimeout\": \".*\"/\"dnsTimeout\": \"3s\"/" config/va.json
sed -i -e "s/\"dnsTimeout\": \".*\"/\"dnsTimeout\": \"3s\"/" config/va-remote-a.json
sed -i -e "s/\"dnsTimeout\": \".*\"/\"dnsTimeout\": \"3s\"/" config/va-remote-b.json
sed -i -e "s/\"stdoutlevel\": 4,/\"stdoutlevel\": 6,/" config/ca-a.json
sed -i -e "s/\"stdoutlevel\": 4,/\"stdoutlevel\": 6,/" config/ca-b.json
sed -i -e "s/\"stdoutlevel\": 4,/\"stdoutlevel\": 6,/" config/va-remote-a.json
sed -i -e "s/\"stdoutlevel\": 4,/\"stdoutlevel\": 6,/" config/va-remote-b.json
if [ "$flag_skip_redis" == true ]; then
sed -i -e "s/^\(.*wait-for-it.sh.*4218\)/#\1/" entrypoint.sh

View File

@@ -1,8 +1,8 @@
diff --git a/cmd/config.go b/cmd/config.go
-index 99ff43d02..a91f51d7d 100644
+index 18213cf51..a612afd18 100644
--- a/cmd/config.go
+++ b/cmd/config.go
-@@ -460,7 +460,7 @@ type GRPCServerConfig struct {
+@@ -464,7 +464,7 @@ type GRPCServerConfig struct {
// this controls how long it takes before a client learns about changes to its
// backends.
// https://pkg.go.dev/google.golang.org/grpc/keepalive#ServerParameters

View File

@@ -1,5 +1,5 @@
diff --git a/cmd/crl-storer/main.go b/cmd/crl-storer/main.go
-index 1f75fc305..84bafea5f 100644
+index 073c62032..b59db781c 100644
--- a/cmd/crl-storer/main.go
+++ b/cmd/crl-storer/main.go
@@ -46,6 +46,9 @@ type Config struct {
@@ -20,4 +20,4 @@ index 1f75fc305..84bafea5f 100644
+ csi, err := storer.New(issuers, s3client, c.CRLStorer.S3Bucket, c.CRLStorer.LocalStorePath, scope, logger, clk)
cmd.FailOnError(err, "Failed to create CRLStorer impl")
- start, err := bgrpc.NewServer(c.CRLStorer.GRPC).Add(
+ start, err := bgrpc.NewServer(c.CRLStorer.GRPC, logger).Add(

View File

@@ -1,5 +1,5 @@
diff --git a/docker-compose.yml b/docker-compose.yml
-index 5eb8a5513..05d16611b 100644
+index 4b62cf28d..f2b242fb5 100644
--- a/docker-compose.yml
+++ b/docker-compose.yml
@@ -20,8 +20,6 @@ services:
@@ -43,9 +43,9 @@ index 5eb8a5513..05d16611b 100644
- ipv4_address: 10.33.33.3
-
bconsul:
- image: hashicorp/consul:1.13.1
+ image: hashicorp/consul:1.14.2
volumes:
-@@ -142,13 +120,6 @@ networks:
+@@ -146,13 +124,6 @@ networks:
config:
- subnet: 10.88.88.0/24

View File

@@ -1,5 +1,5 @@
diff --git a/docker-compose.yml b/docker-compose.yml
-index 05d16611b..4fe5b4749 100644
+index f2b242fb5..e63e560cd 100644
--- a/docker-compose.yml
+++ b/docker-compose.yml
@@ -8,10 +8,12 @@ services:
@@ -58,7 +58,7 @@ index 05d16611b..4fe5b4749 100644
logging:
- driver: none
- bproxysql:
-- image: proxysql/proxysql:2.4.4
+- image: proxysql/proxysql:2.5.2
- # The --initial flag force resets the ProxySQL database on startup. By
- # default, ProxySQL ignores new configuration if the database already
- # exists. Without this flag, new configuration wouldn't be applied until you
@@ -79,9 +79,9 @@ index 05d16611b..4fe5b4749 100644
+ restart: always
bconsul:
image: hashicorp/consul:1.13.1
@@ -84,26 +81,70 @@ services:
ipv4_address: 10.55.55.10
image: hashicorp/consul:1.14.2
@@ -88,26 +85,70 @@ services:
ipv4_address: 10.88.88.10
command: "consul agent -dev -config-format=hcl -config-file=/test/consul/config.hcl"
- netaccess:

View File

@@ -2,7 +2,13 @@ diff --git a/test/entrypoint.sh b/test/entrypoint.sh
index 5ca9929..f18e1d8 100755
--- a/test/entrypoint.sh
+++ b/test/entrypoint.sh
-@@ -16,6 +16,18 @@ wait_tcp_port boulder-mysql 3306
+@@ -13,12 +13,21 @@
# make sure we can reach the mysqldb.
./test/wait-for-it.sh boulder-mysql 3306
-# make sure we can reach the proxysql.
-./test/wait-for-it.sh bproxysql 6032
-
# create the database
MYSQL_CONTAINER=1 $DIR/create_db.sh

View File

@@ -1,5 +1,5 @@
diff --git a/cmd/ocsp-responder/main.go b/cmd/ocsp-responder/main.go
-index 39a0dac43..c7e0dc02d 100644
+index 52027e8cd..320a41917 100644
--- a/cmd/ocsp-responder/main.go
+++ b/cmd/ocsp-responder/main.go
@@ -88,7 +88,7 @@ type Config struct {
@@ -11,97 +11,12 @@ index 39a0dac43..c7e0dc02d 100644
// TLS client certificate, private key, and trusted root bundle.
TLS cmd.TLSConfig `validate:"required_without=Source,structonly"`
-@@ -154,49 +154,51 @@ as generated by Boulder's ceremony command.
+@@ -153,7 +153,7 @@ as generated by Boulder's ceremony command.
}
source, err = responder.NewMemorySourceFromFile(filename, logger)
cmd.FailOnError(err, fmt.Sprintf("Couldn't read file: %s", url.Path))
} else {
- // Set up the redis source and the combined multiplex source.
- rocspRWClient, err := rocsp_config.MakeClient(c.OCSPResponder.Redis, clk, scope)
- cmd.FailOnError(err, "Could not make redis client")
-
- err = rocspRWClient.Ping(context.Background())
- cmd.FailOnError(err, "pinging Redis")
-
- liveSigningPeriod := c.OCSPResponder.LiveSigningPeriod.Duration
- if liveSigningPeriod == 0 {
- liveSigningPeriod = 60 * time.Hour
- }
-
- tlsConfig, err := c.OCSPResponder.TLS.Load(scope)
- cmd.FailOnError(err, "TLS config")
-
- raConn, err := bgrpc.ClientSetup(c.OCSPResponder.RAService, tlsConfig, scope, clk)
- cmd.FailOnError(err, "Failed to load credentials and create gRPC connection to RA")
- rac := rapb.NewRegistrationAuthorityClient(raConn)
-
- maxInflight := c.OCSPResponder.MaxInflightSignings
- if maxInflight == 0 {
- maxInflight = 1000
+ if c.OCSPResponder.Redis != nil {
+ // Set up the redis source and the combined multiplex source.
+ rocspRWClient, err := rocsp_config.MakeClient(c.OCSPResponder.Redis, clk, scope)
+ cmd.FailOnError(err, "Could not make redis client")
+
+ err = rocspRWClient.Ping(context.Background())
+ cmd.FailOnError(err, "pinging Redis")
+
+ liveSigningPeriod := c.OCSPResponder.LiveSigningPeriod.Duration
+ if liveSigningPeriod == 0 {
+ liveSigningPeriod = 60 * time.Hour
+ }
+
+ tlsConfig, err := c.OCSPResponder.TLS.Load(scope)
+ cmd.FailOnError(err, "TLS config")
+
+ raConn, err := bgrpc.ClientSetup(c.OCSPResponder.RAService, tlsConfig, scope, clk)
+ cmd.FailOnError(err, "Failed to load credentials and create gRPC connection to RA")
+ rac := rapb.NewRegistrationAuthorityClient(raConn)
+
+ maxInflight := c.OCSPResponder.MaxInflightSignings
+ if maxInflight == 0 {
+ maxInflight = 1000
+ }
+ liveSource := live.New(rac, int64(maxInflight), c.OCSPResponder.MaxSigningWaiters)
+
+ rocspSource, err := redis_responder.NewRedisSource(rocspRWClient, liveSource, liveSigningPeriod, clk, scope, logger)
+ cmd.FailOnError(err, "Could not create redis source")
+
+ var dbMap *db.WrappedMap
+ if c.OCSPResponder.DB != (cmd.DBConfig{}) {
+ dbMap, err = sa.InitWrappedDb(c.OCSPResponder.DB, scope, logger)
+ cmd.FailOnError(err, "While initializing dbMap")
+ }
+
+ var sac sapb.StorageAuthorityReadOnlyClient
+ if c.OCSPResponder.SAService != nil {
+ saConn, err := bgrpc.ClientSetup(c.OCSPResponder.SAService, tlsConfig, scope, clk)
+ cmd.FailOnError(err, "Failed to load credentials and create gRPC connection to SA")
+ sac = sapb.NewStorageAuthorityReadOnlyClient(saConn)
+ }
+
+ source, err = redis_responder.NewCheckedRedisSource(rocspSource, dbMap, sac, scope, logger)
+ cmd.FailOnError(err, "Could not create checkedRedis source")
}
- liveSource := live.New(rac, int64(maxInflight), c.OCSPResponder.MaxSigningWaiters)
-
- rocspSource, err := redis_responder.NewRedisSource(rocspRWClient, liveSource, liveSigningPeriod, clk, scope, logger)
- cmd.FailOnError(err, "Could not create redis source")
-
- var dbMap *db.WrappedMap
- if c.OCSPResponder.DB != (cmd.DBConfig{}) {
- dbMap, err = sa.InitWrappedDb(c.OCSPResponder.DB, scope, logger)
- cmd.FailOnError(err, "While initializing dbMap")
- }
-
- var sac sapb.StorageAuthorityReadOnlyClient
- if c.OCSPResponder.SAService != nil {
- saConn, err := bgrpc.ClientSetup(c.OCSPResponder.SAService, tlsConfig, scope, clk)
- cmd.FailOnError(err, "Failed to load credentials and create gRPC connection to SA")
- sac = sapb.NewStorageAuthorityReadOnlyClient(saConn)
- }
-
- source, err = redis_responder.NewCheckedRedisSource(rocspSource, dbMap, sac, scope, logger)
- cmd.FailOnError(err, "Could not create checkedRedis source")
// Load the certificate from the file path.
issuerCerts := make([]*issuance.Certificate, len(c.OCSPResponder.IssuerCerts))
- } else {
+ } else if c.OCSPResponder.Redis != nil {
// Set up the redis source and the combined multiplex source.
rocspRWClient, err := rocsp_config.MakeClient(c.OCSPResponder.Redis, clk, scope)
cmd.FailOnError(err, "Could not make redis client")