From 66f960e5e4cc8d07485e560105d43cd58e131b2b Mon Sep 17 00:00:00 2001
From: Arjan H
Date: Thu, 22 Dec 2022 19:34:34 +0100
Subject: [PATCH] Bump boulder version to release-2022-12-19

---
 install                            |  2 +-
 patches/docker-compose-redis.patch | 49 ++++++++++++++++--------------
 patches/docker-compose.patch       | 10 +++---
 patches/ocsp-responder_main.patch  | 20 ++++++------
 patches/ra_ra.patch                | 47 ++++++++++++++++++++++++++--
 5 files changed, 87 insertions(+), 41 deletions(-)

diff --git a/install b/install
index 62017e4..7e6a5cc 100755
--- a/install
+++ b/install
@@ -24,7 +24,7 @@ dockerComposeVersion="v2.5.0"
 
 labcaUrl="https://github.com/hakwerk/labca/"
 boulderUrl="https://github.com/letsencrypt/boulder/"
-boulderTag="release-2022-12-05"
+boulderTag="release-2022-12-19"
 
 # Feature flags
 flag_skip_redis=true
diff --git a/patches/docker-compose-redis.patch b/patches/docker-compose-redis.patch
index 1a479d6..8c6fc89 100644
--- a/patches/docker-compose-redis.patch
+++ b/patches/docker-compose-redis.patch
@@ -1,5 +1,5 @@
 diff --git a/docker-compose.yml b/docker-compose.yml
-index 02460285..ddde0231 100644
+index 81cdec17..28efa8ab 100644
 --- a/docker-compose.yml
 +++ b/docker-compose.yml
 @@ -24,8 +24,6 @@ services:
@@ -19,66 +19,70 @@ index 02460285..ddde0231 100644
        - bconsul
      entrypoint: test/entrypoint.sh
      working_dir: &boulder_working_dir /boulder
-@@ -65,79 +62,6 @@ services:
+@@ -64,82 +61,6 @@ services:
+     command: mysqld --bind-address=0.0.0.0 --slow-query-log --log-output=TABLE --log-queries-not-using-indexes=ON
      logging:
-       driver: none 
--
+       driver: none
+-
+-  # TODO(#6517): replace all bredis_ services with those from
+-  # docker-compose.next.yml.
 -  bredis_1:
 -    image: redis:6.2.7
 -    volumes:
 -      - ./test/:/test/:cached
--    command: redis-server /test/redis.config
+-    command: redis-server /test/redis-cluster.config
 -    networks:
 -      redisnet:
--        ipv4_address: 10.33.33.2 
+-        ipv4_address: 10.33.33.2
 -
 -  bredis_2:
 -    image: redis:6.2.7
 -    volumes:
 -      - ./test/:/test/:cached
--    command: redis-server /test/redis.config
+-    command: redis-server /test/redis-cluster.config
 -    networks:
 -      redisnet:
--        ipv4_address: 10.33.33.3 
+-        ipv4_address: 10.33.33.3
 -
 -  bredis_3:
 -    image: redis:6.2.7
 -    volumes:
 -      - ./test/:/test/:cached
--    command: redis-server /test/redis.config
+-    command: redis-server /test/redis-cluster.config
 -    networks:
 -      redisnet:
--        ipv4_address: 10.33.33.4 
+-        ipv4_address: 10.33.33.4
 -
 -  bredis_4:
 -    image: redis:6.2.7
 -    volumes:
 -      - ./test/:/test/:cached
--    command: redis-server /test/redis.config
+-    command: redis-server /test/redis-cluster.config
 -    networks:
 -      redisnet:
--        ipv4_address: 10.33.33.5 
+-        ipv4_address: 10.33.33.5
 -
 -  bredis_5:
 -    image: redis:6.2.7
 -    volumes:
 -      - ./test/:/test/:cached
--    command: redis-server /test/redis.config
+-    command: redis-server /test/redis-cluster.config
 -    networks:
 -      redisnet:
--        ipv4_address: 10.33.33.6 
+-        ipv4_address: 10.33.33.6
 -
 -  bredis_6:
 -    image: redis:6.2.7
 -    volumes:
 -      - ./test/:/test/:cached
--    command: redis-server /test/redis.config
+-    command: redis-server /test/redis-cluster.config
 -    networks:
 -      redisnet:
--        ipv4_address: 10.33.33.7 
--
+-        ipv4_address: 10.33.33.7
+-
+-  # TODO(#6517): remove bredis_clusterer.
 -  bredis_clusterer:
 -    image: redis:6.2.7
+-    environment:
+-      BOULDER_CONFIG_DIR: *boulder_config_dir
 -    volumes:
 -      - ./test/:/test/:cached
 -      - ./cluster/:/cluster/:cached
@@ -92,14 +96,13 @@ index 02460285..ddde0231 100644
 -      - bredis_6
 -    networks:
 -      redisnet:
--        ipv4_address: 10.33.33.10 
--      aliases:
--        - boulder-redis-clusterer
--
+-        ipv4_address: 10.33.33.10
+-        aliases:
+-          - boulder-redis-clusterer
+ 
   bconsul:
     image: hashicorp/consul:1.13.1
-    volumes:
-@@ -174,13 +98,6 @@ networks:
+@@ -177,13 +98,6 @@ networks:
     config:
       - subnet: 10.88.88.0/24
diff --git a/patches/docker-compose.patch b/patches/docker-compose.patch
index a157e6b..2d441bd 100644
--- a/patches/docker-compose.patch
+++ b/patches/docker-compose.patch
@@ -1,13 +1,13 @@
 diff --git a/docker-compose.yml b/docker-compose.yml
-index ddde0231..b06711b7 100644
+index 28efa8ab..1501dd10 100644
 --- a/docker-compose.yml
 +++ b/docker-compose.yml
 @@ -8,7 +8,7 @@ services:
      image: &boulder_image letsencrypt/boulder-tools:${BOULDER_TOOLS_TAG:-go1.19.2_2022-10-05}
      environment:
        FAKE_DNS: 10.77.77.77
--      BOULDER_CONFIG_DIR: test/config
-+      BOULDER_CONFIG_DIR: labca/config
+-      BOULDER_CONFIG_DIR: &boulder_config_dir test/config
++      BOULDER_CONFIG_DIR: &boulder_config_dir labca/config
        GOFLAGS: -mod=vendor
  # Go 1.18 turned off SHA-1 validation on CSRs (and certs, but that doesn't
  # affect us) by default, but it can be turned back on with the x509sha1
@@ -56,7 +56,7 @@ index ddde0231..b06711b7 100644
  # small.
      command: mysqld --bind-address=0.0.0.0 --slow-query-log --log-output=TABLE --log-queries-not-using-indexes=ON
      logging:
--      driver: none 
+-      driver: none
 +      driver: "json-file"
 +      options:
 +        max-size: "500k"
@@ -75,7 +75,7 @@ index ddde0231..b06711b7 100644
 -    environment:
 -      GO111MODULE: "on"
 -      GOFLAGS: -mod=vendor
--      BOULDER_CONFIG_DIR: test/config
+-      BOULDER_CONFIG_DIR: *boulder_config_dir
      networks:
        - bluenet
      volumes:
diff --git a/patches/ocsp-responder_main.patch b/patches/ocsp-responder_main.patch
index 88e3c58..db9a03f 100644
--- a/patches/ocsp-responder_main.patch
+++ b/patches/ocsp-responder_main.patch
@@ -1,16 +1,16 @@
 diff --git a/cmd/ocsp-responder/main.go b/cmd/ocsp-responder/main.go
-index e70db31b..ddab5d45 100644
+index a5f91a07..940c18a9 100644
 --- a/cmd/ocsp-responder/main.go
 +++ b/cmd/ocsp-responder/main.go
-@@ -166,42 +166,44 @@ as generated by Boulder's ceremony command.
+@@ -178,42 +178,44 @@ as generated by Boulder's ceremony command.
  	dbMap, err := sa.InitWrappedDb(config.DB, scope, logger)
  	cmd.FailOnError(err, "While initializing dbMap")
  
 -	// Set up the redis source and the combined multiplex source.
--	rocspReader, err := rocsp_config.MakeClient(&c.OCSPResponder.Redis, clk, scope)
-- rocspReader, err := rocsp_config.MakeClient(&c.OCSPResponder.Redis, clk, scope) +- rocspRWClient, err := rocsp_config.MakeClient(&c.OCSPResponder.Redis, clk, scope) - cmd.FailOnError(err, "Could not make redis client") - -- err = rocspReader.Ping(context.Background()) +- err = rocspRWClient.Ping(context.Background()) - cmd.FailOnError(err, "pinging Redis") - - liveSigningPeriod := c.OCSPResponder.LiveSigningPeriod.Duration @@ -29,9 +29,9 @@ index e70db31b..ddab5d45 100644 - if maxInflight == 0 { - maxInflight = 1000 - } -- liveSource := live.New(rac, int64(maxInflight)) +- liveSource := live.New(rac, int64(maxInflight), c.OCSPResponder.MaxSigningWaiters) - -- rocspSource, err := redis_responder.NewRedisSource(rocspReader, liveSource, liveSigningPeriod, clk, scope, logger) +- rocspSource, err := redis_responder.NewRedisSource(rocspRWClient, liveSource, liveSigningPeriod, clk, scope, logger) - cmd.FailOnError(err, "Could not create redis source") - - var sac sapb.StorageAuthorityReadOnlyClient @@ -41,10 +41,10 @@ index e70db31b..ddab5d45 100644 - sac = sapb.NewStorageAuthorityReadOnlyClient(saConn) + if c.OCSPResponder.Redis.Addrs != nil { + // Set up the redis source and the combined multiplex source. -+ rocspReader, err := rocsp_config.MakeClient(&c.OCSPResponder.Redis, clk, scope) ++ rocspRWClient, err := rocsp_config.MakeClient(&c.OCSPResponder.Redis, clk, scope) + cmd.FailOnError(err, "Could not make redis client") + -+ err = rocspReader.Ping(context.Background()) ++ err = rocspRWClient.Ping(context.Background()) + cmd.FailOnError(err, "pinging Redis") + + liveSigningPeriod := c.OCSPResponder.LiveSigningPeriod.Duration @@ -63,9 +63,9 @@ index e70db31b..ddab5d45 100644 + if maxInflight == 0 { + maxInflight = 1000 + } -+ liveSource := live.New(rac, int64(maxInflight)) ++ liveSource := live.New(rac, int64(maxInflight), c.OCSPResponder.MaxSigningWaiters) + -+ rocspSource, err := redis_responder.NewRedisSource(rocspReader, liveSource, liveSigningPeriod, clk, scope, logger) ++ rocspSource, err := redis_responder.NewRedisSource(rocspRWClient, liveSource, liveSigningPeriod, clk, scope, logger) + cmd.FailOnError(err, "Could not create redis source") + + var sac sapb.StorageAuthorityReadOnlyClient diff --git a/patches/ra_ra.patch b/patches/ra_ra.patch index dc96a64..1e0c211 100644 --- a/patches/ra_ra.patch +++ b/patches/ra_ra.patch @@ -1,5 +1,5 @@ diff --git a/ra/ra.go b/ra/ra.go -index dd15485e..24e3a4da 100644 +index 5316edba..f1ff1200 100644 --- a/ra/ra.go +++ b/ra/ra.go @@ -32,7 +32,6 @@ import ( @@ -10,7 +10,16 @@ index dd15485e..24e3a4da 100644 "github.com/letsencrypt/boulder/probs" pubpb "github.com/letsencrypt/boulder/publisher/proto" rapb "github.com/letsencrypt/boulder/ra/proto" -@@ -477,7 +476,7 @@ func (ra *RegistrationAuthorityImpl) validateContacts(contacts []string) error { +@@ -393,7 +392,7 @@ func (ra *RegistrationAuthorityImpl) checkRegistrationLimits(ctx context.Context + ra.log.Infof("Rate limit exceeded, RegistrationsByIPRange, IP: %s", ip) + // For the fuzzyRegLimit we use a new error message that specifically + // mentions that the limit being exceeded is applied to a *range* of IPs +- return berrors.RateLimitError(0, "too many registrations for this IP range") ++ return berrors.RateLimitError(ra.rlPolicies.RateLimitsURL(), 0, "too many registrations for this IP range") + } + ra.rateLimitCounter.WithLabelValues("registrations_by_ip_range", "pass").Inc() + +@@ -504,7 +503,7 @@ func (ra *RegistrationAuthorityImpl) validateContacts(contacts []string) error { contact, ) } @@ 
@@ -19,3 +28,37 @@ index dd15485e..24e3a4da 100644
  	if err != nil {
  		return err
  	}
+@@ -543,7 +542,7 @@ func (ra *RegistrationAuthorityImpl) checkPendingAuthorizationLimit(ctx context.
+ 		if countPB.Count >= limit.GetThreshold(noKey, regID) {
+ 			ra.rateLimitCounter.WithLabelValues("pending_authorizations_by_registration_id", "exceeded").Inc()
+ 			ra.log.Infof("Rate limit exceeded, PendingAuthorizationsByRegID, regID: %d", regID)
+-			return berrors.RateLimitError(0, "too many currently pending authorizations: %d", countPB.Count)
++			return berrors.RateLimitError(ra.rlPolicies.RateLimitsURL(), 0, "too many currently pending authorizations: %d", countPB.Count)
+ 		}
+ 		ra.rateLimitCounter.WithLabelValues("pending_authorizations_by_registration_id", "pass").Inc()
+ 	}
+@@ -623,7 +622,7 @@ func (ra *RegistrationAuthorityImpl) checkNewOrdersPerAccountLimit(ctx context.C
+ 	noKey := ""
+ 	if count.Count >= limit.GetThreshold(noKey, acctID) {
+ 		ra.rateLimitCounter.WithLabelValues("new_order_by_registration_id", "exceeded").Inc()
+-		return berrors.RateLimitError(0, "too many new orders recently")
++		return berrors.RateLimitError(ra.rlPolicies.RateLimitsURL(), 0, "too many new orders recently")
+ 	}
+ 	ra.rateLimitCounter.WithLabelValues("new_order_by_registration_id", "pass").Inc()
+ 	return nil
+@@ -1384,12 +1383,12 @@ func (ra *RegistrationAuthorityImpl) checkCertificatesPerNameLimit(ctx context.C
+ 		for _, name := range namesOutOfLimit {
+ 			subErrors = append(subErrors, berrors.SubBoulderError{
+ 				Identifier:   identifier.DNSIdentifier(name),
+-				BoulderError: berrors.RateLimitError(retryAfter, "too many certificates already issued. Retry after %s", retryString).(*berrors.BoulderError),
++				BoulderError: berrors.RateLimitError(ra.rlPolicies.RateLimitsURL(), retryAfter, "too many certificates already issued. Retry after %s", retryString).(*berrors.BoulderError),
+ 			})
+ 		}
+-		return berrors.RateLimitError(retryAfter, "too many certificates already issued for multiple names (%q and %d others). Retry after %s", namesOutOfLimit[0], len(namesOutOfLimit), retryString).(*berrors.BoulderError).WithSubErrors(subErrors)
++		return berrors.RateLimitError(ra.rlPolicies.RateLimitsURL(), retryAfter, "too many certificates already issued for multiple names (%q and %d others). Retry after %s", namesOutOfLimit[0], len(namesOutOfLimit), retryString).(*berrors.BoulderError).WithSubErrors(subErrors)
+ 	}
+-	return berrors.RateLimitError(retryAfter, "too many certificates already issued for %q. Retry after %s", namesOutOfLimit[0], retryString)
++	return berrors.RateLimitError(ra.rlPolicies.RateLimitsURL(), retryAfter, "too many certificates already issued for %q. Retry after %s", namesOutOfLimit[0], retryString)
+ 	}
+ 	ra.rateLimitCounter.WithLabelValues("certificates_for_domain", "pass").Inc()
+ 
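
Editorial note (not part of the patch): every ra/ra.go hunk above makes the same mechanical change — labca's fork of boulder's berrors package takes the operator's rate-limit documentation URL as an extra first argument, supplied at each call site via ra.rlPolicies.RateLimitsURL(), so rejection messages can point at the local CA's docs instead of letsencrypt.org. The following is only a minimal sketch of what such a helper could look like; the real signature, types, and RateLimitsURL() getter live in labca's fork and may differ.

	package berrors

	import (
		"fmt"
		"time"
	)

	// ErrorType loosely mirrors boulder's error classification (assumption).
	type ErrorType int

	// RateLimit marks an error as a rate-limit rejection.
	const RateLimit ErrorType = iota

	// BoulderError pairs a type with a human-readable detail string.
	type BoulderError struct {
		Type       ErrorType
		Detail     string
		RetryAfter time.Duration
	}

	func (e *BoulderError) Error() string { return e.Detail }

	// RateLimitError builds a rate-limit error. The rateLimitsURL parameter is
	// the new first argument seen throughout the diff; appending it to the
	// detail string is this sketch's guess at how the URL reaches the client.
	func RateLimitError(rateLimitsURL string, retryAfter time.Duration, msg string, args ...interface{}) error {
		return &BoulderError{
			Type:       RateLimit,
			RetryAfter: retryAfter,
			Detail:     fmt.Sprintf(msg, args...) + ": see " + rateLimitsURL,
		}
	}

Threading the URL through as an argument (rather than reading global config inside berrors) keeps the errors package free of configuration dependencies, which is presumably why every caller in the diff passes ra.rlPolicies.RateLimitsURL() explicitly.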