labca/patches/storer_storer.patch
diff --git a/crl/storer/storer.go b/crl/storer/storer.go
index e0a49a19c..2b8090a5e 100644
--- a/crl/storer/storer.go
+++ b/crl/storer/storer.go
@@ -9,8 +9,12 @@ import (
"errors"
"fmt"
"io"
+ "io/fs"
"math/big"
+ "os"
+ "path/filepath"
"slices"
+ "sort"
"time"
"github.com/aws/aws-sdk-go-v2/service/s3"
@@ -40,6 +44,7 @@ type crlStorer struct {
cspb.UnsafeCRLStorerServer
s3Client simpleS3
s3Bucket string
+ localStorePath string
issuers map[issuance.NameID]*issuance.Certificate
uploadCount *prometheus.CounterVec
sizeHistogram *prometheus.HistogramVec
@@ -54,6 +59,7 @@ func New(
issuers []*issuance.Certificate,
s3Client simpleS3,
s3Bucket string,
+ localStorePath string,
stats prometheus.Registerer,
log blog.Logger,
clk clock.Clock,
@@ -84,6 +90,7 @@ func New(
issuers: issuersByNameID,
s3Client: s3Client,
s3Bucket: s3Bucket,
+ localStorePath: localStorePath,
uploadCount: uploadCount,
sizeHistogram: sizeHistogram,
latencyHistogram: latencyHistogram,
@@ -170,14 +177,21 @@ func (cs *crlStorer) UploadCRL(stream grpc.ClientStreamingServer[cspb.UploadCRLR
// additional safety check against clock skew and potential races, if multiple
// crl-updaters are working on the same shard at the same time. We only run
// these checks if we found a CRL, so we don't block uploading brand new CRLs.
- filename := fmt.Sprintf("%d/%d.crl", issuer.NameID(), shardIdx)
- prevObj, err := cs.s3Client.GetObject(stream.Context(), &s3.GetObjectInput{
- Bucket: &cs.s3Bucket,
- Key: &filename,
- })
+ var prevObj *s3.GetObjectOutput
+ var filename string
+ if cs.localStorePath == "" {
+ filename = fmt.Sprintf("%d/%d.crl", issuer.NameID(), shardIdx)
+ prevObj, err = cs.s3Client.GetObject(stream.Context(), &s3.GetObjectInput{
+ Bucket: &cs.s3Bucket,
+ Key: &filename,
+ })
+ } else {
+ prevObj, err = getLocalFile(cs.localStorePath, issuer.NameID())
+ }
+
if err != nil {
var smithyErr *smithyhttp.ResponseError
- if !errors.As(err, &smithyErr) || smithyErr.HTTPStatusCode() != 404 {
+ if !errors.Is(err, fs.ErrNotExist) && (!errors.As(err, &smithyErr) || smithyErr.HTTPStatusCode() != 404) {
return fmt.Errorf("getting previous CRL for %s: %w", crlId, err)
}
cs.log.Infof("No previous CRL found for %s, proceeding", crlId)
@@ -214,7 +228,7 @@ func (cs *crlStorer) UploadCRL(stream grpc.ClientStreamingServer[cspb.UploadCRLR
}
}
if !uriMatch {
- return fmt.Errorf("IDP does not match previous: %v !∩ %v", idpURIs, prevURIs)
+ cs.log.Warningf("IDP does not match previous: %v !∩ %v", idpURIs, prevURIs)
}
}
@@ -224,17 +238,21 @@ func (cs *crlStorer) UploadCRL(stream grpc.ClientStreamingServer[cspb.UploadCRLR
checksum := sha256.Sum256(crlBytes)
checksumb64 := base64.StdEncoding.EncodeToString(checksum[:])
crlContentType := "application/pkix-crl"
- _, err = cs.s3Client.PutObject(stream.Context(), &s3.PutObjectInput{
- Bucket: &cs.s3Bucket,
- Key: &filename,
- Body: bytes.NewReader(crlBytes),
- ChecksumAlgorithm: types.ChecksumAlgorithmSha256,
- ChecksumSHA256: &checksumb64,
- ContentType: &crlContentType,
- Metadata: map[string]string{"crlNumber": crlNumber.String()},
- Expires: &expires,
- CacheControl: &cacheControl,
- })
+ if cs.localStorePath == "" {
+ _, err = cs.s3Client.PutObject(stream.Context(), &s3.PutObjectInput{
+ Bucket: &cs.s3Bucket,
+ Key: &filename,
+ Body: bytes.NewReader(crlBytes),
+ ChecksumAlgorithm: types.ChecksumAlgorithmSha256,
+ ChecksumSHA256: &checksumb64,
+ ContentType: &crlContentType,
+ Metadata: map[string]string{"crlNumber": crlNumber.String()},
+ Expires: &expires,
+ CacheControl: &cacheControl,
+ })
+ } else {
+ err = storeLocalFile(cs.localStorePath, issuer.NameID(), crlNumber, shardIdx, bytes.NewReader(crlBytes))
+ }
latency := cs.clk.Now().Sub(start)
cs.latencyHistogram.WithLabelValues(issuer.Subject.CommonName).Observe(latency.Seconds())
@@ -253,3 +271,56 @@ func (cs *crlStorer) UploadCRL(stream grpc.ClientStreamingServer[cspb.UploadCRLR
return stream.SendAndClose(&emptypb.Empty{})
}
+
+func storeLocalFile(path string, nameID issuance.NameID, crlNumber *big.Int, shardIdx int64, crlBytes io.Reader) error {
+ // Write the CRL bytes to "<nameID>-<crlNumber>-<shardIdx>.crl" in the local store
+ fn := fmt.Sprintf("%s%c%d-%d-%d.crl", path, os.PathSeparator, nameID, crlNumber, shardIdx)
+ out, err := os.Create(fn)
+ if err != nil {
+ return fmt.Errorf("creating local crl file: %w", err)
+ }
+ defer out.Close()
+
+ _, err = io.Copy(out, crlBytes)
+ if err != nil {
+ return fmt.Errorf("storing crl locally: %w", err)
+ }
+
+ // Create/update the symlink
+ ln := fmt.Sprintf("%s%c%d.crl", path, os.PathSeparator, nameID)
+ lntmp := ln + ".new"
+ fn = fmt.Sprintf("%d-%d-%d.crl", nameID, crlNumber, shardIdx)
+
+ if err = os.Remove(lntmp); err != nil && !errors.Is(err, fs.ErrNotExist) {
+ return fmt.Errorf("removing symlink: %w", err)
+ }
+ if err = os.Symlink(fn, lntmp); err != nil {
+ return fmt.Errorf("creating symlink: %w", err)
+ }
+ if err = os.Rename(lntmp, ln); err != nil {
+ return fmt.Errorf("renaming symlink: %w", err)
+ }
+
+ // Remove old copies of this shard, keeping only the last five (sorted by name)
+ pt := fmt.Sprintf("%s%c%d-*-%d.crl", path, os.PathSeparator, nameID, shardIdx)
+ matches, _ := filepath.Glob(pt) // the pattern is fixed, so ErrBadPattern cannot occur
+ sort.Strings(matches)
+ for i := 0; i < len(matches)-5; i++ {
+ err := os.Remove(matches[i])
+ if err != nil {
+ return fmt.Errorf("deleting file %s: %w", matches[i], err)
+ }
+ }
+
+ return nil
+}
+
+// getLocalFile opens the current CRL for this issuer via its stable "<nameID>.crl"
+// symlink and wraps it in an s3.GetObjectOutput so the caller can treat both backends alike.
+func getLocalFile(path string, nameID issuance.NameID) (*s3.GetObjectOutput, error) {
+ fn := fmt.Sprintf("%s%c%d.crl", path, os.PathSeparator, nameID)
+ lf, err := os.Open(fn)
+ if err != nil {
+ return nil, err
+ }
+ return &s3.GetObjectOutput{Body: lf}, nil
+}
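
For reference, below is a minimal standalone sketch (not part of the patch) of the write-then-symlink-swap pattern that storeLocalFile relies on, so the stable "<nameID>.crl" name can be served from the local store while new shards are written. The writeCRLShard helper, its parameters, and the file layout are illustrative assumptions only.

// sketch.go: illustrates write-then-symlink-swap for local CRL storage (assumed names)
package main

import (
	"fmt"
	"os"
	"path/filepath"
)

// writeCRLShard writes data to "<dir>/<name>-<serial>.crl", then repoints the
// stable symlink "<dir>/<name>.crl" at it via a temporary link plus rename,
// so readers always see a complete CRL at the stable name.
func writeCRLShard(dir, name, serial string, data []byte) error {
	target := fmt.Sprintf("%s-%s.crl", name, serial)
	if err := os.WriteFile(filepath.Join(dir, target), data, 0o644); err != nil {
		return fmt.Errorf("writing shard: %w", err)
	}

	link := filepath.Join(dir, name+".crl")
	tmp := link + ".new"
	if err := os.Remove(tmp); err != nil && !os.IsNotExist(err) {
		return fmt.Errorf("removing stale temp link: %w", err)
	}
	if err := os.Symlink(target, tmp); err != nil {
		return fmt.Errorf("creating symlink: %w", err)
	}
	// Renaming over the stable name is atomic on POSIX filesystems.
	return os.Rename(tmp, link)
}

func main() {
	dir, err := os.MkdirTemp("", "crl-store")
	if err != nil {
		panic(err)
	}
	defer os.RemoveAll(dir)

	if err := writeCRLShard(dir, "12345", "0001", []byte("fake-crl-bytes")); err != nil {
		panic(err)
	}
	got, err := os.ReadFile(filepath.Join(dir, "12345.crl"))
	if err != nil {
		panic(err)
	}
	fmt.Printf("read %d bytes via the stable symlink\n", len(got))
}

Because the final os.Rename replaces the existing symlink atomically, anything serving the directory never sees a missing or half-written CRL, while the per-shard files keep the CRL number in their names so a few previous versions remain on disk.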