Merge pull request #1832 from hashicorp/unexpose-transit-structs

Not exposing structs from transit's package
Vishal Nayak authored on 2016-09-01 12:14:55 -04:00; committed by GitHub.
5 changed files with 63 additions and 63 deletions

File 1 of 5

@@ -534,7 +534,7 @@ func testAccStepDecryptDatakey(t *testing.T, name string,
func TestKeyUpgrade(t *testing.T) {
key, _ := uuid.GenerateRandomBytes(32)
- p := &Policy{
+ p := &policy{
Name: "test",
Key: key,
CipherMode: "aes-gcm",
@@ -555,7 +555,7 @@ func TestDerivedKeyUpgrade(t *testing.T) {
key, _ := uuid.GenerateRandomBytes(32)
context, _ := uuid.GenerateRandomBytes(32)
- p := &Policy{
+ p := &policy{
Name: "test",
Key: key,
CipherMode: "aes-gcm",
@@ -643,7 +643,7 @@ func testConvergentEncryptionCommon(t *testing.T, ver int) {
t.Fatalf("bad: expected error response, got %#v", *resp)
}
- p := &Policy{
+ p := &policy{
Name: "testkey",
CipherMode: "aes-gcm",
Derived: true,

File 2 of 5

@@ -26,7 +26,7 @@ type lockManager struct {
locksMutex sync.RWMutex
// If caching is enabled, the map of name to in-memory policy cache
- cache map[string]*Policy
+ cache map[string]*policy
// Used for global locking, and as the cache map mutex
cacheMutex sync.RWMutex
@@ -37,7 +37,7 @@ func newLockManager(cacheDisabled bool) *lockManager {
locks: map[string]*sync.RWMutex{},
}
if !cacheDisabled {
- lm.cache = map[string]*Policy{}
+ lm.cache = map[string]*policy{}
}
return lm
}
@@ -104,7 +104,7 @@ func (lm *lockManager) UnlockPolicy(lock *sync.RWMutex, lockType bool) {
// Get the policy with a read lock. If we get an error saying an exclusive lock
// is needed (for instance, for an upgrade/migration), give up the read lock,
// call again with an exclusive lock, then swap back out for a read lock.
- func (lm *lockManager) GetPolicyShared(storage logical.Storage, name string) (*Policy, *sync.RWMutex, error) {
+ func (lm *lockManager) GetPolicyShared(storage logical.Storage, name string) (*policy, *sync.RWMutex, error) {
p, lock, _, err := lm.getPolicyCommon(storage, name, false, false, false, shared)
if err == nil ||
(err != nil && err != errNeedExclusiveLock) {
@@ -124,7 +124,7 @@ func (lm *lockManager) GetPolicyShared(storage logical.Storage, name string) (*P
}
// Get the policy with an exclusive lock
- func (lm *lockManager) GetPolicyExclusive(storage logical.Storage, name string) (*Policy, *sync.RWMutex, error) {
+ func (lm *lockManager) GetPolicyExclusive(storage logical.Storage, name string) (*policy, *sync.RWMutex, error) {
p, lock, _, err := lm.getPolicyCommon(storage, name, false, false, false, exclusive)
return p, lock, err
}
@@ -132,7 +132,7 @@ func (lm *lockManager) GetPolicyExclusive(storage logical.Storage, name string)
// Get the policy with a read lock; if it returns that an exclusive lock is
// needed, retry. If successful, call one more time to get a read lock and
// return the value.
- func (lm *lockManager) GetPolicyUpsert(storage logical.Storage, name string, derived, convergent bool) (*Policy, *sync.RWMutex, bool, error) {
+ func (lm *lockManager) GetPolicyUpsert(storage logical.Storage, name string, derived, convergent bool) (*policy, *sync.RWMutex, bool, error) {
p, lock, _, err := lm.getPolicyCommon(storage, name, true, derived, convergent, shared)
if err == nil ||
(err != nil && err != errNeedExclusiveLock) {
@@ -155,10 +155,10 @@ func (lm *lockManager) GetPolicyUpsert(storage logical.Storage, name string, der
// When the function returns, a lock will be held on the policy if err == nil.
// It is the caller's responsibility to unlock.
- func (lm *lockManager) getPolicyCommon(storage logical.Storage, name string, upsert, derived, convergent, lockType bool) (*Policy, *sync.RWMutex, bool, error) {
+ func (lm *lockManager) getPolicyCommon(storage logical.Storage, name string, upsert, derived, convergent, lockType bool) (*policy, *sync.RWMutex, bool, error) {
lock := lm.policyLock(name, lockType)
- var p *Policy
+ var p *policy
var err error
// Check if it's in our cache. If so, return right away.
@@ -196,7 +196,7 @@ func (lm *lockManager) getPolicyCommon(storage logical.Storage, name string, ups
return nil, nil, false, fmt.Errorf("convergent encryption requires derivation to be enabled")
}
- p = &Policy{
+ p = &policy{
Name: name,
CipherMode: "aes-gcm",
Derived: derived,
@@ -271,7 +271,7 @@ func (lm *lockManager) DeletePolicy(storage logical.Storage, name string) error
defer lock.Unlock()
defer lm.cacheMutex.Unlock()
- var p *Policy
+ var p *policy
var err error
if lm.CacheActive() {
@@ -308,7 +308,7 @@ func (lm *lockManager) DeletePolicy(storage logical.Storage, name string) error
return nil
}
- func (lm *lockManager) getStoredPolicy(storage logical.Storage, name string) (*Policy, error) {
+ func (lm *lockManager) getStoredPolicy(storage logical.Storage, name string) (*policy, error) {
// Check if the policy already exists
raw, err := storage.Get("policy/" + name)
if err != nil {
@@ -319,8 +319,8 @@ func (lm *lockManager) getStoredPolicy(storage logical.Storage, name string) (*P
}
// Decode the policy
- policy := &Policy{
- Keys: KeyEntryMap{},
+ policy := &policy{
+ Keys: keyEntryMap{},
}
err = jsonutil.DecodeJSON(raw.Value, policy)
if err != nil {
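Aside (not part of the diff): the comments on GetPolicyShared and GetPolicyUpsert above describe a lock-upgrade dance: try the work under a shared lock, and if the common path reports that it needs an exclusive lock (for instance, for an upgrade/migration), drop the read lock, retry exclusively, then come back for a plain shared read. Below is a minimal sketch of that shape only; the names store, load, and errNeedExclusive are hypothetical and not taken from the PR.

package main

import (
	"errors"
	"fmt"
	"sync"
)

var errNeedExclusive = errors.New("need exclusive lock")

type store struct {
	mu       sync.RWMutex
	upgraded bool
	value    string
}

// load returns the value; until the one-time upgrade has run it refuses to
// proceed under a shared lock and asks the caller to retry exclusively.
func (s *store) load(exclusive bool) (string, error) {
	if !s.upgraded {
		if !exclusive {
			return "", errNeedExclusive
		}
		s.upgraded = true // the "migration", done only under the write lock
	}
	return s.value, nil
}

// getShared mirrors the shape of GetPolicyShared: shared lock first, upgrade
// to exclusive only when told to, then take the shared path one more time.
func (s *store) getShared() (string, error) {
	s.mu.RLock()
	v, err := s.load(false)
	if err != errNeedExclusive {
		s.mu.RUnlock()
		return v, err
	}
	s.mu.RUnlock()

	s.mu.Lock()
	_, err = s.load(true)
	s.mu.Unlock()
	if err != nil {
		return "", err
	}

	s.mu.RLock()
	defer s.mu.RUnlock()
	return s.load(false)
}

func main() {
	s := &store{value: "policy"}
	fmt.Println(s.getShared())
}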

File 3 of 5

@@ -91,7 +91,7 @@ func (b *backend) pathEncryptWrite(
}
// Get the policy
- var p *Policy
+ var p *policy
var lock *sync.RWMutex
var upserted bool
if req.Operation == logical.CreateOperation {

File 4 of 5

@@ -31,18 +31,18 @@ const (
const ErrTooOld = "ciphertext version is disallowed by policy (too old)"
- // KeyEntry stores the key and metadata
- type KeyEntry struct {
+ // keyEntry stores the key and metadata
+ type keyEntry struct {
Key []byte `json:"key"`
CreationTime int64 `json:"creation_time"`
}
- // KeyEntryMap is used to allow JSON marshal/unmarshal
- type KeyEntryMap map[int]KeyEntry
+ // keyEntryMap is used to allow JSON marshal/unmarshal
+ type keyEntryMap map[int]keyEntry
// MarshalJSON implements JSON marshaling
- func (kem KeyEntryMap) MarshalJSON() ([]byte, error) {
- intermediate := map[string]KeyEntry{}
+ func (kem keyEntryMap) MarshalJSON() ([]byte, error) {
+ intermediate := map[string]keyEntry{}
for k, v := range kem {
intermediate[strconv.Itoa(k)] = v
}
@@ -50,8 +50,8 @@ func (kem KeyEntryMap) MarshalJSON() ([]byte, error) {
}
// MarshalJSON implements JSON unmarshaling
- func (kem KeyEntryMap) UnmarshalJSON(data []byte) error {
- intermediate := map[string]KeyEntry{}
+ func (kem keyEntryMap) UnmarshalJSON(data []byte) error {
+ intermediate := map[string]keyEntry{}
if err := jsonutil.DecodeJSON(data, &intermediate); err != nil {
return err
}
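Aside: keyEntryMap above routes marshaling through a map[string]keyEntry so that the integer key versions become string keys in an ordinary JSON object, and UnmarshalJSON parses them back with strconv. A self-contained sketch of that round trip, using a stand-in entry type and plain encoding/json rather than transit's jsonutil:

package main

import (
	"encoding/json"
	"fmt"
	"strconv"
)

type entry struct {
	Key          []byte `json:"key"`
	CreationTime int64  `json:"creation_time"`
}

// versionMap mirrors the keyEntryMap idea: key versions as integer map keys.
type versionMap map[int]entry

// MarshalJSON converts the integer keys to strings so the stored form is a
// plain JSON object such as {"1": {...}, "2": {...}}.
func (vm versionMap) MarshalJSON() ([]byte, error) {
	intermediate := map[string]entry{}
	for k, v := range vm {
		intermediate[strconv.Itoa(k)] = v
	}
	return json.Marshal(intermediate)
}

// UnmarshalJSON reverses the conversion, parsing each string key back to int.
func (vm versionMap) UnmarshalJSON(data []byte) error {
	intermediate := map[string]entry{}
	if err := json.Unmarshal(data, &intermediate); err != nil {
		return err
	}
	for k, v := range intermediate {
		n, err := strconv.Atoi(k)
		if err != nil {
			return err
		}
		vm[n] = v
	}
	return nil
}

func main() {
	in := versionMap{1: {Key: []byte("k1"), CreationTime: 1472745295}}
	raw, err := json.Marshal(in)
	if err != nil {
		panic(err)
	}
	out := versionMap{}
	if err := json.Unmarshal(raw, &out); err != nil {
		panic(err)
	}
	fmt.Println(string(raw), out[1].CreationTime)
}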
@@ -67,10 +67,10 @@ func (kem KeyEntryMap) UnmarshalJSON(data []byte) error {
}
// Policy is the struct used to store metadata
- type Policy struct {
+ type policy struct {
Name string `json:"name"`
Key []byte `json:"key,omitempty"` //DEPRECATED
- Keys KeyEntryMap `json:"keys"`
+ Keys keyEntryMap `json:"keys"`
CipherMode string `json:"cipher"`
// Derived keys MUST provide a context and the master underlying key is
@@ -100,19 +100,19 @@ type Policy struct {
// ArchivedKeys stores old keys. This is used to keep the key loading time sane
// when there are huge numbers of rotations.
- type ArchivedKeys struct {
- Keys []KeyEntry `json:"keys"`
+ type archivedKeys struct {
+ Keys []keyEntry `json:"keys"`
}
- func (p *Policy) loadArchive(storage logical.Storage) (*ArchivedKeys, error) {
- archive := &ArchivedKeys{}
+ func (p *policy) loadArchive(storage logical.Storage) (*archivedKeys, error) {
+ archive := &archivedKeys{}
raw, err := storage.Get("archive/" + p.Name)
if err != nil {
return nil, err
}
if raw == nil {
- archive.Keys = make([]KeyEntry, 0)
+ archive.Keys = make([]keyEntry, 0)
return archive, nil
}
@@ -123,7 +123,7 @@ func (p *Policy) loadArchive(storage logical.Storage) (*ArchivedKeys, error) {
return archive, nil
}
- func (p *Policy) storeArchive(archive *ArchivedKeys, storage logical.Storage) error {
+ func (p *policy) storeArchive(archive *archivedKeys, storage logical.Storage) error {
// Encode the policy
buf, err := json.Marshal(archive)
if err != nil {
@@ -145,8 +145,8 @@ func (p *Policy) storeArchive(archive *ArchivedKeys, storage logical.Storage) er
// handleArchiving manages the movement of keys to and from the policy archive.
// This should *ONLY* be called from Persist() since it assumes that the policy
// will be persisted afterwards.
- func (p *Policy) handleArchiving(storage logical.Storage) error {
- // We need to move keys that are no longer accessible to ArchivedKeys, and keys
+ func (p *policy) handleArchiving(storage logical.Storage) error {
+ // We need to move keys that are no longer accessible to archivedKeys, and keys
// that now need to be accessible back here.
//
// For safety, because there isn't really a good reason to, we never delete
@@ -193,7 +193,7 @@ func (p *Policy) handleArchiving(storage logical.Storage) error {
// key version
if len(archive.Keys) < p.LatestVersion+1 {
// Increase the size of the archive slice
- newKeys := make([]KeyEntry, p.LatestVersion+1)
+ newKeys := make([]keyEntry, p.LatestVersion+1)
copy(newKeys, archive.Keys)
archive.Keys = newKeys
}
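Aside: the archive handling above (and the test comment later about "a blank 0 entry") relies on a convention where archive.Keys is indexed by key version, index 0 stays zero, and the slice is grown with make/copy when the latest version outruns it. A tiny sketch of that convention with placeholder types, not transit's:

package main

import "fmt"

type archivedKey struct {
	Version int
}

// ensureCapacity grows a version-indexed slice so that index latest is valid.
// Index 0 is intentionally a zero entry; version n is stored at position n.
func ensureCapacity(keys []archivedKey, latest int) []archivedKey {
	if len(keys) < latest+1 {
		grown := make([]archivedKey, latest+1)
		copy(grown, keys)
		keys = grown
	}
	return keys
}

func main() {
	archive := []archivedKey{{}} // blank 0 entry, as in the tests below
	for v := 1; v <= 3; v++ {
		archive = ensureCapacity(archive, v)
		archive[v] = archivedKey{Version: v}
	}
	// The highest archived version is len(archive)-1, matching the tests' check.
	fmt.Println(len(archive)-1, archive[3].Version)
}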
@@ -219,7 +219,7 @@ func (p *Policy) handleArchiving(storage logical.Storage) error {
return nil
}
- func (p *Policy) Persist(storage logical.Storage) error {
+ func (p *policy) Persist(storage logical.Storage) error {
err := p.handleArchiving(storage)
if err != nil {
return err
@@ -243,11 +243,11 @@ func (p *Policy) Persist(storage logical.Storage) error {
return nil
}
- func (p *Policy) Serialize() ([]byte, error) {
+ func (p *policy) Serialize() ([]byte, error) {
return json.Marshal(p)
}
- func (p *Policy) needsUpgrade() bool {
+ func (p *policy) needsUpgrade() bool {
// Ensure we've moved from Key -> Keys
if p.Key != nil && len(p.Key) > 0 {
return true
@@ -277,7 +277,7 @@ func (p *Policy) needsUpgrade() bool {
return false
}
- func (p *Policy) upgrade(storage logical.Storage) error {
+ func (p *policy) upgrade(storage logical.Storage) error {
persistNeeded := false
// Ensure we've moved from Key -> Keys
if p.Key != nil && len(p.Key) > 0 {
@@ -322,7 +322,7 @@ func (p *Policy) upgrade(storage logical.Storage) error {
// on the policy. If derivation is disabled the raw key is used and no context
// is required, otherwise the KDF mode is used with the context to derive the
// proper key.
- func (p *Policy) DeriveKey(context []byte, ver int) ([]byte, error) {
+ func (p *policy) DeriveKey(context []byte, ver int) ([]byte, error) {
if p.Keys == nil || p.LatestVersion == 0 {
return nil, errutil.InternalError{Err: "unable to access the key; no key versions found"}
}
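Aside: the comment above DeriveKey says that when derivation is enabled the stored key is never used directly; the caller-supplied context is fed through a KDF to produce the working key. The KDF itself is outside these hunks, so the sketch below uses HKDF-SHA256 purely as a stand-in to show the derive(masterKey, context) shape, not as the backend's actual KDF mode:

package main

import (
	"crypto/sha256"
	"encoding/hex"
	"fmt"
	"io"

	"golang.org/x/crypto/hkdf"
)

// deriveKey returns a 32-byte key derived from the master key and the
// caller's context. HKDF-SHA256 here is illustrative only.
func deriveKey(masterKey, context []byte) ([]byte, error) {
	if len(context) == 0 {
		return nil, fmt.Errorf("missing context for derived key")
	}
	r := hkdf.New(sha256.New, masterKey, nil, context)
	derived := make([]byte, 32)
	if _, err := io.ReadFull(r, derived); err != nil {
		return nil, err
	}
	return derived, nil
}

func main() {
	master := make([]byte, 32) // stand-in for a stored key version
	k1, _ := deriveKey(master, []byte("tenant-a"))
	k2, _ := deriveKey(master, []byte("tenant-b"))
	// Different contexts yield different working keys.
	fmt.Println(hex.EncodeToString(k1) != hex.EncodeToString(k2))
}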
@@ -367,7 +367,7 @@ func (p *Policy) DeriveKey(context []byte, ver int) ([]byte, error) {
}
}
- func (p *Policy) Encrypt(context, nonce []byte, value string) (string, error) {
+ func (p *policy) Encrypt(context, nonce []byte, value string) (string, error) {
// Decode the plaintext value
plaintext, err := base64.StdEncoding.DecodeString(value)
if err != nil {
@@ -437,7 +437,7 @@ func (p *Policy) Encrypt(context, nonce []byte, value string) (string, error) {
return encoded, nil
}
- func (p *Policy) Decrypt(context, nonce []byte, value string) (string, error) {
+ func (p *policy) Decrypt(context, nonce []byte, value string) (string, error) {
// Verify the prefix
if !strings.HasPrefix(value, "vault:v") {
return "", errutil.UserError{Err: "invalid ciphertext: no prefix"}
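Aside: Decrypt above rejects anything without the vault:v prefix, and ErrTooOld near the top of the file covers ciphertext whose version has fallen below the policy's minimum decryption version. A sketch of those two checks, assuming a vault:v<version>:<base64> layout (the exact layout is not spelled out in these hunks):

package main

import (
	"fmt"
	"strconv"
	"strings"
)

// parseCiphertext splits a value of the assumed form "vault:v<version>:<base64>"
// and enforces a minimum decryption version, mirroring the checks referenced
// in the diff (invalid prefix, version too old).
func parseCiphertext(value string, minDecryptionVersion int) (int, string, error) {
	if !strings.HasPrefix(value, "vault:v") {
		return 0, "", fmt.Errorf("invalid ciphertext: no prefix")
	}
	rest := strings.TrimPrefix(value, "vault:v")
	parts := strings.SplitN(rest, ":", 2)
	if len(parts) != 2 {
		return 0, "", fmt.Errorf("invalid ciphertext: wrong number of fields")
	}
	ver, err := strconv.Atoi(parts[0])
	if err != nil {
		return 0, "", fmt.Errorf("invalid ciphertext: bad version")
	}
	if ver < minDecryptionVersion {
		return 0, "", fmt.Errorf("ciphertext version is disallowed by policy (too old)")
	}
	return ver, parts[1], nil
}

func main() {
	ver, payload, err := parseCiphertext("vault:v2:aGVsbG8=", 2)
	fmt.Println(ver, payload, err)
	_, _, err = parseCiphertext("vault:v1:aGVsbG8=", 2)
	fmt.Println(err)
}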
@@ -520,12 +520,12 @@ func (p *Policy) Decrypt(context, nonce []byte, value string) (string, error) {
return base64.StdEncoding.EncodeToString(plain), nil
}
- func (p *Policy) rotate(storage logical.Storage) error {
+ func (p *policy) rotate(storage logical.Storage) error {
if p.Keys == nil {
// This is an initial key rotation when generating a new policy. We
// don't need to call migrate here because if we've called getPolicy to
// get the policy in the first place it will have been run.
- p.Keys = KeyEntryMap{}
+ p.Keys = keyEntryMap{}
}
// Generate a 256bit key
@@ -537,7 +537,7 @@ func (p *Policy) rotate(storage logical.Storage) error {
p.LatestVersion += 1
- p.Keys[p.LatestVersion] = KeyEntry{
+ p.Keys[p.LatestVersion] = keyEntry{
Key: newKey,
CreationTime: time.Now().Unix(),
}
@@ -554,9 +554,9 @@ func (p *Policy) rotate(storage logical.Storage) error {
return p.Persist(storage)
}
- func (p *Policy) migrateKeyToKeysMap() {
- p.Keys = KeyEntryMap{
- 1: KeyEntry{
+ func (p *policy) migrateKeyToKeysMap() {
+ p.Keys = keyEntryMap{
+ 1: keyEntry{
Key: p.Key,
CreationTime: time.Now().Unix(),
},
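Aside: the whole change leans on Go's visibility rule: identifiers that start with a lower-case letter are reachable only inside their own package, so renaming Policy, KeyEntry, KeyEntryMap, and ArchivedKeys to their lower-case forms removes them from transit's importable surface without changing behavior. A throwaway sketch of the rule itself (the package and type names below are made up, not Vault's):

// Package vaultlike is an illustrative stand-in, not part of Vault.
package vaultlike

// policy is unexported: other packages cannot name this type at all, so its
// fields and layout remain an internal detail of the package.
type policy struct {
	Name string
}

// LookupName is the exported surface; callers get results without ever
// holding the struct itself.
func LookupName(name string) (string, bool) {
	p := &policy{Name: name}
	return p.Name, p.Name != ""
}

// From another package, vaultlike.LookupName("x") compiles, while
// &vaultlike.policy{} fails with "cannot refer to unexported name".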

File 5 of 5

@@ -8,11 +8,11 @@ import (
)
var (
- keysArchive []KeyEntry
+ keysArchive []keyEntry
)
func resetKeysArchive() {
- keysArchive = []KeyEntry{KeyEntry{}}
+ keysArchive = []keyEntry{keyEntry{}}
}
func Test_KeyUpgrade(t *testing.T) {
@@ -262,7 +262,7 @@ func testArchivingCommon(t *testing.T, lm *lockManager) {
}
func checkKeys(t *testing.T,
- policy *Policy,
+ p *policy,
storage logical.Storage,
action string,
archiveVer, latestVer, keysSize int) {
@@ -273,55 +273,55 @@ func checkKeys(t *testing.T,
"but keys archive is of size %d", latestVer, latestVer+1, len(keysArchive))
}
- archive, err := policy.loadArchive(storage)
+ archive, err := p.loadArchive(storage)
if err != nil {
t.Fatal(err)
}
badArchiveVer := false
if archiveVer == 0 {
- if len(archive.Keys) != 0 || policy.ArchiveVersion != 0 {
+ if len(archive.Keys) != 0 || p.ArchiveVersion != 0 {
badArchiveVer = true
}
} else {
// We need to subtract one because we have the indexes match key
// versions, which start at 1. So for an archive version of 1, we
// actually have two entries -- a blank 0 entry, and the key at spot 1
- if archiveVer != len(archive.Keys)-1 || archiveVer != policy.ArchiveVersion {
+ if archiveVer != len(archive.Keys)-1 || archiveVer != p.ArchiveVersion {
badArchiveVer = true
}
}
if badArchiveVer {
t.Fatalf(
"expected archive version %d, found length of archive keys %d and policy archive version %d",
- archiveVer, len(archive.Keys), policy.ArchiveVersion,
+ archiveVer, len(archive.Keys), p.ArchiveVersion,
)
}
- if latestVer != policy.LatestVersion {
+ if latestVer != p.LatestVersion {
t.Fatalf(
"expected latest version %d, found %d",
- latestVer, policy.LatestVersion,
+ latestVer, p.LatestVersion,
)
}
- if keysSize != len(policy.Keys) {
+ if keysSize != len(p.Keys) {
t.Fatalf(
"expected keys size %d, found %d, action is %s, policy is \n%#v\n",
- keysSize, len(policy.Keys), action, policy,
+ keysSize, len(p.Keys), action, p,
)
}
- for i := policy.MinDecryptionVersion; i <= policy.LatestVersion; i++ {
- if _, ok := policy.Keys[i]; !ok {
+ for i := p.MinDecryptionVersion; i <= p.LatestVersion; i++ {
+ if _, ok := p.Keys[i]; !ok {
t.Fatalf(
"expected key %d, did not find it in policy keys", i,
)
}
}
- for i := policy.MinDecryptionVersion; i <= policy.LatestVersion; i++ {
- if !reflect.DeepEqual(policy.Keys[i], keysArchive[i]) {
+ for i := p.MinDecryptionVersion; i <= p.LatestVersion; i++ {
+ if !reflect.DeepEqual(p.Keys[i], keysArchive[i]) {
t.Fatalf("key %d not equivalent between policy keys and test keys archive", i)
}
}