/*
Copyright 2020 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package resourceclaim

import (
	"errors"
	"fmt"
	"sort"
	"sync"
	"testing"
	"time"

	"github.com/onsi/gomega"
	"github.com/stretchr/testify/assert"

	v1 "k8s.io/api/core/v1"
	resourceapi "k8s.io/api/resource/v1beta1"
	apierrors "k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/types"
	"k8s.io/client-go/informers"
	"k8s.io/client-go/kubernetes/fake"
	k8stesting "k8s.io/client-go/testing"
	"k8s.io/component-base/metrics/testutil"
	"k8s.io/kubernetes/pkg/controller"
	"k8s.io/kubernetes/pkg/controller/resourceclaim/metrics"
	"k8s.io/kubernetes/test/utils/ktesting"
)

var (
	testPodName          = "test-pod"
	testNamespace        = "my-namespace"
	testPodUID           = types.UID("uidpod1")
	otherNamespace       = "not-my-namespace"
	podResourceClaimName = "acme-resource"
	templateName         = "my-template"
	className            = "my-resource-class"
	nodeName             = "worker"

	testPod             = makePod(testPodName, testNamespace, testPodUID)
	testPodWithResource = makePod(testPodName, testNamespace, testPodUID, *makePodResourceClaim(podResourceClaimName, templateName))
	otherTestPod        = makePod(testPodName+"-II", testNamespace, testPodUID+"-II")

	testClaim              = makeClaim(testPodName+"-"+podResourceClaimName, testNamespace, className, makeOwnerReference(testPodWithResource, true))
	testClaimAllocated     = allocateClaim(testClaim)
	testClaimReserved      = reserveClaim(testClaimAllocated, testPodWithResource)
	testClaimReservedTwice = reserveClaim(testClaimReserved, otherTestPod)

	generatedTestClaim          = makeGeneratedClaim(podResourceClaimName, testPodName+"-"+podResourceClaimName+"-", testNamespace, className, 1, makeOwnerReference(testPodWithResource, true))
	generatedTestClaimAllocated = allocateClaim(generatedTestClaim)
	generatedTestClaimReserved  = reserveClaim(generatedTestClaimAllocated, testPodWithResource)

	conflictingClaim    = makeClaim(testPodName+"-"+podResourceClaimName, testNamespace, className, nil)
	otherNamespaceClaim = makeClaim(testPodName+"-"+podResourceClaimName, otherNamespace, className, nil)
	template            = makeTemplate(templateName, testNamespace, className)

	testPodWithNodeName = func() *v1.Pod {
		pod := testPodWithResource.DeepCopy()
		pod.Spec.NodeName = nodeName
		pod.Status.ResourceClaimStatuses = append(pod.Status.ResourceClaimStatuses, v1.PodResourceClaimStatus{
			Name:              pod.Spec.ResourceClaims[0].Name,
			ResourceClaimName: &generatedTestClaim.Name,
		})
		return pod
	}()
)

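// TestSyncHandler drives a single syncHandler invocation per test case: it
// seeds a fake client with the given pods, claims, and templates, optionally
// pre-populates the mutation cache or injects create conflicts, and then
// compares the resulting claims, pod resource claim statuses, and metrics
// against the expectations.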
func TestSyncHandler(t *testing.T) {
	tests := []struct {
		name                   string
		key                    string
		adminAccessEnabled     bool
		prioritizedListEnabled bool
		claims                 []*resourceapi.ResourceClaim
		claimsInCache          []*resourceapi.ResourceClaim
		pods                   []*v1.Pod
		podsLater              []*v1.Pod
		templates              []*resourceapi.ResourceClaimTemplate
		expectedClaims         []resourceapi.ResourceClaim
		expectedStatuses       map[string][]v1.PodResourceClaimStatus
		expectedError          bool
		expectedMetrics        expectedMetrics
	}{
		{
			name:           "create",
			pods:           []*v1.Pod{testPodWithResource},
			templates:      []*resourceapi.ResourceClaimTemplate{template},
			key:            podKey(testPodWithResource),
			expectedClaims: []resourceapi.ResourceClaim{*generatedTestClaim},
			expectedStatuses: map[string][]v1.PodResourceClaimStatus{
				testPodWithResource.Name: {
					{Name: testPodWithResource.Spec.ResourceClaims[0].Name, ResourceClaimName: &generatedTestClaim.Name},
				},
			},
			expectedMetrics: expectedMetrics{1, 0},
		},
		{
			name: "nop",
			pods: []*v1.Pod{func() *v1.Pod {
				pod := testPodWithResource.DeepCopy()
				pod.Status.ResourceClaimStatuses = []v1.PodResourceClaimStatus{
					{Name: testPodWithResource.Spec.ResourceClaims[0].Name, ResourceClaimName: &generatedTestClaim.Name},
				}
				return pod
			}()},
			templates:      []*resourceapi.ResourceClaimTemplate{template},
			key:            podKey(testPodWithResource),
			claims:         []*resourceapi.ResourceClaim{generatedTestClaim},
			expectedClaims: []resourceapi.ResourceClaim{*generatedTestClaim},
			expectedStatuses: map[string][]v1.PodResourceClaimStatus{
				testPodWithResource.Name: {
					{Name: testPodWithResource.Spec.ResourceClaims[0].Name, ResourceClaimName: &generatedTestClaim.Name},
				},
			},
			expectedMetrics: expectedMetrics{0, 0},
		},
		{
			name: "recreate",
			pods: []*v1.Pod{func() *v1.Pod {
				pod := testPodWithResource.DeepCopy()
				pod.Status.ResourceClaimStatuses = []v1.PodResourceClaimStatus{
					{Name: testPodWithResource.Spec.ResourceClaims[0].Name, ResourceClaimName: &generatedTestClaim.Name},
				}
				return pod
			}()},
			templates:      []*resourceapi.ResourceClaimTemplate{template},
			key:            podKey(testPodWithResource),
			expectedClaims: []resourceapi.ResourceClaim{*generatedTestClaim},
			expectedStatuses: map[string][]v1.PodResourceClaimStatus{
				testPodWithResource.Name: {
					{Name: testPodWithResource.Spec.ResourceClaims[0].Name, ResourceClaimName: &generatedTestClaim.Name},
				},
			},
			expectedMetrics: expectedMetrics{1, 0},
		},
		{
			name:          "missing-template",
			pods:          []*v1.Pod{testPodWithResource},
			templates:     nil,
			key:           podKey(testPodWithResource),
			expectedError: true,
		},
		{
			name:           "find-existing-claim-by-label",
			pods:           []*v1.Pod{testPodWithResource},
			key:            podKey(testPodWithResource),
			claims:         []*resourceapi.ResourceClaim{generatedTestClaim},
			expectedClaims: []resourceapi.ResourceClaim{*generatedTestClaim},
			expectedStatuses: map[string][]v1.PodResourceClaimStatus{
				testPodWithResource.Name: {
					{Name: testPodWithResource.Spec.ResourceClaims[0].Name, ResourceClaimName: &generatedTestClaim.Name},
				},
			},
			expectedMetrics: expectedMetrics{0, 0},
		},
		{
			name:          "find-created-claim-in-cache",
			pods:          []*v1.Pod{testPodWithResource},
			key:           podKey(testPodWithResource),
			claimsInCache: []*resourceapi.ResourceClaim{generatedTestClaim},
			expectedStatuses: map[string][]v1.PodResourceClaimStatus{
				testPodWithResource.Name: {
					{Name: testPodWithResource.Spec.ResourceClaims[0].Name, ResourceClaimName: &generatedTestClaim.Name},
				},
			},
			expectedMetrics: expectedMetrics{0, 0},
		},
		{
			name: "no-such-pod",
			key:  podKey(testPodWithResource),
		},
		{
			name: "pod-deleted",
			pods: func() []*v1.Pod {
				deleted := metav1.Now()
				pods := []*v1.Pod{testPodWithResource.DeepCopy()}
				pods[0].DeletionTimestamp = &deleted
				return pods
			}(),
			key: podKey(testPodWithResource),
		},
		{
			name: "no-volumes",
			pods: []*v1.Pod{testPod},
			key:  podKey(testPod),
		},
		{
			name:           "create-with-other-claim",
			pods:           []*v1.Pod{testPodWithResource},
			templates:      []*resourceapi.ResourceClaimTemplate{template},
			key:            podKey(testPodWithResource),
			claims:         []*resourceapi.ResourceClaim{otherNamespaceClaim},
			expectedClaims: []resourceapi.ResourceClaim{*otherNamespaceClaim, *generatedTestClaim},
			expectedStatuses: map[string][]v1.PodResourceClaimStatus{
				testPodWithResource.Name: {
					{Name: testPodWithResource.Spec.ResourceClaims[0].Name, ResourceClaimName: &generatedTestClaim.Name},
				},
			},
			expectedMetrics: expectedMetrics{1, 0},
		},
		{
			name:           "wrong-claim-owner",
			pods:           []*v1.Pod{testPodWithResource},
			key:            podKey(testPodWithResource),
			claims:         []*resourceapi.ResourceClaim{conflictingClaim},
			expectedClaims: []resourceapi.ResourceClaim{*conflictingClaim},
			expectedError:  true,
		},
		{
			name:            "create-conflict",
			pods:            []*v1.Pod{testPodWithResource},
			templates:       []*resourceapi.ResourceClaimTemplate{template},
			key:             podKey(testPodWithResource),
			expectedMetrics: expectedMetrics{1, 1},
			expectedError:   true,
		},
		{
			name:            "stay-reserved-seen",
			pods:            []*v1.Pod{testPodWithResource},
			key:             claimKey(testClaimReserved),
			claims:          []*resourceapi.ResourceClaim{testClaimReserved},
			expectedClaims:  []resourceapi.ResourceClaim{*testClaimReserved},
			expectedMetrics: expectedMetrics{0, 0},
		},
		{
			name:            "stay-reserved-not-seen",
			podsLater:       []*v1.Pod{testPodWithResource},
			key:             claimKey(testClaimReserved),
			claims:          []*resourceapi.ResourceClaim{testClaimReserved},
			expectedClaims:  []resourceapi.ResourceClaim{*testClaimReserved},
			expectedMetrics: expectedMetrics{0, 0},
		},
		{
			name:   "clear-reserved-structured",
			pods:   []*v1.Pod{},
			key:    claimKey(testClaimReserved),
			claims: []*resourceapi.ResourceClaim{structuredParameters(testClaimReserved)},
			expectedClaims: func() []resourceapi.ResourceClaim {
				claim := testClaimAllocated.DeepCopy()
				claim.Finalizers = []string{}
				claim.Status.Allocation = nil
				return []resourceapi.ResourceClaim{*claim}
			}(),
			expectedMetrics: expectedMetrics{0, 0},
		},
		{
			name: "dont-clear-reserved-structured",
			pods: []*v1.Pod{testPodWithResource},
			key:  claimKey(testClaimReserved),
			claims: func() []*resourceapi.ResourceClaim {
				claim := structuredParameters(testClaimReserved)
				claim = reserveClaim(claim, otherTestPod)
				return []*resourceapi.ResourceClaim{claim}
			}(),
			expectedClaims:  []resourceapi.ResourceClaim{*structuredParameters(testClaimReserved)},
			expectedMetrics: expectedMetrics{0, 0},
		},
		{
			name: "clear-reserved-structured-deleted",
			pods: []*v1.Pod{},
			key:  claimKey(testClaimReserved),
			claims: func() []*resourceapi.ResourceClaim {
				claim := structuredParameters(testClaimReserved.DeepCopy())
				claim.DeletionTimestamp = &metav1.Time{}
				return []*resourceapi.ResourceClaim{claim}
			}(),
			expectedClaims: func() []resourceapi.ResourceClaim {
				claim := structuredParameters(testClaimAllocated.DeepCopy())
				claim.DeletionTimestamp = &metav1.Time{}
				claim.Finalizers = []string{}
				claim.Status.Allocation = nil
				return []resourceapi.ResourceClaim{*claim}
			}(),
			expectedMetrics: expectedMetrics{0, 0},
		},
		{
			name: "structured-deleted",
			pods: []*v1.Pod{},
			key:  claimKey(testClaimReserved),
			claims: func() []*resourceapi.ResourceClaim {
				claim := structuredParameters(testClaimAllocated.DeepCopy())
				claim.DeletionTimestamp = &metav1.Time{}
				return []*resourceapi.ResourceClaim{claim}
			}(),
			expectedClaims: func() []resourceapi.ResourceClaim {
				claim := structuredParameters(testClaimAllocated.DeepCopy())
				claim.DeletionTimestamp = &metav1.Time{}
				claim.Finalizers = []string{}
				claim.Status.Allocation = nil
				return []resourceapi.ResourceClaim{*claim}
			}(),
			expectedMetrics: expectedMetrics{0, 0},
		},
		{
			name: "clear-reserved-when-done",
			pods: func() []*v1.Pod {
				pods := []*v1.Pod{testPodWithResource.DeepCopy()}
				pods[0].Status.Phase = v1.PodSucceeded
				return pods
			}(),
			key: claimKey(testClaimReserved),
			claims: func() []*resourceapi.ResourceClaim {
				claims := []*resourceapi.ResourceClaim{testClaimReserved.DeepCopy()}
				claims[0].OwnerReferences = nil
				return claims
			}(),
			expectedClaims: func() []resourceapi.ResourceClaim {
				claims := []resourceapi.ResourceClaim{*testClaimAllocated.DeepCopy()}
				claims[0].OwnerReferences = nil
				return claims
			}(),
			expectedMetrics: expectedMetrics{0, 0},
		},
		{
			name:            "remove-reserved",
			pods:            []*v1.Pod{testPod},
			key:             claimKey(testClaimReservedTwice),
			claims:          []*resourceapi.ResourceClaim{testClaimReservedTwice},
			expectedClaims:  []resourceapi.ResourceClaim{*testClaimReserved},
			expectedMetrics: expectedMetrics{0, 0},
		},
		{
			name: "delete-claim-when-done",
			pods: func() []*v1.Pod {
				pods := []*v1.Pod{testPodWithResource.DeepCopy()}
				pods[0].Status.Phase = v1.PodSucceeded
				return pods
			}(),
			key:             claimKey(testClaimReserved),
			claims:          []*resourceapi.ResourceClaim{testClaimReserved},
			expectedClaims:  nil,
			expectedMetrics: expectedMetrics{0, 0},
		},
		{
			name:           "add-reserved",
			pods:           []*v1.Pod{testPodWithNodeName},
			key:            podKey(testPodWithNodeName),
			templates:      []*resourceapi.ResourceClaimTemplate{template},
			claims:         []*resourceapi.ResourceClaim{generatedTestClaimAllocated},
			expectedClaims: []resourceapi.ResourceClaim{*generatedTestClaimReserved},
			expectedStatuses: map[string][]v1.PodResourceClaimStatus{
				testPodWithNodeName.Name: {
					{Name: testPodWithNodeName.Spec.ResourceClaims[0].Name, ResourceClaimName: &generatedTestClaim.Name},
				},
			},
			expectedMetrics: expectedMetrics{0, 0},
		},
	}

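	// Each test case gets its own fake client, informer factory, controller,
	// and freshly reset metrics so that the cases cannot influence each other.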
	for _, tc := range tests {
		// Run sequentially because of global logging and global metrics.
		t.Run(tc.name, func(t *testing.T) {
			tCtx := ktesting.Init(t)
			tCtx = ktesting.WithCancel(tCtx)

			var objects []runtime.Object
			for _, pod := range tc.pods {
				objects = append(objects, pod)
			}
			for _, claim := range tc.claims {
				objects = append(objects, claim)
			}
			for _, template := range tc.templates {
				objects = append(objects, template)
			}

			fakeKubeClient := createTestClient(objects...)
			if tc.expectedMetrics.numFailures > 0 {
				fakeKubeClient.PrependReactor("create", "resourceclaims", func(action k8stesting.Action) (handled bool, ret runtime.Object, err error) {
					return true, nil, apierrors.NewConflict(action.GetResource().GroupResource(), "fake name", errors.New("fake conflict"))
				})
			}
			setupMetrics()
			informerFactory := informers.NewSharedInformerFactory(fakeKubeClient, controller.NoResyncPeriodFunc())
			podInformer := informerFactory.Core().V1().Pods()
			claimInformer := informerFactory.Resource().V1beta1().ResourceClaims()
			templateInformer := informerFactory.Resource().V1beta1().ResourceClaimTemplates()

			features := Features{
				AdminAccess:     tc.adminAccessEnabled,
				PrioritizedList: tc.prioritizedListEnabled,
			}
			ec, err := NewController(tCtx.Logger(), features, fakeKubeClient, podInformer, claimInformer, templateInformer)
			if err != nil {
				t.Fatalf("error creating ephemeral controller: %v", err)
			}

			// Ensure informers are up-to-date.
			informerFactory.Start(tCtx.Done())
			stopInformers := func() {
				tCtx.Cancel("stopping informers")
				informerFactory.Shutdown()
			}
			defer stopInformers()
			informerFactory.WaitForCacheSync(tCtx.Done())

			// Add claims that only exist in the mutation cache.
			for _, claim := range tc.claimsInCache {
				ec.claimCache.Mutation(claim)
			}

			// Simulate race: stop informers, add more pods that the controller doesn't know about.
			stopInformers()
			for _, pod := range tc.podsLater {
				_, err := fakeKubeClient.CoreV1().Pods(pod.Namespace).Create(tCtx, pod, metav1.CreateOptions{})
				if err != nil {
					t.Fatalf("unexpected error while creating pod: %v", err)
				}
			}

			err = ec.syncHandler(tCtx, tc.key)
			if err != nil && !tc.expectedError {
				t.Fatalf("unexpected error while running handler: %v", err)
			}
			if err == nil && tc.expectedError {
				t.Fatalf("unexpected success")
			}

			claims, err := fakeKubeClient.ResourceV1beta1().ResourceClaims("").List(tCtx, metav1.ListOptions{})
			if err != nil {
				t.Fatalf("unexpected error while listing claims: %v", err)
			}
			assert.Equal(t, normalizeClaims(tc.expectedClaims), normalizeClaims(claims.Items))

			pods, err := fakeKubeClient.CoreV1().Pods("").List(tCtx, metav1.ListOptions{})
			if err != nil {
				t.Fatalf("unexpected error while listing pods: %v", err)
			}
			var actualStatuses map[string][]v1.PodResourceClaimStatus
			for _, pod := range pods.Items {
				if len(pod.Status.ResourceClaimStatuses) == 0 {
					continue
				}
				if actualStatuses == nil {
					actualStatuses = make(map[string][]v1.PodResourceClaimStatus)
				}
				actualStatuses[pod.Name] = pod.Status.ResourceClaimStatuses
			}
			assert.Equal(t, tc.expectedStatuses, actualStatuses, "pod resource claim statuses")

			expectMetrics(t, tc.expectedMetrics)
		})
	}
}

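// TestResourceClaimEventHandler verifies that the controller's event handlers
// keep the resource claim metrics in sync while claims get created, modified,
// allocated, deallocated, and deleted.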
func TestResourceClaimEventHandler(t *testing.T) {
	tCtx := ktesting.Init(t)
	tCtx = ktesting.WithCancel(tCtx)

	fakeKubeClient := createTestClient()
	setupMetrics()
	informerFactory := informers.NewSharedInformerFactory(fakeKubeClient, controller.NoResyncPeriodFunc())
	podInformer := informerFactory.Core().V1().Pods()
	claimInformer := informerFactory.Resource().V1beta1().ResourceClaims()
	templateInformer := informerFactory.Resource().V1beta1().ResourceClaimTemplates()
	claimClient := fakeKubeClient.ResourceV1beta1().ResourceClaims(testNamespace)

	_, err := NewController(tCtx.Logger(), Features{}, fakeKubeClient, podInformer, claimInformer, templateInformer)
	tCtx.ExpectNoError(err, "creating ephemeral controller")

	informerFactory.Start(tCtx.Done())
	stopInformers := func() {
		tCtx.Cancel("stopping informers")
		informerFactory.Shutdown()
	}
	defer stopInformers()

	var em numMetrics

	_, err = claimClient.Create(tCtx, testClaim, metav1.CreateOptions{})
	em.claims++
	ktesting.Step(tCtx, "create claim", func(tCtx ktesting.TContext) {
		tCtx.ExpectNoError(err)
		em.Eventually(tCtx)
	})

	modifiedClaim := testClaim.DeepCopy()
	modifiedClaim.Labels = map[string]string{"foo": "bar"}
	_, err = claimClient.Update(tCtx, modifiedClaim, metav1.UpdateOptions{})
	ktesting.Step(tCtx, "modify claim", func(tCtx ktesting.TContext) {
		tCtx.ExpectNoError(err)
		em.Consistently(tCtx)
	})

	_, err = claimClient.Update(tCtx, testClaimAllocated, metav1.UpdateOptions{})
	em.allocated++
	ktesting.Step(tCtx, "allocate claim", func(tCtx ktesting.TContext) {
		tCtx.ExpectNoError(err)
		em.Eventually(tCtx)
	})

	modifiedClaim = testClaimAllocated.DeepCopy()
	modifiedClaim.Labels = map[string]string{"foo": "bar2"}
	_, err = claimClient.Update(tCtx, modifiedClaim, metav1.UpdateOptions{})
	ktesting.Step(tCtx, "modify claim", func(tCtx ktesting.TContext) {
		tCtx.ExpectNoError(err)
		em.Consistently(tCtx)
	})

	otherClaimAllocated := testClaimAllocated.DeepCopy()
	otherClaimAllocated.Name += "2"
	_, err = claimClient.Create(tCtx, otherClaimAllocated, metav1.CreateOptions{})
	em.claims++
	em.allocated++
	ktesting.Step(tCtx, "create allocated claim", func(tCtx ktesting.TContext) {
		tCtx.ExpectNoError(err)
		em.Eventually(tCtx)
	})

	_, err = claimClient.Update(tCtx, testClaim, metav1.UpdateOptions{})
	em.allocated--
	ktesting.Step(tCtx, "deallocate claim", func(tCtx ktesting.TContext) {
		tCtx.ExpectNoError(err)
		em.Eventually(tCtx)
	})

	err = claimClient.Delete(tCtx, testClaim.Name, metav1.DeleteOptions{})
	em.claims--
	ktesting.Step(tCtx, "delete deallocated claim", func(tCtx ktesting.TContext) {
		tCtx.ExpectNoError(err)
		em.Eventually(tCtx)
	})

	err = claimClient.Delete(tCtx, otherClaimAllocated.Name, metav1.DeleteOptions{})
	em.claims--
	em.allocated--
	ktesting.Step(tCtx, "delete allocated claim", func(tCtx ktesting.TContext) {
		tCtx.ExpectNoError(err)
		em.Eventually(tCtx)
	})

	em.Consistently(tCtx)
}

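// makeClaim returns a ResourceClaim with the given name and namespace and,
// if provided, a single owner reference.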
func makeClaim(name, namespace, classname string, owner *metav1.OwnerReference) *resourceapi.ResourceClaim {
	claim := &resourceapi.ResourceClaim{
		ObjectMeta: metav1.ObjectMeta{Name: name, Namespace: namespace},
	}
	if owner != nil {
		claim.OwnerReferences = []metav1.OwnerReference{*owner}
	}

	return claim
}

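// makeGeneratedClaim returns a ResourceClaim shaped like one the controller
// generates for a pod claim: the name is GenerateName plus a counter suffix,
// and the pod-claim-name annotation points back at the entry in the pod spec.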
func makeGeneratedClaim(podClaimName, generateName, namespace, classname string, createCounter int, owner *metav1.OwnerReference) *resourceapi.ResourceClaim {
	claim := &resourceapi.ResourceClaim{
		ObjectMeta: metav1.ObjectMeta{
			Name:         fmt.Sprintf("%s-%d", generateName, createCounter),
			GenerateName: generateName,
			Namespace:    namespace,
			Annotations:  map[string]string{"resource.kubernetes.io/pod-claim-name": podClaimName},
		},
	}
	if owner != nil {
		claim.OwnerReferences = []metav1.OwnerReference{*owner}
	}

	return claim
}

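// allocateClaim returns a copy of the claim with an (empty) allocation result.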
func allocateClaim(claim *resourceapi.ResourceClaim) *resourceapi.ResourceClaim {
	claim = claim.DeepCopy()
	claim.Status.Allocation = &resourceapi.AllocationResult{}
	return claim
}

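// structuredParameters marks a copy of the claim as allocated via structured
// parameters by attaching the DRA finalizer.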
func structuredParameters(claim *resourceapi.ResourceClaim) *resourceapi.ResourceClaim {
	claim = claim.DeepCopy()
	// As far as the controller is concerned, a claim was allocated by us if it has
	// this finalizer. For testing we don't need to update the allocation result.
	claim.Finalizers = append(claim.Finalizers, resourceapi.Finalizer)
	return claim
}

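// reserveClaim returns a copy of the claim with the given pod appended to its
// ReservedFor list.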
func reserveClaim(claim *resourceapi.ResourceClaim, pod *v1.Pod) *resourceapi.ResourceClaim {
	claim = claim.DeepCopy()
	claim.Status.ReservedFor = append(claim.Status.ReservedFor,
		resourceapi.ResourceClaimConsumerReference{
			Resource: "pods",
			Name:     pod.Name,
			UID:      pod.UID,
		},
	)
	return claim
}

func makePodResourceClaim(name, templateName string) *v1.PodResourceClaim {
	return &v1.PodResourceClaim{
		Name:                      name,
		ResourceClaimTemplateName: &templateName,
	}
}

func makePod(name, namespace string, uid types.UID, podClaims ...v1.PodResourceClaim) *v1.Pod {
	pod := &v1.Pod{
		ObjectMeta: metav1.ObjectMeta{Name: name, Namespace: namespace, UID: uid},
		Spec: v1.PodSpec{
			ResourceClaims: podClaims,
		},
	}

	return pod
}

func makeTemplate(name, namespace, classname string) *resourceapi.ResourceClaimTemplate {
	template := &resourceapi.ResourceClaimTemplate{
		ObjectMeta: metav1.ObjectMeta{Name: name, Namespace: namespace},
	}
	return template
}

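// podKey returns the work queue key for a pod.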
func podKey(pod *v1.Pod) string {
	return podKeyPrefix + pod.Namespace + "/" + pod.Name
}

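// claimKey returns the work queue key for a resource claim.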
func claimKey(claim *resourceapi.ResourceClaim) string {
	return claimKeyPrefix + claim.Namespace + "/" + claim.Name
}

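// makeOwnerReference returns a pod owner reference with BlockOwnerDeletion
// always set to true and Controller set as requested.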
func makeOwnerReference(pod *v1.Pod, isController bool) *metav1.OwnerReference {
	isTrue := true
	return &metav1.OwnerReference{
		APIVersion:         "v1",
		Kind:               "Pod",
		Name:               pod.Name,
		UID:                pod.UID,
		Controller:         &isController,
		BlockOwnerDeletion: &isTrue,
	}
}

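// normalizeClaims sorts claims by namespace and name and nils out empty
// ReservedFor slices so that slices from different sources compare as equal.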
func normalizeClaims(claims []resourceapi.ResourceClaim) []resourceapi.ResourceClaim {
	sort.Slice(claims, func(i, j int) bool {
		if claims[i].Namespace < claims[j].Namespace {
			return true
		}
		if claims[i].Namespace > claims[j].Namespace {
			return false
		}
		return claims[i].Name < claims[j].Name
	})
	for i := range claims {
		if len(claims[i].Status.ReservedFor) == 0 {
			claims[i].Status.ReservedFor = nil
		}
	}
	return claims
}

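// createTestClient returns a fake clientset that is pre-populated with the
// given objects and emulates GenerateName for resource claims.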
func createTestClient(objects ...runtime.Object) *fake.Clientset {
	fakeClient := fake.NewSimpleClientset(objects...)
	fakeClient.PrependReactor("create", "resourceclaims", createResourceClaimReactor())
	return fakeClient
}

// createResourceClaimReactor implements the logic required for the GenerateName field to work when using
// the fake client. Add it with client.PrependReactor to your fake client.
func createResourceClaimReactor() func(action k8stesting.Action) (handled bool, ret runtime.Object, err error) {
	nameCounter := 1
	var mutex sync.Mutex
	return func(action k8stesting.Action) (handled bool, ret runtime.Object, err error) {
		mutex.Lock()
		defer mutex.Unlock()
		claim := action.(k8stesting.CreateAction).GetObject().(*resourceapi.ResourceClaim)
		if claim.Name == "" && claim.GenerateName != "" {
			claim.Name = fmt.Sprintf("%s-%d", claim.GenerateName, nameCounter)
		}
		nameCounter++
		return false, nil, nil
	}
}

// Metrics helpers

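// numMetrics holds the expected values of the claim gauge metrics.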
type numMetrics struct {
	claims    float64
	allocated float64
}

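// getNumMetric reads the current values of the NumResourceClaims and
// NumAllocatedResourceClaims gauges.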
func getNumMetric() (em numMetrics, err error) {
	em.claims, err = testutil.GetGaugeMetricValue(metrics.NumResourceClaims)
	if err != nil {
		return
	}
	em.allocated, err = testutil.GetGaugeMetricValue(metrics.NumAllocatedResourceClaims)
	return
}

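// Eventually polls the gauge metrics until they match em or the timeout hits.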
func (em numMetrics) Eventually(tCtx ktesting.TContext) {
	g := gomega.NewWithT(tCtx)
	tCtx.Helper()

	g.Eventually(getNumMetric).WithTimeout(5 * time.Second).Should(gomega.Equal(em))
}

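// Consistently asserts that the gauge metrics keep matching em for one second.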
func (em numMetrics) Consistently(tCtx ktesting.TContext) {
	g := gomega.NewWithT(tCtx)
	tCtx.Helper()

	g.Consistently(getNumMetric).WithTimeout(time.Second).Should(gomega.Equal(em))
}

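// expectedMetrics holds the expected values of the claim creation counters.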
type expectedMetrics struct {
	numCreated  int
	numFailures int
}

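// expectMetrics compares the claim create attempt and failure counters
// against the expected values.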
func expectMetrics(t *testing.T, em expectedMetrics) {
	t.Helper()

	actualCreated, err := testutil.GetCounterMetricValue(metrics.ResourceClaimCreateAttempts)
	handleErr(t, err, "ResourceClaimCreate")
	if actualCreated != float64(em.numCreated) {
		t.Errorf("Expected %d claims to be created, got %v", em.numCreated, actualCreated)
	}
	actualConflicts, err := testutil.GetCounterMetricValue(metrics.ResourceClaimCreateFailures)
	handleErr(t, err, "ResourceClaimCreate/Conflict")
	if actualConflicts != float64(em.numFailures) {
		t.Errorf("Expected %d claim creation conflicts, got %v", em.numFailures, actualConflicts)
	}
}

func handleErr(t *testing.T, err error, metricName string) {
	if err != nil {
		t.Errorf("Failed to get %s value, err: %v", metricName, err)
	}
}

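// setupMetrics registers the controller metrics and resets them to zero,
// which is why tests that check metrics must not run in parallel.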
func setupMetrics() {
	metrics.RegisterMetrics()
	metrics.ResourceClaimCreateAttempts.Reset()
	metrics.ResourceClaimCreateFailures.Reset()
	metrics.NumResourceClaims.Set(0)
	metrics.NumAllocatedResourceClaims.Set(0)
}