Mirror of https://github.com/optim-enterprises-bv/kubernetes.git
DRA: bump API v1alpha2 -> v1alpha3
This is in preparation for revamping the resource.k8s.io API group completely. Because there will be no support for transitioning from v1alpha2 to v1alpha3, the roundtrip test data for that API in 1.29 and 1.30 gets removed.

Repeating the version in the import name of the API packages is not really required. It was done for a while to support simpler grepping for usage of alpha APIs, but there are better ways for that now. So during this transition, "resourceapi" is used instead of "resourcev1alpha3", and the version is dropped from informer and lister imports. The advantage is that the next bump to v1beta1 will affect fewer source code lines.

Only source code where the version really matters (such as API registration) retains the versioned import.
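As a minimal, hypothetical sketch of the convention described above (not part of the commit itself): callers import the versioned package under the stable alias `resourceapi`, so a later bump to v1beta1 only touches the import line, while the rest of the code keeps compiling unchanged. The types used here (`ResourceClaim`, `ResourceClaimSpec`) are the ones that appear throughout the diff below.

```go
// Sketch only: illustrates the import-alias convention from the commit
// message. The alias "resourceapi" hides the API version; bumping to a
// newer version later means editing this one import line.
package example

import (
	resourceapi "k8s.io/api/resource/v1alpha3"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// newClaim builds a ResourceClaim without mentioning the version anywhere
// outside the import block.
func newClaim(name, className string) *resourceapi.ResourceClaim {
	return &resourceapi.ResourceClaim{
		ObjectMeta: metav1.ObjectMeta{Name: name},
		Spec: resourceapi.ResourceClaimSpec{
			ResourceClassName: className,
		},
	}
}
```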
@@ -37,7 +37,7 @@ import (

appsv1 "k8s.io/api/apps/v1"
v1 "k8s.io/api/core/v1"
-resourcev1alpha2 "k8s.io/api/resource/v1alpha2"
+resourceapi "k8s.io/api/resource/v1alpha3"
apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/api/meta"

@@ -48,7 +48,7 @@ import (
"k8s.io/apimachinery/pkg/selection"
"k8s.io/apiserver/pkg/authentication/serviceaccount"
"k8s.io/client-go/discovery/cached/memory"
-resourceapiinformer "k8s.io/client-go/informers/resource/v1alpha2"
+resourceapiinformer "k8s.io/client-go/informers/resource/v1alpha3"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/rest"
"k8s.io/client-go/restmapper"

@@ -113,20 +113,20 @@ func NewNodes(f *framework.Framework, minNodes, maxNodes int) *Nodes {
_, err = claimInformer.AddEventHandler(cache.ResourceEventHandlerFuncs{
AddFunc: func(obj any) {
defer ginkgo.GinkgoRecover()
-claim := obj.(*resourcev1alpha2.ResourceClaim)
+claim := obj.(*resourceapi.ResourceClaim)
framework.Logf("New claim:\n%s", format.Object(claim, 1))
validateClaim(claim)
},
UpdateFunc: func(oldObj, newObj any) {
defer ginkgo.GinkgoRecover()
-oldClaim := oldObj.(*resourcev1alpha2.ResourceClaim)
-newClaim := newObj.(*resourcev1alpha2.ResourceClaim)
+oldClaim := oldObj.(*resourceapi.ResourceClaim)
+newClaim := newObj.(*resourceapi.ResourceClaim)
framework.Logf("Updated claim:\n%s\nDiff:\n%s", format.Object(newClaim, 1), cmp.Diff(oldClaim, newClaim))
validateClaim(newClaim)
},
DeleteFunc: func(obj any) {
defer ginkgo.GinkgoRecover()
-claim := obj.(*resourcev1alpha2.ResourceClaim)
+claim := obj.(*resourceapi.ResourceClaim)
framework.Logf("Deleted claim:\n%s", format.Object(claim, 1))
},
})

@@ -140,7 +140,7 @@ func NewNodes(f *framework.Framework, minNodes, maxNodes int) *Nodes {
return nodes
}

-func validateClaim(claim *resourcev1alpha2.ResourceClaim) {
+func validateClaim(claim *resourceapi.ResourceClaim) {
// The apiserver doesn't enforce that a claim always has a finalizer
// while being allocated. This is a convention that whoever allocates a
// claim has to follow to prevent using a claim that is at risk of

@@ -267,7 +267,7 @@ func (d *Driver) SetUp(nodes *Nodes, resources app.Resources) {
d.classParameterAPIKind = "ConfigMap"
case parameterModeStructured:
d.parameterAPIGroup = "resource.k8s.io"
-d.parameterAPIVersion = "v1alpha2"
+d.parameterAPIVersion = "v1alpha3"
d.claimParameterAPIKind = "ResourceClaimParameters"
d.classParameterAPIKind = "ResourceClassParameters"
default:

@@ -526,8 +526,8 @@ func (d *Driver) TearDown() {
}

func (d *Driver) IsGone(ctx context.Context) {
-gomega.Eventually(ctx, func(ctx context.Context) ([]resourcev1alpha2.ResourceSlice, error) {
-slices, err := d.f.ClientSet.ResourceV1alpha2().ResourceSlices().List(ctx, metav1.ListOptions{FieldSelector: "driverName=" + d.Name})
+gomega.Eventually(ctx, func(ctx context.Context) ([]resourceapi.ResourceSlice, error) {
+slices, err := d.f.ClientSet.ResourceV1alpha3().ResourceSlices().List(ctx, metav1.ListOptions{FieldSelector: "driverName=" + d.Name})
if err != nil {
return nil, err
}
@@ -32,7 +32,7 @@ import (
"github.com/onsi/gomega/types"

v1 "k8s.io/api/core/v1"
-resourcev1alpha2 "k8s.io/api/resource/v1alpha2"
+resourceapi "k8s.io/api/resource/v1alpha3"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"

@@ -100,7 +100,7 @@ var _ = framework.SIGDescribe("node")("DRA", feature.DynamicResourceAllocation,

ginkgo.By("waiting for container startup to fail")
parameters := b.parameters()
-pod, template := b.podInline(resourcev1alpha2.AllocationModeWaitForFirstConsumer)
+pod, template := b.podInline(resourceapi.AllocationModeWaitForFirstConsumer)

b.create(ctx, parameters, pod, template)

@@ -126,36 +126,36 @@ var _ = framework.SIGDescribe("node")("DRA", feature.DynamicResourceAllocation,
// Pretend that the resource is allocated and reserved for some other entity.
// Until the resourceclaim controller learns to remove reservations for
// arbitrary types we can simply fake somthing here.
-claim := b.externalClaim(resourcev1alpha2.AllocationModeWaitForFirstConsumer)
+claim := b.externalClaim(resourceapi.AllocationModeWaitForFirstConsumer)
b.create(ctx, claim)

-claim, err := f.ClientSet.ResourceV1alpha2().ResourceClaims(f.Namespace.Name).Get(ctx, claim.Name, metav1.GetOptions{})
+claim, err := f.ClientSet.ResourceV1alpha3().ResourceClaims(f.Namespace.Name).Get(ctx, claim.Name, metav1.GetOptions{})
framework.ExpectNoError(err, "get claim")

claim.Finalizers = append(claim.Finalizers, "e2e.test/delete-protection")
-claim, err = f.ClientSet.ResourceV1alpha2().ResourceClaims(f.Namespace.Name).Update(ctx, claim, metav1.UpdateOptions{})
+claim, err = f.ClientSet.ResourceV1alpha3().ResourceClaims(f.Namespace.Name).Update(ctx, claim, metav1.UpdateOptions{})
framework.ExpectNoError(err, "add claim finalizer")

ginkgo.DeferCleanup(func(ctx context.Context) {
claim.Status.Allocation = nil
claim.Status.ReservedFor = nil
-claim, err = f.ClientSet.ResourceV1alpha2().ResourceClaims(f.Namespace.Name).UpdateStatus(ctx, claim, metav1.UpdateOptions{})
+claim, err = f.ClientSet.ResourceV1alpha3().ResourceClaims(f.Namespace.Name).UpdateStatus(ctx, claim, metav1.UpdateOptions{})
framework.ExpectNoError(err, "update claim")

claim.Finalizers = nil
-_, err = f.ClientSet.ResourceV1alpha2().ResourceClaims(f.Namespace.Name).Update(ctx, claim, metav1.UpdateOptions{})
+_, err = f.ClientSet.ResourceV1alpha3().ResourceClaims(f.Namespace.Name).Update(ctx, claim, metav1.UpdateOptions{})
framework.ExpectNoError(err, "remove claim finalizer")
})

-claim.Status.Allocation = &resourcev1alpha2.AllocationResult{}
+claim.Status.Allocation = &resourceapi.AllocationResult{}
claim.Status.DriverName = driver.Name
-claim.Status.ReservedFor = append(claim.Status.ReservedFor, resourcev1alpha2.ResourceClaimConsumerReference{
+claim.Status.ReservedFor = append(claim.Status.ReservedFor, resourceapi.ResourceClaimConsumerReference{
APIGroup: "example.com",
Resource: "some",
Name: "thing",
UID: "12345",
})
-claim, err = f.ClientSet.ResourceV1alpha2().ResourceClaims(f.Namespace.Name).UpdateStatus(ctx, claim, metav1.UpdateOptions{})
+claim, err = f.ClientSet.ResourceV1alpha3().ResourceClaims(f.Namespace.Name).UpdateStatus(ctx, claim, metav1.UpdateOptions{})
framework.ExpectNoError(err, "update claim")

pod := b.podExternal()

@@ -180,7 +180,7 @@ var _ = framework.SIGDescribe("node")("DRA", feature.DynamicResourceAllocation,

ginkgo.It("must unprepare resources for force-deleted pod", func(ctx context.Context) {
parameters := b.parameters()
-claim := b.externalClaim(resourcev1alpha2.AllocationModeImmediate)
+claim := b.externalClaim(resourceapi.AllocationModeImmediate)
pod := b.podExternal()
zero := int64(0)
pod.Spec.TerminationGracePeriodSeconds = &zero

@@ -203,7 +203,7 @@ var _ = framework.SIGDescribe("node")("DRA", feature.DynamicResourceAllocation,

ginkgo.It("must skip NodePrepareResource if not used by any container", func(ctx context.Context) {
parameters := b.parameters()
-pod, template := b.podInline(resourcev1alpha2.AllocationModeWaitForFirstConsumer)
+pod, template := b.podInline(resourceapi.AllocationModeWaitForFirstConsumer)
for i := range pod.Spec.Containers {
pod.Spec.Containers[i].Resources.Claims = nil
}
@@ -219,7 +219,7 @@ var _ = framework.SIGDescribe("node")("DRA", feature.DynamicResourceAllocation,

// claimTests tries out several different combinations of pods with
// claims, both inline and external.
-claimTests := func(b *builder, driver *Driver, allocationMode resourcev1alpha2.AllocationMode) {
+claimTests := func(b *builder, driver *Driver, allocationMode resourceapi.AllocationMode) {
ginkgo.It("supports simple pod referencing inline resource claim", func(ctx context.Context) {
objects, expectedEnv := b.flexibleParameters()
pod, template := b.podInline(allocationMode)

@@ -300,8 +300,8 @@ var _ = framework.SIGDescribe("node")("DRA", feature.DynamicResourceAllocation,
ginkgo.By("waiting for pod to finish")
framework.ExpectNoError(e2epod.WaitForPodNoLongerRunningInNamespace(ctx, f.ClientSet, pod.Name, pod.Namespace), "wait for pod to finish")
ginkgo.By("waiting for claim to be unreserved")
-gomega.Eventually(ctx, func(ctx context.Context) (*resourcev1alpha2.ResourceClaim, error) {
-return f.ClientSet.ResourceV1alpha2().ResourceClaims(pod.Namespace).Get(ctx, claim.Name, metav1.GetOptions{})
+gomega.Eventually(ctx, func(ctx context.Context) (*resourceapi.ResourceClaim, error) {
+return f.ClientSet.ResourceV1alpha3().ResourceClaims(pod.Namespace).Get(ctx, claim.Name, metav1.GetOptions{})
}).WithTimeout(f.Timeouts.PodDelete).Should(gomega.HaveField("Status.ReservedFor", gomega.BeEmpty()), "reservation should have been removed")
})

@@ -315,8 +315,8 @@ var _ = framework.SIGDescribe("node")("DRA", feature.DynamicResourceAllocation,
ginkgo.By("waiting for pod to finish")
framework.ExpectNoError(e2epod.WaitForPodNoLongerRunningInNamespace(ctx, f.ClientSet, pod.Name, pod.Namespace), "wait for pod to finish")
ginkgo.By("waiting for claim to be deleted")
-gomega.Eventually(ctx, func(ctx context.Context) ([]resourcev1alpha2.ResourceClaim, error) {
-claims, err := f.ClientSet.ResourceV1alpha2().ResourceClaims(pod.Namespace).List(ctx, metav1.ListOptions{})
+gomega.Eventually(ctx, func(ctx context.Context) ([]resourceapi.ResourceClaim, error) {
+claims, err := f.ClientSet.ResourceV1alpha3().ResourceClaims(pod.Namespace).List(ctx, metav1.ListOptions{})
if err != nil {
return nil, err
}

@@ -344,13 +344,13 @@ var _ = framework.SIGDescribe("node")("DRA", feature.DynamicResourceAllocation,
ginkgo.It("must deallocate after use when using delayed allocation", func(ctx context.Context) {
objects, expectedEnv := b.flexibleParameters()
pod := b.podExternal()
-claim := b.externalClaim(resourcev1alpha2.AllocationModeWaitForFirstConsumer)
+claim := b.externalClaim(resourceapi.AllocationModeWaitForFirstConsumer)
objects = append(objects, claim, pod)
b.create(ctx, objects...)

-gomega.Eventually(ctx, func(ctx context.Context) (*resourcev1alpha2.ResourceClaim, error) {
-return b.f.ClientSet.ResourceV1alpha2().ResourceClaims(b.f.Namespace.Name).Get(ctx, claim.Name, metav1.GetOptions{})
-}).WithTimeout(f.Timeouts.PodDelete).ShouldNot(gomega.HaveField("Status.Allocation", (*resourcev1alpha2.AllocationResult)(nil)))
+gomega.Eventually(ctx, func(ctx context.Context) (*resourceapi.ResourceClaim, error) {
+return b.f.ClientSet.ResourceV1alpha3().ResourceClaims(b.f.Namespace.Name).Get(ctx, claim.Name, metav1.GetOptions{})
+}).WithTimeout(f.Timeouts.PodDelete).ShouldNot(gomega.HaveField("Status.Allocation", (*resourceapi.AllocationResult)(nil)))

b.testPod(ctx, f.ClientSet, pod, expectedEnv...)

@@ -358,9 +358,9 @@ var _ = framework.SIGDescribe("node")("DRA", feature.DynamicResourceAllocation,
framework.ExpectNoError(b.f.ClientSet.CoreV1().Pods(b.f.Namespace.Name).Delete(ctx, pod.Name, metav1.DeleteOptions{}))

ginkgo.By("waiting for claim to get deallocated")
-gomega.Eventually(ctx, func(ctx context.Context) (*resourcev1alpha2.ResourceClaim, error) {
-return b.f.ClientSet.ResourceV1alpha2().ResourceClaims(b.f.Namespace.Name).Get(ctx, claim.Name, metav1.GetOptions{})
-}).WithTimeout(f.Timeouts.PodDelete).Should(gomega.HaveField("Status.Allocation", (*resourcev1alpha2.AllocationResult)(nil)))
+gomega.Eventually(ctx, func(ctx context.Context) (*resourceapi.ResourceClaim, error) {
+return b.f.ClientSet.ResourceV1alpha3().ResourceClaims(b.f.Namespace.Name).Get(ctx, claim.Name, metav1.GetOptions{})
+}).WithTimeout(f.Timeouts.PodDelete).Should(gomega.HaveField("Status.Allocation", (*resourceapi.AllocationResult)(nil)))
})
}
@@ -383,7 +383,7 @@ var _ = framework.SIGDescribe("node")("DRA", feature.DynamicResourceAllocation,
ginkgo.It("supports claim and class parameters", func(ctx context.Context) {
objects, expectedEnv := b.flexibleParameters()

-pod, template := b.podInline(resourcev1alpha2.AllocationModeWaitForFirstConsumer)
+pod, template := b.podInline(resourceapi.AllocationModeWaitForFirstConsumer)
objects = append(objects, pod, template)

b.create(ctx, objects...)

@@ -395,7 +395,7 @@ var _ = framework.SIGDescribe("node")("DRA", feature.DynamicResourceAllocation,
objects, expectedEnv := b.flexibleParameters()
pods := make([]*v1.Pod, numPods)
for i := 0; i < numPods; i++ {
-pod, template := b.podInline(resourcev1alpha2.AllocationModeWaitForFirstConsumer)
+pod, template := b.podInline(resourceapi.AllocationModeWaitForFirstConsumer)
pods[i] = pod
objects = append(objects, pod, template)
}

@@ -421,7 +421,7 @@ var _ = framework.SIGDescribe("node")("DRA", feature.DynamicResourceAllocation,

ginkgo.It("supports sharing a claim concurrently", func(ctx context.Context) {
objects, expectedEnv := b.flexibleParameters()
-objects = append(objects, b.externalClaim(resourcev1alpha2.AllocationModeWaitForFirstConsumer))
+objects = append(objects, b.externalClaim(resourceapi.AllocationModeWaitForFirstConsumer))

pods := make([]*v1.Pod, numPods)
for i := 0; i < numPods; i++ {

@@ -456,10 +456,10 @@ var _ = framework.SIGDescribe("node")("DRA", feature.DynamicResourceAllocation,
case parameterModeConfigMap:
ginkgo.Skip("cannot change the driver's controller behavior on-the-fly")
case parameterModeTranslated, parameterModeStructured:
-objects[len(objects)-1].(*resourcev1alpha2.ResourceClaimParameters).Shareable = false
+objects[len(objects)-1].(*resourceapi.ResourceClaimParameters).Shareable = false
}

-objects = append(objects, b.externalClaim(resourcev1alpha2.AllocationModeWaitForFirstConsumer))
+objects = append(objects, b.externalClaim(resourceapi.AllocationModeWaitForFirstConsumer))

pods := make([]*v1.Pod, numPods)
for i := 0; i < numPods; i++ {

@@ -491,8 +491,8 @@ var _ = framework.SIGDescribe("node")("DRA", feature.DynamicResourceAllocation,

ginkgo.It("retries pod scheduling after creating resource class", func(ctx context.Context) {
objects, expectedEnv := b.flexibleParameters()
-pod, template := b.podInline(resourcev1alpha2.AllocationModeWaitForFirstConsumer)
-class, err := f.ClientSet.ResourceV1alpha2().ResourceClasses().Get(ctx, template.Spec.Spec.ResourceClassName, metav1.GetOptions{})
+pod, template := b.podInline(resourceapi.AllocationModeWaitForFirstConsumer)
+class, err := f.ClientSet.ResourceV1alpha3().ResourceClasses().Get(ctx, template.Spec.Spec.ResourceClassName, metav1.GetOptions{})
framework.ExpectNoError(err)
template.Spec.Spec.ResourceClassName += "-b"
objects = append(objects, template, pod)

@@ -510,10 +510,10 @@ var _ = framework.SIGDescribe("node")("DRA", feature.DynamicResourceAllocation,

ginkgo.It("retries pod scheduling after updating resource class", func(ctx context.Context) {
objects, expectedEnv := b.flexibleParameters()
-pod, template := b.podInline(resourcev1alpha2.AllocationModeWaitForFirstConsumer)
+pod, template := b.podInline(resourceapi.AllocationModeWaitForFirstConsumer)

// First modify the class so that it matches no nodes.
-class, err := f.ClientSet.ResourceV1alpha2().ResourceClasses().Get(ctx, template.Spec.Spec.ResourceClassName, metav1.GetOptions{})
+class, err := f.ClientSet.ResourceV1alpha3().ResourceClasses().Get(ctx, template.Spec.Spec.ResourceClassName, metav1.GetOptions{})
framework.ExpectNoError(err)
class.SuitableNodes = &v1.NodeSelector{
NodeSelectorTerms: []v1.NodeSelectorTerm{

@@ -528,7 +528,7 @@ var _ = framework.SIGDescribe("node")("DRA", feature.DynamicResourceAllocation,
},
},
}
-class, err = f.ClientSet.ResourceV1alpha2().ResourceClasses().Update(ctx, class, metav1.UpdateOptions{})
+class, err = f.ClientSet.ResourceV1alpha3().ResourceClasses().Update(ctx, class, metav1.UpdateOptions{})
framework.ExpectNoError(err)

// Now create the pod.

@@ -539,14 +539,14 @@ var _ = framework.SIGDescribe("node")("DRA", feature.DynamicResourceAllocation,

// Unblock the pod.
class.SuitableNodes = nil
-_, err = f.ClientSet.ResourceV1alpha2().ResourceClasses().Update(ctx, class, metav1.UpdateOptions{})
+_, err = f.ClientSet.ResourceV1alpha3().ResourceClasses().Update(ctx, class, metav1.UpdateOptions{})
framework.ExpectNoError(err)

b.testPod(ctx, f.ClientSet, pod, expectedEnv...)
})

ginkgo.It("runs a pod without a generated resource claim", func(ctx context.Context) {
-pod, _ /* template */ := b.podInline(resourcev1alpha2.AllocationModeWaitForFirstConsumer)
+pod, _ /* template */ := b.podInline(resourceapi.AllocationModeWaitForFirstConsumer)
created := b.create(ctx, pod)
pod = created[0].(*v1.Pod)

@@ -564,11 +564,11 @@ var _ = framework.SIGDescribe("node")("DRA", feature.DynamicResourceAllocation,
})

ginkgo.Context("with delayed allocation", func() {
-claimTests(b, driver, resourcev1alpha2.AllocationModeWaitForFirstConsumer)
+claimTests(b, driver, resourceapi.AllocationModeWaitForFirstConsumer)
})

ginkgo.Context("with immediate allocation", func() {
-claimTests(b, driver, resourcev1alpha2.AllocationModeImmediate)
+claimTests(b, driver, resourceapi.AllocationModeImmediate)
})
}
@@ -603,7 +603,7 @@ var _ = framework.SIGDescribe("node")("DRA", feature.DynamicResourceAllocation,
}
pod1 := createPod()
pod2 := createPod()
-claim := b.externalClaim(resourcev1alpha2.AllocationModeWaitForFirstConsumer)
+claim := b.externalClaim(resourceapi.AllocationModeWaitForFirstConsumer)
b.create(ctx, parameters, claim, pod1, pod2)

for _, pod := range []*v1.Pod{pod1, pod2} {

@@ -624,7 +624,7 @@ var _ = framework.SIGDescribe("node")("DRA", feature.DynamicResourceAllocation,
instance := f.UniqueName + "-test-app"
pod := b.podExternal()
pod.Labels[label] = instance
-claim := b.externalClaim(resourcev1alpha2.AllocationModeWaitForFirstConsumer)
+claim := b.externalClaim(resourceapi.AllocationModeWaitForFirstConsumer)
b.create(ctx, parameters, claim, pod)

ginkgo.By("wait for test pod " + pod.Name + " to run")

@@ -655,7 +655,7 @@ var _ = framework.SIGDescribe("node")("DRA", feature.DynamicResourceAllocation,
ginkgo.DeferCleanup(e2enode.RemoveTaintOffNode, f.ClientSet, nodeName, taint)

ginkgo.By("waiting for claim to get deallocated")
-gomega.Eventually(ctx, framework.GetObject(b.f.ClientSet.ResourceV1alpha2().ResourceClaims(b.f.Namespace.Name).Get, claim.Name, metav1.GetOptions{})).WithTimeout(f.Timeouts.PodDelete).Should(gomega.HaveField("Status.Allocation", gomega.BeNil()))
+gomega.Eventually(ctx, framework.GetObject(b.f.ClientSet.ResourceV1alpha3().ResourceClaims(b.f.Namespace.Name).Get, claim.Name, metav1.GetOptions{})).WithTimeout(f.Timeouts.PodDelete).Should(gomega.HaveField("Status.Allocation", gomega.BeNil()))
})
}

@@ -714,13 +714,13 @@ var _ = framework.SIGDescribe("node")("DRA", feature.DynamicResourceAllocation,
parameters1 := b.parameters()
parameters2 := b2.parameters()
// Order is relevant here: each pod must be matched with its own claim.
-pod1claim1 := b.externalClaim(resourcev1alpha2.AllocationModeWaitForFirstConsumer)
+pod1claim1 := b.externalClaim(resourceapi.AllocationModeWaitForFirstConsumer)
pod1 := b.podExternal()
-pod2claim1 := b2.externalClaim(resourcev1alpha2.AllocationModeWaitForFirstConsumer)
+pod2claim1 := b2.externalClaim(resourceapi.AllocationModeWaitForFirstConsumer)
pod2 := b2.podExternal()

// Add another claim to pod1.
-pod1claim2 := b2.externalClaim(resourcev1alpha2.AllocationModeWaitForFirstConsumer)
+pod1claim2 := b2.externalClaim(resourceapi.AllocationModeWaitForFirstConsumer)
pod1.Spec.ResourceClaims = append(pod1.Spec.ResourceClaims,
v1.PodResourceClaim{
Name: "claim-other",

@@ -750,7 +750,7 @@ var _ = framework.SIGDescribe("node")("DRA", feature.DynamicResourceAllocation,
ginkgo.By("waiting for one claim from driver1 to be allocated")
var nodeSelector *v1.NodeSelector
gomega.Eventually(ctx, func(ctx context.Context) (int, error) {
-claims, err := f.ClientSet.ResourceV1alpha2().ResourceClaims(f.Namespace.Name).List(ctx, metav1.ListOptions{})
+claims, err := f.ClientSet.ResourceV1alpha3().ResourceClaims(f.Namespace.Name).List(ctx, metav1.ListOptions{})
if err != nil {
return 0, err
}

@@ -801,7 +801,7 @@ var _ = framework.SIGDescribe("node")("DRA", feature.DynamicResourceAllocation,
driver.parameterMode = parameterMode
b := newBuilder(f, driver)

-tests := func(allocationMode resourcev1alpha2.AllocationMode) {
+tests := func(allocationMode resourceapi.AllocationMode) {
ginkgo.It("uses all resources", func(ctx context.Context) {
objs, _ := b.flexibleParameters()
var pods []*v1.Pod

@@ -841,11 +841,11 @@ var _ = framework.SIGDescribe("node")("DRA", feature.DynamicResourceAllocation,
}

ginkgo.Context("with delayed allocation", func() {
-tests(resourcev1alpha2.AllocationModeWaitForFirstConsumer)
+tests(resourceapi.AllocationModeWaitForFirstConsumer)
})

ginkgo.Context("with immediate allocation", func() {
-tests(resourcev1alpha2.AllocationModeImmediate)
+tests(resourceapi.AllocationModeImmediate)
})
})
}

@@ -873,7 +873,7 @@ var _ = framework.SIGDescribe("node")("DRA", feature.DynamicResourceAllocation,

ginkgo.It("truncates the name of a generated resource claim", func(ctx context.Context) {
parameters := b.parameters()
-pod, template := b.podInline(resourcev1alpha2.AllocationModeWaitForFirstConsumer)
+pod, template := b.podInline(resourceapi.AllocationModeWaitForFirstConsumer)
pod.Name = strings.Repeat("p", 63)
pod.Spec.ResourceClaims[0].Name = strings.Repeat("c", 63)
pod.Spec.Containers[0].Resources.Claims[0].Name = pod.Spec.ResourceClaims[0].Name
@@ -915,18 +915,18 @@ var _ = framework.SIGDescribe("node")("DRA", feature.DynamicResourceAllocation,

// This is the slice that we try to create. It needs to be deleted
// after testing, if it still exists at that time.
-fictionalNodeSlice := &resourcev1alpha2.ResourceSlice{
+fictionalNodeSlice := &resourceapi.ResourceSlice{
ObjectMeta: metav1.ObjectMeta{
Name: fictionalNodeName + "-slice",
},
NodeName: fictionalNodeName,
DriverName: "dra.example.com",
-ResourceModel: resourcev1alpha2.ResourceModel{
-NamedResources: &resourcev1alpha2.NamedResourcesResources{},
+ResourceModel: resourceapi.ResourceModel{
+NamedResources: &resourceapi.NamedResourcesResources{},
},
}
ginkgo.DeferCleanup(func(ctx context.Context) {
-err := f.ClientSet.ResourceV1alpha2().ResourceSlices().Delete(ctx, fictionalNodeSlice.Name, metav1.DeleteOptions{})
+err := f.ClientSet.ResourceV1alpha3().ResourceSlices().Delete(ctx, fictionalNodeSlice.Name, metav1.DeleteOptions{})
if !apierrors.IsNotFound(err) {
framework.ExpectNoError(err)
}

@@ -935,36 +935,36 @@ var _ = framework.SIGDescribe("node")("DRA", feature.DynamicResourceAllocation,
// Message from test-driver/deploy/example/plugin-permissions.yaml
matchVAPDeniedError := gomega.MatchError(gomega.ContainSubstring("may only modify resourceslices that belong to the node the pod is running on"))

-mustCreate := func(clientSet kubernetes.Interface, clientName string, slice *resourcev1alpha2.ResourceSlice) *resourcev1alpha2.ResourceSlice {
+mustCreate := func(clientSet kubernetes.Interface, clientName string, slice *resourceapi.ResourceSlice) *resourceapi.ResourceSlice {
ginkgo.GinkgoHelper()
-slice, err := clientSet.ResourceV1alpha2().ResourceSlices().Create(ctx, slice, metav1.CreateOptions{})
+slice, err := clientSet.ResourceV1alpha3().ResourceSlices().Create(ctx, slice, metav1.CreateOptions{})
framework.ExpectNoError(err, fmt.Sprintf("CREATE: %s + %s", clientName, slice.Name))
return slice
}
-mustUpdate := func(clientSet kubernetes.Interface, clientName string, slice *resourcev1alpha2.ResourceSlice) *resourcev1alpha2.ResourceSlice {
+mustUpdate := func(clientSet kubernetes.Interface, clientName string, slice *resourceapi.ResourceSlice) *resourceapi.ResourceSlice {
ginkgo.GinkgoHelper()
-slice, err := clientSet.ResourceV1alpha2().ResourceSlices().Update(ctx, slice, metav1.UpdateOptions{})
+slice, err := clientSet.ResourceV1alpha3().ResourceSlices().Update(ctx, slice, metav1.UpdateOptions{})
framework.ExpectNoError(err, fmt.Sprintf("UPDATE: %s + %s", clientName, slice.Name))
return slice
}
-mustDelete := func(clientSet kubernetes.Interface, clientName string, slice *resourcev1alpha2.ResourceSlice) {
+mustDelete := func(clientSet kubernetes.Interface, clientName string, slice *resourceapi.ResourceSlice) {
ginkgo.GinkgoHelper()
-err := clientSet.ResourceV1alpha2().ResourceSlices().Delete(ctx, slice.Name, metav1.DeleteOptions{})
+err := clientSet.ResourceV1alpha3().ResourceSlices().Delete(ctx, slice.Name, metav1.DeleteOptions{})
framework.ExpectNoError(err, fmt.Sprintf("DELETE: %s + %s", clientName, slice.Name))
}
-mustFailToCreate := func(clientSet kubernetes.Interface, clientName string, slice *resourcev1alpha2.ResourceSlice, matchError types.GomegaMatcher) {
+mustFailToCreate := func(clientSet kubernetes.Interface, clientName string, slice *resourceapi.ResourceSlice, matchError types.GomegaMatcher) {
ginkgo.GinkgoHelper()
-_, err := clientSet.ResourceV1alpha2().ResourceSlices().Create(ctx, slice, metav1.CreateOptions{})
+_, err := clientSet.ResourceV1alpha3().ResourceSlices().Create(ctx, slice, metav1.CreateOptions{})
gomega.Expect(err).To(matchError, fmt.Sprintf("CREATE: %s + %s", clientName, slice.Name))
}
-mustFailToUpdate := func(clientSet kubernetes.Interface, clientName string, slice *resourcev1alpha2.ResourceSlice, matchError types.GomegaMatcher) {
+mustFailToUpdate := func(clientSet kubernetes.Interface, clientName string, slice *resourceapi.ResourceSlice, matchError types.GomegaMatcher) {
ginkgo.GinkgoHelper()
-_, err := clientSet.ResourceV1alpha2().ResourceSlices().Update(ctx, slice, metav1.UpdateOptions{})
+_, err := clientSet.ResourceV1alpha3().ResourceSlices().Update(ctx, slice, metav1.UpdateOptions{})
gomega.Expect(err).To(matchError, fmt.Sprintf("UPDATE: %s + %s", clientName, slice.Name))
}
-mustFailToDelete := func(clientSet kubernetes.Interface, clientName string, slice *resourcev1alpha2.ResourceSlice, matchError types.GomegaMatcher) {
+mustFailToDelete := func(clientSet kubernetes.Interface, clientName string, slice *resourceapi.ResourceSlice, matchError types.GomegaMatcher) {
ginkgo.GinkgoHelper()
-err := clientSet.ResourceV1alpha2().ResourceSlices().Delete(ctx, slice.Name, metav1.DeleteOptions{})
+err := clientSet.ResourceV1alpha3().ResourceSlices().Delete(ctx, slice.Name, metav1.DeleteOptions{})
gomega.Expect(err).To(matchError, fmt.Sprintf("DELETE: %s + %s", clientName, slice.Name))
}
@@ -987,7 +987,7 @@ var _ = framework.SIGDescribe("node")("DRA", feature.DynamicResourceAllocation,

// Now check for exactly the right set of objects for all nodes.
ginkgo.By("check if ResourceSlice object(s) exist on the API server")
-resourceClient := f.ClientSet.ResourceV1alpha2().ResourceSlices()
+resourceClient := f.ClientSet.ResourceV1alpha3().ResourceSlices()
var expectedObjects []any
for _, nodeName := range nodes.NodeNames {
node, err := f.ClientSet.CoreV1().Nodes().Get(ctx, nodeName, metav1.GetOptions{})

@@ -1009,14 +1009,14 @@ var _ = framework.SIGDescribe("node")("DRA", feature.DynamicResourceAllocation,
}),
"NodeName": gomega.Equal(nodeName),
"DriverName": gomega.Equal(driver.Name),
-"ResourceModel": gomega.Equal(resourcev1alpha2.ResourceModel{NamedResources: &resourcev1alpha2.NamedResourcesResources{
-Instances: []resourcev1alpha2.NamedResourcesInstance{{Name: "instance-00"}},
+"ResourceModel": gomega.Equal(resourceapi.ResourceModel{NamedResources: &resourceapi.NamedResourcesResources{
+Instances: []resourceapi.NamedResourcesInstance{{Name: "instance-00"}},
}}),
}),
)
}
matchSlices := gomega.ContainElements(expectedObjects...)
-getSlices := func(ctx context.Context) ([]resourcev1alpha2.ResourceSlice, error) {
+getSlices := func(ctx context.Context) ([]resourceapi.ResourceSlice, error) {
slices, err := resourceClient.List(ctx, metav1.ListOptions{FieldSelector: fmt.Sprintf("driverName=%s", driverName)})
if err != nil {
return nil, err

@@ -1060,7 +1060,7 @@ var _ = framework.SIGDescribe("node")("DRA", feature.DynamicResourceAllocation,
ginkgo.It("reuses an allocated immediate claim", func(ctx context.Context) {
objects := []klog.KMetadata{
b.parameters(),
-b.externalClaim(resourcev1alpha2.AllocationModeImmediate),
+b.externalClaim(resourceapi.AllocationModeImmediate),
}
podExternal := b.podExternal()

@@ -1068,7 +1068,7 @@ var _ = framework.SIGDescribe("node")("DRA", feature.DynamicResourceAllocation,
// try to bind two pods at the same time.
numPods := 5
for i := 0; i < numPods; i++ {
-podInline, claimTemplate := b.podInline(resourcev1alpha2.AllocationModeWaitForFirstConsumer)
+podInline, claimTemplate := b.podInline(resourceapi.AllocationModeWaitForFirstConsumer)
podInline.Spec.Containers[0].Resources.Claims = append(podInline.Spec.Containers[0].Resources.Claims, podExternal.Spec.Containers[0].Resources.Claims[0])
podInline.Spec.ResourceClaims = append(podInline.Spec.ResourceClaims, podExternal.Spec.ResourceClaims[0])
objects = append(objects, claimTemplate, podInline)

@@ -1124,7 +1124,7 @@ var _ = framework.SIGDescribe("node")("DRA", feature.DynamicResourceAllocation,
ginkgo.It("shares an allocated immediate claim", func(ctx context.Context) {
objects := []klog.KMetadata{
b.parameters(),
-b.externalClaim(resourcev1alpha2.AllocationModeImmediate),
+b.externalClaim(resourceapi.AllocationModeImmediate),
}
// Create many pods to increase the chance that the scheduler will
// try to bind two pods at the same time.

@@ -1147,7 +1147,7 @@ var _ = framework.SIGDescribe("node")("DRA", feature.DynamicResourceAllocation,
// This does not work for resource claim templates and only isn't
// a problem here because the resource is network-attached and available
// on all nodes.
-preScheduledTests := func(b *builder, driver *Driver, allocationMode resourcev1alpha2.AllocationMode) {
+preScheduledTests := func(b *builder, driver *Driver, allocationMode resourceapi.AllocationMode) {
ginkgo.It("supports scheduled pod referencing inline resource claim", func(ctx context.Context) {
parameters := b.parameters()
pod, template := b.podInline(allocationMode)
@@ -1171,8 +1171,8 @@ var _ = framework.SIGDescribe("node")("DRA", feature.DynamicResourceAllocation,
ginkgo.Context("with delayed allocation and setting ReservedFor", func() {
driver := NewDriver(f, nodes, networkResources)
b := newBuilder(f, driver)
-preScheduledTests(b, driver, resourcev1alpha2.AllocationModeWaitForFirstConsumer)
-claimTests(b, driver, resourcev1alpha2.AllocationModeWaitForFirstConsumer)
+preScheduledTests(b, driver, resourceapi.AllocationModeWaitForFirstConsumer)
+claimTests(b, driver, resourceapi.AllocationModeWaitForFirstConsumer)
})

ginkgo.Context("with delayed allocation and not setting ReservedFor", func() {

@@ -1182,15 +1182,15 @@ var _ = framework.SIGDescribe("node")("DRA", feature.DynamicResourceAllocation,
return resources
})
b := newBuilder(f, driver)
-preScheduledTests(b, driver, resourcev1alpha2.AllocationModeWaitForFirstConsumer)
-claimTests(b, driver, resourcev1alpha2.AllocationModeWaitForFirstConsumer)
+preScheduledTests(b, driver, resourceapi.AllocationModeWaitForFirstConsumer)
+claimTests(b, driver, resourceapi.AllocationModeWaitForFirstConsumer)
})

ginkgo.Context("with immediate allocation", func() {
driver := NewDriver(f, nodes, networkResources)
b := newBuilder(f, driver)
-preScheduledTests(b, driver, resourcev1alpha2.AllocationModeImmediate)
-claimTests(b, driver, resourcev1alpha2.AllocationModeImmediate)
+preScheduledTests(b, driver, resourceapi.AllocationModeImmediate)
+claimTests(b, driver, resourceapi.AllocationModeImmediate)

})
})

@@ -1209,12 +1209,12 @@ var _ = framework.SIGDescribe("node")("DRA", feature.DynamicResourceAllocation,
ginkgo.It("work", func(ctx context.Context) {
parameters1 := b1.parameters()
parameters2 := b2.parameters()
-claim1 := b1.externalClaim(resourcev1alpha2.AllocationModeWaitForFirstConsumer)
-claim1b := b1.externalClaim(resourcev1alpha2.AllocationModeWaitForFirstConsumer)
-claim2 := b2.externalClaim(resourcev1alpha2.AllocationModeWaitForFirstConsumer)
-claim2b := b2.externalClaim(resourcev1alpha2.AllocationModeWaitForFirstConsumer)
+claim1 := b1.externalClaim(resourceapi.AllocationModeWaitForFirstConsumer)
+claim1b := b1.externalClaim(resourceapi.AllocationModeWaitForFirstConsumer)
+claim2 := b2.externalClaim(resourceapi.AllocationModeWaitForFirstConsumer)
+claim2b := b2.externalClaim(resourceapi.AllocationModeWaitForFirstConsumer)
pod := b1.podExternal()
-for i, claim := range []*resourcev1alpha2.ResourceClaim{claim1b, claim2, claim2b} {
+for i, claim := range []*resourceapi.ResourceClaim{claim1b, claim2, claim2b} {
claim := claim
pod.Spec.ResourceClaims = append(pod.Spec.ResourceClaims,
v1.PodResourceClaim{
Name: "claim-other",

@@ -1258,8 +1258,8 @@ func (b *builder) className() string {

// class returns the resource class that the builder's other objects
// reference.
-func (b *builder) class() *resourcev1alpha2.ResourceClass {
-class := &resourcev1alpha2.ResourceClass{
+func (b *builder) class() *resourceapi.ResourceClass {
+class := &resourceapi.ResourceClass{
ObjectMeta: metav1.ObjectMeta{
Name: b.className(),
},

@@ -1268,7 +1268,7 @@ func (b *builder) class() *resourcev1alpha2.ResourceClass {
StructuredParameters: ptr.To(b.driver.parameterMode != parameterModeConfigMap),
}
if b.classParametersName != "" {
-class.ParametersRef = &resourcev1alpha2.ResourceClassParametersReference{
+class.ParametersRef = &resourceapi.ResourceClassParametersReference{
APIGroup: b.driver.parameterAPIGroup,
Kind: b.driver.classParameterAPIKind,
Name: b.classParametersName,

@@ -1298,19 +1298,19 @@ func (b *builder) nodeSelector() *v1.NodeSelector {

// externalClaim returns external resource claim
// that test pods can reference
-func (b *builder) externalClaim(allocationMode resourcev1alpha2.AllocationMode) *resourcev1alpha2.ResourceClaim {
+func (b *builder) externalClaim(allocationMode resourceapi.AllocationMode) *resourceapi.ResourceClaim {
b.claimCounter++
name := "external-claim" + b.driver.NameSuffix // This is what podExternal expects.
if b.claimCounter > 1 {
name += fmt.Sprintf("-%d", b.claimCounter)
}
-return &resourcev1alpha2.ResourceClaim{
+return &resourceapi.ResourceClaim{
ObjectMeta: metav1.ObjectMeta{
Name: name,
},
-Spec: resourcev1alpha2.ResourceClaimSpec{
+Spec: resourceapi.ResourceClaimSpec{
ResourceClassName: b.className(),
-ParametersRef: &resourcev1alpha2.ResourceClaimParametersReference{
+ParametersRef: &resourceapi.ResourceClaimParametersReference{
APIGroup: b.driver.parameterAPIGroup,
Kind: b.driver.claimParameterAPIKind,
Name: b.parametersName(),
@@ -1381,22 +1381,22 @@ func (b *builder) parameters(kv ...string) *v1.ConfigMap {
}
}

-func (b *builder) classParameters(generatedFrom string, kv ...string) *resourcev1alpha2.ResourceClassParameters {
+func (b *builder) classParameters(generatedFrom string, kv ...string) *resourceapi.ResourceClassParameters {
raw := b.rawParameterData(kv...)
b.parametersCounter++
-parameters := &resourcev1alpha2.ResourceClassParameters{
+parameters := &resourceapi.ResourceClassParameters{
ObjectMeta: metav1.ObjectMeta{
Namespace: b.f.Namespace.Name,
Name: b.parametersName(),
},

-VendorParameters: []resourcev1alpha2.VendorParameters{
+VendorParameters: []resourceapi.VendorParameters{
{DriverName: b.driver.Name, Parameters: runtime.RawExtension{Raw: raw}},
},
}

if generatedFrom != "" {
-parameters.GeneratedFrom = &resourcev1alpha2.ResourceClassParametersReference{
+parameters.GeneratedFrom = &resourceapi.ResourceClassParametersReference{
Kind: "ConfigMap",
Namespace: b.f.Namespace.Name,
Name: generatedFrom,

@@ -1406,9 +1406,9 @@ func (b *builder) classParameters(generatedFrom string, kv ...string) *resourcev
return parameters
}

-func (b *builder) claimParameters(generatedFrom string, claimKV, requestKV []string) *resourcev1alpha2.ResourceClaimParameters {
+func (b *builder) claimParameters(generatedFrom string, claimKV, requestKV []string) *resourceapi.ResourceClaimParameters {
b.parametersCounter++
-parameters := &resourcev1alpha2.ResourceClaimParameters{
+parameters := &resourceapi.ResourceClaimParameters{
ObjectMeta: metav1.ObjectMeta{
Namespace: b.f.Namespace.Name,
Name: b.parametersName(),

@@ -1419,15 +1419,15 @@ func (b *builder) claimParameters(generatedFrom string, claimKV, requestKV []str
// Without any request, nothing gets allocated and vendor
// parameters are also not passed down because they get
// attached to the allocation result.
-DriverRequests: []resourcev1alpha2.DriverRequests{
+DriverRequests: []resourceapi.DriverRequests{
{
DriverName: b.driver.Name,
VendorParameters: runtime.RawExtension{Raw: b.rawParameterData(claimKV...)},
-Requests: []resourcev1alpha2.ResourceRequest{
+Requests: []resourceapi.ResourceRequest{
{
VendorParameters: runtime.RawExtension{Raw: b.rawParameterData(requestKV...)},
-ResourceRequestModel: resourcev1alpha2.ResourceRequestModel{
-NamedResources: &resourcev1alpha2.NamedResourcesRequest{
+ResourceRequestModel: resourceapi.ResourceRequestModel{
+NamedResources: &resourceapi.NamedResourcesRequest{
Selector: "true",
},
},

@@ -1438,7 +1438,7 @@ func (b *builder) claimParameters(generatedFrom string, claimKV, requestKV []str
}

if generatedFrom != "" {
-parameters.GeneratedFrom = &resourcev1alpha2.ResourceClaimParametersReference{
+parameters.GeneratedFrom = &resourceapi.ResourceClaimParametersReference{
Kind: "ConfigMap",
Name: generatedFrom,
}

@@ -1493,7 +1493,7 @@ func (b *builder) pod() *v1.Pod {
}

// makePodInline adds an inline resource claim with default class name and parameters.
-func (b *builder) podInline(allocationMode resourcev1alpha2.AllocationMode) (*v1.Pod, *resourcev1alpha2.ResourceClaimTemplate) {
+func (b *builder) podInline(allocationMode resourceapi.AllocationMode) (*v1.Pod, *resourceapi.ResourceClaimTemplate) {
pod := b.pod()
pod.Spec.Containers[0].Name = "with-resource"
podClaimName := "my-inline-claim"

@@ -1504,15 +1504,15 @@ func (b *builder) podInline(allocationMode resourcev1alpha2.AllocationMode) (*v1
ResourceClaimTemplateName: ptr.To(pod.Name),
},
}
-template := &resourcev1alpha2.ResourceClaimTemplate{
+template := &resourceapi.ResourceClaimTemplate{
ObjectMeta: metav1.ObjectMeta{
Name: pod.Name,
Namespace: pod.Namespace,
},
-Spec: resourcev1alpha2.ResourceClaimTemplateSpec{
-Spec: resourcev1alpha2.ResourceClaimSpec{
+Spec: resourceapi.ResourceClaimTemplateSpec{
+Spec: resourceapi.ResourceClaimSpec{
ResourceClassName: b.className(),
-ParametersRef: &resourcev1alpha2.ResourceClaimParametersReference{
+ParametersRef: &resourceapi.ResourceClaimParametersReference{
APIGroup: b.driver.parameterAPIGroup,
Kind: b.driver.claimParameterAPIKind,
Name: b.parametersName(),

@@ -1525,7 +1525,7 @@ func (b *builder) podInline(allocationMode resourcev1alpha2.AllocationMode) (*v1
}

// podInlineMultiple returns a pod with inline resource claim referenced by 3 containers
-func (b *builder) podInlineMultiple(allocationMode resourcev1alpha2.AllocationMode) (*v1.Pod, *resourcev1alpha2.ResourceClaimTemplate) {
+func (b *builder) podInlineMultiple(allocationMode resourceapi.AllocationMode) (*v1.Pod, *resourceapi.ResourceClaimTemplate) {
pod, template := b.podInline(allocationMode)
pod.Spec.Containers = append(pod.Spec.Containers, *pod.Spec.Containers[0].DeepCopy(), *pod.Spec.Containers[0].DeepCopy())
pod.Spec.Containers[1].Name = pod.Spec.Containers[1].Name + "-1"
@@ -1566,28 +1566,28 @@ func (b *builder) create(ctx context.Context, objs ...klog.KMetadata) []klog.KMe
var err error
var createdObj klog.KMetadata
switch obj := obj.(type) {
-case *resourcev1alpha2.ResourceClass:
-createdObj, err = b.f.ClientSet.ResourceV1alpha2().ResourceClasses().Create(ctx, obj, metav1.CreateOptions{})
+case *resourceapi.ResourceClass:
+createdObj, err = b.f.ClientSet.ResourceV1alpha3().ResourceClasses().Create(ctx, obj, metav1.CreateOptions{})
ginkgo.DeferCleanup(func(ctx context.Context) {
-err := b.f.ClientSet.ResourceV1alpha2().ResourceClasses().Delete(ctx, createdObj.GetName(), metav1.DeleteOptions{})
+err := b.f.ClientSet.ResourceV1alpha3().ResourceClasses().Delete(ctx, createdObj.GetName(), metav1.DeleteOptions{})
framework.ExpectNoError(err, "delete resource class")
})
case *v1.Pod:
createdObj, err = b.f.ClientSet.CoreV1().Pods(b.f.Namespace.Name).Create(ctx, obj, metav1.CreateOptions{})
case *v1.ConfigMap:
createdObj, err = b.f.ClientSet.CoreV1().ConfigMaps(b.f.Namespace.Name).Create(ctx, obj, metav1.CreateOptions{})
-case *resourcev1alpha2.ResourceClaim:
-createdObj, err = b.f.ClientSet.ResourceV1alpha2().ResourceClaims(b.f.Namespace.Name).Create(ctx, obj, metav1.CreateOptions{})
-case *resourcev1alpha2.ResourceClaimTemplate:
-createdObj, err = b.f.ClientSet.ResourceV1alpha2().ResourceClaimTemplates(b.f.Namespace.Name).Create(ctx, obj, metav1.CreateOptions{})
-case *resourcev1alpha2.ResourceClassParameters:
-createdObj, err = b.f.ClientSet.ResourceV1alpha2().ResourceClassParameters(b.f.Namespace.Name).Create(ctx, obj, metav1.CreateOptions{})
-case *resourcev1alpha2.ResourceClaimParameters:
-createdObj, err = b.f.ClientSet.ResourceV1alpha2().ResourceClaimParameters(b.f.Namespace.Name).Create(ctx, obj, metav1.CreateOptions{})
-case *resourcev1alpha2.ResourceSlice:
-createdObj, err = b.f.ClientSet.ResourceV1alpha2().ResourceSlices().Create(ctx, obj, metav1.CreateOptions{})
+case *resourceapi.ResourceClaim:
+createdObj, err = b.f.ClientSet.ResourceV1alpha3().ResourceClaims(b.f.Namespace.Name).Create(ctx, obj, metav1.CreateOptions{})
+case *resourceapi.ResourceClaimTemplate:
+createdObj, err = b.f.ClientSet.ResourceV1alpha3().ResourceClaimTemplates(b.f.Namespace.Name).Create(ctx, obj, metav1.CreateOptions{})
+case *resourceapi.ResourceClassParameters:
+createdObj, err = b.f.ClientSet.ResourceV1alpha3().ResourceClassParameters(b.f.Namespace.Name).Create(ctx, obj, metav1.CreateOptions{})
+case *resourceapi.ResourceClaimParameters:
+createdObj, err = b.f.ClientSet.ResourceV1alpha3().ResourceClaimParameters(b.f.Namespace.Name).Create(ctx, obj, metav1.CreateOptions{})
+case *resourceapi.ResourceSlice:
+createdObj, err = b.f.ClientSet.ResourceV1alpha3().ResourceSlices().Create(ctx, obj, metav1.CreateOptions{})
ginkgo.DeferCleanup(func(ctx context.Context) {
-err := b.f.ClientSet.ResourceV1alpha2().ResourceSlices().Delete(ctx, createdObj.GetName(), metav1.DeleteOptions{})
+err := b.f.ClientSet.ResourceV1alpha3().ResourceSlices().Delete(ctx, createdObj.GetName(), metav1.DeleteOptions{})
framework.ExpectNoError(err, "delete node resource slice")
})
default:

@@ -1659,14 +1659,14 @@ func (b *builder) tearDown(ctx context.Context) {
return b.listTestPods(ctx)
}).WithTimeout(time.Minute).Should(gomega.BeEmpty(), "remaining pods despite deletion")

-claims, err := b.f.ClientSet.ResourceV1alpha2().ResourceClaims(b.f.Namespace.Name).List(ctx, metav1.ListOptions{})
+claims, err := b.f.ClientSet.ResourceV1alpha3().ResourceClaims(b.f.Namespace.Name).List(ctx, metav1.ListOptions{})
framework.ExpectNoError(err, "get resource claims")
for _, claim := range claims.Items {
if claim.DeletionTimestamp != nil {
continue
}
ginkgo.By(fmt.Sprintf("deleting %T %s", &claim, klog.KObj(&claim)))
-err := b.f.ClientSet.ResourceV1alpha2().ResourceClaims(b.f.Namespace.Name).Delete(ctx, claim.Name, metav1.DeleteOptions{})
+err := b.f.ClientSet.ResourceV1alpha3().ResourceClaims(b.f.Namespace.Name).Delete(ctx, claim.Name, metav1.DeleteOptions{})
if !apierrors.IsNotFound(err) {
framework.ExpectNoError(err, "delete claim")
}

@@ -1678,8 +1678,8 @@ func (b *builder) tearDown(ctx context.Context) {
}

ginkgo.By("waiting for claims to be deallocated and deleted")
-gomega.Eventually(func() ([]resourcev1alpha2.ResourceClaim, error) {
-claims, err := b.f.ClientSet.ResourceV1alpha2().ResourceClaims(b.f.Namespace.Name).List(ctx, metav1.ListOptions{})
+gomega.Eventually(func() ([]resourceapi.ResourceClaim, error) {
+claims, err := b.f.ClientSet.ResourceV1alpha3().ResourceClaims(b.f.Namespace.Name).List(ctx, metav1.ListOptions{})
if err != nil {
return nil, err
}
@@ -19,7 +19,7 @@ nodes:
v: "5"
apiServer:
extraArgs:
-runtime-config: "resource.k8s.io/v1alpha2=true"
+runtime-config: "resource.k8s.io/v1alpha3=true"
- |
kind: InitConfiguration
nodeRegistration:

@@ -55,7 +55,7 @@ kubelet<->dynamic resource allocation plugin interaction.

To try out the feature, build Kubernetes, then in one console run:
```console
-RUNTIME_CONFIG="resource.k8s.io/v1alpha2" FEATURE_GATES=DynamicResourceAllocation=true ALLOW_PRIVILEGED=1 ./hack/local-up-cluster.sh -O
+RUNTIME_CONFIG="resource.k8s.io/v1alpha3" FEATURE_GATES=DynamicResourceAllocation=true ALLOW_PRIVILEGED=1 ./hack/local-up-cluster.sh -O
```

In another:

@@ -28,7 +28,7 @@ import (
"sync"

v1 "k8s.io/api/core/v1"
-resourcev1alpha2 "k8s.io/api/resource/v1alpha2"
+resourceapi "k8s.io/api/resource/v1alpha3"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/types"

@@ -69,11 +69,11 @@ func (r Resources) AllNodes(nodeLister listersv1.NodeLister) []string {
return r.Nodes
}

-func (r Resources) NewAllocation(node string, data []byte) *resourcev1alpha2.AllocationResult {
-allocation := &resourcev1alpha2.AllocationResult{
+func (r Resources) NewAllocation(node string, data []byte) *resourceapi.AllocationResult {
+allocation := &resourceapi.AllocationResult{
Shareable: r.Shareable,
}
-allocation.ResourceHandles = []resourcev1alpha2.ResourceHandle{
+allocation.ResourceHandles = []resourceapi.ResourceHandle{
{
DriverName: r.DriverName,
Data: string(data),

@@ -196,7 +196,7 @@ func (c *ExampleController) GetNumDeallocations() int64 {
return c.numDeallocations
}

-func (c *ExampleController) GetClassParameters(ctx context.Context, class *resourcev1alpha2.ResourceClass) (interface{}, error) {
+func (c *ExampleController) GetClassParameters(ctx context.Context, class *resourceapi.ResourceClass) (interface{}, error) {
if class.ParametersRef != nil {
if class.ParametersRef.APIGroup != "" ||
class.ParametersRef.Kind != "ConfigMap" {

@@ -207,7 +207,7 @@ func (c *ExampleController) GetClassParameters(ctx context.Context, class *resou
return nil, nil
}

-func (c *ExampleController) GetClaimParameters(ctx context.Context, claim *resourcev1alpha2.ResourceClaim, class *resourcev1alpha2.ResourceClass, classParameters interface{}) (interface{}, error) {
+func (c *ExampleController) GetClaimParameters(ctx context.Context, claim *resourceapi.ResourceClaim, class *resourceapi.ResourceClass, classParameters interface{}) (interface{}, error) {
if claim.Spec.ParametersRef != nil {
if claim.Spec.ParametersRef.APIGroup != "" ||
claim.Spec.ParametersRef.Kind != "ConfigMap" {

@@ -249,7 +249,7 @@ func (c *ExampleController) allocateOneByOne(ctx context.Context, claimAllocatio
}

// allocate simply copies parameters as JSON map into a ResourceHandle.
-func (c *ExampleController) allocateOne(ctx context.Context, claim *resourcev1alpha2.ResourceClaim, claimParameters interface{}, class *resourcev1alpha2.ResourceClass, classParameters interface{}, selectedNode string) (result *resourcev1alpha2.AllocationResult, err error) {
+func (c *ExampleController) allocateOne(ctx context.Context, claim *resourceapi.ResourceClaim, claimParameters interface{}, class *resourceapi.ResourceClass, classParameters interface{}, selectedNode string) (result *resourceapi.AllocationResult, err error) {
logger := klog.LoggerWithValues(klog.LoggerWithName(klog.FromContext(ctx), "Allocate"), "claim", klog.KObj(claim), "uid", claim.UID)
defer func() {
logger.V(3).Info("done", "result", result, "err", err)

@@ -319,7 +319,7 @@ func (c *ExampleController) allocateOne(ctx context.Context, claim *resourcev1al
return allocation, nil
}

-func (c *ExampleController) Deallocate(ctx context.Context, claim *resourcev1alpha2.ResourceClaim) error {
+func (c *ExampleController) Deallocate(ctx context.Context, claim *resourceapi.ResourceClaim) error {
logger := klog.LoggerWithValues(klog.LoggerWithName(klog.FromContext(ctx), "Deallocate"), "claim", klog.KObj(claim), "uid", claim.UID)
c.mutex.Lock()
defer c.mutex.Unlock()

@@ -28,7 +28,7 @@ import (

"google.golang.org/grpc"

-resourceapi "k8s.io/api/resource/v1alpha2"
+resourceapi "k8s.io/api/resource/v1alpha3"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/types"

@@ -256,7 +256,7 @@ func (ex *ExamplePlugin) nodePrepareResource(ctx context.Context, claimReq *drap
// that it understands.
var resourceHandle string
var structuredResourceHandle *resourceapi.StructuredResourceHandle
-claim, err := ex.kubeClient.ResourceV1alpha2().ResourceClaims(claimReq.Namespace).Get(ctx, claimReq.Name, metav1.GetOptions{})
+claim, err := ex.kubeClient.ResourceV1alpha3().ResourceClaims(claimReq.Namespace).Get(ctx, claimReq.Name, metav1.GetOptions{})
if err != nil {
return nil, fmt.Errorf("retrieve claim %s/%s: %w", claimReq.Namespace, claimReq.Name, err)
}
@@ -2,7 +2,7 @@
# When using it instead of a functional one, scheduling a pod leads to:
# Warning FailedScheduling 16s default-scheduler 0/1 nodes are available: 1 excluded via potential node filter in resource class.

-apiVersion: resource.k8s.io/v1alpha2
+apiVersion: resource.k8s.io/v1alpha3
kind: ResourceClass
metadata:
name: example

@@ -8,7 +8,7 @@ metadata:
data:
a: b
---
-apiVersion: resource.k8s.io/v1alpha2
+apiVersion: resource.k8s.io/v1alpha3
kind: ResourceClaim
metadata:
name: external-claim

@@ -6,7 +6,7 @@ metadata:
data:
a: b
---
-apiVersion: resource.k8s.io/v1alpha2
+apiVersion: resource.k8s.io/v1alpha3
kind: ResourceClaimTemplate
metadata:
name: pause-template

@@ -8,7 +8,7 @@ metadata:
data:
a: b
---
-apiVersion: resource.k8s.io/v1alpha2
+apiVersion: resource.k8s.io/v1alpha3
kind: ResourceClaimTemplate
metadata:
name: test-inline-claim-template

@@ -8,7 +8,7 @@ metadata:
data:
a: b
---
-apiVersion: resource.k8s.io/v1alpha2
+apiVersion: resource.k8s.io/v1alpha3
kind: ResourceClaim
metadata:
name: shared-claim

@@ -6,7 +6,7 @@ metadata:
data:
a: b
---
-apiVersion: resource.k8s.io/v1alpha2
+apiVersion: resource.k8s.io/v1alpha3
kind: ResourceClaim
metadata:
name: example

@@ -1,4 +1,4 @@
-apiVersion: resource.k8s.io/v1alpha2
+apiVersion: resource.k8s.io/v1alpha3
kind: ResourceClass
metadata:
name: example
@@ -38,7 +38,7 @@ import (
"github.com/onsi/gomega/types"

v1 "k8s.io/api/core/v1"
resourcev1alpha2 "k8s.io/api/resource/v1alpha2"
resourceapi "k8s.io/api/resource/v1alpha3"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/kubernetes"
@@ -74,7 +74,7 @@ var _ = framework.SIGDescribe("node")("DRA", feature.DynamicResourceAllocation,
// When plugin and kubelet get killed at the end of the tests, they leave ResourceSlices behind.
// Perhaps garbage collection would eventually remove them (not sure how the node instance
// is managed), but this could take time. Let's clean up explicitly.
framework.ExpectNoError(f.ClientSet.ResourceV1alpha2().ResourceSlices().DeleteCollection(ctx, metav1.DeleteOptions{}, metav1.ListOptions{}))
framework.ExpectNoError(f.ClientSet.ResourceV1alpha3().ResourceSlices().DeleteCollection(ctx, metav1.DeleteOptions{}, metav1.ListOptions{}))
})
})

@@ -469,8 +469,8 @@ var _ = framework.SIGDescribe("node")("DRA", feature.DynamicResourceAllocation,
})

f.Context("ResourceSlice", f.WithSerial(), func() {
listResources := func(ctx context.Context) ([]resourcev1alpha2.ResourceSlice, error) {
slices, err := f.ClientSet.ResourceV1alpha2().ResourceSlices().List(ctx, metav1.ListOptions{})
listResources := func(ctx context.Context) ([]resourceapi.ResourceSlice, error) {
slices, err := f.ClientSet.ResourceV1alpha3().ResourceSlices().List(ctx, metav1.ListOptions{})
if err != nil {
return nil, err
}
@@ -562,7 +562,7 @@ func newKubeletPlugin(ctx context.Context, clientSet kubernetes.Interface, nodeN
ginkgo.DeferCleanup(func(ctx context.Context) {
// kubelet should do this eventually, but better make sure.
// A separate test checks this explicitly.
framework.ExpectNoError(clientSet.ResourceV1alpha2().ResourceSlices().DeleteCollection(ctx, metav1.DeleteOptions{}, metav1.ListOptions{FieldSelector: "driverName=" + driverName}))
framework.ExpectNoError(clientSet.ResourceV1alpha3().ResourceSlices().DeleteCollection(ctx, metav1.DeleteOptions{}, metav1.ListOptions{FieldSelector: "driverName=" + driverName}))
})
ginkgo.DeferCleanup(plugin.Stop)

@@ -575,31 +575,31 @@ func newKubeletPlugin(ctx context.Context, clientSet kubernetes.Interface, nodeN
// and placed on the node without involving the scheduler and the DRA controller
func createTestObjects(ctx context.Context, clientSet kubernetes.Interface, nodename, namespace, className, claimName, podName string, deferPodDeletion bool, pluginNames []string) *v1.Pod {
// ResourceClass
class := &resourcev1alpha2.ResourceClass{
class := &resourceapi.ResourceClass{
ObjectMeta: metav1.ObjectMeta{
Name: className,
},
DriverName: "controller",
}
_, err := clientSet.ResourceV1alpha2().ResourceClasses().Create(ctx, class, metav1.CreateOptions{})
_, err := clientSet.ResourceV1alpha3().ResourceClasses().Create(ctx, class, metav1.CreateOptions{})
framework.ExpectNoError(err)

ginkgo.DeferCleanup(clientSet.ResourceV1alpha2().ResourceClasses().Delete, className, metav1.DeleteOptions{})
ginkgo.DeferCleanup(clientSet.ResourceV1alpha3().ResourceClasses().Delete, className, metav1.DeleteOptions{})

// ResourceClaim
podClaimName := "resource-claim"
claim := &resourcev1alpha2.ResourceClaim{
claim := &resourceapi.ResourceClaim{
ObjectMeta: metav1.ObjectMeta{
Name: claimName,
},
Spec: resourcev1alpha2.ResourceClaimSpec{
Spec: resourceapi.ResourceClaimSpec{
ResourceClassName: className,
},
}
createdClaim, err := clientSet.ResourceV1alpha2().ResourceClaims(namespace).Create(ctx, claim, metav1.CreateOptions{})
createdClaim, err := clientSet.ResourceV1alpha3().ResourceClaims(namespace).Create(ctx, claim, metav1.CreateOptions{})
framework.ExpectNoError(err)

ginkgo.DeferCleanup(clientSet.ResourceV1alpha2().ResourceClaims(namespace).Delete, claimName, metav1.DeleteOptions{})
ginkgo.DeferCleanup(clientSet.ResourceV1alpha3().ResourceClaims(namespace).Delete, claimName, metav1.DeleteOptions{})

// Pod
containerName := "testcontainer"
@@ -638,46 +638,46 @@ func createTestObjects(ctx context.Context, clientSet kubernetes.Interface, node

// Update claim status: set ReservedFor and AllocationResult
// NOTE: This is usually done by the DRA controller
resourceHandlers := make([]resourcev1alpha2.ResourceHandle, len(pluginNames))
resourceHandlers := make([]resourceapi.ResourceHandle, len(pluginNames))
for i, pluginName := range pluginNames {
resourceHandlers[i] = resourcev1alpha2.ResourceHandle{
resourceHandlers[i] = resourceapi.ResourceHandle{
DriverName: pluginName,
Data: "{\"EnvVars\":{\"DRA_PARAM1\":\"PARAM1_VALUE\"},\"NodeName\":\"\"}",
}
}
createdClaim.Status = resourcev1alpha2.ResourceClaimStatus{
createdClaim.Status = resourceapi.ResourceClaimStatus{
DriverName: "controller",
ReservedFor: []resourcev1alpha2.ResourceClaimConsumerReference{
ReservedFor: []resourceapi.ResourceClaimConsumerReference{
{Resource: "pods", Name: podName, UID: createdPod.UID},
},
Allocation: &resourcev1alpha2.AllocationResult{
Allocation: &resourceapi.AllocationResult{
ResourceHandles: resourceHandlers,
},
}
_, err = clientSet.ResourceV1alpha2().ResourceClaims(namespace).UpdateStatus(ctx, createdClaim, metav1.UpdateOptions{})
_, err = clientSet.ResourceV1alpha3().ResourceClaims(namespace).UpdateStatus(ctx, createdClaim, metav1.UpdateOptions{})
framework.ExpectNoError(err)

return pod
}

func createTestResourceSlice(ctx context.Context, clientSet kubernetes.Interface, nodeName, driverName string) {
slice := &resourcev1alpha2.ResourceSlice{
slice := &resourceapi.ResourceSlice{
ObjectMeta: metav1.ObjectMeta{
Name: nodeName,
},
NodeName: nodeName,
DriverName: driverName,
ResourceModel: resourcev1alpha2.ResourceModel{
NamedResources: &resourcev1alpha2.NamedResourcesResources{},
ResourceModel: resourceapi.ResourceModel{
NamedResources: &resourceapi.NamedResourcesResources{},
},
}

ginkgo.By(fmt.Sprintf("Creating ResourceSlice %s", nodeName))
slice, err := clientSet.ResourceV1alpha2().ResourceSlices().Create(ctx, slice, metav1.CreateOptions{})
slice, err := clientSet.ResourceV1alpha3().ResourceSlices().Create(ctx, slice, metav1.CreateOptions{})
framework.ExpectNoError(err, "create ResourceSlice")
ginkgo.DeferCleanup(func(ctx context.Context) {
ginkgo.By(fmt.Sprintf("Deleting ResourceSlice %s", nodeName))
err := clientSet.ResourceV1alpha2().ResourceSlices().Delete(ctx, slice.Name, metav1.DeleteOptions{})
err := clientSet.ResourceV1alpha3().ResourceSlices().Delete(ctx, slice.Name, metav1.DeleteOptions{})
if !apierrors.IsNotFound(err) {
framework.ExpectNoError(err, "delete ResourceSlice")
}

@@ -59,8 +59,8 @@ var resetFieldsStatusData = map[schema.GroupVersionResource]string{
gvr("storage.k8s.io", "v1", "volumeattachments"): `{"status": {"attached": false}}`,
gvr("policy", "v1", "poddisruptionbudgets"): `{"status": {"currentHealthy": 25}}`,
gvr("policy", "v1beta1", "poddisruptionbudgets"): `{"status": {"currentHealthy": 25}}`,
gvr("resource.k8s.io", "v1alpha2", "podschedulingcontexts"): `{"status": {"resourceClaims": [{"name": "my-claim", "unsuitableNodes": ["node2"]}]}}`, // Not really a conflict with status_test.go: Apply just stores both nodes. Conflict testing therefore gets disabled for podschedulingcontexts.
gvr("resource.k8s.io", "v1alpha2", "resourceclaims"): `{"status": {"driverName": "other.example.com"}}`,
gvr("resource.k8s.io", "v1alpha3", "podschedulingcontexts"): `{"status": {"resourceClaims": [{"name": "my-claim", "unsuitableNodes": ["node2"]}]}}`, // Not really a conflict with status_test.go: Apply just stores both nodes. Conflict testing therefore gets disabled for podschedulingcontexts.
gvr("resource.k8s.io", "v1alpha3", "resourceclaims"): `{"status": {"driverName": "other.example.com"}}`,
gvr("internal.apiserver.k8s.io", "v1alpha1", "storageversions"): `{"status": {"commonEncodingVersion":"v1","storageVersions":[{"apiServerID":"1","decodableVersions":["v1","v2"],"encodingVersion":"v1"}],"conditions":[{"type":"AllEncodingVersionsEqual","status":"False","lastTransitionTime":"2020-01-01T00:00:00Z","reason":"allEncodingVersionsEqual","message":"all encoding versions are set to v1"}]}}`,
// standard for []metav1.Condition
gvr("admissionregistration.k8s.io", "v1alpha1", "validatingadmissionpolicies"): `{"status": {"conditions":[{"type":"Accepted","status":"True","lastTransitionTime":"2020-01-01T00:00:00Z","reason":"RuleApplied","message":"Rule was applied"}]}}`,
@@ -152,10 +152,10 @@ var resetFieldsSpecData = map[schema.GroupVersionResource]string{
gvr("awesome.bears.com", "v3", "pandas"): `{"spec": {"replicas": 302}}`,
gvr("apiregistration.k8s.io", "v1beta1", "apiservices"): `{"metadata": {"labels": {"a":"c"}}, "spec": {"group": "foo2.com"}}`,
gvr("apiregistration.k8s.io", "v1", "apiservices"): `{"metadata": {"labels": {"a":"c"}}, "spec": {"group": "foo2.com"}}`,
gvr("resource.k8s.io", "v1alpha2", "podschedulingcontexts"): `{"spec": {"selectedNode": "node2name"}}`,
gvr("resource.k8s.io", "v1alpha2", "resourceclasses"): `{"driverName": "other.example.com"}`,
gvr("resource.k8s.io", "v1alpha2", "resourceclaims"): `{"spec": {"resourceClassName": "class2name"}}`, // ResourceClassName is immutable, but that doesn't matter for the test.
gvr("resource.k8s.io", "v1alpha2", "resourceclaimtemplates"): `{"spec": {"spec": {"resourceClassName": "class2name"}}}`,
gvr("resource.k8s.io", "v1alpha3", "podschedulingcontexts"): `{"spec": {"selectedNode": "node2name"}}`,
gvr("resource.k8s.io", "v1alpha3", "resourceclasses"): `{"driverName": "other.example.com"}`,
gvr("resource.k8s.io", "v1alpha3", "resourceclaims"): `{"spec": {"resourceClassName": "class2name"}}`, // ResourceClassName is immutable, but that doesn't matter for the test.
gvr("resource.k8s.io", "v1alpha3", "resourceclaimtemplates"): `{"spec": {"spec": {"resourceClassName": "class2name"}}}`,
gvr("internal.apiserver.k8s.io", "v1alpha1", "storageversions"): `{}`,
gvr("admissionregistration.k8s.io", "v1alpha1", "validatingadmissionpolicies"): `{"metadata": {"labels": {"a":"c"}}, "spec": {"paramKind": {"apiVersion": "apps/v1", "kind": "Deployment"}}}`,
gvr("admissionregistration.k8s.io", "v1beta1", "validatingadmissionpolicies"): `{"metadata": {"labels": {"a":"c"}}, "spec": {"paramKind": {"apiVersion": "apps/v1", "kind": "Deployment"}}}`,

@@ -52,8 +52,8 @@ var statusData = map[schema.GroupVersionResource]string{
gvr("storage.k8s.io", "v1", "volumeattachments"): `{"status": {"attached": true}}`,
gvr("policy", "v1", "poddisruptionbudgets"): `{"status": {"currentHealthy": 5}}`,
gvr("policy", "v1beta1", "poddisruptionbudgets"): `{"status": {"currentHealthy": 5}}`,
gvr("resource.k8s.io", "v1alpha2", "podschedulingcontexts"): `{"status": {"resourceClaims": [{"name": "my-claim", "unsuitableNodes": ["node1"]}]}}`,
gvr("resource.k8s.io", "v1alpha2", "resourceclaims"): `{"status": {"driverName": "example.com"}}`,
gvr("resource.k8s.io", "v1alpha3", "podschedulingcontexts"): `{"status": {"resourceClaims": [{"name": "my-claim", "unsuitableNodes": ["node1"]}]}}`,
gvr("resource.k8s.io", "v1alpha3", "resourceclaims"): `{"status": {"driverName": "example.com"}}`,
gvr("internal.apiserver.k8s.io", "v1alpha1", "storageversions"): `{"status": {"commonEncodingVersion":"v1","storageVersions":[{"apiServerID":"1","decodableVersions":["v1","v2"],"encodingVersion":"v1"}],"conditions":[{"type":"AllEncodingVersionsEqual","status":"True","lastTransitionTime":"2020-01-01T00:00:00Z","reason":"allEncodingVersionsEqual","message":"all encoding versions are set to v1"}]}}`,
// standard for []metav1.Condition
gvr("admissionregistration.k8s.io", "v1alpha1", "validatingadmissionpolicies"): `{"status": {"conditions":[{"type":"Accepted","status":"False","lastTransitionTime":"2020-01-01T00:00:00Z","reason":"RuleApplied","message":"Rule was applied"}]}}`,

@@ -27,7 +27,7 @@ import (
coordination "k8s.io/api/coordination/v1"
corev1 "k8s.io/api/core/v1"
policy "k8s.io/api/policy/v1"
"k8s.io/api/resource/v1alpha2"
resourceapi "k8s.io/api/resource/v1alpha3"
storagev1 "k8s.io/api/storage/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/api/resource"
@@ -108,17 +108,17 @@ func TestNodeAuthorizer(t *testing.T) {
if _, err := superuserClient.CoreV1().ConfigMaps("ns").Create(context.TODO(), &corev1.ConfigMap{ObjectMeta: metav1.ObjectMeta{Name: "myconfigmap"}}, metav1.CreateOptions{}); err != nil {
t.Fatal(err)
}
if _, err := superuserClient.ResourceV1alpha2().ResourceClaims("ns").Create(context.TODO(), &v1alpha2.ResourceClaim{ObjectMeta: metav1.ObjectMeta{Name: "mynamedresourceclaim"}, Spec: v1alpha2.ResourceClaimSpec{ResourceClassName: "example.com"}}, metav1.CreateOptions{}); err != nil {
if _, err := superuserClient.ResourceV1alpha3().ResourceClaims("ns").Create(context.TODO(), &resourceapi.ResourceClaim{ObjectMeta: metav1.ObjectMeta{Name: "mynamedresourceclaim"}, Spec: resourceapi.ResourceClaimSpec{ResourceClassName: "example.com"}}, metav1.CreateOptions{}); err != nil {
t.Fatal(err)
}
if _, err := superuserClient.ResourceV1alpha2().ResourceClaims("ns").Create(context.TODO(), &v1alpha2.ResourceClaim{ObjectMeta: metav1.ObjectMeta{Name: "mytemplatizedresourceclaim"}, Spec: v1alpha2.ResourceClaimSpec{ResourceClassName: "example.com"}}, metav1.CreateOptions{}); err != nil {
if _, err := superuserClient.ResourceV1alpha3().ResourceClaims("ns").Create(context.TODO(), &resourceapi.ResourceClaim{ObjectMeta: metav1.ObjectMeta{Name: "mytemplatizedresourceclaim"}, Spec: resourceapi.ResourceClaimSpec{ResourceClassName: "example.com"}}, metav1.CreateOptions{}); err != nil {
t.Fatal(err)
}
model := v1alpha2.ResourceModel{NamedResources: &v1alpha2.NamedResourcesResources{}}
if _, err := superuserClient.ResourceV1alpha2().ResourceSlices().Create(context.TODO(), &v1alpha2.ResourceSlice{ObjectMeta: metav1.ObjectMeta{Name: "myslice1"}, NodeName: "node1", DriverName: "dra.example.com", ResourceModel: model}, metav1.CreateOptions{}); err != nil {
model := resourceapi.ResourceModel{NamedResources: &resourceapi.NamedResourcesResources{}}
if _, err := superuserClient.ResourceV1alpha3().ResourceSlices().Create(context.TODO(), &resourceapi.ResourceSlice{ObjectMeta: metav1.ObjectMeta{Name: "myslice1"}, NodeName: "node1", DriverName: "dra.example.com", ResourceModel: model}, metav1.CreateOptions{}); err != nil {
t.Fatal(err)
}
if _, err := superuserClient.ResourceV1alpha2().ResourceSlices().Create(context.TODO(), &v1alpha2.ResourceSlice{ObjectMeta: metav1.ObjectMeta{Name: "myslice2"}, NodeName: "node2", DriverName: "dra.example.com", ResourceModel: model}, metav1.CreateOptions{}); err != nil {
if _, err := superuserClient.ResourceV1alpha3().ResourceSlices().Create(context.TODO(), &resourceapi.ResourceSlice{ObjectMeta: metav1.ObjectMeta{Name: "myslice2"}, NodeName: "node2", DriverName: "dra.example.com", ResourceModel: model}, metav1.CreateOptions{}); err != nil {
t.Fatal(err)
}

@@ -193,13 +193,13 @@ func TestNodeAuthorizer(t *testing.T) {
}
getResourceClaim := func(client clientset.Interface) func() error {
return func() error {
_, err := client.ResourceV1alpha2().ResourceClaims("ns").Get(context.TODO(), "mynamedresourceclaim", metav1.GetOptions{})
_, err := client.ResourceV1alpha3().ResourceClaims("ns").Get(context.TODO(), "mynamedresourceclaim", metav1.GetOptions{})
return err
}
}
getResourceClaimTemplate := func(client clientset.Interface) func() error {
return func() error {
_, err := client.ResourceV1alpha2().ResourceClaims("ns").Get(context.TODO(), "mytemplatizedresourceclaim", metav1.GetOptions{})
_, err := client.ResourceV1alpha3().ResourceClaims("ns").Get(context.TODO(), "mytemplatizedresourceclaim", metav1.GetOptions{})
return err
}
}
@@ -209,7 +209,7 @@ func TestNodeAuthorizer(t *testing.T) {
if nodeName != nil {
listOptions.FieldSelector = "nodeName=" + *nodeName
}
return client.ResourceV1alpha2().ResourceSlices().DeleteCollection(context.TODO(), metav1.DeleteOptions{}, listOptions)
return client.ResourceV1alpha3().ResourceSlices().DeleteCollection(context.TODO(), metav1.DeleteOptions{}, listOptions)
}
}
addResourceClaimTemplateReference := func(client clientset.Interface) func() error {
@@ -663,7 +663,7 @@ func TestNodeAuthorizer(t *testing.T) {
expectAllowed(t, deleteResourceSliceCollection(csiNode1Client, ptr.To("node1")))

// One slice must have been deleted, the other not.
slices, err := superuserClient.ResourceV1alpha2().ResourceSlices().List(context.TODO(), metav1.ListOptions{})
slices, err := superuserClient.ResourceV1alpha3().ResourceSlices().List(context.TODO(), metav1.ListOptions{})
if err != nil {
t.Fatal(err)
}
@@ -676,7 +676,7 @@ func TestNodeAuthorizer(t *testing.T) {

// Superuser can delete.
expectAllowed(t, deleteResourceSliceCollection(superuserClient, nil))
slices, err = superuserClient.ResourceV1alpha2().ResourceSlices().List(context.TODO(), metav1.ListOptions{})
slices, err = superuserClient.ResourceV1alpha3().ResourceSlices().List(context.TODO(), metav1.ListOptions{})
if err != nil {
t.Fatal(err)
}

@@ -403,32 +403,32 @@ func GetEtcdStorageDataForNamespace(namespace string) map[schema.GroupVersionRes
},
// --

// k8s.io/kubernetes/pkg/apis/resource/v1alpha2
gvr("resource.k8s.io", "v1alpha2", "resourceclasses"): {
// k8s.io/kubernetes/pkg/apis/resource/v1alpha3
gvr("resource.k8s.io", "v1alpha3", "resourceclasses"): {
Stub: `{"metadata": {"name": "class1name"}, "driverName": "example.com"}`,
ExpectedEtcdPath: "/registry/resourceclasses/class1name",
},
gvr("resource.k8s.io", "v1alpha2", "resourceclaims"): {
gvr("resource.k8s.io", "v1alpha3", "resourceclaims"): {
Stub: `{"metadata": {"name": "claim1name"}, "spec": {"resourceClassName": "class1name", "allocationMode": "WaitForFirstConsumer"}}`,
ExpectedEtcdPath: "/registry/resourceclaims/" + namespace + "/claim1name",
},
gvr("resource.k8s.io", "v1alpha2", "resourceclaimtemplates"): {
gvr("resource.k8s.io", "v1alpha3", "resourceclaimtemplates"): {
Stub: `{"metadata": {"name": "claimtemplate1name"}, "spec": {"spec": {"resourceClassName": "class1name", "allocationMode": "WaitForFirstConsumer"}}}`,
ExpectedEtcdPath: "/registry/resourceclaimtemplates/" + namespace + "/claimtemplate1name",
},
gvr("resource.k8s.io", "v1alpha2", "podschedulingcontexts"): {
gvr("resource.k8s.io", "v1alpha3", "podschedulingcontexts"): {
Stub: `{"metadata": {"name": "pod1name"}, "spec": {"selectedNode": "node1name", "potentialNodes": ["node1name", "node2name"]}}`,
ExpectedEtcdPath: "/registry/podschedulingcontexts/" + namespace + "/pod1name",
},
gvr("resource.k8s.io", "v1alpha2", "resourceclassparameters"): {
gvr("resource.k8s.io", "v1alpha3", "resourceclassparameters"): {
Stub: `{"metadata": {"name": "class1parameters"}}`,
ExpectedEtcdPath: "/registry/resourceclassparameters/" + namespace + "/class1parameters",
},
gvr("resource.k8s.io", "v1alpha2", "resourceclaimparameters"): {
gvr("resource.k8s.io", "v1alpha3", "resourceclaimparameters"): {
Stub: `{"metadata": {"name": "claim1parameters"}}`,
ExpectedEtcdPath: "/registry/resourceclaimparameters/" + namespace + "/claim1parameters",
},
gvr("resource.k8s.io", "v1alpha2", "resourceslices"): {
gvr("resource.k8s.io", "v1alpha3", "resourceslices"): {
Stub: `{"metadata": {"name": "node1slice"}, "nodeName": "worker1", "driverName": "dra.example.com", "namedResources": {}}`,
ExpectedEtcdPath: "/registry/resourceslices/node1slice",
},

@@ -29,7 +29,7 @@ import (

"github.com/google/go-cmp/cmp"
v1 "k8s.io/api/core/v1"
resourcev1alpha2 "k8s.io/api/resource/v1alpha2"
resourceapi "k8s.io/api/resource/v1alpha3"
apierrors "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -679,30 +679,30 @@ func TestPodSchedulingContextSSA(t *testing.T) {
}

defer func() {
if err := testCtx.ClientSet.ResourceV1alpha2().ResourceClasses().DeleteCollection(testCtx.Ctx, metav1.DeleteOptions{}, metav1.ListOptions{}); err != nil {
if err := testCtx.ClientSet.ResourceV1alpha3().ResourceClasses().DeleteCollection(testCtx.Ctx, metav1.DeleteOptions{}, metav1.ListOptions{}); err != nil {
t.Errorf("Unexpected error deleting ResourceClasses: %v", err)
}
}()
class := &resourcev1alpha2.ResourceClass{
class := &resourceapi.ResourceClass{
ObjectMeta: metav1.ObjectMeta{
Name: "my-class",
},
DriverName: "does-not-matter",
}
if _, err := testCtx.ClientSet.ResourceV1alpha2().ResourceClasses().Create(testCtx.Ctx, class, metav1.CreateOptions{}); err != nil {
if _, err := testCtx.ClientSet.ResourceV1alpha3().ResourceClasses().Create(testCtx.Ctx, class, metav1.CreateOptions{}); err != nil {
t.Fatalf("Failed to create class: %v", err)
}

claim := &resourcev1alpha2.ResourceClaim{
claim := &resourceapi.ResourceClaim{
ObjectMeta: metav1.ObjectMeta{
Name: "my-claim",
Namespace: testCtx.NS.Name,
},
Spec: resourcev1alpha2.ResourceClaimSpec{
Spec: resourceapi.ResourceClaimSpec{
ResourceClassName: class.Name,
},
}
if _, err := testCtx.ClientSet.ResourceV1alpha2().ResourceClaims(claim.Namespace).Create(testCtx.Ctx, claim, metav1.CreateOptions{}); err != nil {
if _, err := testCtx.ClientSet.ResourceV1alpha3().ResourceClaims(claim.Namespace).Create(testCtx.Ctx, claim, metav1.CreateOptions{}); err != nil {
t.Fatalf("Failed to create claim: %v", err)
}

@@ -719,11 +719,11 @@ func TestPodSchedulingContextSSA(t *testing.T) {
}

// Check that the PodSchedulingContext exists and has a selected node.
var schedulingCtx *resourcev1alpha2.PodSchedulingContext
var schedulingCtx *resourceapi.PodSchedulingContext
if err := wait.PollUntilContextTimeout(testCtx.Ctx, 10*time.Microsecond, 30*time.Second, true,
func(context.Context) (bool, error) {
var err error
schedulingCtx, err = testCtx.ClientSet.ResourceV1alpha2().PodSchedulingContexts(pod.Namespace).Get(testCtx.Ctx, pod.Name, metav1.GetOptions{})
schedulingCtx, err = testCtx.ClientSet.ResourceV1alpha3().PodSchedulingContexts(pod.Namespace).Get(testCtx.Ctx, pod.Name, metav1.GetOptions{})
if apierrors.IsNotFound(err) {
return false, nil
}
@@ -756,12 +756,12 @@ func TestPodSchedulingContextSSA(t *testing.T) {

// Now force the scheduler to update the PodSchedulingContext by setting UnsuitableNodes so that
// the selected node is not suitable.
schedulingCtx.Status.ResourceClaims = []resourcev1alpha2.ResourceClaimSchedulingStatus{{
schedulingCtx.Status.ResourceClaims = []resourceapi.ResourceClaimSchedulingStatus{{
Name: podClaimName,
UnsuitableNodes: []string{schedulingCtx.Spec.SelectedNode},
}}

if _, err := testCtx.ClientSet.ResourceV1alpha2().PodSchedulingContexts(pod.Namespace).UpdateStatus(testCtx.Ctx, schedulingCtx, metav1.UpdateOptions{}); err != nil {
if _, err := testCtx.ClientSet.ResourceV1alpha3().PodSchedulingContexts(pod.Namespace).UpdateStatus(testCtx.Ctx, schedulingCtx, metav1.UpdateOptions{}); err != nil {
t.Fatalf("Unexpected PodSchedulingContext status update error: %v", err)
}

@@ -1,4 +1,4 @@
apiVersion: resource.k8s.io/v1alpha2
apiVersion: resource.k8s.io/v1alpha3
kind: ResourceClaim
metadata:
name: test-claim-{{.Index}}

@@ -1,4 +1,4 @@
apiVersion: resource.k8s.io/v1alpha2
apiVersion: resource.k8s.io/v1alpha3
kind: ResourceClaim
metadata:
name: test-claim-{{.Index}}

@@ -1,4 +1,4 @@
apiVersion: resource.k8s.io/v1alpha2
apiVersion: resource.k8s.io/v1alpha3
kind: ResourceClaimParameters
metadata:
name: test-claim-parameters

@@ -1,4 +1,4 @@
apiVersion: resource.k8s.io/v1alpha2
apiVersion: resource.k8s.io/v1alpha3
kind: ResourceClaimTemplate
metadata:
name: test-claim-template

@@ -1,4 +1,4 @@
apiVersion: resource.k8s.io/v1alpha2
apiVersion: resource.k8s.io/v1alpha3
kind: ResourceClaimTemplate
metadata:
name: test-claim-template

@@ -1,4 +1,4 @@
apiVersion: resource.k8s.io/v1alpha2
apiVersion: resource.k8s.io/v1alpha3
kind: ResourceClass
metadata:
name: test-class

@@ -1,4 +1,4 @@
apiVersion: resource.k8s.io/v1alpha2
apiVersion: resource.k8s.io/v1alpha3
kind: ResourceClass
metadata:
name: test-class

@@ -22,7 +22,7 @@ import (
"path/filepath"
"sync"

resourcev1alpha2 "k8s.io/api/resource/v1alpha2"
resourceapi "k8s.io/api/resource/v1alpha3"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/util/workqueue"
"k8s.io/klog/v2"
@@ -84,7 +84,7 @@ func (op *createResourceClaimsOp) requiredNamespaces() []string {
func (op *createResourceClaimsOp) run(tCtx ktesting.TContext) {
tCtx.Logf("creating %d claims in namespace %q", op.Count, op.Namespace)

var claimTemplate *resourcev1alpha2.ResourceClaim
var claimTemplate *resourceapi.ResourceClaim
if err := getSpecFromFile(&op.TemplatePath, &claimTemplate); err != nil {
tCtx.Fatalf("parsing ResourceClaim %q: %v", op.TemplatePath, err)
}
@@ -92,7 +92,7 @@ func (op *createResourceClaimsOp) run(tCtx ktesting.TContext) {
var mutex sync.Mutex
create := func(i int) {
err := func() error {
if _, err := tCtx.Client().ResourceV1alpha2().ResourceClaims(op.Namespace).Create(tCtx, claimTemplate.DeepCopy(), metav1.CreateOptions{}); err != nil {
if _, err := tCtx.Client().ResourceV1alpha3().ResourceClaims(op.Namespace).Create(tCtx, claimTemplate.DeepCopy(), metav1.CreateOptions{}); err != nil {
return fmt.Errorf("create claim: %v", err)
}
return nil
@@ -197,11 +197,11 @@ func (op *createResourceDriverOp) run(tCtx ktesting.TContext) {
if op.StructuredParameters {
for _, nodeName := range resources.Nodes {
slice := resourceSlice(op.DriverName, nodeName, op.MaxClaimsPerNode)
_, err := tCtx.Client().ResourceV1alpha2().ResourceSlices().Create(tCtx, slice, metav1.CreateOptions{})
_, err := tCtx.Client().ResourceV1alpha3().ResourceSlices().Create(tCtx, slice, metav1.CreateOptions{})
tCtx.ExpectNoError(err, "create node resource slice")
}
tCtx.CleanupCtx(func(tCtx ktesting.TContext) {
err := tCtx.Client().ResourceV1alpha2().ResourceSlices().DeleteCollection(tCtx,
err := tCtx.Client().ResourceV1alpha3().ResourceSlices().DeleteCollection(tCtx,
metav1.DeleteOptions{},
metav1.ListOptions{FieldSelector: "driverName=" + op.DriverName},
)
@@ -229,8 +229,8 @@ func (op *createResourceDriverOp) run(tCtx ktesting.TContext) {
})
}

func resourceSlice(driverName, nodeName string, capacity int) *resourcev1alpha2.ResourceSlice {
slice := &resourcev1alpha2.ResourceSlice{
func resourceSlice(driverName, nodeName string, capacity int) *resourceapi.ResourceSlice {
slice := &resourceapi.ResourceSlice{
ObjectMeta: metav1.ObjectMeta{
Name: nodeName,
},
@@ -238,14 +238,14 @@ func resourceSlice(driverName, nodeName string, capacity int) *resourcev1alpha2.
NodeName: nodeName,
DriverName: driverName,

ResourceModel: resourcev1alpha2.ResourceModel{
NamedResources: &resourcev1alpha2.NamedResourcesResources{},
ResourceModel: resourceapi.ResourceModel{
NamedResources: &resourceapi.NamedResourcesResources{},
},
}

for i := 0; i < capacity; i++ {
slice.ResourceModel.NamedResources.Instances = append(slice.ResourceModel.NamedResources.Instances,
resourcev1alpha2.NamedResourcesInstance{
resourceapi.NamedResourcesInstance{
Name: fmt.Sprintf("instance-%d", i),
},
)

@@ -87,7 +87,7 @@ func mustSetupCluster(tCtx ktesting.TContext, config *config.KubeSchedulerConfig
// except for DRA API group when needed.
runtimeConfig := []string{"api/alpha=false"}
if enabledFeatures[features.DynamicResourceAllocation] {
runtimeConfig = append(runtimeConfig, "resource.k8s.io/v1alpha2=true")
runtimeConfig = append(runtimeConfig, "resource.k8s.io/v1alpha3=true")
}
customFlags := []string{
// Disable ServiceAccount admission plugin as we don't have serviceaccount controller running.

@@ -28,7 +28,7 @@ import (

v1 "k8s.io/api/core/v1"
policy "k8s.io/api/policy/v1"
resourcev1alpha2 "k8s.io/api/resource/v1alpha2"
resourceapi "k8s.io/api/resource/v1alpha3"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
@@ -130,9 +130,9 @@ func StartScheduler(ctx context.Context, clientSet clientset.Interface, kubeConf

func CreateResourceClaimController(ctx context.Context, tb ktesting.TB, clientSet clientset.Interface, informerFactory informers.SharedInformerFactory) func() {
podInformer := informerFactory.Core().V1().Pods()
schedulingInformer := informerFactory.Resource().V1alpha2().PodSchedulingContexts()
claimInformer := informerFactory.Resource().V1alpha2().ResourceClaims()
claimTemplateInformer := informerFactory.Resource().V1alpha2().ResourceClaimTemplates()
schedulingInformer := informerFactory.Resource().V1alpha3().PodSchedulingContexts()
claimInformer := informerFactory.Resource().V1alpha3().ResourceClaims()
claimTemplateInformer := informerFactory.Resource().V1alpha3().ResourceClaimTemplates()
claimController, err := resourceclaim.NewController(klog.FromContext(ctx), clientSet, podInformer, schedulingInformer, claimInformer, claimTemplateInformer)
if err != nil {
tb.Fatalf("Error creating claim controller: %v", err)
@@ -512,7 +512,7 @@ func InitTestAPIServer(t *testing.T, nsPrefix string, admission admission.Interf
options.Admission.GenericAdmission.DisablePlugins = []string{"ServiceAccount", "TaintNodesByCondition", "Priority", "StorageObjectInUseProtection"}
if utilfeature.DefaultFeatureGate.Enabled(features.DynamicResourceAllocation) {
options.APIEnablement.RuntimeConfig = cliflag.ConfigurationMap{
resourcev1alpha2.SchemeGroupVersion.String(): "true",
resourceapi.SchemeGroupVersion.String(): "true",
}
}
},