DRA: use v1beta1 API

No code is left which depends on the v1alpha3 API, except of course the code
implementing that version.
Patrick Ohly
2024-10-11 11:13:53 +02:00
parent 81fd64256c
commit 33ea278c51
48 changed files with 306 additions and 270 deletions
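
The change is almost entirely mechanical: the resource.k8s.io informers, listers, and typed clients move from the v1alpha3 group version to v1beta1, plus one structural API difference in how device capacity is represented (see the DeviceCapacity hunks below). A minimal sketch of the recurring caller-side pattern, assuming a client-go release that serves resource.k8s.io/v1beta1; the helper, claim name, and namespace are illustrative, not taken from the diff:

	package sketch

	import (
		"context"
		"fmt"

		resourceapi "k8s.io/api/resource/v1beta1" // was: k8s.io/api/resource/v1alpha3
		metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
		"k8s.io/client-go/informers"
		"k8s.io/client-go/kubernetes"
	)

	// createClaim is a hypothetical helper showing the two accessors that change.
	func createClaim(ctx context.Context, client kubernetes.Interface, factory informers.SharedInformerFactory) error {
		// Informers: Resource().V1alpha3() becomes Resource().V1beta1().
		claimLister := factory.Resource().V1beta1().ResourceClaims().Lister()
		_ = claimLister

		// Typed clients: ResourceV1alpha3() becomes ResourceV1beta1().
		claim := &resourceapi.ResourceClaim{
			ObjectMeta: metav1.ObjectMeta{Name: "example-claim", Namespace: "default"},
			// Spec omitted for brevity; a real claim needs device requests.
		}
		if _, err := client.ResourceV1beta1().ResourceClaims("default").Create(ctx, claim, metav1.CreateOptions{}); err != nil {
			return fmt.Errorf("create ResourceClaim: %w", err)
		}
		return nil
	}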


@@ -411,8 +411,8 @@ func startResourceClaimController(ctx context.Context, controllerContext Control
utilfeature.DefaultFeatureGate.Enabled(features.DRAAdminAccess),
controllerContext.ClientBuilder.ClientOrDie("resource-claim-controller"),
controllerContext.InformerFactory.Core().V1().Pods(),
controllerContext.InformerFactory.Resource().V1alpha3().ResourceClaims(),
controllerContext.InformerFactory.Resource().V1alpha3().ResourceClaimTemplates())
controllerContext.InformerFactory.Resource().V1beta1().ResourceClaims(),
controllerContext.InformerFactory.Resource().V1beta1().ResourceClaimTemplates())
if err != nil {
return nil, true, fmt.Errorf("failed to start resource claim controller: %v", err)
}


@@ -25,7 +25,7 @@ import (
"time"
v1 "k8s.io/api/core/v1"
resourceapi "k8s.io/api/resource/v1alpha3"
resourceapi "k8s.io/api/resource/v1beta1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
@@ -33,12 +33,12 @@ import (
"k8s.io/apimachinery/pkg/util/wait"
corev1apply "k8s.io/client-go/applyconfigurations/core/v1"
v1informers "k8s.io/client-go/informers/core/v1"
resourceinformers "k8s.io/client-go/informers/resource/v1alpha3"
resourceinformers "k8s.io/client-go/informers/resource/v1beta1"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/client-go/kubernetes/scheme"
v1core "k8s.io/client-go/kubernetes/typed/core/v1"
v1listers "k8s.io/client-go/listers/core/v1"
resourcelisters "k8s.io/client-go/listers/resource/v1alpha3"
resourcelisters "k8s.io/client-go/listers/resource/v1beta1"
"k8s.io/client-go/tools/cache"
"k8s.io/client-go/tools/record"
"k8s.io/client-go/util/workqueue"
@@ -661,7 +661,7 @@ func (ec *Controller) handleClaim(ctx context.Context, pod *v1.Pod, podClaim v1.
}
metrics.ResourceClaimCreateAttempts.Inc()
claimName := claim.Name
claim, err = ec.kubeClient.ResourceV1alpha3().ResourceClaims(pod.Namespace).Create(ctx, claim, metav1.CreateOptions{})
claim, err = ec.kubeClient.ResourceV1beta1().ResourceClaims(pod.Namespace).Create(ctx, claim, metav1.CreateOptions{})
if err != nil {
metrics.ResourceClaimCreateFailures.Inc()
return fmt.Errorf("create ResourceClaim %s: %v", claimName, err)
@@ -730,7 +730,7 @@ func (ec *Controller) reserveForPod(ctx context.Context, pod *v1.Pod, claim *res
Name: pod.Name,
UID: pod.UID,
})
if _, err := ec.kubeClient.ResourceV1alpha3().ResourceClaims(claim.Namespace).UpdateStatus(ctx, claim, metav1.UpdateOptions{}); err != nil {
if _, err := ec.kubeClient.ResourceV1beta1().ResourceClaims(claim.Namespace).UpdateStatus(ctx, claim, metav1.UpdateOptions{}); err != nil {
return fmt.Errorf("reserve claim %s for pod: %w", klog.KObj(claim), err)
}
return nil
@@ -843,7 +843,7 @@ func (ec *Controller) syncClaim(ctx context.Context, namespace, name string) err
}
}
claim, err := ec.kubeClient.ResourceV1alpha3().ResourceClaims(claim.Namespace).UpdateStatus(ctx, claim, metav1.UpdateOptions{})
claim, err := ec.kubeClient.ResourceV1beta1().ResourceClaims(claim.Namespace).UpdateStatus(ctx, claim, metav1.UpdateOptions{})
if err != nil {
return err
}
@@ -853,7 +853,7 @@ func (ec *Controller) syncClaim(ctx context.Context, namespace, name string) err
builtinControllerFinalizer := slices.Index(claim.Finalizers, resourceapi.Finalizer)
if builtinControllerFinalizer >= 0 && claim.Status.Allocation == nil {
claim.Finalizers = slices.Delete(claim.Finalizers, builtinControllerFinalizer, builtinControllerFinalizer+1)
if _, err := ec.kubeClient.ResourceV1alpha3().ResourceClaims(claim.Namespace).Update(ctx, claim, metav1.UpdateOptions{}); err != nil {
if _, err := ec.kubeClient.ResourceV1beta1().ResourceClaims(claim.Namespace).Update(ctx, claim, metav1.UpdateOptions{}); err != nil {
return err
}
}
@@ -865,14 +865,14 @@ func (ec *Controller) syncClaim(ctx context.Context, namespace, name string) err
// deleted. As above we then need to clear the allocation.
claim.Status.Allocation = nil
var err error
claim, err = ec.kubeClient.ResourceV1alpha3().ResourceClaims(claim.Namespace).UpdateStatus(ctx, claim, metav1.UpdateOptions{})
claim, err = ec.kubeClient.ResourceV1beta1().ResourceClaims(claim.Namespace).UpdateStatus(ctx, claim, metav1.UpdateOptions{})
if err != nil {
return err
}
}
// Whether it was allocated or not, remove the finalizer to unblock removal.
claim.Finalizers = slices.Delete(claim.Finalizers, builtinControllerFinalizer, builtinControllerFinalizer+1)
_, err := ec.kubeClient.ResourceV1alpha3().ResourceClaims(claim.Namespace).Update(ctx, claim, metav1.UpdateOptions{})
_, err := ec.kubeClient.ResourceV1beta1().ResourceClaims(claim.Namespace).Update(ctx, claim, metav1.UpdateOptions{})
if err != nil {
return err
}
@@ -893,7 +893,7 @@ func (ec *Controller) syncClaim(ctx context.Context, namespace, name string) err
// We are certain that the owning pod is not going to need
// the claim and therefore remove the claim.
logger.V(5).Info("deleting unused generated claim", "claim", klog.KObj(claim), "pod", klog.KObj(pod))
err := ec.kubeClient.ResourceV1alpha3().ResourceClaims(claim.Namespace).Delete(ctx, claim.Name, metav1.DeleteOptions{})
err := ec.kubeClient.ResourceV1beta1().ResourceClaims(claim.Namespace).Delete(ctx, claim.Name, metav1.DeleteOptions{})
if err != nil {
return fmt.Errorf("delete claim %s: %w", klog.KObj(claim), err)
}


@@ -28,7 +28,7 @@ import (
"github.com/stretchr/testify/assert"
v1 "k8s.io/api/core/v1"
resourceapi "k8s.io/api/resource/v1alpha3"
resourceapi "k8s.io/api/resource/v1beta1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
@@ -387,8 +387,8 @@ func TestSyncHandler(t *testing.T) {
setupMetrics()
informerFactory := informers.NewSharedInformerFactory(fakeKubeClient, controller.NoResyncPeriodFunc())
podInformer := informerFactory.Core().V1().Pods()
claimInformer := informerFactory.Resource().V1alpha3().ResourceClaims()
templateInformer := informerFactory.Resource().V1alpha3().ResourceClaimTemplates()
claimInformer := informerFactory.Resource().V1beta1().ResourceClaims()
templateInformer := informerFactory.Resource().V1beta1().ResourceClaimTemplates()
ec, err := NewController(tCtx.Logger(), tc.adminAccessEnabled, fakeKubeClient, podInformer, claimInformer, templateInformer)
if err != nil {
@@ -426,7 +426,7 @@ func TestSyncHandler(t *testing.T) {
t.Fatalf("unexpected success")
}
claims, err := fakeKubeClient.ResourceV1alpha3().ResourceClaims("").List(tCtx, metav1.ListOptions{})
claims, err := fakeKubeClient.ResourceV1beta1().ResourceClaims("").List(tCtx, metav1.ListOptions{})
if err != nil {
t.Fatalf("unexpected error while listing claims: %v", err)
}
@@ -461,9 +461,9 @@ func TestResourceClaimEventHandler(t *testing.T) {
setupMetrics()
informerFactory := informers.NewSharedInformerFactory(fakeKubeClient, controller.NoResyncPeriodFunc())
podInformer := informerFactory.Core().V1().Pods()
claimInformer := informerFactory.Resource().V1alpha3().ResourceClaims()
templateInformer := informerFactory.Resource().V1alpha3().ResourceClaimTemplates()
claimClient := fakeKubeClient.ResourceV1alpha3().ResourceClaims(testNamespace)
claimInformer := informerFactory.Resource().V1beta1().ResourceClaims()
templateInformer := informerFactory.Resource().V1beta1().ResourceClaimTemplates()
claimClient := fakeKubeClient.ResourceV1beta1().ResourceClaims(testNamespace)
_, err := NewController(tCtx.Logger(), false /* admin access */, fakeKubeClient, podInformer, claimInformer, templateInformer)
tCtx.ExpectNoError(err, "creating ephemeral controller")


@@ -34,7 +34,7 @@ import (
authorizationcel "k8s.io/apiserver/pkg/authorization/cel"
utilfeature "k8s.io/apiserver/pkg/util/feature"
versionedinformers "k8s.io/client-go/informers"
resourceinformers "k8s.io/client-go/informers/resource/v1alpha3"
resourceinformers "k8s.io/client-go/informers/resource/v1beta1"
"k8s.io/kubernetes/pkg/auth/authorizer/abac"
"k8s.io/kubernetes/pkg/auth/nodeidentifier"
"k8s.io/kubernetes/pkg/features"
@@ -99,7 +99,7 @@ func (config Config) New(ctx context.Context, serverID string) (authorizer.Autho
case authzconfig.AuthorizerType(modes.ModeNode):
var slices resourceinformers.ResourceSliceInformer
if utilfeature.DefaultFeatureGate.Enabled(features.DynamicResourceAllocation) {
slices = config.VersionedInformerFactory.Resource().V1alpha3().ResourceSlices()
slices = config.VersionedInformerFactory.Resource().V1beta1().ResourceSlices()
}
node.RegisterMetrics()
graph := node.NewGraph()


@@ -22,7 +22,7 @@ import (
"slices"
"sync"
resourceapi "k8s.io/api/resource/v1alpha3"
resourceapi "k8s.io/api/resource/v1beta1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/kubernetes/pkg/kubelet/cm/dra/state"


@@ -25,7 +25,7 @@ import (
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
resourceapi "k8s.io/api/resource/v1alpha3"
resourceapi "k8s.io/api/resource/v1beta1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/sets"


@@ -23,7 +23,7 @@ import (
"time"
v1 "k8s.io/api/core/v1"
resourceapi "k8s.io/api/resource/v1alpha3"
resourceapi "k8s.io/api/resource/v1beta1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/sets"
@@ -188,7 +188,7 @@ func (m *ManagerImpl) prepareResources(ctx context.Context, pod *v1.Pod) error {
continue
}
// Query claim object from the API server
resourceClaim, err := m.kubeClient.ResourceV1alpha3().ResourceClaims(pod.Namespace).Get(
resourceClaim, err := m.kubeClient.ResourceV1beta1().ResourceClaims(pod.Namespace).Get(
ctx,
*claimName,
metav1.GetOptions{})


@@ -32,7 +32,7 @@ import (
"google.golang.org/grpc"
v1 "k8s.io/api/core/v1"
resourceapi "k8s.io/api/resource/v1alpha3"
resourceapi "k8s.io/api/resource/v1beta1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/sets"
@@ -560,11 +560,11 @@ func TestPrepareResources(t *testing.T) {
}
if test.claim != nil {
if _, err := fakeKubeClient.ResourceV1alpha3().ResourceClaims(test.pod.Namespace).Create(tCtx, test.claim, metav1.CreateOptions{}); err != nil {
if _, err := fakeKubeClient.ResourceV1beta1().ResourceClaims(test.pod.Namespace).Create(tCtx, test.claim, metav1.CreateOptions{}); err != nil {
t.Fatalf("failed to create ResourceClaim %s: %+v", test.claim.Name, err)
}
defer func() {
require.NoError(t, fakeKubeClient.ResourceV1alpha3().ResourceClaims(test.pod.Namespace).Delete(tCtx, test.claim.Name, metav1.DeleteOptions{}))
require.NoError(t, fakeKubeClient.ResourceV1beta1().ResourceClaims(test.pod.Namespace).Delete(tCtx, test.claim.Name, metav1.DeleteOptions{}))
}()
}
@@ -948,7 +948,7 @@ func TestParallelPrepareUnprepareResources(t *testing.T) {
}
claim := genTestClaim(claimName, driverName, deviceName, string(podUID))
if _, err = fakeKubeClient.ResourceV1alpha3().ResourceClaims(pod.Namespace).Create(tCtx, claim, metav1.CreateOptions{}); err != nil {
if _, err = fakeKubeClient.ResourceV1beta1().ResourceClaims(pod.Namespace).Create(tCtx, claim, metav1.CreateOptions{}); err != nil {
t.Errorf("failed to create ResourceClaim %s: %+v", claim.Name, err)
return
}


@@ -23,7 +23,7 @@ import (
"time"
v1 "k8s.io/api/core/v1"
resourceapi "k8s.io/api/resource/v1alpha3"
resourceapi "k8s.io/api/resource/v1beta1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/fields"
@@ -111,7 +111,7 @@ func (h *RegistrationHandler) wipeResourceSlices(driver string) {
fieldSelector[resourceapi.ResourceSliceSelectorDriver] = driver
}
err = h.kubeClient.ResourceV1alpha3().ResourceSlices().DeleteCollection(ctx, metav1.DeleteOptions{}, metav1.ListOptions{FieldSelector: fieldSelector.String()})
err = h.kubeClient.ResourceV1beta1().ResourceSlices().DeleteCollection(ctx, metav1.DeleteOptions{}, metav1.ListOptions{FieldSelector: fieldSelector.String()})
switch {
case err == nil:
logger.V(3).Info("Deleted ResourceSlices", "fieldSelector", fieldSelector)


@@ -41,7 +41,7 @@ import (
flowcontrolv1 "k8s.io/api/flowcontrol/v1"
networkingv1beta1 "k8s.io/api/networking/v1beta1"
rbacv1beta1 "k8s.io/api/rbac/v1beta1"
resourceapi "k8s.io/api/resource/v1alpha3"
resourceapi "k8s.io/api/resource/v1beta1"
schedulingv1 "k8s.io/api/scheduling/v1"
storagev1 "k8s.io/api/storage/v1"
storagev1beta1 "k8s.io/api/storage/v1beta1"


@@ -21,7 +21,7 @@ import (
"strings"
corev1 "k8s.io/api/core/v1"
resourceapi "k8s.io/api/resource/v1alpha3"
resourceapi "k8s.io/api/resource/v1beta1"
"k8s.io/apimachinery/pkg/api/resource"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
@@ -29,7 +29,7 @@ import (
quota "k8s.io/apiserver/pkg/quota/v1"
"k8s.io/apiserver/pkg/quota/v1/generic"
resourceinternal "k8s.io/kubernetes/pkg/apis/resource"
resourceversioned "k8s.io/kubernetes/pkg/apis/resource/v1alpha3"
resourceversioned "k8s.io/kubernetes/pkg/apis/resource/v1beta1"
)
// The name used for object count quota. This evaluator takes over counting
@@ -149,7 +149,7 @@ func toExternalResourceClaimOrError(obj runtime.Object) (*resourceapi.ResourceCl
case *resourceapi.ResourceClaim:
claim = t
case *resourceinternal.ResourceClaim:
if err := resourceversioned.Convert_resource_ResourceClaim_To_v1alpha3_ResourceClaim(t, claim, nil); err != nil {
if err := resourceversioned.Convert_resource_ResourceClaim_To_v1beta1_ResourceClaim(t, claim, nil); err != nil {
return nil, err
}
default:


@@ -532,7 +532,7 @@ func addAllEventHandlers(
}
case framework.ResourceSlice:
if utilfeature.DefaultFeatureGate.Enabled(features.DynamicResourceAllocation) {
if handlerRegistration, err = informerFactory.Resource().V1alpha3().ResourceSlices().Informer().AddEventHandler(
if handlerRegistration, err = informerFactory.Resource().V1beta1().ResourceSlices().Informer().AddEventHandler(
buildEvtResHandler(at, framework.ResourceSlice),
); err != nil {
return err
@@ -541,7 +541,7 @@ func addAllEventHandlers(
}
case framework.DeviceClass:
if utilfeature.DefaultFeatureGate.Enabled(features.DynamicResourceAllocation) {
if handlerRegistration, err = informerFactory.Resource().V1alpha3().DeviceClasses().Informer().AddEventHandler(
if handlerRegistration, err = informerFactory.Resource().V1beta1().DeviceClasses().Informer().AddEventHandler(
buildEvtResHandler(at, framework.DeviceClass),
); err != nil {
return err


@@ -27,7 +27,7 @@ import (
appsv1 "k8s.io/api/apps/v1"
batchv1 "k8s.io/api/batch/v1"
v1 "k8s.io/api/core/v1"
resourceapi "k8s.io/api/resource/v1alpha3"
resourceapi "k8s.io/api/resource/v1beta1"
storagev1 "k8s.io/api/storage/v1"
"k8s.io/apimachinery/pkg/api/resource"
utilfeature "k8s.io/apiserver/pkg/util/feature"
@@ -365,7 +365,7 @@ func TestAddAllEventHandlers(t *testing.T) {
dynInformerFactory := dynamicinformer.NewDynamicSharedInformerFactory(dynclient, 0)
var resourceClaimCache *assumecache.AssumeCache
if utilfeature.DefaultFeatureGate.Enabled(features.DynamicResourceAllocation) {
resourceClaimInformer := informerFactory.Resource().V1alpha3().ResourceClaims().Informer()
resourceClaimInformer := informerFactory.Resource().V1beta1().ResourceClaims().Informer()
resourceClaimCache = assumecache.NewAssumeCache(logger, resourceClaimInformer, "ResourceClaim", "", nil)
}


@@ -21,7 +21,7 @@ limitations under the License.
package contract
import (
resourceapi "k8s.io/api/resource/v1alpha3"
resourceapi "k8s.io/api/resource/v1beta1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/dynamic-resource-allocation/structured"


@@ -17,7 +17,7 @@ limitations under the License.
package framework
import (
resourceapi "k8s.io/api/resource/v1alpha3"
resourceapi "k8s.io/api/resource/v1beta1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/dynamic-resource-allocation/structured"


@@ -19,7 +19,7 @@ package dynamicresources
import (
"sync"
resourceapi "k8s.io/api/resource/v1alpha3"
resourceapi "k8s.io/api/resource/v1beta1"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/client-go/tools/cache"
"k8s.io/dynamic-resource-allocation/structured"


@@ -21,12 +21,12 @@ import (
"fmt"
"sync"
resourceapi "k8s.io/api/resource/v1alpha3"
resourceapi "k8s.io/api/resource/v1beta1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/client-go/informers"
resourcelisters "k8s.io/client-go/listers/resource/v1alpha3"
resourcelisters "k8s.io/client-go/listers/resource/v1beta1"
"k8s.io/dynamic-resource-allocation/structured"
"k8s.io/klog/v2"
"k8s.io/kubernetes/pkg/scheduler/framework"
@@ -53,8 +53,8 @@ func NewDRAManager(ctx context.Context, claimsCache *assumecache.AssumeCache, in
allocatedDevices: newAllocatedDevices(logger),
logger: logger,
},
resourceSliceLister: &resourceSliceLister{sliceLister: informerFactory.Resource().V1alpha3().ResourceSlices().Lister()},
deviceClassLister: &deviceClassLister{classLister: informerFactory.Resource().V1alpha3().DeviceClasses().Lister()},
resourceSliceLister: &resourceSliceLister{sliceLister: informerFactory.Resource().V1beta1().ResourceSlices().Lister()},
deviceClassLister: &deviceClassLister{classLister: informerFactory.Resource().V1beta1().DeviceClasses().Lister()},
}
// Reacting to events is more efficient than iterating over the list


@@ -26,7 +26,7 @@ import (
"github.com/google/go-cmp/cmp"
v1 "k8s.io/api/core/v1"
resourceapi "k8s.io/api/resource/v1alpha3"
resourceapi "k8s.io/api/resource/v1beta1"
apiequality "k8s.io/apimachinery/pkg/api/equality"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -627,7 +627,7 @@ func (pl *DynamicResources) PostFilter(ctx context.Context, cs *framework.CycleS
claim.Status.ReservedFor = nil
claim.Status.Allocation = nil
logger.V(5).Info("Deallocation of ResourceClaim", "pod", klog.KObj(pod), "resourceclaim", klog.KObj(claim))
if _, err := pl.clientset.ResourceV1alpha3().ResourceClaims(claim.Namespace).UpdateStatus(ctx, claim, metav1.UpdateOptions{}); err != nil {
if _, err := pl.clientset.ResourceV1beta1().ResourceClaims(claim.Namespace).UpdateStatus(ctx, claim, metav1.UpdateOptions{}); err != nil {
return nil, statusError(logger, err)
}
return nil, framework.NewStatus(framework.Unschedulable, "deallocation of ResourceClaim completed")
@@ -747,7 +747,7 @@ func (pl *DynamicResources) Unreserve(ctx context.Context, cs *framework.CycleSt
pod.UID,
)
logger.V(5).Info("unreserve", "resourceclaim", klog.KObj(claim), "pod", klog.KObj(pod))
claim, err := pl.clientset.ResourceV1alpha3().ResourceClaims(claim.Namespace).Patch(ctx, claim.Name, types.StrategicMergePatchType, []byte(patch), metav1.PatchOptions{}, "status")
claim, err := pl.clientset.ResourceV1beta1().ResourceClaims(claim.Namespace).Patch(ctx, claim.Name, types.StrategicMergePatchType, []byte(patch), metav1.PatchOptions{}, "status")
if err != nil {
// We will get here again when pod scheduling is retried.
logger.Error(err, "unreserve", "resourceclaim", klog.KObj(claim))
@@ -822,7 +822,7 @@ func (pl *DynamicResources) bindClaim(ctx context.Context, state *stateData, ind
refreshClaim := false
retryErr := retry.RetryOnConflict(retry.DefaultRetry, func() error {
if refreshClaim {
updatedClaim, err := pl.clientset.ResourceV1alpha3().ResourceClaims(claim.Namespace).Get(ctx, claim.Name, metav1.GetOptions{})
updatedClaim, err := pl.clientset.ResourceV1beta1().ResourceClaims(claim.Namespace).Get(ctx, claim.Name, metav1.GetOptions{})
if err != nil {
return fmt.Errorf("get updated claim %s after conflict: %w", klog.KObj(claim), err)
}
@@ -847,7 +847,7 @@ func (pl *DynamicResources) bindClaim(ctx context.Context, state *stateData, ind
// If we were interrupted in the past, it might already be set and we simply continue.
if !slices.Contains(claim.Finalizers, resourceapi.Finalizer) {
claim.Finalizers = append(claim.Finalizers, resourceapi.Finalizer)
updatedClaim, err := pl.clientset.ResourceV1alpha3().ResourceClaims(claim.Namespace).Update(ctx, claim, metav1.UpdateOptions{})
updatedClaim, err := pl.clientset.ResourceV1beta1().ResourceClaims(claim.Namespace).Update(ctx, claim, metav1.UpdateOptions{})
if err != nil {
return fmt.Errorf("add finalizer to claim %s: %w", klog.KObj(claim), err)
}
@@ -860,7 +860,7 @@ func (pl *DynamicResources) bindClaim(ctx context.Context, state *stateData, ind
// preconditions. The apiserver will tell us with a
// non-conflict error if this isn't possible.
claim.Status.ReservedFor = append(claim.Status.ReservedFor, resourceapi.ResourceClaimConsumerReference{Resource: "pods", Name: pod.Name, UID: pod.UID})
updatedClaim, err := pl.clientset.ResourceV1alpha3().ResourceClaims(claim.Namespace).UpdateStatus(ctx, claim, metav1.UpdateOptions{})
updatedClaim, err := pl.clientset.ResourceV1beta1().ResourceClaims(claim.Namespace).UpdateStatus(ctx, claim, metav1.UpdateOptions{})
if err != nil {
if allocation != nil {
return fmt.Errorf("add allocation and reservation to claim %s: %w", klog.KObj(claim), err)


@@ -31,7 +31,7 @@ import (
"github.com/stretchr/testify/require"
v1 "k8s.io/api/core/v1"
resourceapi "k8s.io/api/resource/v1alpha3"
resourceapi "k8s.io/api/resource/v1beta1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
apiruntime "k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/types"
@@ -973,7 +973,7 @@ func (tc *testContext) verify(t *testing.T, expected result, initialObjects []me
func (tc *testContext) listAll(t *testing.T) (objects []metav1.Object) {
t.Helper()
claims, err := tc.client.ResourceV1alpha3().ResourceClaims("").List(tc.ctx, metav1.ListOptions{})
claims, err := tc.client.ResourceV1beta1().ResourceClaims("").List(tc.ctx, metav1.ListOptions{})
require.NoError(t, err, "list claims")
for _, claim := range claims.Items {
claim := claim
@@ -1016,7 +1016,7 @@ func (tc *testContext) updateAPIServer(t *testing.T, objects []metav1.Object, up
t.Logf("Updating %T %q, diff (-old, +new):\n%s", obj, obj.GetName(), diff)
switch obj := obj.(type) {
case *resourceapi.ResourceClaim:
obj, err := tc.client.ResourceV1alpha3().ResourceClaims(obj.Namespace).Update(tc.ctx, obj, metav1.UpdateOptions{})
obj, err := tc.client.ResourceV1beta1().ResourceClaims(obj.Namespace).Update(tc.ctx, obj, metav1.UpdateOptions{})
if err != nil {
t.Fatalf("unexpected error during prepare update: %v", err)
}
@@ -1069,7 +1069,7 @@ func setup(t *testing.T, nodes []*v1.Node, claims []*resourceapi.ResourceClaim,
tc.client.PrependReactor("*", "*", reactor)
tc.informerFactory = informers.NewSharedInformerFactory(tc.client, 0)
tc.draManager = NewDRAManager(tCtx, assumecache.NewAssumeCache(tCtx.Logger(), tc.informerFactory.Resource().V1alpha3().ResourceClaims().Informer(), "resource claim", "", nil), tc.informerFactory)
tc.draManager = NewDRAManager(tCtx, assumecache.NewAssumeCache(tCtx.Logger(), tc.informerFactory.Resource().V1beta1().ResourceClaims().Informer(), "resource claim", "", nil), tc.informerFactory)
opts := []runtime.Option{
runtime.WithClientSet(tc.client),
runtime.WithInformerFactory(tc.informerFactory),
@@ -1089,11 +1089,11 @@ func setup(t *testing.T, nodes []*v1.Node, claims []*resourceapi.ResourceClaim,
// The tests use the API to create the objects because then reactors
// get triggered.
for _, claim := range claims {
_, err := tc.client.ResourceV1alpha3().ResourceClaims(claim.Namespace).Create(tc.ctx, claim, metav1.CreateOptions{})
_, err := tc.client.ResourceV1beta1().ResourceClaims(claim.Namespace).Create(tc.ctx, claim, metav1.CreateOptions{})
require.NoError(t, err, "create resource claim")
}
for _, class := range classes {
_, err := tc.client.ResourceV1alpha3().DeviceClasses().Create(tc.ctx, class, metav1.CreateOptions{})
_, err := tc.client.ResourceV1beta1().DeviceClasses().Create(tc.ctx, class, metav1.CreateOptions{})
require.NoError(t, err, "create resource class")
}
@@ -1281,7 +1281,7 @@ func Test_isSchedulableAfterClaimChange(t *testing.T) {
// Some test claims already have it. Clear for create.
createClaim := claim.DeepCopy()
createClaim.UID = ""
storedClaim, err := testCtx.client.ResourceV1alpha3().ResourceClaims(createClaim.Namespace).Create(tCtx, createClaim, metav1.CreateOptions{})
storedClaim, err := testCtx.client.ResourceV1beta1().ResourceClaims(createClaim.Namespace).Create(tCtx, createClaim, metav1.CreateOptions{})
if err != nil {
t.Fatalf("create claim: expected no error, got: %v", err)
}
@@ -1296,7 +1296,7 @@ func Test_isSchedulableAfterClaimChange(t *testing.T) {
updateClaim.UID = cachedClaim.(*resourceapi.ResourceClaim).UID
updateClaim.ResourceVersion = cachedClaim.(*resourceapi.ResourceClaim).ResourceVersion
storedClaim, err := testCtx.client.ResourceV1alpha3().ResourceClaims(updateClaim.Namespace).Update(tCtx, updateClaim, metav1.UpdateOptions{})
storedClaim, err := testCtx.client.ResourceV1beta1().ResourceClaims(updateClaim.Namespace).Update(tCtx, updateClaim, metav1.UpdateOptions{})
if err != nil {
t.Fatalf("update claim: expected no error, got: %v", err)
}


@@ -299,7 +299,7 @@ func New(ctx context.Context,
var resourceClaimCache *assumecache.AssumeCache
var draManager framework.SharedDRAManager
if utilfeature.DefaultFeatureGate.Enabled(features.DynamicResourceAllocation) {
resourceClaimInformer := informerFactory.Resource().V1alpha3().ResourceClaims().Informer()
resourceClaimInformer := informerFactory.Resource().V1beta1().ResourceClaims().Informer()
resourceClaimCache = assumecache.NewAssumeCache(logger, resourceClaimInformer, "ResourceClaim", "", nil)
draManager = dynamicresources.NewDRAManager(ctx, resourceClaimCache, informerFactory)
}


@@ -23,7 +23,7 @@ import (
v1 "k8s.io/api/core/v1"
policy "k8s.io/api/policy/v1"
resourceapi "k8s.io/api/resource/v1alpha3"
resourceapi "k8s.io/api/resource/v1beta1"
storagev1 "k8s.io/api/storage/v1"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"


@@ -22,11 +22,11 @@ import (
"k8s.io/klog/v2"
corev1 "k8s.io/api/core/v1"
resourceapi "k8s.io/api/resource/v1alpha3"
resourceapi "k8s.io/api/resource/v1beta1"
storagev1 "k8s.io/api/storage/v1"
"k8s.io/apimachinery/pkg/util/wait"
corev1informers "k8s.io/client-go/informers/core/v1"
resourceinformers "k8s.io/client-go/informers/resource/v1alpha3"
resourceinformers "k8s.io/client-go/informers/resource/v1beta1"
storageinformers "k8s.io/client-go/informers/storage/v1"
"k8s.io/client-go/tools/cache"
"k8s.io/dynamic-resource-allocation/resourceclaim"


@@ -28,7 +28,7 @@ import (
"time"
corev1 "k8s.io/api/core/v1"
resourceapi "k8s.io/api/resource/v1alpha3"
resourceapi "k8s.io/api/resource/v1beta1"
storagev1 "k8s.io/api/storage/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/fields"


@@ -18,5 +18,5 @@ limitations under the License.
// unique strings are faster to compare and more efficient when used as key in
// a map.
//
// +k8s:conversion-gen=k8s.io/api/resource/v1alpha3
// +k8s:conversion-gen=k8s.io/api/resource/v1beta1
package api


@@ -49,7 +49,7 @@ type Device struct {
type BasicDevice struct {
Attributes map[QualifiedName]DeviceAttribute
Capacity map[QualifiedName]resource.Quantity
Capacity map[QualifiedName]DeviceCapacity
}
type QualifiedName string
@@ -62,3 +62,7 @@ type DeviceAttribute struct {
StringValue *string
VersionValue *string
}
type DeviceCapacity struct {
Quantity resource.Quantity
}
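
This is the one structural API difference the commit has to absorb: in v1beta1, per-device capacity is a DeviceCapacity struct wrapping the quantity rather than a bare resource.Quantity, and the internal draapi types above mirror that. A minimal sketch of the new shape, with the device name and value being illustrative:

	package sketch

	import (
		resourceapi "k8s.io/api/resource/v1beta1"
		"k8s.io/apimachinery/pkg/api/resource"
	)

	func exampleDevice() resourceapi.Device {
		return resourceapi.Device{
			Name: "gpu-0",
			Basic: &resourceapi.BasicDevice{
				Capacity: map[resourceapi.QualifiedName]resourceapi.DeviceCapacity{
					// v1alpha3 stored the bare quantity: "memory": resource.MustParse("16Gi")
					"memory": {Quantity: resource.MustParse("16Gi")},
				},
			},
		}
	}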


@@ -25,8 +25,7 @@ import (
unsafe "unsafe"
v1 "k8s.io/api/core/v1"
v1alpha3 "k8s.io/api/resource/v1alpha3"
resource "k8s.io/apimachinery/pkg/api/resource"
v1beta1 "k8s.io/api/resource/v1beta1"
conversion "k8s.io/apimachinery/pkg/conversion"
runtime "k8s.io/apimachinery/pkg/runtime"
)
@@ -38,63 +37,73 @@ func init() {
// RegisterConversions adds conversion functions to the given scheme.
// Public to allow building arbitrary schemes.
func RegisterConversions(s *runtime.Scheme) error {
if err := s.AddGeneratedConversionFunc((*BasicDevice)(nil), (*v1alpha3.BasicDevice)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_api_BasicDevice_To_v1alpha3_BasicDevice(a.(*BasicDevice), b.(*v1alpha3.BasicDevice), scope)
if err := s.AddGeneratedConversionFunc((*BasicDevice)(nil), (*v1beta1.BasicDevice)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_api_BasicDevice_To_v1beta1_BasicDevice(a.(*BasicDevice), b.(*v1beta1.BasicDevice), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*v1alpha3.BasicDevice)(nil), (*BasicDevice)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1alpha3_BasicDevice_To_api_BasicDevice(a.(*v1alpha3.BasicDevice), b.(*BasicDevice), scope)
if err := s.AddGeneratedConversionFunc((*v1beta1.BasicDevice)(nil), (*BasicDevice)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1beta1_BasicDevice_To_api_BasicDevice(a.(*v1beta1.BasicDevice), b.(*BasicDevice), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*Device)(nil), (*v1alpha3.Device)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_api_Device_To_v1alpha3_Device(a.(*Device), b.(*v1alpha3.Device), scope)
if err := s.AddGeneratedConversionFunc((*Device)(nil), (*v1beta1.Device)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_api_Device_To_v1beta1_Device(a.(*Device), b.(*v1beta1.Device), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*v1alpha3.Device)(nil), (*Device)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1alpha3_Device_To_api_Device(a.(*v1alpha3.Device), b.(*Device), scope)
if err := s.AddGeneratedConversionFunc((*v1beta1.Device)(nil), (*Device)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1beta1_Device_To_api_Device(a.(*v1beta1.Device), b.(*Device), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*DeviceAttribute)(nil), (*v1alpha3.DeviceAttribute)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_api_DeviceAttribute_To_v1alpha3_DeviceAttribute(a.(*DeviceAttribute), b.(*v1alpha3.DeviceAttribute), scope)
if err := s.AddGeneratedConversionFunc((*DeviceAttribute)(nil), (*v1beta1.DeviceAttribute)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_api_DeviceAttribute_To_v1beta1_DeviceAttribute(a.(*DeviceAttribute), b.(*v1beta1.DeviceAttribute), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*v1alpha3.DeviceAttribute)(nil), (*DeviceAttribute)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1alpha3_DeviceAttribute_To_api_DeviceAttribute(a.(*v1alpha3.DeviceAttribute), b.(*DeviceAttribute), scope)
if err := s.AddGeneratedConversionFunc((*v1beta1.DeviceAttribute)(nil), (*DeviceAttribute)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1beta1_DeviceAttribute_To_api_DeviceAttribute(a.(*v1beta1.DeviceAttribute), b.(*DeviceAttribute), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*ResourcePool)(nil), (*v1alpha3.ResourcePool)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_api_ResourcePool_To_v1alpha3_ResourcePool(a.(*ResourcePool), b.(*v1alpha3.ResourcePool), scope)
if err := s.AddGeneratedConversionFunc((*DeviceCapacity)(nil), (*v1beta1.DeviceCapacity)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_api_DeviceCapacity_To_v1beta1_DeviceCapacity(a.(*DeviceCapacity), b.(*v1beta1.DeviceCapacity), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*v1alpha3.ResourcePool)(nil), (*ResourcePool)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1alpha3_ResourcePool_To_api_ResourcePool(a.(*v1alpha3.ResourcePool), b.(*ResourcePool), scope)
if err := s.AddGeneratedConversionFunc((*v1beta1.DeviceCapacity)(nil), (*DeviceCapacity)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1beta1_DeviceCapacity_To_api_DeviceCapacity(a.(*v1beta1.DeviceCapacity), b.(*DeviceCapacity), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*ResourceSlice)(nil), (*v1alpha3.ResourceSlice)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_api_ResourceSlice_To_v1alpha3_ResourceSlice(a.(*ResourceSlice), b.(*v1alpha3.ResourceSlice), scope)
if err := s.AddGeneratedConversionFunc((*ResourcePool)(nil), (*v1beta1.ResourcePool)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_api_ResourcePool_To_v1beta1_ResourcePool(a.(*ResourcePool), b.(*v1beta1.ResourcePool), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*v1alpha3.ResourceSlice)(nil), (*ResourceSlice)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1alpha3_ResourceSlice_To_api_ResourceSlice(a.(*v1alpha3.ResourceSlice), b.(*ResourceSlice), scope)
if err := s.AddGeneratedConversionFunc((*v1beta1.ResourcePool)(nil), (*ResourcePool)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1beta1_ResourcePool_To_api_ResourcePool(a.(*v1beta1.ResourcePool), b.(*ResourcePool), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*ResourceSliceSpec)(nil), (*v1alpha3.ResourceSliceSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_api_ResourceSliceSpec_To_v1alpha3_ResourceSliceSpec(a.(*ResourceSliceSpec), b.(*v1alpha3.ResourceSliceSpec), scope)
if err := s.AddGeneratedConversionFunc((*ResourceSlice)(nil), (*v1beta1.ResourceSlice)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_api_ResourceSlice_To_v1beta1_ResourceSlice(a.(*ResourceSlice), b.(*v1beta1.ResourceSlice), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*v1alpha3.ResourceSliceSpec)(nil), (*ResourceSliceSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1alpha3_ResourceSliceSpec_To_api_ResourceSliceSpec(a.(*v1alpha3.ResourceSliceSpec), b.(*ResourceSliceSpec), scope)
if err := s.AddGeneratedConversionFunc((*v1beta1.ResourceSlice)(nil), (*ResourceSlice)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1beta1_ResourceSlice_To_api_ResourceSlice(a.(*v1beta1.ResourceSlice), b.(*ResourceSlice), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*ResourceSliceSpec)(nil), (*v1beta1.ResourceSliceSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_api_ResourceSliceSpec_To_v1beta1_ResourceSliceSpec(a.(*ResourceSliceSpec), b.(*v1beta1.ResourceSliceSpec), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*v1beta1.ResourceSliceSpec)(nil), (*ResourceSliceSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1beta1_ResourceSliceSpec_To_api_ResourceSliceSpec(a.(*v1beta1.ResourceSliceSpec), b.(*ResourceSliceSpec), scope)
}); err != nil {
return err
}
@@ -111,42 +120,42 @@ func RegisterConversions(s *runtime.Scheme) error {
return nil
}
func autoConvert_api_BasicDevice_To_v1alpha3_BasicDevice(in *BasicDevice, out *v1alpha3.BasicDevice, s conversion.Scope) error {
out.Attributes = *(*map[v1alpha3.QualifiedName]v1alpha3.DeviceAttribute)(unsafe.Pointer(&in.Attributes))
out.Capacity = *(*map[v1alpha3.QualifiedName]resource.Quantity)(unsafe.Pointer(&in.Capacity))
func autoConvert_api_BasicDevice_To_v1beta1_BasicDevice(in *BasicDevice, out *v1beta1.BasicDevice, s conversion.Scope) error {
out.Attributes = *(*map[v1beta1.QualifiedName]v1beta1.DeviceAttribute)(unsafe.Pointer(&in.Attributes))
out.Capacity = *(*map[v1beta1.QualifiedName]v1beta1.DeviceCapacity)(unsafe.Pointer(&in.Capacity))
return nil
}
// Convert_api_BasicDevice_To_v1alpha3_BasicDevice is an autogenerated conversion function.
func Convert_api_BasicDevice_To_v1alpha3_BasicDevice(in *BasicDevice, out *v1alpha3.BasicDevice, s conversion.Scope) error {
return autoConvert_api_BasicDevice_To_v1alpha3_BasicDevice(in, out, s)
// Convert_api_BasicDevice_To_v1beta1_BasicDevice is an autogenerated conversion function.
func Convert_api_BasicDevice_To_v1beta1_BasicDevice(in *BasicDevice, out *v1beta1.BasicDevice, s conversion.Scope) error {
return autoConvert_api_BasicDevice_To_v1beta1_BasicDevice(in, out, s)
}
func autoConvert_v1alpha3_BasicDevice_To_api_BasicDevice(in *v1alpha3.BasicDevice, out *BasicDevice, s conversion.Scope) error {
func autoConvert_v1beta1_BasicDevice_To_api_BasicDevice(in *v1beta1.BasicDevice, out *BasicDevice, s conversion.Scope) error {
out.Attributes = *(*map[QualifiedName]DeviceAttribute)(unsafe.Pointer(&in.Attributes))
out.Capacity = *(*map[QualifiedName]resource.Quantity)(unsafe.Pointer(&in.Capacity))
out.Capacity = *(*map[QualifiedName]DeviceCapacity)(unsafe.Pointer(&in.Capacity))
return nil
}
// Convert_v1alpha3_BasicDevice_To_api_BasicDevice is an autogenerated conversion function.
func Convert_v1alpha3_BasicDevice_To_api_BasicDevice(in *v1alpha3.BasicDevice, out *BasicDevice, s conversion.Scope) error {
return autoConvert_v1alpha3_BasicDevice_To_api_BasicDevice(in, out, s)
// Convert_v1beta1_BasicDevice_To_api_BasicDevice is an autogenerated conversion function.
func Convert_v1beta1_BasicDevice_To_api_BasicDevice(in *v1beta1.BasicDevice, out *BasicDevice, s conversion.Scope) error {
return autoConvert_v1beta1_BasicDevice_To_api_BasicDevice(in, out, s)
}
func autoConvert_api_Device_To_v1alpha3_Device(in *Device, out *v1alpha3.Device, s conversion.Scope) error {
func autoConvert_api_Device_To_v1beta1_Device(in *Device, out *v1beta1.Device, s conversion.Scope) error {
if err := Convert_api_UniqueString_To_string(&in.Name, &out.Name, s); err != nil {
return err
}
out.Basic = (*v1alpha3.BasicDevice)(unsafe.Pointer(in.Basic))
out.Basic = (*v1beta1.BasicDevice)(unsafe.Pointer(in.Basic))
return nil
}
// Convert_api_Device_To_v1alpha3_Device is an autogenerated conversion function.
func Convert_api_Device_To_v1alpha3_Device(in *Device, out *v1alpha3.Device, s conversion.Scope) error {
return autoConvert_api_Device_To_v1alpha3_Device(in, out, s)
// Convert_api_Device_To_v1beta1_Device is an autogenerated conversion function.
func Convert_api_Device_To_v1beta1_Device(in *Device, out *v1beta1.Device, s conversion.Scope) error {
return autoConvert_api_Device_To_v1beta1_Device(in, out, s)
}
func autoConvert_v1alpha3_Device_To_api_Device(in *v1alpha3.Device, out *Device, s conversion.Scope) error {
func autoConvert_v1beta1_Device_To_api_Device(in *v1beta1.Device, out *Device, s conversion.Scope) error {
if err := Convert_string_To_api_UniqueString(&in.Name, &out.Name, s); err != nil {
return err
}
@@ -154,12 +163,12 @@ func autoConvert_v1alpha3_Device_To_api_Device(in *v1alpha3.Device, out *Device,
return nil
}
// Convert_v1alpha3_Device_To_api_Device is an autogenerated conversion function.
func Convert_v1alpha3_Device_To_api_Device(in *v1alpha3.Device, out *Device, s conversion.Scope) error {
return autoConvert_v1alpha3_Device_To_api_Device(in, out, s)
// Convert_v1beta1_Device_To_api_Device is an autogenerated conversion function.
func Convert_v1beta1_Device_To_api_Device(in *v1beta1.Device, out *Device, s conversion.Scope) error {
return autoConvert_v1beta1_Device_To_api_Device(in, out, s)
}
func autoConvert_api_DeviceAttribute_To_v1alpha3_DeviceAttribute(in *DeviceAttribute, out *v1alpha3.DeviceAttribute, s conversion.Scope) error {
func autoConvert_api_DeviceAttribute_To_v1beta1_DeviceAttribute(in *DeviceAttribute, out *v1beta1.DeviceAttribute, s conversion.Scope) error {
out.IntValue = (*int64)(unsafe.Pointer(in.IntValue))
out.BoolValue = (*bool)(unsafe.Pointer(in.BoolValue))
out.StringValue = (*string)(unsafe.Pointer(in.StringValue))
@@ -167,12 +176,12 @@ func autoConvert_api_DeviceAttribute_To_v1alpha3_DeviceAttribute(in *DeviceAttri
return nil
}
// Convert_api_DeviceAttribute_To_v1alpha3_DeviceAttribute is an autogenerated conversion function.
func Convert_api_DeviceAttribute_To_v1alpha3_DeviceAttribute(in *DeviceAttribute, out *v1alpha3.DeviceAttribute, s conversion.Scope) error {
return autoConvert_api_DeviceAttribute_To_v1alpha3_DeviceAttribute(in, out, s)
// Convert_api_DeviceAttribute_To_v1beta1_DeviceAttribute is an autogenerated conversion function.
func Convert_api_DeviceAttribute_To_v1beta1_DeviceAttribute(in *DeviceAttribute, out *v1beta1.DeviceAttribute, s conversion.Scope) error {
return autoConvert_api_DeviceAttribute_To_v1beta1_DeviceAttribute(in, out, s)
}
func autoConvert_v1alpha3_DeviceAttribute_To_api_DeviceAttribute(in *v1alpha3.DeviceAttribute, out *DeviceAttribute, s conversion.Scope) error {
func autoConvert_v1beta1_DeviceAttribute_To_api_DeviceAttribute(in *v1beta1.DeviceAttribute, out *DeviceAttribute, s conversion.Scope) error {
out.IntValue = (*int64)(unsafe.Pointer(in.IntValue))
out.BoolValue = (*bool)(unsafe.Pointer(in.BoolValue))
out.StringValue = (*string)(unsafe.Pointer(in.StringValue))
@@ -180,12 +189,32 @@ func autoConvert_v1alpha3_DeviceAttribute_To_api_DeviceAttribute(in *v1alpha3.De
return nil
}
// Convert_v1alpha3_DeviceAttribute_To_api_DeviceAttribute is an autogenerated conversion function.
func Convert_v1alpha3_DeviceAttribute_To_api_DeviceAttribute(in *v1alpha3.DeviceAttribute, out *DeviceAttribute, s conversion.Scope) error {
return autoConvert_v1alpha3_DeviceAttribute_To_api_DeviceAttribute(in, out, s)
// Convert_v1beta1_DeviceAttribute_To_api_DeviceAttribute is an autogenerated conversion function.
func Convert_v1beta1_DeviceAttribute_To_api_DeviceAttribute(in *v1beta1.DeviceAttribute, out *DeviceAttribute, s conversion.Scope) error {
return autoConvert_v1beta1_DeviceAttribute_To_api_DeviceAttribute(in, out, s)
}
func autoConvert_api_ResourcePool_To_v1alpha3_ResourcePool(in *ResourcePool, out *v1alpha3.ResourcePool, s conversion.Scope) error {
func autoConvert_api_DeviceCapacity_To_v1beta1_DeviceCapacity(in *DeviceCapacity, out *v1beta1.DeviceCapacity, s conversion.Scope) error {
out.Quantity = in.Quantity
return nil
}
// Convert_api_DeviceCapacity_To_v1beta1_DeviceCapacity is an autogenerated conversion function.
func Convert_api_DeviceCapacity_To_v1beta1_DeviceCapacity(in *DeviceCapacity, out *v1beta1.DeviceCapacity, s conversion.Scope) error {
return autoConvert_api_DeviceCapacity_To_v1beta1_DeviceCapacity(in, out, s)
}
func autoConvert_v1beta1_DeviceCapacity_To_api_DeviceCapacity(in *v1beta1.DeviceCapacity, out *DeviceCapacity, s conversion.Scope) error {
out.Quantity = in.Quantity
return nil
}
// Convert_v1beta1_DeviceCapacity_To_api_DeviceCapacity is an autogenerated conversion function.
func Convert_v1beta1_DeviceCapacity_To_api_DeviceCapacity(in *v1beta1.DeviceCapacity, out *DeviceCapacity, s conversion.Scope) error {
return autoConvert_v1beta1_DeviceCapacity_To_api_DeviceCapacity(in, out, s)
}
func autoConvert_api_ResourcePool_To_v1beta1_ResourcePool(in *ResourcePool, out *v1beta1.ResourcePool, s conversion.Scope) error {
if err := Convert_api_UniqueString_To_string(&in.Name, &out.Name, s); err != nil {
return err
}
@@ -194,12 +223,12 @@ func autoConvert_api_ResourcePool_To_v1alpha3_ResourcePool(in *ResourcePool, out
return nil
}
// Convert_api_ResourcePool_To_v1alpha3_ResourcePool is an autogenerated conversion function.
func Convert_api_ResourcePool_To_v1alpha3_ResourcePool(in *ResourcePool, out *v1alpha3.ResourcePool, s conversion.Scope) error {
return autoConvert_api_ResourcePool_To_v1alpha3_ResourcePool(in, out, s)
// Convert_api_ResourcePool_To_v1beta1_ResourcePool is an autogenerated conversion function.
func Convert_api_ResourcePool_To_v1beta1_ResourcePool(in *ResourcePool, out *v1beta1.ResourcePool, s conversion.Scope) error {
return autoConvert_api_ResourcePool_To_v1beta1_ResourcePool(in, out, s)
}
func autoConvert_v1alpha3_ResourcePool_To_api_ResourcePool(in *v1alpha3.ResourcePool, out *ResourcePool, s conversion.Scope) error {
func autoConvert_v1beta1_ResourcePool_To_api_ResourcePool(in *v1beta1.ResourcePool, out *ResourcePool, s conversion.Scope) error {
if err := Convert_string_To_api_UniqueString(&in.Name, &out.Name, s); err != nil {
return err
}
@@ -208,42 +237,42 @@ func autoConvert_v1alpha3_ResourcePool_To_api_ResourcePool(in *v1alpha3.Resource
return nil
}
// Convert_v1alpha3_ResourcePool_To_api_ResourcePool is an autogenerated conversion function.
func Convert_v1alpha3_ResourcePool_To_api_ResourcePool(in *v1alpha3.ResourcePool, out *ResourcePool, s conversion.Scope) error {
return autoConvert_v1alpha3_ResourcePool_To_api_ResourcePool(in, out, s)
// Convert_v1beta1_ResourcePool_To_api_ResourcePool is an autogenerated conversion function.
func Convert_v1beta1_ResourcePool_To_api_ResourcePool(in *v1beta1.ResourcePool, out *ResourcePool, s conversion.Scope) error {
return autoConvert_v1beta1_ResourcePool_To_api_ResourcePool(in, out, s)
}
func autoConvert_api_ResourceSlice_To_v1alpha3_ResourceSlice(in *ResourceSlice, out *v1alpha3.ResourceSlice, s conversion.Scope) error {
func autoConvert_api_ResourceSlice_To_v1beta1_ResourceSlice(in *ResourceSlice, out *v1beta1.ResourceSlice, s conversion.Scope) error {
out.ObjectMeta = in.ObjectMeta
if err := Convert_api_ResourceSliceSpec_To_v1alpha3_ResourceSliceSpec(&in.Spec, &out.Spec, s); err != nil {
if err := Convert_api_ResourceSliceSpec_To_v1beta1_ResourceSliceSpec(&in.Spec, &out.Spec, s); err != nil {
return err
}
return nil
}
// Convert_api_ResourceSlice_To_v1alpha3_ResourceSlice is an autogenerated conversion function.
func Convert_api_ResourceSlice_To_v1alpha3_ResourceSlice(in *ResourceSlice, out *v1alpha3.ResourceSlice, s conversion.Scope) error {
return autoConvert_api_ResourceSlice_To_v1alpha3_ResourceSlice(in, out, s)
// Convert_api_ResourceSlice_To_v1beta1_ResourceSlice is an autogenerated conversion function.
func Convert_api_ResourceSlice_To_v1beta1_ResourceSlice(in *ResourceSlice, out *v1beta1.ResourceSlice, s conversion.Scope) error {
return autoConvert_api_ResourceSlice_To_v1beta1_ResourceSlice(in, out, s)
}
func autoConvert_v1alpha3_ResourceSlice_To_api_ResourceSlice(in *v1alpha3.ResourceSlice, out *ResourceSlice, s conversion.Scope) error {
func autoConvert_v1beta1_ResourceSlice_To_api_ResourceSlice(in *v1beta1.ResourceSlice, out *ResourceSlice, s conversion.Scope) error {
out.ObjectMeta = in.ObjectMeta
if err := Convert_v1alpha3_ResourceSliceSpec_To_api_ResourceSliceSpec(&in.Spec, &out.Spec, s); err != nil {
if err := Convert_v1beta1_ResourceSliceSpec_To_api_ResourceSliceSpec(&in.Spec, &out.Spec, s); err != nil {
return err
}
return nil
}
// Convert_v1alpha3_ResourceSlice_To_api_ResourceSlice is an autogenerated conversion function.
func Convert_v1alpha3_ResourceSlice_To_api_ResourceSlice(in *v1alpha3.ResourceSlice, out *ResourceSlice, s conversion.Scope) error {
return autoConvert_v1alpha3_ResourceSlice_To_api_ResourceSlice(in, out, s)
// Convert_v1beta1_ResourceSlice_To_api_ResourceSlice is an autogenerated conversion function.
func Convert_v1beta1_ResourceSlice_To_api_ResourceSlice(in *v1beta1.ResourceSlice, out *ResourceSlice, s conversion.Scope) error {
return autoConvert_v1beta1_ResourceSlice_To_api_ResourceSlice(in, out, s)
}
func autoConvert_api_ResourceSliceSpec_To_v1alpha3_ResourceSliceSpec(in *ResourceSliceSpec, out *v1alpha3.ResourceSliceSpec, s conversion.Scope) error {
func autoConvert_api_ResourceSliceSpec_To_v1beta1_ResourceSliceSpec(in *ResourceSliceSpec, out *v1beta1.ResourceSliceSpec, s conversion.Scope) error {
if err := Convert_api_UniqueString_To_string(&in.Driver, &out.Driver, s); err != nil {
return err
}
if err := Convert_api_ResourcePool_To_v1alpha3_ResourcePool(&in.Pool, &out.Pool, s); err != nil {
if err := Convert_api_ResourcePool_To_v1beta1_ResourcePool(&in.Pool, &out.Pool, s); err != nil {
return err
}
if err := Convert_api_UniqueString_To_string(&in.NodeName, &out.NodeName, s); err != nil {
@@ -253,9 +282,9 @@ func autoConvert_api_ResourceSliceSpec_To_v1alpha3_ResourceSliceSpec(in *Resourc
out.AllNodes = in.AllNodes
if in.Devices != nil {
in, out := &in.Devices, &out.Devices
*out = make([]v1alpha3.Device, len(*in))
*out = make([]v1beta1.Device, len(*in))
for i := range *in {
if err := Convert_api_Device_To_v1alpha3_Device(&(*in)[i], &(*out)[i], s); err != nil {
if err := Convert_api_Device_To_v1beta1_Device(&(*in)[i], &(*out)[i], s); err != nil {
return err
}
}
@@ -265,16 +294,16 @@ func autoConvert_api_ResourceSliceSpec_To_v1alpha3_ResourceSliceSpec(in *Resourc
return nil
}
// Convert_api_ResourceSliceSpec_To_v1alpha3_ResourceSliceSpec is an autogenerated conversion function.
func Convert_api_ResourceSliceSpec_To_v1alpha3_ResourceSliceSpec(in *ResourceSliceSpec, out *v1alpha3.ResourceSliceSpec, s conversion.Scope) error {
return autoConvert_api_ResourceSliceSpec_To_v1alpha3_ResourceSliceSpec(in, out, s)
// Convert_api_ResourceSliceSpec_To_v1beta1_ResourceSliceSpec is an autogenerated conversion function.
func Convert_api_ResourceSliceSpec_To_v1beta1_ResourceSliceSpec(in *ResourceSliceSpec, out *v1beta1.ResourceSliceSpec, s conversion.Scope) error {
return autoConvert_api_ResourceSliceSpec_To_v1beta1_ResourceSliceSpec(in, out, s)
}
func autoConvert_v1alpha3_ResourceSliceSpec_To_api_ResourceSliceSpec(in *v1alpha3.ResourceSliceSpec, out *ResourceSliceSpec, s conversion.Scope) error {
func autoConvert_v1beta1_ResourceSliceSpec_To_api_ResourceSliceSpec(in *v1beta1.ResourceSliceSpec, out *ResourceSliceSpec, s conversion.Scope) error {
if err := Convert_string_To_api_UniqueString(&in.Driver, &out.Driver, s); err != nil {
return err
}
if err := Convert_v1alpha3_ResourcePool_To_api_ResourcePool(&in.Pool, &out.Pool, s); err != nil {
if err := Convert_v1beta1_ResourcePool_To_api_ResourcePool(&in.Pool, &out.Pool, s); err != nil {
return err
}
if err := Convert_string_To_api_UniqueString(&in.NodeName, &out.NodeName, s); err != nil {
@@ -286,7 +315,7 @@ func autoConvert_v1alpha3_ResourceSliceSpec_To_api_ResourceSliceSpec(in *v1alpha
in, out := &in.Devices, &out.Devices
*out = make([]Device, len(*in))
for i := range *in {
if err := Convert_v1alpha3_Device_To_api_Device(&(*in)[i], &(*out)[i], s); err != nil {
if err := Convert_v1beta1_Device_To_api_Device(&(*in)[i], &(*out)[i], s); err != nil {
return err
}
}
@@ -296,7 +325,7 @@ func autoConvert_v1alpha3_ResourceSliceSpec_To_api_ResourceSliceSpec(in *v1alpha
return nil
}
// Convert_v1alpha3_ResourceSliceSpec_To_api_ResourceSliceSpec is an autogenerated conversion function.
func Convert_v1alpha3_ResourceSliceSpec_To_api_ResourceSliceSpec(in *v1alpha3.ResourceSliceSpec, out *ResourceSliceSpec, s conversion.Scope) error {
return autoConvert_v1alpha3_ResourceSliceSpec_To_api_ResourceSliceSpec(in, out, s)
// Convert_v1beta1_ResourceSliceSpec_To_api_ResourceSliceSpec is an autogenerated conversion function.
func Convert_v1beta1_ResourceSliceSpec_To_api_ResourceSliceSpec(in *v1beta1.ResourceSliceSpec, out *ResourceSliceSpec, s conversion.Scope) error {
return autoConvert_v1beta1_ResourceSliceSpec_To_api_ResourceSliceSpec(in, out, s)
}
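
Callers use these regenerated functions to convert between the external v1beta1 types and the internal draapi types, as the allocator and pool-gathering hunks below show. A minimal sketch, with the wrapper function being hypothetical:

	package sketch

	import (
		resourceapi "k8s.io/api/resource/v1beta1"
		draapi "k8s.io/dynamic-resource-allocation/api"
	)

	// toInternal converts an external ResourceSlice to the internal representation.
	// The nil conversion scope matches what the callers in this commit pass.
	func toInternal(s *resourceapi.ResourceSlice) (*draapi.ResourceSlice, error) {
		var slice draapi.ResourceSlice
		if err := draapi.Convert_v1beta1_ResourceSlice_To_api_ResourceSlice(s, &slice, nil); err != nil {
			return nil, err
		}
		return &slice, nil
	}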


@@ -31,8 +31,7 @@ import (
"github.com/google/cel-go/common/types/traits"
"github.com/google/cel-go/ext"
resourceapi "k8s.io/api/resource/v1alpha3"
"k8s.io/apimachinery/pkg/api/resource"
resourceapi "k8s.io/api/resource/v1beta1"
"k8s.io/apimachinery/pkg/util/version"
celconfig "k8s.io/apiserver/pkg/apis/cel"
apiservercel "k8s.io/apiserver/pkg/cel"
@@ -82,7 +81,7 @@ type Device struct {
// string attribute.
Driver string
Attributes map[resourceapi.QualifiedName]resourceapi.DeviceAttribute
Capacity map[resourceapi.QualifiedName]resource.Quantity
Capacity map[resourceapi.QualifiedName]resourceapi.DeviceCapacity
}
type compiler struct {
@@ -211,12 +210,12 @@ func (c CompilationResult) DeviceMatches(ctx context.Context, input Device) (boo
}
capacity := make(map[string]any)
for name, quantity := range input.Capacity {
for name, cap := range input.Capacity {
domain, id := parseQualifiedName(name, input.Driver)
if capacity[domain] == nil {
capacity[domain] = make(map[string]apiservercel.Quantity)
}
capacity[domain].(map[string]apiservercel.Quantity)[id] = apiservercel.Quantity{Quantity: &quantity}
capacity[domain].(map[string]apiservercel.Quantity)[id] = apiservercel.Quantity{Quantity: &cap.Quantity}
}
variables := map[string]any{
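
	// The loop above now unwraps cap.Quantity when building the nested capacity
	// map handed to CEL, and the Device input struct carries the wrapped type.
	// A minimal sketch of constructing that input, with the driver name and
	// value being illustrative:
	//
	//	package sketch
	//
	//	import (
	//		resourceapi "k8s.io/api/resource/v1beta1"
	//		"k8s.io/apimachinery/pkg/api/resource"
	//		"k8s.io/dynamic-resource-allocation/cel"
	//	)
	//
	//	func exampleInput() cel.Device {
	//		return cel.Device{
	//			Driver: "driver.example.com",
	//			// DeviceMatches exposes the wrapped Quantity of each
	//			// DeviceCapacity to the CEL expression.
	//			Capacity: map[resourceapi.QualifiedName]resourceapi.DeviceCapacity{
	//				"memory": {Quantity: resource.MustParse("16Gi")},
	//			},
	//		}
	//	}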


@@ -21,7 +21,7 @@ import (
"strings"
"testing"
resourceapi "k8s.io/api/resource/v1alpha3"
resourceapi "k8s.io/api/resource/v1beta1"
"k8s.io/apimachinery/pkg/api/resource"
"k8s.io/klog/v2/ktesting"
"k8s.io/utils/ptr"


@@ -26,7 +26,7 @@ import (
"google.golang.org/grpc"
"k8s.io/klog/v2"
resourceapi "k8s.io/api/resource/v1alpha3"
resourceapi "k8s.io/api/resource/v1beta1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/client-go/kubernetes"
"k8s.io/dynamic-resource-allocation/resourceslice"


@@ -28,7 +28,7 @@ import (
"fmt"
v1 "k8s.io/api/core/v1"
resourceapi "k8s.io/api/resource/v1alpha3"
resourceapi "k8s.io/api/resource/v1beta1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)


@@ -24,7 +24,7 @@ import (
"github.com/stretchr/testify/require"
v1 "k8s.io/api/core/v1"
resourceapi "k8s.io/api/resource/v1alpha3"
resourceapi "k8s.io/api/resource/v1beta1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
)


@@ -27,7 +27,7 @@ import (
"github.com/google/go-cmp/cmp"
v1 "k8s.io/api/core/v1"
resourceapi "k8s.io/api/resource/v1alpha3"
resourceapi "k8s.io/api/resource/v1beta1"
apiequality "k8s.io/apimachinery/pkg/api/equality"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -35,7 +35,7 @@ import (
"k8s.io/apimachinery/pkg/types"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
"k8s.io/apimachinery/pkg/util/sets"
resourceinformers "k8s.io/client-go/informers/resource/v1alpha3"
resourceinformers "k8s.io/client-go/informers/resource/v1beta1"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/tools/cache"
"k8s.io/client-go/util/workqueue"
@@ -589,7 +589,7 @@ func (c *Controller) syncPool(ctx context.Context, poolName string) error {
slice.Spec.Devices = pool.Slices[i].Devices
logger.V(5).Info("Updating existing resource slice", "slice", klog.KObj(slice))
slice, err := c.kubeClient.ResourceV1alpha3().ResourceSlices().Update(ctx, slice, metav1.UpdateOptions{})
slice, err := c.kubeClient.ResourceV1beta1().ResourceSlices().Update(ctx, slice, metav1.UpdateOptions{})
if err != nil {
return fmt.Errorf("update resource slice: %w", err)
}
@@ -645,7 +645,7 @@ func (c *Controller) syncPool(ctx context.Context, poolName string) error {
// Using a https://pkg.go.dev/k8s.io/client-go/tools/cache#MutationCache
// avoids that.
logger.V(5).Info("Creating new resource slice")
slice, err := c.kubeClient.ResourceV1alpha3().ResourceSlices().Create(ctx, slice, metav1.CreateOptions{})
slice, err := c.kubeClient.ResourceV1beta1().ResourceSlices().Create(ctx, slice, metav1.CreateOptions{})
if err != nil {
return fmt.Errorf("create resource slice: %w", err)
}
@@ -681,7 +681,7 @@ func (c *Controller) syncPool(ctx context.Context, poolName string) error {
// changes on the server. The only downside is the extra API
// call. This isn't as bad as extra creates.
logger.V(5).Info("Deleting obsolete resource slice", "slice", klog.KObj(slice), "deleteOptions", options)
err := c.kubeClient.ResourceV1alpha3().ResourceSlices().Delete(ctx, slice.Name, options)
err := c.kubeClient.ResourceV1beta1().ResourceSlices().Delete(ctx, slice.Name, options)
switch {
case err == nil:
atomic.AddInt64(&c.numDeletes, 1)


@@ -28,7 +28,7 @@ import (
"github.com/stretchr/testify/require"
v1 "k8s.io/api/core/v1"
resourceapi "k8s.io/api/resource/v1alpha3"
resourceapi "k8s.io/api/resource/v1beta1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/types"
@@ -668,7 +668,7 @@ func TestControllerSyncPool(t *testing.T) {
ctrl.run(ctx)
// Check ResourceSlices
resourceSlices, err := kubeClient.ResourceV1alpha3().ResourceSlices().List(ctx, metav1.ListOptions{})
resourceSlices, err := kubeClient.ResourceV1beta1().ResourceSlices().List(ctx, metav1.ListOptions{})
require.NoError(t, err, "list resource slices")
sortResourceSlices(test.expectedResourceSlices)


@@ -23,7 +23,7 @@ package resourceslice
import (
v1 "k8s.io/api/core/v1"
v1alpha3 "k8s.io/api/resource/v1alpha3"
v1beta1 "k8s.io/api/resource/v1beta1"
)
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
@@ -98,7 +98,7 @@ func (in *Slice) DeepCopyInto(out *Slice) {
*out = *in
if in.Devices != nil {
in, out := &in.Devices, &out.Devices
*out = make([]v1alpha3.Device, len(*in))
*out = make([]v1beta1.Device, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}

@@ -24,7 +24,7 @@ import (
"strings"
v1 "k8s.io/api/core/v1"
resourceapi "k8s.io/api/resource/v1alpha3"
resourceapi "k8s.io/api/resource/v1beta1"
"k8s.io/apimachinery/pkg/util/sets"
draapi "k8s.io/dynamic-resource-allocation/api"
"k8s.io/dynamic-resource-allocation/cel"
@@ -713,7 +713,7 @@ func (alloc *allocator) selectorsMatch(r requestIndices, device *draapi.BasicDev
// If this conversion turns out to be expensive, the CEL package could be converted
// to use unique strings.
var d resourceapi.BasicDevice
if err := draapi.Convert_api_BasicDevice_To_v1alpha3_BasicDevice(device, &d, nil); err != nil {
if err := draapi.Convert_api_BasicDevice_To_v1beta1_BasicDevice(device, &d, nil); err != nil {
return false, fmt.Errorf("convert BasicDevice: %w", err)
}
matches, details, err := expr.DeviceMatches(alloc.ctx, cel.Device{Driver: deviceID.Driver.String(), Attributes: d.Attributes, Capacity: d.Capacity})

@@ -27,7 +27,7 @@ import (
"github.com/onsi/gomega/types"
v1 "k8s.io/api/core/v1"
resourceapi "k8s.io/api/resource/v1alpha3"
resourceapi "k8s.io/api/resource/v1beta1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/api/meta"
"k8s.io/apimachinery/pkg/api/resource"
@@ -185,13 +185,17 @@ func claimWithDeviceConfig(name, request, class, driver, attribute string) *reso
// generate a Device object with the given name, capacity and attributes.
func device(name string, capacity map[resourceapi.QualifiedName]resource.Quantity, attributes map[resourceapi.QualifiedName]resourceapi.DeviceAttribute) resourceapi.Device {
return resourceapi.Device{
device := resourceapi.Device{
Name: name,
Basic: &resourceapi.BasicDevice{
Attributes: attributes,
Capacity: capacity,
},
}
device.Basic.Capacity = make(map[resourceapi.QualifiedName]resourceapi.DeviceCapacity, len(capacity))
for name, quantity := range capacity {
device.Basic.Capacity[name] = resourceapi.DeviceCapacity{Quantity: quantity}
}
return device
}
// generate a ResourceSlice object with the given name, node,
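Note: this hunk captures the main shape change between the two API versions: in v1beta1 each capacity entry is a DeviceCapacity struct wrapping the quantity, rather than a bare resource.Quantity. A minimal sketch of constructing a Device directly in the new shape; the device name and all values are illustrative.

package devicesketch

import (
	resourceapi "k8s.io/api/resource/v1beta1"
	"k8s.io/apimachinery/pkg/api/resource"
	"k8s.io/utils/ptr"
)

// exampleDevice builds a single device in the v1beta1 shape: each
// capacity entry wraps its quantity in a DeviceCapacity struct.
func exampleDevice() resourceapi.Device {
	return resourceapi.Device{
		Name: "gpu-0", // illustrative
		Basic: &resourceapi.BasicDevice{
			Attributes: map[resourceapi.QualifiedName]resourceapi.DeviceAttribute{
				"driverVersion": {VersionValue: ptr.To("1.2.3")},
			},
			Capacity: map[resourceapi.QualifiedName]resourceapi.DeviceCapacity{
				"memory": {Quantity: resource.MustParse("16Gi")},
			},
		},
	}
}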

@@ -21,7 +21,7 @@ import (
"fmt"
v1 "k8s.io/api/core/v1"
resourceapi "k8s.io/api/resource/v1alpha3"
resourceapi "k8s.io/api/resource/v1beta1"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/component-helpers/scheduling/corev1/nodeaffinity"
draapi "k8s.io/dynamic-resource-allocation/api"
@@ -89,7 +89,7 @@ func GatherPools(ctx context.Context, slices []*resourceapi.ResourceSlice, node
func addSlice(pools map[PoolID]*Pool, s *resourceapi.ResourceSlice) error {
var slice draapi.ResourceSlice
if err := draapi.Convert_v1alpha3_ResourceSlice_To_api_ResourceSlice(s, &slice, nil); err != nil {
if err := draapi.Convert_v1beta1_ResourceSlice_To_api_ResourceSlice(s, &slice, nil); err != nil {
return fmt.Errorf("convert ResourceSlice: %w", err)
}

@@ -25,7 +25,7 @@ import (
appsv1 "k8s.io/api/apps/v1"
v1 "k8s.io/api/core/v1"
resourceapi "k8s.io/api/resource/v1alpha3"
resourceapi "k8s.io/api/resource/v1beta1"
schedulingv1 "k8s.io/api/scheduling/v1"
apiequality "k8s.io/apimachinery/pkg/api/equality"
apierrors "k8s.io/apimachinery/pkg/api/errors"
@@ -519,7 +519,7 @@ var _ = SIGDescribe("ResourceQuota", func() {
ginkgo.By("Creating a ResourceClaim")
claim := newTestResourceClaimForQuota("test-claim")
claim, err = f.ClientSet.ResourceV1alpha3().ResourceClaims(f.Namespace.Name).Create(ctx, claim, metav1.CreateOptions{})
claim, err = f.ClientSet.ResourceV1beta1().ResourceClaims(f.Namespace.Name).Create(ctx, claim, metav1.CreateOptions{})
framework.ExpectNoError(err)
ginkgo.By("Ensuring resource quota status captures resource claim creation")
@@ -530,7 +530,7 @@ var _ = SIGDescribe("ResourceQuota", func() {
framework.ExpectNoError(err)
ginkgo.By("Deleting a ResourceClaim")
err = f.ClientSet.ResourceV1alpha3().ResourceClaims(f.Namespace.Name).Delete(ctx, claim.Name, metav1.DeleteOptions{})
err = f.ClientSet.ResourceV1beta1().ResourceClaims(f.Namespace.Name).Delete(ctx, claim.Name, metav1.DeleteOptions{})
framework.ExpectNoError(err)
ginkgo.By("Ensuring resource quota status released usage")

@@ -37,7 +37,7 @@ import (
appsv1 "k8s.io/api/apps/v1"
v1 "k8s.io/api/core/v1"
resourceapi "k8s.io/api/resource/v1alpha3"
resourceapi "k8s.io/api/resource/v1beta1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/api/meta"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -47,7 +47,7 @@ import (
"k8s.io/apimachinery/pkg/selection"
"k8s.io/apiserver/pkg/authentication/serviceaccount"
"k8s.io/client-go/discovery/cached/memory"
resourceapiinformer "k8s.io/client-go/informers/resource/v1alpha3"
resourceapiinformer "k8s.io/client-go/informers/resource/v1beta1"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/rest"
"k8s.io/client-go/restmapper"
@@ -298,10 +298,10 @@ func (d *Driver) SetUp(nodes *Nodes, resources Resources, devicesPerNode ...map[
})
}
_, err := d.f.ClientSet.ResourceV1alpha3().ResourceSlices().Create(ctx, slice, metav1.CreateOptions{})
_, err := d.f.ClientSet.ResourceV1beta1().ResourceSlices().Create(ctx, slice, metav1.CreateOptions{})
framework.ExpectNoError(err)
ginkgo.DeferCleanup(func(ctx context.Context) {
framework.ExpectNoError(d.f.ClientSet.ResourceV1alpha3().ResourceSlices().Delete(ctx, slice.Name, metav1.DeleteOptions{}))
framework.ExpectNoError(d.f.ClientSet.ResourceV1beta1().ResourceSlices().Delete(ctx, slice.Name, metav1.DeleteOptions{}))
})
}
@@ -573,7 +573,7 @@ func (d *Driver) TearDown() {
func (d *Driver) IsGone(ctx context.Context) {
gomega.Eventually(ctx, func(ctx context.Context) ([]resourceapi.ResourceSlice, error) {
slices, err := d.f.ClientSet.ResourceV1alpha3().ResourceSlices().List(ctx, metav1.ListOptions{FieldSelector: resourceapi.ResourceSliceSelectorDriver + "=" + d.Name})
slices, err := d.f.ClientSet.ResourceV1beta1().ResourceSlices().List(ctx, metav1.ListOptions{FieldSelector: resourceapi.ResourceSliceSelectorDriver + "=" + d.Name})
if err != nil {
return nil, err
}

@@ -35,7 +35,7 @@ import (
admissionregistrationv1 "k8s.io/api/admissionregistration/v1"
appsv1 "k8s.io/api/apps/v1"
v1 "k8s.io/api/core/v1"
resourceapi "k8s.io/api/resource/v1alpha3"
resourceapi "k8s.io/api/resource/v1beta1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -351,7 +351,7 @@ var _ = framework.SIGDescribe("node")("DRA", feature.DynamicResourceAllocation,
framework.ExpectNoError(e2epod.WaitForPodNoLongerRunningInNamespace(ctx, f.ClientSet, pod.Name, pod.Namespace), "wait for pod to finish")
ginkgo.By("waiting for claim to be unreserved")
gomega.Eventually(ctx, func(ctx context.Context) (*resourceapi.ResourceClaim, error) {
return f.ClientSet.ResourceV1alpha3().ResourceClaims(pod.Namespace).Get(ctx, claim.Name, metav1.GetOptions{})
return f.ClientSet.ResourceV1beta1().ResourceClaims(pod.Namespace).Get(ctx, claim.Name, metav1.GetOptions{})
}).WithTimeout(f.Timeouts.PodDelete).Should(gomega.HaveField("Status.ReservedFor", gomega.BeEmpty()), "reservation should have been removed")
})
@@ -364,7 +364,7 @@ var _ = framework.SIGDescribe("node")("DRA", feature.DynamicResourceAllocation,
framework.ExpectNoError(e2epod.WaitForPodNoLongerRunningInNamespace(ctx, f.ClientSet, pod.Name, pod.Namespace), "wait for pod to finish")
ginkgo.By("waiting for claim to be deleted")
gomega.Eventually(ctx, func(ctx context.Context) ([]resourceapi.ResourceClaim, error) {
claims, err := f.ClientSet.ResourceV1alpha3().ResourceClaims(pod.Namespace).List(ctx, metav1.ListOptions{})
claims, err := f.ClientSet.ResourceV1beta1().ResourceClaims(pod.Namespace).List(ctx, metav1.ListOptions{})
if err != nil {
return nil, err
}
@@ -390,7 +390,7 @@ var _ = framework.SIGDescribe("node")("DRA", feature.DynamicResourceAllocation,
b.create(ctx, claim, pod)
gomega.Eventually(ctx, func(ctx context.Context) (*resourceapi.ResourceClaim, error) {
return b.f.ClientSet.ResourceV1alpha3().ResourceClaims(b.f.Namespace.Name).Get(ctx, claim.Name, metav1.GetOptions{})
return b.f.ClientSet.ResourceV1beta1().ResourceClaims(b.f.Namespace.Name).Get(ctx, claim.Name, metav1.GetOptions{})
}).WithTimeout(f.Timeouts.PodDelete).ShouldNot(gomega.HaveField("Status.Allocation", (*resourceapi.AllocationResult)(nil)))
b.testPod(ctx, f.ClientSet, pod)
@@ -400,7 +400,7 @@ var _ = framework.SIGDescribe("node")("DRA", feature.DynamicResourceAllocation,
ginkgo.By("waiting for claim to get deallocated")
gomega.Eventually(ctx, func(ctx context.Context) (*resourceapi.ResourceClaim, error) {
return b.f.ClientSet.ResourceV1alpha3().ResourceClaims(b.f.Namespace.Name).Get(ctx, claim.Name, metav1.GetOptions{})
return b.f.ClientSet.ResourceV1beta1().ResourceClaims(b.f.Namespace.Name).Get(ctx, claim.Name, metav1.GetOptions{})
}).WithTimeout(f.Timeouts.PodDelete).Should(gomega.HaveField("Status.Allocation", (*resourceapi.AllocationResult)(nil)))
})
}
@@ -523,7 +523,7 @@ var _ = framework.SIGDescribe("node")("DRA", feature.DynamicResourceAllocation,
var objects []klog.KMetadata
pod, template := b.podInline()
deviceClassName := template.Spec.Spec.Devices.Requests[0].DeviceClassName
class, err := f.ClientSet.ResourceV1alpha3().DeviceClasses().Get(ctx, deviceClassName, metav1.GetOptions{})
class, err := f.ClientSet.ResourceV1beta1().DeviceClasses().Get(ctx, deviceClassName, metav1.GetOptions{})
framework.ExpectNoError(err)
deviceClassName += "-b"
template.Spec.Spec.Devices.Requests[0].DeviceClassName = deviceClassName
@@ -546,7 +546,7 @@ var _ = framework.SIGDescribe("node")("DRA", feature.DynamicResourceAllocation,
// First modify the class so that it matches no nodes (for classic DRA) and no devices (structured parameters).
deviceClassName := template.Spec.Spec.Devices.Requests[0].DeviceClassName
class, err := f.ClientSet.ResourceV1alpha3().DeviceClasses().Get(ctx, deviceClassName, metav1.GetOptions{})
class, err := f.ClientSet.ResourceV1beta1().DeviceClasses().Get(ctx, deviceClassName, metav1.GetOptions{})
framework.ExpectNoError(err)
originalClass := class.DeepCopy()
class.Spec.Selectors = []resourceapi.DeviceSelector{{
@@ -554,7 +554,7 @@ var _ = framework.SIGDescribe("node")("DRA", feature.DynamicResourceAllocation,
Expression: "false",
},
}}
class, err = f.ClientSet.ResourceV1alpha3().DeviceClasses().Update(ctx, class, metav1.UpdateOptions{})
class, err = f.ClientSet.ResourceV1beta1().DeviceClasses().Update(ctx, class, metav1.UpdateOptions{})
framework.ExpectNoError(err)
// Now create the pod.
@@ -565,7 +565,7 @@ var _ = framework.SIGDescribe("node")("DRA", feature.DynamicResourceAllocation,
// Unblock the pod.
class.Spec.Selectors = originalClass.Spec.Selectors
_, err = f.ClientSet.ResourceV1alpha3().DeviceClasses().Update(ctx, class, metav1.UpdateOptions{})
_, err = f.ClientSet.ResourceV1beta1().DeviceClasses().Update(ctx, class, metav1.UpdateOptions{})
framework.ExpectNoError(err)
b.testPod(ctx, f.ClientSet, pod, expectedEnv...)
@@ -628,7 +628,7 @@ var _ = framework.SIGDescribe("node")("DRA", feature.DynamicResourceAllocation,
// here we try it in combination with the actual scheduler and can extend it with
// other checks, like event handling (future extension).
gomega.Eventually(ctx, framework.ListObjects(f.ClientSet.ResourceV1alpha3().ResourceSlices().List,
gomega.Eventually(ctx, framework.ListObjects(f.ClientSet.ResourceV1beta1().ResourceSlices().List,
metav1.ListOptions{
FieldSelector: resourceapi.ResourceSliceSelectorDriver + "=" + driver.Name,
},
@@ -783,14 +783,14 @@ var _ = framework.SIGDescribe("node")("DRA", feature.DynamicResourceAllocation,
framework.ExpectNoError(err, "start controller")
ginkgo.DeferCleanup(func(ctx context.Context) {
controller.Stop()
err := f.ClientSet.ResourceV1alpha3().ResourceSlices().DeleteCollection(ctx, metav1.DeleteOptions{}, metav1.ListOptions{
err := f.ClientSet.ResourceV1beta1().ResourceSlices().DeleteCollection(ctx, metav1.DeleteOptions{}, metav1.ListOptions{
FieldSelector: resourceapi.ResourceSliceSelectorDriver + "=" + driverName,
})
framework.ExpectNoError(err, "delete resource slices")
})
// Eventually we should have all desired slices.
listSlices := framework.ListObjects(f.ClientSet.ResourceV1alpha3().ResourceSlices().List, metav1.ListOptions{
listSlices := framework.ListObjects(f.ClientSet.ResourceV1beta1().ResourceSlices().List, metav1.ListOptions{
FieldSelector: resourceapi.ResourceSliceSelectorDriver + "=" + driverName,
})
gomega.Eventually(ctx, listSlices).WithTimeout(time.Minute).Should(gomega.HaveField("Items", gomega.HaveLen(numSlices)))
@@ -853,19 +853,19 @@ var _ = framework.SIGDescribe("node")("DRA", feature.DynamicResourceAllocation,
matchVAPError := gomega.MatchError(gomega.ContainSubstring("admin access to devices not enabled" /* in namespace " + b.f.Namespace.Name */))
gomega.Eventually(ctx, func(ctx context.Context) error {
// First delete, in case that it succeeded earlier.
if err := b.f.ClientSet.ResourceV1alpha3().ResourceClaims(b.f.Namespace.Name).Delete(ctx, claim.Name, metav1.DeleteOptions{}); err != nil && !apierrors.IsNotFound(err) {
if err := b.f.ClientSet.ResourceV1beta1().ResourceClaims(b.f.Namespace.Name).Delete(ctx, claim.Name, metav1.DeleteOptions{}); err != nil && !apierrors.IsNotFound(err) {
return err
}
_, err := b.f.ClientSet.ResourceV1alpha3().ResourceClaims(b.f.Namespace.Name).Create(ctx, claim, metav1.CreateOptions{})
_, err := b.f.ClientSet.ResourceV1beta1().ResourceClaims(b.f.Namespace.Name).Create(ctx, claim, metav1.CreateOptions{})
return err
}).Should(matchVAPError)
gomega.Eventually(ctx, func(ctx context.Context) error {
// First delete, in case that it succeeded earlier.
if err := b.f.ClientSet.ResourceV1alpha3().ResourceClaimTemplates(b.f.Namespace.Name).Delete(ctx, claimTemplate.Name, metav1.DeleteOptions{}); err != nil && !apierrors.IsNotFound(err) {
if err := b.f.ClientSet.ResourceV1beta1().ResourceClaimTemplates(b.f.Namespace.Name).Delete(ctx, claimTemplate.Name, metav1.DeleteOptions{}); err != nil && !apierrors.IsNotFound(err) {
return err
}
_, err := b.f.ClientSet.ResourceV1alpha3().ResourceClaimTemplates(b.f.Namespace.Name).Create(ctx, claimTemplate, metav1.CreateOptions{})
_, err := b.f.ClientSet.ResourceV1beta1().ResourceClaimTemplates(b.f.Namespace.Name).Create(ctx, claimTemplate, metav1.CreateOptions{})
return err
}).Should(matchVAPError)
@@ -875,11 +875,11 @@ var _ = framework.SIGDescribe("node")("DRA", feature.DynamicResourceAllocation,
metav1.ApplyOptions{FieldManager: b.f.UniqueName})
framework.ExpectNoError(err)
gomega.Eventually(ctx, func(ctx context.Context) error {
_, err := b.f.ClientSet.ResourceV1alpha3().ResourceClaims(b.f.Namespace.Name).Create(ctx, claim, metav1.CreateOptions{})
_, err := b.f.ClientSet.ResourceV1beta1().ResourceClaims(b.f.Namespace.Name).Create(ctx, claim, metav1.CreateOptions{})
return err
}).Should(gomega.Succeed())
gomega.Eventually(ctx, func(ctx context.Context) error {
_, err := b.f.ClientSet.ResourceV1alpha3().ResourceClaimTemplates(b.f.Namespace.Name).Create(ctx, claimTemplate, metav1.CreateOptions{})
_, err := b.f.ClientSet.ResourceV1beta1().ResourceClaimTemplates(b.f.Namespace.Name).Create(ctx, claimTemplate, metav1.CreateOptions{})
return err
}).Should(gomega.Succeed())
})
@@ -909,7 +909,7 @@ var _ = framework.SIGDescribe("node")("DRA", feature.DynamicResourceAllocation,
},
},
}
_, err := f.ClientSet.ResourceV1alpha3().ResourceClaims(f.Namespace.Name).Create(ctx, claim, metav1.CreateOptions{})
_, err := f.ClientSet.ResourceV1beta1().ResourceClaims(f.Namespace.Name).Create(ctx, claim, metav1.CreateOptions{})
framework.ExpectNoError(err, "create first claim")
resourceName := "count/resourceclaims.resource.k8s.io"
@@ -936,7 +936,7 @@ var _ = framework.SIGDescribe("node")("DRA", feature.DynamicResourceAllocation,
// Now creating another claim should fail.
claim2 := claim.DeepCopy()
claim2.Name = "claim-1"
_, err = f.ClientSet.ResourceV1alpha3().ResourceClaims(f.Namespace.Name).Create(ctx, claim2, metav1.CreateOptions{})
_, err = f.ClientSet.ResourceV1beta1().ResourceClaims(f.Namespace.Name).Create(ctx, claim2, metav1.CreateOptions{})
gomega.Expect(err).Should(gomega.MatchError(gomega.ContainSubstring("exceeded quota: object-count, requested: count/resourceclaims.resource.k8s.io=1, used: count/resourceclaims.resource.k8s.io=1, limited: count/resourceclaims.resource.k8s.io=1")), "creating second claim not allowed")
})
@@ -1018,7 +1018,7 @@ var _ = framework.SIGDescribe("node")("DRA", feature.DynamicResourceAllocation,
},
}
ginkgo.DeferCleanup(func(ctx context.Context) {
err := f.ClientSet.ResourceV1alpha3().ResourceSlices().Delete(ctx, fictionalNodeSlice.Name, metav1.DeleteOptions{})
err := f.ClientSet.ResourceV1beta1().ResourceSlices().Delete(ctx, fictionalNodeSlice.Name, metav1.DeleteOptions{})
if !apierrors.IsNotFound(err) {
framework.ExpectNoError(err)
}
@@ -1037,19 +1037,19 @@ var _ = framework.SIGDescribe("node")("DRA", feature.DynamicResourceAllocation,
}
mustCreate := func(clientSet kubernetes.Interface, clientName string, slice *resourceapi.ResourceSlice) *resourceapi.ResourceSlice {
ginkgo.GinkgoHelper()
slice, err := clientSet.ResourceV1alpha3().ResourceSlices().Create(ctx, slice, metav1.CreateOptions{})
slice, err := clientSet.ResourceV1beta1().ResourceSlices().Create(ctx, slice, metav1.CreateOptions{})
framework.ExpectNoError(err, fmt.Sprintf("CREATE: %s + %s", clientName, slice.Name))
return slice
}
mustUpdate := func(clientSet kubernetes.Interface, clientName string, slice *resourceapi.ResourceSlice) *resourceapi.ResourceSlice {
ginkgo.GinkgoHelper()
slice, err := clientSet.ResourceV1alpha3().ResourceSlices().Update(ctx, slice, metav1.UpdateOptions{})
slice, err := clientSet.ResourceV1beta1().ResourceSlices().Update(ctx, slice, metav1.UpdateOptions{})
framework.ExpectNoError(err, fmt.Sprintf("UPDATE: %s + %s", clientName, slice.Name))
return slice
}
mustDelete := func(clientSet kubernetes.Interface, clientName string, slice *resourceapi.ResourceSlice) {
ginkgo.GinkgoHelper()
err := clientSet.ResourceV1alpha3().ResourceSlices().Delete(ctx, slice.Name, metav1.DeleteOptions{})
err := clientSet.ResourceV1beta1().ResourceSlices().Delete(ctx, slice.Name, metav1.DeleteOptions{})
framework.ExpectNoError(err, fmt.Sprintf("DELETE: %s + %s", clientName, slice.Name))
}
mustCreateAndDelete := func(clientSet kubernetes.Interface, clientName string, slice *resourceapi.ResourceSlice) {
@@ -1059,17 +1059,17 @@ var _ = framework.SIGDescribe("node")("DRA", feature.DynamicResourceAllocation,
}
mustFailToCreate := func(clientSet kubernetes.Interface, clientName string, slice *resourceapi.ResourceSlice, matchError types.GomegaMatcher) {
ginkgo.GinkgoHelper()
_, err := clientSet.ResourceV1alpha3().ResourceSlices().Create(ctx, slice, metav1.CreateOptions{})
_, err := clientSet.ResourceV1beta1().ResourceSlices().Create(ctx, slice, metav1.CreateOptions{})
gomega.Expect(err).To(matchError, fmt.Sprintf("CREATE: %s + %s", clientName, slice.Name))
}
mustFailToUpdate := func(clientSet kubernetes.Interface, clientName string, slice *resourceapi.ResourceSlice, matchError types.GomegaMatcher) {
ginkgo.GinkgoHelper()
_, err := clientSet.ResourceV1alpha3().ResourceSlices().Update(ctx, slice, metav1.UpdateOptions{})
_, err := clientSet.ResourceV1beta1().ResourceSlices().Update(ctx, slice, metav1.UpdateOptions{})
gomega.Expect(err).To(matchError, fmt.Sprintf("UPDATE: %s + %s", clientName, slice.Name))
}
mustFailToDelete := func(clientSet kubernetes.Interface, clientName string, slice *resourceapi.ResourceSlice, matchError types.GomegaMatcher) {
ginkgo.GinkgoHelper()
err := clientSet.ResourceV1alpha3().ResourceSlices().Delete(ctx, slice.Name, metav1.DeleteOptions{})
err := clientSet.ResourceV1beta1().ResourceSlices().Delete(ctx, slice.Name, metav1.DeleteOptions{})
gomega.Expect(err).To(matchError, fmt.Sprintf("DELETE: %s + %s", clientName, slice.Name))
}
@@ -1102,7 +1102,7 @@ var _ = framework.SIGDescribe("node")("DRA", feature.DynamicResourceAllocation,
},
}
ginkgo.DeferCleanup(func(ctx context.Context) {
err := f.ClientSet.ResourceV1alpha3().ResourceSlices().Delete(ctx, clusterSlice.Name, metav1.DeleteOptions{})
err := f.ClientSet.ResourceV1beta1().ResourceSlices().Delete(ctx, clusterSlice.Name, metav1.DeleteOptions{})
if !apierrors.IsNotFound(err) {
framework.ExpectNoError(err)
}
@@ -1129,7 +1129,7 @@ var _ = framework.SIGDescribe("node")("DRA", feature.DynamicResourceAllocation,
// Now check for exactly the right set of objects for all nodes.
ginkgo.By("check if ResourceSlice object(s) exist on the API server")
resourceClient := f.ClientSet.ResourceV1alpha3().ResourceSlices()
resourceClient := f.ClientSet.ResourceV1beta1().ResourceSlices()
var expectedObjects []any
for _, nodeName := range nodes.NodeNames {
node, err := f.ClientSet.CoreV1().Nodes().Get(ctx, nodeName, metav1.GetOptions{})
@@ -1243,7 +1243,7 @@ var _ = framework.SIGDescribe("node")("DRA", feature.DynamicResourceAllocation,
// We need to clean up explicitly because the normal
// cleanup doesn't work (driver shuts down first).
// framework.ExpectNoError(f.ClientSet.ResourceV1alpha3().ResourceClaims(claim.Namespace).Delete(ctx, claim.Name, metav1.DeleteOptions{}))
// framework.ExpectNoError(f.ClientSet.ResourceV1beta1().ResourceClaims(claim.Namespace).Delete(ctx, claim.Name, metav1.DeleteOptions{}))
framework.ExpectNoError(f.ClientSet.CoreV1().Pods(pod.Namespace).Delete(ctx, pod.Name, metav1.DeleteOptions{}))
framework.ExpectNoError(e2epod.WaitForPodNotFoundInNamespace(ctx, f.ClientSet, pod.Name, pod.Namespace, f.Timeouts.PodDelete))
})
@@ -1433,9 +1433,9 @@ func (b *builder) create(ctx context.Context, objs ...klog.KMetadata) []klog.KMe
var createdObj klog.KMetadata
switch obj := obj.(type) {
case *resourceapi.DeviceClass:
createdObj, err = b.f.ClientSet.ResourceV1alpha3().DeviceClasses().Create(ctx, obj, metav1.CreateOptions{})
createdObj, err = b.f.ClientSet.ResourceV1beta1().DeviceClasses().Create(ctx, obj, metav1.CreateOptions{})
ginkgo.DeferCleanup(func(ctx context.Context) {
err := b.f.ClientSet.ResourceV1alpha3().DeviceClasses().Delete(ctx, createdObj.GetName(), metav1.DeleteOptions{})
err := b.f.ClientSet.ResourceV1beta1().DeviceClasses().Delete(ctx, createdObj.GetName(), metav1.DeleteOptions{})
framework.ExpectNoError(err, "delete device class")
})
case *v1.Pod:
@@ -1443,13 +1443,13 @@ func (b *builder) create(ctx context.Context, objs ...klog.KMetadata) []klog.KMe
case *v1.ConfigMap:
createdObj, err = b.f.ClientSet.CoreV1().ConfigMaps(b.f.Namespace.Name).Create(ctx, obj, metav1.CreateOptions{})
case *resourceapi.ResourceClaim:
createdObj, err = b.f.ClientSet.ResourceV1alpha3().ResourceClaims(b.f.Namespace.Name).Create(ctx, obj, metav1.CreateOptions{})
createdObj, err = b.f.ClientSet.ResourceV1beta1().ResourceClaims(b.f.Namespace.Name).Create(ctx, obj, metav1.CreateOptions{})
case *resourceapi.ResourceClaimTemplate:
createdObj, err = b.f.ClientSet.ResourceV1alpha3().ResourceClaimTemplates(b.f.Namespace.Name).Create(ctx, obj, metav1.CreateOptions{})
createdObj, err = b.f.ClientSet.ResourceV1beta1().ResourceClaimTemplates(b.f.Namespace.Name).Create(ctx, obj, metav1.CreateOptions{})
case *resourceapi.ResourceSlice:
createdObj, err = b.f.ClientSet.ResourceV1alpha3().ResourceSlices().Create(ctx, obj, metav1.CreateOptions{})
createdObj, err = b.f.ClientSet.ResourceV1beta1().ResourceSlices().Create(ctx, obj, metav1.CreateOptions{})
ginkgo.DeferCleanup(func(ctx context.Context) {
err := b.f.ClientSet.ResourceV1alpha3().ResourceSlices().Delete(ctx, createdObj.GetName(), metav1.DeleteOptions{})
err := b.f.ClientSet.ResourceV1beta1().ResourceSlices().Delete(ctx, createdObj.GetName(), metav1.DeleteOptions{})
framework.ExpectNoError(err, "delete node resource slice")
})
case *appsv1.DaemonSet:
@@ -1552,14 +1552,14 @@ func (b *builder) tearDown(ctx context.Context) {
return b.listTestPods(ctx)
}).WithTimeout(time.Minute).Should(gomega.BeEmpty(), "remaining pods despite deletion")
claims, err := b.f.ClientSet.ResourceV1alpha3().ResourceClaims(b.f.Namespace.Name).List(ctx, metav1.ListOptions{})
claims, err := b.f.ClientSet.ResourceV1beta1().ResourceClaims(b.f.Namespace.Name).List(ctx, metav1.ListOptions{})
framework.ExpectNoError(err, "get resource claims")
for _, claim := range claims.Items {
if claim.DeletionTimestamp != nil {
continue
}
ginkgo.By(fmt.Sprintf("deleting %T %s", &claim, klog.KObj(&claim)))
err := b.f.ClientSet.ResourceV1alpha3().ResourceClaims(b.f.Namespace.Name).Delete(ctx, claim.Name, metav1.DeleteOptions{})
err := b.f.ClientSet.ResourceV1beta1().ResourceClaims(b.f.Namespace.Name).Delete(ctx, claim.Name, metav1.DeleteOptions{})
if !apierrors.IsNotFound(err) {
framework.ExpectNoError(err, "delete claim")
}
@@ -1572,7 +1572,7 @@ func (b *builder) tearDown(ctx context.Context) {
ginkgo.By("waiting for claims to be deallocated and deleted")
gomega.Eventually(func() ([]resourceapi.ResourceClaim, error) {
claims, err := b.f.ClientSet.ResourceV1alpha3().ResourceClaims(b.f.Namespace.Name).List(ctx, metav1.ListOptions{})
claims, err := b.f.ClientSet.ResourceV1beta1().ResourceClaims(b.f.Namespace.Name).List(ctx, metav1.ListOptions{})
if err != nil {
return nil, err
}

@@ -31,7 +31,7 @@ import (
"google.golang.org/grpc"
resourceapi "k8s.io/api/resource/v1alpha3"
resourceapi "k8s.io/api/resource/v1beta1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/types"
@@ -282,7 +282,7 @@ func (ex *ExamplePlugin) nodePrepareResource(ctx context.Context, claimReq *drap
// The plugin must retrieve the claim itself to get it in the version
// that it understands.
claim, err := ex.kubeClient.ResourceV1alpha3().ResourceClaims(claimReq.Namespace).Get(ctx, claimReq.Name, metav1.GetOptions{})
claim, err := ex.kubeClient.ResourceV1beta1().ResourceClaims(claimReq.Namespace).Get(ctx, claimReq.Name, metav1.GetOptions{})
if err != nil {
return nil, fmt.Errorf("retrieve claim %s/%s: %w", claimReq.Namespace, claimReq.Name, err)
}
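Note: since the kubelet only hands the plugin a claim reference, re-reading the claim in the version the plugin understands is the intended pattern here. A minimal sketch, assuming the request carries namespace, name, and UID; the UID comparison is this sketch's addition, guarding against a claim deleted and recreated under the same name.

package pluginsketch

import (
	"context"
	"fmt"

	resourceapi "k8s.io/api/resource/v1beta1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

// getClaimForRequest re-reads the claim in the API version this plugin
// understands. The UID comparison rejects a claim that was deleted and
// recreated under the same name after the kubelet sent the request.
func getClaimForRequest(ctx context.Context, kubeClient kubernetes.Interface,
	namespace, name, uid string) (*resourceapi.ResourceClaim, error) {
	claim, err := kubeClient.ResourceV1beta1().ResourceClaims(namespace).Get(ctx, name, metav1.GetOptions{})
	if err != nil {
		return nil, fmt.Errorf("retrieve claim %s/%s: %w", namespace, name, err)
	}
	if string(claim.UID) != uid {
		return nil, fmt.Errorf("claim %s/%s: UID mismatch, object was recreated", namespace, name)
	}
	return claim, nil
}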

@@ -19,7 +19,7 @@ spec:
matchConstraints:
resourceRules:
- apiGroups: ["resource.k8s.io"]
apiVersions: ["v1alpha3"]
apiVersions: ["v1alpha3", "v1beta1"]
operations: ["CREATE", "UPDATE"]
resources: ["resourceclaims"]
validations:
@@ -49,7 +49,7 @@ spec:
matchConstraints:
resourceRules:
- apiGroups: ["resource.k8s.io"]
apiVersions: ["v1alpha3"]
apiVersions: ["v1alpha3", "v1beta1"]
operations: ["CREATE", "UPDATE"]
resources: ["resourceclaimtemplates"]
validations:

@@ -47,7 +47,7 @@ spec:
matchConstraints:
resourceRules:
- apiGroups: ["resource.k8s.io"]
apiVersions: ["v1alpha3"]
apiVersions: ["v1alpha3", "v1beta1"]
operations: ["CREATE", "UPDATE", "DELETE"]
resources: ["resourceslices"]
matchConditions:

@@ -40,7 +40,7 @@ import (
"github.com/onsi/gomega/types"
v1 "k8s.io/api/core/v1"
resourceapi "k8s.io/api/resource/v1alpha3"
resourceapi "k8s.io/api/resource/v1beta1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
@@ -92,7 +92,7 @@ var _ = framework.SIGDescribe("node")("DRA", feature.DynamicResourceAllocation,
// When plugin and kubelet get killed at the end of the tests, they leave ResourceSlices behind.
// Perhaps garbage collection would eventually remove them (not sure how the node instance
// is managed), but this could take time. Let's clean up explicitly.
framework.ExpectNoError(f.ClientSet.ResourceV1alpha3().ResourceSlices().DeleteCollection(ctx, metav1.DeleteOptions{}, metav1.ListOptions{}))
framework.ExpectNoError(f.ClientSet.ResourceV1beta1().ResourceSlices().DeleteCollection(ctx, metav1.DeleteOptions{}, metav1.ListOptions{}))
})
})
@@ -487,7 +487,7 @@ var _ = framework.SIGDescribe("node")("DRA", feature.DynamicResourceAllocation,
f.Context("ResourceSlice", f.WithSerial(), func() {
listResources := func(ctx context.Context) ([]resourceapi.ResourceSlice, error) {
slices, err := f.ClientSet.ResourceV1alpha3().ResourceSlices().List(ctx, metav1.ListOptions{})
slices, err := f.ClientSet.ResourceV1beta1().ResourceSlices().List(ctx, metav1.ListOptions{})
if err != nil {
return nil, err
}
@@ -577,7 +577,7 @@ func newKubeletPlugin(ctx context.Context, clientSet kubernetes.Interface, nodeN
ginkgo.DeferCleanup(func(ctx context.Context) {
// kubelet should do this eventually, but better make sure.
// A separate test checks this explicitly.
framework.ExpectNoError(clientSet.ResourceV1alpha3().ResourceSlices().DeleteCollection(ctx, metav1.DeleteOptions{}, metav1.ListOptions{FieldSelector: resourceapi.ResourceSliceSelectorDriver + "=" + driverName}))
framework.ExpectNoError(clientSet.ResourceV1beta1().ResourceSlices().DeleteCollection(ctx, metav1.DeleteOptions{}, metav1.ListOptions{FieldSelector: resourceapi.ResourceSliceSelectorDriver + "=" + driverName}))
})
ginkgo.DeferCleanup(plugin.Stop)
@@ -595,10 +595,10 @@ func createTestObjects(ctx context.Context, clientSet kubernetes.Interface, node
Name: className,
},
}
_, err := clientSet.ResourceV1alpha3().DeviceClasses().Create(ctx, class, metav1.CreateOptions{})
_, err := clientSet.ResourceV1beta1().DeviceClasses().Create(ctx, class, metav1.CreateOptions{})
framework.ExpectNoError(err)
ginkgo.DeferCleanup(clientSet.ResourceV1alpha3().DeviceClasses().Delete, className, metav1.DeleteOptions{})
ginkgo.DeferCleanup(clientSet.ResourceV1beta1().DeviceClasses().Delete, className, metav1.DeleteOptions{})
// ResourceClaim
podClaimName := "resource-claim"
@@ -615,10 +615,10 @@ func createTestObjects(ctx context.Context, clientSet kubernetes.Interface, node
},
},
}
createdClaim, err := clientSet.ResourceV1alpha3().ResourceClaims(namespace).Create(ctx, claim, metav1.CreateOptions{})
createdClaim, err := clientSet.ResourceV1beta1().ResourceClaims(namespace).Create(ctx, claim, metav1.CreateOptions{})
framework.ExpectNoError(err)
ginkgo.DeferCleanup(clientSet.ResourceV1alpha3().ResourceClaims(namespace).Delete, claimName, metav1.DeleteOptions{})
ginkgo.DeferCleanup(clientSet.ResourceV1beta1().ResourceClaims(namespace).Delete, claimName, metav1.DeleteOptions{})
// The pod checks its own env with grep. Each driver injects its own parameters,
// with the driver name as part of the variable name. Sorting ensures that a
@@ -702,7 +702,7 @@ func createTestObjects(ctx context.Context, clientSet kubernetes.Interface, node
},
},
}
_, err = clientSet.ResourceV1alpha3().ResourceClaims(namespace).UpdateStatus(ctx, createdClaim, metav1.UpdateOptions{})
_, err = clientSet.ResourceV1beta1().ResourceClaims(namespace).UpdateStatus(ctx, createdClaim, metav1.UpdateOptions{})
framework.ExpectNoError(err)
return pod
@@ -724,11 +724,11 @@ func createTestResourceSlice(ctx context.Context, clientSet kubernetes.Interface
}
ginkgo.By(fmt.Sprintf("Creating ResourceSlice %s", nodeName))
slice, err := clientSet.ResourceV1alpha3().ResourceSlices().Create(ctx, slice, metav1.CreateOptions{})
slice, err := clientSet.ResourceV1beta1().ResourceSlices().Create(ctx, slice, metav1.CreateOptions{})
framework.ExpectNoError(err, "create ResourceSlice")
ginkgo.DeferCleanup(func(ctx context.Context) {
ginkgo.By(fmt.Sprintf("Deleting ResourceSlice %s", nodeName))
err := clientSet.ResourceV1alpha3().ResourceSlices().Delete(ctx, slice.Name, metav1.DeleteOptions{})
err := clientSet.ResourceV1beta1().ResourceSlices().Delete(ctx, slice.Name, metav1.DeleteOptions{})
if !apierrors.IsNotFound(err) {
framework.ExpectNoError(err, "delete ResourceSlice")
}

@@ -24,7 +24,7 @@ import (
"testing"
admissionregistrationv1 "k8s.io/api/admissionregistration/v1"
resourcev1alpha3 "k8s.io/api/resource/v1alpha3"
resourceapi "k8s.io/api/resource/v1beta1"
apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"
extclientset "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset"
apierrors "k8s.io/apimachinery/pkg/api/errors"
@@ -163,15 +163,15 @@ func RunAuthzSelectorsLibraryTests(t *testing.T, featureEnabled bool) {
{
name: "ResourceClaim",
createObject: func() error {
obj := &resourcev1alpha3.ResourceClaim{
obj := &resourceapi.ResourceClaim{
ObjectMeta: metav1.ObjectMeta{Name: "test"},
Spec: resourcev1alpha3.ResourceClaimSpec{
Devices: resourcev1alpha3.DeviceClaim{
Requests: []resourcev1alpha3.DeviceRequest{{
Spec: resourceapi.ResourceClaimSpec{
Devices: resourceapi.DeviceClaim{
Requests: []resourceapi.DeviceRequest{{
Name: "req-0",
DeviceClassName: "example-class",
Selectors: []resourcev1alpha3.DeviceSelector{{
CEL: &resourcev1alpha3.CELDeviceSelector{
Selectors: []resourceapi.DeviceSelector{{
CEL: &resourceapi.CELDeviceSelector{
Expression: boolFieldSelectorExpression,
},
}},
@@ -179,7 +179,7 @@ func RunAuthzSelectorsLibraryTests(t *testing.T, featureEnabled bool) {
},
},
}
_, err := c.ResourceV1alpha3().ResourceClaims("default").Create(context.TODO(), obj, metav1.CreateOptions{})
_, err := c.ResourceV1beta1().ResourceClaims("default").Create(context.TODO(), obj, metav1.CreateOptions{})
return err
},
// authorizer is not available to resource APIs

@@ -27,7 +27,7 @@ import (
coordination "k8s.io/api/coordination/v1"
corev1 "k8s.io/api/core/v1"
policy "k8s.io/api/policy/v1"
resourceapi "k8s.io/api/resource/v1alpha3"
resourceapi "k8s.io/api/resource/v1beta1"
storagev1 "k8s.io/api/storage/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/api/resource"
@@ -108,16 +108,16 @@ func TestNodeAuthorizer(t *testing.T) {
if _, err := superuserClient.CoreV1().ConfigMaps("ns").Create(context.TODO(), &corev1.ConfigMap{ObjectMeta: metav1.ObjectMeta{Name: "myconfigmap"}}, metav1.CreateOptions{}); err != nil {
t.Fatal(err)
}
if _, err := superuserClient.ResourceV1alpha3().ResourceClaims("ns").Create(context.TODO(), &resourceapi.ResourceClaim{ObjectMeta: metav1.ObjectMeta{Name: "mynamedresourceclaim"}}, metav1.CreateOptions{}); err != nil {
if _, err := superuserClient.ResourceV1beta1().ResourceClaims("ns").Create(context.TODO(), &resourceapi.ResourceClaim{ObjectMeta: metav1.ObjectMeta{Name: "mynamedresourceclaim"}}, metav1.CreateOptions{}); err != nil {
t.Fatal(err)
}
if _, err := superuserClient.ResourceV1alpha3().ResourceClaims("ns").Create(context.TODO(), &resourceapi.ResourceClaim{ObjectMeta: metav1.ObjectMeta{Name: "mytemplatizedresourceclaim"}}, metav1.CreateOptions{}); err != nil {
if _, err := superuserClient.ResourceV1beta1().ResourceClaims("ns").Create(context.TODO(), &resourceapi.ResourceClaim{ObjectMeta: metav1.ObjectMeta{Name: "mytemplatizedresourceclaim"}}, metav1.CreateOptions{}); err != nil {
t.Fatal(err)
}
if _, err := superuserClient.ResourceV1alpha3().ResourceSlices().Create(context.TODO(), &resourceapi.ResourceSlice{ObjectMeta: metav1.ObjectMeta{Name: "myslice1"}, Spec: resourceapi.ResourceSliceSpec{NodeName: "node1", Driver: "dra.example.com", Pool: resourceapi.ResourcePool{Name: "node1-slice", ResourceSliceCount: 1}}}, metav1.CreateOptions{}); err != nil {
if _, err := superuserClient.ResourceV1beta1().ResourceSlices().Create(context.TODO(), &resourceapi.ResourceSlice{ObjectMeta: metav1.ObjectMeta{Name: "myslice1"}, Spec: resourceapi.ResourceSliceSpec{NodeName: "node1", Driver: "dra.example.com", Pool: resourceapi.ResourcePool{Name: "node1-slice", ResourceSliceCount: 1}}}, metav1.CreateOptions{}); err != nil {
t.Fatal(err)
}
if _, err := superuserClient.ResourceV1alpha3().ResourceSlices().Create(context.TODO(), &resourceapi.ResourceSlice{ObjectMeta: metav1.ObjectMeta{Name: "myslice2"}, Spec: resourceapi.ResourceSliceSpec{NodeName: "node2", Driver: "dra.example.com", Pool: resourceapi.ResourcePool{Name: "node2-slice", ResourceSliceCount: 1}}}, metav1.CreateOptions{}); err != nil {
if _, err := superuserClient.ResourceV1beta1().ResourceSlices().Create(context.TODO(), &resourceapi.ResourceSlice{ObjectMeta: metav1.ObjectMeta{Name: "myslice2"}, Spec: resourceapi.ResourceSliceSpec{NodeName: "node2", Driver: "dra.example.com", Pool: resourceapi.ResourcePool{Name: "node2-slice", ResourceSliceCount: 1}}}, metav1.CreateOptions{}); err != nil {
t.Fatal(err)
}
@@ -192,13 +192,13 @@ func TestNodeAuthorizer(t *testing.T) {
}
getResourceClaim := func(client clientset.Interface) func() error {
return func() error {
_, err := client.ResourceV1alpha3().ResourceClaims("ns").Get(context.TODO(), "mynamedresourceclaim", metav1.GetOptions{})
_, err := client.ResourceV1beta1().ResourceClaims("ns").Get(context.TODO(), "mynamedresourceclaim", metav1.GetOptions{})
return err
}
}
getResourceClaimTemplate := func(client clientset.Interface) func() error {
return func() error {
_, err := client.ResourceV1alpha3().ResourceClaims("ns").Get(context.TODO(), "mytemplatizedresourceclaim", metav1.GetOptions{})
_, err := client.ResourceV1beta1().ResourceClaims("ns").Get(context.TODO(), "mytemplatizedresourceclaim", metav1.GetOptions{})
return err
}
}
@@ -208,7 +208,7 @@ func TestNodeAuthorizer(t *testing.T) {
if nodeName != nil {
listOptions.FieldSelector = resourceapi.ResourceSliceSelectorNodeName + "=" + *nodeName
}
return client.ResourceV1alpha3().ResourceSlices().DeleteCollection(context.TODO(), metav1.DeleteOptions{}, listOptions)
return client.ResourceV1beta1().ResourceSlices().DeleteCollection(context.TODO(), metav1.DeleteOptions{}, listOptions)
}
}
addResourceClaimTemplateReference := func(client clientset.Interface) func() error {
@@ -662,7 +662,7 @@ func TestNodeAuthorizer(t *testing.T) {
expectAllowed(t, deleteResourceSliceCollection(csiNode1Client, ptr.To("node1")))
// One slice must have been deleted, the other not.
slices, err := superuserClient.ResourceV1alpha3().ResourceSlices().List(context.TODO(), metav1.ListOptions{})
slices, err := superuserClient.ResourceV1beta1().ResourceSlices().List(context.TODO(), metav1.ListOptions{})
if err != nil {
t.Fatal(err)
}
@@ -675,7 +675,7 @@ func TestNodeAuthorizer(t *testing.T) {
// Superuser can delete.
expectAllowed(t, deleteResourceSliceCollection(superuserClient, nil))
slices, err = superuserClient.ResourceV1alpha3().ResourceSlices().List(context.TODO(), metav1.ListOptions{})
slices, err = superuserClient.ResourceV1beta1().ResourceSlices().List(context.TODO(), metav1.ListOptions{})
if err != nil {
t.Fatal(err)
}

@@ -26,7 +26,7 @@ import (
"github.com/stretchr/testify/require"
v1 "k8s.io/api/core/v1"
resourceapi "k8s.io/api/resource/v1alpha3"
resourceapi "k8s.io/api/resource/v1beta1"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
@@ -102,7 +102,7 @@ func (op *createResourceClaimsOp) run(tCtx ktesting.TContext) {
var mutex sync.Mutex
create := func(i int) {
err := func() error {
if _, err := tCtx.Client().ResourceV1alpha3().ResourceClaims(op.Namespace).Create(tCtx, claimTemplate.DeepCopy(), metav1.CreateOptions{}); err != nil {
if _, err := tCtx.Client().ResourceV1beta1().ResourceClaims(op.Namespace).Create(tCtx, claimTemplate.DeepCopy(), metav1.CreateOptions{}); err != nil {
return fmt.Errorf("create claim: %v", err)
}
return nil
@@ -191,11 +191,11 @@ func (op *createResourceDriverOp) run(tCtx ktesting.TContext) {
for _, nodeName := range driverNodes {
slice := resourceSlice(op.DriverName, nodeName, op.MaxClaimsPerNode)
_, err := tCtx.Client().ResourceV1alpha3().ResourceSlices().Create(tCtx, slice, metav1.CreateOptions{})
_, err := tCtx.Client().ResourceV1beta1().ResourceSlices().Create(tCtx, slice, metav1.CreateOptions{})
tCtx.ExpectNoError(err, "create node resource slice")
}
tCtx.CleanupCtx(func(tCtx ktesting.TContext) {
err := tCtx.Client().ResourceV1alpha3().ResourceSlices().DeleteCollection(tCtx,
err := tCtx.Client().ResourceV1beta1().ResourceSlices().DeleteCollection(tCtx,
metav1.DeleteOptions{},
metav1.ListOptions{FieldSelector: resourceapi.ResourceSliceSelectorDriver + "=" + op.DriverName},
)
@@ -230,8 +230,8 @@ func resourceSlice(driverName, nodeName string, capacity int) *resourceapi.Resou
"driverVersion": {VersionValue: ptr.To("1.2.3")},
"dra.example.com/numa": {IntValue: ptr.To(int64(i))},
},
Capacity: map[resourceapi.QualifiedName]resource.Quantity{
"memory": resource.MustParse("1Gi"),
Capacity: map[resourceapi.QualifiedName]resourceapi.DeviceCapacity{
"memory": {Quantity: resource.MustParse("1Gi")},
},
},
},
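Note: for context, a complete minimal ResourceSlice in the v1beta1 API looks roughly like this; the object name, device name, and capacity are illustrative, and pool generation handling is omitted.

package slicesketch

import (
	resourceapi "k8s.io/api/resource/v1beta1"
	"k8s.io/apimachinery/pkg/api/resource"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// minimalSlice builds a single-device ResourceSlice for one node.
func minimalSlice(nodeName, driverName string) *resourceapi.ResourceSlice {
	return &resourceapi.ResourceSlice{
		ObjectMeta: metav1.ObjectMeta{Name: nodeName + "-" + driverName},
		Spec: resourceapi.ResourceSliceSpec{
			NodeName: nodeName,
			Driver:   driverName,
			Pool: resourceapi.ResourcePool{
				Name:               nodeName,
				ResourceSliceCount: 1,
			},
			Devices: []resourceapi.Device{{
				Name: "device-0",
				Basic: &resourceapi.BasicDevice{
					Capacity: map[resourceapi.QualifiedName]resourceapi.DeviceCapacity{
						"memory": {Quantity: resource.MustParse("1Gi")},
					},
				},
			}},
		},
	}
}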
@@ -267,7 +267,7 @@ func (op *allocResourceClaimsOp) patchParams(w *workload) (realOp, error) {
func (op *allocResourceClaimsOp) requiredNamespaces() []string { return nil }
func (op *allocResourceClaimsOp) run(tCtx ktesting.TContext) {
claims, err := tCtx.Client().ResourceV1alpha3().ResourceClaims(op.Namespace).List(tCtx, metav1.ListOptions{})
claims, err := tCtx.Client().ResourceV1beta1().ResourceClaims(op.Namespace).List(tCtx, metav1.ListOptions{})
tCtx.ExpectNoError(err, "list claims")
tCtx.Logf("allocating %d ResourceClaims", len(claims.Items))
tCtx = ktesting.WithCancel(tCtx)
@@ -275,7 +275,7 @@ func (op *allocResourceClaimsOp) run(tCtx ktesting.TContext) {
// Track cluster state.
informerFactory := informers.NewSharedInformerFactory(tCtx.Client(), 0)
claimInformer := informerFactory.Resource().V1alpha3().ResourceClaims().Informer()
claimInformer := informerFactory.Resource().V1beta1().ResourceClaims().Informer()
nodeLister := informerFactory.Core().V1().Nodes().Lister()
draManager := dynamicresources.NewDRAManager(tCtx, assumecache.NewAssumeCache(tCtx.Logger(), claimInformer, "ResourceClaim", "", nil), informerFactory)
informerFactory.Start(tCtx.Done())
@@ -333,7 +333,7 @@ claims:
if result != nil {
claim = claim.DeepCopy()
claim.Status.Allocation = &result[0]
claim, err := tCtx.Client().ResourceV1alpha3().ResourceClaims(claim.Namespace).UpdateStatus(tCtx, claim, metav1.UpdateOptions{})
claim, err := tCtx.Client().ResourceV1beta1().ResourceClaims(claim.Namespace).UpdateStatus(tCtx, claim, metav1.UpdateOptions{})
tCtx.ExpectNoError(err, "update claim status with allocation")
tCtx.ExpectNoError(draManager.ResourceClaims().AssumeClaimAfterAPICall(claim), "assume claim")
continue claims

@@ -28,7 +28,7 @@ import (
v1 "k8s.io/api/core/v1"
policy "k8s.io/api/policy/v1"
resourceapi "k8s.io/api/resource/v1alpha3"
resourceapi "k8s.io/api/resource/v1beta1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
@@ -130,8 +130,8 @@ func StartScheduler(ctx context.Context, clientSet clientset.Interface, kubeConf
func CreateResourceClaimController(ctx context.Context, tb ktesting.TB, clientSet clientset.Interface, informerFactory informers.SharedInformerFactory) func() {
podInformer := informerFactory.Core().V1().Pods()
claimInformer := informerFactory.Resource().V1alpha3().ResourceClaims()
claimTemplateInformer := informerFactory.Resource().V1alpha3().ResourceClaimTemplates()
claimInformer := informerFactory.Resource().V1beta1().ResourceClaims()
claimTemplateInformer := informerFactory.Resource().V1beta1().ResourceClaimTemplates()
claimController, err := resourceclaim.NewController(klog.FromContext(ctx), true /* admin access */, clientSet, podInformer, claimInformer, claimTemplateInformer)
if err != nil {
tb.Fatalf("Error creating claim controller: %v", err)