Merge pull request #123516 from pohly/dra-structured-parameters

DRA: structured parameters
Kubernetes Prow Robot
2024-03-07 19:24:48 -08:00
committed by GitHub
158 changed files with 30314 additions and 952 deletions

View File

@@ -51,6 +51,13 @@ API rule violation: names_match,k8s.io/api/core/v1,RBDVolumeSource,RadosUser
API rule violation: names_match,k8s.io/api/core/v1,VolumeSource,CephFS
API rule violation: names_match,k8s.io/api/core/v1,VolumeSource,StorageOS
API rule violation: names_match,k8s.io/api/networking/v1alpha1,ServiceCIDRSpec,CIDRs
API rule violation: names_match,k8s.io/api/resource/v1alpha2,NamedResourcesAttributeValue,BoolValue
API rule violation: names_match,k8s.io/api/resource/v1alpha2,NamedResourcesAttributeValue,IntSliceValue
API rule violation: names_match,k8s.io/api/resource/v1alpha2,NamedResourcesAttributeValue,IntValue
API rule violation: names_match,k8s.io/api/resource/v1alpha2,NamedResourcesAttributeValue,QuantityValue
API rule violation: names_match,k8s.io/api/resource/v1alpha2,NamedResourcesAttributeValue,StringSliceValue
API rule violation: names_match,k8s.io/api/resource/v1alpha2,NamedResourcesAttributeValue,StringValue
API rule violation: names_match,k8s.io/api/resource/v1alpha2,NamedResourcesAttributeValue,VersionValue
API rule violation: names_match,k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1,JSONSchemaProps,Ref
API rule violation: names_match,k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1,JSONSchemaProps,Schema
API rule violation: names_match,k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1,JSONSchemaProps,XEmbeddedResource

View File

@@ -1926,6 +1926,26 @@
"watch"
]
},
{
"resource": "resourceclaimparameters",
"responseKind": {
"group": "",
"kind": "ResourceClaimParameters",
"version": ""
},
"scope": "Namespaced",
"singularResource": "resourceclaimparameters",
"verbs": [
"create",
"delete",
"deletecollection",
"get",
"list",
"patch",
"update",
"watch"
]
},
{
"resource": "resourceclaims",
"responseKind": {
@@ -2000,6 +2020,46 @@
"update",
"watch"
]
},
{
"resource": "resourceclassparameters",
"responseKind": {
"group": "",
"kind": "ResourceClassParameters",
"version": ""
},
"scope": "Namespaced",
"singularResource": "resourceclassparameters",
"verbs": [
"create",
"delete",
"deletecollection",
"get",
"list",
"patch",
"update",
"watch"
]
},
{
"resource": "resourceslices",
"responseKind": {
"group": "",
"kind": "ResourceSlice",
"version": ""
},
"scope": "Cluster",
"singularResource": "resourceslice",
"verbs": [
"create",
"delete",
"deletecollection",
"get",
"list",
"patch",
"update",
"watch"
]
}
],
"version": "v1alpha2"

View File

@@ -31,6 +31,23 @@
"update"
]
},
{
"kind": "ResourceClaimParameters",
"name": "resourceclaimparameters",
"namespaced": true,
"singularName": "resourceclaimparameters",
"storageVersionHash": "DWM408h+ZHE=",
"verbs": [
"create",
"delete",
"deletecollection",
"get",
"list",
"patch",
"update",
"watch"
]
},
{
"kind": "ResourceClaim",
"name": "resourceclaims",
@@ -92,6 +109,40 @@
"update",
"watch"
]
},
{
"kind": "ResourceClassParameters",
"name": "resourceclassparameters",
"namespaced": true,
"singularName": "resourceclassparameters",
"storageVersionHash": "MDq5XoTnXWQ=",
"verbs": [
"create",
"delete",
"deletecollection",
"get",
"list",
"patch",
"update",
"watch"
]
},
{
"kind": "ResourceSlice",
"name": "resourceslices",
"namespaced": false,
"singularName": "resourceslice",
"storageVersionHash": "IECvOcO76kw=",
"verbs": [
"create",
"delete",
"deletecollection",
"get",
"list",
"patch",
"update",
"watch"
]
}
]
}

File diff suppressed because it is too large

View File

@@ -815,6 +815,7 @@ func run(ctx context.Context, s *options.KubeletServer, kubeDeps *kubelet.Depend
kubeDeps.Mounter,
kubeDeps.CAdvisorInterface,
cm.NodeConfig{
NodeName: nodeName,
RuntimeCgroupsName: s.RuntimeCgroups,
SystemCgroupsName: s.SystemCgroups,
KubeletCgroupsName: s.KubeletCgroups,

View File

@@ -37,7 +37,7 @@ API_KNOWN_VIOLATIONS_DIR="${API_KNOWN_VIOLATIONS_DIR:-"${KUBE_ROOT}/api/api-rule
OUT_DIR="_output"
BOILERPLATE_FILENAME="hack/boilerplate/boilerplate.generatego.txt"
APPLYCONFIG_PKG="k8s.io/client-go/applyconfigurations"
PLURAL_EXCEPTIONS="Endpoints:Endpoints"
PLURAL_EXCEPTIONS="Endpoints:Endpoints,ResourceClaimParameters:ResourceClaimParameters,ResourceClassParameters:ResourceClassParameters"
# Any time we call sort, we want it in the same locale.
export LC_ALL="C"

View File

@@ -0,0 +1,104 @@
/*
Copyright 2024 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package resource
import "k8s.io/apimachinery/pkg/api/resource"
// NamedResourcesResources is used in NodeResourceModel.
type NamedResourcesResources struct {
// The list of all individual resource instances currently available.
Instances []NamedResourcesInstance
}
// NamedResourcesInstance represents one individual hardware instance that can be selected based
// on its attributes.
type NamedResourcesInstance struct {
// Name is a unique identifier among all resource instances managed by
// the driver on the node. It must be a DNS subdomain.
Name string
// Attributes defines the attributes of this resource instance.
// The name of each attribute must be unique.
Attributes []NamedResourcesAttribute
}
// NamedResourcesAttribute is a combination of an attribute name and its value.
type NamedResourcesAttribute struct {
// Name is a unique identifier among all attributes of this resource instance.
// It must be a DNS subdomain.
Name string
NamedResourcesAttributeValue
}
// NamedResourcesAttributeValue must have one and only one field set.
type NamedResourcesAttributeValue struct {
// QuantityValue is a quantity.
QuantityValue *resource.Quantity
// BoolValue is a true/false value.
BoolValue *bool
// IntValue is a 64-bit integer.
IntValue *int64
// IntSliceValue is an array of 64-bit integers.
IntSliceValue *NamedResourcesIntSlice
// StringValue is a string.
StringValue *string
// StringSliceValue is an array of strings.
StringSliceValue *NamedResourcesStringSlice
// VersionValue is a semantic version according to semver.org spec 2.0.0.
VersionValue *string
}
// NamedResourcesIntSlice contains a slice of 64-bit integers.
type NamedResourcesIntSlice struct {
// Ints is the slice of 64-bit integers.
Ints []int64
}
// NamedResourcesStringSlice contains a slice of strings.
type NamedResourcesStringSlice struct {
// Strings is the slice of strings.
Strings []string
}
// NamedResourcesRequest is used in ResourceRequestModel.
type NamedResourcesRequest struct {
// Selector is a CEL expression which must evaluate to true if a
// resource instance is suitable. The language is as defined in
// https://kubernetes.io/docs/reference/using-api/cel/
//
// In addition, for each type in NamedResourcesAttributeValue there is a map that
// resolves to the corresponding value of the instance under evaluation.
// For example:
//
// attributes.quantity["a"].isGreaterThan(quantity("0")) &&
// attributes.stringslice["b"].isSorted()
Selector string
}
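As an illustrative sketch (not part of this change), a request that only accepts instances with a sufficiently large "memory" quantity attribute and a matching "model" string attribute could set the selector like this; the attribute names are hypothetical:

// Hypothetical request; the attribute names ("memory", "model") are examples only.
request := NamedResourcesRequest{
	Selector: `attributes.quantity["memory"].isGreaterThan(quantity("8Gi")) &&
		attributes.string["model"] == "example-model"`,
}
_ = request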
// NamedResourcesFilter is used in ResourceFilterModel.
type NamedResourcesFilter struct {
// Selector is a selector like the one in Request. It must be true for
// a resource instance to be suitable for a claim using the class.
Selector string
}
// NamedResourcesAllocationResult is used in AllocationResultModel.
type NamedResourcesAllocationResult struct {
// Name is the name of the selected resource instance.
Name string
}
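To make the model concrete, here is a minimal sketch of how a driver might describe one device with these types. Names and values are hypothetical; `ptr` refers to k8s.io/utils/ptr and `resource` to the apimachinery quantity package imported above:

// One instance with a quantity attribute and a semver attribute.
gpu := NamedResourcesInstance{
	Name: "gpu-0", // must be a DNS subdomain
	Attributes: []NamedResourcesAttribute{
		{Name: "memory", NamedResourcesAttributeValue: NamedResourcesAttributeValue{QuantityValue: resource.NewQuantity(80<<30, resource.BinarySI)}},
		{Name: "driver-version", NamedResourcesAttributeValue: NamedResourcesAttributeValue{VersionValue: ptr.To("1.2.3")}},
	},
}
available := NamedResourcesResources{Instances: []NamedResourcesInstance{gpu}}
_ = available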

View File

@@ -60,6 +60,12 @@ func addKnownTypes(scheme *runtime.Scheme) error {
&ResourceClaimTemplateList{},
&PodSchedulingContext{},
&PodSchedulingContextList{},
&ResourceSlice{},
&ResourceSliceList{},
&ResourceClaimParameters{},
&ResourceClaimParametersList{},
&ResourceClassParameters{},
&ResourceClassParametersList{},
)
return nil

View File

@@ -0,0 +1,178 @@
/*
Copyright 2022 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package validation
import (
"fmt"
"regexp"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/apimachinery/pkg/util/validation/field"
"k8s.io/apiserver/pkg/cel"
"k8s.io/apiserver/pkg/cel/environment"
namedresourcescel "k8s.io/dynamic-resource-allocation/structured/namedresources/cel"
corevalidation "k8s.io/kubernetes/pkg/apis/core/validation"
"k8s.io/kubernetes/pkg/apis/resource"
)
var (
validateInstanceName = corevalidation.ValidateDNS1123Subdomain
validateAttributeName = corevalidation.ValidateDNS1123Subdomain
)
type Options struct {
// StoredExpressions must be true if and only if validating CEL
// expressions that were already stored persistently. This makes
// validation more permissive by enabling CEL definitions that are not
// valid yet for new expressions.
StoredExpressions bool
}
func ValidateResources(resources *resource.NamedResourcesResources, fldPath *field.Path) field.ErrorList {
allErrs := validateInstances(resources.Instances, fldPath.Child("instances"))
return allErrs
}
func validateInstances(instances []resource.NamedResourcesInstance, fldPath *field.Path) field.ErrorList {
var allErrs field.ErrorList
instanceNames := sets.New[string]()
for i, instance := range instances {
idxPath := fldPath.Index(i)
instanceName := instance.Name
allErrs = append(allErrs, validateInstanceName(instanceName, idxPath.Child("name"))...)
if instanceNames.Has(instanceName) {
allErrs = append(allErrs, field.Duplicate(idxPath.Child("name"), instanceName))
} else {
instanceNames.Insert(instanceName)
}
allErrs = append(allErrs, validateAttributes(instance.Attributes, idxPath.Child("attributes"))...)
}
return allErrs
}
var (
numericIdentifier = `(0|[1-9]\d*)`
preReleaseIdentifier = `(0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*)`
buildIdentifier = `[0-9a-zA-Z-]+`
semverRe = regexp.MustCompile(`^` +
// dot-separated version segments (e.g. 1.2.3)
numericIdentifier + `\.` + numericIdentifier + `\.` + numericIdentifier +
// optional dot-separated prerelease segments (e.g. -alpha.PRERELEASE.1)
`(-` + preReleaseIdentifier + `(\.` + preReleaseIdentifier + `)*)?` +
// optional dot-separated build identifier segments (e.g. +build.id.20240305)
`(\+` + buildIdentifier + `(\.` + buildIdentifier + `)*)?` +
`$`)
)
func validateAttributes(attributes []resource.NamedResourcesAttribute, fldPath *field.Path) field.ErrorList {
var allErrs field.ErrorList
attributeNames := sets.New[string]()
for i, attribute := range attributes {
idxPath := fldPath.Index(i)
attributeName := attribute.Name
allErrs = append(allErrs, validateAttributeName(attributeName, idxPath.Child("name"))...)
if attributeNames.Has(attributeName) {
allErrs = append(allErrs, field.Duplicate(idxPath.Child("name"), attributeName))
} else {
attributeNames.Insert(attributeName)
}
entries := sets.New[string]()
if attribute.QuantityValue != nil {
entries.Insert("quantity")
}
if attribute.BoolValue != nil {
entries.Insert("bool")
}
if attribute.IntValue != nil {
entries.Insert("int")
}
if attribute.IntSliceValue != nil {
entries.Insert("intSlice")
}
if attribute.StringValue != nil {
entries.Insert("string")
}
if attribute.StringSliceValue != nil {
entries.Insert("stringSlice")
}
if attribute.VersionValue != nil {
entries.Insert("version")
if !semverRe.MatchString(*attribute.VersionValue) {
allErrs = append(allErrs, field.Invalid(idxPath.Child("version"), *attribute.VersionValue, "must be a string compatible with semver.org spec 2.0.0"))
}
}
switch len(entries) {
case 0:
allErrs = append(allErrs, field.Required(idxPath, "exactly one value must be set"))
case 1:
// Okay.
default:
allErrs = append(allErrs, field.Invalid(idxPath, sets.List(entries), "exactly one field must be set, not several"))
}
}
return allErrs
}
func ValidateRequest(opts Options, request *resource.NamedResourcesRequest, fldPath *field.Path) field.ErrorList {
return validateSelector(opts, request.Selector, fldPath.Child("selector"))
}
func ValidateFilter(opts Options, filter *resource.NamedResourcesFilter, fldPath *field.Path) field.ErrorList {
return validateSelector(opts, filter.Selector, fldPath.Child("selector"))
}
func validateSelector(opts Options, selector string, fldPath *field.Path) field.ErrorList {
var allErrs field.ErrorList
if selector == "" {
allErrs = append(allErrs, field.Required(fldPath, ""))
} else {
envType := environment.NewExpressions
if opts.StoredExpressions {
envType = environment.StoredExpressions
}
result := namedresourcescel.Compiler.CompileCELExpression(selector, envType)
if result.Error != nil {
allErrs = append(allErrs, convertCELErrorToValidationError(fldPath, selector, result.Error))
}
}
return allErrs
}
func convertCELErrorToValidationError(fldPath *field.Path, expression string, err *cel.Error) *field.Error {
switch err.Type {
case cel.ErrorTypeRequired:
return field.Required(fldPath, err.Detail)
case cel.ErrorTypeInvalid:
return field.Invalid(fldPath, expression, err.Detail)
case cel.ErrorTypeInternal:
return field.InternalError(fldPath, err)
}
return field.InternalError(fldPath, fmt.Errorf("unsupported error type: %w", err))
}
func ValidateAllocationResult(result *resource.NamedResourcesAllocationResult, fldPath *field.Path) field.ErrorList {
return validateInstanceName(result.Name, fldPath.Child("name"))
}
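A usage sketch (not part of this diff): callers such as the REST strategies are expected to pick Options depending on whether the expression was just received or read back from storage, roughly like this, where `resource` is k8s.io/kubernetes/pkg/apis/resource as imported above:

// Validating a selector from a newly submitted object: use the stricter environment.
errs := ValidateRequest(Options{StoredExpressions: false},
	&resource.NamedResourcesRequest{Selector: `attributes.bool["ready"]`},
	field.NewPath("namedResources"))
if len(errs) > 0 {
	// reject the object with the aggregated field errors
}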

View File

@@ -0,0 +1,188 @@
/*
Copyright 2022 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package validation
import (
"testing"
"github.com/stretchr/testify/assert"
"k8s.io/apimachinery/pkg/api/resource"
"k8s.io/apimachinery/pkg/util/validation/field"
resourceapi "k8s.io/kubernetes/pkg/apis/resource"
"k8s.io/utils/ptr"
)
func testResources(instances []resourceapi.NamedResourcesInstance) *resourceapi.NamedResourcesResources {
resources := &resourceapi.NamedResourcesResources{
Instances: instances,
}
return resources
}
func TestValidateResources(t *testing.T) {
goodName := "foo"
badName := "!@#$%^"
quantity := resource.MustParse("1")
scenarios := map[string]struct {
resources *resourceapi.NamedResourcesResources
wantFailures field.ErrorList
}{
"empty": {
resources: testResources(nil),
},
"good": {
resources: testResources([]resourceapi.NamedResourcesInstance{{Name: goodName}}),
},
"bad-name": {
wantFailures: field.ErrorList{field.Invalid(field.NewPath("instances").Index(0).Child("name"), badName, "a lowercase RFC 1123 subdomain must consist of lower case alphanumeric characters, '-' or '.', and must start and end with an alphanumeric character (e.g. 'example.com', regex used for validation is '[a-z0-9]([-a-z0-9]*[a-z0-9])?(\\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*')")},
resources: testResources([]resourceapi.NamedResourcesInstance{{Name: badName}}),
},
"duplicate-name": {
wantFailures: field.ErrorList{field.Duplicate(field.NewPath("instances").Index(1).Child("name"), goodName)},
resources: testResources([]resourceapi.NamedResourcesInstance{{Name: goodName}, {Name: goodName}}),
},
"quantity": {
resources: testResources([]resourceapi.NamedResourcesInstance{{Name: goodName, Attributes: []resourceapi.NamedResourcesAttribute{{Name: goodName, NamedResourcesAttributeValue: resourceapi.NamedResourcesAttributeValue{QuantityValue: &quantity}}}}}),
},
"bool": {
resources: testResources([]resourceapi.NamedResourcesInstance{{Name: goodName, Attributes: []resourceapi.NamedResourcesAttribute{{Name: goodName, NamedResourcesAttributeValue: resourceapi.NamedResourcesAttributeValue{BoolValue: ptr.To(true)}}}}}),
},
"int": {
resources: testResources([]resourceapi.NamedResourcesInstance{{Name: goodName, Attributes: []resourceapi.NamedResourcesAttribute{{Name: goodName, NamedResourcesAttributeValue: resourceapi.NamedResourcesAttributeValue{IntValue: ptr.To(int64(1))}}}}}),
},
"int-slice": {
resources: testResources([]resourceapi.NamedResourcesInstance{{Name: goodName, Attributes: []resourceapi.NamedResourcesAttribute{{Name: goodName, NamedResourcesAttributeValue: resourceapi.NamedResourcesAttributeValue{IntSliceValue: &resourceapi.NamedResourcesIntSlice{Ints: []int64{1, 2, 3}}}}}}}),
},
"string": {
resources: testResources([]resourceapi.NamedResourcesInstance{{Name: goodName, Attributes: []resourceapi.NamedResourcesAttribute{{Name: goodName, NamedResourcesAttributeValue: resourceapi.NamedResourcesAttributeValue{StringValue: ptr.To("hello")}}}}}),
},
"string-slice": {
resources: testResources([]resourceapi.NamedResourcesInstance{{Name: goodName, Attributes: []resourceapi.NamedResourcesAttribute{{Name: goodName, NamedResourcesAttributeValue: resourceapi.NamedResourcesAttributeValue{StringSliceValue: &resourceapi.NamedResourcesStringSlice{Strings: []string{"hello"}}}}}}}),
},
"version-okay": {
resources: testResources([]resourceapi.NamedResourcesInstance{{Name: goodName, Attributes: []resourceapi.NamedResourcesAttribute{{Name: goodName, NamedResourcesAttributeValue: resourceapi.NamedResourcesAttributeValue{VersionValue: ptr.To("1.0.0")}}}}}),
},
"version-beta": {
resources: testResources([]resourceapi.NamedResourcesInstance{{Name: goodName, Attributes: []resourceapi.NamedResourcesAttribute{{Name: goodName, NamedResourcesAttributeValue: resourceapi.NamedResourcesAttributeValue{VersionValue: ptr.To("1.0.0-beta")}}}}}),
},
"version-beta-1": {
resources: testResources([]resourceapi.NamedResourcesInstance{{Name: goodName, Attributes: []resourceapi.NamedResourcesAttribute{{Name: goodName, NamedResourcesAttributeValue: resourceapi.NamedResourcesAttributeValue{VersionValue: ptr.To("1.0.0-beta.1")}}}}}),
},
"version-build": {
resources: testResources([]resourceapi.NamedResourcesInstance{{Name: goodName, Attributes: []resourceapi.NamedResourcesAttribute{{Name: goodName, NamedResourcesAttributeValue: resourceapi.NamedResourcesAttributeValue{VersionValue: ptr.To("1.0.0+build")}}}}}),
},
"version-build-1": {
resources: testResources([]resourceapi.NamedResourcesInstance{{Name: goodName, Attributes: []resourceapi.NamedResourcesAttribute{{Name: goodName, NamedResourcesAttributeValue: resourceapi.NamedResourcesAttributeValue{VersionValue: ptr.To("1.0.0+build.1")}}}}}),
},
"version-beta-1-build-1": {
resources: testResources([]resourceapi.NamedResourcesInstance{{Name: goodName, Attributes: []resourceapi.NamedResourcesAttribute{{Name: goodName, NamedResourcesAttributeValue: resourceapi.NamedResourcesAttributeValue{VersionValue: ptr.To("1.0.0-beta.1+build.1")}}}}}),
},
"version-bad": {
wantFailures: field.ErrorList{field.Invalid(field.NewPath("instances").Index(0).Child("attributes").Index(0).Child("version"), "1.0", "must be a string compatible with semver.org spec 2.0.0")},
resources: testResources([]resourceapi.NamedResourcesInstance{{Name: goodName, Attributes: []resourceapi.NamedResourcesAttribute{{Name: goodName, NamedResourcesAttributeValue: resourceapi.NamedResourcesAttributeValue{VersionValue: ptr.To("1.0")}}}}}),
},
"version-bad-leading-zeros": {
wantFailures: field.ErrorList{field.Invalid(field.NewPath("instances").Index(0).Child("attributes").Index(0).Child("version"), "01.0.0", "must be a string compatible with semver.org spec 2.0.0")},
resources: testResources([]resourceapi.NamedResourcesInstance{{Name: goodName, Attributes: []resourceapi.NamedResourcesAttribute{{Name: goodName, NamedResourcesAttributeValue: resourceapi.NamedResourcesAttributeValue{VersionValue: ptr.To("01.0.0")}}}}}),
},
"version-bad-leading-zeros-middle": {
wantFailures: field.ErrorList{field.Invalid(field.NewPath("instances").Index(0).Child("attributes").Index(0).Child("version"), "1.00.0", "must be a string compatible with semver.org spec 2.0.0")},
resources: testResources([]resourceapi.NamedResourcesInstance{{Name: goodName, Attributes: []resourceapi.NamedResourcesAttribute{{Name: goodName, NamedResourcesAttributeValue: resourceapi.NamedResourcesAttributeValue{VersionValue: ptr.To("1.00.0")}}}}}),
},
"version-bad-leading-zeros-end": {
wantFailures: field.ErrorList{field.Invalid(field.NewPath("instances").Index(0).Child("attributes").Index(0).Child("version"), "1.0.00", "must be a string compatible with semver.org spec 2.0.0")},
resources: testResources([]resourceapi.NamedResourcesInstance{{Name: goodName, Attributes: []resourceapi.NamedResourcesAttribute{{Name: goodName, NamedResourcesAttributeValue: resourceapi.NamedResourcesAttributeValue{VersionValue: ptr.To("1.0.00")}}}}}),
},
"version-bad-spaces": {
wantFailures: field.ErrorList{field.Invalid(field.NewPath("instances").Index(0).Child("attributes").Index(0).Child("version"), " 1.0.0 ", "must be a string compatible with semver.org spec 2.0.0")},
resources: testResources([]resourceapi.NamedResourcesInstance{{Name: goodName, Attributes: []resourceapi.NamedResourcesAttribute{{Name: goodName, NamedResourcesAttributeValue: resourceapi.NamedResourcesAttributeValue{VersionValue: ptr.To(" 1.0.0 ")}}}}}),
},
"empty-attribute": {
wantFailures: field.ErrorList{field.Required(field.NewPath("instances").Index(0).Child("attributes").Index(0), "exactly one value must be set")},
resources: testResources([]resourceapi.NamedResourcesInstance{{Name: goodName, Attributes: []resourceapi.NamedResourcesAttribute{{Name: goodName}}}}),
},
"duplicate-value": {
wantFailures: field.ErrorList{field.Invalid(field.NewPath("instances").Index(0).Child("attributes").Index(0), []string{"bool", "int"}, "exactly one field must be set, not several")},
resources: testResources([]resourceapi.NamedResourcesInstance{{Name: goodName, Attributes: []resourceapi.NamedResourcesAttribute{{Name: goodName, NamedResourcesAttributeValue: resourceapi.NamedResourcesAttributeValue{BoolValue: ptr.To(true), IntValue: ptr.To(int64(1))}}}}}),
},
}
for name, scenario := range scenarios {
t.Run(name, func(t *testing.T) {
errs := ValidateResources(scenario.resources, nil)
assert.Equal(t, scenario.wantFailures, errs)
})
}
}
func TestValidateSelector(t *testing.T) {
scenarios := map[string]struct {
selector string
wantFailures field.ErrorList
}{
"okay": {
selector: "true",
},
"empty": {
selector: "",
wantFailures: field.ErrorList{field.Required(nil, "")},
},
"undefined": {
selector: "nosuchvar",
wantFailures: field.ErrorList{field.Invalid(nil, "nosuchvar", "compilation failed: ERROR: <input>:1:1: undeclared reference to 'nosuchvar' (in container '')\n | nosuchvar\n | ^")},
},
"wrong-type": {
selector: "1",
wantFailures: field.ErrorList{field.Invalid(nil, "1", "must evaluate to bool")},
},
"quantity": {
selector: `attributes.quantity["name"].isGreaterThan(quantity("0"))`,
},
"bool": {
selector: `attributes.bool["name"]`,
},
"int": {
selector: `attributes.int["name"] > 0`,
},
"intslice": {
selector: `attributes.intslice["name"].isSorted()`,
},
"string": {
selector: `attributes.string["name"] == "fish"`,
},
"stringslice": {
selector: `attributes.stringslice["name"].isSorted()`,
},
"version": {
selector: `attributes.version["name"].isGreaterThan(semver("1.0.0"))`,
},
}
for name, scenario := range scenarios {
t.Run(name, func(t *testing.T) {
// At the moment, there's no difference between stored and new expressions.
// This uses the stricter validation.
opts := Options{
StoredExpressions: false,
}
errs := validateSelector(opts, scenario.selector, nil)
assert.Equal(t, scenario.wantFailures, errs)
})
}
}

View File

@@ -18,6 +18,7 @@ package resource
import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/types"
"k8s.io/kubernetes/pkg/apis/core"
)
@@ -185,11 +186,49 @@ type ResourceHandle struct {
// future, but not reduced.
// +optional
Data string
// If StructuredData is set, then it needs to be used instead of Data.
StructuredData *StructuredResourceHandle
}
// ResourceHandleDataMaxSize represents the maximum size of resourceHandle.data.
const ResourceHandleDataMaxSize = 16 * 1024
// StructuredResourceHandle is the in-tree representation of the allocation result.
type StructuredResourceHandle struct {
// VendorClassParameters are the per-claim configuration parameters
// from the resource class at the time that the claim was allocated.
VendorClassParameters runtime.Object
// VendorClaimParameters are the per-claim configuration parameters
// from the resource claim parameters at the time that the claim was
// allocated.
VendorClaimParameters runtime.Object
// NodeName is the name of the node providing the necessary resources
// if the resources are local to a node.
NodeName string
// Results lists all allocated driver resources.
Results []DriverAllocationResult
}
// DriverAllocationResult contains vendor parameters and the allocation result for
// one request.
type DriverAllocationResult struct {
// VendorRequestParameters are the per-request configuration parameters
// from the time that the claim was allocated.
VendorRequestParameters runtime.Object
AllocationResultModel
}
// AllocationResultModel must have one and only one field set.
type AllocationResultModel struct {
// NamedResources describes the allocation result when using the named resources model.
NamedResources *NamedResourcesAllocationResult
}
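For orientation, a structured allocation stored in a claim status might then look roughly like the following sketch (driver and instance names are made up):

handle := ResourceHandle{
	DriverName: "gpu.example.com",
	// Data stays empty: Data and StructuredData are mutually exclusive.
	StructuredData: &StructuredResourceHandle{
		NodeName: "worker-1",
		Results: []DriverAllocationResult{{
			AllocationResultModel: AllocationResultModel{
				NamedResources: &NamedResourcesAllocationResult{Name: "gpu-0"},
			},
		}},
	},
}
_ = handle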
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// ResourceClaimList is a collection of claims.
@@ -323,6 +362,10 @@ type ResourceClass struct {
// Setting this field is optional. If null, all nodes are candidates.
// +optional
SuitableNodes *core.NodeSelector
// If and only if allocation of claims using this class is handled
// via structured parameters, then StructuredParameters must be set to true.
StructuredParameters *bool
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
@@ -434,3 +477,173 @@ type ResourceClaimTemplateList struct {
// Items is the list of resource claim templates.
Items []ResourceClaimTemplate
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// ResourceSlice provides information about available
// resources on individual nodes.
type ResourceSlice struct {
metav1.TypeMeta
// Standard object metadata
metav1.ObjectMeta
// NodeName identifies the node which provides the resources
// if they are local to a node.
//
// A field selector can be used to list only ResourceSlice
// objects with a certain node name.
NodeName string
// DriverName identifies the DRA driver providing the capacity information.
// A field selector can be used to list only ResourceSlice
// objects with a certain driver name.
DriverName string
NodeResourceModel
}
// NodeResourceModel must have one and only one field set.
type NodeResourceModel struct {
// NamedResources describes available resources using the named resources model.
NamedResources *NamedResourcesResources
}
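A ResourceSlice published for one node by a driver's kubelet plugin could therefore look roughly like this (all names are placeholders):

slice := ResourceSlice{
	ObjectMeta: metav1.ObjectMeta{Name: "worker-1-gpu.example.com"},
	NodeName:   "worker-1",
	DriverName: "gpu.example.com",
	NodeResourceModel: NodeResourceModel{
		NamedResources: &NamedResourcesResources{
			Instances: []NamedResourcesInstance{{Name: "gpu-0"}},
		},
	},
}
_ = slice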
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// ResourceSliceList is a collection of ResourceSlices.
type ResourceSliceList struct {
metav1.TypeMeta
// Standard list metadata
metav1.ListMeta
// Items is the list of node resource capacity objects.
Items []ResourceSlice
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// ResourceClaimParameters defines resource requests for a ResourceClaim in an
// in-tree format understood by Kubernetes.
type ResourceClaimParameters struct {
metav1.TypeMeta
// Standard object metadata
metav1.ObjectMeta
// If this object was created from some other resource, then this links
// back to that resource. This field is used to find the in-tree representation
// of the claim parameters when the parameter reference of the claim refers
// to some unknown type.
GeneratedFrom *ResourceClaimParametersReference
// Shareable indicates whether the allocated claim is meant to be shareable
// by multiple consumers at the same time.
Shareable bool
// DriverRequests describes all resources that are needed for the
// allocated claim. A single claim may use resources coming from
// different drivers. For each driver, this array has at most one
// entry which then may have one or more per-driver requests.
//
// May be empty, in which case the claim can always be allocated.
DriverRequests []DriverRequests
}
// DriverRequests describes all resources that are needed from one particular driver.
type DriverRequests struct {
// DriverName is the name used by the DRA driver kubelet plugin.
DriverName string
// VendorParameters are arbitrary setup parameters for all requests of the
// claim. They are ignored while allocating the claim.
VendorParameters runtime.Object
// Requests describes all resources that are needed from the driver.
Requests []ResourceRequest
}
// ResourceRequest is a request for resources from one particular driver.
type ResourceRequest struct {
// VendorParameters are arbitrary setup parameters for the requested
// resource. They are ignored while allocating a claim.
VendorParameters runtime.Object
ResourceRequestModel
}
// ResourceRequestModel must have one and only one field set.
type ResourceRequestModel struct {
// NamedResources describes a request for resources with the named resources model.
NamedResources *NamedResourcesRequest
}
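Putting the claim-side pieces together, parameters that ask one hypothetical driver for any instance matching a selector could be written as the following sketch:

claimParams := ResourceClaimParameters{
	ObjectMeta: metav1.ObjectMeta{Name: "example-claim-params", Namespace: "default"},
	DriverRequests: []DriverRequests{{
		DriverName: "gpu.example.com",
		Requests: []ResourceRequest{{
			ResourceRequestModel: ResourceRequestModel{
				NamedResources: &NamedResourcesRequest{Selector: `attributes.int["count"] > 0`},
			},
		}},
	}},
}
_ = claimParams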
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// ResourceClaimParametersList is a collection of ResourceClaimParameters.
type ResourceClaimParametersList struct {
metav1.TypeMeta
// Standard list metadata
metav1.ListMeta
// Items is the list of resource claim parameters.
Items []ResourceClaimParameters
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// ResourceClassParameters defines resource requests for a ResourceClass in an
// in-tree format understood by Kubernetes.
type ResourceClassParameters struct {
metav1.TypeMeta
// Standard object metadata
metav1.ObjectMeta
// If this object was created from some other resource, then this links
// back to that resource. This field is used to find the in-tree representation
// of the class parameters when the parameter reference of the class refers
// to some unknown type.
GeneratedFrom *ResourceClassParametersReference
// VendorParameters are arbitrary setup parameters for all claims using
// this class. They are ignored while allocating the claim. There must
// not be more than one entry per driver.
VendorParameters []VendorParameters
// Filters describes additional constraints that must be met when using the class.
Filters []ResourceFilter
}
// ResourceFilter is a filter for resources from one particular driver.
type ResourceFilter struct {
// DriverName is the name used by the DRA driver kubelet plugin.
DriverName string
ResourceFilterModel
}
// ResourceFilterModel must have one and only one field set.
type ResourceFilterModel struct {
// NamedResources describes a resource filter using the named resources model.
NamedResources *NamedResourcesFilter
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// ResourceClassParametersList is a collection of ResourceClassParameters.
type ResourceClassParametersList struct {
metav1.TypeMeta
// Standard list metadata
metav1.ListMeta
// Items is the list of resource class parameters.
Items []ResourceClassParameters
}
// VendorParameters are opaque parameters for one particular driver.
type VendorParameters struct {
// DriverName is the name used by the DRA driver kubelet plugin.
DriverName string
// Parameters can be arbitrary setup parameters. They are ignored while
// allocating a claim.
Parameters runtime.Object
}
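Class parameters follow the same pattern; here is a sketch of class parameters that filter instances for one driver (again with made-up names):

classParams := ResourceClassParameters{
	ObjectMeta: metav1.ObjectMeta{Name: "gpu-class-params", Namespace: "default"},
	Filters: []ResourceFilter{{
		DriverName: "gpu.example.com",
		ResourceFilterModel: ResourceFilterModel{
			NamedResources: &NamedResourcesFilter{Selector: `attributes.bool["healthy"]`},
		},
	}},
}
_ = classParams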

View File

@@ -17,9 +17,23 @@ limitations under the License.
package v1alpha2
import (
"fmt"
"k8s.io/apimachinery/pkg/runtime"
)
func addConversionFuncs(scheme *runtime.Scheme) error {
if err := scheme.AddFieldLabelConversionFunc(SchemeGroupVersion.WithKind("ResourceSlice"),
func(label, value string) (string, string, error) {
switch label {
case "metadata.name", "nodeName", "driverName":
return label, value, nil
default:
return "", "", fmt.Errorf("field label not supported for %s: %s", SchemeGroupVersion.WithKind("ResourceSlice"), label)
}
}); err != nil {
return err
}
return nil
}
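Registering these field label conversions is what lets clients narrow ResourceSlice list and watch calls server-side. A hedged client-go sketch, assuming a *kubernetes.Clientset named clientset and a context.Context named ctx (the generated ResourceSlices client is part of this PR's generated code, which is not shown here):

slices, err := clientset.ResourceV1alpha2().ResourceSlices().List(ctx, metav1.ListOptions{
	FieldSelector: "nodeName=worker-1,driverName=gpu.example.com",
})
if err != nil {
	// handle error
}
_ = slices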

File diff suppressed because it is too large

View File

@@ -17,12 +17,14 @@ limitations under the License.
package validation
import (
apiequality "k8s.io/apimachinery/pkg/api/equality"
apimachineryvalidation "k8s.io/apimachinery/pkg/api/validation"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/apimachinery/pkg/util/validation/field"
corevalidation "k8s.io/kubernetes/pkg/apis/core/validation"
"k8s.io/kubernetes/pkg/apis/resource"
namedresourcesvalidation "k8s.io/kubernetes/pkg/apis/resource/structured/namedresources/validation"
)
// validateResourceDriverName reuses the validation of a CSI driver because
@@ -41,7 +43,7 @@ func validateResourceClaimSpec(spec *resource.ResourceClaimSpec, fldPath *field.
for _, msg := range corevalidation.ValidateClassName(spec.ResourceClassName, false) {
allErrs = append(allErrs, field.Invalid(fldPath.Child("resourceClassName"), spec.ResourceClassName, msg))
}
allErrs = append(allErrs, validateResourceClaimParameters(spec.ParametersRef, fldPath.Child("parametersRef"))...)
allErrs = append(allErrs, validateResourceClaimParametersRef(spec.ParametersRef, fldPath.Child("parametersRef"))...)
if !supportedAllocationModes.Has(string(spec.AllocationMode)) {
allErrs = append(allErrs, field.NotSupported(fldPath.Child("allocationMode"), spec.AllocationMode, supportedAllocationModes.List()))
}
@@ -54,7 +56,7 @@ var supportedAllocationModes = sets.NewString(string(resource.AllocationModeImme
// function for Kind and Name in both types, but generics cannot be used to
// access common fields in structs.
func validateResourceClaimParameters(ref *resource.ResourceClaimParametersReference, fldPath *field.Path) field.ErrorList {
func validateResourceClaimParametersRef(ref *resource.ResourceClaimParametersReference, fldPath *field.Path) field.ErrorList {
var allErrs field.ErrorList
if ref != nil {
if ref.Kind == "" {
@@ -200,6 +202,12 @@ func validateResourceHandles(resourceHandles []resource.ResourceHandle, maxSize
if len(resourceHandle.Data) > resource.ResourceHandleDataMaxSize {
allErrs = append(allErrs, field.TooLongMaxLength(idxPath.Child("data"), len(resourceHandle.Data), resource.ResourceHandleDataMaxSize))
}
if resourceHandle.StructuredData != nil {
allErrs = append(allErrs, validateStructuredResourceHandle(resourceHandle.StructuredData, idxPath.Child("structuredData"))...)
}
if len(resourceHandle.Data) > 0 && resourceHandle.StructuredData != nil {
allErrs = append(allErrs, field.Invalid(idxPath, nil, "data and structuredData are mutually exclusive"))
}
}
if len(resourceHandles) > maxSize {
// Dumping the entire field into the error message is likely to be too long,
@@ -210,6 +218,42 @@ func validateResourceHandles(resourceHandles []resource.ResourceHandle, maxSize
return allErrs
}
func validateStructuredResourceHandle(handle *resource.StructuredResourceHandle, fldPath *field.Path) field.ErrorList {
var allErrs field.ErrorList
if handle.NodeName != "" {
allErrs = append(allErrs, validateNodeName(handle.NodeName, fldPath.Child("nodeName"))...)
}
allErrs = append(allErrs, validateDriverAllocationResults(handle.Results, fldPath.Child("results"))...)
return allErrs
}
func validateDriverAllocationResults(results []resource.DriverAllocationResult, fldPath *field.Path) field.ErrorList {
var allErrs field.ErrorList
for index, result := range results {
idxPath := fldPath.Index(index)
allErrs = append(allErrs, validateAllocationResultModel(&result.AllocationResultModel, idxPath)...)
}
return allErrs
}
func validateAllocationResultModel(model *resource.AllocationResultModel, fldPath *field.Path) field.ErrorList {
var allErrs field.ErrorList
entries := sets.New[string]()
if model.NamedResources != nil {
entries.Insert("namedResources")
allErrs = append(allErrs, namedresourcesvalidation.ValidateAllocationResult(model.NamedResources, fldPath.Child("namedResources"))...)
}
switch len(entries) {
case 0:
allErrs = append(allErrs, field.Required(fldPath, "exactly one structured model field must be set"))
case 1:
// Okay.
default:
allErrs = append(allErrs, field.Invalid(fldPath, sets.List(entries), "exactly one field must be set, not several"))
}
return allErrs
}
func validateResourceClaimUserReference(ref resource.ResourceClaimConsumerReference, fldPath *field.Path) field.ErrorList {
var allErrs field.ErrorList
if ref.Resource == "" {
@@ -345,3 +389,181 @@ func validateNodeName(name string, fldPath *field.Path) field.ErrorList {
}
return allErrs
}
// ValidateResourceSlice tests if a ResourceSlice object is valid.
func ValidateResourceSlice(resourceSlice *resource.ResourceSlice) field.ErrorList {
allErrs := corevalidation.ValidateObjectMeta(&resourceSlice.ObjectMeta, false, apimachineryvalidation.NameIsDNSSubdomain, field.NewPath("metadata"))
if resourceSlice.NodeName != "" {
allErrs = append(allErrs, validateNodeName(resourceSlice.NodeName, field.NewPath("nodeName"))...)
}
allErrs = append(allErrs, validateResourceDriverName(resourceSlice.DriverName, field.NewPath("driverName"))...)
allErrs = append(allErrs, validateNodeResourceModel(&resourceSlice.NodeResourceModel, nil)...)
return allErrs
}
func validateNodeResourceModel(model *resource.NodeResourceModel, fldPath *field.Path) field.ErrorList {
var allErrs field.ErrorList
entries := sets.New[string]()
if model.NamedResources != nil {
entries.Insert("namedResources")
allErrs = append(allErrs, namedresourcesvalidation.ValidateResources(model.NamedResources, fldPath.Child("namedResources"))...)
}
switch len(entries) {
case 0:
allErrs = append(allErrs, field.Required(fldPath, "exactly one structured model field must be set"))
case 1:
// Okay.
default:
allErrs = append(allErrs, field.Invalid(fldPath, sets.List(entries), "exactly one field must be set, not several"))
}
return allErrs
}
// ValidateResourceSliceUpdate tests if a ResourceSlice update is valid.
func ValidateResourceSliceUpdate(resourceSlice, oldResourceSlice *resource.ResourceSlice) field.ErrorList {
allErrs := corevalidation.ValidateObjectMetaUpdate(&resourceSlice.ObjectMeta, &oldResourceSlice.ObjectMeta, field.NewPath("metadata"))
allErrs = append(allErrs, ValidateResourceSlice(resourceSlice)...)
allErrs = append(allErrs, apimachineryvalidation.ValidateImmutableField(resourceSlice.NodeName, oldResourceSlice.NodeName, field.NewPath("nodeName"))...)
allErrs = append(allErrs, apimachineryvalidation.ValidateImmutableField(resourceSlice.DriverName, oldResourceSlice.DriverName, field.NewPath("driverName"))...)
return allErrs
}
// ValidateResourceClaimParameters tests if a ResourceClaimParameters object is valid for creation.
func ValidateResourceClaimParameters(parameters *resource.ResourceClaimParameters) field.ErrorList {
return validateResourceClaimParameters(parameters, false)
}
func validateResourceClaimParameters(parameters *resource.ResourceClaimParameters, requestStored bool) field.ErrorList {
allErrs := corevalidation.ValidateObjectMeta(&parameters.ObjectMeta, true, apimachineryvalidation.NameIsDNSSubdomain, field.NewPath("metadata"))
allErrs = append(allErrs, validateResourceClaimParametersRef(parameters.GeneratedFrom, field.NewPath("generatedFrom"))...)
allErrs = append(allErrs, validateDriverRequests(parameters.DriverRequests, field.NewPath("driverRequests"), requestStored)...)
return allErrs
}
func validateDriverRequests(requests []resource.DriverRequests, fldPath *field.Path, requestStored bool) field.ErrorList {
var allErrs field.ErrorList
driverNames := sets.New[string]()
for i, request := range requests {
idxPath := fldPath.Index(i)
driverName := request.DriverName
allErrs = append(allErrs, validateResourceDriverName(driverName, idxPath.Child("driverName"))...)
if driverNames.Has(driverName) {
allErrs = append(allErrs, field.Duplicate(idxPath.Child("driverName"), driverName))
} else {
driverNames.Insert(driverName)
}
allErrs = append(allErrs, validateResourceRequests(request.Requests, idxPath.Child("requests"), requestStored)...)
}
return allErrs
}
func validateResourceRequests(requests []resource.ResourceRequest, fldPath *field.Path, requestStored bool) field.ErrorList {
var allErrs field.ErrorList
for i, request := range requests {
idxPath := fldPath.Index(i)
allErrs = append(allErrs, validateResourceRequestModel(&request.ResourceRequestModel, idxPath, requestStored)...)
}
if len(requests) == 0 {
// We could allow this, it just doesn't make sense: the entire entry would get ignored and thus
// should have been left out entirely.
allErrs = append(allErrs, field.Required(fldPath, "empty entries with no requests are not allowed"))
}
return allErrs
}
func validateResourceRequestModel(model *resource.ResourceRequestModel, fldPath *field.Path, requestStored bool) field.ErrorList {
var allErrs field.ErrorList
entries := sets.New[string]()
if model.NamedResources != nil {
entries.Insert("namedResources")
allErrs = append(allErrs, namedresourcesvalidation.ValidateRequest(namedresourcesvalidation.Options{StoredExpressions: requestStored}, model.NamedResources, fldPath.Child("namedResources"))...)
}
switch len(entries) {
case 0:
allErrs = append(allErrs, field.Required(fldPath, "exactly one structured model field must be set"))
case 1:
// Okay.
default:
allErrs = append(allErrs, field.Invalid(fldPath, sets.List(entries), "exactly one field must be set, not several"))
}
return allErrs
}
// ValidateResourceClaimParametersUpdate tests if a ResourceClaimParameters update is valid.
func ValidateResourceClaimParametersUpdate(resourceClaimParameters, oldResourceClaimParameters *resource.ResourceClaimParameters) field.ErrorList {
allErrs := corevalidation.ValidateObjectMetaUpdate(&resourceClaimParameters.ObjectMeta, &oldResourceClaimParameters.ObjectMeta, field.NewPath("metadata"))
requestStored := apiequality.Semantic.DeepEqual(oldResourceClaimParameters.DriverRequests, resourceClaimParameters.DriverRequests)
allErrs = append(allErrs, validateResourceClaimParameters(resourceClaimParameters, requestStored)...)
return allErrs
}
// ValidateResourceClassParameters tests if a ResourceClassParameters object is valid for creation.
func ValidateResourceClassParameters(parameters *resource.ResourceClassParameters) field.ErrorList {
return validateResourceClassParameters(parameters, false)
}
func validateResourceClassParameters(parameters *resource.ResourceClassParameters, filtersStored bool) field.ErrorList {
allErrs := corevalidation.ValidateObjectMeta(&parameters.ObjectMeta, true, apimachineryvalidation.NameIsDNSSubdomain, field.NewPath("metadata"))
allErrs = append(allErrs, validateClassParameters(parameters.GeneratedFrom, field.NewPath("generatedFrom"))...)
allErrs = append(allErrs, validateResourceFilters(parameters.Filters, field.NewPath("filters"), filtersStored)...)
allErrs = append(allErrs, validateVendorParameters(parameters.VendorParameters, field.NewPath("vendorParameters"))...)
return allErrs
}
func validateResourceFilters(filters []resource.ResourceFilter, fldPath *field.Path, filtersStored bool) field.ErrorList {
var allErrs field.ErrorList
driverNames := sets.New[string]()
for i, filter := range filters {
idxPath := fldPath.Index(i)
driverName := filter.DriverName
allErrs = append(allErrs, validateResourceDriverName(driverName, idxPath.Child("driverName"))...)
if driverNames.Has(driverName) {
allErrs = append(allErrs, field.Duplicate(idxPath.Child("driverName"), driverName))
} else {
driverNames.Insert(driverName)
}
allErrs = append(allErrs, validateResourceFilterModel(&filter.ResourceFilterModel, idxPath, filtersStored)...)
}
return allErrs
}
func validateResourceFilterModel(model *resource.ResourceFilterModel, fldPath *field.Path, filtersStored bool) field.ErrorList {
var allErrs field.ErrorList
entries := sets.New[string]()
if model.NamedResources != nil {
entries.Insert("namedResources")
allErrs = append(allErrs, namedresourcesvalidation.ValidateFilter(namedresourcesvalidation.Options{StoredExpressions: filtersStored}, model.NamedResources, fldPath.Child("namedResources"))...)
}
switch len(entries) {
case 0:
allErrs = append(allErrs, field.Required(fldPath, "exactly one structured model field must be set"))
case 1:
// Okay.
default:
allErrs = append(allErrs, field.Invalid(fldPath, sets.List(entries), "exactly one field must be set, not several"))
}
return allErrs
}
func validateVendorParameters(parameters []resource.VendorParameters, fldPath *field.Path) field.ErrorList {
var allErrs field.ErrorList
driverNames := sets.New[string]()
for i, parameters := range parameters {
idxPath := fldPath.Index(i)
driverName := parameters.DriverName
allErrs = append(allErrs, validateResourceDriverName(driverName, idxPath.Child("driverName"))...)
if driverNames.Has(driverName) {
allErrs = append(allErrs, field.Duplicate(idxPath.Child("driverName"), driverName))
} else {
driverNames.Insert(driverName)
}
}
return allErrs
}
// ValidateResourceClassParametersUpdate tests if a ResourceClassParameters update is valid.
func ValidateResourceClassParametersUpdate(resourceClassParameters, oldResourceClassParameters *resource.ResourceClassParameters) field.ErrorList {
allErrs := corevalidation.ValidateObjectMetaUpdate(&resourceClassParameters.ObjectMeta, &oldResourceClassParameters.ObjectMeta, field.NewPath("metadata"))
allErrs = append(allErrs, ValidateResourceClassParameters(resourceClassParameters)...)
return allErrs
}

View File

@@ -380,6 +380,90 @@ func TestValidateClaimStatusUpdate(t *testing.T) {
return claim
},
},
"valid-add-empty-allocation-structured": {
oldClaim: validClaim,
update: func(claim *resource.ResourceClaim) *resource.ResourceClaim {
claim.Status.DriverName = "valid"
claim.Status.Allocation = &resource.AllocationResult{
ResourceHandles: []resource.ResourceHandle{
{
DriverName: "valid",
StructuredData: &resource.StructuredResourceHandle{},
},
},
}
return claim
},
},
"valid-add-allocation-structured": {
oldClaim: validClaim,
update: func(claim *resource.ResourceClaim) *resource.ResourceClaim {
claim.Status.DriverName = "valid"
claim.Status.Allocation = &resource.AllocationResult{
ResourceHandles: []resource.ResourceHandle{
{
DriverName: "valid",
StructuredData: &resource.StructuredResourceHandle{
NodeName: "worker",
},
},
},
}
return claim
},
},
"invalid-add-allocation-structured": {
wantFailures: field.ErrorList{
field.Invalid(field.NewPath("status", "allocation", "resourceHandles").Index(0).Child("structuredData", "nodeName"), "&^!", "a lowercase RFC 1123 subdomain must consist of lower case alphanumeric characters, '-' or '.', and must start and end with an alphanumeric character (e.g. 'example.com', regex used for validation is '[a-z0-9]([-a-z0-9]*[a-z0-9])?(\\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*')"),
field.Required(field.NewPath("status", "allocation", "resourceHandles").Index(0).Child("structuredData", "results").Index(1), "exactly one structured model field must be set"),
},
oldClaim: validClaim,
update: func(claim *resource.ResourceClaim) *resource.ResourceClaim {
claim.Status.DriverName = "valid"
claim.Status.Allocation = &resource.AllocationResult{
ResourceHandles: []resource.ResourceHandle{
{
DriverName: "valid",
StructuredData: &resource.StructuredResourceHandle{
NodeName: "&^!",
Results: []resource.DriverAllocationResult{
{
AllocationResultModel: resource.AllocationResultModel{
NamedResources: &resource.NamedResourcesAllocationResult{
Name: "some-resource-instance",
},
},
},
{
AllocationResultModel: resource.AllocationResultModel{}, // invalid
},
},
},
},
},
}
return claim
},
},
"invalid-duplicated-data": {
wantFailures: field.ErrorList{field.Invalid(field.NewPath("status", "allocation", "resourceHandles").Index(0), nil, "data and structuredData are mutually exclusive")},
oldClaim: validClaim,
update: func(claim *resource.ResourceClaim) *resource.ResourceClaim {
claim.Status.DriverName = "valid"
claim.Status.Allocation = &resource.AllocationResult{
ResourceHandles: []resource.ResourceHandle{
{
DriverName: "valid",
Data: "something",
StructuredData: &resource.StructuredResourceHandle{
NodeName: "worker",
},
},
},
}
return claim
},
},
"invalid-allocation-resourceHandles": {
wantFailures: field.ErrorList{field.TooLongMaxLength(field.NewPath("status", "allocation", "resourceHandles"), resource.AllocationResultResourceHandlesMaxSize+1, resource.AllocationResultResourceHandlesMaxSize)},
oldClaim: validClaim,
@@ -413,7 +497,7 @@ func TestValidateClaimStatusUpdate(t *testing.T) {
},
},
"invalid-allocation-resource-handle-data": {
wantFailures: field.ErrorList{field.TooLongMaxLength(field.NewPath("status", "allocation", "resourceHandles[0]", "data"), resource.ResourceHandleDataMaxSize+1, resource.ResourceHandleDataMaxSize)},
wantFailures: field.ErrorList{field.TooLongMaxLength(field.NewPath("status", "allocation", "resourceHandles").Index(0).Child("data"), resource.ResourceHandleDataMaxSize+1, resource.ResourceHandleDataMaxSize)},
oldClaim: validClaim,
update: func(claim *resource.ResourceClaim) *resource.ResourceClaim {
claim.Status.DriverName = "valid"

View File

@@ -0,0 +1,306 @@
/*
Copyright 2022 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package validation
import (
"testing"
"github.com/stretchr/testify/assert"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/validation/field"
"k8s.io/kubernetes/pkg/apis/resource"
"k8s.io/utils/ptr"
)
func testResourceClaimParameters(name, namespace string, requests []resource.DriverRequests) *resource.ResourceClaimParameters {
return &resource.ResourceClaimParameters{
ObjectMeta: metav1.ObjectMeta{
Name: name,
Namespace: namespace,
},
DriverRequests: requests,
}
}
var goodRequests []resource.DriverRequests
func TestValidateResourceClaimParameters(t *testing.T) {
goodName := "foo"
badName := "!@#$%^"
badValue := "spaces not allowed"
now := metav1.Now()
scenarios := map[string]struct {
parameters *resource.ResourceClaimParameters
wantFailures field.ErrorList
}{
"good": {
parameters: testResourceClaimParameters(goodName, goodName, goodRequests),
},
"missing-name": {
wantFailures: field.ErrorList{field.Required(field.NewPath("metadata", "name"), "name or generateName is required")},
parameters: testResourceClaimParameters("", goodName, goodRequests),
},
"missing-namespace": {
wantFailures: field.ErrorList{field.Required(field.NewPath("metadata", "namespace"), "")},
parameters: testResourceClaimParameters(goodName, "", goodRequests),
},
"bad-name": {
wantFailures: field.ErrorList{field.Invalid(field.NewPath("metadata", "name"), badName, "a lowercase RFC 1123 subdomain must consist of lower case alphanumeric characters, '-' or '.', and must start and end with an alphanumeric character (e.g. 'example.com', regex used for validation is '[a-z0-9]([-a-z0-9]*[a-z0-9])?(\\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*')")},
parameters: testResourceClaimParameters(badName, goodName, goodRequests),
},
"bad-namespace": {
wantFailures: field.ErrorList{field.Invalid(field.NewPath("metadata", "namespace"), badName, "a lowercase RFC 1123 label must consist of lower case alphanumeric characters or '-', and must start and end with an alphanumeric character (e.g. 'my-name', or '123-abc', regex used for validation is '[a-z0-9]([-a-z0-9]*[a-z0-9])?')")},
parameters: testResourceClaimParameters(goodName, badName, goodRequests),
},
"generate-name": {
parameters: func() *resource.ResourceClaimParameters {
parameters := testResourceClaimParameters(goodName, goodName, goodRequests)
parameters.GenerateName = "prefix-"
return parameters
}(),
},
"uid": {
parameters: func() *resource.ResourceClaimParameters {
parameters := testResourceClaimParameters(goodName, goodName, goodRequests)
parameters.UID = "ac051fac-2ead-46d9-b8b4-4e0fbeb7455d"
return parameters
}(),
},
"resource-version": {
parameters: func() *resource.ResourceClaimParameters {
parameters := testResourceClaimParameters(goodName, goodName, goodRequests)
parameters.ResourceVersion = "1"
return parameters
}(),
},
"generation": {
parameters: func() *resource.ResourceClaimParameters {
parameters := testResourceClaimParameters(goodName, goodName, goodRequests)
parameters.Generation = 100
return parameters
}(),
},
"creation-timestamp": {
parameters: func() *resource.ResourceClaimParameters {
parameters := testResourceClaimParameters(goodName, goodName, goodRequests)
parameters.CreationTimestamp = now
return parameters
}(),
},
"deletion-grace-period-seconds": {
parameters: func() *resource.ResourceClaimParameters {
parameters := testResourceClaimParameters(goodName, goodName, goodRequests)
parameters.DeletionGracePeriodSeconds = ptr.To[int64](10)
return parameters
}(),
},
"owner-references": {
parameters: func() *resource.ResourceClaimParameters {
parameters := testResourceClaimParameters(goodName, goodName, goodRequests)
parameters.OwnerReferences = []metav1.OwnerReference{
{
APIVersion: "v1",
Kind: "pod",
Name: "foo",
UID: "ac051fac-2ead-46d9-b8b4-4e0fbeb7455d",
},
}
return parameters
}(),
},
"finalizers": {
parameters: func() *resource.ResourceClaimParameters {
parameters := testResourceClaimParameters(goodName, goodName, goodRequests)
parameters.Finalizers = []string{
"example.com/foo",
}
return parameters
}(),
},
"managed-fields": {
parameters: func() *resource.ResourceClaimParameters {
parameters := testResourceClaimParameters(goodName, goodName, goodRequests)
parameters.ManagedFields = []metav1.ManagedFieldsEntry{
{
FieldsType: "FieldsV1",
Operation: "Apply",
APIVersion: "apps/v1",
Manager: "foo",
},
}
return parameters
}(),
},
"good-labels": {
parameters: func() *resource.ResourceClaimParameters {
parameters := testResourceClaimParameters(goodName, goodName, goodRequests)
parameters.Labels = map[string]string{
"apps.kubernetes.io/name": "test",
}
return parameters
}(),
},
"bad-labels": {
wantFailures: field.ErrorList{field.Invalid(field.NewPath("metadata", "labels"), badValue, "a valid label must be an empty string or consist of alphanumeric characters, '-', '_' or '.', and must start and end with an alphanumeric character (e.g. 'MyValue', or 'my_value', or '12345', regex used for validation is '(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])?')")},
parameters: func() *resource.ResourceClaimParameters {
parameters := testResourceClaimParameters(goodName, goodName, goodRequests)
parameters.Labels = map[string]string{
"hello-world": badValue,
}
return parameters
}(),
},
"good-annotations": {
parameters: func() *resource.ResourceClaimParameters {
parameters := testResourceClaimParameters(goodName, goodName, goodRequests)
parameters.Annotations = map[string]string{
"foo": "bar",
}
return parameters
}(),
},
"bad-annotations": {
wantFailures: field.ErrorList{field.Invalid(field.NewPath("metadata", "annotations"), badName, "name part must consist of alphanumeric characters, '-', '_' or '.', and must start and end with an alphanumeric character (e.g. 'MyName', or 'my.name', or '123-abc', regex used for validation is '([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9]')")},
parameters: func() *resource.ResourceClaimParameters {
parameters := testResourceClaimParameters(goodName, goodName, goodRequests)
parameters.Annotations = map[string]string{
badName: "hello world",
}
return parameters
}(),
},
"empty-model": {
wantFailures: field.ErrorList{field.Required(field.NewPath("driverRequests").Index(0).Child("requests").Index(0), "exactly one structured model field must be set")},
parameters: func() *resource.ResourceClaimParameters {
parameters := testResourceClaimParameters(goodName, goodName, goodRequests)
parameters.DriverRequests = []resource.DriverRequests{{DriverName: goodName, Requests: []resource.ResourceRequest{{}}}}
return parameters
}(),
},
"empty-requests": {
wantFailures: field.ErrorList{field.Required(field.NewPath("driverRequests").Index(0).Child("requests"), "empty entries with no requests are not allowed")},
parameters: func() *resource.ResourceClaimParameters {
parameters := testResourceClaimParameters(goodName, goodName, goodRequests)
parameters.DriverRequests = []resource.DriverRequests{{DriverName: goodName}}
return parameters
}(),
},
"invalid-driver": {
wantFailures: field.ErrorList{field.Invalid(field.NewPath("driverRequests").Index(1).Child("driverName"), badName, "a lowercase RFC 1123 subdomain must consist of lower case alphanumeric characters, '-' or '.', and must start and end with an alphanumeric character (e.g. 'example.com', regex used for validation is '[a-z0-9]([-a-z0-9]*[a-z0-9])?(\\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*')")},
parameters: func() *resource.ResourceClaimParameters {
parameters := testResourceClaimParameters(goodName, goodName, goodRequests)
parameters.DriverRequests = []resource.DriverRequests{
{
DriverName: goodName,
Requests: []resource.ResourceRequest{
{
ResourceRequestModel: resource.ResourceRequestModel{
NamedResources: &resource.NamedResourcesRequest{Selector: "true"},
},
},
},
},
{
DriverName: badName,
Requests: []resource.ResourceRequest{
{
ResourceRequestModel: resource.ResourceRequestModel{
NamedResources: &resource.NamedResourcesRequest{Selector: "true"},
},
},
},
},
}
return parameters
}(),
},
"duplicate-driver": {
wantFailures: field.ErrorList{field.Duplicate(field.NewPath("driverRequests").Index(1).Child("driverName"), goodName)},
parameters: func() *resource.ResourceClaimParameters {
parameters := testResourceClaimParameters(goodName, goodName, goodRequests)
parameters.DriverRequests = []resource.DriverRequests{
{
DriverName: goodName,
Requests: []resource.ResourceRequest{
{
ResourceRequestModel: resource.ResourceRequestModel{
NamedResources: &resource.NamedResourcesRequest{Selector: "true"},
},
},
},
},
{
DriverName: goodName,
Requests: []resource.ResourceRequest{
{
ResourceRequestModel: resource.ResourceRequestModel{
NamedResources: &resource.NamedResourcesRequest{Selector: "true"},
},
},
},
},
}
return parameters
}(),
},
}
for name, scenario := range scenarios {
t.Run(name, func(t *testing.T) {
errs := ValidateResourceClaimParameters(scenario.parameters)
assert.Equal(t, scenario.wantFailures, errs)
})
}
}
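// For reference, a minimal well-formed DriverRequests entry as exercised by the
// scenarios above (a sketch; "foo" stands in for any valid lowercase RFC 1123 driver name):
//
//	resource.DriverRequests{
//		DriverName: "foo",
//		Requests: []resource.ResourceRequest{{
//			ResourceRequestModel: resource.ResourceRequestModel{
//				NamedResources: &resource.NamedResourcesRequest{Selector: "true"},
//			},
//		}},
//	}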
func TestValidateResourceClaimParametersUpdate(t *testing.T) {
name := "valid"
validResourceClaimParameters := testResourceClaimParameters(name, name, nil)
scenarios := map[string]struct {
oldResourceClaimParameters *resource.ResourceClaimParameters
update func(claim *resource.ResourceClaimParameters) *resource.ResourceClaimParameters
wantFailures field.ErrorList
}{
"valid-no-op-update": {
oldResourceClaimParameters: validResourceClaimParameters,
update: func(claim *resource.ResourceClaimParameters) *resource.ResourceClaimParameters { return claim },
},
"invalid-name-update": {
oldResourceClaimParameters: validResourceClaimParameters,
update: func(claim *resource.ResourceClaimParameters) *resource.ResourceClaimParameters {
claim.Name += "-update"
return claim
},
wantFailures: field.ErrorList{field.Invalid(field.NewPath("metadata", "name"), name+"-update", "field is immutable")},
},
}
for name, scenario := range scenarios {
t.Run(name, func(t *testing.T) {
scenario.oldResourceClaimParameters.ResourceVersion = "1"
errs := ValidateResourceClaimParametersUpdate(scenario.update(scenario.oldResourceClaimParameters.DeepCopy()), scenario.oldResourceClaimParameters)
assert.Equal(t, scenario.wantFailures, errs)
})
}
}

View File

@@ -0,0 +1,313 @@
/*
Copyright 2022 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package validation
import (
"testing"
"github.com/stretchr/testify/assert"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/validation/field"
"k8s.io/kubernetes/pkg/apis/resource"
"k8s.io/utils/ptr"
)
func testResourceClassParameters(name, namespace string, filters []resource.ResourceFilter) *resource.ResourceClassParameters {
return &resource.ResourceClassParameters{
ObjectMeta: metav1.ObjectMeta{
Name: name,
Namespace: namespace,
},
Filters: filters,
}
}
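// goodFilters is deliberately left nil below: ResourceClassParameters without any
// filters are expected to pass validation unchanged (see the "good" scenario).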
var goodFilters []resource.ResourceFilter
func TestValidateResourceClassParameters(t *testing.T) {
goodName := "foo"
badName := "!@#$%^"
badValue := "spaces not allowed"
now := metav1.Now()
scenarios := map[string]struct {
parameters *resource.ResourceClassParameters
wantFailures field.ErrorList
}{
"good": {
parameters: testResourceClassParameters(goodName, goodName, goodFilters),
},
"missing-name": {
wantFailures: field.ErrorList{field.Required(field.NewPath("metadata", "name"), "name or generateName is required")},
parameters: testResourceClassParameters("", goodName, goodFilters),
},
"missing-namespace": {
wantFailures: field.ErrorList{field.Required(field.NewPath("metadata", "namespace"), "")},
parameters: testResourceClassParameters(goodName, "", goodFilters),
},
"bad-name": {
wantFailures: field.ErrorList{field.Invalid(field.NewPath("metadata", "name"), badName, "a lowercase RFC 1123 subdomain must consist of lower case alphanumeric characters, '-' or '.', and must start and end with an alphanumeric character (e.g. 'example.com', regex used for validation is '[a-z0-9]([-a-z0-9]*[a-z0-9])?(\\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*')")},
parameters: testResourceClassParameters(badName, goodName, goodFilters),
},
"bad-namespace": {
wantFailures: field.ErrorList{field.Invalid(field.NewPath("metadata", "namespace"), badName, "a lowercase RFC 1123 label must consist of lower case alphanumeric characters or '-', and must start and end with an alphanumeric character (e.g. 'my-name', or '123-abc', regex used for validation is '[a-z0-9]([-a-z0-9]*[a-z0-9])?')")},
parameters: testResourceClassParameters(goodName, badName, goodFilters),
},
"generate-name": {
parameters: func() *resource.ResourceClassParameters {
parameters := testResourceClassParameters(goodName, goodName, goodFilters)
parameters.GenerateName = "prefix-"
return parameters
}(),
},
"uid": {
parameters: func() *resource.ResourceClassParameters {
parameters := testResourceClassParameters(goodName, goodName, goodFilters)
parameters.UID = "ac051fac-2ead-46d9-b8b4-4e0fbeb7455d"
return parameters
}(),
},
"resource-version": {
parameters: func() *resource.ResourceClassParameters {
parameters := testResourceClassParameters(goodName, goodName, goodFilters)
parameters.ResourceVersion = "1"
return parameters
}(),
},
"generation": {
parameters: func() *resource.ResourceClassParameters {
parameters := testResourceClassParameters(goodName, goodName, goodFilters)
parameters.Generation = 100
return parameters
}(),
},
"creation-timestamp": {
parameters: func() *resource.ResourceClassParameters {
parameters := testResourceClassParameters(goodName, goodName, goodFilters)
parameters.CreationTimestamp = now
return parameters
}(),
},
"deletion-grace-period-seconds": {
parameters: func() *resource.ResourceClassParameters {
parameters := testResourceClassParameters(goodName, goodName, goodFilters)
parameters.DeletionGracePeriodSeconds = ptr.To[int64](10)
return parameters
}(),
},
"owner-references": {
parameters: func() *resource.ResourceClassParameters {
parameters := testResourceClassParameters(goodName, goodName, goodFilters)
parameters.OwnerReferences = []metav1.OwnerReference{
{
APIVersion: "v1",
Kind: "pod",
Name: "foo",
UID: "ac051fac-2ead-46d9-b8b4-4e0fbeb7455d",
},
}
return parameters
}(),
},
"finalizers": {
parameters: func() *resource.ResourceClassParameters {
parameters := testResourceClassParameters(goodName, goodName, goodFilters)
parameters.Finalizers = []string{
"example.com/foo",
}
return parameters
}(),
},
"managed-fields": {
parameters: func() *resource.ResourceClassParameters {
parameters := testResourceClassParameters(goodName, goodName, goodFilters)
parameters.ManagedFields = []metav1.ManagedFieldsEntry{
{
FieldsType: "FieldsV1",
Operation: "Apply",
APIVersion: "apps/v1",
Manager: "foo",
},
}
return parameters
}(),
},
"good-labels": {
parameters: func() *resource.ResourceClassParameters {
parameters := testResourceClassParameters(goodName, goodName, goodFilters)
parameters.Labels = map[string]string{
"apps.kubernetes.io/name": "test",
}
return parameters
}(),
},
"bad-labels": {
wantFailures: field.ErrorList{field.Invalid(field.NewPath("metadata", "labels"), badValue, "a valid label must be an empty string or consist of alphanumeric characters, '-', '_' or '.', and must start and end with an alphanumeric character (e.g. 'MyValue', or 'my_value', or '12345', regex used for validation is '(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])?')")},
parameters: func() *resource.ResourceClassParameters {
parameters := testResourceClassParameters(goodName, goodName, goodFilters)
parameters.Labels = map[string]string{
"hello-world": badValue,
}
return parameters
}(),
},
"good-annotations": {
parameters: func() *resource.ResourceClassParameters {
parameters := testResourceClassParameters(goodName, goodName, goodFilters)
parameters.Annotations = map[string]string{
"foo": "bar",
}
return parameters
}(),
},
"bad-annotations": {
wantFailures: field.ErrorList{field.Invalid(field.NewPath("metadata", "annotations"), badName, "name part must consist of alphanumeric characters, '-', '_' or '.', and must start and end with an alphanumeric character (e.g. 'MyName', or 'my.name', or '123-abc', regex used for validation is '([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9]')")},
parameters: func() *resource.ResourceClassParameters {
parameters := testResourceClassParameters(goodName, goodName, goodFilters)
parameters.Annotations = map[string]string{
badName: "hello world",
}
return parameters
}(),
},
"empty-model": {
wantFailures: field.ErrorList{field.Required(field.NewPath("filters").Index(0), "exactly one structured model field must be set")},
parameters: func() *resource.ResourceClassParameters {
parameters := testResourceClassParameters(goodName, goodName, goodFilters)
parameters.Filters = []resource.ResourceFilter{{DriverName: goodName}}
return parameters
}(),
},
"filters-invalid-driver": {
wantFailures: field.ErrorList{field.Invalid(field.NewPath("filters").Index(1).Child("driverName"), badName, "a lowercase RFC 1123 subdomain must consist of lower case alphanumeric characters, '-' or '.', and must start and end with an alphanumeric character (e.g. 'example.com', regex used for validation is '[a-z0-9]([-a-z0-9]*[a-z0-9])?(\\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*')")},
parameters: func() *resource.ResourceClassParameters {
parameters := testResourceClassParameters(goodName, goodName, goodFilters)
parameters.Filters = []resource.ResourceFilter{
{
DriverName: goodName,
ResourceFilterModel: resource.ResourceFilterModel{
NamedResources: &resource.NamedResourcesFilter{Selector: "true"},
},
},
{
DriverName: badName,
ResourceFilterModel: resource.ResourceFilterModel{
NamedResources: &resource.NamedResourcesFilter{Selector: "true"},
},
},
}
return parameters
}(),
},
"filters-duplicate-driver": {
wantFailures: field.ErrorList{field.Duplicate(field.NewPath("filters").Index(1).Child("driverName"), goodName)},
parameters: func() *resource.ResourceClassParameters {
parameters := testResourceClassParameters(goodName, goodName, goodFilters)
parameters.Filters = []resource.ResourceFilter{
{
DriverName: goodName,
ResourceFilterModel: resource.ResourceFilterModel{
NamedResources: &resource.NamedResourcesFilter{Selector: "true"},
},
},
{
DriverName: goodName,
ResourceFilterModel: resource.ResourceFilterModel{
NamedResources: &resource.NamedResourcesFilter{Selector: "true"},
},
},
}
return parameters
}(),
},
"parameters-invalid-driver": {
wantFailures: field.ErrorList{field.Invalid(field.NewPath("vendorParameters").Index(1).Child("driverName"), badName, "a lowercase RFC 1123 subdomain must consist of lower case alphanumeric characters, '-' or '.', and must start and end with an alphanumeric character (e.g. 'example.com', regex used for validation is '[a-z0-9]([-a-z0-9]*[a-z0-9])?(\\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*')")},
parameters: func() *resource.ResourceClassParameters {
parameters := testResourceClassParameters(goodName, goodName, goodFilters)
parameters.VendorParameters = []resource.VendorParameters{
{
DriverName: goodName,
},
{
DriverName: badName,
},
}
return parameters
}(),
},
"parameters-duplicate-driver": {
wantFailures: field.ErrorList{field.Duplicate(field.NewPath("vendorParameters").Index(1).Child("driverName"), goodName)},
parameters: func() *resource.ResourceClassParameters {
parameters := testResourceClassParameters(goodName, goodName, goodFilters)
parameters.VendorParameters = []resource.VendorParameters{
{
DriverName: goodName,
},
{
DriverName: goodName,
},
}
return parameters
}(),
},
}
for name, scenario := range scenarios {
t.Run(name, func(t *testing.T) {
errs := ValidateResourceClassParameters(scenario.parameters)
assert.Equal(t, scenario.wantFailures, errs)
})
}
}
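// For reference, a well-formed ResourceFilter entry as used by the scenarios above
// (a sketch; "foo" stands in for any valid driver name and "true" is the trivial
// selector the tests rely on):
//
//	resource.ResourceFilter{
//		DriverName: "foo",
//		ResourceFilterModel: resource.ResourceFilterModel{
//			NamedResources: &resource.NamedResourcesFilter{Selector: "true"},
//		},
//	}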
func TestValidateResourceClassParametersUpdate(t *testing.T) {
name := "valid"
validResourceClassParameters := testResourceClassParameters(name, name, nil)
scenarios := map[string]struct {
oldResourceClassParameters *resource.ResourceClassParameters
update func(class *resource.ResourceClassParameters) *resource.ResourceClassParameters
wantFailures field.ErrorList
}{
"valid-no-op-update": {
oldResourceClassParameters: validResourceClassParameters,
update: func(class *resource.ResourceClassParameters) *resource.ResourceClassParameters { return class },
},
"invalid-name-update": {
oldResourceClassParameters: validResourceClassParameters,
update: func(class *resource.ResourceClassParameters) *resource.ResourceClassParameters {
class.Name += "-update"
return class
},
wantFailures: field.ErrorList{field.Invalid(field.NewPath("metadata", "name"), name+"-update", "field is immutable")},
},
}
for name, scenario := range scenarios {
t.Run(name, func(t *testing.T) {
scenario.oldResourceClassParameters.ResourceVersion = "1"
errs := ValidateResourceClassParametersUpdate(scenario.update(scenario.oldResourceClassParameters.DeepCopy()), scenario.oldResourceClassParameters)
assert.Equal(t, scenario.wantFailures, errs)
})
}
}

View File

@@ -0,0 +1,255 @@
/*
Copyright 2022 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package validation
import (
"testing"
"github.com/stretchr/testify/assert"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/validation/field"
"k8s.io/kubernetes/pkg/apis/resource"
"k8s.io/utils/ptr"
)
func testResourceSlice(name, nodeName, driverName string) *resource.ResourceSlice {
return &resource.ResourceSlice{
ObjectMeta: metav1.ObjectMeta{
Name: name,
},
NodeName: nodeName,
DriverName: driverName,
NodeResourceModel: resource.NodeResourceModel{
NamedResources: &resource.NamedResourcesResources{},
},
}
}
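// The helper always populates the NamedResources model so that only the
// "empty-model" scenario below has to clear NodeResourceModel explicitly.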
func TestValidateResourceSlice(t *testing.T) {
goodName := "foo"
badName := "!@#$%^"
driverName := "test.example.com"
now := metav1.Now()
badValue := "spaces not allowed"
scenarios := map[string]struct {
slice *resource.ResourceSlice
wantFailures field.ErrorList
}{
"good": {
slice: testResourceSlice(goodName, goodName, driverName),
},
"missing-name": {
wantFailures: field.ErrorList{field.Required(field.NewPath("metadata", "name"), "name or generateName is required")},
slice: testResourceSlice("", goodName, driverName),
},
"bad-name": {
wantFailures: field.ErrorList{field.Invalid(field.NewPath("metadata", "name"), badName, "a lowercase RFC 1123 subdomain must consist of lower case alphanumeric characters, '-' or '.', and must start and end with an alphanumeric character (e.g. 'example.com', regex used for validation is '[a-z0-9]([-a-z0-9]*[a-z0-9])?(\\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*')")},
slice: testResourceSlice(badName, goodName, driverName),
},
"generate-name": {
slice: func() *resource.ResourceSlice {
slice := testResourceSlice(goodName, goodName, driverName)
slice.GenerateName = "prefix-"
return slice
}(),
},
"uid": {
slice: func() *resource.ResourceSlice {
slice := testResourceSlice(goodName, goodName, driverName)
slice.UID = "ac051fac-2ead-46d9-b8b4-4e0fbeb7455d"
return slice
}(),
},
"resource-version": {
slice: func() *resource.ResourceSlice {
slice := testResourceSlice(goodName, goodName, driverName)
slice.ResourceVersion = "1"
return slice
}(),
},
"generation": {
slice: func() *resource.ResourceSlice {
slice := testResourceSlice(goodName, goodName, driverName)
slice.Generation = 100
return slice
}(),
},
"creation-timestamp": {
slice: func() *resource.ResourceSlice {
slice := testResourceSlice(goodName, goodName, driverName)
slice.CreationTimestamp = now
return slice
}(),
},
"deletion-grace-period-seconds": {
slice: func() *resource.ResourceSlice {
slice := testResourceSlice(goodName, goodName, driverName)
slice.DeletionGracePeriodSeconds = ptr.To[int64](10)
return slice
}(),
},
"owner-references": {
slice: func() *resource.ResourceSlice {
slice := testResourceSlice(goodName, goodName, driverName)
slice.OwnerReferences = []metav1.OwnerReference{
{
APIVersion: "v1",
Kind: "pod",
Name: "foo",
UID: "ac051fac-2ead-46d9-b8b4-4e0fbeb7455d",
},
}
return slice
}(),
},
"finalizers": {
slice: func() *resource.ResourceSlice {
slice := testResourceSlice(goodName, goodName, driverName)
slice.Finalizers = []string{
"example.com/foo",
}
return slice
}(),
},
"managed-fields": {
slice: func() *resource.ResourceSlice {
slice := testResourceSlice(goodName, goodName, driverName)
slice.ManagedFields = []metav1.ManagedFieldsEntry{
{
FieldsType: "FieldsV1",
Operation: "Apply",
APIVersion: "apps/v1",
Manager: "foo",
},
}
return slice
}(),
},
"good-labels": {
slice: func() *resource.ResourceSlice {
slice := testResourceSlice(goodName, goodName, driverName)
slice.Labels = map[string]string{
"apps.kubernetes.io/name": "test",
}
return slice
}(),
},
"bad-labels": {
wantFailures: field.ErrorList{field.Invalid(field.NewPath("metadata", "labels"), badValue, "a valid label must be an empty string or consist of alphanumeric characters, '-', '_' or '.', and must start and end with an alphanumeric character (e.g. 'MyValue', or 'my_value', or '12345', regex used for validation is '(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])?')")},
slice: func() *resource.ResourceSlice {
slice := testResourceSlice(goodName, goodName, driverName)
slice.Labels = map[string]string{
"hello-world": badValue,
}
return slice
}(),
},
"good-annotations": {
slice: func() *resource.ResourceSlice {
slice := testResourceSlice(goodName, goodName, driverName)
slice.Annotations = map[string]string{
"foo": "bar",
}
return slice
}(),
},
"bad-annotations": {
wantFailures: field.ErrorList{field.Invalid(field.NewPath("metadata", "annotations"), badName, "name part must consist of alphanumeric characters, '-', '_' or '.', and must start and end with an alphanumeric character (e.g. 'MyName', or 'my.name', or '123-abc', regex used for validation is '([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9]')")},
slice: func() *resource.ResourceSlice {
slice := testResourceSlice(goodName, goodName, driverName)
slice.Annotations = map[string]string{
badName: "hello world",
}
return slice
}(),
},
"bad-nodename": {
wantFailures: field.ErrorList{field.Invalid(field.NewPath("nodeName"), badName, "a lowercase RFC 1123 subdomain must consist of lower case alphanumeric characters, '-' or '.', and must start and end with an alphanumeric character (e.g. 'example.com', regex used for validation is '[a-z0-9]([-a-z0-9]*[a-z0-9])?(\\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*')")},
slice: testResourceSlice(goodName, badName, driverName),
},
"bad-drivername": {
wantFailures: field.ErrorList{field.Invalid(field.NewPath("driverName"), badName, "a lowercase RFC 1123 subdomain must consist of lower case alphanumeric characters, '-' or '.', and must start and end with an alphanumeric character (e.g. 'example.com', regex used for validation is '[a-z0-9]([-a-z0-9]*[a-z0-9])?(\\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*')")},
slice: testResourceSlice(goodName, goodName, badName),
},
"empty-model": {
wantFailures: field.ErrorList{field.Required(nil, "exactly one structured model field must be set")},
slice: func() *resource.ResourceSlice {
slice := testResourceSlice(goodName, goodName, driverName)
slice.NodeResourceModel = resource.NodeResourceModel{}
return slice
}(),
},
}
for name, scenario := range scenarios {
t.Run(name, func(t *testing.T) {
errs := ValidateResourceSlice(scenario.slice)
assert.Equal(t, scenario.wantFailures, errs)
})
}
}
func TestValidateResourceSliceUpdate(t *testing.T) {
name := "valid"
validResourceSlice := testResourceSlice(name, name, name)
scenarios := map[string]struct {
oldResourceSlice *resource.ResourceSlice
update func(slice *resource.ResourceSlice) *resource.ResourceSlice
wantFailures field.ErrorList
}{
"valid-no-op-update": {
oldResourceSlice: validResourceSlice,
update: func(slice *resource.ResourceSlice) *resource.ResourceSlice { return slice },
},
"invalid-name-update": {
oldResourceSlice: validResourceSlice,
update: func(slice *resource.ResourceSlice) *resource.ResourceSlice {
slice.Name += "-update"
return slice
},
wantFailures: field.ErrorList{field.Invalid(field.NewPath("metadata", "name"), name+"-update", "field is immutable")},
},
"invalid-update-nodename": {
wantFailures: field.ErrorList{field.Invalid(field.NewPath("nodeName"), name+"-updated", "field is immutable")},
oldResourceSlice: validResourceSlice,
update: func(slice *resource.ResourceSlice) *resource.ResourceSlice {
slice.NodeName += "-updated"
return slice
},
},
"invalid-update-drivername": {
wantFailures: field.ErrorList{field.Invalid(field.NewPath("driverName"), name+"-updated", "field is immutable")},
oldResourceSlice: validResourceSlice,
update: func(slice *resource.ResourceSlice) *resource.ResourceSlice {
slice.DriverName += "-updated"
return slice
},
},
}
for name, scenario := range scenarios {
t.Run(name, func(t *testing.T) {
scenario.oldResourceSlice.ResourceVersion = "1"
errs := ValidateResourceSliceUpdate(scenario.update(scenario.oldResourceSlice.DeepCopy()), scenario.oldResourceSlice)
assert.Equal(t, scenario.wantFailures, errs)
})
}
}
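// Taken together, the update scenarios above pin down the immutability contract for
// ResourceSlice: metadata.name, nodeName and driverName must not change after creation.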

View File

@@ -32,7 +32,9 @@ func (in *AllocationResult) DeepCopyInto(out *AllocationResult) {
if in.ResourceHandles != nil {
in, out := &in.ResourceHandles, &out.ResourceHandles
*out = make([]ResourceHandle, len(*in))
copy(*out, *in)
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
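// (ResourceHandle now carries a pointer field, StructuredData, so the former shallow
// copy of the slice is replaced by the element-wise DeepCopyInto above.)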
if in.AvailableOnNodes != nil {
in, out := &in.AvailableOnNodes, &out.AvailableOnNodes
@@ -52,6 +54,298 @@ func (in *AllocationResult) DeepCopy() *AllocationResult {
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *AllocationResultModel) DeepCopyInto(out *AllocationResultModel) {
*out = *in
if in.NamedResources != nil {
in, out := &in.NamedResources, &out.NamedResources
*out = new(NamedResourcesAllocationResult)
**out = **in
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AllocationResultModel.
func (in *AllocationResultModel) DeepCopy() *AllocationResultModel {
if in == nil {
return nil
}
out := new(AllocationResultModel)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *DriverAllocationResult) DeepCopyInto(out *DriverAllocationResult) {
*out = *in
if in.VendorRequestParameters != nil {
out.VendorRequestParameters = in.VendorRequestParameters.DeepCopyObject()
}
in.AllocationResultModel.DeepCopyInto(&out.AllocationResultModel)
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DriverAllocationResult.
func (in *DriverAllocationResult) DeepCopy() *DriverAllocationResult {
if in == nil {
return nil
}
out := new(DriverAllocationResult)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *DriverRequests) DeepCopyInto(out *DriverRequests) {
*out = *in
if in.VendorParameters != nil {
out.VendorParameters = in.VendorParameters.DeepCopyObject()
}
if in.Requests != nil {
in, out := &in.Requests, &out.Requests
*out = make([]ResourceRequest, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DriverRequests.
func (in *DriverRequests) DeepCopy() *DriverRequests {
if in == nil {
return nil
}
out := new(DriverRequests)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *NamedResourcesAllocationResult) DeepCopyInto(out *NamedResourcesAllocationResult) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NamedResourcesAllocationResult.
func (in *NamedResourcesAllocationResult) DeepCopy() *NamedResourcesAllocationResult {
if in == nil {
return nil
}
out := new(NamedResourcesAllocationResult)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *NamedResourcesAttribute) DeepCopyInto(out *NamedResourcesAttribute) {
*out = *in
in.NamedResourcesAttributeValue.DeepCopyInto(&out.NamedResourcesAttributeValue)
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NamedResourcesAttribute.
func (in *NamedResourcesAttribute) DeepCopy() *NamedResourcesAttribute {
if in == nil {
return nil
}
out := new(NamedResourcesAttribute)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *NamedResourcesAttributeValue) DeepCopyInto(out *NamedResourcesAttributeValue) {
*out = *in
if in.QuantityValue != nil {
in, out := &in.QuantityValue, &out.QuantityValue
x := (*in).DeepCopy()
*out = &x
}
if in.BoolValue != nil {
in, out := &in.BoolValue, &out.BoolValue
*out = new(bool)
**out = **in
}
if in.IntValue != nil {
in, out := &in.IntValue, &out.IntValue
*out = new(int64)
**out = **in
}
if in.IntSliceValue != nil {
in, out := &in.IntSliceValue, &out.IntSliceValue
*out = new(NamedResourcesIntSlice)
(*in).DeepCopyInto(*out)
}
if in.StringValue != nil {
in, out := &in.StringValue, &out.StringValue
*out = new(string)
**out = **in
}
if in.StringSliceValue != nil {
in, out := &in.StringSliceValue, &out.StringSliceValue
*out = new(NamedResourcesStringSlice)
(*in).DeepCopyInto(*out)
}
if in.VersionValue != nil {
in, out := &in.VersionValue, &out.VersionValue
*out = new(string)
**out = **in
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NamedResourcesAttributeValue.
func (in *NamedResourcesAttributeValue) DeepCopy() *NamedResourcesAttributeValue {
if in == nil {
return nil
}
out := new(NamedResourcesAttributeValue)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *NamedResourcesFilter) DeepCopyInto(out *NamedResourcesFilter) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NamedResourcesFilter.
func (in *NamedResourcesFilter) DeepCopy() *NamedResourcesFilter {
if in == nil {
return nil
}
out := new(NamedResourcesFilter)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *NamedResourcesInstance) DeepCopyInto(out *NamedResourcesInstance) {
*out = *in
if in.Attributes != nil {
in, out := &in.Attributes, &out.Attributes
*out = make([]NamedResourcesAttribute, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NamedResourcesInstance.
func (in *NamedResourcesInstance) DeepCopy() *NamedResourcesInstance {
if in == nil {
return nil
}
out := new(NamedResourcesInstance)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *NamedResourcesIntSlice) DeepCopyInto(out *NamedResourcesIntSlice) {
*out = *in
if in.Ints != nil {
in, out := &in.Ints, &out.Ints
*out = make([]int64, len(*in))
copy(*out, *in)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NamedResourcesIntSlice.
func (in *NamedResourcesIntSlice) DeepCopy() *NamedResourcesIntSlice {
if in == nil {
return nil
}
out := new(NamedResourcesIntSlice)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *NamedResourcesRequest) DeepCopyInto(out *NamedResourcesRequest) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NamedResourcesRequest.
func (in *NamedResourcesRequest) DeepCopy() *NamedResourcesRequest {
if in == nil {
return nil
}
out := new(NamedResourcesRequest)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *NamedResourcesResources) DeepCopyInto(out *NamedResourcesResources) {
*out = *in
if in.Instances != nil {
in, out := &in.Instances, &out.Instances
*out = make([]NamedResourcesInstance, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NamedResourcesResources.
func (in *NamedResourcesResources) DeepCopy() *NamedResourcesResources {
if in == nil {
return nil
}
out := new(NamedResourcesResources)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *NamedResourcesStringSlice) DeepCopyInto(out *NamedResourcesStringSlice) {
*out = *in
if in.Strings != nil {
in, out := &in.Strings, &out.Strings
*out = make([]string, len(*in))
copy(*out, *in)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NamedResourcesStringSlice.
func (in *NamedResourcesStringSlice) DeepCopy() *NamedResourcesStringSlice {
if in == nil {
return nil
}
out := new(NamedResourcesStringSlice)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *NodeResourceModel) DeepCopyInto(out *NodeResourceModel) {
*out = *in
if in.NamedResources != nil {
in, out := &in.NamedResources, &out.NamedResources
*out = new(NamedResourcesResources)
(*in).DeepCopyInto(*out)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeResourceModel.
func (in *NodeResourceModel) DeepCopy() *NodeResourceModel {
if in == nil {
return nil
}
out := new(NodeResourceModel)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *PodSchedulingContext) DeepCopyInto(out *PodSchedulingContext) {
*out = *in
@@ -234,6 +528,77 @@ func (in *ResourceClaimList) DeepCopyObject() runtime.Object {
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ResourceClaimParameters) DeepCopyInto(out *ResourceClaimParameters) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
if in.GeneratedFrom != nil {
in, out := &in.GeneratedFrom, &out.GeneratedFrom
*out = new(ResourceClaimParametersReference)
**out = **in
}
if in.DriverRequests != nil {
in, out := &in.DriverRequests, &out.DriverRequests
*out = make([]DriverRequests, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceClaimParameters.
func (in *ResourceClaimParameters) DeepCopy() *ResourceClaimParameters {
if in == nil {
return nil
}
out := new(ResourceClaimParameters)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *ResourceClaimParameters) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ResourceClaimParametersList) DeepCopyInto(out *ResourceClaimParametersList) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ListMeta.DeepCopyInto(&out.ListMeta)
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]ResourceClaimParameters, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceClaimParametersList.
func (in *ResourceClaimParametersList) DeepCopy() *ResourceClaimParametersList {
if in == nil {
return nil
}
out := new(ResourceClaimParametersList)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *ResourceClaimParametersList) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ResourceClaimParametersReference) DeepCopyInto(out *ResourceClaimParametersReference) {
*out = *in
@@ -411,6 +776,11 @@ func (in *ResourceClass) DeepCopyInto(out *ResourceClass) {
*out = new(core.NodeSelector)
(*in).DeepCopyInto(*out)
}
if in.StructuredParameters != nil {
in, out := &in.StructuredParameters, &out.StructuredParameters
*out = new(bool)
**out = **in
}
return
}
@@ -465,6 +835,84 @@ func (in *ResourceClassList) DeepCopyObject() runtime.Object {
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ResourceClassParameters) DeepCopyInto(out *ResourceClassParameters) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
if in.GeneratedFrom != nil {
in, out := &in.GeneratedFrom, &out.GeneratedFrom
*out = new(ResourceClassParametersReference)
**out = **in
}
if in.VendorParameters != nil {
in, out := &in.VendorParameters, &out.VendorParameters
*out = make([]VendorParameters, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
if in.Filters != nil {
in, out := &in.Filters, &out.Filters
*out = make([]ResourceFilter, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceClassParameters.
func (in *ResourceClassParameters) DeepCopy() *ResourceClassParameters {
if in == nil {
return nil
}
out := new(ResourceClassParameters)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *ResourceClassParameters) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ResourceClassParametersList) DeepCopyInto(out *ResourceClassParametersList) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ListMeta.DeepCopyInto(&out.ListMeta)
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]ResourceClassParameters, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceClassParametersList.
func (in *ResourceClassParametersList) DeepCopy() *ResourceClassParametersList {
if in == nil {
return nil
}
out := new(ResourceClassParametersList)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *ResourceClassParametersList) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ResourceClassParametersReference) DeepCopyInto(out *ResourceClassParametersReference) {
*out = *in
@@ -481,9 +929,52 @@ func (in *ResourceClassParametersReference) DeepCopy() *ResourceClassParametersR
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ResourceFilter) DeepCopyInto(out *ResourceFilter) {
*out = *in
in.ResourceFilterModel.DeepCopyInto(&out.ResourceFilterModel)
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceFilter.
func (in *ResourceFilter) DeepCopy() *ResourceFilter {
if in == nil {
return nil
}
out := new(ResourceFilter)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ResourceFilterModel) DeepCopyInto(out *ResourceFilterModel) {
*out = *in
if in.NamedResources != nil {
in, out := &in.NamedResources, &out.NamedResources
*out = new(NamedResourcesFilter)
**out = **in
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceFilterModel.
func (in *ResourceFilterModel) DeepCopy() *ResourceFilterModel {
if in == nil {
return nil
}
out := new(ResourceFilterModel)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ResourceHandle) DeepCopyInto(out *ResourceHandle) {
*out = *in
if in.StructuredData != nil {
in, out := &in.StructuredData, &out.StructuredData
*out = new(StructuredResourceHandle)
(*in).DeepCopyInto(*out)
}
return
}
@@ -496,3 +987,152 @@ func (in *ResourceHandle) DeepCopy() *ResourceHandle {
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ResourceRequest) DeepCopyInto(out *ResourceRequest) {
*out = *in
if in.VendorParameters != nil {
out.VendorParameters = in.VendorParameters.DeepCopyObject()
}
in.ResourceRequestModel.DeepCopyInto(&out.ResourceRequestModel)
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceRequest.
func (in *ResourceRequest) DeepCopy() *ResourceRequest {
if in == nil {
return nil
}
out := new(ResourceRequest)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ResourceRequestModel) DeepCopyInto(out *ResourceRequestModel) {
*out = *in
if in.NamedResources != nil {
in, out := &in.NamedResources, &out.NamedResources
*out = new(NamedResourcesRequest)
**out = **in
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceRequestModel.
func (in *ResourceRequestModel) DeepCopy() *ResourceRequestModel {
if in == nil {
return nil
}
out := new(ResourceRequestModel)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ResourceSlice) DeepCopyInto(out *ResourceSlice) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
in.NodeResourceModel.DeepCopyInto(&out.NodeResourceModel)
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceSlice.
func (in *ResourceSlice) DeepCopy() *ResourceSlice {
if in == nil {
return nil
}
out := new(ResourceSlice)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *ResourceSlice) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ResourceSliceList) DeepCopyInto(out *ResourceSliceList) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ListMeta.DeepCopyInto(&out.ListMeta)
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]ResourceSlice, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceSliceList.
func (in *ResourceSliceList) DeepCopy() *ResourceSliceList {
if in == nil {
return nil
}
out := new(ResourceSliceList)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *ResourceSliceList) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *StructuredResourceHandle) DeepCopyInto(out *StructuredResourceHandle) {
*out = *in
if in.VendorClassParameters != nil {
out.VendorClassParameters = in.VendorClassParameters.DeepCopyObject()
}
if in.VendorClaimParameters != nil {
out.VendorClaimParameters = in.VendorClaimParameters.DeepCopyObject()
}
if in.Results != nil {
in, out := &in.Results, &out.Results
*out = make([]DriverAllocationResult, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StructuredResourceHandle.
func (in *StructuredResourceHandle) DeepCopy() *StructuredResourceHandle {
if in == nil {
return nil
}
out := new(StructuredResourceHandle)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *VendorParameters) DeepCopyInto(out *VendorParameters) {
*out = *in
if in.Parameters != nil {
out.Parameters = in.Parameters.DeepCopyObject()
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VendorParameters.
func (in *VendorParameters) DeepCopy() *VendorParameters {
if in == nil {
return nil
}
out := new(VendorParameters)
in.DeepCopyInto(out)
return out
}

View File

@@ -20,6 +20,7 @@ import (
"context"
"errors"
"fmt"
"slices"
"strings"
"time"
@@ -832,9 +833,11 @@ func (ec *Controller) syncClaim(ctx context.Context, namespace, name string) err
return fmt.Errorf("unsupported ReservedFor entry: %v", reservedFor)
}
logger.V(5).Info("claim reserved for counts", "currentCount", len(claim.Status.ReservedFor), "claim", klog.KRef(namespace, name), "updatedCount", len(valid))
builtinControllerFinalizer := slices.Index(claim.Finalizers, resourcev1alpha2.Finalizer)
logger.V(5).Info("claim reserved for counts", "currentCount", len(claim.Status.ReservedFor), "claim", klog.KRef(namespace, name), "updatedCount", len(valid), "builtinController", builtinControllerFinalizer >= 0)
if len(valid) < len(claim.Status.ReservedFor) {
// TODO (#113700): patch
// This is not using a patch because we want the update to fail if anything
// changed in the meantime.
claim := claim.DeepCopy()
claim.Status.ReservedFor = valid
@@ -854,12 +857,53 @@ func (ec *Controller) syncClaim(ctx context.Context, namespace, name string) err
// those, the resource claim controller will trigger deletion when the
// pod is done. However, it doesn't hurt to also trigger deallocation
// for such claims and not checking for them keeps this code simpler.
if len(valid) == 0 &&
claim.Spec.AllocationMode == resourcev1alpha2.AllocationModeWaitForFirstConsumer {
claim.Status.DeallocationRequested = true
if len(valid) == 0 {
if builtinControllerFinalizer >= 0 {
if claim.Spec.AllocationMode == resourcev1alpha2.AllocationModeWaitForFirstConsumer ||
claim.DeletionTimestamp != nil {
// Allocated by scheduler with structured parameters. We can "deallocate"
// by clearing the allocation.
claim.Status.Allocation = nil
}
} else if claim.Spec.AllocationMode == resourcev1alpha2.AllocationModeWaitForFirstConsumer {
// DRA driver controller in the control plane
// needs to do the deallocation.
claim.Status.DeallocationRequested = true
}
// In all other cases, we keep the claim allocated, in particular for immediate allocation
// with a control plane controller.
}
_, err := ec.kubeClient.ResourceV1alpha2().ResourceClaims(claim.Namespace).UpdateStatus(ctx, claim, metav1.UpdateOptions{})
claim, err := ec.kubeClient.ResourceV1alpha2().ResourceClaims(claim.Namespace).UpdateStatus(ctx, claim, metav1.UpdateOptions{})
if err != nil {
return err
}
// Now also remove the finalizer if it is not needed anymore.
// Note that the index may have changed as a result of the UpdateStatus call.
builtinControllerFinalizer := slices.Index(claim.Finalizers, resourcev1alpha2.Finalizer)
if builtinControllerFinalizer >= 0 && claim.Status.Allocation == nil {
claim.Finalizers = slices.Delete(claim.Finalizers, builtinControllerFinalizer, builtinControllerFinalizer+1)
if _, err := ec.kubeClient.ResourceV1alpha2().ResourceClaims(claim.Namespace).Update(ctx, claim, metav1.UpdateOptions{}); err != nil {
return err
}
}
} else if builtinControllerFinalizer >= 0 && claim.DeletionTimestamp != nil && len(valid) == 0 {
claim := claim.DeepCopy()
if claim.Status.Allocation != nil {
// This can happen when a claim with immediate allocation
// stopped being used, remained allocated, and then got
// deleted. As above we then need to clear the allocation.
claim.Status.Allocation = nil
var err error
claim, err = ec.kubeClient.ResourceV1alpha2().ResourceClaims(claim.Namespace).UpdateStatus(ctx, claim, metav1.UpdateOptions{})
if err != nil {
return err
}
}
// Whether it was allocated or not, remove the finalizer to unblock removal.
claim.Finalizers = slices.Delete(claim.Finalizers, builtinControllerFinalizer, builtinControllerFinalizer+1)
_, err := ec.kubeClient.ResourceV1alpha2().ResourceClaims(claim.Namespace).Update(ctx, claim, metav1.UpdateOptions{})
if err != nil {
return err
}

File diff suppressed because it is too large

View File

@@ -31,9 +31,12 @@ import (
"k8s.io/apiserver/pkg/apis/apiserver/load"
"k8s.io/apiserver/pkg/apis/apiserver/validation"
"k8s.io/apiserver/pkg/authorization/authorizer"
utilfeature "k8s.io/apiserver/pkg/util/feature"
versionedinformers "k8s.io/client-go/informers"
resourcev1alpha2informers "k8s.io/client-go/informers/resource/v1alpha2"
"k8s.io/kubernetes/pkg/auth/authorizer/abac"
"k8s.io/kubernetes/pkg/auth/nodeidentifier"
"k8s.io/kubernetes/pkg/features"
"k8s.io/kubernetes/pkg/kubeapiserver/authorizer/modes"
"k8s.io/kubernetes/plugin/pkg/auth/authorizer/node"
"k8s.io/kubernetes/plugin/pkg/auth/authorizer/rbac"
@@ -90,6 +93,10 @@ func (config Config) New(ctx context.Context, serverID string) (authorizer.Autho
// Keep cases in sync with constant list in k8s.io/kubernetes/pkg/kubeapiserver/authorizer/modes/modes.go.
switch configuredAuthorizer.Type {
case authzconfig.AuthorizerType(modes.ModeNode):
var slices resourcev1alpha2informers.ResourceSliceInformer
if utilfeature.DefaultFeatureGate.Enabled(features.DynamicResourceAllocation) {
slices = config.VersionedInformerFactory.Resource().V1alpha2().ResourceSlices()
}
node.RegisterMetrics()
graph := node.NewGraph()
node.AddGraphEventHandlers(
@@ -98,6 +105,7 @@ func (config Config) New(ctx context.Context, serverID string) (authorizer.Autho
config.VersionedInformerFactory.Core().V1().Pods(),
config.VersionedInformerFactory.Core().V1().PersistentVolumes(),
config.VersionedInformerFactory.Storage().V1().VolumeAttachments(),
slices, // Nil check in AddGraphEventHandlers can be removed when always creating this.
)
r.nodeAuthorizer = node.NewAuthorizer(graph, nodeidentifier.NewDefaultNodeIdentifier(), bootstrappolicy.NodeRules())

View File

@@ -135,6 +135,7 @@ type ContainerManager interface {
}
type NodeConfig struct {
NodeName types.NodeName
RuntimeCgroupsName string
SystemCgroupsName string
KubeletCgroupsName string

View File

@@ -308,7 +308,7 @@ func NewContainerManager(mountUtil mount.Interface, cadvisorInterface cadvisor.I
// initialize DRA manager
if utilfeature.DefaultFeatureGate.Enabled(kubefeatures.DynamicResourceAllocation) {
klog.InfoS("Creating Dynamic Resource Allocation (DRA) manager")
cm.draManager, err = dra.NewManagerImpl(kubeClient, nodeConfig.KubeletRootDir)
cm.draManager, err = dra.NewManagerImpl(kubeClient, nodeConfig.KubeletRootDir, nodeConfig.NodeName)
if err != nil {
return nil, err
}

View File

@@ -21,6 +21,7 @@ import (
"fmt"
v1 "k8s.io/api/core/v1"
resourceapi "k8s.io/api/resource/v1alpha2"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
clientset "k8s.io/client-go/kubernetes"
@@ -44,7 +45,7 @@ type ManagerImpl struct {
}
// NewManagerImpl creates a new manager.
func NewManagerImpl(kubeClient clientset.Interface, stateFileDirectory string) (*ManagerImpl, error) {
func NewManagerImpl(kubeClient clientset.Interface, stateFileDirectory string, nodeName types.NodeName) (*ManagerImpl, error) {
klog.V(2).InfoS("Creating DRA manager")
claimInfoCache, err := newClaimInfoCache(stateFileDirectory, draManagerStateFileName)
@@ -143,6 +144,9 @@ func (m *ManagerImpl) PrepareResources(pod *v1.Pod) error {
Name: resourceClaim.Name,
ResourceHandle: resourceHandle.Data,
}
if resourceHandle.StructuredData != nil {
claim.StructuredResourceHandle = []*resourceapi.StructuredResourceHandle{resourceHandle.StructuredData}
}
batches[pluginName] = append(batches[pluginName], claim)
}
claimInfos[resourceClaim.UID] = claimInfo

View File

@@ -154,7 +154,7 @@ func TestNewManagerImpl(t *testing.T) {
},
} {
t.Run(test.description, func(t *testing.T) {
manager, err := NewManagerImpl(kubeClient, test.stateFileDirectory)
manager, err := NewManagerImpl(kubeClient, test.stateFileDirectory, "worker")
if test.wantErr {
assert.Error(t, err)
return
@@ -287,7 +287,7 @@ func TestGetResources(t *testing.T) {
},
} {
t.Run(test.description, func(t *testing.T) {
manager, err := NewManagerImpl(kubeClient, t.TempDir())
manager, err := NewManagerImpl(kubeClient, t.TempDir(), "worker")
assert.NoError(t, err)
if test.claimInfo != nil {
@@ -760,7 +760,7 @@ func TestPrepareResources(t *testing.T) {
}
defer draServerInfo.teardownFn()
plg := plugin.NewRegistrationHandler()
plg := plugin.NewRegistrationHandler(nil, "worker")
if err := plg.RegisterPlugin(test.driverName, draServerInfo.socketName, []string{"1.27"}); err != nil {
t.Fatalf("failed to register plugin %s, err: %v", test.driverName, err)
}
@@ -1060,7 +1060,7 @@ func TestUnprepareResources(t *testing.T) {
}
defer draServerInfo.teardownFn()
plg := plugin.NewRegistrationHandler()
plg := plugin.NewRegistrationHandler(nil, "worker")
if err := plg.RegisterPlugin(test.driverName, draServerInfo.socketName, []string{"1.27"}); err != nil {
t.Fatalf("failed to register plugin %s, err: %v", test.driverName, err)
}

View File

@@ -54,13 +54,14 @@ func (v1alpha2rm v1alpha2NodeResourceManager) Prepare(ctx context.Context, conn
}
for _, claim := range req.Claims {
res, err := nodeClient.NodePrepareResource(ctx,
&drapbv1alpha2.NodePrepareResourceRequest{
Namespace: claim.Namespace,
ClaimUid: claim.Uid,
ClaimName: claim.Name,
ResourceHandle: claim.ResourceHandle,
})
req := &drapbv1alpha2.NodePrepareResourceRequest{
Namespace: claim.Namespace,
ClaimUid: claim.Uid,
ClaimName: claim.Name,
ResourceHandle: claim.ResourceHandle,
StructuredResourceHandle: claim.StructuredResourceHandle,
}
res, err := nodeClient.NodePrepareResource(ctx, req)
result := &drapb.NodePrepareResourceResponse{}
if err != nil {
result.Error = err.Error()
@@ -197,3 +198,20 @@ func (p *plugin) NodeUnprepareResources(
logger.V(4).Info(log("done calling NodeUnprepareResources rpc"), "response", response, "err", err)
return response, err
}
func (p *plugin) NodeListAndWatchResources(
ctx context.Context,
req *drapb.NodeListAndWatchResourcesRequest,
opts ...grpc.CallOption,
) (drapb.Node_NodeListAndWatchResourcesClient, error) {
logger := klog.FromContext(ctx)
logger.V(4).Info(log("calling NodeListAndWatchResources rpc"), "request", req)
conn, err := p.getOrCreateGRPCConn()
if err != nil {
return nil, err
}
nodeClient := drapb.NewNodeClient(conn)
return nodeClient.NodeListAndWatchResources(ctx, req, opts...)
}
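// A minimal sketch of how a caller drains the returned stream (mirroring the Recv
// loop in the tests below; what to do with each response is up to the caller):
//
//	stream, err := client.NodeListAndWatchResources(ctx, &drapb.NodeListAndWatchResourcesRequest{})
//	if err != nil {
//		return err
//	}
//	for {
//		resp, err := stream.Recv()
//		if err != nil {
//			break // io.EOF or a gRPC status error terminates the watch
//		}
//		_ = resp // resp describes the plugin's currently available resources
//	}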

View File

@@ -43,6 +43,16 @@ func (f *fakeV1alpha3GRPCServer) NodeUnprepareResource(ctx context.Context, in *
return &drapbv1alpha3.NodeUnprepareResourcesResponse{}, nil
}
func (f *fakeV1alpha3GRPCServer) NodeListAndWatchResources(req *drapbv1alpha3.NodeListAndWatchResourcesRequest, srv drapbv1alpha3.Node_NodeListAndWatchResourcesServer) error {
if err := srv.Send(&drapbv1alpha3.NodeListAndWatchResourcesResponse{}); err != nil {
return err
}
if err := srv.Send(&drapbv1alpha3.NodeListAndWatchResourcesResponse{}); err != nil {
return err
}
return nil
}
type fakeV1alpha2GRPCServer struct {
drapbv1alpha2.UnimplementedNodeServer
}
@@ -288,3 +298,82 @@ func TestNodeUnprepareResource(t *testing.T) {
})
}
}
func TestListAndWatchResources(t *testing.T) {
for _, test := range []struct {
description string
serverSetup func(string) (string, tearDown, error)
serverVersion string
request *drapbv1alpha3.NodeListAndWatchResourcesRequest
responses []*drapbv1alpha3.NodeListAndWatchResourcesResponse
expectError string
}{
{
description: "server supports NodeResources API",
serverSetup: setupFakeGRPCServer,
serverVersion: v1alpha3Version,
request: &drapbv1alpha3.NodeListAndWatchResourcesRequest{},
responses: []*drapbv1alpha3.NodeListAndWatchResourcesResponse{
{},
{},
},
expectError: "EOF",
},
{
description: "server doesn't support NodeResources API",
serverSetup: setupFakeGRPCServer,
serverVersion: v1alpha2Version,
request: new(drapbv1alpha3.NodeListAndWatchResourcesRequest),
expectError: "Unimplemented",
},
} {
t.Run(test.description, func(t *testing.T) {
addr, teardown, err := test.serverSetup(test.serverVersion)
if err != nil {
t.Fatal(err)
}
defer teardown()
p := &plugin{
endpoint: addr,
version: v1alpha3Version,
}
conn, err := p.getOrCreateGRPCConn()
// Check the error before deferring Close; otherwise Close would be called on a nil connection.
if err != nil {
t.Fatal(err)
}
defer func() {
if err := conn.Close(); err != nil {
t.Error(err)
}
}()
draPlugins.add("dummy-plugin", p)
defer draPlugins.delete("dummy-plugin")
client, err := NewDRAPluginClient("dummy-plugin")
if err != nil {
t.Fatal(err)
}
stream, err := client.NodeListAndWatchResources(context.Background(), test.request)
if err != nil {
t.Fatal(err)
}
var actualResponses []*drapbv1alpha3.NodeListAndWatchResourcesResponse
var actualErr error
for {
resp, err := stream.Recv()
if err != nil {
actualErr = err
break
}
actualResponses = append(actualResponses, resp)
}
assert.Equal(t, test.responses, actualResponses)
assert.Contains(t, actualErr.Error(), test.expectError)
})
}
}

View File

@@ -0,0 +1,479 @@
/*
Copyright 2024 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package plugin
import (
"context"
"errors"
"fmt"
"io"
"sync"
"time"
"github.com/google/go-cmp/cmp"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
resourceapi "k8s.io/api/resource/v1alpha2"
apiequality "k8s.io/apimachinery/pkg/api/equality"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
"k8s.io/apimachinery/pkg/util/sets"
resourceinformers "k8s.io/client-go/informers/resource/v1alpha2"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/tools/cache"
"k8s.io/client-go/util/workqueue"
"k8s.io/klog/v2"
drapb "k8s.io/kubelet/pkg/apis/dra/v1alpha3"
)
const (
// resyncPeriod for informer
// TODO (https://github.com/kubernetes/kubernetes/issues/123688): disable?
resyncPeriod = 10 * time.Minute
)
// nodeResourcesController collects resource information from all registered
// plugins and synchronizes that information with ResourceSlice objects.
type nodeResourcesController struct {
ctx context.Context
kubeClient kubernetes.Interface
nodeName string
wg sync.WaitGroup
queue workqueue.RateLimitingInterface
sliceStore cache.Store
mutex sync.RWMutex
activePlugins map[string]*activePlugin
}
// activePlugin holds the resource information about one plugin
// and the gRPC stream that is used to retrieve that information. The context
// used by that stream can be canceled separately to stop
// the monitoring.
type activePlugin struct {
// cancel is the function which cancels the monitorPlugin goroutine
// for this plugin.
cancel func(reason error)
// resources is protected by the nodeResourcesController read/write lock.
// When receiving updates from the driver, the entire slice gets replaced,
// so it is okay to not do a deep copy of it. Only retrieving the slice
// must be protected by a read lock.
resources []*resourceapi.NodeResourceModel
}
// startNodeResourcesController constructs a new controller and starts it.
//
// If a kubeClient is provided, then it synchronizes ResourceSlices
// with the resource information provided by plugins. Without it,
// the controller is inactive. This can happen when kubelet is run stand-alone
// without an apiserver. In that case we can't and don't need to publish
// ResourceSlices.
func startNodeResourcesController(ctx context.Context, kubeClient kubernetes.Interface, nodeName string) *nodeResourcesController {
if kubeClient == nil {
return nil
}
logger := klog.FromContext(ctx)
logger = klog.LoggerWithName(logger, "node resources controller")
ctx = klog.NewContext(ctx, logger)
c := &nodeResourcesController{
ctx: ctx,
kubeClient: kubeClient,
nodeName: nodeName,
queue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "node_resource_slices"),
activePlugins: make(map[string]*activePlugin),
}
c.wg.Add(1)
go func() {
defer c.wg.Done()
c.run(ctx)
}()
return c
}
// waitForStop blocks until all background activity spawned by
// the controller has stopped. The context passed to start must
// be canceled for that to happen.
//
// Not needed at the moment, but if it were, this is what it would
// look like...
// func (c *nodeResourcesController) waitForStop() {
// if c == nil {
// return
// }
//
// c.wg.Wait()
// }
// addPlugin is called whenever a plugin has been (re-)registered.
func (c *nodeResourcesController) addPlugin(driverName string, pluginInstance *plugin) {
if c == nil {
return
}
klog.FromContext(c.ctx).V(2).Info("Adding plugin", "driverName", driverName)
c.mutex.Lock()
defer c.mutex.Unlock()
if active := c.activePlugins[driverName]; active != nil {
active.cancel(errors.New("plugin has re-registered"))
}
active := &activePlugin{}
cancelCtx, cancel := context.WithCancelCause(c.ctx)
active.cancel = cancel
c.activePlugins[driverName] = active
c.queue.Add(driverName)
c.wg.Add(1)
go func() {
defer c.wg.Done()
c.monitorPlugin(cancelCtx, active, driverName, pluginInstance)
}()
}
// removePlugin is called whenever a plugin has been unregistered.
func (c *nodeResourcesController) removePlugin(driverName string) {
if c == nil {
return
}
klog.FromContext(c.ctx).V(2).Info("Removing plugin", "driverName", driverName)
c.mutex.Lock()
defer c.mutex.Unlock()
if active, ok := c.activePlugins[driverName]; ok {
active.cancel(errors.New("plugin has unregistered"))
delete(c.activePlugins, driverName)
c.queue.Add(driverName)
}
}
// monitorPlugin calls the plugin to retrieve resource information and caches
// all responses that it gets for processing in the sync method. It keeps
// retrying until an error or an EOF response indicates that no further data is
// going to be sent; after that, watching the plugin's resources stops until the
// plugin re-registers.
func (c *nodeResourcesController) monitorPlugin(ctx context.Context, active *activePlugin, driverName string, pluginInstance *plugin) {
logger := klog.FromContext(ctx)
logger = klog.LoggerWithValues(logger, "driverName", driverName)
logger.Info("Starting to monitor node resources of the plugin")
defer func() {
r := recover()
logger.Info("Stopping to monitor node resources of the plugin", "reason", context.Cause(ctx), "err", ctx.Err(), "recover", r)
}()
// Keep trying until canceled.
for ctx.Err() == nil {
logger.V(5).Info("Calling NodeListAndWatchResources")
stream, err := pluginInstance.NodeListAndWatchResources(ctx, new(drapb.NodeListAndWatchResourcesRequest))
if err != nil {
switch {
case status.Convert(err).Code() == codes.Unimplemented:
// The plugin simply doesn't provide node resources.
active.cancel(errors.New("plugin does not support node resource reporting"))
default:
// This is a problem, report it and retry.
logger.Error(err, "Creating gRPC stream for node resources failed")
// TODO (https://github.com/kubernetes/kubernetes/issues/123689): exponential backoff?
select {
case <-time.After(5 * time.Second):
case <-ctx.Done():
}
}
continue
}
for {
response, err := stream.Recv()
if err != nil {
switch {
case errors.Is(err, io.EOF):
// This is okay. Some plugins might never change their
// resources after reporting them once.
active.cancel(errors.New("plugin has closed the stream"))
case status.Convert(err).Code() == codes.Unimplemented:
// The plugin has the method but does not actually implement it.
active.cancel(errors.New("plugin does not support node resource reporting"))
case ctx.Err() == nil:
// This is a problem, report it and retry.
logger.Error(err, "Reading node resources from gRPC stream failed")
// TODO (https://github.com/kubernetes/kubernetes/issues/123689): exponential backoff?
select {
case <-time.After(5 * time.Second):
case <-ctx.Done():
}
}
break
}
if loggerV := logger.V(6); loggerV.Enabled() {
loggerV.Info("Driver resources updated", "resources", response.Resources)
} else {
logger.V(5).Info("Driver resources updated", "numResources", len(response.Resources))
}
c.mutex.Lock()
active.resources = response.Resources
c.mutex.Unlock()
c.queue.Add(driverName)
}
}
}
// run runs in the background. It handles blocking initialization (like
// waiting for the informer to sync) and then reconciles the actual state with
// the desired state.
func (c *nodeResourcesController) run(ctx context.Context) {
logger := klog.FromContext(ctx)
// When kubelet starts, we have two choices:
// - Sync immediately, which in practice will delete all ResourceSlices
// because no plugin has registered yet. We could do a DeleteCollection
// to speed this up.
// - Wait a bit, then sync. If all plugins have re-registered in the meantime,
// we might not need to change any ResourceSlice.
//
// For now syncing starts immediately, with no DeleteCollection. This
// can be reconsidered later.
// While kubelet starts up, there are errors:
// E0226 13:41:19.880621 126334 reflector.go:150] k8s.io/client-go@v0.0.0/tools/cache/reflector.go:232: Failed to watch *v1alpha2.ResourceSlice: failed to list *v1alpha2.ResourceSlice: resourceslices.resource.k8s.io is forbidden: User "system:anonymous" cannot list resource "resourceslices" in API group "resource.k8s.io" at the cluster scope
//
// The credentials used by kubeClient seem to get swapped out later,
// because eventually these list calls succeed.
// TODO (https://github.com/kubernetes/kubernetes/issues/123691): can we avoid these error log entries? Perhaps wait here?
// We could use an indexer on driver name, but that seems overkill.
informer := resourceinformers.NewFilteredResourceSliceInformer(c.kubeClient, resyncPeriod, nil, func(options *metav1.ListOptions) {
options.FieldSelector = "nodeName=" + c.nodeName
})
c.sliceStore = informer.GetStore()
handler, err := informer.AddEventHandler(cache.ResourceEventHandlerFuncs{
AddFunc: func(obj any) {
slice, ok := obj.(*resourceapi.ResourceSlice)
if !ok {
return
}
logger.V(5).Info("ResourceSlice add", "slice", klog.KObj(slice))
c.queue.Add(slice.DriverName)
},
UpdateFunc: func(old, new any) {
oldSlice, ok := old.(*resourceapi.ResourceSlice)
if !ok {
return
}
newSlice, ok := new.(*resourceapi.ResourceSlice)
if !ok {
return
}
if loggerV := logger.V(6); loggerV.Enabled() {
loggerV.Info("ResourceSlice update", "slice", klog.KObj(newSlice), "diff", cmp.Diff(oldSlice, newSlice))
} else {
logger.V(5).Info("ResourceSlice update", "slice", klog.KObj(newSlice))
}
c.queue.Add(newSlice.DriverName)
},
DeleteFunc: func(obj any) {
if tombstone, ok := obj.(cache.DeletedFinalStateUnknown); ok {
obj = tombstone.Obj
}
slice, ok := obj.(*resourceapi.ResourceSlice)
if !ok {
return
}
logger.V(5).Info("ResourceSlice delete", "slice", klog.KObj(slice))
c.queue.Add(slice.DriverName)
},
})
if err != nil {
logger.Error(err, "Registering event handler on the ResourceSlice informer failed, disabling resource monitoring")
return
}
// Start informer and wait for our cache to be populated.
c.wg.Add(1)
go func() {
defer c.wg.Done()
informer.Run(ctx.Done())
}()
for !handler.HasSynced() {
select {
case <-time.After(time.Second):
case <-ctx.Done():
return
}
}
logger.Info("ResourceSlice informer has synced")
for c.processNextWorkItem(ctx) {
}
}
func (c *nodeResourcesController) processNextWorkItem(ctx context.Context) bool {
key, shutdown := c.queue.Get()
if shutdown {
return false
}
defer c.queue.Done(key)
driverName := key.(string)
// Panics are caught and treated like errors.
var err error
func() {
defer func() {
if r := recover(); r != nil {
err = fmt.Errorf("internal error: %v", r)
}
}()
err = c.sync(ctx, driverName)
}()
if err != nil {
// TODO (https://github.com/kubernetes/enhancements/issues/3077): contextual logging in utilruntime
utilruntime.HandleError(fmt.Errorf("processing driver %v: %v", driverName, err))
c.queue.AddRateLimited(key)
// Return without removing the work item from the queue.
// It will be retried.
return true
}
c.queue.Forget(key)
return true
}
func (c *nodeResourcesController) sync(ctx context.Context, driverName string) error {
logger := klog.FromContext(ctx)
// Gather information about the actual and desired state.
slices := c.sliceStore.List()
var driverResources []*resourceapi.NodeResourceModel
c.mutex.RLock()
if active, ok := c.activePlugins[driverName]; ok {
// No need for a deep copy, the entire slice gets replaced on writes.
driverResources = active.resources
}
c.mutex.RUnlock()
// Resources that are not yet stored in any slice need to be published.
// Here we track the indices of any resources that are already stored.
storedResourceIndices := sets.New[int]()
// Slices that don't match any driver resource either get updated (if there
// are new driver resources that still need to be stored) or get deleted.
obsoleteSlices := make([]*resourceapi.ResourceSlice, 0, len(slices))
// Match slices with resource information.
for _, obj := range slices {
slice := obj.(*resourceapi.ResourceSlice)
if slice.DriverName != driverName {
continue
}
index := indexOfModel(driverResources, &slice.NodeResourceModel)
if index >= 0 {
storedResourceIndices.Insert(index)
continue
}
obsoleteSlices = append(obsoleteSlices, slice)
}
if loggerV := logger.V(6); loggerV.Enabled() {
// Dump entire resource information.
loggerV.Info("Syncing existing driver node resource slices with driver resources", "slices", klog.KObjSlice(slices), "resources", driverResources)
} else {
logger.V(5).Info("Syncing existing driver node resource slices with driver resources", "slices", klog.KObjSlice(slices), "numResources", len(driverResources))
}
// Update stale slices before removing what's left.
//
// We don't really know which of these slices might have
// been used for "the" driver resource because they don't
// have a unique ID. In practice, a driver is most likely
// to just give us one NodeResourceModel, in which case
// this isn't a problem at all. If we have more than one,
// then at least conceptually it currently doesn't matter
// where we publish it.
//
// The long-term goal is to move the handling of
// ResourceSlice objects into the driver, with kubelet
// just acting as a REST proxy. The advantage of that will
// be that kubelet won't need to support the same
// resource API version as the driver and the control plane.
// With that approach, the driver will be able to match
// up objects more intelligently.
numObsoleteSlices := len(obsoleteSlices)
for index, resource := range driverResources {
if storedResourceIndices.Has(index) {
// No need to do anything, it is already stored exactly
// like this in an existing slice.
continue
}
if numObsoleteSlices > 0 {
// Update one existing slice.
slice := obsoleteSlices[numObsoleteSlices-1]
numObsoleteSlices--
slice = slice.DeepCopy()
slice.NodeResourceModel = *resource
logger.V(5).Info("Reusing existing node resource slice", "slice", klog.KObj(slice))
if _, err := c.kubeClient.ResourceV1alpha2().ResourceSlices().Update(ctx, slice, metav1.UpdateOptions{}); err != nil {
return fmt.Errorf("update node resource slice: %w", err)
}
continue
}
// Create a new slice.
slice := &resourceapi.ResourceSlice{
ObjectMeta: metav1.ObjectMeta{
GenerateName: c.nodeName + "-" + driverName + "-",
// TODO (https://github.com/kubernetes/kubernetes/issues/123692): node object as owner
},
NodeName: c.nodeName,
DriverName: driverName,
NodeResourceModel: *resource,
}
logger.V(5).Info("Creating new node resource slice", "slice", klog.KObj(slice))
if _, err := c.kubeClient.ResourceV1alpha2().ResourceSlices().Create(ctx, slice, metav1.CreateOptions{}); err != nil {
return fmt.Errorf("create node resource slice: %w", err)
}
}
// All remaining slices are truly orphaned.
for i := 0; i < numObsoleteSlices; i++ {
slice := obsoleteSlices[i]
logger.V(5).Info("Deleting obsolete node resource slice", "slice", klog.KObj(slice))
if err := c.kubeClient.ResourceV1alpha2().ResourceSlices().Delete(ctx, slice.Name, metav1.DeleteOptions{}); err != nil {
return fmt.Errorf("delete node resource slice: %w", err)
}
}
return nil
}
func indexOfModel(models []*resourceapi.NodeResourceModel, model *resourceapi.NodeResourceModel) int {
for index, m := range models {
if apiequality.Semantic.DeepEqual(m, model) {
return index
}
}
return -1
}
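To make the matching in sync easier to follow, here is a minimal standalone sketch of the same reconciliation idea with a simplified placeholder type. It is illustrative only; the real code above operates on ResourceSlice objects and the API calls shown in sync.

// Hedged sketch of the reconciliation performed by sync(), using a simplified
// hypothetical "model" type instead of resourceapi.NodeResourceModel.
package main

import "fmt"

type model string

// reconcile splits existing published models and the models reported by the
// driver into: keep (already published), reuse (obsolete entries updated in
// place), create (new entries), remove (obsolete entries to delete).
func reconcile(existing, desired []model) (keep, reuse, create, remove []model) {
    published := map[model]bool{}
    for _, m := range desired {
        published[m] = false
    }
    for _, e := range existing {
        if done, ok := published[e]; ok && !done {
            published[e] = true
            keep = append(keep, e)
            continue
        }
        remove = append(remove, e)
    }
    for _, m := range desired {
        if published[m] {
            continue
        }
        if len(remove) > 0 {
            // Update an obsolete entry in place instead of creating a new one.
            reuse = append(reuse, remove[len(remove)-1])
            remove = remove[:len(remove)-1]
            continue
        }
        create = append(create, m)
    }
    return
}

func main() {
    keep, reuse, create, remove := reconcile([]model{"a", "old"}, []model{"a", "b"})
    fmt.Println(keep, reuse, create, remove) // [a] [old] [] []
}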

View File

@@ -29,6 +29,7 @@ import (
"google.golang.org/grpc/connectivity"
"google.golang.org/grpc/credentials/insecure"
utilversion "k8s.io/apimachinery/pkg/util/version"
"k8s.io/client-go/kubernetes"
"k8s.io/klog/v2"
)
@@ -94,11 +95,23 @@ func (p *plugin) setVersion(version string) {
}
// RegistrationHandler is the handler which is fed to the pluginwatcher API.
type RegistrationHandler struct{}
type RegistrationHandler struct {
controller *nodeResourcesController
}
// NewRegistrationHandler returns a new registration handler.
func NewRegistrationHandler() *RegistrationHandler {
return &RegistrationHandler{}
//
// Must only be called once per process because it manages global state.
// If a kubeClient is provided, then it synchronizes ResourceSlices
// with the resource information provided by plugins.
func NewRegistrationHandler(kubeClient kubernetes.Interface, nodeName string) *RegistrationHandler {
handler := &RegistrationHandler{}
// If kubelet ever gets an API for stopping registration handlers, then
// that would need to be hooked up with stopping the controller.
handler.controller = startNodeResourcesController(context.TODO(), kubeClient, nodeName)
return handler
}
// RegisterPlugin is called when a plugin can be registered.
@@ -110,15 +123,18 @@ func (h *RegistrationHandler) RegisterPlugin(pluginName string, endpoint string,
return err
}
// Storing endpoint of newly registered DRA Plugin into the map, where plugin name will be the key
// all other DRA components will be able to get the actual socket of DRA plugins by its name.
// By default we assume the supported plugin version is v1alpha3
draPlugins.add(pluginName, &plugin{
pluginInstance := &plugin{
conn: nil,
endpoint: endpoint,
version: v1alpha3Version,
highestSupportedVersion: highestSupportedVersion,
})
}
// Store the endpoint of the newly registered DRA plugin in the map, keyed by the plugin name,
// so that all other DRA components can look up the actual socket of a DRA plugin by its name.
// By default we assume the supported plugin version is v1alpha3.
draPlugins.add(pluginName, pluginInstance)
h.controller.addPlugin(pluginName, pluginInstance)
return nil
}
@@ -178,6 +194,7 @@ func deregisterPlugin(pluginName string) {
func (h *RegistrationHandler) DeRegisterPlugin(pluginName string) {
klog.InfoS("DeRegister DRA plugin", "name", pluginName)
deregisterPlugin(pluginName)
h.controller.removePlugin(pluginName)
}
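Taken together, registration now both records the plugin endpoint and hands the plugin instance to the node resources controller, and deregistration undoes both. A hedged sketch of the calling sequence follows; the driver name, socket path, and version are hypothetical, and the wrapper function exists only for illustration.

// Hedged sketch of the (de)registration flow added above; the driver name,
// socket path and version are made up.
func registerExamplePlugin(kubeClient kubernetes.Interface, nodeName string) {
    handler := NewRegistrationHandler(kubeClient, nodeName)
    if err := handler.RegisterPlugin("gpu.example.com", "/var/lib/kubelet/plugins/gpu.example.com/dra.sock", []string{"v1.3.0"}); err != nil {
        // Registration was rejected, e.g. because no supported version was offered.
        klog.ErrorS(err, "DRA plugin registration failed")
        return
    }
    // When the plugin's socket goes away, the plugin watcher deregisters it,
    // which also stops node resource monitoring for that driver.
    handler.DeRegisterPlugin("gpu.example.com")
}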
// ValidatePlugin is called by kubelet's plugin watcher upon detection

View File

@@ -23,6 +23,10 @@ import (
)
func TestRegistrationHandler_ValidatePlugin(t *testing.T) {
newRegistrationHandler := func() *RegistrationHandler {
return NewRegistrationHandler(nil, "worker")
}
for _, test := range []struct {
description string
handler func() *RegistrationHandler
@@ -33,19 +37,19 @@ func TestRegistrationHandler_ValidatePlugin(t *testing.T) {
}{
{
description: "no versions provided",
handler: NewRegistrationHandler,
handler: newRegistrationHandler,
shouldError: true,
},
{
description: "unsupported version",
handler: NewRegistrationHandler,
handler: newRegistrationHandler,
versions: []string{"v2.0.0"},
shouldError: true,
},
{
description: "plugin already registered with a higher supported version",
handler: func() *RegistrationHandler {
handler := NewRegistrationHandler()
handler := newRegistrationHandler()
if err := handler.RegisterPlugin("this-plugin-already-exists-and-has-a-long-name-so-it-doesnt-collide", "", []string{"v1.1.0"}); err != nil {
t.Fatal(err)
}
@@ -57,7 +61,7 @@ func TestRegistrationHandler_ValidatePlugin(t *testing.T) {
},
{
description: "should validate the plugin",
handler: NewRegistrationHandler,
handler: newRegistrationHandler,
pluginName: "this-is-a-dummy-plugin-with-a-long-name-so-it-doesnt-collide",
versions: []string{"v1.3.0"},
},
@@ -74,7 +78,7 @@ func TestRegistrationHandler_ValidatePlugin(t *testing.T) {
}
t.Cleanup(func() {
handler := NewRegistrationHandler()
handler := newRegistrationHandler()
handler.DeRegisterPlugin("this-plugin-already-exists-and-has-a-long-name-so-it-doesnt-collide")
handler.DeRegisterPlugin("this-is-a-dummy-plugin-with-a-long-name-so-it-doesnt-collide")
})

View File

@@ -37,6 +37,13 @@ func assertStateEqual(t *testing.T, restoredState, expectedState ClaimInfoStateL
assert.Equal(t, expectedState, restoredState, "expected ClaimInfoState does not equal to restored one")
}
// TODO (https://github.com/kubernetes/kubernetes/issues/123552): reconsider what data gets stored in checkpoints and whether that is really necessary.
//
// As it stands now, a "v1" checkpoint contains data for types like the resourcev1alpha2.ResourceHandle
// which may change over time as new fields get added in a backward-compatible way (not unusual
// for API types). That breaks checksumming with pkg/util/hash because the checksum is based
// on spew output, and that output includes those new fields.
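The updated checksums in this file follow from the comment above: the checksum is computed over a spew-style dump of the checkpointed structs, so adding a field to one of those types changes the dump, and with it the checksum, even when the field is empty. Below is a hedged, standalone sketch of that effect; fmt's %#v stands in for spew, hash/fnv for the real checksum code, and two separate types stand in for one type before and after a field was added.

// Hedged sketch: why struct-dump based checksums change when a field is added.
package main

import (
    "fmt"
    "hash/fnv"
)

// handleV1 and handleV2 stand in for the same checkpointed type before and
// after an optional field was added.
type handleV1 struct{ DriverName, Data string }
type handleV2 struct {
    DriverName, Data string
    StructuredData   *string // new, optional field
}

func checksum(v interface{}) uint32 {
    h := fnv.New32a()
    fmt.Fprintf(h, "%#v", v) // the dump lists every field, set or not
    return h.Sum32()
}

func main() {
    before := checksum(handleV1{DriverName: "d", Data: "x"})
    after := checksum(handleV2{DriverName: "d", Data: "x"}) // StructuredData is nil
    fmt.Println(before == after)                            // false: same data, different dump
}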
func TestCheckpointGetOrCreate(t *testing.T) {
testCases := []struct {
description string
@@ -52,7 +59,7 @@ func TestCheckpointGetOrCreate(t *testing.T) {
},
{
"Restore checkpoint - single claim",
`{"version":"v1","entries":[{"DriverName":"test-driver.cdi.k8s.io","ClassName":"class-name","ClaimUID":"067798be-454e-4be4-9047-1aa06aea63f7","ClaimName":"example","Namespace":"default","PodUIDs":{"139cdb46-f989-4f17-9561-ca10cfb509a6":{}},"ResourceHandles":[{"driverName":"test-driver.cdi.k8s.io","data":"{\"a\": \"b\"}"}],"CDIDevices":{"test-driver.cdi.k8s.io":["example.com/example=cdi-example"]}}],"checksum":4194867564}`,
`{"version":"v1","entries":[{"DriverName":"test-driver.cdi.k8s.io","ClassName":"class-name","ClaimUID":"067798be-454e-4be4-9047-1aa06aea63f7","ClaimName":"example","Namespace":"default","PodUIDs":{"139cdb46-f989-4f17-9561-ca10cfb509a6":{}},"ResourceHandles":[{"driverName":"test-driver.cdi.k8s.io","data":"{\"a\": \"b\"}"}],"CDIDevices":{"test-driver.cdi.k8s.io":["example.com/example=cdi-example"]}}],"checksum":113577689}`,
"",
[]ClaimInfoState{
{
@@ -76,7 +83,7 @@ func TestCheckpointGetOrCreate(t *testing.T) {
},
{
"Restore checkpoint - single claim - multiple devices",
`{"version":"v1","entries":[{"DriverName":"meta-test-driver.cdi.k8s.io","ClassName":"class-name","ClaimUID":"067798be-454e-4be4-9047-1aa06aea63f7","ClaimName":"example","Namespace":"default","PodUIDs":{"139cdb46-f989-4f17-9561-ca10cfb509a6":{}},"ResourceHandles":[{"driverName":"test-driver-1.cdi.k8s.io","data":"{\"a\": \"b\"}"},{"driverName":"test-driver-2.cdi.k8s.io","data":"{\"c\": \"d\"}"}],"CDIDevices":{"test-driver-1.cdi.k8s.io":["example-1.com/example-1=cdi-example-1"],"test-driver-2.cdi.k8s.io":["example-2.com/example-2=cdi-example-2"]}}],"checksum":360176657}`,
`{"version":"v1","entries":[{"DriverName":"meta-test-driver.cdi.k8s.io","ClassName":"class-name","ClaimUID":"067798be-454e-4be4-9047-1aa06aea63f7","ClaimName":"example","Namespace":"default","PodUIDs":{"139cdb46-f989-4f17-9561-ca10cfb509a6":{}},"ResourceHandles":[{"driverName":"test-driver-1.cdi.k8s.io","data":"{\"a\": \"b\"}"},{"driverName":"test-driver-2.cdi.k8s.io","data":"{\"c\": \"d\"}"}],"CDIDevices":{"test-driver-1.cdi.k8s.io":["example-1.com/example-1=cdi-example-1"],"test-driver-2.cdi.k8s.io":["example-2.com/example-2=cdi-example-2"]}}],"checksum":1466990255}`,
"",
[]ClaimInfoState{
{
@@ -105,7 +112,7 @@ func TestCheckpointGetOrCreate(t *testing.T) {
},
{
"Restore checkpoint - multiple claims",
`{"version":"v1","entries":[{"DriverName":"test-driver.cdi.k8s.io","ClassName":"class-name-1","ClaimUID":"067798be-454e-4be4-9047-1aa06aea63f7","ClaimName":"example-1","Namespace":"default","PodUIDs":{"139cdb46-f989-4f17-9561-ca10cfb509a6":{}},"ResourceHandles":[{"driverName":"test-driver.cdi.k8s.io","data":"{\"a\": \"b\"}"}],"CDIDevices":{"test-driver.cdi.k8s.io":["example.com/example=cdi-example-1"]}},{"DriverName":"test-driver.cdi.k8s.io","ClassName":"class-name-2","ClaimUID":"4cf8db2d-06c0-7d70-1a51-e59b25b2c16c","ClaimName":"example-2","Namespace":"default","PodUIDs":{"139cdb46-f989-4f17-9561-ca10cfb509a6":{}},"ResourceHandles":[{"driverName":"test-driver.cdi.k8s.io","data":"{\"c\": \"d\"}"}],"CDIDevices":{"test-driver.cdi.k8s.io":["example.com/example=cdi-example-2"]}}],"checksum":103176902}`,
`{"version":"v1","entries":[{"DriverName":"test-driver.cdi.k8s.io","ClassName":"class-name-1","ClaimUID":"067798be-454e-4be4-9047-1aa06aea63f7","ClaimName":"example-1","Namespace":"default","PodUIDs":{"139cdb46-f989-4f17-9561-ca10cfb509a6":{}},"ResourceHandles":[{"driverName":"test-driver.cdi.k8s.io","data":"{\"a\": \"b\"}"}],"CDIDevices":{"test-driver.cdi.k8s.io":["example.com/example=cdi-example-1"]}},{"DriverName":"test-driver.cdi.k8s.io","ClassName":"class-name-2","ClaimUID":"4cf8db2d-06c0-7d70-1a51-e59b25b2c16c","ClaimName":"example-2","Namespace":"default","PodUIDs":{"139cdb46-f989-4f17-9561-ca10cfb509a6":{}},"ResourceHandles":[{"driverName":"test-driver.cdi.k8s.io","data":"{\"c\": \"d\"}"}],"CDIDevices":{"test-driver.cdi.k8s.io":["example.com/example=cdi-example-2"]}}],"checksum":471181742}`,
"",
[]ClaimInfoState{
{
@@ -218,7 +225,7 @@ func TestCheckpointStateStore(t *testing.T) {
},
}
expectedCheckpoint := `{"version":"v1","entries":[{"DriverName":"test-driver.cdi.k8s.io","ClassName":"class-name","ClaimUID":"067798be-454e-4be4-9047-1aa06aea63f7","ClaimName":"example","Namespace":"default","PodUIDs":{"139cdb46-f989-4f17-9561-ca10cfb509a6":{}},"ResourceHandles":[{"driverName":"test-driver.cdi.k8s.io","data":"{\"a\": \"b\"}"}],"CDIDevices":{"test-driver.cdi.k8s.io":["example.com/example=cdi-example"]}}],"checksum":4194867564}`
expectedCheckpoint := `{"version":"v1","entries":[{"DriverName":"test-driver.cdi.k8s.io","ClassName":"class-name","ClaimUID":"067798be-454e-4be4-9047-1aa06aea63f7","ClaimName":"example","Namespace":"default","PodUIDs":{"139cdb46-f989-4f17-9561-ca10cfb509a6":{}},"ResourceHandles":[{"driverName":"test-driver.cdi.k8s.io","data":"{\"a\": \"b\"}"}],"CDIDevices":{"test-driver.cdi.k8s.io":["example.com/example=cdi-example"]}}],"checksum":113577689}`
// Should return an error, stateDir cannot be an empty string
if _, err := NewCheckpointState("", testingCheckpoint); err == nil {

View File

@@ -1561,7 +1561,7 @@ func (kl *Kubelet) initializeRuntimeDependentModules() {
kl.pluginManager.AddHandler(pluginwatcherapi.CSIPlugin, plugincache.PluginHandler(csi.PluginHandler))
// Adding Registration Callback function for DRA Plugin
if utilfeature.DefaultFeatureGate.Enabled(features.DynamicResourceAllocation) {
kl.pluginManager.AddHandler(pluginwatcherapi.DRAPlugin, plugincache.PluginHandler(draplugin.NewRegistrationHandler()))
kl.pluginManager.AddHandler(pluginwatcherapi.DRAPlugin, plugincache.PluginHandler(draplugin.NewRegistrationHandler(kl.kubeClient, kl.hostname)))
}
// Adding Registration Callback function for Device Manager
kl.pluginManager.AddHandler(pluginwatcherapi.DevicePlugin, kl.containerManager.GetPluginRegistrationHandler())

View File

@@ -656,6 +656,31 @@ func AddHandlers(h printers.PrintHandler) {
_ = h.TableHandler(podSchedulingCtxColumnDefinitions, printPodSchedulingContext)
_ = h.TableHandler(podSchedulingCtxColumnDefinitions, printPodSchedulingContextList)
resourceClaimParametersColumnDefinitions := []metav1.TableColumnDefinition{
{Name: "Name", Type: "string", Format: "name", Description: metav1.ObjectMeta{}.SwaggerDoc()["name"]},
{Name: "GeneratedFrom", Type: "string", Description: resourcev1alpha2.ResourceClaimParameters{}.SwaggerDoc()["generatedFrom"]},
{Name: "Age", Type: "string", Description: metav1.ObjectMeta{}.SwaggerDoc()["creationTimestamp"]},
}
_ = h.TableHandler(resourceClaimParametersColumnDefinitions, printResourceClaimParameters)
_ = h.TableHandler(resourceClaimParametersColumnDefinitions, printResourceClaimParametersList)
resourceClassParametersColumnDefinitions := []metav1.TableColumnDefinition{
{Name: "Name", Type: "string", Format: "name", Description: metav1.ObjectMeta{}.SwaggerDoc()["name"]},
{Name: "GeneratedFrom", Type: "string", Description: resourcev1alpha2.ResourceClassParameters{}.SwaggerDoc()["generatedFrom"]},
{Name: "Age", Type: "string", Description: metav1.ObjectMeta{}.SwaggerDoc()["creationTimestamp"]},
}
_ = h.TableHandler(resourceClassParametersColumnDefinitions, printResourceClassParameters)
_ = h.TableHandler(resourceClassParametersColumnDefinitions, printResourceClassParametersList)
resourceSliceColumnDefinitions := []metav1.TableColumnDefinition{
{Name: "Name", Type: "string", Format: "name", Description: metav1.ObjectMeta{}.SwaggerDoc()["name"]},
{Name: "Node", Type: "string", Description: resourcev1alpha2.ResourceSlice{}.SwaggerDoc()["nodeName"]},
{Name: "Driver", Type: "string", Description: resourcev1alpha2.ResourceSlice{}.SwaggerDoc()["driverName"]},
{Name: "Age", Type: "string", Description: metav1.ObjectMeta{}.SwaggerDoc()["creationTimestamp"]},
}
_ = h.TableHandler(resourceSliceColumnDefinitions, printResourceSlice)
_ = h.TableHandler(resourceSliceColumnDefinitions, printResourceSliceList)
serviceCIDRColumnDefinitions := []metav1.TableColumnDefinition{
{Name: "Name", Type: "string", Format: "name", Description: metav1.ObjectMeta{}.SwaggerDoc()["name"]},
{Name: "CIDRs", Type: "string", Description: networkingv1alpha1.ServiceCIDRSpec{}.SwaggerDoc()["cidrs"]},
@@ -3046,6 +3071,77 @@ func printPodSchedulingContextList(list *resource.PodSchedulingContextList, opti
return rows, nil
}
func printResourceClaimParameters(obj *resource.ResourceClaimParameters, options printers.GenerateOptions) ([]metav1.TableRow, error) {
row := metav1.TableRow{
Object: runtime.RawExtension{Object: obj},
}
generatedFrom := ""
if obj.GeneratedFrom != nil {
generatedFrom = fmt.Sprintf("%s.%s %s", obj.GeneratedFrom.Kind, obj.GeneratedFrom.APIGroup, obj.GeneratedFrom.Name)
}
row.Cells = append(row.Cells, obj.Name, generatedFrom, translateTimestampSince(obj.CreationTimestamp))
return []metav1.TableRow{row}, nil
}
func printResourceClaimParametersList(list *resource.ResourceClaimParametersList, options printers.GenerateOptions) ([]metav1.TableRow, error) {
rows := make([]metav1.TableRow, 0, len(list.Items))
for i := range list.Items {
r, err := printResourceClaimParameters(&list.Items[i], options)
if err != nil {
return nil, err
}
rows = append(rows, r...)
}
return rows, nil
}
func printResourceClassParameters(obj *resource.ResourceClassParameters, options printers.GenerateOptions) ([]metav1.TableRow, error) {
row := metav1.TableRow{
Object: runtime.RawExtension{Object: obj},
}
generatedFrom := ""
if obj.GeneratedFrom != nil {
generatedFrom = fmt.Sprintf("%s.%s %s", obj.GeneratedFrom.Kind, obj.GeneratedFrom.APIGroup, obj.GeneratedFrom.Name)
}
row.Cells = append(row.Cells, obj.Name, generatedFrom, translateTimestampSince(obj.CreationTimestamp))
return []metav1.TableRow{row}, nil
}
func printResourceClassParametersList(list *resource.ResourceClassParametersList, options printers.GenerateOptions) ([]metav1.TableRow, error) {
rows := make([]metav1.TableRow, 0, len(list.Items))
for i := range list.Items {
r, err := printResourceClassParameters(&list.Items[i], options)
if err != nil {
return nil, err
}
rows = append(rows, r...)
}
return rows, nil
}
func printResourceSlice(obj *resource.ResourceSlice, options printers.GenerateOptions) ([]metav1.TableRow, error) {
row := metav1.TableRow{
Object: runtime.RawExtension{Object: obj},
}
row.Cells = append(row.Cells, obj.Name, obj.NodeName, obj.DriverName, translateTimestampSince(obj.CreationTimestamp))
return []metav1.TableRow{row}, nil
}
func printResourceSliceList(list *resource.ResourceSliceList, options printers.GenerateOptions) ([]metav1.TableRow, error) {
rows := make([]metav1.TableRow, 0, len(list.Items))
for i := range list.Items {
r, err := printResourceSlice(&list.Items[i], options)
if err != nil {
return nil, err
}
rows = append(rows, r...)
}
return rows, nil
}
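The GeneratedFrom column added above renders a parameters reference as "<kind>.<group> <name>" and stays empty when no reference is set. A tiny hedged sketch of that formatting, with hypothetical values:

// Hedged sketch of the GeneratedFrom cell format used by the new printers;
// the kind, group and name are made up.
package main

import "fmt"

func main() {
    kind, group, name := "GpuClaimParameters", "gpu.example.com", "fast-gpu"
    cell := fmt.Sprintf("%s.%s %s", kind, group, name)
    fmt.Println(cell) // GpuClaimParameters.gpu.example.com fast-gpu
}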
func printBoolPtr(value *bool) string {
if value != nil {
return printBool(*value)

View File

@@ -0,0 +1,57 @@
/*
Copyright 2022 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package storage
import (
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apiserver/pkg/registry/generic"
genericregistry "k8s.io/apiserver/pkg/registry/generic/registry"
"k8s.io/kubernetes/pkg/apis/resource"
"k8s.io/kubernetes/pkg/printers"
printersinternal "k8s.io/kubernetes/pkg/printers/internalversion"
printerstorage "k8s.io/kubernetes/pkg/printers/storage"
"k8s.io/kubernetes/pkg/registry/resource/resourceclaimparameters"
)
// REST implements a RESTStorage for ResourceClaimParameters.
type REST struct {
*genericregistry.Store
}
// NewREST returns a RESTStorage object that will work against ResourceClaimParameters.
func NewREST(optsGetter generic.RESTOptionsGetter) (*REST, error) {
store := &genericregistry.Store{
NewFunc: func() runtime.Object { return &resource.ResourceClaimParameters{} },
NewListFunc: func() runtime.Object { return &resource.ResourceClaimParametersList{} },
PredicateFunc: resourceclaimparameters.Match,
DefaultQualifiedResource: resource.Resource("resourceclaimparameters"),
SingularQualifiedResource: resource.Resource("resourceclaimparameters"),
CreateStrategy: resourceclaimparameters.Strategy,
UpdateStrategy: resourceclaimparameters.Strategy,
DeleteStrategy: resourceclaimparameters.Strategy,
ReturnDeletedObject: true,
TableConvertor: printerstorage.TableConvertor{TableGenerator: printers.NewTableGenerator().With(printersinternal.AddHandlers)},
}
options := &generic.StoreOptions{RESTOptions: optsGetter, AttrFunc: resourceclaimparameters.GetAttrs}
if err := store.CompleteWithOptions(options); err != nil {
return nil, err
}
return &REST{store}, nil
}

View File

@@ -0,0 +1,145 @@
/*
Copyright 2022 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package storage
import (
"testing"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/fields"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apiserver/pkg/registry/generic"
genericregistrytest "k8s.io/apiserver/pkg/registry/generic/testing"
etcd3testing "k8s.io/apiserver/pkg/storage/etcd3/testing"
"k8s.io/kubernetes/pkg/apis/resource"
_ "k8s.io/kubernetes/pkg/apis/resource/install"
"k8s.io/kubernetes/pkg/registry/registrytest"
)
func newStorage(t *testing.T) (*REST, *etcd3testing.EtcdTestServer) {
etcdStorage, server := registrytest.NewEtcdStorage(t, resource.GroupName)
restOptions := generic.RESTOptions{
StorageConfig: etcdStorage,
Decorator: generic.UndecoratedStorage,
DeleteCollectionWorkers: 1,
ResourcePrefix: "resourceclaimparameters",
}
resourceClassStorage, err := NewREST(restOptions)
if err != nil {
t.Fatalf("unexpected error from REST storage: %v", err)
}
return resourceClassStorage, server
}
func validNewResourceClaimParameters(name string) *resource.ResourceClaimParameters {
return &resource.ResourceClaimParameters{
ObjectMeta: metav1.ObjectMeta{
Name: name,
Namespace: metav1.NamespaceDefault,
},
}
}
func TestCreate(t *testing.T) {
storage, server := newStorage(t)
defer server.Terminate(t)
defer storage.Store.DestroyFunc()
test := genericregistrytest.New(t, storage.Store)
resourceClass := validNewResourceClaimParameters("foo")
resourceClass.ObjectMeta = metav1.ObjectMeta{}
test.TestCreate(
// valid
resourceClass,
// invalid
&resource.ResourceClaimParameters{
ObjectMeta: metav1.ObjectMeta{Name: "*BadName!"},
},
)
}
func TestUpdate(t *testing.T) {
storage, server := newStorage(t)
defer server.Terminate(t)
defer storage.Store.DestroyFunc()
test := genericregistrytest.New(t, storage.Store)
test.TestUpdate(
// valid
validNewResourceClaimParameters("foo"),
// updateFunc
func(obj runtime.Object) runtime.Object {
object := obj.(*resource.ResourceClaimParameters)
object.Labels = map[string]string{"foo": "bar"}
return object
},
// invalid update
func(obj runtime.Object) runtime.Object {
object := obj.(*resource.ResourceClaimParameters)
object.Labels = map[string]string{"&$^^#%@": "1"}
return object
},
)
}
func TestDelete(t *testing.T) {
storage, server := newStorage(t)
defer server.Terminate(t)
defer storage.Store.DestroyFunc()
test := genericregistrytest.New(t, storage.Store).ReturnDeletedObject()
test.TestDelete(validNewResourceClaimParameters("foo"))
}
func TestGet(t *testing.T) {
storage, server := newStorage(t)
defer server.Terminate(t)
defer storage.Store.DestroyFunc()
test := genericregistrytest.New(t, storage.Store)
test.TestGet(validNewResourceClaimParameters("foo"))
}
func TestList(t *testing.T) {
storage, server := newStorage(t)
defer server.Terminate(t)
defer storage.Store.DestroyFunc()
test := genericregistrytest.New(t, storage.Store)
test.TestList(validNewResourceClaimParameters("foo"))
}
func TestWatch(t *testing.T) {
storage, server := newStorage(t)
defer server.Terminate(t)
defer storage.Store.DestroyFunc()
test := genericregistrytest.New(t, storage.Store)
test.TestWatch(
validNewResourceClaimParameters("foo"),
// matching labels
[]labels.Set{},
// not matching labels
[]labels.Set{
{"foo": "bar"},
},
// matching fields
[]fields.Set{
{"metadata.name": "foo"},
},
// not matching fields
[]fields.Set{
{"metadata.name": "bar"},
},
)
}

View File

@@ -0,0 +1,103 @@
/*
Copyright 2022 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package resourceclaimparameters
import (
"context"
"errors"
"k8s.io/apimachinery/pkg/fields"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/util/validation/field"
"k8s.io/apiserver/pkg/registry/generic"
"k8s.io/apiserver/pkg/storage"
"k8s.io/apiserver/pkg/storage/names"
"k8s.io/kubernetes/pkg/api/legacyscheme"
"k8s.io/kubernetes/pkg/apis/resource"
"k8s.io/kubernetes/pkg/apis/resource/validation"
)
// resourceClaimParametersStrategy implements behavior for ResourceClaimParameters objects
type resourceClaimParametersStrategy struct {
runtime.ObjectTyper
names.NameGenerator
}
var Strategy = resourceClaimParametersStrategy{legacyscheme.Scheme, names.SimpleNameGenerator}
func (resourceClaimParametersStrategy) NamespaceScoped() bool {
return true
}
func (resourceClaimParametersStrategy) PrepareForCreate(ctx context.Context, obj runtime.Object) {
}
func (resourceClaimParametersStrategy) Validate(ctx context.Context, obj runtime.Object) field.ErrorList {
resourceClaimParameters := obj.(*resource.ResourceClaimParameters)
return validation.ValidateResourceClaimParameters(resourceClaimParameters)
}
func (resourceClaimParametersStrategy) WarningsOnCreate(ctx context.Context, obj runtime.Object) []string {
return nil
}
func (resourceClaimParametersStrategy) Canonicalize(obj runtime.Object) {
}
func (resourceClaimParametersStrategy) AllowCreateOnUpdate() bool {
return false
}
func (resourceClaimParametersStrategy) PrepareForUpdate(ctx context.Context, obj, old runtime.Object) {
}
func (resourceClaimParametersStrategy) ValidateUpdate(ctx context.Context, obj, old runtime.Object) field.ErrorList {
return validation.ValidateResourceClaimParametersUpdate(obj.(*resource.ResourceClaimParameters), old.(*resource.ResourceClaimParameters))
}
func (resourceClaimParametersStrategy) WarningsOnUpdate(ctx context.Context, obj, old runtime.Object) []string {
return nil
}
func (resourceClaimParametersStrategy) AllowUnconditionalUpdate() bool {
return true
}
// Match returns a generic matcher for a given label and field selector.
func Match(label labels.Selector, field fields.Selector) storage.SelectionPredicate {
return storage.SelectionPredicate{
Label: label,
Field: field,
GetAttrs: GetAttrs,
}
}
// GetAttrs returns labels and fields of a given object for filtering purposes.
func GetAttrs(obj runtime.Object) (labels.Set, fields.Set, error) {
parameters, ok := obj.(*resource.ResourceClaimParameters)
if !ok {
return nil, nil, errors.New("not a resourceclaimparameters")
}
return labels.Set(parameters.Labels), toSelectableFields(parameters), nil
}
// toSelectableFields returns a field set that represents the object
func toSelectableFields(claim *resource.ResourceClaimParameters) fields.Set {
fields := generic.ObjectMetaFieldsSet(&claim.ObjectMeta, true)
return fields
}
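Match wires GetAttrs into a storage.SelectionPredicate, so list and watch requests can filter ResourceClaimParameters by labels and by the generic metadata fields that toSelectableFields exposes. Below is a hedged usage sketch, written as an example test in this package; it assumes fmt and metav1 are also imported, and the selectors and object values are hypothetical.

// Hedged sketch of using the predicate returned by Match; values are made up.
func ExampleMatch() {
    pred := Match(
        labels.SelectorFromSet(labels.Set{"team": "ml"}),
        fields.OneTermEqualSelector("metadata.name", "gpu-params"),
    )
    obj := &resource.ResourceClaimParameters{
        ObjectMeta: metav1.ObjectMeta{
            Name:      "gpu-params",
            Namespace: "default",
            Labels:    map[string]string{"team": "ml"},
        },
    }
    matched, err := pred.Matches(obj)
    fmt.Println(matched, err)
    // Output: true <nil>
}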

View File

@@ -0,0 +1,81 @@
/*
Copyright 2022 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package resourceclaimparameters
import (
"testing"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
genericapirequest "k8s.io/apiserver/pkg/endpoints/request"
"k8s.io/kubernetes/pkg/apis/resource"
)
var resourceClaimParameters = &resource.ResourceClaimParameters{
ObjectMeta: metav1.ObjectMeta{
Name: "valid",
Namespace: "ns",
},
}
func TestClassStrategy(t *testing.T) {
if !Strategy.NamespaceScoped() {
t.Errorf("ResourceClaimParameters must be namespace scoped")
}
if Strategy.AllowCreateOnUpdate() {
t.Errorf("ResourceClaimParameters should not allow create on update")
}
}
func TestClassStrategyCreate(t *testing.T) {
ctx := genericapirequest.NewDefaultContext()
resourceClaimParameters := resourceClaimParameters.DeepCopy()
Strategy.PrepareForCreate(ctx, resourceClaimParameters)
errs := Strategy.Validate(ctx, resourceClaimParameters)
if len(errs) != 0 {
t.Errorf("unexpected error validating for create %v", errs)
}
}
func TestClassStrategyUpdate(t *testing.T) {
t.Run("no-changes-okay", func(t *testing.T) {
ctx := genericapirequest.NewDefaultContext()
resourceClaimParameters := resourceClaimParameters.DeepCopy()
newObj := resourceClaimParameters.DeepCopy()
newObj.ResourceVersion = "4"
Strategy.PrepareForUpdate(ctx, newObj, resourceClaimParameters)
errs := Strategy.ValidateUpdate(ctx, newObj, resourceClaimParameters)
if len(errs) != 0 {
t.Errorf("unexpected validation errors: %v", errs)
}
})
t.Run("name-change-not-allowed", func(t *testing.T) {
ctx := genericapirequest.NewDefaultContext()
resourceClaimParameters := resourceClaimParameters.DeepCopy()
newObj := resourceClaimParameters.DeepCopy()
newObj.Name += "-2"
newObj.ResourceVersion = "4"
Strategy.PrepareForUpdate(ctx, newObj, resourceClaimParameters)
errs := Strategy.ValidateUpdate(ctx, newObj, resourceClaimParameters)
if len(errs) == 0 {
t.Errorf("expected a validation error")
}
})
}

View File

@@ -0,0 +1,57 @@
/*
Copyright 2022 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package storage
import (
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apiserver/pkg/registry/generic"
genericregistry "k8s.io/apiserver/pkg/registry/generic/registry"
"k8s.io/kubernetes/pkg/apis/resource"
"k8s.io/kubernetes/pkg/printers"
printersinternal "k8s.io/kubernetes/pkg/printers/internalversion"
printerstorage "k8s.io/kubernetes/pkg/printers/storage"
"k8s.io/kubernetes/pkg/registry/resource/resourceclassparameters"
)
// REST implements a RESTStorage for ResourceClassParameters.
type REST struct {
*genericregistry.Store
}
// NewREST returns a RESTStorage object that will work against ResourceClassParameters.
func NewREST(optsGetter generic.RESTOptionsGetter) (*REST, error) {
store := &genericregistry.Store{
NewFunc: func() runtime.Object { return &resource.ResourceClassParameters{} },
NewListFunc: func() runtime.Object { return &resource.ResourceClassParametersList{} },
PredicateFunc: resourceclassparameters.Match,
DefaultQualifiedResource: resource.Resource("resourceclassparameters"),
SingularQualifiedResource: resource.Resource("resourceclassparameters"),
CreateStrategy: resourceclassparameters.Strategy,
UpdateStrategy: resourceclassparameters.Strategy,
DeleteStrategy: resourceclassparameters.Strategy,
ReturnDeletedObject: true,
TableConvertor: printerstorage.TableConvertor{TableGenerator: printers.NewTableGenerator().With(printersinternal.AddHandlers)},
}
options := &generic.StoreOptions{RESTOptions: optsGetter, AttrFunc: resourceclassparameters.GetAttrs}
if err := store.CompleteWithOptions(options); err != nil {
return nil, err
}
return &REST{store}, nil
}

View File

@@ -0,0 +1,145 @@
/*
Copyright 2022 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package storage
import (
"testing"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/fields"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apiserver/pkg/registry/generic"
genericregistrytest "k8s.io/apiserver/pkg/registry/generic/testing"
etcd3testing "k8s.io/apiserver/pkg/storage/etcd3/testing"
"k8s.io/kubernetes/pkg/apis/resource"
_ "k8s.io/kubernetes/pkg/apis/resource/install"
"k8s.io/kubernetes/pkg/registry/registrytest"
)
func newStorage(t *testing.T) (*REST, *etcd3testing.EtcdTestServer) {
etcdStorage, server := registrytest.NewEtcdStorage(t, resource.GroupName)
restOptions := generic.RESTOptions{
StorageConfig: etcdStorage,
Decorator: generic.UndecoratedStorage,
DeleteCollectionWorkers: 1,
ResourcePrefix: "resourceclassparameters",
}
resourceClassStorage, err := NewREST(restOptions)
if err != nil {
t.Fatalf("unexpected error from REST storage: %v", err)
}
return resourceClassStorage, server
}
func validNewResourceClassParameters(name string) *resource.ResourceClassParameters {
return &resource.ResourceClassParameters{
ObjectMeta: metav1.ObjectMeta{
Name: name,
Namespace: metav1.NamespaceDefault,
},
}
}
func TestCreate(t *testing.T) {
storage, server := newStorage(t)
defer server.Terminate(t)
defer storage.Store.DestroyFunc()
test := genericregistrytest.New(t, storage.Store)
resourceClass := validNewResourceClassParameters("foo")
resourceClass.ObjectMeta = metav1.ObjectMeta{}
test.TestCreate(
// valid
resourceClass,
// invalid
&resource.ResourceClassParameters{
ObjectMeta: metav1.ObjectMeta{Name: "*BadName!"},
},
)
}
func TestUpdate(t *testing.T) {
storage, server := newStorage(t)
defer server.Terminate(t)
defer storage.Store.DestroyFunc()
test := genericregistrytest.New(t, storage.Store)
test.TestUpdate(
// valid
validNewResourceClassParameters("foo"),
// updateFunc
func(obj runtime.Object) runtime.Object {
object := obj.(*resource.ResourceClassParameters)
object.Labels = map[string]string{"foo": "bar"}
return object
},
// invalid update
func(obj runtime.Object) runtime.Object {
object := obj.(*resource.ResourceClassParameters)
object.Labels = map[string]string{"&$^^#%@": "1"}
return object
},
)
}
func TestDelete(t *testing.T) {
storage, server := newStorage(t)
defer server.Terminate(t)
defer storage.Store.DestroyFunc()
test := genericregistrytest.New(t, storage.Store).ReturnDeletedObject()
test.TestDelete(validNewResourceClassParameters("foo"))
}
func TestGet(t *testing.T) {
storage, server := newStorage(t)
defer server.Terminate(t)
defer storage.Store.DestroyFunc()
test := genericregistrytest.New(t, storage.Store)
test.TestGet(validNewResourceClassParameters("foo"))
}
func TestList(t *testing.T) {
storage, server := newStorage(t)
defer server.Terminate(t)
defer storage.Store.DestroyFunc()
test := genericregistrytest.New(t, storage.Store)
test.TestList(validNewResourceClassParameters("foo"))
}
func TestWatch(t *testing.T) {
storage, server := newStorage(t)
defer server.Terminate(t)
defer storage.Store.DestroyFunc()
test := genericregistrytest.New(t, storage.Store)
test.TestWatch(
validNewResourceClassParameters("foo"),
// matching labels
[]labels.Set{},
// not matching labels
[]labels.Set{
{"foo": "bar"},
},
// matching fields
[]fields.Set{
{"metadata.name": "foo"},
},
// not matching fields
[]fields.Set{
{"metadata.name": "bar"},
},
)
}

View File

@@ -0,0 +1,103 @@
/*
Copyright 2022 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package resourceclassparameters
import (
"context"
"errors"
"k8s.io/apimachinery/pkg/fields"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/util/validation/field"
"k8s.io/apiserver/pkg/registry/generic"
"k8s.io/apiserver/pkg/storage"
"k8s.io/apiserver/pkg/storage/names"
"k8s.io/kubernetes/pkg/api/legacyscheme"
"k8s.io/kubernetes/pkg/apis/resource"
"k8s.io/kubernetes/pkg/apis/resource/validation"
)
// resourceClassParametersStrategy implements behavior for ResourceClassParameters objects
type resourceClassParametersStrategy struct {
runtime.ObjectTyper
names.NameGenerator
}
var Strategy = resourceClassParametersStrategy{legacyscheme.Scheme, names.SimpleNameGenerator}
func (resourceClassParametersStrategy) NamespaceScoped() bool {
return true
}
func (resourceClassParametersStrategy) PrepareForCreate(ctx context.Context, obj runtime.Object) {
}
func (resourceClassParametersStrategy) Validate(ctx context.Context, obj runtime.Object) field.ErrorList {
resourceClassParameters := obj.(*resource.ResourceClassParameters)
return validation.ValidateResourceClassParameters(resourceClassParameters)
}
func (resourceClassParametersStrategy) WarningsOnCreate(ctx context.Context, obj runtime.Object) []string {
return nil
}
func (resourceClassParametersStrategy) Canonicalize(obj runtime.Object) {
}
func (resourceClassParametersStrategy) AllowCreateOnUpdate() bool {
return false
}
func (resourceClassParametersStrategy) PrepareForUpdate(ctx context.Context, obj, old runtime.Object) {
}
func (resourceClassParametersStrategy) ValidateUpdate(ctx context.Context, obj, old runtime.Object) field.ErrorList {
return validation.ValidateResourceClassParametersUpdate(obj.(*resource.ResourceClassParameters), old.(*resource.ResourceClassParameters))
}
func (resourceClassParametersStrategy) WarningsOnUpdate(ctx context.Context, obj, old runtime.Object) []string {
return nil
}
func (resourceClassParametersStrategy) AllowUnconditionalUpdate() bool {
return true
}
// Match returns a generic matcher for a given label and field selector.
func Match(label labels.Selector, field fields.Selector) storage.SelectionPredicate {
return storage.SelectionPredicate{
Label: label,
Field: field,
GetAttrs: GetAttrs,
}
}
// GetAttrs returns labels and fields of a given object for filtering purposes.
func GetAttrs(obj runtime.Object) (labels.Set, fields.Set, error) {
parameters, ok := obj.(*resource.ResourceClassParameters)
if !ok {
return nil, nil, errors.New("not a resourceclassparameters")
}
return labels.Set(parameters.Labels), toSelectableFields(parameters), nil
}
// toSelectableFields returns a field set that represents the object
func toSelectableFields(class *resource.ResourceClassParameters) fields.Set {
fields := generic.ObjectMetaFieldsSet(&class.ObjectMeta, true)
return fields
}

View File

@@ -0,0 +1,81 @@
/*
Copyright 2022 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package resourceclassparameters
import (
"testing"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
genericapirequest "k8s.io/apiserver/pkg/endpoints/request"
"k8s.io/kubernetes/pkg/apis/resource"
)
var resourceClassParameters = &resource.ResourceClassParameters{
ObjectMeta: metav1.ObjectMeta{
Name: "valid",
Namespace: "ns",
},
}
func TestClassStrategy(t *testing.T) {
if !Strategy.NamespaceScoped() {
t.Errorf("ResourceClassParameters must be namespace scoped")
}
if Strategy.AllowCreateOnUpdate() {
t.Errorf("ResourceClassParameters should not allow create on update")
}
}
func TestClassStrategyCreate(t *testing.T) {
ctx := genericapirequest.NewDefaultContext()
resourceClassParameters := resourceClassParameters.DeepCopy()
Strategy.PrepareForCreate(ctx, resourceClassParameters)
errs := Strategy.Validate(ctx, resourceClassParameters)
if len(errs) != 0 {
t.Errorf("unexpected error validating for create %v", errs)
}
}
func TestClassStrategyUpdate(t *testing.T) {
t.Run("no-changes-okay", func(t *testing.T) {
ctx := genericapirequest.NewDefaultContext()
resourceClassParameters := resourceClassParameters.DeepCopy()
newObj := resourceClassParameters.DeepCopy()
newObj.ResourceVersion = "4"
Strategy.PrepareForUpdate(ctx, newObj, resourceClassParameters)
errs := Strategy.ValidateUpdate(ctx, newObj, resourceClassParameters)
if len(errs) != 0 {
t.Errorf("unexpected validation errors: %v", errs)
}
})
t.Run("name-change-not-allowed", func(t *testing.T) {
ctx := genericapirequest.NewDefaultContext()
resourceClassParameters := resourceClassParameters.DeepCopy()
newObj := resourceClassParameters.DeepCopy()
newObj.Name += "-2"
newObj.ResourceVersion = "4"
Strategy.PrepareForUpdate(ctx, newObj, resourceClassParameters)
errs := Strategy.ValidateUpdate(ctx, newObj, resourceClassParameters)
if len(errs) == 0 {
t.Errorf("expected a validation error")
}
})
}

View File

@@ -0,0 +1,62 @@
/*
Copyright 2022 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package storage
import (
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apiserver/pkg/registry/generic"
genericregistry "k8s.io/apiserver/pkg/registry/generic/registry"
"k8s.io/kubernetes/pkg/apis/resource"
"k8s.io/kubernetes/pkg/printers"
printersinternal "k8s.io/kubernetes/pkg/printers/internalversion"
printerstorage "k8s.io/kubernetes/pkg/printers/storage"
"k8s.io/kubernetes/pkg/registry/resource/resourceslice"
)
// REST implements a RESTStorage for ResourceSlice.
type REST struct {
*genericregistry.Store
}
// NewREST returns a RESTStorage object that will work against ResourceSlice.
func NewREST(optsGetter generic.RESTOptionsGetter) (*REST, error) {
store := &genericregistry.Store{
NewFunc: func() runtime.Object { return &resource.ResourceSlice{} },
NewListFunc: func() runtime.Object { return &resource.ResourceSliceList{} },
PredicateFunc: resourceslice.Match,
DefaultQualifiedResource: resource.Resource("resourceslices"),
SingularQualifiedResource: resource.Resource("resourceslice"),
CreateStrategy: resourceslice.Strategy,
UpdateStrategy: resourceslice.Strategy,
DeleteStrategy: resourceslice.Strategy,
ReturnDeletedObject: true,
TableConvertor: printerstorage.TableConvertor{TableGenerator: printers.NewTableGenerator().With(printersinternal.AddHandlers)},
}
options := &generic.StoreOptions{
RESTOptions: optsGetter,
AttrFunc: resourceslice.GetAttrs,
TriggerFunc: resourceslice.TriggerFunc,
Indexers: resourceslice.Indexers(),
}
if err := store.CompleteWithOptions(options); err != nil {
return nil, err
}
return &REST{store}, nil
}
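Because this storage plugs in GetAttrs, TriggerFunc, and Indexers from the resourceslice package, the nodeName field selector used by the kubelet informer earlier in this diff can be served efficiently. A hedged client-side sketch of the same filtered list follows; the helper function is illustrative and assumes the usual imports (context, fmt, metav1, k8s.io/client-go/kubernetes).

// Hedged sketch: list only the ResourceSlices published for one node, using
// the same field selector as the kubelet informer above.
func listNodeSlices(ctx context.Context, kubeClient kubernetes.Interface, nodeName string) error {
    slices, err := kubeClient.ResourceV1alpha2().ResourceSlices().List(ctx, metav1.ListOptions{
        FieldSelector: "nodeName=" + nodeName,
    })
    if err != nil {
        return err
    }
    for i := range slices.Items {
        fmt.Println(slices.Items[i].Name, slices.Items[i].DriverName)
    }
    return nil
}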

View File

@@ -0,0 +1,149 @@
/*
Copyright 2022 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package storage
import (
"testing"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/fields"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apiserver/pkg/registry/generic"
genericregistrytest "k8s.io/apiserver/pkg/registry/generic/testing"
etcd3testing "k8s.io/apiserver/pkg/storage/etcd3/testing"
"k8s.io/kubernetes/pkg/apis/resource"
_ "k8s.io/kubernetes/pkg/apis/resource/install"
"k8s.io/kubernetes/pkg/registry/registrytest"
)
func newStorage(t *testing.T) (*REST, *etcd3testing.EtcdTestServer) {
etcdStorage, server := registrytest.NewEtcdStorage(t, resource.GroupName)
restOptions := generic.RESTOptions{
StorageConfig: etcdStorage,
Decorator: generic.UndecoratedStorage,
DeleteCollectionWorkers: 1,
ResourcePrefix: "resourceslices",
}
resourceClassStorage, err := NewREST(restOptions)
if err != nil {
t.Fatalf("unexpected error from REST storage: %v", err)
}
return resourceClassStorage, server
}
func validNewResourceSlice(name string) *resource.ResourceSlice {
return &resource.ResourceSlice{
ObjectMeta: metav1.ObjectMeta{
Name: name,
},
NodeName: name,
DriverName: "cdi.example.com",
NodeResourceModel: resource.NodeResourceModel{
NamedResources: &resource.NamedResourcesResources{},
},
}
}
func TestCreate(t *testing.T) {
storage, server := newStorage(t)
defer server.Terminate(t)
defer storage.Store.DestroyFunc()
test := genericregistrytest.New(t, storage.Store).ClusterScope()
resourceClass := validNewResourceSlice("foo")
resourceClass.ObjectMeta = metav1.ObjectMeta{GenerateName: "foo"}
test.TestCreate(
// valid
resourceClass,
// invalid
&resource.ResourceSlice{
ObjectMeta: metav1.ObjectMeta{Name: "*BadName!"},
},
)
}
func TestUpdate(t *testing.T) {
storage, server := newStorage(t)
defer server.Terminate(t)
defer storage.Store.DestroyFunc()
test := genericregistrytest.New(t, storage.Store).ClusterScope()
test.TestUpdate(
// valid
validNewResourceSlice("foo"),
// updateFunc
func(obj runtime.Object) runtime.Object {
object := obj.(*resource.ResourceSlice)
object.Labels = map[string]string{"foo": "bar"}
return object
},
// invalid update
func(obj runtime.Object) runtime.Object {
object := obj.(*resource.ResourceSlice)
object.DriverName = ""
return object
},
)
}
func TestDelete(t *testing.T) {
storage, server := newStorage(t)
defer server.Terminate(t)
defer storage.Store.DestroyFunc()
test := genericregistrytest.New(t, storage.Store).ClusterScope().ReturnDeletedObject()
test.TestDelete(validNewResourceSlice("foo"))
}
func TestGet(t *testing.T) {
storage, server := newStorage(t)
defer server.Terminate(t)
defer storage.Store.DestroyFunc()
test := genericregistrytest.New(t, storage.Store).ClusterScope()
test.TestGet(validNewResourceSlice("foo"))
}
func TestList(t *testing.T) {
storage, server := newStorage(t)
defer server.Terminate(t)
defer storage.Store.DestroyFunc()
test := genericregistrytest.New(t, storage.Store).ClusterScope()
test.TestList(validNewResourceSlice("foo"))
}
func TestWatch(t *testing.T) {
storage, server := newStorage(t)
defer server.Terminate(t)
defer storage.Store.DestroyFunc()
test := genericregistrytest.New(t, storage.Store).ClusterScope()
test.TestWatch(
validNewResourceSlice("foo"),
// matching labels
[]labels.Set{},
// not matching labels
[]labels.Set{
{"foo": "bar"},
},
// matching fields
[]fields.Set{
{"metadata.name": "foo"},
},
// not matching fields
[]fields.Set{
{"metadata.name": "bar"},
},
)
}

View File

@@ -0,0 +1,139 @@
/*
Copyright 2022 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package resourceslice
import (
"context"
"fmt"
"k8s.io/apimachinery/pkg/fields"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/util/validation/field"
"k8s.io/apiserver/pkg/registry/generic"
"k8s.io/apiserver/pkg/storage"
"k8s.io/apiserver/pkg/storage/names"
"k8s.io/client-go/tools/cache"
"k8s.io/kubernetes/pkg/api/legacyscheme"
"k8s.io/kubernetes/pkg/apis/resource"
"k8s.io/kubernetes/pkg/apis/resource/validation"
)
// resourceSliceStrategy implements behavior for ResourceSlice objects
type resourceSliceStrategy struct {
runtime.ObjectTyper
names.NameGenerator
}
var Strategy = resourceSliceStrategy{legacyscheme.Scheme, names.SimpleNameGenerator}
func (resourceSliceStrategy) NamespaceScoped() bool {
return false
}
func (resourceSliceStrategy) PrepareForCreate(ctx context.Context, obj runtime.Object) {
}
func (resourceSliceStrategy) Validate(ctx context.Context, obj runtime.Object) field.ErrorList {
slice := obj.(*resource.ResourceSlice)
return validation.ValidateResourceSlice(slice)
}
func (resourceSliceStrategy) WarningsOnCreate(ctx context.Context, obj runtime.Object) []string {
return nil
}
func (resourceSliceStrategy) Canonicalize(obj runtime.Object) {
}
func (resourceSliceStrategy) AllowCreateOnUpdate() bool {
return false
}
func (resourceSliceStrategy) PrepareForUpdate(ctx context.Context, obj, old runtime.Object) {
}
func (resourceSliceStrategy) ValidateUpdate(ctx context.Context, obj, old runtime.Object) field.ErrorList {
return validation.ValidateResourceSliceUpdate(obj.(*resource.ResourceSlice), old.(*resource.ResourceSlice))
}
func (resourceSliceStrategy) WarningsOnUpdate(ctx context.Context, obj, old runtime.Object) []string {
return nil
}
func (resourceSliceStrategy) AllowUnconditionalUpdate() bool {
return true
}
var TriggerFunc = map[string]storage.IndexerFunc{
// Only one index is supported:
// https://github.com/kubernetes/kubernetes/blob/3aa8c59fec0bf339e67ca80ea7905c817baeca85/staging/src/k8s.io/apiserver/pkg/storage/cacher/cacher.go#L346-L350
"nodeName": nodeNameTriggerFunc,
}
func nodeNameTriggerFunc(obj runtime.Object) string {
return obj.(*resource.ResourceSlice).NodeName
}
// Indexers returns the indexers for ResourceSlice.
func Indexers() *cache.Indexers {
return &cache.Indexers{
storage.FieldIndex("nodeName"): nodeNameIndexFunc,
}
}
func nodeNameIndexFunc(obj interface{}) ([]string, error) {
slice, ok := obj.(*resource.ResourceSlice)
if !ok {
return nil, fmt.Errorf("not a ResourceSlice")
}
return []string{slice.NodeName}, nil
}
// GetAttrs returns labels and fields of a given object for filtering purposes.
func GetAttrs(obj runtime.Object) (labels.Set, fields.Set, error) {
slice, ok := obj.(*resource.ResourceSlice)
if !ok {
return nil, nil, fmt.Errorf("not a ResourceSlice")
}
return labels.Set(slice.ObjectMeta.Labels), toSelectableFields(slice), nil
}
// Match returns a generic matcher for a given label and field selector.
func Match(label labels.Selector, field fields.Selector) storage.SelectionPredicate {
return storage.SelectionPredicate{
Label: label,
Field: field,
GetAttrs: GetAttrs,
IndexFields: []string{"nodeName"},
}
}
// toSelectableFields returns a field set that represents the object
// TODO: fields are not labels, and the validation rules for them do not apply.
func toSelectableFields(slice *resource.ResourceSlice) fields.Set {
// The purpose of allocating with a given number of elements is to reduce the
// number of allocations needed to create the fields.Set. If you add any
// field here or the number of object-meta related fields changes, this should
// be adjusted.
fields := make(fields.Set, 3)
fields["nodeName"] = slice.NodeName
fields["driverName"] = slice.DriverName
// Adds one field.
return generic.AddObjectMetaFieldsSet(fields, &slice.ObjectMeta, false)
}
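
The selectable fields, indexer and trigger function above are what make node-scoped queries against ResourceSlice cheap: a client can ask the apiserver for only the slices published for one node. A minimal client-side sketch, assuming a standard client-go clientset and a hypothetical node name "worker-1" (illustrative only, not part of this change):

package main

import (
	"context"
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	// Build a client from the local kubeconfig; any other rest.Config works as well.
	config, err := clientcmd.BuildConfigFromFlags("", clientcmd.RecommendedHomeFile)
	if err != nil {
		panic(err)
	}
	clientset := kubernetes.NewForConfigOrDie(config)

	// "nodeName" is one of the selectable fields defined in toSelectableFields above.
	slices, err := clientset.ResourceV1alpha2().ResourceSlices().List(context.Background(),
		metav1.ListOptions{FieldSelector: "nodeName=worker-1"})
	if err != nil {
		panic(err)
	}
	for _, slice := range slices.Items {
		fmt.Println(slice.Name, slice.DriverName)
	}
}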

View File

@@ -0,0 +1,85 @@
/*
Copyright 2022 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package resourceslice
import (
"testing"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
genericapirequest "k8s.io/apiserver/pkg/endpoints/request"
"k8s.io/kubernetes/pkg/apis/resource"
)
var slice = &resource.ResourceSlice{
ObjectMeta: metav1.ObjectMeta{
Name: "valid-class",
},
NodeName: "valid-node-name",
DriverName: "testdriver.example.com",
NodeResourceModel: resource.NodeResourceModel{
NamedResources: &resource.NamedResourcesResources{},
},
}
func TestClassStrategy(t *testing.T) {
if Strategy.NamespaceScoped() {
t.Errorf("ResourceSlice must not be namespace scoped")
}
if Strategy.AllowCreateOnUpdate() {
t.Errorf("ResourceSlice should not allow create on update")
}
}
func TestClassStrategyCreate(t *testing.T) {
ctx := genericapirequest.NewDefaultContext()
slice := slice.DeepCopy()
Strategy.PrepareForCreate(ctx, slice)
errs := Strategy.Validate(ctx, slice)
if len(errs) != 0 {
t.Errorf("unexpected error validating for create %v", errs)
}
}
func TestClassStrategyUpdate(t *testing.T) {
t.Run("no-changes-okay", func(t *testing.T) {
ctx := genericapirequest.NewDefaultContext()
slice := slice.DeepCopy()
newClass := slice.DeepCopy()
newClass.ResourceVersion = "4"
Strategy.PrepareForUpdate(ctx, newClass, slice)
errs := Strategy.ValidateUpdate(ctx, newClass, slice)
if len(errs) != 0 {
t.Errorf("unexpected validation errors: %v", errs)
}
})
t.Run("name-change-not-allowed", func(t *testing.T) {
ctx := genericapirequest.NewDefaultContext()
slice := slice.DeepCopy()
newClass := slice.DeepCopy()
newClass.Name = "valid-class-2"
newClass.ResourceVersion = "4"
Strategy.PrepareForUpdate(ctx, newClass, slice)
errs := Strategy.ValidateUpdate(ctx, newClass, slice)
if len(errs) == 0 {
t.Errorf("expected a validation error")
}
})
}

View File

@@ -26,8 +26,11 @@ import (
"k8s.io/kubernetes/pkg/apis/resource"
podschedulingcontextsstore "k8s.io/kubernetes/pkg/registry/resource/podschedulingcontext/storage"
resourceclaimstore "k8s.io/kubernetes/pkg/registry/resource/resourceclaim/storage"
resourceclaimparametersstore "k8s.io/kubernetes/pkg/registry/resource/resourceclaimparameters/storage"
resourceclaimtemplatestore "k8s.io/kubernetes/pkg/registry/resource/resourceclaimtemplate/storage"
resourceclassstore "k8s.io/kubernetes/pkg/registry/resource/resourceclass/storage"
resourceclassparametersstore "k8s.io/kubernetes/pkg/registry/resource/resourceclassparameters/storage"
resourceslicestore "k8s.io/kubernetes/pkg/registry/resource/resourceslice/storage"
)
type RESTStorageProvider struct{}
@@ -83,6 +86,30 @@ func (p RESTStorageProvider) v1alpha2Storage(apiResourceConfigSource serverstora
storage[resource+"/status"] = podSchedulingStatusStorage
}
if resource := "resourceclaimparameters"; apiResourceConfigSource.ResourceEnabled(resourcev1alpha2.SchemeGroupVersion.WithResource(resource)) {
resourceClaimParametersStorage, err := resourceclaimparametersstore.NewREST(restOptionsGetter)
if err != nil {
return nil, err
}
storage[resource] = resourceClaimParametersStorage
}
if resource := "resourceclassparameters"; apiResourceConfigSource.ResourceEnabled(resourcev1alpha2.SchemeGroupVersion.WithResource(resource)) {
resourceClassParametersStorage, err := resourceclassparametersstore.NewREST(restOptionsGetter)
if err != nil {
return nil, err
}
storage[resource] = resourceClassParametersStorage
}
if resource := "resourceslices"; apiResourceConfigSource.ResourceEnabled(resourcev1alpha2.SchemeGroupVersion.WithResource(resource)) {
resourceSliceStorage, err := resourceslicestore.NewREST(restOptionsGetter)
if err != nil {
return nil, err
}
storage[resource] = resourceSliceStorage
}
return storage, nil
}
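
One note on the gating above: each of the new storages is installed only when the corresponding resource is enabled in the apiserver's resource configuration. Assuming the usual conventions for alpha APIs, that means running kube-apiserver with the DynamicResourceAllocation feature gate enabled and with --runtime-config=resource.k8s.io/v1alpha2=true; otherwise v1alpha2Storage simply skips these entries.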

View File

@@ -472,6 +472,24 @@ func addAllEventHandlers(
}
handlers = append(handlers, handlerRegistration)
}
case framework.ResourceClaimParameters:
if utilfeature.DefaultFeatureGate.Enabled(features.DynamicResourceAllocation) {
if handlerRegistration, err = informerFactory.Resource().V1alpha2().ResourceClaimParameters().Informer().AddEventHandler(
buildEvtResHandler(at, framework.ResourceClaimParameters, "ResourceClaimParameters"),
); err != nil {
return err
}
handlers = append(handlers, handlerRegistration)
}
case framework.ResourceClassParameters:
if utilfeature.DefaultFeatureGate.Enabled(features.DynamicResourceAllocation) {
if handlerRegistration, err = informerFactory.Resource().V1alpha2().ResourceClassParameters().Informer().AddEventHandler(
buildEvtResHandler(at, framework.ResourceClassParameters, "ResourceClassParameters"),
); err != nil {
return err
}
handlers = append(handlers, handlerRegistration)
}
case framework.StorageClass:
if at&framework.Add != 0 {
if handlerRegistration, err = informerFactory.Storage().V1().StorageClasses().Informer().AddEventHandler(

View File

@@ -18,8 +18,10 @@ package dynamicresources
import (
"context"
"encoding/json"
"errors"
"fmt"
"slices"
"sort"
"sync"
@@ -30,6 +32,7 @@ import (
apiequality "k8s.io/apimachinery/pkg/api/equality"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/types"
@@ -43,6 +46,7 @@ import (
"k8s.io/kubernetes/pkg/scheduler/framework"
"k8s.io/kubernetes/pkg/scheduler/framework/plugins/feature"
"k8s.io/kubernetes/pkg/scheduler/framework/plugins/names"
"k8s.io/kubernetes/pkg/scheduler/framework/plugins/volumebinding"
schedutil "k8s.io/kubernetes/pkg/scheduler/util"
"k8s.io/utils/ptr"
)
@@ -70,37 +74,52 @@ type stateData struct {
// Empty if the Pod has no claims.
claims []*resourcev1alpha2.ResourceClaim
// podSchedulingState keeps track of the PodSchedulingContext
// (if one exists) and the changes made to it.
podSchedulingState podSchedulingState
// resources contains the information about available and allocated resources when
// structured parameters are used and the pod needs this information.
resources resources
// mutex must be locked while accessing any of the fields below.
mutex sync.Mutex
// The indices of all claims that:
// - are allocated
// - use delayed allocation
// - use delayed allocation or the builtin controller
// - were not available on at least one node
//
// Set in parallel during Filter, so write access there must be
// protected by the mutex. Used by PostFilter.
unavailableClaims sets.Set[int]
// podSchedulingState keeps track of the PodSchedulingContext
// (if one exists) and the changes made to it.
podSchedulingState podSchedulingState
mutex sync.Mutex
informationsForClaim []informationForClaim
}
func (d *stateData) Clone() framework.StateData {
return d
}
type informationForClaim struct {
// The availableOnNode node filter of the claim converted from the
// v1 API to nodeaffinity.NodeSelector by PreFilter for repeated
// evaluation in Filter. Nil for claims which don't have it.
availableOnNode *nodeaffinity.NodeSelector
// The status of the claim got from the
// schedulingCtx by PreFilter for repeated
// evaluation in Filter. Nil for claims which don't have it.
status *resourcev1alpha2.ResourceClaimSchedulingStatus
}
func (d *stateData) Clone() framework.StateData {
return d
// structuredParameters is true if the claim is handled via the builtin
// controller.
structuredParameters bool
controller *claimController
// Set by Reserved, published by PreBind.
allocation *resourcev1alpha2.AllocationResult
allocationDriverName string
}
type podSchedulingState struct {
@@ -256,23 +275,90 @@ type dynamicResources struct {
claimLister resourcev1alpha2listers.ResourceClaimLister
classLister resourcev1alpha2listers.ResourceClassLister
podSchedulingContextLister resourcev1alpha2listers.PodSchedulingContextLister
claimParametersLister resourcev1alpha2listers.ResourceClaimParametersLister
classParametersLister resourcev1alpha2listers.ResourceClassParametersLister
resourceSliceLister resourcev1alpha2listers.ResourceSliceLister
claimNameLookup *resourceclaim.Lookup
// claimAssumeCache enables temporarily storing a newer claim object
// while the scheduler has allocated it and the corresponding object
// update from the apiserver has not been processed by the claim
// informer callbacks. Claims get added here in PreBind and removed by
// the informer callback (based on the "newer than" comparison in the
// assume cache).
//
// It uses cache.MetaNamespaceKeyFunc to generate object names, which
// therefore are "<namespace>/<name>".
//
// This is necessary to ensure that reconstructing the resource usage
// at the start of a pod scheduling cycle doesn't reuse the resources
// assigned to such a claim. Alternatively, claim allocation state
// could also get tracked across pod scheduling cycles, but that
// - adds complexity (need to carefully sync state with informer events
// for claims and ResourceSlices)
// - would make integration with cluster autoscaler harder because it would need
// to trigger informer callbacks.
//
// When implementing cluster autoscaler support, this assume cache or
// something like it (see https://github.com/kubernetes/kubernetes/pull/112202)
// might have to be managed by the cluster autoscaler.
claimAssumeCache volumebinding.AssumeCache
// inFlightAllocations is a map from claim UUIDs to claim objects for those claims
// for which allocation was triggered during a scheduling cycle and the
// corresponding claim status update call in PreBind has not been done
// yet. If another pod needs the claim, the pod is treated as "not
// schedulable yet". The cluster event for the claim status update will
// make it schedulable.
//
// This mechanism avoids the following problem:
// - Pod A triggers allocation for claim X.
// - Pod B shares access to that claim and gets scheduled because
// the claim is assumed to be allocated.
// - PreBind for pod B is called first, tries to update reservedFor and
// fails because the claim is not really allocated yet.
//
// We could avoid the ordering problem by allowing either pod A or pod B
// to set the allocation. But that is more complicated and leads to another
// problem:
// - Pod A and B get scheduled as above.
// - PreBind for pod A gets called first, then fails with a temporary API error.
// It removes the updated claim from the assume cache because of that.
// - PreBind for pod B gets called next and succeeds with adding the
// allocation and its own reservedFor entry.
// - The assume cache is now not reflecting that the claim is allocated,
// which could lead to reusing the same resource for some other claim.
//
// A sync.Map is used because in practice sharing of a claim between
// pods is expected to be rare compared to per-pod claims, so we end up
// hitting the "multiple goroutines read, write, and overwrite entries
// for disjoint sets of keys" case that sync.Map is optimized for.
inFlightAllocations sync.Map
}
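// To summarize how the two caches above are used together (see Reserve,
// Unreserve and PreBind below):
//
//	Reserve:   claim = claim.DeepCopy(); claim.Status.Allocation = <result>
//	           pl.inFlightAllocations.Store(claim.UID, claim)
//	Unreserve: pl.inFlightAllocations.LoadAndDelete(claim.UID)
//	           pl.claimAssumeCache.Restore(claim.Namespace + "/" + claim.Name)
//	PreBind:   patch the claim status via the API, then
//	           pl.claimAssumeCache.Assume(claim) and
//	           pl.inFlightAllocations.Delete(claim.UID)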
// New initializes a new plugin and returns it.
func New(_ context.Context, plArgs runtime.Object, fh framework.Handle, fts feature.Features) (framework.Plugin, error) {
func New(ctx context.Context, plArgs runtime.Object, fh framework.Handle, fts feature.Features) (framework.Plugin, error) {
if !fts.EnableDynamicResourceAllocation {
// Disabled, won't do anything.
return &dynamicResources{}, nil
}
return &dynamicResources{
logger := klog.FromContext(ctx)
pl := &dynamicResources{
enabled: true,
fh: fh,
clientset: fh.ClientSet(),
claimLister: fh.SharedInformerFactory().Resource().V1alpha2().ResourceClaims().Lister(),
classLister: fh.SharedInformerFactory().Resource().V1alpha2().ResourceClasses().Lister(),
podSchedulingContextLister: fh.SharedInformerFactory().Resource().V1alpha2().PodSchedulingContexts().Lister(),
}, nil
claimParametersLister: fh.SharedInformerFactory().Resource().V1alpha2().ResourceClaimParameters().Lister(),
classParametersLister: fh.SharedInformerFactory().Resource().V1alpha2().ResourceClassParameters().Lister(),
resourceSliceLister: fh.SharedInformerFactory().Resource().V1alpha2().ResourceSlices().Lister(),
claimNameLookup: resourceclaim.NewNameLookup(fh.ClientSet()),
claimAssumeCache: volumebinding.NewAssumeCache(logger, fh.SharedInformerFactory().Resource().V1alpha2().ResourceClaims().Informer(), "claim", "", nil),
}
return pl, nil
}
var _ framework.PreEnqueuePlugin = &dynamicResources{}
@@ -296,7 +382,13 @@ func (pl *dynamicResources) EventsToRegister() []framework.ClusterEventWithHint
if !pl.enabled {
return nil
}
events := []framework.ClusterEventWithHint{
// Creating or updating claim or class parameters may make pods
// schedulable which depend on claims using those parameters.
{Event: framework.ClusterEvent{Resource: framework.ResourceClaimParameters, ActionType: framework.Add | framework.Update}, QueueingHintFn: pl.isSchedulableAfterClaimParametersChange},
{Event: framework.ClusterEvent{Resource: framework.ResourceClassParameters, ActionType: framework.Add | framework.Update}, QueueingHintFn: pl.isSchedulableAfterClassParametersChange},
// Allocation is tracked in ResourceClaims, so any changes may make the pods schedulable.
{Event: framework.ClusterEvent{Resource: framework.ResourceClaim, ActionType: framework.Add | framework.Update}, QueueingHintFn: pl.isSchedulableAfterClaimChange},
// When a driver has provided additional information, a pod waiting for that information
@@ -321,6 +413,149 @@ func (pl *dynamicResources) PreEnqueue(ctx context.Context, pod *v1.Pod) (status
return nil
}
// isSchedulableAfterClaimParametersChange is invoked for add and update claim parameters events reported by
// an informer. It checks whether that change made a previously unschedulable
// pod schedulable. It errs on the side of letting a pod scheduling attempt
// happen. The delete event will not invoke it, so newObj will never be nil.
func (pl *dynamicResources) isSchedulableAfterClaimParametersChange(logger klog.Logger, pod *v1.Pod, oldObj, newObj interface{}) (framework.QueueingHint, error) {
originalParameters, modifiedParameters, err := schedutil.As[*resourcev1alpha2.ResourceClaimParameters](oldObj, newObj)
if err != nil {
// Shouldn't happen.
return framework.Queue, fmt.Errorf("unexpected object in isSchedulableAfterClaimParametersChange: %w", err)
}
usesParameters := false
if err := pl.foreachPodResourceClaim(pod, func(_ string, claim *resourcev1alpha2.ResourceClaim) {
ref := claim.Spec.ParametersRef
if ref == nil {
return
}
// Using in-tree parameters directly?
if ref.APIGroup == resourcev1alpha2.SchemeGroupVersion.Group &&
ref.Kind == "ResourceClaimParameters" {
if modifiedParameters.Name == ref.Name {
usesParameters = true
}
return
}
// Need to look for translated parameters.
generatedFrom := modifiedParameters.GeneratedFrom
if generatedFrom == nil {
return
}
if generatedFrom.APIGroup == ref.APIGroup &&
generatedFrom.Kind == ref.Kind &&
generatedFrom.Name == ref.Name {
usesParameters = true
}
}); err != nil {
// This is not an unexpected error: we know that
// foreachPodResourceClaim only returns errors for "not
// schedulable".
logger.V(4).Info("pod is not schedulable", "pod", klog.KObj(pod), "claim", klog.KObj(modifiedParameters), "reason", err.Error())
return framework.QueueSkip, nil
}
if !usesParameters {
// These were not the parameters the pod was waiting for.
logger.V(6).Info("unrelated claim parameters got modified", "pod", klog.KObj(pod), "claimParameters", klog.KObj(modifiedParameters))
return framework.QueueSkip, nil
}
if originalParameters == nil {
logger.V(4).Info("claim parameters for pod got created", "pod", klog.KObj(pod), "claimParameters", klog.KObj(modifiedParameters))
return framework.Queue, nil
}
// Modifications may or may not be relevant. If the
// requests are unchanged, then something else must have changed
// and we don't care.
if apiequality.Semantic.DeepEqual(&originalParameters.DriverRequests, &modifiedParameters.DriverRequests) {
logger.V(6).Info("claim parameters for pod got modified where the pod doesn't care", "pod", klog.KObj(pod), "claimParameters", klog.KObj(modifiedParameters))
return framework.QueueSkip, nil
}
logger.V(4).Info("requests in claim parameters for pod got updated", "pod", klog.KObj(pod), "claimParameters", klog.KObj(modifiedParameters))
return framework.Queue, nil
}
// isSchedulableAfterClassParametersChange is invoked for add and update class parameters events reported by
// an informer. It checks whether that change made a previously unschedulable
// pod schedulable. It errs on the side of letting a pod scheduling attempt
// happen. The delete event will not invoke it, so newObj will never be nil.
func (pl *dynamicResources) isSchedulableAfterClassParametersChange(logger klog.Logger, pod *v1.Pod, oldObj, newObj interface{}) (framework.QueueingHint, error) {
originalParameters, modifiedParameters, err := schedutil.As[*resourcev1alpha2.ResourceClassParameters](oldObj, newObj)
if err != nil {
// Shouldn't happen.
return framework.Queue, fmt.Errorf("unexpected object in isSchedulableAfterClassParametersChange: %w", err)
}
usesParameters := false
if err := pl.foreachPodResourceClaim(pod, func(_ string, claim *resourcev1alpha2.ResourceClaim) {
class, err := pl.classLister.Get(claim.Spec.ResourceClassName)
if err != nil {
if !apierrors.IsNotFound(err) {
logger.Error(err, "look up resource class")
}
return
}
ref := class.ParametersRef
if ref == nil {
return
}
// Using in-tree parameters directly?
if ref.APIGroup == resourcev1alpha2.SchemeGroupVersion.Group &&
ref.Kind == "ResourceClassParameters" {
if modifiedParameters.Name == ref.Name {
usesParameters = true
}
return
}
// Need to look for translated parameters.
generatedFrom := modifiedParameters.GeneratedFrom
if generatedFrom == nil {
return
}
if generatedFrom.APIGroup == ref.APIGroup &&
generatedFrom.Kind == ref.Kind &&
generatedFrom.Name == ref.Name {
usesParameters = true
}
}); err != nil {
// This is not an unexpected error: we know that
// foreachPodResourceClaim only returns errors for "not
// schedulable".
logger.V(4).Info("pod is not schedulable", "pod", klog.KObj(pod), "classParameters", klog.KObj(modifiedParameters), "reason", err.Error())
return framework.QueueSkip, nil
}
if !usesParameters {
// These were not the parameters the pod was waiting for.
logger.V(6).Info("unrelated class parameters got modified", "pod", klog.KObj(pod), "classParameters", klog.KObj(modifiedParameters))
return framework.QueueSkip, nil
}
if originalParameters == nil {
logger.V(4).Info("class parameters for pod got created", "pod", klog.KObj(pod), "class", klog.KObj(modifiedParameters))
return framework.Queue, nil
}
// Modifications may or may not be relevant. If the
// filters are unchanged, then something else must have changed
// and we don't care.
if apiequality.Semantic.DeepEqual(&originalParameters.Filters, &modifiedParameters.Filters) {
logger.V(6).Info("class parameters for pod got modified where the pod doesn't care", "pod", klog.KObj(pod), "classParameters", klog.KObj(modifiedParameters))
return framework.QueueSkip, nil
}
logger.V(4).Info("filters in class parameters for pod got updated", "pod", klog.KObj(pod), "classParameters", klog.KObj(modifiedParameters))
return framework.Queue, nil
}
// isSchedulableAfterClaimChange is invoked for add and update claim events reported by
// an informer. It checks whether that change made a previously unschedulable
// pod schedulable. It errs on the side of letting a pod scheduling attempt
@@ -345,6 +580,33 @@ func (pl *dynamicResources) isSchedulableAfterClaimChange(logger klog.Logger, po
return framework.QueueSkip, nil
}
if originalClaim != nil &&
resourceclaim.IsAllocatedWithStructuredParameters(originalClaim) &&
modifiedClaim.Status.Allocation == nil {
// A claim with structured parameters was deallocated. This might have made
// resources available for other pods.
//
// TODO (https://github.com/kubernetes/kubernetes/issues/123697):
// check that the pending claims depend on structured parameters (depends on refactoring foreachPodResourceClaim, see other TODO).
//
// There is a small race here:
// - The dynamicresources plugin allocates claim A and updates the assume cache.
// - A second pod gets marked as unschedulable based on that assume cache.
// - Before the informer cache here catches up, the pod runs, terminates and
// the claim gets deallocated without ever sending the claim status with
// allocation to the scheduler.
// - The comparison below is for a *very* old claim with no allocation and the
// new claim where the allocation is already removed again, so no
// RemovedClaimAllocation event gets emitted.
//
// This is extremely unlikely and thus a fix is not needed for alpha in Kubernetes 1.30.
// TODO (https://github.com/kubernetes/kubernetes/issues/123698): The solution is to somehow integrate the assume cache
// into the event mechanism. This can be tackled together with adding autoscaler
// support, which also needs to do something with the assume cache.
logger.V(6).Info("claim with structured parameters got deallocated", "pod", klog.KObj(pod), "claim", klog.KObj(modifiedClaim))
return framework.Queue, nil
}
if !usesClaim {
// This was not the claim the pod was waiting for.
logger.V(6).Info("unrelated claim got modified", "pod", klog.KObj(pod), "claim", klog.KObj(modifiedClaim))
@@ -526,7 +788,7 @@ func (pl *dynamicResources) podResourceClaims(pod *v1.Pod) ([]*resourcev1alpha2.
// It calls an optional handler for those claims that it finds.
func (pl *dynamicResources) foreachPodResourceClaim(pod *v1.Pod, cb func(podResourceName string, claim *resourcev1alpha2.ResourceClaim)) error {
for _, resource := range pod.Spec.ResourceClaims {
claimName, mustCheckOwner, err := resourceclaim.Name(pod, &resource)
claimName, mustCheckOwner, err := pl.claimNameLookup.Name(pod, &resource)
if err != nil {
return err
}
@@ -578,24 +840,21 @@ func (pl *dynamicResources) PreFilter(ctx context.Context, state *framework.Cycl
return nil, statusUnschedulable(logger, err.Error())
}
logger.V(5).Info("pod resource claims", "pod", klog.KObj(pod), "resourceclaims", klog.KObjSlice(claims))
// If the pod does not reference any claim,
// DynamicResources Filter has nothing to do with the Pod.
if len(claims) == 0 {
return nil, framework.NewStatus(framework.Skip)
}
// Fetch s.podSchedulingState.schedulingCtx, it's going to be needed when checking claims.
// Fetch PodSchedulingContext, it's going to be needed when checking claims.
if err := s.podSchedulingState.init(ctx, pod, pl.podSchedulingContextLister); err != nil {
return nil, statusError(logger, err)
}
s.informationsForClaim = make([]informationForClaim, len(claims))
needResourceInformation := false
for index, claim := range claims {
if claim.Spec.AllocationMode == resourcev1alpha2.AllocationModeImmediate &&
claim.Status.Allocation == nil {
// This will get resolved by the resource driver.
return nil, statusUnschedulable(logger, "unallocated immediate resourceclaim", "pod", klog.KObj(pod), "resourceclaim", klog.KObj(claim))
}
if claim.Status.DeallocationRequested {
// This will get resolved by the resource driver.
return nil, statusUnschedulable(logger, "resourceclaim must be reallocated", "pod", klog.KObj(pod), "resourceclaim", klog.KObj(claim))
@@ -606,16 +865,20 @@ func (pl *dynamicResources) PreFilter(ctx context.Context, state *framework.Cycl
// Resource is in use. The pod has to wait.
return nil, statusUnschedulable(logger, "resourceclaim in use", "pod", klog.KObj(pod), "resourceclaim", klog.KObj(claim))
}
if claim.Status.Allocation != nil &&
claim.Status.Allocation.AvailableOnNodes != nil {
nodeSelector, err := nodeaffinity.NewNodeSelector(claim.Status.Allocation.AvailableOnNodes)
if err != nil {
return nil, statusError(logger, err)
if claim.Status.Allocation != nil {
if claim.Status.Allocation.AvailableOnNodes != nil {
nodeSelector, err := nodeaffinity.NewNodeSelector(claim.Status.Allocation.AvailableOnNodes)
if err != nil {
return nil, statusError(logger, err)
}
s.informationsForClaim[index].availableOnNode = nodeSelector
}
s.informationsForClaim[index].availableOnNode = nodeSelector
}
if claim.Status.Allocation == nil &&
claim.Spec.AllocationMode == resourcev1alpha2.AllocationModeWaitForFirstConsumer {
// The claim was allocated by the scheduler if it has the finalizer that is
// reserved for Kubernetes.
s.informationsForClaim[index].structuredParameters = slices.Contains(claim.Finalizers, resourcev1alpha2.Finalizer)
} else {
// The ResourceClass might have a node filter. This is
// useful for trimming the initial set of potential
// nodes before we ask the driver(s) for information
@@ -638,16 +901,144 @@ func (pl *dynamicResources) PreFilter(ctx context.Context, state *framework.Cycl
}
s.informationsForClaim[index].availableOnNode = selector
}
// Now we need information from drivers.
s.informationsForClaim[index].status = statusForClaim(s.podSchedulingState.schedulingCtx, pod.Spec.ResourceClaims[index].Name)
if class.StructuredParameters != nil && *class.StructuredParameters {
s.informationsForClaim[index].structuredParameters = true
// Allocation in flight? Better wait for that
// to finish, see inFlightAllocations
// documentation for details.
if _, found := pl.inFlightAllocations.Load(claim.UID); found {
return nil, statusUnschedulable(logger, fmt.Sprintf("resource claim %s is in the process of being allocated", klog.KObj(claim)))
}
// We need the claim and class parameters. If
// they don't exist yet, the pod has to wait.
//
// TODO (https://github.com/kubernetes/kubernetes/issues/123697):
// check this already in foreachPodResourceClaim, together with setting up informationsForClaim.
// Then PreEnqueue will also check for existence of parameters.
classParameters, claimParameters, status := pl.lookupParameters(logger, class, claim)
if status != nil {
return nil, status
}
controller, err := newClaimController(logger, class, classParameters, claimParameters)
if err != nil {
return nil, statusError(logger, err)
}
s.informationsForClaim[index].controller = controller
needResourceInformation = true
} else if claim.Spec.AllocationMode == resourcev1alpha2.AllocationModeImmediate {
// This will get resolved by the resource driver.
return nil, statusUnschedulable(logger, "unallocated immediate resourceclaim", "pod", klog.KObj(pod), "resourceclaim", klog.KObj(claim))
}
}
}
if needResourceInformation {
// Doing this over and over again for each pod could be avoided
// by parsing once when creating the plugin and then updating
// that state in informer callbacks. But that would cause
// problems for using the plugin in the Cluster Autoscaler. If
// this step here turns out to be expensive, we may have to
// maintain and update state more persistently.
//
// Claims are treated as "allocated" if they are in the assume cache
// or currently their allocation is in-flight.
resources, err := newResourceModel(logger, pl.resourceSliceLister, pl.claimAssumeCache, &pl.inFlightAllocations)
logger.V(5).Info("Resource usage", "resources", klog.Format(resources))
if err != nil {
return nil, statusError(logger, err)
}
s.resources = resources
}
s.claims = claims
state.Write(stateKey, s)
return nil, nil
}
func (pl *dynamicResources) lookupParameters(logger klog.Logger, class *resourcev1alpha2.ResourceClass, claim *resourcev1alpha2.ResourceClaim) (classParameters *resourcev1alpha2.ResourceClassParameters, claimParameters *resourcev1alpha2.ResourceClaimParameters, status *framework.Status) {
classParameters, status = pl.lookupClassParameters(logger, class)
if status != nil {
return
}
claimParameters, status = pl.lookupClaimParameters(logger, claim)
return
}
func (pl *dynamicResources) lookupClassParameters(logger klog.Logger, class *resourcev1alpha2.ResourceClass) (*resourcev1alpha2.ResourceClassParameters, *framework.Status) {
if class.ParametersRef == nil {
return nil, nil
}
if class.ParametersRef.APIGroup == resourcev1alpha2.SchemeGroupVersion.Group &&
class.ParametersRef.Kind == "ResourceClassParameters" {
// Use the parameters which were referenced directly.
parameters, err := pl.classParametersLister.ResourceClassParameters(class.ParametersRef.Namespace).Get(class.ParametersRef.Name)
if err != nil {
if apierrors.IsNotFound(err) {
return nil, statusUnschedulable(logger, fmt.Sprintf("class parameters %s not found", klog.KRef(class.ParametersRef.Namespace, class.ParametersRef.Name)))
}
return nil, statusError(logger, fmt.Errorf("get class parameters %s: %v", klog.KRef(class.Namespace, class.ParametersRef.Name), err))
}
return parameters, nil
}
// TODO (https://github.com/kubernetes/kubernetes/issues/123731): use an indexer
allParameters, err := pl.classParametersLister.ResourceClassParameters(class.Namespace).List(labels.Everything())
if err != nil {
return nil, statusError(logger, fmt.Errorf("listing class parameters failed: %v", err))
}
for _, parameters := range allParameters {
if parameters.GeneratedFrom == nil {
continue
}
if parameters.GeneratedFrom.APIGroup == class.ParametersRef.APIGroup &&
parameters.GeneratedFrom.Kind == class.ParametersRef.Kind &&
parameters.GeneratedFrom.Name == class.ParametersRef.Name &&
parameters.GeneratedFrom.Namespace == class.ParametersRef.Namespace {
return parameters, nil
}
}
return nil, statusUnschedulable(logger, fmt.Sprintf("generated class parameters for %s.%s %s not found", class.ParametersRef.Kind, class.ParametersRef.APIGroup, klog.KRef(class.Namespace, class.ParametersRef.Name)))
}
func (pl *dynamicResources) lookupClaimParameters(logger klog.Logger, claim *resourcev1alpha2.ResourceClaim) (*resourcev1alpha2.ResourceClaimParameters, *framework.Status) {
if claim.Spec.ParametersRef == nil {
return nil, nil
}
if claim.Spec.ParametersRef.APIGroup == resourcev1alpha2.SchemeGroupVersion.Group &&
claim.Spec.ParametersRef.Kind == "ResourceClaimParameters" {
// Use the parameters which were referenced directly.
parameters, err := pl.claimParametersLister.ResourceClaimParameters(claim.Namespace).Get(claim.Spec.ParametersRef.Name)
if err != nil {
if apierrors.IsNotFound(err) {
return nil, statusUnschedulable(logger, fmt.Sprintf("claim parameters %s not found", klog.KRef(claim.Namespace, claim.Spec.ParametersRef.Name)))
}
return nil, statusError(logger, fmt.Errorf("get claim parameters %s: %v", klog.KRef(claim.Namespace, claim.Spec.ParametersRef.Name), err))
}
return parameters, nil
}
// TODO (https://github.com/kubernetes/kubernetes/issues/123731): use an indexer
allParameters, err := pl.claimParametersLister.ResourceClaimParameters(claim.Namespace).List(labels.Everything())
if err != nil {
return nil, statusError(logger, fmt.Errorf("listing claim parameters failed: %v", err))
}
for _, parameters := range allParameters {
if parameters.GeneratedFrom == nil {
continue
}
if parameters.GeneratedFrom.APIGroup == claim.Spec.ParametersRef.APIGroup &&
parameters.GeneratedFrom.Kind == claim.Spec.ParametersRef.Kind &&
parameters.GeneratedFrom.Name == claim.Spec.ParametersRef.Name {
return parameters, nil
}
}
return nil, statusUnschedulable(logger, fmt.Sprintf("generated claim parameters for %s.%s %s not found", claim.Spec.ParametersRef.Kind, claim.Spec.ParametersRef.APIGroup, klog.KRef(claim.Namespace, claim.Spec.ParametersRef.Name)))
}
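// To make the "translated parameters" case above concrete (names below are
// hypothetical): a claim may reference a vendor CRD directly, e.g.
//
//	claim.Spec.ParametersRef = {APIGroup: "gpu.example.com",
//	                            Kind: "GpuClaimParameters", Name: "big-gpu"}
//
// The DRA driver watches that CRD and maintains an in-tree ResourceClaimParameters
// object in the same namespace whose GeneratedFrom mirrors that reference:
//
//	parameters.GeneratedFrom = {APIGroup: "gpu.example.com",
//	                            Kind: "GpuClaimParameters", Name: "big-gpu"}
//
// Such an object has its own, typically generated, name. That is why the lookup
// above has to list all ResourceClaimParameters in the claim's namespace and match
// on GeneratedFrom (see the TODO about adding an indexer); only parameters
// referenced directly in the resource.k8s.io group can be fetched by name.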
// PreFilterExtensions returns prefilter extensions, pod add and remove.
func (pl *dynamicResources) PreFilterExtensions() framework.PreFilterExtensions {
return nil
@@ -703,22 +1094,39 @@ func (pl *dynamicResources) Filter(ctx context.Context, cs *framework.CycleState
case claim.Status.DeallocationRequested:
// We shouldn't get here. PreFilter already checked this.
return statusUnschedulable(logger, "resourceclaim must be reallocated", "pod", klog.KObj(pod), "node", klog.KObj(node), "resourceclaim", klog.KObj(claim))
case claim.Spec.AllocationMode == resourcev1alpha2.AllocationModeWaitForFirstConsumer:
case claim.Spec.AllocationMode == resourcev1alpha2.AllocationModeWaitForFirstConsumer ||
state.informationsForClaim[index].structuredParameters:
if selector := state.informationsForClaim[index].availableOnNode; selector != nil {
if matches := selector.Match(node); !matches {
return statusUnschedulable(logger, "excluded by resource class node filter", "pod", klog.KObj(pod), "node", klog.KObj(node), "resourceclassName", claim.Spec.ResourceClassName)
}
}
if status := state.informationsForClaim[index].status; status != nil {
for _, unsuitableNode := range status.UnsuitableNodes {
if node.Name == unsuitableNode {
return statusUnschedulable(logger, "resourceclaim cannot be allocated for the node (unsuitable)", "pod", klog.KObj(pod), "node", klog.KObj(node), "resourceclaim", klog.KObj(claim), "unsuitablenodes", status.UnsuitableNodes)
// Can the builtin controller tell us whether the node is suitable?
if state.informationsForClaim[index].structuredParameters {
suitable, err := state.informationsForClaim[index].controller.nodeIsSuitable(ctx, node.Name, state.resources)
if err != nil {
// An error indicates that something wasn't configured correctly, for example
// writing a CEL expression which doesn't handle a map lookup error. Normally
// this should never fail. We could return an error here, but then the pod
// would get retried. Instead we ignore the node.
return statusUnschedulable(logger, fmt.Sprintf("checking structured parameters failed: %v", err), "pod", klog.KObj(pod), "node", klog.KObj(node), "resourceclaim", klog.KObj(claim))
}
if !suitable {
return statusUnschedulable(logger, "resourceclaim cannot be allocated for the node (unsuitable)", "pod", klog.KObj(pod), "node", klog.KObj(node), "resourceclaim", klog.KObj(claim))
}
} else {
if status := state.informationsForClaim[index].status; status != nil {
for _, unsuitableNode := range status.UnsuitableNodes {
if node.Name == unsuitableNode {
return statusUnschedulable(logger, "resourceclaim cannot be allocated for the node (unsuitable)", "pod", klog.KObj(pod), "node", klog.KObj(node), "resourceclaim", klog.KObj(claim), "unsuitablenodes", status.UnsuitableNodes)
}
}
}
}
default:
// This should have been delayed allocation. Immediate
// allocation was already checked for in PreFilter.
// This claim should have been handled above.
// Immediate allocation with control plane controller
// was already checked for in PreFilter.
return statusError(logger, fmt.Errorf("internal error, unexpected allocation mode %v", claim.Spec.AllocationMode))
}
}
@@ -736,7 +1144,11 @@ func (pl *dynamicResources) Filter(ctx context.Context, cs *framework.CycleState
// delayed allocation. Claims with immediate allocation
// would just get allocated again for a random node,
// which is unlikely to help the pod.
if claim.Spec.AllocationMode == resourcev1alpha2.AllocationModeWaitForFirstConsumer {
//
// Claims with builtin controller are handled like
// claims with delayed allocation.
if claim.Spec.AllocationMode == resourcev1alpha2.AllocationModeWaitForFirstConsumer ||
state.informationsForClaim[index].controller != nil {
state.unavailableClaims.Insert(index)
}
}
@@ -769,12 +1181,19 @@ func (pl *dynamicResources) PostFilter(ctx context.Context, cs *framework.CycleS
claim := state.claims[index]
if len(claim.Status.ReservedFor) == 0 ||
len(claim.Status.ReservedFor) == 1 && claim.Status.ReservedFor[0].UID == pod.UID {
// Is the claim handled by the builtin controller?
// Then we can simply clear the allocation. Once the
// claim informer catches up, the controllers will
// be notified about this change.
clearAllocation := state.informationsForClaim[index].controller != nil
// Before we tell a driver to deallocate a claim, we
// have to stop telling it to allocate. Otherwise,
// depending on timing, it will deallocate the claim,
// see a PodSchedulingContext with selected node, and
// allocate again for that same node.
if state.podSchedulingState.schedulingCtx != nil &&
if !clearAllocation &&
state.podSchedulingState.schedulingCtx != nil &&
state.podSchedulingState.schedulingCtx.Spec.SelectedNode != "" {
state.podSchedulingState.selectedNode = ptr.To("")
if err := state.podSchedulingState.publish(ctx, pod, pl.clientset); err != nil {
@@ -782,9 +1201,13 @@ func (pl *dynamicResources) PostFilter(ctx context.Context, cs *framework.CycleS
}
}
claim := state.claims[index].DeepCopy()
claim.Status.DeallocationRequested = true
claim := claim.DeepCopy()
claim.Status.ReservedFor = nil
if clearAllocation {
claim.Status.Allocation = nil
} else {
claim.Status.DeallocationRequested = true
}
logger.V(5).Info("Requesting deallocation of ResourceClaim", "pod", klog.KObj(pod), "resourceclaim", klog.KObj(claim))
if _, err := pl.clientset.ResourceV1alpha2().ResourceClaims(claim.Namespace).UpdateStatus(ctx, claim, metav1.UpdateOptions{}); err != nil {
return nil, statusError(logger, err)
@@ -815,14 +1238,15 @@ func (pl *dynamicResources) PreScore(ctx context.Context, cs *framework.CycleSta
logger := klog.FromContext(ctx)
pending := false
for _, claim := range state.claims {
if claim.Status.Allocation == nil {
for index, claim := range state.claims {
if claim.Status.Allocation == nil &&
state.informationsForClaim[index].controller == nil {
pending = true
break
}
}
if !pending {
logger.V(5).Info("no pending claims", "pod", klog.KObj(pod))
logger.V(5).Info("no pending claims with control plane controller", "pod", klog.KObj(pod))
return nil
}
@@ -889,7 +1313,7 @@ func haveNode(nodeNames []string, nodeName string) bool {
}
// Reserve reserves claims for the pod.
func (pl *dynamicResources) Reserve(ctx context.Context, cs *framework.CycleState, pod *v1.Pod, nodeName string) *framework.Status {
func (pl *dynamicResources) Reserve(ctx context.Context, cs *framework.CycleState, pod *v1.Pod, nodeName string) (status *framework.Status) {
if !pl.enabled {
return nil
}
@@ -903,6 +1327,7 @@ func (pl *dynamicResources) Reserve(ctx context.Context, cs *framework.CycleStat
numDelayedAllocationPending := 0
numClaimsWithStatusInfo := 0
claimsWithBuiltinController := make([]int, 0, len(state.claims))
logger := klog.FromContext(ctx)
for index, claim := range state.claims {
if claim.Status.Allocation != nil {
@@ -914,7 +1339,13 @@ func (pl *dynamicResources) Reserve(ctx context.Context, cs *framework.CycleStat
continue
}
// Must be delayed allocation.
// Do we have the builtin controller?
if state.informationsForClaim[index].controller != nil {
claimsWithBuiltinController = append(claimsWithBuiltinController, index)
continue
}
// Must be delayed allocation with control plane controller.
numDelayedAllocationPending++
// Did the driver provide information that steered node
@@ -924,12 +1355,12 @@ func (pl *dynamicResources) Reserve(ctx context.Context, cs *framework.CycleStat
}
}
if numDelayedAllocationPending == 0 {
if numDelayedAllocationPending == 0 && len(claimsWithBuiltinController) == 0 {
// Nothing left to do.
return nil
}
if !state.preScored {
if !state.preScored && numDelayedAllocationPending > 0 {
// There was only one candidate that passed the Filters and
// therefore PreScore was not called.
//
@@ -944,11 +1375,33 @@ func (pl *dynamicResources) Reserve(ctx context.Context, cs *framework.CycleStat
}
}
// Prepare allocation of claims handled by the scheduler.
for _, index := range claimsWithBuiltinController {
claim := state.claims[index]
driverName, allocation, err := state.informationsForClaim[index].controller.allocate(ctx, nodeName, state.resources)
if err != nil {
// We checked before that the node is suitable. This shouldn't have failed,
// so treat this as an error.
return statusError(logger, fmt.Errorf("claim allocation failed unexpectedly: %v", err))
}
state.informationsForClaim[index].allocation = allocation
state.informationsForClaim[index].allocationDriverName = driverName
claim = claim.DeepCopy()
claim.Status.DriverName = driverName
claim.Status.Allocation = allocation
pl.inFlightAllocations.Store(claim.UID, claim)
logger.V(5).Info("Reserved resource in allocation result", "claim", klog.KObj(claim), "driver", driverName, "allocation", klog.Format(allocation))
}
// When there is only one pending resource, we can go ahead with
// requesting allocation even when we don't have the information from
// the driver yet. Otherwise we wait for information before blindly
// making a decision that might have to be reversed later.
if numDelayedAllocationPending == 1 || numClaimsWithStatusInfo == numDelayedAllocationPending {
//
// If all pending claims are handled with the builtin controller,
// there is no need for a PodSchedulingContext change.
if numDelayedAllocationPending == 1 && len(claimsWithBuiltinController) == 0 ||
numClaimsWithStatusInfo+len(claimsWithBuiltinController) == numDelayedAllocationPending && len(claimsWithBuiltinController) < numDelayedAllocationPending {
// TODO: can we increase the chance that the scheduler picks
// the same node as before when allocation is on-going,
// assuming that that node still fits the pod? Picking a
@@ -970,6 +1423,13 @@ func (pl *dynamicResources) Reserve(ctx context.Context, cs *framework.CycleStat
return nil
}
// If all pending claims are handled with the builtin controller, then
// we can allow the pod to proceed. Allocating and reserving the claims
// will be done in PreBind.
if numDelayedAllocationPending == 0 {
return nil
}
// More than one pending claim and not enough information about all of them.
//
// TODO: can or should we ensure that schedulingCtx gets aborted while
@@ -1016,7 +1476,15 @@ func (pl *dynamicResources) Unreserve(ctx context.Context, cs *framework.CycleSt
}
}
for _, claim := range state.claims {
for index, claim := range state.claims {
// If allocation was in-flight, then it's not anymore and we need to revert the
// claim object in the assume cache to what it was before.
if state.informationsForClaim[index].controller != nil {
if _, found := pl.inFlightAllocations.LoadAndDelete(state.claims[index].UID); found {
pl.claimAssumeCache.Restore(claim.Namespace + "/" + claim.Name)
}
}
if claim.Status.Allocation != nil &&
resourceclaim.IsReservedForPod(pod, claim) {
// Remove pod from ReservedFor. A strategic-merge-patch is used
@@ -1038,7 +1506,10 @@ func (pl *dynamicResources) Unreserve(ctx context.Context, cs *framework.CycleSt
// PreBind gets called in a separate goroutine after it has been determined
// that the pod should get bound to this node. Because Reserve did not actually
// reserve claims, we need to do it now. If that fails, we return an error and
// reserve claims, we need to do it now. For claims with the builtin controller,
// we also handle the allocation.
//
// If anything fails, we return an error and
// the pod will have to go into the backoff queue. The scheduler will call
// Unreserve as part of the error handling.
func (pl *dynamicResources) PreBind(ctx context.Context, cs *framework.CycleState, pod *v1.Pod, nodeName string) *framework.Status {
@@ -1056,6 +1527,7 @@ func (pl *dynamicResources) PreBind(ctx context.Context, cs *framework.CycleStat
logger := klog.FromContext(ctx)
// Was publishing delayed? If yes, do it now and then cause binding to stop.
// This will not happen if all claims get handled by builtin controllers.
if state.podSchedulingState.isDirty() {
if err := state.podSchedulingState.publish(ctx, pod, pl.clientset); err != nil {
return statusError(logger, err)
@@ -1065,23 +1537,7 @@ func (pl *dynamicResources) PreBind(ctx context.Context, cs *framework.CycleStat
for index, claim := range state.claims {
if !resourceclaim.IsReservedForPod(pod, claim) {
// The claim might be stale, for example because the claim can get shared and some
// other goroutine has updated it in the meantime. We therefore cannot use
// SSA here to add the pod because then we would have to send the entire slice
// or use different field manager strings for each entry.
//
// With a strategic-merge-patch, we can simply send one new entry. The apiserver
// validation will catch if two goroutines try to do that at the same time and
// the claim cannot be shared.
patch := fmt.Sprintf(`{"metadata": {"uid": %q}, "status": { "reservedFor": [ {"resource": "pods", "name": %q, "uid": %q} ] }}`,
claim.UID,
pod.Name,
pod.UID,
)
logger.V(5).Info("reserve", "pod", klog.KObj(pod), "node", klog.ObjectRef{Name: nodeName}, "resourceclaim", klog.KObj(claim))
claim, err := pl.clientset.ResourceV1alpha2().ResourceClaims(claim.Namespace).Patch(ctx, claim.Name, types.StrategicMergePatchType, []byte(patch), metav1.PatchOptions{}, "status")
logger.V(5).Info("reserved", "pod", klog.KObj(pod), "node", klog.ObjectRef{Name: nodeName}, "resourceclaim", klog.Format(claim))
// TODO: metric for update errors.
claim, err := pl.bindClaim(ctx, state, index, pod, nodeName)
if err != nil {
return statusError(logger, err)
}
@@ -1093,6 +1549,79 @@ func (pl *dynamicResources) PreBind(ctx context.Context, cs *framework.CycleStat
return nil
}
// bindClaim gets called by PreBind for a claim which is not reserved for the pod yet.
// It might not even be allocated. bindClaim then ensures that the allocation
// and reservation are recorded. This finishes the work started in Reserve.
func (pl *dynamicResources) bindClaim(ctx context.Context, state *stateData, index int, pod *v1.Pod, nodeName string) (patchedClaim *resourcev1alpha2.ResourceClaim, finalErr error) {
logger := klog.FromContext(ctx)
claim := state.claims[index]
allocationPatch := ""
allocation := state.informationsForClaim[index].allocation
logger.V(5).Info("preparing claim status patch", "claim", klog.KObj(state.claims[index]), "allocation", klog.Format(allocation))
// Do we need to store an allocation result from Reserve?
if allocation != nil {
buffer, err := json.Marshal(allocation)
if err != nil {
return nil, fmt.Errorf("marshaling AllocationResult failed: %v", err)
}
allocationPatch = fmt.Sprintf(`"driverName": %q, "allocation": %s, `, state.informationsForClaim[index].allocationDriverName, string(buffer))
// The finalizer needs to be added in a normal update. Using a simple update is fine
// because we don't expect concurrent modifications while the claim is not allocated
// yet. If there are any, we want to fail.
//
// If we were interrupted in the past, it might already be set and we simply continue.
if !slices.Contains(claim.Finalizers, resourcev1alpha2.Finalizer) {
claim := state.claims[index].DeepCopy()
claim.Finalizers = append(claim.Finalizers, resourcev1alpha2.Finalizer)
if _, err := pl.clientset.ResourceV1alpha2().ResourceClaims(claim.Namespace).Update(ctx, claim, metav1.UpdateOptions{}); err != nil {
return nil, fmt.Errorf("add finalizer: %v", err)
}
}
}
// The claim might be stale, for example because the claim can get shared and some
// other goroutine has updated it in the meantime. We therefore cannot use
// SSA here to add the pod because then we would have to send the entire slice
// or use different field manager strings for each entry.
//
// With a strategic-merge-patch, we can simply send one new entry. The apiserver
// validation will catch if two goroutines try to do that at the same time and
// the claim cannot be shared.
//
// Note that this also works when the allocation result gets added twice because
// two pods both started using a shared claim: the first pod to get here adds the
// allocation result. The second pod then only adds itself to reservedFor.
patch := fmt.Sprintf(`{"metadata": {"uid": %q}, "status": {%s "reservedFor": [ {"resource": "pods", "name": %q, "uid": %q} ] }}`,
claim.UID,
allocationPatch,
pod.Name,
pod.UID,
)
if loggerV := logger.V(6); loggerV.Enabled() {
logger.V(5).Info("reserve", "pod", klog.KObj(pod), "node", klog.ObjectRef{Name: nodeName}, "resourceclaim", klog.KObj(claim), "patch", patch)
} else {
logger.V(5).Info("reserve", "pod", klog.KObj(pod), "node", klog.ObjectRef{Name: nodeName}, "resourceclaim", klog.KObj(claim))
}
claim, err := pl.clientset.ResourceV1alpha2().ResourceClaims(claim.Namespace).Patch(ctx, claim.Name, types.StrategicMergePatchType, []byte(patch), metav1.PatchOptions{}, "status")
logger.V(5).Info("reserved", "pod", klog.KObj(pod), "node", klog.ObjectRef{Name: nodeName}, "resourceclaim", klog.Format(claim), "err", err)
if allocationPatch != "" {
// The scheduler was handling allocation. Now that has
// completed, either successfully or with a failure.
if err == nil {
// This can fail, but only for reasons that are okay (concurrent delete or update).
// Shouldn't happen in this case.
if err := pl.claimAssumeCache.Assume(claim); err != nil {
logger.V(5).Info("Claim not stored in assume cache", "err", err)
}
}
pl.inFlightAllocations.Delete(claim.UID)
}
return claim, err
}
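// For illustration only (UIDs, pod name, and driver name below are made up):
// for a claim that the scheduler just allocated, the strategic-merge patch
// built above would look roughly like this. The apiserver merges the single
// reservedFor entry into the existing list instead of replacing it.
//
//	examplePatch := `{"metadata": {"uid": "5c8a0d1e-0000-4000-8000-000000000000"},
//	  "status": {"driverName": "gpu.example.com",
//	             "allocation": {"shareable": true},
//	             "reservedFor": [{"resource": "pods", "name": "my-pod", "uid": "9f2b1c3d-0000-4000-8000-000000000000"}]}}`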
// PostBind is called after a pod is successfully bound to a node. Now we are
// sure that a PodSchedulingContext object, if it exists, is definitely not going to
// be needed anymore and can delete it. This is a one-shot thing, there won't


@@ -309,8 +309,9 @@ func TestPlugin(t *testing.T) {
},
},
"waiting-for-immediate-allocation": {
pod: podWithClaimName,
claims: []*resourcev1alpha2.ResourceClaim{pendingImmediateClaim},
pod: podWithClaimName,
claims: []*resourcev1alpha2.ResourceClaim{pendingImmediateClaim},
classes: []*resourcev1alpha2.ResourceClass{resourceClass},
want: want{
prefilter: result{
status: framework.NewStatus(framework.UnschedulableAndUnresolvable, `unallocated immediate resourceclaim`),
@@ -812,7 +813,6 @@ func setup(t *testing.T, nodes []*v1.Node, claims []*resourcev1alpha2.ResourceCl
tc.client = fake.NewSimpleClientset()
reactor := createReactor(tc.client.Tracker())
tc.client.PrependReactor("*", "*", reactor)
tc.informerFactory = informers.NewSharedInformerFactory(tc.client, 0)
opts := []runtime.Option{


@@ -0,0 +1,153 @@
/*
Copyright 2024 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package namedresources
import (
"context"
"errors"
"fmt"
"slices"
resourceapi "k8s.io/api/resource/v1alpha2"
"k8s.io/apiserver/pkg/cel/environment"
"k8s.io/dynamic-resource-allocation/structured/namedresources/cel"
)
// These types and fields are all exported to allow logging them with
// pretty-printed JSON.
type Model struct {
Instances []InstanceAllocation
}
type InstanceAllocation struct {
Allocated bool
Instance *resourceapi.NamedResourcesInstance
}
// AddResources must be called first to create entries for all existing
// resource instances. The resources parameter may be nil.
func AddResources(m *Model, resources *resourceapi.NamedResourcesResources) {
if resources == nil {
return
}
for i := range resources.Instances {
m.Instances = append(m.Instances, InstanceAllocation{Instance: &resources.Instances[i]})
}
}
// AddAllocation may get called after AddResources to mark some resource
// instances as allocated. The result parameter may be nil.
func AddAllocation(m *Model, result *resourceapi.NamedResourcesAllocationResult) {
if result == nil {
return
}
for i := range m.Instances {
if m.Instances[i].Instance.Name == result.Name {
m.Instances[i].Allocated = true
break
}
}
}
func NewClaimController(filter *resourceapi.NamedResourcesFilter, requests []*resourceapi.NamedResourcesRequest) (*Controller, error) {
c := &Controller{}
if filter != nil {
compilation := cel.Compiler.CompileCELExpression(filter.Selector, environment.StoredExpressions)
if compilation.Error != nil {
// Shouldn't happen because of validation.
return nil, fmt.Errorf("compile class filter CEL expression: %w", compilation.Error)
}
c.filter = &compilation
}
for _, request := range requests {
compilation := cel.Compiler.CompileCELExpression(request.Selector, environment.StoredExpressions)
if compilation.Error != nil {
// Shouldn't happen because of validation.
return nil, fmt.Errorf("compile request CEL expression: %w", compilation.Error)
}
c.requests = append(c.requests, compilation)
}
return c, nil
}
type Controller struct {
filter *cel.CompilationResult
requests []cel.CompilationResult
}
func (c *Controller) NodeIsSuitable(ctx context.Context, model Model) (bool, error) {
indices, err := c.allocate(ctx, model)
return len(indices) == len(c.requests), err
}
func (c *Controller) Allocate(ctx context.Context, model Model) ([]*resourceapi.NamedResourcesAllocationResult, error) {
indices, err := c.allocate(ctx, model)
if err != nil {
return nil, err
}
if len(indices) != len(c.requests) {
return nil, errors.New("insufficient resources")
}
results := make([]*resourceapi.NamedResourcesAllocationResult, len(c.requests))
for i := range c.requests {
results[i] = &resourceapi.NamedResourcesAllocationResult{Name: model.Instances[indices[i]].Instance.Name}
}
return results, nil
}
func (c *Controller) allocate(ctx context.Context, model Model) ([]int, error) {
// Shallow copy, we need to modify the allocated boolean.
instances := slices.Clone(model.Instances)
indices := make([]int, 0, len(c.requests))
for _, request := range c.requests {
for i, instance := range instances {
if instance.Allocated {
continue
}
if c.filter != nil {
okay, err := c.filter.Evaluate(ctx, instance.Instance.Attributes)
if err != nil {
return nil, fmt.Errorf("evaluate filter CEL expression: %w", err)
}
if !okay {
continue
}
}
okay, err := request.Evaluate(ctx, instance.Instance.Attributes)
if err != nil {
return nil, fmt.Errorf("evaluate request CEL expression: %w", err)
}
if !okay {
continue
}
// Found a matching, unallocated instance. Let's use it.
//
// A more thorough search would include backtracking because
// allocating one "large" instance for a "small" request may
// make a following "large" request impossible to satisfy when
// only "small" instances are left.
instances[i].Allocated = true
indices = append(indices, i)
break
}
}
return indices, nil
}
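// Hedged usage sketch, not part of this file: how a caller is expected to
// combine the helpers above. The instance names, the trivial selector, and
// the function name are made up for illustration.
func exampleAllocate(ctx context.Context) ([]*resourceapi.NamedResourcesAllocationResult, error) {
	var model Model
	AddResources(&model, &resourceapi.NamedResourcesResources{
		Instances: []resourceapi.NamedResourcesInstance{{Name: "gpu-0"}, {Name: "gpu-1"}},
	})

	// One request whose CEL selector matches any instance; a real selector
	// would inspect the instance attributes.
	controller, err := NewClaimController(nil, []*resourceapi.NamedResourcesRequest{{Selector: "true"}})
	if err != nil {
		return nil, err
	}
	if ok, err := controller.NodeIsSuitable(ctx, model); err != nil || !ok {
		return nil, fmt.Errorf("node not suitable: %v", err)
	}
	// Returns one result per request, here [{Name: "gpu-0"}].
	return controller.Allocate(ctx, model)
}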


@@ -0,0 +1,241 @@
/*
Copyright 2024 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package dynamicresources
import (
"context"
"fmt"
"sync"
v1 "k8s.io/api/core/v1"
resourcev1alpha2 "k8s.io/api/resource/v1alpha2"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/runtime"
resourcev1alpha2listers "k8s.io/client-go/listers/resource/v1alpha2"
"k8s.io/klog/v2"
namedresourcesmodel "k8s.io/kubernetes/pkg/scheduler/framework/plugins/dynamicresources/structured/namedresources"
"k8s.io/kubernetes/pkg/scheduler/framework/plugins/volumebinding"
)
// resources is a map "node name" -> "driver name" -> available and
// allocated resources per structured parameter model.
type resources map[string]map[string]ResourceModels
// ResourceModels may have more than one field set because it is valid for a driver to
// use more than one structured parameter model.
type ResourceModels struct {
NamedResources namedresourcesmodel.Model
}
// newResourceModel parses the available information about resources. Objects
// with an unknown structured parameter model are silently ignored. An error gets
// logged later when parameters required for a pod depend on such an unknown
// model.
func newResourceModel(logger klog.Logger, resourceSliceLister resourcev1alpha2listers.ResourceSliceLister, claimAssumeCache volumebinding.AssumeCache, inFlightAllocations *sync.Map) (resources, error) {
model := make(resources)
slices, err := resourceSliceLister.List(labels.Everything())
if err != nil {
return nil, fmt.Errorf("list node resource slices: %w", err)
}
for _, slice := range slices {
if model[slice.NodeName] == nil {
model[slice.NodeName] = make(map[string]ResourceModels)
}
resource := model[slice.NodeName][slice.DriverName]
namedresourcesmodel.AddResources(&resource.NamedResources, slice.NamedResources)
model[slice.NodeName][slice.DriverName] = resource
}
objs := claimAssumeCache.List(nil)
for _, obj := range objs {
claim, ok := obj.(*resourcev1alpha2.ResourceClaim)
if !ok {
return nil, fmt.Errorf("got unexpected object of type %T from claim assume cache", obj)
}
if obj, ok := inFlightAllocations.Load(claim.UID); ok {
// If the allocation is in-flight, then we have to use the allocation
// from that claim.
claim = obj.(*resourcev1alpha2.ResourceClaim)
}
if claim.Status.Allocation == nil {
continue
}
for _, handle := range claim.Status.Allocation.ResourceHandles {
structured := handle.StructuredData
if structured == nil {
continue
}
if model[structured.NodeName] == nil {
model[structured.NodeName] = make(map[string]ResourceModels)
}
resource := model[structured.NodeName][handle.DriverName]
for _, result := range structured.Results {
// Call AddAllocation for each known model. Each call itself needs to check for nil.
namedresourcesmodel.AddAllocation(&resource.NamedResources, result.NamedResources)
}
}
}
return model, nil
}
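// Hedged illustration, not part of the change: the shape of the map that
// newResourceModel returns, with made-up node and driver names.
var exampleResources = resources{
	"node-1": {
		"gpu.example.com": ResourceModels{
			// NamedResources holds the instances published via ResourceSlices
			// plus the Allocated flags derived from existing allocations.
			NamedResources: namedresourcesmodel.Model{},
		},
	},
}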
func newClaimController(logger klog.Logger, class *resourcev1alpha2.ResourceClass, classParameters *resourcev1alpha2.ResourceClassParameters, claimParameters *resourcev1alpha2.ResourceClaimParameters) (*claimController, error) {
// Each node driver is separate from the others. Each driver may have
// multiple requests which need to be allocated together, so here
// we have to collect them per model.
type perDriverRequests struct {
parameters []runtime.RawExtension
requests []*resourcev1alpha2.NamedResourcesRequest
}
namedresourcesRequests := make(map[string]perDriverRequests)
for i, request := range claimParameters.DriverRequests {
driverName := request.DriverName
p := namedresourcesRequests[driverName]
for e, request := range request.Requests {
switch {
case request.ResourceRequestModel.NamedResources != nil:
p.parameters = append(p.parameters, request.VendorParameters)
p.requests = append(p.requests, request.ResourceRequestModel.NamedResources)
default:
return nil, fmt.Errorf("claim parameters %s: driverRequersts[%d].requests[%d]: no supported structured parameters found", klog.KObj(claimParameters), i, e)
}
}
if len(p.requests) > 0 {
namedresourcesRequests[driverName] = p
}
}
c := &claimController{
class: class,
classParameters: classParameters,
claimParameters: claimParameters,
namedresources: make(map[string]perDriverController, len(namedresourcesRequests)),
}
for driverName, perDriver := range namedresourcesRequests {
var filter *resourcev1alpha2.NamedResourcesFilter
if classParameters != nil {
for _, f := range classParameters.Filters {
if f.DriverName == driverName && f.ResourceFilterModel.NamedResources != nil {
filter = f.ResourceFilterModel.NamedResources
break
}
}
}
controller, err := namedresourcesmodel.NewClaimController(filter, perDriver.requests)
if err != nil {
return nil, fmt.Errorf("creating claim controller for named resources structured model: %w", err)
}
c.namedresources[driverName] = perDriverController{
parameters: perDriver.parameters,
controller: controller,
}
}
return c, nil
}
// claimController currently wraps exactly one structured parameter model.
type claimController struct {
class *resourcev1alpha2.ResourceClass
classParameters *resourcev1alpha2.ResourceClassParameters
claimParameters *resourcev1alpha2.ResourceClaimParameters
namedresources map[string]perDriverController
}
type perDriverController struct {
parameters []runtime.RawExtension
controller *namedresourcesmodel.Controller
}
func (c claimController) nodeIsSuitable(ctx context.Context, nodeName string, resources resources) (bool, error) {
nodeResources := resources[nodeName]
for driverName, perDriver := range c.namedresources {
okay, err := perDriver.controller.NodeIsSuitable(ctx, nodeResources[driverName].NamedResources)
if err != nil {
// This is an error in the CEL expression which needs
// to be fixed. Better fail very visibly instead of
// ignoring the node.
return false, fmt.Errorf("checking node %q and resources of driver %q: %w", nodeName, driverName, err)
}
if !okay {
return false, nil
}
}
return true, nil
}
func (c claimController) allocate(ctx context.Context, nodeName string, resources resources) (string, *resourcev1alpha2.AllocationResult, error) {
allocation := &resourcev1alpha2.AllocationResult{
Shareable: c.claimParameters.Shareable,
AvailableOnNodes: &v1.NodeSelector{
NodeSelectorTerms: []v1.NodeSelectorTerm{
{
MatchExpressions: []v1.NodeSelectorRequirement{
{Key: "kubernetes.io/hostname", Operator: v1.NodeSelectorOpIn, Values: []string{nodeName}},
},
},
},
},
}
nodeResources := resources[nodeName]
for driverName, perDriver := range c.namedresources {
// Must return one entry for each request. The entry may be nil. This way,
// the result can be correlated with the per-request parameters.
results, err := perDriver.controller.Allocate(ctx, nodeResources[driverName].NamedResources)
if err != nil {
return "", nil, fmt.Errorf("allocating via named resources structured model: %w", err)
}
handle := resourcev1alpha2.ResourceHandle{
DriverName: driverName,
StructuredData: &resourcev1alpha2.StructuredResourceHandle{
NodeName: nodeName,
},
}
for i, result := range results {
if result == nil {
continue
}
handle.StructuredData.Results = append(handle.StructuredData.Results,
resourcev1alpha2.DriverAllocationResult{
VendorRequestParameters: perDriver.parameters[i],
AllocationResultModel: resourcev1alpha2.AllocationResultModel{
NamedResources: result,
},
},
)
}
if c.classParameters != nil {
for _, p := range c.classParameters.VendorParameters {
if p.DriverName == driverName {
handle.StructuredData.VendorClassParameters = p.Parameters
break
}
}
}
for _, request := range c.claimParameters.DriverRequests {
if request.DriverName == driverName {
handle.StructuredData.VendorClaimParameters = request.VendorParameters
break
}
}
allocation.ResourceHandles = append(allocation.ResourceHandles, handle)
}
return c.class.DriverName, allocation, nil
}
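// exampleAllocateOnNode is a hedged usage sketch, not part of the change,
// showing how the helpers above fit together for one candidate node. The
// function name and the nodeName argument are illustrative only.
func exampleAllocateOnNode(ctx context.Context, logger klog.Logger, class *resourcev1alpha2.ResourceClass, classParameters *resourcev1alpha2.ResourceClassParameters, claimParameters *resourcev1alpha2.ResourceClaimParameters, model resources, nodeName string) (*resourcev1alpha2.AllocationResult, error) {
	ctrl, err := newClaimController(logger, class, classParameters, claimParameters)
	if err != nil {
		return nil, err
	}
	ok, err := ctrl.nodeIsSuitable(ctx, nodeName, model)
	if err != nil || !ok {
		return nil, err
	}
	_, allocation, err := ctrl.allocate(ctx, nodeName, model)
	return allocation, err
}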


@@ -283,10 +283,16 @@ func (c *assumeCache) List(indexObj interface{}) []interface{} {
defer c.rwMutex.RUnlock()
allObjs := []interface{}{}
objs, err := c.store.Index(c.indexName, &objInfo{latestObj: indexObj})
if err != nil {
c.logger.Error(err, "List index error")
return nil
var objs []interface{}
if c.indexName != "" {
o, err := c.store.Index(c.indexName, &objInfo{latestObj: indexObj})
if err != nil {
c.logger.Error(err, "List index error")
return nil
}
objs = o
} else {
objs = c.store.List()
}
for _, obj := range objs {


@@ -72,17 +72,19 @@ const (
// - a Pod that is deleted
// - a Pod that was assumed, but gets un-assumed due to some errors in the binding cycle.
// - an existing Pod that was unscheduled but gets scheduled to a Node.
Pod GVK = "Pod"
Node GVK = "Node"
PersistentVolume GVK = "PersistentVolume"
PersistentVolumeClaim GVK = "PersistentVolumeClaim"
CSINode GVK = "storage.k8s.io/CSINode"
CSIDriver GVK = "storage.k8s.io/CSIDriver"
CSIStorageCapacity GVK = "storage.k8s.io/CSIStorageCapacity"
StorageClass GVK = "storage.k8s.io/StorageClass"
PodSchedulingContext GVK = "PodSchedulingContext"
ResourceClaim GVK = "ResourceClaim"
ResourceClass GVK = "ResourceClass"
Pod GVK = "Pod"
Node GVK = "Node"
PersistentVolume GVK = "PersistentVolume"
PersistentVolumeClaim GVK = "PersistentVolumeClaim"
CSINode GVK = "storage.k8s.io/CSINode"
CSIDriver GVK = "storage.k8s.io/CSIDriver"
CSIStorageCapacity GVK = "storage.k8s.io/CSIStorageCapacity"
StorageClass GVK = "storage.k8s.io/StorageClass"
PodSchedulingContext GVK = "PodSchedulingContext"
ResourceClaim GVK = "ResourceClaim"
ResourceClass GVK = "ResourceClass"
ResourceClaimParameters GVK = "ResourceClaimParameters"
ResourceClassParameters GVK = "ResourceClassParameters"
// WildCard is a special GVK to match all resources.
// e.g., If you register `{Resource: "*", ActionType: All}` in EventsToRegister,
@@ -176,6 +178,8 @@ func UnrollWildCardResource() []ClusterEventWithHint {
{Event: ClusterEvent{Resource: PodSchedulingContext, ActionType: All}},
{Event: ClusterEvent{Resource: ResourceClaim, ActionType: All}},
{Event: ClusterEvent{Resource: ResourceClass, ActionType: All}},
{Event: ClusterEvent{Resource: ResourceClaimParameters, ActionType: All}},
{Event: ClusterEvent{Resource: ResourceClassParameters, ActionType: All}},
}
}


@@ -644,6 +644,12 @@ func Test_buildQueueingHintMap(t *testing.T) {
{Resource: framework.ResourceClass, ActionType: framework.All}: {
{PluginName: filterWithoutEnqueueExtensions, QueueingHintFn: defaultQueueingHintFn},
},
{Resource: framework.ResourceClaimParameters, ActionType: framework.All}: {
{PluginName: filterWithoutEnqueueExtensions, QueueingHintFn: defaultQueueingHintFn},
},
{Resource: framework.ResourceClassParameters, ActionType: framework.All}: {
{PluginName: filterWithoutEnqueueExtensions, QueueingHintFn: defaultQueueingHintFn},
},
},
},
{
@@ -768,17 +774,19 @@ func Test_UnionedGVKs(t *testing.T) {
Disabled: []schedulerapi.Plugin{{Name: "*"}}, // disable default plugins
},
want: map[framework.GVK]framework.ActionType{
framework.Pod: framework.All,
framework.Node: framework.All,
framework.CSINode: framework.All,
framework.CSIDriver: framework.All,
framework.CSIStorageCapacity: framework.All,
framework.PersistentVolume: framework.All,
framework.PersistentVolumeClaim: framework.All,
framework.StorageClass: framework.All,
framework.PodSchedulingContext: framework.All,
framework.ResourceClaim: framework.All,
framework.ResourceClass: framework.All,
framework.Pod: framework.All,
framework.Node: framework.All,
framework.CSINode: framework.All,
framework.CSIDriver: framework.All,
framework.CSIStorageCapacity: framework.All,
framework.PersistentVolume: framework.All,
framework.PersistentVolumeClaim: framework.All,
framework.StorageClass: framework.All,
framework.PodSchedulingContext: framework.All,
framework.ResourceClaim: framework.All,
framework.ResourceClass: framework.All,
framework.ResourceClaimParameters: framework.All,
framework.ResourceClassParameters: framework.All,
},
},
{


@@ -18,6 +18,7 @@ package noderestriction
import (
"context"
"errors"
"fmt"
"io"
"strings"
@@ -25,7 +26,7 @@ import (
"github.com/google/go-cmp/cmp"
v1 "k8s.io/api/core/v1"
apiequality "k8s.io/apimachinery/pkg/api/equality"
"k8s.io/apimachinery/pkg/api/errors"
apierrors "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/api/meta"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/util/sets"
@@ -40,6 +41,7 @@ import (
coordapi "k8s.io/kubernetes/pkg/apis/coordination"
api "k8s.io/kubernetes/pkg/apis/core"
"k8s.io/kubernetes/pkg/apis/policy"
"k8s.io/kubernetes/pkg/apis/resource"
storage "k8s.io/kubernetes/pkg/apis/storage"
"k8s.io/kubernetes/pkg/auth/nodeidentifier"
"k8s.io/kubernetes/pkg/features"
@@ -71,7 +73,8 @@ type Plugin struct {
podsGetter corev1lister.PodLister
nodesGetter corev1lister.NodeLister
expansionRecoveryEnabled bool
expansionRecoveryEnabled bool
dynamicResourceAllocationEnabled bool
}
var (
@@ -83,6 +86,7 @@ var (
// InspectFeatureGates allows setting bools without taking a dep on a global variable
func (p *Plugin) InspectFeatureGates(featureGates featuregate.FeatureGate) {
p.expansionRecoveryEnabled = featureGates.Enabled(features.RecoverVolumeExpansionFailure)
p.dynamicResourceAllocationEnabled = featureGates.Enabled(features.DynamicResourceAllocation)
}
// SetExternalKubeInformerFactory registers an informer factory into Plugin
@@ -106,12 +110,13 @@ func (p *Plugin) ValidateInitialization() error {
}
var (
podResource = api.Resource("pods")
nodeResource = api.Resource("nodes")
pvcResource = api.Resource("persistentvolumeclaims")
svcacctResource = api.Resource("serviceaccounts")
leaseResource = coordapi.Resource("leases")
csiNodeResource = storage.Resource("csinodes")
podResource = api.Resource("pods")
nodeResource = api.Resource("nodes")
pvcResource = api.Resource("persistentvolumeclaims")
svcacctResource = api.Resource("serviceaccounts")
leaseResource = coordapi.Resource("leases")
csiNodeResource = storage.Resource("csinodes")
resourceSliceResource = resource.Resource("resourceslices")
)
// Admit checks the admission policy and triggers corresponding actions
@@ -163,6 +168,9 @@ func (p *Plugin) Admit(ctx context.Context, a admission.Attributes, o admission.
case csiNodeResource:
return p.admitCSINode(nodeName, a)
case resourceSliceResource:
return p.admitResourceSlice(nodeName, a)
default:
return nil
}
@@ -178,7 +186,7 @@ func (p *Plugin) admitPod(nodeName string, a admission.Attributes) error {
case admission.Delete:
// get the existing pod
existingPod, err := p.podsGetter.Pods(a.GetNamespace()).Get(a.GetName())
if errors.IsNotFound(err) {
if apierrors.IsNotFound(err) {
return err
}
if err != nil {
@@ -233,7 +241,7 @@ func (p *Plugin) admitPodCreate(nodeName string, a admission.Attributes) error {
// Verify the node UID.
node, err := p.nodesGetter.Get(nodeName)
if errors.IsNotFound(err) {
if apierrors.IsNotFound(err) {
return err
}
if err != nil {
@@ -351,7 +359,7 @@ func (p *Plugin) admitPodEviction(nodeName string, a admission.Attributes) error
}
// get the existing pod
existingPod, err := p.podsGetter.Pods(a.GetNamespace()).Get(podName)
if errors.IsNotFound(err) {
if apierrors.IsNotFound(err) {
return err
}
if err != nil {
@@ -412,7 +420,7 @@ func (p *Plugin) admitPVCStatus(nodeName string, a admission.Attributes) error {
// ensure no metadata changed. nodes should not be able to relabel, add finalizers/owners, etc
if !apiequality.Semantic.DeepEqual(oldPVC, newPVC) {
return admission.NewForbidden(a, fmt.Errorf("node %q is not allowed to update fields other than status.capacity and status.conditions: %v", nodeName, cmp.Diff(oldPVC, newPVC)))
return admission.NewForbidden(a, fmt.Errorf("node %q is not allowed to update fields other than status.quantity and status.conditions: %v", nodeName, cmp.Diff(oldPVC, newPVC)))
}
return nil
@@ -564,7 +572,7 @@ func (p *Plugin) admitServiceAccount(nodeName string, a admission.Attributes) er
return admission.NewForbidden(a, fmt.Errorf("node requested token with a pod binding without a uid"))
}
pod, err := p.podsGetter.Pods(a.GetNamespace()).Get(ref.Name)
if errors.IsNotFound(err) {
if apierrors.IsNotFound(err) {
return err
}
if err != nil {
@@ -630,3 +638,20 @@ func (p *Plugin) admitCSINode(nodeName string, a admission.Attributes) error {
return nil
}
func (p *Plugin) admitResourceSlice(nodeName string, a admission.Attributes) error {
// The create request must come from a node with the same name as the NodeName field.
// Other requests get checked by the node authorizer.
if a.GetOperation() == admission.Create {
slice, ok := a.GetObject().(*resource.ResourceSlice)
if !ok {
return admission.NewForbidden(a, fmt.Errorf("unexpected type %T", a.GetObject()))
}
if slice.NodeName != nodeName {
return admission.NewForbidden(a, errors.New("can only create ResourceSlice with the same NodeName as the requesting node"))
}
}
return nil
}


@@ -44,6 +44,7 @@ import (
"k8s.io/kubernetes/pkg/apis/coordination"
api "k8s.io/kubernetes/pkg/apis/core"
"k8s.io/kubernetes/pkg/apis/policy"
resourceapi "k8s.io/kubernetes/pkg/apis/resource"
storage "k8s.io/kubernetes/pkg/apis/storage"
"k8s.io/kubernetes/pkg/auth/nodeidentifier"
"k8s.io/utils/pointer"
@@ -1601,3 +1602,84 @@ func createPodAttributes(pod *api.Pod, user user.Info) admission.Attributes {
podKind := api.Kind("Pod").WithVersion("v1")
return admission.NewAttributesRecord(pod, nil, podKind, pod.Namespace, pod.Name, podResource, "", admission.Create, &metav1.CreateOptions{}, false, user)
}
func TestAdmitResourceSlice(t *testing.T) {
apiResource := resourceapi.SchemeGroupVersion.WithResource("resourceslices")
nodename := "mynode"
mynode := &user.DefaultInfo{Name: "system:node:" + nodename, Groups: []string{"system:nodes"}}
err := "can only create ResourceSlice with the same NodeName as the requesting node"
sliceNode := &resourceapi.ResourceSlice{
ObjectMeta: metav1.ObjectMeta{
Name: "something",
},
NodeName: nodename,
}
sliceOtherNode := &resourceapi.ResourceSlice{
ObjectMeta: metav1.ObjectMeta{
Name: "something",
},
NodeName: nodename + "-other",
}
tests := map[string]struct {
operation admission.Operation
obj runtime.Object
featureEnabled bool
expectError string
}{
"create allowed, enabled": {
operation: admission.Create,
obj: sliceNode,
featureEnabled: true,
expectError: "",
},
"create disallowed, enabled": {
operation: admission.Create,
obj: sliceOtherNode,
featureEnabled: true,
expectError: err,
},
"create allowed, disabled": {
operation: admission.Create,
obj: sliceNode,
featureEnabled: false,
expectError: "",
},
"create disallowed, disabled": {
operation: admission.Create,
obj: sliceOtherNode,
featureEnabled: false,
expectError: err,
},
"update allowed, same node": {
operation: admission.Update,
obj: sliceNode,
featureEnabled: true,
expectError: "",
},
"update allowed, other node": {
operation: admission.Update,
obj: sliceOtherNode,
featureEnabled: true,
expectError: "",
},
}
for name, test := range tests {
t.Run(name, func(t *testing.T) {
attributes := admission.NewAttributesRecord(
test.obj, nil, schema.GroupVersionKind{},
"", "foo", apiResource, "", test.operation, &metav1.CreateOptions{}, false, mynode)
defer featuregatetesting.SetFeatureGateDuringTest(t, feature.DefaultFeatureGate, features.DynamicResourceAllocation, test.featureEnabled)()
a := &admitTestCase{
name: name,
attributes: attributes,
features: feature.DefaultFeatureGate,
err: test.expectError,
}
a.run(t)
})
}
}


@@ -114,6 +114,7 @@ type vertexType byte
const (
configMapVertexType vertexType = iota
sliceVertexType
nodeVertexType
podVertexType
pvcVertexType
@@ -126,6 +127,7 @@ const (
var vertexTypes = map[vertexType]string{
configMapVertexType: "configmap",
sliceVertexType: "resourceslice",
nodeVertexType: "node",
podVertexType: "pod",
pvcVertexType: "pvc",
@@ -492,3 +494,34 @@ func (g *Graph) DeleteVolumeAttachment(name string) {
defer g.lock.Unlock()
g.deleteVertex_locked(vaVertexType, "", name)
}
// AddResourceSlice sets up edges for the following relationships:
//
// node resource slice -> node
func (g *Graph) AddResourceSlice(sliceName, nodeName string) {
start := time.Now()
defer func() {
graphActionsDuration.WithLabelValues("AddResourceSlice").Observe(time.Since(start).Seconds())
}()
g.lock.Lock()
defer g.lock.Unlock()
// clear existing edges
g.deleteVertex_locked(sliceVertexType, "", sliceName)
// if we have a node, establish new edges
if len(nodeName) > 0 {
sliceVertex := g.getOrCreateVertex_locked(sliceVertexType, "", sliceName)
nodeVertex := g.getOrCreateVertex_locked(nodeVertexType, "", nodeName)
g.graph.SetEdge(newDestinationEdge(sliceVertex, nodeVertex, nodeVertex))
}
}
func (g *Graph) DeleteResourceSlice(sliceName string) {
start := time.Now()
defer func() {
graphActionsDuration.WithLabelValues("DeleteResourceSlice").Observe(time.Since(start).Seconds())
}()
g.lock.Lock()
defer g.lock.Unlock()
g.deleteVertex_locked(sliceVertexType, "", sliceName)
}
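// Hedged sketch (names made up): the graph populator calls these methods so
// that the authorizer can later follow the edge from a slice to its node.
//
//	g := NewGraph()
//	g.AddResourceSlice("slice0-node0", "node0") // edge: resourceslice -> node
//	g.DeleteResourceSlice("slice0-node0")       // drops the vertex and its edges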


@@ -22,9 +22,11 @@ import (
"k8s.io/klog/v2"
corev1 "k8s.io/api/core/v1"
resourcev1alpha2 "k8s.io/api/resource/v1alpha2"
storagev1 "k8s.io/api/storage/v1"
"k8s.io/apimachinery/pkg/util/wait"
corev1informers "k8s.io/client-go/informers/core/v1"
resourcev1alpha2informers "k8s.io/client-go/informers/resource/v1alpha2"
storageinformers "k8s.io/client-go/informers/storage/v1"
"k8s.io/client-go/tools/cache"
)
@@ -39,6 +41,7 @@ func AddGraphEventHandlers(
pods corev1informers.PodInformer,
pvs corev1informers.PersistentVolumeInformer,
attachments storageinformers.VolumeAttachmentInformer,
slices resourcev1alpha2informers.ResourceSliceInformer,
) {
g := &graphPopulator{
graph: graph,
@@ -62,8 +65,20 @@ func AddGraphEventHandlers(
DeleteFunc: g.deleteVolumeAttachment,
})
go cache.WaitForNamedCacheSync("node_authorizer", wait.NeverStop,
podHandler.HasSynced, pvsHandler.HasSynced, attachHandler.HasSynced)
synced := []cache.InformerSynced{
podHandler.HasSynced, pvsHandler.HasSynced, attachHandler.HasSynced,
}
if slices != nil {
sliceHandler, _ := slices.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
AddFunc: g.addResourceSlice,
UpdateFunc: nil, // Not needed, NodeName is immutable.
DeleteFunc: g.deleteResourceSlice,
})
synced = append(synced, sliceHandler.HasSynced)
}
go cache.WaitForNamedCacheSync("node_authorizer", wait.NeverStop, synced...)
}
func (g *graphPopulator) addPod(obj interface{}) {
@@ -184,3 +199,24 @@ func (g *graphPopulator) deleteVolumeAttachment(obj interface{}) {
}
g.graph.DeleteVolumeAttachment(attachment.Name)
}
func (g *graphPopulator) addResourceSlice(obj interface{}) {
slice, ok := obj.(*resourcev1alpha2.ResourceSlice)
if !ok {
klog.Infof("unexpected type %T", obj)
return
}
g.graph.AddResourceSlice(slice.Name, slice.NodeName)
}
func (g *graphPopulator) deleteResourceSlice(obj interface{}) {
if tombstone, ok := obj.(cache.DeletedFinalStateUnknown); ok {
obj = tombstone.Obj
}
slice, ok := obj.(*resourcev1alpha2.ResourceSlice)
if !ok {
klog.Infof("unexpected type %T", obj)
return
}
g.graph.DeleteResourceSlice(slice.Name)
}


@@ -50,7 +50,12 @@ import (
// node <- pod <- pvc <- pv
// node <- pod <- pvc <- pv <- secret
// node <- pod <- ResourceClaim
// 4. For other resources, authorize all nodes uniformly using statically defined rules
// 4. If a request is for a resourceslice, then authorize access if there is an
// edge from the existing slice object to the node, which is the case if the
// existing object has the node in its NodeName field. For create, the access gets
// granted because the noderestriction admission plugin checks that the NodeName
// is set to the node.
// 5. For other resources, authorize all nodes uniformly using statically defined rules
type NodeAuthorizer struct {
graph *Graph
identifier nodeidentifier.NodeIdentifier
@@ -76,6 +81,7 @@ func NewAuthorizer(graph *Graph, identifier nodeidentifier.NodeIdentifier, rules
var (
configMapResource = api.Resource("configmaps")
secretResource = api.Resource("secrets")
resourceSlice = resourceapi.Resource("resourceslices")
pvcResource = api.Resource("persistentvolumeclaims")
pvResource = api.Resource("persistentvolumes")
resourceClaimResource = resourceapi.Resource("resourceclaims")
@@ -130,6 +136,8 @@ func (r *NodeAuthorizer) Authorize(ctx context.Context, attrs authorizer.Attribu
return r.authorizeLease(nodeName, attrs)
case csiNodeResource:
return r.authorizeCSINode(nodeName, attrs)
case resourceSlice:
return r.authorizeResourceSlice(nodeName, attrs)
}
}
@@ -294,6 +302,39 @@ func (r *NodeAuthorizer) authorizeCSINode(nodeName string, attrs authorizer.Attr
return authorizer.DecisionAllow, "", nil
}
// authorizeResourceSlice authorizes node requests to ResourceSlice resource.k8s.io/resourceslices
func (r *NodeAuthorizer) authorizeResourceSlice(nodeName string, attrs authorizer.Attributes) (authorizer.Decision, string, error) {
if len(attrs.GetSubresource()) > 0 {
klog.V(2).Infof("NODE DENY: '%s' %#v", nodeName, attrs)
return authorizer.DecisionNoOpinion, "cannot authorize ResourceSlice subresources", nil
}
// allowed verbs: get, create, update, patch, delete
verb := attrs.GetVerb()
switch verb {
case "get", "create", "update", "patch", "delete":
// Okay, but check individual object permission below.
case "watch", "list":
// Okay. The kubelet is trusted to use a filter for its own objects.
return authorizer.DecisionAllow, "", nil
default:
klog.V(2).Infof("NODE DENY: '%s' %#v", nodeName, attrs)
return authorizer.DecisionNoOpinion, "can only get, create, update, patch, or delete a ResourceSlice", nil
}
// The request must come from a node with the same name as the ResourceSlice.NodeName field.
//
// For create, the noderestriction admission plugin is performing this check.
// Here we don't have access to the content of the new object.
if verb == "create" {
return authorizer.DecisionAllow, "", nil
}
// For any other verb, access is only allowed when the graph contains an edge
// from the existing ResourceSlice object to the requesting node.
return r.authorize(nodeName, sliceVertexType, attrs)
}
// hasPathFrom returns true if there is a directed path from the specified type/namespace/name to the specified Node
func (r *NodeAuthorizer) hasPathFrom(nodeName string, startingType vertexType, startingNamespace, startingName string) (bool, error) {
r.graph.lock.RLock()


@@ -28,6 +28,7 @@ import (
"time"
corev1 "k8s.io/api/core/v1"
resourcev1alpha2 "k8s.io/api/resource/v1alpha2"
storagev1 "k8s.io/api/storage/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apiserver/pkg/authentication/user"
@@ -55,9 +56,10 @@ func TestAuthorizer(t *testing.T) {
uniqueResourceClaimsPerPod: 1,
uniqueResourceClaimTemplatesPerPod: 1,
uniqueResourceClaimTemplatesWithClaimPerPod: 1,
nodeResourceCapacitiesPerNode: 2,
}
nodes, pods, pvs, attachments := generate(opts)
populate(g, nodes, pods, pvs, attachments)
nodes, pods, pvs, attachments, slices := generate(opts)
populate(g, nodes, pods, pvs, attachments, slices)
identifier := nodeidentifier.NewDefaultNodeIdentifier()
authz := NewAuthorizer(g, identifier, bootstrappolicy.NodeRules())
@@ -336,6 +338,67 @@ func TestAuthorizer(t *testing.T) {
attrs: authorizer.AttributesRecord{User: node0, ResourceRequest: true, Verb: "delete", Resource: "csinodes", APIGroup: "storage.k8s.io", Name: "node0"},
expect: authorizer.DecisionAllow,
},
// ResourceSlice
{
name: "disallowed ResourceSlice with subresource",
attrs: authorizer.AttributesRecord{User: node0, ResourceRequest: true, Verb: "get", Resource: "resourceslices", Subresource: "status", APIGroup: "resource.k8s.io", Name: "slice0-node0"},
expect: authorizer.DecisionNoOpinion,
},
{
name: "disallowed get another node's ResourceSlice",
attrs: authorizer.AttributesRecord{User: node0, ResourceRequest: true, Verb: "get", Resource: "resourceslices", APIGroup: "resource.k8s.io", Name: "slice0-node1"},
expect: authorizer.DecisionNoOpinion,
},
{
name: "disallowed update another node's ResourceSlice",
attrs: authorizer.AttributesRecord{User: node0, ResourceRequest: true, Verb: "update", Resource: "resourceslices", APIGroup: "resource.k8s.io", Name: "slice0-node1"},
expect: authorizer.DecisionNoOpinion,
},
{
name: "disallowed patch another node's ResourceSlice",
attrs: authorizer.AttributesRecord{User: node0, ResourceRequest: true, Verb: "patch", Resource: "resourceslices", APIGroup: "resource.k8s.io", Name: "slice0-node1"},
expect: authorizer.DecisionNoOpinion,
},
{
name: "disallowed delete another node's ResourceSlice",
attrs: authorizer.AttributesRecord{User: node0, ResourceRequest: true, Verb: "delete", Resource: "resourceslices", APIGroup: "resource.k8s.io", Name: "slice0-node1"},
expect: authorizer.DecisionNoOpinion,
},
{
name: "allowed list ResourceSlices",
attrs: authorizer.AttributesRecord{User: node0, ResourceRequest: true, Verb: "list", Resource: "resourceslices", APIGroup: "resource.k8s.io"},
expect: authorizer.DecisionAllow,
},
{
name: "allowed watch ResourceSlices",
attrs: authorizer.AttributesRecord{User: node0, ResourceRequest: true, Verb: "watch", Resource: "resourceslices", APIGroup: "resource.k8s.io"},
expect: authorizer.DecisionAllow,
},
{
name: "allowed get ResourceSlice",
attrs: authorizer.AttributesRecord{User: node0, ResourceRequest: true, Verb: "get", Resource: "resourceslices", APIGroup: "resource.k8s.io", Name: "slice0-node0"},
expect: authorizer.DecisionAllow,
},
{
name: "allowed create ResourceSlice",
attrs: authorizer.AttributesRecord{User: node0, ResourceRequest: true, Verb: "create", Resource: "resourceslices", APIGroup: "resource.k8s.io", Name: "slice0-node0"},
expect: authorizer.DecisionAllow,
},
{
name: "allowed update ResourceSlice",
attrs: authorizer.AttributesRecord{User: node0, ResourceRequest: true, Verb: "update", Resource: "resourceslices", APIGroup: "resource.k8s.io", Name: "slice0-node0"},
expect: authorizer.DecisionAllow,
},
{
name: "allowed patch ResourceSlice",
attrs: authorizer.AttributesRecord{User: node0, ResourceRequest: true, Verb: "patch", Resource: "resourceslices", APIGroup: "resource.k8s.io", Name: "slice0-node0"},
expect: authorizer.DecisionAllow,
},
{
name: "allowed delete ResourceSlice",
attrs: authorizer.AttributesRecord{User: node0, ResourceRequest: true, Verb: "delete", Resource: "resourceslices", APIGroup: "resource.k8s.io", Name: "slice0-node0"},
expect: authorizer.DecisionAllow,
},
}
for _, tc := range tests {
@@ -497,6 +560,8 @@ type sampleDataOpts struct {
uniqueResourceClaimsPerPod int
uniqueResourceClaimTemplatesPerPod int
uniqueResourceClaimTemplatesWithClaimPerPod int
nodeResourceCapacitiesPerNode int
}
func BenchmarkPopulationAllocation(b *testing.B) {
@@ -513,12 +578,12 @@ func BenchmarkPopulationAllocation(b *testing.B) {
uniquePVCsPerPod: 1,
}
nodes, pods, pvs, attachments := generate(opts)
nodes, pods, pvs, attachments, slices := generate(opts)
b.ResetTimer()
for i := 0; i < b.N; i++ {
g := NewGraph()
populate(g, nodes, pods, pvs, attachments)
populate(g, nodes, pods, pvs, attachments, slices)
}
}
@@ -544,14 +609,14 @@ func BenchmarkPopulationRetention(b *testing.B) {
uniquePVCsPerPod: 1,
}
nodes, pods, pvs, attachments := generate(opts)
nodes, pods, pvs, attachments, slices := generate(opts)
// Garbage collect before the first iteration
runtime.GC()
b.ResetTimer()
for i := 0; i < b.N; i++ {
g := NewGraph()
populate(g, nodes, pods, pvs, attachments)
populate(g, nodes, pods, pvs, attachments, slices)
if i == 0 {
f, _ := os.Create("BenchmarkPopulationRetention.profile")
@@ -582,9 +647,9 @@ func BenchmarkWriteIndexMaintenance(b *testing.B) {
sharedPVCsPerPod: 0,
uniquePVCsPerPod: 1,
}
nodes, pods, pvs, attachments := generate(opts)
nodes, pods, pvs, attachments, slices := generate(opts)
g := NewGraph()
populate(g, nodes, pods, pvs, attachments)
populate(g, nodes, pods, pvs, attachments, slices)
// Garbage collect before the first iteration
runtime.GC()
b.ResetTimer()
@@ -616,8 +681,8 @@ func BenchmarkAuthorization(b *testing.B) {
sharedPVCsPerPod: 0,
uniquePVCsPerPod: 1,
}
nodes, pods, pvs, attachments := generate(opts)
populate(g, nodes, pods, pvs, attachments)
nodes, pods, pvs, attachments, slices := generate(opts)
populate(g, nodes, pods, pvs, attachments, slices)
identifier := nodeidentifier.NewDefaultNodeIdentifier()
authz := NewAuthorizer(g, identifier, bootstrappolicy.NodeRules())
@@ -766,7 +831,7 @@ func BenchmarkAuthorization(b *testing.B) {
}
}
func populate(graph *Graph, nodes []*corev1.Node, pods []*corev1.Pod, pvs []*corev1.PersistentVolume, attachments []*storagev1.VolumeAttachment) {
func populate(graph *Graph, nodes []*corev1.Node, pods []*corev1.Pod, pvs []*corev1.PersistentVolume, attachments []*storagev1.VolumeAttachment, slices []*resourcev1alpha2.ResourceSlice) {
p := &graphPopulator{}
p.graph = graph
for _, pod := range pods {
@@ -778,6 +843,9 @@ func populate(graph *Graph, nodes []*corev1.Node, pods []*corev1.Pod, pvs []*cor
for _, attachment := range attachments {
p.addVolumeAttachment(attachment)
}
for _, slice := range slices {
p.addResourceSlice(slice)
}
}
func randomSubset(a, b int) []int {
@@ -791,11 +859,12 @@ func randomSubset(a, b int) []int {
// the secret/configmap/pvc/node references in the pod and pv objects are named to indicate the connections between the objects.
// for example, secret0-pod0-node0 is a secret referenced by pod0 which is bound to node0.
// when populated into the graph, the node authorizer should allow node0 to access that secret, but not node1.
func generate(opts *sampleDataOpts) ([]*corev1.Node, []*corev1.Pod, []*corev1.PersistentVolume, []*storagev1.VolumeAttachment) {
func generate(opts *sampleDataOpts) ([]*corev1.Node, []*corev1.Pod, []*corev1.PersistentVolume, []*storagev1.VolumeAttachment, []*resourcev1alpha2.ResourceSlice) {
nodes := make([]*corev1.Node, 0, opts.nodes)
pods := make([]*corev1.Pod, 0, opts.nodes*opts.podsPerNode)
pvs := make([]*corev1.PersistentVolume, 0, (opts.nodes*opts.podsPerNode*opts.uniquePVCsPerPod)+(opts.sharedPVCsPerPod*opts.namespaces))
attachments := make([]*storagev1.VolumeAttachment, 0, opts.nodes*opts.attachmentsPerNode)
slices := make([]*resourcev1alpha2.ResourceSlice, 0, opts.nodes*opts.nodeResourceCapacitiesPerNode)
rand.Seed(12345)
@@ -821,8 +890,17 @@ func generate(opts *sampleDataOpts) ([]*corev1.Node, []*corev1.Pod, []*corev1.Pe
ObjectMeta: metav1.ObjectMeta{Name: nodeName},
Spec: corev1.NodeSpec{},
})
for p := 0; p <= opts.nodeResourceCapacitiesPerNode; p++ {
name := fmt.Sprintf("slice%d-%s", p, nodeName)
slice := &resourcev1alpha2.ResourceSlice{
ObjectMeta: metav1.ObjectMeta{Name: name},
NodeName: nodeName,
}
slices = append(slices, slice)
}
}
return nodes, pods, pvs, attachments
return nodes, pods, pvs, attachments, slices
}
func generatePod(name, namespace, nodeName, svcAccountName string, opts *sampleDataOpts) (*corev1.Pod, []*corev1.PersistentVolume) {


@@ -214,7 +214,7 @@ func buildControllerRoles() ([]rbacv1.ClusterRole, []rbacv1.ClusterRoleBinding)
rbacv1helpers.NewRule("update").Groups(legacyGroup).Resources("pods/finalizers").RuleOrDie(),
rbacv1helpers.NewRule("get", "list", "watch", "create", "delete").Groups(resourceGroup).Resources("resourceclaims").RuleOrDie(),
rbacv1helpers.NewRule("get", "list", "watch", "create", "update", "patch").Groups(resourceGroup).Resources("podschedulingcontexts").RuleOrDie(),
rbacv1helpers.NewRule("update", "patch").Groups(resourceGroup).Resources("resourceclaims/status").RuleOrDie(),
rbacv1helpers.NewRule("update", "patch").Groups(resourceGroup).Resources("resourceclaims", "resourceclaims/status").RuleOrDie(),
rbacv1helpers.NewRule("update", "patch").Groups(legacyGroup).Resources("pods/status").RuleOrDie(),
eventsRule(),
},


@@ -576,11 +576,13 @@ func ClusterRoles() []rbacv1.ClusterRole {
// Needed for dynamic resource allocation.
if utilfeature.DefaultFeatureGate.Enabled(features.DynamicResourceAllocation) {
kubeSchedulerRules = append(kubeSchedulerRules,
rbacv1helpers.NewRule(Read...).Groups(resourceGroup).Resources("resourceclaims", "resourceclasses").RuleOrDie(),
rbacv1helpers.NewRule(Read...).Groups(resourceGroup).Resources("resourceclasses").RuleOrDie(),
rbacv1helpers.NewRule(ReadUpdate...).Groups(resourceGroup).Resources("resourceclaims").RuleOrDie(),
rbacv1helpers.NewRule(ReadUpdate...).Groups(resourceGroup).Resources("resourceclaims/status").RuleOrDie(),
rbacv1helpers.NewRule(ReadWrite...).Groups(resourceGroup).Resources("podschedulingcontexts").RuleOrDie(),
rbacv1helpers.NewRule(Read...).Groups(resourceGroup).Resources("podschedulingcontexts/status").RuleOrDie(),
rbacv1helpers.NewRule(ReadUpdate...).Groups(legacyGroup).Resources("pods/finalizers").RuleOrDie(),
rbacv1helpers.NewRule(Read...).Groups(resourceGroup).Resources("resourceslices", "resourceclassparameters", "resourceclaimparameters").RuleOrDie(),
)
}
roles = append(roles, rbacv1.ClusterRole{


@@ -250,6 +250,9 @@
allowedImports:
- k8s.io/api
- k8s.io/apimachinery
- k8s.io/apiserver/pkg/apis/cel
- k8s.io/apiserver/pkg/cel
- k8s.io/apiserver/pkg/cel/environment
- k8s.io/client-go
- k8s.io/dynamic-resource-allocation
- k8s.io/klog


@@ -2265,6 +2265,8 @@ rules:
branch: master
- repository: component-base
branch: master
- repository: kms
branch: master
- repository: kubelet
branch: master
source:

File diff suppressed because it is too large.


@@ -22,6 +22,7 @@ syntax = "proto2";
package k8s.io.api.resource.v1alpha2;
import "k8s.io/api/core/v1/generated.proto";
import "k8s.io/apimachinery/pkg/api/resource/generated.proto";
import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto";
import "k8s.io/apimachinery/pkg/runtime/generated.proto";
import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto";
@@ -63,6 +64,158 @@ message AllocationResult {
optional bool shareable = 3;
}
// AllocationResultModel must have one and only one field set.
message AllocationResultModel {
// NamedResources describes the allocation result when using the named resources model.
//
// +optional
optional NamedResourcesAllocationResult namedResources = 1;
}
// DriverAllocationResult contains vendor parameters and the allocation result for
// one request.
message DriverAllocationResult {
// VendorRequestParameters are the per-request configuration parameters
// from the time that the claim was allocated.
//
// +optional
optional k8s.io.apimachinery.pkg.runtime.RawExtension vendorRequestParameters = 1;
optional AllocationResultModel allocationResultModel = 2;
}
// DriverRequests describes all resources that are needed from one particular driver.
message DriverRequests {
// DriverName is the name used by the DRA driver kubelet plugin.
optional string driverName = 1;
// VendorParameters are arbitrary setup parameters for all requests of the
// claim. They are ignored while allocating the claim.
//
// +optional
optional k8s.io.apimachinery.pkg.runtime.RawExtension vendorParameters = 2;
// Requests describes all resources that are needed from the driver.
// +listType=atomic
repeated ResourceRequest requests = 3;
}
// NamedResourcesAllocationResult is used in AllocationResultModel.
message NamedResourcesAllocationResult {
// Name is the name of the selected resource instance.
optional string name = 1;
}
// NamedResourcesAttribute is a combination of an attribute name and its value.
message NamedResourcesAttribute {
// Name is a unique identifier among all attributes of this resource
// instance. It must be a DNS subdomain.
optional string name = 1;
optional NamedResourcesAttributeValue attributeValue = 2;
}
// NamedResourcesAttributeValue must have one and only one field set.
message NamedResourcesAttributeValue {
// QuantityValue is a quantity.
optional k8s.io.apimachinery.pkg.api.resource.Quantity quantity = 6;
// BoolValue is a true/false value.
optional bool bool = 2;
// IntValue is a 64-bit integer.
optional int64 int = 7;
// IntSliceValue is an array of 64-bit integers.
optional NamedResourcesIntSlice intSlice = 8;
// StringValue is a string.
optional string string = 5;
// StringSliceValue is an array of strings.
optional NamedResourcesStringSlice stringSlice = 9;
// VersionValue is a semantic version according to semver.org spec 2.0.0.
optional string version = 10;
}
// NamedResourcesFilter is used in ResourceFilterModel.
message NamedResourcesFilter {
// Selector is a CEL expression which must evaluate to true if a
// resource instance is suitable. The language is as defined in
// https://kubernetes.io/docs/reference/using-api/cel/
//
// In addition, for each type in NamedResourcesAttributeValue there is a map that
// resolves to the corresponding value of the instance under evaluation.
// For example:
//
// attributes.quantity["a"].isGreaterThan(quantity("0")) &&
// attributes.stringslice["b"].isSorted()
optional string selector = 1;
}
// NamedResourcesInstance represents one individual hardware instance that can be selected based
// on its attributes.
message NamedResourcesInstance {
// Name is a unique identifier among all resource instances managed by
// the driver on the node. It must be a DNS subdomain.
optional string name = 1;
// Attributes defines the attributes of this resource instance.
// The name of each attribute must be unique.
//
// +listType=atomic
// +optional
repeated NamedResourcesAttribute attributes = 2;
}
// NamedResourcesIntSlice contains a slice of 64-bit integers.
message NamedResourcesIntSlice {
// Ints is the slice of 64-bit integers.
//
// +listType=atomic
repeated int64 ints = 1;
}
// NamedResourcesRequest is used in ResourceRequestModel.
message NamedResourcesRequest {
// Selector is a CEL expression which must evaluate to true if a
// resource instance is suitable. The language is as defined in
// https://kubernetes.io/docs/reference/using-api/cel/
//
// In addition, for each type in NamedResourcesAttributeValue there is a map that
// resolves to the corresponding value of the instance under evaluation.
// For example:
//
// attributes.quantity["a"].isGreaterThan(quantity("0")) &&
// attributes.stringslice["b"].isSorted()
optional string selector = 1;
}
// NamedResourcesResources is used in NodeResourceModel.
message NamedResourcesResources {
// The list of all individual resources instances currently available.
//
// +listType=atomic
repeated NamedResourcesInstance instances = 1;
}
// NamedResourcesStringSlice contains a slice of strings.
message NamedResourcesStringSlice {
// Strings is the slice of strings.
//
// +listType=atomic
repeated string strings = 1;
}
// NodeResourceModel must have one and only one field set.
message NodeResourceModel {
// NamedResources describes available resources using the named resources model.
//
// +optional
optional NamedResourcesResources namedResources = 1;
}
// PodSchedulingContext objects hold information that is needed to schedule
// a Pod with ResourceClaims that use "WaitForFirstConsumer" allocation
// mode.
@@ -176,6 +329,45 @@ message ResourceClaimList {
repeated ResourceClaim items = 2;
}
// ResourceClaimParameters defines resource requests for a ResourceClaim in an
// in-tree format understood by Kubernetes.
message ResourceClaimParameters {
// Standard object metadata
optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
// If this object was created from some other resource, then this links
// back to that resource. This field is used to find the in-tree representation
// of the claim parameters when the parameter reference of the claim refers
// to some unknown type.
// +optional
optional ResourceClaimParametersReference generatedFrom = 2;
// Shareable indicates whether the allocated claim is meant to be shareable
// by multiple consumers at the same time.
// +optional
optional bool shareable = 3;
// DriverRequests describes all resources that are needed for the
// allocated claim. A single claim may use resources coming from
// different drivers. For each driver, this array has at most one
// entry which then may have one or more per-driver requests.
//
// May be empty, in which case the claim can always be allocated.
//
// +listType=atomic
repeated DriverRequests driverRequests = 4;
}
// ResourceClaimParametersList is a collection of ResourceClaimParameters.
message ResourceClaimParametersList {
// Standard list metadata
// +optional
optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
// Items is the list of resource claim parameters.
repeated ResourceClaimParameters items = 2;
}
// ResourceClaimParametersReference contains enough information to let you
// locate the parameters for a ResourceClaim. The object must be in the same
// namespace as the ResourceClaim.
@@ -344,6 +536,11 @@ message ResourceClass {
// Setting this field is optional. If null, all nodes are candidates.
// +optional
optional k8s.io.api.core.v1.NodeSelector suitableNodes = 4;
// If and only if allocation of claims using this class is handled
// via structured parameters, then StructuredParameters must be set to true.
// +optional
optional bool structuredParameters = 5;
}
// ResourceClassList is a collection of classes.
@@ -356,6 +553,43 @@ message ResourceClassList {
repeated ResourceClass items = 2;
}
// ResourceClassParameters defines resource requests for a ResourceClass in an
// in-tree format understood by Kubernetes.
message ResourceClassParameters {
// Standard object metadata
optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
// If this object was created from some other resource, then this links
// back to that resource. This field is used to find the in-tree representation
// of the class parameters when the parameter reference of the class refers
// to some unknown type.
// +optional
optional ResourceClassParametersReference generatedFrom = 2;
// VendorParameters are arbitrary setup parameters for all claims using
// this class. They are ignored while allocating the claim. There must
// not be more than one entry per driver.
//
// +listType=atomic
// +optional
repeated VendorParameters vendorParameters = 3;
// Filters describe additional constraints that must be met when using the class.
//
// +listType=atomic
repeated ResourceFilter filters = 4;
}
// ResourceClassParametersList is a collection of ResourceClassParameters.
message ResourceClassParametersList {
// Standard list metadata
// +optional
optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
// Items is the list of resource class parameters.
repeated ResourceClassParameters items = 2;
}
// ResourceClassParametersReference contains enough information to let you
// locate the parameters for a ResourceClass.
message ResourceClassParametersReference {
@@ -379,6 +613,22 @@ message ResourceClassParametersReference {
optional string namespace = 4;
}
// ResourceFilter is a filter for resources from one particular driver.
message ResourceFilter {
// DriverName is the name used by the DRA driver kubelet plugin.
optional string driverName = 1;
optional ResourceFilterModel resourceFilterModel = 2;
}
// ResourceFilterModel must have one and only one field set.
message ResourceFilterModel {
// NamedResources describes a resource filter using the named resources model.
//
// +optional
optional NamedResourcesFilter namedResources = 1;
}
// ResourceHandle holds opaque resource data for processing by a specific kubelet plugin.
message ResourceHandle {
// DriverName specifies the name of the resource driver whose kubelet
@@ -398,5 +648,102 @@ message ResourceHandle {
// future, but not reduced.
// +optional
optional string data = 2;
// If StructuredData is set, then it needs to be used instead of Data.
//
// +optional
optional StructuredResourceHandle structuredData = 5;
}
// ResourceRequest is a request for resources from one particular driver.
message ResourceRequest {
// VendorParameters are arbitrary setup parameters for the requested
// resource. They are ignored while allocating a claim.
//
// +optional
optional k8s.io.apimachinery.pkg.runtime.RawExtension vendorParameters = 1;
optional ResourceRequestModel resourceRequestModel = 2;
}
// ResourceRequestModel must have one and only one field set.
message ResourceRequestModel {
// NamedResources describes a request for resources with the named resources model.
//
// +optional
optional NamedResourcesRequest namedResources = 1;
}
// ResourceSlice provides information about available
// resources on individual nodes.
message ResourceSlice {
// Standard object metadata
// +optional
optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
// NodeName identifies the node which provides the resources
// if they are local to a node.
//
// A field selector can be used to list only ResourceSlice
// objects with a certain node name.
//
// +optional
optional string nodeName = 2;
// DriverName identifies the DRA driver providing the capacity information.
// A field selector can be used to list only ResourceSlice
// objects with a certain driver name.
optional string driverName = 3;
optional NodeResourceModel nodeResourceModel = 4;
}
// ResourceSliceList is a collection of ResourceSlices.
message ResourceSliceList {
// Standard list metadata
// +optional
optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
// Items is the list of resource slices.
repeated ResourceSlice items = 2;
}
// StructuredResourceHandle is the in-tree representation of the allocation result.
message StructuredResourceHandle {
// VendorClassParameters are the per-claim configuration parameters
// from the resource class at the time that the claim was allocated.
//
// +optional
optional k8s.io.apimachinery.pkg.runtime.RawExtension vendorClassParameters = 1;
// VendorClaimParameters are the per-claim configuration parameters
// from the resource claim parameters at the time that the claim was
// allocated.
//
// +optional
optional k8s.io.apimachinery.pkg.runtime.RawExtension vendorClaimParameters = 2;
// NodeName is the name of the node providing the necessary resources
// if the resources are local to a node.
//
// +optional
optional string nodeName = 4;
// Results lists all allocated driver resources.
//
// +listType=atomic
repeated DriverAllocationResult results = 5;
}
// VendorParameters are opaque parameters for one particular driver.
message VendorParameters {
// DriverName is the name used by the DRA driver kubelet plugin.
optional string driverName = 1;
// Parameters can be arbitrary setup parameters. They are ignored while
// allocating a claim.
//
// +optional
optional k8s.io.apimachinery.pkg.runtime.RawExtension parameters = 2;
}
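The one-of semantics of NamedResourcesAttributeValue are easiest to see from the Go API. The following is a hedged sketch, not part of the generated files; the package name, instance name, attribute names, and values are made up:
package example
import (
	resourcev1alpha2 "k8s.io/api/resource/v1alpha2"
	"k8s.io/apimachinery/pkg/api/resource"
)
// exampleInstance builds one resource instance with a quantity attribute and
// a string-slice attribute. Exactly one value field is set per attribute.
func exampleInstance() resourcev1alpha2.NamedResourcesInstance {
	memory := resource.MustParse("16Gi")
	return resourcev1alpha2.NamedResourcesInstance{
		Name: "gpu-0",
		Attributes: []resourcev1alpha2.NamedResourcesAttribute{
			{Name: "memory", NamedResourcesAttributeValue: resourcev1alpha2.NamedResourcesAttributeValue{QuantityValue: &memory}},
			{Name: "features", NamedResourcesAttributeValue: resourcev1alpha2.NamedResourcesAttributeValue{StringSliceValue: &resourcev1alpha2.NamedResourcesStringSlice{Strings: []string{"mig", "nvlink"}}}},
		},
	}
}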


@@ -0,0 +1,127 @@
/*
Copyright 2023 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1alpha2
import (
"k8s.io/apimachinery/pkg/api/resource"
)
// NamedResourcesResources is used in NodeResourceModel.
type NamedResourcesResources struct {
// The list of all individual resource instances currently available.
//
// +listType=atomic
Instances []NamedResourcesInstance `json:"instances" protobuf:"bytes,1,name=instances"`
}
// NamedResourcesInstance represents one individual hardware instance that can be selected based
// on its attributes.
type NamedResourcesInstance struct {
// Name is a unique identifier among all resource instances managed by
// the driver on the node. It must be a DNS subdomain.
Name string `json:"name" protobuf:"bytes,1,name=name"`
// Attributes defines the attributes of this resource instance.
// The name of each attribute must be unique.
//
// +listType=atomic
// +optional
Attributes []NamedResourcesAttribute `json:"attributes,omitempty" protobuf:"bytes,2,opt,name=attributes"`
}
// NamedResourcesAttribute is a combination of an attribute name and its value.
type NamedResourcesAttribute struct {
// Name is the name of this attribute. It must be unique among the
// attributes of the resource instance and must be a DNS subdomain.
Name string `json:"name" protobuf:"bytes,1,name=name"`
NamedResourcesAttributeValue `json:",inline" protobuf:"bytes,2,opt,name=attributeValue"`
}
// The Go field names below have a Value suffix to avoid a conflict between the
// field "String" and the corresponding method. That method is required.
// The Kubernetes API is defined without that suffix to keep it more natural.
// NamedResourcesAttributeValue must have one and only one field set.
type NamedResourcesAttributeValue struct {
// QuantityValue is a quantity.
QuantityValue *resource.Quantity `json:"quantity,omitempty" protobuf:"bytes,6,opt,name=quantity"`
// BoolValue is a true/false value.
BoolValue *bool `json:"bool,omitempty" protobuf:"bytes,2,opt,name=bool"`
// IntValue is a 64-bit integer.
IntValue *int64 `json:"int,omitempty" protobuf:"varint,7,opt,name=int"`
// IntSliceValue is an array of 64-bit integers.
IntSliceValue *NamedResourcesIntSlice `json:"intSlice,omitempty" protobuf:"varint,8,rep,name=intSlice"`
// StringValue is a string.
StringValue *string `json:"string,omitempty" protobuf:"bytes,5,opt,name=string"`
// StringSliceValue is an array of strings.
StringSliceValue *NamedResourcesStringSlice `json:"stringSlice,omitempty" protobuf:"bytes,9,rep,name=stringSlice"`
// VersionValue is a semantic version according to semver.org spec 2.0.0.
VersionValue *string `json:"version,omitempty" protobuf:"bytes,10,opt,name=version"`
}
// NamedResourcesIntSlice contains a slice of 64-bit integers.
type NamedResourcesIntSlice struct {
// Ints is the slice of 64-bit integers.
//
// +listType=atomic
Ints []int64 `json:"ints" protobuf:"bytes,1,opt,name=ints"`
}
// NamedResourcesStringSlice contains a slice of strings.
type NamedResourcesStringSlice struct {
// Strings is the slice of strings.
//
// +listType=atomic
Strings []string `json:"strings" protobuf:"bytes,1,opt,name=strings"`
}
// NamedResourcesRequest is used in ResourceRequestModel.
type NamedResourcesRequest struct {
// Selector is a CEL expression which must evaluate to true if a
// resource instance is suitable. The language is as defined in
// https://kubernetes.io/docs/reference/using-api/cel/
//
// In addition, for each type in NamedResourcesAttributeValue there is a map that
// resolves to the corresponding value of the instance under evaluation.
// For example:
//
// attributes.quantity["a"].isGreaterThan(quantity("0")) &&
// attributes.stringslice["b"].isSorted()
Selector string `json:"selector" protobuf:"bytes,1,name=selector"`
}
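// An illustrative sketch (not part of the generated API): a request that
// reuses the attribute names "a" and "b" from the example above. Whether
// such attributes exist depends entirely on what the driver publishes for
// its instances.
var exampleNamedResourcesRequest = NamedResourcesRequest{
	Selector: `attributes.quantity["a"].isGreaterThan(quantity("0")) && attributes.stringslice["b"].isSorted()`,
}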
// NamedResourcesFilter is used in ResourceFilterModel.
type NamedResourcesFilter struct {
// Selector is a CEL expression which must evaluate to true if a
// resource instance is suitable. The language is as defined in
// https://kubernetes.io/docs/reference/using-api/cel/
//
// In addition, for each type in NamedResourcesAttributeValue there is a map that
// resolves to the corresponding value of the instance under evaluation.
// For example:
//
// attributes.quantity["a"].isGreaterThan(quantity("0")) &&
// attributes.stringslice["b"].isSorted()
Selector string `json:"selector" protobuf:"bytes,1,name=selector"`
}
// NamedResourcesAllocationResult is used in AllocationResultModel.
type NamedResourcesAllocationResult struct {
// Name is the name of the selected resource instance.
Name string `json:"name" protobuf:"bytes,1,name=name"`
}
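// An illustrative sketch (not part of the generated API): how a
// driver-published instance with one quantity attribute might look. The
// instance name "gpu-0" and the attribute name "memory" are invented for
// this example.
var exampleNamedResourcesInstance = NamedResourcesInstance{
	Name: "gpu-0",
	Attributes: []NamedResourcesAttribute{
		{
			Name: "memory",
			NamedResourcesAttributeValue: NamedResourcesAttributeValue{
				// 16 GiB expressed as a binary-SI quantity.
				QuantityValue: resource.NewQuantity(16*1024*1024*1024, resource.BinarySI),
			},
		},
	},
}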

View File

@@ -52,6 +52,12 @@ func addKnownTypes(scheme *runtime.Scheme) error {
&ResourceClaimTemplateList{},
&PodSchedulingContext{},
&PodSchedulingContextList{},
&ResourceSlice{},
&ResourceSliceList{},
&ResourceClaimParameters{},
&ResourceClaimParametersList{},
&ResourceClassParameters{},
&ResourceClassParametersList{},
)
// Add common types

View File

@@ -19,9 +19,16 @@ package v1alpha2
import (
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/types"
)
const (
// Finalizer is the finalizer that gets set for claims
// which were allocated through a builtin controller.
Finalizer = "dra.k8s.io/delete-protection"
)
// +genclient
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// +k8s:prerelease-lifecycle-gen:introduced=1.26
@@ -192,11 +199,63 @@ type ResourceHandle struct {
// future, but not reduced.
// +optional
Data string `json:"data,omitempty" protobuf:"bytes,2,opt,name=data"`
// If StructuredData is set, then it needs to be used instead of Data.
//
// +optional
StructuredData *StructuredResourceHandle `json:"structuredData,omitempty" protobuf:"bytes,5,opt,name=structuredData"`
}
// ResourceHandleDataMaxSize represents the maximum size of resourceHandle.data.
const ResourceHandleDataMaxSize = 16 * 1024
// StructuredResourceHandle is the in-tree representation of the allocation result.
type StructuredResourceHandle struct {
// VendorClassParameters are the per-claim configuration parameters
// from the resource class at the time that the claim was allocated.
//
// +optional
VendorClassParameters runtime.RawExtension `json:"vendorClassParameters,omitempty" protobuf:"bytes,1,opt,name=vendorClassParameters"`
// VendorClaimParameters are the per-claim configuration parameters
// from the resource claim parameters at the time that the claim was
// allocated.
//
// +optional
VendorClaimParameters runtime.RawExtension `json:"vendorClaimParameters,omitempty" protobuf:"bytes,2,opt,name=vendorClaimParameters"`
// NodeName is the name of the node providing the necessary resources
// if the resources are local to a node.
//
// +optional
NodeName string `json:"nodeName,omitempty" protobuf:"bytes,4,name=nodeName"`
// Results lists all allocated driver resources.
//
// +listType=atomic
Results []DriverAllocationResult `json:"results" protobuf:"bytes,5,name=results"`
}
// DriverAllocationResult contains vendor parameters and the allocation result for
// one request.
type DriverAllocationResult struct {
// VendorRequestParameters are the per-request configuration parameters
// from the time that the claim was allocated.
//
// +optional
VendorRequestParameters runtime.RawExtension `json:"vendorRequestParameters,omitempty" protobuf:"bytes,1,opt,name=vendorRequestParameters"`
AllocationResultModel `json:",inline" protobuf:"bytes,2,name=allocationResultModel"`
}
// AllocationResultModel must have one and only one field set.
type AllocationResultModel struct {
// NamedResources describes the allocation result when using the named resources model.
//
// +optional
NamedResources *NamedResourcesAllocationResult `json:"namedResources,omitempty" protobuf:"bytes,1,opt,name=namedResources"`
}
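// An illustrative sketch (not part of the generated API): a structured handle
// recording that one named resource instance was allocated on one node. The
// node name "worker-1" and the instance name "gpu-0" are placeholders.
var exampleStructuredResourceHandle = StructuredResourceHandle{
	NodeName: "worker-1",
	Results: []DriverAllocationResult{
		{
			AllocationResultModel: AllocationResultModel{
				NamedResources: &NamedResourcesAllocationResult{Name: "gpu-0"},
			},
		},
	},
}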
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// +k8s:prerelease-lifecycle-gen:introduced=1.26
@@ -347,6 +406,11 @@ type ResourceClass struct {
// Setting this field is optional. If null, all nodes are candidates.
// +optional
SuitableNodes *v1.NodeSelector `json:"suitableNodes,omitempty" protobuf:"bytes,4,opt,name=suitableNodes"`
// If and only if allocation of claims using this class is handled
// via structured parameters, then StructuredParameters must be set to true.
// +optional
StructuredParameters *bool `json:"structuredParameters,omitempty" protobuf:"bytes,5,opt,name=structuredParameters"`
}
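// An illustrative sketch (not part of the generated API): a class that opts
// in to structured parameters. The driver name is a placeholder.
var exampleStructuredParameters = true
var exampleResourceClass = ResourceClass{
	DriverName:           "gpu.example.com",
	StructuredParameters: &exampleStructuredParameters,
}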
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
@@ -462,3 +526,212 @@ type ResourceClaimTemplateList struct {
// Items is the list of resource claim templates.
Items []ResourceClaimTemplate `json:"items" protobuf:"bytes,2,rep,name=items"`
}
// +genclient
// +genclient:nonNamespaced
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// +k8s:prerelease-lifecycle-gen:introduced=1.30
// ResourceSlice provides information about available
// resources on individual nodes.
type ResourceSlice struct {
metav1.TypeMeta `json:",inline"`
// Standard object metadata
// +optional
metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
// NodeName identifies the node which provides the resources
// if they are local to a node.
//
// A field selector can be used to list only ResourceSlice
// objects with a certain node name.
//
// +optional
NodeName string `json:"nodeName,omitempty" protobuf:"bytes,2,opt,name=nodeName"`
// DriverName identifies the DRA driver providing the capacity information.
// A field selector can be used to list only ResourceSlice
// objects with a certain driver name.
DriverName string `json:"driverName" protobuf:"bytes,3,name=driverName"`
NodeResourceModel `json:",inline" protobuf:"bytes,4,name=nodeResourceModel"`
}
// NodeResourceModel must have one and only one field set.
type NodeResourceModel struct {
// NamedResources describes available resources using the named resources model.
//
// +optional
NamedResources *NamedResourcesResources `json:"namedResources,omitempty" protobuf:"bytes,1,opt,name=namedResources"`
}
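// An illustrative sketch (not part of the generated API): a per-node
// ResourceSlice published by a driver using the named resources model. The
// node, driver, and instance names are placeholders.
var exampleResourceSlice = ResourceSlice{
	NodeName:   "worker-1",
	DriverName: "gpu.example.com",
	NodeResourceModel: NodeResourceModel{
		NamedResources: &NamedResourcesResources{
			Instances: []NamedResourcesInstance{{Name: "gpu-0"}},
		},
	},
}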
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// +k8s:prerelease-lifecycle-gen:introduced=1.30
// ResourceSliceList is a collection of ResourceSlices.
type ResourceSliceList struct {
metav1.TypeMeta `json:",inline"`
// Standard list metadata
// +optional
metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
// Items is the list of node resource capacity objects.
Items []ResourceSlice `json:"items" protobuf:"bytes,2,rep,name=items"`
}
// +genclient
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// +k8s:prerelease-lifecycle-gen:introduced=1.30
// ResourceClaimParameters defines resource requests for a ResourceClaim in an
// in-tree format understood by Kubernetes.
type ResourceClaimParameters struct {
metav1.TypeMeta `json:",inline"`
// Standard object metadata
metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
// If this object was created from some other resource, then this links
// back to that resource. This field is used to find the in-tree representation
// of the claim parameters when the parameter reference of the claim refers
// to some unknown type.
// +optional
GeneratedFrom *ResourceClaimParametersReference `json:"generatedFrom,omitempty" protobuf:"bytes,2,opt,name=generatedFrom"`
// Shareable indicates whether the allocated claim is meant to be shareable
// by multiple consumers at the same time.
// +optional
Shareable bool `json:"shareable,omitempty" protobuf:"bytes,3,opt,name=shareable"`
// DriverRequests describes all resources that are needed for the
// allocated claim. A single claim may use resources coming from
// different drivers. For each driver, this array has at most one
// entry which then may have one or more per-driver requests.
//
// May be empty, in which case the claim can always be allocated.
//
// +listType=atomic
DriverRequests []DriverRequests `json:"driverRequests,omitempty" protobuf:"bytes,4,opt,name=driverRequests"`
}
// DriverRequests describes all resources that are needed from one particular driver.
type DriverRequests struct {
// DriverName is the name used by the DRA driver kubelet plugin.
DriverName string `json:"driverName,omitempty" protobuf:"bytes,1,opt,name=driverName"`
// VendorParameters are arbitrary setup parameters for all requests of the
// claim. They are ignored while allocating the claim.
//
// +optional
VendorParameters runtime.RawExtension `json:"vendorParameters,omitempty" protobuf:"bytes,2,opt,name=vendorParameters"`
// Requests describes all resources that are needed from the driver.
// +listType=atomic
Requests []ResourceRequest `json:"requests,omitempty" protobuf:"bytes,3,opt,name=requests"`
}
// ResourceRequest is a request for resources from one particular driver.
type ResourceRequest struct {
// VendorParameters are arbitrary setup parameters for the requested
// resource. They are ignored while allocating a claim.
//
// +optional
VendorParameters runtime.RawExtension `json:"vendorParameters,omitempty" protobuf:"bytes,1,opt,name=vendorParameters"`
ResourceRequestModel `json:",inline" protobuf:"bytes,2,name=resourceRequestModel"`
}
// ResourceRequestModel must have one and only one field set.
type ResourceRequestModel struct {
// NamedResources describes a request for resources with the named resources model.
//
// +optional
NamedResources *NamedResourcesRequest `json:"namedResources,omitempty" protobuf:"bytes,1,opt,name=namedResources"`
}
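// An illustrative sketch (not part of the generated API): claim parameters
// with a single driver request that uses the named resources model. The
// driver name and the selector are placeholders; the selector "true" matches
// every instance.
var exampleResourceClaimParameters = ResourceClaimParameters{
	Shareable: true,
	DriverRequests: []DriverRequests{
		{
			DriverName: "gpu.example.com",
			Requests: []ResourceRequest{
				{
					ResourceRequestModel: ResourceRequestModel{
						NamedResources: &NamedResourcesRequest{Selector: "true"},
					},
				},
			},
		},
	},
}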
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// +k8s:prerelease-lifecycle-gen:introduced=1.30
// ResourceClaimParametersList is a collection of ResourceClaimParameters.
type ResourceClaimParametersList struct {
metav1.TypeMeta `json:",inline"`
// Standard list metadata
// +optional
metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
// Items is the list of resource claim parameters.
Items []ResourceClaimParameters `json:"items" protobuf:"bytes,2,rep,name=items"`
}
// +genclient
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// +k8s:prerelease-lifecycle-gen:introduced=1.30
// ResourceClassParameters defines resource requests for a ResourceClass in an
// in-tree format understood by Kubernetes.
type ResourceClassParameters struct {
metav1.TypeMeta `json:",inline"`
// Standard object metadata
metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
// If this object was created from some other resource, then this links
// back to that resource. This field is used to find the in-tree representation
// of the class parameters when the parameter reference of the class refers
// to some unknown type.
// +optional
GeneratedFrom *ResourceClassParametersReference `json:"generatedFrom,omitempty" protobuf:"bytes,2,opt,name=generatedFrom"`
// VendorParameters are arbitrary setup parameters for all claims using
// this class. They are ignored while allocating the claim. There must
// not be more than one entry per driver.
//
// +listType=atomic
// +optional
VendorParameters []VendorParameters `json:"vendorParameters,omitempty" protobuf:"bytes,3,opt,name=vendorParameters"`
// Filters describes additional constraints that must be met when using the class.
//
// +listType=atomic
Filters []ResourceFilter `json:"filters,omitempty" protobuf:"bytes,4,opt,name=filters"`
}
// ResourceFilter is a filter for resources from one particular driver.
type ResourceFilter struct {
// DriverName is the name used by the DRA driver kubelet plugin.
DriverName string `json:"driverName,omitempty" protobuf:"bytes,1,opt,name=driverName"`
ResourceFilterModel `json:",inline" protobuf:"bytes,2,name=resourceFilterModel"`
}
// ResourceFilterModel must have one and only one field set.
type ResourceFilterModel struct {
// NamedResources describes a resource filter using the named resources model.
//
// +optional
NamedResources *NamedResourcesFilter `json:"namedResources,omitempty" protobuf:"bytes,1,opt,name=namedResources"`
}
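// An illustrative sketch (not part of the generated API): class parameters
// that restrict allocation to instances matching a CEL filter. The driver
// name and the selector are placeholders; the selector "true" accepts every
// instance.
var exampleResourceClassParameters = ResourceClassParameters{
	Filters: []ResourceFilter{
		{
			DriverName: "gpu.example.com",
			ResourceFilterModel: ResourceFilterModel{
				NamedResources: &NamedResourcesFilter{Selector: "true"},
			},
		},
	},
}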
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// +k8s:prerelease-lifecycle-gen:introduced=1.30
// ResourceClassParametersList is a collection of ResourceClassParameters.
type ResourceClassParametersList struct {
metav1.TypeMeta `json:",inline"`
// Standard list metadata
// +optional
metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
// Items is the list of resource class parameters.
Items []ResourceClassParameters `json:"items" protobuf:"bytes,2,rep,name=items"`
}
// VendorParameters are opaque parameters for one particular driver.
type VendorParameters struct {
// DriverName is the name used by the DRA driver kubelet plugin.
DriverName string `json:"driverName,omitempty" protobuf:"bytes,1,opt,name=driverName"`
// Parameters can be arbitrary setup parameters. They are ignored while
// allocating a claim.
//
// +optional
Parameters runtime.RawExtension `json:"parameters,omitempty" protobuf:"bytes,2,opt,name=parameters"`
}

View File

@@ -38,6 +38,44 @@ func (AllocationResult) SwaggerDoc() map[string]string {
return map_AllocationResult
}
var map_AllocationResultModel = map[string]string{
"": "AllocationResultModel must have one and only one field set.",
"namedResources": "NamedResources describes the allocation result when using the named resources model.",
}
func (AllocationResultModel) SwaggerDoc() map[string]string {
return map_AllocationResultModel
}
var map_DriverAllocationResult = map[string]string{
"": "DriverAllocationResult contains vendor parameters and the allocation result for one request.",
"vendorRequestParameters": "VendorRequestParameters are the per-request configuration parameters from the time that the claim was allocated.",
}
func (DriverAllocationResult) SwaggerDoc() map[string]string {
return map_DriverAllocationResult
}
var map_DriverRequests = map[string]string{
"": "DriverRequests describes all resources that are needed from one particular driver.",
"driverName": "DriverName is the name used by the DRA driver kubelet plugin.",
"vendorParameters": "VendorParameters are arbitrary setup parameters for all requests of the claim. They are ignored while allocating the claim.",
"requests": "Requests describes all resources that are needed from the driver.",
}
func (DriverRequests) SwaggerDoc() map[string]string {
return map_DriverRequests
}
var map_NodeResourceModel = map[string]string{
"": "NodeResourceModel must have one and only one field set.",
"namedResources": "NamedResources describes available resources using the named resources model.",
}
func (NodeResourceModel) SwaggerDoc() map[string]string {
return map_NodeResourceModel
}
var map_PodSchedulingContext = map[string]string{
"": "PodSchedulingContext objects hold information that is needed to schedule a Pod with ResourceClaims that use \"WaitForFirstConsumer\" allocation mode.\n\nThis is an alpha type and requires enabling the DynamicResourceAllocation feature gate.",
"metadata": "Standard object metadata",
@@ -111,6 +149,28 @@ func (ResourceClaimList) SwaggerDoc() map[string]string {
return map_ResourceClaimList
}
var map_ResourceClaimParameters = map[string]string{
"": "ResourceClaimParameters defines resource requests for a ResourceClaim in an in-tree format understood by Kubernetes.",
"metadata": "Standard object metadata",
"generatedFrom": "If this object was created from some other resource, then this links back to that resource. This field is used to find the in-tree representation of the claim parameters when the parameter reference of the claim refers to some unknown type.",
"shareable": "Shareable indicates whether the allocated claim is meant to be shareable by multiple consumers at the same time.",
"driverRequests": "DriverRequests describes all resources that are needed for the allocated claim. A single claim may use resources coming from different drivers. For each driver, this array has at most one entry which then may have one or more per-driver requests.\n\nMay be empty, in which case the claim can always be allocated.",
}
func (ResourceClaimParameters) SwaggerDoc() map[string]string {
return map_ResourceClaimParameters
}
var map_ResourceClaimParametersList = map[string]string{
"": "ResourceClaimParametersList is a collection of ResourceClaimParameters.",
"metadata": "Standard list metadata",
"items": "Items is the list of node resource capacity objects.",
}
func (ResourceClaimParametersList) SwaggerDoc() map[string]string {
return map_ResourceClaimParametersList
}
var map_ResourceClaimParametersReference = map[string]string{
"": "ResourceClaimParametersReference contains enough information to let you locate the parameters for a ResourceClaim. The object must be in the same namespace as the ResourceClaim.",
"apiGroup": "APIGroup is the group for the resource being referenced. It is empty for the core API. This matches the group in the APIVersion that is used when creating the resources.",
@@ -186,11 +246,12 @@ func (ResourceClaimTemplateSpec) SwaggerDoc() map[string]string {
}
var map_ResourceClass = map[string]string{
"": "ResourceClass is used by administrators to influence how resources are allocated.\n\nThis is an alpha type and requires enabling the DynamicResourceAllocation feature gate.",
"metadata": "Standard object metadata",
"driverName": "DriverName defines the name of the dynamic resource driver that is used for allocation of a ResourceClaim that uses this class.\n\nResource drivers have a unique name in forward domain order (acme.example.com).",
"parametersRef": "ParametersRef references an arbitrary separate object that may hold parameters that will be used by the driver when allocating a resource that uses this class. A dynamic resource driver can distinguish between parameters stored here and and those stored in ResourceClaimSpec.",
"suitableNodes": "Only nodes matching the selector will be considered by the scheduler when trying to find a Node that fits a Pod when that Pod uses a ResourceClaim that has not been allocated yet.\n\nSetting this field is optional. If null, all nodes are candidates.",
"": "ResourceClass is used by administrators to influence how resources are allocated.\n\nThis is an alpha type and requires enabling the DynamicResourceAllocation feature gate.",
"metadata": "Standard object metadata",
"driverName": "DriverName defines the name of the dynamic resource driver that is used for allocation of a ResourceClaim that uses this class.\n\nResource drivers have a unique name in forward domain order (acme.example.com).",
"parametersRef": "ParametersRef references an arbitrary separate object that may hold parameters that will be used by the driver when allocating a resource that uses this class. A dynamic resource driver can distinguish between parameters stored here and and those stored in ResourceClaimSpec.",
"suitableNodes": "Only nodes matching the selector will be considered by the scheduler when trying to find a Node that fits a Pod when that Pod uses a ResourceClaim that has not been allocated yet.\n\nSetting this field is optional. If null, all nodes are candidates.",
"structuredParameters": "If and only if allocation of claims using this class is handled via structured parameters, then StructuredParameters must be set to true.",
}
func (ResourceClass) SwaggerDoc() map[string]string {
@@ -207,6 +268,28 @@ func (ResourceClassList) SwaggerDoc() map[string]string {
return map_ResourceClassList
}
var map_ResourceClassParameters = map[string]string{
"": "ResourceClassParameters defines resource requests for a ResourceClass in an in-tree format understood by Kubernetes.",
"metadata": "Standard object metadata",
"generatedFrom": "If this object was created from some other resource, then this links back to that resource. This field is used to find the in-tree representation of the class parameters when the parameter reference of the class refers to some unknown type.",
"vendorParameters": "VendorParameters are arbitrary setup parameters for all claims using this class. They are ignored while allocating the claim. There must not be more than one entry per driver.",
"filters": "Filters describes additional contraints that must be met when using the class.",
}
func (ResourceClassParameters) SwaggerDoc() map[string]string {
return map_ResourceClassParameters
}
var map_ResourceClassParametersList = map[string]string{
"": "ResourceClassParametersList is a collection of ResourceClassParameters.",
"metadata": "Standard list metadata",
"items": "Items is the list of node resource capacity objects.",
}
func (ResourceClassParametersList) SwaggerDoc() map[string]string {
return map_ResourceClassParametersList
}
var map_ResourceClassParametersReference = map[string]string{
"": "ResourceClassParametersReference contains enough information to let you locate the parameters for a ResourceClass.",
"apiGroup": "APIGroup is the group for the resource being referenced. It is empty for the core API. This matches the group in the APIVersion that is used when creating the resources.",
@@ -219,14 +302,94 @@ func (ResourceClassParametersReference) SwaggerDoc() map[string]string {
return map_ResourceClassParametersReference
}
var map_ResourceFilter = map[string]string{
"": "ResourceFilter is a filter for resources from one particular driver.",
"driverName": "DriverName is the name used by the DRA driver kubelet plugin.",
}
func (ResourceFilter) SwaggerDoc() map[string]string {
return map_ResourceFilter
}
var map_ResourceFilterModel = map[string]string{
"": "ResourceFilterModel must have one and only one field set.",
"namedResources": "NamedResources describes a resource filter using the named resources model.",
}
func (ResourceFilterModel) SwaggerDoc() map[string]string {
return map_ResourceFilterModel
}
var map_ResourceHandle = map[string]string{
"": "ResourceHandle holds opaque resource data for processing by a specific kubelet plugin.",
"driverName": "DriverName specifies the name of the resource driver whose kubelet plugin should be invoked to process this ResourceHandle's data once it lands on a node. This may differ from the DriverName set in ResourceClaimStatus this ResourceHandle is embedded in.",
"data": "Data contains the opaque data associated with this ResourceHandle. It is set by the controller component of the resource driver whose name matches the DriverName set in the ResourceClaimStatus this ResourceHandle is embedded in. It is set at allocation time and is intended for processing by the kubelet plugin whose name matches the DriverName set in this ResourceHandle.\n\nThe maximum size of this field is 16KiB. This may get increased in the future, but not reduced.",
"": "ResourceHandle holds opaque resource data for processing by a specific kubelet plugin.",
"driverName": "DriverName specifies the name of the resource driver whose kubelet plugin should be invoked to process this ResourceHandle's data once it lands on a node. This may differ from the DriverName set in ResourceClaimStatus this ResourceHandle is embedded in.",
"data": "Data contains the opaque data associated with this ResourceHandle. It is set by the controller component of the resource driver whose name matches the DriverName set in the ResourceClaimStatus this ResourceHandle is embedded in. It is set at allocation time and is intended for processing by the kubelet plugin whose name matches the DriverName set in this ResourceHandle.\n\nThe maximum size of this field is 16KiB. This may get increased in the future, but not reduced.",
"structuredData": "If StructuredData is set, then it needs to be used instead of Data.",
}
func (ResourceHandle) SwaggerDoc() map[string]string {
return map_ResourceHandle
}
var map_ResourceRequest = map[string]string{
"": "ResourceRequest is a request for resources from one particular driver.",
"vendorParameters": "VendorParameters are arbitrary setup parameters for the requested resource. They are ignored while allocating a claim.",
}
func (ResourceRequest) SwaggerDoc() map[string]string {
return map_ResourceRequest
}
var map_ResourceRequestModel = map[string]string{
"": "ResourceRequestModel must have one and only one field set.",
"namedResources": "NamedResources describes a request for resources with the named resources model.",
}
func (ResourceRequestModel) SwaggerDoc() map[string]string {
return map_ResourceRequestModel
}
var map_ResourceSlice = map[string]string{
"": "ResourceSlice provides information about available resources on individual nodes.",
"metadata": "Standard object metadata",
"nodeName": "NodeName identifies the node which provides the resources if they are local to a node.\n\nA field selector can be used to list only ResourceSlice objects with a certain node name.",
"driverName": "DriverName identifies the DRA driver providing the capacity information. A field selector can be used to list only ResourceSlice objects with a certain driver name.",
}
func (ResourceSlice) SwaggerDoc() map[string]string {
return map_ResourceSlice
}
var map_ResourceSliceList = map[string]string{
"": "ResourceSliceList is a collection of ResourceSlices.",
"metadata": "Standard list metadata",
"items": "Items is the list of node resource capacity objects.",
}
func (ResourceSliceList) SwaggerDoc() map[string]string {
return map_ResourceSliceList
}
var map_StructuredResourceHandle = map[string]string{
"": "StructuredResourceHandle is the in-tree representation of the allocation result.",
"vendorClassParameters": "VendorClassParameters are the per-claim configuration parameters from the resource class at the time that the claim was allocated.",
"vendorClaimParameters": "VendorClaimParameters are the per-claim configuration parameters from the resource claim parameters at the time that the claim was allocated.",
"nodeName": "NodeName is the name of the node providing the necessary resources if the resources are local to a node.",
"results": "Results lists all allocated driver resources.",
}
func (StructuredResourceHandle) SwaggerDoc() map[string]string {
return map_StructuredResourceHandle
}
var map_VendorParameters = map[string]string{
"": "VendorParameters are opaque parameters for one particular driver.",
"driverName": "DriverName is the name used by the DRA driver kubelet plugin.",
"parameters": "Parameters can be arbitrary setup parameters. They are ignored while allocating a claim.",
}
func (VendorParameters) SwaggerDoc() map[string]string {
return map_VendorParameters
}
// AUTO-GENERATED FUNCTIONS END HERE

View File

@@ -32,7 +32,9 @@ func (in *AllocationResult) DeepCopyInto(out *AllocationResult) {
if in.ResourceHandles != nil {
in, out := &in.ResourceHandles, &out.ResourceHandles
*out = make([]ResourceHandle, len(*in))
copy(*out, *in)
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
if in.AvailableOnNodes != nil {
in, out := &in.AvailableOnNodes, &out.AvailableOnNodes
@@ -52,6 +54,294 @@ func (in *AllocationResult) DeepCopy() *AllocationResult {
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *AllocationResultModel) DeepCopyInto(out *AllocationResultModel) {
*out = *in
if in.NamedResources != nil {
in, out := &in.NamedResources, &out.NamedResources
*out = new(NamedResourcesAllocationResult)
**out = **in
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AllocationResultModel.
func (in *AllocationResultModel) DeepCopy() *AllocationResultModel {
if in == nil {
return nil
}
out := new(AllocationResultModel)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *DriverAllocationResult) DeepCopyInto(out *DriverAllocationResult) {
*out = *in
in.VendorRequestParameters.DeepCopyInto(&out.VendorRequestParameters)
in.AllocationResultModel.DeepCopyInto(&out.AllocationResultModel)
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DriverAllocationResult.
func (in *DriverAllocationResult) DeepCopy() *DriverAllocationResult {
if in == nil {
return nil
}
out := new(DriverAllocationResult)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *DriverRequests) DeepCopyInto(out *DriverRequests) {
*out = *in
in.VendorParameters.DeepCopyInto(&out.VendorParameters)
if in.Requests != nil {
in, out := &in.Requests, &out.Requests
*out = make([]ResourceRequest, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DriverRequests.
func (in *DriverRequests) DeepCopy() *DriverRequests {
if in == nil {
return nil
}
out := new(DriverRequests)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *NamedResourcesAllocationResult) DeepCopyInto(out *NamedResourcesAllocationResult) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NamedResourcesAllocationResult.
func (in *NamedResourcesAllocationResult) DeepCopy() *NamedResourcesAllocationResult {
if in == nil {
return nil
}
out := new(NamedResourcesAllocationResult)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *NamedResourcesAttribute) DeepCopyInto(out *NamedResourcesAttribute) {
*out = *in
in.NamedResourcesAttributeValue.DeepCopyInto(&out.NamedResourcesAttributeValue)
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NamedResourcesAttribute.
func (in *NamedResourcesAttribute) DeepCopy() *NamedResourcesAttribute {
if in == nil {
return nil
}
out := new(NamedResourcesAttribute)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *NamedResourcesAttributeValue) DeepCopyInto(out *NamedResourcesAttributeValue) {
*out = *in
if in.QuantityValue != nil {
in, out := &in.QuantityValue, &out.QuantityValue
x := (*in).DeepCopy()
*out = &x
}
if in.BoolValue != nil {
in, out := &in.BoolValue, &out.BoolValue
*out = new(bool)
**out = **in
}
if in.IntValue != nil {
in, out := &in.IntValue, &out.IntValue
*out = new(int64)
**out = **in
}
if in.IntSliceValue != nil {
in, out := &in.IntSliceValue, &out.IntSliceValue
*out = new(NamedResourcesIntSlice)
(*in).DeepCopyInto(*out)
}
if in.StringValue != nil {
in, out := &in.StringValue, &out.StringValue
*out = new(string)
**out = **in
}
if in.StringSliceValue != nil {
in, out := &in.StringSliceValue, &out.StringSliceValue
*out = new(NamedResourcesStringSlice)
(*in).DeepCopyInto(*out)
}
if in.VersionValue != nil {
in, out := &in.VersionValue, &out.VersionValue
*out = new(string)
**out = **in
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NamedResourcesAttributeValue.
func (in *NamedResourcesAttributeValue) DeepCopy() *NamedResourcesAttributeValue {
if in == nil {
return nil
}
out := new(NamedResourcesAttributeValue)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *NamedResourcesFilter) DeepCopyInto(out *NamedResourcesFilter) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NamedResourcesFilter.
func (in *NamedResourcesFilter) DeepCopy() *NamedResourcesFilter {
if in == nil {
return nil
}
out := new(NamedResourcesFilter)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *NamedResourcesInstance) DeepCopyInto(out *NamedResourcesInstance) {
*out = *in
if in.Attributes != nil {
in, out := &in.Attributes, &out.Attributes
*out = make([]NamedResourcesAttribute, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NamedResourcesInstance.
func (in *NamedResourcesInstance) DeepCopy() *NamedResourcesInstance {
if in == nil {
return nil
}
out := new(NamedResourcesInstance)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *NamedResourcesIntSlice) DeepCopyInto(out *NamedResourcesIntSlice) {
*out = *in
if in.Ints != nil {
in, out := &in.Ints, &out.Ints
*out = make([]int64, len(*in))
copy(*out, *in)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NamedResourcesIntSlice.
func (in *NamedResourcesIntSlice) DeepCopy() *NamedResourcesIntSlice {
if in == nil {
return nil
}
out := new(NamedResourcesIntSlice)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *NamedResourcesRequest) DeepCopyInto(out *NamedResourcesRequest) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NamedResourcesRequest.
func (in *NamedResourcesRequest) DeepCopy() *NamedResourcesRequest {
if in == nil {
return nil
}
out := new(NamedResourcesRequest)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *NamedResourcesResources) DeepCopyInto(out *NamedResourcesResources) {
*out = *in
if in.Instances != nil {
in, out := &in.Instances, &out.Instances
*out = make([]NamedResourcesInstance, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NamedResourcesResources.
func (in *NamedResourcesResources) DeepCopy() *NamedResourcesResources {
if in == nil {
return nil
}
out := new(NamedResourcesResources)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *NamedResourcesStringSlice) DeepCopyInto(out *NamedResourcesStringSlice) {
*out = *in
if in.Strings != nil {
in, out := &in.Strings, &out.Strings
*out = make([]string, len(*in))
copy(*out, *in)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NamedResourcesStringSlice.
func (in *NamedResourcesStringSlice) DeepCopy() *NamedResourcesStringSlice {
if in == nil {
return nil
}
out := new(NamedResourcesStringSlice)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *NodeResourceModel) DeepCopyInto(out *NodeResourceModel) {
*out = *in
if in.NamedResources != nil {
in, out := &in.NamedResources, &out.NamedResources
*out = new(NamedResourcesResources)
(*in).DeepCopyInto(*out)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeResourceModel.
func (in *NodeResourceModel) DeepCopy() *NodeResourceModel {
if in == nil {
return nil
}
out := new(NodeResourceModel)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *PodSchedulingContext) DeepCopyInto(out *PodSchedulingContext) {
*out = *in
@@ -234,6 +524,77 @@ func (in *ResourceClaimList) DeepCopyObject() runtime.Object {
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ResourceClaimParameters) DeepCopyInto(out *ResourceClaimParameters) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
if in.GeneratedFrom != nil {
in, out := &in.GeneratedFrom, &out.GeneratedFrom
*out = new(ResourceClaimParametersReference)
**out = **in
}
if in.DriverRequests != nil {
in, out := &in.DriverRequests, &out.DriverRequests
*out = make([]DriverRequests, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceClaimParameters.
func (in *ResourceClaimParameters) DeepCopy() *ResourceClaimParameters {
if in == nil {
return nil
}
out := new(ResourceClaimParameters)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *ResourceClaimParameters) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ResourceClaimParametersList) DeepCopyInto(out *ResourceClaimParametersList) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ListMeta.DeepCopyInto(&out.ListMeta)
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]ResourceClaimParameters, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceClaimParametersList.
func (in *ResourceClaimParametersList) DeepCopy() *ResourceClaimParametersList {
if in == nil {
return nil
}
out := new(ResourceClaimParametersList)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *ResourceClaimParametersList) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ResourceClaimParametersReference) DeepCopyInto(out *ResourceClaimParametersReference) {
*out = *in
@@ -411,6 +772,11 @@ func (in *ResourceClass) DeepCopyInto(out *ResourceClass) {
*out = new(v1.NodeSelector)
(*in).DeepCopyInto(*out)
}
if in.StructuredParameters != nil {
in, out := &in.StructuredParameters, &out.StructuredParameters
*out = new(bool)
**out = **in
}
return
}
@@ -465,6 +831,84 @@ func (in *ResourceClassList) DeepCopyObject() runtime.Object {
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ResourceClassParameters) DeepCopyInto(out *ResourceClassParameters) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
if in.GeneratedFrom != nil {
in, out := &in.GeneratedFrom, &out.GeneratedFrom
*out = new(ResourceClassParametersReference)
**out = **in
}
if in.VendorParameters != nil {
in, out := &in.VendorParameters, &out.VendorParameters
*out = make([]VendorParameters, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
if in.Filters != nil {
in, out := &in.Filters, &out.Filters
*out = make([]ResourceFilter, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceClassParameters.
func (in *ResourceClassParameters) DeepCopy() *ResourceClassParameters {
if in == nil {
return nil
}
out := new(ResourceClassParameters)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *ResourceClassParameters) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ResourceClassParametersList) DeepCopyInto(out *ResourceClassParametersList) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ListMeta.DeepCopyInto(&out.ListMeta)
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]ResourceClassParameters, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceClassParametersList.
func (in *ResourceClassParametersList) DeepCopy() *ResourceClassParametersList {
if in == nil {
return nil
}
out := new(ResourceClassParametersList)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *ResourceClassParametersList) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ResourceClassParametersReference) DeepCopyInto(out *ResourceClassParametersReference) {
*out = *in
@@ -481,9 +925,52 @@ func (in *ResourceClassParametersReference) DeepCopy() *ResourceClassParametersR
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ResourceFilter) DeepCopyInto(out *ResourceFilter) {
*out = *in
in.ResourceFilterModel.DeepCopyInto(&out.ResourceFilterModel)
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceFilter.
func (in *ResourceFilter) DeepCopy() *ResourceFilter {
if in == nil {
return nil
}
out := new(ResourceFilter)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ResourceFilterModel) DeepCopyInto(out *ResourceFilterModel) {
*out = *in
if in.NamedResources != nil {
in, out := &in.NamedResources, &out.NamedResources
*out = new(NamedResourcesFilter)
**out = **in
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceFilterModel.
func (in *ResourceFilterModel) DeepCopy() *ResourceFilterModel {
if in == nil {
return nil
}
out := new(ResourceFilterModel)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ResourceHandle) DeepCopyInto(out *ResourceHandle) {
*out = *in
if in.StructuredData != nil {
in, out := &in.StructuredData, &out.StructuredData
*out = new(StructuredResourceHandle)
(*in).DeepCopyInto(*out)
}
return
}
@@ -496,3 +983,144 @@ func (in *ResourceHandle) DeepCopy() *ResourceHandle {
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ResourceRequest) DeepCopyInto(out *ResourceRequest) {
*out = *in
in.VendorParameters.DeepCopyInto(&out.VendorParameters)
in.ResourceRequestModel.DeepCopyInto(&out.ResourceRequestModel)
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceRequest.
func (in *ResourceRequest) DeepCopy() *ResourceRequest {
if in == nil {
return nil
}
out := new(ResourceRequest)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ResourceRequestModel) DeepCopyInto(out *ResourceRequestModel) {
*out = *in
if in.NamedResources != nil {
in, out := &in.NamedResources, &out.NamedResources
*out = new(NamedResourcesRequest)
**out = **in
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceRequestModel.
func (in *ResourceRequestModel) DeepCopy() *ResourceRequestModel {
if in == nil {
return nil
}
out := new(ResourceRequestModel)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ResourceSlice) DeepCopyInto(out *ResourceSlice) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
in.NodeResourceModel.DeepCopyInto(&out.NodeResourceModel)
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceSlice.
func (in *ResourceSlice) DeepCopy() *ResourceSlice {
if in == nil {
return nil
}
out := new(ResourceSlice)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *ResourceSlice) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ResourceSliceList) DeepCopyInto(out *ResourceSliceList) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ListMeta.DeepCopyInto(&out.ListMeta)
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]ResourceSlice, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceSliceList.
func (in *ResourceSliceList) DeepCopy() *ResourceSliceList {
if in == nil {
return nil
}
out := new(ResourceSliceList)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *ResourceSliceList) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *StructuredResourceHandle) DeepCopyInto(out *StructuredResourceHandle) {
*out = *in
in.VendorClassParameters.DeepCopyInto(&out.VendorClassParameters)
in.VendorClaimParameters.DeepCopyInto(&out.VendorClaimParameters)
if in.Results != nil {
in, out := &in.Results, &out.Results
*out = make([]DriverAllocationResult, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StructuredResourceHandle.
func (in *StructuredResourceHandle) DeepCopy() *StructuredResourceHandle {
if in == nil {
return nil
}
out := new(StructuredResourceHandle)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *VendorParameters) DeepCopyInto(out *VendorParameters) {
*out = *in
in.Parameters.DeepCopyInto(&out.Parameters)
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VendorParameters.
func (in *VendorParameters) DeepCopy() *VendorParameters {
if in == nil {
return nil
}
out := new(VendorParameters)
in.DeepCopyInto(out)
return out
}

View File

@@ -58,7 +58,47 @@
"resourceHandles": [
{
"driverName": "driverNameValue",
"data": "dataValue"
"data": "dataValue",
"structuredData": {
"vendorClassParameters": {
"apiVersion": "example.com/v1",
"kind": "CustomType",
"spec": {
"replicas": 1
},
"status": {
"available": 1
}
},
"vendorClaimParameters": {
"apiVersion": "example.com/v1",
"kind": "CustomType",
"spec": {
"replicas": 1
},
"status": {
"available": 1
}
},
"nodeName": "nodeNameValue",
"results": [
{
"vendorRequestParameters": {
"apiVersion": "example.com/v1",
"kind": "CustomType",
"spec": {
"replicas": 1
},
"status": {
"available": 1
}
},
"namedResources": {
"name": "nameValue"
}
}
]
}
}
],
"availableOnNodes": {

View File

@@ -56,6 +56,32 @@ status:
resourceHandles:
- data: dataValue
driverName: driverNameValue
structuredData:
nodeName: nodeNameValue
results:
- namedResources:
name: nameValue
vendorRequestParameters:
apiVersion: example.com/v1
kind: CustomType
spec:
replicas: 1
status:
available: 1
vendorClaimParameters:
apiVersion: example.com/v1
kind: CustomType
spec:
replicas: 1
status:
available: 1
vendorClassParameters:
apiVersion: example.com/v1
kind: CustomType
spec:
replicas: 1
status:
available: 1
shareable: true
deallocationRequested: true
driverName: driverNameValue

View File

@@ -0,0 +1,84 @@
{
"kind": "ResourceClaimParameters",
"apiVersion": "resource.k8s.io/v1alpha2",
"metadata": {
"name": "nameValue",
"generateName": "generateNameValue",
"namespace": "namespaceValue",
"selfLink": "selfLinkValue",
"uid": "uidValue",
"resourceVersion": "resourceVersionValue",
"generation": 7,
"creationTimestamp": "2008-01-01T01:01:01Z",
"deletionTimestamp": "2009-01-01T01:01:01Z",
"deletionGracePeriodSeconds": 10,
"labels": {
"labelsKey": "labelsValue"
},
"annotations": {
"annotationsKey": "annotationsValue"
},
"ownerReferences": [
{
"apiVersion": "apiVersionValue",
"kind": "kindValue",
"name": "nameValue",
"uid": "uidValue",
"controller": true,
"blockOwnerDeletion": true
}
],
"finalizers": [
"finalizersValue"
],
"managedFields": [
{
"manager": "managerValue",
"operation": "operationValue",
"apiVersion": "apiVersionValue",
"time": "2004-01-01T01:01:01Z",
"fieldsType": "fieldsTypeValue",
"fieldsV1": {},
"subresource": "subresourceValue"
}
]
},
"generatedFrom": {
"apiGroup": "apiGroupValue",
"kind": "kindValue",
"name": "nameValue"
},
"shareable": true,
"driverRequests": [
{
"driverName": "driverNameValue",
"vendorParameters": {
"apiVersion": "example.com/v1",
"kind": "CustomType",
"spec": {
"replicas": 1
},
"status": {
"available": 1
}
},
"requests": [
{
"vendorParameters": {
"apiVersion": "example.com/v1",
"kind": "CustomType",
"spec": {
"replicas": 1
},
"status": {
"available": 1
}
},
"namedResources": {
"selector": "selectorValue"
}
}
]
}
]
}

View File

@@ -0,0 +1,58 @@
apiVersion: resource.k8s.io/v1alpha2
driverRequests:
- driverName: driverNameValue
requests:
- namedResources:
selector: selectorValue
vendorParameters:
apiVersion: example.com/v1
kind: CustomType
spec:
replicas: 1
status:
available: 1
vendorParameters:
apiVersion: example.com/v1
kind: CustomType
spec:
replicas: 1
status:
available: 1
generatedFrom:
apiGroup: apiGroupValue
kind: kindValue
name: nameValue
kind: ResourceClaimParameters
metadata:
annotations:
annotationsKey: annotationsValue
creationTimestamp: "2008-01-01T01:01:01Z"
deletionGracePeriodSeconds: 10
deletionTimestamp: "2009-01-01T01:01:01Z"
finalizers:
- finalizersValue
generateName: generateNameValue
generation: 7
labels:
labelsKey: labelsValue
managedFields:
- apiVersion: apiVersionValue
fieldsType: fieldsTypeValue
fieldsV1: {}
manager: managerValue
operation: operationValue
subresource: subresourceValue
time: "2004-01-01T01:01:01Z"
name: nameValue
namespace: namespaceValue
ownerReferences:
- apiVersion: apiVersionValue
blockOwnerDeletion: true
controller: true
kind: kindValue
name: nameValue
uid: uidValue
resourceVersion: resourceVersionValue
selfLink: selfLinkValue
uid: uidValue
shareable: true

View File

@@ -73,5 +73,6 @@
]
}
]
}
},
"structuredParameters": true
}

View File

@@ -38,6 +38,7 @@ parametersRef:
kind: kindValue
name: nameValue
namespace: namespaceValue
structuredParameters: true
suitableNodes:
nodeSelectorTerms:
- matchExpressions:

View File

@@ -0,0 +1,75 @@
{
"kind": "ResourceClassParameters",
"apiVersion": "resource.k8s.io/v1alpha2",
"metadata": {
"name": "nameValue",
"generateName": "generateNameValue",
"namespace": "namespaceValue",
"selfLink": "selfLinkValue",
"uid": "uidValue",
"resourceVersion": "resourceVersionValue",
"generation": 7,
"creationTimestamp": "2008-01-01T01:01:01Z",
"deletionTimestamp": "2009-01-01T01:01:01Z",
"deletionGracePeriodSeconds": 10,
"labels": {
"labelsKey": "labelsValue"
},
"annotations": {
"annotationsKey": "annotationsValue"
},
"ownerReferences": [
{
"apiVersion": "apiVersionValue",
"kind": "kindValue",
"name": "nameValue",
"uid": "uidValue",
"controller": true,
"blockOwnerDeletion": true
}
],
"finalizers": [
"finalizersValue"
],
"managedFields": [
{
"manager": "managerValue",
"operation": "operationValue",
"apiVersion": "apiVersionValue",
"time": "2004-01-01T01:01:01Z",
"fieldsType": "fieldsTypeValue",
"fieldsV1": {},
"subresource": "subresourceValue"
}
]
},
"generatedFrom": {
"apiGroup": "apiGroupValue",
"kind": "kindValue",
"name": "nameValue",
"namespace": "namespaceValue"
},
"vendorParameters": [
{
"driverName": "driverNameValue",
"parameters": {
"apiVersion": "example.com/v1",
"kind": "CustomType",
"spec": {
"replicas": 1
},
"status": {
"available": 1
}
}
}
],
"filters": [
{
"driverName": "driverNameValue",
"namedResources": {
"selector": "selectorValue"
}
}
]
}

View File

@@ -0,0 +1,52 @@
apiVersion: resource.k8s.io/v1alpha2
filters:
- driverName: driverNameValue
namedResources:
selector: selectorValue
generatedFrom:
apiGroup: apiGroupValue
kind: kindValue
name: nameValue
namespace: namespaceValue
kind: ResourceClassParameters
metadata:
annotations:
annotationsKey: annotationsValue
creationTimestamp: "2008-01-01T01:01:01Z"
deletionGracePeriodSeconds: 10
deletionTimestamp: "2009-01-01T01:01:01Z"
finalizers:
- finalizersValue
generateName: generateNameValue
generation: 7
labels:
labelsKey: labelsValue
managedFields:
- apiVersion: apiVersionValue
fieldsType: fieldsTypeValue
fieldsV1: {}
manager: managerValue
operation: operationValue
subresource: subresourceValue
time: "2004-01-01T01:01:01Z"
name: nameValue
namespace: namespaceValue
ownerReferences:
- apiVersion: apiVersionValue
blockOwnerDeletion: true
controller: true
kind: kindValue
name: nameValue
uid: uidValue
resourceVersion: resourceVersionValue
selfLink: selfLinkValue
uid: uidValue
vendorParameters:
- driverName: driverNameValue
parameters:
apiVersion: example.com/v1
kind: CustomType
spec:
replicas: 1
status:
available: 1

View File

@@ -0,0 +1,75 @@
{
"kind": "ResourceSlice",
"apiVersion": "resource.k8s.io/v1alpha2",
"metadata": {
"name": "nameValue",
"generateName": "generateNameValue",
"namespace": "namespaceValue",
"selfLink": "selfLinkValue",
"uid": "uidValue",
"resourceVersion": "resourceVersionValue",
"generation": 7,
"creationTimestamp": "2008-01-01T01:01:01Z",
"deletionTimestamp": "2009-01-01T01:01:01Z",
"deletionGracePeriodSeconds": 10,
"labels": {
"labelsKey": "labelsValue"
},
"annotations": {
"annotationsKey": "annotationsValue"
},
"ownerReferences": [
{
"apiVersion": "apiVersionValue",
"kind": "kindValue",
"name": "nameValue",
"uid": "uidValue",
"controller": true,
"blockOwnerDeletion": true
}
],
"finalizers": [
"finalizersValue"
],
"managedFields": [
{
"manager": "managerValue",
"operation": "operationValue",
"apiVersion": "apiVersionValue",
"time": "2004-01-01T01:01:01Z",
"fieldsType": "fieldsTypeValue",
"fieldsV1": {},
"subresource": "subresourceValue"
}
]
},
"nodeName": "nodeNameValue",
"driverName": "driverNameValue",
"namedResources": {
"instances": [
{
"name": "nameValue",
"attributes": [
{
"name": "nameValue",
"quantity": "0",
"bool": true,
"int": 7,
"intSlice": {
"ints": [
1
]
},
"string": "stringValue",
"stringSlice": {
"strings": [
"stringsValue"
]
},
"version": "versionValue"
}
]
}
]
}
}

View File

@@ -0,0 +1,52 @@
apiVersion: resource.k8s.io/v1alpha2
driverName: driverNameValue
kind: ResourceSlice
metadata:
annotations:
annotationsKey: annotationsValue
creationTimestamp: "2008-01-01T01:01:01Z"
deletionGracePeriodSeconds: 10
deletionTimestamp: "2009-01-01T01:01:01Z"
finalizers:
- finalizersValue
generateName: generateNameValue
generation: 7
labels:
labelsKey: labelsValue
managedFields:
- apiVersion: apiVersionValue
fieldsType: fieldsTypeValue
fieldsV1: {}
manager: managerValue
operation: operationValue
subresource: subresourceValue
time: "2004-01-01T01:01:01Z"
name: nameValue
namespace: namespaceValue
ownerReferences:
- apiVersion: apiVersionValue
blockOwnerDeletion: true
controller: true
kind: kindValue
name: nameValue
uid: uidValue
resourceVersion: resourceVersionValue
selfLink: selfLinkValue
uid: uidValue
namedResources:
instances:
- attributes:
- bool: true
int: 7
intSlice:
ints:
- 1
name: nameValue
quantity: "0"
string: stringValue
stringSlice:
strings:
- stringsValue
version: versionValue
name: nameValue
nodeName: nodeNameValue

View File

@@ -11966,6 +11966,119 @@ var schemaYAML = typed.YAMLObject(`types:
- name: shareable
type:
scalar: boolean
- name: io.k8s.api.resource.v1alpha2.DriverAllocationResult
map:
fields:
- name: namedResources
type:
namedType: io.k8s.api.resource.v1alpha2.NamedResourcesAllocationResult
- name: vendorRequestParameters
type:
namedType: __untyped_atomic_
- name: io.k8s.api.resource.v1alpha2.DriverRequests
map:
fields:
- name: driverName
type:
scalar: string
- name: requests
type:
list:
elementType:
namedType: io.k8s.api.resource.v1alpha2.ResourceRequest
elementRelationship: atomic
- name: vendorParameters
type:
namedType: __untyped_atomic_
- name: io.k8s.api.resource.v1alpha2.NamedResourcesAllocationResult
map:
fields:
- name: name
type:
scalar: string
default: ""
- name: io.k8s.api.resource.v1alpha2.NamedResourcesAttribute
map:
fields:
- name: bool
type:
scalar: boolean
- name: int
type:
scalar: numeric
- name: intSlice
type:
namedType: io.k8s.api.resource.v1alpha2.NamedResourcesIntSlice
- name: name
type:
scalar: string
default: ""
- name: quantity
type:
namedType: io.k8s.apimachinery.pkg.api.resource.Quantity
- name: string
type:
scalar: string
- name: stringSlice
type:
namedType: io.k8s.api.resource.v1alpha2.NamedResourcesStringSlice
- name: version
type:
scalar: string
- name: io.k8s.api.resource.v1alpha2.NamedResourcesFilter
map:
fields:
- name: selector
type:
scalar: string
default: ""
- name: io.k8s.api.resource.v1alpha2.NamedResourcesInstance
map:
fields:
- name: attributes
type:
list:
elementType:
namedType: io.k8s.api.resource.v1alpha2.NamedResourcesAttribute
elementRelationship: atomic
- name: name
type:
scalar: string
default: ""
- name: io.k8s.api.resource.v1alpha2.NamedResourcesIntSlice
map:
fields:
- name: ints
type:
list:
elementType:
scalar: numeric
elementRelationship: atomic
- name: io.k8s.api.resource.v1alpha2.NamedResourcesRequest
map:
fields:
- name: selector
type:
scalar: string
default: ""
- name: io.k8s.api.resource.v1alpha2.NamedResourcesResources
map:
fields:
- name: instances
type:
list:
elementType:
namedType: io.k8s.api.resource.v1alpha2.NamedResourcesInstance
elementRelationship: atomic
- name: io.k8s.api.resource.v1alpha2.NamedResourcesStringSlice
map:
fields:
- name: strings
type:
list:
elementType:
scalar: string
elementRelationship: atomic
- name: io.k8s.api.resource.v1alpha2.PodSchedulingContext
map:
fields:
@@ -12049,6 +12162,31 @@ var schemaYAML = typed.YAMLObject(`types:
type:
scalar: string
default: ""
- name: io.k8s.api.resource.v1alpha2.ResourceClaimParameters
map:
fields:
- name: apiVersion
type:
scalar: string
- name: driverRequests
type:
list:
elementType:
namedType: io.k8s.api.resource.v1alpha2.DriverRequests
elementRelationship: atomic
- name: generatedFrom
type:
namedType: io.k8s.api.resource.v1alpha2.ResourceClaimParametersReference
- name: kind
type:
scalar: string
- name: metadata
type:
namedType: io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta
default: {}
- name: shareable
type:
scalar: boolean
- name: io.k8s.api.resource.v1alpha2.ResourceClaimParametersReference
map:
fields:
@@ -12156,9 +12294,40 @@ var schemaYAML = typed.YAMLObject(`types:
- name: parametersRef
type:
namedType: io.k8s.api.resource.v1alpha2.ResourceClassParametersReference
- name: structuredParameters
type:
scalar: boolean
- name: suitableNodes
type:
namedType: io.k8s.api.core.v1.NodeSelector
- name: io.k8s.api.resource.v1alpha2.ResourceClassParameters
map:
fields:
- name: apiVersion
type:
scalar: string
- name: filters
type:
list:
elementType:
namedType: io.k8s.api.resource.v1alpha2.ResourceFilter
elementRelationship: atomic
- name: generatedFrom
type:
namedType: io.k8s.api.resource.v1alpha2.ResourceClassParametersReference
- name: kind
type:
scalar: string
- name: metadata
type:
namedType: io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta
default: {}
- name: vendorParameters
type:
list:
elementType:
namedType: io.k8s.api.resource.v1alpha2.VendorParameters
elementRelationship: atomic
- name: io.k8s.api.resource.v1alpha2.ResourceClassParametersReference
map:
fields:
@@ -12176,6 +12345,15 @@ var schemaYAML = typed.YAMLObject(`types:
- name: namespace
type:
scalar: string
- name: io.k8s.api.resource.v1alpha2.ResourceFilter
map:
fields:
- name: driverName
type:
scalar: string
- name: namedResources
type:
namedType: io.k8s.api.resource.v1alpha2.NamedResourcesFilter
- name: io.k8s.api.resource.v1alpha2.ResourceHandle
map:
fields:
@@ -12185,6 +12363,68 @@ var schemaYAML = typed.YAMLObject(`types:
- name: driverName
type:
scalar: string
- name: structuredData
type:
namedType: io.k8s.api.resource.v1alpha2.StructuredResourceHandle
- name: io.k8s.api.resource.v1alpha2.ResourceRequest
map:
fields:
- name: namedResources
type:
namedType: io.k8s.api.resource.v1alpha2.NamedResourcesRequest
- name: vendorParameters
type:
namedType: __untyped_atomic_
- name: io.k8s.api.resource.v1alpha2.ResourceSlice
map:
fields:
- name: apiVersion
type:
scalar: string
- name: driverName
type:
scalar: string
default: ""
- name: kind
type:
scalar: string
- name: metadata
type:
namedType: io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta
default: {}
- name: namedResources
type:
namedType: io.k8s.api.resource.v1alpha2.NamedResourcesResources
- name: nodeName
type:
scalar: string
- name: io.k8s.api.resource.v1alpha2.StructuredResourceHandle
map:
fields:
- name: nodeName
type:
scalar: string
- name: results
type:
list:
elementType:
namedType: io.k8s.api.resource.v1alpha2.DriverAllocationResult
elementRelationship: atomic
- name: vendorClaimParameters
type:
namedType: __untyped_atomic_
- name: vendorClassParameters
type:
namedType: __untyped_atomic_
- name: io.k8s.api.resource.v1alpha2.VendorParameters
map:
fields:
- name: driverName
type:
scalar: string
- name: parameters
type:
namedType: __untyped_atomic_
- name: io.k8s.api.scheduling.v1.PriorityClass
map:
fields:

View File

@@ -0,0 +1,39 @@
/*
Copyright The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by applyconfiguration-gen. DO NOT EDIT.
package v1alpha2
// AllocationResultModelApplyConfiguration represents a declarative configuration of the AllocationResultModel type for use
// with apply.
type AllocationResultModelApplyConfiguration struct {
NamedResources *NamedResourcesAllocationResultApplyConfiguration `json:"namedResources,omitempty"`
}
// AllocationResultModelApplyConfiguration constructs a declarative configuration of the AllocationResultModel type for use with
// apply.
func AllocationResultModel() *AllocationResultModelApplyConfiguration {
return &AllocationResultModelApplyConfiguration{}
}
// WithNamedResources sets the NamedResources field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the NamedResources field is set to the value of the last call.
func (b *AllocationResultModelApplyConfiguration) WithNamedResources(value *NamedResourcesAllocationResultApplyConfiguration) *AllocationResultModelApplyConfiguration {
b.NamedResources = value
return b
}

View File

@@ -0,0 +1,52 @@
/*
Copyright The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by applyconfiguration-gen. DO NOT EDIT.
package v1alpha2
import (
runtime "k8s.io/apimachinery/pkg/runtime"
)
// DriverAllocationResultApplyConfiguration represents a declarative configuration of the DriverAllocationResult type for use
// with apply.
type DriverAllocationResultApplyConfiguration struct {
VendorRequestParameters *runtime.RawExtension `json:"vendorRequestParameters,omitempty"`
AllocationResultModelApplyConfiguration `json:",inline"`
}
// DriverAllocationResultApplyConfiguration constructs a declarative configuration of the DriverAllocationResult type for use with
// apply.
func DriverAllocationResult() *DriverAllocationResultApplyConfiguration {
return &DriverAllocationResultApplyConfiguration{}
}
// WithVendorRequestParameters sets the VendorRequestParameters field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the VendorRequestParameters field is set to the value of the last call.
func (b *DriverAllocationResultApplyConfiguration) WithVendorRequestParameters(value runtime.RawExtension) *DriverAllocationResultApplyConfiguration {
b.VendorRequestParameters = &value
return b
}
// WithNamedResources sets the NamedResources field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the NamedResources field is set to the value of the last call.
func (b *DriverAllocationResultApplyConfiguration) WithNamedResources(value *NamedResourcesAllocationResultApplyConfiguration) *DriverAllocationResultApplyConfiguration {
b.NamedResources = value
return b
}

View File

@@ -0,0 +1,66 @@
/*
Copyright The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by applyconfiguration-gen. DO NOT EDIT.
package v1alpha2
import (
runtime "k8s.io/apimachinery/pkg/runtime"
)
// DriverRequestsApplyConfiguration represents a declarative configuration of the DriverRequests type for use
// with apply.
type DriverRequestsApplyConfiguration struct {
DriverName *string `json:"driverName,omitempty"`
VendorParameters *runtime.RawExtension `json:"vendorParameters,omitempty"`
Requests []ResourceRequestApplyConfiguration `json:"requests,omitempty"`
}
// DriverRequestsApplyConfiguration constructs a declarative configuration of the DriverRequests type for use with
// apply.
func DriverRequests() *DriverRequestsApplyConfiguration {
return &DriverRequestsApplyConfiguration{}
}
// WithDriverName sets the DriverName field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the DriverName field is set to the value of the last call.
func (b *DriverRequestsApplyConfiguration) WithDriverName(value string) *DriverRequestsApplyConfiguration {
b.DriverName = &value
return b
}
// WithVendorParameters sets the VendorParameters field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the VendorParameters field is set to the value of the last call.
func (b *DriverRequestsApplyConfiguration) WithVendorParameters(value runtime.RawExtension) *DriverRequestsApplyConfiguration {
b.VendorParameters = &value
return b
}
// WithRequests adds the given value to the Requests field in the declarative configuration
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, values provided by each call will be appended to the Requests field.
func (b *DriverRequestsApplyConfiguration) WithRequests(values ...*ResourceRequestApplyConfiguration) *DriverRequestsApplyConfiguration {
for i := range values {
if values[i] == nil {
panic("nil value passed to WithRequests")
}
b.Requests = append(b.Requests, *values[i])
}
return b
}
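
As a usage illustration only, not part of this change: a minimal sketch chaining the DriverRequests builder defined above. The client-go import path, the driver name, and the RawExtension payload are assumptions made for the example.

package main

import (
	"fmt"

	runtime "k8s.io/apimachinery/pkg/runtime"
	resourcev1alpha2 "k8s.io/client-go/applyconfigurations/resource/v1alpha2" // assumed import path
)

func main() {
	// vendorParameters is opaque to Kubernetes; here it carries a hypothetical
	// vendor document as raw JSON.
	reqs := resourcev1alpha2.DriverRequests().
		WithDriverName("gpu.example.com").
		WithVendorParameters(runtime.RawExtension{
			Raw: []byte(`{"apiVersion":"example.com/v1","kind":"CustomType","spec":{"replicas":1}}`),
		})

	fmt.Println("driver:", *reqs.DriverName)
}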

View File

@@ -0,0 +1,39 @@
/*
Copyright The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by applyconfiguration-gen. DO NOT EDIT.
package v1alpha2
// NamedResourcesAllocationResultApplyConfiguration represents a declarative configuration of the NamedResourcesAllocationResult type for use
// with apply.
type NamedResourcesAllocationResultApplyConfiguration struct {
Name *string `json:"name,omitempty"`
}
// NamedResourcesAllocationResultApplyConfiguration constructs a declarative configuration of the NamedResourcesAllocationResult type for use with
// apply.
func NamedResourcesAllocationResult() *NamedResourcesAllocationResultApplyConfiguration {
return &NamedResourcesAllocationResultApplyConfiguration{}
}
// WithName sets the Name field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the Name field is set to the value of the last call.
func (b *NamedResourcesAllocationResultApplyConfiguration) WithName(value string) *NamedResourcesAllocationResultApplyConfiguration {
b.Name = &value
return b
}
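
Purely as a sketch of how the two allocation-result builders above compose; the import path and the instance name "gpu-0" are assumptions for illustration.

package main

import (
	"fmt"

	resourcev1alpha2 "k8s.io/client-go/applyconfigurations/resource/v1alpha2" // assumed import path
)

func main() {
	// A per-driver allocation result that records which named resource instance
	// was picked for the request.
	res := resourcev1alpha2.DriverAllocationResult().
		WithNamedResources(resourcev1alpha2.NamedResourcesAllocationResult().
			WithName("gpu-0"))

	fmt.Println("allocated instance:", *res.NamedResources.Name)
}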

View File

@@ -0,0 +1,100 @@
/*
Copyright The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by applyconfiguration-gen. DO NOT EDIT.
package v1alpha2
import (
resource "k8s.io/apimachinery/pkg/api/resource"
)
// NamedResourcesAttributeApplyConfiguration represents a declarative configuration of the NamedResourcesAttribute type for use
// with apply.
type NamedResourcesAttributeApplyConfiguration struct {
Name *string `json:"name,omitempty"`
NamedResourcesAttributeValueApplyConfiguration `json:",inline"`
}
// NamedResourcesAttributeApplyConfiguration constructs a declarative configuration of the NamedResourcesAttribute type for use with
// apply.
func NamedResourcesAttribute() *NamedResourcesAttributeApplyConfiguration {
return &NamedResourcesAttributeApplyConfiguration{}
}
// WithName sets the Name field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the Name field is set to the value of the last call.
func (b *NamedResourcesAttributeApplyConfiguration) WithName(value string) *NamedResourcesAttributeApplyConfiguration {
b.Name = &value
return b
}
// WithQuantityValue sets the QuantityValue field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the QuantityValue field is set to the value of the last call.
func (b *NamedResourcesAttributeApplyConfiguration) WithQuantityValue(value resource.Quantity) *NamedResourcesAttributeApplyConfiguration {
b.QuantityValue = &value
return b
}
// WithBoolValue sets the BoolValue field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the BoolValue field is set to the value of the last call.
func (b *NamedResourcesAttributeApplyConfiguration) WithBoolValue(value bool) *NamedResourcesAttributeApplyConfiguration {
b.BoolValue = &value
return b
}
// WithIntValue sets the IntValue field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the IntValue field is set to the value of the last call.
func (b *NamedResourcesAttributeApplyConfiguration) WithIntValue(value int64) *NamedResourcesAttributeApplyConfiguration {
b.IntValue = &value
return b
}
// WithIntSliceValue sets the IntSliceValue field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the IntSliceValue field is set to the value of the last call.
func (b *NamedResourcesAttributeApplyConfiguration) WithIntSliceValue(value *NamedResourcesIntSliceApplyConfiguration) *NamedResourcesAttributeApplyConfiguration {
b.IntSliceValue = value
return b
}
// WithStringValue sets the StringValue field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the StringValue field is set to the value of the last call.
func (b *NamedResourcesAttributeApplyConfiguration) WithStringValue(value string) *NamedResourcesAttributeApplyConfiguration {
b.StringValue = &value
return b
}
// WithStringSliceValue sets the StringSliceValue field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the StringSliceValue field is set to the value of the last call.
func (b *NamedResourcesAttributeApplyConfiguration) WithStringSliceValue(value *NamedResourcesStringSliceApplyConfiguration) *NamedResourcesAttributeApplyConfiguration {
b.StringSliceValue = value
return b
}
// WithVersionValue sets the VersionValue field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the VersionValue field is set to the value of the last call.
func (b *NamedResourcesAttributeApplyConfiguration) WithVersionValue(value string) *NamedResourcesAttributeApplyConfiguration {
b.VersionValue = &value
return b
}
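
A hedged sketch of the value setters above; each attribute is meant to carry a name plus one typed value. The import paths and the attribute names are made up for the example.

package main

import (
	"fmt"

	resource "k8s.io/apimachinery/pkg/api/resource"
	resourcev1alpha2 "k8s.io/client-go/applyconfigurations/resource/v1alpha2" // assumed import path
)

func main() {
	// A quantity-valued attribute and a version-valued attribute.
	memory := resourcev1alpha2.NamedResourcesAttribute().
		WithName("memory").
		WithQuantityValue(resource.MustParse("16Gi"))

	driverVersion := resourcev1alpha2.NamedResourcesAttribute().
		WithName("driverVersion").
		WithVersionValue("1.2.3")

	fmt.Println(*memory.Name, memory.QuantityValue.String())
	fmt.Println(*driverVersion.Name, *driverVersion.VersionValue)
}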

View File

@@ -0,0 +1,97 @@
/*
Copyright The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by applyconfiguration-gen. DO NOT EDIT.
package v1alpha2
import (
resource "k8s.io/apimachinery/pkg/api/resource"
)
// NamedResourcesAttributeValueApplyConfiguration represents a declarative configuration of the NamedResourcesAttributeValue type for use
// with apply.
type NamedResourcesAttributeValueApplyConfiguration struct {
QuantityValue *resource.Quantity `json:"quantity,omitempty"`
BoolValue *bool `json:"bool,omitempty"`
IntValue *int64 `json:"int,omitempty"`
IntSliceValue *NamedResourcesIntSliceApplyConfiguration `json:"intSlice,omitempty"`
StringValue *string `json:"string,omitempty"`
StringSliceValue *NamedResourcesStringSliceApplyConfiguration `json:"stringSlice,omitempty"`
VersionValue *string `json:"version,omitempty"`
}
// NamedResourcesAttributeValueApplyConfiguration constructs a declarative configuration of the NamedResourcesAttributeValue type for use with
// apply.
func NamedResourcesAttributeValue() *NamedResourcesAttributeValueApplyConfiguration {
return &NamedResourcesAttributeValueApplyConfiguration{}
}
// WithQuantityValue sets the QuantityValue field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the QuantityValue field is set to the value of the last call.
func (b *NamedResourcesAttributeValueApplyConfiguration) WithQuantityValue(value resource.Quantity) *NamedResourcesAttributeValueApplyConfiguration {
b.QuantityValue = &value
return b
}
// WithBoolValue sets the BoolValue field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the BoolValue field is set to the value of the last call.
func (b *NamedResourcesAttributeValueApplyConfiguration) WithBoolValue(value bool) *NamedResourcesAttributeValueApplyConfiguration {
b.BoolValue = &value
return b
}
// WithIntValue sets the IntValue field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the IntValue field is set to the value of the last call.
func (b *NamedResourcesAttributeValueApplyConfiguration) WithIntValue(value int64) *NamedResourcesAttributeValueApplyConfiguration {
b.IntValue = &value
return b
}
// WithIntSliceValue sets the IntSliceValue field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the IntSliceValue field is set to the value of the last call.
func (b *NamedResourcesAttributeValueApplyConfiguration) WithIntSliceValue(value *NamedResourcesIntSliceApplyConfiguration) *NamedResourcesAttributeValueApplyConfiguration {
b.IntSliceValue = value
return b
}
// WithStringValue sets the StringValue field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the StringValue field is set to the value of the last call.
func (b *NamedResourcesAttributeValueApplyConfiguration) WithStringValue(value string) *NamedResourcesAttributeValueApplyConfiguration {
b.StringValue = &value
return b
}
// WithStringSliceValue sets the StringSliceValue field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the StringSliceValue field is set to the value of the last call.
func (b *NamedResourcesAttributeValueApplyConfiguration) WithStringSliceValue(value *NamedResourcesStringSliceApplyConfiguration) *NamedResourcesAttributeValueApplyConfiguration {
b.StringSliceValue = value
return b
}
// WithVersionValue sets the VersionValue field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the VersionValue field is set to the value of the last call.
func (b *NamedResourcesAttributeValueApplyConfiguration) WithVersionValue(value string) *NamedResourcesAttributeValueApplyConfiguration {
b.VersionValue = &value
return b
}

View File

@@ -0,0 +1,39 @@
/*
Copyright The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by applyconfiguration-gen. DO NOT EDIT.
package v1alpha2
// NamedResourcesFilterApplyConfiguration represents a declarative configuration of the NamedResourcesFilter type for use
// with apply.
type NamedResourcesFilterApplyConfiguration struct {
Selector *string `json:"selector,omitempty"`
}
// NamedResourcesFilterApplyConfiguration constructs a declarative configuration of the NamedResourcesFilter type for use with
// apply.
func NamedResourcesFilter() *NamedResourcesFilterApplyConfiguration {
return &NamedResourcesFilterApplyConfiguration{}
}
// WithSelector sets the Selector field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the Selector field is set to the value of the last call.
func (b *NamedResourcesFilterApplyConfiguration) WithSelector(value string) *NamedResourcesFilterApplyConfiguration {
b.Selector = &value
return b
}

View File

@@ -0,0 +1,53 @@
/*
Copyright The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by applyconfiguration-gen. DO NOT EDIT.
package v1alpha2
// NamedResourcesInstanceApplyConfiguration represents a declarative configuration of the NamedResourcesInstance type for use
// with apply.
type NamedResourcesInstanceApplyConfiguration struct {
Name *string `json:"name,omitempty"`
Attributes []NamedResourcesAttributeApplyConfiguration `json:"attributes,omitempty"`
}
// NamedResourcesInstanceApplyConfiguration constructs a declarative configuration of the NamedResourcesInstance type for use with
// apply.
func NamedResourcesInstance() *NamedResourcesInstanceApplyConfiguration {
return &NamedResourcesInstanceApplyConfiguration{}
}
// WithName sets the Name field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the Name field is set to the value of the last call.
func (b *NamedResourcesInstanceApplyConfiguration) WithName(value string) *NamedResourcesInstanceApplyConfiguration {
b.Name = &value
return b
}
// WithAttributes adds the given value to the Attributes field in the declarative configuration
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, values provided by each call will be appended to the Attributes field.
func (b *NamedResourcesInstanceApplyConfiguration) WithAttributes(values ...*NamedResourcesAttributeApplyConfiguration) *NamedResourcesInstanceApplyConfiguration {
for i := range values {
if values[i] == nil {
panic("nil value passed to WithAttributes")
}
b.Attributes = append(b.Attributes, *values[i])
}
return b
}

View File

@@ -0,0 +1,41 @@
/*
Copyright The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by applyconfiguration-gen. DO NOT EDIT.
package v1alpha2
// NamedResourcesIntSliceApplyConfiguration represents a declarative configuration of the NamedResourcesIntSlice type for use
// with apply.
type NamedResourcesIntSliceApplyConfiguration struct {
Ints []int64 `json:"ints,omitempty"`
}
// NamedResourcesIntSliceApplyConfiguration constructs a declarative configuration of the NamedResourcesIntSlice type for use with
// apply.
func NamedResourcesIntSlice() *NamedResourcesIntSliceApplyConfiguration {
return &NamedResourcesIntSliceApplyConfiguration{}
}
// WithInts adds the given value to the Ints field in the declarative configuration
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, values provided by each call will be appended to the Ints field.
func (b *NamedResourcesIntSliceApplyConfiguration) WithInts(values ...int64) *NamedResourcesIntSliceApplyConfiguration {
for i := range values {
b.Ints = append(b.Ints, values[i])
}
return b
}

View File

@@ -0,0 +1,39 @@
/*
Copyright The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by applyconfiguration-gen. DO NOT EDIT.
package v1alpha2
// NamedResourcesRequestApplyConfiguration represents a declarative configuration of the NamedResourcesRequest type for use
// with apply.
type NamedResourcesRequestApplyConfiguration struct {
Selector *string `json:"selector,omitempty"`
}
// NamedResourcesRequestApplyConfiguration constructs a declarative configuration of the NamedResourcesRequest type for use with
// apply.
func NamedResourcesRequest() *NamedResourcesRequestApplyConfiguration {
return &NamedResourcesRequestApplyConfiguration{}
}
// WithSelector sets the Selector field in the declarative configuration to the given value
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, the Selector field is set to the value of the last call.
func (b *NamedResourcesRequestApplyConfiguration) WithSelector(value string) *NamedResourcesRequestApplyConfiguration {
b.Selector = &value
return b
}
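
For context, a tiny sketch of the request builder above; the selector is expected to be a CEL expression evaluated against instance attributes, and "true" (match any instance) is used here only as a placeholder. The import path is assumed as in the earlier sketches.

package main

import (
	"fmt"

	resourcev1alpha2 "k8s.io/client-go/applyconfigurations/resource/v1alpha2" // assumed import path
)

func main() {
	// A request that matches any named resource instance; a real selector would
	// filter on instance attributes.
	req := resourcev1alpha2.NamedResourcesRequest().WithSelector("true")
	fmt.Println("selector:", *req.Selector)
}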

View File

@@ -0,0 +1,44 @@
/*
Copyright The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by applyconfiguration-gen. DO NOT EDIT.
package v1alpha2
// NamedResourcesResourcesApplyConfiguration represents a declarative configuration of the NamedResourcesResources type for use
// with apply.
type NamedResourcesResourcesApplyConfiguration struct {
Instances []NamedResourcesInstanceApplyConfiguration `json:"instances,omitempty"`
}
// NamedResourcesResourcesApplyConfiguration constructs a declarative configuration of the NamedResourcesResources type for use with
// apply.
func NamedResourcesResources() *NamedResourcesResourcesApplyConfiguration {
return &NamedResourcesResourcesApplyConfiguration{}
}
// WithInstances adds the given value to the Instances field in the declarative configuration
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
// If called multiple times, values provided by each call will be appended to the Instances field.
func (b *NamedResourcesResourcesApplyConfiguration) WithInstances(values ...*NamedResourcesInstanceApplyConfiguration) *NamedResourcesResourcesApplyConfiguration {
for i := range values {
if values[i] == nil {
panic("nil value passed to WithInstances")
}
b.Instances = append(b.Instances, *values[i])
}
return b
}
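
Finally, a sketch tying the builders together into the namedResources model that a ResourceSlice publishes; compare the ResourceSlice fixture earlier in this diff. The instance name, attribute names, values, and import path are assumptions for illustration.

package main

import (
	"fmt"

	resource "k8s.io/apimachinery/pkg/api/resource"
	resourcev1alpha2 "k8s.io/client-go/applyconfigurations/resource/v1alpha2" // assumed import path
)

func main() {
	// One named resource instance with two typed attributes, wrapped in the
	// resources model a driver would publish per node.
	model := resourcev1alpha2.NamedResourcesResources().
		WithInstances(
			resourcev1alpha2.NamedResourcesInstance().
				WithName("gpu-0").
				WithAttributes(
					resourcev1alpha2.NamedResourcesAttribute().
						WithName("memory").
						WithQuantityValue(resource.MustParse("16Gi")),
					resourcev1alpha2.NamedResourcesAttribute().
						WithName("ecc").
						WithBoolValue(true),
				),
		)

	fmt.Println("instances:", len(model.Instances))
}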

Some files were not shown because too many files have changed in this diff.