Merge pull request #87299 from mikedanese/ctx

context in client-go
Authored by Kubernetes Prow Robot on 2020-02-08 06:43:52 -08:00; committed by GitHub.
954 changed files with 8533 additions and 7714 deletions
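
The change repeated across every hunk below is mechanical: the typed client-go interfaces (Create, Get, List, Watch, Update, UpdateStatus, Patch, Delete) now take a context.Context as their first argument, and the e2e tests pass context.TODO() as a placeholder. A minimal sketch of a caller after this change, assuming a kubeconfig at the default home location; the config loading and the 30-second timeout are illustrative and not part of the commit:

package main

import (
    "context"
    "fmt"
    "time"

    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/client-go/kubernetes"
    "k8s.io/client-go/tools/clientcmd"
)

func main() {
    // Illustrative setup: load whatever kubeconfig sits at the default path.
    cfg, err := clientcmd.BuildConfigFromFlags("", clientcmd.RecommendedHomeFile)
    if err != nil {
        panic(err)
    }
    client := kubernetes.NewForConfigOrDie(cfg)

    // The tests below simply pass context.TODO(); a real caller can attach a
    // deadline or cancellation so the request is aborted when it expires.
    ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
    defer cancel()

    // Before this commit: client.CoreV1().Pods("default").List(metav1.ListOptions{})
    pods, err := client.CoreV1().Pods("default").List(ctx, metav1.ListOptions{})
    if err != nil {
        panic(err)
    }
    fmt.Printf("found %d pods\n", len(pods.Items))
}

Only the leading context parameter is new here; the option arguments keep their existing shapes in this commit (for example, Delete still takes a *metav1.DeleteOptions, passed as nil throughout these tests).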


@@ -102,16 +102,16 @@ var _ = SIGDescribe("Aggregator", func() {
func cleanTest(client clientset.Interface, aggrclient *aggregatorclient.Clientset, namespace string) {
// delete the APIService first to avoid causing discovery errors
_ = aggrclient.ApiregistrationV1().APIServices().Delete("v1alpha1.wardle.example.com", nil)
_ = aggrclient.ApiregistrationV1().APIServices().Delete(context.TODO(), "v1alpha1.wardle.example.com", nil)
_ = client.AppsV1().Deployments(namespace).Delete("sample-apiserver-deployment", nil)
_ = client.CoreV1().Secrets(namespace).Delete("sample-apiserver-secret", nil)
_ = client.CoreV1().Services(namespace).Delete("sample-api", nil)
_ = client.CoreV1().ServiceAccounts(namespace).Delete("sample-apiserver", nil)
_ = client.RbacV1().RoleBindings("kube-system").Delete("wardler-auth-reader", nil)
_ = client.RbacV1().ClusterRoleBindings().Delete("wardler:"+namespace+":auth-delegator", nil)
_ = client.RbacV1().ClusterRoles().Delete("sample-apiserver-reader", nil)
_ = client.RbacV1().ClusterRoleBindings().Delete("wardler:"+namespace+":sample-apiserver-reader", nil)
_ = client.AppsV1().Deployments(namespace).Delete(context.TODO(), "sample-apiserver-deployment", nil)
_ = client.CoreV1().Secrets(namespace).Delete(context.TODO(), "sample-apiserver-secret", nil)
_ = client.CoreV1().Services(namespace).Delete(context.TODO(), "sample-api", nil)
_ = client.CoreV1().ServiceAccounts(namespace).Delete(context.TODO(), "sample-apiserver", nil)
_ = client.RbacV1().RoleBindings("kube-system").Delete(context.TODO(), "wardler-auth-reader", nil)
_ = client.RbacV1().ClusterRoleBindings().Delete(context.TODO(), "wardler:"+namespace+":auth-delegator", nil)
_ = client.RbacV1().ClusterRoles().Delete(context.TODO(), "sample-apiserver-reader", nil)
_ = client.RbacV1().ClusterRoleBindings().Delete(context.TODO(), "wardler:"+namespace+":sample-apiserver-reader", nil)
}
// TestSampleAPIServer is a basic test if the sample-apiserver code from 1.10 and compiled against 1.10
@@ -139,12 +139,12 @@ func TestSampleAPIServer(f *framework.Framework, aggrclient *aggregatorclient.Cl
"tls.key": certCtx.key,
},
}
_, err := client.CoreV1().Secrets(namespace).Create(secret)
_, err := client.CoreV1().Secrets(namespace).Create(context.TODO(), secret)
framework.ExpectNoError(err, "creating secret %q in namespace %q", secretName, namespace)
// kubectl create -f clusterrole.yaml
_, err = client.RbacV1().ClusterRoles().Create(&rbacv1.ClusterRole{
// role for listing ValidatingWebhookConfiguration/MutatingWebhookConfiguration/Namespaces
_, err = client.RbacV1().ClusterRoles().Create(context.TODO(), &rbacv1.ClusterRole{
ObjectMeta: metav1.ObjectMeta{Name: "sample-apiserver-reader"},
Rules: []rbacv1.PolicyRule{
rbacv1helpers.NewRule("get", "list", "watch").Groups("").Resources("namespaces").RuleOrDie(),
@@ -153,7 +153,7 @@ func TestSampleAPIServer(f *framework.Framework, aggrclient *aggregatorclient.Cl
})
framework.ExpectNoError(err, "creating cluster role %s", "sample-apiserver-reader")
_, err = client.RbacV1().ClusterRoleBindings().Create(&rbacv1.ClusterRoleBinding{
_, err = client.RbacV1().ClusterRoleBindings().Create(context.TODO(), &rbacv1.ClusterRoleBinding{
ObjectMeta: metav1.ObjectMeta{
Name: "wardler:" + namespace + ":sample-apiserver-reader",
},
@@ -174,7 +174,7 @@ func TestSampleAPIServer(f *framework.Framework, aggrclient *aggregatorclient.Cl
framework.ExpectNoError(err, "creating cluster role binding %s", "wardler:"+namespace+":sample-apiserver-reader")
// kubectl create -f authDelegator.yaml
_, err = client.RbacV1().ClusterRoleBindings().Create(&rbacv1.ClusterRoleBinding{
_, err = client.RbacV1().ClusterRoleBindings().Create(context.TODO(), &rbacv1.ClusterRoleBinding{
ObjectMeta: metav1.ObjectMeta{
Name: "wardler:" + namespace + ":auth-delegator",
},
@@ -272,7 +272,7 @@ func TestSampleAPIServer(f *framework.Framework, aggrclient *aggregatorclient.Cl
},
},
}
deployment, err := client.AppsV1().Deployments(namespace).Create(d)
deployment, err := client.AppsV1().Deployments(namespace).Create(context.TODO(), d)
framework.ExpectNoError(err, "creating deployment %s in namespace %s", deploymentName, namespace)
err = e2edeploy.WaitForDeploymentRevisionAndImage(client, namespace, deploymentName, "1", image)
framework.ExpectNoError(err, "waiting for the deployment of image %s in %s in %s to complete", image, deploymentName, namespace)
@@ -298,16 +298,16 @@ func TestSampleAPIServer(f *framework.Framework, aggrclient *aggregatorclient.Cl
},
},
}
_, err = client.CoreV1().Services(namespace).Create(service)
_, err = client.CoreV1().Services(namespace).Create(context.TODO(), service)
framework.ExpectNoError(err, "creating service %s in namespace %s", "sample-apiserver", namespace)
// kubectl create -f serviceAccount.yaml
sa := &v1.ServiceAccount{ObjectMeta: metav1.ObjectMeta{Name: "sample-apiserver"}}
_, err = client.CoreV1().ServiceAccounts(namespace).Create(sa)
_, err = client.CoreV1().ServiceAccounts(namespace).Create(context.TODO(), sa)
framework.ExpectNoError(err, "creating service account %s in namespace %s", "sample-apiserver", namespace)
// kubectl create -f auth-reader.yaml
_, err = client.RbacV1().RoleBindings("kube-system").Create(&rbacv1.RoleBinding{
_, err = client.RbacV1().RoleBindings("kube-system").Create(context.TODO(), &rbacv1.RoleBinding{
ObjectMeta: metav1.ObjectMeta{
Name: "wardler-auth-reader",
Annotations: map[string]string{
@@ -322,7 +322,7 @@ func TestSampleAPIServer(f *framework.Framework, aggrclient *aggregatorclient.Cl
Subjects: []rbacv1.Subject{
{
Kind: "ServiceAccount",
Name: "default", // "sample-apiserver",
Name: "default",
Namespace: namespace,
},
},
@@ -337,7 +337,7 @@ func TestSampleAPIServer(f *framework.Framework, aggrclient *aggregatorclient.Cl
framework.ExpectNoError(err, "deploying extension apiserver in namespace %s", namespace)
// kubectl create -f apiservice.yaml
_, err = aggrclient.ApiregistrationV1().APIServices().Create(&apiregistrationv1.APIService{
_, err = aggrclient.ApiregistrationV1().APIServices().Create(context.TODO(), &apiregistrationv1.APIService{
ObjectMeta: metav1.ObjectMeta{Name: "v1alpha1.wardle.example.com"},
Spec: apiregistrationv1.APIServiceSpec{
Service: &apiregistrationv1.ServiceReference{
@@ -361,8 +361,8 @@ func TestSampleAPIServer(f *framework.Framework, aggrclient *aggregatorclient.Cl
err = pollTimed(100*time.Millisecond, 60*time.Second, func() (bool, error) {
currentAPIService, _ = aggrclient.ApiregistrationV1().APIServices().Get("v1alpha1.wardle.example.com", metav1.GetOptions{})
currentPods, _ = client.CoreV1().Pods(namespace).List(metav1.ListOptions{})
currentAPIService, _ = aggrclient.ApiregistrationV1().APIServices().Get(context.TODO(), "v1alpha1.wardle.example.com", metav1.GetOptions{})
currentPods, _ = client.CoreV1().Pods(namespace).List(context.TODO(), metav1.ListOptions{})
request := restClient.Get().AbsPath("/apis/wardle.example.com/v1alpha1/namespaces/default/flunders")
request.SetHeader("Accept", "application/json")
@@ -421,7 +421,7 @@ func TestSampleAPIServer(f *framework.Framework, aggrclient *aggregatorclient.Cl
framework.ExpectEqual(u.GetKind(), "Flunder")
framework.ExpectEqual(u.GetName(), flunderName)
pods, err := client.CoreV1().Pods(namespace).List(metav1.ListOptions{})
pods, err := client.CoreV1().Pods(namespace).List(context.TODO(), metav1.ListOptions{})
framework.ExpectNoError(err, "getting pods for flunders service")
// kubectl get flunders -v 9
@@ -519,11 +519,11 @@ func validateErrorWithDebugInfo(f *framework.Framework, err error, pods *v1.PodL
msg := fmt.Sprintf(msg, fields...)
msg += fmt.Sprintf(" but received unexpected error:\n%v", err)
client := f.ClientSet
ep, err := client.CoreV1().Endpoints(namespace).Get("sample-api", metav1.GetOptions{})
ep, err := client.CoreV1().Endpoints(namespace).Get(context.TODO(), "sample-api", metav1.GetOptions{})
if err == nil {
msg += fmt.Sprintf("\nFound endpoints for sample-api:\n%v", ep)
}
pds, err := client.CoreV1().Pods(namespace).List(metav1.ListOptions{})
pds, err := client.CoreV1().Pods(namespace).List(context.TODO(), metav1.ListOptions{})
if err == nil {
msg += fmt.Sprintf("\nFound pods in %s:\n%v", namespace, pds)
msg += fmt.Sprintf("\nOriginal pods in %s:\n%v", namespace, pods)


@@ -53,7 +53,7 @@ var _ = SIGDescribe("Servers with support for API chunking", func() {
ginkgo.By("creating a large number of resources")
workqueue.ParallelizeUntil(context.TODO(), 20, numberOfTotalResources, func(i int) {
for tries := 3; tries >= 0; tries-- {
_, err := client.Create(&v1.PodTemplate{
_, err := client.Create(context.TODO(), &v1.PodTemplate{
ObjectMeta: metav1.ObjectMeta{
Name: fmt.Sprintf("template-%04d", i),
},
@@ -85,7 +85,7 @@ var _ = SIGDescribe("Servers with support for API chunking", func() {
var lastRV string
for {
opts.Limit = int64(rand.Int31n(numberOfTotalResources/10) + 1)
list, err := client.List(opts)
list, err := client.List(context.TODO(), opts)
framework.ExpectNoError(err, "failed to list pod templates in namespace: %s, given limit: %d", ns, opts.Limit)
framework.Logf("Retrieved %d/%d results with rv %s and continue %s", len(list.Items), opts.Limit, list.ResourceVersion, list.Continue)
gomega.Expect(len(list.Items)).To(gomega.BeNumerically("<=", opts.Limit))
@@ -116,7 +116,7 @@ var _ = SIGDescribe("Servers with support for API chunking", func() {
ginkgo.By("retrieving those results all at once")
opts := metav1.ListOptions{Limit: numberOfTotalResources + 1}
list, err := client.List(opts)
list, err := client.List(context.TODO(), opts)
framework.ExpectNoError(err, "failed to list pod templates in namespace: %s, given limit: %d", ns, opts.Limit)
gomega.Expect(list.Items).To(gomega.HaveLen(numberOfTotalResources))
})
@@ -130,7 +130,7 @@ var _ = SIGDescribe("Servers with support for API chunking", func() {
oneTenth := int64(numberOfTotalResources / 10)
opts := metav1.ListOptions{}
opts.Limit = oneTenth
list, err := client.List(opts)
list, err := client.List(context.TODO(), opts)
framework.ExpectNoError(err, "failed to list pod templates in namespace: %s, given limit: %d", ns, opts.Limit)
firstToken := list.Continue
firstRV := list.ResourceVersion
@@ -148,7 +148,7 @@ var _ = SIGDescribe("Servers with support for API chunking", func() {
opts.Continue = firstToken
var inconsistentToken string
wait.Poll(20*time.Second, 2*storagebackend.DefaultCompactInterval, func() (bool, error) {
_, err := client.List(opts)
_, err := client.List(context.TODO(), opts)
if err == nil {
framework.Logf("Token %s has not expired yet", firstToken)
return false, nil
@@ -171,7 +171,7 @@ var _ = SIGDescribe("Servers with support for API chunking", func() {
ginkgo.By("retrieving the second page again with the token received with the error message")
opts.Continue = inconsistentToken
list, err = client.List(opts)
list, err = client.List(context.TODO(), opts)
framework.ExpectNoError(err, "failed to list pod templates in namespace: %s, given inconsistent continue token %s and limit: %d", ns, opts.Continue, opts.Limit)
framework.ExpectNotEqual(list.ResourceVersion, firstRV)
gomega.Expect(len(list.Items)).To(gomega.BeNumerically("==", opts.Limit))
@@ -194,7 +194,7 @@ var _ = SIGDescribe("Servers with support for API chunking", func() {
opts.Continue = list.Continue
lastRV := list.ResourceVersion
for {
list, err := client.List(opts)
list, err := client.List(context.TODO(), opts)
framework.ExpectNoError(err, "failed to list pod templates in namespace: %s, given limit: %d", ns, opts.Limit)
if shouldCheckRemainingItem() {
if list.GetContinue() == "" {


@@ -17,6 +17,7 @@ limitations under the License.
package apimachinery
import (
"context"
"fmt"
"time"
@@ -208,17 +209,17 @@ var _ = SIGDescribe("CustomResourceConversionWebhook [Privileged:ClusterAdmin]",
})
func cleanCRDWebhookTest(client clientset.Interface, namespaceName string) {
_ = client.CoreV1().Services(namespaceName).Delete(serviceCRDName, nil)
_ = client.AppsV1().Deployments(namespaceName).Delete(deploymentCRDName, nil)
_ = client.CoreV1().Secrets(namespaceName).Delete(secretCRDName, nil)
_ = client.RbacV1().RoleBindings("kube-system").Delete(roleBindingCRDName, nil)
_ = client.CoreV1().Services(namespaceName).Delete(context.TODO(), serviceCRDName, nil)
_ = client.AppsV1().Deployments(namespaceName).Delete(context.TODO(), deploymentCRDName, nil)
_ = client.CoreV1().Secrets(namespaceName).Delete(context.TODO(), secretCRDName, nil)
_ = client.RbacV1().RoleBindings("kube-system").Delete(context.TODO(), roleBindingCRDName, nil)
}
func createAuthReaderRoleBindingForCRDConversion(f *framework.Framework, namespace string) {
ginkgo.By("Create role binding to let cr conversion webhook read extension-apiserver-authentication")
client := f.ClientSet
// Create the role binding to allow the webhook read the extension-apiserver-authentication configmap
_, err := client.RbacV1().RoleBindings("kube-system").Create(&rbacv1.RoleBinding{
_, err := client.RbacV1().RoleBindings("kube-system").Create(context.TODO(), &rbacv1.RoleBinding{
ObjectMeta: metav1.ObjectMeta{
Name: roleBindingCRDName,
},
@@ -227,7 +228,7 @@ func createAuthReaderRoleBindingForCRDConversion(f *framework.Framework, namespa
Kind: "Role",
Name: "extension-apiserver-authentication-reader",
},
// Webhook uses the default service account.
Subjects: []rbacv1.Subject{
{
Kind: "ServiceAccount",
@@ -259,7 +260,7 @@ func deployCustomResourceWebhookAndService(f *framework.Framework, image string,
},
}
namespace := f.Namespace.Name
_, err := client.CoreV1().Secrets(namespace).Create(secret)
_, err := client.CoreV1().Secrets(namespace).Create(context.TODO(), secret)
framework.ExpectNoError(err, "creating secret %q in namespace %q", secretName, namespace)
// Create the deployment of the webhook
@@ -335,7 +336,7 @@ func deployCustomResourceWebhookAndService(f *framework.Framework, image string,
},
},
}
deployment, err := client.AppsV1().Deployments(namespace).Create(d)
deployment, err := client.AppsV1().Deployments(namespace).Create(context.TODO(), d)
framework.ExpectNoError(err, "creating deployment %s in namespace %s", deploymentCRDName, namespace)
ginkgo.By("Wait for the deployment to be ready")
err = e2edeploy.WaitForDeploymentRevisionAndImage(client, namespace, deploymentCRDName, "1", image)
@@ -363,7 +364,7 @@ func deployCustomResourceWebhookAndService(f *framework.Framework, image string,
},
},
}
_, err = client.CoreV1().Services(namespace).Create(service)
_, err = client.CoreV1().Services(namespace).Create(context.TODO(), service)
framework.ExpectNoError(err, "creating service %s in namespace %s", serviceCRDName, namespace)
ginkgo.By("Verifying the service has paired with the endpoint")


@@ -17,6 +17,7 @@ limitations under the License.
package apimachinery
import (
"context"
"encoding/json"
"fmt"
"io/ioutil"
@@ -392,7 +393,7 @@ var _ = SIGDescribe("CustomResourcePublishOpenAPI [Privileged:ClusterAdmin]", fu
{"op":"test","path":"/spec/versions/1/name","value":"v3"},
{"op": "replace", "path": "/spec/versions/1/name", "value": "v4"}
]`)
crdMultiVer.Crd, err = crdMultiVer.APIExtensionClient.ApiextensionsV1().CustomResourceDefinitions().Patch(crdMultiVer.Crd.Name, types.JSONPatchType, patch)
crdMultiVer.Crd, err = crdMultiVer.APIExtensionClient.ApiextensionsV1().CustomResourceDefinitions().Patch(context.TODO(), crdMultiVer.Crd.Name, types.JSONPatchType, patch)
if err != nil {
framework.Failf("%v", err)
}
@@ -440,12 +441,12 @@ var _ = SIGDescribe("CustomResourcePublishOpenAPI [Privileged:ClusterAdmin]", fu
}
ginkgo.By("mark a version not serverd")
crd.Crd, err = crd.APIExtensionClient.ApiextensionsV1().CustomResourceDefinitions().Get(crd.Crd.Name, metav1.GetOptions{})
crd.Crd, err = crd.APIExtensionClient.ApiextensionsV1().CustomResourceDefinitions().Get(context.TODO(), crd.Crd.Name, metav1.GetOptions{})
if err != nil {
framework.Failf("%v", err)
}
crd.Crd.Spec.Versions[1].Served = false
crd.Crd, err = crd.APIExtensionClient.ApiextensionsV1().CustomResourceDefinitions().Update(crd.Crd)
crd.Crd, err = crd.APIExtensionClient.ApiextensionsV1().CustomResourceDefinitions().Update(context.TODO(), crd.Crd)
if err != nil {
framework.Failf("%v", err)
}


@@ -110,7 +110,7 @@ var _ = SIGDescribe("CustomResourceDefinition resources [Privileged:ClusterAdmin
}()
selectorListOpts := metav1.ListOptions{LabelSelector: "e2e-list-test-uuid=" + testUUID}
list, err := apiExtensionClient.ApiextensionsV1().CustomResourceDefinitions().List(selectorListOpts)
list, err := apiExtensionClient.ApiextensionsV1().CustomResourceDefinitions().List(context.TODO(), selectorListOpts)
framework.ExpectNoError(err, "listing CustomResourceDefinitions")
framework.ExpectEqual(len(list.Items), testListSize)
for _, actual := range list.Items {
@@ -130,7 +130,7 @@ var _ = SIGDescribe("CustomResourceDefinition resources [Privileged:ClusterAdmin
// Use delete collection to remove the CRDs
err = fixtures.DeleteV1CustomResourceDefinitions(selectorListOpts, apiExtensionClient)
framework.ExpectNoError(err, "deleting CustomResourceDefinitions")
_, err = apiExtensionClient.ApiextensionsV1().CustomResourceDefinitions().Get(crd.Name, metav1.GetOptions{})
_, err = apiExtensionClient.ApiextensionsV1().CustomResourceDefinitions().Get(context.TODO(), crd.Name, metav1.GetOptions{})
framework.ExpectNoError(err, "getting remaining CustomResourceDefinition")
})
@@ -170,15 +170,14 @@ var _ = SIGDescribe("CustomResourceDefinition resources [Privileged:ClusterAdmin
framework.Failf("Expected CustomResourceDefinition Spec to match status sub-resource Spec, but got:\n%s", diff.ObjectReflectDiff(status.Spec, crd.Spec))
}
status.Status.Conditions = append(status.Status.Conditions, updateCondition)
updated, err = apiExtensionClient.ApiextensionsV1().CustomResourceDefinitions().UpdateStatus(status)
updated, err = apiExtensionClient.ApiextensionsV1().CustomResourceDefinitions().UpdateStatus(context.TODO(), status)
return err
})
framework.ExpectNoError(err, "updating CustomResourceDefinition status")
expectCondition(updated.Status.Conditions, updateCondition)
patchCondition := v1.CustomResourceDefinitionCondition{Message: "patched"}
patched, err := apiExtensionClient.ApiextensionsV1().CustomResourceDefinitions().Patch(
crd.GetName(),
patched, err := apiExtensionClient.ApiextensionsV1().CustomResourceDefinitions().Patch(context.TODO(), crd.GetName(),
types.JSONPatchType,
[]byte(`[{"op": "add", "path": "/status/conditions", "value": [{"message": "patched"}]}]`),
"status")
@@ -305,7 +304,7 @@ var _ = SIGDescribe("CustomResourceDefinition resources [Privileged:ClusterAdmin
framework.ExpectNoError(err, "creating CR")
// Setting default for a to "A" and waiting for the CR to get defaulted on read
crd, err = apiExtensionClient.ApiextensionsV1().CustomResourceDefinitions().Patch(crd.Name, types.JSONPatchType, []byte(`[
crd, err = apiExtensionClient.ApiextensionsV1().CustomResourceDefinitions().Patch(context.TODO(), crd.Name, types.JSONPatchType, []byte(`[
{"op":"add","path":"/spec/versions/0/schema/openAPIV3Schema/properties/a/default", "value": "A"}
]`))
framework.ExpectNoError(err, "setting default for a to \"A\" in schema")
@@ -344,7 +343,7 @@ var _ = SIGDescribe("CustomResourceDefinition resources [Privileged:ClusterAdmin
framework.ExpectEqual(v, "A", "\"a\" is defaulted to \"A\"")
// Deleting default for a, adding default "B" for b and waiting for the CR to get defaulted on read for b
crd, err = apiExtensionClient.ApiextensionsV1().CustomResourceDefinitions().Patch(crd.Name, types.JSONPatchType, []byte(`[
crd, err = apiExtensionClient.ApiextensionsV1().CustomResourceDefinitions().Patch(context.TODO(), crd.Name, types.JSONPatchType, []byte(`[
{"op":"remove","path":"/spec/versions/0/schema/openAPIV3Schema/properties/a/default"},
{"op":"add","path":"/spec/versions/0/schema/openAPIV3Schema/properties/b/default", "value": "B"}
]`))


@@ -17,6 +17,7 @@ limitations under the License.
package apimachinery
import (
"context"
"time"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -115,7 +116,7 @@ func checkExistingRCRecovers(f *framework.Framework) {
ginkgo.By("deleting pods from existing replication controller")
framework.ExpectNoError(wait.Poll(time.Millisecond*500, time.Second*60, func() (bool, error) {
options := metav1.ListOptions{LabelSelector: rcSelector.String()}
pods, err := podClient.List(options)
pods, err := podClient.List(context.TODO(), options)
if err != nil {
framework.Logf("apiserver returned error, as expected before recovery: %v", err)
return false, nil
@@ -124,7 +125,7 @@ func checkExistingRCRecovers(f *framework.Framework) {
return false, nil
}
for _, pod := range pods.Items {
err = podClient.Delete(pod.Name, metav1.NewDeleteOptions(0))
err = podClient.Delete(context.TODO(), pod.Name, metav1.NewDeleteOptions(0))
framework.ExpectNoError(err, "failed to delete pod %s in namespace: %s", pod.Name, f.Namespace.Name)
}
framework.Logf("apiserver has recovered")
@@ -134,7 +135,7 @@ func checkExistingRCRecovers(f *framework.Framework) {
ginkgo.By("waiting for replication controller to recover")
framework.ExpectNoError(wait.Poll(time.Millisecond*500, time.Second*60, func() (bool, error) {
options := metav1.ListOptions{LabelSelector: rcSelector.String()}
pods, err := podClient.List(options)
pods, err := podClient.List(context.TODO(), options)
framework.ExpectNoError(err, "failed to list pods in namespace: %s, that match label selector: %s", f.Namespace.Name, rcSelector.String())
for _, pod := range pods.Items {
if pod.DeletionTimestamp == nil && podutil.IsPodReady(&pod) {


@@ -17,6 +17,7 @@ limitations under the License.
package apimachinery
import (
"context"
"encoding/json"
"fmt"
"sync/atomic"
@@ -183,7 +184,7 @@ func verifyRemainingObjects(f *framework.Framework, objects map[string]int) (boo
for object, num := range objects {
switch object {
case "Pods":
pods, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).List(metav1.ListOptions{})
pods, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).List(context.TODO(), metav1.ListOptions{})
if err != nil {
return false, fmt.Errorf("failed to list pods: %v", err)
}
@@ -192,7 +193,7 @@ func verifyRemainingObjects(f *framework.Framework, objects map[string]int) (boo
ginkgo.By(fmt.Sprintf("expected %d pods, got %d pods", num, len(pods.Items)))
}
case "Deployments":
deployments, err := f.ClientSet.AppsV1().Deployments(f.Namespace.Name).List(metav1.ListOptions{})
deployments, err := f.ClientSet.AppsV1().Deployments(f.Namespace.Name).List(context.TODO(), metav1.ListOptions{})
if err != nil {
return false, fmt.Errorf("failed to list deployments: %v", err)
}
@@ -201,7 +202,7 @@ func verifyRemainingObjects(f *framework.Framework, objects map[string]int) (boo
ginkgo.By(fmt.Sprintf("expected %d Deployments, got %d Deployments", num, len(deployments.Items)))
}
case "ReplicaSets":
rs, err := f.ClientSet.AppsV1().ReplicaSets(f.Namespace.Name).List(metav1.ListOptions{})
rs, err := f.ClientSet.AppsV1().ReplicaSets(f.Namespace.Name).List(context.TODO(), metav1.ListOptions{})
if err != nil {
return false, fmt.Errorf("failed to list rs: %v", err)
}
@@ -210,7 +211,7 @@ func verifyRemainingObjects(f *framework.Framework, objects map[string]int) (boo
ginkgo.By(fmt.Sprintf("expected %d rs, got %d rs", num, len(rs.Items)))
}
case "ReplicationControllers":
rcs, err := f.ClientSet.CoreV1().ReplicationControllers(f.Namespace.Name).List(metav1.ListOptions{})
rcs, err := f.ClientSet.CoreV1().ReplicationControllers(f.Namespace.Name).List(context.TODO(), metav1.ListOptions{})
if err != nil {
return false, fmt.Errorf("failed to list replication controllers: %v", err)
}
@@ -219,7 +220,7 @@ func verifyRemainingObjects(f *framework.Framework, objects map[string]int) (boo
ginkgo.By(fmt.Sprintf("expected %d RCs, got %d RCs", num, len(rcs.Items)))
}
case "CronJobs":
cronJobs, err := f.ClientSet.BatchV1beta1().CronJobs(f.Namespace.Name).List(metav1.ListOptions{})
cronJobs, err := f.ClientSet.BatchV1beta1().CronJobs(f.Namespace.Name).List(context.TODO(), metav1.ListOptions{})
if err != nil {
return false, fmt.Errorf("failed to list cronjobs: %v", err)
}
@@ -228,7 +229,7 @@ func verifyRemainingObjects(f *framework.Framework, objects map[string]int) (boo
ginkgo.By(fmt.Sprintf("expected %d cronjobs, got %d cronjobs", num, len(cronJobs.Items)))
}
case "Jobs":
jobs, err := f.ClientSet.BatchV1().Jobs(f.Namespace.Name).List(metav1.ListOptions{})
jobs, err := f.ClientSet.BatchV1().Jobs(f.Namespace.Name).List(context.TODO(), metav1.ListOptions{})
if err != nil {
return false, fmt.Errorf("failed to list jobs: %v", err)
}
@@ -320,13 +321,13 @@ var _ = SIGDescribe("Garbage collector", func() {
uniqLabels := getUniqLabel("gctest", "delete_pods")
rc := newOwnerRC(f, rcName, 2, uniqLabels)
ginkgo.By("create the rc")
rc, err := rcClient.Create(rc)
rc, err := rcClient.Create(context.TODO(), rc)
if err != nil {
framework.Failf("Failed to create replication controller: %v", err)
}
// wait for rc to create some pods
if err := wait.Poll(5*time.Second, 30*time.Second, func() (bool, error) {
pods, err := podClient.List(metav1.ListOptions{})
pods, err := podClient.List(context.TODO(), metav1.ListOptions{})
if err != nil {
return false, fmt.Errorf("failed to list pods: %v", err)
}
@@ -345,7 +346,7 @@ var _ = SIGDescribe("Garbage collector", func() {
ginkgo.By("delete the rc")
deleteOptions := getBackgroundOptions()
deleteOptions.Preconditions = metav1.NewUIDPreconditions(string(rc.UID))
if err := rcClient.Delete(rc.ObjectMeta.Name, deleteOptions); err != nil {
if err := rcClient.Delete(context.TODO(), rc.ObjectMeta.Name, deleteOptions); err != nil {
framework.Failf("failed to delete the rc: %v", err)
}
ginkgo.By("wait for all pods to be garbage collected")
@@ -355,7 +356,7 @@ var _ = SIGDescribe("Garbage collector", func() {
return verifyRemainingObjects(f, objects)
}); err != nil {
framework.Failf("failed to wait for all pods to be deleted: %v", err)
remainingPods, err := podClient.List(metav1.ListOptions{})
remainingPods, err := podClient.List(context.TODO(), metav1.ListOptions{})
if err != nil {
framework.Failf("failed to list pods post mortem: %v", err)
} else {
@@ -378,13 +379,13 @@ var _ = SIGDescribe("Garbage collector", func() {
uniqLabels := getUniqLabel("gctest", "orphan_pods")
rc := newOwnerRC(f, rcName, estimateMaximumPods(clientSet, 10, 100), uniqLabels)
ginkgo.By("create the rc")
rc, err := rcClient.Create(rc)
rc, err := rcClient.Create(context.TODO(), rc)
if err != nil {
framework.Failf("Failed to create replication controller: %v", err)
}
// wait for rc to create pods
if err := wait.Poll(5*time.Second, 30*time.Second, func() (bool, error) {
rc, err := rcClient.Get(rc.Name, metav1.GetOptions{})
rc, err := rcClient.Get(context.TODO(), rc.Name, metav1.GetOptions{})
if err != nil {
return false, fmt.Errorf("failed to get rc: %v", err)
}
@@ -399,7 +400,7 @@ var _ = SIGDescribe("Garbage collector", func() {
ginkgo.By("delete the rc")
deleteOptions := getOrphanOptions()
deleteOptions.Preconditions = metav1.NewUIDPreconditions(string(rc.UID))
if err := rcClient.Delete(rc.ObjectMeta.Name, deleteOptions); err != nil {
if err := rcClient.Delete(context.TODO(), rc.ObjectMeta.Name, deleteOptions); err != nil {
framework.Failf("failed to delete the rc: %v", err)
}
ginkgo.By("wait for the rc to be deleted")
@@ -411,7 +412,7 @@ var _ = SIGDescribe("Garbage collector", func() {
// parallel, the GC controller might get distracted by other tests.
// According to the test logs, 120s is enough time.
if err := wait.Poll(5*time.Second, 120*time.Second, func() (bool, error) {
rcs, err := rcClient.List(metav1.ListOptions{})
rcs, err := rcClient.List(context.TODO(), metav1.ListOptions{})
if err != nil {
return false, fmt.Errorf("failed to list rcs: %v", err)
}
@@ -424,7 +425,7 @@ var _ = SIGDescribe("Garbage collector", func() {
}
ginkgo.By("wait for 30 seconds to see if the garbage collector mistakenly deletes the pods")
time.Sleep(30 * time.Second)
pods, err := podClient.List(metav1.ListOptions{})
pods, err := podClient.List(context.TODO(), metav1.ListOptions{})
if err != nil {
framework.Failf("Failed to list pods: %v", err)
}
@@ -444,13 +445,13 @@ var _ = SIGDescribe("Garbage collector", func() {
uniqLabels := getUniqLabel("gctest", "orphan_pods_nil_option")
rc := newOwnerRC(f, rcName, 2, uniqLabels)
ginkgo.By("create the rc")
rc, err := rcClient.Create(rc)
rc, err := rcClient.Create(context.TODO(), rc)
if err != nil {
framework.Failf("Failed to create replication controller: %v", err)
}
// wait for rc to create some pods
if err := wait.Poll(5*time.Second, 30*time.Second, func() (bool, error) {
rc, err := rcClient.Get(rc.Name, metav1.GetOptions{})
rc, err := rcClient.Get(context.TODO(), rc.Name, metav1.GetOptions{})
if err != nil {
return false, fmt.Errorf("failed to get rc: %v", err)
}
@@ -464,12 +465,12 @@ var _ = SIGDescribe("Garbage collector", func() {
ginkgo.By("delete the rc")
deleteOptions := &metav1.DeleteOptions{}
deleteOptions.Preconditions = metav1.NewUIDPreconditions(string(rc.UID))
if err := rcClient.Delete(rc.ObjectMeta.Name, deleteOptions); err != nil {
if err := rcClient.Delete(context.TODO(), rc.ObjectMeta.Name, deleteOptions); err != nil {
framework.Failf("failed to delete the rc: %v", err)
}
ginkgo.By("wait for 30 seconds to see if the garbage collector mistakenly deletes the pods")
time.Sleep(30 * time.Second)
pods, err := podClient.List(metav1.ListOptions{})
pods, err := podClient.List(context.TODO(), metav1.ListOptions{})
if err != nil {
framework.Failf("Failed to list pods: %v", err)
}
@@ -492,14 +493,14 @@ var _ = SIGDescribe("Garbage collector", func() {
uniqLabels := getUniqLabel("gctest", "delete_rs")
deployment := newOwnerDeployment(f, deploymentName, uniqLabels)
ginkgo.By("create the deployment")
createdDeployment, err := deployClient.Create(deployment)
createdDeployment, err := deployClient.Create(context.TODO(), deployment)
if err != nil {
framework.Failf("Failed to create deployment: %v", err)
}
// wait for deployment to create some rs
ginkgo.By("Wait for the Deployment to create new ReplicaSet")
err = wait.PollImmediate(500*time.Millisecond, 1*time.Minute, func() (bool, error) {
rsList, err := rsClient.List(metav1.ListOptions{})
rsList, err := rsClient.List(context.TODO(), metav1.ListOptions{})
if err != nil {
return false, fmt.Errorf("failed to list rs: %v", err)
}
@@ -513,7 +514,7 @@ var _ = SIGDescribe("Garbage collector", func() {
ginkgo.By("delete the deployment")
deleteOptions := getBackgroundOptions()
deleteOptions.Preconditions = metav1.NewUIDPreconditions(string(createdDeployment.UID))
if err := deployClient.Delete(deployment.ObjectMeta.Name, deleteOptions); err != nil {
if err := deployClient.Delete(context.TODO(), deployment.ObjectMeta.Name, deleteOptions); err != nil {
framework.Failf("failed to delete the deployment: %v", err)
}
ginkgo.By("wait for all rs to be garbage collected")
@@ -524,7 +525,7 @@ var _ = SIGDescribe("Garbage collector", func() {
if err != nil {
errList := make([]error, 0)
errList = append(errList, err)
remainingRSs, err := rsClient.List(metav1.ListOptions{})
remainingRSs, err := rsClient.List(context.TODO(), metav1.ListOptions{})
if err != nil {
errList = append(errList, fmt.Errorf("failed to list RSs post mortem: %v", err))
} else {
@@ -551,14 +552,14 @@ var _ = SIGDescribe("Garbage collector", func() {
uniqLabels := getUniqLabel("gctest", "orphan_rs")
deployment := newOwnerDeployment(f, deploymentName, uniqLabels)
ginkgo.By("create the deployment")
createdDeployment, err := deployClient.Create(deployment)
createdDeployment, err := deployClient.Create(context.TODO(), deployment)
if err != nil {
framework.Failf("Failed to create deployment: %v", err)
}
// wait for deployment to create some rs
ginkgo.By("Wait for the Deployment to create new ReplicaSet")
err = wait.PollImmediate(500*time.Millisecond, 1*time.Minute, func() (bool, error) {
rsList, err := rsClient.List(metav1.ListOptions{})
rsList, err := rsClient.List(context.TODO(), metav1.ListOptions{})
if err != nil {
return false, fmt.Errorf("failed to list rs: %v", err)
}
@@ -572,12 +573,12 @@ var _ = SIGDescribe("Garbage collector", func() {
ginkgo.By("delete the deployment")
deleteOptions := getOrphanOptions()
deleteOptions.Preconditions = metav1.NewUIDPreconditions(string(createdDeployment.UID))
if err := deployClient.Delete(deployment.ObjectMeta.Name, deleteOptions); err != nil {
if err := deployClient.Delete(context.TODO(), deployment.ObjectMeta.Name, deleteOptions); err != nil {
framework.Failf("failed to delete the deployment: %v", err)
}
ginkgo.By("wait for deployment deletion to see if the garbage collector mistakenly deletes the rs")
err = wait.PollImmediate(500*time.Millisecond, 1*time.Minute, func() (bool, error) {
dList, err := deployClient.List(metav1.ListOptions{})
dList, err := deployClient.List(context.TODO(), metav1.ListOptions{})
if err != nil {
return false, fmt.Errorf("failed to list deployments: %v", err)
}
@@ -594,13 +595,13 @@ var _ = SIGDescribe("Garbage collector", func() {
}
if !ok {
errList := make([]error, 0)
remainingRSs, err := rsClient.List(metav1.ListOptions{})
remainingRSs, err := rsClient.List(context.TODO(), metav1.ListOptions{})
if err != nil {
errList = append(errList, fmt.Errorf("failed to list RSs post mortem: %v", err))
} else {
errList = append(errList, fmt.Errorf("remaining rs post mortem: %#v", remainingRSs))
}
remainingDSs, err := deployClient.List(metav1.ListOptions{})
remainingDSs, err := deployClient.List(context.TODO(), metav1.ListOptions{})
if err != nil {
errList = append(errList, fmt.Errorf("failed to list Deployments post mortem: %v", err))
} else {
@@ -609,7 +610,7 @@ var _ = SIGDescribe("Garbage collector", func() {
aggregatedError := utilerrors.NewAggregate(errList)
framework.Failf("Failed to verify remaining deployments, rs, and pods: %v", aggregatedError)
}
rs, err := clientSet.AppsV1().ReplicaSets(f.Namespace.Name).List(metav1.ListOptions{})
rs, err := clientSet.AppsV1().ReplicaSets(f.Namespace.Name).List(context.TODO(), metav1.ListOptions{})
if err != nil {
framework.Failf("Failed to list ReplicaSet %v", err)
}
@@ -635,13 +636,13 @@ var _ = SIGDescribe("Garbage collector", func() {
uniqLabels := getUniqLabel("gctest", "delete_pods_foreground")
rc := newOwnerRC(f, rcName, estimateMaximumPods(clientSet, 10, 100), uniqLabels)
ginkgo.By("create the rc")
rc, err := rcClient.Create(rc)
rc, err := rcClient.Create(context.TODO(), rc)
if err != nil {
framework.Failf("Failed to create replication controller: %v", err)
}
// wait for rc to create pods
if err := wait.Poll(5*time.Second, 30*time.Second, func() (bool, error) {
rc, err := rcClient.Get(rc.Name, metav1.GetOptions{})
rc, err := rcClient.Get(context.TODO(), rc.Name, metav1.GetOptions{})
if err != nil {
return false, fmt.Errorf("failed to get rc: %v", err)
}
@@ -655,7 +656,7 @@ var _ = SIGDescribe("Garbage collector", func() {
ginkgo.By("delete the rc")
deleteOptions := getForegroundOptions()
deleteOptions.Preconditions = metav1.NewUIDPreconditions(string(rc.UID))
if err := rcClient.Delete(rc.ObjectMeta.Name, deleteOptions); err != nil {
if err := rcClient.Delete(context.TODO(), rc.ObjectMeta.Name, deleteOptions); err != nil {
framework.Failf("failed to delete the rc: %v", err)
}
ginkgo.By("wait for the rc to be deleted")
@@ -665,9 +666,9 @@ var _ = SIGDescribe("Garbage collector", func() {
// deletion and dependent deletion processing. For now, increase the timeout
// and investigate the processing delay.
if err := wait.Poll(1*time.Second, 60*time.Second, func() (bool, error) {
_, err := rcClient.Get(rc.Name, metav1.GetOptions{})
_, err := rcClient.Get(context.TODO(), rc.Name, metav1.GetOptions{})
if err == nil {
pods, _ := podClient.List(metav1.ListOptions{})
pods, _ := podClient.List(context.TODO(), metav1.ListOptions{})
framework.Logf("%d pods remaining", len(pods.Items))
count := 0
for _, pod := range pods.Items {
@@ -684,7 +685,7 @@ var _ = SIGDescribe("Garbage collector", func() {
}
return false, err
}); err != nil {
pods, err2 := podClient.List(metav1.ListOptions{})
pods, err2 := podClient.List(context.TODO(), metav1.ListOptions{})
if err2 != nil {
framework.Failf("%v", err2)
}
@@ -696,7 +697,7 @@ var _ = SIGDescribe("Garbage collector", func() {
framework.Failf("failed to delete the rc: %v", err)
}
// There shouldn't be any pods
pods, err := podClient.List(metav1.ListOptions{})
pods, err := podClient.List(context.TODO(), metav1.ListOptions{})
if err != nil {
framework.Failf("%v", err)
}
@@ -722,7 +723,7 @@ var _ = SIGDescribe("Garbage collector", func() {
uniqLabelsDeleted := getUniqLabel("gctest_d", "valid_and_pending_owners_d")
rc1 := newOwnerRC(f, rc1Name, replicas, uniqLabelsDeleted)
ginkgo.By("create the rc1")
rc1, err := rcClient.Create(rc1)
rc1, err := rcClient.Create(context.TODO(), rc1)
if err != nil {
framework.Failf("Failed to create replication controller: %v", err)
}
@@ -730,13 +731,13 @@ var _ = SIGDescribe("Garbage collector", func() {
uniqLabelsStay := getUniqLabel("gctest_s", "valid_and_pending_owners_s")
rc2 := newOwnerRC(f, rc2Name, 0, uniqLabelsStay)
ginkgo.By("create the rc2")
rc2, err = rcClient.Create(rc2)
rc2, err = rcClient.Create(context.TODO(), rc2)
if err != nil {
framework.Failf("Failed to create replication controller: %v", err)
}
// wait for rc1 to be stable
if err := wait.Poll(5*time.Second, 30*time.Second, func() (bool, error) {
rc1, err := rcClient.Get(rc1.Name, metav1.GetOptions{})
rc1, err := rcClient.Get(context.TODO(), rc1.Name, metav1.GetOptions{})
if err != nil {
return false, fmt.Errorf("failed to get rc: %v", err)
}
@@ -748,28 +749,28 @@ var _ = SIGDescribe("Garbage collector", func() {
framework.Failf("failed to wait for the rc.Status.Replicas to reach rc.Spec.Replicas: %v", err)
}
ginkgo.By(fmt.Sprintf("set half of pods created by rc %s to have rc %s as owner as well", rc1Name, rc2Name))
pods, err := podClient.List(metav1.ListOptions{})
pods, err := podClient.List(context.TODO(), metav1.ListOptions{})
framework.ExpectNoError(err, "failed to list pods in namespace: %s", f.Namespace.Name)
patch := fmt.Sprintf(`{"metadata":{"ownerReferences":[{"apiVersion":"v1","kind":"ReplicationController","name":"%s","uid":"%s"}]}}`, rc2.ObjectMeta.Name, rc2.ObjectMeta.UID)
for i := 0; i < halfReplicas; i++ {
pod := pods.Items[i]
_, err := podClient.Patch(pod.Name, types.StrategicMergePatchType, []byte(patch))
_, err := podClient.Patch(context.TODO(), pod.Name, types.StrategicMergePatchType, []byte(patch))
framework.ExpectNoError(err, "failed to apply to pod %s in namespace %s, a strategic merge patch: %s", pod.Name, f.Namespace.Name, patch)
}
ginkgo.By(fmt.Sprintf("delete the rc %s", rc1Name))
deleteOptions := getForegroundOptions()
deleteOptions.Preconditions = metav1.NewUIDPreconditions(string(rc1.UID))
if err := rcClient.Delete(rc1.ObjectMeta.Name, deleteOptions); err != nil {
if err := rcClient.Delete(context.TODO(), rc1.ObjectMeta.Name, deleteOptions); err != nil {
framework.Failf("failed to delete the rc: %v", err)
}
ginkgo.By("wait for the rc to be deleted")
// TODO: shorten the timeout when we make GC's periodic API rediscovery more efficient.
// Tracked at https://github.com/kubernetes/kubernetes/issues/50046.
if err := wait.Poll(5*time.Second, 90*time.Second, func() (bool, error) {
_, err := rcClient.Get(rc1.Name, metav1.GetOptions{})
_, err := rcClient.Get(context.TODO(), rc1.Name, metav1.GetOptions{})
if err == nil {
pods, _ := podClient.List(metav1.ListOptions{})
pods, _ := podClient.List(context.TODO(), metav1.ListOptions{})
framework.Logf("%d pods remaining", len(pods.Items))
count := 0
for _, pod := range pods.Items {
@@ -786,7 +787,7 @@ var _ = SIGDescribe("Garbage collector", func() {
}
return false, err
}); err != nil {
pods, err2 := podClient.List(metav1.ListOptions{})
pods, err2 := podClient.List(context.TODO(), metav1.ListOptions{})
if err2 != nil {
framework.Failf("%v", err2)
}
@@ -798,7 +799,7 @@ var _ = SIGDescribe("Garbage collector", func() {
framework.Failf("failed to delete rc %s, err: %v", rc1Name, err)
}
// half of the pods should still exist,
pods, err = podClient.List(metav1.ListOptions{})
pods, err = podClient.List(context.TODO(), metav1.ListOptions{})
if err != nil {
framework.Failf("%v", err)
}
@@ -828,43 +829,43 @@ var _ = SIGDescribe("Garbage collector", func() {
podClient := clientSet.CoreV1().Pods(f.Namespace.Name)
pod1Name := "pod1"
pod1 := newGCPod(pod1Name)
pod1, err := podClient.Create(pod1)
pod1, err := podClient.Create(context.TODO(), pod1)
framework.ExpectNoError(err, "failed to create pod %s in namespace: %s", pod1Name, f.Namespace.Name)
pod2Name := "pod2"
pod2 := newGCPod(pod2Name)
pod2, err = podClient.Create(pod2)
pod2, err = podClient.Create(context.TODO(), pod2)
framework.ExpectNoError(err, "failed to create pod %s in namespace: %s", pod2Name, f.Namespace.Name)
pod3Name := "pod3"
pod3 := newGCPod(pod3Name)
pod3, err = podClient.Create(pod3)
pod3, err = podClient.Create(context.TODO(), pod3)
framework.ExpectNoError(err, "failed to create pod %s in namespace: %s", pod3Name, f.Namespace.Name)
// create circular dependency
addRefPatch := func(name string, uid types.UID) []byte {
return []byte(fmt.Sprintf(`{"metadata":{"ownerReferences":[{"apiVersion":"v1","kind":"Pod","name":"%s","uid":"%s","controller":true,"blockOwnerDeletion":true}]}}`, name, uid))
}
patch1 := addRefPatch(pod3.Name, pod3.UID)
pod1, err = podClient.Patch(pod1.Name, types.StrategicMergePatchType, patch1)
pod1, err = podClient.Patch(context.TODO(), pod1.Name, types.StrategicMergePatchType, patch1)
framework.ExpectNoError(err, "failed to apply to pod %s in namespace %s, a strategic merge patch: %s", pod1.Name, f.Namespace.Name, patch1)
framework.Logf("pod1.ObjectMeta.OwnerReferences=%#v", pod1.ObjectMeta.OwnerReferences)
patch2 := addRefPatch(pod1.Name, pod1.UID)
pod2, err = podClient.Patch(pod2.Name, types.StrategicMergePatchType, patch2)
pod2, err = podClient.Patch(context.TODO(), pod2.Name, types.StrategicMergePatchType, patch2)
framework.ExpectNoError(err, "failed to apply to pod %s in namespace %s, a strategic merge patch: %s", pod2.Name, f.Namespace.Name, patch2)
framework.Logf("pod2.ObjectMeta.OwnerReferences=%#v", pod2.ObjectMeta.OwnerReferences)
patch3 := addRefPatch(pod2.Name, pod2.UID)
pod3, err = podClient.Patch(pod3.Name, types.StrategicMergePatchType, patch3)
pod3, err = podClient.Patch(context.TODO(), pod3.Name, types.StrategicMergePatchType, patch3)
framework.ExpectNoError(err, "failed to apply to pod %s in namespace %s, a strategic merge patch: %s", pod3.Name, f.Namespace.Name, patch3)
framework.Logf("pod3.ObjectMeta.OwnerReferences=%#v", pod3.ObjectMeta.OwnerReferences)
// delete one pod, should result in the deletion of all pods
deleteOptions := getForegroundOptions()
deleteOptions.Preconditions = metav1.NewUIDPreconditions(string(pod1.UID))
err = podClient.Delete(pod1.ObjectMeta.Name, deleteOptions)
err = podClient.Delete(context.TODO(), pod1.ObjectMeta.Name, deleteOptions)
framework.ExpectNoError(err, "failed to delete pod %s in namespace: %s", pod1.Name, f.Namespace.Name)
var pods *v1.PodList
var err2 error
// TODO: shorten the timeout when we make GC's periodic API rediscovery more efficient.
// Tracked at https://github.com/kubernetes/kubernetes/issues/50046.
if err := wait.Poll(5*time.Second, 90*time.Second, func() (bool, error) {
pods, err2 = podClient.List(metav1.ListOptions{})
pods, err2 = podClient.List(context.TODO(), metav1.ListOptions{})
if err2 != nil {
return false, fmt.Errorf("failed to list pods: %v", err)
}
@@ -1124,12 +1125,12 @@ var _ = SIGDescribe("Garbage collector", func() {
ginkgo.By("Create the cronjob")
cronJob := newCronJob("simple", "*/1 * * * ?")
cronJob, err := f.ClientSet.BatchV1beta1().CronJobs(f.Namespace.Name).Create(cronJob)
cronJob, err := f.ClientSet.BatchV1beta1().CronJobs(f.Namespace.Name).Create(context.TODO(), cronJob)
framework.ExpectNoError(err, "failed to create cronjob: %+v, in namespace: %s", cronJob, f.Namespace.Name)
ginkgo.By("Wait for the CronJob to create new Job")
err = wait.PollImmediate(500*time.Millisecond, 2*time.Minute, func() (bool, error) {
jobs, err := f.ClientSet.BatchV1().Jobs(f.Namespace.Name).List(metav1.ListOptions{})
jobs, err := f.ClientSet.BatchV1().Jobs(f.Namespace.Name).List(context.TODO(), metav1.ListOptions{})
if err != nil {
return false, fmt.Errorf("failed to list jobs: %v", err)
}
@@ -1140,7 +1141,7 @@ var _ = SIGDescribe("Garbage collector", func() {
}
ginkgo.By("Delete the cronjob")
if err := f.ClientSet.BatchV1beta1().CronJobs(f.Namespace.Name).Delete(cronJob.Name, getBackgroundOptions()); err != nil {
if err := f.ClientSet.BatchV1beta1().CronJobs(f.Namespace.Name).Delete(context.TODO(), cronJob.Name, getBackgroundOptions()); err != nil {
framework.Failf("Failed to delete the CronJob: %v", err)
}
ginkgo.By("Verify if cronjob does not leave jobs nor pods behind")


@@ -17,6 +17,7 @@ limitations under the License.
package apimachinery
import (
"context"
"strconv"
"time"
@@ -110,7 +111,7 @@ var _ = SIGDescribe("Generated clientset", func() {
ginkgo.By("setting up watch")
selector := labels.SelectorFromSet(labels.Set(map[string]string{"time": value})).String()
options := metav1.ListOptions{LabelSelector: selector}
pods, err := podClient.List(options)
pods, err := podClient.List(context.TODO(), options)
if err != nil {
framework.Failf("Failed to query for pods: %v", err)
}
@@ -119,13 +120,13 @@ var _ = SIGDescribe("Generated clientset", func() {
LabelSelector: selector,
ResourceVersion: pods.ListMeta.ResourceVersion,
}
w, err := podClient.Watch(options)
w, err := podClient.Watch(context.TODO(), options)
if err != nil {
framework.Failf("Failed to set up watch: %v", err)
}
ginkgo.By("creating the pod")
pod, err = podClient.Create(pod)
pod, err = podClient.Create(context.TODO(), pod)
if err != nil {
framework.Failf("Failed to create pod: %v", err)
}
@@ -135,7 +136,7 @@ var _ = SIGDescribe("Generated clientset", func() {
LabelSelector: selector,
ResourceVersion: pod.ResourceVersion,
}
pods, err = podClient.List(options)
pods, err = podClient.List(context.TODO(), options)
if err != nil {
framework.Failf("Failed to query for pods: %v", err)
}
@@ -150,7 +151,7 @@ var _ = SIGDescribe("Generated clientset", func() {
ginkgo.By("deleting the pod gracefully")
gracePeriod := int64(31)
if err := podClient.Delete(pod.Name, metav1.NewDeleteOptions(gracePeriod)); err != nil {
if err := podClient.Delete(context.TODO(), pod.Name, metav1.NewDeleteOptions(gracePeriod)); err != nil {
framework.Failf("Failed to delete pod: %v", err)
}
@@ -226,7 +227,7 @@ var _ = SIGDescribe("Generated clientset", func() {
ginkgo.By("setting up watch")
selector := labels.SelectorFromSet(labels.Set(map[string]string{"time": value})).String()
options := metav1.ListOptions{LabelSelector: selector}
cronJobs, err := cronJobClient.List(options)
cronJobs, err := cronJobClient.List(context.TODO(), options)
if err != nil {
framework.Failf("Failed to query for cronJobs: %v", err)
}
@@ -235,13 +236,13 @@ var _ = SIGDescribe("Generated clientset", func() {
LabelSelector: selector,
ResourceVersion: cronJobs.ListMeta.ResourceVersion,
}
w, err := cronJobClient.Watch(options)
w, err := cronJobClient.Watch(context.TODO(), options)
if err != nil {
framework.Failf("Failed to set up watch: %v", err)
}
ginkgo.By("creating the cronJob")
cronJob, err = cronJobClient.Create(cronJob)
cronJob, err = cronJobClient.Create(context.TODO(), cronJob)
if err != nil {
framework.Failf("Failed to create cronJob: %v", err)
}
@@ -251,7 +252,7 @@ var _ = SIGDescribe("Generated clientset", func() {
LabelSelector: selector,
ResourceVersion: cronJob.ResourceVersion,
}
cronJobs, err = cronJobClient.List(options)
cronJobs, err = cronJobClient.List(context.TODO(), options)
if err != nil {
framework.Failf("Failed to query for cronJobs: %v", err)
}
@@ -263,12 +264,12 @@ var _ = SIGDescribe("Generated clientset", func() {
ginkgo.By("deleting the cronJob")
// Use DeletePropagationBackground so the CronJob is really gone when the call returns.
propagationPolicy := metav1.DeletePropagationBackground
if err := cronJobClient.Delete(cronJob.Name, &metav1.DeleteOptions{PropagationPolicy: &propagationPolicy}); err != nil {
if err := cronJobClient.Delete(context.TODO(), cronJob.Name, &metav1.DeleteOptions{PropagationPolicy: &propagationPolicy}); err != nil {
framework.Failf("Failed to delete cronJob: %v", err)
}
options = metav1.ListOptions{LabelSelector: selector}
cronJobs, err = cronJobClient.List(options)
cronJobs, err = cronJobClient.List(context.TODO(), options)
if err != nil {
framework.Failf("Failed to list cronJobs to verify deletion: %v", err)
}


@@ -17,6 +17,7 @@ limitations under the License.
package apimachinery
import (
"context"
"encoding/json"
"fmt"
"strings"
@@ -67,7 +68,7 @@ func extinguish(f *framework.Framework, totalNS int, maxAllowedAfterDel int, max
framework.ExpectNoError(wait.Poll(2*time.Second, time.Duration(maxSeconds)*time.Second,
func() (bool, error) {
var cnt = 0
nsList, err := f.ClientSet.CoreV1().Namespaces().List(metav1.ListOptions{})
nsList, err := f.ClientSet.CoreV1().Namespaces().List(context.TODO(), metav1.ListOptions{})
if err != nil {
return false, err
}
@@ -109,21 +110,21 @@ func ensurePodsAreRemovedWhenNamespaceIsDeleted(f *framework.Framework) {
},
},
}
pod, err = f.ClientSet.CoreV1().Pods(namespace.Name).Create(pod)
pod, err = f.ClientSet.CoreV1().Pods(namespace.Name).Create(context.TODO(), pod)
framework.ExpectNoError(err, "failed to create pod %s in namespace: %s", podName, namespace.Name)
ginkgo.By("Waiting for the pod to have running status")
framework.ExpectNoError(e2epod.WaitForPodRunningInNamespace(f.ClientSet, pod))
ginkgo.By("Deleting the namespace")
err = f.ClientSet.CoreV1().Namespaces().Delete(namespace.Name, nil)
err = f.ClientSet.CoreV1().Namespaces().Delete(context.TODO(), namespace.Name, nil)
framework.ExpectNoError(err, "failed to delete namespace: %s", namespace.Name)
ginkgo.By("Waiting for the namespace to be removed.")
maxWaitSeconds := int64(60) + *pod.Spec.TerminationGracePeriodSeconds
framework.ExpectNoError(wait.Poll(1*time.Second, time.Duration(maxWaitSeconds)*time.Second,
func() (bool, error) {
_, err = f.ClientSet.CoreV1().Namespaces().Get(namespace.Name, metav1.GetOptions{})
_, err = f.ClientSet.CoreV1().Namespaces().Get(context.TODO(), namespace.Name, metav1.GetOptions{})
if err != nil && apierrors.IsNotFound(err) {
return true, nil
}
@@ -135,7 +136,7 @@ func ensurePodsAreRemovedWhenNamespaceIsDeleted(f *framework.Framework) {
framework.ExpectNoError(err, "failed to create namespace: %s", namespaceName)
ginkgo.By("Verifying there are no pods in the namespace")
_, err = f.ClientSet.CoreV1().Pods(namespace.Name).Get(pod.Name, metav1.GetOptions{})
_, err = f.ClientSet.CoreV1().Pods(namespace.Name).Get(context.TODO(), pod.Name, metav1.GetOptions{})
framework.ExpectError(err, "failed to get pod %s in namespace: %s", pod.Name, namespace.Name)
}
@@ -169,18 +170,18 @@ func ensureServicesAreRemovedWhenNamespaceIsDeleted(f *framework.Framework) {
}},
},
}
service, err = f.ClientSet.CoreV1().Services(namespace.Name).Create(service)
service, err = f.ClientSet.CoreV1().Services(namespace.Name).Create(context.TODO(), service)
framework.ExpectNoError(err, "failed to create service %s in namespace %s", serviceName, namespace.Name)
ginkgo.By("Deleting the namespace")
err = f.ClientSet.CoreV1().Namespaces().Delete(namespace.Name, nil)
err = f.ClientSet.CoreV1().Namespaces().Delete(context.TODO(), namespace.Name, nil)
framework.ExpectNoError(err, "failed to delete namespace: %s", namespace.Name)
ginkgo.By("Waiting for the namespace to be removed.")
maxWaitSeconds := int64(60)
framework.ExpectNoError(wait.Poll(1*time.Second, time.Duration(maxWaitSeconds)*time.Second,
func() (bool, error) {
_, err = f.ClientSet.CoreV1().Namespaces().Get(namespace.Name, metav1.GetOptions{})
_, err = f.ClientSet.CoreV1().Namespaces().Get(context.TODO(), namespace.Name, metav1.GetOptions{})
if err != nil && apierrors.IsNotFound(err) {
return true, nil
}
@@ -192,7 +193,7 @@ func ensureServicesAreRemovedWhenNamespaceIsDeleted(f *framework.Framework) {
framework.ExpectNoError(err, "failed to create namespace: %s", namespaceName)
ginkgo.By("Verifying there is no service in the namespace")
_, err = f.ClientSet.CoreV1().Services(namespace.Name).Get(service.Name, metav1.GetOptions{})
_, err = f.ClientSet.CoreV1().Services(namespace.Name).Get(context.TODO(), service.Name, metav1.GetOptions{})
framework.ExpectError(err, "failed to get service %s in namespace: %s", service.Name, namespace.Name)
}
@@ -270,11 +271,11 @@ var _ = SIGDescribe("Namespaces [Serial]", func() {
},
})
framework.ExpectNoError(err, "failed to marshal JSON patch data")
_, err = f.ClientSet.CoreV1().Namespaces().Patch(namespaceName, types.StrategicMergePatchType, []byte(nspatch))
_, err = f.ClientSet.CoreV1().Namespaces().Patch(context.TODO(), namespaceName, types.StrategicMergePatchType, []byte(nspatch))
framework.ExpectNoError(err, "failed to patch Namespace")
ginkgo.By("get the Namespace and ensuring it has the label")
namespace, err := f.ClientSet.CoreV1().Namespaces().Get(namespaceName, metav1.GetOptions{})
namespace, err := f.ClientSet.CoreV1().Namespaces().Get(context.TODO(), namespaceName, metav1.GetOptions{})
framework.ExpectNoError(err, "failed to get Namespace")
framework.ExpectEqual(namespace.ObjectMeta.Labels["testLabel"], "testValue", "namespace not patched")
})


@@ -17,6 +17,7 @@ limitations under the License.
package apimachinery
import (
"context"
"fmt"
"strconv"
@@ -49,11 +50,11 @@ var _ = SIGDescribe("client-go should negotiate", func() {
cfg.AcceptContentTypes = accept
c := kubernetes.NewForConfigOrDie(cfg)
svcs, err := c.CoreV1().Services("default").Get("kubernetes", metav1.GetOptions{})
svcs, err := c.CoreV1().Services("default").Get(context.TODO(), "kubernetes", metav1.GetOptions{})
framework.ExpectNoError(err)
rv, err := strconv.Atoi(svcs.ResourceVersion)
framework.ExpectNoError(err)
w, err := c.CoreV1().Services("default").Watch(metav1.ListOptions{ResourceVersion: strconv.Itoa(rv - 1)})
w, err := c.CoreV1().Services("default").Watch(context.TODO(), metav1.ListOptions{ResourceVersion: strconv.Itoa(rv - 1)})
framework.ExpectNoError(err)
defer w.Stop()


@@ -17,6 +17,7 @@ limitations under the License.
package apimachinery
import (
"context"
"fmt"
"strconv"
"time"
@@ -100,7 +101,7 @@ var _ = SIGDescribe("ResourceQuota", func() {
ginkgo.By("Creating a Service")
service := newTestServiceForQuota("test-service", v1.ServiceTypeClusterIP)
service, err = f.ClientSet.CoreV1().Services(f.Namespace.Name).Create(service)
service, err = f.ClientSet.CoreV1().Services(f.Namespace.Name).Create(context.TODO(), service)
framework.ExpectNoError(err)
ginkgo.By("Ensuring resource quota status captures service creation")
@@ -111,7 +112,7 @@ var _ = SIGDescribe("ResourceQuota", func() {
framework.ExpectNoError(err)
ginkgo.By("Deleting a Service")
err = f.ClientSet.CoreV1().Services(f.Namespace.Name).Delete(service.Name, nil)
err = f.ClientSet.CoreV1().Services(f.Namespace.Name).Delete(context.TODO(), service.Name, nil)
framework.ExpectNoError(err)
ginkgo.By("Ensuring resource quota status released usage")
@@ -133,7 +134,7 @@ var _ = SIGDescribe("ResourceQuota", func() {
// On contended servers the service account controller can slow down, leading to the count changing during a run.
// Wait up to 5s for the count to stabilize, assuming that updates come at a consistent rate, and are not held indefinitely.
wait.Poll(1*time.Second, 30*time.Second, func() (bool, error) {
secrets, err := f.ClientSet.CoreV1().Secrets(f.Namespace.Name).List(metav1.ListOptions{})
secrets, err := f.ClientSet.CoreV1().Secrets(f.Namespace.Name).List(context.TODO(), metav1.ListOptions{})
framework.ExpectNoError(err)
if len(secrets.Items) == found {
// loop until the number of secrets has stabilized for 5 seconds
@@ -167,7 +168,7 @@ var _ = SIGDescribe("ResourceQuota", func() {
ginkgo.By("Creating a Secret")
secret := newTestSecretForQuota("test-secret")
secret, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Create(secret)
secret, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Create(context.TODO(), secret)
framework.ExpectNoError(err)
ginkgo.By("Ensuring resource quota status captures secret creation")
@@ -179,7 +180,7 @@ var _ = SIGDescribe("ResourceQuota", func() {
framework.ExpectNoError(err)
ginkgo.By("Deleting a secret")
err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Delete(secret.Name, nil)
err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Delete(context.TODO(), secret.Name, nil)
framework.ExpectNoError(err)
ginkgo.By("Ensuring resource quota status released usage")
@@ -224,7 +225,7 @@ var _ = SIGDescribe("ResourceQuota", func() {
requests[v1.ResourceName(extendedResourceName)] = resource.MustParse("2")
limits[v1.ResourceName(extendedResourceName)] = resource.MustParse("2")
pod := newTestPodForQuota(f, podName, requests, limits)
pod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(pod)
pod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(context.TODO(), pod)
framework.ExpectNoError(err)
podToUpdate := pod
@@ -243,7 +244,7 @@ var _ = SIGDescribe("ResourceQuota", func() {
requests[v1.ResourceCPU] = resource.MustParse("600m")
requests[v1.ResourceMemory] = resource.MustParse("100Mi")
pod = newTestPodForQuota(f, "fail-pod", requests, v1.ResourceList{})
_, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(pod)
_, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(context.TODO(), pod)
framework.ExpectError(err)
ginkgo.By("Not allowing a pod to be created that exceeds remaining quota(validation on extended resources)")
@@ -255,7 +256,7 @@ var _ = SIGDescribe("ResourceQuota", func() {
requests[v1.ResourceName(extendedResourceName)] = resource.MustParse("2")
limits[v1.ResourceName(extendedResourceName)] = resource.MustParse("2")
pod = newTestPodForQuota(f, "fail-pod-for-extended-resource", requests, limits)
_, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(pod)
_, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(context.TODO(), pod)
framework.ExpectError(err)
ginkgo.By("Ensuring a pod cannot update its resource requirements")
@@ -265,7 +266,7 @@ var _ = SIGDescribe("ResourceQuota", func() {
requests[v1.ResourceMemory] = resource.MustParse("100Mi")
requests[v1.ResourceEphemeralStorage] = resource.MustParse("10Gi")
podToUpdate.Spec.Containers[0].Resources.Requests = requests
_, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Update(podToUpdate)
_, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Update(context.TODO(), podToUpdate)
framework.ExpectError(err)
ginkgo.By("Ensuring attempts to update pod resource requirements did not change quota usage")
@@ -273,7 +274,7 @@ var _ = SIGDescribe("ResourceQuota", func() {
framework.ExpectNoError(err)
ginkgo.By("Deleting the pod")
err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(podName, metav1.NewDeleteOptions(0))
err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(context.TODO(), podName, metav1.NewDeleteOptions(0))
framework.ExpectNoError(err)
ginkgo.By("Ensuring resource quota status released the pod usage")
@@ -298,7 +299,7 @@ var _ = SIGDescribe("ResourceQuota", func() {
// On contended servers the service account controller can slow down, leading to the count changing during a run.
// Poll until the count has held steady for 5s (within a 30s budget), assuming that updates arrive at a consistent rate and are not held indefinitely.
wait.Poll(1*time.Second, 30*time.Second, func() (bool, error) {
configmaps, err := f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).List(metav1.ListOptions{})
configmaps, err := f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).List(context.TODO(), metav1.ListOptions{})
framework.ExpectNoError(err)
if len(configmaps.Items) == found {
// loop until the number of configmaps has stabilized for 5 seconds
@@ -331,7 +332,7 @@ var _ = SIGDescribe("ResourceQuota", func() {
ginkgo.By("Creating a ConfigMap")
configMap := newTestConfigMapForQuota("test-configmap")
configMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(configMap)
configMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(context.TODO(), configMap)
framework.ExpectNoError(err)
ginkgo.By("Ensuring resource quota status captures configMap creation")
@@ -345,7 +346,7 @@ var _ = SIGDescribe("ResourceQuota", func() {
framework.ExpectNoError(err)
ginkgo.By("Deleting a ConfigMap")
err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Delete(configMap.Name, nil)
err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Delete(context.TODO(), configMap.Name, nil)
framework.ExpectNoError(err)
ginkgo.By("Ensuring resource quota status released usage")
@@ -381,7 +382,7 @@ var _ = SIGDescribe("ResourceQuota", func() {
ginkgo.By("Creating a ReplicationController")
replicationController := newTestReplicationControllerForQuota("test-rc", "nginx", 0)
replicationController, err = f.ClientSet.CoreV1().ReplicationControllers(f.Namespace.Name).Create(replicationController)
replicationController, err = f.ClientSet.CoreV1().ReplicationControllers(f.Namespace.Name).Create(context.TODO(), replicationController)
framework.ExpectNoError(err)
ginkgo.By("Ensuring resource quota status captures replication controller creation")
@@ -396,7 +397,7 @@ var _ = SIGDescribe("ResourceQuota", func() {
// detached. ReplicationControllers default to "orphan", which
// is different from most resources. (Why? To preserve a common
// workflow from prior to the GC's introduction.)
err = f.ClientSet.CoreV1().ReplicationControllers(f.Namespace.Name).Delete(replicationController.Name, &metav1.DeleteOptions{
err = f.ClientSet.CoreV1().ReplicationControllers(f.Namespace.Name).Delete(context.TODO(), replicationController.Name, &metav1.DeleteOptions{
PropagationPolicy: func() *metav1.DeletionPropagation {
p := metav1.DeletePropagationBackground
return &p
@@ -437,7 +438,7 @@ var _ = SIGDescribe("ResourceQuota", func() {
ginkgo.By("Creating a ReplicaSet")
replicaSet := newTestReplicaSetForQuota("test-rs", "nginx", 0)
replicaSet, err = f.ClientSet.AppsV1().ReplicaSets(f.Namespace.Name).Create(replicaSet)
replicaSet, err = f.ClientSet.AppsV1().ReplicaSets(f.Namespace.Name).Create(context.TODO(), replicaSet)
framework.ExpectNoError(err)
ginkgo.By("Ensuring resource quota status captures replicaset creation")
@@ -447,7 +448,7 @@ var _ = SIGDescribe("ResourceQuota", func() {
framework.ExpectNoError(err)
ginkgo.By("Deleting a ReplicaSet")
err = f.ClientSet.AppsV1().ReplicaSets(f.Namespace.Name).Delete(replicaSet.Name, nil)
err = f.ClientSet.AppsV1().ReplicaSets(f.Namespace.Name).Delete(context.TODO(), replicaSet.Name, nil)
framework.ExpectNoError(err)
ginkgo.By("Ensuring resource quota status released usage")
@@ -485,7 +486,7 @@ var _ = SIGDescribe("ResourceQuota", func() {
ginkgo.By("Creating a PersistentVolumeClaim")
pvc := newTestPersistentVolumeClaimForQuota("test-claim")
pvc, err = f.ClientSet.CoreV1().PersistentVolumeClaims(f.Namespace.Name).Create(pvc)
pvc, err = f.ClientSet.CoreV1().PersistentVolumeClaims(f.Namespace.Name).Create(context.TODO(), pvc)
framework.ExpectNoError(err)
ginkgo.By("Ensuring resource quota status captures persistent volume claim creation")
@@ -496,7 +497,7 @@ var _ = SIGDescribe("ResourceQuota", func() {
framework.ExpectNoError(err)
ginkgo.By("Deleting a PersistentVolumeClaim")
err = f.ClientSet.CoreV1().PersistentVolumeClaims(f.Namespace.Name).Delete(pvc.Name, nil)
err = f.ClientSet.CoreV1().PersistentVolumeClaims(f.Namespace.Name).Delete(context.TODO(), pvc.Name, nil)
framework.ExpectNoError(err)
ginkgo.By("Ensuring resource quota status released usage")
@@ -539,7 +540,7 @@ var _ = SIGDescribe("ResourceQuota", func() {
ginkgo.By("Creating a PersistentVolumeClaim with storage class")
pvc := newTestPersistentVolumeClaimForQuota("test-claim")
pvc.Spec.StorageClassName = &classGold
pvc, err = f.ClientSet.CoreV1().PersistentVolumeClaims(f.Namespace.Name).Create(pvc)
pvc, err = f.ClientSet.CoreV1().PersistentVolumeClaims(f.Namespace.Name).Create(context.TODO(), pvc)
framework.ExpectNoError(err)
ginkgo.By("Ensuring resource quota status captures persistent volume claim creation")
@@ -553,7 +554,7 @@ var _ = SIGDescribe("ResourceQuota", func() {
framework.ExpectNoError(err)
ginkgo.By("Deleting a PersistentVolumeClaim")
err = f.ClientSet.CoreV1().PersistentVolumeClaims(f.Namespace.Name).Delete(pvc.Name, nil)
err = f.ClientSet.CoreV1().PersistentVolumeClaims(f.Namespace.Name).Delete(context.TODO(), pvc.Name, nil)
framework.ExpectNoError(err)
ginkgo.By("Ensuring resource quota status released usage")
@@ -587,7 +588,7 @@ var _ = SIGDescribe("ResourceQuota", func() {
framework.ExpectNoError(err)
err = updateResourceQuotaUntilUsageAppears(f.ClientSet, f.Namespace.Name, quotaName, v1.ResourceName(countResourceName))
framework.ExpectNoError(err)
err = f.ClientSet.CoreV1().ResourceQuotas(f.Namespace.Name).Delete(quotaName, nil)
err = f.ClientSet.CoreV1().ResourceQuotas(f.Namespace.Name).Delete(context.TODO(), quotaName, nil)
framework.ExpectNoError(err)
ginkgo.By("Counting existing ResourceQuota")
@@ -689,7 +690,7 @@ var _ = SIGDescribe("ResourceQuota", func() {
limits[v1.ResourceCPU] = resource.MustParse("1")
limits[v1.ResourceMemory] = resource.MustParse("400Mi")
pod := newTestPodForQuota(f, podName, requests, limits)
_, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(pod)
_, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(context.TODO(), pod)
framework.ExpectNoError(err)
ginkgo.By("Ensuring resource quota with not terminating scope captures the pod usage")
@@ -711,7 +712,7 @@ var _ = SIGDescribe("ResourceQuota", func() {
framework.ExpectNoError(err)
ginkgo.By("Deleting the pod")
err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(podName, metav1.NewDeleteOptions(0))
err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(context.TODO(), podName, metav1.NewDeleteOptions(0))
framework.ExpectNoError(err)
ginkgo.By("Ensuring resource quota status released the pod usage")
@@ -728,7 +729,7 @@ var _ = SIGDescribe("ResourceQuota", func() {
pod = newTestPodForQuota(f, podName, requests, limits)
activeDeadlineSeconds := int64(3600)
pod.Spec.ActiveDeadlineSeconds = &activeDeadlineSeconds
_, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(pod)
_, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(context.TODO(), pod)
framework.ExpectNoError(err)
ginkgo.By("Ensuring resource quota with terminating scope captures the pod usage")
@@ -750,7 +751,7 @@ var _ = SIGDescribe("ResourceQuota", func() {
framework.ExpectNoError(err)
ginkgo.By("Deleting the pod")
err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(podName, metav1.NewDeleteOptions(0))
err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(context.TODO(), podName, metav1.NewDeleteOptions(0))
framework.ExpectNoError(err)
ginkgo.By("Ensuring resource quota status released the pod usage")
@@ -793,7 +794,7 @@ var _ = SIGDescribe("ResourceQuota", func() {
ginkgo.By("Creating a best-effort pod")
pod := newTestPodForQuota(f, podName, v1.ResourceList{}, v1.ResourceList{})
pod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(pod)
pod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(context.TODO(), pod)
framework.ExpectNoError(err)
ginkgo.By("Ensuring resource quota with best effort scope captures the pod usage")
@@ -807,7 +808,7 @@ var _ = SIGDescribe("ResourceQuota", func() {
framework.ExpectNoError(err)
ginkgo.By("Deleting the pod")
err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(pod.Name, metav1.NewDeleteOptions(0))
err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(context.TODO(), pod.Name, metav1.NewDeleteOptions(0))
framework.ExpectNoError(err)
ginkgo.By("Ensuring resource quota status released the pod usage")
@@ -823,7 +824,7 @@ var _ = SIGDescribe("ResourceQuota", func() {
limits[v1.ResourceCPU] = resource.MustParse("1")
limits[v1.ResourceMemory] = resource.MustParse("400Mi")
pod = newTestPodForQuota(f, "burstable-pod", requests, limits)
pod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(pod)
pod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(context.TODO(), pod)
framework.ExpectNoError(err)
ginkgo.By("Ensuring resource quota with not best effort scope captures the pod usage")
@@ -837,7 +838,7 @@ var _ = SIGDescribe("ResourceQuota", func() {
framework.ExpectNoError(err)
ginkgo.By("Deleting the pod")
err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(pod.Name, metav1.NewDeleteOptions(0))
err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(context.TODO(), pod.Name, metav1.NewDeleteOptions(0))
framework.ExpectNoError(err)
ginkgo.By("Ensuring resource quota status released the pod usage")
@@ -871,7 +872,7 @@ var _ = SIGDescribe("ResourceQuota", func() {
framework.ExpectNoError(err)
ginkgo.By("Getting a ResourceQuota")
resourceQuotaResult, err := client.CoreV1().ResourceQuotas(ns).Get(quotaName, metav1.GetOptions{})
resourceQuotaResult, err := client.CoreV1().ResourceQuotas(ns).Get(context.TODO(), quotaName, metav1.GetOptions{})
framework.ExpectNoError(err)
framework.ExpectEqual(resourceQuotaResult.Spec.Hard[v1.ResourceCPU], resource.MustParse("1"))
framework.ExpectEqual(resourceQuotaResult.Spec.Hard[v1.ResourceMemory], resource.MustParse("500Mi"))
@@ -879,13 +880,13 @@ var _ = SIGDescribe("ResourceQuota", func() {
ginkgo.By("Updating a ResourceQuota")
resourceQuota.Spec.Hard[v1.ResourceCPU] = resource.MustParse("2")
resourceQuota.Spec.Hard[v1.ResourceMemory] = resource.MustParse("1Gi")
resourceQuotaResult, err = client.CoreV1().ResourceQuotas(ns).Update(resourceQuota)
resourceQuotaResult, err = client.CoreV1().ResourceQuotas(ns).Update(context.TODO(), resourceQuota)
framework.ExpectNoError(err)
framework.ExpectEqual(resourceQuotaResult.Spec.Hard[v1.ResourceCPU], resource.MustParse("2"))
framework.ExpectEqual(resourceQuotaResult.Spec.Hard[v1.ResourceMemory], resource.MustParse("1Gi"))
ginkgo.By("Verifying a ResourceQuota was modified")
resourceQuotaResult, err = client.CoreV1().ResourceQuotas(ns).Get(quotaName, metav1.GetOptions{})
resourceQuotaResult, err = client.CoreV1().ResourceQuotas(ns).Get(context.TODO(), quotaName, metav1.GetOptions{})
framework.ExpectNoError(err)
framework.ExpectEqual(resourceQuotaResult.Spec.Hard[v1.ResourceCPU], resource.MustParse("2"))
framework.ExpectEqual(resourceQuotaResult.Spec.Hard[v1.ResourceMemory], resource.MustParse("1Gi"))
@@ -895,7 +896,7 @@ var _ = SIGDescribe("ResourceQuota", func() {
framework.ExpectNoError(err)
ginkgo.By("Verifying the deleted ResourceQuota")
_, err = client.CoreV1().ResourceQuotas(ns).Get(quotaName, metav1.GetOptions{})
_, err = client.CoreV1().ResourceQuotas(ns).Get(context.TODO(), quotaName, metav1.GetOptions{})
framework.ExpectEqual(apierrors.IsNotFound(err), true)
})
})
@@ -923,7 +924,7 @@ var _ = SIGDescribe("ResourceQuota [Feature:ScopeSelectors]", func() {
ginkgo.By("Creating a best-effort pod")
pod := newTestPodForQuota(f, podName, v1.ResourceList{}, v1.ResourceList{})
pod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(pod)
pod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(context.TODO(), pod)
framework.ExpectNoError(err)
ginkgo.By("Ensuring resource quota with best effort scope captures the pod usage")
@@ -937,7 +938,7 @@ var _ = SIGDescribe("ResourceQuota [Feature:ScopeSelectors]", func() {
framework.ExpectNoError(err)
ginkgo.By("Deleting the pod")
err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(pod.Name, metav1.NewDeleteOptions(0))
err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(context.TODO(), pod.Name, metav1.NewDeleteOptions(0))
framework.ExpectNoError(err)
ginkgo.By("Ensuring resource quota status released the pod usage")
@@ -953,7 +954,7 @@ var _ = SIGDescribe("ResourceQuota [Feature:ScopeSelectors]", func() {
limits[v1.ResourceCPU] = resource.MustParse("1")
limits[v1.ResourceMemory] = resource.MustParse("400Mi")
pod = newTestPodForQuota(f, "burstable-pod", requests, limits)
pod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(pod)
pod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(context.TODO(), pod)
framework.ExpectNoError(err)
ginkgo.By("Ensuring resource quota with not best effort scope captures the pod usage")
@@ -967,7 +968,7 @@ var _ = SIGDescribe("ResourceQuota [Feature:ScopeSelectors]", func() {
framework.ExpectNoError(err)
ginkgo.By("Deleting the pod")
err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(pod.Name, metav1.NewDeleteOptions(0))
err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(context.TODO(), pod.Name, metav1.NewDeleteOptions(0))
framework.ExpectNoError(err)
ginkgo.By("Ensuring resource quota status released the pod usage")
@@ -1005,7 +1006,7 @@ var _ = SIGDescribe("ResourceQuota [Feature:ScopeSelectors]", func() {
limits[v1.ResourceCPU] = resource.MustParse("1")
limits[v1.ResourceMemory] = resource.MustParse("400Mi")
pod := newTestPodForQuota(f, podName, requests, limits)
_, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(pod)
_, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(context.TODO(), pod)
framework.ExpectNoError(err)
ginkgo.By("Ensuring resource quota with not terminating scope captures the pod usage")
@@ -1027,7 +1028,7 @@ var _ = SIGDescribe("ResourceQuota [Feature:ScopeSelectors]", func() {
framework.ExpectNoError(err)
ginkgo.By("Deleting the pod")
err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(podName, metav1.NewDeleteOptions(0))
err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(context.TODO(), podName, metav1.NewDeleteOptions(0))
framework.ExpectNoError(err)
ginkgo.By("Ensuring resource quota status released the pod usage")
@@ -1044,7 +1045,7 @@ var _ = SIGDescribe("ResourceQuota [Feature:ScopeSelectors]", func() {
pod = newTestPodForQuota(f, podName, requests, limits)
activeDeadlineSeconds := int64(3600)
pod.Spec.ActiveDeadlineSeconds = &activeDeadlineSeconds
_, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(pod)
_, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(context.TODO(), pod)
framework.ExpectNoError(err)
ginkgo.By("Ensuring resource quota with terminating scope captures the pod usage")
@@ -1066,7 +1067,7 @@ var _ = SIGDescribe("ResourceQuota [Feature:ScopeSelectors]", func() {
framework.ExpectNoError(err)
ginkgo.By("Deleting the pod")
err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(podName, metav1.NewDeleteOptions(0))
err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(context.TODO(), podName, metav1.NewDeleteOptions(0))
framework.ExpectNoError(err)
ginkgo.By("Ensuring resource quota status released the pod usage")
@@ -1085,7 +1086,7 @@ var _ = SIGDescribe("ResourceQuota [Feature:PodPriority]", func() {
ginkgo.It("should verify ResourceQuota's priority class scope (quota set to pod count: 1) against a pod with same priority class.", func() {
_, err := f.ClientSet.SchedulingV1().PriorityClasses().Create(&schedulingv1.PriorityClass{ObjectMeta: metav1.ObjectMeta{Name: "pclass1"}, Value: int32(1000)})
_, err := f.ClientSet.SchedulingV1().PriorityClasses().Create(context.TODO(), &schedulingv1.PriorityClass{ObjectMeta: metav1.ObjectMeta{Name: "pclass1"}, Value: int32(1000)})
framework.ExpectEqual(err == nil || apierrors.IsAlreadyExists(err), true)
hard := v1.ResourceList{}
@@ -1104,7 +1105,7 @@ var _ = SIGDescribe("ResourceQuota [Feature:PodPriority]", func() {
ginkgo.By("Creating a pod with priority class")
podName := "testpod-pclass1"
pod := newTestPodForQuotaWithPriority(f, podName, v1.ResourceList{}, v1.ResourceList{}, "pclass1")
pod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(pod)
pod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(context.TODO(), pod)
framework.ExpectNoError(err)
ginkgo.By("Ensuring resource quota with priority class scope captures the pod usage")
@@ -1113,7 +1114,7 @@ var _ = SIGDescribe("ResourceQuota [Feature:PodPriority]", func() {
framework.ExpectNoError(err)
ginkgo.By("Deleting the pod")
err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(pod.Name, metav1.NewDeleteOptions(0))
err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(context.TODO(), pod.Name, metav1.NewDeleteOptions(0))
framework.ExpectNoError(err)
ginkgo.By("Ensuring resource quota status released the pod usage")
@@ -1124,7 +1125,7 @@ var _ = SIGDescribe("ResourceQuota [Feature:PodPriority]", func() {
ginkgo.It("should verify ResourceQuota's priority class scope (quota set to pod count: 1) against 2 pods with same priority class.", func() {
_, err := f.ClientSet.SchedulingV1().PriorityClasses().Create(&schedulingv1.PriorityClass{ObjectMeta: metav1.ObjectMeta{Name: "pclass2"}, Value: int32(1000)})
_, err := f.ClientSet.SchedulingV1().PriorityClasses().Create(context.TODO(), &schedulingv1.PriorityClass{ObjectMeta: metav1.ObjectMeta{Name: "pclass2"}, Value: int32(1000)})
framework.ExpectEqual(err == nil || apierrors.IsAlreadyExists(err), true)
hard := v1.ResourceList{}
@@ -1143,7 +1144,7 @@ var _ = SIGDescribe("ResourceQuota [Feature:PodPriority]", func() {
ginkgo.By("Creating first pod with priority class should pass")
podName := "testpod-pclass2-1"
pod := newTestPodForQuotaWithPriority(f, podName, v1.ResourceList{}, v1.ResourceList{}, "pclass2")
pod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(pod)
pod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(context.TODO(), pod)
framework.ExpectNoError(err)
ginkgo.By("Ensuring resource quota with priority class scope captures the pod usage")
@@ -1154,11 +1155,11 @@ var _ = SIGDescribe("ResourceQuota [Feature:PodPriority]", func() {
ginkgo.By("Creating 2nd pod with priority class should fail")
podName2 := "testpod-pclass2-2"
pod2 := newTestPodForQuotaWithPriority(f, podName2, v1.ResourceList{}, v1.ResourceList{}, "pclass2")
_, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(pod2)
_, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(context.TODO(), pod2)
framework.ExpectError(err)
ginkgo.By("Deleting first pod")
err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(pod.Name, metav1.NewDeleteOptions(0))
err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(context.TODO(), pod.Name, metav1.NewDeleteOptions(0))
framework.ExpectNoError(err)
ginkgo.By("Ensuring resource quota status released the pod usage")
@@ -1169,7 +1170,7 @@ var _ = SIGDescribe("ResourceQuota [Feature:PodPriority]", func() {
ginkgo.It("should verify ResourceQuota's priority class scope (quota set to pod count: 1) against 2 pods with different priority class.", func() {
_, err := f.ClientSet.SchedulingV1().PriorityClasses().Create(&schedulingv1.PriorityClass{ObjectMeta: metav1.ObjectMeta{Name: "pclass3"}, Value: int32(1000)})
_, err := f.ClientSet.SchedulingV1().PriorityClasses().Create(context.TODO(), &schedulingv1.PriorityClass{ObjectMeta: metav1.ObjectMeta{Name: "pclass3"}, Value: int32(1000)})
framework.ExpectEqual(err == nil || apierrors.IsAlreadyExists(err), true)
hard := v1.ResourceList{}
@@ -1188,7 +1189,7 @@ var _ = SIGDescribe("ResourceQuota [Feature:PodPriority]", func() {
ginkgo.By("Creating a pod with priority class with pclass3")
podName := "testpod-pclass3-1"
pod := newTestPodForQuotaWithPriority(f, podName, v1.ResourceList{}, v1.ResourceList{}, "pclass3")
pod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(pod)
pod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(context.TODO(), pod)
framework.ExpectNoError(err)
ginkgo.By("Ensuring resource quota with priority class scope remains same")
@@ -1199,7 +1200,7 @@ var _ = SIGDescribe("ResourceQuota [Feature:PodPriority]", func() {
ginkgo.By("Creating a 2nd pod with priority class pclass3")
podName2 := "testpod-pclass2-2"
pod2 := newTestPodForQuotaWithPriority(f, podName2, v1.ResourceList{}, v1.ResourceList{}, "pclass3")
pod2, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(pod2)
pod2, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(context.TODO(), pod2)
framework.ExpectNoError(err)
ginkgo.By("Ensuring resource quota with priority class scope remains same")
@@ -1208,17 +1209,17 @@ var _ = SIGDescribe("ResourceQuota [Feature:PodPriority]", func() {
framework.ExpectNoError(err)
ginkgo.By("Deleting both pods")
err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(pod.Name, metav1.NewDeleteOptions(0))
err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(context.TODO(), pod.Name, metav1.NewDeleteOptions(0))
framework.ExpectNoError(err)
err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(pod2.Name, metav1.NewDeleteOptions(0))
err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(context.TODO(), pod2.Name, metav1.NewDeleteOptions(0))
framework.ExpectNoError(err)
})
ginkgo.It("should verify ResourceQuota's multiple priority class scope (quota set to pod count: 2) against 2 pods with same priority classes.", func() {
_, err := f.ClientSet.SchedulingV1().PriorityClasses().Create(&schedulingv1.PriorityClass{ObjectMeta: metav1.ObjectMeta{Name: "pclass5"}, Value: int32(1000)})
_, err := f.ClientSet.SchedulingV1().PriorityClasses().Create(context.TODO(), &schedulingv1.PriorityClass{ObjectMeta: metav1.ObjectMeta{Name: "pclass5"}, Value: int32(1000)})
framework.ExpectEqual(err == nil || apierrors.IsAlreadyExists(err), true)
_, err = f.ClientSet.SchedulingV1().PriorityClasses().Create(&schedulingv1.PriorityClass{ObjectMeta: metav1.ObjectMeta{Name: "pclass6"}, Value: int32(1000)})
_, err = f.ClientSet.SchedulingV1().PriorityClasses().Create(context.TODO(), &schedulingv1.PriorityClass{ObjectMeta: metav1.ObjectMeta{Name: "pclass6"}, Value: int32(1000)})
framework.ExpectEqual(err == nil || apierrors.IsAlreadyExists(err), true)
hard := v1.ResourceList{}
@@ -1237,7 +1238,7 @@ var _ = SIGDescribe("ResourceQuota [Feature:PodPriority]", func() {
ginkgo.By("Creating a pod with priority class pclass5")
podName := "testpod-pclass5"
pod := newTestPodForQuotaWithPriority(f, podName, v1.ResourceList{}, v1.ResourceList{}, "pclass5")
pod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(pod)
pod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(context.TODO(), pod)
framework.ExpectNoError(err)
ginkgo.By("Ensuring resource quota with priority class is updated with the pod usage")
@@ -1248,7 +1249,7 @@ var _ = SIGDescribe("ResourceQuota [Feature:PodPriority]", func() {
ginkgo.By("Creating 2nd pod with priority class pclass6")
podName2 := "testpod-pclass6"
pod2 := newTestPodForQuotaWithPriority(f, podName2, v1.ResourceList{}, v1.ResourceList{}, "pclass6")
pod2, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(pod2)
pod2, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(context.TODO(), pod2)
framework.ExpectNoError(err)
ginkgo.By("Ensuring resource quota with priority class scope is updated with the pod usage")
@@ -1257,9 +1258,9 @@ var _ = SIGDescribe("ResourceQuota [Feature:PodPriority]", func() {
framework.ExpectNoError(err)
ginkgo.By("Deleting both pods")
err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(pod.Name, metav1.NewDeleteOptions(0))
err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(context.TODO(), pod.Name, metav1.NewDeleteOptions(0))
framework.ExpectNoError(err)
err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(pod2.Name, metav1.NewDeleteOptions(0))
err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(context.TODO(), pod2.Name, metav1.NewDeleteOptions(0))
framework.ExpectNoError(err)
ginkgo.By("Ensuring resource quota status released the pod usage")
@@ -1270,7 +1271,7 @@ var _ = SIGDescribe("ResourceQuota [Feature:PodPriority]", func() {
ginkgo.It("should verify ResourceQuota's priority class scope (quota set to pod count: 1) against a pod with different priority class (ScopeSelectorOpNotIn).", func() {
_, err := f.ClientSet.SchedulingV1().PriorityClasses().Create(&schedulingv1.PriorityClass{ObjectMeta: metav1.ObjectMeta{Name: "pclass7"}, Value: int32(1000)})
_, err := f.ClientSet.SchedulingV1().PriorityClasses().Create(context.TODO(), &schedulingv1.PriorityClass{ObjectMeta: metav1.ObjectMeta{Name: "pclass7"}, Value: int32(1000)})
framework.ExpectEqual(err == nil || apierrors.IsAlreadyExists(err), true)
hard := v1.ResourceList{}
@@ -1289,7 +1290,7 @@ var _ = SIGDescribe("ResourceQuota [Feature:PodPriority]", func() {
ginkgo.By("Creating a pod with priority class pclass7")
podName := "testpod-pclass7"
pod := newTestPodForQuotaWithPriority(f, podName, v1.ResourceList{}, v1.ResourceList{}, "pclass7")
pod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(pod)
pod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(context.TODO(), pod)
framework.ExpectNoError(err)
ginkgo.By("Ensuring resource quota with priority class is not used")
@@ -1298,13 +1299,13 @@ var _ = SIGDescribe("ResourceQuota [Feature:PodPriority]", func() {
framework.ExpectNoError(err)
ginkgo.By("Deleting the pod")
err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(pod.Name, metav1.NewDeleteOptions(0))
err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(context.TODO(), pod.Name, metav1.NewDeleteOptions(0))
framework.ExpectNoError(err)
})
ginkgo.It("should verify ResourceQuota's priority class scope (quota set to pod count: 1) against a pod with different priority class (ScopeSelectorOpExists).", func() {
_, err := f.ClientSet.SchedulingV1().PriorityClasses().Create(&schedulingv1.PriorityClass{ObjectMeta: metav1.ObjectMeta{Name: "pclass8"}, Value: int32(1000)})
_, err := f.ClientSet.SchedulingV1().PriorityClasses().Create(context.TODO(), &schedulingv1.PriorityClass{ObjectMeta: metav1.ObjectMeta{Name: "pclass8"}, Value: int32(1000)})
framework.ExpectEqual(err == nil || apierrors.IsAlreadyExists(err), true)
hard := v1.ResourceList{}
@@ -1323,7 +1324,7 @@ var _ = SIGDescribe("ResourceQuota [Feature:PodPriority]", func() {
ginkgo.By("Creating a pod with priority class pclass8")
podName := "testpod-pclass8"
pod := newTestPodForQuotaWithPriority(f, podName, v1.ResourceList{}, v1.ResourceList{}, "pclass8")
pod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(pod)
pod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(context.TODO(), pod)
framework.ExpectNoError(err)
ginkgo.By("Ensuring resource quota with priority class is updated with the pod usage")
@@ -1332,7 +1333,7 @@ var _ = SIGDescribe("ResourceQuota [Feature:PodPriority]", func() {
framework.ExpectNoError(err)
ginkgo.By("Deleting the pod")
err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(pod.Name, metav1.NewDeleteOptions(0))
err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(context.TODO(), pod.Name, metav1.NewDeleteOptions(0))
framework.ExpectNoError(err)
ginkgo.By("Ensuring resource quota status released the pod usage")
@@ -1343,7 +1344,7 @@ var _ = SIGDescribe("ResourceQuota [Feature:PodPriority]", func() {
ginkgo.It("should verify ResourceQuota's priority class scope (cpu, memory quota set) against a pod with same priority class.", func() {
_, err := f.ClientSet.SchedulingV1().PriorityClasses().Create(&schedulingv1.PriorityClass{ObjectMeta: metav1.ObjectMeta{Name: "pclass9"}, Value: int32(1000)})
_, err := f.ClientSet.SchedulingV1().PriorityClasses().Create(context.TODO(), &schedulingv1.PriorityClass{ObjectMeta: metav1.ObjectMeta{Name: "pclass9"}, Value: int32(1000)})
framework.ExpectEqual(err == nil || apierrors.IsAlreadyExists(err), true)
hard := v1.ResourceList{}
@@ -1377,7 +1378,7 @@ var _ = SIGDescribe("ResourceQuota [Feature:PodPriority]", func() {
limit[v1.ResourceMemory] = resource.MustParse("2Gi")
pod := newTestPodForQuotaWithPriority(f, podName, request, limit, "pclass9")
pod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(pod)
pod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(context.TODO(), pod)
framework.ExpectNoError(err)
ginkgo.By("Ensuring resource quota with priority class scope captures the pod usage")
@@ -1390,7 +1391,7 @@ var _ = SIGDescribe("ResourceQuota [Feature:PodPriority]", func() {
framework.ExpectNoError(err)
ginkgo.By("Deleting the pod")
err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(pod.Name, metav1.NewDeleteOptions(0))
err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(context.TODO(), pod.Name, metav1.NewDeleteOptions(0))
framework.ExpectNoError(err)
ginkgo.By("Ensuring resource quota status released the pod usage")
@@ -1656,12 +1657,12 @@ func newTestSecretForQuota(name string) *v1.Secret {
// createResourceQuota in the specified namespace
func createResourceQuota(c clientset.Interface, namespace string, resourceQuota *v1.ResourceQuota) (*v1.ResourceQuota, error) {
return c.CoreV1().ResourceQuotas(namespace).Create(resourceQuota)
return c.CoreV1().ResourceQuotas(namespace).Create(context.TODO(), resourceQuota)
}
// deleteResourceQuota with the specified name
func deleteResourceQuota(c clientset.Interface, namespace, name string) error {
return c.CoreV1().ResourceQuotas(namespace).Delete(name, nil)
return c.CoreV1().ResourceQuotas(namespace).Delete(context.TODO(), name, nil)
}
// countResourceQuota counts the number of ResourceQuota in the specified namespace
@@ -1670,7 +1671,7 @@ func deleteResourceQuota(c clientset.Interface, namespace, name string) error {
func countResourceQuota(c clientset.Interface, namespace string) (int, error) {
found, unchanged := 0, 0
return found, wait.Poll(1*time.Second, 30*time.Second, func() (bool, error) {
resourceQuotas, err := c.CoreV1().ResourceQuotas(namespace).List(metav1.ListOptions{})
resourceQuotas, err := c.CoreV1().ResourceQuotas(namespace).List(context.TODO(), metav1.ListOptions{})
framework.ExpectNoError(err)
if len(resourceQuotas.Items) == found {
// loop until the number of resource quotas has stabilized for 5 seconds
@@ -1686,7 +1687,7 @@ func countResourceQuota(c clientset.Interface, namespace string) (int, error) {
// wait for resource quota status to show the expected used resources value
func waitForResourceQuota(c clientset.Interface, ns, quotaName string, used v1.ResourceList) error {
return wait.Poll(framework.Poll, resourceQuotaTimeout, func() (bool, error) {
resourceQuota, err := c.CoreV1().ResourceQuotas(ns).Get(quotaName, metav1.GetOptions{})
resourceQuota, err := c.CoreV1().ResourceQuotas(ns).Get(context.TODO(), quotaName, metav1.GetOptions{})
if err != nil {
return false, err
}
@@ -1709,7 +1710,7 @@ func waitForResourceQuota(c clientset.Interface, ns, quotaName string, used v1.R
// for the specific resource name.
func updateResourceQuotaUntilUsageAppears(c clientset.Interface, ns, quotaName string, resourceName v1.ResourceName) error {
return wait.Poll(framework.Poll, 1*time.Minute, func() (bool, error) {
resourceQuota, err := c.CoreV1().ResourceQuotas(ns).Get(quotaName, metav1.GetOptions{})
resourceQuota, err := c.CoreV1().ResourceQuotas(ns).Get(context.TODO(), quotaName, metav1.GetOptions{})
if err != nil {
return false, err
}
@@ -1722,7 +1723,7 @@ func updateResourceQuotaUntilUsageAppears(c clientset.Interface, ns, quotaName s
current := resourceQuota.Spec.Hard[resourceName]
current.Add(resource.MustParse("1"))
resourceQuota.Spec.Hard[resourceName] = current
_, err = c.CoreV1().ResourceQuotas(ns).Update(resourceQuota)
_, err = c.CoreV1().ResourceQuotas(ns).Update(context.TODO(), resourceQuota)
// ignore conflicts, since someone else may have already updated it.
if apierrors.IsConflict(err) {
return false, nil
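The helper above tolerates 409 Conflicts by hand inside its own poll; client-go also ships retry.RetryOnConflict, which the webhook tests later in this commit already use. A hedged sketch of the same hard-limit bump written with that helper, assuming the context-taking Get/Update shapes shown above; bumpQuotaHard is a hypothetical name and the +1 increment simply mirrors the code above.

package sketch

import (
	"context"

	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/util/retry"
)

// bumpQuotaHard is an illustrative sketch: it raises the hard limit for one
// resource by 1, re-reading the object and retrying whenever a concurrent
// writer causes a 409 Conflict.
func bumpQuotaHard(c kubernetes.Interface, ns, quotaName string, name corev1.ResourceName) error {
	return retry.RetryOnConflict(retry.DefaultRetry, func() error {
		rq, err := c.CoreV1().ResourceQuotas(ns).Get(context.TODO(), quotaName, metav1.GetOptions{})
		if err != nil {
			return err
		}
		current := rq.Spec.Hard[name]
		current.Add(resource.MustParse("1"))
		rq.Spec.Hard[name] = current
		_, err = c.CoreV1().ResourceQuotas(ns).Update(context.TODO(), rq)
		return err
	})
}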

View File

@@ -55,7 +55,7 @@ var _ = SIGDescribe("Servers with support for Table transformation", func() {
podName := "pod-1"
framework.Logf("Creating pod %s", podName)
_, err := c.CoreV1().Pods(ns).Create(newTablePod(podName))
_, err := c.CoreV1().Pods(ns).Create(context.TODO(), newTablePod(podName))
framework.ExpectNoError(err, "failed to create pod %s in namespace: %s", podName, ns)
table := &metav1beta1.Table{}
@@ -83,7 +83,7 @@ var _ = SIGDescribe("Servers with support for Table transformation", func() {
ginkgo.By("creating a large number of resources")
workqueue.ParallelizeUntil(context.TODO(), 5, 20, func(i int) {
for tries := 3; tries >= 0; tries-- {
_, err := client.Create(&v1.PodTemplate{
_, err := client.Create(context.TODO(), &v1.PodTemplate{
ObjectMeta: metav1.ObjectMeta{
Name: fmt.Sprintf("template-%04d", i),
},

View File

@@ -17,6 +17,7 @@ limitations under the License.
package apimachinery
import (
"context"
"fmt"
"math/rand"
"time"
@@ -84,7 +85,7 @@ var _ = SIGDescribe("Watchers", func() {
}
ginkgo.By("creating a configmap with label A and ensuring the correct watchers observe the notification")
testConfigMapA, err = c.CoreV1().ConfigMaps(ns).Create(testConfigMapA)
testConfigMapA, err = c.CoreV1().ConfigMaps(ns).Create(context.TODO(), testConfigMapA)
framework.ExpectNoError(err, "failed to create a configmap with label %s in namespace: %s", multipleWatchersLabelValueA, ns)
expectEvent(watchA, watch.Added, testConfigMapA)
expectEvent(watchAB, watch.Added, testConfigMapA)
@@ -109,21 +110,21 @@ var _ = SIGDescribe("Watchers", func() {
expectNoEvent(watchB, watch.Modified, testConfigMapA)
ginkgo.By("deleting configmap A and ensuring the correct watchers observe the notification")
err = c.CoreV1().ConfigMaps(ns).Delete(testConfigMapA.GetName(), nil)
err = c.CoreV1().ConfigMaps(ns).Delete(context.TODO(), testConfigMapA.GetName(), nil)
framework.ExpectNoError(err, "failed to delete configmap %s in namespace: %s", testConfigMapA.GetName(), ns)
expectEvent(watchA, watch.Deleted, nil)
expectEvent(watchAB, watch.Deleted, nil)
expectNoEvent(watchB, watch.Deleted, nil)
ginkgo.By("creating a configmap with label B and ensuring the correct watchers observe the notification")
testConfigMapB, err = c.CoreV1().ConfigMaps(ns).Create(testConfigMapB)
testConfigMapB, err = c.CoreV1().ConfigMaps(ns).Create(context.TODO(), testConfigMapB)
framework.ExpectNoError(err, "failed to create configmap %s in namespace: %s", testConfigMapB, ns)
expectEvent(watchB, watch.Added, testConfigMapB)
expectEvent(watchAB, watch.Added, testConfigMapB)
expectNoEvent(watchA, watch.Added, testConfigMapB)
ginkgo.By("deleting configmap B and ensuring the correct watchers observe the notification")
err = c.CoreV1().ConfigMaps(ns).Delete(testConfigMapB.GetName(), nil)
err = c.CoreV1().ConfigMaps(ns).Delete(context.TODO(), testConfigMapB.GetName(), nil)
framework.ExpectNoError(err, "failed to delete configmap %s in namespace: %s", testConfigMapB.GetName(), ns)
expectEvent(watchB, watch.Deleted, nil)
expectEvent(watchAB, watch.Deleted, nil)
@@ -149,7 +150,7 @@ var _ = SIGDescribe("Watchers", func() {
}
ginkgo.By("creating a new configmap")
testConfigMap, err := c.CoreV1().ConfigMaps(ns).Create(testConfigMap)
testConfigMap, err := c.CoreV1().ConfigMaps(ns).Create(context.TODO(), testConfigMap)
framework.ExpectNoError(err, "failed to create configmap %s in namespace: %s", testConfigMap.GetName(), ns)
ginkgo.By("modifying the configmap once")
@@ -165,7 +166,7 @@ var _ = SIGDescribe("Watchers", func() {
framework.ExpectNoError(err, "failed to update configmap %s in namespace %s a second time", testConfigMap.GetName(), ns)
ginkgo.By("deleting the configmap")
err = c.CoreV1().ConfigMaps(ns).Delete(testConfigMap.GetName(), nil)
err = c.CoreV1().ConfigMaps(ns).Delete(context.TODO(), testConfigMap.GetName(), nil)
framework.ExpectNoError(err, "failed to delete configmap %s in namespace: %s", testConfigMap.GetName(), ns)
ginkgo.By("creating a watch on configmaps from the resource version returned by the first update")
@@ -202,7 +203,7 @@ var _ = SIGDescribe("Watchers", func() {
framework.ExpectNoError(err, "failed to create a watch on configmap with label: %s", watchRestartedLabelValue)
ginkgo.By("creating a new configmap")
testConfigMap, err = c.CoreV1().ConfigMaps(ns).Create(testConfigMap)
testConfigMap, err = c.CoreV1().ConfigMaps(ns).Create(context.TODO(), testConfigMap)
framework.ExpectNoError(err, "failed to create configmap %s in namespace: %s", configMapName, ns)
ginkgo.By("modifying the configmap once")
@@ -234,7 +235,7 @@ var _ = SIGDescribe("Watchers", func() {
framework.ExpectNoError(err, "failed to create a new watch on configmaps from the last resource version %s observed by the first watch", lastEventConfigMap.ObjectMeta.ResourceVersion)
ginkgo.By("deleting the configmap")
err = c.CoreV1().ConfigMaps(ns).Delete(testConfigMap.GetName(), nil)
err = c.CoreV1().ConfigMaps(ns).Delete(context.TODO(), testConfigMap.GetName(), nil)
framework.ExpectNoError(err, "failed to delete configmap %s in namespace: %s", configMapName, ns)
ginkgo.By("Expecting to observe notifications for all changes to the configmap since the first watch closed")
@@ -267,7 +268,7 @@ var _ = SIGDescribe("Watchers", func() {
framework.ExpectNoError(err, "failed to create a watch on configmap with label: %s", toBeChangedLabelValue)
ginkgo.By("creating a new configmap")
testConfigMap, err = c.CoreV1().ConfigMaps(ns).Create(testConfigMap)
testConfigMap, err = c.CoreV1().ConfigMaps(ns).Create(context.TODO(), testConfigMap)
framework.ExpectNoError(err, "failed to create configmap %s in namespace: %s", configMapName, ns)
ginkgo.By("modifying the configmap once")
@@ -309,7 +310,7 @@ var _ = SIGDescribe("Watchers", func() {
framework.ExpectNoError(err, "failed to update configmap %s in namespace %s a third time", configMapName, ns)
ginkgo.By("deleting the configmap")
err = c.CoreV1().ConfigMaps(ns).Delete(testConfigMap.GetName(), nil)
err = c.CoreV1().ConfigMaps(ns).Delete(context.TODO(), testConfigMap.GetName(), nil)
framework.ExpectNoError(err, "failed to delete configmap %s in namespace: %s", configMapName, ns)
ginkgo.By("Expecting to observe an add notification for the watched object when the label value was restored")
@@ -344,7 +345,7 @@ var _ = SIGDescribe("Watchers", func() {
wcs := []watch.Interface{}
resourceVersion := "0"
for i := 0; i < iterations; i++ {
wc, err := c.CoreV1().ConfigMaps(ns).Watch(metav1.ListOptions{ResourceVersion: resourceVersion})
wc, err := c.CoreV1().ConfigMaps(ns).Watch(context.TODO(), metav1.ListOptions{ResourceVersion: resourceVersion})
framework.ExpectNoError(err, "Failed to watch configmaps in the namespace %s", ns)
wcs = append(wcs, wc)
resourceVersion = waitForNextConfigMapEvent(wcs[0]).ResourceVersion
@@ -378,7 +379,7 @@ func watchConfigMaps(f *framework.Framework, resourceVersion string, labels ...s
},
}),
}
return c.CoreV1().ConfigMaps(ns).Watch(opts)
return c.CoreV1().ConfigMaps(ns).Watch(context.TODO(), opts)
}
func int64ptr(i int) *int64 {
@@ -470,18 +471,18 @@ func produceConfigMapEvents(f *framework.Framework, stopc <-chan struct{}, minWa
switch op {
case createEvent:
cm.Name = name(i)
_, err := c.CoreV1().ConfigMaps(ns).Create(cm)
_, err := c.CoreV1().ConfigMaps(ns).Create(context.TODO(), cm)
framework.ExpectNoError(err, "Failed to create configmap %s in namespace %s", cm.Name, ns)
existing = append(existing, i)
i++
case updateEvent:
idx := rand.Intn(len(existing))
cm.Name = name(existing[idx])
_, err := c.CoreV1().ConfigMaps(ns).Update(cm)
_, err := c.CoreV1().ConfigMaps(ns).Update(context.TODO(), cm)
framework.ExpectNoError(err, "Failed to update configmap %s in namespace %s", cm.Name, ns)
case deleteEvent:
idx := rand.Intn(len(existing))
err := c.CoreV1().ConfigMaps(ns).Delete(name(existing[idx]), &metav1.DeleteOptions{})
err := c.CoreV1().ConfigMaps(ns).Delete(context.TODO(), name(existing[idx]), &metav1.DeleteOptions{})
framework.ExpectNoError(err, "Failed to delete configmap %s in namespace %s", name(existing[idx]), ns)
existing = append(existing[:idx], existing[idx+1:]...)
default:

View File

@@ -423,7 +423,7 @@ var _ = SIGDescribe("AdmissionWebhook [Privileged:ClusterAdmin]", func() {
})
framework.ExpectNoError(err, "Creating validating webhook configuration")
defer func() {
err := client.AdmissionregistrationV1().ValidatingWebhookConfigurations().Delete(hook.Name, nil)
err := client.AdmissionregistrationV1().ValidatingWebhookConfigurations().Delete(context.TODO(), hook.Name, nil)
framework.ExpectNoError(err, "Deleting validating webhook configuration")
}()
@@ -434,9 +434,9 @@ var _ = SIGDescribe("AdmissionWebhook [Privileged:ClusterAdmin]", func() {
ginkgo.By("Creating a configMap that does not comply to the validation webhook rules")
err = wait.PollImmediate(100*time.Millisecond, 30*time.Second, func() (bool, error) {
cm := namedNonCompliantConfigMap(string(uuid.NewUUID()), f)
_, err = client.CoreV1().ConfigMaps(f.Namespace.Name).Create(cm)
_, err = client.CoreV1().ConfigMaps(f.Namespace.Name).Create(context.TODO(), cm)
if err == nil {
err = client.CoreV1().ConfigMaps(f.Namespace.Name).Delete(cm.Name, nil)
err = client.CoreV1().ConfigMaps(f.Namespace.Name).Delete(context.TODO(), cm.Name, nil)
framework.ExpectNoError(err, "Deleting successfully created configMap")
return false, nil
}
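This create-then-clean-up poll recurs several times in the file. A hedged, self-contained sketch of the idiom, assuming the same substring match on "denied" that the test relies on; expectEventuallyDenied and the newCM factory are hypothetical names, not taken from this file.

package sketch

import (
	"context"
	"strings"
	"time"

	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/util/wait"
	"k8s.io/client-go/kubernetes"
)

// expectEventuallyDenied is an illustrative sketch: it keeps creating throwaway
// ConfigMaps until the webhook starts rejecting them, deleting any copy that
// slips through before the configuration becomes effective.
func expectEventuallyDenied(c kubernetes.Interface, ns string, newCM func() *corev1.ConfigMap) error {
	return wait.PollImmediate(100*time.Millisecond, 30*time.Second, func() (bool, error) {
		cm := newCM()
		_, err := c.CoreV1().ConfigMaps(ns).Create(context.TODO(), cm)
		if err == nil {
			// Not rejected yet: remove the ConfigMap and poll again.
			if derr := c.CoreV1().ConfigMaps(ns).Delete(context.TODO(), cm.Name, nil); derr != nil {
				return false, derr
			}
			return false, nil
		}
		if !strings.Contains(err.Error(), "denied") {
			return false, err // an unrelated failure; stop polling
		}
		return true, nil // rejected by the webhook, as expected
	})
}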
@@ -448,10 +448,10 @@ var _ = SIGDescribe("AdmissionWebhook [Privileged:ClusterAdmin]", func() {
ginkgo.By("Updating a validating webhook configuration's rules to not include the create operation")
err = retry.RetryOnConflict(retry.DefaultRetry, func() error {
h, err := admissionClient.ValidatingWebhookConfigurations().Get(f.UniqueName, metav1.GetOptions{})
h, err := admissionClient.ValidatingWebhookConfigurations().Get(context.TODO(), f.UniqueName, metav1.GetOptions{})
framework.ExpectNoError(err, "Getting validating webhook configuration")
h.Webhooks[0].Rules[0].Operations = []admissionregistrationv1.OperationType{admissionregistrationv1.Update}
_, err = admissionClient.ValidatingWebhookConfigurations().Update(h)
_, err = admissionClient.ValidatingWebhookConfigurations().Update(context.TODO(), h)
return err
})
framework.ExpectNoError(err, "Updating validating webhook configuration")
@@ -459,22 +459,21 @@ var _ = SIGDescribe("AdmissionWebhook [Privileged:ClusterAdmin]", func() {
ginkgo.By("Creating a configMap that does not comply to the validation webhook rules")
err = wait.PollImmediate(100*time.Millisecond, 30*time.Second, func() (bool, error) {
cm := namedNonCompliantConfigMap(string(uuid.NewUUID()), f)
_, err = client.CoreV1().ConfigMaps(f.Namespace.Name).Create(cm)
_, err = client.CoreV1().ConfigMaps(f.Namespace.Name).Create(context.TODO(), cm)
if err != nil {
if !strings.Contains(err.Error(), "denied") {
return false, err
}
return false, nil
}
err = client.CoreV1().ConfigMaps(f.Namespace.Name).Delete(cm.Name, nil)
err = client.CoreV1().ConfigMaps(f.Namespace.Name).Delete(context.TODO(), cm.Name, nil)
framework.ExpectNoError(err, "Deleting successfully created configMap")
return true, nil
})
framework.ExpectNoError(err, "Waiting for configMap in namespace %s to be allowed creation since webhook was updated to not validate create", f.Namespace.Name)
ginkgo.By("Patching a validating webhook configuration's rules to include the create operation")
hook, err = admissionClient.ValidatingWebhookConfigurations().Patch(
f.UniqueName,
hook, err = admissionClient.ValidatingWebhookConfigurations().Patch(context.TODO(), f.UniqueName,
types.JSONPatchType,
[]byte(`[{"op": "replace", "path": "/webhooks/0/rules/0/operations", "value": ["CREATE"]}]`))
framework.ExpectNoError(err, "Patching validating webhook configuration")
@@ -482,9 +481,9 @@ var _ = SIGDescribe("AdmissionWebhook [Privileged:ClusterAdmin]", func() {
ginkgo.By("Creating a configMap that does not comply to the validation webhook rules")
err = wait.PollImmediate(100*time.Millisecond, 30*time.Second, func() (bool, error) {
cm := namedNonCompliantConfigMap(string(uuid.NewUUID()), f)
_, err = client.CoreV1().ConfigMaps(f.Namespace.Name).Create(cm)
_, err = client.CoreV1().ConfigMaps(f.Namespace.Name).Create(context.TODO(), cm)
if err == nil {
err = client.CoreV1().ConfigMaps(f.Namespace.Name).Delete(cm.Name, nil)
err = client.CoreV1().ConfigMaps(f.Namespace.Name).Delete(context.TODO(), cm.Name, nil)
framework.ExpectNoError(err, "Deleting successfully created configMap")
return false, nil
}
@@ -519,7 +518,7 @@ var _ = SIGDescribe("AdmissionWebhook [Privileged:ClusterAdmin]", func() {
})
framework.ExpectNoError(err, "Creating mutating webhook configuration")
defer func() {
err := client.AdmissionregistrationV1().MutatingWebhookConfigurations().Delete(hook.Name, nil)
err := client.AdmissionregistrationV1().MutatingWebhookConfigurations().Delete(context.TODO(), hook.Name, nil)
framework.ExpectNoError(err, "Deleting mutating webhook configuration")
}()
@@ -527,21 +526,21 @@ var _ = SIGDescribe("AdmissionWebhook [Privileged:ClusterAdmin]", func() {
err = waitWebhookConfigurationReady(f)
framework.ExpectNoError(err, "waiting for webhook configuration to be ready")
hook, err = admissionClient.MutatingWebhookConfigurations().Get(f.UniqueName, metav1.GetOptions{})
hook, err = admissionClient.MutatingWebhookConfigurations().Get(context.TODO(), f.UniqueName, metav1.GetOptions{})
framework.ExpectNoError(err, "Getting mutating webhook configuration")
ginkgo.By("Updating a mutating webhook configuration's rules to not include the create operation")
hook.Webhooks[0].Rules[0].Operations = []admissionregistrationv1.OperationType{admissionregistrationv1.Update}
hook, err = admissionClient.MutatingWebhookConfigurations().Update(hook)
hook, err = admissionClient.MutatingWebhookConfigurations().Update(context.TODO(), hook)
framework.ExpectNoError(err, "Updating mutating webhook configuration")
ginkgo.By("Creating a configMap that should not be mutated")
err = wait.PollImmediate(100*time.Millisecond, 30*time.Second, func() (bool, error) {
cm := namedToBeMutatedConfigMap(string(uuid.NewUUID()), f)
created, err := client.CoreV1().ConfigMaps(f.Namespace.Name).Create(cm)
created, err := client.CoreV1().ConfigMaps(f.Namespace.Name).Create(context.TODO(), cm)
if err != nil {
return false, err
}
err = client.CoreV1().ConfigMaps(f.Namespace.Name).Delete(cm.Name, nil)
err = client.CoreV1().ConfigMaps(f.Namespace.Name).Delete(context.TODO(), cm.Name, nil)
framework.ExpectNoError(err, "Deleting successfully created configMap")
_, ok := created.Data["mutation-stage-1"]
return !ok, nil
@@ -549,8 +548,7 @@ var _ = SIGDescribe("AdmissionWebhook [Privileged:ClusterAdmin]", func() {
framework.ExpectNoError(err, "Waiting for configMap in namespace %s this is not mutated", f.Namespace.Name)
ginkgo.By("Patching a mutating webhook configuration's rules to include the create operation")
hook, err = admissionClient.MutatingWebhookConfigurations().Patch(
f.UniqueName,
hook, err = admissionClient.MutatingWebhookConfigurations().Patch(context.TODO(), f.UniqueName,
types.JSONPatchType,
[]byte(`[{"op": "replace", "path": "/webhooks/0/rules/0/operations", "value": ["CREATE"]}]`))
framework.ExpectNoError(err, "Patching mutating webhook configuration")
@@ -558,11 +556,11 @@ var _ = SIGDescribe("AdmissionWebhook [Privileged:ClusterAdmin]", func() {
ginkgo.By("Creating a configMap that should be mutated")
err = wait.PollImmediate(100*time.Millisecond, 30*time.Second, func() (bool, error) {
cm := namedToBeMutatedConfigMap(string(uuid.NewUUID()), f)
created, err := client.CoreV1().ConfigMaps(f.Namespace.Name).Create(cm)
created, err := client.CoreV1().ConfigMaps(f.Namespace.Name).Create(context.TODO(), cm)
if err != nil {
return false, err
}
err = client.CoreV1().ConfigMaps(f.Namespace.Name).Delete(cm.Name, nil)
err = client.CoreV1().ConfigMaps(f.Namespace.Name).Delete(context.TODO(), cm.Name, nil)
framework.ExpectNoError(err, "Deleting successfully created configMap")
_, ok := created.Data["mutation-stage-1"]
return ok, nil
@@ -599,7 +597,7 @@ var _ = SIGDescribe("AdmissionWebhook [Privileged:ClusterAdmin]", func() {
selectorListOpts := metav1.ListOptions{LabelSelector: "e2e-list-test-uuid=" + testUUID}
ginkgo.By("Listing all of the created validation webhooks")
list, err := client.AdmissionregistrationV1().ValidatingWebhookConfigurations().List(selectorListOpts)
list, err := client.AdmissionregistrationV1().ValidatingWebhookConfigurations().List(context.TODO(), selectorListOpts)
framework.ExpectNoError(err, "Listing validating webhook configurations")
framework.ExpectEqual(len(list.Items), testListSize)
@@ -610,9 +608,9 @@ var _ = SIGDescribe("AdmissionWebhook [Privileged:ClusterAdmin]", func() {
ginkgo.By("Creating a configMap that does not comply to the validation webhook rules")
err = wait.PollImmediate(100*time.Millisecond, 30*time.Second, func() (bool, error) {
cm := namedNonCompliantConfigMap(string(uuid.NewUUID()), f)
_, err = client.CoreV1().ConfigMaps(f.Namespace.Name).Create(cm)
_, err = client.CoreV1().ConfigMaps(f.Namespace.Name).Create(context.TODO(), cm)
if err == nil {
err = client.CoreV1().ConfigMaps(f.Namespace.Name).Delete(cm.Name, nil)
err = client.CoreV1().ConfigMaps(f.Namespace.Name).Delete(context.TODO(), cm.Name, nil)
framework.ExpectNoError(err, "Deleting successfully created configMap")
return false, nil
}
@@ -624,20 +622,20 @@ var _ = SIGDescribe("AdmissionWebhook [Privileged:ClusterAdmin]", func() {
framework.ExpectNoError(err, "Waiting for configMap in namespace %s to be denied creation by validating webhook", f.Namespace.Name)
ginkgo.By("Deleting the collection of validation webhooks")
err = client.AdmissionregistrationV1().ValidatingWebhookConfigurations().DeleteCollection(nil, selectorListOpts)
err = client.AdmissionregistrationV1().ValidatingWebhookConfigurations().DeleteCollection(context.TODO(), nil, selectorListOpts)
framework.ExpectNoError(err, "Deleting collection of validating webhook configurations")
ginkgo.By("Creating a configMap that does not comply to the validation webhook rules")
err = wait.PollImmediate(100*time.Millisecond, 30*time.Second, func() (bool, error) {
cm := namedNonCompliantConfigMap(string(uuid.NewUUID()), f)
_, err = client.CoreV1().ConfigMaps(f.Namespace.Name).Create(cm)
_, err = client.CoreV1().ConfigMaps(f.Namespace.Name).Create(context.TODO(), cm)
if err != nil {
if !strings.Contains(err.Error(), "denied") {
return false, err
}
return false, nil
}
err = client.CoreV1().ConfigMaps(f.Namespace.Name).Delete(cm.Name, nil)
err = client.CoreV1().ConfigMaps(f.Namespace.Name).Delete(context.TODO(), cm.Name, nil)
framework.ExpectNoError(err, "Deleting successfully created configMap")
return true, nil
})
@@ -673,7 +671,7 @@ var _ = SIGDescribe("AdmissionWebhook [Privileged:ClusterAdmin]", func() {
selectorListOpts := metav1.ListOptions{LabelSelector: "e2e-list-test-uuid=" + testUUID}
ginkgo.By("Listing all of the created validation webhooks")
list, err := client.AdmissionregistrationV1().MutatingWebhookConfigurations().List(selectorListOpts)
list, err := client.AdmissionregistrationV1().MutatingWebhookConfigurations().List(context.TODO(), selectorListOpts)
framework.ExpectNoError(err, "Listing mutating webhook configurations")
framework.ExpectEqual(len(list.Items), testListSize)
@@ -684,11 +682,11 @@ var _ = SIGDescribe("AdmissionWebhook [Privileged:ClusterAdmin]", func() {
ginkgo.By("Creating a configMap that should be mutated")
err = wait.PollImmediate(100*time.Millisecond, 30*time.Second, func() (bool, error) {
cm := namedToBeMutatedConfigMap(string(uuid.NewUUID()), f)
created, err := client.CoreV1().ConfigMaps(f.Namespace.Name).Create(cm)
created, err := client.CoreV1().ConfigMaps(f.Namespace.Name).Create(context.TODO(), cm)
if err != nil {
return false, err
}
err = client.CoreV1().ConfigMaps(f.Namespace.Name).Delete(cm.Name, nil)
err = client.CoreV1().ConfigMaps(f.Namespace.Name).Delete(context.TODO(), cm.Name, nil)
framework.ExpectNoError(err, "Deleting successfully created configMap")
_, ok := created.Data["mutation-stage-1"]
return ok, nil
@@ -696,17 +694,17 @@ var _ = SIGDescribe("AdmissionWebhook [Privileged:ClusterAdmin]", func() {
framework.ExpectNoError(err, "Waiting for configMap in namespace %s to be mutated", f.Namespace.Name)
ginkgo.By("Deleting the collection of validation webhooks")
err = client.AdmissionregistrationV1().MutatingWebhookConfigurations().DeleteCollection(nil, selectorListOpts)
err = client.AdmissionregistrationV1().MutatingWebhookConfigurations().DeleteCollection(context.TODO(), nil, selectorListOpts)
framework.ExpectNoError(err, "Deleting collection of mutating webhook configurations")
ginkgo.By("Creating a configMap that should not be mutated")
err = wait.PollImmediate(100*time.Millisecond, 30*time.Second, func() (bool, error) {
cm := namedToBeMutatedConfigMap(string(uuid.NewUUID()), f)
created, err := client.CoreV1().ConfigMaps(f.Namespace.Name).Create(cm)
created, err := client.CoreV1().ConfigMaps(f.Namespace.Name).Create(context.TODO(), cm)
if err != nil {
return false, err
}
err = client.CoreV1().ConfigMaps(f.Namespace.Name).Delete(cm.Name, nil)
err = client.CoreV1().ConfigMaps(f.Namespace.Name).Delete(context.TODO(), cm.Name, nil)
framework.ExpectNoError(err, "Deleting successfully created configMap")
_, ok := created.Data["mutation-stage-1"]
return !ok, nil
@@ -719,7 +717,7 @@ func createAuthReaderRoleBinding(f *framework.Framework, namespace string) {
ginkgo.By("Create role binding to let webhook read extension-apiserver-authentication")
client := f.ClientSet
// Create the role binding to allow the webhook to read the extension-apiserver-authentication configmap
_, err := client.RbacV1().RoleBindings("kube-system").Create(&rbacv1.RoleBinding{
_, err := client.RbacV1().RoleBindings("kube-system").Create(context.TODO(), &rbacv1.RoleBinding{
ObjectMeta: metav1.ObjectMeta{
Name: roleBindingName,
Annotations: map[string]string{
@@ -731,7 +729,7 @@ func createAuthReaderRoleBinding(f *framework.Framework, namespace string) {
Kind: "Role",
Name: "extension-apiserver-authentication-reader",
},
// Webhook uses the default service account.
Subjects: []rbacv1.Subject{
{
Kind: "ServiceAccount",
@@ -763,7 +761,7 @@ func deployWebhookAndService(f *framework.Framework, image string, certCtx *cert
},
}
namespace := f.Namespace.Name
_, err := client.CoreV1().Secrets(namespace).Create(secret)
_, err := client.CoreV1().Secrets(namespace).Create(context.TODO(), secret)
framework.ExpectNoError(err, "creating secret %q in namespace %q", secretName, namespace)
// Create the deployment of the webhook
@@ -839,7 +837,7 @@ func deployWebhookAndService(f *framework.Framework, image string, certCtx *cert
},
},
}
deployment, err := client.AppsV1().Deployments(namespace).Create(d)
deployment, err := client.AppsV1().Deployments(namespace).Create(context.TODO(), d)
framework.ExpectNoError(err, "creating deployment %s in namespace %s", deploymentName, namespace)
ginkgo.By("Wait for the deployment to be ready")
err = e2edeploy.WaitForDeploymentRevisionAndImage(client, namespace, deploymentName, "1", image)
@@ -867,7 +865,7 @@ func deployWebhookAndService(f *framework.Framework, image string, certCtx *cert
},
},
}
_, err = client.CoreV1().Services(namespace).Create(service)
_, err = client.CoreV1().Services(namespace).Create(context.TODO(), service)
framework.ExpectNoError(err, "creating service %s in namespace %s", serviceName, namespace)
ginkgo.By("Verifying the service has paired with the endpoint")
@@ -911,7 +909,7 @@ func registerWebhook(f *framework.Framework, configName string, certCtx *certCon
framework.ExpectNoError(err, "waiting for webhook configuration to be ready")
return func() {
client.AdmissionregistrationV1().ValidatingWebhookConfigurations().Delete(configName, nil)
client.AdmissionregistrationV1().ValidatingWebhookConfigurations().Delete(context.TODO(), configName, nil)
}
}
@@ -963,7 +961,7 @@ func registerWebhookForAttachingPod(f *framework.Framework, configName string, c
framework.ExpectNoError(err, "waiting for webhook configuration to be ready")
return func() {
client.AdmissionregistrationV1().ValidatingWebhookConfigurations().Delete(configName, nil)
client.AdmissionregistrationV1().ValidatingWebhookConfigurations().Delete(context.TODO(), configName, nil)
}
}
@@ -988,14 +986,16 @@ func registerMutatingWebhookForConfigMap(f *framework.Framework, configName stri
err = waitWebhookConfigurationReady(f)
framework.ExpectNoError(err, "waiting for webhook configuration to be ready")
return func() { client.AdmissionregistrationV1().MutatingWebhookConfigurations().Delete(configName, nil) }
return func() {
client.AdmissionregistrationV1().MutatingWebhookConfigurations().Delete(context.TODO(), configName, nil)
}
}
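The cleanup closure above shows the shape of this change throughout the file: every generated client-go method now takes a context.Context as its first argument, and these tests pass context.TODO() because they have no request-scoped context to forward yet. A minimal self-contained sketch of the new call shape, with package, function, and object names that are hypothetical and not part of this commit:

package e2eexample // hypothetical package, for illustration only

import (
	"context"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	clientset "k8s.io/client-go/kubernetes"
)

// createGetDelete demonstrates the post-change signatures: the context is the
// first argument to Create, Get, and Delete alike.
func createGetDelete(ctx context.Context, c clientset.Interface, ns string) error {
	cm := &v1.ConfigMap{ObjectMeta: metav1.ObjectMeta{Name: "example"}}
	if _, err := c.CoreV1().ConfigMaps(ns).Create(ctx, cm); err != nil {
		return err
	}
	if _, err := c.CoreV1().ConfigMaps(ns).Get(ctx, cm.Name, metav1.GetOptions{}); err != nil {
		return err
	}
	return c.CoreV1().ConfigMaps(ns).Delete(ctx, cm.Name, nil)
}

Callers that already have a context can forward it unchanged; context.TODO() is the documented placeholder for call sites that have not been plumbed yet.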
func testMutatingConfigMapWebhook(f *framework.Framework) {
ginkgo.By("create a configmap that should be updated by the webhook")
client := f.ClientSet
configMap := toBeMutatedConfigMap(f)
mutatedConfigMap, err := client.CoreV1().ConfigMaps(f.Namespace.Name).Create(configMap)
mutatedConfigMap, err := client.CoreV1().ConfigMaps(f.Namespace.Name).Create(context.TODO(), configMap)
framework.ExpectNoError(err)
expectedConfigMapData := map[string]string{
"mutation-start": "yes",
@@ -1054,14 +1054,16 @@ func registerMutatingWebhookForPod(f *framework.Framework, configName string, ce
err = waitWebhookConfigurationReady(f)
framework.ExpectNoError(err, "waiting for webhook configuration to be ready")
return func() { client.AdmissionregistrationV1().MutatingWebhookConfigurations().Delete(configName, nil) }
return func() {
client.AdmissionregistrationV1().MutatingWebhookConfigurations().Delete(context.TODO(), configName, nil)
}
}
func testMutatingPodWebhook(f *framework.Framework) {
ginkgo.By("create a pod that should be updated by the webhook")
client := f.ClientSet
pod := toBeMutatedPod(f)
mutatedPod, err := client.CoreV1().Pods(f.Namespace.Name).Create(pod)
mutatedPod, err := client.CoreV1().Pods(f.Namespace.Name).Create(context.TODO(), pod)
gomega.Expect(err).To(gomega.BeNil())
if len(mutatedPod.Spec.InitContainers) != 1 {
framework.Failf("expect pod to have 1 init container, got %#v", mutatedPod.Spec.InitContainers)
@@ -1095,7 +1097,7 @@ func testWebhook(f *framework.Framework) {
client := f.ClientSet
// Creating the pod; the request should be rejected
pod := nonCompliantPod(f)
_, err := client.CoreV1().Pods(f.Namespace.Name).Create(pod)
_, err := client.CoreV1().Pods(f.Namespace.Name).Create(context.TODO(), pod)
framework.ExpectError(err, "create pod %s in namespace %s should have been denied by webhook", pod.Name, f.Namespace.Name)
expectedErrMsg1 := "the pod contains unwanted container name"
if !strings.Contains(err.Error(), expectedErrMsg1) {
@@ -1110,7 +1112,7 @@ func testWebhook(f *framework.Framework) {
client = f.ClientSet
// Creating the pod; the request should be rejected
pod = hangingPod(f)
_, err = client.CoreV1().Pods(f.Namespace.Name).Create(pod)
_, err = client.CoreV1().Pods(f.Namespace.Name).Create(context.TODO(), pod)
framework.ExpectError(err, "create pod %s in namespace %s should have caused webhook to hang", pod.Name, f.Namespace.Name)
// ensure the error is webhook-related, not client-side
if !strings.Contains(err.Error(), "webhook") {
@@ -1121,14 +1123,14 @@ func testWebhook(f *framework.Framework) {
framework.Failf("expect error %q, got %q", "deadline", err.Error())
}
// ensure the pod was not actually created
if _, err := client.CoreV1().Pods(f.Namespace.Name).Get(pod.Name, metav1.GetOptions{}); !apierrors.IsNotFound(err) {
if _, err := client.CoreV1().Pods(f.Namespace.Name).Get(context.TODO(), pod.Name, metav1.GetOptions{}); !apierrors.IsNotFound(err) {
framework.Failf("expect notfound error looking for rejected pod, got %v", err)
}
ginkgo.By("create a configmap that should be denied by the webhook")
// Creating the configmap; the request should be rejected
configmap := nonCompliantConfigMap(f)
_, err = client.CoreV1().ConfigMaps(f.Namespace.Name).Create(configmap)
_, err = client.CoreV1().ConfigMaps(f.Namespace.Name).Create(context.TODO(), configmap)
framework.ExpectError(err, "create configmap %s in namespace %s should have been denied by the webhook", configmap.Name, f.Namespace.Name)
expectedErrMsg := "the configmap contains unwanted key and value"
if !strings.Contains(err.Error(), expectedErrMsg) {
@@ -1145,7 +1147,7 @@ func testWebhook(f *framework.Framework) {
"admit": "this",
},
}
_, err = client.CoreV1().ConfigMaps(f.Namespace.Name).Create(configmap)
_, err = client.CoreV1().ConfigMaps(f.Namespace.Name).Create(context.TODO(), configmap)
framework.ExpectNoError(err, "failed to create configmap %s in namespace: %s", configmap.Name, f.Namespace.Name)
ginkgo.By("update (PUT) the admitted configmap to a non-compliant one should be rejected by the webhook")
@@ -1163,7 +1165,7 @@ func testWebhook(f *framework.Framework) {
ginkgo.By("update (PATCH) the admitted configmap to a non-compliant one should be rejected by the webhook")
patch := nonCompliantConfigMapPatch()
_, err = client.CoreV1().ConfigMaps(f.Namespace.Name).Patch(allowedConfigMapName, types.StrategicMergePatchType, []byte(patch))
_, err = client.CoreV1().ConfigMaps(f.Namespace.Name).Patch(context.TODO(), allowedConfigMapName, types.StrategicMergePatchType, []byte(patch))
framework.ExpectError(err, "update admitted configmap %s in namespace %s by strategic merge patch to a non-compliant one should be rejected by webhook. Patch: %+v", allowedConfigMapName, f.Namespace.Name, patch)
if !strings.Contains(err.Error(), expectedErrMsg) {
framework.Failf("expect error contains %q, got %q", expectedErrMsg, err.Error())
@@ -1179,11 +1181,11 @@ func testWebhook(f *framework.Framework) {
}})
framework.ExpectNoError(err, "creating namespace %q", skippedNamespaceName)
// clean up the namespace
defer client.CoreV1().Namespaces().Delete(skippedNamespaceName, nil)
defer client.CoreV1().Namespaces().Delete(context.TODO(), skippedNamespaceName, nil)
ginkgo.By("create a configmap that violates the webhook policy but is in a whitelisted namespace")
configmap = nonCompliantConfigMap(f)
_, err = client.CoreV1().ConfigMaps(skippedNamespaceName).Create(configmap)
_, err = client.CoreV1().ConfigMaps(skippedNamespaceName).Create(context.TODO(), configmap)
framework.ExpectNoError(err, "failed to create configmap %s in namespace: %s", configmap.Name, skippedNamespaceName)
}
@@ -1191,7 +1193,7 @@ func testAttachingPodWebhook(f *framework.Framework) {
ginkgo.By("create a pod")
client := f.ClientSet
pod := toBeAttachedPod(f)
_, err := client.CoreV1().Pods(f.Namespace.Name).Create(pod)
_, err := client.CoreV1().Pods(f.Namespace.Name).Create(context.TODO(), pod)
framework.ExpectNoError(err, "failed to create pod %s in namespace: %s", pod.Name, f.Namespace.Name)
err = e2epod.WaitForPodNameRunningInNamespace(client, pod.Name, f.Namespace.Name)
framework.ExpectNoError(err, "error while waiting for pod %s to go to Running phase in namespace: %s", pod.Name, f.Namespace.Name)
@@ -1272,7 +1274,7 @@ func registerFailClosedWebhook(f *framework.Framework, configName string, certCt
err = waitWebhookConfigurationReady(f)
framework.ExpectNoError(err, "waiting for webhook configuration to be ready")
return func() {
f.ClientSet.AdmissionregistrationV1().ValidatingWebhookConfigurations().Delete(configName, nil)
f.ClientSet.AdmissionregistrationV1().ValidatingWebhookConfigurations().Delete(context.TODO(), configName, nil)
}
}
@@ -1287,7 +1289,7 @@ func testFailClosedWebhook(f *framework.Framework) {
},
}})
framework.ExpectNoError(err, "creating namespace %q", failNamespaceName)
defer client.CoreV1().Namespaces().Delete(failNamespaceName, nil)
defer client.CoreV1().Namespaces().Delete(context.TODO(), failNamespaceName, nil)
ginkgo.By("create a configmap should be unconditionally rejected by the webhook")
configmap := &v1.ConfigMap{
@@ -1295,7 +1297,7 @@ func testFailClosedWebhook(f *framework.Framework) {
Name: "foo",
},
}
_, err = client.CoreV1().ConfigMaps(failNamespaceName).Create(configmap)
_, err = client.CoreV1().ConfigMaps(failNamespaceName).Create(context.TODO(), configmap)
framework.ExpectError(err, "create configmap in namespace: %s should be unconditionally rejected by the webhook", failNamespaceName)
if !apierrors.IsInternalError(err) {
framework.Failf("expect an internal error, got %#v", err)
@@ -1358,7 +1360,7 @@ func registerValidatingWebhookForWebhookConfigurations(f *framework.Framework, c
err = waitWebhookConfigurationReady(f)
framework.ExpectNoError(err, "waiting for webhook configuration to be ready")
return func() {
err := client.AdmissionregistrationV1().ValidatingWebhookConfigurations().Delete(configName, nil)
err := client.AdmissionregistrationV1().ValidatingWebhookConfigurations().Delete(context.TODO(), configName, nil)
framework.ExpectNoError(err, "deleting webhook config %s with namespace %s", configName, namespace)
}
}
@@ -1419,7 +1421,7 @@ func registerMutatingWebhookForWebhookConfigurations(f *framework.Framework, con
err = waitWebhookConfigurationReady(f)
framework.ExpectNoError(err, "waiting for webhook configuration to be ready")
return func() {
err := client.AdmissionregistrationV1().MutatingWebhookConfigurations().Delete(configName, nil)
err := client.AdmissionregistrationV1().MutatingWebhookConfigurations().Delete(context.TODO(), configName, nil)
framework.ExpectNoError(err, "deleting webhook config %s with namespace %s", configName, namespace)
}
}
@@ -1487,7 +1489,7 @@ func testWebhooksForWebhookConfigurations(f *framework.Framework, configName str
ginkgo.By("Deleting the validating-webhook-configuration, which should be possible to remove")
err = client.AdmissionregistrationV1().ValidatingWebhookConfigurations().Delete(configName, nil)
err = client.AdmissionregistrationV1().ValidatingWebhookConfigurations().Delete(context.TODO(), configName, nil)
framework.ExpectNoError(err, "deleting webhook config %s with namespace %s", configName, namespace)
ginkgo.By("Creating a dummy mutating-webhook-configuration object")
@@ -1543,13 +1545,13 @@ func testWebhooksForWebhookConfigurations(f *framework.Framework, configName str
ginkgo.By("Deleting the mutating-webhook-configuration, which should be possible to remove")
err = client.AdmissionregistrationV1().MutatingWebhookConfigurations().Delete(configName, nil)
err = client.AdmissionregistrationV1().MutatingWebhookConfigurations().Delete(context.TODO(), configName, nil)
framework.ExpectNoError(err, "deleting webhook config %s with namespace %s", configName, namespace)
}
func createNamespace(f *framework.Framework, ns *v1.Namespace) error {
return wait.PollImmediate(100*time.Millisecond, 30*time.Second, func() (bool, error) {
_, err := f.ClientSet.CoreV1().Namespaces().Create(ns)
_, err := f.ClientSet.CoreV1().Namespaces().Create(context.TODO(), ns)
if err != nil {
if strings.HasPrefix(err.Error(), "object is being deleted:") {
return false, nil
@@ -1654,11 +1656,11 @@ func updateConfigMap(c clientset.Interface, ns, name string, update updateConfig
var cm *v1.ConfigMap
pollErr := wait.PollImmediate(2*time.Second, 1*time.Minute, func() (bool, error) {
var err error
if cm, err = c.CoreV1().ConfigMaps(ns).Get(name, metav1.GetOptions{}); err != nil {
if cm, err = c.CoreV1().ConfigMaps(ns).Get(context.TODO(), name, metav1.GetOptions{}); err != nil {
return false, err
}
update(cm)
if cm, err = c.CoreV1().ConfigMaps(ns).Update(cm); err == nil {
if cm, err = c.CoreV1().ConfigMaps(ns).Update(context.TODO(), cm); err == nil {
return true, nil
}
// Only retry update on conflict
@@ -1693,10 +1695,10 @@ func updateCustomResource(c dynamic.ResourceInterface, ns, name string, update u
}
func cleanWebhookTest(client clientset.Interface, namespaceName string) {
_ = client.CoreV1().Services(namespaceName).Delete(serviceName, nil)
_ = client.AppsV1().Deployments(namespaceName).Delete(deploymentName, nil)
_ = client.CoreV1().Secrets(namespaceName).Delete(secretName, nil)
_ = client.RbacV1().RoleBindings("kube-system").Delete(roleBindingName, nil)
_ = client.CoreV1().Services(namespaceName).Delete(context.TODO(), serviceName, nil)
_ = client.AppsV1().Deployments(namespaceName).Delete(context.TODO(), deploymentName, nil)
_ = client.CoreV1().Secrets(namespaceName).Delete(context.TODO(), secretName, nil)
_ = client.RbacV1().RoleBindings("kube-system").Delete(context.TODO(), roleBindingName, nil)
}
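cleanWebhookTest above deliberately discards every Delete error during teardown. A hypothetical stricter variant, reusing this file's serviceName, deploymentName, secretName, and roleBindingName constants, would take a caller-supplied context and tolerate only not-found errors:

// cleanWebhookTestStrict is a sketch, not part of this commit: same teardown,
// but with a caller-supplied context and only NotFound errors ignored.
func cleanWebhookTestStrict(ctx context.Context, client clientset.Interface, namespaceName string) error {
	deletions := []func() error{
		func() error { return client.CoreV1().Services(namespaceName).Delete(ctx, serviceName, nil) },
		func() error { return client.AppsV1().Deployments(namespaceName).Delete(ctx, deploymentName, nil) },
		func() error { return client.CoreV1().Secrets(namespaceName).Delete(ctx, secretName, nil) },
		func() error { return client.RbacV1().RoleBindings("kube-system").Delete(ctx, roleBindingName, nil) },
	}
	for _, del := range deletions {
		if err := del(); err != nil && !apierrors.IsNotFound(err) {
			return err
		}
	}
	return nil
}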
func registerWebhookForCustomResource(f *framework.Framework, configName string, certCtx *certContext, testcrd *crd.TestCrd, servicePort int32) func() {
@@ -1746,7 +1748,7 @@ func registerWebhookForCustomResource(f *framework.Framework, configName string,
err = waitWebhookConfigurationReady(f)
framework.ExpectNoError(err, "waiting for webhook configuration to be ready")
return func() {
client.AdmissionregistrationV1().ValidatingWebhookConfigurations().Delete(configName, nil)
client.AdmissionregistrationV1().ValidatingWebhookConfigurations().Delete(context.TODO(), configName, nil)
}
}
@@ -1823,7 +1825,9 @@ func registerMutatingWebhookForCustomResource(f *framework.Framework, configName
err = waitWebhookConfigurationReady(f)
framework.ExpectNoError(err, "waiting for webhook configuration to be ready")
return func() { client.AdmissionregistrationV1().MutatingWebhookConfigurations().Delete(configName, nil) }
return func() {
client.AdmissionregistrationV1().MutatingWebhookConfigurations().Delete(context.TODO(), configName, nil)
}
}
func testCustomResourceWebhook(f *framework.Framework, crd *apiextensionsv1.CustomResourceDefinition, customResourceClient dynamic.ResourceInterface) {
@@ -1983,7 +1987,7 @@ func testMultiVersionCustomResourceWebhook(f *framework.Framework, testcrd *crd.
]
}
}`
_, err = testcrd.APIExtensionClient.ApiextensionsV1().CustomResourceDefinitions().Patch(testcrd.Crd.Name, types.StrategicMergePatchType, []byte(apiVersionWithV2StoragePatch))
_, err = testcrd.APIExtensionClient.ApiextensionsV1().CustomResourceDefinitions().Patch(context.TODO(), testcrd.Crd.Name, types.StrategicMergePatchType, []byte(apiVersionWithV2StoragePatch))
framework.ExpectNoError(err, "failed to patch custom resource definition %s in namespace: %s", testcrd.Crd.Name, f.Namespace.Name)
ginkgo.By("Patching the custom resource while v2 is storage version")
@@ -2054,7 +2058,7 @@ func registerValidatingWebhookForCRD(f *framework.Framework, configName string,
err = waitWebhookConfigurationReady(f)
framework.ExpectNoError(err, "waiting for webhook configuration to be ready")
return func() {
client.AdmissionregistrationV1().ValidatingWebhookConfigurations().Delete(configName, nil)
client.AdmissionregistrationV1().ValidatingWebhookConfigurations().Delete(context.TODO(), configName, nil)
}
}
@@ -2112,7 +2116,7 @@ func testCRDDenyWebhook(f *framework.Framework) {
}
// create CRD
_, err = apiExtensionClient.ApiextensionsV1().CustomResourceDefinitions().Create(crd)
_, err = apiExtensionClient.ApiextensionsV1().CustomResourceDefinitions().Create(context.TODO(), crd)
framework.ExpectError(err, "create custom resource definition %s should be denied by webhook", crd.Name)
expectedErrMsg := "the crd contains unwanted label"
if !strings.Contains(err.Error(), expectedErrMsg) {
@@ -2124,13 +2128,13 @@ func labelNamespace(f *framework.Framework, namespace string) {
client := f.ClientSet
// Add a unique label to the namespace
ns, err := client.CoreV1().Namespaces().Get(namespace, metav1.GetOptions{})
ns, err := client.CoreV1().Namespaces().Get(context.TODO(), namespace, metav1.GetOptions{})
framework.ExpectNoError(err, "error getting namespace %s", namespace)
if ns.Labels == nil {
ns.Labels = map[string]string{}
}
ns.Labels[f.UniqueName] = "true"
_, err = client.CoreV1().Namespaces().Update(ns)
_, err = client.CoreV1().Namespaces().Update(context.TODO(), ns)
framework.ExpectNoError(err, "error labeling namespace %s", namespace)
}
@@ -2184,7 +2188,7 @@ func registerSlowWebhook(f *framework.Framework, configName string, certCtx *cer
framework.ExpectNoError(err, "waiting for webhook configuration to be ready")
return func() {
client.AdmissionregistrationV1().ValidatingWebhookConfigurations().Delete(configName, nil)
client.AdmissionregistrationV1().ValidatingWebhookConfigurations().Delete(context.TODO(), configName, nil)
}
}
@@ -2192,7 +2196,7 @@ func testSlowWebhookTimeoutFailEarly(f *framework.Framework) {
ginkgo.By("Request fails when timeout (1s) is shorter than slow webhook latency (5s)")
client := f.ClientSet
name := "e2e-test-slow-webhook-configmap"
_, err := client.CoreV1().ConfigMaps(f.Namespace.Name).Create(&v1.ConfigMap{ObjectMeta: metav1.ObjectMeta{Name: name}})
_, err := client.CoreV1().ConfigMaps(f.Namespace.Name).Create(context.TODO(), &v1.ConfigMap{ObjectMeta: metav1.ObjectMeta{Name: name}})
framework.ExpectError(err, "create configmap in namespace %s should have timed-out reaching slow webhook", f.Namespace.Name)
// http timeout message: context deadline exceeded
// dial timeout message: dial tcp {address}: i/o timeout
@@ -2206,9 +2210,9 @@ func testSlowWebhookTimeoutFailEarly(f *framework.Framework) {
func testSlowWebhookTimeoutNoError(f *framework.Framework) {
client := f.ClientSet
name := "e2e-test-slow-webhook-configmap"
_, err := client.CoreV1().ConfigMaps(f.Namespace.Name).Create(&v1.ConfigMap{ObjectMeta: metav1.ObjectMeta{Name: name}})
_, err := client.CoreV1().ConfigMaps(f.Namespace.Name).Create(context.TODO(), &v1.ConfigMap{ObjectMeta: metav1.ObjectMeta{Name: name}})
gomega.Expect(err).To(gomega.BeNil())
err = client.CoreV1().ConfigMaps(f.Namespace.Name).Delete(name, &metav1.DeleteOptions{})
err = client.CoreV1().ConfigMaps(f.Namespace.Name).Delete(context.TODO(), name, &metav1.DeleteOptions{})
gomega.Expect(err).To(gomega.BeNil())
}
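The "context deadline exceeded" string matched a few lines above is the standard error of an expired context.Context; a caller that bounds the request itself, instead of passing context.TODO(), would surface the same message from the client side. A sketch of that alternative, with a hypothetical helper name and an arbitrary five-second deadline:

// createConfigMapWithDeadline is a sketch: it derives a context with a
// deadline so the single Create call cannot outlive five seconds.
func createConfigMapWithDeadline(c clientset.Interface, ns, name string) error {
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()
	_, err := c.CoreV1().ConfigMaps(ns).Create(ctx, &v1.ConfigMap{ObjectMeta: metav1.ObjectMeta{Name: name}})
	return err
}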
@@ -2267,7 +2271,7 @@ func createValidatingWebhookConfiguration(f *framework.Framework, config *admiss
}
framework.Failf(`webhook %s in config %s has no namespace or object selector with %s="true", and can interfere with other tests`, webhook.Name, config.Name, f.UniqueName)
}
return f.ClientSet.AdmissionregistrationV1().ValidatingWebhookConfigurations().Create(config)
return f.ClientSet.AdmissionregistrationV1().ValidatingWebhookConfigurations().Create(context.TODO(), config)
}
// createMutatingWebhookConfiguration ensures the webhook config scopes object or namespace selection
@@ -2282,7 +2286,7 @@ func createMutatingWebhookConfiguration(f *framework.Framework, config *admissio
}
framework.Failf(`webhook %s in config %s has no namespace or object selector with %s="true", and can interfere with other tests`, webhook.Name, config.Name, f.UniqueName)
}
return f.ClientSet.AdmissionregistrationV1().MutatingWebhookConfigurations().Create(config)
return f.ClientSet.AdmissionregistrationV1().MutatingWebhookConfigurations().Create(context.TODO(), config)
}
func newDenyPodWebhookFixture(f *framework.Framework, certCtx *certContext, servicePort int32) admissionregistrationv1.ValidatingWebhook {
@@ -2385,7 +2389,7 @@ func newMutateConfigMapWebhookFixture(f *framework.Framework, certCtx *certConte
// createWebhookConfigurationReadyNamespace creates a separate namespace for webhook configuration ready markers to
// prevent cross-talk with webhook configurations being tested.
func createWebhookConfigurationReadyNamespace(f *framework.Framework) {
ns, err := f.ClientSet.CoreV1().Namespaces().Create(&v1.Namespace{
ns, err := f.ClientSet.CoreV1().Namespaces().Create(context.TODO(), &v1.Namespace{
ObjectMeta: metav1.ObjectMeta{
Name: f.Namespace.Name + "-markers",
Labels: map[string]string{f.UniqueName + "-markers": "true"},
@@ -2409,7 +2413,7 @@ func waitWebhookConfigurationReady(f *framework.Framework) error {
},
},
}
_, err := cmClient.Create(marker)
_, err := cmClient.Create(context.TODO(), marker)
if err != nil {
// The always-deny webhook does not provide a reason, so check for the error string we expect
if strings.Contains(err.Error(), "denied") {
@@ -2418,7 +2422,7 @@ func waitWebhookConfigurationReady(f *framework.Framework) error {
return false, err
}
// best effort cleanup of markers that are no longer needed
_ = cmClient.Delete(marker.GetName(), nil)
_ = cmClient.Delete(context.TODO(), marker.GetName(), nil)
framework.Logf("Waiting for webhook configuration to be ready...")
return false, nil
})

View File

@@ -17,6 +17,7 @@ limitations under the License.
package apps
import (
"context"
"fmt"
"time"
@@ -70,7 +71,7 @@ var _ = SIGDescribe("CronJob", func() {
framework.ExpectNoError(err, "Failed to wait for active jobs in CronJob %s in namespace %s", cronJob.Name, f.Namespace.Name)
ginkgo.By("Ensuring at least two running jobs exists by listing jobs explicitly")
jobs, err := f.ClientSet.BatchV1().Jobs(f.Namespace.Name).List(metav1.ListOptions{})
jobs, err := f.ClientSet.BatchV1().Jobs(f.Namespace.Name).List(context.TODO(), metav1.ListOptions{})
framework.ExpectNoError(err, "Failed to list the CronJobs in namespace %s", f.Namespace.Name)
activeJobs, _ := filterActiveJobs(jobs)
gomega.Expect(len(activeJobs)).To(gomega.BeNumerically(">=", 2))
@@ -95,7 +96,7 @@ var _ = SIGDescribe("CronJob", func() {
framework.ExpectError(err)
ginkgo.By("Ensuring no job exists by listing jobs explicitly")
jobs, err := f.ClientSet.BatchV1().Jobs(f.Namespace.Name).List(metav1.ListOptions{})
jobs, err := f.ClientSet.BatchV1().Jobs(f.Namespace.Name).List(context.TODO(), metav1.ListOptions{})
framework.ExpectNoError(err, "Failed to list the CronJobs in namespace %s", f.Namespace.Name)
gomega.Expect(jobs.Items).To(gomega.HaveLen(0))
@@ -122,7 +123,7 @@ var _ = SIGDescribe("CronJob", func() {
gomega.Expect(cronJob.Status.Active).Should(gomega.HaveLen(1))
ginkgo.By("Ensuring exactly one running job exists by listing jobs explicitly")
jobs, err := f.ClientSet.BatchV1().Jobs(f.Namespace.Name).List(metav1.ListOptions{})
jobs, err := f.ClientSet.BatchV1().Jobs(f.Namespace.Name).List(context.TODO(), metav1.ListOptions{})
framework.ExpectNoError(err, "Failed to list the CronJobs in namespace %s", f.Namespace.Name)
activeJobs, _ := filterActiveJobs(jobs)
gomega.Expect(activeJobs).To(gomega.HaveLen(1))
@@ -154,7 +155,7 @@ var _ = SIGDescribe("CronJob", func() {
gomega.Expect(cronJob.Status.Active).Should(gomega.HaveLen(1))
ginkgo.By("Ensuring exactly one running job exists by listing jobs explicitly")
jobs, err := f.ClientSet.BatchV1().Jobs(f.Namespace.Name).List(metav1.ListOptions{})
jobs, err := f.ClientSet.BatchV1().Jobs(f.Namespace.Name).List(context.TODO(), metav1.ListOptions{})
framework.ExpectNoError(err, "Failed to list the jobs in namespace %s", f.Namespace.Name)
activeJobs, _ := filterActiveJobs(jobs)
gomega.Expect(activeJobs).To(gomega.HaveLen(1))
@@ -266,7 +267,7 @@ func ensureHistoryLimits(c clientset.Interface, ns string, cronJob *batchv1beta1
framework.ExpectNoError(err, "Failed to ensure a finished cronjob exists in namespace %s", ns)
ginkgo.By("Ensuring a finished job exists by listing jobs explicitly")
jobs, err := c.BatchV1().Jobs(ns).List(metav1.ListOptions{})
jobs, err := c.BatchV1().Jobs(ns).List(context.TODO(), metav1.ListOptions{})
framework.ExpectNoError(err, "Failed to ensure a finished cronjob exists by listing jobs explicitly in namespace %s", ns)
activeJobs, finishedJobs := filterActiveJobs(jobs)
if len(finishedJobs) != 1 {
@@ -282,7 +283,7 @@ func ensureHistoryLimits(c clientset.Interface, ns string, cronJob *batchv1beta1
framework.ExpectNoError(err, "Failed to ensure that pods for job does not exists anymore in namespace %s", ns)
ginkgo.By("Ensuring there is 1 finished job by listing jobs explicitly")
jobs, err = c.BatchV1().Jobs(ns).List(metav1.ListOptions{})
jobs, err = c.BatchV1().Jobs(ns).List(context.TODO(), metav1.ListOptions{})
framework.ExpectNoError(err, "Failed to ensure there is one finished job by listing job explicitly in namespace %s", ns)
activeJobs, finishedJobs = filterActiveJobs(jobs)
if len(finishedJobs) != 1 {
@@ -354,16 +355,16 @@ func newTestCronJob(name, schedule string, concurrencyPolicy batchv1beta1.Concur
}
func createCronJob(c clientset.Interface, ns string, cronJob *batchv1beta1.CronJob) (*batchv1beta1.CronJob, error) {
return c.BatchV1beta1().CronJobs(ns).Create(cronJob)
return c.BatchV1beta1().CronJobs(ns).Create(context.TODO(), cronJob)
}
func getCronJob(c clientset.Interface, ns, name string) (*batchv1beta1.CronJob, error) {
return c.BatchV1beta1().CronJobs(ns).Get(name, metav1.GetOptions{})
return c.BatchV1beta1().CronJobs(ns).Get(context.TODO(), name, metav1.GetOptions{})
}
func deleteCronJob(c clientset.Interface, ns, name string) error {
propagationPolicy := metav1.DeletePropagationBackground // Also delete jobs and pods related to cronjob
return c.BatchV1beta1().CronJobs(ns).Delete(name, &metav1.DeleteOptions{PropagationPolicy: &propagationPolicy})
return c.BatchV1beta1().CronJobs(ns).Delete(context.TODO(), name, &metav1.DeleteOptions{PropagationPolicy: &propagationPolicy})
}
// Wait for at least the given number of active jobs.
@@ -415,7 +416,7 @@ func waitForJobNotActive(c clientset.Interface, ns, cronJobName, jobName string)
// Wait for a job to disappear by listing them explicitly.
func waitForJobToDisappear(c clientset.Interface, ns string, targetJob *batchv1.Job) error {
return wait.Poll(framework.Poll, cronJobTimeout, func() (bool, error) {
jobs, err := c.BatchV1().Jobs(ns).List(metav1.ListOptions{})
jobs, err := c.BatchV1().Jobs(ns).List(context.TODO(), metav1.ListOptions{})
if err != nil {
return false, err
}
@@ -433,7 +434,7 @@ func waitForJobToDisappear(c clientset.Interface, ns string, targetJob *batchv1.
func waitForJobsPodToDisappear(c clientset.Interface, ns string, targetJob *batchv1.Job) error {
return wait.Poll(framework.Poll, cronJobTimeout, func() (bool, error) {
options := metav1.ListOptions{LabelSelector: fmt.Sprintf("controller-uid=%s", targetJob.UID)}
pods, err := c.CoreV1().Pods(ns).List(options)
pods, err := c.CoreV1().Pods(ns).List(context.TODO(), options)
if err != nil {
return false, err
}
@@ -444,7 +445,7 @@ func waitForJobsPodToDisappear(c clientset.Interface, ns string, targetJob *batc
// Wait for a job to be replaced with a new one.
func waitForJobReplaced(c clientset.Interface, ns, previousJobName string) error {
return wait.Poll(framework.Poll, cronJobTimeout, func() (bool, error) {
jobs, err := c.BatchV1().Jobs(ns).List(metav1.ListOptions{})
jobs, err := c.BatchV1().Jobs(ns).List(context.TODO(), metav1.ListOptions{})
if err != nil {
return false, err
}
@@ -463,7 +464,7 @@ func waitForJobReplaced(c clientset.Interface, ns, previousJobName string) error
// waitForJobsAtLeast waits for at least the given number of jobs to appear.
func waitForJobsAtLeast(c clientset.Interface, ns string, atLeast int) error {
return wait.Poll(framework.Poll, cronJobTimeout, func() (bool, error) {
jobs, err := c.BatchV1().Jobs(ns).List(metav1.ListOptions{})
jobs, err := c.BatchV1().Jobs(ns).List(context.TODO(), metav1.ListOptions{})
if err != nil {
return false, err
}
@@ -474,7 +475,7 @@ func waitForJobsAtLeast(c clientset.Interface, ns string, atLeast int) error {
// waitForAnyFinishedJob waits for any completed job to appear.
func waitForAnyFinishedJob(c clientset.Interface, ns string) error {
return wait.Poll(framework.Poll, cronJobTimeout, func() (bool, error) {
jobs, err := c.BatchV1().Jobs(ns).List(metav1.ListOptions{})
jobs, err := c.BatchV1().Jobs(ns).List(context.TODO(), metav1.ListOptions{})
if err != nil {
return false, err
}
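The wait helpers in this file bound their loops with cronJobTimeout while handing context.TODO() to each List. One way to tie both the loop and the requests to a single caller-supplied context is wait.PollImmediateUntil with the context's Done channel as the stop channel; a sketch with a hypothetical helper name:

// waitForJobCountCtx is a sketch: the same context ends the poll loop (via its
// Done channel) and is passed to every List issued inside it.
func waitForJobCountCtx(ctx context.Context, c clientset.Interface, ns string, atLeast int) error {
	return wait.PollImmediateUntil(framework.Poll, func() (bool, error) {
		jobs, err := c.BatchV1().Jobs(ns).List(ctx, metav1.ListOptions{})
		if err != nil {
			return false, err
		}
		return len(jobs.Items) >= atLeast, nil
	}, ctx.Done())
}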

View File

@@ -17,6 +17,7 @@ limitations under the License.
package apps
import (
"context"
"fmt"
"strconv"
"time"
@@ -177,7 +178,7 @@ func replacePods(pods []*v1.Pod, store cache.Store) {
// and a list of nodenames across which these containers restarted.
func getContainerRestarts(c clientset.Interface, ns string, labelSelector labels.Selector) (int, []string) {
options := metav1.ListOptions{LabelSelector: labelSelector.String()}
pods, err := c.CoreV1().Pods(ns).List(options)
pods, err := c.CoreV1().Pods(ns).List(context.TODO(), options)
framework.ExpectNoError(err)
failedContainers := 0
containerRestartNodes := sets.NewString()
@@ -227,12 +228,12 @@ var _ = SIGDescribe("DaemonRestart [Disruptive]", func() {
&cache.ListWatch{
ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
options.LabelSelector = labelSelector.String()
obj, err := f.ClientSet.CoreV1().Pods(ns).List(options)
obj, err := f.ClientSet.CoreV1().Pods(ns).List(context.TODO(), options)
return runtime.Object(obj), err
},
WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
options.LabelSelector = labelSelector.String()
return f.ClientSet.CoreV1().Pods(ns).Watch(options)
return f.ClientSet.CoreV1().Pods(ns).Watch(context.TODO(), options)
},
},
&v1.Pod{},

View File

@@ -17,6 +17,7 @@ limitations under the License.
package apps
import (
"context"
"fmt"
"reflect"
"strings"
@@ -67,7 +68,7 @@ func updateDaemonSetWithRetries(c clientset.Interface, namespace, name string, a
daemonsets := c.AppsV1().DaemonSets(namespace)
var updateErr error
pollErr := wait.PollImmediate(10*time.Millisecond, 1*time.Minute, func() (bool, error) {
if ds, err = daemonsets.Get(name, metav1.GetOptions{}); err != nil {
if ds, err = daemonsets.Get(context.TODO(), name, metav1.GetOptions{}); err != nil {
if testutils.IsRetryableAPIError(err) {
return false, nil
}
@@ -75,7 +76,7 @@ func updateDaemonSetWithRetries(c clientset.Interface, namespace, name string, a
}
// Apply the update, then attempt to push it to the apiserver.
applyUpdate(ds)
if ds, err = daemonsets.Update(ds); err == nil {
if ds, err = daemonsets.Update(context.TODO(), ds); err == nil {
framework.Logf("Updating DaemonSet %s", name)
return true, nil
}
@@ -98,7 +99,7 @@ var _ = SIGDescribe("Daemon set [Serial]", func() {
ginkgo.AfterEach(func() {
// Clean up
daemonsets, err := f.ClientSet.AppsV1().DaemonSets(f.Namespace.Name).List(metav1.ListOptions{})
daemonsets, err := f.ClientSet.AppsV1().DaemonSets(f.Namespace.Name).List(context.TODO(), metav1.ListOptions{})
framework.ExpectNoError(err, "unable to dump DaemonSets")
if daemonsets != nil && len(daemonsets.Items) > 0 {
for _, ds := range daemonsets.Items {
@@ -108,12 +109,12 @@ var _ = SIGDescribe("Daemon set [Serial]", func() {
framework.ExpectNoError(err, "error waiting for daemon pod to be reaped")
}
}
if daemonsets, err := f.ClientSet.AppsV1().DaemonSets(f.Namespace.Name).List(metav1.ListOptions{}); err == nil {
if daemonsets, err := f.ClientSet.AppsV1().DaemonSets(f.Namespace.Name).List(context.TODO(), metav1.ListOptions{}); err == nil {
framework.Logf("daemonset: %s", runtime.EncodeOrDie(scheme.Codecs.LegacyCodec(scheme.Scheme.PrioritizedVersionsAllGroups()...), daemonsets))
} else {
framework.Logf("unable to dump daemonsets: %v", err)
}
if pods, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).List(metav1.ListOptions{}); err == nil {
if pods, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).List(context.TODO(), metav1.ListOptions{}); err == nil {
framework.Logf("pods: %s", runtime.EncodeOrDie(scheme.Codecs.LegacyCodec(scheme.Scheme.PrioritizedVersionsAllGroups()...), pods))
} else {
framework.Logf("unable to dump pods: %v", err)
@@ -153,7 +154,7 @@ var _ = SIGDescribe("Daemon set [Serial]", func() {
label := map[string]string{daemonsetNameLabel: dsName}
ginkgo.By(fmt.Sprintf("Creating simple DaemonSet %q", dsName))
ds, err := c.AppsV1().DaemonSets(ns).Create(newDaemonSet(dsName, image, label))
ds, err := c.AppsV1().DaemonSets(ns).Create(context.TODO(), newDaemonSet(dsName, image, label))
framework.ExpectNoError(err)
ginkgo.By("Check that daemon pods launch on every node of the cluster.")
@@ -165,7 +166,7 @@ var _ = SIGDescribe("Daemon set [Serial]", func() {
ginkgo.By("Stop a daemon pod, check that the daemon pod is revived.")
podList := listDaemonPods(c, ns, label)
pod := podList.Items[0]
err = c.CoreV1().Pods(ns).Delete(pod.Name, nil)
err = c.CoreV1().Pods(ns).Delete(context.TODO(), pod.Name, nil)
framework.ExpectNoError(err)
err = wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, checkRunningOnAllNodes(f, ds))
framework.ExpectNoError(err, "error waiting for daemon pod to revive")
@@ -182,7 +183,7 @@ var _ = SIGDescribe("Daemon set [Serial]", func() {
framework.Logf("Creating daemon %q with a node selector", dsName)
ds := newDaemonSet(dsName, image, complexLabel)
ds.Spec.Template.Spec.NodeSelector = nodeSelector
ds, err := c.AppsV1().DaemonSets(ns).Create(ds)
ds, err := c.AppsV1().DaemonSets(ns).Create(context.TODO(), ds)
framework.ExpectNoError(err)
ginkgo.By("Initially, daemon pods should not be running on any nodes.")
@@ -211,7 +212,7 @@ var _ = SIGDescribe("Daemon set [Serial]", func() {
ginkgo.By("Update DaemonSet node selector to green, and change its update strategy to RollingUpdate")
patch := fmt.Sprintf(`{"spec":{"template":{"spec":{"nodeSelector":{"%s":"%s"}}},"updateStrategy":{"type":"RollingUpdate"}}}`,
daemonsetColorLabel, greenNode.Labels[daemonsetColorLabel])
ds, err = c.AppsV1().DaemonSets(ns).Patch(dsName, types.StrategicMergePatchType, []byte(patch))
ds, err = c.AppsV1().DaemonSets(ns).Patch(context.TODO(), dsName, types.StrategicMergePatchType, []byte(patch))
framework.ExpectNoError(err, "error patching daemon set")
daemonSetLabels, _ = separateDaemonSetNodeLabels(greenNode.Labels)
framework.ExpectEqual(len(daemonSetLabels), 1)
@@ -245,7 +246,7 @@ var _ = SIGDescribe("Daemon set [Serial]", func() {
},
},
}
ds, err := c.AppsV1().DaemonSets(ns).Create(ds)
ds, err := c.AppsV1().DaemonSets(ns).Create(context.TODO(), ds)
framework.ExpectNoError(err)
ginkgo.By("Initially, daemon pods should not be running on any nodes.")
@@ -279,7 +280,7 @@ var _ = SIGDescribe("Daemon set [Serial]", func() {
label := map[string]string{daemonsetNameLabel: dsName}
ginkgo.By(fmt.Sprintf("Creating a simple DaemonSet %q", dsName))
ds, err := c.AppsV1().DaemonSets(ns).Create(newDaemonSet(dsName, image, label))
ds, err := c.AppsV1().DaemonSets(ns).Create(context.TODO(), newDaemonSet(dsName, image, label))
framework.ExpectNoError(err)
ginkgo.By("Check that daemon pods launch on every node of the cluster.")
@@ -293,7 +294,7 @@ var _ = SIGDescribe("Daemon set [Serial]", func() {
pod := podList.Items[0]
pod.ResourceVersion = ""
pod.Status.Phase = v1.PodFailed
_, err = c.CoreV1().Pods(ns).UpdateStatus(&pod)
_, err = c.CoreV1().Pods(ns).UpdateStatus(context.TODO(), &pod)
framework.ExpectNoError(err, "error failing a daemon pod")
err = wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, checkRunningOnAllNodes(f, ds))
framework.ExpectNoError(err, "error waiting for daemon pod to revive")
@@ -311,7 +312,7 @@ var _ = SIGDescribe("Daemon set [Serial]", func() {
framework.Logf("Creating simple daemon set %s", dsName)
ds := newDaemonSet(dsName, image, label)
ds.Spec.UpdateStrategy = appsv1.DaemonSetUpdateStrategy{Type: appsv1.OnDeleteDaemonSetStrategyType}
ds, err := c.AppsV1().DaemonSets(ns).Create(ds)
ds, err := c.AppsV1().DaemonSets(ns).Create(context.TODO(), ds)
framework.ExpectNoError(err)
ginkgo.By("Check that daemon pods launch on every node of the cluster.")
@@ -319,7 +320,7 @@ var _ = SIGDescribe("Daemon set [Serial]", func() {
framework.ExpectNoError(err, "error waiting for daemon pod to start")
// Check history and labels
ds, err = c.AppsV1().DaemonSets(ns).Get(ds.Name, metav1.GetOptions{})
ds, err = c.AppsV1().DaemonSets(ns).Get(context.TODO(), ds.Name, metav1.GetOptions{})
framework.ExpectNoError(err)
waitForHistoryCreated(c, ns, label, 1)
first := curHistory(listDaemonHistories(c, ns, label), ds)
@@ -329,7 +330,7 @@ var _ = SIGDescribe("Daemon set [Serial]", func() {
ginkgo.By("Update daemon pods image.")
patch := getDaemonSetImagePatch(ds.Spec.Template.Spec.Containers[0].Name, AgnhostImage)
ds, err = c.AppsV1().DaemonSets(ns).Patch(dsName, types.StrategicMergePatchType, []byte(patch))
ds, err = c.AppsV1().DaemonSets(ns).Patch(context.TODO(), dsName, types.StrategicMergePatchType, []byte(patch))
framework.ExpectNoError(err)
ginkgo.By("Check that daemon pods images aren't updated.")
@@ -341,7 +342,7 @@ var _ = SIGDescribe("Daemon set [Serial]", func() {
framework.ExpectNoError(err, "error waiting for daemon pod to start")
// Check history and labels
ds, err = c.AppsV1().DaemonSets(ns).Get(ds.Name, metav1.GetOptions{})
ds, err = c.AppsV1().DaemonSets(ns).Get(context.TODO(), ds.Name, metav1.GetOptions{})
framework.ExpectNoError(err)
waitForHistoryCreated(c, ns, label, 2)
cur := curHistory(listDaemonHistories(c, ns, label), ds)
@@ -360,7 +361,7 @@ var _ = SIGDescribe("Daemon set [Serial]", func() {
framework.Logf("Creating simple daemon set %s", dsName)
ds := newDaemonSet(dsName, image, label)
ds.Spec.UpdateStrategy = appsv1.DaemonSetUpdateStrategy{Type: appsv1.RollingUpdateDaemonSetStrategyType}
ds, err := c.AppsV1().DaemonSets(ns).Create(ds)
ds, err := c.AppsV1().DaemonSets(ns).Create(context.TODO(), ds)
framework.ExpectNoError(err)
ginkgo.By("Check that daemon pods launch on every node of the cluster.")
@@ -368,7 +369,7 @@ var _ = SIGDescribe("Daemon set [Serial]", func() {
framework.ExpectNoError(err, "error waiting for daemon pod to start")
// Check history and labels
ds, err = c.AppsV1().DaemonSets(ns).Get(ds.Name, metav1.GetOptions{})
ds, err = c.AppsV1().DaemonSets(ns).Get(context.TODO(), ds.Name, metav1.GetOptions{})
framework.ExpectNoError(err)
waitForHistoryCreated(c, ns, label, 1)
cur := curHistory(listDaemonHistories(c, ns, label), ds)
@@ -378,12 +379,12 @@ var _ = SIGDescribe("Daemon set [Serial]", func() {
ginkgo.By("Update daemon pods image.")
patch := getDaemonSetImagePatch(ds.Spec.Template.Spec.Containers[0].Name, AgnhostImage)
ds, err = c.AppsV1().DaemonSets(ns).Patch(dsName, types.StrategicMergePatchType, []byte(patch))
ds, err = c.AppsV1().DaemonSets(ns).Patch(context.TODO(), dsName, types.StrategicMergePatchType, []byte(patch))
framework.ExpectNoError(err)
// Time to complete the rolling upgrade is proportional to the number of nodes in the cluster.
// Get the number of nodes, and set the timeout appropriately.
nodes, err := c.CoreV1().Nodes().List(metav1.ListOptions{})
nodes, err := c.CoreV1().Nodes().List(context.TODO(), metav1.ListOptions{})
framework.ExpectNoError(err)
nodeCount := len(nodes.Items)
retryTimeout := dsRetryTimeout + time.Duration(nodeCount*30)*time.Second
@@ -397,7 +398,7 @@ var _ = SIGDescribe("Daemon set [Serial]", func() {
framework.ExpectNoError(err, "error waiting for daemon pod to start")
// Check history and labels
ds, err = c.AppsV1().DaemonSets(ns).Get(ds.Name, metav1.GetOptions{})
ds, err = c.AppsV1().DaemonSets(ns).Get(context.TODO(), ds.Name, metav1.GetOptions{})
framework.ExpectNoError(err)
waitForHistoryCreated(c, ns, label, 2)
cur = curHistory(listDaemonHistories(c, ns, label), ds)
@@ -419,7 +420,7 @@ var _ = SIGDescribe("Daemon set [Serial]", func() {
label := map[string]string{daemonsetNameLabel: dsName}
ds := newDaemonSet(dsName, image, label)
ds.Spec.UpdateStrategy = appsv1.DaemonSetUpdateStrategy{Type: appsv1.RollingUpdateDaemonSetStrategyType}
ds, err = c.AppsV1().DaemonSets(ns).Create(ds)
ds, err = c.AppsV1().DaemonSets(ns).Create(context.TODO(), ds)
framework.ExpectNoError(err)
framework.Logf("Check that daemon pods launch on every node of the cluster")
@@ -518,7 +519,7 @@ func newDaemonSet(dsName, image string, label map[string]string) *appsv1.DaemonS
func listDaemonPods(c clientset.Interface, ns string, label map[string]string) *v1.PodList {
selector := labels.Set(label).AsSelector()
options := metav1.ListOptions{LabelSelector: selector.String()}
podList, err := c.CoreV1().Pods(ns).List(options)
podList, err := c.CoreV1().Pods(ns).List(context.TODO(), options)
framework.ExpectNoError(err)
gomega.Expect(len(podList.Items)).To(gomega.BeNumerically(">", 0))
return podList
@@ -555,7 +556,7 @@ func clearDaemonSetNodeLabels(c clientset.Interface) error {
func updateNamespaceAnnotations(c clientset.Interface, nsName string) (*v1.Namespace, error) {
nsClient := c.CoreV1().Namespaces()
ns, err := nsClient.Get(nsName, metav1.GetOptions{})
ns, err := nsClient.Get(context.TODO(), nsName, metav1.GetOptions{})
if err != nil {
return nil, err
}
@@ -568,7 +569,7 @@ func updateNamespaceAnnotations(c clientset.Interface, nsName string) (*v1.Names
ns.Annotations[n] = ""
}
return nsClient.Update(ns)
return nsClient.Update(context.TODO(), ns)
}
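updateDaemonSetWithRetries above and setDaemonSetNodeLabels below both hand-roll a get/mutate/update loop that retries on conflict. The same pattern can be written with client-go's retry helper and a caller-supplied context; a sketch with a hypothetical helper name, assuming an additional import of k8s.io/client-go/util/retry:

// setNodeLabelCtx is a sketch: RetryOnConflict re-runs the closure whenever
// the Update fails with a conflict, and the context flows into both calls.
func setNodeLabelCtx(ctx context.Context, c clientset.Interface, nodeName, key, value string) error {
	return retry.RetryOnConflict(retry.DefaultRetry, func() error {
		node, err := c.CoreV1().Nodes().Get(ctx, nodeName, metav1.GetOptions{})
		if err != nil {
			return err
		}
		if node.Labels == nil {
			node.Labels = map[string]string{}
		}
		node.Labels[key] = value
		_, err = c.CoreV1().Nodes().Update(ctx, node)
		return err
	})
}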
func setDaemonSetNodeLabels(c clientset.Interface, nodeName string, labels map[string]string) (*v1.Node, error) {
@@ -576,7 +577,7 @@ func setDaemonSetNodeLabels(c clientset.Interface, nodeName string, labels map[s
var newNode *v1.Node
var newLabels map[string]string
err := wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, func() (bool, error) {
node, err := nodeClient.Get(nodeName, metav1.GetOptions{})
node, err := nodeClient.Get(context.TODO(), nodeName, metav1.GetOptions{})
if err != nil {
return false, err
}
@@ -591,7 +592,7 @@ func setDaemonSetNodeLabels(c clientset.Interface, nodeName string, labels map[s
for k, v := range labels {
node.Labels[k] = v
}
newNode, err = nodeClient.Update(node)
newNode, err = nodeClient.Update(context.TODO(), node)
if err == nil {
newLabels, _ = separateDaemonSetNodeLabels(newNode.Labels)
return true, err
@@ -613,7 +614,7 @@ func setDaemonSetNodeLabels(c clientset.Interface, nodeName string, labels map[s
func checkDaemonPodOnNodes(f *framework.Framework, ds *appsv1.DaemonSet, nodeNames []string) func() (bool, error) {
return func() (bool, error) {
podList, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).List(metav1.ListOptions{})
podList, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).List(context.TODO(), metav1.ListOptions{})
if err != nil {
framework.Logf("could not get the pod list: %v", err)
return false, nil
@@ -658,7 +659,7 @@ func checkRunningOnAllNodes(f *framework.Framework, ds *appsv1.DaemonSet) func()
}
func schedulableNodes(c clientset.Interface, ds *appsv1.DaemonSet) []string {
nodeList, err := c.CoreV1().Nodes().List(metav1.ListOptions{})
nodeList, err := c.CoreV1().Nodes().List(context.TODO(), metav1.ListOptions{})
framework.ExpectNoError(err)
nodeNames := make([]string, 0)
for _, node := range nodeList.Items {
@@ -702,7 +703,7 @@ func checkRunningOnNoNodes(f *framework.Framework, ds *appsv1.DaemonSet) func()
}
func checkDaemonStatus(f *framework.Framework, dsName string) error {
ds, err := f.ClientSet.AppsV1().DaemonSets(f.Namespace.Name).Get(dsName, metav1.GetOptions{})
ds, err := f.ClientSet.AppsV1().DaemonSets(f.Namespace.Name).Get(context.TODO(), dsName, metav1.GetOptions{})
if err != nil {
return fmt.Errorf("Could not get daemon set from v1")
}
@@ -715,7 +716,7 @@ func checkDaemonStatus(f *framework.Framework, dsName string) error {
func checkDaemonPodsImageAndAvailability(c clientset.Interface, ds *appsv1.DaemonSet, image string, maxUnavailable int) func() (bool, error) {
return func() (bool, error) {
podList, err := c.CoreV1().Pods(ds.Namespace).List(metav1.ListOptions{})
podList, err := c.CoreV1().Pods(ds.Namespace).List(context.TODO(), metav1.ListOptions{})
if err != nil {
return false, err
}
@@ -766,7 +767,7 @@ func waitForHistoryCreated(c clientset.Interface, ns string, label map[string]st
listHistoryFn := func() (bool, error) {
selector := labels.Set(label).AsSelector()
options := metav1.ListOptions{LabelSelector: selector.String()}
historyList, err := c.AppsV1().ControllerRevisions(ns).List(options)
historyList, err := c.AppsV1().ControllerRevisions(ns).List(context.TODO(), options)
if err != nil {
return false, err
}
@@ -783,7 +784,7 @@ func waitForHistoryCreated(c clientset.Interface, ns string, label map[string]st
func listDaemonHistories(c clientset.Interface, ns string, label map[string]string) *appsv1.ControllerRevisionList {
selector := labels.Set(label).AsSelector()
options := metav1.ListOptions{LabelSelector: selector.String()}
historyList, err := c.AppsV1().ControllerRevisions(ns).List(options)
historyList, err := c.AppsV1().ControllerRevisions(ns).List(context.TODO(), options)
framework.ExpectNoError(err)
gomega.Expect(len(historyList.Items)).To(gomega.BeNumerically(">", 0))
return historyList
@@ -810,7 +811,7 @@ func curHistory(historyList *appsv1.ControllerRevisionList, ds *appsv1.DaemonSet
func waitFailedDaemonPodDeleted(c clientset.Interface, pod *v1.Pod) func() (bool, error) {
return func() (bool, error) {
if _, err := c.CoreV1().Pods(pod.Namespace).Get(pod.Name, metav1.GetOptions{}); err != nil {
if _, err := c.CoreV1().Pods(pod.Namespace).Get(context.TODO(), pod.Name, metav1.GetOptions{}); err != nil {
if apierrors.IsNotFound(err) {
return true, nil
}

View File

@@ -134,7 +134,7 @@ var _ = SIGDescribe("Deployment", func() {
})
func failureTrap(c clientset.Interface, ns string) {
deployments, err := c.AppsV1().Deployments(ns).List(metav1.ListOptions{LabelSelector: labels.Everything().String()})
deployments, err := c.AppsV1().Deployments(ns).List(context.TODO(), metav1.ListOptions{LabelSelector: labels.Everything().String()})
if err != nil {
framework.Logf("Could not list Deployments in namespace %q: %v", ns, err)
return
@@ -160,7 +160,7 @@ func failureTrap(c clientset.Interface, ns string) {
return
}
framework.Logf("Log out all the ReplicaSets if there is no deployment created")
rss, err := c.AppsV1().ReplicaSets(ns).List(metav1.ListOptions{LabelSelector: labels.Everything().String()})
rss, err := c.AppsV1().ReplicaSets(ns).List(context.TODO(), metav1.ListOptions{LabelSelector: labels.Everything().String()})
if err != nil {
framework.Logf("Could not list ReplicaSets in namespace %q: %v", ns, err)
return
@@ -172,7 +172,7 @@ func failureTrap(c clientset.Interface, ns string) {
framework.Logf("failed to get selector of ReplicaSet %s: %v", rs.Name, err)
}
options := metav1.ListOptions{LabelSelector: selector.String()}
podList, err := c.CoreV1().Pods(rs.Namespace).List(options)
podList, err := c.CoreV1().Pods(rs.Namespace).List(context.TODO(), options)
if err != nil {
framework.Logf("Failed to list Pods in namespace %s: %v", rs.Namespace, err)
continue
@@ -189,7 +189,7 @@ func intOrStrP(num int) *intstr.IntOrString {
}
func stopDeployment(c clientset.Interface, ns, deploymentName string) {
deployment, err := c.AppsV1().Deployments(ns).Get(deploymentName, metav1.GetOptions{})
deployment, err := c.AppsV1().Deployments(ns).Get(context.TODO(), deploymentName, metav1.GetOptions{})
framework.ExpectNoError(err)
framework.Logf("Deleting deployment %s", deploymentName)
@@ -197,20 +197,20 @@ func stopDeployment(c clientset.Interface, ns, deploymentName string) {
framework.ExpectNoError(err)
framework.Logf("Ensuring deployment %s was deleted", deploymentName)
_, err = c.AppsV1().Deployments(ns).Get(deployment.Name, metav1.GetOptions{})
_, err = c.AppsV1().Deployments(ns).Get(context.TODO(), deployment.Name, metav1.GetOptions{})
framework.ExpectError(err)
framework.ExpectEqual(apierrors.IsNotFound(err), true)
framework.Logf("Ensuring deployment %s's RSes were deleted", deploymentName)
selector, err := metav1.LabelSelectorAsSelector(deployment.Spec.Selector)
framework.ExpectNoError(err)
options := metav1.ListOptions{LabelSelector: selector.String()}
rss, err := c.AppsV1().ReplicaSets(ns).List(options)
rss, err := c.AppsV1().ReplicaSets(ns).List(context.TODO(), options)
framework.ExpectNoError(err)
gomega.Expect(rss.Items).Should(gomega.HaveLen(0))
framework.Logf("Ensuring deployment %s's Pods were deleted", deploymentName)
var pods *v1.PodList
if err := wait.PollImmediate(time.Second, timeout, func() (bool, error) {
pods, err = c.CoreV1().Pods(ns).List(options)
pods, err = c.CoreV1().Pods(ns).List(context.TODO(), options)
if err != nil {
return false, err
}
@@ -234,7 +234,7 @@ func testDeleteDeployment(f *framework.Framework) {
framework.Logf("Creating simple deployment %s", deploymentName)
d := e2edeploy.NewDeployment(deploymentName, replicas, podLabels, WebserverImageName, WebserverImage, appsv1.RollingUpdateDeploymentStrategyType)
d.Annotations = map[string]string{"test": "should-copy-to-replica-set", v1.LastAppliedConfigAnnotation: "should-not-copy-to-replica-set"}
deploy, err := c.AppsV1().Deployments(ns).Create(d)
deploy, err := c.AppsV1().Deployments(ns).Create(context.TODO(), d)
framework.ExpectNoError(err)
// Wait for it to be updated to revision 1
@@ -244,7 +244,7 @@ func testDeleteDeployment(f *framework.Framework) {
err = e2edeploy.WaitForDeploymentComplete(c, deploy)
framework.ExpectNoError(err)
deployment, err := c.AppsV1().Deployments(ns).Get(deploymentName, metav1.GetOptions{})
deployment, err := c.AppsV1().Deployments(ns).Get(context.TODO(), deploymentName, metav1.GetOptions{})
framework.ExpectNoError(err)
newRS, err := deploymentutil.GetNewReplicaSet(deployment, c.AppsV1())
framework.ExpectNoError(err)
@@ -270,7 +270,7 @@ func testRollingUpdateDeployment(f *framework.Framework) {
rs := newRS(rsName, replicas, rsPodLabels, WebserverImageName, WebserverImage, nil)
rs.Annotations = annotations
framework.Logf("Creating replica set %q (going to be adopted)", rs.Name)
_, err := c.AppsV1().ReplicaSets(ns).Create(rs)
_, err := c.AppsV1().ReplicaSets(ns).Create(context.TODO(), rs)
framework.ExpectNoError(err)
// Verify that the required pods have come up.
err = e2epod.VerifyPodsRunning(c, ns, "sample-pod", false, replicas)
@@ -280,7 +280,7 @@ func testRollingUpdateDeployment(f *framework.Framework) {
deploymentName := "test-rolling-update-deployment"
framework.Logf("Creating deployment %q", deploymentName)
d := e2edeploy.NewDeployment(deploymentName, replicas, deploymentPodLabels, AgnhostImageName, AgnhostImage, appsv1.RollingUpdateDeploymentStrategyType)
deploy, err := c.AppsV1().Deployments(ns).Create(d)
deploy, err := c.AppsV1().Deployments(ns).Create(context.TODO(), d)
framework.ExpectNoError(err)
// Wait for it to be updated to revision 3546343826724305833.
@@ -294,7 +294,7 @@ func testRollingUpdateDeployment(f *framework.Framework) {
// There should be 1 old RS (webserver-controller, which is adopted)
framework.Logf("Ensuring deployment %q has one old replica set (the one it adopted)", deploy.Name)
deployment, err := c.AppsV1().Deployments(ns).Get(deploymentName, metav1.GetOptions{})
deployment, err := c.AppsV1().Deployments(ns).Get(context.TODO(), deploymentName, metav1.GetOptions{})
framework.ExpectNoError(err)
_, allOldRSs, err := deploymentutil.GetOldReplicaSets(deployment, c.AppsV1())
framework.ExpectNoError(err)
@@ -309,7 +309,7 @@ func testRecreateDeployment(f *framework.Framework) {
deploymentName := "test-recreate-deployment"
framework.Logf("Creating deployment %q", deploymentName)
d := e2edeploy.NewDeployment(deploymentName, int32(1), map[string]string{"name": "sample-pod-3"}, AgnhostImageName, AgnhostImage, appsv1.RecreateDeploymentStrategyType)
deployment, err := c.AppsV1().Deployments(ns).Create(d)
deployment, err := c.AppsV1().Deployments(ns).Create(context.TODO(), d)
framework.ExpectNoError(err)
// Wait for it to be updated to revision 1
@@ -347,7 +347,7 @@ func testDeploymentCleanUpPolicy(f *framework.Framework) {
rsName := "test-cleanup-controller"
replicas := int32(1)
revisionHistoryLimit := utilpointer.Int32Ptr(0)
_, err := c.AppsV1().ReplicaSets(ns).Create(newRS(rsName, replicas, rsPodLabels, WebserverImageName, WebserverImage, nil))
_, err := c.AppsV1().ReplicaSets(ns).Create(context.TODO(), newRS(rsName, replicas, rsPodLabels, WebserverImageName, WebserverImage, nil))
framework.ExpectNoError(err)
// Verify that the required pods have come up.
@@ -358,7 +358,7 @@ func testDeploymentCleanUpPolicy(f *framework.Framework) {
deploymentName := "test-cleanup-deployment"
framework.Logf("Creating deployment %s", deploymentName)
pods, err := c.CoreV1().Pods(ns).List(metav1.ListOptions{LabelSelector: labels.Everything().String()})
pods, err := c.CoreV1().Pods(ns).List(context.TODO(), metav1.ListOptions{LabelSelector: labels.Everything().String()})
framework.ExpectNoError(err, "Failed to query for pods: %v", err)
options := metav1.ListOptions{
@@ -366,7 +366,7 @@ func testDeploymentCleanUpPolicy(f *framework.Framework) {
}
stopCh := make(chan struct{})
defer close(stopCh)
w, err := c.CoreV1().Pods(ns).Watch(options)
w, err := c.CoreV1().Pods(ns).Watch(context.TODO(), options)
framework.ExpectNoError(err)
go func() {
// There should be only one pod being created, which is the pod with the agnhost image.
@@ -396,7 +396,7 @@ func testDeploymentCleanUpPolicy(f *framework.Framework) {
}()
d := e2edeploy.NewDeployment(deploymentName, replicas, deploymentPodLabels, AgnhostImageName, AgnhostImage, appsv1.RollingUpdateDeploymentStrategyType)
d.Spec.RevisionHistoryLimit = revisionHistoryLimit
_, err = c.AppsV1().Deployments(ns).Create(d)
_, err = c.AppsV1().Deployments(ns).Create(context.TODO(), d)
framework.ExpectNoError(err)
ginkgo.By(fmt.Sprintf("Waiting for deployment %s history to be cleaned up", deploymentName))
@@ -418,7 +418,7 @@ func testRolloverDeployment(f *framework.Framework) {
rsName := "test-rollover-controller"
rsReplicas := int32(1)
_, err := c.AppsV1().ReplicaSets(ns).Create(newRS(rsName, rsReplicas, rsPodLabels, WebserverImageName, WebserverImage, nil))
_, err := c.AppsV1().ReplicaSets(ns).Create(context.TODO(), newRS(rsName, rsReplicas, rsPodLabels, WebserverImageName, WebserverImage, nil))
framework.ExpectNoError(err)
// Verify that the required pods have come up.
err = e2epod.VerifyPodsRunning(c, ns, podName, false, rsReplicas)
@@ -442,11 +442,11 @@ func testRolloverDeployment(f *framework.Framework) {
MaxSurge: intOrStrP(1),
}
newDeployment.Spec.MinReadySeconds = int32(10)
_, err = c.AppsV1().Deployments(ns).Create(newDeployment)
_, err = c.AppsV1().Deployments(ns).Create(context.TODO(), newDeployment)
framework.ExpectNoError(err)
// Verify that the pods were scaled up and down as expected.
deployment, err := c.AppsV1().Deployments(ns).Get(deploymentName, metav1.GetOptions{})
deployment, err := c.AppsV1().Deployments(ns).Get(context.TODO(), deploymentName, metav1.GetOptions{})
framework.ExpectNoError(err)
framework.Logf("Make sure deployment %q performs scaling operations", deploymentName)
// Make sure the deployment starts to scale up and down replica sets by checking if its updated replicas >= 1
@@ -457,7 +457,7 @@ func testRolloverDeployment(f *framework.Framework) {
framework.ExpectNoError(err)
framework.Logf("Ensure that both replica sets have 1 created replica")
oldRS, err := c.AppsV1().ReplicaSets(ns).Get(rsName, metav1.GetOptions{})
oldRS, err := c.AppsV1().ReplicaSets(ns).Get(context.TODO(), rsName, metav1.GetOptions{})
framework.ExpectNoError(err)
ensureReplicas(oldRS, int32(1))
newRS, err := deploymentutil.GetNewReplicaSet(deployment, c.AppsV1())
@@ -488,11 +488,11 @@ func testRolloverDeployment(f *framework.Framework) {
framework.ExpectNoError(err)
framework.Logf("Ensure that both old replica sets have no replicas")
oldRS, err = c.AppsV1().ReplicaSets(ns).Get(rsName, metav1.GetOptions{})
oldRS, err = c.AppsV1().ReplicaSets(ns).Get(context.TODO(), rsName, metav1.GetOptions{})
framework.ExpectNoError(err)
ensureReplicas(oldRS, int32(0))
// Not really the new replica set anymore but we GET by name so that's fine.
newRS, err = c.AppsV1().ReplicaSets(ns).Get(newRS.Name, metav1.GetOptions{})
newRS, err = c.AppsV1().ReplicaSets(ns).Get(context.TODO(), newRS.Name, metav1.GetOptions{})
framework.ExpectNoError(err)
ensureReplicas(newRS, int32(0))
}
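The Watch call in testDeploymentCleanUpPolicy earlier in this file still receives context.TODO() and relies on a separate stop channel for the consuming goroutine. With a real cancellable context the stream can follow the caller's lifetime directly; a sketch with a hypothetical helper name, under the assumption that cancelling the context tears down the underlying watch request:

// watchPodsUntilCancelled is a sketch: it consumes watch events until the
// caller cancels the context or the server closes the stream.
func watchPodsUntilCancelled(ctx context.Context, c clientset.Interface, ns string, options metav1.ListOptions) error {
	w, err := c.CoreV1().Pods(ns).Watch(ctx, options)
	if err != nil {
		return err
	}
	defer w.Stop()
	for {
		select {
		case <-ctx.Done():
			return ctx.Err()
		case event, ok := <-w.ResultChan():
			if !ok {
				return nil // stream closed by the server or by cancellation
			}
			framework.Logf("observed %s event for %T", event.Type, event.Object)
		}
	}
}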
@@ -532,7 +532,7 @@ func testIterativeDeployments(f *framework.Framework) {
d.Spec.RevisionHistoryLimit = &two
d.Spec.Template.Spec.TerminationGracePeriodSeconds = &zero
framework.Logf("Creating deployment %q", deploymentName)
deployment, err := c.AppsV1().Deployments(ns).Create(d)
deployment, err := c.AppsV1().Deployments(ns).Create(context.TODO(), d)
framework.ExpectNoError(err)
iterations := 20
@@ -595,7 +595,7 @@ func testIterativeDeployments(f *framework.Framework) {
selector, err := metav1.LabelSelectorAsSelector(deployment.Spec.Selector)
framework.ExpectNoError(err)
opts := metav1.ListOptions{LabelSelector: selector.String()}
podList, err := c.CoreV1().Pods(ns).List(opts)
podList, err := c.CoreV1().Pods(ns).List(context.TODO(), opts)
framework.ExpectNoError(err)
if len(podList.Items) == 0 {
framework.Logf("%02d: no deployment pods to delete", i)
@@ -607,7 +607,7 @@ func testIterativeDeployments(f *framework.Framework) {
}
name := podList.Items[p].Name
framework.Logf("%02d: deleting deployment pod %q", i, name)
err := c.CoreV1().Pods(ns).Delete(name, nil)
err := c.CoreV1().Pods(ns).Delete(context.TODO(), name, nil)
if err != nil && !apierrors.IsNotFound(err) {
framework.ExpectNoError(err)
}
@@ -616,7 +616,7 @@ func testIterativeDeployments(f *framework.Framework) {
}
// unpause the deployment if we end up pausing it
deployment, err = c.AppsV1().Deployments(ns).Get(deployment.Name, metav1.GetOptions{})
deployment, err = c.AppsV1().Deployments(ns).Get(context.TODO(), deployment.Name, metav1.GetOptions{})
framework.ExpectNoError(err)
if deployment.Spec.Paused {
deployment, err = e2edeploy.UpdateDeploymentWithRetries(c, ns, deployment.Name, func(update *appsv1.Deployment) {
@@ -646,7 +646,7 @@ func testDeploymentsControllerRef(f *framework.Framework) {
podLabels := map[string]string{"name": WebserverImageName}
replicas := int32(1)
d := e2edeploy.NewDeployment(deploymentName, replicas, podLabels, WebserverImageName, WebserverImage, appsv1.RollingUpdateDeploymentStrategyType)
deploy, err := c.AppsV1().Deployments(ns).Create(d)
deploy, err := c.AppsV1().Deployments(ns).Create(context.TODO(), d)
framework.ExpectNoError(err)
err = e2edeploy.WaitForDeploymentComplete(c, deploy)
framework.ExpectNoError(err)
@@ -673,7 +673,7 @@ func testDeploymentsControllerRef(f *framework.Framework) {
deploymentName = "test-adopt-deployment"
framework.Logf("Creating Deployment %q to adopt the ReplicaSet", deploymentName)
d = e2edeploy.NewDeployment(deploymentName, replicas, podLabels, WebserverImageName, WebserverImage, appsv1.RollingUpdateDeploymentStrategyType)
deploy, err = c.AppsV1().Deployments(ns).Create(d)
deploy, err = c.AppsV1().Deployments(ns).Create(context.TODO(), d)
framework.ExpectNoError(err)
err = e2edeploy.WaitForDeploymentComplete(c, deploy)
framework.ExpectNoError(err)
@@ -708,7 +708,7 @@ func testProportionalScalingDeployment(f *framework.Framework) {
d.Spec.Strategy.RollingUpdate.MaxUnavailable = intOrStrP(2)
framework.Logf("Creating deployment %q", deploymentName)
deployment, err := c.AppsV1().Deployments(ns).Create(d)
deployment, err := c.AppsV1().Deployments(ns).Create(context.TODO(), d)
framework.ExpectNoError(err)
framework.Logf("Waiting for observed generation %d", deployment.Generation)
@@ -756,7 +756,7 @@ func testProportionalScalingDeployment(f *framework.Framework) {
// The desired replicas wait makes sure that the RS controller has created expected number of pods.
framework.Logf("Waiting for the first rollout's replicaset of deployment %q to have desired number of replicas", deploymentName)
firstRS, err = c.AppsV1().ReplicaSets(ns).Get(firstRS.Name, metav1.GetOptions{})
firstRS, err = c.AppsV1().ReplicaSets(ns).Get(context.TODO(), firstRS.Name, metav1.GetOptions{})
framework.ExpectNoError(err)
err = waitForReplicaSetDesiredReplicas(c.AppsV1(), firstRS)
framework.ExpectNoError(err)
@@ -780,7 +780,7 @@ func testProportionalScalingDeployment(f *framework.Framework) {
// The desired replicas wait makes sure that the RS controller has created expected number of pods.
framework.Logf("Waiting for the second rollout's replicaset of deployment %q to have desired number of replicas", deploymentName)
secondRS, err = c.AppsV1().ReplicaSets(ns).Get(secondRS.Name, metav1.GetOptions{})
secondRS, err = c.AppsV1().ReplicaSets(ns).Get(context.TODO(), secondRS.Name, metav1.GetOptions{})
framework.ExpectNoError(err)
err = waitForReplicaSetDesiredReplicas(c.AppsV1(), secondRS)
framework.ExpectNoError(err)
@@ -801,9 +801,9 @@ func testProportionalScalingDeployment(f *framework.Framework) {
framework.ExpectNoError(err)
framework.Logf("Waiting for the replicasets of deployment %q to have desired number of replicas", deploymentName)
firstRS, err = c.AppsV1().ReplicaSets(ns).Get(firstRS.Name, metav1.GetOptions{})
firstRS, err = c.AppsV1().ReplicaSets(ns).Get(context.TODO(), firstRS.Name, metav1.GetOptions{})
framework.ExpectNoError(err)
secondRS, err = c.AppsV1().ReplicaSets(ns).Get(secondRS.Name, metav1.GetOptions{})
secondRS, err = c.AppsV1().ReplicaSets(ns).Get(context.TODO(), secondRS.Name, metav1.GetOptions{})
framework.ExpectNoError(err)
// First rollout's replicaset should have .spec.replicas = 8 + (30-10)*(8/13) = 8 + 12 = 20 replicas.
@@ -846,7 +846,7 @@ func waitDeploymentReplicaSetsOrphaned(c clientset.Interface, ns string, label m
func listDeploymentReplicaSets(c clientset.Interface, ns string, label map[string]string) *appsv1.ReplicaSetList {
selector := labels.Set(label).AsSelector()
options := metav1.ListOptions{LabelSelector: selector.String()}
rsList, err := c.AppsV1().ReplicaSets(ns).List(options)
rsList, err := c.AppsV1().ReplicaSets(ns).List(context.TODO(), options)
framework.ExpectNoError(err)
gomega.Expect(len(rsList.Items)).To(gomega.BeNumerically(">", 0))
return rsList
@@ -856,7 +856,7 @@ func orphanDeploymentReplicaSets(c clientset.Interface, d *appsv1.Deployment) er
trueVar := true
deleteOptions := &metav1.DeleteOptions{OrphanDependents: &trueVar}
deleteOptions.Preconditions = metav1.NewUIDPreconditions(string(d.UID))
return c.AppsV1().Deployments(d.Namespace).Delete(d.Name, deleteOptions)
return c.AppsV1().Deployments(d.Namespace).Delete(context.TODO(), d.Name, deleteOptions)
}
func testRollingUpdateDeploymentWithLocalTrafficLoadBalancer(f *framework.Framework) {
@@ -890,7 +890,7 @@ func testRollingUpdateDeploymentWithLocalTrafficLoadBalancer(f *framework.Framew
MaxSurge: intOrStrP(1),
MaxUnavailable: intOrStrP(0),
}
deployment, err := c.AppsV1().Deployments(ns).Create(d)
deployment, err := c.AppsV1().Deployments(ns).Create(context.TODO(), d)
framework.ExpectNoError(err)
err = e2edeploy.WaitForDeploymentComplete(c, deployment)
framework.ExpectNoError(err)
@@ -1023,7 +1023,7 @@ func watchRecreateDeployment(c clientset.Interface, d *appsv1.Deployment) error
return fmt.Errorf("deployment %q does not use a Recreate strategy: %s", d.Name, d.Spec.Strategy.Type)
}
w, err := c.AppsV1().Deployments(d.Namespace).Watch(metav1.SingleObject(metav1.ObjectMeta{Name: d.Name, ResourceVersion: d.ResourceVersion}))
w, err := c.AppsV1().Deployments(d.Namespace).Watch(context.TODO(), metav1.SingleObject(metav1.ObjectMeta{Name: d.Name, ResourceVersion: d.ResourceVersion}))
if err != nil {
return err
}
@@ -1065,7 +1065,7 @@ func waitForDeploymentOldRSsNum(c clientset.Interface, ns, deploymentName string
var d *appsv1.Deployment
pollErr := wait.PollImmediate(poll, 5*time.Minute, func() (bool, error) {
deployment, err := c.AppsV1().Deployments(ns).Get(deploymentName, metav1.GetOptions{})
deployment, err := c.AppsV1().Deployments(ns).Get(context.TODO(), deploymentName, metav1.GetOptions{})
if err != nil {
return false, err
}
@@ -1088,7 +1088,7 @@ func waitForDeploymentOldRSsNum(c clientset.Interface, ns, deploymentName string
func waitForReplicaSetDesiredReplicas(rsClient appsclient.ReplicaSetsGetter, replicaSet *appsv1.ReplicaSet) error {
desiredGeneration := replicaSet.Generation
err := wait.PollImmediate(framework.Poll, framework.PollShortTimeout, func() (bool, error) {
rs, err := rsClient.ReplicaSets(replicaSet.Namespace).Get(replicaSet.Name, metav1.GetOptions{})
rs, err := rsClient.ReplicaSets(replicaSet.Namespace).Get(context.TODO(), replicaSet.Name, metav1.GetOptions{})
if err != nil {
return false, err
}
@@ -1104,7 +1104,7 @@ func waitForReplicaSetDesiredReplicas(rsClient appsclient.ReplicaSetsGetter, rep
func waitForReplicaSetTargetSpecReplicas(c clientset.Interface, replicaSet *appsv1.ReplicaSet, targetReplicaNum int32) error {
desiredGeneration := replicaSet.Generation
err := wait.PollImmediate(framework.Poll, framework.PollShortTimeout, func() (bool, error) {
rs, err := c.AppsV1().ReplicaSets(replicaSet.Namespace).Get(replicaSet.Name, metav1.GetOptions{})
rs, err := c.AppsV1().ReplicaSets(replicaSet.Namespace).Get(context.TODO(), replicaSet.Name, metav1.GetOptions{})
if err != nil {
return false, err
}
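
Across this file the rewrite is mechanical: every client-go call gains a context.Context as its first argument, and the e2e helpers pass context.TODO() because no caller-supplied context reaches them. A minimal sketch of what one of these poll-and-get helpers looks like against the updated signatures; the function and package names here are illustrative, not taken from the diff.

package e2esketch

import (
    "context"
    "time"

    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/apimachinery/pkg/util/wait"
    "k8s.io/client-go/kubernetes"
)

// waitForDeploymentReadyReplicas polls a Deployment until it reports the
// wanted number of ready replicas; Get now takes the context first.
func waitForDeploymentReadyReplicas(c kubernetes.Interface, ns, name string, want int32) error {
    return wait.PollImmediate(2*time.Second, 5*time.Minute, func() (bool, error) {
        d, err := c.AppsV1().Deployments(ns).Get(context.TODO(), name, metav1.GetOptions{})
        if err != nil {
            return false, err
        }
        return d.Status.ReadyReplicas == want, nil
    })
}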

View File

@@ -17,6 +17,7 @@ limitations under the License.
package apps
import (
"context"
"fmt"
"time"
@@ -69,7 +70,7 @@ var _ = SIGDescribe("DisruptionController", func() {
// Since disruptionAllowed starts out 0, if we see it ever become positive,
// that means the controller is working.
err := wait.PollImmediate(framework.Poll, timeout, func() (bool, error) {
pdb, err := cs.PolicyV1beta1().PodDisruptionBudgets(ns).Get("foo", metav1.GetOptions{})
pdb, err := cs.PolicyV1beta1().PodDisruptionBudgets(ns).Get(context.TODO(), "foo", metav1.GetOptions{})
if err != nil {
return false, err
}
@@ -240,7 +241,7 @@ func createPDBMinAvailableOrDie(cs kubernetes.Interface, ns string, minAvailable
MinAvailable: &minAvailable,
},
}
_, err := cs.PolicyV1beta1().PodDisruptionBudgets(ns).Create(&pdb)
_, err := cs.PolicyV1beta1().PodDisruptionBudgets(ns).Create(context.TODO(), &pdb)
framework.ExpectNoError(err, "Waiting for the pdb to be created with minAvailable %d in namespace %s", minAvailable.IntVal, ns)
waitForPdbToBeProcessed(cs, ns)
}
@@ -256,19 +257,19 @@ func createPDBMaxUnavailableOrDie(cs kubernetes.Interface, ns string, maxUnavail
MaxUnavailable: &maxUnavailable,
},
}
_, err := cs.PolicyV1beta1().PodDisruptionBudgets(ns).Create(&pdb)
_, err := cs.PolicyV1beta1().PodDisruptionBudgets(ns).Create(context.TODO(), &pdb)
framework.ExpectNoError(err, "Waiting for the pdb to be created with maxUnavailable %d in namespace %s", maxUnavailable.IntVal, ns)
waitForPdbToBeProcessed(cs, ns)
}
func updatePDBMinAvailableOrDie(cs kubernetes.Interface, ns string, minAvailable intstr.IntOrString) {
err := retry.RetryOnConflict(retry.DefaultRetry, func() error {
old, err := cs.PolicyV1beta1().PodDisruptionBudgets(ns).Get("foo", metav1.GetOptions{})
old, err := cs.PolicyV1beta1().PodDisruptionBudgets(ns).Get(context.TODO(), "foo", metav1.GetOptions{})
if err != nil {
return err
}
old.Spec.MinAvailable = &minAvailable
if _, err := cs.PolicyV1beta1().PodDisruptionBudgets(ns).Update(old); err != nil {
if _, err := cs.PolicyV1beta1().PodDisruptionBudgets(ns).Update(context.TODO(), old); err != nil {
return err
}
return nil
@@ -297,7 +298,7 @@ func createPodsOrDie(cs kubernetes.Interface, ns string, n int) {
},
}
_, err := cs.CoreV1().Pods(ns).Create(pod)
_, err := cs.CoreV1().Pods(ns).Create(context.TODO(), pod)
framework.ExpectNoError(err, "Creating pod %q in namespace %q", pod.Name, ns)
}
}
@@ -305,7 +306,7 @@ func createPodsOrDie(cs kubernetes.Interface, ns string, n int) {
func waitForPodsOrDie(cs kubernetes.Interface, ns string, n int) {
ginkgo.By("Waiting for all pods to be running")
err := wait.PollImmediate(framework.Poll, schedulingTimeout, func() (bool, error) {
pods, err := cs.CoreV1().Pods(ns).List(metav1.ListOptions{LabelSelector: "foo=bar"})
pods, err := cs.CoreV1().Pods(ns).List(context.TODO(), metav1.ListOptions{LabelSelector: "foo=bar"})
if err != nil {
return false, err
}
@@ -364,14 +365,14 @@ func createReplicaSetOrDie(cs kubernetes.Interface, ns string, size int32, exclu
},
}
_, err := cs.AppsV1().ReplicaSets(ns).Create(rs)
_, err := cs.AppsV1().ReplicaSets(ns).Create(context.TODO(), rs)
framework.ExpectNoError(err, "Creating replica set %q in namespace %q", rs.Name, ns)
}
func locateRunningPod(cs kubernetes.Interface, ns string) (pod *v1.Pod, err error) {
ginkgo.By("locating a running pod")
err = wait.PollImmediate(framework.Poll, schedulingTimeout, func() (bool, error) {
podList, err := cs.CoreV1().Pods(ns).List(metav1.ListOptions{})
podList, err := cs.CoreV1().Pods(ns).List(context.TODO(), metav1.ListOptions{})
if err != nil {
return false, err
}
@@ -392,7 +393,7 @@ func locateRunningPod(cs kubernetes.Interface, ns string) (pod *v1.Pod, err erro
func waitForPdbToBeProcessed(cs kubernetes.Interface, ns string) {
ginkgo.By("Waiting for the pdb to be processed")
err := wait.PollImmediate(framework.Poll, schedulingTimeout, func() (bool, error) {
pdb, err := cs.PolicyV1beta1().PodDisruptionBudgets(ns).Get("foo", metav1.GetOptions{})
pdb, err := cs.PolicyV1beta1().PodDisruptionBudgets(ns).Get(context.TODO(), "foo", metav1.GetOptions{})
if err != nil {
return false, err
}
@@ -407,7 +408,7 @@ func waitForPdbToBeProcessed(cs kubernetes.Interface, ns string) {
func waitForPdbToObserveHealthyPods(cs kubernetes.Interface, ns string, healthyCount int32) {
ginkgo.By("Waiting for the pdb to observed all healthy pods")
err := wait.PollImmediate(framework.Poll, wait.ForeverTestTimeout, func() (bool, error) {
pdb, err := cs.PolicyV1beta1().PodDisruptionBudgets(ns).Get("foo", metav1.GetOptions{})
pdb, err := cs.PolicyV1beta1().PodDisruptionBudgets(ns).Get(context.TODO(), "foo", metav1.GetOptions{})
if err != nil {
return false, err
}
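
The conflict-retry update helpers change in the same way: both the Get that refetches the object and the Update that pushes the mutation take the context first, while retry.RetryOnConflict itself is untouched. A sketch of that shape; the PDB name "foo" mirrors the fixture used above, the rest is illustrative.

package e2esketch

import (
    "context"

    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/apimachinery/pkg/util/intstr"
    "k8s.io/client-go/kubernetes"
    "k8s.io/client-go/util/retry"
)

// setPDBMinAvailable rewrites spec.minAvailable on the "foo" budget,
// refetching on every attempt so conflicts are resolved against the
// latest resourceVersion.
func setPDBMinAvailable(cs kubernetes.Interface, ns string, min intstr.IntOrString) error {
    return retry.RetryOnConflict(retry.DefaultRetry, func() error {
        pdb, err := cs.PolicyV1beta1().PodDisruptionBudgets(ns).Get(context.TODO(), "foo", metav1.GetOptions{})
        if err != nil {
            return err
        }
        pdb.Spec.MinAvailable = &min
        _, err = cs.PolicyV1beta1().PodDisruptionBudgets(ns).Update(context.TODO(), pdb)
        return err
    })
}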

View File

@@ -17,6 +17,7 @@ limitations under the License.
package apps
import (
"context"
"fmt"
"time"
@@ -260,7 +261,7 @@ var _ = SIGDescribe("Job", func() {
// waitForJobFailure uses c to wait for up to timeout for the Job named jobName in namespace ns to fail.
func waitForJobFailure(c clientset.Interface, ns, jobName string, timeout time.Duration, reason string) error {
return wait.Poll(framework.Poll, timeout, func() (bool, error) {
curr, err := c.BatchV1().Jobs(ns).Get(jobName, metav1.GetOptions{})
curr, err := c.BatchV1().Jobs(ns).Get(context.TODO(), jobName, metav1.GetOptions{})
if err != nil {
return false, err
}
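
In waitForJobFailure only the Get call changes, but the new parameter is what lets a caller hand in something more useful than context.TODO(), for example a deadline spanning the whole wait so an expired context also aborts the in-flight request. That usage is an assumption about how the new signature can be exercised, not something this diff does; a sketch with hypothetical names.

package e2esketch

import (
    "context"
    "time"

    batchv1 "k8s.io/api/batch/v1"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/apimachinery/pkg/util/wait"
    "k8s.io/client-go/kubernetes"
)

// waitForJobComplete threads a caller-supplied context into every Get and
// stops polling as soon as that context expires.
func waitForJobComplete(ctx context.Context, c kubernetes.Interface, ns, name string, timeout time.Duration) error {
    ctx, cancel := context.WithTimeout(ctx, timeout)
    defer cancel()
    return wait.PollImmediateUntil(2*time.Second, func() (bool, error) {
        job, err := c.BatchV1().Jobs(ns).Get(ctx, name, metav1.GetOptions{})
        if err != nil {
            return false, err
        }
        for _, cond := range job.Status.Conditions {
            if cond.Type == batchv1.JobComplete && cond.Status == "True" {
                return true, nil
            }
        }
        return false, nil
    }, ctx.Done())
}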

View File

@@ -17,6 +17,7 @@ limitations under the License.
package apps
import (
"context"
"fmt"
"strings"
"time"
@@ -100,7 +101,7 @@ func podOnNode(podName, nodeName string, image string) *v1.Pod {
}
func newPodOnNode(c clientset.Interface, namespace, podName, nodeName string) error {
pod, err := c.CoreV1().Pods(namespace).Create(podOnNode(podName, nodeName, framework.ServeHostnameImage))
pod, err := c.CoreV1().Pods(namespace).Create(context.TODO(), podOnNode(podName, nodeName, framework.ServeHostnameImage))
if err == nil {
framework.Logf("Created pod %s on node %s", pod.ObjectMeta.Name, nodeName)
} else {
@@ -145,14 +146,14 @@ var _ = SIGDescribe("Network Partition [Disruptive] [Slow]", func() {
ginkgo.By("choose a node - we will block all network traffic on this node")
var podOpts metav1.ListOptions
nodeOpts := metav1.ListOptions{}
nodes, err := c.CoreV1().Nodes().List(nodeOpts)
nodes, err := c.CoreV1().Nodes().List(context.TODO(), nodeOpts)
framework.ExpectNoError(err)
e2enode.Filter(nodes, func(node v1.Node) bool {
if !e2enode.IsConditionSetAsExpected(&node, v1.NodeReady, true) {
return false
}
podOpts = metav1.ListOptions{FieldSelector: fields.OneTermEqualSelector(api.PodHostField, node.Name).String()}
pods, err := c.CoreV1().Pods(metav1.NamespaceAll).List(podOpts)
pods, err := c.CoreV1().Pods(metav1.NamespaceAll).List(context.TODO(), podOpts)
if err != nil || len(pods.Items) <= 0 {
return false
}
@@ -176,12 +177,12 @@ var _ = SIGDescribe("Network Partition [Disruptive] [Slow]", func() {
&cache.ListWatch{
ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
options.FieldSelector = nodeSelector.String()
obj, err := f.ClientSet.CoreV1().Nodes().List(options)
obj, err := f.ClientSet.CoreV1().Nodes().List(context.TODO(), options)
return runtime.Object(obj), err
},
WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
options.FieldSelector = nodeSelector.String()
return f.ClientSet.CoreV1().Nodes().Watch(options)
return f.ClientSet.CoreV1().Nodes().Watch(context.TODO(), options)
},
},
&v1.Node{},
@@ -256,11 +257,11 @@ var _ = SIGDescribe("Network Partition [Disruptive] [Slow]", func() {
ginkgo.By("choose a node with at least one pod - we will block some network traffic on this node")
label := labels.SelectorFromSet(labels.Set(map[string]string{"name": name}))
options := metav1.ListOptions{LabelSelector: label.String()}
pods, err := c.CoreV1().Pods(ns).List(options) // list pods after all have been scheduled
pods, err := c.CoreV1().Pods(ns).List(context.TODO(), options) // list pods after all have been scheduled
framework.ExpectNoError(err)
nodeName := pods.Items[0].Spec.NodeName
node, err := c.CoreV1().Nodes().Get(nodeName, metav1.GetOptions{})
node, err := c.CoreV1().Nodes().Get(context.TODO(), nodeName, metav1.GetOptions{})
framework.ExpectNoError(err)
// This creates a temporary network partition, verifies that 'podNameToDisappear',
@@ -298,7 +299,7 @@ var _ = SIGDescribe("Network Partition [Disruptive] [Slow]", func() {
// verify that it is really on the requested node
{
pod, err := c.CoreV1().Pods(ns).Get(additionalPod, metav1.GetOptions{})
pod, err := c.CoreV1().Pods(ns).Get(context.TODO(), additionalPod, metav1.GetOptions{})
framework.ExpectNoError(err)
if pod.Spec.NodeName != node.Name {
framework.Logf("Pod %s found on invalid node: %s instead of %s", pod.Name, pod.Spec.NodeName, node.Name)
@@ -325,11 +326,11 @@ var _ = SIGDescribe("Network Partition [Disruptive] [Slow]", func() {
ginkgo.By("choose a node with at least one pod - we will block some network traffic on this node")
label := labels.SelectorFromSet(labels.Set(map[string]string{"name": name}))
options := metav1.ListOptions{LabelSelector: label.String()}
pods, err := c.CoreV1().Pods(ns).List(options) // list pods after all have been scheduled
pods, err := c.CoreV1().Pods(ns).List(context.TODO(), options) // list pods after all have been scheduled
framework.ExpectNoError(err)
nodeName := pods.Items[0].Spec.NodeName
node, err := c.CoreV1().Nodes().Get(nodeName, metav1.GetOptions{})
node, err := c.CoreV1().Nodes().Get(context.TODO(), nodeName, metav1.GetOptions{})
framework.ExpectNoError(err)
// This creates a temporary network partition, verifies that 'podNameToDisappear',
@@ -367,7 +368,7 @@ var _ = SIGDescribe("Network Partition [Disruptive] [Slow]", func() {
e2eskipper.SkipUnlessProviderIs("gke")
ginkgo.By("creating service " + headlessSvcName + " in namespace " + f.Namespace.Name)
headlessService := e2eservice.CreateServiceSpec(headlessSvcName, "", true, labels)
_, err := f.ClientSet.CoreV1().Services(f.Namespace.Name).Create(headlessService)
_, err := f.ClientSet.CoreV1().Services(f.Namespace.Name).Create(context.TODO(), headlessService)
framework.ExpectNoError(err)
c = f.ClientSet
ns = f.Namespace.Name
@@ -385,7 +386,7 @@ var _ = SIGDescribe("Network Partition [Disruptive] [Slow]", func() {
petMounts := []v1.VolumeMount{{Name: "datadir", MountPath: "/data/"}}
podMounts := []v1.VolumeMount{{Name: "home", MountPath: "/home"}}
ps := e2esset.NewStatefulSet(psName, ns, headlessSvcName, 3, petMounts, podMounts, labels)
_, err := c.AppsV1().StatefulSets(ns).Create(ps)
_, err := c.AppsV1().StatefulSets(ns).Create(context.TODO(), ps)
framework.ExpectNoError(err)
nn, err := e2enode.TotalRegistered(f.ClientSet)
@@ -402,13 +403,13 @@ var _ = SIGDescribe("Network Partition [Disruptive] [Slow]", func() {
e2eskipper.SkipUnlessSSHKeyPresent()
ps := e2esset.NewStatefulSet(psName, ns, headlessSvcName, 3, []v1.VolumeMount{}, []v1.VolumeMount{}, labels)
_, err := c.AppsV1().StatefulSets(ns).Create(ps)
_, err := c.AppsV1().StatefulSets(ns).Create(context.TODO(), ps)
framework.ExpectNoError(err)
e2esset.WaitForRunningAndReady(c, *ps.Spec.Replicas, ps)
pod := e2esset.GetPodList(c, ps).Items[0]
node, err := c.CoreV1().Nodes().Get(pod.Spec.NodeName, metav1.GetOptions{})
node, err := c.CoreV1().Nodes().Get(context.TODO(), pod.Spec.NodeName, metav1.GetOptions{})
framework.ExpectNoError(err)
// Blocks outgoing network traffic on 'node'. Then verifies that 'podNameToDisappear',
@@ -450,11 +451,11 @@ var _ = SIGDescribe("Network Partition [Disruptive] [Slow]", func() {
ginkgo.By("choose a node with at least one pod - we will block some network traffic on this node")
options := metav1.ListOptions{LabelSelector: label.String()}
pods, err := c.CoreV1().Pods(ns).List(options) // list pods after all have been scheduled
pods, err := c.CoreV1().Pods(ns).List(context.TODO(), options) // list pods after all have been scheduled
framework.ExpectNoError(err)
nodeName := pods.Items[0].Spec.NodeName
node, err := c.CoreV1().Nodes().Get(nodeName, metav1.GetOptions{})
node, err := c.CoreV1().Nodes().Get(context.TODO(), nodeName, metav1.GetOptions{})
framework.ExpectNoError(err)
// This creates a temporary network partition, verifies that the job has 'parallelism' number of
@@ -501,7 +502,7 @@ var _ = SIGDescribe("Network Partition [Disruptive] [Slow]", func() {
return false
}
podOpts = metav1.ListOptions{FieldSelector: fields.OneTermEqualSelector(api.PodHostField, node.Name).String()}
pods, err := c.CoreV1().Pods(metav1.NamespaceAll).List(podOpts)
pods, err := c.CoreV1().Pods(metav1.NamespaceAll).List(context.TODO(), podOpts)
if err != nil || len(pods.Items) <= 0 {
return false
}
@@ -515,7 +516,7 @@ var _ = SIGDescribe("Network Partition [Disruptive] [Slow]", func() {
if err := e2epod.WaitForMatchPodsCondition(c, podOpts, "Running and Ready", podReadyTimeout, testutils.PodRunningReadyOrSucceeded); err != nil {
framework.Failf("Pods on node %s are not ready and running within %v: %v", node.Name, podReadyTimeout, err)
}
pods, err := c.CoreV1().Pods(metav1.NamespaceAll).List(podOpts)
pods, err := c.CoreV1().Pods(metav1.NamespaceAll).List(context.TODO(), podOpts)
framework.ExpectNoError(err)
podTolerationTimes := map[string]time.Duration{}
// This test doesn't add tolerations by itself, but because they may be present in the cluster
@@ -564,12 +565,12 @@ var _ = SIGDescribe("Network Partition [Disruptive] [Slow]", func() {
&cache.ListWatch{
ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
options.FieldSelector = nodeSelector.String()
obj, err := f.ClientSet.CoreV1().Nodes().List(options)
obj, err := f.ClientSet.CoreV1().Nodes().List(context.TODO(), options)
return runtime.Object(obj), err
},
WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
options.FieldSelector = nodeSelector.String()
return f.ClientSet.CoreV1().Nodes().Watch(options)
return f.ClientSet.CoreV1().Nodes().Watch(context.TODO(), options)
},
},
&v1.Node{},
@@ -625,7 +626,7 @@ var _ = SIGDescribe("Network Partition [Disruptive] [Slow]", func() {
sleepTime := maxTolerationTime + 20*time.Second
ginkgo.By(fmt.Sprintf("Sleeping for %v and checking if all Pods were evicted", sleepTime))
time.Sleep(sleepTime)
pods, err = c.CoreV1().Pods(v1.NamespaceAll).List(podOpts)
pods, err = c.CoreV1().Pods(v1.NamespaceAll).List(context.TODO(), podOpts)
framework.ExpectNoError(err)
seenRunning := []string{}
for _, pod := range pods.Items {
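
The informer plumbing in this file shows where the context cannot simply be threaded through: cache.ListWatch still expects context-free ListFunc and WatchFunc closures, so the context is supplied inside them. A minimal sketch of that wiring, assuming only the client-go types already used above; the field selector and function name are illustrative.

package e2esketch

import (
    "context"

    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/apimachinery/pkg/fields"
    "k8s.io/apimachinery/pkg/runtime"
    "k8s.io/apimachinery/pkg/watch"
    "k8s.io/client-go/kubernetes"
    "k8s.io/client-go/tools/cache"
)

// nodeListWatch builds a ListWatch scoped to a single node; each closure
// injects context.TODO() because the closure signatures take no context.
func nodeListWatch(c kubernetes.Interface, nodeName string) *cache.ListWatch {
    selector := fields.OneTermEqualSelector("metadata.name", nodeName)
    return &cache.ListWatch{
        ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
            options.FieldSelector = selector.String()
            return c.CoreV1().Nodes().List(context.TODO(), options)
        },
        WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
            options.FieldSelector = selector.String()
            return c.CoreV1().Nodes().Watch(context.TODO(), options)
        },
    }
}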

View File

@@ -17,6 +17,7 @@ limitations under the License.
package apps
import (
"context"
"fmt"
"time"
@@ -126,7 +127,7 @@ func TestReplicationControllerServeImageOrFail(f *framework.Framework, test stri
ginkgo.By(fmt.Sprintf("Creating replication controller %s", name))
newRC := newRC(name, replicas, map[string]string{"name": name}, name, image, []string{"serve-hostname"})
newRC.Spec.Template.Spec.Containers[0].Ports = []v1.ContainerPort{{ContainerPort: 9376}}
_, err := f.ClientSet.CoreV1().ReplicationControllers(f.Namespace.Name).Create(newRC)
_, err := f.ClientSet.CoreV1().ReplicationControllers(f.Namespace.Name).Create(context.TODO(), newRC)
framework.ExpectNoError(err)
// Check that pods for the new RC were created.
@@ -144,7 +145,7 @@ func TestReplicationControllerServeImageOrFail(f *framework.Framework, test stri
}
err = f.WaitForPodRunning(pod.Name)
if err != nil {
updatePod, getErr := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Get(pod.Name, metav1.GetOptions{})
updatePod, getErr := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Get(context.TODO(), pod.Name, metav1.GetOptions{})
if getErr == nil {
err = fmt.Errorf("Pod %q never run (phase: %s, conditions: %+v): %v", updatePod.Name, updatePod.Status.Phase, updatePod.Status.Conditions, err)
} else {
@@ -183,11 +184,11 @@ func testReplicationControllerConditionCheck(f *framework.Framework) {
framework.Logf("Creating quota %q that allows only two pods to run in the current namespace", name)
quota := newPodQuota(name, "2")
_, err := c.CoreV1().ResourceQuotas(namespace).Create(quota)
_, err := c.CoreV1().ResourceQuotas(namespace).Create(context.TODO(), quota)
framework.ExpectNoError(err)
err = wait.PollImmediate(1*time.Second, 1*time.Minute, func() (bool, error) {
quota, err = c.CoreV1().ResourceQuotas(namespace).Get(name, metav1.GetOptions{})
quota, err = c.CoreV1().ResourceQuotas(namespace).Get(context.TODO(), name, metav1.GetOptions{})
if err != nil {
return false, err
}
@@ -202,14 +203,14 @@ func testReplicationControllerConditionCheck(f *framework.Framework) {
ginkgo.By(fmt.Sprintf("Creating rc %q that asks for more than the allowed pod quota", name))
rc := newRC(name, 3, map[string]string{"name": name}, WebserverImageName, WebserverImage, nil)
rc, err = c.CoreV1().ReplicationControllers(namespace).Create(rc)
rc, err = c.CoreV1().ReplicationControllers(namespace).Create(context.TODO(), rc)
framework.ExpectNoError(err)
ginkgo.By(fmt.Sprintf("Checking rc %q has the desired failure condition set", name))
generation := rc.Generation
conditions := rc.Status.Conditions
err = wait.PollImmediate(1*time.Second, 1*time.Minute, func() (bool, error) {
rc, err = c.CoreV1().ReplicationControllers(namespace).Get(name, metav1.GetOptions{})
rc, err = c.CoreV1().ReplicationControllers(namespace).Get(context.TODO(), name, metav1.GetOptions{})
if err != nil {
return false, err
}
@@ -238,7 +239,7 @@ func testReplicationControllerConditionCheck(f *framework.Framework) {
generation = rc.Generation
conditions = rc.Status.Conditions
err = wait.PollImmediate(1*time.Second, 1*time.Minute, func() (bool, error) {
rc, err = c.CoreV1().ReplicationControllers(namespace).Get(name, metav1.GetOptions{})
rc, err = c.CoreV1().ReplicationControllers(namespace).Get(context.TODO(), name, metav1.GetOptions{})
if err != nil {
return false, err
}
@@ -281,12 +282,12 @@ func testRCAdoptMatchingOrphans(f *framework.Framework) {
replicas := int32(1)
rcSt := newRC(name, replicas, map[string]string{"name": name}, name, WebserverImage, nil)
rcSt.Spec.Selector = map[string]string{"name": name}
rc, err := f.ClientSet.CoreV1().ReplicationControllers(f.Namespace.Name).Create(rcSt)
rc, err := f.ClientSet.CoreV1().ReplicationControllers(f.Namespace.Name).Create(context.TODO(), rcSt)
framework.ExpectNoError(err)
ginkgo.By("Then the orphan pod is adopted")
err = wait.PollImmediate(1*time.Second, 1*time.Minute, func() (bool, error) {
p2, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Get(p.Name, metav1.GetOptions{})
p2, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Get(context.TODO(), p.Name, metav1.GetOptions{})
// The Pod p should either be adopted or deleted by the RC
if apierrors.IsNotFound(err) {
return true, nil
@@ -310,7 +311,7 @@ func testRCReleaseControlledNotMatching(f *framework.Framework) {
replicas := int32(1)
rcSt := newRC(name, replicas, map[string]string{"name": name}, name, WebserverImage, nil)
rcSt.Spec.Selector = map[string]string{"name": name}
rc, err := f.ClientSet.CoreV1().ReplicationControllers(f.Namespace.Name).Create(rcSt)
rc, err := f.ClientSet.CoreV1().ReplicationControllers(f.Namespace.Name).Create(context.TODO(), rcSt)
framework.ExpectNoError(err)
ginkgo.By("When the matched label of one of its pods change")
@@ -319,11 +320,11 @@ func testRCReleaseControlledNotMatching(f *framework.Framework) {
p := pods.Items[0]
err = wait.PollImmediate(1*time.Second, 1*time.Minute, func() (bool, error) {
pod, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Get(p.Name, metav1.GetOptions{})
pod, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Get(context.TODO(), p.Name, metav1.GetOptions{})
framework.ExpectNoError(err)
pod.Labels = map[string]string{"name": "not-matching-name"}
_, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Update(pod)
_, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Update(context.TODO(), pod)
if err != nil && apierrors.IsConflict(err) {
return false, nil
}
@@ -336,7 +337,7 @@ func testRCReleaseControlledNotMatching(f *framework.Framework) {
ginkgo.By("Then the pod is released")
err = wait.PollImmediate(1*time.Second, 1*time.Minute, func() (bool, error) {
p2, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Get(p.Name, metav1.GetOptions{})
p2, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Get(context.TODO(), p.Name, metav1.GetOptions{})
framework.ExpectNoError(err)
for _, owner := range p2.OwnerReferences {
if *owner.Controller && owner.UID == rc.UID {
@@ -361,12 +362,12 @@ func updateReplicationControllerWithRetries(c clientset.Interface, namespace, na
var updateErr error
pollErr := wait.PollImmediate(10*time.Millisecond, 1*time.Minute, func() (bool, error) {
var err error
if rc, err = c.CoreV1().ReplicationControllers(namespace).Get(name, metav1.GetOptions{}); err != nil {
if rc, err = c.CoreV1().ReplicationControllers(namespace).Get(context.TODO(), name, metav1.GetOptions{}); err != nil {
return false, err
}
// Apply the update, then attempt to push it to the apiserver.
applyUpdate(rc)
if rc, err = c.CoreV1().ReplicationControllers(namespace).Update(rc); err == nil {
if rc, err = c.CoreV1().ReplicationControllers(namespace).Update(context.TODO(), rc); err == nil {
framework.Logf("Updating replication controller %q", name)
return true, nil
}
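
updateReplicationControllerWithRetries keeps its closure-based shape; only the Get and Update inside the poll pick up the leading context. A compact sketch of the core of such a helper, with the logging and error wrapping left out.

package e2esketch

import (
    "context"
    "time"

    v1 "k8s.io/api/core/v1"
    apierrors "k8s.io/apimachinery/pkg/api/errors"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/apimachinery/pkg/util/wait"
    "k8s.io/client-go/kubernetes"
)

// updateRCWithRetries refetches the controller, applies the caller's
// mutation, and retries on conflict until the apiserver accepts the update.
func updateRCWithRetries(c kubernetes.Interface, ns, name string, mutate func(*v1.ReplicationController)) (*v1.ReplicationController, error) {
    var rc *v1.ReplicationController
    err := wait.PollImmediate(10*time.Millisecond, time.Minute, func() (bool, error) {
        var err error
        if rc, err = c.CoreV1().ReplicationControllers(ns).Get(context.TODO(), name, metav1.GetOptions{}); err != nil {
            return false, err
        }
        mutate(rc)
        if rc, err = c.CoreV1().ReplicationControllers(ns).Update(context.TODO(), rc); err == nil {
            return true, nil
        }
        if apierrors.IsConflict(err) {
            return false, nil // refetch and try again on the next tick
        }
        return false, err
    })
    return rc, err
}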

View File

@@ -17,6 +17,7 @@ limitations under the License.
package apps
import (
"context"
"fmt"
"time"
@@ -127,7 +128,7 @@ func testReplicaSetServeImageOrFail(f *framework.Framework, test string, image s
framework.Logf("Creating ReplicaSet %s", name)
newRS := newRS(name, replicas, map[string]string{"name": name}, name, image, []string{"serve-hostname"})
newRS.Spec.Template.Spec.Containers[0].Ports = []v1.ContainerPort{{ContainerPort: 9376}}
_, err := f.ClientSet.AppsV1().ReplicaSets(f.Namespace.Name).Create(newRS)
_, err := f.ClientSet.AppsV1().ReplicaSets(f.Namespace.Name).Create(context.TODO(), newRS)
framework.ExpectNoError(err)
// Check that pods for the new RS were created.
@@ -145,7 +146,7 @@ func testReplicaSetServeImageOrFail(f *framework.Framework, test string, image s
}
err = f.WaitForPodRunning(pod.Name)
if err != nil {
updatePod, getErr := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Get(pod.Name, metav1.GetOptions{})
updatePod, getErr := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Get(context.TODO(), pod.Name, metav1.GetOptions{})
if getErr == nil {
err = fmt.Errorf("Pod %q never run (phase: %s, conditions: %+v): %v", updatePod.Name, updatePod.Status.Phase, updatePod.Status.Conditions, err)
} else {
@@ -184,11 +185,11 @@ func testReplicaSetConditionCheck(f *framework.Framework) {
ginkgo.By(fmt.Sprintf("Creating quota %q that allows only two pods to run in the current namespace", name))
quota := newPodQuota(name, "2")
_, err := c.CoreV1().ResourceQuotas(namespace).Create(quota)
_, err := c.CoreV1().ResourceQuotas(namespace).Create(context.TODO(), quota)
framework.ExpectNoError(err)
err = wait.PollImmediate(1*time.Second, 1*time.Minute, func() (bool, error) {
quota, err = c.CoreV1().ResourceQuotas(namespace).Get(name, metav1.GetOptions{})
quota, err = c.CoreV1().ResourceQuotas(namespace).Get(context.TODO(), name, metav1.GetOptions{})
if err != nil {
return false, err
}
@@ -203,14 +204,14 @@ func testReplicaSetConditionCheck(f *framework.Framework) {
ginkgo.By(fmt.Sprintf("Creating replica set %q that asks for more than the allowed pod quota", name))
rs := newRS(name, 3, map[string]string{"name": name}, WebserverImageName, WebserverImage, nil)
rs, err = c.AppsV1().ReplicaSets(namespace).Create(rs)
rs, err = c.AppsV1().ReplicaSets(namespace).Create(context.TODO(), rs)
framework.ExpectNoError(err)
ginkgo.By(fmt.Sprintf("Checking replica set %q has the desired failure condition set", name))
generation := rs.Generation
conditions := rs.Status.Conditions
err = wait.PollImmediate(1*time.Second, 1*time.Minute, func() (bool, error) {
rs, err = c.AppsV1().ReplicaSets(namespace).Get(name, metav1.GetOptions{})
rs, err = c.AppsV1().ReplicaSets(namespace).Get(context.TODO(), name, metav1.GetOptions{})
if err != nil {
return false, err
}
@@ -240,7 +241,7 @@ func testReplicaSetConditionCheck(f *framework.Framework) {
generation = rs.Generation
conditions = rs.Status.Conditions
err = wait.PollImmediate(1*time.Second, 1*time.Minute, func() (bool, error) {
rs, err = c.AppsV1().ReplicaSets(namespace).Get(name, metav1.GetOptions{})
rs, err = c.AppsV1().ReplicaSets(namespace).Get(context.TODO(), name, metav1.GetOptions{})
if err != nil {
return false, err
}
@@ -283,12 +284,12 @@ func testRSAdoptMatchingAndReleaseNotMatching(f *framework.Framework) {
replicas := int32(1)
rsSt := newRS(name, replicas, map[string]string{"name": name}, name, WebserverImage, nil)
rsSt.Spec.Selector = &metav1.LabelSelector{MatchLabels: map[string]string{"name": name}}
rs, err := f.ClientSet.AppsV1().ReplicaSets(f.Namespace.Name).Create(rsSt)
rs, err := f.ClientSet.AppsV1().ReplicaSets(f.Namespace.Name).Create(context.TODO(), rsSt)
framework.ExpectNoError(err)
ginkgo.By("Then the orphan pod is adopted")
err = wait.PollImmediate(1*time.Second, 1*time.Minute, func() (bool, error) {
p2, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Get(p.Name, metav1.GetOptions{})
p2, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Get(context.TODO(), p.Name, metav1.GetOptions{})
// The Pod p should either be adopted or deleted by the ReplicaSet
if apierrors.IsNotFound(err) {
return true, nil
@@ -311,11 +312,11 @@ func testRSAdoptMatchingAndReleaseNotMatching(f *framework.Framework) {
p = &pods.Items[0]
err = wait.PollImmediate(1*time.Second, 1*time.Minute, func() (bool, error) {
pod, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Get(p.Name, metav1.GetOptions{})
pod, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Get(context.TODO(), p.Name, metav1.GetOptions{})
framework.ExpectNoError(err)
pod.Labels = map[string]string{"name": "not-matching-name"}
_, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Update(pod)
_, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Update(context.TODO(), pod)
if err != nil && apierrors.IsConflict(err) {
return false, nil
}
@@ -328,7 +329,7 @@ func testRSAdoptMatchingAndReleaseNotMatching(f *framework.Framework) {
ginkgo.By("Then the pod is released")
err = wait.PollImmediate(1*time.Second, 1*time.Minute, func() (bool, error) {
p2, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Get(p.Name, metav1.GetOptions{})
p2, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Get(context.TODO(), p.Name, metav1.GetOptions{})
framework.ExpectNoError(err)
for _, owner := range p2.OwnerReferences {
if *owner.Controller && owner.UID == rs.UID {
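
The adopt-and-release checks follow the same Get, mutate, Update rhythm, with apierrors.IsConflict treated as "retry on the next poll tick". A small sketch of the release check: relabel a pod, then confirm the owning controller's UID is gone from its controller references. Names are illustrative.

package e2esketch

import (
    "context"

    apierrors "k8s.io/apimachinery/pkg/api/errors"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/apimachinery/pkg/types"
    "k8s.io/client-go/kubernetes"
)

// relabelAndCheckReleased flips a pod's identifying label and reports whether
// the controller with the given UID has released it.
func relabelAndCheckReleased(c kubernetes.Interface, ns, podName string, ownerUID types.UID) (bool, error) {
    pod, err := c.CoreV1().Pods(ns).Get(context.TODO(), podName, metav1.GetOptions{})
    if err != nil {
        return false, err
    }
    pod.Labels = map[string]string{"name": "not-matching-name"}
    if _, err := c.CoreV1().Pods(ns).Update(context.TODO(), pod); err != nil {
        if apierrors.IsConflict(err) {
            return false, nil // conflicting write; let the caller poll again
        }
        return false, err
    }
    pod, err = c.CoreV1().Pods(ns).Get(context.TODO(), podName, metav1.GetOptions{})
    if err != nil {
        return false, err
    }
    for _, owner := range pod.OwnerReferences {
        if owner.Controller != nil && *owner.Controller && owner.UID == ownerUID {
            return false, nil // still controlled by the old owner
        }
    }
    return true, nil
}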

View File

@@ -103,7 +103,7 @@ var _ = SIGDescribe("StatefulSet", func() {
ginkgo.By("Creating service " + headlessSvcName + " in namespace " + ns)
headlessService := e2eservice.CreateServiceSpec(headlessSvcName, "", true, labels)
_, err := c.CoreV1().Services(ns).Create(headlessService)
_, err := c.CoreV1().Services(ns).Create(context.TODO(), headlessService)
framework.ExpectNoError(err)
})
@@ -123,7 +123,7 @@ var _ = SIGDescribe("StatefulSet", func() {
*(ss.Spec.Replicas) = 3
e2esset.PauseNewPods(ss)
_, err := c.AppsV1().StatefulSets(ns).Create(ss)
_, err := c.AppsV1().StatefulSets(ns).Create(context.TODO(), ss)
framework.ExpectNoError(err)
ginkgo.By("Saturating stateful set " + ss.Name)
@@ -165,7 +165,7 @@ var _ = SIGDescribe("StatefulSet", func() {
// Replace ss with the one returned from Create() so it has the UID.
// Save Kind since it won't be populated in the returned ss.
kind := ss.Kind
ss, err := c.AppsV1().StatefulSets(ns).Create(ss)
ss, err := c.AppsV1().StatefulSets(ns).Create(context.TODO(), ss)
framework.ExpectNoError(err)
ss.Kind = kind
@@ -247,7 +247,7 @@ var _ = SIGDescribe("StatefulSet", func() {
*(ss.Spec.Replicas) = 2
e2esset.PauseNewPods(ss)
_, err := c.AppsV1().StatefulSets(ns).Create(ss)
_, err := c.AppsV1().StatefulSets(ns).Create(context.TODO(), ss)
framework.ExpectNoError(err)
e2esset.WaitForRunning(c, 1, 0, ss)
@@ -314,7 +314,7 @@ var _ = SIGDescribe("StatefulSet", func() {
}()}
}(),
}
ss, err := c.AppsV1().StatefulSets(ns).Create(ss)
ss, err := c.AppsV1().StatefulSets(ns).Create(context.TODO(), ss)
framework.ExpectNoError(err)
e2esset.WaitForRunningAndReady(c, *ss.Spec.Replicas, ss)
ss = waitForStatus(c, ss)
@@ -499,7 +499,7 @@ var _ = SIGDescribe("StatefulSet", func() {
ss.Spec.UpdateStrategy = appsv1.StatefulSetUpdateStrategy{
Type: appsv1.OnDeleteStatefulSetStrategyType,
}
ss, err := c.AppsV1().StatefulSets(ns).Create(ss)
ss, err := c.AppsV1().StatefulSets(ns).Create(context.TODO(), ss)
framework.ExpectNoError(err)
e2esset.WaitForRunningAndReady(c, *ss.Spec.Replicas, ss)
ss = waitForStatus(c, ss)
@@ -573,7 +573,7 @@ var _ = SIGDescribe("StatefulSet", func() {
framework.ConformanceIt("Scaling should happen in predictable order and halt if any stateful pod is unhealthy [Slow]", func() {
psLabels := klabels.Set(labels)
ginkgo.By("Initializing watcher for selector " + psLabels.String())
watcher, err := f.ClientSet.CoreV1().Pods(ns).Watch(metav1.ListOptions{
watcher, err := f.ClientSet.CoreV1().Pods(ns).Watch(context.TODO(), metav1.ListOptions{
LabelSelector: psLabels.AsSelector().String(),
})
framework.ExpectNoError(err)
@@ -581,7 +581,7 @@ var _ = SIGDescribe("StatefulSet", func() {
ginkgo.By("Creating stateful set " + ssName + " in namespace " + ns)
ss := e2esset.NewStatefulSet(ssName, ns, headlessSvcName, 1, nil, nil, psLabels)
setHTTPProbe(ss)
ss, err = c.AppsV1().StatefulSets(ns).Create(ss)
ss, err = c.AppsV1().StatefulSets(ns).Create(context.TODO(), ss)
framework.ExpectNoError(err)
ginkgo.By("Waiting until all stateful set " + ssName + " replicas will be running in namespace " + ns)
@@ -616,7 +616,7 @@ var _ = SIGDescribe("StatefulSet", func() {
framework.ExpectNoError(err)
ginkgo.By("Scale down will halt with unhealthy stateful pod")
watcher, err = f.ClientSet.CoreV1().Pods(ns).Watch(metav1.ListOptions{
watcher, err = f.ClientSet.CoreV1().Pods(ns).Watch(context.TODO(), metav1.ListOptions{
LabelSelector: psLabels.AsSelector().String(),
})
framework.ExpectNoError(err)
@@ -661,7 +661,7 @@ var _ = SIGDescribe("StatefulSet", func() {
ss := e2esset.NewStatefulSet(ssName, ns, headlessSvcName, 1, nil, nil, psLabels)
ss.Spec.PodManagementPolicy = appsv1.ParallelPodManagement
setHTTPProbe(ss)
ss, err := c.AppsV1().StatefulSets(ns).Create(ss)
ss, err := c.AppsV1().StatefulSets(ns).Create(context.TODO(), ss)
framework.ExpectNoError(err)
ginkgo.By("Waiting until all stateful set " + ssName + " replicas will be running in namespace " + ns)
@@ -720,7 +720,7 @@ var _ = SIGDescribe("StatefulSet", func() {
NodeName: node.Name,
},
}
pod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(pod)
pod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(context.TODO(), pod)
framework.ExpectNoError(err)
ginkgo.By("Creating statefulset with conflicting port in namespace " + f.Namespace.Name)
@@ -728,7 +728,7 @@ var _ = SIGDescribe("StatefulSet", func() {
statefulPodContainer := &ss.Spec.Template.Spec.Containers[0]
statefulPodContainer.Ports = append(statefulPodContainer.Ports, conflictingPort)
ss.Spec.Template.Spec.NodeName = node.Name
_, err = f.ClientSet.AppsV1().StatefulSets(f.Namespace.Name).Create(ss)
_, err = f.ClientSet.AppsV1().StatefulSets(f.Namespace.Name).Create(context.TODO(), ss)
framework.ExpectNoError(err)
ginkgo.By("Waiting until pod " + podName + " will start running in namespace " + f.Namespace.Name)
@@ -738,7 +738,7 @@ var _ = SIGDescribe("StatefulSet", func() {
var initialStatefulPodUID types.UID
ginkgo.By("Waiting until stateful pod " + statefulPodName + " will be recreated and deleted at least once in namespace " + f.Namespace.Name)
w, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Watch(metav1.SingleObject(metav1.ObjectMeta{Name: statefulPodName}))
w, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Watch(context.TODO(), metav1.SingleObject(metav1.ObjectMeta{Name: statefulPodName}))
framework.ExpectNoError(err)
ctx, cancel := watchtools.ContextWithOptionalTimeout(context.Background(), statefulPodTimeout)
defer cancel()
@@ -763,13 +763,13 @@ var _ = SIGDescribe("StatefulSet", func() {
}
ginkgo.By("Removing pod with conflicting port in namespace " + f.Namespace.Name)
err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(pod.Name, metav1.NewDeleteOptions(0))
err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(context.TODO(), pod.Name, metav1.NewDeleteOptions(0))
framework.ExpectNoError(err)
ginkgo.By("Waiting when stateful pod " + statefulPodName + " will be recreated in namespace " + f.Namespace.Name + " and will be in running state")
// we may catch delete event, that's why we are waiting for running phase like this, and not with watchtools.UntilWithoutRetry
gomega.Eventually(func() error {
statefulPod, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Get(statefulPodName, metav1.GetOptions{})
statefulPod, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Get(context.TODO(), statefulPodName, metav1.GetOptions{})
if err != nil {
return err
}
@@ -793,13 +793,13 @@ var _ = SIGDescribe("StatefulSet", func() {
ginkgo.By("Creating statefulset " + ssName + " in namespace " + ns)
ss := e2esset.NewStatefulSet(ssName, ns, headlessSvcName, 1, nil, nil, labels)
setHTTPProbe(ss)
ss, err := c.AppsV1().StatefulSets(ns).Create(ss)
ss, err := c.AppsV1().StatefulSets(ns).Create(context.TODO(), ss)
framework.ExpectNoError(err)
e2esset.WaitForRunningAndReady(c, *ss.Spec.Replicas, ss)
ss = waitForStatus(c, ss)
ginkgo.By("getting scale subresource")
scale, err := c.AppsV1().StatefulSets(ns).GetScale(ssName, metav1.GetOptions{})
scale, err := c.AppsV1().StatefulSets(ns).GetScale(context.TODO(), ssName, metav1.GetOptions{})
if err != nil {
framework.Failf("Failed to get scale subresource: %v", err)
}
@@ -809,14 +809,14 @@ var _ = SIGDescribe("StatefulSet", func() {
ginkgo.By("updating a scale subresource")
scale.ResourceVersion = "" // indicate the scale update should be unconditional
scale.Spec.Replicas = 2
scaleResult, err := c.AppsV1().StatefulSets(ns).UpdateScale(ssName, scale)
scaleResult, err := c.AppsV1().StatefulSets(ns).UpdateScale(context.TODO(), ssName, scale)
if err != nil {
framework.Failf("Failed to put scale subresource: %v", err)
}
framework.ExpectEqual(scaleResult.Spec.Replicas, int32(2))
ginkgo.By("verifying the statefulset Spec.Replicas was modified")
ss, err = c.AppsV1().StatefulSets(ns).Get(ssName, metav1.GetOptions{})
ss, err = c.AppsV1().StatefulSets(ns).Get(context.TODO(), ssName, metav1.GetOptions{})
if err != nil {
framework.Failf("Failed to get statefulset resource: %v", err)
}
@@ -1086,7 +1086,7 @@ func pollReadWithTimeout(statefulPod statefulPodTester, statefulPodNumber int, k
// PVCs and one using no storage.
func rollbackTest(c clientset.Interface, ns string, ss *appsv1.StatefulSet) {
setHTTPProbe(ss)
ss, err := c.AppsV1().StatefulSets(ns).Create(ss)
ss, err := c.AppsV1().StatefulSets(ns).Create(context.TODO(), ss)
framework.ExpectNoError(err)
e2esset.WaitForRunningAndReady(c, *ss.Spec.Replicas, ss)
ss = waitForStatus(c, ss)
@@ -1268,7 +1268,7 @@ func restorePodHTTPProbe(ss *appsv1.StatefulSet, pod *v1.Pod) error {
func deleteStatefulPodAtIndex(c clientset.Interface, index int, ss *appsv1.StatefulSet) {
name := getStatefulSetPodNameAtIndex(index, ss)
noGrace := int64(0)
if err := c.CoreV1().Pods(ss.Namespace).Delete(name, &metav1.DeleteOptions{GracePeriodSeconds: &noGrace}); err != nil {
if err := c.CoreV1().Pods(ss.Namespace).Delete(context.TODO(), name, &metav1.DeleteOptions{GracePeriodSeconds: &noGrace}); err != nil {
framework.Failf("Failed to delete stateful pod %v for StatefulSet %v/%v: %v", name, ss.Namespace, ss.Name, err)
}
}
@@ -1287,12 +1287,12 @@ func updateStatefulSetWithRetries(c clientset.Interface, namespace, name string,
statefulSets := c.AppsV1().StatefulSets(namespace)
var updateErr error
pollErr := wait.Poll(10*time.Millisecond, 1*time.Minute, func() (bool, error) {
if statefulSet, err = statefulSets.Get(name, metav1.GetOptions{}); err != nil {
if statefulSet, err = statefulSets.Get(context.TODO(), name, metav1.GetOptions{}); err != nil {
return false, err
}
// Apply the update, then attempt to push it to the apiserver.
applyUpdate(statefulSet)
if statefulSet, err = statefulSets.Update(statefulSet); err == nil {
if statefulSet, err = statefulSets.Update(context.TODO(), statefulSet); err == nil {
framework.Logf("Updating stateful set %s", name)
return true, nil
}
@@ -1307,7 +1307,7 @@ func updateStatefulSetWithRetries(c clientset.Interface, namespace, name string,
// getStatefulSet gets the StatefulSet named name in namespace.
func getStatefulSet(c clientset.Interface, namespace, name string) *appsv1.StatefulSet {
ss, err := c.AppsV1().StatefulSets(namespace).Get(name, metav1.GetOptions{})
ss, err := c.AppsV1().StatefulSets(namespace).Get(context.TODO(), name, metav1.GetOptions{})
if err != nil {
framework.Failf("Failed to get StatefulSet %s/%s: %v", namespace, name, err)
}
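
The scale subresource client follows the same convention: GetScale and UpdateScale both take the context ahead of the StatefulSet name. A sketch of resizing through the scale subresource; clearing ResourceVersion to make the update unconditional mirrors the test above, the rest is illustrative.

package e2esketch

import (
    "context"

    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/client-go/kubernetes"
)

// scaleStatefulSet reads the scale subresource, rewrites the replica count,
// and pushes it back.
func scaleStatefulSet(c kubernetes.Interface, ns, name string, replicas int32) error {
    scale, err := c.AppsV1().StatefulSets(ns).GetScale(context.TODO(), name, metav1.GetOptions{})
    if err != nil {
        return err
    }
    scale.ResourceVersion = "" // unconditional update, as in the test above
    scale.Spec.Replicas = replicas
    _, err = c.AppsV1().StatefulSets(ns).UpdateScale(context.TODO(), name, scale)
    return err
}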

View File

@@ -84,19 +84,19 @@ var _ = SIGDescribe("Advanced Audit [DisabledForLargeClusters][Flaky]", func() {
f.PodClient().CreateSync(pod)
_, err := f.PodClient().Get(pod.Name, metav1.GetOptions{})
_, err := f.PodClient().Get(context.TODO(), pod.Name, metav1.GetOptions{})
framework.ExpectNoError(err, "failed to get audit-pod")
podChan, err := f.PodClient().Watch(watchOptions)
podChan, err := f.PodClient().Watch(context.TODO(), watchOptions)
framework.ExpectNoError(err, "failed to create watch for pods")
podChan.Stop()
f.PodClient().Update(pod.Name, updatePod)
_, err = f.PodClient().List(metav1.ListOptions{})
_, err = f.PodClient().List(context.TODO(), metav1.ListOptions{})
framework.ExpectNoError(err, "failed to list pods")
_, err = f.PodClient().Patch(pod.Name, types.JSONPatchType, patch)
_, err = f.PodClient().Patch(context.TODO(), pod.Name, types.JSONPatchType, patch)
framework.ExpectNoError(err, "failed to patch pod")
f.PodClient().DeleteSync(pod.Name, &metav1.DeleteOptions{}, framework.DefaultPodDeletionTimeout)
@@ -206,26 +206,26 @@ var _ = SIGDescribe("Advanced Audit [DisabledForLargeClusters][Flaky]", func() {
podLabels := map[string]string{"name": "audit-deployment-pod"}
d := e2edeploy.NewDeployment("audit-deployment", int32(1), podLabels, "agnhost", imageutils.GetE2EImage(imageutils.Agnhost), appsv1.RecreateDeploymentStrategyType)
_, err := f.ClientSet.AppsV1().Deployments(namespace).Create(d)
_, err := f.ClientSet.AppsV1().Deployments(namespace).Create(context.TODO(), d)
framework.ExpectNoError(err, "failed to create audit-deployment")
_, err = f.ClientSet.AppsV1().Deployments(namespace).Get(d.Name, metav1.GetOptions{})
_, err = f.ClientSet.AppsV1().Deployments(namespace).Get(context.TODO(), d.Name, metav1.GetOptions{})
framework.ExpectNoError(err, "failed to get audit-deployment")
deploymentChan, err := f.ClientSet.AppsV1().Deployments(namespace).Watch(watchOptions)
deploymentChan, err := f.ClientSet.AppsV1().Deployments(namespace).Watch(context.TODO(), watchOptions)
framework.ExpectNoError(err, "failed to create watch for deployments")
deploymentChan.Stop()
_, err = f.ClientSet.AppsV1().Deployments(namespace).Update(d)
_, err = f.ClientSet.AppsV1().Deployments(namespace).Update(context.TODO(), d)
framework.ExpectNoError(err, "failed to update audit-deployment")
_, err = f.ClientSet.AppsV1().Deployments(namespace).Patch(d.Name, types.JSONPatchType, patch)
_, err = f.ClientSet.AppsV1().Deployments(namespace).Patch(context.TODO(), d.Name, types.JSONPatchType, patch)
framework.ExpectNoError(err, "failed to patch deployment")
_, err = f.ClientSet.AppsV1().Deployments(namespace).List(metav1.ListOptions{})
_, err = f.ClientSet.AppsV1().Deployments(namespace).List(context.TODO(), metav1.ListOptions{})
framework.ExpectNoError(err, "failed to create list deployments")
err = f.ClientSet.AppsV1().Deployments(namespace).Delete("audit-deployment", &metav1.DeleteOptions{})
err = f.ClientSet.AppsV1().Deployments(namespace).Delete(context.TODO(), "audit-deployment", &metav1.DeleteOptions{})
framework.ExpectNoError(err, "failed to delete deployments")
expectEvents(f, []utils.AuditEvent{
@@ -339,26 +339,26 @@ var _ = SIGDescribe("Advanced Audit [DisabledForLargeClusters][Flaky]", func() {
},
}
_, err := f.ClientSet.CoreV1().ConfigMaps(namespace).Create(configMap)
_, err := f.ClientSet.CoreV1().ConfigMaps(namespace).Create(context.TODO(), configMap)
framework.ExpectNoError(err, "failed to create audit-configmap")
_, err = f.ClientSet.CoreV1().ConfigMaps(namespace).Get(configMap.Name, metav1.GetOptions{})
_, err = f.ClientSet.CoreV1().ConfigMaps(namespace).Get(context.TODO(), configMap.Name, metav1.GetOptions{})
framework.ExpectNoError(err, "failed to get audit-configmap")
configMapChan, err := f.ClientSet.CoreV1().ConfigMaps(namespace).Watch(watchOptions)
configMapChan, err := f.ClientSet.CoreV1().ConfigMaps(namespace).Watch(context.TODO(), watchOptions)
framework.ExpectNoError(err, "failed to create watch for config maps")
configMapChan.Stop()
_, err = f.ClientSet.CoreV1().ConfigMaps(namespace).Update(configMap)
_, err = f.ClientSet.CoreV1().ConfigMaps(namespace).Update(context.TODO(), configMap)
framework.ExpectNoError(err, "failed to update audit-configmap")
_, err = f.ClientSet.CoreV1().ConfigMaps(namespace).Patch(configMap.Name, types.JSONPatchType, patch)
_, err = f.ClientSet.CoreV1().ConfigMaps(namespace).Patch(context.TODO(), configMap.Name, types.JSONPatchType, patch)
framework.ExpectNoError(err, "failed to patch configmap")
_, err = f.ClientSet.CoreV1().ConfigMaps(namespace).List(metav1.ListOptions{})
_, err = f.ClientSet.CoreV1().ConfigMaps(namespace).List(context.TODO(), metav1.ListOptions{})
framework.ExpectNoError(err, "failed to list config maps")
err = f.ClientSet.CoreV1().ConfigMaps(namespace).Delete(configMap.Name, &metav1.DeleteOptions{})
err = f.ClientSet.CoreV1().ConfigMaps(namespace).Delete(context.TODO(), configMap.Name, &metav1.DeleteOptions{})
framework.ExpectNoError(err, "failed to delete audit-configmap")
expectEvents(f, []utils.AuditEvent{
@@ -471,26 +471,26 @@ var _ = SIGDescribe("Advanced Audit [DisabledForLargeClusters][Flaky]", func() {
"top-secret": []byte("foo-bar"),
},
}
_, err := f.ClientSet.CoreV1().Secrets(namespace).Create(secret)
_, err := f.ClientSet.CoreV1().Secrets(namespace).Create(context.TODO(), secret)
framework.ExpectNoError(err, "failed to create audit-secret")
_, err = f.ClientSet.CoreV1().Secrets(namespace).Get(secret.Name, metav1.GetOptions{})
_, err = f.ClientSet.CoreV1().Secrets(namespace).Get(context.TODO(), secret.Name, metav1.GetOptions{})
framework.ExpectNoError(err, "failed to get audit-secret")
secretChan, err := f.ClientSet.CoreV1().Secrets(namespace).Watch(watchOptions)
secretChan, err := f.ClientSet.CoreV1().Secrets(namespace).Watch(context.TODO(), watchOptions)
framework.ExpectNoError(err, "failed to create watch for secrets")
secretChan.Stop()
_, err = f.ClientSet.CoreV1().Secrets(namespace).Update(secret)
_, err = f.ClientSet.CoreV1().Secrets(namespace).Update(context.TODO(), secret)
framework.ExpectNoError(err, "failed to update audit-secret")
_, err = f.ClientSet.CoreV1().Secrets(namespace).Patch(secret.Name, types.JSONPatchType, patch)
_, err = f.ClientSet.CoreV1().Secrets(namespace).Patch(context.TODO(), secret.Name, types.JSONPatchType, patch)
framework.ExpectNoError(err, "failed to patch secret")
_, err = f.ClientSet.CoreV1().Secrets(namespace).List(metav1.ListOptions{})
_, err = f.ClientSet.CoreV1().Secrets(namespace).List(context.TODO(), metav1.ListOptions{})
framework.ExpectNoError(err, "failed to list secrets")
err = f.ClientSet.CoreV1().Secrets(namespace).Delete(secret.Name, &metav1.DeleteOptions{})
err = f.ClientSet.CoreV1().Secrets(namespace).Delete(context.TODO(), secret.Name, &metav1.DeleteOptions{})
framework.ExpectNoError(err, "failed to delete audit-secret")
expectEvents(f, []utils.AuditEvent{
@@ -670,7 +670,7 @@ var _ = SIGDescribe("Advanced Audit [DisabledForLargeClusters][Flaky]", func() {
anonymousClient, err := clientset.NewForConfig(config)
framework.ExpectNoError(err)
_, err = anonymousClient.CoreV1().Pods(namespace).Get("another-audit-pod", metav1.GetOptions{})
_, err = anonymousClient.CoreV1().Pods(namespace).Get(context.TODO(), "another-audit-pod", metav1.GetOptions{})
expectForbidden(err)
expectEvents(f, []utils.AuditEvent{
@@ -703,7 +703,7 @@ var _ = SIGDescribe("Advanced Audit [DisabledForLargeClusters][Flaky]", func() {
impersonatedClient, err := clientset.NewForConfig(config)
framework.ExpectNoError(err)
_, err = impersonatedClient.CoreV1().Pods(namespace).List(metav1.ListOptions{})
_, err = impersonatedClient.CoreV1().Pods(namespace).List(context.TODO(), metav1.ListOptions{})
framework.ExpectNoError(err, "failed to list pods")
expectEvents(f, []utils.AuditEvent{
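
Every verb the audit test drives (create, get, watch, update, patch, list, delete) picks up the same leading argument, so the audited sequence now reads uniformly. A condensed sketch of that sequence for ConfigMaps; the check callback and the patch body are hypothetical stand-ins for the test's own assertions and fixtures.

package e2esketch

import (
    "context"

    v1 "k8s.io/api/core/v1"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/apimachinery/pkg/types"
    "k8s.io/client-go/kubernetes"
)

// auditConfigMapVerbs touches a ConfigMap with every verb the audit policy
// tracks; each call now passes the context first.
func auditConfigMapVerbs(c kubernetes.Interface, ns string, check func(error)) {
    cm := &v1.ConfigMap{ObjectMeta: metav1.ObjectMeta{Name: "audit-configmap"}}
    patch := []byte(`[{"op":"add","path":"/data","value":{"foo":"bar"}}]`)

    _, err := c.CoreV1().ConfigMaps(ns).Create(context.TODO(), cm)
    check(err)
    _, err = c.CoreV1().ConfigMaps(ns).Get(context.TODO(), cm.Name, metav1.GetOptions{})
    check(err)
    w, err := c.CoreV1().ConfigMaps(ns).Watch(context.TODO(), metav1.ListOptions{})
    check(err)
    if w != nil {
        w.Stop()
    }
    _, err = c.CoreV1().ConfigMaps(ns).Patch(context.TODO(), cm.Name, types.JSONPatchType, patch)
    check(err)
    _, err = c.CoreV1().ConfigMaps(ns).List(context.TODO(), metav1.ListOptions{})
    check(err)
    check(c.CoreV1().ConfigMaps(ns).Delete(context.TODO(), cm.Name, &metav1.DeleteOptions{}))
}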

View File

@@ -17,6 +17,7 @@ limitations under the License.
package auth
import (
"context"
"fmt"
"strings"
"time"
@@ -58,14 +59,14 @@ var _ = SIGDescribe("[Feature:DynamicAudit]", func() {
anonymousClient, err := clientset.NewForConfig(config)
framework.ExpectNoError(err, "failed to create the anonymous client")
_, err = f.ClientSet.CoreV1().Namespaces().Create(&v1.Namespace{
_, err = f.ClientSet.CoreV1().Namespaces().Create(context.TODO(), &v1.Namespace{
ObjectMeta: metav1.ObjectMeta{
Name: "audit",
},
})
framework.ExpectNoError(err, "failed to create namespace")
_, err = f.ClientSet.CoreV1().Pods(namespace).Create(&v1.Pod{
_, err = f.ClientSet.CoreV1().Pods(namespace).Create(context.TODO(), &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: "audit-proxy",
Labels: map[string]string{
@@ -89,7 +90,7 @@ var _ = SIGDescribe("[Feature:DynamicAudit]", func() {
})
framework.ExpectNoError(err, "failed to create proxy pod")
_, err = f.ClientSet.CoreV1().Services(namespace).Create(&v1.Service{
_, err = f.ClientSet.CoreV1().Services(namespace).Create(context.TODO(), &v1.Service{
ObjectMeta: metav1.ObjectMeta{
Name: "audit",
},
@@ -110,7 +111,7 @@ var _ = SIGDescribe("[Feature:DynamicAudit]", func() {
var podIP string
// get pod ip
err = wait.Poll(100*time.Millisecond, 10*time.Second, func() (done bool, err error) {
p, err := f.ClientSet.CoreV1().Pods(namespace).Get("audit-proxy", metav1.GetOptions{})
p, err := f.ClientSet.CoreV1().Pods(namespace).Get(context.TODO(), "audit-proxy", metav1.GetOptions{})
if apierrors.IsNotFound(err) {
framework.Logf("waiting for audit-proxy pod to be present")
return false, nil
@@ -150,7 +151,7 @@ var _ = SIGDescribe("[Feature:DynamicAudit]", func() {
},
}
_, err = f.ClientSet.AuditregistrationV1alpha1().AuditSinks().Create(&sink)
_, err = f.ClientSet.AuditregistrationV1alpha1().AuditSinks().Create(context.TODO(), &sink)
framework.ExpectNoError(err, "failed to create audit sink")
framework.Logf("created audit sink")
@@ -194,20 +195,20 @@ var _ = SIGDescribe("[Feature:DynamicAudit]", func() {
f.PodClient().CreateSync(pod)
_, err := f.PodClient().Get(pod.Name, metav1.GetOptions{})
_, err := f.PodClient().Get(context.TODO(), pod.Name, metav1.GetOptions{})
framework.ExpectNoError(err, "failed to get audit-pod")
podChan, err := f.PodClient().Watch(watchOptions)
podChan, err := f.PodClient().Watch(context.TODO(), watchOptions)
framework.ExpectNoError(err, "failed to create watch for pods")
for range podChan.ResultChan() {
}
f.PodClient().Update(pod.Name, updatePod)
_, err = f.PodClient().List(metav1.ListOptions{})
_, err = f.PodClient().List(context.TODO(), metav1.ListOptions{})
framework.ExpectNoError(err, "failed to list pods")
_, err = f.PodClient().Patch(pod.Name, types.JSONPatchType, patch)
_, err = f.PodClient().Patch(context.TODO(), pod.Name, types.JSONPatchType, patch)
framework.ExpectNoError(err, "failed to patch pod")
f.PodClient().DeleteSync(pod.Name, &metav1.DeleteOptions{}, framework.DefaultPodDeletionTimeout)
@@ -323,7 +324,7 @@ var _ = SIGDescribe("[Feature:DynamicAudit]", func() {
// get a pod with unauthorized user
{
func() {
_, err := anonymousClient.CoreV1().Pods(namespace).Get("another-audit-pod", metav1.GetOptions{})
_, err := anonymousClient.CoreV1().Pods(namespace).Get(context.TODO(), "another-audit-pod", metav1.GetOptions{})
expectForbidden(err)
},
[]utils.AuditEvent{
@@ -375,7 +376,7 @@ var _ = SIGDescribe("[Feature:DynamicAudit]", func() {
return len(missingReport.MissingEvents) == 0, nil
})
framework.ExpectNoError(err, "after %v failed to observe audit events", pollingTimeout)
err = f.ClientSet.AuditregistrationV1alpha1().AuditSinks().Delete("test", &metav1.DeleteOptions{})
err = f.ClientSet.AuditregistrationV1alpha1().AuditSinks().Delete(context.TODO(), "test", &metav1.DeleteOptions{})
framework.ExpectNoError(err, "could not delete audit configuration")
})
})
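Every hunk in this change applies the same mechanical transformation: each client-go call keeps its existing arguments and gains a context.Context as a new first parameter, with context.TODO() as the placeholder value. A minimal sketch of the convention, not taken from the diff itself (cs and ns are illustrative names; the usual imports — context, metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" — are assumed):

// before the migration:
//   pod, err := cs.CoreV1().Pods(ns).Get("audit-pod", metav1.GetOptions{})
// after the migration: context first, everything else unchanged
pod, err := cs.CoreV1().Pods(ns).Get(context.TODO(), "audit-pod", metav1.GetOptions{})
if err != nil {
	framework.Failf("failed to get pod: %v", err)
}

The same rule covers Create, Update, Patch, Delete, DeleteCollection, List, and Watch throughout the files below; context.TODO() is the standard library's placeholder for call sites that do not yet have a caller-supplied context.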

View File

@@ -17,6 +17,7 @@ limitations under the License.
package auth
import (
"context"
"crypto/x509"
"crypto/x509/pkix"
"encoding/pem"
@@ -67,7 +68,7 @@ var _ = SIGDescribe("Certificates API", func() {
csrs := f.ClientSet.CertificatesV1beta1().CertificateSigningRequests()
framework.Logf("creating CSR")
csr, err = csrs.Create(csr)
csr, err = csrs.Create(context.TODO(), csr)
framework.ExpectNoError(err)
csrName := csr.Name
@@ -83,7 +84,7 @@ var _ = SIGDescribe("Certificates API", func() {
}
csr, err = csrs.UpdateApproval(csr)
if err != nil {
csr, _ = csrs.Get(csrName, metav1.GetOptions{})
csr, _ = csrs.Get(context.TODO(), csrName, metav1.GetOptions{})
framework.Logf("err updating approval: %v", err)
return false, nil
}
@@ -92,7 +93,7 @@ var _ = SIGDescribe("Certificates API", func() {
framework.Logf("waiting for CSR to be signed")
framework.ExpectNoError(wait.Poll(5*time.Second, time.Minute, func() (bool, error) {
csr, err = csrs.Get(csrName, metav1.GetOptions{})
csr, err = csrs.Get(context.TODO(), csrName, metav1.GetOptions{})
if err != nil {
framework.Logf("error getting csr: %v", err)
return false, nil
@@ -118,6 +119,6 @@ var _ = SIGDescribe("Certificates API", func() {
newClient, err := v1beta1client.NewForConfig(rcfg)
framework.ExpectNoError(err)
framework.ExpectNoError(newClient.CertificateSigningRequests().Delete(csrName, nil))
framework.ExpectNoError(newClient.CertificateSigningRequests().Delete(context.TODO(), csrName, nil))
})
})

View File

@@ -17,6 +17,7 @@ limitations under the License.
package auth
import (
"context"
"fmt"
v1 "k8s.io/api/core/v1"
@@ -38,7 +39,7 @@ var _ = SIGDescribe("[Feature:NodeAuthenticator]", func() {
ginkgo.BeforeEach(func() {
ns = f.Namespace.Name
nodeList, err := f.ClientSet.CoreV1().Nodes().List(metav1.ListOptions{})
nodeList, err := f.ClientSet.CoreV1().Nodes().List(context.TODO(), metav1.ListOptions{})
framework.ExpectNoError(err, "failed to list nodes in namespace: %s", ns)
framework.ExpectNotEqual(len(nodeList.Items), 0)
@@ -49,7 +50,7 @@ var _ = SIGDescribe("[Feature:NodeAuthenticator]", func() {
// make sure ServiceAccount admission controller is enabled, so secret generation on SA creation works
saName := "default"
sa, err := f.ClientSet.CoreV1().ServiceAccounts(ns).Get(saName, metav1.GetOptions{})
sa, err := f.ClientSet.CoreV1().ServiceAccounts(ns).Get(context.TODO(), saName, metav1.GetOptions{})
framework.ExpectNoError(err, "failed to retrieve service account (%s:%s)", ns, saName)
framework.ExpectNotEqual(len(sa.Secrets), 0)
@@ -74,7 +75,7 @@ var _ = SIGDescribe("[Feature:NodeAuthenticator]", func() {
},
AutomountServiceAccountToken: &trueValue,
}
_, err := f.ClientSet.CoreV1().ServiceAccounts(ns).Create(newSA)
_, err := f.ClientSet.CoreV1().ServiceAccounts(ns).Create(context.TODO(), newSA)
framework.ExpectNoError(err, "failed to create service account (%s:%s)", ns, newSA.Name)
pod := createNodeAuthTestPod(f)

View File

@@ -17,6 +17,7 @@ limitations under the License.
package auth
import (
"context"
"fmt"
"time"
@@ -49,13 +50,13 @@ var _ = SIGDescribe("[Feature:NodeAuthorizer]", func() {
ginkgo.BeforeEach(func() {
ns = f.Namespace.Name
nodeList, err := f.ClientSet.CoreV1().Nodes().List(metav1.ListOptions{})
nodeList, err := f.ClientSet.CoreV1().Nodes().List(context.TODO(), metav1.ListOptions{})
framework.ExpectNoError(err, "failed to list nodes in namespace: %s", ns)
framework.ExpectNotEqual(len(nodeList.Items), 0)
nodeName = nodeList.Items[0].Name
asUser = nodeNamePrefix + nodeName
saName := "default"
sa, err := f.ClientSet.CoreV1().ServiceAccounts(ns).Get(saName, metav1.GetOptions{})
sa, err := f.ClientSet.CoreV1().ServiceAccounts(ns).Get(context.TODO(), saName, metav1.GetOptions{})
framework.ExpectNotEqual(len(sa.Secrets), 0)
framework.ExpectNoError(err, "failed to retrieve service account (%s:%s)", ns, saName)
defaultSaSecret = sa.Secrets[0].Name
@@ -71,17 +72,17 @@ var _ = SIGDescribe("[Feature:NodeAuthorizer]", func() {
})
ginkgo.It("Getting a non-existent secret should exit with the Forbidden error, not a NotFound error", func() {
_, err := c.CoreV1().Secrets(ns).Get("foo", metav1.GetOptions{})
_, err := c.CoreV1().Secrets(ns).Get(context.TODO(), "foo", metav1.GetOptions{})
framework.ExpectEqual(apierrors.IsForbidden(err), true)
})
ginkgo.It("Getting an existing secret should exit with the Forbidden error", func() {
_, err := c.CoreV1().Secrets(ns).Get(defaultSaSecret, metav1.GetOptions{})
_, err := c.CoreV1().Secrets(ns).Get(context.TODO(), defaultSaSecret, metav1.GetOptions{})
framework.ExpectEqual(apierrors.IsForbidden(err), true)
})
ginkgo.It("Getting a non-existent configmap should exit with the Forbidden error, not a NotFound error", func() {
_, err := c.CoreV1().ConfigMaps(ns).Get("foo", metav1.GetOptions{})
_, err := c.CoreV1().ConfigMaps(ns).Get(context.TODO(), "foo", metav1.GetOptions{})
framework.ExpectEqual(apierrors.IsForbidden(err), true)
})
@@ -96,9 +97,9 @@ var _ = SIGDescribe("[Feature:NodeAuthorizer]", func() {
"data": "content",
},
}
_, err := f.ClientSet.CoreV1().ConfigMaps(ns).Create(configmap)
_, err := f.ClientSet.CoreV1().ConfigMaps(ns).Create(context.TODO(), configmap)
framework.ExpectNoError(err, "failed to create configmap (%s:%s) %+v", ns, configmap.Name, *configmap)
_, err = c.CoreV1().ConfigMaps(ns).Get(configmap.Name, metav1.GetOptions{})
_, err = c.CoreV1().ConfigMaps(ns).Get(context.TODO(), configmap.Name, metav1.GetOptions{})
framework.ExpectEqual(apierrors.IsForbidden(err), true)
})
@@ -113,11 +114,11 @@ var _ = SIGDescribe("[Feature:NodeAuthorizer]", func() {
"data": []byte("keep it secret"),
},
}
_, err := f.ClientSet.CoreV1().Secrets(ns).Create(secret)
_, err := f.ClientSet.CoreV1().Secrets(ns).Create(context.TODO(), secret)
framework.ExpectNoError(err, "failed to create secret (%s:%s)", ns, secret.Name)
ginkgo.By("Node should not get the secret")
_, err = c.CoreV1().Secrets(ns).Get(secret.Name, metav1.GetOptions{})
_, err = c.CoreV1().Secrets(ns).Get(context.TODO(), secret.Name, metav1.GetOptions{})
framework.ExpectEqual(apierrors.IsForbidden(err), true)
ginkgo.By("Create a pod that use the secret")
@@ -146,14 +147,14 @@ var _ = SIGDescribe("[Feature:NodeAuthorizer]", func() {
},
}
_, err = f.ClientSet.CoreV1().Pods(ns).Create(pod)
_, err = f.ClientSet.CoreV1().Pods(ns).Create(context.TODO(), pod)
framework.ExpectNoError(err, "failed to create pod (%s:%s)", ns, pod.Name)
ginkgo.By("The node should able to access the secret")
itv := framework.Poll
dur := 1 * time.Minute
err = wait.Poll(itv, dur, func() (bool, error) {
_, err = c.CoreV1().Secrets(ns).Get(secret.Name, metav1.GetOptions{})
_, err = c.CoreV1().Secrets(ns).Get(context.TODO(), secret.Name, metav1.GetOptions{})
if err != nil {
framework.Logf("Failed to get secret %v, err: %v", secret.Name, err)
return false, nil
@@ -172,13 +173,13 @@ var _ = SIGDescribe("[Feature:NodeAuthorizer]", func() {
},
}
ginkgo.By(fmt.Sprintf("Create node foo by user: %v", asUser))
_, err := c.CoreV1().Nodes().Create(node)
_, err := c.CoreV1().Nodes().Create(context.TODO(), node)
framework.ExpectEqual(apierrors.IsForbidden(err), true)
})
ginkgo.It("A node shouldn't be able to delete another node", func() {
ginkgo.By(fmt.Sprintf("Create node foo by user: %v", asUser))
err := c.CoreV1().Nodes().Delete("foo", &metav1.DeleteOptions{})
err := c.CoreV1().Nodes().Delete(context.TODO(), "foo", &metav1.DeleteOptions{})
framework.ExpectEqual(apierrors.IsForbidden(err), true)
})
})
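The retry above calls Secrets(ns).Get with context.TODO() on every attempt, which is all this migration requires; the new parameter is what later allows a bounded, cancellable context instead. A sketch of that variant, not part of this PR, reusing the c, ns, and secret names from the test purely for illustration:

ctx, cancel := context.WithTimeout(context.Background(), 1*time.Minute)
defer cancel()
err := wait.Poll(framework.Poll, 1*time.Minute, func() (bool, error) {
	// the derived ctx is cancelled automatically once its deadline passes
	if _, err := c.CoreV1().Secrets(ns).Get(ctx, secret.Name, metav1.GetOptions{}); err != nil {
		framework.Logf("failed to get secret %q: %v", secret.Name, err)
		return false, nil
	}
	return true, nil
})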

View File

@@ -17,6 +17,7 @@ limitations under the License.
package auth
import (
"context"
"fmt"
v1 "k8s.io/api/core/v1"
@@ -78,7 +79,7 @@ var _ = SIGDescribe("PodSecurityPolicy", func() {
ginkgo.It("should forbid pod creation when no PSP is available", func() {
ginkgo.By("Running a restricted pod")
_, err := c.CoreV1().Pods(ns).Create(restrictedPod("restricted"))
_, err := c.CoreV1().Pods(ns).Create(context.TODO(), restrictedPod("restricted"))
expectForbidden(err)
})
@@ -88,12 +89,12 @@ var _ = SIGDescribe("PodSecurityPolicy", func() {
defer cleanup()
ginkgo.By("Running a restricted pod")
pod, err := c.CoreV1().Pods(ns).Create(restrictedPod("allowed"))
pod, err := c.CoreV1().Pods(ns).Create(context.TODO(), restrictedPod("allowed"))
framework.ExpectNoError(err)
framework.ExpectNoError(e2epod.WaitForPodNameRunningInNamespace(c, pod.Name, pod.Namespace))
testPrivilegedPods(func(pod *v1.Pod) {
_, err := c.CoreV1().Pods(ns).Create(pod)
_, err := c.CoreV1().Pods(ns).Create(context.TODO(), pod)
expectForbidden(err)
})
})
@@ -107,12 +108,12 @@ var _ = SIGDescribe("PodSecurityPolicy", func() {
defer cleanup()
testPrivilegedPods(func(pod *v1.Pod) {
p, err := c.CoreV1().Pods(ns).Create(pod)
p, err := c.CoreV1().Pods(ns).Create(context.TODO(), pod)
framework.ExpectNoError(err)
framework.ExpectNoError(e2epod.WaitForPodNameRunningInNamespace(c, p.Name, p.Namespace))
// Verify expected PSP was used.
p, err = c.CoreV1().Pods(ns).Get(p.Name, metav1.GetOptions{})
p, err = c.CoreV1().Pods(ns).Get(context.TODO(), p.Name, metav1.GetOptions{})
framework.ExpectNoError(err)
validated, found := p.Annotations[psputil.ValidatedPSPAnnotation]
framework.ExpectEqual(found, true, "PSP annotation not found")
@@ -214,11 +215,11 @@ func createAndBindPSP(f *framework.Framework, pspTemplate *policyv1beta1.PodSecu
ns := f.Namespace.Name
name := fmt.Sprintf("%s-%s", ns, psp.Name)
psp.Name = name
psp, err := f.ClientSet.PolicyV1beta1().PodSecurityPolicies().Create(psp)
psp, err := f.ClientSet.PolicyV1beta1().PodSecurityPolicies().Create(context.TODO(), psp)
framework.ExpectNoError(err, "Failed to create PSP")
// Create the Role to bind it to the namespace.
_, err = f.ClientSet.RbacV1().Roles(ns).Create(&rbacv1.Role{
_, err = f.ClientSet.RbacV1().Roles(ns).Create(context.TODO(), &rbacv1.Role{
ObjectMeta: metav1.ObjectMeta{
Name: name,
},
@@ -245,7 +246,7 @@ func createAndBindPSP(f *framework.Framework, pspTemplate *policyv1beta1.PodSecu
return psp, func() {
// Cleanup non-namespaced PSP object.
f.ClientSet.PolicyV1beta1().PodSecurityPolicies().Delete(name, &metav1.DeleteOptions{})
f.ClientSet.PolicyV1beta1().PodSecurityPolicies().Delete(context.TODO(), name, &metav1.DeleteOptions{})
}
}

View File

@@ -17,6 +17,7 @@ limitations under the License.
package auth
import (
"context"
"fmt"
"path"
"regexp"
@@ -49,7 +50,7 @@ var _ = SIGDescribe("ServiceAccounts", func() {
var secrets []v1.ObjectReference
framework.ExpectNoError(wait.Poll(time.Millisecond*500, time.Second*10, func() (bool, error) {
ginkgo.By("waiting for a single token reference")
sa, err := f.ClientSet.CoreV1().ServiceAccounts(f.Namespace.Name).Get("default", metav1.GetOptions{})
sa, err := f.ClientSet.CoreV1().ServiceAccounts(f.Namespace.Name).Get(context.TODO(), "default", metav1.GetOptions{})
if apierrors.IsNotFound(err) {
framework.Logf("default service account was not found")
return false, nil
@@ -75,19 +76,19 @@ var _ = SIGDescribe("ServiceAccounts", func() {
{
ginkgo.By("ensuring the single token reference persists")
time.Sleep(2 * time.Second)
sa, err := f.ClientSet.CoreV1().ServiceAccounts(f.Namespace.Name).Get("default", metav1.GetOptions{})
sa, err := f.ClientSet.CoreV1().ServiceAccounts(f.Namespace.Name).Get(context.TODO(), "default", metav1.GetOptions{})
framework.ExpectNoError(err)
framework.ExpectEqual(sa.Secrets, secrets)
}
// delete the referenced secret
ginkgo.By("deleting the service account token")
framework.ExpectNoError(f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Delete(secrets[0].Name, nil))
framework.ExpectNoError(f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Delete(context.TODO(), secrets[0].Name, nil))
// wait for the referenced secret to be removed, and another one autocreated
framework.ExpectNoError(wait.Poll(time.Millisecond*500, framework.ServiceAccountProvisionTimeout, func() (bool, error) {
ginkgo.By("waiting for a new token reference")
sa, err := f.ClientSet.CoreV1().ServiceAccounts(f.Namespace.Name).Get("default", metav1.GetOptions{})
sa, err := f.ClientSet.CoreV1().ServiceAccounts(f.Namespace.Name).Get(context.TODO(), "default", metav1.GetOptions{})
if err != nil {
framework.Logf("error getting default service account: %v", err)
return false, err
@@ -113,7 +114,7 @@ var _ = SIGDescribe("ServiceAccounts", func() {
{
ginkgo.By("ensuring the single token reference persists")
time.Sleep(2 * time.Second)
sa, err := f.ClientSet.CoreV1().ServiceAccounts(f.Namespace.Name).Get("default", metav1.GetOptions{})
sa, err := f.ClientSet.CoreV1().ServiceAccounts(f.Namespace.Name).Get(context.TODO(), "default", metav1.GetOptions{})
framework.ExpectNoError(err)
framework.ExpectEqual(sa.Secrets, secrets)
}
@@ -121,17 +122,17 @@ var _ = SIGDescribe("ServiceAccounts", func() {
// delete the reference from the service account
ginkgo.By("deleting the reference to the service account token")
{
sa, err := f.ClientSet.CoreV1().ServiceAccounts(f.Namespace.Name).Get("default", metav1.GetOptions{})
sa, err := f.ClientSet.CoreV1().ServiceAccounts(f.Namespace.Name).Get(context.TODO(), "default", metav1.GetOptions{})
framework.ExpectNoError(err)
sa.Secrets = nil
_, updateErr := f.ClientSet.CoreV1().ServiceAccounts(f.Namespace.Name).Update(sa)
_, updateErr := f.ClientSet.CoreV1().ServiceAccounts(f.Namespace.Name).Update(context.TODO(), sa)
framework.ExpectNoError(updateErr)
}
// wait for another one to be autocreated
framework.ExpectNoError(wait.Poll(time.Millisecond*500, framework.ServiceAccountProvisionTimeout, func() (bool, error) {
ginkgo.By("waiting for a new token to be created and added")
sa, err := f.ClientSet.CoreV1().ServiceAccounts(f.Namespace.Name).Get("default", metav1.GetOptions{})
sa, err := f.ClientSet.CoreV1().ServiceAccounts(f.Namespace.Name).Get(context.TODO(), "default", metav1.GetOptions{})
if err != nil {
framework.Logf("error getting default service account: %v", err)
return false, err
@@ -153,7 +154,7 @@ var _ = SIGDescribe("ServiceAccounts", func() {
{
ginkgo.By("ensuring the single token reference persists")
time.Sleep(2 * time.Second)
sa, err := f.ClientSet.CoreV1().ServiceAccounts(f.Namespace.Name).Get("default", metav1.GetOptions{})
sa, err := f.ClientSet.CoreV1().ServiceAccounts(f.Namespace.Name).Get(context.TODO(), "default", metav1.GetOptions{})
framework.ExpectNoError(err)
framework.ExpectEqual(sa.Secrets, secrets)
}
@@ -171,13 +172,13 @@ var _ = SIGDescribe("ServiceAccounts", func() {
framework.ConformanceIt("should mount an API token into pods ", func() {
var rootCAContent string
sa, err := f.ClientSet.CoreV1().ServiceAccounts(f.Namespace.Name).Create(&v1.ServiceAccount{ObjectMeta: metav1.ObjectMeta{Name: "mount-test"}})
sa, err := f.ClientSet.CoreV1().ServiceAccounts(f.Namespace.Name).Create(context.TODO(), &v1.ServiceAccount{ObjectMeta: metav1.ObjectMeta{Name: "mount-test"}})
framework.ExpectNoError(err)
// Standard get, update retry loop
framework.ExpectNoError(wait.Poll(time.Millisecond*500, framework.ServiceAccountProvisionTimeout, func() (bool, error) {
ginkgo.By("getting the auto-created API token")
sa, err := f.ClientSet.CoreV1().ServiceAccounts(f.Namespace.Name).Get("mount-test", metav1.GetOptions{})
sa, err := f.ClientSet.CoreV1().ServiceAccounts(f.Namespace.Name).Get(context.TODO(), "mount-test", metav1.GetOptions{})
if apierrors.IsNotFound(err) {
framework.Logf("mount-test service account was not found")
return false, nil
@@ -191,7 +192,7 @@ var _ = SIGDescribe("ServiceAccounts", func() {
return false, nil
}
for _, secretRef := range sa.Secrets {
secret, err := f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Get(secretRef.Name, metav1.GetOptions{})
secret, err := f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Get(context.TODO(), secretRef.Name, metav1.GetOptions{})
if err != nil {
framework.Logf("Error getting secret %s: %v", secretRef.Name, err)
continue
@@ -207,7 +208,7 @@ var _ = SIGDescribe("ServiceAccounts", func() {
}))
zero := int64(0)
pod, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(&v1.Pod{
pod, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(context.TODO(), &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: "pod-service-account-" + string(uuid.NewUUID()),
},
@@ -216,10 +217,10 @@ var _ = SIGDescribe("ServiceAccounts", func() {
Containers: []v1.Container{{
Name: "test",
Image: imageutils.GetE2EImage(imageutils.BusyBox),
Command: []string{"sleep", "100000"}, // run and pause
Command: []string{"sleep", "100000"},
}},
TerminationGracePeriodSeconds: &zero, // terminate quickly when deleted
RestartPolicy: v1.RestartPolicyNever, // never restart
TerminationGracePeriodSeconds: &zero,
RestartPolicy: v1.RestartPolicyNever,
},
})
framework.ExpectNoError(err)
@@ -238,7 +239,7 @@ var _ = SIGDescribe("ServiceAccounts", func() {
framework.ExpectEqual(mountedNamespace, f.Namespace.Name)
// Token should be a valid credential that identifies the pod's service account
tokenReview := &authenticationv1.TokenReview{Spec: authenticationv1.TokenReviewSpec{Token: mountedToken}}
tokenReview, err = f.ClientSet.AuthenticationV1().TokenReviews().Create(tokenReview)
tokenReview, err = f.ClientSet.AuthenticationV1().TokenReviews().Create(context.TODO(), tokenReview)
framework.ExpectNoError(err)
framework.ExpectEqual(tokenReview.Status.Authenticated, true)
framework.ExpectEqual(tokenReview.Status.Error, "")
@@ -281,15 +282,15 @@ var _ = SIGDescribe("ServiceAccounts", func() {
falseValue := false
mountSA := &v1.ServiceAccount{ObjectMeta: metav1.ObjectMeta{Name: "mount"}, AutomountServiceAccountToken: &trueValue}
nomountSA := &v1.ServiceAccount{ObjectMeta: metav1.ObjectMeta{Name: "nomount"}, AutomountServiceAccountToken: &falseValue}
mountSA, err = f.ClientSet.CoreV1().ServiceAccounts(f.Namespace.Name).Create(mountSA)
mountSA, err = f.ClientSet.CoreV1().ServiceAccounts(f.Namespace.Name).Create(context.TODO(), mountSA)
framework.ExpectNoError(err)
nomountSA, err = f.ClientSet.CoreV1().ServiceAccounts(f.Namespace.Name).Create(nomountSA)
nomountSA, err = f.ClientSet.CoreV1().ServiceAccounts(f.Namespace.Name).Create(context.TODO(), nomountSA)
framework.ExpectNoError(err)
// Standard get, update retry loop
framework.ExpectNoError(wait.Poll(time.Millisecond*500, framework.ServiceAccountProvisionTimeout, func() (bool, error) {
ginkgo.By("getting the auto-created API token")
sa, err := f.ClientSet.CoreV1().ServiceAccounts(f.Namespace.Name).Get(mountSA.Name, metav1.GetOptions{})
sa, err := f.ClientSet.CoreV1().ServiceAccounts(f.Namespace.Name).Get(context.TODO(), mountSA.Name, metav1.GetOptions{})
if apierrors.IsNotFound(err) {
framework.Logf("mount service account was not found")
return false, nil
@@ -303,7 +304,7 @@ var _ = SIGDescribe("ServiceAccounts", func() {
return false, nil
}
for _, secretRef := range sa.Secrets {
secret, err := f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Get(secretRef.Name, metav1.GetOptions{})
secret, err := f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Get(context.TODO(), secretRef.Name, metav1.GetOptions{})
if err != nil {
framework.Logf("Error getting secret %s: %v", secretRef.Name, err)
continue
@@ -393,7 +394,7 @@ var _ = SIGDescribe("ServiceAccounts", func() {
AutomountServiceAccountToken: tc.AutomountPodSpec,
},
}
createdPod, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(pod)
createdPod, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(context.TODO(), pod)
framework.ExpectNoError(err)
framework.Logf("created pod %s", tc.PodName)
@@ -418,7 +419,7 @@ var _ = SIGDescribe("ServiceAccounts", func() {
cfg, err := framework.LoadConfig()
framework.ExpectNoError(err)
if _, err := f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(&v1.ConfigMap{
if _, err := f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(context.TODO(), &v1.ConfigMap{
ObjectMeta: metav1.ObjectMeta{
Name: "kube-root-ca.crt",
},
@@ -488,7 +489,7 @@ var _ = SIGDescribe("ServiceAccounts", func() {
}},
},
}
pod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(pod)
pod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(context.TODO(), pod)
framework.ExpectNoError(err)
framework.Logf("created pod")

View File

@@ -17,6 +17,7 @@ limitations under the License.
package autoscaling
import (
"context"
"strings"
"time"
@@ -36,7 +37,7 @@ var _ = SIGDescribe("[Feature:ClusterSizeAutoscalingScaleUp] [Slow] Autoscaling"
SIGDescribe("Autoscaling a service", func() {
ginkgo.BeforeEach(func() {
// Check if Cloud Autoscaler is enabled by trying to get its ConfigMap.
_, err := f.ClientSet.CoreV1().ConfigMaps("kube-system").Get("cluster-autoscaler-status", metav1.GetOptions{})
_, err := f.ClientSet.CoreV1().ConfigMaps("kube-system").Get(context.TODO(), "cluster-autoscaler-status", metav1.GetOptions{})
if err != nil {
e2eskipper.Skipf("test expects Cluster Autoscaler to be enabled")
}

View File

@@ -17,6 +17,7 @@ limitations under the License.
package autoscaling
import (
"context"
"encoding/json"
"fmt"
"math"
@@ -71,7 +72,7 @@ var _ = framework.KubeDescribe("Cluster size autoscaler scalability [Slow]", fun
e2eskipper.SkipUnlessProviderIs("gce", "gke", "kubemark")
// Check if Cloud Autoscaler is enabled by trying to get its ConfigMap.
_, err := f.ClientSet.CoreV1().ConfigMaps("kube-system").Get("cluster-autoscaler-status", metav1.GetOptions{})
_, err := f.ClientSet.CoreV1().ConfigMaps("kube-system").Get(context.TODO(), "cluster-autoscaler-status", metav1.GetOptions{})
if err != nil {
e2eskipper.Skipf("test expects Cluster Autoscaler to be enabled")
}
@@ -115,7 +116,7 @@ var _ = framework.KubeDescribe("Cluster size autoscaler scalability [Slow]", fun
ginkgo.By(fmt.Sprintf("Restoring initial size of the cluster"))
setMigSizes(originalSizes)
framework.ExpectNoError(e2enode.WaitForReadyNodes(c, nodeCount, scaleDownTimeout))
nodes, err := c.CoreV1().Nodes().List(metav1.ListOptions{})
nodes, err := c.CoreV1().Nodes().List(context.TODO(), metav1.ListOptions{})
framework.ExpectNoError(err)
s := time.Now()
makeSchedulableLoop:
@@ -255,7 +256,7 @@ var _ = framework.KubeDescribe("Cluster size autoscaler scalability [Slow]", fun
// annotate all nodes with no-scale-down
ScaleDownDisabledKey := "cluster-autoscaler.kubernetes.io/scale-down-disabled"
nodes, err := f.ClientSet.CoreV1().Nodes().List(metav1.ListOptions{
nodes, err := f.ClientSet.CoreV1().Nodes().List(context.TODO(), metav1.ListOptions{
FieldSelector: fields.Set{
"spec.unschedulable": "false",
}.AsSelector().String(),
@@ -456,7 +457,7 @@ func addAnnotation(f *framework.Framework, nodes []v1.Node, key, value string) e
return err
}
_, err = f.ClientSet.CoreV1().Nodes().Patch(string(node.Name), types.StrategicMergePatchType, patchBytes)
_, err = f.ClientSet.CoreV1().Nodes().Patch(context.TODO(), string(node.Name), types.StrategicMergePatchType, patchBytes)
if err != nil {
return err
}

View File

@@ -17,6 +17,7 @@ limitations under the License.
package autoscaling
import (
"context"
"fmt"
"io/ioutil"
"math"
@@ -147,7 +148,7 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
expectedNodes += size
}
framework.ExpectNoError(e2enode.WaitForReadyNodes(c, expectedNodes, scaleDownTimeout))
nodes, err := c.CoreV1().Nodes().List(metav1.ListOptions{})
nodes, err := c.CoreV1().Nodes().List(context.TODO(), metav1.ListOptions{})
framework.ExpectNoError(err)
s := time.Now()
@@ -178,7 +179,7 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
EventsLoop:
for start := time.Now(); time.Since(start) < scaleUpTimeout; time.Sleep(20 * time.Second) {
ginkgo.By("Waiting for NotTriggerScaleUp event")
events, err := f.ClientSet.CoreV1().Events(f.Namespace.Name).List(metav1.ListOptions{})
events, err := f.ClientSet.CoreV1().Events(f.Namespace.Name).List(context.TODO(), metav1.ListOptions{})
framework.ExpectNoError(err)
for _, e := range events.Items {
@@ -621,7 +622,7 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
ginkgo.By(fmt.Sprintf("New nodes: %v\n", newNodesSet))
registeredNodes := sets.NewString()
for nodeName := range newNodesSet {
node, err := f.ClientSet.CoreV1().Nodes().Get(nodeName, metav1.GetOptions{})
node, err := f.ClientSet.CoreV1().Nodes().Get(context.TODO(), nodeName, metav1.GetOptions{})
if err == nil && node != nil {
registeredNodes.Insert(nodeName)
} else {
@@ -778,7 +779,7 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
}
ginkgo.By("Make remaining nodes unschedulable")
nodes, err := f.ClientSet.CoreV1().Nodes().List(metav1.ListOptions{FieldSelector: fields.Set{
nodes, err := f.ClientSet.CoreV1().Nodes().List(context.TODO(), metav1.ListOptions{FieldSelector: fields.Set{
"spec.unschedulable": "false",
}.AsSelector().String()})
framework.ExpectNoError(err)
@@ -857,7 +858,7 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
ngNodes, err := framework.GetGroupNodes(minMig)
framework.ExpectNoError(err)
framework.ExpectEqual(len(ngNodes) == 1, true)
node, err := f.ClientSet.CoreV1().Nodes().Get(ngNodes[0], metav1.GetOptions{})
node, err := f.ClientSet.CoreV1().Nodes().Get(context.TODO(), ngNodes[0], metav1.GetOptions{})
ginkgo.By(fmt.Sprintf("Target node for scale-down: %s", node.Name))
framework.ExpectNoError(err)
@@ -905,7 +906,7 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
ginkgo.By("Block network connectivity to some nodes to simulate unhealthy cluster")
nodesToBreakCount := int(math.Ceil(math.Max(float64(unhealthyClusterThreshold), 0.5*float64(clusterSize))))
nodes, err := f.ClientSet.CoreV1().Nodes().List(metav1.ListOptions{FieldSelector: fields.Set{
nodes, err := f.ClientSet.CoreV1().Nodes().List(context.TODO(), metav1.ListOptions{FieldSelector: fields.Set{
"spec.unschedulable": "false",
}.AsSelector().String()})
framework.ExpectNoError(err)
@@ -1012,7 +1013,7 @@ func execCmd(args ...string) *exec.Cmd {
func runDrainTest(f *framework.Framework, migSizes map[string]int, namespace string, podsPerNode, pdbSize int, verifyFunction func(int)) {
increasedSize := manuallyIncreaseClusterSize(f, migSizes)
nodes, err := f.ClientSet.CoreV1().Nodes().List(metav1.ListOptions{FieldSelector: fields.Set{
nodes, err := f.ClientSet.CoreV1().Nodes().List(context.TODO(), metav1.ListOptions{FieldSelector: fields.Set{
"spec.unschedulable": "false",
}.AsSelector().String()})
framework.ExpectNoError(err)
@@ -1035,10 +1036,10 @@ func runDrainTest(f *framework.Framework, migSizes map[string]int, namespace str
MinAvailable: &minAvailable,
},
}
_, err = f.ClientSet.PolicyV1beta1().PodDisruptionBudgets(namespace).Create(pdb)
_, err = f.ClientSet.PolicyV1beta1().PodDisruptionBudgets(namespace).Create(context.TODO(), pdb)
defer func() {
f.ClientSet.PolicyV1beta1().PodDisruptionBudgets(namespace).Delete(pdb.Name, &metav1.DeleteOptions{})
f.ClientSet.PolicyV1beta1().PodDisruptionBudgets(namespace).Delete(context.TODO(), pdb.Name, &metav1.DeleteOptions{})
}()
framework.ExpectNoError(err)
@@ -1346,7 +1347,7 @@ func WaitForClusterSizeFunc(c clientset.Interface, sizeFunc func(int) bool, time
// WaitForClusterSizeFuncWithUnready waits until the cluster size matches the given function and assumes some unready nodes.
func WaitForClusterSizeFuncWithUnready(c clientset.Interface, sizeFunc func(int) bool, timeout time.Duration, expectedUnready int) error {
for start := time.Now(); time.Since(start) < timeout; time.Sleep(20 * time.Second) {
nodes, err := c.CoreV1().Nodes().List(metav1.ListOptions{FieldSelector: fields.Set{
nodes, err := c.CoreV1().Nodes().List(context.TODO(), metav1.ListOptions{FieldSelector: fields.Set{
"spec.unschedulable": "false",
}.AsSelector().String()})
if err != nil {
@@ -1373,7 +1374,7 @@ func WaitForClusterSizeFuncWithUnready(c clientset.Interface, sizeFunc func(int)
func waitForCaPodsReadyInNamespace(f *framework.Framework, c clientset.Interface, tolerateUnreadyCount int) error {
var notready []string
for start := time.Now(); time.Now().Before(start.Add(scaleUpTimeout)); time.Sleep(20 * time.Second) {
pods, err := c.CoreV1().Pods(f.Namespace.Name).List(metav1.ListOptions{})
pods, err := c.CoreV1().Pods(f.Namespace.Name).List(context.TODO(), metav1.ListOptions{})
if err != nil {
return fmt.Errorf("failed to get pods: %v", err)
}
@@ -1413,7 +1414,7 @@ func waitForAllCaPodsReadyInNamespace(f *framework.Framework, c clientset.Interf
}
func getAnyNode(c clientset.Interface) *v1.Node {
nodes, err := c.CoreV1().Nodes().List(metav1.ListOptions{FieldSelector: fields.Set{
nodes, err := c.CoreV1().Nodes().List(context.TODO(), metav1.ListOptions{FieldSelector: fields.Set{
"spec.unschedulable": "false",
}.AsSelector().String()})
if err != nil {
@@ -1448,10 +1449,10 @@ func drainNode(f *framework.Framework, node *v1.Node) {
ginkgo.By("Manually drain the single node")
podOpts := metav1.ListOptions{FieldSelector: fields.OneTermEqualSelector(api.PodHostField, node.Name).String()}
pods, err := f.ClientSet.CoreV1().Pods(metav1.NamespaceAll).List(podOpts)
pods, err := f.ClientSet.CoreV1().Pods(metav1.NamespaceAll).List(context.TODO(), podOpts)
framework.ExpectNoError(err)
for _, pod := range pods.Items {
err = f.ClientSet.CoreV1().Pods(pod.Namespace).Delete(pod.Name, metav1.NewDeleteOptions(0))
err = f.ClientSet.CoreV1().Pods(pod.Namespace).Delete(context.TODO(), pod.Name, metav1.NewDeleteOptions(0))
framework.ExpectNoError(err)
}
}
@@ -1459,7 +1460,7 @@ func drainNode(f *framework.Framework, node *v1.Node) {
func makeNodeUnschedulable(c clientset.Interface, node *v1.Node) error {
ginkgo.By(fmt.Sprintf("Taint node %s", node.Name))
for j := 0; j < 3; j++ {
freshNode, err := c.CoreV1().Nodes().Get(node.Name, metav1.GetOptions{})
freshNode, err := c.CoreV1().Nodes().Get(context.TODO(), node.Name, metav1.GetOptions{})
if err != nil {
return err
}
@@ -1473,7 +1474,7 @@ func makeNodeUnschedulable(c clientset.Interface, node *v1.Node) error {
Value: "DisabledForTest",
Effect: v1.TaintEffectNoSchedule,
})
_, err = c.CoreV1().Nodes().Update(freshNode)
_, err = c.CoreV1().Nodes().Update(context.TODO(), freshNode)
if err == nil {
return nil
}
@@ -1496,7 +1497,7 @@ func (CriticalAddonsOnlyError) Error() string {
func makeNodeSchedulable(c clientset.Interface, node *v1.Node, failOnCriticalAddonsOnly bool) error {
ginkgo.By(fmt.Sprintf("Remove taint from node %s", node.Name))
for j := 0; j < 3; j++ {
freshNode, err := c.CoreV1().Nodes().Get(node.Name, metav1.GetOptions{})
freshNode, err := c.CoreV1().Nodes().Get(context.TODO(), node.Name, metav1.GetOptions{})
if err != nil {
return err
}
@@ -1514,7 +1515,7 @@ func makeNodeSchedulable(c clientset.Interface, node *v1.Node, failOnCriticalAdd
return nil
}
freshNode.Spec.Taints = newTaints
_, err = c.CoreV1().Nodes().Update(freshNode)
_, err = c.CoreV1().Nodes().Update(context.TODO(), freshNode)
if err == nil {
return nil
}
@@ -1571,7 +1572,7 @@ func runAntiAffinityPods(f *framework.Framework, namespace string, pods int, id
if err != nil {
return err
}
_, err = f.ClientSet.CoreV1().ReplicationControllers(namespace).Get(id, metav1.GetOptions{})
_, err = f.ClientSet.CoreV1().ReplicationControllers(namespace).Get(context.TODO(), id, metav1.GetOptions{})
if err != nil {
return err
}
@@ -1594,7 +1595,7 @@ func runVolumeAntiAffinityPods(f *framework.Framework, namespace string, pods in
if err != nil {
return err
}
_, err = f.ClientSet.CoreV1().ReplicationControllers(namespace).Get(id, metav1.GetOptions{})
_, err = f.ClientSet.CoreV1().ReplicationControllers(namespace).Get(context.TODO(), id, metav1.GetOptions{})
if err != nil {
return err
}
@@ -1675,7 +1676,7 @@ func runReplicatedPodOnEachNode(f *framework.Framework, nodes []v1.Node, namespa
if err != nil {
return err
}
rc, err := f.ClientSet.CoreV1().ReplicationControllers(namespace).Get(id, metav1.GetOptions{})
rc, err := f.ClientSet.CoreV1().ReplicationControllers(namespace).Get(context.TODO(), id, metav1.GetOptions{})
if err != nil {
return err
}
@@ -1689,7 +1690,7 @@ func runReplicatedPodOnEachNode(f *framework.Framework, nodes []v1.Node, namespa
// (we retry 409 errors in case rc reference got out of sync)
for j := 0; j < 3; j++ {
*rc.Spec.Replicas = int32((i + 1) * podsPerNode)
rc, err = f.ClientSet.CoreV1().ReplicationControllers(namespace).Update(rc)
rc, err = f.ClientSet.CoreV1().ReplicationControllers(namespace).Update(context.TODO(), rc)
if err == nil {
break
}
@@ -1697,14 +1698,14 @@ func runReplicatedPodOnEachNode(f *framework.Framework, nodes []v1.Node, namespa
return err
}
klog.Warningf("Got 409 conflict when trying to scale RC, retries left: %v", 3-j)
rc, err = f.ClientSet.CoreV1().ReplicationControllers(namespace).Get(id, metav1.GetOptions{})
rc, err = f.ClientSet.CoreV1().ReplicationControllers(namespace).Get(context.TODO(), id, metav1.GetOptions{})
if err != nil {
return err
}
}
err = wait.PollImmediate(5*time.Second, podTimeout, func() (bool, error) {
rc, err = f.ClientSet.CoreV1().ReplicationControllers(namespace).Get(id, metav1.GetOptions{})
rc, err = f.ClientSet.CoreV1().ReplicationControllers(namespace).Get(context.TODO(), id, metav1.GetOptions{})
if err != nil || rc.Status.ReadyReplicas < int32((i+1)*podsPerNode) {
return false, nil
}
@@ -1751,7 +1752,7 @@ func manuallyIncreaseClusterSize(f *framework.Framework, originalSizes map[strin
// Try to get clusterwide health from CA status configmap.
// Status configmap is not parsing-friendly, so evil regexpery follows.
func getClusterwideStatus(c clientset.Interface) (string, error) {
configMap, err := c.CoreV1().ConfigMaps("kube-system").Get("cluster-autoscaler-status", metav1.GetOptions{})
configMap, err := c.CoreV1().ConfigMaps("kube-system").Get(context.TODO(), "cluster-autoscaler-status", metav1.GetOptions{})
if err != nil {
return "", err
}
@@ -1800,7 +1801,7 @@ func getStatusTimestamp(status string) (time.Time, error) {
// Try to get scaleup statuses of all node groups.
// Status configmap is not parsing-friendly, so evil regexpery follows.
func getScaleUpStatus(c clientset.Interface) (*scaleUpStatus, error) {
configMap, err := c.CoreV1().ConfigMaps("kube-system").Get("cluster-autoscaler-status", metav1.GetOptions{})
configMap, err := c.CoreV1().ConfigMaps("kube-system").Get(context.TODO(), "cluster-autoscaler-status", metav1.GetOptions{})
if err != nil {
return nil, err
}
@@ -1879,7 +1880,7 @@ func addKubeSystemPdbs(f *framework.Framework) (func(), error) {
var finalErr error
for _, newPdbName := range newPdbs {
ginkgo.By(fmt.Sprintf("Delete PodDisruptionBudget %v", newPdbName))
err := f.ClientSet.PolicyV1beta1().PodDisruptionBudgets("kube-system").Delete(newPdbName, &metav1.DeleteOptions{})
err := f.ClientSet.PolicyV1beta1().PodDisruptionBudgets("kube-system").Delete(context.TODO(), newPdbName, &metav1.DeleteOptions{})
if err != nil {
// log error, but attempt to remove other pdbs
klog.Errorf("Failed to delete PodDisruptionBudget %v, err: %v", newPdbName, err)
@@ -1917,7 +1918,7 @@ func addKubeSystemPdbs(f *framework.Framework) (func(), error) {
MinAvailable: &minAvailable,
},
}
_, err := f.ClientSet.PolicyV1beta1().PodDisruptionBudgets("kube-system").Create(pdb)
_, err := f.ClientSet.PolicyV1beta1().PodDisruptionBudgets("kube-system").Create(context.TODO(), pdb)
newPdbs = append(newPdbs, pdbName)
if err != nil {
@@ -1933,7 +1934,7 @@ func createPriorityClasses(f *framework.Framework) func() {
highPriorityClassName: 1000,
}
for className, priority := range priorityClasses {
_, err := f.ClientSet.SchedulingV1().PriorityClasses().Create(&schedulingv1.PriorityClass{ObjectMeta: metav1.ObjectMeta{Name: className}, Value: priority})
_, err := f.ClientSet.SchedulingV1().PriorityClasses().Create(context.TODO(), &schedulingv1.PriorityClass{ObjectMeta: metav1.ObjectMeta{Name: className}, Value: priority})
if err != nil {
klog.Errorf("Error creating priority class: %v", err)
}
@@ -1942,7 +1943,7 @@ func createPriorityClasses(f *framework.Framework) func() {
return func() {
for className := range priorityClasses {
err := f.ClientSet.SchedulingV1().PriorityClasses().Delete(className, nil)
err := f.ClientSet.SchedulingV1().PriorityClasses().Delete(context.TODO(), className, nil)
if err != nil {
klog.Errorf("Error deleting priority class: %v", err)
}
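The taint helpers above retry Nodes().Get / Nodes().Update a few times because a concurrent writer can bump the resourceVersion between the read and the write, producing a 409. A sketch of that read-modify-write loop with the new context parameter, inside a helper returning error (assuming apierrors aliases k8s.io/apimachinery/pkg/api/errors; nodeName and the mutation are illustrative, the real helper edits Spec.Taints):

for i := 0; i < 3; i++ {
	node, err := c.CoreV1().Nodes().Get(context.TODO(), nodeName, metav1.GetOptions{})
	if err != nil {
		return err
	}
	node.Spec.Unschedulable = true // hypothetical mutation for this sketch
	if _, err := c.CoreV1().Nodes().Update(context.TODO(), node); err == nil {
		return nil
	} else if !apierrors.IsConflict(err) {
		return err
	}
	// 409 Conflict: someone else updated the node first; re-read and retry
}
return fmt.Errorf("too many conflicts updating node %q", nodeName)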

View File

@@ -276,24 +276,24 @@ func (tc *CustomMetricTestCase) Run() {
waitForReplicas(tc.deployment.ObjectMeta.Name, tc.framework.Namespace.ObjectMeta.Name, tc.kubeClient, 15*time.Minute, tc.initialReplicas)
// Autoscale the deployment
_, err = tc.kubeClient.AutoscalingV2beta1().HorizontalPodAutoscalers(tc.framework.Namespace.ObjectMeta.Name).Create(tc.hpa)
_, err = tc.kubeClient.AutoscalingV2beta1().HorizontalPodAutoscalers(tc.framework.Namespace.ObjectMeta.Name).Create(context.TODO(), tc.hpa)
if err != nil {
framework.Failf("Failed to create HPA: %v", err)
}
defer tc.kubeClient.AutoscalingV2beta1().HorizontalPodAutoscalers(tc.framework.Namespace.ObjectMeta.Name).Delete(tc.hpa.ObjectMeta.Name, &metav1.DeleteOptions{})
defer tc.kubeClient.AutoscalingV2beta1().HorizontalPodAutoscalers(tc.framework.Namespace.ObjectMeta.Name).Delete(context.TODO(), tc.hpa.ObjectMeta.Name, &metav1.DeleteOptions{})
waitForReplicas(tc.deployment.ObjectMeta.Name, tc.framework.Namespace.ObjectMeta.Name, tc.kubeClient, 15*time.Minute, tc.scaledReplicas)
}
func createDeploymentToScale(f *framework.Framework, cs clientset.Interface, deployment *appsv1.Deployment, pod *v1.Pod) error {
if deployment != nil {
_, err := cs.AppsV1().Deployments(f.Namespace.ObjectMeta.Name).Create(deployment)
_, err := cs.AppsV1().Deployments(f.Namespace.ObjectMeta.Name).Create(context.TODO(), deployment)
if err != nil {
return err
}
}
if pod != nil {
_, err := cs.CoreV1().Pods(f.Namespace.ObjectMeta.Name).Create(pod)
_, err := cs.CoreV1().Pods(f.Namespace.ObjectMeta.Name).Create(context.TODO(), pod)
if err != nil {
return err
}
@@ -303,10 +303,10 @@ func createDeploymentToScale(f *framework.Framework, cs clientset.Interface, dep
func cleanupDeploymentsToScale(f *framework.Framework, cs clientset.Interface, deployment *appsv1.Deployment, pod *v1.Pod) {
if deployment != nil {
_ = cs.AppsV1().Deployments(f.Namespace.ObjectMeta.Name).Delete(deployment.ObjectMeta.Name, &metav1.DeleteOptions{})
_ = cs.AppsV1().Deployments(f.Namespace.ObjectMeta.Name).Delete(context.TODO(), deployment.ObjectMeta.Name, &metav1.DeleteOptions{})
}
if pod != nil {
_ = cs.CoreV1().Pods(f.Namespace.ObjectMeta.Name).Delete(pod.ObjectMeta.Name, &metav1.DeleteOptions{})
_ = cs.CoreV1().Pods(f.Namespace.ObjectMeta.Name).Delete(context.TODO(), pod.ObjectMeta.Name, &metav1.DeleteOptions{})
}
}
@@ -440,7 +440,7 @@ func externalHPA(namespace string, metricTargets map[string]externalMetricTarget
func waitForReplicas(deploymentName, namespace string, cs clientset.Interface, timeout time.Duration, desiredReplicas int) {
interval := 20 * time.Second
err := wait.PollImmediate(interval, timeout, func() (bool, error) {
deployment, err := cs.AppsV1().Deployments(namespace).Get(deploymentName, metav1.GetOptions{})
deployment, err := cs.AppsV1().Deployments(namespace).Get(context.TODO(), deploymentName, metav1.GetOptions{})
if err != nil {
framework.Failf("Failed to get replication controller %s: %v", deployment, err)
}

View File

@@ -17,6 +17,7 @@ limitations under the License.
package autoscaling
import (
"context"
"fmt"
"math"
"strings"
@@ -265,7 +266,7 @@ func getScheduableCores(nodes []v1.Node) int64 {
}
func fetchDNSScalingConfigMap(c clientset.Interface) (*v1.ConfigMap, error) {
cm, err := c.CoreV1().ConfigMaps(metav1.NamespaceSystem).Get(DNSAutoscalerLabelName, metav1.GetOptions{})
cm, err := c.CoreV1().ConfigMaps(metav1.NamespaceSystem).Get(context.TODO(), DNSAutoscalerLabelName, metav1.GetOptions{})
if err != nil {
return nil, err
}
@@ -273,7 +274,7 @@ func fetchDNSScalingConfigMap(c clientset.Interface) (*v1.ConfigMap, error) {
}
func deleteDNSScalingConfigMap(c clientset.Interface) error {
if err := c.CoreV1().ConfigMaps(metav1.NamespaceSystem).Delete(DNSAutoscalerLabelName, nil); err != nil {
if err := c.CoreV1().ConfigMaps(metav1.NamespaceSystem).Delete(context.TODO(), DNSAutoscalerLabelName, nil); err != nil {
return err
}
framework.Logf("DNS autoscaling ConfigMap deleted.")
@@ -299,7 +300,7 @@ func packDNSScalingConfigMap(params map[string]string) *v1.ConfigMap {
}
func updateDNSScalingConfigMap(c clientset.Interface, configMap *v1.ConfigMap) error {
_, err := c.CoreV1().ConfigMaps(metav1.NamespaceSystem).Update(configMap)
_, err := c.CoreV1().ConfigMaps(metav1.NamespaceSystem).Update(context.TODO(), configMap)
if err != nil {
return err
}
@@ -310,7 +311,7 @@ func updateDNSScalingConfigMap(c clientset.Interface, configMap *v1.ConfigMap) e
func getDNSReplicas(c clientset.Interface) (int, error) {
label := labels.SelectorFromSet(labels.Set(map[string]string{ClusterAddonLabelKey: DNSLabelName}))
listOpts := metav1.ListOptions{LabelSelector: label.String()}
deployments, err := c.AppsV1().Deployments(metav1.NamespaceSystem).List(listOpts)
deployments, err := c.AppsV1().Deployments(metav1.NamespaceSystem).List(context.TODO(), listOpts)
if err != nil {
return 0, err
}
@@ -325,7 +326,7 @@ func getDNSReplicas(c clientset.Interface) (int, error) {
func deleteDNSAutoscalerPod(c clientset.Interface) error {
label := labels.SelectorFromSet(labels.Set(map[string]string{ClusterAddonLabelKey: DNSAutoscalerLabelName}))
listOpts := metav1.ListOptions{LabelSelector: label.String()}
pods, err := c.CoreV1().Pods(metav1.NamespaceSystem).List(listOpts)
pods, err := c.CoreV1().Pods(metav1.NamespaceSystem).List(context.TODO(), listOpts)
if err != nil {
return err
}
@@ -334,7 +335,7 @@ func deleteDNSAutoscalerPod(c clientset.Interface) error {
}
podName := pods.Items[0].Name
if err := c.CoreV1().Pods(metav1.NamespaceSystem).Delete(podName, nil); err != nil {
if err := c.CoreV1().Pods(metav1.NamespaceSystem).Delete(context.TODO(), podName, nil); err != nil {
return err
}
framework.Logf("DNS autoscaling pod %v deleted.", podName)

View File

@@ -18,6 +18,7 @@ package gcp
import (
"bytes"
"context"
"fmt"
"io"
"os"
@@ -301,7 +302,7 @@ var _ = SIGDescribe("Addon update", func() {
// Delete the "ensure exist class" addon at the end.
defer func() {
framework.Logf("Cleaning up ensure exist class addon.")
err := f.ClientSet.CoreV1().Services(addonNsName).Delete("addon-ensure-exists-test", nil)
err := f.ClientSet.CoreV1().Services(addonNsName).Delete(context.TODO(), "addon-ensure-exists-test", nil)
framework.ExpectNoError(err)
}()
@@ -335,7 +336,7 @@ var _ = SIGDescribe("Addon update", func() {
waitForServiceInAddonTest(f.ClientSet, addonNsName, "addon-ensure-exists-test", true)
ginkgo.By("verify invalid addons weren't created")
_, err = f.ClientSet.CoreV1().ReplicationControllers(addonNsName).Get("invalid-addon-test", metav1.GetOptions{})
_, err = f.ClientSet.CoreV1().ReplicationControllers(addonNsName).Get(context.TODO(), "invalid-addon-test", metav1.GetOptions{})
framework.ExpectError(err)
// Invalid addon manifests and the "ensure exist class" addon will be deleted by the deferred function.
@@ -362,7 +363,7 @@ func waitForReplicationControllerwithSelectorInAddonTest(c clientset.Interface,
// waitForReplicationController waits until the RC appears (exist == true), or disappears (exist == false)
func waitForReplicationController(c clientset.Interface, namespace, name string, exist bool, interval, timeout time.Duration) error {
err := wait.PollImmediate(interval, timeout, func() (bool, error) {
_, err := c.CoreV1().ReplicationControllers(namespace).Get(name, metav1.GetOptions{})
_, err := c.CoreV1().ReplicationControllers(namespace).Get(context.TODO(), name, metav1.GetOptions{})
if err != nil {
framework.Logf("Get ReplicationController %s in namespace %s failed (%v).", name, namespace, err)
return !exist, nil
@@ -381,7 +382,7 @@ func waitForReplicationController(c clientset.Interface, namespace, name string,
func waitForServiceWithSelector(c clientset.Interface, namespace string, selector labels.Selector, exist bool, interval,
timeout time.Duration) error {
err := wait.PollImmediate(interval, timeout, func() (bool, error) {
services, err := c.CoreV1().Services(namespace).List(metav1.ListOptions{LabelSelector: selector.String()})
services, err := c.CoreV1().Services(namespace).List(context.TODO(), metav1.ListOptions{LabelSelector: selector.String()})
switch {
case len(services.Items) != 0:
framework.Logf("Service with %s in namespace %s found.", selector.String(), namespace)
@@ -408,7 +409,7 @@ func waitForServiceWithSelector(c clientset.Interface, namespace string, selecto
func waitForReplicationControllerWithSelector(c clientset.Interface, namespace string, selector labels.Selector, exist bool, interval,
timeout time.Duration) error {
err := wait.PollImmediate(interval, timeout, func() (bool, error) {
rcs, err := c.CoreV1().ReplicationControllers(namespace).List(metav1.ListOptions{LabelSelector: selector.String()})
rcs, err := c.CoreV1().ReplicationControllers(namespace).List(context.TODO(), metav1.ListOptions{LabelSelector: selector.String()})
switch {
case len(rcs.Items) != 0:
framework.Logf("ReplicationController with %s in namespace %s found.", selector.String(), namespace)

View File

@@ -17,6 +17,7 @@ limitations under the License.
package gcp
import (
"context"
"fmt"
"os/exec"
"path"
@@ -124,7 +125,7 @@ func generateMasterRegexp(prefix string) string {
// waitForMasters waits until the cluster has the desired number of ready masters in it.
func waitForMasters(masterPrefix string, c clientset.Interface, size int, timeout time.Duration) error {
for start := time.Now(); time.Since(start) < timeout; time.Sleep(20 * time.Second) {
nodes, err := c.CoreV1().Nodes().List(metav1.ListOptions{})
nodes, err := c.CoreV1().Nodes().List(context.TODO(), metav1.ListOptions{})
if err != nil {
framework.Logf("Failed to list nodes: %v", err)
continue

View File

@@ -17,6 +17,7 @@ limitations under the License.
package gcp
import (
"context"
"fmt"
"strings"
"time"
@@ -111,7 +112,7 @@ var _ = SIGDescribe("[Disruptive]NodeLease", func() {
gomega.Eventually(func() error {
pass := true
for _, node := range originalNodes.Items {
if _, err := leaseClient.Get(node.ObjectMeta.Name, metav1.GetOptions{}); err != nil {
if _, err := leaseClient.Get(context.TODO(), node.ObjectMeta.Name, metav1.GetOptions{}); err != nil {
framework.Logf("Try to get lease of node %s, but got error: %v", node.ObjectMeta.Name, err)
pass = false
}
@@ -148,7 +149,7 @@ var _ = SIGDescribe("[Disruptive]NodeLease", func() {
}
framework.ExpectNotEqual(deletedNodeName, "")
gomega.Eventually(func() error {
if _, err := leaseClient.Get(deletedNodeName, metav1.GetOptions{}); err == nil {
if _, err := leaseClient.Get(context.TODO(), deletedNodeName, metav1.GetOptions{}); err == nil {
return fmt.Errorf("node lease is not deleted yet for node %q", deletedNodeName)
}
return nil
@@ -157,7 +158,7 @@ var _ = SIGDescribe("[Disruptive]NodeLease", func() {
ginkgo.By("verify node leases still exist for remaining nodes")
gomega.Eventually(func() error {
for _, node := range targetNodes.Items {
if _, err := leaseClient.Get(node.ObjectMeta.Name, metav1.GetOptions{}); err != nil {
if _, err := leaseClient.Get(context.TODO(), node.ObjectMeta.Name, metav1.GetOptions{}); err != nil {
return err
}
}

View File

@@ -17,6 +17,7 @@ limitations under the License.
package gcp
import (
"context"
"fmt"
"strings"
"sync"
@@ -70,7 +71,7 @@ var _ = SIGDescribe("Reboot [Disruptive] [Feature:Reboot]", func() {
// events for the kube-system namespace on failures
namespaceName := metav1.NamespaceSystem
ginkgo.By(fmt.Sprintf("Collecting events from namespace %q.", namespaceName))
events, err := f.ClientSet.CoreV1().Events(namespaceName).List(metav1.ListOptions{})
events, err := f.ClientSet.CoreV1().Events(namespaceName).List(context.TODO(), metav1.ListOptions{})
framework.ExpectNoError(err)
for _, e := range events.Items {
@@ -232,7 +233,7 @@ func rebootNode(c clientset.Interface, provider, name, rebootCmd string) bool {
// Get the node initially.
framework.Logf("Getting %s", name)
node, err := c.CoreV1().Nodes().Get(name, metav1.GetOptions{})
node, err := c.CoreV1().Nodes().Get(context.TODO(), name, metav1.GetOptions{})
if err != nil {
framework.Logf("Couldn't get node %s", name)
return false

View File

@@ -17,6 +17,7 @@ limitations under the License.
package gcp
import (
"context"
"fmt"
"strings"
"time"
@@ -33,12 +34,12 @@ import (
)
func resizeRC(c clientset.Interface, ns, name string, replicas int32) error {
rc, err := c.CoreV1().ReplicationControllers(ns).Get(name, metav1.GetOptions{})
rc, err := c.CoreV1().ReplicationControllers(ns).Get(context.TODO(), name, metav1.GetOptions{})
if err != nil {
return err
}
*(rc.Spec.Replicas) = replicas
_, err = c.CoreV1().ReplicationControllers(rc.Namespace).Update(rc)
_, err = c.CoreV1().ReplicationControllers(rc.Namespace).Update(context.TODO(), rc)
return err
}

View File

@@ -17,6 +17,7 @@ limitations under the License.
package cloud
import (
"context"
"time"
v1 "k8s.io/api/core/v1"
@@ -65,7 +66,7 @@ var _ = SIGDescribe("[Feature:CloudProvider][Disruptive] Nodes", func() {
framework.ExpectNoError(err)
framework.ExpectEqual(len(newNodes), len(origNodes.Items)-1)
_, err = c.CoreV1().Nodes().Get(nodeToDelete.Name, metav1.GetOptions{})
_, err = c.CoreV1().Nodes().Get(context.TODO(), nodeToDelete.Name, metav1.GetOptions{})
if err == nil {
framework.Failf("node %q still exists when it should be deleted", nodeToDelete.Name)
} else if !apierrors.IsNotFound(err) {

View File

@@ -17,6 +17,7 @@ limitations under the License.
package common
import (
"context"
"encoding/json"
"fmt"
@@ -43,7 +44,7 @@ var _ = ginkgo.Describe("[sig-node] ConfigMap", func() {
configMap := newConfigMap(f, name)
ginkgo.By(fmt.Sprintf("Creating configMap %v/%v", f.Namespace.Name, configMap.Name))
var err error
if configMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(configMap); err != nil {
if configMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(context.TODO(), configMap); err != nil {
framework.Failf("unable to create test configMap %s: %v", configMap.Name, err)
}
@@ -91,7 +92,7 @@ var _ = ginkgo.Describe("[sig-node] ConfigMap", func() {
configMap := newEnvFromConfigMap(f, name)
ginkgo.By(fmt.Sprintf("Creating configMap %v/%v", f.Namespace.Name, configMap.Name))
var err error
if configMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(configMap); err != nil {
if configMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(context.TODO(), configMap); err != nil {
framework.Failf("unable to create test configMap %s: %v", configMap.Name, err)
}
@@ -140,17 +141,17 @@ var _ = ginkgo.Describe("[sig-node] ConfigMap", func() {
name := "configmap-test-" + string(uuid.NewUUID())
configMap := newConfigMap(f, name)
ginkgo.By(fmt.Sprintf("Creating ConfigMap %v/%v", f.Namespace.Name, configMap.Name))
_, err := f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(configMap)
_, err := f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(context.TODO(), configMap)
framework.ExpectNoError(err, "failed to create ConfigMap")
configMap.Data = map[string]string{
"data": "value",
}
ginkgo.By(fmt.Sprintf("Updating configMap %v/%v", f.Namespace.Name, configMap.Name))
_, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Update(configMap)
_, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Update(context.TODO(), configMap)
framework.ExpectNoError(err, "failed to update ConfigMap")
configMapFromUpdate, err := f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Get(name, metav1.GetOptions{})
configMapFromUpdate, err := f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Get(context.TODO(), name, metav1.GetOptions{})
framework.ExpectNoError(err, "failed to get ConfigMap")
ginkgo.By(fmt.Sprintf("Verifying update of ConfigMap %v/%v", f.Namespace.Name, configMap.Name))
framework.ExpectEqual(configMapFromUpdate.Data, configMap.Data)
@@ -160,7 +161,7 @@ var _ = ginkgo.Describe("[sig-node] ConfigMap", func() {
testNamespaceName := f.Namespace.Name
testConfigMapName := "test-configmap" + string(uuid.NewUUID())
_, err := f.ClientSet.CoreV1().ConfigMaps(testNamespaceName).Create(&v1.ConfigMap{
_, err := f.ClientSet.CoreV1().ConfigMaps(testNamespaceName).Create(context.TODO(), &v1.ConfigMap{
ObjectMeta: metav1.ObjectMeta{
Name: testConfigMapName,
Labels: map[string]string{
@@ -185,16 +186,16 @@ var _ = ginkgo.Describe("[sig-node] ConfigMap", func() {
})
framework.ExpectNoError(err, "failed to marshal patch data")
_, err = f.ClientSet.CoreV1().ConfigMaps(testNamespaceName).Patch(testConfigMapName, types.StrategicMergePatchType, []byte(configMapPatchPayload))
_, err = f.ClientSet.CoreV1().ConfigMaps(testNamespaceName).Patch(context.TODO(), testConfigMapName, types.StrategicMergePatchType, []byte(configMapPatchPayload))
framework.ExpectNoError(err, "failed to patch ConfigMap")
configMap, err := f.ClientSet.CoreV1().ConfigMaps(testNamespaceName).Get(testConfigMapName, metav1.GetOptions{})
configMap, err := f.ClientSet.CoreV1().ConfigMaps(testNamespaceName).Get(context.TODO(), testConfigMapName, metav1.GetOptions{})
framework.ExpectNoError(err, "failed to get ConfigMap")
framework.ExpectEqual(configMap.Data["valueName"], "value1", "failed to patch ConfigMap")
framework.ExpectEqual(configMap.Labels["test-configmap"], "patched", "failed to patch ConfigMap")
// listing in all namespaces to hit the endpoint
configMapList, err := f.ClientSet.CoreV1().ConfigMaps("").List(metav1.ListOptions{
configMapList, err := f.ClientSet.CoreV1().ConfigMaps("").List(context.TODO(), metav1.ListOptions{
LabelSelector: "test-configmap-static=true",
})
framework.ExpectNoError(err, "failed to list ConfigMaps with LabelSelector")
@@ -211,7 +212,7 @@ var _ = ginkgo.Describe("[sig-node] ConfigMap", func() {
}
framework.ExpectEqual(testConfigMapFound, true, "failed to find ConfigMap in list")
err = f.ClientSet.CoreV1().ConfigMaps(testNamespaceName).DeleteCollection(&metav1.DeleteOptions{}, metav1.ListOptions{
err = f.ClientSet.CoreV1().ConfigMaps(testNamespaceName).DeleteCollection(context.TODO(), &metav1.DeleteOptions{}, metav1.ListOptions{
LabelSelector: "test-configmap-static=true",
})
framework.ExpectNoError(err, "failed to delete ConfigMap collection with LabelSelector")
@@ -245,5 +246,5 @@ func newConfigMapWithEmptyKey(f *framework.Framework) (*v1.ConfigMap, error) {
}
ginkgo.By(fmt.Sprintf("Creating configMap that has name %s", configMap.Name))
return f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(configMap)
return f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(context.TODO(), configMap)
}
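The ConfigMap test above builds its strategic-merge patch by marshalling a map and handing the bytes to Patch, which now also leads with the context argument. A condensed sketch of that call chain (assuming encoding/json and k8s.io/apimachinery/pkg/types are imported, as they are in the test; ns and name are illustrative):

payload, err := json.Marshal(map[string]interface{}{
	"metadata": map[string]interface{}{
		"labels": map[string]string{"test-configmap": "patched"},
	},
	"data": map[string]string{"valueName": "value1"},
})
framework.ExpectNoError(err, "failed to marshal patch data")
_, err = f.ClientSet.CoreV1().ConfigMaps(ns).Patch(context.TODO(), name, types.StrategicMergePatchType, payload)
framework.ExpectNoError(err, "failed to patch ConfigMap")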

View File

@@ -17,6 +17,7 @@ limitations under the License.
package common
import (
"context"
"fmt"
"path"
@@ -138,7 +139,7 @@ var _ = ginkgo.Describe("[sig-storage] ConfigMap", func() {
ginkgo.By(fmt.Sprintf("Creating configMap with name %s", configMap.Name))
var err error
if configMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(configMap); err != nil {
if configMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(context.TODO(), configMap); err != nil {
framework.Failf("unable to create test configMap %s: %v", configMap.Name, err)
}
@@ -188,7 +189,7 @@ var _ = ginkgo.Describe("[sig-storage] ConfigMap", func() {
ginkgo.By(fmt.Sprintf("Updating configmap %v", configMap.Name))
configMap.ResourceVersion = "" // to force update
configMap.Data["data-1"] = "value-2"
_, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Update(configMap)
_, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Update(context.TODO(), configMap)
framework.ExpectNoError(err, "Failed to update configmap %q in namespace %q", configMap.Name, f.Namespace.Name)
ginkgo.By("waiting to observe update in volume")
@@ -225,7 +226,7 @@ var _ = ginkgo.Describe("[sig-storage] ConfigMap", func() {
ginkgo.By(fmt.Sprintf("Creating configMap with name %s", configMap.Name))
var err error
if configMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(configMap); err != nil {
if configMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(context.TODO(), configMap); err != nil {
framework.Failf("unable to create test configMap %s: %v", configMap.Name, err)
}
@@ -343,12 +344,12 @@ var _ = ginkgo.Describe("[sig-storage] ConfigMap", func() {
ginkgo.By(fmt.Sprintf("Creating configMap with name %s", deleteConfigMap.Name))
var err error
if deleteConfigMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(deleteConfigMap); err != nil {
if deleteConfigMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(context.TODO(), deleteConfigMap); err != nil {
framework.Failf("unable to create test configMap %s: %v", deleteConfigMap.Name, err)
}
ginkgo.By(fmt.Sprintf("Creating configMap with name %s", updateConfigMap.Name))
if updateConfigMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(updateConfigMap); err != nil {
if updateConfigMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(context.TODO(), updateConfigMap); err != nil {
framework.Failf("unable to create test configMap %s: %v", updateConfigMap.Name, err)
}
@@ -452,18 +453,18 @@ var _ = ginkgo.Describe("[sig-storage] ConfigMap", func() {
gomega.Eventually(pollDeleteLogs, podLogTimeout, framework.Poll).Should(gomega.ContainSubstring("value-1"))
ginkgo.By(fmt.Sprintf("Deleting configmap %v", deleteConfigMap.Name))
err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Delete(deleteConfigMap.Name, &metav1.DeleteOptions{})
err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Delete(context.TODO(), deleteConfigMap.Name, &metav1.DeleteOptions{})
framework.ExpectNoError(err, "Failed to delete configmap %q in namespace %q", deleteConfigMap.Name, f.Namespace.Name)
ginkgo.By(fmt.Sprintf("Updating configmap %v", updateConfigMap.Name))
updateConfigMap.ResourceVersion = "" // to force update
delete(updateConfigMap.Data, "data-1")
updateConfigMap.Data["data-3"] = "value-3"
_, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Update(updateConfigMap)
_, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Update(context.TODO(), updateConfigMap)
framework.ExpectNoError(err, "Failed to update configmap %q in namespace %q", updateConfigMap.Name, f.Namespace.Name)
ginkgo.By(fmt.Sprintf("Creating configMap with name %s", createConfigMap.Name))
if createConfigMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(createConfigMap); err != nil {
if createConfigMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(context.TODO(), createConfigMap); err != nil {
framework.Failf("unable to create test configMap %s: %v", createConfigMap.Name, err)
}
@@ -491,7 +492,7 @@ var _ = ginkgo.Describe("[sig-storage] ConfigMap", func() {
ginkgo.By(fmt.Sprintf("Creating configMap with name %s", configMap.Name))
var err error
if configMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(configMap); err != nil {
if configMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(context.TODO(), configMap); err != nil {
framework.Failf("unable to create test configMap %s: %v", configMap.Name, err)
}
@@ -557,43 +558,43 @@ var _ = ginkgo.Describe("[sig-storage] ConfigMap", func() {
name := "immutable"
configMap := newConfigMap(f, name)
currentConfigMap, err := f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(configMap)
currentConfigMap, err := f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(context.TODO(), configMap)
framework.ExpectNoError(err, "Failed to create config map %q in namespace %q", configMap.Name, configMap.Namespace)
currentConfigMap.Data["data-4"] = "value-4"
currentConfigMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Update(currentConfigMap)
currentConfigMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Update(context.TODO(), currentConfigMap)
framework.ExpectNoError(err, "Failed to update config map %q in namespace %q", configMap.Name, configMap.Namespace)
// Mark config map as immutable.
trueVal := true
currentConfigMap.Immutable = &trueVal
currentConfigMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Update(currentConfigMap)
currentConfigMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Update(context.TODO(), currentConfigMap)
framework.ExpectNoError(err, "Failed to mark config map %q in namespace %q as immutable", configMap.Name, configMap.Namespace)
// Ensure data can't be changed now.
currentConfigMap.Data["data-5"] = "value-5"
_, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Update(currentConfigMap)
_, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Update(context.TODO(), currentConfigMap)
framework.ExpectEqual(apierrors.IsInvalid(err), true)
// Ensure config map can't be switched from immutable to mutable.
currentConfigMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Get(name, metav1.GetOptions{})
currentConfigMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Get(context.TODO(), name, metav1.GetOptions{})
framework.ExpectNoError(err, "Failed to get config map %q in namespace %q", configMap.Name, configMap.Namespace)
framework.ExpectEqual(*currentConfigMap.Immutable, true)
falseVal := false
currentConfigMap.Immutable = &falseVal
_, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Update(currentConfigMap)
_, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Update(context.TODO(), currentConfigMap)
framework.ExpectEqual(apierrors.IsInvalid(err), true)
// Ensure that metadata can be changed.
currentConfigMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Get(name, metav1.GetOptions{})
currentConfigMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Get(context.TODO(), name, metav1.GetOptions{})
framework.ExpectNoError(err, "Failed to get config map %q in namespace %q", configMap.Name, configMap.Namespace)
currentConfigMap.Labels = map[string]string{"label1": "value1"}
_, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Update(currentConfigMap)
_, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Update(context.TODO(), currentConfigMap)
framework.ExpectNoError(err, "Failed to update config map %q in namespace %q", configMap.Name, configMap.Namespace)
// Ensure that immutable config map can be deleted.
err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Delete(name, &metav1.DeleteOptions{})
err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Delete(context.TODO(), name, &metav1.DeleteOptions{})
framework.ExpectNoError(err, "Failed to delete config map %q in namespace %q", configMap.Name, configMap.Namespace)
})
@@ -644,7 +645,7 @@ func doConfigMapE2EWithoutMappings(f *framework.Framework, asUser bool, fsGroup
ginkgo.By(fmt.Sprintf("Creating configMap with name %s", configMap.Name))
var err error
if configMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(configMap); err != nil {
if configMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(context.TODO(), configMap); err != nil {
framework.Failf("unable to create test configMap %s: %v", configMap.Name, err)
}
@@ -720,7 +721,7 @@ func doConfigMapE2EWithMappings(f *framework.Framework, asUser bool, fsGroup int
ginkgo.By(fmt.Sprintf("Creating configMap with name %s", configMap.Name))
var err error
if configMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(configMap); err != nil {
if configMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(context.TODO(), configMap); err != nil {
framework.Failf("unable to create test configMap %s: %v", configMap.Name, err)
}
@@ -855,7 +856,7 @@ func createNonOptionalConfigMapPodWithConfig(f *framework.Framework, volumeMount
ginkgo.By(fmt.Sprintf("Creating configMap with name %s", configMap.Name))
var err error
if configMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(configMap); err != nil {
if configMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(context.TODO(), configMap); err != nil {
framework.Failf("unable to create test configMap %s: %v", configMap.Name, err)
}
// creating a pod with configMap object, but with different key which is not present in configMap object.
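
The immutable-ConfigMap hunks above exercise the same context-aware Get and Update calls. A minimal sketch of that flow, assuming the immutable ConfigMaps feature is enabled in the cluster; cs, ns, and the helper name are illustrative.

package example

import (
	"context"
	"fmt"

	apierrors "k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

// markConfigMapImmutable flips the Immutable field and then confirms that
// further data changes are rejected as Invalid, as the test above expects.
func markConfigMapImmutable(cs kubernetes.Interface, ns, name string) error {
	ctx := context.TODO()
	cm, err := cs.CoreV1().ConfigMaps(ns).Get(ctx, name, metav1.GetOptions{})
	if err != nil {
		return err
	}
	immutable := true
	cm.Immutable = &immutable
	if cm, err = cs.CoreV1().ConfigMaps(ns).Update(ctx, cm); err != nil {
		return err
	}
	if cm.Data == nil {
		cm.Data = map[string]string{}
	}
	cm.Data["data-5"] = "value-5" // this write must now be rejected by the server
	if _, err = cs.CoreV1().ConfigMaps(ns).Update(ctx, cm); !apierrors.IsInvalid(err) {
		return fmt.Errorf("expected Invalid error, got %v", err)
	}
	return nil
}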

View File

@@ -17,6 +17,7 @@ limitations under the License.
package common
import (
"context"
"fmt"
"time"
@@ -69,11 +70,11 @@ func (cc *ConformanceContainer) Create() {
}
func (cc *ConformanceContainer) Delete() error {
return cc.PodClient.Delete(cc.podName, metav1.NewDeleteOptions(0))
return cc.PodClient.Delete(context.TODO(), cc.podName, metav1.NewDeleteOptions(0))
}
func (cc *ConformanceContainer) IsReady() (bool, error) {
pod, err := cc.PodClient.Get(cc.podName, metav1.GetOptions{})
pod, err := cc.PodClient.Get(context.TODO(), cc.podName, metav1.GetOptions{})
if err != nil {
return false, err
}
@@ -81,7 +82,7 @@ func (cc *ConformanceContainer) IsReady() (bool, error) {
}
func (cc *ConformanceContainer) GetPhase() (v1.PodPhase, error) {
pod, err := cc.PodClient.Get(cc.podName, metav1.GetOptions{})
pod, err := cc.PodClient.Get(context.TODO(), cc.podName, metav1.GetOptions{})
if err != nil {
return v1.PodUnknown, err
}
@@ -89,7 +90,7 @@ func (cc *ConformanceContainer) GetPhase() (v1.PodPhase, error) {
}
func (cc *ConformanceContainer) GetStatus() (v1.ContainerStatus, error) {
pod, err := cc.PodClient.Get(cc.podName, metav1.GetOptions{})
pod, err := cc.PodClient.Get(context.TODO(), cc.podName, metav1.GetOptions{})
if err != nil {
return v1.ContainerStatus{}, err
}
@@ -101,7 +102,7 @@ func (cc *ConformanceContainer) GetStatus() (v1.ContainerStatus, error) {
}
func (cc *ConformanceContainer) Present() (bool, error) {
_, err := cc.PodClient.Get(cc.podName, metav1.GetOptions{})
_, err := cc.PodClient.Get(context.TODO(), cc.podName, metav1.GetOptions{})
if err == nil {
return true, nil
}
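
The Present helper above turns a Get into an existence check. A short sketch of the same idiom with the context-first Get, treating NotFound as a clean "absent" answer and anything else as a real error; the function and variable names are illustrative.

package example

import (
	"context"

	apierrors "k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

// podPresent reports whether a pod exists, distinguishing NotFound from
// transport or authorization failures.
func podPresent(cs kubernetes.Interface, ns, name string) (bool, error) {
	_, err := cs.CoreV1().Pods(ns).Get(context.TODO(), name, metav1.GetOptions{})
	if err == nil {
		return true, nil
	}
	if apierrors.IsNotFound(err) {
		return false, nil
	}
	return false, err
}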

View File

@@ -17,6 +17,7 @@ limitations under the License.
package common
import (
"context"
"fmt"
"net/url"
"time"
@@ -64,7 +65,7 @@ var _ = framework.KubeDescribe("Probing container", func() {
p := podClient.Create(testWebServerPodSpec(probe.withInitialDelay().build(), nil, containerName, 80))
f.WaitForPodReady(p.Name)
p, err := podClient.Get(p.Name, metav1.GetOptions{})
p, err := podClient.Get(context.TODO(), p.Name, metav1.GetOptions{})
framework.ExpectNoError(err)
isReady, err := testutils.PodRunningReady(p)
framework.ExpectNoError(err)
@@ -96,14 +97,14 @@ var _ = framework.KubeDescribe("Probing container", func() {
framework.ConformanceIt("with readiness probe that fails should never be ready and never restart [NodeConformance]", func() {
p := podClient.Create(testWebServerPodSpec(probe.withFailing().build(), nil, "test-webserver", 80))
gomega.Consistently(func() (bool, error) {
p, err := podClient.Get(p.Name, metav1.GetOptions{})
p, err := podClient.Get(context.TODO(), p.Name, metav1.GetOptions{})
if err != nil {
return false, err
}
return podutil.IsPodReady(p), nil
}, 1*time.Minute, 1*time.Second).ShouldNot(gomega.BeTrue(), "pod should not be ready")
p, err := podClient.Get(p.Name, metav1.GetOptions{})
p, err := podClient.Get(context.TODO(), p.Name, metav1.GetOptions{})
framework.ExpectNoError(err)
isReady, _ := testutils.PodRunningReady(p)
@@ -413,7 +414,7 @@ func RunLivenessTest(f *framework.Framework, pod *v1.Pod, expectNumRestarts int,
// At the end of the test, clean up by removing the pod.
defer func() {
ginkgo.By("deleting the pod")
podClient.Delete(pod.Name, metav1.NewDeleteOptions(0))
podClient.Delete(context.TODO(), pod.Name, metav1.NewDeleteOptions(0))
}()
ginkgo.By(fmt.Sprintf("Creating pod %s in namespace %s", pod.Name, ns))
podClient.Create(pod)
@@ -427,7 +428,7 @@ func RunLivenessTest(f *framework.Framework, pod *v1.Pod, expectNumRestarts int,
// Check the pod's current state and verify that restartCount is present.
ginkgo.By("checking the pod's current state and verifying that restartCount is present")
pod, err := podClient.Get(pod.Name, metav1.GetOptions{})
pod, err := podClient.Get(context.TODO(), pod.Name, metav1.GetOptions{})
framework.ExpectNoError(err, fmt.Sprintf("getting pod %s in namespace %s", pod.Name, ns))
initialRestartCount := podutil.GetExistingContainerStatus(pod.Status.ContainerStatuses, containerName).RestartCount
framework.Logf("Initial restart count of pod %s is %d", pod.Name, initialRestartCount)
@@ -437,7 +438,7 @@ func RunLivenessTest(f *framework.Framework, pod *v1.Pod, expectNumRestarts int,
lastRestartCount := initialRestartCount
observedRestarts := int32(0)
for start := time.Now(); time.Now().Before(deadline); time.Sleep(2 * time.Second) {
pod, err = podClient.Get(pod.Name, metav1.GetOptions{})
pod, err = podClient.Get(context.TODO(), pod.Name, metav1.GetOptions{})
framework.ExpectNoError(err, fmt.Sprintf("getting pod %s", pod.Name))
restartCount := podutil.GetExistingContainerStatus(pod.Status.ContainerStatuses, containerName).RestartCount
if restartCount != lastRestartCount {

View File

@@ -17,6 +17,7 @@ limitations under the License.
package common
import (
"context"
"fmt"
"time"
@@ -166,7 +167,7 @@ var _ = ginkgo.Describe("[sig-storage] Downward API volume", func() {
ginkgo.By("Creating the pod")
podClient.CreateSync(pod)
pod, err := podClient.Get(pod.Name, metav1.GetOptions{})
pod, err := podClient.Get(context.TODO(), pod.Name, metav1.GetOptions{})
framework.ExpectNoError(err, "Failed to get pod %q", pod.Name)
gomega.Eventually(func() (string, error) {

View File

@@ -17,6 +17,7 @@ limitations under the License.
package common
import (
"context"
"fmt"
"path"
@@ -286,7 +287,7 @@ var _ = ginkgo.Describe("[sig-storage] EmptyDir volumes", func() {
framework.ExpectNoError(err, "failed to deploy pod %s", pod.Name)
ginkgo.By("Geting the pod")
pod, err = f.PodClient().Get(pod.Name, metav1.GetOptions{})
pod, err = f.PodClient().Get(context.TODO(), pod.Name, metav1.GetOptions{})
framework.ExpectNoError(err, "failed to get pod %s", pod.Name)
ginkgo.By("Reading file content from the nginx-container")

View File

@@ -17,6 +17,7 @@ limitations under the License.
package common
import (
"context"
"fmt"
"time"
@@ -637,7 +638,7 @@ func waitForPodContainerRestart(f *framework.Framework, pod *v1.Pod, volumeMount
ginkgo.By("Waiting for container to restart")
restarts := int32(0)
err = wait.PollImmediate(10*time.Second, 2*time.Minute, func() (bool, error) {
pod, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Get(pod.Name, metav1.GetOptions{})
pod, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Get(context.TODO(), pod.Name, metav1.GetOptions{})
if err != nil {
return false, err
}
@@ -666,7 +667,7 @@ func waitForPodContainerRestart(f *framework.Framework, pod *v1.Pod, volumeMount
stableCount := int(0)
stableThreshold := int(time.Minute / framework.Poll)
err = wait.PollImmediate(framework.Poll, 2*time.Minute, func() (bool, error) {
pod, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Get(pod.Name, metav1.GetOptions{})
pod, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Get(context.TODO(), pod.Name, metav1.GetOptions{})
if err != nil {
return false, err
}
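
The restart-watching helpers above wrap the context-aware Get in a wait.PollImmediate loop. A compact sketch of that pattern, assuming illustrative names (cs, ns, waitForRestart) and the poll interval and timeout used in the test.

package example

import (
	"context"
	"time"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/wait"
	"k8s.io/client-go/kubernetes"
)

// waitForRestart polls the pod until any container reports at least one
// restart, or the two-minute budget runs out.
func waitForRestart(cs kubernetes.Interface, ns, podName string) error {
	return wait.PollImmediate(10*time.Second, 2*time.Minute, func() (bool, error) {
		pod, err := cs.CoreV1().Pods(ns).Get(context.TODO(), podName, metav1.GetOptions{})
		if err != nil {
			return false, err
		}
		for _, status := range pod.Status.ContainerStatuses {
			if status.RestartCount > 0 {
				return true, nil
			}
		}
		return false, nil
	})
}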

View File

@@ -199,7 +199,7 @@ var _ = framework.KubeDescribe("InitContainer [NodeConformance]", func() {
}
framework.Logf("PodSpec: initContainers in spec.initContainers")
startedPod := podClient.Create(pod)
w, err := podClient.Watch(metav1.SingleObject(startedPod.ObjectMeta))
w, err := podClient.Watch(context.TODO(), metav1.SingleObject(startedPod.ObjectMeta))
framework.ExpectNoError(err, "error watching a pod")
wr := watch.NewRecorder(w)
ctx, cancel := watchtools.ContextWithOptionalTimeout(context.Background(), framework.PodStartTimeout)
@@ -269,7 +269,7 @@ var _ = framework.KubeDescribe("InitContainer [NodeConformance]", func() {
}
framework.Logf("PodSpec: initContainers in spec.initContainers")
startedPod := podClient.Create(pod)
w, err := podClient.Watch(metav1.SingleObject(startedPod.ObjectMeta))
w, err := podClient.Watch(context.TODO(), metav1.SingleObject(startedPod.ObjectMeta))
framework.ExpectNoError(err, "error watching a pod")
wr := watch.NewRecorder(w)
ctx, cancel := watchtools.ContextWithOptionalTimeout(context.Background(), framework.PodStartTimeout)
@@ -340,7 +340,7 @@ var _ = framework.KubeDescribe("InitContainer [NodeConformance]", func() {
}
framework.Logf("PodSpec: initContainers in spec.initContainers")
startedPod := podClient.Create(pod)
w, err := podClient.Watch(metav1.SingleObject(startedPod.ObjectMeta))
w, err := podClient.Watch(context.TODO(), metav1.SingleObject(startedPod.ObjectMeta))
framework.ExpectNoError(err, "error watching a pod")
wr := watch.NewRecorder(w)
@@ -457,7 +457,7 @@ var _ = framework.KubeDescribe("InitContainer [NodeConformance]", func() {
framework.Logf("PodSpec: initContainers in spec.initContainers")
startedPod := podClient.Create(pod)
w, err := podClient.Watch(metav1.SingleObject(startedPod.ObjectMeta))
w, err := podClient.Watch(context.TODO(), metav1.SingleObject(startedPod.ObjectMeta))
framework.ExpectNoError(err, "error watching a pod")
wr := watch.NewRecorder(w)
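
The init-container tests above open a watch scoped to one pod; Watch now takes the context first as well. A sketch of that setup with a caller-supplied context, draining events until the context ends or the watch closes; names are illustrative and event handling is left as a placeholder.

package example

import (
	"context"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

// watchSinglePod watches exactly one pod, using its metadata to build the
// field selector and starting resource version, as the tests above do.
func watchSinglePod(ctx context.Context, cs kubernetes.Interface, pod *v1.Pod) error {
	w, err := cs.CoreV1().Pods(pod.Namespace).Watch(ctx, metav1.SingleObject(pod.ObjectMeta))
	if err != nil {
		return err
	}
	defer w.Stop()
	for {
		select {
		case <-ctx.Done():
			return ctx.Err()
		case ev, ok := <-w.ResultChan():
			if !ok {
				return nil // watch channel closed
			}
			_ = ev // inspect ev.Type and ev.Object here
		}
	}
}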

View File

@@ -106,7 +106,7 @@ var _ = framework.KubeDescribe("Kubelet", func() {
*/
framework.ConformanceIt("should have an terminated reason [NodeConformance]", func() {
gomega.Eventually(func() error {
podData, err := podClient.Get(podName, metav1.GetOptions{})
podData, err := podClient.Get(context.TODO(), podName, metav1.GetOptions{})
if err != nil {
return err
}
@@ -130,7 +130,7 @@ var _ = framework.KubeDescribe("Kubelet", func() {
Description: Create a Pod with terminated state. This terminated pod MUST be able to be deleted.
*/
framework.ConformanceIt("should be possible to delete [NodeConformance]", func() {
err := podClient.Delete(podName, &metav1.DeleteOptions{})
err := podClient.Delete(context.TODO(), podName, &metav1.DeleteOptions{})
gomega.Expect(err).To(gomega.BeNil(), fmt.Sprintf("Error deleting Pod %v", err))
})
})
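
Delete follows the same rule, with the context first and the delete options after the name. A one-line sketch, assuming illustrative names; passing metav1.NewDeleteOptions(0) requests immediate deletion, while &metav1.DeleteOptions{} keeps the object's default grace period.

package example

import (
	"context"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

// deletePodNow issues a deletion with a zero-second grace period.
func deletePodNow(cs kubernetes.Interface, ns, name string) error {
	return cs.CoreV1().Pods(ns).Delete(context.TODO(), name, metav1.NewDeleteOptions(0))
}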

View File

@@ -17,6 +17,7 @@ limitations under the License.
package common
import (
"context"
"encoding/json"
"fmt"
"time"
@@ -81,10 +82,10 @@ var _ = framework.KubeDescribe("Lease", func() {
},
}
createdLease, err := leaseClient.Create(lease)
createdLease, err := leaseClient.Create(context.TODO(), lease)
framework.ExpectNoError(err, "creating Lease failed")
readLease, err := leaseClient.Get(name, metav1.GetOptions{})
readLease, err := leaseClient.Get(context.TODO(), name, metav1.GetOptions{})
framework.ExpectNoError(err, "couldn't read Lease")
framework.ExpectEqual(apiequality.Semantic.DeepEqual(lease.Spec, readLease.Spec), true)
@@ -96,10 +97,10 @@ var _ = framework.KubeDescribe("Lease", func() {
LeaseTransitions: pointer.Int32Ptr(1),
}
_, err = leaseClient.Update(createdLease)
_, err = leaseClient.Update(context.TODO(), createdLease)
framework.ExpectNoError(err, "updating Lease failed")
readLease, err = leaseClient.Get(name, metav1.GetOptions{})
readLease, err = leaseClient.Get(context.TODO(), name, metav1.GetOptions{})
framework.ExpectNoError(err, "couldn't read Lease")
framework.ExpectEqual(apiequality.Semantic.DeepEqual(createdLease.Spec, readLease.Spec), true)
@@ -114,10 +115,10 @@ var _ = framework.KubeDescribe("Lease", func() {
patchBytes, err := getPatchBytes(readLease, patchedLease)
framework.ExpectNoError(err, "creating patch failed")
_, err = leaseClient.Patch(name, types.StrategicMergePatchType, patchBytes)
_, err = leaseClient.Patch(context.TODO(), name, types.StrategicMergePatchType, patchBytes)
framework.ExpectNoError(err, "patching Lease failed")
readLease, err = leaseClient.Get(name, metav1.GetOptions{})
readLease, err = leaseClient.Get(context.TODO(), name, metav1.GetOptions{})
framework.ExpectNoError(err, "couldn't read Lease")
framework.ExpectEqual(apiequality.Semantic.DeepEqual(patchedLease.Spec, readLease.Spec), true)
@@ -135,25 +136,25 @@ var _ = framework.KubeDescribe("Lease", func() {
LeaseTransitions: pointer.Int32Ptr(0),
},
}
_, err = leaseClient.Create(lease2)
_, err = leaseClient.Create(context.TODO(), lease2)
framework.ExpectNoError(err, "creating Lease failed")
leases, err := leaseClient.List(metav1.ListOptions{})
leases, err := leaseClient.List(context.TODO(), metav1.ListOptions{})
framework.ExpectNoError(err, "couldn't list Leases")
framework.ExpectEqual(len(leases.Items), 2)
selector := labels.Set(map[string]string{"deletecollection": "true"}).AsSelector()
err = leaseClient.DeleteCollection(&metav1.DeleteOptions{}, metav1.ListOptions{LabelSelector: selector.String()})
err = leaseClient.DeleteCollection(context.TODO(), &metav1.DeleteOptions{}, metav1.ListOptions{LabelSelector: selector.String()})
framework.ExpectNoError(err, "couldn't delete collection")
leases, err = leaseClient.List(metav1.ListOptions{})
leases, err = leaseClient.List(context.TODO(), metav1.ListOptions{})
framework.ExpectNoError(err, "couldn't list Leases")
framework.ExpectEqual(len(leases.Items), 1)
err = leaseClient.Delete(name, &metav1.DeleteOptions{})
err = leaseClient.Delete(context.TODO(), name, &metav1.DeleteOptions{})
framework.ExpectNoError(err, "deleting Lease failed")
_, err = leaseClient.Get(name, metav1.GetOptions{})
_, err = leaseClient.Get(context.TODO(), name, metav1.GetOptions{})
framework.ExpectEqual(apierrors.IsNotFound(err), true)
})
})
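
The Lease test above runs the full CRUD sequence through the coordination client. A sketch of the create-then-renew part with the context-aware calls; the helper name, cs, and ns are illustrative, and the lease duration is an arbitrary example value.

package example

import (
	"context"
	"time"

	coordinationv1 "k8s.io/api/coordination/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/utils/pointer"
)

// createAndRenewLease creates a Lease and then bumps its RenewTime with the
// context-aware Update.
func createAndRenewLease(cs kubernetes.Interface, ns, name string) error {
	ctx := context.TODO()
	lease := &coordinationv1.Lease{
		ObjectMeta: metav1.ObjectMeta{Name: name},
		Spec: coordinationv1.LeaseSpec{
			HolderIdentity:       pointer.StringPtr(name),
			LeaseDurationSeconds: pointer.Int32Ptr(30),
			RenewTime:            &metav1.MicroTime{Time: time.Now()},
		},
	}
	created, err := cs.CoordinationV1().Leases(ns).Create(ctx, lease)
	if err != nil {
		return err
	}
	created.Spec.RenewTime = &metav1.MicroTime{Time: time.Now()}
	_, err = cs.CoordinationV1().Leases(ns).Update(ctx, created)
	return err
}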

View File

@@ -17,6 +17,7 @@ limitations under the License.
package common
import (
"context"
"fmt"
"time"
@@ -54,7 +55,7 @@ var _ = framework.KubeDescribe("NodeLease", func() {
)
ginkgo.By("check that lease for this Kubelet exists in the kube-node-lease namespace")
gomega.Eventually(func() error {
lease, err = leaseClient.Get(nodeName, metav1.GetOptions{})
lease, err = leaseClient.Get(context.TODO(), nodeName, metav1.GetOptions{})
if err != nil {
return err
}
@@ -65,7 +66,7 @@ var _ = framework.KubeDescribe("NodeLease", func() {
ginkgo.By("check that node lease is updated at least once within the lease duration")
gomega.Eventually(func() error {
newLease, err := leaseClient.Get(nodeName, metav1.GetOptions{})
newLease, err := leaseClient.Get(context.TODO(), nodeName, metav1.GetOptions{})
if err != nil {
return err
}
@@ -91,7 +92,7 @@ var _ = framework.KubeDescribe("NodeLease", func() {
leaseList *coordinationv1.LeaseList
)
gomega.Eventually(func() error {
leaseList, err = leaseClient.List(metav1.ListOptions{})
leaseList, err = leaseClient.List(context.TODO(), metav1.ListOptions{})
if err != nil {
return err
}
@@ -116,7 +117,7 @@ var _ = framework.KubeDescribe("NodeLease", func() {
var err error
var lease *coordinationv1.Lease
gomega.Eventually(func() error {
lease, err = f.ClientSet.CoordinationV1().Leases(v1.NamespaceNodeLease).Get(nodeName, metav1.GetOptions{})
lease, err = f.ClientSet.CoordinationV1().Leases(v1.NamespaceNodeLease).Get(context.TODO(), nodeName, metav1.GetOptions{})
if err != nil {
return err
}
@@ -175,7 +176,7 @@ var _ = framework.KubeDescribe("NodeLease", func() {
// This check on node status is only meaningful when this e2e test is
// running as cluster e2e test, because node e2e test does not create and
// run controller manager, i.e., no node lifecycle controller.
node, err := f.ClientSet.CoreV1().Nodes().Get(nodeName, metav1.GetOptions{})
node, err := f.ClientSet.CoreV1().Nodes().Get(context.TODO(), nodeName, metav1.GetOptions{})
gomega.Expect(err).To(gomega.BeNil())
_, readyCondition := testutils.GetNodeCondition(&node.Status, v1.NodeReady)
framework.ExpectEqual(readyCondition.Status, v1.ConditionTrue)
@@ -184,7 +185,7 @@ var _ = framework.KubeDescribe("NodeLease", func() {
})
func getHeartbeatTimeAndStatus(clientSet clientset.Interface, nodeName string) (time.Time, v1.NodeStatus) {
node, err := clientSet.CoreV1().Nodes().Get(nodeName, metav1.GetOptions{})
node, err := clientSet.CoreV1().Nodes().Get(context.TODO(), nodeName, metav1.GetOptions{})
gomega.Expect(err).To(gomega.BeNil())
_, readyCondition := testutils.GetNodeCondition(&node.Status, v1.NodeReady)
framework.ExpectEqual(readyCondition.Status, v1.ConditionTrue)
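
The heartbeat helper above reads a node's Ready condition after a context-aware Get; Nodes is cluster-scoped, so there is no namespace argument. A sketch of that check with illustrative names.

package example

import (
	"context"
	"fmt"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

// nodeIsReady fetches a node and scans its conditions for NodeReady.
func nodeIsReady(cs kubernetes.Interface, nodeName string) (bool, error) {
	node, err := cs.CoreV1().Nodes().Get(context.TODO(), nodeName, metav1.GetOptions{})
	if err != nil {
		return false, err
	}
	for _, cond := range node.Status.Conditions {
		if cond.Type == v1.NodeReady {
			return cond.Status == v1.ConditionTrue, nil
		}
	}
	return false, fmt.Errorf("node %s has no Ready condition", nodeName)
}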

View File

@@ -18,6 +18,7 @@ package common
import (
"bytes"
"context"
"fmt"
"io"
"runtime/debug"
@@ -63,7 +64,7 @@ func testHostIP(podClient *framework.PodClient, pod *v1.Pod) {
hostIPTimeout := 2 * time.Minute
t := time.Now()
for {
p, err := podClient.Get(pod.Name, metav1.GetOptions{})
p, err := podClient.Get(context.TODO(), pod.Name, metav1.GetOptions{})
framework.ExpectNoError(err, "Failed to get pod %q", pod.Name)
if p.Status.HostIP != "" {
framework.Logf("Pod %s has hostIP: %s", p.Name, p.Status.HostIP)
@@ -111,7 +112,7 @@ func getRestartDelay(podClient *framework.PodClient, podName string, containerNa
var previousFinishedAt time.Time
for time.Since(beginTime) < (2 * maxBackOffTolerance) { // may just miss the 1st MaxContainerBackOff delay
time.Sleep(time.Second)
pod, err := podClient.Get(podName, metav1.GetOptions{})
pod, err := podClient.Get(context.TODO(), podName, metav1.GetOptions{})
framework.ExpectNoError(err, fmt.Sprintf("getting pod %s", podName))
status, ok := podutil.GetContainerStatus(pod.Status.ContainerStatuses, containerName)
if !ok {
@@ -230,7 +231,7 @@ var _ = framework.KubeDescribe("Pods", func() {
ginkgo.By("setting up watch")
selector := labels.SelectorFromSet(labels.Set(map[string]string{"time": value}))
options := metav1.ListOptions{LabelSelector: selector.String()}
pods, err := podClient.List(options)
pods, err := podClient.List(context.TODO(), options)
framework.ExpectNoError(err, "failed to query for pods")
framework.ExpectEqual(len(pods.Items), 0)
options = metav1.ListOptions{
@@ -242,7 +243,7 @@ var _ = framework.KubeDescribe("Pods", func() {
lw := &cache.ListWatch{
ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
options.LabelSelector = selector.String()
podList, err := podClient.List(options)
podList, err := podClient.List(context.TODO(), options)
if err == nil {
select {
case listCompleted <- true:
@@ -256,7 +257,7 @@ var _ = framework.KubeDescribe("Pods", func() {
},
WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
options.LabelSelector = selector.String()
return podClient.Watch(options)
return podClient.Watch(context.TODO(), options)
},
}
_, _, w, _ := watchtools.NewIndexerInformerWatcher(lw, &v1.Pod{})
@@ -268,7 +269,7 @@ var _ = framework.KubeDescribe("Pods", func() {
ginkgo.By("verifying the pod is in kubernetes")
selector = labels.SelectorFromSet(labels.Set(map[string]string{"time": value}))
options = metav1.ListOptions{LabelSelector: selector.String()}
pods, err = podClient.List(options)
pods, err = podClient.List(context.TODO(), options)
framework.ExpectNoError(err, "failed to query for pods")
framework.ExpectEqual(len(pods.Items), 1)
@@ -291,11 +292,11 @@ var _ = framework.KubeDescribe("Pods", func() {
// may be carried out immediately rather than gracefully.
framework.ExpectNoError(f.WaitForPodRunning(pod.Name))
// save the running pod
pod, err = podClient.Get(pod.Name, metav1.GetOptions{})
pod, err = podClient.Get(context.TODO(), pod.Name, metav1.GetOptions{})
framework.ExpectNoError(err, "failed to GET scheduled pod")
ginkgo.By("deleting the pod gracefully")
err = podClient.Delete(pod.Name, metav1.NewDeleteOptions(30))
err = podClient.Delete(context.TODO(), pod.Name, metav1.NewDeleteOptions(30))
framework.ExpectNoError(err, "failed to delete pod")
ginkgo.By("verifying the kubelet observed the termination notice")
@@ -348,7 +349,7 @@ var _ = framework.KubeDescribe("Pods", func() {
selector = labels.SelectorFromSet(labels.Set(map[string]string{"time": value}))
options = metav1.ListOptions{LabelSelector: selector.String()}
pods, err = podClient.List(options)
pods, err = podClient.List(context.TODO(), options)
framework.ExpectNoError(err, "failed to query for pods")
framework.ExpectEqual(len(pods.Items), 0)
})
@@ -386,7 +387,7 @@ var _ = framework.KubeDescribe("Pods", func() {
ginkgo.By("verifying the pod is in kubernetes")
selector := labels.SelectorFromSet(labels.Set(map[string]string{"time": value}))
options := metav1.ListOptions{LabelSelector: selector.String()}
pods, err := podClient.List(options)
pods, err := podClient.List(context.TODO(), options)
framework.ExpectNoError(err, "failed to query for pods")
framework.ExpectEqual(len(pods.Items), 1)
@@ -401,7 +402,7 @@ var _ = framework.KubeDescribe("Pods", func() {
ginkgo.By("verifying the updated pod is in kubernetes")
selector = labels.SelectorFromSet(labels.Set(map[string]string{"time": value}))
options = metav1.ListOptions{LabelSelector: selector.String()}
pods, err = podClient.List(options)
pods, err = podClient.List(context.TODO(), options)
framework.ExpectNoError(err, "failed to query for pods")
framework.ExpectEqual(len(pods.Items), 1)
framework.Logf("Pod update OK")
@@ -440,7 +441,7 @@ var _ = framework.KubeDescribe("Pods", func() {
ginkgo.By("verifying the pod is in kubernetes")
selector := labels.SelectorFromSet(labels.Set(map[string]string{"time": value}))
options := metav1.ListOptions{LabelSelector: selector.String()}
pods, err := podClient.List(options)
pods, err := podClient.List(context.TODO(), options)
framework.ExpectNoError(err, "failed to query for pods")
framework.ExpectEqual(len(pods.Items), 1)
@@ -504,7 +505,7 @@ var _ = framework.KubeDescribe("Pods", func() {
},
},
}
_, err := f.ClientSet.CoreV1().Services(f.Namespace.Name).Create(svc)
_, err := f.ClientSet.CoreV1().Services(f.Namespace.Name).Create(context.TODO(), svc)
framework.ExpectNoError(err, "failed to create service")
// Make a client pod that verifies that it has the service environment variables.
@@ -828,7 +829,7 @@ var _ = framework.KubeDescribe("Pods", func() {
framework.ExpectEqual(podClient.PodIsReady(podName), false, "Expect pod's Ready condition to be false initially.")
ginkgo.By(fmt.Sprintf("patching pod status with condition %q to true", readinessGate1))
_, err := podClient.Patch(podName, types.StrategicMergePatchType, []byte(fmt.Sprintf(patchStatusFmt, readinessGate1, "True")), "status")
_, err := podClient.Patch(context.TODO(), podName, types.StrategicMergePatchType, []byte(fmt.Sprintf(patchStatusFmt, readinessGate1, "True")), "status")
framework.ExpectNoError(err)
// Sleep for 10 seconds.
time.Sleep(syncLoopFrequency)
@@ -836,12 +837,12 @@ var _ = framework.KubeDescribe("Pods", func() {
framework.ExpectEqual(podClient.PodIsReady(podName), false, "Expect pod's Ready condition to be false with only one condition in readinessGates equal to True")
ginkgo.By(fmt.Sprintf("patching pod status with condition %q to true", readinessGate2))
_, err = podClient.Patch(podName, types.StrategicMergePatchType, []byte(fmt.Sprintf(patchStatusFmt, readinessGate2, "True")), "status")
_, err = podClient.Patch(context.TODO(), podName, types.StrategicMergePatchType, []byte(fmt.Sprintf(patchStatusFmt, readinessGate2, "True")), "status")
framework.ExpectNoError(err)
validatePodReadiness(true)
ginkgo.By(fmt.Sprintf("patching pod status with condition %q to false", readinessGate1))
_, err = podClient.Patch(podName, types.StrategicMergePatchType, []byte(fmt.Sprintf(patchStatusFmt, readinessGate1, "False")), "status")
_, err = podClient.Patch(context.TODO(), podName, types.StrategicMergePatchType, []byte(fmt.Sprintf(patchStatusFmt, readinessGate1, "False")), "status")
framework.ExpectNoError(err)
validatePodReadiness(false)
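
The Pods test above builds a cache.ListWatch by hand; after the migration its List and Watch funcs must thread a context into the client even though the ListWatch callbacks themselves do not take one. A sketch of that wiring, with the helper name and selector as illustrative assumptions.

package example

import (
	"context"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/watch"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/cache"
)

// podListWatch returns a ListWatch whose callbacks pass context.TODO() to the
// context-aware Pods client, as the informer setup in the test does.
func podListWatch(cs kubernetes.Interface, ns, selector string) *cache.ListWatch {
	return &cache.ListWatch{
		ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
			options.LabelSelector = selector
			return cs.CoreV1().Pods(ns).List(context.TODO(), options)
		},
		WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
			options.LabelSelector = selector
			return cs.CoreV1().Pods(ns).Watch(context.TODO(), options)
		},
	}
}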

View File

@@ -17,6 +17,7 @@ limitations under the License.
package common
import (
"context"
"encoding/json"
v1 "k8s.io/api/core/v1"
@@ -36,14 +37,14 @@ var _ = ginkgo.Describe("[sig-architecture] PodTemplates", func() {
podTemplateName := "nginx-pod-template-" + string(uuid.NewUUID())
// get a list of PodTemplates (in all namespaces to hit endpoint)
podTemplateList, err := f.ClientSet.CoreV1().PodTemplates("").List(metav1.ListOptions{
podTemplateList, err := f.ClientSet.CoreV1().PodTemplates("").List(context.TODO(), metav1.ListOptions{
LabelSelector: "podtemplate-static=true",
})
framework.ExpectNoError(err, "failed to list all PodTemplates")
framework.ExpectEqual(len(podTemplateList.Items), 0, "unable to find templates")
// create a PodTemplate
_, err = f.ClientSet.CoreV1().PodTemplates(testNamespaceName).Create(&v1.PodTemplate{
_, err = f.ClientSet.CoreV1().PodTemplates(testNamespaceName).Create(context.TODO(), &v1.PodTemplate{
ObjectMeta: metav1.ObjectMeta{
Name: podTemplateName,
Labels: map[string]string{
@@ -61,7 +62,7 @@ var _ = ginkgo.Describe("[sig-architecture] PodTemplates", func() {
framework.ExpectNoError(err, "failed to create PodTemplate")
// get template
podTemplateRead, err := f.ClientSet.CoreV1().PodTemplates(testNamespaceName).Get(podTemplateName, metav1.GetOptions{})
podTemplateRead, err := f.ClientSet.CoreV1().PodTemplates(testNamespaceName).Get(context.TODO(), podTemplateName, metav1.GetOptions{})
framework.ExpectNoError(err, "failed to get created PodTemplate")
framework.ExpectEqual(podTemplateRead.ObjectMeta.Name, podTemplateName)
@@ -74,20 +75,20 @@ var _ = ginkgo.Describe("[sig-architecture] PodTemplates", func() {
},
})
framework.ExpectNoError(err, "failed to marshal patch data")
_, err = f.ClientSet.CoreV1().PodTemplates(testNamespaceName).Patch(podTemplateName, types.StrategicMergePatchType, []byte(podTemplatePatch))
_, err = f.ClientSet.CoreV1().PodTemplates(testNamespaceName).Patch(context.TODO(), podTemplateName, types.StrategicMergePatchType, []byte(podTemplatePatch))
framework.ExpectNoError(err, "failed to patch PodTemplate")
// get template (ensure label is there)
podTemplateRead, err = f.ClientSet.CoreV1().PodTemplates(testNamespaceName).Get(podTemplateName, metav1.GetOptions{})
podTemplateRead, err = f.ClientSet.CoreV1().PodTemplates(testNamespaceName).Get(context.TODO(), podTemplateName, metav1.GetOptions{})
framework.ExpectNoError(err, "failed to get PodTemplate")
framework.ExpectEqual(podTemplateRead.ObjectMeta.Labels["podtemplate"], "patched", "failed to patch template, new label not found")
// delete the PodTemplate
err = f.ClientSet.CoreV1().PodTemplates(testNamespaceName).Delete(podTemplateName, &metav1.DeleteOptions{})
err = f.ClientSet.CoreV1().PodTemplates(testNamespaceName).Delete(context.TODO(), podTemplateName, &metav1.DeleteOptions{})
framework.ExpectNoError(err, "failed to delete PodTemplate")
// list the PodTemplates
podTemplateList, err = f.ClientSet.CoreV1().PodTemplates("").List(metav1.ListOptions{
podTemplateList, err = f.ClientSet.CoreV1().PodTemplates("").List(context.TODO(), metav1.ListOptions{
LabelSelector: "podtemplate-static=true",
})
framework.ExpectNoError(err, "failed to list PodTemplate")

View File

@@ -17,6 +17,7 @@ limitations under the License.
package common
import (
"context"
"fmt"
"k8s.io/api/core/v1"
@@ -62,11 +63,11 @@ var _ = ginkgo.Describe("[sig-storage] Projected combined", func() {
}
ginkgo.By(fmt.Sprintf("Creating configMap with name %s", configMap.Name))
if configMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(configMap); err != nil {
if configMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(context.TODO(), configMap); err != nil {
framework.Failf("unable to create test configMap %s: %v", configMap.Name, err)
}
ginkgo.By(fmt.Sprintf("Creating secret with name %s", secret.Name))
if secret, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Create(secret); err != nil {
if secret, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Create(context.TODO(), secret); err != nil {
framework.Failf("unable to create test secret %s: %v", secret.Name, err)
}

View File

@@ -17,6 +17,7 @@ limitations under the License.
package common
import (
"context"
"fmt"
"path"
@@ -137,7 +138,7 @@ var _ = ginkgo.Describe("[sig-storage] Projected configMap", func() {
ginkgo.By(fmt.Sprintf("Creating projection with configMap that has name %s", configMap.Name))
var err error
if configMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(configMap); err != nil {
if configMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(context.TODO(), configMap); err != nil {
framework.Failf("unable to create test configMap %s: %v", configMap.Name, err)
}
@@ -193,7 +194,7 @@ var _ = ginkgo.Describe("[sig-storage] Projected configMap", func() {
ginkgo.By(fmt.Sprintf("Updating configmap %v", configMap.Name))
configMap.ResourceVersion = "" // to force update
configMap.Data["data-1"] = "value-2"
_, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Update(configMap)
_, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Update(context.TODO(), configMap)
framework.ExpectNoError(err, "Failed to update configmap %q in namespace %q", configMap.Name, f.Namespace.Name)
ginkgo.By("waiting to observe update in volume")
@@ -252,12 +253,12 @@ var _ = ginkgo.Describe("[sig-storage] Projected configMap", func() {
ginkgo.By(fmt.Sprintf("Creating configMap with name %s", deleteConfigMap.Name))
var err error
if deleteConfigMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(deleteConfigMap); err != nil {
if deleteConfigMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(context.TODO(), deleteConfigMap); err != nil {
framework.Failf("unable to create test configMap %s: %v", deleteConfigMap.Name, err)
}
ginkgo.By(fmt.Sprintf("Creating configMap with name %s", updateConfigMap.Name))
if updateConfigMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(updateConfigMap); err != nil {
if updateConfigMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(context.TODO(), updateConfigMap); err != nil {
framework.Failf("unable to create test configMap %s: %v", updateConfigMap.Name, err)
}
@@ -379,18 +380,18 @@ var _ = ginkgo.Describe("[sig-storage] Projected configMap", func() {
gomega.Eventually(pollDeleteLogs, podLogTimeout, framework.Poll).Should(gomega.ContainSubstring("value-1"))
ginkgo.By(fmt.Sprintf("Deleting configmap %v", deleteConfigMap.Name))
err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Delete(deleteConfigMap.Name, &metav1.DeleteOptions{})
err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Delete(context.TODO(), deleteConfigMap.Name, &metav1.DeleteOptions{})
framework.ExpectNoError(err, "Failed to delete configmap %q in namespace %q", deleteConfigMap.Name, f.Namespace.Name)
ginkgo.By(fmt.Sprintf("Updating configmap %v", updateConfigMap.Name))
updateConfigMap.ResourceVersion = "" // to force update
delete(updateConfigMap.Data, "data-1")
updateConfigMap.Data["data-3"] = "value-3"
_, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Update(updateConfigMap)
_, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Update(context.TODO(), updateConfigMap)
framework.ExpectNoError(err, "Failed to update configmap %q in namespace %q", updateConfigMap.Name, f.Namespace.Name)
ginkgo.By(fmt.Sprintf("Creating configMap with name %s", createConfigMap.Name))
if createConfigMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(createConfigMap); err != nil {
if createConfigMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(context.TODO(), createConfigMap); err != nil {
framework.Failf("unable to create test configMap %s: %v", createConfigMap.Name, err)
}
@@ -418,7 +419,7 @@ var _ = ginkgo.Describe("[sig-storage] Projected configMap", func() {
ginkgo.By(fmt.Sprintf("Creating configMap with name %s", configMap.Name))
var err error
if configMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(configMap); err != nil {
if configMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(context.TODO(), configMap); err != nil {
framework.Failf("unable to create test configMap %s: %v", configMap.Name, err)
}
@@ -524,7 +525,7 @@ func doProjectedConfigMapE2EWithoutMappings(f *framework.Framework, asUser bool,
ginkgo.By(fmt.Sprintf("Creating configMap with name %s", configMap.Name))
var err error
if configMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(configMap); err != nil {
if configMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(context.TODO(), configMap); err != nil {
framework.Failf("unable to create test configMap %s: %v", configMap.Name, err)
}
@@ -605,7 +606,7 @@ func doProjectedConfigMapE2EWithMappings(f *framework.Framework, asUser bool, fs
ginkgo.By(fmt.Sprintf("Creating configMap with name %s", configMap.Name))
var err error
if configMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(configMap); err != nil {
if configMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(context.TODO(), configMap); err != nil {
framework.Failf("unable to create test configMap %s: %v", configMap.Name, err)
}

View File

@@ -17,6 +17,7 @@ limitations under the License.
package common
import (
"context"
"fmt"
"time"
@@ -166,7 +167,7 @@ var _ = ginkgo.Describe("[sig-storage] Projected downwardAPI", func() {
ginkgo.By("Creating the pod")
podClient.CreateSync(pod)
pod, err := podClient.Get(pod.Name, metav1.GetOptions{})
pod, err := podClient.Get(context.TODO(), pod.Name, metav1.GetOptions{})
framework.ExpectNoError(err, "Failed to get pod %q", pod.Name)
gomega.Eventually(func() (string, error) {

View File

@@ -17,6 +17,7 @@ limitations under the License.
package common
import (
"context"
"fmt"
"path"
@@ -101,7 +102,7 @@ var _ = ginkgo.Describe("[sig-storage] Projected secret", func() {
secret2.Data = map[string][]byte{
"this_should_not_match_content_of_other_secret": []byte("similarly_this_should_not_match_content_of_other_secret\n"),
}
if secret2, err = f.ClientSet.CoreV1().Secrets(namespace2.Name).Create(secret2); err != nil {
if secret2, err = f.ClientSet.CoreV1().Secrets(namespace2.Name).Create(context.TODO(), secret2); err != nil {
framework.Failf("unable to create test secret %s: %v", secret2.Name, err)
}
doProjectedSecretE2EWithoutMapping(f, nil /* default mode */, secret2.Name, nil, nil)
@@ -127,7 +128,7 @@ var _ = ginkgo.Describe("[sig-storage] Projected secret", func() {
ginkgo.By(fmt.Sprintf("Creating secret with name %s", secret.Name))
var err error
if secret, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Create(secret); err != nil {
if secret, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Create(context.TODO(), secret); err != nil {
framework.Failf("unable to create test secret %s: %v", secret.Name, err)
}
@@ -254,12 +255,12 @@ var _ = ginkgo.Describe("[sig-storage] Projected secret", func() {
ginkgo.By(fmt.Sprintf("Creating secret with name %s", deleteSecret.Name))
var err error
if deleteSecret, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Create(deleteSecret); err != nil {
if deleteSecret, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Create(context.TODO(), deleteSecret); err != nil {
framework.Failf("unable to create test secret %s: %v", deleteSecret.Name, err)
}
ginkgo.By(fmt.Sprintf("Creating secret with name %s", updateSecret.Name))
if updateSecret, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Create(updateSecret); err != nil {
if updateSecret, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Create(context.TODO(), updateSecret); err != nil {
framework.Failf("unable to create test secret %s: %v", updateSecret.Name, err)
}
@@ -381,18 +382,18 @@ var _ = ginkgo.Describe("[sig-storage] Projected secret", func() {
gomega.Eventually(pollDeleteLogs, podLogTimeout, framework.Poll).Should(gomega.ContainSubstring("value-1"))
ginkgo.By(fmt.Sprintf("Deleting secret %v", deleteSecret.Name))
err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Delete(deleteSecret.Name, &metav1.DeleteOptions{})
err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Delete(context.TODO(), deleteSecret.Name, &metav1.DeleteOptions{})
framework.ExpectNoError(err, "Failed to delete secret %q in namespace %q", deleteSecret.Name, f.Namespace.Name)
ginkgo.By(fmt.Sprintf("Updating secret %v", updateSecret.Name))
updateSecret.ResourceVersion = "" // to force update
delete(updateSecret.Data, "data-1")
updateSecret.Data["data-3"] = []byte("value-3")
_, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Update(updateSecret)
_, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Update(context.TODO(), updateSecret)
framework.ExpectNoError(err, "Failed to update secret %q in namespace %q", updateSecret.Name, f.Namespace.Name)
ginkgo.By(fmt.Sprintf("Creating secret with name %s", createSecret.Name))
if createSecret, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Create(createSecret); err != nil {
if createSecret, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Create(context.TODO(), createSecret); err != nil {
framework.Failf("unable to create test secret %s: %v", createSecret.Name, err)
}
@@ -434,7 +435,7 @@ func doProjectedSecretE2EWithoutMapping(f *framework.Framework, defaultMode *int
ginkgo.By(fmt.Sprintf("Creating projection with secret that has name %s", secret.Name))
var err error
if secret, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Create(secret); err != nil {
if secret, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Create(context.TODO(), secret); err != nil {
framework.Failf("unable to create test secret %s: %v", secret.Name, err)
}
@@ -512,7 +513,7 @@ func doProjectedSecretE2EWithMapping(f *framework.Framework, mode *int32) {
ginkgo.By(fmt.Sprintf("Creating projection with secret that has name %s", secret.Name))
var err error
if secret, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Create(secret); err != nil {
if secret, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Create(context.TODO(), secret); err != nil {
framework.Failf("unable to create test secret %s: %v", secret.Name, err)
}

View File

@@ -17,6 +17,7 @@ limitations under the License.
package common
import (
"context"
"fmt"
"path"
"time"
@@ -299,9 +300,9 @@ while true; do sleep 1; done
}
secret.Name = "image-pull-secret-" + string(uuid.NewUUID())
ginkgo.By("create image pull secret")
_, err := f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Create(secret)
_, err := f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Create(context.TODO(), secret)
framework.ExpectNoError(err)
defer f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Delete(secret.Name, nil)
defer f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Delete(context.TODO(), secret.Name, nil)
container.ImagePullSecrets = []string{secret.Name}
}
// checkContainerStatus checks whether the container status matches expectation.

View File

@@ -17,6 +17,7 @@ limitations under the License.
package common
import (
"context"
"fmt"
"time"
@@ -66,12 +67,12 @@ var _ = ginkgo.Describe("[sig-node] RuntimeClass", func() {
rcClient := f.ClientSet.NodeV1beta1().RuntimeClasses()
ginkgo.By("Deleting RuntimeClass "+rcName, func() {
err := rcClient.Delete(rcName, nil)
err := rcClient.Delete(context.TODO(), rcName, nil)
framework.ExpectNoError(err, "failed to delete RuntimeClass %s", rcName)
ginkgo.By("Waiting for the RuntimeClass to disappear")
framework.ExpectNoError(wait.PollImmediate(framework.Poll, time.Minute, func() (bool, error) {
_, err := rcClient.Get(rcName, metav1.GetOptions{})
_, err := rcClient.Get(context.TODO(), rcName, metav1.GetOptions{})
if apierrors.IsNotFound(err) {
return true, nil // done
}
@@ -91,7 +92,7 @@ var _ = ginkgo.Describe("[sig-node] RuntimeClass", func() {
func createRuntimeClass(f *framework.Framework, name, handler string) string {
uniqueName := fmt.Sprintf("%s-%s", f.Namespace.Name, name)
rc := runtimeclasstest.NewRuntimeClass(uniqueName, handler)
rc, err := f.ClientSet.NodeV1beta1().RuntimeClasses().Create(rc)
rc, err := f.ClientSet.NodeV1beta1().RuntimeClasses().Create(context.TODO(), rc)
framework.ExpectNoError(err, "failed to create RuntimeClass resource")
return rc.GetName()
}
@@ -122,7 +123,7 @@ func expectPodRejection(f *framework.Framework, pod *v1.Pod) {
pod = f.PodClient().Create(pod)
expectSandboxFailureEvent(f, pod, fmt.Sprintf("\"%s\" not found", *pod.Spec.RuntimeClassName))
} else {
_, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(pod)
_, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(context.TODO(), pod)
framework.ExpectError(err, "should be forbidden")
framework.ExpectEqual(apierrors.IsForbidden(err), true, "should be forbidden error")
}
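
The RuntimeClass test above creates and deletes a cluster-scoped object and then waits for Get to report NotFound. A sketch of that create, delete, and wait sequence with the context-first calls; the helper name and the poll interval and timeout are illustrative.

package example

import (
	"context"
	"time"

	nodev1beta1 "k8s.io/api/node/v1beta1"
	apierrors "k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/wait"
	"k8s.io/client-go/kubernetes"
)

// createThenRemoveRuntimeClass creates a RuntimeClass, deletes it, and waits
// until the apiserver reports it gone.
func createThenRemoveRuntimeClass(cs kubernetes.Interface, name, handler string) error {
	ctx := context.TODO()
	rc := &nodev1beta1.RuntimeClass{
		ObjectMeta: metav1.ObjectMeta{Name: name},
		Handler:    handler,
	}
	if _, err := cs.NodeV1beta1().RuntimeClasses().Create(ctx, rc); err != nil {
		return err
	}
	if err := cs.NodeV1beta1().RuntimeClasses().Delete(ctx, name, nil); err != nil {
		return err
	}
	return wait.PollImmediate(time.Second, time.Minute, func() (bool, error) {
		_, err := cs.NodeV1beta1().RuntimeClasses().Get(ctx, name, metav1.GetOptions{})
		if apierrors.IsNotFound(err) {
			return true, nil
		}
		return false, err
	})
}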

View File

@@ -17,6 +17,7 @@ limitations under the License.
package common
import (
"context"
"encoding/json"
"fmt"
@@ -45,7 +46,7 @@ var _ = ginkgo.Describe("[sig-api-machinery] Secrets", func() {
ginkgo.By(fmt.Sprintf("Creating secret with name %s", secret.Name))
var err error
if secret, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Create(secret); err != nil {
if secret, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Create(context.TODO(), secret); err != nil {
framework.Failf("unable to create test secret %s: %v", secret.Name, err)
}
@@ -93,7 +94,7 @@ var _ = ginkgo.Describe("[sig-api-machinery] Secrets", func() {
secret := newEnvFromSecret(f.Namespace.Name, name)
ginkgo.By(fmt.Sprintf("creating secret %v/%v", f.Namespace.Name, secret.Name))
var err error
if secret, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Create(secret); err != nil {
if secret, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Create(context.TODO(), secret); err != nil {
framework.Failf("unable to create test secret %s: %v", secret.Name, err)
}
@@ -153,7 +154,7 @@ var _ = ginkgo.Describe("[sig-api-machinery] Secrets", func() {
secretTestName := "test-secret-" + string(uuid.NewUUID())
// create a secret in the test namespace
_, err := f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Create(&v1.Secret{
_, err := f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Create(context.TODO(), &v1.Secret{
ObjectMeta: metav1.ObjectMeta{
Name: secretTestName,
Labels: map[string]string{
@@ -169,7 +170,7 @@ var _ = ginkgo.Describe("[sig-api-machinery] Secrets", func() {
ginkgo.By("listing secrets in all namespaces to ensure that there are more than zero")
// list all secrets in all namespaces to ensure endpoint coverage
secretsList, err := f.ClientSet.CoreV1().Secrets("").List(metav1.ListOptions{
secretsList, err := f.ClientSet.CoreV1().Secrets("").List(context.TODO(), metav1.ListOptions{
LabelSelector: "testsecret-constant=true",
})
framework.ExpectNoError(err, "failed to list secrets")
@@ -196,10 +197,10 @@ var _ = ginkgo.Describe("[sig-api-machinery] Secrets", func() {
"data": map[string][]byte{"key": []byte(secretPatchNewData)},
})
framework.ExpectNoError(err, "failed to marshal JSON")
_, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Patch(secretCreatedName, types.StrategicMergePatchType, []byte(secretPatch))
_, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Patch(context.TODO(), secretCreatedName, types.StrategicMergePatchType, []byte(secretPatch))
framework.ExpectNoError(err, "failed to patch secret")
secret, err := f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Get(secretCreatedName, metav1.GetOptions{})
secret, err := f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Get(context.TODO(), secretCreatedName, metav1.GetOptions{})
framework.ExpectNoError(err, "failed to get secret")
secretDecodedstring, err := base64.StdEncoding.DecodeString(string(secret.Data["key"]))
@@ -208,14 +209,14 @@ var _ = ginkgo.Describe("[sig-api-machinery] Secrets", func() {
framework.ExpectEqual(string(secretDecodedstring), "value1", "found secret, but the data wasn't updated from the patch")
ginkgo.By("deleting the secret using a LabelSelector")
err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).DeleteCollection(&metav1.DeleteOptions{}, metav1.ListOptions{
err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).DeleteCollection(context.TODO(), &metav1.DeleteOptions{}, metav1.ListOptions{
LabelSelector: "testsecret=true",
})
framework.ExpectNoError(err, "failed to delete patched secret")
ginkgo.By("listing secrets in all namespaces, searching for label name and value in patch")
// list all secrets in all namespaces
secretsList, err = f.ClientSet.CoreV1().Secrets("").List(metav1.ListOptions{
secretsList, err = f.ClientSet.CoreV1().Secrets("").List(context.TODO(), metav1.ListOptions{
LabelSelector: "testsecret-constant=true",
})
framework.ExpectNoError(err, "failed to list secrets")
@@ -257,5 +258,5 @@ func createEmptyKeySecretForTest(f *framework.Framework) (*v1.Secret, error) {
},
}
ginkgo.By(fmt.Sprintf("Creating projection with secret that has name %s", secret.Name))
return f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Create(secret)
return f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Create(context.TODO(), secret)
}
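
The tests pass context.TODO() everywhere, but the point of the new parameter is that callers outside tests can bound a request. A sketch of a Secrets create with a deadline, assuming an illustrative timeout and helper name: when the deadline expires, the client aborts the HTTP request and the call returns a context error instead of hanging.

package example

import (
	"context"
	"time"

	v1 "k8s.io/api/core/v1"
	"k8s.io/client-go/kubernetes"
)

// createSecretWithDeadline bounds the apiserver call with a 30-second timeout
// instead of the open-ended context.TODO() used by the e2e tests.
func createSecretWithDeadline(cs kubernetes.Interface, ns string, secret *v1.Secret) (*v1.Secret, error) {
	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
	defer cancel()
	return cs.CoreV1().Secrets(ns).Create(ctx, secret)
}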

View File

@@ -17,6 +17,7 @@ limitations under the License.
package common
import (
"context"
"fmt"
"path"
@@ -107,7 +108,7 @@ var _ = ginkgo.Describe("[sig-storage] Secrets", func() {
secret2.Data = map[string][]byte{
"this_should_not_match_content_of_other_secret": []byte("similarly_this_should_not_match_content_of_other_secret\n"),
}
if secret2, err = f.ClientSet.CoreV1().Secrets(namespace2.Name).Create(secret2); err != nil {
if secret2, err = f.ClientSet.CoreV1().Secrets(namespace2.Name).Create(context.TODO(), secret2); err != nil {
framework.Failf("unable to create test secret %s: %v", secret2.Name, err)
}
doSecretE2EWithoutMapping(f, nil /* default mode */, secret2.Name, nil, nil)
@@ -133,7 +134,7 @@ var _ = ginkgo.Describe("[sig-storage] Secrets", func() {
ginkgo.By(fmt.Sprintf("Creating secret with name %s", secret.Name))
var err error
if secret, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Create(secret); err != nil {
if secret, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Create(context.TODO(), secret); err != nil {
framework.Failf("unable to create test secret %s: %v", secret.Name, err)
}
@@ -244,12 +245,12 @@ var _ = ginkgo.Describe("[sig-storage] Secrets", func() {
ginkgo.By(fmt.Sprintf("Creating secret with name %s", deleteSecret.Name))
var err error
if deleteSecret, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Create(deleteSecret); err != nil {
if deleteSecret, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Create(context.TODO(), deleteSecret); err != nil {
framework.Failf("unable to create test secret %s: %v", deleteSecret.Name, err)
}
ginkgo.By(fmt.Sprintf("Creating secret with name %s", updateSecret.Name))
if updateSecret, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Create(updateSecret); err != nil {
if updateSecret, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Create(context.TODO(), updateSecret); err != nil {
framework.Failf("unable to create test secret %s: %v", updateSecret.Name, err)
}
@@ -347,18 +348,18 @@ var _ = ginkgo.Describe("[sig-storage] Secrets", func() {
gomega.Eventually(pollDeleteLogs, podLogTimeout, framework.Poll).Should(gomega.ContainSubstring("value-1"))
ginkgo.By(fmt.Sprintf("Deleting secret %v", deleteSecret.Name))
err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Delete(deleteSecret.Name, &metav1.DeleteOptions{})
err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Delete(context.TODO(), deleteSecret.Name, &metav1.DeleteOptions{})
framework.ExpectNoError(err, "Failed to delete secret %q in namespace %q", deleteSecret.Name, f.Namespace.Name)
ginkgo.By(fmt.Sprintf("Updating secret %v", updateSecret.Name))
updateSecret.ResourceVersion = "" // to force update
delete(updateSecret.Data, "data-1")
updateSecret.Data["data-3"] = []byte("value-3")
_, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Update(updateSecret)
_, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Update(context.TODO(), updateSecret)
framework.ExpectNoError(err, "Failed to update secret %q in namespace %q", updateSecret.Name, f.Namespace.Name)
ginkgo.By(fmt.Sprintf("Creating secret with name %s", createSecret.Name))
if createSecret, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Create(createSecret); err != nil {
if createSecret, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Create(context.TODO(), createSecret); err != nil {
framework.Failf("unable to create test secret %s: %v", createSecret.Name, err)
}
@@ -375,43 +376,43 @@ var _ = ginkgo.Describe("[sig-storage] Secrets", func() {
name := "immutable"
secret := secretForTest(f.Namespace.Name, name)
currentSecret, err := f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Create(secret)
currentSecret, err := f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Create(context.TODO(), secret)
framework.ExpectNoError(err, "Failed to create secret %q in namespace %q", secret.Name, secret.Namespace)
currentSecret.Data["data-4"] = []byte("value-4\n")
currentSecret, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Update(currentSecret)
currentSecret, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Update(context.TODO(), currentSecret)
framework.ExpectNoError(err, "Failed to update secret %q in namespace %q", secret.Name, secret.Namespace)
// Mark secret as immutable.
trueVal := true
currentSecret.Immutable = &trueVal
currentSecret, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Update(currentSecret)
currentSecret, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Update(context.TODO(), currentSecret)
framework.ExpectNoError(err, "Failed to mark secret %q in namespace %q as immutable", secret.Name, secret.Namespace)
// Ensure data can't be changed now.
currentSecret.Data["data-5"] = []byte("value-5\n")
_, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Update(currentSecret)
_, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Update(context.TODO(), currentSecret)
framework.ExpectEqual(apierrors.IsInvalid(err), true)
// Ensure secret can't be switched from immutable to mutable.
currentSecret, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Get(name, metav1.GetOptions{})
currentSecret, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Get(context.TODO(), name, metav1.GetOptions{})
framework.ExpectNoError(err, "Failed to get secret %q in namespace %q", secret.Name, secret.Namespace)
framework.ExpectEqual(*currentSecret.Immutable, true)
falseVal := false
currentSecret.Immutable = &falseVal
_, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Update(currentSecret)
_, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Update(context.TODO(), currentSecret)
framework.ExpectEqual(apierrors.IsInvalid(err), true)
// Ensure that metadata can be changed.
currentSecret, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Get(name, metav1.GetOptions{})
currentSecret, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Get(context.TODO(), name, metav1.GetOptions{})
framework.ExpectNoError(err, "Failed to get secret %q in namespace %q", secret.Name, secret.Namespace)
currentSecret.Labels = map[string]string{"label1": "value1"}
_, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Update(currentSecret)
_, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Update(context.TODO(), currentSecret)
framework.ExpectNoError(err, "Failed to update secret %q in namespace %q", secret.Name, secret.Namespace)
// Ensure that immutable secret can be deleted.
err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Delete(name, &metav1.DeleteOptions{})
err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Delete(context.TODO(), name, &metav1.DeleteOptions{})
framework.ExpectNoError(err, "Failed to delete secret %q in namespace %q", secret.Name, secret.Namespace)
})
@@ -460,7 +461,7 @@ func doSecretE2EWithoutMapping(f *framework.Framework, defaultMode *int32, secre
ginkgo.By(fmt.Sprintf("Creating secret with name %s", secret.Name))
var err error
if secret, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Create(secret); err != nil {
if secret, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Create(context.TODO(), secret); err != nil {
framework.Failf("unable to create test secret %s: %v", secret.Name, err)
}
@@ -529,7 +530,7 @@ func doSecretE2EWithMapping(f *framework.Framework, mode *int32) {
ginkgo.By(fmt.Sprintf("Creating secret with name %s", secret.Name))
var err error
if secret, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Create(secret); err != nil {
if secret, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Create(context.TODO(), secret); err != nil {
framework.Failf("unable to create test secret %s: %v", secret.Name, err)
}
@@ -647,7 +648,7 @@ func createNonOptionalSecretPodWithSecret(f *framework.Framework, volumeMountPat
ginkgo.By(fmt.Sprintf("Creating secret with name %s", secret.Name))
var err error
if secret, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Create(secret); err != nil {
if secret, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Create(context.TODO(), secret); err != nil {
framework.Failf("unable to create test secret %s: %v", secret.Name, err)
}
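The secrets hunks above all follow the same shape: every typed client call now takes a context.Context as its first argument, and the e2e tests plug in context.TODO() as a placeholder. As a rough, out-of-tree sketch only (not part of this change), the following shows how a caller could pass a bounded context instead of context.TODO() for a read such as a Secrets Get; the function name and variables are made up for the illustration.

package example

import (
	"context"
	"fmt"
	"time"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

// getSecretWithTimeout bounds the API call with a deadline instead of using
// the context.TODO() placeholder seen in the tests above. Illustrative only.
func getSecretWithTimeout(cs kubernetes.Interface, ns, name string) error {
	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
	defer cancel()

	secret, err := cs.CoreV1().Secrets(ns).Get(ctx, name, metav1.GetOptions{})
	if err != nil {
		return fmt.Errorf("getting secret %s/%s: %v", ns, name, err)
	}
	fmt.Printf("secret %q has %d data keys\n", secret.Name, len(secret.Data))
	return nil
}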
// creating a pod with secret object, with the key which is not present in secret object.

View File

@@ -17,6 +17,7 @@ limitations under the License.
package common
import (
"context"
"k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/uuid"
@@ -90,7 +91,7 @@ var _ = framework.KubeDescribe("Sysctls [LinuxOnly] [NodeFeature:Sysctls]", func
ginkgo.By("Waiting for pod completion")
err = f.WaitForPodNoLongerRunning(pod.Name)
framework.ExpectNoError(err)
pod, err = podClient.Get(pod.Name, metav1.GetOptions{})
pod, err = podClient.Get(context.TODO(), pod.Name, metav1.GetOptions{})
framework.ExpectNoError(err)
ginkgo.By("Checking that the pod succeeded")
@@ -130,7 +131,7 @@ var _ = framework.KubeDescribe("Sysctls [LinuxOnly] [NodeFeature:Sysctls]", func
ginkgo.By("Waiting for pod completion")
err = f.WaitForPodNoLongerRunning(pod.Name)
framework.ExpectNoError(err)
pod, err = podClient.Get(pod.Name, metav1.GetOptions{})
pod, err = podClient.Get(context.TODO(), pod.Name, metav1.GetOptions{})
framework.ExpectNoError(err)
ginkgo.By("Checking that the pod succeeded")
@@ -170,7 +171,7 @@ var _ = framework.KubeDescribe("Sysctls [LinuxOnly] [NodeFeature:Sysctls]", func
ginkgo.By("Creating a pod with one valid and two invalid sysctls")
client := f.ClientSet.CoreV1().Pods(f.Namespace.Name)
_, err := client.Create(pod)
_, err := client.Create(context.TODO(), pod)
gomega.Expect(err).NotTo(gomega.BeNil())
gomega.Expect(err.Error()).To(gomega.ContainSubstring(`Invalid value: "foo-"`))

View File

@@ -18,6 +18,7 @@ package common
import (
"bytes"
"context"
"fmt"
"text/template"
"time"
@@ -140,7 +141,7 @@ func svcByName(name string, port int) *v1.Service {
// NewSVCByName creates a service by name.
func NewSVCByName(c clientset.Interface, ns, name string) error {
const testPort = 9376
_, err := c.CoreV1().Services(ns).Create(svcByName(name, testPort))
_, err := c.CoreV1().Services(ns).Create(context.TODO(), svcByName(name, testPort))
return err
}
@@ -152,7 +153,7 @@ func NewRCByName(c clientset.Interface, ns, name string, replicas int32, gracePe
containerArgs = []string{"serve-hostname"}
}
return c.CoreV1().ReplicationControllers(ns).Create(rcByNamePort(
return c.CoreV1().ReplicationControllers(ns).Create(context.TODO(), rcByNamePort(
name, replicas, framework.ServeHostnameImage, containerArgs, 9376, v1.ProtocolTCP, map[string]string{}, gracePeriod))
}
@@ -189,7 +190,7 @@ func RestartNodes(c clientset.Interface, nodes []v1.Node) error {
for i := range nodes {
node := &nodes[i]
if err := wait.Poll(30*time.Second, framework.RestartNodeReadyAgainTimeout, func() (bool, error) {
newNode, err := c.CoreV1().Nodes().Get(node.Name, metav1.GetOptions{})
newNode, err := c.CoreV1().Nodes().Get(context.TODO(), node.Name, metav1.GetOptions{})
if err != nil {
return false, fmt.Errorf("error getting node info after reboot: %s", err)
}
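The RestartNodes hunk above pairs the context-aware Get with a wait.Poll loop. Below is a minimal standalone sketch of that polling shape, assuming a clientset is available; the function name and the readiness check are illustrative, not the framework's own helper.

package example

import (
	"context"
	"fmt"
	"time"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/wait"
	"k8s.io/client-go/kubernetes"
)

// waitForNodeReady re-fetches the node with the context-aware Get on every
// poll iteration until it reports Ready or the timeout expires.
func waitForNodeReady(c kubernetes.Interface, name string, timeout time.Duration) error {
	return wait.Poll(10*time.Second, timeout, func() (bool, error) {
		node, err := c.CoreV1().Nodes().Get(context.TODO(), name, metav1.GetOptions{})
		if err != nil {
			return false, fmt.Errorf("error getting node %q: %v", name, err)
		}
		for _, cond := range node.Status.Conditions {
			if cond.Type == v1.NodeReady {
				return cond.Status == v1.ConditionTrue, nil
			}
		}
		return false, nil
	})
}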

View File

@@ -43,6 +43,7 @@ limitations under the License.
package common
import (
"context"
"k8s.io/api/core/v1"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/kubernetes/test/e2e/framework"
@@ -129,7 +130,7 @@ var _ = ginkgo.Describe("[sig-storage] GCP Volumes", func() {
name := config.Prefix + "-server"
defer func() {
volume.TestCleanup(f, config)
err := c.CoreV1().Endpoints(namespace.Name).Delete(name, nil)
err := c.CoreV1().Endpoints(namespace.Name).Delete(context.TODO(), name, nil)
framework.ExpectNoError(err, "defer: Gluster delete endpoints failed")
}()

View File

@@ -17,6 +17,7 @@ limitations under the License.
package e2e
import (
"context"
"fmt"
"os"
"path"
@@ -121,12 +122,12 @@ func runKubernetesServiceTestContainer(c clientset.Interface, ns string) {
return
}
p.Namespace = ns
if _, err := c.CoreV1().Pods(ns).Create(p); err != nil {
if _, err := c.CoreV1().Pods(ns).Create(context.TODO(), p); err != nil {
framework.Logf("Failed to create %v: %v", p.Name, err)
return
}
defer func() {
if err := c.CoreV1().Pods(ns).Delete(p.Name, nil); err != nil {
if err := c.CoreV1().Pods(ns).Delete(context.TODO(), p.Name, nil); err != nil {
framework.Logf("Failed to delete pod %v: %v", p.Name, err)
}
}()
@@ -150,7 +151,7 @@ func runKubernetesServiceTestContainer(c clientset.Interface, ns string) {
// but we can detect if a cluster is dual stack because pods have two addresses (one per family)
func getDefaultClusterIPFamily(c clientset.Interface) string {
// Get the ClusterIP of the kubernetes service created in the default namespace
svc, err := c.CoreV1().Services(metav1.NamespaceDefault).Get("kubernetes", metav1.GetOptions{})
svc, err := c.CoreV1().Services(metav1.NamespaceDefault).Get(context.TODO(), "kubernetes", metav1.GetOptions{})
if err != nil {
framework.Failf("Failed to get kubernetes service ClusterIP: %v", err)
}
@@ -170,7 +171,7 @@ func waitForDaemonSets(c clientset.Interface, ns string, allowedNotReadyNodes in
timeout, ns)
return wait.PollImmediate(framework.Poll, timeout, func() (bool, error) {
dsList, err := c.AppsV1().DaemonSets(ns).List(metav1.ListOptions{})
dsList, err := c.AppsV1().DaemonSets(ns).List(context.TODO(), metav1.ListOptions{})
if err != nil {
framework.Logf("Error getting daemonsets in namespace: '%s': %v", ns, err)
if testutils.IsRetryableAPIError(err) {

View File

@@ -17,6 +17,7 @@ limitations under the License.
package e2e
import (
"context"
"fmt"
"path/filepath"
"sync"
@@ -79,7 +80,7 @@ var _ = framework.KubeDescribe("[Feature:Example]", func() {
err := e2epod.WaitForPodNameRunningInNamespace(c, podName, ns)
framework.ExpectNoError(err)
for t := time.Now(); time.Since(t) < timeout; time.Sleep(framework.Poll) {
pod, err := c.CoreV1().Pods(ns).Get(podName, metav1.GetOptions{})
pod, err := c.CoreV1().Pods(ns).Get(context.TODO(), podName, metav1.GetOptions{})
framework.ExpectNoError(err, fmt.Sprintf("getting pod %s", podName))
stat := podutil.GetExistingContainerStatus(pod.Status.ContainerStatuses, podName)
framework.Logf("Pod: %s, restart count:%d", stat.Name, stat.RestartCount)

View File

@@ -17,6 +17,7 @@ limitations under the License.
package auth
import (
"context"
"sync"
"time"
@@ -65,7 +66,7 @@ func WaitForNamedAuthorizationUpdate(c v1authorization.SubjectAccessReviewsGette
}
err := wait.Poll(policyCachePollInterval, policyCachePollTimeout, func() (bool, error) {
response, err := c.SubjectAccessReviews().Create(review)
response, err := c.SubjectAccessReviews().Create(context.TODO(), review)
if err != nil {
return false, err
}
@@ -85,7 +86,7 @@ func BindClusterRole(c bindingsGetter, clusterRole, ns string, subjects ...rbacv
}
// Since the namespace names are unique, we can leave this lying around so we don't have to race any caches
_, err := c.ClusterRoleBindings().Create(&rbacv1.ClusterRoleBinding{
_, err := c.ClusterRoleBindings().Create(context.TODO(), &rbacv1.ClusterRoleBinding{
ObjectMeta: metav1.ObjectMeta{
Name: ns + "--" + clusterRole,
},
@@ -122,7 +123,7 @@ func bindInNamespace(c bindingsGetter, roleType, role, ns string, subjects ...rb
}
// Since the namespace names are unique, we can leave this lying around so we don't have to race any caches
_, err := c.RoleBindings(ns).Create(&rbacv1.RoleBinding{
_, err := c.RoleBindings(ns).Create(context.TODO(), &rbacv1.RoleBinding{
ObjectMeta: metav1.ObjectMeta{
Name: ns + "--" + role,
},
@@ -149,7 +150,7 @@ var (
// IsRBACEnabled returns true if RBAC is enabled. Otherwise false.
func IsRBACEnabled(crGetter v1rbac.ClusterRolesGetter) bool {
isRBACEnabledOnce.Do(func() {
crs, err := crGetter.ClusterRoles().List(metav1.ListOptions{})
crs, err := crGetter.ClusterRoles().List(context.TODO(), metav1.ListOptions{})
if err != nil {
e2elog.Logf("Error listing ClusterRoles; assuming RBAC is disabled: %v", err)
isRBACEnabled = false

View File

@@ -320,21 +320,21 @@ func (rc *ResourceConsumer) sendConsumeCustomMetric(delta int) {
func (rc *ResourceConsumer) GetReplicas() int {
switch rc.kind {
case KindRC:
replicationController, err := rc.clientSet.CoreV1().ReplicationControllers(rc.nsName).Get(rc.name, metav1.GetOptions{})
replicationController, err := rc.clientSet.CoreV1().ReplicationControllers(rc.nsName).Get(context.TODO(), rc.name, metav1.GetOptions{})
framework.ExpectNoError(err)
if replicationController == nil {
framework.Failf(rcIsNil)
}
return int(replicationController.Status.ReadyReplicas)
case KindDeployment:
deployment, err := rc.clientSet.AppsV1().Deployments(rc.nsName).Get(rc.name, metav1.GetOptions{})
deployment, err := rc.clientSet.AppsV1().Deployments(rc.nsName).Get(context.TODO(), rc.name, metav1.GetOptions{})
framework.ExpectNoError(err)
if deployment == nil {
framework.Failf(deploymentIsNil)
}
return int(deployment.Status.ReadyReplicas)
case KindReplicaSet:
rs, err := rc.clientSet.AppsV1().ReplicaSets(rc.nsName).Get(rc.name, metav1.GetOptions{})
rs, err := rc.clientSet.AppsV1().ReplicaSets(rc.nsName).Get(context.TODO(), rc.name, metav1.GetOptions{})
framework.ExpectNoError(err)
if rs == nil {
framework.Failf(rsIsNil)
@@ -348,7 +348,7 @@ func (rc *ResourceConsumer) GetReplicas() int {
// GetHpa get the corresponding horizontalPodAutoscaler object
func (rc *ResourceConsumer) GetHpa(name string) (*autoscalingv1.HorizontalPodAutoscaler, error) {
return rc.clientSet.AutoscalingV1().HorizontalPodAutoscalers(rc.nsName).Get(name, metav1.GetOptions{})
return rc.clientSet.AutoscalingV1().HorizontalPodAutoscalers(rc.nsName).Get(context.TODO(), name, metav1.GetOptions{})
}
// WaitForReplicas waits for the desired replicas
@@ -418,14 +418,14 @@ func (rc *ResourceConsumer) CleanUp() {
time.Sleep(10 * time.Second)
kind := rc.kind.GroupKind()
framework.ExpectNoError(framework.DeleteResourceAndWaitForGC(rc.clientSet, kind, rc.nsName, rc.name))
framework.ExpectNoError(rc.clientSet.CoreV1().Services(rc.nsName).Delete(rc.name, nil))
framework.ExpectNoError(rc.clientSet.CoreV1().Services(rc.nsName).Delete(context.TODO(), rc.name, nil))
framework.ExpectNoError(framework.DeleteResourceAndWaitForGC(rc.clientSet, api.Kind("ReplicationController"), rc.nsName, rc.controllerName))
framework.ExpectNoError(rc.clientSet.CoreV1().Services(rc.nsName).Delete(rc.controllerName, nil))
framework.ExpectNoError(rc.clientSet.CoreV1().Services(rc.nsName).Delete(context.TODO(), rc.controllerName, nil))
}
func runServiceAndWorkloadForResourceConsumer(c clientset.Interface, ns, name string, kind schema.GroupVersionKind, replicas int, cpuLimitMillis, memLimitMb int64, podAnnotations, serviceAnnotations map[string]string) {
ginkgo.By(fmt.Sprintf("Running consuming RC %s via %s with %v replicas", name, kind, replicas))
_, err := c.CoreV1().Services(ns).Create(&v1.Service{
_, err := c.CoreV1().Services(ns).Create(context.TODO(), &v1.Service{
ObjectMeta: metav1.ObjectMeta{
Name: name,
Annotations: serviceAnnotations,
@@ -480,7 +480,7 @@ func runServiceAndWorkloadForResourceConsumer(c clientset.Interface, ns, name st
ginkgo.By(fmt.Sprintf("Running controller"))
controllerName := name + "-ctrl"
_, err = c.CoreV1().Services(ns).Create(&v1.Service{
_, err = c.CoreV1().Services(ns).Create(context.TODO(), &v1.Service{
ObjectMeta: metav1.ObjectMeta{
Name: controllerName,
},
@@ -534,14 +534,14 @@ func CreateCPUHorizontalPodAutoscaler(rc *ResourceConsumer, cpu, minReplicas, ma
TargetCPUUtilizationPercentage: &cpu,
},
}
hpa, errHPA := rc.clientSet.AutoscalingV1().HorizontalPodAutoscalers(rc.nsName).Create(hpa)
hpa, errHPA := rc.clientSet.AutoscalingV1().HorizontalPodAutoscalers(rc.nsName).Create(context.TODO(), hpa)
framework.ExpectNoError(errHPA)
return hpa
}
// DeleteHorizontalPodAutoscaler delete the horizontalPodAutoscaler for consuming resources.
func DeleteHorizontalPodAutoscaler(rc *ResourceConsumer, autoscalerName string) {
rc.clientSet.AutoscalingV1().HorizontalPodAutoscalers(rc.nsName).Delete(autoscalerName, nil)
rc.clientSet.AutoscalingV1().HorizontalPodAutoscalers(rc.nsName).Delete(context.TODO(), autoscalerName, nil)
}
// runReplicaSet launches (and verifies the correctness of) a replicaset.

View File

@@ -17,6 +17,7 @@ limitations under the License.
package deployment
import (
"context"
"fmt"
appsv1 "k8s.io/api/apps/v1"
@@ -71,7 +72,7 @@ func NewDeployment(deploymentName string, replicas int32, podLabels map[string]s
// CreateDeployment creates a deployment.
func CreateDeployment(client clientset.Interface, replicas int32, podLabels map[string]string, nodeSelector map[string]string, namespace string, pvclaims []*v1.PersistentVolumeClaim, command string) (*appsv1.Deployment, error) {
deploymentSpec := testDeployment(replicas, podLabels, nodeSelector, namespace, pvclaims, false, command)
deployment, err := client.AppsV1().Deployments(namespace).Create(deploymentSpec)
deployment, err := client.AppsV1().Deployments(namespace).Create(context.TODO(), deploymentSpec)
if err != nil {
return nil, fmt.Errorf("deployment %q Create API error: %v", deploymentSpec.Name, err)
}
@@ -93,7 +94,7 @@ func GetPodsForDeployment(client clientset.Interface, deployment *appsv1.Deploym
return nil, fmt.Errorf("expected a new replica set for deployment %q, found none", deployment.Name)
}
podListFunc := func(namespace string, options metav1.ListOptions) (*v1.PodList, error) {
return client.CoreV1().Pods(namespace).List(options)
return client.CoreV1().Pods(namespace).List(context.TODO(), options)
}
rsList := []*appsv1.ReplicaSet{replicaSet}
podList, err := deploymentutil.ListPods(deployment, rsList, podListFunc)

View File

@@ -17,6 +17,7 @@ limitations under the License.
package events
import (
"context"
"fmt"
"strings"
"sync"
@@ -50,14 +51,14 @@ func ObserveNodeUpdateAfterAction(c clientset.Interface, nodeName string, nodePr
&cache.ListWatch{
ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
options.FieldSelector = nodeSelector.String()
ls, err := c.CoreV1().Nodes().List(options)
ls, err := c.CoreV1().Nodes().List(context.TODO(), options)
return ls, err
},
WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
// Signal parent goroutine that watching has begun.
defer informerStartedGuard.Do(func() { close(informerStartedChan) })
options.FieldSelector = nodeSelector.String()
w, err := c.CoreV1().Nodes().Watch(options)
w, err := c.CoreV1().Nodes().Watch(context.TODO(), options)
return w, err
},
},
@@ -107,13 +108,13 @@ func ObserveEventAfterAction(c clientset.Interface, ns string, eventPredicate fu
_, controller := cache.NewInformer(
&cache.ListWatch{
ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
ls, err := c.CoreV1().Events(ns).List(options)
ls, err := c.CoreV1().Events(ns).List(context.TODO(), options)
return ls, err
},
WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
// Signal parent goroutine that watching has begun.
defer informerStartedGuard.Do(func() { close(informerStartedChan) })
w, err := c.CoreV1().Events(ns).Watch(options)
w, err := c.CoreV1().Events(ns).Watch(context.TODO(), options)
return w, err
},
},
@@ -162,7 +163,7 @@ func WaitTimeoutForEvent(c clientset.Interface, namespace, eventSelector, msg st
func eventOccurred(c clientset.Interface, namespace, eventSelector, msg string) wait.ConditionFunc {
options := metav1.ListOptions{FieldSelector: eventSelector}
return func() (bool, error) {
events, err := c.CoreV1().Events(namespace).List(options)
events, err := c.CoreV1().Events(namespace).List(context.TODO(), options)
if err != nil {
return false, fmt.Errorf("got error while getting events: %v", err)
}
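The ObserveNodeUpdateAfterAction and ObserveEventAfterAction hunks above thread the context through the informer's List and Watch closures. A condensed sketch of that wiring, assuming a clientset; the handler argument and function name are illustrative:

package example

import (
	"context"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/watch"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/cache"
)

// newEventController builds an informer over Events in ns whose List and
// Watch closures forward a context to the client, as in the hunks above.
func newEventController(c kubernetes.Interface, ns string, handler cache.ResourceEventHandler) cache.Controller {
	lw := &cache.ListWatch{
		ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
			return c.CoreV1().Events(ns).List(context.TODO(), options)
		},
		WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
			return c.CoreV1().Events(ns).Watch(context.TODO(), options)
		},
	}
	_, controller := cache.NewInformer(lw, &v1.Event{}, 0, handler)
	return controller
}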

View File

@@ -18,6 +18,7 @@ package framework
import (
"bytes"
"context"
"io"
"net/url"
"strings"
@@ -110,14 +111,14 @@ func (f *Framework) ExecShellInContainer(podName, containerName string, cmd stri
}
func (f *Framework) execCommandInPod(podName string, cmd ...string) string {
pod, err := f.PodClient().Get(podName, metav1.GetOptions{})
pod, err := f.PodClient().Get(context.TODO(), podName, metav1.GetOptions{})
ExpectNoError(err, "failed to get pod %v", podName)
gomega.Expect(pod.Spec.Containers).NotTo(gomega.BeEmpty())
return f.ExecCommandInContainer(podName, pod.Spec.Containers[0].Name, cmd...)
}
func (f *Framework) execCommandInPodWithFullOutput(podName string, cmd ...string) (string, string, error) {
pod, err := f.PodClient().Get(podName, metav1.GetOptions{})
pod, err := f.PodClient().Get(context.TODO(), podName, metav1.GetOptions{})
ExpectNoError(err, "failed to get pod %v", podName)
gomega.Expect(pod.Spec.Containers).NotTo(gomega.BeEmpty())
return f.ExecCommandInContainerWithFullOutput(podName, pod.Spec.Containers[0].Name, cmd...)

View File

@@ -22,6 +22,7 @@ limitations under the License.
package framework
import (
"context"
"fmt"
"io/ioutil"
"math/rand"
@@ -385,7 +386,7 @@ func (f *Framework) AfterEach() {
if TestContext.DeleteNamespace && (TestContext.DeleteNamespaceOnFailure || !ginkgo.CurrentGinkgoTestDescription().Failed) {
for _, ns := range f.namespacesToDelete {
ginkgo.By(fmt.Sprintf("Destroying namespace %q for this suite.", ns.Name))
if err := f.ClientSet.CoreV1().Namespaces().Delete(ns.Name, nil); err != nil {
if err := f.ClientSet.CoreV1().Namespaces().Delete(context.TODO(), ns.Name, nil); err != nil {
if !apierrors.IsNotFound(err) {
nsDeletionErrors[ns.Name] = err
@@ -600,7 +601,7 @@ func (f *Framework) CreateServiceForSimpleApp(contPort, svcPort int, appName str
}}
}
Logf("Creating a service-for-%v for selecting app=%v-pod", appName, appName)
service, err := f.ClientSet.CoreV1().Services(f.Namespace.Name).Create(&v1.Service{
service, err := f.ClientSet.CoreV1().Services(f.Namespace.Name).Create(context.TODO(), &v1.Service{
ObjectMeta: metav1.ObjectMeta{
Name: "service-for-" + appName,
Labels: map[string]string{
@@ -625,7 +626,7 @@ func (f *Framework) CreatePodsPerNodeForSimpleApp(appName string, podSpec func(n
}
for i, node := range nodes.Items {
Logf("%v/%v : Creating container with label app=%v-pod", i, maxCount, appName)
_, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(&v1.Pod{
_, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(context.TODO(), &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: fmt.Sprintf(appName+"-pod-%v", i),
Labels: podLabels,
@@ -776,9 +777,9 @@ func filterLabels(selectors map[string]string, cli clientset.Interface, ns strin
if len(selectors) > 0 {
selector = labels.SelectorFromSet(labels.Set(selectors))
options := metav1.ListOptions{LabelSelector: selector.String()}
pl, err = cli.CoreV1().Pods(ns).List(options)
pl, err = cli.CoreV1().Pods(ns).List(context.TODO(), options)
} else {
pl, err = cli.CoreV1().Pods(ns).List(metav1.ListOptions{})
pl, err = cli.CoreV1().Pods(ns).List(context.TODO(), metav1.ListOptions{})
}
return pl, err
}

View File

@@ -18,6 +18,7 @@ package ingress
import (
"bytes"
"context"
"crypto/rand"
"crypto/rsa"
"crypto/tls"
@@ -403,14 +404,14 @@ func createTLSSecret(kubeClient clientset.Interface, namespace, secretName strin
},
}
var s *v1.Secret
if s, err = kubeClient.CoreV1().Secrets(namespace).Get(secretName, metav1.GetOptions{}); err == nil {
if s, err = kubeClient.CoreV1().Secrets(namespace).Get(context.TODO(), secretName, metav1.GetOptions{}); err == nil {
// TODO: Retry the update. We don't really expect anything to conflict though.
framework.Logf("Updating secret %v in ns %v with hosts %v", secret.Name, namespace, host)
s.Data = secret.Data
_, err = kubeClient.CoreV1().Secrets(namespace).Update(s)
_, err = kubeClient.CoreV1().Secrets(namespace).Update(context.TODO(), s)
} else {
framework.Logf("Creating secret %v in ns %v with hosts %v", secret.Name, namespace, host)
_, err = kubeClient.CoreV1().Secrets(namespace).Create(secret)
_, err = kubeClient.CoreV1().Secrets(namespace).Create(context.TODO(), secret)
}
return host, cert, key, err
}
@@ -462,11 +463,11 @@ func (j *TestJig) CreateIngress(manifestPath, ns string, ingAnnotations map[stri
j.Logger.Infof("creating service")
framework.RunKubectlOrDieInput(ns, read("svc.yaml"), "create", "-f", "-", fmt.Sprintf("--namespace=%v", ns))
if len(svcAnnotations) > 0 {
svcList, err := j.Client.CoreV1().Services(ns).List(metav1.ListOptions{})
svcList, err := j.Client.CoreV1().Services(ns).List(context.TODO(), metav1.ListOptions{})
framework.ExpectNoError(err)
for _, svc := range svcList.Items {
svc.Annotations = svcAnnotations
_, err = j.Client.CoreV1().Services(ns).Update(&svc)
_, err = j.Client.CoreV1().Services(ns).Update(context.TODO(), &svc)
framework.ExpectNoError(err)
}
}
@@ -536,7 +537,7 @@ func ingressToManifest(ing *networkingv1beta1.Ingress, path string) error {
// runCreate runs the required command to create the given ingress.
func (j *TestJig) runCreate(ing *networkingv1beta1.Ingress) (*networkingv1beta1.Ingress, error) {
if j.Class != MulticlusterIngressClassValue {
return j.Client.NetworkingV1beta1().Ingresses(ing.Namespace).Create(ing)
return j.Client.NetworkingV1beta1().Ingresses(ing.Namespace).Create(context.TODO(), ing)
}
// Use kubemci to create a multicluster ingress.
filePath := framework.TestContext.OutputDir + "/mci.yaml"
@@ -550,7 +551,7 @@ func (j *TestJig) runCreate(ing *networkingv1beta1.Ingress) (*networkingv1beta1.
// runUpdate runs the required command to update the given ingress.
func (j *TestJig) runUpdate(ing *networkingv1beta1.Ingress) (*networkingv1beta1.Ingress, error) {
if j.Class != MulticlusterIngressClassValue {
return j.Client.NetworkingV1beta1().Ingresses(ing.Namespace).Update(ing)
return j.Client.NetworkingV1beta1().Ingresses(ing.Namespace).Update(context.TODO(), ing)
}
// Use kubemci to update a multicluster ingress.
// kubemci does not have an update command. We use "create --force" to update an existing ingress.
@@ -567,7 +568,7 @@ func (j *TestJig) Update(update func(ing *networkingv1beta1.Ingress)) {
var err error
ns, name := j.Ingress.Namespace, j.Ingress.Name
for i := 0; i < 3; i++ {
j.Ingress, err = j.Client.NetworkingV1beta1().Ingresses(ns).Get(name, metav1.GetOptions{})
j.Ingress, err = j.Client.NetworkingV1beta1().Ingresses(ns).Get(context.TODO(), name, metav1.GetOptions{})
if err != nil {
framework.Failf("failed to get ingress %s/%s: %v", ns, name, err)
}
@@ -658,7 +659,7 @@ func (j *TestJig) tryDeleteGivenIngress(ing *networkingv1beta1.Ingress) {
// runDelete runs the required command to delete the given ingress.
func (j *TestJig) runDelete(ing *networkingv1beta1.Ingress) error {
if j.Class != MulticlusterIngressClassValue {
return j.Client.NetworkingV1beta1().Ingresses(ing.Namespace).Delete(ing.Name, nil)
return j.Client.NetworkingV1beta1().Ingresses(ing.Namespace).Delete(context.TODO(), ing.Name, nil)
}
// Use kubemci to delete a multicluster ingress.
filePath := framework.TestContext.OutputDir + "/mci.yaml"
@@ -698,7 +699,7 @@ func getIngressAddress(client clientset.Interface, ns, name, class string) ([]st
if class == MulticlusterIngressClassValue {
return getIngressAddressFromKubemci(name)
}
ing, err := client.NetworkingV1beta1().Ingresses(ns).Get(name, metav1.GetOptions{})
ing, err := client.NetworkingV1beta1().Ingresses(ns).Get(context.TODO(), name, metav1.GetOptions{})
if err != nil {
return nil, err
}
@@ -850,7 +851,7 @@ func (j *TestJig) pollServiceNodePort(ns, name string, port int) error {
// getSvcNodePort returns the node port for the given service:port.
func getSvcNodePort(client clientset.Interface, ns, name string, svcPort int) (int, error) {
svc, err := client.CoreV1().Services(ns).Get(name, metav1.GetOptions{})
svc, err := client.CoreV1().Services(ns).Get(context.TODO(), name, metav1.GetOptions{})
if err != nil {
return 0, err
}
@@ -876,7 +877,7 @@ func getPortURL(client clientset.Interface, ns, name string, svcPort int) (strin
// kube-proxy NodePorts won't work.
var nodes *v1.NodeList
if wait.PollImmediate(poll, singleCallTimeout, func() (bool, error) {
nodes, err = client.CoreV1().Nodes().List(metav1.ListOptions{FieldSelector: fields.Set{
nodes, err = client.CoreV1().Nodes().List(context.TODO(), metav1.ListOptions{FieldSelector: fields.Set{
"spec.unschedulable": "false",
}.AsSelector().String()})
if err != nil {
@@ -923,7 +924,7 @@ func (j *TestJig) GetIngressNodePorts(includeDefaultBackend bool) []string {
func (j *TestJig) GetServicePorts(includeDefaultBackend bool) map[string]v1.ServicePort {
svcPorts := make(map[string]v1.ServicePort)
if includeDefaultBackend {
defaultSvc, err := j.Client.CoreV1().Services(metav1.NamespaceSystem).Get(defaultBackendName, metav1.GetOptions{})
defaultSvc, err := j.Client.CoreV1().Services(metav1.NamespaceSystem).Get(context.TODO(), defaultBackendName, metav1.GetOptions{})
framework.ExpectNoError(err)
svcPorts[defaultBackendName] = defaultSvc.Spec.Ports[0]
}
@@ -938,7 +939,7 @@ func (j *TestJig) GetServicePorts(includeDefaultBackend bool) map[string]v1.Serv
}
}
for _, svcName := range backendSvcs {
svc, err := j.Client.CoreV1().Services(j.Ingress.Namespace).Get(svcName, metav1.GetOptions{})
svc, err := j.Client.CoreV1().Services(j.Ingress.Namespace).Get(context.TODO(), svcName, metav1.GetOptions{})
framework.ExpectNoError(err)
svcPorts[svcName] = svc.Spec.Ports[0]
}
@@ -1018,14 +1019,14 @@ func (cont *NginxIngressController) Init() {
framework.Logf("initializing nginx ingress controller")
framework.RunKubectlOrDieInput(cont.Ns, read("rc.yaml"), "create", "-f", "-", fmt.Sprintf("--namespace=%v", cont.Ns))
rc, err := cont.Client.CoreV1().ReplicationControllers(cont.Ns).Get("nginx-ingress-controller", metav1.GetOptions{})
rc, err := cont.Client.CoreV1().ReplicationControllers(cont.Ns).Get(context.TODO(), "nginx-ingress-controller", metav1.GetOptions{})
framework.ExpectNoError(err)
cont.rc = rc
framework.Logf("waiting for pods with label %v", rc.Spec.Selector)
sel := labels.SelectorFromSet(labels.Set(rc.Spec.Selector))
framework.ExpectNoError(testutils.WaitForPodsWithLabelRunning(cont.Client, cont.Ns, sel))
pods, err := cont.Client.CoreV1().Pods(cont.Ns).List(metav1.ListOptions{LabelSelector: sel.String()})
pods, err := cont.Client.CoreV1().Pods(cont.Ns).List(context.TODO(), metav1.ListOptions{LabelSelector: sel.String()})
framework.ExpectNoError(err)
if len(pods.Items) == 0 {
framework.Failf("Failed to find nginx ingress controller pods with selector %v", sel)
@@ -1119,11 +1120,11 @@ func generateBacksideHTTPSDeploymentSpec() *appsv1.Deployment {
// SetUpBacksideHTTPSIngress sets up deployment, service and ingress with backside HTTPS configured.
func (j *TestJig) SetUpBacksideHTTPSIngress(cs clientset.Interface, namespace string, staticIPName string) (*appsv1.Deployment, *v1.Service, *networkingv1beta1.Ingress, error) {
deployCreated, err := cs.AppsV1().Deployments(namespace).Create(generateBacksideHTTPSDeploymentSpec())
deployCreated, err := cs.AppsV1().Deployments(namespace).Create(context.TODO(), generateBacksideHTTPSDeploymentSpec())
if err != nil {
return nil, nil, nil, err
}
svcCreated, err := cs.CoreV1().Services(namespace).Create(generateBacksideHTTPSServiceSpec())
svcCreated, err := cs.CoreV1().Services(namespace).Create(context.TODO(), generateBacksideHTTPSServiceSpec())
if err != nil {
return nil, nil, nil, err
}
@@ -1150,12 +1151,12 @@ func (j *TestJig) DeleteTestResource(cs clientset.Interface, deploy *appsv1.Depl
}
}
if svc != nil {
if err := cs.CoreV1().Services(svc.Namespace).Delete(svc.Name, nil); err != nil {
if err := cs.CoreV1().Services(svc.Namespace).Delete(context.TODO(), svc.Name, nil); err != nil {
errs = append(errs, fmt.Errorf("error while deleting service %s/%s: %v", svc.Namespace, svc.Name, err))
}
}
if deploy != nil {
if err := cs.AppsV1().Deployments(deploy.Namespace).Delete(deploy.Name, nil); err != nil {
if err := cs.AppsV1().Deployments(deploy.Namespace).Delete(context.TODO(), deploy.Name, nil); err != nil {
errs = append(errs, fmt.Errorf("error while deleting deployment %s/%s: %v", deploy.Namespace, deploy.Name, err))
}
}

View File

@@ -17,6 +17,7 @@ limitations under the License.
package job
import (
"context"
batchv1 "k8s.io/api/batch/v1"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -26,18 +27,18 @@ import (
// GetJob uses c to get the Job in namespace ns named name. If the returned error is nil, the returned Job is valid.
func GetJob(c clientset.Interface, ns, name string) (*batchv1.Job, error) {
return c.BatchV1().Jobs(ns).Get(name, metav1.GetOptions{})
return c.BatchV1().Jobs(ns).Get(context.TODO(), name, metav1.GetOptions{})
}
// GetJobPods returns a list of Pods belonging to a Job.
func GetJobPods(c clientset.Interface, ns, jobName string) (*v1.PodList, error) {
label := labels.SelectorFromSet(labels.Set(map[string]string{JobSelectorKey: jobName}))
options := metav1.ListOptions{LabelSelector: label.String()}
return c.CoreV1().Pods(ns).List(options)
return c.CoreV1().Pods(ns).List(context.TODO(), options)
}
// CreateJob uses c to create job in namespace ns. If the returned error is nil, the returned Job is valid and has
// been created.
func CreateJob(c clientset.Interface, ns string, job *batchv1.Job) (*batchv1.Job, error) {
return c.BatchV1().Jobs(ns).Create(job)
return c.BatchV1().Jobs(ns).Create(context.TODO(), job)
}

View File

@@ -17,6 +17,7 @@ limitations under the License.
package job
import (
"context"
"time"
"k8s.io/api/core/v1"
@@ -49,7 +50,7 @@ func WaitForAllJobPodsRunning(c clientset.Interface, ns, jobName string, paralle
// WaitForJobComplete uses c to wait for completions to complete for the Job jobName in namespace ns.
func WaitForJobComplete(c clientset.Interface, ns, jobName string, completions int32) error {
return wait.Poll(framework.Poll, JobTimeout, func() (bool, error) {
curr, err := c.BatchV1().Jobs(ns).Get(jobName, metav1.GetOptions{})
curr, err := c.BatchV1().Jobs(ns).Get(context.TODO(), jobName, metav1.GetOptions{})
if err != nil {
return false, err
}
@@ -60,7 +61,7 @@ func WaitForJobComplete(c clientset.Interface, ns, jobName string, completions i
// WaitForJobFinish uses c to wait for the Job jobName in namespace ns to finish (either Failed or Complete).
func WaitForJobFinish(c clientset.Interface, ns, jobName string) error {
return wait.PollImmediate(framework.Poll, JobTimeout, func() (bool, error) {
curr, err := c.BatchV1().Jobs(ns).Get(jobName, metav1.GetOptions{})
curr, err := c.BatchV1().Jobs(ns).Get(context.TODO(), jobName, metav1.GetOptions{})
if err != nil {
return false, err
}
@@ -71,7 +72,7 @@ func WaitForJobFinish(c clientset.Interface, ns, jobName string) error {
// WaitForJobGone uses c to wait for up to timeout for the Job named jobName in namespace ns to be removed.
func WaitForJobGone(c clientset.Interface, ns, jobName string, timeout time.Duration) error {
return wait.Poll(framework.Poll, timeout, func() (bool, error) {
_, err := c.BatchV1().Jobs(ns).Get(jobName, metav1.GetOptions{})
_, err := c.BatchV1().Jobs(ns).Get(context.TODO(), jobName, metav1.GetOptions{})
if apierrors.IsNotFound(err) {
return true, nil
}
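WaitForJobComplete, WaitForJobFinish and WaitForJobGone above all share the Get-inside-a-poll shape with the new context argument. A freestanding sketch of that shape for a success check follows; the exact condition the framework helpers test is not fully shown in this hunk, so the check below is illustrative.

package example

import (
	"context"
	"time"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/wait"
	"k8s.io/client-go/kubernetes"
)

// waitForJobSucceeded polls the Job with the context-aware Get until the
// requested number of completions have succeeded or the timeout expires.
func waitForJobSucceeded(c kubernetes.Interface, ns, name string, completions int32, timeout time.Duration) error {
	return wait.Poll(2*time.Second, timeout, func() (bool, error) {
		job, err := c.BatchV1().Jobs(ns).Get(context.TODO(), name, metav1.GetOptions{})
		if err != nil {
			return false, err
		}
		return job.Status.Succeeded >= completions, nil
	})
}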

View File

@@ -18,6 +18,7 @@ package kubectl
import (
"bytes"
"context"
"fmt"
"os/exec"
"path/filepath"
@@ -97,7 +98,7 @@ func (tk *TestKubeconfig) KubectlCmd(args ...string) *exec.Cmd {
// LogFailedContainers runs `kubectl logs` on failed containers.
func LogFailedContainers(c clientset.Interface, ns string, logFunc func(ftm string, args ...interface{})) {
podList, err := c.CoreV1().Pods(ns).List(metav1.ListOptions{})
podList, err := c.CoreV1().Pods(ns).List(context.TODO(), metav1.ListOptions{})
if err != nil {
logFunc("Error getting pods in namespace '%s': %v", ns, err)
return

View File

@@ -116,7 +116,7 @@ func NewRuntimeOperationMonitor(c clientset.Interface) *RuntimeOperationMonitor
client: c,
nodesRuntimeOps: make(map[string]NodeRuntimeOperationErrorRate),
}
nodes, err := m.client.CoreV1().Nodes().List(metav1.ListOptions{})
nodes, err := m.client.CoreV1().Nodes().List(context.TODO(), metav1.ListOptions{})
if err != nil {
framework.Failf("RuntimeOperationMonitor: unable to get list of nodes: %v", err)
}
@@ -478,7 +478,7 @@ func NewResourceMonitor(c clientset.Interface, containerNames []string, pollingI
// Start starts collectors.
func (r *ResourceMonitor) Start() {
// It should be OK to monitor unschedulable Nodes
nodes, err := r.client.CoreV1().Nodes().List(metav1.ListOptions{})
nodes, err := r.client.CoreV1().Nodes().List(context.TODO(), metav1.ListOptions{})
if err != nil {
framework.Failf("ResourceMonitor: unable to get list of nodes: %v", err)
}

View File

@@ -56,7 +56,7 @@ type Grabber struct {
func NewMetricsGrabber(c clientset.Interface, ec clientset.Interface, kubelets bool, scheduler bool, controllers bool, apiServer bool, clusterAutoscaler bool) (*Grabber, error) {
registeredMaster := false
masterName := ""
nodeList, err := c.CoreV1().Nodes().List(metav1.ListOptions{})
nodeList, err := c.CoreV1().Nodes().List(context.TODO(), metav1.ListOptions{})
if err != nil {
return nil, err
}
@@ -101,7 +101,7 @@ func (g *Grabber) HasRegisteredMaster() bool {
// GrabFromKubelet returns metrics from kubelet
func (g *Grabber) GrabFromKubelet(nodeName string) (KubeletMetrics, error) {
nodes, err := g.client.CoreV1().Nodes().List(metav1.ListOptions{FieldSelector: fields.Set{api.ObjectNameField: nodeName}.AsSelector().String()})
nodes, err := g.client.CoreV1().Nodes().List(context.TODO(), metav1.ListOptions{FieldSelector: fields.Set{api.ObjectNameField: nodeName}.AsSelector().String()})
if err != nil {
return KubeletMetrics{}, err
}
@@ -215,7 +215,7 @@ func (g *Grabber) Grab() (Collection, error) {
}
if g.grabFromKubelets {
result.KubeletMetrics = make(map[string]KubeletMetrics)
nodes, err := g.client.CoreV1().Nodes().List(metav1.ListOptions{})
nodes, err := g.client.CoreV1().Nodes().List(context.TODO(), metav1.ListOptions{})
if err != nil {
errs = append(errs, err)
} else {

View File

@@ -17,6 +17,7 @@ limitations under the License.
package network
import (
"context"
"encoding/json"
"fmt"
"io/ioutil"
@@ -552,7 +553,7 @@ func (config *NetworkingTestConfig) createSessionAffinityService(selector map[st
// DeleteNodePortService deletes NodePort service.
func (config *NetworkingTestConfig) DeleteNodePortService() {
err := config.getServiceClient().Delete(config.NodePortService.Name, nil)
err := config.getServiceClient().Delete(context.TODO(), config.NodePortService.Name, nil)
framework.ExpectNoError(err, "error while deleting NodePortService. err:%v)", err)
time.Sleep(15 * time.Second) // wait for kube-proxy to catch up with the service being deleted.
}
@@ -569,14 +570,14 @@ func (config *NetworkingTestConfig) createTestPods() {
framework.ExpectNoError(config.f.WaitForPodRunning(testContainerPod.Name))
var err error
config.TestContainerPod, err = config.getPodClient().Get(testContainerPod.Name, metav1.GetOptions{})
config.TestContainerPod, err = config.getPodClient().Get(context.TODO(), testContainerPod.Name, metav1.GetOptions{})
if err != nil {
framework.Failf("Failed to retrieve %s pod: %v", testContainerPod.Name, err)
}
if config.HostNetwork {
framework.ExpectNoError(config.f.WaitForPodRunning(hostTestContainerPod.Name))
config.HostTestContainerPod, err = config.getPodClient().Get(hostTestContainerPod.Name, metav1.GetOptions{})
config.HostTestContainerPod, err = config.getPodClient().Get(context.TODO(), hostTestContainerPod.Name, metav1.GetOptions{})
if err != nil {
framework.Failf("Failed to retrieve %s pod: %v", hostTestContainerPod.Name, err)
}
@@ -584,13 +585,13 @@ func (config *NetworkingTestConfig) createTestPods() {
}
func (config *NetworkingTestConfig) createService(serviceSpec *v1.Service) *v1.Service {
_, err := config.getServiceClient().Create(serviceSpec)
_, err := config.getServiceClient().Create(context.TODO(), serviceSpec)
framework.ExpectNoError(err, fmt.Sprintf("Failed to create %s service: %v", serviceSpec.Name, err))
err = framework.WaitForService(config.f.ClientSet, config.Namespace, serviceSpec.Name, true, 5*time.Second, 45*time.Second)
framework.ExpectNoError(err, fmt.Sprintf("error while waiting for service:%s err: %v", serviceSpec.Name, err))
createdService, err := config.getServiceClient().Get(serviceSpec.Name, metav1.GetOptions{})
createdService, err := config.getServiceClient().Get(context.TODO(), serviceSpec.Name, metav1.GetOptions{})
framework.ExpectNoError(err, fmt.Sprintf("Failed to create %s service: %v", serviceSpec.Name, err))
return createdService
@@ -666,7 +667,7 @@ func (config *NetworkingTestConfig) createNetProxyPods(podName string, selector
runningPods := make([]*v1.Pod, 0, len(nodes))
for _, p := range createdPods {
framework.ExpectNoError(config.f.WaitForPodReady(p.Name))
rp, err := config.getPodClient().Get(p.Name, metav1.GetOptions{})
rp, err := config.getPodClient().Get(context.TODO(), p.Name, metav1.GetOptions{})
framework.ExpectNoError(err)
runningPods = append(runningPods, rp)
}
@@ -677,7 +678,7 @@ func (config *NetworkingTestConfig) createNetProxyPods(podName string, selector
// DeleteNetProxyPod deletes the first endpoint pod and waits for it to be removed.
func (config *NetworkingTestConfig) DeleteNetProxyPod() {
pod := config.EndpointPods[0]
config.getPodClient().Delete(pod.Name, metav1.NewDeleteOptions(0))
config.getPodClient().Delete(context.TODO(), pod.Name, metav1.NewDeleteOptions(0))
config.EndpointPods = config.EndpointPods[1:]
// wait for pod being deleted.
err := e2epod.WaitForPodToDisappear(config.f.ClientSet, config.Namespace, pod.Name, labels.Everything(), time.Second, wait.ForeverTestTimeout)

View File

@@ -17,6 +17,7 @@ limitations under the License.
package node
import (
"context"
"fmt"
"net"
"strings"
@@ -336,7 +337,7 @@ func GetReadyNodesIncludingTainted(c clientset.Interface) (nodes *v1.NodeList, e
func GetMasterAndWorkerNodes(c clientset.Interface) (sets.String, *v1.NodeList, error) {
nodes := &v1.NodeList{}
masters := sets.NewString()
all, err := c.CoreV1().Nodes().List(metav1.ListOptions{})
all, err := c.CoreV1().Nodes().List(context.TODO(), metav1.ListOptions{})
if err != nil {
return nil, nil, fmt.Errorf("get nodes error: %s", err)
}
@@ -460,7 +461,7 @@ func hasNonblockingTaint(node *v1.Node, nonblockingTaints string) bool {
func PodNodePairs(c clientset.Interface, ns string) ([]PodNode, error) {
var result []PodNode
podList, err := c.CoreV1().Pods(ns).List(metav1.ListOptions{})
podList, err := c.CoreV1().Pods(ns).List(context.TODO(), metav1.ListOptions{})
if err != nil {
return result, err
}

View File

@@ -17,6 +17,7 @@ limitations under the License.
package node
import (
"context"
"fmt"
"regexp"
"time"
@@ -56,7 +57,7 @@ func WaitForTotalHealthy(c clientset.Interface, timeout time.Duration) error {
err := wait.PollImmediate(poll, timeout, func() (bool, error) {
notReady = nil
// It should be OK to list unschedulable Nodes here.
nodes, err := c.CoreV1().Nodes().List(metav1.ListOptions{ResourceVersion: "0"})
nodes, err := c.CoreV1().Nodes().List(context.TODO(), metav1.ListOptions{ResourceVersion: "0"})
if err != nil {
if testutils.IsRetryableAPIError(err) {
return false, nil
@@ -68,7 +69,7 @@ func WaitForTotalHealthy(c clientset.Interface, timeout time.Duration) error {
notReady = append(notReady, node)
}
}
pods, err := c.CoreV1().Pods(metav1.NamespaceAll).List(metav1.ListOptions{ResourceVersion: "0"})
pods, err := c.CoreV1().Pods(metav1.NamespaceAll).List(context.TODO(), metav1.ListOptions{ResourceVersion: "0"})
if err != nil {
return false, err
}
@@ -122,7 +123,7 @@ func WaitForTotalHealthy(c clientset.Interface, timeout time.Duration) error {
func WaitConditionToBe(c clientset.Interface, name string, conditionType v1.NodeConditionType, wantTrue bool, timeout time.Duration) bool {
e2elog.Logf("Waiting up to %v for node %s condition %s to be %t", timeout, name, conditionType, wantTrue)
for start := time.Now(); time.Since(start) < timeout; time.Sleep(poll) {
node, err := c.CoreV1().Nodes().Get(name, metav1.GetOptions{})
node, err := c.CoreV1().Nodes().Get(context.TODO(), name, metav1.GetOptions{})
if err != nil {
e2elog.Logf("Couldn't get node %s", name)
continue
@@ -182,7 +183,7 @@ func waitListSchedulableNodes(c clientset.Interface) (*v1.NodeList, error) {
var nodes *v1.NodeList
var err error
if wait.PollImmediate(poll, singleCallTimeout, func() (bool, error) {
nodes, err = c.CoreV1().Nodes().List(metav1.ListOptions{FieldSelector: fields.Set{
nodes, err = c.CoreV1().Nodes().List(context.TODO(), metav1.ListOptions{FieldSelector: fields.Set{
"spec.unschedulable": "false",
}.AsSelector().String()})
if err != nil {
@@ -219,7 +220,7 @@ func CheckReadyForTests(c clientset.Interface, nonblockingTaints string, allowed
ResourceVersion: "0",
FieldSelector: fields.Set{"spec.unschedulable": "false"}.AsSelector().String(),
}
nodes, err := c.CoreV1().Nodes().List(opts)
nodes, err := c.CoreV1().Nodes().List(context.TODO(), opts)
if err != nil {
e2elog.Logf("Unexpected error listing nodes: %v", err)
if testutils.IsRetryableAPIError(err) {

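Both waitListSchedulableNodes and CheckReadyForTests above filter on spec.unschedulable while listing nodes with the context-aware List. The selector construction, pulled out into a standalone sketch (the function name is illustrative):

package example

import (
	"context"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/fields"
	"k8s.io/client-go/kubernetes"
)

// listSchedulableNodes lists only nodes that are not marked unschedulable,
// passing the field selector and context through to the client.
func listSchedulableNodes(c kubernetes.Interface) (*v1.NodeList, error) {
	opts := metav1.ListOptions{
		FieldSelector: fields.Set{"spec.unschedulable": "false"}.AsSelector().String(),
	}
	return c.CoreV1().Nodes().List(context.TODO(), opts)
}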
View File

@@ -17,6 +17,7 @@ limitations under the License.
package pod
import (
"context"
"fmt"
"time"
@@ -35,7 +36,7 @@ var (
// CreateUnschedulablePod with given claims based on node selector
func CreateUnschedulablePod(client clientset.Interface, namespace string, nodeSelector map[string]string, pvclaims []*v1.PersistentVolumeClaim, isPrivileged bool, command string) (*v1.Pod, error) {
pod := MakePod(namespace, nodeSelector, pvclaims, isPrivileged, command)
pod, err := client.CoreV1().Pods(namespace).Create(pod)
pod, err := client.CoreV1().Pods(namespace).Create(context.TODO(), pod)
if err != nil {
return nil, fmt.Errorf("pod Create API error: %v", err)
}
@@ -45,7 +46,7 @@ func CreateUnschedulablePod(client clientset.Interface, namespace string, nodeSe
return pod, fmt.Errorf("pod %q is not Unschedulable: %v", pod.Name, err)
}
// get fresh pod info
pod, err = client.CoreV1().Pods(namespace).Get(pod.Name, metav1.GetOptions{})
pod, err = client.CoreV1().Pods(namespace).Get(context.TODO(), pod.Name, metav1.GetOptions{})
if err != nil {
return pod, fmt.Errorf("pod Get API error: %v", err)
}
@@ -60,7 +61,7 @@ func CreateClientPod(c clientset.Interface, ns string, pvc *v1.PersistentVolumeC
// CreatePod with given claims based on node selector
func CreatePod(client clientset.Interface, namespace string, nodeSelector map[string]string, pvclaims []*v1.PersistentVolumeClaim, isPrivileged bool, command string) (*v1.Pod, error) {
pod := MakePod(namespace, nodeSelector, pvclaims, isPrivileged, command)
pod, err := client.CoreV1().Pods(namespace).Create(pod)
pod, err := client.CoreV1().Pods(namespace).Create(context.TODO(), pod)
if err != nil {
return nil, fmt.Errorf("pod Create API error: %v", err)
}
@@ -70,7 +71,7 @@ func CreatePod(client clientset.Interface, namespace string, nodeSelector map[st
return pod, fmt.Errorf("pod %q is not Running: %v", pod.Name, err)
}
// get fresh pod info
pod, err = client.CoreV1().Pods(namespace).Get(pod.Name, metav1.GetOptions{})
pod, err = client.CoreV1().Pods(namespace).Get(context.TODO(), pod.Name, metav1.GetOptions{})
if err != nil {
return pod, fmt.Errorf("pod Get API error: %v", err)
}
@@ -90,7 +91,7 @@ func CreateSecPodWithNodeSelection(client clientset.Interface, namespace string,
pod.Spec.NodeSelector = node.Selector
pod.Spec.Affinity = node.Affinity
pod, err := client.CoreV1().Pods(namespace).Create(pod)
pod, err := client.CoreV1().Pods(namespace).Create(context.TODO(), pod)
if err != nil {
return nil, fmt.Errorf("pod Create API error: %v", err)
}
@@ -101,7 +102,7 @@ func CreateSecPodWithNodeSelection(client clientset.Interface, namespace string,
return pod, fmt.Errorf("pod %q is not Running: %v", pod.Name, err)
}
// get fresh pod info
pod, err = client.CoreV1().Pods(namespace).Get(pod.Name, metav1.GetOptions{})
pod, err = client.CoreV1().Pods(namespace).Get(context.TODO(), pod.Name, metav1.GetOptions{})
if err != nil {
return pod, fmt.Errorf("pod Get API error: %v", err)
}

View File

@@ -17,6 +17,7 @@ limitations under the License.
package pod
import (
"context"
"fmt"
"time"
@@ -36,7 +37,7 @@ const (
// DeletePodOrFail deletes the pod of the specified namespace and name.
func DeletePodOrFail(c clientset.Interface, ns, name string) {
ginkgo.By(fmt.Sprintf("Deleting pod %s in namespace %s", name, ns))
err := c.CoreV1().Pods(ns).Delete(name, nil)
err := c.CoreV1().Pods(ns).Delete(context.TODO(), name, nil)
expectNoError(err, "failed to delete pod %s in namespace %s", name, ns)
}
@@ -53,7 +54,7 @@ func DeletePodWithWait(c clientset.Interface, pod *v1.Pod) error {
// not existing.
func DeletePodWithWaitByName(c clientset.Interface, podName, podNamespace string) error {
e2elog.Logf("Deleting pod %q in namespace %q", podName, podNamespace)
err := c.CoreV1().Pods(podNamespace).Delete(podName, nil)
err := c.CoreV1().Pods(podNamespace).Delete(context.TODO(), podName, nil)
if err != nil {
if apierrors.IsNotFound(err) {
return nil // assume pod was already deleted

View File

@@ -86,7 +86,7 @@ func NewProxyResponseChecker(c clientset.Interface, ns string, label labels.Sele
func (r ProxyResponseChecker) CheckAllResponses() (done bool, err error) {
successes := 0
options := metav1.ListOptions{LabelSelector: r.label.String()}
currentPods, err := r.c.CoreV1().Pods(r.ns).List(options)
currentPods, err := r.c.CoreV1().Pods(r.ns).List(context.TODO(), options)
expectNoError(err, "Failed to get list of currentPods in namespace: %s", r.ns)
for i, pod := range r.pods.Items {
// Check that the replica list remains unchanged, otherwise we have problems.
@@ -147,7 +147,7 @@ func (r ProxyResponseChecker) CheckAllResponses() (done bool, err error) {
func podRunning(c clientset.Interface, podName, namespace string) wait.ConditionFunc {
return func() (bool, error) {
pod, err := c.CoreV1().Pods(namespace).Get(podName, metav1.GetOptions{})
pod, err := c.CoreV1().Pods(namespace).Get(context.TODO(), podName, metav1.GetOptions{})
if err != nil {
return false, err
}
@@ -163,7 +163,7 @@ func podRunning(c clientset.Interface, podName, namespace string) wait.Condition
func podCompleted(c clientset.Interface, podName, namespace string) wait.ConditionFunc {
return func() (bool, error) {
pod, err := c.CoreV1().Pods(namespace).Get(podName, metav1.GetOptions{})
pod, err := c.CoreV1().Pods(namespace).Get(context.TODO(), podName, metav1.GetOptions{})
if err != nil {
return false, err
}
@@ -177,7 +177,7 @@ func podCompleted(c clientset.Interface, podName, namespace string) wait.Conditi
func podRunningAndReady(c clientset.Interface, podName, namespace string) wait.ConditionFunc {
return func() (bool, error) {
pod, err := c.CoreV1().Pods(namespace).Get(podName, metav1.GetOptions{})
pod, err := c.CoreV1().Pods(namespace).Get(context.TODO(), podName, metav1.GetOptions{})
if err != nil {
return false, err
}
@@ -196,7 +196,7 @@ func podRunningAndReady(c clientset.Interface, podName, namespace string) wait.C
func podNotPending(c clientset.Interface, podName, namespace string) wait.ConditionFunc {
return func() (bool, error) {
pod, err := c.CoreV1().Pods(namespace).Get(podName, metav1.GetOptions{})
pod, err := c.CoreV1().Pods(namespace).Get(context.TODO(), podName, metav1.GetOptions{})
if err != nil {
return false, err
}
@@ -222,7 +222,7 @@ func PodsCreatedByLabel(c clientset.Interface, ns, name string, replicas int32,
options := metav1.ListOptions{LabelSelector: label.String()}
// List the pods, making sure we observe all the replicas.
pods, err := c.CoreV1().Pods(ns).List(options)
pods, err := c.CoreV1().Pods(ns).List(context.TODO(), options)
if err != nil {
return nil, err
}
@@ -350,7 +350,7 @@ func logPodTerminationMessages(pods []v1.Pod) {
// DumpAllPodInfoForNamespace logs all pod information for a given namespace.
func DumpAllPodInfoForNamespace(c clientset.Interface, namespace string) {
pods, err := c.CoreV1().Pods(namespace).List(metav1.ListOptions{})
pods, err := c.CoreV1().Pods(namespace).List(context.TODO(), metav1.ListOptions{})
if err != nil {
e2elog.Logf("unable to fetch pod debug info: %v", err)
}
@@ -436,10 +436,10 @@ func CreateExecPodOrFail(client clientset.Interface, ns, generateName string, tw
if tweak != nil {
tweak(pod)
}
execPod, err := client.CoreV1().Pods(ns).Create(pod)
execPod, err := client.CoreV1().Pods(ns).Create(context.TODO(), pod)
expectNoError(err, "failed to create new exec pod in namespace: %s", ns)
err = wait.PollImmediate(poll, 5*time.Minute, func() (bool, error) {
retrievedPod, err := client.CoreV1().Pods(execPod.Namespace).Get(execPod.Name, metav1.GetOptions{})
retrievedPod, err := client.CoreV1().Pods(execPod.Namespace).Get(context.TODO(), execPod.Name, metav1.GetOptions{})
if err != nil {
if testutils.IsRetryableAPIError(err) {
return false, nil
@@ -528,7 +528,7 @@ func getPodLogsInternal(c clientset.Interface, namespace, podName, containerName
// GetPodsInNamespace returns the pods in the given namespace.
func GetPodsInNamespace(c clientset.Interface, ns string, ignoreLabels map[string]string) ([]*v1.Pod, error) {
pods, err := c.CoreV1().Pods(ns).List(metav1.ListOptions{})
pods, err := c.CoreV1().Pods(ns).List(context.TODO(), metav1.ListOptions{})
if err != nil {
return []*v1.Pod{}, err
}

View File

@@ -18,6 +18,7 @@ package pod
import (
"bytes"
"context"
"errors"
"fmt"
"sync"
@@ -125,7 +126,7 @@ func WaitForPodsRunningReady(c clientset.Interface, ns string, minPods, allowedN
// checked.
replicas, replicaOk := int32(0), int32(0)
rcList, err := c.CoreV1().ReplicationControllers(ns).List(metav1.ListOptions{})
rcList, err := c.CoreV1().ReplicationControllers(ns).List(context.TODO(), metav1.ListOptions{})
if err != nil {
e2elog.Logf("Error getting replication controllers in namespace '%s': %v", ns, err)
if testutils.IsRetryableAPIError(err) {
@@ -138,7 +139,7 @@ func WaitForPodsRunningReady(c clientset.Interface, ns string, minPods, allowedN
replicaOk += rc.Status.ReadyReplicas
}
rsList, err := c.AppsV1().ReplicaSets(ns).List(metav1.ListOptions{})
rsList, err := c.AppsV1().ReplicaSets(ns).List(context.TODO(), metav1.ListOptions{})
if err != nil {
e2elog.Logf("Error getting replication sets in namespace %q: %v", ns, err)
if testutils.IsRetryableAPIError(err) {
@@ -151,7 +152,7 @@ func WaitForPodsRunningReady(c clientset.Interface, ns string, minPods, allowedN
replicaOk += rs.Status.ReadyReplicas
}
podList, err := c.CoreV1().Pods(ns).List(metav1.ListOptions{})
podList, err := c.CoreV1().Pods(ns).List(context.TODO(), metav1.ListOptions{})
if err != nil {
e2elog.Logf("Error getting pods in namespace '%s': %v", ns, err)
if testutils.IsRetryableAPIError(err) {
@@ -211,7 +212,7 @@ func WaitForPodsRunningReady(c clientset.Interface, ns string, minPods, allowedN
func WaitForPodCondition(c clientset.Interface, ns, podName, desc string, timeout time.Duration, condition podCondition) error {
e2elog.Logf("Waiting up to %v for pod %q in namespace %q to be %q", timeout, podName, ns, desc)
for start := time.Now(); time.Since(start) < timeout; time.Sleep(poll) {
pod, err := c.CoreV1().Pods(ns).Get(podName, metav1.GetOptions{})
pod, err := c.CoreV1().Pods(ns).Get(context.TODO(), podName, metav1.GetOptions{})
if err != nil {
if apierrors.IsNotFound(err) {
e2elog.Logf("Pod %q in namespace %q not found. Error: %v", podName, ns, err)
@@ -297,7 +298,7 @@ func WaitForPodNameUnschedulableInNamespace(c clientset.Interface, podName, name
func WaitForMatchPodsCondition(c clientset.Interface, opts metav1.ListOptions, desc string, timeout time.Duration, condition podCondition) error {
e2elog.Logf("Waiting up to %v for matching pods' status to be %s", timeout, desc)
for start := time.Now(); time.Since(start) < timeout; time.Sleep(poll) {
pods, err := c.CoreV1().Pods(metav1.NamespaceAll).List(opts)
pods, err := c.CoreV1().Pods(metav1.NamespaceAll).List(context.TODO(), opts)
if err != nil {
return err
}
@@ -386,7 +387,7 @@ func WaitForPodSuccessInNamespaceSlow(c clientset.Interface, podName string, nam
// than "not found" then that error is returned and the wait stops.
func WaitForPodNotFoundInNamespace(c clientset.Interface, podName, ns string, timeout time.Duration) error {
return wait.PollImmediate(poll, timeout, func() (bool, error) {
_, err := c.CoreV1().Pods(ns).Get(podName, metav1.GetOptions{})
_, err := c.CoreV1().Pods(ns).Get(context.TODO(), podName, metav1.GetOptions{})
if apierrors.IsNotFound(err) {
return true, nil // done
}
@@ -402,7 +403,7 @@ func WaitForPodToDisappear(c clientset.Interface, ns, podName string, label labe
return wait.PollImmediate(interval, timeout, func() (bool, error) {
e2elog.Logf("Waiting for pod %s to disappear", podName)
options := metav1.ListOptions{LabelSelector: label.String()}
pods, err := c.CoreV1().Pods(ns).List(options)
pods, err := c.CoreV1().Pods(ns).List(context.TODO(), options)
if err != nil {
if testutils.IsRetryableAPIError(err) {
return false, nil
@@ -489,7 +490,7 @@ func WaitForPodsWithLabelScheduled(c clientset.Interface, ns string, label label
func WaitForPodsWithLabel(c clientset.Interface, ns string, label labels.Selector) (pods *v1.PodList, err error) {
for t := time.Now(); time.Since(t) < podListTimeout; time.Sleep(poll) {
options := metav1.ListOptions{LabelSelector: label.String()}
pods, err = c.CoreV1().Pods(ns).List(options)
pods, err = c.CoreV1().Pods(ns).List(context.TODO(), options)
if err != nil {
if testutils.IsRetryableAPIError(err) {
continue
@@ -540,7 +541,7 @@ func WaitForPodsReady(c clientset.Interface, ns, name string, minReadySeconds in
label := labels.SelectorFromSet(labels.Set(map[string]string{"name": name}))
options := metav1.ListOptions{LabelSelector: label.String()}
return wait.Poll(poll, 5*time.Minute, func() (bool, error) {
pods, err := c.CoreV1().Pods(ns).List(options)
pods, err := c.CoreV1().Pods(ns).List(context.TODO(), options)
if err != nil {
return false, nil
}

View File

@@ -78,7 +78,7 @@ var expectedErrors = regexp.MustCompile(`container .* in pod .* is (terminated|w
// running pods, but that then would have the disadvantage that
// already deleted pods aren't covered.
func CopyAllLogs(ctx context.Context, cs clientset.Interface, ns string, to LogOutput) error {
watcher, err := cs.CoreV1().Pods(ns).Watch(meta.ListOptions{})
watcher, err := cs.CoreV1().Pods(ns).Watch(context.TODO(), meta.ListOptions{})
if err != nil {
return errors.Wrap(err, "cannot create Pod event watcher")
}
@@ -90,7 +90,7 @@ func CopyAllLogs(ctx context.Context, cs clientset.Interface, ns string, to LogO
m.Lock()
defer m.Unlock()
pods, err := cs.CoreV1().Pods(ns).List(meta.ListOptions{})
pods, err := cs.CoreV1().Pods(ns).List(context.TODO(), meta.ListOptions{})
if err != nil {
if to.StatusWriter != nil {
fmt.Fprintf(to.StatusWriter, "ERROR: get pod list in %s: %s\n", ns, err)
@@ -213,7 +213,7 @@ func CopyAllLogs(ctx context.Context, cs clientset.Interface, ns string, to LogO
// WatchPods prints pod status events for a certain namespace or all namespaces
// when namespace name is empty.
func WatchPods(ctx context.Context, cs clientset.Interface, ns string, to io.Writer) error {
watcher, err := cs.CoreV1().Pods(ns).Watch(meta.ListOptions{})
watcher, err := cs.CoreV1().Pods(ns).Watch(context.TODO(), meta.ListOptions{})
if err != nil {
return errors.Wrap(err, "cannot create Pod event watcher")
}
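CopyAllLogs and WatchPods above already receive a ctx parameter but still hand context.TODO() to the Watch call. As a sketch of what passing the caller's context through would look like (illustrative only, not what this patch does): cancelling ctx should abort the underlying watch request, so the ResultChan loop below ends.

package example

import (
	"context"
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

// streamPodEvents watches pods in ns until ctx is cancelled. Because ctx is
// passed to Watch (rather than context.TODO()), cancelling it tears down the
// stream and the range over ResultChan exits.
func streamPodEvents(ctx context.Context, cs kubernetes.Interface, ns string) error {
	w, err := cs.CoreV1().Pods(ns).Watch(ctx, metav1.ListOptions{})
	if err != nil {
		return err
	}
	defer w.Stop()
	for ev := range w.ResultChan() {
		fmt.Printf("%s %T\n", ev.Type, ev.Object)
	}
	return nil
}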

View File

@@ -17,6 +17,7 @@ limitations under the License.
package framework
import (
"context"
"fmt"
"regexp"
"sync"
@@ -78,7 +79,7 @@ type PodClient struct {
// Create creates a new pod according to the framework specifications (don't wait for it to start).
func (c *PodClient) Create(pod *v1.Pod) *v1.Pod {
c.mungeSpec(pod)
p, err := c.PodInterface.Create(pod)
p, err := c.PodInterface.Create(context.TODO(), pod)
ExpectNoError(err, "Error creating Pod")
return p
}
@@ -89,7 +90,7 @@ func (c *PodClient) CreateSync(pod *v1.Pod) *v1.Pod {
p := c.Create(pod)
ExpectNoError(e2epod.WaitForPodNameRunningInNamespace(c.f.ClientSet, p.Name, namespace))
// Get the newest pod after it becomes running; some fields, such as the pod IP, may change after the pod is created.
p, err := c.Get(p.Name, metav1.GetOptions{})
p, err := c.Get(context.TODO(), p.Name, metav1.GetOptions{})
ExpectNoError(err)
return p
}
@@ -115,12 +116,12 @@ func (c *PodClient) CreateBatch(pods []*v1.Pod) []*v1.Pod {
// pod object.
func (c *PodClient) Update(name string, updateFn func(pod *v1.Pod)) {
ExpectNoError(wait.Poll(time.Millisecond*500, time.Second*30, func() (bool, error) {
pod, err := c.PodInterface.Get(name, metav1.GetOptions{})
pod, err := c.PodInterface.Get(context.TODO(), name, metav1.GetOptions{})
if err != nil {
return false, fmt.Errorf("failed to get pod %q: %v", name, err)
}
updateFn(pod)
_, err = c.PodInterface.Update(pod)
_, err = c.PodInterface.Update(context.TODO(), pod)
if err == nil {
Logf("Successfully updated pod %q", name)
return true, nil
@@ -137,7 +138,7 @@ func (c *PodClient) Update(name string, updateFn func(pod *v1.Pod)) {
// disappear before the timeout, it will fail the test.
func (c *PodClient) DeleteSync(name string, options *metav1.DeleteOptions, timeout time.Duration) {
namespace := c.f.Namespace.Name
err := c.Delete(name, options)
err := c.Delete(context.TODO(), name, options)
if err != nil && !apierrors.IsNotFound(err) {
Failf("Failed to delete pod %q: %v", name, err)
}
@@ -259,7 +260,7 @@ func (c *PodClient) MatchContainerOutput(name string, containerName string, expe
// PodIsReady returns true if the specified pod is ready. Otherwise false.
func (c *PodClient) PodIsReady(name string) bool {
pod, err := c.Get(name, metav1.GetOptions{})
pod, err := c.Get(context.TODO(), name, metav1.GetOptions{})
ExpectNoError(err)
return podutil.IsPodReady(pod)
}
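
Outside the framework, the same get-mutate-update retry that PodClient.Update performs looks roughly like the sketch below, written against the transitional two-argument Update signature used in this diff; the function name and retry intervals are illustrative.

package example

import (
	"context"
	"fmt"
	"time"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/wait"
	"k8s.io/client-go/kubernetes"
)

// updatePod retries a read-modify-write of a pod, re-reading on every
// attempt so conflicts with concurrent writers eventually resolve.
func updatePod(c kubernetes.Interface, ns, name string, mutate func(*v1.Pod)) error {
	return wait.Poll(500*time.Millisecond, 30*time.Second, func() (bool, error) {
		pod, err := c.CoreV1().Pods(ns).Get(context.TODO(), name, metav1.GetOptions{})
		if err != nil {
			return false, fmt.Errorf("failed to get pod %q: %v", name, err)
		}
		mutate(pod)
		if _, err := c.CoreV1().Pods(ns).Update(context.TODO(), pod); err == nil {
			return true, nil
		}
		// Most likely a resourceVersion conflict; retry with a fresh object.
		return false, nil
	})
}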


@@ -17,6 +17,7 @@ limitations under the License.
package gce
import (
"context"
"fmt"
"net/http"
"os/exec"
@@ -371,7 +372,7 @@ func GetGCECloud() (*gcecloud.Cloud, error) {
// GetClusterID returns cluster ID
func GetClusterID(c clientset.Interface) (string, error) {
cm, err := c.CoreV1().ConfigMaps(metav1.NamespaceSystem).Get(gcecloud.UIDConfigMapName, metav1.GetOptions{})
cm, err := c.CoreV1().ConfigMaps(metav1.NamespaceSystem).Get(context.TODO(), gcecloud.UIDConfigMapName, metav1.GetOptions{})
if err != nil || cm == nil {
return "", fmt.Errorf("error getting cluster ID: %v", err)
}
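
context.TODO() is only a placeholder; a call site that already has a deadline can pass a bounded context instead. A minimal sketch under that assumption; the one-minute timeout and the "uid" data key are illustrative, not necessarily what the GCE provider uses.

package example

import (
	"context"
	"time"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

// clusterUID reads a config map with a bounded context, so a hung API
// server cannot block the caller indefinitely.
func clusterUID(c kubernetes.Interface, name string) (string, error) {
	ctx, cancel := context.WithTimeout(context.Background(), time.Minute)
	defer cancel()
	cm, err := c.CoreV1().ConfigMaps(metav1.NamespaceSystem).Get(ctx, name, metav1.GetOptions{})
	if err != nil {
		return "", err
	}
	// "uid" is an illustrative key, not the provider's actual field name.
	return cm.Data["uid"], nil
}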


@@ -17,6 +17,7 @@ limitations under the License.
package gce
import (
"context"
"crypto/sha256"
"encoding/json"
"fmt"
@@ -126,7 +127,7 @@ func (cont *IngressController) CleanupIngressControllerWithTimeout(timeout time.
func (cont *IngressController) getL7AddonUID() (string, error) {
framework.Logf("Retrieving UID from config map: %v/%v", metav1.NamespaceSystem, uidConfigMap)
cm, err := cont.Client.CoreV1().ConfigMaps(metav1.NamespaceSystem).Get(uidConfigMap, metav1.GetOptions{})
cm, err := cont.Client.CoreV1().ConfigMaps(metav1.NamespaceSystem).Get(context.TODO(), uidConfigMap, metav1.GetOptions{})
if err != nil {
return "", err
}


@@ -17,6 +17,7 @@ limitations under the License.
package gce
import (
"context"
"fmt"
"time"
@@ -77,7 +78,7 @@ var _ = ginkgo.Describe("Recreate [Feature:Recreate]", func() {
// Make sure that addon/system pods are running, so dump
// events for the kube-system namespace on failures
ginkgo.By(fmt.Sprintf("Collecting events from namespace %q.", systemNamespace))
events, err := f.ClientSet.CoreV1().Events(systemNamespace).List(metav1.ListOptions{})
events, err := f.ClientSet.CoreV1().Events(systemNamespace).List(context.TODO(), metav1.ListOptions{})
framework.ExpectNoError(err)
for _, e := range events.Items {


@@ -17,6 +17,7 @@ limitations under the License.
package gce
import (
"context"
"fmt"
"strings"
"time"
@@ -84,7 +85,7 @@ func WaitForNodeBootIdsToChange(c clientset.Interface, nodes []v1.Node, timeout
for i := range nodes {
node := &nodes[i]
if err := wait.Poll(30*time.Second, timeout, func() (bool, error) {
newNode, err := c.CoreV1().Nodes().Get(node.Name, metav1.GetOptions{})
newNode, err := c.CoreV1().Nodes().Get(context.TODO(), node.Name, metav1.GetOptions{})
if err != nil {
framework.Logf("Could not get node info: %s. Retrying in %v.", err, 30*time.Second)
return false, nil


@@ -17,6 +17,7 @@ limitations under the License.
package framework
import (
"context"
"fmt"
"sync"
@@ -83,7 +84,7 @@ func privilegedPSP(name string) *policyv1beta1.PodSecurityPolicy {
// IsPodSecurityPolicyEnabled returns true if PodSecurityPolicy is enabled. Otherwise false.
func IsPodSecurityPolicyEnabled(kubeClient clientset.Interface) bool {
isPSPEnabledOnce.Do(func() {
psps, err := kubeClient.PolicyV1beta1().PodSecurityPolicies().List(metav1.ListOptions{})
psps, err := kubeClient.PolicyV1beta1().PodSecurityPolicies().List(context.TODO(), metav1.ListOptions{})
if err != nil {
Logf("Error listing PodSecurityPolicies; assuming PodSecurityPolicy is disabled: %v", err)
isPSPEnabled = false
@@ -109,8 +110,7 @@ func CreatePrivilegedPSPBinding(kubeClient clientset.Interface, namespace string
}
// Create the privileged PSP & role
privilegedPSPOnce.Do(func() {
_, err := kubeClient.PolicyV1beta1().PodSecurityPolicies().Get(
podSecurityPolicyPrivileged, metav1.GetOptions{})
_, err := kubeClient.PolicyV1beta1().PodSecurityPolicies().Get(context.TODO(), podSecurityPolicyPrivileged, metav1.GetOptions{})
if !apierrors.IsNotFound(err) {
// Privileged PSP was already created.
ExpectNoError(err, "Failed to get PodSecurityPolicy %s", podSecurityPolicyPrivileged)
@@ -118,14 +118,14 @@ func CreatePrivilegedPSPBinding(kubeClient clientset.Interface, namespace string
}
psp := privilegedPSP(podSecurityPolicyPrivileged)
_, err = kubeClient.PolicyV1beta1().PodSecurityPolicies().Create(psp)
_, err = kubeClient.PolicyV1beta1().PodSecurityPolicies().Create(context.TODO(), psp)
if !apierrors.IsAlreadyExists(err) {
ExpectNoError(err, "Failed to create PSP %s", podSecurityPolicyPrivileged)
}
if auth.IsRBACEnabled(kubeClient.RbacV1()) {
// Create the Role to bind it to the namespace.
_, err = kubeClient.RbacV1().ClusterRoles().Create(&rbacv1.ClusterRole{
_, err = kubeClient.RbacV1().ClusterRoles().Create(context.TODO(), &rbacv1.ClusterRole{
ObjectMeta: metav1.ObjectMeta{Name: podSecurityPolicyPrivileged},
Rules: []rbacv1.PolicyRule{{
APIGroups: []string{"extensions"},

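The create-if-missing flow above generalizes to other cluster-scoped objects. A hedged sketch using a ClusterRole and the transitional two-argument Create shown in this diff; the function name is illustrative.

package example

import (
	"context"

	rbacv1 "k8s.io/api/rbac/v1"
	apierrors "k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

// ensureClusterRole creates the role only if it does not already exist,
// tolerating the race where another caller creates it first.
func ensureClusterRole(c kubernetes.Interface, role *rbacv1.ClusterRole) error {
	_, err := c.RbacV1().ClusterRoles().Get(context.TODO(), role.Name, metav1.GetOptions{})
	if err == nil {
		return nil // already present
	}
	if !apierrors.IsNotFound(err) {
		return err
	}
	_, err = c.RbacV1().ClusterRoles().Create(context.TODO(), role)
	if apierrors.IsAlreadyExists(err) {
		return nil // lost the race, but the role exists
	}
	return err
}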

@@ -17,6 +17,7 @@ limitations under the License.
package framework
import (
"context"
"fmt"
"time"
@@ -185,7 +186,7 @@ func PVPVCMapCleanup(c clientset.Interface, ns string, pvols PVMap, claims PVCMa
func DeletePersistentVolume(c clientset.Interface, pvName string) error {
if c != nil && len(pvName) > 0 {
framework.Logf("Deleting PersistentVolume %q", pvName)
err := c.CoreV1().PersistentVolumes().Delete(pvName, nil)
err := c.CoreV1().PersistentVolumes().Delete(context.TODO(), pvName, nil)
if err != nil && !apierrors.IsNotFound(err) {
return fmt.Errorf("PV Delete API error: %v", err)
}
@@ -197,7 +198,7 @@ func DeletePersistentVolume(c clientset.Interface, pvName string) error {
func DeletePersistentVolumeClaim(c clientset.Interface, pvcName string, ns string) error {
if c != nil && len(pvcName) > 0 {
framework.Logf("Deleting PersistentVolumeClaim %q", pvcName)
err := c.CoreV1().PersistentVolumeClaims(ns).Delete(pvcName, nil)
err := c.CoreV1().PersistentVolumeClaims(ns).Delete(context.TODO(), pvcName, nil)
if err != nil && !apierrors.IsNotFound(err) {
return fmt.Errorf("PVC Delete API error: %v", err)
}
@@ -224,7 +225,7 @@ func DeletePVCandValidatePV(c clientset.Interface, ns string, pvc *v1.Persistent
}
// examine the pv's ClaimRef and UID and compare to expected values
pv, err = c.CoreV1().PersistentVolumes().Get(pv.Name, metav1.GetOptions{})
pv, err = c.CoreV1().PersistentVolumes().Get(context.TODO(), pv.Name, metav1.GetOptions{})
if err != nil {
return fmt.Errorf("PV Get API error: %v", err)
}
@@ -255,7 +256,7 @@ func DeletePVCandValidatePVGroup(c clientset.Interface, ns string, pvols PVMap,
var boundPVs, deletedPVCs int
for pvName := range pvols {
pv, err := c.CoreV1().PersistentVolumes().Get(pvName, metav1.GetOptions{})
pv, err := c.CoreV1().PersistentVolumes().Get(context.TODO(), pvName, metav1.GetOptions{})
if err != nil {
return fmt.Errorf("PV Get API error: %v", err)
}
@@ -270,7 +271,7 @@ func DeletePVCandValidatePVGroup(c clientset.Interface, ns string, pvols PVMap,
return fmt.Errorf("internal: claims map is missing pvc %q", pvcKey)
}
// get the pvc for the delete call below
pvc, err := c.CoreV1().PersistentVolumeClaims(ns).Get(cr.Name, metav1.GetOptions{})
pvc, err := c.CoreV1().PersistentVolumeClaims(ns).Get(context.TODO(), cr.Name, metav1.GetOptions{})
if err == nil {
if err = DeletePVCandValidatePV(c, ns, pvc, pv, expectPVPhase); err != nil {
return err
@@ -292,7 +293,7 @@ func DeletePVCandValidatePVGroup(c clientset.Interface, ns string, pvols PVMap,
// create the PV resource. Fails test on error.
func createPV(c clientset.Interface, pv *v1.PersistentVolume) (*v1.PersistentVolume, error) {
pv, err := c.CoreV1().PersistentVolumes().Create(pv)
pv, err := c.CoreV1().PersistentVolumes().Create(context.TODO(), pv)
if err != nil {
return nil, fmt.Errorf("PV Create API error: %v", err)
}
@@ -306,7 +307,7 @@ func CreatePV(c clientset.Interface, pv *v1.PersistentVolume) (*v1.PersistentVol
// CreatePVC creates the PVC resource. Fails test on error.
func CreatePVC(c clientset.Interface, ns string, pvc *v1.PersistentVolumeClaim) (*v1.PersistentVolumeClaim, error) {
pvc, err := c.CoreV1().PersistentVolumeClaims(ns).Create(pvc)
pvc, err := c.CoreV1().PersistentVolumeClaims(ns).Create(context.TODO(), pvc)
if err != nil {
return nil, fmt.Errorf("PVC Create API error: %v", err)
}
@@ -446,11 +447,11 @@ func WaitOnPVandPVC(c clientset.Interface, ns string, pv *v1.PersistentVolume, p
}
// Re-get the pv and pvc objects
pv, err = c.CoreV1().PersistentVolumes().Get(pv.Name, metav1.GetOptions{})
pv, err = c.CoreV1().PersistentVolumes().Get(context.TODO(), pv.Name, metav1.GetOptions{})
if err != nil {
return fmt.Errorf("PV Get API error: %v", err)
}
pvc, err = c.CoreV1().PersistentVolumeClaims(ns).Get(pvc.Name, metav1.GetOptions{})
pvc, err = c.CoreV1().PersistentVolumeClaims(ns).Get(context.TODO(), pvc.Name, metav1.GetOptions{})
if err != nil {
return fmt.Errorf("PVC Get API error: %v", err)
}
@@ -496,7 +497,7 @@ func WaitAndVerifyBinds(c clientset.Interface, ns string, pvols PVMap, claims PV
return fmt.Errorf("PV %q did not become Bound: %v", pvName, err)
}
pv, err := c.CoreV1().PersistentVolumes().Get(pvName, metav1.GetOptions{})
pv, err := c.CoreV1().PersistentVolumes().Get(context.TODO(), pvName, metav1.GetOptions{})
if err != nil {
return fmt.Errorf("PV Get API error: %v", err)
}
@@ -688,12 +689,12 @@ func WaitForPVClaimBoundPhase(client clientset.Interface, pvclaims []*v1.Persist
return persistentvolumes, err
}
// Get new copy of the claim
claim, err = client.CoreV1().PersistentVolumeClaims(claim.Namespace).Get(claim.Name, metav1.GetOptions{})
claim, err = client.CoreV1().PersistentVolumeClaims(claim.Namespace).Get(context.TODO(), claim.Name, metav1.GetOptions{})
if err != nil {
return persistentvolumes, fmt.Errorf("PVC Get API error: %v", err)
}
// Get the bounded PV
persistentvolumes[index], err = client.CoreV1().PersistentVolumes().Get(claim.Spec.VolumeName, metav1.GetOptions{})
persistentvolumes[index], err = client.CoreV1().PersistentVolumes().Get(context.TODO(), claim.Spec.VolumeName, metav1.GetOptions{})
if err != nil {
return persistentvolumes, fmt.Errorf("PV Get API error: %v", err)
}
@@ -705,7 +706,7 @@ func WaitForPVClaimBoundPhase(client clientset.Interface, pvclaims []*v1.Persist
func WaitForPersistentVolumePhase(phase v1.PersistentVolumePhase, c clientset.Interface, pvName string, Poll, timeout time.Duration) error {
framework.Logf("Waiting up to %v for PersistentVolume %s to have phase %s", timeout, pvName, phase)
for start := time.Now(); time.Since(start) < timeout; time.Sleep(Poll) {
pv, err := c.CoreV1().PersistentVolumes().Get(pvName, metav1.GetOptions{})
pv, err := c.CoreV1().PersistentVolumes().Get(context.TODO(), pvName, metav1.GetOptions{})
if err != nil {
framework.Logf("Get persistent volume %s in failed, ignoring for %v: %v", pvName, Poll, err)
continue
@@ -734,7 +735,7 @@ func WaitForPersistentVolumeClaimsPhase(phase v1.PersistentVolumeClaimPhase, c c
for start := time.Now(); time.Since(start) < timeout; time.Sleep(Poll) {
phaseFoundInAllClaims := true
for _, pvcName := range pvcNames {
pvc, err := c.CoreV1().PersistentVolumeClaims(ns).Get(pvcName, metav1.GetOptions{})
pvc, err := c.CoreV1().PersistentVolumeClaims(ns).Get(context.TODO(), pvcName, metav1.GetOptions{})
if err != nil {
framework.Logf("Failed to get claim %q, retrying in %v. Error: %v", pvcName, Poll, err)
continue
@@ -772,7 +773,7 @@ func DeletePVSource(pvSource *v1.PersistentVolumeSource) error {
// GetDefaultStorageClassName returns the name of the default StorageClass, or an error.
func GetDefaultStorageClassName(c clientset.Interface) (string, error) {
list, err := c.StorageV1().StorageClasses().List(metav1.ListOptions{})
list, err := c.StorageV1().StorageClasses().List(context.TODO(), metav1.ListOptions{})
if err != nil {
return "", fmt.Errorf("Error listing storage classes: %v", err)
}
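
The delete helpers above all treat a missing object as success. Extracted into a standalone sketch with the Delete signature used in this diff (context first, then name, then a *metav1.DeleteOptions, here nil); the function name is illustrative.

package example

import (
	"context"

	apierrors "k8s.io/apimachinery/pkg/api/errors"
	"k8s.io/client-go/kubernetes"
)

// deletePVCIfExists deletes a claim and treats "not found" as success,
// so cleanup paths stay idempotent.
func deletePVCIfExists(c kubernetes.Interface, ns, name string) error {
	err := c.CoreV1().PersistentVolumeClaims(ns).Delete(context.TODO(), name, nil)
	if err != nil && !apierrors.IsNotFound(err) {
		return err
	}
	return nil
}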


@@ -17,6 +17,7 @@ limitations under the License.
package replicaset
import (
"context"
"fmt"
"time"
@@ -30,7 +31,7 @@ import (
// WaitForReadyReplicaSet waits until the replicaset has all of its replicas ready.
func WaitForReadyReplicaSet(c clientset.Interface, ns, name string) error {
err := wait.Poll(framework.Poll, framework.PollShortTimeout, func() (bool, error) {
rs, err := c.AppsV1().ReplicaSets(ns).Get(name, metav1.GetOptions{})
rs, err := c.AppsV1().ReplicaSets(ns).Get(context.TODO(), name, metav1.GetOptions{})
if err != nil {
return false, err
}
@@ -52,7 +53,7 @@ func WaitForReplicaSetTargetAvailableReplicas(c clientset.Interface, replicaSet
func WaitForReplicaSetTargetAvailableReplicasWithTimeout(c clientset.Interface, replicaSet *appsv1.ReplicaSet, targetReplicaNum int32, timeout time.Duration) error {
desiredGeneration := replicaSet.Generation
err := wait.PollImmediate(framework.Poll, timeout, func() (bool, error) {
rs, err := c.AppsV1().ReplicaSets(replicaSet.Namespace).Get(replicaSet.Name, metav1.GetOptions{})
rs, err := c.AppsV1().ReplicaSets(replicaSet.Namespace).Get(context.TODO(), replicaSet.Name, metav1.GetOptions{})
if err != nil {
return false, err
}
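
WaitForReadyReplicaSet ultimately polls a readiness predicate over a freshly fetched object. A hedged sketch of one way to express such a predicate (not the framework's exact check); the function name is illustrative.

package example

import (
	"context"

	appsv1 "k8s.io/api/apps/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

// replicaSetReady re-reads the ReplicaSet and reports whether the
// controller has observed the latest spec and all desired replicas are ready.
func replicaSetReady(c kubernetes.Interface, rs *appsv1.ReplicaSet) (bool, error) {
	fresh, err := c.AppsV1().ReplicaSets(rs.Namespace).Get(context.TODO(), rs.Name, metav1.GetOptions{})
	if err != nil {
		return false, err
	}
	if fresh.Spec.Replicas == nil {
		return false, nil // defaulted by the API server in practice
	}
	return fresh.Status.ObservedGeneration >= fresh.Generation &&
		fresh.Status.ReadyReplicas == *fresh.Spec.Replicas, nil
}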


@@ -17,6 +17,7 @@ limitations under the License.
package resource
import (
"context"
"fmt"
appsv1 "k8s.io/api/apps/v1"
@@ -39,15 +40,15 @@ import (
func GetRuntimeObjectForKind(c clientset.Interface, kind schema.GroupKind, ns, name string) (runtime.Object, error) {
switch kind {
case api.Kind("ReplicationController"):
return c.CoreV1().ReplicationControllers(ns).Get(name, metav1.GetOptions{})
return c.CoreV1().ReplicationControllers(ns).Get(context.TODO(), name, metav1.GetOptions{})
case extensionsinternal.Kind("ReplicaSet"), appsinternal.Kind("ReplicaSet"):
return c.AppsV1().ReplicaSets(ns).Get(name, metav1.GetOptions{})
return c.AppsV1().ReplicaSets(ns).Get(context.TODO(), name, metav1.GetOptions{})
case extensionsinternal.Kind("Deployment"), appsinternal.Kind("Deployment"):
return c.AppsV1().Deployments(ns).Get(name, metav1.GetOptions{})
return c.AppsV1().Deployments(ns).Get(context.TODO(), name, metav1.GetOptions{})
case extensionsinternal.Kind("DaemonSet"):
return c.AppsV1().DaemonSets(ns).Get(name, metav1.GetOptions{})
return c.AppsV1().DaemonSets(ns).Get(context.TODO(), name, metav1.GetOptions{})
case batchinternal.Kind("Job"):
return c.BatchV1().Jobs(ns).Get(name, metav1.GetOptions{})
return c.BatchV1().Jobs(ns).Get(context.TODO(), name, metav1.GetOptions{})
default:
return nil, fmt.Errorf("Unsupported kind when getting runtime object: %v", kind)
}


@@ -397,7 +397,7 @@ func NewResourceUsageGatherer(c clientset.Interface, options ResourceGathererOpt
// Tracks kube-system pods if no valid PodList is passed in.
var err error
if pods == nil {
pods, err = c.CoreV1().Pods("kube-system").List(metav1.ListOptions{})
pods, err = c.CoreV1().Pods("kube-system").List(context.TODO(), metav1.ListOptions{})
if err != nil {
Logf("Error while listing Pods: %v", err)
return nil, err
@@ -421,7 +421,7 @@ func NewResourceUsageGatherer(c clientset.Interface, options ResourceGathererOpt
dnsNodes[pod.Spec.NodeName] = true
}
}
nodeList, err := c.CoreV1().Nodes().List(metav1.ListOptions{})
nodeList, err := c.CoreV1().Nodes().List(context.TODO(), metav1.ListOptions{})
if err != nil {
Logf("Error while listing Nodes: %v", err)
return nil, err


@@ -17,6 +17,7 @@ limitations under the License.
package security
import (
"context"
"fmt"
v1 "k8s.io/api/core/v1"
@@ -119,7 +120,7 @@ done`, testCmd)
framework.ExpectNoError(e2epod.WaitForPodSuccessInNamespace(
clientset, pod.Name, nsName))
var err error
pod, err = podClient.Get(pod.Name, metav1.GetOptions{})
pod, err = podClient.Get(context.TODO(), pod.Name, metav1.GetOptions{})
framework.ExpectNoError(err)
} else {
pod = podClient.CreateSync(pod)
@@ -155,7 +156,7 @@ profile %s flags=(attach_disconnected) {
profileName: profile,
},
}
_, err := clientset.CoreV1().ConfigMaps(nsName).Create(cm)
_, err := clientset.CoreV1().ConfigMaps(nsName).Create(context.TODO(), cm)
framework.ExpectNoError(err, "Failed to create apparmor-profiles ConfigMap")
}
@@ -223,7 +224,7 @@ func createAppArmorProfileLoader(nsName string, clientset clientset.Interface) {
},
},
}
_, err := clientset.CoreV1().ReplicationControllers(nsName).Create(loader)
_, err := clientset.CoreV1().ReplicationControllers(nsName).Create(context.TODO(), loader)
framework.ExpectNoError(err, "Failed to create apparmor-loader ReplicationController")
// Wait for loader to be ready.


@@ -17,6 +17,7 @@ limitations under the License.
package service
import (
"context"
"fmt"
"net"
"regexp"
@@ -105,7 +106,7 @@ func (j *TestJig) CreateTCPServiceWithPort(tweak func(svc *v1.Service), port int
if tweak != nil {
tweak(svc)
}
result, err := j.Client.CoreV1().Services(j.Namespace).Create(svc)
result, err := j.Client.CoreV1().Services(j.Namespace).Create(context.TODO(), svc)
if err != nil {
return nil, fmt.Errorf("failed to create TCP Service %q: %v", svc.Name, err)
}
@@ -120,7 +121,7 @@ func (j *TestJig) CreateTCPService(tweak func(svc *v1.Service)) (*v1.Service, er
if tweak != nil {
tweak(svc)
}
result, err := j.Client.CoreV1().Services(j.Namespace).Create(svc)
result, err := j.Client.CoreV1().Services(j.Namespace).Create(context.TODO(), svc)
if err != nil {
return nil, fmt.Errorf("failed to create TCP Service %q: %v", svc.Name, err)
}
@@ -135,7 +136,7 @@ func (j *TestJig) CreateUDPService(tweak func(svc *v1.Service)) (*v1.Service, er
if tweak != nil {
tweak(svc)
}
result, err := j.Client.CoreV1().Services(j.Namespace).Create(svc)
result, err := j.Client.CoreV1().Services(j.Namespace).Create(context.TODO(), svc)
if err != nil {
return nil, fmt.Errorf("failed to create UDP Service %q: %v", svc.Name, err)
}
@@ -160,7 +161,7 @@ func (j *TestJig) CreateExternalNameService(tweak func(svc *v1.Service)) (*v1.Se
if tweak != nil {
tweak(svc)
}
result, err := j.Client.CoreV1().Services(j.Namespace).Create(svc)
result, err := j.Client.CoreV1().Services(j.Namespace).Create(context.TODO(), svc)
if err != nil {
return nil, fmt.Errorf("failed to create ExternalName Service %q: %v", svc.Name, err)
}
@@ -252,7 +253,7 @@ func (j *TestJig) CreateLoadBalancerService(timeout time.Duration, tweak func(sv
if tweak != nil {
tweak(svc)
}
_, err := j.Client.CoreV1().Services(j.Namespace).Create(svc)
_, err := j.Client.CoreV1().Services(j.Namespace).Create(context.TODO(), svc)
if err != nil {
return nil, fmt.Errorf("failed to create LoadBalancer Service %q: %v", svc.Name, err)
}
@@ -284,7 +285,7 @@ func (j *TestJig) GetEndpointNodes() (map[string][]string, error) {
// GetEndpointNodeNames returns a string set of node names on which the
// endpoints of the given Service are running.
func (j *TestJig) GetEndpointNodeNames() (sets.String, error) {
endpoints, err := j.Client.CoreV1().Endpoints(j.Namespace).Get(j.Name, metav1.GetOptions{})
endpoints, err := j.Client.CoreV1().Endpoints(j.Namespace).Get(context.TODO(), j.Name, metav1.GetOptions{})
if err != nil {
return nil, fmt.Errorf("get endpoints for service %s/%s failed (%s)", j.Namespace, j.Name, err)
}
@@ -305,7 +306,7 @@ func (j *TestJig) GetEndpointNodeNames() (sets.String, error) {
// WaitForEndpointOnNode waits for a service endpoint on the given node.
func (j *TestJig) WaitForEndpointOnNode(nodeName string) error {
return wait.PollImmediate(framework.Poll, LoadBalancerPropagationTimeoutDefault, func() (bool, error) {
endpoints, err := j.Client.CoreV1().Endpoints(j.Namespace).Get(j.Name, metav1.GetOptions{})
endpoints, err := j.Client.CoreV1().Endpoints(j.Namespace).Get(context.TODO(), j.Name, metav1.GetOptions{})
if err != nil {
framework.Logf("Get endpoints for service %s/%s failed (%s)", j.Namespace, j.Name, err)
return false, nil
@@ -340,12 +341,12 @@ func (j *TestJig) WaitForAvailableEndpoint(timeout time.Duration) error {
&cache.ListWatch{
ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
options.FieldSelector = endpointSelector.String()
obj, err := j.Client.CoreV1().Endpoints(j.Namespace).List(options)
obj, err := j.Client.CoreV1().Endpoints(j.Namespace).List(context.TODO(), options)
return runtime.Object(obj), err
},
WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
options.FieldSelector = endpointSelector.String()
return j.Client.CoreV1().Endpoints(j.Namespace).Watch(options)
return j.Client.CoreV1().Endpoints(j.Namespace).Watch(context.TODO(), options)
},
},
&v1.Endpoints{},
@@ -437,12 +438,12 @@ func (j *TestJig) sanityCheckService(svc *v1.Service, svcType v1.ServiceType) (*
// face of timeouts and conflicts.
func (j *TestJig) UpdateService(update func(*v1.Service)) (*v1.Service, error) {
for i := 0; i < 3; i++ {
service, err := j.Client.CoreV1().Services(j.Namespace).Get(j.Name, metav1.GetOptions{})
service, err := j.Client.CoreV1().Services(j.Namespace).Get(context.TODO(), j.Name, metav1.GetOptions{})
if err != nil {
return nil, fmt.Errorf("failed to get Service %q: %v", j.Name, err)
}
update(service)
result, err := j.Client.CoreV1().Services(j.Namespace).Update(service)
result, err := j.Client.CoreV1().Services(j.Namespace).Update(context.TODO(), service)
if err == nil {
return j.sanityCheckService(result, service.Spec.Type)
}
@@ -534,7 +535,7 @@ func (j *TestJig) WaitForLoadBalancerDestroy(ip string, port int, timeout time.D
func (j *TestJig) waitForCondition(timeout time.Duration, message string, conditionFn func(*v1.Service) bool) (*v1.Service, error) {
var service *v1.Service
pollFunc := func() (bool, error) {
svc, err := j.Client.CoreV1().Services(j.Namespace).Get(j.Name, metav1.GetOptions{})
svc, err := j.Client.CoreV1().Services(j.Namespace).Get(context.TODO(), j.Name, metav1.GetOptions{})
if err != nil {
return false, err
}
@@ -618,7 +619,7 @@ func (j *TestJig) AddRCAntiAffinity(rc *v1.ReplicationController) {
// CreatePDB returns a PodDisruptionBudget for the given ReplicationController, or returns an error if a PodDisruptionBudget isn't ready
func (j *TestJig) CreatePDB(rc *v1.ReplicationController) (*policyv1beta1.PodDisruptionBudget, error) {
pdb := j.newPDBTemplate(rc)
newPdb, err := j.Client.PolicyV1beta1().PodDisruptionBudgets(j.Namespace).Create(pdb)
newPdb, err := j.Client.PolicyV1beta1().PodDisruptionBudgets(j.Namespace).Create(context.TODO(), pdb)
if err != nil {
return nil, fmt.Errorf("failed to create PDB %q %v", pdb.Name, err)
}
@@ -658,7 +659,7 @@ func (j *TestJig) Run(tweak func(rc *v1.ReplicationController)) (*v1.Replication
if tweak != nil {
tweak(rc)
}
result, err := j.Client.CoreV1().ReplicationControllers(j.Namespace).Create(rc)
result, err := j.Client.CoreV1().ReplicationControllers(j.Namespace).Create(context.TODO(), rc)
if err != nil {
return nil, fmt.Errorf("failed to create RC %q: %v", rc.Name, err)
}
@@ -675,14 +676,14 @@ func (j *TestJig) Run(tweak func(rc *v1.ReplicationController)) (*v1.Replication
// Scale scales pods to the given replicas
func (j *TestJig) Scale(replicas int) error {
rc := j.Name
scale, err := j.Client.CoreV1().ReplicationControllers(j.Namespace).GetScale(rc, metav1.GetOptions{})
scale, err := j.Client.CoreV1().ReplicationControllers(j.Namespace).GetScale(context.TODO(), rc, metav1.GetOptions{})
if err != nil {
return fmt.Errorf("failed to get scale for RC %q: %v", rc, err)
}
scale.ResourceVersion = "" // indicate the scale update should be unconditional
scale.Spec.Replicas = int32(replicas)
_, err = j.Client.CoreV1().ReplicationControllers(j.Namespace).UpdateScale(rc, scale)
_, err = j.Client.CoreV1().ReplicationControllers(j.Namespace).UpdateScale(context.TODO(), rc, scale)
if err != nil {
return fmt.Errorf("failed to scale RC %q: %v", rc, err)
}
@@ -699,7 +700,7 @@ func (j *TestJig) Scale(replicas int) error {
func (j *TestJig) waitForPdbReady() error {
timeout := 2 * time.Minute
for start := time.Now(); time.Since(start) < timeout; time.Sleep(2 * time.Second) {
pdb, err := j.Client.PolicyV1beta1().PodDisruptionBudgets(j.Namespace).Get(j.Name, metav1.GetOptions{})
pdb, err := j.Client.PolicyV1beta1().PodDisruptionBudgets(j.Namespace).Get(context.TODO(), j.Name, metav1.GetOptions{})
if err != nil {
return err
}
@@ -718,7 +719,7 @@ func (j *TestJig) waitForPodsCreated(replicas int) ([]string, error) {
framework.Logf("Waiting up to %v for %d pods to be created", timeout, replicas)
for start := time.Now(); time.Since(start) < timeout; time.Sleep(2 * time.Second) {
options := metav1.ListOptions{LabelSelector: label.String()}
pods, err := j.Client.CoreV1().Pods(j.Namespace).List(options)
pods, err := j.Client.CoreV1().Pods(j.Namespace).List(context.TODO(), options)
if err != nil {
return nil, err
}
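
The ListFunc/WatchFunc closures above show how informer plumbing adopts the new signatures: the options come from the cache machinery and the context is supplied inside each closure. A minimal sketch of such a ListWatch scoped to a single Endpoints object; the field selector and function name are illustrative.

package example

import (
	"context"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/fields"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/watch"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/cache"
)

// endpointsListWatch builds a ListWatch scoped to one Endpoints object;
// context.TODO() is injected inside the closures, as the test jig does above.
func endpointsListWatch(c kubernetes.Interface, ns, name string) *cache.ListWatch {
	selector := fields.OneTermEqualSelector("metadata.name", name)
	return &cache.ListWatch{
		ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
			options.FieldSelector = selector.String()
			return c.CoreV1().Endpoints(ns).List(context.TODO(), options)
		},
		WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
			options.FieldSelector = selector.String()
			return c.CoreV1().Endpoints(ns).Watch(context.TODO(), options)
		},
	}
}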
