From 01d916167b92d3be2408822a1eadf832ee270f8c Mon Sep 17 00:00:00 2001 From: David Zhu Date: Fri, 9 Mar 2018 10:57:50 -0800 Subject: [PATCH 1/2] Add GCE-PD CSI Driver test to E2E test suite --- test/e2e/framework/util.go | 16 ++++ test/e2e/manifest/manifest.go | 17 +++++ test/e2e/storage/BUILD | 3 +- .../storage/{csi_hostpath.go => csi_defs.go} | 58 ++++++++++++++ test/e2e/storage/csi_volumes.go | 64 +++++++++++++++- .../gce-pd/controller_service.yaml | 13 ++++ .../storage-csi/gce-pd/controller_ss.yaml | 75 +++++++++++++++++++ .../storage-csi/gce-pd/node_ds.yaml | 72 ++++++++++++++++++ 8 files changed, 315 insertions(+), 3 deletions(-) rename test/e2e/storage/{csi_hostpath.go => csi_defs.go} (67%) create mode 100644 test/e2e/testing-manifests/storage-csi/gce-pd/controller_service.yaml create mode 100644 test/e2e/testing-manifests/storage-csi/gce-pd/controller_ss.yaml create mode 100644 test/e2e/testing-manifests/storage-csi/gce-pd/node_ds.yaml diff --git a/test/e2e/framework/util.go b/test/e2e/framework/util.go index 9c146806b16..38c1ebf4ca8 100644 --- a/test/e2e/framework/util.go +++ b/test/e2e/framework/util.go @@ -376,6 +376,22 @@ func SkipUnlessNodeOSDistroIs(supportedNodeOsDistros ...string) { } } +func SkipUnlessSecretExistsAfterWait(c clientset.Interface, name, namespace string, timeout time.Duration) { + Logf("Waiting for secret %v in namespace %v to exist in duration %v", name, namespace, timeout) + start := time.Now() + if wait.PollImmediate(15*time.Second, timeout, func() (bool, error) { + _, err := c.CoreV1().Secrets(namespace).Get(name, metav1.GetOptions{}) + if err != nil { + Logf("Secret %v in namespace %v still does not exist after duration %v", name, namespace, time.Since(start)) + return false, nil + } + return true, nil + }) != nil { + Skipf("Secret %v in namespace %v did not exist after timeout of %v", name, namespace, timeout) + } + Logf("Secret %v in namespace %v found after duration %v", name, namespace, time.Since(start)) +} + func 
SkipIfContainerRuntimeIs(runtimes ...string) { for _, runtime := range runtimes { if runtime == TestContext.ContainerRuntime { diff --git a/test/e2e/manifest/manifest.go b/test/e2e/manifest/manifest.go index 1bd78e32703..bc7d111e8fd 100644 --- a/test/e2e/manifest/manifest.go +++ b/test/e2e/manifest/manifest.go @@ -125,3 +125,20 @@ func StatefulSetFromManifest(fileName, ns string) (*apps.StatefulSet, error) { } return &ss, nil } + +// DaemonSetFromManifest returns a DaemonSet from a manifest stored in fileName in the Namespace indicated by ns. +func DaemonSetFromManifest(fileName, ns string) (*apps.DaemonSet, error) { + var ds apps.DaemonSet + data := generated.ReadOrDie(fileName) + + json, err := utilyaml.ToJSON(data) + if err != nil { + return nil, err + } + err = runtime.DecodeInto(legacyscheme.Codecs.UniversalDecoder(), json, &ds) + if err != nil { + return nil, err + } + ds.Namespace = ns + return &ds, nil +} diff --git a/test/e2e/storage/BUILD b/test/e2e/storage/BUILD index dd375081265..099cdc855e3 100644 --- a/test/e2e/storage/BUILD +++ b/test/e2e/storage/BUILD @@ -3,7 +3,7 @@ load("@io_bazel_rules_go//go:def.bzl", "go_library") go_library( name = "go_default_library", srcs = [ - "csi_hostpath.go", + "csi_defs.go", "csi_volumes.go", "empty_dir_wrapper.go", "flexvolume.go", @@ -39,6 +39,7 @@ go_library( "//test/e2e/framework:go_default_library", "//test/e2e/framework/metrics:go_default_library", "//test/e2e/generated:go_default_library", + "//test/e2e/manifest:go_default_library", "//test/e2e/storage/utils:go_default_library", "//test/e2e/storage/vsphere:go_default_library", "//test/utils/image:go_default_library", diff --git a/test/e2e/storage/csi_hostpath.go b/test/e2e/storage/csi_defs.go similarity index 67% rename from test/e2e/storage/csi_hostpath.go rename to test/e2e/storage/csi_defs.go index bddc8dc6aa2..8e956bdb681 100644 --- a/test/e2e/storage/csi_hostpath.go +++ b/test/e2e/storage/csi_defs.go @@ -21,10 +21,12 @@ package storage import ( 
"k8s.io/api/core/v1" + apierrs "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" clientset "k8s.io/client-go/kubernetes" "k8s.io/kubernetes/test/e2e/framework" + "k8s.io/kubernetes/test/e2e/manifest" ) const ( @@ -197,3 +199,59 @@ func csiHostPathPod( framework.ExpectNoError(framework.WaitForPodRunningInNamespace(client, ret)) return ret } + +func csiGCEPDSetup( + client clientset.Interface, + config framework.VolumeTestConfig, + teardown bool, + f *framework.Framework, + nodeSA *v1.ServiceAccount, + controllerSA *v1.ServiceAccount, +) { + // Get API Objects from manifests + nodeds, err := manifest.DaemonSetFromManifest("test/e2e/testing-manifests/storage-csi/gce-pd/node_ds.yaml", config.Namespace) + framework.ExpectNoError(err, "Failed to create DaemonSet from manifest") + nodeds.Spec.Template.Spec.ServiceAccountName = nodeSA.GetName() + + controllerss, err := manifest.StatefulSetFromManifest("test/e2e/testing-manifests/storage-csi/gce-pd/controller_ss.yaml", config.Namespace) + framework.ExpectNoError(err, "Failed to create StatefulSet from manifest") + controllerss.Spec.Template.Spec.ServiceAccountName = controllerSA.GetName() + + controllerservice, err := manifest.SvcFromManifest("test/e2e/testing-manifests/storage-csi/gce-pd/controller_service.yaml") + framework.ExpectNoError(err, "Failed to create Service from manifest") + + // Got all objects from manifests now try to delete objects + err = client.CoreV1().Services(config.Namespace).Delete(controllerservice.GetName(), nil) + if err != nil { + if !apierrs.IsNotFound(err) { + framework.ExpectNoError(err, "Failed to delete Service: %v", controllerservice.GetName()) + } + } + + err = client.AppsV1().StatefulSets(config.Namespace).Delete(controllerss.Name, nil) + if err != nil { + if !apierrs.IsNotFound(err) { + framework.ExpectNoError(err, "Failed to delete StatefulSet: %v", controllerss.GetName()) + } + } + err = client.AppsV1().DaemonSets(config.Namespace).Delete(nodeds.Name, 
nil) + if err != nil { + if !apierrs.IsNotFound(err) { + framework.ExpectNoError(err, "Failed to delete DaemonSet: %v", nodeds.GetName()) + } + } + if teardown { + return + } + + // Create new API Objects through client + _, err = client.CoreV1().Services(config.Namespace).Create(controllerservice) + framework.ExpectNoError(err, "Failed to create Service: %v", controllerservice.Name) + + _, err = client.AppsV1().StatefulSets(config.Namespace).Create(controllerss) + framework.ExpectNoError(err, "Failed to create StatefulSet: %v", controllerss.Name) + + _, err = client.AppsV1().DaemonSets(config.Namespace).Create(nodeds) + framework.ExpectNoError(err, "Failed to create DaemonSet: %v", nodeds.Name) + +} diff --git a/test/e2e/storage/csi_volumes.go b/test/e2e/storage/csi_volumes.go index 76adbe93cf9..170fa9d2a45 100644 --- a/test/e2e/storage/csi_volumes.go +++ b/test/e2e/storage/csi_volumes.go @@ -17,6 +17,7 @@ limitations under the License. package storage import ( + "fmt" "math/rand" "time" @@ -28,10 +29,12 @@ import ( clientset "k8s.io/client-go/kubernetes" restclient "k8s.io/client-go/rest" + kubeletapis "k8s.io/kubernetes/pkg/kubelet/apis" "k8s.io/kubernetes/test/e2e/framework" "k8s.io/kubernetes/test/e2e/storage/utils" . "github.com/onsi/ginkgo" + . 
"github.com/onsi/gomega" ) const ( @@ -95,7 +98,7 @@ func csiServiceAccount( componentName string, teardown bool, ) *v1.ServiceAccount { - By("Creating a CSI service account") + By(fmt.Sprintf("Creating a CSI service account for %v", componentName)) serviceAccountName := config.Prefix + "-" + componentName + "-service-account" serviceAccountClient := client.CoreV1().ServiceAccounts(config.Namespace) sa := &v1.ServiceAccount{ @@ -130,7 +133,7 @@ func csiClusterRoleBindings( sa *v1.ServiceAccount, clusterRolesNames []string, ) { - By("Binding cluster roles to the CSI service account") + By(fmt.Sprintf("Binding cluster roles %v to the CSI service account %v", clusterRolesNames, sa.GetName())) clusterRoleBindingClient := client.RbacV1().ClusterRoleBindings() for _, clusterRoleName := range clusterRolesNames { @@ -237,4 +240,61 @@ var _ = utils.SIGDescribe("CSI Volumes [Flaky]", func() { testDynamicProvisioning(t, cs, claim, class) }) }) + + Describe("[Feature: CSI] Sanity CSI plugin test using GCE-PD CSI driver", func() { + var ( + controllerClusterRoles []string = []string{ + csiExternalAttacherClusterRoleName, + csiExternalProvisionerClusterRoleName, + } + nodeClusterRoles []string = []string{ + csiDriverRegistrarClusterRoleName, + } + controllerServiceAccount *v1.ServiceAccount + nodeServiceAccount *v1.ServiceAccount + ) + + BeforeEach(func() { + framework.SkipUnlessProviderIs("gce", "gke") + // Currently you will need to manually add the required GCP Credentials as a secret "cloud-sa" + // kubectl create generic cloud-sa --from-file=PATH/TO/cloud-sa.json --namespace={{config.Namespace}} + // TODO(GITHUBISSUE): Inject the necessary credentials automatically to the driver containers in e2e test + framework.SkipUnlessSecretExistsAfterWait(cs, "cloud-sa", config.Namespace, 3*time.Minute) + + By("deploying gce-pd driver") + controllerServiceAccount = csiServiceAccount(cs, config, "gce-controller", false) + nodeServiceAccount = csiServiceAccount(cs, config, "gce-node", 
false) + csiClusterRoleBindings(cs, config, false, controllerServiceAccount, controllerClusterRoles) + csiClusterRoleBindings(cs, config, false, nodeServiceAccount, nodeClusterRoles) + csiGCEPDSetup(cs, config, false, f, nodeServiceAccount, controllerServiceAccount) + }) + + AfterEach(func() { + By("uninstalling gce-pd driver") + csiGCEPDSetup(cs, config, true, f, nodeServiceAccount, controllerServiceAccount) + csiClusterRoleBindings(cs, config, true, controllerServiceAccount, controllerClusterRoles) + csiClusterRoleBindings(cs, config, true, nodeServiceAccount, nodeClusterRoles) + csiServiceAccount(cs, config, "gce-controller", true) + csiServiceAccount(cs, config, "gce-node", true) + }) + + It("should provision storage with a GCE-PD CSI driver", func() { + nodeZone, ok := node.GetLabels()[kubeletapis.LabelZoneFailureDomain] + Expect(ok).To(BeTrue(), "Could not get label %v from node %v", kubeletapis.LabelZoneFailureDomain, node.GetName()) + t := storageClassTest{ + name: "csi-gce-pd", + provisioner: "csi-gce-pd", + parameters: map[string]string{"type": "pd-standard", "zone": nodeZone}, + claimSize: "5Gi", + expectedSize: "5Gi", + nodeName: node.Name, + } + + claim := newClaim(t, ns.GetName(), "") + class := newStorageClass(t, ns.GetName(), "") + claim.Spec.StorageClassName = &class.ObjectMeta.Name + testDynamicProvisioning(t, cs, claim, class) + }) + + }) }) diff --git a/test/e2e/testing-manifests/storage-csi/gce-pd/controller_service.yaml b/test/e2e/testing-manifests/storage-csi/gce-pd/controller_service.yaml new file mode 100644 index 00000000000..7142ceb56a4 --- /dev/null +++ b/test/e2e/testing-manifests/storage-csi/gce-pd/controller_service.yaml @@ -0,0 +1,13 @@ +kind: Service +apiVersion: v1 +metadata: + name: csi-gce-pd + labels: + app: csi-gce-pd +spec: + selector: + app: csi-gce-pd + ports: + - name: dummy + port: 12345 + \ No newline at end of file diff --git a/test/e2e/testing-manifests/storage-csi/gce-pd/controller_ss.yaml 
b/test/e2e/testing-manifests/storage-csi/gce-pd/controller_ss.yaml new file mode 100644 index 00000000000..fec87821648 --- /dev/null +++ b/test/e2e/testing-manifests/storage-csi/gce-pd/controller_ss.yaml @@ -0,0 +1,75 @@ +kind: StatefulSet +apiVersion: apps/v1beta1 +metadata: + name: csi-gce-controller +spec: + serviceName: "csi-gce-pd" + replicas: 1 + selector: + matchLabels: + app: csi-gce-pd-driver + template: + metadata: + labels: + app: csi-gce-pd-driver + spec: + serviceAccount: csi-gce-pd + containers: + - name: csi-external-provisioner + imagePullPolicy: Always + image: quay.io/k8scsi/csi-provisioner:v0.2.0 + args: + - "--v=5" + - "--provisioner=csi-gce-pd" + - "--csi-address=$(ADDRESS)" + env: + - name: ADDRESS + value: /csi/csi.sock + volumeMounts: + - name: socket-dir + mountPath: /csi + - name: csi-attacher + imagePullPolicy: Always + image: quay.io/k8scsi/csi-attacher:v0.2.0 + args: + - "--v=5" + - "--csi-address=$(ADDRESS)" + env: + - name: ADDRESS + value: /csi/csi.sock + volumeMounts: + - name: socket-dir + mountPath: /csi + - name: gce-driver + securityContext: + privileged: true + capabilities: + add: ["SYS_ADMIN"] + allowPrivilegeEscalation: true + imagePullPolicy: Always + image: gcr.io/google-containers/volume-csi/compute-persistent-disk-csi-driver:v0.2.0.alpha + args: + - "--v=5" + - "--endpoint=$(CSI_ENDPOINT)" + - "--nodeid=$(KUBE_NODE_NAME)" + env: + - name: CSI_ENDPOINT + value: unix:///csi/csi.sock + - name: KUBE_NODE_NAME + valueFrom: + fieldRef: + fieldPath: spec.nodeName + - name: GOOGLE_APPLICATION_CREDENTIALS + value: "/etc/service-account/cloud-sa.json" + volumeMounts: + - name: socket-dir + mountPath: /csi + - name: cloud-sa-volume + readOnly: true + mountPath: "/etc/service-account" + volumes: + - name: socket-dir + emptyDir: {} + - name: cloud-sa-volume + secret: + secretName: cloud-sa diff --git a/test/e2e/testing-manifests/storage-csi/gce-pd/node_ds.yaml b/test/e2e/testing-manifests/storage-csi/gce-pd/node_ds.yaml new file mode 
100644 index 00000000000..e6886e6efa8 --- /dev/null +++ b/test/e2e/testing-manifests/storage-csi/gce-pd/node_ds.yaml @@ -0,0 +1,72 @@ +kind: DaemonSet +apiVersion: apps/v1 +metadata: + name: csi-gce-node +spec: + selector: + matchLabels: + app: csi-gce-driver + serviceName: csi-gce + template: + metadata: + labels: + app: csi-gce-driver + spec: + serviceAccount: csi-gce-pd + containers: + - name: csi-driver-registrar + imagePullPolicy: Always + image: quay.io/k8scsi/driver-registrar:v0.2.0 + args: + - "--v=5" + - "--csi-address=$(ADDRESS)" + env: + - name: ADDRESS + value: /csi/csi.sock + - name: KUBE_NODE_NAME + valueFrom: + fieldRef: + fieldPath: spec.nodeName + volumeMounts: + - name: plugin-dir + mountPath: /csi + - name: gce-driver + securityContext: + privileged: true + capabilities: + add: ["SYS_ADMIN"] + allowPrivilegeEscalation: true + imagePullPolicy: Always + image: gcr.io/google-containers/volume-csi/compute-persistent-disk-csi-driver:v0.2.0.alpha + args: + - "--v=5" + - "--endpoint=$(CSI_ENDPOINT)" + - "--nodeid=$(KUBE_NODE_NAME)" + env: + - name: CSI_ENDPOINT + value: unix:///csi/csi.sock + - name: KUBE_NODE_NAME + valueFrom: + fieldRef: + fieldPath: spec.nodeName + volumeMounts: + - name: kubelet-dir + mountPath: /var/lib/kubelet + mountPropagation: "Bidirectional" + - name: plugin-dir + mountPath: /csi + - name: device-dir + mountPath: /host/dev + volumes: + - name: kubelet-dir + hostPath: + path: /var/lib/kubelet + type: Directory + - name: plugin-dir + hostPath: + path: /var/lib/kubelet/plugins/com.google.csi.gcepd/ + type: DirectoryOrCreate + - name: device-dir + hostPath: + path: /dev + type: Directory From 4d11dab272fc358c918a1a6b83db8f938535cdb0 Mon Sep 17 00:00:00 2001 From: David Zhu Date: Thu, 12 Apr 2018 18:21:59 -0700 Subject: [PATCH 2/2] CSI test refactor to be more easily extensible for more plugins when there are more tests --- test/e2e/storage/BUILD | 2 +- .../storage/{csi_defs.go => csi_objects.go} | 153 ++++++- 
test/e2e/storage/csi_volumes.go | 383 +++++++----------- .../gce-pd/controller_service.yaml | 1 - .../storage-csi/gce-pd/controller_ss.yaml | 5 - .../storage-csi/gce-pd/node_ds.yaml | 3 - 6 files changed, 305 insertions(+), 242 deletions(-) rename test/e2e/storage/{csi_defs.go => csi_objects.go} (60%) diff --git a/test/e2e/storage/BUILD b/test/e2e/storage/BUILD index 099cdc855e3..1e133811fc8 100644 --- a/test/e2e/storage/BUILD +++ b/test/e2e/storage/BUILD @@ -3,7 +3,7 @@ load("@io_bazel_rules_go//go:def.bzl", "go_library") go_library( name = "go_default_library", srcs = [ - "csi_defs.go", + "csi_objects.go", "csi_volumes.go", "empty_dir_wrapper.go", "flexvolume.go", diff --git a/test/e2e/storage/csi_defs.go b/test/e2e/storage/csi_objects.go similarity index 60% rename from test/e2e/storage/csi_defs.go rename to test/e2e/storage/csi_objects.go index 8e956bdb681..eba393cbc25 100644 --- a/test/e2e/storage/csi_defs.go +++ b/test/e2e/storage/csi_objects.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -20,19 +20,166 @@ limitations under the License. package storage import ( + "fmt" + "time" + "k8s.io/api/core/v1" + rbacv1 "k8s.io/api/rbac/v1" apierrs "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/wait" clientset "k8s.io/client-go/kubernetes" + restclient "k8s.io/client-go/rest" "k8s.io/kubernetes/test/e2e/framework" "k8s.io/kubernetes/test/e2e/manifest" + + . 
"github.com/onsi/ginkgo" ) const ( - csiHostPathPluginImage string = "quay.io/k8scsi/hostpathplugin:v0.2.0" + csiHostPathPluginImage string = "quay.io/k8scsi/hostpathplugin:v0.2.0" + csiExternalAttacherImage string = "quay.io/k8scsi/csi-attacher:v0.2.0" + csiExternalProvisionerImage string = "quay.io/k8scsi/csi-provisioner:v0.2.0" + csiDriverRegistrarImage string = "quay.io/k8scsi/driver-registrar:v0.2.0" ) +// Create the driver registrar cluster role if it doesn't exist, no teardown so that tests +// are parallelizable. This role will be shared with many of the CSI tests. +func csiDriverRegistrarClusterRole( + config framework.VolumeTestConfig, +) *rbacv1.ClusterRole { + // TODO(Issue: #62237) Remove impersonation workaround and cluster role when issue resolved + By("Creating an impersonating superuser kubernetes clientset to define cluster role") + rc, err := framework.LoadConfig() + framework.ExpectNoError(err) + rc.Impersonate = restclient.ImpersonationConfig{ + UserName: "superuser", + Groups: []string{"system:masters"}, + } + superuserClientset, err := clientset.NewForConfig(rc) + framework.ExpectNoError(err, "Failed to create superuser clientset: %v", err) + By("Creating the CSI driver registrar cluster role") + clusterRoleClient := superuserClientset.RbacV1().ClusterRoles() + role := &rbacv1.ClusterRole{ + ObjectMeta: metav1.ObjectMeta{ + Name: csiDriverRegistrarClusterRoleName, + }, + Rules: []rbacv1.PolicyRule{ + + { + APIGroups: []string{""}, + Resources: []string{"events"}, + Verbs: []string{"get", "list", "watch", "create", "update", "patch"}, + }, + { + APIGroups: []string{""}, + Resources: []string{"nodes"}, + Verbs: []string{"get", "update", "patch"}, + }, + }, + } + + ret, err := clusterRoleClient.Create(role) + if err != nil { + if apierrs.IsAlreadyExists(err) { + return ret + } + framework.ExpectNoError(err, "Failed to create %s cluster role: %v", role.GetName(), err) + } + + return ret +} + +func csiServiceAccount( + client clientset.Interface, 
+ config framework.VolumeTestConfig, + componentName string, + teardown bool, +) *v1.ServiceAccount { + creatingString := "Creating" + if teardown { + creatingString = "Deleting" + } + By(fmt.Sprintf("%v a CSI service account for %v", creatingString, componentName)) + serviceAccountName := config.Prefix + "-" + componentName + "-service-account" + serviceAccountClient := client.CoreV1().ServiceAccounts(config.Namespace) + sa := &v1.ServiceAccount{ + ObjectMeta: metav1.ObjectMeta{ + Name: serviceAccountName, + }, + } + + serviceAccountClient.Delete(sa.GetName(), &metav1.DeleteOptions{}) + err := wait.Poll(2*time.Second, 10*time.Minute, func() (bool, error) { + _, err := serviceAccountClient.Get(sa.GetName(), metav1.GetOptions{}) + return apierrs.IsNotFound(err), nil + }) + framework.ExpectNoError(err, "Timed out waiting for deletion: %v", err) + + if teardown { + return nil + } + + ret, err := serviceAccountClient.Create(sa) + if err != nil { + framework.ExpectNoError(err, "Failed to create %s service account: %v", sa.GetName(), err) + } + + return ret +} + +func csiClusterRoleBindings( + client clientset.Interface, + config framework.VolumeTestConfig, + teardown bool, + sa *v1.ServiceAccount, + clusterRolesNames []string, +) { + bindingString := "Binding" + if teardown { + bindingString = "Unbinding" + } + By(fmt.Sprintf("%v cluster roles %v to the CSI service account %v", bindingString, clusterRolesNames, sa.GetName())) + clusterRoleBindingClient := client.RbacV1().ClusterRoleBindings() + for _, clusterRoleName := range clusterRolesNames { + + binding := &rbacv1.ClusterRoleBinding{ + ObjectMeta: metav1.ObjectMeta{ + Name: config.Prefix + "-" + clusterRoleName + "-" + config.Namespace + "-role-binding", + }, + Subjects: []rbacv1.Subject{ + { + Kind: "ServiceAccount", + Name: sa.GetName(), + Namespace: sa.GetNamespace(), + }, + }, + RoleRef: rbacv1.RoleRef{ + Kind: "ClusterRole", + Name: clusterRoleName, + APIGroup: "rbac.authorization.k8s.io", + }, + } + + 
clusterRoleBindingClient.Delete(binding.GetName(), &metav1.DeleteOptions{}) + err := wait.Poll(2*time.Second, 10*time.Minute, func() (bool, error) { + _, err := clusterRoleBindingClient.Get(binding.GetName(), metav1.GetOptions{}) + return apierrs.IsNotFound(err), nil + }) + framework.ExpectNoError(err, "Timed out waiting for deletion: %v", err) + + if teardown { + return + } + + _, err = clusterRoleBindingClient.Create(binding) + if err != nil { + framework.ExpectNoError(err, "Failed to create %s role binding: %v", binding.GetName(), err) + } + } +} + func csiHostPathPod( client clientset.Interface, config framework.VolumeTestConfig, @@ -200,7 +347,7 @@ func csiHostPathPod( return ret } -func csiGCEPDSetup( +func deployGCEPDCSIDriver( client clientset.Interface, config framework.VolumeTestConfig, teardown bool, diff --git a/test/e2e/storage/csi_volumes.go b/test/e2e/storage/csi_volumes.go index 170fa9d2a45..369d480d074 100644 --- a/test/e2e/storage/csi_volumes.go +++ b/test/e2e/storage/csi_volumes.go @@ -1,5 +1,5 @@ /* -Copyright 2017 The Kubernetes Authors. +Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
@@ -22,13 +22,8 @@ import ( "time" "k8s.io/api/core/v1" - rbacv1 "k8s.io/api/rbac/v1" - apierrs "k8s.io/apimachinery/pkg/api/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/util/wait" clientset "k8s.io/client-go/kubernetes" - restclient "k8s.io/client-go/rest" kubeletapis "k8s.io/kubernetes/pkg/kubelet/apis" "k8s.io/kubernetes/test/e2e/framework" "k8s.io/kubernetes/test/e2e/storage/utils" @@ -38,139 +33,21 @@ import ( ) const ( - csiExternalAttacherImage string = "quay.io/k8scsi/csi-attacher:v0.2.0" - csiExternalProvisionerImage string = "quay.io/k8scsi/csi-provisioner:v0.2.0" - csiDriverRegistrarImage string = "quay.io/k8scsi/driver-registrar:v0.2.0" csiExternalProvisionerClusterRoleName string = "system:csi-external-provisioner" csiExternalAttacherClusterRoleName string = "system:csi-external-attacher" csiDriverRegistrarClusterRoleName string = "csi-driver-registrar" ) -// Create the driver registrar cluster role if it doesn't exist, no teardown so that tests -// are parallelizable. This role will be shared with many of the CSI tests. 
-func csiDriverRegistrarClusterRole( - config framework.VolumeTestConfig, -) *rbacv1.ClusterRole { - // TODO(Issue: #62237) Remove impersonation workaround and cluster role when issue resolved - By("Creating an impersonating superuser kubernetes clientset to define cluster role") - rc, err := framework.LoadConfig() - framework.ExpectNoError(err) - rc.Impersonate = restclient.ImpersonationConfig{ - UserName: "superuser", - Groups: []string{"system:masters"}, - } - superuserClientset, err := clientset.NewForConfig(rc) - By("Creating the CSI driver registrar cluster role") - clusterRoleClient := superuserClientset.RbacV1().ClusterRoles() - role := &rbacv1.ClusterRole{ - ObjectMeta: metav1.ObjectMeta{ - Name: csiDriverRegistrarClusterRoleName, - }, - Rules: []rbacv1.PolicyRule{ - - { - APIGroups: []string{""}, - Resources: []string{"events"}, - Verbs: []string{"get", "list", "watch", "create", "update", "patch"}, - }, - { - APIGroups: []string{""}, - Resources: []string{"nodes"}, - Verbs: []string{"get", "update", "patch"}, - }, - }, - } - - ret, err := clusterRoleClient.Create(role) - if err != nil { - if apierrs.IsAlreadyExists(err) { - return ret - } - framework.ExpectNoError(err, "Failed to create %s cluster role: %v", role.GetName(), err) - } - - return ret +type csiTestDriver interface { + createCSIDriver() + cleanupCSIDriver() + createStorageClassTest(node v1.Node) storageClassTest } -func csiServiceAccount( - client clientset.Interface, - config framework.VolumeTestConfig, - componentName string, - teardown bool, -) *v1.ServiceAccount { - By(fmt.Sprintf("Creating a CSI service account for %v", componentName)) - serviceAccountName := config.Prefix + "-" + componentName + "-service-account" - serviceAccountClient := client.CoreV1().ServiceAccounts(config.Namespace) - sa := &v1.ServiceAccount{ - ObjectMeta: metav1.ObjectMeta{ - Name: serviceAccountName, - }, - } - - serviceAccountClient.Delete(sa.GetName(), &metav1.DeleteOptions{}) - err := 
wait.Poll(2*time.Second, 10*time.Minute, func() (bool, error) { - _, err := serviceAccountClient.Get(sa.GetName(), metav1.GetOptions{}) - return apierrs.IsNotFound(err), nil - }) - framework.ExpectNoError(err, "Timed out waiting for deletion: %v", err) - - if teardown { - return nil - } - - ret, err := serviceAccountClient.Create(sa) - if err != nil { - framework.ExpectNoError(err, "Failed to create %s service account: %v", sa.GetName(), err) - } - - return ret -} - -func csiClusterRoleBindings( - client clientset.Interface, - config framework.VolumeTestConfig, - teardown bool, - sa *v1.ServiceAccount, - clusterRolesNames []string, -) { - By(fmt.Sprintf("Binding cluster roles %v to the CSI service account %v", clusterRolesNames, sa.GetName())) - clusterRoleBindingClient := client.RbacV1().ClusterRoleBindings() - for _, clusterRoleName := range clusterRolesNames { - - binding := &rbacv1.ClusterRoleBinding{ - ObjectMeta: metav1.ObjectMeta{ - Name: config.Prefix + "-" + clusterRoleName + "-" + config.Namespace + "-role-binding", - }, - Subjects: []rbacv1.Subject{ - { - Kind: "ServiceAccount", - Name: sa.GetName(), - Namespace: sa.GetNamespace(), - }, - }, - RoleRef: rbacv1.RoleRef{ - Kind: "ClusterRole", - Name: clusterRoleName, - APIGroup: "rbac.authorization.k8s.io", - }, - } - - clusterRoleBindingClient.Delete(binding.GetName(), &metav1.DeleteOptions{}) - err := wait.Poll(2*time.Second, 10*time.Minute, func() (bool, error) { - _, err := clusterRoleBindingClient.Get(binding.GetName(), metav1.GetOptions{}) - return apierrs.IsNotFound(err), nil - }) - framework.ExpectNoError(err, "Timed out waiting for deletion: %v", err) - - if teardown { - return - } - - _, err = clusterRoleBindingClient.Create(binding) - if err != nil { - framework.ExpectNoError(err, "Failed to create %s role binding: %v", binding.GetName(), err) - } - } +var csiTestDrivers = map[string]func(f *framework.Framework, config framework.VolumeTestConfig) csiTestDriver{ + "hostPath": initCSIHostpath, + 
// Feature tag to skip test in CI, pending fix of #62237 + "[Feature: GCE PD CSI Plugin] gcePD": initCSIgcePD, } var _ = utils.SIGDescribe("CSI Volumes [Flaky]", func() { @@ -198,103 +75,151 @@ var _ = utils.SIGDescribe("CSI Volumes [Flaky]", func() { csiDriverRegistrarClusterRole(config) }) - // Create one of these for each of the drivers to be tested - // CSI hostPath driver test - Describe("Sanity CSI plugin test using hostPath CSI driver", func() { - var ( - serviceAccount *v1.ServiceAccount - combinedClusterRoleNames []string = []string{ - csiExternalAttacherClusterRoleName, - csiExternalProvisionerClusterRoleName, - csiDriverRegistrarClusterRoleName, - } - ) + for driverName, initCSIDriver := range csiTestDrivers { + curDriverName := driverName + curInitCSIDriver := initCSIDriver - BeforeEach(func() { - By("deploying csi hostpath driver") - serviceAccount = csiServiceAccount(cs, config, "hostpath", false) - csiClusterRoleBindings(cs, config, false, serviceAccount, combinedClusterRoleNames) - csiHostPathPod(cs, config, false, f, serviceAccount) + Context(fmt.Sprintf("CSI plugin test using CSI driver: %s", curDriverName), func() { + var ( + driver csiTestDriver + ) + + BeforeEach(func() { + driver = curInitCSIDriver(f, config) + driver.createCSIDriver() + }) + + AfterEach(func() { + driver.cleanupCSIDriver() + }) + + It("should provision storage", func() { + t := driver.createStorageClassTest(node) + claim := newClaim(t, ns.GetName(), "") + class := newStorageClass(t, ns.GetName(), "") + claim.Spec.StorageClassName = &class.ObjectMeta.Name + testDynamicProvisioning(t, cs, claim, class) + }) }) - - AfterEach(func() { - By("uninstalling csi hostpath driver") - csiHostPathPod(cs, config, true, f, serviceAccount) - csiClusterRoleBindings(cs, config, true, serviceAccount, combinedClusterRoleNames) - csiServiceAccount(cs, config, "hostpath", true) - }) - - It("should provision storage with a hostPath CSI driver", func() { - t := storageClassTest{ - name: 
"csi-hostpath", - provisioner: "csi-hostpath", - parameters: map[string]string{}, - claimSize: "1Gi", - expectedSize: "1Gi", - nodeName: node.Name, - } - - claim := newClaim(t, ns.GetName(), "") - class := newStorageClass(t, ns.GetName(), "") - claim.Spec.StorageClassName = &class.ObjectMeta.Name - testDynamicProvisioning(t, cs, claim, class) - }) - }) - - Describe("[Feature: CSI] Sanity CSI plugin test using GCE-PD CSI driver", func() { - var ( - controllerClusterRoles []string = []string{ - csiExternalAttacherClusterRoleName, - csiExternalProvisionerClusterRoleName, - } - nodeClusterRoles []string = []string{ - csiDriverRegistrarClusterRoleName, - } - controllerServiceAccount *v1.ServiceAccount - nodeServiceAccount *v1.ServiceAccount - ) - - BeforeEach(func() { - framework.SkipUnlessProviderIs("gce", "gke") - // Currently you will need to manually add the required GCP Credentials as a secret "cloud-sa" - // kubectl create generic cloud-sa --from-file=PATH/TO/cloud-sa.json --namespace={{config.Namespace}} - // TODO(GITHUBISSUE): Inject the necessary credentials automatically to the driver containers in e2e test - framework.SkipUnlessSecretExistsAfterWait(cs, "cloud-sa", config.Namespace, 3*time.Minute) - - By("deploying gce-pd driver") - controllerServiceAccount = csiServiceAccount(cs, config, "gce-controller", false) - nodeServiceAccount = csiServiceAccount(cs, config, "gce-node", false) - csiClusterRoleBindings(cs, config, false, controllerServiceAccount, controllerClusterRoles) - csiClusterRoleBindings(cs, config, false, nodeServiceAccount, nodeClusterRoles) - csiGCEPDSetup(cs, config, false, f, nodeServiceAccount, controllerServiceAccount) - }) - - AfterEach(func() { - By("uninstalling gce-pd driver") - csiGCEPDSetup(cs, config, true, f, nodeServiceAccount, controllerServiceAccount) - csiClusterRoleBindings(cs, config, true, controllerServiceAccount, controllerClusterRoles) - csiClusterRoleBindings(cs, config, true, nodeServiceAccount, nodeClusterRoles) - 
csiServiceAccount(cs, config, "gce-controller", true) - csiServiceAccount(cs, config, "gce-node", true) - }) - - It("should provision storage with a GCE-PD CSI driver", func() { - nodeZone, ok := node.GetLabels()[kubeletapis.LabelZoneFailureDomain] - Expect(ok).To(BeTrue(), "Could not get label %v from node %v", kubeletapis.LabelZoneFailureDomain, node.GetName()) - t := storageClassTest{ - name: "csi-gce-pd", - provisioner: "csi-gce-pd", - parameters: map[string]string{"type": "pd-standard", "zone": nodeZone}, - claimSize: "5Gi", - expectedSize: "5Gi", - nodeName: node.Name, - } - - claim := newClaim(t, ns.GetName(), "") - class := newStorageClass(t, ns.GetName(), "") - claim.Spec.StorageClassName = &class.ObjectMeta.Name - testDynamicProvisioning(t, cs, claim, class) - }) - - }) + } }) + +type hostpathCSIDriver struct { + combinedClusterRoleNames []string + serviceAccount *v1.ServiceAccount + + f *framework.Framework + config framework.VolumeTestConfig +} + +func initCSIHostpath(f *framework.Framework, config framework.VolumeTestConfig) csiTestDriver { + return &hostpathCSIDriver{ + combinedClusterRoleNames: []string{ + csiExternalAttacherClusterRoleName, + csiExternalProvisionerClusterRoleName, + csiDriverRegistrarClusterRoleName, + }, + f: f, + config: config, + } +} + +func (h *hostpathCSIDriver) createStorageClassTest(node v1.Node) storageClassTest { + return storageClassTest{ + name: "csi-hostpath", + provisioner: "csi-hostpath", + parameters: map[string]string{}, + claimSize: "1Gi", + expectedSize: "1Gi", + nodeName: node.Name, + } +} + +func (h *hostpathCSIDriver) createCSIDriver() { + By("deploying csi hostpath driver") + f := h.f + cs := f.ClientSet + config := h.config + h.serviceAccount = csiServiceAccount(cs, config, "hostpath", false) + csiClusterRoleBindings(cs, config, false, h.serviceAccount, h.combinedClusterRoleNames) + csiHostPathPod(cs, config, false, f, h.serviceAccount) +} + +func (h *hostpathCSIDriver) cleanupCSIDriver() { + By("uninstalling 
csi hostpath driver")
+	f := h.f
+	cs := f.ClientSet
+	config := h.config
+	csiHostPathPod(cs, config, true, f, h.serviceAccount)
+	csiClusterRoleBindings(cs, config, true, h.serviceAccount, h.combinedClusterRoleNames)
+	csiServiceAccount(cs, config, "hostpath", true)
+}
+
+type gcePDCSIDriver struct {
+	controllerClusterRoles   []string
+	nodeClusterRoles         []string
+	controllerServiceAccount *v1.ServiceAccount
+	nodeServiceAccount       *v1.ServiceAccount
+
+	f      *framework.Framework
+	config framework.VolumeTestConfig
+}
+
+func initCSIgcePD(f *framework.Framework, config framework.VolumeTestConfig) csiTestDriver {
+	cs := f.ClientSet
+	framework.SkipUnlessProviderIs("gce", "gke")
+	// Currently you will need to manually add the required GCP Credentials as a secret "cloud-sa"
+	// kubectl create secret generic cloud-sa --from-file=PATH/TO/cloud-sa.json --namespace={{config.Namespace}}
+	// TODO(#62561): Inject the necessary credentials automatically to the driver containers in e2e test
+	framework.SkipUnlessSecretExistsAfterWait(cs, "cloud-sa", config.Namespace, 3*time.Minute)
+
+	return &gcePDCSIDriver{
+		nodeClusterRoles: []string{
+			csiDriverRegistrarClusterRoleName,
+		},
+		controllerClusterRoles: []string{
+			csiExternalAttacherClusterRoleName,
+			csiExternalProvisionerClusterRoleName,
+		},
+		f:      f,
+		config: config,
+	}
+}
+
+func (g *gcePDCSIDriver) createStorageClassTest(node v1.Node) storageClassTest {
+	nodeZone, ok := node.GetLabels()[kubeletapis.LabelZoneFailureDomain]
+	Expect(ok).To(BeTrue(), "Could not get label %v from node %v", kubeletapis.LabelZoneFailureDomain, node.GetName())
+
+	return storageClassTest{
+		name:         "csi-gce-pd",
+		provisioner:  "csi-gce-pd",
+		parameters:   map[string]string{"type": "pd-standard", "zone": nodeZone},
+		claimSize:    "5Gi",
+		expectedSize: "5Gi",
+		nodeName:     node.Name,
+	}
+}
+
+func (g *gcePDCSIDriver) createCSIDriver() {
+	By("deploying gce-pd driver")
+	f := g.f
+	cs := f.ClientSet
+	config := g.config
+	g.controllerServiceAccount = 
csiServiceAccount(cs, config, "gce-controller", false /* teardown */) + g.nodeServiceAccount = csiServiceAccount(cs, config, "gce-node", false /* teardown */) + csiClusterRoleBindings(cs, config, false /* teardown */, g.controllerServiceAccount, g.controllerClusterRoles) + csiClusterRoleBindings(cs, config, false /* teardown */, g.nodeServiceAccount, g.nodeClusterRoles) + deployGCEPDCSIDriver(cs, config, false /* teardown */, f, g.nodeServiceAccount, g.controllerServiceAccount) +} + +func (g *gcePDCSIDriver) cleanupCSIDriver() { + By("uninstalling gce-pd driver") + f := g.f + cs := f.ClientSet + config := g.config + deployGCEPDCSIDriver(cs, config, true /* teardown */, f, g.nodeServiceAccount, g.controllerServiceAccount) + csiClusterRoleBindings(cs, config, true /* teardown */, g.controllerServiceAccount, g.controllerClusterRoles) + csiClusterRoleBindings(cs, config, true /* teardown */, g.nodeServiceAccount, g.nodeClusterRoles) + csiServiceAccount(cs, config, "gce-controller", true /* teardown */) + csiServiceAccount(cs, config, "gce-node", true /* teardown */) +} diff --git a/test/e2e/testing-manifests/storage-csi/gce-pd/controller_service.yaml b/test/e2e/testing-manifests/storage-csi/gce-pd/controller_service.yaml index 7142ceb56a4..6c509b2fd08 100644 --- a/test/e2e/testing-manifests/storage-csi/gce-pd/controller_service.yaml +++ b/test/e2e/testing-manifests/storage-csi/gce-pd/controller_service.yaml @@ -10,4 +10,3 @@ spec: ports: - name: dummy port: 12345 - \ No newline at end of file diff --git a/test/e2e/testing-manifests/storage-csi/gce-pd/controller_ss.yaml b/test/e2e/testing-manifests/storage-csi/gce-pd/controller_ss.yaml index fec87821648..76f1ca9b723 100644 --- a/test/e2e/testing-manifests/storage-csi/gce-pd/controller_ss.yaml +++ b/test/e2e/testing-manifests/storage-csi/gce-pd/controller_ss.yaml @@ -41,11 +41,6 @@ spec: - name: socket-dir mountPath: /csi - name: gce-driver - securityContext: - privileged: true - capabilities: - add: ["SYS_ADMIN"] - 
allowPrivilegeEscalation: true imagePullPolicy: Always image: gcr.io/google-containers/volume-csi/compute-persistent-disk-csi-driver:v0.2.0.alpha args: diff --git a/test/e2e/testing-manifests/storage-csi/gce-pd/node_ds.yaml b/test/e2e/testing-manifests/storage-csi/gce-pd/node_ds.yaml index e6886e6efa8..11890676e78 100644 --- a/test/e2e/testing-manifests/storage-csi/gce-pd/node_ds.yaml +++ b/test/e2e/testing-manifests/storage-csi/gce-pd/node_ds.yaml @@ -33,9 +33,6 @@ spec: - name: gce-driver securityContext: privileged: true - capabilities: - add: ["SYS_ADMIN"] - allowPrivilegeEscalation: true imagePullPolicy: Always image: gcr.io/google-containers/volume-csi/compute-persistent-disk-csi-driver:v0.2.0.alpha args: