diff --git a/test/e2e/storage/vsphere/persistent_volumes-vsphere.go b/test/e2e/storage/vsphere/persistent_volumes-vsphere.go
index d33b1460a18..051de8e0167 100644
--- a/test/e2e/storage/vsphere/persistent_volumes-vsphere.go
+++ b/test/e2e/storage/vsphere/persistent_volumes-vsphere.go
@@ -19,8 +19,8 @@ package vsphere
 import (
     "time"
 
-    . "github.com/onsi/ginkgo"
-    . "github.com/onsi/gomega"
+    "github.com/onsi/ginkgo"
+    "github.com/onsi/gomega"
     v1 "k8s.io/api/core/v1"
     metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     "k8s.io/apimachinery/pkg/labels"
@@ -58,7 +58,7 @@ var _ = utils.SIGDescribe("PersistentVolumes:vsphere", func() {
         4. Create a POD using the PVC.
         5. Verify Disk and Attached to the node.
     */
-    BeforeEach(func() {
+    ginkgo.BeforeEach(func() {
         framework.SkipUnlessProviderIs("vsphere")
         Bootstrap(f)
         c = f.ClientSet
@@ -95,23 +95,23 @@ var _ = utils.SIGDescribe("PersistentVolumes:vsphere", func() {
                 StorageClassName: &emptyStorageClass,
             }
         }
-        By("Creating the PV and PVC")
+        ginkgo.By("Creating the PV and PVC")
         pv, pvc, err = framework.CreatePVPVC(c, pvConfig, pvcConfig, ns, false)
         framework.ExpectNoError(err)
         framework.ExpectNoError(framework.WaitOnPVandPVC(c, ns, pv, pvc))
 
-        By("Creating the Client Pod")
+        ginkgo.By("Creating the Client Pod")
         clientPod, err = framework.CreateClientPod(c, ns, pvc)
         framework.ExpectNoError(err)
         node = clientPod.Spec.NodeName
 
-        By("Verify disk should be attached to the node")
+        ginkgo.By("Verify disk should be attached to the node")
         isAttached, err := diskIsAttached(volumePath, node)
         framework.ExpectNoError(err)
-        Expect(isAttached).To(BeTrue(), "disk is not attached with the node")
+        gomega.Expect(isAttached).To(gomega.BeTrue(), "disk is not attached with the node")
     })
 
-    AfterEach(func() {
+    ginkgo.AfterEach(func() {
         e2elog.Logf("AfterEach: Cleaning up test resources")
         if c != nil {
             framework.ExpectNoError(framework.DeletePodWithWait(f, c, clientPod), "AfterEach: failed to delete pod ", clientPod.Name)
@@ -147,12 +147,12 @@ var _ = utils.SIGDescribe("PersistentVolumes:vsphere", func() {
 
         2. Delete POD, POD deletion should succeed.
     */
-    It("should test that deleting a PVC before the pod does not cause pod deletion to fail on vsphere volume detach", func() {
-        By("Deleting the Claim")
+    ginkgo.It("should test that deleting a PVC before the pod does not cause pod deletion to fail on vsphere volume detach", func() {
+        ginkgo.By("Deleting the Claim")
         framework.ExpectNoError(framework.DeletePersistentVolumeClaim(c, pvc.Name, ns), "Failed to delete PVC ", pvc.Name)
         pvc = nil
 
-        By("Deleting the Pod")
+        ginkgo.By("Deleting the Pod")
         framework.ExpectNoError(framework.DeletePodWithWait(f, c, clientPod), "Failed to delete pod ", clientPod.Name)
     })
 
@@ -163,12 +163,12 @@ var _ = utils.SIGDescribe("PersistentVolumes:vsphere", func() {
         1. Delete PV.
         2. Delete POD, POD deletion should succeed.
*/ - It("should test that deleting the PV before the pod does not cause pod deletion to fail on vspehre volume detach", func() { - By("Deleting the Persistent Volume") + ginkgo.It("should test that deleting the PV before the pod does not cause pod deletion to fail on vspehre volume detach", func() { + ginkgo.By("Deleting the Persistent Volume") framework.ExpectNoError(framework.DeletePersistentVolume(c, pv.Name), "Failed to delete PV ", pv.Name) pv = nil - By("Deleting the pod") + ginkgo.By("Deleting the pod") framework.ExpectNoError(framework.DeletePodWithWait(f, c, clientPod), "Failed to delete pod ", clientPod.Name) }) /* @@ -178,7 +178,7 @@ var _ = utils.SIGDescribe("PersistentVolumes:vsphere", func() { 2. Restart kubelet 3. Verify that written file is accessible after kubelet restart */ - It("should test that a file written to the vspehre volume mount before kubelet restart can be read after restart [Disruptive]", func() { + ginkgo.It("should test that a file written to the vspehre volume mount before kubelet restart can be read after restart [Disruptive]", func() { utils.TestKubeletRestartsAndRestoresMount(c, f, clientPod) }) @@ -193,7 +193,7 @@ var _ = utils.SIGDescribe("PersistentVolumes:vsphere", func() { 4. Start kubelet. 5. Verify that volume mount not to be found. */ - It("should test that a vspehre volume mounted to a pod that is deleted while the kubelet is down unmounts when the kubelet returns [Disruptive]", func() { + ginkgo.It("should test that a vspehre volume mounted to a pod that is deleted while the kubelet is down unmounts when the kubelet returns [Disruptive]", func() { utils.TestVolumeUnmountsFromDeletedPod(c, f, clientPod) }) @@ -205,15 +205,15 @@ var _ = utils.SIGDescribe("PersistentVolumes:vsphere", func() { 2. Wait for namespace to get deleted. (Namespace deletion should trigger deletion of belonging pods) 3. Verify volume should be detached from the node. */ - It("should test that deleting the Namespace of a PVC and Pod causes the successful detach of vsphere volume", func() { - By("Deleting the Namespace") + ginkgo.It("should test that deleting the Namespace of a PVC and Pod causes the successful detach of vsphere volume", func() { + ginkgo.By("Deleting the Namespace") err := c.CoreV1().Namespaces().Delete(ns, nil) framework.ExpectNoError(err) err = framework.WaitForNamespacesDeleted(c, []string{ns}, 3*time.Minute) framework.ExpectNoError(err) - By("Verifying Persistent Disk detaches") + ginkgo.By("Verifying Persistent Disk detaches") waitForVSphereDiskToDetach(volumePath, node) }) }) diff --git a/test/e2e/storage/vsphere/pv_reclaimpolicy.go b/test/e2e/storage/vsphere/pv_reclaimpolicy.go index ddedcdc83cb..580c19d7872 100644 --- a/test/e2e/storage/vsphere/pv_reclaimpolicy.go +++ b/test/e2e/storage/vsphere/pv_reclaimpolicy.go @@ -20,8 +20,8 @@ import ( "strconv" "time" - . "github.com/onsi/ginkgo" - . 
"github.com/onsi/gomega" + "github.com/onsi/ginkgo" + "github.com/onsi/gomega" v1 "k8s.io/api/core/v1" apierrs "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -42,14 +42,14 @@ var _ = utils.SIGDescribe("PersistentVolumes [Feature:ReclaimPolicy]", func() { nodeInfo *NodeInfo ) - BeforeEach(func() { + ginkgo.BeforeEach(func() { c = f.ClientSet ns = f.Namespace.Name framework.ExpectNoError(framework.WaitForAllNodesSchedulable(c, framework.TestContext.NodeSchedulableTimeout)) }) utils.SIGDescribe("persistentvolumereclaim:vsphere", func() { - BeforeEach(func() { + ginkgo.BeforeEach(func() { framework.SkipUnlessProviderIs("vsphere") Bootstrap(f) nodeInfo = GetReadySchedulableRandomNodeInfo() @@ -58,7 +58,7 @@ var _ = utils.SIGDescribe("PersistentVolumes [Feature:ReclaimPolicy]", func() { volumePath = "" }) - AfterEach(func() { + ginkgo.AfterEach(func() { testCleanupVSpherePersistentVolumeReclaim(c, nodeInfo, ns, volumePath, pv, pvc) }) @@ -74,7 +74,7 @@ var _ = utils.SIGDescribe("PersistentVolumes [Feature:ReclaimPolicy]", func() { 5. Delete PVC 6. Verify PV is deleted automatically. */ - It("should delete persistent volume when reclaimPolicy set to delete and associated claim is deleted", func() { + ginkgo.It("should delete persistent volume when reclaimPolicy set to delete and associated claim is deleted", func() { var err error volumePath, pv, pvc, err = testSetupVSpherePersistentVolumeReclaim(c, nodeInfo, ns, v1.PersistentVolumeReclaimDelete) framework.ExpectNoError(err) @@ -82,7 +82,7 @@ var _ = utils.SIGDescribe("PersistentVolumes [Feature:ReclaimPolicy]", func() { deletePVCAfterBind(c, ns, pvc, pv) pvc = nil - By("verify pv is deleted") + ginkgo.By("verify pv is deleted") err = framework.WaitForPersistentVolumeDeleted(c, pv.Name, 3*time.Second, 300*time.Second) framework.ExpectNoError(err) @@ -102,7 +102,7 @@ var _ = utils.SIGDescribe("PersistentVolumes [Feature:ReclaimPolicy]", func() { 8. Delete the pod. 9. Verify PV should be detached from the node and automatically deleted. */ - It("should not detach and unmount PV when associated pvc with delete as reclaimPolicy is deleted when it is in use by the pod", func() { + ginkgo.It("should not detach and unmount PV when associated pvc with delete as reclaimPolicy is deleted when it is in use by the pod", func() { var err error volumePath, pv, pvc, err = testSetupVSpherePersistentVolumeReclaim(c, nodeInfo, ns, v1.PersistentVolumeReclaimDelete) @@ -110,35 +110,35 @@ var _ = utils.SIGDescribe("PersistentVolumes [Feature:ReclaimPolicy]", func() { // Wait for PV and PVC to Bind framework.ExpectNoError(framework.WaitOnPVandPVC(c, ns, pv, pvc)) - By("Creating the Pod") + ginkgo.By("Creating the Pod") pod, err := framework.CreateClientPod(c, ns, pvc) framework.ExpectNoError(err) - By("Deleting the Claim") + ginkgo.By("Deleting the Claim") framework.ExpectNoError(framework.DeletePersistentVolumeClaim(c, pvc.Name, ns), "Failed to delete PVC ", pvc.Name) pvc = nil // Verify PV is Present, after PVC is deleted and PV status should be Failed. 
pv, err := c.CoreV1().PersistentVolumes().Get(pv.Name, metav1.GetOptions{}) framework.ExpectNoError(err) - Expect(framework.WaitForPersistentVolumePhase(v1.VolumeFailed, c, pv.Name, 1*time.Second, 60*time.Second)).NotTo(HaveOccurred()) + gomega.Expect(framework.WaitForPersistentVolumePhase(v1.VolumeFailed, c, pv.Name, 1*time.Second, 60*time.Second)).NotTo(gomega.HaveOccurred()) - By("Verify the volume is attached to the node") + ginkgo.By("Verify the volume is attached to the node") isVolumeAttached, verifyDiskAttachedError := diskIsAttached(pv.Spec.VsphereVolume.VolumePath, pod.Spec.NodeName) - Expect(verifyDiskAttachedError).NotTo(HaveOccurred()) - Expect(isVolumeAttached).To(BeTrue()) + gomega.Expect(verifyDiskAttachedError).NotTo(gomega.HaveOccurred()) + gomega.Expect(isVolumeAttached).To(gomega.BeTrue()) - By("Verify the volume is accessible and available in the pod") + ginkgo.By("Verify the volume is accessible and available in the pod") verifyVSphereVolumesAccessible(c, pod, []*v1.PersistentVolume{pv}) e2elog.Logf("Verified that Volume is accessible in the POD after deleting PV claim") - By("Deleting the Pod") + ginkgo.By("Deleting the Pod") framework.ExpectNoError(framework.DeletePodWithWait(f, c, pod), "Failed to delete pod ", pod.Name) - By("Verify PV is detached from the node after Pod is deleted") - Expect(waitForVSphereDiskToDetach(pv.Spec.VsphereVolume.VolumePath, pod.Spec.NodeName)).NotTo(HaveOccurred()) + ginkgo.By("Verify PV is detached from the node after Pod is deleted") + gomega.Expect(waitForVSphereDiskToDetach(pv.Spec.VsphereVolume.VolumePath, pod.Spec.NodeName)).NotTo(gomega.HaveOccurred()) - By("Verify PV should be deleted automatically") + ginkgo.By("Verify PV should be deleted automatically") framework.ExpectNoError(framework.WaitForPersistentVolumeDeleted(c, pv.Name, 1*time.Second, 30*time.Second)) pv = nil volumePath = "" @@ -162,7 +162,7 @@ var _ = utils.SIGDescribe("PersistentVolumes [Feature:ReclaimPolicy]", func() { 11. Created POD using PVC created in Step 10 and verify volume content is matching. 
*/ - It("should retain persistent volume when reclaimPolicy set to retain when associated claim is deleted", func() { + ginkgo.It("should retain persistent volume when reclaimPolicy set to retain when associated claim is deleted", func() { var err error var volumeFileContent = "hello from vsphere cloud provider, Random Content is :" + strconv.FormatInt(time.Now().UnixNano(), 10) @@ -171,27 +171,27 @@ var _ = utils.SIGDescribe("PersistentVolumes [Feature:ReclaimPolicy]", func() { writeContentToVSpherePV(c, pvc, volumeFileContent) - By("Delete PVC") + ginkgo.By("Delete PVC") framework.ExpectNoError(framework.DeletePersistentVolumeClaim(c, pvc.Name, ns), "Failed to delete PVC ", pvc.Name) pvc = nil - By("Verify PV is retained") + ginkgo.By("Verify PV is retained") e2elog.Logf("Waiting for PV %v to become Released", pv.Name) err = framework.WaitForPersistentVolumePhase(v1.VolumeReleased, c, pv.Name, 3*time.Second, 300*time.Second) framework.ExpectNoError(err) framework.ExpectNoError(framework.DeletePersistentVolume(c, pv.Name), "Failed to delete PV ", pv.Name) - By("Creating the PV for same volume path") + ginkgo.By("Creating the PV for same volume path") pv = getVSpherePersistentVolumeSpec(volumePath, v1.PersistentVolumeReclaimRetain, nil) pv, err = c.CoreV1().PersistentVolumes().Create(pv) framework.ExpectNoError(err) - By("creating the pvc") + ginkgo.By("creating the pvc") pvc = getVSpherePersistentVolumeClaimSpec(ns, nil) pvc, err = c.CoreV1().PersistentVolumeClaims(ns).Create(pvc) framework.ExpectNoError(err) - By("wait for the pv and pvc to bind") + ginkgo.By("wait for the pv and pvc to bind") framework.ExpectNoError(framework.WaitOnPVandPVC(c, ns, pv, pvc)) verifyContentOfVSpherePV(c, pvc, volumeFileContent) @@ -201,19 +201,19 @@ var _ = utils.SIGDescribe("PersistentVolumes [Feature:ReclaimPolicy]", func() { // Test Setup for persistentvolumereclaim tests for vSphere Provider func testSetupVSpherePersistentVolumeReclaim(c clientset.Interface, nodeInfo *NodeInfo, ns string, persistentVolumeReclaimPolicy v1.PersistentVolumeReclaimPolicy) (volumePath string, pv *v1.PersistentVolume, pvc *v1.PersistentVolumeClaim, err error) { - By("running testSetupVSpherePersistentVolumeReclaim") - By("creating vmdk") + ginkgo.By("running testSetupVSpherePersistentVolumeReclaim") + ginkgo.By("creating vmdk") volumePath, err = nodeInfo.VSphere.CreateVolume(&VolumeOptions{}, nodeInfo.DataCenterRef) if err != nil { return } - By("creating the pv") + ginkgo.By("creating the pv") pv = getVSpherePersistentVolumeSpec(volumePath, persistentVolumeReclaimPolicy, nil) pv, err = c.CoreV1().PersistentVolumes().Create(pv) if err != nil { return } - By("creating the pvc") + ginkgo.By("creating the pvc") pvc = getVSpherePersistentVolumeClaimSpec(ns, nil) pvc, err = c.CoreV1().PersistentVolumeClaims(ns).Create(pvc) return @@ -221,7 +221,7 @@ func testSetupVSpherePersistentVolumeReclaim(c clientset.Interface, nodeInfo *No // Test Cleanup for persistentvolumereclaim tests for vSphere Provider func testCleanupVSpherePersistentVolumeReclaim(c clientset.Interface, nodeInfo *NodeInfo, ns string, volumePath string, pv *v1.PersistentVolume, pvc *v1.PersistentVolumeClaim) { - By("running testCleanupVSpherePersistentVolumeReclaim") + ginkgo.By("running testCleanupVSpherePersistentVolumeReclaim") if len(volumePath) > 0 { err := nodeInfo.VSphere.DeleteVolume(volumePath, nodeInfo.DataCenterRef) framework.ExpectNoError(err) @@ -238,10 +238,10 @@ func testCleanupVSpherePersistentVolumeReclaim(c clientset.Interface, nodeInfo * func 
deletePVCAfterBind(c clientset.Interface, ns string, pvc *v1.PersistentVolumeClaim, pv *v1.PersistentVolume) { var err error - By("wait for the pv and pvc to bind") + ginkgo.By("wait for the pv and pvc to bind") framework.ExpectNoError(framework.WaitOnPVandPVC(c, ns, pv, pvc)) - By("delete pvc") + ginkgo.By("delete pvc") framework.ExpectNoError(framework.DeletePersistentVolumeClaim(c, pvc.Name, ns), "Failed to delete PVC ", pvc.Name) pvc, err = c.CoreV1().PersistentVolumeClaims(ns).Get(pvc.Name, metav1.GetOptions{}) if !apierrs.IsNotFound(err) { diff --git a/test/e2e/storage/vsphere/pvc_label_selector.go b/test/e2e/storage/vsphere/pvc_label_selector.go index 9226cac96d1..4b4272cf16c 100644 --- a/test/e2e/storage/vsphere/pvc_label_selector.go +++ b/test/e2e/storage/vsphere/pvc_label_selector.go @@ -19,7 +19,7 @@ package vsphere import ( "time" - . "github.com/onsi/ginkgo" + "github.com/onsi/ginkgo" "k8s.io/api/core/v1" clientset "k8s.io/client-go/kubernetes" "k8s.io/kubernetes/test/e2e/framework" @@ -57,7 +57,7 @@ var _ = utils.SIGDescribe("PersistentVolumes [Feature:LabelSelector]", func() { err error nodeInfo *NodeInfo ) - BeforeEach(func() { + ginkgo.BeforeEach(func() { framework.SkipUnlessProviderIs("vsphere") c = f.ClientSet ns = f.Namespace.Name @@ -72,67 +72,67 @@ var _ = utils.SIGDescribe("PersistentVolumes [Feature:LabelSelector]", func() { }) utils.SIGDescribe("Selector-Label Volume Binding:vsphere", func() { - AfterEach(func() { - By("Running clean up actions") + ginkgo.AfterEach(func() { + ginkgo.By("Running clean up actions") if framework.ProviderIs("vsphere") { testCleanupVSpherePVClabelselector(c, ns, nodeInfo, volumePath, pv_ssd, pvc_ssd, pvc_vvol) } }) - It("should bind volume with claim for given label", func() { + ginkgo.It("should bind volume with claim for given label", func() { volumePath, pv_ssd, pvc_ssd, pvc_vvol, err = testSetupVSpherePVClabelselector(c, nodeInfo, ns, ssdlabels, vvollabels) framework.ExpectNoError(err) - By("wait for the pvc_ssd to bind with pv_ssd") + ginkgo.By("wait for the pvc_ssd to bind with pv_ssd") framework.ExpectNoError(framework.WaitOnPVandPVC(c, ns, pv_ssd, pvc_ssd)) - By("Verify status of pvc_vvol is pending") + ginkgo.By("Verify status of pvc_vvol is pending") err = framework.WaitForPersistentVolumeClaimPhase(v1.ClaimPending, c, ns, pvc_vvol.Name, 3*time.Second, 300*time.Second) framework.ExpectNoError(err) - By("delete pvc_ssd") + ginkgo.By("delete pvc_ssd") framework.ExpectNoError(framework.DeletePersistentVolumeClaim(c, pvc_ssd.Name, ns), "Failed to delete PVC ", pvc_ssd.Name) - By("verify pv_ssd is deleted") + ginkgo.By("verify pv_ssd is deleted") err = framework.WaitForPersistentVolumeDeleted(c, pv_ssd.Name, 3*time.Second, 300*time.Second) framework.ExpectNoError(err) volumePath = "" - By("delete pvc_vvol") + ginkgo.By("delete pvc_vvol") framework.ExpectNoError(framework.DeletePersistentVolumeClaim(c, pvc_vvol.Name, ns), "Failed to delete PVC ", pvc_vvol.Name) }) }) }) func testSetupVSpherePVClabelselector(c clientset.Interface, nodeInfo *NodeInfo, ns string, ssdlabels map[string]string, vvollabels map[string]string) (volumePath string, pv_ssd *v1.PersistentVolume, pvc_ssd *v1.PersistentVolumeClaim, pvc_vvol *v1.PersistentVolumeClaim, err error) { - By("creating vmdk") + ginkgo.By("creating vmdk") volumePath = "" volumePath, err = nodeInfo.VSphere.CreateVolume(&VolumeOptions{}, nodeInfo.DataCenterRef) if err != nil { return } - By("creating the pv with label volume-type:ssd") + ginkgo.By("creating the pv with label 
volume-type:ssd") pv_ssd = getVSpherePersistentVolumeSpec(volumePath, v1.PersistentVolumeReclaimDelete, ssdlabels) pv_ssd, err = c.CoreV1().PersistentVolumes().Create(pv_ssd) if err != nil { return } - By("creating pvc with label selector to match with volume-type:vvol") + ginkgo.By("creating pvc with label selector to match with volume-type:vvol") pvc_vvol = getVSpherePersistentVolumeClaimSpec(ns, vvollabels) pvc_vvol, err = c.CoreV1().PersistentVolumeClaims(ns).Create(pvc_vvol) if err != nil { return } - By("creating pvc with label selector to match with volume-type:ssd") + ginkgo.By("creating pvc with label selector to match with volume-type:ssd") pvc_ssd = getVSpherePersistentVolumeClaimSpec(ns, ssdlabels) pvc_ssd, err = c.CoreV1().PersistentVolumeClaims(ns).Create(pvc_ssd) return } func testCleanupVSpherePVClabelselector(c clientset.Interface, ns string, nodeInfo *NodeInfo, volumePath string, pv_ssd *v1.PersistentVolume, pvc_ssd *v1.PersistentVolumeClaim, pvc_vvol *v1.PersistentVolumeClaim) { - By("running testCleanupVSpherePVClabelselector") + ginkgo.By("running testCleanupVSpherePVClabelselector") if len(volumePath) > 0 { nodeInfo.VSphere.DeleteVolume(volumePath, nodeInfo.DataCenterRef) } diff --git a/test/e2e/storage/vsphere/vsphere_common.go b/test/e2e/storage/vsphere/vsphere_common.go index b85598f9ab5..a8950928d74 100644 --- a/test/e2e/storage/vsphere/vsphere_common.go +++ b/test/e2e/storage/vsphere/vsphere_common.go @@ -20,7 +20,7 @@ import ( "os" "strconv" - . "github.com/onsi/gomega" + "github.com/onsi/gomega" "k8s.io/kubernetes/test/e2e/framework" ) @@ -67,7 +67,7 @@ const ( func GetAndExpectStringEnvVar(varName string) string { varValue := os.Getenv(varName) - Expect(varValue).NotTo(BeEmpty(), "ENV "+varName+" is not set") + gomega.Expect(varValue).NotTo(gomega.BeEmpty(), "ENV "+varName+" is not set") return varValue } diff --git a/test/e2e/storage/vsphere/vsphere_scale.go b/test/e2e/storage/vsphere/vsphere_scale.go index 4959e66a8dc..3dd7e4538ba 100644 --- a/test/e2e/storage/vsphere/vsphere_scale.go +++ b/test/e2e/storage/vsphere/vsphere_scale.go @@ -20,8 +20,8 @@ import ( "fmt" "strconv" - . "github.com/onsi/ginkgo" - . 
"github.com/onsi/gomega" + "github.com/onsi/ginkgo" + "github.com/onsi/gomega" "k8s.io/api/core/v1" storageV1 "k8s.io/api/storage/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -67,7 +67,7 @@ var _ = utils.SIGDescribe("vcp at scale [Feature:vsphere] ", func() { scNames = []string{storageclass1, storageclass2, storageclass3, storageclass4} ) - BeforeEach(func() { + ginkgo.BeforeEach(func() { framework.SkipUnlessProviderIs("vsphere") Bootstrap(f) client = f.ClientSet @@ -79,8 +79,8 @@ var _ = utils.SIGDescribe("vcp at scale [Feature:vsphere] ", func() { volumesPerPod = GetAndExpectIntEnvVar(VCPScaleVolumesPerPod) numberOfInstances = GetAndExpectIntEnvVar(VCPScaleInstances) - Expect(numberOfInstances > 5).NotTo(BeTrue(), "Maximum allowed instances are 5") - Expect(numberOfInstances > volumeCount).NotTo(BeTrue(), "Number of instances should be less than the total volume count") + gomega.Expect(numberOfInstances > 5).NotTo(gomega.BeTrue(), "Maximum allowed instances are 5") + gomega.Expect(numberOfInstances > volumeCount).NotTo(gomega.BeTrue(), "Number of instances should be less than the total volume count") policyName = GetAndExpectStringEnvVar(SPBMPolicyName) datastoreName = GetAndExpectStringEnvVar(StorageClassDatastoreName) @@ -108,14 +108,14 @@ var _ = utils.SIGDescribe("vcp at scale [Feature:vsphere] ", func() { } }) - It("vsphere scale tests", func() { + ginkgo.It("vsphere scale tests", func() { var pvcClaimList []string nodeVolumeMap := make(map[string][]string) // Volumes will be provisioned with each different types of Storage Class scArrays := make([]*storageV1.StorageClass, len(scNames)) for index, scname := range scNames { // Create vSphere Storage Class - By(fmt.Sprintf("Creating Storage Class : %q", scname)) + ginkgo.By(fmt.Sprintf("Creating Storage Class : %q", scname)) var sc *storageV1.StorageClass scParams := make(map[string]string) var err error @@ -130,7 +130,7 @@ var _ = utils.SIGDescribe("vcp at scale [Feature:vsphere] ", func() { scParams[Datastore] = datastoreName } sc, err = client.StorageV1().StorageClasses().Create(getVSphereStorageClassSpec(scname, scParams, nil)) - Expect(sc).NotTo(BeNil(), "Storage class is empty") + gomega.Expect(sc).NotTo(gomega.BeNil(), "Storage class is empty") framework.ExpectNoError(err, "Failed to create storage class") defer client.StorageV1().StorageClasses().Delete(scname, nil) scArrays[index] = sc @@ -154,11 +154,11 @@ var _ = utils.SIGDescribe("vcp at scale [Feature:vsphere] ", func() { podList, err := client.CoreV1().Pods(namespace).List(metav1.ListOptions{}) for _, pod := range podList.Items { pvcClaimList = append(pvcClaimList, getClaimsForPod(&pod, volumesPerPod)...) 
- By("Deleting pod") + ginkgo.By("Deleting pod") err = framework.DeletePodWithWait(f, client, &pod) framework.ExpectNoError(err) } - By("Waiting for volumes to be detached from the node") + ginkgo.By("Waiting for volumes to be detached from the node") err = waitForVSphereDisksToDetach(nodeVolumeMap) framework.ExpectNoError(err) @@ -182,7 +182,7 @@ func getClaimsForPod(pod *v1.Pod, volumesPerPod int) []string { // VolumeCreateAndAttach peforms create and attach operations of vSphere persistent volumes at scale func VolumeCreateAndAttach(client clientset.Interface, namespace string, sc []*storageV1.StorageClass, volumeCountPerInstance int, volumesPerPod int, nodeSelectorList []*NodeSelector, nodeVolumeMapChan chan map[string][]string) { - defer GinkgoRecover() + defer ginkgo.GinkgoRecover() nodeVolumeMap := make(map[string][]string) nodeSelectorIndex := 0 for index := 0; index < volumeCountPerInstance; index = index + volumesPerPod { @@ -191,17 +191,17 @@ func VolumeCreateAndAttach(client clientset.Interface, namespace string, sc []*s } pvclaims := make([]*v1.PersistentVolumeClaim, volumesPerPod) for i := 0; i < volumesPerPod; i++ { - By("Creating PVC using the Storage Class") + ginkgo.By("Creating PVC using the Storage Class") pvclaim, err := framework.CreatePVC(client, namespace, getVSphereClaimSpecWithStorageClass(namespace, "2Gi", sc[index%len(sc)])) framework.ExpectNoError(err) pvclaims[i] = pvclaim } - By("Waiting for claim to be in bound phase") + ginkgo.By("Waiting for claim to be in bound phase") persistentvolumes, err := framework.WaitForPVClaimBoundPhase(client, pvclaims, framework.ClaimProvisionTimeout) framework.ExpectNoError(err) - By("Creating pod to attach PV to the node") + ginkgo.By("Creating pod to attach PV to the node") nodeSelector := nodeSelectorList[nodeSelectorIndex%len(nodeSelectorList)] // Create pod to attach Volume to Node pod, err := framework.CreatePod(client, namespace, map[string]string{nodeSelector.labelKey: nodeSelector.labelValue}, pvclaims, false, "") @@ -210,7 +210,7 @@ func VolumeCreateAndAttach(client clientset.Interface, namespace string, sc []*s for _, pv := range persistentvolumes { nodeVolumeMap[pod.Spec.NodeName] = append(nodeVolumeMap[pod.Spec.NodeName], pv.Spec.VsphereVolume.VolumePath) } - By("Verify the volume is accessible and available in the pod") + ginkgo.By("Verify the volume is accessible and available in the pod") verifyVSphereVolumesAccessible(client, pod, persistentvolumes) nodeSelectorIndex++ } diff --git a/test/e2e/storage/vsphere/vsphere_statefulsets.go b/test/e2e/storage/vsphere/vsphere_statefulsets.go index 0804b2245da..ae47c1be067 100644 --- a/test/e2e/storage/vsphere/vsphere_statefulsets.go +++ b/test/e2e/storage/vsphere/vsphere_statefulsets.go @@ -19,8 +19,8 @@ package vsphere import ( "fmt" - . "github.com/onsi/ginkgo" - . 
"github.com/onsi/gomega" + "github.com/onsi/ginkgo" + "github.com/onsi/gomega" apierrs "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" clientset "k8s.io/client-go/kubernetes" @@ -57,19 +57,19 @@ var _ = utils.SIGDescribe("vsphere statefulset", func() { namespace string client clientset.Interface ) - BeforeEach(func() { + ginkgo.BeforeEach(func() { framework.SkipUnlessProviderIs("vsphere") namespace = f.Namespace.Name client = f.ClientSet Bootstrap(f) }) - AfterEach(func() { + ginkgo.AfterEach(func() { e2elog.Logf("Deleting all statefulset in namespace: %v", namespace) framework.DeleteAllStatefulSets(client, namespace) }) - It("vsphere statefulset testing", func() { - By("Creating StorageClass for Statefulset") + ginkgo.It("vsphere statefulset testing", func() { + ginkgo.By("Creating StorageClass for Statefulset") scParameters := make(map[string]string) scParameters["diskformat"] = "thin" scSpec := getVSphereStorageClassSpec(storageclassname, scParameters, nil) @@ -77,7 +77,7 @@ var _ = utils.SIGDescribe("vsphere statefulset", func() { framework.ExpectNoError(err) defer client.StorageV1().StorageClasses().Delete(sc.Name, nil) - By("Creating statefulset") + ginkgo.By("Creating statefulset") statefulsetTester := framework.NewStatefulSetTester(client) statefulset := statefulsetTester.CreateStatefulSet(manifestPath, namespace) replicas := *(statefulset.Spec.Replicas) @@ -85,8 +85,8 @@ var _ = utils.SIGDescribe("vsphere statefulset", func() { statefulsetTester.WaitForStatusReadyReplicas(statefulset, replicas) framework.ExpectNoError(statefulsetTester.CheckMount(statefulset, mountPath)) ssPodsBeforeScaleDown := statefulsetTester.GetPodList(statefulset) - Expect(ssPodsBeforeScaleDown.Items).NotTo(BeEmpty(), fmt.Sprintf("Unable to get list of Pods from the Statefulset: %v", statefulset.Name)) - Expect(len(ssPodsBeforeScaleDown.Items) == int(replicas)).To(BeTrue(), "Number of Pods in the statefulset should match with number of replicas") + gomega.Expect(ssPodsBeforeScaleDown.Items).NotTo(gomega.BeEmpty(), fmt.Sprintf("Unable to get list of Pods from the Statefulset: %v", statefulset.Name)) + gomega.Expect(len(ssPodsBeforeScaleDown.Items) == int(replicas)).To(gomega.BeTrue(), "Number of Pods in the statefulset should match with number of replicas") // Get the list of Volumes attached to Pods before scale down volumesBeforeScaleDown := make(map[string]string) @@ -101,17 +101,17 @@ var _ = utils.SIGDescribe("vsphere statefulset", func() { } } - By(fmt.Sprintf("Scaling down statefulsets to number of Replica: %v", replicas-1)) + ginkgo.By(fmt.Sprintf("Scaling down statefulsets to number of Replica: %v", replicas-1)) _, scaledownErr := statefulsetTester.Scale(statefulset, replicas-1) framework.ExpectNoError(scaledownErr) statefulsetTester.WaitForStatusReadyReplicas(statefulset, replicas-1) // After scale down, verify vsphere volumes are detached from deleted pods - By("Verify Volumes are detached from Nodes after Statefulsets is scaled down") + ginkgo.By("Verify Volumes are detached from Nodes after Statefulsets is scaled down") for _, sspod := range ssPodsBeforeScaleDown.Items { _, err := client.CoreV1().Pods(namespace).Get(sspod.Name, metav1.GetOptions{}) if err != nil { - Expect(apierrs.IsNotFound(err), BeTrue()) + gomega.Expect(apierrs.IsNotFound(err), gomega.BeTrue()) for _, volumespec := range sspod.Spec.Volumes { if volumespec.PersistentVolumeClaim != nil { vSpherediskPath := getvSphereVolumePathFromClaim(client, statefulset.Namespace, 
volumespec.PersistentVolumeClaim.ClaimName) @@ -122,18 +122,18 @@ var _ = utils.SIGDescribe("vsphere statefulset", func() { } } - By(fmt.Sprintf("Scaling up statefulsets to number of Replica: %v", replicas)) + ginkgo.By(fmt.Sprintf("Scaling up statefulsets to number of Replica: %v", replicas)) _, scaleupErr := statefulsetTester.Scale(statefulset, replicas) framework.ExpectNoError(scaleupErr) statefulsetTester.WaitForStatusReplicas(statefulset, replicas) statefulsetTester.WaitForStatusReadyReplicas(statefulset, replicas) ssPodsAfterScaleUp := statefulsetTester.GetPodList(statefulset) - Expect(ssPodsAfterScaleUp.Items).NotTo(BeEmpty(), fmt.Sprintf("Unable to get list of Pods from the Statefulset: %v", statefulset.Name)) - Expect(len(ssPodsAfterScaleUp.Items) == int(replicas)).To(BeTrue(), "Number of Pods in the statefulset should match with number of replicas") + gomega.Expect(ssPodsAfterScaleUp.Items).NotTo(gomega.BeEmpty(), fmt.Sprintf("Unable to get list of Pods from the Statefulset: %v", statefulset.Name)) + gomega.Expect(len(ssPodsAfterScaleUp.Items) == int(replicas)).To(gomega.BeTrue(), "Number of Pods in the statefulset should match with number of replicas") // After scale up, verify all vsphere volumes are attached to node VMs. - By("Verify all volumes are attached to Nodes after Statefulsets is scaled up") + ginkgo.By("Verify all volumes are attached to Nodes after Statefulsets is scaled up") for _, sspod := range ssPodsAfterScaleUp.Items { err := framework.WaitForPodsReady(client, statefulset.Namespace, sspod.Name, 0) framework.ExpectNoError(err) @@ -144,9 +144,9 @@ var _ = utils.SIGDescribe("vsphere statefulset", func() { vSpherediskPath := getvSphereVolumePathFromClaim(client, statefulset.Namespace, volumespec.PersistentVolumeClaim.ClaimName) e2elog.Logf("Verify Volume: %q is attached to the Node: %q", vSpherediskPath, sspod.Spec.NodeName) // Verify scale up has re-attached the same volumes and not introduced new volume - Expect(volumesBeforeScaleDown[vSpherediskPath] == "").To(BeFalse()) + gomega.Expect(volumesBeforeScaleDown[vSpherediskPath] == "").To(gomega.BeFalse()) isVolumeAttached, verifyDiskAttachedError := diskIsAttached(vSpherediskPath, sspod.Spec.NodeName) - Expect(isVolumeAttached).To(BeTrue()) + gomega.Expect(isVolumeAttached).To(gomega.BeTrue()) framework.ExpectNoError(verifyDiskAttachedError) } } diff --git a/test/e2e/storage/vsphere/vsphere_stress.go b/test/e2e/storage/vsphere/vsphere_stress.go index 87407acb69b..d8e8ee4b231 100644 --- a/test/e2e/storage/vsphere/vsphere_stress.go +++ b/test/e2e/storage/vsphere/vsphere_stress.go @@ -20,8 +20,8 @@ import ( "fmt" "sync" - . "github.com/onsi/ginkgo" - . "github.com/onsi/gomega" + "github.com/onsi/ginkgo" + "github.com/onsi/gomega" "k8s.io/api/core/v1" storageV1 "k8s.io/api/storage/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -53,34 +53,34 @@ var _ = utils.SIGDescribe("vsphere cloud provider stress [Feature:vsphere]", fun scNames = []string{storageclass1, storageclass2, storageclass3, storageclass4} ) - BeforeEach(func() { + ginkgo.BeforeEach(func() { framework.SkipUnlessProviderIs("vsphere") client = f.ClientSet namespace = f.Namespace.Name nodeList := framework.GetReadySchedulableNodesOrDie(f.ClientSet) - Expect(nodeList.Items).NotTo(BeEmpty(), "Unable to find ready and schedulable Node") + gomega.Expect(nodeList.Items).NotTo(gomega.BeEmpty(), "Unable to find ready and schedulable Node") // if VCP_STRESS_INSTANCES = 12 and VCP_STRESS_ITERATIONS is 10. 12 threads will run in parallel for 10 times. 
// Resulting 120 Volumes and POD Creation. Volumes will be provisioned with each different types of Storage Class, // Each iteration creates PVC, verify PV is provisioned, then creates a pod, verify volume is attached to the node, and then delete the pod and delete pvc. instances = GetAndExpectIntEnvVar(VCPStressInstances) - Expect(instances <= volumesPerNode*len(nodeList.Items)).To(BeTrue(), fmt.Sprintf("Number of Instances should be less or equal: %v", volumesPerNode*len(nodeList.Items))) - Expect(instances > len(scNames)).To(BeTrue(), "VCP_STRESS_INSTANCES should be greater than 3 to utilize all 4 types of storage classes") + gomega.Expect(instances <= volumesPerNode*len(nodeList.Items)).To(gomega.BeTrue(), fmt.Sprintf("Number of Instances should be less or equal: %v", volumesPerNode*len(nodeList.Items))) + gomega.Expect(instances > len(scNames)).To(gomega.BeTrue(), "VCP_STRESS_INSTANCES should be greater than 3 to utilize all 4 types of storage classes") iterations = GetAndExpectIntEnvVar(VCPStressIterations) framework.ExpectNoError(err, "Error Parsing VCP_STRESS_ITERATIONS") - Expect(iterations > 0).To(BeTrue(), "VCP_STRESS_ITERATIONS should be greater than 0") + gomega.Expect(iterations > 0).To(gomega.BeTrue(), "VCP_STRESS_ITERATIONS should be greater than 0") policyName = GetAndExpectStringEnvVar(SPBMPolicyName) datastoreName = GetAndExpectStringEnvVar(StorageClassDatastoreName) }) - It("vsphere stress tests", func() { + ginkgo.It("vsphere stress tests", func() { scArrays := make([]*storageV1.StorageClass, len(scNames)) for index, scname := range scNames { // Create vSphere Storage Class - By(fmt.Sprintf("Creating Storage Class : %v", scname)) + ginkgo.By(fmt.Sprintf("Creating Storage Class : %v", scname)) var sc *storageV1.StorageClass var err error switch scname { @@ -103,7 +103,7 @@ var _ = utils.SIGDescribe("vsphere cloud provider stress [Feature:vsphere]", fun scWithDatastoreSpec := getVSphereStorageClassSpec(storageclass4, scWithDSParameters, nil) sc, err = client.StorageV1().StorageClasses().Create(scWithDatastoreSpec) } - Expect(sc).NotTo(BeNil()) + gomega.Expect(sc).NotTo(gomega.BeNil()) framework.ExpectNoError(err) defer client.StorageV1().StorageClasses().Delete(scname, nil) scArrays[index] = sc @@ -123,50 +123,50 @@ var _ = utils.SIGDescribe("vsphere cloud provider stress [Feature:vsphere]", fun // goroutine to perform volume lifecycle operations in parallel func PerformVolumeLifeCycleInParallel(f *framework.Framework, client clientset.Interface, namespace string, instanceId string, sc *storageV1.StorageClass, iterations int, wg *sync.WaitGroup) { defer wg.Done() - defer GinkgoRecover() + defer ginkgo.GinkgoRecover() for iterationCount := 0; iterationCount < iterations; iterationCount++ { logPrefix := fmt.Sprintf("Instance: [%v], Iteration: [%v] :", instanceId, iterationCount+1) - By(fmt.Sprintf("%v Creating PVC using the Storage Class: %v", logPrefix, sc.Name)) + ginkgo.By(fmt.Sprintf("%v Creating PVC using the Storage Class: %v", logPrefix, sc.Name)) pvclaim, err := framework.CreatePVC(client, namespace, getVSphereClaimSpecWithStorageClass(namespace, "1Gi", sc)) framework.ExpectNoError(err) defer framework.DeletePersistentVolumeClaim(client, pvclaim.Name, namespace) var pvclaims []*v1.PersistentVolumeClaim pvclaims = append(pvclaims, pvclaim) - By(fmt.Sprintf("%v Waiting for claim: %v to be in bound phase", logPrefix, pvclaim.Name)) + ginkgo.By(fmt.Sprintf("%v Waiting for claim: %v to be in bound phase", logPrefix, pvclaim.Name)) persistentvolumes, err := 
framework.WaitForPVClaimBoundPhase(client, pvclaims, framework.ClaimProvisionTimeout) framework.ExpectNoError(err) - By(fmt.Sprintf("%v Creating Pod using the claim: %v", logPrefix, pvclaim.Name)) + ginkgo.By(fmt.Sprintf("%v Creating Pod using the claim: %v", logPrefix, pvclaim.Name)) // Create pod to attach Volume to Node pod, err := framework.CreatePod(client, namespace, nil, pvclaims, false, "") framework.ExpectNoError(err) - By(fmt.Sprintf("%v Waiting for the Pod: %v to be in the running state", logPrefix, pod.Name)) - Expect(f.WaitForPodRunningSlow(pod.Name)).NotTo(HaveOccurred()) + ginkgo.By(fmt.Sprintf("%v Waiting for the Pod: %v to be in the running state", logPrefix, pod.Name)) + gomega.Expect(f.WaitForPodRunningSlow(pod.Name)).NotTo(gomega.HaveOccurred()) // Get the copy of the Pod to know the assigned node name. pod, err = client.CoreV1().Pods(namespace).Get(pod.Name, metav1.GetOptions{}) framework.ExpectNoError(err) - By(fmt.Sprintf("%v Verifing the volume: %v is attached to the node VM: %v", logPrefix, persistentvolumes[0].Spec.VsphereVolume.VolumePath, pod.Spec.NodeName)) + ginkgo.By(fmt.Sprintf("%v Verifing the volume: %v is attached to the node VM: %v", logPrefix, persistentvolumes[0].Spec.VsphereVolume.VolumePath, pod.Spec.NodeName)) isVolumeAttached, verifyDiskAttachedError := diskIsAttached(persistentvolumes[0].Spec.VsphereVolume.VolumePath, pod.Spec.NodeName) - Expect(isVolumeAttached).To(BeTrue()) - Expect(verifyDiskAttachedError).NotTo(HaveOccurred()) + gomega.Expect(isVolumeAttached).To(gomega.BeTrue()) + gomega.Expect(verifyDiskAttachedError).NotTo(gomega.HaveOccurred()) - By(fmt.Sprintf("%v Verifing the volume: %v is accessible in the pod: %v", logPrefix, persistentvolumes[0].Spec.VsphereVolume.VolumePath, pod.Name)) + ginkgo.By(fmt.Sprintf("%v Verifing the volume: %v is accessible in the pod: %v", logPrefix, persistentvolumes[0].Spec.VsphereVolume.VolumePath, pod.Name)) verifyVSphereVolumesAccessible(client, pod, persistentvolumes) - By(fmt.Sprintf("%v Deleting pod: %v", logPrefix, pod.Name)) + ginkgo.By(fmt.Sprintf("%v Deleting pod: %v", logPrefix, pod.Name)) err = framework.DeletePodWithWait(f, client, pod) framework.ExpectNoError(err) - By(fmt.Sprintf("%v Waiting for volume: %v to be detached from the node: %v", logPrefix, persistentvolumes[0].Spec.VsphereVolume.VolumePath, pod.Spec.NodeName)) + ginkgo.By(fmt.Sprintf("%v Waiting for volume: %v to be detached from the node: %v", logPrefix, persistentvolumes[0].Spec.VsphereVolume.VolumePath, pod.Spec.NodeName)) err = waitForVSphereDiskToDetach(persistentvolumes[0].Spec.VsphereVolume.VolumePath, pod.Spec.NodeName) framework.ExpectNoError(err) - By(fmt.Sprintf("%v Deleting the Claim: %v", logPrefix, pvclaim.Name)) - Expect(framework.DeletePersistentVolumeClaim(client, pvclaim.Name, namespace)).NotTo(HaveOccurred()) + ginkgo.By(fmt.Sprintf("%v Deleting the Claim: %v", logPrefix, pvclaim.Name)) + gomega.Expect(framework.DeletePersistentVolumeClaim(client, pvclaim.Name, namespace)).NotTo(gomega.HaveOccurred()) } } diff --git a/test/e2e/storage/vsphere/vsphere_utils.go b/test/e2e/storage/vsphere/vsphere_utils.go index 6a3360c8c47..c183bf3a481 100644 --- a/test/e2e/storage/vsphere/vsphere_utils.go +++ b/test/e2e/storage/vsphere/vsphere_utils.go @@ -24,8 +24,8 @@ import ( "strings" "time" - . "github.com/onsi/ginkgo" - . 
"github.com/onsi/gomega" + "github.com/onsi/ginkgo" + "github.com/onsi/gomega" "github.com/vmware/govmomi/find" "github.com/vmware/govmomi/object" "github.com/vmware/govmomi/vim25/mo" @@ -404,7 +404,7 @@ func verifyVSphereVolumesAccessible(c clientset.Interface, pod *v1.Pod, persiste // Verify disks are attached to the node isAttached, err := diskIsAttached(pv.Spec.VsphereVolume.VolumePath, nodeName) framework.ExpectNoError(err) - Expect(isAttached).To(BeTrue(), fmt.Sprintf("disk %v is not attached with the node", pv.Spec.VsphereVolume.VolumePath)) + gomega.Expect(isAttached).To(gomega.BeTrue(), fmt.Sprintf("disk %v is not attached with the node", pv.Spec.VsphereVolume.VolumePath)) // Verify Volumes are accessible filepath := filepath.Join("/mnt/", fmt.Sprintf("volume%v", index+1), "/emptyFile.txt") _, err = framework.LookForStringInPodExec(namespace, pod.Name, []string{"/bin/touch", filepath}, "", time.Minute) @@ -441,7 +441,7 @@ func verifyVolumeCreationOnRightZone(persistentvolumes []*v1.PersistentVolume, n } } } - Expect(commonDatastores).To(ContainElement(datastoreRef.Value), "PV was created in an unsupported zone.") + gomega.Expect(commonDatastores).To(gomega.ContainElement(datastoreRef.Value), "PV was created in an unsupported zone.") } } @@ -631,7 +631,7 @@ func getVMXFilePath(vmObject *object.VirtualMachine) (vmxPath string) { var nodeVM mo.VirtualMachine err := vmObject.Properties(ctx, vmObject.Reference(), []string{"config.files"}, &nodeVM) framework.ExpectNoError(err) - Expect(nodeVM.Config).NotTo(BeNil()) + gomega.Expect(nodeVM.Config).NotTo(gomega.BeNil()) vmxPath = nodeVM.Config.Files.VmPathName e2elog.Logf("vmx file path is %s", vmxPath) @@ -643,7 +643,7 @@ func verifyReadyNodeCount(client clientset.Interface, expectedNodes int) bool { numNodes := 0 for i := 0; i < 36; i++ { nodeList := framework.GetReadySchedulableNodesOrDie(client) - Expect(nodeList.Items).NotTo(BeEmpty(), "Unable to find ready and schedulable Node") + gomega.Expect(nodeList.Items).NotTo(gomega.BeEmpty(), "Unable to find ready and schedulable Node") numNodes = len(nodeList.Items) if numNodes == expectedNodes { @@ -777,7 +777,7 @@ func getUUIDFromProviderID(providerID string) string { // GetAllReadySchedulableNodeInfos returns NodeInfo objects for all nodes with Ready and schedulable state func GetReadySchedulableNodeInfos() []*NodeInfo { nodeList := framework.GetReadySchedulableNodesOrDie(f.ClientSet) - Expect(nodeList.Items).NotTo(BeEmpty(), "Unable to find ready and schedulable Node") + gomega.Expect(nodeList.Items).NotTo(gomega.BeEmpty(), "Unable to find ready and schedulable Node") var nodesInfo []*NodeInfo for _, node := range nodeList.Items { nodeInfo := TestContext.NodeMapper.GetNodeInfo(node.Name) @@ -793,7 +793,7 @@ func GetReadySchedulableNodeInfos() []*NodeInfo { // and it's associated NodeInfo object is returned. 
func GetReadySchedulableRandomNodeInfo() *NodeInfo { nodesInfo := GetReadySchedulableNodeInfos() - Expect(nodesInfo).NotTo(BeEmpty()) + gomega.Expect(nodesInfo).NotTo(gomega.BeEmpty()) return nodesInfo[rand.Int()%len(nodesInfo)] } @@ -815,7 +815,7 @@ func invokeVCenterServiceControl(command, service, host string) error { func expectVolumeToBeAttached(nodeName, volumePath string) { isAttached, err := diskIsAttached(volumePath, nodeName) framework.ExpectNoError(err) - Expect(isAttached).To(BeTrue(), fmt.Sprintf("disk: %s is not attached with the node", volumePath)) + gomega.Expect(isAttached).To(gomega.BeTrue(), fmt.Sprintf("disk: %s is not attached with the node", volumePath)) } // expectVolumesToBeAttached checks if the given Volumes are attached to the @@ -824,7 +824,7 @@ func expectVolumesToBeAttached(pods []*v1.Pod, volumePaths []string) { for i, pod := range pods { nodeName := pod.Spec.NodeName volumePath := volumePaths[i] - By(fmt.Sprintf("Verifying that volume %v is attached to node %v", volumePath, nodeName)) + ginkgo.By(fmt.Sprintf("Verifying that volume %v is attached to node %v", volumePath, nodeName)) expectVolumeToBeAttached(nodeName, volumePath) } } @@ -835,7 +835,7 @@ func expectFilesToBeAccessible(namespace string, pods []*v1.Pod, filePaths []str for i, pod := range pods { podName := pod.Name filePath := filePaths[i] - By(fmt.Sprintf("Verifying that file %v is accessible on pod %v", filePath, podName)) + ginkgo.By(fmt.Sprintf("Verifying that file %v is accessible on pod %v", filePath, podName)) verifyFilesExistOnVSphereVolume(namespace, podName, filePath) } } @@ -861,7 +861,7 @@ func expectFileContentsToMatch(namespace string, pods []*v1.Pod, filePaths []str for i, pod := range pods { podName := pod.Name filePath := filePaths[i] - By(fmt.Sprintf("Matching file content for %v on pod %v", filePath, podName)) + ginkgo.By(fmt.Sprintf("Matching file content for %v on pod %v", filePath, podName)) expectFileContentToMatch(namespace, podName, filePath, contents[i]) } } diff --git a/test/e2e/storage/vsphere/vsphere_volume_cluster_ds.go b/test/e2e/storage/vsphere/vsphere_volume_cluster_ds.go index a655c9e8598..f6cdfa55885 100644 --- a/test/e2e/storage/vsphere/vsphere_volume_cluster_ds.go +++ b/test/e2e/storage/vsphere/vsphere_volume_cluster_ds.go @@ -17,8 +17,8 @@ limitations under the License. package vsphere import ( - . "github.com/onsi/ginkgo" - . "github.com/onsi/gomega" + "github.com/onsi/ginkgo" + "github.com/onsi/gomega" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" clientset "k8s.io/client-go/kubernetes" "k8s.io/kubernetes/test/e2e/framework" @@ -46,7 +46,7 @@ var _ = utils.SIGDescribe("Volume Provisioning On Clustered Datastore [Feature:v nodeInfo *NodeInfo ) - BeforeEach(func() { + ginkgo.BeforeEach(func() { framework.SkipUnlessProviderIs("vsphere") Bootstrap(f) client = f.ClientSet @@ -66,10 +66,10 @@ var _ = utils.SIGDescribe("Volume Provisioning On Clustered Datastore [Feature:v 6. 
Delete the volume */ - It("verify static provisioning on clustered datastore", func() { + ginkgo.It("verify static provisioning on clustered datastore", func() { var volumePath string - By("creating a test vsphere volume") + ginkgo.By("creating a test vsphere volume") volumeOptions := new(VolumeOptions) volumeOptions.CapacityKB = 2097152 volumeOptions.Name = "e2e-vmdk-" + namespace @@ -79,31 +79,31 @@ var _ = utils.SIGDescribe("Volume Provisioning On Clustered Datastore [Feature:v framework.ExpectNoError(err) defer func() { - By("Deleting the vsphere volume") + ginkgo.By("Deleting the vsphere volume") nodeInfo.VSphere.DeleteVolume(volumePath, nodeInfo.DataCenterRef) }() podspec := getVSpherePodSpecWithVolumePaths([]string{volumePath}, nil, nil) - By("Creating pod") + ginkgo.By("Creating pod") pod, err := client.CoreV1().Pods(namespace).Create(podspec) framework.ExpectNoError(err) - By("Waiting for pod to be ready") - Expect(framework.WaitForPodNameRunningInNamespace(client, pod.Name, namespace)).To(Succeed()) + ginkgo.By("Waiting for pod to be ready") + gomega.Expect(framework.WaitForPodNameRunningInNamespace(client, pod.Name, namespace)).To(gomega.Succeed()) // get fresh pod info pod, err = client.CoreV1().Pods(namespace).Get(pod.Name, metav1.GetOptions{}) framework.ExpectNoError(err) nodeName := pod.Spec.NodeName - By("Verifying volume is attached") + ginkgo.By("Verifying volume is attached") expectVolumeToBeAttached(nodeName, volumePath) - By("Deleting pod") + ginkgo.By("Deleting pod") err = framework.DeletePodWithWait(f, client, pod) framework.ExpectNoError(err) - By("Waiting for volumes to be detached from the node") + ginkgo.By("Waiting for volumes to be detached from the node") err = waitForVSphereDiskToDetach(volumePath, nodeName) framework.ExpectNoError(err) }) @@ -113,7 +113,7 @@ var _ = utils.SIGDescribe("Volume Provisioning On Clustered Datastore [Feature:v 1. Create storage class parameter and specify datastore to be a clustered datastore name 2. invokeValidPolicyTest - util to do e2e dynamic provision test */ - It("verify dynamic provision with default parameter on clustered datastore", func() { + ginkgo.It("verify dynamic provision with default parameter on clustered datastore", func() { scParameters[Datastore] = clusterDatastore invokeValidPolicyTest(f, client, namespace, scParameters) }) @@ -123,7 +123,7 @@ var _ = utils.SIGDescribe("Volume Provisioning On Clustered Datastore [Feature:v 1. Create storage class parameter and specify storage policy to be a tag based spbm policy 2. invokeValidPolicyTest - util to do e2e dynamic provision test */ - It("verify dynamic provision with spbm policy on clustered datastore", func() { + ginkgo.It("verify dynamic provision with spbm policy on clustered datastore", func() { policyDatastoreCluster := GetAndExpectStringEnvVar(SPBMPolicyDataStoreCluster) scParameters[SpbmStoragePolicy] = policyDatastoreCluster invokeValidPolicyTest(f, client, namespace, scParameters) diff --git a/test/e2e/storage/vsphere/vsphere_volume_datastore.go b/test/e2e/storage/vsphere/vsphere_volume_datastore.go index 2e2774a1e9d..5e1d74f227b 100644 --- a/test/e2e/storage/vsphere/vsphere_volume_datastore.go +++ b/test/e2e/storage/vsphere/vsphere_volume_datastore.go @@ -21,8 +21,8 @@ import ( "strings" "time" - . "github.com/onsi/ginkgo" - . 
"github.com/onsi/gomega" + "github.com/onsi/ginkgo" + "github.com/onsi/gomega" "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" clientset "k8s.io/client-go/kubernetes" @@ -52,7 +52,7 @@ var _ = utils.SIGDescribe("Volume Provisioning on Datastore [Feature:vsphere]", namespace string scParameters map[string]string ) - BeforeEach(func() { + ginkgo.BeforeEach(func() { framework.SkipUnlessProviderIs("vsphere") Bootstrap(f) client = f.ClientSet @@ -64,12 +64,12 @@ var _ = utils.SIGDescribe("Volume Provisioning on Datastore [Feature:vsphere]", } }) - It("verify dynamically provisioned pv using storageclass fails on an invalid datastore", func() { - By("Invoking Test for invalid datastore") + ginkgo.It("verify dynamically provisioned pv using storageclass fails on an invalid datastore", func() { + ginkgo.By("Invoking Test for invalid datastore") scParameters[Datastore] = InvalidDatastore scParameters[DiskFormat] = ThinDisk err := invokeInvalidDatastoreTestNeg(client, namespace, scParameters) - Expect(err).To(HaveOccurred()) + gomega.Expect(err).To(gomega.HaveOccurred()) errorMsg := `Failed to provision volume with StorageClass \"` + DatastoreSCName + `\": Datastore ` + InvalidDatastore + ` not found` if !strings.Contains(err.Error(), errorMsg) { framework.ExpectNoError(err, errorMsg) @@ -78,19 +78,19 @@ var _ = utils.SIGDescribe("Volume Provisioning on Datastore [Feature:vsphere]", }) func invokeInvalidDatastoreTestNeg(client clientset.Interface, namespace string, scParameters map[string]string) error { - By("Creating Storage Class With Invalid Datastore") + ginkgo.By("Creating Storage Class With Invalid Datastore") storageclass, err := client.StorageV1().StorageClasses().Create(getVSphereStorageClassSpec(DatastoreSCName, scParameters, nil)) framework.ExpectNoError(err, fmt.Sprintf("Failed to create storage class with err: %v", err)) defer client.StorageV1().StorageClasses().Delete(storageclass.Name, nil) - By("Creating PVC using the Storage Class") + ginkgo.By("Creating PVC using the Storage Class") pvclaim, err := framework.CreatePVC(client, namespace, getVSphereClaimSpecWithStorageClass(namespace, "2Gi", storageclass)) framework.ExpectNoError(err) defer framework.DeletePersistentVolumeClaim(client, pvclaim.Name, namespace) - By("Expect claim to fail provisioning volume") + ginkgo.By("Expect claim to fail provisioning volume") err = framework.WaitForPersistentVolumeClaimPhase(v1.ClaimBound, client, pvclaim.Namespace, pvclaim.Name, framework.Poll, 2*time.Minute) - Expect(err).To(HaveOccurred()) + gomega.Expect(err).To(gomega.HaveOccurred()) eventList, err := client.CoreV1().Events(pvclaim.Namespace).List(metav1.ListOptions{}) return fmt.Errorf("Failure message: %+q", eventList.Items[0].Message) diff --git a/test/e2e/storage/vsphere/vsphere_volume_diskformat.go b/test/e2e/storage/vsphere/vsphere_volume_diskformat.go index 3616f8fb7ec..e5ba9cc23d7 100644 --- a/test/e2e/storage/vsphere/vsphere_volume_diskformat.go +++ b/test/e2e/storage/vsphere/vsphere_volume_diskformat.go @@ -20,8 +20,8 @@ import ( "context" "path/filepath" - . "github.com/onsi/ginkgo" - . 
"github.com/onsi/gomega" + "github.com/onsi/ginkgo" + "github.com/onsi/gomega" "github.com/vmware/govmomi/object" "github.com/vmware/govmomi/vim25/types" @@ -65,7 +65,7 @@ var _ = utils.SIGDescribe("Volume Disk Format [Feature:vsphere]", func() { nodeKeyValueLabel map[string]string nodeLabelValue string ) - BeforeEach(func() { + ginkgo.BeforeEach(func() { framework.SkipUnlessProviderIs("vsphere") Bootstrap(f) client = f.ClientSet @@ -86,16 +86,16 @@ var _ = utils.SIGDescribe("Volume Disk Format [Feature:vsphere]", func() { } }) - It("verify disk format type - eagerzeroedthick is honored for dynamically provisioned pv using storageclass", func() { - By("Invoking Test for diskformat: eagerzeroedthick") + ginkgo.It("verify disk format type - eagerzeroedthick is honored for dynamically provisioned pv using storageclass", func() { + ginkgo.By("Invoking Test for diskformat: eagerzeroedthick") invokeTest(f, client, namespace, nodeName, nodeKeyValueLabel, "eagerzeroedthick") }) - It("verify disk format type - zeroedthick is honored for dynamically provisioned pv using storageclass", func() { - By("Invoking Test for diskformat: zeroedthick") + ginkgo.It("verify disk format type - zeroedthick is honored for dynamically provisioned pv using storageclass", func() { + ginkgo.By("Invoking Test for diskformat: zeroedthick") invokeTest(f, client, namespace, nodeName, nodeKeyValueLabel, "zeroedthick") }) - It("verify disk format type - thin is honored for dynamically provisioned pv using storageclass", func() { - By("Invoking Test for diskformat: thin") + ginkgo.It("verify disk format type - thin is honored for dynamically provisioned pv using storageclass", func() { + ginkgo.By("Invoking Test for diskformat: thin") invokeTest(f, client, namespace, nodeName, nodeKeyValueLabel, "thin") }) }) @@ -106,14 +106,14 @@ func invokeTest(f *framework.Framework, client clientset.Interface, namespace st scParameters := make(map[string]string) scParameters["diskformat"] = diskFormat - By("Creating Storage Class With DiskFormat") + ginkgo.By("Creating Storage Class With DiskFormat") storageClassSpec := getVSphereStorageClassSpec("thinsc", scParameters, nil) storageclass, err := client.StorageV1().StorageClasses().Create(storageClassSpec) framework.ExpectNoError(err) defer client.StorageV1().StorageClasses().Delete(storageclass.Name, nil) - By("Creating PVC using the Storage Class") + ginkgo.By("Creating PVC using the Storage Class") pvclaimSpec := getVSphereClaimSpecWithStorageClass(namespace, "2Gi", storageclass) pvclaim, err := client.CoreV1().PersistentVolumeClaims(namespace).Create(pvclaimSpec) framework.ExpectNoError(err) @@ -122,7 +122,7 @@ func invokeTest(f *framework.Framework, client clientset.Interface, namespace st client.CoreV1().PersistentVolumeClaims(namespace).Delete(pvclaimSpec.Name, nil) }() - By("Waiting for claim to be in bound phase") + ginkgo.By("Waiting for claim to be in bound phase") err = framework.WaitForPersistentVolumeClaimPhase(v1.ClaimBound, client, pvclaim.Namespace, pvclaim.Name, framework.Poll, framework.ClaimProvisionTimeout) framework.ExpectNoError(err) @@ -138,32 +138,32 @@ func invokeTest(f *framework.Framework, client clientset.Interface, namespace st PV is required to be attached to the Node. 
so that using govmomi API we can grab Disk's Backing Info to check EagerlyScrub and ThinProvisioned property */ - By("Creating pod to attach PV to the node") + ginkgo.By("Creating pod to attach PV to the node") // Create pod to attach Volume to Node podSpec := getVSpherePodSpecWithClaim(pvclaim.Name, nodeKeyValueLabel, "while true ; do sleep 2 ; done") pod, err := client.CoreV1().Pods(namespace).Create(podSpec) framework.ExpectNoError(err) - By("Waiting for pod to be running") - Expect(framework.WaitForPodNameRunningInNamespace(client, pod.Name, namespace)).To(Succeed()) + ginkgo.By("Waiting for pod to be running") + gomega.Expect(framework.WaitForPodNameRunningInNamespace(client, pod.Name, namespace)).To(gomega.Succeed()) isAttached, err := diskIsAttached(pv.Spec.VsphereVolume.VolumePath, nodeName) - Expect(isAttached).To(BeTrue()) + gomega.Expect(isAttached).To(gomega.BeTrue()) framework.ExpectNoError(err) - By("Verify Disk Format") - Expect(verifyDiskFormat(client, nodeName, pv.Spec.VsphereVolume.VolumePath, diskFormat)).To(BeTrue(), "DiskFormat Verification Failed") + ginkgo.By("Verify Disk Format") + gomega.Expect(verifyDiskFormat(client, nodeName, pv.Spec.VsphereVolume.VolumePath, diskFormat)).To(gomega.BeTrue(), "DiskFormat Verification Failed") var volumePaths []string volumePaths = append(volumePaths, pv.Spec.VsphereVolume.VolumePath) - By("Delete pod and wait for volume to be detached from node") + ginkgo.By("Delete pod and wait for volume to be detached from node") deletePodAndWaitForVolumeToDetach(f, client, pod, nodeName, volumePaths) } func verifyDiskFormat(client clientset.Interface, nodeName string, pvVolumePath string, diskFormat string) bool { - By("Verifing disk format") + ginkgo.By("Verifing disk format") eagerlyScrub := false thinProvisioned := false diskFound := false @@ -194,7 +194,7 @@ func verifyDiskFormat(client clientset.Interface, nodeName string, pvVolumePath } } - Expect(diskFound).To(BeTrue(), "Failed to find disk") + gomega.Expect(diskFound).To(gomega.BeTrue(), "Failed to find disk") isDiskFormatCorrect := false if diskFormat == "eagerzeroedthick" { if eagerlyScrub == true && thinProvisioned == false { diff --git a/test/e2e/storage/vsphere/vsphere_volume_disksize.go b/test/e2e/storage/vsphere/vsphere_volume_disksize.go index b46d322e11b..3289b0eebcb 100644 --- a/test/e2e/storage/vsphere/vsphere_volume_disksize.go +++ b/test/e2e/storage/vsphere/vsphere_volume_disksize.go @@ -19,8 +19,8 @@ package vsphere import ( "time" - . "github.com/onsi/ginkgo" - . 
"github.com/onsi/gomega" + "github.com/onsi/ginkgo" + "github.com/onsi/gomega" v1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -50,7 +50,7 @@ var _ = utils.SIGDescribe("Volume Disk Size [Feature:vsphere]", func() { scParameters map[string]string datastore string ) - BeforeEach(func() { + ginkgo.BeforeEach(func() { framework.SkipUnlessProviderIs("vsphere") Bootstrap(f) client = f.ClientSet @@ -59,38 +59,38 @@ var _ = utils.SIGDescribe("Volume Disk Size [Feature:vsphere]", func() { datastore = GetAndExpectStringEnvVar(StorageClassDatastoreName) }) - It("verify dynamically provisioned pv has size rounded up correctly", func() { - By("Invoking Test disk size") + ginkgo.It("verify dynamically provisioned pv has size rounded up correctly", func() { + ginkgo.By("Invoking Test disk size") scParameters[Datastore] = datastore scParameters[DiskFormat] = ThinDisk diskSize := "1" expectedDiskSize := "1Mi" - By("Creating Storage Class") + ginkgo.By("Creating Storage Class") storageclass, err := client.StorageV1().StorageClasses().Create(getVSphereStorageClassSpec(DiskSizeSCName, scParameters, nil)) framework.ExpectNoError(err) defer client.StorageV1().StorageClasses().Delete(storageclass.Name, nil) - By("Creating PVC using the Storage Class") + ginkgo.By("Creating PVC using the Storage Class") pvclaim, err := framework.CreatePVC(client, namespace, getVSphereClaimSpecWithStorageClass(namespace, diskSize, storageclass)) framework.ExpectNoError(err) defer framework.DeletePersistentVolumeClaim(client, pvclaim.Name, namespace) - By("Waiting for claim to be in bound phase") + ginkgo.By("Waiting for claim to be in bound phase") err = framework.WaitForPersistentVolumeClaimPhase(v1.ClaimBound, client, pvclaim.Namespace, pvclaim.Name, framework.Poll, 2*time.Minute) framework.ExpectNoError(err) - By("Getting new copy of PVC") + ginkgo.By("Getting new copy of PVC") pvclaim, err = client.CoreV1().PersistentVolumeClaims(pvclaim.Namespace).Get(pvclaim.Name, metav1.GetOptions{}) framework.ExpectNoError(err) - By("Getting PV created") + ginkgo.By("Getting PV created") pv, err := client.CoreV1().PersistentVolumes().Get(pvclaim.Spec.VolumeName, metav1.GetOptions{}) framework.ExpectNoError(err) - By("Verifying if provisioned PV has the correct size") + ginkgo.By("Verifying if provisioned PV has the correct size") expectedCapacity := resource.MustParse(expectedDiskSize) pvCapacity := pv.Spec.Capacity[v1.ResourceName(v1.ResourceStorage)] - Expect(pvCapacity.Value()).To(Equal(expectedCapacity.Value())) + gomega.Expect(pvCapacity.Value()).To(gomega.Equal(expectedCapacity.Value())) }) }) diff --git a/test/e2e/storage/vsphere/vsphere_volume_fstype.go b/test/e2e/storage/vsphere/vsphere_volume_fstype.go index 9746490558e..74982c083ab 100644 --- a/test/e2e/storage/vsphere/vsphere_volume_fstype.go +++ b/test/e2e/storage/vsphere/vsphere_volume_fstype.go @@ -20,8 +20,8 @@ import ( "strings" "time" - . "github.com/onsi/ginkgo" - . 
"github.com/onsi/gomega" + "github.com/onsi/ginkgo" + "github.com/onsi/gomega" v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" clientset "k8s.io/client-go/kubernetes" @@ -69,26 +69,26 @@ var _ = utils.SIGDescribe("Volume FStype [Feature:vsphere]", func() { client clientset.Interface namespace string ) - BeforeEach(func() { + ginkgo.BeforeEach(func() { framework.SkipUnlessProviderIs("vsphere") Bootstrap(f) client = f.ClientSet namespace = f.Namespace.Name - Expect(GetReadySchedulableNodeInfos()).NotTo(BeEmpty()) + gomega.Expect(GetReadySchedulableNodeInfos()).NotTo(gomega.BeEmpty()) }) - It("verify fstype - ext3 formatted volume", func() { - By("Invoking Test for fstype: ext3") + ginkgo.It("verify fstype - ext3 formatted volume", func() { + ginkgo.By("Invoking Test for fstype: ext3") invokeTestForFstype(f, client, namespace, Ext3FSType, Ext3FSType) }) - It("verify fstype - default value should be ext4", func() { - By("Invoking Test for fstype: Default Value - ext4") + ginkgo.It("verify fstype - default value should be ext4", func() { + ginkgo.By("Invoking Test for fstype: Default Value - ext4") invokeTestForFstype(f, client, namespace, "", Ext4FSType) }) - It("verify invalid fstype", func() { - By("Invoking Test for fstype: invalid Value") + ginkgo.It("verify invalid fstype", func() { + ginkgo.By("Invoking Test for fstype: invalid Value") invokeTestForInvalidFstype(f, client, namespace, InvalidFSType) }) }) @@ -99,7 +99,7 @@ func invokeTestForFstype(f *framework.Framework, client clientset.Interface, nam scParameters["fstype"] = fstype // Create Persistent Volume - By("Creating Storage Class With Fstype") + ginkgo.By("Creating Storage Class With Fstype") pvclaim, persistentvolumes := createVolume(client, namespace, scParameters) // Create Pod and verify the persistent volume is accessible @@ -110,7 +110,7 @@ func invokeTestForFstype(f *framework.Framework, client clientset.Interface, nam // Detach and delete volume detachVolume(f, client, pod, persistentvolumes[0].Spec.VsphereVolume.VolumePath) err = framework.DeletePersistentVolumeClaim(client, pvclaim.Name, namespace) - Expect(err).To(BeNil()) + gomega.Expect(err).To(gomega.BeNil()) } func invokeTestForInvalidFstype(f *framework.Framework, client clientset.Interface, namespace string, fstype string) { @@ -118,24 +118,24 @@ func invokeTestForInvalidFstype(f *framework.Framework, client clientset.Interfa scParameters["fstype"] = fstype // Create Persistent Volume - By("Creating Storage Class With Invalid Fstype") + ginkgo.By("Creating Storage Class With Invalid Fstype") pvclaim, persistentvolumes := createVolume(client, namespace, scParameters) - By("Creating pod to attach PV to the node") + ginkgo.By("Creating pod to attach PV to the node") var pvclaims []*v1.PersistentVolumeClaim pvclaims = append(pvclaims, pvclaim) // Create pod to attach Volume to Node pod, err := framework.CreatePod(client, namespace, nil, pvclaims, false, ExecCommand) - Expect(err).To(HaveOccurred()) + gomega.Expect(err).To(gomega.HaveOccurred()) eventList, err := client.CoreV1().Events(namespace).List(metav1.ListOptions{}) // Detach and delete volume detachVolume(f, client, pod, persistentvolumes[0].Spec.VsphereVolume.VolumePath) err = framework.DeletePersistentVolumeClaim(client, pvclaim.Name, namespace) - Expect(err).To(BeNil()) + gomega.Expect(err).To(gomega.BeNil()) - Expect(eventList.Items).NotTo(BeEmpty()) + gomega.Expect(eventList.Items).NotTo(gomega.BeEmpty()) errorMsg := `MountVolume.MountDevice failed for volume "` + 
persistentvolumes[0].Name + `" : executable file not found` isFound := false for _, item := range eventList.Items { @@ -143,7 +143,7 @@ func invokeTestForInvalidFstype(f *framework.Framework, client clientset.Interfa isFound = true } } - Expect(isFound).To(BeTrue(), "Unable to verify MountVolume.MountDevice failure") + gomega.Expect(isFound).To(gomega.BeTrue(), "Unable to verify MountVolume.MountDevice failure") } func createVolume(client clientset.Interface, namespace string, scParameters map[string]string) (*v1.PersistentVolumeClaim, []*v1.PersistentVolume) { @@ -151,13 +151,13 @@ func createVolume(client clientset.Interface, namespace string, scParameters map framework.ExpectNoError(err) defer client.StorageV1().StorageClasses().Delete(storageclass.Name, nil) - By("Creating PVC using the Storage Class") + ginkgo.By("Creating PVC using the Storage Class") pvclaim, err := client.CoreV1().PersistentVolumeClaims(namespace).Create(getVSphereClaimSpecWithStorageClass(namespace, "2Gi", storageclass)) framework.ExpectNoError(err) var pvclaims []*v1.PersistentVolumeClaim pvclaims = append(pvclaims, pvclaim) - By("Waiting for claim to be in bound phase") + ginkgo.By("Waiting for claim to be in bound phase") persistentvolumes, err := framework.WaitForPVClaimBoundPhase(client, pvclaims, framework.ClaimProvisionTimeout) framework.ExpectNoError(err) return pvclaim, persistentvolumes @@ -166,13 +166,13 @@ func createVolume(client clientset.Interface, namespace string, scParameters map func createPodAndVerifyVolumeAccessible(client clientset.Interface, namespace string, pvclaim *v1.PersistentVolumeClaim, persistentvolumes []*v1.PersistentVolume) *v1.Pod { var pvclaims []*v1.PersistentVolumeClaim pvclaims = append(pvclaims, pvclaim) - By("Creating pod to attach PV to the node") + ginkgo.By("Creating pod to attach PV to the node") // Create pod to attach Volume to Node pod, err := framework.CreatePod(client, namespace, nil, pvclaims, false, ExecCommand) framework.ExpectNoError(err) // Asserts: Right disk is attached to the pod - By("Verify the volume is accessible and available in the pod") + ginkgo.By("Verify the volume is accessible and available in the pod") verifyVSphereVolumesAccessible(client, pod, persistentvolumes) return pod } @@ -180,11 +180,11 @@ func createPodAndVerifyVolumeAccessible(client clientset.Interface, namespace st // detachVolume delete the volume passed in the argument and wait until volume is detached from the node, func detachVolume(f *framework.Framework, client clientset.Interface, pod *v1.Pod, volPath string) { pod, err := f.ClientSet.CoreV1().Pods(pod.Namespace).Get(pod.Name, metav1.GetOptions{}) - Expect(err).To(BeNil()) + gomega.Expect(err).To(gomega.BeNil()) nodeName := pod.Spec.NodeName - By("Deleting pod") + ginkgo.By("Deleting pod") framework.DeletePodWithWait(f, client, pod) - By("Waiting for volumes to be detached from the node") + ginkgo.By("Waiting for volumes to be detached from the node") waitForVSphereDiskToDetach(volPath, nodeName) } diff --git a/test/e2e/storage/vsphere/vsphere_volume_master_restart.go b/test/e2e/storage/vsphere/vsphere_volume_master_restart.go index e437fdf6a99..a4f71d39334 100644 --- a/test/e2e/storage/vsphere/vsphere_volume_master_restart.go +++ b/test/e2e/storage/vsphere/vsphere_volume_master_restart.go @@ -19,8 +19,8 @@ package vsphere import ( "fmt" - . "github.com/onsi/ginkgo" - . 
"github.com/onsi/gomega" + "github.com/onsi/ginkgo" + "github.com/onsi/gomega" "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -55,7 +55,7 @@ var _ = utils.SIGDescribe("Volume Attach Verify [Feature:vsphere][Serial][Disrup nodeNameList []string nodeInfo *NodeInfo ) - BeforeEach(func() { + ginkgo.BeforeEach(func() { framework.SkipUnlessProviderIs("vsphere") Bootstrap(f) client = f.ClientSet @@ -79,22 +79,22 @@ var _ = utils.SIGDescribe("Volume Attach Verify [Feature:vsphere][Serial][Disrup } }) - It("verify volume remains attached after master kubelet restart", func() { + ginkgo.It("verify volume remains attached after master kubelet restart", func() { // Create pod on each node for i := 0; i < numNodes; i++ { - By(fmt.Sprintf("%d: Creating a test vsphere volume", i)) + ginkgo.By(fmt.Sprintf("%d: Creating a test vsphere volume", i)) volumePath, err := nodeInfo.VSphere.CreateVolume(&VolumeOptions{}, nodeInfo.DataCenterRef) framework.ExpectNoError(err) volumePaths = append(volumePaths, volumePath) - By(fmt.Sprintf("Creating pod %d on node %v", i, nodeNameList[i])) + ginkgo.By(fmt.Sprintf("Creating pod %d on node %v", i, nodeNameList[i])) podspec := getVSpherePodSpecWithVolumePaths([]string{volumePath}, nodeKeyValueLabelList[i], nil) pod, err := client.CoreV1().Pods(namespace).Create(podspec) framework.ExpectNoError(err) defer framework.DeletePodWithWait(f, client, pod) - By("Waiting for pod to be ready") - Expect(framework.WaitForPodNameRunningInNamespace(client, pod.Name, namespace)).To(Succeed()) + ginkgo.By("Waiting for pod to be ready") + gomega.Expect(framework.WaitForPodNameRunningInNamespace(client, pod.Name, namespace)).To(gomega.Succeed()) pod, err = client.CoreV1().Pods(namespace).Get(pod.Name, metav1.GetOptions{}) framework.ExpectNoError(err) @@ -102,16 +102,16 @@ var _ = utils.SIGDescribe("Volume Attach Verify [Feature:vsphere][Serial][Disrup pods = append(pods, pod) nodeName := pod.Spec.NodeName - By(fmt.Sprintf("Verify volume %s is attached to the node %s", volumePath, nodeName)) + ginkgo.By(fmt.Sprintf("Verify volume %s is attached to the node %s", volumePath, nodeName)) expectVolumeToBeAttached(nodeName, volumePath) } - By("Restarting kubelet on master node") + ginkgo.By("Restarting kubelet on master node") masterAddress := framework.GetMasterHost() + ":22" err := framework.RestartKubelet(masterAddress) framework.ExpectNoError(err, "Unable to restart kubelet on master node") - By("Verifying the kubelet on master node is up") + ginkgo.By("Verifying the kubelet on master node is up") err = framework.WaitForKubeletUp(masterAddress) framework.ExpectNoError(err) @@ -119,18 +119,18 @@ var _ = utils.SIGDescribe("Volume Attach Verify [Feature:vsphere][Serial][Disrup volumePath := volumePaths[i] nodeName := pod.Spec.NodeName - By(fmt.Sprintf("After master restart, verify volume %v is attached to the node %v", volumePath, nodeName)) + ginkgo.By(fmt.Sprintf("After master restart, verify volume %v is attached to the node %v", volumePath, nodeName)) expectVolumeToBeAttached(nodeName, volumePath) - By(fmt.Sprintf("Deleting pod on node %s", nodeName)) + ginkgo.By(fmt.Sprintf("Deleting pod on node %s", nodeName)) err = framework.DeletePodWithWait(f, client, pod) framework.ExpectNoError(err) - By(fmt.Sprintf("Waiting for volume %s to be detached from the node %s", volumePath, nodeName)) + ginkgo.By(fmt.Sprintf("Waiting for volume %s to be detached from the node %s", volumePath, nodeName)) err = waitForVSphereDiskToDetach(volumePath, nodeName) framework.ExpectNoError(err) 
- By(fmt.Sprintf("Deleting volume %s", volumePath)) + ginkgo.By(fmt.Sprintf("Deleting volume %s", volumePath)) err = nodeInfo.VSphere.DeleteVolume(volumePath, nodeInfo.DataCenterRef) framework.ExpectNoError(err) } diff --git a/test/e2e/storage/vsphere/vsphere_volume_node_delete.go b/test/e2e/storage/vsphere/vsphere_volume_node_delete.go index 2b53fd36607..8e5d6133575 100644 --- a/test/e2e/storage/vsphere/vsphere_volume_node_delete.go +++ b/test/e2e/storage/vsphere/vsphere_volume_node_delete.go @@ -20,8 +20,8 @@ import ( "context" "os" - . "github.com/onsi/ginkgo" - . "github.com/onsi/gomega" + "github.com/onsi/ginkgo" + "github.com/onsi/gomega" "github.com/vmware/govmomi/object" clientset "k8s.io/client-go/kubernetes" @@ -38,7 +38,7 @@ var _ = utils.SIGDescribe("Node Unregister [Feature:vsphere] [Slow] [Disruptive] err error ) - BeforeEach(func() { + ginkgo.BeforeEach(func() { framework.SkipUnlessProviderIs("vsphere") Bootstrap(f) client = f.ClientSet @@ -46,14 +46,14 @@ var _ = utils.SIGDescribe("Node Unregister [Feature:vsphere] [Slow] [Disruptive] framework.ExpectNoError(framework.WaitForAllNodesSchedulable(client, framework.TestContext.NodeSchedulableTimeout)) framework.ExpectNoError(err) workingDir = os.Getenv("VSPHERE_WORKING_DIR") - Expect(workingDir).NotTo(BeEmpty()) + gomega.Expect(workingDir).NotTo(gomega.BeEmpty()) }) - It("node unregister", func() { - By("Get total Ready nodes") + ginkgo.It("node unregister", func() { + ginkgo.By("Get total Ready nodes") nodeList := framework.GetReadySchedulableNodesOrDie(f.ClientSet) - Expect(len(nodeList.Items) > 1).To(BeTrue(), "At least 2 nodes are required for this test") + gomega.Expect(len(nodeList.Items) > 1).To(gomega.BeTrue(), "At least 2 nodes are required for this test") totalNodesCount := len(nodeList.Items) nodeVM := nodeList.Items[0] @@ -75,44 +75,44 @@ var _ = utils.SIGDescribe("Node Unregister [Feature:vsphere] [Slow] [Disruptive] framework.ExpectNoError(err) // Unregister Node VM - By("Unregister a node VM") + ginkgo.By("Unregister a node VM") unregisterNodeVM(nodeVM.ObjectMeta.Name, vmObject) // Ready nodes should be 1 less - By("Verifying the ready node counts") - Expect(verifyReadyNodeCount(f.ClientSet, totalNodesCount-1)).To(BeTrue(), "Unable to verify expected ready node count") + ginkgo.By("Verifying the ready node counts") + gomega.Expect(verifyReadyNodeCount(f.ClientSet, totalNodesCount-1)).To(gomega.BeTrue(), "Unable to verify expected ready node count") nodeList = framework.GetReadySchedulableNodesOrDie(client) - Expect(nodeList.Items).NotTo(BeEmpty(), "Unable to find ready and schedulable Node") + gomega.Expect(nodeList.Items).NotTo(gomega.BeEmpty(), "Unable to find ready and schedulable Node") var nodeNameList []string for _, node := range nodeList.Items { nodeNameList = append(nodeNameList, node.ObjectMeta.Name) } - Expect(nodeNameList).NotTo(ContainElement(nodeVM.ObjectMeta.Name)) + gomega.Expect(nodeNameList).NotTo(gomega.ContainElement(nodeVM.ObjectMeta.Name)) // Register Node VM - By("Register back the node VM") + ginkgo.By("Register back the node VM") registerNodeVM(nodeVM.ObjectMeta.Name, workingDir, vmxFilePath, vmPool, vmHost) // Ready nodes should be equal to earlier count - By("Verifying the ready node counts") - Expect(verifyReadyNodeCount(f.ClientSet, totalNodesCount)).To(BeTrue(), "Unable to verify expected ready node count") + ginkgo.By("Verifying the ready node counts") + gomega.Expect(verifyReadyNodeCount(f.ClientSet, totalNodesCount)).To(gomega.BeTrue(), "Unable to verify expected ready node 
count") nodeList = framework.GetReadySchedulableNodesOrDie(client) - Expect(nodeList.Items).NotTo(BeEmpty(), "Unable to find ready and schedulable Node") + gomega.Expect(nodeList.Items).NotTo(gomega.BeEmpty(), "Unable to find ready and schedulable Node") nodeNameList = nodeNameList[:0] for _, node := range nodeList.Items { nodeNameList = append(nodeNameList, node.ObjectMeta.Name) } - Expect(nodeNameList).To(ContainElement(nodeVM.ObjectMeta.Name)) + gomega.Expect(nodeNameList).To(gomega.ContainElement(nodeVM.ObjectMeta.Name)) // Sanity test that pod provisioning works - By("Sanity check for volume lifecycle") + ginkgo.By("Sanity check for volume lifecycle") scParameters := make(map[string]string) storagePolicy := os.Getenv("VSPHERE_SPBM_GOLD_POLICY") - Expect(storagePolicy).NotTo(BeEmpty(), "Please set VSPHERE_SPBM_GOLD_POLICY system environment") + gomega.Expect(storagePolicy).NotTo(gomega.BeEmpty(), "Please set VSPHERE_SPBM_GOLD_POLICY system environment") scParameters[SpbmStoragePolicy] = storagePolicy invokeValidPolicyTest(f, client, namespace, scParameters) }) diff --git a/test/e2e/storage/vsphere/vsphere_volume_node_poweroff.go b/test/e2e/storage/vsphere/vsphere_volume_node_poweroff.go index a4a095c5d28..ab042e322db 100644 --- a/test/e2e/storage/vsphere/vsphere_volume_node_poweroff.go +++ b/test/e2e/storage/vsphere/vsphere_volume_node_poweroff.go @@ -21,8 +21,8 @@ import ( "fmt" "time" - . "github.com/onsi/ginkgo" - . "github.com/onsi/gomega" + "github.com/onsi/ginkgo" + "github.com/onsi/gomega" "github.com/vmware/govmomi/object" vimtypes "github.com/vmware/govmomi/vim25/types" @@ -49,15 +49,15 @@ var _ = utils.SIGDescribe("Node Poweroff [Feature:vsphere] [Slow] [Disruptive]", namespace string ) - BeforeEach(func() { + ginkgo.BeforeEach(func() { framework.SkipUnlessProviderIs("vsphere") Bootstrap(f) client = f.ClientSet namespace = f.Namespace.Name framework.ExpectNoError(framework.WaitForAllNodesSchedulable(client, framework.TestContext.NodeSchedulableTimeout)) nodeList := framework.GetReadySchedulableNodesOrDie(f.ClientSet) - Expect(nodeList.Items).NotTo(BeEmpty(), "Unable to find ready and schedulable Node") - Expect(len(nodeList.Items) > 1).To(BeTrue(), "At least 2 nodes are required for this test") + gomega.Expect(nodeList.Items).NotTo(gomega.BeEmpty(), "Unable to find ready and schedulable Node") + gomega.Expect(len(nodeList.Items) > 1).To(gomega.BeTrue(), "At least 2 nodes are required for this test") }) /* @@ -75,43 +75,43 @@ var _ = utils.SIGDescribe("Node Poweroff [Feature:vsphere] [Slow] [Disruptive]", 11. Delete the PVC 12. 
Delete the StorageClass */ - It("verify volume status after node power off", func() { - By("Creating a Storage Class") + ginkgo.It("verify volume status after node power off", func() { + ginkgo.By("Creating a Storage Class") storageClassSpec := getVSphereStorageClassSpec("test-sc", nil, nil) storageclass, err := client.StorageV1().StorageClasses().Create(storageClassSpec) framework.ExpectNoError(err, fmt.Sprintf("Failed to create storage class with err: %v", err)) defer client.StorageV1().StorageClasses().Delete(storageclass.Name, nil) - By("Creating PVC using the Storage Class") + ginkgo.By("Creating PVC using the Storage Class") pvclaimSpec := getVSphereClaimSpecWithStorageClass(namespace, "1Gi", storageclass) pvclaim, err := framework.CreatePVC(client, namespace, pvclaimSpec) framework.ExpectNoError(err, fmt.Sprintf("Failed to create PVC with err: %v", err)) defer framework.DeletePersistentVolumeClaim(client, pvclaim.Name, namespace) - By("Waiting for PVC to be in bound phase") + ginkgo.By("Waiting for PVC to be in bound phase") pvclaims := []*v1.PersistentVolumeClaim{pvclaim} pvs, err := framework.WaitForPVClaimBoundPhase(client, pvclaims, framework.ClaimProvisionTimeout) framework.ExpectNoError(err, fmt.Sprintf("Failed to wait until PVC phase set to bound: %v", err)) volumePath := pvs[0].Spec.VsphereVolume.VolumePath - By("Creating a Deployment") + ginkgo.By("Creating a Deployment") deployment, err := e2edeploy.CreateDeployment(client, int32(1), map[string]string{"test": "app"}, nil, namespace, pvclaims, "") framework.ExpectNoError(err, fmt.Sprintf("Failed to create Deployment with err: %v", err)) defer client.AppsV1().Deployments(namespace).Delete(deployment.Name, &metav1.DeleteOptions{}) - By("Get pod from the deployement") + ginkgo.By("Get pod from the deployement") podList, err := e2edeploy.GetPodsForDeployment(client, deployment) framework.ExpectNoError(err, fmt.Sprintf("Failed to get pod from the deployement with err: %v", err)) - Expect(podList.Items).NotTo(BeEmpty()) + gomega.Expect(podList.Items).NotTo(gomega.BeEmpty()) pod := podList.Items[0] node1 := pod.Spec.NodeName - By(fmt.Sprintf("Verify disk is attached to the node: %v", node1)) + ginkgo.By(fmt.Sprintf("Verify disk is attached to the node: %v", node1)) isAttached, err := diskIsAttached(volumePath, node1) framework.ExpectNoError(err) - Expect(isAttached).To(BeTrue(), "Disk is not attached to the node") + gomega.Expect(isAttached).To(gomega.BeTrue(), "Disk is not attached to the node") - By(fmt.Sprintf("Power off the node: %v", node1)) + ginkgo.By(fmt.Sprintf("Power off the node: %v", node1)) nodeInfo := TestContext.NodeMapper.GetNodeInfo(node1) vm := object.NewVirtualMachine(nodeInfo.VSphere.Client.Client, nodeInfo.VirtualMachineRef) @@ -128,15 +128,15 @@ var _ = utils.SIGDescribe("Node Poweroff [Feature:vsphere] [Slow] [Disruptive]", node2, err := waitForPodToFailover(client, deployment, node1) framework.ExpectNoError(err, "Pod did not fail over to a different node") - By(fmt.Sprintf("Waiting for disk to be attached to the new node: %v", node2)) + ginkgo.By(fmt.Sprintf("Waiting for disk to be attached to the new node: %v", node2)) err = waitForVSphereDiskToAttach(volumePath, node2) framework.ExpectNoError(err, "Disk is not attached to the node") - By(fmt.Sprintf("Waiting for disk to be detached from the previous node: %v", node1)) + ginkgo.By(fmt.Sprintf("Waiting for disk to be detached from the previous node: %v", node1)) err = waitForVSphereDiskToDetach(volumePath, node1) framework.ExpectNoError(err, "Disk is not 
detached from the node") - By(fmt.Sprintf("Power on the previous node: %v", node1)) + ginkgo.By(fmt.Sprintf("Power on the previous node: %v", node1)) vm.PowerOn(ctx) err = vm.WaitForPowerState(ctx, vimtypes.VirtualMachinePowerStatePoweredOn) framework.ExpectNoError(err, "Unable to power on the node") diff --git a/test/e2e/storage/vsphere/vsphere_volume_ops_storm.go b/test/e2e/storage/vsphere/vsphere_volume_ops_storm.go index 4e995ddac47..af1d482b10b 100644 --- a/test/e2e/storage/vsphere/vsphere_volume_ops_storm.go +++ b/test/e2e/storage/vsphere/vsphere_volume_ops_storm.go @@ -21,8 +21,8 @@ import ( "os" "strconv" - . "github.com/onsi/ginkgo" - . "github.com/onsi/gomega" + "github.com/onsi/ginkgo" + "github.com/onsi/gomega" "k8s.io/api/core/v1" storage "k8s.io/api/storage/v1" clientset "k8s.io/client-go/kubernetes" @@ -58,12 +58,12 @@ var _ = utils.SIGDescribe("Volume Operations Storm [Feature:vsphere]", func() { err error volume_ops_scale int ) - BeforeEach(func() { + ginkgo.BeforeEach(func() { framework.SkipUnlessProviderIs("vsphere") Bootstrap(f) client = f.ClientSet namespace = f.Namespace.Name - Expect(GetReadySchedulableNodeInfos()).NotTo(BeEmpty()) + gomega.Expect(GetReadySchedulableNodeInfos()).NotTo(gomega.BeEmpty()) if os.Getenv("VOLUME_OPS_SCALE") != "" { volume_ops_scale, err = strconv.Atoi(os.Getenv("VOLUME_OPS_SCALE")) framework.ExpectNoError(err) @@ -72,25 +72,25 @@ var _ = utils.SIGDescribe("Volume Operations Storm [Feature:vsphere]", func() { } pvclaims = make([]*v1.PersistentVolumeClaim, volume_ops_scale) }) - AfterEach(func() { - By("Deleting PVCs") + ginkgo.AfterEach(func() { + ginkgo.By("Deleting PVCs") for _, claim := range pvclaims { framework.DeletePersistentVolumeClaim(client, claim.Name, namespace) } - By("Deleting StorageClass") + ginkgo.By("Deleting StorageClass") err = client.StorageV1().StorageClasses().Delete(storageclass.Name, nil) framework.ExpectNoError(err) }) - It("should create pod with many volumes and verify no attach call fails", func() { - By(fmt.Sprintf("Running test with VOLUME_OPS_SCALE: %v", volume_ops_scale)) - By("Creating Storage Class") + ginkgo.It("should create pod with many volumes and verify no attach call fails", func() { + ginkgo.By(fmt.Sprintf("Running test with VOLUME_OPS_SCALE: %v", volume_ops_scale)) + ginkgo.By("Creating Storage Class") scParameters := make(map[string]string) scParameters["diskformat"] = "thin" storageclass, err = client.StorageV1().StorageClasses().Create(getVSphereStorageClassSpec("thinsc", scParameters, nil)) framework.ExpectNoError(err) - By("Creating PVCs using the Storage Class") + ginkgo.By("Creating PVCs using the Storage Class") count := 0 for count < volume_ops_scale { pvclaims[count], err = framework.CreatePVC(client, namespace, getVSphereClaimSpecWithStorageClass(namespace, "2Gi", storageclass)) @@ -98,21 +98,21 @@ var _ = utils.SIGDescribe("Volume Operations Storm [Feature:vsphere]", func() { count++ } - By("Waiting for all claims to be in bound phase") + ginkgo.By("Waiting for all claims to be in bound phase") persistentvolumes, err = framework.WaitForPVClaimBoundPhase(client, pvclaims, framework.ClaimProvisionTimeout) framework.ExpectNoError(err) - By("Creating pod to attach PVs to the node") + ginkgo.By("Creating pod to attach PVs to the node") pod, err := framework.CreatePod(client, namespace, nil, pvclaims, false, "") framework.ExpectNoError(err) - By("Verify all volumes are accessible and available in the pod") + ginkgo.By("Verify all volumes are accessible and available in the pod") 
verifyVSphereVolumesAccessible(client, pod, persistentvolumes) - By("Deleting pod") + ginkgo.By("Deleting pod") framework.ExpectNoError(framework.DeletePodWithWait(f, client, pod)) - By("Waiting for volumes to be detached from the node") + ginkgo.By("Waiting for volumes to be detached from the node") for _, pv := range persistentvolumes { waitForVSphereDiskToDetach(pv.Spec.VsphereVolume.VolumePath, pod.Spec.NodeName) } diff --git a/test/e2e/storage/vsphere/vsphere_volume_perf.go b/test/e2e/storage/vsphere/vsphere_volume_perf.go index 0966bd11e2b..7873b13b7ab 100644 --- a/test/e2e/storage/vsphere/vsphere_volume_perf.go +++ b/test/e2e/storage/vsphere/vsphere_volume_perf.go @@ -20,8 +20,8 @@ import ( "fmt" "time" - . "github.com/onsi/ginkgo" - . "github.com/onsi/gomega" + "github.com/onsi/ginkgo" + "github.com/onsi/gomega" v1 "k8s.io/api/core/v1" storageV1 "k8s.io/api/storage/v1" clientset "k8s.io/client-go/kubernetes" @@ -61,7 +61,7 @@ var _ = utils.SIGDescribe("vcp-performance [Feature:vsphere]", func() { iterations int ) - BeforeEach(func() { + ginkgo.BeforeEach(func() { framework.SkipUnlessProviderIs("vsphere") Bootstrap(f) client = f.ClientSet @@ -76,18 +76,18 @@ var _ = utils.SIGDescribe("vcp-performance [Feature:vsphere]", func() { datastoreName = GetAndExpectStringEnvVar(StorageClassDatastoreName) nodes := framework.GetReadySchedulableNodesOrDie(client) - Expect(len(nodes.Items)).To(BeNumerically(">=", 1), "Requires at least %d nodes (not %d)", 2, len(nodes.Items)) + gomega.Expect(len(nodes.Items)).To(gomega.BeNumerically(">=", 1), "Requires at least %d nodes (not %d)", 2, len(nodes.Items)) msg := fmt.Sprintf("Cannot attach %d volumes to %d nodes. Maximum volumes that can be attached on %d nodes is %d", volumeCount, len(nodes.Items), len(nodes.Items), SCSIUnitsAvailablePerNode*len(nodes.Items)) - Expect(volumeCount).To(BeNumerically("<=", SCSIUnitsAvailablePerNode*len(nodes.Items)), msg) + gomega.Expect(volumeCount).To(gomega.BeNumerically("<=", SCSIUnitsAvailablePerNode*len(nodes.Items)), msg) msg = fmt.Sprintf("Cannot attach %d volumes per pod. 
Maximum volumes that can be attached per pod is %d", volumesPerPod, SCSIUnitsAvailablePerNode) - Expect(volumesPerPod).To(BeNumerically("<=", SCSIUnitsAvailablePerNode), msg) + gomega.Expect(volumesPerPod).To(gomega.BeNumerically("<=", SCSIUnitsAvailablePerNode), msg) nodeSelectorList = createNodeLabels(client, namespace, nodes) }) - It("vcp performance tests", func() { + ginkgo.It("vcp performance tests", func() { scList := getTestStorageClasses(client, policyName, datastoreName) defer func(scList []*storageV1.StorageClass) { for _, sc := range scList { @@ -124,7 +124,7 @@ func getTestStorageClasses(client clientset.Interface, policyName, datastoreName scArrays := make([]*storageV1.StorageClass, len(scNames)) for index, scname := range scNames { // Create vSphere Storage Class - By(fmt.Sprintf("Creating Storage Class : %v", scname)) + ginkgo.By(fmt.Sprintf("Creating Storage Class : %v", scname)) var sc *storageV1.StorageClass var err error switch scname { @@ -147,7 +147,7 @@ func getTestStorageClasses(client clientset.Interface, policyName, datastoreName scWithDatastoreSpec := getVSphereStorageClassSpec(storageclass4, scWithDSParameters, nil) sc, err = client.StorageV1().StorageClasses().Create(scWithDatastoreSpec) } - Expect(sc).NotTo(BeNil()) + gomega.Expect(sc).NotTo(gomega.BeNil()) framework.ExpectNoError(err) scArrays[index] = sc } @@ -165,7 +165,7 @@ func invokeVolumeLifeCyclePerformance(f *framework.Framework, client clientset.I latency = make(map[string]float64) numPods := volumeCount / volumesPerPod - By(fmt.Sprintf("Creating %d PVCs", volumeCount)) + ginkgo.By(fmt.Sprintf("Creating %d PVCs", volumeCount)) start := time.Now() for i := 0; i < numPods; i++ { var pvclaims []*v1.PersistentVolumeClaim @@ -185,7 +185,7 @@ func invokeVolumeLifeCyclePerformance(f *framework.Framework, client clientset.I elapsed := time.Since(start) latency[CreateOp] = elapsed.Seconds() - By("Creating pod to attach PVs to the node") + ginkgo.By("Creating pod to attach PVs to the node") start = time.Now() for i, pvclaims := range totalpvclaims { nodeSelector := nodeSelectorList[i%len(nodeSelectorList)] @@ -202,7 +202,7 @@ func invokeVolumeLifeCyclePerformance(f *framework.Framework, client clientset.I verifyVSphereVolumesAccessible(client, pod, totalpvs[i]) } - By("Deleting pods") + ginkgo.By("Deleting pods") start = time.Now() for _, pod := range totalpods { err := framework.DeletePodWithWait(f, client, pod) @@ -220,7 +220,7 @@ func invokeVolumeLifeCyclePerformance(f *framework.Framework, client clientset.I err := waitForVSphereDisksToDetach(nodeVolumeMap) framework.ExpectNoError(err) - By("Deleting the PVCs") + ginkgo.By("Deleting the PVCs") start = time.Now() for _, pvclaims := range totalpvclaims { for _, pvc := range pvclaims { diff --git a/test/e2e/storage/vsphere/vsphere_volume_placement.go b/test/e2e/storage/vsphere/vsphere_volume_placement.go index 2c1e152d653..4b61542259f 100644 --- a/test/e2e/storage/vsphere/vsphere_volume_placement.go +++ b/test/e2e/storage/vsphere/vsphere_volume_placement.go @@ -21,8 +21,8 @@ import ( "strconv" "time" - . "github.com/onsi/ginkgo" - . 
"github.com/onsi/gomega" + "github.com/onsi/ginkgo" + "github.com/onsi/gomega" "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/util/uuid" clientset "k8s.io/client-go/kubernetes" @@ -47,7 +47,7 @@ var _ = utils.SIGDescribe("Volume Placement", func() { nodeInfo *NodeInfo vsp *VSphere ) - BeforeEach(func() { + ginkgo.BeforeEach(func() { framework.SkipUnlessProviderIs("vsphere") Bootstrap(f) c = f.ClientSet @@ -59,13 +59,13 @@ var _ = utils.SIGDescribe("Volume Placement", func() { nodeInfo = TestContext.NodeMapper.GetNodeInfo(node1Name) vsp = nodeInfo.VSphere } - By("creating vmdk") + ginkgo.By("creating vmdk") volumePath, err := vsp.CreateVolume(&VolumeOptions{}, nodeInfo.DataCenterRef) framework.ExpectNoError(err) volumePaths = append(volumePaths, volumePath) }) - AfterEach(func() { + ginkgo.AfterEach(func() { for _, volumePath := range volumePaths { vsp.DeleteVolume(volumePath, nodeInfo.DataCenterRef) } @@ -102,7 +102,7 @@ var _ = utils.SIGDescribe("Volume Placement", func() { */ - It("should create and delete pod with the same volume source on the same worker node", func() { + ginkgo.It("should create and delete pod with the same volume source on the same worker node", func() { var volumeFiles []string pod := createPodWithVolumeAndNodeSelector(c, ns, node1Name, node1KeyValueLabel, volumePaths) @@ -113,7 +113,7 @@ var _ = utils.SIGDescribe("Volume Placement", func() { createAndVerifyFilesOnVolume(ns, pod.Name, []string{newEmptyFileName}, volumeFiles) deletePodAndWaitForVolumeToDetach(f, c, pod, node1Name, volumePaths) - By(fmt.Sprintf("Creating pod on the same node: %v", node1Name)) + ginkgo.By(fmt.Sprintf("Creating pod on the same node: %v", node1Name)) pod = createPodWithVolumeAndNodeSelector(c, ns, node1Name, node1KeyValueLabel, volumePaths) // Create empty files on the mounted volumes on the pod to verify volume is writable @@ -142,7 +142,7 @@ var _ = utils.SIGDescribe("Volume Placement", func() { 13. Delete pod. */ - It("should create and delete pod with the same volume source attach/detach to different worker nodes", func() { + ginkgo.It("should create and delete pod with the same volume source attach/detach to different worker nodes", func() { var volumeFiles []string pod := createPodWithVolumeAndNodeSelector(c, ns, node1Name, node1KeyValueLabel, volumePaths) // Create empty files on the mounted volumes on the pod to verify volume is writable @@ -152,7 +152,7 @@ var _ = utils.SIGDescribe("Volume Placement", func() { createAndVerifyFilesOnVolume(ns, pod.Name, []string{newEmptyFileName}, volumeFiles) deletePodAndWaitForVolumeToDetach(f, c, pod, node1Name, volumePaths) - By(fmt.Sprintf("Creating pod on the another node: %v", node2Name)) + ginkgo.By(fmt.Sprintf("Creating pod on the another node: %v", node2Name)) pod = createPodWithVolumeAndNodeSelector(c, ns, node2Name, node2KeyValueLabel, volumePaths) newEmptyFileName = fmt.Sprintf("/mnt/volume1/%v_2.txt", ns) @@ -177,13 +177,13 @@ var _ = utils.SIGDescribe("Volume Placement", func() { 10. Wait for vmdk1 and vmdk2 to be detached from node. 
*/ - It("should create and delete pod with multiple volumes from same datastore", func() { - By("creating another vmdk") + ginkgo.It("should create and delete pod with multiple volumes from same datastore", func() { + ginkgo.By("creating another vmdk") volumePath, err := vsp.CreateVolume(&VolumeOptions{}, nodeInfo.DataCenterRef) framework.ExpectNoError(err) volumePaths = append(volumePaths, volumePath) - By(fmt.Sprintf("Creating pod on the node: %v with volume: %v and volume: %v", node1Name, volumePaths[0], volumePaths[1])) + ginkgo.By(fmt.Sprintf("Creating pod on the node: %v with volume: %v and volume: %v", node1Name, volumePaths[0], volumePaths[1])) pod := createPodWithVolumeAndNodeSelector(c, ns, node1Name, node1KeyValueLabel, volumePaths) // Create empty files on the mounted volumes on the pod to verify volume is writable // Verify newly and previously created files present on the volume mounted on the pod @@ -193,7 +193,7 @@ var _ = utils.SIGDescribe("Volume Placement", func() { } createAndVerifyFilesOnVolume(ns, pod.Name, volumeFiles, volumeFiles) deletePodAndWaitForVolumeToDetach(f, c, pod, node1Name, volumePaths) - By(fmt.Sprintf("Creating pod on the node: %v with volume :%v and volume: %v", node1Name, volumePaths[0], volumePaths[1])) + ginkgo.By(fmt.Sprintf("Creating pod on the node: %v with volume :%v and volume: %v", node1Name, volumePaths[0], volumePaths[1])) pod = createPodWithVolumeAndNodeSelector(c, ns, node1Name, node1KeyValueLabel, volumePaths) // Create empty files on the mounted volumes on the pod to verify volume is writable // Verify newly and previously created files present on the volume mounted on the pod @@ -219,8 +219,8 @@ var _ = utils.SIGDescribe("Volume Placement", func() { 9. Delete POD. 10. Wait for vmdk1 and vmdk2 to be detached from node. 
*/ - It("should create and delete pod with multiple volumes from different datastore", func() { - By("creating another vmdk on non default shared datastore") + ginkgo.It("should create and delete pod with multiple volumes from different datastore", func() { + ginkgo.By("creating another vmdk on non default shared datastore") var volumeOptions *VolumeOptions volumeOptions = new(VolumeOptions) volumeOptions.CapacityKB = 2097152 @@ -231,7 +231,7 @@ var _ = utils.SIGDescribe("Volume Placement", func() { framework.ExpectNoError(err) volumePaths = append(volumePaths, volumePath) - By(fmt.Sprintf("Creating pod on the node: %v with volume :%v and volume: %v", node1Name, volumePaths[0], volumePaths[1])) + ginkgo.By(fmt.Sprintf("Creating pod on the node: %v with volume :%v and volume: %v", node1Name, volumePaths[0], volumePaths[1])) pod := createPodWithVolumeAndNodeSelector(c, ns, node1Name, node1KeyValueLabel, volumePaths) // Create empty files on the mounted volumes on the pod to verify volume is writable @@ -243,7 +243,7 @@ var _ = utils.SIGDescribe("Volume Placement", func() { createAndVerifyFilesOnVolume(ns, pod.Name, volumeFiles, volumeFiles) deletePodAndWaitForVolumeToDetach(f, c, pod, node1Name, volumePaths) - By(fmt.Sprintf("Creating pod on the node: %v with volume :%v and volume: %v", node1Name, volumePaths[0], volumePaths[1])) + ginkgo.By(fmt.Sprintf("Creating pod on the node: %v with volume :%v and volume: %v", node1Name, volumePaths[0], volumePaths[1])) pod = createPodWithVolumeAndNodeSelector(c, ns, node1Name, node1KeyValueLabel, volumePaths) // Create empty files on the mounted volumes on the pod to verify volume is writable // Verify newly and previously created files present on the volume mounted on the pod @@ -271,7 +271,7 @@ var _ = utils.SIGDescribe("Volume Placement", func() { 10. Repeatedly (5 times) perform step 4 to 9 and verify associated volume's content is matching. 11. Wait for vmdk1 and vmdk2 to be detached from node. 
*/ - It("test back to back pod creation and deletion with different volume sources on the same worker node", func() { + ginkgo.It("test back to back pod creation and deletion with different volume sources on the same worker node", func() { var ( podA *v1.Pod podB *v1.Pod @@ -282,10 +282,10 @@ var _ = utils.SIGDescribe("Volume Placement", func() { ) defer func() { - By("clean up undeleted pods") + ginkgo.By("clean up undeleted pods") framework.ExpectNoError(framework.DeletePodWithWait(f, c, podA), "defer: Failed to delete pod ", podA.Name) framework.ExpectNoError(framework.DeletePodWithWait(f, c, podB), "defer: Failed to delete pod ", podB.Name) - By(fmt.Sprintf("wait for volumes to be detached from the node: %v", node1Name)) + ginkgo.By(fmt.Sprintf("wait for volumes to be detached from the node: %v", node1Name)) for _, volumePath := range volumePaths { framework.ExpectNoError(waitForVSphereDiskToDetach(volumePath, node1Name)) } @@ -293,17 +293,17 @@ var _ = utils.SIGDescribe("Volume Placement", func() { testvolumePathsPodA = append(testvolumePathsPodA, volumePaths[0]) // Create another VMDK Volume - By("creating another vmdk") + ginkgo.By("creating another vmdk") volumePath, err := vsp.CreateVolume(&VolumeOptions{}, nodeInfo.DataCenterRef) framework.ExpectNoError(err) volumePaths = append(volumePaths, volumePath) testvolumePathsPodB = append(testvolumePathsPodA, volumePath) for index := 0; index < 5; index++ { - By(fmt.Sprintf("Creating pod-A on the node: %v with volume: %v", node1Name, testvolumePathsPodA[0])) + ginkgo.By(fmt.Sprintf("Creating pod-A on the node: %v with volume: %v", node1Name, testvolumePathsPodA[0])) podA = createPodWithVolumeAndNodeSelector(c, ns, node1Name, node1KeyValueLabel, testvolumePathsPodA) - By(fmt.Sprintf("Creating pod-B on the node: %v with volume: %v", node1Name, testvolumePathsPodB[0])) + ginkgo.By(fmt.Sprintf("Creating pod-B on the node: %v with volume: %v", node1Name, testvolumePathsPodB[0])) podB = createPodWithVolumeAndNodeSelector(c, ns, node1Name, node1KeyValueLabel, testvolumePathsPodB) podAFileName := fmt.Sprintf("/mnt/volume1/podA_%v_%v.txt", ns, index+1) @@ -312,21 +312,21 @@ var _ = utils.SIGDescribe("Volume Placement", func() { podBFiles = append(podBFiles, podBFileName) // Create empty files on the mounted volumes on the pod to verify volume is writable - By("Creating empty file on volume mounted on pod-A") + ginkgo.By("Creating empty file on volume mounted on pod-A") framework.CreateEmptyFileOnPod(ns, podA.Name, podAFileName) - By("Creating empty file volume mounted on pod-B") + ginkgo.By("Creating empty file volume mounted on pod-B") framework.CreateEmptyFileOnPod(ns, podB.Name, podBFileName) // Verify newly and previously created files present on the volume mounted on the pod - By("Verify newly Created file and previously created files present on volume mounted on pod-A") + ginkgo.By("Verify newly Created file and previously created files present on volume mounted on pod-A") verifyFilesExistOnVSphereVolume(ns, podA.Name, podAFiles...) - By("Verify newly Created file and previously created files present on volume mounted on pod-B") + ginkgo.By("Verify newly Created file and previously created files present on volume mounted on pod-B") verifyFilesExistOnVSphereVolume(ns, podB.Name, podBFiles...) 
- By("Deleting pod-A") + ginkgo.By("Deleting pod-A") framework.ExpectNoError(framework.DeletePodWithWait(f, c, podA), "Failed to delete pod ", podA.Name) - By("Deleting pod-B") + ginkgo.By("Deleting pod-B") framework.ExpectNoError(framework.DeletePodWithWait(f, c, podB), "Failed to delete pod ", podB.Name) } }) @@ -354,38 +354,38 @@ func testSetupVolumePlacement(client clientset.Interface, namespace string) (nod func createPodWithVolumeAndNodeSelector(client clientset.Interface, namespace string, nodeName string, nodeKeyValueLabel map[string]string, volumePaths []string) *v1.Pod { var pod *v1.Pod var err error - By(fmt.Sprintf("Creating pod on the node: %v", nodeName)) + ginkgo.By(fmt.Sprintf("Creating pod on the node: %v", nodeName)) podspec := getVSpherePodSpecWithVolumePaths(volumePaths, nodeKeyValueLabel, nil) pod, err = client.CoreV1().Pods(namespace).Create(podspec) framework.ExpectNoError(err) - By("Waiting for pod to be ready") - Expect(framework.WaitForPodNameRunningInNamespace(client, pod.Name, namespace)).To(Succeed()) + ginkgo.By("Waiting for pod to be ready") + gomega.Expect(framework.WaitForPodNameRunningInNamespace(client, pod.Name, namespace)).To(gomega.Succeed()) - By(fmt.Sprintf("Verify volume is attached to the node:%v", nodeName)) + ginkgo.By(fmt.Sprintf("Verify volume is attached to the node:%v", nodeName)) for _, volumePath := range volumePaths { isAttached, err := diskIsAttached(volumePath, nodeName) framework.ExpectNoError(err) - Expect(isAttached).To(BeTrue(), "disk:"+volumePath+" is not attached with the node") + gomega.Expect(isAttached).To(gomega.BeTrue(), "disk:"+volumePath+" is not attached with the node") } return pod } func createAndVerifyFilesOnVolume(namespace string, podname string, newEmptyfilesToCreate []string, filesToCheck []string) { // Create empty files on the mounted volumes on the pod to verify volume is writable - By(fmt.Sprintf("Creating empty file on volume mounted on: %v", podname)) + ginkgo.By(fmt.Sprintf("Creating empty file on volume mounted on: %v", podname)) createEmptyFilesOnVSphereVolume(namespace, podname, newEmptyfilesToCreate) // Verify newly and previously created files present on the volume mounted on the pod - By(fmt.Sprintf("Verify newly Created file and previously created files present on volume mounted on: %v", podname)) + ginkgo.By(fmt.Sprintf("Verify newly Created file and previously created files present on volume mounted on: %v", podname)) verifyFilesExistOnVSphereVolume(namespace, podname, filesToCheck...) } func deletePodAndWaitForVolumeToDetach(f *framework.Framework, c clientset.Interface, pod *v1.Pod, nodeName string, volumePaths []string) { - By("Deleting pod") + ginkgo.By("Deleting pod") framework.ExpectNoError(framework.DeletePodWithWait(f, c, pod), "Failed to delete pod ", pod.Name) - By("Waiting for volume to be detached from the node") + ginkgo.By("Waiting for volume to be detached from the node") for _, volumePath := range volumePaths { framework.ExpectNoError(waitForVSphereDiskToDetach(volumePath, nodeName)) } diff --git a/test/e2e/storage/vsphere/vsphere_volume_vpxd_restart.go b/test/e2e/storage/vsphere/vsphere_volume_vpxd_restart.go index 6cc4412dda9..08900a90a4c 100644 --- a/test/e2e/storage/vsphere/vsphere_volume_vpxd_restart.go +++ b/test/e2e/storage/vsphere/vsphere_volume_vpxd_restart.go @@ -21,8 +21,8 @@ import ( "strconv" "time" - . "github.com/onsi/ginkgo" - . 
"github.com/onsi/gomega" + "github.com/onsi/ginkgo" + "github.com/onsi/gomega" v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -68,7 +68,7 @@ var _ = utils.SIGDescribe("Verify Volume Attach Through vpxd Restart [Feature:vs vcNodesMap map[string][]node ) - BeforeEach(func() { + ginkgo.BeforeEach(func() { // Requires SSH access to vCenter. framework.SkipUnlessProviderIs("vsphere") @@ -79,7 +79,7 @@ var _ = utils.SIGDescribe("Verify Volume Attach Through vpxd Restart [Feature:vs nodes := framework.GetReadySchedulableNodesOrDie(client) numNodes := len(nodes.Items) - Expect(numNodes).NotTo(BeZero(), "No nodes are available for testing volume access through vpxd restart") + gomega.Expect(numNodes).NotTo(gomega.BeZero(), "No nodes are available for testing volume access through vpxd restart") vcNodesMap = make(map[string][]node) for i := 0; i < numNodes; i++ { @@ -97,7 +97,7 @@ var _ = utils.SIGDescribe("Verify Volume Attach Through vpxd Restart [Feature:vs } }) - It("verify volume remains attached through vpxd restart", func() { + ginkgo.It("verify volume remains attached through vpxd restart", func() { for vcHost, nodes := range vcNodesMap { var ( volumePaths []string @@ -109,28 +109,28 @@ var _ = utils.SIGDescribe("Verify Volume Attach Through vpxd Restart [Feature:vs e2elog.Logf("Testing for nodes on vCenter host: %s", vcHost) for i, node := range nodes { - By(fmt.Sprintf("Creating test vsphere volume %d", i)) + ginkgo.By(fmt.Sprintf("Creating test vsphere volume %d", i)) volumePath, err := node.nodeInfo.VSphere.CreateVolume(&VolumeOptions{}, node.nodeInfo.DataCenterRef) framework.ExpectNoError(err) volumePaths = append(volumePaths, volumePath) - By(fmt.Sprintf("Creating pod %d on node %v", i, node.name)) + ginkgo.By(fmt.Sprintf("Creating pod %d on node %v", i, node.name)) podspec := getVSpherePodSpecWithVolumePaths([]string{volumePath}, node.kvLabels, nil) pod, err := client.CoreV1().Pods(namespace).Create(podspec) framework.ExpectNoError(err) - By(fmt.Sprintf("Waiting for pod %d to be ready", i)) - Expect(framework.WaitForPodNameRunningInNamespace(client, pod.Name, namespace)).To(Succeed()) + ginkgo.By(fmt.Sprintf("Waiting for pod %d to be ready", i)) + gomega.Expect(framework.WaitForPodNameRunningInNamespace(client, pod.Name, namespace)).To(gomega.Succeed()) pod, err = client.CoreV1().Pods(namespace).Get(pod.Name, metav1.GetOptions{}) framework.ExpectNoError(err) pods = append(pods, pod) nodeName := pod.Spec.NodeName - By(fmt.Sprintf("Verifying that volume %v is attached to node %v", volumePath, nodeName)) + ginkgo.By(fmt.Sprintf("Verifying that volume %v is attached to node %v", volumePath, nodeName)) expectVolumeToBeAttached(nodeName, volumePath) - By(fmt.Sprintf("Creating a file with random content on the volume mounted on pod %d", i)) + ginkgo.By(fmt.Sprintf("Creating a file with random content on the volume mounted on pod %d", i)) filePath := fmt.Sprintf("/mnt/volume1/%v_vpxd_restart_test_%v.txt", namespace, strconv.FormatInt(time.Now().UnixNano(), 10)) randomContent := fmt.Sprintf("Random Content -- %v", strconv.FormatInt(time.Now().UnixNano(), 10)) err = writeContentToPodFile(namespace, pod.Name, filePath, randomContent) @@ -139,7 +139,7 @@ var _ = utils.SIGDescribe("Verify Volume Attach Through vpxd Restart [Feature:vs fileContents = append(fileContents, randomContent) } - By("Stopping vpxd on the vCenter host") + ginkgo.By("Stopping vpxd on the vCenter host") vcAddress := vcHost + ":22" err := invokeVCenterServiceControl("stop", vpxdServiceName, vcAddress) 
framework.ExpectNoError(err, "Unable to stop vpxd on the vCenter host") @@ -147,7 +147,7 @@ var _ = utils.SIGDescribe("Verify Volume Attach Through vpxd Restart [Feature:vs expectFilesToBeAccessible(namespace, pods, filePaths) expectFileContentsToMatch(namespace, pods, filePaths, fileContents) - By("Starting vpxd on the vCenter host") + ginkgo.By("Starting vpxd on the vCenter host") err = invokeVCenterServiceControl("start", vpxdServiceName, vcAddress) framework.ExpectNoError(err, "Unable to start vpxd on the vCenter host") @@ -160,15 +160,15 @@ var _ = utils.SIGDescribe("Verify Volume Attach Through vpxd Restart [Feature:vs nodeName := pod.Spec.NodeName volumePath := volumePaths[i] - By(fmt.Sprintf("Deleting pod on node %s", nodeName)) + ginkgo.By(fmt.Sprintf("Deleting pod on node %s", nodeName)) err = framework.DeletePodWithWait(f, client, pod) framework.ExpectNoError(err) - By(fmt.Sprintf("Waiting for volume %s to be detached from node %s", volumePath, nodeName)) + ginkgo.By(fmt.Sprintf("Waiting for volume %s to be detached from node %s", volumePath, nodeName)) err = waitForVSphereDiskToDetach(volumePath, nodeName) framework.ExpectNoError(err) - By(fmt.Sprintf("Deleting volume %s", volumePath)) + ginkgo.By(fmt.Sprintf("Deleting volume %s", volumePath)) err = node.nodeInfo.VSphere.DeleteVolume(volumePath, node.nodeInfo.DataCenterRef) framework.ExpectNoError(err) } diff --git a/test/e2e/storage/vsphere/vsphere_volume_vsan_policy.go b/test/e2e/storage/vsphere/vsphere_volume_vsan_policy.go index 4af59a4025e..ab587cb51db 100644 --- a/test/e2e/storage/vsphere/vsphere_volume_vsan_policy.go +++ b/test/e2e/storage/vsphere/vsphere_volume_vsan_policy.go @@ -23,8 +23,8 @@ import ( "strings" - . "github.com/onsi/ginkgo" - . "github.com/onsi/gomega" + "github.com/onsi/ginkgo" + "github.com/onsi/gomega" v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" clientset "k8s.io/client-go/kubernetes" @@ -97,7 +97,7 @@ var _ = utils.SIGDescribe("Storage Policy Based Volume Provisioning [Feature:vsp tagPolicy string masterNode string ) - BeforeEach(func() { + ginkgo.BeforeEach(func() { framework.SkipUnlessProviderIs("vsphere") Bootstrap(f) client = f.ClientSet @@ -111,13 +111,13 @@ var _ = utils.SIGDescribe("Storage Policy Based Volume Provisioning [Feature:vsp framework.Failf("Unable to find ready and schedulable Node") } masternodes, _ := framework.GetMasterAndWorkerNodesOrDie(client) - Expect(masternodes).NotTo(BeEmpty()) + gomega.Expect(masternodes).NotTo(gomega.BeEmpty()) masterNode = masternodes.List()[0] }) // Valid policy. 
- It("verify VSAN storage capability with valid hostFailuresToTolerate and cacheReservation values is honored for dynamically provisioned pvc using storageclass", func() { - By(fmt.Sprintf("Invoking test for VSAN policy hostFailuresToTolerate: %s, cacheReservation: %s", HostFailuresToTolerateCapabilityVal, CacheReservationCapabilityVal)) + ginkgo.It("verify VSAN storage capability with valid hostFailuresToTolerate and cacheReservation values is honored for dynamically provisioned pvc using storageclass", func() { + ginkgo.By(fmt.Sprintf("Invoking test for VSAN policy hostFailuresToTolerate: %s, cacheReservation: %s", HostFailuresToTolerateCapabilityVal, CacheReservationCapabilityVal)) scParameters[Policy_HostFailuresToTolerate] = HostFailuresToTolerateCapabilityVal scParameters[Policy_CacheReservation] = CacheReservationCapabilityVal e2elog.Logf("Invoking test for VSAN storage capabilities: %+v", scParameters) @@ -125,8 +125,8 @@ var _ = utils.SIGDescribe("Storage Policy Based Volume Provisioning [Feature:vsp }) // Valid policy. - It("verify VSAN storage capability with valid diskStripes and objectSpaceReservation values is honored for dynamically provisioned pvc using storageclass", func() { - By(fmt.Sprintf("Invoking test for VSAN policy diskStripes: %s, objectSpaceReservation: %s", DiskStripesCapabilityVal, ObjectSpaceReservationCapabilityVal)) + ginkgo.It("verify VSAN storage capability with valid diskStripes and objectSpaceReservation values is honored for dynamically provisioned pvc using storageclass", func() { + ginkgo.By(fmt.Sprintf("Invoking test for VSAN policy diskStripes: %s, objectSpaceReservation: %s", DiskStripesCapabilityVal, ObjectSpaceReservationCapabilityVal)) scParameters[Policy_DiskStripes] = "1" scParameters[Policy_ObjectSpaceReservation] = "30" e2elog.Logf("Invoking test for VSAN storage capabilities: %+v", scParameters) @@ -134,8 +134,8 @@ var _ = utils.SIGDescribe("Storage Policy Based Volume Provisioning [Feature:vsp }) // Valid policy. - It("verify VSAN storage capability with valid diskStripes and objectSpaceReservation values and a VSAN datastore is honored for dynamically provisioned pvc using storageclass", func() { - By(fmt.Sprintf("Invoking test for VSAN policy diskStripes: %s, objectSpaceReservation: %s", DiskStripesCapabilityVal, ObjectSpaceReservationCapabilityVal)) + ginkgo.It("verify VSAN storage capability with valid diskStripes and objectSpaceReservation values and a VSAN datastore is honored for dynamically provisioned pvc using storageclass", func() { + ginkgo.By(fmt.Sprintf("Invoking test for VSAN policy diskStripes: %s, objectSpaceReservation: %s", DiskStripesCapabilityVal, ObjectSpaceReservationCapabilityVal)) scParameters[Policy_DiskStripes] = DiskStripesCapabilityVal scParameters[Policy_ObjectSpaceReservation] = ObjectSpaceReservationCapabilityVal scParameters[Datastore] = VsanDatastore @@ -144,8 +144,8 @@ var _ = utils.SIGDescribe("Storage Policy Based Volume Provisioning [Feature:vsp }) // Valid policy. 
- It("verify VSAN storage capability with valid objectSpaceReservation and iopsLimit values is honored for dynamically provisioned pvc using storageclass", func() { - By(fmt.Sprintf("Invoking test for VSAN policy objectSpaceReservation: %s, iopsLimit: %s", ObjectSpaceReservationCapabilityVal, IopsLimitCapabilityVal)) + ginkgo.It("verify VSAN storage capability with valid objectSpaceReservation and iopsLimit values is honored for dynamically provisioned pvc using storageclass", func() { + ginkgo.By(fmt.Sprintf("Invoking test for VSAN policy objectSpaceReservation: %s, iopsLimit: %s", ObjectSpaceReservationCapabilityVal, IopsLimitCapabilityVal)) scParameters[Policy_ObjectSpaceReservation] = ObjectSpaceReservationCapabilityVal scParameters[Policy_IopsLimit] = IopsLimitCapabilityVal e2elog.Logf("Invoking test for VSAN storage capabilities: %+v", scParameters) @@ -153,13 +153,13 @@ var _ = utils.SIGDescribe("Storage Policy Based Volume Provisioning [Feature:vsp }) // Invalid VSAN storage capabilities parameters. - It("verify VSAN storage capability with invalid capability name objectSpaceReserve is not honored for dynamically provisioned pvc using storageclass", func() { - By(fmt.Sprintf("Invoking test for VSAN policy objectSpaceReserve: %s, stripeWidth: %s", ObjectSpaceReservationCapabilityVal, StripeWidthCapabilityVal)) + ginkgo.It("verify VSAN storage capability with invalid capability name objectSpaceReserve is not honored for dynamically provisioned pvc using storageclass", func() { + ginkgo.By(fmt.Sprintf("Invoking test for VSAN policy objectSpaceReserve: %s, stripeWidth: %s", ObjectSpaceReservationCapabilityVal, StripeWidthCapabilityVal)) scParameters["objectSpaceReserve"] = ObjectSpaceReservationCapabilityVal scParameters[Policy_DiskStripes] = StripeWidthCapabilityVal e2elog.Logf("Invoking test for VSAN storage capabilities: %+v", scParameters) err := invokeInvalidPolicyTestNeg(client, namespace, scParameters) - Expect(err).To(HaveOccurred()) + gomega.Expect(err).To(gomega.HaveOccurred()) errorMsg := "invalid option \\\"objectSpaceReserve\\\" for volume plugin kubernetes.io/vsphere-volume" if !strings.Contains(err.Error(), errorMsg) { framework.ExpectNoError(err, errorMsg) @@ -168,13 +168,13 @@ var _ = utils.SIGDescribe("Storage Policy Based Volume Provisioning [Feature:vsp // Invalid policy on a VSAN test bed. // diskStripes value has to be between 1 and 12. - It("verify VSAN storage capability with invalid diskStripes value is not honored for dynamically provisioned pvc using storageclass", func() { - By(fmt.Sprintf("Invoking test for VSAN policy diskStripes: %s, cacheReservation: %s", DiskStripesCapabilityInvalidVal, CacheReservationCapabilityVal)) + ginkgo.It("verify VSAN storage capability with invalid diskStripes value is not honored for dynamically provisioned pvc using storageclass", func() { + ginkgo.By(fmt.Sprintf("Invoking test for VSAN policy diskStripes: %s, cacheReservation: %s", DiskStripesCapabilityInvalidVal, CacheReservationCapabilityVal)) scParameters[Policy_DiskStripes] = DiskStripesCapabilityInvalidVal scParameters[Policy_CacheReservation] = CacheReservationCapabilityVal e2elog.Logf("Invoking test for VSAN storage capabilities: %+v", scParameters) err := invokeInvalidPolicyTestNeg(client, namespace, scParameters) - Expect(err).To(HaveOccurred()) + gomega.Expect(err).To(gomega.HaveOccurred()) errorMsg := "Invalid value for " + Policy_DiskStripes + "." 
if !strings.Contains(err.Error(), errorMsg) { framework.ExpectNoError(err, errorMsg) @@ -183,12 +183,12 @@ var _ = utils.SIGDescribe("Storage Policy Based Volume Provisioning [Feature:vsp // Invalid policy on a VSAN test bed. // hostFailuresToTolerate value has to be between 0 and 3 including. - It("verify VSAN storage capability with invalid hostFailuresToTolerate value is not honored for dynamically provisioned pvc using storageclass", func() { - By(fmt.Sprintf("Invoking test for VSAN policy hostFailuresToTolerate: %s", HostFailuresToTolerateCapabilityInvalidVal)) + ginkgo.It("verify VSAN storage capability with invalid hostFailuresToTolerate value is not honored for dynamically provisioned pvc using storageclass", func() { + ginkgo.By(fmt.Sprintf("Invoking test for VSAN policy hostFailuresToTolerate: %s", HostFailuresToTolerateCapabilityInvalidVal)) scParameters[Policy_HostFailuresToTolerate] = HostFailuresToTolerateCapabilityInvalidVal e2elog.Logf("Invoking test for VSAN storage capabilities: %+v", scParameters) err := invokeInvalidPolicyTestNeg(client, namespace, scParameters) - Expect(err).To(HaveOccurred()) + gomega.Expect(err).To(gomega.HaveOccurred()) errorMsg := "Invalid value for " + Policy_HostFailuresToTolerate + "." if !strings.Contains(err.Error(), errorMsg) { framework.ExpectNoError(err, errorMsg) @@ -197,14 +197,14 @@ var _ = utils.SIGDescribe("Storage Policy Based Volume Provisioning [Feature:vsp // Specify a valid VSAN policy on a non-VSAN test bed. // The test should fail. - It("verify VSAN storage capability with non-vsan datastore is not honored for dynamically provisioned pvc using storageclass", func() { - By(fmt.Sprintf("Invoking test for VSAN policy diskStripes: %s, objectSpaceReservation: %s and a non-VSAN datastore: %s", DiskStripesCapabilityVal, ObjectSpaceReservationCapabilityVal, VmfsDatastore)) + ginkgo.It("verify VSAN storage capability with non-vsan datastore is not honored for dynamically provisioned pvc using storageclass", func() { + ginkgo.By(fmt.Sprintf("Invoking test for VSAN policy diskStripes: %s, objectSpaceReservation: %s and a non-VSAN datastore: %s", DiskStripesCapabilityVal, ObjectSpaceReservationCapabilityVal, VmfsDatastore)) scParameters[Policy_DiskStripes] = DiskStripesCapabilityVal scParameters[Policy_ObjectSpaceReservation] = ObjectSpaceReservationCapabilityVal scParameters[Datastore] = VmfsDatastore e2elog.Logf("Invoking test for VSAN storage capabilities: %+v", scParameters) err := invokeInvalidPolicyTestNeg(client, namespace, scParameters) - Expect(err).To(HaveOccurred()) + gomega.Expect(err).To(gomega.HaveOccurred()) errorMsg := "The specified datastore: \\\"" + VmfsDatastore + "\\\" is not a VSAN datastore. " + "The policy parameters will work only with VSAN Datastore." 
if !strings.Contains(err.Error(), errorMsg) { @@ -212,15 +212,15 @@ var _ = utils.SIGDescribe("Storage Policy Based Volume Provisioning [Feature:vsp } }) - It("verify an existing and compatible SPBM policy is honored for dynamically provisioned pvc using storageclass", func() { - By(fmt.Sprintf("Invoking test for SPBM policy: %s", policyName)) + ginkgo.It("verify an existing and compatible SPBM policy is honored for dynamically provisioned pvc using storageclass", func() { + ginkgo.By(fmt.Sprintf("Invoking test for SPBM policy: %s", policyName)) scParameters[SpbmStoragePolicy] = policyName scParameters[DiskFormat] = ThinDisk e2elog.Logf("Invoking test for SPBM storage policy: %+v", scParameters) invokeValidPolicyTest(f, client, namespace, scParameters) }) - It("verify clean up of stale dummy VM for dynamically provisioned pvc using SPBM policy", func() { + ginkgo.It("verify clean up of stale dummy VM for dynamically provisioned pvc using SPBM policy", func() { scParameters[Policy_DiskStripes] = DiskStripesCapabilityMaxVal scParameters[Policy_ObjectSpaceReservation] = ObjectSpaceReservationCapabilityVal scParameters[Datastore] = VsanDatastore @@ -229,42 +229,42 @@ var _ = utils.SIGDescribe("Storage Policy Based Volume Provisioning [Feature:vsp invokeStaleDummyVMTestWithStoragePolicy(client, masterNode, namespace, kubernetesClusterName, scParameters) }) - It("verify if a SPBM policy is not honored on a non-compatible datastore for dynamically provisioned pvc using storageclass", func() { - By(fmt.Sprintf("Invoking test for SPBM policy: %s and datastore: %s", tagPolicy, VsanDatastore)) + ginkgo.It("verify if a SPBM policy is not honored on a non-compatible datastore for dynamically provisioned pvc using storageclass", func() { + ginkgo.By(fmt.Sprintf("Invoking test for SPBM policy: %s and datastore: %s", tagPolicy, VsanDatastore)) scParameters[SpbmStoragePolicy] = tagPolicy scParameters[Datastore] = VsanDatastore scParameters[DiskFormat] = ThinDisk e2elog.Logf("Invoking test for SPBM storage policy on a non-compatible datastore: %+v", scParameters) err := invokeInvalidPolicyTestNeg(client, namespace, scParameters) - Expect(err).To(HaveOccurred()) + gomega.Expect(err).To(gomega.HaveOccurred()) errorMsg := "User specified datastore is not compatible with the storagePolicy: \\\"" + tagPolicy + "\\\"" if !strings.Contains(err.Error(), errorMsg) { framework.ExpectNoError(err, errorMsg) } }) - It("verify if a non-existing SPBM policy is not honored for dynamically provisioned pvc using storageclass", func() { - By(fmt.Sprintf("Invoking test for SPBM policy: %s", BronzeStoragePolicy)) + ginkgo.It("verify if a non-existing SPBM policy is not honored for dynamically provisioned pvc using storageclass", func() { + ginkgo.By(fmt.Sprintf("Invoking test for SPBM policy: %s", BronzeStoragePolicy)) scParameters[SpbmStoragePolicy] = BronzeStoragePolicy scParameters[DiskFormat] = ThinDisk e2elog.Logf("Invoking test for non-existing SPBM storage policy: %+v", scParameters) err := invokeInvalidPolicyTestNeg(client, namespace, scParameters) - Expect(err).To(HaveOccurred()) + gomega.Expect(err).To(gomega.HaveOccurred()) errorMsg := "no pbm profile found with name: \\\"" + BronzeStoragePolicy + "\\" if !strings.Contains(err.Error(), errorMsg) { framework.ExpectNoError(err, errorMsg) } }) - It("verify an if a SPBM policy and VSAN capabilities cannot be honored for dynamically provisioned pvc using storageclass", func() { - By(fmt.Sprintf("Invoking test for SPBM policy: %s with VSAN storage capabilities", 
policyName)) + ginkgo.It("verify an if a SPBM policy and VSAN capabilities cannot be honored for dynamically provisioned pvc using storageclass", func() { + ginkgo.By(fmt.Sprintf("Invoking test for SPBM policy: %s with VSAN storage capabilities", policyName)) scParameters[SpbmStoragePolicy] = policyName - Expect(scParameters[SpbmStoragePolicy]).NotTo(BeEmpty()) + gomega.Expect(scParameters[SpbmStoragePolicy]).NotTo(gomega.BeEmpty()) scParameters[Policy_DiskStripes] = DiskStripesCapabilityVal scParameters[DiskFormat] = ThinDisk e2elog.Logf("Invoking test for SPBM storage policy and VSAN capabilities together: %+v", scParameters) err := invokeInvalidPolicyTestNeg(client, namespace, scParameters) - Expect(err).To(HaveOccurred()) + gomega.Expect(err).To(gomega.HaveOccurred()) errorMsg := "Cannot specify storage policy capabilities along with storage policy name. Please specify only one" if !strings.Contains(err.Error(), errorMsg) { framework.ExpectNoError(err, errorMsg) @@ -273,71 +273,71 @@ var _ = utils.SIGDescribe("Storage Policy Based Volume Provisioning [Feature:vsp }) func invokeValidPolicyTest(f *framework.Framework, client clientset.Interface, namespace string, scParameters map[string]string) { - By("Creating Storage Class With storage policy params") + ginkgo.By("Creating Storage Class With storage policy params") storageclass, err := client.StorageV1().StorageClasses().Create(getVSphereStorageClassSpec("storagepolicysc", scParameters, nil)) framework.ExpectNoError(err, fmt.Sprintf("Failed to create storage class with err: %v", err)) defer client.StorageV1().StorageClasses().Delete(storageclass.Name, nil) - By("Creating PVC using the Storage Class") + ginkgo.By("Creating PVC using the Storage Class") pvclaim, err := framework.CreatePVC(client, namespace, getVSphereClaimSpecWithStorageClass(namespace, "2Gi", storageclass)) framework.ExpectNoError(err) defer framework.DeletePersistentVolumeClaim(client, pvclaim.Name, namespace) var pvclaims []*v1.PersistentVolumeClaim pvclaims = append(pvclaims, pvclaim) - By("Waiting for claim to be in bound phase") + ginkgo.By("Waiting for claim to be in bound phase") persistentvolumes, err := framework.WaitForPVClaimBoundPhase(client, pvclaims, framework.ClaimProvisionTimeout) framework.ExpectNoError(err) - By("Creating pod to attach PV to the node") + ginkgo.By("Creating pod to attach PV to the node") // Create pod to attach Volume to Node pod, err := framework.CreatePod(client, namespace, nil, pvclaims, false, "") framework.ExpectNoError(err) - By("Verify the volume is accessible and available in the pod") + ginkgo.By("Verify the volume is accessible and available in the pod") verifyVSphereVolumesAccessible(client, pod, persistentvolumes) - By("Deleting pod") + ginkgo.By("Deleting pod") framework.DeletePodWithWait(f, client, pod) - By("Waiting for volumes to be detached from the node") + ginkgo.By("Waiting for volumes to be detached from the node") waitForVSphereDiskToDetach(persistentvolumes[0].Spec.VsphereVolume.VolumePath, pod.Spec.NodeName) } func invokeInvalidPolicyTestNeg(client clientset.Interface, namespace string, scParameters map[string]string) error { - By("Creating Storage Class With storage policy params") + ginkgo.By("Creating Storage Class With storage policy params") storageclass, err := client.StorageV1().StorageClasses().Create(getVSphereStorageClassSpec("storagepolicysc", scParameters, nil)) framework.ExpectNoError(err, fmt.Sprintf("Failed to create storage class with err: %v", err)) defer 
client.StorageV1().StorageClasses().Delete(storageclass.Name, nil) - By("Creating PVC using the Storage Class") + ginkgo.By("Creating PVC using the Storage Class") pvclaim, err := framework.CreatePVC(client, namespace, getVSphereClaimSpecWithStorageClass(namespace, "2Gi", storageclass)) framework.ExpectNoError(err) defer framework.DeletePersistentVolumeClaim(client, pvclaim.Name, namespace) - By("Waiting for claim to be in bound phase") + ginkgo.By("Waiting for claim to be in bound phase") err = framework.WaitForPersistentVolumeClaimPhase(v1.ClaimBound, client, pvclaim.Namespace, pvclaim.Name, framework.Poll, 2*time.Minute) - Expect(err).To(HaveOccurred()) + gomega.Expect(err).To(gomega.HaveOccurred()) eventList, err := client.CoreV1().Events(pvclaim.Namespace).List(metav1.ListOptions{}) return fmt.Errorf("Failure message: %+q", eventList.Items[0].Message) } func invokeStaleDummyVMTestWithStoragePolicy(client clientset.Interface, masterNode string, namespace string, clusterName string, scParameters map[string]string) { - By("Creating Storage Class With storage policy params") + ginkgo.By("Creating Storage Class With storage policy params") storageclass, err := client.StorageV1().StorageClasses().Create(getVSphereStorageClassSpec("storagepolicysc", scParameters, nil)) framework.ExpectNoError(err, fmt.Sprintf("Failed to create storage class with err: %v", err)) defer client.StorageV1().StorageClasses().Delete(storageclass.Name, nil) - By("Creating PVC using the Storage Class") + ginkgo.By("Creating PVC using the Storage Class") pvclaim, err := framework.CreatePVC(client, namespace, getVSphereClaimSpecWithStorageClass(namespace, "2Gi", storageclass)) framework.ExpectNoError(err) var pvclaims []*v1.PersistentVolumeClaim pvclaims = append(pvclaims, pvclaim) - By("Expect claim to fail provisioning volume") + ginkgo.By("Expect claim to fail provisioning volume") _, err = framework.WaitForPVClaimBoundPhase(client, pvclaims, 2*time.Minute) - Expect(err).To(HaveOccurred()) + gomega.Expect(err).To(gomega.HaveOccurred()) updatedClaim, err := client.CoreV1().PersistentVolumeClaims(namespace).Get(pvclaim.Name, metav1.GetOptions{}) framework.ExpectNoError(err) @@ -351,5 +351,5 @@ func invokeStaleDummyVMTestWithStoragePolicy(client clientset.Interface, masterN dummyVMFullName := DummyVMPrefixName + "-" + fmt.Sprint(fnvHash.Sum32()) errorMsg := "Dummy VM - " + vmName + "is still present. Failing the test.." nodeInfo := TestContext.NodeMapper.GetNodeInfo(masterNode) - Expect(nodeInfo.VSphere.IsVMPresent(dummyVMFullName, nodeInfo.DataCenterRef)).NotTo(BeTrue(), errorMsg) + gomega.Expect(nodeInfo.VSphere.IsVMPresent(dummyVMFullName, nodeInfo.DataCenterRef)).NotTo(gomega.BeTrue(), errorMsg) } diff --git a/test/e2e/storage/vsphere/vsphere_zone_support.go b/test/e2e/storage/vsphere/vsphere_zone_support.go index f1679557a4a..03f0e0052ff 100644 --- a/test/e2e/storage/vsphere/vsphere_zone_support.go +++ b/test/e2e/storage/vsphere/vsphere_zone_support.go @@ -21,8 +21,8 @@ import ( "strings" "time" - . "github.com/onsi/ginkgo" - . 
"github.com/onsi/gomega" + "github.com/onsi/ginkgo" + "github.com/onsi/gomega" v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -94,7 +94,7 @@ var _ = utils.SIGDescribe("Zone Support", func() { zoneC string zoneD string ) - BeforeEach(func() { + ginkgo.BeforeEach(func() { framework.SkipUnlessProviderIs("vsphere") Bootstrap(f) client = f.ClientSet @@ -115,52 +115,52 @@ var _ = utils.SIGDescribe("Zone Support", func() { } }) - It("Verify dynamically created pv with allowed zones specified in storage class, shows the right zone information on its labels", func() { - By(fmt.Sprintf("Creating storage class with the following zones : %s", zoneA)) + ginkgo.It("Verify dynamically created pv with allowed zones specified in storage class, shows the right zone information on its labels", func() { + ginkgo.By(fmt.Sprintf("Creating storage class with the following zones : %s", zoneA)) zones = append(zones, zoneA) verifyPVZoneLabels(client, namespace, nil, zones) }) - It("Verify dynamically created pv with multiple zones specified in the storage class, shows both the zones on its labels", func() { - By(fmt.Sprintf("Creating storage class with the following zones : %s, %s", zoneA, zoneB)) + ginkgo.It("Verify dynamically created pv with multiple zones specified in the storage class, shows both the zones on its labels", func() { + ginkgo.By(fmt.Sprintf("Creating storage class with the following zones : %s, %s", zoneA, zoneB)) zones = append(zones, zoneA) zones = append(zones, zoneB) verifyPVZoneLabels(client, namespace, nil, zones) }) - It("Verify PVC creation with invalid zone specified in storage class fails", func() { - By(fmt.Sprintf("Creating storage class with unknown zone : %s", zoneD)) + ginkgo.It("Verify PVC creation with invalid zone specified in storage class fails", func() { + ginkgo.By(fmt.Sprintf("Creating storage class with unknown zone : %s", zoneD)) zones = append(zones, zoneD) err := verifyPVCCreationFails(client, namespace, nil, zones) - Expect(err).To(HaveOccurred()) + gomega.Expect(err).To(gomega.HaveOccurred()) errorMsg := "Failed to find a shared datastore matching zone [" + zoneD + "]" if !strings.Contains(err.Error(), errorMsg) { framework.ExpectNoError(err, errorMsg) } }) - It("Verify a pod is created and attached to a dynamically created PV, based on allowed zones specified in storage class ", func() { - By(fmt.Sprintf("Creating storage class with zones :%s", zoneA)) + ginkgo.It("Verify a pod is created and attached to a dynamically created PV, based on allowed zones specified in storage class ", func() { + ginkgo.By(fmt.Sprintf("Creating storage class with zones :%s", zoneA)) zones = append(zones, zoneA) verifyPVCAndPodCreationSucceeds(client, namespace, nil, zones) }) - It("Verify a pod is created and attached to a dynamically created PV, based on multiple zones specified in storage class ", func() { - By(fmt.Sprintf("Creating storage class with zones :%s, %s", zoneA, zoneB)) + ginkgo.It("Verify a pod is created and attached to a dynamically created PV, based on multiple zones specified in storage class ", func() { + ginkgo.By(fmt.Sprintf("Creating storage class with zones :%s, %s", zoneA, zoneB)) zones = append(zones, zoneA) zones = append(zones, zoneB) verifyPVCAndPodCreationSucceeds(client, namespace, nil, zones) }) - It("Verify a pod is created and attached to a dynamically created PV, based on the allowed zones and datastore specified in storage class", func() { - By(fmt.Sprintf("Creating storage class with zone :%s and datastore :%s", zoneA, 
vsanDatastore1)) + ginkgo.It("Verify a pod is created and attached to a dynamically created PV, based on the allowed zones and datastore specified in storage class", func() { + ginkgo.By(fmt.Sprintf("Creating storage class with zone :%s and datastore :%s", zoneA, vsanDatastore1)) scParameters[Datastore] = vsanDatastore1 zones = append(zones, zoneA) verifyPVCAndPodCreationSucceeds(client, namespace, scParameters, zones) }) - It("Verify PVC creation with incompatible datastore and zone combination specified in storage class fails", func() { - By(fmt.Sprintf("Creating storage class with zone :%s and datastore :%s", zoneC, vsanDatastore1)) + ginkgo.It("Verify PVC creation with incompatible datastore and zone combination specified in storage class fails", func() { + ginkgo.By(fmt.Sprintf("Creating storage class with zone :%s and datastore :%s", zoneC, vsanDatastore1)) scParameters[Datastore] = vsanDatastore1 zones = append(zones, zoneC) err := verifyPVCCreationFails(client, namespace, scParameters, zones) @@ -170,22 +170,22 @@ var _ = utils.SIGDescribe("Zone Support", func() { } }) - It("Verify a pod is created and attached to a dynamically created PV, based on the allowed zones and storage policy specified in storage class", func() { - By(fmt.Sprintf("Creating storage class with zone :%s and storage policy :%s", zoneA, compatPolicy)) + ginkgo.It("Verify a pod is created and attached to a dynamically created PV, based on the allowed zones and storage policy specified in storage class", func() { + ginkgo.By(fmt.Sprintf("Creating storage class with zone :%s and storage policy :%s", zoneA, compatPolicy)) scParameters[SpbmStoragePolicy] = compatPolicy zones = append(zones, zoneA) verifyPVCAndPodCreationSucceeds(client, namespace, scParameters, zones) }) - It("Verify a pod is created on a non-Workspace zone and attached to a dynamically created PV, based on the allowed zones and storage policy specified in storage class", func() { - By(fmt.Sprintf("Creating storage class with zone :%s and storage policy :%s", zoneB, compatPolicy)) + ginkgo.It("Verify a pod is created on a non-Workspace zone and attached to a dynamically created PV, based on the allowed zones and storage policy specified in storage class", func() { + ginkgo.By(fmt.Sprintf("Creating storage class with zone :%s and storage policy :%s", zoneB, compatPolicy)) scParameters[SpbmStoragePolicy] = compatPolicy zones = append(zones, zoneB) verifyPVCAndPodCreationSucceeds(client, namespace, scParameters, zones) }) - It("Verify PVC creation with incompatible storagePolicy and zone combination specified in storage class fails", func() { - By(fmt.Sprintf("Creating storage class with zone :%s and storage policy :%s", zoneA, nonCompatPolicy)) + ginkgo.It("Verify PVC creation with incompatible storagePolicy and zone combination specified in storage class fails", func() { + ginkgo.By(fmt.Sprintf("Creating storage class with zone :%s and storage policy :%s", zoneA, nonCompatPolicy)) scParameters[SpbmStoragePolicy] = nonCompatPolicy zones = append(zones, zoneA) err := verifyPVCCreationFails(client, namespace, scParameters, zones) @@ -195,16 +195,16 @@ var _ = utils.SIGDescribe("Zone Support", func() { } }) - It("Verify a pod is created and attached to a dynamically created PV, based on the allowed zones, datastore and storage policy specified in storage class", func() { - By(fmt.Sprintf("Creating storage class with zone :%s datastore :%s and storagePolicy :%s", zoneA, vsanDatastore1, compatPolicy)) + ginkgo.It("Verify a pod is created and attached to a 
dynamically created PV, based on the allowed zones, datastore and storage policy specified in storage class", func() { + ginkgo.By(fmt.Sprintf("Creating storage class with zone :%s datastore :%s and storagePolicy :%s", zoneA, vsanDatastore1, compatPolicy)) scParameters[SpbmStoragePolicy] = compatPolicy scParameters[Datastore] = vsanDatastore1 zones = append(zones, zoneA) verifyPVCAndPodCreationSucceeds(client, namespace, scParameters, zones) }) - It("Verify PVC creation with incompatible storage policy along with compatible zone and datastore combination specified in storage class fails", func() { - By(fmt.Sprintf("Creating storage class with zone :%s datastore :%s and storagePolicy :%s", zoneA, vsanDatastore1, nonCompatPolicy)) + ginkgo.It("Verify PVC creation with incompatible storage policy along with compatible zone and datastore combination specified in storage class fails", func() { + ginkgo.By(fmt.Sprintf("Creating storage class with zone :%s datastore :%s and storagePolicy :%s", zoneA, vsanDatastore1, nonCompatPolicy)) scParameters[SpbmStoragePolicy] = nonCompatPolicy scParameters[Datastore] = vsanDatastore1 zones = append(zones, zoneA) @@ -215,8 +215,8 @@ var _ = utils.SIGDescribe("Zone Support", func() { } }) - It("Verify PVC creation with incompatible zone along with compatible storagePolicy and datastore combination specified in storage class fails", func() { - By(fmt.Sprintf("Creating storage class with zone :%s datastore :%s and storagePolicy :%s", zoneC, vsanDatastore2, compatPolicy)) + ginkgo.It("Verify PVC creation with incompatible zone along with compatible storagePolicy and datastore combination specified in storage class fails", func() { + ginkgo.By(fmt.Sprintf("Creating storage class with zone :%s datastore :%s and storagePolicy :%s", zoneC, vsanDatastore2, compatPolicy)) scParameters[SpbmStoragePolicy] = compatPolicy scParameters[Datastore] = vsanDatastore2 zones = append(zones, zoneC) @@ -227,8 +227,8 @@ var _ = utils.SIGDescribe("Zone Support", func() { } }) - It("Verify PVC creation fails if no zones are specified in the storage class (No shared datastores exist among all the nodes)", func() { - By(fmt.Sprintf("Creating storage class with no zones")) + ginkgo.It("Verify PVC creation fails if no zones are specified in the storage class (No shared datastores exist among all the nodes)", func() { + ginkgo.By(fmt.Sprintf("Creating storage class with no zones")) err := verifyPVCCreationFails(client, namespace, nil, nil) errorMsg := "No shared datastores found in the Kubernetes cluster" if !strings.Contains(err.Error(), errorMsg) { @@ -236,8 +236,8 @@ var _ = utils.SIGDescribe("Zone Support", func() { } }) - It("Verify PVC creation fails if only datastore is specified in the storage class (No shared datastores exist among all the nodes)", func() { - By(fmt.Sprintf("Creating storage class with datastore :%s", vsanDatastore1)) + ginkgo.It("Verify PVC creation fails if only datastore is specified in the storage class (No shared datastores exist among all the nodes)", func() { + ginkgo.By(fmt.Sprintf("Creating storage class with datastore :%s", vsanDatastore1)) scParameters[Datastore] = vsanDatastore1 err := verifyPVCCreationFails(client, namespace, scParameters, nil) errorMsg := "No shared datastores found in the Kubernetes cluster" @@ -246,8 +246,8 @@ var _ = utils.SIGDescribe("Zone Support", func() { } }) - It("Verify PVC creation fails if only storage policy is specified in the storage class (No shared datastores exist among all the nodes)", func() { - 
By(fmt.Sprintf("Creating storage class with storage policy :%s", compatPolicy)) + ginkgo.It("Verify PVC creation fails if only storage policy is specified in the storage class (No shared datastores exist among all the nodes)", func() { + ginkgo.By(fmt.Sprintf("Creating storage class with storage policy :%s", compatPolicy)) scParameters[SpbmStoragePolicy] = compatPolicy err := verifyPVCCreationFails(client, namespace, scParameters, nil) errorMsg := "No shared datastores found in the Kubernetes cluster" @@ -256,8 +256,8 @@ var _ = utils.SIGDescribe("Zone Support", func() { } }) - It("Verify PVC creation with compatible policy and datastore without any zones specified in the storage class fails (No shared datastores exist among all the nodes)", func() { - By(fmt.Sprintf("Creating storage class with storage policy :%s and datastore :%s", compatPolicy, vsanDatastore1)) + ginkgo.It("Verify PVC creation with compatible policy and datastore without any zones specified in the storage class fails (No shared datastores exist among all the nodes)", func() { + ginkgo.By(fmt.Sprintf("Creating storage class with storage policy :%s and datastore :%s", compatPolicy, vsanDatastore1)) scParameters[SpbmStoragePolicy] = compatPolicy scParameters[Datastore] = vsanDatastore1 err := verifyPVCCreationFails(client, namespace, scParameters, nil) @@ -267,8 +267,8 @@ var _ = utils.SIGDescribe("Zone Support", func() { } }) - It("Verify PVC creation fails if the availability zone specified in the storage class have no shared datastores under it.", func() { - By(fmt.Sprintf("Creating storage class with zone :%s", zoneC)) + ginkgo.It("Verify PVC creation fails if the availability zone specified in the storage class have no shared datastores under it.", func() { + ginkgo.By(fmt.Sprintf("Creating storage class with zone :%s", zoneC)) zones = append(zones, zoneC) err := verifyPVCCreationFails(client, namespace, nil, zones) errorMsg := "Failed to find a shared datastore matching zone [" + zoneC + "]" @@ -277,8 +277,8 @@ var _ = utils.SIGDescribe("Zone Support", func() { } }) - It("Verify a pod is created and attached to a dynamically created PV, based on multiple zones specified in the storage class. (No shared datastores exist among both zones)", func() { - By(fmt.Sprintf("Creating storage class with the following zones :%s and %s", zoneA, zoneC)) + ginkgo.It("Verify a pod is created and attached to a dynamically created PV, based on multiple zones specified in the storage class. 
(No shared datastores exist among both zones)", func() { + ginkgo.By(fmt.Sprintf("Creating storage class with the following zones :%s and %s", zoneA, zoneC)) zones = append(zones, zoneA) zones = append(zones, zoneC) err := verifyPVCCreationFails(client, namespace, nil, zones) @@ -288,8 +288,8 @@ var _ = utils.SIGDescribe("Zone Support", func() { } }) - It("Verify PVC creation with an invalid VSAN capability along with a compatible zone combination specified in storage class fails", func() { - By(fmt.Sprintf("Creating storage class with %s :%s and zone :%s", Policy_HostFailuresToTolerate, HostFailuresToTolerateCapabilityInvalidVal, zoneA)) + ginkgo.It("Verify PVC creation with an invalid VSAN capability along with a compatible zone combination specified in storage class fails", func() { + ginkgo.By(fmt.Sprintf("Creating storage class with %s :%s and zone :%s", Policy_HostFailuresToTolerate, HostFailuresToTolerateCapabilityInvalidVal, zoneA)) scParameters[Policy_HostFailuresToTolerate] = HostFailuresToTolerateCapabilityInvalidVal zones = append(zones, zoneA) err := verifyPVCCreationFails(client, namespace, scParameters, zones) @@ -299,8 +299,8 @@ var _ = utils.SIGDescribe("Zone Support", func() { } }) - It("Verify a pod is created and attached to a dynamically created PV, based on a VSAN capability, datastore and compatible zone specified in storage class", func() { - By(fmt.Sprintf("Creating storage class with %s :%s, %s :%s, datastore :%s and zone :%s", Policy_ObjectSpaceReservation, ObjectSpaceReservationCapabilityVal, Policy_IopsLimit, IopsLimitCapabilityVal, vsanDatastore1, zoneA)) + ginkgo.It("Verify a pod is created and attached to a dynamically created PV, based on a VSAN capability, datastore and compatible zone specified in storage class", func() { + ginkgo.By(fmt.Sprintf("Creating storage class with %s :%s, %s :%s, datastore :%s and zone :%s", Policy_ObjectSpaceReservation, ObjectSpaceReservationCapabilityVal, Policy_IopsLimit, IopsLimitCapabilityVal, vsanDatastore1, zoneA)) scParameters[Policy_ObjectSpaceReservation] = ObjectSpaceReservationCapabilityVal scParameters[Policy_IopsLimit] = IopsLimitCapabilityVal scParameters[Datastore] = vsanDatastore1 @@ -314,31 +314,31 @@ func verifyPVCAndPodCreationSucceeds(client clientset.Interface, namespace strin framework.ExpectNoError(err, fmt.Sprintf("Failed to create storage class with err: %v", err)) defer client.StorageV1().StorageClasses().Delete(storageclass.Name, nil) - By("Creating PVC using the Storage Class") + ginkgo.By("Creating PVC using the Storage Class") pvclaim, err := framework.CreatePVC(client, namespace, getVSphereClaimSpecWithStorageClass(namespace, "2Gi", storageclass)) framework.ExpectNoError(err) defer framework.DeletePersistentVolumeClaim(client, pvclaim.Name, namespace) var pvclaims []*v1.PersistentVolumeClaim pvclaims = append(pvclaims, pvclaim) - By("Waiting for claim to be in bound phase") + ginkgo.By("Waiting for claim to be in bound phase") persistentvolumes, err := framework.WaitForPVClaimBoundPhase(client, pvclaims, framework.ClaimProvisionTimeout) framework.ExpectNoError(err) - By("Creating pod to attach PV to the node") + ginkgo.By("Creating pod to attach PV to the node") pod, err := framework.CreatePod(client, namespace, nil, pvclaims, false, "") framework.ExpectNoError(err) - By("Verify persistent volume was created on the right zone") + ginkgo.By("Verify persistent volume was created on the right zone") verifyVolumeCreationOnRightZone(persistentvolumes, pod.Spec.NodeName, zones) - By("Verify the volume 
is accessible and available in the pod") + ginkgo.By("Verify the volume is accessible and available in the pod") verifyVSphereVolumesAccessible(client, pod, persistentvolumes) - By("Deleting pod") + ginkgo.By("Deleting pod") framework.DeletePodWithWait(f, client, pod) - By("Waiting for volumes to be detached from the node") + ginkgo.By("Waiting for volumes to be detached from the node") waitForVSphereDiskToDetach(persistentvolumes[0].Spec.VsphereVolume.VolumePath, pod.Spec.NodeName) } @@ -347,7 +347,7 @@ func verifyPVCCreationFails(client clientset.Interface, namespace string, scPara framework.ExpectNoError(err, fmt.Sprintf("Failed to create storage class with err: %v", err)) defer client.StorageV1().StorageClasses().Delete(storageclass.Name, nil) - By("Creating PVC using the Storage Class") + ginkgo.By("Creating PVC using the Storage Class") pvclaim, err := framework.CreatePVC(client, namespace, getVSphereClaimSpecWithStorageClass(namespace, "2Gi", storageclass)) framework.ExpectNoError(err) defer framework.DeletePersistentVolumeClaim(client, pvclaim.Name, namespace) @@ -355,9 +355,9 @@ func verifyPVCCreationFails(client clientset.Interface, namespace string, scPara var pvclaims []*v1.PersistentVolumeClaim pvclaims = append(pvclaims, pvclaim) - By("Waiting for claim to be in bound phase") + ginkgo.By("Waiting for claim to be in bound phase") err = framework.WaitForPersistentVolumeClaimPhase(v1.ClaimBound, client, pvclaim.Namespace, pvclaim.Name, framework.Poll, 2*time.Minute) - Expect(err).To(HaveOccurred()) + gomega.Expect(err).To(gomega.HaveOccurred()) eventList, err := client.CoreV1().Events(pvclaim.Namespace).List(metav1.ListOptions{}) e2elog.Logf("Failure message : %+q", eventList.Items[0].Message) @@ -369,23 +369,23 @@ func verifyPVZoneLabels(client clientset.Interface, namespace string, scParamete framework.ExpectNoError(err, fmt.Sprintf("Failed to create storage class with err: %v", err)) defer client.StorageV1().StorageClasses().Delete(storageclass.Name, nil) - By("Creating PVC using the storage class") + ginkgo.By("Creating PVC using the storage class") pvclaim, err := framework.CreatePVC(client, namespace, getVSphereClaimSpecWithStorageClass(namespace, "2Gi", storageclass)) framework.ExpectNoError(err) defer framework.DeletePersistentVolumeClaim(client, pvclaim.Name, namespace) var pvclaims []*v1.PersistentVolumeClaim pvclaims = append(pvclaims, pvclaim) - By("Waiting for claim to be in bound phase") + ginkgo.By("Waiting for claim to be in bound phase") persistentvolumes, err := framework.WaitForPVClaimBoundPhase(client, pvclaims, framework.ClaimProvisionTimeout) framework.ExpectNoError(err) - By("Verify zone information is present in the volume labels") + ginkgo.By("Verify zone information is present in the volume labels") for _, pv := range persistentvolumes { // Multiple zones are separated with "__" pvZoneLabels := strings.Split(pv.ObjectMeta.Labels["failure-domain.beta.kubernetes.io/zone"], "__") for _, zone := range zones { - Expect(pvZoneLabels).Should(ContainElement(zone), "Incorrect or missing zone labels in pv.") + gomega.Expect(pvZoneLabels).Should(gomega.ContainElement(zone), "Incorrect or missing zone labels in pv.") } } }
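For reference, the pattern applied in every hunk above is the same mechanical conversion: drop the Ginkgo and Gomega dot-imports and qualify every identifier with its package name. A minimal, self-contained sketch of the resulting style follows; the package, suite, and test names are illustrative only and are not taken from this patch.

package example

import (
	"testing"

	"github.com/onsi/ginkgo"
	"github.com/onsi/gomega"
)

// TestExample wires Gomega failures into Ginkgo and runs the spec suite.
func TestExample(t *testing.T) {
	gomega.RegisterFailHandler(ginkgo.Fail)
	ginkgo.RunSpecs(t, "Example Suite")
}

var _ = ginkgo.Describe("Example", func() {
	var values map[string]string

	// Package-qualified ginkgo.BeforeEach replaces the bare BeforeEach from the dot-import.
	ginkgo.BeforeEach(func() {
		values = map[string]string{"key": "value"}
	})

	ginkgo.It("uses package-qualified ginkgo and gomega identifiers", func() {
		// ginkgo.By and gomega.Expect replace the dot-imported By and Expect.
		ginkgo.By("asserting with gomega.Expect instead of the dot-imported Expect")
		gomega.Expect(values["key"]).To(gomega.Equal("value"))
		gomega.Expect(values).NotTo(gomega.BeEmpty())
	})
})

The trade-off is slightly longer call sites in exchange for explicit provenance of every test helper, which also keeps linters that forbid dot-imports satisfied.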